From ce506c81a8ffe614ff5a0724aa2325c1377d1a2a Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 19 Apr 2015 01:31:30 +0200 Subject: [PATCH 001/254] OpenCL backend implementation. --- CMakeLists.txt | 1 - HEADER | 6 + Makefile | 85 +- SOURCE | 2 + cmake/Dependencies.cmake | 9 +- docs/development.md | 2 +- docs/install_apt.md | 21 +- docs/install_osx.md | 6 +- docs/model_zoo.md | 43 +- docs/tutorial/layers.md | 28 +- examples/classification.ipynb | 13 +- examples/filter_visualization.ipynb | 109 +- examples/hdf5_classification.ipynb | 1257 ++++++++--------- examples/hdf5_classification/solver.prototxt | 6 +- examples/hdf5_classification/solver2.prototxt | 6 +- .../hdf5_classification/train_val.prototxt | 4 +- .../hdf5_classification/train_val2.prototxt | 4 +- .../imagenet/bvlc_caffenet_full_conv.prototxt | 216 +++ examples/imagenet/make_imagenet_mean.sh | 10 +- examples/net_surgery.ipynb | 386 ++--- examples/siamese/mnist_siamese.ipynb | 20 +- examples/web_demo/app.py | 17 +- include/caffe/blob.hpp | 215 +-- include/caffe/caffe.hpp | 1 + include/caffe/common.hpp | 30 +- include/caffe/common_layers.hpp | 342 +++-- include/caffe/data_layers.hpp | 8 +- include/caffe/data_transformer.hpp | 3 +- include/caffe/filler.hpp | 19 +- include/caffe/greentea/cl_kernels.hpp | 13 + include/caffe/greentea/greentea.hpp | 80 ++ include/caffe/greentea/greentea_im2col.hpp | 53 + .../greentea/greentea_math_functions.hpp | 134 ++ include/caffe/layer.hpp | 234 +-- include/caffe/loss_layers.hpp | 253 ++-- include/caffe/neuron_layers.hpp | 96 +- include/caffe/solver.hpp | 96 +- include/caffe/splitnet/splitnet.hpp | 29 + include/caffe/syncedmem.hpp | 28 +- include/caffe/util/cudnn.hpp | 34 +- include/caffe/util/im2col.hpp | 13 + include/caffe/vision_layers.hpp | 409 ++++-- models/bvlc_googlenet/readme.md | 1 + protoc_generator.sh | 3 + python/CMakeLists.txt | 2 +- python/caffe/_caffe.cpp | 30 +- python/caffe/classifier.py | 4 +- python/caffe/detector.py | 4 +- python/caffe/io.py | 17 +- python/caffe/pycaffe.py | 6 +- python/caffe/test/test_python_layer.py | 24 +- python/classify.py | 16 +- python/detect.py | 15 +- python/requirements.txt | 2 +- scripts/travis/travis_install.sh | 1 + src/caffe/blob.cpp | 593 ++++---- src/caffe/common.cpp | 406 ++++-- src/caffe/data_transformer.cpp | 92 +- src/caffe/greentea/cl_kernels.cpp | 25 + src/caffe/greentea/cl_kernels.sh | 57 + .../greentea/cl_kernels/activation_kernels.cl | 24 + src/caffe/greentea/cl_kernels/aux_kernels.cl | 22 + .../greentea/cl_kernels/channel_kernels.cl | 176 +++ .../cl_kernels/im2col_sk_gpu_kernel.cl | 84 ++ .../greentea/cl_kernels/pooling_sk_kernels.cl | 103 ++ .../greentea/cl_kernels/softmax_loss_gpu.cl | 61 + src/caffe/greentea/greentea.cpp | 72 + src/caffe/greentea/greentea_im2col.cpp | 67 + .../greentea/greentea_math_functions.cpp | 584 ++++++++ src/caffe/layer_factory.cpp | 44 +- src/caffe/layers/accuracy_layer.cpp | 72 +- src/caffe/layers/argmax_layer.cpp | 4 +- src/caffe/layers/base_conv_layer.cpp | 177 +-- src/caffe/layers/base_data_layer.cpp | 4 +- src/caffe/layers/base_data_layer.cu | 2 +- src/caffe/layers/concat_layer.cpp | 135 +- src/caffe/layers/concat_layer.cu | 71 +- src/caffe/layers/contrastive_loss_layer.cpp | 8 +- src/caffe/layers/conv_layer.cpp | 19 +- src/caffe/layers/conv_sk_layer.cpp | 158 +++ src/caffe/layers/conv_sk_layer.cu | 199 +++ src/caffe/layers/cudnn_conv_layer.cpp | 12 +- src/caffe/layers/cudnn_conv_layer.cu | 94 +- src/caffe/layers/cudnn_pooling_layer.cpp | 10 +- src/caffe/layers/cudnn_pooling_layer.cu | 12 +- src/caffe/layers/cudnn_relu_layer.cpp | 4 +- src/caffe/layers/cudnn_relu_layer.cu | 16 +- src/caffe/layers/cudnn_sigmoid_layer.cpp | 4 +- src/caffe/layers/cudnn_sigmoid_layer.cu | 16 +- src/caffe/layers/cudnn_softmax_layer.cpp | 12 +- src/caffe/layers/cudnn_softmax_layer.cu | 15 +- src/caffe/layers/cudnn_tanh_layer.cpp | 4 +- src/caffe/layers/cudnn_tanh_layer.cu | 17 +- src/caffe/layers/data_layer.cpp | 67 +- src/caffe/layers/datarandtransform_layer.cpp | 235 +++ src/caffe/layers/dropout_layer.cpp | 2 +- src/caffe/layers/dummy_data_layer.cpp | 62 +- src/caffe/layers/eltwise_layer.cpp | 13 +- src/caffe/layers/euclidean_loss_layer.cpp | 8 +- src/caffe/layers/flatten_layer.cpp | 15 +- src/caffe/layers/flatten_layer.cu | 23 + src/caffe/layers/hdf5_data_layer.cpp | 64 +- src/caffe/layers/hdf5_data_layer.cu | 19 +- src/caffe/layers/hdf5_output_layer.cpp | 54 +- src/caffe/layers/hdf5_output_layer.cu | 17 +- src/caffe/layers/im2col_layer.cpp | 4 +- src/caffe/layers/image_data_layer.cpp | 21 +- src/caffe/layers/infogain_loss_layer.cpp | 2 +- src/caffe/layers/inner_product_layer.cpp | 95 +- src/caffe/layers/loss_layer.cpp | 3 +- src/caffe/layers/lrn_layer.cpp | 12 +- src/caffe/layers/lrn_layer.cu | 39 +- src/caffe/layers/memory_data_layer.cpp | 25 +- src/caffe/layers/mvn_layer.cpp | 101 +- src/caffe/layers/neuron_layer.cpp | 2 +- src/caffe/layers/pooling_layer.cpp | 323 +++-- src/caffe/layers/pooling_layer.cu | 249 ++-- src/caffe/layers/pooling_sk_layer.cpp | 139 ++ src/caffe/layers/pooling_sk_layer.cu | 374 +++++ src/caffe/layers/relu_layer.cu | 67 +- src/caffe/layers/silence_layer.cu | 35 +- src/caffe/layers/slice_layer.cpp | 142 +- src/caffe/layers/slice_layer.cu | 70 +- src/caffe/layers/softmax_layer.cpp | 62 +- src/caffe/layers/softmax_layer.cu | 202 ++- src/caffe/layers/softmax_loss_layer.cpp | 45 +- src/caffe/layers/softmax_loss_layer.cu | 123 +- src/caffe/layers/split_layer.cpp | 3 +- src/caffe/layers/window_data_layer.cpp | 186 +-- src/caffe/net.cpp | 458 +++--- src/caffe/proto/caffe.proto | 146 +- src/caffe/solver.cpp | 83 +- src/caffe/splitnet/splitnet.cpp | 18 + src/caffe/syncedmem.cpp | 119 +- src/caffe/test/test_accuracy_layer.cpp | 108 +- src/caffe/test/test_blob.cpp | 59 +- src/caffe/test/test_concat_layer.cpp | 128 +- src/caffe/test/test_hdf5_output_layer.cpp | 9 +- src/caffe/test/test_hdf5data_layer.cpp | 16 +- src/caffe/test/test_lrn_layer.cpp | 38 - src/caffe/test/test_net.cpp | 17 +- src/caffe/test/test_neuron_layer.cpp | 196 --- src/caffe/test/test_pooling_layer.cpp | 8 + src/caffe/test/test_slice_layer.cpp | 6 +- src/caffe/test/test_solver.cpp | 17 +- src/caffe/util/im2col.cu | 154 +- src/caffe/util/io.cpp | 10 +- src/caffe/util/upgrade_proto.cpp | 479 ++++--- test.txt | 1 + tools/caffe.cpp | 17 +- tools/extract_features.cpp | 6 +- 151 files changed, 8057 insertions(+), 4880 deletions(-) create mode 100644 HEADER create mode 100644 SOURCE create mode 100644 examples/imagenet/bvlc_caffenet_full_conv.prototxt create mode 100644 include/caffe/greentea/cl_kernels.hpp create mode 100644 include/caffe/greentea/greentea.hpp create mode 100644 include/caffe/greentea/greentea_im2col.hpp create mode 100644 include/caffe/greentea/greentea_math_functions.hpp create mode 100644 include/caffe/splitnet/splitnet.hpp create mode 100644 protoc_generator.sh create mode 100644 src/caffe/greentea/cl_kernels.cpp create mode 100644 src/caffe/greentea/cl_kernels.sh create mode 100644 src/caffe/greentea/cl_kernels/activation_kernels.cl create mode 100644 src/caffe/greentea/cl_kernels/aux_kernels.cl create mode 100644 src/caffe/greentea/cl_kernels/channel_kernels.cl create mode 100644 src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl create mode 100644 src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl create mode 100644 src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl create mode 100644 src/caffe/greentea/greentea.cpp create mode 100644 src/caffe/greentea/greentea_im2col.cpp create mode 100644 src/caffe/greentea/greentea_math_functions.cpp create mode 100644 src/caffe/layers/conv_sk_layer.cpp create mode 100644 src/caffe/layers/conv_sk_layer.cu create mode 100644 src/caffe/layers/datarandtransform_layer.cpp create mode 100644 src/caffe/layers/flatten_layer.cu create mode 100644 src/caffe/layers/pooling_sk_layer.cpp create mode 100644 src/caffe/layers/pooling_sk_layer.cu create mode 100644 src/caffe/splitnet/splitnet.cpp create mode 100644 test.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 74fa70c9d20..54b044d347b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,7 +20,6 @@ caffe_option(BUILD_python "Build Python wrapper" ON) set(python_version "2" CACHE STRING "Specify which python version to use") caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE) caffe_option(BUILD_docs "Build documentation" ON IF UNIX OR APPLE) -caffe_option(BUILD_python_layer "Build the caffe python layer" ON) # ---[ Dependencies include(cmake/Dependencies.cmake) diff --git a/HEADER b/HEADER new file mode 100644 index 00000000000..3a0cb78f1f0 --- /dev/null +++ b/HEADER @@ -0,0 +1,6 @@ +#ifndef GREENTEA_CL_KERNELS_HPP_ +#define GREENTEA_CL_KERNELS_HPP_ +#endif +#ifndef GREENTEA_CL_KERNELS_HPP_ +#define GREENTEA_CL_KERNELS_HPP_ +#endif diff --git a/Makefile b/Makefile index db0f531eaa0..966e263a011 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,10 @@ PROJECT := caffe CONFIG_FILE := Makefile.config include $(CONFIG_FILE) +CXXFLAGS += -std=c++11 -Wno-deprecated-declarations +LINKFLAGS += -std=c++11 -Wno-deprecated-declarations +NVCCFLAGS += -Xcompiler "-Wno-deprecated-declarations" -Xlinker "-Wno-deprecated-declarations" -Xarchive "-Wno-deprecated-declarations" -Xnvlink "-Wno-deprecated-declarations" + BUILD_DIR_LINK := $(BUILD_DIR) RELEASE_BUILD_DIR ?= .$(BUILD_DIR)_release DEBUG_BUILD_DIR ?= .$(BUILD_DIR)_debug @@ -16,6 +20,7 @@ else OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) endif + # All of the directories containing code. SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) @@ -143,6 +148,63 @@ ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) +############################## +# GreenTea backend related include and lib +############################## + +ifeq ($(USE_GREENTEA),1) + # Find a valid OpenCL library + # TODO: Validate and complete this based on different SDKs + ifdef OPENCL_INC + CLLINC = '$(OPENCL_INC)' + endif + + ifdef OPENCL_LIB + CLLIBS = '$(OPENCL_LIB)' + endif + + ifdef OPENCLROOT + CLLIBS = '$(OPENCLROOT)' + endif + + ifdef CUDA_PATH + CLLIBS = '$(CUDA_PATH)/lib/x64' + endif + + ifdef INTELOCLSDKROOT + CLLIBS = '$(INTELOCLSDKROOT)/lib/x64' + endif + + ifdef AMDAPPSDKROOT + CLLIBS = '$(AMDAPPSDKROOT)/lib/x86_64' + CLLINC = '$(AMDAPPSDKROOT)/include' + endif + + # Requires valid OpenCL library + LIBRARY_DIRS += $(CLLIBS) + # Requires valid OpenCL headers and valid ViennaCL + INCLUDE_DIRS += $(CLLINC) $(VIENNACL_DIR) + # Requires OpenCL compile library flag and librt + LIBRARIES += viennacl OpenCL rt + # Additional flags + COMMON_FLAGS += -DUSE_GREENTEA -DVIENNACL_WITH_OPENCL + + # Viennacl runtime debug output + ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DVIENNACL_DEBUG_ALL + endif + + # Use AMD clBLAS, TODO: Not implemented yet + ifeq ($(USE_CLBLAS), 1) + LIBRARIES += clblas + COMMON_FLAGS += -USE_CLBLAS + endif + + CL_KERNELS_CPP = src/caffe/greentea/cl_kernels.cpp + CL_KERNELS = src/caffe/greentea/cl_kernels/*.cl + CL_KERNELS_SH = src/caffe/greentea/cl_kernels.sh +endif + ############################## # Derive include and lib directories ############################## @@ -159,7 +221,7 @@ INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include ifneq ($(CPU_ONLY), 1) INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) LIBRARY_DIRS += $(CUDA_LIB_DIR) - LIBRARIES := cudart cublas curand + LIBRARIES += cudart cublas curand endif LIBRARIES += glog gflags protobuf leveldb snappy \ lmdb boost_system hdf5_hl hdf5 m \ @@ -171,7 +233,6 @@ WARNINGS := -Wall -Wno-sign-compare # Set build directories ############################## -DISTRIBUTE_DIR ?= distribute DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib DIST_ALIASES := dist ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) @@ -233,15 +294,13 @@ endif # libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 ifeq ($(OSX), 1) CXX := /usr/bin/clang++ - ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') - ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) - CXXFLAGS += -stdlib=libstdc++ - LINKFLAGS += -stdlib=libstdc++ - endif - # clang throws this warning for cuda headers - WARNINGS += -Wno-unneeded-internal-declaration + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') + ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration # gtest needs to use its own tuple to not conflict with clang COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 # boost::thread is called boost_thread-mt to mark multithreading on OS X @@ -382,7 +441,7 @@ endif py mat py$(PROJECT) mat$(PROJECT) proto runtest \ superclean supercleanlist supercleanfiles warn everything -all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples +all: $(CL_KERNELS_CPP) $(STATIC_NAME) $(DYNAMIC_NAME) tools examples everything: $(EVERYTHING_TARGETS) @@ -552,6 +611,10 @@ $(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ -Wl,-rpath,$(ORIGIN)/../../lib +# Copy the OpenCL kernels into C++ char strings +$(CL_KERNELS_CPP) : $(CL_KERNELS) + $(CL_KERNELS_SH) + proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ diff --git a/SOURCE b/SOURCE new file mode 100644 index 00000000000..d3039a2bb7b --- /dev/null +++ b/SOURCE @@ -0,0 +1,2 @@ +#include "+include/caffe/greentea/cl_kernels.hpp+" +#include "include/caffe/greentea/cl_kernels.hpp" diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index f328e8246ab..b1ac96c6777 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -25,7 +25,7 @@ include(cmake/ProtoBuf.cmake) # ---[ HDF5 find_package(HDF5 COMPONENTS HL REQUIRED) -include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR}) +include_directories(SYSTEM ${HDF5_INCLUDE_DIRS}) list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES}) # ---[ LMDB @@ -35,7 +35,7 @@ list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES}) # ---[ LevelDB find_package(LevelDB REQUIRED) -include_directories(SYSTEM ${LevelDB_INCLUDE}) +include_directories(SYSTEM ${LEVELDB_INCLUDE}) list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES}) # ---[ Snappy @@ -127,11 +127,6 @@ if(BUILD_python) endif() if(PYTHONLIBS_FOUND AND NUMPY_FOUND AND Boost_PYTHON_FOUND) set(HAVE_PYTHON TRUE) - if(BUILD_python_layer) - add_definitions(-DWITH_PYTHON_LAYER) - include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) - list(APPEND Caffe_LINKER_LIBS ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) - endif() endif() endif() diff --git a/docs/development.md b/docs/development.md index ccb6a29701d..fe54864bd35 100644 --- a/docs/development.md +++ b/docs/development.md @@ -30,7 +30,7 @@ Similarly for IPython notebooks: simply include `"include_in_docs": true` in the Other docs, such as installation guides, are written in the `docs` directory and manually linked to from the `index.md` page. -We strive to provide lots of usage examples, and to document all code in docstrings. +We strive to provide provide lots of usage examples, and to document all code in docstrings. We absolutely appreciate any contribution to this effort! ### Versioning diff --git a/docs/install_apt.md b/docs/install_apt.md index 75f8bec0e95..89bc9a00aef 100644 --- a/docs/install_apt.md +++ b/docs/install_apt.md @@ -8,24 +8,12 @@ title: Installation: Ubuntu sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libboost-all-dev libhdf5-serial-dev -**CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions. -Install the library and latest driver separately; the driver bundled with the library is usually out-of-date. -This can be skipped for CPU-only installation. - -**BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance. - -**Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface. - **Remaining dependencies, 14.04** -Everything is packaged in 14.04. - sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev protobuf-compiler **Remaining dependencies, 12.04** -These dependencies need manual installation in 12.04. - # glog wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz tar zxvf glog-0.3.3.tar.gz @@ -40,10 +28,17 @@ These dependencies need manual installation in 12.04. export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1 make && make install # lmdb - git clone https://gitorious.org/mdb/mdb.git + git clone git://gitorious.org/mdb/mdb.git cd mdb/libraries/liblmdb make && make install Note that glog does not compile with the most recent gflags version (2.1), so before that is resolved you will need to build with glog first. +**CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions. +Install the library and latest driver separately; the driver bundled with the library is usually out-of-date. + +**BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance. + +**Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface. + Continue with [compilation](installation.html#compilation). diff --git a/docs/install_osx.md b/docs/install_osx.md index 39cb02fe232..55b098731fc 100644 --- a/docs/install_osx.md +++ b/docs/install_osx.md @@ -18,7 +18,7 @@ In other `ENV` settings, things may not work as expected. brew install --fresh -vd snappy leveldb gflags glog szip lmdb # need the homebrew science source for OpenCV and hdf5 brew tap homebrew/science - brew install hdf5 opencv + hdf5 opencv If using Anaconda Python, a modification to the OpenCV formula might be needed Do `brew edit opencv` and change the lines that look like the two lines below to exactly the two lines below. @@ -32,7 +32,7 @@ If using Anaconda Python, HDF5 is bundled and the `hdf5` formula can be skipped. # with Python pycaffe needs dependencies built from source brew install --build-from-source --with-python --fresh -vd protobuf - brew install --build-from-source --fresh -vd boost boost-python + brew install --build-from-source --fresh -vd boost # without Python the usual installation suffices brew install protobuf boost @@ -115,7 +115,7 @@ Then, whenever you want to update homebrew, switch back to the master branches, # Update homebrew; hopefully this works without errors! brew update - # Switch back to the caffe branches with the formulae that you modified earlier + # Switch back to the caffe branches with the forumlae that you modified earlier cd /usr/local git rebase master caffe # Fix any merge conflicts and commit to caffe branch diff --git a/docs/model_zoo.md b/docs/model_zoo.md index 06dc0a49ec7..ad30d0acd55 100644 --- a/docs/model_zoo.md +++ b/docs/model_zoo.md @@ -3,30 +3,28 @@ title: Model Zoo --- # Caffe Model Zoo -Lots of researchers and engineers have made Caffe models for different tasks with all kinds of architectures and data. -These models are learned and applied for problems ranging from simple regression, to large-scale visual classification, to Siamese networks for image similarity, to speech and robotics applications. - -To help share these models, we introduce the model zoo framework: +Lots of people have used Caffe to train models of different architectures and applied to different problems, ranging from simple regression to AlexNet-alikes to Siamese networks for image similarity to speech applications. +To lower the friction of sharing these models, we introduce the model zoo framework: - A standard format for packaging Caffe model info. -- Tools to upload/download model info to/from Github Gists, and to download trained `.caffemodel` binaries. +- Tools to upload/download model info to/from Github Gists, and to download trained `.caffemodel` parameters. - A central wiki page for sharing model info Gists. -## Where to get trained models +## BVLC Reference Models -First of all, we bundle BVLC-trained models for unrestricted, out of the box use. -
-See the [BVLC model license](#bvlc-model-license) for details. +First of all, we provide some trained models out of the box. Each one of these can be downloaded by running `scripts/download_model_binary.py ` where `` is specified below: -- **BVLC Reference CaffeNet** in `models/bvlc_reference_caffenet`: AlexNet trained on ILSVRC 2012, with a minor variation from the version as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Jeff Donahue @jeffdonahue) -- **BVLC AlexNet** in `models/bvlc_alexnet`: AlexNet trained on ILSVRC 2012, almost exactly as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Evan Shelhamer @shelhamer) -- **BVLC Reference R-CNN ILSVRC-2013** in `models/bvlc_reference_rcnn_ilsvrc13`: pure Caffe implementation of [R-CNN](https://github.com/rbgirshick/rcnn) as described by Girshick et al. in CVPR 2014. (Trained by Ross Girshick @rbgirshick) -- **BVLC GoogLeNet** in `models/bvlc_googlenet`: GoogLeNet trained on ILSVRC 2012, almost exactly as described in [Going Deeper with Convolutions](http://arxiv.org/abs/1409.4842) by Szegedy et al. in ILSVRC 2014. (Trained by Sergio Guadarrama @sguada) +- **BVLC Reference CaffeNet** in `models/bvlc_reference_caffenet`: AlexNet trained on ILSVRC 2012, with a minor variation from the version as described in the NIPS 2012 paper. (Trained by Jeff Donahue @jeffdonahue) +- **BVLC AlexNet** in `models/bvlc_alexnet`: AlexNet trained on ILSVRC 2012, almost exactly as described in NIPS 2012. (Trained by Evan Shelhamer @shelhamer) +- **BVLC Reference R-CNN ILSVRC-2013** in `models/bvlc_reference_rcnn_ilsvrc13`: pure Caffe implementation of [R-CNN](https://github.com/rbgirshick/rcnn). (Trained by Ross Girshick @rbgirshick) +- **BVLC GoogleNet** in `models/bvlc_googlenet`: GoogleNet trained on ILSVRC 2012, almost exactly as described in [GoogleNet](http://arxiv.org/abs/1409.4842). (Trained by Sergio Guadarrama @sguada) + -**Community models** made by Caffe users are posted to a publicly editable [wiki page](https://github.com/BVLC/caffe/wiki/Model-Zoo). -These models are subject to conditions of their respective authors such as citation and license. -Thank you for sharing your models! +## Community Models + +The publicly-editable [Caffe Model Zoo wiki](https://github.com/BVLC/caffe/wiki/Model-Zoo) catalogues user-made models. +Refer to the model details for authorship and conditions -- please respect licenses and citations. ## Model info format @@ -46,7 +44,7 @@ A caffe model is distributed as a directory containing: Github Gist is a good format for model info distribution because it can contain multiple files, is versionable, and has in-browser syntax highlighting and markdown rendering. -`scripts/upload_model_to_gist.sh ` uploads non-binary files in the model directory as a Github Gist and prints the Gist ID. If `gist_id` is already part of the `/readme.md` frontmatter, then updates existing Gist. +- `scripts/upload_model_to_gist.sh `: uploads non-binary files in the model directory as a Github Gist and prints the Gist ID. If `gist_id` is already part of the `/readme.md` frontmatter, then updates existing Gist. Try doing `scripts/upload_model_to_gist.sh models/bvlc_alexnet` to test the uploading (don't forget to delete the uploaded gist afterward). @@ -58,13 +56,4 @@ It is up to the user where to host the `.caffemodel` file. We host our BVLC-provided models on our own server. Dropbox also works fine (tip: make sure that `?dl=1` is appended to the end of the URL). -`scripts/download_model_binary.py ` downloads the `.caffemodel` from the URL specified in the `/readme.md` frontmatter and confirms SHA1. - -## BVLC model license - -The Caffe models bundled by the BVLC are released for unrestricted use. - -These models are trained on data from the [ImageNet project](http://www.image-net.org/) and training data includes internet photos that may be subject to copyright. - -Our present understanding as researchers is that there is no restriction placed on the open release of these learned model weights, since none of the original images are distributed in whole or in part. -To the extent that the interpretation arises that weights are derivative works of the original copyright holder and they assert such a copyright, UC Berkeley makes no representations as to what use is allowed other than to consider our present release in the spirit of fair use in the academic mission of the university to disseminate knowledge and tools as broadly as possible without restriction. +- `scripts/download_model_binary.py `: downloads the `.caffemodel` from the URL specified in the `/readme.md` frontmatter and confirms SHA1. diff --git a/docs/tutorial/layers.md b/docs/tutorial/layers.md index 839939f5ad6..34bb48050e8 100644 --- a/docs/tutorial/layers.md +++ b/docs/tutorial/layers.md @@ -453,20 +453,20 @@ The `SLICE` layer is a utility layer that slices an input layer to multiple outp * Sample - layers { - name: "slicer_label" - type: SLICE - bottom: "label" - ## Example of label with a shape N x 3 x 1 x 1 - top: "label1" - top: "label2" - top: "label3" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } - } + layers { + name: "slicer_label" + type: SLICE + bottom: "label" + ## Example of label with a shape N x 3 x 1 x 1 + top: "label1" + top: "label2" + top: "label3" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } + } `slice_dim` indicates the target dimension and can assume only two values: 0 for num or 1 for channel; `slice_point` indicates indexes in the selected dimension (the number of indexes must be equal to the number of top blobs minus one). diff --git a/examples/classification.ipynb b/examples/classification.ipynb index 0babf79f304..6f8fa4252e6 100644 --- a/examples/classification.ipynb +++ b/examples/classification.ipynb @@ -4,7 +4,7 @@ "example_name": "ImageNet classification", "include_in_docs": true, "priority": 1, - "signature": "sha256:a2b12abaa1eb252f436d59833c08ab97948c8a7a0513197f31afad0a0690e318" + "signature": "sha256:918b797b1b7d78125c8f1e3c84756b0679120cbe1071ce7fee7aeafef0fbae55" }, "nbformat": 3, "nbformat_minor": 0, @@ -18,9 +18,9 @@ "Classifying ImageNet: the instant Caffe way\n", "===========================================\n", "\n", - "Caffe has a Python interface, pycaffe, with a `caffe.Net` interface for models. There are both Python and MATLAB interfaces. While this example uses the off-the-shelf Python `caffe.Classifier` interface there is also a MATLAB example at `matlab/caffe/matcaffe_demo.m`.\n", + "Caffe provides a general Python interface for models with `caffe.Net` in `python/caffe/pycaffe.py`, but to make off-the-shelf classification easy we provide a `caffe.Classifier` class and `classify.py` script. Both Python and MATLAB wrappers are provided. However, the Python wrapper has more features so we will describe it here. For MATLAB, refer to `matlab/caffe/matcaffe_demo.m`.\n", "\n", - "Before we begin, you must compile Caffe. You should add the Caffe module to your `PYTHONPATH` although this example includes it automatically. If you haven't yet done so, please refer to the [installation instructions](http://caffe.berkeleyvision.org/installation.html). This example uses our pre-trained CaffeNet model, an ILSVRC12 image classifier. You can download it by running `./scripts/download_model_binary.py models/bvlc_reference_caffenet` or let the first step of this example download it for you.\n", + "Before we begin, you must compile Caffe and install the python wrapper by setting your `PYTHONPATH`. If you haven't yet done so, please refer to the [installation instructions](installation.html). This example uses our pre-trained CaffeNet model, an ILSVRC12 image classifier. You can download it by running `./scripts/download_model_binary.py models/bvlc_reference_caffenet`.\n", "\n", "Ready? Let's start." ] @@ -44,12 +44,7 @@ "# and the image you would like to classify.\n", "MODEL_FILE = '../models/bvlc_reference_caffenet/deploy.prototxt'\n", "PRETRAINED = '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'\n", - "IMAGE_FILE = 'images/cat.jpg'\n", - "\n", - "import os\n", - "if not os.path.isfile(PRETRAINED):\n", - " print(\"Downloading pre-trained CaffeNet model...\")\n", - " !../scripts/download_model_binary.py ../models/bvlc_reference_caffenet" + "IMAGE_FILE = 'images/cat.jpg'" ], "language": "python", "metadata": {}, diff --git a/examples/filter_visualization.ipynb b/examples/filter_visualization.ipynb index 7125907f35e..0bfdb5caf68 100644 --- a/examples/filter_visualization.ipynb +++ b/examples/filter_visualization.ipynb @@ -4,7 +4,7 @@ "example_name": "Filter visualization", "include_in_docs": true, "priority": 2, - "signature": "sha256:64c88129e2eeaa956e4c8a26467ff6119f24ea3d7ef15f8217326249973bea8f" + "signature": "sha256:44536e4f82eb5748b6a3bb6fcfca01bc6c5815dad2641c994dab031f452b7606" }, "nbformat": 3, "nbformat_minor": 0, @@ -24,7 +24,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First, import required modules, set plotting parameters, and run `./scripts/download_model_binary.py models/bvlc_reference_caffenet` to get the pretrained CaffeNet model if it hasn't already been fetched." + "First, import required modules and set plotting parameters" ] }, { @@ -44,12 +44,7 @@ "\n", "plt.rcParams['figure.figsize'] = (10, 10)\n", "plt.rcParams['image.interpolation'] = 'nearest'\n", - "plt.rcParams['image.cmap'] = 'gray'\n", - "\n", - "import os\n", - "if not os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):\n", - " print(\"Downloading pre-trained CaffeNet model...\")\n", - " !../scripts/download_model_binary.py ../models/bvlc_reference_caffenet" + "plt.rcParams['image.cmap'] = 'gray'" ], "language": "python", "metadata": {}, @@ -60,7 +55,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Set Caffe to CPU mode, load the net in the test phase for inference, and configure input preprocessing." + "Run `./scripts/download_model_binary.py models/bvlc_reference_caffenet` to get the pretrained CaffeNet model, load the net, specify test phase and CPU mode, and configure input preprocessing." ] }, { @@ -68,16 +63,12 @@ "collapsed": false, "input": [ "caffe.set_mode_cpu()\n", - "net = caffe.Net(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',\n", - " caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n", - " caffe.TEST)\n", - "\n", + "net = caffe.Classifier(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',\n", + " caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')\n", "# input preprocessing: 'data' is the name of the input blob == net.inputs[0]\n", - "transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n", - "transformer.set_transpose('data', (2,0,1))\n", - "transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel\n", - "transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]\n", - "transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB" + "net.transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # ImageNet mean\n", + "net.transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]\n", + "net.transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB" ], "language": "python", "metadata": {}, @@ -88,36 +79,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Classify the image by reshaping the net for the single input then doing the forward pass." + "Run a classification pass" ] }, { "cell_type": "code", "collapsed": false, "input": [ - "net.blobs['data'].reshape(1,3,227,227)\n", - "net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(caffe_root + 'examples/images/cat.jpg'))\n", - "out = net.forward()\n", - "print(\"Predicted class is #{}.\".format(out['prob'].argmax()))" + "scores = net.predict([caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')])" ], "language": "python", "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "Predicted class is #281.\n" - ] - } - ], + "outputs": [], "prompt_number": 3 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The layer features and their shapes (1 is the batch size, corresponding to the single input image in this example)." + "The layer features and their shapes (10 is the batch size, corresponding to the the ten subcrops used by Krizhevsky et al.)" ] }, { @@ -134,21 +114,21 @@ "output_type": "pyout", "prompt_number": 4, "text": [ - "[('data', (1, 3, 227, 227)),\n", - " ('conv1', (1, 96, 55, 55)),\n", - " ('pool1', (1, 96, 27, 27)),\n", - " ('norm1', (1, 96, 27, 27)),\n", - " ('conv2', (1, 256, 27, 27)),\n", - " ('pool2', (1, 256, 13, 13)),\n", - " ('norm2', (1, 256, 13, 13)),\n", - " ('conv3', (1, 384, 13, 13)),\n", - " ('conv4', (1, 384, 13, 13)),\n", - " ('conv5', (1, 256, 13, 13)),\n", - " ('pool5', (1, 256, 6, 6)),\n", - " ('fc6', (1, 4096)),\n", - " ('fc7', (1, 4096)),\n", - " ('fc8', (1, 1000)),\n", - " ('prob', (1, 1000))]" + "[('data', (10, 3, 227, 227)),\n", + " ('conv1', (10, 96, 55, 55)),\n", + " ('pool1', (10, 96, 27, 27)),\n", + " ('norm1', (10, 96, 27, 27)),\n", + " ('conv2', (10, 256, 27, 27)),\n", + " ('pool2', (10, 256, 13, 13)),\n", + " ('norm2', (10, 256, 13, 13)),\n", + " ('conv3', (10, 384, 13, 13)),\n", + " ('conv4', (10, 384, 13, 13)),\n", + " ('conv5', (10, 256, 13, 13)),\n", + " ('pool5', (10, 256, 6, 6)),\n", + " ('fc6', (10, 4096, 1, 1)),\n", + " ('fc7', (10, 4096, 1, 1)),\n", + " ('fc8', (10, 1000, 1, 1)),\n", + " ('prob', (10, 1000, 1, 1))]" ] } ], @@ -158,7 +138,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The parameters and their shapes. The parameters are `net.params['name'][0]` while biases are `net.params['name'][1]`." + "The parameters and their shapes (each of these layers also has biases which are omitted here)" ] }, { @@ -180,9 +160,9 @@ " ('conv3', (384, 256, 3, 3)),\n", " ('conv4', (384, 192, 3, 3)),\n", " ('conv5', (256, 192, 3, 3)),\n", - " ('fc6', (4096, 9216)),\n", - " ('fc7', (4096, 4096)),\n", - " ('fc8', (1000, 4096))]" + " ('fc6', (1, 1, 4096, 9216)),\n", + " ('fc7', (1, 1, 4096, 4096)),\n", + " ('fc8', (1, 1, 1000, 4096))]" ] } ], @@ -200,7 +180,7 @@ "collapsed": false, "input": [ "# take an array of shape (n, height, width) or (n, height, width, channels)\n", - "# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)\n", + "# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)\n", "def vis_square(data, padsize=1, padval=0):\n", " data -= data.min()\n", " data /= data.max()\n", @@ -232,7 +212,8 @@ "cell_type": "code", "collapsed": false, "input": [ - "plt.imshow(transformer.deprocess('data', net.blobs['data'].data[0]))" + "# index four is the center crop\n", + "plt.imshow(net.transformer.deprocess('data', net.blobs['data'].data[4]))" ], "language": "python", "metadata": {}, @@ -288,7 +269,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv1'].data[0, :36]\n", + "feat = net.blobs['conv1'].data[4, :36]\n", "vis_square(feat, padval=1)" ], "language": "python", @@ -346,7 +327,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv2'].data[0, :36]\n", + "feat = net.blobs['conv2'].data[4, :36]\n", "vis_square(feat, padval=1)" ], "language": "python", @@ -374,7 +355,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv3'].data[0]\n", + "feat = net.blobs['conv3'].data[4]\n", "vis_square(feat, padval=0.5)" ], "language": "python", @@ -402,7 +383,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv4'].data[0]\n", + "feat = net.blobs['conv4'].data[4]\n", "vis_square(feat, padval=0.5)" ], "language": "python", @@ -430,7 +411,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv5'].data[0]\n", + "feat = net.blobs['conv5'].data[4]\n", "vis_square(feat, padval=0.5)" ], "language": "python", @@ -458,7 +439,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['pool5'].data[0]\n", + "feat = net.blobs['pool5'].data[4]\n", "vis_square(feat, padval=1)" ], "language": "python", @@ -488,7 +469,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['fc6'].data[0]\n", + "feat = net.blobs['fc6'].data[4]\n", "plt.subplot(2, 1, 1)\n", "plt.plot(feat.flat)\n", "plt.subplot(2, 1, 2)\n", @@ -519,7 +500,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['fc7'].data[0]\n", + "feat = net.blobs['fc7'].data[4]\n", "plt.subplot(2, 1, 1)\n", "plt.plot(feat.flat)\n", "plt.subplot(2, 1, 2)\n", @@ -550,7 +531,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['prob'].data[0]\n", + "feat = net.blobs['prob'].data[4]\n", "plt.plot(feat.flat)" ], "language": "python", @@ -595,7 +576,7 @@ " labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\\t')\n", "\n", "# sort top k predictions from softmax output\n", - "top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]\n", + "top_k = net.blobs['prob'].data[4].flatten().argsort()[-1:-6:-1]\n", "print labels[top_k]" ], "language": "python", diff --git a/examples/hdf5_classification.ipynb b/examples/hdf5_classification.ipynb index 19d27372754..51d854fa142 100644 --- a/examples/hdf5_classification.ipynb +++ b/examples/hdf5_classification.ipynb @@ -4,7 +4,7 @@ "example_name": "Off-the-shelf SGD for classification", "include_in_docs": true, "priority": 4, - "signature": "sha256:741422697d76b1667287180dc7c6360cf105ee774b1e2def800dc8fe80f78f67" + "signature": "sha256:c3b84add3bb83e91137f396a48f46d46bf7921b242fc42c58390b30806e5a028" }, "nbformat": 3, "nbformat_minor": 0, @@ -15,48 +15,32 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Caffeinated Logistic Regression of HDF5 Data\n", + "# Classification with HDF5 data\n", "\n", - "While Caffe is made for deep networks it can likewise represent \"shallow\" models like logistic regression for classification. We'll do simple logistic regression on synthetic data that we'll generate and save to HDF5 to feed vectors to Caffe. Once that model is done, we'll add layers to improve accuracy. That's what Caffe is about: define a model, experiment, and then deploy." + "In this example we'll use Caffe to do simple logistic regression on a simple binary dataset, showcasing HDF5DataLayer functionality." ] }, { "cell_type": "code", "collapsed": false, "input": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib inline\n", - "\n", - "# Make sure that caffe is on the python path:\n", - "caffe_root = '../' # this file is expected to be in {caffe_root}/examples\n", - "import sys\n", - "sys.path.insert(0, caffe_root + 'python')\n", - "\n", - "import caffe\n", - "\n", "import os\n", "import h5py\n", "import shutil\n", - "import tempfile\n", - "\n", - "# You may need to 'pip install scikit-learn'\n", "import sklearn\n", + "import tempfile\n", + "import numpy as np\n", + "import pandas as pd\n", "import sklearn.datasets\n", - "import sklearn.linear_model" + "import sklearn.linear_model\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline" ], "language": "python", "metadata": {}, "outputs": [], "prompt_number": 1 }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Synthesize a dataset of 10,000 4-vectors for binary classification with 2 informative features and 2 noise features." - ] - }, { "cell_type": "code", "collapsed": false, @@ -67,8 +51,17 @@ ")\n", "\n", "# Split into train and test\n", - "X, Xt, y, yt = sklearn.cross_validation.train_test_split(X, y)\n", - "\n", + "X, Xt, y, yt = sklearn.cross_validation.train_test_split(X, y)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 2 + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ "# Visualize sample of the data\n", "ind = np.random.permutation(X.shape[0])[:1000]\n", "df = pd.DataFrame(X[ind])\n", @@ -80,20 +73,13 @@ { "metadata": {}, "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAImCAYAAACB54oCAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXd4HPd57/uZme2LbcCid5AgCPZeVCja6qIKrViWbcmJ\njhLLshM7Tuyce+95ro99zuPclOPYThxbyoks+cgy1TslFlFi7yBAggQBEHXRge29zOzM/QMURIqU\nREkESUv7eR4+WM7sb/admd2Z77y/twiappEjR44cOXLkyHG5EC+3ATly5MiRI0eOzzc5MZIjR44c\nOXLkuKzkxEiOHDly5MiR47KSEyM5cuTIkSNHjstKTozkyJEjR44cOS4rOTGSI0eOHDly5LisTKsY\nEQThF4Ig7BIE4ZfvW24SBOG3giC8LQjCv06nDTly5MiRI0eOK5tpEyOCICwBrJqmrQEMgiAsO2P1\n94A/aJp2vaZpfz1dNuTIkSNHjhw5rnym0zOyEth6+vU2YPUZ664D7hQEYbsgCHdMow05cuTIkSNH\njiuc6RQjTiB6+nX49P/fZQawEVgH/EgQBGka7ciRI0eOHDlyXMHopnHbYcB++rUDCL1v3U5N02RB\nELqBYmDkzMGCIOTq1OfIkSNHjhyfITRNE863fDrFyH7gW8DzwPXAE2es2wcsFAShBagBJs63gVzf\nnM8XgiBc9nO+ceNm9u8fpapq/pQ9Hk8r11xTwW233XTZ7Orp6WHfviP4fGFmzCjnqqtW4Ha7L5s9\nF5Mr4bxfarLZLEePHuPQoeMoSpaFC2exfPlSzGbzR47ds2cfGzcep6ZmMaIooWkaw8OdNDQYue++\nL18C6y8On8fz/knQNI329nb27z9KNJpg9uwaVq9ejsPhuKDxiqLwr//6GOl0OW53GQCZTIrh4SP8\nxV+sY8aMGdNp/lkIwnl1CDCN0zSaprUAKUEQdgGKpmlNgiD82+nV/wT8PbAH+E9N05TpsiNHjgsl\nlUpx6FA7FRVzpn40giBQUTGHgwfbyGQyl8Wuw4eP8NhjGxkasiCKs2hpifGb32xgbGzsstiT49Oh\naRovvPAazz9/mHi8FEWpYcuWbh5//GnS6fRHjt216whlZXMQxcnZbUEQKC9v4OTJIUKh0IeOz/HH\nxzvv7OTJJ9/B53MhCPXs2zfBI488dcHnur+/n0BAmBIiAAaDCZutlv37W6bL7I/NtKb2apr2fU3T\n1rybMaNp2vdO/x3TNO1mTdNWa5r2xIdvJUeOS0MqlULTdCSTKUZHRwkEAqiqhk6nR1UlUqnUJbcp\nnU6zadMeysqWUlBQitmcR2npDESxim3bdl9ye3J8egYHBzl6dIja2qXY7QVYrQ6qq+czMqJx4kTb\n1PsmPR7DtLe3TwnPbDZLIpHGZLKctU1BEJAkE4lE4pLuS47pJRwOs337Uaqrl+NyFWE251FR0UAy\nWcDevQc/dKyqqng8Hk6cOMH5Ll1mcx6BQGSaLP/4TOc0TY4cf1RYLBYG+zs4fmAAh8GMomlIdjsN\n8+dit+vJy8u75DZNTEwgy0aMxrPd9253OR0dO9A07UNdnzmuPAYGhtDpCs45bzZbCe3tvSxduoR4\nPM4rzzxDsLcXqygSVVVKGxu58557qKgoIhicwOUqmhory2kkKUVBQcGl3p0c08ikCLUjSWffqgsK\nyjl5so11684/zu/389Lvf4/i9ZJOJGg95CGT0jN3/gJEcfJ7FwqNs2pV5TTvwYWTEyM5cpzmSFMT\nlboUmpjAaarBYrQx5B9h3642/vFnP0QUL33BYqPRiKbJ5yyX5TRmsyknRP4IsVhMqOq5U36ZTAqb\nbdLjsfWNN8DjYXV19dT64+3t7Hz7bW6++Voee+xVVDWL01lIIhHF6+3gjjtWYjQaL9l+5Jh+DAYD\nmna+70oSq9V03jGapvHyH/5AUTxOxenvTyKWZmfzNgxGPTPrZzExMYheP8GqVTdPq/0fh1w5+Bw5\nTtOyZw83LFrAnVfXYjEN4Y8cobIwwpIZFoqKCi+LTUVFRVRW2pmY8Ewt0zSNkZEOrrlm0WWxKcen\no76+Hr0+TDz+notcljOkUsMsXjyPWCxGf2srs8rKzho3u7yctkOHqKio4KGHvkRxcZTx8b0YDB7u\nu+86rrpq1aXelRzTTFVVFS6XRjD4Xo6Hqmbx+bq56qrF5x0zPDxMZnycisL3rlnXL5nPumWFeLo3\nMzGxjzlz9Dz88NdxuVzTvg8XSs4zkiPHaRKxGGa7nZqSEmpKSqaWNw0OXpZ4kXf5ylfu5Pe/fwGP\nZxxBMKNpERYtqvjQm48sywwNDaGqKhUVFbkn5iuARCLByMgIOp2Or371Zp57bit+vwWQEIQQd9yx\nmqqqKvx+P3pBOMcTp9fpEBQFWZaprq7mwQerz/9BOT4zSJLE/fd/iSeffAmPZwBBMKJpIa69tpEF\nC+af8/5EIkFnZyeRcJisqiKd/g7pJIlls2dBcSHf+3//5lLvxgWREyM5cpympqGBkb4+Kovem4tP\nyzJJSaLojGWXGpfLxV/+5YMMDAwQj8cpLCykuLj4A9/f29vLG08/jSGZRACSej3X33038+afe/HK\ncWk4eOAA+958E5uqogCq3c599/0JiqKQzWaprKzEZrMB4HQ6EfLyiCYS2CzvBar6wmHsJSUXlP6b\n47NDSUkJf/u3D9Hf3086naakpOS8sUGHDx5k75tvYkil6Dx6lNDwMGtWraL4tPdjxOejdt68S23+\nBSNcqXnegiBoV6ptOaaHy113YGxsjGcffZRyUaTY6SSWTNIVDLLirrtYtXqym4Hf78fv92Oz2Sgt\nLf3Q7cmyzMDAANlsloqKCiwWy4e+/2IQiUR44uc/Z77DgcNqBSCeStEyMcFXv/vdDxUxl4vLfd6n\ni3g8zsjICGNjYzRv3MjyqiqMej0wKSy6ZJlv/uAH5/VaHTxwgFcff5w6h4P6igpCsRh9iQR3PPjg\nJa0LMZ38MZz3aDTKyMgIBoOByspKdLor8/nd4/HwyqOPsqy8HJPBQEd7Oy0HDuDT6bhn3Tri6TRj\nosjXHn6YwsLLM+UMU+f8khc9y5Hjj4qSkhK+9p3vcGjvXtq6u7EVFHDz+vU0NDQgyzKbXnuN3uZm\n7KJIXFXJr6tj/Ve/ivX0Tf9M+vr62LBhI8mkERCRpBi3334ty5cvndZ96Ghvx6koU0IEwGoyUaLT\n0XbsGMU3Xb7CbZ8nzvSEtLS14UgkSObnYzz9lOp2OBj0eOjt7aWxsXFqnKZpbN++i3feaSFiaODN\nzh44uY/b7/gC9zzwAOXl5Zdrlz537N69l61bD6FpdjRNxuFQue++u67Ic3D00CGqrVZMBgOhYJBR\njwe7Xs/gyAiPvPIKN9x9N9/48z+/orOtcmIkR44zKCoq4vYvfemc5Xt37WKiqYlramqmMlg6BwbY\n9MorfPm++856bygU4uc/fxxJrMVdWEpBQQGpVIKXXtpNaWkxFRUV02Z/IhbDrNMRDocZ8nhIxmI4\nCgqQrFai4fC0fe7lYHx8nKNNTQQnJiitqWHRkiUXXJXy/cjyZMaS/rTn4tPQ19fHgVdfZWVFBUa9\nngmPB3smQ8v+/Vx7443oTn+GEUgmk2eN7ejoYOvW41RXr6aqSseCBWsJBMbwRQYu2KsVjUbp7u5G\nURQqKyspOSP+KceF0d3dzZtvNlNZuRqdbvJ8hcM+nnzyZX7wg4cwGAyX2cKziYfDuE0mEvE4W15/\nHcJhDJJERUEBdatWkQ0GiUajOTGS46PZuRP+4R8gEIA774Qf/hBM58/cynGJUVWVY/v2say8/KxU\n2lllZew+eZJwODx1EwwEAvzLT3/KqcNe8oQgh/x+otksFeXlSCZ45eWN/NV3H542W8urqtgzMkL/\n0BBOScJsMBDw+Tgei/GVtWun7XMvNadOnWLT739PqU6Hy2JhqK+P4/v2ce9DD30sN7Tf72fHli30\nnTyJIAjMXLCAtTfddMGiZrIIWQKz2Tzlwj966BA1eXlTUzJFRUX4fD4Uv589u3ZRVFREaXk5IThH\nYOzffxSXa8ZZdSXy80sYGBjC4/F85BTNybY2tj77LM5sFgnYr2k0XH01N912Wy4N/GNw4MBRbLaa\nKSEC4HC48XgG6O3tZfbs2ZfRunPJ6nQ8/eyzBHp7MUSjVFksaAYDndksCZuNa1au5Njhw9TU1Hys\n7cqyTDqdxmq1Tvv3JydGrgD+8IdJ8fGzn0F1NfzLv8B118HmzXAFZV59blEUBSWdxvS+pyFBEDCK\nIqlUaurmtenll0kODOEdjjCRCpJJxYln9ZzwpXG59Gz43/vQ63Vcf8Na6urqLnrtkqqqKnp9Pqoy\nGUrz8xEFgVgqhc5iwTsy8tEb+CMgm82y7eWXmZ+fPzUd5XY4GBgfZ+fWred4qj6IeDzOM489Rkkm\nw3UVFWhAX1sbzw0P82ff+c6HPv1qmkZTUzPbtu0nkchiMAisWbOYa6+9mlgoRMkZcSB15eXs3L4d\naWKCOqB/dJQNG99EqarD0TCHW265cSpAOhyOYTKVnecTjR+Z0RWNRtn63HMscbuxnn6Syaoqh/fs\noWbmTBoaGi7ouHzeSSQSnDjRjt9fQCKhUFJScsZ3wXBZM+veTzKZ5OWXX+XRf/ol8dEBChMxaiZX\nMKJplDgcWCYm6B8Zoaa29oK3K8syb7+9kwMHjqMoAi6XiVtuWcPcuXOma1dydUYuN8ePw/e/D9u2\nwX33wTXXwEsvwapVcNddcJnaoeQ4A4PBgLu8nIlg8KzliVQKxWAgPz8fmJyeOX7oMC1tw0TiKSZi\nBsYzxaiUgmLHOxEn6IXnn9zI44+/xYYNL6IoF7ctk9/vZ1FDA5WLFtGpqpxUFByzZvHlm29moLMT\nVVUv6uddDnw+H1osdlZcDEBFYSGejo4LPqZtx49jjUSoKS5GFEUkUWRmWRmC10tnZ+eHjm1pOcqL\nL+7DYplPZeU1uFxL2bSpje3bd1Hd0MDYGX1Dgl4vc1wu5JISjqfSbOgYplueSV9/Bb/5zSG+/e0f\n09HRAUBjYy1+/9miUVWzaFroI6dbent7ccjylBABkESRKpuNE0eOXNAx+bzj9Xr51a+eYGgowalT\nIzQ3D7J9+35isRiqqqJp4Y8MXL9UhMNhfv3r3/Hvv3gFS7qatFDLuGCkTRAYEkVUWWaR241DUeg4\ndYrqWbMueNuvvbaJXbs8FBauoqrqWgShnqeeeouenp5p25+cGLmMqCo89BD8/d/D3LnvLRcE+MUv\nwOGAv/u7y2dfjve47tZb6YxGGfJ6ScsyE8EgzSMjXHPrrej1ejRNY3R0lKbj/Vi1SkrydCSzXsyS\nnYyqI5IexyjFqSlcTiAQobx8EW1tQVpbj19UOyVJQpAkFs2axZduuom7b76ZZY2N6HU6JJ3uM+Gq\nlySJ7HmyMLKqiiCKF+xtGhsaouA8wcdOgwHv6OgHjtM0jW3bDlBaugCzebJFgMFgorJyIbt3H6Vx\n7lxiDgenhoZIptP0dHcTUVXmzJtHIGvH5voitSXXUGirxO1eQDBYziOPPIWiKKxatQyzOcDwcBep\nVIJIJEB/fxOrV8/+yPl+RVGQznN+9TodmSvoaf5K5rXXtqIoFaxYcStut4YoyqRSBg4fPkR//xGW\nLau7YjLStm/fQyTiJB0WqHAVY9XbydcX4pLMlBsMFIkiyUSCQCqFbDKxaMmSC9puMBikubmX6uqF\n6PWTHqG8PCdO5yzeeWf/tO1PToxcRl55BWQZ/uIvzl0nivD730++Z8uWS29bjrOpqanhyw8/jFxT\nQ3M4jD8/n1sffJAly5YxPDzME7/+Nc/++78THxllPNRHgdWOWYwj0otGD4IQpsxSTkaBhKyRzSrk\n51fT1HTiotpZVFSEpbiYUb//rOVdo6PMX7nyMyFGCgoKsJeXM+T1nrW8a2SEOcuXX7AYyS8qInKe\nm3RMlhF0Onp7e/G/7zjCZEPFSCQ1JUTeRa83kM1OXry//tBDFKxaxdF4nD5RpHTuXAS9nkGPB12k\nh/DoDiKh4yhKGqezgr6+MOPj4zidTh5++OusWOEilTqB2TzMvfdexW23fXQWVGVlJQFNQ8lmz1o+\nFAoxa8GCCzomn2ei0Sh9fV7c7nJMJitr1txEXZ0Zo3GUQKCV22+fz1133fqR28lmswwODtLX1/eR\nXZg/DS0tHZSW1qEzGhgJDaBmxggoUeKKSlQQGNc0BrNZJux2/vRv/uaCe2uFQiFEMe+ca4Xdns/w\n8MQHjPr0TGvMiCAIvwCWAs2apn3/jOU/AdYDQeA1TdN+MZ12XIloGvz0p/DjH08Kj/PhdMLjj8MD\nD0Bray5+5HJTUVFxTjxCJBLhxccfZ6bBQEVFBdGaOk51DTM40YXJlIeolIKogeBjOK2gjAfQ2XTs\n3LmJOXPmYbdPzvsePdpKZ2c/NpuFpUvnf+xAs3cRBIHbv/IVXvzd7xjzeCCZpGfCi+zMZ15hIbIs\nX5SMkcuJIAis+/KXeeF3v2PC48EiCEQ1DXNlJWu++MUL3s68BQto3rEDXziM+3TMz8D4OLt6BnFn\njmA09qJpcebPr2T9+nVT9UCMRiN5eXqSydhZgiSbVRDFDFarFZ/Ph6yJCDYXlvIZ7Dl2FDEwykxB\nxG20o2owkgoQ97dgNl+NpmlTIsrlcrFu3c0f2ATtgygqKmL+2rUc3r6dyrw8DDodw+EwhpqaXMG7\nC+Ddmifv3oTN5jzmzVvJ3Lkag4O7Wbx4EZIknTMulUrR2nqckyd7SaVi9PWNYDAUAiKCEGXu3CrM\n5jwKChzMmzf3E2d8vR9JElHVLK4iC4lmD0utTiY0gYnkEGoigc5iQSwqYvX69dzwMVL67XY7mhY/\npwlnLBaipMR9UWw/H9NW9EwQhCXAw5qmPSQIwm+AxzVNazq97sfAHk3T3v6Q8Z/pomfbt8Nf/dVk\nzMhHPch973uTWTZPPXVpbLtc/DEUQXo/2956i70bNlDicKCXJJr3HCQZUeic8DIgOlCyZWQyetJy\nCpujDqMlTsPsuVit+Xi9u/jRj75BW1sfXq8eh6OEdDpJIjHInXeuYPXqT95rJJ1Os3nzFl595R2s\neTW43ZWk035qakz86Z9+BdMVlKr1Sc97JpOhu7ubSDhMYVERNTU1571ZfBiDg4NsfvFFUn4/mqbR\nH4iS1c9kzpzVU3Z5PMeZMUNkZl0Vkk5H/axZ9PcP8PLLh6ioWITBYEKWMwwNHWft2hmAxjvvnODU\nqSBe72Q33cDYUWqiY2RTcXRiJTpjPo6CAkbUGBFbNatX5/Gzn/3kUwc0a5pGd3c3J5qbkVMpymfO\nRE6l6G1rQ2cwsGDlSuYvWPCxj9N0cCX+3v/jP57E73fhdr8XROz1DlFaGufBB79+zvuTySSPP/40\nw8NgtRawffsWVNXN4sVzqKoqZceOTUxMhLj66qswGkV0Oj8PPLCe6upPV8o/nU7z639/hF2bDzPu\n6cAcSaClslgEiaziRbTqEGfN4r//7GcsX778Y3tEn332JVpbI1RUNCJJOpLJGGNjx/izP7vpU2US\nXa6iZyuBradfbwNWA01nrP8nQRCCwA81TTs2jXZckTz2GHzrWx8tRAD+8R9hwQLYuBFuv336bcvx\nHqqq0tfXR++pU+gNBmbPnTsVSDg4OMjLv/0tzvFxgpJEZ38/WUmiVDSh2gzIogb5KpGYH1EqJZ0e\no6ioHoPBSSIRxuFw09vbh89norr6vSh1WS5h06b9zJ8/74Jdq+9H0zSOHu1j0eK7MZnejYuoweM5\nzqFDTaxZc82nPTSXHYPBwJw5ny66v7Kykr/467/G7/cjyzKPPvoMxcVnX7xjwQCbn9jEXWtWIOp0\nNG3axIp167jjjiW8885hMhkRUZT54hcXMmvWDB599FUMhmpSKY2ammo0TSU43k9lQxnh0VPE42Ek\nvYF0WiAdH8NWJvK97/2vi5JZJQgC9fX11NbW0tbWxu8feQRjLMay2bOxmkwcePZZPD093Pknf/KZ\nmLK72KxffzOPPfY8g4NBTCYHyWQIqzXG7bffc973Nze3MDIiUlMzn8HBUxgMFbhcs2hv7yUYHECW\niygoqCORgJkzG4lE/Dz//Jv87d9+6xOfb1mWefb//B8so0MIvk6SQ6ewixI6yYBiMrDomtXcddNN\nHB4fp7Gx8ROd5/Xr12EybaOpaR+gw2IRuPfe66Y1pXk6xYgT6D39OgycEaLJv2ma9j8EQZgJPA6s\nmUY7rjgCAXjjDfjVry7s/RYL/Od/wp/+KVx77WRga47pJ5vN8toLLzBy9CjFJhNyNsvRt99m9Z13\nsnzFCjY++yyL3G5SkQhpv58lNhtjioKY78Qty9xcUYlYWUFJ/QLGxiyYTA7GxrzIskJR0SzS6UJO\nnmyjru7seehEIsLAgI8339zEDTdcP5Wt83EYHBxElq1nCJFJCgtraGpq+0yIkYuFIAi43W6i0SjZ\nrHhWbQmvd4hk33Ea7PlUFxVhsViolWUOvvkm933/+6xYsYxYLIbFYsFoNLJ581sEgzAychJZNqNp\nKoIgYrbW4E300jhjEeVlEql0inA4Rr6umu/+fz/9xNNy5yOdTvPCU0/RtXs3pt5enDYbB/bvZ+my\nZSyrreVASwvDq1ZNa/G9P1aKi4v5679+gOPH2xgb81Fa2si8eXM+8KHg2LEuCgoqAUinkwiCCUEQ\n0TQT3d1tVFRcTyqVJB6fLG5ntxcwMNDF+Pj4J87K6ejoINXXx/DQEMmxYRboRVySxJiaobC0HGSZ\n8VAIyWL5xC0ojEYjd921jptu+iKpVAq73T7t3rTpFCNhwH76tQOYynXTNC14+m/3h6m2n/zkJ1Ov\n165dy9rPSNGmDRvgttvg49xjvvCFyTH/9b/Cf/zH9Nn2WUdVVcLhMEaj8SN/qO3t7Yy1tLCytnbq\n6aJGltm/cSN5NhtaMMjchgb2DA7i8/koM5uxKwqHhoaYvXgxt163huaxMSoqChke9pGfX0d+/mRG\nRDA4wcDASaxWlXR6Mhhy0puxh8OHD+H1Bmlv7+PVV3fxrW/d+wnFw/ld4Lkn4kk0TWNkZIQBjwe9\nXs+MmTNxOAzE42Gs1knFPzF0CpcoYjZJU1NbBr2eQlGkp6uLVVddNdWGfWJighdffJOODgPJpJVg\ncJhgMEB9/XzMtjzaByYY8vqoT7hZs2w29opyZjU0TNX/8Pv9DA0Nodfrqa2t/cQN8ZqPHCHZ3U2p\nIGByu7FZLJTKMkeamykvLMQlCAwNDubEyAeQl5fH6tUrz7sum83S398/Vc3UYNARi01W73U63ahq\n/+l3qoji5O8slYpSX3/xMnB6OzpoP3qcw4cOszCrYZJMqJkELr2eIx4vgl+mOfAWf/433/7Un2U2\nmy9ZY8bpFCP7gW8BzwPXA0+8u0IQBJumaVFBENwfZsOZYuSzxPPPf7KU3X/+Z5g/fzLe5AtfuPh2\nfdZpazvJG2/sJBpVEASFBQtmsG7djR/4Y2tvaaHK6Tzr5m3Q63FpGh6PZ7LomdFIdUMDzQcOEA6H\n0QQBnyRRarOhE0VkTWPJkkV0dLzK8HAXBQVlNDXtoru7j4KCQtxuC5s3P81tt91HPB5m+/YdCEID\ndvtsqqvr8Pk8/OM//icNDfUYjUba2zs4deoUExMRzGYr8+fPZNmyJef0x6mqqsJgSJwTZDkx0cdt\nt83l846qqrz52mv0HTpEviShaBq7RZGGpcvYf+A4Tucs7PZ84tEQanycq5ctO8etnj2jZoumaTz/\n/BsYjTWEwzvx+zUikQzBoItAYBQYobj4OhIRLz7VxtP7R7lm7Uz+n3vvPZ0qvJ2dO1vRNCeQRdNe\nZ8mSmRQUuKmpqaK6uvqC3fonm5qoLSxkIBhEPp1ZY9brsakqY4EAGU3DlOv8e15isRgdHZ1EozEq\nKsqoq6ub8ggEg0GefPIFJiZUBMGMpkUxGuNEIj7s9gLy80spLjYzNHQMg0FHXV0t3d3HKCoqoKJi\nsp9NJOLH6RQ/VXrwkZajNB85RpGsYEJDp4p0KiLdSScGQzFRxUTAWMS///p1jrX2cv/9X5rWYmUX\ni2kTI5qmtQiCkBIEYRfQomlakyAI/6Zp2veA/yUIwjwmU4v/r+my4UpkbGwyM+bGGz/+WIcDHnlk\nMhW4tRXOUyIhx3kYHR3llWef5YUX9+B2z6N+zgLKKso5dqyLWOwVHnjgax97m4qi0NTZS//hVmKj\ngzRUViIlk8iCQLHbTXp0lP2trZSuXk1ZWRnf/ObX2bVrH6+88hIDA2nmzFmBpmnodDpsNpWtW39P\nNisQi9mw2UyUlZWg11soLGykr2+MJ598imhUoqtrjIGBKAZDPhUVBYyM9NLc3M5DD913liAxGo3c\nc89NbNiwBShErzeRSvmprTWzfPmyi3h0r3wSiQTBYJC8vLypTIaOjg48Bw6wqqZm6iYfT6VoaWnm\na1+7m4MHWxkZ6aC6wYGUV4HL6aCnpxefz0844OOEz8cSoxG7w8G8efPw+Xy0t3vo6YkRChlJp63o\ndBKp1ACJxDA2WzX19TZuuGEVRUXFhMMhxsebCJ7uGfL2221UV69GknSMjY2xZ88ge/Zs5JprvkAm\n04IoBnE6CzCZjKxcOZ8VK5Z/aFaUIAiUV1VxzOPBkc1O3lA1jVgyScRkor6+/pIc+09KMBjk5Ml2\nEok0tbWV01Kt+P14PB5+97tXyGQc6HRmFKWdurpD3H//l4nFYvz4x/9MR4eG213OjBlFVFbOYWDg\nBGbzEAMD+xHFfIqLC4A2CgsLMJkEdLokdruJYHAMv38MSfLy7W9//RPvSzwep3X/YWQkNEXFp2TQ\nayKerAOrVIMm6YlmNWYWz8ftdtPXN8xTT73FQw+Zqf0YFVgvB9Oa2ntmOu/p/3/v9N/pa85xhfPy\ny7BuHZyna/gFsW4dPP00/OhH8POfX1zbPouMjo7y3KOP0nuim4aC2Rh1JnqPHCERjzN7zhy6uvYx\nOjp63vnb2YsWsbetjWKXa8o7kpFl+sJhju04jqNyDZ2HN2OJZNB0EBFkopLIYoOBkUyGVCzGA+vX\nA+BwOLjZkGjqAAAgAElEQVTjjls5caKHWCzB4V0HMGiTxbtUg0BZjRFFGQNKEEUTw8MBvN4ANTWV\nCIKNbdsOsnbtNwiHx6iuXosoSni9/dTUOPD7IzQ1NXPdddeeZX9jYyPf/34RbW3tRKNx6urmUF9f\nf8W2Qb/YqKrKznfe4eiuXViARDZLzcKF3HbXXZxoaqLG6TzrpmA1mbCdLnn8zW/eD0y65X/zy1/y\nmxffIC9rxjs6RkRLUTWrijl6PTv/8Aeit9/OjPp6urs9yPIs7HYnBQV5JJNhUqk8gsFxamsX0tBQ\nhcViZceOfaRSIpGIl3/4h19RXV2G3V6DJOmQZZmmpjZcrgaiUT0g0t+vMjoaZvXqSioqZrJx40l6\ne4e4//57zjvl1rh0KR1vvMHCmhqq58+n98QJJEXhVDKJSa9n/X33nbfT9JVCW9tJnnlmK4JQiCga\n2L69g9mz8/na1+6etrR0RVF4+unXycubi832Xg2Fnp5WtmzZxoEDrbS2higr+wLZbJYjR/oIh6M0\nNjbg84V46KEvMTIyislkZMaMr0/FmKTTafbs2cOGDa+TzeopLCzn6aff5M4717Jo0cKPbWd/fz/x\nkRHSqRSDchJ3VkXWDKQFKxICASWLpWQeoVCCsjI7kcgp7PaV7Nx58PMtRnKcywsvwHe/++m28ctf\nTk7XfOUrk2Xjc3ww+3fsoEqno1vTkWeyo9cZqDYY6Dl1ipq6OkQxj0gkMiVGotEoXV1dJBIpystL\nKViwgLf27MElSRjNZiI6HXGTg5KipTidhYiihCf9IjEE9EKcr990HYgisqKQrajA4/GwZ08L0Wic\nxsYa2traaT8cptZZhiSKpFMJQrEwxw61U1RpQhCcZLMJNC1DNmugu7sPQRimrKyCZDIOOBHFSbex\n1eqmv3+ExYsbOHGi5xwxApMFwv5YglUvpHOuoih0dXXR3t6D2WxkwYI5H9jS/fChQ3Rs28ZV1dXo\nJAlVVWk/fpwtooiSyaA7IyAvkUrhDYfxh0JnFarKZDL4IiJ11z1Ay97tmMpczCmuJZb0MeIPUmaz\nsun55/nmD35AJpNBUQRAhywLSJIDi8VCNltAXp6FWCzB/v1HSaetKIpKLBbj5Mk0b799gFWr1uN0\nFuH3+1EUI3q9EdAzPDxCImGmtHQhw8ODzJq1mNraxXR0HMTj8Zw38HXpsmX0trfT1NtLkc1G8fz5\ndIVCfHndOubPn4+iKIRCIZxO5yc6T9NJMpnkhRe2Uli45Izg6zra25tpaTnKihXLP9F2NU1jaGiI\nRCKB2+0+p5rt8PAwsZiOqqqzizmVlMzgpZdewGyuRa+3I4o6dDoDBkMVvb29VFaWMTw8Sk9PL2Vl\npcyYMeOcQM+DB9uZNetmXK7J/kOpVILnntuB213wgXE7fr+f1tYT+P1hamrKmTt3DmazmY6ODtLe\nUaolM1FLPhOJGGYtj4gq41dVdAUzKS+aQyIxTiTixWo1Y7O5GB3t/8hjFIvFOHDgMMeOnUKv17Fq\n1QKWLFn8gQ8v0WiU4eFh9Ho9VVVVn1oo5sTIJSQQgMOH4eabP9123G7413+FP/9zaG7+5F6WzwMD\n3d2sKiigyDnKWCCKM68AURAxCQLRaBRVjeJ0Ounv7+e5555n8+bDWK01p2MukkR9HVRY9XT7/aRE\nkWU33shETxCnc7IzrNtdSXNKoEwxkZEjtBw4xuLFjaQlkYSs8dRTu3C7Z2IyWWhqGqGl+RjZuJ3h\njEIsEkZFh8lkIxP14e9OEk33EVKL0UkFaBKoYozGuTYkqZCmA2/i8YRRVTP5+WXIsoLP56OnR6S+\nfvJG/e6FQ1VVTpw4wdH9+0knEsyYN4/lq1Zhs9ku5+n4QF557jl6jk+Wxq+bN4+1N900FRj6LrIs\n84c/vEBnZ5i8vFIUJcTu3S9wxx0rz6nJomkah3fsYEFZGbKi0D00RCKZxOVw0N3SwuLrr6dn2zbc\nDgfHenrZd2IYVbUxGAkQfX07paWlVFRUMDAwQDZro6qqkc6jpygrkIjEAwyOj/KbDVuZW+QCUeO3\ngkBxcR6trf0MDiro9SWIIghCApfLTDzeQyikcrRliExSIBrrQc0O48qrweRysnfvERRFT03NpLCa\n7EcTIJNxYzLZEQTQtPdiVETRydDQCG63G0VRsNlsCIKAKIoYjUa++sADdHV14enqIt9mY011NXve\neos3Dh7ELIpEVJXG1au58dZbr4iaI+/i8XjIZPLOyQJzu2s5fLjtE4mRcDjMU0+9xMjIZLaLpkVY\nsaKe22+/ZWrfJ3s2netlSiZTHD58nNLSfIaGBhkfN1NVVUV+fj6ZjMhrr72Ky6XwzjujRKPN6PVh\n1q37InPnzsHtdtPd3U08bqK6umhqmyaTBZOpgqamYxQXFyNJ0lkeuu7ubp58ciOq6sZisdPScoyd\nOw9TWGjhp//tv5EIBggCAgITgCSYSAgKQp4Nd0HN6WwemURigEWLFhCJ+Kms/PAYlUQiwWOPPY3f\nb8btno2iKLz0Ugu9vQPce+/d53jg9uzaxeGtW3EAMiDn5bH+/vs/VVB0ToxcQt55Z7IR3sWIHbvn\nnsnqrE88AQ9/bie9Ppo8u514KsWSWVW8uLMdvc6A1WQjnVWYmOhm0aICXn11I08/vYmurggm0xIk\nSSQQ8GGUojg1mYa5k9VVjQYD3tZW/EkDpaUpDAYT7e29ZCyNDIVOocYjBAcV3mprpby+mrzSeSxf\nee+US7y8fCZaxkU00ooolCFlzWgk8KU6yc8GQBUQ1SIEyYGs6lElPWkthX+sD+vgSarNLlLBEAPe\nPvrzF6FlrdQWCRzt28upAxFamlq48ZYbuOmm62g+dIi+vXupy8/HZDAwtGsXTx07xv3f+tYVKUjS\nJ09y7WkPR39HB88MDvLAX/7lWcHFra3H6eyMUVv73g1Jlit4880DNDbOPutJX1EUUtEoMUFg1759\nOGQZsygynM0yKIrc9o1vMFRby5bmZlraQ7jy6omqCjWz55FI2HjkkT/w3//7908X5RIAjWCgFzk0\nghyO4A+NUiplsVjLiClxXMEwwa52fD4boliCLAcxGPJwOCQEwUhRUZKD+58hErQjqBGM2TTV+kqE\neJpoOkE0z0dbm5n8fDPp9DgeTw951gjevl5icT16h5tFi2fg949isdgIhcb43f/eRl/3CD5fGJ1J\nx7LVK7j99hu4/vrrsFgsNDY20tjYCMAffvtbTGNjzD1dbCurqrTs2cPhggJWrV49/Sf4AnnveJ/N\nZLGsT9bk8bnnXsPvt1FdPVkSX1VV9u9vprDwEFddNbnvZWVlGAwpUqn4lBBSVZW3396C3V6I3V5O\nfb2L3t42urpi1NfPpL+/laIigTVrvobXG+bYMT/RqJ/u7leYM6eZm29ejslkQBDOveAnkzFeemkH\nTU0dmEw6rrpqEWvWXI2qqvzLv/wng4NWVDWJw2GlsXEGr7zwJP4TW5BiYW4EjEAPGpVARPMT0rmJ\nWkbQtBOMjrZhNEaYP/9mCgpKCQZPsmbNl857bBRFwePxcOjQYQYHszQ0NE6ts1qX0Np6gKuvHqKy\nsnJqeXd3Ny2bNrG6shL96YcffyTCK08+yTd/8IOpSsUfl5wYuYS89RZ8jKq8H4ogwE9+AvfeCw8+\nCB/S7fxzzbI1a9j7zDMsq6nh9tUz2Huij87BMHKemVmzJnvD7NnTSyCQQVEM2GxudLo8/P5RtFSA\nhDHChv7DzHTasZtFDHYr2fIqRkbacbnq8Hrj6M1l9He+Q0F6BFsEqsx5RIfGkNNlHMzsxl1Wweio\nH1XNkkmLGJQMNt0YGTEPm6ijQA4QUxPEBAe1+hLMJgPxbAp/JkZCZyI9mmJGkY5MNES+aCCbCHOy\nbwsmVzUZi5Vqm4mFtVfRNuHlrbe62Lx5BxWGJOuXLZu6WMyurKR9cJDmpiauuwJTsWaWvVfxckZp\nKTGPh/aTJ1mydOnU8qNHO8jPrzprnF5vQFWdeDyes8SIXq/HWVzMW1u3Ms9oxHVagJVns3iHhujv\n6eHeP/szfhGIo/NFyFpcRL0hoqMpRsayhEK9lJf/jm984+tIUhSPpx1zyks2GiWcylAhStRb8/CG\nxnBWz6f71Ai2pIbNnMHlshKLxUgm+4lEQsxprEMZ76YwPIygjJNRFSyUoJMFdJIevaKAyYVODBGP\nn8BgGKT75CnydS6MYho1MoFvVGRvdAGlFUESiQkGe3cwq2QR5kQFlcbZxOQY+3ccR68vZ3h4nG9+\n8xtTT/1erxd/by9Xn1H1UxJFZpeW0rx795QYGRwcZMeOAwwMjFFU5GLt2pWXPNC1qqoKUdxMJjMp\n9t/F5/Owbt3HL7g1MTGBxxOiquq9bBJRFCktnc3u3S1TYsRoNHL33TfwzDPbkKQSTCYrg4On0DQ/\na9feTkvLKfLzFzB7toXBwTaGhnZgsfi5++7/m0gkzMaNLyOKBQiCgRMnOpg1ayGbNh1h/fpVKEqA\n8fFxotHJejR6vcbu3buYObORWCyP7u4Rjhx5ia6ubux2G8eO+amsXIRebySVirN1614mTh7AFItQ\nC5QAJ4E5gAHoR0XIhtFFJEaUBHqjhcLCYvT6IDpdPw88cNt5K76OjIzw5JMvE43qaG1tJR53oap2\nZs+ehSAIpz1tLgYHh1AUhUAggN1up+XgQapttqlrC0CB3Y5pYIDe3t4pAfxxyYmRS4Smwdatk6Xd\nLxarVkFDw2TdkgceuHjb/SyxcNEign4/+3buxAbU1JcyY9U8XCUl/P7x1/FOGEnE7JhMM8hkoni9\nxyktXYWq6giHA8SzforyZmDRFxKKR5iY6CLc72XBKgN7977O4EAaOTTAAiOUmNyUGqwkMmmGExHG\n4iG8J07R3h2lqrqeZDLG+HgQk6rQkAljkmSyQh7prEhMkAAROZMmq2TIqhqiBpmsjBE9PlkD2UKB\nvRCrQSEZGcdHFqc+y4qGJSRTKRKD4wwFjKSzKnH5JPvSKqtWLZ16Uil1ueg7efKKFCPvJ99sZnx4\nGM4QI6Ionnanvx/tvIGcDYsXs+fZZ9GfrpibymQYjURYuWQJnc3N3LJuHS6Xm8XL53DwYCvJpIFs\nNo0gKGiak/37e1ix4hS33XYVP/jWd3GHomixGPF4iBAaJqMOh8mAM9/BwEAEk95Mvr0IW2Ex4+M9\nSFKGWEym/dhhrOkAquIGnFiANGZGtQkKFAsiYEybScVE+rraCQcc1OY1IKiQTCqMZ4Posi5inhSC\nLgJkkVPlIIsgWLCYbRgUE4GxAEeajpBMzmLNms6p6rSpVArjebI3rCYTca8XTdPo6+vjt799Fat1\nBk7nMgKBEI8/vol77omxZMniT39CLxCr1cqXvrSWl17ahSSVYDCYiMfHqakxsWzZ0o/ewPtIpVII\nwrlP6kajBa83cdayefPm8t3vumlpOU44HKO6ugybzUZNzRwymQwdHU1omh29Pk4iMYHV6mLfvrfp\n7u4kHq/AYnGj0xmRJIGWljYaG6sZHZ1gcPAEJ07sR6fLx2DQE4n04nJV4PONMzaWJZnMw+9PcuzY\nM8yYYcVun3M6ZghMJivxeBJrMoKChovJhm4mwAYkgSR6sriZUboMUdRw1y4nmx1ieHiIa69dOFUf\nJ5PJcORIC01NbSiKQmdnJ1VV11FdXY7fn2J4WKC9fQSn0z4VQ5fJxHhn8yYKslnygARwpKuLL8ya\nxYljxxgfGkKUJMpraxF1OlKfojt0ToxcInp6IJOBT1m9+hy+9z34n/8zJ0Y+CEEQ+MINN7Bs5Uq8\nXi9ms5mWw4d548mnKdKVYjUrdAYC+DMhRElHNptHKjWOKOqJJfyUWiupdOoxSEa8cQNjkVqyBh8e\nD8Ri5fi8bdhUEyEtToU+i2ASQNAgk6a3v4W0uhiz1UQ2cZSh8Q7SShpRquQocVzZIDMJk9TpQDOi\nynGiJMnLmsmioaBDyYJBTBGMSFj0DjIZGU3OYFJBL5voGYqwsDrM6IgPASt5Zid2s4vsaA/hsERn\nRxcLFs4DJm/G5o9oQ3+lEEmlqHef3ZRr6dI5bNiwF6ezcEp8pNNJJCl0VqaAqqq0tbVx4MAx/KKN\n5nAEVyyGLS+P2iVLKC0vZ//pjr+zZ9fwH4++TUdLF8m4Slazomg6VJ0Hnc7Nli27uXrlPBblWyk0\nieyIy+hpRMsInFSyFGVj5GfTaJqKotMTT4YJDnhIRMZJRuNEZTf5mo2oZENV8jEKcfJQJlMzcaPh\nxa3LYzQSxBsfodS+nCK9DT1hfOE4dncpvmwFZjEfcypMaqwXq92KqLjpHx+jvqiIZDKJzxcG2cyA\nZwB/IMLAQBM/+tHfsXr1atxuN0lRJCPLGM4IMhzx+6msr0cQBDZt2onTOQeHY/KYu1xFmM1W3nxz\nN/Pnz7ukzRWXLFlMWVkpra0nicUS1NevZvbs2Z/IhsLCQkQxjiynp27wAH7/CA0N53oLiouLueWW\nyfiK0dFR2tpeAKChYTHV1bPo7j7G/v1pli//EtFolvHxUXp7Q+j1ZaiqQCYzgdWaQpKqGB4e5JVX\nWjl1youm2YnFMqhqiFhsDIPBjN1ej8+XIZUSMJlqiMdljh3bR2FhjGQS3O4ZmM0uTKY8gnIcAzAG\nlAHq6X8xIIkdndFNvsvNWDpOIpHHyEiGSETHli2dHDrUS3m5kaamk0xMyMyevQiDwUFra4Z4vJWr\nriqmunoGHs9ejMY6ensHKS0tJRYLMTTQzNpKO0vP8JAFBwbY8NJLrKuro9ZmI6tpjLW10a7Tccun\nqJ+SEyOXiK1bJ2uLXOzil7feOtlwr6kJln2OykdomsbAwACBQACdTofD4cBut39ghoDNZsNmsxEI\nBOg8eJBCqwNJdWAUZPIDcWKZJIrkJJkOkkhkyGYBScOkS+AwuPEnEgQSOiymUkYSw9jti8lmQkja\nMDr0hDMGhlI9CIrCYDaCS5MpyWgMZfuIZMIEIhlkWYckOBGEEgz6LIH0ECeVdvRakjFkrFiYIEgc\nC6AjTYYoAayqRlZ1EE2l0TIhyvML8ScESm0FBGNxWo8eBjnDhAYU1JInSSStdgS9hX7PKPPmTz7Z\nHejspMxiYfu2bcxdsICioqLzHqvLwUQwSNG7lUyDQUJG4zmdZufOncvixT0cPXoQo7GQbFZG07z8\nyZ984aw4mNdf38T+/R4cjlr0tiX45DA6R4Ib1izFZDDQNTzM7NMel+rqSib695KKglHfgKJmiYW7\n0Gl+htuzPDfxFESuZ/WSJWzcvBNXwULMEY2ImGQ8EWE0A4eOH8GiVwjry4nFMhhCYxSlEqAWkmCC\noBBHogqDsYRUeoAYGdzECGMgSBS9miSqBHBbZZJpiTyDRDKZwmW24Q97UVU96WScwjwbemseNrsJ\n/4SPVCJNKp0gFlGQdEYmIieIkSGedBMOW/jOd37FXXft4Yc//DYrbryRptdfZ5bbjd1iYTwYpD+T\n4Z4bbiCVSjE6GqSqasFZx9tksuL1SgSDwUv+XSkpKZnqAfVpMJvN3HzzKl57rQmXayZWq51AYAxV\nHeKGG77yoWNLS0uZO7eUEyeOUVragF5voqenG5erhgULFpFIJHjiia3odNXIsoSqgtFoxWLJZ2Ji\ngljsEOm0hiQtwGh0YbNp1NU10Nl5iIGBfTidV5FKiVitk9csTcuQTlsJhewkk4MMDfVRUFCA3z9K\nCJmZTJYxdzFZ3jwMeJFIYsQgmAgkwqjWciLBFFZrLaLoxWTKY3Q0wNatR7BaaygurqOjYwiTqR+b\nrRaPZxi7fR8NDctYtKiRI0eaSaWyNDcHOXVyL6mRTvqGSshOTNC4YAE2m42KggJSySS+TIY8VUXO\nZgkBksFwjmckm82iquoFCcmcGLlEvPXWZNDpxUaS4KGHJnvXfF7ESDKZ5KUNGwj19BAaGKDP40Ex\nm5k1bx4Ny5dz2/r1H1hVtaenh2w0SoHTxKA3RJGzgqL8MJFUgNGkF70hjqqO43aLlBQVUWmsRpFl\nRsIJDNYiFEEhEzUwMuIjEw6Sn1eIlEmil40MKRqZ2Dhleh36TIZ8SUIyWjGYJdriWUTRiV6cSVrL\nEk6GMGolTBBCRw8y+RgoRyZNkBHSCMgo6MU6wnoNUzaAWbWQVCOMJ4IkDaUE4inCiWEGowkUnRFz\nYR0ObyfeyEluWPc1etoPooaCHOnpYf/x4xTn51MWjTK+ezcHNm9m1W23cdXVV3/igLOLybDZTNfA\nAAJgLizk7vvuOyfQVpIkvvKV9axY0U9PTz8mk4HGxlvOStMcGxvj0KEeampWI4oii1ZdzYmDB+kZ\nTXKgrZ2CAhdJp5Nbr7sOgKHBQW5ZOIPH+/YhqyLhkJda0UC+3k1Glgj5IuzcsoXF995LACsmRSKZ\niZBKRNBUDc3sYiDmw2WSGY4OI0TTlGNAQSKlJTDrsihalIlMgDypDNCTQENAw0QchSCClmQWImLW\nTE/fCdxFC5FTaSqteRgEHZnkEFZ9IRlJwihIOKxWNJ2HTDxMKDCGqjmJZscIphJIhoWgOjEbC1AU\nmQMHArz88ib+y3/5Gg6Xi6ZduzgVCFA+Ywb3rl1Laen/z957Btl13Ve+v33Szalv54zuBhogAGaC\nYASpQFGiKYvWiENpZEkuzYytcZjnmXpTU66penaVP3jKnueSnzXzniXb8liSZYk2FShSFCNIEIBI\nZBKN1Dnee/vmdPLZ78OFIFJMSqBsWetD172nT9/Tvfep3uv89/qvNYDv+xiG+hqdRhAESOm8bXbg\nb4UgCFhaWmJzc5NYLMbU1NQPde/efPNNZDJpDhw4Srm8wI4dQ9x++wM/lAvqhz70ywwMHOTgwWM0\nm200rcWdd36AeDyOED79/aN4nkuzqaCqLTKZfoRQmJ8/wMhIkkKhhhACTWujKA6WdYTh4UkWFp5j\nc3OVUGgckNTrq7iuSyIxiKp2MTU1gW27zJz6Bl71BbbT2ZpxgZOABSwAaXzAxZQtmr5AVwfQtAi+\nn0cIC9+32Nz00LRxIEkkkiYSSTM//yzr6y+i6z20WqdYXMyxe/cVXHvtbggucObAw1wvBBuKQrJc\nZrPdplGtcss730mrVmPb6Cjh8XHOVSrous7W665jK5BbX2diYoJ2u80zTzzB2aNHCXyf4a1bufPu\nu990rH9BRt4GeF7Hwv1yZcp85CMdIvLnfw5vYzX1Z4Znn3oKf2GBAcAqFnnP8DC5Vot6sYg5M8Mj\nUvLBj7w67tt1XR5++DGeeuooc8c36Y5CqV5BCI3xLSNoEQ2vsMjkdJxf/dWPcccd+/jTP/3/eO7J\nRZJ6hHRPlpfmS9RqFwirFTZXjiLpRUqPTFjSCmo4QuL5kpgbUJMQEwpCq5IJd6M1m/hKFhl4KLQx\nJCBUhIyhoJPWRon6AkVNoMk4bd9CEsELwjQZw4ts4pt5IuEtZCMqW7t3ML/+LP2KRa8QWL6DVV7C\n9ptMjk0QCoWZuOp2UqkdhNNR9kpJTyLBMyfOc+rCIl67zqPffJjhsVFGJid55733svfWW0kmk687\n5pcbn/zt36ZUKgEdb5Q3ys8RQrBly5Y3NHBaW1sDMpdaJfv7+4m94x2cP/sSc+4Fxq++msFkmoWF\nBbZt24bZbNKdTrN1bJRzczClpekJRXBcB03x6E8kMUt5XjxxAs0IUzdDtKWGYUQJfI+kptO0daaM\nDKZfwhE+VQS2v0G/kiQtdCpuiwIL1P0sYSwCFBp41KjTIxz69DhDepxl30Y4RWq5czgiSa20gaGD\nUMpUAgevLnBKNi/nN3G9VQxRY9Wex2zbtAMLRR0kGc6Sjqfxg4BmrY4cHODo0bPcf3+LnTt3snPn\na6MAVFXl5puv5MknzzA+fvWlsV9fP8+uXWP/JLqvbNvmS1/6By5cqKIoKaQ0UdWH2bv3SpLJFBMT\n4wy+QgT9g3hlZ9H38L2cmUqlQjKZZGJi4jWeGrquc+ed+7jzzn34vs9//+//E8PonCOlJJnMMjTk\nsrFRIZPpolzewHFKOM4Svr8P224BBr6fQ0qbctml0TCJxVRM8wyeJ9A0FVW1iUYTJJMhfD+GaRbI\nrRag7RIiwKKzRZMFhuhYl68DJcA0ApJdQ0SNCcJqCNdt4DhzbN06hu8r6Ho3qrqJlJ1oAM/zKBYF\nsVgay4JodIBIZJoDBw5yww09UMlxTSLBlT09HPA8FNsm7Dj45XInnC8I2LQsxn2f4aEhJkdGyCQS\nnFxaIpZIEAQBD37hCygrK9wy2PFTWltd5e//4i/edI5/QUbeBrzwAmzZAper0jk+Dtu2daov73vf\n5bnGPwVYlsWpkyd58K//miv7+phfXmbbxTTJwUSCtWKRwauv5tTMDJVK5VU+FU888Qwvvphnx473\nUtuMEm238IJVYJ5ifYV60OA//ucP8W/+zQPMzJzhv/23P+L48XNUqxY5mSafW4dGntFIhC4Zxm7l\nKIkNGgqkNQvpVUDV2XAhZ4RISR2p6nSJGr6/hMRE0qDlK8QUBV2N4EiLQEpULYaQ0FRCBEGbFjo+\nffjE8IWFToFIbAvxvgxePYdlLZIvVhmmynWZHtrtNr7Q8LwGi+0ipSWH5578O8a3jTM1dSMXTh6h\n3w/44hMzlCoa6WCUlrOMrBaZ6moQOn+ema9/nbmZGT72qU/9TNw5v5ec+5PCMAykdF91LJFIMDg8\nxOrqAseOF9E0H89bIRJ5ln37rqENbJ/o4+z8AhFNUHZNGl6AlB6DVos0Og+9eAq7YpKMxbADn3Yg\niYT6aNstIiJKwRGUmiFU0Y3rrzGERAuK1F2VNILdBJzlu4ToQUNHp4JKmSukQBKwhE3eN3l31xAN\nisx7NUq+S0nC+LZtzJ9ZRMomuuFhe4KIMUU4Wuedd7+b/fv3Yy6fw9B9omEbVVFxPYtsNEy9WiUI\nwnie96bjtm/frVSrdY4fP4CiJAmCFlNTWd7//jd/mn278OyzzzM76zA+3gmwW1w8w4EDyxw+nGfP\nnuT92G0AACAASURBVOsIgiPcdtsO7r77XT9UEGSr1eJv//arrKxYQBxo0du7n49//ENvuNWrqio3\n3XQln/3s1zFNjSCQNBobwDDRqEmrdZ5YTEfTWrRaIep1n2QyTbF4jiDoR4hehKhTLObYvXuC0dEw\n8/MrpNPbqdcr+H6VoaHbEaLJlVdu5dEvfhFLqyEdjzowQSdxtkYnAnMQOI/gtmu2YQlBzpunXD5D\nsbjG9PQ06XSWcjmHaSaJxzXAx/Mc6vUmQSDp7e2l3V4jmcwgZZGJiSm6u6GZmyN98al2x9AQR2dn\n6fJ9/GqVl8+d41yphOG6RDc3aeXzPH7hAtM7d9JKpdi2bRsLCwu0FhfZ8wpDvuGeHlpra286J78g\nI28DvqcXuZz48Ic7NvE/r2Sk2Wzyd5/7HGxs0FUq4VkWM2fOkN6+nVHDQAKNVosTs/OUXYfjx4+z\nuJhneXmDVCrG+fNL7N79fjRN5Zobb+TooUNsboZo5PLsuX6KD//HX+Pd73kPjz/+FF/96kHm5mKM\njT1ANltgaelxMnqZvliIbCyO6/rIZpmk1QTNI6WnCcdVHK2HkaZCSJWoQoF2kzXLJqoETGZCLDZn\ncYIdOGoXtmeiijZhBWL00QzqCCmpYSFIE2ENBROpxAmEiufWkH439eYSaXcTpxkQkg5zeKQjKYyQ\nDoFgAJf5ZpHc6RfZtus2lpbCPH1gldzicUbjO7FrNfKKR8QvMxnup1K1uWFwkEqtRrRa5cTx49xy\n6z99x9Z8Ps+JI0co5/MMjI5y1XXXkclkmJycJBR66lXJu77vcebMITKZXsbGvu9RUqsVOXToJYa3\nbSPbbIK2wdl6jJDSg+3baDQIWT5Nr0ZghQnaTdasAzScHuKiiygtXHeJpCoou1GCII0SzLIVm0Fi\nCEyissUskhiwEwtJlQgaYUUnCAIqqsIIMOe0GE/2kLNaFNwmG45KLJahX3OZn53DllOM9kzTbBQJ\nC4nrBNR9OH78BbLZaylshPB9n3x5jZCxQTY5RjqeJl+/wNTUrZcyeV4Jz/M69uKtFt3d3Xzwg+/n\nzjvLlMtlEonET0Wz8dPC4cMvMTDQ2YdutWqcOPESvb23Uq+vkUoNkEhs5dlnX2DbtgkmJyff8vMe\ne+wp1td1xsa+r0vK5Rb42te+zSc+8cDr/kwQBCwuruH7EcrlFqbp0Gw2cZz9ZLPXMzCwHdetkcud\nQVV3UCg0MQwPRWmgKD34vo2m2eh6k8nJqxgbS3LXXRkOHXoZx0kyM1NjdfUsmUyaI0deZrNaQLZX\n0IAkYANFOtTJp6MfQY3RExpktrZG73CaHdndHDnisLBgUSqtEI9LcrnD3H33AwwMDHHs2BlKpRrt\n9jk2cyGGsn2kkEjfpKdnBChihMMsLS1Ry+c7lcieHgqtFovtNqlolHdu3Up/LMa548fRgoCE4/D0\nyy/zh//rfxGJRCiVSiRfp3ur+y0qbL8gI28DHn8c/uAPLu81PvShTl6N4/x8eo4cPnCASKnEjslJ\nvFyOUK3GlZkMJ+bn6d69m6fn1zhTj9COhjhX2OCZM5/lttvuZWzsVjY31zl58gCp1CoTE1uQUuJ7\nHtlojHCQYTKd5vyxY0xu3cr+/Sfx/R7icZV6vUKjVKSUs8l6PmGpIM0NcEziioGmm/iBhdmyqdsR\nkA0GYhHWGnXafohQEMOTgpmWzc5EArfHxy5dIGZsoVRpE1VMol6A4UXxWcKlgiEGSMtFetGJqjYR\n0aLqedQrPlY1QOKj4qEKA1Uo6L5CwzaJBzZbkxHyUpK3BTekxthcmGF6+gY02YVj9qLGHLKahi8t\npGXhJaIIVUdRFMxmk+2pFMvnz/+TJyOzs7M8/Dd/w6CmkY3FWF9a4tShQ9z/7/4d/f39fPSjv8QX\nv/gtisUoQmhIWSEadbjiilf/XalUN8vL81x1zx4W1os4eoim72AoRbqEIKX2st6cIxNYbA8p6Gjk\nnBongwomIYQdIalEqVo2LTeOGThMEBBCIPHQUfER9BJQAjJCJy49dMAMXGoElAKVYTVMUpi0cRG2\nRcSP0a+GaPthLD+B726iBAbF/AK9oSiaFicUjrBcr7C6tMnuq28lbkCrVcf0YnjeJqpiUG+dYfsV\nBh/60D2vqRaUSiU+//mvUi4LoONKunNnP/ff/4HXWKX/rCGlxHVdVLWzXOXzK0AWTQshRKfdW1FU\nYrFhTpyYeUsy4jgOx49fYHDwllcd7+sbZ3b2APPz8xw9+hIzM/PE4xFuvvkaxsZGOHjwEI8//gKt\nFhQKVUxTQ4gIphkwNFSm2TyP6wboepZMJko+/xS23Y1hjCIlCLFBNpslk9lLrdZEVft573vv4oMf\n/AAnTpzgT/7kc9RqAdFoN6urq2xUVhmghQ/00amMAGwAeTq6kVCki0XHpqz2E6kPk0oZTE6+D0WJ\ns7Z2nqmpPnbvHmZl5RCRyPV0dzsIsYrVbHHD+HX0ZTrVSMtxOHX4KT752+/jW88/SXNzkysNg+5Q\niEouR83z2HbnnUQTCa6enERVOsnDtWoVoShk6vVLItVkMklbyteMe63VetN5+QUZucyo1ToJu5f7\n/3t/f8dz5Nln4V3vurzX+lng7PHjXH1xn2t61y6OPvcc0WQSt1jkmzNnWTYH2b7t+k48erKXcGQH\n58/nGBvbSk/PID09A5w8eZrR0RFmTp0i6brEsmncVBc3bd/ORqnEQ1/+MtCL53kU1ucx2g0yoQgZ\nqdNurJAIAnqNGJoEU7jkfQtH0Rgf2k3ebOE01ik3Wuiih4QRQpeSth/DkU1qnkEm2kUiqaH6TZLh\nGm0zQJVRfHwEDjHqWDKgnxQRFboVMAKVrCo57dfpkQmW8UkSRhM2lqoRkmC7JmlFwyTCBcehN7WF\nVCzFRjHPN77xNfx6Cz+IcHYjT69IoEiTSODiC0GrWWVtDcIjI7Qti/jrPEG/Hmq1Gi+fOkWlUKBv\nZISdu3YRjUYv4x3QQRAEPP7QQ+zOZEhfDCPLJpPENjd55rHHeODjH2dycpL/8l/+PQsLCziOQyqV\n4nOf+3ukfLVlfgeChx9+gmYzy/QV76I+1M38zFHqzQIl0aBX08jIFF2Kw7plEkNhFyEKhAn7Fg2/\ngYFGwW+iECeKgo2GiYOKQEUSBZaBuJTYgMCni46d94YiOO871AKbLXUPjX4UIsSlwHQtTvsenlAQ\nFIh43WgxHU1TsR0Ty26iY3DqyH5iZoXheDfoOkv1Apo4SqZ7nDvedROjo682ipNS8uUvfwPLGmBs\nbPjSsdOnT3LgwEHuvHPfZZ/HHwVCCHbv3srMzDIDAxMEgY8QCq5ro2n+JZ2Toqi47ptvR0FHKyKl\nuJTx9MrrmKbDZz/790Qi2+jpuQnHsfj0p7+C6zrE40McPVqmWNwkk9lBJDKMZdm0Wh5Hjpxk377f\noKcnS6mU48KFOZLJDKbZQtO66ZCnQWIxg1hMo1xepVg0WF5eJpcr8PDDT6AovQwNhdjYeJFcbgmD\nEjYdErIL0AEViNEhInnA0g3KJKjUfEqVVTY2alx11YcJhaJs2bILz9vkhhveyaOPLlAuzxKJ9NLT\nM8HG7AUa7VXikTCqolJtFhhNtSjlNpjq68O3bVYKBRYbDdwgoBaN8sC/+lecOnAA3/dRO1kHZLq6\nOvqsVuuSyd7k5CRPZzKsbG4y0tOJzag2m6y/xVbhZSUjQog/Ba4Djv1ggq/oUPXjwP8jpfzLy/l7\n/Czx9NNw880QDr/1uT8p7r0XvvnNn08yomoa/kXDq1QqxZ477mB5cRHD9zlXDegamsaORBiZmqD8\n0lG6uoYplxep1WpkMhmuvPJannzy2xw//iJHvnsCXXoEcoP7bh1DEYKh7m5OzMxgG2Hi8QxOeY2R\n7hFAoCstPCQhBYRvdSyrg4C8tGgQodquo5g2JauGFYQp4qJqkr6IThC4BKKLiifoc2ysdp1SrQ9N\nDhIOLBxKeDTRiBAljUIdHRfV3yCE2jETkD5RwghpMIaFQhxdRmlSRioSn4BmJMxSNMqOkRHadZ1i\ns029pVGTHq1CDdfuNAqnEoPE/TCb7SJGZY0d3SobBYeoplFOJvnkxz72lnOxsrLCP/7VX9Ht+yTC\nYU4fP86R/fv515/8JF1dXZf1PiiVSvj1OulX2FMDDHV3s//CBRzHwTAMwuEwge9z8LHHaJdKnHr+\nEBuVpxgZvpbs4BDTO3eh6x1fiEolzfDwNKdOnaG7p5/l2AB1S0HRNuiP9OJVKwSKgiMhQxzvYvWj\niz5Cap2mrpGw1vBQKdNGI0IFFR+T1MX8kDLQRacq0k3HOXMJwYiqcNZtU5IhHD/McDRCygujS4Fl\n13GECWICN9ikhI6wQsQUl7bbQBMFNHcC3/PQiVCzbUKyQVq36Y0nmNh7B74fpdFovEqEWigUWF9v\nXrJHbzQaeJ5Pb+8UBw+e/CdHRgDe8Y5bmZ39MqurDqFQmEZjjiDw2LNn56VFsF5fY9eut37qi0Qi\njI52Uy7n6er6/lZUvV6mVssxOLiH/v5xoLOVV60aQDdDQ4O023Po+o3k86eJx6MYRgYpe7Htl8nn\nF+jq6qe3d4R6vcjiYhHD8Gg0HCzrLJqWwnH6aTQCYrEShw9vcPZsDtdVWF5uEYlk8P11qlULv+mQ\nwSagI1qVdDppfDri1QjQQBCWEIvtotHwsCyLcuFZjj7/bXp7+4hnB0ilFZaWltnctLn99vtJJNIs\nLi6xpa+EIpbQ1XkCCTfu6GYgey0HZ2e5YWKCrl27yG9sUGu16MpmkZqGb9vsvOEGnnzoIc6fPcvq\n0hKu55HOZJi65ZZLDyO6rnP/r/0a33rwQQ4sL6MJgYjHee+v/ir/xx/+4RvOy2UjI0KIa4GYlPJ2\nIcT/FEJcL6U88opT7gUKdMb55xZvh17ke7j3Xrjvvk6q70/bz+RnjV179jD72GNceVEUFY/HGRwf\n57qBAcbsMNnsHkKhMEEQcO78STzPQghxybFzaGiSoSGNc+eeo9EukTSiQIpvPL+OaR/k/bfsJRmP\nk8hGOHWySDaq0WxV0LQQjrNGVhUUZUBTumgI8nj0IsgEHiulWRpBQE16eHSRYoiIjLDUblH3l+kz\nohhWnWY5h+n14mMToGMQQyfAZp3thMmicgGbLjZQ0IkHHkiHlpRItQvhe4CBSQILn3ZQJRAORaDq\n+Uw7KuvrDUrNJuHYJG46hdMywWwxGArQZRNbXqAgQ7RxMJU63Ykhhnt7McNhFHjLdkcpJY8++CDb\no1G6L1ZRhoCFXI5nHnuMX/nwhy/XLQCApmn4P1ACdhyH9fV11nI5VlZWmJiYYG5uji/+j/8bvdJg\nfW6ObYFL2NqksiGJSYfvzJ9m13Uj3HnnNbzwQgnDCKOqCi+9dAi/tUqvKNBub5KzLDLCYdP2UYWB\nFBG8oI1BGIGO4hvU/RxXI2liUMRAwQQETSQ1YAPBAJIZYJQOv6wDFgElB1pMopJAlQbLlkeIEr1B\ngINAlSGywkBVMlhBnU1zDUfx6YmGGdC7mDUrdIWytK0lNGudRKAwEBXEwxkqhXMMjV6H7/uvGS9F\n0Wm1Whw9eopy2UQIFVV16esrIuXrO9r+uFhdXWX//sMsLXVs5vft2/Mj28xns1l+8zd/lWPHTrCw\nsI4Qw5TLEl332dxcY3b2u/h+na9/3eHChUVuu23vmwqi77nnHXzucw+yttYgkcjSalXwvHX6+7vJ\nZgcunbe2toSuD+D7Al3X8DyLRsPEddNAnng8ghA2sVgfKyv7SSZVwGdj4zDtdgkpMziOQTg8gG23\nqNePYVlt4vE+stlfZn7+ENnsTnp6JMvLZwiHt+O1N1DswwzgYdNZIJt08mhUOtqRCmCjcU1mFKGp\nOE6eoFpgONlNw7KJeyYbM4/STMHyadCSaSqVGoqiE4/H0SIpIvSx7+otDFzclju3tkbf+Di1YpG2\naVK3bTLd3Qz197OQzxOJxYjEYjy1fz9b6nVu9H08ITi7vs65o0f50l/8BR/9jd8gFouRzWb52K//\nOuVyGc/zyGazbxnIeDkrIzcC37n4+gngJuCVZOTDwJd5vVSknyM8/jg8+ODbc60rr+y0Ec/MwOt0\n8P2zxp69e1mem+OF2Vm6dB3T86gZBr/88Y9z6tQMR4+uMDS0FUVRmJzcyqlTM0QiCVKpFFJK1tbO\nEQ6Hufvu+/jHB78NLYOuZBeuZ/OtwwepN56i/8rt3HHjlaysPEI+WqTdstgsbqKFXNLxDMPNJmFV\n5Uyrxe16BK9tsoyCEUjSCFqim4jsp0FA1XcQIkmYacrOIo6TYxMFHx2BgWSTJg4BYRKkieBiEhBD\nQ+JjKB5tPJJCUg/AC3wkIaooaEgkNfoQJKRABxbaNhkdRlM9CE3hXHkWixBqy0R6JWJ+kelInGRS\n4eVqHndsgi2jfdz/jmuRUtKTTnPm4mL+ZgtFqVTCLpXo/oHS/2hvL8+dPo3rupfVrTOTyZAdH2c5\nl2O0t5dSqcThwydZKrWxBqf4y798lF27+jl99Ls05oukolliQZiknmRKVrngr2OVm4TNFrW17awu\nJZmZOc3i4jrr6xuI1gwTQkMTKlo0xqZTQQibOgoKITxVkJOQlRFCqk7ed+gXkm402tInjUMbH5MA\nDwMfHQuPHA5bUIgBOoIYkjbgkiVGmBoeURnG9gwco5eyt0IIBYUu1ECgoJMhSkwo6OoKw0oPZjhK\nKqHRKB2lyykwhoLug6KmifguTmEVTbviNeLVvr4+NM3iuecO4ftZuro6LbGVyhKFQpXl5eXXzTL5\ncbC4uMjnPvc1IpEtpFLXUCrVLtnMAzzxxNOsrOTp6+vi+uuvflNztWQyyR133M4dd3RI8fz8PCdO\nzHD06HGEMNi16x6i0QSnTq3z8stf4lOf+sgbEpKhoSF+67c+yosvHmd1Nc/0dBd79nyYb37zCc6c\nOUckEiMeT+N5Hoqi4fs2oVAIwwDHKaAo4Pstms1FhOiE+6lqnGZziaWll3GcDK47QDh8NZ5Xpd0u\nEgr1EInswHWXmJ3dZG3tK0gZp1TKkck4tNsuzWYNzZ0nQ4UkPhadqlovnaoIQBvBIgq+0k/DdzAs\nC8Uv0xuWpJMD1M1jrK29xKRmEDF16q0yipnl8Df/lszgToa3bSPc3U1hfoG2ZREEAaubmxR1nT03\n38yf/97vscMwSIfDzM3OciIUIj01xbt37uTP//iPuam3FwVIaBqapjERCvGdSoXK+fMcO3KE2/Z9\nv7L2o1RKLycZSQPzF1/XgEvLoxDiLuAZOuP7c6tbWViARgN+wEjyskEIeP/7O1s1P29kJBQK8cDH\nP87CwgJrKyvEEwmmt28nHo+TyWQ4f/5LLC29RDLZSzweI5MpkU4HXLhwkMXFCwjRptHwEWKedNcE\nNdmkadoYqkqrHeZbR5a4JbmdF//4H9jcPEd1c40tvVeyfXIPKxurzC/sR/V9ru/vp8f3odWiFvgE\nSMIoCCWMDJJE8AjQKQOB9Gjjo1KmF5sJ+qhjs0IRm2kEDTw28LFwL37txsdEkA8cFBEgpSQMKLJO\nR9YaJkKNrUgiqAhCdAmXiIgy65qE43G8SIJmPsBqLLEr3QOKhhEMs+7kaLR9MulBnFQfw71phi/u\n6f6wEEK8cSnzbSrHvfe++/jq5z9Pfm6OU989iallUMd2snfvPRhGmGPHDnPk6UO8d+te6tUCEUUh\nHIriui7B5hx7rxkiE+tnxnZ45O+eoGB3oRoW+bwgGmj0dsVpVdcIazqOHGC5vYAtXTQ0EsInFRpA\n98ENfGq06EajIiGOQpQwHgHrNGgRkESjSogmghIRwGIUiUJAFZUmGRyySAJWRZUuqWE7PjY2PiFC\nRC+eHcbGxQw86o7Dml9D2AHj268nUZ1jhybQAw+kjtZuc8FsMNabYXyw6zVVDsMwuOaaSR577B/o\n6dmDbTcwzRKqWuCKK27l0UefZKS/i/zKCj2Dg1x3001v6uHxZnj00f0kEtNkMr0Xrx0mEknwyCPP\nAfDccxvE41mWliocPvwlPvGJ9zMxMfFmHwl07sPJyUm6u7s5dmyWG264+ZLAdWBggo0NeO65w9x3\n3y+94Wdks1nuvvv7e9rFYpHZ2QUOHVoimZxAiLOEwy3KZZ/+/nGq1ToTEzsplY7geU10fYggMDDN\nlzAMSTq9k2i0m3Z7FdfNoigGrZbA88JImaDdnkNVRwiCJFKCaQoMw8D3MzSbDTxvjbD3NKkgRzcS\nBXmpKvICCp2NtoACghyjpGPjVEWFO64YISoabEunKZUqFFsO10Q1JrLdXFheYHy0G9frYs1v0htW\nyJ09y9g1V+N4KeY9j5W1Nca2b+dfv+tdPPLgg9y5ezeVhQUCzyMhBOVqlUh3Nz09PSzNzHB1OIwX\nCpF+hQVAVgg8z2NhZuZVZORHweUkAjU6HUnQaY+uvuJ7nwQ+Rqc68ob4/d///Uuv77jjDu64446f\n6i94ufH44x39xut0OV02vPe98Cd/Av/1v75913y7oKoqU1NTTE1Nvep4KpXiP/yHj3Py5EvMza3Q\n1dXFpz71f1Gr1fjMZ77I9PQNDA1t5ZFHvsILL5ymv387k9PT5HLzbK6dpmGukR3YwbHDB8lIg0ar\nTKGRwLLqRCMm6XQ/WmIHs2YFc32dwDQRUuKLzhOuED4ubWzaFBnAJ4mFhodEp0APKlEygMRGo4st\nlDEQF3svbM5iUUaQwRIavfgkpI0rEwRalIrXpIBLHRWLBJOsIAjwFZ06kogQ9AqdZc/CiIyxY2yM\nldIJLDeEa7XxHIeG10IRKoofIh6NUjM32bPjqktj2LIsmpr2qqjw10O5XOb4hSXmDh5juLeb6ekt\nDA0NsZDLMXXllW9Lhkk2m+WTv/M7PPPMM7xQEExN3UQ2O3BJkBiNZmnYGm3HQjfCNC5u69imQ9OT\npGMxcs0m5+sB20beQbjeRM2kWFnywE9TMDfYNjTI4lKFwIeMPsGs4mBZkqoT0NJsisKjiYmNT01K\nhlEwCKPREagawAohPIZxCBEmoEGODAnyNPGoUmcQm2EMEggULJkixwUcJAEBCXx0FDzCCDw8JE1c\nfLEbJ8iiyzSLZ1/gCtWh7fvEtSSaqqEYIfo0l3AsRDabpVAocOH8eQAmJicZGBhgbGyUa6/dhW2b\nNJtFJia62bLlLsrlPM997Uvcd/MNbEkkKJ8+zVeOHeOeT3ziR95asW2btbUSo6O7XnU8HI6yudlZ\neoaGpoFOZ1OjkeWhh77D7/7uv79kWvdWyOfzKErqEhH5/j0yyLlzJ4BOFaXRaKCq6ht66Egp+cpX\nHiaR2M1tt01z+vQCUmbJ50vEYjkGB0e5cGGeUCjNyIjK0tIivt9E03R03cUwBmg0FjAMD9dVkLIP\n2z6HlN/rTGoC4/h+L6qq4vuTCJHD93OEQmUsKw3OOQxiSMawsClRwadIjQRxMixjU8HCQyUkrkGN\nW2y9apCxsX6apSKubaLFHLYNpbh5oBvhOKyvq+wYG+bMUh6tEVBrlAhpEc6cepz/8/d+jVtuuYkg\nCFBVlXK5TH1jg1t378beto1yqQRCcG06zYvFIrZto8dizC8vEzNNYpEIuqJ0th2lJKzrhC+Kyn8c\nXE4ycgj4deCrwDuBv37F97YBX6Oz3SyEEM9JKc//4Ae8koz8c8R3vtOpVLyd2LcPHngAmk34Ce6L\nf3aIRqPcdNON3HTTjZeOPfHEc/T1XUdPT6djYNeua1hZeYa1tQ3ajWWclReI1jdJKx612adxQgrb\ntr6blyyboWgWW9ost9u85+67mdy7l3/8/CqDMYfVjQ2GDAO3VmPOdYlISUUGuJjE0HGJIolj4KOw\nSA8BCdIUKOEzgIKGQRGFBlDFI84iJmklgittAlljhCghJQSE2BSCjFRRsVggShOVCmHUQEdQx8DD\npUYgNVbWX2J6yxbSEZd8vUjVddimxtAUhzqwaTpstjze/+EPULQsRD6P5XkUgoC7HnjgUsLn6+Hs\n2bN8/vPfZmjre1myDmBvVpldfZHhrcv0X3UVd9911+Wd5FdA13XGxsYYHJq6NL/fQzgcI5TqYslq\nMRFN4hhhqmaThUYZx9B5/ty5jr01CeaYoVSHeLtNWPcJ6gGu8GhcOIu0bUwRwdc1RsNZGlLBcQ0c\ntUVN+iipEerl0+Qw2XKxIjKLSh2dAiECBohd3GxJEaeIziLLxAhTJ4FOCgUbSRQNSYowJgkinCOG\nTwYfOMsmSQIGMFkBBonq00hVYrubaDJABi6+FsPQdOKZNP0Dg6y2apzNV8iXy3zp05+m5+LifsT3\n2X3nnUxfcQXJZIjx8ZtfNXYHH/8CNw/0suWix0gyFiPVaPDkN77B1H/6Tz+SlkTTtNe1mZdSIqXz\nmvMTiQwrKy6VSuWHbi8Oh8MEgf2a45bVIpmMsbq6yte//jgbG3UgYHp6iHvvves1xmbFYpH19Qaj\no7vp6YGRkWHq9TpBsA3PO8stt+zmD/7gz6hefKQeHNyOoqSoVApYlksQtGi1KihKG99vIETzovbm\ne39rgo5DSKnTjaIaSJkGGsAatn0BDYU4XYTw0Qiw0XGIE8cEwnh0YTMAKASGx+Cwxu///n/m7770\nNebmDtHMr7N3Sw/Dgz20bJtGpUI4mSQTj3PdthDN+SW8UJ7uZJy+niQDA3202+1LBE1KeUkzEQqF\nGLhYDQuCAN/z+Me//3tiqspssciI41Cp10kYBjnLYikcZqTV4t49e36oeXs9XDYyIqU8LoSwhBDP\nAsellEeEEH8mpfwdKeU1AEKIjwPq6xGRf+7wfXjqKfizP3t7rxuPww03wDPPwC+9cYXyXwTOn19m\ncPC2S+8nJ3dx7bUrfOPrX4NckzEJEVWiyAhpvwSexkurR2i6HhGnSH9YI3d+gfktXcQSw/RlBggi\nVZxcjgONBlHfZ0VGaZPGJkSAgskiLaoIugjRRCHAwkAhQEWn0/vSoBsbBQUJ2MRpU0YRvbik4vmI\nzAAAIABJREFUOSlNSkhCgYMfSCL0EBc6NbmMjksNjSYQx6QbBUGEFSxEEEavbrCcn8VylugSJpPJ\nLQivSVRPEBMqllUhe9VePvmpT9Fut1manaUnmeQ9u3a96QIgpeTRR5+lp2cXiUSGvr5RchvzNCqb\n5NUCv/tv/y3xt5n9Dg0NoarN1yx2plnn5pt3YrbTzBdWqUZSrKzMUjQbjBseKT/O7vFxzqzWWZo7\niaenGRgYpCo8Cp5FsbVJIF0axDBlmIptMiI9Ro04OVnBFxBTNMr1BeJCEMgYZ3EpEwGGMQmQNIER\nipTpQhLFR7loedbAQCWOJEKaNjY2gjgeCgF1pjCJkSZHmG5sQuRZZZMAlQAD2/eIGSnUIM9QZpxW\nrcZIIgmKj2W3yZULvGy38AYmOfP889x/443oF9uZPd/n0BNPYDoOzc0ZvnPyGFO79zEyMk0+v4Tf\nXOXad73adTWTSOAsL1OtVl/lavxWUFWVW265iieeeK3N/I4dw685v0NSgtdYsr8ZhoeH6e3V2Nxc\nvURKfd+jWLzAPfdcyV/+5T8QDm9jdPTKizqTBT796c9xzz13kEql2LJlC5qmYVkWa2sFzp3bj2U5\n9Pd3Mz09STwe58yZw3zrW4fYuvVmzpwpkc97eF4Zy7qAEGOAjuf5OM4wrZaB78/RkZn2IEQeKXN0\npKdNoNNxIsQGkUgPuh4jkxmgUDiI18wQx0fBw6TEEJIwKVw8XCQNSkiGEWhoxiLvfvevUC4U2KLa\n3PWRu1nJ5Th28iQzCwuckZJbpqfpKpcJpKRqmniGSrK5yQsvv8jAyAjf+sxn8ONx9r7nPWyZnKRU\nKuEYBvlymb5XaD2WCwWCcJjmmTN8dN8+njYMnnnySezNzY5/TlcXN2/Zgua65NfXmZ6e/qHn75W4\nrHqNH2znlVL+zg+8/5vLef2fJY4cgaEh+DG3Wn8i3HVXpyrzL52MpFIxTLNFLJak2aySzy2iSOhN\n2gyZbbplhMDzaLUrZGSA7qtcKC+yJdqFBih+lF5bcP7Jxzjb8OkWeSKhEFl0MhGV/ZZHiwlahNGQ\nOKQxKePTJEGSND2Y6LgUMPCJESApECaCgoKggUEESY00Pqt+BYs0DiPkUYnio9EmTBhLSsoECMI0\n6WKOAkO4WHiYqBTpRZE2hl3jxMlv0NUVJ9lQSMgQrh7FjScQQrJ7yzYKro2maa+b1/FGsCyLUqnF\n6GhnMQqFIoyN78QeMFlePky73X7byUgsFuOee27loYeeJxIZJhyOUSot0dXl8pu/+Zs88siTfOc7\na+T8JNGxG4jJIyjlPOVSjYTvU96skZHdlO0Cq4tzqI0mil+nIR2ajKGTxEFHJ6DuOKQ0jyF0THcN\nQ8SwPBsFFRcHExUYJUCjD4sKoGLgEadNhU08mvgodKOi4rOCS4QGEbox0fCwkeiU6SeEj0qn3yZB\nBB0VF1BIIan5BeqOyoCu0p3qZlXpoRXSiYWibFQKzBUr6GM347ZMcjOLXMh0sWPHdoQQKEJQvXCB\nJ+bnueuaa1hgkRef/wLz2T7ec+970a/fRfgHgvGCIMCnozX5UXH77bdQLtc4ceJ7NvNtJiYyfOAD\n733NuZuby2zZ0v26brFvBEVR+OhHf4UvfOEfWVpaRwgDaPDOd16JZbn4fu8lvUoQBKyt1Xn55fNs\nbLgXDcie4GMf+yBHjpxgbm6BbHaMZDJNoVAll3uRsbEEBw7sp1RK4DhRWq087fYmUnbTIRclVBVc\n10DKMTqVDh04AyQIgs7jBjjAFBBCCAdV9YE1dF0nGtUIhxUqTXmxhdemC480Bk3AQyGJTh8eedYJ\n1CS9XSEMNeDo00+zb2wMTVXJTE2xY3yc+fV1TpsmJBLkTpzg2MsvU2u1iEjJy+UyAxd1aCtS8o73\nvY+//aM/Ij04yNaeHpx6nQcXFrjliivoTiapmCZmMknY95lIJBBCcOdNNyFrNbxqlcVmk+v27uWK\nyUk0Xee7+/ez56ab3rTC+kb4uRWP/qzxdrb0/iDuuqsTnvcvHbfeeh0PPXQEVYmxcnI/PQisxTmy\nbg3sJhndoeX6hBVB4Emark1EBERtE0tzsR0VRYtiNG2iso7u+XRFE1RdnYX2BgFZIsRQiVBHx0dD\nMgHMYSPp+KWatPHI4WPQAGqYCMKkCaHgYBOjSBcRCjRpEEWwmzZFNFxUdDZYwKVJmyFUJhF4NBhl\nmU001gkTIqVOYnEeQ28QjQTcdeutPPLY87zcLGMoKkmnwp5r9qAY0IhaP7LVt2EYGIZyqQphmk3O\nnXqO+vocrcYKD/6N5J777/+pdWH8sNiz53oGBvp4+ukDPPfc83ieAvTyyCNPsWfPVZw8ucDOnbcx\nPztD9aXv0KcKSsUS+WIeD4kqmoQTPVhBnYa7QVhWUEQvEWOCtuuhSUlVmhhkWTcvMKkFRGjj+hoK\nEXwCemiycpEwRPGIAmUkDg0sokg8mlhIdqBSI4aBYIgWR2njUMYgdNHgO0mTNiFUXCQ6EQQGUVQs\nkrQJU8NGwRJtdOHiW2UGkiokB8nZCsueh9s1hKFGaVYXKLYCvv3tF3Ecm6uuuopCoYBdLLJl1y4G\nursZ6O5m73XX8sLiIvv27eVsV4zZ48fZPvz9ysXcxgajV1zxY2UW6brOhz70y9x5Z/GSzfzAwPdb\nZ5eWXkSIBEHQortbct99P3q0eTab5bd/+5Osra1hmiZ9fX2kUin+9//+CvH49ys5i4tLrKw0yGS2\nk0ymGRu7ilJpg89+9ku023D77b/E0aMn8P1hDCNGLlfh9Omv4nnDSDlFu12k3e5DyhSwRKfZ1sL3\nAzr9Gg2+7wYyefF1m45F2RSKUiEIkoTDKlI2aDaP0W77VCoaqlsgIMsGfWh4pOm0f7epk8TARhBB\nRcMlmbS5adsoseImC4uLqK8IjdQ1jW0jIywtLNA7Nka1UMB1XRYPHsQHMpbFuK4jazU26nW+vLmJ\nEgRsLC0hJyfZPj1N386d5HSdvh07MEwTzXU5fvAgW8fHIR7H930Cx2Egm2W21WJxY4NQNMrU8DAh\n36darf5YUQK/ICOXCY891rFn/1ng6quhXIalJXib14afOizLwjRNkhcD8d4I+Xyew4ePsrpaYHi4\nl717r+OGG65jeXmFL/zZ/8uOaDe6KsnGPRKNCC82GlxoNrG8EKrQiCIoCxMVgRcEqEocPWzgSQtX\nakRlkhZp5lp5ojLCphujiYaGRhONgH4UXFxsQMEjoMxZ4jRRLxINAwOHEElKhKkigCRhUqSp0MbB\nJQU4NGmhUqOfFgY+SaCMzjgAgjohYgT0IokQZgMryKNHoIFCJJ1lbl0QTV9HyvCwnIBNc4XTcy/i\nKTa3ffwjVKvVH8n2W1VVbr31ah5/fIaRkV2cPPQw3a0aXUjGd00zAnztr/6Kj/zWb9HzI3bo/KTo\n6ekhn68yOnoLvb2dG35jY43PfOZvSSa3EY0mWDryNIbvYTdLjAuBisKG9HGkSd7KcUMmy3PFVUKB\noC2j6MIjYuiYdoMWoFInLOuIoEnIt6gIjxiDNC52N3RjUMQGwrQQpEmSZwOTFDoSSQwNF4NeHGpE\nMJAMo3KWOD59qPQSEAEKmFQAD0GOCgFgoTGODmzQFFW6koJoJGAwYzIWDXG0OM9y2WOt7uCaPsGG\nRiKk8bJbYDAeZfNb+/naoZfx/QDpNtj+inRmRVHoi0RYuPD/s/emQZad9Znn7z3r3febW+VWu0ol\nFdoXEAKhFrLEFtjY4MENAxh3dEe3OzpiImYc/aGZDia6JyY6/KG7HeC2GzzYjWkYjBAGraBdaKtN\nqj2rsirXm3dfz37OOx/ORVAWGCQklRTBE5ERmefeuPnmPSfved7///k/zxned+ed/H/NJj9eXiaj\nKFhSkpid5QMf+tCvdY4qlcrPHbP9zGf+Cc1mi0Ihz86dO1+zAFpRlFeIr2dmqpw9u0mhEF+PS0ur\n5HLT9PtnyGTi66RcnuaFF54lkSizf/9ustkcS0vHOXfuMI3GOs3miERCYzDYJIqSYyKiEOs/KsAs\ncJyYnFxHnB6zQOyT2uanqTImiiLIZnu47gjPG6HrCySTU/i+TegfJY9DwDlMYIRLDoUSFhYqGsTi\nZfrctP86PnnHjRiaxtEjR+Jxe+DFs2vU230KaY26ElEMQ3bm85zv9aioKuFwSElKKpqGFQQ0XY/G\n8jJXTk+TKxTYbxicOXyY6b17MaKIHz74IJ0jR5jQdZabTf7miSc4cP31jCyLQ6dPE7kuVV1ndmqK\n2osvcurMGTLbt7/mCulvyMgbgF4PjhyBW2+9NL9fUeKqzAMPwOc/f2nW8OvCdV0evv9+Tj3/PJqU\niFSKd991F++46qpXPHd5eZn//t/vQddnyWTmuO++E/zZn32DPXu2USrluGHvPLuqVVabHQ4vn6DT\nqjOyLCQKk0S4MuIUGkM1S0JYZJDoCFKKSbNnM3AdWkLBk4J2GJFkgINPBKQRBBTQMJGk0WnGpqkM\nCdlEwUEjQ4hGCxWFaVxcKgwpI+gzoseALhZ7SaJh4NGnh80KAoMKGjoRKSJcEgzIEzBkAGQYYdOj\nxpS0SdnQ0XVSoxQThd2kEz7Ly+dQej2ivsGFYZuPvO86dgQBf/Nf/yu/+/nPX7RL/WW49dZ3MRyO\nuO++e3HWjmHmMszOlrjyysvRdJ2p0YhDzz3H+9/ktMYTJ07S6yWYn198+Vi1OsvSUp5a7SDHX3oB\nv7kKzpARko5UGaAxRKUJ1D2NR08ss4CLH0XU6LDhdAlUjUD4aDJihA16wJnQZQ6F7dJgwBoWEp8c\nLQQWm8A8BXIo47OWZYsAgUISkzQSE0EamxGQpIDOZZgEeEg0LGxsfGpozJCggAcE5HFoI5lCYyYr\n+MgHdlJIpzl94gSPHzuB5sCiaSLUkHrUJeMLJqWCgkK9scS6mmOmO0lo6oyEwY8On2OqVMIPQ3RN\nwwsCzGSSVCrFH/zhH7Iy1ojk83nm5+d/5emWV4ufNx33euHaa6/iqadepN3OUypN4boujlMjk/HI\nZAqEYYCqaphmiiCwACgUJgiCo0hZod9vEAQZej0D33eIh7IKxCRkCxgRVz62AceIaxld4LLx8SSx\nVmQT2EDKPFG0iOc1UJRpVHULyxKEgUuGKTIc4vKxeLU2/hsSgCCiS4MBUEqWSCXy/OCZY1y1a4Zs\nucy9P36WRj/BoAnCl6yNagzCFTaLGkXbJrAskv0+IylpAH3bQwoNM5QoEWy0OhSrVWzbZk8ux5Gl\nJdYsi8RwyAf27EERgssnJvhvjz3G6Ac/4H3XXkvVdRl2u8ht26hmMkwKwcGVFdxdu35DRt5K+OEP\nYwv4f9B6fVPx/vfHfiNvVzLy99/5Dv2jR3nn7CyaqrLWaPDl//D/cNm73s2tt97Evn37ME0TKSX3\n3vswudw+8vkKS0vnWF52MIzr2dqqA2WOPPcAz2carJ8bMGxIhmGRFBFZ8owQRIBKgCVLuOYETfcc\nRbtDB4O6G/tESOmQpkkeDw8TmMSmhsUKkiwBKSQdBMuopAlJ4uPQIk2PEVVmCGgjqQAKJ1mmzIAI\nA48We0hjYuPSp4pJBp0+W7iYeDSRCASrZCmSJ0KjTZ8uggvM06OIzkBm2FfJMvA8Ov0tcpkptESC\ntfV10kaCcmWWdx44wGS1SrrZ5JH77uP3P/OZX/mcaJrGhz98N9lskuNKnyt37ryodF/KZKhvbLze\nl8IvxdZWE8P46U4/iiJWVlY5fPgc6+vHmCpvR3c18oFknSzn0AhRxxkyEh0d6ejomQ7VREDNiTjP\nOjLcjmQCQZ2EsoUXOkiZJCRggE4DnRESFZNobP8esUKDDBkgjQPMUyNC4iARKAT4OIT0UWiRwQGy\nKCQJCKmjUGMahQIdsnTpUKXFDjqUBGwqKkZ5J4fPbCC8IUfOrpDpDSlqJTQzQzqKmPPbFJQsiiiS\n0EJwcxhoeAmThW2z2J0OR04NqbUeZHZiJ37gMKDL/zXu7QohWFhYeNNbbq83isUin/vc7/Dd7z7E\nysoZDOM8zeaAMKzy8MMPoeuwc+cOcjkN8Dh06GlMM8naWpdWqwvMoeshYZglth1bI27FxC6r8e1z\nHca1szjdJA3UiN0sOsSEZB+wSRi2sO0CUWQRRQ2k1Mavu06SDaYIiTDxyZBH0mDEBVwkKl3M+FFz\nF3PVm2n02vzp3z6NPTiOK9NE4Tzz5SqZTIY9E9M89ux53OY5Fkt5aq4LQUAGeEJK+kHEjGqghBFD\nBdpewJVrG2y6Lo6i0DVNzrZafPbAAZSx6LjvulxVLOKORrSiiGq1ypXz87xYq/Hs2hoTmQw79u2j\npWmv2cH3N2TkDcD998Odd17aNbz//fBv/k3syPoqxOlvCbRaLVaOHuWW+XmEEJxd3+CB589hu2Ue\ne3iVrS2dqakX+OxnP0EYhtTrI+bnK/h+wIkT5ygWF1FVnXZ7jSuumKDW19lYcZhyfSZJoAgDW6aR\n9PBR2BAZIjmJQMGy1nAweEn6iKGKSZI0AQKTNAkm2SCJx9LY6yPHWeqM8CnG+THkgQlc1vApoLOL\nkBpdOsTjfXk8hkgm6ZJDZ8AUPRYJcRD06SLJoqJSxMNinQQNFFQ2cOnRpE8BiQ70mKHJIhptBLqu\nk07pFBN5TiwdQle3kQxDqrrO3skJOt4Jjh08SPn225kpl3lkaekfdUyVUtJsNgnDkGq1+nKbbHZ2\nltP5/Cs0BO3hkIlL4LZXrZbwvNWX1/zCC0c4eXKNen1EtXoF7d6QhB8QkMBkG30sBIIFBBoRCi49\nHFZDk1ZooWBSIIuNxMeibE7gRzY5/zTTuEwqBuejEW0y7CZJFpUuCg3SrCEx8BjgjgeuBwTsRGMF\nmzOoFFBokaJNQBuPEBebEjouAW1KaGzHQKBgYGIwQqfOgKp0EVoKzxlw8ngDQ2qkXIvJMEDFJ7Q8\nZBQwJRXCyMaP0tjukISaoaIEbI3aeKM5hh6s1rt0hha6sYCRnWF6xw3ce+8P2blz52sSqr5VsW3b\nNv75P/80g8GA733vPr70pQcRYgfpdAXL6vHEE09w4ECGTGaB5eVjbGxs0Gz6GEaV3buv4cUX20i5\niqLMEoYp4jbMBjHRMIj/p03iSshPUmS648dyxG0cj3HGLkHwzPg5k4ShB5wGPAR1VEwSlEmg4CHQ\nyCHosEWIYJEOgn2ZDAPb5shLJ0kONBJqAV3JYVs+61tb7MxmqXe7pKVPWpiYisJUKsXaYEAxiojl\n6grroUsDCCOV96ZzFFEoaRpeGPLk2hqZqSmMn7lxtIdDJg2DdhCwa98+tk6fZqFUQisUGJZKvOu6\n61BUlR83m6/5XL3NblNvfUgZk5F/9a8u7Tqmp2F2Np7quemmS7uWV4t+v09GURBC4Pk+Pzx0lkJm\nP9WCwVK/z8LCVaysHOepp57hlltuRoiQKAqxrBFS6qiqPk72jPC8EUZ6nrW1s2QUiSc1hjLAIMuA\nAbYwMOUCEoNh6JJgiiEeAosZTHRCXKCApI/JOiaLJChjAzY6KlXquDgozNIlxGCdHCNsygSM0MkQ\nUUfiIsd74AIOKpINYiv5ISYmHtPYjLAZoSCQlGiQJ85M8IAOGUwKZNDG5X5Bgy0gTzEzja6F3PKu\n6zn5oxcolUqUM1mi9TWGUYurthWg32djY4PpmRlUXf+F5fdGo8E3v/n3rK/3URSNVCrit3/7Dvbu\n3cuOHTt4fGaGM+vr7JieRlUU6p0ONeD2669/sy6Tl3H55ft46KGnaTTWECLB0tIG58+fwXFaSLmD\nTCbHyFth5PaYDk1cGuwlT5YkFhZpEpSAF+wG80RItUIqzBIg6OETSYnwkyhk8WkxiFx6KCxgINBw\niQc2F9BpY9NmDpV4jM5jFZUlVBR0ljGwMXCwSVNkAZURTVpYuIxw8JnDw0RDRccEhuhk6WESCZ8w\nCrFr57CEyj5zmpbjUCGiF1q0hxYOGrqeJAp8FNVHR8P1XZQQtHQaqScYWTYlVSUlA0p0sVRBt5fk\n8D0nWF/f4q67buW2296NaZpv+rl8o2AYBidPrvGhD32c1dUa9XqbiQmTUulqzpw5zCc+cRs7d97C\n0aOP8/jjzxBFWSYnq5w8WSSKXMKwBjSIyYgJzBFXNVziTOYscYWkSqwfGQF7EWKElEOgCNjAJIaR\nwvO2EU/dtFE5ioJHgiImCipgoKCisI5Biwgdi5IQhMMRjzz1d3i2wWXlCS50IzqjCxTkNLnAZHlp\nCUdKqpqGFUQ0bRt0nbyikIkiWsQ1m22onCJigwiMBH0ZsGXbDBIJ9h04QEsIVvp9rhxPxRiaRtvz\n8HSd7du309rYYGBZ+EC5UiGRSHB8ZYX973zna841+g0ZeZ2xtAS+D5dffqlXEldH7r//7UdGCoUC\nwyi2Qq93u/hhGlNPMLBGZPOxWdHExCIvvHCEO+54H1deuZ1jx85SKs0RRXFMdadzHlUJOfzsc7Tr\ndTwELVHCUEEKGyW06OGhyWkUoeIREEiPDB5O7KvKkAlCfBR6TBAi0cb7XociPtvQiUjj0yCizxan\nGDKJwRwaSUx8bEak0XBxkASEXCBDQBaLgIAkHUaENHCYxSU7diJxCDGJpXAlwENhgMIQFUmauCzs\nM2KCDYbsFSG+3yRfWCA/UWb73inEqM4o6FIfvoT0OqzZWYQwWBk+TXXnDNd//Pd+rijY8zy+8pV4\nimBhIc4yGI36fO1rP+Bf/ss8U1NT/O6nPsVD3/8+T7z0EoqUFGZm+O3/5RdngbyRSCaTfPazv8d3\nv/sAP/zhjzh37hiGkSKdPkA2u50oCglDyVDxWWkq6KGHisACQnQiPEChTICGIIoEAQJQyQLrXp8k\nERYK20mRRgFs8uPRy4hYECuIyKIwIgEkiPCQzAAvMc0KUwhKCM6SRTJNnAXtoOFjEuDj4+PgYI6v\nNIkggcMAgceilCyEEQ0UlAg6bodQCo6LHMgyDjpD2oRBmwlhkjMEfmAjoiYN8uQL03Q6W5RkREMd\nMpUsoHQHbJ58id7E9UzNXk8qNcfjj69Rq32bT3/6E69rYN6lxGAwIIoMcrnYQTWXy6DrGgcPHsc0\nSywtHeHUqTN4nspwWKff76MoRSCLaYLr1omiIlJ2gCni/8rs+EsnrnCYxBM0GrF+JCDeRhT5iYgV\nqhjGBGG4RRgmYPw5kQf6CMpEeCj4gI1PHR3GQYtlvUAlHQAK5/rnWWttMOmMIBzRj8Amie1M4Eno\nCI9p1aPlqXSHQ3KALQSWlMwBqCppCWYUcLSzwWIuSytIMDU5ye0338yZKGLp2WdxazVm02ls3+fZ\n4ZDfvv12kskkB66/nicfeYSz3S7vUhSev3ABc26OW34Nl/TfkJHXGfffH5OAt8L/8J13whe+AP/u\n313qlbw6FItFdlxzDUdfeIG8aYKU2J5LzbI4cM0142fJl2+kd999B+32t1hdfRHD6HLu3IOEbp89\n5VkKmQwH/cdQZRlFZCmmTUb2kJ5sY0ZDRlhEsoGFIElcnnYpkcbCQBCRwcaji0WAJDGemJlGo084\nnpDR0YEOgog+KjUEBUY4eJhIHBQGmASUOY+CjiCghE8Rh3PAWQJGRDSI918KMENsVewg8IBZtPHO\n20WSRUEnIE2bNdakhWa7bE8ssKmqfOJ//RTPfOtbXDhzhqLicqYnaXQdEvik1RzZbMh16s8vxy8t\nLdHr6SwsbHv5WDqdo9+f4fnnD/PBD/4W2WyWj37849gf/jBBEFwUUX8pUK1W+dznPomqBiwtOWzb\ndi2nTx8ek1OJ5zmoahI7NUQZGoT0UdExlBRO5ODLARCiIXBkF48ZsghAEuLg4lMmIIuCCWQJcXFQ\nSRIhkONAvAAXgwEJ1sdWdwEWGcyxT64CSBIE6PgMmMVCw6BFEoskKkN0+gwpY2OPr7c287ikgL4M\ncFEo4RNFIccxSMp5yoqJFoWEJKgrEjU1ZK7qM+h5LLsRAy2iuXEYLfBIiQE53SBl6YRKQFWfoNvZ\nZEOeIXXrZUxObuf06adZXV1l/h8EIr4dsLm5yTPPHGRjo8nc3AQ33ngtmqaxtPQi3/72A3S7Q3K5\nKtPTV7C2do5q1ePFF01SqT2sr58nmbyRRuNBlpZ+RCIxj5QCITooyhZhqBGTjyI/kZfGPy8T+460\niXUjPWAdKRPELR0F8NH1AFWtoKoQRVso2BhSRY6fFRCSJ6CPQocUQ9Lk6VM1Kii6oNeukY0strkj\n+g7oBFQIqYoBunRxpADpEckGfU2J3V6FYBQE2MCsqsapv6FCC40WOdLSpVTI855rrqE+GHCuVuOu\nf/bP8D7wAe79xjc4tLJC/rLL+PznPkf73DleWFlBAZJXX80H3/EOJqpVpmZm2Llz5y9N5v3H8Bsy\n8jrj/vvhD/7gUq8ixi23wNGj0OnAqzBOfEvgrg9/mEdSKY489RQ1e5OeOsEVN930cqrn1tY57rgj\n1idkMhn+6I8+xYULF6jVanzzf36HU89sYup9bHeTA7uSHD3RZeiphK6JofYxpUvdS+EpDpooYUYu\naelik0FjSAIFlwuETBKh0sTGoMU2BAEhNgA6ET4+GZxxRFps2dzBYJ0sKQy2GKLiUyKgi4/LBBYT\n2CRJ0iPFTnSWGTAkQkGhgqSHpES8645rPWK8z1bp4+JSIEsBwQCbHOvMYGoqD55uYOxcwwkCvvf4\nU5TVOZbaOQL1ZmqySyRGFOUE82GZH/zgMd73vve8wqCo3x8gROoV5ySVytNodC46lryUKu2fg+3b\nF9H159H1DNPTM6yvP0+n02QwCEmpSfZMZ1he3cJyh+Qji1BKhKLTDWEDk4AAkz6WOEdNTqChoeDj\nMCSPTQ6Bhs4UBht0KCEJSBIiGNKnR4IsOhlCkgQkUaiNPTTnMckC6XHmjI2NR0SNCioTqASo2ARs\n4I2nbRy6JOmSJUsLi7T0qSCwULDRcEgRsoEdGbjkECRJp3dQ3Kew7Fqkpy6n6OvIU0eoIgp/AAAg\nAElEQVRJ2k10MWRHwqTjWYS2iWdmUdQshD7VZEi/12VycgIhcrRarbcdGVlaWuKv/urvx5N1sxw8\n2OLHP/4a4HD4cJu1tRyKskC3u8HGxvcolfKsrXlcffW7WV1dwbIStNsOcC1wlCg6QRTZRFGaZPJ2\nHOcIYegTVz0CGGcvwxCVFjoKPklCysApYv3IJDEZWQW2o6oeuZzJqO8jvGVytImADD1GTI2zuxU8\nJAE9kiTRVA/Pq1ORDkL46EgaqECSJjZ7pIONg8OAOSTPATLSqGoafeJG0jyQiCIGSgaUHHYEaaqY\n0ufJzRo3eB4d12Xoeezfvx/DMLjhhhsIw/BlV1zXdVldXUVKydzc3GsyN/tF+A0ZeR1h2/DYY/DV\nr17qlcRIJuPx4gcfhN/7vUu9mlcHXde54667eM/tt/P+kyf51rceJgg6nD3bwXWb7N9f5aabfpqD\noCgK27dvJ5fLkYgcKv46yqDNO/bv5537b0H6p1lbb9IN+wSRJDDyJDKTTBV2Mah1wNEYyToOK2RI\nEJLDIEKhDggs1giw6FDCRWCikSSBi4VGGg9BgMIkaQQ6kgGCAT4ONhXWUZBcT0ALD4cTrDFNkwlM\nEgQkUDiPRglBTs1QC/tsjUPDJQoBKgqSHkMsUkCNOjUEgiwFCoaHmoqYiuDCw0+hbitS6dnUWGfk\nTZBLlJFKhoF3gl4n4nwkabeX+Iu/+Bqf//ynLtIHVKsVoujgK87JYNDkuuteaeP9VsLu3bvZv7/E\n+voKup5F0yICXyGp+uyfKrNtcorFyjzPHf0m7qiPEaXwjSLdTJXAapPzV9kpXCazNs8OTnFGaoRo\n+CyyRp0CEgOfBhKLJB7DsbmZiU2WAAMdBQUDicGAEJcOi0gkAhuNMjarNAlwaCDQqRKhAAo5EkRk\niGiTpoQ/DkfssU6OJC4mppBkpUIThzxQJgN0sHDpUGHomvRtyd0f/DzT0/v50d99iaKWoVKa4sLo\nAomoz6wS4pkdhq5DE4e5Hdeye26eVq3G7j17APtNd9T9dSGl5J57HqJQ2E82G+++MpkCx441eOyx\n5wiCOTQtJIpUdH0nnucyGq1jGDqbm6vU6x06HY0w1MjntzMceui6SxCcRIgpgkAd58xIYhFrirhd\ns0meLZL4jJhEpYCPgkeaeAR4CchhGDOoqo9p9ogiCL1TzLDGLAFVoEPIiHUGmFgo6IQY5FASSTJJ\nE3sgSCqCTpQgIQRJqZEkQw+XIzQpE1ISgrKUJIE9iQTVTIYFXWd6MODQcMiFIESPElgoSIrMqQah\nMDkblfnbo0d5/223sf/d735ZxCyEuMie3zTNN2wU+zdk5HXEww/D1VfDq/CSesNx993w/e+//cjI\nT2AYBgcOHCCZTPJXX/5zBhtblPJpgm7I1tbWReOH7Xab//If/yPHHnmEymCAoao8urLC1J49LMyW\nqTfT6LZHJWmyNtwkIo2hp5gtjahv1FEBFZcseVpoDEmhEhACDjNEJBiwQZoyBgMqDIkQOKj0cEiT\nJ4WKgYlFir14PEtERIYUE7gUkHTQyQMShxE5AuxxYHwOhTMETIcjNFS2ABdJcaxHGKFQR0ejgkkC\nlwEBK2SIqJLCtx2qjkqn5eIqHnvNDAXPpcM6ZlphaG8hxB5UNUsqNYFhGKyswBNPPM3tt7/35fdx\n+/btLC6mWVk5zvT0LlRVo9FYxTTbXHPNBxmNRrz44jHW1mpMTJQ4cOCKVwSPvdH4ReODMzMz3HHH\nNfz4x2u0Wj4vHjxB6Bl49oDltR6DgUMlP8nsxPV0OsdYdbdRnruVkpknu3wP+AU26dIfjYhkSIUQ\nE4FDmwidZQJ04rygPLHTap0cLhlMRqRYIcDBYxYPB0GLMh4hCkM8kjAe8F0hAThMkcXFQ8MnQaxe\nqmACeSaQjNggQZ0BZVQEAlvqRHSxkFRRMYlJQxGBUEPCTJr2yimWlo6TSFTJhQGVShktjCgGec5Y\nLRZ1FdXU0BMqZq7E3oU9eEGEmUxRr1+gUhFs/xlnz7cDut0unY73cmTBT+A4AZ0OeF5EJjONoihI\nCWF4BYoCiUQHyzpPq+UTBFOoagLb9omiNsNhBSkjFCWBEJuoqoIQPmF4nthfpI3OMlVy1DBQ2I1B\ndhwiMCJAJxa9KoRhklRK0u0cJ3KWKFJnhlgKqxOrT1zgGHHwX5oEkT6FktjGQB2io9H1AhgHa06h\nYxCrTiwSdBihR9HLcz65KKIgBMlUCsW2ef/0NF9ebVAUVSqkURRBJCW+qpJSM0hrgxeOHeO63//9\nN+eE/QP8hoy8jrjnHvjIRy71Ki7GXXfBv//3EEWxGdrbEbZtc/83v8lN5RLTe+IY81a/z3e+8hU+\n9a//9cvhXU8++ijrBw+yL5fjeK1GRVEwheDoCy/gpbLUai3SWoKu7ZMOIyrmkPpKF0MpUVA0EBIR\npnBZJmAnkgl8Qlw2kGxHYRqBw4g+4XhAF8BBJaLAZWSI/QckKj6xiiLCQyOBiYNNiIKDQxoFF501\n2mNlQnwzGSA5S4RGgQiNHn1cPFIIlsmSY5KdePSwGBAwIkuWLUZenrziI6RKRsBmd8iEmmLGSFN1\nezStZ/CjCSQZzKSJZW1yxRXbmJ+/nKeffv4iMqIoCv/0n/4uP/rR4zz77I8JgpDLL9/OHXd8giAI\n+NKX/pp+P006Xebo0Qs88shBPvOZj77CAfONwNmzZ3nwwSdZXa1RqRS47bYbecc7DlxETO6++w76\nvb/lT7/4X/AsgapcQTa1Hx2XjfYSw6GHH3Tj+DEp6G2dxBcOc26DoplBkSpppU4tdNhNih4RHbq4\neFj44/jDTRQMbMpEVNBIkGMDDZ0kDkUuECLQEQzJsYbCBCYGLn08pjDo4dPGpUNIEkHIgAYmEUUE\nHRwkIQEqaSBDmxZpfEYoNNDZNjZWAw+JQShsksJhxT5DSkty9OiT7N59PYoWm8uvex2WnB5mcobD\n/oAcEe/bt48rr7+BRw6fZLnlsmvblUxMpPjYxz72qgLr3gqIR9Qjoii6aErMNHWCwCGKJMNhHUXR\niTVnbTIZldGoy3C4hOdFRJFGFDVwnAtABNhI2UfKc0CRKBoSRVlihcc2oEKSLXxsIqYxSAEaYqwS\nCsgS60nKhGGHbtdDY4VZWghiC7UesdIkIq6zFInlrw45BqENYYZWLkOjeQYNFw0dkxJ1PCI8bGL3\nkwFwFbGU1heCpu+Tsyy2ul1soTJwfbJ6hEqbVjjAjTIYpJnVNHS67EgY5HSdY08+yU033fSm68De\nXlfbWxhhGJuM/cmfXOqVXIwdO2K9yKFDcO21l3o1rw2nTp0ibVlM/0z/upzLUen3OXr4MO+57TYA\njjzzDLrn0W+3ualcxur3kZ5Hc2BxvjfinbkdjCKbvD/EUByGkU3WD1kVfUZyioJqIqI+WdmmRQIT\nE4lKWinQiSCijU+fPEly7GLAJh08QuYp4uGMC/ouPeYZMBgbYqmM6NPHxUaQpk0ADJC4TALbiXdG\nNiEuCm0m8NmJDrQIEBwng4fPBLMk6TGijYZHkYCALUaUMfCRRFJFkUO6PRvF9EmIJBoauuwy8BUU\nzUBV06TMLjmzwObmMvEH7sWVhmQyyd13v5/f+q1/gpQ/FQv/9V9/E8+bYmFhcfzMGXq9Jt/+9v38\n8R9/7g2dvjh9+jRf+cr3KRT2Mj9/OaNRj69//XFGI4t3vetmICau3/jqV9l47jkuk3BZqcCh+imc\nZILhyIBghk54AUMbkPRtNENhoZzi7GYDx/cIjTRW5NLxAypoqKh08JkmokAwNklTaKOhotClzynW\n6JHAIo3BNA2G9GkzjY6NoE6ZHlOEbFJAwcFDw6OCRGdAB4eQNF0koBHRQqCPJdRFoMUMXbYD6XGI\n3nk8VonYI3QM6eOJEaHioSlZpjIHGDoBrVaHe+75S3TfY7hxFtfWKRk5ClRoMsGK26aumwyjkF0H\ntvPhm2/mxptuelUxAW8lZDIZ9u2b5cyZc8zM7GIw6HD+/GlOnTqIpm3R6+XQtO1IaSCljWUt4/s1\nTHMb5fLldLuncJxDRFEVRdk7DrlbBbJIaRIEk8A8qtomDH8iYi0RYhAyROATUwqQ2Hj0iTcn+fFX\nAMwRITFosMhPnUhM4mi9gNhabQSMRJW0gLS6ytAR2Noilu+QJE+ODjDiFC4hPhKPaWI3kxzQ1zSW\nwhBnaKGrKj1Npeu67DVThM4IgzxdpU8PwZLvM1d2WFxYYM8NN+CHIcdefJGb3vnON+nMxXhDyYgQ\n4k+JlUAHfzbBVwjxvwN3EdvT/Z9Syu+/ket4M/DMMzAxEd/832r4Savm7UpGuq0W6Z+zS8snk3Tq\n9Zd/NlIpthoNbkgkKJkmYTrNS6vruKgsSA/LH5BCMmfmGHkRI6/LlD6BqUYcsRs0A5eE6OOINIlo\nFNu9izKaomFLG0fagIKLwhYbuFSAnZisM6RDnYAkfaq0SRGyBej4QIMhOSJ2oKEhsOjRoMSQWUyy\nhJiEGChUUVjDp06fkAANhRQZDNpjIaVDmxRJZhGoRLhxqixdgsinwIgkGYLQphcInJRLUs+gJjTy\naZd8TlAMz3L11DSFzgarZw+S21PFcZyfK0ZVFAXf9xFCEAQBp06tsG3bxTkH+XyFlZVTtNvtN/RG\n9sADT1AuX04uF/+OTKaAYVzNgw8+w3XXXYNpmhx84QXE+jqLhQJntSyldJlux2bJfomBvBw/CpGi\nzjQ+s5lJmlrA+vp5rMCgL0sY7pCiAFumKY2bMxKoIEkQE60aCg4q54EcKiY9II1CdkxfZqkzRYdl\nJDMoLOBzgXXmadFAZ0CRiBIhJSIUzjIgT4oEfVYRJBEcwCBJSJcM60wSkMUkBfgI5tDYwqYnQ3IM\nMRUfoUzS1jVcx6XjBSzuvJ1crkF9YwnXstil5EmGNv3BBUI9TWXqMsLKNt71yU8yPz//moLw3koY\nDodMTpZ49NHv8dJLj7KxMUBVJ5mc3Em16tBqHSYMW6hqhiBojUlFhlSqihB5pqZ2U6upBEGaIGgR\nT8tUiElHnbh+kUfXsyiKQRSdJgz3YLFIlg6CDUIyqBi4jAhIEtcrZoEd4+8bQIICgjQyTgcfr78K\nnCPemDQwiGQeJdzE7dtE5hxCpMmIJo5UsMaZVIso48kwnwHgopNEJ/RDmobGXKFK03MYCIXFfJlU\nMkPK7dIb2TD0aEZdFnQdkwRbQcBts7O0hkO6v4Z52WvFG0ZGhBDXAGkp5a1CiD8TQlwnpXx+/PB/\nklL+30KINHA/8LYnI2/FFs1PcPfdcWjfpQru+3UxMT3NGd9/xfG2ZbHnZ1oD77nzTu79i7/AHH+o\nbg2HDPp9NBlSUTTaThdThrT9LKEEU/o0vBqBmkaROko0ixB5PLlJngERL2DLPKOgjIYgJSwqImAQ\ntehTHY/XDggpYiAZcBwTC5eQIwgsJCMS2GwnztzcREFg4JAcm5mpKOOUG4GNQEVDH1uEC3K4DNmi\nyR4MNLqsU0RnGwIFlWBsxSZwyBARcgIfkz4OClGksiubYzJpUJkv85FPfIJvfPlrXFteJJfO4zgD\ndldMKvk0zzz9NO993/suen/X1ta4775HOX9+E13XuPHG/eOEUvlzztJrs4D+VeF5HrVah/n5d1x0\n3DAShKFJvV5ndnaWU4cOsVip4Ns2nVGHkRWiEJCNRnRYwTSmIDlHVfOYKORIjtoc6pxhJGfQlQzr\nMqKLhouNRUiISwYYECIRY5niFCYZJDYtigzZGPvlCjQkESMiknhUUEkQcAaBYJI+JVR6mGSxUTAx\nSLAHwZAOF3AZkCTERXKeEWkCukzRIkeSAA0Ln5A4lj6NwhpNtikgQ51e1MbxVUy6JM0cBjWiKE3Y\nOMduvUJez5BWJAnPwxAe9W6D48+3+fFjj7HzVcQCvBVRq9X4y7/8Jo6TZ27uVk6evA9QufXWG5id\nXeTee2Fi4gaWlu4liiwajQGuayDEIu22x9bWsySTBlGkoigm0CJWcATEQtUhcRNliKbNMD19M5ub\nTxCGzyHRaTDEICBkmZAWHiHxXjsApol9garAS6hs4mBgj32FWsQaD5u4PaMS35g9TpNAI4gEoR3g\n0yVDSJo2eVpMoJGiRZGAInAMA58iOUwMBWrRCMdIU5rZRckZsk1GWL7HRLHK4rRg69w5DMtlXzbD\nCSlJ6jrnt7YIVJWrZt98ofo/SkaEEPuAjxA3xyCuIH1XSnniV3jtG4EHxt8/BNwMPA8gpQzGx1PE\ns09ve3znO/A//selXsXPx7vfDcePQ7MJl8CT6tfG7t27eWpyktNra+yYnkYRgpV6nUEmw45du9jY\n2CCfz3PDDTdw+a238txjj7FgmpzfrJH0QqQUdAERRbgIpKIgZZJu4BKpJqX0BBmviulO0g86KFjM\nIElg41FniyZJM0Fo6uRDnaMjiUafLB4JTFxGeDjAAZq0CdgiSZpNuoTMkGYHOhF6LD9EZQODNCYj\nHAI0JB4Cl2jcpdZJMT2e28gzwOEMpymg0aeDRp8ELjouBj5DTEIcVPrMoONTxmBENxAca7hct+8A\ngZXl0KHT3PqOXUzrCYbDAfPzBRYWDhACJw4evIiMbG1t8ed//i2SyV3Mze3F910ee+wUo1GbWm2Z\nmZmfKupbrU1mZvKUSqU37BrQdZ1UysBxLBKJn44db2yc49lnn8S2O1QqOdz2GrPVCsvLK0g1S7uf\nIGmUwGtiBz6KukI5PY3vnWc0KtHuNkEWqSg78dUMivQZhjVsmSTPCrOE2HgYRNRRsciQI49DhCSL\nTXU8E5MnJMNoPPKp0Blb19WJSFNCJYVOD4mLoI06VhiFgIKDjiQiwCeiSJIsYJFBG+eVOOgIIkIk\nESEGAp0eCn3KSHVANRTMq3l0LYOiZAnOv8R5w2IRhbShEqKCDBCRiumpWGGXcJDi0EOPsPvyy7nl\n3e9+w87f6wkpJRsbGwwGA8rlMtVqlXvueQAhFpmbm8FxHDKZHeh6mrW1JRYWdpJOJ4EyO3ZcS612\nniC4lcGgxmgU4vspXNfEdXtomkUQ+MRTMBXidN4ssJu4ZdPAcWxWVjYJwwHxwOw2QgJsNGLFRuwY\nErsE2cStmwRxTU1QYAdDAmx6hLjkkbjEtRdnbAXvIZkCdqGxjsRDxUWhR0AOl1kiVAIq48F/F5UJ\nTGqEeEBG0SE0OdQe8dFdZVptn8ixCMIAhEZgO+xcXOT86iqpXI6qaXJFtcoTzz/PVXfeyb5L4Nr5\nC8nIuJXy+8DfAs+MD88BXxdCfENK+R9+yWsXiKtOEGt0LgqtEEL8GfBR4C3iyvHacfIkWBa87Mf1\nFoNpwm23xR4on/zkpV7Nq4eu63z8M5/h0Yce4slDh5BRxPzevUxoGn/zn/8zSSGwpeTym27iT774\nRb78xS/SPHoUZ71BOlNE9T1qvsMOJctW2EfxBxjCwFF07CjH2tAmFAWGREgEJlO08CiTJIHGoiI4\nj09VRiRKkyTtGtXIpMcIjTIhAaCjkAHqJChikUOyjYARDi4eQ3RyaJTx6SLpEaBxHpcpTFJoWHis\noNBhigIKggCJi02RLnNIEqh0iN1QPDQMDNL4GHRYpYpKwDQKQxK4TFFiLdjEGUjSxSl0fYHVpWP8\n1kc/eFEVY2BZiH+gbn7iiWfRtDnK5TjV1zASLCwc4PTpJkKscOHCAMMo4PtD0ukRv/M7H3tDr4Ew\nDFlYqPCD7/0d89uvZmZmB83mJg88cD+7dl3Lnj23YttDjlw4xZEH/ieDlQ6KMgdqQNft0dEgl6ng\n++fZs+caLpxwKYYetlAxtCQIBYmCo9mM/AxFuuSZxlA7OBE0ZYABSHQsQtbw6XMZISYRSUDikRy3\naQQRCpKTSAxi2zp1XCGzAY01aiQYoqCQIU2EQgcdcBGMCBlSIUeGAIsJzrLCTjwqRHRQuICgTYoc\nRcJohogLTJBCRDlkJDGTWXAsVK9BtjSFNhhgez1GgUFCSzMMbDxsbthxK832gIe/9723BRkZDod8\n/et/x/nzPRQlTRT12b27yoULDRYX41uMoigIEZFOT9BoXMB1bfbt28Ezz5zAslr0eqAoZaKoDqwg\n5Q3ouo7vtwmCDnHlL0vcWomIw/AsYkKxAyGmCYI8se17GiF2IeUqsS5kknjP7RBXQgbENEMDhhhI\nkqRwUXERNNERY1eZ3rgpu0EaSTg2DPCoINhkQIkKHgoO8ZRdHoss8ayOREWgoqOiCJVuYMdtPtfh\n2PMPY6Wy1GRERVdZ1JNIF845Dvsvu4zOYIDUNGq2jcxm+dinP/26+of8qvjHKiN/CFwupbyoPi6E\n+E/AceCXkZEesZYG4rN0UQVESvkvhBD/B/AgcRXlFfjCF77w8vfvfe97ee+vYTX7RuI734EPf/it\n4br6i3D33fCDH7w9yQhANpvlgx/9KMGHPoSUkicefZSlhx/mnfPzaKpKEIYcfeIJdF3nj/7tv+V/\n+xd/zDl9FS+dISFNiv0+qwE4gcF5LMqJJKEfUg8V3CAXm18Jg6IwkFKOiYWHADIiRxjYbERdFsI8\nSblBAYFkRI9VfAxCfATnyRDRJU9AEZUWJdYoYREg6LOOyw4ieuOE3oAmaYZoY+GrRp0CKvMYBOM6\nSoCBSUAWSYBGFp0hQ1JUyCIwCNkgRZMUBWALgYJGDh2LamBTXz3IqLfFgZtu4KwdUWs0mB6bxwGc\nq9fZf9ddF73f589vUCjsu+iYEIJkcoKPfOQqVFVlc7NBubyDffsuI5V6pUna64XRaMQ3vvpVvLU1\n9il1Tj/2/3LMyNCLVLZvv5kbb4zFq6qqMbQSHFp3KZBm2szg+i4NQ2V2+w3s2bvI8vIPqdefYhAK\nlmQHMyEJFQM7aKGRJfKHaCJLXkpMIvJagaxS5Kxdw2b0/7P35kGSnnV+5+d57zfvzMq676o+pL50\nttQ6QAeCHTAQnB4OQQThMWu8G/ba4/GuZyYmdh2x4Ql717Ez/9hjMzMxnpXHgBkhBhAgECAJoW66\n1a0+q++6q7Iq7+u932f/yJRAEgiN2FG3IvSNyKjoNysr336fqnx+7+/3PWjTpk2eNjtRGCZmG0EB\nSYUYDYUsAh+V3sxd9GmNkhwKG6RxSCJpk+AyXUw0DDQ0DNrYqDSIGMdhm3Y/lyamtxWuElF7afwX\n9AeEOToEfb2WgRBV4tCn22limDp5I4tjGdihhk2Zrg9e1GYt7jA6eRvTIzNUmhWuLq79na3f/594\n9NFvsbqqMz3dW3MpJadOHWZzc4Xp6d6o0DAMxseLrK3110AIJibG2di4wupqkyDo4HmXSCYtOh2N\nIDiGlCawARQRoo6Uo/TcVTV6FPOf0hvTuKhhGYHWp4yWkHKdXgHTGw8Likgc4Hy/vFBoc54YD7Vv\nd2ci0MlSImCFBgAKOapohKQZYIMh9L6PjY7FGg4BCgpdYio0KOAS9s+wQkiLEB2binRw0TBJEsZ1\npv0Ol9s1Vm0bbWiI729uEAUBe6emsJNJCtPTfOj223E8j6JhMDg4+GYt58vwWsVIRG88s/iK42P9\n534VfgL8j8BXgHcBf/7iE0IIU0rp0Ssff6ng9OeLkesZjz3Wk89ez3jve+F3f/etmeL789A0Dd/3\nOfHMMxyamEDrqzw0VWXfxATPPfss995/PwcO3cuF46tETRdbxKSlgibSLCmQS88zPzDOyvoitnQY\nkBpXpYslc3SJsKiRQaDjE6JTixxcDJqRQqrTRchekFWOLgYxNRo0cMmyE4McbSIEghybDCFQsZGo\nZIhZ4wwBXcYJWUYlZKrv6OrTRSHGQ9IgRqKTJCZEo4WGSRbooKNioLBOQBMLnUEk65jESDRUNMCm\niYaDgcqwPUgzdPjWt77J/n27OFmvU3NdbF2n4vuk5uc5eMcdL7vOQ0N5VlebWNbLSY1SdikUCkxN\nTbF//5uz5j/6/vcxNze5aXYWZme533W5vLLCl4+d453vvO+lLs/GxiKOY6Eld9BVFFb1KVTNRsQW\nmewQGxvr7NoxRtzc4Lzikk7tJ9RVKs02nUbA9ubl3phE0wiDJgkh8f2IpowoIGkhqWLiM4nBCAoq\nPl2gjcIeBKsobBGjYWOjUMRgkzIFQlqM4pAnjyQiR4xNiqsYGOzEoonHOmn20MbGpojKcWzWmKTL\neH97W0PiIMkBTVTGUVgmwENHETZCJjH1JrEaoCoKgSEJLRUlZVOUkyxc3KIZS9rGOKPqCCfPX0Ax\n4IZd17eZHUCj0WBhYZ2pqXtfOiaEYHb2Zi5dOsrW1irDwz0O2b59N7K+/jhCNNjcvIqUbQ4cSPP7\nv/9v+cIX/ncWFlJ0u116XKcxoIqUg4CGlOPQLyF7dNIX03lVEoqFqliEYe+mIKBLr3My2f/aS6IS\nGMAyBTy0/s1GmzSCIUJCJII0BkkSCCLqJIBhyjQYo8UQGvm+saIk5AIeeTbo0mSKmClUyggEst+P\njdlCQ8NEx6aIxRZtxkWblKpwi2HyE1R2Tt2CKjwWWiWM6Wn27t3LjrExmt0uC7Ua7/7MZ96s5XwV\nXmtb+l+A7wkhLtEblkHviu8E/udf9YOllMeFEK4Q4inguJTyqBDij6WU/wT4f4QQN9BTNP27X++/\ncG2xsdEb09x337U+k9fG5CTMzPQcYl/BU3zLodvtUtnY4NilS/iOQ2FkhNn5eVKpFFoU4TgOUrpU\n2yGamiGNhyIMFoNNanGOu7NjSDdA1zUKwSqWmkONJT4+XbaxqSDQ+u6XPYGeSRYbWG0sYaDQxcMm\nT0zMABp1gr4jqoWPjsY6Y7goBEjKCCJMGhTosobgDCoeBnq/dNDwSLDNNAEep6mh0SBHSIzFNBIf\ngzQmMSXagCSDQYRkhZgWI2TpkKOFTYcBTAJCGqqBCDsMJYs0mhsEocU//v1/w4njx7ly8SKjIyPc\nddddr0poveee2/nP//lvSCYzWFYSKSWbm1cYH7ffFD+RFxHHMQvHjnHP2NhLx5ui6Q8AACAASURB\nVCzLYs+OHaSPnqRcXmNwsLeRbmyssLFxmUajSjo9juNsoetThG6VS84WinaRfYNF9s7NsrzwY+Jq\nk07gE1tpiiNjtGs+UilT76zQoMuadEkjGUAjIqRNREQXwXliygSEqHSQCKCDykCfWBwRcrXfiJdI\nmmjYaOiE+PS8ZzwUYgwCmrRx2CDDIAomAZIECjFFAkrY/Xfo0hsavEjhLeNQJWQAwSKSddoMSR1b\nWJi6QU2vcOuD92JpOtWVMhdrVS4KMLVJbh6bI2OY1NttLnklPn//B960NX2jcF0XRTHwPI9qtYai\nCAYGiui6ydzcDIqyzPJyHdPM4Lp13vGOMd71rg8hhCCbzTI7O4uqqvyjf/Rx/tk/+2OaTZ84TiLE\nIBChKDFxbCHlIvSj6ngpNaYDqMTYmEqxP9C1UWgSU+RFsqrCRWK2UWlgESLJEBGRRUeyhYJAkABs\nLlJBIcJEYmLhExLQJUeIjUUdBwhJEGMSUn6pxwZNElRIs0qIShdJ0D8XhTZduvjMC4/5OIBARVc0\n9Chkq1Hh3n134119ntEDB6gYBhsrK6SKRd798MPsuYYJr7+0GJFSflsIsRu4g16HRNIr/Y7+HAH1\nNfHzct7+v/9J/+sX3vAZX2f4m7/pdR2MX5w5dl3hIx+BRx996xcjx44c4eq5c0zmchQTCRrLyxxZ\nXWXfnXcibBvbtrl4bpVsRkf6Jl0ljxd1kSLA6q7T8hTc0GdMaZDTQ9b9daCOwQg2kjLQwUPFIKLQ\nb8dvMYtPjMMyBa5Qw6bniujSpomFwhCCAUBBcg6FEEiTokmCDpKICB35UlM9oE3ABjEWJXbSU34Y\nJMihsEyXMjoxWyTJ9JUaEmjjE3IJmxgVlV1IMqxzii4rxHQQwiNQDaaUFFHQoOUpaGnJ+Pgw5XKZ\nF55+mpTnUV1a4pHDh7n5wQe578EHX+oyzM3N8ff//n1885tPsb2tI6XHjh0jfOQjH31Tk1yllMg4\nftV7CiGYmxmnVFoglxsijiMuX75As5kkn59EVWepl48TB98jpRr4bpdCqo7X0FioVhiO6lTKLWaE\nxtr6eRYVFeE2sGNIiDQZCTkkEo0VBCY2PVaJh0kZFxeVAVR2YiCJOA8E9DJVu32L9xwOOWALQR2f\nAXycvsmdCoTYeERcIk+KmAFKeKRQSRKQJEmVPBvUGURDI+QWerqOTcDAockSLTIUkZSkT0vEpGMw\nopixqVHMhsulckx+7HYKBZV07RksrUtL97hSWSUIKuSTIaefP8bdd991XXuMFAoFSqWrHDlSRlXz\nQISmnWX37hFuuGGWz3zmY5w7t8D6eonx8VluvPGGXyhXv/vuO7j33km+9KUfADegKGsoio3vn0NK\njV6Sy9307pVVeiTVZ4AtvDiFGmRB5BCygk1Ih2V66hv6nJCejV2RUVIY1Ikpsw2oJOnSoEGERGEQ\ni54BW0CNkAo2LikkETo+Fj5tMoR0AJ8eiwVUKhhE5MlgkAKu0mSCTbK6jx/UKCIJUWijkEPBEYIR\n06a6vUrXczCMDKpu8Y//198hCAIMw7jmCc2v2bCXUkb0xi1v45fgscfgs5+91mfx+vDhD8O73w1/\n9EdvXTfWVqvFyaee4j13382FEyfYZZoMZrN0trf5ztGjfOZf/Sva7TaNusPM6BzLiz+l1ayT1Qwm\n7DRbQmXnmEG0UiIRBFSlSV1JY0kFIf2+bmKaOtMo+BhUsFDQKSCoMUOFJlUaDFAnSdhvm8MeFEb7\nYWcegmk6nCbBFnl8PAwgh0tEihwNuqQYQyOBQCeDTpMVZvARfVukGTR8TLo08an1Z8seKnUsIqpc\nRbKPEIFGlYAibQSLqGiixoiapBE3sRXATrL/jvuZmZnkG488wk2ZDJmREQDCKOLIE08wPjXFzp07\nX7rWt9xyM/v27aVSqWCa5ktOt28mVFVldu9eFs+dwwLW1kpomkqmkGX8ht3suu0Onn76MCsrm/i+\nzthYGsua4eypH5NQbFRjjLSxwYEJk4yjsHbhAuPZLEG3y66ESqfZZFZYaEGNuoxpY7Bb2oTUSJPF\nRyXEZRUISeExQx4NF4lHiMomw2xTpAV0cYgoM4eCiUD2H8P4VIlYQWGwf2/7ouamQYIWPlChCiTI\noqARUKNLnQidWdZQyRLyHGXmaZNExSJNTESdGhohczj4ikoxn2f/jXu489AhnnqhwsG5G1iqVkmO\nDXHXnjt44fKzrDS20DyDIXsaXda58OQPeERReOijH2Xp8mVKy8sUR0e59dAhxn6uK3Utsb6+ThBA\nHHdIJEYwzTTN5gZHjjzO5z73+ywuLvL0089TLtfJ55eA3u9ws9nkyJFjnDt3Fc9zOXz4MFE0wtDQ\nMEEwQ7u9ietexTCG8TwX2A1cpjeaeVHw2Uvplag48goqHSzAoI2gQUgViwTTQIAkYICAXqquSZuY\nIQyKqMSotAjJkUWiorFNSIhGgRYzxGSBBAFlHHRiXkzDuY1eN0BiMoxAsEGGNAoCjZgWKreh4KoG\nnchjAsF5JH4UIA2DvJ3CspJsVkuU44jbhnvW+K/sil4rvIXZA9cerRY8/TT81V9d6zN5fbjxRkil\n4OhReAVF4C2Dzc1NMkKwb24OU9c5s7CAU62iJ5PYY2PcdvAgi4uLdLeuYG3U2ek5GKpFGUk2kSWv\nx6wBemqQ5e4GHZkiSBSY8bvgtVlXhgmiHCYKJiY6o0RcYJAcLjZXECgkuAmDFl1OodJj0O8gxkIS\nENHAArawSVLt+21m+h6NWYbI0aFIjQYTuLi4DACSASIcxvuamXbfvzViCI9lBtExsdkihYuNThsf\nA0NpYGo2YRxiRYJAHWcxdin5SVRlmFB0MBSbnTmDmZlhttYvk/k5gytNVZnOZDh19OjLihHoKZlG\n+kXLtcK9Dz7I//bVx5BrDYrpQdq+y5p/mY9+/tM89NAD3HPPIR555CtoWgMpTU6dOktK17DibRQl\nYP94yAf37uTK5ZBTl6+wXAnYqSXpKia1UAFaZGWMIQRdKbERdFAw0XEJ0UnQRBJzAJ+YGk1idBSS\nWJxgNz4xOjEZPAQ6CVrYZEji4ZPBQVKgzAajQEwHB5syKgF7aXKBHA08LpNgnIgsAR4V0hhMk8Ki\nikeVNYr91OYh8v3QvTbDaChMEFAmFLDS6PDA5AwrWxU0JY+qqBQsm0qrSbOxSRgNY4mA26cG0BSF\nWmeAhlcl3Nzkj//gD3hw/36mMhlqp0/zleef572f/Sy7du1609ddSsnCwgJHjpzEcTxqtS0mJ29l\nft7m4sVztFpLjI8XSCQOcurUGU6c2GBg4EYGB2/A8zp8+cs/plqtcuzYAu12jjBM8eSTJ1ldbTM2\n5uD7Ftvbz9IrOPYipdNX1bTocUXW6XVGBHAD9C3cNWxUQGEFjRE00gjKpNgmBgJssphEhJTx8VH6\nkXc+BTRK6BiM0mabPD4hKgU0BkkzRatvi9Yrf1r0ei5pQEcli6DW19BNoFFCMqDY7FJD3CAm1HQG\n7DSdboP10McESrrOhJmkqWoMajpXaiWyO3Zx331vrsPqr8Lbxcivge98B+6+GzKZX/291wteHNW8\nVYsRy7Lw457l8s7JSXZOThJGEa1ul6X+rOz5Z5/lwECaU+fOMxBLkqqOHUtOrl/h9gM3oE+Msz07\nxeXjz+HUEthRh3ElYFVJkhdZPBH34ruli0KHgJg2lX7WhGAHKZJorBCiMNRX3iRQMInRiWjRZRsD\nhU0iGhh9c/kcaRIERKiYRGTw6ZBHRyKx0fD7f5IBCsv4eOg0cUhQRCVBmQ4wT4IEXa4SYxDJEWIl\nAiVHU7rkbImq7kRTJoiiGDcWKJpPPq8xOjpK7Re0Y01dp93pvFnL+LfC5uYm+am70OdStCsbJFN5\n3jG+g3PnzrC5uUkikeDMmfOcPduhUNiLlDl03WQ2O07CcLhl3MPSNFL5PCU/QcHIk9R04jAGmcGN\nJcQ1pOyVkl1CAqCDS5cEoh9nqPQj7xxET2WlDqJEabo00MiQJgK6+NiYFNCRmHgUSFHGJiLJKjYx\ngjoJYsZRSRCwRYUKg7QQdPFJ0SKBRpEkK8QEaKT7/jEedVKMYtDEYQADiY6r6fgyS8JOkXa3+cmp\nGpYFbaeNEDblep2mprFa2qDdSDNkhmiKQtfzCDWdVHqaC5cvkwsC5kd7cu5sMkmu3eb7jz3Gjt/+\n7ZflvbwZ+Pa3v8ePfnSeXG4WXR/guefOIuU673nPR7nnnp/xlpaWzvHUU8cYGDjI0aNnaTY9IGZo\nKM1/+A9/STK5G9u2+clPnqZUUul0hqhUjqHrOkJ0iOMMQrjEscAwugSBx4vb/88My0xgEMk2fl8V\nYzGCSREIEaTpcAWTbQSw3o+N8Impo5JAwcIggUBDwcYgRKeDR0BPbqojKfbf8Qo9r9YSPY3OEPS7\nKjYWETZW/3NEwTJNVF2QQMNN6HRkTGJ8B6JTwzAVtGQCTcswoCVYdzukd97Ax37zN5ifn3/T1vL1\n4O1i5NfA9ey6+svw4Q/DZz4D/+ZXCbOvU4yPj6MNDrJWLjPed3ATQnBhe5u7f/M3AbiysEDY6pLT\nFXKxCjHoQmHEUPEiyFsGG0FIIjdGMjdAde00bac38ZcywJAeUKdNF4ckghRr1FBoM0HMVQxiEpSI\nCFBQyRFRIqZAkrNkaWFSJmILC4jRMChgYCPR6CBxEEhi6giGkERAFZ8xOixgs8YgDQZQKRKxRIwk\nQEVgYmETIbAYJGCNSBSRiiRj6XQcgRuWKOq3ki+OE6kqUwMDJFMuvm9gmiY1KYniGPXnNpf1ep09\n99776gt+HeDkyQsMDs5TKIzA/E2USiVOHXuBjdXz/Nv6/8Hg5ByGsYPBwYvEccDo6Bztco1LtTPc\nOuowlettXMvtNpnRvXie5HR1iSE1iy8VgkgliA3qwsJBpYyNjkaHKjp5WjiEmETERJjAKJJlvKiJ\nJMAlhdnviZm0+zJviYdLhpCIGJNW3zt1gIAsEgWJjkcNQZUsETtQUJH4tDlPgAYYFIlRKFDDoU2Z\niCEkPuAge3RZEeJHXSJCwlBHlQncIGLP3EG+9tS38JsuXtfD0HVE0IIwpuW7XNlSGRwdZWZsjO3W\nGs16nZ3j4y+79rlUinBlhVqt9qbySba3t3nmmbPMzNyFovQUc3v33sEPf/g0KysXmZ/vSbmklHhe\nCc+D55+/DORotaDRaHHx4mm2to6xb988zeZlLl5so+s2UubxPBvP8xFiB0IE6LpOFHXwfeh1Ol16\nxcgqPb8Qg16/QkOli4ZFAo8MXj9AMSRHng5NOmRp41Ghl2+k0cSmiECh2R/YNtlGvtRDTVCmhkkH\nk95QSPKzNJsGPdGxQNLFwxQ6sRTUVJ2inqFDRBA7qBqkdbATSUgJMtPzVDodPnrffYSdDk8eOU47\nbXH/nXu4885brzlH5JV4uxh5gwgC+OY34Q//8Fqfyd8Ot98OnQ6cO9cb27zVoCgKH374Yf76L/+S\ntaUlLEWhHsfsfec7OXBTT2cgFIX69haTI5MY7TpJ1UBRFcq+y+LGGocrVTxfR7oRg+gMxU1cp4KU\n4EiTPGk2qaExh8oUPh4xY0Sk2GCFIaYBHZ0EHl0kGWJWSHCEYUzSSEyajOGzjWCTDnVapLDpElAn\n0fdNjHEYYIUGs7jEbNLEZ5nRPmk2TYSDZASnf58OOgYxLhARYlLEVzcw1DqTgxOUamUKmWlUZZx2\nFHHj3j3Ytk2jcRlVtUmn0+y86y5++swzzBUK6JrGarWKHB3lwM03X7N1fS0YhkYU9a7Y1tYWp599\nltFkEj2TZndC8Ni3fsDOWz7Ovfc+yMmTR9jeXsTK1vC9KkrC4NTWFk3XZdOyuPHWW1laDKgECkbQ\nQAkFrTCkThKHAQQpVtgg0ZdnO7SoksQnQrCMxk5CJGChsYqkSoSNjoEALFRiVulZUaXxiBFUGcKl\niU+JGm1SBBSJMYB1soTMkiFD3PcocdkgJmACg1EiIKSBzmUCHKaULiEWRqzj4YG0SCk2jtwEmaaG\nRmW7iucfw5Yha9VTFPQ8Y8URrE6DlCUoFnYQt9uMjI2haiqqqOMBozMzL7v2UkoiKTHeZIb+6uoq\nQuRfKkQAxsfHGB4ucvLkEUZGpoiikHL5KrfdNs03vnEVxxlke7uMEEkMY4C1tefpdBJcurRGux2j\nKAVUNYHnbdHTJY0hZQshAqLIRVFUgiBLr+joeY7QF1T3ChIHre/io9GgiILaZw6lCanh0uZGMkyS\nxsWnQbOfNNXT4qSRdNEIcNkgYIiQMQRq38HG4BwCC8lOet2RAmBgcBGTGgkGCAnp0kwoDKbyCN8l\nW0wxW5jkVCtHcWiInQMDeEHA8VqNsfvvZzEMOb6wxuhNH+D2Gw/S7bb4j//xv/Nbv/UhZl6x3tcS\nbxcjbxBPPw07dsArbiSuewjRG9V85SvwB39wrc/m9WNjY4MjzzzD5tIS+eFh3vXBD6JpGo7jMDw8\n/DJy5Z6DB3nyz/6C+cwIJaeFQoQbBCx0m3RbTebzQ4RhnWYo6YQGA9JkKIJFQkzKtPFwSaJSRKOD\nLiI8qRIwT0AbiYWCjU6rr7g5xiAB0zSwiKjhYyCw0ZkixiXAY5UVAgJGiYiQNPv9kG0aNFhhk50E\n1DBQyeID4OCgoPQlvh1qJPAJcHGQ+DhIkUYQMj88wg3T08yNRpTqJjKOUSOLOJY0GlukUjqXLp3i\nG9+w2LNnnts/8hGunj2L57rsOnSIW2699RcqD64H3HTTjXz7249w4fRFLp8/z2QigUhYCFFhx8TN\njCxssn7lMvO7dnPPPf8DnufgOF1OnXqCndMmvu9z6MABHnjgAf75P/8/KQzsIZeb4/LCT1DEBhtu\nCSfeiSkNNDYpopNA0sKiho9FkkmgQ4kuTVqYBIDHKgYGq8QIYkIi1ggpUmUMk4AyZQJ2YBAQo2NQ\nYZsAA4mkt9HlKTKISRUTixYOERHDJFmnTYiHREEnQYiNKdaJTI22V8PF5hwRoyikZZmU0ClFZTpW\nBksbZqORJGtmUZQFdg4H3DMjkXKc//rCJc6tB9TbKgvNMnPjGrccGGVy/zsIXlF0XNncZGz37jc9\nTt4wDF7ht4mu69xyyx48D1T1Kqoas39/Gtu2iSKHjY2zqOoebDtJuXyJbtfBtqdwnC2knCOOfTzP\nIww36RUYbcBFiGGkPEkUjSFljyTee4zRG5pcBi4BBSRVNMqotIgYxSQiQqAS0CaByjAqGiEKSTw0\nAgqEwDJBf0SjoJBBsNG3eHdJk2eKJmlO0eRWfBYR1JFomBiMMo5JiWFKCOqyQireJi8D0gWTsYlB\nfEXh7911F+lkksWlJbBthopFPv7ww3z96z/kroceIAwDwjBgcHCCet3k8cd/xBe+MPMmrupr4+1i\n5A3ia197641oXsQnPwmf+1wvOO8669QB4HkenU6HdDqNrussLS3x6Be/yJRhsCebpb6ywje++EUe\n+MQnXuqG/DweeNe7+LPdOzh+aZWBZI7TTpNlt4PWbXG7qjEoIIpDHF3nbOxgJzI4bZtiGBKjU4w9\nmlgkEBj41KWKiUZvqpvBJcImQCcDXGGYGiMkKdKLwxtCYQOBh4qNQxKwiHDoAiYKQf8OOoPfN1Tq\n0OIsHh5ZHIYRTPa7J+tABYMGESU6WHicAoZRGSCWMWG4ydJ2jGm1+cS7buX0lXWePf1T6h0bJziL\nZkEsBLfffhuOM8mTT66QTrf4/Oc/SS6Xe93rEgQBFy5cYGlpjWw2xZ49N74hhY3neUgpX7fl9JUL\nFzCbF6hWBG69Sa0J5fJRPvmBe8ilUuyaKPDjhTLdbgchJE8/9SQXTh3GaV1ia2SAW3bPsW0YXBwd\nZefOcZ599jyl1YCh9ABbkYOdGUcPZpHeaSZCC5vBvq9LhTSLxPjkydLAwybCJqZECoM5bCwczrBF\nFh+fPG32ABYxCjpDwFk88kh8TJokCRmnxwQooNDCIO7nz0h0DFxUVHRiuqi0MBAohETArKnSikxK\nms1gHJEJI7Zp4ugpFKEQazpWaoh8/i6kVyZrRoyoBk3nCh3f5+iqT8efJZMcQVXaREYHmUlx19/7\nAO9//3v56iOPcPjqVdJC0JESY3SUj12DD7q5uTlM83t0Og2SySwAURTS7a7xuc99jJGREf70T/8b\nZ8+62HaCOC5QrR4jkwmJ4wz1+nls28Y0p4jjDRqNs0AB112hx8QYoJcdowENhLCJ4wq6bhLHJlIO\nEMc+vaGJChgIzmGqOunIJ02HBm3afV6IS4O4P4oN0HBxSLDJFAoJJFkghWQFSYqIBXQK+DSpEmIR\nYKNi02WBDiGGlUANfJQoCSRJEqEqBh1ZJJYjVP2LnFAiDu2epZW1uH/HDubHxlgrl5mensZSVbZc\nl2azycmTF2g2V+jxYBzyeZM77riP1dUKruteE+v3X4S3i5E3ACl7fJFvvUWzhu+8EzwPXngBrrfO\n/Pe+9wOefvoEcayj6xH3338bV8+cYlcqxXB/40tYFplkkqe++U327N2L9gpLWcuy+O1//a/5r//X\n/013s8acOUb1/AlGNYXJVA5DN4hDj0QckYwCWtLFUnRMTaEkQ9Io6HEd6OCgoSIJiRFsoLONJKBK\nHh8bSJKnhkDDIcbGRidkgJAmgoiefdI2CjET2LQYo0WSAQQONUqsAa1+FqXODBKXCA1wMVSThNoh\nCqrcKiNiHErkaNGmSwsbHVdJoaIyM7if58/XeN+hPTiNJ9lcWmAqP0mpUkUMzTIxPkcmUyCTKbC2\ndpFnnnmO97//N17XujiOw1/8xZdYXvax7SJBsM13v/tTHn74fa9S4Pwy1Go1Hn/8Sc6dW0JK2LVr\ngve977VNb7a3tzn/3HN87qH7qTab/PnXHsPqdvBdl6ee+wlLK8tksllClimVznPiuacIV5YYDWuM\nJy18N2ZlYZX9oyOc+e53yQ2OkErqVOUStpEmk1AppA9wcfUCNiEmNqrQCWQENBhHsE2MT5M5FDQk\nNXQkNRxmMWigIdC4CmgUcDEBE4lLhxQwREyNHCUyxGSQDKFT6bvlanSBDAFdDDShoQAV2StthghI\noxEhWMWh6YYMWYPUZchYuoAeSZrSZ3p2iFQqxeELZwmCLNWtyxhmQNYUzE9NcHGhzLPLqzj+JLHI\nMVrcTcVxmLlhN53OCQ4fPs+DD97Hp//BP2B5eZlarUYmk2F6ehpVVV9rif5OYNs2n/70+3nkkW9Q\nLqeQUkWIOg88sI9du3bx1a/+Dc1mjrGxSUBwxx3v4fjxU3ieSz6fJZudoNOpomkuqjqD627R7bbo\nmYcXUJQMUhqoagJF0dD1TeJYMja2lyCw2Ny8gu+n6A1LdISwSKi3Aav4Ypt9yTS+bHC1W8aXOlBG\nx0QQEOBjU2UMgUabHBEWvWiAAXoeMUOEgGSELiW2WELiM46BQVNVKJhZpCwTRAJNiWkJ6KLjyYhY\nSAxb4ebb7mbvbYcwjRXq1S3+6vuHWdsM2N528IM6jtbg3ZgsLraYm3vHSyOvZnOVw4d/yO7dWXRd\nf9PX9pfh7WLkDeCFF0DX4Rqa1f1aEAI+8YleyvD1Vox8//sXmZg4hK4b+L7L179+lLh8gk/ffdfL\nvi9l23RXVvjm17+OIiWjU1Ps3bfvpVHDnYcOYf7e7/LsE09Q2dxErJ5jXB3EjhVAxY1C1EiihT71\nTpNhIVkNTTblEFmpkqBEmwUiZpCoqLQoUKWAR46QbTa5QhoTBwUdGKRLCpMGkiQeNUr4tIEWSVwK\nqDiMUiXHJD2hXkQRC49FSsxCP7O15/b4PClsUvjo0SoDNPEViRJb2CQZRmcLiaHqxHqWLbfF4TMn\n2T01x1d/8BSDUcSHH/4kiqryve8dgTjmp0/8v9z/4f+JbLbI4OAkL7xw7HUXIz/+8WFWVgQzM7e9\ndKzTafLlL3+bf/kvZ37lh5rruvzZn32JdnuA8fF3IIRgaWmZL37xS6/5us3NTXKAqijUmk0yAtxK\nBcNx8CsVNtpt2rbN5NgYMzMR5x6/yM1zO/C3BdOZArGUnGzXOHnmIrccPMDXvv1N4rZGNtBI+xpr\nHQeh1VGVCkkjQsMjjrtIWcek3Y8xi9iJRg4TkAg8xonZ4BKTDGGQwaXJVl/10iFEEBMTkSImg2Ab\nhQEKbOADl8mSwsNDJU8LlQwehjBxpIurhCzKHkW5hoIHdOmQpssQCpFUSKbyrEmfISWGUFIul1BS\nKerSI0WTQtImlQ4pby9zPt7A1QRr5SaqiLEzBSqOw+DEBJZl4zhpHEdjdXWVOI6RUrJjxw5SqdTr\n+t34u8L8/Dy/8zuf5+rVqwRBwPj4OMVikSiKePbZ45TLOj/96VEAUimNbDbDxYtVwKbVWiKdNhGi\nRLtdY2xsB4uLR4giD0WZJY4dEgmbVKpAq7WOEDH5fJFq9QWGh/ejqi2EKCFlHphCoBPIAIVthnI2\n9aDLQBwyqUaM6FlOuXkcWcVhCZMMCWLCvkNvkl4/lD5hXUFQQNAgIkajSIqQOovE6EywxQp5t40R\nxHjUqcY+WyRwGQTmEFobTRmlXHYJQ0kQwJnlMrVlDbduoOkFfH2CREbjiSdOYlmzNJtlcrlhADKZ\nCa5efYEPfvC916TQ/GV4uxh5A3j0UfjQh67PEcfrxac+1QvP+8M/vL4M0CYnD6BpvY3NMCwmJm7i\nqePfwfU8rJ8z51nZ2uL44cMUFYViLsep55/n2FNP8Ynf+i2y2SxCCG659VZuufVWoihivVTCff55\nbEWhWWniRxIRBmwTYyg+1cikJAZADNCSklhmieR5hDiMKg0KhBQxyJMkoEaRgArLNEnTxcSkgiRB\ngwxd6qwh8Pst3CpFBumg4zBMQMwFQiaJidBpMoVGmw4uITkcdGxCFkkTYUVdsriMaCkq0qdOB0UE\nKFJHR+JrFlkzTY4ORjbLSlcyovo8/P7fIJNM8qMnnkBurlBID5Js13n2pSPT6AAAIABJREFU8T/n\n4EOfIpHIYFmvn5R49OhpRkZeXrkmkxkqFZ21tbVfSYQ7f/481arO9PTcS8eGh6dZXm6+5utM08QH\nXN/nxIkTvHNsjCOlEqbvo+s6lWYTY2KCd+3Zw7MvvMDusWkGMkNUaxsAKEIwqKps1ZucXlhgRkru\n/8iD/MWXv8765hJhrcx2oKIoe2mKEgUdFL+FTYk0vbIwRGEQlQifGK3vKiIo0pNgg4NgAJsYhxZx\nn9mTIsJDYQmfEj4KkhQaMQsoJDGx8VjEwqdFli3ZwNIlhcwMcWWbJJu0EBgoTCIYIUFEl9WgSyVK\nMZhI0Q4bEHdY3NrgUm2RXfkBSsESqmgzHWe5cSBH2etQpYm1e45YzqJoYwwNjb3UnpfSBWy+/fWv\nY3sethC0gZve+U4eeOiha6q6sG37VRbljUaD558/RyZzL7ncbsLQ58SJx7DtaXbtmiKVStBuj7Cx\ncZF8XmVoaBjD8AkCaDQmSKdn2dx8HkXp4PshUvrMze3HtiMcZw3LslCUMqY5jO9ryLjSy6OJVlFF\nGVUOsK7ENFNZ/G5MUhlkxOiw3XgeSZOIFF2gQ4ORvmmZ0WeIVYjJoNDsa2NcbAxq5PBJIrFIMSYL\nLEcNkkLHEjGKhAksKtRYZwFdFaTTk4yPH2Jh4QzT0yaT87fQdNt40iGRyjFaGEHXdY4fP0exWMS2\nu1SrVxDCJI5dhoYy3Hbbq0fc1xJvFyNvAH/91/Anf3Ktz+LXw759kM3Cj38M11Ny+IuFyItIJNKk\nhqc4vbLCbfPzCCGI4pjHf/hDDk5OctOOHXiehxZFLC0t8YPvfpcPffzjL/sZqqpy5zvfyQ9XVri0\ntYUuQjxdckVAx0qgCR2/O0w6TuHLAKEm6CoJkHvI6ReYSLjMt9rEUqURdYmlSgKfKVzOY7HJIApd\nClQBjU0cJClGGWedNabwGCZFhwY5TFR8timhkCWNQYcYjTY6Dh4KClWmcUgQs07IClla4RiKGlEV\nJdJykxaTdFHJmjm6YQ1V3UKVJpqlMDE1RiaT4cKZMxQVBbJJQJCyEhTMBOePfo/RXXv54Adf/4eR\nlL/smde3UW1sbGOar+anJBKF13zdzMwMfirFuaUl0nFMFEUULAvDMBiYmGBHKsWypjE7NsYTp05h\nSw9N1/GAWMYoQsEPQzTDora9zYHZWQZzOd6xfyfPXnmMexIKT3sdrnjnaQcZVo0mY0qD0UihAtSR\nuAjqhOgI2viUAQ2BIAQcgr79e4aQDVoU0Wlh0iSgTpcqw6QZpo7T33xUMmxjEZAlJujbdptEqNoI\niajLLhwKaLQJaOKTIw+4NAlZlzGJWHDz4E4qnRKXW8e40TLJqArzY0MEseTE5UXa0TgJK4sRN/n0\ne+/hnOex6SbY2oowzV46daOxSDars7l2hjuyY+zrF5VhFHHs+98nk8tx+8GDr2uN3yycOnWGVGqK\nVstD113CsIFhjOO6BiMjBu9730M0m02uXp3g9OnHMYw2mUxMMjnLsWPHKZVqQIiUdVw3iRBd8vl5\nTLOMZdmsr7fwHA1VsVFxiZVtVCWHrqbwAp2rLgyk94OWoDimovlNVldPMEVMjphL1MjTG89CL1NI\nImnQY6A4SAIEaWKm8HGo0cFAkqFgSLTIIiHb7FJsFJEkRrAc1sgRURNVMtl72XnjrSQSeZaXt3no\noUNsbARoWsT0jt2oam9blzLGsizC0Gf//lswTQvXdTBNg25XY/w6U1+8XYz8LXHhAlQqcOjQtT6T\nXx+f+lTPPfZ6KkaCwEPXf9YBcZw2u/feSGGmyLNnz5JRFNYbDYRhcOehQ1y9usipU5eIY4soDvnu\n6T/l5oMHX3Wn/p4PfID1CxfwSyWOHz2KmkySjCImu10iX2ANjNPwJKttMKWFVASBkkWY83S5hK85\nZFUVw/cIhaDthaxikSOkRZ3L5KgBGh0sTPKME1ElooNPmi0giUmDDhNAkjouXTwMrhCQZJBhRmlS\nZgKFMXxsDAwKrPdb9akoi8TG4xKSCzhKGs1rIKRHxhhgs5tECbZRbp7gwvo626ur7MjlSJsmJ89f\nphIojMqYbuk80+8+wMGDt7/udbn99j386EdXmZr62V1qt9vCsrzX9aE2OFjA91dfddxxGq/5OsMw\n+PBnP8uf/Pt/z3ajge55lF2XfTMzDA0N0fI8hKIQhCHjs7O4pW0a5Q3swiib5TX0OGKxU+Xu23ex\ncPUq87t3E0URV8+dY9/wMKHrkg5a7EwKyo7NaghrUQWBQhsVm5gBdLYJSCJpouCSIaaKQ5siDj4K\nMQ5FK8tWOEBF5GmHbXyZRiPLEFOUaWNzAEXo1GSbBHWmUNGR2OiUiTmKSei0mcyOoCZMgm6XUVKY\n6CzQwURSRqejqkxKl5Xt8+jOJpNKyP1jY9TrNerr60SmyQ26ypnWOsPCQ1UjctkMe4E9O3bzxBNH\nWFq6AAgKBYuhoSJpP8He6emXrrumqtw4PMzRp566boqROI45fPgIv/d7/46VlZgwvIiuj5BI6Hie\ni6pqTE/PYlkWlmWRTicYGWnTaHgsLraxrN3Mz1ssLCziujpCVIA1stk83e5JNG2CVsukUb6AjLeI\n4xU0ZRRVmSSIXULWMQyVlC1wvAaCNBdbFXLJVcbzFtPtkDOdDkls2rj9xCho0uOJhP3HVXrU2Rwx\nETEeMWVMVKPYC2BUJXocoWomMraI/QCNbs86TTq0/Dal0gnCcJtUyuNjH/sg/+k//TdSKZ12u0Mq\n1SP8tlqb7Nmzj5WVS3S7uxgYmEdVI8rlC7z3vbeTSCSu0Ur+YrxdjPwt8eijPeOw62m08UbxiU/0\nyKx/9Ec9Dsz1gJWVE4yPH8A0bVy3w8bGKT760Xs5ePA2Njc3qVardLtdDj/6KN1ulxMnrpDLzaCp\nOmEUoVU1/st/eYx/8S/+4cv+2MbGxnj4n/5TnvrOd1jtdlk9fpwJwyAPNKI2p5urbPl5FAaJpYEh\nA5CbZK0b2W5vMZrwmRMCU1XZ8H2WEGSxGSNFB1inRpOAEQIsBpCUaIokjpwl348+a9Ghw1U8OhjE\n/QAtaDFBGoMGVf4/9t40SI7zvPP8vXlV1n13V98HunFfBMFLAEGIt0jJFoeWRIkKcSTRHtszkm3N\n7tgTlr2anZiInQlHeGPWsfaMJ2zRq4OWLIoiTUokIR4gQIIQiPtooBt93133mXfuh4YoygSpwyRB\nOvT70lVZld1v55OV9eT7Ps//345PmhQtGtiARpAufKZosSR0KoTwaSciqmQ0m5hcRdY6yetRuvu2\nEYsJQpEQs2KF41NTLI2PYwuBlUxy99VXk4lG8RckMokwTz3+OH3Dw6xfv/5n1nzs2nU9Fy48zOTk\nUUKhLJbVQIg899//oZ+rCG7DhvU8/fTL5PPzZDKrXiel0hKaVvqZ+3Z1dfG/f+Ur/Nmf/An9uo6S\nSBBw3dXzpVZjYNs2Rubn2XvXXdi2zbPf+jb18RkuVpdYalZJZjNMVCpkNmyg7jjoloVZrxNUVUby\nFbLpTjJKkENT80y5JvVLqqbtCIpwyTfGI4CHIEULlyIhsngEL3W9mDhMmAbLUhZHtIGiYdnn0bFo\nsIJHCgRUfYMoZbqQSF9q9VzEZpR2FHIIVBbrGl4ohDBmwXPR8HCFTFXrR0g6iphFC7rIUon+uE7T\ndokl4tQadVTbZrFaZ000TUwVhCNJIpkujh0bI7O2gztuuJZPfvLjnDt3jmKxSDabRdM0XvzGN96w\nHBMJBqnPvjGBvFJ8//vP8Dd/8wzN5hDRaBIhXExzDEVJ4HlFurr6yeXa8C9N4y0vT3Lzzddz+PCr\nHD9eJpGQKZdX6OjYhmGs0GqFwYdmw+bE8aNEQosoWjet5jwhbMJ0gNdEFicxpHZa4ioQBwkHNyHL\nIUKyia0HKdZkbDOBonWQZ5QBoliXbkOyQA7ovmSbeQGPJXwal0wVLTwmhEwl1IGkWhTNFhnNo9I0\nCITa8GwVW67jWBE8P4avdBEKr0VVg9RqJ/jyl79IW1sbv/7rN7Ow8AjT03N43mpNDCzQ1tbNjTf2\nkUqFGBs7TDIZ4/77b2Lz5s1XLpBvwjuajAgh/pxVf5+jr3fwFUL8H8Adl55+2ff9Z9/JcbydfOc7\n71/10n/KwMCqVsozz6zWj7wX+PCHt/P880cwTZ9QSObee29g584dAORyOXK5HJ7n8eoLL3Di+Gk0\nLYEir34ZLlTzdA3vwDRjjI2NsXXrVpaWlti370VGRibRNJUPfGAb195solarRJeWCPg+QpFxvUXq\nnk9MbcP1LQx/cVUh1fIIRfvQOqOcnptBajaZtCxkSSMr4iiySlxRCJkGo26dEk0SFHFFjGU/hMGq\nuygIZAJYpDBpMEOQBhuR6EUlSIsCHrN0kgeCWEQwqKID8mpZJaaSIiBlMEUTLdnOXKXMxdYKUVUQ\n0eI0mxdpT/cxMTLPqcqrZNNpGo0GPfE4ihBY1SrPjc/wwtlJzo+tsLYnzcX2FMfWr+cTDzzwloZZ\noVCI3/zNTzMyMsLk5ByxWIqtWz9EKvXWyyyv3/9zn/sNvvvdHzA9PX4pnlE+/el7+dM//b2fuX80\nGuVTv/M7fP/rXyfS18fI6dOUl5cJpNMELIu8LzHx/FFkWWHD3psZSR3jpuE+dm3aRCwcxnFdnj93\njlcWF1EvjnMo3+KFhokkBFtVm2qzjmLXMYkgiDBNjbXYxC4py9goXCSIIIaKBKyjwAIeMyjIVLBp\n+hKS0geujOudQ5V9PL+bBa+ARwPJtwFBDA+dCCYeFgYLhAjTSQsFG5mWAZFwjla4QavlMudYrPgS\nqgsBVaKnZ4BkwiJjtNjR0c6PpqZYyedpGAaO62KZJme9PFY0jRvP0N05yHJxgbFimd8ZHCQQCLBj\nx47Xjm2j0VitjXFdlNcVNC6VSnQNDr4hFleCYrHI/v2nMIw0g4PdXLhwDkVpR4ghEgmHZrOOaZ7g\n7Fmb/fsPUCrNEAxWaTZ3ks8X2LZtPUIEWVqSabVamA0Z14gi3AKW4YGfRGvZtOpT4KdRpQGE1yJE\nFtct47hzeKKILvk0VsZoeSa+7wEWiAEUJUwkGEWrzGOhoxOnyQomq/4ydWRaqCwgIfBIBkL4ER/H\ncsgSIJCJkVu3g3hcJz/+KvqUwZlqgawcpYWPp3VT8G0IpBCiSSIRpatrK11dq9L9W7Zs5o/+KM53\nv/sEL754FEVR6e3t5KabNnDLLTe9Z8zw3op3LBkRQuwAwr7v7xFC/L9CiJ2+7x+59PJDvu//JyFE\nHHgMeF8kI9PTMD4Oe/Zc6ZG8fdx/P3z96++dZGT37g9www3X0Wq1CAaDl632liSJuz7+cf70wCto\nNYOW51NxbMxEhu3rdrK0NEWj0aRYLPI//+ffAz10dd2E41j88IcjzJ19nk/deSc/2LePsfPnWWq1\niIogA0qNmj+KosQIBDXCqd3Y1gqy5LJ2zTCLvsuZqSlM22ZQ0jAdD1UIbMcmq6qseIKSCDDteTR8\niVUviyxlCrQh0AlgYVFkGZMUMt34CBw0Vu9/M+SZJYOOhMWPu2sagKmkUZUIdXsCtBTd3buZNA9h\n2v34joKws+QXirilZwl7TYS5gNTTgZzJ4EsSAUni4ecPs+J0sPeqXyMZjVGoLGJaKwTkUY6++io3\nfOCtjbM0TWPr1q1s3br1l4pte3s7v/3bD1Aul/F9n0Qi8QsVR65bt472L32JkbNnGbz9dpBlwuEw\nTz31IprXQ2fnGiqVAo898iSLFw5w38278C7dJSuyzDUDA3zv/CgltwPRuYfCuVEk2+CAsULSrzIj\ncnj+OiBKnRWOcpEYeWp4mGSQ6bskWqUhCBMSA5T9Fr6IYxMlHC0jqBEwoeyAovQh/HY8OvGo4VMl\nTB0dhToeMquzOzb6JZFxCU3SUYSFVS5TdVo0PRWLbiQpDVIYw1+mszPMVVft4MBjj7GmoiN5Hofy\nefpiMSKKguyrzCsqSjQJ8QwT5RUWXI+brtl92S+lcDjMVXv3cuSZZ1jf1kY0FGKpWORis8m9t932\nS8X67WZpaQnPi+D7VXQ9zNDQWqanJ7DtJsvL5xkYSDA83E+h4BAMNmm1woTD21hYCKKqcU6ePMWd\nd97F8PAAzzx1mkRgPYX6FJJbR/N1HMC0LDxMQqzFQ8ZmDpkyCi4tKmT8Ag23Hc8N06YO0PRsKn6L\ngKjT8n0u5vNIhKnRIkuMImVmcXFIIJHGQFBCp0mICXOMm8I+mUQMX9U5bqyQjKsMrOlj165B9IDP\nf/k//5K5pqDVLOCrEE5upS0Uo6Mjx65dG/D9Os1m87Vj1NPTwxe/+Nt84Qs+zWYTTdPeU627P4t3\ncmbkOuDpS4/3ATcARwB835+8tP3H7sjvCx59FD7ykffOksbbwcc/Dn/8x1Cvrzr6vheQZflnthX2\n9PTw6X/3O3z9a89hxdpoS7bT3t6LJMn4fpmurk5eeeVVXLedzs5eYLU7p79/OycPfpcT587hC0G9\n0UCRJKpCISxpmE6Viu8jojvQFBNNW2bjxo1MO7PM1ev0pFLM2DZWy6RJE8fXSApBDYOCHCKrdiO1\nDOo4+FSQGaKGhEcJDeuSJ03gkrqryqokvIGPhk6KIhoLVEnSoAefCVYYJ0TLDeB5M1iegyoE02NH\nCGptmIEEmtZCcUycpoXkyqSpcFUqQr3RYAa46e67OTcxgRFwGGpfQya+qteSTXSxWDTwHJdzR4/+\nzGTk7eIXEVq73L7Xv26cx48fx3Ey9PWtY3l5hvMHHyPXMlFaEkunTzM3Pc3eG28km0igyDJnz87S\n3TfEUMbDDc/TKhi03DhnaQJpdCWG5Wj4JJBJUycERBDI+ECTRVR6iOJg+RYImYCcBqGSToSRvSTV\n8gItO0eAAIgWplAQvs9qrOdxgClMujGw0MhjYuPioqNJOkt2hXYcDJpo9OFLUZJqiEhQo2xqrIwv\nsO3Gq6lt3syBo0fBMDGTfbxsaJSqNTRV4q6dNzFjG6hbdhEPRQk1i+za9ebumDfdfDOxRIIj+/dT\nnZ+ne80a7r35Znp6et50n3cC13VZWFjthOro6HjtZkTXdTQNwmEV02wRCsVYv34blco84JBOWzhO\nkquu2snzzz/NwMCNyLJKoTDNpk2dzM8XOHbsFdraevDsR1lsrmBZCppvYYsiYb8NhI3wxSU1mVly\nFBhAQ0ZmEYmC5FH3VAIijuSspiu+r+J4NrqYYskPEyJGAQOTMkF0jiPQSaAQwCaJQRqBwKJEszmN\nIzQ82SMeCRK2L3L99TcSicQIBgP8q4/fzj/8wxE0fR2mGcEwfDStAOhks1mWlubJXPLnej1CCMKv\nc+V+v/BOJiMJVhVjYNXrZ9Nl3vMV4K/ewTG8rTzyCPz7f3+lR/H2ks3Crl2rIm7333+lR/OL8YEP\nXM+pU2MUCkFSqXZarTorK+Ns25ajp6eHxx9/lni896f28X0fw5UZOXKETYkE6VSK6VqN54wW+WAX\nejiO44Xp6eqlUpnHs+qcO36QRr5Ij2NR9H3KXoKgGiDh+lTcAjXP56KkkZD7iNsQIYBBgAJ1ZBaI\nkMa4ZKIFy0AEiwYuChLKJX+LCtAghImumDRclzO+wzIyNZJ4/hCuDz5xXFdjuXScZDRLOCBwfZ9S\nrUC7cMCRsVUDw5Rpi0SYLq3WZGRTbcSj0hvulHQtwWJxkcGhN5+hcF2X+fl5PM+jo6PjXfcpeSum\npxcIBtMYhsGLT36DXLmMJWTMpocwbPqTMkdPn+aO3bsZm5tDDqQx80VEvU7UdckEg1SdAFW7Rlh4\nrLhlAnIYy61cEp7ruKS928QmiCAEzOEQQZJqhOUQDXcZobUj2za2BWFdZaVpEFZUNE9QcyusTtaD\nR5xlAoQoUGSBrkvqEUVahInQcAokcbCpARYRoWD4Joa1Ar7HQFCiYqp8/+BBGoUqLV/hbMGmM9ZO\ne0cXOUVlbmacpw4+S7ojw/B1H8JxDHp7NTZvvtwleBUhBDuuvpodV1/9pu95p5mcnOTv//5JarXV\nczEcdvnEJ+5icHCQ3t5eUino7o4zMjKHbWcIBALMzBzG8+aZnFSpVi+iac+hKAk2bdqCLKsoSgjH\ncdm7dxfPP///IcsNhHDRlSyy7eD5EsJvo8Ey+CYyEjUqZMiTQ0bGxQMCOLR5LlVJR/XzOERx/RZZ\nPALo6L6ERJUaFi5BTOrY6LSIYrIBCR0PgYNMCBeJMI4XQpXi5H0P2wpy8ewkDz/8ImvW7MDzLMbG\nlunoWEO1qrC0VMRxklSrCouLo7zwwpPs2JGks3O1/mphYYFTp85RKpVRVUinM3R3dzEwMPCuuy3/\nsryTyUgFiF16HGe1q+k1hBD3AEnf9x9+s1/wla985bXHe/fuZe/evW/7IH9elpfh+HF4j8xavq3c\nfz987Wvvv2QkGAzy4IOf5NChH3H8+Fk0TeWjH93G1VevOlJmMnHGxmqvyUkDFIuLJFyDLZs2MX78\nOBlZpjOVYpNco5HJsKH/Os7PL7Jcn6JVeJWU5uL5CYr1Fp7XTkkF38uQVxs01TotT0XVWth2nDZX\nwvZWP1QpoIGCywk82gnhY1Ihjk+RBpDCYxFI49JCFhUS0kXWynU2qCqmFeBZO0iBLnzW45PGp44k\ndITQESQpN5boCYdIxyMszTeJ4tHyakR8i3y+Qb5YpCoES8Xiasu0LiPHYpiOTeBSC7VpN2naBpve\npGNiZmaGb37zcapVAQgCAYt77731DdoPVwLDMIjFQhhGnpHTI/hL8/RkOhAIzGqeqZkpurIpCsvL\nTCwssOC6ROM6egOm5+dJBgK4vk9EsQl6LnFZUHWLtPBYtSH8sSy8Q4IshuRS9gL48jRqIEDQj6JH\nMzTq40SDGpIZQvJL1L0WSA0skaDq1C6VwUaQCRIihUSUOtMkqJO6NIcmpCWafhPPB48aOcoIFLKS\niiokKngUXZ+VlkW+NceRkyGGs1tw/CCK7KL7Cs1SFd90adfS5J06K4tTPPO9/4c//i9f5vbbb31P\n1w1UKhW++tXvEY1uord3deauXi/z0EOP8/u//xmSySSf+cy9fP3rj+I4HtPTZ5mevoimeSSTG6lW\nE0AM3zdYXDzIhQtn2bBhK65rEgpliMXC3HbbbtraIkyOLGHVQ0xMN5ClGCoWVXeJEEFAockEQZoo\nKBi4mDSJCRvZ9xjzWvRoURpeEcv1VitAhI/iOWRREVQwaJDDpkqDOgKPOglimICBwMBGUKXq+xyp\nlkglcwSVELOVMCkzTUfHIOBz+PARXFchFgsTDOYoFpcpl8vMz8+xdm0n5XKOr33t26xbN8g//uMh\nmk2VU6dO02oppNMxNm3qZN26JJ/61L3v6dj/mHcyGXkZ+DfAt4FbgL/98QtCiK3A7wJ3v9UveH0y\ncqV57DG44w54j8j4v638+q/D7/7uasLV1nalR/OLEYlEuPXWD3LrrR98w2vXX7+DEyceJRpNouur\n05ZTU2fpDMvs2rMHz3GQKxXioRDdQnDGdgjo0+iBRaKtOfZ0RzHqAWZXCpieR1P2MFwVXcmhYCEp\nJhUlQ0xdRuQlUIO4fouav+oJmsLHoUGSEgYyPjIWZZKX7Olb5HGYAmySQY1uGghFx0Yw6fsURAZo\nx/UjSMhohHD8Ip636iyqiEVsO42mhOjKpijPThLRlhiUBbasUmqaNHyXv/jOY9xzz4fZdm0fkjTA\n9LkxokLg2C3yjTFuuPsOtl911RuOX6PR4Ktf/S7B4Hp6e1ft41utOt/4xtN84Qtp2tvb37nAvgUL\nCws88cSzTEws4Tgm586NYBUDhIJB8MF0TFJJmfZIHxfyeWYDAXb09PCv77iDhx56mJeeOItj2wRU\nlZZjYxpLdOtNbNVDaYTQAylsO49j1gnRBsLBFg7hQAJXChANt4iHHQzbRY469GbbUVsFisuTeG6T\naMRHUkLUa+eJC5UAEjUsII9PH+BiESaIjoFJGIlBSQbXYpkCJk2ykoLtSVS9ZeIix4pvo/rguA3S\nUpU+EjRaJWQRIBFOsWwW0esF+rMdlC2DvA0DuXaSapD56an3XBvnP+XMmXO4bopo9CdeR5FIgnI5\nw6lTZ9izZzfZbJYvfvHzzM3NYRgGzz33MmfOVBkdLdPTM8TIyDialkLXk1QqyywuTpFIyGSzGRYW\nTnPPPdcyPT3P5quvZfzkeZSFJp5jYbsuOhqCZQQqQYr4NPAJoGCTwyKp6YxbPsJfYcVJ0PIUogJU\n4VD1Z4iQx0QQxUUAg0AEj1eossQ0LVRsYnj4CGaIKTXSeheeohMXIU6W85TkNk6fnqRWe5pAABYW\nlikWY2zdOkgsluXsWYVsdgulkiCRyDE8/AFOnTrIgQPH2LbtI7zwwlMkEtfQ0ZGgUJhGkjoYHa3w\n0kuH+OAHb7pisf15ecfmb3zfPwYYQoj9gOP7/hEhxH+/9PJ/A9qAp4QQj75TY3g7+da34N57r/Qo\n3hnC4dVamL9/a2Xu9x19fX3cfvt2Dh/+e/7hH/6Cf/zHv0KS5lm3ZR2yLDO8cSOuEETDYQzXZbC3\nh5t3rGdgOMeWaJihVBrFVwgqEdJyG46XR/IdPGwQChXTRgpksZVeSpSYt8uUPReLVWEjjTpRlggx\nRYpZ+pkkSGFVZFzqI6ZcQ1jZSUjagi9itJJZuvv7ORcMcUqKIoJZFFkjQJ44NnEUoujILCMpJQKa\nR73xPOXlQyyWz1P2xhjwClRqJkUjiKG24ek51LrEYl3lYx+7nfb2Bh1DIeyUidZt8aX/9EU++9u/\nfdlCt5GR85hmjFgs/dq2YDCConRw/PjpdzGSP2G1KPlbLC/H6O3dQ3//zcRiQ+TzJ6lLBpPli8AK\n29Z00N/Tg4jFeOAP/oD7P/c5urq6+K3feoBERwtDWWS+OYWlzjMQr5LTQRVT2HKejVuSXLWzg2Co\njhSII6sZLEXBUnVCQY9EtA2PAN3pIXrDObZ1bwdHJmo16PNNtuI5NTAEAAAgAElEQVQSrc/RaY+A\nO4vHNAGKpMni0wBcVBoIIC7p5ISG7BnUaSIDJSFRkYIklHZ0f4mz3hlMfwmXFWTytIkoMc9HNAqr\n7keShxRto+hLXKgWsSyD9qDHFj1MoF7hyUe+C0ChUODAgYPs2/cc4+PjeJ53RWJ4OcrlKqr6xoRJ\n08KUSrXXnkuSRE9PD8PDwxiGg2U5SFIETdMYHOzG8yoIEcU0J5iZeZJczqRYfJVbb93ANddcTV9f\nJ7mOJNt27ybT3U+6rZv29h5EQAMpQ0rpZTDaf8lNxiaJBZJg0XEoyioGPkWxRFmuUKJIxbvIRn+Z\ndXj04hIEgkA7oAO7MEgzT4Bj2LxCSLxEm3QGyVcYNxo0PZsVu87FegMR6CEaHWB+Hs6dMygUTKrV\nIlNTY5TLRXxfxbbLxOMxKpUGAEIEWFhwaTZrGIZMMLhajxUOp5meXiCXG+KVV0694/F7O3hHW3tf\n38576fkXL/38+Qwx3iMsLcHhw6sFrP9Suf9++MpX4AtfuNIjeftYWVnhxRdPMDCwi+HhMLZt0mrN\nMNUYZahSobOzk8ratZw7f57JRoO1fX2MuS5rhoYoLy6iCYGPgSzLxGI5yqU6NZpgj+NaKWpCRm/q\n5M0lApTxMWmRw0IQoYRghSSwSTJRJJuGp6J4MnnCyEoPshJBwgNJJ5yOk+6YwVY9BoIh5nwLq+Ii\nu1WCko/jBZHIoCLwWCGWUvBEiJgzzYZcDFnTuDAOsy2fFTeAqkcwFZ3ecDcrrSJTkyvs33+YYDAM\nmKTTAW66aTe7du160zXlWq2OLAffsF3XI5RKby3j/k5x5MhxPK/9Na0SWVbYvPl6lsdP8+vXdnBx\nfJyQZZE3TeZLJfw1a/jg7be/tn8mk+H/+r//G3/xX/8rL333u4SFwI9GWZPqxa/VUKNbuevDtxMI\nRDDNbzA2No/rxLGdBhHdJxUNEwkJStV55uYm6evs4+LEcTLNFp2pXubLY8Qti6ArCKOTUjqpOkss\nYOGjI7Cp0CDDquS6K1xMWSWshWi1qkxgseAHWXFDKLTQcBEorBE+CSWI7cs0fZ183UTTLdxAmnja\nYrniIiSZrkgUhQYZHbrDcSK+x8lyieeee55nnz0OZJBljR/+8CxbtrTz8Y9/9A1Gk1eC3t5ODhx4\nGej/qe2tVoH+/suL8w0P93DmzFE8b1XrNBQKEgjYSJJCV9cWBgcVdN3m3ns/yPZLJlxbtmxm//4j\nmKZGV08aWc4wNX4CqlU0pR1VtomoATQ5zZw7v1q547lUgBIhPNagBGP05YJUp1/kBgd0VFTfoe77\nOKw28tdQLzX0O3RgEpQdVhSFgKzhOBFmHcEkXRS8EPXmMnJkDapm0moZaFqaUCiA59VptV7CMKLM\nzJzANCWiUZ9MpodS6TyPPfYwxeI8lmVhGE18/yefY9/3EUIgyzK27f5cMbBtm4WFBYQQdHZ2vuu+\nNVf+LHwf8K1vrc4cvMdnOv9Z3HorPPAAjI2tao/8S+C55w7i+1309PS/ts22uxkbq/LKygqFQz+i\n1Wjhx5Pc+MlPsmv3bvr7+3n04YeRu7sZO3qcarNCqdpC19rw9SRNuQvPzWNbs+haJwqTaO4ig3Rc\nqqKvoyLj0ETFQkgSbiyGUBQQKlpdoBoBED6qFqFpVQgEdUIRmUCii3h/F8eOHMJtXiRopZFFiyBZ\nKkzS4iKSqNKj+dSbPrJVYWdPho5IhM1dXeQXF3EbPr7aQSzeQUgOYlgWciRBteXyve8d4J57HmTT\npqtwXYcjR0aoVB7lgQfuu+zx6+rqwLbPvmF7vb7C4OCbF0O+k0xPLxCN/vRaYjKZJBDvpliv8rE7\n7mCxWGSxWCRsmvzmH/7hG5Youru7+dwXvgDhNAdeOIDfqOKl0/yrz3+es+cLRKMpVFVj1649lEov\nYhglNEUnqmaRRBVBE7m5sKoGY6cp5WdItGxa4Ra5gE8VFSQZ1RP4AZ+wFCRpmRSZxCKFBCgsEZKb\njHguIV8mYJmUPJMSEdLSWlxkSp6NRZag2iQWiBORNITkYrdaWHIES67T1bcGTYtSsV6hrk7hOhGG\nkxESksLY0jzlZpmuTYN885uPs3nzR9H11WNRLKb45jef4ODBI2zfvpk9e65h3bp171YY38C6devo\n7DzMzMw5crk1ACwujtPW5rF+/frL7nP99Ts5fPgUExOTVCoxGg2L5eUKsdhq59mqzmk73/nOPgYG\nBjh+/CQvv3yS0yeOkZ+bplJoMrVQIZYYINe5ntmZUSIs051bR8WaRql5LKEwT4AoAgdBmxoiHkpS\nyM+jeBKLuoJs26gOFIE2FHwENiFCyNRxaMktEopEe38/M1WJdGQHQ47NeHEWLxhGmFFCoTiDgxqT\nkzMoSoBWqwisMDg4SCzWT71+hlQqRzq9ifPnv097+xZisS20WhdptUY5efIIkuRh2y1UNUijUWDD\nhn6Wlqa47rrLH7/Xc/78eb797acwDA3wiUZ97rvvbvpep8r7TvOrZOTn4JvfhC9/+UqP4p1FUeAT\nn1h18v3TP73So3l7OHt2nPb2XT+1TVUDNJvQEkEiA7cR1UOAx/RMiQ/FYqiqyvrt23nkr/8GUdOw\n/DSKWmextsKK6rJ587UU5qp4To6kHqHVCFF1PSpGnoC/majwkISGS4WyP0Y447FjaIhEOg2SxGMv\nHSNkV6lJKzTcFtFkFE2TqVRabN06zOysSrXQwZpuhcL8GG7TJu0tEtMkkFp0BCWMRoNlT3B1SGGj\nolCtVnnVtrlx7VoeXjlKznGRXImma2LJPmY0SaNRo7d3C/H4aiugLCv09m7mwoWXmJ+ff60q//UM\nDg4yOPgKExOnyOWGkCSJpaVJUimTLVuujIJje3uKubkSsdhPxNaEEKzfPECiw+LQwgKKECidnXz6\nIx+57MX0xImTPPzwcyQS13Df/bdRLq9gGJPs2rOHweElnnjiRwSDXbS1xdm8OczZs2dIp4dwnCIB\nxUb1Kty6ZTMdsszFyhKziyXiskVQSOi+i0mAtlCE5cYKQadJKNiDL1lYxjRlqqgCYnKRcLiPqNJL\nxaxQtVrUsdCx6dFStHwfYZZpILAQLLgV4lqKeDSJJUHJaVILBQiFdPywzR/84WeZHz/P+W8/QqO0\nSMEOI2SdWHYdS0WD2tgiO3asFrwVCgVefPE4ntdNqdSgWEzzt3/7FPfeW+eaa65MN42qqnz2s5/g\nhRcOcuTIIXwfrr9+I3v2fOBNiy+TyST/9t9+ht7eH/Doo/uYnZ1GlkM4jkZ39/X4fpbR0UUc5wJ/\n/ud/iW3nmJ8x8fNJcqEQCeccV6eyjNWb6J1REok15Ocspmrn6YwGOG2mqFhpgoSQsZFECxFw0X2P\nEBIyCsOxdmbLs0R9l6AnwAtg4yALgfAFVWVV60NJ6Vy/axdHzjQxahH0aBxVz2Imk5RKZ1HVEMPD\nV1GpvIqul1EUjXQ6x3XXbWdysoDjSKxbl+HIkUdQ1Q4ikU4mJk7h+01SqU7Onz/P2rU55uf34/sx\nurszeF6ZRMLgxhvfsjSTfD7P1772fdLp7bS1RQGo1Up89auP8qUvfY5oNPq2x/ty/CoZ+RlMTMDo\n6L/MLpp/yv33w2c+A3/yJ+9vR+IfEw4HsSyDYPAnmiWu6zAyMsqtt36S9vaffAEvL8/w9NP7uemm\n6/irv/o79l8wCLQcQpaFIoMRTdGWipMMz9A/HGB0JkitYGM7Oo4IYok4SRFcVdoUPoFACFn0ouU8\nRDpNNBrFNE06ImHisqCrZy2RSCeNRpWlpRUUxaSjo5+FhVnCSoxqrUkiqNPwGth2g6TwcDWZhiuh\nqDJ9bT0EGzUqVYuAZoHv097VRWdHO2OFFqaooYfiOJE4gdQw3twLXHWZIlUhopRKpcsmI7Is8+lP\n/wYHDx7i8OFjOI7Ldddt4MYbP0ww+Mblm3eDa6/dweHD36ReTxOJJCiVSoyMnEDXF7n7o79DW1sb\nlmWRSCQuu/zkui5PPLGfXG77686LACMjLb7wha/w0Y/ezp13bqNcblCvt7jzzk/T0fG/cebMWQzD\npqurnae/9S3WqyoXDx/mtuEewsKkOjaGaDapOQ6WLxB+gClHAmmZRKOO43vUVJ2OxHoGs0kuTD5P\nTM6gShBWYtRaLogIrm8zZtYJyB14UgjHK+F5VQwtx4hVIVVqYkoStZjHb37pQTZs2MzQUD/9/f0c\nP36cvzx+guL5PKloO51dXaghnaPFAs1mkEJhnmy2mzNnLqDrbQjhIUkGyWQboVCUH/zgINu2bbli\nrdvhcJi77rqdu+66/We/+RKZTIbPfvbTfPazn+Y//sf/zKFDS3R27kWI1djrepwzZ85w4sQyN954\nA2PHLrKmvRPXcTgweoHeDVmuCdv8qDRNV9d6QqEoczMvYZs1tPh61rga9YZJFBnHsZlrLBGywcOn\n6QjOLs8wFPAxZBnhyyygsCJkHAFNLLxABNMzEa7LzOQkphugt3eIlXyRml0mG4/j+2EWF1cYHS3Q\naGiATEdHhkDApa9vLZp2kdtuu5ubb97N44+38dxzC8zMnMXzNPr6NqNpOq2WjevO8OCDd2PbNuVy\nHd+vMjg4QKlUIhqNvqnA4IkTp5GkHKHQT5KOaDRJqZTg3LkRrr323fEm+lUy8jN4+GH4jd/4lyV0\n9mZcey14Hrz6Kuz8+T3U3rPs2rWdxx8/RX//jtc+iJOTZwkGo7S1dfzUezOZLk6f3sf4+BwjIwVC\n0RswVZ2aVcH3DXKZJEF1hYg9z7n5FngaihLGsjx8L0jJA08YxBUNw3OoWHlinUnKwuRUq8XJfJ5K\ntcao0WDTrnu4OD7J5OQkjuNRrVZpa+tEVcOUywXyy7NEZYEphQm7VTKeQQwXw4A516YRybDWD+Bq\nJqbr4psusmxQaTZJJUOsHepledmmbMvEtAC6dIFf+7XdRCJvbAXz/QbxePwN23+Mruvccstebrll\n79sam1+W9vZ2Hnjgw3zve/vYv/8iExPzpNNZ1q/fwl//9RPs2jXM3Xff8aYX3lKpRKslyGQiNJtN\nzpw5y5nT4yRT/fh+O8vLUSYmjvOpT93Cxo0bOHfuHPv2HcD3fbZtW8eGDRt4QdfJpFLke3uZmJmh\nIxbjhOPQNEwygShNx2bCtxBShoS3SIQwshwmI0k0jDE6O+5kudWN78fJ111akk/eB10Iyn4QXQSI\nqGEs16FhCRx/BSFpOGofo2YRR2rybz77SX7v97742v+1srLCU0+9xIV6iCZh0k2HmYtjJNcMcNVN\n9/Dssy+xsDBNKtVBsVgjleqkUDjL2rWrM0eBQBDLUigUCnR0dFz22L3X6e/PsX9/5bVEBMBxbHwf\nHCeCYRhoQiAQ+L6PKiWYnJ9BM3xqrkSqaz3B4CJqQMcSgngoTH3RxBMyVa+JhY3q+yyYY6i00xCC\nEc+mZri0awoTnk1JJAiqOVYkiQZ1ekMwFFZIpwI0Ck0KS1MYlqC38yo2r1tDsbaErg9x5503sLIy\nj+8rjI0dRlXj3HTT3czOnqKtzefjH78P0zQ5c2aUuTmHSiVKIBBkamqcNWvWk0gkSad1hoYGWVrK\nc/Fig2Cwm1OnLH70o8f44Ac3cvvtt1z2uJVKVQKBN4qkKUqIarX+jsXrDX/vXftL70N8f1V/46/e\nN7Js/zyE+InmyPs9GRkfH2d2dolGY5yXXhqjo2MtsuwQiTTZtGnwDV9WjmNRrVaANM2miSSpSK5K\nJNSFabdwHIeK1SIa9DHtMBFKlBtncL0EDSeGi0dLb8NwbQwxjx5ai2kFSHe1c2zhFKYhSMaGUFWD\nfT/8IaFQH/F4Gll2keUk/f07OXnyNMWZQ2Q9h5jdpEKenBYgLOI0cVDae8mZMmW7hhbqJV88jR6K\nYDarXKzWmS+VqKUGuPrqu2k2XRYWZpCkee67715isRhPP32MUChKOBzH8zwWFkbp74+956zEfxZD\nQ0Pcd1+QmZmH2LHjTiKR1Q4Cz3M5ePAVNm5cy+CbeKrouo5tNxm9cI7JMyMsT0wTEBEKldNIsRKp\nVI5wOM6TT+7n9OkRjh1bJJUaQAjB2bOH6O9/lXOzszzxyCNs7ullw/AwbqNBZnKSU2GBHu+maTXw\nKiXk0jJhP4IqB1CkAIGggo7F4WPPsn1rN+VSmJJQUYIR/JaD02rhkcfxBmjYFr7nYkoNfFkib1eQ\nPB9UmUDQR1VVHn/8+1x99TY6Ozt58skf4jhdbL/6NkbCRRQ1hGsaiFiAzs5+hofHqVQusrTUjmWV\nWV4+Ri6n09MzDKwWPHqedcVmvH5ZlpaWGB0dAwS9vd2Ew6dYWholGEzh+x6uW2FgoAvwURSFfL2O\nLnxcp0HNmKFVLdCf3UwomkWSZGZnR7GsNZhOg2INDHMRz3Xw3TgeAg8PDYmUUqBbCyJpXUxWJph0\nDdK5HLqXJhLqoGE0UZpNkqpCCR9F34jTFISCbZybnMAIaWzuvZb8RIHt2zeyZcsmhIBGo0KlcgPV\n6gn27u2mq6uddevWoWka3/7246RS21CUA8hymnC4A8MoMzFxms5On8HBazlw4DD5vKCvbyfl8jKG\n0SAc7ua5506xadP6y37W+/u7OHbsxGtF4T/Gsor09Pxy1g+/DL9KRt6CQ4fAtmH37is9kneP++9f\n9d75sz9brSN5P/L88/t56qnj1GoaExM6y8sXmZm5wG/91ie5997P8zd/8w2Wl6dpa/uJOuv8/AXW\nretmaSlAOJykVMrj+RkggCxkLLuOLK1QqTXpQmKovYeSJJhbush5bwJXbUNVE1hSA8XtBlunWVri\nyMElDAeSWo32UhXJlWnzw5QMBZHoxfcXyWZrRKM6o6deZb0eQJaTmI2LhH2fqNlkxXOItK1haHgH\nU1PLlIs1Gq6NEskxKkzmrBqBXDtrrv8gG9LX0dnZj+e5OE6LkZEK/+N/vMTOnRswzRorKy9TLMbx\nfZstW/r58Idv/4X8Yd4rjI2NE4sNvJaIAEiSTDDYyenT5y+bjDQaDZ79wQ+YP3OQ88cnyaS6kf0Y\n6ViKYn0KYa5w7NgLtFomZ8++gBAp2tt30Nk5z8aNa2m1QvzRH/4FEWeZNk/lxXOHORh6mTXbNlEM\nx7h6YDfd2X4Azk0d59yhp0gpGSJBDYGEYXtYNCACbQM5JssTGOF1+ATJdu9g/uL3CXoWME/NlbBp\nEU4k0fQhdL2DSKSdWm0Zyyrz8suz+P4gL7/8Le65Zzejo/P09OwhEAhy4cIk0eggiYRGsThGqVSg\no0PlvvseZGZmAUlKMTNjcNVVt6yK4QHz86Ns2ND1z5Lqf7d57rkXeOaZ4yhKFs/zmBx/CWP5KIlg\njmZZkO7bzPard7O4eIzZ2Qn27TvASgVGT++jU2rRq9QwWnXOLpwgs/VTjI//CMPQ2bx5D/n8aSYn\nVjCa/bjOFFkpiOIFEAjquHiiTH9yJ5oSIeh5rFglhDZMT6aNo2OnUbQ1tCccEvEIU2WV/IJDrj1D\nW1sXmaEN6DmHm27qIxwOs379T+qvIpEEiqIRDndy5523vra9Xq8zPr5Ef/+NVKsN9u07QKNRAHws\na4KdO/81lUqBM2deolrN8uSTB9H1LJlMN5LURJZLnDp15rLJyObNmzhw4CgzMyPkcgP4vs/Cwhi9\nvTpr1qx5N0IJ/CoZeUv+1/+CBx/8l1E/8fMyPAw9PfDss3D7z790+56hWCyyb99RZLmPixfHiESG\nWbduK0tLJ3nyyVdpb+/gYx/7CA899A9MTS0jRBDPq7J+fZbrrvsADz20j+7uHly3zoXyaeqtGI5r\nEA9XWNsdZGF8lo19O2nv6CCYCNM2OIh67hhzajdDG4fY//zTROQU6aCHZGu0bA/NahLWMsg0CMpZ\nVKmBJMp0dcVJpTZQLL5Eq3UKapNEAnH0SAsv3c3IskvBNnFsk3A8SzKZYGGhgBPrYgLI5+vE40ME\nuraw9bphTp89yUc+stppMjMzysREic7OPZRK47S1rSOZ7MU0z/Hggx8jEom8L/0rfozneZdNooSQ\nXrOQfz2+7/Odr38deWaGO4cHSU5NUzMWOb48gk2RzoTOYsXj6JFxLEdnaSlANBqnszPNygrs2/ci\nJ05cQDbTdEUj9ES6sO0W5fJJJkbGMbMdlKbPYDsWiUiK+ZVFHF8jqIaIhEIE9ACu6+LUTTKdKX7v\nj/8YwzD4/d//z4yPzxKP9zG8dRsz4+O4Zgw9JBMJa8STSQxDZ+PGbZTLBTyvi0ikA8epEgxGSSR2\n8NhjL+C67iXzwSzbt2/k5Mlj+H6cSmWWYtHjk5/8EJs3b2bz5s3cdtsH+d73nuTo0UNIUgzPazAw\nkOKjH/3QuxG6fzaWZXHw4EH+7u+eYM2a3WSz3UxNnEYv1NkajhEJ+SCFmJp7lfPBMrfc/gEajSqG\nUUX1pxnUPVTLRVEE2fY2suFuphvnEEJn69bbUFWVbDaLLPscPQpRkSQdDeMaJqYhEbey+LKDIxQc\ns4rrqXQmhmhlIqQG17NeH0BVQ6QCkxQnx7GNBC4KxaJJteZS8Jts6t7A+fPjnD59nlJJZv36dSST\nq4JvKytT7Nnz051Nq+f06vm+fv02ZmaqeF4IVdUxjNXl11deeY5sto/FxSKStBHTDFOpCIaGtjM9\n/QrHjp3izjvfeFHXdZ3Pf/4+9u9/iaNHDyNJEnv3bmb37hve1bbvXyUjb0K1uupFc+7clR7Ju8+P\nnXzfj8nIzMwMnpdgdHSKSKSTQGB12jkW68W2Z3jhhaPccMO1fPGLn2diYoJ6vU46naa7uxuADRtO\nUK+b6HqdweENzEyMEVJLrOsJ0dMRRKnGKBhFzpxrIkgBMnk7gB6q0te3noi0j45AFAkwXBeQSAqF\nZmsRtBABPYFh1El4No3GMm1tgyhKhN7eFPZCkGwoQDTczcmFCtHMddRKoyhujXK5QLmSR08HMEWM\nWtUjlL6agbWDXHXVejo62hgZucDY2EW2bt3CxMQ4kUjfpfVz/9IxSDE1pVOr1a6Yeurbxdq1Qzzz\nzAlcdwBZXr2M+b5PsznPxo1vPHGnp6epT01xXV8fF8fGWNfTQUTXiY7azPo1dC0OJJGI0WxCV9d6\nTFNjdnaajRu3c+7cKOWyQ0ZWkDAvLQF4QBbRmMGJaUwswdTiDO3JAiulGQJqlBJV0moMSUg4kkve\ntwiHNcrlMvV6nZ071zIzc5JwOEM4HEWWIRCIk0j0YxhzjI0dIZ2+Fl0Ps7JyFt9P0tWVRlVX1XDj\n8Qy+H6GtzWN5eZpcrp+BgU3kcn1cvHiCRKKL//Af/t1PGU8qisK99/4ae/cWKBaLRCIRcrnc+2KG\nbGlpiYce+g7Hjs0xN6exuHiEtrazOIUptibaaaoBBgaCpNMpdhgG44AkKQwMXMfWrSmee+S/s63v\nKsKhGKXSHJJUQw/0INvG/8/ee4fHVZ55/58zvWm6ujQqlixZtmRbrtjYGIyxMcamBwgGAkuAbJZk\nU678tr0hV7KbvHu9IbvJbhohJBBCMaH3jjHuRbZkFatrVGfUpvdzfn+MkRE27rYkez7X5cuaMzPP\nec48c858z/Pc9/emX9ShVCrweAaYOdOBw5HFoYaPUUbl2G0WlDItba1NCDIBdSJB72g/GiRkci0G\nnRJjuh2z1Uw4qkIUFchUCZzDe9ArHKhkGiQJwsgJijp27apBp1vI7Nkr2LlzL62t7cyeXUY4PIzd\nHqO6erwwTEtLw+GwMTjYi92eyyWXzGXHjgOMjAyg13tpaHifkpLZmM0G9u1rwG5fgEwmx+8fxOfz\notWacbuDxGKxYxocpqWlcc01q7nmmtXnaSSPJiVGvoRnnoHLL4esrInuyfnnK19JGqAFg1PPW0Uu\nl5NIxAgEwlitR9a/JSmBWq0hkVDj8XjIzMyk5BiGKrfeej3Tpu3mww9ldHV1MWdOMVkZJgrzsjBY\nrRx480227ehCIaajkKuRpDh52XNoDxzi/fdfRJeI4w31okJNLB4jjpxI3I9MHEYhaQERtVpFMOol\nHgszNOSkvX0PPl8WKkMlLinA9voajJqZpGmsuDRZxPUaJJWSNw7uYsbCK6ialsGBAw0sW3Y5RUVF\nqFQqEok4FouezZvfQKNJEAh4UasV+Hwj2O1pY7EAgqAgFoudr+E4Z+Tm5nLZZRV89NFONJpsBEEg\nFOpjwYKCY04tj46Ooj/8Y2uz2egURTI0GuZOL8XX209zXxcRsYSQ3IfZnIPDkUtLy26iUQuhUABR\nhGg0RoReZHITo7EhEokECkUawXAUtSqbhYtX0tBQT4AwSl0eIm7CehM1wQEEMUZAhKF4kEUmB7/4\nxeu0tnaSlqbEbrcgihI6XYI77rgHj2eAffu2MG9eJkuWrKS5WU447EShiJKTY8dqtTMy0jmW/SBJ\nIldeeSlvv72Fzk4ParWJaNRHdrace++970srYNtsNmw22zGfm4xIksSzz75KIuHAbk8jEAhjNNro\n69uPyuNEbZ5LWJAhCLKx7DCX04nbPYxaXYRSqUKnM2I0WJDJZMjlasrLMnB2u/AMDqPNKKW7exsz\nZlQybVoRfr8fs1VCCMTQqgSUigQzZjjYX7cFgxQiLkrECIAYQ222Yi+ehSbNTFdXH2BEFEPEDOl0\nD7egk2dj1Jqw2QqJu0aRYl1Mn16NRqPHZsviww9f5oMPnmPBgksQxSx+85unuOOO9RQWFo4d/4YN\nV/HYY5twOkfRaExUVNiJRoNs2PBV/vrXl2lt7aG7W0U4HGRgoAajsYBw2M/gYAuLFs1Eq+0jkUgc\nU4xMBlJi5BhIEvzmN/Af/zHRPZkYsrKSmTWvvpoUJlOJoqIiNJr3kMvjxGIRlEo1kiQSDDqpqCgH\nBtBqtWMOhV9EqVSyZMklLFlyyVHPxeNx3n/1Tcy2EnKtuUSiUeRyBf0BPxkhDXK5h8HRPnSChyE/\nRCQNcRJIQjsFKj3RKESjrShVCqylDvLmlrNv3ydMmzadFY0yVUkAACAASURBVCtupKnpEDWfbsMd\nSEeh9pBhziAvr5LFl69AJoP6+o9YtaoKg0GHxaKntDQ5lRuNhtm69T28Xi2hkJLt2xvweA6hVPoo\nLCxlzpz5h/sfAzxTLmD1MxKJBDKZbGzcrrpqJeXlpRw82EQiIVJRcTXFxUcHJwOYzWYCh5dvzBYL\nGcXFdLS2Eo3FcBQV0Ce6ycoqJbewiEOHRtHrLeTmFtPcvAWPR8HwcAuC0AYyGTqlBTERJxbzE48P\nEhQEHPkLsNkzMJmMDA42MGvWJXz8zm9JM2jo6U8nFk8jHAuBIsDwsIzm5j4slgpEUcRobAeiuFxh\nmpvrMJkUrFkzh7vu+gput5vf/vYFrNaZTJuWS11dHyMjw2RnWzEabXg8g1gsUFFRQWlpKY2NjfT1\nucnIKGLGjPJJX5PmVOjv78flCuNwZCOKClpaDgI2zOZSOp0fEopFiMW9ZGUlM4TC0SgJpZKqqul8\n8IETo9GGOacYt8tJhtEKBMnKKsOUkQHBIJetXcvHH+/E65UYGOgkFgtQMVPA1S4nI8uMUqGkp7+R\n/Gw1/qiFQrsFoy6XLpebxkCCrxTOQqPRUle3n8bGbVithQiyKuS6FjzhHrLTC5GrooTDDcycWTxW\nLysaDSMI6eTnF1FdvRyNRoPfP8qTT77C97//dTSHC6JlZmby0EN3U1t7kP7+QbKyZlBZeRN799Yw\nMCCgUs3AZEonMzOA369FLh/GZpOzdOkczGYdNpt9rC1Iirv6+nq2bt2Hx+Nn+nQHS5cumjCBmhIj\nx2DzZgiFkoXxLlbuuCOZVTPVxIher+e229bQ3f0YDQ1bMRrzkMm8FBTY8PtdyGRDfPe7D9PXN0Re\nXgY33LCaZcuWnlQWgUKhYN6y5TzfvZdevx85ECZKblkZPftqmDVrCZ2qNHx1e1CpBOLKKP5APz6t\nloQ1k0Q4giAbRMpKJ73MQW5uFI/HjMlUzYsvvoEkmckougR3YDdObyfWIi2rV62itaWWQzVb8A23\nUJAR55a77qKx0YnHM4jJZKet7SCjoxq0WgvXXFOJ0ZhGb28h9fXvMG3afEQxyuBgDz5fJ2vWzMdo\nNJ7wWCcTbrebd9/dTENDBwqFjAULZnL55cvQarUUFBSclEukw+FAn59PU08PJdnZzKyqokmvZ0tL\nC3MXL+bapSqcTjW5uSV0dHyKzzdMJBLFZJLh89URi3WSl2egt3uQes9O0iWQxDg+WQh14Uws1iIA\nBEHCbrdRWVnF0OB8Og61kJtTilyuoN01jE4+D6fTiVKpJhJR4ff7MJlkXHllNb29HQhCJ7feejvl\n5eWoVCocDgdf+9o1vP76xyiVo2g09ajVBjIy5tHZuR+dLsDdd9+AXC5HLpczZ84cDjufX3B0dXXR\n3NxJf7+GzEwbeXkmurvbUamMJDRmajoPsHxWETabjXA0Sm13N3PXrGHeggXs3duI09lIfslc9g90\n0tNWw3SHhV6/nxGlko0PPEBubi4LFiygpaWF7u4+jMZC/umfbuKdt9/mzedfRhaL4w+1kdCpycxz\nEFRpUdlzSC+uxNPeRWvrQex2GxqNl8zMDAyGDEZGnOTnz8VqNeD3H2D27DK83mZmz16A3+9l795t\nbN/+CYlEGhkZR4z8DAYzQ0N62traxlXINhgMXHLJorHHiUSCzZv3Ul29gq1bDxKLGcnPn0l7+wE8\nnjC5ucX093ewZ89+Zs6cxh/+8BSrVy8nPz+fDz/czNtv12KzlaDRONizp48DB57iwQe/OiGCRDhW\nsNdkQBAEaaL6dt11SSHy4IMTsvtJgc+XDGRtaQG7/fzsUxCEYwYfng4ej4fnntvEjh116PUZh6tg\n9hEOa/F6teh0efj9oygU/axdO4P77rtj3F3D5wmFQjQ1NTE4OEIo5Gfz5nas1nJisRgmkwmFQsFf\n//ob1qy5Dqs1kzde+gMdB5vQyPUEJD9Fs+aQEEXioRFmzMzgB//8faxWKw0NDXznO79AEGYyMBBC\nqTShUiUwGuU4nbsoKJiJUj5MqLWWDLkasyGB3qwjbJJx27cfYsuWA4RCBnbs2IFMVkB2toWFC6tR\nKpP3GC0tW5k7NxOvN4LRqGfBgqpjLk1NNMcbd4/Hw//8z5OIYg7p6fkkEnH6+g5RWCjnnntu/9K6\nOsfC7/fz7htv0FFbi0yS0NpsXHHttZSUlBAKhXjyyU10dAQIBATeffc9vN5hFIp0EokogcAAarWD\nqG8UWSKIGHejFdzY06zoCkvJKrgerdbC0FAnc+cWYDJp8Hh2I4p5GAw5yGQCTz/9CkplOT5fJx7P\nQWy2KlQqI6HQIe6++2bkchm5uUE2brz5qL4n42GCyOVy+vr66OvrR6/XMX369CmXjvsZp3K+b9++\ngxdf/JRdu5rQ6+cQj0cxm6G0tJCmpjpyc0MsWjCTvuZmFPE4CYWC6ssuY+myZchkMrxeL9u27eLA\ngUMIAqTbddiMadiyspg5a9YJBbrf72fT00/z3p/+Qom9ArPBwkjAS2csTMXyGxgYaGTp0kLy8/N5\n442PsVgWIJerGBhwUVPTRCKhxettprLSgkYTR5Jy2bx5Ox6PgVAIwmEZKtUgl146jSuv3IBMJqez\ns46bb549VlfnWASDQX7609+Tn7+M/v5+amoa8fsjDA+34vU2UFKSi1abw4IFV2GxZDIyMkAg0MzG\njWt54onXyc1dMhZzBdDX10pVlY7rr193coN4ihwe82MGJ6VmRr5ASwt8+mkygPNiJi0N1q5NVvL9\n+7+f6N6cOiaTifvu+zvuvDOCx+OhpqaW11+v49AhF3Z7FYIgoNfbGBqSc/DgKAcO1B7TadDlcvHH\nP27C59OiVKYRiYzQ19dEPC6SlVVGOOxlZKSL+fNzDteFULN4+bXEhByUykzMZonLLkta0judDaxa\nVYjD4SAej/POO9vJyCjA6Qyi1dpQq42EQj4ikSB2u5zR0UZ8zoMsTM/BaFBQVDQTuULJvuad/PqR\nX7NizdWYTBoGB41kZpaTk+NAJjtynqtUKhYtmv+lnhtTgd279xEOW8jPT85+yGQqHI5ZtLfvpLOz\nk6KiopNuy2AwcP0ttxBct45YLIbRaBxb0tFqtdx771dpb2/n3Xc/wO+vRBTT6e4Oo9Eo2LbtTWJR\nC3kGHQaFnJzs5fQON5KhakMS/Didb6FU5mA2K6it7UClCnH11Qvp6FCSmZlxOL5ETiDQhcfTQSSi\nxeuVkKQ2JKkDUQSfr4P16y8/Zt+T39fktH5RUdEpHfdUx+fz8frrW3E4FqNS5bBnTw1KZS4ulw+N\nppE5c8x8/esPYLPZiMViBAIB9Hr9uNgIo9HI6tUrWb362MZfJ0IURTydnaxfspjaul4EwYrVYELy\nSzQd+ITiGdlce+06VCoVH3+8G0ief/n5eaSn23G5XHR1ubn99iuprq7m7ru/gdsdwGyejiQNIJMp\nsNkWs3PnZqLRIDZbFmq1h+zs49eU1Wq1GI1qAgEvWVlZLFwIH3/8PiZTHhqNnpGRAEajBb3ehCAI\nWK1ZxGJRXn/9PQTBOE6IANhsuTQ07OP660/rYzojTv624jQQBOEXgiBsFgThv76w/R5BENoEQXjy\nXO7/dPjVr5LpvFM46/Gscddd8Oc/T3Qvzgy1Wk1GRgYDA8PEYiKCYBkXUyCTaREEPY2N7cd8/wsv\nvIko5lNQMJucnGKKiuZRXHwpOTkCmZlesrL83H77En70ox9gtwfp6NhDNBpGLh8gEmmjqmoGoijS\n39+BTudh7tzZQHL9OxxWMm/eEqCLYLCXeDwI+HG7D3DNNTeTmakn36anoryU6dMrUSpU1DrbaHGp\naaxP0Noqo7Y26VESi42OEyLBoA+1OjKWJTRV6ejow2RKP2q7IBhxu92n1aZOp8NkMh0VWyKXyw/P\nHKmpqFjK6GgQuz2ZwqvX5yFFh5FLIIoy/EEvaoUGe34+c6ZlcfWaGZSUJBAEGVlZ06moWMnBg8N0\ndOwjFosiCDJsNhMjI41IUjrp6eUkEiLRqBJRTKOp6X0WLy740qJwFzNdXV1IkhmlUk1+fimXXbac\nnBwRmy2EwTDCTTddRWdnJ7W1tWOlAM52kObg4CAGQaCouJDsbC2DQ+2MjLpIhH0M9dRw661rx2z0\n58+vwOU6cj3RaDSkp1spLbWzaNGiwzOwambMWEBhYQaVlXPIyNDjcjXi9epwOkO0tXkZHAxTU1N7\n3H4JgsDq1ZfictXi842wZ892FIppKJVplJdXYLXOxufT09S0f+w9FksGAwMjiGL0qPYikRBpaRMz\n03bOZkYEQagG9JIkLRcE4deCIMyXJGn34adfBj4GHj5X+z8d3G548kmoPf74XzRceSV87WvJ9OYZ\nMya6N2eG3W4mkWjlszTXz5CkKDKZCr3+yAnocrkIh8MolUq6uz04HOOLwuXkTKOnp49vf/uGcRe9\nBx+8i5aWFlyuQa655l76+tzU1NTi84nMnFnMqlVfGSs6JZPJkCQRuz2H1auv5rXXXkaSQqSlGcjM\nnEZamgWLRYF31I5WY0BAwO0bprbZjTxmR1RH6K9vRmY0Ysk0o9H00dERR6u1E42GkMkGuf32NRNW\nZ+RskZFhpqfHi9E4fg1bFIPnrICXQqFAFJPBsqIoIpcrMRgsxMIRYolRZJKAIJiIxbyM9jjR+bWM\nBsOMxAu5+pobxz7zRKKI4eGnaWn5GKOxEIMhhEKhwGrVotWqsNs1iKIKgyGNqqpCNmy4Zkqk155v\nkqXsE2OPLZZMVCotbnc3o+6dvPH441gEgQTwvlLJ2ttvZ/r06We1D3q9npAoIpfLWbSwmqHhYUaG\nRwnFo+TlVo5b/ly8eCFNTR10dOw55vmYSCTQaJQEg8qxwpVWq5eRkSjxeACzWcXy5YvIzMxg8+bt\nVFfPJj39aEH+GVVVlchkMv72tzfo6+sgM9NCVdW0w7FjNZhMDjo6dlNZuQhBEAgEvBQXOwgGw4dT\nhZOZR6KYwO0+xC23HB28fz44l8s0i4B3Dv/9HnAJsBtAkqQhQRDOTynAU+CRR5IBm1M02eCsI5cn\nA1mfeAJ++tOJ7s2ZMW/ebD76aC+dnf1EIrmo1QYCAQ9KZRidLsG8eZWMjIzw6nPP4enqQiWTMRiJ\n0OOW4XB8sTXhsH22iCRJeL1eVCoVWq2WGTNmjBNuGzYkX5O8oB4hKysLi0XO6Kib/PwyrrlmAzU1\nNYyMhEhPN+D31/LNb97JL//fo3QM91Fky6GlowMhriNCmLJp0yi02Rn0enD1CSxcOIPq6pm0tzsx\nGjOZOfNqrFYrU5358+ewY8dzBAI29Prkuv7gYA8WS/ycuUMuWDCTZ57ZRmFhNvX1LkymLOAgBpMB\nlaDFQJBBzxBRfys5UpA2omRb8zAmVDTV11N5eI1fLleQnz+HK67IRxQFdLocAoEQSmUWgqADolgs\nVioqijAah1NC5EsoKChAqXybUMiPQqGioeYjvN3NjPa3ICRGKb/0UsqrqpDLZPiCQd54+mnyvv/9\ns5pJlJmZibWoiJbubqZlZ2O32zGaTOxxOln+BUOm5JLf7TQ3Nx/zfJTL5SxfPp/HHvuYtLQcFAo1\nPp8fjSYNna6bdevWk5ZmOdyahe7u7uOKEUi6qFqtFsJhLYWFi8ficaxWDSMjQ4AIJDN3Rkaaue66\n1VitVv7ylxfp7OxGJtMgih6WLZvBnDmzz9rndiqcSzFiBtoO/+0BZp7DfZ0xQ0Pw+9/Dvn0T3ZPJ\nxZ13wpo18JOfJMXJVCUjI4N7772eRx99ip073yGR0KPTqamoSGfDhmU4HA7+9OtfYxoZYebh7IxQ\nJMLeve/Rkt5ASckRheF2Oykry6enp4dXXvmAwcEAkKCqqoi1a1eN83X4sgBLmUzGrbeu409/eoHO\nzj4EQUVBgZnZs+Pceuv1TJ8+HbVaTfibUX73yz/S2V5L10A7wzE7JUUzmJ6fD4DNaKSrqxm5fCYV\nFRXjIu8vBLKzs7njjtW88MJ7DA/LEMU4OTlp3HLLTeds1qeyspLm5g527+5AoXDjdLah0/mBUaJR\nOb3uPvTBXqosAkU2G+lZWbzrbCMjv4iBri4qKiuRy+VEIhECAS+Dg0O0troYGUkcrvcxk/z8PHQ6\nHRaLhc7O/Vx5ZeU5OZYLAa1Wy623ruGvf32T5vo21H1d5Bs0ZJvklJqK6G1r44BWy9yyMtJ0Ooxu\nN21tbcyaNevEjR8mGAwSCCSLRn7Z92r9V77Cq88/z6ctLWhkMkIyGfOuvprKqqPrtyiVymOej6FQ\niLfeep/WVhcy2Sg7dvweq7UUr3cUSQqzfv11nxMiACfvC5KZmYnZLBAIeDAYzAiCwMKFc/ngg7dQ\nKAL09OxGoYhwww1LKCtL2gI89NC9OJ1OQqFQUnBN4A3MOcumEQThG4BbkqRNgiDcAORKkvSrzz1f\nAPxEkqSNX/J+6Yc//OHY4xUrVrBixYpz0leAf/1XcLmSgiTFeObNg5/9DFatOrf7OZvZNF9GIpGg\nq6uLvr4+jEYjDocDo9FIV1cXr//udyz8QppoXXs7rzb0U1F1JVqtiWBwGJ3Ox7XXXsazz76HyVSB\n0WhDFBP09bWQnR3l61+/85giJB6P09PTQyKRIDc3F7VaTTAY5OUXX2TXhx9iUavRmUyUVlez6ppr\nxrIk3G4327Zt55k/PEbcp6AgYwEqpTp5PGKCPR3b+H+/+Tdmz56YO5oz5WTGPR6P43a7USgU2O32\ncz6LIEkSTqeT9vZOhoeHUKnU2GwWGurqaN+8mXhnJ+V2OxqVCl8oxIHeXg5JmejtVVy6eg3NjY30\ntDQxPFqDXGNl3qJrmV5WQXNzDZ9+uoWCglIqK2fh9/dTXKxj48abUavV5/SYJhuner739fXxyL/+\nK3PNFux2G80HD2IMh1EoFNSEQtx49dXIZTLqOjupuukm5s6de8I2Y7EY7731Fo07d6IC4kolC1eu\nZPGSJV/6HRscHCQUCmG3208pkykcDvOrXz1KV5dAWVk1crmC5uaDNDd/zPz5RQwP65gx4zJksuRd\nXyDgxevdz/e///WTnuVpaWnhySdfR5LS0WqNBAKDmM0hbrxxDWq1GpvNNqHfs4nKptkG3A9sAlYC\nj3+xXydq4OGHHz77vToGw8NJk7Pdu0/82ouRe+5JirRzLUbOB3K5/JjZCIFAAM0xLj7F2dks0emo\nXpaPyzVCXl4pVVWVvPfexyiV+WOxDDKZnNzcMjo7d9LV1TXOORGgs7OTp59+Fb9fgSDIUCiC3HDD\nlSgUcty1tVxfVYVOoyEhijTW1PBaOMzNd9wBQHp6OuvXX8toXy+++gZqWutIiBZARjjST9WcbKqO\ncXd2IaFQKM5raXtBEHA4HDi+sEY33N+PpbSUtkCAYZ+Pzn4vCUnFSFjElWjDlmZg84evERvsI9sq\nYlNZ0Kgr6D7YiCHNQmnpHGy2TGpq3iAvr4S5cy+hvLx80rpiTiYUCgWFOTmUHZ4V9Obm0ldbS67V\nihSLEYvHkeRyRoD8w685Ee+88QYDO3awJD8fhVxOOBplzyuvoFKrmfclpcvtp+F1UFOznyeffIkd\nOzoxGmfR07ONhQurKC+vwm63kZ8fYeFCM1u2bAMsQByVysvtt19zSstNJSUlPPTQHdTU1DI05KGw\ncCazZs2cEuZ350yMSJK0TxCEsCAIm4F9kiTtFgThl5IkPSQIwjrgB8A0QRA2SZJ0dGL9eeQnP4Gb\nb4aLKFvulLjzTvg//we6ujhG/MSFQXp6Oh5JOsqZdWB0lOmVlaxYsRxIpvg1NTXx1ksvE4vaiMdj\nZGcfqY8iCHo8Hs+4tv1+P3/+88vo9RU4HMkp2HA4wLPPvke6IUy5zYbusMeJXCajIj+fTxsaGBwc\nHHfhW7luHc/39bG0UkskEmE0GCRuyOO2b3wjFW9wnsgpKGDPrl3klZTw6t/ewazJQqvS4A9JmLJm\nkVtgwij2c8nsQnLsdv73pQ8IBZwEA6Ps2DzC1dfditWazfTps1m5cumUdcOdCMxmMwmVilAkgpRI\nEAwG6Rgaormnh1hmJkMeD50+H5VXXHFSgsHv93No1y6WOhzID89kalQqZmVns/PDD6meN++snFe9\nvb1s2vQhGs00TCYlFksh4XCAbdtquPLKpaSlWenrq2XjxpuZP38O3d3dKBQKpk2bdloiwmazsXLl\nijPu9/nmnPqMSJL07S88fujw/68Br53LfZ8sbW3JAM2DBye6J5OXtLSkIPn1r5PLNRcidrudknnz\n2LtrF2VZWejUanqHhugRRW5bmvQJkSSJ1196CefOnRQnIvS7u3AP9+HOKWb2wjXIZHIkyY/JZBrX\ndlPTIaJRI1lZR9aCNRo9SmUOB+veYPHSReNeLwgCBrkcr9c77qKam5vLHf/wD+zduZOB7m4KsrOZ\nu2DBlC96N5WYUVHBrvR0GnfsJqtwJvEodI24iOQVcvU1G+noqCUtcpDSvDxaursJ9rWTIXjJUOvp\ncNax56NNVC1djygGp5wT7kSjVCpZfNVVfPCXvxBqbiZTLiffamVXby+RcJgurZbVN99MaWnpSbXn\n8/nQymRjQuQz0nQ6Al1dxOPxszJjtXfvAVSqXHQ6I6JYDyTP/2BQg8vlQquVUVCQDFBNT08/YbDq\nhcpFb3r2z/8M3/oWpK7nx+eb34TFi5OxNV9Sd2vKc/WGDezIzGTfJ58QcrspKC/nlpUrycjIAJJ+\nB527drG4qAivzcZHw7tI19jp6G2jv78DUYyRn68fsyeXJIm6ujqefvplDhxw4/OFKS6egVab/ADV\naj1KnZFBj4eszwWOiaKIN5EYKyn+eWw2G6uunhql3i9E1Go1t957L//RPUCruxOdOY2s2dewdPo8\n1Goter2ZUW+MSDTKvpoarizIpsXpg4QWhy0LdSTIrk9f4Pa7rz1hanI8Hmffvhp27qwjFoszd24Z\nCxfOn7KOq2eDhYsW8dHbbzPa1oYfMFut3LRkCUa9nkOJBCUlJSc9m2E2mwkLAvFEAsXnovNHfD5M\ndvtZWzobHfWj0egxmexkZRkZGGjBbC5CEBSMjLhJJAIsW3bDGe3D6/WyY8du6upa0Ok0XHLJHGbN\nmnVKDsUTzUUtRrZsSf577LGJ7snkZ9q0pO/IL3+ZFHAXInK5nCVLl7Lk8EzIF2lvaSFdpSIQCBCL\nxZg7dzqNjW0oQqM0173JdV+5jrVrryQajdLS0sJbb71PU5MPs7kUUVTR2hqgu/sdli+/Cq3WgM/n\nYtXalTTv3IFSocBmNBKORmno6aF04cJjipEU54doNIokSccM9ktLS+P6W27gFc1BHI5KEok4LpeT\n/n4no6MdLF00h8319cgjEfLT00nE49Q6u7HZCtAgYtXpWLv2qmPs9QiSJLFp00scODCM3V6MTCbn\n3XfbqKtr5u/+7qsXXbDrZ0SjUeTRKBvXrz9KdCScToaGhk5qZiESiaBQKKi69FL2vf8+s3Jz0arV\neAIBDrrdrNp4zLyKY5JIJOjt7UWSJLKzs48SMaWlDhob67FYMpk/fzkHD+6ms3Mno6P9GI0V3H33\njUfFJn0Rj8dDS0sL0WiMggLHWFViSM7w/O53T+H1GrHby/B6wzz99KcsXdrLunXHd3CdTFy0YiQa\nhfvvh//+75Tb6snyox/B0qXwjW+A2TzRvTn/iMC+/QdRRGR4vX6GhtwYDHrkehXzFlZw003rcbvd\nPP748wwMxNi5sw6DoQy/f4TMTCPDwxI+n55Dhw5gtVpJT4+zevVVOCtmsPmttzjY1YVMpWL2lVdy\n6WWXTfThXpSMjo7y5psfUF/fjiTBjBkFrFlz+VGFwyorZ7F1aw3NzXtoaKinudlFICCg1UYYGnKz\nZFEpPQcPYhweRmO1cuOll2K2WIiLIgeCQRSK4196u7q6qK0doLBw0diPrl4/i87OGurqDjJvXvU5\n+wwmMzKZDJlcTkIUx81mACQk6YSfq9vt5s03P6C5uQeAyspipl1+OXt37SIRDqO1WFj51a9SMfPk\nnCi6urp45pnX8HgEBEFAo4ly001XjXPSraycxaef7sPpbCQzs5CystkYDCqKisr5+tfvPsqD6IvU\n1taxadN7JBIWBEGBKO5kyZLpXHPNagRBYNeuPfh8RvLzk/vUag2kpVnYtm0rixbNmzLLPhetGPnP\n/0ze7d9wZrNjFxXTpyc/r3/7t6Rt/sVGW3s3TYMxSrVGRkf96PWzCAR9jEohpPYw27fvYO/eBhKJ\nPIzGMEZjEKu1mNHRfvLyNGRn62hq8tHRsYtrr72TpUsXodVqmT59OqWlpYTDYVQq1QkvTinODeFw\nmMceewa/30pu7jJAoK2ti8cee45vfvOuccGESWOrW3n44f+ksbEftbqUiopcbDY7Xm8ne2oauGTp\nUsq1WnIzMsYExYGODqpWnrg+itPZjVxuPeruPy0ti8bG9otWjCgUCsrmzaNl927KP1fqoMvlwlZQ\ncNzZRJ/Px6OPPksikUte3nIkSaS+vh2Lxc393/sekiSh0WhOepnH7/fzpz+9hE5XTkFBUqwGgz6e\neuotHnrINiYCdDod9913O598so2amr0olQrWratk8eKFJzzXvV4vmza9i90+H40m+f0TxQRbtuyk\ntLSIsrIyGhraMZvHz6wk04PN9PX1pcTIZGbfvuRyw+7dkEpCODV++lOYNQtuvTU5S3Kx4PP56OgY\noWzJBja//BgZMjuBiJdBRCR1HuXlK3j11Y8ANYWFVbhcTiBZ+8FoTKe7u5V1664kPd2EzVbImjVX\njmtfEISLOhZgMtDY2MjwsJKCgiPOrpmZhXR1+airO3hUIUWZTEYwKFFauhCbrXBsu9lcQG9vC7qM\nPJwhD+6uLrSCwKgoYikt5ZJLLz1hX7RaDaIYOWp7NBomLW3yp2meSy5buZJNvb3s6uwkTRAIShIJ\nq5WbT3BnuX//AUIhE/n5yR9uQZCTk1NCR8ce2tra84f/OgAAIABJREFUTtkwsLGx6XBg+pFZM50u\njZGRLGpqalm16oqx7Wlpaaxde9UJl+e+SFtbG4mEZUyIQFJoGI0F7N17kLKyMvR6HT5f+Bjvjk6p\n5byLToz4fEnL91/+8sJNUz2XWK1JT5bbb4ddu+BwbOcFTzAYRBBUZOdOw1CwkKg8BwmJdH0GwWA/\nCoWKSARksjgANls2Gs1OgsFhtFoLoigRj0cZGWll3bpjV2YFGB4eZseOPbS395Kebmbx4uqT9kxI\ncWb09blRq49ef9RqLfT0uI7ankgkiMdFBGG8Y2fSR0ZNIgEPfuc7tLS0EPD7ycjMpKCg4KTuvHNy\ncujpeYpDh1ykpZkpKsrDajUTDnczd+7FPZ2r1+vZeN99dHR0MDQ4iNFkYtq0aScMOO3udqHXH+0w\nqlKZGRhwc6rmxR6PD4XiaGGo0RgYHvYetb25uZkdO/bj9wcpKytg/vzqkwpihqNnT+RyBZFIEIBL\nLpnD44+/g8lkH7MY8HgG0eujU6q680UlRhIJuPtuWL48eWef4vTYsCE5q7R+Pbz11sURP2KxWFAq\nk0JDrVai1+ehUKgJhfyYTDoggcGgRqWS4/ePYjCYWbx4Odu3b6a7O4zFoqSrawtGIzz//Nts2vQO\n8+aVs2LFpWMXpIGBAX73u2dJJDIwmfJobPSyb98L3HbbSiorT97aOsXpYbOZiUZ7xh4nEgna2trZ\ns+dTDh2SCIXCrFx56VgqtdFopKgok5aWPuBIQGEgMIhSGWfmzKSl/8yTjD/4DJ/Px1NPvYRWm83A\nQDeDg0M0Nu6jtFTDN75xe0qckgw2nzZt2inVJ0pPt9DQ0Atkjdsei/mxWstOuQ95ednEYk1HbR8Z\n6WVgIMGPf/xfJBIi1dXlyOUyPvnkEEZjERqNhQ8+cLJ7dz333//V46Z4JwNbPyWRiI8JDYDR0W5W\nrUqask2fPp1Vq/r48MNtgAmIYTBEufPO66ZUocyLRoxIEnz3u8kaNH/960T3Zurzox+B1wuXXw4v\nvQRfcFG/4FCpVKxevZgXX9xBbm42HR1NaDR5RCLDzJpVTnd3HVdeOReHI5cnnngdrzcbnc7EjBll\nRCLtXHvt5ezceQCv10JWVjGCIGPXrjZaW5/hgQc2otFoeOedzchkDrKykj82BoOZUMjKK698SHl5\nWcql8xwzc2YF7767naGhPmy2bGpqajl0yIlWq2DOnHW0tnpobn6Gv//7r475v9x5543s2/cTOjq2\nYbUWEY8HCIVaWbAgh0WL5p1WP7Zv34XHY2Tu3IXMmhVleLiPWCxKONxJefnZrUZ7MVFdPZtPPtmP\nx2Mbq5brdndjNIbHarWcCiUlJRQU7KCzs5bs7FIEQUZvbyttbTsRhMXk51chk8nZurWe3bs/ZN26\n+9DpktkSBoMZp7OR7dt3cdVVXx5DlJGRwYoVlXzwwU4MBgdyuQKPp4eSEv3YDYogCKxcuYJ58+bQ\n29uLSqU6XFxwal0vzlltmjNFEATpbPVNFOGhh2DrVnj/fUhlTJ4dJAl+/vNkMPDPf56s8HsmMTjn\nozbNmVJXV8cHH2xn5859eDwhCgtLsFp1XHppFVdccRlyuRyXy8WePftxu0coLMxhzpwqnE4nTz31\nKYWF43+gOjv3c+ONc5k9u4of/vAX5OVddpQ3gNO5kwcf3DAune9CYjKNe39/Py+88BbNzX3s3HmQ\nvLxpVFcvxmJJrkf29bUyd66B9evXjr3H5XLx178+y7ZtdahUCq64YgnXXrv6tGzDAX7xi98jk5WN\n+dF8htN5gNtvX8yMz5eFnsJMxLh3dXXxwgvvMDgYBCTy8y3ccMPVpx3kGQqF2LJlGzt31pFIJMjI\nMNLc7KOsbNnYa/r7O3jnnc0sW7acoqLCse3hcIBYrIHvfvf+4+5DkiTa2trYt+8g4XCUWbNKmDlz\n5pQTGzBxtWkmBQMDyaWZUAg+/BC+YI6Z4gwQBPje92DFCnjgAfjd75JZNidRn2rKMmvWrLFqoOFw\nGJ/PR1paGprDdu6QvJu5+urxhXyczj40mvHpoQA6nY2Ojh7mzp2DSqUgHo+iUmnGvUaSYlNqunUq\nk5WVxTe+cTdbt25FJktj+vTF42I8zOZMWlvHT81nZGTw7W//A9/+9hdbOz00Gg2BQOQoMSJJsSn5\nAzSZcDgcfOtb9zI8PIxMJjtjLx+tVsuqVVeMBau+//5H9PePjy+SyxUolSoGB0fHlRyJRiPodCcO\nMBUE4ZSXpKYiF6wYCQSSxd1++lO47z54+GFIncfnhvnzYccOePxxWLsWrrgCfvxjKC6e6J6dWzQa\nzTgRAklzoj179tHZ2U96upkFC+aSmZmJxWIkFus9qo1IxI/Vmo8gCFxySRUffniIwsIjRe9cri7y\n803HvctOJBI0NDRw4MAhZDKBuXMrKC0tnVLui5ON7OxsNBr5UcGmoZCf7OwjdzQ+n4+tW7ezefMO\nAoEwc+bMYOXK5WcU17F4cRXPPLMVg8EyNoZe7xB6fXTM3TfF6SMIwlG+MWcLs9lIPN4xbpvVmoVM\n5gGOZLyIosjgYCu33DK+FMTo6Ch79tTQ1dVPZqaV+fPnjDlAf0YoFOLAgVqamjowmQxUV1deEHFE\nF9QyzeAgfPxxMqjyb3+Dyy5LFsE7xfixFGeA3w+PPJLMVrrttqR9/Mla7U+m6frTwe128/vfP0M4\nbMVotBMMeojH+9i48Wqys7N55JE/otdXYDQmI/r9/lE8njq+9a2NWK1WIpEIzzzzIk1NgwiCEQhh\ntwvcdddNWK1HZwFAUog888wL1NYOYTLlIUkiXq+TxYsL2LDhmilRQG8yjnsikeB//ueP+Hx2MjKS\naXfRaJju7t3ce+9aSktLGRoa4le/epxPPmlEknKRy/VEIm5mzNBy//3XU119elOEoijy6qtvsmNH\nC4JgBqLo9RE2btxwQfzofMZkHPczJRAI8Mgjj6HRlI3FpQQCXjo6PsZg0CIINiRJiSR5WLiwmPXr\n144Jzv7+fh599DnicTsGg+3w9aOXu+5aR0lJCZD0NvnDH57G7VZgNGYRiQQJh7u5/vpLWbDg9GKU\nzifHW6aZ0mJkcBA2b04KkI8+go6OpPfFypXJbJlUQcyJw+2Gf/93ePLJZF2b73znxEtkU/3i9Je/\nbKK9XUFm5pGc8UDAQzTawPe+9wDd3d08++zr+HwCkgR6fYKbb14zdqGB5Ppwd3c3Q0ND6PV6ioqK\njusq2djYyJ///CGFhQvGhIcoinR17eCBB9af0GZ6MjBZx314eJhnn32F7m4fMpkauTzA1VcvZdGi\nhQA899xLvPzyHjweC2ZzIZAULMFgB3PnaviXf/nGUTNnp8LAwAB9fX2o1WqKi4unlGfEyTBZx/1M\ncTqdPPvsa4yOSgiCDK02zo03rqKwsJD29nbC4TDZ2dlHFbh8/PGn6e3VkZ5+xMzN7x8lkTjEd797\nPzKZjHff/YDNm3vJzz8SNxSNhnG7d/GDH3z9tKr8nk8mLGZEEIRfAPOAvZ+v4CsIQg7wF0AN7Acq\nJUladuxWjtDff0R8fPwxOJ1J8XHZZcl4hXnzUksxk4X0dPiv/4Jvfxt++EMoLISbbkrG7yxeDBea\nyWg8HqexsZO8vPE27nq9iaEhGBwcpKCggO9+9376+/uRJImsrKyjhIYgCOTn55/0HXB9fQsGQ864\nGRCZTIZSmU5zc9uUECOTFavVygMP3IXL5SISiZCRkTEmLiRJora2Bb8/isGQPfYelUpDIKDB50uW\nji8+g7XKzMzMVEXmKUh+fj7f+c799PX1IYoi2dnZY+f5523iP08kEqG1tZf8/PHXj2TWTWKs5s7+\n/YdITx9viKJSaUgkDHR3dzN9+tTNtjpnYkQQhGpAL0nSckEQfi0IwnxJknYffvr/A/4FaATqgeYv\na+f99+G555LiY2AAli1Lio+vfQ3mzIETlCJIMcEUFsKf/5wUkn/8YzLQtb8fVq9OBr4uW5a0mZ8C\nqwnHRSaTIZfLEMXEUbEagiCO2T7L5XJyz+KUXTLo9Wj3RVGMo1anlPmZIgjClwoChUKOTCZDFOMk\n76uSSJKIIHDCOikpLlxkMtkpnedy+WffpcQ4PxFJkpCkxNh3SalUkEjEj3q/JCWmfBmJcxnhtgh4\n5/Df7wGXfO65WZIkbQNuA9o4jigaHIQZM+Dpp5N/v/JK0i9k/vyUEJlKZGUlq/3W1sKePUkR8tFH\ncOONSTO6qY5MJmPhwpn09o7X1W53Nzk5aaed5nkiKitnEIn0jbtAxWIRJMlNWdnUvUua7AiCwMKF\ns0hLU+D1do5tDwa9KJUhsrK0Z1V0priwUSgUVFdPp7e3Zdx2t9tJYaFtLOtn0aIqXK7Wcctbfv8o\nen1sys+CnsufczNJoQHgAT4fRioXBEEJXHb4NV961fzKV85Z/1JMEA5HsmLy/cdPr59yXHHFcnp6\nNtHRsROZzIgoBrFY4tx8883nbJ8FBQWsXFnJBx9sQxDsh+/Kh7n22iVTpkDWVGXFikvp6HDy5ps7\n6ejoRZJ0qFQBFi928NWvbpjyd6opzi+rVq2gv38TnZ27EIQ0RDGAzSZy/fVHrh/z5s2ltbWT+vod\nCIIZSYqi0fjYuHH9lE/7PmcBrIIgfANwS5K0SRCEG4BcSZJ+dfi5D4EngSHgHsAuSdLSL7z/wots\nSpEiRYoUKS5iJiKAdRtwP7AJWAk8/rnnDgArgGygGhAEQfh7SZL+9/MNTKVI60AgwM9+9nuyshah\nVB5ZP3Y6G1i+PGdcBccUx+ZCja5PcXwm47i7XC7++7+fJi9v8bg1/I6OfVx//RwWLJg/gb27MJiM\n4z4ZEUWRRx75HYJQSlraEZO2wcEesrIC3HPPbRPYu1PjeFYD5yxmRJKkfUBYEITNQFySpN2CIPzy\n8NP/CeQCeuArQN0XhchUo7u7G0kyjhMiAOnpDvbvPzRBvUqRIsXp4HQ6Acs4IQJgseRTW/ul8fYp\nUpx1hoeH8XgS44QIgM2WQ1tbH5FIZIJ6dnY5pyGgn0/nPfz4ocP/95CcLfmM985lP84HCoUCSTo6\nyjkej6WyGlKkmGIksxeOjqyOxaJoNClr/hTnj89+WyRJGjezkMzcEy4Yp+UL4ygmAQ6HA70+hs83\nMrZNkiTc7lYWLao6zjtTpEgx2SguLkap9BIK+ce2iWICn6+LefNmTWDPUlxsmM1miorScbu7xm3v\n7W1h7tzSKR+4+hlT2oF1stHZ2ckTT7xMOKwH1EjSCLNn53LjjetTngMnQWoN+eJkso57fX0Dzz77\nNvG4GUFQIIrDLF1axtq1V00Jm/3JzmQd98nI8PAwf/rTJoaGBARBjyh6cTi0bNx4M3q9fqK7d9Jc\nsHbwk5FQKERLSwvhcJisrCzy8vJSF66TJHVxujiZzOPu8/lobW0lFkv6OKQcUc8ek3ncJyOxWIzW\n1la8Xi82m43CwsIplz6eEiMppgSpi9PFSWrcL05S437xcTwxkooZSZEiRYoUKVJMKCkxkiJFihQp\nUqSYUFJiJEWKFClSpEgxoaTESIoUKVKkSJFiQknlm05xmpqa2Pvpp3hHRsgvLWXR0qXYbLZxr5Ek\nCY/Hg1KpnFJpYClSXIh4vV4AjEYjkiTR0NDAvk8/xe/1UlhezsIlS8aqtKa4uBgZGWHXtm20NzSg\nVKspr65m0aJFUy5r5nSYsGwaQRBmAr8naXN4UJKkB7/w/EWdTROJROjv70epVJKdnX3M9OCtW7aw\n57XXmGaxYNBq6R8ZoV8m47YHHhir2NrW1sZ7L79MeGiIBOCoqOCqa68lLS3tPB/RibnQo+t/9Sv4\n+c+huBgeewyKiia6R5ODC33cP8PlcvHOyy8z2NkJkoTV4cBot9O9axfTLBZ0Gg39IyO4lUpuf/BB\nrFbrMduJxWL09/cjCALZ2dlT9ofqQhr3kZERPB4PFosFk8l02m389be/xejz4e3ro9fppDcYxDJr\nFvf94z8yY8aMs9zr88+kTO0VBEEhHfZPFwThj8CvDtez+ez5i1aM7N27j9de20wspkGSEtjtSm67\nbf04j4NAIMDv/+//ZXFmJqrPOfC19/Xhz8pi5dq1SJLEC48+SoXJhM1oRBRF2vr7CaSnc9cDD0y6\ni9iFdHH6Io8+mhQizzwD776bfFxTAzrdRPds4rmQx/0zAoEAf/rlL8kTRXLtdgRBoKWnh5c//pgH\nNmwYN2PZ0ttLWnU1a9evB8DtdhMMBklPT8fpdPK3v71LKKRAkiSMRolbb12Hw+GYqEM7bS6EcY9E\nIrz88hscONCJTGZAFP3Mm1fCunWrT9kZ9c1XX8Wzaxe9jY1Ig4PkZ2SATManbjfZlZXc9q1vUVBQ\ncI6O5PxwPDEyYcs00vhCLlpgdKL6crYZHh6mt7cXlUpFYWEhKtXJ17Lo6uri+ec3k509D7Vae7i9\nfv7857/xj/9439gX3OVyYZCkcULE6/VyqLaJre/voMUp0dW6h4VmJbb8fABkMhklOTns6uyks7OT\n4uLis3jUKb6M3l74p3+CLVugvBzmzEkKkR//GH7604nuXYrzQUN9PfpAgLzPiQaVQoE9Hsc1MEDR\n587FPLudmvp6fJdfzqZNr9DaOoRMpiEUcjMwMEBV1TricR+JRJxwWMGf/vQS3/nOPRgMhok4tClB\nMBiks7MTURQpKCg4a5/V22+/z/79HhyOSxEEAVEU2bVrPwbDJ6dcqb1+zx5G99bS39BEji6NhsEW\ncnIzyFCpMEoSOzdvpmDjxrPS78nIhMaMCIKwHvh3YLckSe0T2ZezgSRJvP32+2zZUgeYgRg63Tvc\need15OXlnVQbO3bsQ6t1jAkRAKs1i87OXlpbWykvLwdApVIR/dxdRTweZ+vWvYSietIzM3E4FtB9\nqJm2Q62UFxaMW4PWAx6P52wccoqT4N/+Df7u75JC5DN+9rOkKPne9+ALIT4pLkCGXC6M6vEVvZUK\nBXKFAv/hGJLPCEUiaA0Gnn32Fbq7FRQULAWgru4gDQ1NdHc/j1abDyiQpBEsFhkHD9azaNHC83U4\nU4q6uoNs2vQu8XgagiBDJnuHa69dzoIF886o3VAoxK5dTeTlLRlbRpfJZOTmzmTr1h2sWLHspGdH\nEokE+2sPkeYBs86KXmckISbocroI2w1MN5kY7Os7o/5OdiY0m0aSpFckSaoEfIIgrPri8w8//PDY\nv48++uj8d/AUqa+v56OPkl9Oh6MSh6MalWo6TzzxErFY7KTaGB72otMdK55DQzAYHHuUk5ODJjOT\nbrcbSE7lhkIyhuIxskvmAGBOzyMqqulod45ryU+y+FKKc09vL7zwAvzgB+O35+fD9dfDb387Mf1K\ncX5Jz8rCEw6P25ZpseBTKvn81oQocsjloqC8nPb2YXJySsaeCwSCeL0xRkZMWK0zsVrLsFgW0NXl\np76+4TwdydRieHiYZ599F5utmoKCOTgcVWRkLOTFFz+h7wx/3EOhEKBELh9/T69UqojHBSKRyEm3\n1dHRgdpShF+hIiiKAMhlcoKigoFgGLlcTsZJ3tBOVSZsZkQQBJUkSdHDD73AUWsZDz/88Hnt0/Ho\n7+9nx4699PYOkpeXzqJF88jIyBj3mu3b92OxFCOTHYnFMBptdHWpaW9vZ/r06SfcT0lJPh991ENa\n2pGZjOS6qmcsKBWSa2/X3X47f3viCXo7Oxnp76fO68Ux+zIcBclAp7yiWew+tIdut5u5JC90zb29\naPPzp/za41Thf/8X7rgDjpUc8cADcMstySWcC6QKeIovYUZFBdvfe49OlwvH4fO4Z3CQkkWLiOp0\n7OrsRClJtLpcRA1WhrfX0NcXJDc3PlZkUy6PkkikIYpH7rZlMjkKhRWX64JZ5T6rNDQ0AnY0miMx\nOSqVBpUqmwMH6snOzj7tto1GI1othMOBce37/aOYzeovzVx0Op1s376XwUEPxcW5LFxYnYwJyigh\npM2kfvPzDA/2oNHoCCrV5FmNdIXD3Lhs2Wn3dSowkcs0awRB+A4gAO3AmxPYl+PS1tbG44+/glKZ\nh8GQy969w+ze/TT33HPduB/1QCCMSqU5RgtKotHoMbYfIRgM0traikwmEY930Nen+v/Ze8/wuq7z\nzve3y+kFpwAHvbGAJECCRSwiKUoUJTuW5SLJkh07rrFiO5Fv6s08mdzJ8/hOJhlnnDvjJGNnYtmO\nbEm2ZcmyVSJajaTE3kGCKEQ/AHEAnIPT+673A2hIlKhKyizi74vEfdZZe529sNd611rv+38JhZpQ\nVYXp6QHa26tfd9QTDAb58h//MRMTEwwNDRF9+ijt7VvnP/d6A1S3r6PMCLvHxzEEgUUrV3LLbbch\nXpv93nNKJfje92D//vN/ft114PXCjh1w662/3bZd4+JSKpUYHh6mUCicN0Gmw+HgU/fey/NPP83u\nwUEAahYt4su3304gECAcDvPEE8+Qzweoq+1AVUucPv0rFOUQmzfPhXYGAj4EQUUUdXRdBwzS6RjB\noPMNdlOvUSyWkSTb665bLHby+eIF1S3LMh/60GZ+/vPdVFYuxe32k8nESST6+dznzp/Z+cCBA3z/\n+79EliupqWlmejrGoUMPcued2xCELEuXb6K2sY2Tx3eQmAkjqgpGY4jbv/hFGs/6/l2tXEoH1ieB\nJy/V/d8upmnyxBMvUFHRjtc7d7jvdvtIJt08/fQO7rvvS/NlOzoWsGvXGVwu7/w1XdcwzRR1dXVv\neI+RkREefPApymUPgmChVJLR9VNMT4dxOOx88IOdbN68kXA4zN69R4hGk7S01LJp0zqqq6tpaWmh\nubmZyclZTp8+QW1tG7JsIRaboKZG5o/+6L8iSRIWiwWb7fUv5jXeG558cs4vZNGi838uCPClL8GD\nD14zRq5kJicneeCBxykUnAiCDcM4yPLlNdxzz8fnfQYURWFgYIh4zoBgA6tWLWPz5o3zzu2SJBGN\nmrS3b52fxNauvZ5Dh05QVxdgwYI2JEmksrLAkiWNJBIjiKJIW1sdNpuDzs7Fl+z3X860tjbx4ot9\nwLnO+rncNG1tmy+o7mQyydRUFMjQ1fUkXq+Tzs7l3Hnn7Sxe/Pr+6O3t5a//+p+Q5aVYLBAOH6e1\ntZbq6npOnjxNZ2cDXV3Hqa1dwg033c3U1Ci6HuZP//T3qaysvKC2XglcEz17C9LpNPF4iaamc70M\n/f4Q4+OnyeVy857Z69dfx/HjfUxM9BEI1KMoJZLJEbZuXf6GmgHlcpmHH34aj2c5tbVzfhyGsZSx\nsSPcddcmVq5cCcCJEyf52c924vG04nK10d0do6vrZ3zlK3dTX1+PIAh8+tN3sWfPPvbvP0a5rLJi\nxSK2bfvddx33fo0L48c/hs9//s3L3HMPfOMbc7so9vNtql3jskbXdR5++Ams1jaqquYmDNM06e4+\nTkvLUTZtuh5N03jooUcZHCxSVTUnLvPcc4OMjU3y+c9/CkmSGBsbx2KpOmc1vWzZWhSlyOTkfqzW\nKRYvbqCt7aP09aXp6FiJxWIlHp8kENBYu3bNJfn9lzutra20t1fS23uMYLAFQRCIx8dZuNDFkiVL\n3nW98Xicf/3Xh9G0amprt+Dz5Ugmh1m+fNF5DZFiscgPf/gLZLmDUKgdANNsZWTkJMFgFX19k/zN\n3/wxdXWH2bv3OLOzJdrbF7Bt2xffF4YIXDNG3pK5lY2OYRjnHG0Yho4gGPPnuQAej4evfvWzHDp0\nlJ6eESor7XzkI1tpb29/w/rHxsYol51UV7/iUCqKIsHgAg4d6mblypWoqspTT+2ipmYVDsec4eNw\nuInH7Wzfvot77/09YC7CZtu2rWzbtvXiPYBrvCtmZmDvXnjkkTcvV1cHnZ3w7LPw8Y//dtp2jYvH\nmTNnSKcFmptfmTAEQaC6ejEHDpxg06brGRoaYmgoR2vr2vkybvdqBgYOMzQ0xJIlS7DbbRjGuU7u\noihSW9vMbbct42Mfuw2YM3R6eno4ePAkhUKJW25ZyLp1111TVn4DRFHkd3/3Trq6TnD0aC+6bvDR\nj3awZs2qd6wD8mpefnk/ul5LXd3cjovD4cbjCfDccwdYvXrl6/pjdHQUTatAll9xahUEEaezkZGR\nQZYu9WKxWNiyZTNbtlzYjs2VyjVj5C1wuVy0tzcxMDByjmd7JDLIypWLsL9mOevxeLjllq3ccsvW\nt1W/pmmY5uvFx2TZQqk052cSj8cpl2VCoXNj4wOBGsbG+lFV9YJerGtcfH7ykznj4u3MEZ/61JzR\ncs0YufLQNA1RfP0wKssWstk542JwcAyHo+p1ZRyOKoaGxliyZAltbYsRhH3nOENqmkqpNMmqVXfM\nf0cQBJYvX87y5cvfo1909WGxWFi3bi3r1q1968Jvk7nF5rrX3MeKabqZnp5m4cKF53ymaRoulxe/\nP0sul8TtnvNoF0WZeDzCxo1bzutj8n7imhfj2+CjH/0gVVV5wuHDhMM9hMOHqKtTue22Wy647jmn\n1BSadu6qKB6fYOXKuegbm82GaaqvUyvUNAWLRb7slFSvMWeMfPazb6/s3XfDM8/AqyK3r3GFUFdX\nhyjmUZRzw3aj0fF5Pw6324Gqvj7MU9PKuFxzekJ+v59PfvJWEonjhMMnCYe7iUQOcNtt112R6qpX\nOy6XA0V5vQOsaarn9cv7zTi/alU7FkuaRGKcRCJCJHKEtWsb2LJl02+h1Zc313ZG3gYej4c//MMv\nMjY2Rjqdxufz0dzcfFEiUioqKvjgB9eyffshPJ5mrFY7yWSEmhp9/hzY7/ezcGEV4+Oj1NbObQua\npsnkZD8339x5LTLmMmN8HEZHYevWt1c+FIJ16+YMkrvvfk+bdo2LjMPh4CMf2cLjj+/B6WzCbneS\nTs9QUZHnhhs+DMDy5e288MJxyuXGeTHDUqmAacbo6HhFXmnFiuUsWNDKyMgIhmHQ1NR0LWHeZcrm\nzat4/PFjtLSsmR9/Z2cjVFZaqK+vf135QCCwZzwXAAAgAElEQVTAtm2reP75k3R2tpLNFojHw2zc\nuIC/+Is/vBZYwCXMTfNWvN9y0wwPD3PkyEny+RLLli1g1apOHI5XVFjT6TQPPfQLIpESguDENLMs\nW1bDJz/58XckN385czXkqgD49rfh5En44Q/f/nd+8IM5Y+QXv3jv2nW5cjX0ezgc5vDhE6RSOZYu\nbWbVqpXnSI53dZ3gl7/cia7PZeqV5Sx33bWNlSs7L2GrLy1Xcr/rus5TT23n0KEhRNEHlPH7TT7/\n+U+cowf1WoaGhjh6tJt8vkRHx0JWrux83VH/1cxlmSjvrXi/GSNvB8MwmJiYIJvNEgwGL0iw53Lk\nSh6cXs2NN84prt5++9v/TjIJLS1zuyrvt+Cnq6Xf34p8Ps/4+DgAzc3NON/nWRKvhn6PxebyBTkc\nDlpaWq4dmb8F14yR9zkzMzPzifuampro7+uj5/BhNE1jyapVrF2//pxdmEvF1TA4TU/DsmVz/32n\nO68f/zjcdRd84QvvTdsuV66Gfn8zstkso6Oj80cvkiRxeP9+Rvr6cLrdrNq4kY6OjvedA+PV3u8X\nC9M02bVrF/t37EArl1m7ZQubb7zxipRsuGaMXEbMzMxwaM8eJkdH8QWDrN2yhUWvUcUyTfOiDEyG\nYfDsf/wHgwcO4AMU0+TAwABtVVWsPpsldDKZxKiv5/fuvfeSn1teDYPTv/0bvPTSnAPrO+VnP4MH\nHoBf//qiN+uy5lL0e6FQ4PDBg5zu6kK2WFixfj2r16w5J1T/YtB98iQvPvYYFbqOCERKJRLZLGtq\naqgLBimWywzF47TfeivbPjDnP1Iul5Hl98Yx/WKNLReDq+F9f68xDIP//o1vMPz887Ta7UiiSFTX\nCW3cyH1/9Vd4vXMCm4Iwlwvn2NGj9B45gmmaLLvuOtauW3fJx/VX82bGyKXMTbMB+J+AARw2TfPP\nL1Vb3g3JZJK9u3YxdOoUNrudzo0bWb9hw5uG2E5OTvLY975HgyTR4fORmZnhmR/8gM133cV169YR\ni8XY/cILjPT2IlutrNq4ketvuAHDMLBare94oOzp6WF07142trQgiiKR2Vl8iQSz09Mcj0QwVBW7\n2005FqPn1CnWXHdhWSyvMefz8ZWvvLvvfvSjc/lqYjF4k2Pna1wgpVKJn3z/+1ijUZZUVaEpCscf\nf5zxkRHu+tSn3vFknUwm2ffSSwx2d2O1WuncuJENGzeSy+V48dFHua6qCudZv4DMiRNEe3sJtrbi\ndjhwOxz4PR727txJRSBAz5EjzE5MIMgyHevXc+O2bRfsU6CqKvv27OHEvn0opRLNS5Zw4wc+QHV1\n9QXVezVQLpc5sG8f3QcPoqkqS1evZtONN85P8hf7XsA7Mg727NlD969/zQpZJnc2KWrA7Sb80kt8\nx2rFLggYhsHizk5i09OIkQitVVUIwMAzzzDS18fvfvGLV4T0w6WMphkDbjZNUxEE4SFBEJabpnnq\nErbndRSLRYaHhykWi9TW1s4rnabTab793/4bybFpPA4njSEvJ594gqnxcT7x6U+/4WD28rPPssBm\no+6sop7TbqfC5WLv9u00NDXxyP3302Ca3FRfj6Jp7H/sMR74wcPUL+jEZhPZvHklN910w9s2SroP\nHWJBIDDv7R1NJiGXQ52dxWaz4auoQNB1+gYG2LtjxzVj5ALJZufy0Dz++Lv7vssFH/4wPPoo/NEf\nXdy2XeMVuk+eRJyepqOlZf7aGpeLgydPMr5x49tOIqnrOiMjI/zixz+mWRDYUF2Nquuc/vWviYTD\nNC9eTNA05w0RgGg0SqvHQ2RigmAwiGkYjI2OcXzPIR579hBtVX7WLK7F5bBz6Je/ZGRoiKWdqxga\nOkMg4GXdupXvOEfJE48+SvrUKa6rq8MaDDI5NsbP/s//4bNf/zrBYPCtK7gCiUQiHD7cRSyWpLW1\nnrVrV7/uWMMwDB57+GGUoSFW1tQgSRLhQ4f4yenTfP4P//Bd+/RMTk4SiUSw2+0sWrSIQqHAM8+8\nyOnTZxAEWLasmdtu2/a2IqV2bt+OmEhgdblodTrRDIOpVIrB2VkAvvh7v4coiuzbtYu+3l6+dNdd\n8wENnS4XR0dHGRwcfFPhzcuFS5mbZuZV/1QB7VK15XxMTEzwox/9kmLRdTbfxAE6O+u4++6Pc/+/\nfZ9jByZoqlpCrihzsG+WkL9ASTzB5I03zie003UdQRAQRRFd1zkzPMzNr9EMcNhsWFSVXTt2UKUo\nNJ/9bjadJjmWwNAseFa24Xb7eOqpY4yNjfOZz9wz7+ORyWQQBAGP5/WJssqlEpazhoum65QUhclY\nDE/JoHc8hSwXMfU8slNgsLf3vXyc7wt27IANG8Dtfuuyb8RnPgN///fXjJH3kvDAADWvmZgEQcAv\nSQz09xMIBM77Pr2a0dFRHn10Oz3dYVKjI8zUuqhwuaj2+1nV0sKB/n5Mi4V0Nks8kyHg8SAIAjab\nDT2XQ1PndIUGBoc4dWqSWAp8rkbOzBQ43N/Povom3A6BYy8+xPU3FejoWMPUVI7Dh3/BPffcxOrV\nq9B1/S0XJlNTU0z19LCxuXl+kdQYClGenOTIgQP8zjvxsr5C6Ovr46GHnsVma8TprGLXrkkOHDjF\nV77yqXMiXUZHR8kMDbH+VUZpW0MD3ePjdJ84wYaNG9/0PrquI4ri/HPVdZ1f/vJpjh8fp1x2Ui5n\ncTqfAjTc7g4aGuay7g4NhfnBD37G17/+pXN2vRRFIZ/P43a753cyZmZmEHUdn93ORDLNeKpESgMh\nV0JJpDENA0mWETWNSlUlMjlJS2vrfJ0hp5Pw0NA1Y+TtIAhCJ1Blmmb/pW7Lb9A0jYcffgK7fRmh\n0FxOGdM06eo6ht+/g5d3nmBBzUrcjjmlRLejgqnEMDZrlGg0isvl4vnnX6K7exhRFFi9egm33HIj\nNoeDkqLgeNU2nWmaKKZJfHKSJb5XJOH7+oZwOmuoLOUZHR1kcHCWRCLPyy+H6e8fZ9Om5WRnpklM\nTIAgUNXaygc/9rFzXrbFK1Yw8Otfc3piihND05yJxeiNxGkTK6lERjY0RMFCX3Qat9BFX18fy5Yt\n+y095auPZ56Z29m4EH7nd+aOeXp6oKPj4rTrGufi9Hgols8VIUun0xw5ehRhcpKe3bvP+z79hmQy\nyY9+9CQeTwcWQaW1xodhKDy5t4ffu3UtTrud1HSU/eGXiY7EcBwZRVTiLG2qweX10pNI8OHrrkNT\nVU6eHGBoJMlALIFdKJNUXNitTQwpORprJURzOePjedav9+H1BikWq/judx+muXkX5bJObW2QD3xg\nM21tbef9rbFYDK8gvG63ttrvZ2R4+OI91MsETdP45S9fJBRaPZ86w+sNMj09xgsv7ObTn75rvuxU\nJIL/PMZcyONhYnj4DY2RsbExHnjgEbq6BnG57Nx++xY+8Yk76Onp5eDBSbJZF+PjMQTBSjQ6RbEY\n5qtfvWV+h7qmppVwOENfX/+8Ubl71y66du9G0nVMq5V127axYeNGbA4HY7kcSjJFseQgYA2gaDlE\nQ6aoutm1aw+yZKN/fAJ1ehrLsS4aGhqQzxozJVXFd4WkCrikalmCIASAfwF+/1K247VMTEyQy1nw\nel9JbicIAqHQQl54YQ82Rx2qbpzzHY8jxGAkxcTEBN/61nfo6SlTX7+FmprNHD+e5oEHfs6K66+n\nPxI5x2lrbGaGytZW6pubyeTz89dTqSwOh5sz8Tg7dhxlclLCNFtJp/309mb5/v/3PZS+PrY0N3ND\nYyOuqSke/eEPKRaLZDIZ+vv7qfD72TE0xo+fOcDIeJzJsXFUI0TEEDmTijGWTzJkqjgDy7CLdp59\n5BGKxbeXVvua49m5mCZs3w633XZh9Vgs8OUvzznCXuO9YcWaNZwplSgpc+kWSqUSu3fupFwuc8eq\nVdzQ2Ih7epqfn32fXsuJE93oeiUejx+700VJVXE7KlC1ACNT00wnEuztm6W+fgtWpx8tMosvIxHr\nHeLUkSP0ZrM8e+oU2w8c4OkjxzmaKKDIrcSLBpJZj2jYKJQEhsYjiKIfw7ATj8eJx+McO3aCkyeT\nqGotTU03UyjU8cAD2xkcHDzvb3W5XBTP865mCgUqrsIEbNFolGJRmjdEfkMo1Ehv75yY3G9wezyU\nDOO1VZArlfC+QWLTiYkJ7rvvG+zcmUcQNpNMLuV//++X+OY3v82BAydIJg3C4Sx+/0L8/mYcjkaS\nSQs7djxJPp+Zr8du953N+Asv79zJ6eefZ31lJRvq6mjSNF740Y/4x29+k2hfH7KucyxbYtYQGSnn\nOGNqlG1uDM3K8eMjFIsOFjauIiG7mIorHD16EoBCqcS0rtPReWVo2VxKB1YZeAj4v03TjJ6vzDe+\n8Y35/9+6dStb366k5TvENE3Gx8eJRqO43e6zf7Dnzxej6ybVdTXEhmdxOxxYpLlHODw5wWwmzNHH\nH2dkKIGtJorT6cXvD1Ffv4SxsaPceGMV6dWr2dvVRYUkUTAMbLW13HnnncRiMX62cycWoCYUwuNx\nEZ4KMxAvoOmNlEsK8dgYghCjvyfFMkuWniOnqPJ6qampoTEUYnJggP/nr/6K4e5hBNFFPJ8jP9pP\nR20b2dkJVDVH1tmM23SimTkqPA5S2LBZXFRVefCqKsPDw2+a9yIajfLii3vo6RnB4bCxefNKNm/e\neEU4SL2X9PaCKMLSpRde1733wpo18M1vwvtciuI9oaGhgdYNG/jJT3+KV9cpqSrpfJ6Pf/CD8/4d\nDVVVJMbH6e/rY/WaVzLiplIpDh8+TjJpEgjU0tDSzPHwGF5NRZYcZPIlukfGmcroJF76D0rRMdoC\nlRiKzmh0gmU1Xhb7/Zh2O8/t24daLNPkczGWPUPaMHAhYpRLSGYRwS4xnUxSMvI8/OPTuCtqSSZz\nmGaZcHiK6uoW4rOThHt6+K9/uYcvfO3LbNi8+ZwjppaWFqSqKsajUZpCIWBukhrNZvnYWxxDXInI\nsoxpvv7EX9c1ZFk6Z4eora2Nl+124pkMwbMOq/lSiYiisGX16vPW/9BDPyeTqaW5eRUALlcFHk+Q\nJ598jIULvfT12QkEluFwFLFYRFLTB7AluxnZcYxY726aVlzPlls+RTI5TS5XxfDwMHt//Wuso6M8\n9MwzxJJFrBXVpNUifT9+kGWhKqoEgZToIitUI8kSjgo/BSNGsVTG6nAgnZ2D5PpFpG1O9p0eo+y2\nkTAMOjZvJpPJEAwGL3ul7ksW2isIwqeBfwJ6zl76z6ZpHnjV57+V0N5yucxPf/o4AwMJBKECKGK3\n50kkMixadCsWy9yRimEYjI/3sWFDkK6uQTLpAEPdvaiZDNligdn4af70ng1QUJiIqCTLRc6YBjd/\n5Ks4nW4ikRGWLRPo7OxAVefyzPT3D3D69CSnTw+QyZSp8PrJnOkn5BSorA5ysH+K8ViI6IwFi1AF\nTCMQRdbSLLOkWFJroX1hPQW7ncWdnTzx3HMUU2U6l2xmcGiY0XA3Bd1GwCrhFgqE3B72ZSyIYjM2\nJUPI6yaiaYSaqvjIpkrsDgviokUoqRSYJsvWrGH9pk3zjlyJRILvfOchoIGqqgZUVSES6WfFCh+f\n+cycjnk+n0eSpHcVAXAlh/p961tzEvDf/e7Fqe/22+Gee+CLX7w49V3OXEi/a5pGsVjE5XK97cH2\nxeeeo3fnTgKSRCKT4WB/P0t9Pj60bRvCq+oYnZqi2NREbW0tFpuNYlllz55eIpESp09Hqaiw0NGx\nFJvNR/+xo8xMnSBUkWNX1xg6y3E6QviVLKaRB+EMzUaKFZUuJJ+LrmQCeypNNFsCuRK15OOMmWOW\nhciCG6tUwOoyMU0rboeL2mATJc1gJpukusZCZeUCKuyThNQ0DZ4AqdQQK9avIO/389mvfe2crLGJ\nRIInf/5zMhMTWEURxWJhy0c+co6RdSl4L9530zT57ncfIJ0OUln5iix7ONzNli0NfOhDt55T/vDh\nwzxy//1IuRyVlZVYKyu55c47Wb5ixXnrv+uuP0DX1yFJVmKxEaanx8nnC2SzUbzeEvl8iJqaG5Bl\nHVE5iWd2FK2YQLYHqa+pZbY4RS5YjdPjZMOGG0mnpzn5xP1stluJF0ExKgjno3hQCBplAk4n46U8\n42VQhEbyqFgcFloWLmFkfIwKOyzv6MR0V9B23a34fFUcPfprBCGBx7MAp7MKyNHU5ORzn7v7kmd3\nvixDe03T/Cnw00t1/9+wZ89+BgZKtLRcP38tHp9CFA8xMXEIm62eiYkpentPYrUmqK29jZtvXsMP\nv/8Y+WwS0TApFEZZ5stjUxRGkrP0nB6gxuZCLubY9cR3Wb31Ho4ff4lw2MuhQ1GgQD4/ja5XEJ8Y\nYbD3FIh+rH6DO+76Q6LREQR3klsWtfHP/7wdDBHJmsZh9WCUl1Cil5I2hWy1cTpRYHg2xX/0JElO\nT9LR2MJ0JIpa1Fni9NOfmiFTqkQUJIqCgVqOEdUVvBYXlS4PdsNCfZVKW0MNP921i3XlMstbWxEE\ngbFduxg5fZrP/sEfYLVaOXDgCLpeTV3dXLSBzeagpWUVPT376erqoufIEaJjY5iAr6GBQDBINpGg\nsq6ONevXX9WhhNu3w5/92cWr77774K//ek4A7TKRhbis0HWdPS+9RNeePaCqyG43mz/4QVa9wYr2\nN0QiEXpeeokNTU3IZ3U8PA4Hx3fvZiYaRRVFDvePM5PMMjEZpnFBLb/TuYJ4NssvDgzTueEe1qxZ\nQDZ7gHzewvHjp2hsrCKSChPN6pwcLlDMC0iEKRdmKOomAamRvCnjEdOczsDImTGcskiDIFKnqyS0\nCEkzRqVYS9ocRhHasDga0LQ0VsspRLEShEpMI0GpcJpQ6HOIIiSHeti0eh3ZXJpyMYdQKGAUCpw8\ncYKNm15JvBYIBPji175GLBajXC4TCoWumhQSr0UQBD75yY/w7//+GOHwDOAAMixYUMHWrTecU/al\nnTvpev55rquuZspiYSqTYeMNN9DxJjvDqpqnq+s/mJ2dQtMETNONKMroeoL6+qXoepx4vJ9QqIHk\nRBcSWTwUcBamiZ8ZIye5mEpP8cWv/z2apjHU34OezWOIIImVFAyVxRYLiVwKWQCzBK2yg4iWx2JM\nU2N68di8BNQsw0aW4PJttN/yCbzeALquMR7uY+9zj6ApLqyOMF5/LWs3X08kIvP887u4447L12H5\nfS969rd/+0/4/WuxWu3ouk4mk0WSJJLJU9xxxwb+1//6HkeOjOJyBZBFE6us07QgxCKfwJJAANM0\n6RsYYKFpMjg7S05V8WXLOKx+MuUiWk0zR2dnKFgbaWxcQalkUCwW6e3dR0Acpb6YwyFUYbd6GS8l\nidn9dC7fSD4/RFGLMD5UQDAMSmI1Lscq1LxJXh3BL+ylMeAn5F8DRZPj09NYDR3JWiYkagQDNdjT\nY0SzGaapxmcTiBUTiNZWogrYXVYULUltdYk/+9RH6J+ZQUmn+eSt564cjo2Nsf5Tn2LlypX8y7/8\nEE1rxeU6NwZ/YOAQ1uIprq+vpy4YZDoe58lnn8XncHDLtm2ki0Uius7Hv/QlWl/l6f1artSdkUwG\n6uvnVFcv1sLDNKGzE/7xH+ecWq9m3k2/v/jsswzv2kVHfT12q5VcsciJqSlu/sxnWPEmZ+R7du9m\n/PnnWXI2ag3mji1+9vTTWGWZhBHC62xmNpnj9PgI7Qu93La+iVgqzfb9cRJZg9b2dmqbmjndP0B/\nbxflcpigv5NUWiGf0rHiR9OHsOpuEkzgoIhMjg4pS61FIqsWKQkmrS4XnrxC2ZSIGAYDgoeotIy8\n4EEkg0NKsLa2wGzJR1FyYbNWMJ2MYshBfG43jaUx2hrqGR3qxuuw4LTZKEkqTR/axv/7rW9dVmJX\n5+O9fN8VRWF4eJhcLkdVVRVNTU3n7JxNT0/zyD//MytDIfb1DDI8mcVEZjYf43Nf/wLXXbeGioqK\nc0Kfjxw5xn/6T//A/v3jGEYQWIGuC5hmHEGI4vPlWLx4OUODh3BYQIl2sUG2sMDuxuNyYmDQk0py\nwuZmQdty5OQMUzOjeAs5DEOn0hYgbuqEVIO0XiaAjF0wkCWdQQyWWaxENAHT7qKmOoi9JsBssJWN\nN30Bq9XB0b2/YmLfk6gzCaoqFpESTXLWEEXRzbpbtlBbW+Bv/ubrl/RI/bLcGbkcME0TVVWRZQuT\nkxG6uvrRNAnT1DGMYdrb/YTDJZoaN6FMnsCRzVMuZTh66gC9QSufvOVmGmpqaG5oID44SD6Xo1KW\nWbCwkfFwhGQpj0fzIyWnEGvbgCq8XieRiW6MjIOyXsJPDk2AnFrGYYpYUmOUpmooGzPUakk0vUhZ\nUVGYIVocQCVEwAmLqgLM6jVQglQyCQ4HNQ4n2bxCSRtHyecpqWV0yY5NhCyQM2oRdImg30dNvR9L\nxXIQp8kEg7Q0NmIdG0NT1XlPbICQy8XE8DArV66kstLHyEjmdcZIbHqQtRVQf9Yh7tipU6wLBsmX\nSqiFAgvr6vBlMrzwxBPc+yd/ctkoQF4sXnwRNm68eIYIzO2G/OVfwv/4H1e/MfJOKRQKnNyzh02v\n2t1wOxx0hELsf+EFlq9Y8YZ/Y7+ZAJPZLOMzM+iaRl0oxJqVK3lwzwlcVjcZo0heN1jV1oEoKjyx\n9zh6MYGZDdFi8eLK5ejfsxvNNFnRVE+pbMcUfExNjGKjGsG0YFKJwlFaKaNRppISVsOgqJl4JJlW\nDGKahiiCVZAIaDIWs0hemMQjSdj0DB6LhZZgiOHBPJK0DJurFqdoRSZDMnqSSjFLz8kwTdWLaaqu\nBUz6Jkd47GdPIOs6C9vb2frhD7/pAuBqxWq1vmlk4NDgIEFR5Je7D9M9KmKR/Pg9DmZnU/zJ//VN\nFtQ3YLGbNLUGuP3DH6BzzRqefXYPXm8zdnuSfN6NaZYAA9MEq9UGxTNEex6jXpSwIZIxijgMsNqt\n2GxWTMOgCjDyCezxCOsqKnkmNolpmjgMk+lynqhhYlCBhAddKAASSS2PRVQoGDI2r49gbQO3/M4W\nmlpaeGZgEE07TU/3MOXhwwTLOQRvNV6HD59pMqDEcPqrOX7wGJW3L0LX9cvWv+99a4zE43EmJyfx\n+5309XUxOJjC42nEYrGhqkXi8RF+8pNn0PUqlEg3gayKQ3SStWkEchr2yRj7n3wSR0UFeiBATSBA\nPJej0udDtlpxhfysaF/M4vZ2hh+MkCjLKIpOLhdHLCvYRBBKVnSLjK6LxNRJZMPEgsnxgV2Ish3d\nZ1InlKmwOvE4fIyVswyRo85fQbAySMjVgZ6XkHWdBfX1ZGIx7KpKtlSm3jCISQ5GinFagrXMKgqi\nv5aK+jpMVAILa7nppg8yNTVAqMnFrh37KZzoZsg3TEtzDe3tS5AtFgrlMrVnNRmuv341J08+idcb\nnE+FHoudQTJTtDYsBuZWmflUimAggKEo5HI5AIJeL/3j46TTaXyvCmG+GrgYUTTn49Ofhv/yX+DI\nEVi79uLXf6WSTqexC8K8IfIbfG432fFxNE17wwF30eLFPP797yMePky1JCECe3p7Ces6muRFszvR\nBYNcOcpYdIqQLJNMjiKLJVRZQ7J4kUURl6JQME1SeoqGyjqiKR3RBEMTEUQdTc9TiUoVrcQ5Q4Ug\nY5MgqecQDA3ZbkHWdUoOF0Grk2Q6hWb1ssTto1ExqLBXkMqN0dt9ipJai84siXSUhgo/ddXtzGYl\nNKWbqoIXWZAolnIMTU0wnM9gdfowpmeobW7mV/ffz6aPz2X2drlcLFiw4KJL3l+JhMfG+Mnz+zg+\nnCHoWUbQ6+bkSJjpmQTVgesQlBzlmR5Od59gaud+ggvrSUoOBPtiAoEaZDmIrnspl3VMU6dCHaBR\nKNBksRCwVxDREmQFAcFQGElME8o40SWJWVXBYqo0WG2MZ+KY5RwVpoFThLKgoBOkLNpxGAIl0Y4k\nKJRMC7NGCY/Dg7uxDavXQX1DA4VymdbFi/jSfV/l37/zHfJiiq7JCVR0VK2MRbbhRyCHRi6TIhi8\nvDMEX9Z/lf39/fT0DGKxyHR2LqPlVeI07xbTNHn22RfZvfsU4KNYVNi581c4nctxOCopFmdR1Uk2\nbdrKSy89Tz43ha9QxC7ImIJJPj/FAsOCXZLAMGj3eBjO5cjabJSqqhhXVbymSeOqVTQ2NXHyxAnG\n41FKzgwxdZR4JkON14OBCqiIsoN0OUGTIZDGhYwfmyYyoUWJzQrYRUAuYDVlqkSRWSNJzmbhhiXr\n6BqMkso4sHi9LKpvIO52c2rgBG5R5XSyjxkgK/lJZ8oYQp7qQI7K8gRSMQHhKV58chxPTQ2xmElz\n8030hzNYHC5GRpOUyt20r1jGjGFw69lt79bWVu6550aefvplVNWGYajU1bm54+6Pkj5+nFpAkiQM\nQUA3DEqGQe3Z7QLDMNDhsrXK3y2mOacv8hd/cfHrtljgz/8c/uEf5lRZrzGH1+ulZJpoun6OQZLJ\n53FWVLzpZGuxWMAw8JkmLkFABPKKwuxUEtXbjM93HYJgMnX6B/hy07S2LEVIwBq/nz2JaUazAkuN\nShStQElNUN0gkSmkyBXtaFoRq2ggiRZMJvHhIUuGGYo4TTsO3Ypm6qRJU7JYkG020ppOMZcmLQnk\n9CILlTKSrqEbJUplFUG1YpoKqpDA1HVS6TiSK0NNXSXZdIiIojKVmoSMhVhJxu3ZgKrqvNQzxebr\nNPIDA/zg7/6OG1asoARsd7n4wB130Nraelkkx7wUvPTSbvbuH6dvsgKLuJBMoUgis59iOYNdaEMW\nLETCB+mwB/D6O0kVEixwLmRPz17MVjeBQIh0OorX20wmk8HIj1EnGtiMPA7BCloGSyaBgoVpw4ti\nQERTsEkGstVGWVGIzUaxCRqdoozdYkUzNERd54ygkKSSpJhCcjiIqxYykkxWVVkoiWhjfcQrHPzP\nfx3GdHlZ94m7mJqaYmR4mGJvL4VCDvR44pQAACAASURBVJ8kMZMfwemspWyqRGbDZNQzzI5U8+D3\nvsfNH/7wvDDnu8E0TSKRCLOzs7jd7ouWrfiyNkZ+9KOduN116HqB/fufYtu2Dj7wgW0XVGdvby+7\ndp2mpWUTojj3AIeH00xNncJm81NdXUFz8034/SFqavpJJg9SLGTw2wJk1RxeTUMWSviddhK6Tjyb\nxVR1uuPjNK1bT+PyhQQkiUAoRHd/P0dOnKBzZTtdo7PYpBpsgsHkdD9lcwJDKDJSLFBrqhg40HGh\nYgAZgoCu6SiYpA0D05EjFPSwddlKln/iExiiyNHRH6OoZ1CKFmbiTpwOL+1tHhbWbuUXz+6g2bYE\nUQiQKImkc6M4ZnpxBFvw2GQq8grjE8fYdXCS1Rs+RCiUx924hGf3bceplBHDRaJeJ/fce+85wk9r\n1qymo6OdaDSK1WolFAqRSqV4sLubaDJJyO+ntq6Ok4OD+CsrqT4bTjgyPU3jsmWX3Jv7YnPqFFit\n8AaaUxfMvffOKbIODsLixe/NPa40XC4XyzZsoHvvXpY3NmKRZYrlMj0zM2z+5Cff9BhwZHiYVU1N\n1K9YQXR6GsMwSJ6J4J9IkZrtYnD2DJq9Ek9JRdeddI+exCaVGU0o1Msi/gYrtdVpxsOnKSglwtNB\nFE2jrIbQRZmSOoHFakFmFgs+Biiisoq8EMWPlwJ+VHGC8Vyc8VwOpygiSBJWj59K3cBrTKGgIygB\nPHI9skUnUshTxIIplTEIMRt3kDcLVHrtiDYno2ULsrQMSZaxih7KQoqgbxlPvXSANsoEHA6WNDSw\nv+c0+/b18+KeYVatXc6WLSu55Zatl33I58VC0zROnDjB/fc/gte7lIpgPWMjA4hUoputFIonwGoy\nFttHg1lEEQUSagRTmJuAl9S1cXBqGG9NBaI4Qyz2MoIQwEEYl0XFY9FY3FzFyMQZdLkKr1miWzPw\n4aBSqKQsZpiW3BSdfiYK06yw2EhrRSSLBUGQ8UkSvjJYvH6mE3lmNQUDkUK5zCKLjLWUQ9M1vKJB\nKl0k5vVQ2D7KsWN/S5U5S6XXS3VtNYVohha3m6lShCldBynGx29Yzd0rVzKTSPCL++/n0/fdR+js\n2PxOUBSFJ37+c2b6+vAIAiXTRKis5O4vfIHAG2izvF0ua2OkpWXdq6R2G9i58wCdnR0XFJVx6FA3\nfn/rvCECsGDBYuLxJG1tS6mrWzB/PRiUsNuKjOciSJkMZVGlQk9RYdPJWixMY2UkY2AT7FQEm/D7\nNzCZLDFWHEXpPc3E2ATLmxv52NpVmNJJzsRGMYmTyMexCjUogpuwMYZMAisaCgIGBQRC2JjGQCWE\nA1MroydVhnIZzujDGEt6qJFEvnzzFiLjE+zbe5TBrl+RtDmxiTp79kaRZR9L2hZQV9mEbpocPx7G\nlXajlcbxeRsYj09QLqSo1ksYA8fojp7hjF5Bw4JPoKpFZmb6yAje8+bBsNls51z3+/3c9fu/z3O/\n+hUD4+Oofj+Jxkb8lZUMRCIUAKm6mk9+9KPvut8uV35zRPNeucG43XPS8N/6Fnzve+/NPa5Ebv3Q\nh9gpSRw4cADZMDBsNq6/8863jKYxDINMPk8qk2d0Ypp8YpbkQD8eawVLQ40UYkX6wwfRdJWiYaJI\nJvaqRmZVBYeog2wBBOyeZYQzk/gdHTisdkxGsbk0MNOo+TOAwgQ5NBqQkUkKVajEcJgapukgKohU\nidDsdGKKIqOFHJOKgEeuQtVMBDRspLFgRzcL6EIew2xFEu2YuoaWzWN11XE6sY+C2UbQ2US5NMtU\nKoXbU2BJ8wqOHT/B0sUhZrNZ/vHHP2Fi1kVLfQdW3YbLtZwXXxzAarVw001bfit9dinJ5/M88MAj\nnDoVJRx2Y7XOkkgOYXV40PUWbJKdUnmEkjqDqruxWA1EqiiXixTVceLxKLUNjTgLEZLJIQShhGFE\n0PUydinHkvZWxJKTiako06kSmugki4scC8khMkUEUxWprGjAI9mZjIVZK4kscLmgXEYBSi4XFRQZ\nnemmFj9+zYJipoiTRETAanVRsNgYLsvUVjUhOpwsqGrgyOAwjmqBZYsXUDQMssUiw8kYUdVAdbi4\ncd1KFjc08PNdh6lwWvF5rBzet4/b77jjHT/Hfbt3k+ntZeOrTinGo1GeevRRvvDVr15QH13Wxsir\nVziSJCNJlYyMjF6QMVIoFLFYXlnpZ7NZYrE4ExOjPP10gvXrt7JwYSsTE730H32elWaeoWor01Pj\nVAIFWWNIl5kq+jGFSmTZx4yYx64JZPIqiTMaIyOT1Nd3kCvYiScreObgEKsWVTOd7CeRnUWjllK5\nhNe0kKOZGCJBSoCOieWsLK5ADgsFwEkAm1BCsBpEyhYeeegpbm+r42SyRD5foFTO4TatTEVjhGxB\n/KoLpaAydOg5Bvx1hKoWY5MkKmw+bFYoiils5TgdbgfDmTKJxBSUBSo8DuLRfmRdQdKLZDJ2Dh48\nyq23bn3L59rQ0MCX7ruPdDqNJEk4nU7GxsZIpVJUVFTQ2tr6nqREv9Rs3/7eHNG8mq9/fW7n5Rvf\ngLq69/ZeVwqyLPOB227jxm3bKBQK5+TzeDN0w+DJwyNYShXkMzLZrIC1ZMORzyCpcTyGTJVpY6I8\nQ60YoNFbRb6g4G9qYjo1w+nBKNZRDV/VMtzVJqJUh6JqKGINHr9ENuNEJU/AGserlAihYDDJjCER\noRkoYTXCLEGk0tDIFjTyBjgFHxWym6QhIqt2RMGgZKbJkyJLAAQwzQJxNU5AgArBRiajooserJYS\nZWZQ5CyVNpFGf4BCIY+ha+waHWVxMEgiY9LubSQTjxPRdTRNp6FhBbt3H+WGGzZdle/mq9mx42Wm\np200N19HONxNINDCwMAEgpCistLD7OwUiBqKUkISFzClzGJXEggIIHjoPdnDiZEB8vU+fL520uky\nNpuJJJVQtQFeODZEi92HC4G8IZI1ZBJCEMQGTMFFUW/E5BCFgoTDBjZRJiNJOOx2VFnGabVSKpcZ\nz+VosVVgKllMM0clOpUYRHQYyeWosbloEiA1NcoZmwVf7QpERUNRXYi6TtDlQli0gMV+PzP5PA6H\nA0Vx0D9uw+2oIRIv0j02zKnUk7QsXkxDQ8PrEgjCnNFeLpex2+3z87BpmpzYt4+1rxmEmkIh9obD\nxGKx86ZPeLtc1sbIazFN44IjMTo6FvLCC2O43T6KxSK7dx9B1z0sWtRCdbWP7u6XGB/fRVO1nUY1\nS2swyKr6eqILGtg7MMBMsch02ka9pQpZspNXRLJSiHzBwDYVpVgUkFWB0uwpymWNrtNnWLmsjUd2\n9rC0aTUzgUnUeABF0Mkpkyi6j2lMZCZwUsREJss4ZVw4kRjBxCtqiKZKSdVQNSf5eJGu+FFsliok\nuYJ0vgi6gls0mJLtKIoNxZjFqUehFKEw20tRtNBicdEYsiFkMnRarZTLKnZBpEFR6DPiCJqOv5zC\nY6/E4YLiSDfPbs+/LWME5ozHVzunLly48IL66nInk5lzLt12YSeHb0llJXz+8/Dtb89F11zjFWw2\nG7IsEw6HKZVK1NTUvOF2sa7r7Np1lLqFN3Fi9zFC7ioKmQI5qZUUgwSTJQTBjaDYyJoy6ApyOkXA\n4+ZMbJKEpwI0D26PhQqryvjkJHUt7dQ2NrNr10HKZRNFMbGj4cOBEwGNIlb81FKmxBls+BBIEURH\nM0FWVQwqUAUPFgQGdA8OcrhMgTIOMtixImAYBZzkcKIScHqR/H6KhkCF1Y6VBKrSjd8qIEoWxlI2\n0iPTOANWAhosr6lhNBFHKSsY+SLlconp6Wmqqqool01KpdJVd3z6akzT5MiRPmprNyGKMpCnp+cI\nIFAuF9D1FLqexeVyUBS9mIZMUqkAZglhx2v1kDIzTGRzWKY9OBwNlEppHA43+fwU+bxMxvCRyRcI\nSHYyCGTwoZo1YEYBKwJ2wIkpVJHMjrJEsjOum1T6fAREkVQuR388TlmQcIoeBCFJi+ihqGfJAnk0\nWg0DR7GATZSoFi2YmslA70EM7wJGJ8KECnnaq6tp9vmYSaU4rSgUTQtOsZ4q39wCvqwWiZ6JkJlM\n8tg//AMD0STBRW3c+YmPsX79WpxOJ4cOHuTwjh3kUynSpRKNixaxccsW2pYsQVUUrOfxybKIIsrZ\n9ArvlsvaGDEMY/48U1XLQJzFixe943pSqRS7d+9haGgSm81CsRhmbMwklVLIZstYLGmWL19KR8d6\nNE3jxRd/xoHnH6etWCSZyTBjtSJ6vXxh40YeONVDSQjgDjQyFIljCDaCngBZtUQqNY2cmCCo5MgV\nZASxmmSpxJ7uIRQ1QTrbxVRKQzcdSKaAIFgQ8KIQZIwpKsmjUKZIDV485NERKSAZYBcErIZGLlGk\nrBuoehmHmqTALG5MdBSihgtLsQqraANitJo6IV1AlOyMKRn6S3HSmpVlpsmwpqNbXAQ8NWRzaazF\nFIam4nb6aKzxU1dbQzwT48j+3fzd3/0zVquF665bxsaNG963jm+v5YUXYNOm345k+5//OaxeDf/5\nP8PbyDz+vmFmZoYHH3ycZFJAFO1oWpzGRhfZ+CzDw2dwB0Js2bKerVs3UywWyedFauubGWvWUE2D\nQjFLUKoimZxFKSaIlX6TTFyjRBVjap7BdAq/exHN7lbK6jSz6Qym0YxVaqLv1F4KpSyaZmIRgogY\ngJtJEliRsQAyUZyI+MngxYFKEh0NOzJFVDRUVFMkrVnRWUEeG2mmMZnBQpoq4pQoIqLjwCCejmCW\nnYgWF00OsJomdjWGQ/CR1wyKchZPQyN3/e7XOPnggyTTaeLxKQTDiSZYsTv8HNx7DJtNprraisPh\nwDTN/5+9Nw+y7KrvPD/n7vftL9/L5WVl5VL7IqlKCxLakAQ0IIwYSfbYGsB2Y4+XgY7ocEdHtMPd\nMXbP9MREdxDhCAfRBgLcQLC2EIuwbCS0IYykKlWpqlQqVVVWZVbu29u3u9975o9MZDCysSRQCTPf\nvzJu5n3nRp777v2d7/l+vz86nQ6apv3CFSZhGLK+vo6u6wwNDf3EglVK+fK75MiRo5x78QL9nkoU\nCVyvQav5LOXyTpKkjGmaOI5OElm0ZIWm7GCEVfJpi0p6kvlWxPp6Hd836PeXiWONJJkCcnQp4yTT\naPgYZBCsEVEkRAdWEdTo9F1SLGFJl4Ew5IULF5CmiQHErgdIOuEaFpKaUHGJ6RPRB7KAToyT9OjJ\nFFmZp9qts9KV+OEcYcPi6KUNSlmVPRPjTG3bxpG5JrvzOlIm1Bo1zpx6hGIYo5lQvVBj/+gups+t\n8rWvHeX06Qsc2DfOhSeeYDKV4uz0NPl+nzMnT7Jx6hRD+/dTHhtjuVpl+4/oTfqeR7ilH3w9eFMX\nI/Pzz6DrQ0gZI2WVu+666cdCaH4apJQ88eijfOovPkWrYWNaJTLlMqXhHOn0It3uOqVSjoMHr2Zk\nZBIpJSdOnObUsUtMhjoVTWA5McXEpyu6zNRq+K6DZQ6T6MNsG93GUrVHw5F43gYyqTHlh6z6DkX7\nGlQUVClZaDXwsHHDAlFUJ47Vra0YH4mKxhpDKGQYoksRiywxfSwUXPI0mKMse1SDHAECl5CL5MnT\nZxKHLAZnkHTIkJU1/LjPTnxGsHHpEURtBmTADmLqrsdGIkkJFZOQTrdO3tDRvBgvClAMlcJAloSY\n5y+dJ7S24ftjZLNlHntslunpOX73dz/4L84V81rw87L0vhLGx+F979tsoPfHf/zGjPlmRxzHfOEL\n3yCKxpmYGCFJYk4c+VuO3v9lxvJZhoamqK7M8WA15vz5eX7t194NJKiqhm3bDAxsI5UdYO38c7T6\nXZI4R4pBNAzAw6VLhhEEEX1Hw1A1HLdBEgxTr/Xp9Dv0nC4Rawh2E8s0EhMJROxEMk8GiwSXKkvs\noItKF4hwEMSoeFgEhPRZo8EuVFJoQkfIARIukmIFg4AcfTr4OOhkpCTjOgh/kWpPsFvPMDW8jWwx\nz2qnQWp0kF1X7sdKpdhzzTVUL86SsXXWQ4/BgSlCVLxUgWeffYSPfeyjzM/P881vfpdGw0PKmAMH\nxrnrrnf9WI+bNyteOHWKJx98ECMICJOEdKXC+++7j/KPNAFUFIWDB6f48pe/xbN/d4GiNoJlCDyl\nhaXpJDIijpbR9EmEWCOKmiTJdjS1jJQuqJfIWBInUIljj42NZZIkhRAJUlaAzdZqEgtFGujo+Cwg\nmEDDJkElZhxJhpiXSFCZj3VMJEUZYYY9ImAdFUGeDVRGMLgkO0giykCOzayoGJBE9KVP4nk0hYcr\nHQrmDght3MDjpX6LeW+Rf713H5VRg0DP8sz589RXZ0l3Wuh6hna3xUqgYuoDbC/maQcRy8sJiy/c\nz92HD3H8Bz9gUFHIDQ9T8X3Ot9sM9Hr0Mxnm4xh/ZYWhfJ6O4zDX63H7b/zG634nXM5GeRXgIWA/\nkJZS/kT7xI985B4uXpxF1zX27n3Xq96POvH88zz25a9gBgNcO7UfiWS91cLp5EinLd7+9nHm500q\nlc1QoFqtzvnzi+iJTzGbhygk8PpIN6acEjw6Pc1Cz6flr5L3doCSQlUzeH6CGznEnXliXaBrU3hx\nQOC3sJMYXbok7AO/j43A4yQxZWJ6QAeDDmVS1AlJUcKlTUhIgxyCDAk+q/j0aKJgkecwIV161LnI\nKoIODcZRmKKHAZzEANok9ESKDJKchICYAIGuGCgoiDCgS0wmM8iy0IhFFrfqcP7732fbkIk0Roko\ncOTIixSLeXbtmmB+fp0LFy5w4MCB1zX/nucBvKl97/8Uftil99//+zduzI9+FD7wAfgP/+H/j4iH\nzQ6q9XpCKqUwMzNLt7OOO3OGYXUESwgsYZJpdTi//Cx+fBNXX32Jctmk05Eoiku9vkJt5RKza+dw\nXAdbjhPiA2CToodkjRmKsky767HRCchqKbpRl5XmCjKsIKgAbQQxEguDLDEhKjkkra1vnIXFEA4e\nY0CIZIbMVtmi0EOljoZPE4ULRNIgoY6gj4qHSkyL7QyQR0dQp0+DZW5MPM4IldjvsrG+jB/00G3B\n5GQF1XVZWlzi1PQsYnGdqw/cRC8OeWllhjXHY3LyGoaHp5Ay4a/+6lvk8wfYvr1EkiScPz9Dq/U1\n/vAPf/tN7bSZn5/na5/8JNtsm3I+z9jgIGfn5/mL//pf+Z2PfpSJiYmXWZJsNsXMzHOIxCYhpNdv\nEycrFK0sjoypV19kcMSkWCzQbDrEuCTxOopIIDJZabhoRkwqHeC6HcLQAWykXANWgSwJ80gsEnaS\nUAOyhHgIxhBEQBGFARRGiVjnHH2KsssQHTwkg+QoYXEaSURIloRhdEYI2dwA0YnRERgkuIRynVgY\nDOi7iGKNtaiPZRbRtTzV1iW+8tjzXHPrlRiWQiGfx+umqYRpUghkolFWDRpLy2SVbUR5F8PIUa02\niMMQv91+uXNxxjQJGw0qAwMcW17mf/vIRzh94gRzs7Pkd+7k7htvZGJi4nXP5+VkRhrA24Fv/GN/\nsH379ld0c/xzceypp9ASlbS1WcQIBMOFAudXVkjlJsnlcsTxHL1emUymwMmTp5mZuYDpraDagyy5\n85Q0Hen5vLSwyNHAIj30FsRGjfXmSSQVitkS2ZSPaRdwehX6yTIdv05eQhpwEYTEgIuKi0aBDAU8\nNohYI6GHwiABMT4BFh6CAA0bkxwJCi4KGSwctiOI0dAxEWRI41HBwSJFBUGRHhKFEZrMkJBGlzEp\nAkKgQRqkoCkCbFXDTgROAsuJQBm4mnSQwrYUNnpNTq06SLHBwNAuthX3Eschx47NMDiYcOnS4msu\nRprNJg899Cjnzi0iJezdO8Z73/v2H1vJ/CLg523pfSW85S1gWfDUU3DbbW/cuG8GLC8v88yTT7I6\nP09xcJDrbr0Vz/M4efI84CGETW35OUa6G6TUIvXaKqobkzFNRuKY2dMn+MTHz/F//l9/wt/+7Q9Q\nxQoXT53Faa6Slz2EGKCVQB7BNhSyKARY1HBRkrPYehoUwXJdxQktkjCNTFZRyGNRICAFCCBBUqRP\nhE5MiIJPhI6NRxoFl2Vs8kwRE1FHAywkgxhcwmcGQRaTUUJapMjQJkuKIUJCTCRjFJhGUuUStlAR\nhkWsqZxr1JFpC3mpzdnFk0Tj67z1rXfyxLn/wcWXzjI6UqY8NcH1h25j27ZdLCwc4/Tpc+j6GLnc\nJuOsKArbtu1mfv4oc3Nz7Nix45+YlcsH13X5b//vn7N+ts1SyiZOlllrPEUpN0zbV6n2Ps911+3k\nvvvuJpVKcerURa6++kaebTxN0p8jYwrcSGO9H5BNl0iZu+j1GiwuzqKqN6GINIkMSGQI6ARxF9wG\naQVgGU3bThjWEEJlM9F+YyueYQSdGgKBRCEkjUoEaAg8FFKkSaFRRWUIlSJLzDNGBwMD0CgQoGCT\nQ5CQIKlikGcFSBMRo9EgR48OfqIgkbihj6bswAttlCjBCWPOLy2Smg7RtO9Tn9/AbflEQZWdpqRg\nDWDqOiJJWFhdYv8N7wASpGUQRtGPrXbcMEQ1TUxNQwKlUol3vfe9P/M5vWxlr5TSl1K2fp5jdBoN\nBjJp4iQEwPE9jp4+zYsnT/P4Xz/Iw9/6BnfccRVxPM3Jk3/D2bN/RzYLmeIQrppCLR5kXUsza6aY\njiQDqTK91hpOVEDKcZABze5phKqzfeJKLMtgLUgoyjZZJAOoDAAWkNAiTRETGwObFFMYWOTxkDSp\nEhGQsE4LjwSJjY5AwyOFg42KRQHwcOiRIIlQiTARmBRISOgBkh7bWSaFB0T4BFLSwsQhj8UI2WSA\nbhyxrOisKimS1Dhj+QM0ww381jST0QZDTpegPYNh+GiajmmmKJUmmJtb5LUuljzP49Of/gqzs4Kx\nsVsZH38b8/M6n/nMV+n3+z+bSX+D8PO29L4ShIDf/V34zGfeuDHfDFhYWOD+v/xL9Lk5rs3nKTeb\nPPLZz/Kdh75Dp9OlUBijWBwmky0DNrXaCpqUDGQyOLHP2eos0epLBC8c539+/OOMDVlMDLQ4NNxl\n30CWayYOU7Cy5ElQ8UkRkuDRwSNHzA2K5KCQtC7O0ez2cL1xkmSChCli2gh8dHwkHWJ8JH1CwCOm\niU0HnS59miScJsMyGhsIHDRM8phbT4gEG4MJdFJErKNRxMBGZRsKeQRpPMAHDNKskSaWGudihZNe\njpVkDyvBHh578RILrT7+Wp0zzz/Fze/5IGLsGmr2bq69/T7GxnbT67XIZCJ8X5LJvJIIKU273X4j\np/lV4fHHn2JxHrYPXUmltB0vUKm1x2j2BhnOTVAqXcn8PDz00CMkSYLj+Bw48BakEoKAti+pOVmC\nuEy9t4Kdy1Mq7ccwIIoaIDZDKSU+MT6go6p5PDePSEok0TlULiBYI0udPCtYJMQ0UABd0QlwEWjE\nxICDwAUiBJvb9BJBDoM8aXKARpc2fSJCfCJUVBIkdTRSZBmhSJcU57C5RIkeNhYBA8EcWdqEyTJq\n4KAHARYmOW2YXqeDre2Cfo2r8nX2DqgIEdDxanSCPrXYZV0xGByeII5r3Hrnu7nUaKCm03QchzCO\nmW632bdnD4u1GpP79//cmiy+eTm4nwFGp6YoZC3CeAM38Hjyuedpr7QoiDRFNURcqvHApz7Nhz50\nNwcPVrjxxpvIygbDMsGrTbNWPcu59QXsxirlUMHvOUTdAiQ2UpQR6l4k+1irznP6xafoORZOUiIk\npM0CVdGjS4BFF6iTIsEgQSEhpkqahAiLDBNEjGAxjEKPFgEuDj4tYuapYJJC4tBGpYfBMh49ekh8\nfDQSUgRY1AnYQCWgxgBnSTgPnEGwgIVBDp+IEEE/UenLgEAmbHQ6zNZPMKn1uTY/yPbCEPvzw+wz\nVPqrR+j3N2vGMHSBDqXSa1NQnj17jnbbYmRkCkVREEIwPDxOt5vmzJmXfmbz/kbgjdSL/Cg++EF4\n8EHY2uX6pcD3vvMddmcyjA0OYuo6g4UCV4+O8uyjT3L48GEajRdwnDpmdoQWPlG0hq3pNL0eT86d\nI+UaDFOirJbonbnA8QcfZO6FF9E9g4KVJ3IcYq+OQYIFeIQ0CYnoMImDJQVJr8+obzNCB0ELgxgT\nA31L36VxCYUFoEPCKirnMBgAXLK0iHBxyNNkjIAxqkgW6aOj4uKh0SBhGZ82CktoeKQZwEElISFG\nINGI0OiwSWkL1aRu5KjL7ayLCm1tiF6kEoQjKDLP9kRDXDpHc+UiO3cO4vtNLlx4gYWF0zjOS3zg\nA3cxOVmh2228wn+9/6Zt25AkCUePnmFq1yG6rkucxKzUGgzm99Dpx7R8n0wmw+joHl54YQ7P85iY\nGEEIwchkhcXuCvWehR+mcaMEJ8ixutai0eihaRXieBUpe6hqHkX5oe21hYzTqMl+LK7AlDdhMkZZ\nrrKLLsM4FOgzKFw6xARJhog1IhbYfPZvoDCDQpYedRJAByIkMR424BCwhKBOmRoFZjY3lAhQqJKw\ngUcV8BjEQmVsq1wV+JQRbKeGFC36eKCEFI0BgsYCmSShlNtFgMJ1g4PsHhnBNyWngjXWM2nSI2N8\n/4nP4VTP4HW7MDFBb3SUJxsNvrO8jLVtG4mmUbVt7njPe35u8/qmFrD+2Z/92cs/33777dx+++2v\n6vyb3/EOvj4zw8HJFN858jidlkNeS2OkQt66eyeVYplz8ye4/0tfotmLaS2e423bKzQ2OgR6hmZ9\nhrzfQx8ZwGt4tP0cNlNI6dCnQZLkkTKEqE5WmSQhC8Q45FDFLBkuUcAgS0AXDZdpYiwgxEKQRcdl\nmGHyhEjq9IAUFh0iVlAoMUoaiUaVBkPUGd1iS1aIcLAI6aNgUkelg4XO0JY8rkef3BbFa1KhSYoa\neQJcInpC0MHETu3ANip0G3N4ccCiW2W4XEHIkLFsijBcZXHxKYaGptB1l4MHJxkZGXlN87m6uoFh\n5H7iuG0XWV7eeE2feTnwQ0vv3Ah10gAAIABJREFUHXe88WMPDcHhw/DII/D+97/x47/RiKKIjYUF\n9v+D7VpT1zHDiLGxXZRKI8zOXkRRXPR9u2krdVreKkcWVpCBIJ8ukbVNuv2YpXMb6BdmmPMdMsYg\nbi/EUgcoa2VW42W6qAhMAjqM0yKHSV8GpFUdpMouIvqsEmGjAQYSECR4ZLlEnWUEeRRiJNNkiQkI\nMRhB4yABFgkBCSYJK6xxlhIqwwRbT4B1FBQiQroYOGxHYQUFC4GCD3hIIpooho2hj2Cp2wn1DEES\nkQ99UmqRAJNGv4ctA8K1WfbdeQeZDNxyywh79+6iUqmgqipXX30lx449QKuVpVAYJEliVlYusH27\n/TPRAbwabOpVznPy5FkADh3ax969e38iAyVJEuI4YWJqB0cXl5CtBrHcZBq6rs/U4DjFYhEhBEJo\neJ7Hu9/9Nj7+8a+wtlYlFuMk5JCoKKKMpqtE0Vnq9TWy2TK6rhHHdZKkipRtQAUUNAZRSQijiESA\nIjV0UigkFI1h+mEfRRbREVvzHJFiBo1lDDRyWPSYRUGw+ZxPWKTNDrokwDLDeGSxEfgImpSJ6JLF\nI0eLDCo5TCJaxHhIHIbZFLU2WCEggyvnCNSrsBim6sxRVl1UGWDkyswuCf6q1WJYl1RMkz1X7mFB\nVWltnKaSFNk9NMFAtcpSEHDHvffym//237IwP4/TbjM4OsrBK674uTop3yzFyCuS3T9ajLwWTExM\ncO/v/z7ff+QRopMvsT3TYc9khanRCWzDpNtrUl9r8uADj5MZmqR25jgHD72VkQO7aDSq9BunGdcE\nDdelEdkImUYRBqZUcQUksoOgh6WmsfUsgV8lS8ggKUK1RC5Zx8OnLwUBLhKbLComGSw8mtTRyRMD\nIQkm4GGRQqNMjy51VogJkZSoUqGPhQr0GUIwS50uMTXS9GmjkCFkHckMBbqUUAm3gql1wEbDRyDI\nMEGHOUyylQP0ejFxAorYhiMbVLsOumhRyWiMprJkD06wZ89hoiikUGi+5r4Gg4MDBMHKTxz3vA7D\nw6/esn258PDDcPPNP9suva8Gv/qr8MADvxzFiKqq6JaFFwTYpvnycUVRyJbzdDo1JicPsm3bZqZN\nt9tkcfcgzbkXyU13KLoe6dAgim36fhM7CDFlhEgCyuEaUiq4gU9dGgQEuASkEBSIGUMjQKcFpFTo\nBg5gUCFkgzo+GSK6aLRJCMlSYJwebdroxAyhsB2TGQKWMNBJo6ESoQMJMQFFqpTJo+NQxkAQcFFA\nRmqkCVhE4lEgZB7IEuMznHXxgg4rURldOmhqQiBdYreDrheRmPhBFVIauj6IW69Tr68wOprjzjvf\nxeMPP8yTDzyAIQSJZXH9dVdxaX6FxcXzQMyhQ7u48853vKHiVSklX//6tzl+fJlsdjtCCE6depLD\nh8/y679+D4qisL6+zlNPPcvs7DIrK0t0uzZvufVWps+exZk/R6ezTrFS5K233IIQAsfpkskoFAoF\nSqUSV101zhe/2EHTykQMADFCZIjjBNBQlBjf30BVHZIkg5SSTWmjDWhohChEeDJCEpHCAnLErGAz\nwIBoU5NLqOTxcNGosR2TFAYBfbJsoBCwikmEhSRgnC4aMacxCYkZo0kWhS6SNRQCUhToYxPQpYxB\nhlECNqiyFxVD0YgUi0LcY024bJDGUrIkSodSIU2QGEwvzRHGguuveT++p7CytsTx3iV2qCaRZ3Dz\nxE4KmQL1WpNadZobbjzEie99j7f+8R+/oR2fL6ebRgO+AxwCHhZC/ImU8uhPO8/3fZaXlxFCMDY2\n9lPtRBMTE0z83u/RcGOOfv0xDoxtKg6D0Gd2dgY/yTKy/TCTu6/g2dOnOfrcs2SzJdzmIqLTwCYh\ndEJ0mUcnIJQtImyk3FRxCKqosgfuaQqJg0ClQxo9hiIaJ4SkKsZIkgwORTyamGwgiFCoYhLRQMfB\nJkTHJ8Chh4rDBDohKwRIRojIoWMhSROiIlCQVAGBwSoOKueIcRklS4XdGEpAI1mnyAZ5BH0lg60o\nyLhHlwgDk2ZzgyDYgbSHcb1lNBJ6UZ3hgkkvdllREvYrMd/73gNEUY+77347Gxsbr4kdOXBgP48+\neoRabZlyeRsAjcYahtHkyiuveNWfd7nwzW/CPfdcvvHvuQf+9E8hDDeb6f1LhhCCw7fcwtmHH+bw\nxMTLL8iLKyu89Z13sNHpsLR0jkymhOO0ieNVPvKR32JxYYH/508/yVrzElpkousCVbYpGVnm/A7b\ndYuKTIgDybrw6CcRLYqEDFNDo8UiHhuMCJNAscmHXSJaLGFiYxET4SCxaZEG2qTw6BKi4hIyicIk\nFllMhvHoAB0uEW8tLjTKpGhSJMSgikmChopKipJ0qVHHwkenSYpRBCY+M2iKQ8kqI3KHGBjZx9za\nMkv1dUxjB5FqE6GiKxqK6BF0A6zKODPNRfyXHuOuu97Np//7f2eo3+eWsTEURcHxPJ7/wQ94z4c/\nzOjoKLquXxaH29zcHMePLzE5ecPLDpiBgRFOnTrCddfNkkql+OQn70dVxxgYOMS2bYM8+eRf0+02\nOHDgBqx0wvHjP+Da696OlDG12grd7gwf/OC/QlVVpJScPHmW4eER+v0ucbyHOI4IwxZxLIE14rhG\nkuSx7d3EsQMsAwkggVUcYjSKSAqoRMRIoIdBhSj2MBOooLNElR46FgYWRVyWkHToEJEiIoNPmx4F\nYIyEBKgD40jEloIoDRQJOY/LKDo1FExagEMDnywhhqaQiATd0LD1As1um4gmffl91EhidIdI1Awb\nrRe5YtdBRkvbEUJhdGiIsxsjNJ1lJrI5BrKbrplctkSzFbO2soZayNFsNl8xO6Rer/PMU08xd/Ys\nVibDNTffzOGrr37dxetlK0aklBHwzldzztmzZ7n//u8SBBZCgGl6/MZv3Mnuf0YHsXe/+w4ef+gJ\nFpvrbMsP0m7X6biSmgY3XvVWmo0Wq50Oo25As/sCRhyTN9KsBgERKnkp8VWdZjKLK0sICijUEZyn\nmCRsI00RHY+ARaq0ZZdFfAJlByhlokRHYCPxcDc3SDDZiccG4KChEWNjUCYgR50cPgsMkaVLnSwh\nKWI0ElRAQ6IhidlMK1EYYJP7iJG0CYnwkx5FIchIFQ2XiSRBS0BoWSKR5ywJq22HJFlCEdtYEk2s\nYA41yrC4ruEoKpkxm2PHjmOauykU9vPVr07z2GN/wn/5L3/ElVde+armPJ1O8zu/82t885sPs7Aw\nCwhGR/Pcc8//+guRaQAQBJt6kY997PJdw9gYTEzA0aObDM2/dNx0yy10Gg1+8Pzz5ITASRIKU1Pc\nd999JEnC88+fZH5+jcHBItdddwdDQ0Ps3LmTp595kW9+3aDVrjFq2USupBV0qZPwFruMTR837rDg\nezSYRGWEGIvN9pT7WcYmlEtcFUNKKAgk4HOBNh6TmGjEKPRZYAIXiywZDFpUcbGo46MjsbHosoFD\nHpgkJiFhBpsaZXyGUeiTkAF8+iioqFtkv0oLF4cCHtemMihxyHKrS095CU332Ta6m9BwqNdbCF2n\n7ldJh3W22QbDxSEcsU5LSTFh7uFbXz3ByuIZrpg0OV8+TxAEjAwPMzoywnNPPcWHfu/3Ltscnz8/\ng2UN/1hgmRAC2x7h7NmLNJttDGOSwcFNVnb79h3cdddv8uKLf006vczb3z7Fb/3WW7lwYZ6FhdOM\njQ1w663/C1NTU7RaLb74xa9z7NgCvl8iis7hu49gKHsRUcimNbeOpo2gaddhWR5SQhwfII4vsikZ\n3gMIIiJUVtDIolAjxiGmgEwCElwc6kRkUFBwyeJykQnApkxETI0ma/hkyLJGjw4h9hZLFiBJAxFg\nAiYaGh5NYnagoGHSR2MAnyVgIQqpGDZxnLAe+5xNBgiUXSSygkwaNJx5hkwYICasz/PiwgVy2TKR\nrrPzyqs5fryBG3o0O3VymQKqopJO51lbW8fMZ19xS6bRaPDFv/xLRuKYq0slvCDgyP33s7G6ynve\n977XdQ+8WbZpfipqtRpf+tLDlMtXY9sZAPr9Dl/4wt/wR3/02z9VbLVnzx7+9f/xIb782Qd4Yfp5\n6hurdMIUt7z71wlDwUvPPkc2NYGnLJHva2RVhYaqUtdzCC2hG4d04zVCxjAMCINFFBYZYZ0p0oTo\nhKjowCgeSwQ0GSASo7hREYmDRgOTFBpj2Myj0UPFpEOdhACNXVgEGOQIGaCOgkuMwiBpzmASUtq6\nYWMk60ADHbEVmRZiopECctSYpkyPKT3PbORhSp+SKvAjBSdWQFVJywQlhkB2MaVPDw+fYdJJAcMQ\npI0M9dU+woLJqVGKxQlGRvaytnacT3ziC/z5n//fr1pZPTw8zB/8wW/Ram2KYt+sIrl/DE8+Cfv2\nQaVyea/jXe/a1I38MhQjmqbxvnvvpX7bbTQaDWzbJpPJYJomuq5z++1v+4lzVFXlN3/zbp577gWq\n1h4u9aq08SjqGgWzQuK0yKY1fMOgEQySF1fRR0MqaRRAiBmiUCVLQpeEQMZYKOjY7MJkmj4lioRY\npMmQJSYhhYLK4NYatk0RnSYuFgkVFKaIyCBQ0EkR0KZPjQIxEaAQYgN9LIxNHpYAmyySq8ghvB49\nJWa3qlEYKNNWY/xonfPdBrncDmCZVsvFDRWWfZeVahs1n6NcvpphTIZzZapumqPHLpHbafCO/fvZ\nWFvj2NISxTB8Yyf1H2BTp/GT15AkEYahMz29wNjY7T/2u0KhyMTEft73vne+HAFx7bXX/sRn3H//\nt6nXcxw+/E6OHDmPoXaIlRph9AIJNiohCQlJYmIYEb2eSxxbJEkM5BCkMcRuItklZpF4i6XKYdJm\nlBAPVa5hEGChsxOFDVwarJPBx6AACFQibMDEIMJnFJ0+GnOE5AhI0SKiiLvlu7HxUYnIAGkEPhIh\nfEoS+oAQOn4SIG2TS2GBWNmBnT+A31exRIU4KdGVJ9lh5MkGEk822LHnWixLp1bboLv2Erl8gefO\nHKFoWAyN76VQHKIvHG686qpXXCAeffpphsOQnds2mW3LMLg2leIHzzzDW2688VWFkv5D/MIUI6dP\nn0GIoZcLEYB0OkejUeKll85y0003vnw8jmMWFhbo9/sMDg4yPLxZcd999114nsdDDz1NfnI/S0se\n8/NNLk3/LUNSRSvsou4K1oI63cBlPQrR7VEGbQ836ULfxs6quN4aOWqUqZIhpoTDOi4hGjlUBkjo\nYbKGTifSSBgA0lgsAJKEdYqsUsYmRmWOiC4GJezNltEoKKiE2IQ0GKJAC5M00RbhB2tAD1BRyNHG\np0+TbUQopIkBA5sEP+6gJx4h0E7AFBqJktAXEEQGgjqCnQQso1NnmClMIvQQPOGiJ0MEUZ0gkCwv\nr7FjxwSmWeHMmaf57ne/y6233kou95Oi1J+GX7Qi5If4xjfgNTS7/JnjXe+C//Sf4D//58t9JW8c\nSqUSs7Nz3H//w7hugqYl3HLLYW6//dZXbPQ2OTnJf/yPf8Bf/MX/JJ+/idbGKGLuIotzM7SjLuNK\nicUEbHOEOJQgE2w9RhE2np/C5DwV/C0qXbJOQEKWmM30TIsMCS45bCQRNjoeMRoGKVwcAgISlpAI\nRpEEQAeBxCDGoMw6iywRkGFzU6AL9AgJCOkQUSCLRp8TtChIOCRUBiwbM2XS7KyzXcAFZ4N1/wKh\nAmDhKRX6sQveKVJC5+C4wDJ0Tp+bxe90GFLznLg4Td/1GEkVaPkO3cuc83Pw4D4ef/wUYTiJrm8u\ncKIoJAjWueKKWzhx4ixB4GJZPy7UktL/J7eVNjY2mJ9vMTg4xcmTT7O4OIsSx2iKRkgbgY6tDyLI\n4yQJrtMkTiySpIMQI8AykhyRTBCkERQAF50KaQIkOh3y2NSpMIpGhKBNmYiQDpI866Qw6QEhWQTb\ngT4BCRIPHZ0UPjFrBERUyWyZXOvEaGwyJR0SImIUGeMRkgbmpY8WCbxeREcrIopjIAUQIxAoSh4/\nCkhlFBwvpnr6e+zIpEiSiAvnjlCWAZmuTdcYZtlpcun0c4S5FPf94W/znrs235VRFJHJ/P07d/78\nefb/g4JDVRQKQrC+vv7LUYy0Wj1M8ycVg7pu0+n8fUZFs9nka5//PNHGBrYQdKRk/NAhfuWee9jY\n2OCFF1a57rp7mZ4+yfnzT1CrObRqC3jlNLq0GSpKEm+IXsMhj4orJvA0hcHRKo2lsxQyWTruAjfq\nESWh8myosSZ1dArECBw6hIR00Oig4OEhMDDoYhGgEpKhxl5UYlw8bLYBl9jAZxx1q0CJt9otbRJ1\nHikiEhSWUEiRELFJHDZRAZNBJLMssIC25QGQOEQQB5gE9NAIpEpbKrRjBUERnxBJHpUWGiEqFhqC\nPDGqqmOpCp04xg8TNE2h03FYWVlhaWke6PA3f/MSR45Mc++9b+fw4UNv0J1w+ZAk8K1vbbIjlxs3\n37wZvNZs/vL0qjlx4iRf+9rTbNt2iHI5TRj6PProiwRByJ13/qtXPOf666/n938/4NFHnyOX28lM\n0ED3lmjVbM4pAbJYYptS4uJGSFFLEWsaQeTiJy5ZuphbRkohBLFMqOHTwkLF4ocui2jrGwuCNg4e\nOiEdMgTkUVCRREgkaVRMFLoYRAgggwmoVIlY20raXEbFxGSMCkVa5NAAySnZYT1UCftQ1jQG0jkG\n0jnGzBZV/xKOsxehTaFE56nIJQZUkH6P/uJJnq0vMqSWGbNtQq9FKo4Iag7LhQE0pcDC9ConTpxk\ncLDMkaeeYn1xkdLICNe/7W1viIhxZGSEX/mV63nooSPAAJuehjo33bSHpaVlcjmdM2eeZv/+zbQ/\nwzBYXr5IJiPxfZ84jl+xIO31emxsNPnGNz5PoxGiaYOoWhoZKhhajlJmAkNYJH4D310ilkOoQiBF\nhJQe4AAZYgRs5YboBCS0cdAIiLCpkkGi4QE6CQYGCWnULV9MFpM0Bi5ldDqEZNjMbA2QrKIyQY4O\nDvsxMPBJCOki2UDQRWIAXWJyJIyyyYyMAlkkvTigk4TktQKDhRzLvUU0xSCM+tgiYo9usdpbpRsE\nJJ0VdEPh1uECS/UWNWAkO0qYHqLuVglHK+y96moe/va3mT19GkVK8pUK77jrLsbHx0nncvQbDdL/\noAD0pXzdTptfmGJk587tHDt2FPhxJ4fn1ZmcPAhsKrK/9dWvUu52Gd+ypUkpOXniBEdGRlivNjh9\neo0nnvgKGxs9hodvplwGrxez0VljW26FvaWrmG32CFKDNNw1QplgWhUaIRy+0WaoWWM50hgKJWEQ\n40Q2QqYYIo2OCRRZYI0GWXT2Am1UZkjootMkpMEePApoxAiW8BFoZIhZ27JrGcxQoo1FgrdF3kX0\nKZCiiMUyHTJABgiI8EgwsCigsY4gIkcXg5ktk2FJLeHFffpy02YYMowX91glhSBPTq1TSHLUZRXB\nJtuiCEFWVYEWSdRGVVO4bp3V1QZCdBkbq3DFFW8jikK+9rUnmJjYtNP9S8aRI1AovLGpq/8YLAtu\nuQWeeALuvfdyX83PH1JKHn30GUZGriCOI5aWLqCqGpXKPp555ji33XYzqVfoWCiE4LbbbuXaa69m\nfX0dy7qb2dlZvvu5zzGazfLizCVm17LsMCRLa21imcELHQx1gcOJQ5AIVggpSQUDhT4e6+QwSQES\nlRTrOIwQMo+CzyAqNg5l+tTpU0XiI1jHoESwlfPjAwpN0gjymOgYbOATERCSZowCaXoMoKBgUiOi\nBLSJqUUOM72Y63N5wqhHX3aIlTxqEhNFlxiiwXa1yEA2RbWzRKofQbiCOphHNXT6XouKIsiqNpf8\nDocO3oCWL/D5zz/AmBmyK5vlUD5Pc2WFBz/1Kd7xgQ9wxavUh70W3HTTjezdu4eZmVlgMyTx0UeP\nkyQ1osjixIkTPP74IwwM7EaIHqYZcM01N/OJT3ybQgHuu++uH3P6bTY9/TueffYYy8ujZDJTBEGT\nRCjoepYkMej2q4woaXRpYErwmEeTeUJCYIHNyMofZnMmSAIkAWnqmCSYqNhIHPr0yZOliIKOxCeF\npEOPEjYGCjYRDhARM0bAOhILwQQChxALkzoWFvbWaD0y+MwSYwLprWCIOpsl0gEECgo2MRdki87a\ncYx4DIWAKLZR5AaTZkxWQEeR3Do4iGpHXHv11SwfO4Z0wFEV9u7aTiIlEduZNyw+/+kvcc+hHdyy\nbRuqorDRbPKNz3yGD/ybf8M1N9/MY5/7HAO5HNpW8bdSq8HAAOPj469r/n9hipF9+/YxOnqMhYUz\nDA1NArC+PsvkpMWuXZu20Gq1SndxkSt/xB8vhGBvpcLT3/0uZ5e6dLt5PC+hWLyGXi8mDGP2HryG\nqHGeATtNq3WJIIKO0iPMDjC+6yosy2LnwDiTkx6r3/8udjrN+WqVvutRS0a25E8tChj46NQpEWNs\n7QTnAY0MZ5jY6kHQI0MDgU245arRUdARzGDTYy8OFTZb9cQ02CCmBizhYaK9XBVbWzxKjw4OCgYW\nGg0ifNKiQoYs88zjxSEasICBQ0SXGi4GUuRJ5Dw58liqgh1laDOPQZF0kibwe0i5hG6orK8fQUqB\nqroMDSncdtt9qKqGqmpAienpC9xww/Vv2P1wOfDlL8N9913uq/h73HbbZjT8L0MxEoYhrZZDvz/N\n9PQsUAQiNO0opdKmuP3QoUNor9DeHCCTybxMNw8NDXHu+HHMapUrd+1gqXqavitR8BnQHdrJOpa7\nQj7Z3N+vIVgloYfGGhDj06BKQBeDFhAyjU9IBZMMARFlDHKU6eCTByQz1HGBCSTQZwmLFnWyNGgD\nghZpPCwENio+afqoWKzjk8FlEkGAJAdUNY2FuMdwqYzTzZFddxmS63RlzLDw0dUCQRxj6AmOt0xK\nKdLoLxPJPkW5TtrOk7JTaBrUZcD4vrfw4pGvc/MN+xjb6gFWKZXIplJ876GH2H/gwCsyDz9rlEol\nSqUSvV6Pj33sM5TL12CaKZ555jny+VswjAWmpjKcP38JVd3N0NBV5PN52u0an/3s1/l3/+5/Z2Nj\ng+9//yjPP3+KmRmXTKaCojQRooJpVmh7JwjCi6iKSpw0aEZNTDWDLWwS6ZETCa6MUSgi6ZBwChhm\nM6asQ8QqaSQjBOQZIMbHYRCXOg4KafJIhnGYY5UAhxUaaPioCFS24dNEMgWEKKxhEuHTR6JS2LIM\nJ4TkSbGKQo9lEhQ2uTiAvWzmw0pi5oCdSpeWuIDoBRSESdPbwBRVhtSYi60mhiaYGBzEsyykpuGy\nmXgqkwTLtlGEYKG5jsyUiWsddlYqzCwvc2l+frMRl2Vx9Omnee/730/tzjt5+rHHyEpJKCVqucyv\nfuhDr/v+eF3FiBDiw1LK//G6ruCfCcMw+PCH7+Ppp49y/PiLKIrgne88wI033vDyA8j3ffRXsBdZ\nhsHc7CWKwzeysjJHEARkMmlsG/r9GrbtcM1tt3Ls6W9Q63eohxrW0AGu3X0l+/btoF5fYWbmDM1m\ni2Ynot+LMKOYRcYJ2UdMhnUaNKlSJmQAm0XmCYgBi5geaTxMFOoMEVIij0qdEJcqWQQ9TNII/j/u\n3jRKsrO88/y9d4+4sUdkRu6VmVWqUqlUm0q7kFglaAlrZLcHm56xDeNj+wOGMYf2mfY53TM+TDc+\nbvdpz7HbM4MbmsZuYBi2QVi0MQitaC1ttVdlZeW+Rsa+3P2+8yGCAqEFmkUF/n+KEzci6s26N+I+\n7/P8lwIOI0hMQhIIzIFqJh54hJwhHgRS97UzMUmy+ECPOh10DDL4uNIjpQ0RxVli2cNDZZIUARHz\nxMAqgUySZBKJShQ5QBKFNVpKjbZIIMOAvKZT03SmpzdJpVIUiwe44YY7SSa/R24SQsP3/TfgKrhy\nCEP4/Ofh8cev9Eq+h9tvhw996Eqv4qeHVquFlJJsNvuKY7quEwRtTpzYpFC4hkplnu3tFer1beJ4\ni0ymRLn8JL/6q3ex9/taV57ncfbsWbZWV1EMA6EoPP/Ms8ydOs3C2dPko4iwskNca5Awcwwnc+wp\nwflLfaP3EhFpEoTE9NAQA55InhUUInw0HLK0KJNiFwoWKj367A+FmBwRCxxF8iwX2aSHSgHBNDFX\ns8rqgGkWEZBliBprNAnwMYkQeAR0mUDiAqaiEugaE4bB8VaL53o9UqHkmvIYl9Za6CJBAY8oXsPr\nGmSMLrmUZM3bwCIklVQ4VddZckxip05PlzCT4mB+hKhbp/wD3c1UIoHc2aHRaPxEfID/VjzzzDOc\nO1cll5vHsjQ2N9uUSnvo9SxWV5+lWDyCEDbz80scPXoQ1+3xwgsL/Mt/+b/T6cDU1E1Uq0lcN0uz\nuUQikULT1ghDH8vqoutput1RFNUlly/TCxbQ/HlyUZcwnMSUKgYJJA4agpgaPUICEgiliBNvoyFw\naBBgIRnBIGKdBg1UQrp4QAEVnYBtikQMM0aCDZqUWL6slPEIEBjkCamyhSSBgkaMRoWIApAbXFEG\n/T7NdzVHAf2b+H4h6Vg+qckuFa9OZXMNW9XYn8uhAsvtFvVmE7VUIpvNUtq1iycXH8co7UHKmM1W\nnaphEcmIfSNZHn/+eVrLy4wnk6hCML++zte/9CXuuvtu7njLWzh67Bibm5tYlsX4+PhrynodxyGK\nopfxTl4LP2ln5KPAG1KMACSTSd7xjrfwjne85VWPDw8P42karu9jfZ/KY21nB9XOMTq6ByFMVlb+\njlZrAU3L4DhblMtjZLMZQkVw+92/z6lTp0mljrK5ucWzz34aVc3j+wGl0jDb26D0QlqhiScniFAR\n6GhkUdHpskTENhnSdBH0kEg02licBXZTpo1KF4jQaTHBNuuMMkqbNQxUEijo+CQHhYgJhIQMo5JB\n5TTaQP5lUECQRFAZNBb3Y6Ih2aBKJWyi4JHHH7SOA7ZEQEcGxBRQcAnEDorI0FUcmtEqGSKGVAPC\nNoGMacY54iDHynzEwWOVbjKiAAAgAElEQVRlKpXNl8nv+m6IO8zM3P4zPPNXHt/+dl9O+yOoyN8w\nXH89nD/fd4T9MTjEP1f4m49/nNryMkII8pOTvPO++yiXy5ePCyHIZFK4bpu5uScJwzKt1ihxPI6q\nLrC8vMnY2F3863/9H9i9exe2bTM2luf4ww8i19aIOx3WKxXOr26wK1cmbSYZ6zVZdTpYisGd2RJO\n4LLR2mIksFjwJWcQqGjYWHTR2CZCp0GJNDMkKeKyhuQ0eQw0dBTsgV27j4IYhMd38VnGpEYKwS0Y\npAgJiAjwsRDsoCIRVNkkg42kSoUtIsYGlogOMQ6SbKzixCqGbhI7DpdWmxTbMQveNn7kIfDYosMI\nPkKFq7MpssPDCEXhfK3GeifJ+NA11HsKXc0gn01Q29jiqaceIp210b/PWA763+9ASswfeP5niePH\nn+czn/mvrK0puK5kZ2eRRqNGPj+NEAq9nksqlcQwUlSrW5w8+RQXL27iunlWVzfJZLLE8RyGYZFI\nJCiXZ2i15kkmd2FZGVZWHkXXy+h6g9CtU+94xEISBgoZOUovrqCzQ0AGk2EMbHRGUWlSYwkYx1dG\nWIpbJNgij0TQQZAlwKGFzzgeQ3hoKFwgS8wMKjqrdNBQkNgcJ0AgSRMQouBgEKNjYrNFHZMOE2iU\n6RcdCjAHpOlvRjXApz9IEnFMpdeEiootBDnbZjmKiHUT1ARlq8jF7RUa0UVuuO02MhMTmNcfZXWz\nwbdXzlMY3U1xbIShIUGwkqS2tMTRYvHyb/2E5+F0Opw5c4bDhw+TTqdJJpMsLi7y7DPPkMlm2b17\n92V1Zbvd5ltf/zoLp06hSEl+fJy3/9Ivve55/6HFiBDi5OscfqUjys8IUkouXrzIc8+dwnV9DhzY\nzaFDB1/2JTFNkzfdfTePf/nL7M5kSCeTbDUabAA3v/l25uYa7Nt3FFVVePjhRzFNg1KpxNRUma99\n7ZNIaXHmzBKe57C29hVaLZtWq0OpNIOu+6wu+0SeSeiUcKmhMoKBIERFo0CERY8XGcVlmhQ1Kqwj\nqDCJxz56tBBoDCNxEDSADiUiHEBHwSRApYtLCgUXBYWIkAgdSGFhoLEbwSWaXCAYUKsi2vjMMna5\nayKZJgG06HABB2uQHOlIhyQjZMihoeIpDdrxEmHsksZnSrVIYWKpKSrCI1KnGEvvJ5XU2VrTGJko\n8vd//xne+tZfIY4jGo1FbrpphvGB1OsfKz73Ofhn/+xKr+LlME04dgyefBLe+c4rvZqfDLlqlQOD\n8er6zg5f+OQned+HPvSyHdXQUJmRkSqnT+uEoSQIoFgsoao2m5vLPPnkP1Ctmth2miBo8Zf/7uPk\n/TrjRsC4At1Ol5v0BI36FlW3R1poZBBUjYCOEAwrKsu+x1yjS5IEK2h4jA4KhW3GBt/WkAqrA4m9\nSZISSbaJcKmhoJAfPO/SRKFGHoWLhITkiAlxqSPR6E//RwaZMxKNCJUWU4wSU+AiF1jCR0EliU5u\nUOgo0majEdC10vgiheN0kXEGSQMPhzYhHi6jUcx6VzBfraJNznCpWicIijQNH00FQ7PptFTa7jIO\nNfZfPc03nniSe25/0+Wb0IX1daYPHfqRdrY/DXQ6Hb761UfYu/dtbGw8TCJhMzQ0zcZGk1ptC0Vp\nMjs7S7VaQ0qVZNJnfn6bQuEo29vzJJNlyuVrWV09weSkzdraSVR1BlV1SCQ6tFoVGo1zKMo0mibQ\nzAyd3g4GwyiUULQkUZQdkI0DQooExAjOAx1UsY8o7nvRGKTxsGmzSIY2EWVCfMboK6JqDBNjDOLx\nFAQmgik82gN7f8kMHi4qbQxUoEKGBjEpFHKkgA4CmKTPE9GBg/THNVX6SpsaYMgIG8g6DvU4ZiiT\n4YzrMdd2mSwV2PBcTml5pFLis8++yO984Hf5d3/4hwghOH36LO12l+npCWZmZvhfP/IRjF4PCn0z\ntNMbG5xaXyddKPDxP/1TfvODH+TgoUN88W//FmdlhfTA++fhQoH//n3vI5/P84VPf5pkpXKZd7JZ\nq/GlT3zidc/9j9IZGQbeBdRf5dgTP8L7fyr45je/zUMPnSWdnkLTsly48CLPPXeK97//vS8rSCam\npjB37eErTz6HpcXccsct/A/33IOUkgsXPkurVWDPnsOk03meeeZhXLfC6uoShlFicvJtmGaSVMpl\naekBikWLOC4zNlbmzJmLEGQQCAxh40qBQRrwsHCIcYnpMESLWbKE6OgYjDOMoMsmIyjEVMjSpkIO\niYaJQhcI2cIhHNS6GwSMD36aOkhWicgBKiY9YlL4jKKywhgNUqSQlNhG0GQHA8ksKhoJAppk8ZjC\nYw6DLBa7CNjEJUQoPboySxCnUVHxqdNhizEpEWYeL5LkyeMGLXZl93KptcOhQ29nZeVJKpXHiWPB\ntdfuxvd9/uRP/grbtrjttqNcd91P7sb38wTH6buu/pt/c6VX8krcfjs89tgvfjEy/n3S0rFSidry\nMmdOneLGm2++/Py11+7m/vuf4sCBo9RqDrVagGWl6PWqSJlma6tFuXwzFy+eYGWxgvTGcd0h5nst\ntrUKcbfGKG18JFkkJQJ8obAVOiybCYpmgpofASUqeEgMMqRps8QMCjbDSFqoeOSIeYqYMgE9uoQU\n8egQcokEOUICJMvk6GKQJk8PH0mHOjHDQAsoDcwTv9snLaJwFkmAggFMYBBSJcUCS+zBJImPH0as\n4LEa2kQ+WDImRZsNYgKG0Rljmy5NUccVIZPZMpfmBSYlCum95BImHW+LuLdDMTtGNlnmmsOHuO6m\nf8Jj3/oE8sUX2VUq0Y1j8rt3c9c997xh18HKygpxnCGfH2b//r2cOfMSuj5KIgFzc49w662Hue66\nN/PQQw+ws7PJvn1j1Go5Go1NhoYS9IOGJVGU4bnnTqOqGarV03S7VcKwguuuYRhdEgkLzwsJwxSu\nVHFkB8E2XUbxiUiyG51lbAQRKgEFOgTElFCJMAgQRORJ4aKisEUFnwCLHRw8yujkkPiDeDuAWQQh\nNk2ylAb6nGVGUXGJmMfGIAuYFFAxCfHpK0U9+gVI39pSMA7kkawC24BLn52oxjG6EMx1u4yj4Qc+\nj29u4SmT2NlDlPYcYa12jqXVHd6eTqOqKrfeevPLzsGd997Lt1dXuVivs9FsUqtWecvu3aiKAqkU\nL9x/P48/9BAjjsMN38fPXK1UeOBLX+LWt72NYGODq77v2EihQHtt7XXP/Y9SjDwApKSUL/zgASHE\nIz/C+18TQog/B44Bz0sp/+C1XlepVHjkkZNMTd0yIExCLjfE4uKLvPTSCW688Qagbyn8yU/+f+j6\nBNce+VXa7RrnL65yW7vNzMwM73vfvXzlK99kZeUcUsbcd9+N3H77DXzqU3+HorjMnfk2im5jJUdQ\n1TF8v4Kuhwih4DkeeStNt+eAiFClIGINQR6JAngozJEjoI2HJEVMCg2TAjFbqMS4CFQcsjhEaLTI\n00VlixZbjBOjoNAmxRw+DgENBDqjdFBYxyWNxxgRNVKMkaKFRXuwr7LR2KCLhY+HSkx6QIYbw6VK\nSA+FFFKk6ak9NJEjkjlSwiaOFVQ5Ql3YrMRzTAVNQlLoSHRdoGsmIAmCgNXVOrncAWy7zN/8zROo\nasA73nEvUWTxxS8+w/r6Fvfee/dPcmn8XOHrX4frroOxsSu9klfi9tvhYx+70qv46SNrWVS3Xx6e\nePToEcrlzzM/v4RljeF5VaKoyfBwnlbrIpaVZHn5SbrdGiKcIPAdTCWFjokXaURUcPDJCkEKBVPG\nJIGkjKn7Hk8EIetxAg8LD4lOkhbOQDExREiDmAx9b8wGCg6XyNOjQcQ0MVlSXMBgDYMGWSLKWGio\nBCh0qdJjiJgJ+tTFAgoOoKGjIvuDGOqs00PHJUVAGxWLFQ7SYAmbgJCYNjl0OY5wWzQHGcM+EygI\nAhQ0soxaU9SCOfSWSjKyMCyJKx2224KunySKHLpxBzPTYHL2WhIJm2O3vod0ep1b3nEHmUyG0dHR\nl41lf9boh9tJAPbtO0KpVGZl5RLJZIJez2RqKkmtdppjx8YolVI899xpPA+OHLmJPXtmeeml06yt\nbbO+vkk2W2Jk5CoqlS+Qy6VJJIpUqwaq2qNWO07KPIZUFKRMAg0kwwSBCvi4OIDE7jt24KHjYoDc\nJoVJCYUesINEohNisIZA0sKjgCRNiEL/L/HpG7ynkXRI0k9g9smzzjYaEjnYUoZ0sOjh0iGBgoUx\ncA3xBr5SfcfteQQRGmMoRASsD9y4O3FMU9PoxTEHRYTUDRpikrQYZssP2ZMbRdNCLlxoMD8//zJ+\n1Xdx3fXXc/Laa9mXTLLwjW9woFAg7HapKgo3zc6SsG3+8ktf4gM/kIkxMTTE4vIyi4uLpF7lmin8\nEJftH1qMSCn/p9c59t4f9v7XghDiOsCWUt4hhPg/hRDXSymPv9prV1dXgfzlQuS7yOcnOHFijhtv\nvAEpJfff/y0ymf1ks/1dlm1naTbTfO1rD/LBD/42MzMzfPjDv0OtVkPTNLLZLBcuXODCyZfoXlrF\n2mgiSVBRVXbCHAm7xdRUmXptkdiv0Y5MwtBDk9skUXHZRrKIxEJjgxSNwfyw13dzJD0wb49Q6BBj\nI8hg4ZNAxSdFl0U0kmRokUVllRyQRBDSw8VhgjwpYlxcJF0uUWSbNjpJNEIMfDJs0yI7YKj0AG3w\nCQJBRA1JBkmXNgEJ1cI0HbpeAkUmUaSKJ318VcXWx6n464zGXSxFQ5ohmWyJeq9JIptiaWkFITT2\n7buZc+cuYtvXoighp049z6FD1zM0tJennjrBrbfeSOkKGyn9tPCZz8B7f+wr/WeLW2+F554Dz+uP\nbf6xoOG6HPiB/KNkMsm/+Bcf4KMf/Wtct0M+30HXc6TTSVZXL3DxYgQkCMMUob+FJhRM2SWrxmii\nr1Sp0GVUQkpVaEYxNRmxhUYjtmmh08PGIkWODjY1KnSI0YmRgyjLDCkqpNDp4gMdHFSSPEeWAIOQ\nkO+aEfYtvHu4uMTsBrZZAbIog02IioYgTUiTJIIuHSQGKjOoSBTG8YiRtOnxDpyBti4gJiGbjNFg\niAyL1EnTxKSfltJDpx3ZhFqWnU6IH3joaYud+hLIvRhKijDu0Ao8SlaCdLqA63ZZX79Euz3P/v17\nGR4efkMLEehnien63+M4HRKJFMXiKMXiKAsLz/PLv3wvIyNlHn74O8zNrdFouNx991s4fnyeyclp\ndF3n0KFraDSeoNF4EUXJs7DwOHE8iW3PEMeCKGpjeOvMxC4p/yK90GWDJi1uQBW7ieUyggIxq/SI\n8DGRCCIm6Q9EBEkcJAoKSXrExJfTZHxiplAYRmMPMSukeAIbnTZrBGg4JAfC7gCJiaBIgIakhyAi\noIFJmzY+KSIkCQIkdfrj9745nsAjwRAR8UD8oGOxRY9CrNP2I8qJNCv4rDkeGCbJpIGM4OmnH6ZY\nTDA6Os2FCwuvWozkcjnuuO8+/o9/9a/YnJsja1msKwpmsUi71SKbzWKEIX4Y8oPOX5oQ2LZNN45f\n8bn1Tud1z/2VlPbeBPzD4PG3gFuAVy1G+mqZV/5xYRgghORb33qIxx57jkceOc6hQ3dgmkksq+85\nkM2WWF4+R2vwnyiEeBkrfHl5mdaZ51DbRXLWEK7rMCbBkav4MqaYnqB54Zvk0en568TSGeTDdPFw\nkSQIWSeHP7gcLcZRMfGoskKIoEoXSQYVmywuI0SYxPjAJiYuDRxiTpNDMIaKCdQIGSckQRWDAqBh\n4jPGHFUkFttYuKQxUFC5ihXOo1InoIFOgoAkBWxcqjRZwx9U5pHiMlQcRmsbdHsmbugSawmkkqAT\n7CBJ82zUJSl6uMESXjuFruS55dYbOXXqGd785rvQdYOtrSqmmWdz8yzHn3mcF554hIQRk8lZPH3b\nXu55A9u7Pyvs7MCDD8Kn3jCa9n8b0um+78nzz8Mtt/zw1/+8Yml7m6mBpHRle5tuOs2Ba78Xnug4\nDs8//wIvvnie6ekc1WqV2dlpVleXuXjxcdrtHqnUYYTQaTR6xGQJ4goN1UHGEj120JCcQ9IENqOA\nBrBGApVpIIEC6FSYZZssJjYeKSRrxPSoYjKMTY0cCVx6+KQZAiyaGAiuwkYQ0KPHJi5nsVDQ6PZZ\nWFiYJAlx2SZGINlGMoskJqaFC8Ts4DKCTgcFCCkgySNpI+gQ0wZMJKskqDGKjUObWSJSaAMSfYxN\njw08NEXBkx1SWp6rs8NIv0PHW6bhecSySjE7y/jEbZw9+wLNZptKJebqq/fw6KPrPPbYS7z//fex\n6/va7T9rJBIJ3vOed/K5z30DKUuoqoHn7XDNNSX27NnNn/zJXzA318ay0gwNDVGvb2IYDmtrT6Oq\nQ0gpmJgI2bVLR1EmgQymeZggiFhZOU3QmWNW1jFkEjWOsBSNRJRkjh26coJ+KJ5Hn51hERLQH4C0\nEQhMNi9LDDrExHhkOEDADoIQixw+KhE9hlmmTG7gp+oR0YOBhFcnIKRDnn7SZRcHiWCCNkeJWSdk\na8A46YcvqpQHypoEOhEq24RsoFHBQDJGiE4vjNmmSdd1KCbTIBzU0KNa7+DLDIliiqmpQ5w7d5KX\nXgp597u/N9+t1Wo8+OBjnDw5x/raGpV6j6aWZhOTfMJgMpdn7sQJikNDWKUSZ5eWcLtdut0upWKR\nsZERRCrFsWPHOP3ss1za2GBmZAQhBDvNJptSvu65v5LFSA64NHjcBA681gtnZ2fR9W9frpYB4jii\nXp/H81wuXQopFK7DshwuXmyztfVN7rjjn6DrxiBfIHpFhsry8jJPPHGc//LxjxM2HFQVDCOHaRZx\nnG3GDY9gYor6yQe5u5jnkmhR7zbB7TBHSMgEaVRa5NEYRiWiQx2fHSQeKRIEOGxQocoM0MPAo4g6\nkOQG+GwzRQ+dkAYKNWw8htHJ46DjAxF5oEt9YAwcksIngY5DiwQ6BTRCfHYQKPhoJFklxkOlQEiM\nTYiNYJMWpqiRKSXpxBEdt0MYXk0Y2yS1BBDSiddBJtGS11PYVUI1AwyrwZvffDVTUyk0bYrp6f0A\nJBImp08/y9aGgx5OsssewQs7LM0/w7/93/6M5eUt7r33nb/Q5NbPfhbe/W54FbXpzw3e9Ka+5PgX\nuRgJJid5dG4OAYzs2cOvvfvdl03MPM/jP/2n/4f1dUGxOEU2O0a1epxe+xw3H5nC6WRIJG7Ctkc4\nfvxZhNhC1xN4rkRJDOHrSXZaT5LQNcYDlbKIWJYJKmRxuAqbNA5thmjSF7jraMSDUU1AgQIrVCnR\nwsanRkwFiUEZlRoFIhRsFHQ0IrIkmEOjyy6yWNjoCFTWOD2IyRP4KKzTwsMfaOZ6hDhIhpDk8dlE\nDnwpoDcY4ZwiQQh4GGxiE6ANBgjjQGdgoBijkMDGooUre6QUh5mJIq2mi6WX0KWHDM6QMHOMJHK0\nVxd4emOVkbE3Mzyc4uDBoyQSCVqtEl/+8jf4gz/4nTe0Q7J//34+/OERzp49T6/nMDNzjJmZGf7q\nrz7Od76zTrl8jCiyuHRpm0SiwfR0kV//9dsJw4goipmauoMPfOA8cbwHzzuP63ap1TpACTPqkbV2\n4zhzRFFATNC3QxAuntgmijtAAkmXfidEpS+qrZGmzRQFaizRRiXLOOogfKOf6FtGEqHgIFhliH6H\nDByKTFKlRRKTKgYxCxTxkeTYIKCLis0Os7hs0O+zXIVCHVgkGGw5k9Tp0UZgE+EhWUIBRkkxSo8e\nPqAyxlawiutvc9fkCC/WmyjhNEJYYGoEgYPjrPO1r12k241461tv5MiRg3zyk/8vnjfM6OjtPPQP\nn6G2WSZrJ/Fw0bQUpxe3gBYnNzcJR0f51uOPc10mQzmdZmNzkydOneIDH/sYpmnynve9j2/cfz+P\nnT+PAtjDw9z3nvfwBx/96Gue9ytZjDSB7woSs3zP5u4y/viP//jy49nZWS5depGtrQyKohPHNWZm\nEiwvm4yM7EbTdKamxlhf92m1AjY3F5ic3MfGxjyHDs28zKr25MlTfO5zD5JITLK15ZPXR4jDEMdb\nwI1DdE1FUxX8IOb6yXFumJnhgOfxd99+GNP12CRFFYseOjBKgi5JdBx0QpI4rNIkZguLGkVi6ugs\nozOCwTCSEIc6Y/j0TXUDQoawsFjkEj5XI0gQ0hq4C/gojAAmKut0sLFJorNDPLDKEfTQqZMmYgyN\niBCHChE+CZI0CEjS5VgqjZkr4QRd2qMBC5tncMI9BLJDFFWQAopD4xw+eifj4zo33niU5eWT3Hvv\nAW688Qb+43/8L1Qq65RK45TLaR56aIvIKWDFPjvby6x2KsAuvG6WRx9aZW3tC/zmb979qu3AXwR8\n6lPwZ392pVfx+rjttr7a5w//8Eqv5MfHr/3Wb+E4DlLKVzipnjhxkvX1mF27jgCwtbVMd/kSydYG\n5XwG79ISjcAifdUkw8N7CIISOzsvQdjE9SWRMLHzCWaVFLNCsNZo4XuTJICINC4mKXSqdBgmxEAj\ngY+KTQ8HQYUuBk2yBOyQJIOFhYoxaJJ7A0sqHR1JgxDJMBaJQVNe4NJgHJuYBjEGCVSSwDI7dFBJ\nMYFKGYFDQJIe0wRsozFMTAfJDhodBD1MMiSYpU2XTWqUgRwWkjZtQiKRRtUMYnWTm99yL/b2IuNj\nkvNrbeoXF9EDOFAcQubLzE5McGbhJNX6FpM3FbjuuiOoqsry8jKdZpN2d4Hl5eU3tDsCkM/nX0as\nbDabfPObLzA8fBO23fdCMc009fol1tdruK7Pm9506+XXXnXVXpaWHAxDY2XlBFE0QxSFJEwbTZOY\npk4UuGjCwA1bKEIhmYiQcYGeB1Ec0e/GZ+kPR0YJWKJDhRE1w1LUwiaJpJ+r3LeiaxGTxcInpEZf\n69Kh798UkkOjS4MaaXqk8fCpEBMRs4sOSRwCoDj4tAQqZcBEsIVGTJZ40BURWFg02EbBIoNH3xu2\nhYEphkHG+F4NP5SU0oL11jJSG0aNepw69Ry7dl1HIjFCtWrzF3/xAF7v3zNcvoHrrr8Wx3FptSNG\nCgdxvIv4+ZhTtXXqOx3iuMq7Dh2k0WgwlMlg5HKIbJY909PstW3WFxbgttvIZrO85zd+g263SxiG\nZDKZH1rQXsli5Eng94AvAG/nVfxK/uiP/oj19XUURWF8fBzHcbh06RK+7zM5OclXv/pfOX78HIbR\nRoiYsbEiqZTH5qbDmTPPE8dNJiYs7r77e/rmMAz52tceplw+gqKo6NkxetVz+K0ddqSCopcxkmka\ncZdCsoI9lUfXdZxKBZOYhBJhxAZpQnxGibFxcQgH1ryQpkcCG0mMh8EaJRoUUKixSheQJAmpD1Ip\nuriDWbRGjiQtdmihkUVjBYUWGUoY1OnSxmFroAPooAykvwIFmxwZFukM3EkyWKQGOaAdmgQ0KZGn\nYwyzvNVgXO2SEj7TNqy0L4BI4cUBplaiXNiLoihoWt9Rz7ZLLCysceONN3DvvXfyyU9+geXlOkHQ\nVwJ12vNEqs5GZxtdTJIxU6xv1JjaC4XCQb761Qf5yEf2/MIpbF58EapVeNvbrvRKXh+33Qa///t9\no8Q3eMT/U8VrZVucO7dAJtOPSY7jiPkXH2ZfMoMHiCjimokytbkKjcoSoCCcJXbj4IhV0tksbdmj\nroSUzAhX6CzKBFLY+LKLICCJiYVCDZ2YFrmB7fYOCltY+BSRCKBJg5AiPgoh/aySLt3L4ZR1YiDA\nQ2McD0kTSUyIpIvEJkQCPQRDmGhYVHGxyTCEgzfogDiYpImJsXGIaaLQxSAmYpwCGRIDwa9HhnXm\nuEo1yYuQSHHxjRSxEpGyMtgyxldNdmUy/Mrbb+evv3g/lTWNTHKIZb/J8ws7SNnFTtrEoUccxzz5\n0MNoTo+ErrNdPc/nP/EJ3vfBDzI8/IY5ObwC6+vrJBJD9Hrh5eeiKMJ1Y+bmnuKBByyWlpbobG3Q\nqFRYvLDEzL67OXBgD93uF6jVFokiCycIcVhldHiWVmMDRTZot7t0hIGMU3hBEyl3EMJBymuAWUBH\nsIbKNDVqqFETH5MtXGJiHLLoWAi6qFRRSeNj02GDIVo4QBWJIEdrYBWvYKOzTRETnwCNJhoRDpLS\n4F8ESQcD0MjCgH+Yw6COpEMFDQ+DFhKJg46Ci0FSVjFoQ+zw7GaLdCKPaY4wMZmm67Uw5DWUy0c4\n+9K3aM/3SEnJXGOZat7AqQdMX3MN6dwwnYaHJrKkbYGq5FGiPE40z/UHD/L8U09xzdAQ8/U6B44e\nJZlMEscxj5w/TxzHl3/rbfuVeXKvhStWjEgpXxBCuEKIR4EXXo28+qd/+n/j+xYgSaVC3vveX+Lw\n4X4g2/z8PN/5zkmEyJPP70bKmNXVDcbGkhw6NM6115rceefb2LVr18tugjs7OziOQqmUIggCnHaN\nVquCHxtk2IWIdCqtgCA1TaEEDSVgs91mZ3ubWEriuE8ltQY/TzomLYoIKmQJCekR06aBTgODMbaY\nxUfSn0K2OU9IkogAmxgfA4cUIBF4KPTN4y0EUCNPE58mMQZDQEiTRfKEXEVmICtzaOKxRIoQhQLn\naZMgwAAUfCpodChSpAhtQVf6LAofLW5wrJijkLNIqKOsdWqc76nUduoIM+L66/tGZo7TpljskwlL\npRL/9J/eyblzF1hZcRFsY1tTWFKA9NDUUeK4hRm4bGxukkrlWFkJqdfrb6iD408D//k/w2/9Fvy8\n11ATE2DbcOEC7Nt3pVfz00cyaeH73sBr6CXWLjxHoJsoimDv/jw3HbmGxa1nOL19HC1RJtNeRHca\nJI0CeWuGYdFj0Z2n2XE4YOscLBXpNZNUuz4rbA24WAo2FbJohDjYxMxRJM0Em5gIRgZKt9OssEkJ\nGxWVNkVceoyikcYnps0OEQ26GIP4hSYRNi4+JoIs0BrcPCQmAQptapxCo4VBhMkQAUUUtgB/kHo1\ngsMaMTmSBCAUdBCYZskAACAASURBVFRiPce2n+FJWaMcg4wVYs2nouqkkjnG4ohnKsucSrjsPXAV\nbz5wFS86L/DQ6gKhtourp65hdmyE4+eOc/z5E6ytrJJwXcaGhpCxy8FdRa5OJPj7r3yF3/y937ti\n14BhGIyMFJibaxIEGTRNZ2HhHJVKDcsq4PTyfPk//C3XlpNcd3A/UdHkwUe/xOFb/zvuuOMunnzy\nBI3G06Ryu6lKF5wecdggxGTHshkZOkSrV0H4DTy/TRjmQO5CESpBFCCx8aigkqJORJ69tEjgoQKb\nBFRIY6OzREAeDZU6DVQctimgYAMxEUVMcsScZBpJkTYKAQYREX2Sqo7OMpJw0DUR2Li00aiRRkGg\n0sNlB4UubTRMLPbiY6MiSeCi02AIyWQqyfCuUVbaEWHTZdvrMTK7i4Xzz5L1O0xnUqQTCbphEd/t\nQafD4vnzpNIWXZmitrVIz0uzud0kISXX7N+DnUwSSIkiRN/LqtUimUzihyGGZf3YI70rmk3zenJe\ngFzu6OXI6Ha7zqc//VU+8pHfxrZtHnroKWZnb6FeP47rtrCsDIXCOEtLJzl0SOdXfuV3X1XNYRgG\nUgasra3z7LMv4GyuY4YmnjpDTzHRDYNU5OFZGkIb5bn1FzjhreOtLxJ2u4SAS48ObbosIcmhUKKJ\nPyhSGqTwsTAosUGRmFH0AblVcImALbo0EMyjopNADuKPfLo00XHZIKbOKFskgTTrhEQEKCTQsEnS\nATr4qKRRGSdgjQ4+DgKF/YQYOGwOONkm6UFWTss/z3W2jefaiKhLrdFBWBErUQeiLHrcwHcN8mIX\nixcvkkpZwBZHjtzJwsICD3z+86gDVvTF9U3Stk2l0caSWXwFVGJ6UYchS6JF0cDiO3wFZ+fnHb7f\n54s8+eSVXsmPhttu6/NG/jEWI8eOHeT48fs5e3KJxtmnmfQ9xlWNSqfCS+cT3P22t/Hu2w9Re+I4\njt/CVjfxFEkiMUEUdZGyQ8H36cqIQqhiZlROdDzSwiQje7S5SBfBEBXS9Ngh5jwabYaIseigI3BQ\nBvZTO9jUURADNpaGRo9V1lhHQTCBQZotWqgIJlEp0mUJj0UKOGgodDFpkGITEOhMIEkzhIGLww6b\n1ACbPJMIugRUkXgYLNIFhExTU0zScos8LqpQ2FLAlwZ+ZNKJi0wxxRMXNuk2qqzqPv/XZz9Ly3Fw\no4ji0C6G8wfZabqcvLSKT5LY22D+5FmuLU8wV5tHs3vc8+v3MFYscml5mVarReYKWf1OTU0xOmqi\n61nm5lZpNDpsbzcwjDZvf/tbqa+cZcJIcfbkMq0apFJJZs0OZ1/6IvsOX4/nvUg+P0EudyO6rlKp\nv0Q7rKEaE0xMTuC5KfbtnqLRWOLixfN43iVgcaCAFIBBQESEgsooKgER3mDQngLW6SExyQATxCwx\nhMYOu/G5CkEOiYvCJhodQkwCIuboDkS/UMWiSY6INlehkBxYXlZpsAwIUrTwsPDpEgzYgglianRZ\nRDCDjUGbZSaoE9LDd2POLC4wNlxmvVOh0QvxFp4i7nnkBXRDCyuKyBgaW91NXrpgkC0OMzQ1jqK7\njExnGN1/jKY4QSpY466bD6NrGsK2WW+3CelHNUgpObe+zuE77/zFLEZ+GL5biACk03nq9Qznzp3n\n2LHr2NioUC7fwi23mDz77JPU6yZSQhgu8K53/Y+vKSstFArkchr33/8I6fQEWbWAbyfQexG69DCA\nyNDZrsdcvNhFlRZKPSJ0bZJ0CUmhMYWFSYeYkNP03fpSZNihhCDBBBHgsIOBoIZFiEDHJYPgEjZd\nplikRxGHBB1CklSxkExg4aOyTZqQaSRl1EGlLTmFT4I2Gk0S2PTo0MEhxqeChU6OInkifBxGiNlF\nxHrfVFpASZ8mFhUsVceXJuOKZDmMGfc3ccQGQhM0DZfIj9m4uI2eWOWGwzN86i//kjPHj/O2/fvZ\nf9VVNDodnnvsCUxnB0NVidQaUdTGjSQpvYidyVDKZllZOc+NN06S/iEa8583PPAAXH017N59pVfy\no+G7JNbf/u0rvZKfPqanp7n99qv464/9ew6kRtk2NBy3ys3791CLIs5eukQml+ND/8v/zInjx1l9\n5HnctkLkQ9zbwfMaeL0WGTvNdhwzokp0fZ0tYaLJBP0s6zUSAxVFX0NjDBxGxogwiQjRiNBYR2cD\nnwwh4whsBEV6TNLhLLs4hySkgYlPmZiQmHkCdDbIIogZGfgSrdHDYZoJXAoD+rnAIoFPkU22KWJR\np889WGcGlxCTEgbr1PFFxB6jCAKmrATbocJLvkqKGWxNgVDBcxSyxiiW6nCoWMSIIp7f3OTphR1W\nKltYmoUXBjihx8GpWerho9w82SNlGvixxvLaGuOlEkII5A9RQ/wsoes6v/Ebv8ynP/0Vrrkmw8mT\nWwwPdzh69Aizs/t5/KVvI3e62PZuVNWgWBjDtos0ts/w4Q+/Hykj1tZiTpx4lFZLks+Pomp7MYwy\n9/3yu9nY2OKhh56m0WgRhqvoepkgABhGUgW2gDYxTVz24OAhUEnhAS16GAiG6dEBQpLU2CaLJI9N\nk5gOIVkC8ihcGuizEgiGsAnQaZCjgIlJCgcFiC97xmjkyNBmmh41iqxxNZI1AkqkWMKhiMMKz6GS\nQSHAo8aUplMSKuk4xnYdDF1wIAubzbMY/hBeKKkGAetaTCA2yZkl6p0aHUeHikcm0+bw4SPYdptD\nx3IEGzu8dOYMnVqNWr3Oo2trfZnv5iay02H02mu59U1v+rHP8c91MfKDUNUE7XZ/Vz46OkS1WqVU\nGueuu+6j2dwhjmPa7QxHjx593c8ZHR0ilTpDr7dML2qRUSRd00SLM6RSw2z0Wmh6jnptnnzsUVDy\nuIpBN+6hMIYkgQ3kUNhCp04Ngw3GhIomISTHFhcYxmCIGI2YVSBGRxmkyfSdEjOs0yBiDYUUNntJ\n0KGAQ0yaFA2ywCI6TdKASkQbBY8U0cCKp4dBmZAhosHFHtJCISCghCDCQEGhgyrBiC22vJBhoRMo\nEi2dptPcwQwjdusKG7rO1Ti41TPs9EzaS22Kk1lot5n2PLZPnWJ1Y4NarUax2+GwjDhHj1CLObz7\nAGvNHTba62AXqTuL3DS5i3vv/cWzB/3Up+D977/Sq/jRcdtt8Od/fqVX8bPDzMw077rlCGPJJJ47\nytLcHA3XhTDk6TNnePuv/Rp33XMP1WaLc4+dIe6tkugF5BJ5diIfV9FI9toUp0aJpGQma9Btr0Cs\nMaHBiiuJMKkSU0SjRIDFNpsohBwA7ME+dYMiDVwEPj4eGRQioIFBSIKADkNojJDu9yzwCJCsEHIb\nF3mBGlWy2NjUaQ1sE/teFhGSLjESDRWDKhEBIdvsxicLbNOgQQ6NmPHIJZRN9hSTWHFEKYwZDVSk\n1UNoaXqKy+5ykY1mBO4ShweufZcqO7hBi+r2C1iGSSGZpiALXFxZZW85x5htUUinCaKI48vLTE9M\nkJuYeNUAwzcS4+Pj/PN//rssLCzw2GMaZ8+G7Nt3PVEUUm01SakFQKLr/duarpt4sUGlUkHTNFQ1\nZmZmFsdxSKcthoePcv78MoZh4rouQSBQ1ahvAKlOE0XzxPHT9IWfG/RVNRKVS6iMAjVyFOhgYBBh\nUAM0fFYG3eoOZapk0DEQtNlihQI9/n/u3jxIsqu+8/2cu+fNPSuzKmvvru5W71paVtNqSa2WBAiJ\nRWBsjMEMHgeGeW9msCN4EzMv3gvsF/PPi/fCjrEDvxgmsJmw5RUMGGywMMggI7S21K3uVu9dXXtV\n7tvNm3c9749MtwEbAzK4JX3/qcrMupkn85y6+bvn910GDIMIikyhkUdQoYCHQ5aQLGkaQIxHRB4D\nhRzQQZCgSJsKu+kyzBW2SNEjoMXBUUHbpYtDQKxZ9KOYvpCEmkbBdRFhyJ17pvj2pQ0GA52qMyCb\n8FGNSSYz2zELKolykSPH7qXVusoHPnCc2dlZFEXh47/yK3QuXYJajdRgwF7bpmFZXG61ePjhh3nz\nv9AG+jVVjARBg5mZ2wG4774jfOpTf4lhJEgmM2SzRVZXz3HzzfMUi0Ucx+HixYv0eg5TU5Ns3779\nOnfE92PuvfetBIHPN8NVlOUNnG6brmczCPp0fJdB7EDYIyVKxFE0ktvmSGCjM5ReJRGkkSxjMmAd\nKW0kaSpcYwyTMQxMQKfDBJKLgM3kyB11B4MRPWkYlrUM1DFxgM6IzAbnsNHYjs2Q3OfgE7CJQpcA\njYgJDNIEVJFIoIuPj4IzyrMxgIAMIT0kndBjTPhoekRCj+n6A7oxzKsGVQXQbKYT4wyimHqrTn1p\nC+OoJAoCJLBYrXLy2WdZGB9nemyMtA775+ZZ3Wqy0t4kly5RnIo4uDvP9qNH+aUPf/g1R1zd3IQn\nnhi2aV4r2L8fKhXY2oLvyJd73cA0TTCM6zLx+W3bqFarbNRq3DI+zi986EMoisLeffv4y8JTdNav\nUGJA2xl2+mPNoh+02GjUccwCy12dAfP0tR5F08MMhuTYovQYEyY96ZAGBG0clvHZiaQ18knNMk+f\nVS4woIdPjgSQYkAbSDOGQkyARIwypwR5FLoYJJEjq8IE6qh5amORIUbFJ0TSIULg08TGRaePAvgY\nlHHpU6GCSkZTSJeT6EIwbZr0Y8m65+LqIBSVpu+jCkHX87h1csjXqjkOX19yCaJ9GMokupam2V/C\nYZGcoVIqZmmrKm6jga6qVDsdrsUxP/POd96oqf8uGIbB7t27KRQKXLv2hwwGfSzLxijMsLmyzrhh\nk81OEccx15pbpKduwnVdKpU6tdo4k5NDxU23u8na2lnm5kLOn/8GTzyxSBSl6PeX0LQpPG+DODaA\nOYaFyAGGMt8OIT6SLTIoODSAGgY+Ov2R+nEShR45mkxRJiYcib9V0tTo4SNIE+NjouMTYWHRJiCJ\nNzqPG0j0kdS7jYeHoIeBRTwK0lMwMYhI4KGgEqPSo4OOz6RmMBuHJKVBJpNhpd3mUrvNpKbh1Wvs\nSgT0oya5cEAfm9W+zrV+n9LCAkeO3U8+n8d1O6yvb7Fv3z5eOHGCg+Uyqm2zfOoUU/k8Xr/PS8vL\nOBcu8LlPfYq9+/YxOzv7iuf2VV2MrKycZ2JiG3Ecsbl5hR07Mmzfvh2AHTt28Au/8CY++9kvs7Tk\nkkwmOHJkP29+8/2srKzwP//n5/G8DIpiEUWn2bkzy/ve925M02THjhmuXFlkbm4fb333v+dvvvQH\nDE5+i0gushWsEakSQ1lAjSeR0iWIfaSUyGGANDEqCdUklAGGJjAZICKLVuSRIEaik8XCw8bFAWxU\nImximvg47EIlN7oaChDEIzNghQ5dmhTwMGnSAybJYxEgGEaSD70d+6wBLsO46TYCG40DhIQIYjQu\nYlPBJEFIjxQSn4g+K+zExRaQS+VY6fdpRSGXhUo5lOzUc+iaMfRBGbjMZad47rmTTOyc4/TiIjcn\nEhzSNLbrOlvtNjU1ZGpMJ5Ge4sn1JdLbxrj14GHufOMbuef48ddcIQJDx9V3vhP+lbLBfixQ1aHP\nyLe/Dd/j0vy6wNzcHDKXY6s5jLlXVZXS+DjX+n3e9Mgj19fZ/v37mN5WJtHdR9FI4HsOU1HE5asn\nafUM/qoZYKlpDGFiaRqlXIZutEXorTApbISnsR5HhJhE5LCwSLKGj49JEp0iCm1gjZtwOccifdLE\nQJKYLgYhEfYoFkLFZsBQQSeok0CiUyJiCTHSYtTJkidARxv5mzhUAJcdCDKE1EhRYRIHDxVdibkl\nm2bTcSgUizi1Gj3XJXBdmoGPK8YJ9KH/8rlWFyyPqXyKWrXO585coOfNk9fGGCjxkEKvz9Lrr7A7\n3cYiw5333cfG+jqnr1zh9uPH+cWPfORfLSjvh0WpVOJnf/Z+Pve5x4miFONTJV7a2EBNSp66cp5K\nz8UozrFtXKHZbDM9fQtx3KfRWEIIi8Ggy2Dg86u/+tM8+eQL7No1xtpakyiaQ0oTz1sCdjNskY0D\nM4A3EhkMz+SwSYg/Ckr0EOxAx0Gwk5gzTBCi4hJf9+WFPC4VYmImR426mCwmOiERIT46a/SZREHi\nEtG+LgHWsAlR8BAskiCPRkAVnZgDqMQEdCyDy0qCbXEEQuBoKknDwGk0KAFJw8DqOIR+jCsSGHaC\nrTgiziS58+1vZ9v27dez3sLQI5kcyuwb1Sppw2B1c5PtExNUNjbwm00mDYOBYeBtbfE/fvM3+div\n/Rq5XO4Vzemruhg5enScF198AVVVefObD3DnnW9AVYdy006nw7PPnsTzNHQ9jabFTE1NoGkaf/RH\nXyKR2MPExD+oNy5dOsVTTz3D8ePHuO22W3jqqVMsLb2M7/uUZnew1d4kRZKHH34fX/nyoyxejOlL\niRNBjix9qqSJcdkEpomESoTPBhEyblGOE6yhU0WQGXGjAwwaKCToYKDRBdYwCNmGMios/r6JEwMO\ndTzGUCghqLGBTZYUSVR0Auo0gQFjRIR4mNTpUqSOBA6iYKDTJKKDSp6ACpJnMUnREDpStkjQo6la\neFqGJcehXJim5IdUxXCRh90GMyJiIAQVKcg3+1xodLm0vs6YohCGIWEcoysKpThmTVW5943HUIFy\nez+/8vGPY5rmyDX3tQcphy2a3/mdGz2SHx1/zxt5PRYjmqbxrg98gM//wR+wePUqnWqV1XabXYcP\nf5epnqqqbNtW5KkvnSXSMyiKpN/vE/YTRKGFL5IkrF0gmmTVmH6nh0iM42pbDAZ9CnGIQ4TCNgQ1\nJAUEGgq7EdRRqGORRgEcBAkKmMSELGGiECMZ0EbHRtCgTURAGlhGISLJOBFVYuoM8MmSpk/IJXqj\nNJOABjptJtAZwyJCYZwqPQI8MvgYErqDARfCkP6FyyTiiFbgUQlj1iWEwSX6ShE1tQuhRCQEfOX0\nVY5kMlzrChJWmdCTSGIms1mEIqiKWUJRwQtDNppNGorC3L338nO/+Is/kjzzXwOVSoUXXzxNvd7m\n+PHbyGbTCHE7X/jCGF/5ysv0BjohAmpttMQaTz/9ApnMrdxzzyQXLlzkxImzSKkjRJYvfOGrJJNZ\njh9/G5/+9KcwzRk6nReJ4w7Dr8c+YI9+WqP7fGxsMqiEBET4SNJ4bDEgABqYmITYBLTxEQxJrgGQ\nQqAjUYlIU6WJjoJOTIEmAyJqGATU0XEI6NMlg0eCMdYYOrl26JAnoodFQAIFW0hcy2DvzAJOq04c\neghg5+6dBEKSrDVoBH2abkDGcynk8nihz6adZld5G2e6HnPzs5imiZSS9fUVrl19hq21BEtLZcbG\nx1nxfVAU+r0ebrNJKZnkquOQTSTwVRXb93nhuee4/01vekXz+qr+xnjooTfx0EP/+I1JKXn00T+n\nWk0xPX0E3w+IY5/f+cQfsa38Gc6e3WDvoRKpVP76FVO5vJNnnjnD8ePHSKfTvPe9b+PjH/8Nrl4N\nMM0smdQ8y1ee5TOf/r/ohxaOWyMMJ2kSEysrpKWLwGKNNoINlDhDQJ9sNGCfkUZoBrbvcTl2Rp3E\nLjY9Igr0RhXtBml88ihsEVMgpkPMRTTWkECbBBo3AVUiJDEH2KRJBxWNLWbwSaGjISkQUiFmi2VC\ntqGhjK60VEwCNHqYRNyHg1BDfGlQo09F2Ows7KQTBgy8TfC6KHqalDmD4uusxn0GskcY+IT5W/H1\nDGOGRj+uklFt4qRGs91my/cpl8vcZBgMgoB+EPCG++9/1Z24flScODFM6b3nnhs9kh8dd90F/+W/\n3OhR/ORQLpd574c+xP/3G7+B1DSO7tkD3S5/8Fu/xds/+EFmZmb4vd/7E9rtErff9/N0zj5Hb6OG\njEC1yjT9LVKJBSRZ/KBNY9AlIwR9v4MrA66FAxJoaAgiukgEAxbpU0CjjsslkuSIaOMTsYpKzCwx\nDSJMFAT7FZvLcZsKEh0LcEZcAp+YGZo4wDp5YnYjqRIRkWGLDA4xEcHIWXmYidPBIkEEqNgopNCx\nkNRdH0XJcjkaJxx0iUKfopLgWHE77X6Xmi9Z9C7ywLv/HcFA4W++/HncbgXVtpA+uNLAkApuv4Np\nWqhql32HDnDrm99MZvduds/Osnv3bnRdv7GT/j04f/48f/iHj+H7KZq1dRqbVymULD72v/8q09NF\ngqCJouTI5XJkMrMoisezz57mllsmsKwsly6tMTGxH9NM0GicwzBSPP30V5ma8uh0Ful0qmjaDLAE\nXGbYmhHAwqjUHPq9WChExDQI0MjhsQsoolAlpoVLhgo201hIegwt/Mdo08BkgpgNIgxiBEt0gRoJ\nGmjAfiRtYtIwYoDATcSYI2cpQZ4mARvYJGkziaCFZMKy0KMOlhwQGhoyn6UZBVy+dJWUVGml8mgh\nbMWCSr1Gw1Rxc0Um5/dyqxGxvPwEllXm6uVLdNZf5k23bCM4c4YvnjjBtsOHGWSzkE6zvrxMQkoq\ngwFNTWMhmeRSGPJT8/OsXLoEr8di5PthdXWVlZUenmfx/PNPEIaSxsYJpmnRn7KYcFXWn/4rGtsP\nsP/W4wghUBSVMPwHw5yTJ8+yfftd3HnnAs9++9uEWwpKcjft3gUmadGNq2jmTgzFRNUzDIIuNa+K\nZh9gMieQziUmuk1ysU0Yq4QyYsy06bkubbosozFBnwSLIwqaNiK2lVBoE3CNoensOguYpMmwjhxJ\nBYfh0R5ZQNIhYA6FFAYWIRF9HAxMfJK0cKkQsIWCgkGPJBE9JAU8VCXGiwe4MkBTFDQRoSYMTCek\n53t45gSeDahZ2qGPH6SoOD3M1BQ3H3of1Y3n0PQauIKNQcjsRJ6f//CHuXDyJHEQsNHtotfrTO7b\nx9FjxwBoNps0m02y2exrzlvk937vteEt8k/h8GE4fRr6ffgeE9PXDZ76u79jQVFYuPVWVqtVXM+j\nJCVf+bM/4+iDD7K5Kclm84i5m1jbXOPy0jJ+twaGh6tMoise/UEVLRZ0KdCTAWFUQ1MlbXbxEg1K\n+BioeOhsYBExhY5OgEWdHlnW0dCYxqDBGhUc0hTp4vJCPMCnjxgVEwU8QCMiR8hFYjocpMduhuFn\nHk1qpElRwEEnIsRggwwWAwwC0vSosp0EEQX6hDiiTULmmRUaV8IiLlOMi1VyhGy2+2i6SS5RYkHz\n2Vzf5Lbb7+bgHe8kii4yUcrxjSdeYHzibpxWmzDso+sNMok2t7/5/XzwIx/5vuZzNxphGPK5z30N\nXZ9l6dRXmYgjZhNZNq6t8d/+z19j3VOZnDxGobD9+jFxHLG4uMhgsMhLLzlImULXdRqNa1iWg5Rp\ner08V640gCmEKCKlTzK5HcdpIOUY0ENQJybEYIBBH4cNmkSEFBBMMAw3tNDIMkyWabGFgWSVPBGQ\no0uTNjlylIjo0eEUKn2SSALa17NnOsA2hvbkdSIiukxgEaCPdtMlOUyq9IdEZ6HgqpJACJKJBIbr\nsplMUk6ncZw+qmFT9QZYdpo40ljzJF7cQzFMbrrjrdxx9CFWV5/hl3/5HVy5cgWl9hwP3v1mEqN2\nzXwc88yzz3Lsve/lbKnEU1euUFteZnZsjNlikUu+z+HDh/GDgHSh8Irn9zVZjPR6PVZW6tTrIbnc\nNrqdNcZ8H1vaeD2XsVwOKzHGhWsv05zfS6FQplJZ4siRPdef48UXzzM+fpRqtYZXrZIzDEwtSy63\nwN6SRrddo6Z2EIyjqwaqaeJo2wniFANf4gdF0rKDI2JCICkEXuCSQZCmQx9YwkJjkgAFkwEmkpDT\ngIqFBzRQGEMnRMWiADTpo5MlYhiJpVBE5ykMeiObNY0xUkSkcPFQaZNngw7nsJkGIkxCQtrkaeMi\n0WWMiqQoDJxowOMrL5JXFJQIrnXr9M0D2IkpdBucqIowBoyPTwFNjh47ztzcFCsr57j61F9RnCky\nPT1NoVDg2TNnGMtmedcv/zI7d+4kjmO++Od/ztUXXiClKPTimNmDB3nbu951vQ/5aka7PbRVP3Pm\nRo/klcG24cABeO45uPfeGz2aHz+klJx7/nn2p1J88etfx3QcLCHoSElF15GpPFfOniQVPI3wXBoX\nTzItBdKcxLINNhyHVb+F45vklR2YmgKKg6tGDEILRJq2GKMVr6KhA/NEDFBoEXMNkzWmcJhEIUKn\nQReFKil8JiiwHZuzeGgsYDKGSRuDPjZLBFyjAowBeZSRa6sgS4jDGg0a9Ehh4TJNE9hGTB8fgywe\neWIsfBQEUSwxFIs+GgmpImMfW5ooqo0nPYSqkUumEGqI06xgGDoQY1lpjh3/GQwrxVNPPYPUVCK9\nS2E2wcf+t/+Dt73tba/q9urm5iauq1FZPsWcUMhn8jiOQylVottYorJVpbRw7LuOURQVRcnywAOH\neeyxb7O+vs7Fiz62nWJ8vMzGxllmZ4/Sbp9ieVkjjhMMBhIhEkjZANbQ6KGwxDDcUNBDAOOEzAEh\nkhaMUtlDasARYIkYh00UKlwDemjchE4BiPBpMk6bBVwGpOkh2YGCNfJZnQB8ho2hYfZRgIGBgwQE\nuqogoz5NJLfbBpO2hWuaXHYcrLk5/EqFpWoVp9OhFypYqRyTQcxWu4OUeTTyrPV9lJfOYpl5Dt6c\nZG5ujkvnznHz5OT1QgRAVRQmTJPa5ibv+8Vf5OF3vpP/97/+V1KtFlPFIvOTk+iaxvNrazxy5Aiv\nFK/elffPIJ1Os7R0ienpR1BVDbe9yqSRRA0DXLfBvfce5sSJC5j9iKXFM/R6VQoFj6NHH6bRaGCa\nJrquEccR7VYTW1UJ/QBQEcTYdpKpwhSVXhPbKKNrIZg6bgeUsEFaJtD1HLpVJBV06QZNDJEkBNqi\nw5QMiQlYBTqUMLHIERDSwUchZpOdSGJUzhMSMiBJDhWXkCZdPPqYCHwEWxQpENBBJ4GBgYeDjmQo\n/jJxEYRsjER/FkKEjMkG05pgm6KyFASUibEJWCTipjjGVtN0VQ0tSqBIFz/WyOay7Dh4iK3KEyws\nZHjggXuuQiResAAAIABJREFUm5Xt2XMYp1enqm7w1MoKoZRse+ABPvL2t19vzTz+1a9SO3GCu+fn\nr/sSnDl9mq/bNg+/4x03arn80Pj0p+HBB+E1nOt3nTfyeixGhBAgBE8+/zyzYcj4d+y6Pb64yNN/\n+zUyjYD98/t5efkct1kJGo0lOhEkRZpiELPkBShMEourhEqEqijkU3tp9Nr4gYKISghSRKwgh/oz\nYprY1MjSZ/+IKt4hokuLLCEzDPleF+gyYJYyBQI8kugkKVAjAK6ywFCFZxCTIqaCQMUiRQobjSQW\n0KVMRIVVPIKRHVqPLhUKCCIG6KpGP47pSYUwTqPi41IlGetkkhkkXbzIIzIEY5PTFItFXHeJRMKk\n3W5z5Mhb2LPnEKdPP8bP/uz9PPTQQ68JU0JFUQhDH6e6ypRmc/bsRaJIRcqIINgiayvU65dIpQoo\nypBb2Os1SSZDDh06xOrqJidPutx22xEMI0m9fpVKpYmUm4ShpFAoIcQEtVoFGEdGGYx4kaLSpBAr\naDEsEyPZSURMjTaQQFUXgCWiqEJECnBR8FExSdPFJoVHDZ8AnzQuASU6HGCAgsIGIW3AJEYHSgz3\nzMsMWSYdIEWTHhEpEkgkHdlj2oZ5I8VGFLEZS6JBTF/6yGCDUrvB4elp9EKBrWaXp50WL7oRiSAk\nFj1aIk06exfddo/Lp/+Cj/7Kb/3QZmW5XI5f+c//mb/44z+mV61yoV6nr6oce/e72bZt2yue3xtW\njAghHgJ+E6hJKX+kDr1t20xMpGm1LpLL7UAoGoNBh7Thk83mGS+VuO++DN88cRJ1KuLBt+xDVVU+\n+ck/pNeTSBkgZY92+wyWVaIWx2QSFoNglVTSp5Qb41q2xHRKoed3GMsmObdUJQo9imLYMAl8HxcT\nTemSFE2ENqDuu5QEZIhoyjQp0qPU3kl0THQmSNDHoMUOFK7ioNOjTUyODhYaOVwCOlQBlQQJDEyK\n1FkmjQY4lJHECDooTGJRoYdglpgNptAZ17oUcikq/T6eYZDudChoGhuKgjqAXYrBQNHxlSQytlFj\nn02tT6a0HT9oksslGB9Psr5+nsnJXSiKyubmIrl8xD13vglF09h/8OB3hWcFQcCZZ57hzpmZ64ta\nCMHemRm+/dxz3PemN71qt38B4hg+8Qn4/d+/0SP5l+Guu+BTn7rRo/jJYXrXLi49/ji3fcfacwYD\nJnI51jfX2ZGfZuD1aTYrTPVbzCVtrvXa6PgY0iYhBL42TsFMMJZNs96tY5vT9IMYqXTAh1huJ4hc\nJCGCFDZlApZIsIiJiYNPnT4TwB6GgfOSmMsEvEQLwTjmdYGmwCZDF40ZQlzgBQwMMqiouHgYhHQJ\nmEWSxEUVCiU5QLCEZAWJwEAlgaBAyErk0cBkS01jq0V0BareNcy4iuknSGVtBqLC8iBmu1Pjz/7o\nNzDCGmrb4ut/sYSVK7H34Dz/4T+8jyNH3nBjJvIVoFwuk89rnHd7XK01sMwChqFSrS6haQkq9Sp2\nao1m8xKKYiNlhKK0ue++fUxMTLCx0aFcTuG6TaRUURQNKV2azXV03aTdXgEy+L7EMDxyuTHc1mnK\nukDxAX2MNBoyGqMtXDQRkUiMI0Qep3t5REwdZrmr+ORpM0cKgYWBRQ6fS6xjoTCPhoVCl5AaPg4K\nF4iYAwyGbJU0wwgRGxgQk6SFTRsHha0YVGExME1EILHsMuXMOF3X5fz6BbbCJLJlYmohGSGwem22\nywy2PknOtOnELoveWWZnbuWm2TS9bheAXXv38sVvfpP5OEYd9aqjOGbT8ziyd+/1uRgfH+dDH/0o\nq6urRFHE5OTkv3j3+0YH5d0CfP1HPTCVSrF//000myZLSycJlTodvcO+uZswtB66YRALQX7XAh/8\n6P9Cu93mk5/8AuPjtzA7myGOI5aXz1GpPEexeBObbgNvEGAmVpgwbc6eeYmVfo8WGVJ2m2JBoG9l\nMZwacVzHixMINAahQdMK6QiNbuiyTYObTZMVz0MLLCSSIoI6bVxMTExiuqQZUMGngkOZARXKLNLH\nJCJWBF6skcEnxiMgS4NFIrKsY+KPIqRVdAwMksTsQuMcLaQQzFkppJ6gEzaQlsWLvo+tKHQTCRp9\nlzmhoqBiyBhNU0gZGbx+lW7rOc6ePo+pxlgqeLWAg7c36bQvUBgrEjqblIkZnDqFF4Z8/plneON7\n3sOBgwcBGAwGKFGE/j3bvJqqoo0efzUXI1/5CuRyQ3nsaxl33QW/9EvD4uq1yHv5Qbjtjjv4xqOP\nstxokNQ0vCiiryjsu/VWzj7xBAcOLHDy5Dm6nQqh12MskWAqXcTFIhEVWO1UyJQmyVp5zDgg6LXo\nOB3Q+mTtJJ1WBz8IAAeLARbTSDT6CDQKDHAQgAscIkZjWIwkUJgk5Bo9anQZw0CijKLvfCwitlA4\nSxaP7agY6IQj2e8WO3DJ0GMSj7SUnBq9xjgxNwuBiqSiqCzGkpqUrIoBYRzh+VtAiKMmqWfT9IIN\ntpXKNDo9ZjQF88LX0Ad9yjt3cv+xu0kmElze2KB4U+k1VYjAcGfk/e9/hG/97ddZba4xnVWp19dR\nVYFqTyC1Iorik0qtMD6+gKpG7Nw5x/vf/y4cx0HT0uzYMclf//WXaTTaaBpYVowQZfr9NOXyIba2\nzhHHAUJkCKIzmMmQji+Z1AtE0iMI+8TCIdAsdDR8v0scd4EtVHwENWw0InzG8dFwYRhfh0KfBRRa\nI8lCm5A2CXzShHi0aeASYzLcQbvGcGdEMmzXmCM9V0KopKSgO/CoeB7jRpqc5lCrLXGq2yOKpila\nZUQgsc0ELweXGadGJIc7gcmETtFKIwYd6q0KCv/A85ibm2PPsWM888QTTIyKi03PY++xY9/lIbK1\ntcU3H3uM5YsX0XSdg0eOcPfx4/+iguRGBuW1gFfkY28YBg88cJgvf/klHnjgbei6wfmXn+Lk81/h\nzt1TnF1Zoamq3P/ud5PP5/nSl75KOr2DZHKYq6AoKtu2HUDKNm996yHuuGOKk9/+O0x3Jy9++ylq\nToCaznFTRmLqBeqxIDc2QaMRI6JrSPqARl4ZMGUotJNFcp7HrONgC0FCCPoiJJYJqtTxyeEMTycY\nVDFo0EFhGpVpFM5TZZU820o7SRoCVTTpNNosuQYd6QEGJgY9GsxjIxDU0AkxUXGYwEBlQFvqtOIu\nc3qJajxgdt822ktLyF6POyYmOLe8Sj0akFGG3H7NShMJyVakgjLHTeWD+J0u/WCZQrBBqllj++Q4\n6bkc4ZU6h7b/AzFsbjDga5/9LAs7dmDbNqlUCjOXo9XrkfsOT4Ke6yJs+4ZlWvyw+O3fho9+9LWd\negtDw7Px8SGRdZQp+brCwsICe+68k7LrEvb75JNJJicnuby6St1xuHDiBGO6Tt4StH2F7YUs0jDA\nU/BJUbQTuOmIZDpHs9HDVUDVNygUBJZZoN9dwXdXEGwQY+NSQQFUPNqY1HHIEiORmKgMiEaPS3QE\nCTyq+AwwSKCxiYdHhSwabSaJGKfITnwCPOqo9EgyhuQqJcMmiAI6sU+AQl2mkWQ4icSkx5T0MGRE\nAhiXAwytwmK4RaiMoWkLSDPiwZ+7F81rkbh0icOzs5w4c4apQo5Kvc63n3qK9zzyCHfl83zryhXa\n7fYNd1XdGLk5ZzIZZr5jV/X7YXp6ml/+X/8tv/3//C4btU0UQ4V0iU0rw1T5FiYmAsbGYt7znuOU\ny2VqtTqPPvp5NjdrfO1rj7O4GGAYU2SzB1GUBO32ORznGWx7H6a5HcsK6fcrgIrn9Zkay9HtGjSd\nPgU9QugRA8VFF3ncvouMXRK2hRoKJlyHTQI8ioToVOkhaZGnj8UEfcCnxYCYAQpbjJOlyAALRQmp\nxQla1Mhhj/ykHLI4ZID96OiqhhtJdEXjsozYiiMOmxp9YwwjWcBwOwykjZ6YJZfK0+5vYgYRMsqx\nRZKcDql0Etu2kXGM4g3Ycs5z6eoYLz3/PDt37aJYLPLGt7yF3fv3c+ncOYQQHNmzh9nZ2etz02g0\n+NNPfpI5ReH47Cx+GHLxiSf43MYG7/3gB1+f2TT/HO6++yiKovCNbzyP50kmJjUe/vWPUSyOoaoq\nCwsL178ANzcbpNP/mAigaWnGxsa4++67+ZmfeRe/99//O51mB+dqjUrLxhnkySZVFjdfIm6dZr9S\nIhQhUVxFU4sYcTCkldpJ9k9PU7lyZRgcFEXYUZ+rkU6faSLmEWSJcBhwkU0C5inSJsahS0gf8Dhf\nfZmCHjFtmdSFoCuLROwGTAYkifkGm1xDZw6bCQQxkGUdly6baMxyKfJYaa0TGxahO85y0ET028iV\nFTJC0EbBjHzaqMRBkqq3QTPMMD+7QK/bQhcOu0oFLE2l32wxnUzypS98gZ/7Hq2rbVmkw5Br166x\nb98+hBAce8tb+JtHH2VXEDCWydDq9Thfr3P8ve+97g/znXAch263SyaTwb6B8o9z5+DUKfjiF2/Y\nEH6seOAB+OpXX5/FiK7rvPGnf5qv/fEfM1cqkbFtFisVHj9zhvsOHaKzusqYrlPO5/nrlRonnQ12\nTRSRqqCvZSlN3Uxm+zQvvvg0rbZPzBoiNnDdHSQSHgP/KpIEJjswmKFPiI+DAdSpkGZoCBigsUFE\nWijoUjIYUV27QJ8KESZtYlRcdEpUSJAiDaRGcWuCkCwxAWkEQrVJW1m8IGLNc+kYM6S1eXzXIYhj\nmtJjQ15hP4IxTCpEyNBlt1Doixr1sEmvpfKGN/wCf/foo9w+MYEEZBRhWRb5MKRVr7NRr7OtXMZQ\nFFzXveHFyCc+8VmESCOlw+xsive//6d/YI7VwYMH+Km77+ellyp4Ax0zmWcmNUGrdZlyeQ5FCbEs\ni6tXr/HlL5+iVNpDrSa5fDmF67bI5Qq02018/wJxDFKmyedLNJtX0PUypdJBms02Iu5jKBHluTQ9\nR0V4A1r9HjoxbWcFgUk61cVKRPjtBlvoRMwRYY0a6SZ9lplggEMNgzwONj2SbDFAo0QbCw8TK/bQ\nsYnYhzJqEPYJqLHKFA0UIrZFCqqqs4WkpmrMpSxUVcXzfHwlhV0uMW/Datui7jlM5fOYCRMlDKkP\ndO7cXqLR7bHccYjdgKWoQTpr8fZjxyi2Wvzp7/4u//Y//kds22Zubo65ubl/8vM/8cwzjEcRs+Vh\nkrup6xycn+fpS5dYWVn5vsf9IPzEixEhxATwJ99z96aU8ud/0LG//uu/fv3348ePc/z48eu3FUXh\n7ruPcuTIYVzXxbbtf/ILD2BmZoLFxTql0sx33R/HXfL5PDA0S1peXOTc0oDlLRtVvQlV0Wn2Wthu\nn0ykMplP4idydLsVNqINXMPC1BPcfettGB50Wm1ObW6QBlAjOpGgTx6LBOCNiKYLhPTwyaCRZmuk\nqhkDQhy0OKDqDyCUKNjYIqAvHWJq6Cj0yJMjj05IjpgO0CWBR5IMLilN0I4SyMQsh9/089wWRnz5\nzz7N4uAis6KP48WciCSxopFQWnTtJKnkBHvnElSWq8znsyQNg6YbMfDqQ1l0FOENBv/oc1W+Jzxr\n3/79mB/6EE89/jjn19Yolsu85R3vYPcoSnZzc5Nms4lt25w+fY5nnz3PcAPS5a67bv5By+Enhk98\nAj78YXgNCH5+KDz44NC07T/9pxs9kp8MDhw8SC6f5+Szz7JZq6GMj3Or63Js927WFhb4ypNPcbKR\nRB87SiaRIDE3TrO7RBT0cHXJoLLB+PhdDAbPMzn5NsbHt3HlyjlWFk+gq5MookFCTiBRSQFdBApj\nuKyxKGziyUl61cucCvsc1jTiIKCOwWWgQgLIkGWATohPkh55YvL0CBB0CMiPjK40Bqis4zIXOWw5\nDt1YUKWAaS0wVt7HpWuX2Bg0MYAk49haCy90KRCTI8STJtVIRSdL3434+Md+jXHdJy6OcdPUFE4Y\ncmZ5HSEFFU3Q6XTo53KEhvGqkN3Pzx+9/vva2iW++MXHeP/7f+YHHDPP/v1lTp26imntxLIytFqL\nZDIe09MLbGycJY5jHn/8BHNzb6DVanPtWgPD2IaqxnjeFaQsEQQOk5M34TgauVyRIMgjhE+5nEeR\nTaQXUEhPQXCeyYTghbUKIp4iY6m4YR1dlURBknY4QFBEVUqocQ6LkBwRAZKYMh5tCnRZo0cFHR8H\nD4sEZQJUNCLS+PgUsEmTo0GEZBMdj0nW6dMCLhCTjQJ83SSbz3FbwcZQFBQvxhpLMTO/g8snz1Kc\n3EW7uYGpSBqDAXFmGFbQCCMSts1Kd4O23wRD45ZdeymXSkwXi3SWlzl75gx3HD78z37+G9euMf1P\nFLFpoF6vv3qLESnlFnDfKzn2O4uR7wdN075vJX327FmefvxxFi9d4qVLLfYcfJCdO/cQhgFra+fZ\nu3eS8qi6A7i4WOHiSp8g3kZSzeC4IX23zkyokk9mGPhrpBJFLMMgKSfYsrPESYVqO8vTLy/i9ecJ\nQ8GE3Sb2u7R8CxUb8BBCoEtJiE1Mmjo60EFhHOiRZJ0pbGyZouMPuCJ9dDQyWopmsEUXSZIJdDQ8\nSmziUqdPlhiQWCSYEQp5TTChFPENj3rtGqZR4PDe21mr5lhc/AZG9ibGzBk8T6Gd0BgvBvSdJLqu\nUsqmkXHM5XqbtfYmqVSbOxtNMuUym90us5OT1z+rge/TVpTvIrHC0KZ/x/dE3Xqexxc/8xk2zp0j\nLQTPX7nGSjvH/W9+D7adJIpCvvGNUz/kqvjxotUaZtCcPXtDXv4ngvvvhw984PXtNzIzM8PMzPDi\n4syZMzx/4QIA47kcsZrjnkP34AcR52pVZGaMhJFmotjm6N0H+NM//SZjYyUsSyMcmKxcXcYy0qS0\niLHSHi5efZFELJC4SCkYoBDSRhNZkmYBI59HeA36ffi61ydCIDHQSZDHIT+6NIhHDZ0t6mxiYlDE\nZAWFJn1y+Ag8IhQcYiVPWwoi4TNA0g8U/LaD40lisqQQuMScDqtkUChhkEDwImkEB7AVhYS8QKFr\nkTElWbvPmTMv05Q24yQx4ogGGi+ceJlN3+fBf/NvXnWGZlNTOzl37lt0u91/dnfk7JkzNFevkOUC\n5y49SXnhp9i37zYWFu7GdXvYtkc6nSaOE+i6wdZWFU3LYBgN4rhAHG8ipU0y+VOoaoBlFUgmu1Qq\nAapqkEyGaOoKP7XvJtxWRLWdZPfCJC9cWEGGTRx1nqw1QxxbdLwaTnyKhDmJoqTw4pAUEh0DZZRW\ncxWDDt6oGAWfcTRSeEgCfHIj0wdBFjnKLJKE+CiUyGJSZlxotGWfNUKmEhE/f2gfz1+8yN5ikfmi\nRV9zWbryHH7Ywhqb4J77HuTCqdMMGlV0WiTSk7zcMwlaK5Riwbw9Qz5ncYtt85XHHuPQHXeQtm2q\n6+s/cJ5ypRKdc+e+qx0PQ47TvyQ24EaqaW4H/m/ggBDiq8DbpZTej+v5Tzz3HE9+9rPsLZW4Zf9+\nFqwr/OWLn6HT2c/ERJGjR/fxxjcev/73UkqqTRepFlFRUBUNVVHp9TViqZJKW2TNCENXESJJ2++w\n4ldA7uL8xQ2CcA6CASkrw5a/QuheQAgHTcZoI6+PFgmM0bLTGScgQZ9rZGiRRMPDx49dBArjSDps\n4ofTRLjozKGwxt9fvBuM4SEwRtdtGRGRtDU0QxINVIz2gMc/9yjZ+TewdyxPLpVnIzHHrskHEIpK\ny+1SHp8Dpcea9yT1oICm65y8sokfG0gGzBcO88WnN7jjjbtJb5vlxJUrTCSTDHyfzSji2Dvf+UMt\nvicefxzn3DmOzs0RhCFPnVxijBznz5zl0OHDqKrG9PT+H9fU/0j49KfhoYdgFGj6ukAmA4cOwTe/\nOXxvr3eMj4/TjmOklHT7faLIQNcMmr0me26+mf0jkvXy8hM88sjDLC93qFZU3GaPcjZC03W2GlV6\nPQdL64IS0Y8CDDRUJDERCilUWoggprt2lQUl4tbJcVqVFi/3HHpolLDYpEOBGJ2AYZmSZpIkHTYJ\nUSmRQGGTkA0cfCQKCaZZQ2OLCoZeINJ6+LFko7qJKkooUqLjEbGFJ2boyBXKGLQIiJkkpaTwWWEa\nA8vIEioWF2urmH5ITRkwSOSpi5hEJsdz61tsS2q8odlkMBhgWdYNnr1/gBACIXQ8z/u+xciT3/oW\nf/Kb/w29HbDLymEk26yuPEN/foarV5+h01nh6NHb2djYIAwdpJSoqoqqKpRKeVZWqui6ghAaENPr\nNdi+fZyHH34rn/nMn9HprFIuT5KxDxJWWriDkFiEnFhaxQcCUSAvSpi6StsJUOIUsSwRyhAvrqKQ\nJyZGkhyVqFCmjIHJGhWybGcHE7QIaNInIk9AlwQQUidDggBBFUiiozBMNtOUHHNGEW+wwmZvwF9f\nucL2nTs5vbaG1W4zZtus1+vouk4hVeHcS18gL3T27E6xY3IPL152afZNRC7NjmSefrUCccDLF69i\nGwpP/u3fImybe36IoLtDR47w2RdeoOC6pEaihOVKBcbGrmfHvRLcSALrCeCV+cb+AARBwJOPPcZt\n09PYo3+2W3buZG5ignO+z0c+9u//ka7ecRwymSwzC1NcvvgykVdAoGOkZ+lGL2NnNGYKY4yn08Rx\nzLdWajR7GUr6HAkmCIMtGv2TtJUxpChDbKDKc8RcRbATl8SItrSORjC6rRCQxWQVjcQoUElHoT+i\nMFXwZAtBhIaCjySLIGADwTQqGh2GHq2zaYtSNofXaRDikk8kGUQenbVNntnaYr4cM16YwfUcTDNJ\nN46ZzpXwPIP5+THuvHOBP/j9L7Hp+ShSJ5ta4GpTsO/AHvwwyYOPPMLW1hZLly6RTia59+DB79pV\n+n4Iw5CzzzzDG6amEEIw8P3/n7z3jpLsrO+8PzfXrZyrOufu6cmjGc1olCVLSEIJYQRIYGzABo7h\nGHzswwafPcs67O5Ze/3ar9nX3jW28S4G2TIgRBCSXoRymjw9oSd093QOVV256lbduH/0MDBWQEia\nGZA/f3XfvuGpfm4993ef5/f7fnE9hbZYnNMLC+cGRFW9+IOi46wt0fzDP1z0S19wbrkFHn30X08w\n0rd9O/v27KE7FsN1W+TKRaqSxOjZwbHVMvD5JFKpFO3tIfY8/TLxgIAouuTLDfLlBqIcZWl1HJ/g\nwxILCG4bVSxMDOJig3ZRBamGZlSQPIkps0RfIkuifpKq53ASHyniuPhoUUVCAgJnv9kNVE4hIRNB\nQaXGPAFkrsCPgqeFENRuGlYLvzxJs3Yc3A5UKUTDNWmyTBQL0fNTRqR8VhTRw4/gAdQQPQlBUkEQ\n8dQkdTGI6dSYCvSgq3Vu7B0iHYxg2YvMPPkkizMz3PfRj/7cmFk2GlUCAc4tnf9LLMviK//fXxGu\nqWSyQ4iCSCTcRWjuOMvzLyJ0bSGTuYz5+QCnTx9lcXEKUYzT3t7N+PgcPT1ZisUjeJ5Eo1Gm2Zxj\neHiIO++8hUAgyOjoCFNTdZLJXvKUeOnAMYTmIrYrYjpxXDeEQ4SSKeFi4rguBg0E4nieD1kFy2xQ\nc+vIiLhUkJlHQsTBxiJKlCAiIKMRwMZgmgYGKgZBGsh04BKgiYeCg0CeED5k16VluYTEIGVdo9BK\nEpF1IuvXo1UqFJeXuXb9enpjMZ6fmuJMvc5H7r2XbDrN6fl5JFEkqHoYuRqRjm7mp45SX10ipJr0\nt6eZsSx6IxFWx8eZnp5+xYz3T9LZ2cnN99/PDx9+GDGfx/Y8Il1d3HvvvW9JNO8XNoH19SiXy0it\n1rlA5EfEQiGcmRls235FMKIoCn19XZhmjcHhIXK5FVw3gmmWaYZEoiNtJFIJ5qenmSoUmFKCpLJb\nEQSJVukIemWGsJvGL0ao2y1aYgTV6UPiFE2KNPEjIOPHxSKDI8hY3pqXjEUTB406OrWzOn8JdCRq\neMI+bE9FPRtlN5BxKVOhjH22dMyHiVjXKVo1ghLIXp1mM0Fd1PApGjOlSeIJiOqD1ColZusVYp3D\nNJtVqtVJPvKRW9m+fTN/93ffoqPzOlQ1gW23kCSLUgVaLY1SqcSGDRvYsOFnm8GwLAvPtlHPTgn7\nfT40xcZyLCRBOCfRbxi1N9/hb5JHHoFEAnb9YlU4viFuuQU+9KFL3YqLx+3veQ972ts58OyzEBUp\ntAx27b6FYDCI49jMzx/h9tu3I4oiV1yxlR9+42G0kMbJ2Zco1fxEAxHwp5hbmiIa8KhVC5TdNYkr\nCZWU4Cck+Vl0qqRdiPuDOG6VQnWViiBiewkkwrRo4kPAQcOkgEPrrGVmgw00ieORESVedkVUKYMn\nlKk7NrLXwC/FaTVOIzbniAsCAhM03SUQbfCCSF4ICwELldOih89t4VHEFZOYroQhemA6eHKT7sEB\nDEPHwSWS3EZj7iC4ArZlEY+F2dTTw8uTk0xNTb1iWfVisrw8TSSSpFYrUatNcf/9N71m7t/c3Bz5\nmRWGerYjCmsBlCTJtGX6eWLfM9y7/VdYXV1hZmaWaDRKINCL45yhVKqQydQ5der77NjRRTicJJc7\nhSRlGRnZjm3XmJ1dIBotEQrZPPbYV3FdhXJrgVp1FVXZTsDfhS9kg2HgOHWqdhPXW1mrSpRVBKGF\nLO/Cdc/gmIfxaNKBdTavz2WZICIyKgIWFjY+RAJIhLHJYWCTAmCGeSQMPHy4tJFCRcHxQPCgLrj0\ndw5w5cYb2De3j01tYRq1GjeOjp7TBemMRsnn8yxMTdGWyRAJBHDdOYJ6G3NGnaXJQ6Qlhxouumtx\nZH4et7eXK667jlKzyZEDB143GAHYsHEjI+vWkcvlUBSFZDL5lu+Fd2Qw4vf7MQUB5yeEW2Atz0FQ\n1VethdY0jWuv3Y7rTlKpGPh8FWy7SDCoctttn+amm65l/PBhWidO0JycpHJshYYpcHTyOKnG8pqP\no5ACyyQEeEIFBBmfl2KLUqdgVZkjRkiMkfPAIYeFh8oiFRSmsBGJIBPEpUGBBTwUdDFPxqmcVehr\no4CJSLDnAAAgAElEQVRDk24cXMJUUKijSQVygkXWKYPl4RNkVn1VJJ+fkL9EVhboXreZTCaDbXfg\n98ep1QxE0cHvT3Hnnbfy3//7/8CyFAShCMiEQllcFxYXz7C62nrTGiG6rhNtayNXKpGKRpEliZ2j\nnTz68nFMXwd+v596vczS0sXXYH+nlPO+Gtu2Qa0G4+Owbt1P3/8XHUmSuGL3bq7YvZuPmybf+c6j\n7N8/RrXqw/MMbrhhM1deuSZV3d7ezlU71tGp6zz83MucnKsR0BQKtTzdfTeiqDqV6SPojWUkx6WJ\niiho5PFQXQWXFqZZJawKiI5EydVQiCEh4+GjQYW1NEYZyFPAogcPVRAICCJ5UWTaC+CQpuUmMD0J\noVWhZk/i2Q4BOvH7fNTNCglKbBZlxl2bIiZNr4Em25hKkqIlgLNChQCulMEvLqF6VVTJoLd3mH3H\nj1GVM/hMh6AaI18r4xfqDAysLVtFRZGVpaVLGoysWycxOztOT0+cq69+z+s+BD3PwxQkziqin6Ns\n1DFaKi+//BKy3ImmtVEqlbGsOS6/PM1v/dZ91Go1BEFgZSUHQH//x9F1nSNHjjI/v0Iq1c+3vnWK\nRiNGW9sorivh801zOP8U8WQX6fQIhpFkdur7CE6Flr2E6yWxvTTYJQSniGk+juCtWSemkAggIuAR\nR6GEjUMdQXFxbQfBa9HEwSOMQh2FDHOE0CigUSVKCZcALk1cVFwUWl6VitLkmr4t2LaJZSocmZpl\ni08+7zkX8fmwVZX8ygoAqWiU3qzKgVOLVCUPs2XgFyVCfpF0uoOGZaGn0wSCQVquS7VSeUN9J8sy\nbT+RR/hWeccGI0PbtnFs7142dncjnA1Mjs3Ps+Wm1468b7nlRgqFMpOTZTo6dtJqFenq8vMrv/IB\ngsEg4XCYif37uWvzZv5heQ+61snkxEv47SqG4CIIFpZg4HkGOC0sxUfFbpGjgSSIKN4yq4JIyEvh\nkx1WnBwt10bAR4F2woRxBQHLU9EYoMU0LSrUhXYUL0CRIhZBIshYNLAJUiOG7Qr4/X4a5gRR28QS\nJOxGk1ggyUAsg1ETiUQ6+NVfvYtHH32GfH6aVCpEMOhxzz2/jOu6PPLI8whCB9WqTaNRQJZnSKe3\nYRgFfL4Q7W8hqeL6d7+bh770JZqmSTISIRYM0N4l4sZl5uefIRr1c9991/Hf/tubvsTPzLFja1oc\n99578a55MRFFuPtueOihd7aT76uhqirvfe+d3HRThWq1evYt+cdu0plMhnhfHywvs2VwkIAvSDSU\n4IXj+yjYfswypHxd6IpLSDAZK05StHUEJ0laCDHnrbIeE9u0kYU6qqiw4AokRQ3FTXIGcCihUcfF\nJoZHRBCY8TzOeC66oKJJXWhqG81WC1FI4Ylhms5+IoSQZQWfX8K2HUpOC79bYNXVgBRBKUgy0UXF\nmKfpyrSkLIK8QtRfx5I8ym6BsJ5kvF7GN7qJLi/F7JkFrNoCfTGT7dt3kUqtvYM3XJfgJdb/uffe\nu9/wvu3t7YTaskwVFulPrC37uq7LVH4eR5Lw+0fR9bUlHkUJUirBsWNjpFKpc5/5X8qV79q1Vjky\nPz/P/v0TxOM3IUkqMzP7WV1dxXF8LC7+EFVVUZQI4fggq6uHcc04shzGbjYRhCiu6yIKIhJ1TAwM\n5ugUQ5guWJikxDrzgs2KNA+eH8+2sVEQ0NgsqEgSzDsKmhenjokPsGgwjYOPIi0kDMHH6ODl1Iw6\ne06cBH+C47MmNTlHfzCIfnb2udZs0jMywplqlVypRMDnY6Azxbw5Q93UqJotCs06gi5haxrb+/tZ\nchxKtRpLlQobb3hT9SZvmXdkMAJw87vfzfcsi+cOH8YvCNQ8j5Hdu7n6dUw7/H4/H/vY/Rw6dIiH\nH36McrnByorL17/+He6442aOjo3RWFnhqakpWpUljqwsIVotooqM5RSZs0+QUWPEdY0zpRqCm6ND\nrJMRFKoYBIQWeHOoUgvJ8+jyGkwRwEZGIkqLBC0sErKOhEDJ9pDpolfppeE4GI6NiIuOQQUJER1N\nkFGkNF5zFRudFCbtno4iyUyXpnmyukxy6Cpq1VUeeeAB4pqG59aIRGXu+7VfIxgM8m8//wUaJR2z\nIaCqfhzPo9VSWVp6DE2rc999X3jTQjYAfX19vP83f5OXnnmGI3NzxLu7+fT999Pb24tlWSiK8pbO\n/2Z4p5Xzvhr33AO/93v/+oKRHxEOh19VbM8wDEYvu4wffPvbVG2D6dUl8pbA0NbLefbZZ7GaUST7\nDAGWCRgGo5Qos0LRK6BKfiSnwZxlEBRk8EwKnownWhiiwrJr4+EjgZ8QLikC1CjTxGSLILPXc1j2\nNCJikJyZx3aC+OVJZKlJzVJwaCI4AnKlSY+qUHLCTJnLOHQyEE0S8vmZbTj4tS4CwhlQQwz3p1ld\n3ccV3VFyjTCTlkxOSOLV44BHNOGnQYu7brueznQagJViESMQYGho6IL3g23bnD59mqWlFeLxKMPD\nw28qcdbn8/ErH/8QX/rLr1HIzRJSNEqmgZmMEm/JVCotisUFqtUahmFjGAUSiTzj4+Os+ynTg4uL\ni1iWjqoGOXjg69RXLcJSjIzSS7G1RNM4Rm/vjayspKjXI4RCHfj9QXJLE7SMSWAIGxFZENC8BKro\noIpzJBQRyzTwISAFAiiKi11cQBNlLMFl0VHRvQEkx0LxCjjkSVMjwpqQno8mqwjUEPEkj1pxjj2r\nkGq/glBUYnTdNTz96Dd54OA4dw53U7Nt5GSSVCJBx8AAB/N5tFaLkcsv548++1n+zxe/yPZYDFEU\n2b9nD/byMmFVZaJSYWJpCbWn55yq9utRLBZ57sknOXHgwDkF1iuvueYtJUS/Y4MRTdO45/3vp3jz\nzVQqFWKxGLqus3//AfbtO4bnwfbto2zbtvW8/JFGo8EjjzyHJI2wceOaUNrs7Ax/8zf/SDV/hurx\n42zOZEhGgiwf28OKa1FHJiSZrJMXcUQPn5JGlPL0i2VUUcJAQpZlehyHk6JH2h9AsE1ynozYLCJ4\nNgJ5WthonoYlO3jiMrpbxCZFwamjCTquJCKTIeceRfHCxAU/eNC0LaIU8XAJiBqOZ9GwTARBRVU1\nWrbOnqeeZ/1tO9i5czuCIHBmaYlvfOUruKLI6b3jXNE7xNGpZcqtCi1BJxBOYJoTXHXVRq688srX\n+je/YTo6OnjvBz/4iu2XwqCrVFpz5z127KJf+qJy3XVw+jTMz/9im/+9nRw6dJhvfOMJHCeE53XR\nCAhcdkMnjUaASKST6GGFyaWn6cNgJBDCcBu0AQtNnZJnsOgK6B4k0NHRaXkFMiLkhDPIOAhiiKxb\nIYpJEBk/Kroos+ou0VRkkqJOXoviuQrN+hxpigRsCdHWSOJSFAQCwgAy4FkWgmCi4Mcva7hOi7zh\nIXopQv4MueYkQquGXQ6SUdpICE3i4TBOocbU0lHimTXtHlGssH77Zk5bFouzszieh5JI8Msf/OAF\nr6ap1Wp8+cv/xOKijSxHsO1ThMPP8NGPvrkpyXe965eIREI88shTFAoVtnakGR3t5Q/+4EtMTh6j\nWhWwbdB1j46OBH5/lq985Tt87nPJ181rSCQSaJrAwsIhGrlVuoPrcd0WVamKT2pSyE1zUvoGiuJD\n0xwymSzlpTEGfAVyVnjNpJU1F3cLh6gSp+osM6xCXlA5JYCs++gPhahaNp0olOs1PFqMM4foyaRp\nkqFONy6zrHnSpIE2PAzBpSB5nK5UcGWXYu4IKhEqqzI33vF+9r30IPtlkZ6uLlaBernMxsVFQqJI\nyXEIRSLYtk0L+D/f+Q6jfX0MrVuH1dnJ3kOHaCQSbLjjDnbs3PlTl+RrtRoP/PVfE280uLqtDdtx\nOPXkkyxMT3P/xz72phOi37HByI+IxWLEYjEcx+ErX3mQvXuXsCwNy3I4ePBJdu48wa/92n3nsoDH\nxo5Srwfp7v6xQFo63cPY2CQsTtETDBLVdeZrNa7OJIgtLzNrNRGAq3w+5s1FFuw8eqBFueXguJAM\nh1GsIFXDIRSQmNZihFoeirnCFskhTJ0loUDBFci7NTJug17dR84GlBanzCWc0DpSup+looNpCKQQ\nkbHWhKeFIllPxMCjiUZQ9ai7CrqcJOxVOTM/SVtkkH9+6hQ+ReaGy7aSjkbZu38/puuSjkRIhrM0\nGgaFikm+XsaVRWKxEJ/5zEeYmpqi0WiQSqXo6Oi46LMYbzd/+7fw7nfD27jc+XOJosDtt68t1Xz6\n05e6NW8e13WZmJjg5MlJNE1lw4Z1b2itutFo8O1vf5v9z7+MTxVZt3kzew/O0dt7DZq2NuA2myOs\nrOzjwx++juXlVSQ28tw3jzNQdRgMa5TLAcYLLSaaBqIQouiGKNHGiuAhMEW7LLLJpxOyWrS8Gep2\nkAAmDgoIEoqiIAkKMSFIuL2HhlGlC5lKfYWUmKPD8eOTY+RtgxVMskCJHBF/G5WmQcFaJq7rOIJG\nWrAYrxu4apRWy0BWQPH5kIGKKVCSZEYHBmhJNWRkRq++Ap8vQCAQYWbmMNfduZY3Jssy2Wz2onyP\nH3/8SXI5Hz09I+e25fPzfP3r33tT5xMEgSuu2MWuXTuxLAuAP/7j/8nIyGXMzLjMzxsoSgDHqSPL\nBsPDI8hyB3v2HOC22167eLOzs5OtW7v43sPPobk+LMtgoTKFS5L25AiVVgPTzJNINOnq6qFaWSZh\nrhLS4tRaAuCheiC54Cgqc3aVpCJyShUpyCK93d1MlcucMU0c16HatCh4GiYZNPzIeFSpotEgg4MK\n9EoSDc+jXRSZ8gALqq6N6koEzBY7+npxTYv548fZuHUXn/zk7aiqykNf/jLDkQiyKJKMRGjzPL70\n//wZWC6Xd7ezPp1m8cQJjhw9StfGjQzdfjt3f/CDhMPhN1QNc/jQIQKVCoNnxc1kSXpbEqLf8cHI\nj5iYmOCpp8ZZWvIjij4kyYdpmiws7GHnzi1s3rz2FrG4mEPXo684vtk06ZQVol1dnJmdpVGrgePQ\nFgxStixCioKiKPgNA9dxsFebZG2XsGfSrFqURYWaGMbzdDLdl7E8/gxdZole2aXpiZTtEk00/NTQ\nLVgtzaCLoEt+2j2LGbOIP92P4izgNnKAg+cV0YUScU/AxkPFBlEC0SMaSJGvV3G1GNn0FtqCHTRa\nFb794hjFmsFSwWZiLo8om6QifjS1wvDQAKVyidxqibzc4tY7ruWpp/ZSLsusKaVW2LAhy7333v1z\nJ5j0RvlROe/XvnapW3JxeM971tRYf1GDEcdxePDBhzh0aBldz+A4Nk88cZg779zF7t1XvOZxy8vL\n/NvP/yGLp0okAwmgyr4f/g0EUnR3/3imz+cLIAhparUGt912M1deuZOpIwep7dvHZKHAC0tL6KbJ\nKCotTAqUyQsSVUYRlSCu7wRlp0zdamGrYFFHkCDiediux4xlEhE1JMFkpVqg0DIYjmWZLs+TdDxC\nkobrNUnhguix6oKnFNCT3RRzUwxQxpQEZj2LOVsBSaVuN/GaC6TiATraB9nY2cXi6hjXXz/C4vQ8\nPjWI32ohSQrB4NpYpmkR8vkSO3bsuNBddg7btjlw4CRtbVedtz2Z7GBm5sxbOrcgCKiqyuTkJIah\nsnPnDeRy/4wgtFBVH7Zdp9lcYuPGOzHNJvPzK697PkVR+MQnPsjY3hdZLFdpuSKiHCMRypLOpJGL\ny3Su62HdugShUJXHHv4Bop2jUl7BcGUC2hA+LYZZXUJWJep2kyhNbEuhAew/PUGH6xGXVfKGhYtL\n1VXQULFxkGgioFAiRYV5ZEBxXRygAVTwEZAyqIJNNpKk6cocnphn52gfaddmevIww8O/zdNPP82+\nl45zyt8FqBQrR6k0DJqVGgMBP0tulXhc5aZbbyVXLHLKdYmn03z5z/4Mz/PoW7+e6971rtdV552b\nmCD1KjowUUlieXHxTQcjPx8F5heBI0fGmZysEYn0EI2mCYXiJBK9VCohHn/8qXP7ZbNJms3yK473\nPAs1oLNl+3YGd+1Cy2apKArxnh6uvOEGIl1diJEIq4qCT1UZFUR2KBob9QBbfD5GdR+q2KCqSPj9\nZfyhJiG1SUiGVX+GdGA9cUWlXW6iUSKKQFKwkew8XbpDlCUW5scwGpOEaSIzTYscsufhYGHjskSD\nkOaiqhKm52FKftx4G/FUH4ZpIUsqlTo8O1YnFd1KMDJCJLwFjyjzuQPkK7OIigdanf4BBdO0WVrS\nUZQ20ul+uruvYGysxIsvvnwxu+5t5Xvfg1TqnVnO+2rcdhvs2wdLS5e6JW+OY8eOcfBgjt7enWSz\nvXR0DNLRsZPvfvdFisXiqx7jeR5///cPsjID2/q20ZPppSezHlHowcyvMDM5dt7+iuKjXjcAiEQi\nfOrznye4fj3jhkFQEFivKGiCRBKXQSz6vTyydwRVDNOww7TFY4gBP7uGBhjSbOKuQKcaJStqdHgW\nOSfHtG2xZJkk41kk16ZHVfHJCmHNJRmS0FWbrE8mEBDRNJdaawbPauBTfPjlFlV7BkPWCIeTiNIy\njrBEIDlIIBxkenmcgU6dbDxOJBLAaNZoAH7/jx8Yplkhnb648u+e5+G6LoLwao+Zt2dWxnEcQELX\ng1x11S10dibp7AzT3z9Ib+8IPp+fer1Ie3vqp55reHiY3/l3n6Wt00XUG6RTGTKZDKbdwqbC5s0b\nsW2NWCxMui2KL6AQC0fpCPrXtJ9Ui0gmQUUu0xkss7GjnRU9woobQrV0goqffn+UYclPxRFJoJ71\nO8rTRpU+ysi0WEagDuQ9DweY90REOY4puIS0EJY9g6J4lKoWp86cZrV0iu6Ujud5fPvhH6KI/bTF\nh0hFOshXNEq1bkTLIh5IkEz0UCx65POrDPX1MTM2xvJLL3FVWxvXdnTgnTrFA1/6EvV6/TX/T9Fk\nkuqr2IMYbzEh+l9NMFKplLDtNaOhn0TT/MzOLp77fePG9fh8ZfL5NVncVqvFsWMH8PlakExSMww6\nOjrYdfXV6O0dHMrnScRi3Hj99YzLMg3ANk16FAVLlLEkAQnQLAOfIiAFVNraEvSNbkJJpij5NUwp\nTjqepqezF0d0iKESU5OoUoiMpiJbBVp2Dr9YJugJBNUsjrQenT5aZMkRZJUqiiIzicAZCWbFOlOS\nQnLgl8hk2zHwKNWWcR0BSUyyUCySHRjAF08QD/WTigbZvUGjI1WgZ0Dg1tuv5bkn9pMfn2Tq5Zd5\n7tFHmTh9mra2YZ5//tJIt78d/Kic918Lug533QX/9E+XuiVvjoMHx4lGu85bUlAUFYgzOTl5btvK\nygovvPAiL7zwIidOnGBiYoWUHj2nRwHQme6h2rBYnZ847xqGsUJ//4/9NLZt24YTj+MoCt3hMC1F\nIeK5hNAIoZNFIEaNZnORZtNhxnTIdHezalms01VcSWLOXKbpVZBkE0nyqKlBiqaN0PIzV2hRbRi0\nRA9FElEkiXA4jOZTKIgCDWWQmtEBwhAnhGGWfBm2do4ymIDBXpltGzq4/c6bCATqiPoCgWiJZFhj\nYmGBQCTCXGMeLdWFpq09oFZWZgmFGoyOXtwab0VRWLeuh1xu9rztlUqBWOztmVnt7OxElmu0WgaZ\nTIZoNEyl4lAs5kinM5TLeVx3kcsv3/aGznftdddxxwfuoLPNw7AXKNbOUGvOsn7zANPTC+zZc4jv\nf/8AM4sBliyBrp5NbF+3i6uG1hH35YikDdb1+Pj0Rz6A178VLXs1CWUDQXk9C1YHh1s2juASxaOO\nhY8Gw/gJoBFBZYS1Sk8HeAmYkiQKsoKryEwLIqFID5lgCNGdorS6h/LiM8SNU3hOi/n5eQQxjqP4\n8TyXSqOI64aIBNpYrZvooTW17FAoyczMEktLS9i1GqNdXciShCiK9GazBKtVxg699hi/+bLLWDBN\naoZxbttKsUjjLSZE/6tZphkc7AeewLabyPJa0pZtt3DdFTo6hlhcXKTZbJLNZvn4x+/lW996jH37\nXuL48QlCoQhDQ+tYqOd47NRpwrbN+LFJCq4G3Tv5/vF5Ojvq3PbJT7Lvu9/l1L59+CUJ2RZQZB+u\n6yK1mtgti0zAYYO0xHKwyfGwD12XaBZElho1zpTPoLaqOGjgykiSREAJU7Nz6IqOKEepmkV8Sh+i\noFF28yheiwBB6oTpD0HW7+egaVFRk9S8MNPTOSxLIJTQMAMS83MOYU0itW4dA0ND1Ot1Du/ZS27B\noCZAqLedd73rXXzna18jrQboiq+9TTmuw5kjRwiGgryNqv0XlWPH4MiRd24572tx333w+7//zgrC\nPM87F6A8+eTTPPbYfiRp7e23Wp1ieTlHkPMTFsORMIoKC8vT7NvzIqpPQ5abbN4cZ3Bw8Nx+siyT\nSiQopdNrwlG2jSYIeB4ICFiALPiABVooCAp88oMf5J8efRSjUMI66+YrKRqyoBH1XFxJW7OPlxug\nS6yik3IsZqwmOi7JZIKj5TpFIc5gdgil0UBKaUiui22VqNsm3aE0VU+ja3CQoB4holdx6scJRHRO\njo3RbDYpaxrXfuCX8QQ/MzPPAh59fSnuuuv9b1or6K1w66038Nd//Y/MztYJBhPU62VgmY9+9G5+\n93ff+vl1Xefuu6/nwQefZG7OZHV1lVxuDttewvPS6Hqez33uN86V9v40fD4fH/v0p+lbt44v/vn/\nRhQCjGzaRrlcZ2pqhVBIBkL09W3hpeUF9uUX6QkHcRGoqiE2b7uBxaPfJ1c1cOmgXl7EatpIrohi\nB1iwSkSFNddmEwNVCGFKIj4bRAEaNMgoCqueR9h1KYVC6KrO0YpKtnMzkXSW6uwsXUqUTLTB7aNZ\n8o5Dxbb54z/6Iw48M0az7jKuhBjsGcFyLGzXwgm10RDs8z7rsakp0tnsK8RBk8EgS7PnB5A/STab\n5ZYPf5j//xvfQMnncTwPNZnklz/wgbd0j11Kb5pPAB89++v/63neBV3F37p1K+vXp5md3YsgxAEB\nUayQTvtYWVnhi1/8OqKoIooN3vWuXbzvfbczMbHAHXf8KtHo2o3cahlMTT3DyeIS7dd8mMuyveh6\nEM/zmJraw8bNm6ksLCCbJrUTJ+gWBIxqg0qtznytSk6A0VKAsf1HEMwmctVgT6NKob6KRI12ycUv\nRFn2DAyrguwJLDgqrhal2bTwZBfT9VDxk/D5MZwqcbdFTHCYlZIMbWvHKrcY8YK0X/te5ucnOH58\nhUKhyi/90i4CgW5++MNFrrzmKrq7+5BlmUgkwrYrdlAsetzzG/fR1tbG0aNH6VBVlnwmlm2iyCqS\nKBFTVcaP7uN9H9h+IbvqgvEXfwGf+hRcggKeS8pNN8FHPgKTk9Dff6lb87Oxdes6jh59jlgscy74\nsCwTQSjS19fH3Nwcjz12kK6u3UiSfPbv/Rw+/EWago+4oRPS19wC86VlFNWkM+pgzz9J03WJtsfY\nvv23ztMemp6e5sTJaYpFA8MUWXVEgoCDQQuROQQUSUYVPFSfy46+Pvy6zs5t29izWsItLqN4YQRb\nwfJa5CWXulWkUzIZUVXaMx0cyi1wurSCjkssm+Sg52EmYwz7RhGQ8XSd9UNDVOt1Tk02Wa0u48kp\n9HgcuVQiUK/jlmYJ1aZp87Ks272bTDZLo9XiSD7PJz7/eSzLQhCE1/R5mZ+f59jhw7QMg4HRUYaG\nht6SnPerkUwm+cxnPsKhQ4eZnl4ik8mybdvr5yT8rGzbthXLMvnjP/7fDA93c+21W0gm23Bdl2Lx\n6M/8gAwEAgwMDbFt5xb27j3Fiy9+l/n5eYLBLO3tG1lYOIGqTpFt20SxaGB1D2PbBu7cXvLHX6Be\nXuaJFxq4jCDLYVpKFctoYjlrZnhNsYghiLiegeq1aHpNZKGJKjbJSiZeKERvOMxyocBkPM6Oa65h\n2PLT1X0l42MnmTtxnJpTIK2u8NTJHNuuuIKVqSlqp0/TbfnwSR3kygX2H3oGIdaNqgXpH76MfMCg\nVljEreTI9MUIdnXRb5qv+Pxlw6D9pyiqjo6OMvj5z7O0tPS2JURfypmRRz3P+1/CmmPRi8AFDUaC\nwSCf+tR9PPDA41SrApKkoGl+FhdPoKpXU6/XmZ2dBjwmJx/k5psvQ9O6zgUiAJqmYxgBTFOnr2/j\nue2CIBCL9XLo0EluuOsu8gsLLC8tYRSLuFaTBaPKjCTSFYqTcmHl9CQrgsRQxxChlonjjxNuFmlX\nMxRsky5bouyWcAUZPwpN28JwDDZlk5xZqFE2GriuREt0kRSJGhJdPRHWDQ2yON3ARSSRSDI0tJGe\nnqMcOLCHqakfEA4H6Ozs4Omnf0Ak0smOHVuIx0Pkcke57753n7N+bjWbBBSFazb38IP9x9HVDjTF\nR7m2jKcYXHfdZy5kV10QikV44AE4fvxSt+Tioyhrs0EPPAD//t9f6tb8bKxfv56tW09y6NBL6HoW\nx7Gw7WXuvHM3sViMvXsPoKqZc4EIrC3jbNq0m/n5MU4X51ELIo5Vo9SYZNtQNx+++WYM00SVZUzb\n5smHHmJkZARN06hWq3z5yw/ROXILS6fnwecw1Vqkho0CFAQbCz+K2CTb0UM65K4JFZomiqpypC7i\nihohDxRJY8FzKJh5BgTQXY+juVkqjkl/KErBtQlv28Bd77mdyRdeYDCZ5FtPTlMteMwtl3iqepK2\ndIy+vm7C5SKOpKL7ddx8gVPVAvX6QXb1ZeiJRDg1NkZ7RwfRUIhgocDk5OTrWje88PzzvPyd79Cm\naWiyzFN79nBwZIT3fehDb3tyeigU4uqrr+Lqq9/W057HykqBTZtuIJvtPW97pZJmfPwk6bP6Km+E\n2dlZvvKVR0mnr+Z977uNEyf28sADD6Lr28hkRpBlHxMTM+h6DEmaxTCSrJx+nkR9mZAXYNe2LTy+\n/wjLS2fo7bmMnKLQqJbwATY1pt0GBSQk0UdalvCLJlqriAaYop+phoffdXD8EX79c5/jU5/5DDDO\nAD4AACAASURBVLlcjv/xF/+TyuKztEWL7MgG0ZVBBGDi1Cnq8/N0uiK+dJalskFbOEWtOM9kYZpg\nuo5SW0exoaDG/Wy+cRcf+tC9dHZ28nd//ucsrq7SdjY4LFQq5IBbt/30ZS1FUeh6A8Z6b5RLaZQ3\nffZHB7Bfb9+3i8su20ZnZwdHjx6n0Wji88k88ojA6dMnqVaDhELrcV2H2dnjfPWrD7N79/tfcQ5R\nVGm1XtlcSZKwLIsNGzcS/jf/hicefZS9zz3HgeefRwsmCRUKXBcMI9gOAUEjJcJ07gym45Lwp0nj\nogk1fDJMenUyePjdFnO1OeYFkWu3bCLeJmEToTq9zEqrDoJI0/ORjSts6PUT9fs50SzQimdJJNoQ\nRYnBwS2EQjHGxh5l06a7iESSzMyc4MiRQzzxxNe4+ebtfOhDt7Nx448Hro7OTvZ4Hld0dxMNBhib\nWqBaX6E9Y3D7r3/0bX2ruVj87d/CHXfAG/D2e0dy333wm7/5ixeMSJLE+99/Dzt2THLq1BSqqrB+\n/XXnSnsty0YUXzmMxeNZrrqqnXg8yrFjp0gmI8yeHCfbavHVH7xEzQCwWd+bwBf2Mzs7y+DgIMeO\nHce2YwyPrOfwoZeYrY7TbAlMt3IEXJOI4MMviOSxSHgFbr7ylxhfWWF/pcLhyQVGrrqXZ595mtLq\nBCFcTLvBRlza8WgKMqOpTqZdh0osTbvu57q7bsMsFrlu3Tqa9TrF1TP4fNvozoaYWF1hueQxufgS\n1+xI42+P8MQj3yOjRulKxWjJKfJLZWKahqgo1Go1IpEIMpwre301isUiL33ve+zq6DjnGdWZSrHv\nxAmOjI2x7bLLLkBPXlhs20UUX6msLQgSlvWzPV5efHE/Pl8PgcBaMma5XCEW66Beb1Cv14jH+6hW\nV5mbO0VHR4x8fg/BxhQbh9bT3taDUa4z2p5hJp9jrllFTHZQrTXAKiN4ZcJigPXBCJNGmVUsKpJK\nWPMhyT5qko6mx4iFUiz6XFbLLTzPI5VK4TVKDGTT7M2XeObENAM+kc6gznQ+j9No0J/oJhmOIVFg\nrjhDxCvTrcl86u5340gqlUaTnG3wyU9+lI6zwkPv+9jH+O6DDzI5M7Nm4BeLcfdHP0o8Hn/LffKz\n8vOQM/Ip4KGLdbF0On0uSj569Cirq09TrerE4z9eM85kNjMzM8Hy8mna2s63RFZVk0RCwrYtZPnH\nbxCFwizXXLOmXNfV1cWv/vqvc+udd/K/vvAFpg6eodww8FwP76xssIJLxHGYAkKijiMo6JJKQJdI\n1loYjkldEIjLPtKeRWVuhuHuTq65extf+s4TTC42UMO9hMM6ZmORhVKN8VKRKVpcs+u2876YMzNj\nBIM/nuXp6VlHd/cICwsTbNsWPC8QgTWBsq5t29izdy/9iQQ71/Uxu7qK29bG7rdBAO1iY9triatf\n//qlbsml46qr1sTexsbgDQgs/lwhiiKDg4Pn5XX8iJGRAZ555hE8r+fcNLHneTSbS1x++e309fVx\n0003AvBHv/d7PL53llR0lEwsgOM6HJ2aQVAmuNnzAKhU6kiSjiAIbNy8i4UlmVCii3p9Ca/0DJ5t\n4Xo1Losl6Mvq7DlyhN/6r/+VXVdcwT/8w9dZWAhw8nSBVWWA4vJjDODgkxWano3qubSqJbqTbSxW\nVgmkgmzdupUffvObRLu72Tt+mh19Q5zMzVJ3VFStQqW5TNyv0ReJsLQ4RyqgceuGrfg1nTO5AI25\nUywurKJ3JFFkGdtxKLKW2PlaTE9PE/W8c4HIj+iOxTh+4MAvZDAyOjrASy/9AM/rPHcfuK6Laa4w\nPPyzjVlLS6sEgz8e9xuNJp2d6zl9+gjFog9V7SYWa6fVmmL37gjHj8RZ33kdnYk1y4wgUZxci03t\nJebkVepNlYhWIaGU6HfjBMQmIRxEReOML0ikvZczxXnMUIakFiao+WmGQuy8/CYajQqLi4uYpskL\n+2bpDw8TUhZJCx5yy8aUDHpEkVOWRU1WkVZmoWUQaNVwadH0JHRJYnh4rdx2fHaW2ZmZc8FINpvl\nY5/5DKurq7iuSzKZvGQuzhc8GBEEIQM88C82L3qed78gCLuAW4H3vNqxX/jCF879fP3113P99de/\nrW1Lp9Osrs6jaTvP295oVOjs3EAk0mJq6gCp1NqNmc9Ps359jIGBzTz++Mvoegeq6qNcXqCvT2Pr\n1i3nnSccDiMGg1h2nVQkQa5aIuA5mDjUHAdb9xH0BbDcJsuAKloEZYgJLp7oYes+0pEwqmUyKUmc\nGhujq7OTgc6tjPRmaAQ1+gYHzi43zdC3I8zwDRpHj84gihKSJJPLTROJmMjyELVajUqlgqIoJBIJ\ngsHoq5ZHCoLAHffcw9jgIGMvv4xlmozcdhuXbd9+wRUbLwTf/CZ0d8NFlFj4uUMU12ZHvvpV+C//\n5VK35u2jr6+P7du72LfvZYLBtQG2Wp1j586eV/iQ1ByFph3Dr6351EiiRDTUwdjiwjnzzK6uNkzz\nJNBPb+86kskXWVhYguYyw7FOAoqHqji4do14VyeJZJJQOIwkSWzduo7jx19gZKSPZ5aeJ6xGUQWQ\nXANHaBFWQBFauG4ZNRhn841Xs3nzZg698AKrlQqNRpN4KMa1iXaWSjnyRya4adPNgMVQjx8OnmBB\nDzA+d4pt/Ztoj2UZKywzvjzBup4sJcPgyOnTZLZswTCM85J8f5LXWtv3PO81fbt+3hkcHGTLliMc\nOrSHUKgDz/Oo1ebYvXvgdQOzV6OnJ8uBA/lzMyPpdIpq1aCnp5+2NhfHmaCzM8rAwBY2b+6j2ezA\nmDp83jl8vgjtmTjBdAeVikm95hD1ElBZwLUbIPmpOy3Qw+zoa6ctoiGP3kx79wiu6xCLZfA8gWPH\nnuXo0aPMzKzQ07+b3OlTtGsKbdlOKrUa87VZktkMKVHi+Mo0WwMRkj4/hZbBkiDQ7fczf+YMw8PD\nALiv0seCILwtrrtvlQsejHietwy8wnlHEIQO4E+Auzzv7GvJv+Ang5ELQSqVYsOGbp55ZgJFCaIo\nKtVqEUGo0NWV4p57duA4Hvv2HcV1PW6/fZQdO7ajaRoDA30cPHiURqPJ+vU7GR0dfYW0ua7rXHXr\nrZw+dITVfJ71yQ4KtSKL1Qp1SUUKp+nr2Myxif2YgOdT8dcWsF2DZDjA5q4ufJLE/MICuC5xQWB8\nYhIYYalSwLRkCvsaAChKk97ePn77tz/F/v0HePnlMWzb5eabh0mnL+c//scvcfjwMqADFoEAdHX5\n2b371V+T1wbXrWzduvWC9sHF4E//FD7/+UvdikvPRz4Ct94Kf/iH8Av6zHkFoijy3vfeyaZNpzh0\naC0haMuWmxgaGnrFQ9enR9AzOjOreUKqiuk4VD2P/vU7aJ7VTRgcHKS7+yVmZo6QyQxw881388//\n/DeUKzOEgim6sp2Ioksmo7J79+Ucm5k5tySyYcMGNm06Sa02RVgvEjU8amad/kgQSYtgegaSqrAi\nQt+Vu/nQJz6BoihcedNNPPr3f48e8rFaKDG3NM/+6UWqdoITM2cI6DUigavx6Rqb2gfYm5/ncGGJ\nkCggxzPMihaX7djB42fmMcU4tWmFib/6Nr29Ie6//73nmQTCWgD3hCjSNE18Z8csz/OYLha58rbb\nLnSXXRAkSeLee9/Dli0nGRs7gSiKbNnyLgYHB3/mxMrduy9n376vUij4icUydHUNcPDgA8RiXVx1\n1c3Ytsni4km2bOkim01x8qTFfCDKYjlPJhRHEASWy3n8w3184jd/nUcffZKnZg4QFXW8RB/G3CkK\nTgsr7OOGK7Zx45VXsOfAYV6cO8nWHTcBMDk5xaFDJ6lWTxIM6hw69CKXX34PK4vz2HMGBHX8AT9N\nIcHWK7exd2KWH/xwHzOWgiJVkUSLtD/Exp4O3EYDwzAQZZlVz6P/Ero0vx7Ca8QBF/7CgvBXrAUp\nC2c33eZ5XvMn/v5aMcrbyszMDP/hP3yRcjmO4wi0tSXp7EwC0/zO73z8FV/kf0mz2eT48XEWF1dI\nJmNs2LD+vGNc1+XZp57iT//T72NNTRNVNbr6+2iIAsfKTWJtvQwOdTKyro9isc7hZ55Am5qkp9U6\np3J3YnWVacPAVRRivb2cbsSpE6ar65pzLpVLSydpa1vkL//yjwgGg+e18eWX9/D7v/8lbLuLSKQX\nUZRZXj6JLB/jP//n36Wnp+cNl75dSARB4O3u8xdegA9/GE6efOc8gN8Kl18Of/AHa0HJzwsXot9f\njb/7uwdYWPBjmlDI5dB8PrLt7RSLY3z2sx88t3xrGAbPPvsCe/YcxXFchoc7OXF8nKkfPEF/IkV/\nfweDA/0IksQLs7Pc99nPkk6nWVxcpFgsUiqVeOjr32Ll2eeo1Q18zTqjmRQdmRT75ubQtm/nP/3J\nn5w3Thw7epTHH3qIB//xYZbLKZLZHViVJrLYRJVn+PBNg8RVmRf3nCYXzrBx9x3U62VqtSL9/Sod\nHRn27MnT3b225FoqlRgbe4nuboff+I1foaen57yH8oH9+3nqG98gJQgokkSu1aJ961buet/7Ltrs\nyJvtd+P/svfe0VWdZ6L+s/fpRaeo944khCii2phmwMYN9xYnTtwmmbEn8SRzp2TuFN8pmcnv3jUz\nSWaSiRPixHFwwd0xxsaA6SCKQAj1etR1mk7vZ//+OEAAgY1tIQk4z1qshbZ2eff+tvb3fm8NBGhq\namZkxEZmZhrV1TPRarUTKlt/fz+bN+/AYrEilwtUVOQhSQKtrRa0WhVLl87j+uuX4Ha7+c///C06\n3Qz6OxsYG+wkGgkhaeP84w//nsrKSuLxOJt+9zv69u8nZHNga28jxWhAzM1l3Q03oFQoaOvp4d1m\nC9Xz70MU1WzffhBRjFBdXUBR0Uy2b38HpxNuu20Nh7b8mhJBhVqlJi45ENMNnOwTsfkETKKI3elC\nEGzMKhLIVMqJBAJULVqEV63m+vXrWXLdxSsXX25OjfkFtcMpU0Y+i8lSRgCOHDnKu+/uIhbTAXG0\n2jCPPLJ+nJn3fJxOJxs2vMLYmBqVykgo5EahcPL44/edyUw5TTAY5FBdHe3HjxOPx5m5YAFz5s5F\nqVQSDofxer2YTCbaWlv55b/8C64DB5ibloYrFmNUFBO/i0S47RvfYO+BBjo7jRQUJJreBQJeQqFh\nKipSePjhhSxevOica//oR78kFMpndHSA7u4uAoEg4UCAuH+Q25YUokhJoWjuXG6/554pLfN+OSal\nBx+EZcuurhobX4af/Qx27JheRdAmSxnp7u7m+effITNzDjqdkVgsysBAC1VVWr72tU8vPhMMBtm4\nYQP092NSqYhEIlijUWasWMGK1at5+9VXsbW3oxdFPLEYuoICxiwWakwmrGNjdPf2MubxEE1L429+\n+EOysrLGXcPv9/O///f/w+vNZnDQRn9XJ7PzMslKMxAINfGNm6/ng1276RAMlJQvIR4PkJEh5667\nbubXv36b7OylyGRy2traOXmyD1HU4fHUs2hRFcuXV3LXXbefo5DYbDZaW1oIB4MUl5VRXFw8qX2n\nvsi422w2Nmx4Dbdbg1qd+Obq9T6efPLBy7KgCoVCyGSyT015bm5u5rXXPiQS0RKLRZHL/Xz1q3dQ\nVVWFJEn4/X4EQeBkYyPHDxxg3/btVKSns3zePNRKJcFwmCMDAyy+6y66uwd5992PGRsTmTfvOtzO\nAFaLhZh/jKMdLVRUzqaqugRP6yGUPhd6Q5xWtxxDzmLkCiX6QACTXo8v6EEh76a2PJfDo6Pc88gj\nVMycecH3bjJJKiOXQCAQoL+/H5lMRkFBwSVNyhs3vkFHh0R2dgkOxzCdJ/bgGOhErQ3x8FPfYPW6\ndRe1rEiSRCgUYuvmzXQePYpaFAkKAvNWrKCguJh/+uu/pufIEXJSUjAYDAhGI4tuu42v/9Ef8dpr\nb/POO+14vTJAwGhUM3fuTMJhN4sWGcY1hPrBD36CwTAfpTIR73HkwEFioyOEI0PcUJNCSXY2FoeD\nsjVruHHt2i/9LL8oEz0pdXcnLAHd3XCRUgvXHE4nlJQkao5MQcD8BZksZQSgqamZ99//BJcrgiBE\nWbiwknXr1lxSLFR7ezsbfvxjRjs7kclk5M+axRPPPMPhffvwNTYy81SaoyRJNFosRAsLCdhsyAMB\nEASCSiWLV6+moqLighOnzWbjxz9+jfz8RK+Crs4uehqOk6pSMeZtZNWCYqTMTG594AGsVivHDh7E\nNTREJBTiUGMPi1c9gSiq2LbtEGZzCaIow+k8zOrVa7DZWnjiiZu/VIXMieaLjPuvf/0yfX1qsrL+\nsNizWvvJzvby5JNfnWgRL5lQKETfqUJhBQUFqFQqWlpa2LV5M36HA+RyZl9/PStWr8btdvPOK6/g\nHxxEIYr4ZTKWrlvHkuuvB+Cll15nYECL3eZm8MQJUnU6YrE4/dZ2guE+RHNuYuEbsJKiVtDnSeWm\n27+BwWDg6J49pMTjaFRKhux1LFs+h9u+/vVpM+6fpoxMh2yaaYFGo/lcAxYKhWhu7iUvbzkej5Om\nve9QIlcxK38GVlsbo/v38/roKI9+85vnRCf7fD52bd9O8+HDNJ08SWY8zs3LlpGi1xOJRjm6dSua\nu+/mV6++ytGjR6nbvRuZKLJo2TJqZs9GoVBQUVHCjBlesrMTJkC1Wo0gCPT29pKXN/4eZs4soaFh\ngJycMgKBAI6hQcSAH0v/MQow09vcjNpsxrFjBytXr56yaOqJ5t/+Db71raQicjZmc6JfzcaN8KdX\nXrmYL0119UyqqirxeDyoVKpLDsh2uVxs3riRGwsLSZ89Gykep76xkb987DECPh+1paWkqNXkZ2Qg\nCAJVeXnsHxzkW3/1V4yOjrJ35078ra00bdnCkc2byayo4M4HHjjHvZDomholHA6iVKopLSvFYDTQ\n2XoSSa6h+o47qJ0/H7VazcfvvovOamV2Xh4C4Gru4MjHG8mqWoUo6hFFGaGQB7VaQKczEAzm09DQ\nMm0mpS+Cz+ejo2OYgoLl52xPT8+ju3sPHo/nogXeLjcqleqcbK+uri4+fPFFatLTMRUWEo5EaN65\nkw+8Xu687z4ef/pphoaGqK+vp+vECXb9/vecPHKEpWvXUllZTHNzA53NvYTsduz9/ciB0UA3t95Q\nTGNPDw9ffz01ZXfgDQT4xe/303rkMItXr2HJjTdi6elhsL+L/DlVPPTMM2RfIfUMro5ZZwqQJInT\nSn1fdyM5gPlU9LUoyqjIzcXf10dvb++ZYyKRCK/++tc46uqYazRi9vkoCIc5uncv4XAYhVzOrJwc\nDn3yCYIgsHDhQp7+7nf51rPPMn/BgjMBsjU1szCbgzgc/ahUSuLxGAMD7WRkSFRWVp4vKitWXI9c\nPsLgYAcezxij1kEGeg9zS5GJBRkZLExNRe9y0dTQQDQ6KSVfLjsWC7z+Onzve1MtyfTj8cfhhRem\nWoqpQxRFjEbj58oMO9nYSFo0SrrRCEBbayvujg5mxOMUiiJ5wSD79uxhwGYDQCGXI8RiSJLEYH8/\n3uZmlhcWMr+ggKWFhcQ6O/ng7XMrGiiVStasWURfXz1+vwcAtVpORo6Mv/6777Fs+XJ0Oh1dXV1E\nhoaYkZeHTBQRRZEV1y3AEBihu+MQ0WgQj2cYr7eZOXNqr5rFxZXEgR07KDcYMJ2K31MqFMwpKqKr\nvh6Hw5FYPHZ30717NzUqFauLisgPBvnwN79BoZCTlRWntaUOud9NplJALtm5LltkpLMTldtN/imz\npl6jYWFFNlFPH53treh0OnLyMimvMvFnf/70FaOIQFIZ+cKo1WoqK/OxWi34naOknEoX9PndmExq\nNFotKYJwTupsR0cH0YEBqgoKiMZiaGQyskwmFIEAAwMDQOLlCnm9n6oUaDQannrqK1RXqxgY2M3Q\n0F7mzdPzxBMPnUlRPJu0tDSefvprLFpkIh5vw+msY0F2hMqzunimKhTIo1E8Hs9EPaIp5V//Ff7o\nj+AKrM922VmzBux2OHRoqiW5cnCMjBD1++nv72d4eBhLaytFaWlk6PVEJAmtXE65RkNDU1Nif7cb\nfXo6Go2GI7t2UZ2bi+yUUiAIApV5efSdPMnY2Ng511m69DoefHApkUgzvb07EIROvva1tcydO+fM\nPnabjZTzFIyMjAxuWrGQvIwwsdhR0tK8rFy5nJycklNprgPU1FRc5qd0edHpdJSUZGKzDZyz3W4f\npKgofcqsIhdipL//jOJ6GkEQ0J+aE8LhMAc//pja/HwMp1z5aQYDs9LTqduxgzvuWE1Zhget2IUo\ndFFidlFo1KIVRSIezzlJCktnV7FsjgGn7QAWyw5SUkZ48sk7KSoqmtR7/rIk3TRfgttuW8Mvf/kK\nvoiPUecoYbUGudxPbe2CxIpobIxCr5dIJIJCoWBkcBDzKetGilZLCIjEYuhVKlx2O5SU4PR4MGZk\nMDo6Sv3BgzitVvJKS6lduBCz2Xzm2ikpKdx//53ce28c4DMj4M1mMyk6FSkhO3NS5HhHR/lwbIx5\nZWXo5XLs4TAVM2bg9/uvyAqrZ9PaCps2XZul3y8FmQyeeQZ+9CN46aWplubSkCTpTPGn7Ozsy1Lv\nJhaL0dTURNORI8RiMarmzWP2nDlngs9tBw4w02RizO/HZrdTYjTiisWYOWcO/RYL6SoVVquVfquV\nbr+f2x97jHg8TsjvR3teHQdBEFDLZAQCAfR6PTKZDEEQEASBBQvmM39+LdFo9IKxayazGW88Pv4Z\nKRTc9/D9iEoN27Y1EAz6GB7uIRAYZsGC/CvaRXOa9etvYsOGTVgsTjQaM4HAGFqthzvvnF7dL9Oy\ns3G6XGSYTGe2SZKELx7HaDQyNjaGMhpFrVQiSVKi/ocoYk5JwWuxEAqFWLV4AT1HjjA82E+KR06f\ndZRWvx9Bo0F+1nshE0Wy01P543vvZtnKlZ8rCSEajdLY2EhLfT0AVbW11NTUTHiPokshqYx8CdLS\n0vj2tx9n586dvPvCbyjM0jFzxjy8Xi+vvfEGVlFE/+GHNOzZw83334/BZKLnlMVDpVAwo7KSkydP\nYpIksnU6HG43TXY7ZUuXsumnP6VApSJHp2N4925+e+AAD3/rW0iSxNatu2hu7kGplHPddXNYtWrZ\nZyojnZ2dHP/wQ5YWFqKeM4fY8DBjTif7OjtZsXgxsysqaA2Hp0Xxmy/Ld78L3/8+TINs5WnLU08l\nmuYNDcGpyurTFqvVyjsvv0xodBSFIOCXy1l2660sXLz4sw++RCRJ4r0332ToyBGKzGZkosiR11+n\npaEBuVxOuUxGPDeXaDBISVoaDouFA+3t6EtLWTRnDu6iIhpPnMAZiRAuLOTelSvPZNRl5ucz6nSS\nedZiIhgO0+9ysWnT+4yOjqHXq1mxYgHXXbcEURQRBOGik0pZWRm70tPpGR6mKCvRQNDmcjEsSaxZ\ntIj09HQqKso4ebKVaDTKzJm3UFJSclW4azIzM/nOdx7jxImTDA/byM6eSU1N9bhyBlPNklWr2PLC\nC2hUKvQaDbF4nOb+fvKqq0lPT8fn8+GJRNjd0ERTr41YLE5prpm5ZfkoNBpyc3OxuFz0DwxQEg4j\n+P0ICgVLcnPZHwpR19zMvPJy5DIZ/VYrDpWKOxYt+lyKSDwe561XX8Vx4gSFp97Ng6+8QntNDfd+\n5SuTXgAvmU0zQXR2drLtnXdwDw9z9OBBZuTmsvq669BrNLh9Po47ndz71FO8/eKLzFAoyDSbkSSJ\nuqYm9pw8ycy5c8kpKuK61av55N13ma3ToT+r26RlZARXVhZ9I0GggIyMfGKxCIODbZSXK3nssa98\nalreW6++itjRQX5GBg6Hg6O7dpGpVDIQDJJVU0NIJmPubbexfOXKSXhaF2Yisipefx3+9m+hoeHa\n6877efmTP0kobP/4j1Mrx6eNezQa5Zc/+hG5oRB5pxTl06mQtz/1FGUTVMCpp6eH959/niXn1ePY\n09rKqM/H3fPm4Q+FONrUxEB/PwP9/TgiEf7iiScwGwxEolGOWCxcd//9LDiv1G93dzfv/PKXlOv1\nZJrNuH0+9rS10R1IYVbNWkymDIJBH4ODJ1m1aga33PLZGW1Op5Mt77zDSEcHIqBNT+eme+65okzz\nk5lFNRUcq69nzwcfIAUCRAWBGbW1rL31VtRqNfF4nGef+V/0NIwxs6AKmSjH7hllxH2SZ77/DAsW\nLeKbDz2EqbOT2tRUlDIZDr+f3nCYitWriRcWIobDxKJRimfOZMVNN32uZoCQyA7b+qtfsfislG5J\nkqjr6eGmJ564LJa0ZDbNJFBWVkbpd7/Lto8/JgVYeFZktUGnI8vppKu9nfsef5zNmzbR1deHIEmQ\nm8s/fec7FBYWolAoGBoaQhYIoD/PQpGfkcHmHbvJLr+FgoLTaW1KcnNn0tFxGIvF8qkfoqDPR+qp\n2Tk1NZX5K1bQ1drKcEcHgXichx59lNlz5lz0+CuBgYGE++Hdd5OKyKXw3e8metZ873twljV5WtHT\n04PgcJB31rutViopSUmh/sCBiVNGurpIVyjGKfRpajUdvb2Iooheo2HFggXEamsJh8O8uH079VYr\nKS4XQVFk/i23MH/BgnHnLikp4Z5vfpNdH35IS18f5sxMpOxSqtSzzvSLUqt1FBXNZ+/efdxww5LP\njH8wm8185bHH8Hg8RKNRTCbTpNYISfLZzKutZfacObhcLtRq9TmZU93d3WhSSiicrabHYkEpioRR\nkZK3EEQ5x44coSo9HXk4jNXvJxwMkmI0UqJWI5MkZs6ezc233048Hv/CLpXu9nayNJpz3htBEMjS\naOhqa5t0t15SGZlABEFAlCTSLlBbRKdW4xkbIy8vj6eefZbR0VHi8TiZmZnnmMPkcjmRC/iDI7EY\nY94wVaYs4vE4He3t9HV0EI9ECMTcHFt87FOVkbJZszj57rtngqpSU1MxLVlCKCeH+//08dpH0AAA\nIABJREFUT8nNzZ2AJzB1eL1w113w7LOwZMlUS3NlUFGR6GT8H/8B/+f/TLU0F8bv96O+wCSr12iw\nOhwTdh2VSnXBvzulXI7caGTM6z2TGSETRTyBAEtvvpm7H3kEn89HamoqmrMsmWczMDDAnq1bGbVY\nkMnlFJSV0T5wnNzcczMdZDI5oMPhcFxyMOZ0CtpMMh6ZTHbBDrijo1aUylTK51cRmDmTYDAESJxs\nOMqG//oFJmUMMRDApFKx6KwCmjaXiy6Xi5tnzEA8lUn1RVGqVERisXHbI7EYqinoQTZlTkRBEL4u\nCMJOQRAOCILwxFTJMdHkFBTgCIXGbbf5fOSXJBruCYJAVlYWOTk54/xyGRkZGPLz6bdaz9neNjjI\nrHk1+P1uTp44wfDJkxRptVSkppEScXNw82Z6enouKtfsOXOQcnI40dvLmNeLdWyMQz09lC5efMUr\nIi4X3HknzJ2biBVJcun8/d/Df/0XjI5OtSQXJiMjA1c8Ps6cPzI2RsEFuvh+USqqqrBJEoGz/nbD\nkQjDkQh3PfooJ+x2LCMjePx+uoeHafX5uPH22zGZTOTl5V1UEbFarbz+i19gtFpZWVDAdRkZDO7d\ny6ClC6/33EyaRLmAwLSLf0gy8RiNBiQpACSyI3U6LQ2HDhEY7GVJQTY3zZhBittN59gYDaOjjAUC\nuIJBjttsZM6dOyFWi5k1NQxHowTD4TPbQpEIw9EoVbNmfcqRl4epjGjaKEnSSmAp8PQUyjGhzJgx\nA7KyONTcjC8QIBSJ0NLXRywjg5nV1Zd0jvUPPsiQWs2R3l6aens50NODsrycRx97BJernf62ZgrS\n0lDI5Di9VrLTJK4rLGT/9u0XPadGo+ErTz7JjFtvxaJSYU9NZdkjj3DbnXdO1K1PCceOwdKlMGsW\nPP88JC3Vn4+SEnjyyYRFaTqSk5ND4bx51Pf04A0EiMZi9AwPM6pQsHACe2ykpaVx4/33c8hq5YTF\nwomeHg4ODbHwtttYtWoVDzz9NGJVFV2iiHr2bL7yzDOXFJ9x+MABcgSBnLQ0BEFAqVAwu6iIHC10\ndh4iEklMBJIkMTDQSlVVLgaDAavVit/vn7D7SzK9KCsrw2gMn0lTHh4aIuayY9B6mFWcT35+Prk5\nOeQoFEjZ2fQqFBzy+UhfuZI/+bM/+9Tg0mg0is1mw+v1fqoMWVlZLLvrLg4ND3Oit5cTvb3UDQ2x\n9M47p6Q+yZQHsAqCoAG2nFJMzt5+RQWwQqJ/xebNW6mra2Kwrw/vSDelZQXcdOd6lq5Y8blMqpFI\nhK6uLnw+H2lpaRQWFiIIAh9++BH/82//TZo2E0mKkmmWsXZBNXqNhr0jI3zvMnc6vpx8noC2kRH4\n4Q8Tqan/9/8mOtImFZEvht+fsCr9y78kevlMNp817tFolLqDBzm2dy9Bv5/S6mqWrV59WTK/PB4P\nXV1dSJJEUVHROen0X4Rf/eQnlESjZ2pJnOZEby/xsgosFifxuIZ4PEB1dQG5uens2nWcSESGIIRZ\nuLCSW25ZO64j+NXA1R7A+llYrVZee+33DA156WzrRD7Wx93L5lJ8ShEIBoPsraujB6iaOZOaxYu5\nftmyi1rhAI4dO87mzbsJBECSosyZU8Qdd6z71EaCLpfrjFW9uLgY43n1USaSadubRhCEvwf+CPhb\nSZJ+c97vrjhl5OWX36CpyUdeXtWpcswBBgaO8uija5g1QWYvq9XKb//936k2m1HI5Wf82Ha3mwGN\nhsefeWZCrjMVXMrHaWAgUR9jwwZ45BH4m7+Z/qmpVwJHj8K6dfDeezDZTT2v5knprVdfRWhvp+C8\nTIdDvb3c/OST5Obm4nA40Gq1dHV189pre8nPr0WpVBOLRenvb2LBgnTuvXf9FN3B5eNqHvdLRZIk\nbDYbB/bvx3HwIDXnWdsaLRZmrl/P4ksIhOvo6GDDhvfJzp6HRqMnHo8zONhGSYnA448/crlu4XPx\nacrIZXfTCIKQJQjCjvP+vQwgSdI/AmXAU4IgjHOUPvfcc2f+ffLJJ5db1C+F3W6nsbGP/PxqRDFh\nQlOpNKSnV7FjR92EXScjI4P86mpsHg+GU9puMBym1WZj8apVE3ad6UZdXUL5mD0bgkE4fhx+8pOk\nIjJRzJ8PL74I69fDr34FF4jlTPIFWLh0KT1+P55TLhdJkugZGUGelUVRURFqtZrc3FxMJhM7dtSR\nlVV9pqGlTCanoGAW9fWduN3uqbyNJJcJQRDIyMhg5apVuFQqrGdV5B1xOnGp1VRf4kJ2165DGI3l\naDSJqVQURfLzq+josDM8PHxZ5J9ILns2jSRJI8CN528XBEEpSVIYiABxYJy29NwV5HLweDyIom5c\nep1eb2ZwsGFCr7X+/vvZ8t577D1xAiUQVSi47p57Jsz6Ml3weOCNN+DnP4fhYfj2t+GnP52+aahX\nOrfeClu3JuqPPPccLF+eaKzndoPNBlZr4p/dDgYDFBQkMpeWL4fVq6dPF+DpREFBATd/9atsf+cd\nBLudiCSRWVbGA/fee47fPxaL4XC4KSo610QuijIEQYPH48FgMEy2+EkmCYPBwL1PPMEHr79Ou8WC\nBOiysrj/vvsuOaB5eNiO0Tg+jkkm0+N2u6d9n5qpTO39viAIqwAV8IokSVd0UxSz2Ywk+YjHY2cs\nIwAul438/KwJvZZGo+GeBx/Ec+ut+P1+zGbzVeVT9njg6acTLoOVK+Gv/iqxYp/kgoDXJPPmwb59\n0NSUcN2MjSW6HqenJwqkZWYmlA6PB7q74cCBRNO9J56Aykq46SZYuzZRv+QCbZKuSapnzaKyqgq7\n3Y5CobhgHIpMJiMnJw23247B8Id2DLFYFPBjSmrgVz35+fk89eyz2Gw2BEEg7VTQ86VSUJBFX5+N\n9PS8M9skSSIadX3p2KfJYMoDWC/GlRgz8s4773PgwCB5eQlTq8fjxGZr5Kmn1k9YcaarmdM+ZElK\nxITcdVeypPuVQjicUEy2bk38O3kykeVUXJywrigUIEmc6XS9YkUiRgWSsQOnaWlp4Te/+ZCMjBr0\nehPhcJD+/kZWrizl1ltvmmrxJpzkuE8sfX19/M//vIHJNBOjMZ1IJMzAQDNz55p46KF7p1o8YBoH\nsH4agiBMT8GSJEmSJEmSJF+IK7Ic/HRVlKY7wWCQH/1oA7FYwRmTndttx+Np5jvf+dq07co7GSul\ngYEBfvrTTaSnz0WnMyBJEiMjPZhMTp555olJbw6VJLlCvlaZiHGXJIkNGzbS1yeQm1uBIAin+vzU\n89RTt1M+gYXxknx5Ps3tdOW3cUwyjra2Ntxu9Tm+Q4MhDUnKpL5+YoNprzT27z+CWl2ITpcIBhQE\ngezsEkZGYp9awTZJkiTTj+HhYbq7neTlVZ6Z6NRqHWbzDHbtOjTF0iX5PCSVkasQh2MMuXx8BLZW\na2B01DkFEk0fRkYc6HTji/oIgg6P54qOoU6S5JrD7XYjiuMLeul0RkZG7FMgUZIvSlIZuQrJysog\nGnWN2+7zOSgsnNjMniuN4uIc3G7buO2S5L5gQ6skSZJMX1JTU4nH3ePcPS6XjaKiK7vn1rVGUhm5\nCikvLyc7W0Z/fyuxWJR4PM7ISC9arZu5c+dMtXhTypIlC4BhbLZBJEkiEgljsTRSXm6moKBgqsVL\nkiTJ5yAjI4O5c4vp7T1OOBwEwOkcJRDoZuXKZPvuK4lpnU0zXWW7EvB4PHz88U7q69uIx+NUVRWx\nbt0qMqZxruxkBTIODg7ywQc76O4eRi4XWbSomjVrVqKegrbZSZIBrNcqEzXukUiEnTv3sG9fA+Fw\njLy8NG69dSXFxcVfXsgkE8oVm9o7XWW7kohGo0iShEKhmGpRPpPJnpTC4TAymSyZQTPFXAvKSDQK\nLS1QVQXyaZ3DOHlM9LjHYjGi0SiqZLW9aUtSGUlyRXAtTEpJxnO1j/vAQKIyrdebaGWwdStkXduh\nW8DVP+5JxjOljfKSJEmS5FolHodHH4UHHwSLJdH/54kn/lCJNkmSJAmmTBkRBGGWIAh7BUHYJQjC\nz6ZKjiRJkiS5XGzalGg0+Hd/l/j5n/8Z2tpg586plStJkunGVFpGWiVJukGSpBWAShCE2imUJUmS\nJEkmFEmCf/3XRAfk02FJCgV8//vwwx9OqWhJkkw7pkwZkSQpetaPGmBsqmRJkiRJkolmzx6IROD2\n28/d/vDDcPAg9PdPjVxJkkxHpjRmRBCEOwVBOAEEJUnqnkpZkiRJkmQiee01eOQROL8dh1YLDzwA\nL744NXIlSTIdmRbZNIIg/Bh4T5KkrWdtk/7hH/7hzD6rVq1i1apVUyBdkskiGV1/bXI1jns8Dvn5\n8MknUFEx/ve7dsGzz0J9/aSLNm24Gsc9yafzadk0U5bxLgiCUpKk8Kkf3YDy/H2ee+65SZUpSZIk\nSSaC/fshPf3CigjA0qUJN43FAoWFkytbkiTTkal009wiCMIngiDsBPKBD6ZQliRJkiSZMDZtgvvv\nv/jv5fJELMm7706eTEmSTGemhZvmQiSLnl17JM221yZX27jH41BUBB99BDNnXny/N9+En/0sUQTt\nWuRqG/ckn02y6FmSJEmSTBIHD4LB8OmKCCSqsh44AD7f5MiVJMl0JqmMXKW4XC7cbvdUi3HNIkkS\nY2NjeDyeqRYlySTz+uuJbJnPIiUF5s9PBLMmSXI+8Xgcp9OJ7xrRVpMtmyaR0xOUIAiYTKbLco2R\nkRE+fPttnH19SJJEamEh6+6+m6xkM4xJw2Kx8NHbb+MfHSUO5MyYwbq77vrUMfd4PEQiEUwmE6KY\nXCNcqUhSQhl5//1L2//mmxPunFtvvbxyJZk+uFwuYrEYZrMZ4fy871O0tray/d13ibhcxASB4tmz\nufn229HpdJMs7eSRjBmZJAYHB/no7bdxDw4iAab8fG65554JVRK8Xi8v/OhHFAsCuenpAAzYbPQJ\nAo995zuf+SI7nU5isRhpaWkX/SO5nFwNPmS73c5LP/4xVXo96UYjkiTRPTxMv1zOI089RWZm5jnP\n1uVyseWddxhqa0MmCChMJtbedRfl5eVTeBeTy9Uw7qepq4Ovfx2am8fXF7kQhw7BY4/ByZOXXbRp\nx9U07peCw+Fgy9tvM9rZiUwUUaWmsu7eeykqKjpnv76+Pt742c+YnZ6OSa8nFo/TPjhIvKCArz31\n1AW/zZIk4XA4EAThU5WcqSbZtXcSaGpq4sD27TiGh8nIy+P6NWuoOJXX5/F4eOE//5MyhYLs1FQA\nBm02LJeoJFwqdQcO0Pz731NzXq7gCYuFWXfeyaLFiy94nM1m4403NtPX5wREUlNV3HvvzRQXF0+I\nXJfKlf5xcrlc/PTf/532nTtJNxopKykhKz2dPY1dNPePUVBTw+zZJdx3361kZ2cTi8V44b//G6PT\nSUl2NoIg4PR4ODk2xoNPP01OTs5U39KkcKWP+9n85V+CSgX/9E+Xtn8sBpmZcPx4oi7JtcTVNO4X\nwuFwsHfHDtqOH0eUyxkcHGRRVhalubkIgoDN5aLZ4+Fr3/kO6acWjwBvvvwysq4u8jMyzjnfwd5e\n1v/xH1NQUHDO9v7+ft54YwtWqx9JksjLM3LvvbeQnZ09Kff5eUgGsF5mjh4+zMcvvkh+MMiqggKy\nvV4+eOEFmk4tdxpPnMAUCp1RRABy09PR+Xw0NzVd8nWCwSBNTU0cO3aM0dHRcb+3j45i0mjGbTeq\nVDgusD9AKBTihRc2YbMZKSxcRmHhUuLxYl544R3sdvsly3at4/f72fiLX+A/doxlZjO1Gg3DJ0/y\n401bCYYKyDHWYDLOxuPJZMOGTfh8Prq7u4mNjFCak3NmJWNOSSFPoaC+ru6C14nFYnR0dFBfX4/F\nYrmqP+ZXGpL02Sm95yOTwdq1125GzdWKx+Ph5eefJ9DYyLKcHNICAex1dbQdPYrDbkeSJNKNRnJE\nkeNHjpxzrG14GJNeP+6cWkEYFwfocrnYsOENQqF8CgtvoKhoGS5XOi+88AZ+v/+y3uNEk4wZ+ZJE\no1H2ffQR83Jz0anVAKQbjcyWy9m1ZQszq6txjIxgvICSkKJU4rBaL+k6vb29vPjiOwSDWkAB7OT6\n6yu5/fZ1Zyay9OxsTtbVcf4CaywUougiq+y2tjbGxhQUFf3hKIMhDY8ni/r6BtauvfGS5LvWaWxo\nQDM2xszSUlzt7Zh0OuSCjIhXRywuIyhF0Ol1pKZmYbFYaWpqRiYT0V7gXGa9nsHh4XHbnU4nL774\nOqOjcRLtnDyUl5v5ylfuRX3q3UsydRw9mmiEN2fO5zvudNzI449fHrmSTD7Hjh7F6PNRVlCAx+/n\nw4PHibuhzWXH6amjqCiNRYtqMel02M/7W8/Kz8fR0oL+vDnDK0nj4s4aGhqJxdIxmf5gRUlLy8Fi\nsdHc3MKCBfMv301OMEnLyJfE7XZDIHBGETmNUacjNDaGz+cjMy8PZyAw7lhXKETmJZjiQ6EQv/3t\nO2i11RQV1VJUVENBwfXs2dPJybOczbNqavClpGAZGUGSJOx2O5u37WT7iTa6uvuwWq3jVtJOpwu5\nPGXcNbVaI8PDScvIpdLf1UWGXk9BYSFeUcTt9+MKRtHLNfRbrYgGA+np6fh8Pnp6RnnhhdfYvfsg\n/WPj+0Pa3G4yL2Czf/PNzbjdqRQVLaSoaBZFRdfR2Rlh+/ZkOsZ04LRV5PO662+6CT7+OFGfJMmV\nw8WskpIkUbd7N71tXXyyYx8bN+8gEssBdRomjRmNJgurNU5razs2j2fc3/qiG26gNxTC5nIBEI3F\naOrrI7W8nNzc3HP2HR11oFaP/34rlSnYbM4JutPJIWkZuQinJ/NYLEZGRsZFMxw0Gg0REi+M/HSf\ncCAUiSDJ5ahUKqpnzaJuxw46+vsJO50M9PZi9XqJl5Rw53n+vwvR1dWFxeKFSAvxWIyMvDy0Wi3B\noJYPPtjBrFmzEAQBrVbLQ08+ydbf/563Dhyg4UQnKVkzqV60mj17+vnNb/6CkpICioryWLVqMfPn\n15KZmU40Ot5V5PXaKSws/qKPb8qJRCKcaGig5dgxZDIZ1QsWUF1djeysMZooPB4P7R0dtGzbhkIm\nwy+B3eZn1DGG1e+jpriSBddfj8/nY9euwzidIyxZUoXbnc7Rrk9Qhg+zbH4twUCAYYeDIVFk9Xnx\nPU6nk+5uGwUFN5yzPSengoMH97Nu3ZrLcm9JLo3TWTSvvfb5jy0sTJSOr6+HBQsmXrYkX5xIJILd\nbketVp+xSjQ2NvL66x/Q3z9CYWEO9913C7NmzTpzzNat2zlUbyHbLWDUamjq7CfNkI2k1BJ3OzHJ\nRAwpmdQ3NVK6YjG3nxr0xsZGDu7YgdNqRa7VctTjQeNyIYkiFQsWsHrdunGBqfn5WdTXNwF552wP\nhcZISyvi8KFDtB0/jlyppGbhQqqqqqZttl5SGbkAIyMjvP76ZoaGPAiCDINB5L771lFaWjpuX41G\nQ+WiRTQfPMisggJEUSQej3O4owNdZSUtLS2UlJRw3ze+wT/95V9ib2lBlMnIyMykXK/n7Y0befRb\n30KlUl1QFkmS2LZlC32NLZRnVxKNxdh+uIGgzEB6ZhZNTa2Yzb9j+fJFDPT2IogiN6xeTVuvg+Wl\n92AypWO19tPa2ockzcbpVFNSUsmmTfvxeLwsW7aU7Oy99Pe3kZNTiijKsFr7UaudzJt35+V+1JeF\naDTKppdewtfWRmFqKrF4nD2/+x1dCxdy5333TWikeSAQYMNPfkLL7t20tXQiSBqsITNxTQ5l+XNx\nWbuw9A/RcKwejy+C1+snO1tLSUkNCoWSG9Y8Rd2+33D43c343CGUxlQq5tXgcDjIOCuALRwOIwiK\ncbLL5Qqi0TjxeDypjEwhR46AKEJt7Rc7/rSrJqmMTB8OHTrCK6+8h9sdBqIUFKSSmprC66/vRyab\ngUpVTHe3jQMHfsw//MMTLFmyBIfDwc6dJ5i7cD2tu95Cj4BWayIaUaFOyWM0VYMsFkIcsxJUa/ne\nY49hMpmoO3iQXS+/jCkaJQOIKRTEFApWPfIIM2fOvOj8UFMzi08+OcTISA8ZGYWAxPBwNyZTiIa6\nOuL9/RSYzURjMXa8+CJd113HHXffPZmP8ZKZykZ5S4B/B+LAIUmSvjdVspxNMBjk179+nXi8iMLC\nhPPX43Hym9+8x7e//dVzop5Ps2bdOraEQuw9dgytKNJo6WfEryIj4qO+4QNSU0Vqa0sRAhGU5ipE\nwYzNE6DvQAvmxkbiKhUGYyr7dh3AP2ajsqqMm9avZ+68efT39+OzWMgxgkIG9S3H8TucxAU5dnGM\nVTcuYdvWNg5ueZ+baiqRJIkD771Hl13ihhUrAWhqakCjKUerTcNub0Op1FJQMJ8dOw6yZMkiHnvs\nQbZu/YRjx/YSj0tUVORzyy0PYTAYJvXZTxQtLS1429tZUFJyZlumycSBo0exLFo0LpXuy/C7l17i\n5z9/C6/bTCy+CJe/h1RljJSIgoY+F2V5+cRCY9Tt/B0utxtTTiHl5Wvx+z3Y7YMM9DTS3GKhdtEd\nLFm3BJu1n97mg3z7sWeonFlIXk4OReXlLF6xAq02jt/vQav9g1nWbh+krCwPhUIxYfeU5PPzyivw\n0EOf30VzmltugR/8AL7//YmVK8kX48SJE/z93/+EWCybQCDM8HAX0agKv38MnS6dkhIRmUxJKKRi\neFjJP//zT9i0aS4DAwMIgom0tFyKFt1MR/0OXBE7sbgRMSiy9ta70Om0hMNBiopiFBUVEQ6H+f3G\njYwcPIjbZiPk8yETRQzp6QTlcub94Afj5PP7/TQ2NNDX2Ul5cToj9jEGBnoAgdmzS8nJrqHlgw+Y\nf/438NAhBhYtIi8vb9w5p5qptIz0ADdKkhQWBOElQRBqJElqnEJ5gERAp9utoajoD7EcKSlm3O5E\nQOdNN60ed4xKpeKuBx7AuXYtzc3NHH1xC1FvlO7uMSRJTne3ix3bPsaMkdkl8/B6PQwOdCOLp9E9\n2kTTf/wMTzibWUWVpGizqd/ZgrWji4H778VgMlGo16MsjfPWR2+jcASZqcnBH3Ljsg4x0pdKzKNA\nUsgozMxEIZeTaTBw6PAHOGePYjSm4XSOkZo6i1gshkwmIJOJiKKceFyNw+EgLy+Pe+9dz/r1EeLx\nOCqV6orO0uhsbib7vGh0QRDIUCjo6eqaMGXE7Xbz0/9+FYW4AJNGhyBTEheK8IXb0cpcaOQFmNRG\nSopnsKvhfQpNM/CHUmlv97D1o39GHQetqMfq8NOhOIJtoIs8MUBWXCQ2GiDiO4azxE6JXM577e3M\nXbaMvXuPoVYXotMZcbttiOIIt976ALFYjGAwiFqtTlpIJpl4HF59FbZs+eLnuPFGePhhsNshLW3i\nZEvyxfif//ktfn8eWVlVDA3twWhci883xujoEXJyFtHefhiVagSdrhSFopRjx7bw85//mjVrliFJ\nEQDy8meQlV1M1oxj7NixDX8oSl3dccJhH0qlndtuexaA0dFR9m3dSkUgQGkggFomwylJ9AwPs/3N\nN1n/8MPMOSsq2uVysfEXv0AzNkaGXo8vFCISiXDf/fcza9YsFAoFr7/0ErlG4zn3JIoiqaKIpbc3\nqYycjSRJI2f9GAGiUyXL2SQCOhN1P4JBHz09rQwPDxONBklJyb6gMnIas9nMyIidri4bWu0czOZM\nANzuMdo69pNv0FJVEGHIYsGsUiGXaem264nH45SozBCXkWbIwOoc5f1dTexp/AlzFs1BO9JF2GEn\nLzBEAAUyHKRqZBRlV9DX04IhtRIQicZiKORyDCkpVKQbaGupY8n1d6BWqwiHfXi9LsrKchFFEUmS\niMeD59Q4USgUtLS0sH/bNqyDg6RlZ3P9mjVUV1df1mc+0ag0GnzR8a9T5JSidTF8Ph/t7e0E/H5y\n8/IoLCy8qEtHkiQ2bdrEqFWOJiIixEOo1Qpi8QhyWTGjnn0U5FQQjUm0WPrQKNOpyqumsa2PlpOH\nCHpMmFVKDAYtRl0eHo8La8/7iJlZdPn8ZGpNpGt0mGMxnGNj1BQW0tXayp/8yQMcOHCUwcEeCgvl\nzJmzlI6OzlOpfFG0Wjlr1ixm8eJF07bw0dXGvn1gMsFZYQOfG7UaVq+GzZvh0UcnTrZrDY/Hw6FD\nR2hp6SUlRct1181jxowZl3x8MBhk8+aP2Lx5HxrN9Vite4hGdWi1atRqE7GYiN/vIRg0IpdLSJIS\niKFWp9DTEyYQCKDRBPB4nKSkmJHLFeTklBKL/Z709BkoFDrKyyvIzU1jy5YDzJw5k23bthEaHEQf\nj6OSy1FrtRQplQT8foZ8PvZ8+CGzZ89GEAT8fj///R//Qecnn2DW6yksLGRuZSVZwK733jvzrVZp\nNIQjkXH3F5UkFEolAwMDeL1e0tPTSZsm2u+Ux4wIgjAHyJAkqWWqZQHIzs4kEmkmGPSxa9eHBAJG\ntNpC7HYL9fX9fPLJLlatWnHR43t6LITDOjIzM5EkGBzow2uzIUa1DNtdnKivRwHIU1MJRaPYw5Cn\nM5BlMNBpHaapdz9uqxOloMYXirProw48Y82UqtzMlMuJht2EQlr8Kj3Dg0P0e+1Eh50srDq3ampV\n1QyCfi+9vYcwGBS0tu6ivHwuVVUzkCSJgYFWamoKzkkVazh+nB0vv8zM9HRmFxbi9Hj4+MUXCT34\nILXzr5wUseo5c3hr717yolEU8sQrHgiFsAG3VVVd8Jienh7effFFDOEwCuBIPE727Nnc/cADyOVy\nQqEQTqcTnU5HSkoK27Z9wvvvH0AQjQQFFRG/G7fPgyCIBCMQIUQ4EsCk03C4tZFso4nu/j6GHO04\nQyE0zMAT9KMVPAj6bILuMJqIQGVUwhONMuLoY1SbSoUmm66RERbNmoXfYsFsNlNTU0Fzcw+jo2p2\n7nydrq5RVqy4laKiUoJBH2+9lahRsmTJhYvctbW1Ub9/Pz63m+KqKhYsXozxvFULAokLAAAgAElE\nQVRUkkvn5ZcTLpovy/r18N57SWXki+JyuXj++Y14PAZMpnxcrgAbNnzAbbeNsGLFss88XpIkXnnl\nLZqbvRiNxcTjeQSDUZxOF3p9EKVSiUIhw+sdIhyW43A4CAbHCIXaSU2FlJRcWlp6+PrX7+a//uvX\n7N1rw+32M9TXgErKZYZeT5QwzoE+Cgvz8HpNbNz4Mnvee5c0uZxgKIRRknC53Sg1GuKxGDFg/9at\nRCMRZi1eTFdTE4M7drAyI4NBr5eje/bwyf793LJ6NaJSydDQEEVFRcyqreX3hw+Te1ZihS8YZDAS\nIbhnDxGrFa0o4o7HKV+wgFvuvBO5fGrVgSm9uiAIqcBPgAu2lXruuefO/H/VqlWsWrXqsstUVlZG\nXt5e9u37EJ/PQGpqGR6PA6NRy8KFK/j446PU1s696Mc7Ly+DSKQDAJdrDK/VRqpGg0urRSaLEJLL\n6RwcZDAYQZLLUGjDGLVmhj0uuvrrSA95mS3LJi6G8Pr7sHt16GSZeEJuIn43mkiMgfgAdlIJBdMJ\nRjLwBqFnUOC1HQdZUFmA3W6n2+3msb/4C0wmEy6Xi+bmNpqbBxkebiAe91Ndnc/dd992Ru54PM7e\nDz9kbnY2KdpE9YtUg4G5CgV7t2xhzty5yGSyxLm7u5EkiZKSkgvG0Ew1hYWFLLztNvZv2YJZkpCA\nMbmc1ffdR+pZhedOE4lEeO93v2OWXn+m2JAkSdQ3NHC0tJRoJELdtm2oolFCkkRmeTnHm63U1t7G\niRO/QtSmYvM40URjyGVxggyRovTjdR9myJaKOj5MkbKQ/qEeQuEociEDhTwLcCNTB/F6hkgTNUQk\nEa/bisvvJozE0Z4ACoWcgpoawqeyszweDxs3biEtrRalUs2hQ40IUjEff7ibZSsjpKWlYzSWsW1b\nHQsXLhjnstmzaxdHN2+mzGQiU6VicPduXjpyhK/+8R9ftn5JVzOBQMJFc17dqi/E7bfDn/85hMOg\nVH75811r7NtXh8djJD+/EgC93oTRmM7WrfuprZ1LSsr4FNizGRwcpL3dTlnZ9ZSW9nDy5CAgEg4P\nMjCgQafTUVychtvtxe/vIyXFRDTagF4vUly8lKNHG6ipmU8wGEQQtBQUzGbM6cFuGUBAj1qpwqTX\n4w8F2bV1KxDk6AcHMcshLIrYJAl7IIBKkggEgwwBgigijYwgGxpi98svM9rfT77BQEN/P+GxMYoA\nRzBIw65deNPSWDA4iF6vp7S0lDlr17J/xw7MkkQccCsUKFJSMI2NUX7KVR2Pxzl26BD709JYvnLl\n5Ryez2QqA1jlwEvA/5Ik6YLlQc9WRiYLuVzOY489xNGjf0s8rmBsrJ2cnDSqqxei0+mx2w0MDg5e\nVBlZvHgxRuPH2GwWnDYP0YCHbmsXStFLimwEly+GR9IgxbUYZAEMSg3DgWGcQy4KpQCpcgNaUUE0\nHkEdFokKLmRqMz5RT5fbxwylDnU0RCgikqbJYEwtkq2SIw/K+GBPF9bORgqNBrILCtj15pusfegh\namtrqa2txefz4XA40Ol04yZlr9dLxOMh5bxUY71Gg2Sz4fF4aG1pYd9773HaqLdXEFi0bh03LF9+\nOYbiS3HD8uVU19TQ09ODKIqUlpZe9GNksVhQBQKYMjKQJImRkRE6Oy3Yxtzsbv1/zCnIZWlFBSqF\ngng8zkf79tFpM3HzLauprZ3F7t170KrTiQtRvKFhMjW9rCk2MBQMMBYc5Ia8dHqcw/R6XWQaZkNg\ngEAsTKZWi0quR4o5MeijjHrG6AvJcIrZaAQjxAU2t7tYl+cho7+fmhUraGvrADLQaPS0tx1mqLOF\nHG01kTE7O199FXVqKjm5uUQVQwwNDZF/Vg0Dj8fDoY8/5vrCwjMWI4NOR1t/Pwf27OGWO+6YjKG5\nqnjjDVi4ECYiDCkrCyorE11816798ue71jh5spP09HNdynK5AklKfLMrKys/9Xin04kgJBYjS5bc\nQFfXBgYGYsTjIm53O5JkpqqqHKczQCTix2SqRKHQIIpqnM4xgsFGzOblvPfedrKyatHrTezdsYPq\nwnLa+jy0D4yysFJHJBrFbbGQmSFSlqKhNjub10ZHcbpclMtkaOVyRsJh3LEYBaEQpSoV3q4uOsbG\nyIhGCWRn02exMEOSUAgC+lgMS3c3w/39vP+rX1GXlUV6aSnr77+f2fPmYbFYkMlkmEwm3nr+ecrO\n+saLosjM3FyO7tnDshUrptS1O5WWkQeAhcD/d+oBfF+SpANTKM8ZdDodCxfOo7Iyi5SU1HNWl5IU\nQXmRZYvH48Hr9bJiRRUnTjgY6u9H8tspMkCGLsrcnELebBzFpDagMqgozszBpKlkx7GPkYsjaGJx\nIjEfUZSkaFQEIhEMMQmL145OJ0OjLaY94sURdBFQGAmlGClMzSEccCCqQeN1ojYbWbNuBampqfiC\nQT5+4w3KZ8xArVaj0+ku2gdHo9EQk8kIRyIoz8rMiMZiRAUBl8vF9k2bKNZq0arVZKemIkkSBz/4\ngOLS0mkZEGU2mzGbzZ+5XzQa5fQIt7d30NjYh1abgVymob1xL+neIB1qNSl6PTlpaVTm5fHJyTb8\nfj+rV99HNOCk9eBu4tEQFXkidy1aiEmt5nBHB0d8PhYsnIP9eCsxuxKZQoEuriAQaCcmFuIKKHGF\n/bhjI9QUptHnMWGW5RAOBvGGQqSmlLG/2UHtunyuW7aMjRs34XJFMRpdDDQdJFutQCbFUAR95Bu1\nROJxlNEwYtjJh++8wxNPP33mAzM0NIQhHj+jiJymICOD4ydPQlIZ+dz84hfw7W9P3PnuuSdRPC2p\njHx+1GoVoVAYtfrcb9ynfbPPxmAwIEmJEuoymZy0tEKMxhwcDht6vYfCwiw8nkGKi0WWLfsmH320\nGa/XjEIhA0aBOJs2vYdcnsqcOYkcbb/fj0rQoFQMMuLwEQhl0z80gCziJCfVSLqQglImY15BAdvd\nbnoFAVk0Sns8ToVazSyDgVyTiUyzGafXy6DNRkSr/f/Ze9Mgy67yTPdZez7zmPNUWVmTai6NJSGE\nLAaBEYMBmbERzbVN+7a5jW+4o319u23HdUSHHURHd1/7hruxAhBtSRYCJIQlNKDSrJKqpJqHrJwz\nT2aek3nmcZ893x9VFJIlhDAIYdDzK/NErL3OWStzn3ev7/vej4jrYigKEufNMwPPYzQSIWRZvG1k\nhPlcju/ccQe3fOELF3NCVlZW0IR4heAwNA2r03nT7QHezATWO4E736z5fxLj4/18/esPMTCwlYGB\nMZLJHhqNMrGYw+hLGtE1m02OHz/Bk48/w9q5U2zt7SGtqqSUNQTncPwKSSPKlcN9VEyH4cQE3ZrF\n+Mad6JEIeirFFusKIh2Z/naD/GKeTuDhBTIELk3PoUWVy3sm0DsBppslZ9bwVR1NgYWFwwR4CC2K\n7rpEwvGLpx4RwyDquuRyuZ+YxKWqKruvvprTTzzB7tFR5At+KadzOS655hru/da3mD10iCAexwkC\nLMPguv376dc0Jk+detPEiOu6TE5OcubMDLqusXv3JYy/pJztx9FqtThz5izVaoN0Ok4lCMjl8zz4\n9BGE2kNWMrE8Fw+Z06enWTt3inRvFiWb5bprrkERZZ5++gG6XZtm0yGqOFw/mGDrpkGSF9x4fcMg\nZNts2rqVDZs2UftfD+E36wzFksw2HWx7hpZlYRglDENQdQRGOIIeCiH8OGOhEGMDAyw7DdRQjP/6\nX7/G8nKV06eLHDt2jlGvyeb+DIfOnCVBiHDIwJcF8yunuOX9u+murrK6unpxbzRNw36VKinLcTDC\nr2ZM/xavxdQUTE7CB3+Odjyf+MR5r5G//uu3QjU/jnq9zrFjJ1hdLTEwkGHv3t0kk0muvnoPd9/9\nPL4/wdpaDtd1CYUMEgn3Fc3lXo2RkRFGRiKsrExjmiaGMUQkMoCue2zefAm+L0ilehBiActq09u7\nmaGhLL7v0WxmKBRUjhxp4vtLrK1FyaQiVAsFpFIJRUCxcpSHDy/QajUZ0UwmYvuRRIzlSoWoJLE7\nmyU+OEi1VqNZqbA3GiWuafgXkvF39fdzeGEBv16nJ5FAkmUWSyVM26YvHGa52WRyeZn3CsHGgQGe\nnp3lwe9/H6fTIZ5KsXnbNmxVpWvbGC/54ypUKgxt3PimV+G96Qmsv2wEQcBDDz3KE09MAikOH57E\ndQ+SyQg2bRrg93//sxc9HdbX17n11m+yvh5QOP4iGw2DYm2ZK67cScP1ea7RJJrZTrnr8tBMFUOu\nM7dmouhJ4o5DuNulurSEHgqRifSy1miihAaRbAcvaFN1Tea9BkPpHsJSCFd3KLbKBNEsQWcOc2mZ\nfqHiShL59jIN2WNtvsPCwsLFjrs+vObRm+d5NBoNDMPguhtuoNvt8szhw0QlibbvM3H55QyOjvLo\nN77BZfE4fRdOGqqmyRMHD7Jvzx4c26bZbHLsyBFy09PE02n2XHHF67oB/Cy4rssdd3ybM2eqxOOD\neF6X5577R975zl28613X/9hxKysrfPWr36bbjaPrMbrdOQqFGvcemcSqRIhHY5xeLdGRSxjlIptk\nlVDgM2RZlBYXebDTwXQ1OsUuodAwitJLrnOGF9QKSdfl0OQ8+VoFLarT6na556FH2dw/wObhOLNL\ngrOLebQgYFiP0o4Itm8dxysXWZ1fJKyrrLerkNnA/m37qLdaGHqEb33rAXbseBdjYxtotY6yuFhh\nZXWdiV3byUjL2IpM2zdx3Q6bBiLs3TTOyeVlWq3Wxc89MjICqRTr1Sq9F/bR932m1te58qfp7vYW\nANx6K9xyy89XNIyNwbZt5w3Q3jqoeiX5fJ5bb70bx8kQDic5ezbHk08e43d+52Ps27eXAwee5N57\nb0OIYYQAWV7nE594++sKPwgh+MxnPsp99z3Eww8/T73uI4SD53U4fXqVTsem3V5jYKBILFbE8ybI\nZkdYXp5mYeEMIyNj9PQM0O2eZHLyDEG5zLbhISrAUqnMYHgQ/Ay2aGH5DQq5PCOxEPlOB991ma7X\n6XddMvE4cVlmtV4niEYZvXCaLQORnh6C/n5OHz9Ov2kyqOv0qCpxTWOl06G1vs4jzz+P2enw4uQk\ntXyefdu2ke92Ofb44wzt2sWLR46wOZUiHomwXq2yaNt89D3veWM37nXwlhj5JywuLvLQQ0cZG9vP\n+HiYybNHmDn8MMryCmZngf/j5vtJjY7xoU99nGbLJQhGkIMcY9EEyVCM5cUz3Pm1b+CLNH1SlCAW\nIZscZi6X42T+BQxRoy88QCaRIKwbROwuLyydZnhTlIJIEYk7aJZNpVGnEo8Qjm8impBwkymcwCcy\nNkx4eYHk1LOkRBhEFtWXyXgOhcBkra7w8MPP8/73q8RTKbq6flEUvFR4hEIhjh8/wT33/IBisYGi\nCK6//jJ+8zffy7XXX0+9XieRSJBIJPiHr32N3Rs2sH7y5MV1SoVChCoVzuTz3DQwwN//7d8SbTTo\nSyZp5fN854UXeMfNN7P3n2tJ+To4c+YMZ87U2LjxCgB836PdjvHQQ4fYs2fHyxxMf0gQBHzzm/ej\n61vo6+u5sC7DPPfc86RH3s6aVMfTE0SNDaxPPsioEqIjPMKSIBYKIbpdjpybIrLvQ/zmTZ+gVCri\nuh7Z5Ac4fui73Hm6BV2dmLEBr1nA9Du86K2y2pDxu3WmV2axpChROYocNZmIOVhzRfZkMniaQqFd\nIxEapNla49ziNLmWRaP4AlkRMLO4gpbOYgxvYc+eDTyXf5q19bO8be9mUp5LLBrlxeU2XUXw7Qcf\npCwE+z3v4meXZZnf+sxn+M43vkFucRENqAMT+/e/ofv0q4htw223wVNP/fyv/alPwR13vCVGXo37\n7nsERdlIX995H6h0up9KpcB3v/sIn/zkh6jX4aab/hWdjoWqKmQyGRYWjnL27Fl27tz5smsVCgWe\ne+5FVlbWGRrqZf/+ywiCgJDsMt6jUMgt4fspWi2JdruLJMWwbYd6PY4kNUgkLBanvou5OsNOI47c\nslgqvUAg1SmtrCHZIQ62qnS9FrJk0aP102xVuWTrTsxGkzOlKfYO9+MFAYuShJlIsHFoiHQ0SsX3\naZfLnKjXqRUKeEs5qrbDxuvezud+/9/wV//hP1CfmqIvHKbRaDDT7RIKhQh1OiycOEFEVUkXiwS5\nHNrWrQwNDzPQ6XByepobPvMZjh08yGypxODmzfz2dde9oufNm8FbYuQlzMzM8Fd/9f9x7pzMuXMH\nSSZlROk4V/UPcSyXwyoucmmyl8nJRe7+L/+TFT/go5/8E7qdBvEAcrPHidgWTQsSmTR+u8nRlRzV\nto4sD6FLLWQ5x3JzhWQuymA2g+uV2D5ikC9VaZdrdCyHittCSW1gYtvbaTRWsOQ6w9d9CFXViUaT\nlL7yx/SmU3TMGJ4r8IOAntAIrl1lutEgm4pw54MHGN67nctvuIHv33sv6+UyC8tVZDkOODhOlUcf\nPkqr2UMsOUQsHubs2R9QLJb4vd/73MsSdDvNJhv6+7HrdRaWlkgZBkIIitUq49dfT35piXS7zaYL\n4atMPE6vZfHEffdxyfbtr+nt8bNw/Pg5ksnzCZpLS1OcPn0C2xY0GgVuu+1OvvjFV9rsr6+vU6k4\njIz8SKjU6yV0fRDf1xkYybC+cI5Io02mmadKAyWRIhELMdtuIykKQjUIR+L84KFvUSuuk+gdIpGM\n0fEHCckS2wYSmLbNfFcikurjio39rIdirNXLNKwCshSl7Xi43QpurcFEYFPxfVqWCnKIartBux1w\nrPoimYTE7liCED20Kg2s0hy1lWVC2y/jmvd9BmftOXqGB5g+epRSPk/bdbk+m8XsdEik0zzyzW+S\n+sIXGLjQkLG/v5/f/cM/ZGFhAdM06e/vp7e39w3Zn19l7rvv/AnGli0//2vffDP8yZ9AqwWv0kn+\n15ZWq0UuV2F09OWGLul0P4uL05w8eZIgSJLJ9LzMOC6ZHOXo0TMvEyPz8/N89avfRVGGiMc3cOJE\nmccf/zvifpF9PT28e8MGQs02f3PvA7jSFjKZCTxvif7+Hnp7t7Ow8CC6OsXutIbrj6IoKSyrjr1+\niqYWYiQcp+EFqIFLwddxvRhzZYESBCwXc2wdHqFQzPDE+johXWfZdfnIu9/DgSPTlFe6tKwk+foa\ncavBTPXM+XbQmQGu8AZ4/PFDbN67l3qtRjIWw0il8FZW8IOAfl2n4fucmp5lixHBzFd44J77+K1P\n3Ew8Hkctl4nH43zmd3/3Dd0rz/M4euQIJ55/Hsey2LpnD5fv3/+aY94SIxfI5/Pcdtv9eF4fyWSE\nWGyQxbln6G+sU+mC0+iQ7ushEU2ScGwWmy2q+SWevvu/EYRjNJeniXbqGJJMtd1gONTECxnE1DRr\nLQ1dDWgHsHdsJxuHRlksHGbHhijRUJa/ufseYi2frJvGcSAhJjAtj24HarU09foZvvvd/5d9+24g\nlUoh+S2MaBzHCWE5Dogwlge+UOgqfZy2PRSyLB+Z49TBF9g02Eu11KbsyyRHtxNoGQ498zCOM8rE\n4AaKxQarKy1iiQi33nofN930npcp5Q2XXEL+ySfZvW8fawMDFJaXcV2XeDbLp2+5hTu/8hWu+idf\naCFdJ+S65PP5iyGjnzeKIuP7Pvn8PC+8cJJEYgeRSBjPy3H2bId7772fj3/8Iy8bEwQBP0yb6Hbb\nmGYb2+4ixPlmZxGxzqhbQieKpMRI2GV0ucvYxp0MJxII4LGnnyX37MMMa72EPcHa1BTPdUoE4e3U\nRZsTroMmy2iRFKZl8szUGWrNMu1WG8MfxCaMKnQMqZfZxguEgzYrbQlH9BOOZokZ0GzM0jErZEUE\nSUmxWMqhx9IQxMnKEebOTFJvVbn5Y28n3dvL1i1bOPQ/b0XpatwztcauzSO8/7LLaFsWzz72GB/9\n1KcuroGqqj+VEdRbvJK/+zv4vd97Y67d0wNXX31e8Lxk237tOd/gLSAIgpeFXc7/HlzIeXhlOEYI\nCc/7Ua6U67r8xV/8F44ezQEyGzdu48orr6VcUGg3C3zwQuPRt+3czgMHT7NQbpPJgGvHsWttCtUX\naTU7dESeeKqPgl1hdfUcVqdOGI2iabOGjO70IUkuspNDFv043X66IozZGeLFc7OMDwo+dOONCODb\nhw7x1Mkc5WaWtu9SqU2z0RNkAw2BQsWV6ZRqPPfIvXjeh7juuu2cmJtDchz6+vvZfOmlvPDYYyy3\nWpTbFpaI4AUxrI5Crd3iwIFnef/73/mKtXs1bNtmenqaWrVKtqeHiYmJn8qDJAgCvvftb7N25Aib\nentRFIXcE08wfeq1DdbfEiMXeP75I6jqMBMTCvn8YWCAkBHCLQWsFVaRJRtNjXNwrcRSXaLl64SD\nQdZyOS7fvofDxRwDwHg0SUOXOVWcRhraTa3i4ephdDWBrzSZWi7TtWqoqkrbbHPw2aexSm2G5GGa\nVhUvSBEKZ7AaRaaOPIhPHzG9l9bCPI8s/S8mdu2kJSTOFtdImipaoOJLbWzCFJCxHZvB4XeynnuW\neGuVjZE4S88fo4HPRHaQ1ZPP0rKgXm+iGztZKdZARNAROLbBwkKXP/3T/4c/+7P/m+HhYYQQXH7V\nVdx+9CjTq6sMZTKokQiz5TLXvf3t9PX1oWkajue9rAoHwAuCN9RI59JLd3DixA9YXCwTiUygqmE8\nz0GSLHbuvIYTJ47w7ndXsG2bQqGAruts2LCBSMTn8LPfwyouYwCdIGBtdZWxjVcQ6bbZeMk25ucn\nEXILW1cZ1FRmCgWSoRDHCgVMBCPI6F2BKing6niey1Rjhv74ED2eh2nZ1KwOa16DUaPJoKdge734\nTosWJfLE0EjikmKVOkOej6+7mKbA8VtUghCqvgvfWWSpVqPsa1ADVY2y0l3Fo0omKKPNDbE8N8cP\njp/G98bYuWMPqmYwuTDJka/dw/W7R5EqlZeJkbf42ZifhyNH4LvffePm+PSn4fbb3xIjLyUcDrN1\n6xDz8wv09/8oSX19fZHNmwfYtm0b99//PI5jo6o/SuSpVpe48carLv7+B3/wf3L33S+gaeddTRcX\nj3L8+GFS0SSjcZtGp0MiEsHQNMYHUqzWYni2ilKvMBCJ07XaeIpNZSVH2Q4QwsA0AwIvi6wrqFab\nWtcjKZUZcWx6SOIGIdp+kXnfJDA1uq7NysIMP3jCp3d0lKLt8vzhc2SlMKbTIe6uMSx0RGCgCJ8E\nMvOeg1Pt8MTD95KfSjGejbNSLJJuNpEMAzeRQEmlia7LOFJAxa7SLxuYZoelpSpnZ2YIBgZeMyRT\nLpf55le/ilqrEZEkTnkezwwNcfNnP/sTfVp+yMrKCrljx7h6fPyi8LlkZISTi4uvOe4tMXKB1dUS\n0egwkUiC8fFF5ueP4wmJYreFLLWI6R6zbYuOO4TjtRmLZfBci3JQ44kTT7FTkuh0W8x1AgYG+9ml\n6DxZWMB0N+CKeazSMwRmncAS1MpFOkGdWyfbpHWdwDJwVJB8CcttYZrrBG4H4ZUZzGzCoU696xNT\nL+H4wSppI0rDVJBwGZZDmK5DSVSxQ0OAw/LCGZLlc+wdHiUajtByFAY1n5VmjY4dUA76cO0EbXsa\nPzJGNtmDaXUolYoIyebQoSpf/vLfc9112/mt37qJeDzOp77wBQ4fPMjk6dOEYjGuvfFGdu3aBcDu\nq6/m3IMPsvclJyDr1SoilXpDY5Fbtmzh6qtnefrpJ4nHM3S7BYKgxb59m4lEIpRKYb5z1110lpdJ\nCoEVBDwaiWAoCu7sM4zGhjG0MJ1uk65cZXnpCWrFGvONNp6XQJZ7aHlhzhQrtK0iVjaLvmUL2aqJ\nVISuvUzHl1ECj35DsNRaQ3XjBJLAdQMkN04sWCHlyPiA5HqoCELoNOkCPhpx6oRR0ejxbGrBMmuE\nqYhRJClMy/dxTUE6sgvP7YKwaLoZYobHJSMbaK6vMze7Qmu+TEka4IyzRK5aQ/YjeEEP3370DK7y\nHGo4zI0f+hC7du9+w8Jmvy7ceut5l9QLRVNvCB/+MPzBH8D6OrwVRfsRN930br761btYXKyhqnEc\np0Eq5fCBD3ycdDrN+953Fd///mFUtR9F0Wi3C+zYkb4Yonn66af53vdOEolch2EM4romtdp5QbJm\nlClGNO5/9ggfu34/mqryvv17eer0I5TzPuOxLMvFWebWZnD8AAONU9UlUrEMY2P7KK2XaTTXaAgX\nVdHpxUURKrYnIXAIBx3GWCVo1OkjQNh1jh49SqzZ5PCRU/S5CQbVBOu+TwaBHLTwCBN4BooaIeE3\nWLEaBLhEVlvc8qk/ZHp5mZm5OQ7Nz5Pauxf7xBkIIoz09LFYnKLeKSHpMSp1k6cWc/zZl76E67rM\nzMzgui7Dw8MvC8k/8O1vM2BZjLzEOOfcygqPP/IIH/jIR16xH69GPp8nJUmvOIEZ+Ammim+JkQuM\njPRy5EiZaDTJnj3XMDS0wvLyAqdaYQayCdZOnmW15REIGVtS0CWJGgH9A+PkFubY1ZNkpegQi4Ro\nNZsMjqaIrxdx5HOkfINau0vSAU3rwwskUkqYRnueptOk7mlkPEEgEghh4toOrmgjJAnVmsXorhNF\np2bOo/o6pmeQ8McpsIQfeMhKhIhsEPfyIHoJNRcZ1UPUyyVajSaB6+G4Ft1Wm6bWSzg8yGBUMFPz\n6Dp1bKdKrWnieEUkuUit5DEzvYIQKps3n2LPnvOOs+9673t513vf+4q1u+rqq1ldXOS5yUkSQBfo\nRiJ89LOfvXC0+uq0Wi0ajQaJROLH+p+8FoVCAU14jPcrNLozbJjYx+joDqLR8/1+CvlJeto+b9u2\n7eL7yJdK/P2DD/I7N93I+nqRdrsLhNG9GFqrxanmOuluP5LQUSWFIAhR7hqU4xJf/NznuP/+x1hY\nXGNcjCLLCqZVJyJDIhJF7ywTsmcIeVFaVpeWCND9NsJ1SUsKnSCKTJQAiXvETYMAACAASURBVDAm\nJh4eDkmi+OisEaMYdFlnEM3YRMAZLC2OZ0UJOV1EYGEHdUDCsRNMnX4BbzlBx1eIBWFKQYelSptu\nO8pQOknbqiGbDS5JZ5i653tkLYuThw/zyc9/HuON/Cb9FcZx4KtfhQMH3th5otHzjqzf/OZ5UfIW\n50mlUnzxi59namqKYrFMNnsJW7ZsuSiwr732GjZsGOXkybNYlsO2bdezefPmi2Wrt912J6aZRQib\nbjeP41TwPBUhtuE4eVw/xSMvLhAJS7z7sssQQvAbv7GVU0eXyNeLLORzaF4fvaqH7ankRBm72Uao\na0RTcRZthWhqG5HCHGq3iiaiBAIUCVSvTFoIGpJESpJQJInJpknt1Fm2dTtEZZV1a5F2YJHFO/8A\ng4aMhOd6+CjUhEDtmkS889bvm4aG2Do6SrXVYtkweGA2x7pVpt/UkCIZipEMsfRmqo0cH33XdRQK\nBb7+N39DryzTbbWYKxYZ37ePD//2b5NMJqksLbH9JdYVABP9/Txz7BjOBz7wurqDG4bxqhYCpmW9\n5ri3xMgFrrrqMg4fvoNqNUoq1Uc63Y9ltfjUv/4kV1y+i7/+8peZvv8oitfE92yWbIu+4X4828FQ\nQzStDlFVJSxJ0O1y9PRpih585O0fY2lxitP1WTKeQr2zjCVieCIg5MvUfYcWGmVhkfFDBAR0abAa\nNOgRGj3dLu3ARPPCxHFxqdPx+shgUCdOJKiSCifxdY+o0FkTCkM9WdJOCLuyil2uEeDje108T6dE\nQEwShGQfXW3hegbl+jM0ux4SJj1aLzGnh+VTK+SXphgZMdizZ89rrp2qqtz8mc+Qy+VYW1sjHA6z\nadOmH/sE7jgODz74Aw4dmkSIMNBh//6frsvY2bNnefj22xlUVW7cOMyjTx2l4DcZHf0kjmOzsnKG\nqNRm58gWOhcMfaLRKDHDIG7btF2XzZs3sVws8viBAwxpGnFdpyEn8X2HuGuDCZ5wqeBTWrb437/w\nfxGO7SCQ4xQbi0TlDEIYtLwGS9Yqm2WJqzM6SizEYqFJnxUwZ7vI+PSgUKRFFwkXHRvwMdHooBNC\noY0e6HQCBUW4ePYU4bjAMELU6yHybgPhd9FCPYSsGqJbB7eDFEoykE7R6rTwfYmKDbpsUGhWUZ05\n9iXTbOkfZ6U8RZ9hUMnlOHbkCPuvueanWu+3OM8//iNs3gyXXPLGz/XpT8Nf/MVbYuSHBEHA8vIy\nxWKRSCTCtdde86pfjsPDwy9zHv4hR48e49ixBXw/STw+TLW6iGm20bQJgsBG0wK0iMxyOc7XH5zn\nqTN5YlGPq/buZl4u0SrNMSiPMxg20AhodVWmPIk5OWC902B8aDPR1A58q0TYLKKYM+hKgJAEllMn\nE9g0CPB9D1eysUUfSWsN3VVIC52w3yEdwDQ+TXwCVCJ0MYhhBV1ydDHdISRpDavb4Rtfv41UJs3E\n8DBaPM6MUIn1jXB6vo4fHyadmWCDHqNWW0RRm9xzz6N8+S/vIK5qJLQO+1KC7ek0p+6/n28sLrL1\nbW/j1R4dZUkC38f3/de1T5s2beIxw6DabJK6ENqxHYeldvs1x70lRi6QTCZ53/uu4sCB51hYmERR\nBPv2beY97/kNJEliaNNOoskcqujHth0ss0qrWsNXXFJhWGo0uWJoiEg4TKlSZX01j+T6zJw7garr\nBIGDLemE/SQIB1noeMjYdOhFZSVwqdLCw6aLTSB1ifktPLoEfgQbHWgTxcLBQpHC6L5DmgCz3cF3\nAmzdY2xY4OkylY6J7/p4XhvFM9FkhUU/QFPHsLotmpJDPNZLo92k4xRQkNicvpyYlgUsBgYmmCue\n5eDTz8If/eS7oRCC0dHRlxnC/Th+8IPHOXhwhdHRtyFJMp7n8tRTx1/3Xrmuy6P33MPenh6ioRAA\n8VCIxw6+yAvP/D1bdu3huut28syDJ3nmwAF010WRZYRhsGX3biRZptZscmhygcVVh0IuypzqUgla\ndB2Fqt9LKlhDFT6IFCpJ4sFpbNOmbge4joZDhF7PQRUqzUDGwmG7JgiFw8gEJHQFyekyFQSUkBj3\nJbIIyqyzQogOEj20iTGII8KEVJfhvhiL66vEFAslsPC8zQgxiCxXkNQIgaehCImOUychVUkrYSql\nKqoSomlXcJQRVCOGJmVw3TnGFJ8N/UMECNpdi6efPowvqTyXKzA4PPy69uql1Ot1Dh8+wpkzc3S7\nLQYGeti1aztbt24ldGEfftW59VZ4gwsRLvLud8PnPgezszAx8YuZ85cV27a56657OXt2HSESgEkq\ndYBbbvnYq5bwvxTP8zh69Cj/+T//LaFQFljGdZuEQhEsywY8JClPPK6TSF5ONCYwzdOEYkmMIM/O\nbJaZTAZvagbTk1CEoGo51FyHkDBoo9DwTPIVHc/NMT6mo4V9iKiMREJoQmG1WcLptKkIiaTwiWob\nqHkmGaFiSTq2sEj5LjoBowTMErCMhEGAoEkRgckABikaLHOunaVXDLC22mGlnsM125RSW+jPRvEa\nMvn649RKR4j5LmHZJ18o0up7B0l9I77ZYTa/SGl9gbErE+xIp+k4DsVTp2grCuVGg0w8fnH9Vi6U\nAL/e8G4oFOJDn/0s991+O3q1igw0JImrP/hB+Mu//LHj3hIjwMmTp7jnngM4jo7vG0QiFp/4xAeY\nuHAHePbZgxw7XOC6nddwfGoa2Y7iqUkWWgsIf4Fr+3VSIyOc8X3M3Ar5ZoeykmBY0SgWbURQp+Za\nxAkRZo0E4PoSdSxa6KQQjGJQQcImjoJCIE/hBSaOa6BjYCMQJBB42Jis+wZZ6kToUvW6VF2Zjf19\nbI6pHKyus9BqE+qY6IGHHO2h5rt0tTQhOUXdNZFjSS698p2srJyiVuvQyTcJSVGEsMlk0wghSGhJ\naqWVn+taW5bFc8+dZnh4P5J0/uhUlhWGhl7/ycja2hpyp0P0JU36BgcH+fiH+3h0YYF/9b99lG9+\n8x957PFDjFdLbErEyQz1ElYUTj73HPLAAE+cncHu9DOQ2URt5QQ2gnh8gPbSAZQgTky+hMDzCYC2\nX0WnRTiQaNlzxMQECS1D1S3g08GSsyREG0c3mfM8wq6LLSksWYK2b1DEoEODQRzaCHwMNhAhTwtZ\nctCogy5YDqr09upEG+f3YsE6S8Otk0hEKK1XSCi9SF4TmwqIKm3HxbdtOmvgxuJI9jq269G2NKJa\ng0DyqNRbtO0iHQfS6W1YgUfRFHzlK9/h93//5tftnFutVvkf/+MOarUwk5MF6vUurnuOzZsn2bKl\nj89//mP09fX9VH8L/9IoFuGZZ843xvtFoKrnuwHfcQf8p//0i5nzl5Wnn36Ws2ebbNhw9cXXSqUV\n7rrre/zbf/uvf2yFiO/73H33vRw8uES9PkwmM0wk8i3q9SdR1QF838T3c6RSDrHYTjQtQbW6SkhX\n6DfCGOpGTi/k6UunaSfjnCnVabUTyHICXU/i2F3anSIea/iteTK0qa36yEZAzNCoxxX8doemJlF1\nJIaDgLLr0LIc6lhkZB1JimJLTboBqIGHjiCNzzo+VaLU0fHJoiDoModKGl0fJaZFkOwwC9UOHeEy\nbofYNTDKxniWp178PpniFONDE1TaTQbjm7BbDpXWEhkjSa+apW61eeTEOd67ZxuObTMeDtMeGeHs\nwgL9rRbxUIhKu01Z0/j4q4TnX4uxsTH+zb//9ywtLeG6LkNDQ0R/Qp36jw/ov8EIIQaEEEeEEKYQ\n4k17H/l8nn/4hx+QTO5lZORyxsauRNMu4c4778c0TQBOnpzB7vpY3SZhr0lftEx/vMGmXp3tI1Es\nSWLn0BA7evvQ1SiSkWXr0OU0NZWwKpMQIUBhjRoKNiIQCBxcPOIEeDTwgFEEEbr45DCdNoVA0MbD\nFW081umwRhkTlxYWs1jYTAqFNSMM/b1sHB9jviGjdQYYSV9ON7SNKa+fST9N5JKPoEQNGt0lFOHg\ndlY4duQu4vEaV111DYYuoSgtYnGdwPdotSuomk1vT+Y11++n5XzIREFRXn68qmmvP4dBURS8V3nd\n8310w+DOO7/HyopgW89GvMww657G5PwqS8UiR1dWKXky665B10hzfGWFs9Uyk6trzCxU8fwInj9L\nw52mERRp+fPInGUYh7BvIwc1QEaSFBRZJaIq6LKD0CN0JZloXx8Fz2Ou7iP0Xgylj6y2FYtNrGMQ\nJoROhCI2FTFALhSj3hemHFNJDKT48MQobxvawP7RMW4YGuaKwSxb+sfpT4VwvCnSyjK9SpGk26Xs\ndlkRDjNOC0cK4wgDnHUiYpaN8Ti2olIq5SiUF9iy7XIMI0yh02R8xzWEQuM8/vjB173mTzzxLJaV\npVbz8f0sw8P7GB5+O8WihW338e1vf/91X+tfKnffDb/5m79Y748fVtW8Sgj+14qDB08wOLjtZa9l\ns0MUCh3W11+1zyoACwsLnDixxsTEleh6mHA4xXXXfZ6JiWEGBlokErPoegFNk1hbW2Jm5jDl8jRd\n0+X0/BqleotWx2bH5s1gGAxETBzJRNNUNE2l4hXxMQkJn8s1h3fEsuxU02xyAxqmSW61gGFkGE4O\nYjoec26AQ4DpNGk4FvO2Rdtq4UsyDUliTficxqWDYBsdHAJ0xjCIYdBFRsf3PVLpBE1VJe95dI0+\nhNSL8FssrE5i0yYmuwwKQcKrogYuTkdC6ToQmEgCZEkQkuO4ruD08jIDAwP4vs9Afz+f/uIX6bnm\nGpoDA4zccAO3fPGL/6wHDVVVmZiYYOvWrT9RiMCbezJSAW4A7nkT3wNHjpxA0wbR9TCVSgHb7hKL\npanVopw7d45du3Zx/OghKrPPINsOfUiYWgg9PoQsJ4knPYJwmAeWl4kFEus+6LExkqEUZ+QwSiBI\neIKwCIgGGgVkfBwiSGSJoVCjSoEOTWoYtHBII5MAPBzWyOMFKj0EWLgMoaIhMICqHCETSuJoCjdc\nug1Z05k+u4ZZb+MJh4ar0pXGke0o86ceZX/fMIutJZa7NQIvS9AQnDlZYHVhmba7Sq4eILfzRKMh\nJoaHaNtttu7aycGDzzE+voH+/v6feb1jsRiGEdDtdjCMH/VDMc3Wa4x6Ob29vRi9vRQqFfpf0n14\nanWVzPg40/M+hmEgVJ1Nmy9ltZxnZnWGZ+eL9AztJeRtpNHMsd6t0inN4bldgiBAshtoAThECTFP\nDJ8EATEENRysQGFYdlj25+nYc/QFLhHJx6fLgl1jWrQJvfgijgddX2NZsukGo6Q8B5cQBRI0KNLC\nxKIHTUSQrBVCbY9o0KY0XeRQMsmwiOA16tRsE8XxsFWboewwpfI8tl0m4vmYgWAwsBkHTgc+K5ZO\nPLmBAdskmtTosowSUVmr15DbGqfm5zheKtK/8zL2DmwkCHzm559/3Wt+6tQs6fQ+Dh9+hmRyEwCK\nohMEERRFZXW1TqVSeUU36F8l7rgD/viPf7FzXnXVefOzqanzHX1/HQmCAMuyX/EAAyCEgm3bP3bs\n3NwimpbFMAxGRrIsL6+TSvWzYcPVJJM1zp416HbjBEGI2dkpHMcgGvVBGsaxHc4u5dg8HGLz8DCx\njRuZffYF+kI1Gn6TgmnRAeLhS5HMp1hxNBY9DzuwEL7PmGQRBHCumGPelJHYiA8YdGj6JgYS0EYL\n6lh2gI9LSwh8SSHpB5RQkIiTpoBOhT4EDhoVPGaXX2DP+DWYqRQdU8aq5cD20YIm6ys+5coS8ZCg\nWa0i6WE8v4jihlEUmXbgIXseptdAV1yWHYfxwUGOFwpcuX072WyWG94Ee/g3s1GeBVhvZstigGq1\nSRDA44//I/W6hxAGllXGMFzGxwX//b9/hRcfOEDWipzPFZDaDKoBS5V5qprLjbv3snXDBmaCgKfv\nexDX1qg3q5yqNQCFlgr5bh1XkenzOuhBEiPQUAAbEw2TPqCXNpO0SaMQR2UIBdAAjSlcVpHZQ0CS\ngBodYijYXptTHZt0qJeGJYjLAasd6Hhp5CBETIvgUqTj1Il6Hfxaka7nklF3kIhm8YFVs0WjUUGo\ngyhaP3gR2l2JF2YmSffYmNb7eOCBWTzved7xjh285z3v/JnaTCuKwrvedRX33PM8fX07iEQStFo1\n1tdPv+5rCCH4wMc/zre+/nXyi4uEJYm67xMbH2fTzp1Mz08RiSQoETCi6mzs38BSpU5au4Te1BiJ\nsc3YgcUTjx0iJA2RDm1HjWnU20tEWKEpNLq2wnDgEUXQwaYBdLEJeza6sBjwQyREhIgvaAc2G+kQ\nC4fIqCrHa200wqQ86GIRFmEMBD5hiqi0SSOIIfmzjPkdNlk6fakwitVldn2dI67PpYksY8keirUG\nJ1rLJDIeOgWq3Qw+vdgErNEkxioxP4TvdggFLm6nw6IZoMb62TyUYMXMMddaoze6hdHxSynWuhw9\neohMJsbg4OuvYAqFNGzbuvCE/lLDKRdZVhBCIvgVfnxfWIBz5+DGG3+x8wpx3hb+e9/79RUjQgh2\n7Jjg3Lkc/f0baDabVCoVHMdG05o/9ql9bW2NSqVIu10HYNeu7dj2cdbWZmm1ylSrk4yO7iWbHeGp\npw7j+yaq6uF5IQIklis5+uJV5pfb/O0d3+H4YofV8ChhKUYoFCYZ76Uy3cS0CyjBAF16cLw4JiYy\niyz7NhEq0HIR9GITpkfR8V0HjzWyrDFMQEgI2oHLEhALwApghRhLgIKOTJFLUBBINBEIQnSsKs/O\nHmNwdBedylFG3TYbI/1orsBqVhhyW4Q9nX3RJI6isFJepOyAGpog1dPH4uoZCObp60nhJ5McX1tj\n+/XX/9R5ZD9P/sXnjARBwPT0NMePn8X3A3bv3sqWLVtedwfC8fEhbr/9NlR1B6nUALncKuVyQLn8\nHMePP4XdEuySIhhI1F2brmdQMlfJBw5tLcX9z+S4/4VZ+jcOs2RGWG8WcOwG/QT0CkGPEcdFkBdd\noiGJSrvCHGEMApI0CeOT5nw5rAXECBjFRgJkQmjIbCDAxiMgRSBKqEBZSChBgCZsYtEUp07P4dQW\nkZpZDDwsIrT9HmQ0JH+Frl9gplWlzQDpkIEky9htE4FH1/FIRUe5dtcm1ms1Cq0WUmQfg4MG4+N7\nkSQJz3N5/PFDbNo0fjGX5qfF931M0+Tyyy9D0zQOHHiOpaUWmUycT3/6Bv7qr17/tfr6+vidL32J\nmZkZmo0GvX19bNiwgbNnzzI/9TSKq7NayuM3q4z1jVFumPT3xqn4/vlutrMzeJZC3XUInDKaJIFw\nSOiDNFvHMQOFSXwS+CSQ2IBKFpc6FlrgksbEl7pUPR8hC/p8lWKnS0OYZBGYSGRRmKFJEEToImPS\nRGEEjSEC2iRwGCaE7EvUKjUMPAZsmyUEjXoVxXFpdNrEAhDOApraT+AkCCHoBTQyLBJgUyNoN/C1\nJsWOjSynsR2bNbnCQHIfpdYL1Jodhn2ftbV5ZmdbZLMaV145yOTkJNu2bfuJ63311Xu4776ThMMK\nZ868gO8LhLDo6zMRQiKd1n+lT0Xuugs++tHzeRy/aD7wAfjyl+GP/ugXP/cvCzfccC3T03fy5JPn\nKBQsHCfAcVbYuTPNuXNT7Nr1I6v3drvNXXfdy+xsmW4Xnn/+IM1mk717r+Xqq6+gXC6xuHiQnp79\nrK3FmZ4u4XkZUqkrgYBm8zSNRo5YupfVEsw9fQRJZIgkdpBMx7CsOp4+ht1pIyjguBaCDCYhBBEk\nVFQmgDUa6HiUiJHAwiVwHUrYpPAunLw6JAMISxIZP2COABOfPDYgI9FBRtBGwgAMfNrUMADdq5Fb\nPEVc5MnGNCpeg6BZp9suMBy4zHcDAiEYjsW4rsfmO6vnMH2bdrfAVTujfPDKjzCdzzO0fz8f/MhH\nXrUC6RfJL7UY+fM///OLP19//fVcf/31dDodcrkccL4L6cMPH+D55xeJRocRQuLYscfZu3eSm2/+\n0Gt6XPyQgYE+bLuLLKusrKxSLLaRJNC0QdrNAnHXwNBM+uNpNL9MudVgOZBRxSB9IopcU1j1k8zl\nS8RFBMVPEg/OsBXQAwmzU6AkzldXPN8OcMigECaDxDwp0uTJ0mUNSAEGAVECLGQEoOOj4hPCw8fH\nDEDgkxYgC5mCJPDy01iORZ/nEqDSIkYTj7bbIMDCwCTGRiwvoBa4hD0XxWlhBx00Q0e3QlimR36l\njuXatM02fT2X4PtNHMdC10PIskI4PMyxY2f+WWLk6JEjHHzkEexWCyUU4rJ3vIMvfel3CX4Gl1Zd\n19mx40eJr9VqlQP33suEX8VqKQz2DnNyZYbJSoGi49KbTVMtVxCVCnFZpz/SR71ZgG6RcDRJQtM5\nV7ZxvR4iZFBxMEUTNcijqiqKUyGNjCMChgPBouchk8TzAjpUsYAgkIni0qbOGnHW0XFRkSgTYCGQ\nEIBEBgWBBvhOCwMXV/KxAJmAsm3jBQ0qeAwJmafX1+kGW+hHBQK6F7Jm0sQoUiISGKxX69SDML2a\niusUWVtuYScjSCKL25nn8MElQtF9GKEE+/ZdyqZNm7j99gf5d/8uS/YlycCvxpVXXsHhw0d59NEX\nqdfDCHE+x6hcVpidPcB//I9f/JlOzH7Z+e534SW3o18oN9wAn/wkVKtwodnyrx09PT28//1v59Sp\nr9HTkyUWizE4uJtarcif/ul/45ZbPszQUB8vvniW73//MWw7xJVXXs/WrRMYxjCPP/4AnU6F0dFN\nQJUrrhjjscde4Nlna6hqH6bpIYRA00aBNo5TptEwaHQSILLo+mW0rS4hR0NRZIrFF3Gd88FXmxQu\nUTroyJgI2rh4dLBQiOCxgkeVFiod+vGwiVBAxyOFwCDA9300wEPCQkEhSwB4FNDQ0Qnh4dHARKMB\nCAJkMnIDw7cJazpdT8ZsO4SEjhtIBFLA8XyeyVKJcCTC6OgA7775AwS1GqVSnbsOn6N3bJTrL9n5\nioqkXC7HzLlzAGzauvUN78AOvzxi5FXvYn/+T/77jx07zj33PIbnnU+GabdztFpw2WU3XRQemcwg\nx44d4tJLZ19X/w1Jkrj00stptXQOHDiKYWRIpzN4Xh/tapdkZBPr7VNE/DqSMJDkJoGbxhch2rZE\nw5NIRnpptVRMqUAMizQ6McLYqMxi4gRR/n/y3jRKrvO87/y9d6996a7eV+wgAQIkwX0TBYmylphj\nK7ST2PKS5MxxjhN78iVjz3yanDMzJ2fOaOIkYzlxbDmW4sgKZdESJUILCRIEFxAAAZLYG0Cj9+6q\n7tpv1V3fdz5UCxKGlERTliFK/3PqS9Xtum/f9956n/f//J//U/UcDGIcPFws5hhAZw2TImdYZjfg\nAitImpiYGCgMXCI6KFx0+lHEGASY5KXPOoJ+Cf0hNITo0XxAmxXaGJh0EeTIMIRNjKNimsql3CkT\neGk6tsFIeoBq6xoD2TFqrTpmHKJ1fa6dO8nEzhQXL56mXC7jOA7FYp4g+MG6EaUUy8vL1Ot1stks\nY2NjnD51iqNf/CK3DQ+TLhTo+j6nvvpVwjDk4Q984IfO0bvFq0ePknVd7vrgB5ibm+PKlUUOjPaz\nbMP2TJG5c3NQ65JIJFhtVHEDkz5T0a8J6iqk1o0JY4kt0mgqpkNIR2VpksILawR43G8IGtKnpsDA\nwUfhYRFi4KETEAMJBglYYgPIYnEZhU4HmwgHnUEkDh2SeCgc2jQJSUkNE4iBs4QUQ50pzaRt2Sil\n0ElgXlcTBZsyN4VGiIvCxSAWDitxG5sqRA6dRovt2/fR6LQxjVEkGZyiwfbtW0kk0ggxyBtvnOHg\nwR88Dz2m0eKjH/15PM+nWm0ADomERS5Xvt6I76cRa2tw7hw88sjNOX8i0Tv3oUO9oORnFVeuLLBn\nzwcZGBin3a5z9Oiz+H6OTmeSz3/+RVZWFjlw4IMEwU6SyQGOHXuDO+4ImJrazeOP/zLLyy/wxBO3\nU6vVOXToLXbvfoxXX/08ljVIrbaMrvusV14mjhcJQh2lDJRqYhjg+/NIGbGy0iKdThAEPpZsMFXM\nMLPu0Q4lHl2gjiCPgUIjIkOHcSJSuNhABQuTFJIuJoIADQeJhqRMjx0fRpBDwyXa3FiuUadBb2tq\n4VOgS0hHJrF8j5TuoeptErZBPlsk9mJanoeQPntTCbqAcBxWUik++Su/whe+8DRSpPjgXbtxHJvn\nn7/M/PwKv/mb/xAhBM9+4xucP3KEgU0a8Mxzz7H7kUc4+NhjP9YNx00LRoQQBnAI2Ad8Qwjxvyil\nXvt+x5fLZZ588nkGBw9g2z1Pg9On21y6NM8tt3SvO3gKIUgmhzl3buZdBSMDAwMkEiHDw1PMz1fI\n57dQq63TaJzC0mM64RoxDspv0KelaURtXArkgZyA5ciAjosMXSJWsGjTwiDG5BoakmlGMdhAx6CA\nzzpZFulQQDJMlS4ZoAhEwBopFJIJQgQ6awSsYFMlR5sa/fikSXKSFhEJ0ujo+BRUyAaCRSQpMpi0\nCNAx2YmGJEbSVR4ZobGiVlHaICrOc3XlNI5ZIfQlXjBAXWmkMnnCxgzV2Q4XktsoFrfTbnvMzBxj\n794Hv++19DyPv/zLp7h0aR0h0ijlMjGRplOe547h4eueIAnb5vbxcY698AL33Hff2+rXm80mrVaL\nfD7/rpxZlVIcP36S//iZz5OLDY6fX+SunWPsvXMPzU6H4No1VmurRF6VlYYLcZGkEREG54itScqx\noNwp48UeJuvkRY51dIQaxBaDhKpKGxOHDEtyli0q5jgGEkiQpQG06cchZJk2A/i00VjGIcEQGiZV\nKnTIIkgSs4JCp84El7nAJBGjCAwMVoABEoygOEPAhMxw3mvjME2LOj55FBYaOpKAiBpZQipU8ZWJ\nrrVBGUiVwdAjtBgqq9ewMiEpp0A3iPE6dYIgQEqJ46So11s/9Bp7nkel0mJi4va3fbaw8Cr1ev2H\n+j28X/G1r8Fjj8HNdNF/7DF49tmf7WDE8wIMo/cbcv78aaJoiEJhJ15xYgAAIABJREFUDClXqFZX\n6et7kJmZFYRIkkjkMYxbOHfuLcbHt5PLFZibU7z88imefPIQqdQ4e/cWuO22vVy6dJFEImRt7QwG\nOpY2iiddoAb0qsekjBAiTRSl0fUtBEGbMK6w3MrSCT1iWkAA7NzcHqxjMESbi9TQaNEihUU/Vcq0\n8YjooFGn97vfBZaBfnQ8HBK4DBBzFWhicQ2NAVI42LTRKOOgEWHRRyX2aChFot1CdQM8Qjoq5o7s\nELHjowPewAAf2rePJ//iCyixjX37vpvWmpzcy9WrrzE7O4tpmpx74QXunZzsmZ0B01Jy7IUX2LF7\n949VU3IzBawR8KF3e/xbb51D1wevByIAyWQGKe2eLe62bdfflzLGNN/dv5ZMJvnQh+7ia187hZQt\nzp49jut2kHKdpqvRkBtoyqJqWMzFVSLaWAgmxAABYMuIKKiRo0YRxQgxTQwuUMNjhCw2ihiFiURg\n0k9MhRwuJnmaRFTo3fbzOCSYYoMuFdZR+HQYoM0wGhZNClRZw8DHZjslCgToLLJOi3WSNBlAMoZD\nC48WXVZZRmdyMxPpIIWFroFUc9j6PMLWmJrcwtLMAjVNx3H6aYcVIqoMRWl8t0GUjel02uzYcTuX\nL2/QbDbJfo8pznfwrW8dZmbGZ3Lyu14A166dY+HcGR7+2MEbjrVMEzOKaLVaNwQjX/7y05w8OYOm\nJVGqw3333cpHPnLwB2qAjh17jaeeOk46czv5SCGE5D8+/QrFjMFgYQuvnL1MrEzu3H4n3dY1Ytmh\nEy5zt9OkwywroYGjAkxcdupJbDq8qnQ8BvGVSQyYGGRFgpZKoqsmTQxcthDi4JJgEIGky1WW6bBE\nlwIpduCRok0V2A0kN5M0fehUsPFYZASXC3TRidCwSTOIhYuPBrxBSAuTFGkiDJrMkWIADYhYo491\nfNIYGICOLV3SooUggadilLYBwsEILBbLp0AMYaULfOMbr2FZIZOTCT760e/vIaCU4urVq5w6dZaZ\nmYvABGNjE9eZSCljlAp+qk3PvvIV+Pt//+aO4eBB+PSnb+4Ybjb27NnG2bPHyedLLC+vkMvdg1KK\nOK4jhEE2O8j6epM4bhPHEaaZoN3W6XbbrKwsMTu7xNjYfVjWPoQocPToSUZGcjhOAc/zQCmy6dtQ\nKsT1yihSSDWIlD22U6mAOF6m1ZLoeoFYWTT9EI1+NJaRZBHUAYHARrCBZCsdWmSIqdEgRYM0Nimy\nlHE3UzmwDoyibTLiggIWNhHgcRxFkgnW0VEIfHLk6aPNEtClgg1qGEUBKRWBlkXKZbz2EtIQxOk0\nj+zdy9233sofHn6VvXe/3X1Z0/KsrKzitVsM2fb1QKTVatGo19EaDc699dYNwUgcx5TLZXRdp1Qq\n/cisyU9KmuaHotXqYJo3elEMDo4hxAlct3P9vTiO8P0V9uy5+11/90MPPUBfX4F/82/+X86evUyx\nuJW15SYi2oKFSUwFQp0uAUMEZESHulwnpeXQiEAFWPoGaWUTSJ1hbJoE9LqIxEgUERKNGB2DGA2B\nRR13U0Mwzgx1OmiYZEiQZxWHOinSDJAgJkUDgzSruHiMkCOLho3EQ2OIVbpswUUAPhEBaTLEdKhu\nxtgdwCWSDdKaT1LvJ6FJdK3J7JWrjDu3kLUcAiS54i5W1gzMQodksotprjI+bjI6OoDvV1lZWXlb\nMBKGISdOXGB09MYbfWxsB6+/ElNpNCh9T0OmKI4JNe1tzMeJExXGx290Zk0kXuLRRx9+x7mLoohv\nf/sYo6N3YNtVLr36KrEfIeU2Wp1V+jKCZpCiL3UrDRe2Tm6lvLjIxvo6k6KM61eJojyT9iBerDEb\ntSjikcJGMyzCWCEwCK0c5aiKHgvO0MsrRqQJKOKQoMM6GQQaJSQbmAwSY6Bw8JAICuhY9IjaJgY6\nijXyKBQ6JRwsTCQOIYoWCSJMAkIsOgQsYrEdiU3EIgkUMW2WMIiwaTOIvRm6TlgFkqbOWmcezYJb\nt97K2dUyi66PUjn0MIGtqgRRhZXZSxx8eJS9e/e8o3bn0KFvc+TIBZLJMZLJcZ599lluueU27rqr\n17djaeki+/dvfVc+Au9HdLu9PjR/+qc3dxy7d4PnwdWrsGXLzR3LzcKtt97K1q1nOH/+NVy3jq7X\nCIIWW7cOs7jYII4DNE1j27ZJZmbmSKVKKOXTbNY4c+Z5du++l+XleWYuvIweaaSMDEfeOszw5AGE\nUAhhY5ltbt8+wqFjJ/DCbUCJHm8BoAMxSs3S19fP+noOPx5C0aJAlQ4DWGjYKLqUCcjhUASuksBH\nZ5AqMT4GCXwmyLFGiyRtkoAEXEz6KCKICYnRUSRRSFpYKDwMIEWMQsemRZ0uCWyxHV0U8VDkM8O0\n3SFqcZvZbpWCGaGEYLVaZXCkhOe1gBu7L0rZJZNJ0201EfQ2IefOnGH1yhWSwFKrxYV2m9179jA5\nOcmlS5f45l/9FVq7TQwkBwf5xC/90o9kfHjTzMb+pti2bYJO50Zzm1yun6mpLFLOMj9/noWFCywu\nvsrBg3v/RnSSEIKRkRF03WLHjrtYnptB6yqG9CwlK01GZEgiCEgQaEX69ByausJq/AYtzhOwiNR1\nWo6iKRQ12hRxUIT0OIYGkgUkS3RZJqRNE0FMgxJZcpRYZIAqOjW6tDDpYGBRpFdfo6FTRMPBxEaS\noYtBhEFACh8dRYENdFrELOHSwKVCAosWVWaQzFOiyQQRe5XBNr1XBWFKyEqTMIopWCmKmk1zbY5i\nro+qH6ElLJrNNktLMceOnee1117CfYceA2EYEsc9N9XvhWEYDE9t483FRfww7B0bRbw5P8+t9977\nth312Ngtb3NmffHFU8TxO9mc9ZTznge2nWBkZIShXbs4u1QhVknm1pZ58+LzZA2HTqPM3EqFgb4+\nOlKiS4dLsUnFGqbfmCQtExSNJEmtSEOksEWLSF5FM+psHZtk+9R2QsuiSosyoOMDy2RooNPYZLh0\nfBrUCXCpU2WDKm/h0cRliYg5FDUE9c0ANU0XEw+DNUJcYmr4rGFQJ0NAm1FgApsJJCOsoRHjcyuL\nDLPGEA0G6TCOJE0OCweHiIAAgWUaJM0Mhy+WeXMtg5R70IRLUszRaZ5i+5DO/ulbufryy7x27O2e\nI8vLy7z44nkmJ+9hcHCCu+/+EPv2bef8+Zd4443nmJ9/iZ07HT7+8Q+/62ft/YbDh2H/frjZhUJC\n9ISsP+4GfT/JiKKIfD5DozFPp1NhdvYZdu7Msn//bWzbto2VldOk0wZ79tzKPffswHXfIJttMTUV\nMjaWZ3Z2niOHz5JQY0TdDKvlJRKRjVO/TDLRolQoMD28m3rLpbfJHwESm68M0AEaCGHi+8MYRha0\nChYVsqTQqOMgSaBt8px5JD4mEhvw0DYbetRoABepI/FokOIaBeZIkgAkLhUaXKXOBVroeAzjMYrJ\nFhKMsUGXJSIadOmgiSwJu4+AmFBLUO/WCeMQG5PpVJG7cyWqZ8/y+UOHuO3Aftrtazf4OjUa6yST\nLjt27GD77t2s+j4Li4tUZmbYUigwmM+jpdPcOzbGX3/ucywsLPD1z32OW2ybeyYmuH9igsF2myf/\n7M/wf0gzvB+E9w0zsnPnTqamXufatTcplSZRSlGpzPLoo/v42McOcvXqLFIqtm370N84OqvX6/zR\nH/0F8/MdksldKO8VDJUETWDpOoE0GTIK+N46Hj6aajCBxwIJYJwEDpqVwUiliOxZcJeoBi4ugjR1\nBjHIYuGzyDLtzRKwJYYYwsShjYliEJ82LlfJkkSQQtEgxEJQoYkiJtjkW1L45Gjg4RAjCOjQxiNk\nHIMhJAKdBh2uoohw0dEJgJyWJG+mieMuelwnjtr0J5Kshj4rnSr9CZu+bIK6Lqm5LkNBPwMDvfxi\np9MEfF5++RS33377DbRcMplkeDhPo7FOLvfdygzXbbJr1wT33L2b40eOYEQRoaax9wMf4JGDN6Zu\n4O3BjGU5hKHC87x31I8kk0ksSxGGPqZps2PXLs6en6O+4VLsSg7ecjun5zuQKLBQnuWtmRn0OEZz\nLArpfgx9Gn+9S8cLCJHk8jmaLUEYb6CLZTQjRa06i9aSZOQC47j0pJwgWEQBGhoeFgEuedbpw0Cn\nQUiFDUooJpAM4+MiuYbc5EgS+IxSIMBmhhmKmJj0+hCts0E/5ua8xUgaJNBJUGMDD0igMUZIB4vd\nxCyRoAkiS0c1Sdk+pUwJJXM0amsEooBBDunWiZw0yrKYW/LZZiUYzec5/dJL3P/AAzdc2ytXrmIY\n/TcEh3fc8TCl0iD9/XV27dzCwsWL/Nm///dsv+027r7//ndM372f8dxzvR4xPwk4eBC+9S34p//0\nZo/k5uCLX/xrZmYC7rzzk9x2W8zhw4c5ceIoQgQkEhZjYy36+9MsLZ1CKZd/8k8e5vHHP4ZlWXz1\nq99gZgaqaxvoUhJ5McrL0tDmmKpL4lREYnyKSmWOWvsyQWijsQ4YaFhIdCQuMEAY2HRFCphEqXUE\nFXIkaNJAsoSgH4OQLk18PEIkC+h08AmJsPGwCfBwmaOPPMN4ZFlnlkXKm/WQkhzwnVxAa1PAqgMZ\ndCwWqBECbWIVUQuqYPaTzxQJ/A2yCQNNZpEpn8V2i6Bdp53I87WvnUSpFqurC0xN7UGpmHxe8Bu/\n8YskEgkmJyfZ/sADPPWHf8hkFLHcbLIWhoxs387OiQlOzc/zrUOHGNZ1ct/zezzc18fq3BwzMzPs\n2bOH94L3TTBimia//uu/zPHjJzh58nzP+Orv7eHAgTuwLOtHoodefvk1PK+PvXvv5IUXziIiA0WL\nTtjGjwOgSYIcQgT4ss2iiEggqVMiIkEZg1Ig8Lw2yVQf+WKI78fskpJUq46jTKTqkCNmmIgVXGY3\nq8ZDbHoFuBKDNDpJ2lwgJoVFF40aGltQJLGo9cpNaaDh0CRJjI9JjEmLLfRKewUhgg5NArpkEQwi\nsXFtEyHXyQqBjCVZzadgCYRwqeLhDEygJZP4YYiyA7YOb0UInWp1CYhIJBQHD36I1dUzVCoVBgZu\npPo+/vFH+c//+Sk8b4Jcrp9Wq4brzvKpT32E3bt3c98DD9BqtUilUt+3hb3vd2/QBblug1zOIZlM\nUqlU6Ha7lEql64yKaZo89NDtfPObbzE2dhumadHXl2buwst8cPs0g7l+LOMcaH1MDA/QFAItnabc\nnKNTr+B7NbS4jzx9NGOBaAQEUUBX08im+wjDCyS7AaoD02xQFFBQcE7oDCmfReZoMEZMnj7WGSKP\nRgIdjyQGOi5LNIhIk8SmxRARTXQWSWzyIwZJfPJcI0DQBRqYNDGRmNjkEEQoNkQdWynAQZAnZA2d\nrRgiRayGaHGFgrJRaPhxFdUdw9NCfK1AHKfw3UVslSEOspjYNOMuM4vXkLKE1+m8bS503XhHIzPT\ndFiZu0ZiZZ5tg4OYjsP8Sy/xX8+c4df+2T97V6Lj9wsOH4Y/+IObPYoeDh6E3//9njX8T3EV9Tti\neXmZS5cqTE7eTxAEVCplJienyGRMUqkNnnji59m581cJgoBarUY2m73ue+O6LvPzazQa0wR+AXwf\noepEapaCbnP7WB/HO03G9k/z/PKrbDRjlBrYrG+popFE4NNT9vURxRB11ugFByE6G/gosihC1hE0\nSRHQpAkM41PCQ8MhRGcVBxOXgHFgCReTNjkUGgbraGwjZBTIIVhFkQdmUUSsEV1P5nZJAGObK1FZ\nvkZX3oLq2ggRASvkshLfMJhbKTPVP4y3scraS88wPrKFlh6S3jHAL/3qrzI5OYmmabTbbRYWFpjc\nupXx22/HKJcxHId7h4cZ7utDCIEpBGsrKwTNJi+trWHbNlvGxihms6Q0jWa9/p7n+H0TjEDPV+LB\nBx/gwQcf+OEH/xD4vs+bb77FW2/N8OyzR5mc/ACl0jie9yyh3kZGVdrEGDKLQ8BZb4aAZSY1SUk6\ntFD4pPBIYusFVkIXVAyNLguNNUYyfbQ6TfrlADYGGjERHoI2EBLTpIZLiIaJtmn3XsNlnRAPk54K\nulfoGZJknTySgBKCKgazdEljYaKzTIkW/dhEKC4SE5CmTT8GDhYuKXRUYNLSM3SMDk7soukhRSNL\nhCKK51ha6ZLecRthWGZ4vIRhDrF1ay9tYlkWxWIRXddpNAzCzZTL92Jqaorf/u1f5ujR11hcvMD0\ndD8PPviL11NmlmVRKBRuMKnbt2/XDSZ1i4unGBy8hXQ6T7NZZX39HI8/fg9/8id/wezsBppmo2ld\nDh48wEMPPYAQgocffgApJS++eIw4NikUKgzla/Q7I0SRz47+DKfmTzI4tIP19QZzixcp0SAb2nRC\nhSaqLNAkZAQ9cugSYJpbiIMJEnGLYdVFx2XYSdMJJK04IsBknSQNTJLkAZc+IMkQIRKQaKRx0NAp\nI+gt0IoOkho+JeokqFMnJkCQZwSPKmsM0kER0wdsoYtLAqElyMuA0wg8BrHoBcwQEKoQkLjCASvA\nNjQ0qdFyDFYEuLJDWri05RIhO1Bk8cM2Ml5iV/8wr5yd5WO/9naF5vbtW/n6118lDKcxzZ7IWMqY\ntbUzDMoWd+zcf50d2zU+zpn5eU6fOsUDD37/iqv3E6rVng373e9efvZjxeRkry/O2bPwHjef71vU\n63U0LUOz2eSll17H90103cb3FY3GLL/3e1uxbZu1tTUajQZCCAqFAkIIzp49h+NMYtsZKt4qlhQY\nwkYTKQy5xlp9g/6+IS6efYv+/r3E4UXcapVuZBJhorAQaPTKdnUgiSIkyTLjxCRwyOMi6VInJETg\nIjHRiTBx6UcR0GENmxI+RVw2uEIXmxR1PAIalAgpAkP0qistFEkgBHIo6jgokghMPCpsI2ZC19Gt\nBEPEzPivs+avEqiQnNNiuCswyxtMI1isLKAZJluzfTSXr2IXhymfOs3c/fczPT3NiROv89RTz1Ov\nR9Tr62ysz3HngMMH77rr+hxEcUwlDFmv11k6dYpd/f24ccyzly6x/847aUhJ6UcgBd5XwcjfFoIg\n4L/8l7/k2rWAQmGcTifFyy+/SV9fmunphwi6X6M8F5PDRtckvkwgqTKKz4RIEWMxpFlUpUBgshG3\nsAhIofAI0PFwWytE5JFYSCQOGg5JArpE+AjatFkhg0WRLC2W6NIkRYEMG+Tp4COp4ODRIomOTxpF\nlxQZDFboYwmBzRQpWuSBCJeAGkOkyeIwgoaNj0tdLDKobIJYZ8ldZsIKcNL9rHgeFa/NXjNN073C\npbPXaFJiShWItArXrh3ljjv2sXfvLQgh6HbbOE50AysShiFra2sYhsHQ0BBPPPH4O153pRRPPfU1\nXnttnmy2Z6LzxhvfNakD+Af/4CEOH36NubkGg4MFfu3XPsyLL55gdTXB5OQDm+cL+PrXT1Is5tmz\nZw+6rnPw4Ad48MH7aLVaOI7Df/g/oFhr4LYa3LqtwIc/sJuFapU3nzrOdrPJbmHTDDzKWsyG1Cgi\nuaYbxLKFrecpZbbhBxGhF6ILi6zmYlv9uKFkmRhPJZEkCVBkcDaLpy0MTDQkAaAIiLAJCLApEtFG\nsIrFNCZZQgQao/jMotGiQo4sVZIYxMSk6AllHSLasovCIU2EzgaCQSyyRFzEZpmiiMjqAQ3hs+SX\nGc4k2GgtkNUstoctItFgSKSpqAu05RIJYTOQtBDYnFtb4fcOHHjbfJVKJT7xift4+ulXEaIfIQRR\ntM727QX6K/7b1PNDuRxzFy781AQjR47A/feDZd3skXwXjz7aY2t+1oKRXC5HHLc4efIMUKRY7Inh\nW60YTSvw1a8eotl0WVjoIESKMGywZUuOT33qCRYWVlHKZH19A7QcUdRF12NMPUEku1yqNRkvDbGx\n3sBOKXZsv5eZU1/Gjy10NQV0EASY6ISY9Ep46wxTJb35lAZo5LBoEhKgsOjDwCFGJ8sqMV0k2wjJ\n0KEGDBMwR561zade4tMgg8ShtzCb9IwwV4EuAoVBgEGTiDQOOhH1uE0uSDCSsDAsMPRVjJRF2m2y\nU9mkbAtdmCS7bU52fUSrxhbTYr0yz6rZ4fDXv86O3bv50peep1KJWFpqoOsjhFGWLxz9NkEc85E7\n78ALAmbrdeyhIbYtLdEeGMCMYwZyOUpBwDeff577nniCLT+CuvpnJhhptVocP36S8+evsb5e5urV\nJjt3PkAYhmzfvos33rjCwsIGqVRMIj1J/+QeaqunMUKXki4I9TQFmWbISlPtttGEgy4qoAxyWIyg\n0yEgwQpbEMxt+qa2aeKRwEUg8HHRuboZMRfpkmeeAAMPi2FGMVhmkjQ6EolPjE+bzCYh2CEigYZL\nCoMUDhIHAx0QdJCUsbDIYAMhvQ6RCbJ0lc0qHSIV0cZDiyHXqNFQMTudPPlIse538YISJXsAfb1D\nVwasqFO0WnVsG3K5FJ63wD/6Rx/G3DTEOX78BH/8x3/J8rKHaSr27h3jn//zf/y2FA70OmieODHP\n9PQ91xey75jU3XnnVQBuv30/t9++nziO0XWdlZUV5uebTE5+12nVNC36+3dw5MiJG/KTtm1fLxN+\n9Od/nte+/GVu276VfDrNRrPJpWqVfsNiX7YP2eoQxJIhaSOBDWJSho4b3UEcX6PjVtFUH6Hop8o8\n1QgW2nVUHAI2y3TJIMmgSCBpY1JHox8fY1MXHuHTREOSw8LC2zT8D4iJNmttPDrEGBjEWDQoELCT\nBOvElIhoAwERTQQeGjr92EyhyBAxQIJXmKaKo0qYMqY/7tBJgKPr9KkWg6GHpelU4jpXaDGpbWXF\n1pBWH914DuFeY+eAwV9/9rPc+9GPct/9N1ZD3XvvPWzduoVLly4jZczWrR+gWq1y7L/9t7fNb9f3\nSefz7+0B/QnE4cO9xf8nCY8+Cl/6EvyLf3GzR/J3i5GREQYHLY4cOcvExEMA+H6LIJjnwQcf4ckn\nv8KePR8kldrGmTMXqdc7vPrqZZaXl8hmEywtreH7MVK2UZqBrzQ6co3hYpG8BQOjaQpCZ3BwJ5cv\nn6EdatjKoUsF8HFI0aVIzw2kiI1DEgWUAQOFhYXBAJIYezOR4jNGgT4Ea+TQyNJEEKKRBmwGadNF\nYGJh0qJGtJkccjZfJj3Z7FUkAh+FTQOT4c1ONf2AjCOCbkCCGEeE2Nk8WSfFZT/GCaoUIp91FPt1\nA1NGFKwMcdBlupjn+JkzHD36KqurHZaWPEqlO75nk+HwVvUNppSif3SUD//CL3DkmWcYzudZ7utj\n5tw5unNz5Pv6GBoZ4a6HH37XbVjeCe8qGBFC7KYnLT6mlGp/z/s/p5Q69J7P/neERqPBf/pPf0Gz\nmUHXczz11EssL8/z7LOvksmMMziYIZGIiOM65fIVPA8y2QJJcxqrukImjqiFEZ6n0Q164cBq3GIQ\nnwodXFKEGGRxGcYljWCBkDYmLQbJb6qpOyQps0bIMII0Xao4NGmQQzJOnZghJIqALBoKB4syBll8\nJAl0wk1rtCIGi+ikMakRYuIQYVAhRCGJAY8mFgObC58gj6JJjaKmKJJiSjPQY49V38VT0EFjWBRJ\nhDH15TVsM4VhKjrJGkePfoHf+q1f4bHHnmB0dBQpJS+88AK///v/FsfZT1/fXuI45MiRK5TL/xf/\n7t/971j/vy3lzMwslnVjPXrPpG6I8+cv33Dsd25q13UR4u36kkQiQ7Xa/L5zfvc99+A4Dseee47G\n3BzFoSG2HjjApZdOU91YZ8BOk0451NqCPqFTjupEMgESpMoQeVUcZSFRXMIiyRTDsUMdnxbrTCMp\nENGgQ0iNJDmuYXEVjyIRAp8qBhtITBpUOUtEgMYoBqMIIKSF2gwkJRohOjqQQJBAp0tECaiiU0Yj\nIkUDkzwSB5cOLjaKBC4WMZrSMFSXSd1ksVVj2HQYd9I0my3G0alJHzPRZSw9QNlf5u7hYabG+rnn\nnp0MDA5w/KtfZWh4mOnp6RuuZalUusHQrFgsckjTeP3NN7GUIpvPU+jrY851+R++h9Z9v+PwYfjj\nP77Zo7gRjz4Kv/M7ICW8i24XPzUQQvD44x/hxRdP02gcB3QSCZ377ruLdLpApdJGiAxf+cq38Twd\nx0mRz2/jmWeOEgQ1hCigaSWkdIhZB9ZJWZL+dD8BdSbvuotTh17j9ddfpNkEP3QwN3Ue0CYAIkx6\nXOVqz7MJA0EJRY2IDhVCXCLmiYkxSeCSZ2BzW2nQxSAkxCDcNHvow6JLihThpn5sjiUGSVClS4EI\nAcwBCg2JQ5YCQ6QpM49FhI5AwwCp4RoahulwcSFi/9CtjA4WWVi8yka0gAhXGUdQ9z1WVZVcqUjC\nsmislvnc577ClSsdlJpC12v09fW0NolEhmx2B3c/8ggHNpnTZ/77f+fk0aPko4hiGNLodtmYn0cZ\nxrtqv/KD8EODESHE7wC/DZwH/lQI8btKqac2P/4/6bmo/kTjlVeO02rlGByc5tvfPorvJ1FqB3Fc\nw7L2sLHhMTwMw8OSffsGeO65M3SaddJRgCYEjqmR1rJUugGNzaqULpIUMToRtwD5TVszG2ih0IgJ\nyKGxhTVc2uj06tVLmGTRyRAxS4sZ8nTxuEoXQUgHjwCFwAS20OISF1kiTWpTnDqMoolGxBANYmos\noSNpYdJEIACJSYKADhtomJuNpzsMUGbMtLkWuLSloB8wpeICAkgQyYA8bTTyGMom7XdpBQYDA9Ms\nLS2TzWap1Wo8+ed/zuEvfYXkso/InmE9aJLK7yaV2sIbb7zC66+/zsDAAPV6nVwux9TUFKapbxoI\n3YieQdE734q9RbCNlPH1qg6AWm2N8fEBlpeXyWQyZDKZG/5OCMG+/fvZtXs3p0+f5uTJ0zz91Fdp\nri2ghQax8IjDCKF0grhFE0kY2uj6GpoyEbpBO/Rps4bGCDE6y0QoFBpDBFymSEQGnwZztCiQQLCB\nQZ0EASliJGlqDKDRReKh4xIBOpIu0IfCAsqbfShaSLKcp01qvD7QAAAgAElEQVQHkw6SYUJMFB1g\ngzothhjCIcAHPLIoDBRJaiSVRaiZaFKiZIxnaiyHHRIEJAT0CaiGy6w0qpRSabZMb2HfvmnGxkYB\nmEilePPEievByMrKCufOXSQIQnbs2ML09DSaprG+vo7b6XDu3DnyUhJGEbVEgt/8V/+KycnJ9/qY\n/kShUoG5Objzzps9khsxMgL9/fDmm72S458lTExMcN99+wiCMRKJNI6TQtM0ZmfPks/n+cY3jlCt\nJshkBvH9mIWFDTqdKpAjnR4DLgIOmiaI5SLtMOKtZostW8fxpcS2TFqtU0TRViwCBCtoDOCQJmQD\nRURAFUgSkqRCzAgJdFwsQhQOZWxgCIlPnjV0rrFOgRYSKGEhiSltWse7pDEI0YAuDoOs0yXGw8ag\nDEREJIEUFjqKFgu0SG86DwUk6VX2VVWH9TCiHCq6THBhpcpAIs9Q3zjlDZ1VNkhHLjkipAntyOfY\nzBUoTrNr10PMzT2LEAXm5yuYpkE2myWK2mQyaVqt1nWmuhOG1JtNzHabjKbRXyhQ932evniRY0eO\nsHfv3vdsfvZumJH/EbhTKdUWQkwBTwohppRS//Y9nfHvENVqlW63y6lT5+nv38fKyipB0MvIGYaN\nEAqlOkCSIDDodhv81m/9r9xzzyn+t//50/QXJlmLoO16BME8kXA4HdmkEYSAj4GkxTQShb0Z20Ib\niY+GQUxEg5AkPcOcKoJdCJrE1BikRj/FTQ2AxCOkTMggEG326TVQFOhi4lNGQ9DHCiaQQiNJSEST\nDBGTSLJAHajgMkgEWFRIskGOChlCNGKsIOIWJWkpDQ04i0OLUYZJ0iTLOhKLBnacJtBCymtnqdU+\nwqFDV2i1PoMhNziQy5DtRtj5CYJAMXvpNRYzNZKpCVbqNT7z6U9z7/btpOn13LGGh3nwsceIotOE\n4RSm2WNN4jgiCFbZs+ftroBRFFEulxkacjhz5kW2bz+AbSdZX1/m0sXDxFWdr1w5hyclW++4g498\n4hM3sDErKyt8/jOf4cLRY/iNmG55cdPpNOKa18SSCk9GLBoa7WiQnNnCNou0/AWkqhFTQ6cPiwEc\nBBERGjV0uhiYCJqUsDAJMamwDvSRQtKgTUSAYoQSOTQCsnRpcpkVQjQEQ0gsFBtAA41xIqpcYRGD\nESzySGLmWMehhWSQDvnNZN884yQpIYmISRKRQCcgwgwjZpWGpxQ50yBqNqmaOnnDhNigZDtEhRyf\nfPhhHj5w4AZaNWHbVFs9a/hXXnmVp58+hmEMomkGR44cYv/+ET75yb/HoS99iXsGB/m5J55grVYj\njnu9MJrV6t/yE3zz8Pzz8NBD8B57OP5Y8cEPftf/5KcNy8vLXLw4g1KKHTu23dBJVtM0PvnJn+Oz\nn/1rPK9EIpHDdTfIZpsMDSU5d65DsbgdTdNRStJstllbWyKdHmJjY5m+vltpt7t4Xo0o2oGuu3T9\nLhfOrLJw+b/ihZJu1we1wShlBA51FmhiAn2AApL0DNxdyrTwqZInRmAQM4RgijwGEU26XKafDerE\nFMnTZg6NcXzU5loxj8M0Eh9JihYQksMgyQAaDgE6NSJcFggYQSMmJM0GFjENFJcASUwKnTImDUok\nGaIddHnmwmV2DxSJlc1qbDNpSNKZPJatUcrlWKrU6Zu8lS1b9jA+/jrnz18glbqNpaUlgqAOqs7l\ns+d45elFzr7yCgcefRTh+wSOw1y5zFgyyZrrsi4Et05OUrl0ifn5+fe8IXk3j5r4TmpGKXVNCPEB\n4EtCiEm+T4O7dwshxP8D3Am8rpT6n36U7/petNtt/uqvvsalS6sIYfP66yeYmkpgmgk0zUTTdGw7\nZmPjEkqt4DjDWJbFXXfdgu/7jI6Ocs++IRbmVmjJDUJ/DcNrEsnt+OQ2Q4cENg2SzLCAzygGCSQV\nYpbRUCiMTYlih5Cev1weQRMdF4NFilhoCARpUljkyNPCZYYWXXzSSBro6Bik0OlJJvMkyVAhxiRg\niRUMRgnpwyGJxQAB/bQ5RUABg0Wm8Slu2vBoxOxUMesIdHQa6EgGEWQIgTQedTJ0sSnLRdYJMe2d\n9PfvJwiWSaVu4cVvfJb7PnGAbDbJtWs14q7GVKrIpWiNYuoWymvXkBdj9j/88PXg4PLyMm8eP87H\nP343zzxzDOgJIqWs8Nhjd7ytfXWlUuFLf/7niGqVpFJY7VVePz7D6MQ0hh6yOxvx6LadmIZBLCVn\nT57km0LwiV/4BaCnvv+///W/pnL6TaIgQdZKcn++xJsKKvUaBdXFSSdY9RV+YppsJ0Wf9KgFF0jI\nRSaVywZpKphECGIgg0VEP10WEcRscjYU0LiMTh4dRRYDo9cviCYRbbrkN+8YGCbiGk1AIakCNlBE\nRyfGI2SYFL2eSgECjd24nMJmBybWZp3OFQI0khj4SEIiuig0TBaUhgtsIybp+9iaRhiGvOX7hI5D\nf6mPR/buZandZnFhgWqlgu04jIyPs9JosOuBB6hWqzz99CuMjNx7PWhUaopTp15jZOQVupUKpc0K\nqdHNbr+xlLz41luEv/iL1/VE72f8JOpFvoNHH4XPfx7+5b+82SP528Wzzz7Pc8+9ia73Urnf/vab\nPPLILTz22MHru+2pqSl+93c/xalTb7K+Xmd8fDu33baXP/iDP8K2j9NuL5BMlqjXr9BsXiGV2oZh\nZEmlHDodlzhuIEQ/hnEVJSWhnyefv5Vm5zzIETTOoZBYwqGhQlwEMeP0qhrTCDIoDARXEQzSZIkG\nVTR2k2eEpNCIVIRLlxY5TrJOkgwlxlC0qHEJMOn16O2JYU3kZvOPPA4xkyQAe1MvEpBGMoGOwmIY\nGCDCImJhc1QrKKroaIyRJkmaNLrIUlENzm3USaYzDBUHyY6UiGWEjCMu1Ks0DIfR0ii6bvCRj/wy\nQnyRy5dPEIYGQ4PDRBuXePyeae7fuRPX8zj+1FPUGw2m+/rAsnCDgJRhsD2X46LrktM0qtXqjzUY\nKQsh9iulTgNsMiSfAP4EuO09nRUQQtwBpJRSDwsh/lAIcUApdeK9fl+j0eCl55/nwqlTnH7jDFpq\nB3fe8zFsO0EQOLzwwlH277+bIHBpt2u4rkEqlSWV2orvX2NkZJKZmdP8xq8/D52QlbmrZJwJHt73\n/5H3prGWneWd72+9a97zdOap6tTgKteEy7iMB2wTaAyGkBA66ctw3YTciI6UCPIB3VZ/iJA6Uqu7\no1Z0OyhRmktDAuFGEC7pbsaA8YApj1Ueah7OUOecfc6e573m9d4Pe1NxAaHB4Lbh/qUj7dpnndpL\na9rP87z/4a28cOESz609jWQCiUmWJBKNmAwOdTx6XCbAJaZFbmwDXMengcssgklUNtBoYVAii0JM\nDxODiCQKCgYmoJBAMGCWLUASMcBjBkGCiINouKzTHmec2Djo6AwpoJEnRsWhj0UKQQGJSp4MCg6S\nPgajun40IIQUMatoKKRRUGmOjXZy9OgS0mMNVT+OnVrE9wdMT+cxDBMRF/j2E6eYnypw5sVT2Mo0\nqp4jivpUWufIqg2OTd1ErVZjbm60BLA8Pc13z5/n7b/+6+zfv5erV1eQUrJnzz/7IbKrlJIv/83f\nMOd5zI4v7KNLS5xeW+PmB+7i9KOPcuymUSECoArBoYUFHj91isFb30qtVuNP/uiPGJ4+Td6NUC24\nWi8TGCa3Ts3zTTdk6CUoZBMsxjGr0qHm1un7faaFy2E1wo7h6ThDQEiPDiEZFFQ0VEIcQvp0gAEx\nO6hskSbFHApJfKpoDFhCx8WlT4CkgiSJRTjuov4xHlEZs0VCKkh2M8DAR44XhVJIlgkpo7EXhQGC\nkDZl0gTsYUAPSRmDDkVcUizHPm36DLstckLDUAW2lHQieH0uT29zk6d6Pbz1dQ4UiwyiiKdOnaLw\nhjfwW697HefOnQMK1wuR8f1KNrvI2bNX+GHnkesbvdzb9zWHhx6C3/3dV3svfjTuuw8+/GHGjsev\n9t78fLC1tcW3v/0CCwu3Xzc+jKLdPPzwkxw8uP8GR+1CocDu3YuUr17ie2ee5dwzT6MKhTe+8Q08\n9dQZ4rhBGG4wN3c3zeYKqlrDsg6jqgk2Np4mn48RwqBVNzCtKRL2FJ1BGUvPEURHGQbfY0MGwBwg\n0NlPwA4xqfHs20PBw8BBoY2GT588XXWevuzgyzOMBLlHqNJGsEMFh8x4xqqhUCOiT58GOXQMOoRA\nB4PRtFoQ4qMiMdHoYyLYJmAGddy4+KhExNgEBDRIs0SGAX1C+mikSGHTigYEkcvuuSluO3yCWrfF\nwOlj9bPMRS6aOmocdN3kvvvexa5d54EyfnObd9x1G/NjrljSsjg6O8tqs0kjitit6yyPG5Fyr4ed\nz6Mlkz+0XP7T4CcpRh4cH9nrkFIGiqL8S+AvX/Ynw+3AN8evvwXcAbysYmQ4HPI3/+W/kO/1OGzb\nVIfgehVeeOpr3HrXr7F79yEqlS2uXj1Juy3xvB6qGlAsHkfTciBDHn/0G1hRh0XNIo1gITIg7vLI\n43+PZ+5DAXQUNHwCHBQyQECfgDVsVJZwmMFCjDNzn2GCLC3KwDYmLSxcIEQjTYiCgg+4jBTloCBw\nUTDGZvIqDh1Ckvgk6WADGWAXPdbxsVC4xDQKU6ikAJUI6FAjHq8wDojokGSIwwQ+DtBnxNQeEhPg\n4RMTk0RBo4OPpYTEiocmDTwlRTavYdsOhw/fyuXLK1zbbqP326QWFjCMiMBdZ7u3wdCwWMoqTOam\nGAwG1Go1ZmZmEEIghEBlJAP+QULkD6JcLhPUasy+5AGkKAr7p6d59rHHCAcDEqXSDX+jCoEJPP/8\n8/zVn/4p9toaJdclaHcJ9IAlM8FztU1sBJZtkTJtmnGfZqfGXCKLHbSpxUOiOEZRBFUZ08VHoURM\nGZcOPdLEtElwjf2ENIAyMRsU0dmPhk6f/nUKa3WcwayQQhKgUkMisNnBoYxgkZAEkiIenTETaJSF\nE2OPfUQkKmJMalUYoiOAPCF78PBRUdDRmURykJAQGdWIKeAS0ZQe+Vgho+ukcxMMai1aCZ1FVWXh\nyBHa/T5hGHLTvn0MbiAc/3BhoSgKyWQKfWqKSqvFVD5//XdrlQp7jhz5pZiKlMtQrcKxY6/2nvxo\nTE7C3BycPg0/QpH9C4nz5y+h61M3ODCrqoZpznD27MUbipFLly7xtU9/mv3ZLAfn52n3+5RXL+I6\nKd72trfzwgvn6fXaeJ5LqZTi7ruP8/DDj+E4KVS1zMREkWTyFhq1U2QTecIoQgiJEBGx3AFMXCZQ\nmEVhjYg6kggFG0EXhRCTiAI2ETpF+lzjDJ1oEZXz6MSoHBgrGW1AGcenjnygTHTSNEnTpYuOREfg\nADO4xHQZjBtVFxUPHwUXSYjAH89lVAQKMRYhMSEaKklisphsscJA5gixiNmikM4RDnRWr64SuS4y\njgnDHn5BZeC2OfvCY+xcfR7Vd+n7Td753l/D29KuFyLfR8q2mSmVsA8c4LEvfYl9rkus60SpFHsW\nFggmJ3+I/P7T4H9ajEgpN/6J9yXw3Zf9yZADVsavO8ChH7Ptj8WZF17AbrfZt7hItVrFHYYETpv1\nS5foDATHT9zD7be/hZWVCCkDrl6NaTZjGo0acVxDRE10cswYMUcyC/i+Q6XTJdJDEkOftneBBWJ6\ntDGYJqJFk3ViFoBpAhL4RKgoJIQgjkfJMS4wi0aDGvtwsAhocIUBJhGSOi5pdATzODj0iQiYBCQ6\nUEXDIUOXKiYGDh0kFh4xLWLAJMYkHlvES1wCAiIS6ECWKTwCNtgkjUGAhzc+0J6iEiPRZcSAGhr7\nsLQMUvFxwzJ5tYXQJImpHsdeN086nebKlStcuVLFzGXYGW6y2mkzuXiE8upzTC2VeNeJE9QqFc69\nuMP2YIBpWZxstTj+hjcwDAKsYpHsS8Ly/in4vo/+I7psU9cJPY/0xATtfp/cS8LZvCBgCDzxjW9g\ndLvcsrDA41vbJPyQbmuLSFExZchZ5zw7uoUTK5QMlRk7Sd6yScQhi4rgjJQ0gR6COgEeBoISkhaC\nKik67KOLCWwDDUAlT4xFm21iJhAUUdnNFo8zyRADgUEGiUIfh2OoQJvLXGRIHoc1dNrk6dGmAOTw\n8VEJUOgi2UCQRhCPKbBdHCLOYZGgj42ki4NLB5jAE4KSTOCRIasM6EV9HKmgtDr0Bgk8S2N/KUPs\nebzjJWsRz127xsbGxviB8l2CwH/JMo2k3d7gne+8m0LhLr74qU9Ru3aNjGnScl2CQoF/8da3/tT3\n7msRDz8M99772larfN9v5JelGIlj+SOJj0II4ji+/m8pJY9+/escLBQojqMH8uk0v3LkCI1nTyPl\nKocOTdNun0fTPBYXl2m3JSdO3MvW1iq6rpJICHTdw7RG53joVQiCPq63SRClEGJmJAGmw6gPr6Ax\nA0gkBtrYfkyngGQHkySzdBjwFdKESA6iIvDokEWlh0qEhUMakAyoUGSbJKlxly8oYdGkimSRGjtM\no6FhINmkQ0ADQRaTOg5TgEdAABTGzicBPVpMMI/BLIIefbrUUZRtiopNLZAML5/h+OIykYioeG0S\nyQXaO89y4bnnyWk62VKB99xzN8PVVc5vbXHb3Nz16TNA33FI5HL83h/+IUdvu41vfOlL6EFAPptF\n37OHX3/Pe155ae8rhA6jRh8gy4h5eQM+/vGPX3993333cd999/3I/2hrdZWJ8RdTtVKhubXCdG6Z\nXbZNvVLm1KOPsuvwPo4cuQnPC5maKpFOF5BSsra6yslvfAenWyWnqghFQVN1TKFQH4TosUJOcZnU\nF1GCMl26OAjEOEtmpHmxkfgjOlGcQMMhiYnPRXRMJvAxGGBikcbGJY1Bkj5tavTRaRCTISSFjkRh\nkwFJOuQAjTJZBG1UYloM2UYhg04F0EjisYlDHxDECFT65DGYxaSLTZcsdTavFyLngAkZYgF5BMs0\nucz3iKNlEkYKtAZWwmXx6OsIrGnW11dIp3exs7NNpXKRfN5gZvZtXGjt4LnXkOlZ3ro0T6/TwWg0\nuHligs1kEiuRwG80+Najj5K7+WZ+7UMf+omY1tPT0wxUFdf3sV7SrW/Wauw9fJi9N9/MP/z1X3Nz\nHFPIZOgNh5zd2WHu6FEG586RtG22yjuoIsFOMBipnGIYSIUaNhktTyeo4jhdwuGAuujQikaKk2mg\nKiVrWAgEgh2GYypxlxYJquygsIqKScgUI1OiDlsEpFDHMeKSNj0m6BKSoEESA8EEGim2uECRgGVC\nKgzoEbOExiwKz7BGGx2DCTQiJFuoVEekNCqMXEdChkyhYeEywKXKNHlyDKgQ0qaIkHWS+HiRRguF\ntaiArkwwmYy4dWKGodflwrVt3vaS4x4xklQXi0UeeOB2vvKVp9D1aVRVw3EqvO51Uxw8eBBVVfnQ\nRz/KubNnadXr7J6b48CBA/+kxf8vGh56aEQSfS3jTW8ayY4/9rFXe09+Prjppj08/PA54njXddVc\nHMc4zjYHDz5wfTvf9+lWKhR/gJdgmyY3Lczx5g+8C9/3WVjw+NznnqRWmyaVylGp1BkOK3zsYx+i\nXK5y5kyZ7amIjWuPYGOgyRx+uIEkh2CbmDyjZNs8cImYPpBC4qMxRMGixSUmkICOTkSWBiY2zjhT\nV0MS0yckjUWeDDuohCRI0SSLg0objQgFizoqGSwi+hRZoUaSASo6CSJSRDTp4aPQJ2KaEePkCnAz\nFnl6nGEHnxJJRjGeDj0Wk3mW8Fjrd9mxpjjZ2aHeqVBMGmjnLmCEPu89cYKJdJqW43D+4kV+5d57\nOb+1xXeef54js7OUSiXCOObF7W3ueM97EEJwzz33cNeYX6brOrmfg7/Qq1mMnAQ+DHwBeDPwX39w\ng5cWIz8OmUKB+oUL+L5PZXWVg/NFyo0WQ6mSz+SIYpezz3+dt771Q3S7Xc6fv0g6fQeqqhLHMUHo\nYps+QagShAG+HxBHAVHgI6WLZVokDYtJKTDDNhUsLIZjVUQJlCyK9IDL5ACFDC1eZDd9pnCoE1NB\n5SYkq2TJsoxEoTxerumyxoiuqI/VFRoQoGGgM0WRDiZJdlAZYKGS5woKDl0EBhFFUlTHlfw8YmwB\nPxjn+WYwmRqXPDv4HESSB7pADYVFDAQ+bW2TXpRBKEX6CZW6V6JdCVGUDa5dO0e32ySKLKan72Zu\n7hDK/BHC0GNj45vM3HqIMw8/xO0zM8wvL3NPOs2ltTX6rRaVIODB3/7tn3iEZ9s2d7797Tzx5S+z\nK50mbdtU2m1qhsF777uPUqmE8uCDPP4P/8Dz166RyGS4/Td+g1Qmw3fPnWNxfp4nXjiP7QX4mHgE\n9OOAa4rGhDDJRQNk7CHRkbJEBgOhJKjKJj0CQgQLZLFYYEjMFkMaFBEkaJMnyTRZVIa00CkDTWAP\nsISGjklmbE63hcIuBvikyWKh4o+VVoskxwssESFdqnjEmMyRQOUcKjYRsIhCFochFygjqGOisYsh\nKQJCUuQQmDg0KZCjiMt2nGadCJst+qTwMInJklZ2IRkSWAb1YUxKJhm4LknLojsY4FrW9XH4XXfd\nwe7dS5w9ewHfD7jppmMsLy9f9xJIpVKcuP32n+h8/qLhO9+Bj/7c6PSvDO69Fz74QQgC+CVYGWNx\ncZE779zL448/hW3PoCgKw2GZ22/fdcNzQ9d1VNvG8TxMXafb7SKEIJFM4kvJ7OwsyWSSr33tMe64\n4y6uXdug07lMHLvMzk5w+vQ50ulZZmb2YRgp2rW/YtDTGIZpJClUJLEcpaUrio6UOSBLzDPoXEES\nEaERkMEkT5uQDjuksQGVFAMCgvFTRBtHgJgYXGISHwOFbUKGzJLApIBKDwWYJ0GHRRyaSNKkiMZm\n8DpXsPGZZ2QP32dktVZkpNE0iChhUqLFFsNxxrDk5tLNlAyDXDJmzrtAV6kRiwVuKS5RVC1qOxv0\nhw2+c+oUdx44wPzEBJO+z5lLlxhUq6y5LufOnycGFg8f5tcffJBbbr2VRqOBlJJisfhjl9t/Wrxq\nxYiU8rSiKK6iKI8Cp38W8uqRW27h89/9Lma9ji4lywuzhKLMpWqTyWgDVQS0m0MeeugaELO5eYZu\nt8b09AFcr0EsVtk7P8Xmyhqb9QoJJUHf8wlkl5ZwkFFAKayQMCcgcujRwVMUckqJlGrRinxCaePL\nIhXZQqfCEgOOYqARkEPQQOdFYgQ5BoQ4qARY4xD5KSyu4ePRJUOEhyBCkELjEkXaY/+SeQqUsHEZ\noDJgD5IrJHCZJEENBQ+dJAkiNBr0mUUhYkgeGKKxQEBm7EWSYyQQvqxIprQ0cVwkVAQVDHQlzcpK\nnXz+LlqtATMzu4jj5+n1Kmxv1ykUVpmf34OqjiZDhw7fjN5tc/tL1nbfcHTEb3782rXroVU/KU7c\nfjvFUonTTzzBarPJ4h13cP/tt5Mf8xQOHjzIwYMHCcMQIQSO4xCGIX1VZf/0NN8wTFb6LZIihSdc\nthSLJZFiJnTRNJWCouFjsRN5ZISBkAYJMpRpsIROiEk8NnefQjBgiIdJjt0UUdFRsDBpoyKpAwYG\nXTIUkGOXlwiTBC3EOJvIZDTXSCCw0Bji0cHHpwgUGIlq2+jEHMKjySiOPEWCEgl0nPGkKz8uVF1i\nYjQsHFxidjDQ8bmKxYAl5rGx2cZniEI7LjMp57jQrTO1fJT+sMLlzU00w6CtabzjAx+4QRo9OzvL\n7Ozsy7wrfzGxvg69Hhx62YvG/2tQLMLyMjzzDNxxx6u9Nz87FEXhHe+4n5tv3s/Zs5eQUnLo0NtZ\nXl6+YZoqhOD4G9/Io5/9LN52izjWAUkr7HHLu9/J9vY25XKZ7e02Bw/ej5QK5897aNoC1arD9773\nEO9////BgQOHOXv2SfL2Lrx+lYS2G5QEblAhlnNADiGGRFEL8BAUETiMIk2XSZAgHhuYgaTFBTyS\n6KhYVIgAjwTQIM+QJVRsDJJATMiAAQKLNAo9BGkSBDjExOiM+G8ClRYDkvgcQ46VdKPn9iSjpYQQ\nqGKTZoKiEuCLRdRoQNLoMmVNEMddTD1F15ekMwXmbYuDiRzrlR30QCWhZEkNh5y7fJmg1yM9Pc3J\nZ54hlc/z/je/GaEotPt9zjWbCFXlv37iEwwqFRTAKpV423vew8LCws/lGnhVVfT/Mzmv53mUy2WE\nEMzNzaH9gOi/XC7z+ONPs76+hVac4tnVy2y12zSlJC4V+cjb34aUks9/+wzzc69j164RI216+iau\nXv02i4tDzp/fYXZ5D+efO80w0ulKDytoIYXHUInxZQktbhAG26ixgiZDbBHR0+ZJqEmC2EdRAmJq\nCAYMpEYeSGEyIGIUrCSZRGOLiBYaLjYWJgY9IhR8NGCCGG08GdGRpIioIaljodNHRSWPwoh4qmKh\nYyMoUeQ0LllsZlGpYTKLIIOPRpl1MlSpEpEaZyv0UElhIICQgFBGdOMYJRbkVWgZHradxnFSNJs+\nqppBUVQsa4LhMCYMu5TL29j2SJuzsFBkaWmJ6toa1VaLyZcQGxvd7k/MFflB7Nmzhz179vzYbS5e\nuMBjX/86XqcDuk5yeprza2uYmgHJOdbD0aMkHcZMKTaKIvGjPkosSAgTI/boBT4+PnUkGikYW+4r\n1JAk0UmiUWXIJCqCNiEaI/M2lzQhbTIMkGMnEmMs2RaECGpoZHCBCMmAIQW6dFGpE9EnR4p5EozE\n3xFpNtjmGm0CcgwQBLQpoLCJR5ssgiwONRQMVFQEw1GeheLSlTUKWMwxi4tLgzpdEhRYoq1WMM0s\nU/NF9h+5k3b7GfInTrC4tMRNBw78EBO+0Wjw6KMnOX9+lWTS5q67buH48Vt+ZqfF1zK+L+n9RRAG\nfZ838stQjMCoIFleXv6x+SZhGFLeqfOFkxfQhh4F0yBZmkQtLvH5v3uES2sQhipPPXWWatWhXg8p\nFk8ghEa5XEbTbuKb3/wa09PP8PzzT9KtuRAlkSJEU87UAcYAACAASURBVFVC2QWmgBRR1ELT0kSR\nATIkJEaiElOkQZsiw/FdbiFIMjV+hkyik6BMBYchA3LYhBSvSwViVPIEdBkiSY6ND0cKnSYRISli\nQOLiss0yEsFI6qCMf7KMloe7CIoUcFBoAIFiEGgJkBpVt0XeFoS2RZTNEml5Mkj8MMRzHAASVhYv\nGBJGEXIw4OL6Om3f566jRzHHI7eJXI6ZXo9P/6f/xAPHjjG1sMB2o8Hlixf503/7b/nDP/qjH7Jm\neDl4DVr6/CP+3b/7c9rtGEWRTE6avO99v3pdw3z58mX+43/8v1lb6wIWvt9nakph/xvfyIzvc9v+\n/QgheOyFc7SdBEdf/4+tjmUlCII8jz56luPHf5UDB7IY1kM88tB/x9QMSlN7mSyUOPnct5h0PPZq\nE3g2VPs1hPDQTA1drdDoRwiKgI8flxEih64WsWKPUOo4OGTHCppRxRvTx8HExiUiwCGFBNpjqzMb\nhXkUUkCdiBQ+GjVWyKAxZNRZBygExGOCk0CgkSRDgixdmvRYQ47Z1irb7GFIB4UhAkjRHl/cJoKY\nBA493KhPUSToqTHZ7Dy2ncY0Nfr9AYlEEkVRSKVydDplJiezJJMW+XzAvn17ieMVdu3aRSaT4Uuf\n/CRdx6GYTtPq99kKw5+YK/LT4sKFC/zDZz/L0akpsgsLeEHA2WvXmDl0iGytw9nHrjH0CmjCYlD5\nJlEQIonwI59IKGT1JKrn0ZY+MRpdbAIcMtj00YnwSCDp4RChE+MzRCUHBAg8BD4WMWsEeORRCKnj\nYeMDBpsk6TIgJjPufDLUsPBZGzPxBUtEY/vnJIz7rBJDmswxYIBOBZMaXdpIYpKEdIk5jE9EAkFA\nBUGFvoS+kWTox1ykQ0wJ2IOuWvjUUWKHgVOl19C5/NT/y71HZtg6fRpdCG45fvyGY9tsNvnzP/8c\nUTRNsXgc33f54hefZnu7yq/+6tt/7ufytYLXsr/ID+JNb4L//J/h3/ybV3tP/tfhq1/9Jl/60mlm\nFv8F6XSOXq9Cy1lHHwbE8SEMY5bFxWkuX67w2GOPMDl5jFJpVDw3Gtv0el0Gg5DV1ccZDieJAhWN\nDrbcIQxb6OQJqAATKIpAShNVjQnDMtBiRHOMUYjok0VgI4mJqVBiQIDHVUIiQkIWMAhRGDIqZdQx\nmyTCBhpk2QFcDAQeGk20sSwXFKYZEuLRG39yf/zpU4yWaXYYZYP3hE4dSZUZVDWFEDlCv4uaijlw\n9CjrtVUW3/B6NhtJVi48T58+ke/TkyGa00MqIZVmj14YUbFM7jpxgltuuumG497sdLC7XUrZLN95\n+mnam5uIToftcpnfefpp3v2hD/Gb73sfxWLxZZ/b13Qx8swzfeLYBiSrqx2azc/y8Y9/lEQiwac+\n9f9w5YpLsXg7ppkmjkN2dl5EVaskD8zxV989STFpc7XaZmLPrczMzhLH8fWubn29ytTUFBsbFdbX\nT7OzU8eyDzCZaHH7oVuxDJtL5x9ml99AY0hKSZC2VWb0JJc1DT8OSGazGKrN0DXo+rM40SyqWsNU\nLIIwoIOCioJBzBCXBgoqbWK2RsobHFz6SHxU5onYwmI/o8wDn5Fja5odLAy6OJTRKOEREuASsYmk\nSYTJAAUDDwONRdK0gBDJBCqzZLCRXMMlIsQde8gahHSJaIoik4qJJGBgl8hmC0xMzOF5bbrdHcIQ\noshGSpdkskMuN4Fp+kxN2UTRGv/8n7+ZOI6xLIvf+lf/ihdOnWJ7c5PS/v2898QJpn6GWOkfh5Pf\n+hYHi0WyySQwUtscW1ri8UuX+N9/+wPo9pM0GoIrV9apB7soN1awIxeNBIoM2PIbdJQYVUb0SdIm\nQxqVDgNsNBwEMQ16QJ8EGhtETNDEoEgKA4UWVXQCcrTokyNBdpxnUSVFSERAkTVyqNikMNFpEbFC\njEqRGA0PFY8MQ0IkQzJY6Eyg4JPHx8FjhVFSUYhAYiLwiLDo00TFIyDLVQKUKImu9tC116PJ6fE1\nrxOSJAzXyYjz3H/0Xt549AClbJY4jnn2qad4cXmZ173E0vPkyacJginm5kaTKcOwSCSO88QT3+PO\nO0/8TA+d1yqkHBUjvyhf7vfcA+9/P3gejPMhf6nR7XZ56qmLJBILBIGJricoFHZTq/msrp5mcfEA\nzWaT8+ev4jg2QZDi6tU1BoOYqakCjcYqYVgkiuoEwQRxnCEhfNQ4g5QmEdukmaPNCjHPoWkF4rhJ\nFK2RTksGPZ14vNAiSWBQRKAS0MDAYoiGTZsWMZI8GvvxqeOxgwlI+kQMyGCwwwAXcEkCfTzKTNDl\nXnQsApr4NEd6Ry4SUABsRuw0D1hHsIpGjIUZa5jaHgwZY6oOsVBx7Bh19x7OBW1uftPt3HL8MJ/4\nk0/hGzq9fh+nOyQlXGw5pKEpTFoZrnoBd7/zLSxNTl5vHoeuixcEbFSrzExOcunaNZzNTUr9Pv1G\ng/koQqvX+fKf/RlrFy/yr//4j182mfU1XYwkEssYxoihPxx2efrpp3n++Rc4ePAAL7ywSjb7Bkxz\nNFoWQqNYvJnvfe975PO7safvo+Y0IHWZjbWzDGpdFF1nYe9e9uzdS6u1gabtZmenQSYzT7FY4OqV\np9h2ewzcPpZhkzQTJIZtErFDRkJdgctDQSP2CI0EpmEipUW+kKdfP4MhQpRwCyGHNGgzgUoDlRCF\nCjo2sEiPIRcISKLgMKDPgKOAjyAmxiAmxEIngYoQLkGcok+PPi18VAxaFBklRnaRVIEETTwS2EAf\nFR+TBD0mAAUVnQgLlR0EkgGQp0lIWxmNGLtKlY7UyJUS6GqFfq2LN9whnU4zNTVLPj86xvv2HWRm\npoBtd3n3uw+xtLTIE088yxe/+B0URce2JQ888Ebuf8c7XrHrIooinnziCb72d3/HvG0zOTnJsYMH\nmcznUYUgqSjMzs5wyy1TrKy43HLL/Tx10mbzVJN9aUG/WsEMY84OA9Z8Ax2TSMwg4pgJklTYRuCj\nE+DTpIuOzZBJ+iRpMcCgTBZIYdFCkmCfInhebtKmhYVHBuW6e+sdaBhYCHR8RomdAR41IKJNliUy\nmHTxUfGJaGOgEJJFRZLHHRv1u/ToMeqPVFQUJJMvyQmOmJ7KMOxcwB/6CDVAxiBFSECXRLrE/bdN\nMl3IUW93SNk2lmGwu1jkhSefvKEYuXBhjWLx4A3HXQgVIXJUKpVfymLk6tWRkdj+/a/2nvxkyOXg\nwAF44okRofWXHe12G0VJMjmZp1qtkEhk2N7e4dyZi7Q7NZq1p1lZgX373sDU1CzgcuXKi5RK0/h+\nlURiAcNIsb19DiGOIeM6SdlBU1Ij4rocZc9oxARKFUXxUBQwzR4zM3u55itoYg995xQRC8RYRAxR\nKJPHJELQpTdmlCVQ6GGyQ48mSTw0bAJs1oEKWXT6pGnSp8wsQ1KoeIwsFzQMEkgaJAkJeQaXHB55\nYlYw2EYnYJIQCx9BGDvMmjaRFHSVHe69d4mPfOR3WFqaJ5fL8a9/7/eI66v0Gw3KjospfSZig56d\nJmOVWMjlyUQDvKHH1M0389Rzz1GpdTi3VqczjBgKhzv3z9BfW2NKVdmpVgn7fWQUsb9QIC0l6ydP\n8vm//mt+72VGSr+mi5HvFyIAiUSGWi3F+fMXOXbsKP3+gJmZzA3bb22VkTLDxMQeZmeX2dra4NRj\njxG7ayRKDjOlfayeepKN9eeYnJR0Osp1h9Bs1mD38h7On32OtZ3LdAcNmsMmeX9IPmnT1U00pUTR\ntLjSXkdVkxhSJbYd3DhEYYDqnmJJSOaNDJVIpUePAV0iMoQkOUSEhiCDgo5KjyHhuK+VKIRATJdR\nVqSJAxCHKDiopIjIk2GbeXQS4whrD7iCR4M+DmVCJjBREDjobDKNgkfEEHdMa9U5T4ZNRkRTW9FA\nvYqeTkIcs5jRmJQeatRjPuPg5gXpeZft7VNks1mWl5c4fnyZd7/7AZLJJJ/+9OdZWYmZn78LIQSu\nO+Bv//ZhUqkke/fufUWui29+9atce/xxbs3lmFVV+u02Dz/2GL9y770U0mmGUlIqlXjwwd/ixRfP\n8OyzZ7GVNT78vz3AYDDgiVOnCLtdXiclrK4jwpgmAwJRwpcBSlSiQ4ygyQFmuUKPA3Q5MDZx9hmy\nic8GXZYpcoGYa7JLBoFKCo9FrmLgI5jhLAEREp+R1dyIhOYT0ecwJmsMaBKRw8VHp0IOhwmyxCPt\nCx4+BioKFhlqdFknZgbIIhmFNaqKRJgKCVvg9IvEIsCX6+i6SQCki1kMq8BGzcbSDcKoQ+L8Br92\n11EMTSPwvBuOcTabotEYYFnJG96PYxfbtl+R8/pq4xeJL/J9PPAAfOUr//8oRjKZDHE8oFDYzXD4\nXbbWTrFRdkioGUytTzFt0Wl5XLywRqk0ja4b7N6dRNddLl6skkxOAVvkcgaum0bqoPa30eSAjKLi\nyhohApUkgUwSxyphOEBReiST02jKJYSvopMjoI5Hg9RYuxghaSIJyKEwgwRSXGUR0JnHoUefBm08\nHA6SRh9PTiUaHofwWUMnwEJFjE0SDUJMfGwCFulTZ40aJjZzFK9z0bbxGcZX6bgemqqQnJrgE5/4\nE6anR5PRv/iLv+T8yTPcVtxNNZugKYZ0emW6qsEubZqskWa702d2eZ5hf8A7fuM3+D+ffI7vPLZJ\nRqRJZyfIptN879IqS1qVXMJm2O9jSElsmhRTKbqOw0w+z1Pf+Aa/8+EP/1Bi+0+C13Qx8oOI4yGp\nVIJkMsm+fXOsra0xPT1qY8IwpFIpUywmyWQKdDp1HvnyX7LLG6LGkly0xrWNNRaWlukEDrO7l/nu\n46sEwT4URVCvXcJrPcPCtEon3ESpXeJY1mJAEqGq9AcxjhITJkOSc3tRZcBO16dfS6JIBS9MM8Uq\nepyg7nroWGRQSQIVInRSDOigoxCjo42Nfkdjv5EKZuRyMVoK8PAJgJAWghwOk4RsMYeHzdzYBDhE\nJWYanT4ZbAIyVGniMI2JTYiCwMfBQjCDwgoOOkkC+hSMNDnbZULNk0uoXBhskG5XEPoEqmayVEqw\ntG+WcNccH/zEHxMEAZZlXVe07OzscPlynaWlfwy5G0V37+ORR556RYqRVqvFhSee4O5du9g2TS4+\n8QQL+TzRcMjz588zMTPD8vHj1wmzt956nJtu2k935SyL+Tz69DRz+Tyf/MIXuLK5hfA9DA28aB1H\nghtr5AhJ0sakwcbIN5b9SArEGGP3jwQR27hs4NMiTZMOJjEeBlAiJocgyZAaWzRJ4hPRI8SgS0Af\nA0GERRJBnz5dfFxKbFDExiVmxKd3cAjpjQW+ginMMbk1IBh7MypI2SOXKKDILPMlHy+TYiJnoWsG\nqhCUey0mZhYoJmbIpwuoYopWr87Dz11k/9Ik+97ylhuO8113Heczn/kW6XQeTRsR2er1MoUCN7hh\n/jLh29+GXzTftne+Ex58EP7Df3i19+SVRy6XY9euPP/9c3/BTZrNlfpFYmdATbgsTs6iG10c18Tt\nD1hZeZaZGcGb3/w+arUyKyvfZn6+wGCQw7Jm2Vp/BiXwGMgGBX0KoYQkgxA/DhhSRXIEoiyaMiAI\nKlw99wgTps+m+zzheDI5pEBIhD5mejmESKYQ+IDKBAMMCkh8LEys8QSzxYAJ8gxwadIgQ0gdhQSS\nMj7zqIy+ExTq+BgUmUIjYjcuQzJExExhY5NSXCzpUzfmmbCvocQRm/U2D37gd7nt4C4m83n+9n88\nxLKeI22l2Ilr5JKzKG4TS6q0RYhBB5UhneolxNQyW1tbXHhujbccv4ekNWo8/DDgzHZAU/c41djG\ncxz2FIvszmaRQEdR2GVZVHWdZrPJ9PT0T31+X9PFSKu1Qjo9N05gXCef9zh2bCQX/YM/+Jd87GP/\nF5VKiK7n8P0usMrRo7eRSuV4+pEvMu267MlN0h263Ll7FxcrFV5Yv8BCMknK0Cn4Fa5d/QK18gZ2\nY4uEEFiaiuLr7D12mF36HlTH4ezWFtc2msRCpTi7n/3FBU6tPMlwWEaEBUy1QCx7ZIkxqBKgEWEx\nMpafZYCDS52QmDlMJIIUA3wkXYqozBByCkEe0Ig5i4KHRUAKlz5TRDhYDMZDuZGtToTERJIjwiJi\nFDjtMYegQUwbnSZ9poEEMTtKREXqLJgKXe8iBCUMNccWEWu+SywTeLHKwaV5FASNap1TjzyDf/Ys\nhCHvfO97mZmZuX5+ut0uqpr6ofM2Mhla+aH3fx6o1+tkFAUhBMWJCZid5Tvnz6MrCjudDr/z7nfz\n5vvvp1wus7Ozg2ma1CoVXnjxRXquy9B1eXZtHUsKDufzVFpdkopgEAxpyvNkRYogdpkVAck4Znuc\nA1FAIGHM65CogEXEKi1cUtgcxKA5NnAHgywRgj4zlNlhHh8bD38s3A4Q6JymSB4diyp9kvQwsKii\nMYGKgqCLRhmXPB4D7FHXhkSjjoKOx0h6aJpL3H33b7J1dQ01MkGepZRZxDA0au0qrdYl/tnb/wBT\nz7J29gwF00RTEzy/UmXy0DK33nbbDcf5wIEDvO1tNR566CRSZpDSp1TSeP/7fzaXxdcqpByZnf37\nf/9q78lPh1tvhXYbrlyBV2gQ+ZqBlBLF6XL3Yop+w+Ga7DGrR8yqFu3IJqmpDIx1/KFOPv867r33\nXdh2Cs8bcu+9t9BuG1y58iSDrassSIEIE3hyQMuvoGgJMoksO/0ukkVM0qhIEtYEA1fF8RpUvT4G\nXZK0CZA4OPjM4WMA14AiGioWLSIa47yxNt9XyqgIDFwcNtkmIKJJmj5JNDqE7CKkSsgFRubzLQRt\nJrFJ0UQyyhuHOWJqVLDII6VHEkkl3Gazb2Ia82jqBJdecCmvvsDdhyfx2j06rkZQCNE1laHbBzxE\n4NAb9MgrE0wKhdhQyFoWn/vMZ0hgXS9EAAxNZzaZxcnM8oa3/wpf+7M/Q2garSCgHMfsmp2lF4YU\ndu162ZPT13Qxks936fdrgEKppHPixAn27Rslmh47dow/+ZOP8vnP/zfq9W0SCZ19+5aZmzvIYNAl\naFcpZvL0hl0yCX0kia3VCIcOW6FGIemh9zo06quYQ4e8uRddzxEEbWRth9PPnOLW33wPvUuXeOuh\nQ3TiNfxoF7GeZavZQLdmKCUc+p02brCKwCciSZKIHjEBJUIMIgJ05pCEtDlLlwiVAhoZAvqETGPh\nEqIjmRqTExexaGJSZYoVZqmwiUcWFwWJwciwS4yH/0NiYoYkcQiZpUuEIKCrSJrSpIdHDp2MzLMb\nHTccohIjjASVQCNh7iZjR3QGfVZ2Oij+M0yGAZGqkUgX0FTJoUSCb3z2s+R+//evV72FQoEo6owe\nEi+ZbXc6dRYWXhnSaiKRoO151DsdHjl5kpTrcmhigu1Oh342yy233cZX//7v2XjuOXKMXFtfvHCB\nWw8f5isPP8mVay16jkVCMbhoCSw1RgybTMqIkgKzeoSwsoSaJJfIUGjucHLoE2sqMojQx0I8BxgC\neUwskvjk0OmQoM2APgFd4vHyW5IEKsGYrJYgh4+Di4tNiavkMCkRsAroTKMxSwOHUTi4QoRDl9FD\nUpBEIAjojK+jSaCGbeq02zViRRKFLmnbR7LNTiNNJlHCNJe5ePEKt912K0fuvpvNtXWGnksxvZff\n+uAHSSZvXI5RFIX77ruH17/++PWibm5u7pdW1nvmDGQy8DIDR181CAHveMdoqeYjH3m19+aVRbvd\nZlCt8ua776LT6XDx3IvEcZZcapJe0CNtzjGfEvhyhampSVx3QKOxSRRtUSiUWF2t4neGHEjtRwQ+\nQdCgYE5gdnfYEB5Nz8YlM1r2pIUpptGlBAZICoQETJBF4uBTJ00WhyodNCAkwyS6EjJtaNQ9h5A6\nEh+FFIJJLGy61FDxydGkiYOHTxadAIMNPFK4dBHUMOmzG4sCSXx0FAJGDW8fhR5VpuiRR6WDRz9W\nCdiPG5iYuIhIpz+4mcdeXEFG81zzN0lWt8Br4rcrJIVNUxmyW1UInCpX8xnuv+Mu7rntNj796KMQ\nukg5yukC8AOPzqBKfkrjQ7//+7hxzLc/8xlmdZ3ZbBZhmqiTkxy5886XZeMAr/Fi5F3vOs6LL15B\n01Ruu+0Qb3rTG2/oyo4ePcLhw4fo9XqYpkmz2eSTn/wi5bKF6wwoJFQa7TJHlnfT7PVYaQ+5PMgw\nq++ivmrisUi/f5VZOUcUpdA0ST6/SBAUKTeeoOd5yFyO7VaL/XmDL595Eax5ZmdmGTQr/x95bx4k\n2XWdd/7u23NfKmvfq7qq9wVodGMHsXGHCIKiYJOixSFjtIwkygprLE9MTMwowjOyJyTLYUsOx4Ql\nDYOSg/s2JAASYIMglm6A6AVodFev1VXVtWZlVe758u13/qhkiwRAUwKaACF/f2a99+rEuy/znXvO\nd74P2dKxZQZTDCKkwjIzuGygIbAo0URhAwOPPhwahKQRDOGLEUIMArmKpIjeKe/pjODjo7FIkjVC\nGtSJ2EFIiTV66GeZCIMyXeRQ0dnEYRmf7QgECiVUXCIQAlVLkUjlyVZX2CFyCARu6GGEFjY2ZekS\ns3Yj1IhqvYwIA4SWYqHh0mtGpCKHs0sX0IwR6rZNr6rynW9/m8mpKeKJBNPbt7N//xgvvXSawcEd\n6LpJrbaBbV/h7rt/+bo/DxcvXuSpRx7h7JkzPD43x425HNMTE0RRhOv7jI2N8Zf/6T/R7fvcOj6O\nlJKrp08z5nl8+dEjODKL74aoUuJEXShBL7VoEz06w7Ci41MjlBFELrYd0AgChnoLyGWfY16b/Z2f\nFZuQCygd3RaDOGXWWKebiH4kl1jGJ4GNikmTOKIjHN+Hi03AOgV8NvFYRTBBmxwac/gE5MmQBFJA\nQJsWEh0HQZI8slNxs8mgsYyu9ZKyeogldK5e/haJXB92fZVYrELdyRCzUrR9wfZd+9G0fk6dOsX7\n3vchug/dRLm8Rjqd+W+WVJPJ5M+N+/OLhCNH4L773u4o3hgeeAD+4i/+8SQjUkrOnDnDsWMv0Wza\n9PVl2bt315bycueYUqnEwPBOrpyfwXZjSCFBkZRbVaa3Z/n4xw9RLJYZGhqm2czxwx9W2Lu3m81L\nLzMY76FebxAEFn19vTiXbEzfp67uZGuAtoeQFoHcwI2SCKGhSLXTakki6aWFh4JKikHaRAjaCHwk\n0A4CYnRTx0bDJkYfESabVCiiENDDCmdJ46GTpNSxvUtiImlygIhnkaj4+NgkMHAIENToRiUOlDHY\nRKMXnwgXjQQRBnm9n6pbwRcJ+tIFPL/IjuFBZmYFl+vnuUHxSMXjLLk2ga5gduU4NDzAaszi9oMH\nsQyDke5u3OYSq5uzdKWHWS1dobx6kXZzjd7efXz329/mN3/nd5jato1TTz1FXEpELEb/zp2878EH\n3/C6/0InIw8//GE++tEIIcRP1alQFOVaJjYwMMDv/d6vcerUy3xp/YeMmwoHb7yPlStXmC8WuVC3\nKPRsR8EiFiuQ04Y4v3AKTTewrDiqGqHrFppm4FZjzCwu8on3vY/lpSVePnIEqW+imAbFWg1Lb1Ai\nhSK7CVA7VZBeKswy0Rm29Tu76KtUcOhCMo5PGuQGAo0kLgEgqaEyQESAYJZhVujHJMRCENHCA0Ic\nAoZQsVBZZhNnS7yYQSR5UngIWmKFPtGFp+hsGt0s1UqMGXFCH5zIxcYmwCPApO41CMMy+ZhORiiE\nuoZlxCm1FV5yVuk3ChSlwa7MAb59bAnpzZFNxrFuvBE3inhW03jPP/kn5HKrHD36Q3w/oq8vy0c+\n8kvXTZXvR5ifn+fRz36W3fk84/fcw/+7uIhTLPK869I9MMDEvn2MT07yH776VQ7ecw+e5/H88yc4\nd2Edt92mXCyRFg36pdlxd7lA2V1FUfbiyyFa8gq2CEiRQLpxND1Bud1ivWLjJXop+UVOSge9w+VI\nIBlHsEaDJm2GOgoCATBAkyVsFNJEWGzZjmfxcZCsM4EKJIjjoeAyAwxgASF12qjUkbQIOo6dggRb\nLJ0WDlvEMJUUqBpCLZJM7CSf1enOZKiHBtZASLvWIG3lURWFlhDs3beL2dkVSqU2s7Mvk0ymMIwK\nDz30K9d1nd6pOHIE/tk/e7ujeGO4//6t2Gs1eIOb0l8ofPe7R3jqqQuYZoHTp8+wsdFA149w6NAY\nQX2TnOdhVxrkcn3sOpDkpTPPUwpVmrUXyQzl+MSnP8a73/135kL/5b/8Lel0L7bdRjdN4ok49XqL\nKPJYWztHGLYJIp1UdpJa9SIyrBGQJIwWCf04vmwRsQZ0IwmICDHowWUNnQwQECNJyAJS6oTRVkU8\nwGYOFYMWkio2ASYuaYoMEwIpDEzSRKwiSJLEI2KNJgZJUjhUWcfB6Cg+S2qEuPiIjvHeLLOoRPRj\n0sCBsIiUJjEtQ6VRJ58O2Dk2zHKpTKWicUW0yRsGo6MT3IBk0fMYGhrEs23arkvMMIin09z+3nu5\n9MJxzs1+F3W9yFjMoH/fBO++807OnzzJ9zWNBx56iNvvvptyuUwymXzTEg6/0MkIcK0sHEURp0+f\n5oUXTuM4Hnv3buPmmw+9pryczWa55553sX37FF/5q78i8jwmDx7kYiBorzTpEjqVioumNVBViUuB\nctQmpZg0vHWatosf+lQ0wdgtt/BCqUQ7injZhr7B23A8Qb1VpdGuU/cXOnLAfZi4SOYokCPAxaJJ\nPyoqKhYaF2hQQ+nskH1gBZOQLIImVTxWkFhkWWYQCwXwaJDBQ0WhTURIiV5UsljkMfA6ImpJxaIY\nOQR49EmBrdTxMIiiAEvRaEeCTdVDRpt0AwqSCh5FXNrUSMRG8F0oxHRUxaThB/ieQdvoYiSp09fV\nj92us3Z5ifs//B6mOmp79VaLJ776VX7rD/+Qe+99F57n/dwmLZ5/6im2pVLk02lsx2FoZIT9sRiX\ny2UO3nknuVwOKSVhEKBpGjMzF6hUBGBytbxM0jJu0wAAIABJREFUX6SyEbYo0GQADUHEChvMy3NU\nyVIFFBnjvG8wKARR2KSpKazTw8DoFFVeJl9eJUvYYbtDEhcFlxV0LEZRSWLTwGeTBBU8JC46G7QZ\nQOLRZgAdHYMKEBHD7FBZ50jgoKCziUedHhS27qTPZdbppUDAOmvUaDCAJIFpOPSlcwRUKNdLSJnE\n823uvmOaSj1Js9m3JYgXBJRWVrjnnls5ebLF+Ljg4MFJ9u7dQzqd/m/c9euLMAw5d+4cp06dI4ok\nBw5sZ/cvgO56EMAzz8Bf//XbHckbQzK5NU3zyCPw8Y+/3dG8OZTLZZ555gzDw4d58slHkHKc0dEe\nisWrPPvsIo6zwRntMlmvgS7j5AfGyB68h/1mhnrdxbbLXLmyyPr6Oj09PQDkcmnW1pr09Y2gpNJs\ntDdxnBKeFxGL5bCjBm19kMitoBtZIncVEfkELOHKFJJuIrZTp0KIR4oGCpCgSZuzeHSh0iSNQ4KA\nSEo0QrSOt7pLHQeDLgJiCPpQ6AOaRGiEpDCRBJRQyJDBpkVIQESWJG2iTtUli0DHp4mCz5ZlXz8h\nNyI4j8MZGuTDHFXpgJS43hpJPaJcrhGP6UwP7mR/VjKkaeTSaSqVCqvnzrFSq1GJItx2m1fKZbbd\neCPv+9CHeGH/MVb/+I/Z1T3B2LZtjE9OYpgmu4aGOHriBPe8+93kcrlrAw1vFm9bMiKEeD/wZ8CG\nlPLOn3X8N7/5KC+8sEhX1wSaZvDkk4ucPn2J3/iNXyUej7/m+IGBAT71+7/PmVdeobK+zoRmcmzu\nNA3PwgvLSDWi7gUII8NyGFCzL6JEMXSZox7YJLpGOHD4Vu67724ef/xJvvitM7ScHA27RbWRx26l\nkHK94/qxhEEBCDGQZGjQjX7NgzeHTYoUAS5NNEICNAq02MBngxg+KqtAiQwBLZKotEnRJEdIhRAd\nFbdTCNwSLzep4WEjaUQBc2hkhEZSS6CKPlKKSTloo4cZ1qIio0bATjWJDANU2UaIiD2q4JxaIp7d\nT+jqZOI67aBGt2ejaClSKZN2KkXcSnF59gQ5K0fqx15e6USC2OYmCwsLTE1N/VxHPotLS4x1/G3i\nlkW2UKBWq5FLJPD9LSPu5Y0NxvbuZa1aZXFxHc8ziUSSCANfuiRwmcJAQyciopc2NbnJJj62opBW\nEsSjURap4ouQtIxTrydonV/BbvvESQAuBRRMQhq0WUWhxTixjuiRZAKXFA4aChEqcYo08aiSRhCx\nJWVn04NglQgTjTZF6qTJEVFkDImJRdhRZtyGzRo+kyTpRXCRVdbJkrEyJGMh9aBIKt7NZrOBIddZ\nWdJoNhrMr8wxlpmkHYasrq/TPzLI+Hia3/iNT73lDrtSSr761W9x8uQq2ewIQgg+//nn2b37wlsa\nx+vh+PEtrsh19Px6y/HRj8JXvvLOT0ZWV1cRIsvm5iq2bdLV1YOUEaVSgyjSGB+/m1yuTjaT5tnv\nf4lRM0MoNfz21ijv4GCAlMP85V9+ic985pOkUikOHz7AyZNfJ5s9yM133Mszj3+NllMkFqRY8xqs\nRm08MU3ogK610PUhFNaQfgxdH8EPs/hhD6oW4vovESGJMUeMNlUSQDcBIUlWyBCnjY+gQZIMeVQ2\nCKgTw7jWrm8SIEkR4BGiIDqTeG0ctrx2NJrUOk3abhR66UXpMA9TnfRng02G2JIKiBGSVTdA1bEi\nlYZfR4oWSW2cudVVWn6D4d4kN92wm7PHjmE4ztYGLpXiOzMzJHM5yk88QXpkhN/+1V/FNE1uOnyY\nk4cPc+erpuc0VcWQEtu2X/fd+0bxdrv27geO/KwDV1dXOX78CmNjt16rlCQSu7h69QynTr3E7bff\n9rrnpVIpbr1t62/Fyt+i6sepejkaGARNG8XKks5tGSIViypCjGIKhb7hLj7+8Qd5+ukZ9u3bzczM\nJRSjC6elUG8pKH6anGgjhUdb5hFs4uKi0sKgQQpBHBCEgIGPIIWGTYk681j0ENuS1qGLOgY6FjVU\nLAIicjQB6EeionYcDHRW0Gni0oPGJio+WRL42NRpqgo+KZp+HEsRDMXj9Kghq6Fkw/EZ8BpUhdbp\nQ/qkUIlpFnNRhXL9GKOj26jUlhhNhfTEuzhV92llMwSJYeabLUQqSX/BulaJCsMQ13WRYUgURW/6\nYfhZ6OrtpVIuX/O9ObRvH08++yyNSoWuZpN1x6GZSPDpz3yG73zlKyxWK7QrPnaQoK6bGPYawwg0\nAgQhKhEqkgIu66KGGzmsRAqaCEmbOQoRVMI2vmzQanuEhFwhTpUkZWxcKngISh3p5hJ1YkwTUgNG\nEcSJqCFoI9lNmRlqpDFpkCTd4cprCFIU8RHE6aMGwAQhLg02gBg6XShIHAQxUorFNAFJa5W+0TiB\nLxnNHKDlO5hqnR4ni3ulSC6qUw0j1tuA0kUulebk0S/wf/7p//qWJyIAc3NznDq1zPj4zddarrlc\nLzMzP3zLY3k1vve9dy5f5Ed48MEtzkizuVUpeafCNE2k9PA8ByG2ntNWy8ZxArLZFLqewPfL7Np9\nC7l8D5cvP8H8vCSb7WJsrMDU1DYsy2Rx0eb06Ve4/fbbGBkZ4aMfvYtvfetpMpkYuw7u4ZincGXN\nxtT7CJwmvr+MIvpBCoSh4bQ30fVB4rExaq0yQimjKAWEVcD15nEjmzKTSMZR1R5CKbgqV0nTRpM1\nhsgR0ouLjUuaOBZtwMeiSYIUATnaCFQsoNH5PVqjjkOOMgoByxj0kqeOgk+EwCVPg0pHfcSmjkaD\ngIppcVfXKLbrcqm+xLlAI50dZnp6HFW41O0K1UqRrsK72H3rrVyemeHM0hIzts2DH/gA+6eniZkm\nfhDwvS9+kd7eXgqFArF8nmqzSfbHHqq26xIaxhsmqv40vJ2uvVXg7+VZsrq6CuRew+TXtBhf+9qj\nbGzUmJwcZvv27eg/xU+7VCqjqjqm6WH0jlCrFXGcSzhOg0RCYWRkG6nUAJommJjopaenh2KxyeXL\nsxiGghXPsLB2iUYjgSV9wjAkKbUODVGgM8tgp4S25b24Zam0jkMNQRql48p6FYV5QjzUTvmtBw0T\ng4gkm8JDlU1MoIWCgUIVSQqLDbKsYrJJSIwcJjplAVKtI6Iq7ah3q4cZxZhtrdFjCWKmhxEI/KBN\noGukhUaXmcYPAooIjFQX8STce2CAneO3c/XqVZ5+6SWcRDf3f/C3KBS2WjKnTiRwLn+fXC7HxYuX\nuHhxEccLuOA2GbvnPqanp38u/jM/ws13382jf/3XxC2LZCxGJplkcvt21vfvJ3fDDRT6+ti9dy+J\nRIL/4Xd/l8vLGxz5wnfRzH7C7B0EjSV8GaAJCGUTXwoCITCkS0Z4DKOxIBu0ZAUlyLEhPUoyThRl\nUGhhkCSiyAYN1sh3zL8XCBlEZxwXSZ0UkhVgGjpPhY6NhoLcGpamhIOFh0kehYg6q1RI0o9CAZ82\nkEXFEAZIlxKSGAqakCT1Fn5YR0Qhg2qcM5dn8YMJVlKXSVhtxoVLT6GfjaUmeUtwRz7NU7UFsqMq\nd980SaR0v8Zs8q3C7Ow8ptn9mmckHv+H6xFcbzzyCPzrf/12R/HmkMvBbbfBo4/Cww+/3dG8cYyO\njpJK+dTrEbA1qef7Hp5Xp1DYQbu9wfDwVvslm+0hlcpy3303MTDwk4aaiUSepaV1ms0mMzPnqFTq\nPPjgu4jFYrzySp7jxy/Sm99JV6qLpfUKdXuNIJwlijYQogtFa9FyUtjeKqAhpQvUMM2QdGYYGKNe\nXwFeAnQUJSQWVMlj4GOSJUMLh01CItK00EiQJINGDQ+DkDgNXCI2aLOGgo2HwwAecbpQaGAjiaPQ\nBJSOqEDQ2Zr41EWKV1SPMd2jz1BZLs3QCGE+EsTNGJacJ6P3ceuBPYz27uALR4/y5OXL5ISgkU5T\nBh7YuZN7brjh2n2LmSZ91SpnXn6Ze+6/nzvf+16e+Nu/ZUcYUshkthx819e5+cMf/qnv2jeKX3jO\nCGxly+D9xGdLS5d55pkfkM93k8u5vPDCUcbGTvJrv/bw6+78PM8lnZ6iu3uSVmsdKQsIcYBTpx5h\ncnIQw9hFOj2AZSXY3FxmYWEB09waWd23byd/+qdfQGjdKGqOwFfwow0ibBSSaBSJUyVFHAeTDXxm\ncbAAHYGGQo0aJfpI02YHOkkSBNg4BNhIYqgIYTFh5VhrR0hcfFR8wESQwceghIWByiguBiExsmaW\nml+gHS1jkKKCwKWNkDHm2xeZJM8UBm0hcKVkDZWiI5CoFIMGOcsk02zylcceIz88zD3vex+/+8lP\nsrFZ47nnZlhaqiKly9CISWL0Th49eozNxQaxRBclEbHtpvfx/e9fIh6Pc8cdr1+huh7Ytm0bU3fc\nwVe+8AUUxyHV3c1N99zDrz7wwGvaQ8lkkqkdO0j2nMP3+7H0JLaRZ8XbIKFIUkKi6yp5z2MxDLk5\nnYG2Q8XXKWJTDZJ42IQUMNFQyeF2Jp5MdGIMENGgThudDA4NdNZJYRGxQYsr+PShoaDQIksdFejC\nJ6CLNer4VBCEHcXFNDFK5NCIiKgQ0Y3syMfHaAhBSwgSnkOGrRHkZssmFqkklIhos0JFrLAr202p\nfRVpmix7AWtlF1/EsBJd9OaztD2PoNPSeqthGDpRFLzm8yh6e+L5EUolOHduy+flnY4ftWreycmI\nrut88pMf4XOf+xq6XmZu7gcYRpqtgmgbw9hkbGxLE6dcXmX37glWVpqvuY5tVxHC5N//+7/GdTPo\negLXnaVYnCGR6KPRcJFuRBQ1kCKiv7CXavMEMoRAFImirYk2RRlHCEEQLBFFPr7fRlFUuru3kUop\nbGysbfmn+VvjtzXawCZOx0NqawR/jgRxfBKYDKAzxlUcymyQxcemTQIFm0FcklTQ6MHDRNCmixIl\nEkSkO78PW6olCoqWQbMCArHJaDbNbFBnyc8RhVkUstQClW88e5G665CNm8yvrdHwfbxmk51DQ2SF\noDgzw1ldJwq2vpu9AwMkTJPa5iYAu3bvRvnkJzl25AgvXrxIrdkkmckwf+EC2VyO7a8y1Hsz+Lkn\nI0KIXuALr/p4TUr5sZ917h/90R8B4Ps+q6t1urrGSCazeJ7DiRMvomkD3HjjTXR3dwPDzM+f5vjx\nE9xxx+2vuVZPTx+JxGXa7RaZzDBRFLGycgldT9FqNVlcPIFlNYjFVPr7B5mbW2J6WmV6+t08//xx\ntm27gStX6gitCn43vtRwUIkj0WmQxiJOhE+Eik8GSQFJHI0WIQ1CQqoMoRJHR1Ahh4KPTgWbJhED\nioqCTlKY2FJSJWQUkwyCJh49OKygkcYkRRwfScXZoEUaSQFVdeiKFBRp4hBioaN4AQk1Rj45wXpj\nCUvLUUgPcLW6iqdmyAuFCEFCSXLpXJFy9AP0/CDvfvcd3HrrIVZXVzFNk9HRURzH4X/+g/8Dxifx\nk1l2Dm8nl+vBdds89dRxbrnl8M9t533ku99l9tlnuXNyEsfz2HRdkLKTqP4ktp6XGg8//Gm+851v\nUK83sbK95F2dUnuDtiUYMAxmKhUUy2Iin+dsqUlOibMvMczpjSIekCJDHoGHoE2ys49ZRyeggc2W\n0mqMJJcZJsBgAkhjM88aNSIGMPAp4NKmzFZ3tUQMjS1JuF7SxCmzgUMdnYheFJYJ2ZARbQzWgTkU\nxqRClgQqkpL0mZMxIhRq0QZ9Yhw7UthstOhNxVnaXCOfTrFtaDd+u4ZlbOfrz1wgnvUwd+/GisXY\nvn376967nxd27tzOE0+cwPNGr9k8+L6H666+ZTG8Hr7zHbj3XngD6tW/cHjwQfgX/wJsG65jK/8t\nR39/P3/wB7/Jgw9e5oUXjjM7u8aFCxt43iwHD96Lpumsrs5iGBt85CO/zOc+93VKpSUKhUGEEFQq\n6yhKiYsXJZa1i97eAgBrazHOnTvH/v0xJidvYuHyJcDCdoqoyioTfZOs19v4ikUkU3heGSkvImUX\nqhoBZ4EQTfPo6+tlz57beOSR79NsmrjuHAExQhRc6ixRJ4XKts70C8RpYrPGLDCKh4JCjinKBCi0\nkASkiFDIoNJEdGZ32tTIUaSESoSPTxUXQYaUX8XoSXDD9ptYuHCRBbVAPrYTvyFoRRYZs5tmK8aR\nZ8+xL9XCTFq4vs8tPT3QbLJt2za+fvw4pbPn6O3qJptJsZq6QDOf58Ef61vu2LGDvr4+/uY//2fG\n43EGcjns1VWe+OxnKb73vdx1993XZd1/7smIlLIIvCFT7h8lIwALCwv8zd98k3I5xuZmiVqtzi23\n/CgR2UJ39xgnTpx73WTkwIEdXLjQwPMSLC5eRVVVhod15ubq9PR8hERijVKpQrMZY2bmOUZGQn7v\n9/4nenp6OHNmlgce+DAzMxc5evQp5mZP4NGFgk5ECUmBKi7T+Ci0WUdhBAVJRBENjxRpkuQpYiAJ\nqVEggYlJEx+BTxWDRKTQbEe45KiwThYXC0ENnw1CepAoImBThihCoElJFkGZFioemjDIGUkanktM\neiTwSRCgxLMkdRvPHOGsY7PkGMTUPgZUiyv2BoNGjpRrkZMFFufh299+gRdfnOGhh+7kgQceuKbt\nEgQB3T1jDA//ZAXENGN4nsC27Z/LdMby8jLnnnmGm0dHUTutOiklPzx1issHDjD9KmczIQSKIujp\nGeLhhz/FlSvnOXvaJlq5QNxXyOYSLK6vY6VSHMzliCUSxO0YC06EFxqECqgRWJgdyXWJikaMAJda\nR1cggyCGgcoAMUwCBCtINDK0CLBZI0Sjhck6/XhYpGlhYyKp4tOkic0KSQIcIspI4vgoisrJKIZN\nBodukA5xFunFYB6DGkPopOnFpI5NSa5hkOSqV8atthGaTstPcGphhqBnlIQneOVSjcmBFsqlS/zw\nzBle6O7m4U996rr3fX8aenp6ePDBO/nmN59BymxHTKnMBz5wmH/zb96SEF4Xjz66JRr2jwGFAhw+\nDI89Br98/WV+3lJomsaOHTvYsWMHsMVRO3HiJM8+e4pKZZF9+ya4666P0dXVxac//St84xvfZWHh\nOQB6e5Pcf/+dfP3rz9PTU+icHzAz8zKaFufixRl27DhE27YImnVUJQDpUKoXafkOfpDHdUFVI1RV\nIkSFMKyhaS1GR3dy0005IMHJk89Rr1cJwxxSbkdHdNoosMJVdhIhgRCPiBox0nRR5yqzFFCAgBYR\nORRUFOZoIcjSj0EZBxsD0TGkEOSYo46PTT9pVBxMvU2YHKZg5vAHhpi/AhtuhBEbwPM8Wu0mKVJE\nUQwHhRtySa7WalgjI9jVKqdOvsRipck2qaGJBG5bZaO2RsXzSKZSP7EePzx6lG7HuTZJmU4kKGQy\nHP3e97jh4EFSrzr+Da35m77CG4QQ4iDwb4E9QojHgV+SW42518Xo6Ch/+Ie/xfz8PJcuXSKRSDM9\nPfWqo+RP/X/79u3lxRdfYWUl4q67biQIfJ5//hEymUGSyTzZbB+p1Bpzc+fZ3LyKYRQ4ffoiY2Mj\nxOMWUkYcPnyQ0dEevvKVL3HhbBUZtojoRiWGj8F5ztOPJEFIGskiKiG9CAYAlyRVwELiUcNGx8FD\nUibJOhBIhQidWofOKikiREgan52miR0ETJuCb9jr2NLCAiwiTKp4hGhBH6oBMV2h6pUZxSMfT5Md\ny3NltoYu+jD1JC2vByVYpUwTGfYhhIOhS/zAo16tc/ZsjFarwJ/92be4cmWNT3/6n5LJZEgkEhiG\nxPOcnzAxdN02UWRz+qWX8D2P4bExJiYmrpta55XLlymo6rVEBLYSjsFkkguvvPKaZETTNPbt28aZ\nM/MMDGxj796b2b37EGdOP0N1+RgT44OUNjZQSiXWL16kXq9zuiVp6NMIZRAj7mC2q7jRGpHMEhca\nSBufZULanTVVgFKnmaJ2PHSTwFxHmizEYwWLDW4koEKsI+KfotXpD3s06SOgG58GgjlDxYlUNhjG\niwaRmokmbHx/kFVsNOpUKJAnQwaDLdF5lTYRPvNUUUE2yCk9hPEsNS1LV7aPpUaDvRPTFPJlxvr6\nGAMur6zwgyee4EMf/eh1WaO/Dw4dOsjU1CRzc3NIKRkbGyPfmZB6OxAE8Pjj8O/+3dsWwnXHRz8K\nX/7yOysZCYIAVVX/m5wzVVU5fPgQhw8fes3furu7+fVf/wS1Wo0oishmsx2e4db16vUyx449xZUr\nNZpNnSgqoutnCEKfui8J0Mjm21Rrm+RSO1hbC1HVNFE0hqIsEYtt22rnyFeQsoVl9TM767G+7tBs\n1lHIoyptwkjHxAAmgGVMDFqoQBqBg8DpmJgmqNLCpMFZFIZRCBC41InRh8Ajj6BMiEKFPqokUdHQ\nSGKiUSLCpRHLEe+dIFJyIIr4WkAYxbCsFAo2piII3AppI0m2UEAqDfpVldVajS5d58LlOW7qG2ej\nWeVM4BI3TMzUADv6CywvLLBr165r93j2zBn2vMqpW1NV0sDKysp1ade8nQTWE8C7/yHnGIbB9PQ0\nIyMjHD/+/9BuN4nF/o7lWyrN88EP7nrdcy3L4tOf/hinTr3ESy9dxDR1Dh4cZ3y8iwsXFomiOAsL\nK3iexcDALvbsOUCz2cdf/dXXuP323Rw5conx8YO02000rQ9FjRA0kVEaT1pIIlZZoYlHHA3DsBBh\nHFVmEdIgkiE21paDKnESqCioRIRUhUNDpmgbU+DrRPgklSaG9EnKFqqWphoKnGCDZOSSkUsoNNGV\nNJH06ZU2ESENKpTbOXQ8fEqsq2naZpJSaZ2urmGWyi0ifYx8shev2sYOlkgYGgmhYWgGl51NVH2E\nyO8nnR5Byjyrqwaf/eznufPOw8RiMW67bS/f+95pBgb2YpoxXLfN6dPfJxbMM/9EE0NVOf/kk+R3\n7OAjH/vYdSE5Kar6umlmGEXoP6Ut9J733M3S0hdZWDiJYeTw/Sb9gzr/2//+5/T19XVE0Z7nL//k\nT7g0N8fipiRrTKCaMYb6DlBbOkbTbuHIKk6kEkVtpFTQGUDTxonCHIG8RMBlQmqoKB0/CoWUMGlK\nmzweJgHrqIRAkxguEU0C6gySp0IOSVI0OTjSzwXN5GwxiRJOkrKSuFEc220huYrHMHXOYZHEYGuc\nr0GIQ0SBGAoxutUhpOlQVHxi3dPsnr6TcvkFskqClAG9+b/7roz39fHs6dMEH/7wW0pqzWaz3PBj\nhLm3E08/DePjMDDwdkdy/fCRj8C/+lfvnKma//gf/5L19RqZTJx7772ZG2+84Q0T4X+8ytfb20sq\nBY1GhePHnyUMhxkZmebs2RkMYxuXLjUZHCzQ39/LysoPGB3NU61ux7ImabfPUSz6gEYQZGg2zyNE\nHcNIAJtcvNgmnT5ELueyulxCJY0f1WhRIqAASEIUGoQkKSBJIgGBR4AkJELSZiCZZqHlUpYG3cTZ\nh84KV/A6NhAWbVQaWIBLyAEVLBHiCZUVxcQ2LGS7wfNr80SlRRQnwveg3lJxVAPdsoinPXqtDJrS\npDef53KlghVFNGwbVUugCpVCMsfu6RtJxNN4nsNa7dJr/KeseBzXdYm/io/pwxty6H09vCMIrK+G\nZVk8/PB7+Pznv4uU3ei6heOUmJxMcdNNB3/qeVsv01u57bZbAXjmmed4/PEr3HffrZw+fZqF+Qpd\n6TQhbXRdI5XKUa/nCUPJoUP9PPfcEZ58/GnmLpVIksCTEZ4wMFQFN2gAdTzSeFoMU4noFjoykATR\nVkG+Tr6zd16kRAOBBghMEZK0uvH9GpoWByVOLBxkI1oiIzKkNBM/dGmR4XzUJEabcWq4UYM2goRl\nMhAJLntVIiXCliZZYweOtJB+kmoQUDSqODGDXGaIffsOcvF8nCvnztOv1ZHoLDlNNoGBzDQVb40o\nCpHSY2OtwcvPHSG2eBlhmjiJBDffvJczZ07ieQJF8YmHV3lo/15SnUb1JHDq3DlOnTzJ4ZtvftPr\nvW1qihcfe4xx38foJDdhFLFs23xw797XPSedTvPbv/1JLl68yMrKOl1dE+zcuePaXLxhGNx1113s\n2rWL//v/+lOaT89Ra1TQrASO3WKzVccgx2jvbjRDZ3b5PEJuYun9+FEbD0EQ9CKVFsWoSU+n/WKj\nsCYrVDucIYHGMgmahAg86miUUQnZQKeCUD2ErnPVdmgZcdJWD1EYQ5UadrsFQkeyRd/eICCBT9iR\nh45Q2aJ2+xgIdNMkk4mR0RWW7BUURduSyg+auEqFA5N/JzDWbLe5urTEsaNHmZicZHBw8E2v0zsN\nX/4y/Mo/MgHa7m64/Xb4xjfgE594u6P52fD9MUZGcrRadb785aM4jsvtt9/6pq+rqiq/8ivv4y/+\n4m9ZXi7S1TWK79dJpWzKZY9Uaphi8RUmJ0Puv//9HD36KK5rMTIyhqoatFrHaTaXcd0KijJPIrGN\nnp5BhKgSht1Uq1cprYcoYqsWLkSWSFr4zKOgIgkpUkdDYAKCLA4tSoTo9KPrEj9TYCQTp7X8EiYu\ncXR20cYjZJ6QDCZBKkU2CLjUbnNGUbAk1GRITejsT3fRmJ9hM/BRvDZ39xf4fmmBpXaFuNlLO/DZ\nOTBGu7LJZF6jL5fjmGEQui5xTSOVTnOhtMb2gUni8a02i+M5NHSFqZ07f+J+HrjtNp7/0pe4KZG4\nVvFer1SQ6fR1c/F+RyYjADt37uT3f7+XmZlzNBo2ExN72LZt2z/IUfTAgX0899xLlEoLrM7PkXLb\n6GGNhLbGc99Z4mh6GF23KBZN/viP/xdOHnuacTmPGquhMkGj6VGP1qgE0KussTfy2bAStKIkS47E\niXvE1ICG6+KSQIZxhFikm4ABEeETsiYjasLAsAoI4ijRAKqUBIqDE/QxRxnbKaMZMVrJEUrtMonw\nKioBDoIqOj16HkfWUVBoWxFxMYCl5fBTKWqKSdtRUdQuRkfTZLMqrdbLFHpaVMugygbLfgFViZMn\nRiQdTD3E89okEiF+aZ2hTJ5tg4NkEgnOBFLHAAAgAElEQVTWKxXmL1/gX/7Lz+C6LouLixz7fPla\nIvIjjHd3c+bFF69LMtLb28vh97+fFx57jB5VRRGCku8zfccdjI+P/9TzDMNgz5497Nnz069dKBS4\n4dBh8j13MPPKKyyeP8NmVGS4/1ZKpWWKm+eJJeJYiSQj49uQMsbK0ipRs4WMFMCnQRfnqTFhJFCE\nTskN0AjYxMUmRZ0ubAoYeOSRNBBYXGVS87glmUZGIUUv4nS7jidtVMVGiSSq7xHHpU2bkA26SCHx\n8UngEdHCICMCNLVKl2pgR00ULU6hO4MsLzE7+ziJRJ1qc529mRwiCJBSMru8zJM/+AH5fJ6rTzzB\nqe98h2233ML7f+mXfq7j2b9ICEP42tfg2LG3O5Lrj098Aj73uXdGMpJKbekGJRJphoZu4MiRH3Lo\n0MHrstuemJjgU596iFLp82iaS7ncwDQthPCJok1SqYi7734X8XiKo0e/j+9vjRJns910dWVpt9cw\n1DK5VIxEDnp7NVqtNKX1JaqlRTR/iqQwcaIaUm6RwU1cYpQYY5Ms8DLVjvmpTpsMNgOEyhK37NtP\nKGuEnkaxOMjF4Cp5WmgEqAhAYU2J+NCePeQsi+UTJ9hUFGLxBJEb8dDgFClNZ6Zept+waCoRXjbL\nYV3nQKPJWqvMmtDYaLjcd3CasLLB4wsLpKemSAwN0dfXx/rpCyQGd1C2G2iVdaIoYra6xC//5sdf\nY+ex/8ABVhYXee6FF8gKgQf4qRQf+bVfu24u3u/YZAS2XGNfj6z690UqleLXf/2f8md/8h+gcQpD\ncdneP0KxnkC4PTSqSRKD0wSBw5//+eeYPXGC8VSKUKyw5J6nJmM4UgFq7NE1xhJd7Ch08dzKEnGZ\nwBUTtMQySnycKMjhtl8hJWukRbBlJ60IMuhUUwXGdu9n4fIsbmUFkzRe2CJUXAytl6ru4yeH6e7e\nRebKN+m1TbLkSBDDJ+RSw+OsyGOlyuwfmaBk9wAxhscnKGsaoRanXG5x001j7NhxI+12i+XlM7zn\nnkH01WXmV+vMzm9wYXGRINBI5AoMDsbxGg5JQ8HUJelOstGTyzG/sECxWGRkZGSr1ytf20QRQhCF\n4Rtem1fjtjvuYHJqiksXLxIGAXdMTTE4OPi6L08pJWtrazSbTSzLot1uo2kaw8PD6LqO7/tcuHCB\nq1dXyGZTjI/3c/Toc3jlJrftuomnTj3LRs3CSk7TlY6T6ClQqZYol1fRtE0IBHEtia1XCPxFsoqH\nqyRZMA1inoeqajRClw1imIyQYYIcCk1CKmySwUbBo0HESafFsG6SNEyEXaVvMEUgVZbX6iSFQSh9\nAkoMUcVQUsS0LjbCFVJWDyVnmTY10rEkTixPw60Qr9cwRZOpWIxS9QVGu0e49caDnDh5kscefZTe\n0VEuLS4ylctx5113EU8kiKKIF48d4/zUFDtftSP6x4qnn4ahIZiYeLsjuf740Ifgt38b1teho4j+\njoBhWPi+Rq1W+4nBhDeD7du3MzaW5/TpKlIW6Orqodks4/vrqKpKPJ5CCIGux+jpMdjcPEs8Pkit\nuozmLtGtNNhmDtOobbDSmmP3oXfx1BPfIAoTGJqFYSSIex4tuVXvlLIMeHhMsQYU2GCROh5dWw7u\n2llMJcRt1THMKmvrTXZ2TXJh3ceR6/QhyBAhCAl1jedPn2Yyn0eoKiO5HB/8wAd46luPUatt0pIQ\nIhF4DGWSXF5b4/0DA+iZDKutFlpfH+vd3ey45x5ymQz5/n4mJiYYGBhAURTW19f5r//1G8zPlymV\nN0HYfPI3foeHHnqt2Z2iKHzwwQcp3XYba2trWJbF2NjYddUaeUcnI9cDXV1dKG6D23f0c27mApvl\nJepeLz2ZAVq1Ep63zr5997Kycp7FtXWyQqKrCTJqD/lYkoYTsBomWBE1RlIJik6NAk2k2eKSF7Bt\n7H5k5DO3eoaE2CBJFkPPEQqHhO4zFkjabhu3vU4yE0epzRP5RWRYJ6e0yes7yfQMM9vUsBuLFLw6\ncdUiiDI4UiCIyGBzSQpiDY/zl86SNMs00Wm4m6R6t5HM53GdRep1yeKijudtMjmZ4+677+MHjz7K\nnswGB7cPslDs48mZVXYc2M/U1BRHHvv/MLUy7z204yde+pqiXJNgHx4epq6qOJ6H9WO7mYVSiZ0f\n+MB1Xave3t6facbUbDb5whe+wdxcleJqhYVLp5jqNdkzPU6USnH/Qw9x5MhRVlZCTDOP5xVRlA1i\nsQqt+hWqpqDlVGm0LUZ6xkklYrR8wcTEdo4efRpdGyZGL8lUkqYWUW3kIdDoVuPosSRRbJWMu0DK\n1qj4Q0CIj8QHHFQsUigU6aJFMpBshgEtzSQRBezoyaAPQdNWicmIxdVFgmCNEVYYUZNUpEuohdw0\nMI0nBM7KMu3YTnp7t6HKiMbGZVrhPLa0uXHfHvyNdepRxMTAANPDw1y6epXHX3qJ6YEB7rnzzmu7\nT0VRGEmnOXP8+H83yciXvvSPr0XzIyQSW06+X/wifOYzb3c0f3+EYYAQ/mv8xn768SGrq6tEUUR/\nfz+bm5scP3aM9aUlugcGOHjrrQwMDLBr1zBHjnyXQiGFYcTwvKsYhkMqNcnq6jz9/WNks5KJiX3E\nYnFOnjyK5p1jKm7RZfWQSZgMWBlSjVWOHX0SPerGjwp4fh9B6KKIq3RbMVy3gqNo9CvbyKBCFKGG\naVwWaSlLNOUI/am91CI4eWWeoVyFZGCz0agTV9r0Rxl02cCljgbUXZea59FwHKxYjJXiJv/2s98i\nR5oF4SOpMp5UGBse4tLqKl2Arii0PQ/dslDjcXb19bG0vEY7MFHjLQzDuNZm6enp4Z//8/+RpaUl\nPM+jr6+P5M8gGnV3d19LFMvlMq+8/DK1zU36R0bYs3fvm7IE+e8+GXn+2LH/n733jpOjvvO83xU6\n5zSpJ2dJoyyNAkhCIJDIYHAGY+MXeG3v7Tq8fM+FvV3f3d6+7tbrvXuevfWuExjbOKwNNskiCZCE\nhLI00sxoNDlPT3dP59xdVc8fIwaERJYRwe9/JLWqun9d3dX1qW/4fJk4fpzVNhtX1frZ032aoWgE\nHWYkSaSzcylWqxW3209O1JNNRUjrKnFoLjRFQZEUzIqJtKqnJzpFp0lmqcfJeDRKVC+QzyWQpDzl\n7kWUZBFnSYegJrEINgqEsBnyVOntJDIpzD4vszMJjIKERS9jdTYxkS0yOTNJQRApJuKUl0JoohlE\nN0kliYwCGFDJUqEp1OZzpEthqh11TBVCVLo7sFt0KPkYN9xwO8lkiq6uLFNTdn72s2eoqnKw4vp1\nZBIJFns8fK6sjN7efiYmZlizzk1VTkfNq26vcoUCaVGk6mzVn8Vi4YpbbmH3b39LpU6HyWBgNpVC\nV1vL6jVr3vPP8+GHn2BiQsZma2Pw2G5W+NcTjo0h50s0evT8v3/399grL6epadXCPqlUjGTyedZ0\n2LCISQJRgaKSwWk1kS8qyGYDweAwmqaiKhqaMkMmWUIrzeKSGsmrCaxkKWWzGCwVhPLTuNUCvrOt\nwQnGMVCGdHbihJ1xFqNRIUhE0IgWiiQMOsrKyrju8qW8cOgYY4FhFutjVBugUNAjqOp8zlmJIcuV\nhBPTCKKbltaNuH1+hrtfwqH3klEFqpfZ8bmdqIUcUqFAIBKhvbaWlW1tBBMJtFzuvDC4LEmXzBDt\nvSaXm68XOXr0Uq/kj8dnPwt/8zfvfzGiKCUkSUZVFSYne1m3rvUtzTuZnJzkV796jHgcQCSTmcaU\nnmVFVRUNNhuRnh7+7fhxbvj857FaHWzcuIFkMkU6HWfTpnoCgSypVJaBgWNIUpA///NP0dU1QDxe\nwm430WSxYlOM+HzVmExmQCM/O0Ypq6Pa14qa0gjlE6iqCb3Oh91VJBCN49H5kVUd5PKoaGiChE70\nIGkFHDorgpzFqGrYqhchG7MooX3UWWL0x2P4NBUTCgIwDrQAaU2jAFgNBnqLJnSlCgoGGx2VlWhS\niWSyi2A0yqyiUAaEUimihQKW6mpqamrYvecQs7ZK2pe5mZwUOHLkl3zxi7cu1HmIoviOaj6Gh4d5\n9Cc/oUwQsBoM9Jw4wZE9e/jMPfe8Y7uAj7QYSafTvLRzJ1uXLyfR34/L4WBDewuF0wEyjiK+6g7q\n6uoAyOcTbNl+FUd/9lMEzYjLbCaTz6MIAgaziVQ+hUmUaSr3EslkCIoiLb5KZKOeuKgCfqZLMYRi\nDCkDHoeBgubGJEfJ5FRcZhMz8TwFyUmd2Y9JpyLLBdzJM6QKcTx2O2opj5rPoalGShTQY8DIfGGj\nEwEfoACKmkQRozTayxgc3UdFhZcWt5GnHn0Mq2cZjY3b0OnmL0aBwAinTg3wpS/dtRD9qK+vByAW\ni/GL73+fvslJyh0O0rkcY6kUl91yyzkKeOWqVVRUVtJ78iSZVIp1Z8P9F9su+M2IRqP0989QU3M5\np46fwKXXo5f1eJ21HB/sZlVrE7PjIZz+c9W/1erEbK7EYIF2sxmf00y+OEwseYJoVsQkmSgUwOWq\nweNahKuQIRYMMJWYRaea0YQsCS2DT6cjkkhjUIyIUhwDBSpUKzEtR5jxhSk29RSwIYCmohdE9IJA\nud7InE6HxWSirbGW3OluWux2MqUiKZORXCoJiorFpsdoTGI3yJhkL56yWhwOD76qRqyZBKm0hM1j\nQRBFNE1DhXNaot12O4P5PCVFQX5VrncyGmXVB31Ay1vkkUdg5cr54XgfVrZtg89/HgYGoOW1Dgjv\nI6an9yMIZlQ1y5o1zezYse1N98lkMvzkJw9jMLRRU+NF0zRefLoXT3QaT3MzdosFu8WCI5nkuUcf\nZdnGyzAaZ2ltfcVmV1EUurtfYvPmKm644TosFgubN1/O6dN9PPlkmANnZFbWdTA2NkM6XaBYVIhn\nw1isjVi8lWhaBIfNRTKfJl0sghjC63dRYa5DzUExGERLZ4gpecJKgTxm6i1GPGUOQskcNYvWIYpF\n+qJdmAtZXLKIXCwiMW9Q4QO8QBao1OuJKQJVmpGiXkGwWIgoCj6dCVFfwbQwhdTQQCQaZUKSsFVW\n4qquZt/ze4hkFBqcdTB0gklJprrjMh57bBdf/eoX3vFnpigKT/7mNyx1Ohdm1viZtwvY+9xz3HDr\nre/oeT/SYiQQCGDTNNpbWzkajTIaCmEQBMxSnsHwKLfd+gVEUSQeDyMIIf7yL/8d3x4dpvdwmICq\norfZaKypISPL9I73I2aDDCgKNr+fqzZt4rGXukmm4sTkErKpROOSKxk59huqxByKZCKey5K16HEZ\nZDJCnmQ6Rlpfx7AWxZaNImlmgkUVTV+HVrTir7ExPLwXf0lFUueQsBKhRBABDwUcgoYNkXEBlpbZ\nkdxmpiYm2OKcL4r99b5jlC2u5qzWAKCiooGxsQNMT08jiiIv7trFWH8/ZquVRatXc+XNNzM+OsrM\n2BjW6mpuWrduQay8msrKSiorK9+zz+5C5HI5BMGAIAjkc1lMZ1tWdZKeQlFFUVVEDYqF8yMAoiix\n42O3cWTPHpLRKLIhh91Yw6olyxifCKEodqamXsDlcRGdSpFKFygWQdU0NFEjLTspFksIhQJlZh0r\nyms5E0qB6sBbchLPpDGSxkwCB2CTJFKqRlhTSKkFXFY3Kzs7mTUaieXzTBcK5NJp7Ho7mt7MBGlm\nhCLkZkjbHNQ0tSMkyiiVokQiOTRZZjYRoMqro7qijAq7lcMDAyQMBvzeeeOnQrFIzmjk8ptu4tDR\no9RYLOhkmel4HFNTEx2v05n0YeO+++AL7/y3+AOBLMOnPw0PPAB/+7eXejWvz3/8j18iFoths9ne\nsnFWf38/2axtwdAsn88iZFM4LeWMj0/S0TFv7+Cy2ciNj1Nd7cdkOkQkEsDtnp+FFI+HqKwUuO66\nHQtpIaPRyMqVK2hvb+Mrhw+TTIRobaklnckQCEwimIrUNLTgr17GSOEw2UgMCQGFHPZyB7d9+iaO\nHpkmHZWZFATGxqfJaTZyShpZdDCuWCmVNGyV1TidPsLhAdxON+FSAi2TYACoAayABATP/ikrKpl8\nHkkU0KQSlWfrP8an+olFp/C6YevmzQz19UEggM9k4vTRo4xPTOFrWUNLRT2iIODMJBkf6kIU/aTT\n6becDnsts7OzkErhfE2Ra315OXuPH+f6W255R4XwH2kxotfrKaoqsk7H2o0bmZubIxaN0llfTzEw\nRyrVSzot4vGYuPvu+dDWn/+n/8S3/5+/RV+qxmFxkQHc1ZUscSXZ4q9jWUMDRoMBQRTxVFXx/Z1H\naK1fTiSioNNJ2GuXEQr0kkpHkeQim8sqkXw+mlpaePZMP31jOuz2dYyMnCGRmEYpmHCKNkyWSiyS\nh46lLg6fehGhkEMijYIRCyZUijg05scoCRITsThSocDisjKMOh2BWAyH3YUxW2RsdJSWVxmFCYKR\n8fFxDj31FPU6HRvLyjh+7BgP/v73mGpqaGpvZ8XmzWzeuvWiGZn9MfB4POh08xM/PeXlBIJBbCYz\nyUyMCrdlvkZdX6L36KPMTRzDV9NGTf1iSqUCJlOJpUuXsmLFCqanp7l8aor9+08QDmcJH++hsrKR\nlSs30H3yJNG8yHQ+C0oRlSGs+lpkyU2eEDohSJNDxKXXc3mFRERJMpAoIpUilJPFUCgRBWZVDdAo\nIuA0WUgV8rSvWMFn7riDZ555hoMHjxMSNAKoJFOzeAWZJYJExmykpqqKdddew+n+EDpdI9lsCUVR\nGB8RiE4doKRWEEinCfl8WCwWJsNhVE0jpKqsu/56NmzcyPCqVfQcP042m6Xz2mtZvHjxex7JuhSM\nj8ORI/Otrx927rlnfhrxX//1+9fu3mw2v+0x9IlECkl6JTIrSTKqICBKetLp3MLjqqqiAC6Xi7vv\nvp2HH36S8fFBACoqbNx228cvKIBMJhPf+ttv84P/9R2Gx3oQFBVdnZ1GfydF5qcKt63YTCQSZGqq\njzpXhu9851usXbuW++77OTt3dhMrahRkJy6PkfLyRpLJPMWij1h6EsmQpLvrOSRdmAqjAY+jjJ5E\nBCfQLwjoNQ0DUAeYBR0pRaYoQKCQRjOVkSgUiAwdx5yYolHLUClaUXt70UcirNiwgXgsRmBmhjpv\nJXKpAGddTpxmG6Nz02SzjnflLSSejbq+Fk3T3tX14SMtRvx+P4LbTSASocLtxufz4fV6OToywp/f\ndRetbW0oioLb7V5QeqvXrOEf//U7/OAHP2dgIEgmkSQ+cJIVK5sJ5LK0FIvE43GG+/oYnp7GX+1g\n/dZGBgbGCAYH0EkGBuYUIskgtbLCUDxOi9/P8fEpXFVtaKMnOHHiFFqxEh01QAOqpjCXDmBN6Oio\naaLfM0o4DeaURq2qUCJ71u9TRUbCafFwJp6DXB4hnmFgcBSLUYfmdDNHP4w7FsSIqipAkpH+fqpF\nkWqfj1MnTqAGAlxTX8/RWIxFViunn3kGo9nM+g3v3gPgj4Ver2f79g38/vcHsdnrGDcZOTM5iFkf\nYcOSRr7/2GOEZ+co5uJExyeYOX2UvrJKlnSu5O67b104Qf1+P36/n9WrVxMMBrHZYP/zk6SjU1hS\nk6RDEzjUInmxgKivxKQvUFSCpLKDWIRRjFkDitlLMBKjQpKo1olYKixIKZGeSJ4MGnZNwwWUiyKK\npKM7HqBz/XoMBgMtLS04GpZiMscZHOqhRXJjk8zMFpKoJSNaYI6uPXv41Fe+wlNP7SOb1ZAkjZVr\nXGz55n9BKZXQ6fV8rKWFZDLJ8OAgoiRxdWsrZWfrfxobG2n8MLaSvAnf+958y+u7qLP7wLB4MbS1\nzQuvD/LwvNdSVVWBonQv/Fun0+OuaWO8Zy+L2l7x0hmcmaGuowOLxYLFYuErX/k8sVgMTdNwuVxv\nePe+ePFi/vaf/4mBgQFy2SzVNTWEQmG+971fcvjwY8Tjytk24Dxr167niSde4umnX2J2dgafz0uh\n0IfN5kEQYC4UJZXMk0gNgFJAn9EjKkGsYoLJUp4yTyUeq4WpQg6jIFAly0RUlaAmYVM1dEqBvGxk\nVpcnlSlSI0exlaaoEUu4jCIVDgfjg4Msb2ggFo2yZe1aZgMByqMJhkMpstkUFvP8mI5UMsqWjoZ3\nNZeqrKwM2eUiHI/jfVV9yNDMDIvXrXvH9gAfaTEiiiK33nEHDz3wAFNjYxgEgbiqUr92LavXrHnd\n/umWlhY+8+mbeeL++2lw1NNQWUkinebF4WEeOXWK2MAALqOR8poaNjY1MT01yp996U6isRj/8I1v\ncGu9nvK2DubCYYbicX7/4gl07hXU1trp6Lie0ZEnUEpxBFlAlKqRZR0GnZ5gdILTwwJzCQveCpm4\nMMZsKkKZUsSDxoRsQm+2kJN1pPV2dOk51ssWKr1uZJ2IQprjwW6mzXZU9UrS6QQ9Pc/jdIo8+cRx\nrmqqIpPNMjs2RpPLhSgI2AWBVDbLkqoqjrzwAp3r1r2voyPr1nXicNjZu/cIxaVGCjkvNtnJYKlE\n/3SGJXXbcFrcRONRpoJjkJ7iis1LaXlVYr1YLLJ794vs33+SQkFhuP8U6dAk2azKUqOJuNVLsBCk\nIBkICBApRXCoQap1CpJOoD+RZiJVQDNbOVkoYJIkbDYzAdFOSBbwllQGAR05jFqJKjXPotpq8vn5\naQiFQoHmpZsYkk8QHzqJX2ciqBTBWI7TIDM3GWd28lnG4nnu+dIXqKnxYzabKS8vP++zsdvtH0lD\nswuRTsOPfgQHD17qlbx3fPnL8C//8uESIw0NDTQ0WBkZOUVFRTOSJGN1lxEptzGllsiMj5PWNMw1\nNdx4ww0L+wmCgGt+/O9bwmq1nuMWbDQacTrtdHRsolQCVY0xOjpBJFJOa+sG9u7dw8GDYzQ3g8Xi\np1CoIT4Xp5jpRhZl7PiQtNM4lQJWj59gUENFYM/sGNdWlbPeYubY1BT9qkoCCY+oJyGLWOxuzBY3\nKw0melOzmE15zIkY1WVOmuo7cDmdTB86hKSqhEMhZEmiyu9HU1V08TiRyDijQRibC5G1GVi8uAVF\nUd6xP4goilz/yU/y8P33MxOPY9XpiOTzyH4/m7a+ozF0wKWdTXMv8HLm9v/TNO2Xl2Id5eXl3PP1\nrzM6Okomk6G8vJyKioo33EdRFF54/HHW19fjOhvm8zmdbGlp4SfPPMPipmYGh8bJDE1TTORoaW1g\n986dxNNplhgMrDyba7PY7RzomiCRFPC5FyMIFfSeegm3FsSpA1UYJSGmUEzLiOdk8vkkkUAaRdCj\naYuwuRvICy+RLRVAUHHbdIi+Wlas3crRo8exhk/Q7KvEIM/HaFNpaHGoBKxzTE3tZnh4GEkqx+db\ny9SoxBMvjdLmD2DXNMSz6janaZgMBixGI4XZWYrF4ns67fWd8OoBWy/zT//0L3hsETyO+ciA1+PD\n6/ExPC3S19XF9muvXdj20Ud3cuRIkOrqtUiSjhMvTSBLM9i1GQTFjE6XwGPMkcGAaJTIZAM0GvwU\nspPorG4CiTyZUhZzKoXVYmdOb6A/LVPCTUa1okpOrDoDJZLo5Bn81iKWigry2SwwX3+j1+dx+Jpw\n1qzFhIQQTSOjoJOzRJIiUUkj1RPl/vtfYOXKSu655zPva5H4fuCBB2DTJmhqutQree+49Vb42teg\npweWLHnz7T8ISJLEHXfczosvHuDgwWOUSgrr17fx7//9PxOPx4nFYjgcDmpray/qObFv3yFstjZa\nWxvRNI1dux6lru5KgsEgu3fvo7d3BklaQl/fSVIpjWQihUV0g2ajWEqjI4tMjmDKRCJvQcWOQctj\nV4ycDs5hMRso8/sps1h4cWKG1Q3raG1ZhMlgXEiNRPY/zKeu3cTU0BBrXjXXyet0Mjo3h+lsVfaq\nxYv5/dQUpQofWR0MzghInhVcdvll/O53xxkZmeQTn7j1HR+f6upq7v761+k7fZpENMqSmhqam5vf\nVar3UkZGntI07QeCIMjAAeCSiBEAnU53zp3xGxEOh3nsN7/h0NNPM+VwUOX3s3rJEqwmE6IgMNR3\nhnTfONVmO4Ikczo6zlQoRvnSJkKJOG1mM6l8nvF4kr1DU4yFbaiKlUSyyODgEKZoP3UYcdsdZHLj\n+CgymD0GUgMmqxdFLOGy+vB42imVcmQNemLxAHqXQO2qZlatuZrx8T4kSaa6to1AIow5n0UG4qUi\nJp+XDWtXcuMdt3Dffc9QX9+JIAgsWraG0/vzjMwGqFRz1CkKgXQayeGgzOkklkph9Xov2hyC95pQ\nKI5Bf34PvaKaeXU569zcHMeODVNff/m8cZuqYjLZQalGjk/gNKhoyRzxXIqMVqSg2hELSWRzGIPD\nhK+yhtzIELWim0Qxi2Isx6G3kS2MEyxZsNs7KKQnMCGiFz1EinmC2iyNDgfesykUr9fLhg3tPPDA\nTkRbFZMzZ3AWS+gNGrFkllhuDskmUJ+PEeh6iZPCWg4dOsLWrVveo6P5waNUgn/8x/ni1Y8Sev18\ne+///J/ws59d6tVcPIxGI9u2XcG2bVec87jT6VzogLzYDA1N4nItolgsMDU1yNDQAF6vj1QqTyqV\nwWSyEghEURQ9mmZD1WbJKkVk4qhqjpQWwCZkkKgikgtQRxi/KKOTXUCaRS47U6LIVdu2MbtrFx6X\nGYvplXqaTDaJy2FF1umweL2MR6PUnk2TyA4HoVKJCquV/okJsqpKbWcnDr+fp5/soXPHvOeKyWRC\n0zS6ug6ydu0ITe9CmVutVtasPX9w4TvlUg7KGzv7VwUoXap1vFU0TePYsWN8/+/+Dk8qRWU+zzKT\nicjMDLvica7bsoXxmRlKkTirW5oxyPMKsVxROBGJEO4fpqGlnq7hYTITaTTNx5mgAUUrJ62EcJsd\niMoIHsECcoJsMU2Zy0G5qwJ9eJLDiV4s1jKc3nry+fkvqE5nIpMRMZkkwvFRFK2JQGCQdHqYxYvr\nUcNT1FTVk07FUZUSNqWEo0ykvkE6/IgAACAASURBVL2d/v5hTKbyhfxeVVUl2RUr6Dq0B0XSExwd\npaa2lm3r1hFLpegOhbjmzjs/sHbhS5a0cXjvJJl8DrNhftiToirECjFWvMoPJRKJIIrzroyJRILR\noSEikQjp4BzFcIhOUxVCSUNvtjOey5PVK+iM5YAep9NAXlMpR8BtdJDXmUiVzMiiFYuaJ58vIOlz\n5BAQFAmraALBwUQxwrKGhnNqOK677hqMRh3/8A8/J5B2ki6O4JFkJrIRdEaBbdVLEVTwWsyEJofY\nt8/5JzHyBvzsZ1BTA5s3v/m2Hza++tX5aNDQ0EcrKvRuKJVKnDlzhp6uLuKzszi8XkqlPKnULL29\np0gkdGQyIhMTIaLRMRobm+Yn+IolFKWA0einVDSiZE9jlgcplvJUawoCeiRVJCfE8GgiIgI2WSKr\nyQSjMWYQGYzEcJSXozOUCM+NIoomNK2AKGXp2NhJ0mymqaGBYeDg7CypdJqC18vn/t2/o6a+nmAg\ngMVmo7W1lWeeeYGO5W7Ky18RaIIgYDSWMzDw7sTIxeb9UDPyZ8D7vrZ97+7dPPKjH1ERjbLI5+NU\nIMCuA4doaWggE48zND3Nvu5u/O5ykvkMetmOgIAsiQipOKcnihjLFrF/OI1T9lBt1SEKRnKaGUG2\nkMtNYBZyGPVmVC2NThcjr6pEUyo6IYHf72Ttxk8wOwvR6BixWDfFokY43IvZXEZLSyeSpEdRJvgP\n/+HL/OpXjzM6aGBo4gy1NheyABPBM9gqFrFx61a6u0+jKOdqwKbmZkSpwNKlm/G47AydPMnRcBiH\n18s1n/vcOSOlPygoisLw8DDFYh6Do8CZSBCPwYKgaYQyEdpW1nLFFVcsbG+321HVDJFIhGMvvohb\nFFlcUcEzQ6fRlUocn53FWgDJaMHl9jGaL9DcvhZLeIpSNonZaEIVCkTzCWR7DWosRyh0hlwuj4ZA\nNKWgk9wUhSw5oYhsMKLonVx+zfZzcriiKLJt21X4/VX87GePcPA5lchcANkssrWuHbNOTzidpsrr\nRU3PkYiFL8HR/WBQLMJ//+/zaZqPIg7HvD383/0d/PjHl3o1fzw0TWNsbIwzZ4aQJJH29haqq6vf\n9vN0d/fw4IOPcWzfCSyUaPbbWNVSQ3J2lpcGXkKnX4HX24KmGRgaGsJodJ+1SPditWZIpeKUSiNI\nchpZF6XGJEA2j1t1MlnMUCKLHgWLqCMrCGSUEslShqRiRvL6OHimgGZz0FxfQb3RiB6BEhpRUWTD\nbbdRW1fH7iefxKAo2CoqWLFsGduvuw732bTNqwWG0ahHUeLnvUdFKWIwvL+65/7oYkQQhHLgV695\neEbTtM8IgrAO2AHccqF9v/3tby/8/YorrjjnovFekkwmOfrss1QZjVjsdqLJJMmSkYLi5fhQmpKU\n42RxD8tWdmBIayiKkYl4CL0gkMxnmStoeGvXsGjxFnq7ExTTJoazs5SMblQlTU3ZZaTTp8gIOYKp\nYSpMJT5358cwmQxEolEGMxmuX72Ovr4S09NJ6upW4XRO0dW1E6+3Gb+/kh07LsflcjI+3sPg4Cg3\n3HAF3/3uD+mJxDgxMYzLLnH9bTfwyTvvoKKiAk3TeP75kxSLNeh08zUgxWIeUZzjyis/Nb/Njh2U\nSqUPbMtnsVjkV7/6Hb29c5hMZVRVtdLTcwSLuwy32861S1Zy5523n9PmVl5eTkuLl9//9kmqdU4c\nFhv5Yg6rJcfVTe0MhcPMFkTsdh8edwVL9EYaLt/BmVMvMXDkSerwEjPqcct6ZEkhkTiDIDgxOZox\nqybEnJdiAUriDK7yalRphiuu2s7ISOCC72HRokX81//azM51f+CX3/se+dwsyUSMQCGHqtMhzIyT\nKEa5svWK9+iofvD48Y+huXm+XuSjyte+Bu3t0N3NGw6O/KCiaRqPP/4k+/cPYjBUACq7dp1k27bl\nXHXVFW/5ecbHx/nFL54lMKmjwbkIg05HMBrmyJkJbty4gmcP/R5neZFodAxBgLo6M8Vijv7+fkql\nUerrV+F2f4aenh7AitPewPTwb2lEQxQ03CaZYDGKUedGIYdR1FOQM4gFF7LBxHheQc5ZqPF3ECjN\n0bFyJYGxMZweD9ds3sySs4U/n777borFIpIkvWHdx5Il7Tz//CmKxdoFo8tCIYeizLJ48ZXv4ohf\nfP7oYkTTtFngvBJbQRD8wD8AN2kXalrmXDHyXlAsFjl9+jQDA2NYrWaWLVtMZWUlMzMz2AHN4SA0\nOUlgKobNXI3NDKF8Hs1tJVtVw9otazk8G8KUNeOtrEcpFUlNjpLKGVm3egM6nQ5/bQ06XSMz06ex\niwVMJidDQyfJZCLY7XZCuhLrOurxVpRRVBQyuRyrN23iqh07+PnPHyIcHmNkZIJcLoZeb6auroZl\ny5pwuZwAlJXVs3v3AWTZQH395TQ1bSWbTVEozLF87cqF4tzKykpuvHE9jz/+EprmQtM0JCnGTTdt\nXNhmfoDU2xMiytnheBdrkuO74eTJU/T2xmho6ASgoqKe9vZORkdf4C//8q7XzS3fcsu17HrkEbL5\nELm8DoO+yKpmC612Gy6Hg6iix+VqQSfrORGdxWZzUdeymNUbahgYCFKwOtDF5zCWUujNMmkcZG21\nuCU3s7MjoPciigqaYZyG+kZUVU8mk3nd96HT6WhobMJW10FgcIpIcByDwY/XUkk8nmGaLDan+7z9\nNE37QIvJi0E0Ct/+NuzcealXcmlxu+Gv/gq+8Q146in4gGZbX5fh4WH27x+irm79wsVZUerZtesA\nixa1LoyveDNeeukoJlMt0+MvUkjkKJQk0DTOTIRZ1hjAY7Owdv1yjEYjsixjs9kolQo8//wvGBsb\nQaezk8+n8Xr1CIIeKLJs/ZVkzuzHoRlZ1byKwdEuZhI5huIxKmTwWm1Y5TJ6EwmMdZtpaNpIPp+j\nu3uQG2908hd33rmwvmQyycmTp5iZCVNZ6WXZsqUX9Ep5+dyvqqrixhvX8cQTBxZ+50Uxxk03Xfam\nc77eay5lmua/AGXAw2frEK7VNC33xrv88cjlcjzwwL8xNpbHYimjWAyzZ8+v+djHNuP1eihoGotq\najh09CilgoTdLFEolcijILh8LOnYRCwWZ+V1Ozj2h50kImEUVeT03CjVK7bT0bEcVVUwGArodCK+\nsmoaGiy88MJxBMFKY6MLl8uLzdZCVJfkSDxOeUUF6668kmXLlyNJEvfeeyfbt48xMDDAmTMDHD4c\nYNmyjdjt9oX3oWkaIyNjtLZeSUVF/cLjpVKR5547wIoVy7BarciyzPr162hra2V0dBSYt4F/O61v\nr2Zubo7dTz/NcE8PoiTRvno1W6666h27/F0Mjh3rxe0+V3AYjRZMphoSicTr7mez2Vi9YhEdZ62O\nbWYzo4EAJw8exKSqrFzRzsnuYWaSRZTKRkKhARobLdx555+RSqXo6eml73Qvk/39HE/mKcgdSJof\nQRUxGOIYjWaSyXHs9g70+kV0d58A9ExMTJw3uhtgbGyMX/ziWRYtup7RgQAKsxg0mYwSxeYvY3Hr\nFrq7J7jhhgxmsxlVVTl08CBHdu8ml0rhrqjg8muuofVVRncfFb79bbjllnn79486X/7yvM/KE0/M\nD9L7MNHT04/ZXHVOlECSZGS5jP7+wbcsRoLBKJrmYzKQoMzsx3HWkG02mmbnoW7KvRYSiQBVVa/M\ntspm06xbt4RvfvOz3H//o4DEypVLSaeD6PVRtm3bTs+heqTJSWIzUer8leiKZzAYTSxe1sGxniFi\nBRHPkluprV0OgNmsw26vYPfuY1xzzTXIsszU1BQ//envyeddmExOurqGeOGFo3zxix9fuIFUFIUD\n+/dzbO9e8pkMPr+fTdu3881v3rXwO9/Q0IDT6bwIR/3icikLWP/sUr32hTh69BhjYwr19a98yQqF\nah55ZDdf/epnmM7nKQwNUdfUxK7gANF0lFg2g7u1gw2X3YSmqeTzOfw+D3KZj0hpmrLaWm66/kqC\nQTexWAyr1caqVZ0cOnSQRCJHqdSMxZKlslJHU9NiKiurKS+vJRIJUN2o8OlPf+ycNYqieLbHvoGt\nW7cSj/8Lrw1ABIMj6HQSXu+53hK5XJozXd38w3/+z7g9HlpWrOCKq6/G5XK9YwHyMslkkl/98IdU\nFApsqa5G1TQGDx/m1xMTfO5LX3pXbn/vhtcJuL1pEa4kSXSsW8fEnj0sO2t93+z30+XxsLevj+79\n+ygAtpoaNm/t4LLL1tHW1oYsyxgMBjZv3sSKFct5/OGHeX7XfjLJWdy+Sly+aqqrnXR1HUOSiuh0\nMsHgfmpry6iv38DDDz/FX/zFF89b3759R7FaG7BYHOjM5eja15COjZPOzFG/cjPt7SuYnDxOMBik\nvr6e3c89R9+zz7KsqgqL281cIsEf7r8fvvCFj5Qg6e6GX/4Sensv9UreH+h08E//BPfeC1u3zk/3\n/bDweuf6m/3fa6mvr+TgwRM4PI1kExmMeg0BAaO+RDxtpGVNPe5KjbGx4xiNHgqFNLI8x+c/fwt1\ndXW0tbXR3d3LSy8dohQfxq4zMXGmj41XX81oXx+lM2cw5HK41i9he3MzZr2e6g2zPPz7fsrK6olE\nhhEEEVk2YrGISJKD3/zyl4RGRzly6BgY61mx4XocDi9QRTg8xaOPPsO9985HT57ZuZPxfftYUVmJ\n2eslFIvx6I9/zK333nuOZ8r7kfdDAev7gmPH+vB6z72L1uuNRKNF/sf/+GcEoZbDPX0omTniShFT\n2XJamtpYtXY9oigwMHAAozJE2ZyNjy1bhtrRQe/oKLv37SYQUjAbGtBb3NS3t9LS4sdmS+Dz6VDV\nelatuhpJeuWjsFgcjI6eYnZ2Fq/Xe8GUh06n45OfvJaf//wPRCIedDoTuVyY+noTTmc7+XwGWZ5v\n+8rl0nTt/R2e2AxbNmzC7nAwfOIEv56c5K4vf/m8MH4oFGJychKdTkdjY+Ob2jWf7OrClkpR//Ik\nSKC9poYjo6MMDQ3R1tb2Tj6Sd82qVYt56KFjZ0/ceQqFHIIQveB8nVezaetWHgmF2N/Xh10UCSST\nTEej3LV9O5Ki0HfyJIGxMY4/9QRaKobT6VwwFysUCvz6vvuwx2J8tnMFOw/0EQgdYSIxgd1bhap2\nUVbmI5vNIst2xsdDaNo+/H47p0+fpqmp6Rwvl3A4itXaDIDJZEYUvbhcdUQiU9jtZWdbkHMYjUbS\n6TQn9uxhY13dwiA8j93OIk1j/7PPfmTESKkEX/wi/Lf/Bl7vm2//UeHqq+drZ/76r+G7373Uq7l4\ndHS0ceDATlS15lVpmhKlUuicAXlvxoYNa7nvvt9htbaQF10EI2GK+RkslhyCvYqNW69i69Yt9PX1\nMTY2g8dTTkfHjQuTaiVJYs8zT9G/axdNZjM6kwnyeQ5MTbHlk59kxy23UCwWcbvdiKKIoiiMjo7y\nb7/9EkePhpFlP5qWQxCmWbduJUf3/QFfpI7V7e3MaQYEVeHU3t+x4oqPY7U68XiqGB8fJplMoqoq\npw8c4LK6uoXhmD6nE0VV2b9rF3V3333xD/xF5E9i5CyiKJynoBWlRHd3D52d22lqaqO9fT1zc3Mc\nPvwUZnOJymovodA4mcwsNmsCd0wjHo/zRG8vsiyTmJvDm8uxvnM1M3Npxmb7OHXgCJ++59N86lN/\nRigUYmbm4XOESDqd4fnnn8VojPB//+9vsVrh5puvOs/EC+adYL/+9bvo7e0jkUhRX7+E5uZmjh8/\nwUMPHaG+fhWiKDE9OYAhOktTlXfBBrm1upqjY2MMDg6yaNEiYP4O4qmndrF3bzfgAhT0+l185jPX\nvaEPS2BsDO8F8pZOnY5gIHDJxMjy5cvo6Rmgr+8QZnM5pVKBUinALbdsftOhXAaDgU/ceSfT09NE\nIhEO7t3LDU4nXpuNQ889R4vNRofbzeFIBGc8zsM/+Qmf/4u/YKC/n50PP8zwwYN0Ll3KosVt2GwW\nDnf10TN9Cld5iXS6jLq62wmFwkSjaVTVwdDQCQRhhEwmR2WliyuuWM3mzfNeJ3V1lZw4EcRkstLc\n3MKJE0N4PB1ADrPZQjA4Rk2NjfLycqampjDDORN5AbwOByfHxlBV9SNhjvbd74LVOh8F+BPn8t3v\nzhexfuYzsHr1pV7NxaGxsZF16+o5ePAgRmMFmqZSKMyydWvH23Ig9nq9fPazO3jggReQZT1GSxGf\nr4nW1uVksxM0Nc1bqS9fvpzly5efs6+mafzmpz9l8sABtvn9hDMZxmZmON7fT8vKlTz76KN8/a/+\nauHmsqenl0ceeY6pqTC5nBlJKqeiovpsxNTGc8/tp9GUJGbzcDLfA6i4rQ5ysTCTY720L9n48isj\niiLhcBirIJwzpRvmBcmZsTHe7/xJjJxl7doOfve7E9hsr6QspqYGURQdNTUNwLzqLSsrY/PmG8jn\ne+jsrCCbzdHaeiVdhw+z98E91IoiLRYL8ViM0cFB9C4XgqJwy+VrCEQi9IyMMj06RDabpaqqipYW\nD8eP76GmZhEWi4tnn32GTCbMli23Y7O5SKcT/PznT/KVr9gvmPd0Op1s3Lj+nMdWr15FMDjH/v37\nEAQHo/37aLKrrO1ccU4KwCHLBAOBBTHS39/PCy/0UV+/EVGcP2HS6QS/+MUTfOtb975uhMRVVkaw\nv5/y16R70qUS9kuYm9TpdNxxx8cZHBykv38Es9lNR8fWt1W4VVVVRVVVFS/u3EmFx8PY0BAOUcR4\n1vzNLopIoog1k+G+f/1XDHNzuCIRlsky8TNn2DkywvKWFi5b3cHq1UtI+f2UlAmCwRmiURWrtZZI\nJIaqNqLXlwiFSixZsoadO7swmUx0dq5h48a1HD/+C8bHCzidPmprg/T0PIHH4yGROENlpZFPfvJW\nBEHAZrORUZTzREcincbqdH4khMjJk/Cd78wPxPsIvN23jc83f3zuuQcOHZqf8PtBRxAEbr75epYv\nH+X06QFEUWTx4vXUno3Wvkwul2NgYIBkMkV5eRkNDQ3nnRPXXLON/v5pCoUyysrqAI1AYIi6OvMb\nznOanJwkPjqKS69nKBwmFgxSq9fjNxgInD7NUDDI9N13U1NTw8TEBA8++DRlZSsoFo/T2Hg1MzOz\nhMMTiKJEsWjHZPJT7Y3i9TQSCk2gqnmSyQgOo5nxufnuu2BwjObmCiwWC1arlWSxSCwWQ6/XL/xe\nx1MpHB+A8OCH4Gv49kin0wwNDZHP56murl4Ye79y5Qr6+obp6zuEXu9BUQpEIj0sXtx6nuuoLOsA\n4znuf7ueegpTKkVTXR2qphFOpajM5TgxNoaroYG8InByJEGxZCMyPEumdD/tbT6io/0Y5oY5cOIP\nZHVGVJ2XHTs+sSCKLBY7yWQNhw4d55Zb3loRliiKXH/9djZu7CQYDNJ11Eixt/e8YtJ0qYTzVZbC\nhw6dxOGoXxAiL79+OGxneHiYjtfpCVy2ciUPvvgivlQK59miz5m5ObI220WLigSDQY4cOEBgbAx3\neTmrN2y4YLHna5Ekiba2toV1hEIhjhw5gizLNDY2nlP8+0Y4PB4SoRDZVArDq9JaL9vlh2IxJsfG\n+OSmTUxIEpOTk3gEgb7uMzwTKFDtqWQyNolng0ZjYyPDw6dR1TLS6QSxWBi9HrzeSrJZmVQqRmVl\nBy+8cIi1a1cjyzIec57je35DKpaiaDBw++3XsG3blTidTqqrqxdEpsPhoH75cnpPnmRxdTWiKFIo\nFumdnWX97be/gyP/wSIeh9tug//zf+BNMnEfae68c94I7n//b/jWty71ai4OgiAs1NRdiJmZGe6/\n/yHSaROiaEJVT9DYaOezn70No9G4sJ3dbueeez7J00/v5vTpF5EkgbVrF3HVVVvesEswlUph0+sZ\nLRbJB4OssVgQBQFFVSmVSpRKJXpOnqSmpoYDB45hMtVhNtsoFovodFba21cxNtZ19trURiikEo5N\nMDTQj6oqSPosFkucyWCAXFULY2NdOJ15brxxfvDQyPAw3WfOMDE3R4XFgquykpYlS+ienqZ+0yYO\nHz5MRUXFOb8X7yc+UmJkcHCQBx98nELBAcho2kt0djZy003XLdxFDw8PMzo6gdlsorp6Mz/84UMU\ni4WFHm2AcHiCLVvOzb1LqorRaCSTyxGemSEdDpMvlbBoGlP9/RztS7Fy6TaiqTRNtbVomoGHv/dj\nvnTzdrZs2YSqKOw+coT9MxpOp++c57ZYHMzOzrzt9/tycarP5+NnZ84wl0jgOXvxnQqHydrt54iF\nXC6PTue4wDNJFIvFCzw+j9fr5ca77uKphx5CGR9H1TRsVVXcfvvt55zk75TJyUke+uEPqRJFGux2\n4v39PHTiBNfcccdbNmLTNI1nn32eF144CXgABUl6nttuu4rly5e96f6rL7+cJ++7D4/dTmR6GrvZ\nzGQigehwUO5ysa+/n2rbvHNrRWUlfcePMz40ToW1nBkEJKOVoruOdMYCTFNTU0Gp5CWZzJDP69Hr\ni/h8NQiChqKUMJmshEIZCoUCv33gAaryeTbedB3K2XbvrtAsTqfzgoLs2ptu4ilBYN/JkxiAvCSx\n+tprWfVhicm/DooCn//8fF3EHXdc6tW8vxEE+P73obMTbr4ZPuylRJqm8etfP44kNVJX90pkdHj4\nFHv37ufqq8/13PD5fHz2s7dTKpUQBOEtWRX4fD4yoojD6yUwOLhQIZzJ50lLEovb25kaGgIgGIxg\ntc7XKFZV+ZmZGcZs9qDTmQERg0FHPDqKOaeSVWM49WbCkVlqanw4W71svqqTxYtbaWtrw2g0cvr0\naQ787nd8dtMmjvf2EpyaYnxwkOcnJ3E0dBA+EuXo0TSadpClSyu5/fabL1ljwevx/lrNH5FsNssv\nfvEEdvtyLJb5C7Kqqhw8eISmph6WLl2KKIo0NzfT3Ny8sN+OHet5/PHDWK11GAwmYrEZ3O4c69ef\n68nvKy/HsGwZw6dOEQ4EcJhMpCwWJEVBEY1IWRNTgQBFq432pkbOHH8Wv8FDMh7H7XQiShLLmpo4\n0LeHeDx8jiBJJMIsWVL5jt+72+3m5i98gaceeoj+iQkUTcNRVcXHXyMWOjpaePzxXux2z8Jjqqqg\nadE3dTJsbGzkS9/8JqFQCFEU8Xq9F019v7BzJ01GI5We+XXZLRYc6TTPP/YYbW1tb+mHYmRkhOee\n66G2dsNCjU4ul+Ghh56jrq72TVvdWltbid16K7sffZSBQoHe8XEqamrYuGIFpycmMPv9WM56rOj1\neqqamxkai1DIJAkKIkgSSy67GUUpoSh9hEIB5uYUvN4KAoHTGAwyLlc72WwvTqePRCJCWZmTyclJ\n1FCI+rOeKJIkYbNYqEunOXbgwAXFiNFo5OaPf5zE9u2k02lcLtdFEYXvZzRt3vY8HodfvdZi8U9c\nkMZG+Ju/gbvvht27Oa8z78NEIBAgHC5QW3tuiraysoUDB46cJ0Ze5u1csL1eL82rVxMPBlHtdkYz\nGdRikbiqsnTjRvz19aTORo7r66s4fDiExeLA729ifHyEcLgXTcuhaWlGRvbi1sOqRTsYmzjJxNw4\nHruRrtlZ7vriF7n66qvPee3De/bQ5vHgtFrZ2tlJKpsllc3yvZ17aa24DL+/HpgXZSdOHKO+/hjr\n13e+jSP4x+cjI0ZGR0fJ561UVLwSlhdFEZergUOHTrF06dIL7nfZZRuoqqrg8OEuEokwGza0sGLF\n8vNSHss7O3ni5ElqW1uxqipeh4N6VWVPMEgwrTJXUkAUuXLzJqxWK9lUFLfehKqqC8/h9niochkZ\nGeli6dItSJJMODyFKM7S2XnNu3r/9fX13PuNbxAOhxFFEY/Hc942K1Ys4+jRHkZHT+J2+ymVisRi\nI2zZsgSfz3eBZz0XURQvupFOPp8nODZG+2suunaLBSIR5ubmKDs7YO6NOH68B4ul5pxiYaPRjKK4\nGBgYZO3aNW+w9zyd69axbPlyBgcH6TpyhJnhYXoyGTouu4zrOjt58J//mWgyictmw2wy46tuY6JU\nYNW666irW4QgCEQiAerqmrjppu38/d//gGQyxJIlTsJhjVism9Wrl5LPZ5mb6+Wuu7aTSqUwXUDU\n2S0WxoPBN1yv3W5/y2moDzKqOp9qOHIEnnsO3udDpd9XfPWr8JvfzLf8fu1rl3o1fzzmIxznqy1J\nkikWL95otB033YTN5eIH4+MUslkqKyrY1NGB1+fj8NgYV916KwDr1q3m8OEHCYdNeDxVrFmzma6u\n5xCEFI2Ntex54RButRxNU3F7q3G64dp1iygpCvIFfg9ioRAtr6rZs5pMxNNpUBxnoy3zCIJAWVkT\nBw+e/JMYuVS83pdRlnXk86+fggDeMA/5Mk1NTay+7joe/tGPUHM5kno9OYOBT9x4I/F0ml8/P0pj\nR8dCC5jVXUl8YAS3+9w6jNrFrfhXNtLXtx9FUWlq8rNjxycW5g68GwRBeENRYTKZ+OIXP83x4110\ndw9gMhm55ZZtl6wbBubvTARZpqQo6F51l6JpGiVNO6cF9o3I54vI8vnbiqL8himo12I0Guno6KCj\nowNN086J/tx81108+uCDGKJRcrksPekA7etvo77+lVRSPD7FddetZ9myZXz/+/+LI0eO0dc3Sig0\nSzKZQZbnkKQin/vcNbS3tzM5OUniAj4JoXicyos4MfODSqEwf2c/MjLvLPoR0F4XFVGct8vfsGHe\nCO1VQeEPFRUVFRgMBbLZFCbTK9O7g8Exli+/eDkqWZbZsnUrLW1tPPzTn6JLpQgWiwxMTbF827aF\nrkiv18u9936cJ598gaGhF9DrZW6/fSNXXrkZo9FIQ81POfz405h02f+fvfeOjuM687Sfqs4RjW4A\nDTRCIxCJAcxJpEiKoiUrWrIkW9LI+hzkMOPxN9m7nvlmxzvnTLDXk2fHXnstW7ISrSzREhVJUcwZ\nmcixATTQ3eicq+r7AxDMv9Cc7AAAIABJREFUqGSSAEk85+AAqND1Vt2uW7+69w0sKLGyqHwdFqOR\n9qEhdAbDOcd1VVQwMTBA8WmOqplslgwiJtOZ1crVai3J5Mfv8y4X14wYKSkpQVHeRpKyZ7wd+/1D\n3Hrrxfkybrj+ekrKyvjJP/4jNXl5VLpcqFUqzAYDRlsbWdlHIDCGJGUR9ApisY2EJGFVFFKZDO0e\nDzVr13LnPfcgTUdEXO5U3gaDgeuuW3dOhM5soVKpWLh6NR0HDrD4NM/4vrExnAsWzIi7j2LRoipa\nWg5jtxfOLJNlGUnyU15+TrWCj8XZ01But5tv/cVfMDAwQCaToajlFMeODTMxMYxKpSYU8lBTY56J\nXrJYLNxww+aZaruKokw7s2lmPru4uJi8mhqaOzupcbnQqtV4fD68osi2dXOjjWaLQADuuw8sFnj7\nbThPHz3Px6C6Gv7yL6fysuzadXVGIGk0Gu6+extPP/0mGk0xBoOFcHgciyXC1q0PXvTjuVwuvvXn\nf87AwADpdBqXy3VOX+VyufjqVx8kk8kgiuIZ081btm1joqODVUVFM5F78WQSH3DrdP9xOus2b+a5\nn/wE9eQkztxcEqkUo6EQdpcJjebMBp2YGGTjxrnnJCR8kux0lxNBEC5UsuZTs2vXe7z5ZhNmc9l0\nQjMPLhd87WsPYLiIPVlLczNvP/cc1mnnp6AgsP6WWzCYTDQ1daLTaVi+fBFarZY9b7yBd3AQrV7P\nso0buW7jxjnnWHS5EIRzc73AVDjei888g7+rC4sgEFcUdEVF3POlL31sMZLJZHj88V/T3R3HZitB\nkrKEw4Ns2FDJHXfccrFPBZgSF93d3Zw40UoqlaGhoYaFCxd+YoGZSqXYu3s3zQcPks1kKKmuZvNN\nN81Egl3pXKjdP4z2drjzzinnyx/84Or2d7gcSNJUMrQHH4Q//MPLc8xP0+6/K6Ojoxw71ojfH6aq\nqphly5ZiNps/esdZ4Ojhw7z/6qvYFAVFUQir1Wy7914WX8ClYGBggPd27mR8aGjmeaLS6Nix4zBG\nYxl6vZFQaIzc3CTf+MaDH5lr6VIw3ebndSacNTEiCMLDwNcAHfBTRVEePWv9RRcjAD09PRw92kQ8\nnmLhwkqWLm24JM59sViM/v5+FEXB7XZ/aMNns1lUKtWcDLe6nHxY56QoCh6Ph0AggMViwe12f+Kc\nGZlMhubmFpqaOtFo1KxcuYja2tor5rrLsowsy1edWP2kD6WdO+Hhh6dEyFe+cgkNu8bo6ICNG6eu\n7+UIvJoNMXKlEYlEGBgYQBAEysvLP1atr0wmMzW9Pd2vDQwMTPs8xqitdZ/X5/FyMVfFiFpRlKwg\nCCJwWFGUVWetvyRiZJ4PZ2JigraWFhLRKO4FC6iurr5sD79rpXOKRCK0Njcz6fPhLC6mfuHCizoy\nd6XxcdtdUeDf/m1KhDz77NSDc56Ly7PPwne/O+UMfB4f94vKtXK/X0wymQynTp3C09eH2WZj4eLF\nF8Wf8HIxJ8XIjAGCYAB2Koqy+azl82LkMtPU2Mi7zz6LUxTRazSMJxKYq6q470tfOifx26XgWuic\nPB4Pzz/6KLnpNBadjslkkqTNxv1f+9rvXLDwSuXjtHs6PRX5cegQvPLKfEKzS8l/+2+wb9+UQ/Cl\nfIG+Fu73i0k8Hmf7L39JdniYPIOBeDrNhCBw60MPXTE1pz5MjMyqq5IgCP8D6AQe/aht57m0xONx\n3n3hBVY5ndSUlFDmdLKqvJxkTw8njh2bbfOuChRF4fXnn6daq2VhaSmlBQU0lJWRF4ux+803Z9u8\nOcvo6FQis/HxqYfkvBC5tPzDP0xF1dx1F8Ris23NPB9waP9+VCMjrCgvp8zppK60lGV2Ozt//etP\nFBE4V7nkYkQQBKcgCLvO+nkaQFGUvwWqgEcEQTjHi+j73//+zM/u3bsvtanXNAMDA1iyWQxnhcqW\n5+XROi9GLgqBQID4+DgFZ42AuJ1Oepubr4oO5WLz2muwYgVs2QIvvjgVOTPPpUUU4f/+XygpgU2b\nYHh4ti2aB6D16FEqzsqpZDEa0SWTDF8FjXTJnQEURfEC58ROCoKgVRQlDWQAGThn6Ob73//+pTZv\nnmmuFCfOea4NjhyBv/s7aG6Gp56CGz5d9PU8nxK1Gh59FH74wykx+Pd/P5Vq/yrznZ5nDjGb0zTf\nEwRhF7APeF5RlMgs2nLNU1ZWRkStJp5MnrG8b2KCRVd5TZPLhd1ux+R04p2cPGN5v9dL5ZIllz2n\nzFzj+HH48z+HJUvgC1+YCjVtbZ0XIrOFIEz5j7z1Fjz+ONTWTjkPd3RMORPPc3lZvGoVvV7vGcvC\nsRgpvf4jy3VcCcy6A+uFmHdgvfy0NDfz9vbtFIgiOrWaiUQCa00N9/7e7807sF4kRkZGeO7nP8eW\nTmPRagmmUiRtNh545JGPrI9ztfJBu7/6Kpw8CTfeCGvXzucOmWvs2wdPPgk7dkzVAKqrg7w8sNmm\nhIssT+UricWmfqLR3/7+4G+PZ2ofuDbu94tJIpHgmV/+kszQEHkGA4lMhgm4ahxY57QYmW0b5pln\nnnnmmWeei8eFxMicngGcq0LpSiYWi/HTH/yANQUFM2mGAU7297Po9ttZd911s2bb1f6mJMsy/+ef\n/okFgoD9tCIq3SMjGJcs4Y577plF62aPq73dLzWJRIL/84MfsNJux3haAsfmgQEW3HwzGzdtmkXr\nLsx8u197fJhv4lVYhWCeD2NwcBCrJJ0hRGAqaqbt+PFZsuraYHx8HDkUOkOIAJQ7nXQ2Np5RwXme\neT4uQ0NDmLPZM4QIQHl+Pm3zkXDzXCHMi5FrjAspU0VREOcjai4pgiBMTa6ff+XlNWaeq4YPvaev\nxqp381yVzH9TrzHcbjcRjYbYWVEz/T4fC1etusBe81wMCgoKUOfm4guFzljeMzpK3fLl8w+OeT4V\nZWVlxLRaoonEGct7Jybm7+l5rhhmszbNIuCngAS0Kory+2etn4+muUS0trTw1vbt5MNU1EwqRW5t\nLfc8+OCshpdeC3PIw8PDvPCLX5CTSmHWagmkUlBQwP1f/eqsVNGcC1wL7X6pOXXqFDufemrqnlap\n8KVSWBYs4L6HHroskXCfhvl2v/aYk9E0HxTKm/77UeA/FEU5cdr6eTHyKUilUqTTacxm84c6CwUC\nAdrb2qYK4lVVUVlZiWqWYymvlc4pEolw7OhR4pEIpRUV1NbWztkHxuXgSm/3dDpNKpX6yHvuUjM5\nOUl7WxvxSISyykqqqqpm/Z7+MK70dp/nk/NhYmTWomk+ECLTGIDgbNlypRKJRDi8fz+djY0ogkAi\nm0WORtEIAuaCArbefjuSJHGqqQlFUahdsoTq6mpEUcRut7NhuuxpMpmkq6uLbDZLSUkJmUyGQCCA\n1WqlqKhols/y0pDJZOjv7yeZTFJYWEh+fv55t5NlGY/HQyKRoKCg4JxcIB6Ph+YTJ0hEo5TX1LBw\n0SJ00yn1M5kMx44cofnQIbKZDLXLl5PndHJ41y6iExPIgoAsSSxYsOCSn+88U0L96JEjtBw+jCLL\n1K1YwZp16zAajZ/q89LpNLveeosj776Lb2SErChy/e23c8fnPnfBz1QUhbGxMcLhMHa7/Zzv3cjI\nCD6fD7PZjNvt/kRiIjc3l+s2bPhU5zLPPLPNrOYZEQThTuDvgKOKonzlrHXzIyMfQjwe51c/+Qnm\nyUlK8/I4tH8/nuFhCmtr2bZ2Lb5QiFePHKGooAAzkMpkUJvNVK1fzx2f/zyRSAS1Ws3Y2BivPfUU\nxnQaJImDp05hM5lYWFFBTJaxV1Vx9/33f+oO+5Nwud6URkdHeeGxx9BEImgFgaAsU7t+PTffdtsZ\nfhuBQIAXnniCjNeLVhCIAIs3bmTrTTchiiLHjhxh74sv4tJq0arVdHq9iIWFfPOP/giTycRzTz5J\nsLWVBQUFqFQqWnt62NfWxhe3bsXlcJCVJDo9HsTKSh78yldm3qo/eGCl02kKCwtnxM2HIUkS4XAY\nvV6PwWC4VJfuknA52l2SJJ557DGS3d1UFRQgCAIDExNki4r4vUce+VjX+Gxe/PWv6XnnHdIDA9hE\nEX8ySWsoxMKbbuKPv/c9rGdFTcViMV565hkme3tRZbMEMxmqV69m5bp1yLLMkb17mejsxCoIJBQF\nVUEB9z788FVbzXl+ZOTaY06OjAAoivIK8IogCP8uCMJnFEV56/T1p9em2bJlC1u2bLm8Bs5hGk+e\nROf3U+d2EwgEECMRri8v5/joKN7JSTSKQqKnhzePHqVApUIHBFUqDp48yammJoyKQjKdpqO7m7tW\nrcJVWMiR1lbc8Ti6RAJXfT35+fmc6u/njR07uPsLX5jtU74oSJLES7/6FVWiSL7bDUyNfhzdt4+m\nkhKWLV8OTAmCF598krxIhNLp7SRZ5uju3TgKCqiprWXPK6+wxuXCGwhw8NgxtOk03hMn+NvhYe76\n0pcYb2tjbXn5jMjQJpOUpFJMhsO4HA7UKhULy8o40NODx+OhpKSEiYkJXnnmGRJjY2hEkaRazcZb\nb2Xl6tUXPKeW5mbee+015GiULFC9YgXbbrkF/VmhntcyPT09RLq7WX1ayd9FZWUc7++nvb2dZcuW\nfaLP8/v99B0/jjw6ihZo8ngwSBKWTIYDL79MRW0tX/ryl8/YZ+fLL5Pq7ibp8zHu8RBNJHjz5Zep\nW7QIm9XK6PAwn7vxRsqcTmCqTMCO557jS1//+u949vPMM/eZNfd9QRBOnyQPA+dMmp9etXdeiJzJ\nYGcnRo2Glt5eTnR0kEwmEQQBiyzTMTjIqa4uOoeGqEsk+KzDwQ0OB1t0OkaOHmXs6FE2lJVRo9Xi\n9Pk40thIMp2mt7eXGrsdh16PZ3AQgBqXi4GmJqLR6Cyf8cVhcHAQIRQi/7TpFlEUWZCXx8kDB2aW\njYyMkBobo/S0YXSVKFJbUMCxvXsZGhrCKssk02kOHTpElSCQAxSo1SRPneKZn/8cUzZLJBIhEAiQ\nzWaJTE5SmpPD2NjYGTaZBYFgMEg2m+X5xx8nPxJhvdvNqtJSVjkc7H3hBfr6+s57Pj09Pbz95JMs\n0um4rrSU61wu/MeO8epzz13cC3eF4xkcxHEevxyn2cxgd/fM/4qi4PV6GRoaIpVKXfDzgsEgxOMk\nYjG6h4dZqFZTZzRSpdWSF43y9H/9F4FAYGb7cDjMYGsrw4ODiKOjLDabkf1+bjQaob0drdfL9XY7\nBw8dIhSLAVP5ZyYHBvD5fBfxSswzz9xkNkdGPisIwp8yVa23D3h9Fm254vCMjtK1axeVZjOpRIJT\n/f2MBoNMhkJY43GGx8cRolGKT/P5kNJpFokigyMjAGQliRKzmYlwmEGvFyQJjUqFRqUiMd0Ri6KI\nWhBIJpOYzeZZOdeLSTqdRnseJ0OdVksyHp/5P5FInHc7o05HfHISURSRBYHuwUGM6TTHRkawZDJo\nZZmEIDAeiTApijTk5aECsmo1skpFNBbDWlp6xmfGFYWcnBz6+voQAwGKp0diAPRaLeVmMycOHqSi\nouIcew6/9x7VNhuW6Wk0tUrFotJS9re3Mz4+TsFZJcevVYxmM0lJOmd5Ip0mPycHmJqWe2X7diLD\nw2hEkZRGw6bbb2f5ihXn7Ge1WokrCmPBIEXT90jX+DhKIkGBSkVqbIwf/s//yZ/+1V9RUFBAMpkk\nGg6TnZyk3G6na2ICuyxTaDIRTSYJTEyw1OkkP5Wid3iY5bW1AGgEgXQ6fWkvzjzzzAFmbWREUZRX\nFEXZoijKZkVRvqwoynz6yY/JxMQE0ZERSnU6yqxWFrlcVBuN9PX0kFWr2VRcjNtgwCbLBDOZmf3S\n6TQmtXpmnjbXZiMGGIFUOo3GaMQfjxNKJHBMi5hoIgEGw1Uzb11UVESIKSF2Oh6fj8qFC2f+dzqd\nRAThnO1G/H7K6+pwu90ktFr8k5MMjo9TpijUmExYRZFVJSWY/H6GJybIMxopt9spMxqJ+f0cDwbJ\ns9tJptNkJYlTw8NYKyooKSkhHo9zPs8Fi9FI6LS37NPxjY5iPyskWBAETIJA6Kx8JtcydfX1+EWR\n8PSoA0A8mWQsm2VRQwOSJPHcY4+R4/dzndvN6tJSVubmsufZZ2dGpRRFIR6PI0kS+fn51Kxdy0gy\niSBJjIZC6FIpjCoVNrOZGpcLWyzGa9MjVHa7nRignf4+JdNpDKJIMpPBYrGg1emYjEYxqtXEpm2M\nJZNkdLoLOlfPc3UxPAx/+ZfwF38B0wPT1xRzujbNPOenu7OTKpsNw8qVdDc3o5NlQpKExWBA0GoZ\nDocxOxwIOTkkEgm84TAGjYaELOMXRcorKwGw5eaSV1bG6wcOYEkmUdJp9g0OUlFSwkMOB2OBAN2h\nEFu++MU5HSL4SbBarazcto3DO3dSabNh1OkYmZwkaDJx62mRCBaLheVbtnDkzTepyc/HpNczGgjg\nURQe3LwZnU7HLQ88wL/87d8yHgiwODcXXyyGPicHUaslXxBQ5+fTnslgiscRgXFRxFRZyWtHjyJH\nowgGA+tuuYWvPvAAgiCQn59PmKmH3ukhot7JSUovUDMov7gY/+goRQ7HzDJFUYgqyjVbBfh85OTk\ncNtDD/H69u3o/X4EIKbRsO2LX8TpdNLT04Ps81F22qiUQaejwmLh2P79JOJx3n/jDRKBAIJWy/JN\nm7j985+nra2NY088gSkSoUSrxWC1YrTbSVmt1LrdDI2M4Pf7cTgcbL7lFrYfPUqJyYRJr6cvmUSl\n1WIuLMTmchEIBvEGg9RVVzM8MUF/NMqWL35xVnP/zHN5eOcduP9+ePhhUKth3To4cABO+zpe9cyL\nkSuQbDaLKAi4Kyqw5+XhGR7GEw5zXVUVaaeTtUuWYNDr+YUkMdbRgdNiwWqxIBsM9Pl8bJkuNy3J\nMpOiiGIwUONwYNRoWFVfT+foKC82NbFhyxZuv+8+qqqqZvmMLy7Xb96Ms6iIkwcO4A2HKb/+em5f\ns4ac6eH6D9i8dSuOggJO7N1LJBTCvWQJD27cOPOmWl1dzbf/+3/nbzo6iKtUuPLzMRqNdHg86A0G\n8u127ty2DX84TDqdJtrSguT18vC99yIoCtFkks6JCTweDzU1NbhcLlxLlnCysZGaoiJ0Gg3DExNM\naLV8du3a857Lui1beOmnP8Wg02Ezm8lks7R7PJQsWTL/Rn0W1dXVuL/7XYaGhlAUhZKSkhkn31gs\nhuE803JWo5F9ra14WlpYnJ+PrayMZDpN6xtvkIjF+Iu/+iv+t1pN20svUeJwoDca8csy+rw8ivPz\nGR4enqk59NlbbqHtxAk6Dh7EolaTdDiIqFQoajUr6+sZ9vkY9PvxarVIVit3PPAAldMvDvNcvezd\nCw88AM89B5s3Ty2zWuE734FXXpld2y4nsxra+2HMh/ZeGI/Hw3P/+Z8Iw8N0HD+OKp0mFI8TU6u5\n8/77WTQtHmKJBP/1m98gZ7PIkkTdihV85q67GGhvxz88TDyVou3UKVY7HNRWVZGXn48oikiyzP7h\nYb763e+eE554Pk6dOsX+/ScIh2PU1rpZt27Vp5rWuRJD/RRF4d///u8RurtJ+HwIokhOfj7dp06R\nV1PDoupqdh1uorW9i7BviFXFhZRVVdGwahUarZZT3d0MGwx860//lJKSErLZLIcPHuTEvn2k4nGq\nFi9mww03nCEsgsEge3ftoquxEbVWS05xMVGvl3Q4jKJSsXDNGrZs2/apwlVng7nQ7qOjozz3n//J\n+rKyM0aluj0ejo2Pc2NlJXmnidV0JsOzR46gLyxjMhRnuKsVSyxImdNJRXk5S6qriSUS9AgC3/iT\nP6Gzs5NDu3bhGRzEHwigV6uxmkz4IhGMej02mw2V2Ux8YgKHRkNWURDtdj734IMUFhbOxiW55MyF\ndp9tenpgwwZ47DG4+ebfLk+loLYWnn0WPiSQ7opjTmZg/SjmxciH81ff/S7Hn3iC1TYbBq2WrnCY\ntnCY9YsW8cD995NIp2kbHaVq0yZuuvVWYCocsbenB4BwJMKb27fTsmcP661WRJOJXLebFWvWoFar\nOT48zM2PPEJZWdmH2vHee+/z+usnyc2tQq83EQiMotf7+eY3H8But3+ic7qSOqexsTEGBwYQVSq0\nWi27nn+eAkXBajQSiMXY19mJVaOlfyhFaDQLikTE38HyQguLKwroDwaxmUzkaDTsDwQoqqzEWFLC\nrXffzZKGhgvmdYlGo/zqxz/GHo3idjrJShJdo6NoFyzg9nvuwWAwXHHZXOdKuz//zDNMNjZS53LN\njEoNSBKhcJjb6+pmtstmMrz37ru8drgZS8l6rGYn3kSEVMbDTUucFBr0eEZHCRoMfOmP/xiNWs2u\np5+mzuHAbrEQiERoGR9n4z33sGbtWgRBoLe3l1d/9jNWFRfPVNTuGBri5OQkdz/4IDW1tThOm4q7\nGpgr7T5bTE7C+vXwR38Ev//7567/4Q+hrQ1++cvLbtolY87mGZnn05FKpRg5dYr1CxeSTqWIAMtK\nS1mUyfD28DAvt7ZitdsRTSaaDhygu6UFTU4O4f5+8kWRVCrF06+8wjKLhSKNBqJRiMcZTyQYdDqp\nqKwkJkkfWSslEonw9ttHKStbj1o9Na9dXFyNxyOwd+9B7rzz1stwNS4viqLwzhtv0LZnDw6Viqws\nMykILLvxRpBlAl4vVW43N3/nO/x/f/mPeOMRskoEhyMHtSqMXmukf8xPIjhB5YIFZGQZKRymPBik\na3iY3ZEIxysquP+RR847utR48iTGYJAs8Jtdu4jH4+Tn5yOFwwRuuOG8ETfzfDzuvOceDhQWTo1K\nJRJULFzIAzfeyMtPPUUwGsU2HU3W39dHf3sPelMhNaV16LUGCtMp2n0G3mptoSFfT3FeHoscDt57\n4QXCySQ3lJVhnk5Gl5eTw0qNhuN79rB6zRoEQeDkoUNUWCwzQuRkZyddbW1kQyEOShKH7HbW3nYb\n68/jOxQMBtm3ezcdJ0+i1mhoWLuWdRs3zueZmcNks3DffXDrrecXIgBf/jJUV8NPfgLXQlPOi5Er\nkMnJSeR4nCqHA8NZzm1l0SifufdemvbsoQQodrkY9/l49bHHqF64kEVr1nDg+HEqslkMqRQOp5PR\nsTFqtVr8oRA9HR1MKgqqoiLGxsbQaDQXDOkdGxtDUawzQuQD8vNLaG09wZ13XqorMHv09vbSvns3\n68rLUU1na02m0xx55x3+nz/7sxkB4fV6cRZVISoC0cFujDoN40oB/lgAJRShWJwKKz02MkKD201R\nbi6o1WRkGXssxp633+Zz9913zvE93d2Mj48TGRpigcWCMScHfzDI0e5umhob58XI74BGo2HTli1s\n2rLlDCfidVu38t7TT7Nco8Gg09HX1cVoMo2tpAq9dkpg6LU65EgcnaLlrs2byZ0W8mOBAE/v28et\nZ6X8txiNREdHOXr0KFarFb/XS+X0E2d8cpLu1laW22xMACV5eTgLCzm8YwflFRVnlGiIxWI8/bOf\nYY9Guc7pRJJlunbtwjMwwANf+cp8Jeg5yl//NQgC/K//deFtCgpg2TJ46y24447LZ9tsMS9GLoAk\nSfT19REKhbDZbJSXl8+ZiBKj0YjRbmc8FsN9WsREKpMhplbj9XhwyjJulwuASCDAstxcBkdGCITD\nTIyOYtJqyRFFNIC7tJSW0VEiqRTe3j4K9YWUilU8+eQBRPEN7rjjelavXnmOHVPTAefmQEink5jN\nV1ZK8o9L64kTlFksM0IEpnKBOIDmpiZy7XYymQxWq5VIJEBH7wj+wRAWQw6yIuJDJJtJMkkKZyaD\nw+GgdjoXiAAosky508nepibke+4552GiMZno6upim8uFenpdgdmMKxiku6UF7rrrcl2Kq5rT/UYa\nli4lmUxy8M03EdNpmiIREjYHCwprz9gnGgxSUKhBo/5tt2q3WECS8Pn9OE/L+dLc28fbBzoYiO0l\nHo/j93WyqkDHrWtWMzg6ilOtnsr3w1QEmFajoUCtpqOt7Qwx0tTYiCkcZsF07hoNsMTt5nBvL319\nfVed8/nVwEsvwVNPwdGj8FGPlM9/fmr7eTFyjRIKhXjssWfxeiWmsnDEKC7W8fDD982JxF9Wq5X1\nN9/M7scfRyUIOM1mouk0+4eGaLjzTsLj49ScNsQvSxJqUcQmSUxGozjMZgYFgYyioJJlqvLycNvt\n7Dx1CkPhItasfwCDYeo80+kkL720l+LiIlzT4uYDSkpKyM0VCATGsNunnOxkWWZ8vIt7772KvK5O\nI5tOoz3P2+aYP8Dux17BWbgIQVAjy35OnDiGKK7GmFsKqRQWfQnDviPUVJRi1Wu4e/Nm2vbvB6br\n0aTTLC0pmZpHF8WZB2I6nSaTyWA0Gil2u5EzmakIjWk7IvE45pwcMqfl0Jjn4rJm7VqWr1hBKBRi\nwdq1vP3EMwTCoxTap0aiUtkMoWyE60sLZqZjAFQqFZb8fE6NjJCfl4coioxPTvLsnnZU1kX09kpA\nLolELY83v4ZWPVW6IZvNMuj3Yy8tnYny0qhUZE/LGwQw3NND/nn6JJsoMjY6Oi9G5hijo/CNb8Cr\nr8LHCXa7+Wb4l3+59HbNBebFyHnYseMtJietuN2/vZE9ng5ef/0d7rvvc5fFhmQySXd3N+FgkHyn\nk8rKypmRmXQ6TX1DA80NDexuaSEyMEA0EqGyqgp1NsvY+DhFFstv56idTlq6ukgw9RbvrqxkoLeX\no14vy/Lz8UajjMXjjOj11NdumBEiAFqtHq22iMbG1nPEiEql4qGH7ubxx19gYGAYQdChKEE2bKhl\nxYrlF/2aBAIBerq7kSSJispKnNM1PD4ukUiE9rY2oqEQxW43VVVVqNWf7BZYsHgxB5qazsjrEU0k\neLdliPWf+TqFhcUAjI8PEo0eIy8vg8ZuJDSZYjg0hDEnl9Kltdx2yyY69+7FL8vI4+NERZGc0lJK\nCwro8nioX72aZDKMaeRFAAAgAElEQVTJu2+8Qefx4wiyjLmggGXXXUfhwoUMjY+jzmaRFQXBZKK6\noYGJjxH5NM+H84GTtyRJlJSWEolEGB0awmKzUVdfT15eHpu2bqW7vR3v2+/TMTiGjJmIFGH5dS7K\niqaEg6IotPf3c6yxkXA8TpdGw+D+/Sx0uzne3Y9kLkeWc8l3VEyLziIUJcW7vS0sry3Gn05z08qV\nuKen3RRFYSyZZGXtmaMxVrudUG8vZ+fZTcgy5o/w+Zrn8qIo8Ad/MCVGLhCpfw61tVORNX19cLXP\nwF5zYkSSJJqamjl2rA1Jkli+vI5ly5bORCBEo1Ha24cpLd14xn5FRQtoatrLnXemLnnIpNfr5blf\n/hJ9OIxRFGmSZd53uVi6bh39XV0cef99irVaGiwWPOk0Xr+fpRUV5NlsFEkSXX4/u/v7uXvDBvRa\nLXl5eWRtNvonJlguy4h6PXJBAeacHLIOB72pFJmiIupsefT2jiBJbbjdpTMOrBqNnmg0fl5bnU4n\nf/InX2dgYIBEIkFhYSF5eXkX/ZocOXyYfa+8gl1RUIkih2SZpTfcwJZt2z7W/v39/bz82GPkZrMY\n1Wq6du/mkNvNfQ8/PFPlNp1Oc+rUKUb6+7HY7SxctOgcJ9L6+npa6+o41tFBcU4OWUniYG8vtqIl\nM0JkcjLI0QMHCPvSqLOjuKvKaGiox+ncjE6nIZls4eZbb6V+8WL27d7N+2+8QbHZTEJS+PeX3kJj\nt/H1W/L46X/8B+NHjlFgtFDsykcXibD3pZcoXrgQi9NJgcmEShQxmc0cHxpi7e23X9yLfo3xm1de\nYccTT2FKpLHmmOjwjlGen8/y2lqGMhkO7NzJ7Q89RH93N8lgkOIyJx6vl5I6N/d+8Q+or6/nuSee\n4OjAAF6Ph5PHjmHVaFi/bh21lZW0DA2hdrsxxhRGWkcxGrOYzXGMRhMAFosDs7mSL3/7EdoaG/Ge\nPIkhGERRFIYiEUpXrqT8tEJ/AA0rVrD9wAGcicTMy8dEMEjMaKRmOp/QPHODN9+cio555pmPv48g\nwA03wLvvwte+dulsmwvMWmivIAhrgX8GZOCIoih/etb6ix7aqygK27e/wMmTEzim30j8/kEWLDDw\n8MNfQKPREAwG+dGPHqesbMM5+w8O7uF73/sGJpPpotr1AS0tLRzevZvdr79OicnExpUrser1jHg8\nvLlnD6q8PPJMJoSxMcwmE+OA1NdHkUpFymik3O2mPRbjug0b2NXTQ7HLhUUQyMgyhqIiSqqrObp3\nL/FIhLoVK9DpDXR19KLXqxkZi5JKWWlr82A0LgDCrF/fQH5+Pv39x7n//rU0NDRckvP+gAuF+vl8\nPp74l39hdVHRTLRBVpI4PDjIHd/85jkd9Nlks1l+8qMfUa/VzkREADQPDFA+LWhisRhP//zn4PXi\n0OuJZzKMShJ169djy8nBWVg4MzqVyWRob2+ns6kJtVYLWi2HD4coL19MIpFg12uvkfSP097fgdVU\nRF15CSq7nY033sjExABr1uRy222/TSoQDAb5px/9Jx0dYbQaAxrS9A93IY10cuPC61CrtSSTISwW\nmbKacqipITAxQdfR42hkGZ0jly2f+xxbP/OZM3wdrhTmQojnO++8y3/8j39ksa0Mo95I+0A7xugI\n5aUuVm7disPhwB8O81p7O/V5eSx1u9Go1cSSSV49coSYIIAM5rx8VBqR9154gWUmEzUlJSQFgbBW\nS0N9Pc8cOEFB9SZ2725Cq10OZCkvL8Jmy8HvP0lFhZFvf/sO6urq6Ojo4FRjIwD1y5ZRW1t7hg+R\nz+ejp7ub3p4e+pqbsavVyICYm8sd999PcXHx7FzMj8lcaPfLhSzDihXwN38Dd9/9yfb96U9h376p\nXCRXOnM1tLcfuEFRlLQgCE8IgrBYUZSWS3rA/n4aG8eoqFg702lbrQ56eo7R0dHB4sWLycnJweHQ\nEw77sVp/OxQ/OemluNh+yYTIgX37OPrqqxTrdNRLErZ0mt+89hpWtRoxGkUZGeHU0BD2nBweWLoU\nbzTKRFsbC7Ra7AYDg/E4akGgRK2mZ3CQ6uJibnvkEQB0Oh3JZJKXf/lLygUB0WLhpcefJyQ7WbHu\nBg4ePkEiEWHbttXE4xlGRsbRavM4dOgIixYV43ZrqTstz8LlprOjg3xRnBEiMFUQzmUw0N7U9JFi\nZGRkBHU0iu2snCkLCgs5efQoW7ZtY/+ePQgjI1S7XBh1OsLhMN27dvH8/v1s27CBFlnmwGkjKQ0N\nDTPirLe3l337nkeWJbq7upjo66NUr6fYKJPMDjDUn0Dtz6Ep30RFhZbrrrv5DDsGBweJRE0YJC+5\nMT8GlYaeviGITpJOJ8mxOjAZLfgDo0T8k/QcPY5ocqNybSIry0hCHK8vjCzLc8bJ+koikUjw1BMv\n4jYXkZc7NfWXTqcoU+cghSOMeTw4HA5Mej0TnZ3cVFFBatp343hrKxNHj5NMKdgLauh4r4Xu2Ci1\nWigzGFCCQRZUVDAejfLm7r3odYVUV6+gt7eX/v5uzOZq+vu7KS7WUViox2bTUFJSgkqloqamBq1W\nSzwex2aznSFEDuzfz6HXXiOPqQ5eD+QuXMjGzZspKiqaj6KZY7zwAuh0n86/fO3aa8NvZNbEiKIo\n3tP+zQDZS33M3t4BtNq8c94eTSYnHR19LF68GEEQuOuuz/Dooy8Ti7kwm+1EIn5gjAcf/PwlsSuZ\nTHLorbdYU1rK5OQkg34/A+k0w6OjqHJyGAtmiCYsRCU1neOT+Ly7WFyQS3YyiFdUkwlGGVeBNDJC\nPJ0mHA6zwGqloKAAg8GAoij89J//mXqzGYfVys7DjdgM9dgEA76xSSTJjtVazcmTh9m06bMMD3fT\n39+H39/Dhg0r2bZt66wm0spmMqjO88avPo9D3/mQZfm8IwaCICBLEuFwmO2PPoo1EqGzqQlZq2V8\nZIQcWUbUaNBqNKwuKqJ1cJCdO3ZQVVuLSqXCZDLx9tv76O+foL+/i/f3HCE4GkMTT9Cvz5KRQ9iN\ndgJxH0PDXRRPCnzzH//tnKmf9vZe/CNDlGYzFNgLCYXD5BvyCcYDePpayctzISBgsdhp7GzFay1l\nw6Zl+H0ewj4PGoOZgwf7Wby49ZKPXl2NjIyMkM1o0KmnvuPJVJxEMgpqHeFQFCk71TUlUimikQhv\n7t2LQaUilsnQ3tlJeUxNWBEZiXgozrWTiI3TH46Qp9OijcfRWSzkOxz4mjsw1lRiNFq59dYH+M1v\nnmR09DiSlKWwsAGn08hNN60gJycHn8/HY489RyAgIAh6FCXE0qVlfP7zt+P3+zm0YwcrCwsZ8fsZ\n83rRa7V0HTjA+o0bzytEJiYmOLR3L33t7ZisVlZs2MDSZcuuyJG0K5F//Vf47nenpl0+KYsWwdAQ\nhEJwVsWKq4pZ9xkRBKEByFcU5dSlPpZer0OWz314ZbNpjMbfhshWVFTwne88yOHDx/F4Rqmvd7Jm\nzY2XxBcCppzm9JKEoigcbGxEjERwATbg1WE/MbmCKrWJpCKRlacKtgUne7GIAg5nOQMRH/3JOAui\nUey2HCSdjvHRUfx+PyUlJUxMTJAJBnGUlpKVJPpGQ+TnVIEAnWOjpNMK2axCNNrPxMQw5eX1lJfX\nMzi4lw0b1s968qSKqioa33yTKlk+o6MdiUa5YdGij9zf5XKR0umInjavnpUkujweajZs4NnHHsPi\n97OmoABfLMaBlhbSgQALXC7iwSB7DhzAftNNJKJRXvynf2JRbR2pbJbGoUkWrvg8NTXrGOkZoVjq\nIxlpxS4IBOMq9MbFVNndLHAIdPp8ZKQ8BgYGz8lMq9WKRMYGyCudck4UBAGTxsCk3kwwFiKRjGPU\nm4gkopyaDKASKnjhFz8iT05R5ixC0OsYS0TZsUOcFyOfArVaTY49D58vACPdxMeH0ScTDMfGUIQs\nS6e/Mx1DQwTGx9lQWEipxULv0BAdXh8ThiJErQmjYGIoECARj+CQs8SCQbJqNe+fOsVNK1YQySQp\ncS9Co9Gi0Wi5++6v0tfXRmvr+2zc6OaGG65jwYIFKIrCU0+9RCrlwu2emmpRFIUTJ05QVHSQnq5O\nOo8fZ4/XS54oUldQgKxSMRwOs+PFF/mDP/qjM87P5/Px9I9/jEtRWOFwkEil2L99Oz6vl22f/exl\nv97XGkePTomJz33K2Ae1GpYvh2PHYOvWi2vbXGJWxYggCHbgP4BzszsB3//+92f+3rJlC1u2bPmd\njldfX8vOnYdIJuPo9VPptjOZNKnUKA0NZ2Y2LCgo4PbbL8+NajAYSCkKPR4P2kgEs83Goc5OUokE\nyYwBgwrGUikKgbQgIGPCr2iQpThvjfQgiwIVgkggEsWXTLBgxQpuqKvjrZdf5ivf/jaCIJBKpxmf\nnER7VpK0QHCUUDJLOGxDEFS8//4+GhoCOBwFlJXZP1WNmY9DIBDg+PFGRkd9uFz5rFix9ILblpSU\nULVuHYcPHKDEbEYligyHQuQtXkx1dfVHHkur1XLTvfey88knsWWz9A0O0jcwgGI0slivRwwEWLF4\nMb7ublpHR1mk1eKRFXrHA+gsJoplmad37qS5pY1QTEM4WoRGb2LEL5HJHCAWm2CwZT+GdJJSETKZ\nCGm5GJMGgskkGiCpUlFYWMeePUdZvnzZGfY1NCwingzQ1t9GLB5EpdIQTqXJN1mZ0JloCvkxxCOM\nhMYw51rwDp6iJBHDrjYSGhihuLKKWqOFrqOHyGQy81VePyHFxcUUFVk5PmhivOMYC612SnLyOBKb\npMhiormpiZCisH3nTspzc+no6qJTrycdS2AV1fTGJzCLVuxSgmDGT34GEiozGSWXWHSSRGyCp44c\nIXdxA6JGpK+3BUmWCPs8DHcchdQoh9+IE/EOsunmmyksKsLrTc4IEZgSqIWFNfzqseexRUbIer0o\nvgBjkoqAP8TWJfUssVho3b+f2COPnDGdfPD99ylSFCqm85PotVpWGo0c2LePVevWzVd3vsT87Gfw\nrW9NiYpPy+rVcPjwvBi5JAiCoAaeAP5cUZTx821zuhi5GNjtdu69dyvPP/8OkpSDIIhAkDvvXH/R\nnb2SySTj4+M0N7dx8mQnkiSxcuVCNm26DovFgizLDAwMcOzYCWKxKDFRpL+5mbTPhzmZZIvLRefA\nAEpWYEwaJyrZSar0+KUEZnzYyOJAJECWsCxQIGoQFcgIAt6uLjobGxGrqwmFQjQ2trC7aYijqVEM\neogngyjKCMmMQiAaJS9/EZHIMKIoolZXcvDgQW64oYp77vnWRb0mHzA0NMTPf/4CslyAyWSjq8vD\nvn1NF9xeEARuueMOuurqaDt5krQksXHJEurr6z+2j0RdXR22b3+bf/27v0OfTnPnpk2UuFwca23l\nRFsbS2+7jf7BQUKBAPqkTDytJSBpKMx10doxyBHfEIasngVGC+nxXgYlNaKpgd7WvSQH9mJNBCnR\nG0iqNAgqPeGkjkA0SlajQa3WENFZ6Onx0d5+EptNy8KF9ZSVleF0OrHZbORYRNJdzZSb80jLUVLp\nSXwCTIpOkjEZozFOjk2kPKui19uO1eBCLaeRJImO1ibKFhRRUe3A4/F8pA/NtUAkEqGrq4tMJktZ\nWekZicLORq1W89BDn6P58H50eU66o2Gy2STLljWwYuUyjnd1cWxkhJV5eVzvdjPh8zEyPMyOsTGU\ndBaTIpJVvHTJWXKkBGnRSEY0I8gq9GoHitrCmJKgUFAYO/kKoZTAkN+HJZ3AYdCwsqKMWCBA46s7\nGGtpwb1+PaJ4btJASVLo7+jkS+sX8devH0SUS9GJVpLZCN0He7mpzk6F283AwAALFy6c2W+gs5Nl\nZ9W1UatUWBQFr9c7L0YuIek0PP88HD/+u33OmjXw619fHJvmKrM5MnIfsAr44fS85fcURTl4KQ+o\nKAp6vY6ysjyGhkaoqCji5pu/+KEd1SdFkiR27drDe++d4NixDiKRNA0NK6irW82hQ4N0dj7N1752\nPy+99Do7XnwLZcJHjiKTUCL0B73YgkE25eaSFARclZWMdQ1TkIKkGCehaFALASoVAZUAhYKaqAxN\nyGhUWvLVIqJaIZTN0tPcTDQa5Z//4Qec6kmzdOU9dLe0IaVSxJIifeP7CWe0ROM2YvEJBEEkN1cm\nLy9CVdVq1qxxX5LCXIqi8PLLb2Ew1JCbO5UdITe3AL9/9EP3EwSBmpqa3ylc0efzoYtEsektjI5M\noNVoKC8qoqelhYGxMVatX09jcztqkxmDLo0BHWq1EX8ghSqVpUbvoMg6lalIHh+hOfw+VSoj2fAk\nhUi4rAb8iTBpiwWtkkSfseNXBMzmIurq1+L1niQWU/GTnxxm2bJJcnP3cf31i5DTce5YvZoxi4W0\n30+eSoUurPDORIjl192FKFrpaz9Be2sLDk2IXCFBJOlF0ecSTMSZTCZRNElSk0Pws5+x7dZbWbps\n2SVztr7cJJNJTpxopLm5E51Oy+rVU0L0Qv4Ora1tbN/+JpJkQxDUKMpB1q+v4bbbbj4jguP0/UtL\nS1m1op4R/zBmtRqL3oGiVZEcH6e4oICUJGFVFBLpNPn5+XSMT1CoiOjUOcSVNKIsIUlJfEoKUc7F\noTcgq9SodFryzXrGUsNEugfIUVlQ5ZhxCWnq8ixMRKP4x7yEAhEyEgyndXQMvoyjbjlO5yK02t9O\nkfb3d1CZb+H1Y+2khCU4BAd6lRqD6MQb83LI5+f3rNZzfeIsFmLx+BkO4ABpRZkJa5/n0vD661M+\nHx9Rb/QjWbkSvve9i2PTXGU2HVifBp6+nMd8++1dvPNOKzk5FVitRXR3ewgGd/D1rz940Tru99/f\nxzvvdKLRVKIoAiUlLnp7O9Bq26ivX8nAQCPbtz/LazsOo4wOU51XgjPHgSRlkTOHCIfDiDk5JKJJ\nZAmsBpFQOo4CRNBSpKTRqBKYFAlZBhCxAKNSEpdWj06lJq4odI+NoddqGRIs6OVKOk80UrdyJbIk\nYY9WY/Ia8XqDhMM15Oe7sNlyEAQIBE7gcn2yZGKfhHA4zNhYhLKyM6dlPsjgerGPNTQ0hCAIuFwu\nfvX4MwS6JtAXLUCWJQ4f7sbtziEnN5eOvj5qSkqQdSaygg5BZaTevYTx8Qlichyr3oqg1yGIIvFY\njHy1AWvKg1ptQaU3ISezBENBbBYNCb2eNU4Hr3UGUOeUUVBSS3//MXy+Vhoa7kOSoKWlB4vFwp49\nT1DtSvHQ2tU4c3Px+f2ko1GGmjpZXr+QPFcpXY2dLHVVEPZ4CGbaWeMsYGR8nHgqi06dj1VvIJNU\nE1LraT/QSV4ySeP+/Tzw9a9f8W+9qVSKRx99muFhBbu9hGw2w+OP72LDhgHuuOOWc7aPRqP8+tdv\n4nAsn0neJ8sSe/cepqzMxdDQCAcPNgMC9fUVbNt2PQUFBSiKwoTPRzabRWcxkxUECs1mgmNjtKlU\nrKutpbioiM4jRyiUZXpGxnHrTQzFExhttYhqPZmIl5GkF4PFhKjVUVbkwqDT0T05xkQkyfKichRB\nh8acgyEYIjDuJZOM4fWHcZpzUUjTOzFCTvECQiEf/f0HsdurMRgsBINetNpxHAU2DjaN4C7eSMTn\nBwSQJeyGcsLZNKOZzMzIWCAQQFEUlm/YwN6nnybHZEI9PZI4PDEBublEo1Gam5unsylfminZa5kn\nn4QHH/zdP6eyEiYmrm4n1ll3YL1cBAIBdu9uwu1ej0o1ddoWSy6Dgy0cP36S668/N6/IJyWTybBn\nz0mKi1fS0zOAWm1CpdJgs9XQ3X2c6uoG1GoTTz/6Y4z+KC4BenwjHFckKkrrsOhyGRcGOT7oJUdr\nQlSryRjtCEkPsXSCeHaCAtLkIBJGYRAFPSCh0CODOZ3ClM3gE0UCBgOfq67muD+FwypiUanpaW3j\n+s98BlEUmPA3UVlZxciIjN3umHmbEoQChoZaqa+//ne+Hqfj9Xo5dvAgfZ2ddLZ1YTYvPMOR82Ln\nGzh86BD7duzApijIikK7z0+/X48zpwCDfkp4GgwWBgZ6KawsBYuFQ14vEYOBvf4Y+WojoeF+4iqR\nSYOZFVYj8XiWQDxOOBojJWVRIyGrAhh0FgJZSKQjONNG/J4Q6nCYXKuGnolGhOQ4OQYVBgrpOdVL\nLC0hijHWrFlJOi1y4NhzBJpbWFheCoJAWhBQNFZktZZoNIZOymLQ5VCY56K3p4XNRbl0BoMEYioK\njQK+RBzRrKdh0RYy2SSKpGCPRtm/Zw+3XuHVChsbmxgeVigv/614tdnyOXjwIKtXL6ew8EwR29PT\nQyaTc0YWYVFUoVLl8Wf/7/cwZATMFgdmp5tMJkpPzzP84R9+iWQySdDjYTwYJDM+jlGtplVRUFmt\niGVlJNRqXMXFSJJE89GjhDJpbIBoyUWnU5DkNDarCRM6wlYj2bREWIozHIvTm5WwW/LRa4wkZAW9\nzkBEo0NKycSiUWS9BRIxJJWaXL0Oz0gfzqpFLFvm5Pjh9/GOjFOzsJKvfe0LPPerXxGKREiLg8TT\ncWKygigasdmKmUymuP6226aiw37xC0IjIwiAIS+PvIYG9re1YRUEUrJMWK1GiUbZ9+STqIG3geVb\nt7J569b5CJuLRDIJO3fCj3/8u3+WSjU1wtLSAht+90fVnOSaESMejwewzQiRD7DbS9i9+yBdXYP0\n9XnQakUqK4toaFhMVVUVRqPxYx8jkUiQzQpotXr0eh2S5AdApdIiyxrS6SRdrfspzqTRav5/9t48\nyLKzPPP8ne3u+5b7nllZqk2q0lJSgTYkIQzIBowHA27sxh3ydHvcETi6e6L7jwm7Y9rj9vTg8LTD\nHtvCAgMyq8CAVJJAa2mrfc2qzMp9v3n35Zx79nPmjyxKSAgMDskIrCfiRuS9ee53MuOc833v977P\n+zwBas0KUdvC6LQ4X1zBkhXauESTXWQlGRfQLRPVdvilZAJfFjlfLlNyXZrABBAAmsCwKLIBNHwf\nNxhkuFBgpVZDLNepF+voSggz3o+q3owsC9i2xshIH8GgzszMZXw/jCAIGMYme/fGGX+Ny+hPina7\nzYXz5ylvbJDt7mbvvn00m02+/jd/Q78ssyseZ5o2zz76MLfc/f6ri8nW1uI/6Xyvh/X1dV7+h3/g\nYF8fwStkzqXVMp2qSz0RotSqongSHVWl0m7RTEv8H3/wB3Q6HU5eWiAaSZEIR9F0Fd2X8aMVgmGd\noAyXl5dQbA8HCV0R6U4EUGWPhuAhui7L7TZtQaBXEAgrCjsiIqFQlkBCoVG3sBsl2qZBOg1nXvo2\nQcmFRh3ZbhCSIBqNcnFtjZcrDUh1M+GFCNkBfD9DMhFkSfZ5rtFAUIIU5QJ6NI0lWBzad4hULEu7\n06DaLHFgYoTHnnmGUk1jYWGDWCzMO9+5n4MHb/q50iK5eHGBVOrVNgSiKAFpVldXfygYcV0XQXj1\n/2dZFidffAlno8qdN24zACv1MmvtOn07b+Do0ZPEYiHMzU0+vGcPxXqdaq3GgO9zsdnEXltjWhBY\nOH+eQr6HGSvEkhtBE1xGYj0M5XsRBOjYBsVQBymbxw3kuFAt4nsaqlFEFPKcW1tk9/guupM5VkQR\nU2vhOg5dpk7b6HDGdchGEniBIMVTz/LN9eP82u23kx++lrPz8/yXf/NvqDebWI1lQCEf6MeRFUzF\nxZSbjAzmaTQa/F//+T9zXSbDofFxRFGk0mwyMzfHr95/P6ZpAvDIQw+xN5+/KgLouC7Hvvtd+gYH\nfyJS+Nv4x/Hcc7B3L7xR1e59++DcubeDkZ97qKrK7OxZLl26TDAYZGxsgoGBHZTLa5w/P8OhQzuo\n13NcurTIo4/OMTp6lh07uvn4x9//Ey/M0WiUUEigWq0QiYQRBA3D0JBlCVl2MU2D9tYlfvmGa/nW\nd18goas09TaDrssQPoZpccKx0fKDVINhHNtkzVDpCYaR9Q6FaIRaOMyipjEMOFdcPdueR9b3iYgi\nwXSaGU0jK0nckMngJxKcnS+jSDLH187x+ON/j6ZpFAoyly4do1DYi21rtNsNfN9Flle4557fudpC\nu7S0xNGjp6nX24yN9XPTTddfNe76Qei6zveeeIIv/+VfEgd2jI5SS6U4+fTTEAgwEYnQfSUT8qFb\nb+Irz5zgxWe+zg2H7sbzVAqFV3ZjrVaLVqtFKpX6JxkTXjhzhpwgXHW1BUhEQsiWiUYP3zz3HCG9\nTjoSQsVlj5ugWq1y6exZ3rt7nLNzmzQ0i0wiQ9RpUkn5PLlWQWwb5D2TuGBSFkRSkTRbgk+6WWEn\n4IWDqLZFJBJBSSbp1TTslE/dKLO+JiLYNrLfS0BoExOiRDoadb/GdekkWTvIzOoqng84Hr2egKYJ\nNI8/xXo4TLk+RrneIdd9kI12ibq1TDyXpLdvL512m0xmm8vSMVtkEyG++L2n+cbzl0gmlsn3DHHD\nwf18+9vnKRarfOhDPz+y8aFQANs2X+c3DuVyma994QvUymV6h4e58dAhBgYGgOdwXefqxmNjfR2z\ntspornB1159PZGjXNjF0jfn5NfoLUbLBIAv1JtMVE92K0a5vEfZURicn+cgdd/B///03eWp6g5AU\nR45ey6bapNPYxNAtUsk4y3qJkQOT3P2hD/Lk499jbnGOgNqkIMO661APp1goLlOy2sxWGqxaEr1S\nEMdzAdgnisxaOmOZbkS9wbAbpFKr0VJVVs+fZ6hYJG1Z7Myneam+iCFLSFKWZFRmqXiCcGyC//qH\nD5GqLqHmk2wsrnPbbTeTSyaptNssLy5y6+23MzU1RdyyXqVGLEsSw4kE544ffzsYeYNw+DD80g9X\nEv/J2LcPrgjy/kLiX0QwUqvVeOKJo5TLIqnUOLYtcOLENM1m/Ur55BbK5RZnz67Q07OTQmEXzeZJ\nQqFJHnroEf7jf7x/20L85RPMzCyTTMY4dGg/k68xrTIMA8tSeeKJvyce34HjmNTrp4AWO3bk6XSm\n2HPNMLFclm8SXyQAACAASURBVJbTotZuMOLaxGSZju8RliV2IbDRrDJ0zc2cXbxAFogZHWKRMNlC\ngYGtLRqmCb5PQxDYmcngCwKb7TaRWIzkzp1MmCYFyyJ9hZx2zaDN2fkVBLVNq1LjXe/5GH193fzD\nP/wtJ09+nX373kcms50VKRT2cOTIeQ4dOsTU1CW+/vUjxGLDhMN9HDmyyYkTX+D++3/9VeRW0zT5\nsz/+Y05/5Sv0WhbhUIilapXK2BhduRzPTk1x6Nd//erxiWiU33rPO/nW2bPcfHOK8fEDjI2N8alP\n3c83vvEdTp6cRRQj+H6HW27Zzb333vUT7+YXFxc5/PDDhObnWY3HSRUK9PT3kwyJzK9P41fDREM3\nEoh51IwSfbkKv3pgP0989asYhsEdQ0PsHBxktVSiqXWotSRm1+NE49fjuW1ajk5HKrM/oCK4cFZX\n6ZEVVNdg1/g4bqOB5Dgc39ggCjQsi9FehaXKFqlogoZbJa6kMAybqGQg28v0hQcQfZtIPM50tcFk\neoCo77Poymg6VLQt5qstdo0dYKR/BFXvRzQncMQNhvaOUVmroXZ0HLeDIlc4OdPghaNTpIMT9ISz\n2OU23/vWd7nvIx/k5Mk5br21jKIozF6+jG1ZDA4P09fX95ZMz99ww17Onj1MJtON63p0Oh08z6ZW\nucjFpy8ymU4zGY1SPn+eL505w4fvv5877tjHk08eIx4fRJYDLMwdZyBnkiaEYXYIBsMICOA4XJ46\nSj6/m3Ykj5Iv8N1j6wTFJFVVpdkOYwsOXR5sVCpkEhPkGxqDhQL5XJYzc4usbIrM+2vIuLz3Nz7A\nv/3df8sXv/hl7KUq+3r3gqri2Boho8KmIrPkQWm+iUQ3ffE8KcmkYy8z4TToCoSwDJ358ho7M1mo\nqXznkUcJiwLjjoNgWZiWxU0jIyTCTc53NhH9Mo4jsiObREpcj+C5RBob1NY2MSqb1Irr/NKvvJ9E\nKESttN20aFkWr9f8HQwEaL7t/PyG4fDhbc7IG4Vrr4W//2dlWf7z4hciGLFtm7W1NWBb4Oq1RnYv\nvHAU3+/hnnuu4eWXz2GaAQQhx4kTR0gkPNbWGszNTWGaSer1Rbq70wSDEXzfwzRjnD59mmeeOYVp\n5shkJtjaUnnwwSd473vL3HbbK4Z6X/vat2k0ohQKKWZnXwJkQiGHj33sLg4dOkihUODTf/iHHHv2\nWQqGQcm12PB8BMsGwWfJE9nwYxhNn+LLT7EvHkYOhHC1JglFRm008DyPlCwj+T5hQSDousRCIZRI\nhK2uLu694w6eXVhA3thgrlSiVi6jtlqopkM2VqAVDrCyskGlUiOZnCSbNQkG14hGE0SjaeLxDOXy\nBlNTU3znO0fo7j5AsbjMuXOncF2XSETm8OEn+Y3f+F+AbXXTxx97jLnHHqPbshiNxUCWObaxweXV\nVa7buZPm/DzffOopfunWW68y+iVRJJ/Lcdtt7yTxA26zJ06UGRh4B6Io4boOR46cJRx+gTvvvO0f\nvQ/W19f5h898hslEgnIohFRr8MKpC2w6Di1foG4nCXmzJGKDgE8kpBMMSMTDYYLtNk3LwrAsoqEQ\nIz09OK7L3z3+Ejg5Ygr0dMfxVRVByKIL84yLBpfbbbryGTKJfiKizHy5RthxCeETk0Rcw6C4topo\nd9gXT9MJmzy/cZGAI9Iblgh5BsVakd6AgIeAhMRRrcNlXUBHJCKFiQojhAIh1rc2adQuIYVjJHv3\nkIh14Xuz2GKbsxsbDKdD5GMyz7x0DsV16aWBXFPRBYGQ0sXR51/i5lt38eLzz7N46hQZ30cWRU46\nDiM33cR7f/mX33Iy4mNjY9x11x4+9+CXKK60UHwHmyqFhMeBu+4iecXMMRYOE6pWeeaxx/jYJz/J\nyMggp05dQNfb3Hr7ILOPnENdmqG9uowST6IpIUpby8QTUWIbcU5cOs+xi0uk8xPMrzbR2nkkv4uO\nr/L4S3MkQjF8vxfPqDC3Os2xWQfXEwgGRW7ZvYuxvZP8/n/9AzY2Njhx5Ay7usaYPXcRyQUIs6la\nLLUvUve7EBkASSVRyNMVybNV92n6bQq+het75HqGcQSBjZZGtVFjNBYiHA4j+D6GaVLXNJKiSFBw\nGMsmqPlxpnWXntQIK5cPE+zUGYukwTcRqnVe+t73CA0Pc+311wPbc+QR38d7rYhgvc6OW275GVzl\nXzwsLkK9vi1W9kZh7144f37b5+Yt9pi+Ifi5D0bm5ub48pcPo+vbi1wwaPLhD9/DNddcc/WY6ell\nMpndhEIR3v3ud1Iul1lcXKbRiDA9fY5otBtJChCNZlGUIBsbFbLZ6pW2OpmjR0+h61kgzIULsyiK\nTE/PCN/97nEOHLiOWCxGuVzm5MkZ5uc1JGmcPXtuxDCarK6+zOGvPEysUeXc5cvItRqS7SC7ElkE\ncqLAmu9R80OU3X7ChPBRMRsGp9UmCirdnkuq0yHhOGi+z5brIgsCKc/nfMsg2rao+xZdO3dS7XQ4\neOed1GZmUKemSEYiTPT1MbNe5sxKh7WlJsGUhKYJzM6uEIl0MTAwyPz8HLOzKr4fwLabSFKNcHiA\n1dWXmZ+vEY0OEI0mKZc3+Pznv8lAf4ELL79Mp93m8COP0LW2htnpsC5JLPs+mVCILlEkJwhc19VF\nZ2WFY+fPc9uVCXGhWKR3cvJVgQhAf/+uK5wAkCSZvr7dPP/8SW677R0/NjuytrbGn/7J/0t9dpGx\n3jzzqsb6zAK+rpL1PTw/QLcQpOKvMDIxTCwUJRUfodZeodxs4vs+8Z4evvn44wzGYuR7ekjk81Sa\nNqF0DqNcxnI86ppOJqigCTLBVJiA7yNlM2yUK0iVJrZlgi9giBJ6x6TsuAgEKSLy8OVNImHokiSC\nrkraELAEkZlmkUo4jKBbnLMUfHcclzyykMDwymj2FPnAIGElQbnhkDUKXC5dpONVUdfyfOCde1mQ\n41TrdVbKNnJHY2cgTkQOE1Ii2J7DrLFOcV1A07o4/fQp7hofJ3JFWdfzPI69/DIzk5Ovem7eChAE\ngUQ8wt6cxz2DBYKBAEFlgscef5yly5evLrAA3ZkM04uLOI7D+Pg44+PjWJbFX/yP/4HpOOwcGcSo\nNpgvrbNSK9Ff6OLWWw7QKhZJ1+s011ZY8CWi8l4CYQnd6BALF9Asi0eefQ5LmWCt3AKhh1BwiHQ8\nQ0Pd4PkL0xRGtnWKFhaWcd0A5xcuoasu2XCKqlFjrqUTpIc4A0ik6bg6FzZLDO2epLdnhHppE93V\nsJQYJVtmpgWqKmGQpGyqiIk462oHV/R4bHGR3mAQU1LY0ktsUkbNTuC6JlnPxYmkqdoGScFD1A3m\nL11iq1TCTSZpViq870MfYuKWWzj+wguMZjIossxatYqZzZJMpTh+/DiJRILR0dG3BfT+iTh8GN7z\nnjc2aEint19LS9vdNb9o+LkORprNJl/4wiMkk3vJ57d5DLqu8tBDj/Pv/32OfH67jp5IRGi1tlVX\nA4EAzWabUslGlpNks9fS6Tjouocsr5JOT+B5TTzPuMLIr7O1pbG8bNNoCIRCSVzXZnHxEvm8SrFY\nZHx8HE3TWFraRFGuJRbbbo3tdDSCegSzZbGnq4u5M2cYCQQ4rRr44Th2s0rbsegAm8TpJYJKiWEg\nLYWp2j5twWVLlqmZJoKuowEaECbIppwlKiu0HAddlOgrNcm02wxubrJYr9Pe3OTW4WFkUWSlPs0a\ncfozw3SaZTIje+jpGWVm5llOnChSLPooyiAgoOtFZmfbWNZzrK15KMpuqtUykrRJNpthbVXjs//P\np/nobbcip1J8Y24O03VJhkLgeZiGQcJx0KNRWp0OQ5OTCKLIS1NTRFMpXFkm2NvLh19HH/m1BONA\nIIRleRiG8SPbr2dnZ/nc5x7h8ozLaGwfq6UOF9ccNNPgYCCE4HugJBgMDTFXX2VxbYW7brwLAN93\nMC2L6WKRfk3Dcl1OXriAcvYsWjxOLTLINfuv49GHH8XxJKKhLGvtEqZd5mjHoJXLca5SIdVoEvJB\n8EXmbYMt1waCBMigksIjju3E0NorJEId4orCSadDnyTxznicimUxbRm0vAHiwX4kgkjE8fw4BjXq\n+hqauRfXS1HV6jhOEheJy4ubfNlscsdggQFRpJxK0ZJFugMK6+1NAulhFFEm4/tsGUU0bZXJRPBq\nIAIgiiLDqRQXTpx4ywUjvu9z7OmnOTg6elXKv6VpJGMxyqur6Lt2XdXKMG0bORB4VdA6NzdHVNP4\n4L33cvTMGVSg2mkzIKbZf/B6NhfmCbZaKIZBj+BR82RqHZ1gIIznezRbFRTJp215NOwF8AaIBMbw\nbCiWi0SDOqrXxZnZVUzT5MEHv8jTL50noPtEBEipJVpWkRh9RBBoYBMRAsQIsuGbHJmbY9dAASkc\n5HS1ypoUx1bTIPSi+iayJLFpn+Vkc51xQSFuO8iCzZrjsyFJNJI5Gr6AqTWp15boEkUSuUlWy7Os\nGusEjDYTI8McGB3lrvFxpqenefSb3+SDH/kIU8PDnD92DEPXGbz9di5OL/HQQ88BcaBDLvcsv/Vb\nv/Z2y+8/AYcPw8c//saP+33eyNvByFsMU1OXcN0M0egrhMpwOIYgdHHu3BR33XUHAO94xwE+//ln\niMVSuK7H3NwaoVAC3/eIRvdgWW02NuZoNC7jecskEklisSzLy8d417v28thjz1IqCfT0vOJc63kp\nFhYeQdd1YFvdtVarMjj4in9No7RCQlQgkqPW2nZVzSeTmLWzYHoMhZOU1BoV30EgShWVPB5BIUzb\ntokiEfRFDM9CBkaBJLAMnKYb2StgSSGGx4ZJRGMsr55iZyzGjmgUzfdZ9zymTJP5zTLThoxGhJVy\nhYBp0zu0i2DQQ1GKXL4cpKvrVxCEAJal0t9/CMNY4eLFKWKxe0ilBvE8l1LxDMXZ48QDJmWpzdPi\n83imSciVKTs+TsejInmYvo/l+6zrOplUin3XX080GqWRTrPrvvvo6+tjaGjodUsCpqkTDL4ixKRp\nTZLJ0I/savJ9n+9852nS6T309ik4tTrpWA7LKSD5y4wn0zQsg1Q8w1a7RW8kxVR7i+VqBcE1sN0i\nC0aCiCwTqtUYA4ShIRq6zkqziZuWOXPmBMn8CJsbS0StDrZdIynYdDJZhrNZOisrNC2bLSlEHQi7\nAFEqxHFJkWQQiQCikAFfZtG4gODpxBSRsOOw0Wggx+Mk8/1kKlmaXgffFwEbXwCEHJa3QUhIYng1\nIl6IOEFCgQIhUaa4usJmuMw1o6NUOh2isSCe7ZJWfDrmGr4foNbcQOjK0OlIHJs6TcR2uWbnDoQr\n10ASxauGcG8lOI6DoarEfqANPBGNkioUqC4vY1nWVTPI6Y0N9t1556vuq2a9TkwUySYSvPe221B1\nnedPniRcLrO1vo69tkZfJEIyEKAQFDldrhCW+0lGYjgimJ0Wut3AdXV2yD6LnollLSIIYUTRxvc9\nHDfCsZePcfPNd1NZF1G8ARwphmmXqNsOKj4DBIgh0qKO5WeRCKMQZsts0147z2iPRyccZb0eIRga\nwBcCiEqMeDhFvV1D9tfIRiKoHZem55EKdRPKZOnefRuKH+apMy9SrpxAMn2CLuQiYTRERvJD+IEA\n8StiaDv7+3lhaopWq8XevXvZu3cvAN/61qNUKlGGhl6Z47a2lnj44Uf57d9+E1bVX2AYBjz7LHzu\nc2/82Hv3brf3fvCDb/zYP2v8XAcj7baGLP+wgmAwGKHZVK++3717N/fcU+aZZ15CVQUajVl6enq4\n/vq7eOmlS/T07CKfH2Nz80n27NnHzMxFEoktRkcnGBsbJhh8Ac9r4fveFQl50PUaoZCMc2UCTyQS\nTEz0sbo6SzY7gqIE6ahNohhksjEioRB9fX1cnpsDy0QSgliuTVKJEHRt2q6JA0SQMH0dBw8LgQgu\nvUAGaAC6IFDxZWJCCFcUCcoK+UwWtVqlEO2irZlEQiGuGxtjcWoKzRJIF25hwtHpWB4L5UW2aiUu\nXVK59tq9yPI1XL5soygu0CSsVEBrUGnUEEUZSeqgqhVqpUtE2uvkRQHb0lHrFfxyiBdnFukKD9Ay\nOyw5DbrFOCWKCHhEs1nuue8+AoEADVUlOzDAoUOHfiwvYW3tNF1du4jFUrRaNSqVi3z843f9SHJl\nu92mWtUZHEwzND7B2SPPEQooJCJxipUgdVNHDEcoJLOYTom1eoNoppfIQBTbbPKrH/gtevv6ePKB\nB7DLZYaumCEOAiO5HH919jS6FSai9CLHJFa2ZohJHUYLOyg1l8lKNQ4WClRaLeRoN8fmzlMlQpYY\nVaKI7KCKi4tD0F8mjQ2kyfgNIpZNt6KgAm4gQE8sxFrDR04E8UM5Wi0bfBe3VUWhheMcRfYEguIw\nrh8ERDxE4kKWlXIZa8glGYsxPD6KurJOp94mFoaGWUPJJ/jgR/93CoV+jm8WefHF85SLm1yzexe5\nfJ7Vep3r77rrp30E33QoikKmu5tqq0X2B0p6N+zdy9+VSlyoVEg0m7R8n949e3jn7be/6vvZfJ7v\nVSqsrK1td5Hl8xQKBc7Pz+NoGn2KQiwUwvN9IskEuXaDprFCx/ARbI18zKfeatLrOwwmCpiqj+c4\nNEQVSe6i4ZQICDW6RZv67DpRZTem2EETBDzBxxOT6J6AhkeeIH0EWWeBDjEsDDIU6Q7IUNEQxBAh\nKY4ixRFDSeq1Ks32FkG/Q1wIIoYztEyBcDhJd7wHVZHQtRZd/X2Mjk6S6LVoF5eQnS3kgIiw6hLT\ndZZrNTYMg7VKhYn+ftqCwMrKCrZtk8vlcF2Xkydn6Ol5rT/XEIuLz1Ov19/OjvwUOHIE9uyB1/hh\nviHYswe+9a03fty3An6W3jQ9wCPANUDU933vtcc4jsPW1haSJNHV1fVDC9LQUB/PPjsHjLzq806n\nzOjoDT94Lu6++05uvPEACwsLuG6Fa655H7KsMDbWYHZ2mUAgSjqdo902cByN0dG7WFkJ8kd/9ACt\n1jrZbA+12gkEIQWYRCIWe/bseVVN9ZOf/DX++q+fQFXXaLcdEpkgIafD2ECCRCSCCzyzsICoaeCo\nzDouTURyuDhUgRA2Pll8bHyKGGi4WMA62wtk2PfpxUHyK7QcH0WDc1NTDKVShOJh4tHt9Hs+lSLV\n3c3Tp1a48+CtCHaNs1NLOF6ORLJAu73O5qZKNOoQVARk2cBvnadfhHggyppWB6NBS5qhqjWJqGVy\nooJvWQhscm1Y58LURcJiBEOErniK9U6ARqgbVVAoS2X27t2L6/usVyosqCrv+cQn/lGC5Ec/ehtP\nPXWU5eUmXV1pfvM33/0jSweqqnL61CnmZi5im930DfRzzcGDXDx9GickU3E0pgyBnak8teoGolaH\nkMvNd1/H//q7H2BsbAxBELhw4QIbxSIHXuNOvNJokDVNBnuGKPRNMHN5mkHdxZXi5GJJHE0gpOts\ntNv4nsdW6QJ5y6KFh4NHBxsPDxGBEC2SeCgImEg0bJMoHhuOg+X7qKZJJmJgm03KJYV0bw+yX4FW\nkbQ3xw5BRcNCJYbrORTpQ7Ez4KmkgiEM22euWmXnjTcix+MkgkHCzSZDO3Zw/NISIxO3MDg4Sa1W\npK42UMubONVlasVN2tEo+++7jz1XdslvNbzjnns4/OCD7AKyiQSqrjNbqfDb/+E/MDI+jqqqZDKZ\n17V1qJbLzE1P02sYDORyWMvLXNA0WskkWr2Oo6qsaxoVzyNbKHBvt8OT67P4io8iilhOhQmliuxB\nMhzFa63Q9DwEL0rNdXB8HcsvE45KNDyPiBiiaTrYfpGUt05KEPAwKVMjSoEECilC1GgRZ4UubLr0\nBDHPp6NY6M4yTb0H1exD8QxCSOC5IJhonSrhaATFc3Ask81WE9ePUim2aYRtdqa7ueX9n+DYsWMY\nZ8+Si0YpaRqS7+OsrLCytEQzHmfN85hbWmL/vn0EMxnufP/7cV3/Klfr+xAEAUF4ZcP1Nn4yvNEt\nvT+IvXvhv/23N2fsnzV+lpmRGvAu4Bs/6oA/+ZP/j05HAlxyuSAf+cj7XzXhjI+PMzx8jOXl83R1\njSIIIltbi/T0iK+7gCWTSfbv388992zy8ssXGRjYze7du4hE5jl16ikGBoK026u8733/Cl1XOXHi\nOI6TZGNDRhQXOXBgP319wwSDYaLRBPX6WUZGXgmEDh26mVqtwbFjs0AGTRNZv7zI7uHdzKysUFtY\nYCIW52zZIBIK4ugGZVekjEmSOhksEohEkVBw6cLhJFBCph8FG1Ax2CtAyDeZo0lCSNDsdFiVRQ6O\nphjufkXKfcfoKC+ueiy22zQ1lQougUw/gUAWXV+gWrqAKG6RU3IszD9DXqvRCCSpuBpGZ40UKqrm\nYDvLiITRHBlZbDCqNIk4Mnlg1bPQFZ+2pTPRdw2JcJaFSoBQIcnE+9/PhU6H3NAQH3znOxkaGvpH\nb4rrrruW6667Ftd1fyxhdWtri69+5jMkdJ3hgMmll7/L8uwQe2+8EUVRSIc93n3DJOrlixyfP0M8\nGCSSiBPsH4Bqlc//5V/SMUy2mh7dvaMcXWqiyzrv3jmOLIq4nsfFYpGJfB4hJFAtFgmbPuFwEsvU\nKLa2COAQtnxmGi3CrksCARAx8HAQ6UNmiRVidGPhEyaKgoBLHYiwisqA7yMBIwh4epukr1H1LlLZ\nKJMJSQTlGhOiRm8gRqvZ5jI+YVKEWcf02gQkhYZj0lZVAmWF0sUGoZCJHIsR7O/H6e8nJOTZf+O2\nEuvMiSe4dWASo3uYhZWTpEZHkQWBkZ07CbzGu+SfE77vUy6XAcjn86/aeExOTiJ88pM8/8QTnFtZ\nIZJIcOMHP8gNN92EIAh4nve6Cr6apvGFP/9zRkQRyzS5cOkSYiRCdmCAkYMHea7VYrZWo0uSKIRC\nbK2uU9R1cuEAw4MuttmhVWowkYix0GhxrjrDgNMhTZMSYVp+AAEXwYmzZvRT8VZo6yZRKULKqzLq\nK0QR6Edgg00W0AmSwMYhSZNePFJIdDsCniSTkgO4gs954wKGHCEZTGJaTRA1Or6DYrcRwi4dCy43\nDJYlGaml4XlrDAdMetoBohsb1OfmOLhrF8FAgKOPPophmtwcjVIyDMr1OoVAgHilQsp16fV9Hnvo\nIfL5PqrVDXK5VwxDVbVBIiG9KR5Vv8g4fBg+//k3Z+zJye1OHdOE1zSN/txDeKNluH/qP0AQngbu\nem1mRBAE/9OffopodDs1W69v4boLfOpTv/0qcyfDMHjppaMcPz6F53lcf/0uDh06+GO9Zmzb5okn\nnuLlly8CARTF4a67bqJY3OSzn30W0/RZXl6gp+cd9PVN0G7X0bQlyuU5CoUs6XSOnh6J++//MHv2\n7P6h8efn53niiadZWSkRDodQUDnz3HMMuy5zi0WaJZ+0JG/rWPgGDnF05jiIhwaIQBQwgMuEqJBg\nDIUwIjoqORoEgWUhxJbSRTqRZ0tocP/73slt111LOBzGcV2en5tjwQgzOfkejhw5Squ1PWmbpo7v\nX6YXk14ZphsrnD63wKAbREYggEmEBgEcFpFQ5R7yrknS7zAQkEiFFGwB1l2TGV8kNXIdttKD1QYB\nkBMG/+d//9+4++67f9p74SeWhf/iAw8Q29qiP5/HsCweO3aO6ZU2Gx2BXMBmZ3+UXV1JXnz6adRi\nkUXXZe+ePYyOjdFeXaXYVtGjk4RC/aiiQnagj2e/80WuzXfY152j4XkUdR2xXKYvm2VheYtW3SEg\nS6hWFSUcwDc1Bn2RVU3H8R10oImHgYhJEB2ZFiDQS4AM3Yg4NBHZII1GCIt1fDJAAWgDJtsZsGkE\nEoEw6XCcmCmgyBKWYbPpqNQIY6KgBUcRPI+Gs0osPkx39yCC66JEFMb3JvmLv/gjAoEAf/zHf0Wh\ncJBGo8TykW8wGk9jWSbxuM5tt91Cu9NhxvP4nd///Z/qer1REASBv/r0pzGrVQQgkM3ySx/+8BUB\ns1fDcRwkSUIQBFRV5Xvfe5bTp2fwPJ+9e8e4557br5YUvvaVr/C9P/sz7hkevhq0zCwtsdHpMG3b\nxF2XUVEkGY2yvLKO68tctC1S+T5unjiAIjcptitcOnMGwbZRVJUxBCQEjuIjEEZCwaOXtgBLvoCF\nQkLw2CGYpDwXjxoZNJAUTrgaJiAjE0JAQGAXEhEEPFx8WQIlzAVdZYY8spjD82oEadNNh4zgYMsi\nmhChToFg7np8R0DR1xjN1Ll9zxB3v//9fOvJJxHrdbr6+1mbmaGyvEzBdVlptwnKMqlQCC0cploo\n8Huf+ARrlQryrl1MTRdx3QKJRA5Na+A4G3ziE+9900TQfprn/ecFS0tw8CBsbr557be7d8NDD23r\njvy84co1f92a+1uaM/L9QAQgne5iebnI7Ows+/btu/p5KBTizjtv5847b3+9IV4XiqLwvvfdy7ve\ndRuappFIJGg0GjzwwNfQtDzhcARJClCpuDjOLKlUBsfxiEYHcZwy6XQUSfKp1xv4vs/6+jrtdptk\nMsn581P86Z/+HbquMDw8zvBwPx2zio3I+vIqVsNFcUUW9ToBQEbAIEgHGRuLPOABKlAnSIgEEhJR\nfGIIBImi0kHDRZCDhKMyjrdFJupzeWWZucsz9A8NkRod5cb77mOipfGZBz7D7Nnz9IW7cR2L+foy\noihSsjq80CkTdAyi+EhIdCOSxSYBuEAJlw2nRF4KIHgWirhdX3ckhVokQG8yT9eeWzh0x4fpdDps\nbS0wPAzvete73pib4HWgqirlpSUmryxWoUCAX3nH9dy8q86ff/3r7MkNYpXqPHlyik6zxTWpLD2K\nSAQ4/eKL3D05yYvzJfpGc3Snc1RbTXBcbrzzA5x86UtEukUmhgZYW1/Hr1QIOQ59ERFaNbyOQdH3\n6I4kkYGnVI0cIh0gjUcXChI5bCRqGLQwcVlEpk0HnSgOI3jkAQUZBRuD7QB0kO0HMoaEhguWieAB\nUhjXHIqDRgAAIABJREFUaKF4MgoCQcGh4Wu45iUUySEnhkl5ZWRd4fprb0UQRObXVvn61x8mIoDd\nWOaly/O0OxL1i+doKHEEocOtt+67qjXh2fabdr2+j06nw3PPvcjx41P4/vbG4bbbtnkKg7ZN/oq1\naaXZ5OG//Vv+9ac+9UOt37K8PWXZts3f/u2XqFQi9PS8A0EQuHRpieXlL/G7v/ubhMNhpk+eJBmN\nXs2yaJpGbbPIesumJIcIpbqZ6ZRJt7fwUfA8l6QkYmZ6OKU1qJVX6XQqNJwovtFkHyJtPDbxcZEY\nJ4yBzCYiSZL0oLGIgeVvofodTFzCbFs32K7FfkBHRieAT4ctAERkwMOj4gikfYkAIiG2kD2bCHHi\n9LMd1qyDYFF2exgZvRbZ92nVavSEorQ7Ooubm9i2zejICGc3Ngg3m0SjUZxMBq/RIBQOMxQM4gPR\nUIhmp8O5y5cZ7u9Hdxx+7/f+FSdPnmF5eZPJyQw33viRH5Lbfxs/HocPw733vrk6IHv2bJNYfx6D\nkR+Ht3Qw8u1v/9XVn3fsuJ5oNEmr1b76mWmaTE9Ps7CwRjIZY9++3eRyudcb6nURDoevZlleeOEY\nudxeisUSnuciigrRaBeNxjKGUSESydLb200kkuaGG+7GcWy+/e1nOXHiPJWKiyhGmZ4+xcrKBsnk\nzfT1DVGpbDA//xhDQ4O8fHKGdydiWKJFy66TwcfDZRkfSCMTYAMLHTAJYxDCQmIdhyQJ6rRx8Whj\nYwKKEMCLFxiMTrLWbhOJw6/dcw+WbXNsfp7YwABPPv08jz12grXFFo7ms2GexHdrBHBI4RO6sisP\nILAMWLiM4qKwvTjWAQXI4pCIpGh0LOpygHXfoybBfe97HwSDmF0ZVlePIIpw003j3Hvvu34kN8R1\nXaampjh16tK2o+j+nezZs+fqIvOT4PXIrIIg0JVOo5suWlNG9mNkwh6CGWS1VcePGAyKItW6xsNT\nWyzWJdRVlUZnmfG+Lk6fu0Aw2Y8fGGGq5LOolunOh5BzOWZWVhgOh6kgsGLbKIJA3bKwPJ8SISqE\n8ZHo0KELBQcVD5MgkEOmSYAUHTJY6Fi0kRDwUfAZAFaAEBADLAB8OkANl6zjojsNBgIyuutTRkD2\nRTooZFAZdAUiYoooAqXaZV54YYmh/p2U2zWe/twCH33ve3jXQB+fe/GrnJ6tEiREOmQSicY4dmyG\n2UsXERJxdn3gA1iW9aaVamzb5rOf/TIbGzLd3TcgCCIvv7zA7OyXgG2O0/eRSybJtFpMXbjALYcO\nve54ly9fZmvLZ2joFRXknp4xVlY6nD9/gQMH9hOUZdxcjk1VpScWY35pibImo4kR4ukxUqEcltLN\nzOrz3JBI0pPrRlSbTC9ewHFMzI5O0w5gB7qwPIdLuJhEcUkQwuUiOnl8fIJ4fpAwHRRsXHR8LApX\nrqvOdsawCQi4SHTw2PaVmsejlwAGIg4+bVdjAw+BNApZfOJUMRExsP0YEbdKDJXV5YvcNnEt1USc\nlOMgWlHKpQWajQY7h4Z46cIFyqKI2OmwoGnETZPRbBZUFZPtzc6u3l6WlpdJpFIMDwyQyWS45543\nbxPxLwGHD8NHP/rmnuP74me/aHirBCOvm7a5777fedX75eXjdHdvE1M1TePBB7/MxoZLNFrANNd5\n+unTfPzj72Hnzp2vN9yPxdLSBv39k3hekDNnpjGMdQQhh64bhMM2uVwfpdIFEgmd5557jEIhx6VL\n87Rau9i16yZKpS2q1RzNpkUgUKdUCjIzfY52e4OpqRl0NcERu8iQbzPiS/jIaHgMEGSVTXbioyEx\nS4gcSTx8NpFo041ECx0JiyZ9iIBP03fZsgzaXotYOs9E/wDTK5tMDnSzsNzg60cepFR1cK0xFDuD\n7izQRZlBXOJsT0YuEAQUfHrY7taZArJXfqexTQ3eEHy6AmHWfCiGgwgC3HX7IZLZLJk9e/jQRz+K\nZVlIkvRjFzPP8/jqV7/JmTNl0ult/siXv3yM8+cv87GP/eo/eo08z2N5eXnbayQQYGF9nbH+flzX\nxTAMFre28OM9mIEwflMnKMsk4gm2yiqbukbKdChZBcb9QWLhCp4dYGGpzcb6OoYSIWrbiIbK/rHr\n8XyPIy99k515gRsnJvjKqQsImkM/IQzXpdVUiUhhdot5HM+hik2DND5lJrDIINBCYQ2LawgioaAQ\nJkcLHyjiM45OHoEKPpuABDjAIh5Btks3OiYNoGZ51FFwKKATw8QlSg2fJppXR9BaDEoBJM8lpbfR\nagsI2SGy8TgL8/PIjQ539AxSDKfYWl8gsrmGYqmsizZeKoruOPiWxb/+d//uTQlIZmdnWVuzGR5+\nJas5MLCT5eUzr3t8PBikUan8yPGmp+corm/QLleJprvo7Z8gGAwTiWRZXS1y8KBC18AAQUliamaG\nUq3G6WIVlRTtcI7J/p3UNzZIiRE8L4GgKNgInFyfZ4fn4lkGNT9LkgiOvo6AS4AAOgHq6PhINBCp\noRFFJUwUnw4KJcax2Qmk2Q5A1oAutgP8DBISLuBjIzBNHJ0gUSQ8dOp4BPEQUXBJEKCbCAJVpulC\nJe+6WBjo7iZnF3V2DN7AZkWlo5cY8Q2+8tnP4oWitLp7uPnD7+Pi0aMIsszW4iLdokjdtlFlmXwq\nxXAmwzOlEvVQiF++7ro37mL/C4Vpbrf0Pvjgm3uePXvggQfe3HP8LPCz7KaRgceAa4HHBUH4L77v\nH/vBYzY35ykUhvA8l83NWYaGIlcJoy++eJRiUXmVtbiu9/G1rz3Bf/pPoz/1hJrLpVhdbTE5uYNC\nocDRow7Ly3NEIhGy2SSzs8/iuk1Cof1cuLBMs3mcen2VVstnaamFqtqsrNRx3QCN2jkUf556u4Hr\nFPC9CIJfoOI0KEgqVQQcZGwUfDp0YeDhUUJhk24abJdvWkSwSNPCI0udnJBClwzCgkBIUBiIZtiQ\nPW7efwCwWS/P8/ixM1xYNCg1BHyvC4FNXC4yQp0DeCSBBNuBxgwQYXvCDAMttgMR8crnw8AsoPs+\ny55Kf083ZNNcd+P1TO6/ll0HDrB7924kSXoVj+dHYXFxkbNni4yMHLya3UinC1y8eIy5ubkf+13D\nMPib//k/KV28SCEcxnccHjh2jKzvE3BdpFCIWjhMJrcHP9vLSuMEwXYNJRhkPRxG910ulDQMMUPJ\ndckGI5TVy0QDI6xVVJSojW2c4vodA/i+ycWl81TaGi+WS8yvl9BbGvs8jxywIHgUfAHVjdPBI4hA\nDhmVGiY55nFZoUMCjwQCIYJsLyEi4JADBGx8fIoIyMDmlVcQ2AWkEGjgUyBAAJ8VRNIUWCOIRZw4\nm2SRCRFGoE3MDyD7CmkEVraWGZeh1Wzy1W88RnV1mVq1RSAYYnWrSiKUp+KV6FLixMMC16TDREyT\nC488wrl3vIMbbrzxp3p2fhKsrGwQCv0wETISef1MZt0wuK6//+r7TqfD/Pw8lmXheR7HnziMML9B\nKjtIa32OE5dPce2tH8AwWuRy2+W7W++9l4f/+q+ZHB/HcRy+e3kDU+8iFM5SKRYptlqsGgYCQcqu\nyom1BhHLpFcUWPMVXCJ4tCkg00HEQ2EAjQQuAj5JRDbxSLJFgzJbQDc243h0s/1chdgOSorA8Dbl\nGRHwMPBwiREhTS9zFEmhMEgWkSYCCZrU2EQGJAoYJFCQcBHxiZGi0+lwZOYoCAFkymiGhNTQMIMO\nflMg//I6+/ffzAd+4zf4sz/8Q1ZrNaK5HL2xGEng2MYG+QMH+PX77/+hctjb+Olx5Ajs2vXGufT+\nKLydGXmD4fv/P3vvGiTZXZ55/s795Ml71v1eXVVdfVXrjiQkBEJIICGQYgBBLBh7jYNl7PnAbDDr\nnYmYtSO8M4aYWBze8G54wONw2NwMY7BZtUCiJYSEpL6p1Wr1vaq7quuelVl5P5nn/t8PJyUuuiAx\ntADB0x+6uiozT3X+85zz/N/3eZ5XBMCrKhz37DE5ceIJVFXhlluu4O1v/1Ek+DPPnGZg4CfZfCKR\nolw2WV1d/QmXy2vBLbdczxe+8G1SqRz5fI4777yPM2cOUq/PUShELC9vsmPH+7hw4VkUZYZsdjfl\n8mEWFyMymXmiKE+zWYL2BsKbpyMZ+FyLShoZFx2JFP1EYYOQNkkkXNp0iCd2ngICNJLkcFHpEKIj\n8KjToEUvLcCkoCi0Ig1PT5K3dJxGkefOHefC+iKRd4FSS0ZmpNtssZBx0OgwSoRJXBqGuCXQB2wR\nk486ce7iIvEuzgROA88DhZ07+f0//mP27t3Ljp07f65JugBzcwuY5kst2pY1yLlzF1/xee12mz//\nkz+h8uST7MnlqHY6rBWLXG1ZHF5fZ9fQEB1d55Y9e3jg0DkMfYK9d36MQz84QFZRuXI6RxDOMbda\nYXb2JiobG4hqichfYqk5TyAEpquSsfKsb0V8/+ADJH2dnghaUZKFss80LgVFw4sCBCEJJBwkQBA3\npAQGKQymUHBpobDFBnk2iQCfAiEVPDwcBH73vXYRVInFypPEJX0FKCKRQiOQdPpEghouOhZ5ynRo\nkiNExkEQogIJPOywhiNZpGWFlusS1XSqzZBqaROn2aHRAF8xsZ0qgxgM60lU3WYklWKj3aZgGBx+\n/PHLQkZyuTSet/aS77tunAc0v7bG5EDsBFssFvHyeXbt3g3EgvAHvvQl0p6HKgQPHz7MVQMDWAUL\nRZaZzA9gNSqcfOYA49sHufLKuwGwWy2qnQ6P/uAJypUaZRdqTYspEbDp28iRTLUDodLk+jtu58KD\nD1KQIhIRRFis0yAe0GDRQUJDJiQki0MZjQgVGY9LCHRCWjjMEicqbxG3ODUgB6yjUAF6iGcQNdCp\n4SJo4uKSISKPgSDER0YnJI1GkVVU0hQIkIgISJMniUNEHxorQkWIJjo72cLGkKrkwxC16XDw0f0o\nyr0UV+axazXK5TJTqRSSEFTzeWbuvpuP/+EfvuwU7mq1Sq1WI5vNUrgcgRlvQuzfD3ffffmPs20b\nbG1BowFvJg75q9KmeVl88IPv5wMfEK+oEXg5JXYcTPb6p49u27aN++9/O/v3P065rBFFHvv29fLB\nD/4Zp0+f5tIlhcXFZ+l0UhiGArQwDB0hCpTLK6SSNrnOWXKBhoKFJ9qUmadCC0tWMYWLh0kLmSEC\nehB4xBWK54AhZFQS1FFRSFPCZYuAtF5ABHU8YREpJqUooCM0NEZZW7MpuxKXtk7Qx0VMPPbQi0mZ\nJiFFNpDpAUwU2rzwbvnEpOSFtkAbKAGzwCaxVgTi26yeK/A3f//3XP8LuEGZpk4Yei/5fhB4JBIv\nvSC+gMcOHGDj2We5dXSUpK6zNT/PsBA0ymX2ZDLsGh7mTKnGNx89TSbXz8Gnvk+95XDz7e9j/txJ\nzl06zA3XjKKOjtHTt48H//EsbvEUEyJkJpKxRZnQl7lQ3MJdvcSUKJAghSJLSGqORW+FCiFRCJYs\nIaIID0GEh4WKR0SESoSCS4SNDySQGKROhRQRGjISFhIhZVrYxOQv2/07JPa6K8TE0EbCRMIUIdBG\nIsKnRQEPFZMMCSp4jNDBIyaUPgEl12ZKTeIhEwpB6dxRsh2bPk3hhN8iJbI4UYQfBdT8KlcPZdBk\nmSgIUIUgfI3uhvX1dZ49fJhKscjQxARXX3/9q960du/exUMPHaTZrJJOx24X224Qf/LAuuIKfnj8\nOJIkMX3FFXzkzjsxTRPHcdj/5S9zRSZDNplktVxGqTc5t1IiOzSEqjbpdFLISDQ2Fvmf/uQvyefz\nsaPtH/4BZWGNG8avx9qV4wfHnuRY/Rx+SWdCTxBIAkn1cUWDwwcPErougSzjRxFNPBR6sFBRkNEI\nUBEIcgQEQJqLqERkCNFRUFBZoMYGg8RVxgxx/9kGWggUDCpEaERY6KSRUYAa6yg4JDCQ8QjwEQQI\nHBL4hAh8AhL4JEmhABlCIjx0MrgUCLEpsMpIaCKLgKZXwfVaHHr4H5k1fD6yexfBxATHVldZDkMm\nslmuuPHGlxAR3/f5zre/zcVjx0jKMnYUMb5vH++9776XDCD9LX4S+/fD1752+Y8jy3EF5uRJeAVJ\n1a8lfqXJCECn00GWZcyfCqS67ro9PProBSYmfhTUZNt1EgmfkZGRn36Z14Srr76KvXv3UC6XMQyD\nQqFAtVrl0KFjlMtLeF6VdHonfX0pstkRbHuTRqMIWNjlZ9muJIm8i2j4qET00SSiRL+UwySkLoUU\nhcMAsZCtTnzjTwNyd1ckWMVnjBQmDQSm5ZNU0ljaNpTQQfLq6J6C74UUvRo+CXpQEcAEoGMgodCD\nTBaXOk18Emx1f+50j+cQx8rbQBkYIy4lF4ERVDQtSdXKMrzvJpaXN/hFbJZ3797JgQPH8LyJ7hBC\n8H0P399gz563vexzgiDg7NGjFJLJF/M/2q0W29JpyuUyimWxUKmx1siS0wtsH9sB2gKdzipPPjnH\nfffdwTv/j//M7OwsFy9e5DOf+Rwby6e4QZWIAoEXlhiQ2oSd2MacIoNJIpYTShIhEj0kWEJmg4Cp\nSKaFIAtYNHBQAJkqClvoFFgiD+jUqOLQokMLhRTlbhUswkZiCsF09/9oAvPEN64y4CORRKB1i/ot\nwEBBpkQbjQygd6szm4S0gfPEu3ArDKk6Do6SZK5RZzDw6EiCtqLgSzIiWkTDohjWGUtbjGRHqXQ6\nZLJZzrou77nllp9egpfg3LlzfOfv/54RXUePIs6eOcOJp57iI5/61MsGj0GcUPx7v3cfX/vafpaW\nJCRJIpEI+J3feS9/9mf/lvd94AOE990H8BP5MgsLC1ieRzaZpO047H/6BOtV2JMco91Uyfcm6e83\n2L17B7lm88WK6KHHHiMfBFR9g3whrpsLx2EnDgnFRRJNMrpJjyK46AZcXFmBKKLoeV13kyBBng2q\n9KGioSARItDZBCIkYBgVBZc2Gh4KA9SpIeOgEp9nAE0kUqjYBPShkkXGxmcdlzQaMi4hAQoBJoIm\n0KCERg4PDQXBJjYTuGjkcIEUEUXipp+PQ5Zl9mKSwERRLPywzoXQw9m4RN/0OKnu9fOdMzOcrVTI\n9/Vx/vhx3nHbbT+xTo8/+iilZ57h5vFxZFlGCMHJEyd4JJHg7ve//2d+Nn5TMTcHrdYvdkrvq+EF\nR81vycgbhP/2377MwsImkiTYvXuSu+++/UUm/9a33sDc3CUWF5/BNHvwvDaKssXv/M49Lztp0nEc\nlpeXkWWZsbGxV9SUaJr24gW10WjwhS98hWo1RTK5Hc/bYmOjhqIsY1k6mYyGqlrUakvoYZmkrJLR\nLVQvwJFtFsMtRogYoDeOdhdtMvgsE9/4LeJWSQrIoFInQMEmYpHN7sVPcvJIWpIOBS6EK0hODeFq\nNHGoI5Apo+OTJyIJtKiRJEkbFwufIi1CTFxi/UeWmHAUiV0cCrGDYxFoYtC//V/hahnswGVkeIh7\n772Lo0ef5aab1l/xRvNa0d/fz733vo1vf/sJoih2T8hyjfe//yaGh4dfds1OnTrF4tISPdksq+Uy\nk/k8UvciKRSFShAQtQRZs5+2GyDLMpnsIHe/5z2srBzkxhv3MTIyEg+Dm5wkK2/RmyjTsjtkQo+J\nbvtqFcgi8AlR8BHI+CKgFTi0u/6IZUIahMjEVYw4sL9ICZU1chgI+lFJoCIRkKWJDUhkSVHHoMM5\nIvrRkBHUAfBpEleq6sQi4jaCGQRbQIs4pbcfWMKnjU+BCI+QMTxEd+2SxOJImZCTXkBHS2MqKdJh\ng0TkMa7KJA2D+aCDqTtshirrqsrRchlXlvEMg9133cUNN9zwqmtYqVT4wv/1eaxyhdNbJbKaimUY\nbLou/6Cq/G//8T++4nMnJib4zGf+F9bX1xFCMDQ09BMuqpcLuQuC4MWL1NmlVfxwkGQ+oNNpoeoJ\nenomKJUuMre6yvC11774GuX1dQpISFK8m4+EoNWp0OM5ZGWDjAiIgoj1loMLXDs8zOT4OIePHmXJ\ncQgI2aRNSJ4OTfoRBETUaeMjoyBIoNEmIkSihoNMCw+DIzhMEWfGlIE2MhIqGXzSSOjICEIG8Kng\nIRMLxWOCCwNIzOOzSJEOJi4VEoSsEmLj4KLiIfDJdVNPSvQjsFAJhUfoRyCH9CNYDnw0Q8dutei4\nLoqiENbrPP344/iFAlYqxa3vfjfj4+P4vs+Jp5/mxu75AnEFetfoKE8dOcJtd9zxmrRhv4l48MG4\nRfNzFOV/LrwZdSO/0mSkWEwzNrYDISLOn1+kWPw6f/RHv4emaSQSCf7gDz7K/Pw8i4srZDIj7Nnz\n/pftf5448Tzf+tYj+H4KSRIYRocPf/guCoUC5XKZVCrF8PDwS9o7R44co9XKMTExQ7vt8sgjh6lW\nt6hUxiiVygwPpxCijedFyF6afCQwZZVA2CTCDhoNksh40SYSHgot+gmxidsjk8SViTpqV1MCG7gM\nkMAkYgtwXR/Jq1BrnyCjGhTdWDuikCRFgz7krqgtJAu4uNQxSZBDISKkRi9VNGI9wipxuJYDZJAI\nSKJYY9hhAi2psm3bPvr6ZgGo1YosLi6RTObY2Nj4HyYjANdffy2zszMsLi4ihGBycpLcj9k6X8Da\n2hrf/Lu/I2HbpMplitUqZc/DjSKUVIrn1tcpJhKM9fSw3AjwohAjlaLcatG/bZKFhVMcPXqcTscn\nm32cqakChiZx7swZVEUQhh2yxOJcNxKkgCwRy4Rda67CKhHQh4egg8kK6+i0MIHB7to5QD8BLg1C\nBBIDOFgolMgRkURmkyp9SKSQsRHE8sUkFSI6xETE72aVjKPQwOUSIRZxCy1CIkKnQ4okbTxCAlI4\nRMg0aREyjkYaiRYyfWQoqRp54dMv6yhCJ/BtZE1lMpGmmEsxODhIcniYhusyNDrKHffcw53vfver\ntjiLxSKf//zfsHhqHam+xWjgYemCmalRtqdS7P/ud1n42MdeVa+lKAqjPyZM/VkYHR3lgBAEYcji\nRpWMNUZ6vIdnnvseecWns3yelY01gmqRa4xR5v/Pv+Bd73or/SMjdCoVosgBwPNc7PoW+UggCY9a\n4OF7cTXKlyQuVipMTE9jGwaO41DrNs9k+igxSI0SEQ4eLUZQaOJRxcXHAspYlJnBwMREpU0Tn3ks\nEvSjk6BJkUEkGiRooQEuJgIbh13EomWPuFLaQXTthYK9qARILGFQx6BMB4k8OhYBPjBPhgAJQYsW\nCgZC+EihIIo8bCVgdWWFXLNJUlWpNZucrdfZsCxoNPji5z7H3/3VX3Hfxz/One97H1IQoP/UZk5V\nFFTizcFvycjLY/9++NSn3rjjvRln1PxKk5EXooklSWFoaJpLl44xNzfH7q6wTVVVdu7c+apW3mKx\nyNe//ij9/ddimvHk12azyn/4D59nfHwcyxogitpMTmb5yEfuI51Ov/jc8+eXUBSTAwe+TbFYpd1O\nkc324DhFBgZS6HqLmRkJWR7hqYfPUYsE42Ya33cIJBCSjiWHdJQqsh8QiQAd6IUXg648VFyyNAjJ\nYiLjskqbEhoeIIlJEqxSiNYwQ5eACMEwCRrsQMMkQQOTGh0ahEiksEiSQKJKSJJeQEanThqFZjfc\nzMVinTzb+qbQE1lMZZBsIcvS0kl6eqaQZRXLyrKxUWZ62vqF9ouz2SxXvkpiTxiG/MtXvsKMqtI3\nMcG2XI6jjz+OWi5Ttixk0+RiGLJndpbBRILnn3qGcrvK9Mgo/Tt3omg+zz67QCazix073s7ayjm+\n9PkvYtWXGKhucaHToRxFbANSioIjBKvEbY4AjyIbBFjIzBIi4+KR6zZHmsyj4qITawIqxHqPEh4+\nLVQSCDxUmuiohARotMgjAxK9wAoBaWwietHI4FDHxWOUYUxk0qwiCHGR2EYOlSRbBJxHp8wgMmUm\nMGl3c0sUwJI0DFlhPexgSXnAJyVrNIRHghBZSGh+m3YoOOG0+d//+I/5xCc/+brWbf/+R9G0SULl\nGP1CMJztw/NdllbWmd0+xYiuc/LYsdctHn815PN5rrn9dg49/DCu71BrNVivlqhq/UTWKKdX1mi3\nFd5z8ztYW3PZ3Kzw0EP/D9dfP0becxBKm43iIrV6GzcMaSkKVuiiASPE2o6UEJi+z9d++BQDvoRP\nEphARyEhbdAS8bnYpkyKEUIMLCq4dGgT0McGu5Ax6RBi00FgoaMyTo5kNyHIBPoJaaLRg4mERxmV\nDRRCEsTtugTxpsEGBjEYJImGwMDnIgpbpIhYJ0BlGzY9uJwljQO08MnioCCIpDR1PUENFQMotlqM\nZLMs1uvM2TapTodp10VSFGqNBs9+5Sv4lQqOLFNrtcj9mEi91ekgJRK/ddy8AppNePpp+Kd/euOO\n+UJlRIg3rhpzufErTUZ+GqqaYXOzTJeLvCacOHEKRRl8kYgArK2VWVszmZoaZmxsLwCrq/N861sP\nct99d3HkyDHm5pZ4/vlTnDy5QW/vLXhem1zuGjQtTal0mr17p7nqqqvZ2DjBxz52G3+VUzj43/+F\ncqOKJSVoCxVbNhhQZXQridwqkvIDRogFpC+EXIXIbBHiYbJChwUi6hhINFBRCXiclAhIdLUBCTJY\nuCTxMNG7YjiJqDvrRCdCw6eDYBONNFkcQKLBIhJNegkTY6SzKQYGbmD79h4GB/t47rlL6HqaSmWB\nRmOdXG6MIPDQdRfLkpienn7pm3uZsL6+DrXai4mc2WyWG9/5Ts7PzXGwWOQDn/gEn7nuOqIoolKp\ncNsnXf75nx9DkobI54d46KF/AgbZt2+WMPSYO/w9RhyVph2xra+P4soKiSjiEpAPQxzialEEJPBx\n8GmRxOwGl+0gvmlVMXCw8HB5nriylSG25HYAQRKTBB4BESYBNh4CBRmBwOvueHMobKCQR0HFokYb\nCYUUEQbJrg7IZbV7mzKQyaDTg0ubHsrkkLGR0IhIMorGpqigiwQtEgQiwA4VlgKXfmHRFG1MQvxw\nykkqAAAgAElEQVRQwtUttg+PsnbiBOfPn2d2dvY1rYnjOFy4sM74+K2cSmUJ1xcA0DWDlt1gpVxm\nYts2GtXqz3il14+3v/OdjIyP8/ADD/Dst56kKXrYc8W7CcOISvU8eXOYp58+zPbt72ZgYA+2XWd5\neZW51jxmu0S7tMWFlXVsfJIEeN212yCuFOYAzfUxkWgSIdFLAZMqgi2hkiVCId5AWJzHJ42KisoG\nSST6kH7MdithAsdJomEhAQYym1iEaCiYqEQ4qPiYgEoViRoBFnTnT8WV02TX/+YREBKRRKOGBIRM\nojEmBTSEwh5sSig4gIIGqGyIDiV1lP5sBj9TZ7PTYaHVYrHd5kpZRgDbdZ20aXLBcVgvFsm029DX\nx8lSiVnfpyebpdpscq5S4db773/VWVG/yXjwQbj5ZvixfexlR9d4RrEIb5aQ3F8rMhIELXp6Xt8o\n63q9hWH8qLTYatU5ePAJwlCmVvtRqNLQ0DTPPfcw8/NfQIgh8vkRXHedixdPYpptfN9BVRP4vkcy\nmcW2BYaRQJZNfN+nZ3gbueFtOEsVHE/BREbGZi6o01uvkok8JOL+/ggggAtI9ACCDi10trCok0Gj\nxAguOm73AhNrAmLbp0AnIkuI2rV1GkCAjEBiHgMZixQGKiYNVGxMamSoKNegaCMk0iWmpvrp6dmG\n71dZWChSLFaJojqOU6dYfIIg2Ee9vsxb3zrCxz/+kTdUSR8EAT+d3+oLgauZCCPF0NgYCxcvcuLg\nQdqNBuOzs3z0o+9lcXGFM2fmMU2Xt7zlevr6+lhePo/WqLPVrOA4Nk1ZJifLlLvvqUcsNOwFnkKi\nRR8qbSwURjC6YVdgEmIiUIlj28Pu77VArPPIo3KWBAYtBlARyKwR0UIwgUSLsDuzRmYcgzUk1kh3\nQ/5dplHx6QBy949ORNS1D8etnB4kSoRdQ6+HikEDlS28mNgYeZSgwarfYCizl2JlHV/yUYVMTkmQ\ntFKcDwN2pnvYlcvxxEMPvWYyoigKsgxRFLL32ndxdOEkGbuODtQ6LfKFKTIDA4xeJtI6MzPDzKc/\nzWbN4cCBTTY3F3HdDlBlYGCMpaUWURSvimlaXDi/SkHVuGHfLkbekuah73yHp+fm6FcUlsJ4ErZK\nfC6WAYGEimAYuZuYalDAwMVhjZAefHoQjOIQ4pAgDqV7FhmFBDIhKhoyPkl0QgQdYoIbIBHQR4kK\nvYTIRCgErBLhdeXuSwT0E+tMikAdmQlMAtRuIy7EYp0EYOAjST6rkkQoQnahYAEryGyikJAK1GSV\nfO9uMlqNG6Z6WFhZoRGG6NUq40DJdXFsG1NV6VMUlsMQ1fcJHYf3feITPP3oo5xZXaV3cJA73/e+\nnytI8jcF3/gGfOhDb+wxJelH1ZHfkpE3ANXqJvl8f3ei5zK5nPeaL54vYGZmnGPHjtDbO8Lx40/z\n2GNPsb4eIURArbaOaabYvftaAJaX1xkbu5IdO+ITb3x8mtHRIouLZ7CskFptHsPIkUxazM9fJJs1\nkaQl1tZGCMNBvPwoCxdqDEgmjiwo+XG6YokiCgZ5PK7sBo/5QAbBHIIVFEBCxiDDFkPEI7sTxBfL\nqPt1D7BMmwZBN79AECLhdjMnIqCBSZY8FjIe4KDj4uHIs2BcQd9gFkVpsW3bJPPz5wCZ/v4pEolL\nrK9v4jh1NjZqTE9n+d3fvZMPfvBfvawg+HJiaGgIR9dpOw6WabKwvs53D1+k2jKwxnby2f/8Vaid\n43dvv4nC4CAbi4s8PDfHhz/1Ke666w4URSYIXvhoCxqdFpFTYVqEKK02sq/TT4bzQBGFfsokgDZJ\n8gxTZp1eAlq4BBi00QCFBhUs2rSJKyJJYpIQEpPMHnwiCpRxkBDU6KNOEwWbJhJpZNLACSS2SJFA\nRkYCJCo0usTHwwU8dBza9BEhoeACDQQSAXlqjCJjIdFEsEhABRstKiELl/60RbN1CilSWUVGk6oI\nLUmvkWREqLj1IhOjozyxsvKaI+A1TePKK2c4ceICo6OzbFx/J81LZ7DCiJGZIUZ37mTTNLnqmmt+\n0R+HFxEEAZubVRRFJ4p8wEVVFaJIQpYtoig+b1aWn6OzegRhynz3/1tlW38Pim0zJQQrYUgoSUhC\nYBKT0YAkLh4RARYqMuAg4eCj45NFoOJgEKAyRApBnQZzdOglwkewQcgAKiqgESLTRqdDlnhgZxON\nIgNssYhBh4CQFhoGJkZXoWIDTRSKxEQnViK5GLgkEXTwmUAgE4EQOAjGEOQRSEgkkTmDQb/RTyj7\npBIG2STYrkuj2WR4fJzq2hqu6+JJEglFwWm3cTUN3bIIhCCTy8XEb2bmsq3jmwm2Dd/7HvzX//qz\nH/uLxguOmjvueOOPfTnwK01GLGuNpaU5IGLbtj7uu+/+171D37VrFyMjx3jmmQN873tHsKzrSCTW\nUBQZSRrgm998kOefX0DTEqyvn+Gaa+558bm5XB+Fgko6Pc30dIqjRw9SqwmqFR81XOWJteNMjgR8\nrTxPeuhGZLUHzxhk2dFw/QpxmHcehR1k8GiwyhkusYgALFxCitgEpNnGICqCkAYGNmni8LEX0jlL\nxNbcLULarFEmjY6PjkSHFhE+y93kkDWqbJFFQxCyHgsglT50aZ1GY4Xh4VGEEEjSeXx/iPPnnyaK\nhlAUn1yunyCwOXduhcnJiTeciAAYhsFt997Lo//4jwwoCt89fI4gmsDsLbB7z7U8+8QTJJVJFjZK\n9OfzjPb1ERaLHHz8ce67/36uuGKKL/zVl0noBfJD29jymoyGDr7fpomFRg4FGMBllQKL3TkvggI1\nVBwK6GywxTIOU2iYRLRQaGOTZ4EN8sAMEBLHuceEUVDBR8MghSBNgE+DfiTqgIvBGgZt+hFksJHR\nWWEEhwQuBgGbrCMhU8IjS0RIHY8cGoIWMr00GKLebd6ohHgUaJIGDCVkJpXGSCbpVKuUvSplNISs\nMaga5DUTL6iSz/Xh+j5aIvG6ZgK9+923USx+g6WlZ+gdmWUlsKlX5umbmUbbs4eP3H77ywrIf1F4\n/PEnabVkLCtNT0+8YTh16hBra0uEYZFEYh8bG+epn/0O41GEaDaYzEKP73PK87i2v59vb2zQKwQX\ngZ2Ah4KQshRFoxsymGCEFCc4j06OXgwadDCoksKgCfhYJNGpUUPQYIQ2pW6FSgdaeJTJoGDT6uaK\nOMThbj4TuChIZDBokqFFjTVaFDGJ0EkgkeMCbZp4jHYN4VuE9BFfExzgJBGDAiLFwI4EhpCQZBiQ\nYMMvU1Y1pgs2b9s3ywOPHEButTDX1ylHEX4QkNE0ykGALknY6TS53l7qqsrbb731sq3fmxEPPgg3\n3nj5U1dfDldcAQcPvvHHvVz4lSYj/+bf/D61Wg1FUV63eOrSpUscPnycWq3J1NQIzzxzGDAxjA4z\nM0NsbdWo1SrYdi/Vap3+fgnL6uGpp45w5523o+s6up4gn09z8OAPse0xSqU2tdoidJaYyWQYTiYJ\nyzal6jOcOvQsgTVF4F6iz6+TJ8JHZxOJLQxUevAosIBLDykyyGwhqNGhwBYJ2ng0CWkwQUiKH0VJ\np4Gn4EXR5DoOSRyKyN2RWyYhQ9j047OCoWxgJFM4jk0U7cUwpjHNOtmsRCIxQKVyhsnJ67jzzo/w\nzW8eZGlJx7bb5HJjpNPDOI4NzPOFL/wTg4MDv1BB4mvFviuvpKe3l4f276d5vMLMrusYHR2jXq9j\nAr2ZIc5cOs0Nu+JK2WChwLH5eQ4fOsTJAwe4bTTJ8uICi08fodbaJBW2CSOBhwVCwiYmEhIgU6CE\nx7TcQ1NWuRjkudR1zZg43ZaIikuOUEqyLjrUaVMiwiIuq0sMMcwEKSI8Ito0yLOChIuKRLur//BJ\n04NNLA9sI+gQISEQ+AjStFlBxsJkkwCbNgputxYmsZsOGtDGp04Lh9iJUU+n2VUoYIUhzUoFQ5IY\nkSRkEVAJPLY6HlM5hchUGZ2d5dTqKte8972vONDwBXiex+HDRzh8+CRhGLFv3yw339yDbXfI569j\nZmYGTdN+rqDB14MoinjqqeNcddUdHDr0GJXKHMnkIBMT05w+/QCWVWdz8yRbl46zzzKxq1UCv0ml\nYVBp12nj4A70099xqdkttEhwRpZoBhGeFNEUBSw2aRDgEjJMi0Fq1FGQ8NGx6EdCIcQloATYqMgY\nLOORJ2AQOIPBKsPojJJGJkChgQ2sUKCHEstETJCmQRYZFZkk0wTodGhQI8Qkh8s4Jc5gYeMguJLY\n/hsCiizTiCLKqEhKgg0CzAg0IXCiOuu6y+w1V5PtiTh4YZ4dExMkczkKsoyVTnNmbY2W77MURdSD\nAFSVq/ft46p77uGaa6+9rOv4ZsMvo0XzAt5sM2p+qWREkqS/AK4FjgkhPv0yPyeff30aEYAjR57h\nW996EssaxzQHWF7e5Pz5VcbHr2N0NA5J0/VFGo15FKWDJNW55ZY7aLV28sQTR1hdXWNiYoKjR4+z\nuhpQKGisrc1RrbYxgy3emu0no+pU3TpZt8lQSqFf+JxafYZRr8Y0EKHRIoeFgsIlKgh8fAJmWcNm\nHRsJCYMkAo+ITfJErHbbOApxKyckdt1kuv/eAjw0mqRx8HEwCRlDJgmYaGYaWGLXrvexsnKJVquK\nJGm47hDN5hYTE70UCgU+9KF7MQyDf/mXw/T2juD7bZLJOOsjDF0KhSEg4sknn/mlkBGAkZERbrvj\nDhaWYWwsLht3Ojo+IBDIP3YDbHU6aIkET+3fz1tGRjA0jZ2Tkzz22EHWKk3qLR9ZJGh7OglFQw0F\nRVw6aF1JoIQcRSjCpo1PQIYOESl6ULBoxc0uNCEjY5LHp9AN1y8h4VEgQEZHwcCjhwxbpLuR/wqb\n5GihMYqgnz4EASYVCihcwsHAw8NCR0HHZRWNDClKtJglZIyQNaCCQo0UAo02bSZpA+DLMv25HGvL\ny6SJdSwoKm7gIysavgRPV+vo+Sz5gQF233orN/6MxKROp8NnP/sXHD++Tl/fGOPjUzzxxBr9/Qt8\n8pMfe0kQ4eVEEAQ4TkB/f5ZbbrmTxcWzrK4uYBgab3/7Xv7oj+7nG1//Zx756iYEESguWTlPSjKo\ndlzqkccDK2vc3DeGkxtmtbFFfxTREBErnTRDuRnOl1UWog00OuSIyCIhEbKGYJKQPhI4xOe2hss6\nAh0FH4NLBETIlDHwmcQnS0AHmQCJFC6D+MhkUAGVXkJ8QCKBhsBHoY8Uq7iksdGRSHat4Vb3miCI\nw9g6UUSSuAXXCVNkjAydsEkj7FA1Va69ci837dvDroEBHn3yScaAmmmSSSbZWSiwZ2KC7ywsQDbL\nO++4g3vuuYdt27aRTqfpdDqcPnWKzbU1evr72b137889/uHNjmoVHn4Y/vqvfznH37sXTp+GKIpT\nWX/d8csclHcNkBRC3CpJ0v8rSdJ1Qoijr/acIAg4efIkx4+fA+Caa3a9OKjtBbTbbb785QcwzV0o\nikU6nSeTKdDTs5Pz508yMrK3GyUPudwkmnaGt73tHQwMTNDbG7C0dJ7z5x+jXp/hzJkz9PfnkOVp\nDEPB6yyh2x4yCltOC88tMZvN4TsNTFUi4TcZJiSFSgufgAYWLUaJcChiYxBQQMOlQNS1fsr4aHhd\nFb+MYBOHNLGIc42YhMjEYskNEmwxjcc4ERniZAIFlG1I0ga6Ds3mRdbXV2g263Q6HYQ4hKYN0GpV\nWF6us29fH+l0mkwmw/R0hgMH5omiUcLQx3FaJJMyhhHS1zdMuVy7PB+A14jR0VEMw8G2GySTGXK5\nLHoux8LqGe64tg8APwg4XyoxfvPNlA4dwtA0gjDk0acO8/jzp/HbDWzPJo2KLZrUhYaDg0+SIWQi\nbEwqLOGzJYYwmQJ8fEo0SCJwUFEwcMngENJmlgCZkAoSAWlAZ4MO/RgYKFTxWUPHAEJ0Eli4QAKD\nNhI2ETIyCi4ZArYIGcfGQqVFSAKBA2iYdOggABuFIuOMkUVCxqVNkWWykk3Q6dBRVTRNww9Dqo5L\nTrdYl1wUzaQTBQxdcweT2yf43f/13/7MrI9Op8PnPvd/873vrTIwcCXFosPa2jNceeVuikWZU6dO\nc+21l08f8tPQdZ2Rkd4XdWQ7dlzNjh1X47odarVj7Nmzh/ndJ2nunebsD49hiT4agY3nd4gQ2GqS\nAJVnayUmp/cRmCmeq2yQj3ws0aZOld27r2Njc4mVzZPYBGQQpFFIEOLRYYuQqBsKb6NgEFDGx0Cn\nSY4sfd349iQWdfq6Wo4mAhcJCR8fgUJAiI6EjkMNmwAPFxkbjRQNNvDJUMClSZIULk1CDEAlpNEl\nL1UkQuGSihqYgUNbeHRCQW1ujkY+T3Z6migM2dbfz8rWFk4yyXylQtN16SBxz/338z///u+/2Pre\n2tri63/7tyTqdXKmyVnX5eCBA3zoE5/4hWQMvdnw1a/Ce94Dv6zRPZlM3B5aWIA30Ox42fDLrIzc\nADzc/foAcBPwimQkDEO++tVvcvp0jVxuDBB85StPcfXV83zoQ/chyzK2bfOXf/nXHDmyRjbbA6yQ\nz+vceOM13Hjj7czPf5bV1YPk8zMEgU2tdoqRkRSzs3G1JAh8crkcu3YNEYaCIJimseWxvHQWS0/h\nNOfxQpmtoERSVVF8j4VakYTqoyeTyJqC7kW08Gggk6SFTKo7DLyMTwKBRpIEDgkCfBJIdHARmBgI\nUiSoElJFJk3swGkRE5EWEk3GQZ8k8mLTYOwDWUKSLqCqIe32FqDQbPr4/gBhaAMlPG+ddNpg+/ab\nWFp6iL/5m/+OqlqoapK+vhrnz7dQVYmBgR5M00PTWmxubqEoDgcPHuaKK/aQTCYv00fhlaFpGh/+\n8F186UsPUq32oqomPYMakiSwVYlnlpexJYnr7r6bgaEhNg4dwvE8HnnqKQ499hi5toMu6XSEylIU\noWKjRDppEgwQssESPVLAHknmhLCpiQCTJglMfBScbqPEQiDhUafEKA5ZVNpELKNjk0CQpUVEgxYJ\nBBERgjY58tSJ2KLZfUwNlwwKw0jkuEQFiWXSRAwQ4uJ3nTOCTWSSaGygskmER4Y0OcpI3WD5BE2S\nNIRHMgg5uLDIqIjIqSqWKdPQU1w7OMlousDBdoMbb7sfzytx9uxZVlZWGRjoZ2Ji4mVbNYcPH2V+\n3qNQ2EcyGU/WDYI+Tpx4luuvv565uUtvKBkBuOuut/PFL/4zQeCRy/Vj23UqlfN84AM3o2kanSDg\n9JmzrDkBs0pAQgjaUUSVEEfuwZBtJLnFJZEjP3wL05MpLi08idw4S1+vQiBqoHWYTGmstTqcI4mM\nSkCDBAEaISHQQMJB70qck7gYZBlCx0TuzpMx0PCooWChENHHFr14bBJRwu9mmXiojKCSRkJjg3UM\nqkzRYZkODSy2k2WVKmkibCICwEGhRYQgj4vEVgQpI4MlZPbIJTZabR45eoLT5YhmQ2J9fZE9fSYD\nhsFCqLJYC3CS0yxekvn857/Ivfe+k3PnLvKNf/gqA60GN161i+GREcYUhY1KhYe+9S1+7w//8A1d\n618H/O3fwn/6T7/c3+EFR81vDBmRJOkWoCKEOC1J0juA64BnhRCP/A8cOwe8MKq1Dux5tQfPz89z\n+vQW27a95UcvkOvnuecOc/31i0xNTfGd7zxCsaiRzQ5QKMQth1qtyPPPn2HPnhne//47kOWI558/\nQirls3NnyI033kGxeInjxw+yuLiEridR1avY2rrIxqrDtuQwQ4kECauHWjLBZmWBFTnADwPsIIcZ\npoicCpm2Ta+qskgcMSaI0ACDJiXAQKcPm3VkdPZikqZKmzKraLiUSdHCBVQckiSRsamiE1BGECBT\nZZyB4XfSbIWEwiYIthCiH2gTBDqKMkEYVkgkbsK2A8LQRZIGCcMkcBDb3mJl5WFcV+fYsVUKhR5S\nKZ1q1cO256hUzrK5mWF2dgdCJKhUzjIychsPPHCG73//CH/wB/fT19eHEIJ6vY6maW8IQdm+fTuf\n/vTHOX36DPV6i8nJdzEz8ykqlQqO49DX10cymcTzPJxEgqeff55wdZWcGzCk5+KUBjXB0UaRYSx0\nXHxa2EhskwWmapAxMySaLQq4yASEeCSw0GnSoYpNEw2JMVxMLGxUTiBosJs0KZq4wCAB6a6apEFE\nggV0BEHXGFqmgkIOAwmfeMKIThUJhXXO0cAgYAgoIyEj2CDOL1mk3q2XAHRQEAjAJ8U5GkzKCvV2\nxKLfJhAhQ2aCiXSSjGHxfKuG3D+GoigcOvRDgkDCMLJE0TPMzGT56Ec/+BJh+PHj5+jrm6BSabz4\nPVU1gCzl8jLZ7BW80ZicnORf/+sP8oMfHGRx8Sj9/Xnuvffd7NixAyEEK/Pz9PiCTqaXpZZMJFxc\nSWZT5OhhiEJvnWIF0uEoQ0NX0m43kBhmqbLKYO8QhVwf5+ZKOG4crp9ApwIY5FmiSQ8tEnikiWjS\nponKEAY1esiQZgtwKNPDKjmSdAip0iYig4mLQx2FYRLksJGRGUTuJs1IJPDYRojNGjoWKj451glJ\nkuQSIWkUBCYddFrItHGYwsKS0mQ1g7q3jh9m8YOQTXsCe8nn6h27KW9s8ej8PImFp/HlKUKlQHYy\nT6EwQ6fT4N/9u89y9dV3QFtnIDfL88+vU602uP76axgsFLiwtES1Wv25WuZvVpw4EWd8vOtV585f\nfrzgqOmOdfq1xs8kI5Ik/TlwG6BIkvR94FZgP/AnkiRdI4T4Lz/nsevEUgiIxzK8pB/wp3/6py9+\nrWkJLOsnbb2SJKHrfczPLzI0NMRzz11gx46bWF19gHZ7C8vqIZvtY2VljmzW5777buPmm99Ko9FA\nURRKpRL//t//OXNzLUqlDqY5hQhanDx4AUOHM6eeYOi62ynk01SqDZIJk0mlyUIYUot2IzNAJGQC\nfFrRIm3vDDZgELKLiAwKNQQ5IkZxuYhKDxoSy/ho6PgEtFGREfTgoSMhAwk2qbOARMgIUEehhp6Y\nIhIhmtYL9BIEK3jeIkHQAkYIgiaaZiJEBt8PEaKCJHkoikwUZUgkMiwsLFMo3Ey7Pcry8hqrq88h\nSf0MDLyDwcEBlpef5+zZw0xN7eMDH/j/2XvTKMnO+szz99499siIjIys3LNKtVdpA0lICAFaMDaY\nAZsGY9y4jX3UH9z2Od3MdI/tM32m50N/mub0abeNZ3w4gz09TTdgMwYZkDCSEKBdVVpqU1VlZeWe\nsa837n7f+XBDAlkCZIFUYszzpSojbka8ed/MuM/9/5//83ycQiG5K67XN/jqV7/JbbfdyFe+cj/t\ntgtEHDu2yPvf/57Xva88MTHB29/+Uo3D9PQ0g8GAwWCApmmYpskvf/zj/MFv/zbz3R6oKv1ghFRT\n7Iz6lJFUFRVD6KQ1jSCGldjF1yW7+PiEKLpGOvBRx061FgYd+pg4FCiRx0OisYZHh2lSzAAaA3YI\nOEPyaxyhomORQ7CQTGywBcwyREdFJYWGRMXGwmEOQYBLlr2ss0UGxtlEIwQhNtNIuoywGSEpkmGS\nmJARfVz6tMMYxDLZXBGpBFz0LrG5s4q2u4av6hidHqfP/c/sO/YO9u//fv7MyspzfOc7D3PnnS8N\nTFNVlUqlxMWLu3ie86JXTxj6hKHNtdcee133+4dhbm6Oj3/8wy97vN1uI2ybmcUDeJs7nOm4WHIa\nkxSWAm6whT/0EMYewthmbfUJejuXGfWHZMxlnjz/HKMwjxlVCZFIdJr0SbGPDDp1LhFikGGIjmSb\nmJh9NGnh0aI+rlLtwWMZHZshaQzKdKnRJcMs22SpUMLCYJMRkgwqAocBISoQkaZExIgOETEuDuVx\n0tQ0babGYmebCgZC7BLKJnbQJpYajiwghIIvFPLWDLFS5NLOJnOVBS7tbhOgcnRpgfmZeSzD4OTD\njzA5v0C/nyWVKiAUBd8LUdU8ly7tsP+qDsWJCV5djvM/LvzZn8Fv/RZcaR+448f//2ML/2oqI/8D\ncDXJMEcNmJNS9oQQ/zvwGPBaycgjwD8HvgjcAfxff/+AHyQj9977LR55pPn3DyGOA0xTJwgCpFTQ\nNJ2bbnoHDz/8IO32LmDS653l4MG7uPHGJHo2n88TBAEXLlygUtlPKiXZ3JQ4/SGm7aOPPBYrU3Qz\ns6ycvY/ZvbcQCJe2fY6S6uEHZTTmAJ0hIRlCdAIyKGNnxySRVxJRRiFG0BtPS0gsyhSwgB4hXQxc\nNrEJCLEQRPh08fCRzJPk6SqErFJKWXQ66wjRJgjssZdGC5CYZpVMxsJ1O4xGgkQCGyNlijguoqp9\n4ljiuja+b9BsRnQ6HVz3KuLYY2PjMtXqNNdf/0ucODEik1lEVb8vUKxU5njmmXt4/vltpqauZWFh\ngjiOOXduhW73S9x99yd+7GTGTxOu63LfPfew8vTTWELgaRo33H47t9x6K9ffcguDp09heBbDrS3i\nyGA9UjGZ4XxsIfQh+zKS/ZpOY9jGLxdJaxqqqoDrkpOSKPQJgR4DirSIsMgyxMJHQ+ESEhuLEIjY\nZIIOBVxifFpkcdlDRAqNLho5TFwmcOhRZQT0KJJ48CZS1IgMJhnWaHEN2jjdRpJB0sMgR4SBwwoa\nPiU8ekjkePYjg84ecuZBwriBaWjk/RSzmkU2DCkIlV27TdsoE6yd5ukT93Pt9bcDsGfPfh555MmX\nkZEbbzzGX//1Sd72tqt54onnsG0tiTkIn+eTn/zUFdMQrK2tcerkSex+n8UDBzh85Aj5fB5VVTEt\ni2Ixx9H0DDu799IOu1ikiKSDIg022iYd4VJNu6jxLpW0QTBUSWkG9khBiZYwOMsMI0wS348umyhc\nheAAa1wmQx6THn0kGg4lNEwcXHYI8cmRZoiPQoY0MVksNEJqxMTswcFGJeYF0/kIOZ6v03CJkGMv\nVp8UGhoGNgU8/HEFDfJMEaJqBmGYYUSNJUWwGQTkAU0IPAlbzTMIfS+5Xpdes0MhU8DXRsfZCXkA\nACAASURBVFx94BCalgive+0WKyvrWFYB13XZadnEnYtMGlm6ToenHn+cg9ddR2Fm5udVkR9As5no\nRc6evdIrSSoj//7fX+lV/HTwasiIL6UMgVAIsSKl7AFIKR0hRPxa31hKeVII4QohHiJp+fxI8eqx\nY4f4+tf/kn4/IR2VSplcLkMc1zl8+E5yuRyVSoZ+v0WhMMmdd36AZnOLTqdGsXiMT37y4y9eLJ9+\n+hnuuefbPPnkCr2eJAi66PoBnNY2OTNFGEIYhGQyBQ4Vp9AnepT3T+L2YloDH5cMJiCIgRiLXRaI\nyCG4ajxuu0ySyBuPw8ZzSC4RkSKHjYpDzACJiyDAQbCDQ0xIlsSsOkOiB7kEdBEix3CYIo5XCENJ\n4nIRAzNAiOc9g6LsxfczKEqXODZI9Pc6Um4TxwOiKPEOdZwmcTxBt9skDA+jKB5x7LO2ViOOYxQl\nRxCEOI7zkorH1tY2hw/fTi6XfDApisLMzH7W1h5jfX2dpaWl1/rr8A/G17/yFfrPPsutc3MoioIX\nBJy45x7SmQzX3Hwzp2o11i/3CLNTrA/6IJbQpIYuBFpcYtXpohl1hkTEmsZ52yaIIlxvl9V4wGhs\nHG7QpURIDp9JppBo5PDZh0aTIQF9JukwyQSCHaBCDpUOdZocI8AgyzYGKXLoBAxJk6eFi88iiQC5\niSDDLm2qxONUYMEIgxImKWBrbAeeRsFhF9iHxCJEAjVCVmkPAyQuuX6HRV0gY4W5dI7qxARVx+ZR\nz2FP6ghnH7uPo8dvRdcNVFUbE3n5kvHc66+/josX1zh1aoWjR/fQ7bZQFIe77/4jjh9/41s0AI89\n+iiPfeUrzJomnVqNhz73Odqaxm3vfS93/PIvM7m8jNJs8eiDz1CMdXKqQTPsYZMnqx4gUq1xtXCW\njc3zaKbLqOfQD7bxZZYUNWaBNAoSnRQWKRx22RxrO9JoZPAYMUJlPzoVJnAZYCEADQ2VgCwCFZUI\nh4gQSYg69oUR+EgKqLhs4TOLhkWRgDYODjYZrkanTQkDkxJDdilhA1u0GeFi4IQe+rjZ1xCCORlj\notJHpTSmSFtxiCdVijkFvB473Sb3PvkkC1NzLO+poAuBO+rRaF7m298cMKmVsNND9NADJeD8pUs0\nymU+9Tu/c0X2+82Kz3wGfuVX3hzOp4cOwaVL4HnwBppkvy54NWTEE0KkpZQj4EXFmhCiSHI1fM14\npXHeH4ZarU6nU2NlZQUhykSRzZ49If/6X3+SqakpAD7wgTv47Gf/htFonlyuhBCCXC7kE5/48ItE\nZG1tjS984UGmp69jaiozfu1nOXPmfqpqlY4uCIIBphmRm1CIA4+nnn6aBUVhsl4nDALS2GNj7xQh\nIRZ9DHxyhGhj++4KyV2PMVbTu4CGpMMIkzxDXEZYKAzRKTCFyjYNQsokXqt5En/IaWAdKQW+v04y\n7DtLsnUvhCEkWZ+Oszs+vkOSvDHDC6kqcVwmCBpYVh7HaaOqIapaQMqAOLbHDpZZut0huu7g+31M\n8/uVkXp9jVTKpFSq4nke29s79Ps2hUKWMDTp9Xr/8F+A14hut8vaM89w68LCixdQU9c5Mj3NYw88\nwMfvvpvVc+fYffBRIi1NS6kw1Ay00COlmWiKgilKXHR2GZgK1xcKXN7ZoRKE+FKlhUaagCoxGgVc\nulSJGDEkS5EeQ3xGqDTwUMmTRZAIhyFAR6EINOkhmCAmRmWIwRQTCDpsopPDB5KoPYkkIqKBjmRA\nYZxbMkAhjU5AhM6ANAEuJss4GDCe1YEZJA6C/ePXeoQ4EPh00SIDRVEwVB3dabFba9KhzokTD3PD\nDbdRr69z/Pj+l/mEaJrGxz72q6ytrbGxsUkqdZiDBw+8JEjyjcRwOOSRr32NG2dnOfvcc2w89RRz\nUmLGMbtPPcXfNhrc8qEP8e2tLerBg0Siy1AVrEUGoZjHESaaopKzCphmm4bTY7XfIq24WHTxmSNF\nD4MMIR4CB0lEliwqQ0bYFAjp08VGI4NKmiIeQ2JiYAoDh5g2gioxLt5Y8rqOjkMFlwYeHhKdAxSo\ns4U39hWJCVDYImYKB4s8KiV8fCCiiE2HKgYhHhopPGwCtcVbp/eyNayjuBE1X0Wo84hIIYeBEdfw\ngwyrjV1SSpm9pWOEeshWq0e90wHFZoCNohjYLYesaWGl5nDyAVEY8bYbjuOVSky/Ga66bxK4LvzJ\nn8C3fhK15E8RpgnLy3DuHPyI7NGfCbwaMvJOKaULIKX8QfKhAb/5uqzq76Hf7/PlLz/IDTf8E669\n1qfV2gbAtmuk09/PnVleXuZ3f/ejPPLIU2xtrXLs2BQ33/yRl5SUH374KTKZJSwrw/z8DGtrz+E4\naRQlYhB1KFp5iHVq3R0mgxornS713oCAmChIxm2ztBmwwoh5JAoSF4UuWSIiBEWSCRgNEEh8EtWH\nSkyPy+g4xOQp42MRMmIGly1ypAgBjy2SlApJ0oaxx1/nSELldZJhX5VEerOHJFprc/w99fG7O0AJ\nWEBRBIoCQdAik9EJgnVUVSMMzyJEEUVZJo5DXLdDtaowMxPS7V5mNMrh+30KBZe77rqFU6fWuXCh\niecZ6HqK1dUtHOckv/Zr171Ou/9yDAYDUorysgtoIZNhsLZGNpvln/3u73J6tcbzp9ZQ1mPKcRbR\n3UWNY6IwRDEMbKEzoUT01tY4EkmCKCaQE5SJaWJjMY0/vlRNE6DRoY9HSJaQEiYODo0xEUkh0DAo\nAhEhiS14wJCYDgVCFNqk8DAwialhUwNyaIBFA5MOIwyGqASoKGTHQ6VDIItBHhdvnIyioRIDOjFt\nYiZRcVC0KlE4TYcNimg0wxEZ18EeujiKIB7V6AvJffc9xJkzT3HXXddyxx2vfOcrhGBpaekNrXj9\nMGxubpKPY4b9Pk8++CB7NY2MaYLv8/SJE/zC1BTnTpzg+tveyQOPr3G6+y1mNRPTDvADlVgq+HGP\nSmGaQsFiNdokRURGCOI4YsQKUECQR5JH0kdQQ+CgomIQ4BGQoU+AgTLOLQrpAwYClTQT1BiQQ8FA\nIaBPH50+y0g0VDRiLDwa9DFI0UNFYxIfFRUHE0EKixgVSW+c3J2E5AkC+oywURiRZUDZEuiWxWSU\npmqlGHRs/NAhqdC4yLiFiBdxA42rqhWyms7U/AJhaHP68gU80eQ3/tm/ZXd3jce++TeEVKm3O1y1\nZPDrH/glitksD21vX9mNf5Phv/wXuP56OPojxy3eWFx3HZw48Y+AjLxARF7h8SZJxtTrjtXVVeJ4\nAsOwMAyLTCbRvXY6E5w4cfYlZeM9e/bwK7/y/h/2UjSbXdLpxMRraqrCzEyOixc3yOePEfvP0nEe\nY6FSIRVH+IOAdqSTNdKURx6ZSCdNyBxDHmMdnyE+2jhafDh+B0mBpKbRB0boSNK4pOgTY2KRwyQY\nd4UnSdF9wTOAkBCNhFCskRCQNIluRIxfsQe0EOIFAlJCyhcs0g6Mj3GAHELkxtM2Q0wzh2FIguD8\nOOgv6T+bpo6ULqPR04ShII6H3HTT2/nAB34RXTcYDEZUq/s4fPgQ3W6XL3zhjwjDI0xOzhLHEb1e\nh3y+yNNPn+P61zGX5AdRKpUYCUEYRWg/oCBr9npMzc0hhMCyLH71w+/j/2z9DT27jtPTWFg+Srvb\nIAwHmDnBZEOh7EvywIxpsSY11EAnxKaI4CIxKiUc8lxkhGCHHCoWZRI7fxcPhwFNpsigM0QjJkan\ny4gIB4N1plgjTY6QmKT9FuDio7FECoHKNgcZchGVNiEWNbLkCTCp0aFPCosFHLYJ8QnpopEhZIRg\nhI5JhINEgdghQqfHkFk04sjmcquOrWh4ah7LLJGemCE3eS2+v0kqZVIsFt+QfftJoKoqoZScefZZ\n8nHMxHiKy5SSMtBYWSEyTS5sNen2CpQXP8T5C9/BC4pIKujaFBg6oWywu9vCUK+iGteZkioKBXQu\n0qSOT4DBNBJnPM5tAypp1sigkELisYtDFo9tDASC/PimY0CfEjUqGNSI6KOwZzz/ItGYoY8c++6e\n4jpi6mgoFOkhGAIqHVwUXHLYFAALny16pBmQAYooDNCx0Z0eF2vr7JkqEdgRFiDxkGqAFBFlZQ8o\n0FcLVCtVunFAF5VMfpqjN8xRb16gWp2nVJqis/osC6qGkFnK+YByPs96rcbyzwPyXkQcw6c/DX/8\nx1d6JS/FjTfC448ngtqfZbyp7eBfQBzHwMvtpoVQxs+9euzdO8vjj9fJZPIIIThwYB8bGz06nRUW\nF29HlSP89gbrq9vU+zaRB/uCAXswCDCJ8Jkm4q0MeAoPlzRFYtZRGRKRI6lXXABMdEwqCFS2kKjM\nEBHTweYwmXFCZ4xghIvDiAIRMQn5uEBCRKpoOKjkCMgT4wCXkLJCEp+XaAaScv0Lfq0pIEJKgCcR\nIosQk3jeBQxjABQxjCG2PcA0lykUMszMBAhRR4gM7XaWe+45Q7EY8pu/+SHm5+cByGQyLC3NUqv1\n2Nx8ACEk+/fv5fjxX+HSpSewbfsNGfXNZDJc/fa3c+KBBzg6M0PGsmj1+5xtt3nfeMZtdXWVe7/0\nRTK158l1W+w0FIRWYmF2nsnpRcLgMspA5flRiB0q2H6AKcBSFJwYOqj4zJKjShoXFx2BhUuDSUJq\n2NhMo2CzPR7DTezsajQxaTOByQ4l2rRIoVAgRYkIhQYxNgdRx+RFJcm2sUkxjURDocOIEUOGTBIz\nSYo+Nml89qDTRSOFjo6LRKGHgo1PiiynyWu7pGK4KPs0owg3shkoaTQlTWFqiePXfABdT9HpZGg0\notes9/n7OpPXE4uLi7ipFDs7O6iGkUx4SMmG57E4P4/vOAwGA0ZKGU3LY7sdQn0RxB58d5so6jJX\nOUQYdun3R1iaR1GYyCgCVceIKhxnhadpUSUkg8IAQUSKOcooOIRY46pE8nnUIkeRYGx/1qWOicsi\nUMJlRIYsEYvjsd2IIQEOWWCKOgWeZIhFkz4+DiV0DCQNAkJgmYAc0EMSEnKYHrvkiNGZwCamTI9+\n5GEOBmw4HrEf4COI1AnqUsEIU+haDxmGXNze4cb33M6NY9fd06dPYjtbAOi6ydI1t7F78gEmhSCM\nY1Z2dtgBfu2OO96Q/f1ZwNe/nrRFbr/9Sq/kpbjpJviLv7jSq/jJ8TNBRpIPygcJAh9d/37CaL1+\nkYWFCvfddz/T05McPHjwxwbpve1tb+Wpp/4fGg2LcnkGyzLY3X0aUHHdKUDS9mJqnoNlSqTfJaMo\nxFFEhBwXZ5PL/hQ+l7DQyJNFp02XHUbjAHG4TIksRRwUXCZJkcIkYESLPlkmMBniUqeFjwUcJhkN\nfYgXtABpXEwmEBiESGwyRMwAJ0jaNhMk2pBpkupIlRfyP2ESVV1ACI04rpNOa5TLBzGMI7iuwsKC\nQq12nlbrGRqNENNc4ODBG6nXdXq9Da655jo+//mv8qlP/fMXXW49L2A0KmCaGUDQaMQMBvZPZ6P/\nAXj3XXeRzuV46sEH8ep1itUqv/Rbv8X+/fs5d+4c/8v/+G+pDiOm0wsU57PMT7c5u7YBqsehhaNU\nc8t85pnH2IjmmYgy+EJlENWQcQcdgxoaGiVUlHEiiUUdFYU0PTQ8coRESNL4eKwzSYcmMT4CCWgE\nOGxRRZKjj4GGR0R5bN+fJeQRfDyK+NSZJINFGROVBlkURoRASJ0RDjEKFXQKiS+FqBFJBUGAYIBg\nCZMNFuIek2pMysxCNGKYmaSHRteYJ1e6g0g1AA0pY4SIMYwitv0P27/z58/zzW9+j+3tJuVygXe/\n+0auvfaa15WYGIbBL3/84/zh/feTVRT6rRaYJtlikblCgb/b2OCqmUV0Zw7fP0kU6ZhWCU1bQBtJ\n4riGoqzhedvk8wVKZgWl0UC1HaQQiChpiQzIYBNjEuOiMIOgMA6k7NPCpY/CJCZLqBjU2WGEhz/O\n9lUYElDDoM40Jg18IqZQUDHGpmkObTJchUaaBjso9FkiJmSAN17FiB1CGsToOMwS4aIzT0TECIOQ\nHpdjk2tRuOi6BMVFPF0hcBzsUCGIBAWrxrSl0HVt/FyeoRsTx5IgcNH1LsvLRRxnSCqVZWn5GFYq\nxzOP/r+k56bIXncdH7/lFiYnJ1+3Pf1Zw3/4D/CpT8EbxL9fNa69NtGMOA6kUj/++DcrfibIyMTE\nBL/4izfxta89gWHsQdcN6vXz7OxcQNdvxLJUfP8SpdLDfPKTH/2RY2jlcpm77/4I9933EOfPfxvb\n7jM7qwJ70LQYXU/jOh6+d4mJ7AJhv8tOlNzVKMQwdgToA/WxA+oOAzRMMlTIM6RDb+yqatBjGo0c\naVIYRAgcdHr4dOmioyOJMcZJGC/oQSSMvSgssiTVDh+FGBOBQw75YluoT2LTIkiErzV48bnLCGGg\nqhZCjLAsl8XF9zA5eYDV1TOoap9sNkUYziDEEpXKIZrNBq3WDuCxtfVXHD26l/X1dZaXlxkOh2xt\n7aAo01QqSbXE80Z861v38+EPH35DHVoVReHmW27hbTffTBAEGEZCUm3b5nOf+zKGb3LV3AIAsaxi\ntC6z9+0z3PfUSR47rdMY9LkUVikZJTqezyiMUON5HDRMbNooGOOsXdAZYQAxKurY1G6SpMGjo7BL\nTEifJVSy+FwgIYNvIYeCZAeDIgEuISqSNjE1BF0MHLJMsYHPNCkgNXZXHZAC9qDg0EMZy1gjPBw0\nCnIClRiFCIUODs9TGAfwKbEkb2iktSq+muIpd4gfCjqdNsXiHJ1OF1V12beviqJ0XhSAvxKazSZR\nFDE5OYmqqpw7d47Pfe4blMuHWVy8muGwy3//79/FdV1uvvltr+OOJ5qwT/yrf8X3vvAF+t0uwWCA\nnkrxnUaD6g03cNPbbuSLX3yaUmmOSiXNE098AcfpoSiCfF5h//4Ss7N7qdVa4Gp0ADvaJXIdarTY\nZgqFJVQmEQgEK+zSxGFAmpgRLnmKxOOap6bq5NV9rPm7xMhxE2cHCDFIY6GTZhcfhReca5Kh/4gs\ns+gIdAYItjBRiOmSR8fBokCFmDQOMTYhMElEk5AASZ6ABSxaFGSKlNth7oZ34gwgaO/Q3jmLFcdE\nYkRWz1EtV5ma17l48WGmplwmJ1P8+q/fQSaT5r/+168ThkWE0InjNh/75K/yoQ+9/yURGz9Hosm4\ncAE++tErvZKXw7LgyBE4eRJ+TNzUmxo/E2QE4NZbb2FpaYHnnjuL43hIKSiX38v09OKLx+zurvK1\nr33rFU2RfhDT09N84hMfIY5j/uqvvsrk5FvQNJ21tYsMh3VmqirpaJ7NnW1Uv8Ucgjl0eoxokVCD\nJEE3Mx7gm6ZLZ2z5PaKCxxzwGF1CfHR0QkJcHCI2mWVANPZlHDGFzwSSLBEHSLwntoAnx7m+OcR4\nkDhigEYdDYMACRwk0ZA0SSoqDRJNwqGxIVyTuTkbw5hjcnKOweBZDCODqurMzCwzGDxNNns1YbiC\n5+WRUmE4hCAImJk5xGDwNGfP1nnwwe+yvLzMyZPPcfTo27l4cZVOx0HTcgTBgDhusH//e37qe/5q\nkPyc36+Wra6u4vtZdO37jylCoKppLp0+R2HyEJVjt7HzyNeZtFL0my0iYVGTIESKSMok4Eyfxwlc\nQpIRbgsdAxuNDiNmCCkBfVT6aBwlYJeY0xhMoLIDzBDRJqaPzgCPDAEpNEKgj0cPjQ4+OXaI0YkY\n0cVFYJFHAiEePTpMo2JRZJchOmlGIqQpDQwmEdRRmCMSQ4qKwYTpMF80mCgW2Nz1cAIHV1XYu/da\nVlaeo15vkM3OcfXVe0mnHd761quoVCovO6+NRoMvfelv2drqI4RKNgsf/OCdfP3rD1GpHHtxvDub\nLWIY1/HNbz7OW95y/Uv24vXA7XfdRa/VonX+PJrr0g8CKtUqH7v7bqSUfP7z9xJFFVx3i3R6Hkgj\npUEqlef8+VPMzc3zznfu48kntnDbaWptSdNtEaCiUUIiERhYZFE5Tsh5QoZAl2Ac5RAgaOGjyiJK\nwDhpqofJGjpNLEq4DKkTMgk4NHHQcdAJqaBzFEUEhNLDosYkAWV6NMf2YgEBPm0UzHEgRIKYLhFT\nGOjo5NGUNIqh0xkotC72qFYXGbYeZ07qTJgWfjhi5LToZasYxiHgJIrS4v3v/zXa7S47O3U++MF3\nEkURvh8wP38bc2PN1c/xUnz60/B7vwe6fqVX8sq46SZ47LGfk5E3DHNzc8zNzdHv9zlx4iLz8wsv\neX5qapGzZ7+D4zikXkW9Kooidnfr2HaWyckqpgqDwS52d5fIl6hynTJDyqrOIPboyvjFy38dgzwZ\nVulTx2TBmGehXKYzPMOct8l530fHIWQFnwE+Gio+WYZIFnFoIQmooxJhkLRaRiQjvUlfGS6N+8ez\nxEhU1knRoM8kCR3aIWnNTJBURK4hmbaWgEEcNxgOPVKpIY1GnV6vRbN5HwcP/gKGoRMEPhMTFcLw\nNOVykVptGyHyaJokilyE0CgWF3n++Rq9Xo9ud0i5PMPCwmG2t1fp93vk87PAnpe0z64koijCsrIo\n+RLd0YBiOhlF3d3dYHV7wKA6Tf2BezFcm4xSQs1UcZwBAZIeeaSIEELBsg7jK88QBzYZUULIAWq8\nwaTicSluA+skY7QzxKTQUDFoopMmQ4oQwZAKAS4qc4S4aBhAlohtVDrEWJQQlCig4RLTwGGXiDIW\nIQEWAT4FighsCkpMX0boMo2KgUMLBRWFQwjZpxt9F0/NEk6keXpzh75tMhIWbb3IcOMs8/PL1Gqn\n2LevwKFDgre//RpuuunGl51D3/f53Oe+hO/PsLCQiMNtu8fnPncPjjPkyJGXxswbhkUQaPR6vVck\nNj9NmKbJRz/xCTY2Nmg0GmQyGfbt24c+vkp89KN38kd/9J9oNCTp9FvQ9Q6WpREEXY4dO87sbIZ/\n8S9+hz/90/+Dzzz+DdpOFzPOEoklbFlEEiOp440nZCQGkKZNnZFSZBAPGDFBhI+ITWJUgvG0m0aB\nCYqASwXJCFhDx0IhpEWfMjHvQFUFbrSNxWX24qOiUSSHxYgBPn06RETY+EiK41dqwliRJhmiM0KX\nEdueR0OaVLUqKXXIzOQevM6I9qiFqkpmZ2/BUFWGwyHdbpb77nue++//X9m79xqOHj1CFF3k2LFJ\nPvaxD/+8GvJDsLUFX/sa/Of/fKVX8sNx443wjW9c6VX8ZPiZIiMvQEoJiJcx+ORrMX7+R+P06TN8\n+ct/x/p6nZMn1zGcJlelDezmEN122eldQMYjlkxBVgZ4AvZESY1ClRIHsIlIIdBVlx1K2K0uui6R\nhQKtrk8lvcRObw04T5Y9TJJGMsEAHYUJHFbGeTQWCQmxSS5yOpAmIqCES8zz6GgYRNRIkQhbMySk\nI0XiKaIBD5MYpU0jxBaGoeM4Gq6rEIbnMYwsntfk29/+c0qlCY4fP0qrtY4QNt3uLt2uh6r66LqP\nbffJ5SJuuOFqpGzQaDS46qp5zpw5Tak0zfLykRfP5dra48zNzfxEe/rTwvz8PEI8wFVX38bzj/4t\n5U4NA8HTl57HLRxgas9xsrvPoesZnry8jikrKCJLBsFQqolLrvQIwzNABkVvE9BEo4khfRrkkb6K\nqoyQZInjFpI0BiME/tgSPk3ALrCLj8THxKKMzgiVLVQkOgKDAVNIJGryXsyRYZsNtrFI0yUmi44q\nMiiKTlXPkY1adII6Ayx85omZIqYBSBoYbHg9jkwu0F/1sEWKHQQuB+g3BzQaj5PJlNjYaPK+9xW4\n+ea3veJd8MrKCt2uxuLi95N9M5kChjHLxsZ3XmIPDxDHEeCRTqdf591NIIRgYWGBhYWX3ozUajVW\nVjapVkusra1imk1mZuaRMqRSsXjXu25hZ+dhLly4wFf/+htkpIpROsigFdDz1LHfbQ6fxPRLxCkQ\nMZ7IEzCPZJk1dtAJMDAYcQaP5DMnRYlJRugkLj8tAlIEGPTIkyaFyiQ2O/wtw+g6OjRYYBMNFx+D\nOgKBho5KFo0JDDo06LOBS56YuXHGc2KXGNDHw+UxX2WERbP5JH7H59pMgT2zM6yu1jBTBXL5aZrN\nVVZWnmd29ij9vodhzDMaTXDPPV/HdUd8/vNt7r33Af7gD36fI0eOvNIp/0eNP/kT+I3fgDfz0NmN\nN8K/+3dXehU/GX4myUihUGBmpkC7vUup9H1DnmZzi337pn/oh2Icx5w6dYqH7r2Xb33zEeYP3MaR\nI3eyduHPEbttLl92MIwCUTBiWityzu7iqJKyZbIVhGQRxHFMW0o6oowuFhjFKkHsowpBU8mTM7uk\n81XmRRFCmyljgrN+RIoy4Vj+aiDGnp4TGPRwaZBUQwKEmEfKPLCFy14alMmwjUvMCB+fm0iIywSJ\nO2uLRDOSIskeXEeICEVRCcPsOKNmFSigKPMIsYiuR9j2JufPP4Nh5JidvY1ebwfbjnCcNq67yvLy\nLO9730dIpXS++93H+cxntpib20MQbLO+rlGtLhHHEbXaRfbvL7C8vPx6bvmrRqlU4s47r+O++55h\n7vg7aDc3efbMIwyq+1hYfBeKUNAFTGYr5MQ52t6IdGqZKO5DvINgLwBhOCAMLCAkEA3y2Rxaegln\nNIMiDVKpZWx7k2QEu4tFk0l8wKSNjsEMgoAhJtDDJcJEI880IRE2DQQWfXxUGqQYopHCRMEgTZsC\nHlVS1JGykIhRoxUUcuRJs/vi3FabpIqmEitltvSY//bEc8iwiiemcMQyvhOhKCaKYlMszjM19S7+\n7M++ydLSPO94xztedg57vT5CvPxvKJudYG6uytbWaRYXr0VVNeI4ZmPjDG996/4rkur8Amzb5rOf\n/SKwyHve89t0u/836+sKZ848M9Y7CU6depZcrs+//Jf/GxdP2QhZxg8VRpFNSqkiZEg4HucO4gGG\naaMbcwSBA8EARVyFHxv41PEpEDNJUpEso+ASo9DAIWYRgY6gjskImzpFCqTxMRmwHl/c4AAAIABJ\nREFUwvdwMIgxcSiNc4kUQMcmYhNnnEFTxKOMRoSkiakO8SIdD0Goqqip2xgOL6KgISODYeSyPTpH\n5G8xM5MjjiNsu0Zt2GZ++QaKRYPBoEIURVy8eJJOJ2R+/mYsC5588jx/+Id/zKc//T+xd+/eK7aP\nbzbYNvz5n8Ojj17plfxoHDwI/X5SxZmdvdKreW24YmRECPGLwKeBppTy5Z+IPwYf/OB7+Oxnv8TG\nRodUqoDjdEil+rzvfR/5od/zjXvu4fLDDxPsNjig5PBXn+OZxgbFlIlZSHGp2UKGCpPZMmlziXoc\n8LyzieHr7J9c4HJnm5Hnc4EMM8oc3VjgY+LJadS4hSrXyKYPstPyuGZ+is0L9zMXDkljkQLyBIRo\nOOhjSqKTYhKJJGCHGA0pOyQmZknmTMAsXZZJ9CA2iftqQFI9WSRpy/RI3DgT51YpV5BSJZNR0PVJ\nhsMeun4NQTCgWLwKEKRSBwjDRzCMCCE2qVangBVarRZzc8e4+uq96Lrgy1/+b2SzVZaW7sTzRvj+\ngKmpNsNhF01Tee97j3PTTTe8obk0Pw7vfvc7WVpa4MSJU9RqU6j6XsJTPisrJ1HVacqBTdkqUVZV\ndNUhnXXQ1R5dr0zohwhZQoZrWKKAySFipYtGD9+NcEcBmpEnihpEkQOUkTxLRA9nHHxnMEWGIi1G\nqKhozKCyRpkCMR36hCjsHxtqQZceIetUkKgUCRkhKZBGMsCizgaSNJIKCkN8PFymUZQ8cTyNrk8D\nXVKpKRRli5Y9wDBuRdOmUYMARRkCKaRcIZ/PkckUSKUO8td/fd8rkpFKZRIpT77s8cGgyV13vQPP\nC3jkkYeBNHE84tpr9/JLv3TX67yrPxqnTp3GtrMsLiYVOtNMo+su2ewBJibK5HIFnnzyW0i5iT1c\nJq9ohCgMbElEGssIMAKPQG4jZYOYEaqWJ5PJ0+uNgIgw7GBxkRQBaaaAmB4RQ5Tx37WPZAadKj49\ndDQEJQIUbHqMMAjQxhNQFfp0KFFFwSFpw0zioGNTQqoGQnSSyR2xScbIMpsLWGs3GUbTqPoUrn2K\nAlnK6hSx72KjUEPB7LeYnc1RKldx1QKGprK0dJxebx3fH2AYGYZDl1TqGoQwUZSAiYlF+v0RX/zi\n3/Jv/s3vXcGdfHPhL/8Sbr0V9u270iv50VAUuO02+Pa34dd//Uqv5rXhSlZGHiERObwmY92ZmRl+\n//d/k+eeO8XOTpPZ2f0cP37sh9pV12o1Lj76KDcvLfHoTotipkgqlUW2d6iPBsR+QKgUQNOTdobf\nw8CjQY6zQcx6c5dBHKKrEIbTOAiisRH0hAjwRZpQmphaBl0r0HQCdCumgpE0X9yQLDohw7E9/IAI\nm5AJYpZRySFwx2r704CLYZiE4WXieImEcCQW4QnpiMY/WZpE8JoBNki0JA5x3CKTqWIYS/R6bXTd\nRUoXx9nBMEqYZgHPMygUFllaKnPw4HFU9Va2t1ucOXOe558/wUMPfZU43kc+X+Hv/u5hrr/+MFdd\ndTP1+mP8wR/cjWVZvFkxNzfHU089y7lza3zvexdpt/OUSgeQssVad0g4eoS85ZBVdPxwiJuZoVKc\nZ3e3jvTbWELFIoumGujaNH1vHY9ZFEUjjof4vkOyBxl0QibJowM+GhJvPK5dJk1nbOwdoSGxGaJQ\nJI9JiI+PQGeKITZVhvRoEmJikTTqBgS0qAKT6KiE2MS4SGrIeBIhPBSliaIopNOzjEZtVNVAiAGF\nwn46nSZCqCQxUqMXp6BMM0+zufWK5255eZnFxTTr62eYmdmPoqg0GpuYZpu3vvX9FAoFbrvtFjqd\nDvl8nkKh8MZs6o9ArdbGspJ1DIdDdL3K4mLA5uZZGg0d05xhdrbCyZO7HNh/lJ32ExT0LI1hnTgs\n4sfbpIx5jIzLREmn2+0yNWXylre8g29963tsbY2AS0zTJERnxCoWM2TJMmIHjymGxJjkiImI8ZDE\n+GgITOoI8hzGeDGRaoIePWqMKBOjkMLFZQuTkEksPQXYRNE2btTGd8GLNAK1wuz81RhGh96Kz5y1\nlyCI8XybeS3FelRmI9oh3L1MxQK1YDG9dJhOZz0hU1adINiDlALXXWE0mkBRBLOzB4iiHBcvrr+h\n/jFvZkiZ5ND8x/94pVfy6vDOd8KDD/6cjPyDIaXsAj/RL32hUODWW9/+qo7d3t6mKASKolCpTNBo\nNkmlspTTec4Nu6w0L4Pr0I3T1NwNrMgHYZERe8GIcVICN44oVktsXGhixGAoKuk4hyosfCWgg4If\nKQRBk9YgJh+q9HWFchSxxSYXySOZGHtUuJiUCTAwxSxSQkTiGxGyF8NYp1o9ys7OCeJ4jcTTNUkK\nTizeAxKCkugFkhHfZAYjMTzLoGkzxHGDOK7heUXCsEwUtYmiTXz/MPm8jqaBpumUy4llvqal2Npa\n4cKFbTStxKFDd2CaJkHg8dhjZ7j99huI4xStVovZN3E98Nvf/i4nTjQZDtPMzt6Bql6k1xtRqVzF\njbe8hZWL91GpDOjuDpDmASbUWba3t5BygBAtFG0ZU1NI6QI/8BAxqGpELpXDc0eosSDGY0AXAx2b\nJRx8HEZoCCQFJB5ZFAR9Qpp0aSCRVEgjhUCXKRqExAg0pcj52MNCISJmhDNOBfZIsYyLhouJJioo\nMkQRbRTNIgz7QBZNKxFFQ9JpDcPIEcc7OM7z40TWXWCApikvkpHhcJN3v/vAK547RVH4p//0n3D/\n/Q/x+OMPE0WSw4cXec97fu1F4pHNZl8SonilsWfPJI8/fgpIBLiKYrJnzwF0HY4cmWL//mt56KGH\nEEKlVK5SL+SIhz5lQ2fT38YPN9GMEUuL17NnzyE6naeZmdG4fPkShrGXfF5n2P0OQ0xUFlEQ2FzC\nYIRFzIgUNuDSQcNDYNPEpISBQw+VCWx2ccbJNxIXyQTbZOgwQKGKi0fENOCjaSVMcxFd36DdXiSM\n6rjCxTRNhEjB6CJlw0QKiAnJGRpZTSPtWeyKKlgTnN/UyXsh1epZFCXF4cNv5eqrF/nCF/6Cfl8j\nlTpGtzvCstoEQYUoCqlUJn5ORMZ44gkYjeBd77rSK3l1eNe74E//9Eqv4rXjZ1Iz8lpgGAbB+P/z\nC3OsXNqg12swiiOcbgNVS9FWMhj+BH7sscMuaXwimuSVJUxjAi0K2K13sdQBWVJkNVjzBgip4cgG\nKSuNH0f48SqDnsFccZrTwxqOm5SCfTJEpIiIkWTHVZEm4CfTLdGQOAZDnwIi+v1phLgWIYZIGY7/\n7ZK0awQJ8dgF9gL7SYjKeWCEEDsIUcayWkxNHaDRSBPHJqlUBcMI6XQe4xd+4Tbq9S0MI9HTtNtt\nHnjgezSb50inj9Nutzh79nkOHTqAZVmoaoG1tQ0KBe9HTis1Go0X75qvRMhWFEU8/PCzzM7ewOnT\nf0Mudz2ZzCS12nm2th4ln59lbj7LRz76fk6fXuWxx9Z4/vnvEQQKqtrFkDNEcYq+r2KoklB2qKQr\n9BQXogbFUEMVGWxpY9Pk/2PvzaPtrM4zz9/+xjPPd56v5gmhWYySTGMM2GATz4HYjhM7jiupVUmq\nq1fVqiyvrlq1vKp7pd1d1b0Sm7KrHMfGxlUQA0kIxgxGQgg0IwnpDrrzeOb5G3f/ca4Vy0AFbEAI\n5/nrnO/c79x99j5n73e/+32eR6dnRR01jUDgkMUgj0cXVUoEmKcbF0MxyPlNXBw8aVJDUCOJRw1V\nmEg6gAI6q2nJgBtIEiuE8n4EAXRiGKaO57chlDk0LY2i1EkmE6RSUYLBELlciHR6LbWaRy43jmHk\n8DyLrq5VSCmZnj5JODzHJz/5+6/bh8FgkDvvvI3bb78V3/fRtHf3VLFp00aeeupFpqdHCIVS+H6T\nUmmacLjJ8PBmVFVDVSWRCHhek4ENO5gbO0VANAg0cjhqmUz3TiKRNIXCCdatM9B1nx//+CC23YOm\nGfiik4ZMEkclRAyFNBYXV/x2HVQELlN4bKJV06VTJIeggo6BTTs2EXw0WlPvCFCggUYrsxlDAJIi\njUYZKes0GjaKkiCZ3IXjvEIsZmLbs3hWnX4dFuuzqL5JKhxAAgVPQ9MhE1qHjkLHwHamp5+is9MG\nZpmfz5PJdOP7GqpqEIl0EAxuYHz8KB0dko985J9fqSF81+Eb34DPf751BHI1YMsWWF6G+Xn4OTu2\nqwZv+wwjhOgAHviFywtSyk+93f/757Fq1SqeDAYpVqskIhFuumk3Z86e55EXjqK6Lh19OwiGfBbn\nS2i+JOKaZJlDRWHUWmQooFGr1Cm4FXQWyVIj62XwUGkwhyZtmo5Go7CAL21cR+HYUh1EO6pox1MW\ncfw2ILyiwyjx0PAwacpRPCeCAGzpgF0BFnCcxIrvTBpFOY2UMRSlihBRPG+RVibEo3U0U6VVNxJH\nCI1IpExPT4J8XmXPno8wMXGBqakxwMY0TcLhOKGQw6c/fROapnHu3E954YVThEIZduzYxvy8STTa\nxcWLU8zORlm1aghdN5mdHWXXrk2kUqlX9bFt2zz00KOcOjWNokTx/Spr17bz8Y/f/Yao1m8VHMfB\ntj103URVNfL5RWKxNnp7t6JpAk2rks/rHDtWx/MMarUZwmGTQGAL+fw8zfIMuD6uGyJbyxMiS8VP\nY8RsFHsWiKAoETx/CkmAOsFLgS64SMI0OQ7kqaPjouMSpB2DGDEmaGIRAlwURUXIJp6XpVXE3ItF\nBoEOOCtCWSo6FXQEnmxQtRq4QiUcKaBpCtVqmXx+Ed+3aWtzSQZd7Oo5EskBursHgV6Wly8QCFTJ\n5R7h+us38ru/+6f09va+Ru9dDkVR3lX1QK+Fubk5pqenwckzevIn5PMWhVqTUDzDLbd8FE3TyeXm\nicfrbNu2hlzuDJWKxnJxhmJpASXSZNf2W5mfn2BhYRTTVDl0KAiEqVY1pJRI6WOYvdjNClUkQaor\nYxTGZQoNi6C6GVetULdHaOU5JS4W/koeVKxkPUBZMRAwaW0mkrQynJ20RPZ8PM+hVluidQw7j+No\nmKZPPJ7BNOvkK0WqjQUyepGcFaVUj1FVJBZNhjPrCIcS5BsVCoWLFIuSWi1EV1cPlUqe1auvQVWX\nOH9+kmx2ClUVhEIVfvd3b2Pnzh2v0cO/fqhU4Ic/hLNnr3RL3jh+vm7kk5+80q1583jbgxEp5SJw\n4Je59ytf+cqlx/v372f/r5AvCwQC3H3fffz1X/4lwXweHbA6Mnzgs7/J8ScOEQ9tZNSZwdCTzExf\nRHGjKHTQrqrYUmWsOIIiLSLU6dd8kl4OT+bIoVMRUJcqBbkaxbsG2zmBqg/QtJqE9fVIfxnbX0bS\nuyJrFKJFDF4GHFzquNKkVRNi0HLdjaxQlKsI0YUQEQKB7biujmE0qNdn8P0BWkFIlFbBawpNEwSD\nMZLJXrq7JeVynXp9mp0713DbbbsolUo4jkuxOMYnPnEzBw4cQFEUJicnsW2X1avfx/LyDLOzx0mn\nN2NZDaanDxOP16hUptm/f4B77rnzVf3reR5PPvk0J08WGRi44VKqd3T0HI899gQf/ehdv/TYvVmY\npkk6HeYnP/kJS0sN5uePYprDxOMhyuVzdHRcw6pVQySTvZw8WWBhwSAe1+jrG0aIdtzYAPn5F8Ad\nQxENJAIUm7DSTgiVkObgyzpRFPIIPFIIOvFRYcVxqJW5SuGtuIm4GEz75wmKCo6MrIwXGP4UGnVU\nBqlSwCNJixWloBHAJYRkApcC5grt1GUJoQgqFUkiFKIt5KEpRYqLk/Q7bdy4bSeT0xNcHDtCavsu\ntm7fzu23f/o1NUWuZti2zQ9+8NecPTvPmRNjOIUZupJw1/uuJR4O85PzF3CcUebmxhke7uXeez/P\nxMQUf/Z//L9MvPI8oapCuxnDDEeYOH+crsEbWVqymJ+fQ8oynjeN55lI2YllGahqHPQqVecEGg7g\n0aCJQoj2cJ6CexREGkERSQ2V1Aoh38IliCCIJAg0UCijIJAkcelFoYnCMi6S1iYjRivbGadVO1Kk\n2aySzQaJBFW6QlFWr9+AurBAZ73CUnkRKQQ97dvoTA6Qr5dRI2GWl5cJBjdjmlVUNU4k0suzz57D\nNLvZuPEDNBo1ms08vj+ClK+WS/h1xfe/3zr2uNoyDO97H/z4x/8UjLwpCCF2AF8FNgsh/h74kJTS\n+vm/+flg5K3AwMAAX/yX/5KJiQkcx6G3t5dcLsdzf/ssEV8ifVA0STCapmy3CJpJXUf3BaYdwVgJ\nHlajEtFNZm2LBAq+hAKCiFfGUidRlF6QElXpo+lr+F51JQVfpCVu1qKAtiacOVrHLh6tiaew8jfB\nldcEUtbxfbDtRXzfxfdnUJQBpMwgJahqCt8PoCgFDCNAPN6BaWZZsybKhz70BY4eLTE9XWRqqogQ\nClCjtxeuv/76S7veWCy2ch4tyGS6yWReJpsdo6NjLbrus3p1jFRqNf/qX/2zyyicc3NzPPHEs5w7\nN8GRIyfYtOl2fN+/JKDU07OWEycOcvvt74yJHrTqkExTMD19jra2XQQCKWZnz3Px4jyhkEZvbwKr\nVuaHf/UQeCHcSpKp/BkajWcJBDYTTwxRK1cImxF6UwU2drQxurzAcjNGNr/IOjVA1SkiBAjp4hNB\n4tGq42l5p7ayHBHEikG8TQ3JMHV5FlgNqMSYR6OOg0KMNlyK1PBoUXZDSCSGSGHJESQzVFFW/ocD\nXhRI4boW8USIQnWJQSWCWmwgbY8NQxsZtAaZrM/xhS98nO7ud4cOzFuJZ555jnPnqoTDa8kvHMa3\nTRZzgvPTL/K+7X3sGuxFX9/HPZ/6hySspmm0KxbxrnU0G2ESiTSNRhVrfoLlpVmWlwWNRgPT3Eiz\n+TLQhu9b+L6NlB7ST+KTosR5VPrxCKEiafg2GzffQLmyzPx8mUoljUcXHnV8NBTqQAEfFxUBlFBR\nCGJQx8cggUUMhRoCC5+tyBWfmtZckF0pno5QLR7nxq0x/uBT93BidJQz58/j5/P45TKerjFZyRHr\nGMIIqoiajufVCIUCqCosLGQplUrE421I6RMOxwgGg+TzLzM2tkC5XCYWi12J4XxX4f774d/+2yvd\nijePO+6Ar361VXx7tcWVV7KA9SjwjvMBTdNk3bp1l57HYjE27NzMU397nOVclXpTo2Y1qYk6UeFh\nyzBNmljoOAh68NB8SdETNEgSJUoQSR0dWwmDO4MjtmG7s/h+BolDK8BI0doNV2kVoras9Fqp2Sgt\nRswWWpPPBK2ApERrUTuFlC5SltF1HzBRlE5UVcNxgijKDJrWi5RZEgkP1z1FR4fE8yzm5pY4cuQJ\nTPNaEokBXLdBvV7Eslo1Ij8rQk0mk/T1JVlenqGtrZe9e9/Hyy8f4dCh76KqLqq6k7vu+shlAcXy\n8jJf//qDGMYQPT03outLjI4WaTZPsmvXdoDWMYTQaTab71gwUq/XmZ+vcuedH2Zk5BxQZdu2VYRC\n6zlz5hUyqRjPHDlKR3QATTXw6x52HRampxHGLL29B3CsLNFgCSEUzswtYYQkMc1n2VTIuVkCJhRs\nHzwVGKM1vm20siJZWhTsJK2jljStbMckrfGOoZInShyXKBYNbIqo6LQC0E4kApcanizT+s600aoR\nMoCW2JeKiutVmMhWUd0mw3oHll3m4HPPEY9mUFQNL1RkYmLiPReM+L7PoUOn6O7ezZEjz1IsqfSm\nNxELC8q1GS7OCzx/lsFfUKqanJiAfJnurkFmZ6soikKjUSepJ5nNz1GtVlCUDWhaB6o6i+vGUdUE\nvj+LImoI1cV1bRQ60EQnChbQTb4xR/X046hqBNuOo6rr8T2BpAwM4/MSPnkUkhiksakC82h0oFOh\nCfgkViTUVFR83EuMuQZCKEi5hGUtY/jLxBI9HDpzFtfX2LB2I3f3dfHS3Bwj2SJP/2QUZ8nE9so4\nXoiu7n40rcnIyCzZrIEQHvX6ONPTLqlUHEUp0t7eh5QatVrt1z4YOX26pdfxgQ9c6Za8eaxeDeEw\nnDzZMtC7mvDurkp7B6AoCvd9/rM88fT/imX0UG+qWAoo2iKqWEDTVYRiYNsNhnEwEei+YAmDMCFU\nBI4ApILityMoIJUcQoaAJv/AdonQynyM0lpw2mhphCRp7YQNWtmSBi2dkQo/o45CH6oaADIEgzWa\nzWl0PYCux/E8cN0sQpzFdYtYlk0mE+aGG36P7u4hxsdHsO0EfX0umrZAMBhkcPBmXNfmpZdOXsaI\n+Y3fuINvfetBJieXcRzBK68cp7t7I/v23QJ4/PCHz1OrNdi3r6VNcfDgEYToJpPpQUpJIhHFdaPM\nzuZYu7ZEPB6n0agSCkHiHZQvbDabgEEm00Mm8w+fz3FsLlw4zbnTJ9CVMJpqsLi4RKlSIGqkUZ0Q\nBWucycmHCBhNNKWHoLqOUEgwUythu0Xibd1MLc/hNDU8GcS7pKDboDVmBq3xG0dhM1LMowlwfWgx\nngSQX7FN81tHQBg45FdsEF0kZ/mZA5JE0vqeWLTsAtKoajueV8IniOM7qLIT189StRoEiBJQU6iy\nQcxI8Up2iscf/zHXX82mFa8B3/dxHA9V1Zmfn0c3e1ayfoBQSEY6OD91jI23XG6aqRsGvoBoJIIQ\nOWzbolqrMVZYJOebeF4vQkRpNM7S+t3m+BmVPh3toVK7iEeupZGqmQTUDjxFxW2AbefRtDBS9iJE\nEFWzcd1WXUnryGUEQQmfLArTBFBXvGciyJX6E48cYKFcyp4KoIaUBTxPRdOGCATbOTrW5IXRBdb3\n9ZKM6Rw+e5yGkuMju3Zy82+28dcHT7GQrzGTc4jH+zHNAKbZSzqtsrg4QTLZg+u6wDLr119HrXaR\nUIjXrAX7dcM3vgGf+xxcrer4d9zRkq//p2DkKsSZMxe49f2fAIKMjl7k6NHT5LNpDF8Sy5hML5ZJ\nqTninmAWnTgKkgASQRXJrPQpE8NEBTRcfwrd2IvCFI4TRMpVtBYTjVYQMk9rYWqJyrcCFXXlsbFy\nfZlW8NIEViPlGUzTwLICuG4D359jeLif4eGtTE5OMTExQjCokEh4tLXtYnq6QF/fGgwjQjK5nUpl\njg98YB+G0dIGKZWyFArFy/ohk8nwh3/424yOjvLYY3/Pxo272Lx596Ujl1gszRNPPM/27dcSjUa5\neHGORKJFDxVCsHnztRw69AKNRoBiMY/nNSiVRvnkJw+8o74X8XiccFhcskf/GZrNGjt3ruHIcyex\nnDilskaxMo+q5OhNriFvW4QDOpFIgMXFV0DJkE6FUE2Xpdkl8hUBahlN60MNr6ZeyQIXEKKElG20\npPhtYAaBis80SIkUErhISzHXADwcahTRVn6ANSyaqGxBYRkPB9iMouhIWUaIbnz/JDCAEAFai5uN\nxEBKE0VEcUSEvCzTQ4iAGcX1qtTsLFo8zUvPv4xlWZim+Y6NwdsNTdMYHu5mdnYGTdMIJdIUyxVC\nAQNddZHSp+yrbPqFGXn16tUEejooZrP09XVw/Pg5srUaeTeIavZjKCZStuN5KXz/RXxfpZXRqlCu\nX8Bzp4AaLlFM6WK7HpbXoEWvjqIoHkJIXLeMlB6qauD7daQMAR1IIigsEcUjgsoiy0i6MXFwxATI\nBh4+rSCoDVhAoYCPgiKixGJJIhGd2ZxLf9tq5vJlUrEQRbedanWJ4a4uAobBNatWUarVePLoCY4t\njlEqdZBIRNC0JTo6PFw3SiLRjW3PUi4vIuUUH/7w599T35FfBs0mfPe78NJLV7olvzzuuAP+/b+H\nf/2vr3RL3hzec8GIlJIzZ87w4jPPUMrn6R0e5rr9+/+nmhiLi3lisU4CgSiNRpNm02V2dpaZiUUa\n6gzRYIG14TjZgo5oapwlR4AGEKKMQY0OfDooUQTKeATBG8EMuEAU36/gupJWRiRNq2bAolWomqO1\ns7ZXnhu00v4/Y8moKIqNlAHC4b34fgPfB98/y9xcEM8rUq3m6eszSaW2YFkL9PdvJ5+fYXZ2llgs\nhqJMImWQarV4ST6/VFrkuuteLStomiabNm3isceeZf36LZcFEZqmAzHm5uZYt24dmUyc6enypQW/\no6Ofm2/WOXToEWxbob19DR/72AdZ9Q7LF6qqyu2338QDDzxFIrGGaDRFuZyjVBrhi1+8l7VD7Txw\n/4PkCwvEDJNweAhF6NT9Zfr71qKqOqrqsmP7zWTnxrkwOodU16LoQRTlIprWoo6GokksqxshyjQa\nZxFiAUUJIKVOILCZZuMMviwhWaQVYPbT+skFkQSoYRBARXIBhQIWuZVMSwIhsvi+gWkG0bQmltWL\nlD5SNvH9wkpQAr60cP0sghCLuEiRw2s2sPwFmgQZ6L2eaqVIvV6/tNDkcjnOnTlDo1ZjYNUqVq1a\ndVWapN1228184xs/RFFc4kmTvGczm5ugrytMXlPZvGvjq7xW4vE49/7hP+M//+//AWtuiqZqs6DV\nUeOr6Ex0UCpVqFaLqKqK66YIBMBxphGihOuG0NUehC9wSNLwymjSxSWGSw3hNfH9GlIaSLkVaNVr\ntI5ga4CDj41gnhQ6DhYQxmaZsGqRMHwadoyaFwVxAUM9hS8dPC9F6xiwBlRwnArxtp2UfQfqDi9O\nz2O70Cy5PHf6NAe2bUNVFBKRCB+6fg+cP8/4UpNMRqW7ez+OcwPPP/9TpqePUC5fZNOm9Xz5y7/D\nzTe/aSHs9xweegh27IDBwSvdkl8e+/bBxz4G+TxcTYmu91wwcui55zj+2GOszWTYkE6zODHBg3/+\n53z0i198XSpjX18Hhw4tMTFxnmzWIxrtJJWKsrAwRlVRsM0gy45N+8B12EtzSCfF+eoELjGiDBAi\njIOPr1goRoq43kXvwA48bx5F6WRx0SKXO7Oyex6m5SfSBPpo7ZZfAlYhhIeULwMmQrSj6xlct4SU\nM2jaOur1OlI2SaXWousBHOci1WoTzzNob1/N4GA7S0sKrtsgGEwwN7fMddeaJGXeAAAgAElEQVT1\n09kZ5sKFs/j+dhzHZnFxglCowJYtm1+3H0MhE8tqXmaIBiClc2lRu+GGnXzjG48QiSQIBFpeJo5j\n8f737+bLX/7cFaWEbt16DeFwiKeffoGFhRG6u9v5xCfuZmhoiFQqxdLUFD/5m0PkbY1yM0+VCqG2\nFN3dm8nlziOERW9fL45bZJU2SDZrIefKFIuLWJaJEGl0XcX324nFutG0boTIoWmDNBrzeN4JdMNF\n0/pxnDlcN4ZgGN+3kCtWAB4nqBMmTDeCBmlFw/FFS7NEJPHwUBSHcDiKokhcdxLXzeC659G0fjQt\njuuWUdUcmjaE1aixLF0WmnOEzXYGonuYWXBoqPOMjo6ya9cuXj59mh9///u0CUFA1/nJT3/K0bVr\n+Y1PfxrDeHc4L79R9PT08Pu//ykefvgxHnzwGfoH1/H+D3wIwxBUKhf5yEduec3PtGfvXlZ9+5v8\n2Z/935w4Ps9Q3SGVuhFdD5PNzvHKK2eoVn0MQyEabVKptGwYfHcQ6fogZhCyiSvj+EzhUkAwT0Am\ncaW5wogZoXVc5wM1VGUZX6aRcomWlmsKIRyk2oWuJkh3Behpb2d0/AhWqUAyrLFpaC/pWJKnT87R\naDZw/Sq6HqVcLlCrzazUj0k8OjGMCPnyJN/78cs0mj637b4WQ9fJlkps3bmTNZ7GzIx56djywx/u\nZ2zsBJs23cZnP/ub79iYvdtx//3wxS9e6Vb8aggEWqyaRx6Bz3zmSrfmjUO8EYfbKwEhhHyzbWs0\nGvzFV7/KnvZ2jBVLcYC5bJZqVxef+tznXvO+XC7Hn/7p/8WZMwo9PVupVMqcPfsi0aikrS1DubzA\n0oVnMPwQvtuF1bQoeItIwvjYqKhIbISaJNLWSU9vG4GATT5fJZutEI+vZnz8xZXz6DSquoyuaziO\njuu2UvdCpJGy5UcDJTStF00L4vslbHsCTbsFIVRCIZXu7gyxmEc4vERnZ5zjx8/wkY98kv7+1YyP\nv8zJk5OYZjft7YJdu7YxMzNCo3GWdLqD2dlpGg2LRKKdWCzEjTdey8033/AqUatjx47zgx+8wODg\njktBRaGwhKJM8Ed/9IVLO+ljx47z2GM/xbZ1fN9maCjNRz/6wV+qRkSIN+a4/FagUCjw4AMP8K2/\n+C65kk5X/266u7fhOA0qlWPAPKrazsWLUyjKahxHwbbrOM4clhVBiEESiTBzc4voeh0pZ+jpuYZK\nZYR6fZ5AoAMpA6TTaS6OHcG2u9G1IHWrREsLRgKzBM0A7Z6GI2fxKVF127CI4NGLaggCgTiOo+B5\nz2EYcYSIIEQd217E8xSGhzeTzxdRlH7y+Wk8p4iphQhoaYLCoCmnWT0YZ2BI4w//t3/OS08+yY50\nmtDPSfkfv3iRaz78YXbv2fOO9P0v4q0Y9+npaZ5++jCTk3NkMkn27dvFhg0b/qf3HDnyIj/60Rkc\nB06fniOVWg/A4uIEk5OnKJXOE1AF1WqZhqUhlC2g+PiewLHzeNIDljEooJPAI7RSkBqhzCweJTQR\nRUqBREGINlRlGUEGxz+HlCXMQJhYbDu7du1jcnIUKRvksufIBHtJxXqYWR6haUUpN07h001b+z4s\na4JSKYvrSqLRMKtX78Z1a+Syh5BND12b4drV7Qx2Jol2JvjcH/8x0WiUb33rB2SzAlWN4PsVurp0\nPvOZj72uhcbbjXfy9/5GMD4Oe/bAzAxc7adVf/VX8L3vwaOPXumWXI6VMX9Nns97KjOSzWYJ+v5l\ngQhAVzrNU2Njr+u5kE6n2bFjPRMTR8jlDjI3t0Am08Xg4E7q9SzJpMbS0jCz8xYhYWJ5ORQcFEI0\naBAUAoSBoQo8v0Rn55qVVG8Kx5llcvIYvu+iaR5CVPB9g2YzgK67aFoAz7NQlCCeBy3mRCdSWkSj\nJprWw9JSE0V5BUVpJ53uIRx26OjoxDRddu++henpaVS1RcsdGtpIuVzk5Mln6O29lsnJ5+nri/Cp\nT/0x2WyW++9/hMHBzUSjSWy7yRNPnKVarXHXXXdc1ifXXruV6ek5jhw5RKt2xSYadbj33nsuS+lv\n376NzZs3kc1mV/Q90m/5uL4dSCaTfOFLX2LD5mv41rf+mpGRItPTz6CqZa69thcpNzA/HyAUsiiX\nfWzbx7ZnSSY34vseudw5fH8IXV8iGGwg5TKuO0EyGcU0e9E0iapCOrWahekLaEoGRdRp2jGkbEOg\n4Mtxms0sFS2JL5qElQCmp+LLLIg6vh+nVssBFXy/gpRJFMWjo2OQUKiP5eXzSAmx2BCuu0QwWKUu\nPVwsqvY4IgiZ5CAbB9fhe+M8/N/+G0OJBKFfOLIcamvj7EsvXbFg5K1AX18f993X96bu2bRpI088\ncRjPS+E485w9O08wmEZVizSbowTVDF3JAcbqr+BLF98JIAUEApJIdAfl8jmQM3QaQSr1RWpSwaKB\nR5ggKmpgPY7v0bBnUWmgqgbJ0FokJg1LRdefZ901Q1QqC8zMPE61XKWtLcHGm7czen6C6ewpFnI5\nJDV8PJLJTmx7EjDw/SkUJYKUHtXqKLXaBUrlOpYFmuoxUw7ix7rplQahUIhkMskf/EGrHqxQKJLJ\npBkeHn7XK+u+k/jmN+Hee6/+QATgQx+C3/99KBbhHeQO/Eq4qr+JlmUxPj6OZVl0dXURDAZp+v6r\ngo5as0koGn1dQZ+JiQkunHgRvTxNPJqkqAvS6R6WJg9RLU7Qv3aAvr5eHKdCITuF0GP4joFNHY0E\nQgaoyRplu4JZy5LNZkgkdgPzuG4MVZX4voqUEwixFUVx8H0Xz9MRooFhrMdxSrTqRgZQ1WE8bwzb\nFrS1DaPrLq47TiJh0NbWSTIZp9EYY/v2nZhmkI0bu6nXz3L27Ay6HiSVgj/5k4+xadN6IpEI3d3d\nCCH4/vd/RCKxjmi0xTAwjAADA1s5cuQg+/ffeBmlT1EU7r77Tq6/fpmFhQUCgQCDg4PovxDotd7H\nuGqpozfddANbt25hdHSUXC5Hf38/ExPTHDq0xObN61hamueJJx5DUQaYmytRKs0SCHTR0dGJYRRI\npUw8L86WLXsYHt7Ck08+SqNR4YYbduO6kuMvXSQRjrK0eAgTgyQ6VUpYMg7EkETw/HYUM4LUPCKe\npI0uKkaDgtWg7o0gUYAUnmfieS653AUMvRsNjaX5n9LWuQchogjR8otRRAK7cZ5IuEBHai25cp32\nhEJCUSgUi6/ZD/67aIf6TiEcDnPDDdfw1a/eT60WQFEqlMsjDA/HKRVMmkWDhdwUifAOGtZZ6m4O\nXetFOKPQOEhSeLhKHemaxI11hGyTktRQEQgxj+NVUIQBVAiow0ilStObxvNBiDqGqnL7rTtJJ5Oc\nePJJFmWr7iR7apRoJIOTCtFhDABhbLuKZamEwxLDUIhEVqFpJqXSOPl8mULBxvO6EcLBcRbJ14r8\nxs0HqNUKvPDCUe688zZ0Xf9Hs0W/rnBd+K//Ff7u7650S94axGJw4AD86EfwW791pVvzxnDVBiMz\nMzN8+9sPU6+3VEt9/1l27x4mNTTE2MwMq1cWR8/3OTc/z467737N95mfn+fh//Jf2JVMYMQWCIUT\n5M4dZHb2DBvb1iJEnXarwvjSLKYZpbN9E80KlIuLRJU0dW+WBsvorMcXgnAYJiaK1GqHcJwA0IHr\neivCZRZSTiKESUu8qoGud+C6TYQYRlWbSOmgaQqaFsL3a3jeFLXaJKaZw7Yl58+fJxZL0dvbx6FD\nz6JpNlu2tKMoKpXKItVqg5tu2s6dd37gVZXx09OL9PSsv+xaSwckcslL5hfR1tZGW1vbWzBi717E\nYjG2b99+6fnBg8eIRjMUi0WefPJ5JierNBrP0mzOoaoe4bBAyiDh8AChUILJyWe5eHGOjg6NLVti\nrFmTZOvWa2g0Ghx98TBW4TyD3gI6CSBBg1Eu4tLgAELkcMQcMSWDqSVo+McJmzF0ox1h51BI4xED\nNqEoLTuAZm2ExdlJksFBNEWhlJ1F0ZO0t+/F8xQK+SaGriFlhkptjqDhs74/RSJicr5QoGFZBH/u\nuzGRzbLpgx985zv+TaBWq3H48IucOHEeXdfYu/catm/f9ivt7JvNJs88c4Lbb/8dfN9DSkkut8Cj\nj/6AQiGF4TvkKyWioSLhYDuON4HmHqdP0Yngoioadddjwa8QMXWEEMQxqEmbhgyiewsoSpmAZhEw\nSqhmHyGzB03VaTqLVKtz/N3jz2MuL2I2LfLlPOuTbayOJzk+fo54cjXBVBe2p+O6PkL0YduL9Pam\nGRgY5Nix41QqBRqNFLq+a8WtOY+i7GB5eYpjxw6xe/fNTExMvoUj8d7E449Dby9sfv0yuqsOH/tY\n66jmaglG3t2mE68Dx3H4zncexjDW0t+/jf7+TfT3X8fhwzMMrluP3dPDoclJTkxPc2hmhqGbbnrd\nFPSLBw/Sp+skIxEGBjLksudJeyWitSUWyiOYyRjJcIJuzaJWydE7uAVVESiKjiJMNIIowsTUJeGA\n2jKrKpRwXZ1wOEMo1AEk8TwVKYv4fhJwEWIMRRFEIoP4fhFVHUFRLDStiBAnUBQNx5HMzp7C88qY\n5nqazQia5lKvw+KiIBDoQdfbeeaZMcLhDezY8RF27vwEFy44PPro46/6rO3tSarVy3fGLd+N2hU7\nN343orMzTaGwzAMP/HdGRxcIhXqJxVYhRA+BwBBSZqnXHebmFmk2x3jf+/axYcON6Hqdj370dkql\nMR588Ad897s/opDP0ql4ZMIRDNVEpUpcDdJJiHA4gGGGwYhSsissVSsUhcucX8fFo+nmkQTQ6UNV\nFUDDcQCC4PZRbxRIGlEUqZIvLJHNHsayRhDKBTy/gJRRsqWzrO1z2blumKphcOdv/iZHFxcZmZ1l\nanGRlyYmCKxaxbYd715Pkkajwf33f5ennppG09bjuoM89NAJHnzw4V+65sC2bZ566inGxuYplbIE\nAmGklBw/foqOjv1oRhvRyADpWA+6JuhvTxMNZehUTXoT/SSTCdau3UMq0k2aADl7AYcGPk0QKi4h\nhNKgqy1NJGxR9ywS4QE0VaNUL5ArLuCLdi6Oz6D5SbKeSaHu88JClb8eW+TlimQpV2Fu4hAbN6bo\n7c1Qrx/HtpcZHAxj2yWEWEZVG0AY1y1j2+MIUSceH0RVBzh//mWq1RLt7VcRpeIK4f77W6Z47yXc\ndRccPAhLS1e6JW8MV2VmZGpqikpFZ2DgH+oTFEUhk1nF6dOjfOlLv8PCwgL1ep1MJnPJ9vy1MDcx\nQe3CRU7kGwhhtoIG4RAM6Cz7CZwllfMLJ1nTFyDRsDEDFr4mEaqD55eRAixMDC2K0EykzKNpJori\n4/s2zWYTz3NpiRfFEELi+1Xi8VWoagEhDgI5dH0t4XAviuJRr1soSoV6fRzP0/C8JNVqB4qiUyzW\naGvz6e7uY9++vRw8eJRkcifj4xdoa+tFUVT6+jZy4sQhbr31cmnnAwf28J3vPIVpbsM0g/i+x8zM\nOTZv7r8kdjQ7O8tPf/oC09NLdHamuOmm3QxezTy3XwI7d17L1772h4yNFQmHN1IoNCiXTxAOdxGN\nDtFonGbjxg1YVp1QyGZ5eZlKReXUqWWeffbbRKNhenrizMycxq2PIbw6aBGkdPAJoAqdmG6QpYmi\n2/i+iWIGcGwVTQ/iiSK5ah6fPBpt6EYQV4RWmFU1BCF8USVkSLKVKsHIGoKmJBTSsO0KkUiNUGaA\nenmW7Wti3LRlNaeXlth2663sO3CAjZs2cfb0aRq1GjevWcOaNWveUIbB8zzGx8eZmZkjGo2wbt3a\ndySIPXXqNEtLGgMD/0DTDYe3cfr0YW64YZr+/v439X65XI5vfesHjI9XGR/3mJ8/im0/iud5FIs+\nnZ0RwmGJosVxypP4boRyrUTDmqNHCWJZdUwzzNLSIq7bxFQiKEaSqr6eemMZHxVFuITTBjfe8UFq\ntRyHnz/LYuE0lgWqouBTo+mGqOd8jlVLVJsQUXbguzV818MkQ9Uu0d6+mtnZLPv3X8/OnVs5ffrv\nSadzHD58lJ07r8OyXObmTDQtQLPpoutJTDOKbQt836XRmGLv3nve6iF5T2FqqmUu9+1vX+mWvLWI\nRuHuu+E734E/+qMr3Zp/HFfSm+YLwM/oLf+PlPJ7b/Rex3EQ4tW1C7pu0GhYCCHoeoMOR3PLeXLT\nJdb0tc5SVTXMmfIIdijFddtuwNRDKEKwUDxHT1eW3btX0ZZWGT1uU6nqTC9XMNQIiXgXNbeIakQo\nFl00bRTLKuM4JkLUV5gyVaTMIoTE8xps3LiP/n6VXO4sZ84IgsEOSqUi7e2baTSmgBlsO4Gu78L3\nfQwjiq4HKBaPU6nMYVkWjYZFPD5MNnt0pZ5gHsexqNVsqtXqZcHI5s2bueeeBo8//jy2rQI227ev\n5o47Wqr8Y2NjfPObjxAKDRGLbWZ6usBf/MXD3Hvv+9m0aeOrO+89Cs/zaDbBMDoRQuD7Dpq2GkXx\nkNLHdRWSySFUVeHkyf/O4OBeMpkeyuWXCAZXoSg+lpUnHu9AkSEC9TniQYOGPQpODcdzqYoqTfsl\notFNVKs27e1dVKtnseoqmrYalCKGHkF6NpY3RiB4E45jI3BQaBAzLUxNw/M7MNUoulqhv3+IoaH1\nzM4eYcuWYWq1Nq7b2U2sv5+911xzadFub2+n/ZZb3lSfWJbFd77zQ8bGKuh6EtdtYBjP8dnPfpiB\ngYG3Yxgu4fz5CWKxzsuuCSFQlCQzM7NvOhh5+OG/o9nsZNOmbiYmnmB2dpp6PYKmSYRIMz4+xbp1\nUTKZDnRziIvjJ7BUk1RbgMbSPGnVQNfbVkTNEkh1DqkZoGbQzQiue55gcIFrrtnD7t23cvbs8+zZ\n6zM/b9JsaiwsnGNpSeC5JhKVitWBItK46CgKaEodRS0TCPZgyTqRyBrOnDnF6tV93HXXPo4dG8f3\nVzE3Z6AoATwvSySykVCog0plhmZzEds+x+rVq/jUp26hr+/NFfb+uuHP/xzuu6+1eL/X8Nu/DV/6\nEvyLf/Hu96q5kpmRx6WUXxdCaMBh4A0HI61iyRKu66wIcbWQzc5w881r3nADHMehYmuUA1FqVp2w\nGaLZrNHUIiy4BruCMQy9JcldtcNcs7MTKacZXt2Da9V45fQxFH0cVUlRsS+S6ejGtlNo2hTBYBeq\n2kYul8f3s7T8Jdagqu3oehIhCkxNvcz27Zv52tfu5xvf+Et+9KMXaDYVPO8UqZSDEFFKpR6ECOB5\n5RWKrYeq9lEszhGNRonHw1QqWXQdnnzyEep1DSkNKpUzPP30EJ/4xOXsl927d7Ft27UUi0VCodAl\nvxgpJX/zN0+TSGwgHs8AYJpBQqEojz76FOvXr7sqhbF+GZw9e57e3k1MTIzh+8soioOi2AjRS72+\nSCxmYtslpATL8ojFumk2awjhEQzGCYejTEycpVyuk0jtZrE+D+UFTHMVimJT9aZphruIa0FqtZNA\nCMsSaJqOZ7RjGklMz8RzFTyh0XQnUNVzqKqGRwUhZoloXbiuTSScRALhqEs47FEozOI4knp9hPvu\nu4sDB/a9JX1y6NBhxsZsBgd3XbpWqRT43vce4U/+5PfeVlZGLBZmYqL+Gq84hELB17j++iiVSly8\nmKWvr+Us3dlpMjKiYJr9VKtjQJZAIMn4+Biq6rNnzzquvbaN667r4dFHf8r5Y02SZjsJM0Gz2aTp\nOSzZLh29KQqFYzQaFTQtgaZ1kcvlWVqaJhj0WbUqwcxMHgDfb0NR+tC0ZTxvDa4XRBVgexqa4mKQ\nRgQqRKJxKm4BKRssL1/gt3/7Jg4fPkdn524uXnyFZHKAcLid5eWvU60eRFF6UBQbeIVbbunj29/+\n/wiFQr9y/7+X0Wy2jmiee+5Kt+TtwU03gWXBiy/C7ne5afeVNMr7WVWVR8sB7A0jFotx6607+du/\nPUIsNoRpBikU5kkkquzZ86E3/D6O4xAOJ0nesJWRU88iCgssVXLUulajOVHGczmihkETiHb3smv3\nBvbvv44jR47T1TXI+i0Kx4/2YwbiVKoVpqcX0HWV/v40S0vTOI6GlC3nXeghEFiDprn4fpFYLERH\nRze33fY+Ojo6+Df/5o+54Yan+dM//RpCRAmF2ikUplEUD89roOstCy1FsfH9Kp2d7RiGwZo1/Tz+\n+EOEQgrx+B5isQTF4hw7d97BqVN5hoZe4rrrLq+X0XX9VUWpjUaDxcUK/f2Zy66HQlFyOZ9isXjV\n0HZ/Vdi2g21X8LwmjtOHaSaBCYrFU8TjBu9//0cZHT1PLlcnkYhQqeTwvCJbtqxlaqqOEAlMM4Ci\n1BAiQEGEyVlZQmSxnCZ1NUYkuBadEsFgiXj8ZpLJNczPHyQY3EqzOYMeiKD4NVYPrCOXUwiFLGZn\nLxAKxYlEEjQLDVQfSvYSvoA779jP9u3Xks1mmZkp8uUv38W2bdvesj45cuQMnZ2XV/dFo0mmpjRm\nZ2ff1uzI9u1bOHLkYWy785KdQbVaxDDKrFnzxjcfAK7rIoRyiVnnOA4bNlxLsynJZoN4nkWlIlHV\nPsrlIC++eI716wV79/4GIyMWicRaTrzwHDPVaTzbZtGvorT1k8mswnWXMM0eyuU8kKVQCPP1r/+f\nfPzj+9mzZysPPfSfmZ938P1+VFXFMOLU601ULYOUy3iKiSdtdN1EqgoyJLj1xvfT1zdMNJqgv7+X\ngwfn6OnpJJVqaY3E4xn27Pk4S0sHKZcnWbUqzWc+8wXuueeeKyo6eLXggQdaiqtr117plrw9EKLl\ns3P//f8UjLwR/B7w8Ju9ad++m+ju7uSFF05SrRbYsWOQnTu3E4lE/vGbVxAMBmlvj2JZEfb+L/fS\naFSpVos888xBeiPtbN91DY7jEIlEyeVeYevW9XR1dfHBD7bz0ktHefLJIuXqWTb2r+G66w9w8OBL\nSNmBZeXo7jY4c2YEVU0jpYqmteP7DTwvgOcV6e9PsHfvNjyvNSnW63VefPEVOjp2UKvFSSQ66O62\nOX36BXQ9gKaFsaxZ0mkFRakzPDzI9PRLKEqDT396J488cgrbLgJVtm4dYGhoiGazyqFDx18VjLwW\ndF1H0+Srsk2+7yOlS+DnRLLe62hvTzE3N8eOHbcxMjJBsVhEVUNomsHwsE48Llm3LkI0GuPUqXGS\nSYtNm3ZhmiaLi8+ztDRCb283uq5x7tzz1OsF9MBeil4IdFCVIqlUN7Y9gWEMkUolaDbrqKqGEBJN\niyPELNdfv4N4PM758yNs376OgYEDzM259PVtY3l5ltOnj3Du3FmuvfYAe/fuXmGEVNmxo5+tW7e+\n6nNZlsULL7zIkSMvI6Vk+/YNXHfd7je0e/Y8D0V5dWZMCAXf99+Sfn899Pf3c/fd1/Hoo88hZRTw\nCQSa3Hffh960E3QqlSKVMimXc8RiaQIBk3LZIxQy6enJ4DhhenujTE4eJR6PcM01B3DdOZrNJopS\nY+vWGxkaWs/ExAiu6xAYmSCTidJoNFDVOKZpY5pLJBI9pNO9KEqG6WmLF198lK6uHThOFVXtJ5ud\nIRSKYlkTSOkgpUUg4BCLpZHSJR5vZ3AoSiaT4ejRxxgc7OR731tiacmnv/8adu26lqNHT5HNjmHb\nDTIZg//4H7/CTTfd8LoSBv+EyyEl/Kf/BP/u313plry9+PznYcMG+OpX393y8G97MCKE6AAe+IXL\n81LKTwsh9gAfAD78Wvd+5StfufR4//797N+//7LX16wU3/0KbePOOw/wzW8+gm0PEYulURSVeLyK\novicO9dSCFSUCnv29LNu3ToA/sf/eISjRxfp6NjM+vUhjh8/x8TEGAMDGzh9+iyJhI+mxenr20Es\nlmZ29gxSVhAigaKUGRoaYt++PUCegYFWbcvp02colYIcOHAbr7xynosXJ8lkBujpOUOz+QpSdqLr\nGp2dKh/72J184hN347oumUyGxcVFlpdNurq2oqoqitKajHTdpFJpvqG+0HWdXbs2cujQKwwMbLl0\nfW7uAlu2DL7pSf/djkKhwMmTp8nlSvT3d7F58yaCwVbKPxAI0NXVR62WZ+3afmzbxrKK9PevYtUq\nl9/6rdtIJpM0m00OHz7M3//9UYrFBRKJNKtWxRkZeZ5Uqh/TNJicnKOtbRfLy0UsK4quQzrdDywj\nZYNm02d5eZyWsJxKpXIW3w/T16dz883XUSot8v+3997hcV3Xoe9vTy8YAIM2g95JohEEexObSIqk\niiVZvVuUYl/LjuPY1765L7kvyed745c4fknudfIiyZIVx+q9kjIlUWITKRIkQYAAQfQ+AGYGmMH0\nct4fA4Gk2EmAA5Ln933zcXgws/c6Z+1zZu29V1mwYD1PPvkQKpWK2tqDfPzxLpTKKPPmFXHvvUtx\nODx0dn6BQiFRVVXEpk23njYrjkQi4z4fQSyWMoQQfPZZB01NbTzxxIPnLZBWUzOL3bs7yM09ERru\n93tQq/3nrPs0WSxatIDKynJ6enpQKpXk5+efMefN+RBCcOedN/H88+/gdmdgNps5enQfZnM+qalm\nRke1qFQKiouTWL9+E2q1lv7+CAMDw6xbt5APPjiA2VxCZeVCnM4BEhP9jI3Z+fzzQ4TDBVgsaWRl\nVZCTE1tFsttjeWG02gry8xPp6/sMIUCpNOBydVNYWIrfP0g4rCE9XaDRBHC7W7FaE7Fa0/jqq62U\nli6lpKSakRE79fVvotfvo7JyEcuXL8LtdtPWVst99z3BDTcsB2Jbrm1tbTQ0NKNQCCoqZk44oQ8O\nDhIMBrFYLFdd+v/JZs8eGB2FDRviLcnUYrXGImueeQZ+/vN4S3N24pYOXgiRTcxIuU2SJOcZ/n7R\n6eAvlZ6eHrZv/5Kurn5yciz4fC5qa7tpbR2gr28YjUaipMTC2rWLWbVqEb///R+xWOZit9sJh8P4\n/X4aGw+RnR1Gq1Xh8+lpa/MyPOzEZhPo9am43UeJRJIxm9MpLbVQWhx3rgEAACAASURBVKpj5kw9\n3/veI2i1Wl599R1aW5WYzRaEEITDEXw+H1u3foBe7yEpyUokEouYqKhI4wc/eHziYezz+fjlL/+d\njIyFqNUnflT6+tqoqtLx7W9f2NZVIBDgtdfepbGxH4XCRDTqobDQzP33335FjJGpSg9ts9no7e1F\nrVZTXFyMzWbjhRfeJRJJQ6834fU6SErysnnzfaSkpNDW1sazz25Fp8uju7sfgJwcK1qtIC1thDvv\n3Mhzz73MJ58cxO2OEo2OkJysYu7cOcydW0FOjhWj0YjRaORXv3oBtzuDnTu34PWmkJFRhUZjoKvr\nE3Q6B0plAZmZ5YTDLpzOXvR6H6GQmxUrbiA1NYGsLC0PPngnycnJSJKE3+9HqVTi9/vR6XRoNBok\nSWJsbAy1Wn3WFazm5mZ+97tPT/H5AOjoqOXeexeecSXlZNxuN7/97csMDioxmTIIBDyEQv3cf/86\nKisrLks/k6H3kZEROjo6ACgoKDhnOYJIJMLg4CD19Y309w8zMjJMd7eLvj4vXV2jpKUZWLRo+URR\nya6uo2zaVMrSpYtpbGxk165anE43ubnpNDa2IkQ+IyNO6utt+HxqRkeHqa5eCsDw8FdEIm6Sk5dg\nMLhJTJTYsmU3Pp+ZUMiNwRDAaBzmttuWUlhYSm6uhdmzK0hPT+cPf3iNjg416enZqNWxDMvNzc3s\n2fMeK1duJCEhmbGxAYqK9Dz88N1otVokSeLttz9g374ODIYsJEnC5+ujujqT4WEX/f0eFAo1anWQ\nW265gblzJ28r72KJdzr422+HdevgqafiJsIVo7Y2dr6trXAJNvykMV3Twf8VkAG8Ob6suFGSpAub\nxk8i4XCY1uZm7O2NaP1+bO0O2gYksrIW0tnZQHX1GoRQ4HQe4vhxH21tL+F2J1NXt4twWIcQCiTJ\nS1ZWHuXlaqqqZvDhh5/gch0lHDai16uRJCNqdQGBQAs9PfV4PBGKi9eyfv0TEzNSpTLK3r3bCQYV\nqNVKCguL0evNeL0hFi5cRn7+iRlpR0ctx44do3I8Q49er2fDhsW8884+kpKK0OsTcDoH0GiGWLXq\ngQu+FlqtloceupuBgYGJJGhfZ2+9GpEkifff38KePccRIhlJCqNW/xG/30tGxmJMJjOBgI/e3i4+\n/3w/H330KTNm5FNZOYtAoJeEhBwWLIg9rCORMB0dX3HzzSt5/vlX+fTTXjSaRRQUpBAK+bDbD1Jb\n28jQkAf/mB9HzzF06jBeRQKLlj9CSsrd7Np1gEDgGG63B4Wil7lzH6et7UsikT5ycytJTDSSmupm\n+fJcVq1aSmJiIrm5uQghaGpqYsuWHQwPj6HVKrjhhhpuuGEZELvBzxdi29HRg1abdtpxozGDlpau\n8xojJpOJ7373IerrG2hr6yE5OY05c1ZhsVguUTuTx5df7uWDD/YQiSQjBAjxGbfeuoxFi07dJA8E\nAnzx6afU791LNBwms7CQGzdsIDs7G7fbTV1dHX/4w0eUlq7BaIylA/B63SiVDsrLY/dfWVnZRBbT\n3bu/pL5+jLy8EhITRzlyZD+DgyGcTj/JyXUkJEBhYSY2mwKfbxSDAfbvb8fnUxAMtuHxtGEwWDEY\nUsjISOexx+6dWK2IRCJs3/4ldruWaFRBQoKOyspqZsyYQSg0j6KiMCZThLKyRZSVlU1MTNra2ti3\nr4OCgsUn+cVk8cwzTzN37jJKS2NGkt/v5fXXd5CamjLl0VDTkaNHYysjL74Yb0muDHPnxioRv/EG\n3HdfvKU5M/F0YP1evPo+mS3vvkv/V18xPzsbnUbD53u+ZLB1lKEhDQZDFkpl7CZXKtPxev309HTS\n3HyIoqJ1pKRYx42RKI2Ne7DZbNjtBrKzV2E2B7DZWlAo7AQCfvx+CUlSkJiYh9Wag0o1k5df/pin\nnkpFpVKxf38zLpcas7kCtVpDc3M7IyN7MBj0ZGYWnCKzTpdCT8/AhDECsGTJYtLSUvnyy4M4nTYW\nLswhKSmHd9/9I+FwmOrqmVRXzz7vcjyA1WrFarWe93PTnYaGBnbtaqOgYPGEv0N/fwe7dr3JggUZ\n2GwDtLU1olYX4PPl0ts7ikKRSldXPXl5Fuz2rYyNlaNQaAEX69fXoNfraWsbAVIxmWIbsGq1nlBI\nRU9PiLDXxgxdiIrMMgL+MZoHm/h8y8ssvfFuLJZMNBor/f21aLXgcHSh0ympqrLg8fRgMnkpLNTx\n3e9+55QtiJaWFn73uy2kp1eSlxerKbR1awNjY15uueXC1phNJiOhUO9px4NBH0lJF2ZQ6PV6FiyY\nz4IF8y/o81eC/v5+3n13D9nZJ1YFg0E/7723h4KC/AljSZIk3nr5ZfzNzSzOykKtUtFvs/H6M8/w\nwFNPkZ6ezrJly0hMTOLNN7dhtycghIQQLmpqitm3rxarNY2ZM2dO3EPt7b0kJKQRDPrZu3c7SmUe\nmZk+nM5aOjp2sHLlcubMWcaxY7Xs3r2XQCCbkRE9Fks1PT3bSU9fg9GYQkpKOm+/3Uhq6ps8/PC9\nCCH44IOt9PZGSUyswGBIwu8fZc+er1i+XEVCgpabb95wxvQFDQ3NGAynTiAcjhGCwTQikRO+PTqd\nAaMxnz17Dky5MWK329m79wCdnQNYLCksWlRzRbb2zsU//AP88IdwPQUb/fzn8N/+G9xzD0xH3+bp\n4MA65TgcDoaGhjAYDOTk5EzcqA6Hg+MHDrA8P39ijz3VlEiu2seBznqyi4sm2hgbG+TQoW7cbjUj\nIy6OH28mLc1JYeEsIIrb3YFen0NeXiwfR1XVHLq7vQwODpKSkkoo1Elq6hySkrJRqbwMDnpJT89m\nz579aDRqtNoC1q6t4eDBelyuKKAmFBqjoKB4IoLga0IhD2bz6bkDvvahkSSJN954lx072jCbC1Aq\nlbz11mHq6o7x6KP3XtJe+9XIV1/VYzYXnuJ4qVar6esb4MCBTnQ6Aw5HMpGIj3BYkJRkwWKZhdOp\nIDk5EyESuO22uSQlJRGJRDAYDAwPDxMKKYFTr+HoqBOFIpngYCvFNfMRQqBJMJMXyMCaEKWp4T0K\nCkpoaNhCKORgeFgiEEghMzOXnh4HZWVFmM1pVFRoTtPPtm27SUmZNVFTyO8PodVa2bbtK1asWHrG\nNP7fpLx8Flu27MHjcWE0Jo634yESGWD27DWXeaXjR0NDE2q19ZTtSY1Gh1JpoaGhccIY6e3tZbi5\nmSUn/fBmpaXh6+9n/5dfsvHW2FZmVVUlpaUldHd343Q6+eMf97B/vwONJkww2E5Kym42b76P5ORk\nUlOT2LPnGDabjYEBifz82ORArU5BiFE6O9vJzNxBUpKCm28u4YMP6ohEtNjth9Hrs8nIqCYc9uF2\nj5GcnEdDwwB9fbGQ/X37jjF//ioOHuxCrzeh0yURjRazb9/n3HRT1VknCwrF6VsfwWAQUBKL6jtB\nLFKu63JVcE76+/t55pnXiEYtJCZmU18/woEDr/Hww/Fz1OjuhnfeiW1ZXE9s2gR//dfw1lvw7W/H\nW5rTuaaNkUgkwtYPPuDY3r0kCYFfktBlZXHngw+SnJyMw+EgUREL87Pb7YyOjCJJETQiiIYIXq+L\nxMQ0wmE/PT2HMZlqGB3tR6Mpxu3uwO3uIBodICPDSGKigdTUEwZCamoK6ekp+P0FpKdr0WrnYDKV\n4vWOkpKSSCAwRmJiGl1d7SgUChITc0lISObGG1cwNjaGENDXl4jH08LY2AgJCbE9cJfLjkrlpLz8\n7AWvuru7qa3tprDwxFJtYmIqbW21NDY2Mnv27Km98NOEQCCISnWqk57TOUQ0mopabSUSGUGny8Tj\n8TM2NkZOjh4hQKOJjQ2r1UJfXz9ffFGLyyUQQonH04fb7UaSTl1N8PlG0WgSydDpCIXDuN0uIhGJ\nSCTK8opyMoXg/j95DIfDwf/5P2/S3h5Fq83BZDITjYY5cmQfZWX9LFjw+Gnn0dMzSG5uOaFQiP17\n9zJms2FQqbCNHud3Tz/NE9///nkjYpKSknjooZt55ZWPsNu1gECt9nLffevIyMi47GsdL/z+wMTq\n5ckolWoCgdDE/+12O6YzbDemJybSPu5r8jU6nY7S0lL+/d//AyggPz9n4m+9va387ncvUV5ewvNP\nP81Xe3vw+Y1EKcY2sJ8ZM4swmWDt2ptpbj7AunWlrF69mmPHjjE6aubIkT4GB/UolWUoFEoUChU+\nn4ecnAyUSjPDw8Pj4ccJFBQUMjLipr39GEplApIURJIGuf/+28+6dVpZOYtdu94jGs2dMML1eh2R\nSC8Wy6k5Z0ZGBlmyJOdMzUwaH330GSpVAWlpsZWQhIRkPJ4U3nnnkynt91z84z/GkoGZzXETIS4I\nAf/jf8Bf/iXcccf0Wx255owRr9eLQqFAp9Px1b59dO/ezfKCgomVj46BAd595RUe+e53MZlMjIXD\n7NtXS1+fCyEMSFKYQUc/QpfE6Ggjbncy0egAkmTA5QqQk1OI261Ar5+Pw1FPONzDTTf9lPff/z1p\naYkMDAwwNDRER0c/Tqcdh6MHozERSfLi94+hUoXQag1YramMjY0wa1Yq0WiU48fdJCQko1AIEhNj\n+/86Hdxyyyb27Wukqys2mJKSBI8/fuc5fQQ6O7tQKlNPe2CZTJkcOdKMWq2mo6MHk8lARUU55mv0\nrqyqKuHDD5snVhQg5qyck5OPRuPC5XLj8wUIBhXjdYJiTrrhsIeEhEQCAR8ff1xLScl6srOTGBjo\nwOv10t1di0rlpr8/RFpaIT6fA7XajU5nJKRU0tjYQjSqGU+K1kGKRUvO6pVkZmZy6FA9FsssSkrS\n2b+/DofDgd8fwu12YbWmn7H6cUZGMu3tLezedZDRvmFMBi0WswGLUYnOZuPj99/n9nvuOe/1KC0t\n5ec/L6C7uxtJksjJybmgbbvpzMyZxeza9UckKX9ivMecNm2Ulp7wg/n6Xu/t7cXhGMVg0JGVlcmo\nx0PKGZJMjI6O0t3tJC/vhHNuKBSirW2Q9vY9qPgjzg43+Ql5dEt9hCJBRoZ7aIr2csutaxkZGcFo\n1JKXl4cQgoSEBLTaANXVlWzdupVweASVyojX68BoDFJRUUIw2EtCQgImkwmPx87evfux2UaRJAmV\nykNRUSZFRavPeb/m5+ezYsUsduzYi1qdMZ4xeJDVq0sZHe3DYDChVmsYHOxGpRpk0aKbJlEbpxIM\nBmlr6yc391QjyGhMxOGYsm7PSW9vLO17fX18+o83t9wSWx15+224c5pVCbhmjJGBgQE+ef99Bjs6\nkID88nI6jh9nfmbmKWGOBVYruzs7sdlsWCwWXAoVHU29lBdUoRQKAuEg4UCAwqIMVq9ZQn19E11d\nEp2dY5SUZGOx5DAwMIjN5kSvz8HtPk5Pz1EKCrTU1e2lqWkGXV2DqFRKkpLM5OUZGB1tIxQKkpmp\nJT09H6XSTXb2LPz+NpYsuYtoNMrhw2/h96eg08V+EAcHu0hLE6xYsYIVK1Zgs9kQQmCxWM6bzCgW\nYXF6Hjmfz8OuXftpaHCg06USDg/y8cdf8dBDm5hxlWf9ic0mxSlZYufOreHgwSY6O+tITs4iFArg\ndvcwa9ZsZs+eR2dnO7t3f4ZCkUN/vwOVSoXf7wIGSU+fQXv7bkymArRaA1988REjIwokSYt7KIrC\n/zlGg4FjvSqsBcU88sg6enudfPZRNyVKPSk6NcGgnaycQva22pjzSGzVLBqNolAoMJlMLFu2gC++\n2IPP50erTeXIERu/+c3zPPro3afUUyorK+DFF1/AM5JBdmopSFFa+45SnGWnumgZu+vq8Nx88wVF\nPKnVaoqKis77uauFoqIiKivTqa8/QHJyHiDhdHZRXZ1xynmmpaWxr7WLepsHS5KVaNRF7eFj6Ioy\n2fzoo6e1G8udcqoxf/x4K8PDUQyGdGxttRRZFyNFoggFjIR9eP0mhhxj1NcPAZ0olU3UVBnZ+sor\niEgEd38/XjHEggXl7NixH5/PQUKCxIYNq1EqQ6SlCQoKCohEIgwNddHVNUp29lwUCiWjo4McOLCd\ne+89t6udEIKNG9dRVVVGY2MzQkBFxSrS0tLYuXM3u3fX4vMFqago4sYb75vSiYhSqUSlUhCJhE/J\nXRQrznlReS4njV/8Ap54As5g818XCBG7Bj/+Mdx6a3wja77JNWGMjI6O8uqzz1KgUDAzNxdJkug4\nfpxPtmzBmZ2Nz+cjOTmZqrIycjMy0CoU+P1+BgcHae5ycNwn0Vq3B2tKGuqkdPIXb0Kh9LN48Xwe\neOAe+vv7ue++n5Camo4QCjIzrSQkGOjqqkOIMCMjx2hq6sfj0TI0dAghrKhU4HAc56abNpKaamH3\n7vfQagfQ6bykpKTidO7n5ptXEAgE2L59L05nD/X1h8nOLiIlJZG8vCTuvvuuiR/XM82Yz8bMmTP4\n4IPd+P2eCeMmHA7R2rofs9lCQcGJCq1ebzavvrqFn/2s4KrMOzA0NMTWrdtpaupCqRTMn1/GjTeu\nxGAwoNfr2bz5fg4dqqOhoRWjUUdNzZ1s396GRqNi5sxZZGSksGfPp4yNddLX14fdbkcZidLZ/AXz\n5hcTNebS3FyHy2UkOTmP3mMfUiyU6HSZaFQB0nMyIC+ZzZsfpq6ujs5OB7aeLpz+bjJSUxlOTCGz\nfCFHj7aSlXUEiyUVn+840Wgex44dZ2xMQ1ZWHnb7QWbPXsPIiJ+33vqQxx67f+Ich4ddlJeXs2/n\nHsZ8Q0CQ4qwkEvRKHC4XKiEIBALXXC6YC0GpVHLvvXdQWdnAwYNNAGzcuJSKiopTjPbdu/eRUbCS\nUUMv7UO9qITAo0ghU5N8xnsrOTkZqzUBp9OG2Rzbkmtv70Wp1KLXq+nwOBnw7QUBksJAVOXFNtJF\nIGyit/cwOTkJJOgT2f7Cizx06wa0Gg2Vyclsq69HnWNi06ZiBgYchEJadu58E7NZxaOP3kEoFKKl\npYWsrCqMxgDd3V8RCglCIRfp6WmMjZ0pLf6pjI2NsX//YWprjxGNRhkaGmH9+pWsXh17SZJ0RSLk\nlEolCxaUs2fPMfLyTjjb22ydFBaeHtk11bS1wWuvwbFjV7zracWGDfDrX8PTT0+vsOZrwhg5cvgw\nKYEA2eMFocT4w1kzNIRKpWJJXh4jPh9f7tyJb8ECvIpY1sh/+7eXGBhIIKfobgIBF7axVuaWVpFf\nUEFX10EikQgQKyxWU1NIQ8N+tNpcVCo9oZCd5GQPY2Na+vvDOJ0qLJZy2tubMZvVExEzdns/s2cv\nY9Giddx+ewX79x/h2DEber2F117bQXv7iyxdejOzZ99Ffv4wPT21rFlTyY03XrpTYXJyMvfcs47X\nX99GKJQICIQYITERSktjeSYikVgeE41GSyCgo6en56qbMY+OjvL0068QjWaTk7OCaDTCvn0t9Pa+\nxpNPPoRSqUSv17NkyaKJLLQx575t7Ny5GzADYWpqsrnnnhp+//wb5CpU5KdaSdbq6Wk5Tt2xV1Cl\nVZGScgMuVy9JARch1xBKnQmFIgmDMouu+m5+/Q//wm13bKKiYiH5mx7D5xsjEPAxMNDDtm3b2LXb\nRX39IGlpCahUPtra9lBf34XRmIvDcYiSEutEXovjx3cyOjqKQqEgFArR0zNIdfUygm4w+MdIMiai\nUWkZcLZhczpRmM3nrEx9raNSqaiurj5nePL+/UfJz5+HpnQ+Ho+LSCREQkIyvb219PX1nVZMTgjB\nHXfcxHPPvUl3twO9PgmHo4OkJD2asIfcqI9MhYRWaaDL1UVn0I/RcANqKUBGRg6RyChiZJAUvZFg\nIIBWoyHZZGJVeTkDCQnc8xc/4l//9Xd0d0eYP38tkiTx0ks7eOutbcycmYckJTN37kI0miM0NLSh\n1c7E6XTy9NOvUlRUQFFREU6nE7VafcqWbSgU4oUXXsVm05KZGcvGevx4N52dL/ODHzxKQkLCFQ3V\nX7NmBQMDb9La+iVCmAAf6ekK7rzzLp544oqJAcSiSX70I7hOqlqcFSHgV7+C9evhoYdgujw6rglj\nZLCnB/NJs8JwJEJ9QwNLc3MZ8vtxut0kGo1k+/28unUrVetu4de/fhqlsoTy8ipaWlyYzTkkJGRw\n7NhBsrOLUCrHJsLPlEoljz9+H88++w5ebwghJLRaE5991jLulJpJIKCmv98BBHG5YMaMLMLhAKFQ\nzIlOkrzU1zfR3S0oK1uHJEm0tLQBFXR0DJKVVUBKihWDYSV799axcuWKyyo+VllZQWFhAW1tbUSj\nUfLy8nj22ZcRQtDV1UV9fSuhEOPF3ez4/csvXQFxorb2EIGAmZycWNVWhUJBbm4ZHR37aG9vp6Sk\n5LTvfL2MPX/+nIlEaPn5+fz93/8rKeiZN2smivGHtVFnxNu4k8auOkymhQQ8w2jG7IiogtTUfHw+\nJxqNjtz0fJqP9Y0brw4ikTA6nZHGxoN89tkhQqF0Cgpm09sbIRj0YbEksWxZLu3tDRiNPiKRZMbG\n1LS0tJKbm0MwGOI///N1+vvdCKGkra0Ji0ViVvUc6nbvRu0PIfRKxrzDdI6l8O0HH7xuihheKpIU\nnfgR/jqaKHb87Em3srOz2bz527z99nscPVpPfn6IcCiRVFeA3PJ5NNcfJhw2o46ESYtq6QuGqKhe\nTG5uLnZ7C56uI4j8bCLhE1sSqYmJHOnupr6+AbtdS3l5NXZ7P7t370KILIaHRwmFfHR3H0GStBw/\nPoTVOhshFIyMhMnISOGf//l5MjMteL0gSRFmzszitttuIikpidbWVnp7wxQUnDDMrNYCurs91NUd\nYenSJVNwdc+OXq/n8ccfoKurC4fDgclkorCw8IqP1y1bYom//uM/rmi305bq6pj/yP/8n/D3fx9v\naWJMM3/aSyM1M5MR74nlyzGfD1U4jFavp2bpUhSZmXT5fLQ5RnEHdai0FdTX22lqshMMhjAagzgc\nvQQCAdzuIK2tn3PLLTecEqEwe3YVP/zhvSxalE1enpLMzDA6nZni4rWkpcW2OPT6UpTKVLzeLlyu\nETyeAaxWK729x8jJ0dPRMURWViylfCgUwOMJkpFRyOCgG++4/DqdEZ8vNuu/XIxGI1VVVVRXV2M2\nm5k3r4yGhq/Yv/84Ol0OZnMRBkMmg4ND7N9fd9n9XWk6OwcwmU5f7lUqkxgaGj7nd9PT05kzZw4V\nFRWEw2GGhkZJUWsnDBEAndZAfmYuJTlGnM5DRPAQUQdJS8smGo2gVkvo9XrcIT8JKfmMjnpYubKS\nrq59tLXVcejQUUKhWEIrq7WUtLQKhoclolENAwMuKitL6OwcZWQkEbfbwNGjw2zduo0jRw4yNJRA\nbu5ycnOXUli4in37dhAOjzF3xQqC5mSODLaRXJjMQz/+MZVVVec4UxmAefPKGRhoP+WYxzOKwRA5\nY74OAJfLxUsvvUdfn468vFXk5i7neMNuPEN9mFMs5M+YgV85SFgZxqzXkpWlJi8vtsKSkGDF4fMD\nfkwnhV473G5SrVba2nowmTKQJInDh/ej1ZaSlJSHwWAlLa2ApKQCPvnkPTQaM0Io8HiGUCrtWCy5\nHDjQg92eTG7uUnJzl9PWJnjhhdeIRCL099tQq0+f6hqNqXR2DkzeBb0IhBDk5+dTU1NDSUnJFTdE\nxsbgBz+I1aHRX1yR52uaX/wCnn8eGhriLUmMa8IYmT1nDnaVikFnLKu8Vq1m2OMhpNNRXFTEnHnz\nKKmsQpOYR2ZeGenpOaSkpGMyWejoGGLOnDKqqjJJSvKTkSGxefO3WLBg3mn9lJaW8uSTD/FXf/Vn\nLF++kMTEXKLRCCqVZnym0oNSmYTBEMLrrcfrrUWvH6WiwsB9932LcDg64cilVKpQKCQikRBCKAmP\nz56i0QgQmqiTMpksWbKI0dFmgkEnPp+dkZF2PJ4GVq/eRHOzDbvdPul9TiUZGWa83tONNknykpR0\n/twbX6PT6VCrFfiikVOOR6IRQlKIm26+idtuK2PBwlkEk5Owjw0QDDrJz89i1OvGodKQkmrBZDKw\nbt0aNm/eiNE4iNFoICsrkaKisokwS602g+FhOy6XB4/Hh1YbALwoFBJKpYTN1kQkYiQrq2RiJp+X\nV8TcuSvo7NyBx9OINSfEk09t5J9+848UFxdf+gW8jlixYinp6T46Ow8yNNRDd3cTTmcd99yz4awr\nkDt27GF01EReXiXJyekUFlay+Ibb8IedGI0eamoK+N5Tj7NoyTwqaspJTjUyOjpEMOgjEPDh1SjR\nW5L5eu3F4/fTNDzMojVrSE42EQh48Ps9uN1+9PqYI2kkEiQhwciNN64lGh1jZGQfTuc+DIZBli1b\nycBAD0plLhpNbKIkhCAzswibLUJ7eztmcxLhsOe0c/F6XaSnnz1F/rXM978PK1fG8mzInCAzE/72\nb+F734MprnV5QVwT2zRms5k7H3+cj99+m+auLiQgsaICo1aLcvxB09MzwHAogrWiGqVSRUlJKYcP\ndyBEKmNjYxQXF2IyKUhL015Q6XW9XkdubjqDg72kpuaRkZGLVqvn+PG9FBSo+NnPbmfmzBJSU1PR\narW0tLRgtw/Q0fE+s2bNIT09h+LiYurrGzAaEzAajUiSRE9PI3PnllxQJdWLRa/XU1paTGlpDsPD\nw+j1ieTkzCEhIZnu7lFcLhepV9GG6rx51ezZ8zJjY6kTeViGhnpISgqdcYvmbOh0OtasWcTvmtqx\nuexYElORkBga7iFqNrBq40YKCgpobGwkN1fBmy++jkGvpTXkRZWURvGsBYTD3ZSXxwrQlZSUsGHD\njbhchzh61HnKHn00Gsbnc5GVVcTYmII1axZy4MDn9PQcICEhgfLyGbS3n76qU1BQil6v54EHbh9f\nhZOneBdDQkIC3/3uwzQ1NdHe3ovZnEdV1UZSzlHG9PDhZjIy5hIM+untbWN4eAilUoFLl0xmrpXi\n8QRqSakG6rpcbNz0IAMDdoaGHCiVffzwz58kx5rK7v37UUsSwdta1QAAFcpJREFU6PUsv+suysvL\nSU1NZceOlwgEkpCkKJIUJRj0o1T6sFqtaDQqamrKSErKp6hozoQj+sjIftRq5WlRMEIYcblclJWV\nkZCwC7u9n9TU2IqP2+1EiEFqatZP0dWdvvzDP8S2Z/bujbck05PvfhdeeAGee44r7sPzTeJmjAgh\nHgE2A1rgaUmSnruc9nJycvjOU09NOP5ptVo+fOcddtbVYRSCw6NOojkzKZoRW/EoLCzH7XZx+HAt\ng4OlCDGI1arh3nvvvCAHr9LSEnJzd2AyJdHZ2QaoiET8VFaa+F//6+cTobLRaHS8ym8PJtNsmpqO\n0NGxjVmz8sjNLSIxsZa0tAh9fXVEox7Ky7PZtGnd5VyKc5KXl4XDkUxu7syJY9FolGjUfc7iYtMR\ni8XCww9v5K23ttHdLSFJEbKzE7nrrrsuOjJo48Z12O0OPnj9PY53dKMihMlq5pEfPjVRrXnOnDnM\nmTOHVauW8eKL7xMKGdDpDEAP999/0yk/bDNmlKLX76KwMI2WlnaMxnQUCgV2ez3LlmWxcOFcGho+\n4siR/bhcetLTlxKJhGhvb8Tt7j5NPpdrmJkzrde1o+rlotVqz+voejIajRqPZ5T9+/fg8RjQaMxE\nIj5sIQN7bDbs488J1YxillWZGB1tIynJiMkkUVY2l7vvvg2tVsvqdevw+XwkJiZOrMJYLBbuv389\nb765DbXaTXf3XtLSLCxdOgedTktv73FWr17IyIgbm62N1NRcIpEwweAgaWnpp40DSXJjNpvHfTTu\n4tVX36erqw0hlCQmKnjssduuqonG5SJJsYiR3/wGdu6E6zDQ7IJQKGJRNWvXxooGxrNMUTyr9qok\nSQoLIRTAPkmS5n/j75NStdfpdOJyuRgeHua11/ZQULBwYsnc7/fS2fk59967HqvVSk5OznlzeJzM\n4cN1vPHGJwQCJvz+IBqNhw0bFnDjjasmPtPS0sJvf7uVgoJFCCFwudy0trbR0rKb226bz223bcRk\nMuF0OklKSiItbWpD3lpbW3n22fdIT68iISGZUChAb+9R5s+3cuedt0x8zufzcfjwEZqa2jGZDMyb\nVzVRhnyquNQqnpFIhOHhYVQq1WU/cIeHh2lra0Oj0TBz5syzrkD4fD66u7sRQpCXl3fG5GGHD9fx\n+uufMDQUpadniGDQxtq11TzxxCN0d3fzs5/9gu5uPcXFy0lMjCWpGxrqwOnczqpVt1JYOBulUoXD\nMYDf38JTTz1Aenr6ZZ3fdCTe1VvPxs6du/jnf34dny8HlSoFh2MIr3cMnW6MVassPP743Wg0GjLH\ncxn19/czOjqK2Wy+4NpOwWCQpqYm3nprK4GACSESAA9ZWWoeeeRulEol+/fXUlfXjFarpbQ0m08/\nPYTBUEpKipVIJEx/fwtZWWH+5E8ennh+SZKE3W4nEomQnp5+Uc+1K8VU6f3YsVgNlrY2eO89yMub\n9C6uOX71q1ia+M8/h8uImzgv56raGzdjZEIAIfTAFkmSVn7j+KQYI18jSRIffvgxu3YdQ6FIASIo\nFE6+/e01VFdfenp0l8t1SsTKN42J997bwsGDHqzWglOO9/a2cMMNGaxdu/qS+75Umpqa+PDDL3A6\nvahUsHTpbFatumGiJorH4+HZZ19kaEhNYqI1tv/t7ea22xayZMniKZNruv4oXQ5fj49IJEJ+fj5m\ns5kXX3yDo0edHDrUQGurknAYzGY9GRmppKfrSUuD3NwITmeYaBRyclK55ZY15ORMberueDFd9R4K\nhbj33v9Cb28Kw8MhlMoE1GrIyDChVrfyL//yY2bNmnX+hi6ASCRCW1sbIyMjmM3mc0ac9PT08P77\nn9LTY0ehgJqaUtavX33V5ZmZbL339p7ILvrTn15/hfAuh2g05lMzZw788pdT18+5jJG4+owIIf4H\n8CTwl1egL26++Sbmzaums7MLlUpJSUnJRS17BwIBWlpacDicpKenUVxcTGJiInPmzDnrd1Qq5Rlv\nOEmKolTGZ7Yya9YsZs6cidfrRavVnubAt2/ffoaGdBNF/wBCISsffriHysqK85arlznBN8dHY2Mj\nR486KCxcQG+vHa02lXBYhd3eTlVVDkVFxXR317Fu3ULMZjPNzcfR6XRXZUK6qx21Wk1FxQyGh0cp\nKipEq9VgMiWiVCrp6Ginru7IBRsjNpuNtrZYNE9paclpkxalUklpaekFtZWTk8P3vvcIXq8XlUp1\n3Y+N0VH4u7+DZ56J+T00N19/dWcuF4UCfv97WLIESkri4z8y5caIEMICvPyNwwOSJN0vSdLfCiF+\nCXwihHhDkqSxkz/013/91xPvV61axapVqy5bHqvVesFLqCdjt9t5/vlXcTrVqFQmQqGjWK07eOyx\ne89ZNbWiYiZffPE2kUguSmXscofDIcLhIWbNWnWpp3HZCCHOOpM6dKiZtLRTH4xqtQZJSqKnp4ey\nsrMX6ZM5N0ePtpCQEHMszM8v5KuvjpGaWgXkEolECQa9qFQu+voGeOmlbSgUsW2ZaPRL1q2by6pV\nK+Io/fVHfn46kUj/RPVfAJ9vBLPZSE+P84La+OST7Xz66WEUirTxicmXbNq06LJzfkyFk/vVxu9/\nDz/7GWzcCIcPwzW6eHhFSE+Hjz6CFStiaeLPUCVhSplyY0SSJBtw2l6EEEIjSVIQCAGnF4LgVGMk\n3rz99hb8fiv5+Sc2IPv6Wtiy5RPuueeOs34vLy+PNWsq+eyzvahUsdlQJDLETTfNPWt+g3ij0ajx\n+0+vHSFJ4ctKxCYDWq2aSCQWepmdXczAQB89PQcZG5Ow240YDDZWr65m27Yj5OQsnggFj0RK+Pjj\nvZSUFF2z2zXTkUWL5vPmm7ux2+tQKpORJD9K5QizZ9eg04XO+/2uri4++eQIubmLJyYjoVCQDz7Y\nS3Fx0SlGjsyF4/fHcofs2RPzC5k///zfkTk/paXw6acx466pCf7mb+BKLbzF06vpL4QQnwG7gDck\nSXLHUZZzMjo6SlvbEOnpp6aMtloLOXKknUAgcM7vr1u3hh/+8G7Wrctj/fp8/vRP72PlyhumUuTL\nYtGiKoaGWk/ZXhobG0Gv95MfT3fra4CqqjL8/j4ikTAKhZL581eyePEc8vLc3HffPP78zx8lGhWo\n1ZZTiosplSo0GgtHjzbHUfrrjxkzZrB4cQVz5hQyY4aB6uocbrxxE5HIGIsWnd/XrL6+Ca02c8IQ\ngdgqo1JpoalJ1uWl0NUFN9wALlcsZFc2RCaXsrLYda2vh5oaeOUVCJ3f7r5s4jbNlSTpb4C/iVf/\nF0M0GkUIxWkhv0IokKSvK3yem8zMzCu+EuLz+SbCnC+Gmpo5tLV1c/jwlwhhRpKCaLVuHn741utq\nfzpWit6HRqOZtBWh/Px81q2bzbZte4BUIIpC4eTP/uzRiUR74XB4IuLrZBQK1UR5AZnJIxKJ4Pf7\nMRgMp93jWq2Whx/+Fr///XsoFCaEENhsh5k7N5c5c84fIhwKnVmXQigIheJTufZq5pNPYvVUfvpT\n+PM/j9VZkZl8LBZ4993Yts3f/V2soN6tt8LNN8dCgKciw4C85n4BJCcnk55uYHR0mKSkE45nw8O9\nFBVZp10CKpvNxocffkpraz9CQFVVIRs23HhO35aTUSqV3H33t1iypIfe3j4MBj3FxcVXnbf+5dDU\n1MRHH32B3e5BrRanRRxdDqtXr6SyspyOjg4UCgVFRUWnJLGaObOYzz//AEkqmPhxjBlGA8yateGy\n+5eJEYlE2LlzN198UUswKJGYqOWmm5Yze/ap6fULCwv5yU8209rais/nJzs7i+zs7AvKR1RWVsKX\nX25DknInPh+NRgmFhigtnbrItGuNaDSWwOyf/glefBFWX/kgxOsOIWIRNps2xVaj3n4bfvtb+M53\nYO7cE3+brGoUcQ/tPRuTHdp7uXR1dfHcc28RjWZgNCbj8ThQq+08+eQ9l+QQO1W4XC7+9/9+Acgj\nLS0bSYoyMNCO2ezi+99/bFJ+TKeK6RLi+XUulrS0SkwmM6FQgJ6eoyxcaOWOO245fwOXiSRJvPXW\n+3z1VTcJCdnj+Wm6mTcvm29/+7ZpmTPicoiX3rdu/YTPPmshJ6cSjUaHx+NicPAIDz20loqK8vM3\ncAFEo1Fee+1tDh2yYTJlI0kSbnc3ixcX8K1v3XxFK+hONy5U73Z7zJnS4YhtGXyjwLLMFcbrhe3b\n4cMPY6snBQXwl38ZWzE533Ce1nlGzsZ0M0YAHA4HtbWHGRiwk5OTQU1N9bTLiPnFFzv54x87yM09\nNeKlo+MADz98w7SOhJkuxsgzz/wnDkcqZnPGxLFoNEp3907+63/9zhXJVBuNRjl+/DiHDzcCMHv2\nLGbMmHHNGSIQH717vV5++cunycxccoo/h9vtRKns4M/+bPJiGyORCM3NzdTVHUOhEFRXl1FaWnpd\nGyJwfr1LUsz4+MlP4L77YvkvpvFc6rokHI7p6Be/gJSU2L/nWrU6lzFyzT3Ztm/fPmVtp6SksHbt\nanJy0li1asWUGyKXci49PYMYjacH2avVSdhsQ5PWz6VwKf1MhmwX20ZPzyBJSadmcj1+vBaFIgGn\n88LCOS9XFoVCwcyZM7nnntu5557bmTVr1oQhEo9rMtXtTEWb52pnZGQE0J9iiACYTGaGh0cmCldO\nhjxKpZKysjIslmTuvvtbzJgx47IMkemmu8keA34/vPoqLFoU81f4i7/Yzj/+4+UbItPpfK8VWVQq\nePDBmLPrqlXbefJJWLMGdu26+LZkY2Sa9nGp/VgsKfh8p1eyDYfHSEk584x+Op9PPG42iyUFt/tU\no+PYsQNIkveC/W4mS5bp3MZktjMVbZ6rHZPJhCT5xqtkn8DjcZGUZDwl++l0u1bXWjvhcCxHyL//\nOzzwQKya7L/+K/z3/w4HD8Lw8PSQczLbudZkUSpBrd5OY2PMOHnwQVi/Pubf477AONlrzhi53qmp\nmY0QQ4yOxiq/SpLE4GA3ycnBiYJvMudm9epF2O3H8Ptj+UCi0Qgu1xAVFTnXVbGxaxmTycT8+aV0\nddUTicRWQYJBPzZbAzfeuPi630K5UkhSrHbMfffFcoasWgWNjTGfhNtvj2UGlbl6UKth8+ZYFtyH\nH4Y//CGWlO5CkKNprjFSUlLYvPlO3nprK11dxwCJgoJUbr/97osO8b1eKSsr4667fHz88W6GhkCI\nEJmZeu644+Z4iyYziWzatB6V6lP27t2NJGnQaCJ861sLqak5e3kHmclFCDh+XK6qe62h0cSMkYcf\njhmcF8K0dmCNtwwyMjIyMjIyk8dVF00jIyMjIyMjc30g78jJyMjIyMjIxBXZGJEBQAixMN4yyFw4\nsr6uX2Tdy3zNtTQWrrltGiGETpIk/xXoRytJ0rkr5F18m/OBJUAyMALskSRp/yT3cSYDVABbJUla\nO4n9VAJhSZKaTjq2WJKkLy/w+wnExuekFFCcjHFxsTqfDH1Olr4uVx/jn68BRiRJahdCrAM0wEeS\nJJ2/ONPZ23xKkqTfjL+fdjofb+ei7/XppPvxti5b/+PfmfQxcJ7+Jm1MXM3jYbqNhSl5FlytxogQ\n4n7gJ0AYeBv4fyRJkoQQn0mSNOWVC4QQH0uStH4S2/snYgrdBowCScCNxAbNjyaxHx9wpkFXLUlS\nyiT18WsgAwgB6cDjkiQNnks3QojHge8DHuA54AkgSqyi879cRN9TNi4uRueTpc/J0Nel6OMMbfwb\noAX0gB9wAy4gR5Kkxy6wjR2AROwhCmABioAx4EdMQ52Pt39R9/p00v14O5et//F2LnsMXEAfl/0c\nuBbHw3QaC1M2DiRJuipfwB5iockC+C/AO4AZ+GyS+9lxlpdzkvv54mKOX0Y/tUDyGY5vm8xrdtL7\n2cDnwIJz6YbYjaYYH+DdxG5eAey+0uNiMnQ+WfqcDH1dij7OJTdw5KT3n19EGz8GfgesPknnH00H\nnU+W3qeb7idL/5M1Bi6gj8t+DlyL42E6jYWpGgdXdZ4RSZK+ztn8b0KIWuBdYlbfZJJGzPoMnnxQ\nCPHHSe7ngBDiaeBjYpZmIjHruXaS+7kZ8J3h+GSWg1UIITSSJAUlSaoTQtwB/CdQcY7vBKTYEp9P\nCPHM19dbCHHRW2GTMC4mQ+eTpc/J0Nel6OObKE96/3+d9P6Cl1YlSfp/hRBaYLMQ4ntAArGl3umg\nc5i8e3066R4mR/8wCWPgApiU58A1OB6m01iYmnFwOZZMPF/AnwD53ziWDfx/k9zPRs5skc6bgnOa\nS8yS/wtiS5U18b7Ol3geiwDLN46pgPvP8Z1HANU3jmmA//tKj4vJ0vl00eel6OMMbVScRT+3XaJM\nauBZYkvocdf5ZOp9Oul+svQ/FWPgLH1c9nNAHg9TOxamahxctT4j30QI8aIkSQ9cgX5ekiTp/qnu\n53pnsq7zZIwLWedXhumk88mUR+bSmQwdyOPh6uBaCu3NvEL9WK9QP9c7k3WdJ2NcyDq/MkwnnYOs\n9+nAZOhAHg9XAdeSMSIjIyMjIyNzFSIbIzIyMjIyMjJxRTZGZGRkZGRkZOLKteTAapEkyXat9HO9\nM1nXeTLakXV+ZZhOOp/MdmQunel0/8rjYWq5ZowRGRkZGRkZmasTeZtGRkZGRkZGJq7IxoiMjIyM\njIxMXJGNERkZGRkZGZm4Ihsj0wghxAYhRJMQ4rgQ4ufxlkdm6hFCPCeEsAkhjsRbFpkrgxAiVwjx\nmRCiQQhRL4T403jLJDP1CCF0Qoi9QohDQoijQoi/i7dM0wnZgXWaIIRQAseAtUAv8BWxegGNcRVM\nZkoRQtwAjAH/IUlSVbzlkZl6hBBWwCpJ0iEhRAJwALhdvtevfYQQBkmSvEIIFbAT+KkkSTvjLdd0\nQF4ZmT4sBFokSeqQJCkEvAx8K84yyUwxkiTtAJzxlkPmyiFJ0oAkSYfG348BjUBWfKWSuRJIkuQd\nf6shVv3WEUdxphWyMTJ9yAa6T/p/z/gxGRmZaxQhRAFQA+yNryQyVwIhhEIIcQiwAZ9JknQ03jJN\nF2RjZPog75fJyFxHjG/RvA78aHyFROYaR5KkqCRJc4AcYIUQYlWcRZo2yMbI9KEXyD3p/7nEVkdk\nZGSuMYQQauAN4D8lSXo73vLIXFkkSRoFPgDmx1uW6YJsjEwf9gOlQogCIYQGuBd4N84yycjITDJC\nCAH8FjgqSdI/xVsemSuDECJNCJE8/l4PrAMOxleq6YNsjEwTJEkKAz8AtgJHgVdk7/prHyHES8Bu\nYIYQolsI8Z14yyQz5SwDHgJWCyEOjr82xFsomSknE/h03GdkL/CeJEmfxFmmaYMc2isjIyMjIyMT\nV+SVERkZGRkZGZm4IhsjMjIyMjIyMnFFNkZkZGRkZGRk4opsjMjIyMjIyMjEFdkYkZGRkZGRkYkr\nsjEiIyMjIyMjE1dkY0RGRkZGRkYmrsjGiIyMjIyMjExc+f8Bo2OouhAS2pgAAAAASUVORK5CYII=\n", + "png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAImCAYAAACB54oCAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWmMXOd57/mrfd+33vdmd7O5kyIpkRJJiZKdyLKvk2sp\nUZwBJmPMnQBBYCeTDJB44JkAjpMbw04ugmCQwcxFPiS5jpNJcm3HjmRbJEVRIsWdTfbC3pfa96pT\np6rONh+q2WKLpERKbC5S/YBGV51T55y36lSd83/f93n+DzRp0qRJkyZNmjRp0qRJkyZNmjRp0qRJ\nkyZNmjRp0qRJkyZNmjRp0qRJkyZNmjRp0qRJkyZNmjRp0qRJkyZNmjRp0uQTwHeBE8Cfv2+5Ffh/\ngJ8Bf/GgG9WkSZMmTZo0+XSwC/jr1cd/Bey5ad3vA88+8BY1adKkSZMmTR459Bu4733Aa6uPfwo8\nedO6Q8DngTeAlzawDU2aNGnSpEmTR5yNFCNeoLT6uLD6/Ab9wA+BF4H/HTBsYDuaNGnSpEmTJo8w\nGylGCoB79bEHyL9v3XGgAkwDkfdvPDIyogHNv0/R36FDhx56G5p/zfPe/Gue9+bfhv0d4w4Y77Ti\nPvA28J+A7wPPAf/1pnWngO3ABaAHSL5/4/HxcTRN28DmNXnU0Ol0zXP+kMgvLrJw/DiyKAKgN5lo\n37uX8Ojohh/7k3beU9eusXz6NKokAWC02eh+5hm83d0PuWWPFp+08/4gqBYKzL3xBpXk6i1Tp8M/\nMEDX009jMG7k7fz+oNPpDt1p3UaOjFwAqjSyaWTgLPBfVtf9KfBN4CTwf6+ub9KkyUNA0zTS4+Nr\nQgRAlSRS4+NINy1r8uFIokjy2rU1IQIgiyLpZueqyX2guLz8nhAB0DTy8/Prlz2mbLSU+ur7nv/2\n6v848JkNPnaTJk3uAlVRqJdKtyxXqlXkahWTzfYQWvV4IlerKPU6ZpcLnV6PVKmgShK1UglVljGY\nTA+7iU0eY+rl8i3LVElCqdcfQmvuL4/+uE6TJo8BqZTAzEyOcrlOW5uL3l4vNtvjceMxGI242tsR\ns9l1y60+HxaX6yG16vHE4nJB6yCTV6OIokRXZwd+TwV3S6gpRD6FFApVZmdzpNMVgkE7fX0+PB7r\nR96fIxRCp9ejqeraMpPDgfkT8DttipEmTT4mqZTAT386S6nU6J3MzeVIJgWefroLg2EjZ0LvH8GR\nEaq5HOV4HE1Vsfn9tO7ejf4xmId+lEikRN6dqLE8lkKqCFwfj7Pv2S0MbtnysJvW5AEjCHWOH18g\nHm+MZszN5VleLvLss704HOaPtE9XRwfBzZvJTU8j12qYHQ5aduzAHgjcz6Y/FJpXmiZNPibz8/k1\nIaJpGtVimWtnc/S0GOjoDWO0WD5w+2o+T3FlBblaxREO42prQ294sNnuNq+X3qNHqaTTa2LEbLc/\n0DZ8EpiezqIYbIS2jCIJAuh0ZGQPFdnE7frDqiwjJJPUBQGzw4EjEnng5/5eqFcqlFZWqObz2AMB\nXO3tH/r9/rQSi5VJJNZPq8TjZRIJgb6+uxcjkihSWFqiuLyM2enE292Nf3AQpVrF7HRi8/nud9Mf\nCk0x0qTJx6RSeS9YsRSNUlhaQqeqJAYMKEsTdB44cMcbeyWTYf6NN9amSPQmE607d9KyY8cDafvN\nGM1m3G1tD/y4nyQEYTWDxmzGaG7ccFR01OvKLa9VZJmVM2fITE6iShIGs5nA0BDtTzzxSI5ISaLI\n4ptvUlhcBE1Dp9fjHxig88CB5hTUbahWZd4fs6xpjeV3i1yrsfDmm8z9/OeUYzH0BgORbdvoOXKE\n8JYtj7RwvVcejzHkJk0+AE3TiMVKXL6cYGIiTaFQfaDHb2tzodfrqAsCxeVl1LqEL+TGplXIz81R\nWFi447bZ6el1sRo3sliq+fwdt2ny6NLZ6UanW7/M47Hg8906LlKOxchMTKxl3ij1OumJCcrx+Npr\n8nmR8fEUly8n1ob7HxallZU1IQKgqSq52dl17W3yHoGADYtlvViwWo34/XcfEF6Ox4lfvIiYTqM5\nfBQ1J7PXFsnHM1TS6fvd5IfKoye/m9zCxMQEb7zxBg6Hgy984Qt4PJ6H3aRHiomJNGfOrFCrNXqf\nfr+Nw4d7CAYfzDRDd7eH0dEQV8+V0etUgh1+9uxpQytFAaikUjAycttt3x80Co2MDEkUsXq9t9mi\nyaNMX5+PTKbCwkIBWVbxeKzs3duOxXLrpbZWKKDK63vJqiRRKxYBSCYFjh2bI5+vAWCzGdm3r51N\nm4Ib/0ZuQ61U4v1dfVWWkSqVh9KeR52WFie7drUyNpZEFGVsNiNbtoSJRBx3vQ9JEKiXSlQNLqYn\nMohCo6Nl37SCobWXwVvsQh9fmmLkEaZcLvO1r32NH/zgB3zuc58jm83ye7/3e/zd3/0dzz333MNu\n3iOBINS5fDmxJkQAslmRyck0wWDXA2mD2Wxk//4OulvNRLtVzHIZtbiCXGvcRKwfMKfrbG2luLS0\nbpnJbr8liyWTqSCKMk6nGa/XSiolMDubo1Sq097uoqfn8cneeRQQRYlsVsRg0BMM2jEa788gsc1m\n4uDBbkZGKkiSis9nveN5MTmdKKpGJiWQzYoYTQYirS5MjsbNanw8tSZEGm2WuXw5QWen56Gca5vf\nj85gQFPe+60ZzGYsbvcHbPXpRafTsXVrhM5OD5WKhMNhuqtMmuXlInNzOSRJpT9kwuz2Eb0YRxSq\naIDeaEJUjVy5lqFvtPuxCZL/MJpi5BElm83yi7/4iwwODjI1NYV79Qd/7NgxvvSlL/HDH/6Qffv2\nPeRWPnwEQbrtHGwq9WB7azqdjtbuMLpCnOJCEc3jQdM0zC7XBzpv+vv7KcdilKJRNEXB7HTSumsX\nZqcTAEVRuXgxzvh4mlpNxm43MTgYIB4vEY3eiNLPkUg8Xtk7D5N4vMTbby+Ty1XR63V0dLjZv78d\np/P+BGLq9TpCoQ/v/bpaW7F09mOUVvA5FAr5KiWDn6rB1TCiS9/6HRZFmUpFeihixNXWRnB4mMzU\nVCPGxWIhNDqKIxx+4G15nPB6rXi9d5fOOzeX48SJBWRZJeQzkdAUvJuGCMymyefK1GQdbTt3sJAx\nokoCu5YK9PQ0A1ibbBDVapUXX3yR/fv3893vfhfdTZPQhw8f5q//+q959dVXuXLlCvZPecaDy2XG\n4TCtGxkBaG11PvC2aIqCUq2Sm5+nkkph8/kawavOO7fF4nLR++yzCIkEiiRh8/mw+f1r65NJgfhy\nFodNj6LoKZXqvP76DMPD7w3VaxosLOQZGQkSiTz49/04IUkK774bXSdWZ2dz+HxWdu9+sMG7ialZ\nZs9eoxJbAU2lbcc2TIPbmZgu0NLhp6XFSSaz3gHX4TDjdDYCY+VaDZ1e/8CCRw0mEx379uHr7UUS\nxUb2TziMTt8UwPcDTdMYH09Tryt0hw3kLr/DlFile7iNLZ85xODzR5ldqrCSN7AYk+jyGZiayhAK\nOT5yqvCjRFOMPGJomsZXvvIVuru7bxEiN/jiF7/I97//fb75zW/yzW9+8yG08tHBZjOxc2cr77yz\njCBI6HQQDjvYtOnB592XYjFS165hslrxdHYCkJ6YwNPdjaulZd1rVVVjfj7P7GwOaMQadPd41o1s\n1IpFlk69Teb0OEablbaREbIWP1NTGSRJQa/XoaqNOXxJUpGkWzM2mqynUKiRz98a4Ly4WGDXrtbb\n/t42gmqxyNKZd1mZT6EoJgwGHZl3phj2tKP6G9+d4eEgqZRAKlVB08DpNLN9e4TEcobFsxfJLywQ\nCLnoe2IroeHhB5KBozcacTUzrjYERdGoVCTcbgu1lUmcdiOVXIbZ164S6Qpj3bSduNzC2GySlhYn\nw8MBYrEy+Xz1nsRINisyPZ0lk6kQiTjp7/94Rmz3i6YYecT4y7/8SyYmJnjzzTc/8ML4p3/6p+zY\nsYOvfvWrhEKhB9jCR4/+fj9er5VMRsRk0hMOP5yegpBMUo7FUCSp0WO1WDA7HNTy+VvEyNRUhlOn\nllAUFb/PwtyVLGrBTddQB0arDTSV5dOniV++QmIxg8moJ7eSYOizzxOJODCZDGu1TqrFIk4LGMQ8\n9bLhA0di7oVsVqRUqmE2GwiFHPctruJhYrEYMJkMiOL6qT2n04ymqgjpNKosY/P779kGX0gmqWQy\n6I1GnC0tH+heWy8W0ctVfF4r4uo0o6pqlGIxtj2xDUkUsShlDh9oI1tSkWWVYNBOJlPh0msnmHn7\nIgBzBh2lZIpt6HH2DKyNmtwtqqqh16+/zlQLhbXvsSMcxhn5BEVJ3kc0TSObFRHFRjaU12u758//\nZoxGPZ2dboSigM9sR1xJomllpGqO0myeYiLDs7/+P9PZ5cVo1JPP1zAYdJjNd5/eWyxWOXZsfm0K\ncGmpSCxW4tlnex96zFlTjDxCXL16lT/6oz/i7bffxvYhF8LOzk5eeeUVvvOd7/Ctb33rAbXw0SUQ\nsBMIPLwpK7lWo5rLUVxepl4uU4pGMVqteHt66D50CEWWG+l5gNnjY2oqjcmkp7/LjjB1iYXJWWJG\njatBP6GdT+AJulg8O0mhUKNalciJMj6fSmF+lr17D+B2WyiX6+SWY+iELEPbwyTfOUHB7ab7mWc+\nthHSxESac+eiCIKEyaSnt9fH/v0dWK2P9yXD5bIwNBTg/PkYitIQczabkS2bXMwfO9ZIzZZlrF4v\nHfv3425vv6v9ZqeniZ47h95kQgfk5+dp27173ZTbzRgsFopllWRKYGmpiKZptLW52T3Uht9QZvrH\nJ6iVShhX4zLCo6OIVZmVmSip6QW8kQAmqxmhKDBxLYlifRfdkAmbzcSOHS0f2tNdWMgzMZGmXK7T\n1eVhaCiI221BSKVYOH58LcvLaLXSvm8fwaGhu/+QPwUsLOR54415ZmezuFwWXC4zNpuJzZtDa5/l\nR2Fk0MPUzy+Tu3KGzPXrmOx2WreMsHjyJHazjUoqQb3eQi5XRaeDwcHQPWUNxmLlW2KRbpizPezY\nk8f7yvIJolar8eUvf5k/+ZM/YWBg4K62+d3f/V3279/PN77xDazWhz/M9mmmHIshVSr4+vsZ/6d/\nQlMU6oJA6969VAsFrv/bv1FdvcCbfQE6gwM4xSzK5CLR4ycIdLYRT9epFgVqdQ1h226uXEngsEIk\n4qRalbFajXR1uhl9oh2bzURLyErsShFjVUYrRZGrVWRRJDs9TfsTT3zk95LLiVy4EFsz8JIklevX\nM7S1uR7K9JemaSiKitF4fwyetm6N4PFYWVwsYLEY6OvzocWmyc3MrL1GzGRYefdd7MHghzqMSqJI\nYmwMg9lM7Pz5hjmVyYSQSDD8S790W8M72exC8bZhNEZpa3NRryt4W4KEejuJnn4bSRRRjHakmkp8\nfAqb34/qCKAzGOgeaid7fRoxtoQnGMHpa0eSFCRBYnGxSL2u8OyzvXcMZo5Gixw/vrAW+J3JiOTz\nVY4c6SU9MbEu3VyuVklcuYK7s7PpyLtKLFbihz+c4t13o2QyFVKpCrt3t9La6sLlMlMu1zl8uOcj\nTflVY4uULr9DdmqKxOXLmB0OzHYbvc89R2lpiVDEic0VJJ+v0d7uprvbs+44xWKVcrmOwaAnELDd\n8pu5XbC/qmrU6+otyx80Gy1GvgvsBs6zvoLv/wH8ByAH/PfV132q+fa3v01HRwe/8Ru/cdfb9Pf3\ns2vXLr7//e/z67/+6xvYuk8XN6Y/7nQxqddlFhYKLC0VsdmM9PX5oFRCEkVcHR1s+tznqGQyOEIh\nfAMDxM+fR6pU1kYrtHqV0oWTZLIy9eUZ5FSM5YUZHJ09VBJVbAaJ6sBWRIMLtZQhkSgzMtIotCZa\ngxw/Ps/gYJAWt0I8Po10U7l6aEwXfBzK5Trl8voqoJoGiUR5w8TIwkKehYWG0Vt3t5eursZFdmmp\nwPh4mmKxRlubi5GRID7fx6sibDTq6evzNc7bKpNnlm55Xa1QoFYoYPyQbJG6IKA3GklcukQllUJn\nMCAJAotvvUV461Zatm+/ZZtisYbo7WXkRT/lWAyLy4nB30ohJyBV6wh1A3MnTlFOZXGGfJjcXoZf\neBa/18T4+Bjz71wAIJe/xJYjT9B24ItcjTfOWTxeJpcTCQZvn9EzN5e/5aa0slIina40PHHeh1yp\nIFUqTTGyyvx8nkxGRJZVSqU6mgYTExn6+nwUizUURSOXE/H77/7zUmWZ3Ows8UuXWD59muzUFHK9\nTmllhVqpxPN/9mcUlqMEOloY7Lt9zM7SUoGzZ6NcvpxAUTR27Gjh8OGedSZroZADs9mwzhHYbjfd\n1pTvQbORYmQX4ACeAf4K2AOcXV2nAb8L/GwDj//YMD8/z3e/+13Onj17z2r6N3/zN/n2t7/dFCP3\nAblWIzM1RW5mBp3RSHBoCF9f3y2WyxcuxLlyJbkWPDo3l+OZ7bZGXZp8nvz8PEabjdz8PM7WVkrR\n6Lrher3RSOn6VZxtg8gBD+kzGSqlCkanE01vI70Uw6PUMfaM0uvMYhyfp26w4uwaYKlkR6iUiMcF\nnjnQjq2jB7FYwaRVqRfyaIqCYzWGqC4IjV6uToc9ELjrGAiz2YDFYrzlhvVRh57vhp//fA5JavTO\nZmZyPPVUJ16vlePHF9bs9rNZkVxO5OjRvtuaiH0cbhdnYzCbMdxF3RWzw4HF7aa4uEhhaQmj1Yoj\nEsHm9VJYXLytGLFYDBQFlYxkxxYcpqSoiAmZrlYbdYudq//6I4qxhqgsp7NMvH6c8OhmXEoBg8VK\n+3A39Wyq0VM2q2i69zLKzGYDmsZqYKMJk2n99/fG53wzqtoYfXK2tNzi7Gl2OjE77t6o65OOJCmY\nTPp1sTaSpGAw6HA4zMiyes/X8VI0SnpyEnQ60hMToGkYbXZURUWuSYj5Aj1HP0PVePs4JFGUuHAh\nxg9/OIVOp6OtzcXERAqv18Kzz/atva611cnu3Q0jtmq1YRWwY0fLXaWibzQbKUb2Aa+tPv4p8CTv\niRGAP6UxMvK/Apc2sB2PPF/96lf52te+Rk9Pzz1v++KLL/KVr3yF+fn5j7R9k/eIX7pE4tKlNZfJ\nSjKJpmkEN21ae82NSPQbQgSgXJZIVrwEOjqwuFzkZ2eplUpYvV5MdjuOcBjTzdNomoZBr8Pv0mNq\n6yV/OYRxJY5Jp6K3WbD1DZGLpynWPRT9I2z+wlauTeXJ13WEK40bjt1u4tpkjnzcTGJiAX/AzrbN\n3biMVfyDg5TjcZZOnaKSyaDT6XCEw3QeOHBX1T1DIQf9/T7Gx9Nr7zMUstPdvXHOvzffICVJZW6u\nkW57c90faPT6U6kKHR3312grMDREaWVlzU1Up9cT2LQJ6x3cjrPZ90zN9AYD9VIJIZVCzGbRG43I\n1Srujo47BhMHgw4GBnxcu5ZeG4UKBu209LUzMzuxJkQAbB4nZVGlGE9iMUJri4OsaEEy+VEUlWw0\nRY9SxWSyYzYbCAbtnDy52DDashvZvj1Cb997572ry8PMTHYtZgbA57MSCNhRbcOUk8nGCImmYXI4\niGzffs/BvJ9kOjs9+P1p/H4bpVKNel2gp8eL12vDYNATiTjuylekVpPJ56vYbCbSU1Nc+fu/Z+ur\nr+IIh0ldvYrF48Xq89GycxeaBoo7wvj1EpH2wC1TcIIgMTaWxGw2MDDg59q1FMlkhWRSIBJxMjra\nGN27YcTW1dUwYnM6zbhcG9fJUGQZTVHuqpjiRooRLzC7+rgAjN607r8A/ycwAPy/NEZPPpWcOHGC\ny5cv873vfe8jbW8ymfjlX/5l/uEf/oHf//3fv8+t+/RQK5XIz86us7tWZZnM5CSBgYE1L4V6Xblt\nzzKeqrH1+X3MTcZxHwlSmJ/DYQPvwCCOngEu/9sxdJqC12vFYTQS3rKlEVdSyNN94CkKS0u42jvQ\nu/wkczKaScdnPjPAykqJ2ZUql8Yy+Owa1WQcf8CB19XBsWPLbNoUxNLeS14QmMzY+fwX92Fx25l9\n/fW1Hq6maZTjcZJjY/QcOvShn4Ver+OJJ9pobXWRSJRxucx0dnoeaPpfva5Sq92+B6+9v/rYfcDd\n1kbv0aPk5+ZQajXcHR14biPuazWZ8+djzM7mkGUVn8/Gnk1mZFGk58gRpv/935HKZVRFQanXCQwO\nAo1pmeXlIoJQJxx2rAVcd3a6yWZFuro8bNkSxuOx4uvtJTLUR61QwOJyYPIFKYgG9HodznCEWqFA\nPdc4tzpNw+v3U81l6WgL4fbaGRtLItVlyvE4QjLJ/LsOPv/L2wi1+dGbTHR2etmzp42JifSaoNqz\np60RnGz103/0KEIy2cgqCgQ+EeXp7yednW5GRoIUizUcDhNPPtnB3r3tqKqG02lmdDR825GRVEog\nGi2h04HRaGBysjH92NXlxnDpKkIiQfb6dUa/9CXig4PUhQqRbVup12ScXX1cy9qQlMb00PvFiMmk\nQ9MaU5xvv73M0lKjpIBer+PkyUWCQfs6DyKPx7qhv2dVlslMTZGenESVZbzd3YRGRz9wm40UIwXg\nRvfFA9xc+Su3+n/6g3bw1a9+Fe9qfY7h4WH279+/1vufn58HeKyfa5rG17/+db7xjW8Qi8U+8v5+\n5Vd+hT/+4z/m5ZdffqTe30d5/rDQFAVVuU1lVUlC0zRuXFq8Xisej+UWh9feXg8zMwVOnUmjKEbc\n/i0IZgNyyU6hUMWxZT/FxQXKFiMDmzbT/oSD6NmzJK9cIXHlCr7ubmShTPStt4ns3MHAoaMIkkQc\nFaNOYfeQjQuvvwNeGy3uThYmNKxGI9msSDpdxWAwouWhKGqYjI3pGZ1ej85gIJsqkkwK2NIKcssQ\n7V2BD03jM5uNt8RVPEjcbvPqZ5pdJ/58Pus9FRq7F1wtLbekYMP63t3iYoF4vIzFYqBWU4jHyyzZ\n9KixOFKpROuOHeTn59dGFBRJolCocvz4PJmMiCSp+P1WNK0RKGww6LHbTVQqEhcuxKlU6nS3+ujZ\nu5Pk/ArZgkKhqDC0vZNQbycWp4Pwli3Uy2UkQcAWCNC6cyeKWmV0Z4DllIKiaJQTCfJzc2iqRjyb\nZeJtyNtFbH4/FpeL4b17GRjwU63KeDxWjEY91UKBermMyW7H19d3m0/o8eCGYL1fbsSaprGwUGB2\ntvFd9HgaI3b9/T6MRgP1uoSiaASDNsJh521tBZaXixw/Po8gSIRCds6fj2GxGAmHHRTyVQIGA9Vi\niav/7b8x8h//I672dmqCgKbX0/3cUaaFAMl0mS1bwredovR4bOzb186VK8k1IWIy6Wlvd6FpjRHF\nB2mImJ2ZYentt9dKB8RzOeTqBxcw3Ugx8jbwn4DvA88B//WmdS6gBAQ/qA1//ud/fsedv39K4nF8\n/tprr5FKpfjyl7+M4X1xCfeyv2eeeYZXX311XSDjo/D+Ps7zj4okNW4Q+XwVl8tMa6vrruILLB4P\nzpaWdRkV6HT43xczYrUa2bOnjfHxNIJQRxAkOjrcdHa6+elP55Dlxo2zUKhRqdS5eDHO9u0tzOUs\nOFu3UAOWSzaeGG6l97nnEDMZrD4f5ViM6PnzBAZ66dy1g8LlMyyPLyCIJvr27sYcNGO37OPs6SWi\neQO9I17Ks0mWlmREUcFg0CMIEvl8lUjQgzncTiEnkc+LVC0uarpl9JqZS2Np4qkaBw50PTCDr7uh\no8NNItGwtw+HHWzb1oLPZ2Xfvg6uXUtSrSr4fFZ27Wq9bx4y1apMLFaiXK7j89loaXGu81JZ17tT\nFDybt3PuUoUrY2nsdhObNgUaQYyyAZsGC8ePo9br2INBVFluVOV1OlE7zKhqY2rN4TBht5v44Q+v\n097uIhCw43ZbeOedZQqFGkcOtjB35hpGnQ2r3YpbEnBuGcE1sgXVZMdgMq1NuaFp6PR6asUiJocD\nu8uOPtMYlRESCbTVKTZVqmNyuBDECm67ndzsLCabjc6nnsLhMKNpGsmrV0lcvoxUqWC0WgmPjhLZ\ntu2xcleVZZXp6SxTUxkURaW/38/QUOBjxxfNzuZ4881F6nUFVdUYH0/x9NNd2Gwm5ucbwcCplEip\n5OLixQRHjvQCGsViHbfbTDjsZGwsuZadBg1xYrMZ8fmsKPUaztY2/IObyF2f5No//RN9R48yePAg\n5mCEpaUCdau2VpDzTjz1VCc6HZw6tYSqarS2OhkY8KPTNaY+BaH+wPyXMlNT62oYARSWbg0Sv5mN\nFCMXgCpwYvXxWRrTM78N/BmwBdAD/9sGtuGR5lvf+hZf//rXbxEi94rBYOCll17iBz/4Ab/zO79z\nn1r3+KEoKmfPRhkfTyPLKnq9jt5eL0891fmhIwE6nY7W3btBa/QqdXo93p4eAjf5KyiKysREmvHx\nFOVyo/DVU0910NPjo1aTb0mPq9dVKpXGyIqqahSLjaJn2WzD4rteLhM9f57omTM4W1pwRiK0bNvG\ntX/8R2yBAJrBRm6mhFWp4Nv1FKd+dAZFbyFXL9EVNpLPVGjrCJJLFqkUBDYP9SFXq4h1D1diFq4c\nv0p0MYus6njm+RE6tnUzGZOoVPIMD9+bP8FG8/zzfWQyDafRQMCG2dy4NG3eHKK310u1KuNyme9b\neq8oSpw8ucjCQgFV1TAa9YyOhtizp22tR31z787iD3Dy51OcOFekKDXmvxcXC7zwwgA11UDftu3k\nJ66Rn58HnY7wli0UFhfxbN/HyZ9PcfnMHBYTuIMentjXzZM73FjtNiz1LJpYpRBLEewMoSXmuPiT\nt0gkKzx1eAiDz4WrpYXFrJ5AtMTQUBB/fz/FpaW1+Ba9yUR4dBSTzUZLC3i8VuKrM1l6mwOrx8/i\nUoGlqRTbTJ0MhDsorqwgiSImmw0hlSJ2/jyy2PheSoJA7MIF7OEw7sfIbXV6Ostbby2uxcKk0xVq\nNZknnrg7r5jboaoaExPptewTVVWx200sLxeZn89z7lwMWdbYtatlTZy+8UajU6IoGgaDjt5eH+/X\n/SaTgXq94ZpsqFXRTCqOoJ9KshGjlJubwxWJ0N7ejrWa5cCzT+Hz2z/w+2+3m9m3r5NkskIiIWCz\nGRFFiXSn7nLUAAAgAElEQVS6QiolMDOTZWDAz9at4bXf10agaRqaeu+pwhud2vvV9z3/7dX//8sG\nH/eR5+LFi1y/fp2XX375vuzvpZde4jvf+c6nWowkkwITE+m10QlV1ZidzdHT46W///bmUzdj83ob\noxW5HHqD4ZbgxaWlAqdPr6ztv15XuHIlSXu7G5vNRHu7a53VuNVqpL/fj16vQ6d7LxylpWW1CJ4k\nEd66ldjZs5RjMcxuN4nLl1ElCb07gN5gY+CJTqrlMgvLAunFODodmPwmpk/E2PHSZ6iJdWoLSbr6\nQkT0KYSpKhPydlay4B8cQmCFak1hoWAjrLqQ5dJqT2njrOMVRaVeV7BajXc9+mIyGWhpuX2mgM1m\nuu/ukDduJjfOiSw3hGZ3t2etHdlYBtXXjoU6stnB3PgMXoebck5B1RkQBInFxTyHD3fjsqmERkdx\nd3Y2RkUSCWyBAEvxGlePnUVI5ojFExitVnS1/ewcsmGXNMbPTOIaGCF+7RpP7T2EOL+Ex21BECTS\ny0l0eh1ipYZv37NrTp/ujg76X3iBwtISqizjbm/HtWrO5vPZOHy4F5sqsDSxgMUfQMkluXLsHLZw\nK5fOzCFtibBn1Id+taZNNZdbEyI30FSVarGEySfd03l8WCiKyuRkmmpVplaTMRoNWK1GZmZyjIyE\nPrIzqqqq69x6jUYDvb1ejh1bwG43oqqNjkY8XiaZFHA4zJw4EWuk4ht0KIrG3FyOgQE/fr+1EUzu\nMLN5c4iZmSxGnUo4YGXhBydQy3k6n38RGRNqPkFqchLfpk1E+toJhe/s5nszTqeZo0f7GBtLkkwK\nQCOOJJWqUK024p1umLNtFDqdDv/AAJVUap0ocbW2fuB2TdOzh8Rf/MVf8Fu/9VuY7lORq+eee45f\n+7VfI5fL4fuY7puPK+Vy/Zbg0hspjneLTqfDfgfXzIWFwpoQuUEmI5LJiLS1udi6NYIoykSjJQC2\njvgI2WssXo/h8tsRdE6sDiv9/avnR1UJDg4y+vLLZKancbW1YfV6kdq28tbbswjlIoHOVp48uhtE\nGy29bWjlHGoxjRLsIZ8uMuQv4/XnsDkMCCtlJmI25LSbd99N09XlITjQy+Jigflole2rvTuvd+Pi\nLhYW8oyNJSmX64RCdrZujTwSaYPvJ5er8v442FpNWRvJGh9P88aJGPlYGm/AyY6nWrHYzNTLZYaG\n+8jmJep1hUjEwchICL3iwdPZyfwbb1CMRtEbjQy+9AV+9m6cQl5ElhXMdhtisURybhHnnu0kz76N\nJ9yKzaxy6MXdpNIi+ZkcicU8Ho8Vf8BGqVTH4TTjclvX+YY4wuHbVsutFgrY6nmOHOmmuq+NN14f\n58p0DFukHZvPS61UYnkxz4EXtmFYrWVjtFjQ6fVrNw6j1UrV1cbJc3mkS5P4fDa2bQvfUSw+Ctyw\nZr96tTGlZzDoaG9343CYUJSPbuhlNBro7vasjWZCw/bdZjNiNhtxuy1YLEYiESfxeJn2dhcOhxmD\n4T3xpigaOp2OdFpkZiZLMGhn//4O9u/vIJ8rY1XjtIwOsZJW+enrk8Rmlgl1t/KZV49QXFwkNDJy\nT20OhRwcOdJLKiXwzjvLJJPC2miRpsHMTPaexYhcr1NJpVAVBZvP94GlDgD8g4Mo9TrZ69dRFQV3\nezstO3Z84DZNMfIQyGaz/PM//zMzN8cnfEzsdjvPPPMMP/nJT/jVX/3V+7bfxwmn03yLoY9er/vY\nJlk3uF1tFr1ex41pdbfbwpEjPWSzIjpVoXjtHOWJKG4ZKjmJSFc7gwefxupoDPM7wmEK0Si2PUfR\ndRykbtbhDli4/Ff/SmIpTSWVInp5HFmBz/xPLzF8cAfF5RWMlq0YLFYCXiPlyUuIK1GMlq2MT+So\nKXr279iPzyYj5nKU8mZAT1ubG0VRUVWVlhYnb7+9RDYrYreb6O310dvrxWQyUMlkUCWpkZZ8j+mc\niUSZEycW1nqShUKNQqHGCy/0P3JVRX0+K6JYJ5msUCrVcTrNdHa6cTjMxGIlzpxZoapaEIUqlZKI\nhIneLT2MX14hEHYTiugwGHQ88UQ7VqsJMNH/wgu4u7spzM9j8gXJiGaU8hK5dJl6qYTDacEf9hIM\n2PAFXaibt+Boa8NlUTlxReb82WV2DfYRX0wiywpGvUbIrad/ay8OTxV9OUlVtHJtPM3ERAa73cjw\ncIihoQA6nY7c3Bwrp09TK5XQGwwERjZj9EZo2+ekLlQQ4nHMbi/29k4Umw9RlLDZTDgiEVzt7RRX\n5/RVd4TT5zIYQjZM1trqeazywgv9j0RBtdtRqUiYzQZKpTqqCuVyjYWFAt3dHpLJ8pp4gIZp4dxc\nnnS60kjdD9oZGPDfcQpkZCSEomgkEmWKxRqRiIMnn+ykUKjS1uYklxPJZquN1OwWJx0d7wkXRdEo\nlWokk2XK5TrBoB1RlDl7NsrBg51s39FG9koCoW8f16bGsA/tYNvWrSxenuTidJU9vUa87xu1uluM\nRv2aCdvN3Gtgb61YZPHUKcrRKKqiYHG76XzySTxdXXc+ttlM686dBIeHUWX5Q8ULNMXIQ+F73/se\nn/3sZwnc55S5G3Ejn1YxEg47GB4OMD7eSFm8MV/b1nZ/enTd3V6mp7Nr5lLQ8N8QRZlTp5Zwuxsp\nsKGQg8LiIkKpSkwOEIsW8XjduFQ9tVwaq6MDAL3BwHItwL/8f1dZnEliMJvo6Q0ycuhpUtFsQxT4\n/chGO/Vigf1PDzA+7iWfzNHa6mTXsJ0LP5vG7vGRTZUoZfJs2txGxCFhrWY4dXyKtq4g3bu30N7u\nwuezsrRYYOxyHB0q755P0t3tQSjXMShVTOlZ8gsLjdosHg/t+/bddW0WgGi0dEsBunS6QjpdeeTE\nSDBox2w2Eo2WkCQVUZTYvDmIw2FiaipDva5gD4dQFRkhkSCXzHHghW1YAiGiiSrlskRnp5taTaFQ\nqOLxWNEZDBQWFqgViyREOxPji7SFLQwMR5i8JFIpV+nsaWf/C7u4Op7h6oU4Dm+FruFO7CaF1oAR\nW9cAL/yPQZSV61TTCbq2DbL05gmSk1O079mFPHCAN84LFMsKFoPKpYtRvvTyVgZ6nETPnaNWbGRS\nqLLM/NnL+Hv3MDYp4XQ6sHf1kUoLWH1+roylsM/m6Oz0Eg7b6Xr6aXKzs4iZDEk1iD5gX+eNk8tV\nSaWER1aMZLPV1RiuTs6fjyEINUZHQ/j9Nt59N0qpJLFrV2Oa4OLFOPF4mbGxFPPzeUwmPQcPdvHZ\nzw7c0nFRFJVkUqBcrmM06tm2LUJrqwubzcj4eJpaTSEQsOP1Wjl4sBOr1UgiIXDlSgK73UShUKOn\nx8ulSwmi0RKRiJNCocr0dJa+Pi8eLUe8bOJf/+UyE2MxqvkcLe1+jn7uMMvj81S7g3g/YnC/12ul\nq8vD+Ph7JnZGo/6eXZQzU1MUFxfXnldSKRbefJOhz3/+Q0XGvXRommLkIfA3f/M3fOMb37jv+33x\nxRf5wz/8QxRF+dhBsY8jBoOePXva6ez0UCrVsdmMtLQ475tbZ0eHm2ee6ebatRSVikR7uwuTycDP\nfz631vsIh7McOdJLVaxxcUJg6moUh8tKdCnPypIdf18Png4orqwQnZrnx38/TjZapKWtnbJqZSUu\nYDLpaBvqIeVwYrBY0WQZfVUg5IH6cBtL1KnmcsxeL9L/i79IMZ4kO1egrSvI8IEdJFcybNvRxtCW\nViqFMh29NuYKdc6eWUZMxHh7IsfI5hC9AQM9gTq6+fMspzTK4+fxd3dicbmopNOsnDmD7bOfpVYq\nIZXLGCyWD6xme7MR3M1sgC3IxyaXqxKJOPjc5zYhitKac2YiIaxVQTUYjXg6O1er1mqEu1sYCdga\noyZVBUGoc+rUEi0tTp57rhcllyQ/NweqSsHqJr6SJRhoYe8OH1t3tCOWKozu6kZCz/i1JGavn9jE\nBKlYjv4dmwi2+qkqRozt3Wi1Mv5whOs//THzp86g0+lpe+ZZ/u3v38LbP4C5lKeUziHMG7jYZqQz\nuJl6qYTBYsHg8pEp60jlsoRKaZ7c183khRnysTSj/X4GW2VSNYl//dcZnE4zW7aE2bYtwuatW9Hp\ndFSuJjHP1G/5zB7F83gDg0FHJiPi8VgZGQnR0+OlWKyRy4no9XquX88wNNQIME2lKszN5deybioV\nOH16hdZWF4cP96zb78REei1OLJsVee21GQ4c6MTpNLFpkx+Hw4LTaaalxUGhUOP11+cwmw3s2tW6\n6pUj4/fbOHlyEU2Dyck04bADnQ68dtCqIhd+fhGDpCEkkzhCQTJFmVhOw+OzEd66lbo9uCZ47wWd\nTseuXa3Y7Sbm5vKYzQaGhgL09nrvaT/F5eW1x+VEguLyMkarFXswiH9gAH9//z3t7040xcgDZnJy\nkoWFBZ5//vn7vu+Ojg4ikQgXLlxgz549933/jwOKKGAV4hjrVeyeEGbz/XUN7e310d3tRZYVisUq\nr78+t1aG3eez4nboySVzyJKZclli584WjGoVq9uN4vCRLhuIFAosvvUWBdlOOpqlmCpRFUSCIyPU\njA6qso6Ax002VcZotdDWHcKmE4nGdSzXK+QFEAsqizNJPv/KbgZGKnh2Ay4/7xyb5OqFGWamswxt\nbuPpA63Mn3oH346ncLW4UVu6MLk9GHQqAyGFqX//MW39HdTFJLpMGp1cJzgygtFioZrPk56cpByN\nUoxGETMZAoODdB48iPM28QptbS6uXk2ts5H3+22PVNYONGzy1Xwcr76MoNkQVG11yB5EUaatzY3H\nY6FQaGQ/GcxmWludhMMOkvEiWi6Okk7jdjhwhgKsxPPMTyxjSV9HzOXIz8/j2B1EEkWujSXp39pF\nR1cQg0FP60ArP31tGld7O5VUisimfoKdrYS6I9jMGg63A4Mqkjc78LV5WBAaGTMGA8hKo6demJkm\nmxVQtIZomrk0jfhMBHswSKZm4+y7UeKxMm0DbdgtYXxmie29eqbLAuWpZcamFPRdIyiKnXS6Qi4n\ncv58jFDITjjceJ8Oh2ldKqrbbSEUerTO482EQg4iEQdLS0UmJzMIQiNmyeu1kUwKmEx6FKWRvWKx\nGBqVmreEMBj05HJVMpkK8Xh5XfprrSYzOZlBllXqdZmlpQKRiANBkFZHQCps2+Za8+I5fXplzU3X\n5TJTKNS4eDHGc8/1EQzaSaUaU4J9fT6G+xz0hDSKC2lChjz9ezrZtecwb/xshkTJSKmisuPQPtI1\nO5d/urAqJIJs2xa57XTxnbDbTQy0aIS1KqgqLqsVHfcWU2jxehGSSWrFYsMUsF7H7HQiCQLL77yD\nxePBEQze0z5vR1OMPGD+9m//lldffRWjcWM++ueff57XXnvtUylGxFyO+ePHqawWijNYLLTu2kVk\n69bbvl5RGrn3VqvxrlPdCoUq2YyAVJepiAr1ukJrqxO/pUp1+hz5t6dZ7G4jvPcAYb+R6Jl3WJzP\nEF/O0DbUy+5X/gP2movs9evo3X7aW+1USgJiRaSeyxKvWDh0eBs2xUNNgZ4eL32ddpRSHsEYInp1\niuz0NFavh9z8POd+IrOj34CldwvLBZWrl1dQJBmX08TiTIxLDhjtCqAZVC6fnmHh+gpFzU2k1cVo\nT4hKMo1t+yZCw3sQsoMYjDr0LhM6uYqqKNRKJaLnzpGZnAQgeeUKtVKJLa+8gvGmYXxNVQn5zRw+\n3M3Fi3EEQcLns7FjR+QjZzJsBKVYjIU33yR2fYmJyTTBvm5adu5nIdmoFxMM2vD7bRw+3MPERJps\nVlyrVmwyQvryOcb++RgaOqRqFU/AjW/7HpYvJvBVl2jdtQtJFKnOjzM4PMz1hSrLeRP6kJ2dO1vx\n+gwEQk7Eheu0hJ0MDoVQotPUx84T6OtCM/Xyw3+5zPLFa3RtG2J097OEKwK565Po8nG6BrqYuXid\neg3MFj0Wm4m+Xi9LV6Zx9gzy4//rGLGlDMN7hzl9OsrrxxJ0h1S8RoHNW1pJRWNImkpxbIJw/5Nk\n8o0Kr6Iok8tVCYedhEIODh7s4uLFRuVmj8fK9u0teL2PpiW8KErIssrBg11MTmZQFI16XV4rXGcy\n6env962KCAO1msyJEwtcv57DYjGwY0eE7dsjWCwGTKb3bvT1ukKt1hDWlYpMd7eHmZkcFy7Eb5r6\n1XA6TYRCjnWB8pKkYrMZqdVUMpkKzzzTTTxewuez4XYY8No16skVMmMX8LmMLL3+I3RmM7905BDX\n8l56Rrvxhdxcv96w7a/VFC5ciOHzWentvXsxkZuZYfHkSZR6QyStiCItO3bgam/HEQrdsdzBzQSH\nhylHo5RWVlDqdYxWK5Ft25BEEUkQEDOZphh5HPnRj370gWZuH5cXXniB//yf/zN/8Ad/sGHHeFTJ\nLyysqzqq1Gokx8bwdHXd8qOLx0tcvJhYDeI0smVLhIGBD07/nZ3NcuzHV1icSTK/UKCrx0/XYAQx\nI5JdOUfx2nmqmpWzJ6fYuZIDp59cWSORljDanaRX0kjJZeK2MInL1zAZNEaGD1JIGliJCtTEKi1u\nC3algsFk4tAvbMOlr+ILOBCVdk7+wyXiF6+Qm5uje+coQ09tp7XTg1xdwJhPIIjwC5/fhhhdQENH\nuqgxO5Nl6NlnuHwpQSEWR85nae/3ozPoGZ9IM/zUVnTeMOcmKyy+ew1NltjyzE6e3NuBzaggiyIG\ns5nI9u0UFhao5vMkrlyh5/BhvN3dQMPMKHX1KrVSCXsoxOF9m9HZPdjtpvvmgnk/UGWZpVOnWHzz\nTeR6nYDFTvTCJWxeL22Du+jp8a65VEYiznWOleVkksU3x0i8c4pybAWdxU6tVKK4tERwoBd/0IdS\nlqkWCnQceAajw4EsK2x7to2yYqPNXUedfZep69cJ2oLUe9oolGWm//01Zk+cpLPTw/JbJzEE23n2\ni6/wg0SC9Pwy1+QQu57YT3FuGnF5liOvPE9FqJG7ME8uL7K5J0JmOcHFjIFNLaOkFRfDT7ayGKuz\nFBOxWIyIigNdTSNfNdIy2EHs2jRyFcxGaG11odPp0Ot1WCyNkZa6IGDOzrHZnkP1WAn1ePG1P3qZ\nNPW6zNhYiunpLIqi0t7uZufOFrZuDXPtWoqpqQwWi4GWFg+zszmuXk3R0uJkbCyF223FYjGgKCpT\nU1n27+9gdHS9/0Zj+sVJqZTF5TKTyVSYnMzgcpk5fXqFZFIgl2uYK/b1+Rkc9KOqGjodlEp1PB4r\nQ0ONa8qNVPJ4bAGllOc3/odRXBRx+H0osSSugIvpt88jxKM88ZWv0Le/k3/793lEUcZsNmA2G1AU\njVisdEcxUhcEyrEYcq22ZuOfunZtTYjccOUVkknadu9GU1W6Dx1aK6x5J1wtLfQ9/zz2UAhHJIIj\nEkFTFKr5POh06O9Tx7opRh4g8Xic2dlZnnzyyQ07xqFDh3j55Zcpl8s471Ck65NGKRajuLJC9vp1\n7MEgcrVKvdRIr5WrVaRKZZ0YKZfrnDy5SDZbXXv+1luLOBwmWltvf9EVhDqnjl9n6eo00Swsz8SZ\nHZvnl351N4GwjktvXqU7YiO6UKBSrhEdv87IM3spCxJ1TYfLYsbrMiEszRNTZOyhCOmzp3CrGgeH\nR7B+7gC6YAf5eJoz//hjJKOdzb/wHKPbuunudpB76xTt7U6mjhXoGOzA7PUzPlujUBOxSBae2zFE\nMJbl3f9+AipFlJqItyXEZ754CDWf5PrPjhG7Mou7NYLPLqPXFTGavAzt282bx+cpZisER0aQymVS\nBY2cMYzTnGHs7/6O1LVrOFtb6Tp4kPTkJCarde0CJ6RSLJw4gSQ0PA2quRzVfJ7+55/HYLCsVTLW\n6fV31QvbSGqlEsmxMaq5RjUKh1miv9WBT02z86k23L7b/14USWLl9GlUWUbNxenu9hGNC1TEMk6v\nh5DPiNOicu7kGUwOJ7JQxGg2M/KFl0hNreDv6WT69AQunUBuOUpeTpFVnGzeN8KJvz2H1WYGoxmr\n20ZqcRklvUKkM8zy5CKiKNFx+CihiKuRUhn2cPRQBwN9HqS6zPLkIud/tsS+X3mJsbE0uaoFyeon\nX1wmlyoSDNqpl1Ti596BXJRfemUnttEeZEeQ+YqCbbV0fFubi0jEiaZpRN99l8zU1Nr7r8xNoj9y\n5CMHUm4U169nOXcuuhbLMjGRRlFUDh/uYc+edvr7/UiSwptvLlIsNr6v1arC+HiKTZuChEIOstkK\nZrMBp9N8S2dEp9OxY0cLoihTq0lEo8W14o0rK0WMRj2LiwXGx9N4vTYKhSpXr6aQJIXNm0NYrQZe\neWWUZLLCP/7jtYZ3TLLIkX1BWsx5Fl87Rub6FHK1iqe7h71feJb05CTmSgIxn2diIk2pVMdsNtDe\n3jg/d4p/q+bzLJw4QTmRaBTitFga1aJX/WGUep3i8jJKvY5UaWQRidksubk5pEqlkYIry3h7e/H1\n9WF4n+WEIxSifd8+lHqdciy2lgZuDwZvm2L+UWiKkQfIT37yE44ePbphUzQADoeDvXv3cvz4cV58\n8cUNO86jQm5urtHTrVYpLC5SyWToOXIEg9mMUq9jstluqZyayVTI5dZ7j9RqCtFo6Y5ipFiskV5J\no6kqhVyVUjqHKktMXpyn/akQsiSj2SMsRJOYLWZqqglZVmhtdSHVZRxGCa1SxOrxMHt5huee24yJ\nOmImg7maoSNo4NS5S1z4+UVURWXboe206BIk3xzDvuDG7HKxY9RB8Lc+TyJd5crlGP8/e28aJMd5\nn3n+srLu+z66+r4PoNFA4wYIHiABUqYl0RqNLVvhmRiNx+uYWdmxduynCX/b/TJryzEbnghvxEg7\nY8tjWaKsGzxEiSdA3EADfd9d931fWZlZ+6HBFimSNrUiAFmjJwIRlVVdeN/o7Mx83v/7f56nv0tP\nbHUTxexkJapgjq6TiuaxWnV4PF7qlTrKzhK6oJljJwcYG3GiCFps3Q4EjQZrwIertwdJjaDRaNBo\ntfinD6C3mElECvSOaOg6epRqKkU1kSC9sIBndBSTx4PpnpdNORbbIyLvoJ7JUM9mUWWZxPXrVBIJ\nBEHA0ddH8OBB9OaH03vw0wF7iiSBJKFHwmT88IbvRqFAI5/H2tVFYHoaT6tF/34tsbuLGAxawgMB\nYnOL6MwW5GKGwnaEdktG7ogMnj+PrlUid+0tmnYTotFMOlEkEdti5lA3FpOIXgOl7S0Uqx5NR4vZ\nakBoJmnk8wSGenaP3W5soRDL1xexe+2YNG1iy6vUKypjZ08zH9NgLBSYmQmQydSw2g043FZ0Qhuz\nARS/G1/ATmXpDj2PPo6trw/DnRgdp4Fwv4+BARcmk456Nvs+22613Sa/tvYLRUY6nQ5ra/n3NdXG\n4xWKxSYulwmXy0QiUdlzPr73TcxmPalUlakpP+F7FZ+BARcazfuN3VwuE+fODVIoNHG5zGQyDS5f\njuJ2mzCZdLjdRiRpdwtFoxE4dCh4L4NIYXjYzcCAm2KxRbMpUy63OHWyh19/xMj68/+D7MI8HUVF\nkVpU4zG8oyMIg72oikouXSEctjM3l0KSFDY3iwQCVnp7P5jQFzY2qCaTe8dKq0VmcRH34CDVRAJF\nklDvxYXYurroqCoanY56Ok1mfn7vs3I0ilSt0jU7+74xjHY74aNHyS4uUstksAaDeMfGPpJs96Pg\nV2TkAeLChQs888wz932cc+fO8dJLL/3SkxFVlknfvbsXwGT2+WgUi2Tm5wkcOEC7ViM4M/OBF4sg\nCPfCtHZL1O22+j7L5nfDYNBiMukoi1rCXVZ0SpB6XcLhNKGa7BhcLqrlOuVCjU6ng8Xnxdw3ij1R\nwV2q0UhlCY8NoA32M2B1snlrHqvdjdPjxWi10CoVaJWqqIrKsfOz9LpkYj/8Dk6fk9W4iVKpwdSz\n5/GGfWi8erYiNSLJGv2z+9FZbJRzJfTNOgcOBCiXGthsBhzdZmxqiUosT/XOKrLeSXi4m2b0FsUa\nWEsO9KMORk4dIhktgkaDqEjklxexe1RW1+bRaLWMf/rT7Lz5JhqtFt/kJN7RUcz39og/zPZZEEUS\nN26QX/tJFmZmfh6dyUTo0KGf46z//4feYiF44ACVWGzPSt3odOKbmnrfSvDd0IgiFp+P0vY2xUiM\n9PomRo+PvqOHENU2WpOJSq5AR2oQn19GltqYXU6Udpvc0hJSV4hKKoNSM1IvlfGPHyQWAb3bR3i8\nj1vf+A56ox6zaEdr99AqlejfN0BXv4+xo5NITRnFFiJ6e4HVS8vILRnrwCDDTz7Ba5dyXLteQavP\nMTrmIxj0Ew7b0YkdLFqZVjYFrSqDhyaZGNSSn38b4/omFruRPmuF3uMH7qmFdqF2QHZ2ozq1GI0C\nOrmGlN8llr9o+CDyYDTuuqKmUlUMBi06nYhWq9kzQ5QkhZmZIFtbhb3vBINW+vo+XGGSTNaIREq4\nXAaGhpwsLmbIZGq43UYeeaQPnU7Dvn1+mk0ZVVXJZHYN2DY2Cjgc+r1elFDIxumJDko5T+zKFZr5\nHOq9HoxWqYSo1+ObmEAw29jIdRgddeP1molESjgcRo4c6fpQA8Hau7an34HcaGDy+bD39lKORjHY\n7Zjcbjyjo9QzGQxOJ+VY7D3uuh1VJb+6imd09APvmxafD4vPtxse+jG78v6KjDwgyLLMyy+/zJe+\n9KX7PtZTTz3F5z//+fs+zsOGfK/k+A50JhPe8XE0ooh/chKT2/2BJUSfz0wgYEaWO/ea1BT8fgs9\nPfb3/ew7cLtN7D8ySGJxjfrGJqWdAsH+IMePdxOviEw8cYryxgoHT2uotgR6T53k5TmVoaEjPPO0\niXZLoq2zEllPIZVLXP/RImaLnpnjQ+w/OEt1e4OJPh39+84R9OiZ+9rXaRYKRBa3MFoMGEL93L24\nQODUo1y+tE3H4KYm6vi7b+9w/GQvwwNWTHYrumYet9jGYgbR6sDkcrDz9hUEg5Hw5ChXv/oNDGYj\nfb6+QyYAACAASURBVMdmKUdiRK9dY/r0OYyCgiK3qVc7OEaD9LvbpF7YfQhp9XqGnn4aAeh79FFM\nDgfNUol6NovWYOA9XvfsPuA1Oh3Ve0nU70ZhcxP/9E/cPx8ktAbDboy5IFC555JqCQTwjIx84L53\nu9Ggo6qY3G4QRaK3F4nmVNqyC02yCfEKE7/+a+gNuyvvbCJPuSxRr9QQMlWGzz5GcXMD78gQRrcH\ntd1E7ojIzQZDR6bYyBvpfeRRqqkMrXwat99J36OPITfr9IY9JLMpbn1tGddAH0aazN2IEQi5qZTq\n1DI5hESS8GAXeqsFm03H2JifdLrCk08NICU2GH+ul50VqMRUXDboFNJs7aTpPt4hcfUKXY88Traq\nodgu7yllri9UuHalSCRWRe3AkSNhwu4upoYGHui5+qcgCAIjI549d1FBgEDAgtNp5BvfWKDRkPF6\ndxuoJya8bG+X9hYgo6NuHn20j0KhiSCA32+mUmnuKXDebX62vp7n4sUIjYaMTqfhyJEuvF4L6fSu\nBHxnp4her+X5519hdjbEqVM9+HwmenvtFAotXntth3DYxrFj3ahqB0N7mXqlhMFiIbe4uFvBbbWw\ndXejs1pxDQxg6h3khX9Ikk6n8fvN9Pc7MRq1/2gkgsXvp7S9/Z73tCYTZo8Hx5NP0shm6T52jFIk\nsrvN0ulgcjqRm03k+nsTyFVF+SfJ5/2IB/gVGXlAuHz5Mr29vXQ9gOCpgwcPks1m2dnZofcfccn7\n5w6dyYTZ66VVKu29p9XrcQ4M4Bkb+9ALxmzWc+BAkOefX2RpKYteLzIy4mZoyI3P9+F9NhMDJuQn\n+tjwCwinwevU09ye48CRE/ztl2/RPT3KxG+cAr2Zt6+nqNUk2gE93/rmPDajjGtgiCsv3ERs1zn2\n+c/iCvkJ9gfRhUOMH9hHZmGBuinA0htXScZKNMtVKpUWwz3dTD5xnFZLJnXrNrMT3awkRez1Dsl0\nlJs3k3z6U4/SSbVYvjiH2CwRHvBhsFsx2AKUJR2KxUs5mcaoA1FQKefKmDw+1pdTBI5UWF7JcvNm\nErMBDkzYqSVzDEwcpHTnKrV0mu4TJ3BOHiBVUKmubaJmIpCLIIgi1mCQVrmMIAgYHA5CBw9isFgQ\nPsDrRqN9uDkn7qEhNDodZr8fAbCHw9h7et7zM7IkkZmfJ7+2RkdV8Y6N0a7VqHQsLC1vU28qCFo9\n6/ktpO4Ep0+HsfQO4RxIsz23gkbU4hsewOjvor2VoJivE3j8GQxSiVxdS0vvomdohHobkpEOY598\nFp0q0WnWSM3fRRS1INVZuXSXhmjF3RMgsbyM0+2j2VJpKSKKLBMWKgQPhrlxI47fbaTVkhBQuH0j\nSo/XQGlxiY4k0T0SZv0H38XZN0DP4YOYzVrykRaRtMrqwg4ajUBXl5Vw2MHKSp68bGYrkkCuN6jV\nJM6eG8OS0xAYeTjn7MMwPLzbMLq8nMVq1SNJCt///iqxWGXPDt5qNSBJMrFYBVlWGRpyUSw2mZ9P\n43Sa6HQ6fO1r84yPe7Ba9fh8Fk6d6sFs1pPL1VhezmEyadHpNMTjFTKZGuPjXvx+C9HoLsF56aV1\ntraKhEJWtrZKtNsyi4u5e1s5Iisr2V01kl2Hq+mlsV2i59RJ6tkMpe1tBEFP6NAhnH19tKpV9IKe\nVkulUGhQKDTY2ipy4kQ3er1IMlnF6zW/T97rGhigHIns9Yy8o3jRW62UIhFqqRQ6sxnv+PjuNXCP\niOeWl0lcv/6e/8vs9WKwf/jC7H7hV2TkAeFBbdEAaDQannzySV5++WW+8IUvPJAxHwYEQSAwPU2r\nUqGRzdLpdDB7PAQOHPgnH3iJRBWLRc/0dACzWYfDYWBzs0A4bMNmM3zgd+rJONZ6gpkxM7nNLeI3\ntsgn8vgnJgiMDZHMq+y8kWJywsfGcgp/yM7c9R3a2TzeYyMsLmaRDC5mTk2zURVQ1gWCtRpHTHUG\nDodxWf2s/+g2O9tF/CMDrFy8TnBsiCOfOc/aq2+gMZrpWILc+t4qY6cPMXp2ApvNwOxsiOW7UeZ+\ndJvRvjF8bg0dhxNHf4hqpUZF42ThepIjx/vIlxWCXQZ8QQf5qoTXaWR9o8hbL99FUiBbq5NZkXji\nsT7Kbj/BQ4cQtVqsEzO8PVcmEY+RujuPQZA5frIPh5Sg0WrhHR/HOz6O3mLZqzK4hoZI3ry5VzUR\nRBHv2Biah2jIJ2g0uPr7cf0j/Q+55WXi167tzTu7tITSbtNWBQxmIxIq5YqExtRhK1LlQLVDJp7H\nPHWcJyanqKZSVDJF8ukS+//lc0TWM+RXV+ib3U+5reHWSpO3v3aRM48N8ZufPUV+J0KrVia7fh2j\n1YPNbaVZbxKeGEDr9uMf6UduNFlezXP9eop6vU0gYObA2aN897uLIEvUS2a+//dXGT80RL9X5dvX\nb/PkuXFWX/4hLreFR3/3U5gtOor5GoooY+m30RZNaLUdTCYdzabC/HwaWVbJFdpY/UGUtkRTFDF7\nPWxtl9m3/8FF0H8UaLUaJid9jI662dwsMj+f2QuHU5QOBoOGq1djSJLK4cMhisUmCwsZhofd3LqV\nolqVGB/30m4rrK0VOHYsTDpdY3OziMNh4Mc/3uS113aIxcq43SZGRz1cvhzF6TSi14vEYhW++c0l\nMpk6Q0Mu/H4LX//6PP39Tl56aR2NRuD3fu8QbreZFy+scmKggeCD3PwyrqCX4aefRpHaGOw2/NMH\nSC8u7hKEbI0jR7rw+y2kUlVsNj2BgJVbtxKUyxLd3XaOH+9+j2TeeC/ks5pMorRau1XhQID8xgal\nrS1a5TJqu43e4SAwPY1Go6GRy2ENBvFPT5NfXaWjqlh8PrpmZx/KNXq/yciXgFngBu9P8BWAm8D/\nDfzX+zyPh44LFy48kC2ad3Du3DleeOGFX2oyArt7mEPnztHI5aDTweT1fqQGyXS6RrvRwCBICJKR\ny28laHc01GoSp0/3vi8UrFBokC4o3Lgew2rREwi4GDhoxj3QIDDSz2+O6ail05RrCorNjsE0zvJi\nhnxJi9HuoFmu4PS6MdvNrEQkfAEH2Wiepbsxlm5u0CgfYHzCT6WtQ28yYeua5KmD41jcLtbefItK\nIkk2lqZaVTj4uecorq9RFNzcvLyJx6xw6dVFPGaRjViL5ZUa8dg6n/mdo0weG6MkGSgW6ggGI86Q\nF5kOhVQOndzAMf0IGwkJWQWl2UJnMVNMlqm3oVpVGB3vxTM6SjTbIZGo0m7sqpOaLYmbNxM8dsyF\nkk1Q2tkhODPznu2OwP79aI1GCmtrCKKIZ2zsY3NrvF9QFYX82hqqLFPPZmnkcgiiSNeRIzitIvmd\nKIKoxePxED44QbYqUpINjJ2cYenSbXDZCOyfwlxuY+ntJxvfYv57L2JxWnnl9hr23n5CI0f5xCcC\neANWNrbKJCMya4tFThx/FLdDxmWGF//yqwQnR+lUK9SiUUzhPuT5FFJbRWrJdHQm8oIXRS5iMeuZ\nX8zR0RkR6JDdiVPMVFlazmHzeImsrFPLZmlm2jz/n/6ayf3djJ89xcDsEcy6EunVVSxuJzpLiNw7\noXLCrtnbOw+8TucX14FVqxWpViU6nQ56vbjXH2Kx6FlbS/HYY/3EYmWuXUsgigJer5nRUTeXLsVY\nWMhw5kwfb78d5cKFNXK5OisrWQ4eDFGvtxFFyGTqxGJlbDYD+/YFSKer9PY6abUUNBoBvV7DzEwA\nSVLo7XVgNGrp63OytJRhc7Ow25sm1TArNbbfXkYpF9AadJgcTgS9Dq3JTL0mIWo0mLwBYmWFdFrC\n77cQClmYn8+wupojHLYjSQobGwU8HhMHD743BVdvsbzn+mpWKsQuX2bx+eepJhJY/H76zpxBbjRQ\nZZlmPo/WtOvyOvzssyj1+u4Wzb1/D5qQ3E8ycgiwAGeA/wIcBq696/NfB9LAL+if+MeHByHp/Wk8\n9dRT/Mmf/Mn/FNbwerP5Z1ZouO0iN5eW6R4K8O2/e5tyoYp/dJBtvwGNRsP587vhblKtRilf5dU3\n41hNJkweL8mtOOVKi+FhF5pwN9Vkivnnv8nG3AaKIDJ19iShI+dRR1yst+t09w0R9mgYHfNQlIy8\n8VacpcUUb7++TsBvohBp8A+o/MHvH8LZyaN49IQCFjquEHd/9CbLl7Zo1mp0h/1Q3CK/ME87NIkd\nCX0tTTFtZfXWJtp9IXSVBHpPgHKxTq7YYiclM/TUk5x57hRKvcyhJ4+wenWBdjFHuMuKb3iQ1763\njdlhp1TPoLRlrMEgNr+f/gk34ekgRrebiy+s796g9HpEgwGlJVEu1JAIoAGMDgei/r2rZq3BQGDf\nPvxTU8D92Wf+eaEquzlDGlGkVGpSKbfo2H2019f3VotStUq72WT6c58nJ1mIrMXwjY5Qdw4xHvIQ\ncig0MkWchjYrP/oxnQ7s+41PoxFFrrx4Ha2/F7vPTKqaIhPPcfQEvL5cJZ8tQlhH2G+i57F+vn9h\ng+OnBxjoNoIjQC26zfbVOboOH0YzMEPX7CznRpvEsgpGf4iryy1sLhPdYQcbOzVkrQmdFtqSDDoD\n1aqEx2TA6HTQzKQQdAYOnN6HfyDM3bkEqfILNKxhXGZIvH6ZiUeP4HP58HrN1GoSGo3A1JSPdlul\nt9eBxaKjuLOzG6gnCDh7e7F3dz/kM7gLp9NIpwOjox5u3nxHVSIwObnbL7K2liMardBqKUiSytmz\nA5hMWpxOI/F4BUlS9iS7AwNO4vEqfr+ZwUEXc3MZEokqy8tZnn12lP5+J3a7AVlW71nC6/nEJ0Z4\n6aUNNjcL6HS799upKT8Gg45KRWJ80E4rvYpOr2fik89Qj0VQWg0cvX2Y/X6KLR3BmUlEg4mtqJ5O\np0y53KJSaRGNlnnqqSHq9Z844u7slPbIiKoouw6ppRJasxlrMIhWr6cSjbJ24QKVe3bu1WRy1wBN\nlnEPDgK7Ta655WVEnY7MwgKtchmNVouzv5/w0aM/c1jmz4P7SUaOAS/de/1D4ATvJSOfA/6O3QrJ\nLzVefPFFzp49i+4f6dj/uNHd3U0oFOLatWscO3bsgY37zwUhm0T/gJOdSJ5cIofJYsRratNMxUgY\nNCSjLuydIrVUiqqsJbcSJYKRidkTuAaTNApFArMjaKw2Fr78VyxcXiKXraPVa7n6nVf5xNQ+dnbM\nOGwi7fXbVGMS21sabD3dHJ4Y5vpbRbrD1l2JsE7DyLCL5Zd+hFzMks62aSSjGN1xvF1eNswmkhtx\nhI5Kb5cP2i1cPhtyB3TtCoIs0TMUQFY6mHRazDYTozMDdPd6MNstZBYW+cFbVyjEkhgsJh557hHG\nzj1O7EcvYM+mCDigZfSwLWioF8v0DPoYOzTM/tkuquuLJOfm0BS1pOdTOPsHsIfD5JvrWGwm9Mho\nrFZ8U1MfSjZ+EUmI0m6TW1kht7KC1mwmrwuzGpVptVXkQhWv6sUaCFJLxGk3GhhtNkrLC+x/bBbn\nyCiNhsTJIyEsVNn5/jdZ+YdvYvL5GJk9TrnSInnxVfZ/5jnkQgZVb6RpFdEobbwOLdVylcGwh+03\nrnLp1RjFbJXHnt7Hs48dYznVwGKA47/1aySXNzCPzBAeCbO0UWdxfofh2XEcbgsrawXGRi2YzRrU\ndpvhES/5bB2dwUBHb6DSFBib7qW2XsXXG8Tq87J48RZCucDGlRzWQIDsxiZNn4HVgsSJQwOoqQ2O\nPTnA8HiI9fU8BoMWg0GLw2FgejpAbmWFyMWLezLQ/OoqvadP/0JUu7q6bCQSFZxOA8PDbqLRXVKh\n0Qh85Su3yOUaiKKAKILDYWBrq7DnrDs3l6LdVrl+PcGxY2HqdZl/+IdFenocSJLM44/3sbKSY2Ym\nSLnc5NKlCJVKi099apz/+B/PEImUuXhxd/um0ZAxGnU0Gm0mJrwcOhSkUmnR1WfC5+qlnU3ygz/6\n39CoMo6Aj+4Txxh45tcQHT2UVCMWW5ADBzQ0m/KeY6zL1fseRRDs2vPDrvolcfMmmbt3USQJQRRx\nDQzQc/IkrXIZ+V2Jv4JGs9u4LYrorFYEjYYOYLDbiVy8uFf6UhSF3MoKFp8P3+TkAzuH95OMOIGN\ne69LwNS7PjsHvAoo93kOvxB4kP0i78b58+d58cUXf0VGPgBKJsKBIR16i4/YTC8GUYFiFEKT5NfX\nKG0bmP/6f6VZKmEbnsLStmJwdbO0mMHbE0YX7gWfD7OUIb2dpFJpoSgqOjpYXA7uvnUXa/9pPNUt\nsvFtknID2WMkFc8x/HQ3T5ybIJMsEduxo7aaBA1lMrdWMZl1+F0WajWJ7Ss3OPIbT2G2GPD3+qjm\nyxgdXnpmJtGOjvDKj3fIrO9QDlt47PERUjmJxJ0SzqCbJ8/sxxOw0SqVqKwvk0sVyaXKWIxVNi9e\nocupMH/hR/TOTHJ41s+ArpvUdC9Go5b9+/0MDrqoRTZJ3boFnQ69gQEiNi2FzQ18U1N0H5pmZtJJ\nV5cOi9e7J/V9UMjl6qTTNURRg9+/q6L4WZBdXCR25QodVaXj6+f2/AL+kJOg20nF5GZrLsPsxGE0\n4g0CBw4gN5tkl5cx98iI2SoBk5b2eoObr7wMjSrWQID89jbl7W1GP/VponeWMQgS0/v9FLJVXHYQ\n/CKFYg2r30fy1jo7CxvodSLlcpMrbyzj8toZPXIarU7g7RsxXv3qRZRWG29vkP/1//gdRqa6qKSy\n+FpJeic1hKZsRCMVErESU4MOHNYhSsUaBu8g+w71IFazqGg49anT6ASZwtoaBoOWQrFFrVBm37kz\nxHUG4hsxHK5uTOUMmnKG2dNjzM52Ua9LqOquE6ncarHzLj8KuOdlMT+Ps6/vY3Ph/Hmg1Yqsr6dp\ntVRGRz0cORJiaSmHz2emXm+j02nwenedcYNBK/v2+e+l9Ha4cydNMGjB5zPx3e+uMDHho1RqsrSU\npVBoMjDgpNGQ+d73Vjh2rJtsts6FC6s88siuQKCry4bPZ8Js1nLzZpLubjsDA05cLhM6EZLxKuP7\nwrz1t/+NVrGA0aChtNMAQcDWN0DwTBeb8QbJ+U0eeaSXM2f62N4u0el00Go1LC7uSnf1ehGHw8D4\n+G7ybi2TITM/v2dC2FEU8uvrOPv7MdhsOPv7qWUyKK0WCAImtxv38DCNXI7EjRuosox3fByTx4NU\nLv9Eqt/pUI5Gf2nISAl4pyXXARTf9dkXgN9ltzryofijP/ojnM5d/ff4+DjHjx+n/17j2dbWFsAv\n/HF3dzcvv/wyf/zHf8zW1tYDHf/48eP8xV/8BX/6p3/6C/P7+KeOHxSsPh+pWy/jM3pxqlkK8RyW\nQACpWiUYstBYvkF6bg4EAXMghM5mJeA3Uku2iccr2O0GvF4LupaMPeDD6U6DpgFtCZPdChYnXruA\nKZfFrqnTkCWs3jC2/Ud440qKVFXH3LUdjhzvY9gnYzFKLKbq5HYSIEtYe/oxCwqy0sHb7aOjygQH\nQxx65jD+iTGWc2ZKVZXQUIhmLofbMcHsI2M0np7CZDUS3S7g9ltpZVPo2yUOHAjSGnag1Co0MimK\n8RSh6Sny2xHCU2YOPTOBigadTtyrZMS3tn7SfJqPcuZYkEJLjznsoXswQChkfShVj+3tIm+9FdkL\nJXM6DZw50/e+Pp8PgyxJ5O5twWi0WrQeL11BmUo2h1TMoVVaDPZ7ENxuek4YyN/L9wgfPUp1Y4nQ\nwCCbb13Cc/4pmpkkJqcT0ddNYGg/UnSDVrGAIxRAFrSET5xCuniZSnyTrlCAyadmyJqt6JsFXA4D\nkZ0iAh2sVgPZRJaTow7SZRGbp8Fz/+E55l69yeC+fq5cibG9EkdRVLwOHaPhNjs/fJGrW1rowKB/\nmCeOdZMsuUjsZHHadTht3fT0PYHFbmH79ddxBT2ozQZCScLksODq72F7qYQot9AZdLuJv0bjnoeE\n2fyTbbdmtUE+XaJebGC16vZs09uNBkq7/dDJyPJyjtu3k3Q6Anq9yNZWEVlWcDiM9Pc72d4u0Wwq\n7OwUmZjw8fjj/djtu1s7J070UKlIVCotNjdLjIx4GBlxYzBosVr1iKKGkyd7+MEP1vj856eZnPSS\nyzWwWPTY7Qbm59P88Ifre6qXz352kkZDpt1WeOWVdT73qR4cqoJUqWP1unF3+ZEbdQStDlmS6Mht\nFK2JWKaO3WEklapx82Zyz5jRaBSZnPSh17RRcyl0cg4xryLZ+pBqNZRWC41Oh95iQdBqkQQj8XQT\nT8hD8PAR5FaLWioFgsDA2bNojUaWvv1tVEkCQaB4z4XV0du7504MoP+YzMw+Ku7nneQg8PvA/wL8\nJfAVfrJNcxNIAuF7c/gMsPJT3+/8tGPiP0dcvHiRP/iDP+D27dsPfOxGo4Hf7ycSieyRul9kvOMD\n8HGi0+kQiZSIRMoIAvT2OgmHbbTKZTZfeYXYtWu03YOsRproAt1YqTE9E2brm39DdmkJ39FHSEhO\n0nU9mBwMHJ5CFQ14PLs9KoODTpqrt7nx37/K9lKUcq7MoefOw9QZTA47moVXefPCTcweN5PPnufC\nyxEKhSbegR68Pitqo8rZx3twm1Re/s9fprgToZrJ4R4dx+Cws+/scdbm44wfn6Srx4MiaLi7UKBr\npBunw0g1nUavhWu387zywgINWYvTbeXcJ8Y5sM+FoRzn0le+RlvQU25pqZcq2CwCn/j950iuxzDW\nUgw/9QSDZ8++5/emyDKJa9coR6M0SyU693or9HY7g089hcXj+djO0c9y3iVJ5sKFNVKp97q+Dg66\nOHt24CORI7nVYvk736FZKGAKdXM3aeD5v/wetVqL8Egvx04NYqfE2COHqd96g8LmJp6xMfRWK6nb\ntxF1u83Kgelptm4vk9IEWb0TRTRb8YccPPHJWfQmHatRhdW5TQaGA4yOeTE6HWQLMka7jRvffplr\nL12h0VDQakUmp/wMTPYQeuwc/+n/usL6Wpq+Pje//duTNFsq3/zqNbKxDIJGoHfAy+Mn/XRJ64jd\no1y7leX8E72Qj4LZjmQNMrcDvf0eKok4PX0ehNgCLreZYiRGLlvHEgzRUkWWN2vodFpmQjWUQgpb\nVxfho0cJHz68RzDy+Tq3b6coXH+TyNwyFouO4SE3NrsBz9gY/Y8++rOfdD6+612WVb773WUymff6\nZSjKbk/HykqetbU8kUiZUMjKk08OsrFRQFU77OyUcLmMjI56UNUO7bbCtWsJSqXWvfTpDt3ddp54\nop9Uqk6j0WZgwMncXAqdTiQWK3PjRpJAwMrychaA/fsDHDsWxmAQsVh09NglGvEt+vtsbL74ItFL\nl+h0OrRrNUSzhYkv/HsuZbr49g+22b/fx759fkRR8x6/E5dDx2y4RmHhDnqbDa3BgNnnwzU8THpu\nDkWSqMTj6Fw+sPuIFPXUBAsjPQZ82hKNTGrXU2d0lPWXXiJz9y7teh291YrR7aZVLOKbmtrL9tLb\nbAyePfuxWb2/g3vX5wdepPeTzt4EmsDr915fA/4z8EV2iQrAvwJE3k9EfmnwsLZoAEwmE6dOneKV\nV17hM5/5zEOZw4OGqnbIpCuUsiX0gky9LfL29Qzy7rOU1dU8p0/3MjTkZviZZ3CPjFCORhk7bEZj\nstBMxmiU0kiqiGN0ioWEluVbdzD5fBiCfTjKMv6Ahe99bwVJUhgd9TA4EOT4F7/IxMYqWqMJa08f\nKzGVoE9PJDNApniN/ccnWd+qsjAXwRUKkIzmSW7GGA5pWbxYxBOwM3L6GOuvtqgkUpiNGibOnsDV\n182k00PP9BitRhtZ7jA666NYVogkmhyd6ef6lW2u386RKQk0m03y+SZrS2nM7TwjfhWzx8321ev4\nxye4E5M59dzjyKoWY6eJozuE3mqlnssh6yzEYhWaqRi1jSW0rQpKtYh7eBjYvZGY3G5Q1fviwPhR\n0GjIexWRd6NQaCBJyodmd7wbWoMBz+goxe1tKgY/t9+6hKAR0Or1FHI1Ll/a4tlnBnA49HT6R6mr\netqCASmZxGCzUY7FdvOPajXk7mne/MqPQKvDP+QhmhdYLtgYD3gRbBX2PeahGd/ie99fIb6dxzsQ\nxmVROfvrpwmEXUiqFlGjUIyl6D92kG9e2MRm06E06mwu1dnZ6aUmdcjlalSrLcwGDSs3V+kJGQn1\n6RCqWZ59zMfN//4V2qUcBocDvcvLE1/4N9T1Bux+LxaPm7YhQDkaxdsdQLQ2SUYLdM0ewjxoxW9q\nktyOsxrTIG50kP0F7D0pHOEwAHNzadbXC/SNTBGsN2hlU5TzRdzhYTxjYx/3Kf6ZIQjv70uq1SS0\nWs2ua3K5idNp5ODBEBoNXLoUpdFo02rJlMsSsViFdltBp9vdoiyVWvzgB6sEg1a6uqwcOBDA77dy\n+XIMjUbDzZtJrl6N87nP7UMURcxmHRaLlsOHu6jV2oTDNo4c6WJjo8DSUpaF1BZesYhV48McDGKw\n28mvr2MLhxl45llqjn4uX4iiKB1kuUOlItHf78Jk0tLpQDJZJb6VprvTwOX3U0kkKGxsIGg0jH3q\nU0i1Grf/+q8RdQYSa9tYA0Gmfvt30Ot9LG668J7sxWk1U45GKe3soBFFbD/ld2Xo7cU7Pk7N5UJn\nNuMcGPgnA/Q+btzv2tpPy3m/+FPH/+0+j//Q8YMf/IA///M/f2jjP/3007z44ov/U5ARWVa5di3G\nlR/eJL26jSPgITAYxmYWEexORK1IrdZmfj5DX58TnclEYP9+vOPjtMpltEYjaZOJ9M1lPAcOU8zV\n2HxhCYPTjcHpwRLupS3Da69tYTBoqdXa7GwXkKpV3LYeAtPHkQUdQqfBoeEa2baZa8sSpz7/Scp1\naDdVAoM9lJsiYqXOzp0Vup7Zj8WoIbMZJ50oMv3UMwRmZlD0dpZv7/CJUR+5Rptv/M3V3Sa5oJvZ\nw2GcXicarZb59RqY7IgGA9V6m0ZNItTtIpvI0fJbyVWT+HsDuLufxWjScfL3DpPO1Onk1xHqU109\nXgAAIABJREFUeZoaiXomQzFb5ta2gNdnY/uVl6lVang9Fnq7dNTzeQxWK4WNDfQ2G4XNTXwTE4Q+\noh+BLEmokoTOYvm5CYzZrMNq1VOrtd/zvtttQq//aKqxjqruZhltbbFWKe6uCj1GZK2FVltFo7ax\nOGxsr6Sp5SosvD6Py23GKufRyA0C+/dT2N5GqjeJlQWsg6NYzFoEm4dcGb765bc5fiSAy2tjdLIL\n1erg9vU7GPRaOguLFDQCwcE+SkIXa3c3kVtNDp85SBkHHTnHsZN9jI17uPbjBaqVBhq9CbPLQS2X\nR68TMJqcmGxm+g8G6Qha1r/zLZRqgY7ezNZWCWUtj9b7AvrDTzPcZ8ZezxI4cYL03ByVRAJPwIFr\ncpqlnTbZWJJtDTQzZbZXYth9Lq5eTWAJ93IwHKbZlEmna6hqh1hBZOL0SVo7y7SqNToaLfGrV+k5\nceKB9wy9G6KoYWTETTZb37OCf0dtsrSURZIUxsd9PP/8Ar29Tubmkng8FpLJCkajllZL4fbtNO22\nQixW5sSJbtxuI4uLWc6fH0arFbhyJYYoalhbyxONlrFYdKiqSjpd49atJDqdhoMHQxw6FGRgwIUk\nKSiKSq3Swu9x4NdJvPxXf8fjv/kko5/8JKgqtnCYtn+Yb/z9/G5WlFXH/v0BVFXlwoVVOp0OOp3I\nwYNBzBYdpWKRZmmH7I3Le+aBmcVFcsvL2AIBYjfnqGYLVPNlAnfnaNq7mXziUbZf+D7Zt1+lWSgQ\nmp3FPz1NKRJBZzbvuiWLIq7BQXwTE9jCYZRm81emZ79seEfSe/LkyYc2h/Pnz/OlL33poa1kHyQS\niQo3394gs7ZNR1FQOgK5gkxxo0w0GccdcDI9HUQUBSRJ2XMxFHU6EAS233yTzbvrdKplnOEgrskT\nhBImJNGEIxwkmlboMuloNGQymRobq1ncdg31LhOBoJVMoogs6Dh8rA9du04lkadUVXhzI8e+U/tY\nXNigu9fNeqRBJZdGoxHweMzYzSpOg4l0osjKRgVFNiLnoxyb9VMXbNy4scnd9SZtVWDxdhSHzcCQ\noOX67QwmvUAotLsSq+QrpPNt7HY9Vn0bpZghvnKJ5J15+k8cpeEN4Ng/SzYv0XvgEJbJIwjtOu1W\niZIssn51DsO4i8TCCoJGQGm4CHUNYHY6ya2vY3K7qcTjJG/cIHb1Ku1mE9/4+IeWcjudDvm1NdLz\n88iNBmafj+CBAz/XikunE5mZCb6vZ2Rqyv+R/74riQTZxUUMdjt+uxfrTpP25iZ2iwat0YKIjNag\n585ygUaxTizZZHUxybFZHw6jgKWri+DMDLVsFt3NBti8+A+M8tqLd9lZjRMI2qhkdMy9PsfE/n+J\ntp5hf7dK1/5hFq8uc/DJI9y6lcRo0FAuVClkKkT//hq/9W8f5/qVHb77/Q18fhuTUwMEez309Dh5\n62IEd28PBqWKWs0zM+Vi+43X8Y8Oo2nXMVpNxFJtcrk6FrOWVi6DUCnw+g93OH/CgcmRpO/MGaR6\nnUi0xsXLcUrZGvW6wt1rGzz++AAnHlHJbkaxGGrUy7sPdp1OsyvpLTaxWHREr90gMreE1arHvt+P\nrBNJzs0x8PjjD/X+MjTkplRqcvdummpV4vDhEIGAjXS6htGoRavVMDLiwes1Uyo1SadrlEotjEYt\n8XiF6Wk/Kytl/H4LX/nKTc6c6ScYtJJIVHA4DJhMWjY3iyiKit9vxek08K1vLXP8eDeBgIVcbrcy\nZ7MZmJ/PcPFihGSyypFpF0GXnvpqBm+Xn4v/798TDNkw2a0IWi2hf/HvkOs1enp76O50sFp1zM2l\n6KgqkZ0CuXyLYrHJH37xCI6mho2XbtJqSMiVNBafD1GrpbS9jcXvp6O0EXV6VFWlls4g2row1tPc\neull1NQWnrExohcvsvXaa4x/+tPUczl07Tbho0fxjI2RvHWLzOIiiiRhcDgIHz78QKXbvyIj9xEP\nQ9L70xgfH6fT6bC8vMz4+PhDm8eDQD7foFGpod7bk/GHHLz4WgStCO22QqXcIra8zb/6N4eR0jFU\nd5B0tk4xXyfy+o/JrG5gbJcp7WwhaPXM/msfjpCPdFlAwkA6ncbtNmCx6Mjl6gQCZoyaNvFUg1S2\nxVquSLNYxixlCepLNEQ34wMmfrRSo1WtUq4qGC0tfvtz+1iZs+EwjTLgVnj1by/gCbl56tOn0IcH\nKMcSOMzduJwG1isaam0t9XINjcaATuywtV3C6bFQyZURTJBS2yhSi4E+G225jMmk4eCxPtylRXZi\n2zRKZdILi4z9i/0Uc1V6x3q4tpjnzZfuYLKamT0Spn/QTLtcRKroABW5IVFNtVGFIaRGg44sU9re\nJnb5MlKthiCKBKanKSUzTPza0+gt7w/wqsRiRN56a6/TX6pUaNdqDJ0796H+BaVIhPzaGkqrhaOv\nD9fg4G7+zbvQ1+fEatWTydTRaISfWU3TKBT25uT3C4R7XLsVHkGDRhSYOTqIxmblu8+/jlbs8Mjx\nWaRMlMCREdz6BrmFRQorKwSOn+KR3xgi+j/mKRQaxDdTNKpNxsaHiG1GqaSzZKJZrKU8a6+/jbae\n4+nf/S1SxQ7Xry7hdJqQW220HRVR1PL2W5sEgzayxTKpeIFaQ+HUI/24nHpmZ0OIYgiLXKQ7OIGh\nuE3yzh1sXheWYIhcardhUyNq0OlFHEMj3N1qsbXdYGxmEF2mhM68g9xqkVgpIKoCZqcNndnEMbcD\nc2WV/NoS0eUYgnYNjSqxf7YXezDA5KSPXK6BUZTJptKIWg1dYRvae34a9UyGdqPx0NKYFUXlzp0U\niUSFoSEX2WyDdLqOLO9WFkZGvGxuFkgkKsRiFQ4f7uLSpQhdXVbq9TYazW7Ozfx8mu5uO9evx7l9\nO8nIiIf19QI9PTb27w8Qj1dYWcnymc9MYjLpyGRqvPVWhCeeGERROgwPu9jZKRGPV0ilqnjtAi9/\n/Q1+67cPYdBpqdUqiHo9cqNBPpPGPTSIQ6wzOBZkLqLS3W2jWm0hyk2CbpFmUSDksRMKG6jFoyRu\nvolYyhGY3k9lawNRr0fQarGGQqCqaLUiRpNIo7kbe5CTZNRGHaFeQms0YnQ6Wfne99CIIumhIWzh\nMKJej8ntpp7NEr9+fa83rJ5OE3n7bUaeeeYDr+37gV+RkfuIh9kv8g4EQdiT+P6ykxGLRY/WYNjN\nRFFVBFFLo9agq8dNPlullc9icJqRqlUSy3kW0wkEq5P0VoLIq/MEnBpK0QiNYhWrVc/aK68xfvwx\nHBUdqwmJ4WE3hw+HkRotXnmlTaNpoFqq0N/toNloQ7OOySjQrlQQvdCJrpC9FePR432o1Pj3XzxB\nta6STRY4/egQ5nqcF//q69TrEr1WIy6XgcjFl8gvLqJOTqIfH6LRMlPMllHbEm21g91pRe2oqB0B\nk1ah3VKwGYz4HQJHjo5yqiThsGlx6VskXklh9nrps9vxjI7iHR1FsjpZWC7y49eimEQL8Y0cgtFM\nGz2+bh+C1YnD56EQT9I/PYwn6EJrMGDv7SW/vo5Uq0GnQ1uGQrpEYTGO6h+ie2rkfYmipWh076H/\nDt5xNdV9wIqruLPD1o9/vCtDZJeYtMpluj9Amu7xmPeaiD8KJEkmkahSKDTRyib0viBSNoWSi3N0\nX4jcoBPRG8bjtaDTKNy+m6PQ0NJp1fnOd5Z4/NcOgFbP2uIm6UQdg0FLrnWdiSctfPIT/SytV6g8\nNozVpKEQTZKJ5TAa9YS7HWRXIxx/dIx8oU4iXiWZk3C5zbTrDcp1lVBPCIfdgCq1MOvhsbOD5HMN\nPD4r+XSRjtTk5MkeSoUq7o5Ke/MO2bmroCisvvIGj/yH36NebVKqrGDo99I7ewDT2EFu/PUaFoue\nufkMFsyUFi5QqbR4+3KM3hPHubkmcOV6hj/61/0k3ryDy6IwPOanUJbpFFJkF+bRGvQMDLgwGETi\n0SLaHi+iV8Tt/gmZ1JnNaH/K8E5VFJqlElqD4b4/zN7pzRAEAadToFxuEY9XGBvzMjjoJJGoMD+f\nZmurRK0mUa1KPPPMMG63iVpNIpWqkUrV+MIXDnHpUpTeXicHDgRwOAzU6236+1202yqnTvWyvJzj\nzp00n/3sFI8/PoAkKUQiZQwGDWNjbpaWslSrEmaTiNusUDTqiWyk2DcSxusxUL57jdyd2+itZkIn\nHmHt9Yuc/o3nGJ/ZdWHtaI2U40kWl3N4XXosRhASy6gJP3IuhdVpJ7+6wtBjj5JZWqKRyzH+qU8R\nu3IFayiEUZIIzBzC7PeiF/S4urvwjw1RicepxOOIul3VVLvRYPv11zG5XHs+Me8QkXfQKhZp5PO/\nIiP/3PFOSu+f/dmfPeypcP78eb785S/zh3/4hw97KvcVXV1WBifC1LIZaskUAh2CfiO9ISMBjx6p\nZkDfaaHvNCg1DcRWtlFcKn67nsGwHiWXwu41UdJpEKRdD4BadIeDJw8THrWjSBK1rUWKW5scdenw\njQ0huIf51nfWScZLeM0aAg5wCUW2XrmM3mLmiacP8+YPFxg9th+/voKYyeBxiaCxIXv6+ez/+b8j\nSlUsDgs6uUr86hWMbg8aRaJY6ZDJ5ugd7UKj07NyN4o94ObQ8UEiOwWsJgGd0YKxmaW+tIYukGTY\na0cqKhQqMvbDZ6l1Hcbp0OP36NE7XWyULMwvJlicTzE44IBGlfhKBL+pzpnzsyzejdP3yClGlQpi\nKU7y8kUsXd1kN7aw+7z4DswQv34T59Q+Uptx0Okolppsv7XD+fPD6DQqCML7Hk7vwYeU83PLy3tE\nBIBOh8L6Ot6xMYw/hxpMlhUuX46xspJDUTqochuXxsikP4BSKUA5S18oRO/pAa5djfE3/88bPPbM\nfhw+F4WclkfPzXD2mSlSi8uo7h7kdIPYRha5XkUqfAvzzEn2DfUTsozw0jfeIh9L0TcS5PBj+3AJ\nRcShPqy2FJ7x02SNbix+Aa3ZwsJyHlu5SrVQ5uDMKF6/hZuXt/n/2HuvIEnu+87zU5nlvfdd1d7O\n9HT3eI+ZwWBIDEASAkiKkERK1F5QsXtxoVhdSKd7udi4h4t72FjFRUgrKk6hoKSTyAUIkoIjMBgM\n3Hg/3dPed1eX9zbL3kMBQw5AOAEgIAW/T52Z1dm/yMyu/zd/5vttJTexKvXk4hLrhTgXVyK4B/p5\n7BvbKdyZo1GrYOnuolnIkEvlmb0+h2XPMR5+9FGKlSbZpo7nfnoXu7zMg6fGqNWbXH/hTUa6VMRS\nNRxeKzPTMVbniuwZC6Ipx1i/cYeIvIXTqqLrwC68vQHW33qL+NQUnn37ycqd5Ap1jCMTZBbnqcry\nqKgiV6txjIzcN9pbiMUIX7tGOZVCUCiwDwzgGBlpl0M/ZSwtpXjuuXmuX287RAeDZvbt8/Ozn80S\nibRLLBsbWZxOHbVak3q93edx7VqIJ54YJpEo4vUaCARM5PMSx451YTKp6O42k89X2djIcfVqiFAo\nz+nTffzZnx0km60QjxfZu9fHpUubzMwkGBy0oVCIDAy0FWCdDg1CMURXj40jj+zCIyapppr0bP86\nPPkN8tE4uXwNe0836eUVbr0xjVyhoO/AGFaXGX20hpTP4jDI0asrrP/8X6gmY6gHuzA4HUiFQrvR\nNBjENTGBweul68QJ6pJEOR5HY7ejd7vZOH8emSAgiiL+AwdIzs9jDgapv53tVJvN7ReGXzHVJBPF\nX6sk/G/IyGeEK1eu4Pf78b3dkf554sSJE3z3u9+lUqmgVn88cah/S9BqlTxwrJuuLjOR9ThOR9tC\nO56sEJ+eRiiWMDpNuKxyovESrWYTi1lJK7VGqwUb00u4LCJmtxNrIIh/716qhTw6i4mFC2lKi5Nk\n19fx+kyo5DVayS3kCgGTUUmz2sKmqXLrqZ/hOh5AioTJSRU0Bi0nH+yiJmV47f95mUxWwh7woPV1\nsl53Ick0DAw5OLxPyeI/P4V/ZJhyS8nqhcvIdVmuntlg7OgOdn1rF+nkMFaXCYfXyrPP3KGsrKKR\nVUhcuc7o4R6m/+779Dx4gt5HvsrajShv/eANBFFEUGvo3jOGwVgmHEnQ1W3l9RdvEwvD8HA33UEd\ngz16OjptOAxNysUqRp2LtZcXaLm7uDOfQVYz0EgX6dxzAHP/COHFDSKTsww/fJJiU0toNcb8pTLV\nmUsoDQZco6OYAoH3EAydw4H2fcaCa++yMgdo1us0arVf8emPjkikyOJiikaj/YUryBWkqgZa7g60\nqmUEpRJrXx/lmsCZV9dYW01z/uVbHDoURGvSotRqeOrHcyxfuoZCaHH4gV5G3BZCFy+i0QSwIjF9\nY5lgwMiJRyfIlVo4vWYszSTJG5cI37lLvZCjEA7T7D/A+dkWOouZng4t+YKSriNBhjoEwuEMOnmV\nhevT9O2fwBt0UoiESSxvsP/kOH//g1s8+Ug3Ji1UCwXkAmiSWRQWG5EbVykYtbjGJojOLtBlKmMf\ntrF67lUmjo/TcttoKRqUQ4u4x8cplk3s0uro6dOyObdGYDBAbGEJk9WKxapj4cwr6LwdpJMFsvk3\naDm6mCm6WVrO4DBq2D7gZPc2IyaPE4PnFx4pdUkidOkShUjk3r6t69dR6vX3prI+LUhSndu32+qp\noiij0WixtpbB4dBw8mQ31WoDh0NLLlchl5NwOLQsLaVRKkVEUeSVV1a4eTPMd787zrlzq9y5E0Gp\nlLN7t5dise11k0yWcDp1GI1qpqZiyOUCp0/3cfNmBK/XyeOPD7N9uwuNRo5KJdwT4YvGJfTVBmPb\nzOiyy7z5N/8dWTmPXK2k59Qp/AcOokJDtZBl+uWLRCMlDGYdokJOj0+F3eIjF9PQ022iOpfmztVl\ntPImmeVFKknTPbl2Ua1m7qc/pVGpUIzHsXR1oXO5KKdSbJw/T3ZtjUajgVyrJR8Oc+jP/5z5554j\nHwqhNBjw79uHlMth6emhks1SK/5iZN7g9aL9lEd7Pwi/ISOfEb4IJZp3YLFY2LZtG2+++SYnT578\nvMP5TKHXKxnZ5mZkmxuAQH+FubkkM0IFVSNPb6cBkpuYjV4aoopej8jP/vkCnSNB9v6n71Eu1qjU\nRQzbekg1ZagsSmoqE157nlpWRr0p485Lz5NP5hjb38PgyQd44rHjTE1FKM5NQq2KpcODeTjA6muv\nkZydJXBgPzMvvkIhV8bstJNKVbj42ktMPH6a6ekyucVpTNqj2HUa1l5/nc6Hv4LW7qDVqmI1ynnp\n78/wwFd30+GQM387StqqYt9AD3JDF+nFRTS2fnLXXoOaRCESJpWusBwq07V3Ao1exdpmmZ/+eIrj\nxztZvr3IztOHGBoL0KhIPHKyg0pZYmE6QibfZGQsgF0j0cimEY027txNs7KSoV6rs65o4NhrQBIa\n1MoVhk49gHX3YW5MpYhPTxOVqSlcu4BcraYQidDz0EMEjx4lNjlJrVRC53TiGh1F/j6E2NzZ2RZn\n+iWorVbUFssneiZKpdp9UtoA1WKRrcUCHpLUKxWKkQjG4QkaLRk6p4vFpSi1co0vP3mIv/rrG+j1\nKqxuOyu35zl7ZoE/+M44ldoF3MODFMJR5OhJxUElq6OtFBCTaWx9bqKxKCavi0IUZDoTd9+8zeHH\nv8bthSKh9QQDI27Gxz2snnkRi9/NE09OsDRsxqCTYXcpWbNYCM+bqUkS+wbl1NfnWHrzDYRaGYvL\ngn14G5GshLpZIbWWIn73Lgq9gc1ra2RtJgLHTrK0XiRbNZMK53AP70Rj0OAoRaknE6gsLlxeCwrv\nIeTUsXfZiC6uIeiMLK/mkOrQKTexMXMH1YiedLpCJgPxHAzsGsTvub8ZuZxKUUom79vXajTIrK9/\nbDJSrdaJRosUizWMRhUulw5RFO67r+VyDYNBhdutJxIp0Gi0WF5O43BocTp1FApVfD4j6+sb2O06\nJKmB3a7FZtMwPR2jq8vClStbvPLKMgaDkkymPdb76KP97Njh4rHHhjh/fo1CoYrLpWPHDifLy2l6\ne61oNHJEUaBUqpJMljEalRSLVR5/fIibN8P0d/Wyu09g5u//BrnYQtBp0NlsrJ0/j7Grh6ajk9xG\nmKXNEtW6jH3Hh4nPz6OS1WiGtrDrNcy/sYjFaWb45BHmnvkxCpXinj9UfHYWSzBIan4ec1cXgiCw\n+vrrOIaGsA8NEZ+bQ2ux0CwUEORyCpEItXKZ/kcead+jZpNSMolMJsPgdmPq6CAxN4eUzWLq6MDa\n14f4axSz+w0Z+Yzwwgsv/Fpdej8MDz/8MC+88MK/ezLybphMavbs8TE2bCI6OUlqaQkAp0PDngc7\nEQpJAn4dKqOBt66maSj1hNbjVM5EmDjYj9OlI3ktRSlbYFBT5/JPz2LWi6AXiS2uQ+U5dvaN0tdv\np6Zwos30oZK3uPkP/0CzKkGzSXJuHqQS7m4flWqL+HKESqGElM/R1e2hmUsQXo2y65vfZfXuGpOR\nIoYdD+F3KDndm8b84g22VuPUVP0M7OuhUcwyt1ZGV1zCo8xSmbuOXK1C1OlJLy3jrjbwebQoymnW\nbyyibSn48uEA5VIBk6xIbivKt35/N4Z6hs3FEBfOTCEr54jOGQmvxXjom0cY3NXL1vIW0egmjUYT\nmSBQE5WsJOXITX3s/497CedEplbyZDa3MGtaiLl2qrxeqZBdXyc5P8/AV7+Kye+nUat9qOmWrb8f\nKZcjs7pKq9lEY7W23/4+4ReiwaBELheQpDqiKNBqtShFIxh73DQy7Z6WerlMYnqK4YF+1tfzyFVK\nctUCoY0sUqkC+QRanRL/QJBSOoNgMHHsj56knMkhS0bRFVaxKJzYJ/bSrJnRyBuk8y2StnGK5Sb+\nQwfQChI9pipzcwleeTWCnDrVcgUjOSxNCVkpT/LqW0SvLaLs97I5l8c30Mu3vr2TltVDbvIak88+\nR2wtTEtQYDGGySUy6Ed2EQ2lMNrNyGkSD8UxqJo4gx7C6RaL86v4h3tYvbXEus3GwfE62dkp8mUN\n02+F0CjBNbGHwS89iMVpJHRjkmQ0TTaTQOd0kMlIlIo1tG/zgFarTQSi0QLDw/eTEZkgIBOEd9+C\nj12ikaQ6Fy9usrycpl5vYrNp6Ooy09nZlliXyWTodL8Y835nf7lco7fXSkeHiUSihF6vxG7XYrX2\nMzMTx+PRc/hwgNnZOEajmsOHA7zxxtrbfjUZVCo5lUqdy5dDnDrVy1//9VWWljJ4vXoEQc/8fJru\nbjNra2laLXjppbYAnyjKcLv1HDwYYGkphUIuo6NDT2VzkvDVa+3RXZeT2N272AYHaNRquIJ+slsx\nIutxjjy6h9i1KxSaKpZv36CQSGNzmtDZLOTCUbxH99Kxfz8yUWTo8cepVyrUJYlms4kpGKSUSDDz\n058iyGRUUimUBgMak4lWo0EpkbiXqeo6dgxLTw+phQWy6+uojEYCBw9i8HoR5PL7sly/bvyGjHwG\nCIVCrKysfK4jve/G6dOn+eY3v/mFIki/Tij1evx797bfzlotNFYrQUEkuqykub+PZMvC4uoquZKM\nxGYUjaeDy5e3ePRrQ5x/fRZvwMZoAOrFIvm6iMWkRKMWaMrk1NJR3rixxviQEXeni61r15FKEmqt\nCr3NgsqgRyYDn1vDxlYRuayFUq2iY7iH8nyCUEoiUG/yzA9vMj2dwN3lZfXVKaxWLcdPdHPsyYew\n2HSsbFZ44V+mWJ0L4w+Y2b/dQ2UrRrVUJLcZgkaDjkOHsTj0iLdnmTl/g0pNxvrcJkbbFF/5k+/w\no5deRuV0MzJwkMr8GteWQygbRVApqKRShC5f4ZrdgPexYTzD/ejeWiefl1BpVfTtG2M1q6bXqUPj\n9pKPRtGqBAYGHThpkrp04971rlcqNOt1aLUQ5PKPJBeu0GgIHDqEY2iIZqOB2mJ5T+9JLlehWm1i\nNqvuU6h8P7RaLSSpjkolcudOFK1WQYdPR/+gHYOsQLX5i4yJUmgw0GNgY8vG5maBRl2DL2jDbpKT\njjRIlksgKFHpnViDHSjlUJ++RosaiUwOa8BH9NJ5NCYdkTLciWiZPfsmlUwWUank+LcfQWv3Eb6d\nxePWYxGLrF24jDzpYv+ElfWfPEPP8QdQ2Z28dnYRpdDAHZI4/h+eQNYokc4lSa6sojaYSacr5OUy\n/AoRg0GFbt9OmqIKpBJXzv4zap0SW38fC5MpjFYjMkGGf3w7pUyBZL6Fvb+P7GISi8eIvBAh6NcQ\nWYlSzFVwjI6x/Ozr6N1ulFotMpkMT08HoVybZMhk4HBosVjUv7jGuRyCKKK12zH6fKSXl+9dV1Gl\nwtLV9aH36pcRCuVZXEzRbLZwOLTEYkUuXw7R02NhYMDOxIQHo1HF+Hh7zDufr2KzabDZbAwP27l5\nM0Kt1iQcLlCrNXG7dRw6FMBq1ZJOVwgGzVy8eJdGoz2+nEiU8PuNZLMVmk2R/n4bqVSZbFbCatWQ\nzVaZmopSLjcwGpVks1U2N3M0Gi3UahGTQUkyWeaHP5zi8EE/87MxymsLnD7mod6UkZpfoJyIY+vu\nolmt4h4eQshHCIz0curxPbjcBmYuzeI88ADhagOZIJIMJ7AF/aQ3NiknEwQfeIBKJgOtFgvPP09y\nfh5bfz+mQIDAwYPYBwcpxePIBIHs+jrlVIpCJIKoVGIKBqmV2qXpUiyGY2gIx9BQu6dnaOhzl/OH\n35CRzwQvvPACp06dQv4FuMHvYGxsjEKhwMLCAn19fZ93OJ8LZILwHo0Ld7cfoTJB5PwGRreTRrpC\nZ68LmVIglanSajSpSzVEAWRWL56BLorRKJ5OJwajhnxVoC7VGRh0E85VmNg1gbQ+j95iRKnToHe7\nyayt4x4bI7URpjNoxuIwUVLaKEqQjJcIrcd5NLifH/y3F8iWIF+RoaLC8o11+oNaJJcBqd5idT6O\nTK6g1FSyGpb40ldH6d9uotBtQxAE9D4vcrWGZjJCIGihJe5l7voiHUMKDKoGucVZHH5Twd0uAAAg\nAElEQVQn3f1OxHyU+MoGlUKZJgKNWpNEKIHRakAqFMnG06jdXTz8J99lYyFEMl1jLSnQQqC/30ZH\nhwmv10ilUiUzfYfIzeX7ekMUOh22/v73jOV+6D2SyX6lgFat1uDOnSjz80nq9SZWq4Zdu7y4XPoP\nPN/aWpZz51bR6ZQcPRqkUqnT2WnCKxNJ3Ln5zh9FY7Wi0OlQNJI8dsxKngB6owaDGhYPdPDi0xGy\n4RQGq4EjX9qFLB9jMtTi1OEJ1pEQzTZEtRKp1kQ0u0g3BHKZCBqbnapUpVZtsLUWo7e7n0IhiVpo\nUCtksbtNFFJZ9K5BGlYr67dmKTmH2AgVCPgNrK8kWNso0Ntvxze2jfzGAQrhMBpNEZVGjtQQaZo9\n6HxBrv7op/QM+Bh68BCV2Bb2gAdxbo1kosDc1RmCnRbsXQHqciXxWIxWvUa2AB2+TpyDg5i7e2hV\nytRVBvY++RgLF25RrVTpGAqg6d7Gyqvhtxd8Ld3dFjo6TFSyWcI3blAIh5EJApaeHty7dqE2m8ms\nraHU6bAPDn5svYp0ukyz2UKrbWucvPXWOvV6E4VCIJOp0Gg0OX68i0DAjMGgIpEoIYoCDoeWt95a\nZ3Y2weJiitu3o3g8erq7Lfh8Rg4fDvD666sUiwJKpYjRqGTXLi8Oh46trTyzs3HUajnj4y7S6RI9\nPVbOnl2hWm1gNKrI5yVsNi379/u4fHmLDq+WpdkMTalOaDWD0WpAr1fSG1BhaRaQyhV2fud3yIdC\nhG9cR9Ro6Pvyl1EYjJRKVWqZJO7de9HWMlh7e7C4rZjddhbeWkZvMSJTqejaPYZv924QBKRslo0L\nF4jeuYPe5UImisSnp6lXq2jsdjRWK0q9nuVXXmHbN79JfHaW1Nwcnp07sfX1sXXtGmqTiVKiLV1v\n8Pk+UXP4O6jXGzQarY+kgPx++OKslv+O8Pzzz/P1r3/98w7jPshksnulmn9PUzX5SITcxgatZhOj\nz4fB5/tQ8aVyOk0mXaZYlaEx6HD0DdKdlXPl5R+TiqRYmw8jarSMnjyA26HCZ6phFIpEC1aCRx8g\nszCD1iAjHU2i6+knr/Vx9UIUQQZWl43eR79GvVggsbxKQ8wQr6gJ+LZhODBGORFn3G8mlZZYj9QY\n2O5n25CFerFAPieB2kg+W6JzVzfRRJWF5QyrW2X2G80UsgVGRz0MDzuoV2tIpTKrdQ3+3lEUpQRS\nNsfW5Ss0FFpunb2KZ/sIo/v6Sa6FUAp1zC4bx785gFnIs/zCszT0DtwuLWvTKzSbLZotAUFvxj/Q\nwY2rW4QTK1gHh9Hr7bj6lOi9dRwOLX19NmQyGXK5DL1eTdPno5LJ0Go2SS8tIddq6Tp2DOfIyAfe\nh4+DlZUMN29GaDbbTaihUJ5GI8SpUz0f+AW4uJiiVmuSybRNx0RRxvJymu6DXrRbG5SSSbQ2G8mV\ndRLRDPEcqJRyth3aTlMnJ16pstOSx/6NPsKpBjazCp8hS2Q+itHsYe75nzP92jVsgwPYHXoknZtQ\nREEiXSNTqKOwOAj29FKv1qnpzTjcFnQmHfpmHVGuQKaxoDHpqdaa1DVmCrEERWUJgRZKjZKenaMU\n0PPimQ3U9QyuwXFKVQGhukUqW8JzZDuC2c3F63HGHz6BmNzA87WTWJ0mKpUmtmCDjdA8Uh2yxSYd\ndhtma4tr59bw2JQopBoOj49SLMztN6fwuLTEEyXMHR30nXqQfKFOR5+HjoAZo8tOKJTDbFbT12fD\natWweu4SqcXFe9c7cusWcrUa765duMfH22Wbf4UYmsmkRiYDrVbOjRtxyuU6mUwFi0XN5maOUqnG\n0JAdr9eIxaJ5230XNjez3LoVob/fRjRaoKfHQq3WIBAw0tNjRaEQOHIkyPR0nG99axvLy2lef30V\naJfz/vN/3o/RqOLGjTB9fVZyOQlJqiMIMnw+PUqlmY2NLDIZGPUKXptcp1hqELAb0WgK7Byz47Qq\n6NBrSV2+zJ2nN7EJaVR6Hf2nH8YQ6ERhdzN9dZ5KKsHY8Z2QkcgXakg6J8n1LXr3bENZzVAvFdHI\narSktk6R1m7HNjREJZPBvWPHvd6r6OoqdUnCu2sXsTt3sA8Oonc6aTYayJVK7ENDaB0ORLUajdWK\nXKNBodOhczrxTEx8ojJovd5kfj7J3FyCer1JIGBi2zYnOt0HTNO9D35DRj5lVCoVXn31Vf72b//2\n8w7lPXj44Yf5/ve//++GjGTW1lh74w3q5TLQtoX3HziAvb//V36+1WoRn5lheSHOzGwSvaqF3qCm\nc8iPCYnuLiOVZAKrTYfKoGffLifa7ApuTYmNm+usXRf5yncfZGK0F8oZVHKQRC3X724hFWsUYnFe\njiT4ym9tx7tvP6JOT0Z0kK3pmNxSsHBpGbFZ46GTemxWI8O7TGiXU5z50RvUWgKO7g4W5xOI5hbh\nWJGRsQCHdttYX8+ysZ5heGcvN69vcOapC1jsRh77vT34lTnO/39n2H1ijKmfvEC9JeLePoyo1nL7\nlcsc+QM/a/Ob+P1GrD4H6y8+S1Eqou8IsjqfZvDgGCrdbhbuhui02Oka7aFeKXP97DWc20cBGel0\nhd5eG16vHqtVc5+bK4DR57uXCWnUaqjNZvSfsq/F2lrmHhF5B8lkiVSqjMfz/u6ijUbzXdtt/w+Z\n1kTPqVMU43EK0TiT5+8SidcoptIU43E270zzlf/1O5RTEeaf+QlSuYLe5GR5q8Bmo8KB//l75Go1\nlE4fGVuFWxcjHHtyFxdfm2X30W10jxiJJiS8pjpWTZ1quY65187W7ApWs4Lbr97GatUx2G/l+MlB\npPgW9Qf2o9KquXQpRPewH4PdgrxzG0//ZI5OnxavXU9Mbse808yODi35CpyfqpB/I84Dx3twdJox\niB1k1lZ5/m+eJTI5hXfPfob6Lfi6XVBv0NnrwmuucfBID6tXbyO32dG6fWxev0n/oA+z04q/v0V0\nPYpJIdG1tx+NRsHqaoZyucbAgA2Px4BOp6ScTpMPh++/4G+PYzu3bftEY6E+n4HOTjP5vIQgCGSz\nFRwOLY1Gi0ajRaHQ9pTxeu+XLS+Xa8jlAteuhchmJfbu9eN26yiX66TTFV5/fRW9XoXXq6daFZmZ\nSdwrtSiVclZXM/T0WOjrs+LxGDh6NIhKJadaraNUylGr5bz22ioDA3ZsRpG+ASfnnruJQqwzMuxh\nfNTFxnKEk/vN+JXbMZp2k5yd4c4//YhCLMXR//J/sLKUJLMVwePWUy0UCZ8/j2TwUdfakZOFVpXg\nwQMIcjnVQoF6Lo2UyaAym9G73VQyGSI3b4JM1v5fc7sxer3INRpazSaJhQU6jxyh2WiQXl6mFIuh\nd7nQWq10HDiAd/duaDZRGY2/sr/n42BpKcXFixv3JtXS6QqSVOfw4eDHJqG/ISOfMl577TVGR0ex\nfYqupp8WTp48ye///u9TKBTQ6z84vf1FQa3WYHMzRyxWRK9X0tFhwmhU0Wo278mMv4NGtUp8agpz\nMPgrywPFWIzY8gZz8yU0lTiL565CvUZ5rBu7U89Yjx6fOUgTOQabCSE/R6vRYM+Agt0H95JMFnGb\nWnj6gyz8yy0i0SiZmpZkqMT2fTt5YblFPprln384zR/+wSH6xg/xxlubzM7kmXkphFanYf9BP9fu\n5MimI5SkJgcPBcHsYmszw85dHehsVuqiloEuPT26FKtnX2FrK4vD76RqOYg/YGH/g9u5OxXDIG/w\n1jNv4nKYyJVhYyVGU64mJS0S3DaAw6pElosxPOqje+8E2UIT+0AfsmaDrZUIky+9hZwqu07tYWTb\nODpvgPBGghefvoYgiPdk3hUKkVxOYs+e9x9T15jNaD5iureSy0Grhdpk+sjPgUr13oWt7Wz63i/T\nXE5ifj5JOJxHpRIpFCT0+l88Dw6HDqtVg1wuotTpiK6GiSQk9HolapmOeiZBLpYgsRHBpFOh9XXg\ndVrJbm4hNmsYHF5cfhsutYYzzyVYno8yvr+XSrmG1WUh11Azd3WLEw/1c+epn3Jlch5/pwOxEEPX\nNYAoiZz88jD9E73oChss/svTLNxaxtrhY/zrj9J/4gjqnhRXbyXRFGRIlTrFUpXkZp5rm2XqTZg4\nNEginGYjXMFo1hDayJFKltjWKSd84w7NFvj6/EiRFZwuNwdPHUFlNDI9GSHaVKM22QgcOojUVCAq\n1cilPJtnf07GbkPv8eCfmMCgqmM0qjh3bpVQKH/v+gUCJo4eDb5vs6rsU9Cm0GgUHDoUIBotYjSq\nyeUqlMsNisUqMhn09FgIhXKMjrruy4xFo0XW1jJ0dVkQRZF0uszu3V4WFpI89dQ02Wxb1v7hh/sQ\nhDrJZJlwuO1RY7GoSSSKVCoN1tYyHDzYQW+vlbm5BF1dbi5dCrGwkMRub/vaTF2N8fCXuvjOH+6h\nUpTo3uYnk6tzaLuKlWf+iejtW2i1CjqPHObwn/4JGzcnSWwmePEvfoDJZkY2aEIsJXF2d1BXGYln\nm1i39SMkVlk4+zrxuXmoSujUoHe7qUsSM08/jW1wkOTcHNVCgWIsRvDIEby7d1OIRLD29qI0GKhL\nEvlQCFMwiG1gAN++fZj8fkydnZ+qUu7CQvIeEXkHGxu5t7NYH9yw/m78hox8ynjuued45JFHPu8w\nfiUMBgN79uzh7NmzfPWrX/28w/lQNJstrl3bYno6fu+BdzqTPPBAF3qN7L6Z+HdQr1SoVyq/koxI\n2SylqgydssnmpUlsBoFavkIptEqmqMM3Pkp58iJ1ScLz0ENEVpZo1BrounrJJGNk51ZoukTW3loF\nuQK9P8jm3S0UcqiGNzhwaJgXnrlJbAumrq/i7PZy/lqK828so9YoMTlViCotc/NpbCY1KlWNlZlN\ndh3spVisI7TqfOfUBEqVAml9gTd/dBOdRqSSzbORL+B269H1jjG8I0Cj0SC0EiG8nqBeM6E0ZxH1\nZoRaFVldYnExidNqQ9M5SGwlzsbMCtuO7yNNgaZUZev1u1i8LnRCmalnnsV/YD+1phyjrxtXfxKZ\nSovul3o3tNpPLlhVLZWI3LpFdm0NaGdUPBMTH+l3u7utrK5m37Z1b6Ojw/geFdZqtc6FC+usr+eA\ntoGe06mnWq2jUslxOnXs3Om5r/lV63DQuW8XszMxBIea0WMPU4hFabn8aCygts6yOb9G774xXC1Y\nuz3Dxp0ZnNtHiIbSHP/KBKnpO0RzFWSCjkvnZjA6LITuziOIMnY/MIJGKbCykaMan0YRHGL22gpW\nh474+gIVuRV9t4KteJHc/3iN/b/3GDqThq8dt2HW1/A84mD6bgxBa0ZbVpMvQ6PeYCNU4Ob1TXbs\n6ebO3RgKuUiP3cb01Tk6er0kUkaWZ1bJX9ugWBUJHjuGYHEQDodpLW9Bo47eYkIm5UlO3qJZr6NQ\nq6gtL4NMhnt0lHC4wNZW/r5rvLmZIxot0NlpwRQMEp+aundMJorYBgY+FZ8ajUZBZ6f57RFd6e2m\nVDUul55CocbCQgqvN0p/v7UtrV9vEokUGBlxcvbsMpLUIJ+volK1DTKLxSqHDwcoFtvu181m656d\ngEajQKdT0NtrJRg0cenSBlevblGptDMiiUSZ69e3UKvljIw4mZ6Og6hgYSZMPRUlthEnX9zP17+1\ng7WnnyW/OEthZZGSKJBd3+DI//7nOIYHSURS7PnSXlQKGS6Hmo0bd7ANDGBx2/CKKXKLBerxDbRG\nPbagD61WhUDb2NHg8TA5O0vk1i36Tp8G2tlelcHQHqePRmlIEs7RUQxeL0avF2Qy7MPDuHfs+Ezk\n+t+drWzH1I7r4+KzJiP/DdgJ3OB+B98/A74MaID/ArzwGcfxa0Gz2eQnP/kJr7zyyucdyvvinb6R\nfwtkJJEosrCQuo95x2Il1tczjI660TmdVNLp+37nnQaud6NardMS5SiEJnqNiFaQiM/MIpXKlB1W\naiYR745tOLdvRyaTsfrGGzSrVaR8nsU7S/gOHaVvrIfQhQukV1aolCRUdgfW0d3cevYmqxsljv3h\nOHK5iEEjQ2vWI6tVeeRUgGSqzOpmmZaoIJaoEOi0o2oUSM4tEd+oQk3iS799iCvnl3n6H69i8Vix\npafRahUUskUSqQoarRIqBWy6OnK7jhd/nCCfkLPv8AjpzTCLs2H6Dx8geusmGquFu7NpBnbv59yZ\nOSrpFIf2e8nM3UXr7UBudbJHZyY3fRukPBlRzcbsGuYyOFyd9O4cYnV+i2qhgFKvR6dT0N39ybQ+\nAOJ37963cCVmZ5F9xHq132/kgQeCzM0lKRSqdHaa6e+3IQj3L3rxeImtrcK97VSqjE6nYOdOL8Gg\nGaNRhSDIqJXbCrsKtZpcU8eZC0lW7qyA3sYPn5rlt797EEupxk/+7hk8TiW1ksTkT54jeHA//sNH\nufA/XmaPRk/Xth4Kc9eIzK0weHqAFFYyMytYHAY2FjZZmUoALXweLUaThrWVDA886Mfqc2E0ybl0\nN8Lc3S1kMhlDQ3aKsQiJxWUsKoHom2fIatVEKnrEYouuEydo1OrUSmW2dwoMBwPs3+tF57Dzyqtr\n7Byzki21CPT5aDZbFHIVLB4bVp+D4K4dXJ3KsXenl1xCh6C1UI5HCHrMFDdWMHZ0tBVTRRGZINCs\n1xEUCorp2nvEOZvNFuVymxR6xsdRaDSkl5cRFQps/f335MU/LWi1CoaH20aI0WiRyckojUaL48e7\nmJqKkc1WOH68Pa1jNmu4dStCsVhFqZQTCuWQyWTEYgX27vVx+3aUGzfCjI25efDBbjQaOWq1SH+/\nncuXNykWq6ysZNBoFBgMSkKhPLdvR9m928ujj/bz7LPzbGxk0euVSGUFex8Y5NqLWboHPew90ou6\nUaC0sUytWESp11OIx2nWaoRv3mbnH32PyD/+hKUzr+F0aAhnknjHx3EO9LJ8+Tbx1RDB3TtoinJE\nuYDJaacQ2qCazaA0GNi6fIUd3/42S2dfJTI1g72/h3q5jLmzE7lWS+DAAfQeT1vtVqmkViwiqlTo\nHI6P3Uj+UdHbayMWK91HSrxew8fOisBnS0YmAB1wBPgrYBdw7e1j/xX4v98+/hL/TsjIpUuXsFgs\nDA0Nfd6hvC9Onz79b8bFV5IaSFL9PfszmfbUhnP7dqRslmI8TqvZRGuz4R4bu69WXas1mJlJsLCQ\nxGIU0TfAFXCwVikhFcuICgWCKEdtNLTHf/fvJ3L7Ns1GAymbZevmbZJbcdyFDK1YGVGjQeewE729\nRCu5gsnnZ2hHkGwZltbLPPDlEXxWGbGrb9GyaqhU6jz5UDfzmSDTs3GOHvYzd2WWQnwLT8BBoyVi\ntGgRRZHXzi4QW4/Tu6Mbi1NJLithMeuwWIrYbWqWltL0nbYhSSUCPj1f+cYEdlWJ1fMXufLqJLmC\njvHv/A4thQZbNItep8S6GEVjtSGTK6gjsLWRoDW9gMdrRrF9mGw4iVKTI7u8jFDOoKrmCFoUeHa7\nWFmM4/ZZ2baz7WD6SVCXJDKrq+/Zn1tf/8jnCATMBAIfXApq9xTc3ydSLNYolWqYzWpq5TLR+UU2\nNrOEQgWCI51cncyTbZkI7p5gbiaGwd/BzGqZTnuTza0CgsKM1tqBVKkzc22R3Z09WPUyZJkwHf5t\nTF4vUpGaTJ99i32/8zh7D/UQipRwdQeILGxQKtVQ6zRUKjX2Hu5DbdCj0tUoVGs0miAT5RQLFZbm\n4wz3m3B6Lcy89CqRW1NUKxLbv/Yo6VQZv7mKzFFlcNcA1USMteuT9Jh0pDf1PHykH4NVx8bSOvsP\n7mDlwmWya2vUkTNwcIx4RcX5F66hl1UY6jdjHdiDtDaHz2+kmqhTs9sx9/RSK0vIRdDY7bSabX0P\nhUK4TzROpRLvjfUqNBo84+P3SPxnJR/e19cWGTt3bpXhYQdud9vk7p1sSDpdxm7X4XbrSKUqiGJ7\n6sZkUpNMlrFYNG2fnjtRQEa5XKNQqLJnjw+XS8eNG2HkcpFmE2Znk4RCObZvd9Lfb6NabWdYRked\n2O06UqkStVqDHd8cRqNscerJI/T0WDFWwpSTGVq1KrV8FpPPi8ZiQZC1cAwN0KiUya0u4vKZoVyg\nEE+SmJun6+FHuPD8FZqNBtFUlT0nd6JWQHJ2BlEhx9DRgaBQcPX732f0d38XU3cfC+dvIIWLBPp8\nWLq6cO3YAfBrNyvs67NSrTaYn0/SbLbweg2Mjbn/VWvLh5GRIeCrwDvF4k3gX4CZj3DuvcDLb//8\nCrCfX5CRd1YYLZD5qMF+0fHUU0/xxBNPfN5hfCD6+/tRKpVMTk4yOjr6eYfzgdDrleh0yntW8dDW\nOHC52sZNWquVnoceophI3NMOebep0/x8kitXQlQqdZaXawQ8dvYOmBk+sY+WlEcuiBhMGjzbh1Fb\nrRj9forRKNm1NSrpNNa+fnZ0D+Ea6SeyuMHFKxG2D9uxdRTZml8jEwrjHd9LsKOf63czjG+zEr50\ngcjMAj0PbScvypFimxzbZWPbcD82ZYkOt4KQ4OGNl6eIhtLsPjKIyrrMth1+No1KRJ0Wz4iPzNo6\nAi2atQrLizn2fO1BUhUVEzuDGOxWrp2bJJMuMDS2k2/8n8dIhmIk62auX1nn0E4rubkpLHYDK4tR\n7GY5yUyL+dt30SvrpKduY91/ghuzNTamQ3QN9dHZ08XiuTeJTM+y+9g29u/ciSDGcdg/BXL9PgvV\nJ22gezcsFjUmk/re9AyAXC7g9babXJNzc9y+GWZuLo5aKaeBnPmZCptbRTqDRkp1BVuhDHK5nPyQ\njWSmRqBbAQIUsyXKJQmxVkGoFilKLfoCVurjPeQ2N2k1qtQWr3Jk92GuzwoYgzYcwUXcTi12pwFE\nOR2jQwjyFnWVhZtTaQydXRjjRRotGd5+N2OPTGB2mrFrqtTtWmoNLTplE62ljiIyS9DmJb4e4eLf\nP00pnUep09G/axCPVc7mmgWdRk4mVcC3axcnt+2m2lIiyTRMXl9Do1MBLd588RZqtZyTj+2k90gv\nxVSGW8+eZe7uJuVUEqNJw+7+IaKTk7h2jDE66mJmJkG5XEOrVTAy4sTpvJ+cftZKnTKZjI4OE4GA\niVAoRyJRupexkclkCG8/R3p9u9QSibT7QEKhPLduRfje93aytJQmGDQjijL6+qy43XpeeGGBRx7p\nJ59vC4hpNCoajfbouAwZer2CWq1BKlXhZz+bw+XS8ZWvDCC06li0LWZvrrC+kkBT0HH+rUuMHB5j\n4PGv00Agt7JMqyZh7u/DOTJCJZPF6PGQWlpCpZSj0qrReHxIxTImq4HoyialdJ7X//F5Hv/T76B3\nOankcuQ2N5l79lkqlQalTA6L2cLBP/o2lZqIzGjF4Pd8bo7JCoXI2JibgQEb9XoTg+Ffn4H5oCfo\nz4BvAT8ELr+9rwP4Z+BHwP/1Iec2A+8o32SBd8/5/RXwGPC7HyPeLyyazSZPP/00P//5zz/vUD4Q\nMpmM06dP8/zzz3/hyYjFomFszM3Nm2GKxRoKhUAwaCYQ+EXjo1ytxvQ+Ggb1epPl5TRKpUAmUyOb\nlTgzlyBdaHFq/wFsHd62K6XBQK0pkM1K2FpKjIEAtXPn2q6jDj0NjYVQVs7KVoNEpsHcYpq+gW56\nHQ66D+6hqPOzHpbYNt6Jw9KiqIWR39pDq5glu7FKrSRR1tWxj+ylUZTocCqYWijg7/bg9xkhEyUT\nsZDNNAl2WVlaSrEQ0uM7cBhTM0FJbsC7bZjJDehsNFmaXOfs02+xePkONpeZTrPEpStx3GawO+38\n9ukhonOLVDcXsDoc9HxtDw6/g431LJ6+DtLJEoae7bz03F1Udhc1uZ6VpST5isDYoBmTXiR66xYN\nScK3eze1cplCU0Gp1Jbe1uv/FWN77/i/pFK03hEak8k+db8Sg0HF/v1+rl/fIperolSKDA878PuN\nVEslUtEMlWoDaytNan6dTM7DxNghQtEyBqMamVxEoVER6HXi9NuxOQ0o5AKFooR3xzaMqgbFfBHN\n2BFCkprMpWV6ens59HCBzOI8pXic1YtXmThylKwk8lv/y9dJT90kubiExaZDnlzBHAgQlxk589I8\nO3b0su+3e+hwy2kkNiEboxFrIlJFp1OiMZtQqQTKxTpKgwHJ6mHt9WskVrdoNZrQarC5sInZIGPs\naB+paIbkwhpTFSMbGSWzC2lqtQZjuwI8+e1dzN5YJB7Lo1LKkYsCRp+P5biAfGAPlqIMjcmAKRAg\no3CjLiWJ3bnN+KlTBIMmyuU6Wq0Cm01LXZLIbm1RSafbEx0ez4cq7H4a8PuNLC+n7ysd+f1GLBY1\n5XKNGzciDAzYSCRKZDJl5ueTaDRystkKfX1Wtm930mi0KJdr/MM/3ObEiS58vnbvkctlIJutoNcr\nMRrVIGuRz9e4di3M+noWn89AOlXGZFRy7KifpalVXvynNxgc9RNbzaI0Gll+4wJWsxL/rp3oT38Z\nrdWCqFKxdfs2SoeXYiKJZ6gPmjVc20doCQrMLis7T4ySDPmYvb5AYn2LUiZHZvouifl5GrUaWrsT\njSCgNRtZe+01UKiZu7OGY3CQoX2f3gj9vxYazSfvKfsgMvIfgGHg3S5V/xWY5sPJSBZ4Z+7KxHsz\nIP8R+N+AM7SzKO/BH//xH2N+u0N/cHCQffv20dnZCcDq2ynfL8r2z3/+c7q7uxkeHv5CxPNB26dP\nn+Yv//IvWV1d/ULE88vb78bwsAOXS0cuJ6FSiTiduo+kvPkORFHGlSshtrYKOJ06hoedrK9nKe3p\nQWVIUqtILM9HqTSU2MZ38fKZVXZss9Fx4ABSsUSibqTQ0CBTOvHsttBotpDVKhhMOpQdHuI1A6nZ\nFXq7rfgcOQS1imQrh6wCS6+80lYhFUWi1/L02IxgdiFa3Ey9eY5mS4ZJKyCngSfgRO+UsXr1FvnF\nDcLVNMFH9pMqaym6nfz4bITtOzwUc0VkyRRz52+hU7fYsdPPnR/+GJlKQ2u4g0+L4B8AACAASURB\nVGo8TOruLRomP7liE4WuhLKaRW8MkL7xPOV0Ho3VDdYxmsoQokYLKgNKnUCm0EBQa9G1itSlBqV4\nHJXFyuxSgZmZBJVKezEaH3fT3/9eYbIPg31oCJkgkFpYoNVqYe3txfY+Y9ifBB0dJpzO9jOjVsvv\nva01ZTJErY7szAUWL96iWm2gjOfZtXMPAb+BYrmBz2eko8PI0JCDly4kGH3oEOWNFZSCnIrWwNHf\nO0kilqOynqI+N0P45nkSl9TsfuI0al8nlUKZos7H3/+/F7CY1WQ7W5iFIq1MjKXLd8mHQuz4zh8g\nWbvYM+HkR0/PcPrRQdILk9QzMUZ71cz87DzmQBBbXx/5RIp0KIZ3dARzTy9rSQGzRY8MQACVXk8+\nXaRWktBplUwubNIxMELs9gojfjm79wyzHod0ooDXb+St5xM4PWZ27Axg81qpVussLKRIxlpI2MFg\nIrxcRBtb58RBB618nnqlgt3+i0xIvVpl8+JFUouLbWIpk2Hu7CR4+PD7+g59WujqMtNoNJmdTVCt\nNggGzQwPO5DJZKRSZSKRAq0WOJ06Dh7sYGLCSyxW5Ny5FU6e7OHo0SDXr28xOxtHp1MiigLr6+0e\nkAsXNrBatej1SuLxIkeOdDA/n2RxMQW0hdgEm4Zzryxy8kSAra0cao0CrcVMcMhEZW2OqtaC0Kgx\n9dpV7EEfh//TH3Lzx8+zuJgiOCRHbzOz9OZlvKPDVOslzMEgS2fOUi0W6RjqRe+0k0luQ2uzIvT3\nI9do2Lx0GZ3LhXt8nNjcIhqXm9k3LiMo1ZTjUcqxKKIyQDLZniy02zUolf/2ZlM+KOIG7fLM6rv2\ne98+9mG4CHwPeAo4AfzdLx1TARJQAd43T/sXf/EX73vydxaxL8r2q6++ypEjR+7Vyj7veD5o++jR\no3zjG9/AaDR+pM9/Htu/DJtN+56piY+CVKrE7dtR7tyJIUkNQqE8mUyFxx8fZCNW54HDR4ivh0kZ\nEsjqKjbSDWq1Kleuxzi6cxj3w1ZC18MIKgOvnF1DrdfxxKMPIc+HMSjr6CxGarU6wuICG8/McTca\nY/d3v0Pn0cPM/OMPqJfLyJVypGwag9VE+OJbbP/9PyQjN9G7vZvblxco5Jt0DAaQq5VMuMqYCwY0\noge9RkaXOkbL5cNgNVKrtR2GncYWVlHB//SnD5MLbSFvSuSdakp1AYNJg0ZZJXxrmsEntpNNpAit\np8mkiuhcLiJrEbRCnValjJRKYuzqQUKDfcCI8v9n702D5LrPc79f9+l937fpnp6efcfMYAcIEgBJ\ncAFpWhQlWVdW6ZaXG/uWJd+qVPIpNzdVzoekEldyXZWqW7E/JM61XaZoSaa4k+IGgiAG6wyA2dee\n6X3fu08vJx+apESL2kjRoFT8fcE0CoN655ya7uf83/d9nnaVWmQHrdAkl8+idzjQ2O2IJh835uM0\n6k3ESoVyTs682P5E90RQKHBNTOAYHQX4TOPJ1WoFTudH396UWi16vYJStOuNIZOBa3SEp//Lq/zu\nnzyGwWGlVGqgUinY3sohIWcpZ0FtGKNvWIfOqGU3BTrk7L/zNqn1TbQWE8mNXZ77z/+Vh7/zTba3\nIzgPjrC/m6E/GGL14jyHpq2kbt2knsvRrNVIr6+jGDQwO+JB/2+nGelVsfFsBIe2TTmSBr2NWlvO\n4Jn7Se/FkFptVD0h5DKwKitoh3s4ef4wsUiR/UiRYjKD3nOM8F4Jc8CPTmgSfvtNmnINHYWOvoOT\n6MwhBLWa3//jk6jkoDSaKbb1yOWy7nq0XCCbLJLf3kIml6MdCSKXddBYrSj/RQugkkiQ29r68QmX\nJFHY3aU0OPgr277/qgiCnJERBwMDNjqdzocfuisraba3cxQKDba3c2i1SqpV8UM7+Eqlyd/+7SJP\nPTXGk0+OMz7uIpXqpjnv75fQahU8+OAAyWTl/bVvOclklXZbolRqADJUKgGNuom/xwgygTo6LMEg\nQzMDWMVNLn3v76mm0hh7PASPnaTcgOTaFjduxEhECphVIgaNxMTvPIRnbITs1jabr75KdnMLSWdi\nrqeHgcOTpLb3yd6+iXVggImvfQ3/sWNk1taoFkq0myIKk43ohTexBXz0Tg5STOe4st4mFusObrtc\neo4fD2CzffYnVb9Ofp4Y+Q90Zz02gL33/y4ADAF/9kv83zfoio233//6KvBXwHeA/xMYpStK/rdP\nUvjnCUmSeOaZZ3j22Wfvdim/FBqNhvvuu4+XX36Zr3/963e7nM+MWKyMXq9ieNjO3l4BSeqelFgs\nGlQqgViyRiQjZ2lPQqn88QGgKLbJZyvsxNugNuDT15jxlBiYdFDY3mJnYYmgAwxGPZHLFzE77SRX\nN6mWqyx87zmO/Ol/g/vAAer5PHq7DbfTSS2fx+R109aY+dGLG8w+cAilzUWpIqI1GrBpW2y+eQGT\n08p9B43EdhMsvvgW9/+738PsM6Np5Aj5NRTCm5RrWa4//TyCUsHwvceI3Fhg7P57sNl0lPYzVPMF\nasUKib000Z00ckkkePIkkw/fT/TKPAadgNMqwyd3cWejSinXYTRkxaasYjcrMJ06hVKvZ/Chh4jU\nFeRjScRSiVajgdTpUI7HSc25PpFAhE8nQlqtNvF4hXJZxGBQ4fEYPtZr5GdhsNsYGPHQqInUqiIG\nj4fxI24SqTrff3GRAwfcSJKMhYU4r7+2gcOhZ3bGRT6d4PHzgzgMEsqOjE6jitbuoFgScY2NoVLK\n0ChBOzDF0nKaf/MHJ2nVa7RqDpTyJtTK6I0aTDYT7pAftddAYu0axw8fQSxmSS/dQTLLyRVTNGQa\n7MEelitV4uEUCjnMfcXF+qU3aTbbJCsqLD4nQw47OncVe++9aL0edrJtXCY1iduL6Dw9bG+kEIQq\n4VurzHxpkHazzcD4ILsJkd1oiZMnTUhSd/vhxuUtOnobGkcZsZBloN+CRqdGOzhFpdr6SGuuWal0\nT/x+AqnToVEsfuw1lySJaiZDs1JBodGgczg+tRDt3vPufc/laly/HqNWazI25qRabVIsNiiVugOq\n8XiZ3d08jz8+hNWq5fnn1wGJt94K0253cLv17+fXdFfAW8027XYHlUpgaMjGiRN+FhcT2O1anE4d\nh4/6SSVLDIz5CO9k8RhF4q9dQ6VVIxl1tEolihtrDD75NRpiG5tZhRmoxmNsLizQM9pPz/QkC3/3\ndxT2Y5jtZrxz06RvLZBbuYNKp6VZLmMJ9VMVwdA3QD2fxz46SrsjsTN/DYtdj9NlwGTWkqsK7O0V\nP2xdRSIlFhcTnD7d96mu8b82P0+MvASMAEfonpBIQISuqPjpFYeP5z/8i9ffef/PP/0Vavzcc/Xq\nVVQqFVNTU3e7lF+aD1Z8f5vFiEwGGo2Cvj4LarVAuy0hl0OzKbGxkWZjI4fZrGZ9PUt/vxWNpvvr\nIAgyjDYTznyZyMWrPD+/TihoJnNxDUGtZnyyF00jg6AWyG9u4vTYMNsNSO0WvqFeOqKIZeYY5UqT\nZhuWlqLozE4Cc6fYTCu4vlKh0k5w6HAPEpDNi2i0cm4tRKhWd/GHnPR49fQO+xErJbavLJFc3SUS\n22HizFEEZRO1WKCcb6FQK5g8ewxlp0Ijk6BSETENjrC3todJI5E3arC6veQqEhatDvfoMDuRKuuv\nrzD91CiZisDQoAWfV8fhqWmUhX06gwHMvb2Y+/rYeXuZzOoq2c1N2o0GBo8XS28P7UoR+NdN+BTF\nbprq+nr2w5ySkRE7hw/3oFT+ch9uBreb4IER1CoZ5UoTx3SAOy/ssXwlxvZOmf39EocP9+B06hkb\nc2DUQCe+RbGYon1Ug1hvYBocpCo3sJ3MoNNq2dopYnMYUBkMhBdyvPTPtzh00Mvjj/TRNg9T2V5F\n73LSyKYwB/wo1ErCr72IvneQjX/+J3xzs8w9cJDMrRvIFUbS0QyuwCwdg4PK/CL3/em3aOWz1NJp\n8vEUGneAWqSGOjTAkS/dz/5GlHCkyna2g3lUT7XWQWfUo9ZXaLWhWmqgUbTZDZfYDpfoHfBw6lSQ\nnh4jV65EiOzl8LuUbG2DY2KSQ+M6HFYlV9cqtFMplMosIyN2pqZcXaM4oxG5Ukmn+WMBLxOEj804\nkSSJxOIiicVFWrUaglqNbWiom7XyKWg220QiRZLJKq1WG51OSb3eIperMTvrQS6XEQxaKJcbCIKc\nvj4LMzNeLl3a5803dxgctDE35yGfr3HkiJ+RETvf+94yt24lefDBfjQqgVisjCDIePLJMe65J0in\nKWKzaRgYdpFLV3jumetMHuxDno+RWlqhns9Tz+eRCQrKsShqtYwiekx6GTdeWyA0PYTBYsQ1NoJS\nr0elUqIzG1AYDWitFrZ/9Bq+uVm0NhtqTw9vPfMWnqNN8h0Dc6M2CjdvojfqsFvVyFUqXP0BHJMH\n2KhqkKSPei4lEmWqVfGn3JI/z/yixlKbbrvlC34OzzzzDE899dTnflX2J3n00Uf5j//xP9JutxE+\nw+Pyu0nXtlqJ06lDoZCTy9Xw+020Wm0EQf6+OJHjculJpyv4/WZksq7DpK/PhRjfY3dhDbUCdhbW\nmJp2U9vfxDzmJ76yzdDpU1jdVmr5PFaPHc/UBMmqkvJOFUx6nGNHSCysYhk0IFicXN5RERpsoRVE\n3DqR8Bs/whjs40fP38H0zZN4B4Ncu7BC7HIY0wNDNGVNNHsZ7rx5lb4BJ/uFIvt31hgY9uDqdWOr\nVWnFd5l54hyxxUVaMhWagIve2Sku/sMLGA1axu8bw9jXz82re5w+7qYgtnH3WLGonVyej6JTyxkN\nWBj0NoldfJt2o4LFpKEci1FJJrGqlaibBcqRCNB9Ku4bcqERc7SbTer5PG1RRGM2f6y/y6+TWKzM\n2tqPHR+bzQ6rqxl6ey34/aaf+X1dX4wmarUCQa3G6PeTWV9HWctCs47YkqPRqPD5jCzciCI/7KNU\nrNHj1WOQ1chvFXn4G/eTyBVYWa9x0i1gtFkxpOrEE2WUCjkGlKRFLT1+iYMn+qlVWyzutHA5xhh5\nYAB30EcpHsfs72HvvUuoVQLyWg6jJFK4/DrDjz+Gsp6jVaviHOxn+KEHCd/ZYPaxs3Q6Esk7d4jf\nWUJCTlmUIxcUqKQGWZOBmszB8nYWhaxMT18f4dU2PrcGvd5HPlfB4TSiN2h46YVlvAM9qA0Ghoft\nJJMVVlYyVAslyuEo/W4jSimJUT/AtbeXWby8jv/EPRjcbq5fj2E0qhkctGHweHCOj5NeXqZZr1Op\nSZhCfVRkRrSN1kccUSvJJImFBVr17nZTu9EgvbyMqednu/n+IiTpo2aI+XydalVkZsZLMlkhne7O\nTgwO2jh0qAeXq4BOJ6DXK7Fa1RSLdS5c2KW318wjjwxisWhYWUnjcOj42tcmeOutXdRqgelpF0tL\naSbHbByaMrO9uIGhkUeKFdnblcjEMvT4JhBkaTLr63imJ2hks5QTCVR6Hba+Pt76/jKTR49SSGRQ\nCHDfd/4dCEqyW5v03XcvWz/6EQqtFq3FhNHjwuT1Us4V2b2ziNZsRm9Us3YrRTQ0zfjJ+2iXcvgO\nHUJlMGANhdB5e1h8aeunrpFGo0Cl+s16X//Nm3L5nCFJEk8//TQ/+MEP7nYpvxK9vb14vV6uXLnC\nsWPH7nY5nwlOp55Tp3q5dSuJUa9ketzCyKiLtc0CDocOkJAkiVDIDMgwGrt280NDNnQ6FTpVh/Ep\nL6lYnkRFhlqjRK5oITUbNOodqvUWE1/9Ks1iHkmuYG27QqaupL7b4NryOmOTHkbHDmK3G7j47j6X\nXprnj//9MR48YSd+dZ5SsYrW48Osgys/usmhs3OEagqy0ST+8QEUDg9XX34PkJOMl9AZVOiooTIa\nESWBZqVGI1+gsLeHe/oASoePTDxLNlVAFRym78gkmaaOmwt7TJ+aptSucmVLhqBXMHsmhCNeQyqk\nyd5cY+1KjEIkztgjD6D2+KjWJRI7cTwDPcyGwGWeIZOp4vOaCPXJEFMxYjdukFldpS2KqAwGfIcP\nY+vv/8zuZ6HQ+Cnr6Waz835P/+OJx7umVbFYGUmSGAiZMIjgOXgIuVxGUduDwVyDBpjNKgxaP9G9\nNOceHqFcrFJLJxk+/zBXfrRI7PYy+ZochdGGZWyOgHIduS7K6KSX/tkx5LImQZ+G40cP0WpJNNoC\nrUyUfLqC5J1h8IyP0u0raM1WOkBybRNLX4hOW6SUzCC3ODF5lejcHmQmB/1ne6lEdth5+wLWvl4K\n21vEt/ZQyzUUWhoCc25aGiOptAxVo8DkqQPY7DqyRg21YpnEToyJcQ9Ks47dG0t4xCyjXi9KrYL1\n9Uw3cLHVQaHRoLVa6NAmnOwgW69SV1mZefAYsUiGjsMBCOzu5hkctCEXBHyHDmH0+wmvxygXJLZF\nDTde3SEUsnLiRODDU8bG+wOwP4nUbn+YGvtJSKWqrK9n6XQk7HYtDoeWbLaORiMwMGBlb6+ITCYh\nSfDaa1vU6y28XgM6nZLV1QxPPDFKNFpiZsbDwkKc7e08CoWMra08Dz88SChkoV5vY7Xqus6ulRKp\naAup1UZsNyikRBoFicBoP1qlhM7pYOjcA2y++ho6qwXn+Dj+Y8dQaLWMz4aoV8rovH6cDh0Gj5ul\nZ5/H3t9P4OEnMIxMISZiuMeHESsVIteugsqATBAoxlMcCLhRhdusr2c5/uijOC0CnXb7I1EK4+NO\n3n1370MvmA82yH6VQf/PA1+IkU/JBy2az/ua7Mfx6KOP8vzzz//WihHoGmWZhAq5/RJSLU/p1gZq\nyc6lBbGbSaFVEQpZcDi6IVw7O3l6e02olDIEtRq700C7mEVlk5DyCZyjI8gNZsqCmZX1AhOPnEVF\nh045z9iYwOVrKS6+HaYhGHn7jQ38QRtyVRuHRYm/zwmSDIuqRbFdxN1joG/cwdrVFZqNJq+/voPa\nM447OIFytJ+FK1uItTpSs0U6XmFs2Mfu0hrOQ0dRBoZBoULtcNIoltCLItf/378jfGuZua88wclz\ncxTkNhr7FcYGzcg7Ii9+7zqlGmQSCZbupPjWnz/M8o3b5KQyUiNKMRZn67IT/ZyV6/NhBEnkpMEP\n+QSm6B52rZZ2pEVNCCIMBcmur3+YDdQoFIheuYLO4UBj+tmnFJ8Gk0mFXC77iNujQiH/qVXjer3F\nXjhPLlXi8tUYjabE1laOYlHkilbG8WktwX4HlTrEVxMgEyil0ohtOaERN+VCFbtFwCrVGD8xxNWL\n64QvXkTSGCkV2lTyeZb2wOkdZfjcOI3oFj/8mxcY8Cmp5rLIHz5JPpmhx29hJynjlX+6TLWtxhbq\n5dw9TrKZCjJBSV7lo5JqMnDwAEWFk5paRG6xofWG+Lv/6yW03gDDg1bMXj9lUY7/zP0UK69QzhVw\nTo6jG5zANTWNTdAjnZ7E41RTWlvENzWGyWGjZzpMu1xmZ3WP2t4OnYqcpWef594/+Api1YFOp0AQ\nZICAbzhAeHEDdXGf9KVV8rkKwycPMzIZIFbtgCB85ElbLgjUBBML+2mq1RbQ9QLa3MwSDJoZGLAB\noNRokCsUH50xkck+1Slao9Gi0WjhdOpZW8uwspIilarS32/l3nuDaDQCoZCFZ565QzRaZmDA9n7Y\nowWZTMbly/sEgxai0RLXrsWYnfVSLrcQBDnr6xnGxpzs7haYn49QKtSo5Ao88cQwqUQJrclIKNjD\nmEdOqCkhxDdoGIzI5HIGH3yAWjaDzuVCZ7dTzeXYffmHjJw7S82iJba4gD3gxjQ0Tts7yFtv7VJK\n5bFbzXhdQez9CWqpJLGlVbwHj6F0+YlvRQjNTiHKtSiUwsdet+FhO1qtgnC4gEwmo7fX/HNPCj+v\nfCFGPiVPP/00X/3qV3+jWjQfcP78ef78z/+cv/iLv7jbpXxmFFNZrl1YZW053o0B9xqIrd3ArvVw\nZWkfUaai0wkxO+tlYyOLy6Xn6nu7ZPciuO0qhs/eg1o7T92qpN0U8Rw9QVXvpRMUsB6Y5q//6xbb\nW2mGx30YVG3MBjUWg0BZJge0GFQdLK0UUmWdrzwWpN2ugqyDWqvE3eeBepmjj51gZTlBsaEkUWzR\nG3IT3c8xMOhCE7iXaiKGStYivLrH1OPn0HkDBF1e6sVJ7HY9uxcusPLiqwgKFce/+RSVeIzE5XeR\n27wcHBtAZXDw1//7S6Q295DrTbQbIqLYpJbLIol1rF4jYkRJx+Dg+sVVZnxjyAU59YbAjZtxDh0+\nSznyt9RyeTS+IHWTn2JDSXgpgtdr+HCjoVEs0igUPjMx4vUa6e+3srWVo9OREAQZg4PWj7jDNptt\nLryxzvzzl3AGnLzwzCI2vwdfv5diEfKlJmqTh8vX01y/FqVZLDJ37xh6u4VmukRiP8u5B0L0ejRI\nUoP84hVy+w1sg4M0OwKFZpZiWWJ82kut0WH/9iLRGzcJ9BhJxIsYNQrE2A5Oh5O2So/CrOPQ/bMk\nIzl8oz4C0/3YLUoW3rlDo1zAM9aLcvQIYnyPdq3GxvUl1LsZpk5Oc+XKPtdyFR4+P86tFy9gcDiY\n+qM/wWTRE81JaIO9/N9/v0Wt1qTXLkOW3mF6SIsQX8Xu96DyBpl/5odUKk2y6TL+A2MURDWV8Bah\nIyZMhu7129rKozXq0IoZdhaWsFmUFItt3vm75/n6f/pD8jI1MpmMUOijkQBdZ9t/McgqdS34P3CE\n17vdWPr6yG5u8sGEpdHnw/QzvIF+GYxGFTablmKxzuJigkKhjk6nxO3WE4kU6e+3srycptHoUKu1\n2NsrYDarSSSqnDwZIJmsUK+3EMU2Pp8Rm03L7dtJtrfz+P3dFtaFC7sIclCpFcTKLcoNOVpvgB/+\n41XUr21z7wNjhLwKmqUCW5dWqcTiNPJZFCoVlVQKhVqDTKXGqmrQ3F+n9+QxJh45Qy2yS3J7n9V3\nf4i9L4BeaeKNf3iNXFXGyUkdgZMnCZw8iczk4ubFZZTaCvKAFoWgZG0tQ7HYIBSyfmRouytAfrE7\n8eedL8TIp+CDFs1zzz13t0v5RBw/fpydnR2i0Sg+n+9ul/OZsHgzypsv3qbzvkX4lTeWOHQsSMAp\nMDLiIJerYVI1qdWaWK1aYrECiswuRFbY348ydHSKoYPTVHs9lNNpsrt7JAwG9kp61t7a5tr1GB2x\ngd6Yw+3UkE43GZkO8t6lMCODduy6NrWdDUhtY7S2aPumuLOa494HTpFbmCeysYF3eoKx3x1HaXOx\nGWmyvlXAb2mwuZ0jmW/Tappx2jQEzowSyYtEE2rMjQiv/tXf8uV//xirL72K0eXENjxMNZVi6/JN\nVE4vZSHD7WtbzD50gpnT04RX99EqOliG3LRVBnR6NX6fgYX31pid8YCuSjUaoVZrIlMqsPoCqMxm\ncBuY/uY3yezFqSgsaIPD5Io5wrsFms0Og4Pdp2BBpUJQfnrzo5+FWq3g5MkAoZCFYrGBxaLB6zV8\nOLwqdTokEmWuvjJP/NYtzOZDNIpFNm6UMVt06PUG6nUZcp2e9dUtGqJEo9Hm2vUEWjXc/8AQ2WQe\nuVhm6aXr6JspTDo5gUCQ5Zs7WAdG0PQNs7NXpbKa4Pd+f4ZwZQFN0EgsWkQpSAz0aHnvn17l9Hf+\nGJ1agS6yQ8itYWq0D5neyrPfvYl/wEPbN0ZgyoTG7aGT3ebO95+lWa2h0mnI7kZxOPQoVCrkRiu7\nsTr9YwES8QLvvHIb/72nUZst3L6c5PLFHUYGLWSKaRS1HLeqBX73K6dY+fv/j8FHrKilBoJWjswk\noGwUcJqcBII27Ooaa7diXLqU6Ppz7MuwWxwERwMUdnewmnXU9RqqsX2Gjw7T02P6qadtnU6JSiUg\nij92epDJwGL5sdeIoFTiP34cUyBANZVCY7FgCgR+yin5V8Fi0XLggIdnn12l0WjR12ehXBZ58cV1\ncrkG3/rWAaamXFSrTSKRIvV6i0aju/o6Pu7gsceGWF5OMz3tJp+vs7aWYXjYhtGoYm7Ox9pamt3d\nPO1WB6NJzcGjfRRyNdbWM8h1BppiDbEucvW1Rb781QMUr2xi6vGhHAhRy2aRyeS0Gg3KqRTF8A7V\n6B4Hhgao7cVZ/uHzVKstjHIlses3sIT66D88yeqFq/g7boytDPbRUSQxiqaewj11HLVHSbmtYXk5\nzdpahkajxeSk+xNfv88rX4iRT8H8/Dw6nY7Jycm7XconQqFQcO7cOV588UX+8A//8G6X82unWhXZ\n2MzRed8Pod0UaTYaLN+J0z/qxapMkS9lqMbaKBSTyGVtlM0KzZ0lwjcWQSbQfvk1tl5uMfs797P2\n1mU6Vj85QxwZWvbDpe7qnsPcTYT12UjHsgyNunH1OhkLqshfv4i2UUXSqrj4ym1GzlrwOy3sX1+A\nZofgeJDN119h/dlncU5P4ZudZebRI9yY3yW5n6Gt1LG0lEZtNCAz2NCbjER2szhGQkzffwyZDGyh\nPorpHD0jITKROFqnG41RR3Q7g2AVWLu6ytCjj9A7M04uW6bWUeEO9qK02Kk0BRpNiVimSWCgB1fT\ngsLbh86iQm81I5PJcIUCjIzMcPGdbUrxGpFkg4DDicXrIJvNU6s10epUWIJBdE7nZ3pP1WrFTz2h\n58Ph7jBltUrdOUx2d59mo5sPEuhzsLGRo5jKYQ3qMbn0CAolKrMZl1ZPXq+lXJPYWkrRG0hg0cuQ\n6k3iK2sEB1xozAZktQSTcyG2wjmUFiXHjwfw9zvIxLI43GZyOwL2wwNYdLD39pvo3S70ditr33ua\nzZvrqCxWps4/gG5uGEmTpoWC6ytV2s0Sv/8HPtZ/8BaJ7SgqlUBjP47V60DMpggMjYPZg2/YRirq\nwuCWOP6QlVSyTKlUI7xbpFnM4fL4qa+to9SrqJs8pNtW0h0zzkwJ/8QAycVF5O0CJpkOg9GE1Sin\n2pTz+rubLK115zlkLQV3Fja49+QEYq2OUiHHZzMS6LUyfaL3Y++Fy6VnMTPaSgAAIABJREFUZMTO\n8nKaVqvz/gbLT7cIlFot9qEh7ENDv9K9liSJQqE7D/STAqdaFel0pPddYZtkszXeeWePcrmB329i\nfj6KIMjp77dw/LifjY0sSqWcYrGB2azhhRfWaTTaVCoiOp2SWq1JJlPj0UeHqFZFBEGOzdw1RMvl\n66QzVexWC1aTAkVTicHhplrvUC03aMrVKKbPsvLOdRxaE54+Fzv//DSeuTmyq2tUsnlc4yPUczm2\nXniOWjJBuQqVpgzPyDB1CXoGekht7EFbpBSPM3T+PPFbtxg9PEZRDjuvv47z+Gmgm720upphaMj+\nkUHh3wZ+u36af2V+k1s0H3D+/Hl+8IMf/FaKkXZbwuKyEpwI0SiWSO6n0GsFtGYjTZkai03H4YAb\nd58HvUpCoWzQElNceu11WnURa7AXWbtJLhanUa5Q7agxtqr0uJRU02r6BrTsxlvIZXLk7QadapFD\nh/xMD6mRSy2+9z/+JRafl3C4gNdvZX8/j2UvRd+cm9Wbm/QfOcDtt29STdegVkC+uoraHUCeqHBz\npUS8rEKrE0glSpyc6kUvlWhtrNCOFqg6R7BOH8RkLjB49j5qLYGE0s1edJ9UVo5Tp8feayBXlkCj\nI1uR88gfPcbijX129yrYvBa296vI+mbplXTQrCOzeQkN9rGw02FgwIRMJsNqVuJ1d/1E0hmRXK77\n4RXNyem55z46mSg2twJnfxBLX99namT2cRSjUXbffPPDIUlB50SlkFCoVOwtbXLkyDQOjwWT04bR\nbSXYb8dgUNEXsgIyNjezNJMVXH4bQ+M9qDsVOp0GOocTg1mHQqOhmslgzoU5OTuB3OHEaJQjd5tZ\nvLTKQLAX73ETCzeitAwa/Ocexx9ykbl5jdTiDXoGhtDMnOHdhTyFjQVaSgN+lZLJUTOvPruAsj5M\nWxSRyboBdHK5Hq1KwmFTI9c00Uph9haT7GaVRONlZEKCYMiGTiNn+oCHUqmBTq+i2oGypCcXLVAu\nN7AMDLFwZYvDZ2do1WpkN7fwzc1hDgapFQq0dH7ypRIAOq2ATK7A6XPQaMuwep3oVB16gzZ8k6M/\n89oLgpxDh3z4/SZyuTpGowqv1/jh8OqnoVwWuX49yv5+t0a/38TcnBedTsn8fJStrRzDwzYSiRKv\nvbZFsVjH6dTj9RooFuusr2dQKmU0mx3Onx+iUhFRKq1UKiKhkJXbC1GuX4uxtJRifNzJkSM9KJVy\nvv/9Zc6c6cPm0JOK5dFoFMg7TUJ+Lcvv7pKNpDAr/LTUvRw/f4S1zQKrb19FYzayvrzGyPEDHPuT\n75BduMLCP/4TwVOnGDp3jkp0Hzpt6pkMJqcbqSFQyWQRehxEUi0mHj+Hz5Gn6dCgdThwj49TrzbY\neGsZsQU9iED3d6vZ7NBsdviMgnjvGl+IkU9Ip9Phu9/9Li+++OLdLuVT8fDDD/Ptb38bURRRqX5z\ndtL/Ja16ndz2NsW9PZQ6HdbBQdoaM7WGxEZKiVpmpP+wF4s7wuTREfQ6OcpEm8h+nnI7T6vdIaBM\nYzQo6DSatOo1suEIrgO9KBtF5IKc/oMT5DbW6dUVGf/qcbJVBSbHNqlYDrdVQCjE6VVIVBf2cc/O\nMXP2ENcubVPMVekb7sFkNSIYLeztZkGtR6VVk9iKYDHKie2lGP3dL3HpapKAcp/1lSRXL64zc3IU\nX9BOyNlh5cVXUdBkbyeDPLvP7LljKOwe9BYjqYKVleUUwek5tqLvkVovMTjiwhPQMnbPQTZyLeLJ\nOsFhHweOannlxWVWbmXp6THTUgY5fqafcLSGRaXjoYcsNCo11K0SDiHJ2j++i3NsjIFAkFSqgiR1\njeG2EzAxMc3YicBdE+T57e2PbGuoa2lOnJvh4gvXyEYShG8ucer8YaaODKP1BRGbbcxmDU6nnjt3\nkuh0SlqtDqGQBZtNQzZVRKXV4NE3UJe668xGrxcUarQBF1WVHvtggN35m7S2VogyxOpunbbaxF6+\nQ1RmwDkToFG8QCFdwnyklytX98lF02hGbEQLDTbeW+Br3zrCY1+epZQtMHX6IMpKilw8jcOpx2AQ\nMLpdRG5vEY7nWV9LceyJ0+zVZdy4sU0+H+C+0wMohA6zhwNEt6J4xoeJRgqcOTfKws0woaEZxkeG\niawv4xsdZ/CprxOO1Lk6v8rMg6MEPV6M9iY9rSzVRJz0Xh6L287odACbqECtEXCOjmL9BdtRSqVA\nIGAmEDD/3H/3q3LrVoKVlcyHrxcXE4hii8nJbpyDzablzTd3OHzYx/i4C4VCjr/HRDZbJZOpYtCr\n0GgU7O8Xsdv1PPnkKFeuREhE85QjYaYHu74weztpKmWRzc0cdhNMDuopRWOcOxMglnYT2c8zN24k\n5IKZKRfL7QadWhmvrkrvUD/v/PA9FCoFSo0G29AwhbqMmt6D1uVh4qkncY6OIpfDzltvYevvJ37r\nFvVsBnugl7qgxndkBlVeTnZ7m5s5NY9+6QFKkQiNfDc9xWLVUqhAo/Njke/zGT9RPtTnnS/EyCfk\nvffew2g0MjFx90OKPg1Op5PR0VEuXLjA/ffff7fLAbrT8tVqE4NB9UuZWUmSROTqVdLLyyBJ1PJ5\nEuubbIi97KdbtJETy8sot5t85al76TXViewVKMRTWG2W7lF8LsbCtXc5+uQ5Ju4/xsZ7N5ErVVh9\nLpQeA1i87C1sMzQ3x/ADJ9B7nYQvXOBbD7ooCj1Eri/QiWYpv/4yWwsL9N5zD4f/7M8QVTewroVx\nD/YQuPc+nnsljNWjY3hqnHq5hi/opJlP4R/wUlVaSUfDyKIlBvstrN7SsLsW48v/5hDZ1SsYdTIy\n8QpWPYiJfXav3mBg5itEbkS4fn2fHBZaKi8j586SXN3A6DNz9NFjvPBehZXVXYrFGgemPRw44OF3\nvzxNMl4iuRPFZLJh99gQpTK9vWYOzznJ3F4gv71NJZGgFI+TuHmTkS9/hfFBH/vJ9odx4VNT7rt6\nMviTxlsA1WSSkMtF33ceJRNJopR3cLkNOH02TD0/bh9YrVoCARPVajeJVqhm2bx0HV08jdXrJHhy\nmkrMTiEcRiaT4ZmdQ+YdoNqA8MVLPPs3LzN6IMDmcoS95X16pkYRTDZqpRoLtxIcnpxF/eobyJ0B\nYu/cRN5u4dYLNBRars8nCW8lkeoVjszNUL6xyMz500Ru3aHdkvCMjVAu16mnk4iVNm6HBqla5MDM\nMJIMpGYDXafEzkqMU08cR3fKTyWTQ64xcOvaFuG4yLuXFxk7PMwjDz+O3Sbxj3/1HJVqE9vgIDt5\nLdaajF6fjuTiIpVkFpXegMkgYNd36DtyFJPPh+IuPXpXqyLhcOHD14lEmf39IpFICbVaIJOpIggy\nVla6sfWzM262NtM06k0UCjlKBRw65CGTqWM2q1lZSRGP+xkZsaGopnn9zg71msjBk8M8dJ+H9XAN\nq65NJRrjzMlBXvvna7y0kkBjd3D6dB9SKsz/8hc/4Mj90xw71Y/ZpGR0wke1miZ29QqCWo3B46Ga\nyVDY2yPVp0GdzFCMRLH09xNdvIPnwAHQmTj63/73pG8v0qiLTD36OHmFg+iVi1TyNRpyAx1HL4pa\niQbQqLfQGTXoh4a4vVFDEORMTbk4cOC3b14EvhAjn5gPWjS/DXzgxvp5ECObm1lu3UpQqXTFyOys\n5xdOiVczGfLb2/C+7XR2bQ2twsrKnTt0jC4ENDid3QAsuaDAPzXKRmoTy2A3L6XdbNFpCFRLDeLX\nrnDk4cMEQ1aqySRGpxnfqSfA6MB3YJKeAR8mn5dOu417YoJCJIKpvMPetRfI7+5Sz+dRW20UYjGk\ndofB+++l44khyjSoLVomZ9rkKmAdGsOjqyN6VGQXb6D3uImWGqjNJmSCCpO2w3/3P50nHisyPOKg\nJVhZv1Wk4zDSKohIHSWdep3s8hKrV1fJxhQkK2kkmYItdEwfPINnyEZNq6NcWcdq1TA+7qBYrHN5\nPkJ/2oLXa6B/wEKf30SiLjAz48ZsUhK7cZPV7/4DiYUFlBoN/hMnAMivr9I7p+fA+Tk6nU432fQu\nY+7tJbu5idT+iSFKSSI0PYC/340kSahNJrRW6099bzdfBwp7e6x8//vUsll0JhNSrkX8Wp2Bc+dw\nTU3RbkssbZTZuRyn1ynRqeQ4++Xj2D0Onv/BApE76zSrZcbuO4bRbaXeUWAdn2T40UfAYUOj06A2\nW1AZjQQcWrSPzDJ5eACbsoZaLiIOHaLRllBPmDFrQKORsfnGBXw+L1pNnZrGx15OQSSSJpUsc/q+\nXpRSDUEhI769z8EjvTQqGm4uxskVOkwcGcLXm+X2UprbK3a0cy60fUMYFApURiMKtYalxX0mQwJx\nUx1ZsYnL2ebQERdiKkYposT2GWfM/DwEQf7+yjEUiw22t/OIYhu7vTtw3Ol0yOW6K+Vra1nknRa/\n//UJUpkaDVGiXLJz82qYxaUsNrsBo1HNrVtxnDYt6USZB75ynNhWnMROjNmTw5x7dBapUmDnapzC\nyk1Onuwl19LT7kh4vXreeeEWZpOCrYU1hGoWjVeHsqdF7voSsnqJzM4O5Xic3hMn0Og0mO0m0psd\nRp58CtvYFKmVFfaKGlaursNanqHpewmFLCym1CzeSTF3z2F27+zQbrWQZAr6z5yhFI2yuRJD19HS\n0loJGrutvLExO81mm729wocJyr8tfCFGPgEftGhee+21u13Kr4Xz58/zjW98g7/8y7+826Xw7rt7\n1GrddcFKpcnFi3sYjWqs1p8d+tQRRTqtFpIkUY7FaDebyBQK6o0a+4kMhUIdU08PSrWS6enuU4XJ\nokNQvG//rlRQl9npPTSNGF8lNn8ZtdmMf3YK19QU5UgUtbxD36lTCCoV+b19tt+6wP78VZwjg2hN\nBsrxOEafD6XJSksU6bREkhvb1OU6ZIKVne0c2VtZRoctnDnrJLUbIZJqEgwOozKZqeyHGRzuvgnq\nesw898MVkt9dZXjUhctlYGB0mPh2jGisRKfWwW7XMTgRoLC7haJZ4qGvf4k7awU0Bh1GjwubRY3b\nrqSaL2CT5fAFHJSaEtevx7m1mCDUb2Vk2MY3vz6O0mzBqpVz506KXCyJGNlmKDiNemeHWirF/rvv\nMvDww0iSRD2XQ6cVkAufj2Nic28vvkOHSK+s0BZFtDYbvoMH0dpsaG22j/2eTkcilapQqYjIxCqt\n3WUi8/MAKDQabIODQNc91D40xOpqmtXVNF51iVKiyaWradaubzAyE6T3wBixSAGTQUEtFSe7fIeR\ne2aRC258955FY7JwpGVnay3O0vwqkkrHocM+DFRAY+Tadod4pERiP4NCkLj33hD6wg7IBbbXEoyc\nmOHtm1WiqQx5tZp8usSdhRiOM30UIit4TvVz68oOVxYy3FipkogWCPXb+OrXp0C1i6pZohDrUM9k\n0Fqt1FMJNBYLtUyGdFNEn7zDhFmLmNln7Xu3GXv4/n/1uZ9/iVqtYHjYwfx8hHJZRBTbKBTdgLx0\nusrgoA1JAqMxTbPZJp+vsXs9ytyJfjQOD//r//wjIrEa7WZ3m7hYbFCptHj7zWWmeyXCa1EOP3CA\n+x6bJRots71bxmXXMHBwlDu34lTrYNDKia5t885+EZO/B2s4gtRqoVR0cId8FCJRMpd+xOGj93B9\nXqKYK6NRyzj91QeRR5fQPPAIi0kl5p0O+YSOZ/76NRxeK3qjjs2Xtrjn0Vmu3k7x8vNLqJWzeA0K\nFDoLTp8VtdFI1dXL3q0W8XiZcHiHbLaO12ugWm1Sq7Uol0WMRhVHj/qZmena3/+m84UY+QS8++67\n2O12xsbG7nYpvxZmZ2cpFApsbm4y8IFBwF3iAyHyAaWSSDpd/bliRGO1ojGbqSSTtMWu+ZKyWcLR\n18PKW7vQkdBpFcxMWvBoiqTW1un1WAi7dIhiB53QRJJb8B98AGtzkkoiTqfVQiwWiVy6hMHjoVUu\nU47HaVTrrF28zuL3fkglU2B7aZsHv/1v0VitNMpVipkiSB00Fiu5TJVYLMK+6ObWzTg9E0NcuxYl\nn6vx5d8J4c9luPBf/hZBkGF2mJE2FpmYPsLzF1JISi0WrwqFVsvLL63xR398mNFjU5idYVTacdwD\nvchVGmI3rjN67iyvX42ytVfB4bPTq7eQjOWZfytDS6bEbjcyPmTn2Re2uXw5gsGgopCr8fabW0yN\nWbEYZbx3cRe1zUEplScXyZOPNpibmKNx8XXEahW5QoHB50PndN71D6ufRK5Q4DlwANvgIK16HY3Z\njFzxs9/W2u0ON27EuHMnRa3WpLyzydCABWMwRGm3O39SjES6OSvv+2JEIiVseonyTpibGw2i8Tpi\ntcLCa1c4+pSFQw8dJbsTprCziS/gZLxXwcbb72KePcnFt9eZmfOTiWUYGPUhl0uMzA1z43aUnkEn\na+EsFpuLYZeNO+/c5JWXVvnaN2bxmwwkXrtCSjTSUcvpO+CmIsowqTvkCw0i0TKnv3QCl03JlSsR\nbry3TUtjRtZpsbkaZ3HRzcERHbm1DexqPy6LDI8PBLUJjB2qSgPV3DadZpPkXoxOp41SrUalVWIO\nBv+1bt/PZGzMgUIh5/LlfQCGhmyo1Qqy2Rq1WouzZ/uw23WsrmZo12vUo0WsRiUby5s8/NAAF+cT\nZLIN7C4jJ0/6UakEegIWvIN6oltRBKWCF55dYf7CClqrlZGpHnp7jGTCNaLhDBqrBY/biVel5fLF\nNAfOnsakbuEf8uMIuEh8//9B0JuQdq9x+sgQuv7juAeDaPLLNFUy3r6eIJ2t4xQVbG3WUeoN5LMV\ndrfS9A56ubNexu0z4+11sh0ucfgrY0zNBnC4uq1ESeqeAkWjZaLRMjIZeL0G3ntv/0O3WUGQE42W\ncDp1v/aZnbvBF2LkE/Db1KIBkMvlPPLII7zwwgt8+9vfvtvlfASZjF+o+pVaLT1HjhCZn+/2btNp\nzC47U0E/6XSdQqnNmXvsdHYWuf3POZrDDsxeJydm50huR8nux1AJ4NGMonZ6aTfq5DY3Ke7tUUkm\nqZXrmMamsbYk9hZXqVVEqrkSyGSI1Sbrl24w9Y1vsPPWBZrrOziHBtAH+7l9YQG5f5i1q8vUsg3S\nKjmlSpvE1j6njjjRJdfwmltUM1lSNzdICCpCniEsRjkHDwe67qu1ApVCk9s3wowErBx8fICVcItX\nF3M0M/uolW4mlG4q6StYhRZHjx/k1Tf2qBSriIUC7U6HhMPCwaN9pFNdUWc2qyjmqigFyGaqFPIi\n6c1tnCoNCq0WuUKgWpehCIzSNtxCYzRgnzqA1qDDMfqztyvuJiq9/pfyrkgmK9y+nUIU23TEJqVM\nnqV6g8MjY8j2w0jtNq16HYVWi87hAMBgUEKhQaktEN6Io7fbCB07SCWyTzKc4tSTI7gO6MluKFC3\nSrTXr9Jo6UmubrA8v0qP18CRg05qxQoao56XX1nEHvBREmUsLCTZ299gdMLL4SMH2dlKs5ZSU0jY\nCJx5GJvbzrVUjI4o0szEsRjUhEaCDI1YMaZuksqo2LidJ7+1iWN8HItNRz6eIZ0oYJr1YHU20YsZ\nBhxNrv7gWQSjFc/IIIcfOcbFpSwqew8WuZxSIo6zvwfX4ABKzc9vv4nlMqVYjGa1is5ux+D1/toF\nqlLZtTTv7TVx+XKE/f0ilUq3NWM2a7DZdASDViYmXNRqTSj3UI9sc6tQRFaXOHO6j2pThstlYGcn\nz87OPs1mB5XKwwO/d5r1lTgXXrmNXNbBb+iwenWDi8/neeKxQcRCBqPNSLWt5sCQnUIqy+ZWiqFR\nBw25BrVKTt/ZMyQ3tjA4XWiUHRTpbRQ9JmLLy7RNHnJJDRaPh0Q0RywpUlQ4CITMGFtt0qU2nUQV\npVZH/4iH8XEn04dCDA87Pvz5rVYtbreet9/eBUCrVSKXy9nbK1Iui3i9RpRK3v/Z8l+IkV+C/wM4\nCFznowm+/wl46P2v/wfg9c+4jl8bnU6HZ555hjfeeONul/Jr5fz58/zN3/zNXRcjJpOaYvHHWSM2\nmxaX6xd/yJj8frR2O57ZWQrhMG1RZGM1SX+vjuD0MK2NG+xGkljMGlRqgXI8TvG5H3ajzxMJHMeO\nEb38HpVkkkoqRXxhgeCpU5iPP8Q7r69QvX2TkYIVh1qHSlsndPoUTYURSQaNeoFyqcbMH/wB8a19\nBLWaN//6adK7EezuQeQyiWathlwhUMllcfd5KcciaKUWGqu1+8ZezFGudaBaxGYPUGs2yW+vkljd\nRKXT4T1hJ3l9hUKsl1dfvIPa7sE9MkQxleWdi/tMHRkkEq+RyYsk4kVyyTx+j5bs3v/P3pvGOHZe\naZrP5SV5ebnvO2PfMyLXUGZKKSlTUlqSJdvyomrbPRrUgmm0awboMQwU6tegFgxQmAGmMD2YqXKh\nqzHuKQNV3Xa5XJItW4u1K5XKfY+IjH1hcN/XS/Lyzg+mQkpJXiQrlbas908EGZHkiXuZ33e+c877\nvln0Oo1WvckdByMUy222Ngu0Gk1iI158PhmL1EVtKr3KgsuJ7HKTjadRBAuGod34ZyZZLLkZCzjJ\nLy1hMJmwhsO3TGn1VqJUUqhUFNLpGo1Gm24DGvUiHN6Nd3KSaiKBa3CQgWPHdto8g4MurmUzGPQ6\nvGEvudV1as0aFquEXjbRKFVJv/qv5BcXcY+N4erro7JawRlR2b8/xNyTP0JvNOB0m7nz8c9gC3bQ\nG/WcO73F5lqOekvgypU0xWKTzz40RCBg4/XX17m4IHDf/T6aHZFcTcTu6sPn0mM1donZm5z59n9j\n+J4jDAyPceXUEs1MClcsinPQzcy0n4C+QFkp0sy3yb7yUyJuO97JIGW1y/U3zrP7zl2cf+Y16nWR\n8Mx+9jxyL/mrF6gszzHx5S8jvU9y1yyVWH/5ZaqpFGgaOoOB4J49hPbvvyX3y2qV2LMniMmkJ52u\n3dA28WKz9YZr3W4ZkAE7StjDPb4Cr5+IU2kKaFqHeqVBNtNzsM1m6zz3XJ2QE6rFOrVcjsHpQeqJ\nBMlEm0K+hoYOrVFHJ+pROx3ERoGpYJs7947hsnSZe+lFfvaDFP1jYQ7/j99g+aWXKSfSxPoDFDc2\nQBR7wmeqD0VpUyppDI96mb+6TWutisVuotvRmN4T4tpCiWqtzeioG4vl5ranIAjMzoZYXy9x7VoG\nWdbT12dHUTpoGuhuCLCKoo5Op3tLrv3HjVuZjOwHLMC9wN8As8CZGz/7L8BfAA7gSX6LkpETJ07g\n9XoZHx+/3aF8pDh+/Dh/+Id/SK1Ww/JrqCP+ujh6tL83u1Bo4PWamZ727yw8vwwGWcY1ONhTe8xm\nOTKsceZSga7aIRdPI8sGorGekFdxbY1yPI5vaopqMklpbY301asYzWZESUJvMpFejSPKg2yvppCc\nbvJ5ha1CkcOHYqwtKlz4ySmUSoXxg5OMfXaKomJkK6ng9Js4/PiDPPu3/4jfYyTYH0RncYHVSrSv\nw659YZS1qxgmB1FqdbA48U756A8EGJ6dpLrc5vyLF8lcOo9okBge76N88SQmWSRfaKHW6iiNZVp2\nA1aHh3pNjyXgJ3nyJK6AE4NRj2yRadaaiKKOrY088xfX2H1onI3NMh63CZNBoz9q5o4pC7ZuCW/I\nhU6vR280Yh8cwjsYxRWSsAUeooKNxbNLFFZhb6zTmxvx+3sb9vvYxv8mQ5JENjdLbG9X0ekEPDYv\n5e11DDoN78QEwb17Ce7Z06Pz3oDPZ2H64CgZm4rkq/HKP61TqrfotoB6mbCjTUGno6V0SFxZQDTb\n8PdFaeoMbL7xBhuXFxEEgcBwjPAbp9kzMsVmXkRtVHE4TXTLKh63Ca1axGfvUrpyir39Vs6vw8pK\ngcOHo5RKTS6e2UCOuDky60K3fg5Z0hM/dYq9j42TOz7DdlrBHnIQirqY3uWhXqphCoTplPI4+vrI\n5es0OiLpTI3sXJbPHLiTQ196ALVeo1lrkrlymfLcRQwWC87+fiKHDr2HUVNYXaWaTO487rbbZObm\ncA4M/NwZnV8XXq8Zr/dm4bVms0MyWaFWa+N0mggGrUg2G+MzNpx+N8VEmtL6Ki++sko3nkMW7Qjt\nDpGQnrnXz7H7yDRqvYaotsitb2Jzx9C6MqLQQalUaJRK2F1utuaWeOWffsbdj85ybXWO+FYZyWJi\naUFPOfUMtpEJlLYd1R6gvb1C+tJVfDO7GR52M79eZnBskq1kk8ce30uh0MDpMHD3vQOkUzXQ9xKt\naNSOz/feQVSn08zRowO43TKaBhaLkakpH8vLBUSxl42Mj3vxeH5+C/u3CbcyGTkEPHvj++eBO3k7\nGVm78bUF3GzD+RuO733vezz++OO3O4yPHA6Hg9nZWV544QU+//nP37Y4QiEbwaCVTqf7K9F63w+i\nXo8tGMQGPBTxUSzUidcHaOdT6A0inWaTRj6P7HKhKgp6SaJVrVLd3sY7OYnJ6cTq91MVZLpKT/xM\nb3NgtFrpigaSBVhbLeIeH8dkNmKNBjl5scygN09pK87qi68wcGCCr/9vf4LW1ehrWjjz5hrlzTiC\nQ2K6DzzGEHqLBcfwOF13A4PZjHNymjNPv4bsCzG7x4VLHEcSu0xNO7n4j88wcGA34cEARl2H4vom\nVo8Tm0HCI5lQikWyWykKMQ+H7giylaizenUTQdSz/1AEpSOQzdfZO+Ohq7pxmjpYGin0yydRvR4e\n/uIe0i07xXILh8ONKOqo1VooikpqK0clkaCIE6G/tznV02mKKyvIt+hEfKtgMomEw3ZSqRqqqtEW\nZfY+dDdy2EXXoOEJWXfaM++ErGshC03cucs8+EA/ivkOsrkabq1A7uUfE7n7KE21519kdPkY+uz9\nXDgxT2ZlA0mWkPQQ8eiwCVUkXZ6azU8+UyEQsNI/ZEbWd6kXFIR6mbM/PYXaFbjva4+B2U61qnD2\nzXWahQIbokIi0GXA5sAWidJuNGhcOclnDsxS7HqRYmN0lCaXf/p4qjNvAAAgAElEQVQylXSO/hE/\nI26JZrFAcHI361loKSp6k4ntbAd3t0x1O0lT1eENhHBNqjSScRr5PNVkEue75kea+fx7rk2n2aRd\nr3+oZKTb1Wg02phM+p0N9pehXm9x4UKStbUS1WoLo1FkZsbPzISDeqGE0BWozZ2jkimiVWskF9eo\n1lVG7ppFUsuU4wl8Yh/HvzjL2noBrdslErZw+OgoueUVjHpwB9wMz8TYfm0Fk9dLt1kntbyBzRdC\nUXU36MerHJjegylgJ3H5KrJeY+j+Y5i8PqwtjaEj91BoGIj0t2m1OrjdJkIhG4GAhcOHTVSrLfR6\nHX6/5eeudQMDTlS1y/XrOVRV5StfmWRjo0Qu18DnsxAIWIhEfvsqlO+HW5mMOIGVG9+XgPcT5Phz\n4Nu3MIaPFN1ul3/+53/mueeeu92h3BI8+uijPP3007c1GYFeifLDJiLvhiTpCQTtSHcdYOO112jX\nagCYfT7cIyPE33yTbqeD4UZFRHa5eslIKITaENG5PRhyRpz9A4gGPSadDqPdTHT3OI16G6vLRqNY\npJ4vsHrlApdfvYjVbqaQziIZRKz77uH0hQRmoUV0XwybpBLfLLEtB5geGwG3Qmp1kcDABHNXk1z8\n8RuMHt6LrpQgYlBo16oULjqx9A9RVSUiQRfuWJj0tZ6misEk4fGZGRx08rX/+THWl7P0D9rYf7CP\nxV1B9EKXfKZMudTk7KvXuTZf4K67+5g6GsWh1NGVqoQPHMA1NMSkLFMoNHjjjU1++tMlSiWFWMzB\nnikXCbpEY3ZUpbxzbRvvszH9pqPT0ejrcxAO22g0eroUuWyVf/nuG5h1LSanQ+ydshPaNYnlhrS9\nUqmw/sor1NNpWsU81bU1nENDDO3Zw7X/9hMq+QoOwUYlcpByrYspHESR3IxM97P38ChKXcEmg0mv\nkry+wqDbxVDQy30PTvLGGxtEgxLp+QUCThOblxdoKwrBoI2xMKQ7On70k2WyiTzplU127Y4wd14h\n9MAAeqeHzOoF0qfnsF9d4rP/6//CWlGlqzfgjISJZ7pslmVmjs3iyyRRdSrUqshmO7GZ3YhWF5HB\nIE9fKXD59TlcXjNDAw7ufeA4mtalVa2+5/qZfT7yS0s3PWeQZQwf0oX3+edXyOcb2GxGZmYC9PX9\n4tmHdLrGG29sculSikjETiBgIZ2ucfrla6hxkfmXTtE3NcDmydNIdivDsTDXLkmkE2nahRwGWcfo\nqI/Uq89yV7Sfu48eoJgawEINQ6dI1mRnat8A4/fs40fPbdKWwlj9afRmE61ag+m7vKxtN4mvZRgb\njuKw6Tj1gx9TzpbR6QXUSoHpf/M4Vm8Mz/gQmUydF19cJZmscPlyB1Xtcv/9gzzwwCCxmIPt7QqL\ni1n0epFOp3tDQ0XE5zPT19ezmRgf9zI25gF6RpD9/U5yuTqSJBKJOG60qn77cSuTkRLwVsrmAIrv\n+vmXABfwTz/vBb75zW/ivFEGnpiY4PDhwwwMDACwtrYG8LE+vnLlCg6Hg8nJydvy/rf68ezsLP/x\nP/5HNE1jfX39tsRzq+Ds78dgNvdKzIJAu9EgfekS1kCAei6H2ukw+aUvoSoKjUYLzRVhYHaITTWI\nkxYmuVcR8HrNBII2cvne4tDtapQzBexyk/WFNTqtDvVqA7vDxfbiJrFYhq1UC52i0ey00JltdA0G\nrF2Fiy+eY2Q8wPgRK1euZdGatZ4fRrbK6Pgg688/iy9go1RuoQhWbC4/z/znp9h3/0Gm9vVTbYJ3\nbJTVc9f4/n9NUGsbCPklfvD3z3Hsy3dx4NgMTruBy2dr1Ast3nx9AbPPg9bV6CoKZpMOvTmEe3gY\nvcmEpmlcu5YhlaoRidhRlAIbGyWsFpF990wx4FJopd7eoCyB3z7xJafTRKfTpVpVCARsPPvsEpQz\nUEiis0pcObNGNDBB9/RpRh58EJ1eTy2Vop7JAGByu5ErFaqJBO6REcIHDmAYnOapJ+e5eGKhJzd+\ncA/JtptDBwKMHT1IdWme9QtzJLJFQv1+RKef4txZjt37IIoSRK/X4+j68FlV4peuMjXpI7OdJ75R\noOX1kErXQRNxuCzs2RsksbxFtd6hXr2hhVJrETl0iPObJq7N58jl6rRKBcJeBy8+fYlatcW//cpx\nTIJCeG8dU3SIjOalWOlwYaFKuiFj9riQrSKFushSRs++ARWT472JgXNggPLWFpV4HK3bRS/L+Hfv\n/tDturW13rZQLisUi03uuaePQqG5M6QZjdowGvU0SyWSmzmef26ZzUST7ULPo2V62s9gzMyVEwuE\nLX3k02WiYyr1YhGjWaK1epXHvjzL6TMJ3ANhpveEKZx+hcT8MtL6BiOSSMws0dWJlFIJwqEoer+L\nZgc6bZWXThWIBMfx7vWzX2yjq2aI2tvYdwXZ89ARslevklpPopfNeLxuJG+A9PIG9vAeBu0SJ05s\notMJeL291nel0uLixRQHD0ZZXi6yvl6gVGoiCDp++MN5KpUWo6Mejh7to1hsMjsbRhCEHWFBo7Hn\nzfRuf6ZPAm4lOXkf8O+BbwD/D/D/8nabZjfwfwCP0mvVvB80TfvN6uB861vfwm638+d//ue3O5Rb\nAk3TGB4e5sknn7wt5n+CIPBx3fN2o0H6yhWKq6tomoZ7dBTv+DilRJrrVzZpdCXqOiuaIFIo9Kh0\nwaCNqSkv7XaXl15ao1ZrU6u16NRrTLorPPt//38YjXqcATe5QguLy8HMFx6moPPw+uubHJwN0VHq\nbJ94nWsnriDqYHDUx9f++CE6Dj9nTqxhrW2ipJOMTQXQCxrZjTjGQAxddJLNrQrr81s4vA7uv9ND\nWxN55WKTyy+fp23xc/lKisERL0NRGUsghEXWcfdBLy//+BxbGyUMLi9mp4Mv/94M4e4m1WSSyMGD\nBHbvplxWmJ/PcPJkHIdDwmIxkExWqVbbuN0mHjnqo3D2dVrVKoIoYo9EiB05gmSz/dr34lbe93y+\nwfp6kWq1RTBopa/PQSpV5cKFFJWKwptvxjHkVjHSRCf02gT3PTRBwFBk+PhxzF4vmbk5Fp56ikau\nJ0+uE0UKKyvE7r4HpdmiIMU4cSbDyrVNgmND1EQHme08v/9HB+gP6KktXGD99RMEBiP4xkbZvnwV\nySQx9rnPcWJJpFlroCyc4el/eImAz4TTJqLqzez9yqOYw328+toGmxtFvvj5Ear5MpfObfLZL0wR\nJM21p58lcMdhrpec/Oina/iifhYWsph0HabGbHTyGSx2mYePBZkcc1BPpRD0elyH7mM72+GZZ5bJ\nZxt47F0o5+jUa5itEl/5t3cwuG/ifV2Y240GtVSKjqJgcrmw+HwfSoVXEAT+7u/O7Dyu1VpEoz1V\nXEVR0ekEdu3ysWdUZvPkSVYzIq/+bIFqXUVz+KjpesnSg/dFmHvpFNO7PLSKRayxAdZefZVcPIPb\nJeF36vCPjxA8cIDC6hqoHSSLmfzCPM1SiYG7j9DQjJhlA6lSlyvnt3GFvYzvH+HMxTxvvrrI+KiL\nvYMCpmqcxtJVzP0jqK4YGy+/TKNQwOJ2YPG6MFosmJxODv33/wbBKPH886v8y7/M3Wh5dohG7Rw8\nGOUznxni9OltdDqBQqHB88+vcPp0AovFgMMhsXt3gM99boxDh6J4vZ8cYbMbn5P3/bDcysrIeaAJ\nvHLj+zPA/wX8B+B/B/zAM/QqKF+8hXF8JHiLRfPb7kXziyAIAo888gg//vGPf2udiH9VGGSZyB13\n4J+eRtDpdgb1ipqdxaKNdruLpjURBHC5TNx1V4xw+O3e7IMPDhOPl2k0OrRaHRzdAv6+IAa9jvnr\neaxOG0abjeubbTZzSUZHPRgkA+1UlpWz19B3m6B2SaymWD0/x/3/bhqh1cLQddHZtrD4/EvY/D5C\n9x7nynyZ0nqH1HaTRqmO0WYFs4P50yuUyyYCuybJ5BS6apJUssqDX9hLvtQmeeo1lP7dzA7rmJkc\nQnbaGdk3gkOoUs8K9N19N66RESoVhR/8YI5z57ZZXS1RKNQ5dmyQXbt8eDwQDRjxhDx4Hn6Yei6H\nXpKwBAIfWC68Va9TyZUw2OzYnbd+SDqfr/PCC2vk8z1K6Px8lqkpH3fdFcPns7CyUqBYbFK4nqGR\nu6FPY9RjNeuhDbp3bMTV7W3q2Sx62UIlEcdgsVMXbSSzaRSLgqpqTB0/QmIjh1or0alViS9vs3S2\nyOe/uAejqKIDXvk//4Z2qYAt4KNUruPZdy9NXwx33904nBZyG3G66PDPzHAtLnAgBl/83DBz19Kc\nOZ9i7mqaYNDKayfTjI57OfD7f8DiUoGFpTQWScBAi0quiOC0Umro2Dc7QXlzA8x2OrXaDm3Z7TAQ\nGQyQTteJxyugdWnZTZhlkWDMS9cRJJ1t4veL75nlMMgyzhsVzY8SmUwdt/vtjbfb1UgkKrjrqyil\nEo1mL/E1GaGUy2COOihWVPR6kb139KEVUkihPtaWkgzdfQjPZpxCKkdwxIbVLrHy46ewuJ3YojG2\nzp4nH0+j14u8+eQrWPuHmH7sUU489VNatSYWu4zHqefwuMigI0SrKyILDfwjAxhGo7SKBRShi+HQ\nbpKLq1jdDhwOE0ajnsj+GTAYOXlyi6eeWuDEiS3abZWxMQ/z8zkOHYphNvfYM6VSk1qtTTpdR68X\nMBp71zuVqlKrtWm31fe9Vp9E3Gpq7zff9fg/3Pj68C1+348cp0+fxmKxMDU1dbtDuaV49NFH+au/\n+iv+9E//9HaH8rHAIN/cb63V2rRab1PlNA3y+SaVys0FPJ/Pgs/X21DPnt0mmbHyyDd/n/mXT9HQ\nbxOMeXGMT/PcWYX19SJ79gTo73dwda6EUi6jdlSMRhHZqKHWK5SXF4j/039BcvvoVCsMHT5AVxCI\nL6f4yXffILxnN7bBEbRihUq1Q6msIEf6kYUulaKK3w4TbSN6XRen20olGyc2HEJIrSKmVrBJEmJG\nwnfvCP5dh276W1ZWsrz88iqlUgu320Sh0OD11zc5fDCAmFqCYoHljIxraIjAzAz6X6JD8W5omsb2\nxctcfO5N0okiVo+TiWN3Mrx/HFl+78n7o8LqanEnEenFAcvLBUZHPfj9FkZH3eRydS7WyyjlMqIA\nU3tjWLQK7uFhTA4HWrdLJZEgcOAOrr/yJs1Wh65kx7N7L0o2TXnuEt47bLTTWzQNerLLCaqpNKOz\nExh0GplChWalhmewn9WfvYAr7CevEzH4Iqydm8dc0AjdeS/XdH6uF0IMjfej1iqcvlykUGwT9Rs5\nn6xx5P5xNhN19uwNgSBSqXd443Sa4fEZjB4Jp7vGxvVtuooJh81IrVzF6QoRHQ6SVBv4PTKN7BYI\nAs6BAcweD4IgsHt3gEKhSbPZwRfzo6oac4slttMKRqPI8LCLgwcjO3b17bZKIlGhVFJ2XHo/Kit7\nk0mPw2Ekna7f9HwlnUXXaBD0BzBKvc+Lyy5g95sYGXeydzaGsFWmmNdx9c15kq9fJH1KYvrRB7jz\n6EGKl88w/7PXWF1IUKt3ePAPHkZoNbE6zGznYXW7Tl97i75ECos/gC6dZNfsEIntKnM/eZHp3WGM\neh317U3e/E8/Qi8Z8Y6PM/Lgg4zf9zC5+Xkq29sA2CMRxNAQL7ywzupq/oZsux5FESiVFGZm/HQ6\nKna7hMEgYjYbKZUUfD4z1WqLWq3V001BQ6cTcDpvv+XCx4VPRc9+RXzve9/j937v926rKdjHgWPH\njvHVr36VQqGA6338PD7p8Hhk9Poed7+3YOgwGnXvqwDbaLTJ5xv4fGbsdolUSsJ/zwMYx4sYLFYy\nxQ59fRXsdiO7dvkIBi3o7hji6tNWGrUmdruR/j4nbpuud/LejmOUjGy98jJbr77EyOP/HZnl6xw6\nNslWUaNa62AdHOXQPg9to4rH72azkmImZkJrtxCVMh6PGSW5RcjS4I4HdlO9eILqDaqyPRJ5X8bD\n9naF1dUijYaK1WogEunZwJvUGrrSJma7EaXUInnhAjpR/MCaEpV4nDd/8DM2VrIAFNNFasUqosXK\n5N6BD3WffqX3rby3A9xqqShKT+XXaNRz+HCUgQEn6c1+DJ0qNqGO3e/BecOtVu10qGWyLFxLou+b\nwh9y00wn2JxfhHKOUqqEbekCd953mPPnE8T6vZRNXcYiIiN9Mn7Zz8aJE4SCZmqJbexeJ9ZgkEw8\ng0UCn03D2C7h9IZ56KER5t+4SHojg6iTmRo0cf6nr4E9wGWnmWKmQsTZYXD3KBoCrVYHWdIhBSwM\n91mYP68nsZmlr99NvqAwNmwnU2iz//49hFwVmvogzv5+3CMjKEqHfL6BwSDywAODrK8XAYEzZ7Z3\nqPStlsrCQo5w2MbwsJt2W+XUqTgLCzk6nS6iKDA46OLIkdiHTkj6+x0UCk1sNiP794dYWMju/EwQ\negJnTmOY/JU01m6Bg3cPcfVSgg46hsaDHLyzn4EBJ0pgP83nX2D78jyVahvBZOD0U69i6RSpbm+T\nT+bR6QSCQSvZy5ewBfw0slu4FJX+mRi4HYh0OPLILJraodtuUc7ViU0MUVy8jKC2uP7DH2Jy2pGd\nTnQ6HclLl+i75x4G77uvN8wtCFRViTNn4mxvV9jY6FVORVGH221EFAX8/t5aIUk9UbeVlQKNRpuJ\nCS+lkrIzVL1vXwiv10y1qtzShP03CZ8mI78CNE3j+9//Pk8++eTtDuWWQ5Zl7r33Xp599lm++tWv\n3u5wPnaEQjamp31ksw2Wl/NsbZXo73cxOFjF7e4lKtBzEj1xYpN8voFOJxAIWLnrriiVipPvf79M\n8loao1HE5ZLZvz/E4cMxJEmkmQnyhT+4n7XTFyin89hkAd/wINmlxR6Dp93GGg5T2tyiWa1RWt9E\ns9Z57H/4fSpSiBde2uDslRKCACsrm2iagCSJ7J208+CxKH0BHfV0iuZWhtQzZ3ry7QYDtlAI79TU\ne2irmqZhNusxGvU0GirVaptarcTsPh/GVhGL3fjOXya/vIxv164P1KLJrW+R2r55fr2QzFHYztCa\nin74m/VLEApZWVzM8c5xFKvViMPxduxGo56+PufPNWPUG43oXT4y25dQ1TyptRR9ExFcATfLq+sY\nJT06TcVvKHP3uIAQClLMGElcmiMXcnLq1DYjgw76QxGcfUk2T5zAOz5OKGhFlGSC+yaRvVZsu6Nk\n1uJE6tcY2D/B6deWuPjmKmqni33QTLerYXdIDIz5uHZxi5MvL9BstLj/4Skefmwauxk+89AYly6l\nMJok/uAP9+GSVVomD4GQHWffIO1hFVk2cHUhx7lzSSRJ3DmhHz4coVhUMJlu3hK6XY10usbwsJtU\nqsb167kdkS1V1VhZKTA46PzQA5XHjw/RaHQwmfS02yqVisL6eglV7RKJ2KhUWsTNNipNGf3iNpF+\nL9HP9GOODREeiSLLhp4/TblFvVTBPxQhWU2hKCqyXiC3vIJvMEpKEDGbDdjEJu2qAOEgSqlCenGJ\nVqnI8EMRNKuHp55cYHW1iMWiZ9++EPtGhrnw7E+QxTaVUp12o46m9P7Pi04f8bU02cUmwaCVaNTG\n+sUUuVwDq9XA1laRAwfCPPPMEg6HEbPZiN9vYWLCiyjqmJjw4vWaKZUapFJeHA6Jer1DX5+dWMzB\n5mYJj0fG5/twTKXfNnyajPwKOHHiBBaLhZmZmdsdyseCtyi+v4vJiF6vY3Y2zA9+MMfmZolarc25\nc9skEhW+/vUZpqZ8N/xNkmQyvXKyqmpsbZVZWSliMOiIRu2UywrVagtJEtm924/JpGd5OY8mQHBq\nFIvXib7bQegodHV6kvPLWO0WGtk0nrExRMmE7HJhtNvx7ZlmebvLYjJOJlNncNDF008v0u1qtNsq\nhw6GSVxfYzIUpbkWp7i4SC2Twezx4BoYQG02sQQCRA4efM9AYqulIggCjz02zlNPXadQaOLzmTl8\nZwxzd+0910fQ6XpH1g9yTY0G3q3oL4o6dKJ4Sw2++vudjI9Xd1xfbTaJ2dnwB3Ybdo5NEp5cI7W8\nSVft0lJFxh75HMbQAEaDgF5rs/jUUwjtKpHjn0PJNFnbLBM6ZKCSzXMhm8Pq93Lk6DHq2SzmUISG\nLUa+YUAwBPHKDjrlNnZjm/ziInajja3FTUwGgY7Wxe02oe8q3H80RjxZ56WfXgZBINLnJrm8xUv/\n2uIrX9/L0qVVrLNeZElg7ifPEhwbpeESqTU6LC3l0et1FAoNTp6M76gcj4562LMnwLVrWQYHnRiN\nIq3WzXMKdnsveavVWrTbN6t9drsapZLCh4Uo6rBaewmvXq/jnnv6mZiooapdTp3aJputk9UJ+Pv3\nY9LVsEatBAdDmD09Nls+X+eNN7ZAbdPerlGvtxkYcJHJ9Cj8gYEQXcmMPRqlnU3QyhVwRMP4p6bZ\nePMszVoTBAHvzG6eO5Nhc7NMvd7CYNBx6lQcpzlCdM80Qr3A5okTmIM+dF0Foy9MoqJHt9VgLdPk\n2rUMQ0NO2m2VM2cS7NsXxO+3YTaL/OmfHqFSUXYG4IeH365O9gTdzIBANGrHYBBRFJXl5cLO9fld\nwafJyK+Af/iHf+CJJ574xLdo3sIjjzzCn/3Zn6GqKuJvkCnax4VEosqlS2nm5rJkMnU0rTf8+Nap\nplJp7fD83yppC0JPjOnq1Qz1epuxMQ9Go4iqdkkmqzidMufOJfB6zMgthaUXXqNZLOKPeokdnEWO\nDKDr1nGYTaDTccc3/j3y4CSWA/exkNLzL/+yiM9n4dix/necTLs4HBI6sSfCVC7WiHoNNItFVEVB\n0OuxBIMYLBZEo3GHtfLOz7HRKNJudxkZcfPNbx6mVuuJSEUiNnztNulLuR3DOAShRwM2fjDHXs9g\nP/1jYeYvbexIHPqH+/D2h9Drb93ny2TSc+RIH+PjXlqtXp/e4fjgPXhP2Ivv8L04xnPoBIGaZiLb\nMeDy2cmtbWBoVXF4bWgdEza3g+TJVSYOTdMwumiLJkySyPZ6jsqEg4kvfZmVioNTLy1QrdQRNlIM\nzpjZY2lRTdew9/ejZeNMHxhi8eIKRlnCEQlhstvYPdtH6icLTO+JQLdDp1qlnC6yVK+iPXEHsZCJ\nysY8+VKNkb3jFKQwc3MZdu3ykcv1ZmfOnk2wvl7E6TQhCAJLSznGxtykUlX27QsyPOxifj67c8sD\nAcuO74nNZsRg0N2UkPTmGj7YIPMvgl6vIxSykUxWdhKmblcjmW2REgzo3FaGbiQimqZx/XqOQqHZ\ni7VvCHEtST5fYaDfgaAXce3ah85goFZqYOyP4vaYEY0Sy6+9SXDPbvruuhNJNtK1eMmnU1gsRlS1\nSyJRpVhsMjXh4ejMNOVEktHPPkr8wgWCQReaO4J3ZJpqV8br7VFv19aKTEx4d1yhR0ZcOBwmTp+O\n75jaFYtNDAY9sVjv761UWphM+h3hs7fuE4DDIREK/W5UReDTZOSXQlEUvv/973Pu3LnbHcrHhv7+\nfvr6+njxxRc5fvz47Q7nY0e93rPofucgXbvdq34UCg3MZgNms4FqtbUj2mS3Gzl8OEaz2eH8+SSy\nrGdw0InbbcZuVyiXFfR6Hc89v8L+cTs1OUBbs7NWVCm/cg6r183U5x7CZjdhDQYx+YK8fjLBWqFD\nqdZg9+4gNpsRUdQxPu5he6tEq9FElnRoOhXv5CDhqJHK6hKtSoV2s4lnZITE4gbFShu330X+X3+K\nxazHPzaKZ3wcgywjCAIzMwEuXdymUm3T6XQRhN6JLeTfg95gIL+8jKDT4R4exvshnKrNXi+HvvYo\nzug5EqsJ7JEIg7O76Rvxf5S37X0hijoCgV9vQTca9eyaCbG2oFLP5hh0dhAaWaSgE12rTiGh4b7r\nM0SnhhG1FnfYB3jjbIbUhRTeiA99p4Gm0XOCdnpZmFdYKVopFBtUqwo5JYlJMjAUsGMfGqeR3GLK\nZSMydIRCSyI8Ncqe2X7CMSde9zql9fWbqNB2txWDpKfsHUM3YqUWrzK/pmCzNTh4MMK1axnW10vc\ndVcMVe2iaaAoKiaTHk2DSkXB57MgSXoOHowQDttIp2vY7RKxmH2nMuL3Wxkf97CwkKPd7s2MDA+7\nCYV+fXr3++HddG9NezsvbuTzJK7Okzy5iMvlReeLUldD7Hr4PtLzCwiCgGdsDGN0iOERD7ZYj2Uj\n66vUFy9Rz2aIr66hNFvs/erjKA0Fr89OMldgZaVXlQh5DYj1PKV4gW4pz9Dx+xg9fhSLrKNmChBv\nONCJIufPJ9jYKJHJ1Pn858f4whfGOXNmm7ExD2fOxLFYJKxWA6VSkzfe6Bn2eb1mPB6ZZLJ6wxTQ\ny5EjfczPZ8lkathsRiIRG9vbPcn7UMj6ia+SfJqM/BL85Cc/YXp6mr6+vl/+y58gPPHEE3z3u9/9\nxCYj3a5GNluj1epVF97pf+NwGAmFbqaeSpLI4KCTWq2F3S7h9Zr54Q/nmZ/PIQi9U6PTKTM46ESS\nRGq1NqurRaxWI+GwDUHoLa6bm2VsVgP2UJQLP3qJRqXCkfsmcPT1obPY8U+PYbLb6XS6NBodgkEL\nQ0NO0uk6qtolm63hkDpMRbosXdqilVMwmc2M75+lb0gikXPT98CDdGpVtq8tUNOsOGIRNs5coNA0\n4nSZGCsUaNVq9B05QqtWo7M5jyezhK0r4h4dxzs2uDOwG9q/H9+uXSAIH7gi8k64YlEOfz1Kp9Nr\nC/22LaxGpYi0eR6dCoVraxRXV9AHYlSwYzJbcYxMcnmpzJC/C7UK9a113A43stOK3uxndNzP4L4o\nJdVK/MfnWV4p0O1qjAzYECtp4pfKeIdEghE3aU2gVqmye78P3/go3qGBnWrWrr0xZu4c59KJeQDs\nLgsPPX6IhmrkZz9b5tVXN7BYDHi9ZhKJPA6HDPQ+d4lEBbtdIhKxUa3eoDIbdPj9VkZG3Oh0ApKk\nZ3jYfVMr4a1qml6v4447IvT1OSmXFaxWA8GgFaPxo99GPKEErPAAACAASURBVB4zfr+lRzu+AYNB\nx8CAk2a5zNpLL1FOpEgvpajXrhMcG8A8fYiVip3RYw+zZ08ATWOnBeSYidBVmpSuraIU8/TP7mXk\nwQfpIKJ3+clk60T6Q5y9mAEEomGZPksVYy3Fj374M4b7rNiWtzj6776O0CjTlJ2oDYG5K2mWlwuo\nahdV7XLhQhK9Xsfu3YEbw+8ikYidubnsTjurx0ZqkkwaGRpyoWkaGxtl/H4Lx44NUCw2uXBhmzff\njKOqGq2WyvS0nzvuCGO1fnRVqN80fJqM/BJ897vf5YknnrjdYXzs+NrXvsZf/MVfUK/XMZs/OaI7\nAIrS4fTpOCsrb80SGJmdDe8swE6nicOHYzeGWAuYTHpmZvx4PDIWi5FksqcB4HCYGBx09obZRIFW\nS0VVNYaHXVy9mkHTNCIRO0NDLkRR2HHmXLieZ2Kkj+mvPIYsqgxPhfE5DTht+h0nXL1ex8yMn2vX\nMjz77AoLCzn8fjPHjw+TnV9AKia4/4ERavUOdruEy5Ln+ptpVq6kwWAiEnOimX1Ex0doZLOU2xKt\nVo9BUa0oSOvr1KemSF+6RG5hofeeQPXaGbx+K7gGd67XB9UT+UW4lW2ZW4WuqpK8eBGlVMLs87G1\nsoxSqVGqbZDWPKwsZhneaKDvm8DYKGCrrXP3vQNsJDsoSoeRCQ9HH7uj1yLKN5AkAxaLAb1eQCgn\nSWxuMxwZ59obC+SDVmYePkq1bcA5EcU3FLoploERP0/8T8e5fnSSWqmGfzBIOqPwn/7zRdLpGmNj\nHk6dipPPN/D7LTSbHYzGXuK3vJxndjbC8nIBTdNQFJVdu3w71ZB3o1hsMD+fIx4v43SamJz0EQ7b\niEZvvReKwSBy550xLl1KkUxWkSSRyUkfsZiDwvJST/PFIBIKWVldLZJe3mTX1C7ymozPZ36PC26r\nXqdy9QwXX72G3SxgMzSgmSJ091HKipFKw4TQ1Xj00VHW1oqMD8i01udYfPUUFpuMbDHSzmyz9Oxz\nhPbuYXB3kKZY49VXN4CeiV0kYqdUarK6WsTlkmm3u0Qidmq1Ns1mZyeWQMDCxkYZnU7AbNZz+vQ2\nuVyDZLLKww+PUCo1eeaZVZrNNpVKi2q1xeJinmazw8iIm9FRzy2//rcDnyYjvwC5XI7nn3+ev//7\nv7/doXzsCAaDHDx4kKeeeuoTN8i6vl5ibu7tvnippHDmzDZ+vwWbTcJo1DM15aNYbN7o2fb64hMT\nPjweM+l0pmc/39WwWo1omkY8XkYUe4vLyIiLiQkfBoOOgwcjO6ez3qBgmKWlHOU66C02JKlL34Ab\nj0tCJ4okzp9HL0nYIhEsFgNra0UqFWXnJHvlcpK7BvXQbeOkgNfSRa0rpJbyKAYn2+tZ8vkGmwsy\nTp8Dw6CZfLZNo9FbDLWuhtrV6KoqrXKZ8ubmTdem226TX1rCNTjIp+ihXa/TLL7NBuqqKhoa+WSB\nmmymK4hogp5ksoYiNLh7zIVDqjNwwENXE7Da2zutDrdb5t57+0ina3TqNUpLJe59YBJTt4zJ70A0\nSSRW4sjj+3GF3mvYBzAwEqBvyE9qdYurJxfIrxcZi9qYm2uQzdaZmfFz9myC/ftD+P0Wut1eW0DT\nBKamfNx9dx+tVq9N4/NZ3neIWFE6vP765k5lIpdrkErV+Mxnhnb0dW413G6Zo0f7qdXaGI0CzUya\n1MULNIpFLH4/zWLxRmVGJJtt4HLoGT04uDPj8k7UMxkMrTJDQ06ymTrJKuRyFeRphWTXQaPRQZYF\nwmEb7bZKyNXh3NOX0apFwkEr5k6RXHILTZtBNVppCUZ27bIwPZ0hk6n3qPAmA9lsDbdbJhCw3Ghf\n+TlxYhNJElFVjaEhF8GglQsXUhw8GOHVVzdYXy8BPYbeK6+s43RKxONlZFnPtWsZBKFXyW02O5w5\ns43PZ8bp/GT40bwTnyYjvwDf+c53eOyxx3b8cX7X8MQTT/Cd73znE5eMbG9XeLf6eKXSolhs7rRr\nPB4zDz88QipVQ1E62GwSLpeJVKp6Q/9AYnLSy8mTW2ga2O0mYjEHHo+Zzc2emdyuXb6bpJyDQRtf\n/eouLl5MUi4r+P0WZNlAw2Ags7lKdekaqD0zLcwOTOMHaDQ6jI97EQSBclmhVG6BO0xpo8K1pIGh\nAQc2MUdHaqMzylQqLTStx3zw+DrUOiK4wmhrcQRNw2wxYrEYsPj9GCwWtO7N7AjoJSSf4m3oZRmj\n1UqrUkHTNGzhMLmlZXSSREsFg2TAMTTI6qqGRTIhB3x0cklauTRoGuahO24aGj54MIIgCHSbDSpr\nJqROGb1tgPnFEsliE6fNy8xM4BfqS5Q3Nzj//R9z7uQq6XQNs8PG1x85xrf/cZNg0IbHU8ThMOHz\nmclm60xP+9izJ4jNJtFodHA6Tb/QYC2TqZFM3myUV6222Noqf2zJCPRUoa1WI+mrV9k+fRq11aJZ\nLNIoFokePIiayeD1Wgj1+RjeOwhmM9ev56hUWni9MuFwz9tGU1XQNLxeC16vhVKpSaul0mq04Ubh\nT6cTmJnx4/WaycYziHY3znCXoE+mkhfx3hHDObWXcluiVOp5Gx04EObcuQSqqtHtajeSzX5GRtw7\n98/tlpma8rG9XaHV6pJOV5mZ8WOzGXfWCru91+ZNpapIkh6HQyKTqaOqvYVKknoVxUqlRbmsfJqM\n/C5B0zT+7u/+ju985zu3O5Tbhscff5xvfetbLC0tMTIycrvD+chgs7139sFoFDEab24hGAziTkm6\nXFZ44YVVkskqnU6XVqvD9LQPt1smHq/Q12dn//4QpZKC3S4Rjdrp63O8h4EVDvcsxHO5njvu4mKe\nqFdH6rXXsRja9PU5WF8vksttM9i2kkhImEziDounUGhQqGikkxU2qzWW55Pc99Ak0UOjpHNtYuOb\nJNfTGE0SI3cdwBAKU+wU2fvgXRTXVjEZdVgi/YRnZzE5HJi9XspbWzvxCTodrhuCX5+iB1GvJ7B7\nN0q5jFIq9czxLBaMFY1WUmNgeJTNkgmj1GF4/xiWiEhWtKEh4PHb8YyN3fR6druJw4ej5DNVNjNX\naetdPPmjZTLJIjpRh2p2YwomeeCBofedrel2OqSvXCGXKJBMVimXFTY2yhhs5/nSF+/C57cyMDDN\nsWP9VCotHA4T0aiNxcU8ly6lgN6A8p49QQYG3v+g9dbm+m7cDnlypVIhfeUKaqtXHZQcDlq1GtVU\nCtnjQVNVgnv3oskOXn5pbeewIYoC4+Me7rwzhuzxINntKOW3Nn+J0Ykg5kiQbFrD7Taxd28Ih8PE\n2bMJ4tttBo7ey9orrzO3kmB8Vz+24XFyxgiBsAezuZdo9JIKiaWlHNlsHYfDxPx8lqWlPIcORQmH\nbTidMvv3ywwM1Emna0xP+3E4JFZXC/h8ZmTZgNcrI8t6ms02kiQyMuLeqWY6nSb27w/faLmJH5mj\n+W8aPk1Gfg5efPFFTCYTd9555+0O5bZBlmX+6I/+iL/5m7/hr//6r293OB8anY7K+nqJra0ysmzA\n5zMTCJhJpXpsGUGAwUHnLzzxXbmS3imn9iCQzTY4fLgnvOTxyL/yIiGKOrLZOqlUTwtBT5t6uUpN\n7WCWDSSTtZ6zbq2EzzfAwkIWp1PG4ZBQ1S4mq4x7fAKlVKLbVUkqdsYGoly4dB5TbISZXTPorA4s\ngyE6nS733DdCodDE4ozSaXdYF80IiQ7THj2Rw4cRzpyhnsmgE0Xco6M76qOf4m3suD6nUgiCQOzu\nu8nl6hjniyRyHWJab4OfnvYxP59ldamIBnjreuRoh9i7PloWixGLxY37cw/w5H89QyZdRWeUsAWD\npKsGTp2Ks2dP8H2ZQB1FoZwtoHW7yLKBZrOD2WygnC0w5DWye3/4hgX924nGxYtJ4vEKLleP0ru6\nWiC1lefOfQ6srQyOWAxHfz+ivrcluN0ydrt0k4aIwaC7ZcyZX4ROo0Gn2dx5LAgC9kgEyelk4OhR\n9CYTks3G/Hz2poFXVdVYWircYPw4iB05QuLcOVqVCnpZZuDYNKbIAGO1NjZbT5Rsba3A9XNLlJMp\nik4HndB+XJE2asDBU69tU24scfCQwhNP9Fpoen0vcVDVLoVCk2SyupMIXb2axuORKRabJBIVOp0u\nXq+FaNSOXq/DZOopAL9FTQYIBq1YrXpiMQfj4x727w/SbKrIski12mJ01P2xVqY+TnyajPwcfPvb\n3+Yb3/jG74y2yM/DH//xH3PgwAH+8i//Eqv1t5Pzfv58kosXUzsnPavVyMGDESKRt6zKrQwMOH+u\nAFer1SEeL9/0nCjqUBQVt1v+UNoVxeLbC1ALCavLTiGRpdFso92I0+j2sdsTQNM0rFYDAwNOhodd\nJBJVjBYzRouZdlslla5z9fQS4bCD/j4H1UoLnd1OOl0nFLKwsJDj+vUs4bAds1kmk6hRKvcs2j0e\nN0MPPEBqPUWp2qFkkDGWO3i9H54580mFxefD4vPtPDZ7NHQWB/JSgUajRbOp8vLLGwgCxEZCZLN1\nKpUW584lCAQsO6yTWq1FPN5jVHg8JioGN46xKTR0NDoCzVqHVKr2HvGxt2CQZQw2J/VGh/ExN8lk\njXJFYfLwCINjQfr7HTuW9W8hkahisRio1Vq88soGlWyBVjFPIT3GXYdCXHhujthkk4kDo1itRmw2\niSNH+jh7dptyuafMOjXlIxK59cOr74bRZsNgsaAqN4uruQYGbrofpVLz3f+UVkvdqTA4YjEsgQCt\narV3DW/4Ur2ToVLaTpJZuI7W1Wi0dWxni2wXBT7zkJ2RqRDxeBlN4z3tqqWl/I79gNNpQqcTWFzM\nY7UaSSQqxOMV6vU2/f1Odu3yMTMTwOEwcc89/Vy5kiafb+B0mpie9qPTCZw6FadUajI25qHT0dDp\netUci8XAxkaJSOSj8wT6TcEn66/5iLC6usoLL7zwOzm4+m4MDAxw/Phx/vZv/5Y/+ZM/ud3hfGAU\nCg0WF/M3lZyr1RaJRIV77un/lV5DFHXIsp5C4ebnjUbdTmunVmuRSFRp/P/svXmQXPdZ9/s5ve/7\nPvtopBlZ+2o5kiw7JPFCChPIUgnJLeAm5IbVcAPckPu+RfGmKhAIwQRC2F64EByomBCcgLFlvFu2\nbMnaR6PZ96339fTp093n/nFmWmrNonU0WuZT5bK6+/Tp35zfWZ7fs3wfUcbrtRAK2ZZVF/X7LXPl\nvpDMQ3jnDoRjxzCbQKvXEuxopWwPMTWVpbPTx86dIWw2A8ePT9VEp+bluP1WmVOTMeLTKXYc3ERF\no+PMS2+y7UP7+cEPLjA1lcPns3D69Cy7d0eIROwkk0U1r8RroX8gyVtvzSJJ6sPPZjNw6FDLqjx4\n7iQEQWB8PEMqVeT06RkuXIiRSBTnvGwWtm8PEY2qBomahKmjUCjx2msjjI+rD7VQyIrBoCddUKhW\nLxof8+GTM2dmMJv1hMO2OQXVIjqdhqZd2+k9Pcr08BTBkI2uHW2E778fq9u8wBABtfN0Pq8aRpIk\nU8yk8QVc9PWlyCZy+C0lTp+cZCol8P4PdGC1GmhsdBAIWMhkSpjNugUVKrcKvdlMeOdOJt5+m1Iu\nh6DRYA2F8FwW/vL5Ll5T85jNurqwrM5gQLdIfyYAWRTRZKM43DbS8SxaKuiMelx2BatVz9NPn0WW\nq5w/H0OrFejq8mE26ykU1HJ/Saqg1QqkUkVee22ESMSO223kn//53Fxyu4Hu7ii5nERLixOHw0Qo\nZCMYtCJJFYxGbW3x+9hjHTUjUBBUsbqBgQR6vQ6DQcvkZJb772+4q0I2K22MfAPYBbxHfQffnwf+\nX+BN4DMrPIZr5utf/zq/8Au/gMOxdjMG+J//83/y/ve/ny984Qt3nHdEkiqLrjDn1R2vhCyKpIaH\nCeiz9I6NobO7MLlc6HSa2s0ok5F47bVhpqZUF63BoGX79hDbt4eW3G9Tk5OODs9cg7oyWW+AB/6P\nn8ZhLBGcKDCV1jERUxNJ9XoNfX0JJiYyNSNHEFSPTShoI2DM0jekhpCy+TIX+mME/Q5mo3nicRFR\nlGsNuHp6YjQ3O+ey/3UUi2XOno3WDBFQjbXu7uicPsq97Rlcjni8wOBgCoNBy8BAAkEQkOUKyaSI\nLFe47z7/3ENIX0tmnJjI1gwRgFxOxuMxsXt3hP5+tWlaa6uL3bsjvPHGKLGJOIVEDH+Dj3DExcSM\nhNFqYuNGLzs/8QSTfWOUShVkvY28xsaW9sUftB0dHpLJouolqIJOq8EVcDHYPYpF7ybs1FMtVxgb\nTTI5ma2VjxoMOny+1V+zetrbMbtciMkkGq0WazC4oON2Y6ODDRu8DA4mkeXqXEl+sC6JfDkqsoy+\nmGT3zgBvv54hEx2nvXMrwbYQL786iixXMRg02O1GJiayTE3l0Gjg2LFJxsczjI9n2b+/iYGBZE3V\neHAwxfR0HptNj8VioFJROHcuOieUqHpUBUFY0BNIr9fi9arjfvnlIf7lX86Sy8kIgjqXO3eGiUbz\nRCJ3zzNqJc+ynYAVeBD4FrAbODb32b8DrwK/u4K/f11Eo1Gefvppuru7V3sotw2bNm3i4Ycf5pvf\n/CZf+tKXVns414TLZcLhMBKL1bclb2q68kWsKAqTx44R6+nBbLPxvu1+xqZF7KEAGzY30tKilhAO\nDSWZnLxYeVAqVZiYSKPVakgkClgsaojlUreuyaTjwIFm1q/31qp1VG+JgK2phGM8Qygt4XQaGBlJ\nMzSklpaOj2fxeMysX+9Bp9Nw7twsw6dma2qVglZLKlmgtTVErqi+Z7GoK0ONRqBYLKPRCGzYoMae\nU6kihcLC6pl0WqJSqd6RuiC3AklSQ3cXLsQIhWwUCmWMRi1Op3FOxVY91largc2bA7WHTSYj1a3c\nc7kSBoOGhx5qZefOMIKgdpZ9770p4lMJYj3nQWfgv09Huf/+BirFIuXGVk6cmOHBB1vYdGAbqZSI\nTqclELAueKjN4/Va2Ls3wsREhoGBJIZmI4VsHlks0dTkJJ+JYXS50BlNtTLy2w2zx7No1+l5jEYd\n+/c30dHhoVgsY7cb8PutV21QG+12NFot8qmX2NsaotLlxxksUQg6eDZbqiWbut1GAgEriUSB0dE0\niUQRi8VAMGglGi0gCLB+vQen08jUlJqALMtVqtUqhYKq+xKLqeXAodDyOTiplMi7706Sy6nXqKJA\nX1+CpibnkmG8O5WVNEbuB16Y+/eLwANcNEbiwK3PhLoK/viP/5iPfexjhEJLr2rvRb7yla+wb98+\nPvOZz9DYuHKdVm82JpOOPXsiHD06TiqlSrI3NTnqFCaXohCLkRoeBkWhlM1i0OTZ6LXhjkg0tF/s\nUnppPwlQy37Hx7N0d8dq+ST9/Ql+7MfaCQQuGiSXVutcitVqoLNTTZAbGUkyOJiqhXxU5dgCbreJ\nrVuDHD9ewRIIUhKLFBNJlIpMe1cEweKk0efgzJkZNBpVPVYQBJxO9XtNTY5a2aTaLVRGq1V7bJTL\nVQIB65ohsgTVqsK776pNC0ulCvm8jCSpDwuzWU9zsxO328S2bUHCYXtdEqrHY0arFWolmwD5vEww\naGPrVvWeMz6eUdvJJxJqBYnFQ74YJ5Eu49SVkLJZzC4XAwNJOjt9y5boXorHY+Hxx9fz1lvjzE6l\nSU7D/Qc78Lp0TOWMuJqa0GoFvN47t2xUp9Ned3hREATsDQ3YQiFSw8MIGg2KrgN/63oe3N9IMlNG\np9PgdBpxuUyYzXqyWakWGnI6TXi9ZqrVKhqNhnRaorPTS3OzA1Esk8/Lcx4TL5OTWXp64jz0UOuy\nInL5vIyiqAuJeV0jh8NIpVJZtCrwTmYljREXMDj37zSwaQV/66YwMTHBX/3VX3Hy5MnVHsptR0dH\nB1/4whf44he/yD//8z+v9nCuiaYmJx6PuRZv9/ks6HQacrkSGs1Fz8HlVMtlquWLyolKtYqUyVC8\nLHnE77fQ35+ovdbrtfT1JWhouGhvZ7MlBgYSdcbIUqTTRRIJkURCJJ+X6e2NYzLp5jQTVANBreCx\nsH17kFOnQGtYj6ZSYv0mP3t9Dk6fnsVo1HLgQDMzM3lCISsul4m9exvqqiz0ei07d4Y5e3aWVKqI\nokAgoFaFAJTLVYaHU4yMqB2J29rci4pK3e1IUpnp6VzNaIvHRTIZid27I/T1xdm/v5nBwSRer4W2\nNhctLc65hMYcZrMeh8OIoig4nUa2bQvS358gk1GbpG3a5Mfvt9ZaFIiiTCRiIz+lo2r1kspWSKTK\n6C1msoUqzjnXitW6tA7JUoTDdh57rINEQjWg47MZjh0bx2cLYTLr2LDBu6ga60oiSWUkSX24rnZY\nsJTP425vx9PRgQIolQrKzAD79mxmcFwNc5lMOjo7vRiNOgwGHTabek0mEiLptMTu3Q2MjaVJJESq\n1SqPPLKOqakck5NZGhsd7NkTmfOa6RkYSCxrjFitamuKWMxaa9EwNZXFZjOyiETQHc1KGiNpYP4o\nO4HUZZ8vLGK/jCeffLImONbV1cW+fftobW0FYHh4GOCmvv6jP/ojPvvZz9LU1LQi+7/TX//Mz/wM\njz/+OD/84Q/ZsmXLivzeSqGWUqpGRzYrcfKkWuqo0Qh0dHjYtMm/IDvd7HZjcrkoRKMX3xQEnPMt\nN+dobXUxNpapZdrrdBpcLtOChL+ryVPp6Ylx4sQU58/HSKXURmc7dgQ5fHiw9ltWq572Oc/M5s3q\n6lt196uueq1WQ2Ojk0xGwmJRm6GVSlXcbtOiQlparYbZ2RxjY9laLkpHhxePB86dm+XYscnaSn5o\nKMWDD7bQ1uZesJ+7FVGUOXJkjOHhFJWKQj5fwm43YrcbyOVKdHb6MJl0PPxwGz6fWS2bnSmQy6l5\nPBMTGfbsiXDmzCyzs3m0Wg2trW5CIetcqbm11qJgeDiNLFcQRZlgeyMnT01TFGUOPtRGMVugp3uW\nPQ/78Zh1dHRc2bu3GGaznoYG9TyIROyEG90UCvKyiqwrxfnzUc6fjyFJZXw+Czt2hK86x2MlcEQi\nxHt66sQATS4Xe/Y2sn6Tei6YTDqi0TyvvjpcM+I3bvTT0eFBliu0takVM7FYgaGhFA6HkZYWJ7GY\nSDwu8r3vnaNcVggELBw61LrseFSNkTAOh5Hu7ijHjk3i81mYnc3z8stDfOAD7Xg8d0e7jpU863YA\nnwf+L+DPgb/jYpgGoBX4XyydwKpc3rVxJTlx4gSPPvooPT09uN33zo32Wnn99df5+Mc/zsmTJwkG\ngzd13/Mt7lcSRVF47bURLlyIX/K7sG9fI1u2LPx7spOTjL/zDlIqhUavx93eTnjnzgX9WkRRZno6\nhyiWcTqNHDkyVqcfALB/fxObNi3dqTYWy/P88wMkk2p1RqGgCiB97GObiMdVXZKdO0N0dfmuGGu+\nWhRF4aWXhhgYqPf2hMM2Dh1q5rnnBuq0JkDNt3n00Y6btoq9FfN+I/T1xXnlleGLHWNFmeFh1SiL\nRi9q1Tz0UCuKAq+8Mlz3fUVRaGtzMzKSqu1Dp1PzROaNyu7uKG++OVr7XFEUyuUqDQEDyckodruO\nZKbMVFzhvm2NvO99jXd88qIgCPzv//1erToMVJ2NRx5Zt2plq5VymekTJ0j09VEplTA6HIR378Z1\nSaPU2dkc//VfAxSLau+hWKxAKlXkiSe62LLFj9+vhuVisTzPPdePKJYJh21MTmb4+78/jdmsq3mf\n9u5t4HOf27ms2q6iKPT1xfn+989TqaiNPOfLxPfujbB9e3jJ795uzN0zFr1xrOSMnwCKwGtz/z4G\n/Cnwq8CHgd8G1gHfAz62guO4IpVKhc997nP8/u///pohcgUOHjzIz/3cz/HZz36WZ599dtXdqtdK\nJiPVCSOBGu8dGEjOVT/UK17aIxHWP/YYxWQSrcGwZAKd2ayv8xbs3dvA0aMTZDISer2GlhYnbW3L\ntxVIpyXyeTVjfv6wSlKF0VFV2nvTJj/79jViMl27e34pSqXKgpwXUMNK2WyJcnmhL7hQUPvyaLV3\n1txfL4mEeFm5qNoVV6/XotWq3Wzb2900Nzvp6Ykt+H4mo7Ya0Ggu5oqo4a9kzRiZ96rNIwgC+XwJ\n9A5EwUw2XsLqcrKxwcq2bcE73hCZ51JDBCAWKxCLFVatrFyr09GwZw+ejg4qkoTR6VxQtVMolGuN\n74xGHQ0NDhoaHFgs+pohonLx+pDlCpKkeidlWS0BdrtN6PVqufZyxsh8tY3Xa1nQxmKx5PM7lZU2\nP5+87PWvzv3/R3P/3RY89dRT2O12fvZnf3a1h3JH8Lu/+7scPHiQr3/963zxi19c7eFcM4vZT6oB\nsPjDVWc0YrvGhOaWFhder4VkUsRg0OLzWRaV9r4Ug0E794BXty8U0iiKGmISRZmODs9NNUTmf9Pt\nNtWJsAG1NvRer4V8Pl33WXOz84p/y92E221eoF/R2Ohg82Y/27cH5+ZXDW/4fBYMBm1dpYPVqsds\n1i04xpeGQxbT8PB4zJhMWnQmMzqTmYqiPryvNmH1TkQQuKVhoqUwL7MoNZt1GI3aunJ4QVC1XC7F\n41F74wwMJJGkCgaDhtZWF16vqm5rsxmw2QwL2lAshtNpwm431oV6tVphVRRxV4rVLyBfZY4fP87v\n//7v89Zbb91xq/zVwmAw8L3vfY+9e/eya9cuHn744dUe0lXjdJpoaXFy9uzFPBCtVmDDBu9NvwnO\n32yulkDASlOTg+HhdE3jw2zWEQhY8XjMy4Z4rhdBENi8OVBLvgP14bltWwijUc+uXeE570kBjUag\nocFBV9fi3WTvVhob7bS0OBkdzVCtKuh0Gjo7vTQ1LTTKwmE7O3aEOHcuSrFYxmxWtS4SiQJTUxfL\nv+eTgedZt87N8HCqVlYrCHDffWoegiAIc00cDWzZEqzpT9wNXP5QDwZtt301j99vpavLx7lz0bky\nbjVJ/vJEVI1GYM+eBkwmHRMTGTo6vBSLarfkea2gOC4VSwAAIABJREFUdevcV/X3Op0m9u6NcPz4\nFNmsmh+2YYPnqiQK7hRu56fviueMpNNpdu3axVe/+lU+9rFVjRTdkbz44ot85jOf4d13370p5b63\nKnegUCjR3R1jaCiJTqdhwwYvnZ3eayplVRSFVKpItargdptvmiGTz5cYHk4xM5PH5TLR1OTA4TCu\neAw9mVRbxFcqVYJBW10SYalUJpEQ0WgEvN4re3iulds9ZwTUPJGpqRy5XAm3W1XOXE79MpkUEcUy\nNpseh8NEOl3k7NlZJiaymEw6Nm701QyNeWZn8wwNJcnnZRobHbS0ODEadXPt42WMRt1NP/aZTJFS\nqYrLZbzlpdyCINDfH6e7O4oololE7Gza5K9VjdzOlMtVpqdzJBIiNpuBcNi2bKhFFGWq1Sqzs3km\nJnKUSpW6Ob5a8vkS6bSqzHonesiWyxm5Z42RUqnE448/zqZNm3jqqadW7Hfudr761a/y7LPP8uqr\nr2Iw3Fjd+61+KElSeS7mf2034XlZ7dHR9Jykt409eyLX1aNmjTvDGLlZqOecBp1udcNcpVKZ06dn\n6OtLUC6roZ89eyIEArdOYXl+3iuVak0x9W5ldDTFe+9NkcmoxmxX10Jj9F5gzRhZuGN+9md/llQq\nxfe//3202jVxp+ulWq3ykY98hIaGBr71rW/d0L7ulIfSsWOTvPfeVN1769d7eOihVgRBoFAoMTyc\nZno6h8tloq3NdUes9laLO2Xeb5RKpcrYWJqRkTQ6nYaWFteyGhMryYULMV5/fbSuZ1MoZOPRR9fV\nKjVWmjth3uev5ZkZtb9Tc7Pzmhcd6XSR557rr8v3MBi0fOAD7as2/6vFalXT3JYoisJv/MZvcOHC\nBV566aU1Q+QG0Wg0/MM//AP79u3jL//yL/n85z+/2kNaUWS5wvDw5ZI5MD2dI5uVMJv1vPXWeF2p\n7NBQkh/7sXZcruVvYuVyldHRNMPDFwXG7rWb1d1MT0+Mo0cnahVK/f2JVdNsGRlJ1xkioFYNpVJF\nBEFgcDBJNlsiErHT1uZaNgRxt1IuV3jrrXGmpnLYbHrOnYsiCPDggy10dnqv2mhLJMQFGkOlUoWZ\nmdza9X0J95QxoigKv/7rv86bb77J4cOHsVjunkSw1cTpdPLss89y4MABurq6OHTo0GoPacXQaAT0\n+oUudo1GmBMPyzMyUl99Eo+LjI+nr2iMXC4wNjiY5NCh1loH1zXuXERR5vz5aF2ptCRVOH8+SkuL\n65ZXkBgMC89hrVZAFMu8/fZ4LZl5aCjJ7GyOgwdb7qkKKlBzeMbG0jidJl58cahWDRWPF3jkkQ7u\nv//q8uS0Wk1Nzv1SFruP3MvcM0dDURR+7dd+jSNHjnD48OGasusaN4f169fzne98h0984hMMDQ2t\n9nBWDK1WQ2enry7mrzbG8mK1GpDl6qLaHPn88noA+XyJnp5YXc+S+YfVpa7s6eksr78+wn/8Ry+n\nTk2rWhRr3PZIUqWuamSefF6mUrmyrne1qjA4mOTw4QGef76fvr74oufZ1bJunQejUbvgvVisUCdy\npygwPJwmGs1f92/dqZRKVcxmPcPDqbqy7EKhzMBAkmSyXp9nfDzDK68M8Z//2ce5c7M1LRK/30Io\nVN8Gwm43rJqWyu3KPeEZqVQq/NIv/RInTpzg8OHDOJ33Xm+NW8EHP/hBfud3focnnniCN998E7v9\n7qmBv5T16z1otQK9vXEqFbVV+Lw097x+wKWdT7Va4Yo9aUqlypICY5WKgk4nMDub46WXhmv7npjI\nEosVOHSoddUTItdYHrvdsKhmS2OjY9mqnHn6+uIcOTJWEwkbH88gimW2br0+FeSmJicPPdRKT0+M\nQkGmtdVFZ6eX48enFmwry5UF4mT3Ag6HEbvdSDZ7Ua1ZoxFwOIzIcqVOS2Z8PMPLLw8hiqoBMjGR\nIZUq8r73NWE26zlwoJkLF+JMTGTxeNQE1rupRPtmcNcbI8VikU996lNkMhkOHz6Mw7Fmja4kv/Ir\nv8K5c+f4yZ/8SX70ox9hNt99iZtarYb1672sX+9d8JnHY2bXrjAnTqheC1UPwHtFPQCHw7jow6qp\nyVkzNEZG0gvau4+NZYjF8jdNHn6NlUGr1bBzZwhJKhOPiwiC2hfmvvv8V/xuuVylpydWZxBUKgq9\nvTHWr/dcdz5HS4uLlpZ6D3E4bOfChXhdSMHhMOJ0Gi//+l2Px2Oms9NDJiMxMJBAq9UQDFpxu021\nzr3zDAwkaoYIqB6loaFUzehwuczcf38jiqLccxU0V8tdbYzE43F+6qd+ikgkwn/8x39gNN57F9St\nRhAEvvWtb/HpT3+aj370o/zbv/3bDZf83ml0dvoIhWxksyVMJi1er+WKN6BLH1bzmh6RiL1OYGze\n7Xsp5fLiYaE1bj8CARuPPtpBLFZAqxXmukdf2StSqVTrVuHzyLJaEnsz7f3mZgebNvnp708gy1Vs\nNgN79kRwOO7NsvV167w4nSYsFj0TE2kMBh1ut5k9exrq9EEuNUTmKZerCzxKa4bI0tzOR+aGSnuP\nHz/OT//0T/OJT3yCr371q2g0a27sW4ksy3z84x8nl8vxzDPPXFVo7E4o9Vtp5lfOWq0qMHZp+GVw\nMMnLLw/V5ZV4PCYee2z9AjnxXK5EqVTG4TDd9iGce3XeC4USxWIZh+PKgmNvvTXGmTOzde9dWk5+\nM1EUhXi8gCRVFu0+fbO4k+a9WlWIxQqUy4t3v7680SFAMGjl0Uc7rluwMJ8vIUllnE7TXZM8fE/p\njJRKJb72ta/x1FNP8Rd/8Rd89KMfXYGhrXE1lMtlnnzySV555RWeeeYZurq6lt3+Tro5rQblcoWT\nJ6fp7U1QKlVwOIzs2ROhqcl5yTZVzp2brbn1PR4zu3eHb6mY1bVyr817tapw/nyU7u4oklTB6TSy\na1ek1sl1MbJZiXfemWByMouiqA+6vXsb7mj9mrtp3iWpzPHjkwwNpeYMFjN790auK3wqyxXOnYty\n4YJ6Dft8FnbtCuP3L593didwTxgjoijyj//4j/zhH/4hGzdu5M/+7M9ovqTt8xqrg6Io/PVf/zVf\n/vKX+Z3f+R1++Zd/Gb1+8Rj33XRzWkmSSbG2ar1ctXIx78mNrtBWmntt3sfG0rz44mCdC38pD9el\nVKuqx0JR1HyG293jdSXuxnlPJERkuYLbbbpu8bj+/gSvvjpcdw3fakG6lWI5Y2Slz+ZvAK8Bf3LZ\n+xHgKJABTgN/ca07lmWZEydO8Fd/9Vd8/OMfJxwO8+yzz/K3f/u3/Pu///uaIXKbIAgCv/ALv8CR\nI0d47rnn2LRpE08//TSl0lpJ6vXidpsJhWyLymePjKTqbmKg3iATCXHBtmusDhMTmQW5BMlk8Ypz\npNEI+P1WAgHrHW+I3K14PGaCQdsNGQ3Dwwuv4Xi8QDx+d1/DK3lG7wSswIOAAdh9yWf/D/B/A2Eg\nDhiBHUvtSFEUBgcH+e53v8uv//qvs3//ftxuN5/+9Kd56623ePTRR+nv7+dHP/oRDz744FqS0G3I\n+vXreeGFF/jzP/9z/uZv/obW1la+/OUvc+bMmbtudbSaLCXIdju0ZV9DZTFDYm2O1phnsWtYq9Wg\n1d7d58dKGiP3Ay/M/ftF4IFLPtsMvAHkgSxgBxZqbAN/8Ad/gN/v59ChQzzzzDOEQiG+8pWvMDk5\nyblz5/i7v/s7fv7nfx6f795qa36n8sEPfpCXXnqJF154AUmS+PSnP40sLy8ItsbV09rqXiBmFYnY\n1zQNbiOampxYLPWhynDYXtcpeY17l/b2hddwQ8Pdfw2vpKn1JeA94Hngx4D3Af9r7rNXgUPATwD/\nH6rR8onLvq/Me0RMJhORSGQFh7rG7cDdGENeDUZGUpw/HyOfL9HU5GTjRh92++1b1n4vzvv4eIbu\n7ijZrEQkYmfjRh8u152bjHo93IvzfrUMDSVrgnQtLS66urzYbLfvNXy1rFajvDQwr/TkpN7zMR8w\nfRY1pyQBfBA4PL/Bxo0b18It9xiHDh1am/N7kLV5vzdZm/d7kvRSH6ykMfIW8Hnge6iekb+75LPT\nwAHgBKrBcgY1r6TG+fPnV91qzmYlBs+OkJqcRqPV4W9roH1jw4plNCvVKkMvvURycPDim4JAw549\nhLZvX5HfvJ1YWyndm9yp816tVMjPzFBMpdBbLNjCYXSXCStmp6YYeOEFKtIl7ePtdjoeewzzMv2x\nYrECJ05MEY0WMJl0dHX56Ory3VV5JSsx72IySSEaBcASCCx7jFeK5NAQwy+/TLV8UQjN4vOx7tFH\nMSzTnHV8PMOpU9Ok0xJWq56tW4Or0tF5JREEYUnBqZU0Rk4ARVTPxwngGPCnwK8CXwP+E2gFhoFG\n4LkVHMs1oygK/cfPc+IHh8nEUiAIeBr86D75YTq2rVuR3yym0+RmZi4fCMmhIYJbtyKsCbetscZt\ngaIoTJ04QfTsWSqlEoJGg7OlheYDB9BfIomam56uM0QAStksYiKx5INSksocOTLG9HRO3UeuxNGj\n41it+gXy7WtcJDMxwejrryNlMgAYHQ6aDx7E0dBwy8dxqSECqpFUTCSWNEZSKZE33hglk1HPlVyu\nxJtvjmGzGe4KfZGrYaWLlp+87PWvzv1/Ati2wr99Q6STeQbefEc1RAAUhcT4LP1vvUfb5rYVUcQT\ntNpFDQ6NXq+2hl0CMZFATCbR6HRYA4G6m+FylPJ5BI3mqrdf49YjyzJ/+Zd/ydtvv83Bgwf57Gc/\ni1Z7ZQnxNW4uUjZLIRYDRcHs81EuFol1d1OZK1FXqlVSw8O4WlrwbthQ+55mMU0dQUBz2RyWJYmq\nLGOw2YjHRWKxQt3nslxldDS9ZowsQbVSYebUqZohAiBlMsyeOYMtFFpwvFcS7SJzLmg0CJeMYX6+\n9VYrgiAQjRZqhsg8hYLMzEz+phgjdeev14vpNmwWe2crqKwg1WKBQiq74H0xFkMpy6C9+clEJocD\nd3s7M6dPM68rrNHp8HV2LhlbTQwMMHH0KKVcDkGjwRoK0XLw4LInWymfZ+bUKdJjYwiCgKejA/+m\nTQtczGusLsVikSeeeIJqtcqnPvUp/vZv/5bDhw/zL//yL2sGyS0kPzvLyOuvIyYSAJhcLnwbN1Iu\nFus3VJTaNvPYw2GMTidS+mKo3BYMYvGrDfKqlQqxnh6i58+jlMvYwmFMLZ1otQKXLa6XW4/c85RF\nsc4QmaeYSlEuFjFYb513wdnSQry3l7J4URfEHolg8ftr8x2/cIFKqYQtFCK4bduS4bebMee5mRlG\n33hDPTcVBZPbTfP+/dhvs6KQNb//Ejh8LsLN9R01BY1A4/pGtCvY+C20fTuN+/ZhDQSwNzTQfPAg\nnnWLh4VkUWTqxAlKOdWdq1Sr5CYniff2LvsbUydOMHv2LFI6TTGVYvL4ceIXLtz0v2WNG+O3fuu3\nsFgsPPfcc/zcz/0cL730ErFYjK985SurPbR7BkVRmD59GjEeVxcIikIxmaQQiy28DwgCJnd9jN/i\n9dJy6BCe9esxe70ENm+m+cCBmrs+NTTE+NtvU0wkkDIZ4hcukOk+ybq2eg+I0ahd84osg85sxmBf\nKL1udDrRmW5tkz97KETbww/jamvD7PUS2rGDpgceQKvTkRoeZuLoUQqxmDrfvb2MHz1KwGfC46kf\np91uIBS6sTYOiqIwe/bsxfMXKCaTTJ08SbWysPniarLmGVkCncHAtkfeh1zIMT44i0YjsG5TMxsO\n7F7RDHCd0UhwyxYCmzdf8XekTAY5n1/wfm56esnvFDMZMmNj9W8qComBAfz33YdGt3ZK3A689tpr\n/Nu//RunT59GNzcnBoOBp59+mq1bt/LJT36SDZeEA9ZYGcqiqN7IL6OUy+Hu6CDR10dVlhE0GhxN\nTTgaGxdsaw+FsIdCi7aPTw4Oolz2UBBjs3Ru2gIaP1NTWcxmHRs3+ut6EK1Rj0arJbh1K1ImQymr\nerQNdjvBLVtuaYhmHkdjI47GxgVznhoaWpBPkp+ZQZByHDzYwtmzs8TjIk6nkc2bAzesLVIuFtXw\nzGVI6TRyPo/R4VjkW6vD2pNnGQIbOnjo/3SRmZpGo9Vgj0RuWXb21Rg8eosFndG4IEHO7PEsvd+r\n+G1FUZidzZNIiBiNOoJB64p17lxjIYqi8Ju/+Zt87Wtfw33ZSjsSifDbv/3bfOlLX+Jf//VfV2mE\n9w5aoxGj3V4XZgGgWiWyZw/u1lbEZBKD1YotHF42/2rRa3qJ69xiMXDggB9JKqPTaW5519ZSqcz0\ndI5cTsbhMBIMWtHrb+/QoLOpiXUf+hD5WbW7sS0YXPZeeCuYn3NZrhCLFYjFCqQTqrExP6eCICAA\nwaCNQMBKqVRBr9felMoprcGA0eFYcP7qLRZ0t1mu4JoxcgUsPh+W20zdVVEUpEwGrcFAcPt2Jt5+\nu5ZIZ3K76xLoLsfocOBqbWX2zJnae4JGg3fDhppX5Pz5GMePTyKKZQRBvUgefLAFl+vWujvvVb7/\n/e8jyzKf+MTlOoAqv/iLv8jXv/51zp07x6ZNm27x6O4tNFotgc2bEZPJmhdSZzYT2LIFvdGIfm4F\nfL141q0jMzZWXwYaDKLR66nIMkbj4k0lVxJJKvP22+P09yeoVBT0eg2dnV727m287XviWLxeLF7v\nag+jDlmucPToOAMDSRosXvqHTuJ26Glvd6PVqnl+5rkxC4KwoKFlKZ+nWqlgug4vRu38TSTqzt/g\nli2LJtquJrdzStQ1de29VyjE40y88w7ZyUkMNhv+jRux+P3kpqfRGgzYGxqu6L0pFQpEz50jNTys\nGiLr1+PbuBGtXk8mU+Q//7N/QWb37t0Rdu4Mr+SfdsfqTdxMFEVh165d/N7v/R4f/vCHl9zuq1/9\nKhcuXODv//7vb93gVog7Yd7z0Si5qSkURcEWCmELBm/KfpVqlXhvL9GeHqqlEnqrFb3VSnZyEqPd\nTnDbNlxX0fRTURSkdBqNTofBdmN5BkNDSV56qb7zs16v4UMfWkdDw81z698J834zGBtLc/jwIOVy\nFafTgLsaJ95znpZGC40b1xHYsqVWcFCtVGoLTY1Ox+yZMyQGBlCqVeyRCOEdO64rtLJS5++1sloK\nrGvcZMqlEr0//CFjb71FtVxGb7GQnZyk8yd+gvCOJfsMLsBgsdCwZw+BLVsQUynkTIb06Ci2UIh8\nvoooLuwVMzu7MDdljZvPa6+9RqFQ4PHHH192u8997nN0dHSQSCTwrLIr+l7A6vdj9fuvvOESlCWJ\n3PQ0pVwOk9OJNRhEq9cjaDT4urpwd3RQiEaZePddEn19oCjIuRylXA6j3Y7ZvbT4lZhMMnX8OLmZ\nGTQ6HZ6ODgJbtqC7zkT7TEZa0DVWlqtks2udtq+HTEaiXFZFx9PpEkWjC8e2B4ls8eCxC2TGxykm\nk+jMZqZPnqQQi6G3WjHabCQHBmqhvPiFCyjVKq0PPXTNeYs3ev7eCtaMkTuI5OAg40eP1txtFUki\nfuEC8d7eJStuliPR18fUe++pOSeCgC0cxr9rHyaTDlmuv/H4/Xd3k6bbhW984xs8+eSTaK4gcOfz\n+fjwhz/MP/zDP/Dkk5fL+axxO1GWJMbefJPk0BBKpYJGr8fX1UXD3r215EqtToeYSJC/LPm8lM1S\niEaXNEaqlQoT77xDemSk9t70iRPoLRb8Gzde13gdDiMajUC1etEg0ek02GxreWPXg91uQKsVagae\nJFVQbDryQ70kJwapyjLVOQ+R1e9HzufRmUyMvP46Wr0eayBQ21d2cpJiKrWscXqncnsHAO8ySvk8\nyeFhkoODFC9PiLsKpFSqVp41T6VUonIdXW+LqRSzZ89eTH5VFHKTk0jTY+zYEap1jVRzRqy0t999\nJ//tRn9/P2+++Saf+cxnrmr7z3/+83z729++J1zdt4JCPE5iYID06Cjly5LCb4TsxERd1UxVlon1\n9JC/TG153jARNJr6xNZlDFMxmawlbM6jVKskBgaue7yRiH0un0Edg1YrsGGDh1Do3lACvVmUCgVS\nIyNYK1lam6x1x3NdSIM4NkB17t4tZzJMvvMOSrlcE0cTNBoKl1VyCYJw1ypxr3lGbhGFWIyRN95Q\n+yYoCkank+YDB65JqlhntdZE0UxuN/rG9chmD5VQJ6IoYzZfOSGpLElUSiWkfB65UFjweSEWo/MD\n23G5TMRiIkajlnDYvrYqugX86Z/+KZ/97GexXqVA04EDBwA4evQo+/btW8mh3fXE+/qYfPddVTxQ\nq8UeidC8f/9NKX0splIo1Wrde1VZrukDzWMNBtFF2knnquj0WmyaArpqaVn3ukajWdRlr72BEn2j\nUcf+/U20t7vJZiWcTiPhsB2d7vauprkeKpUqMzN5crkSNpuBQMB6U5J0C4kEo6+/rhqKikJjUyuN\nezopCSYcDiOW4iyjlTKVUgmlWlUNEEVBTCbRWyyUslm8GzYwffJk3X6dLS23pXrqzWDNGLlFzJ49\nS+GSFYyUTjN98qQaO77KG4fV7yeweTMWv5+YZOWddyap2iqM6dOMxod44IGmJduQX6r0WJVlHC0t\ni25n8fkQBIFQyE4otFBEaI2VIZVK8Z3vfIczl1Q5XQlBEPjUpz7FP/3TP60ZIzdAKZdj6r33LooH\nVipkxsZIDAxcUy7WUphcLgSNps4g0ej1dYmmuWiUC6fHePPdFFO9w2j1Otbv6uJDP7l32YePye3G\n0dxMvKenbt/LVdRdDUajjtbWu1tkTZYrHDs2SU9PDFmuotdr6OrysXt35IbLmGPnz9d5vnJjw1jL\nEhsfeQSdwUB6Ik8hFiM1MoJSqWCcyyMyu92Ucjmq5TJGp5Ouj3yE/MwMVVnG1daG7zpDb3cCa8bI\nLaCQE5kZGkfKl+r0OoqpFHIuh/YqtUvsoRCVzZvReUO888oEhkgLgbYGGv06yEwyfraAZcf6RaWP\n0yMjTBw9WishzAwPY/Z4EJNJlHJZzRkJhXC3t9+cP3qNa+Jv/uZveOyxx2i4xqZen/rUp9i/fz/f\n+MY3auJoa1wbUja7qJdwOfHAK6EoColYFjEWxYCMxecjNzuLgNriwdvZWcsFkEWRidPdvPXSGLNj\ncYwOBxqdjrRsYDJWIbz4ugFQDdLIrl0YrFZSw8NojUZ8nZ24Wluve+z3CjMzec6fj9WSS2W5yvnz\nMRobHdclMJdMihSLZVxOQ925I2g0aulupcLsmTM4GhqolsuYvV6Sg4OUi0Wq5TIN+/YR2LyZaHc3\nepsNX1cXrpYWlEoFRVFuu1Lcm81q3r3uB/4YqALvAr+ximNZMcbHM5w/HyUbqzJ9fha/30JzsxOd\nXovebL5m4RlXczOS0Y3Br9DcaqTFFGf81ZcoJNLkwkEMmUmaHnhgQYJTdiZKxd1IBQ1milQycbQm\nEy0HDlCRZXRGI7Zw+Jb2cFhDpVwu881vfpNnnnnmmr/b0dFBW1sbL774Io8++ugKjO7uR282ozOZ\nKF2We3W9gln5fInzZ6eYOf4uoye7MRsEtu3vwtfVhcnlwux2YwuHa7o+YjxOLiORTuRRqgpyvkCw\nwUOrSyI3cJ64I6/2EdEbicdFqlUFn89S06MwWK1Edu2a63GiuWtzCm426XSxZojMUy5XSaclmpqg\nWCyTSBQQBAGfz7Kkt6RcrnDixDS9vXFKpQoNDXb8wkVNJrPPR7S7m8z4OIH77sNgt2O023E0NmLx\n+5FzuZoImSUQoL2hgfzMDFI6TXZyElsodENhtzuF1fwLh4GHgRLwHWAzcHYVx3NN5KaniV24QDGV\nwtHQgHfDhgXxZVGUOXp0nFSqSPO6TpLjM0xMprC7LLR0Bgls3boglnw1WCwGrDYjrd4qvd/9AbHB\nMZq2bcRqVEgND2Px+Wjcu7duHCf6ipw9MkhJknH7ndx/fwPaUgyjy4XtkmztNW49P/jBD2hqamLP\nnj3X9f2f+Zmf4emnn14zRq4Tk8uFf+NGpk6cqCUUmj0ePB0d17W/3t44qfFJBt89Q0UuUyoKnD/W\nR2chy/pHH1mYJyYI6IUyFpuJQlbEG3ThNxcYeuVlGttCDGeGsHRspi9pI56SURTw+Sw88EBjXUfX\nyx9Y1UqFsiiiM5uvKIleiMeJ9/aSn5nBGgzi7ezEsoQxVojFyM2FIGyh0G0nMna1OBzGuioXUJNL\n7XYDs7M5jhwZI5Eoqkn8AStbOwyIA90AeDo6cLe3o9HpGB3NcObMDGazHr2myshQEktLBLMxgZYq\nUiZDoq8Pd1sbWoOBiiQxPThIaPt2xHgcndmMLIqUi0UqksT4kSPq8VUUNHo9oe3bbyhcKIsiuakp\npGwWk9uNPRy+Lb0sq2mMXJpKLgPlpTa8XqpVBVGUMZl0N1VOuRCLMfTyy7UeCPmZGfLRKG3vf39d\n59tkskg6rdbsT+dNND38Y5jJ49RLVHJxpo4fZ3YyQbZiwezxEG5w4fNduYTWYNCye3cEcaSXYipN\n+/3biZ56j9mZMUxWM6nhYUxOJ77OTgCGhlIMT0iIooxSqRCdTPDuMYHHf3wDFo+HcqlEbmqKYiqF\n0eHAHg7f8uZS9zJPPfUUv/Zrv3bd3//4xz/O//gf/wNRFDHfZhLPdwqBLVvUUMrMDDqTCUdDA6br\naP1QLleQ5Qpmo4CvrRmdUqISG0ecHGG6Eiew6b4FxojF58Ni0bF1e4i3siJNzU7KqRLeLbsxeS0Y\nW12cOx+nf2gEncWCRqNBzDoxm3V84APtiyawZiYmmDl1CimTweBwENq6taYUW60qZDISBoMGi8WA\nlM0y8uqrtR4m+dlZctPTtH/gAxgvaz6XGh1l7I03avk1BpuNlgcfvCEV2ltNKiUyPZ2nWCyzZUuQ\nRKJANFpAkiq0tbkJBKy8+uoIs7MXQ3c97/UjzZrZEDajZKKMvPEGFVkmsGkT8XieRmuB+PlzKKKI\nv7GZ0XEfDz54CJMiMvXee/jvu69usaozmdDc98sNAAAgAElEQVTo9Vj8fsqFAggC3q6u2rGfpyrL\nRLu7cbW0YPZ4UBSFQkHGaNReVUKxLIqMvP66WvqtKAhaLb6uLhrvv/+260N2O4xmK+AHeq604bUw\nNZXl9OkZkskidruBLVsCNDdf+81lXnpdo9fXOm2mx8drhsg82clJCtGo2hypWkUWRbQatT6/XK4i\nimVmBT1hTZHhM0cxSAl07dt46z9eQbF5MXs8NNzXwYEDTcuOc2goyfnzMbrPzRC0lWh+6CGqU/3I\ns2PodRqqlQpKpcLM6dNYAwEqksTg+WkMDieejg5y01NUZRlZa8EQaUMBJo4eZeKdd8hMTFAtl2na\nt4+Oxx+/ZX147mWOHz/OyMgIH/nIR657H8FgkJ07d/LCCy/wxBNP3MTR3TtotNpac7N5FEWhEItR\nymbRmkxYAwG0Oh2lQgExkUDQaLD4fOgMBsZGk7x3pJdsLIUr6CU6I3Khv4jXIdDkCiD2DyDqJfKz\nsySHhnA2NVGWpFp/qcju3ZiHhgiEXVRNDv7rX3Nkx5JoemNEUxWmYzKZyUn0ZjNlsYjBZsNm1ZPN\nNuBw1C8cxESCkddeq92jpEwGKZ2m45FHyFdNHD8+SSxWQK/X0tXlo8EuLighnfd+XGqMVMtlZs+c\nqasCKuVyTJ8+rYadVqEh3aUoioJcKKAzGpd80M7M5Hj11WGSySKxWIFstsTu3REiETttbW4iETvF\nYpl4/KIhIhVLTE7nyWaKmI1eink7bQGHqu/U0YFZjHLyv55FyKcABXG0n8j7DiKXW2i8r5OyJNU9\nL5RqFbPbjT0S4cIPf4iUSiEIAvnZWfQ2G1Iuh/GS5OZysah6N6J5Tp+eIRotYLHo2bTJz7p1y4cS\nsxMTNUME1OTseG8v7vZ27OGVVdS+VlbbGPEA3wQ+ttiHTz75JK65B2JXVxf79u2jdS4xa3h4GGDR\n1+l0keefP0Y2W8Jo9JHJSExOjrF3bwPbtnVd8fvzr6VsFv3sLPmZGWLFIo6mJrY/9BAVSSImSQga\nDZFAkKpGTzyXYnR8nBatlmh3N5PRKAaHk7C3keFJKBZj2M1aCoPdFAbOkXI7GTvWy9CxUfybtyBY\nq0RnDJw+bSQSsTM+PrZgPIlEgfFxLS++OEBDQCRXytLgM6EPhrHt3UN6eBivwYA9HGZ0fJzUf/83\nzkoFOWlltm8MR0MDgc2bUapVdIY8yWwCx1iezPg4s/k8eUnCnE7T//zzZE0mwtu30zaX0Ho1x+tm\nvL7XeOqpp/jlX/7lG04+/ehHP8ozzzyzZozcRGbPnmXm1CnkQgGNXo+7rQ3P+vVMHD1aM0ZsoRDW\nzffzo39+h/EzF1i3tYNnvvsedpcVh05Hz3sXmHCaef/e3QQ8eqRCkbGjR8mMj5OdmMAaChHcsgW5\noOYmeBxa+kdjJAYGkMQStnCYQqFEJi1iCYYp53MgqEaAUsgs6GMCqvT35YulUjZLLpHiaI/M5OS8\nMSHz7rsTsMmKRqut7yarKFRL9cKH5WIRKZNZ8HulTIayJNUWa6tBbnaWmdOnEefUSwObN+Nua1uw\nXXd3lFRKIhYr0Nsbp1JR0GoFdu2KMDWVpb3djaIoGI1a8nk1ZJecTRG70Iel0Ub81BAGt5dpx3q6\nWtxUKxWIjkBijInuQaqVChaXneauJoyyWvnibm0lPTpKbmqKiiyTn57Gd999RLu7Ucpl7A0NVCSJ\nUi6HLRwmNz2NoNHUjqfeYkExWHnzzdGatyaTkUgmRcxmHZHIRY9LMZ2mIsuYXC60Oh3FRbSpqrJM\naZFu76vNahojOtRckS8Cs4tt8Cd/8idLfrn1smzxS1/HYgVKJQeXRExQFBdarXvR7Rd73dzUxOB/\n/zfJuYekAxBGRkgODGCPRIh4vYhmP8d6kiQTKQKNHto8zcR6eijE4yhDQ1QtFtq3WAjtaWdi2kmT\nDyZ73yA+O4XT7SE9lqUsihRiUdytLRj0HrJZCVEsLzo+SYoxPNxPa8SI3HeO06d6mXBouK8RIu0N\nNM5JTBtsNuyZDG6NBjGZpDnsYeCCkeJMAatTi6DTct99HUQMBS786EeMvv46equVts2biV+4gFwo\nYMlmCVyyKlrqeI2PZxgYSCBJFVpanJRKZQwG3RWP75Ve3wtMTU3xwx/+cNnz/Gr5yEc+wpe//GUk\nScJ46Ym/xnVRiMdrhgjMCZXNXRvzuiFKtapud36MxOgEJrORqs6IkRIzpwcwro/gb2tBZ9Tj3b2N\nVH8vx/7zPbw+K1t+vAXB5mYiIdD/ry+RHh7G7zUiTk8wnTOxeU8H7xw+SaKvD7vfw64H1pGYSZGZ\nzFHQ2hGUKh2t1kWNkaVI5xViMbHuvUpFYXymRLvXW1eKqjObMV/WIFRnNmNyuRYYOia3G/0qhnVL\n+Txjb7xRCzNJmQzFVAqd2Yw9FKptVy5XicfVv39mJl9rAmg263G5jExP50gmRdxuMxs3+jl6dAJJ\nLDFzoZ+KmKcl4mPoRDc5sZdHvtDCzKTIyNkfUIzO0rhnL0arnWhvLyaDgDQ5jFJShfOMDgdtDz9M\nbmqK3Ows+WgUg9nM6JEj2EIhLvz7vyMmEmi0WnybNtFy4ADFOaNvviljVhIWzJ0kVRgfzxCJOKjI\nMtOnTpHo66NaLmNyu2nYswezx7OgrFxrNN5w/6KVYDWNkY8Bu4Gvzb3+EvD26g2nHjGZXKCQOK9s\nGDr4foybHuDUq+cZHowhaA3oS2bGh2K4MkmSfX1oTSbSo6Mk+vrY8bnPseXRbZRLJcTjYaJ2O5V8\nGl+glelBtSGWzmxGazDgdBpr4mWVSpXBwSR9fQkURcHtNhEIWMkOjHPi7ACCVksiW6Ggc5ONxmh9\nYC+J3l5KuRzBbdsQ43FQFLSpSd5/qInpNJgaHDQ2unBq0sS6L2B0OqnIMunubqqyjLeri2R/P0a7\nvZbMtxSjo2leeWWYYlFdUY2MpEini+zde+fEj1eTb3/723zyk5+8Kb1lwuEwW7Zs4cUXX+THf/zH\nb8Lo7m1KudyCcl+5UCA9NobBZlN7Q1mtmFwuxMw0Wza6sHvdSForx0p5tBqBQiyGTikhaLTkshvp\nOz1MLp7BGQ7TN1KgWNJSSU0x/fZrGJ0e9DoXGlGklBGxywUat3YhV7X4AnbSiTwFsYzG6mDbVjdN\nTS7cDl3N+L8UayCgto2/xIthsNsxOe0IQr0hAWB12Qk0b2Xq2DHkQgG9xUJw27YFie0arZbg1q1I\n2ayqBg0YXS6CW7euagVPIRZbEGYqiyLZiYk6Y0Sn0+D3W0gkRCqVKk6nkZYWF9msxLvvThKJ2JEk\n9V7W1eXDajXQ1z2BJh7EuTdA9OxppqYLNHU2k37vTU5PFEilJbylccolifDOnYTlLOLECGaHDdMl\nFY1aoxGt0Ugpk0Epl6lWKoR27CA3OYnOYsFhsVAplSjM5Yyse+QRDFYrJrcbayDAxMTFuZSyWaRM\nBkGjodSpliCnhoeZOXmyZnTkRJHxt9+m7f3vx71uXU0BWGsw4Nu48bbsU7Oaxsh35/676fh8Ftxu\nE8lksfae1aonGLx6a3AxZcOyXGF6pkD/G2OcPx+jVLSy5QPvI56UaAkbiR99hd4jL1JNzWJ0OGh9\n6CFy09PkJieZCrQzNpYm6d5C5CNe8ideY1PQh2y6n7LRidnjweEwsqnLjZRKgMNB30CaI0fGatne\n/f0JWlpciMpFI8FsNaKz2nH4rNjmYtqCTke8pwdXaytyLqdKvs8O0bVuHZE9QSbefZdzr75KbnIS\nWzhM+4c+RM8zz5AeGaHh/vsJ79iBweG4Ymljb2+8ZoiA6g3s70/S2enD6VxLgF2OYrHIt7/9bV59\n9dWbts/5UM2aMXLj6OYeHpVLZOE1ej1mrxc5n0drNKLR6+l//nk0ngbQmBh+/Xka9h+A5BQOqw1n\n0EdVEjFqqlgECYPZSNPWTgRPhFSyyMBIjt2dRoKbNpFR7EyWFZo6gjQnx5ieHqeYNuDrWMfwaA4x\nF6U80U+1WmW2x4H7o7so6to49Vw/O3eG63QxzG43zQcPMnvmjJqU7nQS3LIFa9hHQ0OOwcFkbVuD\nQUtbmwtPs1pVJxcK6K3WJUMujoYGOj70IfLRKAgCVr//jlIEve8+P/G4iM1mwGTS8/zzA/j9FioV\nhUgkSyRiJxCwodVqaG11EbBXMI2f4MSRXrJZGY3JTMv6EJnRbhIpE+m0hD/sh6khdFoBxR0iHPTS\nuG9f7YFfLZeZeOcd4j09pEZGyM/OEn7/jyPaW+ifmMHQvA2mLlAVRXwbNpCPRjE6HHjXrwfURbBN\nX8FpExgfmCU5MEilVMJiM2EpOMlOWUmPji6ozBQTCUr5PE379+Pp6EDO5zE6HFiDwVXP71mM1c4Z\nWRGcThMHDjQvSGC9tAzuSpjcbhyNjcR7e9U3BIFMUUATDDE9nSebLdHfn0ARNGzdGqA83UduZrbW\nV0BKp5l8911aDh0iqTh56/AgxWKZYkainFJ43+OfJmQtsv6QCVHvwuRyYyolqQyeYlwU0TncnBo2\nUKlcPGmMBg2ZVIHtBzcxfPQYGkEgHLLS6NeSm5pidmSKqaEpLFYjxqqIbnoao8OhJuAZjXg3bCA9\nPIyUSqmJcMUiqaEhtAYDnU88QXp0lPDu3VRlGavff8WeJ/n8wi6e5XKVUqlyVcd4XmzKYLPdc/om\n3/3ud9mxYwddXV03bZ8/9VM/xe/93u8hyzL627B0706hlMsh5XL/P3tvFiTHfd95fjLrvu+7qu+7\nG2g0boAHSBAkBUoiacvWWDP22Duy17G7sfOwYUdo7Y1wbNiOVYTtJ9t6mJjwhNY7trzWTck6SJEA\nCQIgDgJoAN3o+6r7vquyMrNqHxpqmeYhSsvLlL8ReChEITOR1fXvb/7+3wOtyUQ1HsfgcKDV63H2\n9eEaGSFz6xaWUIjk1auIOh3hsRir5y+TW1nFZtHx2ONTbOy0MEZCuD0WhvssmB1mZj/zaWqtLouX\n72L3auhICjpXhBvnN9hcjWMy6bD1KsweCHLi9CyjGieiM8CL/3QHjV6l4/PRQ6RWa1OSTQRMFkqb\nOS5e3OHsWcMbhKz2SARrMIjSbu86N+6vS8eORbDZ9OzsVDGZtExMePeIjN5ieVffQ6PT+XM5jd4v\nmL1ezB7P3jYN7LpVbOHwm97r81k4ciSMx2Pi3r08waCVUqmFxaInFLKRSNTI55v4/bv3weh0Euz3\nM5zOsrFRAsGJ32+lXHfTSjYIDYfx9Tsw7RvBNzaC3m7HrFRwDg1hdDh2p+GFAoWlpb3tE8PAFK/e\nKFMqZ1AkhdJKhf0HDhGw3UNutYgcOoTtvpC6VS6TvHqVVqHAVHCI1mYF1aHHbLYyOeFGzK6TvdN+\nwxTmxxA1GkRRRKvX44jFaJVKVHZ2KG9uYg0GscdiHymL78eSjACEQjYsFi31uozbbcZo3P2vKopK\nu61iNusQxbevYRYEgfCRI/R0BrKJIk3BSsNqQu5ZMJu72O0GDAYN8XiVkydjSAYDholD2AaHEXOb\nlG9fRZYkHKNj3MlCu6OgygqNTIZaIsHFap5Th+0Ep8YZmI6x+eKL3P3BD1DbbWyRCOGHHqGwtY3g\nCiOIGqqJBK1CHrffge/IQX7td8+QX99CLWdRFQlD3yiXv3eNcraILRQkFHUzPWjDNzWF3GrhHBig\nqpi4dmWBzGYVj7MPx6SW5uYyglaHd3qG8JEjVLa3aVcqFFdWyC8tETt58m1zSAYGnGQybxRCud1G\nXK53nor0ej3yi4tkbt/ey0EIHjjwM37C/3rR7Xb5sz/7M/7yL//yPT1uNBpldHSUl156iSeeeOI9\nPfYvCjrNJluvvEJ1ZweT2413fBxVlgnOzqL1hulp9eibIjubWcp4UTQdGrUWnWqJgdEgSA1cnRRW\nRxurz4ZBV0LftKN1DbP58mXa1Sphv4/QuB93xE+uJKHa/KhCiUqtg9EE27keJ2wBcl/7Mt7Hfont\n8y+htCUMTiemcD+GyDAbWQFZiDMUNCELWioV6U2uGlGjeRO5sNkMHDsW5dAhFVEU33ENfDu0WjK1\nWgeLRfeGROkPC3qLhdgDD+wKWAuF3cbimZm3dYuUSm02N8v3tYUqLpcJg0GDLKuoancvCO3H+r3Q\n4aOIRjM94wL5soI+OoJeEXhyn53S0l2yV19F1AgYbBZGxobxBkYoS1qe/6c7ZLNNXG4z0eGD6HOr\nWIJBVjIiq6+9TCObvU/sXCwtpvEfiKLvlBl87DH0JhO9Xo/U9euUNzYAMGp3GNQk6I/p6TazNC9d\nptHtEjp4kNGpKfJGI0r7J7sB9lgM833dT6tYZOPFF2kViwDkFhbwTU8TPX78La3hHwY+lmREUVTm\n57MsLe12DjidRo4cCSNJCrdvZ2k0ZNxuE7OzgTds3aiKgqjRIAgCitJlYaVKU4jw8rrMynKBUlmi\nWGzz+ONDjI15kCTl/hSgx+uLdbL3VtAJCjaTgWOPPYOmsIUt2o9SkWlXJTqlAqJUx+q00dVoES0O\niisraA0GNs+doxaP01UU5FYLSyjM8NgEC6sVavEE2YW7mKxmjp44RSGewjM4Qmj/NOkbN9C4g1z6\n+o8oJnY1Lu1KhbzJhM4fITg7C0A+3+DKK6ssLpXotXvIXZWWLcrA6Unu3s6ytaol6FYJGGwIQpWe\nqtLMZsncvInlzJm33BMeHXVTLrfZ3q6gqrualiNHIj/V/97IZEheu7b3xVE7HRJXrrxHn/5HH889\n9xxms5nTp0+/58f+8VbNv5GRnw/1ZJJqPA7sLuCiTodjYJCsZCF9u0C73uL5v38JQYBaPIHXY+TJ\nMRvtloxG6BKIeFFaTfJ372Ky25CMRswuJ/EfPEct2aSYLaNWQ3gH++jg4d56gVrPxugDh9ELMqIi\noXTapFe36coq9cUbuNwWUps1BMHIxo0NXIMCoaiTr/7DXU4/NkI45tnrV4lGbW/SkMiySqUiYTBo\nsNl2xc0/b+nd2lqRmzfT1OsdzGYd+/YFGB/3fOi/0KyBAJbHHkNuNtEYjXsBcIrSJZGokk7XMZm0\nxGIOTAaBVLKGwaAllaohy11MJg0DA06cTiNOp4GbN9Pcu5dHFKG/34HBMYzngSjaqsROSaKq6adw\nfRWzasIzNI5WrqEVuuiVOnXCvPjD23zl/7nB+nIKg8XCqUeH+NxnxwhoyyTvLSK3WgiiSHFlBbPP\nR+z4UbyzURyaBsb72+NSpbKXOZLPN5DLIppqnfXnn8cVi+w9YKuShN5mo/+RR3YNCI0Gjv5+PGNj\ne+t2aWNjj4jAff3j6irukZGPjH7kY0lGtrYq3LiR2tNapNN1XnhhDbfbRCq1a2urViVqNYknnxxB\nq7bILSxQSyTQW614JyepCg42N8vU6x2+851Ver0esZidTkfl3LlNRkc9eDxmDhwIEo9XsPq8yJUy\nUrVCU1FJSk7O/vsHMPcP07l5j0Kuxqi9STVTxO6xEZoYwKRp0ajXqSWTu4K5dhu9J0AiL7H93asc\n+Z39xAY8LCW2cffHOHzmENvJEjevbGD3ZTj65CHsOgPJdANzpB9XJEM1X6TX7eEbjGIID+zdkxs3\n0vzDly/TqO6m8fkCdp74laOsJppURRddWcf5564wPOxmJizQldpoDQaahQJSvY7xLdpLzWY9Dz/c\nT6HQRFF2yci7Ufc38/k3MHjYFZz9IqDX6/HFL36RL3zhC+/LAv6Zz3yGo0eP8qUvfenfump+DnQa\njT0rpMZgQHFEeHWxy/zSLSZno6TWU9y+vonRZGByJsLm7UWWVyMMHDpEev4uZp+PwvIyvqkpggcO\nIDeb1FMpSou3iU1M4faEMI3M8tzfXyQ8d5C5/UG+/9XXyO2UsFj0WHwefF4b1fUFTFYL6QsvcuzE\nJ7gbDLKTauPyGzl2OEAmU8QfcHDxtSzixR32Hxlia6vCxISHEydieyGP8Z0SF3+0RHItgcGsZ/bo\nIAcO9yOo8p4+5F9WR7wdCoUmly/H9yyvkqRy5UoCp9PwkSjVFEQRvdWKqnZJJmu02zJbW5Xd7fTe\n7vabKFWZHTVjzS/j37efs2dHuHo1idmspb/fwYkTUXK5Jq+/nsJs1mI26/nWt5ZZWysSDts4c2aI\njY0Ki3fSyNsJtGqbU5+cYywkoG3mKK6soJEN3LmdYnUxAWqXdq/BhXNr7Nvvxz7QwufRI2pE9BYL\nVUWhnkqhlZsItRymwQjVnR2EXg9Rr0fUaKhW26yuFrF4YGx8CKPHS7nUwuO34Bnoxx6L0S4WcQ4M\n4Ozre8t781aWbLXTeYMm6sPGx3K12tmpviHiF2B9vYzV+kbLY7HYolys01q4QnV7G9h9Gmpks4gj\nh9BotBQKLWS5e//9bcbHPVQqEqK4u/8aDttYWytisJjxTowjVat0FQVN0Ifi6uPFFzdR1S4D5iJ3\nv/M8jWR81zFTGsE4PYnaUXAbDJhnTlD1zlBHT3C8h0Ujo6NDxG8j9uwhKuksN15fY/4Hr6LttjE7\n7dQaHc5+eh9OQ4ubr4PvwByhXgezz4c2NLi3yDSbHW7eSFGvNKHXwxoO05BlFleqnHooSkOv0iqV\nkGo17l5MEDnTj7x5D8/o6G6o0ztYRXd7G342vYdG/xaj3Y/IqPD9xiuvvEKhUPj/FXL2ThgYGKC/\nv5+XX375fZm8fNxhcrsRtVq6ikLPEeDVKxlW4go7BRCNJsrpKlabgcROBY/PgntskpbOyfCZw8RO\nnKCdidPqdNGazSRu38U32L9LvHUG8iUZUSOycmUVgwai/S5MQouZ/SFef6VEKZlGruT51KefRlpI\nYDU4kIoFqpd/wIGHHmdsoo/N9RxCNUup5MBst7Fwd5u5fW4EunS7PdbWSoyOeggErDTqEj/82hXu\nvXKdRiaDKssk7o6gaR9Hk7iDKIrozGb8+/bhn5n5qeS4UGjtEZEfo91WyGQaHwkyArsOxKtXd5t4\nHQ4DL7+8hU4n0hexkLx7j/hqGqE9itgss/GjHzH51CcYGZnB4zExMOAkGLRx8eI2itLF4zHxd393\nl/X1IlqxR3w9w+ZqjjNPjJBJ19HUVbQIdFsNls/dwIiEweWmNx9nun+MxdlBNlZzdOo12s0W2UIH\n8/Ew4XSW2WOjLN2J44xFsdmNPPbMQfT5Ne784z/iHR3FHovhm5rCOz3N2mICudNFqjdp1NsMPfEk\nKlqcbjMWm4l2tfpT01StwSDF1dU3ZI7oLRb0b/GQ+WHhY0lGDIY3jyC12re2nqm1MvVU6g1/p7Tb\nKKkEihijv9+JXi/S6XSp1SQaDSNTU16OHo3Q1+dEUVSMOkhlsvRUFZ3VisVnwxO0k0hUWVkp4LF2\nEQs7HJgLU/Xt1pNvvnSOriwz8MgjFLRBfnTuAtVGFxkdTpPKmdMDLH/1K5j7hghPDOGNRNn6b+dp\nlKsYDBqsOh3p+TtszkQYHfcyfXiQu6/cIrWZwuLrcDwU2xNhtVoKeoMWo81Ks1ACRUFnMiF3xV0O\n0OtSjcex+P004k3UnkA5XcDg9TNydvodycjPA2sohNnrfYPgzPIL0o/zxS9+kd///d9H8z6q2T/z\nmc/wta997d/IyLtApdKm0ZAxm3U4nUZsoRC+mRlKa2vkmiLFfB2zK0AjWaXRkKnLWjwRP9l0HVnu\ngdLB1Gtw64evYtUpuMcncIxNc/G/fJmuLDN9SiY8M0ElX6FZrmILRug/8ijxwq52YXFpjWatyelP\nzhLyaEheuUpvZ4HAyAClxTt4x8fJ3rmD5u5tGuYCnboGy75D9CptJEnG6TLh9lr2CjclSd1zuGXi\nebbvrNIqFlFlGQQBWepw58I8I9YCFo9nN0H1xg0sfj/WQOAd75VOt7te/Etd+8+SdfJ+I52us7iY\nQ5a79HpQq3XQaASsI1bCQ2E8ET9Wt5FiJkE5k0Go5NB4B8hkGuzfv2sD1us16HQijYbC8nIem0mg\nXihjsluIxyuUym20GgiN9WHu1imv3MNnEllfr2Ks6NAZ29RWaoyPzZDeziM6ndj0XbxO/Z7de8CY\nZ+iTE9QLAaxGMJQ2ef1v/gZHNLq7Vd9okLl1i4HTpxl+5CGw3EHUaonOTbL24ktkltfoxDy4wn7C\nhw4haDQ0crldMv0Wa4tzYIB6KkV5a4uuLGNwOAgfPvyWE+8PCx+dn6L3EAMDTlZXi7RaP7Gdzsz4\n+ZeyB4/HjMWkofAWZXU2ixZRErBYdDz77ATPP7+OKApEIjZOnRogFNp9EuhUysQsNVbzScqFGjqL\nmb7940xMDBKPVxkbddPM5bhzL4NJLhEJmHFHAmhFP66BfkyxQV54LYMYHsXaapK+u0Sh3uPW6zsY\nMkV0Fiul2xUcs3poVHC4rZg9brqKRKPSopQt8dLyFoVql+jYGM9+4giaVhmHuQSqDDoN+p6Ez6oy\nMOwhqdfQrjdxOE08+ugALaWHKtVR2m00Oh1DJw9jtDcxju7HOrmPkujFpXbf024fw/39zeLyMo18\nHqvfj/u+je3jjBs3bnDz5k2+/vWvv6/n+aVf+iUee+wx/uqv/upD38v/oNBVVbqK8q6Jc6/XY2Eh\nx/x8hlZLwWTSMjPjZ2bGT+TIEZyDg0jLZXwTBjroMW+02Nwss2+fH0G1ojFZsZkEHNoGXqeGbqbC\nwvI21XOLHP/1zzD3m/+R3PxNRLON4OwszXaPytY6pr5RXr2Y4Btfm8doMTE44qcv4OTl717nN/7j\nIXyaIrV7OdzuY9ijUUStlkf/9P9CbtQolhVGfQM89/0tquUWDz05jV4v4vZY91wRFotuTxci9LoI\nqrKXF6TR61HaEmK3g3q/tl7U6WhXqzQLhZ9KRvx+C36/5Q2idbfbRDD40QnQqtU6KEoXs1mH0agh\nEDDj8Vi4cTPDtXO3UWSVJz89w5G5cfLxNJVSA4sHjhwJ7z289fU5WF0totWK6PUi3U4bWQVRUugf\ndGO1GQjHXGTSDR4+PIivVKFRkBAtLmyfg70AACAASURBVKRajXqiQvjQHKpRg8utR+lqeOSJKfZP\neyhc+x7u/QeJDI5Ti29j6OoxWU10ajXcw8PorVakWg2L30+90iC/k0EM9FMO6HBYtGwuLGL0B/Eq\nKgZtB9/UFO1ymfUXXgDAEggQPXbsTXZrnclE38MP483lUDsdOvU6nVqN8tYW1lAI7VtNqz9gfCzJ\nSChk49FHB1laylOrdejvdzA66iGfbyKKWWq1Dh6PidnZIA6biMm9Sxh+DFGrJTwxhFPvYWkpz4kT\nMR54IIaq9ohG7UQiNjQakXalQvrmTXT5JJ96dpKiakfpiVhMIl2phV3Nc/VWllyujtViYvv1ZRTZ\nTbAvQjJbRdPzUo93yBU63FutMxQzUZfAgERiq8RDhybJ3JoHr4XA9CRjc8PcfHUBUYCeRovW5sIX\ncnHhYoJCtkaupsFpUplw1xC6ZuKXL+/qMySJIYeHqr2GZ8TI0PggQZdI/6CGtta+G56TcWC16fFb\nVDaW0siyiKEmsnk9idGofUOOwXsBs9uN+fjx9/SYH3X8wR/8AX/4h3+I8X1OqxwfH8dms3H9+nUO\nHz78vp7rw0av16O0vk5ucRGl1cIWDuObnv6pvUrZbIPr11N7U4RarcPrr6fwes2EQjasPh99PRP3\nNlo0mzJHj0bQqBIjoR79USeKPgJoqK7cobSxRSHb4PbNBBqhy87iBis7Mo9/8mFcJpl2pUKr2SZ6\n5AjPfWuBhR2QGhKdtspKq43Lv5/Q+BDbRYG+iYO4DBIGm41Kvcu9ezWa3S4ev4WQx4jYrnHm0Riq\nrNA/7iUaMHDnwm2MXjORsSjhsG3PzebzWxma6aeUziG22yCKeGIBBgYsSGvbqLJMcX0dpdXCMzKC\n1mDAPTz8tvfMYtnViC0tFUin63i9ZiYnveiUOvl7mwhaLdZA4E3leh8k7HY9gYCVQmG3d+app8b4\n0Qtr7OxUsNqNWK0mUutp7qhmjj4yTeTIJCNzQ9jtPymY9PutnDo1QC7X4OjRKNcubZBN1+h2K0xO\n+xkZcrG6lCESteGNBZicmuOH//1FUpkGotLCSBddT+LIsSgPnh7HZhKxO00YlBLt0REqm2vYw2Hc\n0TCNco1CPI03uBul0Gk0sIZC7OxUSKcbFJ01bmwuMjzsQi7nuPDVV5kc9zB9sB9PxE/lfm6J3mpF\nEMVdd6LBwMCpU2+6NxqtFqPTyfaFC1S2tuh1uwgaDZ6xMaInTryp9fnt0FXV3V4lo/E9Dbv7WJIR\ngGjUTjT6xhGU1aonGrUjSQpms27vqTF28iSp69dplUp7CXX2WAynRkM4/OYvllStsnP9Okq7TWZ+\nHq0nwEayx7VbmxTybWL9Dg4dDqHpalDVHqpoIDB3kJBXi6DIpLJNTJEB7sZFbN06rbaKN+JF0KsM\nTPahtpoM91mweVS6EReFfIvXvnOB8cMHMOinKVY62Fw2QoNhsoU2TVVPp9lkbSGO265h5pNe4hdf\nxez1UkxmMfv9mCwtDvSLWKNhEjdeJ7cmsX2hx8iolxMPnGTfyFEqWzvc+MFFtu6uEZ6ZIK/Y2V4u\nMjrqfs/JyC8azp07x/LyMr/zO7/zgZzvmWee4Zvf/ObHnoxU43G2L1zYE+K172ufhs6cecfFtVhs\nvSGwD6DTUSmV2qTTdRKJKj6fhX37/KytlRiJ6shfvYi0kGXh9Q6BvgCDR/dxb3mbbqtFJtfC5XdS\nL5ZpdaDbg1atxnDMTzmRxh32srmcYmO1gNbsxWDU0pG7CBotiUQNt1mlWm3zarrJw0/MoJR2ePGF\nZe68fAu9yYizvw/BYOLZX91PefEW+dUtmqMxDvzK00x+/hQ37pS59OoOWi0sL3o4+WA/6YzE+Gw/\nakdiayWF32fm0MNTqDuLdIxWaqkUzWwW39QUnWaT5PXrKDoLGG3YbPq9JOh/DpfLxPHjP0lYLq2v\ns3bpEnKjAYKAye2m/+GHPzSHRqulMD+fYWkpTzbb4InH+hHpMj7ioNtvolcrUS9VaUlmwocP4x3s\n2yMi7bZMMllDFAUMBi3Npswv//IELqeBmx4DfQNuHjo1xLXLmwQCVh58eBjkFomkRHisn514Daku\n4xqNYR2fZfW1Wxx7cJibr26TSlZwRiNMDFnwe/xc/Mr3aFYb9I1HcUcDmLwBvJOT1FMplK5Is1Ag\nMjrCdl7g7s0dmrUGn37YRcBnRlFUkJoorV0zQuW+3hFBwBoIoLNYkGo1DDYb7UqF8tYWUqWC3mZD\nazBQ2tjgx/PSnqpSXF3FNTj4rpqXq/E4mdu3kapVjPfTd9+rwr2PLRl5O2i1IrIssL1due8xN+IN\nBBh+4gmahQJyu43Ban3HhLr0zZuU1tcx9o3S9Q6ynBe4sJjg4qU49bpMVxB4Jifx+BMj2Pwubi+u\nMTY5grv/CaRSiX6li2i109lpoOnJnD7pY+VejvhaksrmDgG/kaGIA0fQS/KOnoWbS3Q7HUSzjaGx\nKAf2e3ANDHBtReG/fukack9DOBrDolUQBIGOIiJotaTSdQyuMLd/8BKtXI4Dv/osmYUXKKYL1LVe\nHHYtm9fnKdcUCpYB4jsNBNc0j/7nx8gkS2xuFdgpiCjKO4ef/RveGb1ejy984Qv88R//MfoPaBz6\n7LPP8tu//dv8yZ/8yQdyvg8KsqySzTao1ztYrXq6meybHAH1VIpmPv+GKPB/CYNB+yb9g8dj4vbt\nDJXK7vGSyToej4mHH+4jd/UircI9zEoHQSMiFlpUFuGhTx9lc36VfKGJXuMjONLPzAMzBBcWqNyd\nJ0EfOhT8Bw6hy3VwRfxk8h0GxiPsbOSwucwMDPuJxay06i061gCXzi9z7HCQra06llAEpV6mEk/i\nHJ8kWzdw7eV7tBotDIMTZCsi2wtZvvGNZdR2C6XZ4MZFK8Vim3Sqys5mjlNHPfzKbwxQ2dpi9dvf\nxDMQBasHhytG8MABdGYzqiSRUZxc+uotDN4gVqueAweCDA29vdNGkSTSN2/uEhGAXo9WoUB+cfF9\nJSPttsLGRomdnSoWi47hYRfBoA1V7XLvXp5WS6bb7eF2GdhajiPJIqok0avm0Op1+MbGcATcJKo6\nVi/tUKm0cbuNfP/7a9y9m0OSFEZHPZw8GUWjtjl93M0Tj/gp1wXK5TZdQUOp2OHipR1iYTMv/XCb\nJ548zpnxfUhthfWNAgnJzehckG99Z4mFu1miQwHUnRRF/wjJK/NsbRRpFEskNrMceHAaV8jH6Cc/\nSS2RYPvmEraQA4Pdilis0cykWcxnOfvQA9hdFlBker0eepsNqVajVSyi+3Gj/PY2tkgEjU6HVK2y\nee4c1XicajxOu1zGOz6O3G6j0Wox3NeLdGX5TdUHb4VmocDWK6/8pA26UqF9vw36vUjh/YUjI9Wq\nxCuvbJFK1el2e1gsOo4fj+AWqySvX6dTraLR63GPjhI6ePBNCXVSrUY1mcTUP8K9lIAnMkGlkGZ9\nc7dR0WIzYbMaWFwsMD0TYGomSCJeY3GpiJxLs3l3g0aljsfv4PP/21luf/8cK+sdxqaCxPbbMZ46\nTWN5nsS55xn4nz+Pc3Ifw4UKgagHs1HCINfwHXiUsmxE1ecJ9nlZvhNnbVPkgQdiHDreR7tVRBca\nIOYPUt3aInZgH/6JEbqOEMvzm9iMJkJ2gWImjynoIbmRQuz30u6o7OR6bGSSeFwadBqRhx4awGzW\nUSy2aDQ6GAxafD7zL4wW4b3AN7/5TVqtFr/2a7/2gZ3z6NGjFAoFVlZWGP2Y6HFkWeW11+KsrBSR\n5S46nUjI0iHictMpFd/45rfQgf1zhEIWhoddrK2V9giJw2FkZaWAKIr0ej2kapWNZJKJQSPNrTWU\nehWtyYQoCuQWFqju7GCLRAiFrJz81HEyRZXBA6NU1tbQarqMPXqY0tY2itTg1qt3cPZFOfKYn7/9\n8uu4/Sb+p//lONQKhEIygk3DrXUD2m4Vow4EnR5Nt40qarBHYhhNIrOPTGPXSZw4e4RkokKp5yKZ\nqnPhwg6VUoNOuYjBZkXu6Lh0OcGjpyLce32dhXsCuRtX0TYLtJI7rF69g2VslrFDo+yfHSJ/+zaq\nd5BLL62idXpwmT2UUjmya1t84hMjBGJedFY76XSdYnE3Sj0UstJr1t+y/bWRy9FV1fclcrzX63Ht\nWoLFxfze57a5WeaxxwZxOIxUq21qtQ6y3EWRZJoamJrxsbyYRtUbUXtdtrcrDE7GKBTayHKXxcU8\nsqxy/vwW3W4XQRCoVNq0cjmU5Bq1ZIJqTcY/PYklPILDbWVs0oKiKAR9Zv7T/3CAZqOD1DVi0erx\n9gfJbOUopEukSgKR4Qh9Y2EsNgOD/SLLV/JE3D00oeBuuJnSoVOtsXPxIlK1iqNvkMxilrWvfI/g\nocOEIi5KhTrZXIvB44fRtQo4A2Y0JhPhI0d225Qrlb1tF2swiNZopLS+TiOToVUoUI3HEQSB4vo6\n7uHhXfv59DQarRaNXo/+XWytNTKZN5UkSuXybnjbv3IyEgK+C0wCFuCdV4/3AL1ej9XVApKk4vGY\nUJQupVKb7dU0lcwtavkikqSg1Yp0mrcwe71v2kMVtVrM/SNcW+3y8rkN/IHabtyyVovDa0MQRBLJ\nOmq3x8pyAafDwKnTQ/z3/3oJJbmNy6RS2KrQMmm59sNLxKwygiqTu3SecqmBfnCGuqJHawiR3c7h\nmxjD49LT3FmntLmDde5h/vHLL5ONl/CNDPDomREeOTPG9laZw4f8GIxart8sMTkT49o/vcLa5dex\nO4xIV7bZ/8nHcEWCrP/oRxj1InKrTWHVwOzTT7BZbdCpymhlgVJDRzTqZ3Upw8xxHfl8g/n5DM2m\njMGgZWzMzeHD4Z87OOkXCYqi8Id/+If8+Z//OeIHWCYmiiJPP/003/rWt/i93/u9D+y87ydSqRpL\nS4U9274sd1nbbuIedaMRSntjDrPHg8njectjdFWVyvY2la0tol0IzwbISWYcDgN2m55apUWjpZJa\n3qSyvU046iJ39TLdRGJ3Gup2I9fru845826nST6eY+SJx7G39WzeWkFsymjrec79n/8NncWK3u7E\n98gnSNWNXDq/xqHjg0wMmIhfuojNCF/9RhKnx8oDv/wod5s24qke1fMb9B07ipRP0ZYFDh3pI3f7\nOo1mnna5gm//ARrhEDqzCbPNitRMoLM5yBVbtJMZbBYd7XaXgwcDlDc2SCzeZXTEhXc4xPKtFvlk\nAVVV6T84g95mI17qIEkKdo+HWjJBeWubnqqy069FSa6RMQ6xvF5HlrsIwu42+APHw+gtljdlBJm9\n3vet+6RQaLKxUX7DRKvRkFlfL3HyZB8mo4ZEokoqVUduSxR1KkceGOKZz+ynXKwjClAqt8hmG7hc\nJur1DlqtyO3bGXQ6kUjESaHQZG0xxe0fvsLsbIhYyMP8taskvr3AwV/9FFp3kMFBB/HtMquLCV74\nwRJqR+bg4TBqR+bomVlyqSJGQUIspxibmyOdqzI+EiG7skxqM4W1W6WcTOLqi1KWBWKjEUxWM+7h\nYVaefxHX2KFdV2cxzcj4ERD8eGxgEXq4/R4EUaCRSlFaWyN06BCdapVOo4FraAjz/alUp76bqdUs\nFBBEEY1Ot9tzEwySX1xEaTTQeb27xXnvws34tvUgP6U25N3iwyQjReA08I0P4mTdbo+bN9N87WuL\nbG1VsFh0HDgQxOezIEh1ttdSJOMVOpKKVrfb7uge29kjI+1KhUYuh1Stkixr+PrfvUY608RgtfCr\n//4ggaCNttTl3lIeVe1y6FAYs0XHCy+s8dnP7sNhgZogU8nl8PvNOAN2RFnCblBB0LFd7aChh65V\nxOweQurJ6J1ulGwck91GtlTFEo2yHW+QuXGTeq2FWddj8foqR84e49TDUXTNPJ1MnWMPj5Jd3uDq\nS3fwe60oooalO0kMrpuc+exDrJ9/hVyugtMMWrMZrclE+e4KmR2JakdHZHqUoX4bqDKlUotEoobZ\nrKPXA41GIJ8uk97SERkK/NuE5Kfgb//2b/H5fJw9e/YDP/ezzz7Ln/7pn35syEi5LL0pP0hrdSC6\nLJi7FZRWC7PPR/DAgbd11eQWF0leuUJX2dWLaAxbTD/4ID1VInn9DvLdNPZAGK1XQ8DkY3TITuG1\n89i9DpyDgyiyQvLOMo6hYcxTR9heSaLpdXn9yhZr8TYbdzbweCx4ETF5vRRXlgnOuillyqylE3zv\nBxuc/eVZPCtrLN5OEg6aiPi0tBpV6utLxAYPE0/USGxlUNEwPBXl4LiX2sodTCYNPZ2Hnmhj9V6S\nA7F+3N4BhkZcNCthtEqDoqPLdqrDwf0ekitbrC9niDkULF4PUqlAKZFnaHaa1++UGDs8yU5WYdzv\nw1LdFa9q9HpqySQ9VUXUiGg0AuWWyPXXFjAGdzUFvd5ullNy0EVwbo6dV1/dG/Ob3G68k5Pv289A\np/OTuPYfQ6cTMXQbbJw7j70pMdvfw2k00uw6GBq0o9GK3LgRp7S+xfHH95PMSRjMWpLJKrlck17P\nSbfbw+k04vOZefnlLWyaFq1smZtX2yw77HhNNoReiVoqQ6msZ24uQD5T4Rv/7y3u3YmjqhDfLvDZ\n/3CIjY0SLo+F8tI2n/tfz/LqlSxL9wpY2zmq2xs8+NhJ0hdeQm42KW7H2ffsp2ikkiy+fJ4Tv/d7\nRA/OYomEqRydQOdwMvdgFL9Lx865F2k1KyR6Pcqbm4Tm5tDq9excvMjQmTMEw2HalQqOWAwAs8+H\noNGgM5uRGw2kahWb1YotHGbs6aexBYNYg8F3XZxnCQTQW617JAfAYLdjfo9iGT5MMiLd//OBIJtt\ncPt2BkEQMJm0yHKXK1cSfOpTY6g9kWarS0faLXhT5C7VqoQkdWnkcnQaDRKvvUb27l3o9VgSJjDr\noduocfSxCbRakc9+Zpy7iwXCYQuDgy4MBh0IsLFZoVhq8eDpSc6nNih2OnTVNj7/EIcfnaBTLiHo\n9Iy4vCz88ByRaABdNEI7p6EVX0PogaZvEMPUcewWLdU7y5x4cAgxOMi122WGwlZGB50EtHkacont\nG1dpLplRpQ5TE06aXQNSq0NH6dEoVylmy/hPPEJAaRPwmdDo9Sy8dBlLcIh6NkHoyFFiQ0EaHQGt\nw83EhPe+lU/AadehrSTYuniTOytGlCMTBOfm/lW1dn6QaLfb/NEf/RFf+cpXPhTSdvr0aT73uc+R\nzWbxfwxyXGw2PaIo0O3+hJBodSL+gRDho4MonQ6G+66Ct4LcapG/d2/P0qrR61E7HeKXLqGzWJAr\nJfxuLet35omNhqm0GtTXEmSvX6ZqtzNw5nFEuxuNN4ItHGYnWaextkD/Qw9xez5NNZ2hvROnIvmo\nqjJH9x0jM38bx+Ag5rk5lq5V+Q+fP4bHayPz6k0iIQt9PoFWOk29VaGTMnPozHHKNRcrwjBmbQet\nQc/+IwMsZJbo6Q2kUxKhkIVIS0bMLmMouDgxE6RfVbj8/XksSpdf/s2T6DxmvvKlVwgf2M/IPi9B\n537a118gfSOPxmJj6kQMY/84gt1L5GgUlwRpeZP0VnaPqEUGfVjFJk2slLNFgsE3ChyLxRYTJwfR\nW627QZFaLdZg8H1dD9xuI4GAlXj8J4mifqdIY+Eaqtpgc6HAsNfPgakoltgA339+m3PntpkcczIw\n3U+x2uXgkQHK5TbFfAOzUcTrMXH4cIQLF7ZZXi4wOekl4NawLGUp5GuUq0VGToYo78RxeW1U2wLF\nosS9xTwaswWj2YQsddCZTGSTZSZcRg7MRdDtc1CvFBHWr3P6QBS3oc3L14psJBzs+8STBKanELRa\nYnMznP8//neCs7N0qlVyi4tYcznGZiJ4p2dIra6x/MoKi9/8FsGZSXQ9GVGE7N27jJw9i8HpJP7a\nNbo6A1qHG9HhxxIKY49G8U5MUFheJr+0hNntxj0yQmVnB8/oKN6pKXQ/g7PP4vUSe/BBsvcFrAaH\ng+Ds7E91rr1bfKQ1I/V6h2KxiSjupnz+OIv/50G1KlEuSyiKSr3eQa/XYLHoqdUkhmdD1DeiqPES\nGlHAYddi0XWo5MrEL12isLxMr9ejkcmgs7tAaEKrxqOfnqNS7RBfjrPeUZicG2RifAqp0WBrq8r8\n0m4ltUYjILe7fPq3z5JcWmNzfoOjTx1gaSVPbjPFxLgHYxdO/ebTBKYm2bh+m1QyS6duQpU65Ovz\nmA0CV56fx+6xc/faGq3OFfqf+CQvXIzTF7XhCTaZ//KXKceTDJ56kJ7ZTWllGdvwOBafA3vAi28g\nSkc08fy3bhAKmBFmXBjKWwSDXpSwj5kHrYw9vB+11aTZaeJyuXnllR2y2Qblcpt+b49hn4rc7qC2\nuhSWl+kqCoOnT7+nFq+PC770pS8xNzfHyZMnP5TzGwwGnnzySZ577jk+//nPfyjX8F4iHLYxOOhk\nY6NMt9tDFAWGh90Eg1a0ei3an7Kwqp0OaqeDzhuk1DFRKDRxuUxYlQIGm5OmYqMst4gdiSDm1wl4\ntOgQEO5/99N37yEHJ9BbnaTXkggdCW/Uj+ofYuXVJZS6itXmppnP0SpX0Dz8KLO/9VssFyyoFxdx\nBMa5u1pl/k6BuUCAbn6DretxtHINuavB5nMjV8ocHbcip2votAKDU2HsPhc+e49qtUAs4qSwtEx8\ndYuBB0/SVTrU716lMX+D0ZiNarHGwje+Tf+DD2Jz7+oAVu5l2Om1eODIMXzHTpGpiaytNXn+H5aZ\nOdJleMzPyIibRx4ZYOGOkWW5jN+lYyCsp5PdBkcUV+iNglRB2BX8Alh8vvfdPdPr9UgkaiQSu6GP\nAwNOSqUWsqwSc/dopRpotCI2s4bM5jalzW18R3qYLUa8fhvlRo/+kTCxmIPLl3e4dW2LaJ+TWJ8T\no0Ekk65hteq5fj3JykqRx04PMHFsghsv3UQVdHTaHaJjfei8IdLX8ruuq0wTVeni8LlQpSaC1KCn\ndIgNeKiXq2Ru3GD15irJrSo7d1d47NOHmJ7yInW6aEQRlDZqsUzxnsDg6dMonQ6l9XVEUcQcDNO2\nhygpVjo9PRqtBoPXR6XSwdhrYDZqEDUajA4HW9dusrWwgXN8ily6yMrad3lca2Ds+H7cIyPEHniA\n4OwsPUGglc+zde4cla0tqjs7hA4fftsI+beCs68Pezi82wZtMr2n23EfaTLy67/+O6iqAUEQmJ2d\n4rOf/QT79o0DsLm5CezGX7+b17lcgnh8i1RKS7fbQ5YLdLsGJiZmkRVYV8w4D+/D3VXwOjUkilW2\nt7ewj/dR3tqiYTbT0Wqx18r4Y9A3asRkV7A6HLzwnXl6VFjf3KbRdnDigX46jSThsJmJ4Sim/DJr\nN++QEnscPLqPo8cf4TvP36Utw+iAlfmvf4+OSWRkOsJErki62GWnJmGlx50LcT79G31sJzYp9AR0\nHQFVY6Sta9JT8xw5HObOuas4nxqm1G5Q3kmiefUSsV/7LL6TB9E1JGRBYfbxCayDoxQVK/uPDuMO\n9Og0yvhDAQKT4xQQ8O2fQq/TsHZngbLSxjU8SnJLRGM043S2KCQT5FcFHn8kRldNk5cktKkUrVKJ\n7H1h07v9PN7u9ccFlUqFL37xi7z00ksf6nU888wz/P3f//3HgowYDFoeeKCPwUEXlUobp9NIOPzm\nYri3/fc2G9a+QV69lGRpfoNer4cgwNjhMYJmIxe+cxVV7aLKEiGfgQeP+6ncu03k+HEyt27RaTRp\nlqoETx4marfQWl9kKaenXlJYuXwLX9RDYMRP36FhNi9dwRvxU9bpWHjxEuGpUQ5OB/nGP22iqj3O\nPDSOWUyw+IM1Jo4dwOG1Q6PEva//I/59+7FKMt//x3lMT09T8XfRmkx0Wm3q6Q0WLt1GZ3eSq0H6\nOxcIORTUdpNarcHScglVljEurzIxd5RQv59X/u9vUdraxq49DZ4oGwmJfKVL/1gMh8PI3btZYjE7\nfr8V/+kRDk6YSV69SnJ5iWSqQWDKTf/UGMtrNex2A6Io0NdnJxb74NI7l5YKXL4cv19MuuuK3K3j\nsNIrJNlYEhFEkaGZPuyVLtubRSxuF1e/s04m00CrFUmna5w6NcD6YpLBqImt7RLf//YC++ZibG0V\nefJT05w6NYAsd7n8WpLP/btJQpMNRoYcaLsSksHFd19IcOxomEKhgc1loat06EgyWquIWkxz9JFp\nWi2ZpSvzxM+dxz82hL/PTyFdQtEamTk4wL1XrlFeThOcGsUWDLHx4o9o5HL0Pfggla0t3LOHeX0D\nEsk1AiMdzE4H03PHMdy+RzlVwBbzINcKWC0WZEli69YKeqebaktAVbqoSpfVawsMHppGabVopNMI\noojJ5WLj5k1USUKqVGjm88QvXdptDv4ZklhFrRa99b0PuvuokJG3nGE/9dR/fsPravUnwTQ//iX2\nbl+HwzFCoQaZTBqdToMguAmH3UiSgiyrTIwMUNrcxODQ0ihsosnmGZodxWC3E5idpVUs0nW5Kclm\nunoLp8/O0RKsXL64Rb3SRKPRsXy7hjeopV7r8IlPHENrsdG89zqXn7tMuVBHK3S5Va7z+H96GkEf\n5vCUhdXvfZdWq00j04KYn/idFUyBECGrhZ1kg30PzmDVdug0KuybGkIWDDjzDXT5Hi6tDkGv0lBU\nzFobhmKBgYNTGGKjOK1u+h8aweLzUqyD6PDR6IBFD/tnP0MjX8CiVmnvrFHJFmnhQGPS8Xd/812i\nY1EOzPbz+q0che06kQMz+PwxMvkyClV8ES/G0i75EEQRBOFn/jx+2ut/7fiLv/gLzp49y/T09Id6\nHU899RS/+7u/S6PRwGL52TqEPoowGrXvaDd9K9RqEqlUnU5HRbXESOY2QRQRBTA4HGwXRJReFUEU\nQAWNTofW5kbrDeOdkNGazUSOHgWdjoY5RryspSd00UUneP3bL9A/LvHvfusE1Y1Vyju3aCkeHn96\njpF9MV46D0OnH8EfdqHTaXjollrVwwAAIABJREFUgQilsoSEDl/fEHO/HiTi07H8ne9S2dnGZNTR\nkSFy+CTP/I9nMdVTrL/wI1xD/Yx+6tOsXJ5nn7eflgy3Xr5N/3Q/+Z0MkeEIgljkxEOjFMptHGEP\noWMTJFN1ovvH0RiNGEMxmhobwahK/6QVo0lPsdjkzp0MYZ8WXT2HQa5g93vw7p+laooQGlZoCBYk\nRWR83E0gYMXvtxIKWd+TGPhEokq1KmGx6AgGrW8ilj9uRt/YKO0REYBmpcbCa/ewjIoYbVZ0Fgt4\nIsRzPUpqE+uQn2RJwGDQoNeL2O0GzGYd8/MZhse8dNUeyy/uYHWYabRUGvUO8zeSHDvZTyhkxes1\n4/Xb6B4YYWjYST7fQq0rHD2mo15rsblVYf+Mh4zLwNikQChgJuYVCPfZefGHS/idFsaPz2CzCBi2\ncsw9M4fV1GPj6i1CQxF8fT6a+TT1ZBKD3cG+x86g6izovEE2NiosXNym78gc127kWV5e4pFH+jnx\nwGk01y5SKxaYOnkURyyG3JJwxMI0eyaa/6w3qCcIqGoXg92+e2+AenbXBi+I4p6tV6pWaZdKH4lY\n+A+TjGiB7wOzwA+APwDesUc+laqjKOrP5eJotRQGBhxEInby2Rp2mxZRFKlWJRRJoj7/GonFNSpB\nH0F9lYDXQieXZvX1q3RVFZ3FgmbuCV7+u1fpqHVcBQeHT1qot3tYnHYMegGfUQf0EHoKClo2bm+x\n8fw1dL0uTqNCq1xFlfQkF5aZGh9FU83Q3lmjP2qla4pidVnJrO4QNBhxx/oYfHCA5RfOs3NrGzWf\n4ua3r/LI5x4nMhwl2arjH+knfjODxWHBYdcx9slP0rBEydZgu25D39UwGvJgDJkRVYmtW0u8tpRm\ndMzLxLib5O0V3HYN6VKXbGYHS6LAyP4BLry0gmgw4PY7uHJxk9Bkm1bLSHR8gE4ugVH7EwGZPRJ5\n162fvyjI5/P89V//NdeuXfuwLwWn08nBgwc5f/48Tz311Id9OR84isUWL7+8STbbRKsVsVp1NI0B\n/PuC0OuhM1soltoYXWbGjkxSL1XxBaxIO+ssfPu72IXdaO7osWMEDhyg1DGRvpXCpP3/2Huz4Mjy\n88rvl5k3l5v7viORSOwooFbU3l1V3dUbqebSYpMiNWNJY0vDcVhy0NKEw69+sib04JA8YY1GdtAh\ncWSyOaLIJpu9L9Vb7YUqFPYtASSA3Pd9uff6AcUiaTbJJt1UNTk+TwhUJeKL+8977/l///Od06Td\nAqvTiiwr9FtqrFUTSIKM0yTTSayQTUQJj0VRhCSLd5PE13KUKwr9/Q40UgfR4aBezpN47yo7V65g\n7wuhG5hgbbvBavYmjumH8LvC2BsNOqKb9WSPjd0me5s58qkCzXIP7W6Vc48c2jcpmzq9HwgaFug/\nPMz/9dwK2VyTz3/+OHpvGBU9rNSZu3mHwKHDdHtO0okc58/3U12YYeXaPG67gNeuRrC50IydJlHR\n0Ol07l/PYNBKNPrRaAQAXnllnW5XRqPZP3I7fTqMXi/QavVYWMiyvl6g1eqhKODxGMlmG3SbTfLL\nK0h2PU2vm2Yui23yCO9czXP98iaSouKhxyZILJeIRGw4HCKiqGFgwImiKAzHzGyvZbAaZFD16AuZ\naTW79CQFm82A2y3isOkZG3Gwfv0u33nhDbz9PkzhKLtbZRpthVqjx5X3KkT6nTz2xCi9SoHMlbcx\nGs+hFs2YQ24EXZfV736HVqmE0ykiWrV4AjaMbiuZ27eJv/4q9v4IlomjXP/rbzP02EU2dxq4Qz7G\nztp549I2dbUFo8PC1SsJ7M5hTn36c1g0DRzefet50eHAMTjC5tuziLb9ddGJesKToxgMWjA4CRw9\nSm5pCandRqPVYvR60d/TeajU6p8bsvfPhQdZRQ947Bf5gNWq/6UzUpxOA9Vqh1q+gFQuklgtI0lw\n+ukTpLZSXH9zFmSZSq3HyKePoa7ssHnpEnqLBUEUsYwd4u2317GOHaTZU5NPFrh9qc7JE6PsJkqs\nLaVAaTI44sPksLOwVKRebJDN1mnmchyZsGGRSyiVXVTlAAajlZW5TTDZCMR82J1GdEYjGp2B/uNH\n2Mqr2F7aYmV2k1DUS6AvRn9NprC2yuHPfRpXyEsy2cDtsTF2wkdmfRP3o88w/84W88t7dCgQ6HfT\n1KQJBa00mx1e+s5t0okcqZ0QYsuBsZqj1jNQrSsIOj3F3RQD58d4qyezPJfkM89McuLhIXL1HtV2\nheB0gOFjQey6LK2eA3t/P557O/9fla/AryP+4i/+gi984QsMDAw86FIAeOqpp3j55Zf/iyIjiizT\nqlRYuJsjna6jUqno9WS0Wg2lchun00a3lKeWK1DuijR8RlbiPbxeDzG7luytHGG3HYMsIEsS3VYL\nk8eDRaNBo4FEPE8xXcN38ACHh0V2X/422laRvqAf0WZgZqHESu5txn7rKTqNNn3WFvVKAf9gBK3L\nSi5dZa6iYiLcTz0Txzs2jDk6RDyjkE2X0Tp0kK2wvVLm2d85wUayR2EvSeTAMNffWaZcaGEyasns\n5HEf/z26ipZ/+up7LC9l8cbCWLa2ifWbic+tk9txo6rm0clton0WMk4t5fgajoEoU1GBiUCX5cs3\n8GmatNNtMlmZVmWWgCQQHZpmbU/5MdHwR4kfJKJLksLaWoH+fhsDAw7m5zPcupVEUfYnIRcXsxw8\n6MNk0pJOpeg2G4SOD5Jpq8mle+jp8f7NPMW6Fhk1L722zdiYm1SqxuCgA0mSefPNOKJBTbdmx+m1\n0egJlPM1gsUaR44G2Es3yefr3Ly8uU/Qdndo5PNoNCoKqQLJoopjFw6gqNSs3l6nUawTC/mR6nW+\n/fUZPvmZU9ycSZFKN9mZzSAVkpx9/JPoqruEhvtpZNOYjBYQtJTi66g0ApJaT3xpl835HdwjIyiG\nPu4slPjEs8cJ74HbYyLgM9HMZfG6FDR2J067lfzcHIosU0ulGJw+gMpoIZ/MoxUNRKcPEjv6w4km\nz/g45kDgvv+Lcs9LBcASCmF0u38la/uL4uNBiX4KdDrN/dacyaRlYsLzS08l+HxmRoftvDK3ye5O\niVDIztmzfWzNbWAyqJC6++y7Wm5SKLXwqfeFWVqTCaPLhdYXxpKro3N7qGYL5FYKvPLWBodPxQj5\n9FRKdrq9Hla7GbNVJJWukUo1GT4wzPaVIjsbKfocPZqlAs1aA2ltkchQFMsBDwvf+DrpbGp/LPHI\nUfQeL7ff3+LCsJWyT6CyEydRtjD10GE0chtPnwe9U8LX7GKxipiMAlWnmvlUh3dulGi0DJTyFe7e\nzVA7P8jRoyGe+w+v4POZcDhNOGxaKrkCtXwFh09DpSzRaPawGE0Ighq7w8BAzIHd2OUzn51gKyOB\noOPwyX6iUSdazT75EPR6Stvb5N59l06thjUcxjM+fr8F+F8i0uk0f/u3f8vs7OyDLuU+nnzySb70\npS896DL+2dAql9m7cYNes8nKfIdCroMtEkHQ6+l0JA4f9lNM5amms7QMLgSNxPV3N9jerTEy7qOb\nrXHs0DA2qYAgOJEliUYut5+4anOxtJTnrbc2oVXh9AEjS5euUt3Yo52vopK6yArU2gI2jYzT0OG1\nbz0P3TZaeowczuEKnmel0ELbq7GtNxOdOokgmukoAt1cHp3ZQv+RCRLlNmq1hrraxo1//BrlYp2D\n/+5POPelT7Jy7S4Wh42RU1PUsHBzJo0xOkLIGKbT6pDcSGETfZw6FSbgVBOOOTFLZXbfeZ3jQ2NY\nRkZB6lKOr1NYylLY3MJgMaG0WmgdFszRMDqVhLGbZSgSZCvZuRcU+qu7t2VZoVRq0Wr12Nj4oRmd\nWq0iHLaxs1NhasqHSpGJTg2i0ah49du3CUQ9rNxZYW+vjdtnIR4vUomXGB52MDHhodXqcft2BllW\nOD0dIJ0qsbCwziOPjzBzeZ25a6u4/HZOnQyxG0/RHzSQ3ExyvbLL0797hhdfjNPsQjTkIjbgQJ3f\nxBiW6AVEhqxFtq/HOTzdx/s38ghSC005Sa1UJbmcIDwW5aGxCJtvvI5GpdBpNBj/7c9h9nhol4uI\nvgDr81kcYT96hxNF5cA1oMeoh0BzEXdBhappptdQmL1TwNw5QOBoDOUHpn6yTHV9iYPnTqP1hlFp\nNNgdxp/YtIt2O6Ldjt5iIb+8TKtUwhoK4RoZ+chT2X9ZfKzJyGOPxdjbqyAIasLhfYHVL4tarUNm\nt0DACeFAAKndoZwv4/OZKRQa+CNeytkidpuBbq0Gdg3B6Wn0Ntu+Ct/iYGk9w9Jrd9hbjBMZ9PDU\n504xc3ULbS3LJx7fT1yU2y3e+P4djk37qWdztA9EOPiIRHd3nT6nhOA4SiVbYOPKDNNf/iOW3ngb\n0eXBNzyARq+nUShRXV/lzJkhlPQSnUoZum06+Rb5JeibGKRTqTH/t/+RejqFa3AQczBM+Nx5Eokq\niixTyNeoFurY7CK5ZIFauU52cxerETpdhao2QNvnQNDm0ZpMOJ1d0ks5wiODtEU3/n4v02cGkUpb\nvPHKVdKFLqcfjpHRFSmXJtDpdTgcBsyaErvvvrt/vYBmPk+rVGLg4sUPHbr0m4Z//+//PV/84hcJ\nf4ich38uHD58mFKpxObm5m+cNueDkJqZobi+jtZkwuOwsX53G7VWiyMapVxuE/AbmR7yUGv7iG9X\nef+tFVSdHl5NkepaHr1VwHnxDOqdGqWtLao7O1hCISo7O3RKPXZ2KsTjJfr77WzdXULudBg5eYj0\nzWtIXYmNu3EET4zokUlSs3dJLa0jGnU4xS71lEjz2g18vjF2L99lYU1FczxCLDiEo1fC2xAYfuQs\nNSwIy7vQUSHotSiKTKtQZOnGKlsphYOf+QSekJfCXpbV2U0qJYXETp3VlRI6TY9g0Irda+fiaTem\ndpqFr38L8fzDAKilNrW5a9SKVVJ7JfQWC71GjWa7SSmZwRvx0TWK2AcGmPuHf8B58ChjJ84SGB34\nlSb0qtUqbDYDiqL8hI+WzaYnGDQzPR3g0LiV/PYeL3z9yr4IWaNBpdGwm6xhsYskEpV7br27/Omf\nnqZabZNIVIhG7eh0MHttnVymht+t5eQxD/qH+7AHXOhFDXffm0dqdxiZDFGuSmSyTcbGXaRLCgvz\nKYb8Cje/+RoD42H6AwbKS2vY1WqGLkwizSusv/0e1d1dXLEoks9Kp9kmtbBF6s5trMEAtlCI3NIC\n3oOToFah9vgwF/V0NQaqion3XrlDMGyhFGwScatZfvs67VqdA5+8iCZgo5TK0q770ZrN95+7gihi\ndjux+PZHqmVZIZ2uUat1EEUtXu/+JhPA4vf/zJiEB4mP9Rvjg8Luflns7VWJbxTILm6h3Gs57ug0\nPPmFs+Trai783qdo7qwjt9o4g04csSHmb26QWakQ7A9SXS+h1unRatu02z0217KcPTeIz23gjbc2\naMsaBLlLIVcFrDRKVYprqyTtRjpaHSdPTaOsXCNx6y5atUzfkUl8QSdFiwrR7ELSWkglK3RaGizF\nAlpNio25LXxHjrJzYwZadQxGHeHTp0jdvkV2fh73yDC7165hGShgO/kIo+MekjtF1Go1q9UmBp0K\nj1OPwaDBNjhEciuDyajDrNOxttvli//ys+RWVgmYWgxOT6D1RSiUu3zuv7mIoVfhe9+4SaenIhxy\noJa67KUarC0ssJXqMDHhxa2vMxl0wY+Y4LRrdRIbGSSNAaNx/0b4ZY/Wft3QbDb5m7/5G959990H\nXcqPQa1W88QTT/Dyyy/z5S9/+UGX8ytFu1KhmkwC0K3X6fO52R3ykUkXkMJhzBYDQzEHQmKGQlVP\ncrNCo9amvr1OvVzD6HRg1xmoJ/cwSRKppXXQm/DFxtndyiL4Texu52k2u6hUUMiUuHvpDmefmGT8\n/ENs3JrHoVfwnj4NjgDp61ew2gxotFr0VuO+zXy5iH1QzYZaJFdoU5wrYfYPc+7UEOIxB+tbNap7\nOaiXoF5AszfP5IXjbDlW6PNoGBgLE0/1KK/k8LsEJg+Fqc4UqM8kGBlx0mz2yKaKlJM6XvnabYIu\nFeFIGK3RiH1oCGd/P5vvvossOmiWa+gNOvrOPERyfhHTsB+9z4rDLtIuFXF7jDitKszVOEHfxEe+\nXhqNCknaH9MeGLATDFrui5RnZpL3SYlKBaOjbiIRO4piY2czCxoBlQyNnoA/bMe+WkOr1aDRqNBq\ntZw8GebNNzcZGLBjsehZWcmhUdnRm0Sq5TSpnSLxTArfYB8NtQlJqiO4AthNWjo6PbaAlWpLTb4m\nIMldut0e64tJwsN9BMNOqqu3qe0kqBb3RaDBh54hJQqY+1w4Qk6iYxcwCRLt629jMFtQqQUyCwu0\nq1XO/NmfohYECjspgmMDVAQ3t5eqREZDRFwy7e1VrDYjXruack+hl9ri0IVzrCylURuMhCcPUE/v\n+2bZotH7BEOWFWZmkszPZ2m1emi1akZGXBw/HvzQU2cPCh/v6j5CNBpddGYLerudVqGIzqDFHXQi\naAUe/eQEmdl5qoUMOtpozQO8c7NIfF0iv1VhNSkTGgwTGQ4gGAy4jV2kdptOqcDQ8THUXzyPwWqn\nurdHKV/l9LQXp0lm164QsnU4dCRG/eqLrL/xHhabCcvQAAPHD1DbjqOR2qQWd2ipRPaSNbpocR8+\nxmDIw2t3ltG5A4xcuEi4z4au10Ct1dIqluHebsB74hRJbYzv/tMs2v4esiDy1KciWBwidpOaU8f9\nzL51i8GYnZ3NPMuLGY4/epCHPjlJSjay0lUwigpOk5VhTRWLagcUhWouT3/IhMFkwOY0oXE6uDKb\npqay0u4Z99upzTxG2cCw00KnWkVndxAviey+uIpKNKPTaRgZcXL8eAit9jdfT/L3f//3nDx5kpGR\nkQddyk/gySef5Fvf+tZvPBlRCcKPed7IuR3OHPLR0kexRAdwuU143CLxjB5SJTx2LV6njrszZbRG\nEbVazeDkAGKvjMrsQApOoHL4ubahxqAkCTt28dv6KSb2SFoFjvX3oVXfoLixiTw6TejMw0z1BUl0\nXKTTJSStiaNnR+l2epR3d5AqeaYunGKpAT1BZORQANFqxRvykG8bWFrJsrSYpYfA8YtncWRusfL1\nv2P6v/vvcXYsZO/OkszexGAxIfYN8tKruzz9pTOMDDtZWnCg0WpJJyvEghqMcoX4whZb3Raf+u1J\n2s02ssWLMRjCPTJCYjOPymSjo2h4+ZvvcfDRE7iiMYqZMka3CXn1OtZQCJ1eS7dapV0uI3zE5nmP\nPx6jVGpjsegIBi33J3QOHPCgKArxeAmAgQE7Bw7se5moVCoGDkQJbjRoN1rU2mq6XXjqqUHK5TZn\nzuyPLFerbW7eTDI87OTcuQhmsxa1SuGhR8dQqdWY9AqhqX5kq5fRUQ/f//4qq2slGpU6R07FcPjU\nGKxmVm/EQaXiqU8dIhw0kl2Lk7n1Htpchu21JM1ag2qty9mzFzj4xBle/MZlLj8/T/TAAM/87jRi\n7yBqlUJuZZVOMY9KUSht79vthw8dIBg9ypUrSUKBDNE+K9b2Hqvff4OR6XFkWaZTyqGRImhFPUMH\no9R3E3SLOQw2G97JyR/rdGQydebmMjRqbXqtJl1BYHFBxusQCHp1iA7Hx1bb92tFRhRFIZutU612\nEEXhXvvpw11Yj8eEwajDOTiI7CthtYusbda5MV/FulEmYuvRF3WDSsXyRpG1Wym6jgiCL0K1I3H9\ndo6TJ0O0eyqsRhXF1B7Rx85y68oGpaaGSrcJvTbP/N5D9NZnmH/lCgd8bqantKhrG6gdVuw+J9V0\nlpHhIbJz85h9PkInT7D33AvEb93EOzVJUzbQlAQy+TZnHptkfa3A3toOcrVAtVjBdegoaq323vFM\niLKxn7tvrOI6fZHEegFZo8dZtPHF3z2CVWhS2Uty+e1ZzH4/R44FOfvICM6wn2yxy//xF2/x+OMx\n/HYV737nfarDIgd9NbStOn2Dg9RcUNpaZm9dxn3iYZqlHtqgD7m8v10xOBwk0znGAnqoVmlqHcwv\n7GEdGEbDfiT74mKOUMhKf/9Hp8D/OEJRFP7qr/6Kv/zLv3zQpXwgnnjiCf74j/+YXq+H8Bt8hKYz\nGnEND5O8dQtFllEkiV4hTeyhYdyj+y/S1O3bdBsN7GKXQk3m/MNhRPkA9a6asXEvh8ctFBbm6TpN\nbNVM7K7lqGRWKO/u8MX/+iwWd5lPfWqU2cUiPUuAZ/7bp1GVUhQ6IoWWjtuZOianwIGpAObRp0i8\nc4mNS+8RcBkx2yyE+u10uy5qtSGuX45jtnTZ3i4RHPBilGvsvn2J8NQ4mwsNzAETKp2R2uYaQiNL\np2NEKzepbu6iU/UQtUZe/Pp7XHj6CANBHUOHhqiUAiRuz3Pz0hwauUu7JVPXuqgVS7jPfRLcNjyy\nzNr6JVSimXypTn43R77Y4ebaCigyK/Myj54doXvv8arR6dD8Am6dHxaRiJ0P8twSRS3Hj4eYnNxf\ns25XolBoYjL1sNtFgkErR0/0s7ycw9DoUii0OHjQx61be0xPB1layrG8XOSxx2IkEhVu3UqSTNaw\nGeHxxwZw2saZuZFgYDKM0WqiXm8zNuZGp1OjVqvp77eh0ah47/1dJg8G+Po35lhdK3HiRIiLZ/1Y\nMxZmb6TY28ygNxpwBPU0K2VSHTO+fj8mUYNO3eTGa7d49OEjZL7+ddSKjN5mJXbxUaRGk1J8k/jr\nrxP91330WRto4nGSN1o0DT124xmsNpHQ5DjVXAnbyDiy1kRlaQaxz4vS7dAul2kWiww+8QTGe1lM\nlUqbUipHaWuLXrOJ3mqlW6+zJgzQNlUw2O2ETpz4yFxTP0r82jyVFGU/W+bu3cz99tPgoIOTJ8Mf\nat49EDBz6JCfxcUs5qiba9d20ZqsaPR6thaW2G7XefzxGMZeiUqlTbfZRDL3mJnJARCN2qjVungt\nMo22jkO/cwG1aKLZzEGrh8/jIbvbYO7aCr56gqOPHUPvCWHpH6C0nUAMCbhOnMdr0GMKeqhsblC6\nehmDy0v4+DHCJ05g8Aa4enmTt1+4wfFPWzl94RwrW6+RWNvCFfZy4l88QXy3TeDYGbZSPXRRP+Wy\nlcAjQ6xtVJEV0Bg0xONlTp6KMDzi4M3NHIWawubVJbxDbcqSiNbdZGDITTZbZ+bmLp8678QgSJTV\nTjZbBjJrWR6OWbD4vTSyGVzRQVyjY0QMCpWWio5q/wxXqxOJDjvRmfNI7TZVwYToD6PR6e5fd0lS\nKBZb9Pf/qr4ZHw/cuHGDVqvFo48++qBL+UB4vV7C4TC3b99menr6QZfzC0GWFXo96UO3mT2Tk2gM\nBgpra6g1GpzDw/czppqFApn5ebr1OnqbjckxB7LBgrllR6vXYhC6SNkEisFEqa5iJ54D0UwtlwcF\nRF+YeqPL2JCVRx+LYXbYSW27aDc73LqT5+adPUqlEsX8Kg89Ns7nnu4nduoojoAPtUomu7RM4up1\n2rFzrC/uompWSGcyeAYjvPL9ZX7v94+g2MOkigoqqwz+Yc78j/+WbL6NYBvGU8jQfPMN5PQeWo+J\ng0cf4p+eu4No0CCrBHK5GqtLGQpbFaxeJ+VMAcFkoNPuYQ55oddi5U6ekfEhrINJ1OIuPUOL81+e\nIlWC6nYKW8CH3CnT0drYWNxg8mCA4PGxB+JFYTAI3L2bYW4uQ7PZRRS1TE56mZryMj0dxOkUWVrK\nAvueJF6vmdXVArGYg0cfHWBtrUAmU6fblSmXGiSXU5T20jx0NszZsxEiUSevXdrj/WtpcrkGweC+\nf0ouJ9Bs9tBp1bz00iqtZg+VCm7c2MEoqvns1CjXnvseE489TOzxi+itNnQ2Bxt//y5avQ6jDgxm\nK6LXQ0U28eSf/zmdahVZltAaDJR3dykn05gDQUIhG916k9vvzNOotTj25ElO/e5nyC8tobG5OfWV\nr6By+BBNAvWOBkH8od9Wp1qlnk7fJyN6QaaZzyHLoBVFGrkc5c04urNBOtUq7VIJQaej//z5j12u\n2K8NGcnlGszN7RMR2B8JW1kpEA7bPpQJkkaj5ujRANGone3tEpubJURRu/9vWi3VbJNsoYNDaeJ2\nOTFYjOwUW4CCSqXCatVz6piH3GaDnkki4u/y7jt3ya1sg1qF1qDF6vfRkWuMXDjL8luX2XpjDtFq\nZuDMKcotI5mVDFvzG3zp336BzMYO6kaBVrlK/LXrOKJ9hJ/4NLffmccUDOLvc3F9ucVszsqZZ55G\n53Bw6UaVgQE7t/MupIFj1M0qXG4n828n0GjVFIsdeg01EbuK+EaRdquDb7APx8QU04MuYgN2JElG\nLZrQWmwIgorcXoFCuoBOFCnk66iqdfbiRbJ/9z5PPn2MgSMXuHotyc5SBdFiJp/M43Rb2dnMYzCL\njI/HIBxm6ISeTElhMRNHrVahUqnodCRUqv2R7N90fPWrX+UP/uAPPnY3+I/i/PnzXLp06deKjGxt\nlVhczFGttvH7zRw44MXpFH/mZwSdDu/EBN6Jn9Q4dBsNeq0WAL1mk25iC43BQGg4TLtSoVuvY/D5\ncJ0cJXUlw8HzR5BUAkOjfgwWI/M7EF8vES3qCBTUCIY6boeOrizw7e/fQqMGUa/FHXBS3kuzeLmO\nEr/J+pXbDJ85hiviRynu0Wu1yGbqGJxeRLGD1iCiEhok0zXUei1SJc/auxtMxfRsNRXSNZGXn3sX\noyBz+NAU5bl1NPFdRg51OHA0RnFtlbMPnaDS1lJvynRbbYyqDr6BMD67CqmYQjYcJXn5XbwTE3zv\nP19nN61hbGiK8EEn12ayyFRQ22Vk0cbE+cP4+tVorTbcU4P4Dv7wWjbyecrb23TqdSx+P9ZIBOFH\nNiAfJVKpGjMzSdr3csNqtQ4zM0m8XiOg4sqVHer1LtVqh/ffT+DzmZEkiffeS/CD07ofxIiYzTqk\nmojBZqFvrJ/562tIjRpDse97AAAgAElEQVTVGpSKTTptiTt3MrTbPf7wD49iNsmIRoHr15MoQMBv\nBkUhk66ie6KfT/wv/zM3Zsu8OZMhNTfDuWfOUsWCejdOr14hcvoMqYVVDg4eJDkzQ3V3B2tfBGsk\nQt9DD2MdnSIzO0d+fYvI8ABT01GK6RJUc1TVVnSDU+gmppnfbpOcT3PxtIdSrvKB16mez9MqV6jk\nJIxOF3NLJZxuC0ZVnQOnD+CyKGix0czn2X7/fTQGA9ZQ6H6o3scBvzZkpFpt02z2fux3sqyQyzV+\nLhmRJJl4vMTaWuFepoLxx7opRo+HXq1CvSGxu7SDNSgxfGSY/N0GKlUNu11PKGSlnk5jM6nRDwbR\niRp06i5bly9jDQZo1ttogjEmTh2guLfO3etx1EqXWrFGsXqF2BOPM/rMswxfSNKTu5hiE5DbhF4b\nm62O2edD1hkxev0cfeI0keEQ3/l3L2G0Wbhyq0ilnKJdLWP4win+7/90hwNDJk4esuK3Sli8bqh3\n6RsygCzTP+hCliU0GhVGk5ZnPz+FupoluTxHrdZEJ6ixDo1hFkWsMQdOoc7C7A6xkMzu2g7WSBSd\nycROXiG7lKRSqLNz7Ro2m8jUhYfQO13UilV8gz4SiTKJRJnHHosRCBqZmPCwuVmi15Pxek04HAYC\ngV+dAv/jgFarxTe+8Q1mZmYedCk/E+fPn+drX/saf/Znf/agS/lQSKWqvP321v37vlhsUSq1ePzx\nwQ+dU1UqNUkkKtRqnX33UKuI0e2mJotkqzJavQa72MPi9xN56CF6jQaC0UhXUuHa1TCzUGEvUWRq\nyk+u0kORu4yOeYhv13jvWoa+qJtMusYf/esjfPozo/TqDdq1OnK3Q3onj6KyYO6PMSHq6bWa2AaG\n2FM7CI/38+TnnTQaEiqli9yT0Io6VL0OWrrsrW8yODmAQd3jrf98mdjhEQJhB/OX5zFadPQfO4zN\n5yKzss6Z08fpFPPMLhR47/Ief/hfjeFSdGQSFQZjdsI+HT1dPxvz6+TffhGLUcBqDbO+nSddhmKn\nzt3ZFIOjPlLtLoaKDkeqy8q2xOHDQ3RE132dQSOfZ+P112mX9rUc+aUl3BMT9J0+/Ssh4oVC8z4R\nAXA4DAiCmp2dyv7GSn3PL8OiJ5vd37CePdtHsynRaknIskwu12B7u4xWq8bqcqDRa5m5mSC5mmJ4\n6gSGdInTZ/q4eTNJMGRhbMxNNGqn3e4RDln59rcWCQattKo1qjUJq0VLQ9KR6jp492YcVaVIJlVh\ney2NJ+yirlcoL9xGarfQWcwY5Bo3Xr5CqN9De3mFSjJFu1rD5PMTnj6C4AlCq4rVYSa9sIxi7Ucy\ne3BE++mZvBhsFaw9AYPNxujRARLrKSRFhc1pwRv2oADx118nLzt47YVZ3EEnFx8foVBoMDQcI6JJ\nsv3K9+k1mxjsdnyHD1NLpcgvLxO9cAH7x6Rt/WtDRgwGLVqt+r5JDuwrrG22n7/rXlnJc/nyzv3o\n6R8lNRaLDo8nQMFmxuwWsDkMaHRazOEo1kCLsTEX5XKLRqNDvqHj6gu3sTtFzKKawdF+xp98hFq5\nTqXUJCT2ODhmYeW7CerNLlargUyuRWl3B8Ngkt2CmoGIF4e0jTvWh2ksiNSTGHzaQbPWRIwF+MJX\nPot3IIwvaORf/v4xJLWBtXiZhZkt7t7IojPoabd6dCQZv0ekk0nyW589x3P/cId3Lq1htejZSRQ4\ncTqKyWCnsBFnNAC3vvcihc1dtGYzlkPHWL98i5PPPMGt+TLHL4xjoMnM996i3uhi9PgwxWKoDTqq\n9RrVZJJ2pUayo6JwdQt/uIq+V6PV02ByOwFIJquIBjWFQpPt7TLlchuv18j4+ND9DtRvKp5//nmO\nHDlC5BcInHoQOHfuHF/+8peRJAnNx1TE9qNIJCo/sQHJZOrkco0PNWVXLDZ54404+XwTgPn5LMeP\nB5FdI7z2zfep5quo1Cr6xqN86pgPQadD0O1bpKdSNeLxIoJej9Fuwep2sLwSJ+RWoxYMvP/uJkaL\nEZPFRCBoIblT4b1XF7j+zhKBkI1nPz9FLKzj6HSIpW/fpLGzhTs2QLnYJFtV0Zjf4+7tKgvLRTw+\nM4cmXTzz2XGa5TKUUgxFDjM+6mLx/Vs0Gj121pIMHDrMXqqOrDcz/TvnKCzMou01cLr0MHqKO68l\nOXTIx61/eoXFmTgaQU36joaQ38DQ2RP4nDpkt5W5l95Ac/AikWgIlcHE3l6FZ549QDTq4uqVbXZT\nDa5e3cXlEqnXOxw/HmJvr0qr1aO3vUSzUEB9r+2gyDLF9XVcw8M/NSyvWSySX16mureH6HLhHhvD\n7PN9qO+AwSCgUoGi7Afz7exUmZ9PE4s5aLV6DA46MZu1pFI1Mpk6arUKq9VALGYnn28yMuKi25Vo\ntXrU613KpS6nz/hYv7tJKl0nX+qhFQTarTanT4eYmvLx3nsJbt7cQ6fTkExW+f1/dZS11QJrq1lk\nZOwOEcVgYS++RHY7RcCjI3ZwkOzGNqOH1Bx+dIzKpBeX345WapK99gaSokYjipgdZvZu3CS3vEzw\n6FEiD5+jrm+zFy8x/tlPM/7EOW7dyrC6XqSWLTDcdTJ1MMDZc3qkQpKyOUxmJY/UbNDVWggOTFLd\n20Ot0bCyVECSVazfWMSRqWHzudheqqNqze/75GQyKLKMNRLB4vcjtdvklpb+fzLyi8LrNTE05GR5\nOY8s7wdcBYOWn/lQ2jfRaTI7m75PRGD/ITU46MBgEMjlGty4sUe3K3FjpoPbbWR42EH5ZgpR1NFs\ndrlxI8nkpJcbdwrIag3lTJG2RkLXLvG5f/MJ0ts5CqkCJr1MwCazJghgcoCoxerU4e03Eh1w0bO7\n6BX3yGzNs3H5BqHBIKWmisETh9F6w2yVjaAXWVivsrldI2Dt0aqksIt6Dp8exul3otNrGJ3wMjLu\noaXSUa3ukrmzw8p8AjpttlfzpHZNWK0iZ0/68Q1Yib/1FvlUkW67S3JtgdxOlvDFT9ApFzk85cOk\n1LDbjcgGC56oH9fwML4+L+E+M7l8i+qWCkm0kcs18Hi7tBttkvE9+uwhRKeC166mvbnIjfcS1Otw\nbGSUbNdLtdphcTFHNOr4SHIsPq745je/+WthKub3+/H7/czOznLkyJEHXc7PxQe5fsqygvL/NqH4\nKYjHS5TL7fvpsrBPmjOZNsa+GDpXA5UgIFusrMVrBPtcpNM1rlxJkE43qBRrjPQbUDVLGHQgtRo0\nWibK8QLZZAFzo8PQRBC/38zltxZxWDUMjwdAVrh7N8u/+aODJC69QTGxg8tpptVsk33/KgOPXOB7\nL26gdQUYn/CgM2ipNBSMFpFGqcqBAT3JxW023l6k3dGhVilIikKxrrCb7uDtt5BaXIPdLZxHDtFU\nGUmsV9jaLHJ80sxGIo1R06XR6NCUBEwxM2ahQ9s7Rn1Uw8iomw4GvvNmHo2hg2gUKOQbRDxqRgIy\n9ZKE07k/mj8+7mZ5OUcqVUMUtRiSW/QyZSIR2/2RfanTQWq3P3ANOo0G2++8Qy2VAqCRy1FLpxl8\n/PEPtYZ+/34OTibTQJIUbt3aw2LRYTLp6HRk3n8/waFDPprNLpGIDYtFh8Gwn8heKLRIJEr4/RbU\nag3FYpMLF/qxWvUkdmr4hgy0Oz3OnAnznedX0Ok0fPWrtzGZdBgMAul0nZERJwNRB+NjTtrNFgfG\nIthEhXyqhEGvQdDpcPhctLeWsXlNqHUCfX4DjrAVwWxkd24Hnc9D33ANq8/L7b/7O9rNNrZgAHnm\nDt2uhOX0J7h6dQ+pVODQI4dZWEjTrZQJHxihtbvJjY05vE8N0Wp32d2rYpk6Sast0TNbiKcV+o0a\n1IAky6hUakweD5Ik01Xr6RTyOA+Noq7lUKlUiE4n9XQa6Z6Gqtto3AuMfPDHy782b4gfpDSGw1Zy\nuQZWq55w2IrJ9MFnlYXCD0iGzNzcvvteKGRBEDR0uzKFQpPHHovx6qvrRCI2EokK1WqNzc0yrVaP\nU6fCvPjiGuVyi/FxD9NHPHz32xl8g0Mo9TITMQONpVvM/cPX6RSzGENhuu4Qid0GsckYrXqLRqNL\nqqegNtvItETIN3DKbQo18AzHyKeyVKptksvr9I1Ms7na4O7dLQ6OmKnevcpip4rHZwXRjOXACY5M\n9zHz9jyPX4zh8lq4+85dDg65WEgWqe3sIFhs+P1WOl2JeqmC0awjPZvm8mt32V7YwOq2E47FUNcL\nuPUN9J0S66/dIfz0cQ5MuunwKJWGwtCoDaW0hl/xMzzsplKIkUiUUAsdHGY1sZiDVHyPbElGtDdQ\nNpZodLMkk1XUKhWF7V2Gnnicel1Lvd6l0ej+xpKRRqPBK6+8wl//9V8/6FI+FH6gG/m4kxFFUQiF\nrCwu5n4sIM3lMv5czYii7B/fNptdwmErs7NpdncrWK16zpzpo1Jp74sxf0SQmcs1kCSZS5e2ePPN\nOBajivmbcfrCZoJuHeXNOAcP+dndrRMIWjGZ9KiR6QuZqNXadBstWqkdxmIeuooGldTFpG5SLGWg\nuEdyu4reE8ASiODoC5Jq59i8tIZgEDlyIkI1l2N1Totb7NEzWKimM7RaEn0DNlK5Nr7xEZKKlTNP\nHeH4pAX17gLWiXFWltJc+vPnOfsHz3LsyCCVQhmrTc/2bAqLw0xswIla6ZHYqbB14yrbd5eJ33Zz\n/HNPMDEV4PnvrXHiiAdtapHFlQyqXheXWseppx4h0zbj85lZWckjilpUqh5OX4CVO4s47Absjv11\n0JnN6G22D1yLRjZLPZP5sd+1SyWqe3sf6ntgMuk4dy5KIlHmzp00kYgNt9uEXi/c65TsG1dubpbI\nZBqIokC3KxGLOVhd7SDLAn/zNzcJhSy4XSJbaoX1jRKlYpNStkyu2GZi0s/5C1F6PYWdnSprawUW\nF3O4XEbUKNgtAs/+VoiI3CY5+zqlpoy9HCEyNsmRM0PUciWixyZ5/+1VHK0q/f4tVpZm8DoEOrkM\n3slJDEcPk711E3pd9AYBs8dFbS+Fye0hpJeYPn+AuZfeYua9BRRUaLo1crO3EUwmurUaxWNuaiuz\n9B15lJde3aRWbVFpyPhjYf7VvxiD9DZDQ34Sa2naXXAEfdj7+3GpbTjsRQxTUxSMxn1L+E4HjV5/\nv0v1YYhIZWeHwsYGUruNvb8fWzT6keuEHvQb4n8FjgG3gK/8vP+s0wkMDDgYGPjZGpFeT+bq1V0K\nhSZWq55g0MLlyzvo9QJ+vxlBUDMw4CCZrLG1VUatVuF2i6TTNer1DtlsA0HYTz1EUTgYbKLObdPa\nmKdSczB+ehJR3iOTyeKwaKjlChiGD9Gy9XN1tsypc0c44vawPbeO4pIQQ1HuLGSp1ns8Mm3B5LBw\n4/UN+gbD9I2YaauMrKyXSKV6qBQFQ2Wb2ZkF/H0elubTOJ0GhjGwV5ji6ENj+HwizUqN5PI6NuMA\nff0RRg5GSO2V0erA77dy8kSQRq1NudjYN8bxOskli5gsIv02HQaTSK9SopfdYf3tLpMXTzE9IaJ1\n+sjOzSPbnCyvFNDZ7Fx4fJyBqI1uvUHIL6KoBWqc5PZilaMmiepyioYgI0kK9VYXjUZNdTuO2T+J\nKAqYTL+5xzQvvfQS09PTuD8m+Q4/D+fPn+e5557jK1/5ubfbPzsURaEYj5NfXkZqt3GOjnJi2sfS\nSolms4vDIXL0qP+nbkB+8DdmZ9P3u6FvvBFHkpT7mgKXS8Rs1tHr/Xh3xeczkc83WFrKosgyNpOO\n4w+PkM3UGDgYorqxgkFdZOxsgPReif/hf7rInZsJkmu7nLg4hbrToCqXScyvU0wXMZhFVH80SSWd\nRW41QS0gtdvkl5YZ6TZp5POopA6Rfh+ZZBnUGuxOK35LHcvkJBNHByjH16llsoyGR6hr7XhUIjoh\nSCe/R9MUJdOEVreK2QCtnQ0C4TA1owOL8wi59U0sZgGXTSBXg36Hg603rtNDS7PVpVqXuXF1B0WW\nMbdTXHn+bSx2kQsXYiQ2UmRv38R79iKCoLqvz+l0JFo2N+HDB+i1c6BSobdY9p2qLZYPXA+51/uh\ndfmPQPqR8L2fB6tVz4ED+yO+tdoPPydJCkNDDur1LqVSG7NZh0aj4s6dNDqdwJNPDnH9+h4njgfJ\nbu7itxuYuRFHQYXHaSbg8WCziawtpjjzUJTvvLCOz2fmrbc2sZj16HVqOp0uY/168nN3ee1/+yqN\nQpnoWIiiRU0pX+HY1ClEa5AX/mkOfzTA9EE766++wPbtBU48eQKbTkN2aZHxz38BnUamsrmG1mim\nUSrTUwvUu2qKlQ6vvrXIiTOn0Bl0vPbyCp6pEOW7N6DVwep20ErtkN9MYIsUsdoMSIIR74AJm9dB\noa7CZzRibZYYnQqzvFFBbXVhNms5NT1M9vKbqDQazH4/vXabwLFj9JpNStvbaE0mDDYblmDwp17/\nciLB5ptv3hd+l7e28JXLhI4f/9Br+GHwIMnIUcAEnAP+d2Aa+NAxp5Ik02z2EEXhJxw+y+UWvZ5M\nudwmn28yOurCYBBYWysiivvufuvrBfR6gXK5TTJZpb/ffi/7BqamfDgcBiIRG0eGBFa/+Z+QO12G\n+2PcvTLHY48N0JhbxCyXEdVm3M9+kee/PU+9XcA+doD/8z9e47c/f5B6wEyxmue1F1Zp11v4Qk7y\nZRGdpMJsM1Ir12m1JR56dhrDyBjdy7s4TbB36xoen41Mukan00Nn0LKzvInNEmH2WoGRYSf+sAOd\nWuLlb17mk39gIxC2o1X1cLit2A0SB4eNXHn+XXRyC/9QGIPVjNntRtAohE4ewhoM8MZ/+BpWiwFR\nDKEStIRPn2Lv6lWaBhfvvBanVqmjSDJaucmFTx5h/ep7bC1p8Z17DHQi58+78Fo6rKdrdJotRkZc\npJLV/fXpdDGbdUxNeT/2zn//X/CP//iPPPvssw+6jA+Nc+fO8Sd/8icfm9bsj6K0ucnWpUvI3f0o\n9Ho2i+/gQX7rt47RavXuvWx+tptvNlvnzp00nY6ExaIjna7RaHSZnPQhigLttsT0tJt4vES9vu+i\n6nYbGR1102zuv9RMRg2zs0lWlvOgFshmGjx63EJETFO98yp62czomZNEAkN0FS2BiAu3x0Rh0Ekw\nusnsu3eZODqAIIpIZi/miJpSIoFSL+MeGKZbrXD0aIi1zSq7u1UK2RLTZ4dpN1os7+Yxzy5Ta8pU\n2hqsniADFjvajsTNd+6wfmcdbbOARpGwRGMEw05UBiNyt8f8O7ewhYKc/Mwhon1mNm/exREK4jV4\nefubbyHodfTaMirRgs5kYH0xia/fR2VrlXq5itLrIHU6RGNuenQIezRUZYFyuYXdvt8FSWU7hIYP\nMzm8r5vTW60YfkpXBEB0OtFZ9k0RfwCNXv+hNSM/ir4+G35/kVTqng26oGZkxM2tW8l71ucCjUYP\nnU5Do9aiW6lg76Xpi/Qo2A3UJAmjUYfFbkRQJPaSFWbeX2X1jgmt1ODIVJh6W2FpKc/6Wh6NGi6e\nD2NoZWhWkhjoIOlU9Nod9jbS1NopHjl2DK3JSa9SoJguYpzSU8/msDisVOsSys46nWqFwLETOCcO\nEzqZIDlzG/vQICaNEWtflJ1UB4vdzLeeu8PTnxpDQOL21XXcdi8Bu8zAoI3a0jt4Dx3l2nyZF747\nD4IeYzDC2UdM6IwiNleU7NVZxqI+Dj08wV5ZoFbr0FCZ6H/4YfLLy/TabSLnz9MulUjPzqIzmahs\nb9PM54k9/jjmn2Jol19evk9EYF8nVFhbwzUy8jPX/hfFg3xLnAReuffza8BpPiQZSSar3LmTplRq\nYbHomJryEYn88KKk03Vefz1OLtcA4O7dNE8+OcjTTw8zOupiZSVPtdqh15N5+OEI83MZ8oUmgiBy\n8mT43lFQk2DQgo0UUn4PZAWfVc+Jr1zEqm+wXckhdOt02gZuvzpHKV1AsDgxWc3spPJ8/7vzeLwW\nXv3ePLHxIH1TIkadjKQSsMcGkZdzuGP/D3nvGSTJed55/jKzvPeuu6raezs9Mz2uB2YwMDQ4iNJK\nS1EnyqxOp927jY3buI8XQd6ni71QaLUKKbQX0koh7ylBADUACDMw423PtPddXd57m1X3oYEBQQAk\nQPA4xN4/oiO6Kyu73+g3K/N5n/dvPIzP+jFoRVrhNaZ6TOypjbSqXZQSKro7UC03UGuU2AMe6g49\ni3eiGLQCOrOB88/NIX17kTsX73DimWMsPDaMXq/ALJXpVEpMnhyhsLOF2tuL5oSBQqFGtdLA4rfy\nrf/0x7TrVbRKgWalyvZ+mZQ+jbrQ4P79FMVMHrnRILO5SSWVwu4y4XVY2VveRbmxTmD8NOVyg1xD\nydB0L/GtPVqtNoNDDixWLb1np3AM+X9gS/3zjHq9zre//W1+8zd/82EP5ROjq6sLvV7PxsbGT5xT\nbHp9/UEhAkCn8+CmZ7Z+sBvaarXJZqsIgoDNpn2gqigU6tRqh54QkiTgcumpVlvodAoCATM6nQqX\nS0/QpyZ+kEGhFPH67TgcOpLJEkajClFQc/tWlGqhhMVuwKRps3n5FoFTbjS+IEurVYqLMTxeI2aH\nhTf/ZZ+SrCGVa6HqGPjl/+PnSe+FiWRFqj0naJXLeGY6mOQUFqeFZCJDb8DI1HwvS/eS6Ixa9Hol\nq7e32bp2j+OTRiqhbWZ+7qfZjtRRaDQIQoPl6xuYjWqUopZGLkNxdwvNyFnG53qwDA6xfnGHoRkt\n9b1V7v/5X6JzOJFsGg4OZArFGqJKh95ioFLr4LCqGB7zoDSZMbTsmBwWPG49SpWCfK5ES63m7v0M\nSqOMxaKl0ZCxWjW49Q0MzRhCyYLC2vWxD6NGqUQtl0NSq+manyd+5w6NUgmFVotrYgKD1/uprw+T\nSc0jjwQJh4uk0xU0GgWiKHD/fgKDQYVaLVGttqhWmmze2yN0a5HwTpwjxwMYKhE8E5M8+WQ/giix\nux4hH6tQLRTpOuZjZyWEvduNICh49tlB4FCNMupXcO0v3uDUqQBqrRKtTonDbSYPqNVKUqky9WoF\njcvHaMCFwetC4Q4gNBrozAYUDSMCHRrZJOtvJrEE+pgYHaVRrqI0WdB2Bbny7SsM2n3UutTUK03G\nutrMTU/SEpRMTPuQVy+xdjWN7qiddLGO0WpCVKmw+sxEIgVa5SI79+6wsZqieTeMWrdC75NPUkJB\nLFZm6Gw/tv5+Op0OpViMjcXFD3SymuUyhVDoY4uRRrn8odfazean6m59EjzMYsQCbL/7fR4Y/yQn\n5fM13nprj1zukDBVKNTJ5+s8+WQ/Dsdh9sPubu5BMBAcqmdu344xN+fDZtORTh+g0SjwGhpUN1YY\naOWZHvLRc2wQd8BNLFZibS3N9LQbW6XC8NwIjVIRk9OGUU6x953buEcGePPGKmML/TSLIkqFhH1o\nEFlSE4lWaFRqHJsPUi2VuXt5ja7npmnls0yMD5NuKrHNP0qXOc/6tWVsZonoXpLxLzxJvqRBaekh\nsxmnWq7hcuqotwRUvn5eeeuAaqHM+o0VjhclfuqcjflRJWqLE60Qxi4XUbT0XHtpkVJLhdOqZOmt\nW9h0bTpI9J85Rt+JY+y/8iJKmhjsOvR6BaVSHUmhZXnxgAGPmVRi55DYxGHWh0KnI7UfY3CwzXC/\nEZWihH/MztZWjldf3cFj6qahKKChSVevm/75aVxT4//dh+VdvHiRsbExPD+hwVMfh1OnTnH58uWf\nuGLko25unXabtix/4LVstsrVq2ESiTKCAF1dJo4f92EwqNFolCgUIq1WG0kSCQQshEJ5LBbtg0JE\nTO2y9cYrZEMxlEqR2mA/ncceQx/s5+TJbm7ejGKymtBrBLq7LTQKeYq5MoIzwOv/cBm9WYcqWyae\nUbBYN7J6ZwdHwIt7dJi7lw+4UMlz5pF+/vnFLfI7mxikGrTbjI27GVPksPd3c/fFe8SSDd58eQPJ\nYEYptrEaBCSjGWevj51wiPBOnMUtgZoYY+GEm56gmVgohcasQ6xUaDcbdHcZcHUPYhkeIjjRx8bl\nO9xZ3kPV1Ue9WqYld5hfGEAWJEJr+8iSmkC/B4ppJqa7uLeawzw4QHcoRLtWotZok8m3sU8GWNqq\nkkymOXs2wMKCH0UpQeLGLQSFTDQhkVo5XHWbv0dFltvbI3z1KvViEVGhwDYwQO8TTyA3Gii1WlR6\n/ae+NprVKum1NbI7OwgqLRq1i/ubh8ecTh0HB8UH5OZ0PMfIEQtLl3cpl+rsbyZ4+sl+PF4Fm4UO\nF1/fJLST5Oh8kL4RL2a9SKEo87d/cx+T3QSCgE6nJBi0kiq06RvykM63cQa8NJIRlM0i3h4/iaaF\nzYMm4fg6x04PsbOb48I7WbyuIGtvXKVRazAx7MfZK9KqVUldvY7t6WfYeulNJKuL5H6MTDzD6Bef\n5ubFyxh0Ovr9faxlOkQjWaLxGrFIFqdCYPLnvkZFYUbZ2UejkJFpoVGJ2MxKhHoZ2m2alSqNhozT\n70JXCJELpaFmoRBRYvL5EAQBudmk024jiCIqgwFBFGmUy8jfvQj4HliCQcrx+Ade01itaH7ELq4P\nU983BcjAMjALqIBr33X8G7lcjitXrvDGG28Qi8UwGAyUSgIrK2nq9RSyXEGh0NFoyLRaGQShjsFg\n4t69OJ1OFo2mRaulQq9X4vXK+P1q3G7HoVQ3s8P+9Uuk1kLkknkyuTgaqcTA1BhOtwm1uoQk58he\nucTm898inoiQCu0RmJ4hen+Fdv8Qks+PXG3gm5pCcFnQ+900GgoymSp9faCQcwyM9mJ1Wxka09E/\nYsKo05HIygT7RGLrK9TTRSRRRHCZuHLrgNhuAdHipmvciWRQY3H5OPqlBV65EUVrlHC6XQiSRFuq\n4u7W0drdQNkoUGhWSWeyCDoPb1/coGfCwebly1BvIxnMuMY9tKmjU6nJ10Qkh5aGJOHr9uI+coxL\nb98lvrfHwMwEej8/gZ4AACAASURBVIsRWVFDa1bSKLcRRInucQcaRRU9Mn1nTxKLx9m+fp1hj5K+\nPgvGgAXLYA+Dp04SmBxif3+fXC6H2WymnEyyvrRENpPB8W57dnd3l1wuh+XdC3p3d5ff/u3f5hvf\n+MaP9SL8LPjd3/1dpqamOHv27MMeyqdCJBLh1q1bfPnLX37YQwHgm9/8Jt/4xjdot1oUDg4+cMzg\n9eKamHiQN9PpdLh0KfTAy6bVOiSjS5JIV5cJrVZBsVgnmz28MQ8M2LDZtLjderxeI5N9SnZe+heu\nv3yL7bUIyWSZVr2G1WHC7PMgNCr4nRJOrwWDWU9bbmG165HqRQKDXl7+67c5fX4CZSGCza5jc7cE\ncot2rUyn3UGlViLRwmQzsbGRYvjIEEpJoFkuUq22mfvyWTqZCI6hQfJViY3tAnqLkXIqQy5dZPJY\nPz22JomDFN2jvaitNgRBIBXJMn+6n53NBGajGv9IgKFjw0xOdxO+cYuV1y/RNTbIznYGuVzk4O4S\n/rkZTH0DNOMHjE14OPLEcTw+E1aLgrWDNn3jQZKxIvuRKifPTzI21UW+rmDokVNs5PTsh0p0OpBM\nVjj3aID23j2EWolWpUyzXKZVq9Fut7H29T2Yn0a5zO7Fi9SyWeh06MgylVQKrcWCJRj8gDvze/P+\ng9DpdDi4u0x4L0NJVhMLpbn4929idtlYXC/jdOoxm9U4nXq0WiXDAxYi6ztsrkRoNmRMRgXjEx5S\n+3FC+zm0Rh0KtYZ8rsaxYz7SiRy3F1O0RA2NWgOzWcfOTo6BfhuSSkWhAmIhTlefh2q1ic5mpWd+\njr6FU6TyHVRKkdEJLwfhEqFwGW+/j5kTQ7TqdXrmjzD21OOklhYR1FrKsSiJpSVMdgupvQj5ZB6d\nUYc92I2iWaSr10VZYWNpMYZSqcBkNdLRGhH0FgJ+I2uLu5itBrx9fuw2NYOjHnpMFTTtMjq7Dc9g\nAE2rxMGNW+RyVczKOo1kDJ3TidpoRBBFqpkMkkpFMRymkkxi8Hhwjo19bJdLZTIh12o0K5XDQs3p\nPHQM/wFbNJ1Oh2omQ6NUQlKrEUWRb37zmwDf/Kj3P8yl62Xg14G/Bc4Bf/S9b/jP//k/f+ikzc00\nAGr1BwmDLlcXPT2HbaauLhPZrBuDAbq6DmXAh5yQPoDDZMsdkXCiiCgJSJJAj9OEuVGnnExi9vvp\n6elh7623WFtcxD0xjmp7m2o2SyEcpf/Rs1x6Z5daQ8Zk0tGvVzA2O83ORhJdp8rcpI2hQSvr60k0\nkszRcQOPPDJEtaXg26/FKBVl8vEMqy+sUy+XOT4fYHasl1u3t0kXUsgGB+ubQFPHyGyQIYOLteX7\nFJIZdOoozm4HLpcTZUXGNztGuwO3/+EFXD1+IskGVn8Xjf0oyWv3EVUqZK+b8UkPyys5HH6Z0H4e\njcbF5MI4nn4fr3zrJtHVKCPHR0mma0SKKnazbqxGBfM/P0Xs1k0mgxpC//gvmE+epByJULhzH593\nhOTdq4Rf3Mc7Okjb6acY9AM+enp6kJtNDq5cIbOxQatep6PXk5AkXGNjH4qy/zxG21+4cIE/+ZM/\nedjD+NQ4deoUf/AHf/Cwh/Eh2AYGaJRKZLe3abda6J1OfMePfyDYq1CoE49/uG28t5djZsaNSqXg\n5En/A9WdxaLhiSd6kSQRpULg4MZN7l3dJLJzKDUtFypUynWmnlWw+/rrlLIFYpE8ppqCYxOj3FYo\nMKrqDEzOYbXrmTh/itGZIK/9p+dp6Ww0RD/htT0cXXY8Jg2aepP+US86o4KRARPVyB6SUom1tw+1\nyUit1iS9uorhqB+9tsEzz81y916CTkmDVq3l5KkgmTtX2NjM0AlUeftWlPPn+xE1Mmazkq/92hnW\ntsrsb6fQNNXcee0WYipFs1wivbuP68gczVwvp37+WeL3Frn5B3+ExaZH++RZdO4e1mMCkTgcRKuI\nmjDh9T1MJiVvv11mZCZAXmkkExLYD73v9Gm1alBJbYrFIvm9PUqxGHKziaRSIUgSrUcfRaXTAVDP\n52kUvscltNOhGIng/AhH3E+CeCjNyy9vElqPkMtVkNQaBoYGKYV2UakGuHhxD7fbwLPPDiGKAi/8\n0zKpWBGVSsRoUNMWFKgNepqilnY+jyBJ2F1Gbr6zya2bEoNDDgwO6Op1k4nn2N3JsLySYmDARrsN\nN25k+Y2fHqGcj+B/9BzVRpuSyskr/+0tWnonqWQFl88KHRmDUGb9VoSoUYWn/xgZnYt4ooRnagrW\ntlh75SL5dAmfJGG2aFFq1WilJha/neuru3SUWpKRDOmlu5isRuJREcfQINuxBt2SmblpJ5vRDpVG\nC6mWZsDTjdtrpKb2kHn7HQSjlfDly1j8frxBKyazlla1SmZzE5PPh9poxBwIsPinf0ollTqcv0YD\nS0/Px7qxqnQ6gmfPUkmnabdaaG02FOrv7+/VqFSI3LhBYX+ftiyjczjonp//vuc8zGLkNlAD3nz3\n+0/EF3E69Vgs6gfbNHBo8+t2v+/yOTbmIJerEY+XaLc72GxaZmc9D8hvZrOWbp+ebNBCrS5jMqpw\nOHUPgrXkVotaLkcpHj+sJLNZzIEA1r4+qpU65uNTmLfKxC/dpGHQM7UwQY+1iFLaQTPQRbWtYnl5\nk+zV24cEuflBVv7uPl3HjtDX182f/9ldxofMuPxOMvsN9rbjzJwYYGzUSaymQ9IIJBfjJGN5Bkc9\nVMtVVK0i1UwaSSdxkIrjc85iO+pk/1sv0nfuHDavC/fMDGqti7WlMM6pcXwTa5SyRRQaLTSqjI87\nsPT10WPuxWkW6HIoufhPV9lfDWG0GfFOTfKXf3YbtdmCwaThIFLCaFbz5V86R/LNCww89RR6t5v1\nF15A0Bqwu/PEMlFUSoHCwQGtqkR+9R7t+UFESaJwcEByeZnOu232RqlE7NYt9C4X+s+J+uTjsLe3\nRyqV4siRIw97KJ8a09PTbG9vk8/nMf8ICWifFQq1mu75eRwjI7RbLTQWy4cSRpVK8QNbsO9BrX6f\nyN5qtanVWlSrzXcdl7W4XAbqxSLVRgdZeN9IC0Bt0NEoVwitbVOTRUxGFRaTgKRJMvTVk5TLDcyd\nDOSTjE76aFSqZPJ1qqFNxp8bp1Zyk4mmERQqavvbDD7Vj6RVc/+tKOnNbQqRKK1Gg54jE4izcww8\n+zOs7Ld45XIElVrFxKSHricCZPfD6KiyX5aZ/eLjrMSViK0G3/mbN/m5X1nAZDdz+1aYrTubBAc9\nqDUi6ZSJvqPnCCpLGKwGhEKC9WiO5UyY7P372AaHMNuNrC3HGDKvcGJhjo2tPFJ7m06lSDSUIdSU\nMevBpGpg6fLR5dBzsF5Gozfg9FqZmHBh81jJKpUUwuEHn2e5XqfTbtOsVB4UI6JKhahU0m590KxO\nZfjhXZiXV1Psb0Rpt2SKhTqNZoVmq8OxWQdSRSSXq727AK1hMqkZm/ShEpp49vZp1mqMHwni7XWz\nEldhokYkUqQtdxibH+HIcR8ej5FQbp9isYFCpcRsFfEHLXR3GREkiWSyTDKeR05kiF+4RO/cBOH7\nOW7/yzsEjx9h89YOk9Nu5GqHVjpGaCeJ26GmFg1xdPwpSIRQ2A0MPH4WrdlI/N4SereHWCiFTtdG\n7XCxu3qAUjw08cweRLF7bLRzcdBYCV26xNCjJ4ncWaSRCDN3YgG1N0hmfY3ivaschCW0JiO1bAa7\n24Vep6RdSKNouejIKqq5HMLmJgavF4PbTSkaRd8dQLK60OhUqLWHOU724eGPzR8SRPFjTe0+Cum1\nNdKrqw9+LobDRG58/0f8Zy1GfpmP6Gh8CnxqfaHZrGFhIfghAqvDoXvwHotFyxNP9JJKVWi3D29G\n3+0CWq02yct6Ioka9XKNhEKkWmsxNBk8DM67cpWyrKHREqnVZTR2B51GnUI4jNsXoKYyk1N5CTz+\nJM5uNxpDh/t/8ReEF5fRujxMPvs089Nuep0zGLQCte1lIneWUKkl+h7vwSOlye1mePQLp4ncuEk5\nlaaQLTD5yEnKS1VCmzHG5vrY3UwwPGTl1ht3mZtxo5VkUnsh/IM+5o4H8E90U7gZpJItcPrf/RtW\n37yB2IiQ29nlXqPI8PlH2b56GKKVztQIDE9jdxgYmXWhMxtY+8s/pd8DLIwTHO+n2JHQyQUMgohK\nUtBQqFi8fcDjJ53k9vYoHhxg7e2lnssx9MgjJFbWaOfiCJIKtdWITIdUKMruWpjeUT+VZPLBjes9\nNCsVGoXC574Yeemll3jyyScfOFF+nqBUKpmbm+PatWuc/4TmUz9OfL/2r06nYmjIzo0bkQemaEql\nyOioA0kSaTZlLl8OPYieB9jby3PuXB9Ws4aOQoN/bppCNI7RZsLe7WT07HHyiSSbO3kQJHg3MmJg\nRItcLmFql8hvrlPIlhgY7KWczjB8boHQlesUFq/w+JkTiK4TmAJB5Ckz8UgGZ5eDo4+O8/r2Biql\ngH9kgJMLQVLrW7SMPmRRyfmnhrlxM8bF17fxefWcP9dD15CeeL7D25fCrL9zk3o+j0KjRanVUM9n\n6aDg2a+dJLS4THQzhCfYTXCyn+W/+xvqqSSSxYFQaGEc7+fORpTASIC2qkM2WcC4tkO95qSuMKGW\nOlRqMo99YZLFa9s4LApUjSKjvSosigxJfRbBCMMzvRw/04NWq8La34/B46EYiRzmdfn9WHt7qWWz\nDz7POrsda38/yaWlB9WeymjE0tf3Q10LzaZMKttEZTRSz+WQlEokuUO9WscUCJK5lMfrNXLkiBet\nVsHmZoZQqIDebsPitjE5aiUYNLG+U2Fr+1B1Mzxg5sa10KEVvyCyvpljetrN/fsJEgmZYqlOMGjB\n7tRRr7c5dbKbai7LwUacriPHGT0zyJ0/fAeVTkuz1WH0SD+NdIq+gAXztAerHlqlAhNzPbjdWg42\njTSbetShGHqrGUmC9MYmXf1eFDoDbVc32bVFRs6dJroRYmbCyhuhEHKzRatSRaFWMTHbQ/3qPcqV\nKnvP/y2+02dpVmUaghanTk0ul6X71CkklQr74CDZ7W0a5fJhkF4qdZhkff065kCA8HaM9eU09UYL\njUaBv9uMW6ul/SMipHY6HXK7ux96vZxMft/zPmsx8n/y2YqRHwper/EBS/6jpL1w6Eni8310lRcO\nF1mPifQtnCJ27z71YpmWxoJ1+uihZ39Zwfqrr0OrjsbqopmO4ZuaZjDgR+d2Y+i20nl8mM2lME6X\njsiVV2nXq+jUAkK7STFXoHMQQp1Lc//NaxQKDXwD3URv3cI+fxartk0xleHW2zVmHjmNr8tIvthi\nPdLGQJGAR0nPqIuv/MwExVKD3c0UlfgeJ47Y8f3MGOWawMa9PeKhFL6hLzFwIkBse4cX/+YqVpuB\ns+cGyZbAEAjyzJmjJLZ2ydcUvLXeJnflGr/4b04wddxI9+nTyBcvMj9pAClDZjdOKxMnlc+j0Sew\nDQ6RyDSQ221K4TCVRAKN2UxgYYHc7i7NQo52KU+xUEOtkvB0+6nUYH2rgM5W+sjVkKhUIv2AFt/n\nARcuXOArX/nKwx7GD42TJ09y6dKln8hi5AdhYsKJVqtgZyeHKAoMDtoIBg+5R8lkmYODD24T5PN1\nQqE8DocXV7eDdW+Q8//7/0zmzg0ye/tkI1Fa1Rp2s4JMCeSWTHQnhsupoW9O5CAs8up9kbU7KYID\nCp77uRna9zYZfHwBg0lHqVClngijHwzw//z286gdLrq6TIwN6DnzSB80vTQTYXJXXqdh76XsypIt\ntekbcqM47mN2yonVKNAsl4lmtKwuhjArmgxM9ZFPZPAEnQz2GLB3eZjRl3npv/wpS+/cRWu1snp9\njXoyxuj8LDvX7qKVWth1AjqpzOM/NU85GqEk1+kbPezYmp02PBYDbscoq8txfHYdC48NIBRSNPaW\n8eoyNDJp5rprKG1NvNY8Nuvh51VrsdB94sQD3xBBFKkVCh9o2QuCgG9uDr3TSTEcRqnXY+np+VSr\n6u+GQiFitmgx+/0URRG3pKLWEvCPBPAN+HlEaSEYNONy6Vlfz/DOOyEODgp4PHoEQSASLTM+7mB/\nLYJWpyZfLXPjao5TC31E9pLsrB4wOttDvdEhGLRgMqlZWAig0Sh48cU1fvZfjfPSv6xjMasQVRpu\nX97A5nNw7PQAOocTUaOjFtpEKxhQVRrMB6HgtqEUbSj1Bv7pzy5RSmcZGzSx8eobHH/qGF3Hj1Op\ngdpkwDs+ypW3NtEPz/DO1RgLx51UVm9z4kQP4bAFSRLp6zXR62xzbWcLo9OB4LRg9ncTXTygy68D\nQaZZqaDUaqkXCnjeNTSU1GpatRre2VlajQYCUENNXtY+8GtpNhpsbWfxDPei/hERUgVBQKn9sIpS\n/AGChk9SjNz7Psc+Wgv0Y4AkiRgMP5wDXDpdIZ+v09A6sM0/iqLTpI6GqmQinS6SjuSIrO/j8Jrp\nmhzDYDxCJZlk/eo98tVlJN11Rs9M4XcpiNy6g17ooNFrkXwOFAYj+8t7zD1zmvXnn8fkcaOyQ0tU\nYjDrKWysMTLt586rccK7aQrVu/TMHyEWzhNdXEIqJ7EGfNy/H2e0Tw8qLfZuN+FdLcnwoWPra69s\nMDLmISG22VoKoTAYsJbSZCKpQ6dEnZlCPEPxXpi22sT2QYdLV0JEwjkMBg0vPL+KQq/HqNbTkVTU\nsllit26h653FYVOTq4rUSlVq6SRTcwPYTQrKs7NsvfQSud1d+s6fZ+355+k6fpx8PEVHWaHegkq+\niPfxBSI5mWi0xFh/N3q3+30mtiBgCQbR/xAeAz9JaDQavPbaa/z+7//+wx7KD41Tp07xe7/3ew97\nGJ8KkUiBjY0M5XKT3l4Ljz4aRKP5oKHee6TW78V7ad/Wbh+PfFHD1htvUc1m0bndFNJFDC4nUjOM\nVmskuZ+iVS5i7F5A1Oj4q798h3tX1qgXCkR2E9i7nZw8dZw3/+uf0GqD2eXgyFd/it3NGLKoRmmy\nUqk2yGeqRC6/hV3IktyL4pkYxdSt5GAjRDhaodhQsXIvxMxsF//19y8RD2c4/9wcHr+H3P4BvUET\nZWOTowt9aDxevnNhjUKxQcPkwztcYufWEl1jAyy/fZvxhRlCuxmquTy9QSMdrQnr7EkERwwPRQqF\nJnr/MCqzkY2NNP/wN3eQ81kcdjU6VYeTk3oG1Um2XnibVqWC/8wZ2rkk5ZgeudFA1GoxeL0kV1cp\nRaOH/9ROB0tv74c+zwq1GvvgIPbBwc8854IgMNRnZOXNJOVUCrleR2cwMjvXRb3VYXraTX+/DaVS\nIp8/ND7zeo28Z6FzZNrB0qX7yPkMDWTahSKu/hGK+Sp0OsQSFeJv7HHtepRMpsrkpJOtrQy/8AvT\nzM56SabKWA0CW3fWGRhxY5c7RFIy554aY3v1VRwuC7jGSJcllC2B5l4Yc/0ArdPG7eU0paJIJR4j\nLWUoZUus3d5GJba5eWMPp0mi3mjz1uv7bG8ksVo1mLwTbH8njbC3i8tuwaRXYKo5Sa80kMtFMvk8\nw1/6Ao22iEVRRlVpoDB7sQ8O0pFl2s0mtVyOrhMnMHV1Eb5+nfT6OqVIBFGlwjhdRukcpmdugsTG\nDnJLxuJzYhqe/JGqH+3Dw4fcove6LYKAbWDg+57zSf66C3gayH7EsUufcow/EbBYNAjCoeS3WgUQ\nUKlapLM1Lr6xg7qW4ua1PRa+dJzdN9+h5+wZ3vr7t6lLOtbX0ggaHWvrac790pcYGCshVnXkNmoo\nvU7qpRJCTo3W34tjZo7itTsUozkGxgfwjfYSXrrLwInT9HztFGubWbwBO7s5qJdK7G9GGBtxUG4o\nWL+/w0DXMMm9JB2lhvlHhnDbFLz92gYTM124/S7imSYHoRz37sb44hkfvYNORr94npdf26ctg9Xa\nIfxWFK1Bh0qroiMoqAlaDhJVbt+JMeaosPHWLfrGAoeMd3GRs6fnWYt0SCSrDI47efqr04jhZYKP\nPYbJ7ye7vY3GasV79Chaq5WZr/8iodV9GrUGgRPzJNQ+qrEKSuWhIVLPo4+S29ujns+jd7kwBwKf\ne8nv5cuXGRgYwPUxuvzPA06cOMHXv/512u3252KrKRIp8uqrOw+C88LhAqlUhTNnAh8wb7NYNJhM\navL59zllarWEzablzTf3CO8mULVKuGiiaWaR5DySt5fwboLeo8dAqUJtNmH2dWEOBNndzRMO5ZFo\no7dZMdrMXP7Wm0wNf4Ev/MdfpQ2Iah2likxL0jP26DF2N+JIFgtFRPrPnkKR2EDXHcAeDGAcn+T2\ny3sYnXYK5SYOi5LLb22TjGRAbnHjzRX+1VdnGFoYQm/SoqQFOgvP//0S2eW7iCY7y28v8ciTYzgT\nCYRmHYVSolauETnIolHCblwmrWihVmUIH1Tp67Wz8MUZQhkJvVImlSghqbV0DB2KlQr5dInkWDdj\nfgO5ly6AIKAyGNB4u8HiJV9uY9d0UOn19Jw9S25/n1omg87pxBwI/EAy42eFphxl4ZiNeJ8BWe7g\ndmowt0L0zD4CKi1ra2m2t7Nks1VGRhzs7+cIhQqH14KqRT4ax6hX0m7J1LNZ4itrDE8HEAQjx7qd\nXHwrRE+PBa1WQSRSwmrVceNGhJMnuwkfFKmVyjicOsKJBoW8REsso9aoeOYXzpKtKPinv7iKQmjR\nKkgsr4c4cqKPniEvQrFEZukW1VSBM888RjmdpZIrkg9H8FlE9HYLka0oXlMT3xOjRA7yJJNVRs4/\nQimVg1oRs1lLq1ZFpdXSPX8cjc2GbWCAdrNJqZ6lHO9g6elh4KmnKMXjFEIhNFYr9uFhipEI4atX\naR0+5OjIMqm1DQzWXlrdkwSCQ4i0qaJFMNp+pHNm6elBEEUyGxvIjQbWvr4fuFX3SZ4KLwIGDkmm\n34uLP8Q4Hzr8fhN+v5lQKE+nc9gKDATMJJMVWqIKvcGMqFIhiAJtlZbEQRJRo2F3PU2l2kTRqbFy\nN8RUqs7w9HHk1St452aRRBGlwUDA3M39O/vY3EFUAzA4paZRqbB1/T59xyYJ3VmimK+wvlWEk9OY\nB6bQ6lWASKkloRRUDE8FqJdKaMxmdg5qROL7/Pt/e5Qtt47+fhu5gwiNUoTghIdmu0ZDaeD0LzzH\n3/7jFq+9eA+lWsnkwhS9owZuX9tkdtZLLNMmcpDjsSdHCO9n0EXDbKwn8fV50NrtZNZWsbdlzp15\nFMHoIzgxgGfQRStooZpK4RodRW0y0W61kGs1arkclUQCUa0FtYWy0kYsXsFoPFydwOH+v2dq6uFO\n+I8YFy5c4Omnn37Yw/hMcLlcOBwOVlZWGB//RBY/DxVbW5kPJPh2OrC7m2N01IHD8b5vhcmkYX6+\nmxs3whSLDdRqBRMTTu7fT5BKVUitrtGsVtF0ahwbGCd/9ypGcwaz3kI0lMR2dIGqQcZpMkA2DKIf\npcGA1mal2QS1TkOjJFBKZancvIuuy8/lG0nSmQo1fTeiUsmTP3cGuVblxtvrzDx3BrdigtReBJPX\nQ7iiZ2/zKgFBZGZ2mMVkgmy+jiSJtKpNrDYdq5cWaWV9fPXfnieWavJHf3SHdrNJOV3AH+xBJbbZ\nWIkyOeAnfG+Z/ifOoFAradVqmD0uEpkG1rle9nczYLARly2U21qUifscbOyi3C/y2GSQzZybbLJA\nNQVIChSKNrbBQRBFZPcgl5eLtPVl9Ptb9PZaOHrUh9pkwj0x8WOd+2I4DIkwXVotgiDQTFWpKBS0\najWWVgrcvRuj0zlUWi0tJfD7TRQKdaxWDZpWnr5BN5lYlmKtQ6kqoxKrBLsN3FpMk87U2VjPEIkW\nsVi0WCwaKpUmoihiNKrp67Px4nqEnXtRavkiarsLi9NDpq5idzNKNi+T3T+gmi+Qt+qx+Xwk21bs\nsyfoqW1x84U3OPHlBUxmFQOjXjQmE2qhTvyd64x94QlWbm+ze/U24wsw3mtFIbRIhVPEYmW63Gr2\nb9wmH01g/6WfQVQoqKQP1aSBU6cYefZZOoBKr0fv8WB496tZLiNwSDA2BwLk9/YeOKgaDErMVgPh\naJ0MoNGoGOwCIbFFJHUopTd6vZ/ZnVl4twv+aRKBP0kx8ivf59hPflTpR0CnU/HII0EikSKlUuNQ\nuqaS+Od/XiNZlNB6HZz46ScQWyUU6NA6nCg0asqVFnKrjUqS0JiNlCoy6zdWCCibpLJNmuYuzEYX\n3VYLzq4KsWQNQZNmZ3EFjUJmbOEoLYuTwvVNJJ2O4Hg/CcEDByVmj/XQTCfIp/IMjHow6QSiuxH0\n3UGqyRIOk0QxkWBq3MJbf/c6l56/AgKYHGa+/OvPUcyVKAo20CTwj/WjNuhoyiKlQoXZ+R66fXoi\nsQpzJ3rJl2X6PBKVfJ10LEezo8B79gkEi4d2uQC1EvZgF9bgodRLoVI9yC54z0bce/QoG9/+NuVw\nCJVSjXfmGLis9JskRked2O267zcFn2tcuHCB3/md33nYw/jMeI838nkoRr67EHkPrVabZvPDWzI9\nPRbcbj35fB21WiKXq5HJVJHrDRrFInKzRVWlpNDRoNTpqKZS9J4YwHlkHtHqot+noL67Qi2Vxdnf\nRW+PmZBWSzZbp62TGDqqx6iSSRcKhOtF4ntxLL19ePxuDlZ2ufrSTY6O6nnq8W5qa7d58YWXcXY7\nUdg9CCYHI6MuNDqRXjeUB5xEki3k/gA6RQuHETQqiYFTR3jnZo5yW43Kaqcjyxh0MzSKWUYWjlHL\nJjE62sw8vcDEkwuoLRbGzi/QNdhNa6XAOzdS9I10Uao0UDdFolthLOktxFIGdaPO9pXLHPvyU7xZ\n1iKrJYYGrOjVMXxHj6JyeLkXl2jqnSjUaiqVJsvLSfR6JcGg5d3O8o8vSuA9/sl7K3zgkB/RUbK9\nHafTOUxzDocPuyE2m47f+I2jSJKIV53CZhC5fF0isxila7iP6RkPBr3E+nqGVLbG5JSb/VCBRKJM\nV5cJu13LNyu/NQAAIABJREFUqVPd9PZayGSqDI51Ua/WSe2GOXKmh5OPj1KuyoxM+bn45gH2vgB7\n1+9QKjXQVBo0RC2vvRPjxPwQz/6vP0vq1lVe+L9eYmRukC5vGePIAI/+x39PRZbomZsim8gR3gih\n0qVw9vkJnj5J45+f5+D6fYZm+hg8c5TU8n0K+/to7Xa6jh2jnEw+sNgXfT467TaRmzdJra7SbjaR\nNBqs726hqQwGWtXqYT6N14tnMkDTUCeXq9HvbFNZvUGyfdhJlL5L0fZpUCo1kOU2JpP6h742Pt/9\n8s8ArVZJf//7ralKpYFOp6ZUanB3ucbw0AiBHjV+TZZaR03f0Wlu30nSpoXKYGT6/DypXBOXq8lm\nWslKRM3yC8u0pU2e+NIEx+Y8/NMr9+nUdATHjuPpthKpt4mvVOibPY7SYCKWhXuXUqiNNYxihV/9\nD+epJqKIHZnlzTKN/n52Em0qpRaDvSaSuRbdxiZOfZu+UR+iUoXLYyaxtITN7yGV0TJ2pJelzQrN\ntkw7lSbRLDHzZA8TAZn+nhGu3i3SqFYop/PsLIV57Nd/nlC+zp1X1ug/MkX3cBBnwE7P1BCS8v39\n+FisyOpqikymRk+PBWs9hfHdKhpBoFWpYGiEOfr4oz/+yfwxIhqNsru7y4kTJx72UD4zTp06xaVL\nl/i1X/u1hz2UH4hAwMzeXu6BHBcOt2Ss1o+OG9BqlQ8UdOl0hU4HBIWEqFLRqtUBJaZgD0Z9GbXZ\njL2/n9LuBq31JbQ2G0aPm1JbRtsu8PVfnefyzTSL95N4PAaeecxDbfMOiuF+YjE9kuKAajpJ6WAP\no8OJRiFhsmiRs3FKoT3sY+OEDvK08iWczhZf+dqXuH3hbW7/xV8y94v/I4HRAG9f3KFWq2M2KPF2\nW9neyfPKS+tYA36OHfPxykubmPQqlA0tLpvI2efOMDtmpFausnl9ibTk5eZymaZF5MLLW/TPDLMT\nl0mnKqiiEQZ7h7A5e1FvbGHVKhkK6lAU4xybHyfYNcKEr0Zps4mpqwtF/wzyrTwK4bDQq9db7O/n\nKZUaDAzY0OtVHD3qw2T68RDRbYODFCIRKskkdDooNBpck5NIas0DRVW12qRabSHLbTY3M+h0SpLJ\nMuZ5O3YxybgfPLZuFBJ4/Q5aKgMag46D+xlGrUaefLKfRKLM0JCNiQk3zWaL117bxeHQUa40Of+F\ncSyWI1SqLdLpCpVqm+XlKnqTjkxNg3NyGgQwepwY7VZsNi2376UZGrAjbYvoHhlHqBdp5hvsv35A\nUFKSjmawzZ3kS//bL9Nstbm/lELvsLB3+U0Ksg7HzFFs436kchKlVos5GKT3scfQezxkNw/tZ0Wl\nEufICOV4nNTKygNJtVyrUUmlsPb0UIrHket1lHo93tlZHD0+PD2H+W57r79Gs/3+lqZcrxO/fx9z\nMPiRJNTvRb3e4t69OJubWdrtDm63nrk574Mso0+D/98UIz8oGEynU3HqVDd7ezlWV1Ns7+RRqhyY\nZwdwmtoYPB5+cWCUrdUostpEW2vCrqihMwpcvbTDylaZaqWJwelgdT132PIzSNRaTaRqkd231/GP\n+Jk/OkRoP0eqoESphl/5taMYdBI+XYl6aptGSyCWg/7JHjphmXQtTbfPx0EsSyKaZWGogdOppbfH\nQqsjUa+USSYrHHdYqERr3Lu5wdS4lYOtOIVcmdFTfsa8HRa/9c+c+OknOT4X5I3vbLAVyWPSmZDc\nASzKOFJBTW5rk0atSTQLucYedm0Li01HW2fl9dd3KRYPyUhip8X+6i3c5g4q1fs+EOVolHqhgPpj\ntOr/PeDll1/m3LlzKD7nvBeA06dP81u/9VsPexifCL29FrLZKtvbWZrNNhaLmuPHux8kyn4/2O06\njEYVxSIYvV6ylQp6gwaL3YDKdRyLRUvyzvUHuTj1fB6Dz4dreprs5iZmTYR//VPDPPn0AM1CnvL2\nCnI2ibPbTUCnY/WqSD6cohyLYLAlOf4zzyA0KtTyefKRGKGqibXFEHaXhcReDe9sGO+RWWrZAvFY\niXSlyuSkA4NGxKAV0Og0/Lc/3EBUqrFZVBQLNbxdJirFCjIiKp2eLreaV//v/4KoVDPx7BewuIfI\nVBWkk2l+6T88w5VrMdaWUjRkgaP9Jm7dDJNwazlz5DjylTcx2ywEJh0EH59AljuEQznafSasPisq\nrRaFoki9fijLj0RKRKMlgkELpVKDaLSEQiFy9uwnb8F/FmitVvrOnTskRNbraB2OB0F7Xq+BjY0M\noiigUkksL2dYWAiwuHho/VAuN/lf/qd5BFuYg3e2qMh6tlY7dDZDiAqRmRkPuXwDjVbJkSMepqc9\nbGyk+Ou/XubkST+Dgzb8fjNLS3EmTTreeGsPtUrixW9volaJfPVfTzAw5CAZL9Lba2V61oOGOvdX\nQkSyAq5ZBZ1mnfzuNkarkfjaNs1sGs/MJL7BXsRiiNhrd2iLKiz2ATqlFpmNNVQKAVWhQ/jFy8it\nFhM/+7NY+/vRuVw08nlEpRK9y4VzbAxLTw/JpaUPebtUkkmcExN4Zmdp1WofCjUU6FDL5T5wDoJA\nu9mkWa1+omJkczPD7duxB4uEUqlBu93h3Lm+B3lRnxSf/zvqD0AqVWF1NUU8XsLtNjAy4viAJ8l3\no7fXyte/Ps3SUpJqtYksdyhV2kzN+LHbdchaC1XdYXZNuyUT39rENuVHVhmxdRlQJVIoqKGQRLb3\nipw4O0R28Sav//nb0GmjbecxiyUaqiB//w/3kAT42tc6mDQxbr3+Mh0EAueewmjrIxRr8Kd/co9c\nsYFSkHFaJI7NunD1+0iEluntsxKJlimE0wTG+1FpVDhtHSKRMt3eDuMDWmwnxun3Cmy9+h1UCiWl\nSBjLUDdGkwoRC6dPT2NUtbj2Vxeo1toUGkoKF9fomVhHr/4CV1dDnD5mpy2qEMX3yZodBGqNDqVS\n4wNBeIIoIkgPM2Hg/3tcuHCBZ5555mEP40eCsbEx4vE4yWQS5w8pvfxxQa0+dFYdHnY8CG1Tq7//\n7asty9DpYLVqOXXKz+3bUbTaLrx+B06njuWNBEa7BfvWIsn765TKMpWWhNVpxp7MQ6dDKRqlFI2S\n392l6/g8919+gct/+x1K5TrHzo6h7xrAP9bH+uU7aAx6zE4rowMGtIUsJreBWLtDPZXA69ZjtOvo\nNJVkirC1d0DXWD+h3RqlVIYjk1YOrt4iFU7TO2DnK2f7uZey0Vao+NY/rjE54eLMvAe5pKGYyrKx\nuEvP8TksXheWbjeFg7vMj1iJp22olBWOzLhwd9uoVVtkMmW+8/oafX02Zv7dDJLyMnIli3eoh1Kp\nyRtv7D1QG0krJR59NIjBoOLKlQNUKolYrITNpqW720gyeRg+Gg4XKBbrGI0/nu6I2mj8QLjbezhy\nxEuncxieWi430euVSJJIPl9DoRDRahUkch3aei8XNw6o12vkMmWiB3lOnQlSKNTotDusr+eJx8tY\nrVr++I/vcPy4n8ceC1KptHjppU16ey1cuxZhby+PQiEhCFAuNXjppU2e+x9GGBjxMNsL0ZuXCe+G\nqSRauDx+2opZRLlB79FJBLmJnE2A6VDu3CyXuf6Hf4BraoZCPIGk1nHsN36d5lg3zUyCZmyfeqOB\nxmolv79POZGgls8fBkcGg6i0WgweD8K7hGNBkhAE4dC8s91GkCQUKtXHyqrFd7dtqpkMCAJa2+FO\ngahU0qpWP1Gy9+Zm5gPdSjgMqs1mq596q/5zW4wUi3V2dnKkUhXsdi29vdYPtQ0LhRoXL+6STh/u\nNabTVeLxEufP92EyaT7y9x7uGerI5w8JPxbL4U0vlSqzuZkhHi+TTJbZ2soQsBooV9vYutzsLl2l\nUijTFhWg0tHfO0o5laaRijE+3U2jXCbQbWTz1jrdCz4ee3oSlUZJYfUWy8UwFkFALhZY/Ku/ZujZ\n53DZexkfs/PGq1vUmzUCXg9Bu0wxtI/Z4yK5soJbBcGFQdyzs1x74R3iDTNnFwLYXEY66TCl2D5X\n31jEZxMpS3os2Rb5lQTaZg6zVUn4/hrD/Wby6SIKq4tCpkKn0ya+uYdGqBHdS5EZddKIbmLpO0yq\nBCiUWniGhhHCS7RqNRqlEoIk4Zqa+qFCsD4vkOX/l7s3C5LrPq88f5n35r7vmZVLVda+oYDCvgME\nBIqULFGSLavDUozd7bElhyd69GBPTEdMhMMRfpiwI9o9bzM90zMxbffYrZZsa+MGbiAJgthRQKEW\n1J5Vue973tzuPCRYIsRFlE0RIs8TcJG36p83E/ee//ed75wOFy9e5K/+6q8e91I+FgiCwNGjR3nr\nrbd45plnHvdyPhI+Sgp0p9Uit7JCdmWFbqeDfXAQ//g43qdHqFabNBptXn99i67ejlMvUVrNcOfm\nNp2OjKhWUcxXyWiVBA4d2P2ZolZL/OYN4rduIXfbPaJSKFFdfp6z3/gWswe+SCOXo53Yors1T3Rh\nDuvgIDO/+WW2/49/oh5NYAxY6Tt6gILUZvZgEM+Qh/WNVSweB2/9wyuoOxVsijI7b6+xdfltJr76\nVRSBMdQqBbWqRDaaITY3h8PvJbAviDKdZuvSaxTW/LhGh7n1g+dwT4xREMzkBS+vvrRONi9h0Iko\nNDr0Zh3bsTqzR4+itVqxhsNceiu5S0QAVCqBW7cSgMzx40EymRpOp56+PhPlcnP3wSMI7++E+0nD\nYtHyxBMDFAoN0ukqL764xvp6HoejF4xoNKrR69VEIgXcbgPLy1ky2Qa1eptrVyP80R8fpVSWeMqq\nR1SJlEoNvvOdQzSbHS5fjiCKIsWihNWqJZGooNGISFKbWrFKo96k7TOy+iDFqQMWNl64zO1nL2G1\n6rA5bKSXsxjOT6IbDrP4j/+AotvB5PcTPHaMSjJN7sEDujLI7RbKlkRpc43tV14kNDvL2sVFHGOj\nKJRKKqkUjtFR4rdv067Xid+4gc5moxKLYXkoEjX6fDjHxihFoyiUSgS1Gq3VivEXhHi6JiepZ7PI\n3S6Z5WUKm5tYBwZo5PN49+3Du3fvh57/ft8BpVLxS1dF4FNKRur1Vm9ML9oT8KyuQiRS5Ny5MAbD\nz7xHksnqLhF5Bz1CUv1AMgI9VfbiYoZstkYgYEavV3HnToIXX1wjEikyM+Ph7Nkwd++mEKxOZr1m\n0ksW4psNNHotR870I6haGNRK7s5vEt9K4g04UGq0eA4cQra4MYo6hrwKNtcqNJttBLVAaiuC2uYi\nevc+FV2OE2PT3LkukEp0mZm0kbl9Ba0mR//kAP4D+3s32uERkisbbC9HcE5M8tLFO/hCDiYDsHTp\nOv19WnQmPRVJR6ZlJh5vUIjVaORzzBwbQW82IDr7KFXa1BpdtIKAL2ClWe3tgOq1FiaNAtTyw1Tk\nnlJaY+1ncNhI5NJrtCWpJ3iSZaLXryOo1RjcboxeL5V4nOzqKu1abddSX9R+8LX/dcaNGzfw+XwE\nAoHHvZSPDSdOnPhUkZGPgtzKCttXruy6/0azWTqtFv5Dh9BoRBYW0pRKTdwONbE7d/GPjNHgLvlC\nBQUd7F0R60SAaleLUhRp1etUUym63S4olL0k4XabbqsNyMTn5pEEI4ZmmnYmg2H4KEW1BqnexKg1\nMfWlL9Ktl2krVBRVLpwuI46AHbfPysCgHb2UodHNYTfKpNfiaNRKRNok5u7hVop863f2sL6RQ2cC\njcnI9KSF7I3L1Lce0KlXUEpVRDrMfu4wGytJps4dQKMVMSq8bMTaXH07AgKMjbtR6oy0+iZY2Gqw\nfS2NQtEbfX6nJWM0qrh3L4XFosFm0zEw0DPCevHFVSYmXKjV4q7R3LtdrT9pSFKb7e0SyWQFo1FN\nKGQhFLLi85mYn08/NPWqo9EICILi4YayRjpdBYUSd5+VbrfL3L3eff7IkT4OHHBSLErcuBEjn69z\n7FiIt9/eJpWqkExWCIUs3LubZGrSyeJcFLVFh8WsoZDOMeKz8ubf3SQbz1LJCfgqJRx9Psoba+gF\nBYPnnsDkslPa2aHZaCArFLSlBjqHk06zSaNcxtTXR6fRwDowwOgXvkBubQ2N2YzB7Sa/sdHTgaTT\nqI1GmtUqok6325op7eyQW1ujvLNDq9HANjhI4OjRXzh2rbPZGLxwgeTcHOmFBZzj46h0OjqSRGp+\nHnMggN7h+MDzR0cdJJPVR7x9+vstH6jl+jB8KslIIlEhFiu/51g8XmF4+Gei1PdT2n/YcUlqE42W\nefnldRqNNhqNiEKh4P79JFarjr4+Ey67hoC1iaO5wzOnbPjHXFQXbjIV1jA8MIggd9Em7zJ49ADG\nkI/CoRH0WgVGDZgGBim3XOSK0G7VKRWVqLUa/C4bxftb6CwWWu0mRosBSWqhzG/xnT86yFtvx9g7\nbqBa7CCtbJDXdll98UVKOzH6T59E6/Jw+slpbry9ydNf3c/Keomu3czv/Ltv0YqtkS20CLrCLKVE\nitkiKrMLs8mMaPWwkW0SGvLQ7cpYbUUsJgGTQUU6IyGqRRx2DbqGAdFn580XoiSTVex2Lc98eZR2\npoQ5EKBV7QWX3f0v/wVBFHFOTCBqtbgmJylubfXKgEBxe5taPk/o+PFPVI3/ceG555771I/0/jyO\nHz/On/3Znz3uZXxs6LbbZJaXH40hkGXya2u4JiZQG43odL1sGiVdWrUGVZUN94HDKO8v0mlKeCaG\nEPsHiEfzeJFpFIvYJ6eQOzLuyQqm0ADprTiVZBy9P4xlcg8rNxZotGTCx08ha0SajRZL3/shg4k0\n9jO/wd3VKqLZyt4zfXisoGrmiby+iF2CqqTAbBBopiPoNeBw6ZHbLQZmgkitIrPDApJkotlRcuyr\n53DV19iZL9OWFWQKHdrqFrpsmuEjh7DY9aTvXydTqWIstPGVm/zhNw8QTbfptiQEpcy1KztU6l0K\nkppyucnIiJ1Uqrf5UKmUWAxKWrk0qUyHotmMd8DDuXODgIwoCgwP2xkZ+Xh9KX4ZdLsy169HWVzM\n7FZqVldznDsXJhAwc/iwn3i8gtPZS2t++eV1JiZc7NvnIRIpkU5XMJvV7N8fwGjUMDPjQa1WEouV\nGRmx8+STQ7RaHZxOPZFIEanRZsAq4fEIVI7Y0Zn1/P63DxGLFhkMW5kIKBGbeawOI5MHh9lZjdFp\nSjQSUcwWLfVoguhbb1KO7mDp78cSDBE6exad0cjOrTnahSxqkwlBrcJ34AC1XA6FSkUtl0NtMLDy\n7LM0SiUsgQDNSoVyNIr/yBFMfj9aq5VWvU5ybq5nCPewJdNttcivre1qaz4MKp2OTquF/uciOlq1\nWu/e/iFkZHDQhizLLC9nabU6DAzYGB//4Nd/GD6VZKTRaL+nTyXLvYrJu+F06tHrVdRqLTqdLuWy\nhFarQqd779vO5+tcvbpDLlfnjTci6PUqhoZsOJ16dnbKCILAnmkX7a37LL95k7Io43FqkePD+CeG\nWbn4vyN3ZUZPHiQw0Y+6lsLY0HHyS0dRVPMU8zVK2hA//fE8O4km1XKdr//OfvZODELkHsVEGiUy\nBo8Hh99NM16hIwMKgS89PciYr8vl/7aIUqWiuL1No1RBbTKh0ulYffElRr+k49BhP/pBByfPDKEx\nm0lEc8QUPuqWOsV8A5dfRzDsZnOrREsSuPRWjNPH+5g6PktxZZHwsIua0ohgcRLZzHPo+CBmsY5x\nbIrbOy2MRjUajYBGIxKPZChvb1JcuI1Kp0OWZeI3bmAKBLANDaFQKNi6dAnbQ6MbtcWCpLYRy7SR\n15IEw+73tfH/dcbzzz/PX/zFXzzuZXysOHLkCLdv30aSJDSfAZt+GXbtyh85Lsu9ygbg9RoJBMyU\ny03sA0FK+Spzax08ob0IdLiXFzB0uzwzoGXlTo6pkwfYWtwkc/cOUiZJo1AgsG8a98hx9P3DPCjb\n0e5x4DdUqCSjmGwmzAefIOQbx2jRosms8NT5YUSDEUVnm+zNbbqihmp0C93QXnxDPjzqWXLzAvVC\ngZWFGO5wEI3FxuZrV6i0BIwtAc/UFPawk8LdHbKSFqPRSbPTm3Kpl3sPjsrGBp14HKPRhCTVqNbq\nCPFFQv5xpK5APZthY2GLbrWEoNGiNtsBBSaTmkajTZ9Vpmurc31uHVkGpapXGTp2YYZjx3qj/o97\nI9Frk+cfeQbs7JS4di3K4mKvKhIImFGpBNbXe6+LRksIgpJvfWsP6+s53G4jxWKDtbUC169Habc7\nnD0bJhar4PEYGBqyUSw2GBtzMGirc+uFK2x02+w9OobJq8fo9WJQu1C1q1z7/rNUFBKyQqC2tcbY\nnjEK2zt4x4exBzxEyhKa2XPoJiu0thZAqaSLkuAT5xGMZuqpBHKnl1JtHx2lkkyyc/kyq889h2fv\nXhyjoyz96EcoBBFzfxhTMITc7WJwONA7HFQzGZqVynuuU+Ud5+uPgHf0Iu+GqNUi/gIRqyAoGR11\nMjzsoNuV/0Wtu8dJRp4G/j2QAU79MifabLpHSosAarXwHsGM223g6NEAN27EWFhII8u9/IE7dxLU\n620mJ38m7FlYSJNMVul2ZbrdnjgzEikyOGjHaFTT6XRx6prcub9EOpbF51QTX0mR2Ihx8psmnvzO\nN9he2sTt0DL/t/8PRouRRj5L3759HPntf0VZsPJf/3GTUq6K3WpAb9Ty1mtL7P3DWcITAbQGDaXt\nbXx7Z0hn6rz5wh0OfO0pqJaIpCQ89hDhs6cprq8TvXWLdkfG3h/EPbOX5PI6gqjCaLPQzUcI7w+Q\nTeaoNAW05QS5WzewqWHy+AwFfYhWy8zGepapCRfdyAL/+Ldv0+dWo1K26Z+dZvqJwxw4a8SgaqM2\nGEhXRRI319BoxF3RYL2lRCl1adfr6Ox2ipEIyHIv9bjVQtRoaBQKyLKM2mJlq2xk7uYG9VqT4JaC\nPQebHDrkR6X6dAheM5kMCwsLnDr1S31Vf+1hNBoZGxvj1q1bHDt27HEv518MQRR76dq5HO9+Wpn9\n/t1EUp1OxcmTIba2CigkHZEHUY6fDHH7RpRorILV62DQrUdpsuE8dJKWWsn85Xu0cxnsVj2mfivN\nVoexUyf44fduUKtucuYLe+jEEnTVKt6erxOLNakmJPpGvZy5ME5l7TqJlS1kvZWW2kzoyB68Y3u4\n+jff595LVzj6W5/H6Quhtdk4MLkfSann5k9fJ7hnhDdeuk8hW0b38l2+8Mf/ilxdR0fUEklUMdpc\nNKolDP4ASkGg3WhQjsdBmcQSGqBdryLWs/icUG93ef21Fco1JY1iE0Msjt/jJhAwEQpZkGUo3H6L\nfnONzrFBNlazdLsyA16RiVHrYych76DZ7NBsdh75+8pKFq1WRKcTuXEjzvZ2mYkJB0qlEoNBxRtv\nbBONllCrBcJhK6IIXq+JN96IUKm0OHDAx9///Tw2m45z5wbY2irQ12fCYlbxYDVDWWFmbI8PW78H\nrUFPc3uV/PoS5Y1VCvEyYtCNbWgY/54JlM0Kw4dnsA4Pk25b+OEP7pDfjqMSFRx+YpqR2RDp2zfp\ntNpYA35qiVjP2A0o/PjHBI4fRyGKyJ0O2eVlzKEQY1/5Gp1WG8fsIaSOQIf6bvaX2mBA1Ot3jc3e\ngd7pRO52KUWjlKJRBFHEHAy+b7XE4HajEARiN28iqtWYQyG8s7PvqZZ8EP65OpF343GSkSvAXuDl\nX/ZEj8fA/v0+7t1LUa+30GpFpqbceL3vDWYbHrb3HohqJaCgUmlSLErU6wn6+oxYrTparU4vVror\nEwpZGB93EImUqNVaD/uJfjKZGo1KleROFhUddGoFW1tFHG4La7eW2XPhOPv6g9z5T/8RpaCi0+7Q\nlSTWXnqJ0BPnaLkdDI/YEWixcGMNrUrL4VkH9e0NclUl9r2HsY7PEL1+g0zbyOyXziPpPfzj39xC\n0GiYe1Dld377AkN70nQAWVDjnZlhcyWBemgP2pE9bCSaaN0+ggorhUaNsCHHa5d+jJQrkcuXWb/0\nBie+9WWmDn0ehdxmwKfm4r9/i8x2CmXXTrtapt28R/+YH8WR85jtRvQmDVR7gWTvzPQDFMttwpMT\nxLfu06xUMPp8FCMRjG73bh6BOdSz6m6obdy+tkqjJqE2GekKahYXM/j95t2As193XLx4kbNnz34m\nqgc/jxMnTnD58uXPBBkBcI6P02k2KWxsIMsypr6+3fCwd2AyaZie7t2ULW47qecXOXpGTzYvkU0U\nqRUrPLi9jqDRULcpqGaylHcSFFJatAYtCkHAczxDajPJqaf3sfTCayTSEsGDs7z9wtuYPG5c0wco\n1SrMzWcZ0ekQAuPcupdjZ2kZ10aHg6d6InetVuTmxWsIehOnv3iAwN4Ztu4sYQiP0dWYSEVuozIa\nadbrtLMx5retnH7yPLeefQOrs5+x/cN4pycQpRLi6iru6WkEjY5KJos94EVns5B8+UeoA8N4R6dI\nvLlApdqlUm6gVivp6zNj0XbJrqyQuH6VYiSCd2KCgaNDKAQRnSjRqVVYWmoiSW3cbgNer/GxkROz\nWYPRqN61GiiVJOr1Dj6fCYtFTTRa4e7dJBqNwJ49Lur1Nul0lUKhQSZT4+bNGN/85gx6vcjcXJLT\np/uJRAq7FfdMprYbvOe0q3nlufso5DahiQHuLEs4HQritzYZmxqEzTUMjQSh8UOodRrK21vYx8Yw\nhAYpVOHVl9fQ9/VTl5SotWpSsotcXdlzSZVqzP3nn1LY2MDk8yJodQhKBfHbt/Hu28fmK6/QrNbI\nb0ZIr28z+sWn2Uy0Wbl5nxNnR9DabECvzeLZs4fotWu7xnA6ux3n+DjpxUWi167tjq1nlpYYOHsW\n87t0b3K3S2p+Hp3dTvjMGZq1GnqHA7Pf/4l+xo+TjBR+8UveHwqFgj17PASDZiqVFnq96kNV9vF4\nhUzmUSFrtdqkXG5iteoQRSVutwGlUsH8fAqNRuTAAR/tdgeDQcWxYwE6HZnIUoT9h/vZurdKbDMJ\nXZl2OzI3AAAgAElEQVR6VcIxMkytXENnlCklM6j0WlQqJZIsEzz/JPfjatYfpLhzLYJSqWDfyQns\nGonI65cIz9jYvhMlG89w5Pd+h+mv/xZrKZlKrsLcXBytGqRmg8hKgu//XY2v/+5xgk99jfjdee68\nucTLLyzRPzNCxNYglyrRoIHF68LqNBG7fJG1q3N0OjIoBVAoWXnjKof3HeXOXIrpgT5MRpG8SolC\nqWAg7MBuFcmnikTvp1lZyfHEE2HcbgNut4FE4melQI1GwDk0gOaJJ6hnMhg9nl6FZHMTUa1GbTLR\nd/AgjUKB9FZPpKs2GbGFwwgqFZ2OTC5X/9SQkeeee+4zM9L78zh+/Djf+973+JM/+ZPHvZSPBSqd\njsCRI7gmJ5G73Ue8Fd4PgaCV008Mc+faOuVqG3/AjFkHm/Pr6M0GxkZH0BqNtJ0upGqd9HYKe58T\ns93AwMwwjVyOlfkIjoF+VtdyVNtq6okShv4mar2FpXs7TH9liK2lDMViHJ3FhMFiJp8qkEjWsLqt\nFKoKysUab72xxvmJgyiHZll68x/o7yshatUUkxlC/Ta6jQa1aoO1jIOBs2dBoeBWRub6//ICx08N\n8cT5L7Pyg++x+PwPcXosNEolhp/8HN5DR+jUKhze6yObyFPMNwhOj3LgQB9er4GtS5eoJBLonU6y\ny8skbt/GVixi9vup6i3ElyusbPR0elqtwIkjXqzKMvVsFq3NhjkQ+MQm6SwWLfv3+7h1K95zPdWK\nHDzoI5mscOtWmaEhG8PDdrrdLuGwjfX1ng/JO7EfExNO7HYt4+MORkcdCEKviNbzrOmNs3Y6XWq1\nJtFGG6vbxuGTA2ynWrz11jpGgwptJYMzPIB3cASNRsXqixcRRSVSuUxhfZ3g2XPEOh4e3FnH7TVT\nbSpoyU02r88RNA4zNhCmUS4jAy2pgcHlQqlUINNrj9gGB5n+xjfIrG9QrXUYODeEoX+YN398H7XB\njHZo+pGRXefYGFqrlWoqhaBSYfT5UKpUpBcWdokI9HQg6cXFR8hILZOhtL1Nu9FAIQgoRZFKMklu\nbW3XefuTwKdSM/IOrFYdJpOGfL5n92yzvb9NscXy3t3su1sOCoUCjUbglVc2yOcb+HxG4vEKhw71\n0ddnIhrtCZskwcDoqYMsXV9Cb9RiG7LTf2AaSyjEyLgTSlk8YT+NQh61Tovs8dB2j7O4UqbYahIY\n9pJPFRH1ekLmGpYJK63tFdau3qXdhfnv/4AD3x1keaNDPlnAYlHzr//gCGubZebuxOl2Zd6+so1W\nrjM1vp99U9Moh/eTl1RUGkpEsw2v2Yha0Ual6KJExmg1kcvVaNYktFoRrajAqBdRdRusrmSY2NvP\n0KANvUGDoppFARgHhokVGrRaXZLJntnRqVMhlpYyxGJl7HYdExNObEYlzYiJdq1Go1TCPTXFwJkz\n6BwO9HY7OnuvKiWZE/i2FSjUOgTVO9ecT8yj4F+KbrfLCy+8wJ//+Z8/7qX8SnDixAm++93vfiRf\ngV8l5IdtlY9rDe/nS/HzqFQkNjby3LuXYnsxQa0BOq0Sk1pJq9VGUKlw9VmZeuIwi69fp1SoYrQa\nOfTl06RXNuj3aDBoRCw2AzqdgGhRUynVQJZpSB2Uqg7Hz47g9ZtYvL1Ou5SnmEii0ojsOXKBaF8f\nA/1mKhWJ1Y0SDr8TCTVv30wze3wUZSWN267G67Dj8pgptVQoGmX2zYxQTBX46ffexuBw0D89zHqy\ng/9uhHymjM4/QNdswD/jIraRwHPwOKUH96ks3OILv3GAdENDpS2yuJghl6ngF7S063VMfX3YR0bI\nr69TS6dxjo1BcJyttdruNTMbBJYuvopNLqDRCKBQYAkG6T9z5iMZZX0cGBtz4vEYKJUk0ukazz+/\nytJShlary6uvbnD+/CCnT4dYXc3hcPSGDw4c8FGrtbh0aYsHD3I0Gm2++c09JJMVBEFJKlXF5dIj\niko0GoEjRwLk83W8rsMEAwb+7795ARkFNoeZXKTBT//xFv/T/3yWKhLLP/0J5tAAstqE3hdg4+JF\nJr/9PxKYGqKSKWAP+RHo4A062Ht+GEV2G7odXGMjbFy8SDkQoJbLYwsFMPn9qCxW9EOT+PyDFIsS\napePTNdC4IybiiSQU7y3fWL0eB5pwdRyOTrSz9xV3/ESkbtdCpub6BwONCYT3W6358lDL0yv8/DP\nj5z7CeCTICMe4O9/7liCj5Br893vfhertbdzHh8f5+jRowwMDACwublJpSIRj4skEhUajQwul4EL\nF/ZjMKjZ3NwEYGBggP5+K/fuLZPPN9BonAiCAodDolpNAWGKxQavvnqHcjlNtaonFivTbGZRq8to\nNFNkMjUSiSgmk4qqOczX//zfMn9vkWxeYrViop7u0G0vIRdTjDz9eZZ+8APysozt4EGq5gDNikA8\nEUVZyJHNqvGGHMguCa1dSX2rjifsIy9oEIYGWZmPYjN4MTkLLF6ZZ/X5IgeOhjl1vI/bD5qYbToc\nOhVLsQyBIS81g4pUrkqXMm6viXpNyeKDIo5SHvvoMIGwE0jTNNgxW7UMHZpE7/Hg99wilSgyNTLJ\ng9eu0Cjl0BmNHD11kKrWTWlnBwBJ6rksFotJfD44cmSCYrHBG2/Mkc83mBwbwj8bJp2L09Hr8e/b\nh0Kh6F3/UomBgQECYRe+gU2i0QICThQKsFgadDo5wL77ef664tatW9hsNsLh8ONeyq8EoVAItVrN\n2toaw78g5vtXhYWFNCsrvRCwkRE7IyOOX7meqN3ucOtWnJWVHK+/voXdINPOJVEbDZhNbs59YQ++\ngJ3ClZfo87kI/dv/jnJJwuYys3N/hcvPXqfZhqd+8xD+fhdX3o6w93NevCE39baC2cP9tDfvo4lv\n8eDBNuqGiemTe7hzw4IYGOT2YoX+/XvZunUTQaPlyKlh3FNTxCUFT14YJNg3RebmNZR0KZS7mMJD\nbKfbXPjcIIpcjKuvrFKqwep2HLWxTFdQM+mz0kZDvKqhkmzgKORQ1Ms41lN0ZAvhkVE2ijqu3epl\nupTLTTqtJueOu5gJDdFIbeLZswfPnj2o9Hr8R47wxvUszWbPZUihAJOiwoO7D9CP2npkRJbJb0Ww\nRaM4PsHvj9Wqw2LRMjfXE2rq9SpKJQmdTkW53HxoDy9Tq7U4dSpEu93h7/5uHoNBhdGo4o03IszM\neDh4sI9UqsrXvjbBwkKa0VEHktTm9u04t24l0GoFnnlmDN9QH8l4hXKti9YXIpdLkKspMBmMTH7p\nN6g0IF9qkigq6BsaQimVmZ3tY3nVSG47hlZoc/xEP5EXfkJq6QGiANNfeooj/8Mfk1pcoLm9g6DR\n4BifJF3X0XENIzdqNIQmr9wuUyqn6XS6OBz6j2QopjGb0VgstGo9Iql3ucivriKVyxQjETRmM4Gj\nRzF6vejsdqrvErwqlEosv0TI3ceBT4KMJIEn/jkn/of/8B8+8N/6+/t57bVNNjZyD49YSaXg/v00\nhw/7d0kL9IzLvvzlo2xv95TnbrdhV20NvTZOsaij27Xi86nZ2SmRy4l4PLrd3VqtZuD48TA7O2Vi\n8RarCRudpoTJomd7aZvt3DYjxgIth4mZf/P70Gpi8HhYzuj40VtzRKIq2q025Uwcm1nNV48NkF26\nQ7FWx+x1YxK1aI0OVrcKHDvjZWMlhcegpyAlaRRLJNayHPz8k4wFFJDewe/WYjarCZ7185//dh6z\n1cv8fIZ2PU8qIjC+tx/9niGO/ve/x/ILF2lVK3jGR/AePgaZLU4M6dEHJrmzUGTkiZOoaCNarEQ6\nViySEo3GidWqQa8X2dwsIAg2XC4D1WqTV1/dJJNRASpu382yYdXw5JP7H8kjePf1V6tFnn76ENFo\niWy2jtWqJRAwoder3/f1v274LLdo3sHx48e5fPnyYyMjb721vatJSqdrtFpd9u79cMOmfylyuTqd\njszWVpFORyZfU2Ky9WEygahS0ec1IG3MYw2FKOzEubtSZyNWp3/PKHLXjn5ogqBNizI4yojJTanz\nJvm1Nb76jePYh0fQlHZIbSXQdJXcu/oAg8tDy9TCPznC5asp7I4KJ799gsNPTFLN5hEUIGpVpG7e\nZelyAs9vnWLgyF4cQ/3Eo0VWFhL0WZQM2+qUYnGeOB3EvCzx1lvbSG2ZeCxHtePBaDKTLeyAQqDV\n7BAI9xGYHqUpCxgHAqQuRdHpVEQixYcaXyVzd9PoMDHmslLPZ1AIAq7JSXQ2G1ZrDR5aHgqCkk6j\ngaCQ0WrFnj18rEypLNFy7xA2eHYTuz8JyDJ0OjIulwGLRUO93iKRqCLLXSSpjd2uRalUMD7uZG0t\nx9SUm2azTanUJJer8+BBFlmW+clPVpicdLJnjwe/38QLL6ySSlXR6UTq9RaFgoTNpiMSKdHJ16Al\nse/gDN4BH0p9jehKhFIuTSmRwTDST1fjQak3EWoWcE2pKIaDjM6E2XjhWQSjhdDBvVR3tlj+8U84\n8sd/hMpgIHjsGDqbDamrQqjnyW7nSGZbVDBi0KvRCG3GRq2YjCr6+n5xS0wQRfyHD5N4sE69o6Ih\nNWkrNtE5nSDLSMUi0evXGf3iFwkcPUrsxg3quRxKUcQxOor1E74nP842zQHgfwWmgReBLwEfuS5U\nLkuPaBjewdZWgX37PKjVj741s1nD1NS7LM1leTdAq9XqEA5b2dkpAT323+nIBAJmOp2fjQlqNCL7\n9/v44Q+LuPy9nVsqmiW7FaUUiTDyuTCLL73K9o07zH7xNHq3G7tWwu23c38hS7vZwmAxMTHlpdNV\noLE7aZQrxNejBA8fYuTcGViIkbt5mdyd23iGBvB9YZZOOY/VqmF2TM0L/9v/idcuUkkk0OjUjD3z\nVZ45HSLTtbBwd4e21GKj1MIWbKJ8UGDmG8cZOr6fYq5Mtdbh/rMXEbMR4qvbOPcdxOkb5/nn1mi1\nu+w9NoLBrccC2GxaBgdtXL68TakkoVQq8HgMjI46yWZrj1zbQkEikah+aDiSVisyNGRnaOijfsK/\nPnj++ec/U14c74fTp09z6dIlfvd3f/ex/P53i6O7XZmVlSzj485faPn+z0Gt1mRpKcvdu0mazQ56\nverhQ6dNvgwagxEFMlI2TbtWQ263sew7Am9H6bPpqQtGEokC82tqhoetiGsl7i6W2Hf0DKMGGZNJ\nS+zGVXTFLeIbCQS1jnJJIpNeo39Wx8kzw+htNg7M2Khv3efmj++ys7yFp99HMZ3FMzqMf7CP1Rdf\nwqTp0lGKWCdn6TSqWIU6az+5QyqeQ+cfQFHUMLt/nPtrVQI6HR2DHdvENAOpOsVskYMnhlA6A9xe\nayF1OrgreYaG7OzslB8ZjdVYLQgmE2qHBZWo6JX8H+oFRkbsJBI94692u4vGYiE80ruXrq5mKZea\niBoVNVnHa69tcuHC0AdGbnzcUCoVDA3ZSKerqNXiw/u+AqtVi16v4saNGKKoJBi0YLVqqdXa5PM1\narU2Ol2vklIqNUkmK9TrLQYGbMzPp8nnJVqtLrlcHZWoZG0tx8iInUpJQu52MIoSs1M2pEYDldqE\ne8BHaW0FRbOOWtnBv2eC+EqEnRu3CA66GZjZRyWboyrr6FQy6LtV9HY7BreXlRdfInP/Ht1mk6Pf\n/S5Svcza9bvcvBGjVJLw9HsxTx/AHtRhamUYNJswtzLAe1s17XbnoZdKFYuyTO7+PVbvR0ApYPIH\nqAthpkIa2skIAM1ymUahgMnnY+jzn0cqFBA0mo/U4vy48TjJyE3gwj/3ZFFUvq9PhVotoFR++Kxz\ntdrk+vUYOzulh5M2AqOjDur1FltbRUZGHIyM2BketlEs9vhRMGjGYuk5h5pM6t2qSjVfQqHWojXq\nkds9dXetXKNZ7fVf6w/WePJzYRRSlVy2wtCwiwOTBq794EccOtLPgX/ze9TKdVI7GdSyRHl1gUYi\nSmo9Qn4nxtDhGZwDflS0aWaT2FQSykadSr5MPqtEfP0ylpk2Wt8gRruF+HaBitSlWu2lKZ45049k\n01FuGmmu3cBlAllhRqo56Bod/NP/e4kKRtKxHNFEnad/+ygzT09is2l4/fXI7vvvdGRisQoKheIR\nJfs7aLc7fBaRy+W4d+8ep0+fftxL+ZXi3Llz/OVf/uVj1428g05HfmQj8HFBlmVu3oyzuJih2eyw\ntpYjGDSj1Yo0mx1kuResNzphRpm/37PVttkori2jjEYw+4dJlruoFS36gyZaUguDQY3VJDB/ZQGN\nSom1EaFeKLDvQIhaeRNLn5Xw/inyWxHSkTi1bIbBkJHl169hNYosX5lDqVRQESSMTieNYhGDtsv2\nm29hMmuQzW7y8Syf/4N/zd2/+6+o9Dqq6Fm+8gCD3cbg+BDFuonjZ0e5ej2OGPRy4ve+jl7VpVyX\nebDTQVQJGFQC1WqLcrlJX59x1zhSEBTMzPhw6SvklxfolvPk19cpx2IMnDmDzW7nc58Lk0xWabW6\nOOwaJK/MgzdvUC430Rp1BPZNU+zoKZebxGKlT4yMQM8FtNFos7bWS44dGbEzMGDl2rUd+vutiKKS\nO3cSDA/b0Wp7+S0qlRIQOHRoAKtViyxPsrKSpVisMzBgRaGQ0elUpFI1ut0O0WiJ73znAOGwBSVd\nwq4u2mYGUk0Sy0skF5fpP3mcaZeDSkfF9maW8PEjZCUDLZNAQVJhMynYfP0Nits7CHKTPqcatU7N\nxFeeYfvSq/QdOoTB7SZ3Z55yqUH3YcZM9P4D9h8bIrF0j5WtLZRBNVmPC93v//57BKa3bye4ezeJ\nz65k684bLN3ZQqUS0GlFcls7+A4dYTMhMGgy0qxUENTqXUdsQRQ/8ijvrwKfWgGrXq9mdNTBjRux\n3V2VKCoZH3d+oPHKOzfa+/fTPHiQ3T1eKklIUpupKRehkBWrVYPRqCIarWCxaOnvtzA9/bOqysiI\ng+3tEs1mB6vDQCMrcuw3jiFGbqLW67D63fSfOoFjeBilKJK+n8GubyN2RMqZLLmVLKpanvU3EijV\nImZ/AIXWSGJlC4XcRWt34ur3UcsXKEUiBGcmMDksZHbS2AeCxO4vIXUUVKpt7lxdZW//DDqiSA0N\nDo+FgK43b98zbCtx9WqUQJ+B8p0NmqU8I0NWvEMa3l7N0ChX0TitPYMbtZpUoozZrKbb7V2Xn0cu\nV8fp1D9CRnQ6EZfrs5lJ8+KLL3Lq1Cm0n1IL+4+K8fFxWq3WY9WNvBvBoOWRNt7HhUKhQSTSazmo\n1QJ9fSZUKoELFwaJx3uV1uPHgwz4tSSuRhFUKrbefJNqNk8hJbL29ibhvaPUZDdyvc3nnh5nIKDH\nZRP54X+6i97nod1oUk5lCRz4MhuraVZXs+SyNSb2DvLUl0/TtPgxiG3uvFihKvYyVWx2HXq9CqvT\ngmVggPzSIqIIkiTTrneIbEaYTKXpiDqS6zGqTR2tZptUoohuZYOueoQrb+/gcBroH7Dx45c3GRqy\nMzeXoN2WGRy04vf3fFa0WoHpaRfpdB2QGR114LaLNBfvUdlYQCr1KsTlnR30Dgf9p0+j16sJh3/2\neciO/bR0DrrBGB2lmkJTSz7/s43Lx4FOp0sqVaVeb2M0qnC5DO9LlDUakUOH/IyPO3efBd///gK3\nbsURRSVjY07Gx51Uq02+8IVh7t1LUau1aDTaKBTw1399heFhB4cPB7h1K8bnPz/MwICVRKLC8LCN\n1dUs58+HuXhxjWS8xOxeN9HVOFMTdiKXXsXps6Fp5Fl5/Qr5fI2+6SlKHS0NnZe5uwnazSY2l5UL\nJ52YDQoyDYmO3CKXbeHpU2EJBhh5+mlUZjMolWiNeozGBk6ngXJRwu0xUFpbopGXcFhUdOpVipE6\nyfn5R8jIz9pOoOlUiaVyFIsSGk2PjLTaQKNMtiUw6tLSqtdxjI6iezgi/LjxqSUjAFNTLrRakbW1\nHIKgZHjYTjj83lHRej5PdnmZciyGPtDPyv02737rKpXwMG/BiSzLmM0aHA49zWabbpf3xJQHgxYu\nXBhkZSXHllpJKGjGY+4iaSexDA7hCrrw7Jmm3WiQvn+f+vU5hixmqkY1rqCHfp+OiqGElMug93jY\nSUjEM1UMAYG1jSLlTIF9M0M4KnmatQquiXFsY+NkX3iDZHqJ1aUkokaFyaTDZHeRSlYZ9bp45ivj\nXLsWe5jLAKdPhx6avUGzDQa3k+xOkkKpRf9YAOV6jNCwB6UniNFVw2bT4+/rebVotSJaba90/W70\n9ZkYG+sJvMrlJgaDmpkZDx7Pez1ePgv44Q9/+JnKbfkgKBQKzp8/z8svv/xYyMj4uHO3TRoImJiZ\n+cU21v8cdDry7gOrWm0+1ALAiy+uMTTUi4uvVJooRCNdR4hcPEVLqcfoUjJs11FuJMls7bDnqVFy\npS6Re6uINTsjM/2c/9oRHiwmCY/NElfWKdfBMH0EW/c+oQMWFEY7L99ucO6cjJYGh/Z7sQd9GJp5\nyokEKys5Rmx9mFQ6VCYTGlGB3uOkWKwzMjtCsdJBMnhoqmvYTSqabZluF0J7J0juGHA6DVy4EOaV\nVzbw9xnx2sB01MuNOxkSiQputwGVSsBs1nLwoJ/+fgs7O2WKxQYhr8DmpW1q6fTutaokEiTv3aP/\nfaqCCoUCq99L+l7lYfW0R0Q0GuF9/Z5+WTSbba5fj7GykqPZ7KDVikxPu5md9X5g5e6d6bw334w8\n9KBqI4pKfvKTB8RiZfr7LXQ6MgaDmlSqyt27KbRakaeeGiESKaDVCnzrWzN02x1Ghm14vSamprr8\n5m9OoFYrERQyalFG2W0ze24/+0b13Ny5hyU8SKtawdBS4CxX8e2dZm01RyoSZ3tuEZ3FhFKlotly\nYQ/4EXRGSvEkNqcRg7JB9MoV1i5eZPLrX8ccCFBLJunv7zzMB9KiaJQxOyx02jmsohKtwYUl2HPD\n7bRaCKpeRpAktR9W9x5OpaFArRZ2409UWi1GtweL3YNlQMC7bx/mUOhf/Fl9XPhUkxGVSmB8vMd6\nPwitep3Im29SiccB6MhQi8s0tc5dBzvoiSzd7t5YVzJZpVjModeLOJ3vv+Mvl5tsbhaQpC6ZfJOd\njSJHZkO4LeCfGkNnsRC9u8D8G7cpra0gizr0Kh02S5FKVUFu/i7ZyA7VphLL5Cz+vftpWhxI7Tmy\nxQ5b0TpjITMGt4voeox4SaCq96HvH8Ee2iG9lcDgMjH2xEmuXo9z5KtDxKMSFouG4WE7zWab+bkY\nfS4ViY0YcanM5NgotmSaer2O3Gqx98QE9RtJuq0GFrcCUdNlcMKPzaZDEJRMTrq4di26+2U2GtVM\nTrrw+Uy4XAYqlR4Z+Xmy9lmBJEk8//zz/PVf//XjXsongvPnz/Pss8/y7W9/+xP/3adP91MsNpDl\nnuD8VwWbTYvbbWBzs0AkUsTtNnD1ahSzWcPdu0kSiQpKZYDt1STrt+apZPJQrXDkWD/GVoaBfgtS\nq8vQiJPctR0aUpuRCS/p7SQ+r5FQ0EKjXCEUfJqNTIe17QrqwB5K3SbRhW0OHQsjb81x7cYKa1du\n0jc6wL6nTvKj/+s5lPouBn8QvT9EeiOGaWya1RtL6B0OsskuiqIObd8gtXsPqOUaeLwmzIEApmCI\nSbOSmRkPdruO2TE9W2/fYPlWjFDYyf5QiLWikXZbxmgUdjdxwaCVYLC3eatmMkQU761oCGo1bUl6\n38A1i0XLiRMhbt+O706xTE+7PxYBayxWYWkpQ6cj0253qNVk7t1L4vebPnTjU6s1iUZLOBw6ms0O\nDx5kKZebLC9nmZlx8/zzq0xPu7lxI0Y8XsFs1hAImAkGLeTzDer1Jt/7/+5TLjex2Ax4fUb0Bg1j\nYw7CYSsXngih04lsx2psJxoo9GZ0Bh1yu8X9//YT6uUy3XqFwVPneP16jlypSzefJbKZ48tfP4jJ\n7SCWrCKbHFgCTsR6Dp3Lxf4/+AMGPvckLaMHfXicVmMOh6DE4TQgmKwIooiQfw1rOEw1mSS7tESr\nVsPU14drYgKlIGCxaDCZ1ORyDRpKI0aPC3+jRSHf89gyO60YgyHG9oXxej95TcgvwmfzKfIuVFOp\nR0aW2pUyI2Efc2sVeEhGlEoFo6M96+DLlyMkElXW1/PEYmXGxhw89dQIIyP2XUbeaLSZn0/t2tGr\n1QItrZZsXc3ktJ12vUZbkli5s0apocI5McnW7UUMqjIm4wCRK29TLtbp2IOY1Srq1TpqlZkfPxvl\nS184S3D+NsN+NYJKgWwNsHpriZazw81rEU5+7Qx7/3ACoZ6nVKjTUakYOBaipXdw+/Z9QEG12kRb\nz1De2UaYDjAc1BLPZllelhk6doZhL1gtGsION/qBHEv3duh2wR/2cOzkwK4WZ3LShcWiJR4vo1Ip\n8fvNu+2Yd/u0fFbx6quvMjk5ifcXxHB/VnD+/Hn+9E//lG63+wt1V78KvKPJ+lVCEJQcPuyn3e6y\nuprD5dJjMPQ0YJVKk0ajTb0q8eaL9/HoJIJjITQ1LW1ELOFBmmKKRFqi2NRiGQgzHehSWp5j4eoW\n9Vobz2AfwxfOU5RUlPIJNmMxgmGBTjyGUyvB9n1inQJSU0DrC1EuVNi4s8SX/t0f0RG0ZGsCy9kG\n7gMnMdV2KDUETIEAsaaDVy6ncdjUHDp3rvf/sd/N5laO7QfbjM8MMGBtYDAoWVid485rdyiVJDaW\nYoSHY5z/5jNIWgvBoJVw+L0mcGqDAe/sbC/npNzTkpiDQWzhMArhg0esAwEzXq+BcrmJTqf62DYm\n2WyNSqVJLFamUGigVvcqLsWi9KFkRBSVqFTCbitZktqIogKHo9cGSyarjIy0CQTMCHKnF5YoSQiC\nklarQ1vqsLyYIhR2sLWZo1Yooui0mB4/gs6mR9Tr+fsfLFKttjh0wMPw6BjF7U3iN2/g2zOF3mHD\n4PEy99JVFKZBmlKTdktGUIvkcjX6Tz9BR32VVqVCX8BC38HfxNzXR1NjYWVui+byIqLJjG3sCBDV\nyCIAACAASURBVE5TF61Bh9HrpRyPIyi6JO/epbS9vZsZE7t2DbVej21wEL1ezcGDfq5e3SFdahE6\ncIS+wQjUi6DW45qexjXY/5HGgh8HPttPE3omLu8OzupIEg5dkdNnJklUepkzw8MOhoZsRCJFlpdz\nLC9neOutns/GwkKaSqXF178+iSgqKRYl9HqRSkVClmXK8TiFzU1K29s0+t2M6oZRdNvkIjFsPhft\ngJFGfJuhPf2YfV4EtYjKaKaSqLC+EgeNDt9IP914hma5QhcrzpAPUSyzNf8AU1+bwZkhck0tOrFL\nPFrmeraDQqGlP+RmYtyDqd6GbherRYvLraddyJC4twDtJmLHQfHBOiabl05bg85sIjTbv0skTrod\n7JkN0m53sVq1j4iCFQoFgYCZQMD8yX5ovyb4p3/6J77yla887mV8YggGg9hsNubm5pj9Ofv0zxJ6\n+SNhRFEBKLBYUmSzvd2j1aolnqigVomEx3wI+R22bs/xIJVEODvFyOnDTH1hgrrGwX6dgvgrP6W4\ns41JC5VSh8xOCtNWkjfuNRjziyjqRRbejOL3qLE5VGjUXfKrCTRWF22VjobWjv3/Z+89g+Q4r7vf\nX/fknPPuzOaEsMgEwJwpKvmVr2RJVr1OJdmSryzLZd2SZVt6ZZe/yFWW6y2VZdnWdb2uUskqOcm6\nFCVmUARJgCDyZmyOk/P0zPTM9P0wwBCLBUASALkAyF8Vi9iZ2d7T093Pc57nnPM/A0NkYynmZ2KE\nuoI4tQYmxpMMbe9EM2QGn5+X//UMNSpUyzom9QbquRQuv4tIl5twrYYmM0bunI283kR2bg6f14gk\nyVQqdVaXMuSXl1B3Ojl3rin2dccdbeu+E43BgKu/H0GlopzJoNJo0JjNWNvaUKmvPk2o1aprahl/\nOS5USVarNcbGEuTzVQQBikWZarWOLF89UV6rVdPf7yKdviAhYEWlErnrrnbK5RqplMTKSp6BTiPn\nXh+jWq7SEzHS3emkb7iDeKzI4FY/TpeZSjpFeiGNUleYOTOLoNFSLodbi9DTp6O03efHWktiDwUR\nzXZimTr52WWMBjPmiAsFMNn0fOTTB3GbZKZPTBDu7cJqEtBaregtFkSzg9d//BTzJ8aRK1Wsbgft\nB/Zh29WLnhK587k7vuFhkpOTOHt60NntKPU66dlZVFotGpMJk9dLR4cdl8tAJlNGrRZxu7cjNOqo\ntNqbIjH9atz2zoje6URrsbS8fYB6qUh3t4Odl2QiR6NFymWZ8fE3kltLpRqJRIlDh+aw2w2kUhIe\nT9PrrkslyrFV1I0K9UoZj0vH6mtHmjLo2TT1YomVlw6xNrNMOlWibWsPj/4//zcLY4tYQiJurUw8\nKRNPVXh0uB1nSKJ09gg+p4a1iXOsHh+hnlgj1G6nVi/j8FhYnouxklSw2I30PNzNHfuCSCuL5PNJ\nguYSibUS1bVFSokEyBJ+YztE53D4Tey4axeRHd4NOxpvd0WqKApyqYRKp3vTgepWpdFo8JOf/IQX\nX3xxs015V3nsscd48sknb2tnBJrN8oJBK5OTSXp6nGQyK6hUzfJ1u12PUvFCOcGhfzuESq3C7fEg\n6nRU83m8HUHUBgOrJ08SPXmCYiyGyeEhFPZhD0dIZUu49HVGfn6IA7u7WY1bsHqdbIlocDQSvPTK\nKyiZMkN7hnFEOpAWp0jkBWLzKRZ/8VPCO7ew55FHSKZLnHhliqE7DZTKdeIpiZ4eHWqjGYEGC7MJ\njj97jLokEeoO8PCHXditWlIrcUw6A4MDbqRyjXS6zMpqEZ2xiNNp4Ny5FH19rg0OhKuvD0EQSM/P\ng6Lg7O7G+S7W4mezZV54YY5YrEhXlwONRiSbLZ9fJAkMDropleQ3Pc7AgButVsX4eAKzWYvPZ0YU\nYXExx4EDbTQqErOvT/LYY71orXZCIRNWq4Ezp1fR6FTo9FoK+TJOl5G1bBqnx4zLoWNpJY8auZVz\nJAowf24NR8SJyeOhEIthrckoBhuuLhfqTge/9jv3YjZp8JiqHP7BTyml84xXMrRZJAZ/9VcRtm9H\nypVYOjmCUqtjMog0ihnKkydYq2eI51ZBUdCazfiGh7EEg9QrFaR0mtTUFLVyGYPDweLhw7gHB/EM\nDmKx6C5Rt741xuhbw8rrwGC3037wIKsnTlDN5VAbDHi3bMESCGz4rNmsPS+ic7G2iApQiMVKrUk7\nkynT0WFHlBRKDT3FeJ49u/bhtqtYefYonqEhTD4Ls889T0XrwBBSoXZWqNTUzEys0HfvfuZfO45g\nyFFvpOm9ew+nJvJ0exXU6jJKchWHXY8SNqNSlamm4ugENb/yawdJNFzEkhV0GhWFZIrUApTGTxJL\n1RjsDDMXrbMUl7HYjWzd1glzp0hPjKG3mrEYRPTm60suKyUSrJ06RSkeb32Xju7um97rfrscOXIE\np9NJb2/vZpvyrvKhD32Ib3zjG3zta1/bbFPecYaGPBiNGgIBC/t2e6GQwiDKWEJ2XlEaLD57klq9\nQb0BJpuJfEWFLJUpxmJI6TS5xUUMLhcNWcbW0UZ+dRX5XByVMUCPv5O8UcvxnzyDO+TGYuulp3sb\nGo2TO37n18nLOvIVEY3bRHZxEbtNwdCuIY0ZSzWBoxFH57QQ6g1x8vUFdu1pY365iFWvYDBq8UeC\nnH7yBRBEanKN+fFFxoc6uT/spnv3AEeeHSFf05JMltCZjAQHupmOS7hcRmq1xrqutxdQaTR4hobw\nDA29+xcDmJ1NE40WASgUqoTDNiIRO1qtSCBgQaUSKRarb3KUZiiuKc/gIpstMzGRZHk5h9WqY9eu\nAIVYjJGXE7SHbRQLZQwNOPr8NHNzafxdIR5+uIszJ5eR7Wra/fqmIms+y85tbhRqqFQCSkOhPWLH\n6xMw9XgRDUa0CxOkzp4kObfI9v/ro1SMHkrH5/C1dTN38nXWVvJ4zTVoVCmsrhIfGcHZ08PK8Zew\nC1kEjUJ8IYqAQr60SniomwtnWy0UyK+uYnS7yS8vU1hbo1YuozYYsEUiSKkUsZER7B0d75ok/43m\ntndGAOyRCGa/n0o+j8ZguGJDp+5uBxMTFrq7HRw/voZKJRAO29BoRLq7Ha0qFVluoBcqGNPjFOfP\noitLFOZfwzTUjzUcRq3TkZycJJ8ucuqlMyiAM9xGpVTEePQs9o9/iD2/+gEajQalCqwmawRkgZC1\nwviRZZRaDbXdTvu+fcRHR9HoNBi87YyOZzl5apKVrJq6oOHBB5pdcRtylezIKaTZCe54/GH2DO5k\n7lCB+sIpMsuLIIrY2tuxRSLX5TTUymUWX36ZwtoaAJVcjoVMBrVev67x0u3AD3/4Q37t135ts814\n17n33nsZGxsjFovh9Xrf/BduYVQqke5uJ5E2E4svv0x6cQa5Xie7MklPYCvuuwdR8nFsVh0ajYpU\nuows10nPzbF4+DC5pSVcfX1Yw2FiZ8+yePgwrr4+dL1Gzj37HB2dncwelVganSE81ENuZhLX1u2s\nGgd57fmzSPkibpeB3nAvgdI4KyfOYCgWKawUSPZ1I+qMPPChezlxbAVBq6M7YqSSL9A7YOfEK1OU\nKwpaowm50CxJzkoq0mtxhu7ZgznYxpmj5+hUaQnvHiZRNdLWpqNQqGK363E4br5S9VSq3Pp3s1Gd\nzPJynuFhH7lcBUEQ2Lnz7eVv2Wx69u0LASFGR2O8+uoSXfY6EUeVXDSJVCiSKwucPXSaclWksBbD\npN7O/jtCaBplhvoHKaYyUDUgxWMYh7rJVtUUkln6ug3MLRaY/+lpVCoBq6OPXb9+kGFVkbnDhykX\nT7DzsY+QiuaQpTIhr5ZqSSbU7kRaSNKQZaKnTqHWamnkkqyOnUNtcdJAIF/J0ihl0dlsVDLNnrKF\n1VU67r8fnc1GfnUVnc2GZ2CAuiyjNBrUJAlZkt53Rm521DrdZTPCL8Zk0nL33R34fGb6+lwt6fLd\nuwOkUlKrqkSvV1NPrlJOp7Ho6hTSKaRsipVjx9j6qU9RSiaplUoYPW4EpU5dKpNfWcXi92ENhynU\ntAR37wYgEc1S1sQoZYsYQz4GP/YxoqdPozGbKcXjGD0eDA4HybqB//y7n6Cz2fHvuYPV5QynX5vj\nwFYjK8eOkV+ONZ2bRJw9n/gQJqeNasmNqzOCZft+BE+Y1YxA0NisgLkWSskkxYtK/6CZg5NdXLyt\nnJFarcaPfvQjDh8+vNmmvOvodDoeeughnnjiCX7rt35rs815V8gsLjJ19AyFXBmDXo3dUWP27DNs\n/eAjhPvbSCzGqFRkzFY9tqCfUiKBWq9v3vsLC7QfPEhifBzP1mbTTLWhgdcmotFB755BPJEAPb4G\nCy88R9nRxfhLM9TyBQRBT70Bo0fGCN7tb4Y9ZRn30BCOoa1kawaqdTWlTJrFpSK5ZB63XY3F7WhW\nVZTqSLk8Rr8Kg82CxapDbMjotCL3/eavMPholky+xtJyEV2lRqVSQ69Xs29faINC9c2A12vk3Llm\ne490usyOHX5sNh16fbNSZHDQQzh89Q7MVyMSsRONFqmWSphddop5AZtQIjOfxOa2U41L1OQasZkF\nDA4HkYAWITaNGI8jqtV0bO3Duz2ET5NBqjo5NS2TWkuDoMHptiPr9Bw/J7PHnWbsp0/i6urEYHgS\nb6QHQ7eTaCWGqBMxed1oQl3Ydm6nVq8T6vSTnJpCQxWjAdRmM+HhQbJzs3gGBlr2q/V6DE4ntrY2\nNEYjmdlZqvk89WoVQaXC2t5Odn6e+NmzmLxerOEwWuPNmax6OW6+O3KT8fvNuFwGhoY8FIsyLpcR\nrVbkyJFlcrmmRLDRqMZcqaCxGlBM3RicTqqlEqIo4t26lbnnn6ewtkZ4z37659PMvn4Wk9tN9wP3\nkNL46fV5yWQkqtU6LxxaJJM5X6O/muWu/Xeyfdu2ZmLs/DypqSnmXngBqfMgZouBfL6AGpn8yjJt\ngV6i5+bIrsWpKGqqxQJmZ5304grmUBjXYJh03cpzz0ygGCVs7UXaurzcc08Ep/PWuUnfbZ599lk6\nOjpuCvGvzeDDH/4w//3f/31bOSNLSzkmJ5sJkeGwjb4+FyaTllqtzvTIEmMjMZSGAgI4nQZ8PhOZ\nVBHPvruxda4gF0v07+nFrm+wcuwYOosFk89HYW2N1PQ0SqOBWqulXm6u7k2VOO2RbfTv6kEnyEz8\n5Cc4u7pIpitkZqcpJ5MYXC7qDSsNlQ7MDmzt7eidTvSD+3jxpSVi8QoqwwxaiwWnvoaxzYnBpKNh\nsDPY52VuuUxJzqC1gjVoYecdEYLtFqzBIKJaTTDsJggMDDZbX8hyA6fTcNOW4kcizZYcS0t56nWF\nQqHKBz/Yh9drQqtVYTBoruv4JpOWe+6JkEyWaGzzkE3mOfbvP6NcKLLn4DAvPj9NTZZRUcdsVrP3\n/q1YxBKVQgGt0YjJ621WZ8ZiNFwdHHl1htFTi6hUIsHeNrbs6qRWyRNfy1EoC/idHkZ++iTqRoU9\nv/kZhHY3k0fOUEGHEo4wdSxPuSZwn1VNcP9dmP1+NHo9zu5uapUKsZGRlu0qnQ7ftm0t58LR2Ul+\neZlGrQaCgLW9ndziIqnJSQASExM4uroI3303au2NFw98J7g578pNplkaZuJiZdwDB9ro7HRQLFax\nWnWoUgKzz81TWF2lWiyComDv7MTW1oYl0sXS5BKNiRnu+ORHidx3P7mqiqzoxOV1Mjufo3A+EeuC\nIwJQqdQZmchw7wEvTpMJUaslPTuL1mJBZzfi8Rpx662YbQaGDw5iMamhVqGYTOPv7yOZ1GPv7ETn\ndNF3107WRkY5fmSJutaEqDTIra4yUW42uLrrrjBm89V3ii7F6HI1E7XOh2mg+ZBcEOC5XfjBD37A\npz/96c02Y9N4/PHH+dKXvkSpVMJ4C62srsTqap7nn59tCfhFo0VSKYn77usgGi1SrGsRRZF6ow5K\nU8nSH7TR2e/nXFREY3UQDje1KHJzMyAICKKIo6MDg7OZvKizWEhNTyOXStTzeexd3ah0WlZPnia4\npb+pl1Gt4nAYqFcq1CSJSjZLJZejc+cg7cODuB/eh1SF0ZEY3Vs0uDNFFpcK5KsK/Xu2EF+M4ds6\nREeni0Khyu4dXhYmKgiKgtshUiukycwlsV2ySymKwmUVkrPZMisrecrlGh6PiWDQgihuXu6XxaLj\nvvs6iUYLVCp17HbdFVVXr0QyWWoptl6uV5ZKJeL1mqnLOixWHfWH7uClH/4MbTHKr3xsCLkOdpeV\nwTv7qTcgKmlxOENYnAZyy8vMvvhLlk6MILtiBIMepsZUyJU68WiBeLxIf1iP3+bEHQ7g7Q4j5mPk\nZs6x/NoxLDv20/fRHqSaitlakJf/8zW8bW5eeOYcj93tIB3N0HXHDqqFAo1aje5HH8XW3k4lm8Xg\ndK5rXGdwOOh66CFKiQRKo0E5kyF7cedzRSE7P0+pv/+W2bXeTGfkc8CFpdf/Bn54o/9AqVRFpRLf\nVA9DkmRisSKKAk6nHqt1YzxVrVatK3EtqNzUZZno6dPUazXcAwPYwmGy0RRJQ4SqM8Lo6Qns0SLD\nH7gPbd2GXjFQLteRpCoul4Hl5fy6v2GxaDGVVjn3i1OY7SYyMzPoz7d5VjR6hvb0EM+L6PQK6XQG\nm9+HLJVJSWocWguyzYmsNtC9dxsGixGVw4dUiREbGUVtspAVHBQaeUoVBVEU6Oy0o9WqmZ3NoNOp\n6OiwX1WwSK3X037wIGunT1OKxd5IBg6F3uaVuXnJ5/P89Kc/5Vvf+tZmm7JpeDwe9u7dyxNPPMHH\nP/7xzTbnupmby2xQEl5aypFIlCiVZPKKmbbhQZZOj1OXa6i1Guw9fQR7w7QPrV+N29rbcfb0kJmd\npQFoHU40bh+iVUYbTyKKIuaObgzd2yg3NKQWapACxRGiodZiKEfpHe7i6H/NYvZ52bK7i54uG9W5\ncWqmIWqVBsuvHGZ1ZByzw8aWXbuYi9awm1XseqyH9p2DRGNFRkbipGdmUAppGg2YmC7hs/Wj0WUp\nJRKYzuf7KIrC0lKO+fkMigLhsI32dhuZjMTzz8+1Spo1GpHdu4PvmPLt1ZDleqthoV6vJhLZqKJ9\nJVZW8szNpVGpRAqFKsvL+daxtm/3smWLd50zoygK2YUF1k6eRC4WsbS1ceCBLWSiCbRCFEvIg73P\nz/hMgdnZRep1BbNZy8FddlJHDjE9MkcxmuXUL37B9v/xOJ09HqbGY2iNBmq1Orv2hjEXBfZ/8oNk\nFhbwDfThHeijLtcZOzqOJOgwD+zily9PUW2oySSLuPQygnMrkUc/iCClUKsEHN3d6CwWVl8/TnIt\nhSKqad+xlY79u1uKqxqDobUQXLkkfA7QqNWoVd5y79lNZzOdkV8A/3Dehle5gc5ILlfhzJkoS0s5\nNBoVfX1OBgY8l+1Zk0qV+OUvF4jHSy0p+DvvDF9VW6NRr7P4yivIxSKdDz7Y1DERBESNhvFXTpC0\nbaUa3I7PFUYAzuVsRDrsUGxgMmkRRYHp6RSK0iw38/tNaLVq3IYKay+/jv+ObmaefprE/BI1qYyr\nrxff1i189DcfIFszsHB6EvU2O+OvTxIvC/Q9eC+VmoBUqtNx4A7cAwMU43HEcp5KbIV6pULD0cbM\nyXm0Viv2+wdJpcqMjEzR1eVoZbCfO5fi/vs7r3ruRrebzvvvv21Le3/wgx/wwAMPvGeEzq7Epz71\nKX74wx/eFs5IuVzb8FpT2bOBxaIjW2xgDW9lMByhViyisVgJ7uptDfoXo9brCd91F67eXnKpPInV\nJMeeeY3k/DKegI3e/gG0oW7i6gDPPTdPvRqkx21nyyNBJp58ivLxk+wd3saWnb+NoNRR4nOUzr5C\n3Okgt7RIKVfC4Q2xeKJOYm65mfh49/1YdXUcZhVWm55UuowoCtQqVUqZZgdtUSViMmqolSrU5TfK\nX6en07z00kKreubcuRQHDrRTLFZbjgg0k/JHRmJEIrZ3RXzuAlNTSUZGYpTLdXw+E8PDvrccQl5Y\nyHDo0Dzlcg2Xy8DPfz6Nz2dqSfqfOLGG12vC42kq7k5MJMkmstiVFB4dJOZirC3EadvSw9AHHkIQ\nRXQ2G3PRBufOLQJNR2l2No2htErq+DS1WgOXz4fZUWDymRd5+Hf/J1v39aLS6RkYcDO0o53ppyeI\njYwgF4uo9XoElYrgvjuISpO4B7fw/z27zNLEEg0E2jt7Mbm1aOxuljMO7n1kPyaThnq1yth/P8HY\nyTmSiRL1usLEyAr3ac3037F1w3dh9HgQVCqU+htVUhqjEZ311tGI2syZZP78/+vAxtHiGlEUhWPH\nVlqJUADptIRGo6K/f6Ns/OhoojUZA2SzFU6cWMXnM7U6816KlEpRWFkhMztLXZYRRBGlXqdWklBC\nWxBEAVljoSIaqUkSpZk4hXiSoy+MYol0kCuLbNvmJxg0s7ZWYGEhx8CAC1UlR1vESXF1mfj8KmvL\neQTqGIsyi7MxbHt1DO0I4yTFyIuvo+SSREsCWx6+m6KkI6hT49k+gFqnQ2+3U4mvMDzso1yts5Bp\nirRFuty0BQysxppyyx0db6xCJKnGxETiTUXOBEG4YkXSrYyiKPz93/89f/3Xf73Zpmw6H/vYx/jy\nl79MNpvFZrv2pMGbgfZ2GzMz6XUN3Ox2PU6nAZ1OzdCQh5GROPGqDoPBRNhlw+O/8jOg0miwhEKM\nLy8w9cvnyKwmUGl15PNVZqfjGBsuXpwscerkGhZNhdmTVRL7wux54BHcujILL78MCnTu6GN5fhJq\nNRr1OtV8nvz8HN47O7C3B0nPL5FPpAn6jITarLj6+gAIBMxEIjbyaw5KiTgC0DUQwKoqoTKZMDid\nQLOT9uhofF0Zryw3GB2N4/FsnPDL5RqlkvyuOiOHDy+27MvlKhSLVR55pPstJdiOjSWQpBo6nYpM\npkKpJLO2VsDjMbX6amWzFSSpxosvzlOp1MktLXF0dJJwXxBHQyS1HGV+PsNjkS7atg8CMH90svU3\nlpfzxONFQrqmfbOzGXbs8NPW2870dJJEvEBFZyJg09HbZSMxOUn01Cm056+D1mxGLpUop5IMP/4A\nE7N59HotZpsOrcGA1QAemwrKeUwmG1ZnM1RWjMdZnY8SixXh/G0rFSpMnzqHt697g0aMJRTCu3Ur\nyclJauUyWpMJ3/AwJo/nBl2pd56bYVn7e8B/3aiDpdPlVmvsC9TrCufOpTY4I7VanbW1woZjZLMV\nCoXqlVUFBQGNyYTebqdRq2H0NrcCLcEgmt5OFs5Vz39MpBCNUawKiMUySqPO7NgSxZqOYNBKKGRh\n2zYfarVAV5cdh2IkM5EkNRUlnSw0S8qUBrlcmcTSDNYdK/g62wju2UPd7MGyNUVV0XBuUUJvUeHx\nmbFYmslKOrO5WdI8+iQPP9jPWsNLPFbAZNKSTBSRZfVlna2Lu/G+1zhy5AiFQoEHH3xws03ZdOx2\nO/fffz//9V//xW/8xm9stjnXRUeHjWzWz+RkElluKg3v3RtsJUTu2hWkrc3akjT3+02o1VeWQIdm\naDeTqSBcpFis06mJxcv4I1rKpTIkF5k4t4TDZSI1OUlP4DFUbgO5RJbUuVmsNh1SOt2sknA4qJXL\nNKpVDKLM9t0R0u02dBqB/l1dePvf2KnR6dTceWc7bSEz82cdGIQyTl0FdV0itH9/q7RTlhtIUnOX\nxGjUYDWCihoqg4jdvjFfzGjUXCKW9c5zqd5JPF4ikZAIBq/eO6XRUFpSC/W6gtGoQRDe2PECUKkE\njEYNExOJlmqqgkAqJZE/ucQD97bBUpRyuUGu1KCUTqPWaPD7LSwtNXNp4vFm12BMbpw+F+fOpZif\nz9LT42DXwV4cXW1YvG4G+l0UxpsCeKnpaar5PFqzGUtbG8mxMSzt7c1dNbMNx30eHv3QEOlkAVGW\naGTjTeXY4WArZ0dUq5EqSssRaaHWXXZuUqnVhPbtw9HV1XRGzOabphvvW+XdcEZ8wL9e8toq8Gng\nDuAx4LKa23/4h3+I3d5cuQ8MDLB//346zifxzJ1P1rn0Z6u1GfOsVBIA6HRNBySXizI3p219fnp6\nBlEUsNt1pFLSus8bDGpisWWyWfVl/57R6UQym1EPDmLM51l5/XVyWi0hu53hoAPjSo58PoosSQhK\ng55uN4vnRukasqJZ0bK4lEeS4hQKApWKBYNBjSQlQdNAazRibWujbNKjeB3YBRXlmog+4mJpdYVh\nqYLgMFBR11lKppk7uQZ1GdHYwGmJ4Hb3tuyt2O14t28nt7CAO6BnLZolGjXh7PGjVpIMDqpbD+6F\n849Eglf9ft/pnzeTb3/723z+85/flL4sNyOf+tSn+P73v3/LOyNqtYrdu4P09DhbiZEXr7xFUcDv\nt/B2InNarRoEEVdvH8n5Veq15mSXzcv0uBwYdatko3EUmpoZWquBSj5HWShg79+C0W7DGgzSyKXQ\nW61oTCakbBZFUaiVJapLi4TCYZw9PXh6ulBpNOcn4Ao6nRqjUcvgkI/BIR+lVIp6pYLebl+nMWEw\nNAXdREHBUl4jdmSUqlSmZ1cvoe0H2brVw9RUikqljsmkYedOP2bzrVF5IYpNDahkUqJWa6DTqejq\ncpBKSRgMzWsbCllxu42cOdMc48rlGoLehKDVUZPrIIioNGq23jPM2omTSOOvozPosLd30hWxMzLe\nDKUbDGr0Die+e+6iIKupFPLY29uw9m0h0N9OX5+bQixGenq6Kcvu81HN56nk8zTm5iilUgT37kOu\nyphMNXKjp9HbbPS43RQqEu49fXQf6Eerf8MRNHk8tA12sTj7hhq43e9G7wug1V7eURYE4ZbaCbmU\nzZTNDNF0Uj4CpC/zvnKhFfLbQVEUnn9+bl2YRqUSuOuuMP39bmKxAqOjCRKJEm63gVDIysmTa6TT\nzZI8nU7F/v1tlw3pXEylUGD12DFG/u3faMgylkAAs9/f3C47cC9LqxKlbAF9JUWq0KBaAfeuSQAA\nIABJREFUKFLPpckXq6wVdNQsfjo67ESjBSwWHcGghXq9QcABViXL7AsvMntyHFlRI6uM+Hbupqa1\n8OHP3InJYaecyTD38hFyFRWVGtisekyqMs7BrQgmO0ajGq1WjZROk5mfRy4UUJwhojmRotQgGLRg\nNms5cmSZdFpCrW42wtu/v23TBiRBELiWa34jmJqa4uDBg8zMzGCx3HwdLTeDcrlMW1sbx44dazmN\n7wSbed2vh/n5DCOnV1Fll0lMTGIyiAjOIK6hrTz9n0eZPtrs/qo3aPCHbNy9143NZaFYBX9/N91D\nbUizEywcPkxhdRWtxYJv+3YAsotL2PsG6LzrAAa7nViswMmT0aai6vnQUn+/a0OliSQ1e7iYzVpU\nKpFMRmLu1ASn/vNJ6rKMf6AbWWsjmpJx9/YS7nDgchmxWnXvegM1QRD4538+sW53JBg0v+UwTT5f\n4dVXl0gkSphMGlwuIwaDmny+WSDQ0eHAbNZy+vQa//qvZ4nHS1itOuqFHAGHwmCgjslhRY6vEF+M\nMjDgplqtI6hURO65h4zo5uzZWOveLJVqWEwqusJGfCEXOoMGt7tZ7ZNdXOTcz3+OqFajt9tZOXaM\n1NQUBpcLQ/cWchoPY8dn6B0MMry/G71GAEXB4HQil0o0ajXMfj/WtjbE83l4mViasZdPE52aRTRZ\nMbV1YPF72bMntKlVT9fD+fv1ssZvZpjmzwEv8B/nf/4AUL7yx98agiCwZ08QnU61LoG1u9tJLlfh\nxRcXSKWaiVuplEQiIXHgQDvJZIlarUEgYCEQeHPJdJ3ZTKNWa7ZvFkUElQpZrhOfX8W7I8+2bSGU\nhofJ10osja3y6hOvIgqwdTiEtRFn6M5OkrKacNh2fju1uSX4xBNr7Nrl566HHsU4uJv4app8SUFG\nYHibC72laZuUSlFcmsdgs1HXeJiczlDTWjBnV6mrm4p9w8N+Ojoc67brIpecxwc+0Oy3o1aLuFzG\nyyb5vhf41re+xRe+8IX3HZGL0Ov1/Pqv/zrf//73+cu//MvNNuddIRYrEIuV0GhE/H7zVfMnIhE7\ner2aeNxJZOdWPB4jC8tFpqaS7L+7l8LiImZrlWDIztbtXuqJVaxdu2nUjXi6nNSTy+RWVnB0daEx\nmZqVOIEAaxmFBY2Bs6ckZuvLbNsJZ85EWV5uhpTz+Srj43EkqUo2Wz1fgWJrJe5XKnVcLiO7d/up\nVhusTUwTj+YIdQVIlHRMHJlCEEUk0UomJ3PPPZvXyfXgwXZGR9cnsL5VQbZmKXAHZ87EOHMmSjpd\nxu83s3NnAK/3jZw2WW7Q1+dGFFPUag26tkfoiFhRajW0Opm5Y6cJek3Mz2dJJJoJwTXzGGL3btra\nLMzNZVldzePxmAi12xnaEdigeaKzWNCazc0dkWyW4O7deLZuQwz18/TT08yMr6CuCaz87CQqh5cP\n/9aDyLkcM88911JYjY2M4BseJrRnD42GQlYSkJ0dWHe3Y7frcbuN+P3mW9YReTM20xn5vXfqwBcq\nYi4t7V1bK7QckQukUhKlkszw8NuvnhA1mlYcNx4vsrSYw2Q3wWiCxZdSBAIWqpKG+TNTKBWJYlXh\n7Nkov/rJXThJow34+Pd/HyOVklhczGGz6dm5089LLy3gMrURFAuEvFrqDbCa1fj7wq3qFUGlQmM0\nEq/Z+eXTY1Q1FsYnp7F4XDz+sZ0Ui1UOH17AYtFedaAxGDSEQtcnJnSrMzc3x3/8x38wMTGx2abc\ndHz2s5/l0Ucf5Rvf+Abq26xy6lKmppK8+upSqwzY6TRwzz1hvN4rL058PvO6lvY2pwmHw0CpUOY3\nf+8u0surxOZWSY1PEL5jN6LNyZ2DHjRKlamfjVLJZmnU68RHRqgWCtRdHRx6NU4unkZjMlK3BYgm\nmyv9C1itOuJxiaNHl+noaC40XnttmUjETjpdRlGgWMyi0Yjk8xW0qLDb9dT1Fn55aAaxIeNymxFE\ngVqtwcREgp4e5zv0rV6dvj4XnZ32Vjnu221XEY0WOX062tpdmZ/PUirJPPpoN0ajttUF2GrVEQiY\nqdUaGAxqLFY9ZrMWo1JCGHCTXM2wtJgDwGjSEIuXwVRkba1ItVqns9NBX5+TbdsuP0/o7XYCO3ey\nevx48zrKMhVHJ0/+bJGjhxeoZnNY7Eb6B4dJVo3kCzVK09MtRwSaHeaTExM4u7qYjzV45ZXFluq3\nTqfirrvCN6Vy7o3i9j0zwGhcH264uAHeW3n9zXB0dZGZmyObzDMzk0atFhFtHn7xYozl1RKdnXZi\na1n2bOmllJdQEBBFFfFkmfawnfHxRGvwyGQqxGIl2tosuFwm5tZquHcMMNShRWk0MLhc6ypYTF4v\nxrYORp+eQ5YblBGo1qDSUDM2lmDrVg/JpEQsVty0Vc+twp/+6Z/yxS9+Ebf76qG59yJbt24lEonw\ns5/9jI985CObbc47hiTJnD4dXadHkkpJjI0lruqMXIparWpVqNX63Zx4aRy70Yt5m4FkRcv8ySgG\ng4ZOn0j9vAaEqFJh9vkoqdUsLWWQpCqCKGDy+lBptSQSxXXOiFarYmQkhl7/Ru7AwkKWel0hEDCT\nzTaPWyjITE8nObg9wtEnj2DXlUmnSpQKEia3C8358aRWa6AoyqY1u9RoVFesXHwzlpdzG5Jgk0mJ\nZFLCaNS2zulnP5taV+K9spLnD/7gDswmH6rELKOn3mj9oDcZsHd28k8/HgUE1GqREydWufPOML29\nLvT6yy/e3AMDmHw+Krkcsqjj1ddi5NJ5lHodnd2BrBKRRAtaq5VGQ6F8kSNygXq1SrlQYnQ033JE\noCmIOToaJxKx37a717fnWV0Bj8eEybT+RjKZNJdVJ3wrWNvaiNxzD1q3H6vPQ89d+0jowkxPxpFK\nZSqVOtl8jVhKxuFxIIrNB04UBRpGK9VqA5/PjCCAWt18aOLxEj5fU3ekUhextrVhC4c3lNJqDAZs\nfYOorQ50Vgs6qxWDxURDKpCJZ5raJzQVB9/nyhw7doznn3+eP/7jP95sU25aPve5z/Hd7353s814\nRymV5Mu2p08kStecz5LNVRlfqjOTNrCQaIZXFAVGTy5QSiQo5/OtCcns82Hv6MDs8aAxGHB0d2MJ\nNjuLWyy6DeNWrdaUdr+AKArkcpV1z7sgKHR0OFhKQN8Dd+MLWNm+r5tAXyc4gkjlOoIAXV2OW7br\n9uXMFoRWbgJmsxZJkqlU3nBERFFohkGyFUSViuC+fXTftQ932E9goIuBRx9gMqpiebnQOr6iwMxM\ninT6Im0WSaIYjyNd5FQYHA7skQgNrZl8WWDr3h70NhsqjbqpipqRcJhE7Hb9ZTvHa81mBL15nb0X\nkKRaq+DgduS23hm5FLfbyIEDbZw8GaVUkjEaNezY4cPtvradA0EQcHR2EhTtrJlXKCoyz/3sl0jF\nMiq1CnVPU0CoodZjsJoglcdoMdA14Mcd8mOcWcXjMSGKzVXM3FyGvj4XDoceUWx2FL0aLr+T7l0D\nyBoztbU0Rktze7i3y4qcTWK3u/D53tzRatTrlBIJ6tUqWosFg/0N7ZF0WmJ+PkM+X8XvNxMO295U\n0fZWoV6v88UvfpG/+Iu/wGx+66vf9xqf/OQn+epXv8ro6ChDm9Re/p0gmy2zsJAlk2nmGmi1qg1K\nrc3FwrVN1KIobFgMFGMxNBqJpEWFxe9n9dgx6rUaZp8PTyRCeHCY5eIkhaxEQ64hqlQEAma2bWvm\nUqytFXC5DOzeHaBcrpFKlSgUZNRqkaEh97pJrD1kpLY2z3MvzzFxeomOgTaG7tyCabnM0lIOi0XH\n8LCPvj7XNZ3fzUB7u42JieS66+bxmNaN6aGQlUjE3soH8flMBAIWVKrmdTU5bAT37KVgjiDXIV7T\nUK0msdm0reunUglNKf/zmzCpxRUmT82xNJ/EbNHTu7WNju29rTC62axFoxHROSw8+PgWTrw0TqVU\nZfdODz2uEpmZaXQ2G0aPh1IiAYqC1mIhsGsXNo8NjydJPl+lUqmRyTQXtqGQ5bbdFYH3mDMC0NXl\nJBSyUiw2u9feiInV4TAg5UsUVpfxeU3EYgoOtwVVOU9HyM2OvRFScRehwW4GBz30DQVQ63QMDzc4\nenQFlUrAZtNz8GAbAwNuGg2F3l7Xm4qPiaLAjh1+lqdXOTYVo1AR8AXsVKs1NDoTB/b53lTAqFap\nsPzaa6Snp5vOiNlMYNcu3P39pNMSzz4728qzGR9PMDjo5uDB8G2RRPV3f/d3aDQafvu3f3uzTbmp\n0ev1fOELX+Bv//Zv+Yd/+IfNNueGkMuVeeGFuZbg4exsmmDQSqVSb5aACuByGRkYuPbQncNhIBKx\nMToaR1GaTn85FadnfwApudhsZrdvH1qzGXd/PxqrldiZM2z1y4yk05QSKcK7B9lzZwS324jP11wx\nq9XNhcq//dsI586lUKlE+vvdmExavF4N0WiR9nYbPn2BhZVJ+nu9jJ2cZXFinkqxyLaH7mD7dh+7\ndgVu+RBuIGDh7rsjjIzEKBZlfD4T27Z51zUD7OpyMDDgJpcrIwhNB/FC2e8FenqcKIrC6moBtVrg\nwIF2crkq8XgRURTwes10dztxOPRUCgUOPzPCa4fGWyXdY2dX+FWjifBAGGg25du+3cfRo8s0klH2\nbrfj9NnZ2mNESa9x9sWnMPl8WAIBXL29WIJBjF5vayG4Y0eATKbMyy8vks9XiUTsKIrC8eMr7N0b\numV3sq7Ge84ZgaZo0I1c3Vuteg7udnHqlQQf+8Q2jr98juzyKupSmeHBLu6+O4yigFotrlsp9fW5\ncTgMJJNSK3vfZHp7ZbUej4mONgP33t2GgEC1XCa6tES0bEZ7b+eb/n52YYHE2FhzHxKo5vOsHj+O\nyedjbk5al/CrKE156d5e17qkvSvRaCjUavWbMulqfn6eb37zmxw+fPh9XZG3wOc//3n6+/v5q7/6\nKzy3sJbBBZaX8+uUl8vlOolEiYMH26jXFdRqEa/XdN0iYLt2BTCZtMzOplFRZ1ugA2t5lWqt1uwd\nUi5Tr1YxeDwkx8dJjo3RKJXo1zWQVQqeioBebEeWdWg0qta4pdOJ9PQ4W31c6vVmD5oDB9rYv78N\nrVbN3AsvoFHqdHsVPvyxHUyNRZHlGopcZWjIc8s7Ihfo6LDT3m5FlhuX7Ujs85m5774Oxsbi5HIV\n2tqsDAy4141LarVI0FhCXZ2klpVwDAzx4P1h5haavW5sNj179gQxmbQsTsUYPTHfckQAEitpxs8u\nt5wRgP5+NzaLhilTFqFexaqWULIFVo8fJ7+ygt5moxiNUoxG0VnfUNiF5i7+tm2+1nzRaCjkcs1c\noGSyhNt9+ylg33yzxC2KzQhbIirio4fZ66xT9ZoQ61V8xQnqxa4rquF5PKZrzlm5wNJKkenjk+te\nK2hEauLVB1JZkpDOtzKv5nLUq01Fw2qhQCWXI5/fGCuvVusbEsYux8JChtHRBPl8hUDAwtCQZ12M\nezOp1Wp85jOf4Stf+Qr9/f2bbc4tgcfj4eMf/zjf/e53+frXv77Z5lw3l1MazuUqiKJIT8/1KVdW\nSyVq5XIzj8ugYccO//kGdApzL7xAOpZd93mjx4NGrye3tES1UCA+OkqtXMbcFmZG4+Ho/3kVUyBE\nd6+bwUE3RqMWSaqxsrJRPTqTqbQmWfV5AbRqYo1+t43OB9ppqLV07+3EEbh1epa8FVQq8ar5cW1t\nVtramomjl9vVzS4sMHfoUCupWHrpEB27djHw2BDVah27Xd8q563VBcrSxvyiYmnjuOgP2pD9auKj\nk8iNBmq3m1I8jtHtxnqh27kgkF9b49IMklisSDzeDC25XAZkucGRI0skEhJ79gTo63PdlAu9a+X2\nOZNNxuTzkZyaYu3EiVazIq3VSskwQH5l5ZqleavFInKphNZsXqeueDEdg+0sjC9SSqZQGnXUej1t\nQ93YXVfWzEhOThI9c4bUuXPIxSK+4WHkUomaJKHSalFrtfj9TSnli/P3zGYtVuvVnZy1tTyHDs23\n4rjpdJl0WuKRR7pvinyT//W//hcGg4GvfOUrm23KLcWXv/xl7r33Xv7oj/7ols+x8XqNqFTCun41\nRqMGq/XaBf+URoP42Bjx0VHqlQo6m43g7t1YgkEacpVKLod7YIB6pUJ+eRkEAaPbjX/7dgRBQGe1\nImUy1Mpl1AYDZUcXh56dxBquYq+oyeRkyuUad94ZxmjUYDRq1iXdCgLres44Ojtbz3clmwWyuHp6\nsHlv3RyR6+VK4eXU9HTLEblAcnycns5OXIH135cr6MTfHWLh7EzrNY1BR+dg27rPlbNZ6rKMa2AA\nKZulsLKCKIo4e3owejysHj9OJZtF73DQef/9G2zyeEyMjycwGjVkMmUOHZpHoxFxOIy8+uoyjQab\n0mX5nWLzZ4bbBK3JhCUUwhoKUcnn0ZnNGL1e1Ho9DXmjF/1mKIpCcmKC6Jkz1CQJjcmEf+dOnF1d\nGz67ZaufRGInSzNRanINp9fOgfu6NwjzXKCwtsbSq69SK5dR6XRkZmdZPHyY8N13UyuXsXd2YvR4\niDgU+vtdzM5mWqqOe/YE3zQPZXExtyER8IKX/2Z5MO80Tz/9NP/8z//M8ePH3w/PvE0GBgZ44IEH\n+M53vsNXv/rVzTbnugiFrAwNXZBDr51PZvdf1/Z3bnmZ5aNHW8+7XCqx8PLLBHfvZu3ECaqFAiqd\nDld/P77t21EUBaPL1VpkuPr6WD5yBGiW7p+azWJwe2nIMkqj6TTNz2fZsqWM293MjTh1Kkq5XEOl\nEohEmuGKC5i8XjoffJDU1BSVfB5bezuOri5E1bWV0d7O1Msb9TYb9TqN2saqFqvVwAP/4w4O6Y1E\nZ5cx2iwM7eujb0uzlUZdllk7dYrU1BQNWUbvdBLYsQO2bwdBwBIKcepf/oVyuik8LksSyakpfMPD\nGF1vOD7hsJWODjvFYpVTp6KtXBezWUujoTA1lWRg4PbZHbk9zuImwRoM4t22jWr+jUZ9Kp0Ok+/t\ne6+FtTWWjhxpeeu1cpnlV19Fb7Otu2GhmSj38CM9xGKBVsnf1eLBhWiU2vmHT2s04urvR0ql0Nls\neLdtw9bejqhSoVPBnXc2ZfSr1TpWq+4tdfRsNDaGdxoNZdMlv6empvjMZz7Dj370I3zXcE3eB77+\n9a9z77338vu///u3tFqtRqPijjva6O52UC43He3rDSPmlpc3LDwKq6skxsebFRM0n+O1Eyfofvhh\nbG3rV9KWQICBX/kVDC4XGqMR67yB3GwMpdFAY2o+z4qi0Dhftr99uw+/v6kroter8flMG3YeLX4/\nlrfTdOc9ir2zk9zyMhdvAxtcLgyuy+8i9fb78AVsZLMVNBoRt9vUqnTJzM4SPXmyJa9QWFlhpV6n\n57HHUOt0SIkElkAAjcGAqFZjcDoRBAEpmVw3thuNWu65J8Lqap7l5fz5HKY3du6aY+o78W1sDu87\nIzcQg9NJ2/79rVWQ2mDAt20b5msYDIqx2IZtw2qhsOGGvYDJpKWz861tMYuXKGlqTSa0ZjOOzk4c\nneuTXlUq8S0lq15MKGRlbCyxLrfE5TJuas5IOp3mQx/6EH/xF3/Bfffdt2l23OoMDg7y0EMP8Z3v\nfIc/+ZM/2WxzrosLVRI37HiXUaitSdKG57ghy+TX1rBe4owAOHt7URSFzNwcXYKa1aUUllBba/fE\n7ze3OrYKgrBBAfZ9rg1HVxeVfJ709HSzE7vLRXDv3lap7uWwWvVYrRsXZ9mFhZYjcgEplUJKp7H4\n/ai0WoxuN8aLRRYFAeEyO1Y6nZqODge7dgU4cya27r2ODvtNEfa+Udw+Z3KT4OjsxBIIUCkU0BgM\nG8TK3ioq7WUcC0FA1Fy/dLslGERns52PIzcxejyYvN7rPjZAKGRh//42RkfjSJKMw6FvVRVsBrIs\n84lPfIIPfOAD/O7v/u6m2HA78fWvf5177rmHz372s++r1l6Erb2d5Pg4cqnUes0aDm/oAg9XeL5p\nOhju/n6sbW14imXMnX3MzmVpNBQCAQs7d/pvy7LOzUat09G2bx/u/v5maMVuv6xz+VZQ6Tbm1Ikq\nVSs8ZvL70dnt66TgjS7XVcff7dt9NBoKCwtZBEEgErGxdeuNGa9vFjbzrv6fwO8AOuAfgP/3kvev\nqWvv7YKUyTDz9NOtuCI0k2S7Hnromh2ci8mvrREfHaWcSmH2+3EPDKz31G8A5XKNSqXW6iD6ZrwT\n3VsVReFzn/scS0tL/PSnP73t+6u8W/zBH/wBsizfEGXWW7Vr7+XILS0RHxujksthDYVwdHezcuwY\nucXF1me0Fgvdjzxy2R3Oy1EoVGk0GlgsutvKEbmdrvvF5JaWmH3uuVYoHMDZ10fHPfcgnM9TK0Sj\nxEdHmzvdXi+ewUFMb6FkPperIAhcd8n5ZnG1rr2beWergRpNSfqjwJ5L3n9POyMAxXi8FW82+/24\n+/sxOG9sQ6tGvX7TJLS9E4PT1772NZ555hmee+65W74C5GYinU4zMDDAU089xfDw8HUd63aclC5+\nrsrZLMmJCXLLyxicTtz9/dcUur3duB2v+wWyCwskJiaQi0XsHR04e3svu4i8mcbfd4Ob1Rm5gAH4\nOXDvJa+/552R9xo3enD69re/zfe+9z1eeuml98MJ7wD/+I//yPe+9z1eeeUVNNcRPrydJ6X3uTLv\nX/f3HjezM/J14LPAnwH/55L33rPOSDJZ4ty5FKmUhN/flCF+M22P24EbOTj9y7/8C3/2Z3/GSy+9\nRDgcfvNfeJ+3jaIoPP744+zbt49vfvOb13yc9yelK5NINMeCdFoiELDQ3e24ZbfoL+X9635lisUq\n09NpVlbyWK06enocNzTZerPYbGfEB/zrJa+tAZ86/28t8CzwAeBiSUHlS1/6EvbzWv0DAwPs37+f\njo4OAObm5gBuu58dDj/PPDPDykozxqzTuWlvt9LdLaLVqjbdvnfy587OzhsyOP34xz/mi1/8Is8/\n/zyDg4PXfbz3uTKrq6vs3r2bf/qnf+Lxxx+/pmO8PyldnkxG4plnZte1ZAiHbdx/f8dtUUXx/nW/\nPLVanRdemGdm5o18QatVy0MPdV9zU9ebhc12Rq6EFqiet+EF4ENA/qL335M7I6OjcV56aWHdayqV\nwMMPdxMO2zbJqneHGzE4XXBEfvGLX1x3LsP7vDVeeeUVPvrRj/LEE0+wd+/et/37709Kl2dkJMbh\nw4vrXlOpBB55pJv29lt/LHj/ul+e1dU8P//5OWR5fXnwnj0Bdu0KbpJVN4arOSObKUH5J8DzwGHg\n31nviLxnKZc3qrXW6wqy/Ob9YN7r/OhHP3rfEdkEDhw4wPe//30++MEP8tRTT222ObcNkrRe6l2n\nUyEIwoZJ6n1uL2S5Qa228Rpfqmp9u7GZe33fPP/fbcPaWoFotIBGoyIQeEOc6O3g9ZrRaMR1A47J\npMFuf3Pl0/cqiqLw7W9/m7/5m7/hqaeeYvv27Ztt0nuOD3/4w/z4xz/m05/+NJ/4xCf48z//c5w3\nuPLrvYbP1xwLTCYtarVAJlPGZtP//+y9aYwc53nv++uu3vdtept9575TokSRErVQsrM6RozEH2Ij\nyLEdI7GViyBGhMQJnFwguQFu7HsAx3EQGIgNO+fmHOUYtnUdWZaolaS4DoecjbP0LL3v+1JVXfdD\nk0MOh6RIihKH5PwAAepiV3VNv13v+7zP8n8wGu//EM3DRipVIRZr7bd9PstNm6M6nQZsNj35/BWx\nPEFQEQzev4rHt8K9TmC9GfdVmGZqKsWxY2FqtZb16nDoefLJnttWR2w2FUZH44yNJanVWj0zdu5s\ndWh80LkTt221WuXFF1/k3Xff5ZVXXllPVr3HpFIpXnrpJV5++WV+53d+h89+9rPs27fvpn2A1t31\n10eWm0xMpDhzJsbbb88D0NlpZ+tWL0891XNLrRnWMg/LuC8s5Hj77QXK5Zany2zWcuBAF11djhue\nEwrlOHkyTLHYQKcTGBpys3OnH43m/i4DXqs5Ix/EfWOMVKsir7xykXS6uuL44KCLQ4d6b3DWzcnn\na1QqIlarHovl3iiXftzc7uR0/Phx/uAP/oAtW7bwne98B7v9/o+jPyiEQiF+8IMf8KMf/YhkMsnh\nw4d54YUXOHz4MN5rlCYflkXpTshkqrz88jjZbBWDQbOcuLp3b5CdO69tOn9/8TCMuyQ1+a//miYc\nXpmF0N5u5fnnB5b72VyPSqWx3HfoTrzsa5G1mjPywFCpiCtaeV8mk6let2ncrWC3GwgErA+NIXKr\nKIrC22+/zWc+8xk+/elP87WvfY0f/vCH64bIGqOnp4e/+Iu/4MKFC5w4cYKDBw/y8ssvMzQ0xN69\ne/mP//iPe32L9wWViohK1ZoPrq6gyWZXd5ldZ+1Rr0sUi41Vx4vFxrIX/UaYTDoCAesDY4h8EOvB\nx7uAxaLDatWtSjDy+Syo1WvZ+bR2kWWZcDjM/Pw8oVCIUCjE+Pg4b7zxBg6Hgy9+8Yt873vfw3wX\npPHX+Wjp7u7mC1/4Al/4whdoNBocPXp0XQ33FrFYdJhM2lULWlvb/V3i+bBgNGpxOg0UCiubJTqd\n67k/17KWV8r7JkwDrRjf0aOLFIsNVKrWZHHgQDdu9/qkcatcdtuWy2VcLhdtbW10d3fT09NDT08P\nAwMDPPXUU/T23lnoa521ycPgrv8wXLiQ4PTpKNWqhFqtoqPDxv79nfe9+NnDMu7RaJF33llY9mY5\nnQaeeKKLQODBTki9HjcL06xZ0+zJJ598oJpCrfPBXDvm4XCYcDjMe++9dw/vap2PmvVn/eFkfdwf\nSvI3+oe1/Eu4rzwjt0opkSB05Mhy+2hBrye4Zw/ezZvv8Z3dex6WndLtED1zhtjZszTFVk6SyeOh\n56mn7nrDxHvJ+rg/nDwo4y6LIovvvktmehql2QSVCnt3N90HDqA1Phz5HrfKWk1uTplyAAAgAElE\nQVRg3UxL8Owt4MP3Ib9PSE9OLhsiAHK9TuL8eRql0k3OWudhpJrLkRofXzZEACqpFJmZmXt4V+us\ns87VlGIxsrOzLUMEQFHIz89Tikbv7Y3dZ9xLY2QS2A8cBPTAznt4Lx8b1XR61TGpVkOsVq/z7nUe\nZqRKBam2umqimsncg7tZZ511rodYLtOUrqmMURTqhcK9uaH7lHuZM3L16BmB3I3e+CBhDQYpJxIr\njuksFnS3UV3QlCTyCwvkQiFUGg3O3l7snZ13+1bX+ZgpRqNkZ2eRqlXsXV0Y29rQms3U8yvDrBa/\n/x7d4TrrrHMtersdQadDblypeFIJAgan846u1yiVyM7NUYrFMDqdOPv6Hqiw7I241wmsvw78n8BJ\nYO4e38vHgmtoiFI8TjkeR2k20VmtBHbuvK3YYnJsjPCJEyhyq19Nbm6O7oMHca5Xmdy3FCIRQq+/\njlipAJCdmyOwezf+HTuInDyJWC6jEgSswSDOvr57fLfr3A3m5+d59913CQaD68mc9zFmr5e2zZtJ\njo0h1+uotVrcQ0NYg7ff1E6q11l4913y8y3F3dzcHPmFBXqfeQbDA66ltFZ+/f8P8BPgF1cdU776\n1a/icLQkczds2MC+ffvWVMv7O30tVqtMjozQlGUGN27E6HLd8vlBr5eLP/0pS/E4AB59q7yvYrXS\nsW/fctnrWvp7b/V1b2/vA5HQdieE3nyT9OTkimNas5mhX/kVmrJMNZtFo9Nh9vnQ6O/vks5reVAS\nGW+VRqPBSy+9xPe+9z2efvppJicncTqdvPzyy7jdD37bh8s8SOPelGXKySSNYhGt2YzF60Wtuf29\nfn5xkZlXX13eaF6m64knaNu06W7d7j1jrcrB64DLfq2/BY4CP7vq3x/IapoPSzWb5eIrryCWyyuO\nG10uhn/zNxHu4AFYKzxIk9PtMvWzn1EMh1ccE/R6Bl54AYvPd4/u6uPhYRr3er3Opz71KQC+//3v\n43a7aTab/Mmf/Alnz57ltddeQ6vV3uO7/Hh4mMb9VsnMzDD3y1+uOh7YvZvg7t334I7uLmu1muYF\n4AjwJtAB/H/38F7WPIqioCgKepsN03V2T7bOzvvaEHnYsV+nwZ/ebsfguHEzrQ9D85qd1zofPYqi\n8LnPfQ6j0ciPf/zjZS+IWq3mH//xH9Hr9Xzzm9+8x3e5zt1mucrmFjA4HGivUZVWazSYr+nn9CCy\nVsI01+OB84wUCnWWlgpUqyJtbSaCQdtNGyVBq9ImPTVFdnYWlUaDZ3gYo9PJ4nvvUUmnUanVWINB\nOh59FL3N9jH9JR8N98NOqdlUiESKJBIl9HoNweDd6R3RqFSInDhBfn6epixjsNvp2LfvjuLON6MY\niZAcH6eWy2ENBPBs3IjxDhPt7hb3w7jfDb71rW/xb//2b7z77rsYDKs77k5PT7Nv3z5GR0cJBO7v\nJni3wr0Yd0VpPb/xeAmdTqC93faR9X4phMMkx8ep5/PY2tvxbNx4S3kfqclJYmfOIFYqCHo9ng0b\n8O/YgVq4vzv2wtoN03wQD5QxkstVeeONEMlkK0FRo1GzbZuXPXvab3re4rFjJEZH4dJ3odZo6Dpw\nAHtXF9VMBpVajcntvqP45FrjfliURkfjnDwZQRRbux2Xy8BTT/Xg8Xz4HjlKs0kllaIpSRiczrsu\nmFRJpZh59dUVmjaWQIC+Z5+9p+JM98O4f1jGx8c5cOAA77//Pn03SUD+6le/ik6n4x/+4R8+xru7\nN9yLcb9wIcGJExEajZZn0OHQ89RTvXi9d7fHVSkeZ/a111aE020dHfQ+88wt5XzVi0XqhQJao/GB\nqqRZq2Gah4q5udyyIQKt1tJTUxlSsSyZmRliIyPkFxeRrxK4quXz5Obmlg0RaJX1piYnEXQ6rIEA\nFp/vgTBE7geKxToXLiSXDRGvS4dTVWDu2Cmyc3NI9foHXOHmqNRqzF4v1mDwIzEOCuHwKnG9cjxO\nJZW665+1zhUUReEP//AP+au/+qubGiIAf/qnf8q//uu/kr6OHtE6H45SqcH584llQwQgl6szOXn3\nf/+FpaVVeX3FaPS6z1qjVCJ98SLxc+cohMMozSZ6qxVbe/sDZYh8EOur2IdAURQqqRRyvY7ebkdv\nvXHjo+u1/HaaFRbeehMlnwRFQa3R0LZ5M+1796JSq1GazevGGxVJahko15QCSo0GiiyvSxB/RFQq\nIvV6Sx7H49KhCo8zdvYCFpOAEGuV3Hbu3/+RVbvU8nkaxSIao/G6eUMfxNWG7mUURVmVuX8jmrKM\nVK+jNRhQqdf3MbfKj3/8Y3K5HF/+8pc/8L2dnZ38xm/8Bt/97nf58z//84/h7h4eqlWRWk1adTyd\nvnPByaYktbyZsozR5Vqee6/7rDWbq561Wj7P/JtvUorHQVEQdDp827cT2Hl3NUCXn12jcc2WkK8b\nIzehXG4QiRSpVETcbhN+v2U5x0NqNIieOkVmehq50UBnsRDcvRvXwMB1r+XzmZmevqKcqdWqMYpZ\nKtEwRmMre74pSaQmJnD09GDx+TDY7Vh8PrKzs1cupFLh6O1dsRhc9pakJydpShK2jg6827aht1hQ\nmk2keh2NXn9PFhCp0aAcjyNVq+hsNixe7327kNlsesxmLY2GjE1dYeLcGLIoYbebUZpNsrOzOHp7\nb1vvRaxWKcfjyKLYmtBMplXjlRwbIzYyglipoNHr8WzceNtxZGsgQPIacSa9zYbB5SKTqRKLlZDl\nJj6fGa93pQhffmGB+PnzNIpFjE4n3m3bsK6Lr30giqLwjW98g7/+679GuMWx+vKXv8xnPvMZ/uzP\n/uyWz1nng7FYdFiteur1yorjweDqTeTlub9cFnG7jQQC1lX5ffVSifCxYxSWllCaTQxOJx2PPYbV\n78fW3r6qlYPB6Vz2dEj1OqhU5JeWKMViy++RGw2SY2M4uruX39uUJErxOI1SCZ3Fctve8Nz8/HLL\nEaPLhW/btjVZobdujNyAQqHOW2+FiEZLKErLeNi61cvu3UFUKhXFpSWSFy4sey7q+TzhEycweTzX\nrYDo6XEQDhdYXCwgywp2uwG7NkPTuLKMT67XkS5Jw6vUagJ79qAoCuVEApVajaO3F8/w8IpzMjMz\nLB07tmx113I5mrKMs7+f5PnzVLNZDA4Hvq1b73pC5M2QajUWjx4lNzdHU5IQ9Hq8W7YQ2LVrzVrn\nN8No1LJrV4BTp6LI1RiyKOFwGvD6WvFmpdmkUSze1jXrhQLzb79NLZtFazaTnZlBYzTiHhzEu3Ur\n1kCAcjJJ9PTpZUE0sVIhPjKC2eu9LeVdazBIx2OPET93DqlWQ2+zEdi1i1xFzRtvTFMstowUk0nL\n44930tfXSmytpFLMv/32stu5ns9TKxToP3wYw32eNP1R8/Of/xxJkvj1X//1Wz5n7969uN1uXn31\nVT7xiU98hHf3cGE0atm508+xY0sUiw3UahV+v5mhoZWhkFKpzptvzhOJFJfn/i1bvOzZE1wxb6Un\nJlZsFCvJJJGTJxl44QVs7e20P/IIybExpFoNg91OYPdu1Fot0TNnlvtLqQUBk8ezInwj1WqIlQpG\nlwtZFImcPElqYoKmKC4LqrU/+ugtVU+WEgkW3n57ee6o5/PULz27N/Pk3wvWjZEbsLCQIxK5El8X\nxSbj4ym6ux20tZkpxmKrQiiNYpFaoXBdY8Rs1vHkkz0kEmUaDRmHQw9pgVBsdkVOiMZoRGsyLb82\nOhz0PfMMtXy+JTFssxGNFllcTFOtinR02GBxcYX7r9GQKWdyZGZepXlpF1zP56nlcvQfPvyxVU8U\nlpbITE8v/31yvU7ywgVsnZ1Y7tNStb4+Fw6HkcKSDm08iEmvoikrFAo1TGY9+htky5fLDRSltTu7\nmszMDMVwGJPXy8Lbb1OOx9EYjSiyTDWXY+DwYWr5/PJkcpmmJFFOJm/JGCkW6ywuFsjlang8Hrqe\n+wSCXEdntSJotRz/xeyyIQKtcNToaJyODis6nYZiNLoq/l3LZqmmUuvGyAfwz//8z3zlK19BfZve\nwC996Ut85zvfWTdG7jK9vU4cDgPZbBVBUOP1mpc905dZXCwQj5dxOo2o1SpAIRotkk5XsNsNVKsi\nJpOW/OIi0MpFyWSqSJJMWw3aM1ks3ja8mzfj6OlZNvwFrZbo6dNETp1anhOL0Si2jg60ZvPyM6Y1\nGtFeag9SjseXDRGApiiSnpzE3tV1S89+KRpdNXdUMxkqqdS6MXK/kMmszvGo1aTlmKPOvDr7Wq3V\nItxEsEiv19DZeWWxEk2dOHt7yc3Po8gygl5P26ZNmNrakCSZaLREqdTAatXj99vQaARmZzO89toc\n4+NJ8vk6Xq+JQ9s02GQVAjKRSJF0tkGfykR+YRFP0LX8sNXzecqJxMdmjFQzmRWGFlyy+ksluE+N\nEQCXy4jd2oO6uJ0Lb55iYS4Ngobe3ZvJy2auNkWrVZHR0QRzc1kUBbq77Wzb5sNsbhklpVgMtVaL\nVKlQvqSqKzcaSPU69VyOfCRGTVITT1Yx6FTYrHpU6tbuTHeV0XojyuUGb701Tzjc8tioVNDf7+KJ\nJzrR6DTU6xL5/OrfeqnUoFKR0OluMEVc0r1Z58bEYjHefPNNvv/979/2ub/7u7/L1772NRYXF+lc\n7zt1V3E6jTcs500myywu5vH7LaRSZU6ditJsKuzY4WdhocDi4iKVikh7uxVbU0s+X2NyMk390rpQ\naeoJxGsMX5redGbz8loh1Wotj8hVz43R6aQcj+PZsAGxXEZjMODduhXjpQ1tvVBYEeqB1kaklss9\ncP3I1o2RG+DxrJ7oTSbt8iJi7+oic/HilQ6qKhWOnh7MbW23/Blao5GuAwdwDw0hViroL+WISFKT\nY8eWuHgxgyQ10WjUDA+72b07wPnzSWZmMsuVObFYmXGbkc12A/VUgvn5PBqtFq3ZTCInU27kGN7g\nueJe/BgXEL3d3lr9rvpMQadD8wAk2ApaLYb+rRjTOnq6iwhGMwXFzDtHI1jsJtzu1u9nbCzJyEhs\n+SsYHU2gUsG+fa2JxOTxUIrFVizsglaLoNNRr0uEQjnSkoWKYGd2bIaODhudnXZMXi+WWwi5LS0V\niESuhI4UBUKhHIODLjo77eh0Am63cVWCtc2mx2JpGbEWv3/Fzg1a8W+Tx3NnX95Dwg9/+EM+9alP\nYb2DHajZbOazn/0s//Iv/8I3vvGNj+Du1rmWyckU778fZmEhz9mzMaxWHRs3tjEyEqdYbPDTn07i\n97fG8uLFDFu7g6TzF5cNEUGrwbtxA5NzFQLdNWy2lVoyiqKsmn81BgNGtxv/rl3ItRoGp3OFwJnO\nYkElCCs83ypBuGWvhsXvR2syrfCOrNVn96E3RhoNiaWlloiV1aqno8OG3W6gu9vO0pJ9OcfDYNCw\ndasPl6u1kBqdTnqefprc7Cz1QgFLIICjp+e2y2w1ev0q9c3oUo6pqTSy3PrhSlKTyck07e1W8vna\nioVDkpqkKxqcj2wmlC5h96pwdnei6xxEcy5EPlegXBaxWHTorFZMt2EsfVhsHR3YOztb7kxFQSUI\nuIaGHhg1wWi8wsW4GkFwIleaKEqrtDedruJ2m6jXJWZns8vzj0rVCtNkMlXK5QZmsw5Xfz+FxUVQ\nqzF5PC0xskulvfmqmkZDTzhZJ7hhFwPBduqZFM4t/QQ2DV03RKIoLZdyOFxErxfI5eqr7E9Jai57\n+FQqFVu2eMlma2QyVRSlZYjs2OFHo2klT5rb2uh64gni5861kuCcTnzbtz/wjbs+LP/5n//JSy+9\ndMfnf+lLX+K5557jL//yLx8aifh7Rbnc4OzZGNWqhF4vUKtJLC4WCAZt9PU5UakgEinR1mZBEFTI\nskKqYaXt0QPo/PPIYgNbRyd57JSyVWo1mWsfT63RiKO3l9iZMyuOu4eGcF7q0QUr1ySPS4+pvYvK\n0jxKs4lKrcbZ24vlFkXxLD4fnfv3r0xg3b59zYVo4N4aI48C/zfQBE4A/8fHfQPNpsKJExHGx1M0\nm60Z2+s18dRTvTgcBp58sodYrEStJuFwGFYJ45hcLky3WQcuSU0WFvKEQjk0GhW9vU46OmwrEqMK\nhcayIXL1eZLUxOezYDZr6QwacRplapIaq8eOYvfifeJZdNkKVUlgKirTuf8JihfHMNoFrD43vm3b\nMLpcFAp1dDoBg+HGw59Mlslkquh0An6/ZVVc9VbQmc10HTxIKRKhXixidLmwBoMPhJIgtMJuitIa\nm8uoVK2Et8s0GjKNhozLZcRo1LCwkCeVqtDb62Rw0IXR5aLv2WcpxeNY/X6ys7MoioLB4UBj62Jk\nromiNAknRPT6NozBALquLoyOKzNdLZdDFkUMDgezoTzvvbdEvS6jVqvwes0UCnVstivlxgaDBrv9\nymuv18Lhw/0kEmWaTYW2NhMOx0rvlaO7G2t7O3KthsZovOEYNhoSsViZSkXEbtfj81kuxd0fLhKJ\nBKOjozz99NN3fI3NmzczMDDAT37yE37rt37rLt7dOtcSjZYYG2uFvq1WHT09DrRaAVGU2bHDT6Mh\no9GoVqgp5HI1tG1O0iYBo1HDfEFElht4PKYVz5eiKJSSKfL5Goq7C1NfFTEZRn2pIMG7Zcvye5tN\nhZMnI4yNtdYklQoGe7sZ3t8DtRIGux1re/ttyQc4e3uxdXYi12poTaY1W814L42REHCIVrO8HwBb\ngPO3enI6XaFSETEYNHg8pjuqzkgmy8zMZJcNEYBEosLCQg6Hw49er6G7++72BhkbS3LiRHjZ2Jib\ny3HwYDe9vVfyOKxW3bL1fRlBUKHTCezc6UNVSnH+1bdIRNO426xs6N+L096F3W7g4lwRWW7tepdE\nPVufeZYtww60JhP5QoPXX58jkSij1aoZ6nfgMxTJz86gs1pxDw5iDQSYmEhx4kSYalVCpWqVvj3x\nROv616NRKiFeStLS6FYmaOpMphuWO9/vBAIW3G7jsk6BwaCht9OIQ1cjnSgwOZ1HpVIxNZVix44A\nb70VolwW6ey08+67i1SrErt2BdDbbC0p/8FBnFu2k43nkFRasskahUIEna618F82MPT61mup0SB2\n9izZ6WmakoTGYqNg6V3+PTebCrLcpK/LzMLEPIVEGnewjV1Pb6GtbaVhbbXqsVpvPsEJGg2CxXLD\nf69WRY4eXWJuLossK+h0Alu3etm1K3BfVk99GH7yk5/w/PPPo/+QmjNf+tKX+Kd/+qd1Y+QjJJks\nc/FimnJZJJWqkE5XMBg0eL1mhofdOJ0GrFY9yWSFXK6GyaRFrxcolUR6e3Vks1XGx0v4/RY6Omzs\n2RNAr28trY1ymaX33+f8uxdIxIs4OoIE9j6CdbCLrk4TlViUuddfx+Tx4BocpCzrmJ7OotGocdkE\ntEqdYqFKNhhg087BO/4bP+jZXQvcS2MkftX/i8BqNZrroCgKo6MJRkfjy8bIxo0edu4MIAi3Z/HV\n6/KyiNXV5PNXlDRTqQrhcIF6XcLvt9Debrvtz7lMtSoSi5UABVkUUQsa6nWZsbEk3d0OlKaMXKvh\n91vo73cxM5NBlhUEQcXAgAu328j0RIzyxFkMUpFAm442j4C0MI5jdx/Ovj4kSWFyMoUkNenqsrNx\nkxeDWYcoyhw/Hl7+zhRF4dzxCfr9Avp0DCUcprC4SODJw5w9G6dalS593xAOF5mdzbJz50rXYFOW\nSV64QHJ8HLnRQG+zEdy9G1tHxx19P2sdWRRRFGXZ4LLbW96zqak05XIDnzaHuHiKeMnKqVkViZJA\n14Yunnmmj+PHlyiVRHp6HAQCFppNhampNAP9Tow6BY1eTzxR5o03Qpw6FaFQqHPwYDcqFRQKNSwW\nPY2GTH+/k2KxpYGgzYfJnDmJ0dh6jIvpPAUpSfeOg8yEWiXpehoI2Ske22RF2dqDViViL00jVtqu\nm4T9YVhaKjAzk1kOCzUaMhcuJOnosOHzre2J8G7zi1/84q5Uwnz605/mxRdf5OLFiwwO3vlitM6N\nmZvLkU5X2bHDz7vvLlAsNhBFmT17gnR22i9VoiUxGjWEwwUqlQYbN7ZhMqgJL+V49NEOFEVBqxXo\n67HS5m2FQAqFGnPvnGDyzeNMT2daBky2hFqjIdKxBTkTpzl/DhSFYjhMMRzGvvNx1GoImqvEz5yh\nlM5hsJoJmB5B2dh2x0a9WK22CizWsFr3WrizbUAbMHErb04kypw9G1uOeVerEqOjCfx+a6vM9Taw\nWHSYzTpKpStljSoVyxNnPF7i9dfnlssetdoku3cH2bbt9gRjxGqV9NQUCyOTiNkGAx2dRBJ1QlMJ\nTD4/VZeR3GKY5EhLS8Lo8bBjy3Z6ehwUCnWcTgM+n5n5+TypxRiLFyOIDZF6vkApFsM46CB2+hS2\n9iCbNrUxNORClpVl6xwgk6kuL4DT0xlqpQp2bQ3HrwzT53BQTacRy2VS4QTV6mr1wHi8vOpYMRwm\ncvIkTak1FlK1yuKxYwy+8AK6NW6F3w5So0Hm4kUK4TC5S7oCvu3b8W7ejMdjw+MxUYrHmXn1PahW\nKZmdzE/OU6u31HB7h/1YLHqCQStdXVfyLEqZHEtnz9GMzWFp7+RM2MjoaIZYrPVdv/baHL/2a0N4\nPEZKJZFmU+HChQQ//ekUhw71wswEsYkEw8NuVNUChaUw2XKTjcN99Ld7WUhI6Bp5UtPTeHV+BIeX\nkmwgngNjOEX70N01Ri7nnFxNrSZRLq/+PT3IKIrCkSNH+Lu/+7sPfS29Xs/nP/95vvvd7z4U/Wru\nBZWKSKUiYrXqOHy4n0KhgdGoYetWL2NjSVQqOH06hk4n0NFhZfNGN816mUgoQW+/m4u/PIO+msJg\nEGCwi7zHjH1wIwvJJpELs6A1IisqqhURh9NIbjGC3dVBqlHFo9EsV8tUUimshTSdbQKzr75HPt4q\njqgWy6TPnaa4tQtb+817mV1LNZcjMTpKMRJBYzDQtmkTroGBNempvNfGiAv478BvX+8fX3zxRRyX\nSpw2bNjAvn37qNUs1GoS9XpLJEav9yCKTSYmppEkFz2XEoFCoRDAB77escPPmTNRMpkoGo2ajRsH\n6ey0EQqFGBmJUSxeKoutp6jXYWxMS2+vg0R0nkomg89qxeBwkKrVUAvCqut3d3cTOXGCo0eOEQnn\nCZ1aApXAjt95BpOlyvybb7Jp8NcYffs0oizQZrVQSSaJv/sW9u5uur1exFqBs69dJF8TsNn8aLQa\nivUCNaVKm1ZAatRZjEQo/fKXbD/4JAablaWl+RV/bzS6SCi0yIULLe0Una5EOp3i3KiTwYM2UpEI\nAH1KA4NBQ6kUWf5+G+UytXyWs29FGNiwAYvXSygUInHhAsIlQyR1qS+LJ5ulms0SuSTic7vjsRZJ\nT0yQnZsjdOQIlWSypZy4sEAtl6Pr4EEKCwskx8ZYOnYMi8+H2dWBSqVCrjdolEpks3W8XjOJxBWD\nTqxUsDRzNNNNsnNz5AsNZi5CMmXich8pr9dEPZfB4nGSqTUJRyuMjMSp1STOn09waGMQVVOk0chR\nX1zE0dODx2KjNHsRwRhl4/BupHyJYLsdxdnBWyfixBbSgEJfVM1hk/OWDXhFUSjF49QyGTQGA2af\nb5VnxeEwXFs8hV4vYDLd62nm42ViYgKj0bj82/6wfPGLX2Tfvn38zd/8zXW7/a5z6yhKK3R5OTkb\noL3dysWLaYrFBsViA51OQKNpNfFTlJbuCEC1VGX8bBZ1OY3ZqGHT1gDTR95GpwW5XmJ+YpLY6TPs\n//ynWTwdwmTS4vI7yYk1HtvfQyGRIbMYxu10IRh1mLUizUxr/rzcGM+ZjtPlcjFZbs3TgqDC7TFh\nM6kpxWK3ZYzIksTSsWMUFhZan5HPU81kEHQ6HN3dd+srvWvcy1lCQytX5E+BxPXe8M1vfnPVsfn5\n3KW4+ZXSJJWqtej39Fzp13HtRHCz1z6fmWKxC51OwOs1odG0jIqRkRrQWkAuf169LpFKFBFjOTSZ\nGOloFJVajTUYxH9VP4HL16+kUiSmZskvlhGTRewWgXS6RuzURYJ7H2HbIwIdXg3vv9YgnpbQGhs8\n9kQv6qUxCouLHI9EMHs8uIeHiZwbh44KA49v5/z/foVOnxUKSWrpMv5tWzj/P18nOhrDs3MPbR4T\nHV1X8l22bBniF7/IAK0feV2006jXkEWJUqWJ55L8eFvAxRazgVOnWomXtXwefS1Nj8GGPDHB7MIC\nnY8/Tk9fH9pUilg4DIDnUmxcLQio1Gp6rqmBv53xWEtIjQa5hQUqqVTLEAFQFMrJJLn5eUxjYyTO\nn0drNFIvFFpy6W1euvt9XDgzDyoVyWSZnh4HBw50USo1kKUmZlOTDT4T1cgUakGgnk4g5QQESYVB\nb6G/14atFCJ/Ms7Zs000ZiuBTbt4PVtFFFvJzGNhFeffmMeklXn6uf2U585RHhvD6HKTCccZFCQ2\nPHuIhJLmzGyR6HzLQFTrtJTqKkZGYvj95hUT841InD9P9PRp5Esy1pZAgJ6DB1u5Lpe4XHa8uJhH\nUVqdqYeG3KvyUx50jhw5wqFDh+7a9fr7+9mzZw///u//zuc///m7dt2HjVAox/h4knK5JRbZ22vH\nYNDS1WVn8+Y2ZmayiGITi0XL7t1BFAVkuYndrkctN1CqOfQqFYV4hYHdnXjsAqNYiIcyxGfj9AwM\n0d2rR63VkDjyJsmxcQyChLs7SGQpjs3roSk3EdTQ7tNjE5rkMq01IjMzg6DVUs/nEapVBrsMiCo7\nRpsFl9/ZagugUiGL4k21rK6mmkpRvkpqHlqiablQaN0YuYbfBvYA/9el138OHPugk/x+C11dNkKh\n/PKxQMBy3f4C11KtikQixUuhDyPBoAWdToPbfUUX4mra220rwhONhkytJjJyMsT08TF6hgN06tIk\njh5Bo9dTikbpOXSISipFYWkJtVaL0eWi2pCpSSokUYZGBY/biMNpQmOxsrXeREIAACAASURBVGOb\nnYnJNO//8hwGh5PgUBfTR8+gSoXwunSUIhFKkQiCTkd7j5+xkUl2feoF1If2UFkKUW1W8A/1sXBu\niky6wsIrb7BVa+L9SJUnD3TQvbGbto0b0WgENm1qI5mskM/X0Gj0DPYP4TZV0WkVsFhwDQ5ibW9n\nS5eA220imSxRizbRV2so6ZbRIVYqxM6dw9rRgb2ri/Tk5Ar9CUsw+LGWD3/kXFaPvaqfy2WkWo1K\nMklTFNE4ndg6OigsLpI8P8rmZzvRmzcynzdQKjVQqWDPngAGg5Z8OELx/Dih//cVcnNz2Lu66D50\niD2PBfE3XKTyMkOuMmf/9wiBNj16s57Q2UWs2QrtbYMIRjNnz0ZJWqporS4KqSjxVB1tvo6YL6Ax\nmtBbTDTSCaRSEfeW7WTOnECtEVBrddg7OzDYrOTzdUolEYfj5sbIZVevfLkrsaJQikTIzs3h3759\n+X0tleFuIpEipVIDp9NAMGi94xyr+5UjR47wyU9+8q5e8ytf+QovvfQSn/vc59aki32tEw4XeOut\neSqVBpVsgbdePU+g3c6GTT50JhPbt/vo7nagUqlwOPSYTDoqlQYOhx6Px4TVCGKjicGkRVMv0OmB\nRlPgzdcmsepl8pE0uUiCnoFnSIyeJzN+gchECKfXTi0Roe/pQ6RnZtj76U+RvDhL7fxRTP2tKpfU\nxAQGh4PAzp2tnjWKgtFswGm3ozWbWXrvPeqFAr5t26im03Q+/vgt5XvdSFFqrYoV3ktj5EeX/rst\n9HoN+/d30dHRKpF0OPQ4nUbK5VZi5o0mvmpV5J13FgiFcshyE6QGG7YEeOKJ7hvuDIeG3CSTZSKR\nIrKsIIoyXV125kanKFVEjr1ynOyQm163h8LiAk2VwPzJs6TmlpClJkajGsUtk9N3gF+Ht72TxLlz\nhOeTOFQmCuMh5IyRcMnYqvtWmthsejJjIbRqGbet5cJryjL5xUV8vgBOmwZBqpJO5BnYuY2Z//o5\nE2+8R7Um42jvJp8UKScTxKeSzHmgGZ9DFkXqhQKDAT/JDU5KFRlBENDrBXbt8NLXoW0ZTlcpswaD\nVrxuHRdnjq5qey2Wy0jVKhavl56nniI1OUk9l8PW2YlneHhVRc1aplCoLeuwXK+aRKPXYwsGaTYa\naAwGpFpL48XodKK2OJHNbgyeOrVCgY59+8gFAi3viMVEd1sATbpV+pvP13njjXmefypAY+o0jXRi\nuZ2AoNdjaAswF9OwFM6gNpgIn5+kq8uBILR2Z52DQfLFCo/utTMdU5gTRUrJDKpqFptFj9VuJjpS\npByKgs6IYHUSz8oE0ln6Dm6md3eVpsWDoNNdUoKcxdDlRlULADd3/YvlMmJ1dWfTZU/RVRiNWvr7\nH56259fj+PHj/O3f/u1dvebzzz/Piy++yDvvvMOBAwfu6rUfBJLJ8qXigNaG9VpvXCiUa1XKxNKk\nEmUWQmmioQQWoU4o0mBmpoOhIQ9btngxmXQ0SiU0ajVbtvj46U8mOPruAqHZLC63iWcOBrBqJcrZ\nHO1dTsqZPDa7kWIyhUCTSqGMWK2gNeiRGyKpVBb7Ugx7wE+jkCN+/gJSuxejzYJ/5076nn0WqVpF\nrFRoFAqIgoB/504MTidzb71LTdYitHVSrSvk5uaw+Hz4tm37wO/E5HZjamujeMl7DaDWaNakVwTu\nfc7IHWE269i0qY1crsqJExHOnImhUqkIBq088kj7Ck2Fy0QiRUKhHNV8gcLiImK5THZmhnanisHt\n1++yajZrefLJbjKZGo2GxMxMttU4T91SyKzWJCancwQe7STY1UMhX2PkX35EqVDG3d1J4Llf4/X/\nMYraZCaXyFGr1XnqyQNY+5NUtE6cVgFRbSAayeMe3oJSzIBWh9HlRFAktMYmpXgcuVZDMJhIRXMU\n6wLlYg2VLJJNFbl4YYlSvgJqDdu3WWlXa7G4HZSyM8hNFdmpKZr1OiqNlrJ4gZ7ODaQsTlSChm3b\nfPT1OZcNuGKxvkJrwm43oHc4VhkjepsN7SXLXGXzoB+2YtUJd1xifS9QlFYy6OhoglpNwmTSsn27\nnw0bVisTejZuRCUI9D37LPHRURC00NaNFNzM+bkaGslMX7uDSmQWi9dL29NPIzq6eOvnF7mmfRHZ\nRA6pXsfa3k77879BUG5SW5hk/mKMsyNFZKOd/h0b8JqcvPm/LtBswlIoiSfgYue+AdqCTty9VhKp\nKmZZhVdjQJVdorQYQqc00HcFUesNGCwmwqkaDW1Lw2bH7g7yJZnIVIjcXAizRU9vm4Olt15H9fjj\nN62A0prNaI1GGqXSiuNrUcXxXpNMJsnlcvT399/V66rVar7yla/wrW99a90YuYb5+RzvvLOwnCht\nNms5cKB7RbJ4Pl/j/GiMRjbNUrRKLlPGH7RTr8sk5hbR2lo9x2YmYxCfIT83g9ZoZK7hI5WqUMoU\nselFSrkip0a0dP3mMGK2RmC4j3w4jOg04en0Y/X7aEai+Pq7qFVn0KhBYzNisRioVhqMHp/m1JGL\nPPa7G9Dmm1iTSQS9viV8CKBStcQP02miY1PMj17E3tNLoaZmYTzG8LAbeyRyS8aIoNXS8dhjxM6e\nbfW8utTte90YucsoisLJk1Hm5nLLx2Zns1gsOvbtWz2x5vN1xFqd3MwsFpOA3m+nXm0QuziH32vA\nepWinSw3mZ7OMDeXpVqV6O52MDTkXl64DC4X2XyddKaOtaEQixZwBAeZ+K8jZJaiqLV6FLWGt356\nmnxdj5KvYx/agF1ukNM4GHxuG/lUlsWxELpqgd4eOxVJg2eoF0EDHb1umDlNI7WIyeWilExiau+k\nEo+y9eABUKtwdfhZijcYeO4wc0ePk1hMkcwr+LbvJtO0Mrx3E+3tVhqmQcr2VtJTMpwi/V9H2PYb\nnyRcUpNIlBgcbOXZxOMl3nlnYYUK5/79nXi3bm01RbvUZ0ZnteLfsQNBo2F2NsOJExFKpVbSV3+/\nk927gyuqeNYSiqIsG0uxWIlTp6LU6y2Z5Xy+zokTYVwu4ypxO63RiH/7dlyDg/Q+8wyLC3niuSah\nWI3z4xmkUgFRE2BX/yAaTUshMZmT0ek0y1VflxGMZkz9mzg1VmJ2voBarNNp6cHp7yL61iix8TCi\nxox+kxf0JuKhBNVKg3y+StMR5NhIls6gzPhohGee6cOeifD2T14jv6Wf/c9tZunYcQRDA0GvY+Ou\nnVQEO5LUpL3dxtNPdTKuSVPvHsDj1GCoZ1DrdCQnJmiUyxhdruu2MzA6HAR27yY5NkY1k0FpNrH4\n/Tj7+j6ikbp/OXHiBHv27Lntxni3wu/93u/x9a9/nfn5ebrX6ILycSNJTUZHEysqtsplkfPn47S3\nXwkRGgytqhVFlhEEFZLUxO4wUCzU8PltWMxajh9fQs5EGerQsf9RH3qbmXOvx5kLVcmUVagwEItk\nMZgMLCwVURQ1F5caWDQu2vr96DSgdbjw6LooRiPodw2QC8cx93lp372DQlGkMhNn868+T8nSwXsn\nY/Ts2Ii7v4daQ0GrNFCaTaRKBbEhUiiIxC+GWDo3xdDzz6I1mYlFSwwduHXlY5PLRe+hQ4jlMmqt\n9rbE0j5u1uaqcQtc3sVfy+Jinp07/asWRKfTgLpZp3PQz1K0ytxCmWC7DZPTSTEaXWGMzMxkGBmJ\nk8tVcTqNzM1lqdcltm71kUrNUVfUZKo6PEMDDHdqsetL1ApFsukKVl8b5UwBwWQhPhlHH+hGa7eT\nStdQFBCMItZ4GVHUYOnsQa7XGBz2YXeYiUaLCIKK3k09GPtMhN54A6O/g97uXhroUWplkufPEwtn\nyM7MgNWDbvNWbDseZ/NnOghXrBRMZl790Tv83n/bB9o677+fJDz+JorZgafDz5bBAcqZHLWmllAo\nz6ZNVRwOAxcuJJbFu5pNhWy2ytmzMT7xiQH6Dx+mnGiFFUweD0ank0KhxokTkWVNlmpVYmwsRVub\nednAWStUKg2mplrGpdGoZcMGD9WquGyIXKZalchkqquMkcvoTCbQ6JiNZ1lcLPDOOwuXUkrUvPp2\nnL7Ne9mxq51IpEg4XMBm02Gz6cleSjo1m7V4O1z87HSM//Xv55AkBalUYGC4jV/Z4SWWqpOvgFhv\n8NbpIruefQbH1BSldB73QD8LopdioUZTVvjkJ4cY7DEy/UqBbYd243FqSc7HMPZtYPjxHVh6hzi3\nqOH9I0vEUg22bPHisUG7o0FdztFIFNH4fETPnKGSSODbtg2dxUL7I4/gHhpa/pubTYX5+RxzYS0N\nzQAdW/S0+/SY2zx3XafkQeDEiRPs3bv3I7m21Wrlc5/7HN/+9rf5+7//+4/kM+43ajWJYrG+6nih\n0KBelzCZdMhyE1Fscui5QU6/M4HLL9Dd46Czw0p0LsbA1h7eH4mj1yhUQ4s0khrMDgv9G6yIlRqq\npoRWr2V2NoNaraerv40zJ8O0+aw88sQgEyOLZKpw6AkfbqeWutqPa2c3umYVo1mHpi1IKA5jmQyW\nDUO4TRqO/OwsKlMbCdHGqbdiWAxt2IqzKLFZchPnaYhNDN3D9O1/hMlfvkNqahLb4FZUiozzNkUk\nVSrVfSG1cN8aI1qtgE63eveh12vQaFYfb2+3snV7O//je0eZnohjsBqRNXqOnUzwq796RUyoUmnw\n6qszvP9+hGazJTi2c6cfk0nL8LCH/fu7OHMmitdnYdOT+xgIwOLZcQwOA7LBisntRqVaQK4UaO/1\n0bDYqepc1LMSarWKnh4HwaCVhYU8glaDyWKns8vJ1q0+Go3WezQagdRkFk1bOwtzObK/PI+7w0vo\njTew+b1kGgbcbZ1EZyP0P2GioPXyP49WmZ2P8t/+YDvbHuklW2gi2S0oYp3+HQMoGiPVagPZ5MDk\nbUNcbO0QasUy2XKe2FJLifZy7FWSFAqFOo880o7PZ13VyyCfr69oOw+thSsaLa0pY6TlQYswMZFe\nPhaPl9i2zbdK5ValYlnt9EYIghqjUcvUVHpFCataoyWerDIyEmNkJI4oNimXW9/P1q1eFAU2b26j\nUpGIxGtY/AHqlTolUaJUVxGJldi2t593XhujWBQxehycnW2i1g8SlbMUT0uYLGna2234/Vbiiwmq\n2QbeNhMF2UilVEYu5bG6HZRyJd5/M8JUQk17l5vZ2Syjo3F2bWtDzhnoCvbT1qeQHB0lOz2NraMD\nlVrdSk4eGcHa3r5saExOpjh6dGlZ8j6aktA6XAyvGyLX5cSJE/z+7//+R3b9P/qjP+LRRx/l61//\nOub1McBo1OB0GlfNRS6XEYOhVXUiCGosFh2CIPDYwUGUahFRgoszGfbuHyBT01Kvp0FSsNsMyLLI\n/GIZwZCjs8OMyqql1Mhy/rxEX5+Tnl4n46NVRs9GyWYusnmDAzUKbp+TQjaN2ugFt5+tW72k01X+\n+3dOcvp0jKXFHIosc+ipbjbt2cCF83GyhQbZVAm7W+bsL0/R4TcgqNWo5Drz759i46/9CsHtW9Cb\nDQR2bsbR0Y7lNgsEZFFEuiQFv5Zbcdy3xojRqGVoyLNCWl2rVbNxo+e6Saw6nQZXmw27y8TmPT3U\nJRXZeJZf/DxG+0AQnatIIGAlHC4yMZFeltSW5Zbi68CAC5UKNmzw0N3dkvOu10VOTxUxGHw4rVp2\nH36M8dPTdBx8DpPTxobeASYjMotLJaymIv42A08+6ibQ3cbmzW3U6zIWiw6nU8/YWJJUooBFVSEy\nn8Rm1bEwEWNxJolGLdPZ40YsFUhn7RicJgqVJpLWzPx0gvFkgWykzv59Q0iyCpvXg7fDgTYfY9tm\nD7pmldT8LEuxEvo+I/7gFirlBDZDlXponFwmDTmBas2CoDRxu/TE4lUqFZH5+RxerxmVStXK9KaV\n1KnVCmi16lXeBbN5bTX0ikaLnD+foF6XMejUNEolaiqIR4309jqYns4uv9fvt+D333yCFwT1qsot\ntboV1qqU6rz202kSUyGsdhO29gAmtxuzWceOjVZyczMsjodwF7Mc3BXgvfMashkr8XiZVKKMy6Lw\n27+7HbXJStdggMnRRc6eDpOOpBje2sm2ne1MXohQSGbJJIqcr+p5tKsdlVhHr7Gg13gR5CrW7j6K\n75fp7mvDZNIyNZEksRBj35Ca5MUL/Ow/Z9j51E5MUhVLMIg5EEBuNFBrtYjlMo1SCZ3ZTL0uMT6e\nWtF7RxSbjI8n6etzotWu3YntXqAoCidOnODb3/72R/YZfX19PP744/zgBz/gi1/84kf2OfcLgqBm\n+3YfxWKdbLaGStXSu9m2zbuiJ9LGjR6SyTJTUwXSiRo+t4ZP/vo2mhoNk5NZDjzqpcOjolGyMDcV\nQ2820NHrxWLR016T2b+/m09+cgBQtTwtElw4F8HjszB+LgKo2P7YEG5vkGZ6icrcBAndLo6Ni8xP\nx9E1q5jUdWRFYfpiS6hw9yPdVCsSZlUZTbmEupIln9Dh0mloFPI4zQYapRIFrAxs3YHi6aF94Paa\njGZmZ0mMjrbENC81ybP6/Xd3EO4S960xArBpU2uynZvLIghq+vqcdHffOJ6mqAR8nV4WQynmzi2i\n1gjoHW5KVYXTp6M8+6xxuSQxHr8sDtaSbNdqBRwOA01JQi7l6PBqSWdlQpOnmRyZYlyjsO/xbg78\n1pMcO52hFFZoNzXZvMXPzs016ukUZlUVJTXPmfk4sbIBq93E8LCbRKLEyEicHnuF196YYuT9Wbbv\n7WHD5i2oE2cQGnl0ZhMdO7Zx+myc3RvtmLRN4vU0bR1u0k0Rj0PDcLdMWWnSMRAkoI0xMjqGw2nk\n2H/8GJVKhWt4A9HRcZzaGuqmQCG5gNrnwrvnEfYGzbz30/cJXQhhtlt5csdWXIPtpNNVIktZmpk4\n1aVZpFoNV38/ruEN9PQ4VngIHA49PT13t5fPhyEUyjExkeTChSRGQUJJh9GrRdRqFS5jg1/97b04\nnUZKpTpWq57eXicm05VKoFpNIp+vXWosd6XipLPTxic+McjISIxotIROJ1CriZi1EhePXSCXymK3\nanFFw3i3bSPjbDK7dIzpV1+lafFQjMnMRc+x8fEnWAobQaVh564gb/w0hrohMDueRJAq7Oo3s3nj\nNnIFkUalit0GffvtxBcSuDZ5UBTY8olHkBaDJM6dJTszg62zE1FqUlicx20yUFYcxJZS7NnqZObN\ndxg7PoZer2Xy1BR9PTZ0Rgv5+Xl0l5JULcEgWlOrzF0UmzQa8qrvtV6XkaTmujFyDUtLSwB0XqOx\nc7f56le/yh//8R/zhS984b5JGP8oaTU17aZYbKBSqfB4jNhsKyvEXC4Thw/34/GYmJ3NEgrl+feX\np3G5jDy2WUfh2ClO/jKM02lkx74NGId6OXoiSTaextdmoFKus3PfIK8eCdM34EGl0eLr9mI3q2g2\ntBx8biO5skKfUeTkkTfZuaeLqgilTJ70QoRqNo9Rb6KOCr1WRZtbT2fQRDGVRhMdx9DnxVJPICha\nmg4TOosFRVHo3thFz14nzg2bcXnt6HQalGaTYjRKOZFAYzBga29H0OtRC8KKrvHFWIzFd99FrFSo\nViWKySz1YnHNKmTf18aIRqNmYMDFwMCtlRJ6PCYcXieTc2VsHe2gVuNyGbFYtCuaIMlyE5NJSzxe\nRq+X2bTJw/Cwh1o+T/j4cUqxGHq7HYveSXpqEptZjcGgJZsqEP35UWybHkUuQ6mqcPSNCXb0gjEz\nw//P3nvGSHbeZ76/OnUq5xw7VefcM8MJPYHkcGaYRImStbIsy1rJlrW+Nu4H+9PC/mLAhgFf4AIX\nuGvsDYvFtbUrrS050AyimIfDiZzUcTpWd1dXzjmn+6GpkShRwVqRQ0t+gP5QB3X6PXhPnfM+7z88\nj+ByEfAXyGSqqPtHiEbb7O/n6O014nGqKEcTLN8J0G53SETyDM4N0XCNo9cJFJQKvI+4YCBLPlui\nSQ3v0cOopHUmbVUi4TyRW2GO/9Zn8Hh1ZG/eoW/Iwc6125TLLbrdDqZSDrtDz8abl5l65jwydQ/Z\nVIHWyhYaixFFK49R2UTaLdHZX8Uy28/d9QrbdzdpZpOMjNroM4uEb96k2+1y7NgUDoeWSKSITidn\nYMD0gXotHzXarRapSIq3X9tFrdfgduvYvLZAai9Mb68Ri02NVdtl8bVrlMyjtFod3G79+wjH/n6O\nW7eiFIsHDsdjY1ampmxsbmZYW0tSKDTQ6RRMTtpYWUng67WgpYTJrKLTqCPXqhG0MnQq6HWK1Dbz\nSEURSTWPWaOh41IgyYU5c3qG3j4THamcseMTtBotJLIEXn2dd69E8W/E0CqhXS5gtOg498k5wrdf\nZu6zn0RqcbO2lmRi7hAOiQSdy0Vma4vqzjrKdpHE8jKmmSO4rTKGvHKq2SKHp0z4FzbJrcRQTj6N\nRmeg2BxgdzeDFDUzngkk74VyNRoZTqeWQuH9OXm3W/dzuTj/smNpaYnZ2dkPnSCcPXsWQRB44403\nOH/+/Ic61scZzeaB99H2dppWq0tPj56ZGcePNXwUBAmlUoPV1eR77b9qtIou6aWb2HVtVEMmqtUG\nS28vMSwaWbgRxu7Qk6+JdJGwsRbnNz8/wcJqht/+7TlWluKk0yV8/TosQpFCcBNDTssnvnASudCh\nKMgwyWt4XBqW90O0s3mkCgVjvR5cuiaKcgK5UCZaTNMtq3DPTrJ/+QpKqYl2vY7v/Hm8D/2o11di\nZYXk2hqCXI5CpzuQVigWUer1WMfGsAwPIxEEStEohXSe/f0ChUIdQQK2RBnnoQSWfyMjDxZ2u4YT\nJ7zE4yXa7S5Go5LZWQflchOZTEAQDgpjh4bMtFpdNBo5KpXI6dN99PToCb5ziXzgQGa902hQ2l9F\nJzZQm80YVG2k7TI7a3uM9Q2QWE3cFwXTJKT49AX8l64RykpJ5DoojPfoPfc4m4EGOmWHo0fsrAW7\nlMoNJg4PIZEp2NnJMjzVQy5TJZdPk6vriCahmmvSrLURw3Xm+qsUtrcw9/XSO+NkasKKRBDItNtY\nLRpyFjU2m5pOu40oaeNf3CKbLjMm07L3xitE7m0yMDuK0mTEO32EZldGKhinKyq4dWWHaEmBkAzR\nrte4U2qifWwUt0tKIRzGNjHB2Jj1A9thHxTqhQLBa9fI1BT4ry6DTMmhc4cp7WooJpQIcjmTc73U\nUzFi8Rzmk73EUy3y+SBqtQyXS0exWOfatRDNZgedTkG322V/P3/fo+IH0xaFQp2nznkp+jco+bd4\n5nOHeffaPtlQmKHDQygMJu5u1JA0B+h9fBxFcgPJ4gJakxWJUYHrrI9ytUU5W0DRLDA47KGSLyCq\nRIL+e9BuEQ0WEGlRzJQJ7OWQd5vc+dbzDD/1OI4jpygk0qRXt5GWUxSCQQQxyiPnzrOVVVNqg8+r\nwK5pcevGJZQGAyOHRoj4g2QjMZSPXODqP9+lkMwhVVTZC99AojtoI5ZIJMzOOqhUmiSTB8XiTqeW\n6el/Waj4VwXLy8tMT09/6ONIJJL7bb6/ymRkczPNrVuR+yn1tbUUnU6X06d7f4QQtlodtrbStFqd\n91S2BarVFg5Nm3ff9dPr1jI+YWVnI8bueobYhp+x8X6WV9O88OIWs4e8JMNJulIZR+Z9DHjkdItS\nShYFBmkG/2tvImnV2Ljix9TXS/9j51ArZTQSYR4/34deKyUeLtDrs3D+rBerrIxK2iRw9y7Rm9dI\nXq9h7u9j5nOfQSqXUctmye7vE1tcRCKVonO5aFar1HI5qtksrXqd7Noa1XQaqUyGbWKCSjJJMJ1G\nEEXMg4N0u1329vKkU5X78xAKFchka3xUVX2NUolOu41Cr/+pJP1XiowADA2Z+fznJ7l7N0qz2aFY\nbNBotBkaMrO0lOC559axWFRMTNiw2dQ0mx0EQUK1UKIUiyHIZKgtFmQaDc1qlbEhA9VqldT6BsgU\nCA2BeqmMqFBSSSbJB4OMPPkI8Vf/gUoiSaeqwKq3EIjn4OYCobyBIw+5WFmOo7a5+Np/fJblpQj/\n/A8rSCVw/jMKZofVKO1Wvv31a1TiccxWLdaBHoK3F2iMH8U91kKtEtDo1YgqFTK1GqXRSC2XwzEx\nyvr1ZfK5KvbZXobG1DiGBxDaDdQOF2MOK3VDD1ev79LaXGT0xCyOcT21RpdYNIdloJ9c+qAauxSL\ncfutKqpJEa3DTrf9oyH8B43U+jr5QACJbQBRJmXXH0Vv8TPeL+LQuXDaVbQTeySzBdQOB21EoEWt\n1iISOagbymarCIKETqfLzk4ao1GF16vH789QLNYRReF+iiK6n8YhZklu7KFr5mktvY2PNoozU4Sz\nDXb3Q0hkCtZXY7RaLR4728fRU4/QSkVpO4col2pU9zYobG+ikXeRqnM8fGqEVrmC0KjQbbRRq6S0\n6m0GjkzhnR6hpjhHsSlH63DT3LhNPp1Fa9Yhs/pQ2Z0gU7C6lUawqlHVszRSMdq9HlyHDxPZSyI0\nQOnup+ehOS6/ukhqL0S72aTdaFCQSFi6NcHEoX4UioPiwMcf9xENpGiUihh1IkpJA1A90Pv8ccTy\n8jIXLlz4SMb64he/yJ/8yZ+wvb3N0L+wu+KXAd1ul+3tzP0mA7NZRavVIRIpsrubY2DAeH/xi8dL\nbGykePHFTeLxMqFQgUSixMSEDe1JK+OTLmqFEvVqA5lCjlytQm0xUxPVLC+t4HAb0Zq06I0qEukW\nxVyVF68vMXVogD6blI3vXKYeDeD0eZDqfOTiWaIbOwwNTmCyG8lsLPLYmJ7mlAV5u4ahvM/WUpip\nx09S3vcz+cjRA1VUqUglX8Q1O0tqJ0xhf59Wrcb+lSvYJiZIrqzQKJeRKhTk9/Zo1+skVlZoVSoI\noojn2DGqmQyZ7W3Mg4OIJgcdUQF8n4xYvE4SJZEP2/+5Va+TWF4m4/fTbbfRuly4Dh/+ief8ypER\nAK/XgCBI8PuzaLUKensNVKsNrlwJ0Wi02d7Osr2dZWrKzsiIBUGQvfLO2wAAIABJREFUIFPKkdtc\n5IsS9kJ5VGIet8NOPXGRZrGIyayiXixz6MknuL20ByY3jUqN/lE3KirsvvkWCpOFTF5KS8zQNzOD\nqVfPEV8PlVSQYKFNtiwlly6hVUl5+LidoalelPUUqy+tceJzT1GrtwnspKjVW9hnZrDPHabcVVKM\nNxBkSrRzQ4TuLOIYHcJz7Bh7l69SSVY49Ou/hlqnIB9L0iyVqdZaiEqR9Poq7vPPcO1iiNB6lFYb\nqoKWgQEzR04MkNxoIZHK0NgdxBcXEDs1PCOTZMUWpbyAOZLCNdT7oG/nfXxPqRZA1S7gG3GwtZHE\nfy+E6+wwuVtX0NXVZLNV7C4DtvEJwoXv6xN8j7iLooBUKtBsNBkwN4mub9BsGFGZXYRCJYrFBm63\nDpNJiUreQSZABxHL1BwF/xqdnXUUvhGkbaDT5sXntmlJRCTNGiPjTsx2N0anG+9wD7Jals3oFjIa\nbC2FqRUK+PIxRHsPntFetq8vYXWZmDs5Sy2boR3coCo1YPdaWLl0C7ndi1Kup7gXQ9MukA+FMJ77\nLMlyA0HWYeeNy+RDEZrKZxl76Dyp7ZeI+DP0Hz2E3Owgmzl4WXxP6l2Qy6lW6tTr7fvt8cX9ANnr\n12iWy+QApclE/yOPoLH/W4TkB7GyssIf/dEffSRjqdVqfvd3f5e/+qu/+kAPr18FKBRStFo5er2c\n27dj+P0Z5HIpuVyN06d78PnM75GTLLlcnVarQ7FYJ5Op0elAsdhAYTShbPUjdLe4dTuGQSugtZpR\negeoR9r0DdoplttsrCfxDVm5fDWEzSKnmytz859fR90tI43uIMgVyLQ6FFotbWMv+aaM6H6KyfkJ\ndmRtavEwNjOolDqKsRh6vQKpVIbG7WHx6/8N97ETuM89TTzTJHQ7ikrtYPRzs4hKGdndXUSVilou\nh9bjIbezQykeR+d203v6NOEbNyiEQnhPnAA4UHAOF0jkBAYefxJHwE98awet3YZ+aJxS/cO3Z8hs\nbxO9e/e+nUZma+u+4vSPw4MkIy7gJWAc0AA/+Up/wXC79bjdByZfzWabf/7nDVQqkclJO41Gm06n\ng8ulw2hUUK228O8WWEvoWL+xSi6ZI19sMjBk4+nf/Brpy6+i0GnptNuonAZOOXsoCTr0FgNmsiRv\nXMI2MU61UMaqUpArwcC4l1Y9ycar22xupNDq5IyfPUlekLOyHOWLXxgn8M7bbAbT7C1v09NvwWKU\nURrwYveaiW3uUC5UmBufRHd4mkBRzZU7OY7MWmjduYPe5UI5PIegzODrMbP0wiusXN0kuh2iW68y\nfmKCY7/xBW68tU5HpsI0fYhKQ0JNoSZZkWMcGMDdKBEMFVFr1Cg1Slwjo6xu5KiEAih0WvayCp79\nihWr9cHXiQBIBAG5Vks1naaezTA75EL+a7MEwmXU3j6e/H0PSf8uvUoRq6+f9ahA4z3PGZVKvP97\nsNk0KBQCqkqM5/7L81hNMhZfb+AbcTJ8+DjvZg9E8eaPO9EX9rn2NzdJB6MUBs24h7wYvR5i715D\nprPjMh/kbwWpjOOnBwkmmqz/4xZzc04W1pZxiVkGdDJie3EMdhMKKay8fJGBC+d59NwoKqVA36CT\n5RdfxWFVci+4T7MDzk8/Srhm5PY3b9OtlfBapZx5coaZz53m1mYL0e7F7DJTHPAiUyoo1ETiBSkd\n+yBKpYLdpg1nrInb56SYztJptZDK5aisNjw+FzrdQSFvq14ntrT0Pv+hWjZLYnWVgX8jI/fRbDbZ\n3NxkYmLiIxvzD/7gD5idneXP/uzP0Ot/NvflXxYUCnWUSpFisY5MJtBstqjXWzidWoxGJel0lZ2d\nHRYXYyQSFfr6DLRaXe7ciVGpHGgMpVIVnnlmhJrKy+AJB7H6CjqnntGJEf7p9RiHD7kZnvKwt5vF\n7dJRrrRRyqvkkxn6HVpuv7rIyENjnPz0M4SuvsPit59DIojIzFYmnn0G/2aSfCLD2JyPbFRHZnmB\nlWvXkSjUNEQt7WIWjUGL1uPF/eSneeG/v0MsEMfQP0A1V+ST/8uz+IYs7Fay9HSt2EbNVPY22Xnt\nNdKbm+g9HkSVCu/8POV4nC6gMFsIlA3sveqn0Wizv5+nt9fN8IVx0rkm4VyT01Mfrl1Dt9sls739\nfvtuoBSN/sTzHqSDVQZ4jJ/BHO/DhigKWK0qDAYFUqmESqWBRiOnWm3wwgsbpNMVQvtZ/MES7okh\nVFYHhWyJV/7xNpcu7hJumFEaTZTjceKRAst3A+wEStxaSJNtKAmGSgw89SnMvR469RqeIQ8GrZRG\nuUI8VgRBIB5IEF1aZtinQyJAt5hl9+46HQSsThPJ9U0G7BIOnRlHKWmglLaYmXWT2t1ja2ELURR4\n50qQb/6PVS4t1MjWZGTDMRKJGploksuvr4FMiWukH/NAP4GNIK2uFLleR0uuZWMrRyzTYT9cJZSR\nkMtW8YhJjk1pcbgNHD7/EFXU0KiiNBiQCFLisQLr66mfOr8fFSQSCdbxcUSVCrpdGokIveocZx8b\nolKHq6s1JL1TzD5zDu/kEA6HBoNBgcej4+GH+3A6D4q6ZDIpbruSnRsLtOoNTCYVuVydnc0Y6nKY\nuVkboihh1CulEQ8TChWRak0YLHruPff8QetfNkl2d4f8+iJzR3poNjuoNHJWFmPUSlUy+xEKqQJ+\nf46qoEdPiVIqSz5bJFeoIeisZG5f5sgAiLkgDrOMRLLK4lKMZLbFXqhCuiSBTgeFSk5DIieWl1KQ\nWllcTvLc3y/x3HMbjJ07xZHPPE6u3GZxJUu0osLQP4B/O8PVO1kOPzzJ7NnDWAcHsA75mH70IY6f\nGbof4m5WKjR/SAYeoJpO0261fuT4ryo2Nzfp6elBrf7oiHlPTw/nzp3jG9/4xkc25scB+XyNt97a\nZW0tRTRa4pvfXKJQaDA5aWN83EokUuDv//4eb7yxi9msQi4XcDk16NQSxsYstFodlEopHo+Ovb08\nNqeBeNNAWjfE6xsqXrmSobfHiMWq5ty5QdweA7FEBUEq4cR8D9lMBZVWycipo+znFJS7SgJrQdpy\nLSqjFrPHTiWVRqxlSGaaRN+9TnprkzvPvUJoM8T6pZt0SnkapRLtaoWRp58kngel2cL042fom/Ch\ntFp582KApY0CK6sZXvj6RfZDRbKRGMaBAcyjo8h1OiTvuaR75+cPOuIGptiNNmk2O0gkElwuLeFw\nkWrjQA5jft7LwMC/vOMxHwyye/Ei26+8Qmp9ndYHGId+DxKJ5H4R/PuO/xRV4gcZGam/9/fAIZFI\n0GrlvPbaDsVig3C4gFotMjXlYHJQA6F7yEyjlNIF7i2XKeUqOH392D1WcqkiiVic8SMnMZ9yE85I\ncRgkvHMtwX4gQK3Uw3hvP7feXmX26BF6Hn6UrtZMPhRjbSWCu8dF6k4QmVyklknRjAeZm3PTKqRR\nKKTo9Er6+ocohcOU717jC//bf8R/p0MxmiB27waFSh3zqfO88/oanbaCoREzOoPA+l6NwVEnlWiK\n6H4az+w4lYYU/70QDqeXsWNztJAy99hhbn5jm4ZERTqSx+E2YlB2WLvtx0aCPneUiSOHqLT0bC5d\nofO9cL4oRW21Eo+X3iez/qBh7O1Feu4cuUCAdrOJzOIkUlaia1TR6RTodArkcilGo4qzZweoVlso\nFNIf0aYxagWk3RZWqxrle0J6lUqLSCCJ2yfjoYfciJ0G168FKOUaNBot4ns1NCYjkm4bi12PqiUj\nLxEYcyjY8isP3DgVEuwOLWpVG2k1h8trwtxrpRiwsH/1Duq5XiY+8RSx5RVym+vU83lKHSUbqxEy\nhTa1jpR8qcvGRgpRrUFjNlAvlxk8PkmipeXSzTQKrZazT9oJxupYrWpuL/spBvapFCrMPvYQRqed\nT/w7K4l0DfNAD5891E8yUUSQyXH1Wu+LRQHI1GpkWi3NSuV986OyWJCKv5JZ3g/ER1W8+sP46le/\nyp/+6Z/y+7//+x/52A8KwWCeRKJykEpttrFYNGxvZzh1aoaXXtqiVmuTSJTpdrt0Ol3OHlIQu/U2\nzmqVx8dMzIwMc3u1yMyMg1qthShKeP75TXw+E4uLccbGrOwF8mSyVU6d6kUUBSYn7UxO2viHby9T\nTOZ4+OwgwaxAPJrn7qVlCh0tznEPJredbX+O6q0IZ37rMCqzEXm2gNJtxjPuo10p0ZUIaO02NEYt\nlUIJ79gEyaaLcCVI6m4eT7+D6QunufnOJp0ulGIxTCYl0lKawNWbyKkjUyrRejx0Gg30PT2MfOpT\niAoFe6EKrfb3Nw9yuYjXa8Dh0HLokPPnctDO7e+z99Zb91O5+f39A2PQY8d+7DnW0VEqiQSdH9iw\nGH6KhcGv/Nuk1Wqzt5fjO9/Z5vLlfYrFBlarCodDS73aQFrcot6WsBuu8cLza1QrDXRmA6H9Oicf\nsuF0S6mbh4kJbu4uJtncSOIbcTJ/dpjWi+9y/eIa01+ZRLq6jv/mKspqgolf/zyO4R6m9sM0pQoG\nR53UCgUkjTLFfIW+ATUuu4kj8wOUm3IkopSKxU3vmB5JvcSN//evaTWaCFIBqcFCeSvG0Gg/x6Z1\n5JbepbQWRebzIfZeYHUlwqPHnYSSO1y7uIZGbBLeiZLO9DD35ClatTrzj44hKlU0ml1senBZpET3\n48ydtiCpZkivruB5+DEcvh5qtT2kcjlapxOVyYTZrCKbrSKRSDCZPh5FjTq3G53bTbfb5eLFPba2\nvu8uG4uVkEgkPPxwHxKJBLX6g1tUPf1Wpg73UQgFkXQb9PfryWbrWPp62NopUijUGfVY0BlUlFJZ\ntBo5pVwGSbGKQqslubqMY3ISeVfOzKkeKi05FquKbNpCq1QimM5z4pCZZmCVVEuGxarn4S8+RR0F\n1hEfb/+XbzL58BEqsQi+Zz7LxZdXyOdr2LxO6rUGngEXdzdrdBVa5g4PsrJVJJKvY92vUSuVGJnt\n48vPOtl44TnEbIlH50fRDY1yZzXPc89v4HTqefiRXqxWDXqTGr3tg0O3okKBc3aW4NWr91M1KrMZ\n2+Tk/e90Wi2yOzsHoVmJBPPQECaf72Ot9viLxvLyMlNTUx/5uBcuXOB3fud3WFtbY3x8/CMf/0Gg\nUDjYlddqLRKJMnL5gQAjSNjby2EwKHE6NUQiRXpNDdZfuUJoL4XXq0ciyXB4VsrYzDR3F1P09Bh4\n441d4ODdcOSIi2q1hU4n59TJXna2UxQyJdaWw0QjBWbnPCTCShpNkFsc2FodlGaBcLGGtu1g8Z0w\nxVwZk1XHdqjBkMPCbjxGdiGCUuZEEBLoxDqZSIq6VEv/mccptGBhIc7WegxBriR1J0SxreChk2Mk\nd/eJ+UMc/dIj7N1ZAUGkVckj12holsvYJifpmZ8n5/eTCwRomvpoFnKIuu8X8EokB3YoPw8RAUhv\nbt4nIgB0u2S3t7GOjqI0fLCul8nno9tuH5zbbGIaGMAyNvYTx/lYk5E//MM/xGg8CCmNjY1x4sQJ\n+vv7Adjb2wP4n/4slZpIJssolSUeecSA399lczNDs5nGqtWQuxfHc+wEb1xZYGxcwepym3ymxOC4\nGrlZxDncS6YIb715l8BuhtW7eW7dCPHwYzaGZq3EghliiSq64ycwmVW4OkX8V25gOHWKll5BO99k\noF9HS2XEOjZCO9eiGvKTs/tQjw2Suemnki/S89AQ1uFBEqEkaqeLbCmDrceCx2IlYzbhmVCw9vpL\nRC8v0m63UYWjVGoZTs49RSzdQDCITB23U8t0kKsUOMas7EQzjIyPUN0OYnVJMBrVNCJZNm8HeegR\nD9V2CRkHi008FWPosAPBYKNWa9Nopul2c4Dlvd1ICpdLy/nzR1AqxZ/7fvwiUSo17tuK/yCi0SLV\navO+VkajVKKWP9AAUFsstBsNIjdvMnpkiEY6zt7qLuMjTgx9A9A7SWKnwtxcL3c30xy6cIL+vjUa\n+TxauR6zY5ZqOolUFKkXCqi9LurVJuZmmF7bMM0JE9/+rwt86ulBtl57E6ddRbYNBquRSEpKyTiI\nPFrHPjlNoyNl+jOfRtE3wqO//Swb11cpFusMDHnpm+gn3s6j0KkwDZqJLq5itusIhTIMDlpJ+AOE\nvSUygSB7m1HahTQ+k5nnvrVGS5BTq7XIxZPYbBrOXRgmna5QqTRRKmXYbO93XzYNDCDX6aimUkgE\nAa3TieIHahSS6+uEb9y4311VjEToNJvYPsL6iQeNlZUVvvSlL33k40qlUr70pS/xN3/zN/zlX/7l\nRz7+g8CBIvSB8rFEcqAdMj1tx2xWMTpqobfXiFQqYLOpkZYTmIf7MY6M43GpyYeibC9u49b14bCr\nkcuE++Smr8/AyZO91OstwuE8l797h2SyTKPcZXbGickoZ3DIxJHDDvp7NMRjBfbrTroWFUc+fYHF\ny6u02x0MFgMT505QU9l49fVdbEKDZKjIeL+DbCZPC5FmJYvZYifZMVGVqDC5MoyfnScTzZCLRCnl\nq/QNOnjpH59DpVORCwbJBSKc+cTD5Fbv0mk2EUQRx8wMhWCQ3HvvT3kHXFolkWIeud74PguSnxc/\nHBWFgzWh/RNSNYJUinVsDPPwMN1u92eKon5cyMgHxvh/UpX49xaxn/ezRmNjfz9HNhVnZztBLivn\nxZf26e83cvSoG78/w68/1UMoESaTqZCJtSmVGsxM2RAUKhodAY3ShEKtoJqqEt6pU6so0dtklEp1\nlhbK2M1WLjx7iHqzyaWXd+h1a1DVExyem8ZqMNPzud8kuJOgkC6hpEMl3yAdiDJ85BhL9zLorW7c\n50Yx60UUWjV6rQz/mxcZf2weoZondW+VRrHAQ58eoxoJk3x3lXyhgQQwFMs0N7bwnDlPU2ai1LTS\nVOlpmevUkVLc6zB+2MDf/d0yc4e8JBJmbt5MYVe3GRx1cWTMDakAXQ66J4YmJxkXRUZGSiSTFQSh\nl2Sywu5u7j2VTiPRKGxvp5macvxP35+fF8VinWDwQFjOZFJit2vodLooFCLN5sE9VKlk7+2kDuSS\no7du0ahUkKlUGPr60DgclNNpBLmK4595jNkLNbrtJo7pSQJJMLtrvPrqDuVyg7bFjtQpR+eto7dq\nGZryEPvut7GOjSHXaFB4HCy8/hZ6nY5KoURyO8y//+oJLEKOjl1JJpFF0lSisZrRq2BozklB0NI2\ne1G69CzudigsL6JyD6Kes+DSy4jnBf76b9f59V+fJmZWoFbL8A1aKVeatLoyZHKR4FKIzhkzDoeG\n3W0BtUHL7e9cpa/HSyTVoVXIUapJWL2XxuUxcOdOjGq1hUolMjZm5dAh1/s8njRWKxrrj+rJtOp1\n0hsb72vz7rbbpNbXMQ0NIcrlP3LOLyMeVJoG4Mtf/jIXLlzgL/7iL5D+kkejGpUKHqeS6Wk76+tp\n7HYNpVKd8XEbgiBhfr6HVKpKNlthfr4XfVfH5e8uEQmGUShkTB32cvrCNHKnktArG8S6GkSp4n6U\ndGMjiVIp4rKruLge4PHfeJhvf3uVcrGC063lnUt7IEiZP+Fh4tAA4fAyLz6/xtlHexi+cI5uo4bK\nZGIvCXv3kmQyFTxHzMgUCe5c38HcN4B31oNBbHDz0j36jDlW9tK0qiWKqSK+6T6k43aK0RgaeRvX\n+AjFTB65Wo1GLVLa36P39OkD3Q5BwNDbS+jq1e/PTzbNsNXK4LgHiemgEN3l0qFU/vxLvbGvj3Is\n9r5jSrMZpcn0U8/9l0RHHyQZEYHvArPAK8CfAO9+FANns1Vu3AhhkuRZefEy/o0oLp+H//Bb43z9\n7wP09xt45JE+nD43lo6PzWiXh455eefNbTLRNKJGRwcJhw+NYu1EWA4kKKfSFNIFjE4PolyF3W3E\n7jIw0G/k//vfX8BsVFCKRSm222wm5Ez/2iTPf+Ma4XARmdHEcI8Kl03HxNRRotESerMOlc3Bu/ey\nFApZHj7Tx7BJT0PnJLiwSl+PHoXRTK1apezfRK1ToRLb2J1aVAoRrVqgUcxDrYDDZAFByu5+iVis\nhLRRRK+Vo1Yfxu3SceXKPmq1jE99apSRYRMWIU89sE5LFFGZzXiOHr3PbB0OLSYN5LJltrdrtNsH\nTVAajQyVUiSVqn4Ut/ADUak0uHQpQDhcBEAqldDTo0cQJNy7l8BsVtPfb2Biwo4oSqkVCkRv36Yt\nU1NSmanWWlCBVKDA4o6UUiGPTV9gxKdDJW1SLlRJpeD11/3Uay1mZ2z81//7Gul4AW+/lbFhKYl0\ngCeeeJpK6IDIiSqRvkE7Kxdv89DMKHKjmVtvLTHdJyUSSCFVKFCajJRrQDmNU9eiHVjjkU8cJl+T\n8nf/12t4+ix4hjzkNVquLWfwDRiZP+pAJ29yayuEXienXqlRzpQYGzVjt6swzLrxONQEFwtMTdrR\n6DRISy1sRgOlRol6qYDO5KDZ6hAMFqhWD3K71WqLlZUELpcOr/end2h0Wq335YW/h/Z7du2/CigW\ni8RisQem9zExMYHX6+W1117jySeffCDX8GGjXigQX1qiEA4jlcnoHx7G95SPUrmXcrlBJFJCJhPo\ndqFcbhAI5FGr5cg7VVr1Oi6nlkazzfbtDfrdM/S0S0grWRpI6LQlTIzbqdfbXLwYoNvt8plPj/Bb\n/+t5bl4LcGJGj9pk4JVX1zFbdUzMeAgurVMIannqqUEUCikStRLjgIu713dop4ps75Yw2Q34Bq1U\ny1VKxSrlGgTe3aNabeGVJejWW0TCedpd7YGhpT/H4u0wR4846Os3Y9SJzH/qFHTaDPRo2fynIl2F\nFFF94J+l0OuRKZU/0jLbyKQwGvUMzsz9QubePDxMLZ8nHwjQbbd/ZE34ReFBkpEW8EDkA0OhAspW\nkb0rl9i66yebrRLcinC8WeGZJ47QFuR84fNjNAolpP0jTDrrtO6GeOrTM+wGyxSyJeaPudAXtnn1\n//g/Gf3Sf+CORiC6kaKSyWHy+Tg23c/8MScbK2EGBs3E1raRGDRIdFYqGgehQJ7121uYvG586jxe\nnYTM0l1SlwvItRpszn62w2pW7yVxO1XU6m3uLceI5TSc+Mx5ohffoJDM4vY52VrYZuqxE/SNeSln\nCihkHUq5KtpBH4W2ilwgytyUGblCjnoziVph5dCcnXCkwMZ6ip29AodmrJR3t9jeyWCYH8Z5+DBK\noxGlXn9/d9tqNEgsLZHZ3qaFiF1Q4/GY6MoUtMt5wsv3wGEi1w/GX1Ck41+CSKREJFK8/9loVHL7\ndpRKpUmt1qJUaiCXC8zPH3iH1PN5Wgod1+5kCfn9SAQJo8fGiSbTZLf9ZP1+up0OkUODPDzvYHsx\nTSDRIZWqoOxWefv1NDarBqHdQNJuIBMl3Ly5z9lHe9m58i71TBKVToPa4eTMM8eQUuDwvA+vTeDE\nyX4OPzpFp9VGpVXSyOcILNyjmEjhf+0N0qtODn3pNzh5po+Mf4/QxTcwD43y+NkRxoe0bL31DrE7\nIieODFFrtBme6mFzK0MxW8Lm1HP83HHqS5cxuuxIIjEUYp0TnzjFty5WaNbbKIwWZGolY4M6gpsh\nUOpp1mq063UqCOztZel2u5jNKjSaHx/dkGs06Nxu6vn8+47rvV5kqo9HDdGHjdXVVcbHxx9oVOIr\nX/kKf/3Xf/1LSUa6nQ7hmzepptM0ZVpa3S7ptTWcSiW+4QP5romJDktLcarVFt1ul8OHXWg0Mt54\nI8pEbx+ScgZJuYpM0JOriniS+5w/76NhGSSZa3F7Mc0//tMGI6NWvF49e3s5Rkd8xDcv4nTrMQ25\n6e01Uy1VyITi5PcDtKoVJvqknHQk8J0+TiYUxakqced2hF67gfmzk2gtBl7+26u0pUqarQ4Op4HR\nUQuL/3iNuSdOEVPoGLJKmRrT4raOkco28fabmB2QInaraBol/CsBbq+2mf/cZ8nt7hC6+S5yhQzj\nwABapxOl6aCT83uQCALGn1As2u12aTebdBCQy386BZCr1fSdOUNlYoJuq4XSbP5QIp4flzTNR4p2\nu0MtGaNaKqPXK6jVWkilAplQnNmjAo4hF9e+u8DbLy/QbLSYPTnOqSdmyeSaOPrKdLIJFJlt9lZ2\nqGaydLZv89lPHuN6r4lUrMDMyVG8LgU2q4agyURFMOCaP00uWyWdqyPNtBE1Guz9XsaHNMikcO+d\nO9x4+TquXhs6gxb3cAfHgJJjx71cfnODTruDzarkzKyaUjCEXKPEOneYlkJJrxcygQAzv/ZJ9t+5\nTDWTweAbouexJ/jP//kGfWM9CFoTg0N2PG4N4UCKeKKMQqMmk6mikndRZ7a4/dY9jj89z/XbaSIv\nBZg+PcPMMR/9/Qc/vO8J2ci1WiTNClsvv4wgVyJTyuiojZQEI6n9KJJylrlfexrDh2wY9sMolxvv\na20XBAmbm2kGBkz09Hy/0CoSKWKxqJEqFESzEoLbBw+y0XzQ6pevdGnnv09q9rejVJ84wt69ML0D\ndtQ9JWIbfiZ8AwRyGq5F4jRLDVpVLYcfnWX/1l2Wrq6hUsvQ6+voM1k6opLeRx5Ffe8eNkmZYlzD\nwms3CK0eFH3OPHaCsc98lvDqJkazDrPXRm5lgdzmBusLAaStOrUry3z1P/0JjZAfpc3JpUv77PzD\ndxgYdfLMFx/moUkN/vUKY3M2hFqaQqFOfDuCc9CLZWwM9fg4h6o5zC4LrXKZx875GOlXsfBWAIXF\nRiZVJpOu0AVmJsws3inTaEs4daoHh+PHe1k4Z2dpNxoUIxEA9B4PjgeUsngQWFlZYfIHCnofBD7/\n+c/zx3/8x2SzWUw/Q/j8XxOqmQyVBqxG5QT34kgkEoZGbSjjKSzvkZFotMQrr2zzyit+KpUWer2C\nL395BhDItTTozHpQ16hGQ4gqJTsZJbe/s4RruonF18/i3SiPnRtgbS3F66/v4HBosFg0nP30Ca6+\n46exneK1l+9htuo4fXaYrs5OKpMArQm1KkIjuElueQMHSj7unkTOAAAgAElEQVT9ZA/VepvO/grW\nwYd56jOH2FjcRT7joM8poxLwc+SpUzhPnKLhT9PnVLCxEiK0l2Fg3MtMHyTfvUL45i3kGg2uYydQ\narXEd/ZpJlPILC4MJhXdVovI7dv0zM+TUSiopFIIMhmW4WGMAwMfOJf5YJDdd+8S3I4hN9twTE3i\nGfJgs/1kt3KJRPKBadpfJP7VkZF2u0O53ECpFH8mVvdBsNk0RKUSGvU2PV491WqTTqeLzaZmcNhM\noV7n+W9codPuIEgFVhbDNKVKTp+fQKOV0yHJ+ht36JkaIm3Vk9veQBrc5dHpWZRH+7FM9rK3k+Ta\nZT9Gtx2T28qtazvsrQVRmG1MTrtR1jJMeTuo6gkMQ2MsfSeL2WmGRo3Ebppms8HRHivhPZHN5SAG\no5Ixr0Dy+rtUAtsopV06UiljT15g9+YSxWCAxHWRvvmj2A4fA42exG6Q80+McPVGjNPPTrEbqpBJ\nVyhXO7jcesYn7cTCORRdCaEba4w8NM6tu0lqxQDVaov9UBl/uMEXvjCN06kl6/dDt4uoVLJ79SpK\nakhlItHtINFElaNf+CzBHNxbjmAb2fjIyYjZrEIUBVqtDhLJ9zV3ftjUrVY7SCuoLRaKTdlBxVL3\n4IHrClLqzQZytRaNQ0KzXEaqkCGIcuw6SF6/iH9pl2QkQzm0j75/kJOn+tla3qeSy2NWd9i4HkBQ\n6ygXMrSrFSQ6ECNRoguLbF+5zcmv/Xsuff15tq4voDBZ6YpK3n7hBiq3l3trKYYmjuA9NsvGWgSJ\nT8OIYwx5JYFeI8DOHdLBMJEUPHx0hJFBPRv34vzT19/h/GdPYPVYCS+u8fJ/+u/0jXhw2BxEi1I8\nNiftVJi5mV76ejR0mw0cdiWdZoOxo2O89eYum9sZCvkaR4568W/GmZh2sbbXYGEhxoULgwe2CNUm\nrVYHrVZ+v8hVodczcPYs1UwGJBJUZvPHptX7o8Dq6uoD6aT5QZjNZh5//HG+9a1v8Xu/93sP9Fp+\n0ZBIpazv1li6uXf/2M1kAY3ZyCAHO/1795J0Ot9XUS4UaiwsxHnssX5u3YrQaIi029DX7wCk/MN/\nu4ZaLSOdusNTkyNMTDtYWU1y61YUq1VNPl8nsJ/D7XTj3ysypdcxPOogkW2yvpZEQQ29Qcm7byxz\n9ISXzNYCybs3URv1VEKbdA0O0hUZ6t4I40M6VNtBZAoRpUZHY8hFQ6rFPuLEYFRxbzPP4kaFeqbE\nqTNy9t54i9CVS8Tv3EGh05Ld3eXI177GzvWrGKwmrMM+WoUsAK1KhWalgu/CBWr5PKJc/mMdeUvx\nOKsvvcLty5tUSjWkcgXZSJJc+VGOHO/7sQaDHxX+VZGRaLTI3bux99x1RaanHQwO/nQ1uU6ny95e\nDr8/Q7cLw8NmBmaGiK+s0qxU7vdfe0d7GZzu53/8P2/Rea8WQmezEk21CH1nFb3VyPWbcZ54xMXw\nw8eJLK1gGx9FpENqa5PtS1d46GuTLCzGuPvWAiaPA1bC+MYGUCkGcXuMTM84OTKuIXTnLq29VbLF\nPPV6m+FBI+vFHOGNEKIopZzOUa80mBnTEQq4GfHp6FNnufW3b2PUieTzdUxmFbFbt3AMDdFKhKjk\ncuxcvIznyByuY8cpRSLIpTFOTtkwmpRUtorI5SJPnTajqcXI3PAz7bSgtVtZ3lWh0BvYfWMJq+3g\nx9xuNtjezhAI5HA6tUhl7y3q3S61bBapVIIgCAgSCXq1jGYug1KjppTJk8/+aBfLhw2XS8fUlI31\n9TS12oF+yPS0A7VaRiZTJZutolBIOXXqgCQJUimDcyPshyvUcjm6ajUjEwOUF2KInRyFdBy9Xsn4\n6UM06k1cTg2hS1Ek3Q7ybo16ocCopox5RkM1peKRTx0lFs4RjldRaaw4HQaEchqpKGAfHyMZiqFQ\niIhyGY1WF/f4MPsbQbLxfSx9PaT2QpgsZnKigStvrXLrxSsIShX6Ph9zxydxVjeJrqwid/Sj6MZZ\n+9bf4hzzMWa3gaefTEmCx6Vl6YVX6AoyQv4otYzI2OFh9t+5hEylounMk2jqyQWC5ORFNJIyY//u\n84TidTqAViOjWqpx8aIfb7+FsTEL8k6VwM077IVr7AYryPRGegZszM05MBoPUjESQUD9Ie+cPq5Y\nXV39WBjWfeUrX+HP//zPf+nISFNQEc/8UF2SBKLZA/VsOBBCM5tVjI3ZuHs3SqHQYGMjxYULPvR6\nBclkmU4H5masvPrNi8j0RhRCjZYA+UiYk/P9rN1LMjJixunU4nbrkMtlLCwlUZlMBMJlnv3cDIlE\nlcBOikOzfXSKGWKbe4hiHyp3H4axKplQlO07O8g1MXpPzqPpFtm/e5DSyW4FKYb2kSqV9D/7eaKb\n++T9m8Tu7DHa08PIhVkcmgaXnn8BahXMfV7azRb7V2/gPnYMpUZN8J2LGOxGFHo9jWIRJBJEpRJB\nKkVt/slrYdbvJ7i8RWI7AN0ugkxGqNvGPjtHImH9NzLys6JYrHPlyj6ZTA04aNu8ciX4ns35T25b\n2txMc/Vq8L7bajCY5+zZfp742qeJLCwjadYwuOw4ZmeR6nToTQf/TypK6YgKYuEwvhEHglREoRC5\nuxDlwqOH0CscWM0KhGKCvkceoWHuZzstZT8U58jjx2jmM6C3kkhVcNjUmEwq5DKBzPYm6y++QiWV\nwuU1Ud7YxNLrweXWs3+vi0whYvXasY+NQENkaNCCt9+GshRDa9RQyhcoFhtIZCLpSBLd+Cxyg4lK\nQ0LvM88SzCm4+N003aqXYZ8Os6JKem+flZUix4YFvvEXL+JxanA51Wg0+8jq/fgmByjSRa0SaFZr\niColGpuNQqV1/4E3Dw9TjEZBEJCp1TQrFWR6HTK9AYoplEYDzb0MUlGKoefn86zJ52vs7ubIZKrY\nbGr6+40/80MiigJHj3ro7zdRqzXRaORMTFh59dUdAoEcKpWMiQkbW1sZ1GoZAwMmhkZsROMjRCJF\nul2w2HVceELH2oL8QK/A52brXpC9rRgDPWryhQZOlxaFwo1e3qays8nwsSmefmaUrkFDMVDj+LOP\nsvHaW9TEDjqZgNpmo24aIJdSMva5C9xczLIaEWk3NYyceRTVyl3y0STu4R62dksUCnlktTISmQwa\nVUrBXeTzfZQqbaR9Mxh7XWzcWCYdiiNpN2lq81jlGjpmF5lolUIqT7nSxtujY+70EOsvfYeaQ4dj\ndoblW28z/ZCPfDrHwvI6IyMWVBshrr21gdGkpJiuYnMZ6BlysBus4i3uYlXVSWgNXHp5iUa1jsJo\noFQeo15vcf687+fWL/hlwerq6gNP0wA8/vjjfPWrX2V9fZ2xn6Lp8K8JglTA0OOhWKhRzeYQpFI0\nTgcaiwmJRIIoCrhcOlKpCkajkpERC5lMlf5+I/v7OdrtLtevh+9HwCMlJeaxCVSNNH0zI4TTHURL\ng6eeHmF9PUUsXsHvz1Eo1HniCR+5YptmrcrOTo4u4O01Mjxq5drrMY5+4jQVqci9WINoVIdWq6P/\nlJPVN69xdm6Ixcv3OPPUHJFKi/V8AdRTjE67Uej03Pq7FxA6LfaXwwwU4gRTm3h+42nqxRKtchGp\nVoZMrUaQSpDSQaGVY+71kNnexjs/D8UiGrsdrdP5M81jOZ2mUaneDxl3mk0qqRSdVvO+8/GDxL8a\nMpJOV8lma+87Vqu1iEZLH0hGut0usViJfL7GlStBcqki0m4DQRRRaHXcuRPlk58cxT7so12rIdNo\n7oeWj5weZfmmn3gwRb3RRquVc+zMMN1WA4VCYH09jdCoktrbZ8Bn4+iZYZptCUvLOdK5JhuLEQL/\nP3lvFuTYfV55/u692Pd9SwC5Z1aute8LWdyKorhKdlvulmWpoy33jMPu1kzERMf4aSIc9uM45sER\nE+2wrbEdHssamRJpkRTXWsjal8yq3PcEkNj3HbjAnYcki6JJmrQsm8WZ84REAsh/4n9xce73ne+c\nzSIT037e/uE9TF43x48GMZpUqDs1rGKN6socVq+TSqGGx90mHS8w+cyTiK4QHURGjk3RFHScf2eL\njZ0WuUSOhydFHL1BiuvrKArUi2UsoSO0Wh10Hh/W3mkWd1T87V9cRBa0yK02qWPDTA6bGQw3GNvj\nJD77Hvl0BQmFgQEbSrtFdWOFia8+Tr3ZZXUpRTmTw+S1YTAbMJuN92fU7f39KN0uxViMnqNHKW5v\no7LYKct6nENDSCYrenOTwPQYntEhqtUWjcaugdDnbamdP7913xtkdTVHNFpi/34ftVobjUaF12u8\nn5j78+h0uhQKDdRqEY/nw/6nyaRhcNCOy7XrnfGBUdL8fJpw2IrFouXs2T4SiSrd7u5kycZqAou6\nzcixEW7PZtBY7LsnB40GSZERuh2c2jqhQR9d2Yze5WZ9W+HCq7eR1SaSDj0Tp87i0dUQ5CaZksJr\nr63hcJuY+8k6UreF3uVh6b07RCN5zj21j9BwBntfmPiFC0wcG+fW61sYPD7alQrNWgOTy0FNNcHF\n92Io791Gq+ph5LkhWlsLNPVOWpUSvQ6BOzNZRo5OUkhlcQdd0Kggdlp4BsN0dFZcziLxG9foPXKM\nzLoe7+ggSlfA5zUwEtJAMUVN0nD7wjbBPb3MLM7Q4zczeWoSQa2BepNWqUSjVCaZNLxPGv/pfvP/\nl1EoFCiVSoTDX3xgpEql4pvf/Cbf//73+aM/+qMvejm/NJhMGkbH/dRbAu1mC0EQ0ejU7Bnz3B8/\nHx93sbVVIJGoUKu16euzcfx4iKWlNO22QixWolBoYjSqGRzyUUnG8Q2GefHFZZaXUzz5wgHiiTr7\nDvYgdxSuX48BMDho59KlbRqymnhGpq/PysSkh+s3IuQrAsWGmp/89QydegW/306x0sQx2cvD/+Mo\nstlHeuc22YaOl384Q3ZtHUkSSRdkzvWPUkmksHgcBPs96IQay+/eYP9zj9N/+jiRt99CFAQklUTP\n3gmcw0NU02lso2MIChjcbrRmM5JazeY772ANh3GNjqLS6T71fdQYjdgcRnQGDY3arkeIPeClI+ke\niHyxLw0Z+bQW9Kfdv7CQ4fr1GCaTmtnrG6SjafZOudB069TKJszmAdrtLlqtBukf9dgm9of5rf/2\nHPfubFNtgNLpQqNCdD1Go6ZBlqF/yE2rJXPxVoqaNguiGlFQKMUTGC16rl/bpokWUFhf2MHt1PHC\nM0P83f/2f/G13ziGSiNRz6SwOi00o118U9MYrCb8B/Zi1ikohRR3VluYLDYsRZlWo0Hb0o9tuIZc\nyJOPLxGemsB16DiFtgG73Ua5YmDtVhatw4WcL9MRVUS3C/hcGkYGzPgtbSoOiVW9Bq1WhdWig66a\ndq1CIVdh/NgenhLVXHprhXgkgyq2ztf/06MMDOyW/wRRxDk8jGNwEPnoUSrxONV0mh60lBQjhVyN\ngTENA2M9JFJ15ua2abU6WK06Dh7009Pz2aOiyeRH2zs3buzQbndJJitIkvj+SSb4ER1INlvjxo0d\n0ukakiQwOGhn714fWq3qffJRo1L5qEFPrbarfZAkEYNBw8CAhky6yqUL60RuzVFOZ6nvH2JxPke1\nCRJdmuiZ/so52oktVBUtrWoN0dPLjcsbxPIChwbVmFUFJEMLyenHMTTB/EyEaxd3rZT7Jvq4sRDF\nrBfwGlUcfPYslVwJ2/Agx471cOVanP1nJgh4dLxTk2lW6mjNVtCYUSwe3n1jnYXZKE6jTGVnh1a7\nl8cffYjkyiZ6k0Qnt4PPJjF2YD9ep5r01g42S5t9Zw9iDPWyvV3CbNLQ7toxB3s5+huDODRNBG2T\nZ88FOf+XL2MLh7l8aY5WuQpuEZvDQi6RZSdeQ6dW6BoNuyO7yq4u5/9H0pBPxNzcHOPj44ifkbvx\nb4Xf/M3f5Mknn+QP/uAPvrSeI7lcjVari82mu++PMT3tRa2WWF/PIUkiw8NOhoc/bEs4HAYeeqgX\ntVoimayQzzd48cVFZmYS/MZvTPPIIwOsreUwGFScfWyIcsbJwnyafKHBvkN9lPJ1drbS9A04mZry\n0mp2MZk1vPzyCmNjbnp6zHQ6CqOjDpYXU1x5b4tf+8YUr76+QrHapZ6pkM9UEDQaLD4PtmELt5Zb\n7Dk2wcytCJJWg8XrArmF1mIkkaoTmBgiOruM02dHrzGTFrtkNiP0PXwWh89JemEB18gwlmCIer3N\n6mKKYk1h8itnyUg+jEqU2vsOp4k7d8iOjuIaG0NrtyNXq2QWF9FaLLj27MHs96Oz2Rg4fhC1xcH2\nehKt1UbP4UMMTQYfCPfsLw0ZcbkMuFwG0ukP3eCMRvUnOstVKi3u3k3SbHYQ5Ca9AS0Oi5+WpKbR\nAq9RSzigw2T65PEkSRIZm+phZNxPdCPJlUubXJmt0umIqFUCx073Uap3ee3VZWqVJsE+N6WmCr/f\nBM0KzkAfwUoXp0vP6KCJlY0SzXqTcq6A160jl28x/eyTJK9fpVWt4AgH0GoEzLouA06Z7Nw9FI2O\nPeP96DMiSreL0IJMpoZg9jP9qy/g2ruB2hng1obCeqrFb37rKOl3t6hpujS6aUpVGUQJuS2jN2gR\n9QbWbt5hasTNoQMurDYjlWIVlVriyEMTeCcCbL/2MtWVVY4E+mHEg5o2jTvvkNsXpljfjea2WnUE\nAmYa+SL5dBFRY8QR8BF22qlU2uj1KpLJCtev75IIgHq9wuXLEZ58cvhT3/MP8PPTMB/4BQwNOZCk\nXWHq2lqOnh4zo6O7+gRZ7nLtWoxIpHT/eTMzSUwmDePjHkwmDU6n/mNkxOs1odXuHv71XI7sygr5\nukji3jz1hkw6LzNlsZAupEjGS3g9RpKZGu0hD+GxPiyWEQwOB3//F+/gGdLSY66QvHad69tFGl0N\neouBp7/3LRI5mfDBfdQqdUSjBUVroqVV0VWXidyawe6xYTarMVkMHBw3onU4KSZzfO1bp3jtR9fZ\nidc4dHaSQqGBORDg7FeM2EwiK7dM5PINDL4gjpWbYBzkvZeuEJlb40ZPgP1PnmTk1AGCFpmVVxKo\nGgV8PQ5SmQZdi5dkCW6fv41aUvj6rx+kFtsk4JRw9Dmx3k2hMhioRjaxBIMIgoDWoEdQeWmmStTT\nSdrVGl63Dofj811RdbsKkUiR7e0ioijQ22v7XD4mDzoehEman8fk5CR+v58333yTJ5544otezj8L\nrZbM7dsJVldztNtdLBYtR470EAxa0GpV7NvnY3LSgyDwia1Bl8uIouxWNSKR0v1zRrHYpNtVePTR\nfiwWDZIkMjTeQ77cZWzSSyaWQ2/UUGvu2j5MT3vp7bPw5392B7tDz9pqjvFxF+NjLpYWM2yuZhEB\no82MWq0mmarisDnQim1y6RItRY0lPEBjJsPex4+Q/NE1DC4XeqOW6WN7UDfzaKsxwodH0UoKCxdv\nMHF0jFPPncKkbhO98A6B/XtxT+/F4vNSL9e4+splojs1glN7yLbNXP3by0yNOxiwmEnMzNAsFjF6\nPGycv0BqbgH/gX2Y3G7y6+uUd3YYfPxxbOEwpWiUvv2jDBzfT1elw+Zz4xv2/ttv9ifgS0NGjEYN\np06FuXcvRTpdw2zWMDHhweP5uHK4Wm3dN3HKJfMMDzt5+dUtlha3cVoELFYtgT4vnU73n+x3z82l\nefPFG0hqFT6vAb1BzdSBEO9d3OSVN2NYAn6sShej047fbmRjPY/T48WoaWOqbtNn0aMqxjkz3c9S\nRr9rzTvsIbqwzvBIkOApFbVMDu/BA8iCjlKhQn72NqVoFJXJxPzbi3StfpqGAWqKmVoeXH4n7nEH\n+kCIZF1H526SXr+KV19dpqfXiVZbRmWx49HrqJfKOHx2BibD2ANWwv1u0Aocf/IwpfUVmoUc/vEJ\nDH2jFCMR6oUCq2++g6h6D1lUAyKDZ0+RTRZY227SbHdpygJhc43E9SskI1mM6jZWu4mJpx7FM9SH\n1uZlZ6d8n4h8gHy+QS5X/0wyshsHvvvcZrODViuh16spl3fJhKJAMlm9T0by+TqZzC5BbVWrtOt1\nREliZVnP+LgHSRKZmHADH0zRCGg0AlNTHgDa9Trbs0vEYiUqshZf2Es9O4/T76DWkBkesNGo1DHo\nREasJdZfvkzHI7J4c5l9x4c5dfZRGmiZeeUOO7ECiXgFk9NOJVukuLqMqPhJVwS2ozKpWgGtSU+9\n1qbt9mIcEnEPeBiZHmBtIcL6dpVuM0Wr3UXQ6Pj13/kK1UqTVqNJo9PAI0dJrawja1VMTQZQuXrQ\n6yX0E+MYeodIyHZqxQoGuwm714mhXaCaKOGbnKQjd2gUFDRmDWZvD++9fJ5iIoO9N0SurBA0C6Sq\nMYxNPz1eLZGZeUSrFY3ZTFNjw2DWYhINRLfyOPp6GenV028u0W23ELWfrelZXMxw9Wr0/t6uruY4\nfTp8v+r2ZcWDohf5eXzgOfJlIyNbW0Xu3k3d1y9kMjWuXInw1FPDtFodEokqnU4Xj8f4kdagLHdI\np6s0Gh0OHw5QqbT4yU+WCIWsHDvWcz91OxIpkU5XaTY72Gw6xsZcmN1OrG4nxVITXw+YTCpUEmTT\nVZ766jDpVAWPx0Rvn5U7txP0BCwEQzZWZtfJp0sMDlh559U5BMWEO+DEFDAzOh1C0OrYO+GmXJHx\n9/vJxLIMjrioLd4iMrfM8P5hkjs1PDY7/d95CrVOjbWvn4WXX6MQi6M2W/G5Q9RqbfLZCnXnCB05\nwN1oG31xDaNZT2SnymCPnVomg2t0lJU330ERJXbmVpAFDb7pCZx9QdqFPIWtLXoOH6b39GlKsRhy\nvY7R68XS0/NFbffH8KUhI7A7kvvww300mx00GglR/OQascmkwWhU02p1MFoMrK5kKKZz7N/vw6hu\nI7WqRLeyZDK1T/VPKJfq3Lm2Ti1fIr+TpFSREQ1WOnKXfQf93FssEG0phIJmxqcCtNsdNjYKiBo9\njeQWI4MW1Jl1VueitG5vcugbz6M2WRF8w+yfVlFfvUu11iZ48hTFZIqNK5fo3TtKPZ7E4HJSb3QJ\n9xmJZRtMTvn4/g+36OuzYXWYWFop0h/Q0o1EuPbiBXLFLuG9I+zd9zDPPz/G8eNhZu7E0etVDIYN\nrF69w42flnj6iTBL71zG+tgh9BNjFHcStCQDGquNrs2BTedmtC0SuXKVUjyFIIJvbJTc0iL52Q06\nXRg8vp+Vi7dpFfKYxDrp+WV26i067TYjx/fSc/jQJ2o6RFFAkj67pr93r4+FhTSNhozDoae310q9\n3v7IY2y2D7/8did6BCrJFMWtTRBERJVE0yHSLIfIVeDKlSjJZJVarY3PZ+Lxxwex2/W0ajVSS2tE\nNzPEEzWiiSrJgoDN4iKoU2iWqzQLWV54ZgCn20Tz3nvslHeodgScXhuVbIH6xjyeIyexW1V0FBGT\nzYRcLRMY7EHTrfPwYSuX7qi5cCnKdqTC818bQ1EE0skyokbD6YcGmLk0y/l3tpi/uoDFZeWJXztD\nswZXbqQ5uM+FgIKYibNz+zaNpgIItCtlHvsPISyGLldeeQ2N9Dp7/923GDv52yidNoZWhvr8HFVF\nYejJpzCE+nFlC1y/uMyN8/PIggq6HfKbm6Qy+xkYNFNauINQy7N331kaGRfVpoLaZMNq7FIrlMln\nKoR69FgtOob7DJQXZ6mGvFjf10vU83lq6TQIAkaP536IVrMps7iY+QhBbTY7LCxk6O3958eZP0iY\nm5vjqaee+qKX8RF84xvf4Pd///cpFAr3s72+DIhGSx8TUpbLLSKREjMzCQqF3bA2o1HN8eMhBgbs\n5PN1bt2Kk0hUMBjUdLtdenrMfO97x0gmq3g8Bq5e3UGnE9+PruiyE83jdhmw27X09Tu4dy+Nwajl\n1HEzU5NumrksD58OUCzJIPkI99p45cf32Jpbx6IOcvBoL0fPjFIvVzDoVXz7P58gHi9TztWYPhJm\nbMiKwaDi4vVFxJCeXkuDqK6FSd1icWkViw6sUo1moUB5J8ah06doW3wsL8YgOIbfuesDFJtfZuTk\nIdq5NHatjrwooxIFKpEtKt0OXYeGak8I/4ED1DIZqvkyWrudoYdPU0/Gmf2/Fwgf2ot/YhSNyUQt\nl0NSqfBMTDyQo/dfKjICu14Qn+WzbzRqOHQowNxcGkEwklaJ6A1JTGKdbj6HZLUg6E3vZ6p8HF1Z\nJrW2xc7MPSLXblOr1JEFLe6hfhxOA+2WwrPPjNBstLAaBLbidZLpOuce78cmlUneTeEzCGzNx7BY\ntVgcZnw2uHEzTn4zx9JMiZNnxggPN1l7510WLt3GPz2O0mlTz6ZpZhKsLURptzqMf+URekacPP6E\nil6vxObrrxJZ3GLvgRDjR0Z48leOsZ1okq8oJJJV9BWZnWgRUWlj1GtRCQqrd9Zo1hpkJwysX3yP\ngf17iK/HiK/tMHF0lHikwFs/uUGr2caod7D32W8Qfe3vCe6bQlKr2Lxyi2x+lxCkFxaRs3HUGh2V\n6A6t+m7FohBPUa82SM7MEDx4hsXFDNXqhyTC7zd/LpHUgQN++vps1GptjEYV6+sF7tz5MBfB6zV+\n5AvMbtfT4zOwcn4bQaWiVakgl/JYx7WsvVlmuR0ml++iVktYrRLlYo25uztYNC62L15k4fI9br09\ngyPoZejESSrFHM2qyOPPHINOm1w0wcLlWY49NEp+dRWlWcfkMtNOpEHQQKeFI+Snd7wPRaWlkG8Q\nGu2lnY2Tnb2BXmoTFMz8z787xdJGg421LNW6wlNPDjDs85PaSTN7ZYXEdhG52aRrcPLWW2ucOt3H\nwmyEdj5FsMeMPr1E0GsgmqhjclgYPr6PzViDgmLD8fBzqDotXvyb6+iCGRrFMqJK5NjJSZTkBpde\nvIR9usbCUpF2rYpW6pCrtZFMFmqJOGZVk2axytjXvkZ2eZna9Z/xxOPnMA5P0TF5mLm8wM56kuS9\ne7jHxoh3Ovhcw6SX0jQ8MXQZFT0OSF+9eN+JVWe3EzvFZr8AACAASURBVD59GrPPR7Mp02x+3DL+\nA93OlxkPYmXE6XTy2GOP8YMf/IDvfve7X/Ry/klsbRXI5xtYLFpsNu1H/IFgN212aSlzn4gAVKtt\nZmcTWK1aXnppibfe2qDb3b0IPXEiRCpVZXDQiUol0Gm1ye2k2HcoTCZZIpavoVULtNpd/uT/uMy/\n/+Y+Rkac2K1a+vtMbL17haVrc4yd2s/dpQqHvnKY9aUUDreZc8/uQ2yVmbtwm69++zHMJjWVqszd\nmRgWo8joniEGw0aKhTqzN7I0S2V8fi/JV16lV2ngtYzR6TOglTrUtlbpdDoogorNjTyvvvgmGrub\n9Moa+48P4ZFkrLoy2+ffZuPCJWRrAHfPAAa7k5m5NAabiYnpPrZef5XeM2cwh3rRODYwelwUN1Yo\nRWPUajL1Yj+x69cRRJFiJILJ7cY9PY1rdPQzR4H/rfFFk5H/HTgI3AL+6y/rRUulJmtreba3C1Qq\nLawWLU8+f4Dt+Q0Ejx293U5P2IkkCUQiRfR69f0vynK5SX57m0Yiik7Y/QBIKhVagw53f5B3L67j\n77EzGhAwtss4TE42q22Wlwqkdwo8dURL9NpNZIeEN+imJZfQ2Cw0W23IJxGaNepthYXVKs8+t4fI\nnUWmvnKWTrVMcGqMyuoitWQG3fs5CyCxtpYnE8vSuHWXVqkIgsjWahKvVWH8zFdZez1GW+pw7Xqc\nu7MJTp3o4eobM+RqKo6c6OfAYwe5+cYtJJXIxBOnScWy6Lo17IYuGo3E7TeukFzeQtJoqGh0rHnc\nPPK//Dey0QTbt28hmu1YLCL5fINuR8GkF5G0EsX6hycIi8+LqHRpVSo4rCrOnu1jYSFDodAgGLSw\nZ4/rvkbjs+Bw6HE4dgVV09NanE496XQNo1FDMGjBYvmwMiIIAlOjZlpnBtnYLFDfKTO8LwybM6S6\nQ2ylG2j8fYBCObZDLZPGLAdZL81RjcdotruIGjW5aBLd7Aw94QlW53doZ5PUV2Z46PQoO7kebB4r\ntpOHKC7OUlqah65IPlfFLaspxFN4Dp9kdfN1thNFtPoEcjbO3seOUUlnSMwtEj4GzYqNyGqCTqOB\nTd+Hz6Nnc1PA4vVgzomUyw369gQImpv0Gss4BwVcXj3xeJZxv4f3XpvBHbATPDDGT/9+hnK5xdCJ\ng1jMVvpGe+gaS3RQoWj0GM1qEnkFncrJG393nifsYfKZJontBHsmeoktb6OxWDnwiJ8+nxql1kVt\nMuE/eIj85hbl5bvI2R10B8+RXV5F7w8CUEkk0Pv8ZJIlUtEculSJ7aUl3Pomk74P96WRz5O6exeT\n14vJpMXlMtxvtX0Av9/8uY+JBxHZbJZ6vU4wGPyil/IxfPvb3+YP//APH3gy8sYb63Q6CoIAbreB\nQMB8P1vKatUSDlu4eTNBpdL6SIu3Wm2zvV1kfj7DB9Es+Xydt97aYGLCjVrs4teVqW9HePygEcku\nUcxVSO3kQaUhlS1TKjXJpMtksnWcTj3rq0keOjRCp17B57ew79wJrtxIc+vmDipJxGLTcvz4CPaA\nj/W1NOsrWbKZCk+cG8KrbiK2oty51OHmW7eQTDYEi5u3XltknydM9NU/w2TWUo+soe8NY/R6kVtt\n2noHW+tJivEU+qaMe3yca5fm+ebvPE707/+SwPQEqm4Lq6GDKNUIDA1SrQ0z0m/E3klSczrpdjo4\n9+yh+dKbeB12SqsyersdtU1AJQkUNtYRAI3ZzNbiIq1qlW67TejEiV96vsy/BF/kSg4ARuAM8CfA\nIeDGL+OF5+fTrK/nUakkbDY97XaHaktg9NgkqVQVt9uAXq/iT//0FplMnd5eKydPhujttTE7m4D0\nFp10hIGAhtJgDyv3thndN0Ik1cTvNTNoKXP+T18lHDRyuy1RaoocOnWWe2sNyoqBw4/uJTtzk+JO\nkoOPn6FTrVCJzREW2oxO9VBsikQXN6A7TO9EPytvXySfKrJ58x5ahxOt1Yq2V6TQ0eE8fIqZpMTh\nfS7O/2madKKIqJKwjrtQW52sL8VoFovotEai0RLFbJmV9RIHTo/x+itLLMynOH1imtO/asMdlli4\nMo+nGiE8GSQ86EHXN0xBW6avUgeTnfhajIV37zB9qJeOoEa0OLl+LYrTqSeZrLCTrPPUrx2lVcij\nMRmpl2u4h3oJTI7SbZQwBAZQ6XQEAnoCAQvdrvKp7bTPA7Vaoq/PTl/fp1tcGwxqBjxd3O0yuVqG\nysIczWYT68AQGlWXdq1Ks1ikGNkGBSwWNdmF28j1Oq5AEFuwh1I8QTlfILTfRLDHiLpZoNpqYhFr\n1KxWbt+IMj46QE21jePAYZqFIoZAiJpk5d0fX8L3yFdZbgd5+DsHKc1eQRfay92X30Cq56kUmiwn\nopz45m8QWRE48tQk1UKJH/yflwhMjFEpNwj0WHCFfPToisy/dRm514KWNikUAkeOYR8ZR616HZ3N\nzu07caIrMTwjg8S3Usg2Fem6DvfoOH63luLcLbZuLaNN2hh6ZC9nHhul2VaoKRqkThNFkPjO//pr\nKHIbM0XMVgOyTkDt9BJfWqdm8OCYOoVnIEAnGydkaWB2g+fRfczPRLA7zMiFFL6JPazNxag2BbL1\nMoPPjCORur8vjUKBVrWKXKsxNaRDpbKzublbOfF6TUxOun/h4+JBwAdVkQex5H3u3Dm+/e1vs729\n/UCMHX8aOp3dMoiiQDpd48iRHmw2HbLcpVRqsr5eoFxucu9emmDQQk+PGUEQMJk01Ou7gvnd1+mS\nSlVRFHj+uWHE5BLrN2eRFJlkqoposnHusTP8uKWwE69SKNZ5+tk97Nsf5Pr1HVZX8xw86MfkceI9\neJTNaIGlCxH+8q/nkCQBg0HFnhEHb7+xTrjPxsiQjVSsiEZfRZuYp1jJYugd4upL52nXGnQSKZwj\nUJYNtIdDePdO03fyKD0TI2y8/TbdVhNzIIT71GO88U4UrcWKIEoYnQ40NgeS0YLRbqES2aRRKFBM\nZhk+a8BGngl7EWVjnoK8ezGoqPXIJi97f/UF2uUigtGOomoTGBtFziXoyjKSVns/uDK/vo59eJh6\nNovJ+2CIV+GLJSNHgZ+9f/sN4Di/BDLSbneIRksfuU+tltBqJfbu9e5m0OTqfP/7d0gmq7tlQblF\nI5Xg8H4nzWya0HCQ7bhALbLJuFdiZGACQzBI4q11BvoCbLz5Oj6PDkmjpZAqI6i17AmrCY33067X\n0feP4RQErHYD2fm7xO7Mksy0QaOnprax95knOPP0QVJ3brL6yius313H4PZTS6dR6kWm//1/oC7o\nKWbL5FI59o16aHdVBEeC9Az6sdiNaOQq0c0kDb+XuTffY//zT+DxGFlblsikypw508ueqQoOl4mJ\nfUF2NtJk2l1O/w/fwk2GnUvv0KhWkVU6+u1W+k/30e5KtI6P8u75Vaw+N1lbD+Tq1OsblEoivWEr\njZaCYvXTd+gw4YNTlKI7iEoXsV1D53RiHZ3k7t0k0WgJp9PAUK8BoZqn02yidzoxejy/9JO33uHA\nHAiQXVykuL0N7M7US0KXqSkf83EVxUgZjdGE0aylN2hGUPdSz+dRaSUOnhknnRmgWq5jD3jpMdbY\n+MH3KUYixEcO0LQE6DvwMDlZR++pEzSSO+jdZXZWImgdYHYHyGUqnDzsQSu0GTkyxea7V8jFEnh9\nFrrIoCgUVlf47u9+ldu345z/wVu0Wh3UrgDmUIjaTpTDR3vYubrJYy8cQm8xU20opNcj2B0aFK2R\nyeefQWhV2Zqr0DM1Rq2pQKVCPJbAfypAz2AQMTbH9s0ZOvUailUhc+0ie0+fRAp4kc0KS6gotiVu\nXtmko9Zx6qFBypHbJBeWsUweQDN5irVL68zcrtP92XlG+3X4PXq27t3G5PPzzH98AovTyuK1eba2\nihSyZdQmI616ky4CH6iFBFHE5PezfeEC1XQaQRQJ+vyMPTqNotbjchk+UVv0ZcKD2KL5AGq1mqef\nfpoXX3yR3/u93/uil/O5oCi7U1cnT4bZ2irwxhvrdLsKAwN20ukasVgJi0VLIGBm717f+66ruxOW\niUSFel3mwAEfDl2LKz+8xs5WmqFBO3q9mnq1QtjW5rf+035WVnPUmwJGk4a/+P4dtjaL1OoyBqOa\nyUk3itpAoV6i3mqxHSnTlTv4/Ub6nhwmn6+ztJhDbrbptFpMTAVQGgmsVj3Rewvs3L6L0WHH1t9H\nJRrBtWcMtdXJ2f/yXTbefofo1av49k6j9/golVqUyw1qghnb2BRqQaZdrTBydBJfn5em00pxO4LR\nbkHXlqmlU0xO7EFWR2l3+pGaJcRqFtueKW795A3kZhvX2Dia0YNsvnuLdrKJrtbCtWeSnqNHqSQS\nGNxu5GYTQVEQHpBx9A/wRZIRG7D+/u0i8Ev5VEuSeJ8t/zw0Ggm324BGo2JmJk4yWQV2xVByOsrq\nUhq7ehBVbot6bJvw/nHyEhSW7lFcuYLBoMYV8iNKGnRaNfPzJfrHzfTsm6RnYoSsbGBtLk0+nuZW\nJcUzz41RjsyRXFqlVGpTKtZpVnOE99upxzbJymUcQgFBEHH7bNRKOQzmSXC7SGwmyBZlls5fpVZt\n4t+zxumvP8Sx/Q7u/MM7mBQbvYcmEbUGxKEh1jfKpBJlNAYdo4MmuorC1lqSIyf6OHfWj6rbpNGG\nre0ynT4L3UaFSrGKQa+GWonLf/GXGJxOKrUuvccO8fS/+xXyDS2bm0VMrn5GT1Rp51IE+52YegfI\nYcfY1HD4oROkomky2wmQJAx6iYVLt0jFC1iCIWSdk0vf/xlOfQO9ToVKp8N/8CCef4UTuPt9x8lm\nuUyn1UJntyOqVPSHTQSng8zpKzRLBYIhGzZDh6KisPnWW6h0OgSNBvvIGId+/RlQFOb+/DL1XA6t\n2UwuVSS7muPYiTMYtCq2371NI53EblXTLpcoJrOce+5xomsJbvz9NWq5AifOjqCTy/QOeoltpmi0\nBbQmGxvraVzFKtlUiUpXRzzbYO6vbnDysSme+MYZvFawG45SbOu4enmL2MIaQrNO8LibXCSOyukF\ntY4DXpn5hTQqrRaP20ApmcI76GUgqOXWhXWsPg9aGgTsCoWlOVJWM4OhMI8esqMVu5QbkEvKiIKB\nt386w8lJA422QnI+waWb8/imphBEgVQkSTmn5Zmv76Mnl0SvqzI6YCISybE4l6D9vg5ErjfonRrG\nqJL5oBFjCgSopdO7gtb3UdpYx2C10HPkyC99/78IPGhjvf8YL7zwAn/8x3/8pSEjoihgt++admUy\ntftVk0KhyZEjPbRaHUKh3dRbp9NAsdhgcHB3VNxs1hAKWTh2LEg6niEVL2Iy61jbKnPgkYO0GhJ3\nl8qYAyaMYoupg34uvJfC4zFhseg4fjyIw2Hg3lyGvl4bw2M+AgEzyVSNa9diaFQiOr2at/6feVwu\nIzqlSnQ7h15yc3jASGllk3okgX+kl2YbsktLaK02KvEY2rqDSlKgsL5Gq1wmduMWuVwDUyCAT+4w\nNTzMT/76DiapjtGk5cjJR7AZBYRHHmH2r/4KAQF7bwjH+BSbBR3X5xoUEhkcIR+nn/46ikVPcmUb\n954RXvybawRDNnwHD+H3GekdOo3QrlONx8guLVGORhk8dw5LOIz+F9SMyI0GrWoVjcmE6nNM0n1e\nfJFkpAh8YDRgBQr/+AG/8zu/i0az64y6f/8kp0+fpO/9aPrNzU2AT/x5zx4Xkcg27XYHrdaFJAk4\nHE1SqR0CgSCiKGI215DlLjrBRDqRxuJSkAwyle0GhWIDVX+Jjt3NyHPPYTJp2S7W8adkItEOFpeD\nji2Fc+8Qt66XubqyDAYFjVHPqWNjXPzBHFdn9PQZWkgqiVK+gq7Xj9CUaTebOD1moqk4HacWlV6P\noDfT9YTYUXSUIzLT/UYuvvsusiwRcLnI76R498WX6ZkcxeaxI9RyzF+7Qf+jj6BaW+DQ0RDJap2t\njRyj4z6On+yl28mgrqWY+cFVMqkijiE3HrufN99M8/xXwuR1VjR+CzsvvYRaaSNbDFjCDiorc5h5\ngrsrCQrJPJG8mnRWxZFj4xQlkXsbGlqtJHZHmxs3iiwtyTQaMjZ1hI0rN9BWW6jUEtH1ZfrHe8nG\ns6gcElW7GppNpLt3sYRCJHK5T9y/XxSiSoV3chKd1Up2ZYV2tYqttxdzMMjWhQtYM0sUt7eJ3iyi\nOnuW7PIy7okJJJWKbreLVqfGaFDTabep53JoLBbodnH4QhQTHZKbSfr2SFx8/V1UtKHXtvt/9w3S\nqtZIzi+SjecpRSNclpucODOIwVFDvZPH1xdiO69mdMxDtihTqirEUm22FmNIGg0/+ZsrHDnRRywC\nuUiFn/1slmKxTu+gn8GQjq7BTkIWqFQbjE77yaaKbMaiLC9FUEvwzHMTjA3oMVNgaNBGxSHSSW6T\nmb+LpNGgMppYevMCrVoTmy9MoH+UedyoRRn0HmJdI/4TLlQ1gdqFt6gXi6hMVtJtE9aOlkRJxGoP\nonObEEQRt1Xi2Okh5u8mqNeaONxmTj08xMCon0oiiCCKaM1mti5c+PiHfnsb3/79H+YcfYkxNzfH\n888//0Uv41PxxBNP8K1vfYtMJoPrAc0N+mCUX6USGRqy4/fv+kbtBjHuVktarQ7pdA2NRiQctuB0\n7ur7rFYdjzzSTyJRpVBoUKu1uHw5iqbbIdDnJrqeZPTkQd64EGfuTgTv6BClTpTnnh1mUFSRieeo\nlBoMj7pRFPiTP7mGz2dmYsIDdBkZcTE87GBgwI4oQixWZnzCS63aopAro+k2aHdVNPIZNAY9NpPI\no79ymstv3aNVqaI3atl/ag/N9bvkixYQRURRJLsVJZ8soFGBVpygU0/z+JOjmE0qTFIdq0lmLQGr\n9xqEXvhtjNUYjegmjhOP8Xd/fpFGuYpaqwWDjfm1CvtNehAEBJWaaqXJnRvbHDkzTFivZvPGLPnb\nV+mWs3gmJvAfPEg1k0Gt1yP+AqZ4udVVkrOzu2TEbMa3dy/2T0kI/ufiiyQjl4HfBv4OeBT483/8\ngLNn/zOFQhNBAIfDhM32YX/rgy+xT/p5YMDOU08dYmOjQLvdpb/f9v4EhkI6XcNi0eL3h1haymKy\ndOl0OoR6woSsOlacfioNNSvrIsGJILLDjmLVE7Q1SWQS9I9osI8eRzKZWVhooNbqkEQTWzsNSqUm\nDkeRA2cPUCuWCAwGgTo7kTzJtSRKt4v2wASG0ABBgwWnqoLp+ee4+dI7lOcjyKoWrj17KDVFNu9m\nkBtNmv4W+ydt1Ja3sB8/Rvf4WTrNFupkHE2xTLHYwnNkgKWbHQpZifLyOtGVKF/7lUk23lukkMix\nspRGvrrBua8f5/DBCW7eznBszzRWocC9nTyVYgWXPoNc6yJKkF6PoAjD2M0iPodI/7CWUirLjXdX\n8E9PEujzoFY7SKdbVKv13auZzRy1WIl0tc3wkAOppVCZncMWHEZuFPFpd0eo2/U67Wr1n9y/fwms\noRDWUAhFUWjX65R3dqil02jMZvQOB0q3i1yv05VlLMEgrXKZdqGwK7icn0dxhDAdfQJrvUg7n6TU\nVNMj1Qn1O5AUeXd+36VldTWPVifRNRWplWrEIzn6e01s5ESqySSV1ijeiWnWo02yFRHFaMO6Z5pG\nV9gdPVY0aIwGul0YHu+hUu9y8XKCh04HqTVXUWk0ZIpdTj4xzI9eWkclqdCKLebX63icGgYHbIgC\ntGpVohtJYn0GStkKJtGAVswRX12ikkgROnmCtqLizo9fxdkbIr1UwjCf4NS3XmB2pcbCSo50ps7r\nC6s89OwxZI2JSqmB1eZFQaLRkOl2YWsjj8ZgwOByYXC5GCzfxm/zIisSVoeBnrEe9HY7BqcTgFat\nhvgJ4jhJq33gysO/CBRFYXZ2lunp6S96KZ8KvV7PY489xksvvcR3vvOdL3o5n4hz54Yol5sYjZqP\nRD309Fjw+UzE4xUURaFabdHXZ0OjkVAU5X6r9wP3ZEVR2FxJ0qhUiWTqPH7mKCrDHDvpFgtzCUYO\n7aGhtiHmGkTjuxduAZeKZY2C02ng/PlNfD4LWq3EzEwClUpEo1GztJRheMjBof0uRvv0zN+pUutY\n2Ikp1FMxXB4jtj1BBFHANJ4nde8uh6bs6B/dg8liIDlzm7W5OcJTw/gPHCT63nu0qy2a1RoKoNLp\nKUTibMyuMRJUoeoJcn22SMOQ4+arl+m0mpx97jAHw15iqzFarQ6tpkyrVELvdFI2mxG0vQzuH6ZU\nruL2W0nHcgS9Olau3sGua9GsN9Cp1eQ3NvDt34/caNDI5z+yD4qiwGe0biqpFJHLl5HrdQDkep3o\n5ctoLZb7n/t/CT4vGRkDAsBV4Of9up8EXv0F//ZtoAFceP/2x/QiH4TiwS4rnZ/PcOLER2Pp2+0O\nKpX4ER2CIAiEwzbC4Q9HQJtNmcuXo6yv53E69UxO7lr80mlzYtqI19iiUKgTzynM3lwhfGCK5eQG\niiwz6m6gbaQx5aqoHD6Kejfm8QOY2zG0XYmNeznKxQa1apN7M3F6nhzEJraROhUwGJl65hzSu3ex\n+V2EzjxEW9TSSWyyuXoXg7eH0KlTDD3tYLuoZTHSxZ1K4LAbqJR3DxK720I4OEbX5ObyazeJr27j\n8Lt4dN8xQoddvPH6Grdvx3d7o/kMBoOPSz+7y9HxAG//9B65TAUEkbdfmeHZ/2kauWsHU4NOvY5k\nttLNV1Bb7MQjWQxWM2M6E8XlCIntFI8+e5CAscpiR+Dck6OEpvvJ1iSSySq53O5BKUkijXIJjUai\nUmmhoCC3uwgGFWqNgFHzYSlPpdOhNvzr5iBUEgmSs7M0SiXUBgP1bBa9w4E1FMLk92PyeGhVKtQy\nGQobG6h0OrqCip1EnfdeukQxlUejhqljkwjbd/G6NLQySTIba/zqf/0Vrv7odQrFND69mZ6hEEaX\ni1algs/lwGaEQqrI5R++xn/873+Er6LH6HSynFJx7XqCqeNWjp8Ms76cQG/S4XLoOf3oMMlch/hO\nmUSmhXsgRLVUp9OWSSar3L2+wd4jg5jcdqrVFrPxMhOjVnKJHK18BpQelhYzzN1c5cheO3uGxnEW\nC7gmpwidPMG9V89jMGgw2i2U61py6TJio8TqRo1SQ4VFpadvTy/r9zY4dHaaldUcpZaK/r3DGDtF\nxFKSbquOpb8fSadDb7XS99BD1LJZBEHA4HJ9rFyrMRhwjoywc+MGyvvjDqJajXts7Be6InvQEIlE\n0Gq1eB8gAeAn4ZlnnuEf/uEfHlgysuug/XEXbZNJw0MP9bK1VWRxMfN+NUTgZz9bY2zMzb59vvuG\nlYqikJydpZNMcua4j5+9vkG6ZWb03CMUrkSYfNRKuiCzuphAUkkUC3VESUQldDnz8CCyItLfb2d+\nPkO53KDd7pLPNxgddXHkSICVe1Gy5hKhfjuVlXm8w32EjwaRD/nZiBTIFrKsLMQxSG2+/rUDaLZW\nqFbqpGJb3P2HN1GUDpPPnKPZajHy1adJLizh6AgMPf442WgCg9GEx2/BoK1SwEqxIZBNxCnlKmhN\nem5cWiX89QncagkFFeViHb3FjMpgQK3VYHcY6PvqSfKRGCNHNFy7voPVqSctimg1Ah2pS7u8K0so\nbGzQrFbx7t9Po1hEYzKRW1sju7REV5axDwx8asZNLZW6T0Q+QKtSoZ7N/puRkd8DfgdYAP4M+C/A\ni+//7o/4xckI/DPHeXd2yshyB5VKIpWqMjeXIpOp4XDomZhw/5PpvbFYmdXVHN2uQjJZxWbT0dNj\nZnrai7Fb4tqP3yHfMLAwl8Q73E+6oqJaq7BwbYHQ2SDtWITa0iblronRJx8j0tXTqLdZXk1j1Kox\naRWErkAwbCWbKDBx1IvZZqHSCtKI5Tn83cMUOwbanRbxN3+KnIvjNlqQGw0Ss3fpf+brXHlnm0uX\nd/jVF4YZPDBCZWcHq1WLxurAu2+cH/3tFeSWQr0tsbqcRn47xsRkh0S6icGkZ3MlQajHg8GkRaMT\nKNc6iKKATq/B4TKhN+qIxKoMTPXjdEmsXUxz5Le+w9bF94itxtCYrfQ9fJq62kopu04zHWdtIYZn\nv516PEajWkMeDpHNqjAY1PfH8Or1NrZwL7r1GMGgBZ1ORaPRxDk8hKs3QCcVAXavij1TU/fNsP41\n0CiV2Lp0icb7bSBRkqhls0gaDRqTCUmlQlCpCBw5wtKPf4zcaOxWTcJ7uDsbZ2d+HWs4TMfoYDXa\n4vjhY9QSCa68dRddYYt6ocTBb7yAYr9KeDSI2e0kWVXhmZ5mfXaOeldPaNKJ0Rcgu7GNzaYnK9pp\n1DLYfC6cehmjXuD0cT+xSBEEgRvXIhw9O44nYGXuXpK9Ux5++qPb2MwqSvkq3qCTdqPJnVtFRkdc\npJMlNPu9qDRq1GYdjXqLbCyJ0mqyeHsTS/gsrgNnqSWiROM1Vu+sYnfb0Dg8iNkukwMWYtES81dX\n6bTaRBZkpg4PYteIHDzai38kzOpagZE+Fy5Dh0YmieTqQWO2kpmfJ3T8OGq9HutnjLS6JydRGwzk\nNzYQJQn74CC2X1IF7IvGzMwM+/bt+6KX8Zk4d+4c3/ve95BlGdUDNMb5eWCx6HC5dr2KyuXmfdO8\nu3dT+P3m+1Eg9WyWQiJNvOUgGsuzd3+ARLICYSfOgJvb82XeuxxFI4Fe16FcaqDR6UjHc7gDAgeO\nj7G1VWB7u0i73aHZ3M3TMpnUtNtdmuUK2UiTwSEH5r5B/l/u3jRIris903tu3tz3fV9q3wv7DoIA\nCBAkmy02W+xF3S2NRh0jyxGKGMszCtshhUNWOBThHwp7RrZlSzEzYbU1bknN3tjsbi4gCZIAsQMF\nFAq1V2VVZmVW7nvmzd0/EgQbJFtNUE2xOe+firyVVflFnnPP/c53vvd9z18Is+e4BbPHwXY2i1Sv\nsb6coJLNI5fD8T1G5q/eRitvYfa5cA6GEG1eP3ullgAAIABJREFUlLIWXRSM79yNQqFk4coduo0W\nA3sGMGtaiI0qRbkRucuEUIsjiDKqhQoyQaAlqtBpZFhMItFbWexBDyqLjdCQEyG5RnR5HgCDzcZX\nf+cI5VwZdX6FRnwTLCaK5SLtZhOZQoHKYKAtScx++9vYhodpViqUEwnodqmm07QkCf/Bgx8Yjw+r\ndCIICL+kzcVHmZ3/FT0tkDLQBzx/7+e/+6VE8BAwGHq+AoWCxFtvbdzfmedyEplMlTNnBu9TedfX\ne5NLpRIZGLBQLNYfUPfL5yXyeYmRERveAT/BE4+RuRbFPFwnmm5SLJbpD2pplEtUEkmka1dIb+Zp\ny5Q05/Uc/9wTjLj7+P5LW8zfijA9aUVl0HHwUBBBKuPUN4lu17l5aRXBYOONv/wpw/tGOTylIbaR\nwK5pkw+v49u/l1a5Tnb+DicPDyAT4PrtDGdOH2NUIWHUQiEvsVHSki4KGExa9B4FZpOVy1fjBIIm\ndDoFzY4Ms0GBTKPB3+fAH7LSWLqGwaDCYtES364weLCfpUiDrjrLzRs1hgcmKCvrjH05wECpQE00\ns7BRIzu7icpkwjY8TDGRohgpou8WGXxkHxsVBdVqA4WiJ7OuVIpEo0VEZ5CRQ0W0rQJqlQy53kjf\nsaMY7RZK2z1tDY3d/olTyaqp1AMlyGa1inf/fmqZDHK1GrlGg3VkFHRmLJMbKGxuXOPDrG+3Wbnz\nDt12m0apRLVtoJCvkh/U09gM06nVQG1g9dodfPv2kK0oMBaa2Hb08b//u3PsPz7O7ucGSG7EcQbs\nBEMmts/+GP/Ro+zZ2Ud62sbN61Gk2AbrN2fY97WvoHfa2dwo4e4TGfAoKE45uXEtgkNb59kvjNJq\ntwkMelhb3GZ9PoLKaESjljE2akMu9HYlHr+dwVEPC1cXMWlBJSpZWM7h8xnp9/fTKpcYPXWMWDjN\nVqZLdKuCyWVDJtdTKdZo1huAjPBqiumv7KVYaZOKpDl4oJ9qvshrb6yi06vYOe1E1y1S2pLotFoP\nLEztZpNKIkGzVkNlMPQYUzIZolyObWQE28jIJzrmnwZmZmY+E8mIx+MhGAxy5coVjhw58mmH8wEk\nkxVWV7OUSg38fgP9/ZYHTDALBYlqtfmAem+j0SaXq91PRqSqxOxGlx++eK1n5ul34Q+YgQ6Tk3ZW\nVjLEYlbK5QahoJHTT4ywNBelmJeYuXSF4Kgfv0+P32cgco+JOTZmw2JRE93I43AbCQyK3LoeYf7i\nPCa9Ab2yzcpSCptFCbUS7XabbLrM9StRAr4xVINTTB8YxG6AeHibm1fC7D4+xe0bM+Q332F4yIJW\nLeDZuQNHvxeNWkTQ6DBbBrj7D3NILTmh6RFK2QIWrwOtUY8tFOT0E3J2HhxmYynG2IiWgClP+sbs\n/fuxGI2i0OkIHj1KN7eL+e+tobXbUZvNaB0OHBMT1ItFVn7yE1qSxMabb+LeuRPrPaov3S65tTXs\n4+OojQ96R+ndblRmM/X8e+2d2nsMyV8GPkoyIvDe0UwYOAF8Fwjd+90nhp/1KdFqFUxMOBAEgWTy\nvSOCd5HP19nermA2a7h1a5uZmcT95GN9Pc/UlBNRFO53aL/7/9+d+FanCa2lREumpNWqI4pgs2qw\nqJvIKym67Q5yrRa9osvKiy+gtxqIz87z9KlnGPaJlBpyatUGseUoGkWTmkFDqiwi0xoI9pl55PEJ\nwptl6pIct9eKohjFvXOa/MoygtaEza5jceYSTx2YQOmbQJTBRjjLnbktYnNLBCYHyeTqqHz9NFt1\nig0RrVkPCg06bReH08DG4hbFgkS11sbu0GANHEaqd6mWa4w97kfSutBVu2yE09yZy3Biv5WLf/si\nqZUN9GYDnp2TOPqG2Mq2UVoN1NIpDj8+gckAmNyUy3VK9Srb2xJTU04CARN+v5FUqkq73cHy2DDd\ncp5Op9MT3dH0hMtsQ0Of2BwpJ5OUYjG6nQ4Gj6d3U/6MhGO70UBlNOJ5/HHUJhMKrZbVzSqFuTVW\nrt0lubDCmExNW+cmu7KKxmqlnEigHnKhNhlQdSqs3JhlaLCPWluPb8hNaMcwluERoltl3nw7QqMF\nuWict66uYQ4E2VxfxPsb+9EGQujsDmJrMfKpIrGFVXyHQqgsJs5+5wKlhgyj1UhkNY/VMIXLquNr\nXxxA0y7SqNW58sot6jYVQ/0GqqUaZo+NTjrCo2cmCQ7ZcCuH0JhNdBVq9LpJlArIlbok8l0klKxV\nVAgqN6qpAK3OHGKjwN7Hx9D5Q7xxPk6gz87mcoxGs0W9VsdmUnD5Oy8hqtTYdhkxWlRMTzrpttso\nxS6i0YJeIz6QiLTqdaIXL5JbW7uvZ+CcmsKzZ8+HUrgb5TKVewwbrd2OyvDzq5m/ypiZmeErX/nK\npx3GR8ITTzzByy+//CuZjLz22tp9MbyNjTypVJVHHgkiijJSqQrJZBVBEHA6tZTLTarVJjKZgFb7\nXsKSq/Rc2utSE4vPzUs/WaJcbvA7v7uf/QdDjI3Z8ftN91k3K3c2qGxvs3e/H5tFRSFbwagXOXzE\nz0m1ArkosLFZoC61GBiyoulq6BvS8+r3v08ukUNrbFAtV5G11YRXYvS7RZxOA+lICqNJxdZKlGyy\nwL4zh4hVqtxNGzAO2UjWtAyePkl67g4KRZvgmJdqLs/b//e3SG+lcY8NEnpCxdSUi8hSlGwJpo/u\n4cCxYZqNFrWWyOorZ8lvJ1HrNBhGRLaXY3RaLUyBXvtCS5LILC1hn5jAd+AAKqOR5OwsMoWCai5H\no1Ihdv06nWYTuUpFq1YjvbiIdWSEdzuGu53OfU2Sn4XabKb/xAmSc3PUsll0DkfPJdhgoFSq02r1\nnJY/rnTDR0lGksAuYObe6zLweeA/Ap9o99bp0wNsbfXcFwMBE253rwny/f4F76Ld7lAoSCwvZx94\njyS1SCTKjIzYWFhI0+32PE2Gh633DZccDh2HDvmp11vcvBlHEARCPi2jHieNSy+gtphwqDps3bxN\no5Drcc937aK1eo1hS4DNsoaFZINmLs3up/dw7ieXiSWbXDt3h9E9g3zhG0cwmtKYnTocln7WXlvv\naW/YHVhCIXIzV1Cn88gTCgLDTrZyAqKs54GSS7tpydQM75+i3pahVki4bCInDoZw2ASuXiuwa4+P\nZiXI2JSP7eUNvv/vv8tjXzvF7i8+yXY0Q6dWRspmGbIbWIx2mRyQU1hdwiNL49vlJZ1rYO1mCXlk\ndI17qOYKPPKVg8QjGd7+8SyVOniGQhw5YyXQL+8p1eZq2O26+x3wAOjdn9yEeB8KkQgbb71Fs9I7\nD02p1bj37EHrdFJNJlFarGTbJm7OlTBXS0zsNONViYTDeWp3lzEMjVGv1ujUJRwegZ2fO0FkKYpc\nrcbq0DN9bBrl+jsInQbNfAqXz41/7xTxsohcJqKTVTl8wE1XqpBYWKG0skC71aWaybDxThufz0BN\npuHOXIxqvojdrqOaL7H7yWPM/4e3Ca/nGZrw0R8yopO38DkFrLouyz98Fc+uHZx8eppctsDo43ae\ne6YflBqyGxGW7q6hrycwhG+gMhjw7tuHQpZg4fo6BpeLkX37iOVlFHIVFldiLK9k6etzc+rkQW7P\nbuPqlMkkcli1HSYmnSDI2HOwD5ehhcupZWpPkFvffRHPvn3k4jU8TjXZ23PM3W0ycaBnqvju4lfa\n2iKzvHw/AWzX66Tm5jD6/R+ogpWTSSLnz1PNZIDe4hZ85JF/tvnyy8TMzAx/9md/9mmH8ZHwxBNP\n8Md//Mf86Z/+6acdygfws6q83S6Ew3nGx+1IUou3394kl5NYWurNl+PHQzSbbTwePR6Pnmq1gVwu\noy1TodBosLrMpLMSggCHjvYxNOKkVmsAArFY7zkyN1dDWS+xf8pCdf4GYqFOI6Vja72Eq3+AN96K\nkMtW2bfXy6nTAywupPBa1Uj5FCOTPoy7HYidBhaLgGfATr1aoVXL4rUKaA4PsP9QkMJmhCOPDlG/\nex7fnv3IBpRUlu4gJGoYhvsY+vwjNFsdlM0SK29dpFGpUisUWD73DjqnC+3IUf7tn3ye7UiaXCyJ\n3aIgEy+RX46goElLqhOc6MfkdqI16ihFowiCQCkepxCJoHO5iN+4gSUUwjU9jc7pJL+5SRfotlrQ\n6dBpNmkLAqZAgFo+T6fRQJDJ6Lbb6N1u1D/H00jndNLvdNJpt5GJIvV6i6tXt1hdzdFud3C59Ozd\n68Fi0Tz0XPgoyci/AJrvu9YEfhv464f+xIdAIGAiEPhgf4HdrkWvVz5gC6/TKXA6dTQa7Q84xkKv\ntLdvnxe3W3+P9aLB5zMgl7/XPex06njuuQkefTREKV9GiqyirOeZWV+nns+jD/ZjdFrouF3I1Wpm\n/v67GJ0WnKMSupaOrz7zCCu3mnz/P56lIajpGwtQlxuoVFvcWcjTzGRRdCQGH53Gixa3sU1+M0Ls\n+jUyK+sUUhkS16/h2rmTelZi843rLN+NMzbpps9uJhC04OzzkdhMoqjnKW0ss3Q5wpPPHqdUqzAy\nZEHRKDF3/iZKnYH5pRzTExaqiVtcfvE8KqudZLZJYDRIaGInlfUbVDfXUKmVjO2cotvKo+8UefLX\njpOLbrF2c5nXnj9PJVegmCmT3UqhMNsIjflJpaoUi3Xsdt0Hvut/DnQ7HVJzc/cTEejtCrIrK/j2\n7yezuEg4LXDxUgSty02r0OH8+U0O7PdgFkrINCKttobgk89gNwiEX3+FE48donxqF8ViHYdVRWhQ\nQdU8hlanYn0+SlmmYC6tJ7Me41Bfg/N/+Xf4x/qwCFpc+0LcLmURNRpCE/2I1Qzu8X3MRapItRba\naoylC7eYKdSYPjjMnkkPJ587gqqUIH7nLrmry5hzLhSDQ9iPPo5MJdKeuYDdZGH+b39APpXDPjZO\nsaFk4sRRDGrI5jxUigXufOe7RLdrKI1Wmpt58kKVmmGCZLqDzaZmZVXAZFSwvpLilR/N8uvPDvP1\nb+ziytmbFBMpJnb42O2vU5p5C4+sRHkhRz6exJRMsvPRRwi/+hKJlTA2t41ut8v2zAzl7W1MgQCV\nZPJBMxHe0yF4YLy6XZKzs1TT6fvXpFyOxK1bn+xE+QRQKBTY3t5meHj40w7lI+GRRx7h7t27ZLNZ\nrL9ifiTvR6vVodXqMjubpFJpolSKDA9byWRqJJNlHn98EINByZUrMeLxEkqlyOCgFceAH0Ffxt/q\n8mtfnOLqlS2uXInyzjtRvvnNXRw8GODs2VVEUeit79feoL56B6fViKuzRTvoYyNf4dChAJLUxO3U\ncf7Ndcq1Dm6PD6NLIGCa5fprNxFEkcrZyzzy66f4zd88ya23bqE36xkdd6OUd6k1g7TKeaTtAmaD\nnKXLr7Jw7jKh6RFqq3PEzp3FMjhI9PxbDD31FFpXjdhiGElqkdqIsxK5TWp9i065xIhfwFCXs5mt\nokzOYLRoGTjxHIm7i6xeuIysXqbbbmMMBilcuUKn1cI2MkI1maSaTKK2WnuWDC4XBrebxO3bQE+U\nUK7RYOrrQ1uroTKZ6NxLRLz79v3C6sa7TejLyxlmZrbvLwFrazm63S6nTg08tPL2R0lGIj/nehc4\n/1Cf9kuCzabl6NEAMzM9zwKdTsGOHW4cDh3NZhuLRU083jtZkstltNsdQiEzGo2C4eF/vOtXJhNw\nufQ04+vkiyU6Gi07f+u3WH/9dar5Av69u3FMjLP8xgVyuRpau40b55eJplvU6i1Eux+lSk4plUNs\n2fHpahRlXRSdOl/8zb1IyTivvLRAOlXjX/+bR1h69Sz58DqIIiqjCa3XR3pxmaXlMs1Sgb6Ajm45\nRyW8RCewi/MvvINSytM36sFnEzGHBJo3X8O/7zDf/+sf4BkdZHDfFFfPr2CplFCLHQrzN7FYtcRS\nRbRaDfmVJY594TBbWzUaZiOirIuMDsa+EKb+AcLhPJWNOJfeXGQznEdvVKHUaqiXy6STefrGA/eo\nb//0hrh2s/mxNCfazSb1YvED11vVKkq9Hu+RR7j2g3mso7r7N47JqCB+9TLZtTVUFhuFCuhkdVoy\nA6myiObOTcR2A6MoEn9jDe1TT6PzBdlumomUs5SKEpVElD6PiluvLaBTNAnfuI1vpB+TU8fU0Un0\nTjuWTppOZIt6o8PsuZv0B7SEZ+aQqVSg6JJO1zDqtgkYhlh4+yKlxRXMbgelTj9n/9+L2KZ2k5m9\nyfCkF08hihTfQK0x0tiOYA+GaK3eZHErh85px+r1kA+HqdUFMls5WlKFTqxK6JgDk97BkZNDHH80\nxNztLaqlKl/76jg7dvuxKcucORmgVbXSP+Zl9ezr1MpV6CrRq7qYnWbUJhM6dZdaWULvcDC0Z4j8\n+jqxW7MYvD48UxMYg4EHqJYAolJ5/4ju/rhI0gOJyLuovY9i+FnA7du3mZ6eRvyMsIJUKhXHjh3j\ntdde48tf/vKnHc4DeL8xntGoQqmU3a+YvNdQ2ut9czp1vPNOhM3N9+59rVaO12ukXG6yuZQhulWi\nWGxgsagZHraxuVnEZFLz7LOjaBRdpEQMlc9Obh6ahTSZ65c59t/8awI1G5ubJaxWNe12F7tdS7Xa\nJB7JUG1l2V6LYrKbKObLmPUKmrFl9FkvzsxNbLYR5KZRzr2yQPjWEhqtkkNP7qUQDhO/8BZquQql\n2CZ28w6NQp4jE6MUIxFufetbHPnv/nuarQ6Vagtz/wDFW1XK200iK0kUgo+dQS0Wg4QvuB+53c/S\n2xdZm4khyAQCARNOq4JqKoVv/35UZjMyuZzavXutlk6jv9fTYQoEUBoMiCoV6fl5BFFEYzbjnJ7G\n1NdHt9VCbTY/1DHL6mru/XsREokKuVztvh7MR8Vnq736ZxAKmfF49FQqTXQ6xf0Ho0Ihsn+/j7m5\nBJVKE0lq43brGBh4sOxULjdoNtsYjar7FDG4t4NLVnj1bJjYyhZypcjQqJuBg0dp5jPoAiGK0S2S\nkSQOv5N8qc3i7CZtQUExU8EXUmIw69Gb9UQuvI2UL6DQ6rA6xihdi6L0DRC7s0wqVSWf24ulL0R+\ndY12tYbG6cI4OMr2ZgqjyUJ8HRSyNt6gjuXby/RbfcQXw+QX77J+Vcc3/tsvEHnh2yj1evR9A+i1\nCi7/+BK7vuygVKrT12ciF09TzWTxD08gMzQwmlRsL6wSvrmI2hGgs7RMo1ztdZirbDR0Tu7eSOFX\ndzHajXQFgVKxjsNtBpkcEBEECAZNOJ0PN9nK5QblcgOtVo4oFUncudOjhdntOB5SyVKuUqF1OJDy\nD2rlqcxmlDod9WaXrkyOTHzPLdYkr7J58w6GgRHOvrHGykKcdqPJ53/3afY9eYboyy+gqKUQAOuB\nk5y7kMDobBIrq4in2+j0eoJDftqRuyzPp9g5GqJ8e5FmPoVT6WT/F08TX0vQTufRjg9RzaShUaVV\narC9vI7c6iIRLdA/YKUWi1BOJMmurpNYWMY2PsGFs3fYWtpE1z9CuSQxeyuO7ZEA9YaA2MwjSCVE\np5XMWg6Z0cnKpRn2/NopSuUGep+frVubNFtyjHoFOqOObFYivBQlPjOLzWHgwJ4g27dmUG4XKSvV\nTB4cpRTZJL4SxTY+QXL2Nq1ImEK6w+DkTvyHJmhLVaZPH6LWVaLSykldirCdqFETy2TbEXboDGht\ntl5S0e0iiCLW4WF0jgd9Z0SlEqXBcN/V91180jTvTwIzMzPs3Lnz0w7jofD4449z9uzZX7lkZGrK\nyepqjmazjcGgZP9+H1arFotFzeZmoXekWmsiijLMZtW9I/eejYfRqEKhkNHpQKXSIBotMjnpYHY2\nxczMNkajiv5+M/F4CZNJRSLRUz8WagUO7t+B76QcixaarTbheIsfnZ3h6u0COp2cqSkXjz4aJLKZ\np55O4e3rsB3L065W8ASsKFtdtK08jVKJ2LVrBE88xkt/8zort9eJRzL07xji4g/fwvLUEDIZONxW\nVEoZ3UoeWbdJU6qhc9hpSxJSPodjqI/B0ACSOYTbESWxuo7BHUBw9tFuNyETRTIEaEkypHIdnVGH\nVMixtpzEdHQcQSbDPjFBo1ik2+1iHhigmk4jKpXUSyVq2SyCTIbO4WDg1Cncu3bRqtVQ6vVoLD/f\n9+sX4cMsHWQy4YFn6kfFZzYZAVAq5R+6O3c4tKjVCu7cSd03XAI4dMiPTCZw506KxcU0rVYHu13L\n3r1edGKd9MICUqXG5cUOsa0iuWyZYqHOyswyJ5+YRFMt0DdgxTrtZmA7SzuX4trlTbqCnEZNot4V\naSmNPPlckMtnZ4iXq3TlCvwjfrymNrMvvc3oMyY8fU469Q1q5Trakd048xXanS4dmYqO1oxvapyz\n379CeD2P0ajE51aTiucZFEV0ZgOFbodqoUQqXUPv9yOqtcgEGZP7Bth55ihan5/gxAAGvZyNSAmF\nM8TWRoZ8TUaj2aZWlTA4LAj1ElNffo5qsYy9P0hDbeHumsT26hY6VxuPVY6n300mWUZtNKLRq9i1\n18+uaRt2l+mhKiNLS2lu3tymWm0i0sKvq+DspqgXCki5XK+T+yHhmp6mXi73zkEFAYVGg3N6Gplc\njkYOgYCRfL6nVSOKAu1SEXvAzdVbcRLxAvliC7lc5OL5NZzBgxin9hFwKRHocvZchKsvvMHQgWnK\nWj+xdIepiRHKVQlZsYjZrKar1BLaNUlo2EUJLRevZ7h1NcrYDj87Rw0UZi8xuaOfRgs0ZjNyJUzs\n7sfl1BCfXcMW9KLSqHAEXMhtHrZX30auUiHKZBh9PsqZHLmaAkvQSyWygcJoI52p4BgZoWNw4Ot2\n8O2YoJwrkNjK0mw0qFVb2IcMYLQz5JCzsZpma24Z55Fhapd/SjeyRXpFRt/h/aRmKmgddkYOTfeS\nO5VAwWlBaTCgdThox1fJtM28/HfvILVAbbbSykvsP3iEZi5DMlnh1jvzPPX7X73PQtJYrei93g/Q\nAGWiiGtqCimXu3+0JtdocP0Ki4b9PFy9epVjx4592mE8FE6fPs1f/MVffNphfACHDvkZHrbRaLQx\nm1VotT1n3vFxO5cuRSmVemu306nDYtESi5V6sgKmNuXwHarpDJYDB1iKSmxvV9izx41er8DvN+J0\n6qlUGuj1ItFoifPnN+m2mgz4lMxci/CbXxnk8gs/wtIfopDdxunUcfCgnkikyA9/uECt1uLXnx3m\nW28tYTdbCQy4oNVCr1fQH/ChUsmR+cfY/z//byi0KjZnX6Zbb+L0mKHdJLeVo6k/isI7RHQ1jN7l\npIMM58Q4apOJgVOPobHZ8OyYxnPoKGubFWYuLJHbjGL0BwlMjeJxa6G0RWZ+Dsk6zM0L88jWoqSi\nabw+A2aDjM2FdbwDh4hducKdv/97FFotoWPHCJ04gahWs/LSS0j5PIIgoHO7CR49iu7nKPK2Gg06\njQYKne4jVUhGRmwkEuUHWiNCIRNm8wd1Sn4RPtPJyM9DMllheTl7nynT6XRZWsoSDJpotTrMziao\n19t0Ol0qlQJStc6UOU1+eZGOPUT49hoyo5VaU067WabTEVkKV5gY7+Pbf3ORvokg+w8dJ3vzCkpF\nDIVaTmjPOHJnkO/+zZs8+ztnOHk8yKD9KFIuD6kN8lfDNGtNaDfp2zHM5JgFCRV5mQO5d5CFK4tk\n0mkOPTuJKjjEduI1jCYVuWyNZkckuGuKjWiFTruF0eOi06ijVkJdpcS9cxrDxBSNgJJ0uozR4cKo\nlWh35UTu3uCRYwdYevMSqXyFtsXGmX/7u3SzCRbffIOs2YB5YJCa00kmvIxzxz7eXFyCvJGRAT1P\nPDHEZqyG2e1gaNSJx9KlHZ6loxknl6iRWV6m2+lgGRjAMjj4oZbUqVSFK1diVKu91qPcdpKN6CZP\nPDWMKBSh26V2TxfkYaC12/Hu3cva2bOU43GUej3NWg3x6FF0DgfT007q9TZbW72SrtNjp9CpEHs5\nQrXa8wsSRRluFKws5wi0ishzOWzjk2QjcUS5nFQ0xeQTE2zFK0TX0wSdAqF9u6nMXSW/FcdiN1Cu\ntEiIRubvVtA7vWys5yhsJRhR1ZBWzzH4xd/A83tfYO7Fn6JQltEp9Ux/4Wm2Khq6vkky2w18KjVm\npx1BZ6RZq6ELDRLdvNaj0mmt2EeU6F1OZDYP3qPHuXYpjHrcTUPnxHP4URI/fIUDj47hnhzFNTlJ\nqirHZDNBrcyu3z5Ge/UGs9/+DmqDAYM/gMruotFq4HC5iW+mSc9cRyF2cQ4P0AWyy8vo/EFWkgra\noop2OU+n2yGTKpJqhLCaRSgkkBrQQoFzeOAXjpcpGGTwzBlK8Th0u+jd7l8p19CPisuXL/OHf/iH\nn3YYD4XJyUkkSWJ1dZXBwcFPO5z7EAQBu/2D1TGFQmT3bg+Tk70KmyjKyOclUik5o30aZp4/S3ar\n5xCtdW2iEczI6KBSydHplITDeeRyGfF4iWeeGb23MW2TzdYJBfSk0xWqbSUqo4GpU0dYzqhIZFqY\nzb2j/eXlDDqtnHpN4olndtBtt/Dt2Ul2aQm7uQsGGwVTiO88nyCfr/Nf//YoBp+f+vIqjewWKqUM\n/fAkWwU5g7/2DKXnf0RbbWL8S8/hmhijuLlJfm2NdrNFemUVmU5Pt2Oi0Wxz4HOHUHZqpGbPYdSM\nYPQ6GXz8DK9cTbE4t82x3WNkY+eJRkqYJqyYrHrURiN3/+EfAKhls0TeeQfr0BByleq+1EG326Uc\ni5GenyfwPmZVt9slu7JCcm6OVq2G1uHAvXPnByqc78fAgIVut8viYppGo01/v4WxsY9nPfBfZDJS\nLNZpNB6kJvUSj8Y9A6YeBUkmE8hkqsTCCbyhFjJApIsol1HMV5E7PBw/NUm3mMKgU6AwGAiOa2gj\nkGlqcZ94iqemd7I6v4VMoaBbr/HsV/YTHLDTDN9F2LhN+KevIGs3MTotjB0/iW/Ej61URa3SERf1\npHJV/IERlAUTGkeVCzcKHA82ePRfPketZieOAAAgAElEQVR8bpFyKoNtehy72cGF1+YRRDX2sVG8\nfiuDUyHE0W9QkOT8+KV13vjOeQS5iD3g4cmvn0Cm1TGyaxC7X8D5O19ilyQj3dDQ7lTZmrtA39GD\nVKIbxGduU08nGDj+KIKUoN+vIxbLcafexBcwc+bZPQipMNG3v0tEq8Xo91NNJknevYtcpaKWzdJu\nNBh++mmCx46h0j3Y1JrLSfcTkd5YdJCqdbL5Bm6lkna9/rHGudvtkp6f72mY3Cs1VpNJtm/dYuDU\nKfR6FcePh8jlanS7YDLIuVEpotWpMJvbaPUqHEEPmbIMhVbL6PgEytQKrVyK8UkX/aNeVtfyxJbC\nPP21E3RlSnT1JD6PBvXEaSLXb9FpSFh2HyW2JdKKRal1mxiNBjZvXMI9riR67i30FgMag4Ydn3uM\nRKKM3m6hY3TxwvO36HO68Bw8iqBQceJffYmZ83dRWW3cno3jm57COxLAuftf0pQk6HYpNBT8+Hs3\n2Yg3kelNLOWiPHLEj3LXSdwOFY3EJpe+8xIo1Dz5mycZeqyPpTtRcp5Jhr7+TQo330FGF43dzt03\nr5PqWLHajWQTOVKLK+zWabH3h2jX61TrUCxUEZQqmoISjajENTZCPNPC4lOiUCsJ7plG8xC+JzqH\n4xcucL/KyOVyxGKxX2mDvA+DIAicPn2as2fP/kolIz8PoiiQz0v31/F32ZE2mxalUMM6PIjB46aR\n2cZMAWt/gKIkY24uxfXrcZ55ZhSPx8DmZo+d8/rrYUBArVag1GronxpEZTQx9aUvEqmoeOtChO1U\njXq9jc+n5zd+YxK7XYtSrWHu4hrb22UefaSfI18bxahuEi/IuXy5wNXrq5RKdS5N2xje2+sDKZdL\ndOoVjH4/60tJwm0Z09/4HeQ0GZweQCwlaFYqCIJANZ0mNTuLfWyMkV1BbCo3ubUVMitr2BQiq2df\nQ6FRMfTrX2c7vILBpOHabIH9T52hK1UIBE3s3N9P/J03kbJZRIWiV11VqUgvL2Pq70dlNCKTy2lJ\nEs1qlVIs9gGtoGI0yvJPfkI1lUJUKqkmkzQrFYaeeOJD1VjfhUwmMDxsY2jISqfT/VjHM+/i00xG\nngL+VyAN/FJrnjqd8gGNEgCrVU04XODixQjJZBXolZgGBiwkywVk9N4rlpP4HEpSd3MERwykbl5l\n5foSk7t8lDVeNtIwv1zCf6fIU5+f4vDeAbQaOXdePkc5U8Rg1pLp5Aju2YnR78M2PkFhaQ593xDm\noVEyG1uotUrWVzK4T+/m//rBHY4c9NDuiLx+dol9hwfRiRLXl7JspXXIFQZit9v43TmOPDZBZDnG\ngE3CTB5Zt8VGOE9R5ebm+QVQqnGHnLhDTrbjOZ54JkRjJcrN//A8tXIN/cQ+vEeOsXLxJnIE3vw/\n/xONYgHfcID07Awagw7PoaP0q+IMHR2hIdfh8JqR5aLM/ue/xTo0dN+PoFEq0ahWEejteOvFIuHX\nX0dl7Lmy/myFRKmUPdCopjIYEJVK1Go5Xan3vSv1+oce52at9uFNkek0zXuukoIgYLW+t/NS+wc5\n8w01Vy+t4tujpCJ1MFkNHDroQVUKk1nuOVtKMhtFSeTgk6eYj8lY3Gzy9NN9sLDG1f/8U2LRPAan\njekzR8li5rX/7wVK2ynMdj0Otxlbfz8mdwPlo4cob0WoK0Q0bi9qi4V6V0ltO8meIyPcvboE1Tbx\nQo4nvjKG50v7iKbaOFwGdDoFiWiOly4scP2teUaO7KYkddm904lT0WRmocLMzC36ggbsLiOp2Rss\nX5yhIxMxaWH5xy+iDI7zg2+9SastYA14GN15Bo+xwcZyHKXDxWuvb2CzaTmxbz/bC2sIciXhN94g\nPT+P57BIcV1BPt0km6mRr8RxTkxw+EgAnxVcu3dj7w9gMD48je+ziqtXr7J3797PTPPqz+L06dO8\n8MIL/N7v/d6nHcovhM2mJRAwsrz8XsXU69WzvV1h5eYSifkVNAY1+w+NYlfmkLWiPPOFQ3zvh6sM\nDFgQBPB49HQ6Hba2SgSDRorFOu12B41Wid2mQq3TkKu3WQkXGBi2MzjS0yuJRkvs3Onh0CE/f/1X\n17n0zgbjY1beeP5tztPkt7+5l/MXoqxGGtRrHYqFOj98YYn/6X/YR//vf57w3TD+qVF0Lg8/fGGB\n61dy/ODlOA6XCcN3o/yPf7iLYiTCyssv0ygWsfQPkA+vo9CoKcW3Wfre92hLEt49uzE7zFRLNUpb\nEUwmNa1Wl7pCx+2VGv4BL/snQzQy67Sl3nF0p9kEQUCUy9GYzSg0GiqpFLVcDqPXi87lQqHTfeAY\nNX79OvEbN+7riygNBriXLBl/gdIy9JJdUfynyY59mo5VF4FPpAvM7dYxNGS9/+XIZAIOh4719Ty1\nWotMpooktQiHewJdgxNezA4TcpuLYjJDUF/gc782Rr+9QzOzzf5jIxgHR7j09iqx+TU8fU5MehVv\nP/8mkTsrLH7vO1jECv1+FTZ9B5nawNmza7x4ocRcvR/nF/8VbZWBl/6X/4Nrf/s8tVQaW8iPXOxi\nMGn40YvLWEMBfv9PvsSp04OEX3+No/vsaFQC6/NbNMtlXD4bue0UQb8Gr7GFStaiLqgpFiU2lrep\nFCqM7R+lo7dx+XqKazfTrK1maLdb6N1u/IcOUq3Uufv97xMYDyHlC5TTOVrNFo1SAZVOTSWTRaNV\n0kpEaNx6A3crgqmZphqPIVMo0NjtbN+8iQDUSyU6zSbVdJpGpYKoVPYcb9PpDyQILldPF+BdqAwG\nRo/sxOXSISqV6FwuAkePPvQ4fxhrA3q9COLPsbZWadU0FTqeeu4Axw44OLFLw+FQBYu0SXXpFlI2\ni97lIhQyE/AbaCU2CTjlnNilQSNIKCwOnHsPYA14UZvMWAaGqEXWkXVbdOnS7nTJR7boH7Lj8JoI\nnjiF98gxTOO7KGr8LG3LuXAlycxSHZVey/ZWnlJLRWQzS347TavRoJwrcvPCEjevxbD43Ny9skqt\nWqcrKlhbTvOD788jE0WarQ52p45aLo9X2Gb7whso82Em/TAyZCYaq7JwbZHhk8eQq9UIKj2Jjo22\nJYDN78YQHGA0qCRkB8Hk4MjvfoNqKkmrXse1axcyocPwgIlOuYDVbcfo8VDKl3C6jYjOIM6hPsbG\nfznqi58VXL58mYMfIpX9WcDp06d5/fXXaX+IoNWvGkRRxsGDPg4d8uH16hkbs+HzGXtHriodgsVJ\nU21jdr1B19GP0O0Q9Gh59NEQJ0/2oVYrePPNMO12l/5+C1//+jSf+9wQzz47yq5JK4cn1FTiUbbW\nEszdibO8nEGS2thsWkRRIJutsrFRpNnqIirk5OMprr21wOzMFqUaWK1a1m8vM+DXoJALGLUyXnlh\nhr7xAPv2eBA2Z4nfuIpK3kFqdKhV6lTLNYL9dlqSRGJhEVEuR1RrKSeTSPk82dVVVFo1CqWCeqHA\n9q3bVLJ56sU8emWbwyfHqJar6JQdRsfsjATVuPQNHCMjOCcn0TqdIJOh1OvReTwEjhwhfuMG8evX\nyS4tET53rudWPj7+wHct5fNU0+kHhM4apVJvHf+YAmYfB59mZST/i9/y8SCXixw86CMYNJHLSZhM\nKkqlOvPzKQqFOu12l0ikgMWiQatV4A9aeOtcisjdPFadhX6PyHDAQl1Tw3lmB/lklo7Lhr3PTypR\nwGrT0dgOs3l7mdgxL/m5RbTKLo7hART+Ic798DJtjQXbzt0srxXJJ/OEZGl0WjmmwQmuvL2E1Zsj\nZBvn5OkhNkZcmOR1aktz6LRNdPUU2evneWTvTnx2kWIyzeKVu4idOv2HzMQW13GF3HQ6IBNleL0G\nxnYGyNRlnHt1AYfbwpAaXv1PL7B70oxqZZXm1WsMfunrXPvBWYYUSjRWK6JKhUbVk+12jY3SbnXp\nSFX6TpwgvbCAIIqoTSbUVitb164hk8t7BnOZDPaxMTILC9Dt0qrVUBmNmAIBWs3mB9T7NBoFx46F\nWFnJkkiUsdm0DA9bMepEmtUJFDodcqXyocdZlMtxTE0h5fO07u0MRJUK5+Tkz6UL9/WZKRYlwm++\nzfzFWxj1SjxONXW5g0osgiAItOp1up0OjZUVbNO70XTMLL10kepIkODucdZSAh33BKWyRLVU5far\n5zn12BSFcpBSpcXYiBWXukg8JvVYAW+eRzOyi9f+8sc0qjWcU1PkKhJvvLxA/7CThbevYzKraCPy\no//nHBpfkHpHRmojz9nXw+w6Oc2tdxah3UQQZWxtZlEbDTRbeVwuDZpWkVJCwmaSIygVlDbDCN0g\nW+t5BF2DkpRH0Joop5I0FBrk+8ZQOx28+e+/ReTOIu12h+SdYZ78vS8i9vcj6dRUUimyaxtoDEW+\n/C8O0rD0s71dRmjUyMYzPPfU9ENRAOulEsVIhHqxiNbhwOj3f8Bc77OAy5cv881vfvPTDuNjwev1\n4vV6uXHjBvv37/+0w/mF0GqV7NjhZseOnpDixYsRJKnF5naDsqRmayNLtdqkfyzA0MBuRJ0eh7HI\nq9+bo5AsodOb2NoqYrNpOHGiD61WgU7Zprpyl1Q8z8UbBWKJGtWGgnhbx9mz6/zWb+1gc7PIoUMB\nymWJutSi0+nSqUuoNUoEAbY2MowNm7FY9Rj1MsxGBT6vjqOP+li/PsvSGxeZvTDL5KlDWL07OXbI\nRSLnYGTYgkXIE10Io9DoKEQiqM0WlGY7BreLWjaLTKnEFAqSDW8iV6vpNiRK20lkShXT00HsIR/l\nuozBQStepwqjzdgzq3Q6MXi9pBcWkKtUePbu7XnGdLvYx8epFwo9iwZlz+H4Z9GsVtE6HCj1ehrl\n93xwNVYr2oc4gv2n4r/InhHoMW1CITNWa51IJH/fvwYE9Holen3P58ZgUHH+/Aa35oooxZ6ink60\n0wpXGTTJWX/heVrNDvYzZrRClcm9/YhGLTfe3sBs1qBTC5SVClq1PPnNTdSuCfKZMraJPqRsFpVR\nTy67zdDUEOYdDa68EyYQNCNTKNAquwwE9OwKQa1co2Mbpry2gKzb5sKLl3EOJ/HsmKLelhja1Y9e\nI6My/zp9Rw+RmruDJF2hrg7h9eo5cGYv3/m7WUxWPYMTPgxijWa3xWa0zL6BQeJvn6OwOMfkyUNE\nFqPYRicZOfkIjVwGV5+bcrmBQdYlvbiI1maj/9QpnJOTaKxW6sUihY0NmpUK5r4+pHwe69AQOqeT\nyDvvYPD7MXg8WAYH6XY6aD5EWMlkUrN3r/cD1/+pDyTrwAAKtZpCNArdLsZAAKP3g5/zLlQqOX6n\ngnApjdfZq6okkxW6goBNrekpGcZiNCoVcmtrePYfYGstTLPRILq8Sf/BHQwO2bn0yk0UFgf5ioDW\noOf2a5cY3xmgXWkQvhqm/6kdNFUmls+/iXd4iJzSQj55g5ZUx5TP4dt7kLWFLUZOTjA86qR/xEki\n08Q0Mk5bkrAauoQmQmwl60zsnSS2kSa7lWB41Iu/3wVyJaJc5PDhIPrKGkqZjMHdY2xcuU46WUKu\nzWDSq+l6fczciOEO2HBabQR2DOGfHubOS2/Q6XTQmo0UMiX0yja51VVs/X3c+fFZlLIWMpnA6sWb\nhDoi1kddxOaWadSbPPbVkw+diITPnaMcj/cuCAK20VGCR458uPnWryi63S6XL1/mr/7qrz7tUD42\nTp8+zauvvvqZSEbeD4NeSS5Todlss7Utka8IGIx6cuUOL70Ro10pMH9zk+jsIhqNHGo5gpNjgEAy\nWWVuLom5ESd5/QoDB3Yxd2uObhfs/QE6LQWtVodIpMAXvjBCqVQnm63j8xtoNJpYlDqy6+u4/RbU\n3RryYo1/8ydPEysqOPJYi/ExG/pOntnnX2MrmqdabVGMp1Aac8TvpvDtnETelpi7cJ3msJWxyUlk\n7QaCKKJzOFHbbegcLqLXrmIfHun1k2SyyOQKvAcOcuf1y2gWk2woRth7uI/xCecD7tdGjwejx0Pf\n8eP3r0WvXEEQBDQWywP03W6rRbfbpZbJ0Gm3e8c2CgV9jz1Gen6eRrmM3uOh7+TJf9YNwz/HSuAC\n/u5917aBr/2iP/yDP/gDzPdkacfGxjh06BB991w/w+EwwD/6WpKarK1BIlFGpSoxNCTj5k0Jo1GF\n2SwxMGBBklqUSg08TgmzvEZ1M00jWkCxy8vi/DKCQo1Mq6arajI0aWT55ipab5PpR/swm5Q4zHKc\nzz7F0swNFBYLtuFhtN4MhXwMaWsdz/h+sk2IFUrUpTY6k55KQ6SNEmN4gfN/fwm/W0NbVqPTFTj9\nlc/TyKZRe4qU6yUWfvoyg/sm2Zy/RbXSYIehSyGRZjORQtVo09Tb2OgI2AfMHDwzwMCOQcxGke3Z\nd2g081DTQqOOYmKcTLnI8RMHyWbrLM4vYT9xGPlWDLlag8qsweh20W2pycp0VNDRzBUZcTh6k3nP\nHjqpFJN9g6QX5kk3W+j8fvb//u+jUKvZjMVIVavsOn4chUbzkcbnw15/HBi8Xgz/SALyfiRTNarV\nFuWfkaJORHJMf/VRCmtL1AsFRLUak8+HSq/DmI0jsxmpNmXUKxJ7Dg9gcdt569I2t5dKnH7uGHd/\n+jqJ6DalQo2J3X20rQGSW3VqSjM1jbO3AzJZUFtliAoFpfU1jFKOwdBBSukmm5euUpObmH3xdQx2\nC0qdhka9iXtoBLPTimFwlPpWBqVex2OnJjBbtIxPunHqJBKvJ3jzrQ0840M4du0h37iDtb+PQN8Q\ns5uQT0cJTg1jsMsZCmqwKmq47Wo2lEr0bg+juwexWrVU6gI+kwHX1Djbt2fRG1QMHdmDwtNHo1Sk\nI8jp3zvC5L7+hxqfYjT6XiICPTOu1VVsQ0MPNW6fNubn5zEajfh8vk87lI+Nxx9/nD//8z/nj/7o\njz7tUD4y3mV6KDIpVJVtxGIDk05NPi8wMGhjfj5FMV/DW12grTFRyEuACqdehZRK0HXb0WjkJBIV\ndAZQKkREGThdeqIbWWTNGnt39eHzGRgZcyIIMl55ZYUDu2186biOBW0U+f/P3psFR3Lfd56fzMrK\nuu8bVQUUrsaNBtAnm2yySapJkZRkyYcsjaSRZ3asmN31ju1d27H7ug/WTsRsrNYbfrBjxjuODYW1\nkmxLYw0PiVezye5mn+xGN+6jgAJQ930fWbkPIEFRJCVKpthsxn5fgMxIVPyQlcf3/zu+X4uVJx75\nbdR6mdr2Oi39IJdfWCCba9HpQj7Zx1OfGUHUasmliuiNeraWYpw+eZzO4TDpVptCokynK+L1WzE5\nzWhmZojfuEFX7WL2+VER8M0eobixRs+RIxg9XjKb22zcXCUZTWGoahj69CQeC2SWlvD+giZqs99P\nWqvd7yN5EzqrFa3JxNb58xS3tvYXjy4X9khk/zMnJxFEEYPLhaP/l7vP/7n4KMhIEnj4V/nDb33r\n/Y2B33qJ/bztjY08u7sbqCrY7R70epVHH93PirhcRiRpf3Vns8rIe1kWXruB2lUxyDAQ1CK2Ougn\nD2Pt7WXt2Z/gC4YIPDRGpWsg4PfhNCjc/Ou/oO++k4ROPUWyoqXYlrGE+7jz4uvkd3ZpNQ2M3D9H\nj6XKVr6K12/hjVspPnv2FDtXV8inS2jVNv0hIysXrrLgdjPxyAPMCkbisRz6gSEEdw8L33uVB8+O\ns3P9OiP3H8FvtdIS9chyBUkno600mZ4Y4IfrS+Q291DSLZRCjZGTg5TnX8Lh9dH38CMIoVEEY4vj\nkQFsFolurYhSq9GqVtlcjlNVoC4bqTc1tLoifX0ddDqJnr5h5ktW4rUGhpEAfoNCuN+NKxygXavh\nmZxEb7MdlEc+yPfz87Z/nWijxT0yTCGZQ+3uN9B6h/tAFNCaTLhGRnAMDJDf2KCwuYlSLtM74cXg\nD2J0Wli9GSU0GmFytEWzo2LwmJl4cI7heoNcvolks/Pyf73J0P1HGDk6QmVjBe9AkMjMGKnNGO1a\njUomy9DhQSS1xdW/+3uGjo6jb2Qx2cykd9IERyMk1mIMHR4kGDTTPxZGY7Jgsxl46dw21WqLf/21\nSfQGHQ3ZydraG6xGb3HmqRkch0/gO32UV5+7g8ms59/8L1/A65BQ91bpbtwgmpYJBP3MnuhnL5rC\n5jCjcfrYKaks/9cNHLogg58dQq/W0Blkdpe38PbYeWxwgKGpPry+X87c7r2Ucrvt9kFp7V7Byy+/\nzEM/tfK8F/HQQw/xpS99iWq1isl0d6wcflkUolG2X30Vtdtlut9EPmCl0NQyd7yXWKzElStxpsds\npGMpwuNmgn1Ocuky7baC1GrisOno67MRCtlo1wV0NiuNXIapmRAWuwk9LWJXryO7/XhNDpZX83zx\nt0YIqTG2zm+yvZShUWsyNNVL5NQJxmd6efrZdfbWE9QLBUSNyF45w+2Ig9HHzrC+lsMa8KF1uHnj\ndpbjT97H0R4fjUyCbXGP7PwbWA8PolYbhE89gAokVqMIQO8DDxCcmaa8E2PxB/+FZLqOqNUy+dj9\nYHHRP+xCVJrU8i06zebPzVxYg0ECc3OkFxb2DUMtFgJHj+77SS0tHRxX2dtDFEUiZ87QyOfRyDJm\nn+/nTtH8OnA3c6RHgP8NmAR+DHwW+NVmPN8H9frbjLDR6BCJOLhxI47fb8ZqlRkZ8SLLGpRilkvr\nm7hdeiQBXB4z1UoTv8/BXlJPKVOikKuQTizgrTQw9Q5hSG+jWsw4e7wUdEEuf+8KOpeXVj5HaHyA\n+77wENs37uAOeRkO6zAaTBx+ZI69nSJDjmFaOjsri3E0GolOo0mlqOAK+qgWimzsNTn85MOY1gts\n75SJrcZ48uufImJrIgw9hXdmhmS8iMVippHPk7j4Kna/k6nf/R2+8uUJXv2HHMbxKfzGOsryBRRR\ng9bhxDZ9nL//x3VSqQpBh0pv2MLsjJedF39EZi/H7dtJlLbC6NkHcc70sLmZJxKxU6+3ee21GO22\nQjBoJVNUWGsqVCWF0xENOovlnnJf9fnMJL39jDyqJbu6hihpmDw1QW1zkUYmQ2Z5mWoqdZBxkfJ5\nbH0Rao4h/vHbV7C7LbSNVV65lMLuMFDfXGbn9jL3PT7HTqbB7qVbRMYjhNUYu9cvUdtep7Z+m/sf\nfpLisTPsbCYwKhVGhmy06zUq+SKyViL66nlO3H+c+TsZrC4LwWEHhyccrNzeYf7iCv7+HpYWk3Q6\n4HAaqDZVBobcZK0j3PcVC/nYLqLFhe9QPzeX6hSzRTLxHG67SPn2TQqZIgP3H8MV8iKKIoNHR9mN\n5bEN9PPyC+sUGhKIGtJ3bjMx08dMbwd9u4BWo8GurVNL3KQqZVF9ZxDED977bvJ49k24um9Pt0kG\nA7qfsSj/uOPcuXM8+eSTdzuMfxbMZjNzc3OcP3+eT3/603c7nA+E/Pr6wepeW04iFlqINYGyfpjV\n1cr+5KQq0TsSohTb4ujRflbXi2hEGD86xAOPDOJ2mzh2rIdUyoZt3EFzZx3ZIzI0GebWhaX9rOGw\nA3snwf3jJsaPWrjxD1ss3klTr+975GwsxXGO1VC6KunoLpV8kVyySEcBb0ikXKignRji8f/uS9y4\nvsvLP17EHQlx8Y0S4o0cs8MyaqvBbjTJ5OMPIRoliqks0dtRNq7OMzgSoGdylGqrgmNggOCJEwix\nHNZwL61cBjGxTPbVEnqXm1yrjc6oxz87+74lU1GjwX/4MPb+/oO+Pq3BwNL16+86tpbNIgCuu+i3\ndDfJyDXg7K/rw1utzsEUjaqq1GptBAHOnIkwPOwkELBgs+lptxXEioWcXyG9EUcrdHHiwSI48E8e\noyntkNncRtQIBPrc+KfG0NvtSJUU9r4+TMFefvxynFq+SKcLjVKFV/6fH/LoVz/Nk5+bYPHyIunF\nBvrRI5hd4xya83LnB0vECyKRIS+p7RS1Ygm9pKVcr+EeHaEQ22MjcRODxcJEOMTDD56irXTZ2shR\nESRiN0qM3P8Q6YvnufbD5zFZjLSRWHnmOcbOnORTMxLl3U10Jgvy6fsQ9CYki42ddAdNu4I5Oc/C\n+S1SLgOmeASbxUa1FKNZqaG320lv7zE5V6Njs3D58i7pdJVbt5LU6x08HiNnzkRIJqvs7ZWp19sH\n4nL3CkIhK81mgLU1Ga+/H7fHhFVJ0Yruy8mbvF6q6TSdep1Dn/kM5p4euq4+Lj+zhs1rp9KWeOX5\nJRRFx8Z6AbO7iV6nQd+t8ujZYYpHA/jDbpZ//Dy1+C4mrw+NVkLdWWX6AS+2YpHc1i6lPTfOgQg0\nmrQ7CrlEnr2NHzIwOszwuJe9dIF8poRZb6KjCHz3/76ARqvF1eOkWjbSrgcpZ0tcvJLA7nbi7u9j\nU1XZvl3k/pN+5sZOodOKVJeus60ImIYn0Cgtln/wA9Rul97Tp3not88QqxipiimyqRRGk0zvzAR7\ne3FOnBzD3EzgHBqilsnQaTQo7exQy2Z/Ka0Qo8+HPRKhuL1Nt9NBazTim57G4Pr5PlEfJ6iqyrlz\n5/j3//7f3+1Q/tl4q2/kXiEjP01iHQ4Doiig06nYww5yDRmv14jNpsce7mFlfoe9a68SHuphYLqf\n8dk+Op0u5XKTwX4LzVIJVbKx0u5lctqNmN7E/OAAeqMek1gjt5mmsLVHMexkbTlFOlkinyzgcOoJ\n+E20qxW6Jj1Oj4WLT1/BYjdx8oFBbBYNowFILq9RU2QyTQO9s1NUyg10OomN9QImg42xE2eY/vRD\nKDtLbL58jlK+imt8Ao08S30nimzQYR8YwDsxgTUUwnpzhc3zr1LeXEVVFFZ+8iJas43Zr36J5Pw8\n5p4eLP6f75Sut1rBui+m2K7Xkd5jAlGUJMRfwSPsw8S90z32S6BabXHx4g7RaIGtrQKNRoe5uQA6\nnYZw2MahQ+4DR0GtVoPTrsNlVNE6RVRVRGwWaCe2aUiP0gpOcSgSopXeQ5X0dEsZ9q6c3x//WlvD\nfmgMj9fGYrNFt6Vi8PphJ8PijWiSi90AACAASURBVE2s2iAby0m+9GcP07H4WFxI8/r3buL02tiK\nZjh1dAa9dBWlrkdoFBk4dQzR5MBRjxG7fJWW1ooiL3Pic6exBnowCnWa5So9vVZqDYG6Lczhr3yZ\n3NoaK/M7mK1Zcgu3MHu97F15nWKrhWQwUC036HnoU9y+dQdNapvk4gqVUgej0GT5ldeZODpMYGyY\nWLKNxmRBVQCljVYrsrVVRK/XHIjZpNO1/dqrSYssa97henyvQBAEhoacRCI2Wi0Fo1Emv9EkIwho\ntFocg4MYPR66ioKttxfP+Di376QJeTRYe0IsLaZIJaG3x0LNr8HvdtB/PEBxK8oL/+n/wGizMDLm\nRWfUIVqcLNyKIkkSGl0Cz8QklmAYwROhVBfI1bQ8/O/+FVtXbzF0+iTLL11AauQpJxLojR62NzIc\nPW5gbLqH8y+tUqs2MVt1DM/5WFuK49HbmByx8dLLUbKhHnQ6DaOjLlwhP+1SgY5Gi2loHGljlx6P\nlo0fP4vcbaA1mShsbpItqeinHqBU6RLdSKMoKqFeB0OjvbgODeOSg5RisYOVqdrtvmta6v2gqiq5\n1dV9cTxZ3j+vbjcmnw+jy/VLNcHebSwvL6PX6z/ScuKvC2fPnuUb3/jG3Q7jA8Pe308xFju47mw2\nPb7hALVABK29gtW671nz6rU9DCMnmDvSwWzSspZUef6vFnjwwT56rXWkzCYGTYuuZOCBqVFyXZHd\nopbla5v4+nzozSYCo8fw5pMUmlqsLivZvQxqu0m10KZuk+kiIhn1DA3YiIyHmTkcIHnlIopGpWBt\nsLaSpf+RR7h5aZN2W8FpERBMCv0BGclgwGi3cO1HP8BEFb1WRzW+QiWRZOJLv4s4OYjebscwOMHK\nTgew4BwZInHtMm27jc1XXkNRuiBUaOZztM0y9UzmF5KRTqNB8vZtChsbIIqY/X4QRXiL5AkCzqEh\n9Dbbr/mb/Pn4RJKRaLTAxsa+BG44bKNSaVEsNvnsZ4cJBt99wquJBMETxxCuQC2TQWswYBs8RKUp\n0m21WLi6iKl/BIO2S+z55zCZtLjHp6mm02Tv3MI/cwZBq0OQDXR0VhxDQ4weDuBya/nd//G3cfSF\nSK5Eia4WSe3kkIp7hO0GSjUHZ/7lb+DQtShVFWodLZ1SjlpNIfyFz9ASZLrtNuvPv4iEwvZmhpP/\n7g/40fNxFt64RDcbR2fS88SnB7E5srTbCo1cDq3JhK23l2oyic5mw390gK1YBoe9jxsvLKJWq1gs\nduqVKsYBJ4Wt6L6MuM9EIlnGGA6gaPfNqoxGCQEVj1OmUqqDqKFa3XfEHB11v6dR0r0CSdIgSfvx\nmwMBbOEwxe1tRI0Gx8AAJq8X2WKh3WhgaqTYe+UFdupVVtZLhMcGaOkC2L1GtJoG7oid5/7iP1NK\n5TFaTNSKFZJ3luh/8AGSGzEknRatXod5YAhFcPLsX7/IxhuraPU6HvytBzn1mccpbaww8cgJREnL\n/NUNdrbzGO1Z1s9tY3eN8/XfP0k6VWFwyEmt0mR3K0vMVGE8bMb7xQkyVQ29ERfhsJWePievvVZH\np1GwN9qYHXYyKyvU9nYwRkI4+vupl2sUK3uMzrZwhAOMvTm2WynW0ekkvCE3hRsXEUQRrclEp9HA\n4HZ/4IxGeXeX2IULKK23G4Xr+TyOgYF7iogAvPjii5w5c+Zuh/Gh4OjRo8RiMRKJBP5f8CL7OMAe\nidCp18ksL9NttzH5fARmZ2lLJoxmPfl8g+FhJ4VCg5s3k2zudlDVNrduJfF4TBg0Le48+xK06kxO\nejEYGqg7CzgOneJOWcQ72MsrL66Ry1awuq2c+dQIXqudntlpTDroarS0OzD44H3U2iLesI9aMsG/\n/MZpiou30PXsa42Uouu0CgqF1QUeeHiK15+5THp+G5ImZIOB0//2SQaCEtlOBtotSvkKllAYo9WM\nzW3H5LTTaCqsLGVYWc3RViUme0WKlQ6dtopss6MgUa60KZZaqPkWEe0vlkRILyyQuHHjQHWy227j\nGRtDabVQmk3skQj2gV9s5/DrxieSjCST1Xds74/xCqjqez8AO80mSrP5Dr3+SrVFpdpGqBWJr2yh\nNxuZPjWK1ubE5DSAKOIcHqaRz2O3SZz+8uNcuxilWigyevQQM/eFEB1+nD470Vcv0ZEMtCplPFaV\nO6/epNtqYXfbsAhHGfYraAZn6SoCHkOH1/5hkcUb6wwdHqJvtJe+qSHmn7+I5PZzdb7A5XOryGYT\nVpuJnfUkl6+aeeToKKZuEYPPy9qzzyEbDXjGx8lvblKObePz96KL+NkNutnM5rGYNHQ1WmSzEXeP\nG1EQ8NshNHyI0EOPYB8ewmzWsr2eZPP6IlK3S9BupNKCiXEPw4dc9PbeXSb9YUJrMNB7+jSlWIxW\nrUZ2Y5P47UVyq2to9DpkiwWvU0smpUGnVVm/eocH/kWERFlhd2eHgMGHZLJicnWpFMu0PAb0dis2\nr52+AQ96i4HgsRMI3j7+y7eXiVeNyIFe2vUm3/3LZ6h+6SSPnnKyvKNw88Id1q/MM31sgJlBJ7VU\nFoPPzJ2tDLJOIpuuoCDRP+zGamtRj8fo85g59qn7cLpMBPq8yLLEkSMB0sur3HnxGna/m3AkgKae\nR2k06Ha7iKIIGh3R7TKCzkjbFiLk76G318aRk/04ekPo1RnSi4s0i0Uc/f34Z2ffV8PlZ1GMxd5B\nRADq2Sz1bBbtB1B1/DjhmWee4atf/erdDuNDgSRJnDlzhhdeeIGvfOUrdzucXwiNVotvehrn8DBK\nu43OYtkfWQUmJt4W3UskKrz2WoxOp0ul0qLb3fdOoVqkmM4jCPuZXY1GwCPIFKNpvCEnF14tUdeY\nEKw6Ck2RrWQHi6eDZPGjHTXw8OwJGm0Vo8NOK5dCok1blbAa9WTW1slFt8gnsgxNDeB1OmmVShx/\nxEHsikRRsWGz6fAGHChLF9AGTlDbWAJRxD00SHl3j26rDs0KtVSdqmhj87m/xTc7x/pOk7RlAMHi\nwq7t0Cj3sn5rA73ZhMbqIFfVUOqa+XkF006zSW5j4235a/YFzcp7eww/9RTyx8g1+xNJRt7LMVCn\nk/Znz9/r+EiEwuYmtXT6YJ/JF6CityCUywA0KjXy6SKSrKFVqSAIArLRiGwy0TM1Smsthf64B7Xr\nwihDQ9VTyrbp7+silRM0VQPd7C4Wr4tw2E49n8dgBKPdTLxYR4gVaTXaXHzlZewGGwaDjNqss/7y\nKxz/+pfRiR08/V6u7JVpVmpodHqMwR76dEZq9Q4Wvw+j3s9uMkVDa0fSqOhsNgJzc+TX1mjtRXH1\nuPmNf/MEN5/RUe3ImKQ2ZrGOd3ICrdGIc2gIayhE8NgxSqUGqWSFgbAJuRlkfX4TvVzlwRMRjo4Z\ncPb+6rbTH1fIJhOmvkH2XrvG68/f3k+zOnSYtQoCXXomxzBrWthtftYX47R319Gbe/GGDdhdZoxW\nI2qzRj2Xo1jrEugN4hoaRNaKWAIBAkePEa8bSKWqlCttWrkaSrVMu91hfTnJjDvD7NFZwoEZKo9P\noJO6tOKbGANBdtNZ/F4zCwtJJJOZSFhGbub4j//rj9Cb9PzW75/Fm4wiSQ46FT1ahwO3rk45t4Gp\nsIHYSaJxjEO3Sz2bxeh2IxjMyJ4AN1YarK8X6O21YTBoCR/yotHpEDpNcmtr1FIpuopCIRrdd/R1\nuz9QZuN9m1zvsaxIo9Hg3Llz/O3f/u3dDuVDw9mzZ/nJT35yT5CRt6A1GN5TcfktTEx4OXUqzLVr\ne0iSyPS0l/5+O816jq6iUig2cDprVMotshWV0EMidARKlQ7lhkippCLLUCg0iUbz3Hc8yF9/9zpq\nvcrRI37aV26RjGXxDUaQtVq03SaqIFEvVfGHXKidFrXdKLNfuQ+XUeHxY3oM4TN0RC3FWIx6dJ5a\nrsDwE49TXFtBK6mYRyIYXS5cQ0OUkhl23ojSrtWpb60yODzN8vUVTp4+iZy4Q7EGh8MRPKMjdG1+\nzM4Qd1ZL9A37379cLgjvvFcFAVGjeUcfzscFn0gy0t9vJxotkMnse9BoNAKHDjlxOt/7Qrb39RG6\n775907VWC5PPh//wYSxVDZvdJs6Qh1IyC50Wg6dPklm4s8/ORRFrby+OoSHathA17QbdTpeaqmev\nrHLimIvK0hu0k9s0ym36wkGWr9/G5HBQi+WJPHqaTK7JKz+6gb6/xROPBKjkCuidelyT04iyQKNY\nZGdxHVdfkMruBv2T09xesCIKCuV6F9nTy+ioA8tggHP/dI3myh2UWo3Jw0G0NgfFtRWSt28Teegh\ntl56CcloZPL+OZRWa38UV6+nU69Tz2ax9PTgHhlhb6/Ma69tk9hKkl1ZwR/xc/YLRxDLaeRWhsLK\nIs7ee1dr4edha6vI8pVFqpUWZovM6kKCVqOBWMszZYtg0ruxtbYZ9ncJzvRi8IfIXTmPWDMwcuow\nKxduojUawerGNzdHt93aH78TBHYuXqDnkccZH3dTyeYp5BQUVUFvszI224fBXkGRzaxvRrn16jzN\ncgVXj4uBIxFa1SJmScMjT05RrSk0ikUqe2V0Fgtmv5e6aKSRjLF64zVskQgGhwONXo+oEajvRinX\narRzKfxzcziHh7H29GDsP0Ss6UG6mqbdVlhaTGOUVfSaNifmpqnGotSz2XeM+OXW1vYF7z5AA6s1\nHCaztPQOI0STx/ORqjp+GHjllVeYnp7G+R5ifvcqzp49y5//+Z+jquo9VzJ7P1gsOj7/+RGGhvad\nZGOxEvPzSfqmPMg2G06NsG/mJmmwetw4vDZa2TY6nUQuV0dVVcJhK90uOBxGnB4TX/z6/RR299i7\neYdCro5zeJh8Ko8z4EJDi7FH7kMvNslvbSO06pg8Xqz9A+j0GhqCgef+80sUc2Ua9Q6nnphjLhDE\n02oitht0m3UEQcASDLL89LOkqjKrsQb5bA3zep4nZo5jtplJ7uU4OjfNXtNOraGwWdGgMbroJBWc\nzu67FFV/GpIs4xwcZK9QQO9wgKqitFo4BgZ+LrG7G/hEkhGHw8Cjj/azs1OiWm3j85no6bG8700n\niCKesTEcAwN02220JhOCIBB2gstlYLDXTG55EaFexhoO0f/Qg9DtIhmNWPx+JL2eUJ8RNBLRaB5N\nR2Vy1oZL12AjHscZ8iPt7tLjl+gLHSZT7HL/k3O0RAPf/r+exj04SFXRsb1TxeG20W63aHX2exlc\nQ4NYPG52t7fRdNscGbeS60xx6404haqKzajSP+zj2//xIrsrUc6eGSd66TKlQo1GqUo5HufQU0/t\nNyuurCCbTOgsFjyjo3gnJ7H19lLP5UAQMDiddFWBG+fXyOcbgIjS7hBbiGLVd5kOtmkWi5gDgY/w\n2/xw8EEeuqqqsr1dRGs2I2lF6tUmGysJjGYdvb0+cukSS4kSJwY12ANeTHqR5vYSzkgv1XSSiSfP\n4p6apZIvERrwIhYSFJbn0TscBzberfg2D53wsLUQpdPwo69ZGBhyc/rBfmQZsoqV7axA1ztMU1si\no7Oxdb3GZz8zxuLryySLO8Qy7As1jfoIjA8jyyKmZob1F1/D4TRh9vuJXbyI0enEPjCAf3qa+PXr\nVLM5Srkytok5zDNHcQZczL8UJRSyYbXoSGwlaZbLHB4xo0muktjaQmm331GWUd4saX4QWHt66D19\nmvSdO7RrNcw+H96pqXtOBv7pp5/miSeeuNthfKgYGhpCo9GwtLTE2M94ldzLkCQRSRJZWMjQ6XQ5\nfboPp9OA78ufIb+ySLPeRN/TT7quY2GlyLFjQR5+uJ/d3fKbn6BiNmsJBi37xnqCREvvQt93CIen\nTqMlYPa6ydZUULUYtHYGHn2UbjZBu1bGHDnE9oULmPoGWc8bKWTKIHRxBN2sxuocznaJ/f0/Yg/6\nQenQLJWQDAYqDYhtJNHJJgwmPWgkNjYKhIaCBKQMrfgOqiSzshAFwN7UYQuH6O+3/8K+PffYGKIk\nsf788+RWV9Hb7bRrNSS9Ht/U1K/3C/kl8IkkI7DfcW2z/XKiLZJOBz/zoDQaZYwjEXoO9R08mN/v\npRYKWQmF3tZOKO/toXa7+xMakQjteh1HLc7IsRmalh7+6UcreCamiOdhfSnJ0SOHUawe1i/fplCo\n4zSD+cQQg2dO4Ql56XTaOL06/pv/doZLl3apFGu43EZevxTj1kIeTUvgyu0ij/7GkwSdYO1xoNaG\nsQaDpObnD1azSrNJPZcjvbh40Kh5EHOxQbG4/7LRmk0YHA6qqTSpRAW1z4akb+L8qWYnVVVp5PN0\nFQWD0/kOieKPAxKJCouLaXK5OoGAhdFR9/tmyAA6HQVL3wCO3Rjrd2KggqKo9B2fI9O2oOgFHEfG\ncZrUg1FVUZbRTZzkR//v62g0AjavC1OhRfL1BUZHA6jq2xMo9UwGl6ry+18bIVkSkHUy/QN26oUy\nhVoXnSSRb0gUK21yJdhd2gFJRzhkpVRSOHrKS0OoYJANCJ081IqEentoJbYx0Mbs9++XR7pdyvE4\nBpcLnd3O+Be/SKnawTQwxrXrKTo7iwwfHmBoyMnt2ym03QbmZpKBiIN+R4NidBdJp6NSKmH8qYZV\n2Wzed/T8gHAODGDv7UVptz92K7EPAlVV+dGPfsT3vve9ux3KhwpBEHjsscd49tlnP1Fk5ObNJPPz\nqYPtnZ0y4bCNuuimHTqMoKg883KUQiGL0aglHq9y+nSI3/zNMfb29gmJTqchm60xPe3DaNRSqbQQ\ng0G2N3P0D9u4s1JieSWHonR5+MEIWk0Rh81EI51gd3GVpR+f4+SfzFFpdQjcf5pqvoTR6cBos7A2\nH8XrcyGiUk6lKMZiBI4cQbLVQc2j1kqMzkwxev8cDZ2dgUNO2kur1CstRnpDdLsRYptZLFYdk5Me\nxsd/cYZS0ulQVRWNLOMZH0ej1aIqCqn5eazhMIY3Vc7vNj6xZOTDhiAISG+aDNVy+7bWBofjHcRE\nVVWqqdSBip3e5cLo8ZDbilEsNqmXquhlGdXoYGevCBYXVSVDqVKj02oTjZXo8Y7wxP8wTWY3g9tj\nQtXqSWQUlCZ4IxE0kobNrQovvbhJKZGixyMhOzzIRj1dtU2nI2B02HD1mTEFXdRlB6lqG9PEMYSl\n67QrlQOxqfdqRDQYJIxGaf8GFEXs/f1ojUbcbhmr344tNHNgKd2qVtm7epVSLIaqqpi8XoLHj7/D\nB+FuIperc+5c9IBcZbN10ukqZ88OYDS+uwtdabcZC8JOtMn0k49g6d1A514jONZPTdWS3k5idTlw\n9gZpbi2hNRrpyGZ0Pj+rywlK6SI6g4zFbkEyGnFNzRJvdQiEHJiEGmqzhi0SYefiRZTmFmFfAIxe\nYq++SiOTwuK0IGjMzI25uKao3LwRp1ioEegzYLbquHGxwOgcDA87GRpyEF+PceYpLU6LgNvgRq33\nIogirWabutZGo9UkEB7FojVQz+co1iViywXS6Qpebx97e2X6+qw8/vgg0UWZZl8Xu75DN7uz3yho\ntR4IJnXbbWSzGf/c3MEIYDWVIru2RqtUwhIM4hgYQH4PVU9Rku4pD5qfxo0bNwCYmZm5y5F8+Pj8\n5z/PN7/5Tf74j//4bofyoaCQLbOysEen1T0w3ux2VVZXczz4YC+lUoPXX9/D5zMTiTiw23WsrGS5\nfj3B4KCTxcU0iUQFvV7ic58bIRi0EA7b8Pst7O2VCAYtOJ16LlzYpbfXxli/AalVQDZKdCyD5Itg\n6rVy4utejEYtrXaXvWgSWafH0O0ScAkMhx1oLaMUt7bwTk5SicdJ3brFyG9+FdW9jM1jw2qSiN54\ng0Khjqczg7vHQzWVopPeYbLHxlh/CP/hKXyRD26lUEun0fzMPdiu1WhXKv8/GbkX0axUiF+9Sjke\nRxBFTF4vPUePHqiPJm/fZvf111EVBUEUsfT04JqYZiNaZHNzhU61QujIYVR3H9RKuLx1DLE6R4+7\nKJcb9Pba0ekk/u7pHdJ7BcamZPq9bdqmEq/8h79k+P6jPPYHX6JUatIVNCiymeXFXWaPm/if/ueH\nadUbDAR1VDNZqsUqNzZ2aVXKaDwhNlYUJoeP4mok0dtsiFotzqGhd/2PsiwxNeXjwoUY9XoHSaej\nZ2yQ06d76e1950WbWVwku7x8sF3c2kLQaBh49NGPRR16b698QETeQipVJZWqEom8TUbq9TapeIG9\n1y9R3o7S7XTYLXcZevwxCqYIO9E0u9EUatfIkROThMYHqPV4ufp6lM2NHGwVaLcFhmYGUEUNa5tl\n4ud3yefqzERg67UL0KoxNh1CkmWsvb3UsnlWUxKd5WVu/fAZzH4f4X4vXl8NuZgnMjjOaxcsGLsi\n42NuhK7C7P3DuL0WTpwI0dNjIW6vsXluEU2+RVt00tT7MPn95FbW2EmlcUf6eOGZBQw0GOlRcHgc\n1JoG7H19yGYzAJlMnakpP0bFxubONTql+tvnJZ9n4FOfQqPVHpCTt4hmNZ1m44UXaL3Z4F3c3qaW\nTtP34IP3LPF4L3z3u9/ld37ndz4W1/OHjbNnz/K1r32N3d3de9pvByC/uUliLUZmaZNyqYk1GMTk\n8yEIAqqqYrXqmZz0s71d5saNOE6nHqtVx9xcD+vrWXZ2ihw+7OPUqTCCIHDyZBBZ3r+OQyErsixS\nLDZ57rkN/v7vF/jik37euPYGYY+GaztljA47hx46wXrOwNTYMEpiCWMrw961Nzj91CxOcYf0pdu4\nM2YkpY5rdBaN0mTo05+m2+2iExQcU0ew6LvcfvoFWg0Vf9CFpl2lHG9hHxigvLODIAj0DIdx9/1y\npXKDywVra+/YJxkM+/1tHxN8cp4aHwFS8/OUc2VKWg/1RgdrQUFeXiF49AiZ5WUWvvtdKokEstmM\nLRymvLtLXbLR6TuCU7UiSyKi0ci1awkuXd6h0VTx+OxIWpFg0MPIiJtvfesiO7EiIl3azSgr3Sp/\n9N/PYvK4yWUqrF64yeDjn8bvN9NodFDcHq5ejiFptXz1Xx/l1WdvsnRlmUS6gdlm5uTJENpansDs\nNOlamYHZEGqzjntkBPv7CDgNDjoxmbTE4xVEUaCnx4LH884Vr9LpUHgPc7tqMkmzWET/MWDbnc67\nxbnUN8sub6HR6HDhQgw1t8fCs5dRVRWPx8jgoANd8g73H3uAV7oCWqOJYNCKImpZXMrRaHS4eLNM\nqdTFqFYo5soMDPuplutotFrmbybxeWTSi1HyW0lsDhP5koJlawvXyAj4BkgsrKJJbqO3O9DoDSRT\nNTweE1a5yfAxJ43WYVBVctkqiWyHrgputxGfz4xW6NDcXsHpkFFdQ5y/lCB65w2MbheOgIdjTz1O\nYidL5vYdAMaPTrD00kXCDzyA6vbRbCoIAgQC+0Ta9KZKanZl5YBM2/v63tejori1dUBEDvZtb1P9\nACJM9wpUVeV73/veJ65E8xZkWeazn/0s3//+9/nDP/zDux3Or4x6LsfOpUuIGg3hsJ2blzfJb24i\nGQwY7DYGB51Ikkip1ODSpRhOp5GLF3e5des64bCVz31uBFWFZ55Z4/OfH2Fw0IXV+s5r3mbTs7tb\nZmkpw2NnIwTbC2ysz7OXd5LPN9HQpbq7jTU0wxvXNuknia26w9f+4FPo1CY7V68R6A2SzLSx69p0\nozuYBw8x8tn7cPWH0Gg0DGoNLD33Ij6PHpPJisOuPxCbdL7Z+yUZDL/SOK49EqG4tUUlmQRVRdRq\n8YyNvafD+t3C3SQj3wD+1Zu//wXwd3cxll+Idr1Oudzk8kKN6OomXaWLwaTjxJlRzP4dMsvLlPf2\nUJpNOvU6SrOJZ3ycXHSXtttJS2NidTOP3avn2rVN+oa9XL8W59KVFfoidr785UkymSpmsw63x4Qs\ngaa9v52KF3H2BlFlE4VMCZdZ5bEH/az2WVlZy+O09fPEYxH24lVW5mN0VZFms0M5luG6LHLqqBu7\nVUtB68F9ZBCH3fC+Y5fttkIiUaFQaOByGXC5jOTzDTY28litMm73PikRRRHNe7yk7nZKXlVV0ukq\nlUoLvV6LJIl0Om+PsVksMi7X270L8XiZRKKCtZKjq+wfl07X8HiMSFKJbipFqdSiUGoR3d7BYtEh\nigLRaJ7V1RzdrorLKqDUahQrHWStjMuuI/fiEpP9PcQuR2nV6uiENsWsFXpt1HM5LLNT9M8JqFst\nlHaHer2N2lWp1toEwm7MdgOzswFef30HjVbC5TEwPe3j+PEgVquOaiq134RmMnPuappLr6yBqmIV\nWzQcVua3FMRsEUGzX4prKSIDh3xUknuYh8LUalV8PjOgkkxW8PnMBE+cwNbbS6NQQGe1YgkE3tcs\nq12vv2tft9N5h0PovY5r164hCAKzs7N3O5RfG774xS/yzW9+854mI7Vcbp8YCwKjvUFEcYDtzRwm\nncrMXICRkf2ep5WVHH19dpaXsywtZeh2VUqlJplMjW5XZXzczd5eBY1GJB4vMznpPSDrOp1Eo9Gm\nUmny4GEXN//TdZRGg1y2QqfWoFWUyO2maevKpBNFBvotlOJxZIeTej5OPb5DTmegI5moNmScegmd\nc5DFjJFPzdgPSIe3x4EaeWeZW9RokHS6d/Ru/bLQ22z0P/II5Xicdq2G0e3eV2L9GOFukpHngL9+\nM4ZLfMzJiCCKpEoiG0tvW6HXq03uLKSJDLpAVTH7fBRjMVBVWpUKrWoV9/Q0erOR+I5Mva0h4rKS\nyW6iN+Toi9hAI5FIVFheznLyZBC/30QkYqNRa6FVjKxfvIao+pFtDmSDHo9VoBLbwriXYMpk5tGv\nHcbVF0LTbfHDf0xAV6GrtNFqoFwqEVtu0Jl108qXsHj8WCz69yUiitLl6tU9Fhf3O9G9XhO5XJ12\nW0EQBIxGLXNzfsbHvQiiiHtkhFo6ffACEkQR1/DwQQngo4aqqty4keD27RSNRgenU084bCWbrdFu\nd7FYZGZnA9jtb5ORcrlFXzvLfwAAIABJREFUs6mgt9kOUrpqV6XZVNDIMoWqyrVrcbrd/WxKtdom\nkSgDb6ftyw0Rq92N2QDBkJVaXSEUstLpikg6mVa9QbvZwqDdJzsaWaab2iZ14Tw9fS5MShGDxUyu\n2EYWFWw+F+EhD32jBk6cCO7Hp5dwuQz7QmWA1mRCMhjIV2B9JU6jVEJptzF5PdyZ38NikRh0WUlt\nZ9BoRKxmCcmhIxjwoxvzs76eo1Rq8sor2xgMEidOBDl0yI29rw/6+n7hubYGg2SXl9+hVyBbLB+L\njNiHhb/5m7/h61//+ieyRPMWPgmlGlGj2deuUVXaqR1GPTaGe1x4xyMERt/uq1BVdb+vr9oiHLZS\nr3fQakW0WpFqtY0kieh0Evl8g3q9Qy5X57HHBnE49p8XIyNutrdLNLIZfL1e9kp57G4j25s18ukS\nPTM60lUVk8eFXk4yeMhH26ijllPQasBqkckV6qxv1Ki3YO9qjJaUJxy2Mj6+P0Rg7+sjt7ZGu/q2\ncKc1FHrXOPyvMpItm8131QjvF+FukpGtN38qQOcuxvGBIOl0VLp6BFFA7ap0lC71egdTV0elo6PQ\ntNIdfxhbYJfO5jy1bAZLTw/BqRF+8Owu3/neMuVskXJdIRC006pW0Oj0+Dx67CaRiWELtUIJSdzv\ndahU2oQCeo6cmcJmEshrZQJeLY6Am3ouR7dZh2ad0p3r2Kw69m7fRii20MgytWgUo9ZASQNWuxGr\n18HmZpKZo5H3HAPrtFqo3S6FQoudnRKdThetVqRWa/Pyy5scOuTG6zVRq7V5440kgYAFh8OAc3AQ\nUaMhu7pKt9PB3j9ATe/m6adX6XQUBgedDA050ek+mssslaoyP5+k2dwvz+RyDZpNhVOnQlitesxm\n+V2x2O37neYNnYOe8UHiSxsIqJjMOpyHDpGpWg+IiMUi02h0iEYLzM31sLq6X65pNDoUVS1DhwcZ\nGrRx5bUNjhzv5fq1OFOHJ0neuIZW28GgE9DodBjdbvauX0ZqVYguNwlMTNKtlgmNmLF6bDRbKtvn\nzuGdmMA7OEgyWWFhIU02WycQMDPQa4R8HL3NRjudQKdRUBoNLB4XWpOFAY+ZwUE3YqfBwBEtfWEr\nDmOFrujBPzXO4l6VaLR4cA7q9Q63biUJhazv2dj7XrCGw3inp8mtrqK0WsgWC8Fjx+4p9+afh1qt\nxne+8x1u3rx5t0P5tUKWZb7whS/w7W9/mz/7sz+72+H8SjB5vfvmlskkwL78QI+JmmDk+efXqVbb\nRCJ2BgYcLC9n0eslFEVlYsJDJGLHbJZxOg0YDBImk3yg4F0sNkkmq7RaCtvbRRwOPVNTXnYvraK1\nhlDkGBYDeP12FK0e28g4e/Euc6cOoYnVsbodZGIbDByZoOwxsbVTZenKOtZgCM/wIOevb3Ds0cNE\nV+KYlAIGsY3J76f/4YfJrKzQKpexhkK4hocRJYlyIkF0Pc3yUpa2qOfQVJiRUQ9m8we7Zz/u+Dj0\njPxb4Ad3O4gPgvChMCtDacqJJJVcHY3Hj28gyLkLCeZ/cglfwIbdbeHIsSc47AH/3ByZisjubgmn\ny4zVaqBQaDE+7iafl9HLGjLxIj6vzO3zN8gncvzm73+Kzd0ma9EyR46GODrrRapmGZ0dQCd1qezu\nHGhWADTyeXJraxS3txkI97O7a6Y7NEQtk2XyvjCnHpvG79LitQoY63Hg7VVvp9Uiv75OdmWFzNIS\nTcmC3hQk6O+nVBdIJCp0u1CrvZ16r1ZbVCotHI79Uo9jYADHm6O+q6tZzr+8fVAWSSar1Osdjh79\n4F3f/xyUSs0DIvJ2vG1yuQb9/e9dG31r3Hd1NYtz5AhTh4ax6rsMjPdiCfawcWGX2Vk/29sFMpk6\niqLi8ZiJx8v099tpt7vUai2GhpyMjLgZGnJh0GuZPz9POGzDZjegO9VPaXMFa08Q75Fxds+/QuL6\ndVweH3a9jUIygzvgQGs280//53fodjpMHokwnE7TMbk590qMQmG/ETcey7JwPsuxYRGhVSMwNcYD\nBj9do5NsoYPRJKO16FhcSGCyGBDbCgZNHosxg06vBUGgUGi86zzUah2q1fYHJiMarZbQ8eO4hobo\nNJvo7fZ7cnT3/fD973+f++67j3A4fLdD+bXj937v9/jGN77Bn/7pn96TWSDZZKLv9Gkyy8tU4nEM\nbjdd7xCvvr6fIYX9Z9GxYwEeemhfd6S/P0c2W+fKlV30eolDh1yMj3sOFh5vYd/vJkGx2MTp1GMw\nSMw8NM2d584x9dRZ1HaTIZ8T++Ag+Y6ZU9Y0ne072IYP4RsMIV6+juT04PGHkPy7YPdhGxhmO6/F\naW8xEdGSunKOhatF5G6d4PHjBI4cof9nfJAKW1uszW/y46eXqZbqCBqR3a00heIEZ85EDso89zI+\nCjLiA77zM/viwL8ATgCfBj7/Xn/4R3/0R9jfTPuOjo5y8uTJA9fM6JvNkx/ltlZuMXJkmOVlJ7mt\nKGarDkWB1c0KjmEf3VaZarHKylYd11AYsVSiUjFisegIBjvUyjVSqwVul/M88VuD+NwypoYGRTLw\nxo08bqPKS//7X3LkNz6FtldHb0+T4VE/4CcajZJZXYU3iUim2UTUaPCaTLQbDTKNBpr4Gg+eGGA3\nYSJT1BEaDtIjJGlFc6SqVZpShLcerdFolPzmJnI6zdpzz1EURVSjneTGbUbOnkFx+jAY2ggCmEwy\nzWYGALvdj04nvev8rK9vcPnyDp3Ofk/JW8evrekYG3OTTu99oPP9z4HB8O4eEUEAq/X9Rba0Wg0n\nToSIROzUam3M5n68XtPBze3xmIhE7ExNednc3Ff1zeVqWCw6kskqs7N+gkELer1EOGxDFAUiA24K\nyR52r88TX45jsJnwzz1AqijgyFRoVSoAlLej6JxO0LqJ35ingUwpld0/HysJ3F4rymr8gIjA/qov\nsb7F2OAEulICnatGq1hkatpPOtciNBDg+o04DqmO1WEnvbjBne0qoafGEItRtl95hdDQMWKxd54H\no1H6lVZYH6cGuA8Tf/VXf8Wf/Mmf3O0wPhI88MADdDodLl++zIkTJ+52OL8SDE4n4fvuO9g+dy56\nQETewu3baZ56apihIQd37qT5wQ+WmZkJ4HAYkGWBq1fjPPJI5OB4k0lLu60cTOQJgsCVK3tYzBLe\nwTHWby+AqlLT2NAEurQLuzRjq+xtpdld2uC+3zqLwWJg/eYqhWwJvaTSe3gGS7gXOV7CNCiz+L3v\nUskVUe1tmrub5Dc29pW9g8GDcrqqqhSiUTY3ClTfnHZTlS6VeJytTT/ZKS9e790pjX+Y+CjISBJ4\n+D32B4H/AHwOeE89229961vv+6E/a+X9UW1HIi38fhN37hgJBi1cu7ZHsdikKjmJ9EXwuvToLQb8\n4SH8fvP/x96bB8dxnnf+n5np6bnvA/c5IG6eIkXSlERZknXYa2WdxHGta53dcmJXnK2K7ewfm+Nn\nx5uy1055165N9pdNbbxR8nMSZ+P4ikRTVnR4TUqkSPEED5AEiPuYATD3PT3dvz8GHGEIEAQIgLjm\nU8UqooF+++1++3je5/0+z8PwcBibTUdtbQO3eifRO9UY7Hqa6ypx5ca48fqPyVprmTl7i1Q8BQpE\nhkeI6qtRtcglx3cbjdwOBJBSKapqmwhndQQULZVmN9WVcbKhGZTAIDU6LRXmFBZVslhvp8JioWlO\nroS62lrk/n5G+/rIZzKYAUGlRnYamLp1m5oPtmC1mti3L4goatBqjWg0KnbscOLxGFGpSq9PQ0MD\nFy+mSSQKD4tOV1jjlGUFSZKXfb0fhMpKE83NDm7dmkFRCoZIba2V6urFlw4ymcJLy2bT43YbUavf\nnx3u2OEkk8kXl0oEQUN9vRWDQYvBoEUUNezeXYl9jvIdQNKaSXo7sFbuII+Gm5M59NkgGY+dZM0+\nFMWFTY4Q67sO+hSOpgZ6Tlwu7p+Kp5ByeaRcaQ2JfDaLnJeR5cLLMReaotklEVNy+Bqc5EQtxmwQ\n0VRT0C3FYiiyQiyexaTVkksmcUhhamsdjI3FUBQwGrXs3l2JwbC0AnhbnVOnTjE+Ps5HP/rR9e7K\nQ0GlUvHv//2/56WXXtqUxojfH8fvLwhPq6osOJ2GeR5SKETRZbMylZVmtFoNLS2lhnR1tQWdTsBi\nETEatezc6eX69eni7yVJpqLCRG/vNCmPDcF3EEElE9RqMU8l2FGtIZcTcZgLHhaHTYvQXI1WyREO\nGggNDqGduIpYaeXyv5xi975awjMx6psqEJPjZAWB8MAA0fFxksEgplmdiJzPI6VSZHOl5yRLErl0\nriQ6cDOznss0XwK8wA9nf34BmO8/3mAYjSL19Xb6+kJMTsaLYkhZVhB1WgS9DpNJi8VSmGVWVVnw\n+ZyMjkYxGER0HiN791UhyGnUajVGtxtR1KLVashrNajUKux1tcSmZezm9z9umYxEEhMVBw4T9U/z\n9rsTTEwWdCl9/iB1dhuNtjyZSBhBq6X26FHSkQj5bBZBr8fT2VlMVgbArJhLlt6fPUjJBF6HDlOL\nndodTkSbnSNH6vD7C9EpXq+J2tqF0+oLgoamJgczM6VRFpWV5kU9E6uJIGg4fLiW+nob09PJWSPQ\nisl07xn/0FCYs2fHiEQyaLUamprsHDhQg15feDREUWDfvipisQzJZK7k/AyGQk4Wl2t+qJ3P52Rs\nLIY/kEZRJFwuAyaNk+M/Okc2m0fJ57EaRA4feQrRYCCtMhL7yYmCLlYBh8eKrcpLdZOHmyNjxZer\naDbjrHBgErLIskw6HMZaW4tNq0WRUsSMDky19WgsFpRUHEVRELQarFYROVRYbtPp1DzxRAN+f4Jc\nTsblMswL3d7OfPOb3+R3f/d3EbZQvpT78eu//uvs3r2bb3/72xg20XJbX1+QU6cKOZGg4AU9erSB\nxkYbQ0PhucVqcToNOJ364v9nNa9FqqrM7N5dgclUMEa0Wg1TU8liqvhoNENDg514PIfNpiMazaBS\nqTh4sAK3OkwyEkGobKAiFyMdDqNCpv7IEQx2O46JCWqbvKg0GpLxKZ7/+EH0ei0Exxm82ItaraK+\nppHs2MC8aESNIKB3OKivE7h5ZRxp1igRTWY81fZFM0pvJtbzafutdTz2spmZSZJM5jCZRDweE93d\nXi5enKSy0kRHh3tWR6EvzjLvfAC1Wg0+n4NUKksy6QUpSz6V4FZfmL1ddtpefJHw4CCJWJLxkQju\nHS1gdbG/SUdVfcEyHh2NFj+YFosWl8tDQp/H1fr+R9afEenY14xXncLk8RRTvOdSKdRa7bzsexqt\nFnNFBU6fj1B/fzFduWg0UNvRTHNnXTG1+0If24Vob3eRTGYZHo4iywoVFSb27at8qOvQOp1Ac7Oj\nUDr8PiQSWc6eHSMYLNjA+bxEb+80Ho+J9vZS9brFouOJJxo4c2aMYDCFRqPG53Pg8y18HIfDwDPP\nNDM5GSebzWM2i7xxrIdULImSl1FrBeKSjri5gb2Hfbzz6kV2vfgcoxevotfCzsNtVB19inRORVOT\nnZs3Z0ilJNx1XnY+Xo0w2Utao0FrNGJvaMDV3sHli+OMjcewehxcuzaFy6HD7HXja7BgUeJkZRmN\nKGKprsZs1mE2b64aMQ+DmzdvcuLECb773e+ud1ceKnV1dezfv58f/ehHfPKTn1zv7iyJTKYgvL5j\niEDBYLh6dYonnqhjZsbLhQsTRCIZamos7N1bWUxkVlNjxedzMjAQIp9XEEUNnZ0eKistJZ7R9nY3\nwWCKiYk4slyYwP3bf7uTbDaPJCm4XAakbJZX/r8rTA5MIuq1tHZU0FZnwVJZicHhwLv/IOJMHEEU\n0EpxUn4/gXCejCwQy75LJJQoFNQUPXTu3o3D55uXxdrd3k42e4VDR1u5fmUCWa2l+ZFOPvCB+ocW\nILDWbI2zWENkWeHixUmuX58ilZKKs+GdOwsx6IlElgMHqkmlJHI5Gadz/iwzFEoxPZ3i7NlxwuE0\nuXSGmmoTe55ooG5XNd7ubqoffZT4TJhUOg+ZBM7aSmx1dSSTWU6fHil+MNNpNVeuTBEO5/B63zdG\n8mgRrE68DaWhlYuJCl2trcWpwdT164gmE1V791Lz6KMPVGPGaBR57LEGQqFUwU3pMJQ82BuNcDhN\nNJot2aYoBePvbmMECvqR557zEQ6nEQQNdvvitY9MJhGfr+AKnpyMI4tGHE3NxCcnkfMSoslESmXC\nU+Ph0JOthCa97HlqH0a9mpzJy5nrCYLBQaLRDCaTlvZ2F/m8guCwUd9agZxJF8JpbTbGxqJcvDxF\nNpvH6TTw5JONSJJCZ1sr+uAAibFhjG43nq4uLNUPR1C8GfnmN7/Jb/3Wb2FaIK39Vuezn/0sf/qn\nf7ppjJFkMlcirr9DKJRCUVSIoobqaguVlRZA4fr1KZxOAyaTiF4v8NhjdbS0OEgmJWw2HV6vad77\nymbT8/TTTcVcJA6HocTTmslI/PSnw6QEGyp9jGQywZVr0zTteQxzRQWTkzFOnRolGEwVE0gePtzO\n7TcHiMdz7Hj+eQwWC9ODwzir3bT/m+eo2t017/2rt9loOPQo3rYQ+57ciUpnxOmxIAgbqxbYSigb\nI/dhYiLG5ct+stmC5yCRyHHhwgRer5HKSsuSXGSiVs2t3gBKPo9Wq0aWRQJBidGJJF27QGexoLNY\ncPkgG4+j1mqLlU2Dk9GSlObZbB6328jgYBiv9/0XZiEsbXlr/oJOR8WuXThbW/HF42gEAd1svo2V\ncCcuf6NT0MKUCl6BRYWcgqApJn5bDEVRyESjqLVaRKMRi0XEbBaRqqswejwo+TwanUhdkxuVSkVl\nSyPu+myhIq6o5/jxQuXkeDzL9evTZLN5otEsPp+Dd98dw/ohH7W17xtMk5Mx0qksao1mtuJyoYKp\nzmikqfMw2fhOBL3+nknMysCtW7f40Y9+xM2bN9e7K+vCL/3SL/E7v/M7XLlyhe7u7vXuzn0xm0Us\nFnGeQeL1mohEMvT0BIrvbSjUpmpoiNLWVnhuRFGYV+JiIURRoLrauuDvpqcThIJJ9DYbosVCPpNB\nURTGZyS6pTw9PQGmppLo9QKKojA0FKG62oJeryUQSDKaM+F44sN4jqQRjAaqH2nFcI9lZY1Wi9nr\nZfNLVRdm88cDrTHBYKrkhgbIZPLFF/5iKIrCTF8fmalJYpOThIdHMAtZvG49Dod+ntobCpqAPBr6\n+mZ4++1hpqYSJR/LXE5GFDXs2lWBRlMwGvT6e2sXloJWr8fkdqO32zdlaN+D4nYbaW52MPeULRZx\nSUs8i5EKhRh4801uHTvGrVdeYfLiRfQ6NXv2VGEyadFoBUSjvqineeedYS5fniSezKOzWEil8gSD\naSYmYoyMRBBFDSaTllCoMLvK5WRmZpJAobhf4OpVEsO3mbx0icjICPlc4b6SZQWtVo1aENDb7WVD\n5D58+ctf5otf/CLOLRohdD+0Wi2/+Zu/yV/8xV+sd1eWhFarYe/eqqI+T6UqPNOdnR7i8ey897ai\nsGBY+4OQlyQCV68SvNZD8MZ1IsPDZBMJwoNDBPsHiPhnOPlaD1JOornZjiwXkpR5vUZGRiK0tTnR\n6wVyOZnATBZ/TEN1vWdRfdtWp+wZuQ8Gg3ae0EmtVhUFjosRn5xkoref3r44wYkZ+nv99F8fY+eR\nbroeaaS2dr61nc/LnDkzRm/vNIpC0fOSSuWK0Q7pdI5nn20mlyskXrvjYtxOhsRqoFKp2L+/Grfb\nyNhYDJNJS1OTvcTjtFzkfJ6xM2eIDA0Vt42/9x6CwUBrWxtOp55QKI0gqJmZSXHy5DC52YiZW7eC\nPPVUU8GInUly61aQRCLL9HSKpiY7O3fayOcVVKpCBAwUihWOnTmD1V2Dy6FjcnAQJS9jb2ygqsq8\nonPZTly6dIm33nqLv/zLv1zvrqwrn/nMZ9i9ezff+MY3MK9TJuXlUF9vw2ptYXo6iVqtxus1YTYX\nqqsbDEKJnkSlWrr+7X4Eb95k9PRpdFYrDXVWLr7bjzAxgWA0kszrycQSnHzvKvbWdk6+W1geikYz\neL0mfumX2mhqciKKAoODYXI5mfp6Gw0NtlXp22albIzch6oqM9XVlqKiGqCmxkJl5f0f1MTUFONj\nUc79yzn27uwiGY0z5Y8yNTyB69mOeaFlAFNTCfr7Q0XjJxRK4fWasFp1pNMSVquOlhbn7DpomZWi\n0wm0tbmLrtuVkgoGSQQCJdsUWSbU34+7rQ2324TbbSIQiHPixPuGCBTcyKOjUfJ5BZ/Pid+fYHw8\nik6nQafT0N7uZmAgREWFmaoqC/lcjplbt1BkGSU0yZFDtQyN2QnHZbp2eWjr8JbDdZfIH/7hH/IH\nf/AHm+IDvJbU1dXxxBNP8Pd///d89rOfXe/uLAm73VBS4gEK+q6uLm8xI7MgqGlqsi84AVwucj5f\nLCiZDoVor63EavRx69o4lTubyaSyjF4bwF3l5Py5YSYmCmnnRVFDOJwmGCxE5NXUWKmpWXl/tgpl\nY+Q+mEwijz/ewOBgmGAwhdttoKHBvqSXvFqjIRJOE52OornWy2OHmlDpmjC7XTx6oBqbbb7bPJPJ\nl7gXFaVQuK2mxsLTTzev6rmVWX1UavWCHirVXYK0TCZfzG8yl1QqRzRayHJ75EgdkiSTzyuk0zkM\nBoFHH62hqclR0J9kMsXaMHIuB/4BWhwW9E12Gna6EE3lZZml8Pbbb9PT08MPfvCD9e7KhuBzn/sc\nv/d7v8dnPvOZTettValU7NlTSEgYiWQwGrVUVJgWLIexbBQFeU5NpuzUJA5J4pGaNK4dRr73Ug/Z\ndA5HtYdsJobLZUKjKYhXrVYdsqyQz8tbImvqalK+GkvAatWxa1cFTz7ZSHd3BRbL0kIi9Q4H9Z0N\n6C0mouEE10+cZ+DsZWKxNBbrwiJPi0U3T4iqUoHTuTruxTJri8HpxDI3nwug1mrnFaiyWHTzhLKF\ncTZQUWEinZaYmkoSiWSIxTLodALd3V52764s5m0RdLpCKv45H4xsLFYQ0y0SDZJLpYiOjRH3+5Hz\n85NDbScUReEP/uAP+MpXvoJOVw51hkLxvFgsxrvvvrveXVkRarWKigozra0uamutq2OIUKhM7vT5\nSp47tVqN0eMhn0riqPYgWsyoBYHqxsrZ5V8H9fU27PaCVqxsiMyn7BlZI4K3bzN+9ixWs41Hn32E\nS6dugEaksr2FQ8/uvedavtNZKB1/4cIEiUQOUSzkKamrK7vzNgMqlYqa/fsRjUbCQ0MIOh3ujg7s\nd2WYtdv17NtXxfnzE8RiWURRQ2OjnZoaK/m8wvh4rLhkYzRq2bu3akFPmqezE1mSCA8MoCgKtvp6\nKnbvvmf/YhMTjJ4+TToUQqXRYK2pofbQoXWrtLzevPbaawQCAT71qU+td1c2DGq1mv/wH/4D3/72\nt/k//+f/rHd3NiTu9nby2Syh27dRZBlrTQ2ujg7i4+N0+SKcHBhgpCdC12MHMJr12F1GVKpCpE9H\nx+osCW81NrIPTlGUzZnmNh2N0vfTn5KJRgEw1dSRFGxo7R48DVULxrPfTTCYmp0Ra/B4TNvCklap\nVGzWMV8IKVsItV0sZ0s4nCISySCKhXEWhMI4ZzLSbMVQCbtdf99w4my8kG11saq5+VyOvuPHiU9O\nlmyvPnCAqr17l3Fmq8t6jbssyxw4cIDf//3f51d/9Vcf+vE3MrFYjKamJs6cOUNz89osD2+F5z0b\nj6PIMjqrFUVRuP3GG0RHR8HqJaM2oNOqsVZVoLJ7UatVeL2mJRej3IrMLvst+PEre0bWgEwkQib2\nvuA1MTYCjGA1dVJZuePeO86hkLr4wfN1FGrC5IsZB8s8fATx/i+du8V3hTBuBZ1OoL5+6er6pXg2\n0pEI6XB43vbI0NC6GiPrxQ9+8ANUKhW/8iu/st5d2XBYLBZ+8zd/k29/+9v82Z/92Xp3Z8My97nL\nxuMkp6aQs1mYHkULyEA64af1ox9ddlVrSZJRFGXVlpc2Ouv5pfp14DcAHfC/gL9ax76sKhpRRCOK\nhQRWc9AaH47uY2AgxPXrUyQSOaqrLXR1eeapzctsLCRJ5ubNGW7dmkGSZJqaHHR0uFc1GkYjiggG\nA1K6NNeCuIg3ZasiSRJf+tKX+O///b9vWpHmWvM7v/M7dHd385WvfAWXy7Xe3dnwaEQRQacjO2ci\nCiDo9Wi0S3+OJUnm1q0Zbt6cIZeTaWqy09np2fKRcevp+/974CjwAeC317Efq47R7S5oBOa85HR2\nO/aGBqAQMTEwEKK3dxq/P76qrsrx8SgnTgwzOhojFEpz9eoU77wzSjY7P3KjzMbhTsEvvz/BzEyK\nc+fGuXRp8v47LpFMRmIyKJN0tSFX+BBtBa+LYDDgbmtbteNsFr7zne9QXV3Ns88+u95d2bBUV1fz\nr//1v+Z//I//sd5deahIUp7R0SjXr08xMhJBkpYm8hZ0OjydnajnGB4aUSxsW0bRxf7+IO+8U3gX\nBIMpzp2b4MKFiWWfx2ZjI0wJDMCrFAyTuWxazQgUIhYiQ0PExscRLRYcTU0Y3W6i0QwnTgwVCy/p\n9QL79lXS3V1x77ZyeYLBVDFpz2L6kVOnRujpKc1zodWqee65FqqrN/YMeCusIS+FZDJLJFKIkHE6\nDciywssv38DvT5T8ncUi8q/+VeuSo7cWO97Jk8MMD0eRchJyMkZHs4m2ei2W6mosVVUran+lPOxx\nj0ajtLW1cezYMfbt2/fQjrsZ6evr49ChQ/T29uJ2r67wciM+77lcnnffHeXmzSCSJCMIalpbnRw8\nWLuk5RJFlomMjBAeGkKlUuFobkZldRGPF5JW3q+elaIovPLKTSYm4iXbLRaRD394x4Ii9s3ERtaM\nfBn4DPD/rHM/Vh2twYC7vR13e3vJ9oGBUEkCtXRaoqcnQG2tbcEbNRhMcvr0GIFAApUKqqstHDxY\nWwzvvJt8Xp63TZaSh1IJAAAgAElEQVQVZHljPfTblaGhMGfPjhUjaFpanOzZU7ng+CgKqzJuw8NR\nhoYiKEoh943aYmc0LtBd34LFs/0ytP7Jn/wJzz77bNkQWQItLS382q/9Gl//+tf5b//tv613d9ac\nycl40RCBO8unQRoa7NTV3V/DpVKrsTc0FL3gN25Mc/FkH8lkDp1OoLPTw86d3ntOKBUF8vn5z/x2\neIc/DGOkAviHu7ZNAv8G+GPgG8AbwA+AEnPwC1/4AnZ7oZBRe3s7hw4donE2RHJwcBBg0/08PV24\nyTOZaQB0OjfJZI5bt/rxeEwlf68oCv39MqOj0eLfZzKFcvRVVfkF26+rs3PrVpB4PFBs3+k0kEgE\nGBwMrvv53+/nrUw8nuXdd8eK9TFyOZmengBer4nmZgfT08mSsgN3kiStlJmZ0nYBUilpwYqnW53h\n4WH+4i/+gkuXLq13VzYNX/7yl+nq6uJzn/scLS0t692dNSUazcwrnClJMpFIhrq65bU1M5PkvffG\nSSQKz1kul+XChQncbuM9M8Gq1Sp8PgdTU4l574L7eVU2O+u5TCMC2dk+/Bz4V8Bc5c+mXqa5Fxcu\nTHD27HjJNrNZ5IUXWuZVu41G0xw7dotYrLTMvcOh58UX29Dp5tuSsqzQ2zvNtWsBMpk8LpeRffsq\n8XpXN49EOJwiGs2g1ZaGpK6Ejei2XU1GRyO8+mr/vBlOW5uLQ4dquXzZz+3bIWRZobrawt69VSXG\nSCyWIRRKo9Go8HiMS46UunZtipMnh0u2GQwCzz3nW/X74kF4mOP+sY99jD179vBHf/RHD+V4W4X/\n+l//K6+++ir/8i//smqC3434vA8NhXn99dsl3gmNRsUzzzTT0HD/Cr9z6eub4c03B+dtP3Cgmr17\n7700mskUvOX9/UFkWaGqysK+fVVLmphkMhLT00nyeQWn07BoBfL1YKMu0/w+8CSFaJp/oNQQ2bI0\nNzsYGYnOClcLZew7Oz3zDBEoVKXUaud/5HU64Z4ff7VaRWenh6YmO9lswYuy2jlKbt2a4b33xuck\n67Jx8GDtlld7rxRB0CAI6nnVRI1GLTqdwIEDNXR0eJBlGau1dBY0MhLh1KlRIpE0Go2amhoLhw/X\nLekFVV9vo67OyuhoFEUpaIja2914ttkSzQ9/+EN6e3v5h3+421Fb5n584Qtf4Hvf+x4vvfQSn/70\np9e7O2tGVZUZn89Jf3+QfF5Bo1Hh8zmpqlq+0a7ValCrVfMmH/crsqrTCezfX017u3vBd8G9iETS\nvP32CBMTMWRZweHQc/hw3aapf7MRBKz3Ykt6RgASiSzj4zFSKQm320hlpfmeSdB6evycOTNWtNS1\nWjWPP96wYJG9h0Ekkub48T6i0ffDllUqePzxBtrbVyZw24gzpdUkn5f5xS+GuHUrWNxmsYg880zz\nooZBJiNx/PgtAoFkyfZHHqnikUeql3TsZDLLxEScRCKH02mgstK8Kt6s1eBhjPvMzAy7d+/me9/7\nHo8//viaHmurcuXKFT74wQ/y1ltv0d3dveL2Nurzns1KTEzEiUQyWK06qqrMC3qh70c6LfHWWwOM\njESL21wuA88807wmQtQzZ8a4eLE0Aq+y0swLL7RsmFwlG9Uzsm0xmUR27Fha3H5npwejUcvAQBiN\nRkVzs2NZybBWm1isUMRtLopSEH6t1BjZ6mg0ag4erMHrNTE6GsViKVRgvp+HIhrNEIlk5m0fHY0u\n2RgxGkV8vvUxYNcbWZb5d//u3/GJT3yibIisgO7ubr71rW/xy7/8y7zzzjurHl2zURBFYdlLMguh\n1wscOVJPf38Qvz+By2XA53OsiSGiKAojI5F52yORNNFoBpdr49c2KxsjGxyNRo3P57znh0RKp8lE\no2hNpkWLo60Wd8rZp1KleUtWQ2i5HTAaRbq6vHR1eRf9u7wkkQ6F0Igier0enU4gkyld3tnqgrbV\n4utf/zrT09P88Ic/XO+ubHo+9alPcf36dZ5//nnefPNNrNbNsQSwXlitukX1IauFSqXCbtczM5Mq\n2S6KmgWXhaRMhkwkgmAwLFpC4mFSNkY2MeHBQSbOnycbj6PR6fB2d+Pp7FzTjJJutxGfz8G1a9PF\ntVCn00Bj48pnEmUKxP1+xs6eJR0MotZq8XR20rbDzYVLgaLS32wWaW0tZ8W8Hy+99BLf+c53OHny\nJOIS0vOXuT9f+9rXCIfDPP/887zyyis4ndvT47bR6OhwMzkZL0bvaLVqOjs9mEyl931kZITxc+fI\nRqPFpGyerq5Fa2g9DMqakU1KKhym7/jxktTDGlGk+UMfwlpTs6bHzmYlhocjjIxEsdv1NDbaFxTg\nLpeNuoZ8LxKJLFqtelXr/0iZDH2vvkrC7y9uU6nVNHzwKeKCk/HxGKKoob7etmUEqGs17n/+53/O\n1772Nd58803atmGW2bVElmX+03/6Txw7dozjx4/TMJtXYzlstud9vUilcqhUqvsKXwECgTgjI1Gy\n2Tw1NRZqa20lesRMLMatn/6UTOT9JR21VkvTU08Vc6OsJWXNyBYkHQzOq4GQz2aJT06uuTEyNZXk\n9u0w6bSEJMm43cZVMUY2C5FImkuX/ExMxNBqNbS3u2lrc61K1FIqGCQdCpVsU2SZcH8fvmefXZW1\n7K1OIpHgP/7H/8ibb77JiRMn1qzq7HZGrVbzzW9+k9raWg4ePMhLL73ECy+8sN7d2lIkk1l6egIM\nDoZRq1W0tDjp7PQsKqb1es2LhuunQqFiNfk7yLkcsYmJh2KMLMbGkNOXWTZqQUClnj98mjV2RYfD\nad5+e5jBwTCTk3EGBsKcODHM9HTi/jtvAfJ5mdOnR+ntnSYSyTA9neT06VEGB+dXw30Q1IKAagF3\nqUZf1ofcj1wux9/+7d/S3d1NIpHg7NmzZUNkjfn85z/P97//fT772c/yxS9+kVhsW2RoeChcuuTn\n0iU/kUghv9B7743T2zu9ojbVGs2C342lVBhfa8rGyCbF6PFgqqws2aaz2R6CVyRBOFwa2RGPZ+fV\nVdmqzMyk5p2rJMncvh26xx7Lw+hyYbsr1aOg1+Msf1SLyLLMxMQEp06d4nvf+x5f//rX+fSnP01t\nbS1/+Zd/yUsvvcR3v/tdbLb1izrbTjz++OOcP3+eUChER0cH3/nOd0jfVRm6zPKIxTIMDZVGxyhK\noYjeUgv3LYTR48Fy1zdCNJuxLje97BpQXqbZpGgNBuofe4zgzZvExsfRO52429owlMVka8paV5tX\nqdVUP/ooerud8NAQWpMJd2sr1tratT3wBkJRFILBIP39/dy+fZvBwcHiv4GBAYaHh7FarTQ2Nhb/\nHThwgD/8wz/E5/Otd/e3JR6Ph7/+67/m7bff5mtf+xpf+tKX+MQnPsHHPvYxDh48iL7s2VsVVhqc\nIIgi9R/4ADNuN9HRUXR2O+62Nkwezyr18MEpC1jLLItIJM3PftZX4h0xm0Wee8634lj2zSBoy+dl\n3njjNoOD789aBEHNk0820tzsWMeebV7ujPt3v/tdvvWtb3H79m1UKhU+n4+mpiaam5tLDI+GhgZM\nDyGMvcyD09vby/e//31+8pOfcO3aNbq6uti/fz/f+MY3ih6rzfC8ryd3V2BXqeDgwVp27bp3hfeN\nzmIC1rIxUmbZTEzEuHzZTyiUxmoV2bmzYkkVLe/HZnk5RSJpLl/2FyNb2tvdtLaujoB1O3Jn3Pv7\n+wmFQjQ3N5fDRbcQyWSS8+fPc+7cOX77t38brbZQNmKzPO/rRTKZ5cqVqVkBK7S0uOjsdK9q9N7D\nZlNG0xw9enRN82WU2XiUx3x7Uh737cMXvvCF4v/L474tmZ8mdpaNfCcs2zMyODhYLEm/1mynY+Ul\niduvvUZ0dLS4TS0IND75JI4VCCvvPtZSZkqrfS1Ws70Hacvf08Po6dPMrRfubm+n4Ykn1r1vD6Mt\nePAZ8mr1YyO1s1p9Of/GGyi3b5feV52dNDz22EPvz73auNe4r/SY5f037v6LeUbKfuUy9yU1M0Mi\nECjZJksS4cHB9enQFkHO5wn29ZV8MACio6OkI/ecQJQpsyiyJBEdG5t/X42MkL4rx0SZMhuFjeAZ\n+SLwy8DdFazKmpENQtzvp+/VV8lnSkN6nTt20PTBD67acbbbGrKcz3Pz5ZfnGXqixcKOj3wE/Tap\n+7Hdxn2tkSWJGy+/THJqqmS7aLHQ+pGPoNsg91V53LcfG9kzogN2A+U7cgNjdLkw35XTRK3V4mhq\nWqcebQ3UGg3OHTvmJSGy1ddvG0OkzOqjFgRcra3z7it7Y+OGMUTKlLmb9TZGfgP4G1bJQzP4EJcN\nHtaxFEXh2rWbZDLS/f94FVjovNSCQO2hQ3g6O9Hb7ZgqKqg7cgTbCtMHP8g1XO3rvprtLdZWIpEl\nkcjO2+5qbaXm4EGMbjd6h4PKPXuo3Lv3ofZtPdtaCavVj4fRTiqVIx6fP/5r1ZeYTkfNwYMYXK7C\nfbV3LxW7dy+7ndXoz3LbWOkxy/tvzv3XM5pGCxwF/nwd+7ChCQZTXLgwweDgKNeu5ejo8NDe7l6X\nEFK9zUb9Y4+RS6VQa7VohA0biLWhmFtfAqCpyc7OnRUYDIXwRo1WS8XOnbja2kBREHS69exumVUm\nm5W4cmWKvr4gsqxQW2tlz54KzOa1HWeNIJTvqzKbivXUjHwamAF+ApxgAc3I5z//eez2QmGw9vZ2\nDh06VFTp3rG+turP/f39nD07QTRaKECXyUyj0ah5/vn9NDc71r1/a/FzU1PTlltDPnNmjIsXJ4s/\nq1Swd28V+/dXr2OvNhZbWTtw5YqfU6dGS7SkbW0unniiYduHtW7lcZ9LPp9HluVifpXtzEZNevYN\nYA8FvchB4EvA/zvn99tawOr3xzl+vI9strQOQUuLg6ee2pp1SrbayymVyvHyyzcJh0vrdDgcej76\n0bYllQTfDmy1cb+DLCv88z/fIBAorWVkNot85CM7sNm2d4r0rTruczl37hwvvvgi6XSaH//4xzz+\n+N1z7u3FRhWw/h7wPPACcIVSQ+SB2GqakTsTp0xmes62tbUfN/o13KyakbmoVEurcbNRdR5lzcjS\n21lonBcb+81wTmvdxmbVPNy9fzwe5+Mf/zjf+ta3+O53v8unPvUpUqnUQzv+Ztt/vQWsd3hivTuw\n0XC5jFRWmku2abVqmprWrv5JNpEg7vcTGRkht4SHpsziGAxafD5HycdHpSqkddbp1scrkonFCA8N\nERkdRborVLvM6qJWq2bLBJRaH/X1Nmw2PVI6TWRkhPDQEJlYbJ16WWat+J//83+yf/9+PvGJT/Dh\nD3+Y7u5u/u7v/m69u7Vh2ciLltt6mQYKNVCuXAkwNhZDp9PQ2emhpcW5Jt6R2MQEw2+/TToUQqVS\nYfJ6qTtyBKPLterHuhdb0W2byUhcuzbF7dshAHw+Jx0d7nUxRqKjo4ycOkU6HEalVmOqqKD+yBEM\njvUt8LcVx/0OkpTnxo0Zbt6cQZJkGhvtdHV5UGUSDL/9Ngm/H0WW0dvt1B0+vK2qM2/lcc/lctTV\n1fH666/T3d0NwLFjx/jqV7/KqVOn1rl368dG1Yzcj21vjNwhnZYQBBWCoFmT9mVJou+114jNpnvX\nmkxoDQbMVVXUHjr00IR2W/nllM0WQrOXU+RKzufJxuMIev2KoyGkbJb+V18lPjlZst27cyd1hw+v\nqO2VspXH/Q65XB5ZVopG6Mg77xC4cqXkb8yVlbS88AKaFQgdFUUhE4uh0WrRGgwr6vNas5XH/dVX\nX+U//+f/XGJ45HI5vF4v169fp/KuvE3bhY2qGVl1Nrre4UGZnBxdM0MEIBuPkwkXQk9jOh35bJbR\n06e58fLL3H7jjTVLTb6dNCOiKCzLEIn7/Zz6wQ+4dewYt44dY7q3d0Uv7r7r1xccx9j4OHI+v8Ae\n96asGVl+O1qtpmiIyPk8sYmJeX+TjkS4de3aAx8/FQwy8Oab3Dp2jJP/+I/4L19GllaWn6isGXmw\n/b///e/z8Y9/vGS7Vqvlueee49ixY2t+/M24/5YyRso8GILBgKDXoxFF5FyOwZ//nNj4OLl4nFBf\nH2NnzqDI8np3c9uQTSYZeecd4n4/2Xic5PQ0o6dPlxQqXC4anQ5BPz96Q2+3o9asnaFbZj5qjQad\nzTZvu6DXo3lAD1hekhg9fZpQfz/ZWIxsNMrY2bOEBgZW2t0yy0SSJH784x/zq7/6q/N+99xzz/HG\nG2+sQ682PuVlmjIATN+8yUxvL5MXLzJz8yYaUcTV2orB6UQwGNjx4Q+vuX5kK7ttl0NkdJT+V1+d\nZwB6d+2i7tChB2536to1Rt99FzmXAwrLcY1PPom1pmZF/V0p23Hco2NjDP785+QShbBfjShS8+ij\neDo7H6i9eCBA3/Hj8+pH2Rsb8T377Ir7uxZs1XFfaInmDrdu3eLpp59meHh4HXq2/iy2TFNOdFAG\nANeOHejtdhJTU0jZLAa7vVjHQqVWLy0WtcyqoLpH7O/dtUaWi7ujA53VSmxiArUgYK2txeTxrKjN\nMg+GtaYG37PPEh0dRZYkLFVVWFZgFKpUqgW1Xaqy1+uhs9ASzR1aWlrIZDIMDw9TX1//kHu2sdlS\nyzRbVTPycHKaqDB7vWja2nA2N5cU1LLW1q5JxMV20owsB6PbjcnrZXrOLFcwGLDV1T1wm4ODg6hU\nKqy1tdQcOEDV3r0PbIiUNSOr047J46Fq715qDhzAWluLSqV64L4YnM4SY2Y6kykUs2xeWYLEsmZk\neeRyOc6ePbvgEg0U3rNHjhzh5MmTa3L8zbx/2TNSpgRLVRVWt5vpGzfIZ7PYGxrwdHVt+9TVDxNB\np6P+yBFCsow+m0VrMuHt6sJSVbXeXSuzQVFrNNQ8+iiiyURkZARDNkv9o49iny23UObh8MYbb1BX\nV7eo1+Pw4cO8++67fPKTn3yIPdv4rOcXpgv4X0AeuAp87q7flzUjSyQvSaRmZoDCrHo1BImyJCHL\nMoIorritpbLV1pBToRBSOo3OakU0mR6oDSmTQaPVrniJZiOzlcZdliSSwSAoCgaXa10KSkrZLGqN\nZsMLk7fSuN/hN37jN+jq6uJ3f/d37/k3r7/+On/8x3/ML37xi4fYs43BRs0zIgB34s7+Cvgz4MKc\n35eNkSWQCoUYPX2aRCAAUMgNcvAg+lm1fnxyksjICHI+j7WmpugO3ohs1pdTXpKIjowQm5hAazBg\na2ggPDDAzI0bSJkMosVC9b59K3aZb1U267jPJZdKERkeZvjkSVIzM+jtdqz19dQePLjuSeU2Klth\n3OeSy+Woqqri/Pnzi3pGZmZmaG5uJhQKod7Ck4yF2Kh5RuYGwBuA8Eob3Go6jvsdS1EUJi9eJDoy\nQj6TIZ/JEBkcLCZTioyMcPv115m8cIHA5cvcfv11pq9ff39/WSYbj5Ofk4tgI5zXau+z1u1NXrjA\nwJtvcv3cOSbOnWPk1CmGT54kG48j53Kkg0HGzpwhGQoVti0xr8dG1LOsdlsrYaNoRqRslskLFzjz\n058y+NZb+C9fZvLyZYK3buG/dGnBfbLJ5ILp+DfKOa1mO9tFM/LGG2+wY8cO5PukQXC5XNhsNgbu\nEXa9Wc9/pfuvt2bkReBrwHtAOSB+mWTj8XkZNQFiY2PkUimmrl0jl0wWt8u5HIFr17A3NZGJRvFf\nukQqGCxoErq7cTQ1PczubwlSwSAzN2+izBoYgsFAdGiI+MREiSckPDTE5PnzJKemEK1WKnft2lap\nv7cyCb+fuN9Pwu/nzsJIJhIhHQ4Tn5wkG48jmgt1prLxOP6eHiLDw6g0Glw7duDp7FxR1tUyG4PF\nomjuZs+ePVy4cAGfz7fGvdo8bBR//Z8CLwP/Mmeb8vnPfx673Q5Ae3s7hw4donFWkHXH+trOP+ez\nWdKXL5MOBouRF26dDlNFBUJHB6OnTmGZ3X7n91VOJ01PPcW5114jE43ink2yFAJq9u+nY9++dTuf\npqamTee2jY2P0/eznxVzdwh6PXI+z+TFi3i7uoBCAcJgfz/NTz9NcmoKANFioeW55zA4nevW943C\nZnfXz9y8ib+nB/+lS4Ru3y5utzc2UrVvHzs+8pFiOv+hX/yC6d7e4t+o1GpqDx8u3ivbic0+7nNZ\n6hLNHf7oj/6IfD7PV7/61YfQu43DRtWMiEB29v9fBU4Bc/PkljUjSyBw9Sqjp08XZ+ZqrZa6I0dw\nt7YydvYs0dkKvHeSK1lqaqjYuZO+n/0M7rq+1bMhn+vFZnw5ZRMJbv30p6RDoeI2o9dLZHgY7WzG\n07jfj6DT4fD5ikJjgPrHH8fT0QEUlswy8TiCTrfiOjSbjc047gBSOk0+l0PKZBh4802UfJ6BN95A\nSqdRaTR4d+2i9YUXionM0pEIt44dIxuPl7Rj8nppe/HFLS1SXojNOu4LsViis4X40Y9+xP/+3/+b\nV155ZY17trHYqJqR54GfA/8XqAWOr7TBja53WO1j5XM5dHY7no4O9A4HtoYGGp98EktlJUMnThDo\n6SFw7VohIkOvR6XR4OnqWjSB2YOe18xMkqGhMH5/HFle2gtmK2hGRJOJ6kceQWe1Mp3NotZqMTgc\ndP7Kr+DduRNbfT31jz2Gq7WVVDBYsq8kyYyNRRi9McSNV39WUocmPjnJ+ddfZ/rGjVUpLz/3PBVF\nIR4IEB4eJjE9vaK2Vpt0JMJ0by/+y5eJTU4u+rFaL12ELEn4e3q48cor3HzlFQKXL+Pp6CCkKPie\ne47q/ftpfvpp2l98EVdb2/s7qlToHQ6MHg9Grxf9HWHrnOdxKX1JJLKMjEQYHY2QySxce6asGXm4\n+3//+9/n137t15a8f3d3N1evXl2142+F/ddTM/LPs//KLJO8JJGJRglcvkywvx+9zYbOakVrNqOz\n2Zg4f56ZmzeBwsdy+vp16j7wATSiyOTFi1Ts3Im5oqJEbyIYDFiqq8mkUsvqi6Io9PQEuHzZTzKZ\nQ6fT0NrqYv/+arTajR1auFIysRj5XA5bQwMGlwt6e2moq8Po9aIRhGKa9VQkQv/x4yWeKEmjZyAA\nwb5hohdOkJyaosXnwGLVc/2HP8Tb3c3U8DDK7dsYvV4ajx5dVlSGLEkkZ2ZQZLnQt1nyuVzh/rhx\nAymdRms04u3upmL37nWPskpMTTH0f/9v0WgT9HpqHn0Ud3v7uvbrbkIDA4V6TbPeyGAsRj6bxdvV\nRaXTidZoxOhwzPN0JPx+Ji9cYOr6dbQGA+7OTuyNjTh37FiyV2R8NMjVS2Mo6SRqjYbbdifdu6pw\nOjd2hd6tTC6X4yc/+Qlf+cpXlrxPU1MTk5OTJBIJTA8Y9r/V2CiakYUoL9MsQHxykvFz51AUhf7X\nXsPd3o5KrWby/HlyqRStL75IYmKiWBQtPDhIZHgYh89Hxa5dpGZmMHq91B0+jP/yZSLDw8i5HA6f\nD2t9Pba6OkSjccn9CQTi/Oxn/aRS78/QNBoVzzzTTEODfVnntlnctnlJInD5MjM3byJLEka3m6pH\nHlk0o2l0bIxATw/pcBjRaiVuquXKkEKNJcO1fz5GPifhchvZ0WQm0NND1b596Gy24vKaZ+dODE4n\nkcFBdFYrzpaWex4vE40ycuoU8clJFFnG6HZTd/gwRreb8PAwA6+/XlLNVdDr8T33HOaKCrLJJNlY\nDK3RiM5iWd0Ldw/ujPvwyZNM3VW1Vu9wsOPDH37gPC0PSjYeJ5tIIJrN847d/9prhO+a/Ql6PS3P\nP4/J612wvXQkQt/x40THxwn19REbH0djMPDIZz5Dw+OP31fAqigKM7cH6XlvgJuv/5zg2CQmt5vq\n7nbannqcnfs3n/h8szzv92O5SzR32L17N3/1V3/FI488skY923iUa9NsEXKpFCOnTpGcnkZvtxc+\nVIrC9X/6J2RJQqXREB8fJzoygqW2FtFoJDWrZZg788pGo6jUaqofeQQ5nycXjxMdHSU8MICzpYWG\nJ55AvcRkTdFopsQQAcjnFaamkss2RjYL4YEBJs6fLxayiwwPk8/l8D333D2TxFlrajBXViKl00ho\n+OnxftLpNFiLDyjxeJZ0Kks+lytx3Sv5PP5LlzC6XEVtSmR4mOann8bods87VuDqVSJDQ8Wf4xMT\nTFy4QPPTTxMbHycdiaA1GovHldJpMpEImViMyQsXyCWTCHo93u5uPJ2dK/aYKLJMXpLum0AvucCS\nUS6ZJJdIPDRjRFEUpq9fx9/TU/QcVe7ejau1FVmSUODey5yLXKdMNEoqFCIyKzw3V1YCEOrvp+bA\ngRJjJB2JoMgyeru9eO1jExNMD44xeuYso5cLAth0LEkuI2HzOGjbVYcoll/n68HcJZrl0NXVxdWr\nV7eVMbIYW0oxtRF0HGt5rFQwWPgYKQpqrRZ7YyOJqSlkSUJvt2Opri4YKg4H0uyMWms0otHrqdi9\nu6BncLnQGo0Iej2hwUGiw8Okw2G0JhMGl4vh8XESsxEfS0Gv1yIIpbeRSgUWy/0zt25WzUh8chLR\nYkEz5+Oampnh1mx+l3uh1mgQTSZEnYhOV/hwJGQD7oZqAERRg2jQFeqMVFUxMWt4ZOJxTB4Pgl6P\nweXC6PGg1mqJ+f3zjiFls0RHRwGQ83mSwSDR0VFuXLrE5OXLxCcnCVy9ysyNG+Rml+RUGg2yLDN2\n5gzpUIh8JkMmEmH8vfeKS3mKopCYmiIyMsKNy5eXfK3CQ0P0vfoqN37yk0JCsDlC37u584Gei2g2\nI97DQ7MWuoi438/Y2bNkIhHymQzpUIiR06eZuHSJGy+/zM1jxxDNZu6ez1uqq/FHo/c8hqDTFXLO\nhMNI6TS5VAqVIGCtqysYP5cvEx4c5PxbbzH8zjuMvPMOo6dPk5ltMz4xgVrOkJx+/9mUs1nymSwp\n/wRqSnNbrKVmRMpkiI6OEh0bWzBXylLaWM2/X8/97yzRzK1Fs9T9Ozs7uXaXJ3C5x1+Izbp/2ZTe\nRKgEAaPbjXU7H6wAACAASURBVJzPo7NaafzgB/Ffvoy9qYmE38/wyZP0v/46HR/7GI1PPgmKQuWe\nPRhdLqR0mvDgILKiULl3LwogJZNFwWWwr4/E1BRpm410KISlqgpJyjM4GGFoKIwoamhqclBbay3p\nU0WFicZGO/39waIkorLSTE2NdV7/twKBQILeSYGpES3VVQ481jSJkQES09NobTZGczmcO3ZgXCRk\nVxDU7Gm3YJJCJOJxnG3taPVa3CYZd1MlDYcPER0bA0VBpVbj3bmT5NQUI++8g2AwYK+vR6XRcPu1\n15AzGdwdHWgNBc2ARhDQGo2kZmYIDw4WlmryeTJuN8G+PsxVVTQePUpsfLzgAZltT6PVkrsryiOf\nyZAIBDB5PExcuMB0b2/hPgLsKhXe7u5FvSaxiQmGfvELpFmjJx0KkY5EaH7mmQUjhlxtbYV8HYEA\nKApak4nKPXuK5/YwSAYC5O/6wIb6+hB0OrLxOEo+Tz6dxt3aSjoSIZ/NYq2pwdnayuDAAHGzGZVG\nQ/DmTVLhMJaqKpwtLRg9Hhw+H+PnzqERRTSiiKejg7EzZ0hMTeH0+ZBSKdI2G/FQiFQwiNPnQ63R\nUH3gAGpBQEknqa134b89QS5b8J6JeoHqBvc9084risLoaJTBwTB6LXhMORwOfcG4XWAMYuPjhXtP\npSpqnkquz/Q0I++8Q2JqCpVKhdHjoe4DH8A4R5e0nbiT6KzuAYpYdnZ28td//der36lNypYyRu7k\nrNiKx8pEo0yeP8/w7Lpkxc6daEQRc2UlVfv3c/K//BfUajX5TIZkIEByaorWj34UtSBw7R//kZvH\nj5NLJPC0t6O32VAB5ooKDNPTjL77LtGREQCEYJDRM2ewVFdz7XaKCxcmyOcLVsbAQJijRxtKll+0\nWg2HD9dSW2tlaiqB3a6nvt6G2Xx/z8iDXMPVvu7LaW9qKs61cwPIiShGTZax4RAhmxF3Iomo12OV\nJPyXLhEbG6P5mWdKKh/PZbq3l+DZsySv9pFISFh3ddPw2B5M2hze7m5EoxH71BTeUAjBYCA0MMDw\n22+Tz2RQqdUMvvkmjU8+ibWmhvFz55CyWeoOHQIKy3Hujg6C/f0k/H6UfB61KNLs85GYnGTi3Dly\nyYL40d3eTt1jjxU8bH5/waV11xq+oNMRm5gg0NNT1JnYgcmLFzFVVGC+h0YCCktJ0l2C6ITfT3J6\nesEPncHhoPmZZ0gEAsi5XMELtMhHbrXuhbntaO76QOclieT0dKHOy+y1yUQiCDodTR/6EBpBIBUK\nMfyLX5ANh7n+3nsoilLwUgYCxEZHSQQCNH7wg9Q8+ij5XI70zAzZVAqjx0Pc78fs8aAzm0mFQkyf\nPk3t4cMkZo0yQa/H3dGBtaaG0MAADV1NZMMhZoIJRIOehu5mfIf2zhPA3jmngYEQJ04MU2mH8cvv\ncXZknPp6Gzt2NRWMiDnLfMG+vuJ9BjB9/Tr1jz1W/L2iKExeuvS+t4yCx8Z/+TKNTz55T8N0ueO0\n0nF9mPv/0z/907xEZ0vd/84yzUqOvxCbdf8tZYxsZQJXr5IKBqnavZtUJMKlv/kbUKmw1dVha2xk\n/+c+x+ipUziam8nG4wydOEHj0aOEh4cZfOut4qw3cOUKKpUKS1UVgl6P3ukszHI0GgS9HntDA3I2\ny8z4DDdvpoqGCEA6LdHbOz1PC2IwaGltddHaurVnR/5bgwy8/hoRf5BMIoHFbsb66AHM3QcwyfHi\n8lZyeprY+PiCxkhyZobxc+cK+V/GBzAIArHzQTxOkel4HEtlJWJ9PSaPB5PHg5TJMHr6NPb6ehKB\nAMH+flCpSAQCVOzdSyYcJjoyQqarqyg4dTQ10fDEEyDLyPk8tro68rkcPX/3dxicTjSiiKIoTF2/\nTuNTTxUEyxUV8yKsDC4X5upqQv39JYJXACmVIhMOL2qMKAukxVZkeVHRomgyIa5jJmBLVRUGl+v9\nfDCKgrWuDpVGU3I+UiaDavY8Rk+fLupdYhMThG/fpvlDH0IwGJBSKaKjoyQDAay1tXh37WLy4kWk\nXI7pa9foO34ctVqN0evF2dyM0e1GEEX0DgdqjaZQVyqXw+h2U3vwIKGBAbosFjKRCEaPB3d7O7Z7\nzMplWeH69Wn0eoHkwFUmbhSSXI+PhHDZtQjGizQ//TQqlYp8Lkfg6tUSr5A0m8XZVleHWhDIJRLF\npH1zSQQC5FKpZQnftwK5XI4f//jHfPnLX36g/X0+H+Pj4ySTSYzb7NotRFkzsgmOdfPaNeJ+P9HR\nUfKSRGR4GI0oImcyoFIR6Okh4fej1mgIXL1KqK8Pg8uFzmYjNjqKLMsokoQiSaAoxCYmgMIL1VJV\nhberC293N97ubpKzH7RMTiaXK62hYjRq0WhU3LgxTX9/kHg8O6+vy2EzaUbyksT4+YtE/IWwU53J\nRDankJ4OoLPZSQQCTKfTxb+/11p6JhIhl0iQz+UKws5slmwshqIoiGYzY2fPMnHhAqlQqNA3lQqV\nWo3R7cbh82F0uzF5vYVMr2qRpLGaGbWHqZl08SOvUqkwV1Zira/HWltLNpHAH40WtUGyJCFLElqD\ngUwkAoBoNFL/xBNU7t2LuaoK786dNB49iv5OxeE5s97pTKZovC6GtbYW9V1RIganc9Vc+muhi9BZ\nrTQePYp3507MVVXUHDhAy/PPz8sRY6uvRzSbSYfDxWs4nckUM/GGh4aKy0tKPl805lzNzVi8Xowu\nF5HR0WKhtGw8zvj58whtbZirq9FZragFAYfPRzIUInD1KoJeT/0HPkDL88+z85OfxPehDy1oiEhS\nnrNnr3L9+hQajRqPQ0t0bHzO72UkSSE5NVVMwCal0yWlI+4w5vcXBNUUvEaaBcZcazCU6KcWu75L\nYbNoHt566y18Pt+8jKtL3V8QBFpaWuidk5F3Ofvfi826f9kzssFJR6OFglv//M9MXb1K49NPM/jW\nW9jq64uuY4PdjkarRWs0ks5I4KjEe+goWZ0No9eLwekkNTNTzItgqqhAMBgwOBzFpZ7sncRamUwh\n90SNE+eowthYYbtOp0EUNVy4MMnt22FAwWbTc/RoI5WV5nW6Og+PXDKJWkoSjRWih0RRg8kkIqfi\n6Mky1/TQiOI9w241ooh6VtehFgRkScJaV0dqeprx8+dxzmZpDfX3o2lrQxBFnC0tjIVCmLxe3I8c\nJnjjGt7Dj3HynXF6z93C3dnFYMLBnj0yu3ZVAGDyeLDW1jJ19SqyJCGazVTu24eg06HWaDB6PJi8\n3pIoFYPdTs2BA/P6bKmunuc1sdbVYaqoWPSaWWtrqTt8mMDVq0jpNEaXi8q9e5etAclmJTKZPCaT\niFq99tkIjG53yfJFLpVCmi1CqSgKtvp6KnbtAgrLWBqttuhR0FmtqDQadGZzsSCiaLGgt7/vTbT7\nfISHhhBmn71UJEpOrUdjVKN3uRk7f4HJs2cKOp50GpVKhUqlInD1Ko1PPrmoNyqblXj33TF6esYR\nhDRjY1Gammx4m2sJTRS8N2aziEEvFPo+a0SIJhNGl+v998AsOru9aHRqtFq8XV2MhMPks4WJiEan\nK9TWWWL03VZioSWa5dLV1cW1a9fYN1uGYzuznnfQQeBbgAycBf5/9t4sSK4zPc98zsl937fKqsra\nV1QV9o0Ed7JpqZvdklrutmSHbMdceUZjxcT4YkK3czXhC4U9F57whT0jta3o1S31xuZOgiR2oFZU\nFWqvyn3fM0+ezDMXWUiyCBAECJAEW3ojEMFM5n/+c/6Tlef7v+/93vd/e9gD/i5yRsqJBK21NVRa\nLZr9VN7t4ME9Pk6r0UCWJEJPP43W4SIXjmH0BSjqfPz2tS1OjIwQPHkSlUZDMRZDb7Mx+s1vYuvu\nxuT1IpVKBI4doxiJUEkkCOl0bVE0t4vjx3XI8h6ZTBWn08DKShqdTk2pJLG2liEeL7G3V+D55/s5\ndMiLWv1gImePE2ekuu+oq9brMbrdd9a/1VoErRGTSUuhUKdcllCrBYam+nGFuqjF9/CWy52WWHMg\ncGB4U5Ypx+M0ZRmDy0VTkrAEg5SiUZxDQ6Rv3cLkdqPfL+3ENiPY9B4uVffo8vXgPmXk6ocbrK3m\nsfueAKWLre01TP4AaoOBcrHG/Hyc7m4rTqehTXw8dgxrMEi9UEBjMpF0u8nv7HSIsSafD8tduBu3\noSgKtVwORVEIPf00+Z0datksvR4Ptt7ez5StFwQB99gY9r4+5HodrdncDqDvE4qisLaWYXExQbUq\n43IZOXzYh9drPnDvHhafdRyNwUDP6dNt/xhFOVB+09vtOIeHic/O4tbpUDQafNPTdJ85Q6Naxej1\n4hocpF4sUs1k0DudWPx+3GNjhC9dwjE8jKqsUKk2sPk9eFw+Lq2/g713ALVOQ6MlEJ5bZPDZZyju\nbpNeWblnMBKNllhdTaNWtwnUTqeR1dUMA0+PYppfRS00CYXsqHRaPBMTnXsoiCK+mRmkUqmTBTK4\n3Yw/+eSBvwXn0BBqvZ787i7Cfpn4Xt+h+1nfh/38VzFelmV+9rOfcfny5Yeaf2Ji4g7eyNfh+r+I\n8V9lMLIFPEvbn+ZvgEPAvXsjfwfx8dT63SCVSjQlCVGrZeSVV1DpdOhtNuJzcyAI1AoFgidO4J2a\n4tqrF8hnS8jLMdRGE96xw6zEdJz9/VcIHDlCvVTC2tWFtbcXrclEfG6O1M2bNBsNtBYLgWPHsPb0\ndPQgfD4zL788RCZTpVZrkMlUKZUkZmcTLC+nkGWFGzfi1GrtneupU19PF9rE4iLx2Vmk/WDCNTJC\n4OjRA9oPqUwdfAP4euLYbDpQwOJx4JqYxNHXh8njQSoWURuNtCSJ8KVL1PN5zIEAlmCQ+Oxsu5tJ\nlrEGg7hGR3EOD7eDTJOpTSAWRaRymZqiZXU1jV8bo1p0Eg4XqddlMpKVptZKXRaYW0iCtx9dq0gt\nn6fVkKhU1JTLUkeNU1SpMAe6OmRRo8tFbmuLciKB3uHA0df3qcJmUrlM9OpV8ru7oCiYAwG6TpzA\nNzX1wOur1us/s6RzN0QiRT74YJd6vZ1hKBYlymWJb3xjEKPxswnSjxqftlb+w4cxOBzkd3c7gUr0\n2jWq6TSO4WFyGg2F3V2kYhGtxUL3qVN4p6YY/da32Lq6QDydotXU4vSFCG9naNqC6L1aYtevk9na\nwRLw4RwexeL2HPA2uhvy+foBnpfBoKanx47R5eL3/tfvIyUiiKKAvbcX6ydKPGafj8GXX+7MYXS7\n78hi3Q5APo2n8g8F77zzDn19fQ/94J6YmOCv//qvH81Jfc3xVQYjHxdJaAB3N1l4AGxtbX1pGYuH\nnavZbLG+nmFlJU2zqTA46GBkxNXRn7gNvc1G1WrFtK8Zsffhh2iMRnqffBKtxUL/M8+AIFDY20Mt\nV7EJJUr5GBZTN+ZGnFzNQSEv4tlX7Lwd9GQ3N4lcudIp3cjVKrEbN0g3Ggx/TH5bp1MTCFioVhuY\nTJpONkSW2z94Ho+BXK7GjRtxJic9mM33b/L2edbwUd/j5dlZpPn5Tr1crlZJLCxg9vuxh0Kdz1Wr\nMtGqGe+ZZ2mVMu11NDkp0n5AaU0mIskkPp2Ozbfe6oiT5ba20NlsiBpNh09Q2NtD73Aw+OKLNPbX\nvV4oUE4kUGm1CHYvVlULg0uN3gwNROauRdDJJSqpJKJsxtHVxepSjAG/gFqnR6XTYTBoMBrbAVSx\nWGdnYY3I8gY6vQZdwMqpZ862d/f7DrGtZpOmLN81xZ5cXDzgLptdX0dUq+l7+ukv7e9sb6/QCURu\nI52ukk5XMRq1j+w8HvY4Ko0G59AQBbUaTbXK5f/0nyju7tKSZbbeeYfec+foOn4cqVikWa+TXlvD\n6PXSe+4coqebknUZqSaxm2siV/bIbYRRZWRa9TIGkxary05iaRn11Bjdx+9M5yutFvndXQqRCDQt\nNEoFWhoJna5datLpVPh8ZoI9QRi7t2W91mg8QER9FGv8oMd42Dm/jPE/+tGPPrVE8yDz3y7TfN7x\nDzv/4zT+cSj0TQMeYPmzPvi7hLW1DO+/v4sstxn6yWSZWk3mxImDKU+5WqUlyzRKJVIrK3SfOkUt\nlyMxN4dKp8Ps9dJsNFDpdGQXr3Hzt+92WhAnvvUyod/7p5TWFsnfyBM4erSzsy3s7XUCkduo5XIo\nn2LKZjBomJ72E4uVO+fc32+nu9vGjRsxBgYcSFLzrmMfZ0il0h3EPaXZpJJMHghGbDYdiqKwlVDQ\nat0oioJSh/GjB3f8+d3dAw6+AIn5ebqOHz/w3m3NjfTKCtV0muDJk4QvXSKzvkE9XcB/6hzR9TAa\nOc7oN16gsLWBUCujQmZ3c5Npj5WxqW5axQyWrgA6vYbxcTculxFZbrL03jWu/Y/XqVfapFqNz0jQ\n5cRsM1KKxahms1TicTQmE46BAdxjY51dsCxJ5D6m4HobxUikI8L1ZeBu/BBBuKfQ6VeO6PXrpJeX\nqRcKqDQaavk8m2++SeDwYfQOB0qzSeTSJUqRCJ6JCay9g8QKq1x6c5VCrs6xpwL0Hx1HunUNtSgg\n2myYg92svv42Sq3I0Esv3DFn8uZNwpcu0Wo00Lh8uNQi0XI7GBEF6PaIKLE1trer2EMhrN3dX7kP\n0dcZzWaTn/3sZw8s/343DA0Nsbe3R7VaxfAl6uk8jviqgxEn8B+Bu4aYf/EXf4F9n/g1NjbG6dOn\nOxHXbcbuJ1/fxqf9/0f1+vZ7n2d8q6Vw9eoS5XK1s3up1VLMzxcYG3NjsejY2tqi1WxSvHqV8qVL\nSB4PeUWhNTeHb3q6fbxWC8v16wSOHSOayRDe3UKrFWlITRouO7u7OxzS1SklCsQLBTIXL2Lt6cFg\nt5MoFsnU67j3a8apeh1RreZ0d/ennr9WC3/wB2NYrTr29nYAWF5OYTCo8Xgk0ukoTufgA63HJ+/X\nZ+FR78hDvb1srKzc0bqqNho7HBJRrcbjMXH4sL9jCKjXqxkZcREIfJS67+vrY+/ixTvm+GRbKNA5\nbi2fp1GpoDEa6T59GsfgINVak4bFT+7dWQwWI0Jik75eK5GtKqKgxxIIELm1wx/9+bdR6/RITRGX\ny0gg0OZSpBMFNi5c7wQiAEqmxubb7+Ab7KGWzbLx+uugKDiHhqhmMjQlie5Tp9rnK4p3dMFAu+wj\nqNVfWvaxu9vKzZsparWP7o3bbcTlau/cvyzOyIMc5/Kvf90O2FotlFYLlVZLPZ9HURQEQWD99deB\nNqk1duMGjkKBmWkfC5eMlIoN1uYLHPmzU5jHA1STCcrpLPndPXwj/biGBqlls9g+xtGQKhWSS0ud\nrFsjk2Ay6GXEFkLl6UIjlVDCyySvtzvosuvr9D75JM7Be2dIHuXa/K5xRt599126u7sZGBh46Pk1\nGg2Dg4OsrKxw+PDhBx7/sPM/TuO/ymBETZsr8r8Dibt94K/+6q8+dfAnL/jr9LrVUtDr3eh0HwlC\n6XRu9HotrZbS+Xw5mSSWSLQfhkYj7tHRjoGXTVGoF4voHQ4K4TBd4+PcKpXQ2vTIjRaCIOMxaqln\n2mqWbp0OQRBolMsY7HbGjh5lM5PpZAXcOh22UAjjfhfIvc7/xRcHeOMNWFhIEAxqOHrUz/HjwTse\nzA+zXl8WTD4ftt5eshsbnffUBgPVVIrE3BxaiwXf9DS2nh6mpnwEg1ZKJQm9Xo3HY7xjh2n2+Uio\nVAeyTo6+PnR2e0eLQlCpcAwNYfb724q6soxKq6WWyxG9dg1TsId6y4BKJaDRa8lubXByKkik28r2\nZhatTsXYmJsenw57d9cd1yTX69RKH3239AY1dpNAJRFHd2Sys4uGti6GweUiu7GBZ2ICncWCSq3G\nPTrK7sc6sARRxDk8/KVqSQQCFs6d62VpKUmpJOH1mpia8mEw3NtU7quEa2QEnc1GPZvt2DSEzp3D\n4HIRu36dZqOBPRTqZKGK0SjuXj1PnPQi6PuhIZHf3MTcZWL1/BUMJj2tSgmdVoXBaiG7vo57dLTD\nZ5JrtYOKsYqClI5jFFsMPTHJ1rtzNJoSBpeL1r7bd/LmTex9fQ9EJv5HfIQf/ehHB+TfHxa3Say3\ng5F/qPgqg5E/Bo4D/9f+6/8DuPAwB/y6cEbUapH+fgfpdPWA4GUgYMZq/YhzoSgKZp+P3MwMBkUh\nOT9PJZmk9+mn8c/MUIxE0JhMaAwGDG433adPdzQP5HodWZLQmc3U9tnxGoMBzX4rp8Xvp++ZZ0it\nrFAvFLD19OAaGWF3b+8zrysQsPD97x8iHi8jCGC36z8XoXBlbo6A04nGbMZgvz9TvUd9j/ciEbrP\nnsUSDHYUL2u5HOlbt9oy6sVipzXW4HDgdBo+1a59a2uLnmAQ38wM6ZUVmvU6GpOJrmPHMPp8WAIB\npGIR437brahSYe/vJ7e5SXJxEZ3dTu+TT2J0uynG4tTHHTj0JgZOTJNdu8WQXcvQaTuC0kIUyujM\nd5rHleJxxHoRm8NIYmMHj9+GUEyQKlVxm3qp5/Ptdk6hrbbalKR2wKEoNBuNjtOvra8PjclEZm2N\nRrmMc3AQx9DQF3IP7oX+fge9vTbq9SYGg/pA8Pe4cEY+fhznyAgT3/0uycVFBJWK4MmTtBoNYtev\nI6jVDL70UseJGWiLqgX8qBuL7C0s0zAI+JwevL/3NC8N9pC4cZ3i3i4Wn5fY7Cwmrxfn8DDeiQmg\n3UqsdzjuKDWWdDrkep1qKkX40iUalQq27m6svb2dQOl+gpF/5IwcRLPZ5Kc//Snnz59/ZPN/kjfy\nOF//Fzn+qwxG/vv+v689UqkKiUSJQqGO0ailr8+G1Xrv7oGxMTe1mszWVg67VY3f3MDnqlFOJDC4\nXBT29ignk8iyjEqnI/b++2Q3N6mmUohaLb1PPIF2f7ftm5rC3t9P1/HjrL/6KvViEcfAAMGTJ9tm\naIqCSqfDOzV14KFv7e7G2v2JDphPiDt9GtRq1V39ZxRFIZut0mqBw6FHpbpTV09ptYjPzbF3+TJl\nUURjMOD9HF0ajwpaoxHP+DiefRn1+Oxs+32LpS2HHomw9+GH+I8cwfKJlt1PQqXREDx+HOfAAHKt\nhs5qRWtul0/04+MoikJua4utt95CbTCQWV2lViigMRpRZBm5XsfgclGStehtXRjcvSTVZuReGyax\njpCLolWrcAwMUE2nqeVybb0Qo5FiJMLmW2/RajYZP3uIVqVAJbxNIZ7EdXwce08X66++indmhtzm\nJtVMBq3ZTKNSwTk0RHJpicLODlqLhXIyiUavxzk0RODIEQwOxxd+Hz51TVUiRuOj1WdsNltks9W7\nfj8fBha/n+CJE+hsNnRuP3N/9xq5ZJau4V409RxyvYY1EKBRqWD0erEODFI3ehj91jcJPZUnl4sT\ntJvJzl5EqJfQmQyIPd1Er13D4HCg3/eRcvT3U0kmkRsNXKOjyLUatWwWQRQxeDzUrVY2XnuNYiSC\n1mwmMT9PYm6O0DPPtAMkWYbPaM3+R9yJ8+fPEwgEGNoPzB8FJiYm+MEPfvDIjvd1xePMYlLuJRv9\nWZAkmXC4SC5Xw2bTEwiYv5D0bjRaZG4uzsWLYTY2soDC8eNBvvOdsTtM5e6GfLpI9PIFipvr5HJV\ncmVwDQ/hDflIn38dpSVTTae5+dOf4h4ZaWuMlMvoHQ4GXnqJVqOBJRhEyuep7nuZKM0mgijiP3oU\njdFIo1RqS3v7fF8oca1clrh6Ncrubjs709dn59AhLzbbwcCsGImw/tvfdoSToM2hOPxnf3ZPqfDP\nwu26/MMgvbrK1ttvtzkdGk1bSr9SwTk8jGNggNBTTx0gtn4Wavl8m8yo1WJ0u8ltbbH97ru0ZBmD\nw8HqL36BNRRC0tpJpSrYPDYsM2dZ3aywsJikUof+QReK3KAlSZw6082QX6Rya64tUCUImPY7M+Kz\ns2Ru3QLa62kfGqYQiaI1GlBrNVQzGZKLixj3HYBj16+3HYK7u7H39ZFdW8MaDLL74YcUw2EMTifB\nU6cQ/IM0bN3oDFq6uiw4HI+WaCcIwkPd9wdFOl3h0qUwsUgejVZN/4CTo0cDd/19kCSZtbUs6+sZ\ntFoVE6N2zEIFRW6gt9vvUJNtNZtsvH+RVEVD8tY6a2+9S7MuUWu0GB72YlYKdJ06RWZlha6nnmen\nGWBzLU29VsfqtHJoxELhzR+S31hDazJRjEYZfeUVjG43tVyOZr2Oc3gYuV6nGA6jNJvobDb8hw+j\n1utpShLlRIL43FzbJykaRVCpcI+MsPP++3gPHWLiu9/F2t2Ne3T0y1ryu+LLvu+PAn/+53+O3+/n\nL//yLx/ZMZeWlvjOd77D6urqIzvm44r93+e7/kh/1QTWLwSy3OTChTC3brXbZgWh/WB88skeKhWZ\narWB2azFbn/4H9WtrRxraxlWVz/q/5+fj+P3m/n2t0fRaO6dCq1Ht9l98zVSuzFyhQYmfxc7+RI6\nYZJEvIC/2wGiiNHlopxKYevro7y/q5WKRRrlMnqbjVu/+Q1ak4l6oUCzVmv7l8Tj9L/wAt7JyS+l\nPry8nOoQWs1mLR9+uMe1q2FmJuwMDVhxuszobTYq6fSBQATate/Pi2q1wcpKms3NLHq9mtFRF/39\njs8VmBi9XnR2O6JKRfTatbazrV6PzmKhHI+z9utf4xobQ2s24x4Zuav/TDWbpV4sUs/nKUajbeVS\nRcFz6FBbnr/RQFCpaO2TWpOJMhmphUUrITWsvPV312gYXaTTNapSi+XFCN94rpcbH6wyPWbh0vuX\nCXo1mExaUBTK8Tipmzc7ip+311Mq5KlnUuy+M0+jXEZrseCdmkJjMuHs78fe14dUKqExmdh44y1U\nBjPoTW2ZcqHdtpJu2fnwp3MYggVMbhc2m46nnw7h999dc+NxR7PZ4sN315k7P0+9UESlUZPa6cJs\n1jIz47/j83NzCa5fj6Io0O1Vc+G/vY1dVcNuVaGzWgkcO4Z7ZKRjVFfKFrkyn2d5u4a5GGd5KUNv\nrw2dXBTTiQAAIABJREFUIrG9meHQIS8mt5tWC5J1AwvzO1RTSVqSRDVrJX2zyGGPta2s7PHS6j/K\nm+8nsA7ZCQV99Ha3y2uFnZ3OOdbzefYuXOiQkddefRVFllHr9QiiiFQooLPZ6Dl7FrVeT71QOFgq\n+kfcF1qtFj/5yU94++23H+lxh4aG2NnZoVarof8cejy/K/idCkZu16pisTLr65mO+I+iwNJSEkVR\n2NnJY7PpMZk09PU5mJi4u2z3/cwVCoUolxtEIu122Far1SaJNlqk0xUKhXqH+S/LTVQq8cADstlo\ntLU9VlfZ281RLUnozSbGT0+iESSis/Po5RCtYFfb60RRaJTLFMNhQs88024vbTYpJxJ4xsZQ6XQs\n//znqFQqDC4XUqlEbmMDo8tFQbGwtZVFlhV6e22EQra7pqg/b71PlptsbeUAsFh0vPnmJjQlguY6\nscYucthAqM+Ga6C/zVsRBFK1Wqeb5377NT95foqicPVqhKWlVOe9WKyEIAj09392aeGTxzPY7YSe\nfJL0rVvt7IXT2VGYTC0vo7fbacoyyfl5PNPTjHzrFfItM7FYmXB4h26DQG1jCR11BKWJ3m5Ho9cj\narVUUinKiTZXW63TYXC56Dp1inhWpstmoJRIoekboLQepiEVqWe3yCZU1AsVykUvBk2LRqlAYm0T\ni7oLk8nZOe9SLIZvepri3h5NSULYL3/ltrfJbm5Rdzqwp1KEL15k6k//tF3yy+URBQHFYCZfgUY2\nQ11tIp2qYHfocQZ6mFtMUc5X0PvbgU4+X+eDD+b5wz88e1/364vE5/muplMlbl1Zohxv3we5CpnW\nTZZnXUxPH8wcJvcSzH6wQjFfxe53oSkkEMo5KsioFTXVWpPsb96kL5PD4nWTV6mQakZ29krsrmeY\nGrAgKC12d/NMHfJSKlRpKrB2/iLR9Qjl7irxaJ1yNgelDOauLgRtjVaoF9RacoZu3vrtOoqi4DeX\niYXzOL//BEYhicHtplEud1yRsxsbncC4USqRV6kw5XK0ZJliJEIxGsXodGLt6aEpSRg+Jnf/IGvc\nkuWPjPFMJkxe7x2OwZ91jAed80HxRY3/4IMPcLvdjIyMPNL5tVptp6NmZmbmsb3+L3r871Qwchvl\nskSj8VErZaUicf16jGy2RjxeolqVOXkyiCwreL1GHA4D6XSFZlPB5TKg1d7fsgiCgNdrxGrVddRJ\nVSqB3l4bVqsOg0FNJlNlaSlJLFbCatUxMeHplG8q6TStVotqvoBao+XoK08gF7LsvPUm3adPMfbc\naeJXLiKgMPHHf0xmbQ2t2czIt79N3zPPIJVK3PrlL9Ho9ejsdlrNFoGTZ4heuoAgijgGBqjl88R3\nElzaSFMutzso1tcznDgR7PiYPAoIgoDJpKFel0kmyxSLEjMDKlQ7t1i6sAyyzLEnhug7OsngN76B\n2e8n9bF2XkvXnV0h94Nstsr2dv7Ae4VCnevXo3i9pnb24C5QFIVMpko6XcHrlQ4QcC1dXRg9Hmr5\nPKVIBEEUye/uIlerqLu6yG9v02w0yK2vE7m1x2paz6XraZzmPPPXb6BR6jz54iGyF98itbyM99Ah\nVBoNoaefxt7XB6JIJZEgevky9oEBLNl1dt9+n/xehME/DWK1aEhEM7RqFRoVVdsdVythtRtRqwRE\njQaBjznI1mo0ZZliIokpEEBptVCbzKR3ImwtbKDVWamW61g0Ina/HwWIRstsr8YQRRG9MYt3aJhS\neA+T1USrViG3l8N3zkl5t4Zar0Nr+qiLpliUkOUWavWdD6HG/sPxQf1nviy0qmWaterB95otlHrp\nQCCSWVsjurxFem2LfKaEWQgiOirc+s1vqWRzWNR1jB4PM3/yffI1FcVoma10AUVwkdgMI0oK125I\nTD97hujcIga3G0e3Gv9UHxd+8HNUag3Bfh8f/OLv0drsWKxG5EoFq9cGrSZNi5elpSzFTBGr10Vd\nVhg/PEg5sks4ukQtncQ5NITGaiezF6NeqaG0WjSqVQSNhnouh6ZaxdbXh1yrYevtxejxYOvpQWsy\nHeA+VXM58tvb1AsFzPvdZXdTzG02GoQvXSK9ukprX9vIMzlJ4MiRfxCdOfcSOntYTExMsLS0xMzM\nzBdy/K8DfqeCkdvRmNWqQ6tVdUS40ukq2WyN6WkdW1s5ZLnF/HycQMBMPt9WDw2HC7RaCm63kdOn\nu/F47uxUuNtcvb02hoedXLwYplyWMBg0mM1aTCYN+Xydy5cjxGJtZ8xMpkoyWebFFwfa/hr7WgSe\n8THcWgOxa1ex+b24hoYo7IUxu12MvfIKl/7Df6Dne9/jxL/5N6Ru3qReKpFcWmL3vfcoJRKodTo8\ndieKRkchU0DTM4Z3agiN0UglkyFXUqhUGp1zbzYVlpdTDA05Og/hRKJELFZGEAwkk+V7Xn8mU2V1\nNU0sViIYtDDUb0Ws5ugzZnHTpOq2EovZcKljzF6+jkHXzgjVS2V233+fruPHCT31FLaeHirpNEaP\nB8d9RtKfjLibTaXTDq0oCpFIkXC4SLEo7ZOJ7YyNuQ/wSSRJ5saNGKVUhlY+xaW1KAPTg/SOf3Rs\npdXCNzNDq9HoiJjZQiGMDgfJdJa8YmZ7V0JMVMmXRZLJCsM+M7FMkZmzY7QqRaqFUltnolBAZ7EQ\nn59n5l/8C/YuXODWr36FJRikUa+z+urruIeH0Vht5GcvEvQeJrdXxufpwuVR43X0Md5v4OhhPzuL\nGww9cQJ1dofrH65id1rQCRJWnZNL77yO1arFN9KPrrcLWVVEkkWKuTJGixHMRlR6PZkiSHo7NWmP\nSqlMKp3m92cmGRkfYm92kfE/+A6F7S3UUgm7w0HN4OmQcAH6+/vvCESkSoXE/Dz5fbE0x+Ag3kOH\nPtO/5jZWV9OsrbXJ04ODDgYHnXcNdu71XbgfmA0iw6MeriTzne+NFiMjwx9xPxrVKvG5OcRmk0C3\nnXymhE6USS4ukouncdj1SPkcrWiEajqFEZHN5SQ3t2qcOWsnm6lgspkoVtR8sCgxMf0Ew98Yw6KH\n7Pu/weG1IysqfC41/VP9lCQ1GlULpdng+KmjDAZbRDUyGsmMSiWQrWspJjI0NkrMbm1w+EiAwt4e\ne1dv0Pvs80iKBuv4DHuLt1BKORwT06Rfe4taU8Tv8TExPY2lu5tqMolreLgjelYuS0Q3oyz/+nWq\n6RQejxGLtW2H0PvEE23F3Y+tcTESIbW83Gn3btbrJBcWsAaD9yR2P+h9etjunS9ifKvV4sc//jGv\n7+vEPOr5JycnOx41j+P1fxnjH+tgpNVqd2aIovBApDmv18TEhJulpRSS1KTVUjh9Okg+X+2oh94W\nrtrczLOxkcVi0aLTqWk0WqyspHE6DffFtFepROx2A//2354iEikSj5fQaFRcvRpBEATW1tIHJNLL\n5QbpdAVRFGhhoNUCSyCAc2gYRZLQ6rTc/MlPyUViOHr8PPnv/h0v/vt/T71QoBAO72dAmoiiSCWT\noZxMond52F1awzF5FHtXkJ5Dh8gns5gUDZ7JSdZKZhSlXUpSq0V0OhU6nUguVyMWK5HP17l2Ldop\na5lMGs6dC9Hba7vjestliXff3SKRqBDw6NDkdtn4zRaxix+gUqsoZIoYfEGePfMchb0Wel17DS12\nIxpFolGp0qrX0e+T7j4LstwiEinsS4BrCAatmM0fZTEcDgNut5Hd3QL5fJ3t7bbAVF+fnVSq3AlQ\ncrkaDoee8XE3lUqDYjTO3vn3yEaTAOxcusaL/+qb+CfHSN+61XG7NQcCeCYnCZ45Q2Jujt0LF8jU\ndKTzdYweH5EM3NxIYTCocPtsfPOPDkMhSXa2vXs1ut0YnE4ElQq1Vkt2c5PNN99s+5SYzWTX12lV\nK5TicfQ2J5LcYiykY/TwKWJ7WcRGBU0tR+Lt9zBOTTAyOoK3P0gxHyDkz+Cw66gWS4iNCn1DHmav\n7LJ08wqTLxmJZVr0PvEE22+/hVzKEd7dwH3oEKmcTMPgZuaVF2hUKsTSTer2Hm5du0gmVsJkt+Ac\nmMTdY+XUdDdLYZFKrc29crmMdy1tJubnO11I0PZmEUSRwJEjn3mPAd57b7vz/YtGizQaTQ4dunfm\nrlKR2pkd/f3/jOntdiZHbJiMY6yvZ9HpVIyMuBgc/phLb6WCVC7TrNeZGu2h2QqiERrIcovQaDfV\nZIwm4B4apJqMs3vxKpvXbzE4EiLgGeDs8+NceGcZs16DfcDP8JgHVSUDlSqFZAb/ydNYR8axWfW8\n8k9VJFNV6nWZvok+yvPvc+XvLmBwu6lgwW9zoFQ1SC2R3dl5fG4tQquBpTcEFieJjV1Czz2Hq6+H\nnSvXScWLVG/m8IYm6Ds6icrpw+bRU1heQGezYXC0uVSy3GR2NoaSiZDPVSlkJDLpCmPjHpSbN9E7\nnajUaoxud8eBuprJ3KHa3JQk6oXCZ3aZfd1x4cIFHA4H4+PjX8jxJyYm+Nu//dsv5NhfFzzWwcgb\nb2wQj5cRRYFQyPapjPfbuF2rUqlEjh3roqfHRqFQp1Lp4ubNJOfPf0T6cjoNeDxG1tYyiGLblKvN\nK2lnO9xuA2Njn84nuT2XoijE4yV0OjXz83EKBYlCoY7JpEGSmjQaTYJuERVNaooOUaNldjZOtVyj\nsL3Dock+tNYKtXwOi9fF1tvv0JRqGG1G+p56ivClS0heLx69HrXBgNnnQ2sykY3HGf3Od4jOzrNx\neR6bz4Z9bBJVcIiGK8DyTYmNio4BsxuHy4hqp4TDYUCWW+RyVbxeE6+9toHZrGVhIUE0WmJ42IlW\nWwTcLCzECQYtdwRkmUyVcrnBYL+VoBilkUmi1qkZfOEFVn/1S+rRHUS5SmuvC7fXj6fbQ1Oq0xs0\nQ72ALRRC73QeWMNPg6IoXLsWYWEhiSy3EAQwmcp885unOnosarXIyZNBRFEgl6vicOgZG3NjNLY1\nKS5fjgDg95tJpSrEYiXGxtwU1lfIRpPIBgF1VSGbzLN16TpGm5m9Dz/sCEnVslmMbjcDL71Es15H\narRobcYxuqrYRya4slNFo9Pi0CsIUpjswnXCV64QnBwhvbJCa2CgLZoWClHY3aUcj6PSaDB6PBg9\nHkxuN76ZGSx9g+zltSzd2GVvrsTkKRGtEifxm19gdVrxjc+gcXrx+a3IOjvvXl+FJpwaFyhv3KK0\nsYos6unqCrG5WyaZLPPzX+7wzVfGOfWv/4y1y+dxaPT0PPU8P/77DWav7CKIIofPTdLVYyOVraOt\n1igks1TzeRxaF+VEDaOi8NILT1OoKKhUIg6zSHRvFZtptFOKkcplcp9U0FWUtpDa5GTHePFe+Li5\nW7OpsLKSZnj4Tq8maAfE8/MJlpZuYbF4GRpyMjHhuetnPwm1TkfXoTHU4hJ9bgVRq6Wo0WDt6mq7\nFGezyLUa5kCAws4OreQux4ccaL0DJAw58haR5KYZjVqgWa8jq00IhRS1zZukDFVqaz6muxQG/3AI\nwWxHLhXYefeXpAeGKfhDZLtOI4kOElEF7c01iqkMelULt65M4fwCO3u7SDt77NxYInj8GIVslN4T\nT5Kta9DJFoang1jdNvLhKI6gBWuol53dNGtLUaa/933e/2+/RFeu0vD7eP+9TQr1Hb79Z8/i9Pux\n9/d3MlyZZJFGeJ387EVa8SS9w0Nk8w1K2TzF1V1MPh/FWJxIRWboxBMYu0MY9caOPs1tiGp1x038\no1vfJlVXMxnUej0pSTrgd/VZeBw5Dw8idPZ55v94ZuRxvP4vY/xjHYxsbuY6/724mMRo1HDkyP1F\n4CqVSCBg6Zi85fM1Dh3yEo22A4dnngkxOelhfj5BoVDn5z9f6QgrKQpcuhQhGLRisdw7zWy16gkG\nraRSZZLJCtWqjCC0ZaudNg0D+gTxdy/QqEt4B3vQjR0lU5NpFrKktsO8cWuDI0+OcXzCRqNapV4s\nYvb78c/MkNvcoJbLoZmcZPvNNzn0ve8Ru3YNwWBm49IsNr8XR3+I8e8dYmm7wfkrWeSdHOl8klOn\numk0mmxuF8gVJA4f9vPBB7tcvx5leNjF9naefL7GE0/0ksvVKJUkIpEit79DxaJEpVhBziQoJ5Oo\ndDoalQrh9TBDnl4MxW22LrxPcW+PyOXL2HuC9D71FDqbnejSLZS1HQa6ezn7rTMUtzdoVSvoHQG6\nz5zp7LQ+C6lUhZWVdCebpSjtktvOTu7ArtnlMvLCCwMEg1ZWV9NIUpNUqorHY2RrK8fQ0EdEz2JR\nolKuU8/nDsylUgkoUpViNHpQ0ZL2jrCWzeIeHSW6cJPY+i674RKJV2/Sf+oo3vGT9DqblCKrNHN5\nnEE/Fr+f0NNPU47HMTgcbL7xBlKpROTyZfzHT9AS1bz3X37EoZefRahX2M1pefs3CzSbLXrOnGVh\nKYtFVBh8/kV28wbmV1JULq3Qc1jFzIu96DQirUadZirN1f/3B6hooegt1IRlRl55hZLWTEVW8+Mf\nL+H5n09hCg3gdTmJJGqUyzKiKJDNlrl2ZQ9UKl55ZZS8MQXVIk6XEbu9zRnQ2+14Ag48QOrmTfYu\nzRPLZmmtrRE4cuS+ZcUfFLLcQpZbd5XCmJ2NsbCQpF6XkOUaV65EUKnE++ZAGd1uQufO0SiXEbVa\n9iIR5FqN6LVrHWdlALPfTzEWoyKr2VzNYgkeorCTR+dVMBpFaqUael8XG6+9hs1hwu628eH//f9g\n9jhQ6Y1ojEbGv/kyytHDqOweVCYrXq+RosZDOtfg1de3CN/cwOYwcuKJAY6HtFTmZtGpBTRqkfzW\nBlpRR9DepM/pwnbiLLV4mOv/5b+ye22WRl1i6IXnGP2jP6ZRltmJVFmLCVTSFeTwTYwaGya/n52F\nNVSmLMr+NZk8HjLLSyy+9i5WoUxscYXk6jqT33qZSqSIXrfvw7SVYydbpph+H/sJNS6nHruvi2o8\nAorS4aWZ/Qe7kBKLi8SuXWt3yAkCZbOZnkAAve3OTOvXAYqi8JOf/IRf//rXX9gcw8PD7OzsUP/E\nb88/JHyVwUgA+CUwDpiA1r0/DpubWaamfJ9aS/60aMxg0PDkkyFGR91IUhOLRYfT2eZGBAJmbt3K\ndNxBJamJ3a6jXpfJ5+vIcrszRqUS8XpNqFQipZKEz/eRP8TRo36Wl9P09lrZ2Sng8Zjo6bFCLoYm\ns4XYrNGSm7i9ZrLrc2jSOZw2M2MvDxHbjNJIxWhpfPinp9npfZdKNovJ4yG1sozWakWdziLavZQy\nOQStgVJFxtUfopjOo602MU6fpJzaQlZbuLkYYzcu4fOZOXasi0ymSrHYli9vNhXGxjz09dk4f36X\nXK5OoVDH7TaRTFb2Cbg+NBqRYMDE5o1VUosL6FslRFHBPz1N13AvDanJxtsfgCwRvniBWj5PORZB\n1OmwDw5h8vvQ2t2sXt9i5PlzDB8+jNioYnS7sQSDiPsusZ8VPddq8gFfEmjL5mcyd7YBq1QioZCN\ntbU0hUK9s4EzmzUHVG0BNFo1zp4uYuth1FUFhHamzOZ1tJVWXa52V1S12m6BFAQQRYrhMHK5SEUw\nEc+XELQmwsubHJ+ewmPTITZs7Iky0aVFKpFdPJOTjH372+2dts9HZHcXUadH4+9j9/y7GB12mi2o\npHPc3NSiILR3kisruI+epKz4MMwMsvyff4Vcq6HS6CjXBOZm4wyOeDE3tdRurjH27Fl2r86hMhip\nFUGUykQTZWwWDd3dLvLZElNPnsVMgSu/vUhPPU/v0QBZuhCNVia6FcyNNF1np/D7TeQ2N0FRMPl8\n+A8fRhAEitEo4cuXadbrOESRWibD3oULHa0No9tNbHYWFAW9w9FWqx0cPJAVyedrpFIVBEFo8xPu\nEegHg9a7ko+LxXqHsHzb10lRYG0tzcSEG7X6/oiUgiB0sgR9fX3EFxZILi11dv2KopBL5rBMn2Vl\nJc3W1h7yWpaRY09j0TUJ9dpJrW9w690LSJUqE89Okby5jNluQlCpMNlMlHIlKtkCQu8U0UQZZWcb\nt7FGQqiytFElsh4hHU1TrdR49ccppv7P72Ku1cFgxDXgwD48iqg34p8YpVWvYrIaSV27gFQq4uzp\notlskrp5E9vFCwz9k99npyrSEgRS4TSHnjtB19gQogh9IzasLRMtSSJ67Rr+o0ehlGFoIohKrcFo\nMbJ16QaZtVt0j/Zi93vIJnIkEmXUjRaVXAF3q87iqsTzTx3FNz5CLZfD6HJhCQY70vTQ1tVJzM9/\n1KqvKJiKRbIbG/ddsnvcOA8XL17EbDYzue92/UXMr9Vq6e/vZ2Vlhenp6Qce/7DzPw7jv8pgJAM8\nB/zsfgdoNKq7OnneD9Rq8YB3CrRt4avVBocOeVlby9BstnC5jB0r9my2ynvvbVMsSthsOgIBM4WC\nBCjodGpCIRsjI27MZh3Hj3fhdOq5fDlCpdLA6TQgpLfxe410+U2oLXZqqV12zy9j6umnVoxz8Sf/\nHa3NgcZqozxqQSoWCRw7Rn5nB425LfOudXpIrG6i93gRVWoiN2bZ3khhNOvxD3Rj7+5i9laevKSl\nnK/RbDRwO/UUCrUD3bKtloLR2L4utVrEZmt3AEUiRQ4f9pPPV2k0Wuh0Kvr7HUT3Urzx6tugNdI9\n4GXMJ7H805/iP3oUg8NBNREns7YOTRmD1YJUFqnn80j5PN4jJ8k1DOhUejYSAkavl/HDD95CbTJp\nMRo1nS6g2/B6706utVh0PP10H2trGdLpKoGAmWKxfsCGXqtV0dVlJeQ5gVDNE98MY7NqCfb78U1O\nUoxEiF69it7pxD44SEtvRUZNDT3NYgy1SmSg30EuVyOXq6FWC/g9WlQGI3qHHXt3kPjVK1SSSXbe\new+zz4eo09FqNlHpdDTdfawt7JDYSNBz6hSFVJJKpUVdatEoV0AUqKtMSDtpzCYtV6/skWsYUeVi\nGCxmFLWW5asr+FUOYuvL5K5+iN2hZ+i5p9CYzdgiecKxKgmKaGoZRnucRFd3KPeBXEmRWl0lshlH\nbVjlqX/5R+RiW4TfC2NPu7G47HSdOIF7fBxaLQxOZ6dLopxI3JExapTL7axRLkejVsPR309mbQ2p\nWPzoOPsIhwucP79DPt8OFJ1OA+fO9baJ3MDQkJNIpIiiKHR1We6Z5bibfszDit3dDsCgrUWys5Mn\nFt6kp+ljayfH8IibbKrE4uU1XGPjOIc89E4IFJYXaLh0+IYGSF29hFQo0mxIWCwGNCYzs+cXESed\nbM2ukgtHmTkzim/czY0fvIHNbiK9K1IvVdHYDGxupjj+J3/CzrvvUC1WiMwu0HPmLBvXbxIYHURJ\npIgvLbeN9xDQmM3kwltU0mnMDhs+k56X/+XL1OTfZ+1mjPfeWsbhMuF3aTA7RFZ/8Qssfj/FcJjN\nt9+mUqhQlNSEzpxg7OXnsJg1DD51htTiIuVMAXm/I9FoM9MQdCiKRDwjM3Bm+FPX8ePtxh/HbV+m\nryN++MMffmFdNB/HbY+ahw1Gvq74KoOR+v6/T8XHy5NqtcjYmPuewciD1qrMZi2ViozFomVkxEky\nWaFWk9Hr1fT321ldTVMsttt1XS4Df/M3c2xu5nC5jAwPC5w+PYXVqsfvb/+gDgw46eqyks1W0elU\nFBeTpG62xdAMJh251TRTT81g9PqY/+nfI5UrGG0WBkf9LP3wh3QdP465bxCN3YXeYsQW6qMpaFAN\n9WPXGVHrdEhViWqhhNZsZmtpm+BTz4DWgKgXyEUTTEwF2YnWAIFGo/0Q9niMeL1GDAY11apMsSgx\nNeUlm61hNGpoSXW+/WIXTq+VTD7F5maKW8tx3JMzCIUEl3/2OvlhB4e61Nz6xS8Inj6NymBAUIlU\nkimMXg96mw3X6CjWvgEMvQPko2UMvSPsJWqdroUHvV9Op4HpaR83bsSoVmVUKgGbrUpv76cr2zoc\nBk6cCH7stZ5r16IUixI6nYrJSS/BoAVBsPL0v/ojVubm6PJ4MLpcpFdXydy6hWNoiNzOHhf/83/F\n3j+IaWiCkn4Ln7vttOt0GRgaclKvyVgcJrx+GxgsSGKOgZdewtLVRfjiRTRGI/7jx6nE44S3t9H3\njhBJNVDECka3l71YFa/ZQDm9Q9/EGIlYAZXBRDKSwm4P0t8vIjRlYmmJ6WNnKCxeI3JzDYPVQnwp\niVnbxDE0yNxv3qU7Xab33Dl8p88x1BtiNFNld05k/co1Jk4fIhvbwCbVeer732D5+ialUp3S1gbl\neAKv24FOp0YqlYjNzjL8e7/XMcSTJYlSNEqz0aBeKKDW68nuGy8iCAiiSHxhgUo8jtZspvvUqbb+\nzb6wG7RLLjduxMjn23/ut8ttCwsJnn22HVg++2wfmUy1cw8/7e/cYtERCtmYn09Qr6fQ6dwIQjuY\nud+sCLR38AgCequVra2tA63IuVyNcLiIRStgFsrsvPEae28oHHtiiKFQPxmpTqPRwjIQpO/J05gc\nFqqZFA2fl9zcAihN5Fod76lzOEYmWdvcoxrZpac/QEPUExzwoTVoUVkdDB7X06xV6BntJTQzTsOY\nZfBP/icSuymcpQKZ+WvoK1US+TjOUBDX4ADRuQWkuoxR26Lr+DG6jh1BFjW8/84aG2tJIjk1Hr/C\niXMjzP/2fX76H6/zrT8+gtrZFvRb/dWvEFUqRLmG22altLrAyMsvoTO31ZrLiQQaJPQGNbJZh29m\nhlip1Sk/34ZUqdDYF83T7nteaYxG1AZDWx14H6l6na771DWBx4vzcLuL5kFKNJ93/pmZGW7cuMGZ\nM2cem+v/Msc/1pyRs2d7WFvLoNG0Ge8DA4/WH8PpNHD0aIDt7Rxnz/Z0duADA3YCAQvr6+2WTodD\nz+xsnPfe20GjUdFotKhWqxiNCYaGnJ1gBECvV3cyMOrBQXLb26g0GsxdXXhqNXI7O5S38hRW5uke\nHMTqdSEXsogaLdpAH+/8f79k6/oS/SenOfzMEZIrq5h9XroHBylFI/ScOUVFXKSaK2INeNBa7Yxq\nkavxAAAgAElEQVSHXKg0BRxmKDUUvF4LQ0MOFhcThEJ2AgETigJHjvi5dSuDUajhdgqc/F+maBZz\nJOeu04wXKG4ZSGLm0gc11tdyVLJZDk246BoIsru2yfEjU+xdvMjGa6/R//zzGOx2MqurHSt6x+AQ\n5qlT5LUBaj6IJ9rBjs9nvnPx7xOHDnnx+Uzk83V0OjW1WuqBTPl6e+34fGZKuRKCXMXitHR20Vqz\nGUtXF66+Pmr5PNmNDVRaLSqtlp0bi5RKTZrhBJmmFWOugea5F/AOjCDubBAKKdTKNUyBLuRMDINq\nj/DaKjVJoiXLTP/zf045m0Wt0eAeG6PVbLK2sE1DqmJ3Owk9d5rNDy/j7g+yfXWOPinNmRemuTaX\n4Og/mcDuc1LNbqIVdVhdNop1NVg8iKLIQMiMtLxEXq1D7/HRd/oo3VOTrKX1hF/bxT+uJ7e9y/iA\nkZmZAKO9sL28Cc0mol6PKrnGoWeeZ+/SVXxeI1pVnfzuLlqzGalSIb+9jS0UopJIsPHGG6RXVzHv\na6/ktrZoms2g02H2+doicNUqCAJyvY5ULoOi0JJlmpKESq2mUpE6gcjHkUpVO+33giB0BAI/CzMz\nPlQqkcXFAhaLnqEhF6Ojrs8eCNSLReILCxR2d2lJEiafj7rLRdfICIVwmGa9TqkooVKJ9B8ZYfmD\na5TzJWrlGvF1HbpknqHv/gnBLhPx2evkNjdxjoyy/PO/w9rdg+bwFIVIlEa1jjXYRcFgJbY8h294\nkPXtIqZyCv9Iglf+2RmuXw1jNHhppcIYpAw2Ocn2doWdjTTz71xFqFc49eQAIVOV6NWr1DNpgmfP\n4tmOsvHu+6hk6D96Et+Js9yKCaQqWnR2N9nZeWp5BVVVoXfAw+5yhVJDy7GXXqAS3kMURbRmC+r/\nn7s3DZLrPO/9ft19et/37ulZevYZzAxmww4QIEACEClKlC3Tpn2vl1u+cRJ/iW8lt8qVLzeVKiWV\nOFVRua5Tjq+s8MqSbVqWLIoUJe4kCBD7NgBmX3vf971P98mHBkekSFogaYmU/1X40N3n7efMeQ/e\n8/Tz/N//X29ALrVpSxIKhQy5XE5mbQ2L349MLqfriIvFaIlU24YoigwN2ejp6fA+0mtrxG7fplku\nI2g0uPfuxTE2hsZiwTU5SfTGjU4lTSZDa7Nh7e9/oPn5vOHKlSsfq0XzaTA7O8uf//mf/8LjfF7x\nuU5G/st/+d+w3Dd2KxTGEIRDuxnX9n32/s++fhcf9fnPvlarTQSDBWKxICqVwOjoIAMDNhKJMO12\nFug8vGKxEBZLjXJZT6FQR6FosLW1Rbs9+qHff/v2EqVsia7pw4jhNa489xyleByfx4Olrw/NcB81\nhYTHbKAUj9NwuWkY1Gh1AmqrjVStwc3FAFN7xlCYbBT0aqLFLTweH4f/YIpALEY+XaBUaWF3Fhnu\nLpNxm0ik1Hi8RjKZGIVCnkBAhsWi4fbtVfbt8zLbU6OaTBJa2qEc0uPQamkmwuyEQpTSBVq5Bo9+\n6SkSoQ10PjV3r2/z5a/sQWxlyLeaSO02+UCApRs38J88ydmvf51KKkUOsE5M4BmaJrWQpFQKYjAI\nHD06hcOhe+D5+ll0hOUMu+V8+PgkuHI4QOLWLRrlMoJW21k476sovns+9ZpIq9VGLgikVlcRKxXa\nyBBFCSWQj6aoZvNYj81g8Tgox+NIQC2ToVFKsvr664i1GjK3G6ndZuO11xh94gnyOztsvf46OoeD\nua9+ka1QDZXDTXY7gGtqkmyxybE//vektoNYdDJ+/98fpt5SIG9WSBaHiWUlTp3x0JYpufd2jIe+\nOIujneSNV8OIGguBS+vsP3uQvLabUD7PzlYC71AVnbxGuShxYEBH4uoFTFot1uFhdDYbvfPT1CNb\nDMyPE7hwgWIuh9poJL2ygt7tphAKkVpeJvjOOzSrVeSCwM4bb+yaMeqLRTwzM5h6elCoVFiHh8lv\nb9OsVtFYLB3lWrt9t9qg1SrR65WUSu+3ADCb1T/XLuHDoNOpOHDAx9SUC7lc9kC7aACazRaxxRUW\nn32WyLVr0G5jGxlh4qmnUPf303/qFOm1NRqaNNZpO410kuC1W1j1RioqPSqNEo/fjbO6QfbyKmKt\nhlyppFKqofX4qK0u03vkCAqtlkalhmlojOvnInhH+nD0utnjV1EK7KAJ3sQzNsHY7+zl9k/eBlsd\nr1kkv7PD5Vd3ULu66JudoBCL8c6rd/D+3lEMXV6MfX0kohnGf/O32fu7/5ZGoUhZsHBjuUwkVeD6\n7RRTMz5MXjeZnTCBepSeOSulSBiZOEby7l00Rj35VB4NKtpmL2aHGZtJidpsphgKdVSeKxVUJhN6\nnYIDe/toqs0IGi1utx6lUkH5XSfg+5LyrXqdyNWraKxWjB4PrslJdA4H1XQaQaPB4PXuVk4eBJ8n\nzsMnadF80vizs7PcvHmTvo/hffUvGf+zHv95SUY+tCb79a9//SMH/Owf/Ele12oiL764hiSB291D\ntdrkzp0ESqWCo0d7OHzYwqVLYdptCZnMAhRQKjtun7kcjI7aMBpVH/j+hRtBzv3wHvl0EeoV+txy\n3HIlNUlLIlHGPKBidHqG3PY2ocuXKSZSeOfmkIfCjB/eS6PRZnNhnWYkjVMs07VvH3dvJbFrzaRi\neWR3F6iks7j37MFs0XP5h2/T1efGNX+MdK7EwkKca9diNJttdLoAU5MubAYrRkmAaonkzWvoNRrs\nXjeVZBK1yUS3y0UkW+HOrRvYR0eZHPPz6stryNU6VEYjE3122NnB2t9Po1Ijc+cukUSC+sgI41/9\nKiNDQ5h7ewE47TZSrfai0Qi7ZOOPOz8fB41Kher9nrTWbn/fwldJpwldvLi7cIrVKuHLl9GYzRjc\nbrLZKrdvx2k0RIxoSd+4iiBrUkvGkAta7BMjJGotVDo1ap2KViLEyg++D3R2lxQjUQZOP0roynW0\ndhuGri6QKToaIpub3HrmGdqiSCEUwr24jvnUb5IIplh+8XV6Zie5eS9PrVCgx+9kfs7E63/9Peq1\nJjaHgarKwVrRQl2m4atPTRIVSixfWuL0lyZxDvRSrrZRhrIo5G3WVtPUSg3OPtKHVZ1CdLbROXSo\nXSasAzkSd+4QiEYpxmLM/Lt/h9RqYR0cJHrjRsf9N58nFwgwcPo0sZUNJL2dSl2imU4jViqI1Sqh\nixexDgygMhgQdDq233wTrdVKIRwmtbJCORZD0GjoO3ECz/T0bgVKqVQwNeWmWAzuiu8ZjSomJ12f\nmAMGPLDxpSRJrK2lCW4mibz0Mql7cVyeHmqRHdJLS4SvXsU6MIBzfBxzTw/ucoM3X7hBejvSWZlq\nBYb6XEztG6RVKVBLRIlGkqy9/CqCSolrYhz3kB/13gkKgRBGjwur34+528X0jBKZVk/uxnne+vvn\n0Chl6A0qRh85ztTjjzCiCVPKxihtVWlZHqWQzuP2dpNZXkSSwDs1iaS3M/nkl9h86zyXX7xIS1Lg\nPzRD78On+e63r+NyGRjdP46iWSO8GsTqtpNYXsPUY6ScyWC06HA7VFRjEWILd/DumyN2b4WKVCVd\nUTJ3cqbTO5OkjkGkILD1yitoLBbMvb3oPR48x47tJo61TOYD3jZirUYlkcDo8SCTyTB6vb/y2iOf\npEXzadDV1YVMJiMSieDz+X7+gH9l+CyTEQH4CTANvAT8z8CVT/OFH7dXVSzWd3+tFQp11tYy96Xk\nW8jlMvbudXPmzACpVAVB6Ci6rq5mUCrljI0pOHXqp7EaDZFUqkKl0uTOtQ3KmSwtUUZmK4hW7qLo\nGuXahefQu13U/BpOnn4M4+ICglaHvd2mnM5x9W+eZfzMSfpGu9hZ6ehAWDwO6vI6Rx4eIZvM4+l1\nUonHaJZLhBNNfvTsJQx6gXqtjqHdWXR1OhVGowqVSs6JI172DqpBlMhXROrRIrquHtRqgXv/8A/E\nbt1CJpdj7O7G3D+I8/AslXSKiUOzrKxYcfd5mTvQR+XaKplsm6EvPkk9n8c5O4dcBj375rAPDWHq\n7t69FgqF/H3CZP9S8/VhY8rJJIHz56mm00iShEKjwTE6SrNcxtTdTbvZ/ODCWa1STiRQWWxcevMi\nFoMdtdaAzN6F1j+CUdPGFQzTRIFMUCCTy+ieHGZ0by9r3/or7v7t3zJw+jSBCxcw9/TQajQx9/dR\n0+sRK1WSq6s4hoc7qrPVOrRETN3dlAI79EtJ7L3DvJPM48pkMJv17KxGmZ3zceuF1zE5LEwc9bO9\nHCaZjvHQ8XFurDd4/eU1jjz6ENfevEulKvHoH/8bmtUawwvbqI1G8gEZbnOC1MJ1AuEguXCUqROz\nTI48xuL587S7u0n9+McMfuEL1PJ5NBYLxXCY3lOnqYSDFCNhBk6fpm7u5eLrK9SaKUo7MvbO7Ee6\n8wYKtZp2u41Yq7EdCHTaM/U69XyenXPnMHV14ZycBElCrFZpNztJhyRJ1HI5uuxyHntskHC4hFwu\nw+s1PHBb5tPeP4FAngsXgjj1TWIbIaIbUeo+B/4uH6LYJtGUqIs/TYqEdp0eQwnHgXEa4Q3kjTIm\nnYjZZaUQqdKSZDREibbBwfrCMq7xUeQKObFMgXYiTvT6NXQOB6NPPonPbMHY7eb5/+82OiUIggQK\ngci9ZUaPztKWK4kWlURjNfwjLdp2E82WjHqhQL1YpFku4f/vTlHZWqBUbuIZGySfKpAJZ2je3UGw\nOvH26ej2qNl/sJtoKEPPoBWbaQ8To33IMiFOnt1D+u2XMHvdRBZX2HfoCKahPZRqMrB6qJocNKWO\n+J/SYCB8+TKNYhFTTw/JahUpEiGzsUHX/Hzn+mi1u95X7yU1Kz5CbfdX1Zvmk7ZoPml8mUzG7Ows\n586d47d/+7c/9vhPG/+zHv9ZJiMi8OgvMkA2W6Xdlj6SEKfVKtFoFNRqItFokXK5k5jY7Try+TrX\nrkV47LEhPB4jjUaLkRE7TqcelUqBTlem1eoY41UqDe7dS7C+nmFrK8va3QgT4704tCIUUzTVZgrR\nIntOHSV4b503/vr7uExfQR1ZI3r7FmKlSqOQp9kEtdmMf2YK98wMWoMeWavB1sY6Bm+VdGwDxcQo\nGrWBO997kVuXA3T12vF3axnf14/GI2fI0aRUKDA/JHD8oW70hQDxV64iGIw4xsZQymuUigViV+6x\n8vzzKNRq9E4n5VgMZDJ8M/MoGiryWxtMjVnZ//AABnkZYe8hDPuNhNMN8I4hdIHTY8I/2/1zZbt/\nUZAkifjCApVkZyGt5fOklpcpRSJYBwbIbm5iGxr6gFDTu2PXXnuTxNI6a2UT2RJkIwlc3XYeeXSQ\n6d/zEL1+HUkmZ+7kaSwjo7SyMWrFIiNf+hJd+/ej93ZRiETxzM5QjIS5c/ESLaUapVaNwdtFo1JB\nb7N2iJIWMyiUSJUCNpsOl13D8js36d2/D5fXhN2hp/vYFFIhSWX1DjaNEeNML7VYgIluD/19Nmzm\nBrbHxzG77chUTcRMkqkTs2yvxjh+zMrtH9xBIVaR6mX84z143TrCly6isViQORyM/S//K6Ebd7j4\n7I/xToyj1Klx9veh0GjQud2k01We/6tnia11KmAGr5cr7+xwZHov9dXr6Oz2Tgmz3e6oACuVVLNZ\npFaLajaLqbcXuUKB1G7TqFSo5fNEr1/v2NjLZJj9fsZnZ3/pvjWBQJ5ms01LocXS00V8eZVGo0nD\nPER4O4G9bWBho0ZNm2Zw0Ear2SRz/R2qySRTB0fIh0IYvV5Sy8tsX7iETK0jvrpJ36lTNBsili43\nqcV71FQqihubNMsl5EqB6MJtnHv2IvjcHPrqo8jVGuIr68RXt5Ar5Bg9HsrlOsaCHElTxCA0mHto\nnK1NCe/UHmzKCqOzg2hqKWKpLHfevoN3YoiqICewHKNHtc6eI4/Q54btYJmqqMBgtzE65kTelWbA\nnCdfSbL2zH+lEAww8/u/i6TUE1oNImqtSL49FLMabEqB+cMTZNq1TjJZr2Pp70djNlO6n2wUo1HE\nRoP06iqpxUVyOzsYu7pQ6vXUMhm0dvuvfCXkZ/Gd73yHp59++pcac3Z2lrW1tV9qzM8LPi9tmn8R\nvJuNVSoNbtyIsrOTp1JpotUq8fvN6PUq+vutu4JOBoOKyUk3ly4FKZUayGTg91vYs8dJPF6iVGpT\nLjdRqQRsNh0+n+m+ZHwLm82CTieg1QrEYiWWl9P87d/ewenQcfVSkEvnt/hv/vtDHHr8AJnNHVIr\nW9Css2fERMJnJ5Fq4DdaKewEaCOnVigxcPIY7WqJrTfeIr68jrxVwzY8QjEUptXtY+zUMVRakXS9\nyaGvnsXtX0TdKuEc6qews0Xy3l0ePXiQaFpLz5CX2Ms/4N5rL9Os1dGYjAyePoPJ56WWTVNJJmnV\naigEAbFSQWUwoBCUjI0Nk9jYpn96Ao3VhtJgwtTdzdtvbhDfKoJShbJcQyaTka8UGJ8QEYQHJ5R+\n2Hx90jHNavV9WwYrySStep1yIoFteBip1aJRLqPU62mWSrvHae5LYidXVtHofVy+HaMuqcklRcrU\neOXHyxwbbaMyGHDPzKCQiVDKUEkmyW5u0iyVCF69hkKlwdTXy/VvfJPRLz1Buy2RD0cRlALdRw4T\nvHQVlc1JIRggHwgiU8gZOnOG6FaUuTOHWb+xiNXUYv6pYfrnRrn9zH8lux0kn6sS3YnjHR/ikT/9\nD2y9eY7QC2sUTCpMPT3oLPtpygTSy6us/f23aYsiE089hVuRxdpvpmnx0kgnqG0tIRw+xNDZs2yd\nO0+k0mblJ69iG/RTyJUI3w5wyKjDOzlBNZUgk6pSKnQM1wqhECqzGUN3H5LTSL/P1PE6SqVwmUzI\nFQoa9XrnWsrlnZ0196+vQq1GZTQSu3mTzPr67nVP3r2LUqPBOzf3sef9590LD4JcqUXvkaOUkhms\nZiWBnTQqjYaB2YNshUvcXV3iyJEe+i1VWvU64StXCF+9in10FIVSSSXVsZdo1YtItTKpOwsMH5hH\nIcjIbW8jaLVU0ymktkRmbR3H8DDF7Q1alQIL3/42MqWKid98mlIkTM/sFLmdLSJ3V5FV6oxP99Es\nF9G1KowcG0Kv97L2o+eJ/NN5sgYNrokpBo/MEri3hbPLicPWR9eUH+eUjds3gkSycuT1IoN9VmTN\nCl02C5FLb6I16uh7+ATlZILUyhq+qTGsQ0NUDT7aOhs2pRyzAarxKDqnE7XRSL9MRjkaBcCh0YAk\nYfR6Sd671+HaSBJyQSC5tIR3dhbP7CzW/v5d1+BPO0+fB85DvV7n7//+77l69eovNf7s7CzPPvvs\nJx7/aeN/luN/5ZORdLpCItGRjHe79VgsWpaWUiwupqjXRZaXU+TzdebmvPT0dMiqjzzSvyuoND7u\nwGxW43DoEEWJZrPNzZtRnE49fr8Zna7Tl/Z6DYRC2l1lyHa7zcCAFZtNRyCQ5+rVMGaTEo9TzaNn\nRwkHM/T1GjBV11h640coBAGFRkNwaZXRLz5GWVLinpqka/8+UotLKI1GfPv2k1q8h9ZmJXblIgq1\nGrFWY+jMabav3MQxKyDKlPSO9SHI2piePEN6O4Qg1WgLavRGkY2/+X+xjY6iMEwTePUnNCsVavkC\n+e0tlFot47/+a4jVjty1ymikWamgdTgQtFoc42O0JBl9hw+gdPYSTEuUEgIjljoyQcXqVolaTcRg\n6HjE9PdbHrhv/3HxUa6w74WgUqHU6ajnOyJY7wotqQwGJEmi1WhQTaXoOXaMUjTaUUN1OHCMjRG/\nfRuL3cR2SsnqappcpozBpKWQLuJ396KxWEhfvk01k8E+MoJcLid06RIyi4dybodMOINeK8czOUG+\nVmfxhZcY/+IZ7MM5lDo9xVSWwTOnadVKNHJpTF43vn3z1IolSskY7tk5HHYVG+cvEboUxmiQI9Wr\n5PM1jAaBqs2AVquilY6RXrpHZnmRXLuBweNBQMQxOY1teJjQm69i7vKQDwQwmFSUwmEK68uUMjmK\nSPQeO0q9UECU5IiZLJVMZ4eYaY+FXCpPcCnA5BNn2QwGMPu86OwRBI0GsVajmk5jHx7Gf2Qf2vg9\nkvfuYRsZwTk+TjEaRWOxIFcqce/d23GLVSiQ5ArUPj/ZfIPY2jbCewwKAbJbW7j27kUh/PKWnr4+\nC+vrHWHDjMrBsT/6t5S215EM21gHBnjpxWW2dvKMzY+QiempLq5j7O6l9/hxEnfvIpPLaZRKiI0G\npp5uUlsh3HvGaDfrWD1OCrEI7qkpUktL1NJpTD3dtOoKNFYr2a1N5IICqS1RDAZZff6HHPqTP6Ge\nyxK8co2KqKd3fJCVHz4PbRHB5sHdbtMoJhEKMeqlHC2ti0IowNjRk9RrLWLrAQxuF0b/IDa3lf7h\nJsbbNwhubJKMKune38d2LIJJ2SK1E0HndDD6G79Du1FHqZBI1zU0JCM7oTLXL+9gNws4WxHmxvQ4\nHBp0djuWoWHS+RaNlgyDSYOpu4edt97crTCq7m/nlclkuKenH0jy/1cJP/rRj5iYmPjUD+aPi5mZ\nGf70T//0lxrz84IHXRGO0REpWwQeBvYBN4HXfjGn9WDY2clx4UJwl/ehVBZ4/PF9uzLyhUJHYfTd\nY4eGrIBEKJTHbtdhNmtQKhX4fCbqdZFvfesW6+s5bDYtuVwdh0O3682iVgscPtxDLlejVmsSiQTx\n+YyIYgutVkm/34TVIGNlKcHQoIWHvzqAVwyw+eLzmIUakXAehVKJq8tNZmOd8TMnidy8wcRTv4mg\nUSMXBHKBIJLGRKNawzE8gChKNFpywk0lC+9sc2rvEeqlAjqHlVo8RvbKOQSNFp3VhF7e4Nbf/S1y\nhQzD/ZJyNZVCYzYjiSKSBPlogrYEMqUKncPJyJe+RPjqNSSZDNfUXtzHThGqgMfo58Uf7rC6WUKh\nkHHqVD99fWb0+o4QXK0mYrPpGBtzfCqxqQ/rLcZiJRYXE2QyNZzOjinbex2E3ztGLgi4Jiep5XKI\n1Spam42WKGIbGiJ67RqVVArb0BAKpZKeI0fI5+uoVHI0KjlIEo1yCUFnpJQt0mqImB1ejhzrRZdY\nZPviGum33kCs1XCOjzP/R39EOFQgtJlBTBZIpevULHr89QamoRHSWwFS1Ro6UaS4toqgj6BUqbCP\njCEXVKh0WjRmE5VkCqPJQLuQ5uJffhNDdw+JopzuaJx6rc74sVkqiSjWbi8FnZF6No3QLKHTqVAb\nbVQyWSI3bmDs7cO5Zw/9J46js9tQG434Dh5k+bkfUslkkavUWIZHUegtJNa3oN+PQybgGhmg1ZbR\nbtRQG02o9ToquSLmgWGSdxfoG/WxtRRCoVIh6HTYbVo01SRam42+48dpFIvcu3IFbaGA1Grh3beP\nkS9/GZlCQb1YJlmUsZqSI19Ikd/MYRCadHebdu8TuSB8aoGyf+7++TD09Jg4erSHxcWOx5FC1kZS\nallZzzHilqPQtXj0sJPM1g2yF9eRGyU0HjuSJOEYG0OSK5Cr1DQSabKBKFqXF5nFRv+YH6xOlCIY\nzVqyyHCVqrSbDZwjQ+hcLlqNBkqdnpHHv4hcJVAt1cFgJXhjhWpNZOLEXm49+13Wz11EY9BhPKBl\nxKwmencLlVZDU6tF6/ZSaOlpRcvop49w8PhxzDoJZCLZ5Xukb29w8R9fZmBqkJH9Y6y//gpqrwWt\nUUs+VyVTTCBcu0k2XWJg/yQlVKQqDWqVJhMTLu6ev0P3pI10Jo8sE6I9ICIMzRMqlckVEhgaVpTR\nKq2fMckDaLdaH2iBftJ5+qTH/yLGP/PMM/zBH/zBLz3+8PAwWq2WbDaL1frJpCw+D9fvF8UZ+d+B\nk4ACeAM4TkfG/T8Bc8Cffeyo/wJoNlvcvh1/33bBUqnBxkYWpVJOq9Umn68Ti3UIcz09JrRagatX\no7zyyiY2m5Z9+7zMzXXh85lIJis4nXoGB+0kkxXK5cb98UUMBhUbGxm2tnLUai2USjmRSIALF0pM\nT7uZmnJitWr5/rMLOOxqVi/eJrGq5zef6EWqValGgzhMZrL5CjqNgMNvpbR8k3Y6jM7uIHzjFtVc\nDsfQEPHVdfoPzFBHg9qkIZ6oQCpPSwKUaoZnhlj//j8SeOcdpDa0RJHJp76KQqmgUSphdLuoZDId\nQzqrFYVSiVwQqDXaGJVKNA43t37wY4aOHKD3+Anc+w9h9HqRWxz8zT9sUKHBFx7vJp5uotMpUSrl\n3LvXMRA8dqx3V55doZDRbv9cBf9dSJJEIlGmXG6i1QofqaJ67tw2uVwngcxkqqTTVU6fHvhI6XBr\nfz+CRkMxEsE1NUWr0WDn7bcpJxLoXS7sIyOE18NcvF0gnW8hVkr4zCJ+p5rc1haqAStTM10Igoy9\nE3Z8pjzRe2uoPQa0PYOks3UCKYlJmZYWCmRqDRqbFSmSoioqqCtNNBtFrH09YPUQCm/h8A5i1EmE\nLpzraFl0ddGq17n5139Nbnsb/6On6Tp4CP/cHlqNBr4hJyargVSrSTmZYu2ty+gNasxHDiGXy6hn\nU+itZiK373R2/TgsBC9dRmsyYe7rJXnnDjtvn8c6PIRtYJDp3/s9coks7VaLRChGZKdAuW7FZdDi\nPvIwS29epsfhYLTbwuSJvRQjEYy9fVidFg73WnE5NCTTDfomB/Boq0ReeR7/yZPQbnck3+8b/LVb\nLeqZDIYjR9CYzWxv57h2fZNWS8JoVGEdGCRw5RoWiwajUY1MocAxOrqr6vpxUak0iMfL1OstbDYN\n0kc8BAuFOhsbGaLRImq1cF/csMH4uAOXS09te4XqSowTT+xDbnFRa2ZZev4cdRGUkhOFrkq7lMZq\ntxO/d49iIs3EU0/RbMsJ31lC5ekhkZcz0DPCq//5W7hcetyz86QFD6NP/yH2LgeCSomSOhqLheXv\nfY/IzVvI1Fr6T55CJghk8y0C6zmGH5ZQiFXMdiNKnQ6jSU0znepU9poNlCYzK3dCFCowZuIQHbsA\nACAASURBVOtifXGFxWqNwwfc5K++jv/M46jEEvO/9STBYImWxoTGbEGQt8jHEqy+eZXxEweQ1Sv0\nDPvIikbevhRjcatKtdZmeMTJmV+bJ3/zAue+/QKHHxqiqVCjyF5GLbZoZsto5RLZnAGbzU3zPWJm\nAJa+PoSPIK7+qiKZTHL+/Hm+853v/NJjy+VyhoaGuH79Oo8++gulVH7u8CDJyJPAXkAFxIFuIA/8\nX8BlPqNkpFxuUiy+X0hJrXYQj5cZHXVw40aUUqkjBS6KbUZH7bz1VoArV0IYjRqy2RqJRBmFQoFG\nI1Crifh8Zq5cCRMIdMr+a2sZNBoFg4M2XnxxnVyuSjhcYns7yyOPDFCvF6hWmyiVci69E6CFDKXU\nZHMlzJGTe1hfTzEwuYfotSsYFQocNiMms5qeqRFygRBdx79C4OJlln/yGpIEtv5+DHYbhXQRU28v\nxUSa4YePIhls/MZ//D3MLgPlwDabr73ekeHO53CO72HlhR8x9dtPY/APkU6VyK5E8J98hKHHH6cY\nCqHQGdB219nz61/l3isX6TpwGL2/H53PRWEzQrWi4bVLBW6tNRgYcRGMdgzqHA4tExMuajWRcLiA\nViuQy/3UF0aSHuwXbrstcfNmlHv3ktRqIiqVgpERGwcO9Hzg2HcTkXfxbhvu3WTkwzLu924jzAcC\nVDMZXBMTHQVQs5vXLmVZWgtgNKoRiglubC7zxaeP0DU/j76p4uwTE6iqae69coGBExMUtjfR4CVe\nUdNWCEgqHZFoiaETR1C//QZiTUerVqPckCPqHWjUKgxj07z12hrbV5cYO74fD1GErUWQ2ij1OjJr\nazQqFRRGC1JbInzjNuqufm69chWbt4HSaMZ//CEK0QRqs5muqVE843uoVWq4JyaJ3byB1Gqh1qjw\nzsxSiEZJb6yjs5hZ/O53aZQriI0GYqVC7NZtXAePUogmyOcqjB6d48qFdc6tbzEw0cvM7zyNfbCX\nQKhCWSbHrKyTunkN58wcckHAaFRhcDoQq1WSS+sIajVqk4l2s4nB49n1IpErFNQKBcrxOGqTiXi8\ntOu+Wyw20Dn89D+kRt1KY3AYsI+MdAjFnwD5fI23394hGi0hSaDVCszPd33guFpN5Pz5AKFQgUKh\nzvJyCqdTx8GD3aytBRgasjE/4sRg1JC5e4uqY4TK0ha5nR2G5sagDTWFCZ0Eiv5pGrI+hse6sdrV\naG12hh4+Rl2hZzsO4XwZa08XktbIW+ejiOUi4jEb2e0aoe0Q//F/PEjkhR8TvnEbuVxGGznldIbs\nxiaOiQlSoQjFZAbX2CiFzXUMXhdSPEas0cD/8HFufevb6PpHKFXrOPw9yMwugj98kVK2hEkYx12u\noFbLsYyM8uqzC4S204CErirHKZegJaK3mpCqRcxdHkLRApeDO7z0/F1kBhtNmYr1rQL79lqRNatM\nnNjHwNFxdE4nl7/7E5RaLU2lhY3QNfpqNfq+fBKdRkExHAaZDHNvL86JCcR6nWo2i0wuR2e3fyDZ\n/FXjjHz/+9/nd3/3dzEajT//4F9A/NHRUS5duvSJk5HP+vr9IjkjDTo7X0Rgg04iAlDlAcztflHQ\n65UYjeoP+JYYjWq8Xj0zMx7W1zP4fCZMJhUKhZxAII9G0yGdQmfBjMdLpNMV/H4LwWBgNxGBjkLr\nzZtRWi2Jl1/eoNVq02p13l9cTHLwoI9aTby/8DWQA1qdEptVR6VUJ5tsoB4w4j/5MNVMFrlGw+hj\nZ2m25ASv3aTelNi4voJ9fIJGvUEqWabr+EmMehVmlw2dw46EjLU33yF64U1WggEGTxzrKIWq1Zj6\n+mmUyx0vmz/6b1F3D2LUZGnLBN7+xzd45I+ewjU5RaMlQ6XTEF8PgEqFaHQRKqpQWQbZUZu5dilK\nPF7B53fichtJpcq7lYuOk68Fq1VDs9nercharRrs9gfbFZFIlLh7N/E+M8KlpRQ+n4m+Pss/O1aS\n3m8v/8HPO1tHJUlCa7XSajZ3d9cIrm7WN6ucf22NqihDarcxqkRcejOLN7exzhjQa7UM+Fzc+uEd\ndPI68rbI8L49xONFVAYtwWgVj9WAyaojW1fR88jjFHe26D10AIXZSbMlobfb+btvnKOQyCI3mNm+\nuch2PsmjDw2TuX4BvdNJ8MJFvPP7yMXSVGsipWIdszpLCxmbt9YppPI88T88Tf/YKG1kFGMxNt66\ngHV6HyO//huozRYMPRtY+gcopNOdB1s0hsagR6FSodboUZktNCpVFAoFSpmIXGpiHxoi29DiH/Ji\n7/aQKKsQ3UPcDbdYe+suI/snOHNygpp7ksubJQqVFl2uHoYkFbJsBI3JhM5uxzk2RjmVIr+zs3vd\ny4kE5VQKS18fhUgEnWngfXMTTzUwGr3MP3YEh6PDL2i3WpSTSZCkjvfNA3JHNjezRCI/JSFXqyIL\nC3G6u42YTJr33GtlIpHOr/dcroootgkGC0xMNFCpFASDeWZmhmmLItlIknKsSv+Am3rMh82mIpst\n0DbZWNlukHHWCCdVXLt+lfEBHebkAjqvF938I6yERVq1Nn1HH0Vv1LDz2jL2njlEk4lzL6yg1qjY\nWgzSkAT6Tp1CJjZoyVSIChWRpXWkPSeYe/JRlNUMereH2T/8Q2K3bgFg6unF1DeA76GHUTncMKRC\nVJtZuLxOWy7QarURW50kvFkusxOXU8lkcTs1ZMMx+uZGqEZDqIUySrWSrrkZmpKcukzH+nK8Y9xo\nBbEpISEnk28wNdlLcuEW6dV1Apeu4PA6aCn1xAMVNBqB1OYOtJr0HT9OvVAA6Oy2SSQIX7pEJZ1G\nJpdj6u7Gd/AgasMnV13+LFGpVPjGN77BxYsXP7NzOHToEM8888xnFv+zwoOsBHVAB1TotGXehYXP\nMBlRKhVMT7splRrv44xMTo7QaLSpVpv091vvE1Cl+667OpLJCuFwAY1GidGoum++J8fnM2Iyqe67\n27bv63QoyGY7rZ5Cod7hXeTrZDJVjh0zIAgKTCYF6XQFg0lLNJTBpDYhtiCXyjP9G0dIvPodxEYT\n/6lTqOwuZBod5WIbhcVJIlqgkMySjSbpmd/LhYtRjig1TB0cRmPQkbyzQHYniGJ4CGO7zvJzz2Gw\nmfDs28+d7z1Hv9NFo97E0NtPVVSg6J+i1VhGbTZj0Bt551KYPeMOlK5u7kbbeAe9lGUp7uwUEYxG\nbAWRUklEFNt0dRloNts4nQ10Oiebm3nqdRG328DYmAO3W0+xWEevV6JUKpid9fyzrqvvRT7/frM6\n6CQYq6sb9PXNv+/9d/1z3oXRqHqfH8Z7+5GNcpnojRvkAwGQJAxeL86JCdRWK81SiUxFIJcsIlfK\noSUhSRLJWA7nHgf9Y07aYobt7W0GlUrmHpogfusm5eXreKenaepTFAJ5hme8DE35uXk9wve/dR69\nUcPcyRkspjb7D0gYlTUWFzu+K2qbilS4jqRoIpbryMxOVAYj1VKFpthC0BnIxVcw+odYur7GVx4/\nS13tILG8jGd8lJbeSb2YxepzUYuG0I8PYDMqqGVzDJw+i9Jxi+TSCjtLEbRqGa7hAdQWC7ViGc/8\nPJn1TVrNOr59+9HYHVSDGXRaEy/81U+wDznZc/wkN95IUrwcxemxUBUsJLcClE9P8NLby7z63A1K\nlTYqlYInf2MvXz07SmttAfvICHqXC4VaTcpsJpxIoK9UyG5sYB8ZoVmrUQiFMA4r8bqtROMdjxm5\nXEZ3twmrVdfRXCkUCF+5QjESAUDvduM7ePCB7qF4vPyB97LZKOVy324yIkkSzWaLRkMkm60Tj3eI\n7SZTx4VbpepoBdUrDVQGA5a+PhLLCRQeK2OjVjZuLtNuSpTlTTR2H+tbZSSgEovw+q0kv/b0PIvh\nOqtffwHT+DTb0QbpPiNnvzhE25Sl1KoSuF0lGsqi1WuoN32sL2zTzsbpHnQTDwSoVtuM9gxRSJWJ\n7mywd64bhclGKRyi+8hRKnodfX4/5/7zN0ktL9N34mHCjS7SpSwiSjRWO5JcxdhcP0bRRDlfoooL\ny8AgUqWAWq9hNQLdg4NMzRzHtx3E2u9n5+4qgqTG5jR2ZA5qZby9fVQlFaMjNkyZIHWbiWKhhqmv\nl50b9xg9c5ql+CZSVaTPpsVsUiGTydCYOyrIbVEkev06pVjsp3OysYHGbKZr374P/T/7IPgsOQ/P\nPPMMp0+fZugTVvA+bXyA3t5eLl26hPQz5O9fVvzPM2fkBPBubf69yYcA/P7HjvgviL4+CwaDimSy\nglwuo9FI4/EYqddFdDoliURl13xrcNCCw6Gj3W5TqYhUKiI2W+fXvcOhQxAUTE97CAYLVKsiWq3A\nwkIcuVyGTCYjHi/R1WVCrVZQKjWw2XQ0GiL9/dYOIdah4+QXJohGckw8fICpITWGegxhYBDXnnGa\n1To777xDu1LB89AjDD3+BNGtCM5eD8FrN6mgw9erwe3SYfJ4OPe1r1GMRMiHYxgPHuDQrz2J/+gh\nqrk8PUeOsfLjl2k3mzhGRvA9+kWCJT3BjIB79jgyQUk0UqLVbKJ091JuaYikCtxbzxAOl3A4DIx0\nmbh9O8bDD/dz5EgPjUaLTKbK2toGCoWcL3xhCIVCRihUYH09QzRaQq2Ws3evh7173bvE3geBTqdE\noZC9r8Ihk7G7U+m9OHy4h4WFOJVKE6NRxcyMB5vtwyswycVFUktLu6+zGxvIFQr8x4+TuHOHWFii\nKbbYe3iMi68vIbU6CcHQWBf6WoiVV16A/n7yKhW1fB6FXEb81i2SKxtM/PF/QD6uQd6skCrX+cHf\nvEKj3kSQS4QXNyi67ExPOoisXsXoP4Bj0E8huYVvbBAqeVoGAZPdjPnIURRaPYNnz5IJRuk9epRG\nW8HoaR/3lrMsvLWM2awjv1OlK5bFLGUR+vcyNTnNlee+z+ZPXkSSwD4+jm1kDLlGS9dwD75jx8Hk\nJK1Usf8//R9UV2/SajTQ+fx4p8ZZOneDnet3sM+IuPvclKtVQoubjIwN47LIWF+JkAhmmRvtIpko\n88bLK+QrMur5EnWViheeW+HhR8eYPHMGo8eDXBDQ2e30nzxJ+fJl5OEwvceOoXM6dytR5e1VDp46\nS6bqoljsEMB9PuPurqj4nTtkNzd35yu/s/PAfAOnU/e+qiVwX+9HST4YJLW8TKNUQj0wRSZdYXUt\nS6FQJ52u0NVlpKfHzM5ODqtVg9mmp+ZwYLXHMdhqtCUZBl8Pmq0kGoWAdWyESE4gGU6jVUM10/HH\nMvcPErl0nkapglbo8C2q9RaFZBarskx0J4LbPojZbkKpkFjbLDHxpbOs/uCfyG1somhD7+w+DEOT\nbG6XGe81Ezr/Nkuvvo13oIu+2XFawxME8hrcZ34d2+g9arEwB4762choSexEwO9jen8fXSaRfFBF\n3/wUlZ02qytJvEM9GEw6SrEoLUSauTQqalh7vMSieUpbAQ4dnCBfaBJPlNEZ1Az5XYz1KLn94yus\nXbhGqVTD3e2g58B+qpUG3X47+obI1OExjK73O2/XCgWq6fQH5iq3s4N3bq6z5ftXCJVKha997Wt8\n85vf/EzPw+l0olar2draYmBg4OcP+FeCB0lGah/xfur+v0+D/xuYB24Af/IgAxoNkVKpgVarRKtV\nYrfr3qPk2DHLUqsF9u/3celSiFyuhkrVqX7s2ePA6dSztJRCqxUYH3fQ3W3c1R3x+Uzs3+/j/PkA\n8XgJpVLO9LSH9fU0J074WVxM4HLpmZvzcvz4JLlcjdFRB3I5pKI5VNSYm3bj9JjoH7HgUqVI1svU\ni2UWn/sh6fUtHEN+IleuUNNvI4zuJ19TMv7kE1DKYLCZ8Y4NsfHySxQiUdotaNTqNO7dI+Cw4//C\nE1z9p1fo7xvnoa/9nxg0MoTeMZ5/bploMs7yrQAGq4HH/81xgoUGA4NdrCTVXLgQ4ORJP1pdlUKh\n4zhaq4kEg3mKxQbz8170ehV37sR54w0FCws7OBxaJiddxOMlens7rZR6vc3ycor+fgsWy4MLV7nd\nevr7rWxsZJCkTiLS02Nmdrb3A8d2zLhMlMsNDAYVKtX7b9F3M26x0SD3IX42xWgU79xch3C5mSby\nVoAxhx69TsHS9U1sHhsnHu5m6+9ewXZfwl6h0VAPBjH6fAgWO5lImnpwE7X/CPdeuonSZKVeayIo\nZLs8CZPbTjZXI7UUZHr6EP39Vu7myuRTMQb2jHH06CO4VEUKKTvliohxfD+ycZF2uYDFaaGxEOHF\nv/weOpsNhcOH2WnGN+wjGrFQzCjJrK/QShQ7Yl31KrVcgdTSPdzThykrrbz2TohaIYJzuB9Zpc6h\n00/TUN0gnGkQTxpx9w0ju7WIt9dOuhDFhIaHn5wn09Ty3b98CdHowd9vZnLaRyjXoF0pIsjltHV6\nmuUS+XSObK6Kpef9dvF6l4uDX/oSsdu3Sdy5Qzke/+mHkoRGkBgb+6BDq1irdbgGHzJfD4KBASvB\nYJ5ksoIkgVqtYG5uAkUtz8abb+7a1je0EfpccjIpAbGQw+xVMDPvot1q4/Ua2bvXjU6n6uzCymYZ\naiuoZvNEl9ew7NlLq9kGk5NXvvkSXXMz2F1O8m0Jk1WPIGuRy9eJxwpIGzHUDglBanLphSVm9zpp\nl41oTRIH97mRFErylQbi5DSH/qcRxNgmOiXY90yQbwhgEXEJGV57dpFWU8TQ3UPFN8et1zdR25tU\nKnVcnn5OPH2WVLLE1LwbzdEBbr9+lcv/+DImi5bpY1PcWSnTPdrLo7/1ENcuB7lzK4Xf7+PEySGk\nZopEI057M8XAgRlyBRGhmubMmUHSNRUGu5kTDw/QCt8in8qitLsw6Bs0ZALJ7RAjMwdpxrJ0j3TT\ne/jQB5ILQaX60DabSq9/37G/KpyRv/iLv+DQoUOcPXv2M4n/3vEHDx7k0qVLnygZ+dfMGflFYQ7Q\n09md8//Q2S587Z8bEAoVuH49QqFQR6sVmJx0Mzpq/9BSls9n4rHHhshmayiVchKJMuvrGcxmDadO\ndezKV1fTKJUC+Xyd6elO28FoVDEwYEWnc5HPV9nYyLG+nkWnE3jssRF6e82MjzuYm/OgUimo11us\n31xDVY7R5zQjKCvE7kVwW8bRl5bYfP1NBKWC+MIdJr/6FUzdPvQuNwZvFyafh2oqRTYYoaJQcPcn\nb5FeXkKr1yCXyVBZTaiTOlptiVy6gClTReHs4aWfrGHxeTl4ZobrN8LcWYjQ3W2mf3qQXFXBpesp\njh3rw2hUkc3W2L/fx+3bCTY2MiwsxAkGC3R3m/j9358mkSixvJzEbtexvZ3j8OFudnY63JpSqfkB\nye5yuUmp1PhYyYhKJXDkSA+9vWbS6QoWi4bubtNHuu+q1cLPNT+Ty+UoPkTbQCaXIxME5AoFvf0O\npvNNlpfT2D02znzZhMdQR4quUYnHUer1lBIJDG43rXodhVJJ3+kvYA7FCF27TpdKx4FHp6lVGrSe\nPkA01CHpeVwauof7GNzTC5kptHoV+/0NRr1d3LvVYu+8G72YYOG7Pya5voPg6CJe0WKdO0xG08MB\nt5uZ091IaiPx7RBk4+ybsXDu+WssLqXoPTBP7eo1pGoL//A8rVKFpk6N0aLHPtDL9mKRdlvC6HZR\ny+bJxdP80z8sMOBVs371JqlkkbH5Ieaf+CKJWJpyOMTBL5/EYtVw8TtvcfTUOAaLAavdwLWra/TM\n2Zg9Mowkybl9K0psp8rQuBezsVO5qpfLVJJJ6sUiMqWSptKMTG2k2Xg/X0vncKD7CKt4mSAg/5D5\nkisfTKfGatXyyCMDxGIlqlURp1OHx2MgePHibiICUK212L56h8k9I0x0O2k1m6SCawjjJk6emUSt\n7qjG1rJZ7ON7kJx9tMsFpHySUi7Cxt0QNn8v+09OUNVYEJQKho/OM+Rq04xsojHoMXRpyZUlBvwa\nQrc3GJx3cPFb/4R/0o9WkeWhyXFso2O8fW6Hcz+5QyOwgqfPxcEvP0Tl7irlRALDvkco5VVoewfx\nzU3jm5vhe8+8RTlfQu9MYuz1E061eP3VdWRaHQNKB4urSdQyD0d/qxdtM8fW7TUq5RrNRB+Cf5JM\ntobdbsDlNnDz/D3GegUKkSjZrBWt1cLhrzxELluj2ZYha7fRaeX4esyEVquYXU7isSLlSotWroZg\ntqLxduMbmWF4sgeNWfOBOVEZDNhHR4ne6JCroSN45xgff99x7VaLciJBvVBApdejd7t3SdCfF+Tz\nef7sz/6Mt95667M+FaDDG7l8+TK/8/9z915Bkp3nmeZz0ntvK7OysrK8r/ZoAzTQcARAgqIROKKW\nQ0qiViFppdXsRGzEXmyEeKEJTcRqRzEbWuliZXa0GlJDUhRFAiA8iO5Ge1ve2/Tensw8eTL3IhsN\nEY4AaJqc9yoz6/z1n8r/nDrf/33v975f/OK9PpWfG+5lMHIMePHO65eB43xAMFIs1jl/fveuFbko\ntrh0aR+zWUMg0FX+e2etymDQ3H3gdXdT3W6QW7cS7OwU75QBBBYXM3Q6cPRogN3dAs2mjCDAwkIa\nl8vIU08NkcmISJJMT4+Zw4d72NzYRJZMGKpRvPI++9om0ZU1NAY9Dp8DnyrPzvV5LH1hkOrMPPM5\npFqNWipJq1JBa9CxeuUi688/j0KtweD1YvGF2Lh4g8lHToLOgCyKWEMh2sEg7uEJjOEhzJogq/MF\nbt+qYwgUuHAti+Top2bQkIqLaHRqPB4Dk5MeWi2Z9fUcXq+JhYUUS0vpu99VJlNDljuoVAK3b6do\ntdq43UZisV18vgB6vYpisU4mU70rEAfdHek7A4VKpYlSKXygAJpOp2Jw0MHgoOPuZz+JN41CpcI1\nMkItk7n7j1BQKHAODaExdAMolUrBoUM9hMM26nUZRaNM7uo5lDod1r4+xGyWdK2GvtlEa7Ggs1i4\n+vVnqaSz6IQmvukpPCEf+3PX0Key9Bs0TJw5xtKlJaJXLmInS9+gj53XfwjVHKlKhdlTpzHqWkTP\nXmfr0m1a7Q7KchNPZJDs5iraMS+NaoPVUp3Hnp5i92yBym4KWRS5/uYq1ZYGczxNeHyci6+eRR0t\nk1teoi42OfXMI4TaTXLxPLGbt9DotDgHI4gtBbvXVuh9bAhnpJ+2LktdacU8e4za3AJhZy/KgQA1\nWYOxL0IuX0dvs7G4VSdkN1C++AIjgky+ruTXn5ni6i0np073MzDoIr20RHppicSNG0hiHXUgQlJt\nQeMYJuAbQ1PcR6OQMXo8eKen37fsolSpcI+OspfL0W51OUGCUolrdPRDr313s/D279/e3obmjzoB\nq9t1WrUqe2v7NMsVDCYdgeEQlUqTSxf2cNuUEF2kFt3rdsflwHt0HF3vAHp3GVNvGCSRWb8flb+P\nRr2Fsu3A39PlU/zqmJVzF5O88eoqBr2KIycHUBb3EcsVVla3UBXmeDJoolPpIX3hVdooUZksZEtt\nzj13lYcfidDCwtf/03cZHu+hUlcQ6OmlUq5TyuTR2BQISiXpxUVsw2OYxyeYPNhHtiQzMWyhHt+j\nuB3lhb/5r9gcRgaPTNBsylx97gqJmMDwmAdrfZ+t21cpXJI4cLAHi1VH7NoNxEUZ71AYk1pJbCtJ\nvAbphhFDR4vR46J3tE1ibRulzoFzaIhiScJpK2CzdbNjstymUmmi0729WfBMTaE1myns7HQdnPv7\nf8SnqtNuc+2ll1AnEsjNJgqVCvvAAMH77vux9/jHxccZ/2d/9mc89dRTjI2N/UJwLo4dO8a3vvWt\nezb/Lypn5GcFG/BWAbkIfKAbUT5fvxuIvIVGQyaRqNx9wH4QVCoFfX0Wrl9PIMsdgkELU1Oeu2TJ\nQqHO+nqO1dUcoigxMuJiYsLDD36wTk+PmSNHug/o6REThbnrLJ+/Qa9Jg2Won3ZmlyFNifCIAZPH\niMNvIr+/g/3oacSKSM9wH5UbZ9l943U8k5OkE3NU4jEaxSKZ5RV0Tiel/X0cYyJGp5tmQ2LqmS+w\n+fyzKCxWVBNTWGdP88Mrebx9Ply9BhTmOoJC4MTJED/4wQZ78QbZbIORESNarYqtrTzXrydotdr0\n9dnIZmuEQjZkuc3hwz3Icge/30yx2MBu17G+3v27TSbNXbE4r9dIINDVa0mlatTrEvff34da3c1E\nFYt1bt5MkEhUUKkUDA05GR93oVJ9PB2JjwrH0BCCQkF2bY22LOMYHHxX66ggCLhc3c6gzEqcRrGI\n2mgkePx4VxitWkVrNuM/eBCt1cqRX9eTiedx99ix+L28/h/+FHv/ABpZwuXzkjj7GsPTs2xcX0JQ\nqVn83g+Q1UYEtRqt2czeay8x+PgnaNVquAYjrNzeolMTMbnz9AwHCEw7WFhLUyuU2J+X8HRy6ON7\nGA/0o9BbSG6XMaequHxBAmP91PfjCAoB/1gETXicYlOLtceHOTxIeWsNQalGazXgC3bIbmxRLjax\nB3uQDB5WNirkCzqmD48iCTVuL+YQHAGOHLNTzlVI7G+wPP8m1d0trE4LBn8P3uYWf/g/PYIj6MWk\nFIneWGb9hRdIz89TkzVIinm00zP0zNhYavQwPHiI8QNutHfUOD8IzuFhlBoNuY0N6HSwRyLYfsKU\nsDUUIrex8fbOvJrh2OlRlrZE8pUqoYkIi8t5tFsZ3ANNSrvbTI076TObEYp1ivEU2mgc38QJrp5b\npRxP4vIFOBjuo7lxi6v/8C36Ds1ie/pTNIt5Ni9eImJ3MPFbkyj0FlwWB9/7z+dRGc3YQx68w1p8\nQyFu3VggNbeAymhC7faTTpQYOjyM3unm9RfPo1JBW6Vm/IGjaDtVKskkzoCLpkqmtBdFZ7Ui5TOM\nj7lQqhRUKiKr8wk6YpN+v5H+w1PsXLyCopTCMDBGYj/G8PAgM4N6Ln79BzQ6DQJWA6XNVbbeOEf/\nmQeJLs5Rj20RPP0Il8+tE3ngFM18BqXYQudwYKrWGDxxEIPFhO/gQYROjWIsTadzJrcCJgAAIABJ\nREFUmGSywvXrcfL5OlarjolRG36PDq3Z/J733VuopFIUt7ex3ynbtFstcmtrdx2+fxGQSqX4i7/4\nC65du3avT+UuDh06xNzcHPV6HZ3u3Vmp/x5xL4ORIvBWFGEFCu884I/+6I+w2bp8BY+nj04ngFrd\n5YU0Gl26ikrVtVrefgd/4K334XCYvb0iL7xwjU6nQyjkx2TSYLPVKZeztFoWNBolhUKCV17ZZn9f\nQS4nkkjsc/BgD1/60jTZrIhSWcTl0FOcWya2vIlcKrIVK5Cbu44l2Etsfxtbb5Dq7XUCA59jw9HL\njdeuU9kv4Z+WsVusOB99nNZWl22+n0x2Teo8Hpq1Gu1AkFSlgs8/SDZbRz8cYuD3fx+3t5eG2syL\nb6zgCuqZm89waz5DZEBBu6Nifr5BKGSl3c7jdivw+cx4vUZWVtaJRrPcuFHHbtcxNaVhd7dEtWqk\n1WojyzlaLStWay9Wqwartau7EggMks2KNBoZyuUKjz12gBs3Emg0FUymrhz+q6/uMDqqZGkpgyDY\nUauVlEoJbt/OYDCoGRx0/Mj3/871+KD1+nH41xG3QqnEOTyMc3j4Q43VWiwoNRqkahWVXk/vyZP0\nCgLuiQns4TAr81G2gZwhQEVlYQQRo81Kq5ij0bFQTSVY/sErzJgtpBYWGT00xPr1ZXRODyaPG+VG\nlHKtgGNwg0ZLQWlvB0/Ix8bcNtlsleHBXqqSgu31JKX1FSwOM0vRbc48NIRNqhIa8pGrdOjoTVye\nrxLyzXDmkfspJ+JUO0ZeupAislVCZzEhWN2o+nWIpTLhQzO4Twxw47k30FoM3TKg3cT4iAXLfQGi\nsTI3zyfQ6HUYLTVupTMMTvfjdet56eYWXq+JFkpaTYlOIYUuv4nG2ibbFJB1NioiGLw+0uspGvU8\npmyOZi6BIuBna6fM6LgXnenHs/4FhQJ7JIL9p0TKC4fDtFsteg4dIrO8jCxJ6Ox2Zid6CEVqKA1j\nrGyJ6IUYCoWCjiBQL1VYmmvSc9qP0VjA6bVit/p4/WwUGQuWARsGl4mbiyn6VRr6jhwkcmyGG3/7\nt5T391B5gmwurqJb3mDgsUcxjozwyd/+JCuvvI4stQnNjuEcGiJQ28XSG0CWOygFGU/ATmgsglpo\n4R7oY2FrjfVX9zh2vI9HHhyF5k3ue+wgr/231ykn0hgcTqYemMHuMPDi2Tjf/Zc18ru7uFxGcv0a\nQjYvQ9P9KKQazWyCySkfeVmglsvQqjdRdmD4iIPCzVXapTIWgwK13UxyYRHX4RPMfP5XKMt6xOw+\nBoeFmnIKQd9LLZej1hHI/fAa5b0dpn/lSfKZMufP75PNilgtGtTFfS7+l5cIBYz4BoL4Z2fftzzX\nLJfvBiJvodNuvyfx9V+v6096XXwU/Omf/ilf/OIX7477ReFcjI+Pc+3aNU6ePHlP5v95j7+XwcgF\n4HeAbwIPA3/7zgP+/M///O5rUZR46aVNEomu1oBW6/qREs07vwC3u4d4vMzNmwmy2Rpms4disUE2\nKxKLlYnHYWKi61RqsWhIp43odCaCwRbVapOFBYl8PseZMxZmZ/1MTY1T3N9n4+Zlms0WqlKVrTdv\nslWtcN8XAji1WnxuF7rhIfKVDnOvriDIBhTKKtVcge2z83z63xxFSsYRs1lsQGlrG2u4j+z6OoZ6\nHUFQorM7SORkjHObjH3xaS4viexsrdE/1sc//dMSi/NJFFodCaOd69dF/H4j09M+otFu+aNalXjx\nxU3sdh3NpgWNpsWLL27yG78xQ7O5d8fzRcDpDHPhQpnDhwvMzHix2XyMjpruaKl0cDqHmJ72UijU\nKZfrGAwuHA4DWm2XY9Pp2LBYlOzsFMjn67RaqjulnhyDg453rcdHff9xIYoSW6tJoltJrFYt4UE3\nnl4v0CVfuicmSC8udm3uWy2cw8MIVi/XbiR59tktNBolNpuZaKJKPVOl9/Ah8gu30BrVFKJJBIUC\nhUKByaimXq5iMuvQu2zoNR3KlTI2lx2t3QmiEr3bi85spD0aIHLiCN7ZWb793D61YpVcsoBS6OAe\nGKBiCiDUSnjCPoa0HlRGM5ZGG61OyYWLMbaWdjB4fWT2s5gbMuWGAv9gCM9IL+GQBWUpjsbmgE+c\nJFYQyKZKjPUbSV86T2n4IH/5H5+n3lEj6MxoVW0ee2IErUZArVWhszuRVErUJj2NQg4x3aAYS7G3\nvIPa5sRgt6L2hWg3fTilVeJr23QApVqDQqlEpVKg0fx8MmHvBYVKhW92FvvAAHKj0W11zufJrr5C\nYTFJdF9PWwJbeAiVRoNKr6cuirQVGrT+PtxmJW2rj7FghsrOKq14BZvaj8rjQa4Y6B0JUd5cQ9zf\nRKXWoWmVCQfsWEJOXG491y/v4vL7OPxbv4GYzlAUBf7mb27x0OOjzH7qEdaudV2kQxN9nHp0go5U\n5eKbu9y6vkdLksllStREiacfGkJ643mOH3LSOh6h7/AMRrOWaKLOpTc36XTauNxmxFKZhmAlV6gS\n6Q9SWp3H3qxw6MAI0ZaXWnQHjVaJ36OnmslRzFZoV0u0qhXQGFAGRtjcKbOtkrCbWgwE3WQyZW7f\n2Ce1sMDGlTmUWi1HHzmAJp4jvraHMFBif7+E0ajBqS6z+so5mmIDtWRBJzRpiSIDjz32niU6tdGI\nQq2mLf0rfpEgoL3THnyvEY1G+bu/+zsWFhbu9am8C6dOneLs2bMfORj5ZcW9DEZu0O3UeePO6w8k\nr+r1ak6dCrG0lCaRqGCz6Rgbc7+nBkWp1OCHP9wmkajQaMisrmY4cMCP2ayhXG7i9RppNmVsNh0K\nhcDIiJNGI0m53EQUuwRNq1WHwaBGr9dw61YSn8+EWpZpt2SqVYl4vkApV0FAplxr4zp4jNr+JvV8\nnpSUYeW5VzD7/YTPnKEci2FwuUnuJPHq9BRrdXzTU6BSE781h396GksggP/YcWJ5UFX38R86QKph\n4KVvPI+srNMQWzQqIkePh6kUqqjNBhYX0xw44Gd+Ps32dh6zWUsqVeX27RRms5rjx3vp67PSaHQ7\nZwIBExMTbl58cfNOFqiIWq3k2LHuDs5u1yNJWY4fH7mTLarz2mvbnD+/i9GoIZcTmZz04HYbkOU2\ntZrEG2/sEo9XMBrVzMx4iUa7qrQfxkDvJ+GMvBeazRY/fGmZS98/T7PWbQKLTAR54rOH6BnpR6FU\n0nPoEJbeXpqVChqDgc1UgRsXYsRiZVYW48j1BoGQjf6QmWxOZGBoDF18j3IshqolcuSZp+m0atit\nGhIbu4w+fIpCpoSuVSHeaWFuQ6slU04kcU1MMHD8EI2OhljNyHJSzd5ukbYoIhZLJFoSJp+fZAmS\n2TJqpwGvz4XTb8frs7Kyvsn8loxap6WNkqMnImxcvkVL7+S1b58lPORh+Hce4Nz/9X+jNhoZPX2E\nR5/8FV5+tcTrr2/z6c+d4vLtEti8GNVV1uazNJsyff0u7jsNTZeDiSMRyukcJkMbk9NBtdWh3NaR\nXr1NJXcN99gYu2+cxdzjI3joIFKzRdPpxBweoFiWGR93f2i9mZ82/vW1oDWb4Y5iZn6zex8q1Wr6\nIm6i57ZolkroLBYsgQCaRh6j1cT8SgGzVQ/1fZb/+VmK0QQ2t53cbhyzz8Oxzz+GKqWgWSogtRW0\nahIbSysMnzjAwoUlLCUtN5a6LbLh44cZGdfz7LNbxBI1Li7c4rOfGeYzvzuCyaBAqGZpbt9iXbRh\nMysYm+kjES+h0iiJZyR0/gC2SD+VRIqRsJtqZg/ZOMD8QgqTUUtxMYPZrKHd7gq7TZwZx+3MYNTI\neKYG0fUHWLyUQ5YVHHlwkoWr15ByLQxmC70HR4mv79ESRZQOP3Z/ABIy3/ovVxgfd9FSaDGrZDoK\nFUaHnWq1ydpSnNPHhkmLVZwNmf39EgcP+invrdEUu+VyxZ1kWC2TQcxmMfe8WxHX5PHQcLnQpFLd\nUpogYO3r+xFeyQet6096Xfw4/Mmf/Alf/epX8d9RcP55z/9B4++//37+9m/ftUf/uc3/8x5/r117\nP1Q771twOPScPBlCltsfqHOxtZUnHu9mUNTq7k72xo0EDz0UplxuolQqGB628cADIQRBgUqloFhs\nMDeXolJpIssdNBol/f12Op0OtZpEPl+nz+9AZTaTT2zSLFVoVioER/tQWuzs7xZor29i8zqQpRpK\noUNxd5fc+jpStYrB46Z3epD8m7t4jx4neHCa0P33Ez69BzojpvAw6ymIZaJUOmbkWBtDLU21IuHu\nM9FsSqzN71CXOqgUHZDVCEJX6CmbrbG0lCEctuH1mpBlmVKpw/Z2AbNZyxNPDOHxGBDFFn/91ze4\ndi2OJLWZnfXS12fl1q0kktTmwoU9gsE2kUgLrVbFzk6BcrmBxaJlYyNPNiuSz9d5/PEIyWSVs2d3\nWVnppltLpQaFgshXv3qQcrn5M3Pz/SCkUjVuv7lwNxAB2FqMsjXuwx3yodbrERQKzD7f3Z9vX9kl\nkVBSL1epxOK0WzK7lSI2fRANTcyDs0iVCuZwik6riXt8lFKqgMpgoKm24j9xHE8hRWFtmbDbjbZQ\nYvl7z1JSufBY3URbLr7zvS20pgajkxq0Bg2VloXgzASlZIpyVWJAIbK8naK612T5xhtMnLmPgUE7\nJbHIQ08dJOjTkE6UOPfqKsbIGJlomp6xQTwDfor7CbKpInqDyN6lq6DS4VD6GHlyBtloJZ1LU28p\nsBg1CEolCkFGQoFSCa+/EeWRh+/HLMZYv7ZENi/hnJgkWdNSqwu0mhKirMYYClNNxDDYLRz94qfJ\n6h0Y/AFOzRiJRD6emdfPCs1KhcLW1l1J8j6vlvxsH/vRAnR68PX7OXZommSizH65jjO7h9FcRyoW\nqNeaZOMZ1JUmdQkalQpVhQOVO4jZ52VzbhOrx0kHBaVSBaHWJroRI9BrZ/v2BlpTH29ez5EvNJFa\nHbajNzl21M///JuDvPG//a+ETz9AxjaLot1isN9EZKyHjkqL261DqopUGwqsw2M0lXragkRB5UWl\nLuL1W0hGbyN57Xg9HiTaBPtcKPRGDM4QueQWjfg1dnZtzF3Z4LOfn8Q71aC5n2XmwScxaTvc/G//\njKO/j55Tp4nj4PUXrlApilTFDrLQoiG30JtM6D0+lDURZ9CLymKiruoK1jkceur1Fvo7JRe1RoHN\n/q866t6HL6S4Q1x2Tk8j5nJoLRbMPT2o9R++G+9nhWQyyde//nVWV1fv9am8J06ePMlv//Zv0263\nUfySabZ8HNzrYORj4f0CkbeisWz27VY/QRAIBCxsb+fv3i8Oh46ZGR9q9dt//sSEh2pVolRqYDJp\n6O21MDTkoNVqE/aAVsojKHvomZ1h+eo67oaVyOcewT01SzJdw+B00EyaqWSLyOU9Zk6OcfvSGpIo\nYu4J0D8VQWU143/4SdavLKKq6mka3MjBfubnU5Rv79GsVLoeJm2BXzs1Rb6m5tRnHuD21U36hvzM\nHqtTE2VCgz7Wt8qMjbkZH3cDHUqlBul0lUJB5JOfHCGfF7HZdNhsOiIRG81mC41GRy5Xx27Xo9Eo\nOH68l2azTS5X59Klfer1FjMzPqzWFJGInWKxcbeD4a2OmUZDwuUyIkky1WoTh0NPtdpEELrdDuVy\nE5PpwwUiHyd6/qAxtWqTWuFHjbw6nQ7FXBVJFH/kH2Cn0+nqntTVWNs5eoJ65v0WkrECUrOF3Gqj\n93lYuLVPuWwnMjSCFNtk/+wyqnaddlVk4JET1NVmtEE7WlOQ/kaB+MVzaCwWfBYT4089wrNvVkkU\nBSqxIjuxGo8+EkGsNWmKXnTCIANDLvYXNtAF+insphmeDGI0abFYjdTrMkIpQVNvQWqCKGjZWN5F\npdFisZuJDDhYe+kfqZVF7G4rBosRoS0RHrCwslckeSuDWG/RE7CSztToHzXTrtd46OEhClWBmUNB\nLAEbGxt6Kn4D0cIeb35vDYcJjh0KYbOb2dotk0urcbqGMXp9TH/6MdQ63U/NefcnwXtdC+80z2um\nohyIOJmaHsI+MoDDaUSS2nzzOxtIYovoxg7hgJ4enx6dTkU6XcNsUBIOmymXmogaJ0YhT+ThM1Sb\nKhTI2EK9BAN2fvjyOgpFt8tE6MiYDC4mBmLkKnq2dmsYjBqUSiXKjsTpf/8HqA16vLYhlmMLJLNV\nAh4b8WQVjbrJVlIm1RlEk5ZwWAS2t0WiC5v8yq/OcvH8Nl/6zWNs7pRp1hs8/sQwOoOG5//pPFqT\niVNPHMBQy3Nm0E+p1GB+vYqqpKNYsJC/kCPibmEcP8zYEyeJ51WsrZSQOwrQm9jYqfLEpye5cnYV\nb1+AYr5GutBkOuiikCsweugoiUSJBx7oI58XMdhDuHe38Lg0WO+0+5q83vfljAAMDHW7cWx9fR97\nXT8KPuz4v/qrv+KZZ57B7f5RMbdfFM6Fz+fD6XSysLDA1NTUz33+n/f4X8pgBLop+Wq16yz7zlZT\nt9vA+nru7nurVcvhwz1MTXmpViUUCqFrXNXudKWRAYtFy0MPhRkedrK0lO4+fDsSnegS6Z0NVHNt\nYlYz3okJemYnackTqBslLv/TixQTaWY/8QBjn3yS1//jn2HQK/HKq3zmyw9gmjiKrLexv7LD//N/\nPEdwuI9jDx9jOa1ibm6TpSvLjB2McObRYRZv7uA6OkZfxM3AWICX3ojz7e/vINUabMfmOfOJCQwm\nHcWyhNNjpr/fTr0uMTTkJJcTSSar3LyZpNGQefrpUTqdbutuPl/j9u0UTzwxxFe+MsPycpaeHhM6\nnRKVSsV3vrNErSaRTFbZ3i6g16s4diyI3a5FEMBk0jA87ECWuwJmNpuOZlMmEDAjy23K5SYKhYBG\no6Svz/q++iE/a9jsOhx+N8m1t6XDVWolnh4rmnd4ZSSTFWLLm9RuX+Dm+WXMBgWnTpxkN2wjkxG7\npEy9hqXtBns7FeZ3khw5FKHneC8+l5pbl7dY2GpQjxa5divFxpvXGBrx8tCj9+EemqG1u0zbYGd1\nfZ/lxQTVShOtQU82I/LlfzvF2CEnuxspelxqrmzFaCuMFNZXkZpNwkM+Dk4O8ebyRVa/l2BdpWTw\n0Ciff/oIq6M2qtUmw9NhXNIe39/cJDzkIzAURO/yoHX7WduucXk+g6QycPj4ANlcA3tHoDdoYXDQ\njtllI1dskq+0Qa3j5df3aDVa6CQ1lZqMUa+iipnIWIAr3ziLVqNErVWjstpJXL9Os1zGNTr6E3fC\n/CygNZux9vWRmpvrftDp0MhlCQxE8IW7vKrV1SySJNNBoCO3UeoM7G2l0SnbmFUgV9vsJJs4tRZe\n+WGUx88MYdMUmf2CC7QG2q0Wz//lWdLxPIGhAFqLGYdDT8jZJr+5Tr0mcWR8gOB4hMNhmdzl14nf\nXkQyuFCHK0wdGOD6tRilisT0bA9aQeLbf/0qGqsDsa3i8OEgksqGRtXGYlRhNylwu5wcOtaHJNaY\nu7ZHYqmGo7eHxbUSt/7zBRyaKjNnDvPpZw5y8/I2eVUHZaWBXS9z6dlLDB+dYERlQ60TsdoMVBoC\nsWiRwWEP1VKdUw+PoVN3iAx76ekxY9J2qDYV7O/lcYUdrK5mMJm0aKwORh57EEV2F62yjcnnwzM1\n9QunG/LjUK/X+cu//EteffXVe30qH4hTp05x7ty5jxSM/LLilzIY2d0tcONGousMauhyFQYGHHdr\nVV3TuxKxWFccSqdTcfRol9C6tpYjk6lRrUpMTro5cMCHz2eiWGzQarWx23XMjFlZW4pDMcX+6i10\n9SyZ3SxGt5tqMolzZJyF1U3it7bYi9ZwBfpZX4rj6PUx/YXPk1tfxRYK4R4apKDRcu7yLlsxiZ5j\nx1EpBdajbeymJj0OAd8nZxgcD6LWKXni84fptFpkMjX2k3WWljLojDocbhlZtpDMNnn8cC+SJHPu\n3C6FQp1KRSKTqWAwaPjsZ0eZnHRTKjX4h3+Yw2hU8ZnPjJFOiywspO/oqvhRq5U4HDoUCgUXLuyT\nz9dpNGTa7Q5DQwpqtW6mo1ZrEYnY7/j/SKhUCgYG7CiVAjqdmqkpD0aj5u53Fwxa3tNJ9f3w0+aM\nuN1GTjw+ww/rIrn9JHqjjukjESLjvajeIbhVKdXIzt+mXC8S6LURXd1m8bvf4+Hf+iy+T82gFSS+\n+YMEa3vNO+aAbV47F+fppyKkt8vcvBHFN9jHm5eX2dyr4bR5iCai/Mu3qswM6bj/wUmqNRmXy0i9\nJqEQFOj1GgqlBjqDjmypw9B0GKNc5FP/w4Ncefk6uYCbwECA++8P0dqew6CtILWbCJKCeipBc2+F\n4emDONxm2pU8BsHDp//33yOztUc8KaIJB4klaijNJjQmI5mCgpW1ArMHA1gsIn5/EKtVh0ajZH09\nSzxW4drlPcxGFQVJZi+vRB8aROsx4hryUcmucnTKQrNUxNZnp1xXUG4qaBWzVJI/JKJSfWDt/2eN\n97sWvNPTIAgUd3YQBAH7wMC7hLisVi3ptEzvxCjR+Ar+YyfoJDZJ7qYITE9RdE2xut8lXUptBTWM\niNSoF9tQF3nsk1OsjfZSLtWwB31MRAxU9m7ye79/jNvzGaqVJg8ds9Jau4pUrRFNNUlktsm9us4j\nv/frDPZq0Zt0NFoiCq0ay8AQYq5IqE+Nxmzh0IyXdKJCXeqwvFZk6eZtLE4zNVFmfNzDfQ9NsrWR\nI5pI4nAaaFTKZON5LP46T35qjCtX5nGeOElHLOMfjZCKl7h+YZ1PPXMUzE16eu2EgmaMRjUbayns\njhB9fXbknIjb0mFzV2RlvYAzqKXRalOvt5GkOvl8nZhGw+jwESZm3WgNhnd9/x92nX5ax3+c8d/8\n5jeZmZlhfHz8nsz/Ycfff//9vPLKK/zu7/7uPZn/5zn+ly4YyeVEzp/fI58XKRYbiGKXnPn5z799\nUZnN3SzHWwRWu11HrSaxspIllxNZWspQLNaZn0+SyYiYzWqGB6xUk0kchhaUcoT8TlJb60jLlyhk\nMigUCiRRRJXJEDhyFCm9j9etp1SzI4l1ZFFm/fY2x776ZYxTKaJXb5C8sUcakbUruxh8fmJpI7G1\nPZwuE//j792HMh9l6NgkhbaRa1dj/P0/LCA1Wzz+5AiHDtlptTp4vUZcLhWbm9yRYrfj8RixWvW4\n3UbW1rLMzPh5/fUtrlyJ4fWa2N4uIssyk5M9TE66SSar9PfbkaQOxWIdQRDodMDjMeJy6TGbNXQ6\nTQYGvNjtTeLxKsViV0/g8GE/stzG7zeRydRYXc2yuZnnS1+aJhSy0deXY3+/iMWiZWjIidd779w6\nBUHg8H39BHttFDIlNKoO/qAd/TuY+6IoUU4X2Fzao06DsMuE3zNKOZWG2AqGXj1KuxeFzoAgSHdT\n/waDGpdDyzeeW0LnGWQ726Ha0SHToGOyYHHoqBUUeA6MoO2xs7cdxes18sgnRtnZK6PVqhgatKPT\nCmh1Kv7x/7uODhGvW0f/ZJjDJwZYubyIVMiSfPMNtKoONqVEMVcmt16i3FRhVIR47Y19KgURm0NP\nIDRK+MAopniS6PI62wvbPPqVSZIKNfWVNDaLirrYZHMjzvq6jCx3GB11UyyKtBoNqpUOjzwyRDor\nIjVatDuQyYkYXC68XqioZbIFNytbOSrXvsXMoyeQdBpcJshvb9/TYOT9oDEa6b3vPrzT0wiC8C5+\ngk6npFSqd1tVwz5Csyo2L20RPn4/kac9XF7v8L3ntvjCF7z0h604bWqahTL2YC8WMyi1RnL7CRw9\nJQSlgt4+O7eee41sR4nF0eLoyQiKVoOwp8P1c0kKRYmWxkK9U8Zg1pFf3+TCxSRmj5NiDR54MILX\nb2e9oSZTqVDfLiOoFGjVAi/81x9y8MgkWqOO2H6B4SkXBw4HuTkXZXMjh8JgpWfAi0JyonG7uHQl\nht2hZX4xy8rcDqEePQ6Xgb6hPnrDTnZSLXZ2Cpw+HUapFKjVmsT2S1y6HGV1NcvMmI18pcLtKxs4\nQz0YLXqWl3M4HHqUd3g4zabM1k6J8Ukv94a6/JPj7//+7/nqV796r0/jx+LUqVN87Wtfu9en8XPB\nL10wksnUKBTqbG4WSKWqtNtdoubAgJ1HHx24e5xer6a//21y3Y0bcWS5QyrVfdAmEhVarQ6pVIVz\nZ9P4f20IbWGP6LVVjC47lv5+CtkKuXwDtaCiGI2islgRxCYYbThcAeqZJOEeHRurKRr1Jg6fi/n5\nJNpKgvlrW2jMJppmK/VGi62ra1j7IkgKLdlMmdtXNnEq22ytJsh0mrxxbpdioYHFquW559bJZkVM\nJjXz8ymWlrq+GgMDdq5ejfL66zscO9aDXq/C5zNy5UoUnU7Nyy9vEok4OHjQh8djoK/Pxs5OgZWV\nLC+8sMHv/M4hdndLbGzkyGZFPvGJQY4dC5LLdXc8yWSFvT0Fs7N65uZS6PUqBgbs7OwU6XRApVIS\nDFrRaBRIUhuTqWtkNzvre6+l+rH4aXNG3oIvYMcX6K59o9Eimayg06mwWnV0Oh2uXo2RjtWQBTXR\npQRZXZmpKQ9Wr5PA+GA37Wy28mA9htuqoKNUo9BoCfdZqZRr7CdE6pJIMGhFoVLT7KgoliTS0Rou\nt5mSCKVskXatSjpeZS9aJhCw0Gy2UCpBSZtzLy1y68YevX4D1UyOmy/ucOz0CDtvXqVv+FMUMiWo\nlUjnmpRLdRpSkhPjU0RzEhcu7NMUm/gCNl5+dZdPPj3KsdkIK7dj3PdrnwJnEFsuR7NWR61xcP7N\nKE+e6aFTyVEu1qmmYWI2wuSYAxB45eUNNraK7G7lmD7g5yu/eZhKqcrKm3OkdpPE97JIlTI+r55m\nOo5lfBaVRfzRds17gB93LWjeY9cuihLz8ylGR90IgoJYokYoPITvVD+3V7MsXkqjVit5+OEIJ04E\n2d3O8epz8ygbJUp7O5x68gj5ioRRo0RvUKNWa0hlami8Id74xwXWV1ZRKhXpcDw8AAAgAElEQVTM\nHO7l9/6XM2RKUMrVie/lMDkdVPIVWiiolKrojRomZseIL61iGZlGrVFx65bI6dMCW9slHn9skNJ+\nnNvXdjB5XIxOmRgbdXP1eoxKrorPb+PWrRS511YZjFhJFmTUej2lkoTB0kNbvUvPUB+XLu/x+tVl\nHnygj16/loOHg/y/fz/PmUcG+e53V4hGS8i1Cr1BM5lMnSefGiSaE9jI5phS2RBFCa1WiUbzdlD3\nYSwbPuw6/aTHf9Tx8XicK1eu8N3vfveezP9Rxg8NDSGKIru7u4Q+pFDcL9L5fxT80gUjSqVwl6zZ\nbnd3rO12l8CZSFTo67O95zizuct/aDRkyuUmrVYHo1GNRqPComtT3Vohev0c6WuXMfQNkk0U8Y2P\noLU7iV+/ik6rpZ4vMvPlf8vyag59ZJJcssDK+TcxuV34p3oJH5nh5Td2UBaj7G3nKIsZjjwZwBdy\nU5XLuH02VAaJIwdcbNxYIPjoKPFkg41cnnJZolyREBQKZLnDzk6RT3xikHpd5jvfWeLUqV4yGZFy\nuYHH01W73N8vYrXqWVnJ8vjjA3zuc+PcvJmg0ZAJBi0Ui3UsFg0bG3m++MVJqlWJCxf2OHDAh9tt\npFptks+LfPnLMywspDl/fhePx4hGo2R3t0gkYieTqd314FEoBPT67iXzTqLgLyL29opcvhxle7tI\nudxgfNzFzIyPzc08CoWCmUePY1TL1Mpd1+fwZITw6QcwuN1EL11CWtvBpHIyvxhHoTOg1ysRy3Wc\nbhPZXIMjx4JsbORRa5SUS3X243UOPDpAq1Ilut9m48o8p3/lUTRmK9FoGZvNyAOn+1HR4sqrt+i0\nwTQVxGFsgdwkkW7QMzlBPpEhcuIIy69dRKNto9GpkVVKfJNjPP9SGr1OhZoWao0KhUrBlctRwmEb\n1/dUFLU1nPEYi1dWOXZqFKmt5BOn3ZRuvsn6rU0azTZGh4UDY1+gY/bw3e8s8i/fvsXIVBCXQ8fu\nZpYLF/Y580CAlkmgoOzaBJj7rVj6Imzm2yhXKminbUxN9N7rJf7IyGbFu512AwN2hoYcKBQCExNu\nrFYtDocetbqrJiyKEnNzKSwKmUI0htmgRmF1osptsvTiWVRKBS2NgcHJEPrwKBvLcZoS1Ko1CqUW\nt7ZljMF+SoVFOioNNquW0JCfdFODWqdmYDLMiQfCiLUe6kojiWqer3xlFp1GycJ8kgsX97HZXFx9\n7QY9w3DwoJ/VtSzRaIUet5ZgwIReLRPdLTA128PWRoYHHhljcy1JNlvn3/z6QV56aR29UYfZpCW+\nl+Xsc1vQOsaTjwQpNVvcuJHAZFITCjjQKRvE72STBYUCtUbP3l6JmRkvtZrE8nIaSWrj9Zo4eNBH\no9H60AHJLxK+8Y1v8OlPfxr9L0BHz4+DIAh3eSP/vfvU3Du1oh+PP/7jP/7jd32oVitYXc2xs/O2\nnbjPZ2RgwEG1miYS8b9rDIBKaJFNFCkUG2xtFxgcdPDEE4PIcof7ZiwkL59HrpQxmbVU23oqmRw2\nnwvnyAg6ox6t3UnoyCHcs4fZLWpYTVcZnBhCbnfFtFSuAPpAiO9/d45QxMPe7RXKtQ6pvQxf+vdP\nozRbqUkKenrMWO16wsM92D02zC4H61sVLl2KUizWkeU2nU7X1TabrTE97WV8XMX4eIibN5MsL2do\ntdrYbDokqU2r1aZalVheznD6dB8TE+475RcjDz0UptGQGRpycuVKjOXlDNeuxclmawSDVoaGnJRK\nTYaHHUQiDlQqgWYziyxrCQTMGI0aenosdzwp3t4F6/UqZmd9mEw/GVF1e3v7rsIuwNe+9jXea80/\naMz7oVJp8tpr2ywvZ1haSpPNimxu5vH7TWxs5HG7jbR1FlpGFQaLC9dAhNHT9+EMBShHo8QuXwab\nl2tX9rHaDSiFNp6Ak3xJYvZAgFxe5NnvrzI66mJywsuRowEePOPErIJXvn0RlUZFNVvAGuihgZbR\nUTdKpYJKpYFGCds7JZpiA7/XQDpbxzM0iC/kRhTrOPuClDI59KMRBsb6cQ/0M/nEGQz9I5SbSpQq\nJdU6RKNlKpUmrZaMLMn0h604HEZiuxmEXBStVoXN56IV36LaqmBze1FrlOgUEnZ9G9/4EP/8T/Nk\n0jU0aoG23ELQdtufHzwTwa1vIJWLSC1wTU7xwnNLFKqgtCgoVxRonW5CIetP3FnzYdb9Lchym1Sq\nQipVZWtrG6/X9ZHmL5cbrK/nkOUO9XoLUWyRz8fx+12cORPhwAE/U1Me7HY9tVoLqdWhJdYQanls\n/f2o1UouffMFEjspWgiodVqy0QzOvgAtq4F6XY0zFMAWCrGzV6F/JoLeZmXiQB+mcD9jp49itel4\n8On7QG4xf3GZFipMXh8dFJw7N8fzz+0hNlqYTRqsVi0Go5bxqR7m5tI4XUauXo2RjOcRK00OHPAS\nGXAxPumjP2xjP1ZCrLWJ7u+g1prZ2yvj9RhYXojhcpnRGTRo1FBK55k+3E8qXUOvVyMIHQw6BQ6P\nlUjExm60hqDRoVaXsNnsnDgRpNXqoNWqsFg0iGKX8F6vt3C7P9gK4P3u2fdb9w97j3/U+d7CH/zB\nH/Dv/t2/Y2Bg4D1//rOe/6OOj8Vi3Lx5k6eeeuqezP/THH+n5PSedadfurDWbNby8MP9NBot4vEK\nLpeBwcEuydLne++HY25jg/iNG/RrTXgfCjA95SYaLfHC80uUyzLOp7zE9gqM9NtIJ5XsRkuoVAIH\nbDbKJRGlw0urJoNWR7VQQoGNubkM+aQLMamgI1aoXd6hZ2YSi8PCZkrB0H0zpPfTHHriBFduZFle\nK6PSa2nkG3z323McPNzDcMRCaLSX0VEXVqsWSer6a7hceoJBC+vrWZLJCg5HE6WyiFIpUK1K+HxG\narUWpVKT3l4rnU4HWW4jSW1isQpOpx5ZbnP27C6pVJVEokKnA5OTHtbX8wiCQDRa4uTJXjodcDgM\nKBTCHeM8BUZjd8eg1Srp7bUQClm4dStJLidiNGqYmvLcU27Ih0E+L1IodMtxstzN4khSm2i0jN2u\npdPp8P3vr6JUltBonIR1VqRbZT7hb1IvFEAQaCm12LxOXnh+mUS0SO/NHJNHBjCbdZw9t4tapWBu\nLo1Gk8XhMDDUL3HpuWVqdRlvwIFjqpc3l+rMz+8RClkxmTTcf3+I9bUso4eGiG9bMVrUaO06itUO\ngbCTpUqD8z+o8sShXuTiJjtbFUZOzpLReLl+tcDQkBujUcuLP1jHbNXjcRt54qlhblzdY3QkgEEh\nEetIIEtsL2xw5NEDLEb17KfbpKMVIoNBDo+b0CkrILcYmw5RFTso6ab4G20l4bCNVqvNwNFDyGIN\ni6/I7X0Jc48f38ggZmcHk8XPzk6JiYk6DsfPZ4cpSTJXr8ZYWemKt7XbeWTZwsGD/g/UHfrXcDoN\nuN3Gu0rOABpNt/woCAI6nQqdToXZ3O0YW1pKY/L5sFuUvPrCCr/6jANaEo2mTDpVQW+zYjJqEHN5\nxI6erMKFoiWQWclz4kSIpqzklUsljh5w4vS7qSqtmLwK1tfzbMYhVjQT3e7g6+TRaJQUSw1SaRGz\nVUe93sLr0XPy/oN8/9kNFhdTWO9sQnRGLYVKi8SbO4yO2KnH96h1tKyt5LDbtDgcBjKZbnt/LFqm\n0xZYX0mSiOYYn/CiUktItSqTYw72d3M0W3r6B5047Xr6BtyM5lp3srB67HY95XITm01HIlFhcTGN\n1arjwQfDXL8ex+nUfyh/sF8ErKysEI1GOXPmzL0+lQ+NU6dOfSzxs1823Mtg5Ang/wQywP0fZWB/\nv4377w+xt1ei2ZQpFBoMDNg5eDDwrmPFXI7opUs0KxX0ToHmlecJThzn4mYKqw4sahDbCnRmE/ly\ni/h+HrPLjX1knJtJMxpzkMDpccJqkdjiCks3d5h8epRrGy1uzqd4+MAs7cQG9iOD6IwGnvjcIS69\nuUdDM8wnn3mS3f0K16/vsbNXpl6XcTgN9I/2gFqHwmzj4qUYJ0708od/eIylpW7Hy9iYG7fbQE+P\niViszPi4G6/XiFqtQJZlQiE7167FGB11MTzs4FvfWmRkxMUbb2zT6QgYDGoaDZmXX96gv99BLldn\nYyPLE08M8/nPj7K/X8Zs1mIyaThwwI/T2a2tHz/ey82bqjuiZSomJ734fCYEQcDjMVGrSXfagX86\nCbWfNmekLcs0SiWUGg0qlQKlUqDV6gYi3bKSgCAITE15WVhI026DINgwmTTo9SryebEbcJnNaIxG\nTE4HN64tkI4XUaiU1OptqpUmhUIdp9NILFam0egGkPF4mYMHhxg4aeLBHiOhPiuSrMCRiTM66kKr\nVXH0aIB2u4PZoiM04WFswsvaaoaBkA2Xx0gsVrnDc+qg6w9jIsTgSRUXr+V449lNBIWS8aSIz2fi\nT/7Do2xsZsnl6xQKNR59fIBWrYZe3eGxR8M8+/UowUgfi4sZVtfy5PZqNGotoptJhoecBANGtAY9\nT39mnGKlxdpaDlFsMTho7ZYDVnOEwwOMfvJJwsUixVf20AQktPq3KYvdAFj+yGv4cZFIVFha6mYG\nARQKO/8/d+8VJMl13nv+KjPLe1/V1dXezrSZHtcYg9EYGAKkKFBQULQi5UhJjN1QxN7Q0z4pFKEb\netCVVqEN8e4NUbEUyCUpgRQEEQRhZoDxfqZnpr2v7i7vvc19yEZTICj44QD8v8xUdZ+u05nZmd/5\nzt/cvRujrc38rh+GOp3E4cNBpqYiZLNVbDYdFoufWq1JoVB7U7evrc3M4KCTlZUMWlsbNdUaxarM\n6EQ7t2pV6mgQRYH+ASede3uZLlcZHVVCI6uVBo880ovcavBbX9jH3HwKo8PChQtbtAfN5BoakgUB\nb18QQRS5cmWTEye68fuDTN1epFSuKwRRo561tSyrqxkEUSAcLvAnf7KfmekE1Wqd8ad2IVZznPnh\nRYKj/Xz2syOcfnGaoeFBuntdVKtNXnlliXqpxMJMmIFBD/l0nrpGhcUs0dcGjYyM1qhlZNDI2GQ/\n8/MJ+vsdiKJif2A0qsnllI6SRiMyOOhiczMHKGTWRKL0tsf/o8QZeeaZZ/jc5z63Q8b9ZX/++xm/\nZ88eVlZWSKfT2O3vbDL4UZv/u8WDzqYZB155rwMlSWR01IvHYyKfV0zKfD4TavVbL7ByJkOtoKyC\nVIJAamEBleBCFUvhs3uQkRDVWvqPHSJ8/TrOgAffwYd47Wqa5dshGoTp6rLx+OO9SP4h+sZ02J1m\nHjsRRJSbaLw2Og7sYnYqxPW5Mi25Qne7lmIqw9StMKFYA7vTxPpaBodDRyyWZ3jYg8GkQ6eTcLn0\nTE1F8XqNTE4GkCSRaLRINlulUmmQyVS4ezeOLMPJk910ddlIpcocP96F12tkeTnNZz+7G5/PxOnT\nqyQSZZaXlfThZlNGEKCz00qrJdNsNpmcDNLebsVu13HoUDsm088eLl1dNvx+E/l8DaNR/SYXVUFQ\nfeBtmfuJYjxO+Pp1Ssmk4vo4todgu5nFRR0bG1ny+RptbSbsdi1qtcDu3YoEWhBUGI3K7yUIKiRJ\npKKysdb009gqobY5sPsrFEpNYqk63kSJ7m4HIyNuOjutFIt1LBYN3d02PB4jS0sZCuUWKlFNR8DE\n0JCLqakoU1NR9HqR115bx+HQYxvTs7qawWDSkS/WufDcHI8+0ouKFrKshA+uRaAVb/Ds88uoVAL1\nhoxOnwaUrcnNrTzZTJX+AQerK1kkoUUzEcLl0PP0Vx5G721jdj6N+sgIDruGlXtr5LJVbtzY4sDJ\nx2kKGsxm+P3fn2BlJYMsKx49qVQZUVR8Y9R6NWq9nuE9AukLIf4zVcjh0P/SuiLAdgZS603v1est\ncrkqgbeuQ/5LuFwGTpzoZm0tw6VLGywspFCplK7Jww934HYrKc+CoKKjw4Ykieh0Il/930+SjiSx\nDe5mqNqiWKjS3mHFGgyymtHR1WWgu9u2rSxzMT0dZW42CSq2fXt0qCWZ4WE3586V6ex20pKhUmmw\nf38bJpOGr31tH16PAa9Hz8ljAf75/36Z/acm8PqMXL2mLFz+x/+4hM2m48iRIEsrWdoCZn73//wc\nqXCMf/3WOQKdPvZMBLg9FWViwsepU73cuanjoUk/QqvJretr7Dk0QD5ToZyvcPxYO36niLqeJejX\n43J1srmZo15vodWKXLgQ4uLFELFYiUikwMSEj/3723Z4Yzrdx6PBLssyzzzzDN/73vce9FTeE9Rq\nNZOTk1y4cOFdb9V8HPEgr6K3pPS+F6jVIu3tb67Gf5G+WZQkVKK4EzHeqFQwCQ0kuQ75FCogFdah\nHuhhz2930CgXmV1vkGi2yJVK+P1aGo0WU1NRnnqinUY+y/kXbyFZRb74hVFq5TIv/tttyoUqequR\nSq5E1Wyize9kfatILpzi0KNjzM5EUalUNBogiAI6ncS9e3FKpToPPRRkfl4xWpuejqPXq7mwfeN/\n7LEebt+eRZIETCYNd+5E6etz8vrrq0SjRfbs8REMWrBYNDSbijX84cNBUqkKly5tsG+fn1qtRa2m\nbGsVCjV6euzbnI+3CvPC4Y0PXBm/W3xYPiONWo2NS5cohMM7721ePM/YsUdQCV1IkoDNqmWkS6S0\nNYuothEY6CbaY2djYx1Q3CO7uqw0qlVeenGeYhEcNlnhS3S145SVlXk0nMHrG2BhUYUkqTCbNZhM\nGlwuA4uLy9jtVmRZxcJCiv5+J9FogbNn1+npsfFv/zbP7GyCP/qj/dy5E+Pu3RgAExM+fu1YJy+/\nvMTCYorIVp6JCR8DQ2p8vgC1OtRqdYrFGuPjXmRZRq9X4/OZMRq1qGSYmY3jd6khXyOSgpatQWVp\ng2vXI4TDeY4dtdFtcnDn5gZqn5cXzyfp64PHHusll6uxtpahXm+xspJBFFUcOBB4UzHa1+cgl6uy\nvJymUIgRCAQ5cCDwCxcA9wsmk2bHrBCU5G6Dwb1TTL4XNJsy9+7FyeVqVKsJtFoXiUSJu3djHD/e\nRaXS4Nq1LVZW0lgsOrLZCplMhfZ2J9js9Lf50TfzeLv8XJqu8sw/zzEwIOL3txOPl0inqyQSRZ79\n4Sy5XJVSqcHhw+1YrDp+9MMZNreUTunUVJRmU6avz8HIiAejMY/dosYgV0jMzbKr14jJIKFRC3zx\ni2NEInni8RJWq45kssS1a2FsNh3xaJCeHhuf+sqjhBMVXvzpDTY2VIgCzM+nOHbQSTO5RSYS5/O/\nNYA5EOD2rQ2K+RJGvYhLI9Iqp2k1GlgsZopFHdeubZFKhTl3TsmwamszUS7XiUQKBAJmIpEibrcB\nv//tt2w/Kj4jly9fRpIk9u3b90A+/4OMf4PE+m6KkY/i/N8NPh4l7QeA0ePB7PeT29gAFOtiTSHG\nnsO7uDe1pahqHDb8bRYaejVbaYm7ixtsbJWRZTCZtESjRSwWLS1BzU9+cIVmvYmr20DLZ+T2pSXC\nl+5SrskY7DYks5VKqcxQd5CBoJrV2wniG0ke/cQgpVIDnU6kr8/J/LzidZJKVchmK3R12YlGi1it\nOmKx4k4aarMp8+ijveRyykptfNzPf//v59jYyNFsymxt5Xn66WEKhRrXr2+xtJSmXK7z5JMDVCp1\nVCq4cydKLFZgzx4lVK/RaDEy4nmTA+3HGeVkklIi8ab35EaD7NIskqaPEye6MNXizL10hnKhTHJa\nxUR8jT0Tk8iyBVnW09trx6vOcf30dRbOztISJFS7+xg/0MErLy8jt2RCq0kGh9zo9Yr0c3ExRbXa\nZP/+NjY2cmxuVqlWy0iSgNutdK3sdh0dHVYqFYXv8OUvj9FotHjhhQUSiTKSpGJ2NsnXv76PSLSA\nz2PEZBBpD1rJpFMMDupxuQyEQlmGhlyUSnXcbiOXLm3wgx9M8+ST/YTrDW5dWcF0tIPNu2G8gz28\n8OMF+ncHMBgUxdjr50Ls3buLdN1An9XM3FyKcrnJY4/10ttrR60WWF5O02zKdHfbCAQsJBJFNBoR\ni0WHVitx6JDCb1pbUzM01LNNCk5hNmtxuQz33SLe7zfR0WHZkZqrVNDdbcfrNb7nn1Us1shmq296\nT60WyOWqzM8nWV3N8OMfL9DXZ2d+PkU0qmy/xGJFegNqLLoUyfV1SjWYulFkcrKdYjHG3/3dVQYH\nndTrLXp77Zw61cPaWpbp6QRHjnSwtVXk/Lk1Tj3axz/+401KpQadnVZqtSayLNNqqRja5UbdqrJx\n6wrL0xuYvR52DTmRBZH2gBmVSrWtHlRsCvL5GolUhavXZ/mt3xqmt9fJ1NQ8oqgnHi/z9JMBrn3/\neQqxGLVShfzKIhOPHCS9liO2kSQyI5A/0MUnnuhDazZTqTS4dGljpztbrTap1Zp0ddl2ruVmU2Z0\n1ENPjx2LRfdhneL7imeeeYYvfvGLH4kog/eKo0ePvmuS98cVv4xixAv8fz/3XgT4/DsN/NM//dMd\nVu7Q0BAPPfTQTsW1uroK8JbXb+A/f73j6FHuXr5MKptl8KmnyK6uAlUOPdKJ1dOJtztIvZEinc6j\n06kZHnaSTG5hsehotdR4PEZ8vgbReAS12YpYKdLVaWN1+jbVTIO2Xj/GTicqUaJZFCg3BErlBGKj\nwsnPTHLuSoJ0McTBg+3YbF5eemkJg6HIgQNOlpYM/Md/zHPsmJVUqkS5bMJoVDM6qkGlUpFKldnY\n0FEsbmKz1dBqnVSrDbq6oNGQqVSavPLKCk884aKtrcWxYxOEQlny+Qhf/Wo3y8sywWCJ/fv1bG6u\nsbkpkUqVgSx9fXZOnNj7luPVasmsrKwgisI7Hu8P+vrnz9c74RdV3CpRRPULgqRUgkSp1KBWqRGZ\nukEyquxzG4xqGpUqtY0lfuPTT4JKoJyIE7owTXyrxsZamlq9QWQzw8DD+3jykwMIAhw/0YXVpicS\nKfODH9zbXskqqc7z8wnyechm4zSbMlqtiN9vpKPDwuCgg6WlNF/72j66uqycPbuOyaSlXG7QaLRo\nazOxuprB57MgN2qY9DpCa0mcbjNWq5Yvf3mUO3fiZDJl1tezfO5zI3zzm9dQq0VKpRqCICBIEsVS\nk6YMbUEHvbtNuNxGstkq3d02zpyREUUVTz01SD5fJ5EooVKpqNdbqFQqOjttO7L4SKTASy8tkclU\nkCSB/n4H/f0ONBoJu12PTtfHhQsbrK5maDRaGAxq9uzxMjLifVfn8P3ijeTu7u486XQFh6ObQMD8\nvuSl1WoDt1uPXi/RaLQjScK2sqa8U5TlclWq1RZzcwnUaoFIpIhOqyJ0r8pXvzqOEFqlmEyye7iN\noqznekjHl740RjJZJJOpsrKS5sSJ7m1vmzDZbBWNVkJv1NBqysiyCrVaWXSYTBoWFlJ4PUHGx/QY\ntBrUu3eTK7YwOBxceG2LXKGGy2mkt9fORijLlath4vEyY2MewuECjUaLTKZCIGCmUjFy714EtVog\ntZTBoavQ1u+kUatTa4lMX7xL1559bKyliacriAsFRk/Y0UYVwncmo2yJeTwB9PoS5XKDQkEhzGu1\nIvv2+fH5zO/qWH8UOCP1ep3vf//7nD9//oF8/gcdf+jQIW7dukWpVMLwDq63H8X5vxv8MoqRKHDi\n/Qz8m7/5m//yaz//C7/da63Fwr5HH9153ThwgGo2i6TXK9HjwL17dZaWiohiA7fbwMmTe9gKZUin\ny/T0uDh5xE8lneTUp8aweF1UZq6yenmRoQMnePVqgdM/WqGQr9K7q53f/r0jBIUwq6sZbs9uMDLe\njc05SKFQ41vfuq1EgO92s7KSYHDQte1c6kOWCywuJunosHPpUgpZlnE6q9y4EcZo1NBq1XA6s/T1\nOVlaUhGJFBgYEKjVmlitXvr7Jb71rVs4nXrCYSOi6GB4WGk/nzsXIp0usXdvG3a7DrPZQyYjUSzW\nMBo1dHV1Ua8r6oH5+SStltI6fsNL4L0c7w/j9XuBwenEEgiQXl7eeU/UavEO9ZPahNhmmWqhtPM1\nl1OPpBaplUo0azU0BgOlRIJcLI7B4KbelGk2ZfLJLJsLW1gdVjr7HJw+vUq+2MRs1jIy4uXWre0t\nkGOddHTY+PGPF6jXW1QqyjFTiMMpOjpsVCpNvve9e5TLAW7ejDA/n6S93UKrJbOwoPjE5HJl6nU1\nkqQis5igXq4iqRr4vAZiMQPHjnVw61aE5aUUpVIDs1lDKJTn1KkuSqUa3UNuTjw2QKkiMzUV4ebN\nLQwGNfsPtPPwwx3o9WpisQLJZBlBENi1y4XL9bMbWyql+Ni89toqGxt5BEGFKKq4fn2LI0cUw6Xe\nXodiIb6U2uGPlEp1bt+O0tZmue8cEoNBQ3+/832Pl2WZu3djTE1FWV/PEo+XOHiwjVqtwfJyBpVK\nxdmz6zzxRD8+n4lGo0WrJTM/n1S4I6KKRCbDxmaBoeEe5lZqXDq/QkWy8tzzi+h0Er/3exP4/YqM\nWC2CzykyMmDB4zGwvBDlxLEOMvk6gYB5p+PSaDRZW8vyxBO9/NtzMwwOuMilaxx76nHOnV/jzt0Y\nyWgWh12D3WnmkU8MMDcXY3zcy/iYh1dPr/CZzwwjSUpCucWi49OfHqTZlImEFqnkmuzf50arUzMz\nkyAWytLpsFGxdWB0qVA5bKxulsmVI4yMeBBFxdiwXm9y6FA7N29G0OkkjEY1IyMffTXdz+Pll1+m\nu7ubvr6+Bz2V9wWj0cj4+DgXL17k1KlTD3o69wUPMpd4H/ASMAL8FD64s/C7XV1LWi1Gj2enEEkk\nity6FaFQqJPNVqlV6vi1GR47YOALn27jE7sq5M4/T+jMK1z57o8IX77ERjJN2/g4mXSZrY00cqul\nrNAEieXVHIa2DoKjQ8h6O1vRKq1Wi1deWaa314bTqd/+w9bQ1+fg2LFO/vVf7yEI0NZmIZst0dvr\nYHDQycCAE5OpxNpamnJZya2x27VUq00CAQvRaJF9+/yATD5f5StfGeFAWHEAACAASURBVGdw0InL\nZcRgUFOrNanV6mg0AmNjPmRZplCocfduHEFQUanUyWYrTE/HefXVFa5cucv6epbXXlvj+9+/x7Vr\nWx/0tHzg8/VOYwRRJDA5iW/PHvQOB5b2djqPHcMaDDI66qW9242rsw2dXqKj04rBqGF2Nk6yKHL1\nwk3iy2uoBIFCpkwrG+PQsV6MOhCaFbxeA8P9ZpamN4lGi8TjJV58cZFwOI/fbyKZLDM1FcHnM/HI\nI3YcDv2O2uull5aQZRUXL26QzVaIRwuEQpkd9VIqVaaz04YoCtvjFAXD6vwWpXgMnz3H+ecuUEun\n+PVP9iBJAtFogUi0wPi4B4ddh9WqXEOf/exuREFAFFRsrqdxWLVkkkWmb23yk+em6eqSGRpyUirV\nsdl0PP54D4880rPTVZibS/Dii4vMzyd5/vl5Tp9eYWkpxdWrW4RCOTY38xgMahYXk9y5M/8WKW2p\nVKdQqL7l3NxPvJ/rJxYrcvNmhGKxjtNpoK3NzPT0/HaXsYFGIzIx4SOTqTA5GcDnM9JotJBlFUaj\nGp1WxGLWUSuXyTcNzC0kKYa3aPPqOXXcQmeHkhCu0QjYLWrsZoFWscBvPOZnyFUioMtRT2zS7tPz\n5Cd6OXasg+5uO+Vyg9/8zWHy+Rg/+rcFzl/YoFCWmV/OEUnU6egwMzpsx+XUEwyacTv1fONPDvLQ\npJ/ZmQiPnurCaJQ4e3ada9fCFIsxlpfTDAw4GNzXj7/NwsZmgZXVLA6ngUC3h82kTLYElaaE3WFg\nZibB2lpmu1Nm3T5emxgMGh5/vJcvf3mMJ57oZ3zc9562Ot7reXo/5/Wdxj/zzDN86UtfemCf/2GM\nP378OGfOnHlgn3+/xz9Izsh14NF3/K5fAgqFOsViHbVaQJIEPOo0hlaJ9IXX0Rn1zDz3PHq7HfPg\nCDN3EtjEMj2PHURIxNlKNIjeuondasXaFcDk1DF9O8zBA350JgOrm0sUK2VAWQEPDbmZnAyQTlcw\nm7V4PEYuXgzhcCjeIfv2+dFoRFotJVn29OlVVKoGpZJSRLjdBrq7PYpLogra260cPBggm61gMmn4\n7nfvotNJtLWZuXx5kxMnuvjDP9zHCy8ssbKiKHq8XhO3b0cxGCQOHGjjzJk1YjGlIMvnowwPK5Hf\nKysZfvrTJQYGHLjdH+2VkNZsJnDwIP69e5Vtm+2bpdGoYe/eNro8xwn5NKQ3la5EsNuDSpS48uxL\nrBl1PPTEJBa3ndlzC2jVah7e78DSPoK1PUAqUWQrlKTNZ0HSSAQCFi5dCvG1r+1DqxVxOPQYjRJ6\nvUQwaKHVanHzZhiXy0ChUGVpMUWw3UhPt5VUUpFBPvxwB3emoui0Inv3+lChJCyf/LUg616Z2IaE\nztBkPabi2X+5y29qDXgCdj71yX6uXd/CYdciN5oE2y3UKzXuziVo5VKEKiZOPzfFvoeHGRhwsbau\nxm7TIgmQzVbo73dis+kwm7U7suRMpsyNG2EkSbUdt1All6tRqzXJZiu4XAby+QrXrm0higKdnTJ2\nu5V4/GfdpvdiD/4gkU5XqFQaO68FQcXaWhazuUiz2SIazePzmSkUqqhUMqOjXkRR4MyZVer1Jlaj\nyECHl9h6GL/dR7Nex2ER2bg7h9kqYFJVKGZ1VMsWHj3VyWCbzGwohCaW5tpPztM3cQhBZcNoFujs\ncHDqVBe3puIMDDgoFmvcuRNFElXo9WoymSrpjBJSaTVrWVkJYXLaeOm523jsavo6DRzY62Oo34bZ\npObFny5hNqqpN8Fq1XHy5CgajUgqJmPt7Se/vsbtmxvsP9zL6GN7WcoYOHBAhYY6lWSY0HqaYspP\nV5edo0eDOBx67t4t4vMZ6e93/kL5brFYY2UlTTRaxGbT0dNjx27/aDmbFgoFnn/+ef76r//6QU/l\nA+H48eP8+Z//+YOexn3DR//u8R7wflv9ep1In0+mnkthsJrQyjKRy1Os/fRV/OMjJBaWsXidNCU9\nfV1BxHKK6twCapcNZ2cAz+4RNEY9DUHH3N0Q/k4f9UqdlduLdPs1vHQ2QqPRZPduD41Gi9u3o+Ry\nVUZGPMzPJ5mairGxkaOz08rCQhKdTs3FiyEmJ9tpNJo0GiaCQYFKRVnVTk/H6e21Mzzs5vXX1/mn\nf7rFH/3RfjY28gSDVvR6CVFUIYoC8XiJYNDGoUPt2Gw6ZmcT3L0bZc8eL4GAmbm5JMlkGVmWKZcb\nLC9DqRRmcjJAIlEin68Rj5fuSzHyfs7XO40RpF98STva/Vg+/Ulmby6h3VWlHN7gztkbqKp1ksUG\noalZeiYnOGDzcefVK4iSQDJdp6avsJUoc+XcEpY2P6WaivZ2C1/84hjlstJar1abbGzksFp93Lp1\ni1isyN69fpxOPfPzKQRRhcWio9Fo4HIauXxxlYlxL7/71TFcTj03r61z7/oKXUPt5LJlxYBvOMC1\nKyF++uN7WB1WLl/eJJlc5MjDnQwP2DFqYXdQpGPAz7WrWwjZCHM3l9l3cg9Gm4XL51Zxu7SUmhoS\n4TSVUz1shZNshssMDCgEyzt3Yuh0IpHlTerhFew+CznZzNiYl7Nn16nXmzgc+m05p5+pqSitlkyr\n5eb4cdNOMSKKKvr7HTuS2F8W3s/1o9NJb1LkpFJlEgktxWKdCxdC5PN1rFYtf/zH+7lyZZNkssxv\n/MYQdrtiNgYQ30xgMbVjMTTZva+bs68ssDm9js6ow+11MbrXQUevHZtQ4LVvfp9GsUSnf5JCPE3l\n6hWK2g7yhTov3w5z9IlxyjUdUzc2yJWamIxG2oMt9kz4WFtOUq3UGB52sRHK4PLbmZtN4HEb8DjU\nnP7uy2i0asZO7sXd3oWREnqDAUeHH51Ow+uvr6NWC3R22qjZ+hG0Hk4dOEKhoebmep2jR13oKXLp\n1QUsNi39/Q6MBpliVpH1jo56GR39r3lA1WqD8+fXWV39mRv22lqWU6e6sVp/Rmp90JyRZ599locf\nfhiPx/NAPv/DGn/48GFu3rz5jryRj+r83wm/UsXI+0UzvET62llCKwmaLdgz7kUtQrNaQWs2Yw74\nsfnc6K16jCao5gVqxTLTFy8T/PRvoxIEpq/MozFb0JjN7D3Ug92q5l9evcuBR/byhKGHe/NZPvnJ\nftLpMpcubeLzmTCbtbz88hI2mx6rVUtbm5lnn53B4dDx5JMDRCIFjh3rAmRUKvB6TdRqLX7wg2mC\nQSvLy2kaDZlduzz4/Was1tQ20a6Ax2PEbtfhcump15tEIgVu344Qi5VotWRu344yMuKlUKgBinfL\nG/v92Wx1R7LZ1WXbuXF/HNFotEilSsgyOJ16Mk0TuVKdaiyNs7cHmg3KqST1ao1caJ2RJz6F2h3g\n4iv3EAUVgiiiqueo12VklUA8XsTlMtJqyczMxHA49MTjRc6dC/H44/382Z8d4caNMMGghVyuSqFQ\n59q1Leanw3R2OTCatewZ91EpFJGoE9mq4nZokTQiZ15ZRKuVuHl2iVimiddrYfduD7LeRiJRZOrm\nJm6njjvn77Bnog2DxQRaDbN3N2nkSzi7OlCptYwd6OH8a4vUKjXKNZnhXW4W5+I4vFbcbsUvRpIE\nBAGi8yusXbzC1ItTGIwa7O1+Jh8+vi2XlTEaNRSLivS31VJIsCoV2O06Hn64Y6dbFwhYPhbKLJ/P\nRHu7hfV15QFaLjfw+82sr2coFhuEw3m2tnIUizVGRtzcuBHh5s0wPV0WohtJGvUmew90UG2AChlz\nbgqv18iWzUA8ViDY2cJvqqPTCtRbevo+9WlaxSypQpJcTUKfK6Dx1LF4zeiNSZZnQnT0tbFn0EAk\n3cLTZufUIz3cuxvlO9++hdmi4U//j2OMjbdRKzlo73TisanYuDuPUCtSrgqsXJliT4+IU07QrNto\ntdqYnVUsAs6dW+cHP7jH3r1+DAYNY2MejEaBzc08UqvJ8kISd4ePrUiJhVAeg1RHNlp3umZvh1is\nSCiUe9N7iUSJzc38m4qRB41vf/vb/OEf/uGDnsYHhtFoZGxsjEuXLn2sHGTfLX6lipH3o28up1Ik\npu8R8OrQCHZC61kKG2s4OtoRdXoko5F9v/MlkgtziIJIoVzEO9hLUaumXmtRuneF3/39TzK3uotM\nroGnzUqg3UY2U8ZgNnH9tbt0jA1iMGi4dy8OyMiyTK3W5MKFEHNzST7zmSHa2szbq04Zn8+kJMvG\ni5jNWh5/3Em1aiadrqDRiHzmM0NotRKxWBGzWUMuV+PZZ2cIBCzodBL9/Q70egmzWYNWqyaVKhON\nlggGLezZ42N1NUMkUiCfr9Debt32NJFxu/WMjWmo1SxoNAL79/vp7rbdtxvLh+Uz8l8hm1W8VjY3\ncySTZXQ6SeHXaHQsprQs3QthdKoYHezC2QFqowGdQYvXbyVgrtBqtrh9PUO1AY89tYeFtTKOUpPB\nQSdqtUCt1kCWVQSDViqVBjduTLNv3xBtbSa0WgmHQ+DVV1eQZZmZ6STZbIUv/s4E+54epFXKU62r\niCUrpNI1RElkZjaD3WVm36E+spkS8/NhBna1kSlCqSHj8DkQNGo8bXaMXh+jY36iKyEGd/t56ccZ\nFmeWmZtLMjbZz+99/SGS4RSyqIZ6hbVwmL5dbTSaRXK5Km63nrXlBDcXlnA5jBz+jSPMXp2Dcp7E\nzDQ6Yzf9/Q6sVh0//vHiNklai89nwmQqodVK9PS8sxvk/cT7uX50OomjRztYW8sQj5fo7bVz7twU\nL7+cQq8Xt38vG5ubOUwmLZlMhXK+RPTKXQqxBP0DLkzRMFpXL+dvFxmQyowEmuwdmyBbLSCVZSS7\nmavXY4RWEixPLdHZ4+KrXx1HK1eIrW3hnpjg2p0EvXsHGdvXSSlXor1HxWNtVlKpCMvrMj/5yRI9\n/W6CQTM3r2/QN+DmC58fYW5qnTM/nkKlsVNVm6mmkgzv8iCkN3nsySHKkpW51QqGQZF/fz6yIz+f\nn0/SbMocPNhGtdrE5TJw5fIGnV1WLl2NcfnCCmqNGr/fyOJcnFAoi8djfNtjXKs1d6IW/jNKpTen\nOT9In5GNjQ2uX7/Oc88990A+/8Me/wZv5O2KkY/y/N8Ov1LFyPtBrVSiUS6jVos4nQYS8RI1wQCC\nQPDIYeztATYvXaSaz6K12Og59BA1RKr1JpqeESIaP1d+cAHfUB/7joygE1W8/C+vozFZGT8yzL//\nv68xMKnB59NRq9VxOAykUlGSyTJ9fXampqLcuRPj8cd7mZjwMTMTR6tVVjThcAG1WqC3V8XVqyGe\nemqYwUEn58+v09lpxWzWcu7cGplMFVmGjo4CJ050cebMKvF4mcFBFxaLhm996xbJpOKqub6e48SJ\nLkBGrVbUNG/ICtPpCj09dsbGBlGp2Jadmmlre3cSvo8apqairK1lCYWyrK1labVkvF4TW1s5QrEm\npUIFWWwxfTtE78AE3SM9CKKIrDdj6B9FquYYsMncms7REHRMTioqDodDx8ZGHofDQKslY7fruHUr\nisVSYX4+zU9+ssDnPjdKuVwnlVIKSJfPis2moxhPISXL3L0d5sxP7yFp9fhGdpEqyPzayV5m5tNY\njCqMeiOpQp3BsQ4uXQ1TyZbweM2M723H5zGwvJLhhf+YQaLOE091MXagSiFfQRJVaDQC1WKJPQc6\nmbq5ycJKnuG9PsXWO5mjs93EKz9eoTNoIrq0xXrIyPC+HrofOkC9VKS/346uu5dGQ8Zm07J/v59M\npoIoCgiCCo2mhcv10eIFvBeYTBp271Za9plMiXv3Fra7jkqXpL/fSSJRRhRFzGYNHmON5QuzNOsN\njGKVQMCCqtxk9+4xyKvI3H6FxtoWJanJxORerkdUhKMlZElL20AnpWKRS5fDHNo7hrd/P6dfXkRr\nNOJuc/P9793FaNJiMWvxe3LotRnUkpfeHiUrR9VqEo8V0Rm0hCMFzG4HapePtYUwLreLY79+AJvL\nxFqhgK1QR9MMkb0xi28syK4+J9evh9FohO1IBwPlcoN4vMjwsIvZexEmhoxIagGHy0SrKeMN2Onq\ndbK6mmF83Pe2x9Fm02E0qikWf1Z8SJKAx/P20tNfJr7zne/w9NNPfywSet8Njh8/zl/8xV886Gnc\nF/xKFSPvpxpT6/VIOh31krLas1i1JOJNerv60feNkLz0Glu376B3OgnH68zPPMfxr3wKr9NDaayN\nK8/fRGe3c3cdXp+d4beeHmTkyBjx9TBOp44vfOMR4iUtn5j0oFIpe9VqtciVKxvodBJ//Mf7KRRq\nWK06trYKjI56uXMnCoBGI2y7qWoplbIUClWMRg2HDwdZWkpx6dIm0WgJm00RIt2+HcHnM3L4cDu5\nXI1du1x8//vTVKsNDAYNs7OKRbzZrMVi0XL0aJC1tSzDw26Gh900Gi28XiPj416KxTp6vfq+SjXv\nB2fkDZRKdba28tRqTaLR4s5WUzpdZmsrh87lYaTTSbOkSFhzggNbZycATqeRMgZyVZGOIQvnbt3j\nwktLeL0m3G4DTz01hM2mZ31dMd9Sq0UkSWBsbIjXX1/dUb2Mj3vR6yUkScBs1uNyG7HqW4RWEty6\nsko1lUJtUlNYEalZe9mMlHG5jBQW1xDlFhN7hiiVq8TDOSrVBsNj7VQqTV746Rp2q0RsLYIkyKhf\nXcHjt3Hy1ydw2STKNbh4JYLGbCZVVPHIr48RCJhZX4xw9CEv0UgBbS1NKpTB4rKxtFnn3/91iiee\n6OPepWlE68N85hMuDAbF2VSjkbh7N0Y+X8Nu1zE+PvCRMLp6N9dCo9EkHC6QyVQwmzX4/YovSbPZ\nIputoFYLTE7upl5fo9VSpL+zswkmJ9uJRoscezhIK3YPjQSiVk2zqXQ1NeU8Qr1CUe1APXgAIR3F\nJIJ5YITk/Do3bkQolRpUKzV8bj2jahOGtg5++vwSFZ2bPQfaef75eaZuhjBbtNgdBgRZ5vf+YAKP\ny4DdpmErlMVituO26TAY1KTTFV59dZVarcHhU7sZH3Fw+fQ016/MYBbKlNJpdh8Zw9XmJHp9kY6A\nirExL/PzKYJBK1arwgsZGnJhMmkIhwu01Do0QotgmwGVKOD3mzE7FC8RQVC97TF2Og3s29fGrVsR\nisUaOp3E0JCLtrY3E10fFGdElmW+/e1v8/d///cP5PPvx/jDhw9z48aNt+WNfJTn/3b4lSpG3g8M\nTifuXbuITk3RrNUItFtx93YhBIbIbmxRLDcQHV5yFZlcroRWI1DMFXAP7WXm/DQap5uKZGJ+IYvZ\naeHc+U3a201U6laODnYiqiVMkSLpdIW5uQSSJNLfb2dyMoAgqGg0WjQaTV56aZnZ2QSf/exuXC49\nbrdxR3o7NRXDZNLQ3W1ndjaBSgXj437C4aJCTqs1WV5Oo9dLJBIl4nFFeTM87CYaLWE0qkmnlc6I\nIKgolep0ddlYWEhup3uWUKlApVLRbLa2yZcG1OoHqfz+YNBoBLRaxcH2P3NelPAvFfl8ha4uL5Lk\nBsBo/9kNVGnlKwXf9etK0N2RI0G0WglZltnayuH3W3jqqSGq1SbJZInBQSeRSJFqtYXLZSQaLWA0\nBnnyyX7W17NYrVri8RKqfIKaxkSmJOPoCKDTCogaDZ1ddpqCCo9bR1ByQiFFd7DB9FKBow95KFRV\nJDMVpqai2BwGJEnA0e6nVmuQyrWwu2WuXY+xa5ebO3diaLUS1WqTYrHBxYshTp7sJrkRY/Vmivbh\nbuR6nXK1hdHZTja1giwr0vRAt49MRWJrq0BfnwOAzk7FkbVabaDXqz8W3BBQOntXrmwyO5uk0Wgh\nCCp6emzs2uVhakrhT4miivFxD6dO9TAzk0ClUvHkk/07RPF6vUUr2qJYqhOPl8hkqyQTJUYmBxga\nDTC3mCPbNBIpefD5TBTqaprNFqKoXH+iqKXSUGFxmCnVVaQzNbxeI+VynfnFFKWqjFiqE+xQ08xn\nqaZT2L1NnnqimxdeXsXtNZMttLDbdeTzNZLJErdvR3ZSh1fiAo5gB83oCpLFwfXLq3z604PEVjbR\nFhNM7h9jeNiNXq9cux0dVgYGHLRaCnm3Wm0ycrCfM68sImoknH4nWp2aoSH3uzrPQ0Mu/H4ThYJS\njLwhW/8o4PLly5TLZY4ePfqgp/KhwWQy/cryRn6lipH3u1fl27MHk89HOZVC0usx+f00VRJTxSJb\nghXR4oJYHIdNh683gMVhZfbKJdZvh1me3cLd2UawZ5C6oN7+g1dMrmLJKqOjNrw+My+8sIjFoqPZ\nlInHyyQSStjd88/PsbaW5ezZdXp7HXz3u3f40pdGGRvzcONGmHK5yeHDZjo7OymVaoTDhZ297t5e\nO7dvRzCZtAwMOInHS+zd69+5cYVCObxeI5VKnUJBMesymzVMTPjQ6STi8SJut1JwyDLbdtRpZmYS\nrK5mMBrV7Nrl3k6S/fBxPzkjkiQyPOwmlSpjteqoVApotSI+n5lisU4kUkSSRKrVBGaz9y38B4fD\ngNGobF+JokCrJXPu3DrpdJnRUS8dHSKrqxkOHWqnt9fGzZtR4vFN3G4DoqjC41F8Xo4cCTIx4ada\nreP3m1HHF0htxOgb9JJP5VDKJIFyscKRk1045DixuoqW1sK9OzNkFhO0XF3MLjbQu9x0eczkchUi\nkQJjYz4lZdioZnzcRy5XJZ9XHgqjo14WF9OsrKTJZqt43DVMOiuiVcDf4aSht2PQi7QECVu7n84O\nCzafG8nuRq3XUyzWfu54CkiS5n2ft/uBd5pHLFZkbi65E67XaslkMlXOnFkll/uZJ8qrr97i5MkJ\nPv/50TeN1+kkpqfjqId3MXVxHpNJgyxDtSVQtwfRmww89JCV3l47m5sFNjfXaW+3cPhwkOXlNKFQ\nDr/fjNttQKMRKBbrfOITfdy4sUW12kSnU1OpyeyZ8GAVC8zNzZOK6LhwaxOd3c5XvnScWKrJZrRC\noVBjcTFFV5eN1dUMkiQoickqATRqtAY9cqtJtVRjI1whUiwz0mVgfI+ftVCeQMBCIGBhdNS7Q04/\nerSDqakoDoeez31pL/l8DZfLQH+/g64u27s6xqDIiN+OV/agOCPf/OY3+frXv47wC9yZfxmff7/G\nnzx5kpdeeum/LEY+6vP/r/ArVYy8F8iyEipXqTQxmzVY29owt7W96Xs6hjvILg9gdluRywVS65u0\nTY6Ri8apZLIcPtZLOpFDqJXwGBsUtXoGBpyk01Xm5xPMzMS5cmWL8XEvJpOGixc3qNdb2Gw6AgEz\nGxtZjhzpwGpVwrI0GhGrVcff//01HA4Du3e7sdnA6VQC0qamYpw9u4bBoCGVKtPebiGfVyLtC4Ua\n+/cr7qr37sV2VBCHDweJxQrcvRtHkhTXTZVKxdxckocfDiKKIul0GVBuKoJQ3ZFwFos1cjklFdlk\n0uxsR3xc0N/vRKMR8ftNbG7mt91GZQ4daqdebxGJFKjXjUxOdhIMWt8yXqNRipdotMC///s8q6sZ\nmk2ZdLqCx6N0J15+eRmv14QgwK5dbtrb9VSrCtlzczNHpdLg+PEums0W1WoTi2cAfS2NztHJ6dPL\nrC3HcXa00Tfgp9On4af/ss71Cwuomg2sfi3t3g56ehzcSza4fTvCiRNd1GoN9HoNBoN62zBNh0oF\ne/e2odWKLC4mkWWZUqlOLlel0WhRq8vcXc1w9HCAbK7OwG4/S4tJ/C4b/UYLPp+Jy1NJnE4DPT3G\n+7bCbTWbCG8T3/5holis7chx34Asy6yuZt60/SjLsLSUZtcuD/V6k5WVDMvLaVQq6O21k0iI7Hn6\nU6TX19Fotei9PsI1Pel0Bb2+yaVLmyQSJSqVLJnMBlarjuPHu7DZdKyuZlhaSnPnToxEokx/v4PO\nThuCoOLAAR8ejwGfU830qzfo7vdiNUJN1SCzsYWYj3Hzag6Ty4XdrmzjtrWZOXIkSEeHFbtdz717\nMco1CYvZQStXwNNmwyDW8PqtWHr6uXorth182M7goOtNwYZer4lTp5TFilYrvcXI7uOMdDrND3/4\nQ/7qr/7qQU/lQ8cTTzzBN77xDf7yL//yQU/lQ8WD7Ld+Dfjd7f//X8B3f+7r8hsR1R826vUmN2+G\nmZtLUqs1MRjUTEz4GRpy7XxPo9Hk2qVVNLkNNq9eIxtP4xsdxd7dxcv/z79g11Sxex3ktD7WNwto\nXH5Mnb243QaeeeYOyWQZh0NHW5uZUqnBI490s7mZZ24uCSgy0y98YYTeXic3bmyxsZFnZiaG3a7n\n7/7uCgaDwtcoleq0t1s4dqwTtVrg2WdnkWWZ//bfDvOG9DKXq1Is1nf+L8syarXA8LCLer2JLKsQ\nBEV2d/NmmOXlNH19zp3gM0kSkGUZWYZXX1UyaUwmDeVyHYdDTyZTBhQjpt273fT02O9L2JRKpeJ+\nnfNWSyadLm/LUvWoVCoqlQa5nNKqttn0mM1vNQGORAp861u3uHEjzNZWDq1W2laZaBkZ8RCPFwkG\nbWQyFXQ6ke5uG0tLaTIZpQjo63MwMxPHYtEq2SQagVOTNshEiCSqNHQOii0tM/Npjj3k4Z//+kfE\no3mSiTJmiwaz1cSjnxohKXgol+t4vSYsFi2hUAZJUh4ssgyvvLLMww93cuJEF6+/vk44rHBhFhZS\nO1tvogCtRoMr55bYvduNRq1i974eOrvtPP/8AolECafTwGc+M8TRox0f6sMpv7VFfGaGSiaD2e/H\nNTyM3q50o+7XeQ+H87z44hK12s9kqm63gVAo95bY+44OK5/4RB9TU1GuXt3cUYlIkkAwaOb06VX8\nfjOxWJGFhRQul57/7eujzN8Jcf31adR6PTqXh9VIg2DQSrXaIJOpcObMKoGAGbNZS7PZYn4+yZ/9\n2RGsVi1arcStWxEykSSVcIi+bjMLF2/Q12nEYtbiGR6k2TGOWqvh9OkVnn12llZL5umnh+nqstLZ\naWNqKsbp0ytoJJjYZWa4W8/qnSUEq4tk00qgXXFp/vznRxkcO9yAoAAAIABJREFUdPFRwv38e//b\nv/1bLl++zHe+85378vMfJBqNBl6vl6mpKQKBwIOeznvC9nPjFz48HmRn5EXgf27P4RJvLUbuG7a2\n8ty5E9u54eTzSuKt223YWRGm0xUy8zNsXrvB0lIKQVBxb/EyJ37Hjc5sIp+rY2vUcBuz9BxuQxvo\n5vRUjVKpRi5TJh7N0Wo2GR52odEojqC7d7sRBNjaUnxAnE4D6XQZs1nLvXuL9PQ4sNl0PPKIkvSp\nUim+CNWqEqg2O5vYVtFotj0fBLRaifX1OMlkCbfbiMWiweczEQ7nuXUrilotMDLiYdcuFz/5yRJu\ntwm73UC5rPhfNBotHn+8D0kS2NhQvBd0OolXX13B4dCxuZknk6lw8GAAq1VHOl1Gq5Vob3+rG+NH\nGYKgetNqv1Cocv58iM3NPM1mC4tFy0MPte+Exb0Bl8tAR4fCl3jDGrxabVKtNpEkkdFRhewbDFrQ\n6UTm5lLMzyfp6LDi85lYX88yM5Ogo8NKKJSl2ZTR6SS6u4O8PLVErZYkm62iUqmwGCX0Rj25XBKV\noEKr12Jzmth1cJBUWeLs2bWdbTq7XY/FosbtNvIP/3CNhx5qR6+X+F//6yYTEz48HiPFYo1Pf7KH\noEeiki9g8vn5x3+aIldWkcjJ1BpN5n40xze+cZATJ7pptRTZeV+ffacQKcbjFGMxBEnC5PWis735\n+LwbFONxVs+coVZQeA7lZJJSMknPI4+gvo8qB4/HyNCQk5mZBPV6C1FU4febcTj0zM8nd7J11GqB\n3m4z8dUNbl1cplyU0ZgUk79Go0UolGd42MWlS5tcuxZGpYJdA1ZmXrvKvdk05WyeeqFIKpxAdHSw\nvNzk+PFuVlfTuN0GbDY9y8spxsa8TE4GABXXr21x6FCA3bs9VDt0rJxZZvq1K5gMIrNzKfbtb0Pn\nCxAttWjkimSzVex2xY9o1y4XN29GeOmlFQYHnXzhC6PbW3RVJLeTNbnG3XNxGo0cBw8GOHKk/W3/\nXt/guPyqQJZl/uEf/oFvfvObD3oq9wWSJPHYY4/xwgsv8Ad/8AcPejofGh5kMbK2/W8TaLzdN75b\nvNu9qmi0+BZ9fLGo5NK88cBqVcsk5heRWzLVapNKpY7QrDP78lkCo4Ms3bhKcm0Ta4dA94SVss2F\nKEQx61Xo1DJaSWZ02I7TrudWKMbCQpLduz0MDTkZGnLT3m7m3Ll1ZFl5+DcaMnNzCYaHXTgcOlIp\n5aFfKFTp6lK2cIJBC/39DkKhPFqtiNms3SZKqrl5M0ssVmL/fj9LS2nm55OMjnpoteDevTiyDLdu\nRVhf/5lJkU6nWJvXag0kSYPTacBkKjI1lSaVKvNGwqzVqhQlFotiIb6+nvlQipH77TPyBur1Jsmk\n0hVxOpXtlYWF1LZ7agKt1kU2W+X69TBer+lNq2Zla8tNqVTH5dITiRRJJkuMjXk4eNDPmTPrLC+n\nkSQBu72MSmVnaSlFpdLA5dITjRZotWQKhSrNpozJpMFu15NMlpiaiuHxGBFFAVH8/9t78+C4rvPA\n93e7b+/7ikZja2wEuIAbQBKURImiZFGylOh55MyMHHvieF5cfiNHznOSmky9vBfFcXnKlakoVtWU\npXmx68U1kR1viSKF1mKPJWsjJVAkKJIghZUgdnQ3et+X98cFmgQBLgAaG3l/VSqhL/uc8/X9zr33\nu+d8i0AyA5baWowDQeoaXVQ0WRgdV/HhmRBGs459+6qIRKSts/7+IDMzSTo7q6iuthCPZ/nZz3pK\nzsx791Zy/yEvudA0l35zipyQoG73QZTFHEqNBoVKjaKYx+GQqjjPGVlarVhayg/29zPy/vtkE1Km\nVa3Nhu+++5hOJJakg8joaMkQmSM+OUnC78dSU7MkXV7NzeaCUqmgo0M6P5FIGrNZjdstJayzWLQM\nDs6g0YgYdSGE4TOMRtJMX5wmHE5ia2jA4JKcmwUB6uutvPXWMF6vEZtNR0uNyEDXAOmCmanJGBqt\niN4O+VgEq7ea06fHAejrkxITTk3FS/48L/38LCfeG8JkuBtRUcTqMFC/q4nUTIBAPIbTYMa7YytZ\nnRMTSkDN4OAMarWSffsqOXlynAsXAgwOhhgcnOHDD8c4fNjHqVNjBIMpPB4TkcgkAwPSVpXTaWBi\nIkZjo33e+QkEEvT0+JmcjGG369i61bmgEm85/IPW2mfkpZdeQhAEDh06tKz2m8Hn4tFHH+XnP//5\nosbIZpB/MTaCz8hXgH9eywENBtWCY6I4F31RYGBghpg/RCKeIRZNS1k2p/KkwhGi034uTTbRfF8n\nxngMs9tO3eH7mPBnMaouo07H2d/h4SMhh6PCxM9+fg5RJeL3xxkfj3HkiA9RVDI6GiWbLVAsgtUq\nhRqKohKjUcP99zegUl1iZiZFU5MNrzfP6dN+2tu9DA2FueeeWjweAx98ME5PzxR79lTy6KNb+M1v\nhkincwwPh6mrs5QeKvl8kZ4eP42N9nnGSDqdw2zWSE55s5ESVVUWPv54Gq1WqjXicOhQq0WSySy5\nXAGVSskqrayuCsFggvffH5EiWQSorDTS2VnD6GiEQkEK0xRF6c0wEkkTDqfQauenvm9tdZJK5Rga\nCmGz6fB4jOzdW8nJk2NMTMSIxzOcOzeN2ZzkySfr8HpNBINJpqbiuFwG/H4pA+yWLQ5EUUFPjx+v\n18g999TO1oJJMTQUYutWBzWtdeSUGixmLR+duoiocdNzMUg0muHwYR81NWbsdi1Go5qPP55kbCxG\nR4eXt94aIh7PzupNycR4hPd/HWFXi4GGBjuf9EVJTk1Q5xaJZ/RXFWtUYTComJpKIIoKtmxx4HIZ\nyKXTTHZ3lwwRgNTMDFPnziHU1y9JB4XcwneNYrFIMX/zLJ8rRRQVVFebCYWS9PXNcPr0BA6HnuZm\nB7t2VSAIAh8eO0Z4ZASt1Yq31kqwK0zk8ghaiwWl+kqVYINBhVJpQBAEkokMA+eGOfDYIfyTYfxT\nUVRGNWa3mt3tlbz99iUGB0P8m3/TSiyWxes1olAIHDxYzbPf/jUCcGlohv0dHk6fvEwOJVs67sWl\nnKGpsZ5YQc/57mm0WhVbt7p48MEG3ntvBKtVx7lz01itWoxGFSAQCCSJxzM0NNgxmdTkckVyuQIe\nj5nGRhuTkzH6+oKllw+QjJS3377E1JSk30BAmq8PPdS44WrLLJWf/exnPPXUU6uylbxROHr0KF/9\n6ldJJpO3TQ6VtTBGKoAfXXNsHPgccAB4GPjfyjHQrVpj1dVmnE4ppHWO2loLLpee3t4g7747jMWi\nwdfWwJlfn8Tl0mOzqJgS49R37ERhMxPvv8TI2BR1LSn2qJU0NBjZu6eCrjc+YmdrLXfdd5j/9eYl\nvF6L5Pk+W5NmYiLKZz+7HaNRhcOhw+9PkkrluPvu2tlcDtIb3Je/3IHfL2XLDASS/N7vmXC7DRiN\nagwGFW+8McC5c1NMTcV59dU+Ojq8/Pt/v4N8Xtq+0enmG1yCUMTns9HfP8PISGS2yJ6ZqioT//qv\nvSgUsGWLk927W5iYELHZdLhcerZvd9PbG0SvVyGKClQqBbW1C509V1Nfy21TLBb56KNxRkejpWND\nQ2GMRg0mk4aengHi8Rxq9RRVVSYaGmwLfAkAdDoVBw/WsH27i3y+iNWqxe9PMD4ubbcJgsDYWBRB\nkHKPPPbYFrq7J0mnc3R21mCzafH740xNJejqGqOhwUYikSEYTNLR4eXkyXEqKw04HAYaGqw0N9t5\n9dV+NIZKFApIpfK43QZ6e4Ok0zk++SQ467BaSSiUJBZLUVtr4YEH6slmC2g0SuLhOIV8EZfbgDku\nYleryUxPcOSedrbnHRQK0u9oaXGi1YrMzKRwOnVUVpoQBIFMPD7PEJkjGQjQusQ3TlNlJdNqNfnM\nlQgdjdmM1m6/Qaubc6tzIZnM8s47w4yNSaszY2MxxsaiPPhgAxaLFmMqRQJIhcNsra8hm6lmbDSM\nWixS32Rn164Kstk8W7e6OHlyDKNRhT+mwF3lID99icP31zMdzKBSidz16H4CsSIOhx6TScO+fd7Z\nkgRJursnGR+LUuez43brmRn3k09ZOXy4jt+8O85EVEVr61ZefnOUs2cvUFNj4fBhH8Fgkk9/uhmL\nRYtarUSrVeH1mtBolPT2BgHJD0qrVaJUKujqGuPSJQGvV9pmPHNmiomJGCaThro6C9u3u5iais8r\ndAhSGYixseg8Y6QcUVNrmWekt7eXV199leeff37ZfWyGPB0ul4v29naOHTvGE088sebjr0b7tTBG\nJoH7FzleBfw34LeBRd+1/+iP/gjr7B51a2srnZ2dpR86V6Z4OZ8tFi3NzQqMRhBFOx6PAUEI03f2\nY6L+AjX2IuPhKQp6K9sO7SE5NYHRo6Hp7kaMxgrGB8ZIWXU4Kppo3OZDpdUyPHIZqy3DQw/UMJnQ\nMhMfxWxOotOLqFQiIyPDCEIGUTQxNhZhcnKEbdtcOBwWAoEkMMPevTo6O1uxWrVMT4/h9cLOnXWI\nooKJiREgQV2dm1OnxvH7xzAa40xNSQ6MFy/2YbensdkqaWiwlX6vRuNEqRSoqYHJyRH27fPS0VFJ\nPD6NWq0s+YSk034mJ0fR6drZu9dDIDCG3x9l926peqcghNDr4+zfv53qavOKzv+NPpeTaDS94IYL\nMDg4Q0ODbTaPS45UKkc+X+TQodobhihenejr6pcuvV7EbNaQyeRxOHQUCgW8XiP19TZ27/awc2cF\nAwMz/OQn52htdZLLFSgUYOtWF3a7js9/vo1oNItWqyQUklK1ezxGKXQTKdfM3MrNxESMVCqHzaZj\nYGCGw4frEAR4990RLl0KSW/IdRbMFg172l14LHmysQzbtrpIpXPUNBh4YF8bsVgWne5Kpd1rFzvU\nBgMqo3GBQaJ3ua5bjPB6mLxeqjs7mTx7llwyidZioXLvXrTmtfE7mpyMMTERn3csEEgyNibVUNE7\nnST8figWKUyPsLfBxq6dzVTs3IbbYynl2/h3/247jY02hodDDA2FufuJo4QudDP+cQ8V9R5aDu5h\nS1sN+XQSh1XDL4718vJLF0lnpYy1tbUWivk81RUigYkpFIKA06Gl2RKi7j/sYnomy8svf8I771xG\npVIyM5Pivfcu8+CDDSiVCj7zmVb8/iTJZI5EQvJTqqmxkM8X6OysJpvNkU7n8XiMHDxYg1arpK9P\n2rJVqRTk8wXOnJmkWCxit+sWXeGcC4PerHznO9/hy1/+MgbD2hZtXA+efPJJfvjDHy4wRjYr67lN\n838DbuDns58fAVJXf+Fv//Zvr9v4WuvL5/PNe6At9u9X09bWQttsWoFiscjUxxOMnvmY7vcHKAgi\nVbu2k3M3EVN7cPpa2dvuRVnIMvJRN8N6F0O9QygLeoomEdNoDJ/PR6G6mk+6LvDuS+9jdNqx2yqJ\nRfsJBKNcupTHbNZTW2vho48m0OtVmM1KisUolZVGMpkK2ttrqKoyz94sPCiVCgwG9YI9uFQqh0bj\nxOGwEgj4Z53X9FitHqqqTDQ22mczukYRRYGWFietrU7i8QzDw2Gi0QwWi5OhoRBjY9KqgUYjedp3\ndZ3nyScP8ZnPHGR6Oo5CIXD//T4EQUCjUaJWi7d0fm/l8430dT2Wsh+pUikXTdxWKBTx+xMcPuxj\nZGQYsOLxGJe0PG2366msNDEwMIPBMJfZM0o6neenP+1BqxUJBlNEoxnuvbcWr9dEY6OjFEZtMKhR\nKgXsdj1HjjRw7twk58756e6WnI6l6rJh4nEDyWQOUVRQUWFAqxXZscOA0aghEEgwPh5nYCAord4c\n8DI1EUZJgccea2ZbjUDk/CkoFplKpjAbHUSVDs6fn6a21loyRBZD1Gjw7NrFyPvvS/4egoDObse1\nbduS94QFQcDZ2oqlrk7KdGw2o1Qt3CpdKrcqRzZbWLTY41yUTcJsRme3kwwGKRYK5BNxqtp24PTO\nd9a12XQ8+GADg4MhzpyZYHgqTm+4DoXRxeCEkpHTk2hdYzBwEoVSxd6tbtTFFEm0NDRJK1Bjo2Gq\nKw04zQpaWpzUGqOoDSbqt1eSPD2JIIRKfkuiqCAUSpFIZNFolJjNWsxmKQX76dMTpfD+HTvc+HxW\nXnutj1gsQ39/kN7eQUIhLY2NNurqLKVt6HC4yOBgiNpayRE+Gr2yWqXVilRUzH+IbyafkWAwyD/8\nwz/wxhtvLLltOcZf6/ZPPPEEf/zHf0w4HMZiubJavVnkv5b1NEa+so5jzyM+Pc1EdzfFTAq7XcfI\n5QhDH5xm62MVDEW0VFTYsVZID+tiLRAcplqjQqtxkcgpOXFidNb5Uw2WCgy1jfQPhtjuUXLo3jq6\nuqT6EPv2VRGLpclmC9hsOtxuPZcuhWludlBTY8Fu1xEOpzh1aqIUmtnc7MBkmr+37vWa6Onxo9NJ\n6ZfD4RQmk4a2Nje1tVIOg3vvrSMWy6BSKUoPHatVh9UqPXADgQSnT09c95zY7bpVTQW/Fuh0Klpb\nnZw4cSVUUwp5dnH27CSZTAGVSoHBoGVmJjW7EnFrKBQC+/dXodOJjIxEZhPJGenuTuLzWbFatWg0\nIpcvhxkbi1Fba8bl0pPPX3nzFARpezCTyRMOZzh2TAqvdbsNHDlSjyAYiUalNi0tUgrvqak4RqOa\ndDqPw6HH7TYQCCSIhRN49ClqGpVQBFV8kvRInMrduykUigT7RomKVfSei5HNRujrm+HIEV9pPiyG\nrb4ejdlMwu9HUCoxVlSgMZmYjsev2+ZGqHS6VY2euR52u25BDRWNRonLJT14dVYrngcfJDYxQT6b\nxeByYfQsXpdFEATGx6MYjVLhyw9PTpLJ5Nm924NGI3LxYoAtdhfZ8UHUyRkaXXY0TgMZtRqdTuTg\nXXUUD1SjETLY9EX0Rg1GjwdBocBs1uB2GwiHi6Wkc4LAbPXtK5FgFRVGPvWpxpKRMucbVlVl5gc/\n6MbvT2C1qsnl1Jw/P80XvrALg0E1b1vabNZw1101nDo1TjSaQa9X0dbmXuDAupl44YUXePzxx3E6\nN1YI82phs9k4evQoP/jBD/jDP/zD9RZnxWwEB9aysVxrLptMUixIDwmv10Qumyc4kyIXCdHSsp2d\nOysA+OQTPy+90k9vbwCDQcXOnSI2m4ZIJE0olMJoVKPTq8iLWkIJgXfeG+XQoRq++MXd9PUFCAZT\nDA+HsNm0tLRITnEWi/SmY7frKBSKfPjhGAMDMyXZTp8e57775v+u6moze/Z4uHDBj1qtpKrKTHt7\n5YKwVKNRfd3fbLfrqKoylfacQUqV3t6+dVnncDmsts8ISFsher0UjaBUCtTX23A69Vy+HJ7dwrHN\n1uERF9TUuBlms4a7764lmcwiigoGB0OMjg7N+06xCJFIGrVaZP/+Kj74YJSZmSSiqKChQcqi29sb\n4OLFACqVsrSN8/HHk+zcWcuWLU5GRyMMDc3wzjvD2Gw6OjurgUypj7GxKIp0jIlzA6UNz7YdbhSK\nJLlUCk3DdqbOCcSDVx7Gfn+C4eHwDY0RkMol6B2OecfK4UdQDm5VDodDz4EDUoRLLJadzVDrprLS\nOK+fWw1bNhrVTEzEZrP7avD5rKhUCn79v4Kko8M8dLiSnQ1uJi+dZ3L4IhVNNajbDjE6GiUez/LI\nI02lHDFXU1FhoKGhAYUixPR0nEQiS0uLk/b2ytKK5BwKhbDg+jYa1bOZchVEo3qqqvT4fBZisTSF\nQoFstoAgQEODDb1eTV2dlAYgFpOMkWv9zK4+NythLXxGEokEzz33HK+99tqm9ZlYTvunn36aL33p\nSzz11FOlTLObSf6rua2MkaWSDAaZGRwkcvkySrUarcUC09M0NTtIJnP49tRStbMWhUIgGExy4v1L\npGMxtGqBSCTN8eOX+fSnm1Grr2wHeDxGqqstnDo1QbEIb711mUOHati1y1NK4+zzWTGbpVolcyG6\nIBVxm6s5AWA2q1EqFRw71svWrVI4cE2NGYNBw549km9IKiVFxCx2I7kRgiCwb58XrVbk8uUIKpUU\nSdHQsDKnwo2GUik9sK9N9z6XCtvvT5SquHo8xuv0cmPmzr3ZrEajUZJOX1lhkbZipAe+x2Pk4Ycb\nmZlJoSjmMWqLCMU8o6NRWpqt2FUxwsMjxBNZtJ5qCoUCUJwtyCcVPZucjHP+/BQPPdRIba0Vl0vP\nyEiEczNhFKKIgiK77msjEIWP++P4dgo02hZf9YlEMguO3a40Ndnxeo2lVYDFEtzdKrW10gO+rVmP\nmE/h8Jp46aWLKACTtkgwkOT4xBgtjR6Kl/yoDAZyOclKjMezpRw112IyabjvvjoqK43E41LxzMZG\n+7wVykKhyODgDL29AeKBEFUuJV6bgKXCgdEoFcELhVIUi6DXS3VyamosZDI5VColDQ02tm1zlfrT\naMQbbtdtFl544QUOHjzIzp0711uUNeXuu+/GbDbzT//0T5ved2Tzz8KrWMpeVTIUYvDXvyYZCFDI\n5wn29WHyerH6fCQDAdyNtVQ0+VAopCyBY30j9L93kmQsRSFRxFVjJRC14Pcn2LmzorSMKi3ZVpBK\nZZmaSuB26zGbtYyMRDh6tJHJyQSffOJnejqBViuyfburtFysUAglx0hRVKBQKHjjjX5aWkT6+5W8\n8UY/1dVmDh/2sXWrc7YmxPLPl9Go4eDBGnbvzqJUCqjV4prWHVmrPCOL4XJJWyEXL/bR1NRYljT3\niYSfHTvcnD07RTqdR6VS0NwsPQTnUKmUKGdGmDx7lvFUCn1FBTplFSffHqTnxEVsDj1eh5Kxd39D\n5+8eIRQyzTM0q6pMZDJSnSG9XsoJcvBgNZWVRvo8IjazSM+FAJfO9qPS6ckNp5hOjKDTxclkrjzU\nBIEF/gG3ymapTXMter26VIl4Rf0osziivUxP9+Ar5hBQU+PVkyeK3ewgn00TDcVRGbyYHFbMjVu4\nFJBq4czVqRkZiZDJ5DCZNDid+lIYajQ6hc/n4MyZSQYGZggEkrS1uUshuf39QalG0sg4wb5+uily\n4N4m6ky9uNvaaGiwcfFigHTaj0LhxG7Xs29fVSmdwVINj83gM5JMJvnrv/5rjh07tqz2Kx1/PdsL\ngsC3vvUtnnrqKX7rt34LtXqhj+Fqjl/O9reVMbIUIiMjJANSanaFUomtvp50LCalqm5txVJbW8rC\nGB0dJTU5CrkMolDAoMxSiExT6XbT0uJk927J2XR8PMo77wyTyxXQ6aSiebFYhmJRqnHhdBqoqDDi\n8RhIJLKYTJpSdV4Aq1VLba2Fnh4/ZrOGixf92O160ukEp04NA5SSd4mi5PtQDpa6qnK7IAgCWq2q\nbPV2FAqBPXsqZ2sGpdHrVbjdxnn9R8fHGTl+nHwmg6BQ4I8KvPHKm4STEA9HCU7OwLYqDnTUYhRi\nCBYtIGXGTSSy9PcHKRSK+HxWzpyZpLOzmvp6G1u3umhutNB7bpSZ9y5j9fnQ2myo9XqSyRwWi4hC\nIfnGqNVKGhqsZQvRvtOYOH2a+PAAJk2BtKFIRohTZc5StFgoFg04PTbyNi2+tka82xr55HIOyOJ2\n69m61ck771xmcDBELldAr1exe3cFO3ZIW8GxWIYPPrhEKCQZL+FwmpmZJA8/3ITFouXCBT/pVJbo\n+DjFfJ48cLFniuq7HAQuXqTjyEM4nXp6elLU1npoaLBtet+vm/HCCy+wf/9+du/evd6irAsPPfQQ\nLS0tfPOb3+Qb3/jGeouzbG4rY2Qp1lj2Gic8pVqN3m7H5PViqqllZCTCxPkRjEY1msAwmkyIxhYP\nF86MoNWqIA/bWqzs3u1Bp5OWQ0+dmmBmRgoIcjr17N1bicWioabGjNNpKKVcvp5vgiAI7N1biU6n\nIhRKlpJQdXdnEYQcer0KtVpJMJji448naW11lj2xz1q+8a6Fz8hy+8vnC4yORhgfj2EyqdFoRNRq\nJWaz5rrhv3N9VVQYqahYfMsnOjZWyrehMhjoG4mSDocwWZ2EkAzDdCyOu64ep1jE0mBjYGCGaDTD\n1FS8lJo/lysQjWY4dWoCj8eITqdCVKvR2aw0dmwnGs3Mq8lSV+ejrs5COJwuOW8uNwX4RlgVgfLJ\nsZR+0pEIkdFRQPL3ymYT5HR5rPoiGb0Dp0vShWerF6XFTjKYZMcODW63EaNRzfR0nIGBmZJDdSKR\npbt7Eq/XTDabJ502MjZ2ad4KTjSamc0ToiaTyVPI5ShkrySSy2fzFIoK8qk4KmWR7dvdbN/uXvNz\nU64+lvL9YDDIt771LX71q18te7yVjL9R2v/d3/0d7e3t7Nq1a8XbNbLPyBpjcLsRlMp5WSDVRiMa\ns5nu7klOn56YXeEQ8QpRxMnL7NrSgMfTysRkDJtNz5776rBapQdTPC5VzwXJEIlG05w+PYHBoOKx\nx7bgdN7akrjBoKajw0s2m8du1zE0JL0VG41qAoEkkUiKmZkUoigwOhqmunrptUJkbs65c9OcPDmG\nxaLh8uUIAwMz1NRIZdj37auiqWl5vjXX5ugoFqQwWp1RquQMoNVpUKqUmGsqcbkMPPBAPQMDIXK5\nPDt2uNHpxFL+lGg0XQrPvHgxwLlzUwwPh6mvt6HXqwiFUmg0Stxuw7xoKpnlISiVCLOOgoIgSFus\nyhh1v91KylJHOJLBbFYTDKZ4993LKJUKXC49Fy4EEASBeDyDw6EjHE6XfIt0OpHjxy8TDCYRRSVn\nz06XahvNUSyCKCqpq7MSCCTRmE3kklKYeGWNDXUhgb6iApV+daotb1SeeeYZPvvZz9I2l6fhDsXj\n8fDKK6/wyCOPcPHiRf70T/8UVRnC59eS26c6EktLnGWuqcHd1oZKr0dQKtGYzVR2dJBR6rl40V9K\n/pNM5sgbnYRTCrJTo7iKk+ypzWOzJamovvJA0mpF9HoRk0mN35/gV78aZGBghtHRKO+/PzIvauVW\nUKmUbN3qoq7OTGOjglyuQCCQwGzWolQK1NZa6eqaIBZsVu52AAATMUlEQVQrrxPiaiQfK+dY5ZZv\nsf6i0TTnz08DkE7n6eoaIxBIMjEh1Rfp6hojEkktaHcrspmrqkrbf9l4nNoaE+ZKN6bKSjRGI0ql\nkqqGCjxVdiKzhovbbaSzs5r9+6vIZPLzErlJ805Fd/cEH344SjyepVAo8u67wygU4HRKNW1SKf8y\nzs7irOUcuRHlkmMp/agNBuyNjfOy3gmAp8qGy53jwQcbyOfnnI6lsNwPPhjltdf6GR2VjNq5ek/A\n7BaewIULARKJHNlsAItFw/BwmERCuraNRnXJv2frVietrQ4qWxtw1HrYsrOGtlYLKoMBz65dpZXS\n9Tg35erjVr9/+vRpfvSjHy3YmlipzJu1/Z49ezhx4gTd3d34fD7+7M/+jLfeeotsNnvzxmUYf6Xt\n79iVEaUoUrVvH/aGBnKpFGqzGa3ZzMREbF40BMBkVKSl8x6M6XHC00HMHi8quxVRe2W5Xq0WaWur\n4Pz5abq6xigUimg0SrxeE4WCVASvudm+pKVxnU7F3r1eBCGC2ZzGbNaiUgmzKbyVBINJwuHUDUN4\nZZZOMpkjEkmjUAhcvhwuZapMJKSLOhbLEIlk5mVkvVUMLhd1993HdE8P6VAIT72dxxpb6B2M4fA6\nsJuV7N7tobLWxaXh4Xlta2ut9PfPlLZfVCqpiF+hUGBwMFSS0+UyYLFoyeeLfPrTzej1aoaGlpcb\nRGYh7rY2lBoNM/39CAoFji1bsDc1ER8ZASjl8xAEqS7UpUthlEqBbLaA3a5nejpBNJpGrVai04lk\ns3n0euktNpHIcvfdtfT0TKPRiNjtWrZtc2M2S9E/BoOaQ4fq2LHDTTazDb2QgnwOndOJqL5z7gOp\nVIovfOEL/M3f/M0dk1fkVqirq+Pb3/420WiUn/zkJ/zJn/wJAwMDPP7443z5y1+ms7NzvUW8LutZ\nSeg/AP8R0AD/A/j+Nf9eLK5DRbZEQko+FQxeefMtFouzxbKKJOJplKKSpiZHqYDW1QwNzfDTn/YQ\nDqewWLSlm4jTqeO3f7tl0ZC+W+H06QkGBoKAQDKZLeXGeOSRplveAtroCIIUubSeFItFenuD/PjH\nZwkEktjtOrq6xjGZ1NTWWvD5rGi1Ig8/3ITbvbLzXsjlSts2mUyOTKaAwaC6oR9QMJhkdDRCJpOn\nosKI12siHE5x7FjvvKReIEVuPP54a8lBeqOyEfS+HIqFAgjCAn199NE4XV1jKBQCFouGl166iNGo\nZscON2q1klQqx7ZtThwOPXa7jmAwyYcfjpXai6ICq1WqIzMxIfkJmc0a9u+vuq4v0mZkJXp/+umn\nGR0d5ac//eltXRCvHIyMjPDjH/+Y73znO+zcuZPvfve7VFdXr4sss7paVGHruU3zInAfcBfwn9ZR\njnno9Wra270lI0IUFTidepLJLH19M4yNJ7h8OcoHH4zi9y9826yrs9LR4aWmxlLqQxCgvt62bEME\npERnuVwBvz9BPJ5FoRBoarLjcNxZe8SrzcREjK6uMVpbXYiiAr1ehcWinl0qlyqvNjbacLlWft6v\n9h9Rq0WMRvVNb6x2u462tgra271UV5tRKASsVu2CHCmCAI2N9g1viGxmBIViUX01NdmprbUgCNJ2\na22tmbo6C2q1dP1brVp27HCzZ4+UqLC+3jovw2o+LyUnk5LoSYnSxsdjnDgxSiazsALyncYLL7zA\n66+/zve+9z3ZELkFqqur+frXv84nn3zCgQMH2Lt3Ly+//PJ6i7WA9TRG5q4qDVCWNeRy7ZPW19t4\n5JEmjh5t5OjRRrZtc5VquMwRDI7PWz2ZYy4iprnZjsGgwmSS3ohaW5e/lDg0NITTqeeBBxro6PCy\nbZuT++/30d5eWfaL8U73GZmaihOLZUgkstx1Vw1btjj4vd/bzb/9t9vZu9fD4cM+Ojq8i573tfBn\nWQxBEOjo8NLYaCvNuZ07K9iy5crKXTllu5N9Rm6lH7NZw5EjPh5+uIn29kq++MU9bNvmQq9X4XTq\n6eysnpd23WrVcf/9Pg4cqMLjyXL4sA+Xy7Dg/hIMJhe956zFb1rLPm70/RdffJFnnnmGV155pVRE\ndaXj3SntNRoNf/7nf87LL7/MV77yFZ577rk1Hf9mrLfPyP8D/AHw5+XuOJfLEwymEEUFNpt2yQ9t\nKaGY5BMQjfrnVbhUKgUUCq6bn8Js1nD4sI9wOI1CwbJ8CxbD4dDLKyFlIpcrMDMj+dwUi8XS/JhL\nCjVXzRekVYajR6WMpxsVi0XLkSP1hELSnF9JhlGZlaNWi6VEZQA+n4VQKEU+X0SnW3jbtdl02Gw6\nLJYUPp+D0dHIgu8olULZcuJsRp5//nn+6q/+il/+8pc0NTWttziblgMHDvDee+/x6KOP0tfXx7PP\nPotSufxV+3KxFmtcFcCPrjk2ATw5+7ca+BVS1d7YVd9Zts/I9HScEydGCASSKJUKfD4L7e3eZSf3\nikTSvPFGP+m05GiWSuWwWrXs3+/FbpeNg3KxVr4DwWCCEydGmZ5OoFAI1NSY6ejwYjCoCYdT/PKX\nAwQCydL3PR4jDzxQj8GwsR0EM5kcExNSTROrVcptsRm2aTarz8hSGB+XtnbnjMWmJjt79njm1ZzJ\n5wtMTsaJRNLk8wX6+2dK5SEEAVpbndxzT+1tszVxq3pPpVI8/fTTvPPOO7z00ks0NzevgXS3P6FQ\niCeeeAKj0ciLL76IwbD6voc38hlZz1mtBjKzMrwJPAZcvRdS/NrXvlZaimttbaWzs7OUUGVuKeja\nz9XVNbz2Wj8DA4MAaDTO2eJQAo2N9pu2v97n7u4L9PcH6epKUCwKOBxpvF4jjz66H4tFu+T+5M8L\nP9fX16/6Q6lQKPKrXw0wOBiad7y9vZL2di8gRUN88kkAvz+Bx2OkudmOzbax83Mkk1nee+8yQ0Mh\n8nkpkmvHDjd795Z/K6/c3O7GSDqd49ix3nkh2YIA995bR0uLtH2bzxfo6hrj/PlpstkCoqjA7dbj\ndErRNz6flaYm+22VLflW9H7ixAl+//d/nx07dvC9730Pk2nzVhXeiGQyGf7gD/6Anp4e/uVf/gXP\ndapVl4uNaoz8BXAYyWfkR8C1G1hLXhkZGhrCaHTxi1/0kUzOd/SqqDDw+OOtyxY2Hs/wyiufEA5L\naZrTaT8ajXPeQ2y1GNrg9WLKNdat3JxWKt/MTHJe5MmcHp1OHY8/3rrsrKTlkG0l/fX2BnjzzaF5\n24k6ncjRo1LUTzllK/fvXK4xUi45Vruf8fEor77aRzZbmHe8vt7Kpz7VCMDYWJTXXusjFptCo5EM\nFKVS4IEHGvD5lr49uJHOzfX6uJ7eh4aGUCgU/OVf/iXHjh3jueee47Of/ewtG9UrlflOa18sFvnG\nN77BCy+8wPPPP8/OnTtXbfyNGk3zl8D9SNE0i3vSLJHjx48jispFHygrfaOIRjPzDJze3m6AeVV2\nV4vjx4+v+hibZayVyqdSzZ8fc3rUaMQVb2mU+9wtpb9AIMm19/VkMkc8nllyX+WUazUplxyr3Y8o\nKhb19dBqr2zRRKNpstlCaT6ClKNkLqtzuWRZj35utY98Ps+bb77JU089xZ49e6isrKSnp4ff+Z3f\nWdLq3kplvtPaC4LAX/zFX/CP//iPfP3rX+dLX/oSx48fX/Zq5XLlv628oS5cuIDVqqWhwXp1gkQ0\nGiUtLQtzgiwFg0E17+YxONgHMC8kb7W4cOHCqo+xWcZaqXxGo5rm5ishr4ODfahUirLU+Sn3uVtK\nf3NlCa5Go1GWjPByyraWc+RGlEuO1e7H4dAvKEqo04k0NtpKnw0GNaKoKN1XQCq8OJceoFyyrEc/\nN+qjUCjw93//93zuc5+joqKCr33ta+TzeXp7e/nmN7953YiZ5Y4nt78+hw4d4ty5c+h0Oj7/+c+z\nZcsWvvrVr/L973+f119/nVOnTjEwMMDk5CTxeJxCobBoP8sdf72jaVaFPXsqMZs1DA6G0GrFUtz/\nSjCZNLS1uenqGitlaHU69TQ3L69Gicz60dbmxmBQMzAwg9Wq5fBh37KWwjcSNTVmamrMjIxEZuuY\nKGhpcaw4MZvMylEoBPbvr8Ju13HpUgiDQU1zs2NewUyPx0Bzs71kECsUAj6fFa/39vaRUCgUHD9+\nnCNHjvDtb3+bmpoannnmGex2+b66Hmg0Gvbt28crr7zCyZMnefvtt3nzzTcZHx9namqKSCRCPB4n\nFouRSqXQ6/UYDAaMRmPp/xMTE3R3d5c+Hzp0iN/93d+96di3lTESCklOiRqNyLZtbrZtK0/lyjm2\nbXNht+uYmorz/vsKHnywYdlvLkth7netBRt9rHLIp1aLtLY6aW118vrrCurrbTdvdAuU+9wtpT+D\nQc1999UxNhYjFstgt2uprDSVVoDKKdtazpEbUS451qIfnU5FW1sFbW0Vi/67KCo5cKAKt1vJ/v1V\nmM0avF7TvNXYcsmy1v3crI/vfve7ZR1Tbr/y9lLuog46Ojqu+718Pk8ikSgZJ/F4nHg8zrPPPsvn\nPve50jGXy3VL425kN/s3kTK0ytw5hIGVLWHJbEbigLyEc+chX+93Hm8hBa7IyMjIyMjIyMjIyMjI\nyMjIyMjIXJf96y2AzKog6/XOQtb35uWO1N1G9hlZDlpgeYH5S0cDpFeh3w7gIGAFQsD7QNcqjLNY\nWLcAvAY8uArj7UAqjnh13FcnsJSgdCOSjNGbfXEJlHvOLHdelEvvq6HXcuhujj1Iv28Q+BRSJuZf\nAIvHCV6fcs+FcsyD5eh+pXovp77Lpefl6rhcOl2pLpeix5Xorxy6W6nOynU9zvEU8N+X03CzGiNP\nAn+MpIR/Br4NFIFfIyVSWwteBx4qc59/izQZfskV564HkH7n18o8VpLFJ+wuoNxxdX8DuIEs4AK+\nBExxc319CfhPSA6O3wf+d6SL5GcsPVHeWs2Z5cyLcuq93Hpdru4W47tIN3od0sMiCkSAauCLN2lb\nrrmwmvNgqbovh97Lpe9y6XkpOl6pTldLl7eqx5Xqb6W6W6nOVnI9AryNdL6vtiO2A2eBe2+h/W3B\n+0hhyQLwfwAvATYkJZSbt6/z38wqjPWbJR5fCR8hWfPX8stVGOvtq/7eieRRvY+b6+s40tuDDriM\ndOELwHvLkKHcc6ac86Kcei+3Xperu8W4+vd8fNXfb91C23LNhXLMg3Lpvhx6L5e+y6Xnpeh4pTpd\nqS5XqseV6m+lulupzlZyPQL8n8D/x3zD5xe32HYBmznPyFxu9u8iKfVfkKzEcuNEslQz1xx/YxXG\nOgn8DyTLPAqYkSztj1ZhrEeRLPNreXgVxlJwpTDiGeAzwP9EsqJvRBrpTSkJ/L9c0cFyt8fKOWfK\nOS/Kqfdy63W5uluMq+uU/19X/X0reafLORdWOg/Kpfty6L1c+i6Xnpei43LodCW6XKkeV6q/lepu\npTpbyfUI8CzSysp/BL4CvMgKdluUN//KhkQJ+JGWxgBGkSZELfBKmccaACZZuAd5Hhgv81ivIf2W\nLUA90u/8ZyTrs9zEgPwix5e7V3gjziHdYOKzn5PAPwLDSEt616Mw27bAFWtdDdRw69b7HOWeM+Wc\nF+XUe7n1ulzdLcYJpLfOAnBx9pgamL7q8/Uo11woxzwol+7Lofdy6btcel6Kjleq05XqcqV6XKn+\nVqq7lepsJdfjHHngQ6RVqU6kLaPVWF3fNLy4hmP9cA3HkplPOc99ueeMPC/WlnKd73LMA1n35WGl\n53GlupT1uI7cLoXyKtdwLM8ajiUzn3Ke+3LPGXlerC3lOt/lmAey7svDSs/jSnUp63EduV2MERkZ\nGRkZGZlNimyMyMjIyMjIyKwrsjEiIyMjIyMjI1MGFq/LvfnHkplPOc99ufUoz4u1pVznuxz9yLov\nDys9j+vdXkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGZtPyMFIp6F7gP6+zLDJr\nw/eRUlJ/fLMvytw21CAVMzuHlLb76fUVR2aN0CKlYD+NlHL+v66vODIyi6ME+gAfoEKasFvXUyCZ\nNeEQsAfZGLmT8AC7Z/82ItUBka/1OwP97P9FpKrF96yjLBsKOc/IxmE/kjEyhFRs6EfA4+spkMya\nsJzS8zKbmwmklw2QiqX1AN71E0dmDUnM/l+N9AIaXEdZNhSyMbJxqAIuX/V5ZPaYjIzM7YsPaWXs\nxDrLIbM2KJAM0Umkrbrz6yvOxkE2RjYOxfUWQEZGZk0xAj8Fvoa0QiJz+1NA2qKrBu4FDq+rNBsI\n2RjZOIwiObbNUYO0OiIjI3P7oQJ+BvxP4J/XWRaZtScM/CvQsd6CyMhciwj0Iy3bqpEdWO8kfMgO\nrHcSAvAD4Nn1FkRmTXEC1tm/dcBvgAfWTxwZmevzCJJnfR/wX9ZZFpm14YfAGJBG8hn6/fUVR2YN\nuAdpuf40cGr2v4fXVSKZtaAN+AhJ72eAP11fcWRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRk\nZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGTuIP5/xRHll/wr\nALgAAAAASUVORK5CYII=\n", "text": [ - "" + "" ] } ], - "prompt_number": 2 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Learn and evaluate scikit-learn's logistic regression with stochastic gradient descent (SGD) training. Time and check the classifier's accuracy." - ] + "prompt_number": 3 }, { "cell_type": "code", @@ -103,7 +89,7 @@ "clf = sklearn.linear_model.SGDClassifier(\n", " loss='log', n_iter=1000, penalty='l2', alpha=1e-3, class_weight='auto')\n", "\n", - "%timeit clf.fit(X, y)\n", + "clf.fit(X, y)\n", "yt_pred = clf.predict(Xt)\n", "print('Accuracy: {:.3f}'.format(sklearn.metrics.accuracy_score(yt, yt_pred)))" ], @@ -114,19 +100,11 @@ "output_type": "stream", "stream": "stdout", "text": [ - "1 loops, best of 3: 499 ms per loop\n", - "Accuracy: 0.756\n" + "Accuracy: 0.763\n" ] } ], - "prompt_number": 3 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Save the dataset to HDF5 for loading in Caffe." - ] + "prompt_number": 4 }, { "cell_type": "code", @@ -161,69 +139,15 @@ "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 4 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Learn and evaluate logistic regression in Caffe." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "def learn_and_test(solver_file):\n", - " caffe.set_mode_cpu()\n", - " solver = caffe.get_solver(solver_file)\n", - " solver.solve()\n", - "\n", - " accuracy = 0\n", - " test_iters = int(len(Xt) / solver.test_nets[0].blobs['data'].num)\n", - " for i in range(test_iters):\n", - " solver.test_nets[0].forward()\n", - " accuracy += solver.test_nets[0].blobs['accuracy'].data\n", - " accuracy /= test_iters\n", - " return accuracy\n", - "\n", - "%timeit learn_and_test('hdf5_classification/solver.prototxt')\n", - "acc = learn_and_test('hdf5_classification/solver.prototxt')\n", - "print(\"Accuracy: {:.3f}\".format(acc))" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "1 loops, best of 3: 240 ms per loop\n", - "Accuracy: 0.752" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n" - ] - } - ], "prompt_number": 5 }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Do the same through the command line interface for detailed output on the model and solving." - ] - }, { "cell_type": "code", "collapsed": false, "input": [ - "!../build/tools/caffe train -solver hdf5_classification/solver.prototxt" + "# Run caffe. Scroll down in the output to see the final\n", + "# test accuracy, which should be about the same as above.\n", + "!cd .. && ./build/tools/caffe train -solver examples/hdf5_classification/solver.prototxt" ], "language": "python", "metadata": {}, @@ -232,16 +156,9 @@ "output_type": "stream", "stream": "stdout", "text": [ - "I0307 01:34:29.141863 2099749632 caffe.cpp:103] Use CPU.\r\n" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "I0307 01:34:29.418283 2099749632 caffe.cpp:107] Starting Optimization\r\n", - "I0307 01:34:29.418323 2099749632 solver.cpp:32] Initializing solver from parameters: \r\n", - "test_iter: 250\r\n", + "I0905 01:07:27.099238 2129298192 caffe.cpp:90] Starting Optimization\r\n", + "I0905 01:07:27.100469 2129298192 solver.cpp:32] Initializing solver from parameters: \r\n", + "test_iter: 1000\r\n", "test_interval: 1000\r\n", "base_lr: 0.01\r\n", "display: 1000\r\n", @@ -252,43 +169,42 @@ "weight_decay: 0.0005\r\n", "stepsize: 5000\r\n", "snapshot: 10000\r\n", - "snapshot_prefix: \"hdf5_classification/data/train\"\r\n", + "snapshot_prefix: \"examples/hdf5_classification/data/train\"\r\n", "solver_mode: CPU\r\n", - "net: \"hdf5_classification/train_val.prototxt\"\r\n", - "I0307 01:34:29.418416 2099749632 solver.cpp:70] Creating training net from net file: hdf5_classification/train_val.prototxt\r\n", - "I0307 01:34:29.418583 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", - "I0307 01:34:29.418598 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", - "I0307 01:34:29.418608 2099749632 net.cpp:42] Initializing net from parameters: \r\n", + "net: \"examples/hdf5_classification/train_val.prototxt\"\r\n", + "I0905 01:07:27.100630 2129298192 solver.cpp:72] Creating training net from net file: examples/hdf5_classification/train_val.prototxt\r\n" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "I0905 01:07:27.100988 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", + "I0905 01:07:27.101011 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", + "I0905 01:07:27.101022 2129298192 net.cpp:39] Initializing net from parameters: \r\n", "name: \"LogisticRegressionNet\"\r\n", - "state {\r\n", - " phase: TRAIN\r\n", - "}\r\n", - "layer {\r\n", - " name: \"data\"\r\n", - " type: \"HDF5Data\"\r\n", + "layers {\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " include {\r\n", - " phase: TRAIN\r\n", - " }\r\n", + " name: \"data\"\r\n", + " type: HDF5_DATA\r\n", " hdf5_data_param {\r\n", - " source: \"hdf5_classification/data/train.txt\"\r\n", + " source: \"examples/hdf5_classification/data/train.txt\"\r\n", " batch_size: 10\r\n", " }\r\n", + " include {\r\n", + " phase: TRAIN\r\n", + " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"fc1\"\r\n", - " type: \"InnerProduct\"\r\n", + "layers {\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " param {\r\n", - " lr_mult: 1\r\n", - " decay_mult: 1\r\n", - " }\r\n", - " param {\r\n", - " lr_mult: 2\r\n", - " decay_mult: 0\r\n", - " }\r\n", + " name: \"fc1\"\r\n", + " type: INNER_PRODUCT\r\n", + " blobs_lr: 1\r\n", + " blobs_lr: 2\r\n", + " weight_decay: 1\r\n", + " weight_decay: 0\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -301,77 +217,72 @@ " }\r\n", " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"loss\"\r\n", - " type: \"SoftmaxWithLoss\"\r\n", + "layers {\r\n", " bottom: \"fc1\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", + " name: \"loss\"\r\n", + " type: SOFTMAX_LOSS\r\n", "}\r\n", - "I0307 01:34:29.418692 2099749632 layer_factory.hpp:74] Creating layer data\r\n", - "I0307 01:34:29.418853 2099749632 net.cpp:84] Creating Layer data\r\n", - "I0307 01:34:29.418879 2099749632 net.cpp:338] data -> data\r\n", - "I0307 01:34:29.418905 2099749632 net.cpp:338] data -> label\r\n", - "I0307 01:34:29.418918 2099749632 net.cpp:113] Setting up data\r\n", - "I0307 01:34:29.418926 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/train.txt\r\n", - "I0307 01:34:29.418992 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 2\r\n", - "I0307 01:34:29.420812 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", - "I0307 01:34:29.420841 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:29.420852 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", - "I0307 01:34:29.420866 2099749632 net.cpp:84] Creating Layer fc1\r\n", - "I0307 01:34:29.420872 2099749632 net.cpp:380] fc1 <- data\r\n", - "I0307 01:34:29.420882 2099749632 net.cpp:338] fc1 -> fc1\r\n", - "I0307 01:34:29.420894 2099749632 net.cpp:113] Setting up fc1\r\n", - "I0307 01:34:29.425689 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:29.425709 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:29.425724 2099749632 net.cpp:84] Creating Layer loss\r\n", - "I0307 01:34:29.425731 2099749632 net.cpp:380] loss <- fc1\r\n", - "I0307 01:34:29.425739 2099749632 net.cpp:380] loss <- label\r\n", - "I0307 01:34:29.425747 2099749632 net.cpp:338] loss -> loss\r\n", - "I0307 01:34:29.425756 2099749632 net.cpp:113] Setting up loss\r\n", - "I0307 01:34:29.425767 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:29.425781 2099749632 net.cpp:120] Top shape: (1)\r\n", - "I0307 01:34:29.425789 2099749632 net.cpp:122] with loss weight 1\r\n", - "I0307 01:34:29.425801 2099749632 net.cpp:167] loss needs backward computation.\r\n", - "I0307 01:34:29.425808 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", - "I0307 01:34:29.425815 2099749632 net.cpp:169] data does not need backward computation.\r\n", - "I0307 01:34:29.425822 2099749632 net.cpp:205] This network produces output loss\r\n", - "I0307 01:34:29.425829 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", - "I0307 01:34:29.425837 2099749632 net.cpp:217] Network initialization done.\r\n", - "I0307 01:34:29.425843 2099749632 net.cpp:218] Memory required for data: 284\r\n", - "I0307 01:34:29.425961 2099749632 solver.cpp:154] Creating test net (#0) specified by net file: hdf5_classification/train_val.prototxt\r\n", - "I0307 01:34:29.425984 2099749632 net.cpp:257] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", - "I0307 01:34:29.425997 2099749632 net.cpp:42] Initializing net from parameters: \r\n", - "name: \"LogisticRegressionNet\"\r\n", "state {\r\n", - " phase: TEST\r\n", + " phase: TRAIN\r\n", "}\r\n", - "layer {\r\n", - " name: \"data\"\r\n", - " type: \"HDF5Data\"\r\n", + "I0905 01:07:27.105614 2129298192 net.cpp:67] Creating Layer data\r\n", + "I0905 01:07:27.105664 2129298192 net.cpp:356] data -> data\r\n", + "I0905 01:07:27.105698 2129298192 net.cpp:356] data -> label\r\n", + "I0905 01:07:27.105710 2129298192 net.cpp:96] Setting up data\r\n", + "I0905 01:07:27.105717 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/train.txt\r\n", + "I0905 01:07:27.105813 2129298192 hdf5_data_layer.cpp:69] Number of files: 2\r\n", + "I0905 01:07:27.105828 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.109418 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.109501 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", + "I0905 01:07:27.109522 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", + "I0905 01:07:27.109531 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.109560 2129298192 net.cpp:67] Creating Layer fc1\r\n", + "I0905 01:07:27.109570 2129298192 net.cpp:394] fc1 <- data\r\n", + "I0905 01:07:27.109590 2129298192 net.cpp:356] fc1 -> fc1\r\n", + "I0905 01:07:27.109618 2129298192 net.cpp:96] Setting up fc1\r\n", + "I0905 01:07:27.115136 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.115190 2129298192 net.cpp:67] Creating Layer loss\r\n", + "I0905 01:07:27.115198 2129298192 net.cpp:394] loss <- fc1\r\n", + "I0905 01:07:27.115206 2129298192 net.cpp:394] loss <- label\r\n", + "I0905 01:07:27.115214 2129298192 net.cpp:356] loss -> loss\r\n", + "I0905 01:07:27.115224 2129298192 net.cpp:96] Setting up loss\r\n", + "I0905 01:07:27.115237 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", + "I0905 01:07:27.115244 2129298192 net.cpp:109] with loss weight 1\r\n", + "I0905 01:07:27.115260 2129298192 net.cpp:170] loss needs backward computation.\r\n", + "I0905 01:07:27.115267 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", + "I0905 01:07:27.115273 2129298192 net.cpp:172] data does not need backward computation.\r\n", + "I0905 01:07:27.115278 2129298192 net.cpp:208] This network produces output loss\r\n", + "I0905 01:07:27.115288 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", + "I0905 01:07:27.115295 2129298192 net.cpp:219] Network initialization done.\r\n", + "I0905 01:07:27.115301 2129298192 net.cpp:220] Memory required for data: 284\r\n", + "I0905 01:07:27.115622 2129298192 solver.cpp:156] Creating test net (#0) specified by net file: examples/hdf5_classification/train_val.prototxt\r\n", + "I0905 01:07:27.115644 2129298192 net.cpp:275] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", + "I0905 01:07:27.115656 2129298192 net.cpp:39] Initializing net from parameters: \r\n", + "name: \"LogisticRegressionNet\"\r\n", + "layers {\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " include {\r\n", - " phase: TEST\r\n", - " }\r\n", + " name: \"data\"\r\n", + " type: HDF5_DATA\r\n", " hdf5_data_param {\r\n", - " source: \"hdf5_classification/data/test.txt\"\r\n", + " source: \"examples/hdf5_classification/data/test.txt\"\r\n", " batch_size: 10\r\n", " }\r\n", + " include {\r\n", + " phase: TEST\r\n", + " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"fc1\"\r\n", - " type: \"InnerProduct\"\r\n", + "layers {\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " param {\r\n", - " lr_mult: 1\r\n", - " decay_mult: 1\r\n", - " }\r\n", - " param {\r\n", - " lr_mult: 2\r\n", - " decay_mult: 0\r\n", - " }\r\n", + " name: \"fc1\"\r\n", + " type: INNER_PRODUCT\r\n", + " blobs_lr: 1\r\n", + " blobs_lr: 2\r\n", + " weight_decay: 1\r\n", + " weight_decay: 0\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -384,176 +295,194 @@ " }\r\n", " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"loss\"\r\n", - " type: \"SoftmaxWithLoss\"\r\n", + "layers {\r\n", " bottom: \"fc1\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", + " name: \"loss\"\r\n", + " type: SOFTMAX_LOSS\r\n", "}\r\n", - "layer {\r\n", - " name: \"accuracy\"\r\n", - " type: \"Accuracy\"\r\n", + "layers {\r\n", " bottom: \"fc1\"\r\n", " bottom: \"label\"\r\n", " top: \"accuracy\"\r\n", + " name: \"accuracy\"\r\n", + " type: ACCURACY\r\n", " include {\r\n", " phase: TEST\r\n", " }\r\n", "}\r\n", - "I0307 01:34:29.426126 2099749632 layer_factory.hpp:74] Creating layer data\r\n", - "I0307 01:34:29.426311 2099749632 net.cpp:84] Creating Layer data\r\n", - "I0307 01:34:29.426331 2099749632 net.cpp:338] data -> data\r\n", - "I0307 01:34:29.426343 2099749632 net.cpp:338] data -> label\r\n", - "I0307 01:34:29.426354 2099749632 net.cpp:113] Setting up data\r\n", - "I0307 01:34:29.426362 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/test.txt\r\n", - "I0307 01:34:29.426484 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 1\r\n", - "I0307 01:34:29.427692 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", - "I0307 01:34:29.427711 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:29.427721 2099749632 layer_factory.hpp:74] Creating layer label_data_1_split\r\n", - "I0307 01:34:29.427731 2099749632 net.cpp:84] Creating Layer label_data_1_split\r\n", - "I0307 01:34:29.427738 2099749632 net.cpp:380] label_data_1_split <- label\r\n", - "I0307 01:34:29.427747 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_0\r\n", - "I0307 01:34:29.427759 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_1\r\n", - "I0307 01:34:29.427768 2099749632 net.cpp:113] Setting up label_data_1_split\r\n", - "I0307 01:34:29.427777 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:29.427784 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:29.427791 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", - "I0307 01:34:29.427804 2099749632 net.cpp:84] Creating Layer fc1\r\n", - "I0307 01:34:29.427813 2099749632 net.cpp:380] fc1 <- data\r\n", - "I0307 01:34:29.427821 2099749632 net.cpp:338] fc1 -> fc1\r\n", - "I0307 01:34:29.427831 2099749632 net.cpp:113] Setting up fc1\r\n", - "I0307 01:34:29.427845 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:29.427857 2099749632 layer_factory.hpp:74] Creating layer fc1_fc1_0_split\r\n", - "I0307 01:34:29.427866 2099749632 net.cpp:84] Creating Layer fc1_fc1_0_split\r\n", - "I0307 01:34:29.427872 2099749632 net.cpp:380] fc1_fc1_0_split <- fc1\r\n", - "I0307 01:34:29.427881 2099749632 net.cpp:338] fc1_fc1_0_split -> fc1_fc1_0_split_0\r\n", - "I0307 01:34:29.427891 2099749632 net.cpp:338] fc1_fc1_0_split -> fc1_fc1_0_split_1\r\n", - "I0307 01:34:29.427942 2099749632 net.cpp:113] Setting up fc1_fc1_0_split\r\n", - "I0307 01:34:29.427955 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:29.427965 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:29.427976 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:29.427991 2099749632 net.cpp:84] Creating Layer loss\r\n", - "I0307 01:34:29.428001 2099749632 net.cpp:380] loss <- fc1_fc1_0_split_0\r\n", - "I0307 01:34:29.428009 2099749632 net.cpp:380] loss <- label_data_1_split_0\r\n", - "I0307 01:34:29.428017 2099749632 net.cpp:338] loss -> loss\r\n", - "I0307 01:34:29.428026 2099749632 net.cpp:113] Setting up loss\r\n", - "I0307 01:34:29.428035 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:29.428048 2099749632 net.cpp:120] Top shape: (1)\r\n", - "I0307 01:34:29.428056 2099749632 net.cpp:122] with loss weight 1\r\n", - "I0307 01:34:29.428064 2099749632 layer_factory.hpp:74] Creating layer accuracy\r\n", - "I0307 01:34:29.428076 2099749632 net.cpp:84] Creating Layer accuracy\r\n", - "I0307 01:34:29.428084 2099749632 net.cpp:380] accuracy <- fc1_fc1_0_split_1\r\n", - "I0307 01:34:29.428092 2099749632 net.cpp:380] accuracy <- label_data_1_split_1\r\n", - "I0307 01:34:29.428102 2099749632 net.cpp:338] accuracy -> accuracy\r\n", - "I0307 01:34:29.428131 2099749632 net.cpp:113] Setting up accuracy\r\n", - "I0307 01:34:29.428140 2099749632 net.cpp:120] Top shape: (1)\r\n", - "I0307 01:34:29.428148 2099749632 net.cpp:169] accuracy does not need backward computation.\r\n", - "I0307 01:34:29.428154 2099749632 net.cpp:167] loss needs backward computation.\r\n", - "I0307 01:34:29.428161 2099749632 net.cpp:167] fc1_fc1_0_split needs backward computation.\r\n", - "I0307 01:34:29.428167 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", - "I0307 01:34:29.428174 2099749632 net.cpp:169] label_data_1_split does not need backward computation.\r\n", - "I0307 01:34:29.428181 2099749632 net.cpp:169] data does not need backward computation.\r\n", - "I0307 01:34:29.428189 2099749632 net.cpp:205] This network produces output accuracy\r\n", - "I0307 01:34:29.428324 2099749632 net.cpp:205] This network produces output loss\r\n", - "I0307 01:34:29.428342 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", - "I0307 01:34:29.428350 2099749632 net.cpp:217] Network initialization done.\r\n", - "I0307 01:34:29.428357 2099749632 net.cpp:218] Memory required for data: 528\r\n", - "I0307 01:34:29.428388 2099749632 solver.cpp:42] Solver scaffolding done.\r\n", - "I0307 01:34:29.428412 2099749632 solver.cpp:222] Solving LogisticRegressionNet\r\n", - "I0307 01:34:29.428421 2099749632 solver.cpp:223] Learning Rate Policy: step\r\n", - "I0307 01:34:29.428431 2099749632 solver.cpp:266] Iteration 0, Testing net (#0)\r\n" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "I0307 01:34:29.471674 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.4532\r\n", - "I0307 01:34:29.471724 2099749632 solver.cpp:315] Test net output #1: loss = 0.694067 (* 1 = 0.694067 loss)\r\n", - "I0307 01:34:29.471853 2099749632 solver.cpp:189] Iteration 0, loss = 0.692695\r\n", - "I0307 01:34:29.471878 2099749632 solver.cpp:204] Train net output #0: loss = 0.692695 (* 1 = 0.692695 loss)\r\n", - "I0307 01:34:29.471890 2099749632 solver.cpp:464] Iteration 0, lr = 0.01\r\n", - "I0307 01:34:29.483834 2099749632 solver.cpp:266] Iteration 1000, Testing net (#0)\r\n", - "I0307 01:34:29.486868 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7424\r\n", - "I0307 01:34:29.486896 2099749632 solver.cpp:315] Test net output #1: loss = 0.601764 (* 1 = 0.601764 loss)\r\n", - "I0307 01:34:29.486922 2099749632 solver.cpp:189] Iteration 1000, loss = 0.472665\r\n", - "I0307 01:34:29.486934 2099749632 solver.cpp:204] Train net output #0: loss = 0.472665 (* 1 = 0.472665 loss)\r\n", - "I0307 01:34:29.486944 2099749632 solver.cpp:464] Iteration 1000, lr = 0.01\r\n", - "I0307 01:34:29.498821 2099749632 solver.cpp:266] Iteration 2000, Testing net (#0)\r\n", - "I0307 01:34:29.501900 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7364\r\n", - "I0307 01:34:29.501941 2099749632 solver.cpp:315] Test net output #1: loss = 0.60818 (* 1 = 0.60818 loss)\r\n", - "I0307 01:34:29.501988 2099749632 solver.cpp:189] Iteration 2000, loss = 0.6863\r\n", - "I0307 01:34:29.502003 2099749632 solver.cpp:204] Train net output #0: loss = 0.6863 (* 1 = 0.6863 loss)\r\n", - "I0307 01:34:29.502013 2099749632 solver.cpp:464] Iteration 2000, lr = 0.01\r\n", - "I0307 01:34:29.513921 2099749632 solver.cpp:266] Iteration 3000, Testing net (#0)\r\n", - "I0307 01:34:29.517227 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.6964\r\n", - "I0307 01:34:29.517300 2099749632 solver.cpp:315] Test net output #1: loss = 0.604707 (* 1 = 0.604707 loss)\r\n", - "I0307 01:34:29.518105 2099749632 solver.cpp:189] Iteration 3000, loss = 0.617542\r\n", - "I0307 01:34:29.518154 2099749632 solver.cpp:204] Train net output #0: loss = 0.617542 (* 1 = 0.617542 loss)\r\n", - "I0307 01:34:29.518170 2099749632 solver.cpp:464] Iteration 3000, lr = 0.01\r\n" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "I0307 01:34:29.531672 2099749632 solver.cpp:266] Iteration 4000, Testing net (#0)\r\n", - "I0307 01:34:29.534873 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7424\r\n", - "I0307 01:34:29.534920 2099749632 solver.cpp:315] Test net output #1: loss = 0.601764 (* 1 = 0.601764 loss)\r\n", - "I0307 01:34:29.534950 2099749632 solver.cpp:189] Iteration 4000, loss = 0.472666\r\n", - "I0307 01:34:29.534962 2099749632 solver.cpp:204] Train net output #0: loss = 0.472665 (* 1 = 0.472665 loss)\r\n", - "I0307 01:34:29.534973 2099749632 solver.cpp:464] Iteration 4000, lr = 0.01\r\n", - "I0307 01:34:29.546567 2099749632 solver.cpp:266] Iteration 5000, Testing net (#0)\r\n", - "I0307 01:34:29.549762 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7364\r\n", - "I0307 01:34:29.549789 2099749632 solver.cpp:315] Test net output #1: loss = 0.60818 (* 1 = 0.60818 loss)\r\n", - "I0307 01:34:29.549815 2099749632 solver.cpp:189] Iteration 5000, loss = 0.686301\r\n", - "I0307 01:34:29.549828 2099749632 solver.cpp:204] Train net output #0: loss = 0.6863 (* 1 = 0.6863 loss)\r\n", - "I0307 01:34:29.549837 2099749632 solver.cpp:464] Iteration 5000, lr = 0.001\r\n", - "I0307 01:34:29.562142 2099749632 solver.cpp:266] Iteration 6000, Testing net (#0)\r\n", - "I0307 01:34:29.565335 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7476\r\n", - "I0307 01:34:29.565373 2099749632 solver.cpp:315] Test net output #1: loss = 0.59775 (* 1 = 0.59775 loss)\r\n", - "I0307 01:34:29.566051 2099749632 solver.cpp:189] Iteration 6000, loss = 0.664614\r\n", - "I0307 01:34:29.566086 2099749632 solver.cpp:204] Train net output #0: loss = 0.664614 (* 1 = 0.664614 loss)\r\n", - "I0307 01:34:29.566097 2099749632 solver.cpp:464] Iteration 6000, lr = 0.001\r\n" + "state {\r\n", + " phase: TEST\r\n", + "}\r\n", + "I0905 01:07:27.115854 2129298192 net.cpp:67] Creating Layer data\r\n", + "I0905 01:07:27.115864 2129298192 net.cpp:356] data -> data\r\n", + "I0905 01:07:27.116004 2129298192 net.cpp:356] data -> label\r\n", + "I0905 01:07:27.116024 2129298192 net.cpp:96] Setting up data\r\n", + "I0905 01:07:27.116030 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/test.txt\r\n", + "I0905 01:07:27.116080 2129298192 hdf5_data_layer.cpp:69] Number of files: 1\r\n", + "I0905 01:07:27.116089 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/test.h5\r\n", + "I0905 01:07:27.117313 2129298192 hdf5_data_layer.cpp:49] Successully loaded 2500 rows\r\n", + "I0905 01:07:27.117348 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", + "I0905 01:07:27.117357 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", + "I0905 01:07:27.117364 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.117377 2129298192 net.cpp:67] Creating Layer label_data_1_split\r\n", + "I0905 01:07:27.117384 2129298192 net.cpp:394] label_data_1_split <- label\r\n", + "I0905 01:07:27.117393 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_0\r\n", + "I0905 01:07:27.117409 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_1\r\n", + "I0905 01:07:27.117419 2129298192 net.cpp:96] Setting up label_data_1_split\r\n", + "I0905 01:07:27.117427 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.117434 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.117444 2129298192 net.cpp:67] Creating Layer fc1\r\n", + "I0905 01:07:27.117449 2129298192 net.cpp:394] fc1 <- data\r\n", + "I0905 01:07:27.117470 2129298192 net.cpp:356] fc1 -> fc1\r\n", + "I0905 01:07:27.117478 2129298192 net.cpp:96] Setting up fc1\r\n", + "I0905 01:07:27.117506 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.117519 2129298192 net.cpp:67] Creating Layer fc1_fc1_0_split\r\n", + "I0905 01:07:27.117527 2129298192 net.cpp:394] fc1_fc1_0_split <- fc1\r\n", + "I0905 01:07:27.117534 2129298192 net.cpp:356] fc1_fc1_0_split -> fc1_fc1_0_split_0\r\n", + "I0905 01:07:27.117543 2129298192 net.cpp:356] fc1_fc1_0_split -> fc1_fc1_0_split_1\r\n", + "I0905 01:07:27.117640 2129298192 net.cpp:96] Setting up fc1_fc1_0_split\r\n", + "I0905 01:07:27.117655 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.117662 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.117673 2129298192 net.cpp:67] Creating Layer loss\r\n", + "I0905 01:07:27.117679 2129298192 net.cpp:394] loss <- fc1_fc1_0_split_0\r\n", + "I0905 01:07:27.117687 2129298192 net.cpp:394] loss <- label_data_1_split_0\r\n", + "I0905 01:07:27.117696 2129298192 net.cpp:356] loss -> loss\r\n", + "I0905 01:07:27.117704 2129298192 net.cpp:96] Setting up loss\r\n", + "I0905 01:07:27.117717 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", + "I0905 01:07:27.117723 2129298192 net.cpp:109] with loss weight 1\r\n", + "I0905 01:07:27.117743 2129298192 net.cpp:67] Creating Layer accuracy\r\n", + "I0905 01:07:27.117749 2129298192 net.cpp:394] accuracy <- fc1_fc1_0_split_1\r\n", + "I0905 01:07:27.117756 2129298192 net.cpp:394] accuracy <- label_data_1_split_1\r\n", + "I0905 01:07:27.117764 2129298192 net.cpp:356] accuracy -> accuracy\r\n", + "I0905 01:07:27.117774 2129298192 net.cpp:96] Setting up accuracy\r\n", + "I0905 01:07:27.117781 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", + "I0905 01:07:27.117789 2129298192 net.cpp:172] accuracy does not need backward computation.\r\n", + "I0905 01:07:27.117794 2129298192 net.cpp:170] loss needs backward computation.\r\n", + "I0905 01:07:27.117835 2129298192 net.cpp:170] fc1_fc1_0_split needs backward computation.\r\n", + "I0905 01:07:27.117842 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", + "I0905 01:07:27.117848 2129298192 net.cpp:172] label_data_1_split does not need backward computation.\r\n", + "I0905 01:07:27.117854 2129298192 net.cpp:172] data does not need backward computation.\r\n", + "I0905 01:07:27.117861 2129298192 net.cpp:208] This network produces output accuracy\r\n", + "I0905 01:07:27.117866 2129298192 net.cpp:208] This network produces output loss\r\n", + "I0905 01:07:27.117877 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", + "I0905 01:07:27.117926 2129298192 net.cpp:219] Network initialization done.\r\n", + "I0905 01:07:27.117938 2129298192 net.cpp:220] Memory required for data: 528\r\n", + "I0905 01:07:27.117985 2129298192 solver.cpp:46] Solver scaffolding done.\r\n", + "I0905 01:07:27.117992 2129298192 solver.cpp:165] Solving LogisticRegressionNet\r\n", + "I0905 01:07:27.118026 2129298192 solver.cpp:251] Iteration 0, Testing net (#0)\r\n", + "I0905 01:07:27.123764 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.646801\r\n", + "I0905 01:07:27.123847 2129298192 solver.cpp:302] Test net output #1: loss = 0.690777 (* 1 = 0.690777 loss)\r\n", + "I0905 01:07:27.123888 2129298192 solver.cpp:195] Iteration 0, loss = 0.689469\r\n", + "I0905 01:07:27.123898 2129298192 solver.cpp:210] Train net output #0: loss = 0.689469 (* 1 = 0.689469 loss)\r\n", + "I0905 01:07:27.123915 2129298192 solver.cpp:405] Iteration 0, lr = 0.01\r\n", + "I0905 01:07:27.127096 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.128094 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.129258 2129298192 solver.cpp:251] Iteration 1000, Testing net (#0)\r\n", + "I0905 01:07:27.135226 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.745599\r\n", + "I0905 01:07:27.135296 2129298192 solver.cpp:302] Test net output #1: loss = 0.573658 (* 1 = 0.573658 loss)\r\n", + "I0905 01:07:27.135315 2129298192 solver.cpp:195] Iteration 1000, loss = 0.49682\r\n", + "I0905 01:07:27.135325 2129298192 solver.cpp:210] Train net output #0: loss = 0.49682 (* 1 = 0.49682 loss)\r\n", + "I0905 01:07:27.135334 2129298192 solver.cpp:405] Iteration 1000, lr = 0.01\r\n", + "I0905 01:07:27.137315 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.137358 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.138335 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.140410 2129298192 solver.cpp:251] Iteration 2000, Testing net (#0)\r\n", + "I0905 01:07:27.147435 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.746399\r\n", + "I0905 01:07:27.147514 2129298192 solver.cpp:302] Test net output #1: loss = 0.582127 (* 1 = 0.582127 loss)\r\n", + "I0905 01:07:27.147541 2129298192 solver.cpp:195] Iteration 2000, loss = 0.555272\r\n", + "I0905 01:07:27.147553 2129298192 solver.cpp:210] Train net output #0: loss = 0.555272 (* 1 = 0.555272 loss)\r\n", + "I0905 01:07:27.147565 2129298192 solver.cpp:405] Iteration 2000, lr = 0.01\r\n", + "I0905 01:07:27.148572 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.149441 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.152377 2129298192 solver.cpp:251] Iteration 3000, Testing net (#0)\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0307 01:34:29.577900 2099749632 solver.cpp:266] Iteration 7000, Testing net (#0)\r\n", - "I0307 01:34:29.580993 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7524\r\n", - "I0307 01:34:29.581015 2099749632 solver.cpp:315] Test net output #1: loss = 0.597349 (* 1 = 0.597349 loss)\r\n", - "I0307 01:34:29.581038 2099749632 solver.cpp:189] Iteration 7000, loss = 0.456775\r\n", - "I0307 01:34:29.581050 2099749632 solver.cpp:204] Train net output #0: loss = 0.456774 (* 1 = 0.456774 loss)\r\n", - "I0307 01:34:29.581059 2099749632 solver.cpp:464] Iteration 7000, lr = 0.001\r\n", - "I0307 01:34:29.592854 2099749632 solver.cpp:266] Iteration 8000, Testing net (#0)\r\n", - "I0307 01:34:29.595973 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7568\r\n", - "I0307 01:34:29.596002 2099749632 solver.cpp:315] Test net output #1: loss = 0.597265 (* 1 = 0.597265 loss)\r\n", - "I0307 01:34:29.596027 2099749632 solver.cpp:189] Iteration 8000, loss = 0.673885\r\n", - "I0307 01:34:29.596040 2099749632 solver.cpp:204] Train net output #0: loss = 0.673885 (* 1 = 0.673885 loss)\r\n", - "I0307 01:34:29.596048 2099749632 solver.cpp:464] Iteration 8000, lr = 0.001\r\n", - "I0307 01:34:29.607822 2099749632 solver.cpp:266] Iteration 9000, Testing net (#0)\r\n", - "I0307 01:34:29.610930 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7432\r\n", - "I0307 01:34:29.610960 2099749632 solver.cpp:315] Test net output #1: loss = 0.597777 (* 1 = 0.597777 loss)\r\n", - "I0307 01:34:29.611558 2099749632 solver.cpp:189] Iteration 9000, loss = 0.66526\r\n", - "I0307 01:34:29.611583 2099749632 solver.cpp:204] Train net output #0: loss = 0.66526 (* 1 = 0.66526 loss)\r\n", - "I0307 01:34:29.611593 2099749632 solver.cpp:464] Iteration 9000, lr = 0.001\r\n", - "I0307 01:34:29.623009 2099749632 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_10000.caffemodel\r\n", - "I0307 01:34:29.623209 2099749632 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_10000.solverstate\r\n", - "I0307 01:34:29.623319 2099749632 solver.cpp:248] Iteration 10000, loss = 0.457922\r\n", - "I0307 01:34:29.623333 2099749632 solver.cpp:266] Iteration 10000, Testing net (#0)\r\n" + "I0905 01:07:27.158655 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.696\r\n", + "I0905 01:07:27.158746 2129298192 solver.cpp:302] Test net output #1: loss = 0.580239 (* 1 = 0.580239 loss)\r\n", + "I0905 01:07:27.158761 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.158768 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.159765 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.159843 2129298192 solver.cpp:195] Iteration 3000, loss = 0.476517\r\n", + "I0905 01:07:27.159873 2129298192 solver.cpp:210] Train net output #0: loss = 0.476517 (* 1 = 0.476517 loss)\r\n", + "I0905 01:07:27.159983 2129298192 solver.cpp:405] Iteration 3000, lr = 0.01\r\n", + "I0905 01:07:27.163079 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.163602 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.164567 2129298192 solver.cpp:251] Iteration 4000, Testing net (#0)\r\n", + "I0905 01:07:27.170277 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.745599\r\n", + "I0905 01:07:27.170344 2129298192 solver.cpp:302] Test net output #1: loss = 0.573658 (* 1 = 0.573658 loss)\r\n", + "I0905 01:07:27.170364 2129298192 solver.cpp:195] Iteration 4000, loss = 0.49682\r\n", + "I0905 01:07:27.170375 2129298192 solver.cpp:210] Train net output #0: loss = 0.49682 (* 1 = 0.49682 loss)\r\n", + "I0905 01:07:27.170385 2129298192 solver.cpp:405] Iteration 4000, lr = 0.01\r\n", + "I0905 01:07:27.172350 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.172374 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.173084 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.175192 2129298192 solver.cpp:251] Iteration 5000, Testing net (#0)\r\n", + "I0905 01:07:27.181659 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.746399\r\n", + "I0905 01:07:27.181710 2129298192 solver.cpp:302] Test net output #1: loss = 0.582127 (* 1 = 0.582127 loss)\r\n", + "I0905 01:07:27.181730 2129298192 solver.cpp:195] Iteration 5000, loss = 0.555272\r\n", + "I0905 01:07:27.181740 2129298192 solver.cpp:210] Train net output #0: loss = 0.555272 (* 1 = 0.555272 loss)\r\n", + "I0905 01:07:27.181748 2129298192 solver.cpp:405] Iteration 5000, lr = 0.001\r\n", + "I0905 01:07:27.182734 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.183248 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.186180 2129298192 solver.cpp:251] Iteration 6000, Testing net (#0)\r\n", + "I0905 01:07:27.192646 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7684\r\n", + "I0905 01:07:27.192751 2129298192 solver.cpp:302] Test net output #1: loss = 0.574538 (* 1 = 0.574538 loss)\r\n", + "I0905 01:07:27.192766 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.192773 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.193936 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.194007 2129298192 solver.cpp:195] Iteration 6000, loss = 0.464052\r\n", + "I0905 01:07:27.194036 2129298192 solver.cpp:210] Train net output #0: loss = 0.464052 (* 1 = 0.464052 loss)\r\n", + "I0905 01:07:27.194051 2129298192 solver.cpp:405] Iteration 6000, lr = 0.001\r\n", + "I0905 01:07:27.197053 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.198092 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.199162 2129298192 solver.cpp:251] Iteration 7000, Testing net (#0)\r\n", + "I0905 01:07:27.205195 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7684\r\n", + "I0905 01:07:27.205298 2129298192 solver.cpp:302] Test net output #1: loss = 0.574549 (* 1 = 0.574549 loss)\r\n", + "I0905 01:07:27.205327 2129298192 solver.cpp:195] Iteration 7000, loss = 0.495483\r\n", + "I0905 01:07:27.205338 2129298192 solver.cpp:210] Train net output #0: loss = 0.495483 (* 1 = 0.495483 loss)\r\n", + "I0905 01:07:27.205353 2129298192 solver.cpp:405] Iteration 7000, lr = 0.001\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0307 01:34:29.626454 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.752\r\n", - "I0307 01:34:29.626484 2099749632 solver.cpp:315] Test net output #1: loss = 0.597362 (* 1 = 0.597362 loss)\r\n", - "I0307 01:34:29.626493 2099749632 solver.cpp:253] Optimization Done.\r\n", - "I0307 01:34:29.626502 2099749632 caffe.cpp:121] Optimization Done.\r\n" + "I0905 01:07:27.207471 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.207489 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.208534 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.210860 2129298192 solver.cpp:251] Iteration 8000, Testing net (#0)\r\n", + "I0905 01:07:27.216624 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.762\r\n", + "I0905 01:07:27.216704 2129298192 solver.cpp:302] Test net output #1: loss = 0.574515 (* 1 = 0.574515 loss)\r\n", + "I0905 01:07:27.216723 2129298192 solver.cpp:195] Iteration 8000, loss = 0.524565\r\n", + "I0905 01:07:27.216733 2129298192 solver.cpp:210] Train net output #0: loss = 0.524565 (* 1 = 0.524565 loss)\r\n", + "I0905 01:07:27.216743 2129298192 solver.cpp:405] Iteration 8000, lr = 0.001\r\n", + "I0905 01:07:27.217738 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.218291 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.221294 2129298192 solver.cpp:251] Iteration 9000, Testing net (#0)\r\n", + "I0905 01:07:27.227104 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7688\r\n", + "I0905 01:07:27.227171 2129298192 solver.cpp:302] Test net output #1: loss = 0.574278 (* 1 = 0.574278 loss)\r\n", + "I0905 01:07:27.227183 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.227190 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.228143 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.228210 2129298192 solver.cpp:195] Iteration 9000, loss = 0.461831\r\n", + "I0905 01:07:27.228240 2129298192 solver.cpp:210] Train net output #0: loss = 0.461831 (* 1 = 0.461831 loss)\r\n", + "I0905 01:07:27.228252 2129298192 solver.cpp:405] Iteration 9000, lr = 0.001\r\n", + "I0905 01:07:27.231314 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.232293 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.233417 2129298192 solver.cpp:319] Snapshotting to examples/hdf5_classification/data/train_iter_10000\r\n", + "I0905 01:07:27.233680 2129298192 solver.cpp:326] Snapshotting solver state to examples/hdf5_classification/data/train_iter_10000.solverstate\r\n", + "I0905 01:07:27.233795 2129298192 solver.cpp:232] Iteration 10000, loss = 0.49554\r\n", + "I0905 01:07:27.233814 2129298192 solver.cpp:251] Iteration 10000, Testing net (#0)\r\n", + "I0905 01:07:27.240015 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.768\r\n", + "I0905 01:07:27.240099 2129298192 solver.cpp:302] Test net output #1: loss = 0.574488 (* 1 = 0.574488 loss)\r\n", + "I0905 01:07:27.240110 2129298192 solver.cpp:237] Optimization Done.\r\n", + "I0905 01:07:27.240118 2129298192 caffe.cpp:114] Optimization Done.\r\n" ] } ], @@ -563,33 +492,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If you look at output or the `train_val.prototxt`, you'll see that the model is simple logistic regression.\n", - "We can make it a little more advanced by introducing a non-linearity between weights that take the input and weights that give the output -- now we have a two-layer network.\n", + "If you look at the `train_val.prototxt`, you'll see that it's simple logistic regression.\n", + "We can make it a little more advanced by introducing a non-linearity between weights that take the input and weights that give the output -- now we have a two-layer neural network.\n", "That network is given in `train_val2.prototxt`, and that's the only change made in `solver2.prototxt` which we will now use.\n", "\n", - "The final accuracy of the new network be higher than logistic regression!" + "The final accuracy of the network we'll train below should be higher than for the network above!" ] }, { "cell_type": "code", "collapsed": false, "input": [ - "def learn_and_test(solver_file):\n", - " caffe.set_mode_cpu()\n", - " solver = caffe.get_solver(solver_file)\n", - " solver.solve()\n", - "\n", - " accuracy = 0\n", - " test_iters = int(len(Xt) / solver.test_nets[0].blobs['data'].num)\n", - " for i in range(test_iters):\n", - " solver.test_nets[0].forward()\n", - " accuracy += solver.test_nets[0].blobs['accuracy'].data\n", - " accuracy /= test_iters\n", - " return accuracy\n", - "\n", - "%timeit learn_and_test('hdf5_classification/solver2.prototxt')\n", - "acc = learn_and_test('hdf5_classification/solver2.prototxt')\n", - "print(\"Accuracy: {:.3f}\".format(acc))" + "!cd .. && ./build/tools/caffe train -solver examples/hdf5_classification/solver2.prototxt" ], "language": "python", "metadata": {}, @@ -598,50 +512,9 @@ "output_type": "stream", "stream": "stdout", "text": [ - "1 loops, best of 3: 333 ms per loop\n", - "Accuracy: 0.818" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "\n" - ] - } - ], - "prompt_number": 7 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Do the same through the command line interface for detailed output on the model and solving." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "!../build/tools/caffe train -solver hdf5_classification/solver2.prototxt" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "I0307 01:34:31.589234 2099749632 caffe.cpp:103] Use CPU.\r\n" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "I0307 01:34:31.872560 2099749632 caffe.cpp:107] Starting Optimization\r\n", - "I0307 01:34:31.872596 2099749632 solver.cpp:32] Initializing solver from parameters: \r\n", - "test_iter: 250\r\n", + "I0905 01:07:27.466722 2129298192 caffe.cpp:90] Starting Optimization\r\n", + "I0905 01:07:27.468166 2129298192 solver.cpp:32] Initializing solver from parameters: \r\n", + "test_iter: 1000\r\n", "test_interval: 1000\r\n", "base_lr: 0.01\r\n", "display: 1000\r\n", @@ -652,43 +525,36 @@ "weight_decay: 0.0005\r\n", "stepsize: 5000\r\n", "snapshot: 10000\r\n", - "snapshot_prefix: \"hdf5_classification/data/train\"\r\n", + "snapshot_prefix: \"examples/hdf5_classification/data/train\"\r\n", "solver_mode: CPU\r\n", - "net: \"hdf5_classification/train_val2.prototxt\"\r\n", - "I0307 01:34:31.872687 2099749632 solver.cpp:70] Creating training net from net file: hdf5_classification/train_val2.prototxt\r\n", - "I0307 01:34:31.872865 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", - "I0307 01:34:31.872882 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", - "I0307 01:34:31.872891 2099749632 net.cpp:42] Initializing net from parameters: \r\n", + "net: \"examples/hdf5_classification/train_val2.prototxt\"\r\n", + "I0905 01:07:27.468351 2129298192 solver.cpp:72] Creating training net from net file: examples/hdf5_classification/train_val2.prototxt\r\n", + "I0905 01:07:27.469081 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", + "I0905 01:07:27.469100 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", + "I0905 01:07:27.469110 2129298192 net.cpp:39] Initializing net from parameters: \r\n", "name: \"LogisticRegressionNet\"\r\n", - "state {\r\n", - " phase: TRAIN\r\n", - "}\r\n", - "layer {\r\n", - " name: \"data\"\r\n", - " type: \"HDF5Data\"\r\n", + "layers {\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " include {\r\n", - " phase: TRAIN\r\n", - " }\r\n", + " name: \"data\"\r\n", + " type: HDF5_DATA\r\n", " hdf5_data_param {\r\n", - " source: \"hdf5_classification/data/train.txt\"\r\n", + " source: \"examples/hdf5_classification/data/train.txt\"\r\n", " batch_size: 10\r\n", " }\r\n", + " include {\r\n", + " phase: TRAIN\r\n", + " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"fc1\"\r\n", - " type: \"InnerProduct\"\r\n", + "layers {\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " param {\r\n", - " lr_mult: 1\r\n", - " decay_mult: 1\r\n", - " }\r\n", - " param {\r\n", - " lr_mult: 2\r\n", - " decay_mult: 0\r\n", - " }\r\n", + " name: \"fc1\"\r\n", + " type: INNER_PRODUCT\r\n", + " blobs_lr: 1\r\n", + " blobs_lr: 2\r\n", + " weight_decay: 1\r\n", + " weight_decay: 0\r\n", " inner_product_param {\r\n", " num_output: 40\r\n", " weight_filler {\r\n", @@ -701,25 +567,21 @@ " }\r\n", " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"relu1\"\r\n", - " type: \"ReLU\"\r\n", + "layers {\r\n", " bottom: \"fc1\"\r\n", " top: \"fc1\"\r\n", + " name: \"relu1\"\r\n", + " type: RELU\r\n", "}\r\n", - "layer {\r\n", - " name: \"fc2\"\r\n", - " type: \"InnerProduct\"\r\n", + "layers {\r\n", " bottom: \"fc1\"\r\n", " top: \"fc2\"\r\n", - " param {\r\n", - " lr_mult: 1\r\n", - " decay_mult: 1\r\n", - " }\r\n", - " param {\r\n", - " lr_mult: 2\r\n", - " decay_mult: 0\r\n", - " }\r\n", + " name: \"fc2\"\r\n", + " type: INNER_PRODUCT\r\n", + " blobs_lr: 1\r\n", + " blobs_lr: 2\r\n", + " weight_decay: 1\r\n", + " weight_decay: 0\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -732,91 +594,84 @@ " }\r\n", " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"loss\"\r\n", - " type: \"SoftmaxWithLoss\"\r\n", + "layers {\r\n", " bottom: \"fc2\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", + " name: \"loss\"\r\n", + " type: SOFTMAX_LOSS\r\n", "}\r\n", - "I0307 01:34:31.873246 2099749632 layer_factory.hpp:74] Creating layer data\r\n", - "I0307 01:34:31.873276 2099749632 net.cpp:84] Creating Layer data\r\n", - "I0307 01:34:31.873292 2099749632 net.cpp:338] data -> data\r\n", - "I0307 01:34:31.873332 2099749632 net.cpp:338] data -> label\r\n", - "I0307 01:34:31.873352 2099749632 net.cpp:113] Setting up data\r\n", - "I0307 01:34:31.873361 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/train.txt\r\n", - "I0307 01:34:31.873443 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 2\r\n", - "I0307 01:34:31.875783 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", - "I0307 01:34:31.875816 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:31.875829 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", - "I0307 01:34:31.875846 2099749632 net.cpp:84] Creating Layer fc1\r\n", - "I0307 01:34:31.875857 2099749632 net.cpp:380] fc1 <- data\r\n", - "I0307 01:34:31.875875 2099749632 net.cpp:338] fc1 -> fc1\r\n", - "I0307 01:34:31.875892 2099749632 net.cpp:113] Setting up fc1\r\n", - "I0307 01:34:31.882478 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", - "I0307 01:34:31.882505 2099749632 layer_factory.hpp:74] Creating layer relu1\r\n", - "I0307 01:34:31.882524 2099749632 net.cpp:84] Creating Layer relu1\r\n", - "I0307 01:34:31.882532 2099749632 net.cpp:380] relu1 <- fc1\r\n", - "I0307 01:34:31.882544 2099749632 net.cpp:327] relu1 -> fc1 (in-place)\r\n", - "I0307 01:34:31.882555 2099749632 net.cpp:113] Setting up relu1\r\n", - "I0307 01:34:31.882565 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", - "I0307 01:34:31.882583 2099749632 layer_factory.hpp:74] Creating layer fc2\r\n", - "I0307 01:34:31.882609 2099749632 net.cpp:84] Creating Layer fc2\r\n", - "I0307 01:34:31.882619 2099749632 net.cpp:380] fc2 <- fc1\r\n", - "I0307 01:34:31.882632 2099749632 net.cpp:338] fc2 -> fc2\r\n", - "I0307 01:34:31.882644 2099749632 net.cpp:113] Setting up fc2\r\n", - "I0307 01:34:31.882663 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:31.882678 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:31.882694 2099749632 net.cpp:84] Creating Layer loss\r\n", - "I0307 01:34:31.882704 2099749632 net.cpp:380] loss <- fc2\r\n", - "I0307 01:34:31.882712 2099749632 net.cpp:380] loss <- label\r\n", - "I0307 01:34:31.882779 2099749632 net.cpp:338] loss -> loss\r\n", - "I0307 01:34:31.882796 2099749632 net.cpp:113] Setting up loss\r\n", - "I0307 01:34:31.882810 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:31.882833 2099749632 net.cpp:120] Top shape: (1)\r\n", - "I0307 01:34:31.882844 2099749632 net.cpp:122] with loss weight 1\r\n", - "I0307 01:34:31.882860 2099749632 net.cpp:167] loss needs backward computation.\r\n", - "I0307 01:34:31.882869 2099749632 net.cpp:167] fc2 needs backward computation.\r\n", - "I0307 01:34:31.882877 2099749632 net.cpp:167] relu1 needs backward computation.\r\n", - "I0307 01:34:31.882886 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", - "I0307 01:34:31.882894 2099749632 net.cpp:169] data does not need backward computation.\r\n", - "I0307 01:34:31.882904 2099749632 net.cpp:205] This network produces output loss\r\n", - "I0307 01:34:31.882931 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", - "I0307 01:34:31.882942 2099749632 net.cpp:217] Network initialization done.\r\n", - "I0307 01:34:31.882951 2099749632 net.cpp:218] Memory required for data: 3484\r\n", - "I0307 01:34:31.883157 2099749632 solver.cpp:154] Creating test net (#0) specified by net file: hdf5_classification/train_val2.prototxt\r\n", - "I0307 01:34:31.883189 2099749632 net.cpp:257] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", - "I0307 01:34:31.883203 2099749632 net.cpp:42] Initializing net from parameters: \r\n", - "name: \"LogisticRegressionNet\"\r\n", "state {\r\n", - " phase: TEST\r\n", + " phase: TRAIN\r\n", "}\r\n", - "layer {\r\n", - " name: \"data\"\r\n", - " type: \"HDF5Data\"\r\n", + "I0905 01:07:27.469447 2129298192 net.cpp:67] Creating Layer data\r\n", + "I0905 01:07:27.469467 2129298192 net.cpp:356] data -> data\r\n", + "I0905 01:07:27.469493 2129298192 net.cpp:356] data -> label\r\n", + "I0905 01:07:27.469503 2129298192 net.cpp:96] Setting up data\r\n", + "I0905 01:07:27.469511 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/train.txt\r\n", + "I0905 01:07:27.469558 2129298192 hdf5_data_layer.cpp:69] Number of files: 2\r\n", + "I0905 01:07:27.469569 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.471978 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.471997 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", + "I0905 01:07:27.472008 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", + "I0905 01:07:27.472015 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.472026 2129298192 net.cpp:67] Creating Layer fc1\r\n", + "I0905 01:07:27.472033 2129298192 net.cpp:394] fc1 <- data\r\n", + "I0905 01:07:27.472045 2129298192 net.cpp:356] fc1 -> fc1\r\n", + "I0905 01:07:27.472060 2129298192 net.cpp:96] Setting up fc1\r\n", + "I0905 01:07:27.476827 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", + "I0905 01:07:27.476857 2129298192 net.cpp:67] Creating Layer relu1\r\n", + "I0905 01:07:27.476865 2129298192 net.cpp:394] relu1 <- fc1\r\n", + "I0905 01:07:27.476872 2129298192 net.cpp:345] relu1 -> fc1 (in-place)\r\n", + "I0905 01:07:27.476881 2129298192 net.cpp:96] Setting up relu1\r\n", + "I0905 01:07:27.476888 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", + "I0905 01:07:27.476896 2129298192 net.cpp:67] Creating Layer fc2\r\n", + "I0905 01:07:27.476902 2129298192 net.cpp:394] fc2 <- fc1\r\n", + "I0905 01:07:27.476909 2129298192 net.cpp:356] fc2 -> fc2\r\n", + "I0905 01:07:27.476918 2129298192 net.cpp:96] Setting up fc2\r\n", + "I0905 01:07:27.476932 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.476955 2129298192 net.cpp:67] Creating Layer loss\r\n", + "I0905 01:07:27.476963 2129298192 net.cpp:394] loss <- fc2\r\n", + "I0905 01:07:27.476969 2129298192 net.cpp:394] loss <- label\r\n", + "I0905 01:07:27.476975 2129298192 net.cpp:356] loss -> loss\r\n", + "I0905 01:07:27.476984 2129298192 net.cpp:96] Setting up loss\r\n", + "I0905 01:07:27.477005 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", + "I0905 01:07:27.477040 2129298192 net.cpp:109] with loss weight 1\r\n", + "I0905 01:07:27.477051 2129298192 net.cpp:170] loss needs backward computation.\r\n", + "I0905 01:07:27.477058 2129298192 net.cpp:170] fc2 needs backward computation.\r\n", + "I0905 01:07:27.477063 2129298192 net.cpp:170] relu1 needs backward computation.\r\n", + "I0905 01:07:27.477069 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", + "I0905 01:07:27.477076 2129298192 net.cpp:172] data does not need backward computation.\r\n", + "I0905 01:07:27.477080 2129298192 net.cpp:208] This network produces output loss\r\n", + "I0905 01:07:27.477099 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", + "I0905 01:07:27.477105 2129298192 net.cpp:219] Network initialization done.\r\n", + "I0905 01:07:27.477112 2129298192 net.cpp:220] Memory required for data: 3484\r\n", + "I0905 01:07:27.477455 2129298192 solver.cpp:156] Creating test net (#0) specified by net file: examples/hdf5_classification/train_val2.prototxt\r\n", + "I0905 01:07:27.477480 2129298192 net.cpp:275] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", + "I0905 01:07:27.477494 2129298192 net.cpp:39] Initializing net from parameters: \r\n", + "name: \"LogisticRegressionNet\"\r\n", + "layers {\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " include {\r\n", - " phase: TEST\r\n", - " }\r\n", + " name: \"data\"\r\n", + " type: HDF5_DATA\r\n", " hdf5_data_param {\r\n", - " source: \"hdf5_classification/data/test.txt\"\r\n", + " source: \"examples/hdf5_classification/data/test.txt\"\r\n", " batch_size: 10\r\n", " }\r\n", + " include {\r\n", + " phase: TEST\r\n", + " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"fc1\"\r\n", - " type: \"InnerProduct\"\r\n", + "layers {\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " param {\r\n", - " lr_mult: 1\r\n", - " decay_mult: 1\r\n", - " }\r\n", - " param {\r\n", - " lr_mult: 2\r\n", - " decay_mult: 0\r\n", - " }\r\n", + " name: \"fc1\"\r\n", + " type: INNER_PRODUCT\r\n", + " blobs_lr: 1\r\n", + " blobs_lr: 2\r\n", + " weight_decay: 1\r\n", + " weight_decay: 0\r\n", " inner_product_param {\r\n", " num_output: 40\r\n", " weight_filler {\r\n", @@ -829,25 +684,21 @@ " }\r\n", " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"relu1\"\r\n", - " type: \"ReLU\"\r\n", + "layers {\r\n", " bottom: \"fc1\"\r\n", " top: \"fc1\"\r\n", + " name: \"relu1\"\r\n", + " type: RELU\r\n", "}\r\n", - "layer {\r\n", - " name: \"fc2\"\r\n", - " type: \"InnerProduct\"\r\n", + "layers {\r\n", " bottom: \"fc1\"\r\n", " top: \"fc2\"\r\n", - " param {\r\n", - " lr_mult: 1\r\n", - " decay_mult: 1\r\n", - " }\r\n", - " param {\r\n", - " lr_mult: 2\r\n", - " decay_mult: 0\r\n", - " }\r\n", + " name: \"fc2\"\r\n", + " type: INNER_PRODUCT\r\n", + " blobs_lr: 1\r\n", + " blobs_lr: 2\r\n", + " weight_decay: 1\r\n", + " weight_decay: 0\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -860,200 +711,222 @@ " }\r\n", " }\r\n", "}\r\n", - "layer {\r\n", - " name: \"loss\"\r\n", - " type: \"SoftmaxWithLoss\"\r\n", + "layers {\r\n", " bottom: \"fc2\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", + " name: \"loss\"\r\n", + " type: SOFTMAX_LOSS\r\n", "}\r\n", - "layer {\r\n", - " name: \"accuracy\"\r\n", - " type: \"Accuracy\"\r\n", + "layers {\r\n", " bottom: \"fc2\"\r\n", " bottom: \"label\"\r\n", " top: \"accuracy\"\r\n", + " name: \"accuracy\"\r\n", + " type: ACCURACY\r\n", " include {\r\n", " phase: TEST\r\n", " }\r\n", "}\r\n", - "I0307 01:34:31.883535 2099749632 layer_factory.hpp:74] Creating layer data\r\n", - "I0307 01:34:31.883548 2099749632 net.cpp:84] Creating Layer data\r\n", - "I0307 01:34:31.883556 2099749632 net.cpp:338] data -> data\r\n", - "I0307 01:34:31.883569 2099749632 net.cpp:338] data -> label\r\n", - "I0307 01:34:31.883579 2099749632 net.cpp:113] Setting up data\r\n", - "I0307 01:34:31.883585 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/test.txt\r\n", - "I0307 01:34:31.883664 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 1\r\n", - "I0307 01:34:31.884842 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", - "I0307 01:34:31.884860 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:31.884870 2099749632 layer_factory.hpp:74] Creating layer label_data_1_split\r\n", - "I0307 01:34:31.884879 2099749632 net.cpp:84] Creating Layer label_data_1_split\r\n", - "I0307 01:34:31.884886 2099749632 net.cpp:380] label_data_1_split <- label\r\n", - "I0307 01:34:31.884896 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_0\r\n", - "I0307 01:34:31.884909 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_1\r\n", - "I0307 01:34:31.884919 2099749632 net.cpp:113] Setting up label_data_1_split\r\n", - "I0307 01:34:31.884927 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:31.884934 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", - "I0307 01:34:31.884941 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", - "I0307 01:34:31.884951 2099749632 net.cpp:84] Creating Layer fc1\r\n", - "I0307 01:34:31.884958 2099749632 net.cpp:380] fc1 <- data\r\n", - "I0307 01:34:31.884989 2099749632 net.cpp:338] fc1 -> fc1\r\n", - "I0307 01:34:31.885000 2099749632 net.cpp:113] Setting up fc1\r\n", - "I0307 01:34:31.885017 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", - "I0307 01:34:31.885030 2099749632 layer_factory.hpp:74] Creating layer relu1\r\n", - "I0307 01:34:31.885041 2099749632 net.cpp:84] Creating Layer relu1\r\n", - "I0307 01:34:31.885048 2099749632 net.cpp:380] relu1 <- fc1\r\n", - "I0307 01:34:31.885056 2099749632 net.cpp:327] relu1 -> fc1 (in-place)\r\n", - "I0307 01:34:31.885064 2099749632 net.cpp:113] Setting up relu1\r\n", - "I0307 01:34:31.885071 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", - "I0307 01:34:31.885079 2099749632 layer_factory.hpp:74] Creating layer fc2\r\n", - "I0307 01:34:31.885088 2099749632 net.cpp:84] Creating Layer fc2\r\n", - "I0307 01:34:31.885094 2099749632 net.cpp:380] fc2 <- fc1\r\n", - "I0307 01:34:31.885103 2099749632 net.cpp:338] fc2 -> fc2\r\n", - "I0307 01:34:31.885113 2099749632 net.cpp:113] Setting up fc2\r\n", - "I0307 01:34:31.885126 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:31.885138 2099749632 layer_factory.hpp:74] Creating layer fc2_fc2_0_split\r\n", - "I0307 01:34:31.885149 2099749632 net.cpp:84] Creating Layer fc2_fc2_0_split\r\n", - "I0307 01:34:31.885155 2099749632 net.cpp:380] fc2_fc2_0_split <- fc2\r\n", - "I0307 01:34:31.885164 2099749632 net.cpp:338] fc2_fc2_0_split -> fc2_fc2_0_split_0\r\n", - "I0307 01:34:31.885174 2099749632 net.cpp:338] fc2_fc2_0_split -> fc2_fc2_0_split_1\r\n", - "I0307 01:34:31.885182 2099749632 net.cpp:113] Setting up fc2_fc2_0_split\r\n", - "I0307 01:34:31.885190 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:31.885242 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", - "I0307 01:34:31.885256 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:31.885267 2099749632 net.cpp:84] Creating Layer loss\r\n", - "I0307 01:34:31.885275 2099749632 net.cpp:380] loss <- fc2_fc2_0_split_0\r\n", - "I0307 01:34:31.885285 2099749632 net.cpp:380] loss <- label_data_1_split_0\r\n", - "I0307 01:34:31.885296 2099749632 net.cpp:338] loss -> loss\r\n", - "I0307 01:34:31.885308 2099749632 net.cpp:113] Setting up loss\r\n", - "I0307 01:34:31.885316 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", - "I0307 01:34:31.885330 2099749632 net.cpp:120] Top shape: (1)\r\n", - "I0307 01:34:31.885337 2099749632 net.cpp:122] with loss weight 1\r\n", - "I0307 01:34:31.885346 2099749632 layer_factory.hpp:74] Creating layer accuracy\r\n", - "I0307 01:34:31.885360 2099749632 net.cpp:84] Creating Layer accuracy\r\n", - "I0307 01:34:31.885368 2099749632 net.cpp:380] accuracy <- fc2_fc2_0_split_1\r\n", - "I0307 01:34:31.885375 2099749632 net.cpp:380] accuracy <- label_data_1_split_1\r\n", - "I0307 01:34:31.885383 2099749632 net.cpp:338] accuracy -> accuracy\r\n", - "I0307 01:34:31.885392 2099749632 net.cpp:113] Setting up accuracy\r\n", - "I0307 01:34:31.885401 2099749632 net.cpp:120] Top shape: (1)\r\n", - "I0307 01:34:31.885407 2099749632 net.cpp:169] accuracy does not need backward computation.\r\n", - "I0307 01:34:31.885413 2099749632 net.cpp:167] loss needs backward computation.\r\n", - "I0307 01:34:31.885419 2099749632 net.cpp:167] fc2_fc2_0_split needs backward computation.\r\n", - "I0307 01:34:31.885426 2099749632 net.cpp:167] fc2 needs backward computation.\r\n", - "I0307 01:34:31.885432 2099749632 net.cpp:167] relu1 needs backward computation.\r\n", - "I0307 01:34:31.885438 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", - "I0307 01:34:31.885444 2099749632 net.cpp:169] label_data_1_split does not need backward computation.\r\n", - "I0307 01:34:31.885452 2099749632 net.cpp:169] data does not need backward computation.\r\n", - "I0307 01:34:31.885457 2099749632 net.cpp:205] This network produces output accuracy\r\n", - "I0307 01:34:31.885613 2099749632 net.cpp:205] This network produces output loss\r\n", - "I0307 01:34:31.885632 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", - "I0307 01:34:31.885639 2099749632 net.cpp:217] Network initialization done.\r\n", - "I0307 01:34:31.885645 2099749632 net.cpp:218] Memory required for data: 3728\r\n", - "I0307 01:34:31.885685 2099749632 solver.cpp:42] Solver scaffolding done.\r\n", - "I0307 01:34:31.885711 2099749632 solver.cpp:222] Solving LogisticRegressionNet\r\n", - "I0307 01:34:31.885721 2099749632 solver.cpp:223] Learning Rate Policy: step\r\n", - "I0307 01:34:31.885730 2099749632 solver.cpp:266] Iteration 0, Testing net (#0)\r\n", - "I0307 01:34:31.901005 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.5944\r\n", - "I0307 01:34:31.901049 2099749632 solver.cpp:315] Test net output #1: loss = 0.693021 (* 1 = 0.693021 loss)\r\n", - "I0307 01:34:31.901177 2099749632 solver.cpp:189] Iteration 0, loss = 0.693163\r\n", - "I0307 01:34:31.901192 2099749632 solver.cpp:204] Train net output #0: loss = 0.693163 (* 1 = 0.693163 loss)\r\n", - "I0307 01:34:31.901203 2099749632 solver.cpp:464] Iteration 0, lr = 0.01\r\n", - "I0307 01:34:31.920586 2099749632 solver.cpp:266] Iteration 1000, Testing net (#0)\r\n" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "I0307 01:34:31.924612 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7556\r\n", - "I0307 01:34:31.924646 2099749632 solver.cpp:315] Test net output #1: loss = 0.511002 (* 1 = 0.511002 loss)\r\n", - "I0307 01:34:31.924684 2099749632 solver.cpp:189] Iteration 1000, loss = 0.38536\r\n", - "I0307 01:34:31.924696 2099749632 solver.cpp:204] Train net output #0: loss = 0.38536 (* 1 = 0.38536 loss)\r\n", - "I0307 01:34:31.924706 2099749632 solver.cpp:464] Iteration 1000, lr = 0.01\r\n", - "I0307 01:34:31.944727 2099749632 solver.cpp:266] Iteration 2000, Testing net (#0)\r\n", - "I0307 01:34:31.948729 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7824\r\n", - "I0307 01:34:31.948763 2099749632 solver.cpp:315] Test net output #1: loss = 0.489214 (* 1 = 0.489214 loss)\r\n", - "I0307 01:34:31.948799 2099749632 solver.cpp:189] Iteration 2000, loss = 0.532582\r\n", - "I0307 01:34:31.948812 2099749632 solver.cpp:204] Train net output #0: loss = 0.532582 (* 1 = 0.532582 loss)\r\n", - "I0307 01:34:31.948823 2099749632 solver.cpp:464] Iteration 2000, lr = 0.01\r\n", - "I0307 01:34:31.968670 2099749632 solver.cpp:266] Iteration 3000, Testing net (#0)\r\n" + "state {\r\n", + " phase: TEST\r\n", + "}\r\n", + "I0905 01:07:27.477839 2129298192 net.cpp:67] Creating Layer data\r\n", + "I0905 01:07:27.477850 2129298192 net.cpp:356] data -> data\r\n", + "I0905 01:07:27.477861 2129298192 net.cpp:356] data -> label\r\n", + "I0905 01:07:27.477870 2129298192 net.cpp:96] Setting up data\r\n", + "I0905 01:07:27.477876 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/test.txt\r\n", + "I0905 01:07:27.477902 2129298192 hdf5_data_layer.cpp:69] Number of files: 1\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0307 01:34:31.972393 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7956\r\n", - "I0307 01:34:31.972411 2099749632 solver.cpp:315] Test net output #1: loss = 0.454184 (* 1 = 0.454184 loss)\r\n", - "I0307 01:34:31.973024 2099749632 solver.cpp:189] Iteration 3000, loss = 0.541374\r\n", - "I0307 01:34:31.973057 2099749632 solver.cpp:204] Train net output #0: loss = 0.541374 (* 1 = 0.541374 loss)\r\n", - "I0307 01:34:31.973067 2099749632 solver.cpp:464] Iteration 3000, lr = 0.01\r\n", - "I0307 01:34:31.994829 2099749632 solver.cpp:266] Iteration 4000, Testing net (#0)\r\n", - "I0307 01:34:31.998638 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.798\r\n", - "I0307 01:34:31.998663 2099749632 solver.cpp:315] Test net output #1: loss = 0.456348 (* 1 = 0.456348 loss)\r\n", - "I0307 01:34:31.998705 2099749632 solver.cpp:189] Iteration 4000, loss = 0.490437\r\n", - "I0307 01:34:31.998718 2099749632 solver.cpp:204] Train net output #0: loss = 0.490437 (* 1 = 0.490437 loss)\r\n", - "I0307 01:34:31.998725 2099749632 solver.cpp:464] Iteration 4000, lr = 0.01\r\n", - "I0307 01:34:32.021085 2099749632 solver.cpp:266] Iteration 5000, Testing net (#0)\r\n" + "I0905 01:07:27.477910 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/test.h5\r\n", + "I0905 01:07:27.478999 2129298192 hdf5_data_layer.cpp:49] Successully loaded 2500 rows\r\n", + "I0905 01:07:27.479014 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", + "I0905 01:07:27.479022 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", + "I0905 01:07:27.479028 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.479038 2129298192 net.cpp:67] Creating Layer label_data_1_split\r\n", + "I0905 01:07:27.479044 2129298192 net.cpp:394] label_data_1_split <- label\r\n", + "I0905 01:07:27.479058 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_0\r\n", + "I0905 01:07:27.479069 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_1\r\n", + "I0905 01:07:27.479079 2129298192 net.cpp:96] Setting up label_data_1_split\r\n", + "I0905 01:07:27.479086 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.479092 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", + "I0905 01:07:27.479100 2129298192 net.cpp:67] Creating Layer fc1\r\n", + "I0905 01:07:27.480850 2129298192 net.cpp:394] fc1 <- data\r\n", + "I0905 01:07:27.480871 2129298192 net.cpp:356] fc1 -> fc1\r\n", + "I0905 01:07:27.480887 2129298192 net.cpp:96] Setting up fc1\r\n", + "I0905 01:07:27.480908 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", + "I0905 01:07:27.480978 2129298192 net.cpp:67] Creating Layer relu1\r\n", + "I0905 01:07:27.480986 2129298192 net.cpp:394] relu1 <- fc1\r\n", + "I0905 01:07:27.480994 2129298192 net.cpp:345] relu1 -> fc1 (in-place)\r\n", + "I0905 01:07:27.481003 2129298192 net.cpp:96] Setting up relu1\r\n", + "I0905 01:07:27.481009 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", + "I0905 01:07:27.481017 2129298192 net.cpp:67] Creating Layer fc2\r\n", + "I0905 01:07:27.481024 2129298192 net.cpp:394] fc2 <- fc1\r\n", + "I0905 01:07:27.481031 2129298192 net.cpp:356] fc2 -> fc2\r\n", + "I0905 01:07:27.481041 2129298192 net.cpp:96] Setting up fc2\r\n", + "I0905 01:07:27.481055 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.481065 2129298192 net.cpp:67] Creating Layer fc2_fc2_0_split\r\n", + "I0905 01:07:27.481343 2129298192 net.cpp:394] fc2_fc2_0_split <- fc2\r\n", + "I0905 01:07:27.481360 2129298192 net.cpp:356] fc2_fc2_0_split -> fc2_fc2_0_split_0\r\n", + "I0905 01:07:27.481371 2129298192 net.cpp:356] fc2_fc2_0_split -> fc2_fc2_0_split_1\r\n", + "I0905 01:07:27.481379 2129298192 net.cpp:96] Setting up fc2_fc2_0_split\r\n", + "I0905 01:07:27.481387 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.481392 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", + "I0905 01:07:27.481401 2129298192 net.cpp:67] Creating Layer loss\r\n", + "I0905 01:07:27.481407 2129298192 net.cpp:394] loss <- fc2_fc2_0_split_0\r\n", + "I0905 01:07:27.481413 2129298192 net.cpp:394] loss <- label_data_1_split_0\r\n", + "I0905 01:07:27.481421 2129298192 net.cpp:356] loss -> loss\r\n", + "I0905 01:07:27.481434 2129298192 net.cpp:96] Setting up loss\r\n", + "I0905 01:07:27.481446 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", + "I0905 01:07:27.481452 2129298192 net.cpp:109] with loss weight 1\r\n", + "I0905 01:07:27.481466 2129298192 net.cpp:67] Creating Layer accuracy\r\n", + "I0905 01:07:27.481472 2129298192 net.cpp:394] accuracy <- fc2_fc2_0_split_1\r\n", + "I0905 01:07:27.481504 2129298192 net.cpp:394] accuracy <- label_data_1_split_1\r\n", + "I0905 01:07:27.481513 2129298192 net.cpp:356] accuracy -> accuracy\r\n", + "I0905 01:07:27.481521 2129298192 net.cpp:96] Setting up accuracy\r\n", + "I0905 01:07:27.481528 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", + "I0905 01:07:27.481534 2129298192 net.cpp:172] accuracy does not need backward computation.\r\n", + "I0905 01:07:27.481540 2129298192 net.cpp:170] loss needs backward computation.\r\n", + "I0905 01:07:27.481545 2129298192 net.cpp:170] fc2_fc2_0_split needs backward computation.\r\n", + "I0905 01:07:27.481551 2129298192 net.cpp:170] fc2 needs backward computation.\r\n", + "I0905 01:07:27.481557 2129298192 net.cpp:170] relu1 needs backward computation.\r\n", + "I0905 01:07:27.481562 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", + "I0905 01:07:27.481569 2129298192 net.cpp:172] label_data_1_split does not need backward computation.\r\n", + "I0905 01:07:27.481575 2129298192 net.cpp:172] data does not need backward computation.\r\n", + "I0905 01:07:27.481730 2129298192 net.cpp:208] This network produces output accuracy\r\n", + "I0905 01:07:27.481742 2129298192 net.cpp:208] This network produces output loss\r\n", + "I0905 01:07:27.481758 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", + "I0905 01:07:27.481766 2129298192 net.cpp:219] Network initialization done.\r\n", + "I0905 01:07:27.481771 2129298192 net.cpp:220] Memory required for data: 3728\r\n", + "I0905 01:07:27.481814 2129298192 solver.cpp:46] Solver scaffolding done.\r\n", + "I0905 01:07:27.481822 2129298192 solver.cpp:165] Solving LogisticRegressionNet\r\n", + "I0905 01:07:27.481844 2129298192 solver.cpp:251] Iteration 0, Testing net (#0)\r\n", + "I0905 01:07:27.488900 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.4924\r\n", + "I0905 01:07:27.488932 2129298192 solver.cpp:302] Test net output #1: loss = 0.693168 (* 1 = 0.693168 loss)\r\n", + "I0905 01:07:27.488962 2129298192 solver.cpp:195] Iteration 0, loss = 0.692972\r\n", + "I0905 01:07:27.488973 2129298192 solver.cpp:210] Train net output #0: loss = 0.692972 (* 1 = 0.692972 loss)\r\n", + "I0905 01:07:27.488984 2129298192 solver.cpp:405] Iteration 0, lr = 0.01\r\n", + "I0905 01:07:27.495033 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.495604 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.497684 2129298192 solver.cpp:251] Iteration 1000, Testing net (#0)\r\n", + "I0905 01:07:27.504875 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7744\r\n", + "I0905 01:07:27.504930 2129298192 solver.cpp:302] Test net output #1: loss = 0.486552 (* 1 = 0.486552 loss)\r\n", + "I0905 01:07:27.504955 2129298192 solver.cpp:195] Iteration 1000, loss = 0.660151\r\n", + "I0905 01:07:27.504966 2129298192 solver.cpp:210] Train net output #0: loss = 0.660151 (* 1 = 0.660151 loss)\r\n", + "I0905 01:07:27.504976 2129298192 solver.cpp:405] Iteration 1000, lr = 0.01\r\n", + "I0905 01:07:27.509419 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.509467 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.510288 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.514822 2129298192 solver.cpp:251] Iteration 2000, Testing net (#0)\r\n", + "I0905 01:07:27.522342 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8004\r\n", + "I0905 01:07:27.522444 2129298192 solver.cpp:302] Test net output #1: loss = 0.447153 (* 1 = 0.447153 loss)\r\n", + "I0905 01:07:27.522483 2129298192 solver.cpp:195] Iteration 2000, loss = 0.505697\r\n", + "I0905 01:07:27.522495 2129298192 solver.cpp:210] Train net output #0: loss = 0.505697 (* 1 = 0.505697 loss)\r\n", + "I0905 01:07:27.522507 2129298192 solver.cpp:405] Iteration 2000, lr = 0.01\r\n", + "I0905 01:07:27.524762 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.525921 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0307 01:34:32.024950 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.804\r\n", - "I0307 01:34:32.024981 2099749632 solver.cpp:315] Test net output #1: loss = 0.46184 (* 1 = 0.46184 loss)\r\n", - "I0307 01:34:32.025017 2099749632 solver.cpp:189] Iteration 5000, loss = 0.467703\r\n", - "I0307 01:34:32.025028 2099749632 solver.cpp:204] Train net output #0: loss = 0.467704 (* 1 = 0.467704 loss)\r\n", - "I0307 01:34:32.025038 2099749632 solver.cpp:464] Iteration 5000, lr = 0.001\r\n", - "I0307 01:34:32.044390 2099749632 solver.cpp:266] Iteration 6000, Testing net (#0)\r\n", - "I0307 01:34:32.048216 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8208\r\n", - "I0307 01:34:32.048239 2099749632 solver.cpp:315] Test net output #1: loss = 0.423084 (* 1 = 0.423084 loss)\r\n", - "I0307 01:34:32.048790 2099749632 solver.cpp:189] Iteration 6000, loss = 0.480104\r\n", - "I0307 01:34:32.048809 2099749632 solver.cpp:204] Train net output #0: loss = 0.480105 (* 1 = 0.480105 loss)\r\n", - "I0307 01:34:32.048827 2099749632 solver.cpp:464] Iteration 6000, lr = 0.001\r\n", - "I0307 01:34:32.067795 2099749632 solver.cpp:266] Iteration 7000, Testing net (#0)\r\n", - "I0307 01:34:32.071524 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8124\r\n", - "I0307 01:34:32.071542 2099749632 solver.cpp:315] Test net output #1: loss = 0.423947 (* 1 = 0.423947 loss)\r\n", - "I0307 01:34:32.071570 2099749632 solver.cpp:189] Iteration 7000, loss = 0.447471\r\n", - "I0307 01:34:32.071617 2099749632 solver.cpp:204] Train net output #0: loss = 0.447472 (* 1 = 0.447472 loss)\r\n", - "I0307 01:34:32.071626 2099749632 solver.cpp:464] Iteration 7000, lr = 0.001\r\n" + "I0905 01:07:27.533335 2129298192 solver.cpp:251] Iteration 3000, Testing net (#0)\r\n", + "I0905 01:07:27.541055 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8144\r\n", + "I0905 01:07:27.541146 2129298192 solver.cpp:302] Test net output #1: loss = 0.421441 (* 1 = 0.421441 loss)\r\n", + "I0905 01:07:27.541160 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.541167 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.542178 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.542261 2129298192 solver.cpp:195] Iteration 3000, loss = 0.242177\r\n", + "I0905 01:07:27.542284 2129298192 solver.cpp:210] Train net output #0: loss = 0.242177 (* 1 = 0.242177 loss)\r\n", + "I0905 01:07:27.542310 2129298192 solver.cpp:405] Iteration 3000, lr = 0.01\r\n", + "I0905 01:07:27.549348 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.550144 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.552340 2129298192 solver.cpp:251] Iteration 4000, Testing net (#0)\r\n", + "I0905 01:07:27.560089 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.784001\r\n", + "I0905 01:07:27.560227 2129298192 solver.cpp:302] Test net output #1: loss = 0.4395 (* 1 = 0.4395 loss)\r\n", + "I0905 01:07:27.560286 2129298192 solver.cpp:195] Iteration 4000, loss = 1.01631\r\n", + "I0905 01:07:27.560302 2129298192 solver.cpp:210] Train net output #0: loss = 1.01631 (* 1 = 1.01631 loss)\r\n", + "I0905 01:07:27.560315 2129298192 solver.cpp:405] Iteration 4000, lr = 0.01\r\n", + "I0905 01:07:27.565016 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.565101 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.566145 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.570286 2129298192 solver.cpp:251] Iteration 5000, Testing net (#0)\r\n", + "I0905 01:07:27.577373 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.802\r\n", + "I0905 01:07:27.577426 2129298192 solver.cpp:302] Test net output #1: loss = 0.463582 (* 1 = 0.463582 loss)\r\n", + "I0905 01:07:27.577452 2129298192 solver.cpp:195] Iteration 5000, loss = 0.632809\r\n", + "I0905 01:07:27.577463 2129298192 solver.cpp:210] Train net output #0: loss = 0.632809 (* 1 = 0.632809 loss)\r\n", + "I0905 01:07:27.577564 2129298192 solver.cpp:405] Iteration 5000, lr = 0.001\r\n", + "I0905 01:07:27.579649 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.580368 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0307 01:34:32.091625 2099749632 solver.cpp:266] Iteration 8000, Testing net (#0)\r\n", - "I0307 01:34:32.095410 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.814\r\n", - "I0307 01:34:32.095432 2099749632 solver.cpp:315] Test net output #1: loss = 0.423586 (* 1 = 0.423586 loss)\r\n", - "I0307 01:34:32.095461 2099749632 solver.cpp:189] Iteration 8000, loss = 0.386258\r\n", - "I0307 01:34:32.095474 2099749632 solver.cpp:204] Train net output #0: loss = 0.386259 (* 1 = 0.386259 loss)\r\n", - "I0307 01:34:32.095481 2099749632 solver.cpp:464] Iteration 8000, lr = 0.001\r\n", - "I0307 01:34:32.117184 2099749632 solver.cpp:266] Iteration 9000, Testing net (#0)\r\n", - "I0307 01:34:32.121587 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8208\r\n", - "I0307 01:34:32.121608 2099749632 solver.cpp:315] Test net output #1: loss = 0.419969 (* 1 = 0.419969 loss)\r\n", - "I0307 01:34:32.122161 2099749632 solver.cpp:189] Iteration 9000, loss = 0.468262\r\n", - "I0307 01:34:32.122181 2099749632 solver.cpp:204] Train net output #0: loss = 0.468262 (* 1 = 0.468262 loss)\r\n", - "I0307 01:34:32.122191 2099749632 solver.cpp:464] Iteration 9000, lr = 0.001\r\n" + "I0905 01:07:27.586956 2129298192 solver.cpp:251] Iteration 6000, Testing net (#0)\r\n", + "I0905 01:07:27.594288 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.822\r\n", + "I0905 01:07:27.594327 2129298192 solver.cpp:302] Test net output #1: loss = 0.407026 (* 1 = 0.407026 loss)\r\n", + "I0905 01:07:27.594338 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.594344 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.594861 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.594897 2129298192 solver.cpp:195] Iteration 6000, loss = 0.214342\r\n", + "I0905 01:07:27.594910 2129298192 solver.cpp:210] Train net output #0: loss = 0.214342 (* 1 = 0.214342 loss)\r\n", + "I0905 01:07:27.594919 2129298192 solver.cpp:405] Iteration 6000, lr = 0.001\r\n", + "I0905 01:07:27.601003 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.601380 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.603358 2129298192 solver.cpp:251] Iteration 7000, Testing net (#0)\r\n", + "I0905 01:07:27.610307 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8264\r\n", + "I0905 01:07:27.610323 2129298192 solver.cpp:302] Test net output #1: loss = 0.403283 (* 1 = 0.403283 loss)\r\n", + "I0905 01:07:27.610342 2129298192 solver.cpp:195] Iteration 7000, loss = 0.894732\r\n", + "I0905 01:07:27.610352 2129298192 solver.cpp:210] Train net output #0: loss = 0.894732 (* 1 = 0.894732 loss)\r\n", + "I0905 01:07:27.610359 2129298192 solver.cpp:405] Iteration 7000, lr = 0.001\r\n", + "I0905 01:07:27.614289 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.614297 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.614701 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.618602 2129298192 solver.cpp:251] Iteration 8000, Testing net (#0)\r\n", + "I0905 01:07:27.625637 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8216\r\n", + "I0905 01:07:27.625661 2129298192 solver.cpp:302] Test net output #1: loss = 0.402446 (* 1 = 0.402446 loss)\r\n", + "I0905 01:07:27.625680 2129298192 solver.cpp:195] Iteration 8000, loss = 0.500503\r\n", + "I0905 01:07:27.625690 2129298192 solver.cpp:210] Train net output #0: loss = 0.500503 (* 1 = 0.500503 loss)\r\n", + "I0905 01:07:27.625707 2129298192 solver.cpp:405] Iteration 8000, lr = 0.001\r\n", + "I0905 01:07:27.627665 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.628075 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0307 01:34:32.141635 2099749632 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_10000.caffemodel\r\n", - "I0307 01:34:32.141860 2099749632 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_10000.solverstate\r\n", - "I0307 01:34:32.141978 2099749632 solver.cpp:248] Iteration 10000, loss = 0.441529\r\n", - "I0307 01:34:32.141995 2099749632 solver.cpp:266] Iteration 10000, Testing net (#0)\r\n", - "I0307 01:34:32.145747 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8148\r\n", - "I0307 01:34:32.145771 2099749632 solver.cpp:315] Test net output #1: loss = 0.4216 (* 1 = 0.4216 loss)\r\n", - "I0307 01:34:32.145779 2099749632 solver.cpp:253] Optimization Done.\r\n", - "I0307 01:34:32.145786 2099749632 caffe.cpp:121] Optimization Done.\r\n" + "I0905 01:07:27.634202 2129298192 solver.cpp:251] Iteration 9000, Testing net (#0)\r\n", + "I0905 01:07:27.641368 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8252\r\n", + "I0905 01:07:27.641412 2129298192 solver.cpp:302] Test net output #1: loss = 0.404175 (* 1 = 0.404175 loss)\r\n", + "I0905 01:07:27.641422 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", + "I0905 01:07:27.641428 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.641960 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.642004 2129298192 solver.cpp:195] Iteration 9000, loss = 0.201587\r\n", + "I0905 01:07:27.642016 2129298192 solver.cpp:210] Train net output #0: loss = 0.201587 (* 1 = 0.201587 loss)\r\n", + "I0905 01:07:27.642026 2129298192 solver.cpp:405] Iteration 9000, lr = 0.001\r\n", + "I0905 01:07:27.648680 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", + "I0905 01:07:27.649211 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", + "I0905 01:07:27.651327 2129298192 solver.cpp:319] Snapshotting to examples/hdf5_classification/data/train_iter_10000\r\n", + "I0905 01:07:27.651476 2129298192 solver.cpp:326] Snapshotting solver state to examples/hdf5_classification/data/train_iter_10000.solverstate\r\n", + "I0905 01:07:27.651564 2129298192 solver.cpp:232] Iteration 10000, loss = 0.935422\r\n", + "I0905 01:07:27.651582 2129298192 solver.cpp:251] Iteration 10000, Testing net (#0)\r\n", + "I0905 01:07:27.658738 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.826\r\n", + "I0905 01:07:27.658782 2129298192 solver.cpp:302] Test net output #1: loss = 0.400826 (* 1 = 0.400826 loss)\r\n", + "I0905 01:07:27.658790 2129298192 solver.cpp:237] Optimization Done.\r\n", + "I0905 01:07:27.658797 2129298192 caffe.cpp:114] Optimization Done.\r\n" ] } ], - "prompt_number": 8 + "prompt_number": 7 }, { "cell_type": "code", @@ -1065,7 +938,7 @@ "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 9 + "prompt_number": 8 } ], "metadata": {} diff --git a/examples/hdf5_classification/solver.prototxt b/examples/hdf5_classification/solver.prototxt index 65a6eb9e9fb..040162076b8 100644 --- a/examples/hdf5_classification/solver.prototxt +++ b/examples/hdf5_classification/solver.prototxt @@ -1,5 +1,5 @@ -net: "hdf5_classification/train_val.prototxt" -test_iter: 250 +net: "examples/hdf5_classification/train_val.prototxt" +test_iter: 1000 test_interval: 1000 base_lr: 0.01 lr_policy: "step" @@ -10,5 +10,5 @@ max_iter: 10000 momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 -snapshot_prefix: "hdf5_classification/data/train" +snapshot_prefix: "examples/hdf5_classification/data/train" solver_mode: CPU diff --git a/examples/hdf5_classification/solver2.prototxt b/examples/hdf5_classification/solver2.prototxt index 32b9feba346..32a3693b4a1 100644 --- a/examples/hdf5_classification/solver2.prototxt +++ b/examples/hdf5_classification/solver2.prototxt @@ -1,5 +1,5 @@ -net: "hdf5_classification/train_val2.prototxt" -test_iter: 250 +net: "examples/hdf5_classification/train_val2.prototxt" +test_iter: 1000 test_interval: 1000 base_lr: 0.01 lr_policy: "step" @@ -10,5 +10,5 @@ max_iter: 10000 momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 -snapshot_prefix: "hdf5_classification/data/train" +snapshot_prefix: "examples/hdf5_classification/data/train" solver_mode: CPU diff --git a/examples/hdf5_classification/train_val.prototxt b/examples/hdf5_classification/train_val.prototxt index d5e8dbfa169..b9ccc1a93ec 100644 --- a/examples/hdf5_classification/train_val.prototxt +++ b/examples/hdf5_classification/train_val.prototxt @@ -8,7 +8,7 @@ layer { phase: TRAIN } hdf5_data_param { - source: "hdf5_classification/data/train.txt" + source: "examples/hdf5_classification/data/train.txt" batch_size: 10 } } @@ -21,7 +21,7 @@ layer { phase: TEST } hdf5_data_param { - source: "hdf5_classification/data/test.txt" + source: "examples/hdf5_classification/data/test.txt" batch_size: 10 } } diff --git a/examples/hdf5_classification/train_val2.prototxt b/examples/hdf5_classification/train_val2.prototxt index 8795e8facb6..f9ef731fff9 100644 --- a/examples/hdf5_classification/train_val2.prototxt +++ b/examples/hdf5_classification/train_val2.prototxt @@ -8,7 +8,7 @@ layer { phase: TRAIN } hdf5_data_param { - source: "hdf5_classification/data/train.txt" + source: "examples/hdf5_classification/data/train.txt" batch_size: 10 } } @@ -21,7 +21,7 @@ layer { phase: TEST } hdf5_data_param { - source: "hdf5_classification/data/test.txt" + source: "examples/hdf5_classification/data/test.txt" batch_size: 10 } } diff --git a/examples/imagenet/bvlc_caffenet_full_conv.prototxt b/examples/imagenet/bvlc_caffenet_full_conv.prototxt new file mode 100644 index 00000000000..7b22bfa1404 --- /dev/null +++ b/examples/imagenet/bvlc_caffenet_full_conv.prototxt @@ -0,0 +1,216 @@ +# This file is for the net_surgery.ipynb example notebook. +name: "CaffeNetConv" +input: "data" +input_dim: 1 +input_dim: 3 +input_dim: 451 +input_dim: 451 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm1" + type: "LRN" + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "norm1" + top: "conv2" + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm2" + type: "LRN" + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "norm2" + top: "conv3" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6-conv" + type: "Convolution" + bottom: "pool5" + top: "fc6-conv" + convolution_param { + num_output: 4096 + kernel_size: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6-conv" + top: "fc6-conv" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6-conv" + top: "fc6-conv" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc7-conv" + type: "Convolution" + bottom: "fc6-conv" + top: "fc7-conv" + convolution_param { + num_output: 4096 + kernel_size: 1 + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7-conv" + top: "fc7-conv" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7-conv" + top: "fc7-conv" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8-conv" + type: "Convolution" + bottom: "fc7-conv" + top: "fc8-conv" + convolution_param { + num_output: 1000 + kernel_size: 1 + } +} +layer { + name: "prob" + type: "Softmax" + bottom: "fc8-conv" + top: "prob" +} diff --git a/examples/imagenet/make_imagenet_mean.sh b/examples/imagenet/make_imagenet_mean.sh index 57f43766c4b..d3d0c9af5d2 100755 --- a/examples/imagenet/make_imagenet_mean.sh +++ b/examples/imagenet/make_imagenet_mean.sh @@ -1,12 +1,8 @@ #!/usr/bin/env sh -# Compute the mean image from the imagenet training lmdb +# Compute the mean image from the imagenet training leveldb # N.B. this is available in data/ilsvrc12 -EXAMPLE=examples/imagenet -DATA=data/ilsvrc12 -TOOLS=build/tools - -$TOOLS/compute_image_mean $EXAMPLE/ilsvrc12_train_lmdb \ - $DATA/imagenet_mean.binaryproto +./build/tools/compute_image_mean examples/imagenet/ilsvrc12_train_leveldb \ + data/ilsvrc12/imagenet_mean.binaryproto echo "Done." diff --git a/examples/net_surgery.ipynb b/examples/net_surgery.ipynb index 75c9889fb5a..2932687da6a 100644 --- a/examples/net_surgery.ipynb +++ b/examples/net_surgery.ipynb @@ -4,7 +4,7 @@ "example_name": "Editing model parameters", "include_in_docs": true, "priority": 5, - "signature": "sha256:f21c804f76329e70847ccb87e28a91e5d8a375f5da0ba6dd85d3b87a05bebd72" + "signature": "sha256:811097f2151652d2b630c016a5f1de23bd824df3dfcfc72aa0aeb23b2d9686c0" }, "nbformat": 3, "nbformat_minor": 0, @@ -15,219 +15,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Net Surgery\n", + "## Net Surgery for a Fully-Convolutional Model\n", "\n", - "Caffe networks can be transformed to your particular needs by editing the model parameters. The data, diffs, and parameters of a net are all exposed in pycaffe.\n", + "Caffe models can be transformed to your particular needs by editing the network parameters. In this example, we take the standard Caffe Reference ImageNet model \"CaffeNet\" and transform it into a fully-convolutional model for efficient, dense inference on large inputs. This model generates a classification map that covers a given input size instead of a single classification. In particular a 8 $\\times$ 8 classification map on a 451 $\\times$ 451 input gives 64x the output in only 3x the time. The computation exploits a natural efficiency of convolutional neural network (CNN) structure by dynamic programming in the forward pass from shallow to deep layers.\n", "\n", - "Roll up your sleeves for net surgery with pycaffe!" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib inline\n", - "import Image\n", - "\n", - "# Make sure that caffe is on the python path:\n", - "caffe_root = '../' # this file is expected to be in {caffe_root}/examples\n", - "import sys\n", - "sys.path.insert(0, caffe_root + 'python')\n", - "\n", - "import caffe\n", - "\n", - "# configure plotting\n", - "plt.rcParams['figure.figsize'] = (10, 10)\n", - "plt.rcParams['image.interpolation'] = 'nearest'\n", - "plt.rcParams['image.cmap'] = 'gray'" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 1 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Designer Filters\n", + "To do so we translate the inner product classifier layers of CaffeNet into convolutional layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 \\times 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding.\n", "\n", - "To show how to load, manipulate, and save parameters we'll design our own filters into a simple network that's only a single convolution layer. This net has two blobs, `data` for the input and `conv` for the convolution output and one parameter `conv` for the convolution filter weights and biases." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# Load the net, list its data and params, and filter an example image.\n", - "caffe.set_mode_cpu()\n", - "net = caffe.Net('net_surgery/conv.prototxt', caffe.TEST)\n", - "print(\"blobs {}\\nparams {}\".format(net.blobs.keys(), net.params.keys()))\n", - "\n", - "# load image and prepare as a single input batch for Caffe\n", - "im = np.array(Image.open('images/cat_gray.jpg'))\n", - "plt.title(\"original image\")\n", - "plt.imshow(im)\n", - "plt.axis('off')\n", - "\n", - "im_input = im[np.newaxis, np.newaxis, :, :]\n", - "net.blobs['data'].reshape(*im_input.shape)\n", - "net.blobs['data'].data[...] = im_input" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "blobs ['data', 'conv']\n", - "params ['conv']\n" - ] - }, - { - "metadata": {}, - "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAAlIAAAHNCAYAAADVB5V4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWuMZdl13/c/tx733np393T3PPkYDUccPsQZiaRkCYpE\nCYklOwYhfwjCIAEiJDLswAkQf3AQIEoC64OcIEDiIHESBAiCCAkkJ4GtJHCM+KHQjmGZtmxKJBVC\nwxkOZyac4Uz3dHe97q1bt+7Jh+r/rt/517499ER008xZQKGq7j1nn73XXns9/mvtfZq2bdVTTz31\n1FNPPfXU0z86DR52B3rqqaeeeuqpp57+SaXekeqpp5566qmnnnp6j9Q7Uj311FNPPfXUU0/vkXpH\nqqeeeuqpp5566uk9Uu9I9dRTTz311FNPPb1H6h2pnnrqqaeeeuqpp/dIvSPVU089/b5T0zT/RdM0\n/87v97Xv0s4HmqZZNE1T1WtN03y5aZp/6v/rc3rqqaeeSE1/jlRPPfX0vUBN03xA0suSVtu2XTzc\n3vTUU0//f6Eekeqpp55+X2kZItRTTz319L1IvcLrqaee3pWapnmuaZr/s2maO/dTZH8E3/2399Nz\nf7lpmkNJn7n/2S/hmj/dNM03m6Z5vWmaf/V+Cu5p3P9L9//+yfvX/Kmmab51/55/Ge384aZp/mHT\nNPeapnm1aZp/7x9hDK80TfNT9//+95um+R+bpvmVpmn2m6b5naZpPtQ0zb99/7nfaJrmn8a9P980\nze/ev/alpmn+WLT9oPENm6b5j+63+eZ9Xo3+Ueegp556+u6k3pHqqaeeHkhN06xJ+l8l/RVJ1yX9\n65L++6ZpnsVln5P0S23bbkn6vyS193/UNM3PSPo3Jf20pA9J+sl4RLn2Pt2UtCPpcUn/iqT/vGma\n3fvfHUr6F9u23ZX0hyX9iaZpPvttDiXrGP5ZSf+dpCuS/qGkv3r/88cl/ZKk/wrXfkvSH27bdkfS\nz0v6j5umeeHbHN+flfSMpE/c//2EpH/32+xzTz319F1OvSPVU089vRv9iKTNtm3/bNu287Ztf0PS\n/6Zz58n0l9q2/TuS1LbtSdz/z0n6b9q2/b/btp1IqqFIDf4+lfRn2rY9a9v2f9e58/T999v+fNu2\nX7n/95ck/aqkn3iP4/qbbdv+1bZtzyT9T5KuSfqz9///NUkfaJpm5/6z/nLbtl+///fflPR/SPrx\ndxtf0zSNpF+Q9Kfatr3btu2hpF+W9M+/xz731FNP32W0+rA70FNPPX3X0+OSXovPvnH/c+kc6Xn9\nAfc/JukL+P9B10rS7SgWP5a0JUlN0/ywzhGej0palzSU9Bfepb1l9Bb+nki61V7svpnc/70lab9p\nmp/VuYP0IZ0HoBuSfuf+NQ8a3/X71/7WuU8l6dxp7IPYnnr6HqF+MffUU0/vRt+U9FQDT0DS+yX9\nP9/m/W9Iegr/P1W55tvdPvw/SPpLkp5s23ZP0n+p77Aea5pmKOl/lvQfSrrRtu0VSX9ZFyjag8Z3\nS+dO2Ufatr1y/2fvfoqwp556+h6g3pHqqaee3o1+U+eo0J9ummataZqf1Hl90a/e/76p3NPg878g\n6eebpvlw0zQbkn7xAde+G21JutO27axpmk9L+hf07Tth75XW7//ckrS4j079M/h+6fjuI2v/taT/\npGma65LUNM0TTdPw/p566umfYOodqZ566umB1LbtqaQ/IulnJb0t6T+T9C+1bft7vkSXnZnyWdu2\nf0XSfyrpNyT9nqS/c/+akyX3P8gx+tck/ZmmafZ17rD8WuW53w4t6/Ol/9u2PZD0b+jcYXpH57Vh\nv14uevfx/VuSvibpN5umuafzonYW6vfUU0//BFN/IGdPPfX0j5WapnlO0pckrX8vHpz5vT6+nnrq\nqUs9ItVTTz19x6lpmp+7f57SFUn/gaT/5XvJyfheH19PPfW0nHpHqqeeevrHQX9M52cxfU3nxxv8\niYfbnd93+l4fX0899bSE+tReTz311FNPPfXU03ukh3KO1BNPPNE2TaPFYiHvqB4MBhoMBlosFuV/\nO3n+3TSN2rYVnb/FYqGVlZXqc05PT7VYLDQcDstnvLdpms5z3Jemacqz+FzT2dlZ55n+brFYlHH4\nJx1VjpfP8/0cvz9L8rPdL/PS95DIV//tdufzebl/MBjo7OxMknRycqL5fK7FYqGzs7NOv9xX3ucx\num3zb2VlpVzvfq+urmplZUVra2vl89XV1dI3f+/71tbWNBgMtLKyUr73vb7O/fAzJWk+n+vs7Eyn\np6c6OTnR6empjo+PJUmz2Uyz2UxnZ2c6OzvTdDot43Pbq6urha85d23b6uzsrDM2X+f+sR32lfeb\nb6enp6W/Jycnl9bBYrFQ27YdnrkdEvtLWXAfSOvr6+U6yvLKykpZU+6Tn3N2dtaRi9PT0yIz5oHX\np+cq57Bt2zLffObZ2dklHZAyzb7kGC2P8/m8fMd1nOs++8223EZtTS1bl9RVlo1cp+4jn+371tbW\ntLKyUtaI52gwGGg8Hms4HGo0GpU2LZ8ep/uc7fr5ptXV1fIzGo3KnGU/Z7OZjo6OdHJyXi9/fHys\nk5MTHR8fa39/X7PZrHOv1+dgMNBsNtPp6WlHH3D+qE/5v+eLvLWs+Huubz8v9bWfSX3n+9q2LfJt\nvtV4Zdvk/1dXV8sapL7ydysrK1pfX+/MI+ns7KzMK9e9pKKL5vO5ZrNZGZ919Gw203w+78ilx8Hx\n5/z7/tXV1aodSVk+OzvT2tqatra2tL6+rvX1dY1G528yGo/HGo1GHVvHZ7m/bdtqZWVFk8mkyM3R\n0VFn/LQltD2eY/LIY1wsFh09NhqNihxT1wwGA62trZX7PW+cX//YP+AYTk5OOuvTbQ4GA62vr+tv\n/+2/Xd1d/FAP5OTkp3GpkYW7ptxSWbr9FOhUvlRwXNxcvOwrhY6LM9uuKWE6HxQMGxLyo+YUuR90\nxmqGM9twX7y4OEb/b8UkXRhezwevYz+TJzlGj82fe6HZQNvBPT09VdM0Rei9+Gu8oMHwuOikeBFQ\nyfNzf0cn0f3zdTZSVLocHwOAmhOZPHe/05Fi36yI3A5/WxH6s1zo6USQas6Vx2eniW36ev+4Xx6D\nnU+PwfflWkonM+Uz1yWNDOfE8lbTC/zMz2a77puNPOc428hgin9TFvkcyg+NOR0o3pPt+38avGXO\noQ0Ency1tbXyXDvGkooTs76+Xox/ytNoNCpGkkZrNpsVZ97P9H1ux/JKPnrsdrBozNx2zQiTGBT6\nmXTO2VeOJYljTVmkITWPvG7szDMoonw/SD/bAbY+SpmhLPh5/JyBrXShE8kH/+9167mogQm5HpYF\nIPx7fX298MUOYwZZvp5OiHnlZ9jx87gsjx57ggnU2an/amtQurAD7hPXBfuba926NGWQPPZaok7k\n2GvU10j11FNPPfXUU089vUd6KIhUQrn+jPAeI5OE52sRrz8n2fs0qpJ9yL+XpfLyvloUZGLkW0sL\nEalh5OroIlNUCb8yYuczE4FJpGRZipFtJIrk69KrTwQvowf2MXnrMRLpWBZZsm1GXgnVJkpkfrmv\nhMKTl0wFcYyJspHMH/LF0WimpXiN++7ok+1ndJ9RG9uuESNnPt9tE+1gSnR1dbWkGCR1/jZ/2KZR\nKCJTfC7TTSbKVkah7kvKlL/jmBMFznRAppm9/i1zNWKKJJ+Ra8Tk9ZmIOsdBhNd98N+1SNkoD++b\nzWZlXayurmo2m5Vo2eiU59XolPm2vr5evtvY2CjtNE2jtbU1DYfDS+j+6elp0Uuz2ayDco5Go/L/\ncDjU1tZWQZ88Bqd6c70wzWIko5YZyPVCvZI6jql3y1eiGWyH3zFd5LY4T5mKdB8TJed6MoLj/i7T\nr9PptIN0MIVHOaFey/Xk750GJI/5P/mW65F21qijZcufU8aJVhnlNLHf8/m8yJd0IcMea9oWzgOf\nR31qtNa6iXLpezKN7jWWKHUikkTkh8NhR7+kj/CgbNlDcaRcf/Fuxt1Uc6DSmDJ1lQ4RnYma8ksj\nRZg+IfmaEmV7tTSGKVMstVRDpm9qfaNgGKZcljpIZZFUUyx8tttIaPhBqRbynIqByiH7QMeC/6cS\nrEHUNn40DHRoamkBGi8aWjs7mWo1eRxe3CmH7EfNYGTagvfW/qahogzm9w+613VblhvWszgNQyVF\n3pBvNvRUnBybx03nz2Pl5zXFV1uHOXfpcHK8NbmiwiRRRjIwyrXJOc60L/ubTikdVxuTWuqVfXU9\nUuq7tbU1zedznZ6eajweSzp3bE5PTzUcDotTZePlmir3Y21tTRsbG5LUSfNxLJwnp3S4Dj3no9Go\nOFDr6+udtK9rVuxQUV6X6Xp/TqeZDom/s5OVASmvqzlSKQM1R9/EObKusZzSUTU/7EzQqWAKy8+z\nTuA95pn5armhfiDvycO0cymzXNt0UsgPyy9tyXw+13g8Lk6465DMG9dImS+cQ/d/Pp9rOp2WNl03\n5vor8t68qQEP7Bf7kGRniXWzGYBxjdqZo8NF8lpjepZB3zJ6KI6UF4SFS7pcgFYz1Mu+s2BZ4DhR\nifRQGOmAJWqVTlQ6QdLFxFppsdbFPxTgzDcvMwrJq6RliJT7zfFln0kWuHQaaTxS4dXQoFq7OR5G\neUYkzLc0IHSe2J4VP4lzV3MibYASAXJBPcfN+7wg0/GmE8L+nJ6elgXNaI/oVdb+pANtviRRcTEA\nYXTlfidfbNzIRyKAvocKfjablTXFfvK35SZlkQhBLSp3PyhXlI1a/RINajqWjLhrwQCdu3T0jejw\nGdkn8sY1M0ROEo2lImbUTIch5c28MiKUhtE65fDwsGOwiFKxEN3y4s8530YW3Bfy1M82atS2baf4\nmePZ2Njo1MK4bW+YmEwmHcTEsmLjzfusbygfUleW+LnbY11dGjmui1zH+Xmi0v6faycdcmYUqAey\nXsnriN/T4bMT7Pmn82TKNUZnJvV+Xlfru5+dn5mnGxsbGo/HpehcunAkE6RwW9YVRjo9jpOTk0vB\nittMJ4/rn85eAiJeX9ZDGSBbxiwDXjM5hpqudV8pLw9yoEwPxZGqIUy16CWNeP5tIrPTuFPYsu0H\nOWwPui6NgP/f2trSYrEou1aYJnFfahD0g1I2y/rIsdfuz+hpWXTO67NQl22z6JTOIgU8F1cqr+SX\n+WOlYCcrHRRCvORbRlN8Jp1rOgUe2zJidOO+1CJct51zyfHVEJRlz2SElPPNtOayaKtGq6urGo/H\nBWHgzhYTETj3n3NsR9TP4hzTQNBYZWFmyhwRwORVBjQcZxpSts/ou9Yuv2O0nw5YRtyZcq31xd9R\n4ZM36bRJ3eJ6ts35d8GxkbDFYlGifbc3nU5L0bh/M4jz3Cca6X6lvNJBbZqLtI/lwCksr8ssql5Z\nWSlIhh0pOoCZikkkcVn6jP30d6mj89osneC9lBcaaDpl5Jvn07KUc8ni+uRpkp9np9t8zLS626L9\nYhsu3M6AmTwl+sI2OPe+zw6OHWXu2vPGBfMisyAMZtimHTMHZ3ROzUvPZdM0nfRibWMTx+Q5465E\no6UMHIiaWi5qjiUdd+rEZYEh6aHXSOXkZzTo73xfMraWusnrKUzLnDQKbEaFqYRTCXJS1tbWyk4Q\nRnFMi+T2WaJiOU6OaZmx8PcP4lt64eY9lUrNQawZjexD7m6wMUklQqXExcBnZ/uG2wkNW+F4LrjA\nOXYa0ZzD4XCoweC8ZoEQt6FoO1E1VKo29uTRMgWakL+vpbKko2H5riGYbpPjZd9Go1ExfHQIrTCS\n7/4ud9xQdigbtUiQqY1Ee2ikUmbYb/fTz6OyzfVrfqX8p9NOxeiaFtaskei85Nbx/J9rP50nGyFJ\nHSRpWX1JGj1f73XIMR4dHRVEwwbKCK+PNXD7dL69ZigDdBz8HP6YWAs0HA4vbcl3W0YMmE6cTCaa\nTCalTfaPfeLcEkFNBMW8zpS+7880UNqN1HWeW1IGbzT81CmWd/Yz26/ZN/PCa9N1aJI69WfZT/eF\nNsv99fe0d+lEel2srq52HI3hcFhknulIP399fb2DJtLhp7NP/lt/s+bJ8s5Uam3Ocodd6o4sUSAP\n0kHjZ9mGx0J542/Td50jZcNVixRraT9peY2Pr2UxJwdsRcRoS7q8JdXPdxtZdFZLV9UMLRUwhdTn\nGTnq4aRyLKaag0dllf2mYUo+8HmpjDhu9sNKxHynQlldXdXJyUkR5OSplXsa2nRslzkhdBjIJ6N9\njlqcguAC5pi4aDlOR5D+YbrM33FuyYNaFMb2H4SqWBYtI4mK0DAxuk6lxmtSeZq2trYKMmA55BiT\nT/7OxbBO0XC7Mucx5akW6XJeE33NNmtyaaVt+aUO8PNpGBOd5f+cRxrudKLosKaDXgsoMuBiSsHE\nSDjboQwlslBzEpb1M5HDZWlSo0Scd39HXrN/kgrCbid/ZWXl0hZ9/3bAkw6h+zibzTprkQ5IyhQd\n/pQny0Q6Xrkm2Re2l8FlIra8j+k+857nQBFdYmDGZ5qfnO+1tTWdnJyUtLHJ/5vfDMLolJlqtsPy\nyPVFJ8q8J/9cX+dn2jln4FFzUt2GA14Wn/se2xTKs9vMAIJjMVrHdDBlv7beag6S+0rbn8FpbcPA\nsr6R+uMPeuqpp5566qmnnt4jPbQDOTPKq6FG9KLtmdvDpIf4btub7ZHXoLlEpdgXEqOUjJoyIkp0\nROqecJ31R0zfZX/ordfgz2XpUPcvd5cxQieMnMgSU5isr3G06wiMCBn5xkidffV15GFNFjJiIIJF\napruDrq8z5EzecCxZHGk++7xJ4pX47XbSpSHfOD9teLFZeki3+OxMLpMlCZl0SdjJ1rj6NKpFp7s\n7qjRhcS5LjJNlM9mio98ItReGyPRkUzp5L2mRLRr7Zp/mW5wn31PDTFIMpJIPZTPtpzxwEq2lyla\n98upx5QvRvcei8fgnVTr6+saDoed0+qlCxQoU4mudTLKQTTfJzsbcUl0inUjHH+tEJwImVN90+m0\ns0s06+USTSIim8iK5TtloIbmE6mnPPD51mdMCfm+1Lvc1epTvLmzL/Uex8W++H/zk8+ZTqcFESKS\n46yN9UGuC46fetyUpSmsf2V7zLb42UQbieS5Xq+GEBFNTH3FbEnOI/mWtaFEnCgXtCGLxaKzucKf\nmy+5zolAZSnEu9W6PrRi84RqOQF0qkz8nLAmHZvcsr4sx1z7nukOCkme8JoTkbuiMi1QG3M6WZzA\nWrqBcHS2nUJbG5/vpTDkIqnxhqmFWlrExtbPsQKuOYT+u5aarPWBCns2m5XCz6ZpSsFtykC26e/G\n43FV4dChS55L3WJ46SKnbwcjnWHKbebseS3Tekk5127DhjudePLSZ/ywz3aoTk9PL51kvb6+rslk\n0nE+BoNBqYtKntJJ4v/uS65p04MCnVwL6dSQz7mGmfbw9emcS11D4X5n6j7byDQ6yddwvec81NK+\nNVmgk5tOlmstaRhNbduW18bYmWIK2k6mj15gOtxOgHWC15P153Q6LXJuJ+Hk5KTU7XitppNlXZi7\nYRk4DofDjp7mRgrztBbs8RmcS/Yhi4M5jzTgdog572y/dk8G0Jyr2k7dTDXZWWaKzs6mZYXrx8Gf\n5yIBhlpqjzqXa/Ps7OJsNPPXNi3H5P5m2pf6x33hphynNy3/eTZVHjPDfvs3bUDWidYCL96f7ZFP\ntM8eS8695SH9ELe5rLSo3P/Ab79DlB6xpEtOUCoNDrKm5KloKVC5GKmUiKC4DT4/nTbpcrFerd7h\nQQXINBKpMOgQJoJUUxxE8JaNnUqa9S6JHGRk6MJSP5vndNjL39zcLAiGpPJOO+/oSWfB/a1F6nkd\n58F9pAKSLuoWjLBwYbB+ywo6597jT4TTfXc9h/vrV2cQeWQ/+ZnHSmXj/qURrjkS5A3XCxWDDaT5\nsLW11eGjjdrGxoZ2d3d1dHRU+LZYLLS9va3RaKQ7d+6UNnwQI5V7IhxE/2p1UP6byF+2ybmmovfc\n+TvKbN7H+fVz0iG2A+57cq5oGDgGritS1riQWBz/IMoaEuoBFvOyP5ZJRuj+2zUtvM+8MAKVemrZ\nGUZ8rxvfpejaKq8VoiLJ76ZpOs+0HNZqmIhGZ7Evn+f7OH46Tpwr94sOXOqOZSgI+0RdlYdVeh7d\npvVNDSXjLjW3a57O5+fvdyMSLKmzycOoejp9bIsyTEcg1xydS7bj+wgMJL/TiWWAaVnyRgR/58DO\n6FDKqNtLnuW851jz+mW2tMajmm6l/LHuzv1LPZD00IrNM8KkN5reZio+Ks9EWUheCF4ED0K5GPnx\nOct2SjFVlEJaixj8PxUcBclCT0hRulC0NaPt8SfsmX3xdTXEh5T8JgpkgXMagUW1VAwnJyelSJlG\nwuN3P2qITE1RZBrUPPH1VmY1x6XWf99H5ctFxIgwn+d5SDl1mzxnJ5E5olC1uXI/mD7ytcsUsw/I\ncyR48+bNslPqnXfeKdecnp5qd3e33Pf222+X+bh69aomk4nu3r1b+O31lYqf823+1tApU64nUhrQ\nWuCSCs/r1G3TWJCX/Jv/p7OUn7vdmoLmdwyg3FemhdIxJw8sP4z+a333d9ztRCTNhs2yk/rB629l\nZaUEOOYjdcpkMuk4S0Q/WLCeaF/bdl8+TSTH+pFrKA0j1zADOF9L3lMfk9/We6nbPC88r4j3mV/L\nHGXzO3Wk9Y1l0f30phXOB8fqteTfNYc3X0xs/UEdQseKzgvXpK/jeqJc0V6kbuHc+R7zkAief3t+\nfQ4g1yjn1A4mN5J5LrJ/tP2cm2WOTM0hTB1APcY0N51Gy6EdYT7T6+BBAdJDfWlxzdNLB8O/GdVy\nUVKhZhSZ3icdsHSq0qPP74gyeRGn05OMToXsyWB7fp5pPp93YHorJEPAtYh7mYfetm1nl072iahU\nwpzul3ex+XsrDSNWdChPTk7K2SNEp6QLYeQ81RQKHeokf8++LjPWnEsrvExt1QwrDYd5z+sodySO\nj4suFbhlmONINCFRHo7F50K5baKHbduWk69v3LihtbU17e7u6u2339bR0ZGuX78u6fxcn9lspsPD\nw9Ju1g3RGfQY6CTQkSYPa5Egx5Jkx9HP5trLtc01yr7WiP0xcS5qn9eIc+F55VpchnLVomvKKB1s\n95MpbD+PazvrS7z+bBxtJPy/9VTuvGTazrszJXWcLc8j09uWbbfLQJMy7TXDl78S/aND7HvdL+kC\nxSD/0rmyM1ALWjPQoQxTj6ceomNSs0F2MqwP3ad83Q4dW8tv7nZkP2vItOeBvMv7arYz/2eQTr6Z\n31yvzEBYr9euTQczn8s1zN2H1il5vqLHknqYgAPlxfaT81rjnX8vC/D4TDtQPCDVtEyfdZ639Jvv\nIDG1k8rQSp1MIrOWwX1s10Tna1k/an2g8NGIuE0LU0ZQjAJM2feMVPK5NbTKW1LzoM+MElOBJyKW\n4/CPo41EAMwHKw/pomaMyBTn04Wk9u6Pj48ldbekppPBhcGo3v1gfj0dVyploj5OdzCKSx7UiBGO\nlQcXfipn/5+FuaZMP/jvjJrSQaHctW3bORiPxteo0GKx0ObmZvnuySef1M2bN3VwcKCjoyNNJpPy\n3Qc+8AG9/vrrZW6YSl5fXy8pQLfrvrKPHk8qHMt1Takn4snvOFeJEPmzNJZ0rLh2/L/vf7d70zmj\nM7wMOeWp3RwDU3OMvNk+ibojnVken8IaTqn7qhdTbrDwuiJvjXycnp6W2iiiIGybcmFq24vXgWQG\n4Ozs7FI9mvvuYGEwOD+7rZYez/FQzmvBLddIBpl0ZLwxxp9nDRd1dKIXWRtjJ2NlZaU4fDbAGQCw\nbaLNXstOnRo1pI7w3JlPTrm7n3TkExRg8EEkTFKnRMPz4e8SRWf//Qw7+wwWrRcsczUEkGsqbSDt\nEJ/rdhPl8vW09ymnvt6orufJPE2AgKlyBjKmd6uR6o8/6KmnnnrqqaeeenqP9FAQKUag9AIdtWYx\nMj1XwvzSBYxKDzyjHXvoNXTK9xiG5n2MhOid5q6mHIPbleq1SPa2s76Lz6WHzfTbcDgskWTuwMn0\nFseyjByhJTxuqNPeu/njQnOfAM7IzGmP09NTra+vazqdFt64borRPOeZEQERKc/bsnoHp0L9HWuc\nvHtkGQLFCGcZ8iFdrhEyn/muMcPXhLQpb45wKTNZNJ8pahMRCkZGa2tr2tzcLJGuEUvpPI25u7ur\nnZ2dMi9GoK5evarr16/r9ddfL+/CMgrld7OZL5kCYyEmUVWmUGuolGU607fmEefORP4k3J7P+XbQ\nxkT5/Dd1jqNfR8+JDpovNaTLfU20mXNtqqWOjDrVULbFYtE5+dqoIHcVU9aMmJ+cnHRQJKdWvD6I\nuvl+oyOca88do/WcX6fgBoNBKaD2PBoZMPKQNVIp8x5jTf/6+TU97756TnP9U3e4nUQ6vA5ZYM0U\nK1FZ6WKzi9vwvLh9rzGj26yt8jWuIaJObNu2HNZJniWyYz5ST/n7zJ4wxWbExqldv7DYesx/u6/U\nB4nUGnk6OzvTbDbrvMrI/aghzrkBoWYDPQ9ZT0hEjvcxE8W5kbqZllpfPO+ZPXJby+ihvSImoU8q\np3R6qLBS4aZjlSkTTmItx8kUhZ9nITPTs0DQ189mMw2Hw046qJbek7q7c6y8MiXoSSTM6X7ZmeF3\n8/n5m7a5RXYZ7G7KflGJ0JhQwVhxmKy47US5P3agzs7OdHx83Jk3GlH/JJRL2JUK27tAEr63M2Ml\n7q3e7qPbzfScx5MpGPOBcDl3z7iPi8WinL2USjyNZi5YK3grMc8VDRqdkpRFzqGV9COPPKKrV69q\nY2Oj1D299tprOjs70/PPP6+bN2/qox/9qL72ta9Jkt566y09+uijms/nevPNN3Xjxg298cYbki7O\nrjk8PCxjSXifxj8dF65fKl1+nulAy3SmcNJBSsPhdUl4nqlUznemK2igm6ZbJ5PzWktJe3x0CMwf\nzzEdoizArfWtZjBMLIJ1X3JuPHYWmVueuRuMDhSdf6/PxWJR3q2Wzo0pa+TcB6+tmlNsObBDTt5Q\nb1JPMmXjdQ3XAAAgAElEQVTmsbEPNYeWupz1aVI3jc2Uln8zcGOtls+Isp5kP1nL5jmiDuBOQZ7Z\nZp75eazH5JxmKpLymPV+Hn+m9vJ5Juph6zvrIzp9rN/L1B5tWwaUlEuvGcobbY774+eZbPMpF9SZ\nDLS5GaUmgwYjaIfcN65df0Y+1fyH0vbSb77DxHy+yYOhopa6RbX+30QFZsVSi158H+8lCkIB83OX\n1XI5ak1kidFMkoXmQYhHjtvfsSaFyJmjPBdFE5FIHqTCoaHLKNERlGugMjqwl88IVFJHQM1bCn8q\nnozS6VhwjshbzqHbc/TD9mgUJF3ajcGaq+QLo1YrFKn7mh+jgZxLjikNCuWMBsD9TiPNdsk7Opmj\n0Uj37t3TRz7yET3yyCNaX1/XU089JUm6c+eOptOprl69queee07r6+v65Cc/KUn6W3/rb+nWrVtq\nmka3bt3SzZs39fTTT0uSXn75ZY3H4yIDHveyeck1STmhMeN6ZzDAufF1NTSu5iDlOiNKkcYy17rv\nz8g+dQCfx3lLRMbkuaoVufp7yonXYU0vZD8oz+ms5/qiI85de9xR634QDUjDukyXmXIe+W5H8571\ng2ncs+CZ75hjQbudw1owkWvMn+drW0y0MenUSBfvS01ZYzt0zoiomoeJHvmeDOASkXMfvM4dsLNG\nyqgibSI3FNjpa5qm8xotPpPjJRJJuU0UyfNLdDGpFjjyu7zH9XzmH201bSllhvzKoI38tM1wm0bb\nbdusc8gXBkk1gGQZPTRHih2WLhu+7DQhVSo/MjMdErfFk3zTqcq2pK5xqDlHXCw80JCQYc2RqSn1\nGlHYapGfF8XKykpJ9+T7mqww6bXnWDk2evBGunxaMu+z8FEIs2DWO/c4TjuS7sva2lopuuQ1jpJN\nLIhM40g+5a5GojhWOHTUKHtZtGhDkzsPeSihr8u5TxkiWZ64lZjjoPznXFMeeH7NI488ojfeeENP\nPvmkmqbRk08+KUl6/vnny0nDu7u7Ojs706OPPipJ+rmf+zm98sor+o3f+A198Ytf1O3bt/Xxj39c\n0rkDNplMOjsy6cRkVFyT41RuVpJUrjRSuY0+EZlc69Ll1IKJfaWM0imwnHp9cK54v40FnUQGe+m8\n0FFLBIrPoXLnRo+a/jKvanog+WviWvOPZfjo6Kjzkln/li6/A9B6js+jHqYsrqysdDY7mAf+24Zw\nMplcQpXT4SS/2QfqgKZpHojy2SinbufYcnzmW81hzfVMx8X6xbzMAJJjoZw4ADYRYUxkxw6H26It\nM3JXmyuuAT+j5uCbqAuJkltWuP54rBCRP+qMmu5gH2sInHmaPKwhssvsKG0ZgwofYJs6ymhwzX67\nL7UyHdNDc6QywmJ+WOoqzZo3SONrhUPFY2rbtqSGEnXy5GZEWPuM7fG5XIyMLjMdSGXi6/L0VypE\npqjSM+dClFS2xHOXj3fPuR+LxaJzuGSOh5GdpOJI2JmiwNqBS0fKnzl62d3d7QgqPf5UwuY1Uw/u\nl6Mxzov5xV0knlf/pvLKM214GnGiSjQ+dKT8ORVM8sXPdrsZXZMYlVNZpKKzonQQwV0/TzzxhNbW\n1nT79m29733vK/UOH/jAB0r91PHxsTY2NjrG4YMf/KAef/xxrays6Dd/8zfLePb29rS6uqqdnR3t\n7+9rNpuVs6nsrNaMV40Pte8T5RgOh5d2PDEV4rXkwCWROj6vthY5B9Qn6RBzXXBe+Bz3z7+5Xmv9\nMjF4s8HLNu3ceczsB4POdAbT+PB5PteNuojrKxH6JKLfHpeNptEJ7iYdj8edde51s7a2puPj4xLQ\n+hTs5Fc6hBxL8p/6vuYseb0ZqefaZDvkAflGZM5EpI0vXmZg6d90Ij22WpqNMktngzVHadeIcllf\n0AlgDW/WBNlhIJ8YxJhPltNEssyvwWBQMgHZL64njt/31Wwoeez77Cj7ANuac+rnp27Ndk22UUYc\naWct27ad+RqjWnul30u/+Q4SDSGVBhVeKuGMKGvtSZch17wuryVUz+tqk2XKiUul6EnnIqXR9iRT\nAVjgEy2hAq05fL7GyJRzy/bYLZBte/5ONX+fC90RBWk6nWp1dbVTB5ZGh4rPffC1VrLkjdtN9MZ/\nGzqmQvEb0lmv5HlwZJZpIc6b2+PBcUZc3A+Oz/PDE4fNU0fCXnSUX8uR26SSSgeJfzOiJQyf1/l5\n5vnOzo5u3bqlZ599VisrK5pOp3riiScKH40M3r17V2+99ZZ2d3dLm1tbWxqNRvr5n/95Pffcc/rq\nV78q6Vxp3L59W0dHR0WReR5v3brVUaxpxAi3M4IjUmpZdztZDMr167Vkw+S5JF+WOQK+z+uG/aEu\nqUWZnkPqJ/ZP6m5Q4PhJD0LXM7peJhs0ooni+blek+aj0Wkjy4lsUEdxzTCgyQjcBtCoizc6+DR9\nR/mM7JnaW11dLRsajJa7Xc/PdDq9NHbq8gySqS9zLqgbciwcR81JJg85/zxjiGvb93IsWXdF5zJR\nKo7R9zEtad1Ane0atpoz74Brsbh41xyDb+otrkU6Oa53pa1hip/zS/3IwJ1jJAJMnZi1x/6Ozu+y\nmiWvQY6PNpKOpHThRPlz6h7y07rdtLKycum4k6T++IOeeuqpp5566qmn90gPDZHKVBu99kQVMs1H\nBMFETzej5Myzsx0jNlmMyjQSUYAawkQiGuXn+HpHGPbQCSUTlchtljWkKvvg3+6n05ncESNdbLl1\nVObt0X4O+ekaMEYtUjeK4edMXWb9xXA41MbGhlZXVzWZTDroEusKHNUwtcd0G8fCCM1btjOn7zER\n6WHUnKkm/+9+sC98aSujKs6R5zxTCYz6/X/KSKYS3B/3aTablWMlpPPTy0ejkV5++WX9wA/8gK5f\nv64rV65IOo+w3nzzzVJUvr+/X3izubmp2Wymxx9/XB/5yEf0Iz/yI/rhH/5hSecHeX7+85/Xiy++\nqJ2dHY3HY+3t7UmSjo+PC5roiJnjYoTNNcA14ojYKJf54/Rezhl1BaP3lFVTIsWMXIkmLSuWJd8f\nhFqx3exDjsPXU96IvhMNTj3k56RO5JrPvgwGg1KgnCUG7A8jcLfp9qTLB4kSOTci5ZP0R6NR2cGb\nKRwjwX4Gj+JwOUKmWvlc95fjp84wypBrMtM2kjpF2pkupq1omqaDkFlenBZiX1Ivs99sN1NZRP4S\n4WzbtqS0rAN5Unwt80JdQyTG9WzZN6NVlOFEizIDwCyH26wdrZEy5XE6vevrs9+JjmY2wGSEKNeF\nEUDLh0te/ByvfaNRfAUOUU+vYfL3QfTQTjbP1IB0udKfE2xlk4bP1zP1lQVzmUYxURh9rT9nOo1K\n2G3U+s3vqCjdZjo2CVtL3SLXGl+o6MwLwqYJYbN+RLq8s80GkS8o5Xis9OgAMD3jv/kdU1BeNIb+\n5/O5hsNhqZnwmN0f95njcF1Fjd80FLyGdQPpMPk7Kxumarl4CB1L3ddS1FJ1djBIGSi4nZRTGoja\nGK1YCZtL0s2bNzUej7VYLPThD3+4GKjf/d3f1dnZmd555x1NJhPt7OxoMplIkvb397W2tqZ33nlH\nr7/+un7oh35I165dkyR95jOf0ebmpn7lV36lGGKnb3Z2doojVXO+M/VMnjLd4rQj73EaplaLkE4U\n5yd5ZaoVv7JfXitMybjdZW1ynDXj5fVdKy2gHiCPqEcyrev7MkVZ60c+Lw1zli5Yx9RqTzwPdGra\n9rx2bTweazQald88X25zc7OzY4w8cnueZ6b9uIWdu9MexHePwXqVRpG1oi43MC/sHFnfDQaD4ugl\nr/k89zk3TfA3X+GSThHtE4N6yijtDNPiKSvWo9Szqd/YN9a6JSBBuch6Kq6ZlPn8339z3bnP1KWe\nf/JGurzjnZtO/LLjZQ5NtufnUMdLKvK6srJSdqSzTpf1aAQ0ci3V6KEhUu4wJ4DKiDloT2g6QVJ9\nNwCvs/GjsyXpkkKqCUoKHYmL40FjZD+WOYJEqzx2GlYbgqxjqil/fsa6CTujuZuitquJyv7s7KwT\nmbkIcTqdajweV2savFgyGnCU52ttNGxouehoUHgoHZU7ayD8ORVQOpzpyORWX5MVe62Y10q65kh5\njMztZ4TlvtSCAUZvGV1KF2c8+d7t7W1tbGxoOBzqySef1Gg00sHBgSTplVdekSTt7u6WImC/a+/g\n4EBN02hnZ0f37t3T3/t7f0/PPfecJOn973+/fvRHf1Tj8Vh/7s/9Ob300kultmp7e1u3bt3ScDgs\nO/tqyttjqJ35MxgMyo5Of055Ic8sJ9QDiVYl5b3uT35HNK2mJNMwsX3qo6w19PzXiqRpYHLd1IrW\n+fzUUb6PNX35uREFGiE6OenweCcWDSgde4/JheV2pqRzI+X17ppKHgHQtm05HJY1l8fHx2W9Oyiq\n6dRcw4lisPCdSJTl0s5S256/j3I2m+n4+LjzPKIsdhwZGCXKaOImjET/GPzWAnDqmVwvPKIiA29m\nFtwW73Vfc7NS8nQZyJBBtR3cxWJxaSe3+8YsRKKaNdCA9/qZ387mDY4xgRKuFdsM2yBnIszb4XBY\nzgo7O7s4FNXEXZL2I5bRQ3OkclspvXIbHTpZUtfA0wgnFGeqRZspyLnjgp9LFxNTK3JNRyq9bio+\nomqLxaIUYEoXsKLh7fl8XlUK3u1CA0DnLJ0398N9zyJAeuDkMyMS981C5VTfaDQq51ZZGG2cGD3T\nAWPERYcgF3TOIXlAZWMDYgeKBom7P7yguEuSz6PT5rlftvCtMNJ4eV7oRC07Cdd84nEMLHBm35yK\nGI/H2tzcVNu22t/flyTdu3dP73vf+7SxsaEPfehD2t/f1zvvvCPpfCcnnTOujUcffVS3b98uBaUn\nJyd66aWXyjgef/xxvfDCC/qTf/JP6hd/8Rd17969wksrJZ/JQscgnUTuRrUhcfqHfPKasKJLhTWb\nzS6NoRYQpTHy2HNrOJ0IX2M0M9dAznPqnkQQTHQImRJhBC5dTj+lsU7jVou+ibrxc/Yj0RMaQV7r\n9HHy13z02jfCvLm5KUklbe8f8oaHJTqtzBS85cjpQgcDx8fHnefWguma/na/fR/XFI29EV7LN7MB\nRCvIB88PUS/pPMgxcs5+2tbZEU2AIOeH9pDBLG2X15LfaWr9UUMz6az5XpYscC3awXBK0denvNWC\nBzrBtIvD4bDoMKKPlgdSolWZHTDRuXefM4PhPo3H47Lr2HK7vr7eKTx3W3SYjL6aLy5HWUYPxZEi\n/EbHhJNH4a9F8bVJkOqKlWhA1qjUUCde475QELN/CYXaSBoVYlv0oi2IfAlxRjbT6bQ4VRlh+28v\n/vyc/SS8yv6YlzTs/p588ziMipycnJSddL7PzpIXI5W062LovDJKdt+86PmdFSIjBPKNn1H4rbgy\nH+5FxAiICB2VGCkVcsoAIfTc5s4goIY6sT98tuVyOp1qc3NTu7u7xei//fbbOjk50QsvvKDNzU29\n8sorZWfUeDzuGBJH4B7H5uamJpOJhsOh9vf3y3dvvPGGVldXdf36dX3yk5/UH//jf1y/9mu/VuTE\nDv3m5mbn0M50jIhgmAdWZKzZYK1aIs9U+jT8nAvzOI2sece5JVHXZAqHCj0dfAYuNWJ/OId0OGtR\nNNtMncF2Ui44Hn9nw+LPuK551MQylCJ1BvlkJ2I8Hmt3d7fops3NzeLoJxopnc/XaDQqiBSP8ODu\nNPJhdXW1yCWdbfLUDlG+fsnG0zz02hwOh8Xo2/FhXyzTlsd0TrwGmBJ1/4zapLxZ/6S9oLPu+WLA\nTh2e99GhN69qQEAGJ+Qh+57tM4VpIqpG+bOcZLbD91C+yQeibqlvbT8Tacv2s4+eN6d0LVeSyi7m\njY2NS3o29S9tg9TdRVijh+JI1RAiTj4jRv9vAUiUigVxTC+4TS6IjMw4yWyTBZMW5HTA7Mmnckvj\nSzTIDhKRJPLBjh6L9Tx2e9FS1wh4TFRG0uX6L1/PBZWpARqrmmPqZ9uBmk6n2t/f76CLnqM0Nv7O\n/Ga7jjw5HhoT95Nnq5AsLxsbG6Weh3UX/skCecoc+2o+1QxwLeVmYvRpPibaZR4SefB4E6Hk+H3N\nyclJOb3cZzCNRqNy0CL57eMbtre3y3k60gXCY8XStq1u374tSbp9+7Z2dnbUNI0eeeQRffazny08\n+ut//a/r1q1beuONNwpvc55yzjwezz/XHakWLNkRoIEhb/09EWkikbw+5Zjrj/PKYGYZlF/7PNdL\npuG9tmsGg0Rlzb/tNCQST2cnDbT/rvGUupKIlJGcROL9HCLiDo6kc0RqY2NDbXvxmhG273SeX+NE\ndJiGu2masmFie3tb9+7d0+HhYXl/G9cI9Q7bdDqH43E//Zm/51pz4GGdyPokyhWRZPKPzn3Kac4R\n76s5tERu/Kxsk31IO0S9k1kZykTqMK4l1yaR6PQksGHUqYaqWp+ynwyc/f+yWljy1GvLjhJ1Ltfv\nYHBej+ngmnV9NX8gecYx0xmvUX/8QU899dRTTz311NN7pIee2kuyx8xIK3cCSN3TgumVp3fPIteE\n042QEAnxd0ahEuIk9J1pDf9v+NdtsU1He/S+7aUbceEOOkf2fB+Rn+M6o6zHcB8Y+WaemZA9UTJ+\nxzF5PIvFQkdHRyV1xKiUp50TRWRbTrExFTEajQoqZf4y1cK31fOt8szpr66uanNzszPHTCXx8DVC\ny0lG9/w3EUt/lvNO/vhzX8f0tH9nOol89r3ug1MRx8fHunLlis7OznTr1i1J56+B+b7v+z5NJhPd\nu3dPg8Ggc0DidDot8u+onfPQtq2Oj481HA4LyvXWW2/pm9/8Znlx8fb2tn7mZ35GkvSlL31Jw+Gw\nvAqEKSojYIxgKfs8AJXwvmUk6yNMRKWy4DNlheTvslDXfeVxC7X1kygJ5/DbodRDWbzL/jiqzoM1\neX1NjrJeqsYDj9dEtDhTkP48i65zXOZNFnhbXyZaQV09mUw6qV2+SirRupWVFW1vb6tpGh0dHV1C\ng6yDbTPMI89tns7t+3ywZqagXcvDFBbLFvycGm+o9zIzQvQw58K8y9Qlvzffc8243bSP1HNcg+wz\n0UyiW6xzytTaMp0mdXdMJxLLdvLFzOYP54Xj5701ftTG7DlwnRNrpCgTiWQxm0L/hHV/y+ihvmuP\nTEoHZTAYVIu7crHVlJyZyxxzwtlmIhcajRknY1kaIuF7P8f3OB2V/VoGqXsCCQUzFeYxMK3pZ+WC\nIa+YussCPvahVvxs/iXseXh4WNrxd94FYSNLwTOsT8eVRngwGHS2PvNskhTgNNBbW1udRSGdpxpc\nO8E0gPlCqJltpszw+Qn3kprm4ugDywblmw507mSh88zFa/Kuu/l8rmvXrpVjDO7du1fOeDo6Oiq8\nl85lw68O8jO4Q8XpDab5pPPjFKbTqU5PT/XWW29pPp+XZ3z2s5/VN77xDR0eHhbDz/Xk4lfzjfzK\ngIR8t4JmutkywFREprEywGEqxm3znpRTpyJc72fesG8pe3ROavorZYJj9L1Zp0THufYsqZuySKK8\nkuyEZtG05S0Nhv+nI2XyOVHuv+uceCaQ5YHjM9/p7NCRog5lIOr+uADdgRDfTpA6knzzZ6PRqKNP\nneZhKpmBE4NryoIDEf9PvcAAsTYPHCuJutfjYMCa9oN8YZCf/K6lCik36XgyXewaSPOP4/Uc2PGh\nA+71n7qNfcqi+dS9Kdt0dnk9++T+8Dm2C9vb250dwizzsNwycJIuCtW9Rthmja+mh+ZISd0dUYwA\nzUAKXm3Bs51Uam7T7WZ9TBaP83lUPInImHhfLdedn7EgPCefB3IaqcozlvwcjoU1IYmq0YCmQ0PK\nXU7J67zeSptoh9tfX18vhXzHx8eduqRapFbjl8fPwkUqNO7cMwLjxULH24vFirEWlXuRUhFlzVjN\nOHFeajvz6IRSvnPREgG0o1RTtk3TaG9vr/TNr4F57LHHdHBwoMFgUN6rx7nwWpK679jKiHKxWJSi\n3qY534llBODu3bt6/vnnJUnPPfec/ugf/aN69dVXS7s+zsIKisqPjkPTNJ0dQrXAJ/+3c8KC1JSZ\n5HFuIaczYWeRhdJnZ2c6OjoqO8W8O2eZY0OekdyPWtS6WCwKsuj/ExXN3XIcQw1x8+c0TPmbR4+k\n0V5ZWSlzZ6Jc2rF3NO/59Vry2qJjaX1UQ2Rc32dZ5xk9RCJSLrzDb2VlpdQEShfvvaS9MCoyGAyK\nk0cbIF0UZbPfzAwQ7eH8UJ97HdYQ7XdDDjkHXCtN0xRHlXzhGniQ7ubaqq1B9s9y6uvZPx+BYd7z\n6ADKLeXO/XIfszA712wN/MisCANa8pXPTvnjs8xPZyXMI/Iq+2J+5TpKB7BGD82RIjQpXThDiSxI\ndeYTkqNTQ3TG1xMFIZMYfSSSkyhPKjg+k/e5P+5zOiRMabF9pjDoyBB2Tg/cCsLIVKIudkxTCfiZ\niRqR7x4HF6ev8TgN07qNO3fuaGNjo4yHi5/jcrt0lty33L1SQyXpEBh5srFMBIgRCndH+jOPp+bU\nZeTGOZO6BaHpaPvamvPC1Jafs1ic72piFMR55HsLPSdXr14tzs729nZnZyPnKZFTO/WLxfnuPaMy\nbntjY6MUoR8eHuq1116TdP4i5J/+6Z/WX/trf01//+//fQ2Hw04Eyg0VngM/j1uOfX3yquZE04ki\nQpTBUo33vmZ19fx9kUYjNjc3S6G9nVCflWWnygXOGUHXdBPny5/VrnP/aZBqyrlWwL/sWj47fzP6\nTj1iHhH1oIGvjcMOCueRc0X0PFM0fs+nn8/1QmeBzzeiTF7wXaE0tImAEikjcuZ1b91Hp86o+Orq\nannjQ65394W6hkF8Bpcco+Un9QvRJY+HtjB1VCKQLv3g/FO3Uw+nDSR5rLYni8Wi6AXysxbsWYd7\nHmqbInztsvVkOfB9DyIHZh6b2/RcOxPB3ZzcIZz2mTbY/OffecRC0kNN7ZGRhOvSm+bA+b8pvUUK\nDR2ZTC+QGLGnocv/0ws2WZC8rdaT7edzIaW3bQXhA+1yh5UdDC5aw7CO9mg86LC4rxmheNFTgMk/\n54XTyeR30oWCefvttztGiwgJHUGmmPzbfXA6h8o9HT5GFl40nuucE+6upJIiakR5omJNOcuFRqeB\n0b/b4XzkMRyMaJc5YX6ODztcXT0/w8epNu9COTo60tHRUUHnfJ+Vsf/muU48ysDX+buDg4OyTbhp\nmlKTdeXKFe3s7OgXfuEX9Morr2h/f/8S0mEZ4XpyKs0GLAOTRFY8T0aGlim3TD+0bXvJQJtvw+Gw\n1I8Nh8PO29/NV/N0a2tLd+/eLTvGshaJ67eGkqeO4Vr3d4km2AmmrjEyRCPNdWpngvLiZ7hNH9jK\nwMzGZzqdltf+SOdonE+u39jYuLRLaTQaaXNzs5wK7Tny8y1PDpJshL3jji8e5zqx3nS/6NjYaZlO\npwV99Tim02lHpknWw0agiCwxg8HvzDd+R1lMuaXj8aBAP4NtrvuU4zzA2DxKgKHm0JDm83kHzePu\nceox98NkmTMaRbTJ/cgxEgBg/9g+5SFBCTrFtD1+pueMQWnbth0AgbrGZ5p5/lk/ZSSViGj2iXbJ\n/cua46SH4kjRqSBCYsOUkQAdq5rjlUyoRWi+n/VQVDjShUB5cVsAKPz8P59DuDEF3wLq79hvQ+gW\nKAohoef0sH0tD/JkxG7BdxtUxIwcLJQUOI49Bd9zyDSbpFK4fPXqVW1sbFxygPxcj4dG2FGVx0LF\nmIbZ/XFNEGHxmiPlRZpRo/lFRVKL3hKpq6EqrP/yew5JdKAsWx6H/0+5Yv99Bpd0ntKTpOvXr+v0\n9FRXrlzR0dFROQHePKKD3LZt5/123/rWtwpKd3JyUr6zw/nyyy/rySef1M7OTlHub7zxhnZ2dvR9\n3/d9+tznPqc//+f/fAcBTAeRqfJ0GhOp9bP5WfIwgwB+nvrEz3JtjzdHSBebIlzrw/XN9WXeZH+s\np2ho340YDCZKTMeW7dmYzufz0s9lPGBQ6sDCr3JhytAp35OTE02nU+3s7JS05p07d0p7W1tbHWTB\nNUC7u7vlEM50wGm0vTFEOnfQZrNZqUWjUaqlZ8gDf2b0k2gdt+eTN/P5vByQaUokM3lm3vj5RolS\nf1E3PSjtxGcQnaE+9Zp3m5xP62X3MWXN/as9x/22TjU/PBeUOc6h+0hbTBvFYDSDTEkFzSNvmO7n\nmNxP88/ONFE3IvR0Bq0jqW/pgDrATDAhUSg76f6uht6THoRI9ccf9NRTTz311FNPPb1HeiiIFD1E\neoeGOe3B1iJSe+iMBjINmIVn9koZgRDy8zUmXpcpMXrDGSEmesMiyoycMirNMTIScORs9Iw8My8c\nYfo5rkfw27YdpdRSeO4fa0ocjTgSIILAnWFEegaD8xeA7u/va2dn51KkVsudmxw5JYyaUQqRKR6s\nxjRAzkcN4XR7WdPBOq1EG9ieEYREURxZZUSXc8Z5Z+SUtYOOkFwPtbKyUg4sXCwWOjw81NbWVgcC\n931N05RXEY1Go/L6mBs3bmhnZ0eHh4flOIPs087Ojt5+++2yU1A6j2Zff/11PfXUU/rZn/1Z/Y2/\n8Tf0la98pcgM35uW0bXnJ1MgHgdTwEYyrAcYIed8eM48ZpN56fTW5uZmQd34olLzyZ/x9RLb29s6\nPj6+BPFzTDUZqaXDvd4T4TQxDZztEY1KXeFxUGc4micSR9R9dXVVu7u7Jd3ilOd4PNbx8XE53dtp\nEukcoeLmDacLqcM8bs+hd5c69Xx0dKTpdFrejmB+14q7PT6XBBhx2d7elnS+yaFt25K+IpJPBMTI\nCwuO3V6mjGg3skyAO1yznsb/ExWqyYQzCjn/RJf42+Pw2k67Rt1Lm0iZHI1GnfGPRiNNJpOCXrMe\n1WO27kv0jfq+VlrD+2jj2/aifpXpS/+f2R6P3zymXXKbRPX4zjzXBPI1Rn4e++BnEH0mkkey7XpQ\nKjnPKggAACAASURBVPWhpfZsrCmMUtc5oKEhcRKpaBOay5qJGiN8L40iUywJxWc/c1HQYCQMT8XP\nGqOac8WCXQtL1mpJ6jg1fIfV0dGRDg8PO4oo+89UE5Uii1FNXGyZYjVf19fX1bbnBcqTyaSTjmIO\n3M4ZF5D7njl8G9larjxfvZBwN52brK9ZZpS4iGj8zGv3hw6hf7uP5gnrPehgpFNPma3VdKytreng\n4EAf+MAHdO3aNd24cUPSxXsPV1ZWOu9udH84N3aqJZXi9Dt37mixWJSXyXIcOzs75Vr38+mnn9bd\nu3d1584dXblyRT/+4z9e3tHHPngsTgExnVdL6dlgch5yLtwu55BknpJvliPWaEkXRcxMN7Iuh6nf\njY2NToEz+5zk+2xQsg6FNTWcbzqXqVNYK1MzOqytoRH2uxntUNkh4vqxo82TnyeTiQ4ODnR0dKTV\n1dUyh1tbW9re3u7UJJpHnGMXldORmk6nRR9NJpNLtTQOiLJ+zvWS7jeDG+uS3NDi75qmqZ5tZIPr\n52RQnnNL5zSDHM6770sblAF56pxlujTrsUjWpZRVyluuET7Xc+e6M84HeVlLbXq+M4DJeaL+8tqy\n48Z3rHoNso419T5fNZbz4vlz2lk6l2E/3zaTfeHOSJa6UJ9btpgO5jqs0UMrNk+vj0KQyA6/l7qC\nkUzmwpAu76jI+yjEiaBIlwuBa0xN71zqFpiyfdYBpDKmgfd3VpBWuIz2M1fMgj575o4Ca7n7RPWo\n6ImI1ZAlf84jDoiCnJycaGdn55Iz6cVI48nC8tzVlPxlTZt5TMRjWR6bjkTWYBHJ4Ti4Y8zPM+/8\nPfnC+fNcL6ujSKc+60PoHLqW6c0339TTTz9dovLj4+Ny3IR32WWNnJVh0zQlKt3f3y+v03HBsR0J\n7tZ79NFH1bYXL0l+6aWX9PTTTxcD9ZnPfEZf+MIXJElf+cpXyjvYPA4GClxn6cRarr1TKpWax5OG\niBF7KjnPh2Vxd3e340AQ3Uxk2nyaz+fa3t4u9SWJlNaM6jKqGWyPy455onXkoYO9Gio3GAwu7XAb\nj8elVsSvb5G6Dt3JycmlQMTHRPi9eeaZD2H12p9Op6Wu0+QNL0bHvd6MlFsXEzmnYfX6zfF7nnks\nhXVdrUaNZ4IZmSDiTP2R69AOO424eeMgg3Vgfg71fda/5nzTVvn/DDCI1qROsyNkPcJ15r76O/Oc\ngYp1aC3AJD/zuXS4zs7OOvq7NhbykwFAOoveYJU7IS0L1gt08hiwMiiyLrRdqqF9zCZxzZ2cnJR1\nneOrIVWkh+JI0VHg71RKNApETxLtyfY4aURcEgrNScuitGyb/2fEwbG4vVT6fv76+npnWz8XhK+x\n58x0ngWHHraNgRe/PXMW2h4eHurw8PCSICxD17wAiRrVlJ2VO2Fz99MFpjasHJv7SjTHz0vHmI6b\n+7Ys8mKxbkbvVHyeC8tEzWCbP+kAJVLKSIwy2rbnxd3cKJAF/OnUu082NtJ5im13d7fsrnr99ddL\nKsbGxQaF27w9P8sCDKd0DPtbHr1LzTvX2vY8rShJ3/zmN7W2tqannnpK+/v7un79uj73uc9Jkn75\nl3+5nEVl5yNTJaREXYgOuM/mn2WDc06+8fOcL6e4KYt2HhghU76NJI7HY+3t7XWeeXBwcGmDQlIt\n+Mr5zbm3/sl5IvqQn3uMTA1LKs6QkSgfESKpUyJg9Mgyar5sbGx0Dr6U1HHGzs7OdOXKlY5jaWM3\nn89LetAy7LSeC9B5BMPJyYlms1k584cpXPPMYySKb7mgw8FDfKfTabmWeiERl0zB+rk1PlvP+dkk\n7pDLIIEOaepU/zBtlXLBYNnjM9Vkwp8zKKeuNzmDYKKO5C5rPsdrikS+pXPKlKf5TZTPx5BwZ7J0\nUV4yn8/LC9LZLsdFB2wwOD/OxHYiAyXzn84Y+WV7QBlt2+7ZjjV6aK+ISQdF6uaFU1AZkSbMx4ms\ntWuEhY4EhZnwKNusQbTsT418n710tsn0BCNDO1JUloyevZPIcCUFkc6LJ9zf+TMbhcPDw6IguTuy\nBqsm6sa/ueXfi0G6gGrtbFH4iRxZEWVNQzpG5jOdI6kL4XMea9dauedCsJLKZ0vdLd0ZaVJB+RrP\nF99kL+nS4rdMZFpQuoxImccnJyel9ujatWva2dkpDkvW+vDZnDeiUtK5UZxOp7py5Uo5NdpGggbR\nNVR+3iOPPKJXXnlFkvToo49qa2tLP/iDPyhJ+uQnP6lf//Vf15UrV8q5RUQ6SJl+tazZUHKebATp\njHhcNYNCR9IoyOHhYamTklRSzozMuaa820k6l3GnOf0MGlIiVL4/dZfXGuWfz/M1RksyeMkxkm+1\ndJT1pA/IdUrO/DSfnN5JI0vnn7U1Dtq2trZ0enqq0WjUOX/MaUE7MpYbIlRO41OvOhiYz+cdFMj9\n8o5jzgtlImWDc+Qxcfx01Kj3GOQlgs/gjz+c41ow5mfWgq90KhIQqAX7vta6hE40x0HggI4UUaPB\nYNBJdVHnMOjM7x3YpszbHtVsG2XSMmXnmSk46gYequy+cwxe47lm7FyZL5TvRPjoLDGgTvR7mb0v\nc/zAb79DRGHK/LLULTL1dxnFkdJrJ3NqxiUpUa6muXxCsimNbhLh0dp1hFtTgRlB4EK0YBqqtJBI\nl9+czsiHaT9GyHZ6eIJzIiSpwKXuIloW5Tgf7v5RSTP1NBgMOhGj+bGyslLQFTpVVlKpYP15LmyT\nc/eOotIQpdPlz83/B6EollfLhqO7RNdShvMMLbdrBMcnYNMZ83sIr169qg9+8IOlH8fHx8XZsXHj\ndmH+9tj8vIODA+3t7alpGr322mu6fv26JJX3FR4eHpZzenzfZDLR008/rVu3bpWCd8/JH/pDf0j/\n4B/8gwKDM9pN5JOyZnQza9PY35rjSiNB2UjndD6fazKZaH9/v6REfd6WI9laarAWsPlEdBdMs1+O\n3qmMTYlc1pBOOti1ddi27aW5JD/pKNtwO+3hQEpSOaOOAQ8DI+qf4XBYeObPHnvsscLTw8PDok/8\n2crKSiknsKwfHR110EGm6KizF4uFxuNxZ66ti/nj75zyJrJNnnvbvwuJOUbz12UP/s58qRnvNPQZ\n2NUCh0RoMmD1/+4j59BE5I9tZPCYRLTOlGtpsVh0shj+3g4nA2/OE/tDx9C6r3aauH8c0PCQ3lyH\ntJF2qDLYdXBOu2eZoZ3MdWE7yzSybQk3G5GPtTdYkPrjD3rqqaeeeuqpp57eIz201F7CqvSsCd2Z\nGCHYm5S61faZ1mPqypHgsr64DT+LxXgZRRAurD3PkQCjRLaRkYuvZWozoWEjUtx95H44leI0nr+z\np15DJ/w8w+xE/Lg7LfvOMTGSky5OsDaqwkjJJzS7qDhrpIjesG0fYkqkg2NwxJoIEcl9ycithjqY\nh0RNWL/A1FvC6cy953icsmKtBIua3Uby4vT0VFevXi01KbPZrPqeMm9ZJ2LjPhPql87lb2dnR2+8\n8Yb29vY0Go107949Sec1Ui7Y9An95Nd4PNbNmzf11ltvaWdnp5yy/gM/8AP6sR/7MX3+85/XeDzu\nFPUmypepW6ZmiEwxhZG7fKXL6VDWybi/5u90Oi0v2t7b29Pu7m4H4eb6NkpDxMdk1LUmb0ZULW+W\n7zz0j+05Emb9D9tj1E2+pZxTJ/r7rKeULlAAIgxc5y7Mns1m2tvb69Tj+aDd4+Pjcp3bPTg4KDK6\nv7+v9fX1zsu1XWDutczdfp73GoJOBJ96kbKSfOManc/PDzM9OjqSpLLJwvqA2QevedbPEL1x/410\nmizT7Lfvs462vuUYuLa4a9FjYEkB7yM66bXBZ/vafB6/8/wbneH8N01T3syRqS+uE/aV9Vsso7H9\nmc/n5a0BlEmm39Lm8busS+Q9HANLZJj29HfWI7QdHvsyf6TmWyQ9tGLzXBiZWuNvKjoyzERG5nOY\n6qNSNLxdWxg0hMvqO7i4KVy+Lx0iO2W11JH7wUllLQPz2ePxuJO+Y/7d4yJ5gVqxG25PA0VF8CAj\nzDGyv5LKrq2VlZVSP1WD6W1obPRy9555JV0cjZBC7fGzHfad8uScPw1rKqZ0lphL59j5N6Fo85Dp\nnZp8u26KTgqdQ9eJcDv+wcGBHn30UV27dq3jTHj+bAzdrr8zP1dWVjppVs+Nzxh75JFHdPfu3fI8\npxn39vY0GAyKETo7O9OLL76oGzduaHNzUy+99JK+//u/X9J5uuynfuqn9Fu/9VulH56X7FMqb64z\nOw1+HgOTTM2mE0wZpiI/OzsrDqKkcuK3HdlMdzhtYLlg3SGPUCC5H2tra5cMPOWmlqJin5M3/HxZ\niQJl0uR0SNM0nZPNmSIxf53281EXjzzyiLa3tzUYDEoK98aNG9re3i4bZZzGM28Wi4Umk4leeeUV\n3bt3TwcHB7p9+7akcyfLcp2pZwckDjRynFzDdGz43r408uS79b+duoODA21sbHScZfbFMuR58HfW\nd54vrkPfY1lLvUd5oRzTTtQKuK3rrLsZ6DNgltTZaMLnZZlJreyEOsrjyzSm+UoHlm15HP4+Azq+\nOSFLQmjbsy+skcrNUgxk3Kb7bnua8k7HkDxggbl/aEseVGguPUREKhnN7xKtSuOYHjaNewoIJ5vG\nlJPp6Jl9SVQpc+ZepMwXu22+HysXTTorbIuLlWPm7jwWgLpGytdYEXEMZ2cXB3om+ubizVTQOTYq\nMCstKg86du4Xc+P+zblh8TcNTxoTL0z3g44UUUH3mbJBhy0Vbcpg7hSqXeddSFY2vK9mABlF0SGw\nA0bZ5HXk5Xg8Vtu2evXVV/Xcc89dir7m83mnxoFIrevSjFy5rz7uwKgCjy0YjUba3d0tdUV855qN\n8ze/+U0988wz2t7e1te+9jVJ0vPPP6+Pf/zj+qEf+iH99m//dqeIlTUJKQvpBGQxqNcFgxBTLfhK\nZctgwQ7h22+/XXbkOYJ9UCBGA+W16LlNVCz1jL9bLBadukD3k7UZeZaWg7UMukyscyJ6QCczkWEX\nmA+HQ929e7cU/EoXxuSZZ57RE088UWpZ3KZloWmaUjtjlM9y+MQTT2g2m+nOnTudnadt25ZCdD/L\nY/W484wpz0s6Sh67P89gi8GjdK6P3RejLhsbG5eCcr9qhDLJfro/KRN0qmqoI4GDtDF+tsdBnWRZ\ncv0Ui8G9nrkDk4ec+twmB9ocP20Sdaafz01RdHzpQFK+ySsGDP7cbQ6Hw0u7eRlc0TFK3ri/Jjo8\nDETNY/fDgImJtpU6yXx2XRg3DyzzVUgPDZGykjClIlvmMS9DrR5EVmy1CNbMpLPg5xMGZV+o6BMt\n80QmFGiBIkqQSJZU3/XjiN3CSMPJXXtZrGcD6iiSkYzb8PjZn+wzHQb33f3JQl1+xqgmIyvyzYuT\nxeFUgu4jHU1+RyWVY+Di5gLiSzlrEVRtkwI/q6EDVKKeM0ZMNtpWLLWNAU1zcUaMdH6UgFOljOo8\nfvN8sVh0dgmapywg5X1OeRwcHGhtba1zCvXGxoYGg/PzgO7evVui5a2tLV27dk0rKyt688039eST\nT5Y233jjDd28eVN/4A/8AX31q1/tKH46vJYLosScIzquRGoyIkw5sZzSWSPKxbTA8fGxbt26paa5\neDkvHWmmB1ZWLl6ybWTP17VtW9YRneNEKx1Ysdja/fS4ZrNZcfRIteCAc05jwzEyrcj3zjnNaaTq\n6tWrhYcf//jHNR6PyzEHPAvq4OCg6Jr5fF62mHuuvDPw7OxMx8fHevHFF8tux/X1dR0eHhZ0wIX6\nOZ/Sxa5RjtFGjevbTobT3Ua6PXY635z/yWSiO3fuFF1IXe5AjjqlllHwT6IbbIuBsK+xDqDTb0ff\n88M2vbvT64UbVZxCtt40uiypnGbPF5jXUpEpWyYGB5Qby5h5b0pEiQ6h5d46jw6S+8K0JrMNBDnI\nG8+Fx8xANEtGMuXJFGQ6fj4KxH/nLu8M0kgP7UDOGrIkLXeMUqjzvmXtSl0lx8nI692WjWzNgTNl\nxMG2fD2dojS87CMXLe+XumceJeTq2qlEcKSLt5XTkDONwL+zBoPoWI4/HYFEVjJyTEoF5DYdrVuZ\nUcAp8Ibq2S86ZRkRsS80zF68OU9ECog+8DMq6UTFEh1LRMrXEmXgCe3kq3Quizdv3iw1C1kjxZcl\nS906Pzr9RIGsFLxD0kZSUjnt3G0SXZ1Op+VU883NTX3rW98qp6xbeX7iE5/Q+9//fr366qvFIOdr\nPDzH/i4jdZK/YwrH91EmaJSki/Rlyj35Zscl0WErYM8ZX2lhw0wjTdnys7lm3OfRaHRp/N496zcY\n8Dsew+G+UY/4+XZaGTDQkBtpki7Sd2dnZ9rc3NTe3l6nxlE6d5oWi0XHsTPPPB/7+/saDAYFtRqP\nx+UF10dHR3riiSfK8QdGQO2cE1XmWqTO8Xd2Aiw/lBsHrBwvx++AJJ2lyWSiyWRyKcVEBJq6mHzk\nGqfek7oHRSYxWDIxPefvaAedVq7VgtlR4lEF5A3Tlk3TFMTZgQKdUup56jP3TVIJNvy7VotYmwu3\n5QCEjmTq7CQH+Nmm9cD6+volxNU6hvqYAR11Nx1MO06WNcoNAYBl9NAcKQsNPVAqykSkzBQLAiPv\nGqLk+/w7J89EtIqeMgWthoR5EXNh0KAnJE9KFIewcRp2OlE5PqJmvofnpbgOhwiS28iIlsaM11uI\nOG63beVIheLDD33eVUYRTClSUP3bCtufpdPE+pKcZ7bFv6m0pctQbUa6td/mGduuBQOUIzpPlAf/\n7evMRyKN/vv4+FgHBwfFmRoOhyWt4joVO6J8Xq3+w+RonA6Yi8Z5zMbJycmlV6RYeTs1dufOHUnn\nhvT27du6du2afuInfkK/+qu/Wgyz59XHONDBI4Kb8yd1z+5yCofzkvLhKDxRpVxzs9lMk8nkUkTL\nZ1rO6bAxXcqUjr/3WFibkc4WD8H0GrGD7Ejbc8+1R2rbtjwjr3EbrGUispDnS3l877zzTnm2ZdRy\nwwJ7vrbDmxSm06meeuopvf/979d8Ptfbb7+tN954o/DTZ0ml8+F0EcfFcTDYcr/8nWU8nR4iShnc\nORg5OjrSysrFq5DMGyKSXgeSOnxhvzj37F9+R0TK97qGzc4wgxZf73f82RHx2P08llFk0E7kOWXQ\niNcyJ8a6g+d2ea68rlkfx3mlHfU15hvTcP5sOBwW20A59fqrpbXJf9sd88P9sfymjqasZEBn+8Sz\nzuiYLaP++IOeeuqpp5566qmn90gPNbUndSMMIiLp9WfkQTQhIdiaB0qoT9IluJf3JPJAymsTlXC0\nUEtvJTTPNphXXhaFJp98jZGIjI4JzzJ9yHvIN+4scXTNvDL543otv9eIzzXiwAiC8LSjSSJSTK0x\nKneapZZeNdzKnRokIh+sb2IhZO5ASXTMCAfv804jzkHOT0aJ/t+RXqaGnd5zlOr7Njc39c477+ix\nxx7T1atX1bZtOSSxlupxv1w/xlohzgdTyOT3aDQqBy1aDswDoxyurfJrHaTzlJH/fuGFF/TFL35R\nL774oqSLOh1HdpwX84vpJ/LbP5nyJeTvcXOHD+WcSLH7Y+TTqZFMszLtkOuCaG7qKP4mMmk95XVj\nVNF8JcrOlB5TO3yeI2ZH7Jl2MPJiVNH3ra+va3t7W4899lipvfPuurZttbe3V1JyTJUahfIrXYhK\nSedr8eWXX9bx8bHe97736amnniqI1MHBgWazWVmvbNd1kSkTHj9RfSKXTt2SP5wz6j+md4i0JNrO\nsVhP1bIcqb/8eSJinHuWZng8Ror8mZEZqVua4M1C/s6viyKaSznMbEIiWJYfp8ZYZ0d7zNIQ1jx6\nXFk/lWlBtuP5OD4+vrROfA/TaSyi91xThxqFMgLFNCX5n74Cn2fkyWN3loQHG5syhZj00F8Rs8wh\nkrpwKeE6MlzqnjFFyjRPLnw/w0zNepeawSTcnUVwNYcmFyn7kBNL45Y74Hhf3s8FQGVkQ7psgbN2\njAbZ4/D3rK+xQLnonekk1rG4XS5MQqdURHSePM8J3abD6jYJtafTx8XJwkM+I40seWuZ4Pg4Z0yN\nOQ3AtCjnKCHlnE8rJhbq+5mj0ajU0qRR8P9N03ROjGbtjJ+dTkittm57e3tp6nV1dVWTyaSsCzoZ\nt2/f1mg0Kum/F154QV//+tfL/NJRYB2QeUpnOp3ZDIJ4jechAyz3l0ERee96pMPDw3J2jttlHRxl\nwkePsPYqg6haXyV10hdMCY7H43Latp06OlmuU7Pj5k0BqWtYsNy2F0Xwg8GgPENSOUZjfX29HCng\nWjaniF1/c3x8fCmotMHPtKXH5Xd67uzslB19k8mko69pkOkIcD3Vxsg5YV2VdUeWEdAusL/Wqaen\np2VzBftjouNaW7/U+8sKnF3D6HtdC+f2WR7B4nemFdm2pPJuzIODAx0dHRVZpo1aWVkp7y5Mcn+8\nPjxmy13N0aGeo041z1JH+3fWiKZ8+z47dix3kM7XDc8gdJseo3Wfr3dtMIEajoNpugzgrUPTznq9\nflcWm6eDQQSEkTOpZkxr6E86MqzHYnRDR4TPtHedhsREQ8I+0VDVIhn+zu+8aK0AuNg8Tu768vO4\nm4K1AHRqKOQ0DrUIgv0h8sXtvlZaPKvG13GXBZGZs7OzcijfdDrt7M5hFGCnyn1jcWBGRUTaPH4q\nHY/B8070iY5dGk06TMnTdBAzb+8fKnLfK3Vrl+gwWO7NT19vh7VtWx0dHWlvb68TmXpXGR0/z4Wf\nkw6/eeMt1E1z/soYSSX6tYNBnrImZW1tTUdHR7p586Yk6cknnyxnB924cUOf/OQn9du//duSpC9/\n+csF/fCOMcuT64WIDDEap9Odc0F5TVlm8JU1cVyzdgzzkFMHD+yP1731CZ1DKuZ0pIispHw1TVNe\nnOx6GAZvXvd2pkzus5/l89u4LuxM+6wwSXr88cfLLrq7d+9qNBqVQzfNaxcUz2az4mS48Nl9tYzb\nsTP/XMg9n89L/dTx8XHp7+HhYSeA9jg9L/zcTgiDaK5/FjBnYOI+Uh48F24z9buv9QYT2gXOXxpa\n6wXLS47NP5wP85TBF/Wlx5DHMUgqcuL31uWLoP0Mn5fFPuVL1KnLXPtkZ4LOIj/zM4gccr1kbSj5\nbjnzeGwPiNZ6jK4Rs8xxI4n1JFFok+ePdZJ+LgvJGdB5fEQqU5d8VzpS6Zyw01wIy76rOSTpEPgz\ne7SM9LmryTA1i5GpRBmB2WkhEsAIhM9t2/aSA+J+EzqksvazCX1vbGx0JpMQp3f8WBBYGGzv30WF\nXnAeR+7WoPPCBUBjRIifaTXpQjEsFouyy4xKyhGg0zw8kJNpnFq058VRi3rMl0T4fJZKfm/lRQPJ\nfhLF4UGWiXqRT0w/JprpPtYCAUkdRWLHx7uhbCxt3BeLRdmBY8fU8uPI3H1z6iCDCMv9/v5+MZg2\nSrdu3dKNGzd0eHioyWTSiYwXi4W2trbUNOdpo93d3YI63Lx5U5ubm7p9+7aGw6H29vb0Yz/2Y5Kk\nb3zjG8VZMr/9N/vsYxgSWXCxMonri06Tiam/RF0t+0ylUUlyfdsBdL+Hw2Hnt7+zzNpJyt1J7kdG\n6nTMrE9YMM+XRtP4e/w2wr5WUjGgq6urunbtmp577rmyu9LOj78fj8edFw/7hdbz+Vy7u7udAnmi\n4zwSgzw9OjrS4eGhtre3O+82XCwW5Qwh8tv60WuVAWwi/5yj2WzWQWKo21PvkO/mEY02HRDziHIm\ndXddJ/qSG4H4HV/Ia96lA0J7Y6JOdpE39Y7bs+5nitO8YIrPtFgsinNCGeL35gnfUWg95H7wAFHL\ndg0AoBNpvUo7fXJyUnYPM303GAzK0SQuIWEq0Q5UHutj3tgpTTTK65OZEbeZoE6iqOmzkB7aOVLS\ncsfH1zxIiGt583TAfC8njwbaz/H9Fg5H+L7GELifQeVMg2xhYr64lvYjDOsx1PhT+34ZqkSIW1J5\nsSoj/qwncF9SUVGY05HisyaTSWdHiL/zYmPEbqNjo03H1ULshcr5I6/pbLK/fmYaL85NKhOPPb8n\ngkc0i3Of6VL3hf1ynzLFJ3UjXukCUmfaIaNdG1k7pH62+WVHy0bX83pyclJeK0Q5tRJOtOr27dtq\n2/NambfeequDgNnAbm5uljHaWL766qvl9OvDw0NtbW3pmWeekSQ9++yzevnll3Xr1i1NJpMSAEjq\nIBSWEc6/x0fEmrJW0xecF8su15SVur+jfuDRJ1wHbsuBiwMFyg+DA0mdeUqEl/1msEAnxfpla2ur\ncyq426bjRQdkdXVVe3t7evbZZ/XMM8/okUceKam9w8PD8iJsG1Oi3l6bfIOC2yTysrKy0jl/y47N\nYDDQZDLRlStXCppFxDxrdqgfExmyPjdvanrR37NNrtM03EY5vHbYLp1a/8/vuBYTcSYaQ5mh02O0\nj4Eg7VAafn9u5IWIFPXN6upqOVyX/LN+YzDkPvl5nhepixQl32o1n/7bKUnPF/WXx2S+e90kETXy\nfUZnadfyHuoN3+dn0mHluDK952t4QLSBApORs2X00GqkpMsOAyNnevX8nI6TdPmwvzR8CfsSaaEC\np+J0WzRSCeslzMwxME1QM7hExTgGIm6e4IQt3VeOgYaGxXPz+bykzHy4HpU0n01DX/uM47ZSsLAl\nmuZ7jaR4/P4s0zR0XIhGuc00kukUWYHxPvIuoXH/zWcl+knFSH7bOWQNivmdTjnvraX52K75aaed\n9QCOnv1M3s+am5WVFV27dk3SeYrl4OBAOzs7JW3GQlMXDNtYeJ6uX7+u4+NjXblypRzWuLu7K0nl\nZGIbYjtt0nndxp07d/Tkk08WRW9k5Q/+wT+ov/gX/6Jee+21gvJRTj0PXi+eA6JQRBaTiBBQ/jLt\nRmOQaUTODwu5a3LjfhNtdpDAVLvJCB4NLeeQssu0Hw2T58h8Yw2JHSmeQP+xj31MH/vYxy6dvrc/\nrwAAIABJREFUNeT5c1qIemw+nxe0k4Gi+U8EINO+liOiFXakxuNxObcpEZCcz0wL0R6YJ76OPLSj\n4zYzPWN++YBiox8ZtLRtW+SXNiiRQCJLNUcl0TFf75IIPs/rm/qDZQupS63zvH7tnKTusT1I/jGj\nwbqspmnK6fKUCfeHmRnKomXI7xLl/LqPBhess0wM5ms1Ys7iJALIPrNNpvXSQczAlo6U59sBCkEH\n64ka8FPmfOk3PfXUU0899dRTTz09kB5qsXkNdeJ3JOZhE8LnvYlYMcqtwaZug78Tgq+llfxc5pF5\nn68l8sOomaiTP7PHTlTNEa6RB46PuWAiUJIK+sQj7xlB8pUCmZ92JJq8lrpQvGsi2A75lFEjkTMi\nWb6XqZsH5aN5j3+bb5zLWhoz57kmF+4no06PwXPjiJGvafFziJ4wUuV883OmmbxzhQfhuZDY6R3e\nS2RnsViU2qoPfvCDun37tvb390sKz/M7Ho+1WJzXzB0dHWk0GpWDCV2k6jm9d+/epZSvC9L9XOmi\nANZydevWLT3xxBOSzuunPvrRj+rrX/+6RqORvv71r1/a4u/0Bw9tzHS4PzMP+TvXPefc8kx5oewx\nneb2t7e3L6XgPVde71xPXGt+VY+f5znzHGQayykl7hiSuqiEr+F78c7OzrS1taXNzU2dnZ0V5PBD\nH/qQPvGJT5Q0GwuWXV/iOiqn5KSLHVKeY84va6pMREHOzs5Kql86r4t63/veJ0n60pe+VJBIZhY4\nfq+Z2roncsj5MBLhua6l6WtZisViUY5tIdJDquk+t5kpNPLBlKll2wmmkKQLdMVb+TNb4faJ1BEt\nN/JLefahsUZNyVMiPqx/8nNcx2Z9lkhc01ycks7UN9FMfud2PM5MJRo9slzTlvjHa4d8Z73Zsg0/\nHlOt1mnZ+1f9zLTBy2SzjHfpN99BSgeGn5m42HhN5m5r7TA1ZUbX8ut0bhIepDLnThovBvaP46FA\n53NTodfG7HaoDFicS6GxgNE5ch7XgmJl6bbIKxqKWmqvVpfC/rVtW2otpIttuXR4afgswKenpzo5\nObmUQkwF5b/Tsa7l2HnysMfGugm2S6ctZTFrNlJJsn3D68mbmiPm9A7JbTntdnp6Wk7/9nyPx2Nd\nuXJFW1tbRTG6wLttz0/y5u4l75R68cUX9eyzz+rGjRv6vd/7vc66WVtb03g81vb2duGF03RUoDs7\nO5fqZFZXVzvOmR23vb09vfPOO3r77bf1+OOPa2trS3fv3i3ffepTn9KLL76or371q521Y7h9e3u7\nGP3kJ+dgWSrIay1T1myHae1MvdLp9ny4loY8cPDkMTBtMJvNykn0kkr9mNtyP3k2mbe/MwWeqS2m\nwi3nW1tbJf24urqqra0tPf/885KkH/zBH9T6+rr29/dLoOW5dyrQKRwaVAYJJpYf1D73b+vIzc3N\nkh65fv26pHMZtm7i7i7LFNdJzhsLgvk9U4TWK2kvfG06Jf5tOedYUr9mfQ31BuWCuiJrNX29HQ46\nGQw8Ux8nr/J0dbfB8fhvp9n4mZ/DoJ3OkqQim4PBQEdHRx1nQlJZE7zPMkTeeR0zRWqnjvPCE8S5\nHmtUq/NiDRo/J/8pw94tzlRr8i+DNa/1lE3SQ0OkpMuM4UKgAGW0mYZ12QBr9VNs320lYsXiRgs3\nPXMvIhtTCrgXiwududi9WOxMcXw03ByPc73Og9cKQL04lu1CqDlDHov7yMXEZyS//DcRuf39/fKd\nx8KdGKamacpWaBohOjFZm5BzmMTIJZ9nZyqjFCqCZYuGTnWNd7PZrER+fB5rRei4Ek00OkgZtDGY\nTqflBa+SdO3aNY3H44IicZ7sxI7H44IG8fUNX/7yl/XpT39aP/qjP6ovfOELnV1z0+m07LjiqyLW\n1ta0t7enjY0N7e3taTgclgMbrbRu3rxZNjP4u+l0quvXr+vu3bu6ffu2nnrqqeLwra2taWtrS5/+\n9Kf1xS9+Uffu3SvPu3btmkajkW7fvl0KvRPFowOURa+pI/Je8pjyRmQv5d9zMB6POzUtNEBZQ2LZ\nnc/npX7M49/a2tJ4PO44UkSBiJwx8nX/WQNnx9UbE2ycXnjhBX3qU58q/Day0DRNeRWOx+pNA96B\n6bE70KOxJZ2dnRV01I4TdYwPap1MJjo8PCx9HY1GOjo6KhtTuHvZ+rOWOSCvuZPS47dRtA6nk+Qx\neI3TcfNYjPARkTF5rhhYEY16kD0iscYs2yBSaR3vdliDSTTHcph94jwOBoOy25Q2iJS6h2M2r9fX\n1zsv6SaaSN3pAMvPotzQGfT8kXfc6ETUyXNG5yrXdr4OymNIZ4rO6bIA2faoZpf8Wa0+s4xj6Tff\nQaoNJCk9bE52DZ1IzzvbMtGY0nEjskO0ytcQzqfDlTvWfD/bYB8o8Bnp+p6Eov3MLJz3c3xCLXcv\nWNhtfGyk8l6Po8YbOpnkVRp0Gzqfa+Q+5qIx0uIIu/bcdJgcXaVzSbITkjCuF3A+h/cRQeIz2Qb/\nZwSbznBen/d5oVrpUA7Mb74/TVLZReXv8rBG78qzcaMyXVtb09/9u39Xzz33nD71qU/pi1/8Ypmv\nlZUV3b59W+PxWMPhsKT2RqORRqORZrOZfud3fkfXrl0rZwUdHBx0ItWmaYqxPDk50d27d7W9va13\n3nlHq6urJdV0cnKizc1NffjDH9bVq1cLAiWdozY+qdmK00YkAwCmSskjGpZENVksSwfFha1EtDj/\nTpNTnqhrnI5JBW4ZPzg4KCjfbDbTzs5OcWyI4jolOhqNSh/ZTyKrGxsbHSSvbVvt7OzoIx/5iD72\nsY910Anzw8+kQW/btqQDiSS4/3zpsvlomamhPJYbO2+LxUL37t0rsmEn38XtNb3q5zC9af1Dg5pz\n7rESfaFT5nElb6SLwnMGcgxyOfd0lOmYkYwQUu8zsGqaizPD3E8H3rZx/s5rwO2lLuGp5H4Wgwzb\nDAYB/s1gNXV5on8ep4M195lrzbLMsRJcsENKR8z95f98Hn9nxsKfk9+cA17Hde8+ZGDtcfvHjj2P\nEkpdkPRQHKkak7jYPclUhnRslqFLKfyMcNJYppOTCFfNGSJZ4BhdSZePDiBs7udYIVGAPEZuF+XY\n6FARWXBkZiVDqJJ9snGkoWFemLzzXHiBU2nWFpL7yC3VNe/dnr2h/xpilouNjgkVninnkHUS3ObO\nOaYD5Pmlckv54hySx4TpM7XkvjG6sUz9v+y9aW+kx3X2f3U3t17Z3MlZNKNtZMmRBVmILTtAYiBI\nXuQDJB8zrwMDToAEcmAgtmF5kceWRtss3Ju9sJtLd/9f8PkVr/tM9fhBgD/4vGABBMnue6k6VXXq\nOtc5dQrLDWsPIEWMhMea1Ot1bW1taXV1Vfv7+wXFwHsZgz5WUTK1Wk2/+93vdHZ2pnfffVeSUqLM\n1dXVl8YizBDMWblcTiCr1+up3W6neBhYEUnJlTQ/P6/t7W11Op3EeBGPs7CwoH/4h39Qp9NJbR+N\nRur1emo2m1kWk4WORcMNBQAPMvG+oX9w0+X6lD7z+UYf46qLOY9inKLHelE/r6N0BSQ6nU4BCPE+\nAMjy8nJi43zsA9ZYFPmuWq2qVqvp/fff1w9+8ANNp9euSxaC6XSq4XCYwDHfwW5SvO2MO2ff+D9+\n7vPW9R19yEHYMKpnZ2eFDNTStTGK/nFDJWdw8h3sK+2OAJT7AYbO8JBihDEQXe48K8c6RWDEOGMe\nRXbMZYxud0Oc+jOu6CeMFGQZZYB+djlR3PXs7JfXBUPAgQfXe/yQvxdDjvr4uInrsRtDMW7W6009\nYdBcLzuL6sCH+YlxSR38N22JBhc6mXr5muAy8v89UfSscqOuvag0XTFKxbgFSYUJnANSPMPBGdfl\n6E2vx6xn8L8LnBIBhSNtH8S83wMmo5JCCcXB5lR0nJyOrKMFyXuweBcXF1+iXKH4I5PjFGickF5Y\nOBzMEFCa2y46a8HjXn+/09juZo2Fz5FZbmI4iKUAXN1K4X1en8iMIo+4zRfZO6vo48i307pyoS88\nv4orzuXlZa2urqb7/LeDKK8730lX42Zzc1NPnz5N8njnnXf01VdfaTKZFI6EkZTOZeO+Xq+XtrEv\nLCxob28vndN2cnKSWJeLi4sUH0OyzuPj49QGAPmPfvQjPXv2TJ988omkq7xGc3NzheBn6l6tVpMy\nd/aQ9jmgjf3vGZxJ6kc5OztLDBBHongBgPtGDy/UJbKdng/M5xtjkbbGvgRM8JmDDbKL48KD5bl7\n967eeustPXjwIMkN5pD0BuVyOeWDclckCxcyc2MA3RoNDNfBzElnQ3q9XtpoAOD0MQWIgEnxfowG\npRf62RdtZEwfO+Ph/7Ogx74g4N6ZOJ7JQhvfFxlIXzPcwGf+AyRoswPPCAiYby4X3GWeEoAxzHPo\nAzemab/Ln+d5/6M3YzC6G3s5r4EHj0e9yLU+X3I6MgekfKy5TNGF/r64TkZQ5yW2gXnNOx1El0ql\nZIS4N8VJlVnlNv3Bbbktt+W23Jbbcltuy/+y3Cgj5QVK190Tbr3lEKtU3CKdYxMokcVy68AZBO6L\n/uNc8HGMAXImJiJjrBwsFd9pAOLGao8umsjQuMycAYmWG9aA+9mxPthBA9qOFoYzVU7x5vzVbrU6\nMxNpXSyw/5uSe0/s38gI5O5Hlu4yiNf4Lhsv0QLxMRRZTHfHYPXEuByvp9cFFxrf+3ur1Wpyy47H\n43QauvTyIaMxNgHZnJ+fq9FoaH9/Pz13a2tLz549S3LxhIV3795N57GVStfn8O3s7Gg8Huv58+dq\nNpsqlUqJkapWqzo5OUnM0ubmZnLt9Xo9bW9vp3H093//93r+/Lkk6csvv9Tq6mo6u8/7olKppFQC\njN/IVk8mk8RAeP8wT0lWiXUvqRBgDBvkO7fG43HBwo9sLX3oO8WcxXEWkn7h/1xMEC5x5En8GKwA\nMtnZ2dH3vvc9SVeM1MbGRtrsERlgdGJMKsrRNq4/fRyii2i3x5j5XCqVihtfPEi5Xq8XmJRarZaO\nE8Jl4y4678/ofnW2LvaDMyuuc91FiFxpJ24+5orrBeass518B2PM974OePwP7iZPmOrF2wGrRhud\nOYPdh6113Z1zqcH08T0/rAk+7qhvZGLpGx8Tcb65ro/rqbvKXc/TV8xR6hnjqHLxxu7x8bAZ+hBW\nytvn3h2fE+jmuLZTR4qvWcjA51Ku3DiQiguNC44SP/MF1EFWdN/4OzxAmBIX5lgnv4YSXUG54oPI\nFRGUN/V3xY7fnmf7QuHuzBiTxQTmJ4LB6I7ybdbIEqrT+yK6M2McgU8kjxVwGUZXm/eVKyvq4e/P\n9U2MD/J2eFvj85Bf9NXzd3Tf+ef+XQS4noMHJcKmBffNS8UjNrg/F8jsLimvOwHZUnFzADFAUP2u\n3NklxcK1srIiSWlH2srKivr9vlqtVsG9MR6Ptbu7q/F4rNXV1eRanJub09bWlr799tvCdn7qOTc3\np16vl1IfkGVdUgpsr1arWl9f15tvvilJ+sUvfqGHDx/qyy+/TItwjPNzsIfy5WDcCJ5wb5HegfiZ\nubnrHEgeNO1uLx83nqLAXYj+PhYGisdfeN+SQ8jBsy8mlUpF/X5fk8kkHRZNYbdbs9nUBx98kI7d\nwX2DHHxMMxZzAcnj8TgdJxJjupAxYy5uhnFjNbo26/V6ci+Wy1e7xtjNe3BwkPKD8R1GgMe4sGU/\nhnT4WIj6hL/jtQATXLu0JR6L5Pe5DJEb8xG9xSLuMqVdDsbdMGGuu37mmWSId93FuJCUYhHdwOK9\ngIlofDlgiK47ZOOGneeEc/eyGwVc60ZLNK75PoISrvcNNv69g3BfU5CZAygv/r/r8wj6vf5eZ5+n\n/O1B575+uIGRKzd2REwEL+5fzgXu+gSL7JE/NyJQrpkFfCL4kGbnM/K6xvZwH4Mp904GDR3jPnoU\nTByccau8A4ToF/a2OEBwAMC9npDTmQyuiTvLPGaLQZiLZYsTxRfaaMHFgenxBy7jyCZF68Pb5e1n\ncswaK7PGUQ6MesmNJZ7nIMrPIvPnUDePY5pOp2nnGjFtkhKjguyl4hlu0Tp1ObAIAHq4ttFo6PT0\nVA8fPtR4PFa/3y/s7un3+8maHw6HiXWCFZmfn9fz58+1sbFRCGKGdTg5OVG73U7vr9fr6na7SbFX\nKhW9/vrrkqS/+7u/U6VS0ZdffqlWq5UWFuRE3jFig6gLKSLK5XKy3GM/1mo1bWxspBQNyJRga2Jl\nSqVSAmAe3O2ypj459sPHADrAYyqiFc/BwNL1OZ6DwaAwXviu2+3qnXfe0d/8zd/o3XffLYyZ+A5n\nnRhHcWFj7AFmfHEhsSNxJxE0utHGO/0gWTf+FhYWUh6x4+PjpPt8Wz6FdtOmWQZu/Mxj5+L85V2c\nM+l6yA0AB7jR+HRD0BdzB1XIzUGox+xEo5Rn8B0gwjc0eJlMigeR8xtA44HuDoKk4maXyITybJfp\n3Nx1agoHP15vX99c3m44O0BBVm5I+3v5389qpNBvThRQDwAW4zc3ZsAD/pn3aW58zTLm2XE6q9x4\nQs6ceyUCohwL4YtdziLxvx0g5RZBlEROUHHB9et5Zlx4vV1u4cRdGxQGBZOK66TrbMN+iGJu4nN/\nDqCWSqWXrFY/Zy1aZi7zuIgzIWYpPL6jTUxEFnNn9OI7ojWE3Px3LA5KvC+c2s6BSd8pFJksCs/k\nM6w8V6QR7KOA+S4mWgRk5erqk5iFfTKZqNVqpfxbw+EwXVev19OCSf9HOno6nSYQg6uNpIyHh4dp\nUd/d3X1JpixuruQrlYoePXqkn//859rb29P29nYaA6PRKD3Xd5hJ0vr6ui4vLxNoe+eddyRdWdy/\n/OUvtbm5mWTk4HMwGKher6dF/uDgQJKS27HZbOrw8FD9fl+j0aiQZ6nb7erOnTtaWlrS7u5uem6t\nVlOj0UjB8O7O6PV6BVDqIJ6+BWhEpoffkX1gXNBPjUYjyQYwIykxRfx/eXmphw8f6h//8R/1ne98\nR81ms5B5fDAY6PLyMuUzi6DKx6iDLPqGccf72MXldfe+976JOwwBLuRCajabevHiRRrDKysr6T2e\neNUX0eiqpt7Mm5wB7nWNYGVhYSHNEWek4n0Un4/MfTfg0M38eAoLngcD7CcTVCqVQrJixgrXoQsd\ncHHd5eXLyZgByugR9Jm3x92akfn2dcTDQZyVcX3F2HBQ48+h/hg9zlTGtd6BIbJy8BVZNzcEvH9y\nbkBvk5MhnjLGQ1acKPD54kDV5RZDQ7zcKCPloCIHnFxATPwo1FeBKJ4bBc3n/LiVEe99FXrl82hx\n0inxHhRqXLRzlLJTwyhtrPHImDEAvLNd+fCOaJlGoOGyoQ0RwLgrINdObwvWi8uTxccBSqyffx5B\nl7ebvs/1H7KIoNyLy8bBsFvpPqEcpPg7KN6mONldfs5cSMUcOLjqyAoNK4QbYDKZpOzl1NsXSp5D\n7iYYLZcfTMf5+bna7bY2NzcLB5ASi0U6At9F9utf/1rNZlMff/yx/u3f/i0pxfX19SSDxcVFdTod\nbW1tSVJy8+Eqcbnt7Oyo3++r2WwmV1y0okkCeXJyosFgIElaWVnRYDDQ9vZ2WoTPz8+TG/L8/Fyj\n0Uinp6e6c+dOYRdTq9XSxsZGmleDwaAAhKiDsz/Ilv7zRcP7dhbgZ0HF1ei7utziXV5eTvVcXV3V\nv/zLv+jDDz9MC5AnJ/X4Ltg1xhA6gQXQD8rlWCcHiPyOQCrqYFy/MfbGGQyP95SuAD8u236/r+Fw\nmL5zN5P3Pe+LYMbnDEZQjEcslUopWanfEwvpE6KOJHcb4FG6BiYOopApfed6z2NaeQbAKc5VBy3O\nPvmC70apg3hYJ5e3M/7EYfn8zoW5eH1cN/m1TlqUSqXCWGQcuyuU9zmz5V4Lxr3rLx8XDvId1NFm\nd20i47iL2cGSr/kAVAxW6uceIF8PMVhmlRuLkcoxPd5RPnmiVeXFJ/2rlJgPEn675enfRRDiEzEC\nM+9gt/KYBD4wQPQM5rjQ+mT0giIslUoFZer5g+JAdMDiSsHb6NS/g5VoBfrgd8siV89o6Uag4axc\nlBtK0+XmblIHcF6c5ckpTW+HPzOCea7le4CmTyiPr/B7HdjHxcd/A5oi8HMrtVQqpZQDq6urhbgp\n4mWk6w0DLGrRIkdJLC8vq16vF5L8Sdfb6z1+6ujoSKurq1paWlK329XR0VECbtVqVdvb2/r5z3+u\n73//+3rnnXf06aefSrpSRLjmkLG7KZ4/f6719fUU7IwcG42G3njjDR0dHaW6IO/hcKg7d+6oUrlK\nHor7ULpanEejkba2trS0tKTj42PNzc0lQNftdjUajXRycqK1tTXdu3cvxeyQH6rZbCZXoh/Jg6w9\nBQLyctYslzrALVrfch7Hrh+FA/hksUaGP/nJT/TRRx+lxdQTrp6dnaUxQlySxzH6ppZqtZr6Yjgc\nprEdXUK0m/EYxxQ6hb6NBgDhAnNzcxoMBml8k1eMcx2dhUOfOgMa57dnAOc+mC83TNzQQYa+4Ho7\nAKbOnvhvdLT3E0HkDqi8Da7XeBbzk/a5nnWd5TqM+2BromuL692QdN0OG0ecjzPV1NHXyhhIHvWh\n/x1BepRbpVIpJK8sl8vJm+Jy4b0+h9z4ZL30+CnXkV63uB7nSATGEYYM49XHYfQUOIj+S+U2/cFt\nuS235bbclttyW27L/7LcWIyUU4QULBO37vk8d38skZbmGe7Ci35P7vPfkbmJriU+i/WI9ZaKO344\nYwtrA2Tt8UcxUN0pdtiKGMjHdTnXpLcHi1e6tkz9x60PD6qNLI/3ndcBVO80u9cBSz7uwnDLF3eF\nWxO+ldrlS8wK78n1UbR8uQ8Llrp7H2AZUdec5YNbIBa3GiNzF92XOVerM5q8BwaAA43dJePxJe5q\nWVhYUKPRSBS5W4kkhsS91e/3C1mou91ucsdtbGwkGRMDNT8/r88++0zvv/9+YSegJJ2cnGhpaSkl\n+pSuY96m06lOTk5SrJZ0xUh98MEH+vLLL7W3t5d2WUlXc+bNN9/UdDp96Vy49fV1DQYD9ft9ra+v\n68GDByqVSsmd+OLFi7Rt/Pz8XPfu3UuyYdciMT0cU+N9wbmHPv7ZIYcrh9QM0jVDAuvqTAZ9BOPm\nJwz4Nm7k/d3vfleS9PHHH6cYIuYOLGNkij0mDVYAZpN7pesEpeiZuDvJYxhhc/iO96HHfHzzu16v\nJ9chcwHrH6aoVCoVErnCftEfHrTONZS4yyo3ByNrHOeZs81+P890pi6GX9B217fuqor6hHFBH56e\nnhb0tG8cQeYUr0fc1ODB2Yw7Z85h24jNoh3oNR+r8Z2483Fx0n760IPcaT/rAN9TX1hU34zg7Yiu\nZ5eNB/C7bqMdET/4/XF+0L9SkSGO7FMMu+FZHg+ZKzd61p6XSM9KL+9K43tfFB3Q5J7B/fzEhU56\neUeDu/yoK/e50uF3nIw5UAM1jyKJgMip2/g+BjfvdpcJ7iYW/5xbKbo1+SzStj6YiLtwCtn7gN9x\noPr7HTS5C8Tlx/uoP0DKARLvdjeWv496xnFBO6K7gD5jcriLlXoRMO4FGSHn6EpEMXCNj7EccMr1\nU7lcTjEl0jX9Px6PX4qxQJ7T6TQtjr6dnLPEeBY7xarVqkajkV68eJEWBZ5JTNb8/Lz6/b7G43GK\nO+p2u+p2uylA/eLiIgWNf/LJJ5pMJmm3Xr/fT66nWq2WXA3n5+cp8zb1X1pa0vb2dprb1LdUKmlj\nYyMFkQNeJOn+/ft69uxZajPKn/azK/H09DTFTtG39Xpdr7/+ejoTr1arpfE2GAw0HA7T0SoeG/H8\n+fPC4uKbO+IGg3q9np7pQCIu0Mi2Xq/r0aNH+vGPf6y7d++m9hOcT7wa7/PjOkir4gv7ZDJJ7fL3\n+bhkAXMXlYcC+DP5HEMMUO+yqFarKf6JnWy0g1xo0S0TF6cY3uC6OQKfaFC6geHueR/fDhSje348\nHqfjQHDxutzQNa5XqDNz1PWj1991lwc/O+hxfTGZTArxOw4wmevVarUQfkA/siuVthGHJF27ganj\nxcVFwXCJa6XrWtqIcRL7zOMHXUdFgzKSBOPxWLVarQBWfEzmwmDQ24BPH4fI0o938uKbmbyO1Cm2\n3denWeXGGCkmYQQccSB6ieDJ75GuFxX3z3pQ3iwAx/OibzjH1jh7kmO0fOI6s3B5eanRaFQ4ssGt\n1tjGHGKOSsGBI8jdFV+Mf/IB7YsuStrl6Qye3+fvjBaft3cWqOJ7X4Sk66DTGACbezeFfong2tvA\nfXFLsjM3sX+jn9yfQz/E9rm8aa9PRrfk+DzGFszPz2tpaUmtVkubm5vpPq5xS5H6oJgnk0kh+Z/H\n7GAhelzS3bt3NRqNdHx8rPPz87RrjzHx4sULtVotLS8vp/tarZb6/b5OT081Ho/1hz/8QR9++KGk\nq2Nndnd3NTc3p7W1tQT8GF+M9Wq1WrAIy+VyauvZ2Znm5uYKixiLPYwN+adWVlZ0cHCQ7iVuBfBW\nrVbTjr+VlZUCS3R+fp7ivV68eJEC05Hz6uqq+v2+ut1uYvYYC91uV9VqVXt7e6pWqylZ6eLiYprb\nEZwTy+a79ohzm5ub04MHD7Szs6Mf/OAHevToUQqo51w8jl/xnWIsypPJJC3MjLVqtVrIxzULSMXv\nnGHGGPBnELPiDFHOIAIY+DuRR6lUSjuRee4sHUyhjtHIjgHY/h1g0AGTlzjnkWluF5df4+tBbLs/\nO8rX2S4/z9CDs31xh7lyvcm4gGmGIcbopQBmAb7O6GMUjEajpBMcFCFT+j8GYUd96H3IZ1EvRk9K\nlKXPGV9TfY3wcQoAjOBTejlJNv3KdzC8MUYK4Orrohv/eJFmlRtjpKTZOR+YtDF4PLIOs57rHZUD\nTxQGOJ0dF0x/Z5x0fOef5awsB10XFxeJ9naWZJYC8Xq6fLx9HsD+lwAfVib1woLIMVbNoBA5AAAg\nAElEQVT8H9vktDsT2GXijFGuDX69W6xMiOj287r4s/iMCcrf3mYHz66IYwB5rAvyjPKj0AbfKeUB\n7MjWAZvLE6XqYLnVahUAA0yFM0ywTNEyAizB9khXSmA4HGo8HqvdbhdcLQSRz83NpUOIed/i4mLa\n7dbv9zWdThOQIMAZmQ6Hw7TFfXNzU0dHR9rZ2dHR0VHKui1dAYK1tbUEdnI0/fz8vFqtliaT62SY\nWJXSdWZpXHflclk7OztaWVnR6elpAl8csHz//n19++23qW6VSkVfffVV4Z1ra2taWlrSN998kxa3\nra2txKhUq1UtLCwk0LO8vKxf/vKXkqR79+5JUsrQ3mq11Gw209i5vLxM7tLLy0vt7+8nQLG+vp6S\nlU6nU/3kJz/R66+/rmazWdgphsvW5wxjhnmPLqG+PJOxG4PC3XXF/7NYZZ//LKIwcoxFZ3oAL+TG\nikYbCzPsVHwH/eosDgA3GtgReHAv13JNTq/6LjhnzaOR6Bt7pGICTC/OwvgpEhQHZ36KAQYF48X7\nwTdA+C496QpkLS8vF9IveFvH43Fy9cZksLQRttrXIeonvRw+47o+rhes1bMYIOofx5p7UtCbMYCf\n9zkYRn5sbvA6OVhjjEQwyI5b3K6UXKZ36unzMldubNdeXPidpZKKYMQXRQdb0stb1x2URNbLQUjs\n1AjauJ7nxEnqVkmcWKDi2AYUERYqFrKja97hbJUj9bgIUeLgjjKKCsD/zsU7uEuJ334v1gz38hxn\nBN1aYNLE/vH2OdPjQIqYEj7PTdY48X0x8rHDd0xaxohbH0xaLHIHp66U/D2AC2el/J3l8tVOGg6K\ndgW+urqq5eXlQpZjwATgCCDuCTqh5d3a9fpcXl7q+PhYnU5HS0tLCYQ0Go0U34OVS+l2u9re3tb7\n77+vb775Rufn52nXnscEEbtFXqf79++rUqlob29P9+7d02g0SjvsOJqmXq/r9PQ0ue4YAyh7EoXS\ndsYeliPAhvsAeLguyJvDvYypxcXFgrsUpocUCp7QczKZaHd3V0tLS7p//74uLy8T67S9va2NjQ0N\nh0O9+eabOj4+Tu9bW1tLB+SWSqWUdkG6isk6Pj5OuwRXVlb02muvJRlsb29rc3OzwC7RhsFgkHSF\nL7YsyL64O7uB2+fi4qJw9AhWuRth0fD0xcf1jt+HHH18X1xcaDgcpiNicE3htmEeA7ioD7o4GqSS\nklvT6+7y8RL1irMqnlID4BKNwxzbDpBzYywaSFLxwGOvvzMgl5eXic3lOweiEdDCYJPdPMrHTyRw\nIxnjEqAUZZvbeUeh/nH9Qk86MRHXC5ez61M3VuNa5nL1tcP1aAwHYQ1x74X3oRvXDu54Pu/LxYf5\nnIhEx/9zrj13OVFyHRcBkX9HcRdaDoz5gI/0awRITuNG4BCZnhyTkwMKEYxNJlfHfYDSuQ/wFBdE\n961HEOXMSZSf5yOJbZGKE9cDq/06romWqU9qR/HuPnpVX0Uw7FZyToH7/dTX+8V9+7n3RoDj488X\nXq6dZZnEz/xzmAGnlv0aFJQrGGc61tfXC3JlUaxWq6luvMOpZw/69PPhptOrRHQeIAojs7y8rFar\npZWVldRnyOvk5EQnJyfa2dlJ7IMv7hz1Ua/X1e/3U3+Nx2P94Ac/0L/+67+q2WzqwYMHCfSsrq5q\nb29Px8fHybJ1sD2dXsd4EeTtckeBN5vNxLqwmI1Go8RkEawsXbk3Wq1WcoksLy+ne2GfUY5vv/12\nSs5J8tJyuaytrS0dHBykDN3Ly8vprLvFxUX1er3kaiQf1tramkajkY6OjhLQ29ra0tbWltrtdqrr\no0ePJEkPHjzQysqKut2uGo1GgVX09Ca+oPN+xtTi4mJyTfAd90XWARkAzOOCEQ2VaOGjc3KuL8IX\nYEvctUsdMb58caQ/Yhwlz3QdGHUG88H1jjNu/hljiXrnGHnGGqyF3w/wyQVG+4YAN74uLy9Tugfm\nY9Rx1CGCGt6HG8p1lQNpAICve9zLc/075rvHFzLe3F0GIOE719tuXPv4ikHZORIjpx9zfebXOHBy\nuUWm0kEqfZHzGNFu17cOsNyVR91yBEaq58xvbsttuS235bbclttyW27LK8uNxUhF/3V0jznDEl10\nUtFajbTjrOc6LRn9sfGeWeyY18198Xweg92iq2E6naadE46icduw84jv/IBRp0K9bjwr1ps2I6/I\nBHhf+H0xwNktDiwWrEqnPB25R9nwt/c9snE62etMiS49ZxGhi5G7x3A5W+fWR2SkIruUk633Z2TG\nvJ+x1tzS9oKrpd1uJ9cXiSyx4huNRiFOqFarJRePW8rIE8uScUUbCU4mlob6Xl5eptQBxBhw38rK\nSmLKWq2WPvvss0LsDZZ3v9/XyspKkvdPf/pT/dM//ZP++Z//WZ988ona7XY6YFdSCmzHNcb7iO9g\nDnBen3R1XAsJHM/Pz7WyslJweeO+4jnuNmg2m5qbm9P+/r5qtdpLAc9YpbgUiTdpNBrJyr93715h\nR9/Z2ZmWl5e1tbWlo6MjbW5upjMDcSG+9tprOj09LewgbLfbeu+993R2dpZioWCk7t27p+FwmHZI\nuruHDSndbldzc3Oq1+uFecVZcoxjZ5uIRXJmgvsY/9HCdgYkMuO569wVw5iDKfPn08eUONf4bmFh\nIW1G4H9cY7C1pE2AjYJB8PQPzmTFbfcwsLCfOV1D+3zuexJR3zSCvEulYqZvCmwS7cgFviPLqGvc\nJZZLH0Aak0ajUWBMiN10t7jrLMIxmPvOxqPfyIbuSTd9fXWGyF18zjrSBt9gE9dw5izPi54I17fO\nSLkrPvahrzPU3dtAXVzvwdwiIw/hYI3IzQXKjR0Rk/vbK5obUP5/BFIMpAgK3BXnricfNO4ik4rb\nanOgyoGAd7RPiAi2eC5t5JwsPmeL68LCQmG7KvVmsWTw0TYHQj7RUCbuWnLKmffy/Nh+H3A53zCK\nzb9HmTmt7TS2pylwBezuAeTj9H6ki2MbKQ4yaVsOIHk/RJDubq44IePCk3M7UmIwu4OrWq2mVquV\ngpHZhcOus3K5nOJryMe0sLCgfr+ftrxLV6Cn1+sVYlc86z11pA4eSLq0tJRSIJRKpbTl/vDwUIPB\nQIuLi3rvvfc0NzenP/7xj0k2CwsLuri40Onpqcrlsh4+fCjpKg7mD3/4g/7mb/5GP/nJT/T48eMU\niP3aa6+l3Yaj0SgFuEtX8yDGUQDkOGPy9PRUlUol7aLjuk6nk44CKZVKKdUB3/d6vXS8Tr1eL+RQ\nYwGpVqs6Pz9PQeqNRiMBrJ2dnUIG8efPn6tWq6lWq+n58+d69OhR2jWI+5D3EA8nXc2DRqOh0Wik\nu3fvamVlJbn9mCPlcjmlnWA+cYTF4uJiih9jnNVqtcLxQb4zi52o6BPGvI9Zro9g4lUGqeuZ3Lwh\nTxY5z3zxRp/4pgzGov/tcW5kRwcQcai3z6+cEeSAEBkzvqiLH/HjOsRjV12nOnhE7/rOQ3SX52vj\nuxhG4bqd75Fl1BezAOjFxYX6/X7Sv37kD/d7oHkuhMFBDmNiNBoVdJdv9oi7OP1ZzGFPpUCdfYOJ\n63gHUBFoIWMfM5FA8L99DXH554Ab19br9eRGjwefu8wwSnNrCeX/mYScCNXZHf5GqB4r5H59BxPO\nOknFAemLCgLnebNYp1f9HYFWfMcsZotO8h0pgCmpuGDTJiwMX+gdxMV3uYLMLfgRXcdB7GxNjMuK\nfmvfWeZH2Tiqd7AZg0QdtHmbeaYra1f63j4UucsGQIgF7s/hubTD5ZHbeOAy8wB2Z8d4N0yUB8fy\nPWfKOetUq9XS0RONRqMQBI51CAiTrpNfkqSSg3Ynk+sUCFjs0jVbQyEoGiC0u7ubxuLi4qL29/dT\nrNJHH32UgsVZtDmmhmM/JKVUAp9//rnW1tZ0//79FFv05MmTdN7dYDDQ6uqqjo6OJF2N05WVlcSa\nEf8hXTE5n3/+uQaDQQKdFA5Hnk6n6azAk5OT1P5Op6Pj4+PEhngSTPoNgIRlLynlOgJYwWwxLhqN\nhg4PD7W9va2dnZ00Z2GGlpaWNBgMtLm5mdiTubk5nZycpP53UEdaA7ajOxvtfUVsmlvq5Lwi31Uu\nnQqpHpyN9E0Uvgghe58zbs3zG7DjDKBvkmDXo4N6AG88mBggyzPcMONZ8/Pzhdg3qWhEOosmFRNZ\n+rzkHc6y+K48wEBkQKQiG+vrUSzOZiFv6ueAC7nwHgenPMe9G9EoBJwBPn3XJmsLG1Pi7jT0FzFb\nsY9dH8aYo9zmHTeCWB9z8UxRz+aCyGNx/ZoDUoxVnxcY6nzmYx/miX6MORl9/Yj1jMaylxtz7TlT\nJBWDvyPI4joXWGQiCLp11CxdT87IELGwuuUewZlbynFi+KCIgYyzAJjXyf+HNqYNkTaN9CzFd6BE\nMOagxJknfwaLM5MtHnoqXU92X4jdredACgBBPiR2nEhK1mR0fXqJioa6OECNQLVcvj5XSlKBXmdh\nRt5x4lJmWToAtwjMseLiThsfa5EBcPaOzN8s1ix0c3NXKQvW1taSUjw4OEgMTaPR0PLycnp2v99X\no9FI9YhAEvkhV2dkyNLdbrd19+7dAogmPcKf//xnTafT5KL77LPPUsLNarWqdrudFsZms6nJZKK9\nvT1VKhX1+33dv39fkvTs2TMdHR0l4HJ6epr6C9bJ3XQRDBOAvbi4mNoOqzQcDvX06VNVKpXkAkQ2\nJDCdTqfq9XqFjOrj8VitVksnJyeJ8eKdnjuq2WymNt67dy/lxHrvvfd0enqaZMpuzGaz+RJwnZub\nU6vVSuBxc3OzkJhyNBoll1g0gAighxFAhufn50kG1NkX/hgADMhiDrprzMcKAAmd4Kw548i38FN4\nN8bAZDJJQNqZ8Og2kZSyhQPeXId5UL2zEvSZy8t1pq8hzjr5WpErDgR8HtNXtMcXYWfYo25Bpr5G\nuPHL+yJL78Wv5x3cBzMVE6tSf/SXyzSCUd6L69HXSGf+fL32NSzu1va1MQeE+M4/yxEkcYOQ6+ac\n68770L1TzrgRtM96621Adk4WuNxfVW4s/QHF0agzSpHpcUSbE1xE81JxJ0BkZ3IAx5/ntGPOzeiD\nJT4jx6b4NZHNYEIwyH3iUWcW2lynspDG7cFeF6eQkQ2f+zX+nQMtL+QD8ngDr+dkMnnpoFBJiTbG\nVRn7IMrE2+9/u3wdODu1jPLMKThXsLHvXFnH+3zbMHKPIMzl76wQ7WNR80URGQOeut1uAk8wRMvL\ny2mM40765ptvUl4y3gtbBYhl+ziLvKTEiJ2fnycGCcZnc3MzuajW1tb0+eefp636b731lp49e6at\nra20Q5B8SJPJRBsbG3r8+HGK9Xr27Jmkq4Xy4OBArVZL8/PzOjg4SIrLY17q9bp6vV5ix87Pz3V6\nepp237EDSroCUiS4PTw8TOObsUiM0tHRUdr5GN81HA7V6XTU6/UKOygBbMgQUAXwYoel97/3Wb1e\nT4CUvsAVurGxUdiqDkNydnaW+tF1FSASl68fZYPBkmNH3Z0ymUzSfaQiQDd6pnbmOzl2HJj7tnYM\niWjwsQgvLy+n7Pb+nRtlnljUM7TD0lIf+hvWhkIfue53FsWNUi/oCGf/XScCGHKuNZ6NDnI2y0GR\n60w3DqM+LJfL2Tgtl7e7Fr14HCtydkDM0TC4mf3gc57nTA2/Ac/uPeG+VzEygHlnwakfMqHe0bWZ\nY3xcp8d+BETlXJbUBZzgOjmGdPg7Z3leeF4uts3Ljbn24t+RzYm+TkpcfHM0Y6QcX1UPBzpxAQWd\nguy9XpEx8+flkLkruxy4inFSvuUcpQ1Qoi7u7nNWSnoZlLhcpGvXn3/ukwwQgO/dQZaff+TUqLMu\nTDwGJwoSK9KtgVmgxutOXSN4AUx5P9AGB1q+yLpcZk0s5DwroNzfkyuAXq+bM1goOOk6psVlR+4i\n4p5wRbEpQZLu3r2rg4ODQiyMA0lYDNxOjKm9vT3t7u4m96Iv7E+fPlW5XFaj0VCr1dIHH3yQsp5/\n/PHHKfblzp07hfQHJOF87bXXdHh4qHv37iUgQXwQGdidybm8vNTJyYmq1aqWlpYSuHHZkgRUuorh\nQn5cQxZzADzy73Q6Gg6HWl9fV6lUSvVZWVlJQMzjtnjXYDBITFG3203f+fEh0+lVHqu9vT1JV3Nx\nZWUlMWCueAkmb7fb6XzDmBsJ4O/noqEPms2mxuOxTk5OCpnbfUz6/AEcxAB7iusvNjDwOXLGxReN\nK+obj3rhXvQfAJV7ACjMgQj6kIEv+gSv49Z0dtjdePGZ7tpDNhQHY9Et5HPevRg8IxrlOUOQee9t\n8Lr7WkJxNscZGgBN/C664hiXXj9csLzb+xFdCIDN9bkDKorXjTWH+kc3mt9HuyOQ9M8iGHbihLbG\na/06lyUGNG1xeSF/Z55oUw60eVti/b3cpj+4LbflttyW23Jbbstt+V+WGz9rzz9zZB9jU/w+t/Yc\n4ef8svHZXtyi8Ngbtx6im83r7e5Ify/X5dgTvoufY8mxiyfGOlF8G67/dutAKh6D4vEN8b1uKbql\nhKWCLHiW76BwFoXvoEj53PvJ73Nrz/s6bvF1tixagN6WuLPQ5RDZJi/eTq53H7l/h5xol1tDtAmX\nAnL17+MOKq+DBwq7TGEkCLx1BqFer6e4qtFolCh+6YqZGg6HGgwGKb6IY2Cm02nabt/v93VycpLi\nmVZXV9XtdvXs2bO0sxD249tvv9VHH32k//qv/0pB8wSbj0YjnZycaH5+XicnJ9rc3EwxYGQzx2UU\n4zDm5uZSwPd4PE7PdHfQ0tJS2qVE+/r9fspaXi6XE5MkXbFuJIdkZ6AHeBM4zvNhndi0UCqVdHJy\nosFg8NKhrsyBXq+XXGacL+jZ351V3traKiQp5b1cs7S0lA4C9nHA4cfD4VDVajX1BSVa5BRYKbK4\nx7ntbimKu9LcVcf/uMR87rpudAaj1+uljPgwStQnZm93necsLjqLjSvO9uMuGo1GqS0xLMDnr8sV\nl6WzM7Sf+9zF58+mzh7n5euSewK4lrZEj4W77Vxf8Zn/758xT3x9I1Yq9gn9iEzZnYb7PDJyfiyO\n93OMp/O64bnw58VwGG+Hx4u5nH0NdhnPWr/xqsDoI1PWaHS/uw4jE+XF5ZbDHq8qN3pEjPun3f3E\ndz6IvXN8q2903zld6ILzv7kuuuR8wfVnuBDjzo5IK8b3xLpFwBjrzAGqEZz49fE+qNgczUm7oH5z\nrqp4X44ijyCGAc6ZTXyHK5CJkMsODNjIKSnqk6OUZ/nDXVFEZRgntFQ8UDjGUHkgql/n97vS9B1d\nsS0eixFpaV90AEiMG3fvlMtlbW9vpzQIfiwEu+iov6cVQKkuLS2pVqvp7OwsxSxVq1Wtra0VQDLf\nvXjxQtvb22o0GumsPgDR/v6+VlZW9KMf/Ui//vWv1W63k6xqtVoKIh+Px9rd3dU777wjSfr000/V\naDRUrVbV7/c1GAzSM5eXlxMgKZfLyR1B+0qlkjqdjqrVqkajkdbX1yVdufZIf1CtVpPb0OXmO8U8\nSL/T6Wh9fV2tVkt7e3tqNpupHaenp1pYWFCn09H+/n4hoH4ymajdbmthYUGHh4fqdDppASmXy8nV\niXLnSJpGo5GymhPk78HPABvGFAvZ8vJyWtzYoRmPLJGKqUgYT7jdoz5yoI9LzMeuv8MzYpdKpeQi\nnk6vM9H7/CIHF24fAHG/3087yOL5dcxfAJXPFeYkOy/dYMTtie7y+NAcwPG5y7z1sADeG12CLlPX\nDVEnMt5ybiDqGF2CXnJhA9GYm2WIo2/pNzc6yYPmIRjoYNyH/jmbJnzd8Ho5+ImAzceUgyXuibGx\nLovorvT+i/o3gpoYv+RGu6/VOdn52sp4zgE3xwe5ciNAKsfSxMBct0z43zvKOyAyQ5HpYtBHNA17\nQp1y90cg5ZM9x6rxO8eexMU0AjcUwtnZWWp73GLs9QMYuV/YZexWU87CQTYeb8Dn0ece5T2ZTNIW\nemerqK+DXZe5x1J4wK3X260tnp+bRMjMA1EdBPl1OYYTWXrf+I4mZ5m8n/jbF7PY92z5jkwZz/Zn\nETjLZ8T8UOr1ekoZ4IzHcDhMC2/c2dnv99NuMIKrHfCenZ2lNjabzZRYcjweJxas0Wi8xHL96U9/\n0ocffqjvfe97+t3vfqeNjY3ULoDX0tKSdnd30/EpDx480LfffquFhYWU6yla4+fn5+r1eoWFliSc\nl5eX2tvbSwHXFOK4Tk9PdXp6WkiTQODyZDJJweIebF6tVtXpdFQqlVSv11NcFuOh0+mo2+1qZWUl\nPRNZnJycpHgrz7NTqVQKgfLEsrVarRQXValUdHR0VOh7mCh0FIlap9NpYsSiweBxUNHQ8kLgsxuh\nFMaLL+LEyw0GA43H45eO1XHwHVmG8XicAu4PDw8TI3V6eqrRaFR4htfVAZ0zNj7HAIWA00qlopOT\nk7QLK7e4unziAcBe58hI8W5nYCILhfz8ep7v4DTKPzJMDrCkIoj0Y2Fi2zwONbJYvnMZ2Xl6E2cX\no25HbnE98fWK7xzY8T4/fskL66XLIm6eijsw4666CM6oO2kZ+M7X++hRoJ3udfD6OQDzdnnf5MqN\nACka4gJ3cCS9zPRIxQU957abBWxmuQn93U4ruvspChUUz6CIuxAc5efa4J+76y0OKLaokiDOF163\nrgAsUbFRVxZoH3z+fe47B4IRhfv1Dpx4Hp9HgIEyy2X7pv3xN+/LKUGvD5alKxXkG9NkUBfaFunq\naJFGCj0nZ68b9XHLN/YJ1rTn1OFzTjSHsQFknZ6eJpBBfT0BJYsf/7NhYTQaJYaL+2q1WmKTjo6O\nUp4o6XrXXq1WS2yW57uZn5/Xr3/9a/34xz/WgwcP9PjxY0lXLsFms5mykddqNX322WeSpHfeeUdb\nW1s6PDxUtVpNzIaktOtwPB4nBgr5kvxzOBwmMAjAnE6narVaajQa6vV6iSVhwTg7O9P6+rpGo5HW\n1tb0xRdfJNns7Ozo4uIiMU6VyvXhy2SY7/f7Gg6HSRa0fzgcpms9NxUuQRZ7LHuuI/AfEBcBGODS\n3Y24JH2MUdCdjDVP5AmoRj85WHB9xqYRd6XBXkS9B9gdj8cpU7zrb+YtTDWuXtrIgdy+GErXrj3c\nuJHl8va6cU2KCgwGX+SiPvWxn2PUXb5xIafkwjuiPkF+/n5n5HOAl7nv45428C4M5ly4R2StuAdd\nyLmu7vHwPHde3JXO3677Li8vC260yJC57vc2UrdYT38/oC6u2c42er7C+NychyquM85GRRAY1/FZ\nBMmscmN5pFAALpRIm8YFK+emcQCUE5yzBjlQwPc++HOupPjuyIJxPZMzUoRex1gPJpIzVQwyXAHR\nkpKKh9ZG1IzFEAdZHAw5CzEqsuhT5zeT1d1qruzZSs3fsT7uFstNagqLQW4iu8wcaPO90+LeB67Q\n3c0GMJzlKuDzCOhcbihkbxMWJM9nt5h0tWDW6/U0Xk5PTxPTs76+rkqlkoDU8fFx6mMWZIBE3GWD\nWygqNrKX3717V++++67G43ECPU+ePNH29nayvuk36Sq+olS6cvH84he/0A9/+MPEQJDZu1Qq6fj4\nWGtrawlwbGxsaHl5WZ999pnef/99DYfDxOgQq7S2tqbDw8PUTr7DeibBIDIlj1C73U79vrS0lJ7r\nQLRUutqx50zP4eGhFhcXk8uQHWZkDO90OikDOUAT9ypjsdfrJYYEcFyv19Mi7WARQEr2cgAhu6fO\nz89Vq9USy0jdfVy5geIWd07xs+jEWKiYxwlGjDYwj2CiuN/dRmxzx1UnXemdVquVdjQeHBzoxYsX\nkq4TwgKkHBTAqLreirrdx7XrBY5TYnz4fHNdF9l1xjR18bnhRxbFtcWZGP8OudBHrof8vXEdcjaf\n+nqKCPRWDBuIDBwAweOUGCscVwQ7ik5gjERGLK6l3kZY0EiCsH5hOMb1za9zOVCY53EtdYOduvvz\nXMe6PvZxE+uRc+vFZ7uB4t+9qtx4ZnMq68Aqgo8cEpzFTPh9lEjR+vty11McrUa/uwM9f68v8v5e\nB1252CGvq1sG1NkT03G9M1X8OGp3atgBDP9T3+hqcjlRd7daqR/KyJUcn/E83El+xEClUinEDxEz\nEhk03o2ijdZ1rj0RzDhT5f3p8oiWkPe1T1a3DmO/0W4K73TQ6xnFYZ+k6+SSyJkz1CSlWKbj4+OU\n/RoXU7/fT3ViAfZJ74uW1wVAcXh4qFKppAcPHujjjz+WJH311Vfq9/vp+IRarZb6kKzdFxcXGgwG\n+tOf/qTvf//7kqT/+I//ULfb1erqqiqV4nEun3/+uT788MOk2Gu1WmIr6B+Py8HNhpvT28N1uO2m\n02mKTSLbuqSUDbvVaqWxR7sZY+vr6+r1egl8SUouv/n5eT169CixMPR/r9dLR6D4+Ot2u4nFgFX0\nBKS4ttzNwndnZ2dqtVoFd5RUPIMyxxD5vHUm3DckxKNDYMcWFhYSuPNgesZ/Tic68EC3oTOYU8zR\nr776KqXNaDabBeDi9fF4QPSGMwPOWPFZLO7CkpSAqYMU1wvIl/e68edpW2AZeYfr3siuIXvYwMhy\n07bIcmF0U9/oofH+dmPZjWcMWHfd0z+AJdhv0ktQZ89BFw3I2BfULweGousuelsiGxX7LxdLyu+I\nA1iPXafTPuaCG9gRMM0Ct9Er5d/lvFqF9s/85rbclttyW27Lbbktt+W2vLLcCCOV84NH2s9RfbTK\npJfPBZrFRkl/OUaK/ymvYsmcLXHmxe9zRO/Pd1QeGTF3F/q9ntk310be79aCdL2Tx5/p97q7EuvE\nLbMY60OJKD3Wh3uwUN2K4L0wUl5XLM/4TH93jH2Iso8smltApdJ1+oH4XLfA6V8fa96fbpG5tUMf\nYtESB5Qbd24V81xYClg8/77RaGgwGKQz9Zyx6Pf7Bfrft9Xzbqx+2nN5eXU0DNv/+NEAACAASURB\nVLv5Pvvss+RK/Ou//mu9ePFCv/nNb9Rutws7AwlY55iUP/3pT8m19f777+unP/2p5ufntbq6qr29\nvcQQvHjxQqPRSN/5znf0+PFjvfnmm4VUEH4gM+d/IQuez/xxFwW76y4vL1MyTwLDp9OpOp2O7ty5\no5OTk8KzeD5xUCcnJ8lFSX0Zw6PRqBCsPhqN0u5EUkxISnFtHNexsbFRiO1jbFxeXhZciXNzc1pZ\nWUnsoo83DxFgfrubh0SMuNr8iCfGYHQPuWuOQHafF7QNViuyuDGmkLHIYc/I/euvvy4wb5eXlynj\nvVv73h9eN5cD7fTPYLDRXe6+jjrDC0wGzJjHllFc57vegrF5lYsuriuRifFnoveIRYrhFegw+j4y\n3vQFOoHvnYVEH/FsXzdhgmg/qXXQpT5unPmMupV6OKMY174Y8+WydsaKQqD5rF3m0W0X5eaeIR8D\nUR97G+J64mM/d6+XG01/EMGLL/qu+HONz4GK3MLl18RrEVr0d0dXTw6IuMuM4oPBO5L7ZlGE7Jbw\n2IroovJMuV5PlEKObuV6B6HRHeauPRSauy9zz+Td7pLzukXFIhUP8kVZOiBgkYnxaUxKJpaDRV8A\nfOAjU+ofA1wd1MZ20g+4LqPCwN3CIufyceULre71c1eMj3XiMYiT8bgN6sd5ZJ7ZnN2dBNxyFArP\nRG7UEeVDgHe73U4uPOKAPvnkE/34xz/WeDzW48ePC1n2J5OJWq1WYXz+7Gc/kyT97d/+rT7++GP9\n53/+ZwKKjKfV1VV9+eWXeuONN5KL2uPjut1uOryWQ3wlpYznuNsGg0FyeQK+CDp+/vy5SqVSAoTj\n8Vj1el3T6TTFJVFYmDmrkENfpeucR2trayk3Dy466kVAvR/Ci/tpfn5e1WpVw+Ew1ZU6jMdj9Xo9\nHR8fp2eurq7q4OCgoB8cEDHucDF64D/uLOaTZ8PHJco9vquJcToajTQYDFLby+VyOv7JA+l5JmOb\nnX2ckShduUSp997envb29hLIRK68Z25urpA2wtvJLlPGm8vF9RfzEiBExn2+o63+29vvbkZ39cWY\noXgWKz+5+BqKz21AbXSBUpdXHYTsYQQ8N7bHQWHuulyOLdf77qKkvnwenxU3UeXWtigPX/MiqKfN\nubXU18hIPLixG6/1+nBtdB1j1Hhf+3XMj7hevKrcCJCKbBPFhR3LXwJZOSbqVWDAhR0X4Rwb4u/j\nJ7d7y+vkHcDCnrvfF+b4DAdmOXYJ5ULOHJ/AXEPshreT9/tgoj7O0ETZuH+5VCql+JOc7GKOllm7\nHT12ivt8gDs4iLtpnNny4sCFNrkSnjUuXLEDcCi02Vk2f360RL3Qdx7z4XEVPGNhYUGvvfZaWoS6\n3a6WlpbUaDR0cnKS+ky63ogAMGDXG++LQNG/YxFtt9taXFxM+ZlOTk707//+7/rud7+rN998U0+f\nPi0k1uz1eqltZ2dnaXH85JNP9JOf/ETvvfeeHj9+rLfffjvFQVUqFR0eHmp7e1vtdrvQf8T/efwT\niv3s7EyDwSCBneFwmN5XKl3ll1pdXS0YACwcS0tL6Rw+gCjfVatVHR8fp5xcvnAAwFZXV7M7iZBj\nPJSbpKCTydUROLAX0vXuu/Pzc52cnGhhYaEQp8aGkbiQ53YzurHCoj4YDBLYQg4AeYCGz0tnjTkG\nB7l4igKPTYLRY74BtABEMOCMcQ+oJz6M+DL6h/vo8/F4nOLy+M5jhdyoof6VSiUlLfVzJt34cbnB\nTCMjZ8f8sxyz5HVx/YGs/FoK8qbdUcc7iIk6aRZ7ghxgbCIRwLNyejGuaaVSqXB+I3PBUw/4vdwT\nmUnaHdcSH2+lUqkQExfzlEXZ+H0RSHG950rz+/w53r/kP0PWcY3i+d6W3HiI5cYYqcgeOYiKSJXv\nvTFxh0ZO4H5fpPykIkqPwMgXzdyk4v/oTpo1ESJlGJkcf0cESx7E7c90Cw2ZudWAAnXZxOKDLLJr\n3o4I0FxOEehwTw7J85mDPs9cHCcoLgyXQ2RYfAL44M/JiHpGSjdn6VLf2CYOZEbGUtGV6u2Myozn\nO5CiXXNzVwcZLy8vJ3dSr9fTcDhM6Qg8WauzOsjJZSEp5aSKliesG4kw/Qy3fr+v3/zmN3r48GEh\ne3mlUklsCu0FxNRqNf33f/+3fvjDH+r4+FiHh4cJnJE4czQaqV6va39/P7Xv4uIiBYS7K0u6OhMP\nppSFF7AwnU5TmgG38gFjyARZT6fTAmDl3cfHx2q32wXGl0Dx8/PzQjbxvb099fv9BOaRnXQF6AeD\ngS4vL5PLlPd5biUysR8fH6f6EhhPADjtx40Ia+JuL85J8/HjDDvti/O1Wq0WNg9wuC0yLZVKaVcl\n6Upon+sBZLOysiLpWn+Xy2Xt7+/rD3/4Qxo3tI/rfBfwwsJC4aBiX8Cr1WoBJLixCVij3cvLy6lf\nB4NB2gzgoIPCAuqbiCjMcXeTIZtZBhj/+4acyMg7s+X9BLtHHzpzGg11b4Pry6gzI8ng/eZrIHKh\nbgSfA6Zc3r4zm3t97UA+zvTzXW7N8Hrm5OUGfJS3h7FED5DLgH7MuSf5cfZzFlDKAdJYbhxI5cAS\nf3tnOCDwweyf+//8nXtvfNer6ujv5D63TCMAis/09kj55F6vqrtfF60MR+xOO8f7uNcn8asKiN2f\nH4GGKw1KHNi5+jJR3RqIC0DOEgIkxFQFOTDF/65I+d9lG60l3s3kcmXlcgFE+ER010fsa28/MmH3\nFIX6AZhY7LDkWVA90Sk5lIg9OT4+LgAN2k1eJAABixPt8ASZsGylUknffvttSnopSQcHBwUL2McT\n7Ngf//hH7ezs6IsvvkgxSZIKC/Pl5WViQVjM6ANneZrNpvr9viaTSXL9eU4n3FbT6ZXrDIArXadV\n6Pf7aRcdfQWw63Q6mpub0+rqagGc8myMHZi1w8PDNKbYbk992IXqiw8A6uLiIoFBmEXGiOeBg7Vi\nYQNkYnS4BQ+w4jPfpQYAcZemg3Zip2JsGCwPdcoBCk5emE6nWl5eTiDIgfrjx491cHDwkoXP+Ped\ngs6kkXrAY6Y8/YbPRXdn0U/xIGf6zRdzxgJpMVy3M58B5jD9tG8Wy4PB6u2Nxrwv7BTkgpspMln+\nd3wvYyKn1328R1adfvb1wvUVLmQ+c93H/25oevt4ro8FXI8w57MMa36cdXNZ5tbVSJrEEtdQ3OAQ\nDuVyuTAOkRVj1Q2uWR4Myo259tx95MU7MS5IdFSOvXFGK4KzHGp1cBDBEt9Ht5a/j9/eDlcW/uP3\n+n2UHADJsVk5dsMRtS/s0rVF6otELlbC2+PFB7bL2SeuMz456nmWjN1SoN5Yq94uf2ccDzEFg/e1\nB3DGieHxVrHdLPTOOEUmh2dFxeegKrKHbgAQW+Z1ZSHAFePsCYkuWeR8PHi+GS+0ATZrbW3tpQWL\n3Ee0mffV6/WUUqDT6SSw8OjRI/3+979PR7y4S4ws5AcHB1pdXdX29rZ++9vfSrpKyPnWW2/pm2++\nSS43cgytr68XkpMuLCzo6Ogo/Q0bA2hyNxT9WKlU1O/3tbOzUwCgk8lEe3t7KpfLKQeU98/p6alW\nVlbU6/VSbNXFxUU6VoO0G34sC0BnMpmoVqsVguFZYKrVqlqtVqprt9tVo9FIwDfGazHWvH8kpWN1\nnFGLqTdgKXEn0vbpdFoINneWi884VodYrnK5rKOjIw2Hw5SGg7pwzXA4TGklGo1Ggammbl988UWB\nOY3jH+AQxzCLpqcxoF78jgHV7sql0KbFxcUCm039y+VyYuP8+VEfuJ7Luboo7gWgfa7P0RU5Q5a+\n8jABCuPU3W2xvr5eevF1ahbYQL+78Ukf0Rf0P2CRseRMofclbfF3+LyL6zHPiW3n8wg+vU2RJYtt\np66+5g0Gg5QPD4OD90W2zcNjcu8p1HfmN7flttyW23JbbsttuS235ZXlxs7aiy45qbjl05Eo1CwI\n0+nKiJBz1rn7cyND5MxERNZ+vTM3zkJFdyT1iCyNF0f9/r+70WL7Irviz3Y6lffCXBDP4Qh7Vp9E\ndsotIH8n/QFl61uTx+NxsuTdx4/FBXPjfeIxYlht7pKIQZ65MRNlwv/4+qPF5+xh7BtKjKOItG9k\nP93diwvMZe50+Xh8fcgorrVy+eoYHZI7eh80Go3CziTq6vFafvTIdDpN5+mdnp5qdXU1WZewVH4+\nn7sEkRf98/z581Tvv/qrv9Jvf/vbxLJQTxiX6XSqJ0+e6P3330+uld3dXdVqNT148CC51VzGjAt2\nE/Jetuefn5+r0+mksYUsSYwJ++IyJXM8FrufUUiMTr1eV61WU6/XK7hw/Pw2d7kgU+arxyzh8iNb\nOi5FSYnt2t3dTQcQM86azWZKIIkrC/YHGUyn03TUj+90JZi8VqsVDl5mZyR95pnDYfjm5ubUaDRe\n0jVY+hzl47GCyI34KM803+/300aCzz//vHDWoOvZ6M733WG5mB2uja50nussD8/2eCTYHK7Bvcwu\n11lsDnojegNiLA/1ZE1wN60XxpE/09/HNTzX3cRRb8d0D9ENi070NcjlzHfI2tcRZwvRYd5+7wNv\nB+uWxyH5dzyLMA3azXPdC8C7WfMdL3gfuPzi+3xNoO0cW3R+fp7YXmSLq9xZOu8Xdznnyo3HSPnC\n5YMxN9h84vgOM3+eDzCnKqNwfeLOKgxOFnJKfPcs12HOt0vxyeH1jT5i6pkDOu5C83bxXQRl8b1O\n10a/eK5OFAeCXi9++0Sb9Z23sVK5znROfXzBQBky6aPvmzrlQBZtdFBHvXLyczrZY6GQl4MlL1zH\nZIwuDL8vN24mk6szBQ8ODrS9va21tTVJV2Ot1+up0Wgkl59nRC+VSqrVappMJoXA4VLpamcZrqle\nr5dinXCF3blzJwVJUwAvvgGA2JMnT57o+PhYDx480N7enjqdTsrbtLS0pOl0qrW1NX311Vc6PDzU\n22+/LUn6/e9/r/39/RR8PhqNkrsQMEgQt8fPICMCyl0J12q1pAhZyPgtXcczLS8v6+LiojAPzs/P\n1e12E9B1fQKoJf5pPB4nQNjpdJJ7lTgij4cCfA4Gg7RxgL5/+vSpJpOrc/iI+aKfqPfCwoLq9Xrh\niJzFxcW06CNj6Tr4m+t8LuB+Qx+6C5b7PcM+4BOg7ptU4qJHigXGLMAWF9XPfvYz/eIXvyjMfQfl\nxKjEkAx307iuBQSib3gm+sLnl8vA527UGeQGI5YoGliMQZcr/Z0zkh0k+G+Kt99dl1IxX5LrYAcS\nHrrgxXVWdI85gHRjNxqOHmLgcyjKk/7MuUEjkeCyQQ/6dZ4LDpDHdRE8x3f5cyjebjeAaF+Mj+O5\nfvIHxgt9HGNdGaezyo3FSDnAkfTS5PJdbc5QRLTI4HOrZ5aCiXXIoVj/n/f5/17HaAnwfbw23h9B\npC+yEUhxr6NyZ8vi5z4QmdBY/R6zwOSPu7n83T4ZKLQ5go/c79inKAQGpstnfn5e9Xo9WYIocBZP\nHyteFwdC8XsmDXLwAEhn7mLxseH9y+TKxR9g5ZfL17lwYEpcJihNB7T47OkPtxKr1aomk6st9eQu\n8tgy4og8jsfL6uqqer1eSnYpXSmyUqmUzoaL89APl4bNka62+B8cHKjT6ejh/9nRxzEg7XZbo9FI\nKysr+uijj/SrX/0qtYHz8J49e6aFhYV0jIyktHWfmJxoffti1+l0EgAh2Ju+JUkodWUbPfmLYGZ4\nJgqzXC6nnY2SEgBhwRsMBinHFs+o1WppPLH7ENZoOp2q2WymYzh4X7/f1+uvv54OknZjDPah2WwW\nFmr6pNVqvcRQXlxcJHYIoOnjGz2ZM8zYRQeocJBFDBvv8DkOw8kBxC5TjIb/+Z//0Wg0KuSDijFR\ncS57ey8vLxOQnk6vg+jdoGQsejwl7ZKUGCe/L74PMO9B+sjW3+9AivfHXcC0ywEo8o5y8A0RPCN6\nYCgOUKKxHAO6I5Ci/own5oV/F4sDCNoZGfrIzOWKr7sRADrL5aQJ+jKuHb62u072z+Oa64SLvw95\nU38/gzC3WzIG2Odklto185v/H8sslia6Lf5v7nEw4f/783Lo2d/B9/7OCILid3TmLKZo1jt8Usd3\n8LzYabPAoE+sOJGcYp1OrwPzInjMBWo7xctzc4MxFpdLtIAdFMf6475hMfBJSpbvyETmZBHZTVeC\nUlFhRyvPxwoTOwaMswPF5e7bwwH8OWofWXhQPbvTYHVYAE9PT3V4eChJKdXAwsKCTk5OCophaWkp\nHUrKOIQFKZVKaZFst9sFWhowNhqN0s4uZMN5egCTqIiazaZGo5GePHmihw8f6sGDB5KUDk4eDAZa\nWVnR22+/rW+++UaS9NZbb6W+63a7Oj09TYxbo9HQs2fPtLi4qFqtVkgQiTtrY2ND3W63YD2zm+/O\nnTtqNpvprEHaAWODK40M5rSBnEjIzXNeAebn5uaSK4Ax0mw21W63dXp6qmq1mvrZF3wHh9x37969\n5L4k672kJGvqOBqNCocrw4wRkM9iT9A4bfCAfT5nUwGg2cegu0ApjDV3+zDWAa4LCwvJJeb90Ww2\ndXh4qMePH6cx4uwK4zsyHa6/+d4Xa8aNt5P61Gq1wmLnQIh5mtsUIqmQnsJ3s+JKY065Tndg5oCX\n+vE+13+uRzC2XP+7PGIoRFyrouHphjLGAe2gzrTH74061HeuUWi760fX0dLLa6n/xA06sR8ovuZ5\nyAxj0Osxi5HzZ6MDeJ6z2xEgwyzzGc/if5dnlH8sN8pIRQTti3kEKJ5QMqJzR/VxMeV+f7dUzBvh\nCJdrIsKN9fe/fWDm6sX7/PscUONZkcl5VT1dls66xIGNBeM+aWepooKL7Y1AA/lGpcgPLJJbH7CM\nDMw4cZ3idUXkTEUcN3Ey8z/vyQFwv86vl/QSUPLfcTcKypFrsHLoo5gIkd9RwRPDtLy8nNg42kuu\noqWlpcRmeH14Xr1eTwkfKQCtxcXFwjZ3j+/y3VaSkiXPmIFpkVQAH8PhUM+fP0+AaHt7W71eT51O\nR5PJRFtbW3r06JEkpV1xx8fHWl9fTwf1Ikd2WE2n05RLSbpypeFOK5fL6RgS6frIEel6dxdJO6Vi\nhnrkRjuWl5c1NzdXcDGwQMMI7uzs6PDwsJCvaTKZJFchYxR537t3TxcXFzo4OEhuWMBwq9VKQI++\no54wYIyZxcXFl2SDm1K6XgzcZc11PtaY39F16ZY+xwj5PHT3ui/OGA3Ulfgqxs7S0pJevHihwWCQ\n2Dj6h3Ea89rxXPoqzlfXa/QRdT07O0vPR67OWJCtHpYwB+oODw/VbrfTfaREmMWA+5x3wwRWmLa6\n3nM9lFvzaGduIWc9jADJgUJu115kI70f/bn0ZXRReh187Md1x3VnjMHiWg+HcD3Dc2gPrlpfj3yM\neHtyxriDb9fhs3atOyj2eqObfU3PvTeWGwVS0svAwhfZiJzpeO+UyP64cLhnFqJ06yKHeON1uef5\nPQ4aaENEuLHjve0OinJsGiXe6+62Wf5ip/ula4WCTzwCy1z7JBUUBrLwNmHFovy8Pq5QXKnEtpbL\n1/k9PKgWl5wHHEcLNgJQ6Gh3Fc9iF70OswAY10RXabw39hvuPMaO5yDCxbK+vp6sfZdNuVxO8Svu\nlkRJ+3EkvtUX8BHbPD8/r+Xl5bQVfG7u+pR7z8nD4k+fE6sjKQUj4xJ78eKFHv4fd9/nn3+uWq2m\nra0tSUrs1tramobDYXo27WPx6Xa7arfbhQBnWB+UI+0hfgSgSYA0rkaAFnpkNBolgFIqldK5hZXK\nVdZ1Tz2Aa+ji4iJtoZeuWBfG9NLSUkq5gNx3d3eTpdvpdNJ4gz1ifHKeHfcRXwWrR/txazK2PTga\nwJ0zPAEC5ATyBdENEs73gxEtlUopEJeUGFzP+ML1x5jwTPPkjiII3ecHLEXUP4yBuB7wd2RXuI/4\nQOLGXJ8AfsnHxoYE5Mj1zBt/B6DIA6f9Pmf6vS+8vq7znIVDJ3i6BF/0ud7/dtbvVSW6cX1ceCxf\nBHSxHyJIieuJ3xuD2ykuG/6OOtbf5zJ2Wfi6lGP4Zxm4Odl4/WcZ2G7Ax/v/Uh/cpj+4LbflttyW\n23Jbbstt+V+WG4uRihRg7nv/3691BBwtAK7nN6g3RynPctlFZO4WHc/IMQ/+/FdRuJGFi6xW/H9W\nPR3pe5yB3xfp8ZjUTSpSntJ1nEguWN/jAairU+peR7doYnxEdG1GOtzrQrwKrI7Lm+udiaFNLsP4\nTGfgIqsWrSQKrAL3ujylomXq7Yp18DgX6SpOqFwuJwqeQGLpiiHpdrtpezwuFuk6Lqler2swGKhU\nKqX4GjKnkxLB5w9xVT42fEyen5+nw2vZvYcs2NVG3BZsxvHxsb744gu99dZbunv3rnZ3dwtuz8Fg\noI2NjXRWn8sb90ulUinErszNzenOnTuJ/SFRpnTFkBwcHKRjSk5PT3V8fJzOW1tZWUmuoFLp2o1F\nXxADc3x8rGfPniWmq1qtpizya2trevbsWRrDsBvNZlOl0tUByeyE3N3dVaVSUa1W08HBgZrNZpqL\nsAqkYBgOhymRJzLe399PrkN2xhGAz7Eyk8kkMXkwh6QkINaKvvedZLEMh8Pk9pyfny88s16vp2By\nYsXoQ092yjzgiJjpdKovv/xStVpNm5ubOjw8LMjb52rOZebzJbIgzhI4kxePyYl6kp2TMX4M5oln\nxOOg2ADAM/jt9Y6bVKKXJecujQHV3p7IgsNGodNzCUFzepDfjDnWLpexrwHTaXFnZgyr8DahL5yd\n4ztfE3O6nXti7JS33XUy7BQyiOuxe314D/3L5zw3rmGwgjG9Q9zkEduRwyqUG0t/kBO4lE8JHxdo\n/yx2dBRu7HAvs1x+sZ6z6h1BgdcdheG+aQc2sU1xEsY2zHI3eT1iQWl4YHW81mUQwcirZDDr3T7Z\nAAVeUAg5ypjnRXed74gjZkq6jhXwieiBkF43fPDIxXeLvKrNsW+4z92ptIvJCfij/rwTdwlxRij3\no6OjlFH78PCwENAJGADE+JEZAEtkRDslFVIhAHwBZ6SUYIz6zqVKpZIWUwLPARk8x2NS6Ceu/f3v\nf6979+5pZ2cnZSiv1WoppmZlZaUQW3V+fq5+v1/Y0YSbbXFxUcvLyzo5OVGtVtP8/HzKFs691J8+\nAdiUSiX1+/10BpvH81xeXqbnHh8fpx2R9DeB7wA4z09EsHmv11O73U47+o6OjrS9va2Dg4O0K47+\ndfe5dH3AMeOi3++nLOIARklpiz7uW8Y8fS9dxZEdHx8nN5F0DVYYk65PAP+lUinlEqMsLS1pcXEx\n7QScTK6znrN7FODNzjyeu7+/n0DmwsKCms1mQd4s7tQvtzDGOQyI8LUBGY5Go/RMP42AMcz4xMWb\nc+e7PJApoQmz1iDXGf48gGckAZgjtNHbE3VUDNlAl8Y1g7lLmzyujXul61MafDMJc9/1ZE429LOD\nWIwP1x08YxaYQu/52aouHzd4S6XrHGLueozuthgDhZy5L24q8uczT2IcmceMSUU3I3L0I71iufFd\ne7kYmhzQ8EGSA1ZxMEjF42Z4bw7A5CZNrEeOaZrFhOU+i+xQrDuK3n3Ksc4RgLjcpCJ74pOWCeug\nwLct89utowho/F0OyqJf2weoD+r43BjMx+TOAVvui4faslA5WM69i3fk2pHbXejxBV6wVD0Y0Rkw\nt54cTPM9DAJt9P7qdrvJqq7X64WdRDADsBi8k/iQ4XCYApy9eAoDrHDkxgISGTg/Uw2FBcvD4ghj\n5TE7zWZTrVZLe3t7+vbbb/Xw4cOUNwo5Hx0d6c6dO5pOpwlkNRqNFFNDu/xYknhsCjFJT548SXEy\nx8fHiWEBEMG29Pv9FMDuCnY8Hqvf7yd2DmA3nU61sbGRFO7y8nLqC1IUDAaDtEvtyZMnkqQ33nij\nwJDBMHl/cTizB6n7FmxA6+rqqiSlg3cZF6VSKQGbwWCg4XCY4s88ZQjy9nnlgMkXu+l0moAymzro\nU98VR86u6fR6QwBsmHR1DiPzghgq2k/us5gnSCoaNdTLWXVf1H2BZnx6OoE4/svl8kvj1AOvc7tr\nHfR43zkwoES97LtnfQ2Kuj+uVQ4wYn5B5BO9JP5/9B44cZALnAasxu9harwdvivS5RMBLjokgqUY\nfO/P4p2+frmcATye+oj7y+VyykkWAZEHlOc2RLFueeyYfxeJEq6fRVhINwSkfED5YPTspt5RPtii\nK4LveUZOaLmJwSBx1E+JFsmsxTX3zFyd+Y6BkVv43Q3i9zkzw4DJWRBu8fn7Hb37wIkZdiN17O/1\nz93ag32h0Gc+MWPwJIrFJw3PdUsnBhACpFx2pE2Iz6H+uKV8h5P3xSwFFUEz32PNeQCoL5al0nUG\n7cjuucUWz/9ikrKzzHeYtdttnZ+fq9frpbP43K0wnU7T585yEczurtWYXLFcLqcdgoA0gqt5lp9h\nxnNwk7HFXrrOA4b1xvcUdtTt7+8nNoiyurpaOBDZmRUyfff7fTUajUKeKNyWa2tr2tvb02g0Su8E\ngJEtfW1tLcl0Op2q0+no6OhIlUpF7XY7MVmA00qlklg5ZONZ0BcXF/X48WO9/vrrqR3or6dPnxaY\nw/H46gBlEoE6eDk9PU1gD8DR6XTSdw52XMmPx+Pkfp2fn9fZ2VkK/G82m7q4uEjnM/pi5fmV4nZ2\nUkq02+0EopA3Z+fV63UtLi6q3W6r3+/r008/lSQ9ffpUz549S2kqpGvwS9JXNy4cUPlGDN/pyvcU\nZ914Rr/fV6vVSrnCuMfnsB+ejewAhQ5Ambe+9Z7ncP6gj1Ha4GsV8nLGh7ERQygim+5sHC5v9JED\npZgMljEdjTvXUw4OnFVyhg1DEcDuxp7rwmggUz+u9b+p19LSUpbJiyEYvtHC1wNfg319cIDsz6Rv\nc4QMsvNkrD7O4noRwXGu3NiuvRh/49RaZIn4zF0KFAZhrpGRaswxHfx4TwFGhwAAIABJREFUR8V3\n5wCa05l+X66O/h1/++/YbkmFTowskN/Pfb4TJL4j0pY8n8U0x9ZE+cUyC517O3KTfTKZFNiOXF08\nRoH/fZK7dcRCkdsGTHGFJb28y8Pfx3UoPt8lyL2ePdwXfe5nl5gr92jJuhUFyCHvj7sbut1uco0N\nBoPEiElKO9V8YWLxcmq8VCoVGAkSV6JM/H5yLF1cXOjk5ERnZ2cp7oodZvV6PcU6UZfDw0MtLS2l\npJIcaispHajL4gu44btWq5UWpnq9ntxYzWYzKXRX8NIVqCGX03Q6LWyHR8bsIptMJtrY2Egs2MXF\nhV68eKHz83Otrq4mpklSyhE1Nzenfr9fAFKeFPWbb77Rd77znSRn4qI6nU7qI9/tNxqN0vOk6wXX\nE46i5AFEDnrYoYdsAHMcanx4eJjaT16ti4sLDYfDgpFUqVzn1mGhhHGEjQCcEQ/H+K7X68kQkKS9\nvT39+c9/liQ9e/ZMz58/T/mmfDHF1c0C7s/w+cxY9QU5lriwMzY855VUdEMBUmmjszCeANS/5x2u\nC6NR6+CE/6MBFY1b7wv+9/UuhoJ4mhreDwvpMo4pGZwl8ne6Low6nOc7U+3tYc7yXGfbnY13efI/\nOjwayf5cYta4z+vlhjfPAtC5256x68asy5R+zK0Vvta7fo6pgnLlxoCUlA/qll52wVF8MMRANWee\nfAI6rejvcnYiMkQ+SF4lcO5zVOvM0V8qESjG9nhdIrijDdEycBk6tT8LVTt1THGmyQGdt93bn6Pp\nY3ELgwU8WiOU8/Pzgq88x+xJxVPX/cfllusLlDkTKvYhn3m8hLfP7/dxxP/R2vbvqXe04ACYABz/\nDkajWq0mECopATVXwrS/Xq8ni5Y2AQhZSFhk6vV6Yoj6/b5WVlbUbrc1nU7V7XZTX5DLqtlsanl5\nuZBpfHFxUXt7e0lxHh4eJtfe3bt39cUXX2h+fl6tVist3LSPWKbT09NCP3nQdMwWPp1OdXx8rHff\nfVenp6daWFhIrI90tbAzH8mvhdwGg4F6vZ6azabK5bJqtVqKS5pMJin/0NnZWSGZ6fn5uZrNprrd\nrjY3N1Uul9M5hH5kCcCQPhwMBhoMBqlPPE8YdSiVrpOpspisrKxoeXk5sYPOZLGwDodD7e7u6uTk\nJIE0smkfHx8ng4B+Ylx2u90CK8m4gMXsdruFc/+Wl5eTW5MA7idPnujZs2eSpK+//lrHx8eSrty1\njD36Siq6jaNedcPKmQc3Lp0d9zAAnxO0D9kDUnOMCnJ3vepMdvR8wFTA2ObcgrQ3gkBAkbuI0ANu\nOPmz/HnRoKUNyCgaks6qeH24Ltc+13PIis+cTYt1hX0GmDtrjj5wDwhAy2N3+T0LrPjnrl+5h3oC\n2kql0kuhILTNvSr+3auAlMd45cpt+oPbcltuy225LbflttyW/2W5EUYK1iYyKJE5ir7yyAL4fc7k\n5Fx/0cXiSJT6OOvi90dGJJacHzbXPv6HcvZ7nIVw+pPngaKjdUWJMTsxritHc0pK1rFb0FCZzvL4\nu5z6zvnpvR/cEuJ6WKbY97zLGTK/nrbkfO3RWsj1U2SW3MqLTJ67Jn3sRKvT6X7+90Nn3dqNY84t\nday5paWlAo1PQOVwOEzuMncJnp+fF8a0M4icNUe9vK1xhxdpDIbDoY6OjlQqlXTv3j11u920Uw73\nI4k5t7e3U3vW1ta0ubmpr7/+Ol375Zdfpu8fPnyow8PDFP/jcsC9R0B4ZJ5pC+ySdMXytNttDQYD\nVSoVra6uajqdpucPBgPt7OxoMrlOsuhM3ng8TiwLLkTpipFrNpvpuJrpdJrYHDK3r6+vq1wu69NP\nP02xVbANGxsbmpub097eXiHJKQlQOdAYeRMsfXl5qV6vp4uLi/RMguVxKzSbzUKKA5glSVpfX08u\n2J2dHX3zzTeJKXBXFkyRu0KZTzCc/X5f4/HV0UI+ZkjkCuM1GAzS+2EL6TPfKBB1k3TNMDBXIjtA\niUyLjw1SGLjrkPZ4SISvCc4I5dxMzhbFGBrXMzEkwtch1xMuv+jug5FCfq5r0E/+vwdRo6PcpZlj\nwSLLhHvcw1N812L0rvhuYK6P6yXXS9fZ4WFHeQb3+uYAnulhFc7gezs8VQGyQ4d6HxI3GHWe1zPG\n+VJPLzGeOHqtYrmx9Ac5msxpyjhond6MrjenLv077nFQE104PMMFGRfXXB2je8vb5fWMPu+cO4//\no9uMetLx8TtoTad5o5JAcUSXoVOYuUHigy0CFXff/SWQ6c8DZMQ4ghg46FQ3sSHIMbpgXe5RplER\nOQiKSpD7omsSSt4LSsTdePyNYnAglGtjrg5LS0taW1vTysrKS4dpslvOFwXmideVwnfEL7iSvLy8\nTC6l6ErExYbrp9Fo6M6dO5KUdonNz8+rWq2mLejS1fb3+/fva2NjI7m76OejoyMtLi5qbW0tHdfi\n7QYocpiyu0vn5+dVq9VSfJMbWx988IGm06sjbLrdbiE24969eylA/f79+wXXBGOBYPKtra2Ca8Az\nrzt4q1QqKYfUkydP1Gq1Ul0XFhb09ttv6/DwUE+fPi24KdgNt7CwoO3t7cIiNBgMUgyZL0CSkqsM\nl9vR0VGSHf2Fu3Fubi4BW462ITj+8vIy3YfbHBBFSg3+lpQ2Q3jwPkDq7OwsudIIaOcedE2cL/SX\nAwf/nODpCE5Y6Nyoc/cSICqmHMG1xFxkdx/1lK530cbFGdkChqJbiOvdFUm9GcuzAI0bkjzPQyti\nGINfm3Mj0r4oN+qUixt1d53Xjb+JL5L00noBsESWOYAbQa2fCQjIimttbhMWesANUJeFg+SYQgH5\nx91+Pi6p46xd68jK25aL2aPcCJBy364XX+jcYvfvGfzRj87fcXHyToiD1FmeHLqOFkKuxM53688X\n8pwfexbwiEyctyUu8pIK8UR+ve8yife6bDjV3QdVZG8cAPJ9ZFlif0Vrj+fE2AmXBbFUDGrAA/3n\n8kXeKP7Yxw6iHXSgPHJWcO45XqLypC4XFxfpwF2sLn8Gipd3+nfEBhHg6+OGOBW34KPVyPMiWCLR\nZQShi4uLmkwmqlarqlarKf6G+9bX1zWdTnVycqLDw0NtbGxIumI6zs/P1el0UvyQA8P9/f20oAMo\npKsYqadPn2p1dTXFMnEO3fn5uRYWFtJxNXHss5MNBoTFfnt7OyUcZUz6MTDr6+sajUbqdDqFJKK8\nc319PTFRvjhw7A1xMHt7ey8tYi9evEggjLl3584d7e/v6+uvv9b8/LwajUa6j12OvuuTPvdUErBH\nfDeZTFLKBMCLgwWPkVxeXk5B6hyTQ7vL5XICaHHRp89d5uyymk6nSd5bW1sp0H5ubk7Pnz/Xt99+\nW0gc64Ze9Br4HIxgy3VTLkbGwbPvBHTd5EYqC6TrCWekKBF8OFPO9TFInT6KAIT3UHIMObJ1ubtO\n9vscbM1i62gP7YyB5+icaIi6IRgJBQed/kyuYwz4e6Vr8Ersp7fD11NYRNrq60g0aDFMfSely8lJ\nFwwD2useAl+fXQfnxoIDuly/zio3mpDTBeeMQnR95QZTZJ18MMRreFdOqN7hOSbqL4GF+D11pSMi\nePF2+vu87bENgEe/JrbdFaR0rUxA75EFkl4+P+ovtYvfrkT8ur8kN1dw3hcsckwMduHwnQ/+XB29\nPpGKz4F2d+vNYrH4PE4o6u27jngmu/+cuvadNFyLIoqgrFqtprPMottlFv2NFSldMwnS9YLkIMnr\nNZ1O07l3npiRxXN1dTVlB8d9c3R0pNXVVT18+FDn5+cpaaV0BQYbjYbq9bqOjo4KFvuTJ0+0sbGh\n/f39lG+KOvf7/SSzo6MjVavV5KKinwhO9vY1Gg0dHx+n3Uunp6eFvEeekZ30AsiEnFGNRkOLi4s6\nOTnR5uZmeifzgZ2JjKl2u62jo6NkXVer1fTM/f19dTod7ezspN1HAJpS6Srj/GQySekTkDeygAHi\nsN04xp3RpX9xP7G1HBat1WolNyRnEXqoAAA6MiTMGZiDfr+f+tDPLWRe+Fj3TRi8L+fSY5FzfcOC\njqEUd58xD6LrKRq/OSOTOZkzsHie3xeZKN9oMZ0WdxtG0J9ruzPcABpf8+LGKC/oPndVcp/rxOgO\nQ0dQF1KyIA8HQQ5OMbwBRQ6IeP/i4mJqkwMR5rsf4Iz80PvIyF2Jrr+jjo8g2ccw7fVdlnyGfnOg\n5f1OX/hvD0GJfeN9MKvcaPoDKZ+cK9KZjlgpzjRxbw64IGyu8XdxTWRXfEJQfDJEEBffGRWT9HK8\nTGRWqAcd5laGA6bcJOUnunpoAxMkKk2UjLt2vF3ezthfOTbJnxFp2UgZu/sO8MQzPWmey4G2e2oE\nnxA5+tstsRxwncU00g9eF1eE9JUzOa7sooJ21ysLIDuwAFXr6+t64403tL6+nurqOxMja8Nht6QT\n8LiUSG/7mMKdwnURLJbLV0enNBoNbW1tpQSRz54908HBgdrttpaWljQcDhPImk6nGgwGarVaWllZ\nSRmupavdZ3t7ezo8PNTe3l7haBVPM7C4uFjY8eOU/+XlpQaDQTqS5PT0VPfu3dP8/LyePHmi7e1t\njUajBCbm5q6OV2k0GmmnndeHfEcsHN7HAB2+Y6EiLQI5s0qlUooJq1arunfvns7OzhKIoQ/b7bYu\nLy9TXJK7t9F3MG++MJRK11nbkTHfjUYj1Wq1lJ9rd3c3AdDT01O9ePEipeAYj8eFQ6JhTJlTOWaF\ndzmT5Qu4J22kPrTB3VWMMfQkfZljA3yR5XPACzu//D76iXnncpvl9YgsSGTAoqsoMkNeT2f/3ZCL\nxm7Uy67TXDY59xzxaPzv33G9h3fwnR/6LhWPtHFQ5M/FMPA56O+gnbwvxlnSB143xpqzRN4XDvD8\nN/L1dcRBDr9ZA/jOCQPGm6/B3t4IYn0cxnq+yq0n3XCMlAtHyi/S/O8LofQy2+RsQRzgkYWKdWBg\nxIWYEpE59faO9Pf5AMm5seKE4V5AD+4G6eWtns5kAECov8fm0C6PDfEFwwdTjrqMiiNaVTw3IvUo\nC6flkQvWq7s3PLeUTwwHq5EN4kw0+sLb4vWgbx2gM54iaMTH7+3zc7pcBg7+vF1cc3l5mRYiLDIH\nMCxu6+vrWltb0927d9VqtQpxOTArvM+34RKYPD8/n7b3u9XmBoQrTrJrw2R4WgHaDuDb3d1NeZSI\nwWExJiGqdJ1p++DgQLVaTffv308Le6PRULvd1u9+9zv95je/0d7ent566y1J10wHBg8uG+pNIkT6\nhe9gVQ4ODnT//n21Wi396le/SvInMJtUDp7G4ezsLKVmGI1Gunv3biFLfrl8fUwNzI10HYD8/7H3\nJr2RJdcZ9psDp5yYHIs1s6pbPbpbguV2t2XBhlcybMswDHjjP+Bfoj9hLQwv7YU2hleGBcmyBQuW\nujWrq4fqanYNJItDzskpM78FvyfyvYeX+gADH8oLBkCQzJv33ogTJ87wnhMnOELm+fPnunbtWppf\n5rXX62XQquFwmHgV+nkZg9FolMoODIfDjHyglpTnW9EvQsHUGdve3pZ0nlvFPLJemF8M1tFolAw6\n52EQqfn5eXU6ndRPakjxHa+6Lk2rsEej0NcNfEe9sLzGPNAfZFrMu8LA8qOY8iIH7nj493y9u0Mb\nnW3+J18MY8EdZl9zUe5LU0PSc6/4ftRR3m/KYWD4eA4X33MHmXdjCLGuvOioy2r0jL/bHVZHcV0f\nRPQvT3/QkLNuYLpsHo1GF4w8aZpbSl2yKBd8fhwsoXnZC3cQ3Dj3vjugEJ9Hvl2e3k79vvTKVbtq\nV+2qXbWrdtWu2lX7re2FhfZiTBQvn8/ycm8us3zdMnUPK3oljjzkvdc9s+jhRE8DT8bvc+jb/6YP\nHhbKS8b0xOIIDXtIzhv9AK3KO/QRjzYvHOf/e5/IwYgoHda6JyV6rk+E7/E2yGVxyNmRFeYh5k9F\n7895hmt58+vzEceK5wV8DO0ZNyEmxu4oHp68lx2ABowbL8w9R57n26qZy3a7nRCMiHKenp6mzQBx\np8l4PNbz58/VaDTUbDbTLi/6zI9D7tI034Px+hhHo1FCB4H5QQnIX2LnXa/XS8jN4eGh2u12Qis+\n/vhjbW5upv4sLy/rzTff1PHxsf77v/87FeV89dVXM4VGHVVrtVqpTADhS0827na7Go1Gaadhv99P\n+U+lUimF+hqNRgZd4UzAcrmcDib2eSwWzw8JJgne5Qhz0Gq1tLy8nEEj2IHHcS7saINGVKSuVCpp\nLsjfAl1yFODo6CjDl9IUkQINJFwI7aVzhJPiqCcnJ6lgKWOgsCqoMHRhbguFggaDgVZXV9OOTXjJ\nQ321Wi2Vm2A9sCvw+Pj4QhK7y9cYwsvLbyI5n2seTnMZGWW1py04isM4kGFRXvBc+gPyTp8IZ0EL\nR5cd4Xcki2f5mqbx/Lx0Ft6Zh9ZEZAu5m7eVn3yhvBMPGKOnD7jcjkg98iymvXjOKYiPo/eMg3fF\nyuesOUeP+Bv9R64f7/PfzluuS/NymgqFQioZAdrL+JiPiEySo5m3EYL2Qiub+0RFZZgXxvN7vTk8\n6kS9LByYd837gjL1PBoPsUVF6VClh/5iHz3c5de9pD2Lwhl/ZmYm1X+Ji9H74QvaG4ImCo4ovKLh\nyt8xXMq9LK7LaBgNKRcAvqsGgXd8fJxykrwPeVuVoRswLcnePhfQLxrP0ZCHhtI09MGzyuVpzR/p\nXLmxUH1buYeX83InPNRBKIZ8H86oQxF7Um0Mx7oQw3De3d3V8vKy5ubmUs6Sh0ViCLZQKKRjYhCc\nGOCEBfr9fjKgyGfqdrva2dnRcDhUt9tVu91OFdEHg0FGIEtKdaSq1apu3Liht956S++++6663a4+\n+OCDNBe/+7u/m+hNjRtoTV6UG48+9kajoXa7rW63q+vXrydDo9VqaTI5r8x+7do1NRqNFE4iVLSx\nsaF2u62zs7M0RsJwGCrk4EjTXXDkt1Wr1Qy/nZycaH19XZ1OJ9WEct6gDpjLDK8eDq9ieKEA4VUP\n73h18Gi0HBwcpPAeBwjDQ7VaLSlMjDCMTww5Qm++K3MwGKTSD5PJRPv7+9ra2krhaY6iwSnyDSMo\nKF93zivufPm698R/1pobC8hiD81JyhgnGL+eM+gyNG+9IpuibMOBzHNm+U7si38HI86dF5+bPBmH\n/I+Gp5Q9DsodTO8jz3TZ5/LOaU5/XIchAwgzuoHiuoA+Rb3ndKNFh93H5M+hb8PhMHNMF9+LBhXP\ni8ZfNMCgqYfMXaZ6eJMxuKGa116IIcXCcYZmwl3x5RlNlz0rzwDLQwz8f97B4nHvjwnweiVSdicB\nfY3GGoT3Fo3GaCzEBFC+S2yYvvg1mD3G+Z0u0JUfZz6PCUevEPrljSV6UZ4/xf9R6UNfvuu5EG6w\n8l0/c8nRPV/4Lizz6O3NFwbNhQ3KjIKYfM4p99K0TIHv4PFFSt9Go+mBsp4AC+JCfhHe/a1bt1St\nVtP/PkZ/ni98+gfNdnd3tbi4mIRrr9dLAjFv+zD5McPhMON50zyHBcTm5OREvV5Pg8EgGfa8n2NQ\nQCZmZ2fT+XW9Xk+7u7va3d3VO++8o69//etJAf/sZz/T+vq6ms1myvXy8/QajYb29/eT0QLK02g0\n1Gg0MsebzMzMpOdKSjWbTk9Ptb6+nnK2QBzhH/f6QbLckIwHLC8uLqZEbwzpo6Mjrays6PPPP9f+\n/r7G43EyzvDIQb9w0uClSqWiTqejk5OTlCMHT/nxJ2dnZ5mjXsi7QslCm263q0qloqWlJQ0GA43H\n43RftVpVv99Psuv27dvp/na7rVu3bqUcqclkkkE52RVZKBS0sLCQ8uLo69nZWeZ4p+goueyK69Ud\nzagXaKenp4nelL2gxXIK8H1EtZAD7uS5HGAMHnHgmTH6gHInh4bPHZHy8UQUzJ04v4f74jvdaHeU\nnmuelI7ecBnPNf73HDeeyz2+s4659PpcHiXxvvF3RIgYX5TD5fK0Ph5zLCmdFdnr9XR0dJThq3K5\nnDn4PebpOu1wJPk8LyGfZ/qZgP48jyxd1l54QU5XRLHD0RCKVn7ec3wSY2jPmxtWIAwIG0dA3GCg\nuVJyJc67Y7jJ3+fM5EzMJMbdML5DAi/UPSAWKko2IjKXIXL+WTRE8CJ4v9M30t7H4EnoGIbc77tB\nCoVCStiVsmfGcTCvJ3g7miVlC62x6NmdRPOQLf/TfGHxf9wCTL88JEphRTd0HBWAlnNzc6pWq7k7\ngkCjVlZW0m44BBhKcTgcZubYlYDzcqfTyQjU58+fJ/QM2hBm8uZnUFH6gO9Qe0qahlxdmZCYPplM\ntLW1lc5a29vbyyjVQmFaXHJtbU3r6+v6/PPP1ev19M477+i9995LY/joo4/0+uuva319PbPz7tat\nW4nH2GVHTSsgen6q1WqmmjjoCYVHZ2dnk0G4vLyc6metra2lZ9AODg6S4u/1eokX6/V6GhPolBsN\njx8/VqfTSYg28gQeps6To06gJZPJ5EJtKuiIMejXUD6TySQV9GRdLC8vq1qtprAeuxelqYIaj8eJ\nV6DL4uKiSqVSQis9SZnDqOFJisYiJ1izzj+OADhq4vLGx+jKj4bB4uuB3474ufJz58vXOPd54nKU\nfW7E+qafKOejPCHsGGU7Y4qVy2PDOOE6eiAqf8bgqQk+bu+r6yEHCfh7OBxmdG4EMByRw/Fyg42G\nER3RLZ4BAkoo3YujuhM/Go0yjgmHbiMTfC4uAyy8FAwFhGNqBjrTZT3Og28K8qjG/0lEKioh6eKO\nvTwDIFrs8XtReUYUKvbBITtnKPcY+a4r6Rg6dI8xD53y/x069cbkgWK4IYXgjQuL+1wRRzpGQ86f\n63TLW1AufBwh4zs+bj5DaeWF9tglBs0Zhx9jUSwWNRgMEvrjyB8C67LcABeQ9Mtj9pE2eB+E86Sp\nUef5OI6UORSN8PP5K5VKqbhiXHiEJ/CSab1eLx0Ey04zlLCjt3hhLqyOj48TD5ydnSWlyMGxfM/7\nipD2XWhueJ+cnKSjZUBnaDMzM/rkk0/08ccfq91up/ltNptaWVlJBwR3u13t7u5KUipU+fLLL6tQ\nKOi73/2uvva1r0mS3n77bf34xz/O5M8xBs/LefLkiVZWVpJBACJWqVTS4byLi4uZStuHh4epLldE\nOqgGHnM2MIjn5uZSuNLzi1Bi9Xpdh4eHicbj8VhHR0eqVquJDhiE5AdijOzt7WVyvQhZYcDAN5VK\nJYUoaV7eASQjOhGEZlutljY2NjQ3N5eKo8JHlUolGVv0BZStVColQ5F1we5Q1ujW1lYy4JgrR2Th\nL+aC9XuZIQEPe9X734YoeJjPQ3fcVywW09qgf1L2yBKfU2kq0+C5mP6BDIGP3Mnk+b9Nf+UZPN4c\nwY7ymXEyFpe/7sBznSgL44sABQ6f6ygv4hlRGZ7jMuiyCI8jWh7xkZQxbAgjM1cuF3mO53rmyWHu\nh0as30JhehwN8oSadfQXR5LnnJ6eamFhQcViMcOHkRZ57YUhUlJ2e6kjNlI+9HvZ/x7LdCMEBssz\nplxxwFS80xPOohGH8OVaREriePIQNg+5eYNB/HMWz2WIHEzkXkikny90f5eUzZ9y2lzGOL/Nq4qG\niqTkwWNYuEfkW7lRNggSlLd7K3nGCc+OxpmP0ekkTQtXYvR55XA8E2jgiA/3AS37O1C4hAJdUDnt\n4MlWq5XGuLKyorW1NXW73XRkB7TEyKMf5JHRH/rEewhDgSwQKot1bwhpoeTceAZRYS5JKB6Px3r0\n6JEePXqk1dVVvfPOO+mZGF+DwSAJS0J70vmxJT/60Y90+/Zt3bp1S//5n/8pSfr617+uW7duqdVq\naTQ6TxynL9TNarVaSfG7x8r5dZw/eHh4mMJpHJ3C2LwcATWYSqVpFXFQJzxnjFQ3vqrVqqrVqkql\nkvb29iQpjRGkgxy6crmcQolSNjG20WgkgY6QL5fLKemeNeDH1BDm8/XN9v88VF6S7t69q1KppKdP\nn6Zx0n8PbRLGrdfrajabqWJ8THyHN7rdrj766CNtb28n3vDk7rhJhfe5Yo/IOTSYn5/PGKfQlnn0\n8wvpG0fe+FzxTi806++Dbi7PovyMCdXRAXaHBqM2bsN3Zw59EeV4RG/ynu/6CXkdoy8xlOp08Ocy\nP25EQn+nNXSg8VnUGc7bbigxNz7XbpxLU+c30gT0i/C56yScAfrAdfrGuNA1rMNqtarhcJjOksT5\n4X0e1o3AQp7u9XZV/uCqXbWrdtWu2lW7alftf9leaGgvxsOxTGNYioZlmBcuk3QBXuX6ZSiKow6e\nyItHGa11aZpwnLc7w3dm0A+3onkm34khNA/rQReS/AjNxHi3NPXOYsIl93uOgickYrF7aMPv9/F7\nWMjzFfx7oB6Mzz0sL3LJM+KuGI7RIL4tKW0/d8g5eoqEqEBhoI3nUPl8eg4KeVuOnPnWX0/mhWY+\nB5E/Yq6df+4e29zcXPKUlpaWtLS0lLbF+8G+HrpkDhw98d03Hgahyvh4PE4Jm3yX890Ijzl0DX3J\n83Ke+eEPf6jHjx9rc3NT9+/f13g8TqharVbTvXv3NB6Ptb29rQ8//DAViGw0Grp586YODw/161//\nWp1OR6+//rqk8519GxsbOjw8VLfbVbPZ1N27dyWdo1i9Xi+Ny+F2UEOOueEoHBALwqP9fj/xFfNP\nAje5dRzNAo3L5bIGg4G63a7m5uZ08+ZNSeeIzWg0SiECCpTS13K5nBK8fU15Mj+8xPomz4jQ7tHR\nUSZ3rlKppBBj3GzgoSA/mqNSqWh1dVXHx8c6PDzMhGZBNiaTSSoNQa4UoT7CIn5eoKMGBwcHevbs\nWQZ54JxDD+nAUxQP5Xnxe47Qe0jQkVb+j6gL93jyM5EJ1qnv+IKXyOmKyc+g1NDfdYK3y1D5PJkY\n9VhMGne9wXOZ7zw94zoNGefj9wOkY46rz2NMRaGhZxw5Qg64TPMTeIfZAAAgAElEQVQUiUKhkNn4\nwXPJDeNdcTccz/Z0DX5Dk5mZmUyJEs+Roq8+DsYdU2FYn41GQwcHB5mdxsgEohuMA3qTunFZeyGG\nFMrpMqOD/z38xvUYMosQZzQAYj6T3wdjeE6P95GJcKPHq0DHfvLMvFCfK3ZPTqTPl+UnMR5PRI2L\nzePgzuRudPhhm/78crmcyTuBVozDv8dvTxr3PhMKYes4Y3aa+o8rG3YkUaPJc0HcQPSQKELZ87Hc\ncKFPvguHZ7oBzmL1a7QoZKELMLXTF8FLnonPh4doKYWAUV6pVNJxHhhTHlpzge+hamBtpy3t6OhI\ntVpNMzMz6cw4F3RUDC+Xy5mzuNhtt7KykujKmXGtVksvv/xyygH65JNPUj//+q//Wt/85jc1mUz0\n7W9/Wz/72c/S+7a2trSxsaHr169rdnZWn376aXrf6uqqms2marVaypnAEF1YWEjG7vHxsRYXF1MY\najAYqNPppHDY8+fPUyiAeTs5OVG1WtXCwoKePHmSqpC7oGXnWwwZd7tdNRoN3blzJ/EpBomH3uAp\ncutI0vZdqXNzc+mYGkK3zl9UT2fsbiwQNiL07ZtAMIow0n2LOs5Ds9nMhDAItUwmE62urqaDpKEp\n/YG/WWvwt6Rk9B4dHaXnDgaD5MwQBvPQPXK2UqmoXq9nTifAWYoymudgXGHgMYe+/d/5P4bnvOQB\nOoS16u9zY47m8pK1F0M9Hg5i3vLe50Yj11y+eGM80RBivA4OxGcQBsbZyDNIXZZAN095iLlVPge8\nz/O5XPZQwZ5rrudxeumnb0iIvzGS3NihLxFEcAfa6/ThFDLuYrGYSs8cHR2lXansRqY/MfE/LyTr\n7YWVP5CmsWU+c2MoWuBuIPl33cJ2tOeyez0eDaFhnFj+wBcgzyQJ0S3heK6Rx8TdMCKZj8mPcWhf\ntO4JOnLkAt8RIJ7jaBFjcis7Wu4umJyJIxoXc6rcKHNDB6HIFnNPOK1UKpn6Hb6gyI8gNyXW0eIZ\n9IexkBzoBp40PYeLz/0QW77vgtvH54LGBS3KiyRs956ZGwS078LkXkfEXCnym+e6kVmpVDJCGCEq\nTTdFYJi5cVgul7W9vZ05FobGdmKEjOcPNRoNXb9+PR0KPBwO9cUXX0ialkTo9/s6OjpSp9NJ1+7d\nu6evfe1r+uyzz/Rf//VfevDgQSYBFPRnc3NTZ2dn6Yy6L774QpubmwkB4+gVaXq4cKfTUbVa1eHh\nYernwsKCer1eMmYWFha0vLycFDv8B/38oGTnW+bX+ypJL730ktbX17W7u5sM3mLxPHEVXn769Gmi\n69raWkIbG41GJgF8PB6r1WppZmZGzWYzs2YpU0GJh9FoenSS57dAQ+dv1kChUMjk0LCearWaBoOB\nFhYWUu7c4eFhQhpJuPdSIyA4c3NzCZVi7unHRx99pF//+tfJGJfOdwqWy2W1Wq0L5T0iCu3rwvO/\nkN+OxtMvvutyEdnA+o1OKHI9yjzkjxtqPB/+QH66QeDvdkXrxmNMRI9Ik6N4bpAgV6MT50rckRzf\nwYlOcfni80g0g/FHnRbpTT5qdL64lmdw4pgwFjfc3RjyvkAXdKkbVjEq5QBFdCjcWIJezCEODs8g\nlxEnAzS20+loMBio3W4nECCijpcZvdILMqQQbl4dGqXmFnQ0iPw3zY2qy8J3Pil5VqUbINLUyMqD\nQ52JokdAX9x7YUF5COUyWNhDV+4J8ByMg7goHLXxcTBmxuf9QcAgkECSpIuwebT46SOM70JiPB6n\nitNUUJay9bAYX0wM59BdR8mkqeERz4byMGM81JR+etjTFx8L2o0f5oDfEYJGgLK7LhpZ0FVSQpbc\nS2auKpVKpj6V0xtjMHrJrJXRaJRJoHXUz2FsSaleEPzoyCOKhERdlB51jIDSt7a2MrA6zxiPx2q3\n2/r4448lSf/wD/+g999/X9vb2zo8PLwAxX/yySfa3t7WX/3VX+nOnTsp7Lezs5MMZ3amgqy4MUsi\n/r179yRNE0cRsLVaLe06o62traWk8MXFxTSPviOS/32HIyjZw4cPNRwOExK1vb2dDKnBYJCSsuE/\naludnJxobW3tgsFPmNWVQqVS0XA4zChGxo+xzvNdLrC5wQ0FD+d7EV+MJp7dbreTMePoAcU74Tl3\nYpCjn3/+uZ48eaLZ2Vldv35dd+7ckTQ9KJn3sUNXUtrMwRp1p4KxoUzdWMI4gM99rKyZPAeTPvj9\n0fHOQ3s8zCZdlKWuk1wnuEEQ54JrbuRFHYSh5P30Z0QjC3q5LnFjxdd1XuiL70cnAoTb6Ro3C3na\nRtRjbvC50wK6Sd9iaR2fozz9nocGuc73uYevmV93LpGrDqLQz8XFRZ2dnWlvby85kI6AeXQpr70Q\nQworGQUhZb3DSNQ8WNIVTbRw/buOcknTxQETQtTotdBQwu5d5oWrGENcsL7wfBE5IzIekB9nHGcY\nqj5HONQ9qZjP44apI1NUTHakxoWU09f74yGFuPBR9qAEvssKo8qVZJxff5eHEjGGIqrGIuH7cUGB\nHlLMLQpI6O4GJgsQNAuEjO9HWrkwdto42sd15/E4RvjBjQefY//tUHepVEo09R0yGKSE7bygHcq7\n0WioUqmo2WxmtrljdJ+dnen58+cJIVpfX9f9+/f19OlTPXjwQF988UVCqbrdrr773e9mlCdCqtfr\naX19XX/yJ3+ibrer5eVl3b9/X9J5uHB7ezsTDnNjYX5+XoPBQKenp1pdXU3ywncyIRN6vV4y9Kgb\nhWEKX0rnQhM0HBkEbywuLmp1dVUffvihtra2tLe3l4wxPHz3bL2AYK/X0/b2tubn53Xt2jXdvn1b\nklKxVZDGWq2WUSz0BYONNcOBxI4MOArhvO/PPDs7SzV0vCSH8zwGOAcsQzPCUp6GQD/ho8XFRb31\n1lsqFosp7Ht4eJhqXVFSwQ0RR4fc+I9r3sfqDnUeCkLzQ5kZY8xDik65p1e4PPdr0RH250MTb76m\naY42RV2U56y7Lotz7LqFZxP6d0cRmmBQudz3vkJj+kzJC0nJUXCZS6Qhponk6U9kH+MnxcKjHXnp\nON6vmEfnY/P38CNNkTGMwIjSQ2MPl/uzrl27poWFBbVarVRKBRpFw9rbCwvtIQTpHLA4CEqeZR5D\nTnzGdy6D3pyJnInzlC/XWHwwsDOvCwKfKDxNlJ0vYO7xYmSeK+EG1mUT5go3b4x+rxsXLogiIsU9\nk8kko6QQYpFGLtjwIFGAIFInJycJteFeknqHw2E6Sd5hW0e3HCEhbIWX7bzB3EAT7qFRP+gyHuCd\nk8k0eRyli6HpwtJp6CFO5wueQf/8fkcb3SCiinS/38945v5OTwz1CuhOtyjs+RkOh6rX68kgWF9f\n19LSkubm5rS8vKzV1dX0TN5Nrlqr1UrXqIL9+PFjHR8fq1arZbbv00/6Tu2ir33ta/rnf/5nXb9+\nXd/61rf0wx/+MOUr7e3taXt7W2tra6rVahnDnjPkMIYoh0A/q9Vq5oy9vb29JPxWVlbU7/e1vr6e\nBCcGg8P/rAPmaW1tTVtbW/rkk0+0tbWlk5OTBP8fHx9rf38/vcP5u9lspurr7XY7U3/r7t27mpmZ\nUb1e19zcXKb6er/fTwavo42SEj0KhUIyqHi3r+3Z2fMzAb1ulTRF/t2Bm0wmqlQqiWcoc8A1UM9S\n6bxWlssitq5vbGyo1+vp0aNHevjwoSSliu2lUikpYS8O62vC1wUKDKXNPHMfSiwiNr5+ovHiKLEr\nY5rL8+g054X3aK47ohGV1w8pq1Ni//yZ7tzRMHrduPNnIJ9cDvh7MaJ8vNzrBpwbGsgRZDL85jly\n3i9ohV5wB8XfxbWYfM+8eppFpE0e3fjfIz00dBkbHBxVjYac1wpkLeGY8pvNJXnzm/py6ZWrdtWu\n2lW7alftql21q/Zb2wtBpGIYRTr3EoHwpWyYzi3ePGuY7/O9COu6BexQpSMMjryAfLi34Ie6ekJk\nDHvRp+gh+PvjAYz+DPrnniDjImnav4MlHT2bGELKg7h5LiiEJ56DAGHNx/FyL3A/38HjYOs176PE\nAIm0bFmHbtwbkzNp9IW8J/rioTkPJ0lKOQI8O26rZl79fXg5EeHxuQHBil6mh1N9bqWpxzM3N5fQ\nSMJpq6uraVwk3Tp/gyzxOeE0kCLfbekhI77f6/VUr9fTdn3QJGjFOXHQrNPpJN4i1wKeAtUajUYp\nX8jnIo+XqdYuSd/4xjf0y1/+Ml0DceQ+RxUJNbAb1HOZOJvPER08UNqtW7cyyKgjJLybfuB9DgYD\nPXjwQHt7e+n577//vqTzbf/j8TiFNwqFQipI+ejRI62srKQDmmdnZxOtoNft27fTGLwEByUH5ubm\nMoc2exXpwWCQCZl4jhDoIfME2g/qRC6gNEVx4fFGo5HhH/hyfn7+QkHRfr+v58+f69GjRyn06QUN\nQZ3Zru6IqyO5jojQN5DgvDwZR2Sct8bjcUJdPKzGu5ALLhNiuDFuTuJ95MS4fnG94iiP6zPChY7y\nECL1SEecQ5c7NO5hw4HrLniDNY8c87Expx7qBIl1FNuTzPPSHqTz0DU5qjEq5KkXrGNPGifKwPOj\nXM3Tz9I0r5aiyR7S4zehYPrNOgDh96OaYi6xj4WixZ5o7ykdHvbMay+ssjlE9wUuKR1NEaE6Wh7T\n+zOjoRCNMY/d+gKI7+NZTFaEuJnkmAjnCXdepZi+8jyH9z0/wPN+fFw8mzwG+uGhIs87wvCAKaBp\njEU77X0MMHDMEXADggXt4QgXFJyTJCmFH+iPjwNY340dF1okqca5QKD5dQ8lukHk4QVXpIzf+YT3\nRLjZ89ycd7jP5zEaYhHCdv7D6OHvvIZwZ2cfY4SWsb4ZCobkXg6klc75qdVqpcOSS6VS2gZ869Yt\nzc/Pa29vL13rdDpprJzfxgGyvrXYeX00GiVF/OMf/1i/+tWv9NWvflXvvPOO7t27p0ePHiUekabJ\nnp7PUSyeJ0k/f/48JTcj3I6OjlLdKencAKrVaommCwsLqcYU+W5u2LlQ9TDUb37zGx0cHKhUKunj\njz/WBx98kIysV155ReVyOdHl5OREL7/8sqRz+H9vb0+dTkfb29va/H9rbdHXR48e6fT0VNevX8+c\nN4bh7IYBfMORMfCTH6jteV3UrfJEe2QT8+LhDPKgeJevXwwPwu+04XCojz76SA8ePNDu7m46UxDD\nldINVHf3texhcH77eoOPvNwD19yAiakL7iT6Mz30XigUMgc/j8djdTqdjKK/zMjEmXCa+t9OH2Rm\ndKaQ2R7ii4aIO/yuh/xeNz793Dv64sayPwPZFx1Txh7HwD1OX0kpzNdoNC4Yux5mhA7uQPIdZBH9\n9ntiaNNDdhhwOOzoi4WFhcRzHr7zvlHeRDrP/8TIQl94TnHcUAVtvL7fZe2FIVIQz2PVoEJ+sCbX\nsHLzkCiEUF7CmjOpeyckf/oCdwHuk4G3KCnFXrGCowfucVs/qd29n3K5rHq9njEanNHjDgHGHhOP\n/dnRO0JY461FoQADe66FX3N6oZSd3ixCf6YbC3hnMN/x8bG63e4F71SaJukTQ8+L2/vYnE5x518U\nir67xQWy13rxFvnMkzw5kDVu3aZdlr/mz/YaJ7Tj4+N0JhzfcXTT+cHfwaGjlA4ALZHODQm8Y/7G\nWFpaWkrjQgixqwtEolqt6vj4WM1mMxk9JHNjsMzPz2dydpg/R6do3/nOd5Ln7GjkYDBISeHUfPKD\nh9kZRzI18wLyMTMzo4ODg4zBDW1wJFqtViqTAQ0lpdyc5eXllFD/ySefqFQq6YMPPtDW1pZ+53d+\nJxmprVZLtVpNrVYrGU0Yi3fv3tWtW7dUKpX05MkT/epXv0oG7+///u+nk+wPDg5Uq9USqsjcgrrF\n/BKvSZbnXIGq4ZkzF752XUZ5DguKxJF4EE8SdWnPnj3T+++/r52dHXU6HVUqlQwC6geQ+1rmnf7b\n/3Y0HIPAnUFvLjOglTs70aElP4j3MOeTyUTdblf9fv+Cs+vJ3t4cwYnr1xEndILLLzf2XI77zjZ+\noqHoxlV0zBijb/GnId/i55PJJIPEuoFGf6GpG0yg0/BpnpxDx4HqMA84C1HP+j3+Hh8jMr5cnu4c\n9/NQ0XMur73vp6enSUaNx+OUG+rGMu8tlUqpbInLeZ+zy9oL3bXnDMdEoWQxpqRpnR3PuvcWEwlp\nDpdicSI0CAPBOJcRih0o3IfgcYXoC9+VtBsEpdL57iqu+YGaLEL3XC5D3mIisu/gi54C48vbzgvt\nYWIXWg5rw2ARwcLjdIMXeh0dHWXqKkE3qh+XSiW12+0kfKXpAbwxJIrR5mEBNyiZW0cQI72id+ke\neKR3Xo0veMoVU0yadQWIUnAlRf9coNBAE/CwIq25ZzQ6rzHkByyDUMVdRihqR2I9pLy8vJwEVL1e\nz2y5J4zW7/d1+/Zt/eQnP5EkffbZZ3r77be1sbGhg4MD1ev1lGwOneEdRyRefvll/eu//qt+8Ytf\naG1tTWdnZ9rf309zgeCl7pivmcFgkNBFDFnoMjc3l6rAU16Ae09PT5NxiTfruxuhO8YZu89KpZLe\nf/99/fznP9d7772nhYUF3bhxQ9J50vzMzIz+5V/+RR9//LGePn2q3/zmN5Kkhw8f6t1339X6+rru\n3LmjV155Rf/zP/8jSfr5z3+uP/qjP0qCOu5Q9Lnz9e3hFb7POoXnMX4iz7MuCZU6MsAP6zvyqMtl\n5vfBgwd6/vx5CqXWajUVCoXMRpN+v5/ORCwWi6l2FQY4KCbvYoyOFGOUSNOyCcw5JUcYI84qitWN\nA75TLpfVaDQya351dVX1el39fj9zigLoBXTx8J3Txku50G8+Zx7c2XKjwneKeXg3hq0YXzQQ4QGc\nJJ7tzpbLGZqjfN5iArsjatHoOTk5UbfbTQgNvMVvZJTfE/vhOgq9R+kPd5Sgqzu8DoK4rPPGHDnC\n76Hr09NTNZvNJAuc3ugexu0OVwydxvbCdu05AiVllQLXvRAewsZDPvzO+0y6WGsCL1qahr48NMH3\nHd1BIbjXRtjL86akbKVXR0P4DMFXqVQy9ztyEE/kjjuySqVp7YsINTqS4SEm6BENO98h5iG7PEHi\nhgVGm4cJ6Cuxe+8n9+EdcA0GR7HAyDHEF3Mr3HBzr8EXqUPTLI5ocLv36MYp70XZRGMLgcii5T6n\nl3+fvyOs75464QcMhHgf4/bDWd2QZY0QhuNoFOZydnY2ha8kJQPliy++0K1btxKywhw2m00Nh0O9\n9NJL6b6nT5/q/v37ajQaunHjRiasu7OzkxwM5pJnbmxsaHNzU5ubm2q32/rss89SHSlCZeQgDIfD\ndEQK9OI3/XLazczMaGVlRd1uV8fHxwkFZrfe2dmZlpeXMwUrWRPkUOzu7qb+7O7u6ic/+Ylefvll\nlctlHRwc6G//9m8lSffv39dnn32mTz/9VI8fP86EZD///HNJ0le+8hVtbm6qUCjoq1/9qiTpgw8+\n0MOHD/X2229rfn5eS0tLF5xEZJ3PPeMDBfDDtTGMcfScLwj9ECZF/knnSJ2HbvydHimAr0Hqut2u\nZmdnVa1WE6rIu7gX2Ug4zeuWeb4WcyBNHUBkgpdeoY84HnHdsw5xin1nYAz9uQFKIVHCieyuJJRK\nOYFYVsEjGB5tYP3RJ1fCjizjgPnuaF/bLqOcBp5H5n2JqJLrL4/CuA5ANzny7yFYL9/ihvh4PE5A\nAPPr749OkOtQ5j2GHuN8Rn3J54wlpn3grLtByfy4AQzPtVqtBJwUi8WEqtKQlS7XvX/RCPX2Qgwp\nGI7fkpJlSjjKjQK207pXGhebM08MFzqC4JPpC8Ohcb4vXTw13EMYQH4OHzsjucVM3+i75zu4V8Di\nd0YE8kaZ+PcjeuNWNJ4aRmgst+BGhi9MDJIYuqM/0Jjxe4iO552enqper18IQRwfHydUB0OQd7li\n9sXtc+lz76HSaDT6onQjUFJKXHTj1BUUhlwU3tAGOju9oXOEgZ3fvN8+Ni+sGIXGZDJJStKNVGiK\nAUJoDCRqOBxqMBgkVMYRuWKxqKOjo1Tn65e//KWePXsm6bzEAfkxc3NzajQa+ou/+AtJ0re//W1t\nbW3pS1/6khqNhmq1mjY3NxNNd3Z2dHZ2pnq9rnq9nkoc3L9/X/fu3VOn09HTp0/V6/US3L60tKRr\n164lujv6OxqNVKvVEm+4MmEeQDmWl5cznigKazgcanFx8QKSDTpULBa1tbWV5uxHP/qRlpaWdP/+\nfe3u7urTTz/Vt771rTSODz/8UL/4xS/07NkzFYvTQpeDwUA///nP1Wg0VCyeh+ReeuklSdK9e/f0\n61//Wq+88ko6XsdzOlhPzH9EQz3M7kfkgA6VSqULqRDlcjnV7skLpzgdWIcoUt7p9zWbTRUKBT19\n+lTb29uJ/xk/RgnozmQySX+jzHy9xPXhoRiUImhVXiK6RxSQAdDUNwNEFNuPTWFzB/dVq1X1+/2k\nb7woI/IcerthjhFBGDY6Zr6BypU0PI+jAxrMMx0Zd32CIYAcI5KBzAAphF5Rz3K/yz/nRQ8pusxE\nX7KO4pEthP7iGvZ5j/wd5SjONXLO+d83Q9Fvfpw28EOkG7UM2+126qPraNflUa/6JpC8dlX+4Kpd\ntat21a7aVbtqV+1/2V4IIuWQs1d/9nCbw9g0UI3o9Tv8L13MJ+KZviPIUSrPF5KyOwccAaHF/BgP\nJ+FF4Gl4c+TGUSU8Ct+Z5mPhWt72T/di3RJ3Sz8vEZ93eOl/9xa57igJNPWwnoc3vG+S0sGZ0NIt\n+5jkyfwyx44A4vlERAqP0hNy3fNx+sR58FCuezRxnvLi+/CLe3ru/TmdHBVzT5/vcM0RRK/Czvc8\n1OD8xvyCzDAOjioBWYq5JtJ5AvH6+romk0kKb2xtbaXdcXjvX/7ylyWdH0z8T//0Tzo5OdFrr72m\nV199NSFZjUZD165dS5WvFxYWtL6+Luk8Efv4+FhPnjzRs2fP0lEi0jlac+/evcwuGkcyCEWBfsQd\ni51OJ61tEuThPRLX+/2+6vV6Ji8JBGN7e1snJycp1+lXv/qV/viP/1gzM+fHpxwcHOj73/9+4gvy\nM8gTefz4saTz5P5ms6nHjx/r9u3bmp2dTXlXN27cUKvV0uHhoTY3N3V4eJiQHPg77mBlPfDseGYi\noT5HK10mgu6BbHhI38PxyIDI3/AqIaz5+Xk9evQo5eiVy2UtLy8nmvouPg//QG8PpUfe95BgRMsc\naSKEJGWPlfFoBvcREuJvD4c7CgiaBL3n5+cTUutFVRkLGxg8dQGEKk8OcT3mPfpcez6lh/24j+9E\nBAnUDRkIj4COsf49GsH3Pf0ElJOwPvrA0z3Qoeg3T1iP8+zz63nEHvWIcxzTQTwqQ19i2BCkymU8\nvO5IHrLUUcR+v69isZhJaXA6Oy8yv//nyh9gMBUK091w3nFCETHmy9EjfkafQ5F5hhSMErfHe95N\nvIahRp9cCXk4iZBZzB/yyfSdK/Gdnn9EvN+ZTsqe+8e27XgfeRRugNDvaHjm5R9Eg5V4MCFMh4H5\nHAZ1hR93wnlyM+fLkQOWR1P+R6B5X3x+aZ7LFYVVNDDdaIcmUbj7vd588btAQHAyBr93NBplFq40\nFaCRb8jJKBTOD9eNMXrmAJ7zZGlyF2IomvpP8I7vpIH/hsOhDg8Pde/evZQLMxwO1W63tby8nLa0\nM4b33ntPZ2dn+vd//3d99NFHeuWVVzIlFZaWlpLBs7Kykvr57Nkz7e/v6/Hjx3ry5In6/b7eeuut\n9MylpaUUhpKm8L6HMqPxSSkNkvNRHnwPZUHYC2HoczUajbS7u6ter6f/+I//SOMYDAZaXFxMPMp9\nMVS2v7+vd999V5L093//9+p2u/rLv/zLlEtG3lO/31ej0dDDhw/10ksvZUoLHBwcpBCtK3v4pVQq\npVIHHtZtNBrpvEFCQjEHxY/icp5GWZFAy/ji8U3+vrm5OdXr9ZTPhiJyxYTRxkYhngv9Wa9x3dDH\nqIRZYzzHQ0bu0Hi4SlKmz9CQZ2I4eWI79CZv1WVv3LnlxqafBoCBhbzyNAnvL84BdOEaITMvbRNz\nlLgG/VyfeIoFaSOlUinVLHPni+YOKc8tFqfnevq6Y7zu8PC+4+PjdPg2a5Fxs1EEg8flmstn3uU6\n3zf+uHMJ38VcP7/mNPV15Ruwjo+P01xwYHiU/d7XqCe9vRBDiol2hMgXE9Yh3hCM6YrbGc7jtj4Z\nfO5x2ohIEWuPgojFhzESvQGu8bc0tb6Pjo4y3oSkTN/jgpemQjNa1M4o7G5wA4Ux+f3+uQs70Atv\nLhgQGp6T4P3nN4gagjYib57jwPtWVlYyuzUdiYzootMIpU8phWjUFAqFzELNi2NftghiYdTYZ3jO\nG/wAvZwH3PvyhF/64M/wnTrulcUcQBfMeKmeiAmCM5lMMsUsOR4nb9wuXCaTiXZ2dlIS98HBgQ4P\nD9MhwZVKRTdv3kz9/tM//VPdunVL3/ve9/Sb3/wm9XN+fl6VSiUVnPzss8+SkHry5In29/d1cnKi\n27dv691339VXvvKV1B/QlFarpeXl5QsKHTnAUTfSeY7QYDBIGzcoxeA7Qbvdrmq1WkrWdidqNBrp\n4OBAJycn+sUvfpGOOnn99ddVqVTU7/cTyuJzgUGKwUUe1BtvvKHJZKIvf/nLevz4se7cuZM8fZQi\nR8fcuHEjGYutVivxDgVOuc8RtF6vp2azmRK4WXfkwjgCTm0dFKLzP7vckE2+BqIB4M6ldL7Tk/wh\nCp0yHxsbG4mnQUYZB0izI0H+Hl/HGCTwKfweZaYbQO4w8kwpm4zs73MdQk4P/IbxC784wuyRCuaF\n5lvyvbmBQ1QBWUCRSt8BTd+9vlF0yAqFaeFadCYOlXRuFKD7Ym4PRp/nULmMIwE75g9hJPouUd6H\n7Op0OsmA5l6KDzO/oEvMJfLS88Gk6Q5NRxldB7sjGuntxtSHDXsAACAASURBVJk7EY4oxQ1bbhi7\n0cr4fpuRJb1ARMo9CulixVk3INjaPBwOMwwmZRWYL0qu8TlM7tAh/18W2suz3uME+sJ0ryomoktT\ngcq7HT1yS9qZGOHD7+i18ONGGPR0dMsNQ677ova+02Agfyd9wphwpnODiPGw2EjC9Z0qjkB6crfX\nBXEvJm6P9fsvS6CELg7xEk6IgpdrLijj9mApm+gaF58bsD7H7oF7P/2dIK7+LgTfeDwtSMccelFX\nN4Jpk8kkCaRCYbpV3UNGo9EoY4DcuXNHn376qdrttk5OTrS/v5/CG3fu3ElFJf/8z/9cT58+1dbW\nlqRzo6XX66nf76dyDI1GQ5L05ptvSjpP1n7ttddSbSfpHMlYXV1Nhsv+/n5mmzNnyEFXFF0eAhnX\nBmfNYVR6An+/39fBwYE6nY4ePXp0wRFjzbgh57skUQhUPaf90R/9kf7xH/9RxWIx0W0ymSQkR1LG\nADk5OdHe3l7iQUdquR8E6+xsWguMHYeOarrCdgXrCsnXILX0PHTmzkC73dbTp08Tz/iGB0cnpOkh\n0SBkvqYIwzm6HCvi8/fMzEzmnE2cEeSzjxHHMk9ex52wXhEe2cU73bByB8/TN5hz5Gh0SB0dc6PH\nE8BpjnK54xVlKWOIpWSYI+iKYQzdovHhOpF5B0XyNcOuV+q3uQHukSP6HY0Nkud9HN6Hy5LF+cwN\nd482EMb2ufVdgG6sOhLlfeNadBRopJqA8rtDze//c4gUSiYS0hefMz+xZI/7xu2sLFAnqiv7aGU7\nLMr3WNzsLMObyVPmjqA5oR25cc/fDUWHLaUs7Ivw9jFwD4vAFzdbi6OX5EYZ/fawIH3CSHMm9v7g\n1frzPVxALgn3OXM3m810NAjC1xeI9xXYlXH6YnMB5kYIf/v4ozFHv3wREd93oRg9PubQaYYScuM0\nhkn4wYhyzxEh7krTeQPkxPMBWAduYDqfQlfmw9cQ9IlGB8LTFQgGymQyScgH92P0/PKXv9TGxoaW\nl5dTHtTdu3fT+Hu9no6Pj1PoAeFHXaHxeJzqPnldJL5br9e1uLiYMfjw4ieTiWq1WmbHkPPJaDTS\nwsJCBlGoVCrpfz865vj4WP1+X8fHx+noFeaCMTSbTa2traVipDwTfigWz0OYBwcHkqTvf//7WllZ\n0RdffKHV1dULKAo7KJFhoDWSUj2jer2uyWSS6i+BwJXL5zui9vf3M2jG+vp6urawsJBZe6QBsKsL\nHoanWUuOdji/cw0Dm12RXvdrOBymOlPD4TCFSqJDRugZ5cy7oA18jbPgzi4/8LAr2slkklkbvr5x\nHAhN815Hn0ajkSqVSlpPjs5hqHitp4hO0zzHh3v9OciEqLxdt0QDzA03xu10Jf2hXC6nuXc0n5Aw\n/EZzhNyPQGGMR0dHmbUfHVIvT8Fv5oS5cLDBU0EwpqIsykMZoYcb3YzPneMo23m284kbdYwLmvJu\n6O3z4EZtDCFe4IFLr/z/2BB6UlbxMwFucUtTaxGh64odr8qPJXBF5YiOv5fPHcXy0J6jHHzf+8tk\nupXqaAMLzwUHiIUngvIsxl0qZWuUuBeHAeoGT7lcTudaRYQvGlPOcBhJQOmEOaXp6dmSMh4a4/Dw\nq38H+o9Go5R87BWcCTe4MSZlK7tTediRHN7nc0Y/+U1/XNjkhXt5Jt+PixR6uWHngsLDCf5c984Z\nkyspV3L8RslzJIlvn3ZepD+VSiUjFOfn5zN5BG7U48kiiNyoo7I4Qmxubi6hHw8ePNDrr7+e0KTl\n5eWkLLe2tvT48WO1222tr69ncrNqtZpu376dnAnQCeaB5F3oSCixWCympN5qtaqDg4PE/zdv3kzX\nYvXm/f19zc7OamlpKVVuh57MB+9nbhgHhhly5fr166k/ID7UrFpZWUkJ5V5uAJSJd/zZn/2Z5ufn\ntbm5qddffz3jOC0tLenDDz9Mxmez2UzXZ2dntbGxkYx0H4PnQYE4gWRhmPua97p7jhxEBCbyyGUe\n9/z8fOKDVquls7OzlPPS7/d1dnaW5pRilh6miUoxbrDhnRhtLtelafkDlKajV55aQCK0ywMMtOhc\nR4TaDQP4pFAopDyvuA5dznj5FkdUXC64jKO5XGJu8kqycJ2+er0vngky6AYDxpmffZiH5HtaCDRl\nTuv1esYodjmSF0WCj+AP3zwRUXJaBBOcT+EXN7ScNvBxdISjHcFapbmj4O/jOfQJPe28mJc2kmh6\n6ZWrdtWu2lW7alftql21q/Zb2wtBpNyLd4vXEQXgNGkK/wJnel4SrVAoJOjYw2uO4HhYiP95t4eH\ngMTzPDXPrfFkRN7n/XcvgebhnejteB6No1LQBuSId/h2V+D2mMgHbfCmHHnxcKKPw61vwlTe/6Oj\no3S/h1l91wvHMNDHuG3Z4XYPzzE+35Lt0GqEjfGU3LuAhngdjN+PifD55FnQzcNh7s2BinmxPM/P\ngC/wjguF6Y4ovChyCDz05Vv62YXm+VCMHTrAU3iOc3NzKcE3zj88QEjNx8q1+fn5FPqan5/X7u6u\nbt++rZmZGbVarTTGlZUVraysaG9vL+UreliXM+SYM55ZLpfTLruYe3F0dJSQlGfPnqnRaKTk9u3t\nbZ2dnalaraaChYSaqLxO+KJarWowGGR2RFE2oFarpZQASemok2LxPIcPpEg6r9B+eHio69eva25u\nTrdv304oFaEsX3fuJd++fVs3b95MHi9zcXR0pPn5ed27d0/j8TjtCKQtLS2pVDo/ONqPEDk7O1Ov\n11OpVFKz2cygOu5Be94SDbTcEU34AATHQ/SMwcPo5NCwThiTo1p+BA3PJeXAk8ZBl0B5PORNdfKI\nEoCaOgpCA1FnPbLBh77Ck65H+Ix3kqzveWYRRfbjRRxB4TpzwTuRYS57I2ri9HYky/OgPAISc7I8\n1IesdmSJ++hLDH3RJ89f4hp5WRy7xPh9HplLGvoZ/vTSNi4foJXrFubIdSp8RfFpaO85rdAgpi64\nTvFxQxMPBTpPOQ+A9EJz+vt/rvwBE+KJfvGaNwSBJw56UqkzkCsan5woNFxBwhi+APlcysKRMSE9\nLnCYwnOBeCafMR6vTcJCiBPMQuK6LypPjmRBusESBZkrVsbvRpw/18OeHj6g5o3npHneGefFoZyc\npv7jdIZOMfbNd+ATD8MyL4RQPabOPPBdFlpefgbN4+EeSvV++iJEEUUB52FLF4wuXD3kJE2VEbzr\nix+F5MLGt00TXoDWMVmdkADhb+hGWPD4+FitViuFYFGEz54908bGRjoYWVI6sLhWq6W54kiaSqWS\nhO/KykoKu0nniehPnjxJQoqwGOPDIJqfn9err76awoyE7xYXFy+EIfjbDyeem5vLGFLwJvzhByWf\nnp4mo+wrX/lKSqr+zne+o1arpW63q6Ojo1TlnL4+evRI/X4/5QthgL3xxhtaXFxUpVLR+vq6Zmdn\ndf36dUnSp59+qo2NDX3pS19KBz5D04WFBbXbbR0fH6vRaGg4HCaaHh8fpwOm2Q3nBj+hynK5nMm5\ncgXP77hLNtZr43PGxTwho1jTGHvNZlPz8/NprjCaJ5PznaeEqpgL6B3DzOTM4AS4HB4MBikP8DKd\nwNx6GgX9Zjen0wKZyfrwJGaX3WwO4JnkmmG0ut7x/zE08px3fjM+FDbhMN+xiLyOubw01zGkS8DP\nhOiga5xnjE8fL59jYLLBwx1ND6tiODNGjFNo6/XO3HiL4TF3FF1+uy6DFk5HPotpGy6r81JPPEXE\n+cLpEw1T13mXtRdiSEnTE+89iRtC+g+NwWNoOMFhJI+3SvlZ9s6M/M+EONPExEGP0zrRPTcGYsMY\nMblbyu7mYwwoO09487G7co7GAnHpuPARSDCg7xiENhhIeUmSzvw8j8Y4o7c4mUzSoaW1Wi1T9yUa\nsR5Hj0aWG9gYj84fbsyiMKNQiDHtyE9unHlzRYWAjgY1dIlJvDFvzlFB5x9H2XjeeDxOxgAFJn0e\nHaUgb8XzFuhTXPyMIa4l6Ibhi9Lb399XtVpNO+bq9Xo6zoX6QeTr1Go1vfzyy5KmhlSj0UhePsKU\nHX3wC8YNdGMHGqUE9vb20jPZIo6hj5HBuieZmBwdRzLxrD2Phnv5DgbA3/zN3yT6/tu//ZsWFha0\nubmpxcXF1C9ytXZ2dtTr9TIHGq+srGh5eVnNZjPVW6I213g81nvvvadicVpry8cPL0VnqFarpXUC\naudGRql0fhB6vV5PO7ecTx1Big1j2vOHHEXnbD2nmaMcOIKuvCmNgIx2WUM+EgaTI9WOivsczs/P\nazAYqNvtXuBvrh8dHaU8HHcEPRcGY0OanlHIs8ijYox+LmC5XL5gnFA6wOWwO3OsZX8mfWc9xh2L\njN2RWuYlOoHch1zMyyFinlgzLnujg+rXyJMdjUbJmHZn3J1830HNnDJmR0ahhecMR+fSc9NcxjrI\n4u92PRk3AXj+a3T0Y3PdBZ3yDDmci8s2G0gvsLI5gtYFH2f1sBAcxpamXmhUkggbBCS/+dyNkDyh\n4te5H2MNRvY+RIal0T9HnbwvhHMQntEgQsFFVCImC8YEcASJhxqkbH0VBI2PH0ECPf0au/W8j4zR\noXH6Lk1DU6AjHq7MQ2YcYfECilL2/Cfo6kYTDQGVVyk9Gg/xkGdHoeKigV5Od+YCIeZhRgSJI2De\n3Dsaj8eZooTQi23e7hTwLt9Z5IiUG+Vs9+c9vAtl44Kf7eMYaTxzcXExVfvd39/XaDTS7du3JSmd\nUQUidXx8nIws+kPYLKLGJycn6nQ66dkYGZQCoCDe06dP030U3IQvBoNBJnzK2AqFaXKwIxCgB0dH\nR3r+/HlS3uPxOFOC4Pnz5+lcwL/7u7/T7du39YMf/EDb29uZxPg33nhDd+7c0dOnT9O7QN0ajYbW\n19c1MzOjnZ0dff7554nfvv71ryd6Ly4uJgeA+WQTAQfm0nzchK9cRp2eniZ0rFQqJVSKEIsrSd9I\n4oaGKxOuz87Oql6vZzaUgJYSembuuI7cOjs7S3PjssNLMlA+hDF6+gUIDc/k7263m4yxPP7m/dK5\nAe6IQ55D7hsOeLeHHklRcNnG+3mnPzOG4P0a1z0iQd/c2fZdgr5bMG5k8bnCqCgUCqlPrBl38vKi\nKqx9rrm+YLefyxPmEn5yGkNXdx7j+9xR9ve5QelyE4c9Jrl7n93h9eYy2sfHHEQHw/WTy3He4YZ6\nXnth5Q9KpdKFwpIwqsOWUjaj38M/fq80NVbcqne40eE+t4S5FhET3h3hQ+6PiwbDyo0ynunvjXCi\nozaSLjBFtJz9+xiKjhjFZ7iF7kyOwo4GmAsymhu1eILOzNDNi606OkPf/VkojV6vl4xrN/x4tiMo\nl4VLyf1wIeWGtKOYCB6Emgt9F4bRiHbh78oKWjpyxTuhJYKW/sZ3ouDwsBBcXqSP+fOwiHukEf73\nuffG3LA2+v1+ConBS+RAUWtJOq8jRWHNRqOhcrmc8ocI9ezt7SVPlrDXwsJCKpzJYcrQqt/va2lp\nSc+fP0/0dJ7Z39/X0tKSOp1OhsaEik5OTjJGkh8vUygU1O/3U0gKeUPeFU5NpVLJKJdvfvObun//\nvt5//33t7e1pd3dXknTv3j3duHEjoRR+bMT8/Hw66HZ3d1czMzP6gz/4A0nS9evXtbi4mKrAu9OC\ngURdH4x45oK8OvJ3XCZSvsEROx87Ctx5v1QqJcMdJRTlkZfX8C3yGBnQyx0QLyQbUysmk0lCVmN4\ny5HXGNZ3RHs0Oq935sqUsaI7nDfQJdHBos+uZGO6Bu/Mmwuuz8xky74gbxivz5O/y3kYeeeos6M5\ntLyQkst3csloGCCEDiMigyGNXvS+QlOv8C5NjQl3uhwBdSfd+RvUk888jxdaxCiO08Zln/OT69po\nLDtgEiMKrI0YhXEQhPF6VMSN3Lz2wgpygiy414oCc+UhTZWpW/ZMIorVESC3JF3ASNm8p2h5RqOI\n97qC8smMC889BEfP/Nko4RhmcwvaDRunB8LGvUAWe0TjECJuSFwWxosWPd6nx7lpbt1DD0dbPKbv\nRkg0grxCu9dEifkA9N29pwj5+uLLQxzpP9ecvyKky9iYHxcKPl9ReGKcYUzG8RMWkJQUJ7zf6/VS\niIZnOJ/hYYNIwPt40+5lu2d/fHycqg2DdjFPGDQgOghw7kFZNpvNJECePHmijY0NDQYDbW1tZeYe\nZOr09FTLy8tqtVpJCbNVHgHvypYq4o5GraysSFKqk4TiOz4+TqG44XCoYvH8rCzQHPcyEfjwEyE5\n51PmOVaEPz4+1o0bN7S2tqbd3V09ePBA0nn5h36/r7W1Nc3OzqrT6aRxzMzMaGFhQd1uV+vr63r1\n1VcTQrS8vKw333wzoQbdbjfRhm3mEXFxvkXuueE+Go20tramcrmckEEPX3nivz8LXvRTBmigOhhv\nvAc+9FAT80l/UDY4Ai6joxL2xGHWPHlefo338dzT09NMiNPDVB7BcF6DLzzfhtAzazI6rhhzvr6L\nxWlNvUKhkElKd3QK+e3IymVRjEKhkJDbvBCs6yg3YFyGgzQjx5hHN3SjQeKGtiPHUVf6/9Bzfn4+\nJYI7vSLw4PIbY9aRJx8jY3FgwkOzMUeU5t/3CIbznzsR3i9/xmV/+xiY88vaVfmDq3bVrtpVu2pX\n7apdtf9le2GhPazkeHI0Vr+jFzHG7SEch57xXGOozGPQvsst7uCIUDSWvlv7niiN9+ZWdR7E7u9z\nBMWh0Jij5Pd5oqaHIfC4SU52KHoymaRdLx5j9nc7XT3M6t5iTOaLfXXP06HqiCwxnyAgoBT+zLzQ\nlHvi5DA4H7nnEPuCV4ZH4fPrf8fkZ7xkUCafE88B8Gsk+0JTP9yWcXg+w2QySWEoShMcHh5qZmZG\n1Wo15RLhMTqSyU6piJbG8CJrCVSHa6wZeP/o6CiF70BboLdXfiYE+PbbbyeUhjyjVqulZrOZPH6Q\nHfpZKBRSgja79OgDzwdl4L7RaKRarZaKY5IL5bzJTkHCG3HNg9SMx+PM2Z3j8TjlVfFu6Txct7y8\nrGKxqO3tbd26dUubm5uSzsszfPbZZ+r3+ynna2NjQ5LSurxz546Wlpa0tLSkO3fuSJLu3r2bvGd2\nusFvlJIYDAbJ4wV1mZubS+VeGJMj1YTT/ABneNFzq2LVc3gFnnQaELJCzvJM37zg/YBu8AnoU8wP\njSg7c0zpDT+CxMN9nr5QKBRSmBVEknHFXXSRTq4TWBPIQEddPNRfLpcTv3nKiSMl8CIHSzNOR+49\njyeGoNBbCwsLaecqYwA543sxBIkcgUYeMuO6pynwXD73VBau0U8/cJpxcJ0ctzwE0HWAz72P2xFH\nvhPDlz4unutzzTgdtWPMLs89n9jXEbrb0T34Im7M4t68aAfthe3ak7L5Qh7aizHPmBDnTIVyizkB\n0lRB+24Qh+c89IdB4u+MuVL+PhohFZ7n59e5AeaGBWGFmNvi4UoPGWAEIDDdyCJMgZCOyd2zs7Mp\njyLmn9Avvu8LA0aGSV0A+/z5gmIO3AB25vUdZg5VM59Oa38mi5jdYv58X7D+2+nOHHkOhQsbTwBl\n3t1gdoHDnBIacQMMIes86bSCtz0USp9RhMDYKBoXTswDhhQ8i5FFjg7PBIanVhPPJEcIh4TDhqXp\nUR/VajUpY3ik2Wzq8ePH6vV6un79ura2tjIGEXk8sXYYIeilpaWMESGdVxJ3nlxcXMzMKWHHaDSy\n65e5hAZeOoDf9Xo9HXHCZ4Q+l5eXM7zI92/evKmzszPt7++n+1566SVtbm7q4OAgbfGHT/v9vmZm\nZlSv11Wr1VLld655MrRXjPZSAyRvsyszHtp+cnKSSh6g6DEgOfbG+cplDrxIrlretnaMfHa2ebKx\nO7cuF3xrOUnmbpAx/mKxmHbzEj72eWKN+LrBKKPf165dS7TZ39/X/v5+UoieK0gf4RHfXetJ+O54\n8H5P8vZ58jBYdPDJOxuNpqUIvKo/zcfl72T9u87zkJ479JJS7TRSFKKBwbvQbZ7LRp/43FNFnDZx\nfpHlOKHuJMZDlX2M0RmPG5Ccv/IMFX+fy1N/j+sc9Bx0dX5i7vicMKyk5LDgQLizDV/G0KK3F2pI\nuVESE/fi99wTcIJjQV6WPBm3lXKNnBGIHHNvILQLDe8PfXEmdSvWDQGuMcEYQM587j24cYYQQLl2\nOp2EAsQjEHwnGIoUb8HzaWg+Nvci4uKPiA33+Xj8+/7bcy1Y+NzvBihjcMPM54u5dM/W+YfF5tei\n9+xIHuN3LwSaci+L3sfA1m0UhSNkjlZFh8ARMObMx+bz4kUZOZ3ehYkn6iIASOB2Id9ut9Xtdi+c\nt+aJoAsLC1paWkr39Xq9dJwLXrgfsFsqlfT06VPNzs7qlVde0ccff5yZ52vXrmlubi4lpdO84KIr\nPeiLJ+65Lxh4vmPNjVA8Z3jYUU7qD/G+g4ODC7l0IFWNRiMZIdK54m80Gtrc3NTy8nI6mBmHpFar\naW1tLd0vSbdv3047VkEXXJkOBgN1Oh212+3ER1wjGd/5h7nAAUCper0vDghm3WDAzs7OqlqtJofE\neRTjGuXmCtHRdwxsPxhbyh4/43XicFRBOKNc8N1x7igNBgO1Wi2tra0lxNydIMbKO/mMQqrPnj1T\nr9fLlH/w5HNQHYzavOTjaARwzdeTO708PxpU9N1lOwn7OGFuuLrjhdHiaE2j0dB4PE5lPFwusm4c\n4Y7INHzmaBXvRR67E4NsgQ9cfqEPQSYjmOBghvcFJ9hlvRtn3Iej7cgp/fSiqcxhNLppPk7PCaW5\n4Q1tGTsFSBmrjz3SMLYXYki5weBKLn7mqIF7V27YYBx42Mq9DAwB/57fx3e8MVGXWcHc4wuC+7xf\n3lhkLJi4w8x3gvkY4k4BQhmS0inejnDF/pbL5YRauBCRpohcTA6FST3k5ve5gHGUi7njXR7mhJYg\nCA6VuvCkX96gD7uWomHkcxLhWIRCRMrcwHJe4HO/5vOM8ILWcZ49fJdn2Pnf7vGcnZ1pOBwmRe0V\n0V15ueBHWfMuR+vOzs7r+kwmk5TEToP+JCWfnU3PTPNwtyd2S+eKHaH405/+VN1uV6+99pqk8yrk\npVJJOzs7mp2dVaPRSMYRAovQRblcTsiKJHU6HQ2HQzUajQyi5cUroSXjAP1wD9drvRBuQZlGBLhU\nOt9R2W63Va1Wtbq6Kuk8FMh8YGRRK+vp06cqFM6TZ3d2dlSr1bS8vCxpuhbb7bYODg6SIUtfQYxO\nT0/TepSUhDdhMUrAOJ9AE1c08OHi4mIyiuA1SiJgPPuZeb6BAqXiiKukhKwQrqJ5gj686SEz5ATG\nlKcRuIJ2HgZhbTQaSR5FpN6dZN95yc7R/f39lKzvdEPx+9rDoMpbhy7X/Fm8j3mLUQopmxjOnDF2\nR0L4Lu9wkMD7Ca1Ho1Faj466QwfWo+sqlz8eGpOyTpqPnf9dzvNZ/B9edR3gYcUoazEuYz9jgrh0\nsRI6z4jOPe+IwAV84oag09SdefhUOl8zbsz6+JibSBtvLyxHKi9kJk0VWYQAHQFy1EnKHiAcLXNH\nsBwqvsxAkrIeBlCtowe0PIXo3pR7EL51HkMqz8LFI6fPHC8B3Hh2dpZg8ajIo6EUDVanGcYTNPOc\nBv72ReRM5ULGPTP3SpyxGZfDtB6iJJTJos9Ddng2z5GUqu/SPw9h8C4XPh66dFp7P+l7Xi4UC9MV\nkBuRKDc3RPNCopPJJOUfSVNkqVwuq1arXTDg4Xno7eEHDI3xeJwQCqebdG4cYCBJSmhFq9VSp9PJ\n5MIgpFHmbgwPh0PNz89rbm5OlUpFrVYrveOtt97SgwcPEvo1mUwyyrvT6ajT6aR8plarleaiUqmk\nHXClUint2qvVaolWPu/c52sIw9DlCoYViATzjiKAfhyYLJ0bNr1eLxnQrVYr0ebu3buSzsOR8CRz\nwa5FQm0YI5JSeLDZbGp1dVWTySQZks+fP09ozdLSUmaXJP0ndFQsFtN7OKjcUVU3+KEbx+vQ+J6v\nF6eZhzucD0FffY1G5YJ8hZaO8tEYkxuSyDQUsstaDsYmf4n7yG8rFovpmCTeF3PzYjqAh4MceXDa\nRQfSDXJfi1xzneMKHsV8fHycZFBUyMhUDA6eQ4t6BpmLfIm5t64/HAX2e/29jirSD67H6I+HF/ke\n+ol3gy5JyuUlR/8JU8aUFEekGEdEEpHREcn1eYifMca8cGGv10sGvfclomx57YWF9rCSo+WJcnJm\n8Obb4Pnfn4FhIE0RF9plMVhX6DQEtYd9+NzDSW6ouSCKW1URbCT+ej4WC8WtaDfm/DgHXxTdbjfl\nungOkDRNrEeoRIPQtxnHxc3CdG/V0So8fZ4X0SpHnXyRuoHhuQaOvsXxQxc3WmKIyo0hD236nHiM\nO0LrvuBc8DD3LgAcvYoOgfOeP9/7g+Dz7zlqyHV/hzsVjspEDx0Uge85UgGfwDd4uvSZBGeUAAab\noxI4AKAqfg4dob6dnR3Nzc1pZWUlw3uHh4cJNvcz3ECpNjY2knDDURgOh6pWq8kYALGTsoVR3YBl\nXZGXw2eOFg+Hw7QOMZKgL+u+3++rUqlkio4SEqvVarp165YODw/TOm+1WlpeXk5lEU5OThJaRaHU\no6Mj7e7uZsofNBqNZEyAhMEbjka3Wq3kcNBfR04xJJkLHDme6xW64TNHQKQpkgGPuzHo/Op1mJzf\nxuNxJgEbRBMDx2tNxdxGShsUi9M6adFZcd7n/cgML+9BhfW8/CRPXHfnimc6XVxfFIvFdO4hSfDR\nGeczlxHoAhBCn1+/n7G6I45T4mvR54nP3fhgfqJMdFnvecbIDebMk61xRugrtPPQGPNKyRdfZ5Gu\nPjZpGn5nc4obTvydl5cEnTxE6A5VnFOXl5737IYiNkW/309OixvHEaSI7ar8wVW7alftql21q3bV\nrtr/sr0QRAqL0eGyGOby+Lt0bvV60bKIZrgF7l6WpnlYRwAAIABJREFUw5ERLnWPztExLGiSa90b\n43sxd4O+eJzcty7H+7zPHp6JSB1eF/TwEBxhnHq9fgEB8rHiDbg35JZ+tLTxKkhU9O/i7UAbpy/0\nxGuLCYlA9XhDsS/s7oIm0AjvM4ZuQSfdW/KxxDE73O5zH3O+3BuLKBPfiV6W52F5zpmP0Z8TE9VB\nQvCu47lwjMPzgCibgJcYn0li6Onp6YWigXjwEQEkNEWBUA8lg4IQUjs8PExzvLu7m1CuVquVQW7r\n9bru3buXSbZmXP1+X9vb27pz546Gw6FarVZmLZMDc/36dVWr1QthBzzshYUFDQaDlM/FPHh+hoci\n8DzhJxA5r/rN2YEehvMkX6/83Ww2U8ju7OwsHVgsKSFXoHeFwrS4nyMglCpxXiQVgP44gjYanVf7\ndj6E1+bm5lSv11WpVFJ4zJvzaVxrjm47euu8GkP30JhwuydcU6iU8caQN9/znarMEzssvSyDNK0I\nT+5YPG7MkXN4BNqAUHvaAeNnPkAgXA6Dxnq0ItLUEXDG5+vLS7j4PPO/o+0gc6Br/j4PsXp+sfON\nI+YxGsNYR6NRWjOe1H9ZXpCvJ66NRudlSCgc7HPqiBCywyNGbExgjI4Qud6JuXrQir8jwu95ZP7+\nKIdjdIGxlEqljAxmfV7WXlhoD4HkQhMmdKOD5nF+h0AhMELXBYpDiRFOdQZ3uFPKLghnFr+P5/J9\n7xv9Q5FL2WTz2D/uY3HH2DC5WrzLc5TYEeK7UqTp4sNgcLiT5wDh+tjjeKCrM7JvKfYF7PlMHhbk\nmv9G2NJXDARqYUV4mAUQIVYWALT1vvh8xnExZu5zvvAcCJ8jz3vK29gQDS7PJ0PxewjXhdtoNEo7\nvuI5dQhe4HMvY+BHo2BUeV8IYdF/KXuUj9cHkqb5WhhNvkaHw2HKgeF8PM6hW1lZ0f7+vsrlstbX\n1/XFF1+kkM6NGze0sLCgarWacSQY93A41IMHD5KgJSTEmX67u7va29tTo9HI5F0xTgwFD4EeHR1p\nPB6nhHXPyUMpQ08EpzQ9p63b7aat+t7Xs7Mz1ev1FKaCh4fDoRYXF1Wr1VIY/fPPP5ekZIhxkLcr\ndniNWk+EO+AZjAHyUJzffKOJ8ypOAJXgnQ+LxWIKGeXlfDj/ezmVmGODonGe93URc+tmZmbUbDZT\nyMplKPlHfIbRPxgMLhxjkrfGfG4ZPzIc2eKyIO6o9jQMHDNoFp1rfij1IU03PtAXf4enUuSFhtxR\njGkuHK+CU+Iyx8N4HlL0OWRMl+kcxsF9HuKLYTr4BjkWUxh6vV7iYz+Wx0uEcF+sHelz4Dt9XUe7\nUSspY+xFeRnH6LwL7aKcdJuCcCONOY285+2FIVIoWzdUnACxhpNvHwWJ4FqMgeYRJ8aRaUySE4oE\nPBZUzK+JBoJ7AihLR1G8nxhMbix4P5lkzyFA4dP36A1yOrqfxcXY+C4Lwxk3eofeQHmgqfeP/0GQ\n8oxU/z7XnDbRw6C/3OPCDhpwj+/U8DnyuXHPKjaMU4SbC2Ke44nJvpvJ+x8NYUd3oJ0vWBcwzjd4\n024Euwfr3hJ955ko99PT0xTbp98Yu9Hz9fGT5OuJ75zhJp2jCRQHxVA5OjpSs9nUnTt3UkFK6mCd\nnJxocXExIUnSuWIkgZvSEYyh0WhoOBxqZ2dH7XY7c5YdXjle/P7+vvb29iQp1bmamZlJRo/zEMaT\nF3d0/rp27VpS3l5jqt1uq9lsql6vJ/TBd2CBrrTbbTUajTR+0DAMv4ODgwwiA1oHmkd/oTXor+fm\nkP8yHo/VarUyCsNRY+QT95XL5VTmIDYcFhR3dLBYe6PR6ELNL3f0+D8mdjvq7oU+eaaXZYBuo9Eo\nc2ZgrC3F+nalTPK25zG6sQTtPe+Q53Edg9aNHnd8HBXBIHGHDkfBd/T5sUQ0N8xcJnk/oyPohm5E\n0FkPzEVE1uDXvIRq3umomOuEaHi5nnVD17/HmNgtGo3a+H7+j7miMUrgfB1lu+v1GOVhTIwz6ieu\nxYiJ63mQWX9HXo417YWVP2CSvXPA79HC9glD8XtyGYwNokFjEUVPTsomW8ekZfrg3pMjYlJWEcZJ\ndOQiImQIufg+/s9DSHzscSFijOL1OUyNgvV73HBFAMEkjtxgBDrCxjU+82J89NWFex6NXajSFzcM\nYpK+G5RR+DN2Ry4dUnceiIKNcUNDpzPj8ARgf7Y/x+nqhil9cx5xj9ZDdAhNVz4YLzMzM6lQY/TK\nxuOxBoOBzs7O6zB51ft+v5+ZTzfOR6ORWq2WisWims1mZkcbCgpjxsOCa2trWl9f1+LiopaXl3V2\ndpYqTVerVS0sLCRE5tq1a3rppZckTcN+MzMz2tvb0/HxsZaWltL4OCC53+/ryZMnevjwYaL322+/\nnalBFR2ier2e+LTb7aYQEn0hxOWhn1qtlgxEjF6eS82sYrGYUUiSdOvWrVSramVlRfV6PaNoQIY2\nNjYyPAdqzLMGg0EqjcC4QK1AIZxnkBuTySSF9nBwfF1ggGBEM1aMNmgaEWeXCePxOJVrcLq4IwTv\nerjJEVUKh8LLlG/o9/sJIfME/pOTE1WrVQ2Hw/R+nokxByLjicq0GNpiXB6Kjjt2oaevwxhNQK5y\nDQSOdcizjo6OkqEML+bJfcLv0bGJ6zo2n2saKRJuNESEjM/9+S6XcK5dvrkD6062n5JAnz0dh2cR\nMne56O/IQ4yYQ9cJjqhFMMTnijl0fenrB+NWypao4DseVYImDoDQN/9uXnth5Q+kiyEkfrvnz/dg\nzsiEKJr4PClb5ZrnRw+M+9w74F6ER4TGvS/+Tmd4R2VoPvlu4fv1+Ey8D645EuLGGl6XMx39xkCN\nwobvIDB9YTP+iBa5IuO6LwYY2+FhaOPeZRQehFtAbdyw8bF680UQjWEXir5wfOzej4hG8nekNzQG\nxXKUw9FN9+SlaQ5JNGjpC1vg5+bmVK1W03VQExSDw+2gO9xP8U3o7R4dh+UyDmgzGAwyVbfdY+eI\nEp+LmZkZ3bp1K5VpYO11Oh198cUXWl9fV7Va1bNnz1JtppWVlWRcg5RwX7FY1Guvvaaf/exnSQCT\nk/Xo0SNJ0le/+tWUS+F5IxzKu7e3l8lroq/Ly8t6/vy5ZmZmtLq6mikb4YUu3YuGP8mD8aNuWIvM\n77NnzzJClu+vrq6m3XnM4Xg8Tjk+boCheEHYyFOBZ4rFYqZ6uedtTCbnRy4RSozoZ61Wu6BIL3Me\nnYd5nh8P5OEOz1V0Be1Oj4eicEbcwfS++hz4WsWYg+cdOfZ1hMPosg9ax13XvBtaRjnkSLKjfMgm\n6Bl5kdwa38rP2PNQZPriO089SoGR6norprf4uJze/O8Ii/fHHXGXYYyPH78XOnNPNHqi3Isy03WQ\nP5PfLjv9Gs+KRpQ/33PnHL3nur/P6ev2RHy+j8H122XthSFS0chwQyYaGRGS88/cI3MDgGsolMss\nTUfG/D4IDTP6pPN5XiKyNI0x+4T6IkHQRBr4oo+QL/f5Ncab56VhkLl1DsLm1z1fI2+B8S6Hvwk3\nYThFww6kxJNcHbFBUXHNi/X5Nl+nFfPl0LAbQTHp0FG9aNj4QomwMYYgfYn0duPMBa17xXmIYl6h\nOeh9dnaWcnNAZ3guBgtz5/yNouj3+9rb28uc1VUul5NRsbi4qPX19YRm+JEwEdIej8fpSJlbt25l\nwsGdTkeTySTVpapUKplK23Nzczo4OEjHzGAIraysaDgcamlpSW+88Ybu3r2rnZ0dSefe/PLysh4+\nfKhKpaJarZbZ/j43N5feu7y8nMbeaDTU7Xb1gx/8QMPhUOvr67p582bqz82bN5MCu3btmiqVSqb8\ng5/RWavVEgKIHOE9cYMGZ/7t7e2pVDqv8s41jDlChSi+ZrOZQqU80z1ijm1xj5lroMyj0ShTTBK5\n5Xlu9NPRRWScG06eM+JhHvK+CPGQjA+fOMobHRt39Hxtcy/rGNTJEQcadYV83Z2cnCS6ttvtzLpC\nxvLj8gQnydcktGHduzFPX1xWRLnvTpWvcYwWp4HLG3eqHTmiWjm6ydMIMDw4ncLpDV9joHpfaPQ3\nIkf+OTrMdYRHBdzw4R6X59FY8fvzDEeXt1yj72xggHf9xAJ3eJyn4GU3vrnf590NTje46bPzQDTS\nfFy/zZC6Kn9w1a7aVbtqV+2qXbWr9r9sLzS0Fy1MrMq4s0fKFr+MaBXJeDFkhPXru/ncUsaad4+F\nZ/JO/8019wzyIMdoPXMtjimiEzFBmc/xOt0z9vt5L8+gb3hkjNefGyF/p6l7sTEUhWeKNxP7Sh6A\npMwZSR4y4tkewsBLBZaPuzdIFo5esHvakb5eAC/Gznl/pLc0RSH4gd6efE8ozufE8y/yaMp7QBAc\nlej1eims46FoPCveRSiHRjjppZdeUqVSyXhWJOeyRvCEgcMpXMj3Jen69eu6efNmJkzhOULkwTBe\nEm5B0zj3rFarZSrw1+t1HR8f6/3339fKykqmyvrp6aleeeUVvfrqq/rpT3+aEsrX19dTUjioJTRu\nNBpqt9uan5/Xl7/8ZZ2enmpjY0Nra2uSlBCg1157Le3cgzaLi4vqdDoJ5XPecPQABNDlFeGmcrmc\nkCRpyuuERzz3hjybUqmkfr+vdrudyQGs1+uZs+Kcx2dnZzM5bI4cUhYBOefP9NBODPnH6tlRniwu\nLmZQEmjBmgeduAyRKhQKF8JUhEVBUTxFww9e9yOC2HnJYe2O9Li8uCzMeHZ2lo7f8fF7qkRML3F9\n4PzGeAm5OTrlOUGOULMu6JufFiBNiy27bPD3gVDFHDDkl48/6hzPJ3ZU2fWTy1/e7UfqRP0BH/A+\nL6kCiuwomfNU3i5RR6cI27JuXe7FKBR9dR3k9IsoMo359p3hUX/HcKd//tvaCzGkXAG7wvBtkXlG\nhw8o1p3Ky6WJho3HfTFQYtyWZ+T97c/knR7HxjDx6uvOUHmwob8nT5j7fQiqGD/mcw/deR8RCm4U\nRIXtfXVI1KuUx37mJekjOKLQIAEWmsd4P1vDI4zqELL3w+mdZwzzfU9I90RK+MHDdD4WX4get6cP\nhUIhkxjs8xgTWn0c8L7fh/Kl9IGPE2UymUxS2QFCPAgDkjzZgsx9nU4n5UB1u900F5VKJRk7lLIg\n1Lezs6OtrS3Nzs6mvCT6WqvV1Gw2Ux6UH1o7mZzn0NTrdTWbzZTzIZ0fZFyr1TQajVJYkGTrfr+v\n2dnZVAX83r17qWwA32F+Dw8Pk5Ld2dnRysqKvvGNb2h9fV2dTicl/DL+jY0NLS0tpRAY4yD5vVqt\nplAS97EumDs/gYCE9fX1dZVKJT158iTxzeHhYeLjw8PDzGG/3EdOFzvXpKmCYZ16uQvCugh+D6/B\nU3y33++nd87Pz6ter2tlZeVCjhC8xU4zDzXhsHlaAv2Efq6Y3chmzbrz4XKYH56T91w2NvBMQtWe\nOxlDNxiMLpfcyKHOFErf0zx8PUtT5Zw3BvgZ2rmT6degnctVrnsNKfriOWDQimuj0ShzDJYb+Hlh\nVsbhBpXTl/64DIrGhNPVZZTfE8OlkpLjxVqNG5D8XV7r0MPZlFZhDl0H+Pj9ndSWuywM53TBgKY/\nseyP0yLqmbjDMLYXhkh5zPj/q7myzLO+EUTRg+Q91FpxQ8aVKj/+mS+2SGAEQhxLRKpcCdEvR8jc\nWHJG94WIhc+z4sKPnqN7tQhRvgeNpGxNDZ7lSt8XdqzrhLCNxzz4vRiT8X1sAZemxrBvUV9cXNR4\nPE04ZsyMwenGc6NR7c0NV0c882LifM8VgiuhiAa4oI2Cxb1b7vW5cyOLuk7j8Vi7u7sqlUopURtj\nAMHmieGDwUAHBwfp0N8opClTcOPGjZRvQf89Afrg4CA9c35+Xjs7O+k4lnq9rhs3bqQ+j0Yj7e3t\nXdgNOj8/r+XlZa2srKQE7T/8wz9M4zs+Pk41oA4ODvT8+XNJ50jO4uKiCoWChsOhNjY29Hu/93uS\npO9973tqt9u6du2ahsNh5nwvvOdGo5F2dvX7/eTRbm5upnPtfDerpHSe1sLCQjrj77IdqD7vXuD0\nyZMnOjw8TP3hfMLDw0M1Go1UM4n7MZiQRfQT55E+sjtKOs/lYr2g4P0IFlCw+fl5ra2tZRTt3Nyc\narVaoj1j8YR5jDTPS5mbm9PJyUnGoJEuJuPCC87PvhvXZbQrIMpS+C4ul3kcFcM4MPSYb2/uCHpD\nHrrij3KY8XteqctoFLAraMbFNZf7nj9E3/gNneJubUnp0HGe48f8QE8vVePNUTn+93f7/Ph3XGZF\no8MjG1EuwhvIz7ixBfTQnQ/0rkeUXLdE5Mh36kd0z/vNNXJxvZ/8dqdIyj9jNfIFfO15he7MX9Ze\naGgvfuYejn8nWolu9HiiX9x9Fqtrx/c508SwEf+zeOIi8V0SUci4ceJojRtYeUiYe3A0klD9fp9g\nWhw7cDhhHEeu4vj43xPRoSmL0Q1H0AeMGEdzHN1hfrjGOykG6coNZYPiYzFSI8u/616bG9f0L9Iz\n/h0RxzyliRDzOYyhAPckfeG5F5YHZ/PjZ5AVi0VtbGyoXq+ncJ10zsP9fl+tVkuDweCCl1gsFtNZ\ndGdnZ6l6tiS9+uqrmpubS4U6MV4ODw/VbrdTeNEF040bN/T2229rcXFR9XpdhUIhKbB2u629vT3d\nu3dPMzMzevLkScaQmp+f19LSUkooBgVot9taXFzUycmJKpWKBoNBotvi4mIKG/V6Pc3MzOi1115L\n8/yjH/0onc3nybiFQiGdNUl4cG5uLhmgJO/Do76+KSIqKSFWNEcoozc/Go10eHiYDJ7hcJjmaW1t\nTaenp8mwdXSMit+sj1hKxStzgyZJ00Oi5+fnk2HrmwngVWpRYaguLi6mUC4omSssxsX8uAHmRReL\nxelZjhwuGxPNY/Ix97oc9MR4D3kyj1xHtsGL/js6KnlIiitHd4b9e6BJ/l3vT146B/c5Yu46iHWI\ngo7Iuctrn3uMEeRuRD1w1kAaY9qG089lE2kgOErufGLwwOcxhMV4oJPrC3fc3ZH2sDN0YcygcU7z\n+L6oe6VsTauo/91gxCn0cLUbUW5MsksTfvJNEcgK31R1WUpPXnshhpTHd71zEVZ1uD2iQ/GIBYdz\n4+4NRwcgKu93RoxKGGPJr7mFy8J3YeJVkn18LnjyGAMhlGcscY+jJdALlMLDA5KSp8nnvMNRN++X\n/x29HxcMbtRFFEy6KIAQxHEO3DBCkTvaFz0p/+19dqHCWHlm5C3vI8aw84H3Jc5PHCd8E2H3+Ky4\nRVk6V5CTySQhPZQHoB0fH6dilpJSjg+KMdJkNBql42Lu3LmTnnl0dKQnT56o1Wpl6NHtdtXr9VSv\n1/Xmm29qc3Mz4wnjkXW7XT18+DDVOPp/2DuTH0mzq+w/MWRmzBE51VzuqnKVwe1u2thmYQMWyF6w\nQLDA8sIbNkjwB/AHIGGJHYIVYo+EkNhhscFCCATGlsCiy91tuqvb1V1DjpEZGWMOMXyL+H43nvdU\nlBeWvq+8yCuVsiLeeN/33nPPPcNzzj33F37hF/SlL31Jq6urqYAm/dzY2FC5XNaHH36oK1euZDzF\ner2uvb091Wq1lMuEAULJBsoZuKL5xV/8RQ2HQ73//vuaTCYJ9ZHm/L2xsZEOBC4UClpfX08hQnYO\nzWYzPXv2LB1hIWVDRqVSSY1GI9EfIwNDxY341dXVZPhtbGxof38/7UwsFova2tpSpVLR+fl5JtQm\nLarST6dTHR0dpbngeCcKi+7v72fyxwjB1ev1tBsSfsMoZbedoyteniKirsViMdHfETCcB+jnaJXn\ns7COfe3BX4TvY30m593xeJxBnTxUFHP5YlglIkTugPv7YijUZQHVvCPiAQ2WKU1kho8d2YaBBdoW\nZQ9oJAaGyxdKYVxcXKT8O+dRaOe7mV3Xca8bjDhI6AqXQZ6H6boMvkA+x5AwNHR9Rx9cF2PEuXEO\nksp6ipEP5i8ada4TPUrjBhyOvId83Rn2fvJcvvPwpfMA7/u5N6QkvaBMXwZdSgs0x8NbXFuWFxTD\nNq6o47OlbK0mKZt8iRHjCjMmuNEQbjAyyak8i78RDaI/XgkY5nQhuAy1c0TGFw0CCBrBxMvQomXh\nMfdoPDfBjR1JGW9gGWLj/WEcFG1bZtz4/EkLxYYR6d4V73PjOyJA0Dl6phGJ8rg9itT77v3x+YyL\nyz2xOD62+t+5cyezHf/s7EzPnz9Xr9dTuVxWo9FIyAN5UYRoff6Y48FgoGKxqPv376dnPnnyRLu7\nuwn5Ojw8TPkH9+/f1+uvv65Go6HpdKqTk5NkLD158kQffvihnjx5onv37unLX/6y7t+/L2muEHZ2\ndlI19OPjY925c0fSPBH90aNHunXrVqrUDa0wtnK5nI6Pj7W1taVOpyNpjsbVarVkeFDQU5qfbVev\n1/Xaa6/pv//7v1UsLorZlkol3bp1K/ULw5FxsKan06n6/b46nU4mFA0iihHCfHLGIHPp/IwSGgwG\nqtfr+pVf+RU9e/ZM0hzlY47q9XpGEDuqM51OVS6X0zgwgAjDFgqFlBs2nU5VrVbVbreTse2hcnLq\n6H9Md5hOF8U13Rlgw4I7DTRQlUKhkEFAkQcYLKxDV2oeuvH6Y6Di1FviPVyjeXV/xkCfXQb4HPr2\nf5fR/P5ljuMymYDMjmkOPk8YQq7YY0jd++IoDzLQHWE3XjC24BXQQ2rJeVjbK5s7qkdfHWFx9Gw2\nW+T9scnHmzujGKM+Du+HX3OH1HUbMgqa+LqIqGHUifH+6NDmcrkMiABN6SM868aiz7ejuq4nYuN7\nf05sl+UPLttlu2yX7bJdtst22X7G9kpDe26dT6fT5MViSTu64DAcno+UtYpBlRwhoUWPDQvUkRKP\nj3v/PLbtFq9XwKbl8/lMbpJDiR5KdCg6wot+HIB7HctCTo5ERTTId6q4FwQ9HEHyQ0OlRTJvDF2B\nxkHrZYX3JL2QC+G7uE5PT3VycpLCMHhMjjbGEgJOD0cOPXbv3owjf+7x8AxHtvg9tOdIlkIheyYi\n4Q7CmiQO0xeeC/+ANEjz3KObN2+muT08PEzb/GezmdbX17W1tZWB3aE7OUXc60m3FxcXKaTV6XT0\nv//7v5IW4Ya9vT3dunVLb731lq5cuSJJCTV9++239d5772k4HGaS+3/9139dv/u7v5sKJHJky7vv\nvqu1tbWU/P2lL31Jn/nMZyRJH3zwge7fv6/BYJBy4Ci6Se5Xr9fT1atX1W63E5JTrVa1vr6eksW7\n3W5CZCaTeXXyUqmUPGmOO8nn8zo8PNTW1lYKAcQzEx0NXl9fT7xEQiweP+ERflsqlZKH7+fVgUav\nr6+r0+mo1+slnqZvIAVnZ2cpfBbLlziy6Ghwp9PJhCdB1yqVik5PT9O5gzTWCYnzhC6LxWLKr5rN\nZpkxeOh7OBxmKqmDiudyucy8QLOzs7OE8pGYzpg8ZSGivJ6rw3rx/CfQW0JunpRNLmbc3eboDQWC\nvTmCwO5EaY6Gx9MTXMYgg+AHR6T8N45Yx7AUuYWMD7lPOJmGjGJtR1SeMReL86K73hfkvW+coTnS\nwljon/fVc9Ocn3h3TIXxXe4uh52OoF2MExnMrj7WKrRhXXiIT8rmspHL5PobuRw3Ifih79DHedTR\nSqcFctfDiVyLvLCsvdLQnpQ95sMhQBSrtFAKhUIhwb8x+ZvJdXjOE3sjZOehwhhLdQibWi0uaIHE\nHZZlDBgXCFRPOEXYAxM7zIjh4VtsvU8ueDx8xBhiXSPot2wbqn+m327YOYQew6YYuzCeL9IIIUfD\nxnPRvB6SH67L2B36J0fA4WJpsUXWF2dcGNGY8t/E3/FMfgvP+UKM74jXeMfR0ZGuXr2q119/XdI8\nFDUYDPTkyROdnp5qdXU1JQejCKCNCylCT4RaXGGdnZ1pY2NDa2trev78earhJM1zj+7fv69f/dVf\n1XQ6rzSPkfX06VOdnJykPJpcLqevfvWrkqTPf/7zms1mevr0qd5++22dnZ0l46XZbOrs7EwfffSR\nfu/3fk/NZlP/8A//IEn6whe+oE6nk0JOBwcHqaZTo9HQ0dFRRily1h6J0iTck0/BtW63m8Jh7O6T\npN3d3RROKxaL6eBinjscDjUYDFIJAEnJsNnc3FQ+n1en00nV2TG6qMWFYvN16gqnXq9nQnT0OeZn\n8F25XM7MG8fHjMfjlEt1cXGhg4ODZEg1Gg3NZrPEL+7soEhWVlaSjIqGg4fEWB+Mp1KpJFlCvpKf\n6chZeYyH44I8/OJj9PUXw0HIUd9F7E7WaDRK8oAdrNxHf2NYhbIVvBfjnfuiLkCeUHrCFbfLNpdR\n0Wnh2fl8PmOc4TRLC8cHg4ISIR4SpJGDhkPmNMHo8qRv+IR3oadimskyYzOmNcTf+mefP5dr5IBJ\n2dQFaEZf/HeM4fT0NPGx76Jz+eppLM67MYyMUQXPY6BBE59znBjo4ZXj+T3Nx+o0cgPvZe2VGFJu\nKPhxEK7Io3XqExCRqlxucVaZx+F5xstivW5tR0TDUQxPgKVv0cCSlLGafXcE1zxnyN/HgliWz8PC\n5bu4CLgvjg/r2xnSx+zjcXQOertgdAZ3uvNbN7b4PcIyGoLQ5PT0NOXJ+JlgESX0RYMQdsOV/0cv\n0xEpWkwe5J+PNyKf0ViibkmM58Ob5OJ87nOfyxSlfP/991UsFlWv15Mz4CgnPFAozI/RQNGurq6m\nBNyoTDY3N1UozI8pYVfXa6+9JmmOgF1cXOjjjz/Ww4cPdXp6qs3NTUlzhGhlZUX7+/t688039cYb\nbyQj4/Hjx/rud7+r6XSqO/83lwuaPn36VLdu3dI3v/lN7e/v6x//8R/1jW98Q9LcsGm323rw4IGe\nPXuWMU6fPn2aFMZsNlO3202Gy7Vr1zQajdTpdFSr1bS5uZnuY+MGByxTiwq6oPAprTAej9ORLRsb\nG1pZWdHBwUE6JNllTqPRyORCuWEH7yJzlhlCppsiAAAgAElEQVRHp6enaWesND+SZjgcZoxCFJ8n\nN5NT6CU/JKXiorncoihhq9VKNAPZJM9NWmz9hj94JgdWk7wPcg3dcIBAIDCUY86Rrx/y8OBfrnkC\nNDxMf1nzvtNvMsluGuA4Igzo2Sx7cDnyHX7yYr8gaM4T0DTumHaEyHWL6wuXc1EOkd8GasEuRu7D\nkJhOF6VFuObRhOiUg+6DfjvKRV4VPOtOcsw34tnQG7kG/VyGOV19jMhT14duaKFH/H7oTdFc1z88\nE7nMvPuGERBR74e3QqGQduBGlM/H7GgVQAY8Fw0kDDDXwe4sxSgI8xeNTm+vxJDyRefGEwsOIsRE\nSAScewMOm7KwIQDKtVAoZGpbSItznJZNoDMU/YzJ3whYfidlk7dBFqLydsjVkRVHY3yxwfguDJyJ\nQKVisjj9w1pflnTpXpsLTf+Nf/YxuDHjY0LYMfaYfI1wyOVyGU8c796NMd7HYojoEYLU++EGpRtW\nbvDCY7lc7oWKu8wdxlUut9ghyli8rAY0Oj091WAw0LVr13Tjxg0NBgO9/fbbie4bGxtpmz50cDQW\nbw5aojDhXe8Hc12r1fTJJ5/o/Pxct2/fVrVaTdd2dnb08OFDTSYTXb9+XaVSKSnojz/+WDdu3NDX\nvvY1ra2t6dGjR/rP//zPNI5r166p1WolhUGI7jd+4zf0rW99S3//93+v7373u/qTP/kT7e7uSpL+\n53/+R7/1W7+ld955J6Ed+/v7iRd7vV5ao71eL5UpYIciCE+3281URB+NRnr27JmeP3+eCoLCa5xD\nRkI1nq80RwQ3Nzd1dHSUwtwo2lqtpp2dHTUajYSaOW+AjDC/8CL1jGazWapF5UphMBikAqNukGO0\n+G4q5p4xsJbX1tZSiI534Siys0uaG1nVajUpCUcBMJ4o0YCShm7w/draWjJEJaVaZMhi53NQEyqF\nQyuvTZbL5ZJR5sVqmRsfj69TKuXDb27YQB8pu8MQtAqZ6SHBqAAdxcUQc+XqSKEjPNHpXCYPJWXk\nKCEsGmvOFX5Ev/nnCJCXoaBPzGGlUkl99eZhQeYQw8+NTcaGDHLH0ekWoy3QOqblsBbhAV8zfM89\nvhOU+fRd6ZGu3Of6D7p4H/xengk/eckU6OHz7WOHHu5c857YP2+vLEcKoiwLq0WDgYFAvBgy4nvy\nVuKxF75N0mE/mD8iHR4qwrr1PANX2tGi90XonoiP0WP9/kynhU+ab8V2rwkjjlwW6Me1WD/HFx6M\n6WHRZdtdY3/IEfJxOZ1h3rgwHJGjyKDniTCH0cPA21kG3XrozpWTt2VehKN9IJnRMOL50+ni0M+4\nIJ3X1tbWdOvWLa2ururRo0fa39/PFGVkQeN1RzqzgKMB6AYkixtl9eTJExUKBX3uc59ToVDQ3t6e\nnj59mvrIrrZcLqf9/f2kMH/t135Nt2/f1s7Ojr73ve9pOp2m/CnCUCjMbrerb33rW5Kk3/7t39af\n/dmf6d///d/1V3/1V/r444/1N3/zN5KkP/iDP9CjR480HA61sbGhZ8+epRAV5R7y+bwGg4FarVai\nwf7+fkIk9/f3dXR0lJn7Xq+XQngHBwfpGjTAYMD5cAMUob++vq5+v58MtGazqZOTkwza4sYLeVCg\nEq6EqUXDuue+2Wym27dvq9frqdvtpp190jxnZjAYaDweJ2QBg4/SBuwgdKeH/8OH7kB2u12trKyo\n1WqpXC6nqunch+HjRUzhaVc+VL1n7B5W8xpHKPLodNJXeHOZg0LlfMK4HrpHTniBV+aYtYK+cHnq\naFihUMgYWYwTGrgSZh25AcUzI52Q/VK2sCS0c+cUA8KNQP4SMuU3NJ7hzqwbM/QD3oi7z0A6vV/M\nHX1mbnxsjrAs251MP9zIis4ehjbXYmjS6Yh+ZSy+0xP5zbuiDOc7l+PIRw8lRpmOzPR8Yzcsya1y\nnec6OxpuPifL2isL7TEZLGoEYYxRSwvhxtlfJD1KWebHy3KPyT0c95JcIUe0xicFyxwlxLlKwPoR\nDnYl6KhaDAM6WgVKgzfoC4p+OJ2cjv7u+NcZwuF13unM6McOxMR1F2DRA4ihMxSPw9vMDQYL8+9j\n8WNS/LkonmXeAM9yGNdDdD5OFxhOP/rM4naaYaS6t+ILdDweJ8V9/fp1HRwc6OHDhyoUCtrY2Ejo\nEzTF0Ed5wFN4UCAAzvvkRfFdo9FI5QRarZZu3ryp4+Njvf/++8rlckmx8//pdKrd3V1tbGyksF+x\nWNQ///M/q9fraWNjI+M4tFqtlM8wnU71R3/0R3rw4IEk6Y//+I/1ox/9SH/xF3+hZ8+e6a//+q/1\n+7//+5KkDz/8UO+9956+8pWv6O2339bW1lYydp4/f65Go5EEHLlA0tzIGg6H6YgVVxgkom5sbGhj\nY0P5fD55+L1eT+vr67p7926iLUYxvEgpA451oQDqeDzW3bt3M0fgYNhgTLhj4or24uIi5SvhKdPX\nfr+fkC/QHknp2e5A+FrL5/NJbrkAZ7s9hokbMl6Elf67AYKhByLtRgNhSWlxdBPjm06nKfQYz7v0\ndRBReBQhChX6QEOQiIh+gJaRguGKD345OztTv9/PGEu+5jGaoTdH/0A3KZuT69EEeIXmBkFUnBhC\nUbG7XIFHvHAqSCn9cb3DPw+1SXO5F+WNn20XoyLQmbF6eM7LSsBv9N91QnRSlzn76F9Pcse4ch51\n3c24HLSQFuUu6K+/zw1PnAgaBqvrX+83ujXqY5A5N6Kcdq7vI2DhdFrWLssfXLbLdtku22W7bJft\nsv2M7ZUgUliCbp3iNYFouAWI93F+fp62N2Kd492BuPhZbngwy5IJHdqN+TVYrnjPHtfmXVjYETIF\nPWMc7iFLCyTK0SqPOwOn+xjc+vZ3OgoTz9SCbn4gaYSjl4WzeG5EBX2M3tyL4V63+B2OxrMA/o9J\nooRo4tEx7lG4Z+zXPCwW++noEvSO6JXTlOeBTrjXzfyTwEyS8s7Ojvb29jLFMz1/rFCY747ifuaE\n/uAF4ZVznyNlIBR+9t2jR490cnKiarWaQSXW19dTmOn+/fuq1WqpCvfTp0/VaDQySe940OThNBoN\nfeMb31CpVNKf/umfSpqXOPj2t7+t4XCoP//zP9c3v/nNRLfvfe97+trXvqZ33nlH5+fnunXrVkKA\nHKE8Pz/X48ePE6pGAvx4PE67AkHqKEwLD5OHxNi73a5+8pOfaHt7OyGZ9Ofk5CQV+Mzl5gnc5GV5\nflIul1Oj0UgIzebmpkajUUKcCU1ISrk8vuPHk61Z05RXIJ+L9z958iSFOZFfoJTINZdX0gKVIiwM\nD29tbaVjacitol9eSBNe8iTtQqHwwi5AGmOH7r6OkDOeb0VzWRFRVc9dk5RBeZERyE5kIHRzOeso\nPsiCI3ye+wId8/l8Ji3DUzG8f3x22kfaxFQNv854GQPP9Dyf09PTlNrA3PNe0FRkgqPkUnYjE9f5\nLSFhxgWa4/98bqABOiGmpiwLZblczOfzmXQLDkFnvTqaya5UEGKQbqc7esxRLr4HQZUWu279uiNv\nfg863PnCx4T+8eiVz3HcrPDT8qOkV2RIufJlkMPhME0GDBljtyROrq2tZeKzENNL1fu7mHQ3LNxQ\nwGjysFWcGD6fnp6mEIxPFs1/G5nbf+Phqphs579FUHreSIz3swBdSEh6IW/AjUdpkdPEAqL5+COM\nDa15tzMqxgl0cUOCcfpvvDHv8fwvp+WyPDeO2BgOh5n8khj/j2OAVvCiHx5LGJbvfacUeRMbGxtq\nNBop2XowGKjRaKT+YxQ6HTmjjbmPOXGE+1yIorhIZL97927amfbs2TOVy+VkvG1tbaWSCvzmzTff\n1Onpqd55553Ulxs3bqR+oqD5PBgMtL29rTfffFOdTkff+c539MEHH0iS/vAP/1ArKyv69re/rS98\n4Qu6deuW/u7v/k6S9JWvfEW7u7t6/PixfvM3f1NHR0fp3D/4YDwe691339XBwUHKycKIWFlZUbfb\nTYnqtEqlkpKcY2i+VCqp2+2q1+vp+vXrKdfIebHb7SaDgOT3arWaDEmMNuQC4eVer6dKpaJarZaZ\n07W1NY1GIx0fH2cMm36/r9lsfuTH2dmZnjx5koR0uVzW9va2Pv/5z2tnZ0fD4TDlJRGy4kxJ8p2k\nxWn1s9ksGZB+Dh/H/GCYMIbJZL5V3r9jnITJ4H3Pg2Ke2CEa1y5yK4aHoI3vMvPrxWJRlUolY9Dx\nbIxjHEZPZu/3+8mwi3k+KF3yc8iTYhzxnLooT1z2I08wSJCTHpr3vFf0k+8ixxDwv9yHwR5DRO4I\ne4gLesKXXuaBFhV+TMXAeVwWhowy3Pvq+UfRAPH8KE/LYcee3xv1Hc/w0C0lLxyYcJ2PY4EO5zmk\nLPBstyegacy387HDO17Z3Q3ISNNlhmVsr2zXnpStv+ELJSZ5xTONJGWYOE4awrRer6d8D1pMLoOB\nx+NxJmeFiUBxxnipnx0U4/7RG+A7FDv3eZ4AAsHf7+9zD8SvwdC5XC4lwtKcGZbllNEiquQGD/31\nEhAxjyIKBt7jVj39Jt7vi8YRMBdk9CUaQd5XDKx4FIYbmC4EoEukQ8yLw+tC2UiLwohXrlxRo9HQ\nwcFBJqeBfBMEsOemMEaMJTeynD88l4VxdLtd3b17V9euXdN7772XQXNASTBGHj16JGm+Vl5//XXt\n7u5qd3c3UyQQ5AoBPxqNUiL2nTt39ODBA/X7ff3Hf/yHnj17pt/5nd9J1/7yL/9S169f15tvvqnv\nfOc7+tznPpf6//DhQ73xxhs6OTnRzs5O8iA5Eqjdbms2mydls0ZJRMaAmE6nqSAnhTEvLi7UarW0\ntraWntntdjUej1Wr1dTv99Vut9VutxMK5Nv0e72eNjc3kzGC0ba9va3pdJpKL0jz3X7Xrl1L/cbQ\nZC7I4dra2tLe3l7GUD84OEhn+l25ciXN4fvvv68PPvhADx480L1793R2dpYMnM3NTe3u7qbSDhgT\n9NNRCd8Qsrq6mg5HrlQqGfQon8+ng4pBqd2o4zid4+NjnZ2dZersYEBFNNqT3vk/yJb0Yg0eR5ZY\nRy4naOS2cY8jwOTMYpThREtzxxsjwfMNpey5psj1iLogR1zWggxx3RVxjAa4HnG6k4PFvHFckiPL\nnm9K8j2ffe6X0Z3vkNluUDl9mUvmE1SOunTuTLreoUXD09HSaEiR2+g6ItbfQh56Aj/vH4/HydCO\neg9ZHDdNoH+hgzvc3D+dTjMlStBryN9icVH6g+8w6N0Ai1GpZe2Vlj+I4RWuRUXnqAGLzWvwuAUu\nLWpTsf2XZ7higzFhRkdPllm3fOdIFCG4yMA8fxk0SovhQv/snoB7ThgmnlQak/d8sfEXYzPC1u41\nLoO8uceNHlAh3w0TaUMfPawHfV1A0JgHr3zuwg2FT58iDX1B+wJ2YeOhVH7rnlNMvuSze+ydTidV\n6X769GnGaEeQubfufY0GvNPGw7IYDyAW0+lUn/3sZ1UsFvWjH/1I0+lUzWYzzTe0nkwmevz4cUIz\n3nrrLb333nvq9/spMdlDJqur8wN4B4OBhsNhMoi2t7fVbrf16NEj7e3t6Ytf/KLeeOMNSdLf/u3f\nam1tTV//+tf1gx/8QJubmwlZ+qd/+ifdu3dPzWZTDx8+TMpaUjIcqMFFQry0qMItKRUXZQx4rCAk\nHhIC3cNgpjwA1eLxkCn7wMHP0hyRGg6HaSdlt9tN7yyXyzo+PtbVq1d1eHiok5OTNEaQO0K+t2/f\nTkbteDw/Y293d1ez2UytViuhg/fv31er1dInn3yijz76SJ/61KdSSLjb7WpzczOz08/TFigLUijM\na10x/nK5rFarlRwZ5328fJwLr7JObS2cQd/peHp6mupcRQTEDauIRjlf41h6oV5kJE4U5+5J86Kj\n7BJlDL7rmjA6ss+RWl/7ntYA8oVeYFMB64n+8zvWBfzkCJOPzZvLa5ddpJa4MTgcDtVsNl9A/3Gc\n6JPTOK5VeIwxOMpLf6IRQbiU58R5ZAwxnOYOfkykx9gEZaQ/k8n8ZAJQ0KjPXb5GwGFlZXHWbTSy\n3CDEWGbeWPfuJMf3+Q5RjHtHv1zPoNfdLuEZUWbH9soqm0dL0hX9spikI0Su6JzY0ROKjOKK0xEC\nD7FIi91Cjib5IvVn0C9+xyQsM46WhQ5p7hksM3r4PsKV/r2jeuQp+S4VaOL3+dg9bOIonOcQeUgs\nMq1/Zp6iUcg/wmfQ2+c0jh20TVKGmemLe2a+gOGxlwl+DKzoeUbPmRDVzZs3Va1W9cEHH6R3RQ+K\nRUeffEwYvRFVBfGDn3q9XurPF7/4Rc1mM73zzjva2trKIBSOmh0dHanVaqVK6j/84Q/V7XaT8mW3\nmzQvHYASPT4+1oMHDxKS0+/39fjxY3U6HX3qU5/SV7/6Vf3Lv/yLpHmpgq9+9aupQvqDBw/03e9+\nV9I8J+vq1av66KOPMiUBpDk6BGKCcQA6JM2VKYLSqxSPx2Ntb2+nWkJuEJDDw/b9brer0WiUlDCo\nFoaJO0r9fl+NRkPD4VCtViv1T1oYS4TN2FUoKRlm7NDL5Ra7JPFor1+/rk6nk3LmJOnq1asqFou6\nevWqjo+PtbOzk/qC3Nrc3NT29rZ2dnYydANtpWYUeV4gV9DLHQjqDFG5HVROUqasBQg265ADo5FV\njjCwnjwC4IYM9HHD3u9HniBzuIahkM/nU/6YI8cg3yC4zD8GCv33d6EDYkRAenF3lhtuoGPIYnda\nI8LMe7gPQ593sLbhFxwC1xcun5BD/szZbH6qA31y586d+dhXnsc15sr5zcfE/GPwujykP+gk3xEX\n5eR4PFan01G1Ws2EmT186cgZzwIJch3kBZZdn8Zn4rC6QQSvcJ/vlnd+nkwmyThznQa/RNQxpqN4\ne2XlDyCCw5yxPIDnO0Cs6XSa8iUkJRgVD8ULcEmLMvFskY6Wsof3YBp+70iHM4ujDTG2CqNGQ9DD\nhT7JjNUNymhgRu/DFzPCC7pwzfMFYojN+wSNovfinmVMAoyfPV/Ncx+cBj5u5t29S5KcHZWSlHJS\nMAhdkEe0zY3VZWGz+B1CPdLbjUEQA2leGuD999/P5DxEg9iFAzShRZ6Iwh0Pazwe60tf+pKkeS7M\nkydPtLW1lcJ4sTLwYDBQtVrVgwcP9MMf/lCSUuhqNpsl4UZ+DTRrt9u6d++eSqVS2jp+cnKi4XCo\ncrmsr3/96/rggw/03nvvSZrXn5pOp3r27Jm+/vWv6/vf/34aAxXNz8/Pk3HBPHEGHWMcDocJrSHJ\nmi3VIEXS4igXkuFdkeEtr66uamtrS5VKRfv7+5nt7lTNxmjHWPTjSjDGIk2RB2tra8kIqdfrySBY\nXV1NOVf09fDwUO12W81mU81mM4VLyaeqVqup6CjPLJfLKhQKOjk5Ub1e12c+85lkZBJyrNfrqYK5\nG2Ae/nWnzfO7CP8hW6lnlc/Pa3p5xXzyqtzQcCSBNQOy5A6WK0hksstMR/ZdTrhMZDweappOp+lY\nFyrHS9lcUfghIite/HaZE8tadz3jjpAbRJ5CAY2cNm4cehgOQ5CNJg4SOAoS9QblVTwVJKJVjq7x\nGZryvRsb3lx/xc0DzI8bL9CYvkqLvDsMOXdefW6YR57reh4dwxp1MIUwOmkTPrf8defZxxYNVmlx\nQoqHdmkOHCwDCCL9Yrssf3DZLttlu2yX7bJdtsv2M7ZXVtl8GWrjiINbgVTFBaJ3OJaQAYnP7pnF\nEJtbqR4Lx2r30Foul8skn9PcYvZ3MC7QMjwNt3KxxuO4sY6BlB1B8uQ3fxcNZATvI3p6NJ4f7/dd\nHQ638z7y0TxEGUNmEblb5mW6hxI9JLy22WyWPE6Hos/PzzUYDDIesT8rzoc394aW3edQcPxMlW4K\nWT558iSTs+C8Bv1JwPV58PHQV/f2oif8xS9+MXOcy9bWVvJw3aMDEi+Xy7p586Z+/OMfp/fdvHlT\nhUIhJXrX6/V0rVKp6OTkRJubm6rVahoOh2lMnU5HpVJJv/zLv6xOp6Pvfe97mfypf/3Xf9WXv/xl\nffLJJzo4OEjI2f7+fgoXMW+OVnCeGuE5Txrm+Bj6QLL3ZDLRcDhULpdLOUfOh6xfkJ5isZjQHBLq\nz87O0tE5eN4cIAxd2BXGc0GqSqWS1tbW0lxMJpPMkS6UmJDmSepbW1sqlUpqt9taXV3NHD4MsrCy\nsqJyuZzkFzvTrl+/rna7rcPDQ92+fVvSHAE9OTlJfEGIS1qUqWAHs6+vUqmUDnFutVoajUYJjRuN\nRjo4OEi7pOE/SWnrOghSDPGQq+K5avGsPZAFR4DjX/f2nffz+XwqeAwP+fu9ErWXGYj5jvAFqFQM\n7aND6AutUChk5sZD9/TH5aj3k7BgRHeQ2WxUqFarS1F7T0+QssnmHh3gt9AceevrLSLvUfbGtJoY\nxiPfzHW0R2u43xP8QdCYT57tRTddVvp9Ple+KSqmMUQd5FEBD+F6mDtGB3yXYNSPjjJ6WNT56mXt\nlZU/gDBO3Bh2g6h+ICeJyIR+/MDbmD8VQ3A82/sQ4VZvL1O8LNxlzEainYcguD/C5MsYnmd6EqFD\nzX4tQpH0j3e4Eo+hPR87zO+5O24oOUPD7AguLzHgRumyBUVfCYH63DBXlUolIzDJn+K5KEDnGQwf\nFrP3xQ0pp7/f799Np/NkShKU7927pydPnqSx+1lvMf7uysWhblrcAeSKhVyKN998U+fn53r33Xcl\nzUNG5+fnScE5bYC2b926pQ8//FDn5+cphwbnA+PDw2nklty/f18ffPCB1tfXdXR0lPp55coV1Wo1\n/du//ZsqlYo+/elPS5IePnyou3fvajKZ6Mc//nHaoSdJ7XZb5XJZs9niuAhXeoTa6DMHVhNGG41G\nGgwGunPnTqakwMXFhba3t9OOLYTb4eGhisViynUqFAqZ0gmNRiPRmWNYaLPZ/EBfHB/CB8wda2w0\nGqlarWaMEDe0kT3S/NBmwqmNRkOHh4eJNzwHaDqdqlKppDVVrVZTWPXevXvqdDqJptVqVdevX8/0\nx3NvCoVC4lMMTuaekLi0yH2Cv90YdMeA61F2Od3cGXTjhdCay0xf36w/N5r8uW4MeZ4Kn70MgrQI\n7eFM+Dp0w4O+esiXtYeydHmATGNMbgTCq/CwyxqMqWVhTYwCjC36wkYAdxBdTyH7kc2u5H1Ti4+b\nv8ucTil7ogXzj0GELEWu+fzTb88V4n2UF4I2uVwuc1KAO9yeG+ty2R1JrvkufU89ic6pAwhuC6DX\nYx6w/3Vn1sPB6AGnWdyJmKHrS6/8P2wwckQsmCQG4szIonHjQXpx6340ijze7sYFBoDXIHFjhAXF\nPW61x91+rhCZnKi0Yx5PFCbu/TjK4wvK+0FjXCgwT5zkOh4L+QL+DsYRc4ig9TLjww1UN+IcTfO8\nKf76HPr4PTmQPjoiR/9Qeo4IuZBywcH7lnktywS58wwIzb179/T8+fOk6La2thKdoreKkPBFHnPf\naFEAcqbc66+/rkKhoB/84AdJebvh6YUqec7du3d1eHio8XismzdvpmeSE4ji8x1fBwcHeuONN1Ji\nOFvppbmCvnLlih4+fKher6e33noroS6TyUQ3btzQw4cP9dprr+n09DQZYHiWKysrKadtmSeHseve\nHjzMQcAIPpQgOUTNZjMhWSAXrLVicX4+niMN1J2aTCapxAJ8ShK285GkVDcKw2o0GqX/s7vMnTfy\nwVqtlnq9ng4PD7WysqKNjY00XycnJyoW5/WyUO6u2JvNZupLvV5PtCF513cAxvXN9m2vBcZ2f0/M\ndqXA4dmerM77kBUYf4668Bd55LuzoKPLRZrnvsRcoLgjzWU0vH92dqbBYJCS0eGbXC6X+uD9cBSK\nZ7BmPB+Wa8wvypq/GO8+BpwEd8ygE0ZCPAbGnWaQWcbqDpfzpBs60MONE9/p7JuJGIcbi06faIDF\nXXkuN7z5OpXmfOhFfHluRMdcLtMvT4BnLPCeG98+p8yH85rTBqfBDVmPPjEGR8B8PP77ZXrWc+CW\ntVeGSEVvgAXkW0edGV3hRyKjLCCs38dnRzWkxena7k24QeCWa9z26My3bBeVlD1lm7G4Be5j94Xp\nwp37CFPAPP5MlDfM78wArdzDdCUMYhQRK198LlwYFwvXUQenB3PrNM3n52UrKpVK8phdgHk9Ga+H\nxftZIA7/updTrVYzULPvPooo309DLkejkVZWVnT//n3t7u7q8PAwGTXQHW/YFx+Cwo1Fn2P/nRtY\n0jw5eHt7W+vr6/qv//ovbW5upi3ppVLphSR7DJvbt2/r9PRU+/v7KREaYYvRCXKyurqaDjS+cuVK\nQoCuX7+uZ8+epZDYm2++qbOzMx0fH+v69etqNpv6/ve/n64dHByoXq+rUCgkowEa0uKacUMfGmAA\nsOWeMFi/33/hvEBq7TjcXiqVUlFK+Mnf4d5sqVTKOFigVawrL2EC33O/8wbPZoeRG4icrYhjcXJy\nksbhSeoxZOGOXj6fz5SpINyJYRQNF+hHKBKaEkpl5yFrh2u9Xi8ZZigx+uBJ5DH04fNHX52/HVXH\nOZWyhy+jbL1+IPxBzS760+l00tgpdBpDcvTLjRU3eHh3RDPcEXeZ7YiYh39Ys/TXHUhkgRcVdsMN\nA4vnxffR2OggLeQo7/czYx3hczr47js3FrlO8wR8N5iQrchtvyfSIqLxrgscIEGHOGrkKB86Cx0T\n3+lRKnfM/LcYcjRkL7rRjTPGEcONcWefyzN3NF7WXokh5bUcfCHCrExU9PidgSEwQoJ7MVKkrDED\n8Vwo+kKIMK4rSW/AxChmRzdAYpYJWpjbJ9PREzfc/L0oAIQ0O7ekbL4Mz6E5I/g1/42UPc7BLXdn\nVheqvgjZeh9RN1AFjDXeG70NLHyUi0PKTgMgfebPCwhGQy1CvPSDvkM3N8jds5nNZvr0pz+tw8ND\n7e/vp5pN3BcNYV9cLuigqxfCg095N7h5EYgAACAASURBVAJ1Npvp/v37+vDDDzNFJ6UFbC5lC/FJ\n87DYxx9/nN5BCQBJaafQxsaGisWi2u12osPGxobeeecdffrTn9bR0ZGePn2awnflclnvvvuu6vW6\nbt26lXK0GNdgMFChUNDx8XEmhOEoXS6XyxTQY35Resw1fW61WklJ+nE1w+FQa2trCVFgVx80A1Fp\nNpsql8sZR4bPGOZxe/x0Ok15Y27w0r/JZFHh28Pl5Hyw9v1929vbOjk5SXPLfZubmzo6Okr3E7Zg\nDuEVvHx4Gjq1Wq1kFDgPu7Ls9XqZHLBGo6GjoyO12+2UIyYplT5ANnqxSj+6hHxTmq8XZFUMdUwm\nk4yj5buAMZDimnGEl3XiyIs7Tr57ixAlhqTLU69L5DmW3hwVZ57YNey5tl4MmvfyrmhE0hgn1wjf\nebqCpFTSwvWZI4AvMxzQhS4j/fnRWYPHoY3f4wYvOtL1pNPL3+3hO0/vIBfR14zfE1EgRyrd8Y7o\nk/OTG2mML6YR4GC7bIeOrmNdr7GmPELi9/209koMKZhw2YQxaIQZjQlwr0b66VYiSssZMoYb6EdM\njsQTjedROVEjyuXP82R2vnfDMYYgY/NFkM8vtoL6AsLLZcGzCBi709SVF/1B0CBsXIBH9MuFoguh\naGTF0KnDq87sLgQiIuCookOxCGoXfGw2YDyM2yvfYlBFqNi3NNNviizu7e2lc9pi0iH/j4YEig6e\nc6FRqVQS7fC8UQpvvfWWTk5OdHJyoq2trWQk8B4EXavV0sXFhe7cuZPmFRqBQvgRIlQ7n0zmSdv3\n7t2TNA/tEQra2dlJuTjSPNfn7OxMt2/f1mw20+HhoX7pl35J0hwhwHhAWbrgo5SAI5LSXGHMZrOE\nrk2n0xRKrNVqWltb0+7ubtrm7gUpye0CpeQaBoejDh72nc1m6XiZwWCgs7OzDGKzurqaSkdg4DCP\nlUolHaPi4XI3yN3JcF5cX19Xv99PeUjSHGVbX1/XYDBIxSF97YNMQBuUHc/wyt/OfygFEDaMTLz7\narWqvb09HR0dJb7Y2NhIhUORCY7EYwCyNjyMzrPpI8Yt9zImHB+XffSZe11JTSaTdMTOZLKowo1S\nIxyGUc08YQT6OqOvhHXdEeU+D295or4jI8gLl3/+O6cJfXUEKeonrjs/zWaLcDbPdic56hVH1Tz6\nAO94OG1Z/7zvL4tGuA70cjRuzDnYIS3yWD0czDUHMBiDR1uYf3+u98V1lDd4BX0RK9vzzmhfeFgx\njpfv/cgb6B2d+9guyx9ctst22S7bZbtsl+2y/YztlVU2jxCee0ae8ChlD98FtoyICxatW+7ErTmC\nw98LUuHImG//pzmUzX1StsK2X4vwqo8Ja53+R2iWPvlz/C/hBqzpGJaI4/PcIIctoQ395HvfXovn\nEMNi9NNDavwmhrYcoSKUhyflYUlCL+4BuKfg+R2eqOyJxqB9HtbFc4vevHs/oBnb29vpHQcHB0u3\n8ft5WXHuCWeBmhGmdJrwDEp13Lp1S9Icefjoo4/UarWSt+xhT1AuttQTbvzJT36icrmcEIBGo5HJ\nk9ja2tJgMND+/r7u3r2b+vrJJ5/ozp07yft68OBB4rPHjx9re3tbtVpNH374oT71qU+lZ3Y6nZSz\nE7fcxzCuo4NsCyfk6/Sr1+vq9XrK5/MpCdoriROWAXXx9QBKRYmDZWdNeuVveN5zU0DWfF2Anjlq\nwTvdo/UxwrP5/GILP3PY7XZTkjo08PCG78bz0hkXFxfp3fBWDLGAHvmZYp4bsrW1pclkcXjv0dFR\nJuxdq9WSbGR8oC9OTx+foz2OcEgvljOAN1xOsP59nnwnLs9knhwVhjfOz89TtCDKZvpWqVQyPMl9\ny/oIvaUsEsh1wssuXz3E5DlMzoPwu8u0uGvP86J4vufUkccHDVyuQEvnN8aFXPcQGjohFsWkr85f\nThufb/76fVHWRxntNHfd47zq+sYjHvyW98XcMH+mp0wwdp8n15mOZMYcLQ8zgnzH/ERvr8SQ8h1d\ncRFIC+jSt8y74OE7aWHoOKzq0CawJzF6h04hJIvCDRImF8HihoQrN4dG3QCKCtQhzBgW8LFFmFrK\nVgYmLwQ6OoTpBp/XQpEWoQeHpRmz00GaQ/OEA3kXjXF435ctcIzYmOdGDoX30YUJRkSEyLnmAgD6\nR6bntx66cKPWoenJZJIOuJWkZ8+eZcJGkUfjllzfYcUYWegxGZd+wVMksR8dHWXqKPEbxkjezunp\nqba2ttKRNZyLRm0iwlTSPAx4dHSUduzV63X95Cc/kTRPtkbJrKysqFaraXd3N/HU5uamRqORzs7O\ntLW1lZQXOSrQLcLt0MHHyBwyFxgSJNNj5PgxICh96j35jjP+3+v10vEZGC4YFdJi7RNK8zwpDHKc\nLMoy0Nh55ZtcmFsfpysMd+JwNLiv0Wik0BUHX8d1yvMuLi4y9Z2oiB0ru0fnLobYp9Oper2eisWi\ntre3085TjrdhPUAH5gLF48+D9h5Cmk6nGUOD8UYFSH+QNR6Sgaaz2SwdsoyM452e9xjrA/EvGgcx\nvQF6QmdoGEO2LwurOU0Zq8tL1gP3xfvduWYt8EyqydN884anH/h46KOHTz3XCefAU1RiiobLQ5dR\nUT/Q+J6wrBsd5Hl5LhLXyAEkdcT1NHzkaQIxYR3+WOYIw3PeF9dlnvLB+2JOqxvfDqp4SkVMi1nW\nXllBTppPJpPOAD2BDSJwDWF7cnKScitoy4jlRI8txpZ9YlDIEUFzgUJjDJ5UySJmXPw/9sMXZrSw\nfSzkSkmLAzLph48Pj8obAkpa1KGBrhg+Uva0dvdupGws2WlHczo5muRekRt+cVwkkMaF43zCczyP\niMKEPj6nSUycxfjGsGMbvyfLgti4gRPzASIqw/MRLNzruV6j0Ug3b95M4z8+Pk5b9xmrJyMjgNip\ntrOzk/oKz7I13Hdjce3q1avqdrsZgyiXy6Xjb4bDYRp/o9HQ6uqqRqORms1mJvkXI8gNKDd0oZuj\neIzBd8JGpKNUKiX+j3mTXD87O0tFKOlLsTgvicF9nuCNAHdHweeH30JrLy3hAtw9feiKQeG86Dtk\nURaeqLy2tpbZYUfyMwYt8gyeoy8Y46PRKPGC96VQKGROrpfmMhEniPIJbBhot9tJRkUv3BW2O45c\nQyYPBoNk4CPfQP5YN45u0Ffe58aBK074wx0txo+hRJ6bNF9vw+Ew7fzkWezYov+e78J8npycpHw+\nrvmuO+acvjAud64iKs+8R+TCjVMvRcH7iDQgk2Jz3cD7MKIwUKKRwDhfZtS50euyHP70fDTGH9He\nSBvWPQgaY+Qd0MF1lDs/HulgvplLd2Kcv3wOpMWOc1+Xy5x+tyfoCzTxCBfXvIbdsvZKQ3tu6cXd\nBK6EmAg8TJ/g4XCYMXQgspQtyhhRHkcOHE7m/dy/bCJZ8PxzFIb+LjOy/H73ylwASNmKt7wbpnHF\nxbsi4sL30WB1SN3hVA8n0D83pnxuXDHRNz+VO9IuevHU6IlG1mg0SoUKfRzMk/fXkTNpAcV7UiJC\n3oWW0wIkBGHMOyPKiHJgLugTgtHnyeuXOALDc+k7ioqE6/X19QzShUEiLUI6w+EwheMcquaZ0+k0\nHXIrzROca7WaxuN5HZ79/f2MwphMJnr8+LE++9nPZvh0ZWVFpVJJ+/v7L6CR7D6KiCT8Np1Ok8Bx\nA9SdDmjjYV0PZTnPjsdjVSqVZMBGfmJdIDh9nUIznCw3bHgnOykdJSA04LtI4e+oXFxuuFCPaxEn\nhT4Ui8VUGoECq6urqzo4OFCz2cwkW/NsP/8PmoIsxdpc1WpV+/v7ms1mKfSLAVIqlXRwcJAcRJeN\njrS6rJAWu+TYVYkh62iOe/XR4KbP/jze5aGfaNjRT1BZaMM7MKCiQeTP9rICIJw44258L5PRjuzT\nR+R/lK9uGMT5j6g/9zha5rqEviOjXCYh1/ykj9hcTi4zxDwFwOcdvneeg97IIubI5wnDqlgsvuDU\nevgbow8a8xzGG41vB1aWNfiA++KcuFHtCJ3rWH7rPOm8T799TLG9EkMKgRhDGFL2AE4fjCskFoc0\nF+7Ao0yGGzYQE4+I5kwTIUf3ZD1u6/dy3YVDRE5cKLjRxjtdCHOPf/b3oTSXTaaH0HyxuTCEYXkH\nIRpXdm4EITToq3sN0NIFJ9d8oU0mLx5QyTEi5MPQN1AB3hm9OgS3P8/r9rA4oA87thxxcwMEmuLR\neR4MNMUT8flw/nOkwo0anzdoQ60jDH88aUnp2AjWA8KB/pRKpRSe6/V66RBlpwUoE6hTPp9Xt9vV\n1taWLi4u1Ol0tL6+LmluEHS73bQGOaJFkprNptrtdjoepd1uL0UjoSW0ccdhNptljDDPYcPo9XxE\n+NBzqWgYp74e+X46XRwPE50dr6lVr9cz3j4CFT731AGQL5TCbDbLbJ3HSKTPUUn4Goc28F+5XE5H\nwmDYOILdbDY1Ho/1/Pnz1O9ms5mEPmFIaEwlefoCrw2HQ127dk29Xk/dble5XC6NgWNjjo+P03yx\ntqE1CBlHN0lKBpTvzIryxRVWlFWsaTcOeLcrSoyjOE8R5ZTmiHS/31+6sxraeuiGayhXdpnSQMBc\njsXwliOnNJANL8Pj6wNDD55wOvGswWCQdglDT/gqAgHIK48wuG5DntJ/50WniecK0Z+IskeECF3j\n78aRWVtbU6PRSOgzzyT1wlMdoDe6gntcfsbUgEg315sxBSSO26/x1yM40UFzZ8hPqnhZe6XlD9wC\ndQPDPXIpm38SjY6Li4t0ijmEcwKw6FHEoBggMRgcLohiMqgbGQgaFJ+HYaKFLr2YyBe3k0rZHLDo\nDbrFjoewzDr3cBTPj6iYK2j3KmAoNzSWbTf1FvNHeD7IAO+KaN3p6akODw9TWBIa8Xs8dubZw1fQ\n2o1oQjoYEs5PnmPnAhMDCQ/Lx+rJxHHufeMCvOiekAtdFB/0phwBZQcoBkl/oNtoNMoYhBge165d\n02w2S2evMccIae8j/cEgoCo4vxkOhyoWi7px40ZSmih2aLK5uZnyetyD9NwiP1sMYe/oqKPNGJ7c\nB51d2KOkl9UXQzjG0DpJyswp/MZ6psq6o1nUawLddofHc29AxRkHiBzKxBFY5AKKOOZVephvOp2m\nY2BKpVIqJkvtK96HcsV4435pgVqDTLlRXy6Xtbe3p83NTe3t7cnb6upqOoeRzQMuSz106coryiqQ\nHEdrQY4ieuZORlRS0cF1g4z3ufNGMdp2u61Op6PRaJRxlnlmRCFidIB5jAoSJA0Z5nlH7oiC2sOn\nHtaK6A9/oYPzDHoE4xyk0hGTZeF0nsfv3HiLzq0DFjTPeVtWlw955AYRNOE7z0X2DUNuQCN7+W10\nMNwg8ve5PmOtRUTT5yKGbplfv+bhd3Sj0wlegHfd+I4GZmyX5Q8u22W7bJftsl22y3bZfsb2ShAp\noLqYFOiwZ0ycc2/ALWyPdQNpxriztAjnYFX6MSigU25p8zy8ZYfwHRWhD9LCK/XYrKMnPt6Xhe88\n74P3ee7Fsti8o27QiERVYFCq9Triwnex7IN/F0OGeD+OaMUwnPctzkOlUklQNqEm3zkEZO0hSsYH\nyuR5GdDXUR3eTZ7U2dmZhsNhJpQYQ8Ee3iB0Bz0dPYE+8EnMI8Lrgda+fd3REZAp+u5ek3t0QN6z\n2Uz9fj+TX8Q6ICzmPMmOO0J8jUYjobEgIH4mW0Td+v2+Op1OJpwK3aiu7/0GFQNtdh6KIXPewTVH\nTz3PjblwuoNAkZjPmuD9vr7IJSH/jrmCVnjY+Xw+0cY3bzgCxzyBHHGUkCevwksgadAND7nX66X+\nMffT6TTNE/PMbk4qvoPSeYmDtbW1TO7WaDRKOXf1ej3t2rxy5Yr29/cTXxwdHalaraYE/1jIkDE5\nMsG1QmFxYgHzDG3gQw8X+TVQIa+iDd8gN6EV1wj1n52d6eTkRN1uN4NwIxvIvfT7QHaWIfggHPTR\n5bcjzS6/YvgohsRIFvfIivO3o3sxRER/+v1+Zh0iYx0xlV48XHmZfqS/jvwxF/7OmM/lSI2fsIB8\n9f44Uh378LLQv+809L4yZteLjnw6AuXoZgzR5fP5FGb06IS/j40oZ2dnaR3CB45EMU/I5J+7HCl2\nRTj8D5GA9GLNjphQ5wbSeDxOYQhnWIgJ7OuGDIaVQ83OmD6hcSK4z5lLWoRFPIna4UHizDHPy0N+\nMcxIc1iS5pPrDMvv3eDhfX64I4I/JgE6LWL+QTR4vbmx5cbdsntJBJaUhB6LwJU39HRFGUMm0NaF\nCwvD4V1PsPRF5H2Hbg4Ve/gC3vAwQhy750qg+Mrlctqqj6B2qNpp6sJlOp2ms+16vV4KD/JuD6O6\nQpIWtXbIR/PwFRXDNzc39cknn6QxEuI7OjrK5IpAN54Td8eMx/OSBqxtDxl4cjrzi7MzGAzS2J3v\nGHsUkl4ugDAh68XvHQ6HyWiF990gi+vS83I8h46cM8bPETC5XC45R9KiFIXzCvzlxr7ngklzWcjB\nyBhCXKvVaiqVSur3+5pOpyqXy2l+STR22UHb39/X7du3VSqV1Ol0UvkFv95sNtVoNDLGCXzLnLsc\nJGyKzImbMlxBY/jF/CJXok5/7keW+mYh/7uyspL4s1CYb9bo9/tpJxxzyCYCz6+j8Q4PQ3sC+7L0\nDJ9fl9sxrcHlko+d9Q6Pej4wziCGD/PkMnFZfk50QDx86e9nrG68uEHnSfp+v4c16asfneMJ3m5Q\nxYaco4+e7sDYPOzpciJuGvO5cWOXZzM2aIWD5SkG7vy4HGJM0MrlKDSJ4Ie3V2JIocRcAPjCcqXE\ndwhmGCjG7skV8YXo9VhgYveSPLk5xmqjZezM4p5LNPBQ7iwYjwfDoHGBR2TImbtQKGTQBhecEWWL\ndHRh7n2jP57LED2u6I25UetM5YsnIjf+/5gzMR6Pk5JqNBov0MSfj2CGyb2kQMxXiomUHrN3Lxih\niVKFvi7oUAq+gLkHgcIz/YgdjPZogJPn4Tk4Ths3lnnncDhUpVLJCBrnzdPT04yn6M/0hHgXTKA1\npVJJw+FQx8fHyfu8du1aQntA3OBFDghGeHnuTqPRSOPb2trS6elpymdhNya08fPkoDfjzuVyGeMU\nwYgA5j6SwumHtMhDk+aKn0N7QV9pjImSEX5+5enpaabGVswJmUwmOj4+1vb2djr4mb6en58n1MsN\n8F6vp62trYSOuhJqNBo6ODjQzZs3VSqVtLe3lxAplECtVkuGgj+fPmJQ+zN3dnZ0+/ZttVotPXv2\nLJXFIKe00+mkg8PdwHb5OpksjmthxyLXWAtuSDuP5nK5jOxxxZ/LLXarYZiQ67gM+QGJdh7GMWKz\nkdP0/PxcvV5PtVotzYM7Zi4jPB/RDazYD4wol2M05A7PcWfax+y6gPfxGz67ocx6eFmeGs8GdHAe\nd2cg5ke5g02+IM375jIN2sOT7vQgn13murEGzd1A94bcch5GLjA2jClpsS5wkt2I9dyuuDPPHQD4\nhbEjW9BNTjdHFV/WXokhBdzui1jKLjbpxeJhy7wF32be6/XSdmLu9+fjvUrZM+PwFPmtJ8H5byRl\nmAjjLFqvTKQzjPfXGZlxw6jRK5eUUY6erOh082fxPh+jCwnGiJce4WM3fvy6tEBsXDi4R8f4HYWg\nXxg18ZrvluFdKAyEK7zi97KLCMM1himZI/ruStiRB7xFSal+kgtXD28u+ywtFjfC2Xd9eX8wpNyL\nwnhgTXgjVI1g87nGcMHoiXyI8wE/+UkB9Gd/f1/5fD6dxTadTtXpdFKZinx+Efaijg/PGQwGmZDs\neDxORh9JwJJSwjSGhq81+AgB5vSl0KYbbnjsfOcVwz1MxWYCykCAwsGf8AAK2qt7S0rhLxwZeIH/\n9/v9zDolJE3BXFcm5XI5hfXG47Hq9Xqm/tfFxYX29/d17do1NZvNTKjJlbSjqNT6OT8/13A4VK1W\nS2gNu5ifPn2qzc1NXblyJaFdhEdxAiPywmdkH/10z34Z8gHtHAHhd+74uBNKgy4oTXhqMBhkduau\nrKxk5IKHoXw9TafTtCmDNephb5Q78tbRDHiS8bjyhDej/JayDkFE4pAvrHNf9yh7N2y4z51p5zV3\nfn23myMtbkR55MLrWPk8Mj/u5FGfjHc6os9ccj9GOQ4j13AcXEY50oTxFOfQHWD6HA1JlyFRJmLc\nxbIyjrTGqICnvTgfuC59WXslhhQMMxgMEoTpuyJgDI/PuwXqCgNisBiBIKXF4nfUwKvmRqQlQpxu\npXpD+HPNJ1FaCHnfbcFCwRiKhddYaNE4cW8zekku3OJ16BghXxo0xnJ3A9YZk2fRyFECJXMER1LK\nVWARuOGG8sbQ8mrmzIUjTtJCQYGgOPODqJF75EaGG4DLcgG83EVUJih8Dzf4/PpY3IDl/cViMeXl\n+OJzIeOK1qv+giJh2OBpOr85n1E2IvKb51xFZXJxcZGOXGEOaaPRSMfHx2o2myoUCpmQEDQpFovJ\niPKDgKfTacZo4RphzZhTJCmDTETlPZvNUj5iPp9P6BvPxMEZjUaqVquq1+upsGg+n0+5Qzha7rFX\nKpXMTjjeSSXx1dVV9Xq9NFfQhtw+cqRAwAqFQkJtQEC4Ro5Po9FIxU85HgjlheLf3NxMMsrzNzwc\nCG2Y37W1NR0cHCR61+v1NEftdlvXr1/PhJPgW5QhNEWZsT593cN7rBPWoaMwUS77kTVuPDgaKy1q\nziGrmRfWkhum/g7kIs/3UDJyCEXqSEt0LB2hYh3jlHm/WW8Yd97cKfOCs47cwa/uYMfwFY2ixC5H\no4L3yIwjbdDKIxfRCIhzTR8wrqFhNDIdJPC1DHrtckxaGEE8MxpSGDvIRm/IWHSGOzQ8x1Ej7sFe\nQCZ6mBSZHp0Cj4TRF19vP9eGlFeQZSAxP0palA7Awo6TCMNRh4OQAswCsSaTSYLiyTvBYHOhwXMj\nyhO/xyCKFr1Pki/gyWSSOVfLkR9fNBGxkbQU+YpxbM/9wFBwL93H4guc+L8LFH+/zwXv8JwWZ0bC\nPREG5zufLw+9IjThgYgigYa4VwqcDHJxdnaWKcCJoPctzNLCAMNbKpfLmQr0bly6kYERFI+qoJ94\nVuQSDYfDTMKml6/wZGQMXk8kduOfMYD2uIcFXbnHnQCEKciNC0OQXHdemCcM1uFwmEnu99IUuVxO\n1Wo10QLjk7wcSSmchLFHWNCdCJQ568jzGMfjcTLMHAWF7ryfJHxqdUU+HwwGqlaraS69rg31tZin\nwWCQ7u12u5pOp+kcRtBBDHdXQvAO/6bTxekLhUIhhT7X1tb07Nmz1JdGo5Hqqk2nUx0fHyderNfr\nGgwGGaSVvnmdp1KppHq9roODg0TTZrOpWq2mfr+v/f39NAaUGjSM6IkbTM77HiJGFruTOpvNEtrI\nd/7X0xBcLrjXHzcMQAfqRbEBgrkZjUaZUKSPYzqd16wrlUpJefoYWa+OgtCgj4egoR3z6WuG/jP3\nMV8PZ8XTSOhL/C7qC+gR3+UOEnPuERs3MFyfYOCSY+dGLX3H2IuOOfzip2JwzR1WB0E8jSIa29A0\npsHEecBJiZEap7uXICI1xulEX7Ah3N5wurtB7AjVshCpt8vyB5ftsl22y3bZLttlu2w/Y3sliBRe\ngO+0kBZbX6MV7TlKeNNuffN9hDB9VwLWJ3A5Xjx5Jx6iW5bI7qiWtIBVydHxPvgYPHTI/REWdm/T\nUTfGF70/moevsLCdLuSL4dXE4nt+5EcMYcatqIwxJgo7hOyeDP32Pjv0/DKY1HMWoBdhHWjl7weV\nwrPzHVKgMSRV837fnUSI0mFwD3sug8Qdqvex0Rc8t5jo72E0dng6rUHQ/Nlra2tqtVqZ3V2OrNBf\niksyRiqT5/P5dOQJYyyVSqpUKol3IuLIswmpgOKyY3AyWRSq5BqI8MXFRTo30cP2nn8Sd5uCUJAH\nE+kqLZLAHcGQlBKxybPjPfSPhGkP33ruWK1Wy6QY1Ov1FAqqVCoajUYv8D6J7s6LJycnKhaLarVa\n6na7mTAmsoudkq+99lpCViiWWiqVdHh4qPX19SQTDw4OVKlU0rsIG0pKhx8jc6rVahr706dP05mJ\nXOeZV69e1eHhYUItnPehvW+2iOkOPmcgbMyHh1P8OdzvKEoMeSMvHCUgrM/8np6epkKmg8Egg675\n/DoSS5jJ+x+jGTTPxQK18DF4WDLK4xjCYwzwPv1zPcP1ZZEPR82ibAaBQd5BTx8Lc+n5SdwbkWrX\ney77PFWCcSM3HXXztBX0jaNuUVbGHFPfUOP84ii0I6TINdYDaKjzgYeLvaFjY7qOpGQLsK6ivv25\nC+3FGKa0UK6eMOehplxufvwF8LFDtcCsCGlCCoPBIC0OzwPwaw5B0iIM7TkULlxgSodlXan7ex36\ndYXuY2fBLEtW5Lozt+9UWQan03dnZn7v35Mo7MYbv/N58P4wB77Io0J0iNeVNXT1kGAUSjGHjFwY\njkfhORGG9dIAHlJgx5TPdb1eTweeevgPvvB4O2N3g8WFF+En8gToj2+f59muVBgHApYkbXgYY9F3\nFXpI2JPXi8XFdl5Ch14/zBUkpwF4/See6YKVXCBo67TxHD12XnIYrNdzgx7wjdPUQxf8DpkQwyO9\nXi/ljjGOXC6XzhR05c14PXXAeYlQKPLEE3AJA8Y8P9Z+rVZLfENeEnPELqjRaJSZe+7v9/spnCfN\nDaLz8/NUCoQdgZJ0eHiojY2N5Ei4bMARPDw8VLVaVaVSSeHJ9fV1dbvd9Pn4+Djxz61bt5TL5dTv\n91MI10PM/GWNunyGfzG+oowmpyY6oi7XPOeM5o6X50Exp+R8EY6XlGrDjUajlLvmTjnOEmvbZY3L\nQDd6uMfTLFwuYZygxD30487ksrFhfPn1ZcaXG2D8nv5CNzdeMWzcifbwFDLM5wSZ4RuopGwSdwQz\n4H1fFxHMcMPG86DizmKah9wY7wx+SQAAIABJREFUl++q5xr3MgZSBFZWVpKj4yUpXH/5fe7AR31J\nfx3kcN6PvBDbKzOkpOwuGBYRdYGcmX3LJcmgL1P0nkPiyjx6JeTHIDTiuUu+8GPekxtlnrPjkxhr\nQblBgYG1LHkQL3qZVx4T5bjm8XBHvlBQMLAnrFIXxlENV7RxzJ634h6sN+aM/i+bFxceNM+PirRh\nHnK5nLrdbsqVcrp4bg3KC0VG3hxJq4zPhbcLOEfpEFKMHYTPk03dqPYYPAaK57RgZCGwobcbsvTT\nBRi5IPCUe4l8T/I0yoT6TJ7PQp9Zc8tyLVZXVzM5Z9PpNClk3wQAT4DkrKysJHQqn8+r2WxmjkXB\nAHFBzrW4y8Z3zuXzi91lvta5JikZyZPJJPEGyi2XyyWEzNFhp3m1Wk1jA7mMNIE3ML4qlYqGw2EG\nkWN++I0rfWQF6KDTEsOsXq/ryZMnqVDt5uZmMtbK5XI6MkZa5N00m03t7e1ljGj4T1IyPNvttiTp\nk08+SUgaRnJ0zNit5aUvkAls5kGmeF06X7P+GUcJ5MmVqSu9WEqmUJjvMPRdYr6ZwlE/5Dnvg2/g\ngShvPJ8tlirwxPeYa0S+oiNLEUlytMbf65EKrsX6bm4AeH+9MU+uB6Nzwl+MLXe+3DF3HUxzpMkN\njhixcGQPGTMajTJG2nQ6zaDvEamkkVwfEU7klveF/FpQc0kZxI17ItDBvLpucoAk6qWov6KRnKHZ\nS6/8f2oMxFGZWI5AUkIV+v1+ggJpKESHh6WFx4FH4krIt2iyaNwzQcm4B8YznUncM/EE4ig0HKJ0\nb4FrrvSk7AKMi8sXhe9MdEaAWfw7XwiumDAWfFEiLFgUy4w+3w5Lv/BYfNsrz3SGxmjmmgtCn0Pn\njdFolDz6+Ezm370kdmx5ojH9dAPI6c32dRCX6IWMx+NUksGVLQYS4cJlCZQo6Bi+5Z0kS/sWZVcK\n7F7knWwBPzs7S4nRCBZ2tXm/3OtCaLEVnvuYF++/hwTZxeeGCX2B30iqd14sl8sZoy0meVYqlYRW\n0RcMTujrCac8k3n3Q7AZBwn3IDaEWkulUlLM3O8bHxyRjggGic/senQEwWUGoUinD8qg0+mkWlHS\n3NjZ3d3VbDbT/fv3tbOzI0m6cuWKZrNZojk7CRlDr9dTo9HQxsaG9vb2kgFG+BejbWtrKxlgBwcH\n6axFjAZXvI4yu7zkr4dpMUi5Dt2QKS6n3ADC2OQZvvYcdWONMPcbGxvJAOVA53a7rcPDQ62srCRj\ncTgcZvjMDRR39DCMXA7TX490SFl5DGoa9YDLV99Zy7hdXjlN3QD19/g8uOHizv0yxN/nCVq70+nl\nYnwton+Q226cYbxwn6PYLkPRl5407yUfPNrgG2TcePP+05/pdJoxeqITCA09fYK1HEGQSKtItzg/\n6MMIHGTue+mV/8cNQeXKhIUdyw645ctRCfFIB6oMe26PL14gxmgQuNXuXiJ9YzIc/vZQlgsbXzTk\nYdBceCDk8PRd2Xp/JC1V5jFMiWJ3w81/Ez1FWoSP3bDxMGS01KGVIzreVwQgNJdehEljWAx6uoHp\n42XhttvtpDAwXD3M5M/kO4wTF94uBCP6h2EJH7pQRMCAzMW+QuMonJ2uvBN+IwTFNb6Dbuz0xFjg\nNxj1IBxxjNDTlSD3IRQpPgl6QpgJHq1WqynshLfJM30MhUIhra9Y9gHUFvQMhIY+wS8YN76O2I0H\nEs2acdRZWuzwoiFHQDMckaTvGKWuAFdXVxOS7Uguz8TYoJinOxHkHcYdRqCpoH3Hx8eJ3q1WS7lc\nTvV6PckE+lksFpNRUCqVkpHOGDgCh/wSP3amWq1qNpvp9PRUR0dHKSS+vr6e4a1YooW59TAufOwG\nV2wefnJji/44oo1xxDXWE0rXDX54mPu8LAxGfrlcTv8kpSKdONCu9EHumFt3CN2QQbbE3EkQJEel\n3UiIYSDeH1Ef7nNDwNdMTPFwow5UzGWypAzi7iF71yfMN7IR5JG+uuPsO3ahG2vSnV03mvyvzy8O\nBv1j/CCX1WpVuVwurQv6Dh3cAPIwHjI/6m43imN40sOMEU2MMlrKRlpe1l6JIeXK1xU/lmycfP4P\nE4BOcB9QNZ6gG1BMeNweHpnWlZgXFvNQjLTwLN3zcmbzyXBhykSAvDgi48qEMUbPxQ2b6CWxEH2x\n+W9iAiA0RRHEfCYXoPzWn+dJrE43+gJNvfRAhPBdaHk4D2Xq84QhRyiBRN1Wq5UWBOEi91pIUpWU\nEqGlrDCP6JcbHs5/PjZHIL1sAsYyOTh4kjwLtMYRU+6l/xhF9JuQIOGfZrOZoTVIEIrd+wzNmM8I\n+TMfrrzK5bKq1Wo6wmdlZSUTLoWPp9NppjgnlcwJxWD4SUp/XRn7GkVpw08uFFlH8KnnOyDo4TVJ\nGZTUESi+4zesa/rkfOMGrgtU+gji5l4q6xJj1uVLtVrVxsaG9vf3M4i4ND+updPp6M6dOymx+ubN\nm5Lm4dlcbh6a3NjYkLRQlqyhQmF+VEq9Xk8IGDWUSGAfDAYp72p9fT0ZLO5QwI84gIzHUQdHOOLa\niblobjhgrDhCEGUYdPU1y/PJsfFrg8EgI/fW1taSg4XDRT9clzCvzoeONHAmZwzbuTLGMIgIPk5b\nDHO6U+eOmZTVbV5KBznEO9A78EA+v9iA4+gTz+T9OHvu7COLGBv9iY6Yr0UcEUdefV3wFwPPESJH\nnx3hHo/HyWmDnu7QwROu+7iGTmP+IoAA7ZGPPpfuOMQxLPsd8/bTEKnL8geX7bJdtst22S7bZbts\nP2N7JYgUHjH/5y/oSYzB+v/xTN0KBk4HGsTKBBWQsltipQVUipfkcDQokyNDHm4gqdg9EknJK8GL\nLRQKS3MvxuNxpmK0hyVizNif7+iMtEBWfBu209h3UWB9ex4JNC+VSpk4NgiG5625VQ+aViqVXoj7\nS9ljCNzDcm825o/xnBjXZi58PGyB9u31Drszv4yj0+lkvA88Grxsz3uLSKUfseJ5EA47S1rKW/48\nxgBE7iFozgoDbfDjR+r1etrizxhAYz1s4Eiu040QBXzCuNwb9VALiea+bZrfehI8eUfHx8eSlHbU\nEl7xkCD9ogwFHih9cWTJ+8k4crlcQr/cS8Rj9nCDIyMcqUIyuXudyAue4+iuh3IdqSUUCv/5/FLI\nFP7yKuy1Wk0bGxtpLgjJwcO7u7uq1Wq6fv26Dg4OEuI6nU5VqVS0tram/f19VSqVTF+YPzYjkFhe\nKCx2W167dk2dTkdPnjyRND9LsdFo6OTkJK1xRyuQXR5i4X3QBXkTc+v8rLwoH32+l/1lHhxdZA6J\nVCDrnS848cALHZfL5RReBmGLaSKg376GCUsPh8OUauKyDVlKzpfvEuS3RBw87EW/YxiZMTv/uc5z\nOeKoKSH0KO88ooKcyOVymfWETGP+HGXxlI6IwjjS7XMlLVAnZImvs5iDxe9pKysrqlarGT0F3Xxe\npAXKGVFG3zDhyJznbPE7Px7M+xTnxHmWOf+5C+0ty4ORsnkt0gIu9tAPBPSdNAzQt/T7NReO0UBx\n5ReNumVQnicNejxVyu6mQJF5zJw8EPrpApm6PlG4eZiTd8fdA4QjnEFYZD6u2Fxw+liBaGGmeD/z\nFMMtbEP15H7mwuclhtNeZsTQRw/DTKfTtFOq1+slujHHbmRiBCAY/UxAr4DuNAKyxuhxQepKnJ1L\n9JOcMeD4yWRxBE5s/M5LGpCbE+FvnoWy6HQ6SYB7KIF+A/djPBDmITEb2niYyoUG9Lq4uEh5Ni6I\nvczA3t5eorcf48GuNnc+SH53GktKSeh+/puHMPr9fgrNuyEFHQmzESbwUDJHuXgCqjQPCxWLRTUa\njZRb6flT1KSify7AJWV2SMK7pVJJBwcHKcdpZWUlnW/nmx4Io/phz81mU51ORwcHB7p69ap2d3cl\nzcsW5PN5tVotHR8fq91up35Wq9UU9sW4g1cbjYZms1l6no/96OgoGRrlcjnxB/zE3+h0MR9SNhzH\nOOAZtqa7sewGmPTicRueL+WGDc9uNptpo4krZXLSqLHG8UCc0cda9PG7gYCc9XVdKpXSDsnhcJg5\nbox59pQP+k2/MCqiEUAozvOuWEse2o9pBp77CR29KrnT1+mIYeWywmnMmvcNQe5g0S9fs562Eg0Q\nD126TuR3LivdGWAd4czGtBzG5XSBv7y5rOEdkS5OG/rJc9zgjnMHryzLc6O90mTzuPXWDR4pG2fH\nw4GJ3fOGqRD6nguDIuL5TlQEeoyhwiwgTO4NYNTwTGcoR1RIzPMcAjfmPOmTmjRs54yJqjC0K0LG\n7tvmoyfA/f5bz+lxo89zBYg5+5gjbfB6Z7PsjifPt0HQ8X4fU/SEY86Fo1WMP9Zqgf4sYgwReIf+\nnZ9nz3X0vCHmw/OePPfC0U9frNFbjXF8aObCgO39GE6ef0Bx2uihgiyQByUpoT6+MYO8KlCg2WyW\naqX5Thv6QpLuxcVFRmGMRiOVy2W1Wq0kaKANNC0Wi8mYhaYIIBACzy1y4344HKbDpqE3zgWoI/y0\nurqqbrebHAWKjNIXeAjB6qgTieascYxRaa4M2+12QgZB9Hgn84FCZJdot9tVv99PxoLvoiPHo9Pp\npKNy4Nd+v5+UKIoWJO/s7Eybm5u6du2a9vb21Ol0Uo7UaDTS8+fPVSqVklHrMgSDYX19XblcLuWy\nYXCRP+U1vfDKJ5OJGo1GRsaCmIGauqz0HEyX2e4oMo8Yt3G9LHOgmRt36GJOGrIBtAia9vv91CeX\nO9SOQjY67/OZAqY+BpzRXC6nZrOZWRedTiedh4h8XJbcHQuAujPtiBbrkH6Px+PMXHgNtojgRxQZ\nPeXyGsfTaUB/XGe5IcncO/rt8+VoD7/nGeg+zyHzOfRIi/PFbDZTt9vV5uZmJtcpRgEimuybuXw8\ncWxuKHqL4AHothuKnieHM/iy9soQKSlb/A8C+4J16NQ9nejNO3waCelM5Ra/MyLMHqE792p8dw7/\nPITFc9yIiAmO/hnBIM0X/mAwUL/fT0aBG1N+GCpeOGNA4HOIrHuRjCuGeHim0xvPXFIKPYAwQQPG\nj1GGcojhBmmx2D1k4uiGL2D66ugh9HZUCKPNkwW51409voM+9B+0JgqVKNxZUI6AMk8YtvCLe/GO\nSlGLxwWqoy0ejoXPSbaVshX0p9NFUbtarZbmfzyeV98uFovJwHEDDFSJPkLTZrOZFO/JyYkGg0ES\nEhg5hIpWV1dT4i5n2rHrx7fw5/P5lGgO4uKGryN9zsMgUigD3+aNUYPR53OOondDilAP72TcGHnQ\nFgOy2+2q0Wio2+2mOQBJG4/n5/wdHx9nPFPO4ltdXVW1Ws0Ydvl8XvV6Xe12W51OJ+2UW1lZ0fHx\ncQZ983PkDg4OVK1W1Wq1MsjSrVu3NB6PdXR0lMocEPbb399Pa5jq6Kyt/f39tFmg3W6rUqlk6LK6\nuqrBYKDBYJCKXcJ/Kysr6vV6mfCUtNi2jtzEcHTHwefbP/MdssjloPMEz3Sd4Oi/8/Dq6vxgb0fB\nkb3T6VRHR0dJHziS6YhJDLHj3PCbZrOZ1tPNmzd1fHysw8PDF+QIhtrFxYUqlYoGg0HqC3ICZeyJ\n2NAIGRuTpplPxu9hPJfdIOz+e2SNI/qMkZAfutPlq6Pb9MPXk8vVmMqArHW9znwv07ukTVxcXKjf\n76eNELwXmhJdYI3GKEnUwf4XR5r/u33gupKIhTvJUb9EFMzbKzGkIAQCi8YgY4jPF/HLYpUgDD5g\nz8dhsh3K4zdMnuc4oEBjuXgEOOEAP3oEIenWuS+2aDnzvmq1qmazqV6vp06nk6nDgXKMuxHoy2g0\nSn2M6JiPCUMFj458C6dZDBl6PJ3nubEgvbibhLnyujnMoSMI7iWw2MhVi8YwjOwhHK5hAHqoVVrA\nscu8ESlb3iIKMJ8zX1DOBzH0598zVje0YtiVIy68IcDc4HT0CCic+6iQzeGsoJrc50aXI0tra2s6\nOTlJlahBoJjPfD6fUI5er5cEGErm4uIiGVkuMJkXFI0LftYSv4EvvX9eYI9GYU/oHUOJjh572QWQ\nDectp7OHElx5g+LBF6enp3r+/LmkRRHQ09PTNEavebWzs6P19fV07AxrrdVqZXZFOnK4sbGhp0+f\npsOJq9VqOnz44uJCN27cSEprPB6neep2u2q326meVb/fTzxYr9fV6/U0m83UarXU6/UyW/xbrVZ6\njit2jAsMIb/2snCfI66OPLiB4msoOi08N4ZseTY8RmjPHRMMH/8nLQwi8mEwcrmG4UFZDjfqKVHB\n+mUOqWO1sbGho6OjVM8QfnLkGqSLsftY8/nFzjRH4h1NlxaIK0rd830xuqCdGzI8F752xAuecmTL\n6Y0+QFZ42kbUxT6PPB+eiXPsURynB/IRtN2dZJB4R/lorAU35J1PGTeARPyNj9vnbDKZJEfKkTv0\nyM9djhST7MRxDxxl5IrGjSDPhaEBWfs1FiiCiDCCtEB5IJIr71wulzGm4nZYJhzP3WOxDmnGpDqY\n31Ei7gO+r1QqyZiiRcVMX0lQBhWKi9Yha5jLoXGO1oiJyowD4RAhfBQasCkhJ4e3PQ9HWuRPYfVH\ni58xQlenNwIHujJOX+TRiOY55IBEYc2z3ZD2732R+n3D4VCz2eyFGmCed8AzvK/+1w1Kfyd5VVQq\nZ+6bzaZms1ky2kE6CoVCCiVBJwwUN7xIEmecFxcXac5qtVpGQVUqlVS8sd/vZ5AlQkLr6+tqNBov\n5EJgnNIPFBRhR5CoGOYFvcVodIEH71LTyoU+1cOhJ+UXnKdc0TAOeAVFQZkH3nl+fp5o6qgKYc9i\nsaiTk5O0hqDbZDLfCNFoNLS+vp7Cn/1+X2tra4kX8/m89vb2JEk3btzQzZs39fjxY3U6HV2/fj0l\njTO3IJWj0SgZCxsbGyoUCjo4ONDx8bGazWYqcXB6eqqtrS3l8/mUDwafQjP4z1MOMJqZy8ifHhFw\nNAReJNcF+YCx6bIPnqb5M3yN++eo6Jknz9dzY9kNAXiLZ3iVexwUl5PkjzGv6CCOPiqXy2o2m5nx\nEK7O5/PJIIghZuS3K3b41Mfocvb09DTj5PlGCtaJy2k3pKLT7mE4eBoecN2L7HXHi2vISpcl0iIv\nzKMuzjv0B0fYZQbzNZ1O1W63E91Btd2I4ZkR+VwWVeC96BRo6sgaz6Bh0LEunJ+QIS9rl+UPLttl\nu2yX7bJdtst22X7G9kpzpDwvya1ELFCsXodmsWrd4sfLixWssdJ5llvYeB+gEsRheSY5Jngsy7bz\nAz37zg7PoXFvi/AbeSTAuTwLpIIkX3b0MbaYM+M083HF/B9P9CQZVFpsscd79G3+9A1vyMOXHiKI\noUB+47C2Q67QGfSJRjI11z3h3p8B7UFTqIbrYTV+U6lU0vM8KRUe8pi9o2N8xpMBMfG+4hnzDnjN\nm4cbuc5nQqCMkZ2HhBocQSAMCM03NzczCEK1Wk27lnyMlExwpCgmvfpBtzHZ/vj4OIWqvcTB9va2\ntre3k2fn/AbsX6vVMonMIDyE7xwFYN5YS77DDvQXr9l5n3dyD0iXh2JAoxmHI1148iAT9JXQz3Q6\nzXjxtOFwmPG+KWMCPc/OzlLOErTFm87n86kUB+NHLlSrVXU6HXW7XW1tbaX5JndyY2MjMxcgH1ev\nXtXp6alOTk7S+xqNho6Pj9P6cH4vl8tJZlar1RQCpPnpBhGtZQzuoTtaxPeeH8Q1p0MM+UOPiM6A\n0iLTPbTL+6rVakIuaCDirGtHxkFnhsNh2nrvaF21Wk0hPtalNA9FHx0dKZfLZULi9AXZ7yVCoCdp\nDsh735zjsssjH15IkzXsCA7zgN6bTrNlQ+I8eiPC4JtVoM2yXDI+cy9z7PPE/TGC4yHG2BfWGCiY\nhwx9Fy3NUy5Yo8idGK70MUTZDA966oXThmKv0GIZChfbKw3tRXiQheOTICkxL/DtsoXoytYJhyHl\nORP8zkNInjhOTJbPnpDp7ywUCpm4rm+H9rCgtKg2TG6Vw6D0DRrk84ujMOgD43f6xHABz6ahiFgQ\nhLukeS7IycmJarVaUnBc4x62pJOgKGW3lcbjPpxpPWeCvjjMyuKSFiEcF8o0n1voxHXCEB4GY34R\nhvADCpBn+sLz8BXX4T8Ox2QOuE4OQ6zD4mFSD0V4/pYbbdJcKcLbhJY8QZKq1hcXF5nq2OVyOSUf\nN5vNTBVfNyIQnL6rqdFoqNVqpfXhQhrjoFQqvXCcye3bt1NYxQ0+nI21tTW1Wq0XDGDWQrE4r8UU\n6cZYPckWGlIXxw1Fz1tjXJQEkOY5RPQD3o73YLw6f/sxOIRW3KFjDDyLXCdoQgV6D8PBA27c+4HG\nbIm/evWqjo+P0xomv6ndbms6nWp7ezu9jx17udx8h5lvVMDR/Pjjj3X//n3l84tK39Cy1+upXq+r\nXC7r8PBQ0sLZ8ZIgMVzEPGD4uAxHkcawoCs3NwyYC3eOPZTF2kc2uGL3UguDweCFGmOeUiBlD1jn\nnUdHR6pWq5lD0Ak/k2bBfa1WS/V6XU+fPk3r2nOkMN7gWc/Xgmb0P4a0uG82W9Tvwtl25xQ5h2MM\ncMCzYniLMgluaBDq9PxCGjTk+S7rXSaTvxyNWt917noQR5nP0bBBPjmwwjy5/nCZCP/lcrlM6onr\nyJin5uHFGIrkXujkub/sRP65M6Q8UdStWowCz12SFrFQj4W78I2eavSE3BuAcRCgeFGTySQJ00Kh\nkMkr4WR23he9KE/o9t11rtjJV3Cjx/MLXPi4scA4XAnH+DuoGEYP78N4cg/YE4exvGu1WiauD5OR\nD1KtVpNy7ff7L2yZdgZzIzkuVOjmNa6krLBbhjrgWXp+EeMnd4aFzDg9iXk6nWpnZyfNr3tgvkCZ\nCww76Oa5bCwwhCnjw9vybceu+MkZId/H5xihiVDwhHqMY99Vxv/Z5o4RRa4G7wPlmUwmCdWS5krB\nk4o9H2JlZUWj0Ui1Wk29Xi9tTZbmeTnkEZBb41vAy+Vycjp8Vw9jn0zmW+7dqPHET8bl65fdSPCA\nK0hHEjF6oJsrAebNnaGY/8Y4jo+PkyxgV6rnSoA+uNEhLRwxch056w0+Y/yVSkWNRiOjIAaDQVoz\nd+7cSWj0cDhMOWIrKyva3d3N5MCR/9Rut1Wv19PYz8/P05w9f/5cN27c0PXr1yUtjhyazWY6PDzU\nxsaGrl27Jmm+2w/aMTeO0mNMIWfoG3yDvMMYijtQkZfuUPFsd2gj8uLN6wdypA7y3XkYeRELZIJA\nsc6Pj4+TbGfn1tnZWUJrvbwHBpc7UsxT1Gc0lxGeCwhfoNMwDlzWuG7DwZKU+os8BBTwHGJoiqzx\neXJjyR1VdIUjUzGfDQMK0MObz2vMr6pWq2lzFAaozzXvjAY4SJYb4xhsroPdOYVW0N6NJc/txRnk\n/egnDD8cZ9erL2uvrI4UzVEIFIkbSdKCyBDWEwtRVjBEJJq0UHIxgcyTes/PzxMKRKVgdu240kMJ\nwEg+GUxqnEhpUaOl3++niefd7gVGhM3LC0jKKBPQD4eMPdnPBZdXv+b69P+w92bNcSTJubZXFdba\nCwBBsls8PdMtyWQmk270/3+HTBppemOTxF47tirUuajv8XwyAM4xmxt+FwgzGghUZWZkhIcvr7/h\n8fSUBQ9BXxhT/oaR93Wz2SyVhh0bI4U4o54DPscg2ZGwYbNT6TF8CYp9eHiI+Xwe/X6/FmEY/h6N\nRnF3d5eLESPsiNpRsN/Z0Q6GA6NoObTz6OJ0VkRcV5JcQQ5RzhDO6SvXHR4exv39faZ3cBym02mM\nx+M0pBFVpXFKHAwGg1rKCDmkFADPA+WcTCZxe3tb21aPs2qyqguAUnDSBFjmje97bCzDODuHh4f5\nfRwyI0fICagdRTg3m00SvekPuqR0epvNZiyXy0z3eX0Nh8N4fHyM33//PW5vb6PX69UMJrLKVnfO\nd8MIg1j1er0syEn/jTpjlCOihl7d3d3ljrrLy8v44YcfUg/d39/nPb2eKYtQBlHff/99XF5expcv\nX+KHH37I50HKJ7jj/XAiTIou0/Wk/HkXZw0IPrneiBx6qly/DprKdCHvYqfYz6NsDDQMnD7QIU4M\ncCBhvdtqtWI2m8Xnz58jotrtt7e3F91uN25vb2vfjYg4OTnJnbPc004z7+T3Mhpmp4bfccIc0KB3\nXL6FQPDg4KBGK0DPGHlBx9AX9892pqylZP3n+eE+zE9ZKwskz0GPn4dOsGNDoGJ7Y/DDSFfZ0L+l\nHudap+/op51U981/MxWGz0D6PVZl+2aHFkc8R1e8WDwANtr+PaJeUt/oBZ+Zf+PUF9G60QCUG8gR\nk7RarbLw3tHRUUbzTLDTG/SF7bW0p6enhGJvbm5itaq2MhsdK1EQSgmAPhmyNHqCk+g6TQituQk0\nc8eIfDkQF6HmPoxdxNZAobS43ve141im0Lif89u8I3ONo1qmd90XO9r0A36ZHRvkaG9vLw84jog4\nOzurFb90dOXUAs/wIqOPdpq4jvdx+sOQM04YP82V4D4UT+R3EJmDg4PcUo/DT8kMUhTv3r3L6s6k\nMjebTRwfH6czyVwwPhgdxnmxWMR0Ok1EkmiXMUVRLpfLWK1WGR3DM8KBJQDhM6fl7dAj61bQDgzs\nfHo8nRZBSeMA8kzqBCEj5XrFadjb28soudnc1oI6OjrKHZolmttsNrPK9j/8wz9ExBYl8K7hw8PD\nRIVwghjj2WxW01seX6rwR2yNxcePH2M0GuW7mJPF2DQa212SfIZczGazeP/+fbRarUzfwbVC3zIf\nPI/+WG7pJ/Lp+WKOmTtQCa995r00eNzXKR0jNjjApnxwPWsC1HQymeQc8l7WBbS7u7ta6vjp6SnX\njJEhIx4RVX2xRqMR/X6/Nk93d3fx5cuXdFhKZ6m0S/x0yQOQetM9IiKrz9uh4B3M1/QzkTPeE/5k\nRD3NZXSROUT3Oo1G4znQlcQrAAAgAElEQVTofzsmDlL4x+/MG2AGzwNdh3Pm9zdfinnxuDEX6AE3\nvttsNp+VajByZRCg/NzoGrucX3LoaN/EkeJFHQl6sGn+nIWLcPt7pOe4j68xAhJRrwpNdMXCx8ki\nKid6RhlHVIaN/jFZ3NMpNfcfoW+1tvV0Li8vcxFRCM/kVysy0DgXKPV9nb+1E8n1NDt/GCGihdvb\n27z/yclJonJE00aF6A+L206P58UOSukU21H2WKGk/TzGrYxmiRRIN/V6vVpKz5C30z5v3ryJq6ur\nZ2fWRUSWaLDTZg6Y03aOrpBN38upD/rj8TLS0263U1mTKmA+WdDr9TrJsBGRROS3b9/Gu3fv4vz8\nPB0JoHQqOF9cXKTiI9rmHo1Go3bsDvN9d3cX7XY7HX5k5fHxMZERnO/JZJKcGxxjnucjUuBg8RmE\nXgcKNIwaypA0JWOGPDsQ4v2n02k8PT3lVnUcRsbUKU8McERVvBMk8/r6ura+1+t19Hq9RHJIh5EO\nbbfb2S8/D0QB/mGZRgdh44gS2nK5jMViEf1+P4uv0hfSfefn5/Hu3btaBN3v9+Pu7i6ur6/j+Pg4\nn+f5BQUtaREOFLhnv9/PscZR6ff7tWCANf21lL4DjDJYcQDlOTYK4n4y38vlMkajUQ2RMpqPrXDt\nJgfsDtTOzs5qqS3P03A4zA0cm82mVrmedTYej58R7bkfSKsDQSNCfs+Iuq7B0fS2fwqu4owRUPO5\ny594TB3QYt9sa0iVYte4jrG13JpT62ft7e09qwcH2uWswXq9zlMACLyQC4IyB90voZ/o4ZKHZ3kx\nIl7Oq+WUAIPvORCkT19rr+UPXttre22v7bW9ttf22v7O9k0QqdI7jqgQG8OCL6V1yobXivdo5MJo\nARGhOTy+jpxyROSZTkTWTik8PDzEeDyObreb6JLRLqI9oinD1I7I1ut1QsqHh4f57yUeCv0tkRz6\njAdtUp5TnUQwjnhIc8Jz2Gw2yb2BF0OEbFQEBI9rzCEqI0iQJBqRRZnXNnIHT8gRnSO4rxE67+7u\nYj6fJ+pEpEVK1VBtt9vNcSTVxj2J0LnW/Xczt8fzY5JoCXHzXqSzjDQxxhC4TTYHOdjf369xdhqN\nRhwdHcXp6WlcXFzE1dVVTYYjIneQHR4e1sjIFF/kXDwQKeQHJPb4+LgWQW42m0ylnJ6eZjoJefLu\nWqd5SV/v7e3lwbkRkegOCBa8loiopcPm8/mLu2cdKVufEBGTCt3Z2amRRyOqSNqVv3d2duLm5iba\n7XZu4LCOgB94dHRUK5uAfgKpNSeLNCD9PTo6yr7e3NzkGmPN8RlI4GQyScQKeSP19Pbt27i+vo7p\ndFrjTi6Xy+j1es/Ghf6hmyhaS4Pgz0+nQ50JACn02ofyALroNAprxUiUm9NeXnNePxH1qvmbzbYw\nLlX4fYAy6TnKh5R8HlAxo2etVisPjEZfcB2bCbrdbq5V1hgNFBKUxe9lZMU62iicsxv0z1xP68nH\nx8cYj8cp26ZDmIu0v78f/X4/7Ql2wNw3I4DwbOlvWe7GXDnbY5A6l12IiFoqj3krsy3oB1LpERVf\nj7EzkshGF9Bs22DWGciSx4W/I2NO7TEvzoS4OQX6UvsmjtRyuUxDXC5EG9eXUmMMhuE6Gy4z/0tS\npHcM9Pv9GlmcFFlEJIF3uVzmoJvrQ5Vh0hhMOKRh18yx42K41FwnyuHjeDiNV+aHDeH2er0kHZKf\nN7fJabX1el1LDbpys53EiC3JdTQa1e5Z7myjMnxJFmWsmBf6jZOA8sChiqifS1gSI73Dq7yOOUVx\nw++J2KYn+X7Jw2COn5621XSBpCO2C4Zq4OxktMNTjmnJPSgNAQqM+SdVWpIoSQN2u90aERmHnrG+\nvr6uOQKdTid+//33+PXXX2vwt+uz7O/v1w58Zf1BOLYjwWdHR0d5BI2VIjV4UJDMzeHhYRow1raN\nrrkzTrVA4J5MJuls4dCzAWRvb3s4MNvcI6LGI5rP51nV+6VAAk6JHQ3k7PDwMPr9fo17BHcKZ4/3\nYPcunBX6yPMIAnCoPE84mKT9vIZJk5Q7ER8eHmI4HObzdnZ2stxFRMRoNMo0ignPHGGz2Wzi5OSk\n5hDg7Ji74p1wm80mdwOSiqYvOC5sImFnI7LhnXyUJ2ANWBeU3CqoEHzHzhayYl4b78hYoYPNYfTa\nLDfsoG9MWI6onLUvX75kCqokY9NXc2qPj49z8wb60ilYGlSEkn9qvqb5Spa9kgDeam0PjR+Px5n2\ntS3F+Wg2t+VhTHmgtlWz2cwdnNzX5Hqc6YhI6oedFHNccdzMt/NP8wBpHlvLDWPF707hWX5wPkt5\n4Tmkey1rdqJMLzJlp+wn41H+ze2bOFIoDNcO8ouVAo6j8NKAmfVfogcmOeLRGnWByGfUIyLSiOL5\nenGjhGezWU1pRlRRGwgROWeui6gfl2FkAWPJ9Xyf92bC2+12EnyJNBwJo9iIflA4fM9ePlGto5OI\nalcP48P7cJ139ZhoiLAhwL63uV5G13h/R26MZUTFY/DiodnpZp4wiLu7uzEcDmsOjI1fs9lM3sf1\n9XUNWWCnXKmQnWOH62J0DDmDk2OFihIm4qNeEfLGuEIadt0ucyks49QA+u2336LZ3O7eAnXa399P\n5wbjyDuyBdx8CRzQ/f39OD4+zhIJds5ms1ltl54LFhKx9/v9DF5sSJFvxtDzDc8DJNZOHcHV/v5+\nzOfz2m5GO43j8TiOjo5qSBS6wdwWvg+azM4/b+4wVws0EDnF8TCiGxFZfgInGa5URHUsCQHgZrNJ\nNJqz8iaTSa5H5ALjjNMN18bvzHjjEEZE7uK0E4nOiIh8NjJgcjKGnkDBvBR0NgbTzhFGF46V61qZ\n/8QaKblAyKGdLAdQjLMLNYKQ0Ed0DbqV9VlykVxuxe8ACnt/fx8fP36scX3evXtXQ73MAXr//n2+\nP+gu64TSMgTrDrzpj5ESO2BweYwYMZ7YyMfHx+Qy0tA95lVxLbWxyr9H1DM4ZSbGtpi+GszA1pVZ\nCl/LuDG/3vTCGL10sDyZIda+gYPy+Cv6jj9gW2o7w3f8O/JZcvicTfha+2aIFPCjvVKEvzRQbDfF\nGBmaRKBsVC0cEdXuQBuM8Xicu6Ps+NBAqIBCSyfu4eEhC1rSVqtVnquFI8Y9Oc+M/mIgI56ft+S/\nIbikE9g1yHjR77u7u1gulzXHjR0RXA+iEFGv3cTfrZAgpTpC57MSBbTTw9+NwHke/E52JB0VeT75\nDGVq4beA43iwaKbTaabIdnd3M2r3+D49PSVZmoKFKGbQG4qO8n4Yw3I7vjcmYIzKBYlBcXTHM/f3\n92M0GsVyuYzpdJqfsTuHMd3d3a0V5ru8vKyRkO2A/fHHH7mmdnZ20slqtVqJuu3u7sbHjx9Tpt69\ne1c7a4wUV0RFTG+1WjUZjKjScDYIGG/vnMH4ITOcUWcHwcVo7QiQUqRRKA8iL2e90bwxwrKIgcII\nNRrVGYXj8Tg/Zw4wmsi+yeNGejCS3NO7kwhiQHJo0+k0jo6OkjiNg4a8UawVp4fdw6DhIOPL5bI2\nphFVxH9/f18rqnp8fBzj8Th1kKN4jCznLJakcKPpNm5e8/6c64xUMpYR9a3qvKPXN7q5vCeBMYGe\nnQMCRxxop4xsP1izRhp2dnYy2P348WOO5eHhYQYWd3d3tcOON5tNHi4dEXFxcVFLcXHPfr8fq9Uq\nbRC0AtaFA90yW2CHwOPWam2r36PjGDdnUjyHvINtHboDOSO16+dhswjMnbpmDNHFpp80Go0Yj8c1\nZ8/Imfvg8j00AqKX0r22LfTVwArjVqaN0f12XO10EehZDg0YvNS+mSOFo1HWdiihuoh6yf+I52k/\nrmGBeNEwIHi13OPy8jIhUXbgGHK1cJQcA/rCAkWAfcgkCsZw83Q6TY/bSsXva0SDd2BR9fv9OD4+\nrm3fZUFTKNPoEnD/SzsfykqtpQG6u7uL2WyWKbwSdXLK0gqTMStzzM73G8ou71EufqdBQDoYb+65\n2WwSLaGuz2azyd1hOLSOjHiGDX7E1rCRCigjEaJw7u9xK51hnJ4yMnNEi7PWarUSmcCBpeFEAX0T\nhUVsiy2SZnMJCT4DugfFNNcJZ5ZSB6enpxFRHVrsVIudVJAuZNw7COk3z/WYGFWcTqc1hA1DaLSW\nz5h/UtpO+2LQUIzUzoqokKTSKY/Y6h+UOsbYZVHgwkREreo96xv+htGNRqORTvt8Po/JZFKjCsCf\nnM1mNRSl2dzWcgIhd+FUZHS1WuVRKIw3qaxGo5FpHYIBdBm7BHEoIiL7hVxjFLknjsJLKAifIxfI\nON/DGLK2S0NsQ+TUj1GA0mChU0BhcCRxWp1iZ06hbTiVylpzCYuI58e0WH4Xi0X89ttvOYd7e3tx\nenqaa8K79h4eHuL09DSf6xQsBpy1w5iy/gj0Pe78HzuJc887WKdHbNefHRUH2dZBrEHWRWn3LJsl\ndYMA0s5yRKWj2u126k7kbWdnJ4EQAioH0N6xbp1BORPWmNex5w29ZweUMfD3Pb92Mj0uZeqZvrAG\nPRZl+yaOlB0iT4YJ2eWCInoyehFRRUlfi4RQsiX/YDqd5nli9MOViLneqAF9tyPnqMKNCN31QFjU\n5GPt0ePskRpAsaM8gWQxOtyz3W5nVP709FQ7IgPl4cjTwsACsMJzIxq2ssWgGdp11MQYMU82mBCb\nPcY05omF7c9Br7inESKczL29baV2HAIW0nQ6Ta4PRsjwMcoEZ2p/fz/G43HWBLLxwnh47mlEMRgF\n5rfkSHiMGBsUyGKxyLl8KUrGqPP+pF+pE2OEzBw4jkpB9jl7cLFYxGw2i9FoVKshxjiD2pToJQ5i\nu92ucYvYdm1ieUQVCB0eHiaixD3H43Gcnp6mUTT5GafZ6Xka/BUI88gx/Tk8PIzpdJoy5+tbrVZM\nJpOsL0ZR14jIEgpsL/f823ihr2x8qR3X6/VqnCWcNcb58vKyljrcbDYxHo/j7du36Ux73jE0RmuQ\nd7huBwcHmdpDzlqtVgZvFGNFznBOmTtkEoNXcly4J3OPAeMeln/mztE+uofPuS+o0EsN54v7EtTR\nH28Isq4muFytVskjfIk0XRpT7A/oZavVirOzs5wnMg3dbjcDQsYPxInrnGJmvggoqC9GUNRut+Py\n8rI2DuhcEFlnTEgpI4/YFuQG3UM/SsTfa9z6ySCEx/5rnzm4Hg6HGeigNzzPPinA84S9I0h2cE2t\nMGrWOa2PLCGXzkiVjlIJzDjgLxEwnMASDSxtUtleyx+8ttf22l7ba3ttr+21/Z3tm5U/AE4vSV3A\naEZ9TGorURV+N0TpnQMgJ2XxMaoge7eeIV6iE6NkEdXRDEZo8FS9VdxQLPcEYscrN4wJOkL0Tp96\nvV40m9tihiAXjoJBL+B5gMiwo8MRiMcNCJ/IvkSl6D/ImgmSRAdEyOYmfC3CJIIhumLnSETUECYi\nJafIDPn73iBEIC+bTbX9ttz5BkrCPU2iB02IqA47ns1mWQyR8QaZ8E42o3IgUEQ63tUGaoq8Okok\nVcDYeNs/HB/QSqMum80mi11CNId/0W63o9lsZuFAE2Cvr68zHUTaz++PrLhQHrJIfzudTiJaXOex\nKNEa0m6kf4w6lfJkJOv+/j5LjXjMKMTI2EJidRmDiCrl4fQd6Aay6rMM7+7uEuUrd6yCbEyn0yyh\nYe4gP0tKAPL1+fPnGA6HcXJykmm4+Xye5UY415B3cIqZ1BnIIf25u7vLOXdK+OlpewzN09N2ZyqF\nQweDQepf5MmlVowYGB2EgwVxHqTMZHSQt3INmxeKnka/+YxP7mPUmjXD+5G+YrfmZDLJzRnIovlJ\npJxchRw74vQ13zfiaMTmt99+y/V8enr6jBvbarXi8PAw5QI9dX9/XztyzDqB9wFJHo/H+X4uwYOe\nduoJu0W67O7urpYWfvPmTXS73WeoozM76HiPt7M6RqGMyDgVH1Eh4yCnLt/DGsammuPY6XSi1+vV\n9KH1JRxHMifuJ4hdeV3pT+AbML9896XrvIZLpNJZopfaN6tsDunS3AQgY/NpIuqpL64v4bqIisNS\nOlJc59om6/U6rq6uotVq5WGsXlBwjJyH57py4fM8titzD3avRVRcGlIe5XbL8p3NHXMqE+PIdzA+\nZYXmXq+XaQOnapyidArUTi3vaD6YF2TpkJnEa6i/5AeRf+c9Pb+kAvibn4dhsiPGMyIiFY3LW3Q6\nnRw3uB1eiOaAeBcZ0PxgMEgip40zysaKKWJrsHu9Xo5PuduO56P8Sr4W88OmCjvqfMamAhTmZrOJ\nXq8XvV4vBoNB7kLjeRi66XQas9kseRtPT08ppygQVynm0FacNB9MDBeFQARZhMzPM73RAqNlo23Y\nHGcKSJ7PkF+nLklfff78OcbjcZycnORc8M6MGw4xir80Jjs72xpSe3t7ScTHKCGvTm0if/yfOmue\nX/7v9CwpnIeHhzg7O4v379/H999/HxERv//+e8xms3jz5k08PDzExcVFzg2OASR6+FsRkSl9+Dns\ntLVMR2x1xHfffZcpf9K6cOvsKJqrxDrAeTY1ATk1dcG80nIe0Sde75Z9gsjyc+tjpwkjtnp0NBpF\nu92Oi4uL+Pz5c82p44DccrcfPDYH5earUavLzg73/PnnnzO1f3p6mk4PdIPd3d3cBEDjOBlvNnJA\nt7Ozk84+toZGEFTWaLI98Lj6wPLxeFxzdmgmerP2vE5JzZV2zo0+IqdsdrFN9lyQZsbGIteXl5fJ\n3bTsMBfoG9vtiMo2YJvKoNTy5XXJvHJfBzu2Azs7OzVubglgvNS+iSO1WCyi2+3mqecRlYNQ5q0j\nqtonNjQl8Szi+eGNEVXESzOxDkK1OS18dn9/n9diqCMi0S2T5miz2SwVnL3biEjiJ3lrO2fkilGW\nnnyTxyHUoty63W7uyAFxcATpBRtR5ZZpLJaXOChGDNww7DgFNvrOjZfGi3cxP8zRAO/OvUsuhccJ\nhcFzcCIajcYzZcP7+XBWO3k4KeaeYPAZZ4yQeR+QX41MeYdVSdy0I9hqtWpOr7eJl3WSuNYKHYNJ\n+YtGoxHT6TQuLy9r5F/Q0svLy2fRKmR3uGPeNRdRIQWgSdzz4OAgLi4ukoeCA2K+Ds4X48YYIB/w\nixh/b2N3gOHxZc1hEI+OjuL8/DzG43EiU3Y0eA6cJ7gy3BdHC+ffc0DfkRNkic0LvV4vDRyNaNdr\nzLv9QCx2dnaSmxmx3TpPLbNutxtXV1d5Peub8QJFoi84Beys9KG2GCbkyBsqeM/JZFLjlFoXY1TM\nLWLtEUjQJ88xrTTE5pRhyHgWusC8Ke7JWkLv8xwQUHSpDyVfLBZJ1LaepR/ozJdKGURUMlnyWOfz\nefzlL3+JiO2affv2bW1s0aPwVWnU3mK8zQOy7nXJCAjW9K8s90FggSx7Mw3BAI6Kg6G/NUdc6+DB\ngTdj8/j4mJwxvz/zYV7heDzOABDEivfiAPbhcPgsu4FcMF7mfzIGOMIlclaid+aHlcR7fw8nys+I\nqIpQf82xjPiGdaSA1S04JrI2GlXNDtINwO2QwSLqSBYL1AaaKIsB5TMWGdtj2ZFAs1duh8AGytEY\n11xfX8f79+9zG7wjZCM3VtAoKXYuONWwWq2yBhDXcwAphoNxKdNvCAZCUO6cscJ0BPn09JR9YDdc\niSQ4vUlzasVpjYioGUeMJvcy6ZvvlM1RiZUbCp3UDg4DCBHOgNG4kjRpZw8ZIVJzygoHl0jeuw1J\ng5IyczTt8SaC8n1BLFCMfv9Wq5U7hjabTa6FiLqhubi4iMFgkKTi8XicAQGpQjugJrY7AudcN2Tp\n6uqqtmY+ffqUaSP6HlGhAPzfxGjgfeScjQfMKwiQU7t85rVO8BNR1VMjdYIMowiNuJJWQgZx+pA7\nnGDmcTAYRK/Xi/F4XKslhMHiEGKnIXlf1oB3JqKXkJtms5nPYzcuaES3262hKAQkpSxFRNYiI/VB\nPzn3j2dbTrnX7u5uppyMnJEmJO3reyLDTrcYuXEw6PXGdfyzsbJeZm34fU0k9z3v7+9jOp3GfD7P\nINKpZe6NETbST//L9B3zBBpZkpFJt/33f/93UjwiIt6+fRv9fr+WFkS+CfCo9wXhmnXhcSxRet4b\nh6IM/spyHh5rdgSuVqssMRIROT+murhaPJ+VZHGuZeeuz4QkZY9N8BzTN9YKp0wwT5QLQmegv12n\nr3T0WWPIh6kQ7HI0SODnObVp++xgmubf7ZC91L6JI0VU4BwukCIDGFEtCIwaEwlcT/OC9o4TrjH/\nyErKQmUIFEH1IncUgVEnJWDEgpQHBs3KxIrQPBv6ybMbjUZ6+YvFImvTcN3FxUVEbKPSfr+fEDYL\nw2NHH1AY9s4Zu/Jv5LkjIg+9LT9z+orxRmiJmGwEiSiJTO3U2VB+DVJmPnBYIup8nlIRYQRxXgxh\n42AaheI6Fibj0mw2n/EhnBb03FJ9m3e0kUKWzJHi+fAqQB8sDygGnCIcK8/xZDKJwWAQx8fHKRs3\nNzeJ0LGVGwd8NBrF0dFRzt3Ozk4t7Uf0XMrm5eVlFr6cz+c13kCZHnPKBMSHQ27hBzGGZYrFaxSn\nxQEJ3+FoFYIu6wHu65Qh6xRZeXp6ypIDyPdiscgdhoeHh7VyDCh7jDHpf97ffD7Pz3q9TlTPfWbu\n2u12HB8fx2QyqaVqCBBAI7w2QE1IL1qeneJpNpt5rE1E1OZoMBjUPiPtDBLPESQRUeNi2pGiMf9l\nmi6iChydDbD+dtBXUhkw7mQj0IO3t7exWCzi+vo6ZrNZ3NzcJHeQcSGrQP+4H84aHDCjLU4rmi+G\nc9Dr9WI2m8Uff/xRQ9Dev38fw+GwFoQhhw8PD3lEE44//bRjRzqSz0xRcbPxJw1lXcNzcdycwrLz\nUKI8yLepHXxOAOD15RpbRnaMAjK+pPBMIUGGOJLJKHxEvShniSxGVHxlI/bYGOyrx9H0HeTMAT/2\nhb45GEYvfq19M0eKCWVx4Sx0u91EpgzxItQYHRe1sxCVSM9ms6lxhfge0C19QPlHVAsfQTWSRX/M\na3EF54eHhzQUJQHXxtSCaCSM97EA47mv19s6KiiM8Xgc3333XS1Sd/TsiAalWiIk/M1OCNfh7FAf\nxOOGcff9LNQlQmZIn7pHJScCBe7owPMF3O2/MVe8L9eBpkG4xZGjL0Rt5fgzNyas2tB0Op2asXMk\nxHl/m80mick8A0XO4t5sqvpjT09PiargSHlMzGcZDocp36Cpj4+Pefo8lfsp7gjn6e7uLo6OjiJi\nuykC9PTw8DCPPYnYppoitk46zgTOGTVvxuNxDIfDGAwGNePNlnNkx2PocXZ6g/FAcTFXbqASNgIR\n9XpYjImLJPL/3d3d3DwQseV2OJrHufGY3t3dJRLochnIp+tX8U7oC1LQ5l9gXEkPYyQ4qmi9Xsdo\nNEoHNaKqiI7+wrHj3Y3GEYVz3dPTU4xGo5RDnGgoBryPA0GCANK68/m8ZtzYiAFFwc6UdSuOq3WD\n0W47SzbWpY7iu4xP6ejw/ZKaAMWBcSiReE4S4O+uQeTMAXKEXNCgpfz+++8REbk9v9FoJDezTN95\nffMZRZRZO+YAEkQQJBtZKYNHnlHyIymZYi6fU2W2eTSjcoAb3JN/LiVDfxy42OGHG0bQYuSauSHo\nto4ukTCCYfrCdxgX6xP0D+vM64lrcKIcTGP3Sl4ZY/a3Unuv5Q9e22t7ba/ttb221/ba/s72TRAp\nV0AFlcEDhqjmHVx8F++SLZMRzwnUTmmRljCHwMToMp9ryJHo05wgGh44SJCPdOD75HrL9B3Pw3uP\nqLZj4xW7EfltNpuE4bn3bDbLgoRs68Wbxksnp00qgvubl0BzJEhU5bw/fYWXYb5TRHWUD1G5ycJw\nPZhn+sW4GUJ3lEoapNlsPktfUrCRSKkkxnNWGRGtU4lEsMD/JYGdOTcBkQjKRe6cxmEXC+jh7u5u\nLedPJE+EZs4WUSXjYggalJY0j6s7r1arTNd4LcABgYR7cnKSpOxms5kV3N+9e1eDsQ8PD+P333/P\natvz+TzTfp4DCLXMhdNJ/B10DJnneqMXzBncs7JyOYgRXEDewZwu5o/iqxHV0TZw2Q4ODmpbr9l2\nPpvNaqn0RqORPA54m47m0UGz2axGQ0BuSb964wDrl/WBvHBPdnrB40L+XGyVeyBjy+Uy05mLxSI2\nm00WemQzw/X1dTw+Pubh5lxHdE9fWYf7+/tZeb3T6USn06khnMwTqIJRHvNI+Y7XKTsPnSaNqBAb\n6xyuM++pTAeScoeyQQqYe3JNibbTyrQNc0EzV5JmXWZu0ZcvX/JZ3333XWZNIipOEjtgjcjAXzQn\ny7u7fWSKU8wgn+i2EgE0gluiK4yxETyPK3YDGeE9QI2Rf3S5x8VZBLImnP5hhJdme0img2b+mGk2\nfMY8W5/SB8aynPeSMO57+vlkmoyo/620XsQ3cqTgffj0cNJgy+UyD3p0jhTBgA/w0q4C7waKqCox\nM/mGlBFMLzTXmYFo7PPreI65EE4R2dBCBiy5AsCU7ouJcUyihZz7Qy61crm+vq4ZKj6DtI4DFvF8\ntxqQJcLnviKcJiZyD67HSHFPDKGrIHvXGtA2Qu/UH/fBYfYCZvGXhFM4WIx36YSuVqtadXen13A0\nGE/SGyxMUsl26Hkm8uSFihyh2E1oZu6oQA2J1TvFzLtzmgaFwE4aFFQ5hxgrzz9jz84uuF7spHn/\n/n28f/8+rq+v01Eej8fx8ePH+Omnn2J/fz/++OOP2vPYmec5jYgsvcBzz8/Pc54IfEib9Pv9WorK\n81DW3oIUz+5Z9EWj0ci6PdPpNI0LzhL9wFBRwZw5pB8oTO+KfHh4yBSNS7QQAKF/LFPIgnfY2Tn3\nBoVymzvpPOTK+oS0IEaHtUw9qZubm0yN8O4nJyexv78f19fXcXl5Gbe3t+koHh0dpRzhoOLsI3/W\nTU418R3mxtyUiNIT4uAAACAASURBVPqZiqXjwtqwE807Wpas+1jX5uyYI4ccUuPKaRrGmmtMlShT\neE4X8jsBgVNbJVWCuZ9Op/Hzzz9nKvH777+vkfTR+RDNTSlA19iZ5B2wSegrPrNDznt7V58dw3KO\nnPJysOY55R39TPSMuXJeMyUXzYAF/eE0AZ6Hc4atsCMLtae0V34/NveUTryDe2wbn9nxcvBt2cOB\nNMfT7/RS+6YcKZRjRCX8cFt8ECsDw8JAwURUpG3/4zOQLeeTza/xYnG0ZOfIzkFEfWssiuElcimI\nlksQmExdKiCfG2akg37wHAwmz+G5OAZ2ePb29qLb7cZsNkuF6kjQjlTZrPD8HROsebadPqIj3tXk\nd39mAqQXyUvRFU5NSeQkp47slA4I48czeQbOCTL1+PhY49yZy2KjgsIyqliO2cHBQQwGgxiPx7Vi\nnkRl3W43I2krqf39/ej3+9HpdGqEdrgSlisWPwRuo7m8NzsWaaPRqMaB6/f70Wptj8C4ublJwzyZ\nTJL/BKLhdQgyhsNII2hpNpvJEyy3RbN2HSThJMAH6fV6uWa4/2KxyKAGo9/pdJ5xbZjLiMjjYVCk\nm011Fhv3pvbS09NT7lajdg99tTGFd+UDmr3e+J3Cu6UM4/S3Wq0aemIOFX2IqLhOyATX8xm7C2ez\nWUwmk1qtqNFoFN1uN/UjyNLd3V3WmHOQwjwhl+hGxhc0gb+zc4u58jiXQZ3J0f5/RDwzlNZ9BLTs\nLiQ4Zf585pwNdkTUdIERCyMXfk/aS0ET1/l+pVFdLpfx6dOndIg+fPiQ3+X8TJwozz3oD/LrjTd2\n9Kx3HVRGVOcuGjFjLl3OhHHh/Wl2Qvx+Rsj4O8iSv49Tiw4y/3c4HCY/cblcxng8zoDOa6l03OxQ\nl05WRKWLIPI7iPZ7l/23g4cd4n4RVSBd8mbtAL/UvpkjhXLlxahU3Gg08rBcYHwcIe8Y88ChMCHm\nmZRH9F8So5kEDCOVjiPqOw9emkATX7l/RL1eDs6Zya8+T8zIEue6NRqN3KpaLiKiGveHseB0eSLK\niK0h7Xa7MRwO4/T0NI6OjuLjx4/x6dOn7I/RljKdRtQCoueonEVpyJ7rQK+4v50Q1+1x1MC7WwE4\ndQt6aHSHcSOqhiDrKIt+eZu4ZdCOL8+DZMt3mAM+s3PpqsxEMJCCcbQg+W42m9qZcJyxRXMK17tg\nvB3dMhMR6QxFVHA+le13dnbSAen3+9Hv9+OXX37JeSNt5nGmn8fHxzGbzeL6+roGaTPeRLXz+TzH\n+O3bt2nYKA1g2WLnICiCkYjDw8PatU55+7mDwSBRF8j0rDHWFmPKVn6Mk9NE3M+puDLap7iq09r0\nmX45GLC8+fBymoMKgh7eg6rQGCru3+1203kHzfAGFXQMxTI9T2dnZzEcDlOv0beHh4cYj8e1mnZ2\nbkj5euck7w6yB1JnJxuZ47ukHRkbjzFjh7w5ELXO5X7oKeuAzWZ7KDkZA3Qhn5UImNNCRpbskCDX\nLuRJXzy+nkveD1v26dOnrOsXsUUACSL5ng/MdkbA5GeQX+ocgpggf9gQgqzSSTDqQnDjsbS+s63h\nPr6GBuIMiuvxdnbB5XIIjPb3t2eYejMF88DmI48pDVtQ2m/0frk5oEzzEahEPC/MXM5p+a72Ixi7\nr7Vv5khhoFlsi8Uibm9vYzgcxnq9rcxqZAkhQ2mWRQxBYxyVRtQPYeV3mp0JFwIs8+r+/+3tbUaH\nKGlzDIzAkIZ0X15KQUVE1p4pUS63l9Ajw8wYJcYM2PPg4CD+z//5P9Hr9fL63377rbZw7UwwtmyH\n7nQ6z1J0TpfSMGzsfIIr5ftznatw8/58r0RrSv6T+Q3A4nxuY8U8Mp7mp6CULRs8nz6wuMoImXn0\nbhgca3hM6/U6C81FbFMwLM5ytyBKkaNscIIiKqMABw4Dzzyt1+tEbz98+JDzf3Nzkwqs0WjEx48f\n0ymjvyAWcGoiqiNEPn/+nAiMq8Xj6OM4ukK6+YJ2Eh8eHmpVoMv16EBhNpulowxvkPWOgxoRGWwR\n/WJUmH/PA0ggYzOdTmMymeSOTo7T4TOnZm2EGQvvOrZSJjgEXSnRBYwPCKrHbWdnJ1E20negek6J\nI8MYK4JA70SEW8WRI5SN4XnsYsTBAmFoNps5lre3tzXqhWkMjKPLERA4OLBk3Fhv1oVGQaxv0Q80\n1j8GmbHBdrBTlh1wEfVijS+labATpeFG75jfWaZ+rBuMUDnQub6+rqWTkK+yZATBL7XDVquqMCzP\nYcydJcHZ5HrmwoiSUS9nRuyolnbG70WgyJgSKDgVztg4aOD/zC8OP+l9H9zO2vd4GDWnv6w1j53p\nFS5uzFwQfON00hfbEds/1qydVSPRphO91L6JI4Xxc54VztTu7m4MBoNYraozxVhgEZWxttMDxIz3\nWW67jqgKcFrxmXsFzyCiUho22I5aICgjbDQiFStgC6qjVgs7wsmCtxGKqIwP+WV7zwg/aSVQPB8h\nwdh1u9346aefImLrnV9fX6dht/OCUJX5dfqNUkBp2glicZRl9UuyngnVjAOwagm3c1/Gx6gTi4E5\n9LZcpxfNu+Iz8wz8dwwWkbnROBbb7u5ujXsCemAC5Gq1So4JKetGo5GGjsZY4oDCRYiIJFHD2+G4\nCcZlOBwmInt3d5elCnAqWCs4CIxzp9NJJ+jLly85z4wTqI25KJztxljjvEdUpFKu6/V6NXI3SAtK\n3ak9z2WJ1JpXiEMUUSHDk8kk1yFHOiFfBwcHWazRpPx3797Fr7/+GpPJJA4PDzOdyVzQjzII47mP\nj48xGo1q8oQMErUTLEZUyh3U7fb2Ntc4SF2z2czaetSgm8/nuQ7hkJbIAqgQ1bO5DqRqOp1Gq9WK\nN2/e5FzjRONsgZru7u7GcDhMFNOlH5xGIx0DtyWiSqF4LZUpf3SHxw2HCGTExhWdxHOMvKFbQKWM\n1iMbjJObMwoue8H7o0OYZztE6Ogyw8E8YKdw/iMizs7OYjAYRL/fT7TLxpo1wPyyLthYhBPselfI\nptPPdrIZu1JfMidOJ9q2svYAFYzGAjY4qOXnfD5PDibyaF1DUMPmFY+bwRTzA5EXv4vXE/d239zo\n50upYt+3HBuanVTG+yXELJ/31U9e22t7ba/ttb221/baXtvfbN8EkQKKM6ROtOjdTbSSiNxut2v5\nTbaKgkgRRRF9cE1ElbYi2ii9fK5z3p5nRzzfjeLcrmFEPP5yq2ZE5WE7v813F4tFonM8F/SGNEwJ\nxxIl2isnAiJ6IqKk/2/evMldUOW4ma9E2tRoHTs3HM3w/jzDxUhp5go48qSPRrYctRFxedMB80TU\nYXlhTBkfEB9D3/7n7zsq4xk0okyiYefbneJljLyLCcTUkDz3plimizPSnA6CbI2MjUajJJvTD0oV\nrNfr3P1KqgN0zMTw6+vrTA9GRI1nSPFGiOjr9bYg7GAwSETCZ7k1m82MTg2TszGDXTZeX57XMvJj\nHszT4TPu2Ww2M93OO3neSP9Np9P48uVLREScnp7G6elpnJ+fJ7p2dnYWEdtipaQjymNwWq1WdLvd\nrKZdbnN3BAySxHvs7OwkCd27Dw8ODmpn5a1Wq0Sk2u127nRiPTo1wc5ckEjmt9vtxs3NTdzf38fp\n6WmmeSO2aNzt7W3tXNDPnz/n3LNrmirlfgfSlWzbd2HGErkreThO+fi7rIWX0n6sXZPCzZvyhiMj\n9Tc3N7V0nrmMcM1A9FzklP6AtnqTERXvWYtlio7rSM0bbRqPx/Hw8JAHHTslxvuhA0GA2CQEKlva\nnVKX+TvWMUZWPE8eU+tK72b3GBphQhasv4wE2656w47l2J/RvHnDfK8SvXZDB9CQC/PzLGv+vtG1\nkrLi76Gb/lb7Jo6Uc79eiHt7ezGfz/P8HZcjcOopokoVke6CK+C0gQmPJsnyGUoNY2WyuEmK8J0i\nqoNr7dwxyIbd6S8ChRBxv5eEIqKqKG2SekSVUvMuLjtm7EJid8779+9rBGyUlQn1kHCBcO0EsojI\nh9uhdB7d/TB5Ex6CU6nMO5/RcNY45sHj5gUFhMtckLu2wjCviTktOUkYJBZPSXAu03M0O1+bTVW9\nPCKS00ffmCufUQbEDZTPHDN3BBMR1aHFm80mIfPLy8tot9uZhvIOMI5egVuFMWTH03A4zPcnRbFc\nLuP29jYNMM/lvZl310QjWMDQuEr24+NjOhdwQ3h3O4iMH+/nTQOWJwcH7D7z/G42m1pQxc445hg+\n5d7eXh6lFBFxfn6e1y6Xyzg9Pc00JGOMM1Te//DwMIbDYe5CshzjgMFnImBk3tFNDpQeHh4yLQz5\nHYfv7du3WZkeA877XV1dxXw+z40k8/k8rq+vIyLi+vo6hsNh3Nzc5Jicn59HxNbh7XQ6cXR0FJPJ\nJO7v7zM9jY7iMHdzwJgXBw7mgNqBLIOskjPl/5vrik3gnjaQcIa8gQQd1uv1ckMD482OU9az7QCG\nHhkpUz+kWZ1KJMBwSozmVBMbB8o00e3tbczn86wJhwyjd91XvzunZJiyAtfHQaavQdegH80v4r1Z\n27ZDUDrQiTs7OzUSOY10ozlbLjNSliPwmvBZkgRGTrGV69ubFPiJfBG42z4/PT0lxw1dZeespOv4\nnpZtpxzNT/ta+yaOlCMdIz9MAJFZ6UmbNMxLW+lF1POeOGclxyqi8kbhTxhZstKCe1Q6LRhtGwV4\nHDzXdZTw9B19lVFGxDbyYSs8jf6ASpnESnRAH+DInJ2dxZ///OeIqKNm9u5RROYyRdSj/VJx4Cwx\nBnZ6PEdE5GVES0RjYry5co+Pj1nQknd3P02AtbCzMPx+kCPhgXn3lftqArhliPm3sXx8fEwekzco\nREQeVOv6MCa/866Mh40UxOCTk5M0yMwFCq3X69V2fEG23d/fj5ubm7i5uUmjGLGNzN+9exftdjvL\nMfAOnHPZ7Xaj3W6nEWY8QdSWy2U6bnCROp1OlhdwLR+IuuzOY2wODg7y7D94QOZGWX7Kmk44wiYB\n05gDnE0jmRidbrebO4W4L8URkcXxeJzrzXxEEGLvsqL+krlaljfWi/tqAixrzeRfrsVpYp4uLi7i\nxx9/zHpQ5kCenp7GbDaL8XicBoq5o4gqKCYOQ0TkeZD9fj+Ojo6Ss8V4QuhHLzpYctHViEhuF5+j\nL6xHPVfMqaN9xoyxLPUJP12ehuvgG8HDsU3Z3d2NyWSSx/3QIFC7bz5KCJ2F3rGjx4aP0sFGP1G/\nyw6RETj4jSCONvKsA6NVBwcH0e/38/BhGn1DhyOrHmtsA0i59Tc8Y+aNcSVQcNDrHX3Wl3ZC7Nw+\nPj6mvvCceg4NWHgOsJ/0hXEpuV44SThaRn/R7eiv0nZZPiPqO02NJpZc3BI9K9s3caSIQDlDKqIy\n3nj0EVVUjjFx/Smu8w46tjgbqjWZ0QvRA9hoNGrbZ2kmDzMZbDlfLBZZVdnfZ2Jd04d+skAdRURU\nnjKRhreP4nnTdzsdhndZaOxq+f333zMtQ7TgCIT7EiUTVTBm9JV3MOnSEZ3fEcSNcTOsDDyKovbu\nDZwkl7tghxmIAn00wmTFWUYNbPsGHSzJ5jakTichQyhmO8MRkc4TqI4RE8afcaU8BH3keqe0+Wy9\nXsfNzU0isTa09JsUK+k7vutzKvnuyclJ/PnPf47lchk///xzbhWOqB9EfXx8HJ8/f07jPRqNYjgc\nxq+//hrz+Tz++Z//uVYVm8NFHfkxbswrDqxRB2TBKQ/mmnQRP/kOzyIt7bQA+oJ7r9frRBsiIonU\nEVuDPxwOawocWWq1WrkJICJqKAuyhh7yzjmTxbknShvZZg7ZtMD9ymAKA8V72Rn8/PlzljGhDk9E\n5CHVpAmNLA2Hw3TIkDWnMJrNZr7z6elpzhP6zP1xw4DRP4zf175HY97soNiB8Bi4ryCg/N33BMkB\nmXKwvL+/n7tbcTiQCxfxxa4Y4XbWwrvo+v3+s4DURHtqc5UoHn3l2aChEds0MmsIh8Zzwd/R29wX\nmTdoUAatzAGfG3lhNy/3N9JlPVrW0UJ2cIYcKNDu7u7i/Py8tmHqawAC9zOyxHqCkmD6i9+FTSH8\n3fbRqVv3zde/hJqCFtvu8+wShSzbN3Gk4D3YsUEw4BLYCOGEYORQ2hFVntXIhJ0bL+oSbiUatEce\nUUF5XsQ0Sgrg1ft6eCFlnpdnM4H26vkek8f2YxpCBkRv6JvnoaBd1+Xm5ib+53/+J969e5fCZQVp\nhYZAITgU7iQ6dMqMqAyj7pSox4n7enu0OQ3sooyoc6eOjo5qc2jDymIuI136X0bQ3mJeLgI7qDYO\nGGyQH1cIp+9sqfZOGuYVQ1vubCHqB+Wy88a7LRaLODs7i7dv36byhRO1s7OTqRhQIJxElJDTficn\nJ/Hly5f4y1/+kv3zDsPNpqoX9fnz54ySj4+P4+zsLH799df4t3/7txiNRolWWX4xHjjuIFdOjzA2\npBeRX6J65qaMcC0z/KS4KfNE1EgARbkC5Jt1NB6P4+rqKtN7jPV0Ok0E0WmKiCrVzPsa0eC5rENz\nID0nBFPMuWuh2Tn22mQdeJcoyBAHSCNH7DRkTq+urmrlD0hZLZfLWvqK9XxwcBDz+TwajUYeLbNe\nr/NvZb0vHEXQcfpq9LB0dkp9a2e6XMN2tLxTmLHCqfKa9r1KRAZaAzqSNCsZA3Y6Wi+xhlxjzfOE\nLLDuGXsKozqQtGFnLnd3t0dGkbpdr9e5S5L+WbcbzbFT5NQZ3/OYGpHC4TH9gnHmsxJUMD3D92TO\n+d32koYjaX1HfwEXPN6lnJjr6rXC2mGMCFT4WdpaxuQlCg0yZkCDuUeH2PHj2S8FDbRv4kgRuboy\nLguFgXZ6A8ElNWAym6NQBJ/rgMxfguVs8BAmC1TJb7IzRokGCgnaaDrHbAWMAWXyjDI5V1ymgEyU\nJJowcR4BiKjOboqIVB7z+bzG5fEc8E7l+FDg0+ifieCPj9vCgERKJqLjZLJ4HQUwN1zv8WbR7u3t\nxWAwqD0P561cGDyj2Ww+g7Bfmu8ynWDj4kVq5edUKoYc5NBFXDGIZWqpJNAbCXPj3rPZrKYYSDc4\nrWsiPGvp3bt3cXx8nPP/yy+/xHQ6zaNOOp1OGszJZBLtdjvu7+/jl19+iVarlfys+/v7GI/H8eHD\nh3j37l3M5/PkaQyHw5r8N5vNTCX3er1ajTdqNDH3pOFKYjC/o8C95X61WmW6y2uAe2LQIbguFot8\nj263G7u7u3FxcRGtViuurq5qDjG8osViUdM1oGJOG7h0ByiYgynPhcm13JO0JH8j6kU+HBARFdPM\n2cH4R2x1InXnms1m7QgcUw9AjbzWlstlOsOTyaSGkJC+RO78fnakcX6c+kKOy9QcDgY6z4GN0WUC\nytLJcurdDgrlLUD8cEYw2qw3r2mCddKY1lFlOYSIqK01+KmksZE10Cg7krZJ/GSMQH+vrq7i4eEh\nhsNhGuoyDYUzGBG1NWAebqNRnbhAs7Pg9394eKihcLYDlhs7aJ4fxsuoIt+jkLBlH33pFJznHBvy\nUvoM5L/MKPEM7KVTm3DJsF32Byy7OHhG1bDRRspoZUarbK/lD17ba3ttr+21vbbX9tr+zvZNECm8\nXrZXR1TpHVAHIqaI6rgHvlOm0xzhOq1Cjtv5WrxhQ6cR9ZOg4QuZ4OocMz9NzIyoDtk00dqRJ5FE\nyZECojQnx6mCMlIFIen3+5lLh2zqCGq1WuVWbXviEdUuxvL96A8RD1GRUQLG0DslGEM3eCvM79PT\nU0aKoD00ECLuBWy+Wq3i06dPNZlwpE8EATrAeNNPoxCOMEjLGLrnJ5Esc2GEymkdkBe/O1EQiKej\nKKc2DRW7j6vVKrflR2yRE9AIdn8xx0TU8Jqurq4yKifVsF6vs/gmxwPNZrP48OFD/P7777XvMg/t\ndjtLHPzXf/1XjTT++LitdN9sNuPs7Cxl4c2bN9FsNnPXJegC/VwsFnF/f5/8LG//h+dIpI7M3N7e\nJuLGdaQMXO4EVMtnSTKHoCx3d3fJL6JYJRtVqJJuuQDlckrFKACy7J3FToN5rTkt32w2a0flgBqA\nupacndlsVkPHQEH29rZVvklDNZvNODo6iogt4vH0tC04aY4JcxFRbVlvtVq1Y4g6nU5uvTfa3GxW\nh7qDrlhu6a+Rp5KozHecRrWclOkwo/boBtYr6x2S/nq9TvkZj8epv9kFDlJrkjVjZBSbOYB0bfSf\nMeC9jVSD1L6UAnKKESSNOQDd4h14X3N0sCfWr4w140wWhf4Y8fb/yRYwT6UtMupotMopwXJOjcSW\n5G9sEmvA/GXslncVWg85Q2MEDLQJm1pSe/icfjjzgNz67353xtlySfv/HUeKF/cRA6PRKCeuhH9R\nvHd3dzkQziV7EixQm80myxsgXObeuLq04UgTmq0sI+qnjuMQmE8AlAgPiPuQnvT7lVwonC/qCdFw\nsJzO4O/NZjMNCNWdIyKP2iAVEVFPUdrZLHPWKOYy1857wEkBtvUCN9HeHCAWC6RnUlgRVe0RHFvn\nw9m94maFTr9NhORvhoDtKHuueJbLQhhKN9yObABzo8QtM9SegdvCHOMIO13sBR5RGRAO8KU/du4a\njUY6mcDTjcb2CJj7+/sk7GMIu91u9Hq9+Otf/5ppuH/4h3+I6+vr7AOORcR26/xyuYx/+qd/irOz\ns7i/v4/vv/8++4JcTyaTuL6+jj/96U8RsXX4qEeEUTG8z/U0r1FkCGWJXLCeSdm22+2s6u5UD/do\ntVq18g+TySSJx+bBwTNrtVq5Ziw3yBHv4DIVbJDxzuGIKi1kQ0Nj/drIUKZkMBjkPLCbkLno9/vp\nBN7e3uaOyIitQ9Tr9fI8QY8JKRGcTHZBR9SP68HBcKmV+XyevJ1Wqzo6yBwlAgzrTBuy0ijyuR1C\nZB3jzBq2TrTTyucmXOPYOOhinnCsKDvA+u71etl3TgswSdsUE2p88RmnBUDdsAxzHf0qid+uis67\nE4ShKyIqLmV53JhpC6Qynford6cxFyXlgPuZjmJnhb+x7uzYoYuYF8ubbabnCVACp9XOGUEA82c6\nDQ4R48r6sSwtFosMLO0M02/e28G8OVpl+o7xNAWHd/9/tW/iSCFojlparVZuyTYxNaJaGNQ2Iaec\nL6H8f0RlkDA0zp1bgKzwbbxNfkZImQy+78E1zyuiyqdagP0cFizfJyLHWHAwaERF7ja3onSkMO6t\nVnWqPErJx+V4MZHzNzfFAscYec4iKtStRFx4BouMe3vHE0YIUisN48+7lk4m6BmOFgufd2ShOUpy\nn5h7I44eEzvtzHnE81PVPdc4IEayMDL0uVRgzFXJ40IJ8hlISUTUtqYTQTI/OG0gKJxnxdwdHR1F\no9GI//3f/62VMWi1Wrn79OrqKtrtdjpB0+k0/uM//iPG43FMJpP405/+VON2YNzn83kMBoN0sggU\niPpANRjnZrOZu5ow8owTHI+SbE2Q4I0NyKiP02H++v1+EnnH43EiZPAIcUIcUCF/DixK3pbrW/Ee\nIJKus+N5K51GjAZGkXty5l+3242dnZ2YzWbp8D4+Pkav10sZIApHLkajUR4bRF+RW8uvOSQU5+Tc\nRIyRx3uxWGQRVgez3IuAzOuC57sfNtDmQZbcSX/uYMgoNU42fWVevIMYmaJILXO9XC5rXEWXJzFX\nEp3NWjY66KKTBC9G3HBOGK9y3fNeluFms5m6H9vAO5Q7PyHJc2/GxFkVmjdyvBRE4pzj1JtfxHiU\nG4jgWuGk2H69NJfmKbNxjADGGwbIlpT8KPsBL70DcsguaaOcrVZVWsd/517oW29qMO8aOXVdSNb+\n19o3caSYFEogRFQkQG+pNgmw3W6nYvYL2XiWRE0GgAVgeJAokf7YQNuz32w2uaWbe7qhcLknjk7p\nnLFLDQ/aaBgCjxDv7+8nhE8dFJwMjwtRCv2zQOHBf/nyJRVEGUXbgTBaRZ9N2LOQY/DL+zlKIerk\nXovFIueY75FSWK/Xeb7i7e3ts8gU+N2pFfrvui1W0I6WIuopX+YbZVo6p3ZCgcLpJ3Lj/3NPGjt/\njMp4zB0EMI+kPIlE6f90Ok2UBkTECoxgABTCjjSE5GazGZ1OJ43jYDCI/f39OD8/z5pd3PPf//3f\nYzqdxs3NTbx9+zYJye4rNa9+/PHHnEPqez0+Psb79+9ryDBGwakNFx+ldhrIsaNg3os5NXpyd3cX\nnU4nlaBR0JubmxgMBnF5eRmj0ajmZHqDCGkVxoZ16ajc+sBrFJlg/hlHo5Zch0EhYOH9IUvz7tZf\nk8kkv3t2dlbrC87saDSqUSUYt3a7nboLRwuZmc/niXAdHh7WUD7Gu9y9ZxKxEfcyrU/gUeoZrgcB\nt/EtEYDyOtBEB4bM/3K5zFIPzCk7StF7dly5D3rE5x4yXhGRBh/75E0P9LcM6HDSyjQvc4Ls29nC\nQIPaYF9wPuyg0kB/SC8b4UMWsYu8p8EG+o4edzBgRBF55Zl2gMpd97x/SZnB1nFff8Y9WMcObgjI\nCKJs51kv5WYgnjebzTLA9o5Vnm09YZuHvbPTH1GVdvlb7Zs4Uru7u3kMjA9njaiiPhucdrudfCoW\n+0vfL1EXDCyD4PyseVYINM/jXkykBxXhKhd4RJWuMcJkhW1nwGkfvm+0A+EYDAZxcHBQ40PQ4ExQ\nHM4CBe9qsVjkifXcm/6U/xzJ0Fe+S7OBJCX3krPA4ud5KCHm0NfgLLIbzgubcWOePcceU3+/7AvO\nUOn0WaEy9+zEw/Fy6oPrO51OLnBD/yxCO+WlU8DYGlYG5cAZ8Nig+Oinjfju7m5GyqQxnGrkmBC2\n/r979y7l5suXL3FxcZE7Z/mMQ2zfv3+fUT/je3R0FHd3d3F4eBhv3rxJ7mLEdm1NJpP48OFDNBqN\nmEwmKfukz3AEHh4ect3zDGS/THmyFghCGDMCBYw6yINT0IzV/f19HB0d1Zxq5AinlzH10T4YMqf2\nLQcg3jwPAE5hCwAAIABJREFUJwFuF89DP1EM0ty6vb29dOyYCwd+Z2dneXyQFTpcQZ4F0sX8Ird7\ne3vPnIxGoxGLxSJPkCC1wgHAIC42sow7awkn7KXGOkX2jcgQXDht4u8ZIUJ/oJuNLhCQPj4+5vEt\nTl8a2bYRxrkiI+Lgw4gEuyl5b8pp8CwQScY7YmuL2NHpNJkDVgelOE7oRae2kF9SgiWPEn3vXWpG\nUMq5cMqs1OvuG84EffG6NE/L/2eesKnW0fSV59pRs+PJWqPhYGGLDZKUjqRtADsHoZ+UTqZ3QZfO\nJ0h1GYwzfmUA7PZNHCkWLk5ARIVycO7US3WkUHoR9e3t5aQZAkUR41BZEFjcLxlZ/k4E4QgDZ25v\nb68GG3PfiEp5um9WEBYM5+TL55HTpdQCW8jd106nk3V0SpLf8fHxM3Ih/cHZiqhQg4gq522iu+F/\nuEAsNISYBY/w+/1xOIn0rRSbzS0Bt9vtZmTgaN78CCslX89Y+DPfAyeN3/lXKvanp6c8S5BoyqkK\n5h8EjWgex8oRr+UUeWPeS+XOfZ2q4h3m83kqCwxuRIWacL3R3C9fvsTT0/Z8PY4FMX/u+vo62u12\nvH37NlqtVhLRV6tVjEajNDj9fr+GvoCQUJuJfp6fn8fR0VF0u93aWW4RdUI5TjQ6gNpwpOncWPc4\nJ4bXGQ+cEPhnyCn13g4PD9PZNCeP4An5dkFSlCny5Ll0WsVGH8cGI2QjjGOy2WyyZhByA0cKOWu1\nqlIUyNf5+fkzsj1oCRsRkAEaaE273Y7j4+M8IgbHeDAYxGQyqSHdh4eHacDMSWFdeCxIMzr4fMlx\nKj+zbuY9+P0l9AXUGB2OA4qeaTS23DIQHdZFmWLld/cNWXHNI5OUbaAxvs5yGOVh3eOA81zkxMib\nZZifOF8u8Iu+LtOFpfxRssOZnPKaMsA0guTUHsip0dWI6lxcxqG0lw4+nTovEX87Z5av0pnFcUMf\ne+5xlqbTaXKg/TwQfBxinmPqSGlnDCaUwECZxn6pvZY/eG2v7bW9ttf22l7ba/s72zdBpEajUcLU\neLx44ERtRiyI7rwLjobn7Dw73rijVe8qiKg4UnACSvIcKTryyWWEY8Ia183n8/SE8d5fSo85vRNR\noTygbsDKfBcEp4wCgURB9oCxI7Ze+9PTU0KcLrZGg+DL+72EANK4LykK+ASGPP0dw9f8NNLn1Kqj\nJ1IP/szIAXPnVsLX/I2oDfnwThpSL07v0k++T3Tmwqr8PaIqLBtRFTEl3VtGMKAN5PSRL48XyILR\nKpAx754qd6yaQwWfaXd3e5grxNvpdJr3JAV4cnKSu1dJUfGsvb29ODo6itvb24yS4Uydn5/H+fl5\n/Ou//muSu5fLZXz48CHu7+/zqBvI7WzxdukLk3+R75c4jUTcpLG9RknhuOQDRG3QwX6/H81mM5bL\nZaJg+/v7MZ1OE/32bjgQpfV6neiaURNHvE5v0CdayRMBRWw0trsu6TNn9+3sVEdcse4oQ4E+cLqQ\nMfVmmpLW4PQIpRFms1mmUTgPkesXi0WmF42yMGZ7e3u5/b9E5Iy8Iqfl+nbqzH8rKQ4eR3Qlusop\nHOYNBJjyFug0Ut9GGczrRCa5rtfrJUrDeHqNIqtGuv2u/CvHxTQCnut3QBdYlmxX0G/Wf6Q2ndJl\nDYO2Oo1Jf8wlJV3qFNbj42MN9UQ2yh13nnOPATbTGRzktCz7UqJm9JvP6OvR0VFSgXxPTsG4v79P\n/vR4PM77QXlxdofM0EsoE/rbxw5FVPawTPG7fTOOVEQFaUZUC/D29jYrxZZnv5FOi6igQBoD6p0T\nXGuCop0pjFu5648+4sSYQ1OS9pyiW61WydPwYvLzEG5PMEqfrcdOa5owCJmXMSOdAFfHfBYWCie9\n25BFVAbMBDveA4fMZx2WitJpljJX/lLqAy4AzgsOB9fjPGAw+Iz58Th4btfrdRoGO4p8D+VgJeCx\nZ1F5yz0LB8K2nSzabDbLVAjX4WTZKTJUzZbykhPnvnItjflhF9r+/n7tOIl2u52E29vb20wLQeTE\nEYLPxvwOh8N4enqK2WwWnU4njQl9xYCbl0NZh0+fPsUPP/wQ+/v7cXl5GRHbqthwekgn8e6LxaKW\n0sTBpy+eAzvJpPuQTXMOcaSY12azmbvfIqrDu0nvcbxOxJZSQLqUtca4jcfjXL+c+4mTRfV2gjOn\nr1iLcFbKtPbd3V2NZI5Sht+GnJPG5DrkBFnhmJvd3d24ubmJx8fHGI/HMRqN8jtsIMDxMmmY47fg\nm+3u7qbjtlgsYjab1Wp0OdBAFk2JMK8ThwXdUKZIWL8lcZfxdnDFPfn8/v4+D5+m0Q8cPJxFKtZv\nNps86BvZn81muduPPvPZZrN5xlWyjuP9GAtXkmcOv+ZcW/e7XA625SVnCV3h4Jl72W7B47Njx3g6\nnY2cEpzyPrwrOgSaQFkuh8DFqVOa72XqBoFjs1nVXfTYOnh0OhlnuNvtRr/fj+FwmOvCO3b5PjaR\n6vN7e3vZJwe+pFDNieZ5zBv3sK9gp/ql9s3KH5DnfMmrXS6Xz4htJRHNxpt6VPP5vEYAxji3Wq3o\n9XrPJp9m40YzEmSSekTdq+f3iOqEcHvldgjg6UBwtHOyWq3i5uYm+v1+HB8f15w/cuB2eiIqD346\nnaaxwcj6Pfr9fvJQTJDEszefw9exNR1UgDHFyUIB2EB7F5sdD4wBC6fMj+O8lWRrE05ZqH+LM2Bl\ngnJizFx0kTEw0sX7gS4xXy5GimPC2PjdI6LmyL+0HdqLn35gdOwMlvwpHG3X1aLEguXE26XhdGFI\nmA8iOWoQcdRIxPaMvuPj47i9vU1ekhX/bDaLH374IT58+BAfP37MvnBg8tPTUwyHw+h2u1lSAYNr\nJWpngeOCvCOPuScQYi3ZAcEpJ9o1fwynDIeJDRuMG4fQQkZnHo1QTyaTLD4asTXCw+Ew9YvHNKIK\nviyTyAF1veA2cU8MoI054w2SzK5GNt1EbJ3B9XqdZ+xdXV3lzkOcAwwOHEueNxqNYrlcxmQyyaDB\nc2iU3zoRuWQeza/h3c0DMv8S/VeuU3OCynHzWqH/5Zl5BC82dAThOMDwq5ApHy3jYA++TatVlTJw\nRgF9CZeJvoMOsjvNtovfzZGzjkKvY/i9KQgdav1Iw/EoESCutS4xj5U1ZSeS96CQK+jL3l51HFm3\n242Dg4Pc0cnYRlR6jHVjgMRrgrl0AImjz1zwGU4XZ0xSuJNxA7TAsYNz6B1+Jd8Jnc544eBFVPYQ\nHqht9/+LHxXxDc/aM5kuooLrcDIQLD7jOgsEn2HsIGPbATMZknSXrwdJKCMoP8cKw9FxmbZD8bCg\nPAFMEg6NYUyUy2q1iouLi2fn0Hk3CgsronJIqeHT7XZr6TkWY6vVyoNqEVQrH0dlfueI+kLgmShU\nE/Y9h95JYqfBBFEraIoGEkWXAu77lOPNvf0ufIdxBbXw9w1D27FzGtCy4zFtt9uJQLwUdQMNO51M\nUUWQGRvhMvr1+/r/L6Xh+I7J18yb0zBOC+Fg7OzsxHg8joODg0RyTk5OotGo6rusVlUNtdVqW+l5\nNBpligiEBIfkzZs30W634+bmJpXb4eFhbm9eLpfR7XZr6xfD43pREfVzJnGkyzpeGL71el0bm8Fg\nkMZnNptFt9tNYz0ej/N9n56e4s2bNzUna2dnJ1EdIwiz2Sym02kiH05RsduNMXeg1G63c+fiZDKp\nobm8vw/Z9U6p5XKZQRYOCeMNujSdTms7KNlVulqtsnQK+ouU/f7+fqbGXOsNB5Q+uoYW6TICCqP9\nOFjonDJN85Khj6ijURi5MmhGL3gOJ5NJjMfjdAScgqWvh4eHiT5ZbhycujmtizNhG8R7gcw6eLIu\nsL2wYw2ibH2C7POOTkPZoS3BA+anJK7zLGTE9sDX4jiUtm13d1unsdPpxMHBQe2gc8CPk5OTmjM1\nmUyy6KkRQt7D+tRZkzIVuLOzk/ccDAa5RiHvs+7YMToej2M2m8VyuUwHm/WKHSkDGmcn7Jyie7Dt\n6BX6+VKGye2bIVIMnAWNhU9kWfJWIiKhca5DiBlMFg7Piah2BrFV0v0oHQHuzeQzgDx/PB4nKkB5\nBDsLPt7CSA9pBN4F+J/rIrYTNplM4vLyMgUYp8XRkB0pIF5H1VyH4eEdOMCUZ6LsEXBD0oayvWBx\n2jyPFkbSli+l4UB/XkoJehu455doHYcXeSjlyf3h/dmlQXTl6Bk0g0Jx3lpr9Mi5cuYd5Wf0D6O7\nWq3yfu4nyr6s98W7olDMV/Bc0Mx1enp6yiKQIDJOlYN8PD4+Jl+AvqzXVYHbo6OjWuQ/Ho9TuQ+H\nw1rEyRZ+EElQECK54+PjuLm5iel0mn2hphHpFiJ+xtvOyO3tbXKrdnd34+zsLKNhR5gYHvpFGpax\nwXHzESl2el0qgfIQEVVNK8ogePzZ+Qj9wIYYhxVF7Xd0KQbk3Gv+/v6+dugtzgm7eClT4CrmrAfG\nZjqdZn/u7u7S6eB+vAMIpE9a4J6grfTfc4NBQmdbvpBhr2enjNCRyFRJvbD+ceDgNc/cIVMgijc3\nN6kj4Qcy9kbduQ9oFSgPcxZROTY8z2uUccSJBGHku+h1B+iMhdGl2WxWQ2boQxmQeecfes16AHk2\nCMCzmQsQd+tFEDBnTEqH/+TkJB0ngghKDlFU15mDyWQSFxcX8eXLl0TNjbZbfqj7xrh5N7jt/HQ6\nTd2DrqVw87t373ItgdRbXzqgtn13MVFnDdy8Tkod7GC+bN/EkQI2da4VQwSU7Tx1RP2sMje2MuMY\n4KxE1CvQAsWW6R0iMASSZ+EIlAt/tVrl1u7BYJDRD/dcrVapwHg2DQ8ZI4/iM1LTbDZrXAAbAD63\nh2042FESzgef43Q5p0wEQMqkVBwIpAWohJq9oFCWOAXmifCe5lvRUNAQbv0eVrLlAn5JPvhbyVPw\nIsXweu4cudlY2JHi+8yJiyc6jegjbTx2yAPGAiOPkraDamSNOXcV84jIjQk4py6Sh1LEYcfxYdwu\nLi6i2WzGn/70p0x3RGxRl8lkEu12O3q9XoxGo+wLHC/m3+nynZ2dODo6iouLi5hOp3F4eFhDbs7O\nztIRfXqqOFInJyeJvJQI3/39fTq9yBuK9+7uLobDYRoC82AiIonpjJ1lkfXJfSeTSQYuGFqcKVBA\n3h8Hi3F1oUenFOBXRWyDr/l8HqvVKrlnyJvnDF3E86bTaTorOKomhiPTIIrML6kOc2l43nK5TN4K\nfCGaNwHwTPpGSgr9Rl+dirLDYn5VGQQZ8WaNOJApr2NcrQv39vbi+Pg4ms1mXFxcZLAQsTXsLgJp\nbuhms8l6WWUpFqNRERW/LiJqWQb0rtNb9JE+lzoOFAj9xpihR5BTb2xAp30tyDVqwv24FufO+svf\nKYNYz816vT2fs9Pp5Pu32+3o9/u5PsqyGaenp7G/vx9XV1d51BHzyRhjSxwsUtgVmgLyDeL4p//v\nCKqIqNlLBxi+J6iYuVZG/rHRzJODIQc/zohZfr/WXssfvLbX9tpe22t7ba/ttf2d7ZshUnjbJiSa\n0FdG0EQ/L6FSRKrwHJz2wyMGHizRqpLgF1GPkEgdmo8CeRS40ygI7wTXgqjFRDzQFzxeUBATac11\nMTpjIp/TlbwL1xEhE0X5ufQHJAK0zrwl8yQcCXIfPP8S/jdq5pQoCN9L3r1TcyCTHm9kxTwN3tE7\nkIxaMqaeX1f+Bf3kXQ19g/LwLMshssR7esemPzf3B9kiMkOGyk0McBQcUXEvpzrpN3NNuujw8DCj\ncu7DmB4eHqYsXl1dxXA4jH6/n7sPzeWCHxGxRUW88wfonvsz3p1OJ6bTaczn8zg8PIx+v5/jPp1O\nk7S+t7cX19fX+e69Xi83aJTrwOgb/DTLN2sK9MnzX3KV1ut1DX00Z+b6+jplsNvtJmpwfHycCDl9\n6HQ6Od6gPfSHdJA5fhGR6BQ6ylxN1hbIsPlqFNXkHg8PD5n25FByZMSEcsjUpIE7nU6tn8gKRTlp\nICnmOprnVPJyjIiUKSzTNlgL6HGnAXl/aAfmMpYcxZI7tbe3l7tPXSKk3+/H/f19pkJNGWAeeUdz\nhDjmaHd3NzqdzjPEgjWPHbKu8fp01sDjgAyASNlegLTzzu4jNsgFnEE1bRPMvcIucF/3weP4EoEd\nefTmFdZlu92upWkjtutpMpkkSuq0O/PN2HlDCMgmyHC/36+lruHvnZycJDKHfG82m2i324mce10w\nV8yvqSQvcd4i6kdCoVdsZ5xyfql9E0cqImoGJ6LiCsDELw0uLwMR2/UtgCLLVjoShnip4RFRKQRv\n/0dBt1qtrKsTESmgKFgbWsimCA7OVMQW3rcjZ2WK4baTiJJkxwVQKoQ/PmMcymrZGGxvC3WDw4NC\ncjqJBY2xNMSPQWIhe9EwjzYS7g8OdJmiY+xw/nhfnuf0BlyTiOdbhCOeVxKnHygVPqMBt5fEWDuv\nNDtY5iDw3jYmfj73Ne8CnhJ9Nr+MHYHc105A6fQhN5xHaRnm3hgTZIXSBJPJJDlhvAelRyIiU1t+\n/+FwmAqu1+vlO04mk+TsQNTFAfHxMHBMUHwofQjqb968yXdAcQLhe4dRufOHs+EYJ2TtJUODs3B9\nfR1HR0fR6/WSGM9643NSD8gRTjvvwrhhaNgs463nfEb60QYTmSFVtFgs0iEidcpacOBJ3Ti4TlSj\nZ9zYgUddHe7poBMjxpiaIE8Kz5whO0AOsvjd6VNzJO1A2ajyGY5CaYQdhJS8V/NpGU8+Q3fiTMFl\n5Z6Hh4exXm83J3gnM2e0HRwc5Lt7V6EpBQ6CSse5lDWaAxsajq0Da77rVJt1B7KHLkCmmONut5u7\nTW0T6Lvvb3oEY4jceA5sr3EibfccBDsgd3r56al+EgYyjH5zYLa3tz2Q+NOnT9Hv92upctb509NT\nDAaD3LEfETVZKpvTdeUckj51CtU25W85URHf2JEynwnhoh6SvXwf6UDEXZaUh6lvTo/RAr5b5ovN\nrfEgRlR1RUo+ixcCzhR9iKgQj1arlSRWHEUiCyMlEXUis9+dhU2UZwFGKWA0/G7O/TsqMXEawjWL\nwxwLhBEhsmHG2D89PSWpnnmiMaZGB1G8cNUsnDhrnhP+bifI8+moECVvw8911BVBdnB2MXg20EaV\nkM8SBUXhY8j8fp1OJ4nfcP2YKxt5lE5E5dRybx93gbJk/I3KMkcYate1ohAr48WOlIhIRwgkh7pR\n3B+HiF1m8BTa7XZuK+/3+zVuAuOKscAp8NywPr0pA6I1xoTdpbwDRW4Hg0H88ccfz9AKnETOoWTn\nGvdBH3gNU6YEVHB3t6oVZQ4RjqePbMEhIEhxkUCQnv39/bi5ucmxwYFip5yRYwIjxsbIOhGynWOj\nIHA97u7uktcWUTkSg8EgHVT+RiAKR5Pgj3vyDAIbZNS8KFoZLDDHpUOEzkCWHbTZAfW93JAHjCdj\nMZ/P80xHry8QRcoZ0Ad+8rz5fJ71qSIiZQG55lgUnodjYieQcbCz4t9L0nKppxkTdvq6n9hIvufN\nD5ZDdBtIJqUCuLcdYNBfxqa0ew5KvduTsxlHo1He344lc/4SL8tghscJ+UAOncFoNBqxXC7jr3/9\na3S73dpOdnSFuYXmN/udSn4v4+hjZbgOzuNLOoZx/1r7Zo5URLUdMaLa1dVsbgvrmXnPIjEpr0x1\nAA/7IE2ntPg/SondTjzbUQXXEFnbeKHcOBAxooITXX0VR8GHyK7X6zg7O0tDiSDyLAx/uf3fKQ4v\nZP6P02RBdASJILu4ohUdC9aKj7+h9LgvTiJGxM4LDh3v5oWDwjcK5uKEXMP1jvSdtijlwgRiRw6Q\ne3l/qjZH1HdR8f7e3cU1yJU3ExC1QFalLzx7d3c3d0yBakRUu8QYS6f9UGxOmdlZL1Msjio9vrPZ\nLO9pZWGSPPcYDodZc+3NmzfpSFBD6/z8PO7v7+Onn37Ksb+5uUmnlIgeWez3+wnH23GKiHS6GVOn\nw5fLZTpSw+GwdnYl6X4czfF4nDKD3JGKw7B4JxVjS/BjFAtj6iiV63hHHEOjk8iCHRPmn1IEvV4v\njo+Pc+5vbm6SUEs07RQ7fWa7uY0//xgDy4GbN68wt7PZLPr9fpyenqaz4Cr9EVWtqojKSfduMho6\n0alCI2tln8pxIxgrEVCjxegwf8b13BvnFnSf3bDMtRvoJ+PHmOJksKvTwRCBK880Gsma3d3dTTQF\nmbG+c5BM4OC1YGcY+4Vd89w7Y+G0FBt2ms1mnJycxGg0yvM0I6qAnnfkLELmiB3eOO4+NcF63rLy\n9u3b3DSBzCM3nN3J2idgiKjOvESOLVM+H9YOFM8/ODiIq6urODs7i+Pj47zOB7y7dhdjit5HPplD\n9LptrQNUHFQcQaNjBBlfa9/EkcKgl3lH5599HAwGC2PValXHSKCIyM96m3fpmRoiJqL04LphpHDe\nzK9gCyi1j2g3Nze5Wwlnw5yN0WiUUVS5swBOR5mic6qp9JT5HAiaBc13vKWY7zki4HqUl6MhBBIH\noOSC8E4ggYyNnS732TwJlH6ZejOCUDqZdgZ8JIajWws/ix2F+/j4mHA1KTA7Fu4nsgZqZbSI8QSx\n4h2QCcaj3NV0e3sb0+k0VqtVLkyUDeUErIzdN5QCn7nOEE400WU59kD8ZVoxYpviGwwGcXNzkzJ1\nd3eXu+7+5V/+JXZ2duLjx4+5Joge37x5U+sLjg5Ov2vJGIWF20DwwXgC3eOs0T92wOFgMU/IAcaD\ndelUFGkE1hD9WSwWMRqNMqq2o+5AgMYcYrhZr/P5PI34cDhMWaBa/Nu3byNii2idn5/HbDbLsQE9\nIEA0j8cRsPkz1KKime7Q6XRqOhGe1OXlZS3A4z34HnwYZJh5QoYdtCCPBF1Gap3WQ19aFq17rL+d\n6isLspbUDHQ2Y0S1+sVikXWtPGe9Xq+GSPK8vb29Z0eO8BnrnUDbaJJ1aYmGe94c7PJ3nDfrBBA/\nbCFOX0S169rv7lRxt9uNXq8XJycn8ebNmyxXEFEhw+zeNGcrorKnrCHzDnkutov3R2bX63Vyahm3\n29vbuL6+TlS4PFrM8mAdtbe3l/QAOIK2ewTrFxcX8fnz57yOEgqMnxEwp5BZP0by+D/p3bIOmndV\nO/PDevha+2aIlJViRDXgX4NOidZRelYGl5eXtbowL8GK/LPBiKigVyMPEfEsAjDSBPy3u1s/dXu1\nWsVkMskT3bmWdxiNRmmE5/P5s8VtVMOGvSRK+p5GjEqvmYX48PDwTJmyOBGSElpnQRGlsRCJql/y\n+JvNZjoE9NP3xAlDgJ3ztsNYCjGKgMXFO2IAcAKtLOC2tNvtrHhvmcFZhDPibewvGVOPGYrHUTeG\njrkD2WAe2+12HlVCLRnky1W+TRKNiNqYlP3BkMJzKQMHIPj5fJ6IRMTW6FOj5fPnz8lRYixHo1ES\nrc/OzvI6qmh/+PAhlRXt/Pw8NptNPtM1hkC+4HFA6o2oUvKk3rwOcQR3d3fj/Pw8DVJExZtC1kAP\neP/7+/vke4FAMf/U8+r3+7UjnRhveJcmqtNASymFAFnb9XNAdJCpdrsd3333XVxfX2etLNY0ZSqo\ni2SU3s4I+stIR6/Xi6urq6y5Y94Z66vUNUTzjFer1ao5w+iBMsB0msjGruRHlpwg/5/rLd92jP0M\nPmOdsP7p6+7u9hw9Ni4YHcWAsjGA+3NPOFOWPcYNtJnvligX34uoHCcHob4f98CJQh86o8B3+LtR\navfZDgpHIR0fH8fx8XGMRqMa0uXMBmND/6iDBvhgvYiux6G07ub4Keut0unhPubaYTvshJR8T9LL\nFJ+NiOQEdzqdmM1m8eXLl1oxVmdg3CfI8GRyeCfLk8fJQZK5WvSdn6V/ULbX8gev7bW9ttf22l7b\na3ttf2f7ZkfEEP3Q8LZNirbnC6cGb9yRxtXVVUK4jpCA/ZzWcCMyMyRN/0zms+dKhWwfKeFidxA/\nv//++0SJIioO2GAwyPcGiQDFMCzpaAC40QRC99epMb8bKAZcAUctvh+5eROjndp0lExRRj/H6UtQ\nMg6VdPRFuhCeBOPmSAfI2OfJ0UjXODIwCRRZ4XkgI6RESn4IHBzzrnhXw7vIB7C/eUnMJVwk8wcc\nvdOXbrcbk8kktwtzHxdmNBoGpwjuUpmChptjNDOiqpoMB8SIBf3+9OlTNBqNWmXz4XCY79xobI/D\n+e233yJim7L6x3/8xzg6Okr0lfcHoXGBW6dFQAxAnoxwEpG+tA6bze0RLxcXF7WjZUBwkRe4Hy6S\neHd3F/1+P49Q8Rl5lGTgOiMKpGzhiZQ8zogqfUaKDmQL5IS+8TwQ6VarFbPZLGWROfPGBsYNukK3\n2817mwO2v78fx8fH8fPPP8disYiTk5OIqPM/WRs8zzoXlNq6kY0+ROGgAGQEQBiMftIf5q1ETtHB\nrB3TGpBhj1MpB3zXOyEbjUa8ffu2htL5uZSHaDabcXx8nPNGMU7QQ/NRkdu7u7uYzWZJ0aBvfMfp\nf/5m7pr1kxE90si2Ud5IYKTOckmanNQdx6dAwGYuTP6GPkHaE1oDRzvd398nLcbVxUHOGo3GsywG\nRzAhH6YKlHxNyxm6H26ZU6KQ3km3IlOsa747mUwSHUcX8t4u8kza/eDgIMfAdBcoNKW8mF9b0mSc\nQvxa+2ZHxJBSQVCddmHQnTIjL3x8fPxsgqfTaUK8hhAZjOVymXAlCoVjK1CIdnpQ4F40Jp4BObIr\ny1uL2S01n8/j5OSkRmREATHRJQ/IcKj5BU7vGHrEsJK2wEiV42zyOO/BgsGAe7cG6T4LpwWcbdMm\nYPIe5gdhCCIqDgWOoB1CeDU7OztJjCQVdXp6Wsu505+ISNL/09NTpoCsoHEG7ExFVAbKaRs7mGyJ\nRtF64aMwkJ0yzUjeP6I66JM+c583b97UlC0y4/Sc5x/DYz5aREWQJI1jhxLl3mq18uwsV8zGQcO5\nMmkhkt0rAAAgAElEQVTaHKBPnz6lXPz4448xHA5zfqbTaXz//fcRsd2qP5vNkh9kxx4lv9lsnjk1\n7XY7FotFchst+5BlF4tFEstpOKlwS2j8v9frxePjY+6apSQA98WYMJ/mOpEOGAwGqacsT+gJ6wWC\nOxNTS7I19+cYHuYeRc3zvL6pB0VZCesI6iQdHR3F1dVVHhLd6/VSR2JomcMyYMKR5XfWMPrQgRCO\nJ30z36Xk8TmIYK3hpNqRMuWiTHmxFng2mzV4XrPZjHfv3iWV4uLiIiKqndQ+fxV5Y5ezd0mXwUmz\nud39ae4kzrW5tuZysW7os+/pVJF3FZsP63sgM8gQARpOrY9HIQVHMG254d9yuYyrq6uIiORE+VgV\n7zonqME+OAVrWgNEfd6L5xLsWrez3gBD0InIBGNmXes0LqAL74Df4DReuQOeMbF9KmXRtpRSMuif\nssaf/Y2X2jdxpDBgCEPENlLodrs1h8oC2Ov1otvtxmAweLaLrtfrJQfDEYa5P+v1OksrRFSCQKT1\nUqTPdXZOUBAonPKIBQZ8Pp9Hp9PJd7i7u0uP26eS01j4Jpry004Mz46oHByEAuMfUW0RRTFbYCOq\nMg54/RgtmqNLowXeMs7C9+JnfECu2I5Ozt+L15wGBJnf6ctkMkkkwjn1iMhCkpAnqVWEXPj+cIb4\nHcSsRPkiqoJvLGZzIUz+Rh54P5w/5LBE71C+RJn0r9frpUOE019ypIzwWG4ajUY6w5YNR9soThck\n3dnZiV6vl+/nOmmdTifm83n89ttv0Wq14ocffkiZmM1m+fPDhw/x7t27iIj45Zdf4vHxMR2Yku9h\nvt16vc5dOBDwefbt7W1thw7bwr2jkbVAYNHpdGIymdTOokPeMYiHh4c1gjsOD/LE+9/c3KQjuFgs\nkksVUW0Hhw8C8Z7nEZCg12xgzEXp9/u1s/2oPUVJCq/DTqdTc9iN0j8+Pia68vbt29RTIBGLxeJZ\nfT2cdProulV8hgPW6/We8XRwvJAtI3nIKfrdXCcHUs4CRMSzde8xjah2hxmlZ7yQ3ePj4+Q2zWaz\nnNfHx8faMT+gfqwnb1CgD5vNJnlFXt/oPO7r/ltPWy4YN+yZSzIMBoMM4sssDI4fusubNwgeAAcg\nlLuMBRshOPeSsQFVp78GJQgEmGPPE0619aQRKH56juirHS4K0jK/XttuzmBgGxjz6+vruL+/Ty4n\nepl7An5g+x0o8BP7YE6fkaiXHF6/e9m+iSNlD9GQa0R1fpgJzgjSwcFBIg8M3N3dXRrQciLxSL3t\nGWO6s7OTqTi8ZUf7EXVkyIsNhc5EMOh8B0V7c3OTC4LnOP3C+3nRWfmWf4uoH2ZpR6bVqh8iasFi\n0TjCREiJhktCrYXGSAeLkJo5VnyOaJ1GjKgIgihTFFJEVQQRobcimkwmuRMSpWKUp9frJXkSo8J7\nozDLVEO/34+9vb086BXHppxD+shnpHvYUuw5RA7Yudjv93NsI+rVeBl/pyy4/0v95fuO5HjHMpK1\nY080zrg6LeICkc1mM2Hz09PTWK1WcX5+Hu12O46OjvJdHZV2u9346aef4j//8z8jIuLLly/x/fff\n55g7JYaTiLM7GAzSYHz+/Dl+/PHHRHF3d3ezphUlLCIiKxjbgPEsEN7r6+taIMK7objRMSBuTg16\nXgguMCQYIeoKgUyQjmMOHdHaCFuBs4UcRwrjRSDlYIgxNBLqgqSs7Yioka2Nitl55N3t8BnBR77R\nF6Sc/dn/Ze/NfttKkjzcIClq46bNdpXdNd3ThcG8z8z//zbvszxMowvdXVWu8iJLJMVVEkXyPhBf\nnu+EWXMvGhjoPigBw7YonpNLZCy/+GUkjhB/m2aADHoOeHc+CGT9ApLF851KZ24oEWKn+8uXL8X5\npC4Ua+j0k20Jz2G9nGoz2szY6IsPJ7muodce5y2ntXl2pp6QDuZ2CdsY0DcCIh9Acn9ADq3bHx8f\nyxhx1HPmB8fNwT9OKQVJjWT6uzyP/2MTcGLywSWc16enpxiPx2WvcdIVe+D0MPuBFKpTa+yBxWJR\nDs0gF7bZgCXMKw6R9asP+jhdmIOEiK9Ljrg9iyOFoXReFUTFhp5FBGoEdnRkCvxoFr8dJ35nX979\n6uqqHPE3IsJ3zdfKCEREvRQA/+cZ+fJdPGyEzekqn9CwgPI+KyU7Ofyco98cX2bM3mygWeZlOVrw\nWvAOlIznBUVMmhDEI6KKonB2XI02IuLz58+l767ezfwQnWRFZJTAGwqZQOHhOERUl6yyUR0xNhqN\ngmJOJpOYTqdlI5pX5e/zPaf5zK3i+ZvNJobDYTnlwjvZ8EbNnMJxCi+nxVg/UEYaa4TSdzkGGwXk\nysejKRrKZyBL/X4/3r9/HycnJ9Hr9eLm5qbICHPSbrfjj3/8Y3z48CF+/fXXiNjVmen1etFq7W4C\nIHWFLFrpNZvN8jlXyXz58iVOTk5qx/hns1lJGVLoFIQT5Utqo9FolAKUEXXFSGqHlCvGhdQA5Qci\nKmNiOUWm+I7RFaMQ/L6d5Iio8RNZJ8YBaomsgZSyLk6h2ZiCcNMH73eOoeP0g5AyPtARZDsHH+io\njCDjuOIQ2Olx6svpE9YC3Yc8O/jCCTBixR7z3np8fCxyMxwOY7vd1U776aefagjCZDKp3SQB8kRf\ncDLYI+bd2TEEDeH/lhHkgf5hh3xaknlzZgV7gsxwMTVcVFqer5zuJ2ADmSOIo6/8Xg607JRnxwDZ\nR5/4e6D+rIOzNDirppA4S+Gxr9frGI1G5bsEVft4S0b/cxqZ/X97e/tVCpaWKQHIL/OaA1DQOnwI\n15/ah7bV5u43P/k/bJBNM78moiLp2rnAyOBQmSsQUU3C/f194T5F1AvamfMTUTkXh4eHcX5+XgTT\nffGGt/OCcsLZQ4CdIgT+dRqPiJwog2gPw8uC+eipNzqC5X7a4XC66PT0NO7v72M8Htfy+a7fYvjd\nggJ/i7WwMgAyJUoAQeJ7kAm9KVnzu7u7WtToHLiJmCbwHx7urv+4vb2Nb775poaAkQpmDhyZgDbi\nNHgTMI84TBR+i9ilBQw5k86IiBp0jvOeG4oKkiPOC7VPkDk7x0Sc/DxHdGx4Uo05vcP4HHmjzKik\n3el0as77ZDKJzabilqFsbm9vCyry/v37WCwWBT0hzfv999/Her2OX375paBHkNTZDzlNzd1+y+Wy\n5pxTDmG1WsW3335bS+kfHR0VxJDvei88PT2VUiibza72D5wvUoYYZ6OujoI56OAUhh1G1oc55boo\nUil2pHESMUTWQwQeFN1EvpElxujaVOhJy7D362q1u7KHAMlXJ6FDcL4cXcNHwzgbBffvOqjjWayd\n6QXIBnKDrs0omH9GY96sX7Jjw7sp+hoR8Ze//KWUdZnNZrWq94+Pj3F3dxej0WjvO3E60BE0HE76\nawd0s9mU4MxBDt/DGcyBEJ+xh0FCkYvNZhPT6bR25Q/vA2kFdTL6CJeU+xhtB1gndEJOb+EU0XJq\nzw67i53iQFFY2QgVeg0ZNRptp7LZbJZnsu/gf5liYRlj/DRz0VarVQyHw9qBAyOa+AQenwEPyz4p\n0+12G5eXl+V7Dqp+q72UP3hpL+2lvbSX9tJe2kv7O9uzkc3xsJ0qIXVDysL53Ih67tunB5bLZYlk\nfRWII8RMGMMTBZ4178oeaGb98128cPNlQC84yulrBHxCDvQBrxj0yDA+jdSVI1/Dpk4LZGI06RsX\nWvMxf3OfQBsidkgPiArpEx91hXsGIudCaaBSRHT0td/vx/n5ee24Os90Cq3RaNQKrrK20+m0kCB5\nJgif8/JGwCi8eHFxUTudBBpHtH9+fl7m7fPnz6XyfLPZjF6vVyO3A20z10ReVKbOJFM4KqQMF4tF\nQao8DlJMrC9yg+wR6eVjxUS8ufAgEV2j0SicB/q6WCwK+R5uzZ///Ocip1wcvtls4urqqqBqj4+P\nhZMGcsWpPTgN7LHlchkXFxdlDOfn5+VdFNnzWsCXcCoVOeWOP1Jk9IVn+UJvk/RBnFxgMaJ+d+fR\n0VG5EobvwVdh7xjl4y4+iqBa3s7OzopucqoNlIt97JOd6BDW0ukl5IoLhnOa3frOqaGcnrMOI5W3\nXq9L9G19wv/z/XdGXMxDYQ8zJ6TNXAAVtNlrYHQwo2B8ZhoBKAmI47t37+JPf/pTOY15fHxcTi1C\ntAe9v76+LnMGgs6+MfnbiB/vN3pkjiHzynwbQXYajv0AsmYkGp3tAsG/deraGRquJyMTw/s8x+wr\ndPg+JNDUCGTRtIrtdls7dQ6CZ9pNRHXlljlGNBBPp25pli/G6cwP68WhCMtFRHWbAbo+Imo2Bf1u\nZJz+OCXNZ+axIR8RFaUhnx53e7Y6Uvvys0B2FsKIiq/EJaZ2LIBoUdIWRlfhdh6b9xkeNK8h9y0r\n9oj6qZmc24aUahg3oroPDkVkweCkFA6NlT7v5TSG+wdcifLEkJsH43oa5lhYMbPRI6r7rJz3z2P0\nfBmqHY1GtbsSPX7nnd2YE893zk8/Pj7GfD6vEY4pXQG/zmuHHGGw+/1+MV7Ox6MQ+YzULWkBH7k3\n9M8fO7gYdOqaWE6n02kxyigik+3NPbHTYwXK85z2pQ+sh9NRrCm8JQcmOHWbzSZubm6+4hUeHByU\nK41YZ47ncyEvvDVkBsNwe3tbg+k7nU40GjsC+jfffFOrst7tdmuydXh4WOSJe/jgpzjFTPkG0kL0\nP9emg+/glCjryB42n+fg4KCWQnx8fCx7kHc1m82yp3wCibRw3i+ZrDqZTEq6lEuOfR+aDz64XlZO\nl6Hw5/N54Y4yb+wNc3voA4aNAM6pYhtjX9rLHuQ0qoORiJ0+YT1szPiuqQKu+5MdKq8xP3eqlGd+\n//33MZ1O4+effy57mGdyKTdpqsfHx8LLIViPqNJVdlxt2E3E93jZd7YXDoqgNfh96HtSvH4mOjYf\n+OF7OEbMC79r0rdTf8wVTj22yH1FN1g+sJ/mvxmwADzIlBaew7p7LQj06Yc5nsw/e8Z2wGn57PBZ\nXpgnp/74jHIL5qQR4LpeGu/DGXVql/nM78/tWRwp35HliCaiykNaESGAkOtM8iVfbKKueTJm7OOh\nRlSOAwrWgsiC47ka6fFkmrjod+wzbC4+aVQmokKWUELk/pkHPjNRlL7QB4yOyeYRUa6eoFaRyd8m\nv2dnEUcTwTI3w/NgxcRJOG6iz+Rnk6OtNMwLy84skS3v4Th7RIVs8jveTMzN09NT3N7e1hzefr9f\n5tf9YAyvX7+O9XpdSKusPQ6ueRM+DYPsse7mBzCHh4e7m9p9TYg5UHa23SdkgEtA6bsdYDuuPA/5\nwAgig1YioGX0BfmHAP7dd99FRMT79+9LuQF4OD4J980338Rf//rXgtjRF+48Qz6/fPlSHIl2e3e/\nHg5Uq1XVQluv14UvB5cCzuHj42MMBoNy3Q7Iqfc7z8Dw55NX/r85REavfTzeAaBRK+YebmFGE82x\nYl1w8EGocJ7t9KC8XZ6AqDiTmH2whbnHcTFnpdGoymUg03bUMSDZGcr8VAy8kTzrYOtJ81TR4+ZY\n8V3WwEiWUSC3VqsV3333XaxWq/jhhx9iPB6XS6LzNVWQ0plvo2lPT1UNIjss7DnztbLOsk7EmWMv\nZSfSyBTOPmvBWB0g571sp3W7rV9jw3hMhqeved54nrlcRt2Yf3SZZdkHqH7rpDey5+CBAHKzqd+P\nyfdyPSrPKTIMQsozsZW2t3zGOuZx2dml8UzLApmynC3LqJ7bszlSEVGL9vedmjNZk0ldLpe10xTA\njUYJeA5CbOWQoUR77vaWgQRttCPqpHicDBNHSRHiRLBJ8YJZlExGRZE6VcczgcbZqBYEPiOKR/Fg\nHEgvTiaTWoFMKzungZg3lB19zg6h4W0TGTebTXz8+DHOz8/LJsj9iqg7ITyPZxpiR1njGLkKNesA\napCVDf0jIjVakZ1jK7eIKCkEn1BxypENZ5QDBwj0xOli+kZUjQLlHTmNQN+QWaJ9EAEa91PxnuzU\ns69AN1gDR7NOeyLDkMK/++67eP/+fUTs7tP7l3/5lzg4OIj/+q//ipOTk3j16lVE7NCqu7u7uLm5\niYeHh9oJuuPj40KMHo1G0Ww2y95nP5+cnBQHjMb36QupNtYCB2swGNT2IWNcrVYFBXXFcxwrZNx7\nitTPyclJQW0dYPkAxsPDQ1nH5XJZCosif3a6+Bmpa/pDtG5Umd93LaN+v18zrugDn5BiDCaS814b\nK4KPiCoFyr9B63LAlJ0a9BljRN68j63fIio9472f98i+tHYOkCKq0hDdbjfOzs5iOBx+ha47lWZn\nkb2KfkT27QTiODozwHx7bvns6ekp5vN52btZlxIgWyc6Bcf/mWMHxYzB9AocGZAp20v+DeqaM0Am\n1FuHIYPoQztLnGjDEbXcoa+dQrNjY/TecoGso989r6wZyBF6ivlA9jJKZPuBw8n7MiptB4y95YDP\nyPD/5kRFPCNHCpjQJ6EYCM6DlRtGgdM4hjkRSBbMgunN3GhUx/+d42YBUTa8z6mwfWMAIfCx44OD\ng1KBGmXDMxk3C2OjDG+C/rmuDUUa7TFH1IuWNZu7o9PwUlAk/M7Z2VlMp9PixFq48e4zPIpQMUf8\nnO9ZcTKnmbPGWuAIYJgcuUdUjit9M/TfbDbj4uIiNptNjMfjEl2ykdhUTiuguKxQmdvPnz/H09NT\nXFxclOs3aCim9XpdjtxbBtjw7i/f8+ZEdoz0oLhcUZtnGv1zetvy4XpbXot9R7lx9H1qC3kHwcTR\n6PV6ZS045XJ4eBhXV1dxfX0dP/30U0RE/PM//3NcXV3Fv//7v8dwOIx//dd/LcqGAp6r1SouLi6i\n1+uVyJNIdbVaxd3dXa3QIykh+m+nEM4NhsnGAM4R8+UghHeCxOB822CyD1GuDgY4IccfdJSjV97j\n9R2NRnF0dBSnp6dxe3tb5AM0A+fK6SRzwAiK+B5GBHQEh5Q+MH6CwGzIrL/sKA+Hwzg5OYlut1sL\nPpAT9o0DW+TNDpMdBpBYI6R2bBzY2CkAMaSBavF962bXykIv3t3dFZ7jt99+W+YUBDYHZk45s/7Z\nkWLNfNqXZ7AmDnZZH3QvThXy7XSaUR4KCmP7DAJQOgbd5SDCts+OhlEwvo/TlB0pI0UeI2vCz+0Q\ngsqwJ5yWRdcgG8wxKWaQZdKCyBvlQkDXLcNGvtx/mp0yo77oWcAMAiyP9elpd20aY/B7mTNnHv63\ntF7EMxbk3Gx2VYfhprD5DTd6EUlPAeHnaIXFiqgEwkRRCwKfkU4Cyt/Hs0FBZIcK4adYZESUu8eI\nsM0Hyn3K0WVERazjGfQhC7ohRzsr3CPFfDin3+l0ym33PCvPsfPFkEeNGvFOoiiUt4XThGZvRMPb\nfg7vxtBlB6vZbJZrBUBmMN6Qwp0eyAKPkXGqCbI0Ssx5dKJRNo8RoMFgUIPqGTPNKBKlJ0xUJ9WW\no33PrbkRNL6DwvY7I+r3eTk4oC84YSgU114CjSEtAupGam80GpU73H7/+9/Hf/zHf8Tnz5/j3/7t\n3+Lg4KDcwweacHFxEZ1Op2bYIf2Px+NinO7u7iIiyvwSqXvclhUUW1ZuzClOodcR9IG0t6NKp7iz\nXsDQMEdGfChrAkqUkczr6+v4/e9/H4PBoMwp84qSdnXn9XpH+saozufz8j6QDiJ2owB8F+fEe8by\nwVF/p3GPjo5iOp2WquYucJs5S5YnHCfQIKdoPLfoAOstZIo96vQ0Tk+O/NELjH84HNaoIOjRX3/9\nNT58+FCrMYb+y6lG7ADy6WDI+jjrXPpNf7wORu3zdWNZ39uOIO/+mR1MO6I4h7yPdK5tofeGUT07\nxZZr5NzvJDDNDhGyYOfZhynM87Q8gIYSkFoWjV4yT0admHNssPc26+xglPHRRwcrXif+jUzzfZxB\nyke4EZT9Vnspf/DSXtpLe2kv7aW9tJf2d7ZnQaSIgrfbbYki8AINM2evkOOOERUxjWj31atXBc3I\nRHWnBTKHxnCiv+Ncqb+XoU4QE8ZALhkYmsjT5F9QFEcYQMwHBwdxd3dXoGFQJ8ZgLxoEj0ji6emp\nnE5xocGcQuS7GbWyx79arQqXxH01zwfSrlMRoIdOtzF+5of3Eb3QR9bcXBcXkiSV6iPw4/G4Fn3k\ndAScj30RBkRpl3BABpHRRqNREMfBYFDSMhzH5b2WXZCu2WxW3uvrjUA1kFOjCVl+Sd1RcDFD8Ya8\nc/rLc+so2b97eHgYHz58qJ1aQ/4hf799+zYidoVjf/nll/inf/qnaDab8cMPP5Tn9Xq9glQis6RS\nKbrJGt/d3RUOWrfbraWSHHmDUBKpWp4gLVMtfR/Pj1QDe8R8Ra+XOR2gFIzB8kSVc1CJdrtd9BdF\nNofDYSlOCtrOaT5OnZpG0O12y54h1eQUItwxUFCnzeDjUWrFyAJyStrESHG73S5pPd9DR5kMo0PM\np9FQo+smamfU38greiNzQPm+9YVRDfpwfHwc5+fncXNzExFRkI3VahWz2Syur69L1XPeY6TE6TD3\nLaNBIDXsT/oHdwiujrlNRpXg74L8I3eupo4Mm6vldDJzy+9EVFeMMXbSUqZYmH5BSswcNMZhBDvr\nHsaVv8ucMJ+et+l0WuNist8sF5R9MVrm8bP3nYJ1RsPvM9/Up2/ppw+BOP0H2gaH1adSmXuKg4Ke\nMWdkxH6rPetdezYKuYZFRAWr+pjzdDqtOVksFEezqSHi75uv44Xi+0wQv08/yAXnnK8JqoaiSYdx\nrBxnhGe3Wq2iGP0+w8ZsZHhAzAuGwmOgTz4tyPdGo1EMBoMiuIzfuWAElZQcY0FpzGazr4y335lh\nXOaAvjpd6E1hUj7PzFwEGu8xNI8jxf175tPxPvM1SCXQX5cnmEwm5ToXZM0pouPj41K9G9gew0Xl\n3IhqA6NsmQPWmtN/+5SbTzJlhwBlBx/CcsO7rODNyzGJ3ScoI6rLiVG4jJFgBeVsnsiPP/4Yg8Eg\nDg4O4qeffqrtQ4wavBSud4mo7iEkvdftdsta4Fizp+BM8b31el3qkrmEw2KxKDwQFKpTNexNHGTL\nDYbCqXGXDeH3ebYPKTAnOOLmlcDjIWXGs0hduoQHqU0CHhP/aRj236I74HjhCPG+w8PDmM/n5YJp\nOwStVium02ms1+u4uLionYayc4BMe1+iJ3BqkVn6gx7ACWNPkpY2l4fP0G3sU1Me0AV8p9/vl0Dx\n06dPZf/haHGAgT3tZv1ASQS/x+PHaYUSEVHtNVNJkEWCcfZvpoGQpuMAQ05DZZ1Hc3radtHpKObX\nup13klKz/s7pzkwo93wZZDAvzCnXiCicYM8ffeP/7HM79TzTZHPr74io6T2aaSmtVr1avKkX1PZz\nag/7jVwxBkAG9IjlEAcs98PtWRwpn8JyLhfuBpvfXIHtdhuz2eyr0194wiyE66kQBZjPY+8UD5p3\no8Q48mzF4U1DTp8J9zMbjUatcNc+j98kWhqb1E4FY2DD2TGIqAieRol4H0iNDbEVscm1/pu+mIRn\nATePANQhH1HO/Cs3b+j8MzshfE4EnSPkiChEcdZvPB4XYXd0heLwprGMmFt2dHQUl5eXpf8YGxoG\nFufNXCnehTNhJGy9Xsd4PC6cFK8xRscbnHHC/WMNPW+sqb+Xlfs+hMAcFu5sMz+u1WqV8gzv3r0r\nBQ3n83m8efMmRqPRVzwYnHWcJDun0+m0zMnp6Wmcnp6W+W6323F2dlYrAGlnIvNYTLiF94Wz4b1j\n3eGCuTyDtTPi4sYzzTujzArzZMU6mUyKEV0ulzEYDGpGAQXOOqMjZrNZnJ+f1/a/+2KF7n2BU847\n4Z9FVLWpbm9va/IQsXPczs7OYrlcxmg0in6/X9bJKHuzWV0v47ljbtCN/NyBlZEd3m3UOQd0OIWZ\nf5iDDTiRrOtkMinXax0cVJfvbre7y6HNu2PeTHTGCTMyjqHH4SIQ9ljQHy78zNhAbjwGozmscZY1\nnr1PZxJoZztjMr6dU+wrJ44dmKEPQEF9yIh5wgm5v78vzim614RyNx+gMDqGXKPjzKNFJoyk0bDr\nBKoEhW6np6eFe+q5Qvf7PsKISkcTKOeA3air14gx/P8OkUIAXQzOpxqIThnMdDotkbEJ4jzDxjIj\nKxCNjUDxN9+xoaTRH28svscC5VNUPoaaIU76aCE0ugN0aCFmfETBRi1o9/f3JXLkhEREdRs8pyLy\n0VtHXRZ6GhsOpZg3yXq9rtWgYb7tvPAczztzYEK9YXcrWMZxdHRUijpmcreh2KOjo/jy5UttLdjg\nRIQRUUMvQF981x7RyT7EjY2P7Jocyd+OTP1ODDOy7RQeP8uHG9brdYn4qOPC9wxn4/zbKWUfGRX0\n3KHYSCdH7Gps3d/fx2QyiTdv3hRSckSUC4SROc8p+wQD7wurqUr/9PRULikFOb26uqqltI1yeX5B\nMbMz6HnPSIdTe/tSnqzJPuMVUZH37bw6qs4pMwzcarUqRTE9RqJyggOex7w1Go1YLBa1u8Gc3vIa\nYhScTkOGDw8PYzAYxOXlZQyHw1gsFjVndDAY1Iq+IudGVdin1jXMox1E61MjLXZemENSJH4na4HD\n4aDNcz6fz0s1/YgoqZn1elfaJdM5kIl8QtZrBvpLAy3ySTHS/ybH08+ctnfQk4MX1shzwNo76LRO\nzMG/aRL8n/2Sswa8D0fKFBXmw7YpoiKxozOQS2QKCgpyYEQMPZsDOmRjX4rut5xG5gFHDzn0HiBz\n4EwNDRTKJ8g9pzTsgufWiFruS5ZLt2dxpHL0SGMRnCKIqC6hHAwGZUCuRE3OE0OEMFJp2sgAk+MU\nGY6ZNyDRsbkdfGYUKC8GjhyolRUfAsz3Mupjz9kKmg1jQ0lf/LehdrhGhljzOJhzDHwWfiMlOdok\nYvA62Vm0gucz5p+xu6w/x40zUkZ0gJPVarVqhoZNxdzQz+FwWFIscAqIvPv9fkH5Wq1W9Pv9MhtC\n9csAACAASURBVPfj8ThGo1G5VgauUET9VCVpHRfUw3lpNBo1tIjvUtOIOXdqNxsiO6AogG63WzPQ\nnst9yCLFP70eNIIOInMbttvb2+j3+3F4eBjX19e1S025WcDHxiMqnpkRGXPn6Av9N7IAsgkny4YG\npbxvfHZA8l4gzb/dbr+6mJi153lGSLzHnPrmd/luhvqREwzqarWqnYRkDY6Pj2vHrk9PT2sGLKJK\nsdh4ZFTCOoM+Iac3NzexWCzi7du3cXJyEp8+fapF9uv1rrQHCIn1IIEhhtfp5Jx6+a3+sSbeNy4C\n6jnNgSF7ww2DuFwuy5w6C5FT3/BgnPFwIJ2RL2SGOUUGMxJPYIIsZS4jc2QZYl54b9bfvpjaOtiB\nKE6PP0OOWDOPlcY6ZIQsol5RPqfh0PsEBP6eHUP/DNTNzhXzho1AXrz2/l3LIv1gDbzvSWmDGNpR\nZt32lU1ANul7Djz5PnNtR9N7b197tjpSTIaPUOI547wYNvYGv7u7i8+fP0fEblK5EoAjxI5aEGJD\nuHzPnrkFdTKZlPdtNjtODEaYRUBI891ILJ55DxFVJMMCYWwjohwl5nlGjpgbNkM2mnZ2ECDmjD4w\nVnN2UKKsg9NGKDMbNKNWQNsWdsZv/lHuq//vjcia0R8r18ViUYo4np6elhIEEVWFcqITOGoRu036\n6dOnaDabJXWVSbwoNztnIE1E/Mvlspb2g+fCMXWnNa2QQRDpa6vVKmkv0kom2rKGrLOLweFInp6e\nlut3WAvkFjn0++gXCtxpXRAp1pO1ABWK2Bnkk5OTgjrhBJ2entaQWBpV9I+OjqLX6xWEhLFThXy5\nXJaUAc/AMXdai3Ejj4bcQWDNZ8u8O5AxG3vmBLQHJyobLtI11guOaJEPv49CpHCUkJvJZFLumWQ8\nfMY8YhS979ATBCx2Fk5OTspeIxBhDDhxh4eHcXl5Ga9evarxXxyYGDkkELWsECjY6PAuIw04EZSV\nsaNtZBuHynOKfDr1x2c8p9frxWg0KgjRaDQqe4mrgByIoivZG8gV/cQhcvCFEXbAuY+a4SrvEdVd\nbOxd6wKPFZ1pZ2a9rooME2Qx3xnFsSFHDvdlU0yQx07ZnmRagtfQNZc2m00tLc5tFYzP+x9b43Qz\njWdZfnivnalMk4moHHkDHwYWWG90opHinPFgv/B31hf+/Qw6+B372kv5g5f20l7aS3tpL+2lvbS/\nsz0LIhVRoTf2ToGAiUrwJDudTrmvCxgUb3E4HBZ4m9QADSg9R7K8L0cPJtxyegjvnb4AJeLtG+Ll\nma5ebk8ZZMnpvYiKqGfYme+Z3EckmcfoNF2upA4MTVTrKsJwvZjT3IjIHZU5ktvHsbA372ie+SL9\n5TQkaVqQSJPB4XJMp9Ov0qytVqugUPATQJaIrrgvzpEu/fGamQtA/0CKHBXBZYO/gzwxB/TfRFDG\n0el0ylp6/pxeZT0cQdNAF00AhfuWo0sTYA1le+zA1ZkAyrvOz8+j1WoV9ATCLKmr4+PjghAQTT8+\nPsbl5WVst9vCu3r9+nXc3NzUIkhOCSLrIDzmV4BiGC0yx8Gpa+bM4wB5Y0w+qs+cIY+ZX4L+MeeS\nSDYjiMhup9MpJ7PgNvLZ2dlZ7bQSn0G+59Jic9pIXbLfTD/g+piDg4OyPtxfyOXny+WyoIBG+tB7\nyJPTmv7/er2uFevkfXALM5ePueP53hMglcy/v+f0EjqQNeVz0Fini9FfyKOv5yHVSRrazXxF9ilj\ndDYgp5G9R9nnNL7jK7QYX5brfSn9XC4G+TLdxPNi7hConhGyiPq9kaZ8gEbxPc+p18LjQdfSV1A9\nt30onNN4UEJymtH20FQQ1n+z2dRoHx4zupj3QeNYLBbl1B7rT6ke9DNzQV/Qp/TJ47Fc7mvP4kgh\nXDnHTPO1KxERl5eXNeVmSI4FYELyUUg3Q3MIl6Fl+tBq7a5dIH3jflp49zlg/DyTVHkuStHQ6sPD\nQ0lTkN7zpZQIN6kvK33Dj84V+7QfQmHnBW4RZDwLqlOShtw9DpzgbrdbOy3z+PhYq3HiOcjOgSsj\nt9vtUmHc78KxWa93FapJu7LWlCdwOoy1oPYKc+sN5ZOjHp8rImNg2VxWMN1uNzabTVknDDlyZEct\nojpJZQVkp4d1xRC7BhPrinzSH3hT6/W6EGX5HStpxumUAo6lT7DRjo+Po9/vx3q9jru7u9p6NRqN\nmM/npW6bHeXhcBjdbjdOT0/j48ePNY7BbDaLt2/fxmKxiKurqzJ2k6uRGada2PPZcDHHrBHGhblB\n6ZNmyukkjD77g8b6oRc875Q8wfnabreldhE/e3x8jMlkUkuLcO8ka7BcLmsne5GFfLyalA8Oix2p\nx8fHwp/EqSAgefPmTdzd3ZV9mnki6DAcGr/35OSkpged4uPAj51zp5StQ3LQgs7L3CrG7r9zehdZ\n9R5HL8FF9clqKrfbuTHXyzowE5U9H6wp823ZcH9tQzglZtvAPOMY2DlCdnmO+5Fl1in9ZrNZLt7m\n3XZQPE95/dk7dnwjqjJD6E1/bz6fF71p3ULLoERucETN5aPlAzbMW54P5BK9g33x7y6Xy7i7u6vt\ntxyI4RSi55kXvsPBFq+hSf/72rM4UkRk7pw3dzaKLDboko8i+tilnxNRGSgEMp/ow/HhcwutT1b5\ntJCjyexgRFROIv3KUYSRMCsQrjIBgfE77DQ6V8x3UbDmSHmzmkeSCbdcG2PUjZNsFjK+B4mTwpjm\nQjAfGGcLI3NHP3yShJ/zvHzM36dIjORERHE6KHZpx6Xf78f5+Xnc3t4WdJKx0wecRvME2LTInfP2\n/IHvxPhwXDE2djD8TkfBzK8NFHwOHDQ7EE9PT4V7FVFFyVzr4popILy824VTGZORMPp3enoajUYj\nJpNJQTUonkl/cN6enp4KOsbJvIuLixiNRjGZTMr7XQ6g0WhEt9utnThkDTNX0QGSOS+WH9bIDq9l\nln5STJN5pKFzMv/EPCbPv3lOllN0GSccvYcPDw9LTSe4Scz34+NjDSX2HvHJPMsf/cZpIyBiXBym\nQP8YxfOpMiPVPJ/Cif1+v7Z3vT9wUO2E2gEBrTLXCZm37PE8DCm6LuvX7XZbC/iYbxwk86/y2kdU\nqJf7b32Y+XGWB+tL/nZGgrVgvDhc5pfaeWTd/Cz+YLDdT5rl00gd+tL2xDXzmPMcvNEXo5XsRes8\nvjeZTL7imjmgx1HKBU+t67D56IzM1fL3CP58ctCymJFsnLP7+/tasWTuDaXPHq/BG+ya0S0aeigj\ncG7P4khl5nxEFe0b2kYxLJfLWhTrNM1sNqvBjo44HH2tVquSBomoCi9CfrUj4X6iII0eRFTk2Kz4\nnTbxGOzx0kwIRpggnXoj5pMtdgaBN7OhMRLTau1OplF9mv6hwKjwahQK+B4HkOPqs9msEDyppmxj\nYkXgzYZhMYnfhsYb3uNHQVjJG+2h72wOKz5QKCI3YOqzs7PyTKBek/Adydh44eTiwKA8IuqVh7vd\nbjnubhnmO6B5bMzBYFAzJC7ah2FmnC5Y6Sj17OysKB/mwcqVOeFv5N3KkHGgoNg7duSRN2q4/O1v\nfyvvf/XqVQyHwwL98z3WdDab1W4BoC/ed74zE0eGk4cumZERaeSK+Vqv18V5NCLLd3wowg44cmxn\nne+BAmVEgf4Q3FHj6Pz8vMjpbDar1eui4QDjJHst0FutVis6nU7NWWIMpPEGg0FBxwj+fKDAhzB8\n0tSNgHGz2VXuN/kZmSf9yLr5WP1isahlDLLcgP7YUcnZAxfKZO2Y97u7u9rhJHQ6joWJ8RRB3Ww2\npfAsDUeSPZ77YDI573NAbuSZz2zUnU3hO+i+HODYKfL+xTm0Pst2kfmzvmbeeKbfwXPQi0aimBcj\nkHYyQao43OOTvkaUmWcHqw5OszPCvBFkGHiwDUCGvE7YOFfnZ3+ia72+/r/T1/zNHzt2/1/bszlS\nCJ0XignDAGSjjxK3sCJEKIEcgRkFMofCG4SJZVJt8A2vRlSX4PJsvwshsZKmsUAWanNvaAhb5gyg\nEB2xue+kdeBJ0Afms9vt1hwOUlMooNPT02LkuJYFgzudTktF4Zubm5jNZsWwRdSjJUfQmXvCnLFu\nVkSkTbbbbTkSzvfIkV9dXcVwOKxFszgkjIP32Jgb/aGhuFByKAM7ShiyvK6srRFO5oHSCKQ881F+\nIh87z61WqzitPi3GOKbTaTktd3JyUjuSTP+JmByJ2nnI6CiyRXMwYJTXaCwRW6/Xi8PDw/jxxx9L\nPy8uLuLLly8xnU5LZGp+Dc8EeYRbhdHjRCRONeOz07her2scFAwpKQkj3OawOeqPqAzlvlOpdp69\nDp5DHC2nIXFAQZ9saFD0PBuuHGP0dSIeP+gXgUu73S6O4nw+L6dYr6+vYzAYlNSejQHOkCN9dMLx\n8XEtDcX4SaNz/Qw/d7CZeVfsocViEYvFIrrdbi3wRR4dlPKZjet6va6lS32adj6fl8/u7u5isVjU\nys14j/N//tDYM9ng+3PrWJprGtlJ9Jx6DH5e5tPRsi7M+xE9470ZUe1t1sDoLA0aAXbNto3Peee+\nkjPMH8+ECoHddHCPHWde/D50tJFABx/sTzvEPNOBisfPvgQBNBXFJyfRK+bbunZcDjyxs/Q5gyIZ\nGXN71jpS/DuiXjMmok5IXK12VW1R8laoIEvm2xgVceVjIyYQ0djY+wSZln9uQ5sdAjsvhi1ZNH5m\nHg1j8sJmiNNEbvfNYz09PY0//OEPERHxzTfflFpAKACuhmDeyHmjJHDCLi4uCnrlO94idh4/5SbY\nQEakiKAzIsV8OJ3qqIXfyfW3OE5NBGrCaUTUkJ19iog1cRQHomK5YA3NQ2EtfedSRP26BDcTGTFQ\ndpascB1EGPrfbDa1NBTcMQyUAwX33xErfWFc/HwfAsr/M6q6Xld1jRwZomB++eWXmE6n8Y//+I8R\nUdUuQhlxPD9ilxYAHXI6lebUjfe2jRafmScGoRvlbh3AOrGmTvs5LcdagWbY8cp7kTlpNBqlRpXn\nnOfe3NzEdrstTsh0Oi10hG63G0dHR8V5coCUG7wUHMyzs7Ov6kH1+/3CCWL/sv+Qt4xms098PQ9j\nJyjJFALzM9lXzElEFQifnJyUK1qcsmSuLYP0x/rAKf8ff/yxHFggoKCOFI6rHSnXrcpVqL0PMyLm\nz2jej/QTR9loFg09jd7Z5xDk9zgr41Sbn5mRVPpMJsHBtZE82x2PFydhvV6XPeqK+Ov1rkbjzc1N\nDS02Gpf1DSgi8+IsRZ53+ue/XbvJBzKs53P6nb2HTWF/8x70DfXreAbBtm0/fSGNaB+EZzr7sK+9\nlD94aS/tpb20l/bSXtpL+zvbs15abC8TDzNHAfxtBAteQ0R1RNepQUeiJmI7kgKqzoRL3mEv39B/\njvqJ4mhOqWVOQ/aE88kuIitDoxEVXGluS0SUO6aOjo6i2+3GH/7wh3j79m1ERLx9+zZev34dFxcX\nEbHjNs1ms9oN4qAfoAA+Xpr5NY4GOJmCB+/0EWkF0lB8xpyAZEEWzc8nSqYvPkoOkuESB+aYOIom\n0oQUbzRusViU+QfRyjJHis/jZr5BedxPTovmS2f9b6ORLvEA/8L9cfTj9JGRFbiB5ld5HI5M3R+i\nNaJV5Io5Zf5AB/M+fP/+fSyXy3j79m1J0a1Wq5jP53F4eFiuILHsk3o1GhQRBX0zkdVE9Ha7XfaR\n9y/cIFASyOqG4ZEzUCsib9bWvBcaew+emtFM9i0pN0fQpG05en12dlaTN/YGiBbfm81mZZ1ms1lB\nIPke6ZsvX74Urhj9BE0mmrZeZa4sE8gMc4IuMeeLeTd6zzPRdei2xWJR43WRwuGkKY15A7Fwf5A5\n+DXtdrsga91uN3744Yd4//592WsgebPZrKR12FvIIqgRpUqc9gM1ps/eHyBilllkkrW27XF60NQF\n/vAcf25kJu9xpxSdWja/lPVxWo7DJk67MlbQGo+Dz0A5mW+4T6A4vV4vPn78WOvPPp4Rssfesb3E\nrpj7+luZg8xTBpXaR0WIqC5mNhUB1MlpXacS2fvmyTEfLn3A7zOfWZfm9iyOlCczOxrAooZHgTEh\n8ObTOXmy/UzXDDGnATKdDbEn0NCtnbu8MBmyxanj/2x8uFUYMDZ6xNcXpnperCgMtfI7bLZXr14V\nQnlElIrUHAuGw+ATC9PpNCaTSTEOFn4MkY1DRJQUDMIL/yWiSgv5FBpzikLYx0tBHphvX6HhtWm3\n23F3d1fjlrHxTeZmzTBKrAnv5lQHp4tcD8qp2cxpILXs/iMXOECu++UUg1M4mYOB42QOEZ/jSNJP\njjvTH4/VhiFzwPibvttomODrlCrf8RpmR8GptsViUaqbO53GuwzB40S7wniv16uNgXlErqgLRF98\nuXjm63kP+znMG2tFX/fxL9in2dGiwrv5gA8PD4Wzg9zB58GYEIBQ+Zx+ku4j/YcjtV7vyka8evWq\nlP7wZcd2RglSIqq0rnlpNHN2rDcZOzqSdbV8M9/cvmB9xL7BiO2rsZRTJl4v7zs+f/PmTfzDP/xD\nfPr0KX7++eeaY4Ne4m/vfV94i6OaCdUOALMD6n3itCe6n72fnS70pfUE62GOrNP25jRa1ngWcpaN\nOHKNfeNQkZ/l9JqdZMZ9cHBQ+I4R1Z5Hh5+fn8dkMinyZhsRUbdRtpfWNU9PT6VMB7LH9xhDXgeP\nMac7mRvey/tyqtY0C+s20zasA9G76Gm/E93hwDi3Z3GkuEjRCorOo8gwDhGVo2HByPlgC5qVG5sP\nz53fQ0idh7YC5/8Wdt6dOUCMA4XokwruC0oWAcpKDG+aSJoxwxHJRticqdvb27i7uytH1REIvG82\nJnMIMoQR4lQMfeX0w2KxiPF4HNfX1xER5eJQO0GOchzx4QTwbzgNKLKMAkEOZK4iKqWL8nGNMRwV\njPF2u62dCkMpgo4Z1eT3MWw+1ZSNqBUUyFdWpqwN8mbHO/+NA2huD2uBrBoFYtMjd2xoHAHkBXSR\n/iAH9DnzkuxgGJVgzjGKVryQuClyyjrd3t7WDjA4aGFuTERnPZfLZRwfHxeZAS2MqE4TEUBlDiH7\nmeKUliVH56BPNPPKspKOiHJpOL/Hszi5BFJnLshyuSxOLtwgDmiAAKFz7BCiP1jbjDqydw8ODsr9\njqwNe/zgYHekHI7QfD6vRfmZH4bh8ykvr5M5e/4MZIf1MoLQbDZrwaKdbOSKsWYHzOvi1ul04urq\nKl69ehWfP38uPDzWlz1EaQlzGJnv1WoVr1+/Lg5oNqz7goh9Db2GPgbd8BryPoJGf4YutjEngHIZ\nD88Xjvd2W5WPoOGsWu4zX8/rh2wYgYd/SnPpHPYwzYTx/PsEJoAY3lfojPl8XkMC6WPODNkm8Md6\nxO/2qWjmAd2JPFgX23ly4B4R5UCHA37z3DJCm9uzOFJAslmR2ZGKqBdwRMF6siOqUykIKAY7ImqK\nDoXjUygoLjtQEdWE5z8R1f197rOdOgTVhoxnm6DthtF2VGWvPY+B5k10d3cXv/76a0GkECjSA0S0\nPAsh5FTeeDwuCvf29rY4J5zYc2SCUiEStjJlg/NzNpuLdLrvfI/oiurkHjOONSfTjICB1jAe5pa6\nUtvtNnq9XvR6veK4OAWcU2nZObYDYifWp/toRlptsDxGZMtwtCs+ZyTATjvPsXFjHKCKtIzQOl1s\nxWL0xZ/xb6dFPAand5jvo6OjUmzSyHC32y0nPdfrdUwmk9phERwiyOpGajFOGBTWkEMmoIvcA+YT\nSDZq9Js1zWlBIygEBxgv5n69XhckifG56jvvbTZ3aTp+5nsFswOCPsEJc3QNAmkSLPN2enpanEvI\n8qzTarWK2WxWHGtkzvKMI2CEBCQCPbYvTY0MnJyc1MpR4Dz5cnA31iEj1TaYdrQiKoI5znu73a6l\nkh8eHmqnDhkHc/b4+BgXFxcFlWbd0FM5E0F/9jlWBPaM3zQCUwsIFu3U4AxYv0fU64IR0LD2yKj/\nuGwCiKzXJ6N7/J71EjaIvd9sNuPy8rLIooNYzw1pZWeETGsgzZyzK+gnp81+K6BDhrxODnqzTTBI\nYL2Zi7b6XTwDxyjTL+wLeA35/Lfas6X2iIhyNEREnL3ziMrQ2KAhDBg2BCWiyrFuNpty8ozvkS4k\nWmVj8Uw88OzZs6jeMK4ldH5+XsZhLxbPG0Vh1MFKOI+PPru+UebWsIE/f/5cvvfhw4fo9Xo1Lo5T\nEaTfiL7H43ERlOl0WjbNcrmspREYF+vFc7wWFl7PN0JOn5364XcoxWDOAQaFSB7HAycQZMhoAI7U\nZrMplbqJynhOLnXAM73OHguKxbJqfhiN73kunKrOCKv5LVaYPNcOljc7xh5ZArXkfcwd77Aj2WzW\ni97ug7+RDxu29XodnU4nnp6e4u7urqwFNbZms1ntOif6zJH42WxWLpyNiJJudo03o0qkBbbbqthl\nxM45cd0uZNsOsHkXeX1sWCiSyfdQ1Ow15sPFOBeLRY0DaQUNkgsKQloSFM9OJmm/09PTcprPaBpc\nQp7F9/j9VqsVs9msoDK8jz6x1zIagM4kOrdsGbFzs0PtcgARVYV65txyFFGllDCkpk3we/yM/4/H\n4xgOhzEejwtPkjVHz+/jaq5Wq1gul9Hv9+Pbb7+NXq9XmxsbZ88XDogDnoxy2WDn1BJ2IZfIARWx\n/vX7jCx7LeyoYm+YHzvCOSWGg2bUzGthRNU6CocfJ5QK/bnlWlA8i2yL9RA6HwfMto2+OP1Og4u6\nb76ZD/4YbGCsyKKBDuYNnWlk1HzBzWZT6iVGVMj4PvS69Pc3P/k/bEymNzjohr1Q13rCkWAy+QyF\nyQS4mCHCizFwuhBPlujJi4SBtEIz5MjGQeAsrL1erxRH3CeEjNFRgo2yo7WIeg2WvLkRCMZ5f38f\nP/30U+lLTgu9evWqjJd0l9NYNCJ0ogzDyk7d0V8LGvO8b4z8nZUshgXFYRTEBGFD0vTl6ekplstl\nOca7z7BRbweZcS2nzWZTyMD8vg1QNsI5HWBo3NwQK0Q+I4XH/NPs/Dv9yzgyguE0If1DBjL/wpwq\nr2FO69B8LYn7xJgwRBi2nMZCpiynnz59Kr93fX1djFzEbq/ZIeFqk4hdhW76MZ/Pa1XWIf7jZJPi\n5Tkc03bh3Jz2xOBmNI/5zqjqer07Mk4aLyOOrBXf974g9eGAjM+8DkazQdvRlQ48mX/SKqTzaK1W\nqzhuRrLMi+MdONSgqBhhGz2cWWTOSD7vc9rLzYEl85gDFjv1ds5ms1mRj9lsVvYwP+OP9yEBxOvX\nr6PX69U4kBh97EROw7OP8rpwbN5IjsdHP7J9MocUXZXBA6NA1sME3kavWSeXSOFZdnT9HT/T6Sr0\ngwvfdjqdstaAHTxzMpnUHB4jZNbpdl7QTfxxOhLZ9p+cyeFZDj6wldb1zCllfdB/OKm8L4MmzqZg\nf0A5vU5G7fe1l/IHL+2lvbSX9tJe2kt7aX9nexZECh4PKauI+nUAREQmFhJRkgrAO+Q0gSMiPHVz\nHfZFguZROJIy6gEilGFtPNSMVsBN4DnO+RKRZoSAZs5P9vrzBaaMgWf7RE1ERX5l/PAXvvvuu9JX\nOGOgM/AP8Pg5CeIoGUTCpEXe6agSNIfv5cq7hsmZe9ICvJv3Ad+SAjL5m2gb2aEvpJJAKUDgeCbz\nvF6vy/U3EbuojHcSkTtqcYqk3W7HYDAofWm32yVyBWkx3wUOivk3ERWXjz+MhzV2FOl5A0HwPDvN\nCroFquj8v7lnTjnkfZQROU6DzWaz6Pf7Ze6Wy2V88803ZQ06nU45vLBareLk5CSGw2FBDowac8ko\nqS1QUw6lgEY4KmUeOEGH7JgjR9TtPW6Z4gLsfegwiMt8Pv8KjSZq9VrQWEuXMXAKgetjzC/h2fCS\nHM1nsiyNiu6gwp1Op/AYjWRAZjafJyIKSo/cek2MOBuRMPJJoV5H7ehN5HFf+s78JMZtVHSxWJSD\nLT/++GPc3NyU+SRrQV9ZcxAIp8EuLy/j1atXhVuVdQbr+FsUkswZBJ0BlfIBBqOdNMbkTAPvxj6h\nz9AlOWUGwpznk3XEZoKe5DQga+uMilFs0Ni8xhEV9QN96u9jO7K+dzqT+QC55XtGeninm+0e46a/\nRobJotAPI1KgrKSC6QtpevaH9Rr6Mp8gZ+ym4uxrz+JIuQ5Q6YhOugGl0XELXq/Xi06nU5TUwcHu\n1nmcLhsMjGur1YrJZFKu2fD7fA0Dgsh34AH4NJRz0iZsMx6qOJMW8+kNp2S8kCbPmSzHZwhuhhaZ\nKwQtp30Wi0XtuorxeFyc2IuLizLfOBZWUu43HBTGb8VmZzFzg5zeYf2c1vX7nBO3s8hzXQfFBFcT\nIDl9yLtcSTqiqnzd7XbLFRhOtUbUT6Iwl3bqbFRNqiUVyrgg1zudhlzCAcMJ4fd8EsUwOfNhBUEf\nzPewMnNqIh9ggPjtk4xuOCSso8fQaDSKg+6ThxcXF7Fe747rX15elis8InYpuvV6d/VHfmbEzglD\n8Tvd//j4WNtjNsBHR0cxHA7LaVyUK86ygxKUJ59tt9uSJsIZcaAFiZnx2rGBU2jj6DWMqBxu3ucT\nUyhxE4XZKwR8JrV6b5h7Yl7hPgcIvg7cOT5rt9vleXyGw+u6e8ynU3AYLIy/Tx9SJds63XNjMnUO\nSPiMNNft7W1ERAyHw/jy5Uvc3NyUfjCn7Pmjo6Na6i5it7+4VDyiCjhYi/l8Xpxrp0tJW7EGDlCc\nXkIPZHqCA+4cQNPsrFh3M347vDzP9AzWnL3CuzL/Kn83Uywi6ulk+or8kDbn9+ERIgfIkcdIn1ar\nVbEzzLlTxt4n9I858DOzXaEhR6QJj46Oio63Q+p38n8feMjyjZwyd56ffUGT27MV5KRjTKr5P0ag\nInYL2+l0Sr7bXKfLy8tyuoPfN2vfJ198XJJNDwKQvdNc98mKD6HNRhjF741KM/pGiQN7cuaZAgAA\nIABJREFU+0Zw9kVJzjPn5/K3NzInVY6Pj2MymZRo1YRrol0QJ96FAOLxm9th7sS+/ngtnU9H4aMQ\nLJR2HHL0YWQwol4HhfcTkRodQ2HacLCGg8GgnEZEDs0twunx/XcR1Y3z3mjmchHxgK40m80y38yd\nx+IoHfn0VSl8xkbGONuRYo585RGySO01F+20XIFYeZ38b6M/rCHPxvFzNHt9fR3v3r2Lx8fHuL29\nLcVgW61WQUDY10bH7u7uiqNkZ4kSHEblGN9oNIqnp921RhDfOegREQVtshHhu1wwjYEFYaGvrAEH\nIzK6AKfOhHcrYpBzrzt7DfK8uUrIE9w7O0kYKYI9notT52icv3GGMhpPX+gPjm5GG/hjRMrIBnw/\nrslBVlqtVgkiHXCiC3imZcpZAAj8vvOz0WjEdDqNu7u7mkOCTNsYw7vjeivGsVqtiv4bjUbFwfYf\n+uRg0HPD5+bLZITCqG7mq+F4O1DgWXZK7Chnuc3vyjwkyw0OBXvA/DT+oL/4HgVzWU/khDmNqLiJ\n+4qJmvzOGLmmbLFY1AKiiCoww75YLzCH7Ll9iHJExYXMTibv8il7I1roGfsf9IF3+5nes/vas57a\nc2TiiMUQfkRFGPalnBZwECMMGMIAYuLFyKkFnuf3sThGmvievXwrWP5er9dF0WZ0jN/B83ZxTIwN\nEauFhu9ltAIHaF/EgYM0GAyKU+XSEHd3d+VyYMPWfldEfKUAbSTw8C2Mnh+iTfrKJsMpcIrT76Zk\nA435oB/8LlGjjYn7iUywvswbztfp6Wl5Hv10VVzkyugQlznTB9aQlKChfJNPeY+jX69tji5pJmIy\nD5yuQdmRxvUYGQsKgNRvfq5TmJZvO+VGDj0OR3+j0SgGg0EcHx/Hzc1NzUA9PDzU0micqEGGbm9v\no91ux9XVVQ3lIaXHOhwdHdUKnOIocmLNaTCQYaOG/i6OIM4xDSSTvkXU7wPNhOF82ADlbQXOAQoc\nYgJC5pTvshd5dqfTKbKBYbChcRBH2pi1NaqELuAzBx9eZ+ZovV6X6tbIBQEuCADpdx9rd0rMe4Q1\nxcBaRjFcm80mPn78GB8/fqyhIOx75sDGjGet1+vodrvFPmADWK/5fF4cKcopoDN8uTT6hSAb+WDt\ns/PiMaAnmYeMzGU7wJxiA52moy/ekwYd6BcoEXonIz127GxPCKLYB3bY2C/0w4R21w9DtzLfRhit\nh5BBkN+cRuf9GXVjL6NLeRbvcDbFjib7hXkxoMHPQdWto+zcGeVHzvZlhNyetSBnxG974M1ms3bM\nnQ2XYT7gaRaVCDyiqprcbDYLKmWDzXuJtuwQdDqdGl/Eih+h9QZ0P1Fe3hg+RbRYLGppODsVhhv5\nLKLaDPboOZHE9xCSiKpOBv188+ZNOaXEGszn84K6tNvV1QwHBwelrhSCbmTICs0Kk4gUAbXwZcVj\nZYPQ2oB6LegDa4/xyjLhuVsul19d6WIl2G63yyWwpM0Yw2ZTpVet+PicyMUKkrlmI8/n8xgOhzVj\nDLJgY4hMoTBAshg/6Rbk2gqaueNZv2XIcKic3jGaiNPPOOgnishjZD+gcBhfp9MptY2QVd53c3NT\nngNSxjPH43GsVqt49epVbY6RUQwgR9iNHOH8b7fbePXqVRwcHJR0Kf1yqQmvP0EVDpX5JaxFs9ks\n6HLELq0E5+L09LRWK4o5Zy/ZOQaFAgFEL7EW/swpG+YQx4a54Hs4GMiU0xvZWPoUlNfRkT7yh6xY\nnzCudrtdUnsOWtl77B+nshkLv2ddT2s0GjEej+N//ud/alyv4XBYgg4bffYf6TmuguJd6Ajq5+Ec\n45h5r7oiPDo2IxDoc+su17Nj7tA1diKRUebF68ReyWkjO5vZgDvt7sDSmRh/17bKTibyz/epEcj+\n9HNwgAAYXE/KJ40dWNAX7MA+gMIOIHuDZrvgvhCws17WQ+wVZ7XyiXyekZE1z31GInOQmtuzpfZA\nV+x1I8Sk2kyMJL0HidQGGqV/dHQUJycnNSKnc8URX9dsMgRKY6FwpvZVGiY9kBf58fGxcDYspDzT\nKZYcmTkKsKKxIsybG+VFxIZgoExxJNjs5onglCBUvB+j9eXLl6JsvGmazWZBFWz0czRmw29HcTab\nFePCmHxlT85He/1cBRujZ2XC3/BxVquqSByfHRwcFFTn5OSk5iw4PWPlx/f53AY3okpDrdc7ntB4\nPK5ddWP0Edkz0Zh1hLPB99jscNI838wVCF+73S6KjHQfht/H3B00MAZHiY707IBlHgHoC880tG/+\nFc+hRIERzul0Gufn59FqtWI+n9f4eNQwOzzc3QmWUWoU5uXlZUEyaexpAq3tdlvmO9d/MlLdaDQK\n0mKibESUK0dwQFwviL3Ivttut7XinZ1OJ2azWTG2fMbeZOzHx8fl0Md8Pi+6pN1ux8XFRY0OYAR4\nX4rKQYYdKe99O7zdbrdwi9CjdrIODg7KtSy80/okI2o0B0n79jd9PT8/j81mEz/88ENERFkDHA1n\nDjy/V1dXtVQqKPVsNovJZFI7MICxNippZ5B5zKi5ETfmz0FMDqzcsjNqZ5AACf2HnsXBdSCZOTv0\nOZdMQQcb9bYNw+7hMBnpQS/jlDrAZD/wXdpsNis8O+xNTouZU+h9jKPDM22nT09PS61Hk8at90hj\n0+xcOpNB35AlAg/bWfY032cMDhB+q72UP3hpL+2lvbSX9tJe2kv7O9uzIFJ4fYY5ncckteDoa7lc\nxsePH+N3v/td4SFF1AskEj3zTLxhIganAEnvEB35Ql/4FkRX9Ie+m+Nl5MynWohmTIzm/4bhI+op\nwdVqVSumlnPgPqFANAliMZ/Py7xA+uQZRH1454Y0STkalQAuJ32R89P8ntN+pD2JZI0G4dEzp0Sw\nEfW0QYb+Gbu5YE5tIkv39/c1/oWfDZJjFILn7LsihnUAgTKXy2k4TnDxPdaXIndOERoFog/MI/07\nODgo62ekw1GnUz+kYUCDvL6UKCAd5aPjcAUcsTqSZm9mNJF3wDHw+Ej5cBKOasZ8HxknCvbVMp1O\nJ25ubgqnyVd9kCbNp+Q8P5BZ1+t1QUjMU2TPMR7k5uzsrJYG8vg5AZq5TkTDeZ1Mokb2+d5gMCjr\na54X3wPJIso2Wsh63d/fR7/fL3uf/XpyclLG57W0jBuRcoqMvWuS+uHhYSkbYmTJZT32Veo+ODgo\nl0475U4DzUF+8v1/8/k8Wq1WvHv3Lv76179GRMRf/vKXMldGQJBFOLFOqyOLyE3mwvD7ZD7W66r8\nCYdzzImkWX9xwnkftYODIs5EGJHKtAh+x2lFv9Of79PZzLXRHNaI5zklStkM0LDMn2JtSFVmCgxp\naCO8EVFOiIO6+io20HdTdLyGTm2a9oCu4BAOaX2nZVkrj93FYp2iZbzOhNh2weFifLaHeW1ye7bU\nHik54Nl9REQb7O12d+/br7/++pXy8xUPNg4IHgbAKSIUD5PLwkVUqSSMFD/j7+zE0Xy6ACeC76EM\nUZqZm4DSwdmzEnLe3kqRNBdCDs8gYkcmRzHgVGUuFpsJ5Y+goqAMWTOOXCU455+Bh7mbyRuD57Va\nrej1erWNQVqS8bBOTi/kzeO0RualoEibzV1tL6fSSGUwt05HopQx3CaqIh/MiU+Bkj5CcXv96Ctz\nh7IyRw5lgpNi54W5gOjs+SYNmZ1znL7pdFqD11lDxm5Z5x18hlNppx4Zx4jb2WIdUNQmd/NeHCx+\n9ubNm0KWJ8XnvQ38ztqavI9D6Jo53kco88yjIDXL/uWEntcWJ86pD9/7x/zntUDmHx4eSvqYtI2v\nZWJNcXKdmnFwieyuVtX1OxFRHCjz++zwO3hysMNn5qv4vayRT3wxZ5YB7xnkm1Srj+7z3cxz9BH1\nL1++xPX1dTm9eX5+HhG7E1+sz2QyKfIYEV+l9eywsP7I2eHhYSFR41ijD1wtnc+tA/I1Sk7xmU5g\n59QBNLLDuNnfrCFEbZP2mWfsGjrOOop3npycFF1rh81XOzHmiPpVPXkd0dHor/v7+6/2EH30wYej\no6M4Ozsr9eDMceX3Scs6aMXxJI0KVYZ+5sDJV9hkPeDmOUMnWQ5x6H0LiteH30Un4hhbTnJ7FkeK\n/LvvEuLOK5P17IRERHGmvKFQ6iwWiE7E/gt/zYWBmGYCZkR1CtAnmuyouViZERqe6xolfObTNe6D\nx0Cfs3PG99hAvvA1olp0bxgULwgJCBNzavRsNBoVZU8fbDC8Kc2DwmgYQeDdOGlshG63W5xnjKIj\nXjZqPo7P/DpHzffsYBgl4Xsce2Z9fS0GRGUXFuVZ8OPYvBQInE6nxXnAYNj5NCKFY44iOjk5KQqa\nKMyGLyKKExJRlalA1n1C08TKrHhsFJkDHKqM5vD7R0dHNZlDEeVIjDXDaTNP5OnpqTZG9hbvYY1w\ncJkX+sZccRiDhpJlfRj7fD6Ps7OzYtTyiS7ewZ7YbKoSJr6yw46X55H18T133PdoZI/G+JCzw8PD\nWgFYz5NPipn/0m63Y7lc1vQD1zUR0TM3Rt7Mn2PO7FQ4Kke3Mp8Y84j6lVOg6uarwfkDseZd/M1Y\nWHcHbehR8wojojg50+k0/vznPxf0IWIXuPT7/RgOh2VvWxbfvHkT/X6/hi6zlvP5vNiTfGKVPqGn\nzE3FHvCujNagoxzQ8Szz0RxgmnNjXqFrwjkI9Lp5Tfy9x8fHghzybJ+cs23KNsrBF9kefk5RWOvi\niKqgMDaIE7MRVSYCGwu3EZnieaBk5pbRjJ4xBmqcAUBkJMu8RjugRvzQD36HMw40Aj+cqX08vyxD\ntbX6zU/+D9tgMIinp6cC2UdUG9E1nfZ5/Cb70fgMJZWNMIuybzHwkBGuiChRPBOfC74RJWcSGu/y\nouSIzgrUhhSY1saKMZjg7MjAkRb9otK2U4pcagp5LyLKEeyHh4eYTqc1UjFwP0egQZkidoam0+nE\n4eFhSVOwaVzPC8PoU4Kkl3AADDc7mjXUi3PCGtqpIxXBnBjJsuIl8rFjiDE7OjqKbrdbDBOGm8j8\n9PQ03r59GxG7i6A/fvwY0+m0GFynEp26cLoyokKzDLX71I/RKUefRNXIi+UwHyW2I2Vj5tQn7wCZ\nQMaNMtIn0q92sDEoHLawEeZ7rFFWRkSBEZUjAEG52+1+lfLFqOFEWg5pmVhqMjLvY208V5BUiWjt\nUPF/jGx2vCy7GR0FcTWSgfE4PT0t83Z5eVm+z95zmg9ZZJ2NqCIXXHjcau1qX/n+RAwvz3IaGUOf\n5cnpG1LD3k927iOqUiGsO303OuPPLCfMN87J0dFRPDw8xIcPHwoCTDDm0i085+rqKi4uLgrq4EAY\nfcfzbStYx31IBuvtAxg52GF+fa+na6Txe9bftO22XqqFuTZy58AZvQBC5LX03X9ZNph/9lpGB0Eo\n6Rvrip05ODgodRstH3bkcrYFHbJYLOLk5KScAMdeYEc8DkAH9oRJ9F5/DgyAQiKLOH527AFP0AXY\neM8N+9GOm9OOzhCwFpaFfe3ZECk8SqfJUCZGnPjMnAh+FlEvkmZBjqgfjweWd6TAd4wiRVTHOfnD\naSO/1wowIwTeHHzm3Kx5LhH1iMX5/Ih6Ne2np6dSE4pmRbPZbAr0z+Lf3d0VxTidTmunQhyxcxop\nIkpF8PPz8+h0OtHtdsupjHZ7VyZhMBjEwcFB3N7elv644vV8Po9Op1PWdz6fx/n5+VenLGi8H4Qo\np1RRfrkuiIv/2ZFgXTKfLSIKqoDSd8oXtKrb7ZYNykW5r1+/jm+//TZ++OGH+Omnn2qOMgoB5QhU\nbcfEacucFvIm9rhZWzhARnEjvr740w4hcgDfh+85TZxlzkoj86BwdLiWxcqbz1DKRrlIkbCP+/1+\nkZmnp6fodrsxGAyKMbXDg6PbbrdraZiLi4vYbDYlGON5KFu4Vqy1nR8cVT7DeWbe7HSDgrNOpB55\nhmUZeTDyGxEF1To+Po7RaBTdbrcYmi9fvhTejiuFe16pbeV1Ql8S9IBO0A9+x+P1M5E50yIcyeNw\n8SxQepDNRqNRDCTzZLTBXBh0jNEr1mk8Hsd0Oi2lQqbTaaEnYNhJi67X66Lfrq6uyryxt5GN0WhU\n6uRhMJ3aJHXO3rdtITjBbiDDGFZslDlLPN+oDA0nHtk3hSSvVXbQWbd8GtIBFPbPzqplmsLLDqyw\nP9hEZzEYG+vFvFGyCCcJPcH6bja7WwXOzs6KXeEz9Ln3GfOGDMKvIigHEGk2m4Ub6MCCtSM16ufy\nbJ5vm8+c2yeIqCqiQ4Oxw8sa5L3p9iyOlBWz0zM4Ihg+p/Qi6l5+9rAj6gXaIqLGVzEUyLPYOEDW\n5oegLDFMjjxBxPyHts+o80xvTBtvzwnHPf08+spYzWchEjUxMGJnSF6/fl1+h42BoC6Xy1J1FofD\nqS+M/Wq1uyft+++/L+/E0YjYOcWOhDEkpAIMm/NdhNbKHUXovDzzwsZHRowCOS3CumUZIRLMUDyf\n+2gt88RVRL1erxi9VqsVg8Egvvvuu/jTn/4U//mf/xnD4bCsL0bW8DTrStSKI2VEEtTJt5bbgLHu\nKAejjTiEzIH5T0boLBvmXvBzFBGRPc+m0Cj9BOEBceT5RpNBHu3wub6YDUGn0ynOOd/xFVI4y6vV\nKhaLRRkf/BajypPJpOwd7u1j/3ivYWAc/duxg6SMM4GBtrJGvrLj4CryyBRR+XQ6jc1mE+fn5zU0\ng8Kbs9msoL3MKVE58m/n1U4VBomfo18w7Nad6CbW2PrV6Y8czDp1OZvN4vT0tFbAMafrbaQyOokj\n9de//jVubm7i5uYmvnz5EqPRqHbHqp2gi4uL4khh6OCKUeogoqp677XyGK1PPI/MicuheI6dqiZg\njqicTPa7qRrIBuiJUTxSdqyJMy12kIx2Mh70DX/b3uRg3ugRQQLvpaRBRNTmbD6fF3pKRJRUHraN\nqvo8v9lsxmAwKOUznCpnXo0wM6fISa/Xi6enpxKwj8fjGI/H5Wo3gxlXV1clqwV5nn6iW0Cb7ACZ\ne+xMB82p1Jze+9+cqIiX8gcv7aW9tJf20l7aS3tpf3d7FkSK9J0JmeTA8c5NArTnmKNLokJH8TRg\nPaNbjsyI7vHQDXHzBxItzeRW0kw5dwp8nI8QO5WYYU6e7Zw4P8PzNnxLP5fLZZycnESv16shK0RA\ng8GgHAflZFnELjIZj8clmjHScX9/X+654udEwhwZBynhpA9z6xTtdDqtpW45jt/r9Uqaj3nh+Zy0\nc7TCWuS1N3cORCYjYEbzvE5E0ETRPBeiJLwdrpKJiFIcsd/vl2j8v//7vyNix58immV9OZEVESXd\nQ9RDusMyBVpocmxOJxithCdAhEqqw+MzGZ1m+crROXsPJMipa6LXfr9fSyVFVAVJfQqJMZCaY67N\n2QBhcArHiCtzBtrE73uujci47IW5aKR5eC4IIRGs0UHGn1PJlq+cnvfedVorouJUNhqNODs7K6kM\n5DQiSuTtS7aREVLGRqMiKg6JTy7yPpel8PicyrJei6hO6zIvvjnBd+ixVk6BHh0dlfQWaAoNdGBf\nqnG5XMYvv/wSNzc3MR6PayejWAf2InLE+9DPy+UyptNp3NzcRESUA0kej/Ui68ffToM7HeR+ooc8\nNsuCT95ZT8GnMvLrkiiZo+jsijl/tm3m8NAnZ2accqMotVE+xpdTwNjZ+/v7ImvoPqPi2CKfggUh\nuru7q6FORgKZX5p1HPab752fn8doNIrb29u4vb2tpei2223JhDAe0wGcsTCdJcuBqRnOLpGVYH5A\n0X0gJ7dncaQMX9q4sehZUIEUUXImgtnostA53cBGtlGwYPq294jKkQKKNMTpMaBkrbzNE7Bid5/J\nPTtdyN84BZ4Xb0znyk9PT2O9XpfqzxjciArGxKHKRFE7hAiQlcuXL1/i1atXtes8IqLwWRBscwVI\ntZ2cnBRY1afafK0A9YNYdxOUzZNptapq3/w7H6unL05t9Xq9kkqh9pYNtMefa9Q8Pj7Ghw8fipJi\ncwMh4/Sfn5/HH//4xzJnP//8czHA9M1ke1KakJF9Qo/vLBaLWK1WNeeN9edIvlMSXjPzXZgXnIFM\nnrSxxfGPqO6OI3VLfRi+d3V1VQ5nWGbg6TC/BwcHZQw+4YWh9TVOELwhKzsdyd43p493MC/T6TS6\n3W7hbiFfOFOcfDMH0EECZG3Gj9NOanofcZh58/gxNjg4yDckXMZ/c3PzVdrMxHGnIni2dQfvpuU0\nhJ0Dp4b4zKevTLrm/zjlJlR7jUipuT/oGTsFTjUhY+bLsP7w37jSibWg7+fn57XDCBFVSQl4NfP5\nvAQivnEiOxnI0j5iOGuIo5ADV5PQ85wyfqetmQOCOQIJ82ztrDm4MoeL5+f+8gxsm/mVPjDhsbLe\nXnf+9tyzf7PdIUC0TUQv4dS7Vhpj9z5yStDcJI9ru91Gr9crJzdxrCKinMhsNpvl5C6O4mKxKAEs\nNoUDWOgA9JffBzjAHwfX2JBcb87t2RApIi07NgxwH3HRvBOUcUTl9CCEv3UaA+OVo90cqfAZKA7H\n5BGMXq9Xi/JyVIp3jBfrz/OxUnvYmd+VlTdzY0/ZxMDJZFK7RgOHk/eiyFyqwIiaBX29Xsd4PI5P\nnz6VInteJyKd4XBYnFA+I6ImmspFGVFuNiSNxq4WDgqMUxyM24RFbzhQCis8RxEoAtbdPAHeAVJH\nY21Ho1F5rj/v9/tlXMhQxA6pe3h4iOFwWE4uOorCECHXJp0iWyjxZrNZfoZzimPsvWBFzJz6KDN/\n81k+vWIjQWOfwIcyIjsYDGqkbDtnzCVzYkeRnyP3j4+PBVmyobGjxztwrEHIMm+Sz7iXLyt371Hm\nlNIIrIk5HeYNsXZGJRy5Zh4MsoS88TmBgU/yOlCYTqe1k8Pwh3CiGWNGpPJdmzwTh5r5Mr+H3+Xd\nx8fHNZTT+xK0h59D0sYJ84lhAhk7GEY52eM4P+adcfqY+TWKPRgMCprrZ7NX0NVwelgL+owM2JEw\nNw7Zo4GoIgdGI1lHZDyjSOwH633PtYM5non+Bi3JzyIzs4+jg244OzurodXsNetgB9jMPX8bPYXD\nyqEmH5Biv+D8eE5Bxwjmrb9AW3HQ2fsg271erwYmMG/sFUrRoC/JIFm3ucYfVwKhvwjwWN+M+vFe\nuF6MzXNuXb6vPYsjhaA5ZcciUYsmoo44+Hi5UShHQHzPzsvBwa7iro9UR1RwIv82pOyjtrlmEwoa\nRYkDwHvZtPzbDtH9/X0h7x4dHRWFmcmduQaJj8lamZg0mNEhb2afhLNTsNlsahdX+oRZRMTPP/8c\nh4eH8bvf/a4QOc/OzmKxWJRNul6va0eNqevCOjraYa0cDUdE2Ujr9boUdWPecCqog+Xostvtxng8\nLmlG+mC5oFKzDRupM5OYMVKGrUejUfk8YpcyePfuXTl16jlrt9txfn5eNud4PK45B5vNphRbpZ9O\nb1HzKh8tRqFzIMCKwAaCuWK+UfoYbTvpGE8Uh1E3DAunffr9fq0vjup86pZnsDfsuCJ7jMPGiwAK\n4w9hH7ll7/Ne3ud0wOHhYQ3Cj9idAmWd7UBH7BBAB17IAb/D2tLPjEowDqcw0GcuIEozCsEYjGI3\nGo0SgNhxPzk5KU4suggdh7OGgjdiAZkeo8d88Rnr2263a0VVI6qsAOiokWwcL6ejnBnImQajpfTj\n4eEhrq+v48OHDxGxu9B6u92WuwhJ4zFG3/lnfYIzZtQw2wEMO3/TN+8fp7iQJ88Hn7laNpkNIxY8\nL1M97HDxu5nEDJJnpNbUEz+LuXbakv3mQxrsa+qEeR1Jw6JnrBetz/LJNf4mEEQ2jfjzPAchLhzt\nvUFAy7Ndm8oEe4IyZw6MdkbUbT52EefeqXJsBX2xrCAbIJ9e1wya5PYsjhTXN1i5w6lwusLKDUcL\nA+S6ETgsmYXvWin27mn8m1M7OdpFkFHk7hMbzt68I4yIqI2PvrLRDg4OSkRHCmq73Zajzl5Ep1Ai\n6mgDEfBms4nxeFw7Een0m3PqfM4fjAf95mTJ09NTfPr0qbbByIHj4JjvwfyyoTi6zjiMlqCw3U5O\nTuLi4qLmnD09VSf92ARG8nAOObWFXBi+Pjg4iMFgUFN8zM8+B9Och+FwWIz3x48f429/+1tcXV2V\nvlhm2u12dLvdchLUChwFtNlsas53RJRSCzhChtSdTsBo2CjyO0a8aHbOLft2uLMRQsmcnJwUR8bO\nMO+9v78vR8xZu/F4XL7PdS80X9KbT3AZ3bLRZb5Q0vP5vMgQSDFjmM/ntdQEjj9Ij1M/8PDy3EVU\naVY7oMw3eySnaDwOR7s23kTQcBORfRxQAh87505pwm9kj+IAohvNAWPeQJBwcJFD5pqUGuPgmDnO\nXaPRqBkvn9g0AoBsZP2ZeTKbzSYmk0nc3NzE+/fvI2LHLZxOp+WkpGvj5bSQUQF+H7qAg1bQGfYf\ne5z5zs5OPkXOnDnNCvqDg2FEBj4pfbUNYxzIVj45bgfI82l9QeDptCZOFH0hOOdzbA160KnrvE6W\nWWeEvFcZE/bNgRJOFb/vsfMs+mL7i5whk0axj4+PaydCTZPhO7ZtjAmbTf/tyLKmzFd2vplj5sB2\nNtuq3J7trr2IekSNgC8Wi5qXzO8xmdmjt4OTc8mZN+L3odgwxuYD8LtEnxGVgIPgIDSOYJw7t7Ph\nvlowvbldNfjk5KQYHsO9/G52qoBNMeARUbvviOjJhFRHMhjr/BlHi3/55ZfyPjYKzzMSgBA7heTr\nLugj68mmGo/HpTZJp9MpRUJZJ37O77iiLZEsRofvMcfMj4nf1DhiDb3ZqLdENLfdbosBvrm5ievr\n6zg6Oiq1tLzx2ZxET8iy5xTFYmeKqJT0IGhZRBUJU+IiEx7t9Du9Y8csNyJxp1wcpbMXMWyuobZe\nr0tpCx80GI1G8fj4WI7vcww6Iso1O8fHx3F5eVnQtX3zZmdhu62KWObrGUBnSCuY94PJAAAgAElE\nQVSwnuZ44GA1m82vCvrRb2TJTqmd8OyYImc4Wk5fgSqxd3jm4eFh3N3dlSr7nU6nli5cr9c1nei+\nGBGwzuC59NXBHo3negyZk+Pv8Bz012AwKEGESfggFXYm0InIh9EMdMt8Po/RaBTX19dFp/z8889l\nXeCn7HNCqHhuJwD9wdwYeWCeKbZrpGNfHaKISlc5Tcn6Ukep3+/XSr4gO1nvOzuC3aLmGbIPqtnp\ndIoDa12a9y79w+mgYDDvdWrc+hzbyb+dDcIp4TOoECA5Rn6c9nbQwPtAY/cFagZO6FdOXZLGQxYY\nHylz0zdAnAiGnSqnL6BgyEmmBjgrgL50cGZd6nna117KH7y0l/bSXtpLe2kv7aX9ne1ZECkiOnt9\nIA1E5M5dR1Qn8CIqz5xngdo4qqZlbxwPNJO6IyqIk1QZUYUJzuSusyfL951+dPRHdEmaarvd1nLM\nnKwDIs6pBiJhv9upJZNJI6I2f0QB5k0ZkQIVMCJnyPXx8TE+fvxYPnN/XOiUuTafIeffibR8pQVo\nA+lVUkMRUS5/7ff7cXZ2Fp1Op5zeoHgn7z08PCzRLRyC4+PjGrE4IgqR0vA5c5qRyXa7XQpykl4a\nDoclzcRnRqcajV1BTxNAiYDgpZkL6OrHHDGnEaGDHJGqolGdF/k3QmDo3PuA3yUyN/JKOmO73ZaU\nI+tLZM/cRkRBAJfLZUENT09P4/GxupDcldDhZ3iOQZ2I7B01kk5gHzEGkGHkb1/qYD6fl+dxMCKi\nOilIP4xuEO16n2d9wt/ef4PBoBCjiXzNzeI+y8vLy3KCL6KOGu1LheX9nnmgPt5vHovTki6M+/T0\nFN9++21Zn1evXtWQGF8XQnqQ7202mxqnynJozo0RTr57f38f4/E4bm5uauly+mi03il4dBYIqfUX\nqInTmMgwXKter1e7TxCOGDqKdHxElbr23COnoNfw1rwPzVXi3x4DXC4u5zYRnfkCrTYCxhyB8PgC\nYb6HHjP3yOhK5hIh8yBjtl+M0ffqGZXhO5nWwrtZR8+Nsz408wOZB2TYfDX/rnmc6C37AT4FbOI4\nv88YeB9/3E/rJGea3M/fas/iSJFnt5JCmbFghvJQdk5BeXAokXwKyXlOfteTYSfKBF/exxHqLDQ0\nE8D9PvrkmiEoChwKw+pWCjyLaxLgFiFkwM55TOv1unYCh7niGRkmxmjjKLnuj51baifRbm9vi1IF\n5vcpGxSH6y/Rz4gojs9qtSopM04GQgLmvRE7p2cwGES/34/Ly8saN8ZH8/meDS4OAQqSucGJg2Rs\nXgpkZxxCG0QcC5z66+vrr4jIPKfT6USr1SrOhDkO2XCSQrChpK9Uusc4+AAEyqXf79fSB6w5f5t/\nSF8YXyZdcu2HFbTLP0TsHBR+jqGlZlm32y1zk3lXcEmQG75HatPGjX7ijMORywEGBs9lTlgL0rNw\naVgr+G/MleXTwYeNE59l5ZvvroTgbsVMgESq3Pucgw7oGe9tZA0Z9jrZ6Nop83dx2nEc+Z3RaFT6\n6TQnv8dcIXeWUeQok6gz/cBtu92VU7m+vi4pMo6kOz2ZHUf6i/PB6S3LFO8y+ZuggxS703AEyDgL\nrorN/HvvmANmI22Hn5+ht+ycIMP39/elrh7OKsE0e82cpOVyWTtV6nRZPiCQObQ4LgRMtl84Uug4\n02hciiBzGVn/fbYWGUWGLafIinl6lmGn/MxJcyqRMZijad6hHTUfdMmfmSKUU/c5Jcm8IoNOA+5r\nz4ZIkce0cmdBF4tFiWojqlN0VhgmAdrDjKgjUlmg7LVHVJGtT/9F7ISVSMWePXwM+uuoFGGirxZg\nnCeIeZC8eT4C4FNZjBXBN5LAZwgu9TMcsToK9ZHoiMoJzBFJRB2ZazabBWXguXDZIPtlwvV6vS53\np3kc/L3dbmvcGxRoRBQukB2pXq9XDJUdglzozsfBKRgJyulrOYhy920OSLy0HIW02+1ymedisSik\nZiNvdrB5FkYeZWO+mhUR/ATL9z4eg+fUXCWOFi+XyxiNRjVukeeb92I8/H1OEbo//D7HzHu9XtnD\nrBNF+a6vr2O5XNaOHXOlDPObo2ATRB0A4DxhYLn3sN1uF86REVLGa4SPUg6eL1AyuFRGDmmZC2Id\nwbyBSBIg4UDBzaMvzNf9/X25sDiiQs326Sbkx84s/UNGMBbZiex2u3F1dRXD4bAWUCK37D/zchz4\n8ZmdM0jdyI33Po19tc+AsQ7Hx8fxzTffFDlFpphro0cELaAojJH+cTDH/TBaYx4o/YPUTLCH7Ltk\nQHZcCbaM+toxwJYhwz7NaWTDwQABBCimneNer1fj6nkf0qzHMn/H8uFj/qBQ7p9lw2vm/zM2n6i2\n3s+Ode5P5kcxfvalHXRkzXwzj8GIK33LfC36lfuAc4bM8Dsmydt583j+N47UszhSEdXmslHMpQZQ\nLK5ia2URUZ3QcLqQzzxwNoGFJqJeA8QeL+kfo1/8PpMOouZNyu/Td6d7GDffNbLCO4l0MELr9bqc\nUGETW2g8T2y8ffPM73lunN7LiAVz2e124927dyWCPDg4iMViER8+fIjr6+ty1NRziIL1vXMRO+eG\nQmlee6pio0x4BvPGpsGhBEngokscUSt+iKTeAD7pSZ/pK30BqWKuXHsLInqr1SplLPxZLvppdAXD\nCjKBEaAPGWmFdOm0TE7rMF9GRmgcqaYir42knUWfaOIz1/ixQzCfz2O73Ua/3y9oCXJBSYjb29uY\nTqdf3cGIXK3X69qJHyJ51jDXSkKZ8z2fArUDleePVC/pZxO3nY7OQZRT2tmJzk5UruoPQooDZ4cV\nJ3KxWJQLvZlj5iaiIuCyzqvVqoYYOdVkXZNTEaQuz8/PizPFOlHM0vsmoqpLxR2DnK7mmegtB2me\nN36Wg1bQee5TYywRlW4nNejTWjhCOGEmm/Oex8fH2gER5tQHJFhz+hJR7U8/E6ceGbTu9NxmKgl/\ngxQ5jQ5SttlsirzwTJe5yXaDvWKk3X3nOw7a6SN72nKc0SDmyPbE5RDsDPKunG6zk4XTaxuS598Z\nn4ioIetGqegfwRzP9cELbI33gdfHQYazV54rk9SxMaxXdqL2Oatuz4ZIWVAiqguHSY84usTb57SP\nuT4oUzaVI3anL1CWjiZc3dn/NqQZUVUi59/2xP1/n7TIkZojZBSdDaRz196k8FIYZ04z4gAxpz5e\nmzemlRvPw9jbeUF5PT09xdXVVbx7965E0XDZUEKu0sxcoJydhuSKBxTV01N1QSXKzDwaR0bb7bZE\nrg8PD4UjxWWnpImbzfqlxYeHh8WZWiwWRUHb8BrVi9gpb4w1StqpS1If/K6LanJaCYXK/PJdw82O\nkkk1ohidFj0+Po7JZFIiea8pCginpd1u12qTRVTH/532pNbXPmeXsXAdB+OK2O3Rs7Ozks61zGw2\nmxiNRjGZTMoaY5C63e5XqKidN/YFStFyaDlHtiIqFA45Y12sNFk/5pp3mk/D33xmNMjPiqiuVLHO\nsiPE6cJmsxm9Xq98n9Sd69tkPgaGwwbHCLfTJpZv+uZaWOiPyWQSl5eXcXV1VagCj4+PBRHPJSqc\nZmGvWSfakBA0OkBEpnKgiB7nJgbv06enXcFXir0eHx/H+fl5RFQXWm82m5hOp185vBSX9PUnrA/7\njHUxv4Y+YGP4HmMB/bZ843wakbPMYD9arVZJyzFPToWC6tAX9KxRPq8HqLBr0DF//Az9nU/XGm2y\nk8n/87iNKoFkGbhg3rBHNH7PKBeN72E3vE68y7yyjN4x3znNzvwbyOB7ZEroi/WJ05m23TxzHxrF\nc3Jg5fZsjlTOiRKZkitHOCPqV8Uw4c4zO41mD5QUAgLqlAK/7yObNnoYdoSZ75G3diTo5hQBSpyf\nO9LJUR3OgmHKiIpfAYKR04UR9UiIfqIgMpRKy9F8RNTG32zujqOfnZ3Ft99+W5QbimA+n8fFxUWN\nxEsDnneEQiSGMnRfSYeYJ5TnYLFYxHa7LXVoInaOFFXE90UNfAcj5Tz6ZDKppVFdxgDndTab1coN\nEOkBz9tw46zbwWg0GjXEwiiA1xGD73U0UntwcBDj8bimNFlPIjWiQqclURLMC/LnO9bgLTn6zKgi\nxnYwGBQ5BVXzPpzNZsUxtzPIAQH2qvcTc0Pw5OBqX7rL+gIUDB6ZycGuep1T5uZeuA/MtxEeI0Q2\nFDhyTnMYMTZnx4VzncbhmSDSEXWUwgRukEnWkGCF9+V0NJXCHx4e4uLiogQH5iV2u92vUpcusmrS\nsB2pnO5FxtiH7EXvfdJ6yKJ1NH93u92SAmZuCDJ5nknFjUajxo3LurjVapX37iu4Sr9zyQGccHQB\nv2eZPTio6j/1+/1arb19NQkt40YVPV/W18wRAQjrijyxV4w+edz8HvrP8sZaOrhzX/m3gQEH5Pvs\nBfNjqk1E5YA43UtfkWHsutFRUoiM0/1jDf1er72DVQcfyA2ygpPKs60XbIP+39CoiJfyBy/tpb20\nl/bSXtpLe2l/d3sWRMq8JLxNkB48TUjZERUE6sg5F+jjuY7YHanjpRLR+Fl4tDyTyACY0MRgIEaQ\nhcxJ4r0RUXumI2O8crxhCOxEWY7YSE9Cms2et087MRaeCfzriMARBlE373AqA+Tl/Pw8zs/PC6mW\nMRGR93q9EsHyXubTcCwRkBE9omM4DnBAKAQZUXHnptNpjEajGsEb0uxmszutlhFHIF4QAtIboGPc\nydRut786WkyF5cViUb7nQoMgIiawwzEhskGWmDfzuEA8WWOOR19eXpZIO6Lil9CXiApRAqbmea6I\nTz/4Tq4kDzzPeJk3n2KKiHj9+nV5H3eaudI4awgyBEJGWobPQBtJhznVQLROFLnvdAzrnPkO7G+e\nYZ1BQw7yQQz+D+GXfyMHGeUwVwVUxgi00wTsd55Jior0tm9IAMEgtZWJ7+xVEGv6ws/pp6NrUr4g\nGhcXFxER5aAEXKmMyvl+P+s9kEanzbLOsO7KqASIFQiYUQnmB1n25cOkG41gR1TpXtBSI4DIGIgK\nKcCIqJ1KhGJhPhPIdZZTz5M5Qcw340ZOzQfy+M3PY//RF8s988d8k6nhfTldaKQLnh7fz5kJ7xmn\n+Iw25TUEoeJ5mROHns0ps2xT/DOjwvYJLN/IisfnPmcCPuNzCnYfny8j404L0y+vef5/bs921x4E\nWCuqTIpjkOZBIUSZxxQRX8GcTushjBYU/m8YMCJqBu7u7q4cteYdbHz3KaJevRvIOJ8iQsE1Go3a\nVQi87/HxMbrdbg1KBdL3JoyonBYfo+dZKCHmgLGa6Ggh8nzCZeEklisDNxqNcoy30djVSxoOh6Wv\nvIP19aYhXYTRd6VpGv2lkfZgQ1CLhTGgFEgb2hCycXHcmDvGjoK1U9vtdktKkFQRY3eqmMa84Xii\nnDMXxOuEcvW1DfP5PMbjcTSbza+u+0DhGvpmvlkrO6i8o9lslgMMVN2OiCJ/PjThqu849Dj9cNIe\nHh7i9PQ0Op1OjMfjuLu7K+M/Pz8vzh77zWlmO/sm9LJuKDCcFH7PTpiblSAOgOXIqSV+3/w5GyP3\nh7QkgQtX/dAfHNRWqzotyDNxnigZwglKZI7ncGkycoODzjvMWbEzQwqYMftwgxU9PCzW8ODgoHzv\n4uKiHJRwatDzjZPvNCvzgMNgg8P7bVhJcyM32VmhOe0KfcE6ykaPtCjr6YMS3pc4xvQNKgJzwx/2\niw9TOJhGrhiTU+WZ5wa30Zcns2bMGfrdTkY+KJXT8ayRHRE7eKxBPjDiFJWDk+wg2kFxmo059QEV\n/rZzw/f4m+fZJtqJRp95juHcGgTJB0gcqJgaYeDAfzslSrPNxkG1DfS8ICu0HCjl9mx1pEA1zDGI\nqOpV2NP0orMomQti3pI3d/Y+feO1c8QINf1brVYFjbAy9ak3E9loOC9EdNlRBAnx9xCiw8PDUuoh\nEwfZ1CipiHokZOGIiK+EYh+HiD7tO1p8eHgYvV4v+v1+DV1Amd/c3BTlgcGAr2RkwdwWNia/Y+cM\nRUg/mTeUD3eCUXbBMmPFzmeQna3cnX+3HDIO5AJl02w24/9h7816G9uO8/0iRWrgrKlbPZwcx7Gd\nxBe5yvf/CrmKgQSGY5+xWwPFUdTA4X9BPMVnL6nzAwwE+l9oAwc6LYp777VWrRreeqvWbDbL59B+\ngWdaHhgr9wIhKBFA3tuKkUObiZTX63UFIeNdQSu4Z6vVSie72dydTu/1dlEFMudjOHgnV81w1Mtm\ns0mSr2VqNBrluY6sPf2jQHqMuiyXy9xDyBLybeJteSQNCtPor5W2ESj4LDinRoNZWxsDxsO6lRG7\n+T3WP8yrCcIRkc7m2dlZrhXjwDGv1XbnJfLeGBbzjqwbjKCbs+LWDQ4m+Fuc/hLlQ74YC+NizlwN\nbX1C0QGOD/e23ubffM8VrUaBeD+ew/2QbRBn7yHW3xWl3gu12o6PCPLptXPTUeaM90QGkR/0YRn8\nWFZKPYQzjB4uOWsEEzg3XAR1jJP3dXEEsm6bUNo47oXM2sFwUMk7IgPmFRoFM3rDZ3bYjA67kIR3\neMnBfykY4vcEhdYZ7K9y3tAJyJsRN9aX35nj6MCgfE8j3gRLdpTtcL50vRrZHI/ekSnRHMrbhzAC\nyTJICziTRqqjREFsVDwZbHaTObnKqjYuOx4lyc2wtpUe74KhtyKO2LUUKEt2+QyFz0+MBU6NEQVH\nl5Ab/Xt+lv2NTH5/fHyMXq8XJycncXx8nEhSxK4/0WQyqVRxROwqYlqtViyXy0rkidLiXUtvn7ll\nbR2VktLg/nxmJMrjZlxl+sDpFH9vs9nEzc1Nju/4+LjSfqJMm7pSy6kkV1ExZiOLjppL2Biir5Gw\niEgnCceOXj68mxEltw6o1WpJDjfszjjYLxg4R/MoRObIChO0gmfyPcr6QQ0cwd/d3cV4PI7T09Nn\nqbvlcpkHPS+XyxgMBhUSvpFjOzxOwfJOzWYzU3Q44KS2bXSNABg95WKeQTztZCF/q9Wq4rhtNpv4\n9OlTrvXBwUGmqJbLZeqhsqAEpIX96ojdxODlclmpsOO+rLXXd7OpUhgODg6yTQUo27eaDWMwmW/u\n6SaQdsDsSFnO+X/eEyNV6tOyn5VRBJ7PWNw0GFTbwZQd1/v7+wxQPFcgXiCP3ofYH+TOqLEDPAoZ\nQDgh0e/t7eWJBg6wnJosg10yDUZb+Gl94u/ZObLT46q+Ui4sG8ivG1OWf29nhe+VqCdjtHOP01fu\nU/7fziK/s71zGg5ZYC1eyrzw3g6S7LCXqT+nij0e5tdj9/h4l48fP8ZL16s4UpTfGhpm8vf29ipQ\nb8R28CiAiGojRnvKbGxvbgQ/Ip4ZBd7B1QMRu8V1btbGG+XKhHtRcRJ5vo2eo3/SVDwvIjItYo5K\nt9tNw4TBszBTOo9Rs5E3j+Ilp8/polqtVulrhFFiM/PZcDjMXkERUUE6SDExjs1mUzl+wfCyv0cv\nKBwsp6gwJmx4O1lErHt7ewmps/bL5TIbiZaOratznNLleWx2KtOYb9YF2fHam7tiKNpGx6kWR/PT\n6TQajW2PJH5vp8cw9tHRUUWB8zdE5DyPPWY0xfPmVC97gHsxjyAlvCfOFYgw88CF01dykFgbd/3m\nYrwYE7fMwLjyPjZ8/lnyRSIinfnxeJz713uYOeEzR+VOx5RUAWQVx4TvnZycZAqJZsI2bGVazIEJ\nvCnm1I5brbbtEUagVzopDsqc9np4eEjnez6f5/vSBd9pYsZAdSzrZ0TnJU4NY+OZGCKOTyK15/1m\nQ8k4jOx4vzm7wJ5yqtyVya6EZK5IGRnhRodaT1uWMJqk+fgeDp9TWuV7rtfbNg1lypX0LO1YjDQZ\nTbRuL/meXien60BquFdEpIPpLIT3vmXEOpN7gBg6gC51q4NXZ16MSPOuZAf8rp4bAsgSEeX9S54X\nqBm/cxrPDlxpn0E9zVFzup99yrt5f9h2vnS9iiOFQrUBIx1mpe6XJ8IrkSwGD2xHuWvETuAwevy9\nL4x06Z3ymSN137NUaFx2zCKi4tXyfRSK/+alKI73tYMU8fwIF/+/HSkUgcnbfk+E3pAtYwWtG4/H\nFY98Pp/H1dVVxZHiXTG6jO3+/j6jcvrEcE5V6dgYKnfKxJuSsTvt4jmP2PHbvC4o6JKHQsRiWWy1\nWhkplqkWNqI3ael82+CU5FEcSIyf4WUceZQysmFiaEQ8i/aQXZxfv7Ojfz+vbNtgQ+PfEy2bM4jM\nYKRKbgKO0NHRUfYJe3x8TJJz+RwQAIz7YrGopFlRkvTS4XKKiN9b8ZNKIQVUIqA2gmVEzBqXaXue\nQed9Chz43nA4jHa7nXMKgkTK9eDgINNWfk/4PG53YHl7qfyfMdspKNtbYCjr9XpcXV1FRMT5+Xki\njpZP/t4yab1UBmJ2cjyvj4+PMRqNYjKZ5Fhd8GJDGVFFwTDWDjjtPDkwZQ9j/FmXiK2s0werdP5s\nL0rUxYgTMsI6cS+coYidQ848O2BjfUgxko2wrsFAOwiwA1JmU+ys2EkHWfJakQYt18jvWepKO3Ps\nuZe+R2BnFMiBg+WUlDV70ek/9j3tKdwAln1Arz4HhbbT5gIjgyXowUUgQ4sPZITLDqqfZ+f7W9db\n+4O36+16u96ut+vterverr/zehVEyhBmvog8QBAbIy9EbF+/fs0IIGLXFRVv8yWEiIjOeVe8fxCi\nx8fHZxwSOBg+M8xpP3OlPA7gW+fYSRcQfZSQNvd2RBQRCY9TyfWSBw2079TH/v5+JRLyO0fsCLUg\nCEYlWJ/JZBKXl5cxn89zjIvFIptDgiAYfjdMXKvVErm6vLzMKJ1xOlrwmUu8f0Q1lVrmykEk+N3T\n01MlLdpqtXJOLHNGuZADIihIwuYGOPIyP8ERG6kpGlh6nZFToiy+zztAFPe6OBI2OuV0Ycl/sBzy\nPubRlGhQKUc8w+lAw9pEa6TpXF3Knux2u9Fut2M2myXvDITRcu/IezweJ6/IqMVLKTEjGozRqT1z\n/0C5PL989lLpP3JH9Oln80z0DWgL8jaZTHJsnPPo1NbBwUEi5bVaLT+zzuMZrD0I5WKxyK7gXE6h\ngriA/q7X62y4WSJL7PdyP0Rs036k540UMc4yg+DP2WOkS+7v77NtyGQyybWwfuQ+INRO7XAxp6Cu\nRl1AlCk0MOqE3ivXl3EYqXKatF6vV1LQyBP/b46uW7QwbtA3I9Xs9TIlxmeWeSO7RnzNmyRV6H1b\nVtvxk33guWFe0R2MH/2FTfQ+BRUrW4lE7NKE2ALrT/QF/GYj4/P5PCaTSUyn0yxmYN7oLO+xlf6B\nEUX+1ilmrxHjY6z4AeZou9mt9aALN751vYojZTj8pTQVEJ8NBsb04eEhxuNxLobPGGPA3B8yrY1m\naRRQjK4g5FnNZjPLxl1NYAHld9wfISuNt/+O3zFeFIgFmqtMQbD5eE/4KqVSsGLCGBviRticRuFA\n2Ha7HZ1OJ2F6Q9Wj0SghYypcmF9KxuEucGZXROT5a8y1nQW4TvAySGexvqQ+MG4mx+JAeQ24p1Nd\n3hg2zNwPmeEdgY1xnlgXjDYKGUfZaQH+zr2bHCggW4aODeWbS4Iswk2yDJMKsUL0+pPSBFb3OMwj\nfKnC06kzKykcdkr8kYu9vb04OzuLXq8XNzc38csvv1ScHj/74eEhq/0oJmCt9/b20uFgLcwh8bxQ\n5epAxHID38dGnr/jd8w53yv7PJXpKK9xs9ms9IlbrVYxGAxis9nEly9fct6Yq4eHhwpHLmJXFeie\nYE6R0LUd54Z34Z4ljwvZf3x8zM7vrgi0nvPfc7HvTZC2nPjvS74Uzs5LKWHSKdAPvBcptPF+9HPY\nR66EhFPI+Gy80ROuYmUt0D1OCzp4Q+59aD3ri8OHTjDdgj1tPpTniJS8gxsCATvETtkh4yV/iL93\nxZ/twsHBQQY5nAHq9J15U6QA+Qx7gcNnR4SghL1kpwQnsky/uYCAcbh9Tdn7rpQl3tNBtp2aUn8R\noJdcLJ7Pfbrd7jMfw4Gm0/+M+3/jSb2KI1VykiJ2+WYLpD8jv8rkwL+4v7/PzYliLKMaR2V2qngX\n8rB2spi4b+WfEWRvGjYlQgCfJGKHKJngZm4RwoKwefwRO6cBEjHf5z3xzM1xGAwG8d1338X5+Xm0\nWq2KMWEz2alAcdATqNvtJoeFPiyz2SyVXuloMI8oG5en7+3tZRNAjlOwMR8MBtFoNNJw4yBjeMoq\nTeQCRcUmsSNlZ4J59k9vLDsLlh9zKDC4oH2utkEWarVaOpLmGeAIehy8K+Pgd+UzI3ZOnJ0g/g5l\nUvYcIhJFIdsB9Rwhr6xFKYMlz8pyDA/o+Pg46vV6/Pzzz3FzcxPr9boS5IDSULXGe5qXQ38lG2zP\nQ8lnMukXQ2J+I8a0dArMH3PJOmOFH4czYGVrJQ7iwjg4hxDd5BYH3Ofo6KjS84rAAq4HbWGYb8sJ\n+43vbTabLEbBGeWyLEVEBallv+Bkm8SLE4XzZSPEVRotPmce2ScOFAkkcJTKwg10ETLIOqHfCEyM\nUtiZcGBWOkasKfOG0cTJ9vid4SgzDZ5H26/FYlEJMGxn7LTd398/O07LSJP1jmU1YleswnsSfPKe\ndpY5K3C93h0VZD6Ugy7QPp6Pw4+Dilx5/AcHB5XeinzP728bhc1A1sp+buj9EiXF5iNfjKF0suxH\nMNcGElzNjGw6mOL3PAO9aZl3kPrS9SqOlHtqMBCiDgwKJMyI54eE2mGgDB/hsSHkJHEqWGww7JE7\nRcTzUKQ2WhE7Q4tBsYIGZrYidiRgheJIgPfFsBvCRkD5XlkeDBR9dHRUUVBEVN9//32OqUTIeA+n\nSSN2yABGzaRFHK7RaBSz2awStfD3rsC008K8OvXFGLk/CpjPkAEcUxSgv1c6H8wpBEi+42jWG8My\n46qxUjEzJ6yDDRsG2+OyYfeckzryOkK2LNEFO78gQE6fGrEp03yOqrzOL0r9hz4AACAASURBVP3O\nc4dcl+/C/sFB8f3puXZ7e5tIru9Zr9dzHx4eHqbs39zcpEOGs4Eclmiyf1oOHBRYEZNeQzbKlAqH\nPbfb7WeNF7mXnSyu1Wp7qG8ZYBHlPjw8ZHEF42AvcLahURcQEAyyAzN0kBHCiKhE8uzX0qkhSCwJ\nuI7mPS92bko5tRyUTr5lir/f399PhLvT6cR8Pk+ytaN/5Iy9Y5kxisw82CjyDuwRrxMpOJBpp5Ih\n8GMsjSxxHwwyzye1aj1jNJYABwfArXt4P9a9dLDZ34vFohJkg046dc09jWyWjp9/8re8K/Ns3cgY\nsVugwk4XMk4cP9Au1ubu7i5Tey+thQn8pk3YGbIDip4BZHjJibGe5p6MCafY54qyruwr20KvRakT\n3bvsW9erOFI+zJcJAolAkRkSPDs7q6AB6/U6ERJY/44AHLWsVqvk5XhjIJj8zoYNQXGlnb1vIEnn\nZvk7M/+tMCJ2R2WU71mmpdiQEZHpMZyp5XLXrNNdq2mc6YNinZZxGsP/5t44OqwFCIbTSoxhsVhk\nry8rcO6JoLIGjJdndLvdilNrhIn343coIs+nq/0ceXgNidAeHh7SkNkIs4m5p40skTmGtIwOee58\nPq8cj4OSQMm9xGGAK+IxOnePgfQzUQx3d3cVRxrkzsiUEYQyhWUFgbPQbDYrvA0+93whn8w9zoK5\nR3S7xzlZLnedrUnBHBwcRLvdjvV6nRwjl+m7WSDPc/TJe/szO9F2GIjYQczKI6VIa4OQ2rlE6ePY\n2TmDy0FQxNyw35gDZIh79vv9NHy0u4jYdZnnvc3/xAFgjNYl/J53LXtMkfqLiIrzgWzZmHndmW9+\nX6Z1mTsjwdyX4LAMQJhDH95cogteY6eHQEeQ17LlB/sJ2eK5HLtT6nf0kINgyxTpV+7tzwjcTDPx\nvLFOBAsR1SOHarVapmr5HqgQ+ob5xlF3IO35BG2jythOqNsfcFnv22EwWIAdMOfMz8Qh4p2NcEN1\nKOeUABZns3TIrbOcvgP4YL6tj1g/ZN+ABWvG+hlZ5/vYb1/MM/cymlvazpeuV3GkHL1zTSaTNAoM\nngm4ubmJ9+/fV9JbFxcXEbHdGNfX1xGxg7Md0TldZ8PGxOBNm+vTaDQqTdmswIigMEDm4fhyCsrP\nJ+r0uXkIE0JuRypi1y6CCJNIr9frpQOFE+UNbJjdabOInWFHQbzETSBn73QSUCkG0Uq5zEVHRCUy\nY1M4zcTziMR5lhW0N1KJALo8vHx35p1/28ii/MrvReyOsWDuzCNDHsr1ZQ4pjy55RyhzI6Ceb2Qe\nJWjH1albyzBjQg55Dz8Tg2+H11E66+/0ldEIo4VE1lZcRlXZC/QS42JPksIiamUNO51OOjYR1dJ+\nFDsOjtFbK+ASOWYezYHyOBhfrbZNxRrNQS/wOXvK84ETyTv3+/0cM58xDvf0Yp6McuIk8zfmZ3lf\n2pg4pYN8uImsZc6y5rUmALWuKVMlvr71e9Z/OBxm/yXLAAEXAbIROQw0PEgjsDgfNpZ2CsfjcUwm\nkwzOGBd0BBtExohcYMTt1BHcWhaQbzt0XiP+Dr0MbcGcLL5vhCxi197h6enpWcoXuX3pNIX9/f1n\nfFOey7NIETMHvofTe6ZN2KlhzxlM4KfllTESPGM3S7TSOpw5YT/xX1kcAIpdIuMeh5FF1rdE6nGw\nmW/aLRjYsD5hH3rflaBIeb21P3i73q636+16u96ut+vt+juvV2t/QITq/HBE1fN1ftxw+MHBQZyf\nn+ff1ev1jEyMDhF5ANGbC8G9iFzwZHk/R4dl8zpXiZRRGpGL78E9I3ZwvSvaHL3CsXqJbN7v9+P4\n+DgRqZOTkzg5OYlOp5P3dTlnibgYdSs5Fa5KAP0ADSovUEFzzJhv7odHz2eOyl9Ci0Cx+K75S8wf\n//kzZAli90uctBJBIqp0tUz5Gd9zJOQIjfUllUqqGeTDuX/e1ZwA39scn9VqFbPZLNtGkKJiv7hR\nKBG001tOi/C5Uyfl88rCDCNQ/J3HwJhANElvgP447cVe6/V6icKCGrvM2UhfCfXT0gKUmKtEHplf\noytGJRyVU/RQq9WyZYD5J/V6PQ/HdkrU6XhQLMYxnU7zeBAqW09PT/OePAtOFu9vvpWRGd4flA/k\nkDEQXYNu8V3uCZ/I5f6MgfE6nek58Bw7feN5L/92tVrl8VF0f/dFA0SoCS5CgbdkYjlrityiE01V\nME/V5fLORLD3ywtd5H3Id5jTkmwPr2ixWFT0M4U8JSUiYptpYZ+5KzvPs36zDsZeGTn3+Fh75MDo\nGegtYzDvzkiUES4+Q5eY5sGFHeVz3sfyVtpFo+geA+PAxrJXGT8Vty5MK3WG19JrZ/3mtK5pCeY5\nM3ZzqKwv+L3f/Zk8ffOT/8MLgaP6KaLK27BBjIiKgel0OhUYM2I3CfB2bKCB050LjqgeFAycW1Yd\n+Pe8D++LMDrdgECYTOhKMUOGFgR4PKR29vb2ctPxHoeHh9HtdmMwGORxD/1+P51DK1su4Gt3fmbe\nECIg8zJlxGfmCHEPBA3n16kENpBTcVwWTDs2QNBs/NJxZSy+v58LBF5+BoT7ktInpYtyNMRrgw4X\nis9QeDhSXE6fMu67u7sKb8NKsuQKoERQGK5429vbVizBh3AVHf9h9O1ksZeczvX6mmjrNCX/NmTu\n9eX7ZcGE95KfV3K0GEtEVNIlOAplhSbpIKeRS4fWijtiu9/m83mlw7TfCw4dh1bbyYzYVqdiBHG2\nTk5OknfJmiAbe3t70ev10jiWfXAwpCWBN6KaHi2JunzHqbmIqOgm84UidmkZ5NH7wuvpg3x5vg1Q\neTllw37kfjiIw+Ew01gm+k6n0xgOh0lPYF+wn9BVnh/0noMeOx4UGC2XyyTxezzWXXaGMZLs+9KR\nIlhxyhzj/fT0FMfHx2noeQ56mKIpdON8Pq84dawJ33N6nnEx9pIK4fQVOsp8L2wGwQx2xHw587NK\nHhx6sV6vP9OdOEqr1Sp7RlmfWM+UdADLnNtU4CTh1DlFT4BBmpgUdcS2KpE1MoeW5+E/MH92epGh\nMv3Id5HB0jcpeaTl9SqOFIrB3q4/Y+HttHwrJ3p4eBjHx8c5kdPpNAfMwqNkOJcuokrYI+r3RJUI\nFJNO9YYjayt3Frd0FGkOyec8N6J6GCz/76gMQWi32zEYDJILQW4aB8oePQLKBkUZWWkbkXCE8RKf\nw0aR79vg8T2+WxJOXQ1j5c9nRBUoVBs2bxLPm9eL55pbZQNbVuYYBZvP54kAMa9uGuiqOeaUcbjK\nhujqJQVeGusSBSNyQ24wNETMNipGM3DAN5tNxXChDOBSlO9kx82KDkeWy9ElCt+RnflMrBtryBio\nKmO+zdlxBa8rJrmnnT4jWciyo0iPg+/aebVj4/5xfO4xwpskUkYuCOJoOsr6ttvt/B5z73Mmedf5\nfB7Hx8fPKuTm83kiM8gbetDcTz4DAWMP22Cw9+v1eiLHNvrw1Ox08xlrbqJxKQf+W/+u1WrF2dlZ\npRcV74osLhaLRPsYP/qXefMasiceHx8rhHKjjA7qeFf2DQ4t62tSN/NipGhvby8mk0k6zC60oAlt\nxNapNtmc90YueR/zLZEh5NC/512M6ltfWrej890nq9SLRq/s2LAHy3YZETuHnwxNeSHDAB6WU/it\nOB3sReYR3e49bsTcup95BGBwdTRzaGfHAaR1RhlcMW/fQikJgLH3Rj/xJb51vYojRQrK8CLOAwrD\njhQKpOwSHrEbJH2Ibm9vnx0GzN/b8FHeiUPHYZ4RUTH0LBZC6vQUxslwu2HIshEeqIU3dkQVkSgh\n8dlsFrPZLKNcHDKeV8LEVnCMA+E3cR4hMSQKSsBGxGgYSgXdw6GwsrXX7k0fUT3YMqLaKblU9GXK\nibFx/5J8yf+bSItjiuIsz5djDkBH+Iy/x6Ep78n7oCCtvEBPTKwso31ko3x3fl8qAae5/OyISIXN\nnNgQsZ5EipYtPxuDY6cWRYpi9Pjr9XqldLpMFaH8+/1+9Hq9fJ7nn4g5IrLAxIqb5xHcuJKG98Sp\nBFFiznEmn56e8vDcx8fH6HQ6FZQPJInneYwgdXQrt+O6Wq2y0SXGnfk2quk0Ow0nI7YEaVAmPnPk\n3Ov1KujB4eFh3N3dZbDIe4PSm7hsnYIBoyu6969TSHaEcKxLJNnz8i3ECv30D//wDxlIcb4fa06F\n3Xw+z15bOKyQtUHleCYBEfLswBCd8NJei9hWi56cnDxrqExAVDqLrBE6zvoN42rkhcvBvasumRfL\nrh3ziCqR2VmTcs96rdAfBG2mKHgty6pPfg8aD6rmTIEdJWcA2u12xV6bUM+cmIjv5/k+1us8z/Lo\nLIWzG8iDn1eiTRG7fVGigJYZO9LlM2xj7GOUgUR5vXpDTsOLrug4PDysNJFjkah6s7I1d4NGiRGR\nB+E6D2r4l/41RKgoWibN0W5p2Bzle8IjdgJi5WblWkZfhndLCBZFCZRa5v9LbgljtdOAk1GiYDy3\nFHKcKMZQjt9jdprSOe0SIWBMzEPpPLF5S5jdskIEZ0eyREts9EAh3ZgyYufU1mq15Ok5VeQmjd7k\nvA+yw5xylQqjTIUgVzb8yDAb10hZxK6CzakUfoeMgP6AmEREVnAig1burJODFz4Htkepl/LhVILR\nI5BkIjnzH+2Mkjbgu/v7+zEajWKz2SRKZLlAVh4fHytNc5EjHPqyxBkdQosQzw0VZYzT69TpdOLo\n6Cj6/X7SCcyLQk/ByXPTSebIFYqMA90C4meHAL31+PgY0+k0uVWknknPG80wVQC5cEAHgvISr4O/\ntZFnznAGSK97r5WpXu7FvFv+G41GnJ2dRcSWJ9RutyvHQKH7Hh8fU9czJuQb59oojeWYvktO1fl7\n6/U6kQR+h+OGDjbKGbFLNZPa4jOcZ3qb2YCjB9AXTsWi7325pxPPZJ+xXk5/od9LVA1g4FsIo9e6\nRFwJFo1WoZP4jN6EEbsejugEUn0ROyeTVCJ/53fgOy/xmZC1MlAyqmZ0GVnBttmu4TSWVZsRO32C\n3vcaYHvsK/gq5b68XsWRIlLyyyKEeLZGQVgEoHZ7vD4+BO/TpEujMm7Qxf1BXSzs3NvRhx2oiCr8\nXUblLIpz+ihroit3cDbkSMTA8/b39zOdiDKz0ua+Nj78tIPGRi3hSZQFyjpilxJljCgej5t3i6j2\nmmEM/J0Rx8VikeMoOVne/IbUuS//BoFgDdm0PmIj4nmDTMPDKHLWyLwFKx7Wq0yPls5TxC5d7b8x\nsub0FH9jTgFyhBLge6yd0wEocZQgcmFIHQOBsrSTaeTBqdqISKcTFPju7q6ShmHdUESMH+4i7/ZS\n6gCn3Kli1on5K+fX6S4jLqAbDiJKpMTGxdwyDOJoNMpxU6rf7Xbj48ePue9ms1l+RmoKBMVR+d7e\n7mgbHB4rd4ISCMTlWXtHR0fR7XbzKBXemaNJ3KIhYpfa6/V6z6Jq5JuAwagDsvgS+kTaDcfXaV4b\n1ZcCU/7tgIG5+fjxYzw8PMRkMsk14Ygg0pO8q/d6q9Wq7HenZF7ah9bhZDB8Tqgv97FCZ9B4mD3v\nZxOUkWYk4OIeDhSs25FJt5owOsYzvDciqvu8RAedtsaZdMBjNAfdZkqLnSPf3+vKu5Ryw/1s2/xO\n6M5SPzDPJurzLp4/yyL7y8Vb/I2pIyWAYPv7UnoaPet7lrL0/0Kgyuut/cHb9Xa9XW/X2/V2vV1v\n1995vQoiRdSNhxux8+SJSs2DMgrTbDZjNptV+Ex8D6jWnuRqta1EaLVa0W63s+IN6JBqC6eJeEci\nb0c0RsmMcPCTvy3z4URQjMvvSTQBauBUA+9qr53vAXUbPSrz3eTS7amX3zW8yZwSBdj791hJGTnq\n9/1KzpbhWSMSjMeImc+jMp9psVhUogh+wqugeZvvCdTsgoG9vd0p5D6Lje85Bed0AvczIuo14Tus\no+ebOWc+WRfuQXQGImkkK6J6dh7vY9JnCet7L1jOvPaOQP08p2Udwfoz/h5koV6vx3w+r9zT1atG\nijkuhHE5Ted7m0uETnCE67QUqcQypQ3fqWwCyzqR5kCfXF9fZ1UQ62V0DR6To2DGAZJepsBcNk+z\nSGScDtLr9TrTkOahWMaMTIAar1arRAiM4h4fH2c0D8rPvdiDPId5ubu7i59//jkRI9MInOIvES7W\nGFmzfkJGTk5OYjQaPePDwS1DVnkOa4TuYs5MqEfG0N2me3A00MPDQx58zjhcqWlOFrrFdBDrIjeT\nNW8WJA1kyLYEdNkpTC70DjzUzWZX7UeazEiTdSlotCv4LG/oEfad541CGqNIETuqAP9vHeGWISWH\njndFr1pn8BxQvpJzauTfdt52zVXnrIPPRLScIofoe2d3/G9/h8/M5yrRqP8XOvUqjlREtS9HxA6C\ndG7YJ6tjmBEuSpIhYnJEgxcRRwjFcH5+nlUflNzDJbETVCozbwzSaCxI6SzYcBreR/DNbbCBdkrI\nm6fdbsdqtaocemwYsyQplkosYnc0jeF2ExlRHM4bG6b1JkWZojycu3Zqz+vM85yDt/HGCOFEbTab\nSqFBrVarVNaU/CwUpufe6VycPjs9GGZIw76nFaONIunoEj5mDOU8lXPn9zcvC4XKOvozy5G5BIzD\nKXGKESKe9+3y9xgbStXOcrmWJYfECg7Dz9w4RWlCPX3OaO9BKoN5I03HcSnu7owjxHqRTvERIHDO\nms1mpUs1c8062MnC0eK7yD69kNbrdeWEBMbulLDnhXdjbY+OjlJHRew6apdOtOeKquKSb4U+KI0q\nfCIoC3YIeNfSCPN+dL72PmSfQQR3+op9XXJZuOysvxTwnZ6eZvXedDpNeePZ1ouWNfau0/YRUdnX\nflZE5JmVDib9XVMwXI3FeNFRbm/B2HAYLIvI6XQ6rZTsI084NAcHB9HpdCq8NRwUeLp8bzgcZqq1\nXt+dp8p32NcOKk0xsb50/zVS9qUsW26cAuNijzL2krbAWpqu4HVFfsqiCJ7LfuR7OP+WQV+m7fgd\nDFJgG2yD+Ftk34GfbZd5bsyRdUJ5vZojhQIzh6YctDcbCpfIi0E+PDzEzc1NLJfLODk5qZDUjIBw\nnAqIlPuE+D24rIh9WZnxd3ZsbGQtiCi1knPF91CypXPGpnHU4Dm6v7+vKGkrTiIEHAcLDpwNR3tu\nroizxN+VeXVHJnYWI3aKqlRe3rSr1Sq5IDRDvbu7i4eHh2cl0DgRcHe8WT1PZWTC5xg9rxPjs/PO\nOqGIcGxMDvW8oHg8H1yes/JznAOeSwsA5vIlpW+Oj50eGzhImH4+37eSctNMoxWWS+btWygPChFn\nASfOCBCK/+DgICaTSdzd3cVms6kYE/rsOOhwBErFFc8teRwlWdZVbEZBPQ4bCxSqHQH4NSBwPiII\n2ej3+xVC/d7etiEmfXsspwQyPiLHFWKME91hvcc4eV8739wDbs/x8XGukx1P6yz2EOthOeTdMZKO\n5nE6/T6WF+aOvy2NFAgC+9vBVjleF1Ogf8rDlz2H7AEjciZS39/fVxwL/h4HxeiJuZvmMXpvlkES\npOnj4+OKrPOerDF7mHvSRoN38BjgJ47H41ittsTuk5OTiKg6X+xDEGjemywGQSpzaoQeveJed0bN\nvYYgd0aMvd5Gs+xk2WHj37ZB7Fk7+tyTIK9Ef9n36CGADcue5cpZCtYfXWJdShaGgIL7uGnw/68c\nKSJMR7sIJ8JhyNWKgAm08DuCt9PRarWS+NfpdKLT6VTKw70IpbfJhsHQlsiKoycvsqFKDE/EzpHi\ndx5TGcWVqJqf53dhYRE4k5S5DFeWa0BF2mq1rVKhlxKC/1KqwlEFSoJ3L9MGRqv4LkalTOE5rUn0\nw+UKmnq9ngfeopzYjE5TPj4+xnw+TyKz54D1JrVXEtuJ4hyd+DPkr0zbMX7ey0bR8+X3QE55P9Ai\ny3OZ9nIbENYXcrRJno68mWPWAdlGzsoKUu5txcQYWd/5fJ7Po+qsXq8n8ZzP6GiNzJrETKoMOSNg\n4p57e3sVON5pNubZCKoVMpVWbhkQEYk0gvZ6bozg8b6lYu52u9mfiXsPBoN8frfbzdYLPA/nkf1G\nawhkEATRUT1/64CGq0SgV6tdbyDObavVahXjwBoyBigS3Pf+/j6urq7S0D49PVUQfN+n1CeeO+sL\nf9ZsNmM6nVbWCeeQUnwbVQJg9ken03nWBw8kj88jIh1FZzDsDDNe94XiM9ae/9wShv1QOv7IVqvV\nyoDdhHIj7g4wWCOj1A4MaZtB3y1kjc7x/X4/74cNQKaMRtqRBIHHvrbb7bxvq9VKXWPknbkh4C2z\nDuhzvlfqRuaPueC92HNUNpZZg4idzsGx9zpxeV9gX+wQ2uHl3yU6xj7BL0HfRuyyDS/JfL7DNz/5\nP7zgVdiZIOpwtFimvnBq7GFH7LqN4wjwbxaIA33tGXtTgIaVufLSEYjYRWZMtHO+CKEXiO87j196\n2SgEQ5yGbMt0lh0polkcQvfJcjoNBM7RkJ3RWq0Wo9Eo70seuoyuKRu2YHkcZQqJ8TO/ID12bOwQ\nAZ8breLvqWIqBbzT6VSUKrJgpWfHl02JQ1XKoQ2BS3lttMu14KfHUkatdkbLYMDzao4U70/AQJQe\nsUOW2u12lu3zWYkUGpFB2dEry9GXUT07/YyRMVOF5L28Wq2Sm1NGqSBLyCNzCoLMvnLHaI+XPWpU\njd42/K3XDbSZ5pAOtsyNQnka6cABMeKH7DPPOP7oGoKyXq+XaJajZNZ8uVzGbDZLY9pqtRLB8Poh\nM94vOFx85pSRKyZBWjD2Jd+UdAmHnXstZrNZjMfj+PTpU7x//z4/43slks5lXYBuY97QFRcXF/HL\nL7/EZDKpIGt3d3eJbMJbjYhKaxrGUvIxSePhhPNZxI7n46o1Ak90vA9BtjPDOOxIGsVw8OVgCZ1s\ndJW/Jw2LE03F4ksUg81mE4PBIPb39+P29jYzDxERV1dXcXBwEIvFIo6PjysBpNcKYIKAmPHbDs3n\n85Q5pyvLVjvWOWUGh7kkOAeB5Hu2oQ7M+DcZJv/eGSrskd8FfYk+9t72XHh9uJfpEB57xG7/Gzlz\nsGLH29erOFKUAFv4mVAm2oYPRWIHxz1hmBQWEWEk3WCinC9D/mXulsm0YeInC8IY/FlEtZeNvWWn\nRkoHzZFQqbDKCMAOJhsGo8jfuVGjkb7SeLvkl4teKygaFFrEThGhbOy5R+xIzn4/xr5eryvlryaA\nGmms1+vpSBGlYkBwxnhPyKG8P99DiTKPjoQctSBvzAv/9tzZWbLxtRwyXygAjLHRLOYLhekNz/qx\nyZk3lAVOhlO7oD5HR0e5RjZodl4x/FwQX9l3VjZeh729vZxT5BQ0wKmter0eZ2dnKRtloDMej+P+\n/j76/X4lPWD5Ho1GGVCx9hg2ggK/hwOSRqNR4SSZY+GjLSwjpLRLw2jH0c5bt9uNfr+fjliv16ug\nfZB6F4tFdDqdvA9pmIeHh0QHMIroJkrr2ZO8B3KCIbFu855oNBqp5O1A7O3tPTsiBONeBkSDwSDO\nzs7i6uoqhsNhdLvddCTYY+zjUkdZNlgb65TNZtt5//T0tOJIU3hA/zJ3y0cn4IC4iS/B3cHBQTZh\nZt6sYwm6S/SDe5VG3UHAS4G+G8va0BIA8rfm8rF3Sb+zTpzNGLHricV7gkahb6fTacot5xYOh8O4\nvr6OXq+XGRfGCMG+2+1Gq9XK3mQcm2QbwkVmAtsHCZ6/I5jFoXXwVdI/bDdJ6aOvcLJw8tEXTgki\nI+wVOH3smdJhdxNuZK8MvngmOtGpuvV6XeFm2jllj/xviNRb+4O36+16u96ut+vterverr/zerUj\nYpyrjagiPnzuPHqZV/e9QGWI5k24dXrFED4eJ6iT03gmpJE+chRC7t0oUsTz9ge+uIc9bv8/0VHJ\nxwD98vw4mmg0GpUT440clZV6TicZDiU95tQTYyO68PeIIohATUB39Zyf91LDUxAER+AgPY5mI6pH\nrHCRrpxOp8/I3bwzESbrybu4fNZRGd8zF6DkpvB+Xm8iT1IA7gzu9ycK87uSkqRCx6kII6Dm80VU\nKxOdgoyI5Nu4oWYZXdfr9SxUMFfA6JWP1/A5ZUZBI3aVeYyRueB7e3t7cXx8nKgNa8EcLhaLPOTZ\n8srflpwNokZS/uaXMDfMf5lqdFdzIlOj0SBUrD+f0VkdfpXPY9zb21YzzWazePfuXRZORET0er1M\n7Ww2m0zpMEZQzvl8XiH/Ov2IfjKJud/vZyr4/v6+kjJiHGUqharAyWQS19fXFVnb29tLQvPNzU0c\nHx8ngR0OCUhQicow58yPOTqmFFxcXMT9/X1WBpJROD09TT4biBRpXloHkHVgjKQw4ROZqE3aeTab\nVc7RRKch204HIwesu2XKyCNyZ3tkGgGHi/P3Tota7/vs14gqknN0dJScsPv7+zg9Pc15ub29zcOh\nn562x72QrWGMNHAFmTLCPZvNUtfw91ymOpTZFNJpjNk2x2uPjkM+jAwig9yj0WgkXWOz2VTmDR8B\nmoptt9+7TNuX2RxTQbBZ6Eae5+akoI6uAPbavHS9Wh8pJhdIDkUBdGiYz04GisWKDw4M3YZduYNB\nYUIt/NzX/CU+8zvxDvwkzQDUWRLmMXwWRnLFVtrepOZ6OD/rXiLcwwLFu2AoyiNFSOuRZrLgYODg\njvEOjIs18HyzSXhHuGdcTqnZiXW6hE1Vcmj4W6d1MeQYPVI1EZFcFfd2spK0jJgL4TRR2d6CDe3/\nXiJAlukLp/DI+Tt9h+FHFuxoNBqNyuHDJrlioG1UbYT5m5IXwLOXy2VWBdqpZR2RN74PP83Kn5QB\nBgpi9P7+fhrtw8PDJMTCLbFjfnFxkfLhvd1oNGI8HqdD7nQK6QOfVeegCGiesZE25bt7e3upFHFk\nLANWsrxP6XSTGkPe6DtX6gVSU5yJN5lMKlxNUu3wR5zWZp65h51zZty5IgAAIABJREFU5gPnCqcY\n5wkeE2ensb6sn3khXKSkN5tNFmREbB2+wWAQ5+fn8ec//zlub2/j06dP+TzmjHcxRcF6kfn1vmE+\ne71ezk9E5AHBR0dH6dyXQSMBBE5DxNYoci/SUcw3jhzjs55Bbzt9Z2I478tewz459YMDx1jZa3Z4\nuRfvjd7a29sVTEwmk9QTpOjNLdrf34/BYJD3gxR+enoa8/k8ZrNZchUPDw/TkTZxm0CDi/Sl9b+D\nAeaeQNH/RhZxRlyggp1lvbx/0K/MMwGGKQwR1cKOl7horD29wSKi4oiyTgSVJYVms9mk00YKnjEw\nV4+PjxUqA/OJ7fzuu+/ipetVHCmM4mQyqZC8yH1inGyEMDDk/b2BUaRE515ghMWC5av8bkS1HQGX\nnSs7e5AhuZdJch6D+Tg4Jy+dJehoKGIX0bzkDaO4XqqE87g3m03lfLmI6tlJKBQT3BFilw0zfnPE\nyKszBngnlN8aBWE88EScY4fcXvYCQ9HxO6MgzH2z2axwrzzv5irYOcVxRGF4fm3kjYQaGXELAeYH\nzhBrwv0Zo422KxaJeOzYgNa57Ydl1GtiJM/IktElc8Qwshhjj3k+n+d5YrwbChoEEKVP9VrE7lgK\n95vBYGDovbfZv6PRKLrdbnS73exFxcW6gpx5r3G5cMGE65ITVxp9B0LlfrPjYUIyjToxmsgtzzg+\nPo5arRZfv35NI8hYceTQEcy7jRxOPe95d3eX+wgOVnle4Hg8jnfv3kW3283v3d/fV/af0WfmhKo8\nOwv1+rby7J//+Z/znZFDxm7DbV1aoumuBqTQgmDQxSvIHA64nUzrByOEyM1kMsl97AADOUGnQKqP\niCw8cjDvve9WJF4bileoiOv3+xlEWCaMoETsAivkxXLIGm42m0pbHr6HE8R+LZ1veHfwj3gf9Lz1\nTYnK8H3WhGc6qPa7urChtG0OwAmSbEtKuSt/Vxbi8DwCJYIM6yjujd7z941io7N4Ns9C97EPWTf2\nPwic19BOXXm9miMFzM3EEXljoL3R2EQIvZUNG6hETfie008mlDu6x5Ba+B2VOJ3GpmZRTCzEMcEg\nsJBc3MPoD89zJFymCN0N3R42UYBhdkdXjIE5sxDzXgioFSH3NNnXwmgFbeIiY0dJ2Qnh/VlL3olx\nOO3puXEaDfKjHeWIXeWXWxawVm4LgNFjzKQFXlonZMLpOUPzoHyeb77HvFkmUVCO1IwuUGGGo2mE\nxA6BI33GjOK2Axqxq3h8enrKrtIREefn5xkhllE5zhxOoJE92hogo14nnK+Tk5Not9uVoGU2m8WX\nL1/SKTTZejAYxGAwiNlslvc1wRljWDoeyIxlPiLSsVutVs+iahupTqeTMmw95GianzhSOJRHR0cx\nGAwqyh0jNp/Pk1TLvqDP3Xq9TjSrdPoJdpx6IoKG6mA0qNPp5P1ns1mcnZ1V0sFG4O3Ez+fz7Pfl\nc/UitqkmUmKnp6cxHo8rRh9HxwbFASYXDhjvY0OJw+t7kO6lp5gdAnQ21YRGemi/MRgMKtQE9k+t\nVsuSft7PvbXKjtkQ2tF/TpfjALNn5vN5fs9NJ/lbX/69U6F21Ph/3un+/j7u7u4qjnCJ4PO+Tqnx\nXfQBY/VnzE35GYE8Py0bUGYIwCzDpsmURS04gbxPidJbzxmpZkz1ej11tG2wD1dH5hiDnSD/P/vA\ntBzLLTad5t4GAco1La9XcaTgZZToEIuL0WdybKwxWChMO0OlJ44gvAQxR0Tl78o0nqMrvhuxi7wR\nrDJ3iuEHtvQCIIQ82/dGAEGK7JlzLxtyX077OOK384PRsfFjs7AeRiF4FgqKuUIpMV+eT1BFxmfn\nzP8PemOHyBvBUSLRFuMwQsAY4SUYdcKI4kjRw4bne60pr+Yq00dWNI7mQJ4itkbIfDrQIKInlBCy\nY2eCjU0bh9IZZDwoPe8bFOxqtapEX+T4QUJc0eaO7ay/kRXkqByj0yk4OXyPeV8sFomQsE4Yebo+\nTyaTHCM9fzBw+/v7le7OtC9g7v085hKn1Nwr5IP9Z5mi6SSIolOjGFEiUNYlYme84DzZWQDZcSNE\nIwI2EsvlMt69e5f3vL29TfTKe5T3Qj/ZwSb9y5Ez4/G4QjFg/Pxng4i8vXv3LmazWQyHw3w2nEO6\nj/O84XAY+/v78f79+0rQYVn0c1xJy/xw+agUOoX3+/18H+sCG/DpdJopQRA1HAjLxnw+j06nk+vl\nlO90Oq0EgZZd0ousgYNkDCuUBHMVqR72XHu+cXrpeF6mz0Cbms1m6oTpdBrj8bgSYHDPdrsd3W43\nBoNBIvLsrYhIDtR6vc4UZGn37NzyE3QXygR6jjUD7GDflylDrwVygY1wWs+UBv+t6R4EFryT9V63\n262ker234fxhw6wT5vN5OuPouPIwZ9r9lClP3udb16s4UnjmHiQoTwm/R8SzyMAIkR2aMjoyIlMi\nOl4YOx08A6NneJl7WhE42vE72bGL2B3L4fE7SjM6g+MTUeUm+Ds8z4RSc6CAmf18O4i8z3Q6zZO2\njQ76dPsS1sRDd08jfm8nAqfDa+LowBvRzlnJaWA85cZg/HZczTGwk+s0Fs6YYWzek7/HCbFzimGB\n01Gr1dJRQpEBR7vFQsQ2FYGicT4+IioGAHTNRtfBQnkRWSPbzB0ybc6aI3YMD6iKo0s7Guv1rplh\nvV7tTO20gJEfDLs5Qhj+VquVTkNEJCkbuXl4eIivX7+mTADfk9Y0pxIn0YGGHSLGZaQ3YpeidBrR\nQZtl0k42aSee69QexsmXCdaz2Syenp6y8zWoy97eloR/dnYW8/k8z7iL2BrEwWCQZxiSUo3YcdJs\nOLz2ZdqjROWm02keWcKFjJJaxnBGRHz9+jU6nU6cnJxUUh5lMMyYXiKk8/ePj49JYqeRK4Yd3g/j\niIhMk3748CGfMZ/PKy1fHHARkDqjYCSe37t4gPfmHe/v7/NsRNYSR8sE74hdGgo7g/5kfDj6tHYw\nR4iiBdBH1tD6g2DRuhOOjx1NIzbsXTew5ZmkzJkXy7cdIqNgOJfQR0hxsj7YIWy1u9MvFosciwPh\n6XSa6DV/Z8eG/e9CAr8L7+jAkzYSs9ksgwZ09Hg8TpQPvqMDVZxd0ro8k33vwKC83tofvF1v19v1\ndr1db9fb9Xb9nderIFI0net2u5WUAHAw5FQfd+F8v73okuhWpvJMGHYEDWzp/K0vf6+Exg37ObI1\n98fRCZ+5SsH3NApH+uqltJzJ5L5HmT/mJ6kBR6VuWkeUDKfDc1Ov1xOVKvPz5OXLc8qYj4hdCtQo\nChGLIz+/s+/h6Jq5dKqC9eVepLbKiM5oHnNkNJGUi9HB9XpXAgvMy2dElLwPqQY4DSVqSGS2WCwy\n2gbp4AK+BmEzodMIYhl5mi/C35lfYySiXq9XiJWr1SpLzS3Pnmfvs4hdStAkfaJSGoKC2O3t7cXl\n5WVEbGW41+vlGji6hne1WCxiPB7HZDLJyhgaDzKuUgYh7ZeFIVzNZjPu7u5ynzBfpFhANEq0FfSK\ndyyPweF+Lpgg9cPBvCAu3PPdu3dRq21PDzg8PMzP4YQxR+12O6uGTC7u9/uZxouIlBPWx80qQbxB\neYzG8f/NZjOGw2EeJ8M9QQ+QPeR7NBrFYDDIPVA2OGZOeHf4ZP4MXQE/kfUH4aCKyqg632u32/Hx\n48eck19//TVub2/j8fExptNpHB0dJWGbPWsCOJf/TSf5Ml2KbrLe8xmJzWYzWq1WpZDIfCzzcthn\nk8kkDzW2HLN2EfHsMxAwPydiu99/+umnaLVa8fnz52i1WpUO9XDdSH+RrWAtQKE3m20bD+tT0CGI\n8MzNdDqt8EKhUzCnoMW8g9OMpPJB6vis2WwmcmS9zthNGuf9IiLT5mUxmmXYSBM2k7Y3/DS9hLnh\nvpZR9pFtb3m9iiPV7/efEaVZeKcIUKJAm6WR9uW0idsfRFTLLF9yiMoc8ktkcH9mx8RpL94ThWGO\ngR09vyvfsxPonK/5QEDI3li8e5m69P2YV48DJ2N/fz8VuVMzODMoD0iu5qGhaEziJu8MsdYVOHYG\nXjJezKVz6lwoRhO8cXR5l9LJgqjLmpSlrqS3mNeI3WZ7fHxMhcQ68V4mW9o5wXFeLneHantD2zjb\n8FnR8878HcrkpTlBLsw3tJwzJ/x0t/iDg4NMq6GIucy34DwvrxOOIDwUPiPlzVph2AgAlstdXyvP\nAyX3h4eHcX5+nob98vIyvn79mkqYd2KdPEfmhjHvpSNpIm8p915j9h96xEax5P255Hy1WmUBw4cP\nH/LZm80muYb1ej1OT08zvQkvg7PSHHyw3pzjt1qtMiVIwDkajZ5xNR2gMXe8M2kpAoYff/wxn3Nx\ncRF7e3vJ+cEoRUSutx35cn5Jozw9PcXp6WklBch8Ybw9hxhSjJu5o+w/HF86dLOncdapqkOmcGbK\ntBjrhMNYr+8O3iZdt7+/n0UB5f5GT9mQ8z4Onpm3xWIR0+k0gyh/pySMs2a8C845vEG/J7YR58z0\nFOYNh8N6n98zH6bJIHukrk3mHo/HmX6MiMq5tegTU0NstyJ2nf/dl86yiW607ceeAU7wPeSB/VLa\nNQ6C5vsELcynA3cHnoAG3NfFBHAgv3W9iiMFv8QbnFLE2WyWSrdsjbBcLp8hHSh3lByGkM8c1Vg5\nIvA2qiYBskAIlhcfgw6HyGgQThjfc57fqJcFGI4P72CUy/wunmXBZxw2xtwfJwpCp4UfIiLfh2vD\nWhjpqdfraRS9EZbLZSoK3gfjSgk2Df6IDNgwRlFwEsh7LxaLigNmlMToiT9DNphvVw/S98ib2orL\nhwTjzOF4MBc8zwrGjhuInpVNyZGDn4ACRR7gTiBn7iVEWwSTwK0I7EDaocI4sf7srYht/5p2u51j\n8D7EUUJe4YNFRFZM4jAZPSjXZr1ep5Nxf3+fDQRZXwyBW4CUvb84+mIymeSxK1w446ztZrOpkFVd\nQUQ06R5TyD5G0bJBNGpdwT0ho242mzg+Ps53wvnCKWy1WhX+FEYCUrEDSNaQ9i8YxHK9MfCeAxqY\nGsXEEJgTyQU/ptlsxvn5eczn8/j5558jIrInHLLkgojT09PKWYA2XlzD4TCdhaenpzg/P8/7Gm3o\n9/s5pwQrIKB2KkAwyh5cyAqGcTweV46IabVa0el0cj+iX5lvI9F2bB4fH2MymWThio2py+XLAiRQ\nVs5/5F6MD7lkndFtIGd2Rrkn2Rre1foLvtx6vY4vX77E1dVVdDqddDJBKuEq2Qnhwqk3Lwv5ALFG\njvx71n+1WiVaOR6PMyDAiWK+W61WxbHCnkRE6mz+xs4g+9D6xM43z8D58fE8OFLYcfsK2AuDD7xX\n6QCaU8n+/Nb1Ko4UG8PnDi2Xy1yYiK2nXSo3lGaZSkLh4927Uy+OgjdQxC464vtl2i9ih2SV8CJR\ny0spKf/OhpaF4/dWmCWa4FRdSTT2GHCqcBBLlAujDOpgpI1IkPG5d1Cn00nFUaYMy54pEPj4Hqka\n0CiUBukUjJbTRhhunBejNVyeozKdC9HRf/f09BSz2SydH883/2bdDdOXFWwPDw+pFIhkUTTInNea\nEn/elfsapmbjGz0ySdNKA0Iz8LmhaN6JMVlOnMqlWaMPgnZUaqSHMfV6vTg+Pq6k39mDdqK81/jJ\n+to5pfM5KW5XUD49PeV+9/52ZGiSqsfeaDSyms3IImO0EbOSZq5x7P09HEIqPm34WDMcKt4VJ+T4\n+DgPkWbv48RiOIjgGb8rOtFHjO/x8TH3khFnZMmHRrsTc4mMcpHq5Do7O4tff/01IiKur6+j2WzG\naDTKTvNl2icisk+c5Y13gjjO3zE31qOcWcczifR5X1fzGiVx81/k+x/+4R/iy5cvMZ1Oc/yz2Szl\niOIP3p1AG7mxjttsNnkfnDHvfewQOtepYhwk0CH2DM6ou4+zZgSi6FqjK65QLnWxEfzLy8u4vb2N\nTqcT//iP/xgRW6f37u4uKy+dwbGzQoViGXwR0BN8+8Jhsm6bTqeZ9nexBPLd7XYzVYjjHLE7g9J0\nGKfvmBPmyfckwMMOs/bOWrGmzkyVtAWvPTbQBQl8xnO/db2aI+XNErHjCtgLR4iZ1Iid124DTzXL\n0dFRVjFE7BQf9/NCEa2+hEhF7Jyoer3aOJNFJ3KCE8JlR8e5WyNc/Nscl4hI6NwX0eFLjpc/Y+N5\nDAgNyFmZ52V8KG82OPNDVdfj42Pc3Nzks1Fu9PIwT8ZIhisqUJ7T6TQjfkeXKGbQKTcJRHGVKQxH\nk0YXGDu9glBELuPHEep0Ool2shb+Wz+PeSZlYNTCDpbljMtjMjISEdkHx+lkvk/FFvLksmvfG2eC\n9QX1JQ3A+0VsjSkGmujOfAAcJDeAjNhVQlr5sYZHR0f5rowFBMyBCdWc5nvgyIzH46zi4TOca4xs\nabjLd/Kc4FywtsgL1Wmnp6eZUuTIEpwdI7KmGDC20omwjKxW26o3o54475PJpIIcgsItl9u2A+X+\nZ1zoqjIY4//tfDutz2XklPeBk+Z+Zg8PD4lQ2ZiQlqWS8PHxscLfwSEwWoMscuAs8miHEAe/5LlE\n7OQdvXVwcFBJF8PZIhC7urrKzwgU+dzOGvdG97uFCVVim8321AY3ypzP5xUeJPdEN79UBet0J++P\nPD09PcX19XU2Ae12u4niYuOGw2HyK603bONwtli3m5ub5Dk5A8G7EpxYr0ZEBpS1Wi3Tr7bTzWYz\nUWKvE20aCAqcdvceKoNPbKhtiCsTcfSwX85QsW/YHw5oWXPGYn0CKu6/s1yw1wxKOEgmqCqvV3Gk\n2IQWRoQDEmmpGMo2ByguTih3p2WXXfO3GDffs3RMXOrJ37CgRkEQRj7333sBrMxwEF4qZydyXi6X\nKYR8RoqKVCeRdsSuf5QNq6Nu7gP6YljVJHLIo2yQ0og4ZcIYfLSE58bIAj2AIrZw/sePH19M3WL0\ncEyIbCOqKSPmzQ4U788clGRc1qtsqVCm9vw9b1Dn7RuNRoWYW6ZoWJunp6d06rkvcDNRIo5KRCTS\nan4RRoj3wTA5LYlcUTTg93FLhdlsFrPZLBEp0CBImXAXmJtWqxW9Xi96vV4FAUMuFotFpX8N74G8\nMUfwgHDkut1uclYs+/v7+3k0zXg8TjmEQGtyr1NK7GnLCM4pv38J5YXgihEy34V7gGSyPhGRARtr\nRCqbiz0FksTcgCQul8sYDAa5fyKqPZVAyMq+aaXTFvG8hYoRXkfZyL+dqna7nWmsxWKRzsL9/X1c\nX19X+EBGCGazWfK5cEDQ36PR6FmvMuTs8vIyzs7OMt1p7hUBD7zEktOzXq8zZbi/v5/zxhwMBoNn\naMZoNEo6iA2p18IIr8ePM4hMIP+DwSB6vV7ynXD+uCfPQAcwPsYLFxBHLWKbCr29vY1arRYnJyfR\n6/XyXY6OjnJPuK0Ka02gzx7FyY2IdMj5e/c0sywQiFgPky6mNUAJWIAgO7CHqmCagBFXUp7ePzwP\nO+LsAt/D7uLEsLcJ4nge9obvkUWgeawDKZxI9jfvUrb7wcn0nL2Uzk5Z/OYnb9fb9Xa9XW/X2/V2\nvV1v1/96vQoiFbFDb1zmTxoOUjQRBtE1HiIRc8Suyyu/A+aPqDZfK/Oe5hQAIePV4qkTdZdduM0d\ncCrP0KMrGiKqh6Jyf5+CTarPUTZjBloEPnbFmzkEnk/fn6hof38/yd9EMUDOpF24iHhOT08rCOD9\n/X3c3t7msRdOYQFBN5vNuLi4yFw68wB35P7+Pm5ubrLihkoi1sh8FhAJ5KNM1RC1MWaTjb22/D+y\nx5zzfe65Xq8T+SSSK/lZJkA6SgE52N/fz7QiqBPRD5ENURzf46wx5tpcHJNejbKAtPG5D1IFxr67\nu4vb29uYTqdxe3ub8rDZbOLu7i5arVYlgl6v1/Hhw4fkh7iBIBGlU8tccEQYt2WRKrFut5vRIvdg\nfagUBJmM2O7tyWSSyJHTRUSJruRxeq8krnodifo3m012OvbeBSWjczhrSLoDBNQoJ6gDJH7uxXwj\nw6ytkTMIwRzfwkW1WSlnXOZt+iqRca8VSDx7ZjweJ3K0Xq+zkzbNfE0S5mw7UNyIXWX1ZDLJcdJC\nwBwxUqkUGjgbYESURqDMN6gnc2YE1BwZ5I55I4UDr4r5RieBZJti8O7du9xTf/vb3+Ly8jLRSLiC\nZA8Wi0UlXcj68LlTnEZWPHdwhU0hccYApPrg4KCS4kdekUPub2oAe4GWBdbtvJtToMwbmQlQS5/d\niv1hXaGC9Pv9Co2A9DjyBk8MvWAkz4R4tyKhjQiFCS4icxEUGR5XjjOf6GrTWdBp2HCnGXlfUCn2\nCLLnOSyvV3GkDNFZ8WP8MTpOvwC5NpvNhFkjtpArsCGGpCSN45zYyUKYTGg1L8ZKyqkmFh5j7Od4\nAUkZlL0nSKeZX2MHjs3B2MsqpJIcyLsDR5ojw7sgJFQMcT8UP0oPnsh8Pk/eDrC7HUNg6OFwWHFA\nIXhzoOfp6WnOM1D/4eFhwtAUFxjqhwhqZwkeh40Pn8HD4vsoPhtzeGKG39nQ/I1TQFQXomic+qU9\ngavbWEOgYZTYarU7nNaON8qV78Kp4H7O+TMe5MJwe8TufKgylcU7oPycMiH9sFwuK5VOEbuzJCm/\nNu+F1B6p2Ol0+ozX1Wq10jHFQMORcTDBuLjXbDaL//mf/4lPnz5VeB3mL5YOiA2LeYDMCzIC6Z65\n6Xa7eU9aQJiTR0Uba2UHHNIzhtXrw96m+7kvnGd+z/dIAeOAOD1JdaCDv5JD5fSd+Zhlaq50fn/8\n8cf45Zdf4pdffokffvghIiLXDb3h3kSs93g8juFwmPJrWkOZIqWCDQ7i9fV1DIfDuLq6yvf5/Plz\nrFarXIf7+/uURThDh4eHcXl5WTla6OnpKf/daDRiMBhU9v5oNIp6fXtOG5w1xsH7LJfL6Ha7cXZ2\nlu9Zq9Xi+vo6Wq1W9n6KiPjrX/8am80mixFM4KZikDS6U0a1Wi33m6t2mTMcdrh35nHyfdof8C4E\nSRRbsN7YRJO4S04xuo3fHR4e5vdwyPr9fnQ6nQp3ydXMvLsLoFjDm5ubrNhmHPSXYqx0qOcYKZxS\n+oIhPw5WsFURO1vIO9jusW/MozU/DOcQveiebcgd+5R7WuaojCyvVzsipiSZmaMB2dXcG7xqNjCL\n3+12K1VEVrClsxRRRYbKxbJiRoFjcEy6NEnPRh/BZSGN2KDkMepsAr4HImXDx8W7sCnsIBKtosh4\nFyrWut1uEqrhRUTszvADzfM5XggzKJ+JfnB9Wq1WnJ2dJZoQEelYrdfrPO/KjgbGdTabJaLFZzg6\nVFawEak+eXp6ik6nE/1+Pw0NCh9jbyN0eHiY0bUbF/pdeK433tPTU9ze3maFGvwd5MgOrWUHJ5Uj\nJFD0boAKFw1FZySBfzebzcoZdjaSbsFh2cCg2VlAMUBwNZJ5d3eXCALfN8o1Go3i4uIi59XGm3nm\nM8Ywn8+j1+tVOCYl/xFZtVNLddvt7W1cXl7G999/n2sI4ReD7v1kbhTOsM9pA91lbzjwMRfJpd7c\nl7/B4XXgB9dlMBhUIv29vb0MRHDwuRgz/DFkj+cgZ0Tkrk7ECS0vO03+6eulz9Af3W43fvjhh/j6\n9WsF5YO4DErgyjT2Mghjp9NJPdzv95Pv1ev1KoRy3v/y8jKGw2H8/PPPFeeFvwP9M1KPk7xcLvNM\nxojI/m8cIG10EINIQOq+aIvFIvl833//fXz+/Dnfj+qzZrMZHz9+rHA8r6+vM0C28xKxa0PR7XZj\nOp1W+EpUTeOAuZgCfW3+qxFAG3DOlIuo9h5D/l0JisOGk27Ujf5ZzJ/RQPYUPM9Go1FBSGnLUga0\nkOmR61arVRm/Hd3Dw8NEhLl3q9WKH3/8MatHI7boIIEo47NzhF7CmfJa2HcwKMHvcbDN/eUe6BLr\nKObeRWXl9SqOFB4/qYKIXZrGREuUBZAnyAiebURktZ4hfSYOMi33QjlGVNM73vS8Cx47gmM4FAOO\n0eciSjHCZGXGIpYwPc83cdROHY6Vn8U98chx4Bx5Imh2AlEoJktDHvQGKTe7WwKYqOd3Xa+3J7dz\nOOT+frWBHmOgaswRTbPZzLSIoVrSCybSlmgdisXOCQoTAqMhbJNDnX5jDKB5QOtuD+BGfXYwOPvL\nzrdLdjebTbb7oEqtTF8C+1vxMzZkHIjcc0GUWcob84Fzhwz3er04Pz/PoMCEzL29vWzCiWww9xSI\nEFg0m7tDVnEuSYc2Go1ce6M9/OQ9HQT84Q9/iH/6p39KeUKZsUalM4mzZqTWjhSyj8zxmZ10fucW\nBzwLeXUUjtNN9Rb6CyfPzifzTVoCh7Hb7eac4KyQYrdcszYgp95rrDFz4ZSJr9Lh4vr8+XP88Y9/\njEajEb/88ktEbJ26i4uLdOZB33kG+xAdQJuMiIiTk5NKisnoCo56u92OXq8X33//fTZJRO4JLF1B\ne3l5mfNNmgd5I3im99Z8Ps/xk1qnrUZEpOOGLJycnMR3330XjUYjq91++umnqNfrcXJyksEn3+eM\nSWTVpHfQu/39/TwzEaf6+vo65vN59vsiwEaeQP673W4iwKwXc469scNJ2pM0FX/Dd5F9uq1bt9Me\ngiABpMdpx8vLyyx0QhbRX4yVtUAnIefD4bBS5X1xcRGnp6dxfHxcQeS4SM0eHx9XwA90OM6d28qw\n50tainVMxA6t5nc8m7Qun+Gs4jQZxTMq9q3rVRypn376KQ/AZHImk0luGLxKVwyB5BwcHGSVXsSu\nEWLEzrDYKPgqnRUz+3EmuBDkiGr1HdEIC2whBeXiWf4en6HwXYEEYkZkAlQasSub9/f5HgqNyx2q\n2bR2QBy1wB0gHWeB63a7cXx8nM6uOQDAtFTzMVesBc4xBtV5ZuYYZIbnkbZCeTtFAGepXq9nbxsj\nK0QkODAuVQelLNM9Ebv0FhGiEQSMqnkGXkNXCNmpw5gy7+4UsmdeAAAgAElEQVSpwrrhDODEWN6I\nwGq1Xcdslzrj+JuP4UojpxvgMhilNWxObx93eGaOQOGM6DAGUvAEF9zz7OwsOQ9GYJgjy5dTVMjJ\nb37zmyynt8PrikxHmlRa4kSU6AdoIYaGCJzx48DwXSNk7Dnv/4hICgF71cYNjh9BQKvVyrUg2KEx\npR0+UHlKwc2vKZ0j6zPzRMrP+NyyZYcKR/ff/u3f4vz8PP70pz9FRMTf/va3jMxPTk6iXq/nfgI5\nw7EldW+uar1ej8lkErPZLFNyjIMg+P3793F2dpb8Ghxz5s6ouRFy5h3ZgL+GY2I9jL5Arh2Ut1qt\ndPqoRByNRilTpGVJGzK/2BuqzLx3I3aVsDihXBzBwjtafzebzTg7O4uLi4vodDqV+xoNLZ0DgkYo\nEdy3lAF0nzuhl8BBibjCqYLX5s7upMDJZphislwu4+bmJn788ce4vr5OO/Tx48c4Pj6Oer2eus1Z\nGgc8FxcX+Tz6eZlS47lBxzBGxg4tASTL9JpyT5hSApIIV5U0PfNOFuZ3v/tdvHS9Wh8pBAoBRdkA\nRcIZiKiSxolQmACUK0rdyA0RFBNuBMEGGLTIaRiUHRGqF8opBtIL3BPDY15FRKSBsZPgz8qGlb7Y\nnBgxnodBAHKGm8R3UMxwGxiP35Vn9Xq9inMKFPvw8FCB1P1Ow+GwQthzygtugCMs5oBxGBUiAjFE\nz2c4OiBzbg2AcmWjlRvPyrj8f55rpQj6CYfLaT8rDjapnXjWFwSl5AXZ8TRpnVQehvGlxoOklcqj\nZTilHmXk1IdRjLLHD+sVEc8CFu4LAsU92UMYKUjZzBv/Zu4dtPR6vUSEnUrEwdjb20s5A62A81ii\nunwPeUPpGVUFLSOV7NQAwYAdDL43Go3i6Ogoer1e1Gq1CveGdzGHzbxKnndycpLcj4hdihxODo5I\nxM7xQxat7EEjmLPyYr1BO/kuMmjUzwEXOoriHMZ3fHwcP/zwQyVAtL5g7kGWnOrAOWu32xmYcoHU\nWuczfpOzkVXz22iJcXR0FJ1OJwMzsg04bEaVSXsh86vVqtIb6/Pnz/Hw8BCXl5fRarUSOcXxs5Na\noiAgzZYBvkOvNgfQIDgEJp5TAk/k3I45xhv5cUoNZJo97gIQv6s5nqwdzzcSbfQbcGEwGFQI3rPZ\nLKbTaTYX5V4RkfQP+GHtdju72r979y7pGI+PjzEajZ6BDQ5Y0VEuggENs1OP3seOOZXnFKP5ik7r\n4lw70+LCHSNnZVbkpeut/cHb9Xa9XW/X2/V2vV1v1995vVr7A/LXPiOnTKW57BhSecSu8iBiV2WE\nZ+roixwqHnvptRslMcGZz0AJTAwnbQWSYfiXdykrhhgvkVeZ1omIZxEE98DrJgppNpsJp4Nq4D0b\nkYrYdrgdj8eVqMrnBc1mszg4OIjz8/M4Pj7O+RmNRnF7e5tpv6urqxwHJeBPT09xfHxcicoobWZO\nzEkDigd9Ia3EBSLJd3keZEo4KU4ZQfY36lKmec1ZKc8+g9zrFAroz3K5jPF4nKlkv2ez2cxKHSOP\npC+BsEEakVneGfny2XfwPfgPdIG0K3C+OXbA2jwbtAR5Iu9vLhrvCnRffg+5JkJzIzyiMlCr9Xqd\nc8NZeCcnJ5n2dfqAdyp5hbVaLStHN5tNBf11I8Fyv4Aqcq8yLdZoNLJQAaTTvAkjJlTY8r1Go5G6\nibQE8xaxa1wIf433odksRFmjZ666fHp6ynmjaenBwUGlOMLvZX6m0Tn0RKlTQK2tm5za43c//fRT\n3N7eVhoY+2Bi0IWI3eHV3BfU2e8DQtRobFs5ONUGmowcG6k1b9IUC/ZQt9vNfW/dyl5jTfiMOaYU\nv9lsZuPUyWSSxHGQY6O/7AkoI6wheh1CvOeWfUl7D37H2Lm3ic58ZpsF6o48L5e7o5WwR8goVdDo\nNmcq4KchS65II3VHwYCzAOg2yPHmItdq22bLVEN67Z+enmIwGKRtwlZFbNPF7969yxMEsCnsGeTO\nFYesL3YNuos75XPGK60xnE2JiCyEGI1GqSMajUbKMPdE9imS4HP7CmU7k5euV3GknNYxHO20gSFE\nJh4yMpsuYqekKB+1AuM5CJoNNFCzKwAQejYKn1nxY8yBVc1TYCyukDLXCRjbpFnehffEULp3FmlH\njL85G+ZWmQD59PQU4/E4SdGcgYfgkEOGq4QTELGtvpvP53F+fh79fj8FkrExJ+fn53F6eprjphIP\nhe9UKmNibsxboNKPdJuNScSuvBiombnlfSE/WnlHRCpmO2X+CX/AFTERO/gbUqmNM4YrYpePj9ga\nWXqekNLs9Xr5txhSOA3wMCIibm9vM4WEQvUY4SqYtxMRWQlnA2p+IAaFTuSsPRwtZM18AO5FqiIi\nKsrUMmtyLIaVdOfNzU32rfr06VOlKhM+G3NIOshEb+Zlvd6e3k7lqGXGnEg4HIbq4UAwPubG6wxH\nh3lttVpZcUl6yOnC0pnlYo0wXE4FjEajuL+/j4uLi9QbdhA5YHc4HMbd3V38/ve/zznmvexc8NNp\nX1+l4+cUph2AiIhff/21shakmajEYn3h0tGhfLVapT7ivlQK2wGJiOSp8ZnPW8Owsycs41Qzsn4O\nrr3PXIwQEan3cMwbjUaSzd+9e5fONfLhAMs8QvY7Y8Bg8yz3WGK/8X52os0psj7abDZJQuff5hE2\nm81nnEDWyUUxpMC8F1ln0ocGF/hbFzogJzinFH69e/cuIrb74vz8PHmfOFURW71/fX2dsr2/v5+6\n7eHhIb58+RKfP3+O3/72t5WWGlQ3kvqM2AUWyB56db1ep32bTqdpo2q1WuVoNVLAFHfM5/PcH4Ax\njLOsji/9Bj4jBfpSej2/+81P/g8v8pauFrLQIAAm7bnizYtvZ8ZRe8QuakOoysi7JJv6+Y7WbUB4\nX75vwhoT7aM8jEiZO+LokucQJfmAWQiuEdXjSzwvEZF9TJgXzoUyKdh8H8aHMbGRQqFdXV3luxkh\nwUHBkHFPIiScO3MFUGxU5pTVfigj5huFxLuxdo+Pj1kRgwNNPh9SKhdzyvoSXeMIs0a0V+Az+A/N\nZjMmk0muhY8cQLFBMkU5mC8GGZ41RkGVJG6M983NTUZJdsBQligpomsCA+8HG0nmjEDCzhKRMEEL\nShqSPPJghIggBXmwIqLvDITddrudlTu3t7dZHYRM2JB6PxiNQ1lTseX9hDOCM4Excdm1uUIO3vg+\nssG8sm4YgYuLi3j//n06oKAjROwPDw/Z4BY940OEicprtVpWJF1eXmY0HLFrDsraXV1dxcXFRc4N\na8feL7lOPq7HqCqf2/HyxTEnrJsvdK3fjYOM2+12GtcSAaXJoVFLyyRkZZotRuzadCBXnU6n0rCS\nY4WWy2UliGJPE5QSaPC8h4eHSpDI3v/06VNcXV3Fjz/+mH9vJ4PgaTKZVCpPHUwQNDDfkJRxvspC\nKS72hwPvo6OjdBr29/fzPV19yl5zxsSOEtkdZB99gHMLZ4h3KHsnOTihgq7Z3DY/paLz9PQ0UZ5m\ns5nBTcS2Jxm2ECAE2UHP0yeuXF/QSc4UZD9RHW1nz/J0dnZWCdC40D3YDXNRsYMEciXflmpcOGLI\nBUjd/8aRehVHqtfrZfUVAoeRR6ggUEZslTT9Rdg4DAohgnBtEqArrEqCNwtvhWxSLUqI71uwXWXm\nijZHT3yf55WOG85ExI5UawKuy/95BgbIZGM2L4icET7DxlbsEZGQKIRODBIXzsavv/76DMlbLpeZ\nhjAigrMUEdmjyO0tVqtd1aBJpURyGFEjUihLkLjJZJLPoFM6UW7Z/NTpY1JmXKQFSf+xvm6QiLLE\nsELCxvnwe/L3pBIwLEYzkGGIklyuvnt4eIjb29scI4qBtfeJ9LRJsJxZ3phn5p8xvkRCxjljD+Hc\n4FRzT5dwu5JmPp9n5Vmj0YiLi4uMIC8vLxPZfHh4SKPJO7CnqMJCLjqdTnz58iVubm4SBfHl/cS7\nIgPj8Tg7R5OO4sKhx8AYWSEAGAwG8fHjx6jX62l4Op1OBitfv37NMnq+NxgMot/v53p6bgkMv379\nGk9PT/Hb3/421wXjM5/PkyjNviC1amPhtWPOynSpf/rid3/5y1/i69evSX6PqBLmkTsCBc6WI5r3\ngbPIHc7GbDaL8XiccorRvbm5Sb3utbSDeHJykg7/aDRK4i/Ov59HyfzR0VEiZr5nxO4sv48fP0bE\nroUHAbD1ED33aFECshWx1QsnJyd5Xp4D7H6/nwcFWzZ5D/QsQSQXusAUBC47otgGf45MzufzyskD\nEVEx+OhPNwYm1erKNr7X7XZzb3BuasRW3i4vL7MIxRmMDx8+5Fhubm7i8fEx3r9/HxFbBJB7zWaz\nDCQiotLDirV030Gcrtvb23S6IrbOsPUgKDj/z1yCaPJ3puo4pcvzsPvoBFePImfful7FkcLbNC/J\nPTrgrhius1Iw94R7kB4gguOeEbs2ARZENpOREITf1QFGXriXK8Kc0+edHHWXXi2esPPCKHtSNVQc\nROyiOJQNqE/E7qBcKhfN2Tk+Po5ut5tOG/NtYzqdTmM6nVacFuYXJKJUUEDFdujYUMw/EZs3v6tz\n+En0hQNK0zrGG7FzMuGL0MuEd0Ehong830bpnKagfQYOE8aP77m7up1T+EY0uDMCRv8bokKUjbl1\npJ7NiWCN6cCMgkSWcFxecvpIkRDRW7nxfBSt03esNUqjjLZA20i7oKhRJn4PDJtThKSMkW+qfVwp\nZCcatKHZbMZ0Ok14/+LiImazWXY+dnNUno/zBKrG5yAoyCHOIWPHmWU9uZjHz58/Z4QKKkHAAc/D\nUTl8OuTf1ZU2lvf39xUuI++F03RxcZF73zSFMpXotSJ1478xEv/SdXZ2Fn/+858rUTkBKXwXc0je\nv3+fmYThcBjHx8eVSlgHHvV6vVLVh7NM1oCu8BE7ngxzY4dsMBgk3469ZNSNYA39D1JNzydoIEbW\n/uM//iP+9re/JQJqFAhH6eDgIA9wdpNVgiiCVqNjFxcXGWAh74wd2ggUBKPmpMOQSXQC6+dWMdgg\nAg8jfebVEliwFiUPiipAjgdyS4mrq6u0ieaj/vrrr3F5eZn7xg4KegLbhTOGTLnNA9V/rP1sNsvK\nSYKSiN1egydFo9SIHXcQvW9aDgEujp3Ts656pNLTHEdQbeaQMczn8+SGfet6tdQeL1miR6AWODFc\nNCpE8ThfjIOCw2Pjzabi9ybkAY1yH6MpNr5lZIeQowAdXUfsjpkw9I0D4ny/eQ/mcTkqZ554ptMp\ntdquLHM+n2fkHhHxu9/9Ls7OzlLZ3d3dxWKxSMV4cnKSKUQrOq8Jl9N+KFenVu30mBtg5Q5s71Qk\nyoYGccDMTtE0m9vu68zL6elpKj7Wlw1vrgibCyccQxWxc0AgkptbZIgaJ9KRtZ0OOzVE509PTxkt\nYVAiIlMUoI0YgIgd6RRehOfNJcOgB3aWcbQYn5EJ7k1EbzSWZ5lLxboh1/BIUHDwFnHI4b2wTqQv\nacaKHHW73XRqHfn5e+xheDYRW+T64uIifvnllxiPx9mhPWLXTBNHj+eSTnRKlvVzKh1FaeSZ8eME\njMfjXD/WolarpfL/8OFDBf2Bs8i+9BqQ3jg4OIjvvvuuYoRYb+TGTq05ayXnECQLZMpGmJ9GHfle\nrVaLDx8+xHfffRd/+tOfcl+s1+uM9klPMY5+v5+BAHqMwhLmFK4L+sIBH0R25NdkbNoGNBqNisGC\nF8dVNlBdr9dxdXUV4/E4ut1uXF1d5d9++vSpksr/7//+74jYpqHa7XY6TJ5XN1TudrvRbrcrfZSQ\nU5xijK6dwdlsVkH+4Sbyvm4Zgt5uNBo5d+xD7JMLNRzoghzbufIeNg/R6S36ZsGpfXx8TJtxd3eX\nAUir1UqHGlm6uLhIp84ZjM1mk73a3r17lzy0iIirq6tETWu1WnKbkKkPHz6kvqGfGrJIsQPAirvT\nw88iu8VagIb2er3UucwLndVxgh14LJfL1LHT6TQ2m02ll1/JTy2vt/YHb9fb9Xa9XW/X2/V2vV1/\n5/VqVXv9fv8ZikAUQ8qh5OxERPKjiLJo3mkkxCiBkShHZvw/qIJTGE5dAJmaiM47A3OaswT5GmjU\nsKKhSRNn+W6ZiuAivQWPjPE9PDxEr9eLwWAQs9ms0pQPGJr0C+gNUQrdyyFmkl5hfUjBEXkxX6CJ\nvIcjOqBpyNucvxWxRU+ISIG/mQ84KXt7e9nh3pAr9wbeJzL59ddfk/sEWmLSN9wJkEevuSMyCP4R\nWySj3W5nBF6v17NBJJC40w1EQrwTCBc8EsYBYkWq1VVdoJ6gjfBPIrYVlHAZIHkyb05xR+w4UxE7\nzhPRoFMBEbt0KpEu72IEjlQLc+Mzp0B/TbilupKUmDkIFD+4fQZjIAWM7MJNAP3sdrt5aC3R83w+\nTzlbrVYxHo/j69ev8eXLl4iIbMLpZo+G9tm3oGtGkyO2lUgcqsp+A9kGGQWZYQ1Bl7g33yNVwnuU\nCCgoFrxEX6y7OYq+zDcyqlYWiXA5ffTp06f4z//8z/j69WtEbLtQU5FFusa6lEIH5PPk5CSfyfhd\nEcp8s+YgLFSbITfozX6/n5WvjAPSb71ez/eMqHY2J51DupRnn56eRr/fj9FolCm63/zmN3lP9Dry\nFrFNRXGI88nJSaKxyB060aijK/u8xyMikRGqip0VsN0A6XXRCLaNLt9lk16eYbsVsTuuistI12g0\nSltxfHxcscHwzricMmOcvJf5kegEF22QDm+328kZfHh4yGdHRLbccUW4Dzv+y1/+Eo+Pj/Hu3bsK\npxQ+LHrbPDeeR8aHTA4XSDq2hjHQtoH9ad4pTV+/lV6PeCVHCoIwXKKIHcHaUDSTymIDZXpiSF2g\nIJz6QqHhSNmZgluBUTRUyb9R5O4zQ2oRCNgOj50gv1fEjgQXseNQmcM1HA7j9vY2jo6O4uzsrMIF\ngW+B02LljXJ+9+5dDAaDLDnnMFIcxF6vVykzpSU+aUYrPgyQjygoOzDbGSyrGZrNZh4wbOPNvLqU\nmDXkflSN2JFy6sVVJ9fX16nQmRN3jPYGY+28TuamADePx+NUDDiNjI9UAwaYiilklZw+z3AKezKZ\nxGQyiffv3+ehruaXMHbS2nx2cHAQJycnCW2zlsiwKzLPzs4yJYqRwIA7XYwjAXfKF9/ZbDaZjsAI\n8Ty3DWCPNhqNVHDIuVMGw+Ew96DT2jb+vCvfpwUBzvloNKpUxV1fX2e65/LyMn744YeUDVIbTj07\nnWR+n7ll5h26fxjPpOoKjh1zM51On6XeGCOyzTjtNBLsNBqNbDOCc84zPAYrc5ws7lOm8JiH8t+M\nt9lsxsnJSVZmXVxcRLvdjtvb27i9va0Y2f39/ZjNZsmFwfFjXKTfn56eKkUVvCdkdSgRDkx9TFOp\nL1xp/C//8i8Vh+bo6Cg+fvwY4/E4uWLIHSn/6+vr+K//+q98HgUDi8UieaSlc0p/osVikfrUtgqe\nJrIPb5WO3m7Pg2Pp9J5TsI1Go1K0ZMcV3cT9kQvmCeerPOYJ2UN/m1B+fHwc5+fnqVOto8zDxB6a\n7oKsQlnwHnIHfMtNr9fLoB7Agz36888/Vzh64/E43+XLly9ZWdlut+PDhw/JdQOU4ExDgi3mhrQ0\nh7M7ONpsNnlW4V//+teU/cFgkNV+OPTsLfMUqagtr1dxpMjvll4tXAKicxaDc6vgdNhbRGiILB3R\nIfw8y0LsEmYiDCMroAYvlT26Ms4KjHfDWDi/D1JhQ/GSMn16eqqcgk0+nzy0S5U5uBKSMgowIioV\nSzhiJo1jmMy9MdHO5FAUE99jva6urrKiISIy0gS5MRF/MpmkU8icMd84RxDcjZwxTzQ75NkRu8rC\n+/v7bNpnQ4sCKvlaJlkjP3wPQvPt7W3c3d1lg8GIyL4lJiszBhwX/m3kBJn6/PlzNn0zQsSYuU9E\nVGQEZTkej/MMsIgtx6DValVIoW5iCzLGs8z3idg1nywDGivR5XKZUfnd3V1cX1+nvG42m6zOGY/H\niW5yXxdTLBaL+Otf/5rvyBgIjnDQzGlAXnD44Yt5fYfDYfzyyy9Z/l46y+7NZXlCJpALO5n7+/vR\n6/Vivd6Wj1uhPj09VQoL2A8gIxgiE1kJPlyk4mIZgod+v18x3qvVtscUjkvJkzLS5gAUgjLyXiId\nzHOj0Yjf//73cX19HRFb9BMngfdkPs3fw7Fzs1f+DWJhfXN7e5tFA3zuRoinp6fR7Xbj6Wnb+45x\nwFEimIIvE7Fz3AheTEbGOR4Oh3F9fR2r1So+f/6c+4tiBypljQ7u7+/HH/7wh6jX68mVYX0jdsU/\noOARO87O3t5eHrXFPiQQsWNqp9aBi4P59Xqdfcl6vV5mFXgXZBi75vWnsMOOmREbkPQShUXGCRQN\nWHDQNnbawQH6nMpTAsKIXQaDNg+Q1nkW3+cIM/Rwp9OJ7777Lm2BA2KyGlSOl4EQsjUajWK5XOY+\nRPYbjUbc3t5WirOwQWQyptNpIpVfvnxJbuO//uu/xkvXqzhSGGorN2A1elmQzop4XhHiRfa/rTAi\nqtEYRp1n4kRZsTh9BcRpUjn/Ngpl5wylhqE1ub1EVFarVTognU4nzs7O4unpKS4vL2M2m1VSMwg6\niBJCOhgMYrVaZentcDjMzQYi5qpDN8KL2HWqxng41eQ5Yw2Yb0iSpCzc0I4Ig/n0mWRuzIeBiNgR\nJSFrU5USsUuXsoGdLgWaRolMJpOMMChHxyh44xNV2yEBirYThcPgA0kpjUZZo6hWq1Wl4Ruywbz1\ner04ODjIChDkLGKrpLrdbnbh9Zwyf/f393F6ehrj8TgV0XA4jF6vF/1+P8meyAo/SX3a6WONTMJE\ngbl1B8gYYyQQuLm5STQLgu9kMomjo6NMp9jQNBqNdISbzWZGft5POJcuCliv19nAD3njXZ6enuLq\n6ip+/fXXNL6Qmnnm/v5+7sMyILLzaDSW7xE5+zMj2qPRKNFDLirPQFGcJiEQwhHBwUa2QQBBKCIi\nx2wEzeR+UMCbm5tKew/3PeNykQ0FHfRp+vd///eIiPjzn/8c9/f38f79+6xAdGrdzjl72A1+F4tF\nXFxcxN7eXlxdXVXSJpwIQWDrVjMYRQJXO0uQ6NHRrD+2gvHa8NnB/OMf/5jBW8SuSo6siPfFbDaL\ner0ep6enqedcsdrpdOL6+jr76xmNbbVacXt7G4+PjzEYDDL4QHbH43FsNtsGnD6hgzYZ0FV43nA4\nzDUsq2fZowTAzWYzyfPIGvOIw++ihVqtlojLarXKeSNd2Ov1YrPZVA6md7NN1sL91QhiIXsbBQLp\nwvHD7rFP6MdmSgt945A1n6WIXf348WMS/HmXZrOZ6b6IXU/EiJ3TTqFVt9vNsYOaEVw53U/BgB3O\n8noVR2o0GuVklGkTHBcizojIKoKyd0TEzilztRUbzCgEShBhxOiAjjl9BDLmlKCdOlAl7m+kh0ox\nIjpHCfA6rMS5Hh8f4/r6OkajUQwGg/jw4UOOgQZ2bH6X82L0cTKZT3coJvdMaoL7YvAwinxGrvzw\n8LAyT8w74yKv7Bw9qZiS74JTgbFjk3udWHfn//f392M0GqUid0oQ5OvhYXuwMohAxFb4z8/P8+gN\nK2FgZqMXLlf2cUSkynhexBaVc1TI83C6eE/Sg4yfZqblkSVUQ/EujgQZEzC+2wpQsQTChpFkjPRK\nYZ95/DjROOpOKcBls6xHbB2u9+/fZ7PW4XCYn83n80z9oNjMa6CSjxSIy9hJT/L/Tr+jwIbDYeVI\nKar8kBVX/vBdl6lTNcsaI+dl+tcVbziOlmF+T/NAo6TsQ9AVyxQO6GKxyCNKeA6OPfwbBxFHR0eJ\nhP5/7Z3bT2PZ0cWXaWMa2/h+AWygQd1z0+RlpEjzNA9R/uZIUf6GSEmUzGS6e7qbuzE2Ngcb8AU7\nD9avXMc9Tzx8LX3aS4pmJmBz9j5776pataq2NyaDwcA0Qt1uV/1+386Fp6cn5fN5NRoNc+wZsy+D\nZw/SY4keTugJaTnB2IfDoTG0s9lM5XLZfu6ZQh9YsjdoLMzvw7qhWUGz49cw84PzcXt7a/uRWwzG\n40WDXpxHSbEeQVw+jWNCdSB7yTughUIhlkb1aT/YmGRy0SMtiiIbM+mgYrFoQS3znEqlTCPFmcLY\nfcUx5yOfQz8E00c7EtYte4Bn8sFnIrFoAAsryhnP+/dOIMwkc9Pr9azSHUJDWuojsSFeO8j7A163\nR2YJFsy/X9bEdDqN2SLWMA4NAZQ/izc2Nqwz+tPTU+yKGGwCJAu6Sc4yxlWpVGLNrjc2NhRFkW5u\nbixwl6S9vb3PslKr+GKOFHlOf8UGBoiXhiMlLfqerN7qLcU95dXGljhZXpzOP3E8+JxPbXEg0pTO\npwYw+Gh9VoWqvHwOFBYbEdNkMjFD7e+J6vf7ur+/j/V/YnywB3jXUPGMhwjf66d4HqJrFqSfN3/I\nekeKUmUcH1/q64Wk5N854DFoLHif10+n06pUKhaBemeL78EZ8NojhMsYPH8Y4wASBUlLkSMCxX6/\nH2OUeC6cDy/gZHz5fN7WWSaTMUMaRZEd3slkUvl83lgHjAA9oXzKyn8WJxvjKS30bL4Tt3fcYXJo\nDUG5tLQwfHd3d7q4uLCDnO9kbBy2vmCAKBedQavVsvFXKhVlMpmYTocIkkN/Op2qXC5bBM+eoS8Z\nzgfr4P7+3kTorH3ffyuKIpt33/bk9vbWUl69Xu+znmf05vHOFfOdSqW0u7urXC5ne9TvYd4Nh60v\nRAB8xjsJXkbgtV44egcHB3Z3IoYtiiIlEglz6DudTswpIogcjUax9D3P9vj4GOsIzb749OmTOfTd\nbtfE2AQ5iURCh4eHtsZY+1yjkU6nY+lQuq/joNLzin2B0ebM8OkmdGX8fXpAScs+aQQ8RPjSIm3y\n4sULFYtFY128EwLTSfoHNpbg4Pb21hjVarVq84Yjvds4flYAABpXSURBVCojuLy8VKVSMQbEBwqM\nzeuEGD8sm9fq8jlK6nkXJycn+vjxo+3RSqUSS2Ph1LF/vH3yDBD7jRYFPoD2bDfnLnaBdw0DuLa2\nFkuZkW2AdfdOCM4cbCw6QXp/ITHAxjGm9fV1W+f5fN6ehQantLhJJBK2Lkhdsjc8G4m8Zjpd3DTg\n2ejxeKybmxtdX19bcMI53Gw2VS6XrR/dN998Y/vm9PRUNzc36na7xjwyPjRssMypVCqWpfAp/t9D\naH8QEBAQEBAQEPBMfBFGisgSz15apoyIeL2omiiBiNAzPXzOs0M+XUgEwb8TmZDaQH/gWS5y2TTm\n8+JQIlHYJ88e8D1EKz5iI/r1FKFPtSDApE2+pzHxzCmt9VE3bA7RrNcekGZBFLqaZ26323YPmk8p\nUa7qU5Bem4KQ1bM4kqxBny8/ZbykJom+faqCVImvuOK7vfgZ9o3IgL9HOs6zVfP5XFEUxS7s9XMP\nlcuaIKLhxnjP4nhNQyKRUKlUMrbOp6g8k0ZkzdqA7vaVekTXzGsURapUKrHOz2gWSKnQDkFaUOOU\naHe7XQ0GA0v7oVMj/env/4JV444wIjRpQWP7zxBtsmdYZ8Vi0VJCfr6ZcxgWaZkW4bn81Tlra2vW\nyRwdG2uYVDjVUIVCIaZr8lE7TLAvXoGlbTQaFmWz3tifpCt888LVFCP7Ah2FL/HnPdF4sNVq2dzC\nkm5ubhrbBotyfn4uaVmZxtodDodWfp/JZFQsFjUYDCwdwfhgXa6urmxvsZ9I+VarVV1dXcWqDz2r\nSVqeNcn4SI3V63U7hyaTiQqFgqV90MOwvmHuYWN99TSpTtJw6KukpS04PT3V5eWlVU9Jy6tAjo+P\nbd/5DtbpdFrffvutcrmc7u7uLIVDSh0dW6PRsGqr6XSqVqulXq+n3d1da8DIGGBIYBR9yj2dTqvT\n6RhzzPh6vZ5ub2+VTqfNDrG3T05OYvqaRqNhFyhvbGzEpABek3RxcaHxeKxisWjsmdfs+AKp8Xhs\nejJpUX3Z7/f19u1bTadT1Wo1NRoNSUu92uPjo7GDvvCB4hWfqeBzSAzQXPobQPh39LPsb9KjsLGk\n8JjTfD6vp6cnE3/zOa5SWltbU7/fj2lqT09PrZ0KFYOcw71ez4oLuIMVZml7e1vv3r0zFnK1dQxn\nP4wqa41Kep8hW0XC56P/r/CHP/xh7juKS/HOvb6qQVrePF0qleyaAg5pShlJAUBPSktdFNVgXliI\nocO5QTAnKdbrhJQXC4VUIY6d19r4sUAN+2oPxoTjxBxQiVUoFFQoFGJ6Hi9EXK2k8BUHOAYYCyp3\nvHaMPjg8D/obRKS+vHY+X5TPYpg5UL2zhR7M38pN1RYb1lfYcXijWfFCVRYwug1f0dfpdKwvjK+M\n89oSxsq88Tv0+2Fc/AwHGI0RhxsVHVT7+ENwfX1dg8HAel15fRhOFWlRUr9Q6rxn5hCaXJJpPxD6\nstalxdUc3nm+u7uLafJwmK+urnR+fm4GAz0S2qDVi1RxJEajkY6Pj23d7O3tqV6vm0aCdA7PjbOO\nQN47SzjfpDh8FRWaqcFgELsf7MWLF7aGWKf+3V9cXCiKIkvdMAacUu/o+3TLcDjU1dWVXr58qaOj\no5hg26exMZZ+TpELSPFu2uPx8hoJAhMOV+4lo/LHH9TVatXaXuBA4GSVSiWrTuNqDap2EeJj3Lmv\nkDn1lc/VatXu70NTRnBRrVZjmlKq1Xgf3rn3GstEImHPyTlJawPmwwcuqwVEjB8ZA2lFr6nhwu52\nu22OEHuxVqspkUiYpsj37aLgYWtrS+VyWY1Gw/7e4+Ojzs7O9P79e1UqFX399ddmvElNzedzC1p5\n11EU6e7uzopkstls7Cqf4XCof/zjH2q1WlZJzbyVy2VLaW9ubprGdTgc2rUrs9nMzkVpmYJFD+xT\nR/72hVwuFxObJ5NJczbb7bY2NjaUTqdj/dem06lVVfOM0kLE7SvLCYwl2XdSiOXtF1WO8/ncbNT7\n9+/tc999951+/vlnnZ+f69WrVyYHmEwmVrFYrVZjmkNsGmelT7NeXV2ZNhDNH2NAZ8zZtrW1ZWlP\nbFK9XtfNzY1++eUX2/dc+5RMJk1Ty3qChFkV1EsLG/LixQu122396U9/+vwCS30hRmptbc0Wva/4\nWl9fj/Ur8oyHL/30bAaHBRvcfweOEwI0aekI0HuD7/Flk2xuX0Hm8/ZeaLyqn2KBIub0wlm0PkQX\nfCdOXDqdNgbJN5dDI5FMJnV3d2cGmJyyv27Fl3Hzt7i80Y+D/49oCPEq33tzc6Pb21tzGLy+SFoc\navl83iJDPnd8fKwoisxArGpO+BuUOktLUS0OnXeqiXIxGNLygKbBo2986ZvjTaeLcuZUKmVVSowB\nbRxrw98Ej1OLRsUzefx//X5f8/nyMlA0DGhyJpOJNfXk/Xujg26En2HEiIo51HC40On4y3ARB6dS\nKTUajZgmDQ0U7CG6BklWyQhz5LUwtVrN5gxhOIfpcDhUNptVFEX6+PGjXr58GdOlsI8ox/fsEUYf\nFpDnxCHDEfHaCwpMBoOBGVPYK94xjrkXpjJvu7u71vx1Z2fHnBgcvnw+/5kDRrk9/02jWNYG0Tdl\n8J5Rf3p60qtXr5RKpXR1dRWr6GT+0ZH4829nZ8cYG9hHSeawZTIZbW1txYpzuMQ5n8/bO2Psm5ub\n+uMf/2hOt69ikpZnA/uR8U0mE9NEnp2dmZPC2KlalWSVxexJzksKMWhPwDhms5lVl/p1ivaRJrj+\nXEAjx88RiEuyd3t+fq7T01P9+uuvtk8JKJvNpumT/HnCBbgEYN5phhm+vLy0fSVJ+/v7kmRViVzS\nzXeyBrPZrAqFgmq1mj1Lo9GwSt9+vx/TjsFKYavY59ij+Xyu6+vrGFtDtfZgMFCn0zG2knfDfqvV\nauakMEaCx2KxqFwuF6sQz2Qyarfbury8tHXGvPG7vrCJYI++VNgwX0jVbrf1888/K5FIaH9/X6lU\nKuacszZWK1ZhqllfPhNBQOIzUfysXq+r2+3qw4cP5tijGbu/v9fXX39tLVXIPDHfZJl8wCgt+l35\nc/f38EUcKV6sd5a8ANRHCR7Q8CxWaekskbrxgjWMCYccDSH5GVVLMFK8RDxvnomDQVpS6kRglO1K\nMuMLwzSbzWKfI6UAo8VhQrNMIkSfaqI6hMh0MpnYRsRRQ8BNVAuurq40GAxULpftQPKVPbAgPL+f\nc++g0UZAWkZK9OfxY+RAvr6+Nq8e8JzMi4/6+S5SDr6ij8ibCAm2h7mRllU6ns0gDeK7gnu2CoaG\n6MynYPl9L+Zk3aZSKe3s7KhQKNidVdLSIYBBHA6Hlu6QluJvbj/P5/O2hjlcMHoIM/2cEmV6VgmB\nPv+sVCqWFoJRXF9f/6xhIVE3hg/hvCS7ccALSZlv3gtj9OwDQk0ON290cRx8xZhvnOqDFtLYkqwn\nTb1eV7lc1vHxsfWiqtfrxprgTOHA8s5SqZQxdDQ2lRbCcIKZ1QDOV1nCkHpDy7/Dhnr2m4o9qjc9\nfOd932NqlWH1UTKXa1NE441JsVg0doGzjLU4GAz04cMHOxP93/BVU2trazGGiRYG9GvyDBWGnBYO\n7C//7Gtra1ZtNhqNzEg9PDxY6s2LwKWFker1etrZ2dHu7m5M1sHav7i4ULvd1uHhYayaud/vx6oM\nWVPMCeyXvzOR85ssw/v37038vbW1pUajYa0OfLuYx8dH5XI57e3t2VlzcHAgadE24uLiQgcHB/Zc\nf/3rX+3db25uqlqtqlwuq1ar2edgx7BhVIIy9qenJ/373/9Wt9vVV199Zc7Z4+OjOVc4jY+Pj3rz\n5k1sbp6ennR5eRlLpZKi7Xa76nQ6+uqrr+w5j4+PLcCjnx7I5XJKp9M6Pz+3NeADpVarZa0/fIFP\np9MxZom0N2xVpVIxSQupQtjITCajfr9ve2U8HltWhH2Gffad67e2tpTNZu08ffPmjZ0BOLG8y3w+\nb5/DtsDCUpAgLc6LQqHwWR8+jy/iSElLw7BaTUeE6L0/r0WRZAyUpFhUwMHBYYdhJQLCuPjPScvI\n1lfu+FSA/11K2HnOVCplC2NjY0O9Xk9RFJlh8uwBDgRXl3iWLZVKxQ5aTzn6hURuXIrrQGARfOqO\nAwWD4Z0byvFvbm60v79vz8ff9NV62Ww21tuH3+PfMV48697enu7v79XtdmPMCvOcyWRMTyDFW/6T\n5vOUMiwfhhNnAmPqy/R9+wMOKFJ0bAR6LvE8/A3WAhEO0aq/uBSnpVAoxFI7OBZoJXDQvd5HkqXD\nRqORbWK0X8wpjirrTZKxlb4ykxQpTCjODHOKgUGz45me8Xis09NTiyz5WbfbVaVSsaaD3qmCydjc\n3NSrV69ipeI4it4I+opc5pFUnq9mJXIkKmW+ifRpnVAul+1S4tFopHq9bqkftFnMDbokjGkymYyl\np5PJZKwa1AcgrEVYUNY3jiLVXL7FA3uPYI10DOPHMPr+PqwpHEbmh/3ty+ZJA/n9S4n34+OjisWi\nORn9fl9nZ2exNKgPaljz3lGSZC1SZrOZKpWK5vO5rW+MDAEk/+N7PfOKDsr/rFgs2lUng8HAzsft\n7W2bS3SrrCmY0mw2qw8fPkiSVYNxsflsNrMGioyfliewO9PpVH//+99tHD/88ENMK8Waurq6UrFY\nVLFY1DfffGPMBXNKehVHAaaD88obaJha9FS3t7eW5oLl2t3dNe3PbLbo1M/az+fzxhaxdxnP/f29\nvQ+qoX/99Vf961//kiS9efNGe3t7lm73QeRgMLCbMzqdjtrtdkw2AAt8e3uru7s7c8B++eUXTSaL\nZrTdbteaFUvLimQyF94BOzw81Pb2tq6vr+1KMt/nrNlsmmNN5oWfkeYmJbxaOV6v11UqlfTbb79Z\nWvjdu3c6OjpSOp3WxcWFOp2OzTeMKHKN1bYzrP9EYtGTkO9EcuOD2FV8EY3Umzdv7I96r9azVJ4d\nyWazajabajabdhXKKiNFPtl/bnNzM0bT+V4eHGb09PG0OX8TI8LhIC2vOaGkFO9dikcY5IN9SwGM\nKekkL+QkDUa6yIvicQRJOWH0cbwwpOSbpeXhiWCYnileIEg39GKxqHK5bAc/hobfY76AT9PA4PA5\n5nI2m8Uas2HwPdPCd2PQ6NQNCyjJ8tnj8ViVSiUmusTB4iDzuhxvCH27BsbkmcHZbBZrrocjiYbA\ndzDmbzDnvj8LzifsTyaTsUODd8PcIZDnXfnrUNDZSDJDAgNA2llaOrw4qYPBwETMk8lEzWbTnHDK\nhaVFzn82m+nk5ESFQkHFYtHGkU6ndXR0ZEGIF2JDfTMOr4GDcUokEsZUcngzxxyI6+vrsdvacZIz\nmUyssaJPAxLt8g4vLy+Vz+dVKpVMlI5j7PcbLC3sjbRwGCjEYJ/jMGD4WNe+mSNGiBQsncj9+ycl\n6tPtOMY+bcm6YO1ykPvmqDC1fD6bzZp+6vr6WtVq1fYbjLUkffz4Uf1+X5VKxXRABJ6sJVh6f/6j\nb/RzBDCQPgDxzhntOXq9nvWfYm2Q1vPCZu8s8844N3CW7u/vTRf38PCgdrtt5xeSBa+VI/2Gg8V7\n297etu+czWb67rvvNBgM9N///tfmhGeCASSI5NnQ5uKcE4Qyhul0qlwup3q9rul0ao2BOUvp7l6p\nVIxZgsWhaMQLuHk3tIB4//69sc3FYlHNZtMC0kKhoMfHx1ivLNjLFy9eGLsmLdiV+Xxua+ef//yn\nrUlauLBPYCiZ70KhoGQyqX6/b20kpMX9hTBLPvCQZFf4UDCw2sKhWq3GdHO+8IFiGPYSNmhjY8Ma\nvh4dHalWq9m8+X1Fip3zq1Ao6P7+XgcHB0qlUjo9Pf3MzpF9effunc13rVYzfe6PP/74u/m90P4g\nICAgICAgIOCZ+GKpPR/1A9gGSpuJaGi5f3h4aBGBb8znNRt48ZKMHYCN8lQeeWUvOvNVe6RQoBzx\n2onI0WNR0isty/FzuVysek5adv1GaEyULS3ywVDl5H6JoHzFIZczetbJ642iKIoxPgjCqTxCayUt\nrzuRljQqgl4odbQOiBn5XaI92ir4KMIzUC9fvoyJJ2FbEEaSvoI59NoRvyYon+U9ep0KrRRWq0Bh\nfohMYG2kZSsGnseLi3lHjMVXdMG2sG589QxMBu+e7t6rVY+k9Gq1mr0rWC60G76qqdFoxNpf+Co6\nUlCsL0+pUyk1Go2UzWZjFW++ioW55DlhsHhnPsXu0zXMtW86+fT0ZCJjr+fyjANj5Ge+mSzr2N/R\nxz9hBoloi8WihsOhMUFeD8TckJaGWeTZ0UCORiO7fJWIljWQSCRUqVS0vb1tUXm32zX2qFarxdIU\nrEvSML6CbW1tzS7D9oyetGAc2dPZbNZ0UcwvImXSf+g20JewFhEC8956vZ4mk4mq1aqlzhib36/s\nE9Youin+vmfj8vn8ZzIBXyVJVV+pVDJmivfOheykvP0NE7DniURCJycnxpx+//33ur+/V6vVMlaN\nux0zmYwxmA8PD1ZJJi3L4/f39+09UdGYSqXUarUURZEymYwqlUosnba+vq5Wq2XXbXnRNZeHYzNW\ndZwHBweaTqemS5IWtms8HhvjynnIvMCc3tzcqFQqxdJl7AW0WrQbqdVqenx81MXFhYnVacgrLbRO\n19fXlmbE1krLNgYwvWRBpOXdjsxrJpOJMTU02iSteHZ2JmlRFLC3t6fvv//e9pIvwkBPxt737Ws6\nnY7K5bKKxaJubm7MBlEhy92kPo3smdS3b9/GtIpktWazmYrFog4PD40dbLfbKpVKdu+fP4Npm0Cb\nlL29PdtrnU4n1ij29/BFHCn0PF4czMv1OUsqRprNpnZ3d1WtVm2heR0Uh+Jq7x5+zu9jlKVlB2MO\nAwyOtDhsqPJCk4Oh9v13cHa8PgD4tAv/jfKfNKJPM/qOsqSpJFnPHp+DZuzoiXheUiSMi3nx6VNA\nmoV0kn8en9pjzrxjiFPKeHylpbSk6P1GJP+NaI+KM0km2vYlyjwrjiTl8b7s2Au5MZReNM6cYcAw\n3qQg0bj5d0cVG04oTiZrBiMZRZFGo1HMsHuRsNeU8IwYGsbAoVGpVLS/v6/xeGwpKl9FRtsI//x8\nJ3uI9UOKCoeX52Wt8E7b7bZevnypV69exQwiDgn7yd8diGH1zhxGyAv4STXxOa/V8gJ/aXkZKul8\nr9sh2MBh8p/jviz0UxRvMP7NzU29fv3aCjF8FSHp+XQ6reFwqPl8bmuYzusIWqVlpWCz2bR0DC0r\n/NrAseVZ/ZnB2m00GrHziTmjapYUGWufefC6KRBFUawTOs56oVDQTz/9ZOl8f0m0tLy5IZvNmjMl\nLYwsvXS8/k1apso5f+hkzrPidBDclEolGz9BK+ezD5IfHh40HA5N3Hx9fW3iYAqESH1vb2/bM83n\nc/X7fX369En/+c9/lMlk9Oc//1mS9O233+pvf/ubvdurqyt7lv39fRO8U8HH/F1fX6vX61ngQQWY\ntDijRqORbm5uLDDlO0ulkg4ODpTL5ayS0t8GMJlMtL+/bxd38zPGiXgcHZYk7ezsmGHf2dlRs9mM\nVd1GUWTONSloNGKlUklRFOnt27f68OGDisWiOaC0wuh2u/rLX/6iy8vLWNsM0Ov1VCqVLA25vr6u\nXq9nleOz2Ux7e3uSZPo8HHgf4GF/0Ce/fv1ax8fHkqTffvtN+/v7pv3LZDIW0JBibbVadmOCdzLv\n7u6so/unT5/sZ1QSl8tl9fv92B6tVCrWfoKqeR+UTyYTs0tessJND6tBvscXcaQwAD6CRI+Bx5/P\n5+0FHx0dqVQqWdWbP/gwOl547V+i1wBgcKRlybkXY3v2yH+nd14kmcYDLYuvvmLRUEHkI3iqmWA6\nvBeNsUBfgdHMZrNmFEejkW1saeFhJxIJE/H5y2Dp6USeH20KefT19XVjD+7u7jQcDs1xLRQKiqLI\nhMVedOpF1Gg3mBsqZdDVJJNJE/pxsSq9YbiZW5IZNd+rZJV1KxaLarVaJnjk/cIgrYp42WgwEl4D\nx5hon4DWSlocamhn6FPCoe/XmL8+iDFgfDDCvE/mBp1QOp02po/P9no90xkkk0ljOemngnbL9y3z\nwmzGxvNQHcPfZQ74e+gO9/f3zdBJS90Za9M7WRSH4LzxvKxhHL7VfegLQFj/vsEtBpZ97VkO1jzP\n7PuxPT0t7q/b2tqKNQCVZD2ZqIJj7nlWxjAajWJCUuZ3OBzq+Pg4psekMo3rb3BUWIu0i1htYYEm\nslarWSNLH3TBcMFqegaPv1Mul62VA2ux3W7b3+eqDJ6F99bv91WtVmPsJ3osKX4mSkstkGd7JVnA\ngGPD//gs+2w0GlkLBOaUwJnz2/dE4qwrlUqaz+dqNpuxYJerbjxjydpgDb5+/VrpdNrE3/P53Kq1\nYLN5/lwup9FopNPTU9uLvmGptHBwGo2G5vO5vfv5fG6OPWcgWj5fsIF2lv2Lxomg0velQ7TuGW/W\nfq1Ws7ONd4GTcXJyonq9bvde4qQwN9wZyN+cTqfmvO3s7GgwGKjdbhvb46uSh8Oh6Sa73a6tE/RT\n0rKdBU7W0dGRstms2RyaB/OdvEscSeaNZsJeowiZgQOcTCb1ww8/6OzszMb/8PBgVc8EugQY+Xze\nAn2cTMbHuQNp4jWeBPIEXv4sqVarsV57v4cvIjYPCAgICAgICPj/gCA2DwgICAgICAh4JoIjFRAQ\nEBAQEBDwTARHKiAgICAgICDgmQiOVEBAQEBAQEDAMxEcqYCAgICAgICAZyI4UgEBAQEBAQEBz0Rw\npAICAgICAgICnongSAUEBAQEBAQEPBPBkQoICAgICAgIeCaCIxUQEBAQEBAQ8EwERyogICAgICAg\n4JkIjlRAQEBAQEBAwDMRHKmAgICAgICAgGciOFIBAQEBAQEBAc9EcKQCAgICAgICAp6J4EgFBAQE\nBAQEBDwTwZEKCAgICAgICHgmgiMVEBAQEBAQEPBMBEcqICAgICAgIOCZCI5UQEBAQEBAQMAz8T90\nn59+FodZjgAAAABJRU5ErkJggg==\n", - "text": [ - "" - ] - } - ], - "prompt_number": 2 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The convolution weights are initialized from Gaussian noise while the biases are initialized to zero. These random filters give output somewhat like edge detections." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# helper show filter outputs\n", - "def show_filters(net):\n", - " net.forward()\n", - " plt.figure()\n", - " filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()\n", - " for i in range(3):\n", - " plt.subplot(1,4,i+2)\n", - " plt.title(\"filter #{} output\".format(i))\n", - " plt.imshow(net.blobs['conv'].data[0, i], vmin=filt_min, vmax=filt_max)\n", - " plt.tight_layout()\n", - " plt.axis('off')\n", - "\n", - "# filter the image with initial \n", - "show_filters(net)" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAAicAAACbCAYAAAC5xzv6AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvVuMbVl2pvWvfb/FjkueW568VN5dXSUbl4sHbBBYbYRK\njRqEJW7qfkD90MItN4gGgQC3QHYJiwdejJFfcNvgRtBuaBAPyA9gt5FBcrnc1bbLVemqPFmZlZdz\nTuaJc+KybxH7sniI8839rxFrx4lMU7mjKveQQhGx97rMNeeYY/zjH2POleV5ro1sZCMb2chGNrKR\nqyKVdTdgIxvZyEY2spGNbMRlA042spGNbGQjG9nIlZINONnIRjaykY1sZCNXSjbgZCMb2chGNrKR\njVwp2YCTjWxkIxvZyEY2cqVkA042spGNbGQjG9nIlZJPDTjJsuyHsiz7x1mWHWVZ9jezLPuVLMt+\n7vF3P5ll2TvrbuNGNvJxZKPbG/lBlY1uf3rlUwNOJP2Hkv6vPM/7eZ7/13me/0ye518uOzDLsrey\nLPuL36uGZFn2lSzLXsmy7KUsy/4wfLeXZdn/mmXZ4HE7/s3vURv+8yzLfuOqXm8jH0m+X3T7Z7Ms\n+2qWZZMsy37te9iGjW7/4MiV1+0syxpZlv3q4/sfZVn2tSzLvvQ9asOnRrc/TeDkM5K+ccljc0nZ\nx7lJ9lgu+L4u6fk8z9+Q9EVJfxgO+W8kTSTdkPRXJP1KlmWf+zht2cinRr5fdPs9Sb8g6e98nPtv\n5FMp3w+6XZP0XUn/bJ7nfUk/J+k3syz7zMdpy0YeS57nP/A/kn5b0kzSWNKRpFcl/bqkX3j8/U9K\neufx378haS5pJOlY0n/w+PN/StL/K+mRpH8s6Z+z6/9DSV+W9P88Pu+lC9ryBUm//fjv/1LSz9h3\nXUknkl6xz/47Sb+44lqZzibCW5LuPz62H5/Jjn9L0k9J+tLj+5w+fsav2XP8oqTfl3Qo6X+TtPtx\nr7f52ej2iuN+QdKvPeG5Nrr9Kf/5ftRtO/6PJP0rG93+c4z/uhvwCSr670j6a/b/r0n6+bIBlPQd\nSX/R/n9G0gNJX3r8/z//+P+nTDnekvQXdMZG1Uru/289niDDxxPhkaTp40n3UGcRwhckDcN5f0vS\n/77imf6apG9LekFnwOZ/kfTfX6CU6bkk/Wcca9//Q0nvSvqcpI6k/1nSb3zc621+NrqNbofjv6wn\ng5ONbm9+vu90+/E5N3UGqF5b8Uwb3b7Ez6cprSOdp/wuSwH+VUn/R57nvyVJeZ7/n5K+KulffPx9\nLunX8zz/Zp7nizzPZ/ECeZ7/ep7nuzqjA39c0j8h6ev5WS51L8/ztyX1dKb0LseStla0669I+q/y\nPH8rz/OhpP9Y0r+RZdllxjXT+efPdaao38jzfCTpb0v61y6iO59wvY18cnLVdbtwyiXatdHtjSDf\nN7r9OP3zPzy+7rdWtGuj25eQTxs4uYxRLJPPSPpXsyx7xI+kf1rSLTtmZdX44yLXgyzLDiT9hM6Q\n7uuSfujx9f7dx4cOJPXD6ds6Ayhl8rQknxzf1Vn+8+blHqtU/Dm+K6ku6dqf43ob+WTkqut24bRL\ntGuj2xtBvi90+zG4+A2d1Qz+7AXt2uj2JaS27gasWVYpffz8uzqjyf76x7iW8jx/KGkny7J/XdJP\n5nn+M1mW/QNJv5zn+W/bod+SVMuy7JX8rPBKeozUV1z6fZ1Rg8jzOsvR3pf0rM4oPklSlmVVSdcv\n0d7nw99TnVGhw495vY2sR66abl/qeiYb3d7IKrlyuv2YpfhVnenNX8rzfH7BPTe6fQn5tDEnWfh7\nVQR3X9LL9v/flfSXsyz7F7Isq2ZZ1nq8xv6ZFddeJf+kpH/0+O8vKKxmeEzx/QNJP59lWSfLsn9G\n0l/WGRovk/9R0r+XZdkLWZb1JP0Xkv6nPM8XOgM6rSzL/tJjqvHnJDXt3HuSXgjUXybpr2ZZ9hey\nLOtI+nlJfz8/S05+nOtt5JOTK63b0plhzLKspbOgqJplWfOxsSyTjW5vBLnyui3pVyR9VtK/lOf5\nyROut9HtS8inDZzk4e/4P/KLkn7uMXX3t/I8f1fSvyzpP5H0gc4Q+b+vomJfBoH+mKR/lGXZU5Jm\neZ4flhzzNyS1H9/n70r6t/M8/+aK6/0dnQGX/1vSmzor2PqbkvT42n9D0n+rs2KpgYrU399//Hs/\ny7Kv2jP8hs4q4u9Kakj6d/4c19vIJyffD7r9t3Wmo/+RzuoBxpL+0xXX2+j2RpArrduPlwz/dZ2x\n3PeyLDt+/LNqj6qNbl9CsscVuxvZiLIs+x2d0aCbfSg28gMlG93eyA+q/KDq9qeNOdnIk+XK0Xsb\n2cj/T7LR7Y38oMoPnG5vwMlGomyotI38oMpGtzfygyo/cLq9SetsZCMb2chGNrKRKyVrWUr85S9/\nOZekCIyyLEuf+Xfz+Vzz+VxZlomi4tlspkqlcu68LMs0ny9XcVWrVVUqFdXrdVWr1XTtxWKRrn1y\ncqLpdKrT01Odnp5qPp+rUqmo0+mo3+8ryzLNZrN0D87ld6VSUa227Eramee5ptNpOrder6efarWq\nPM81m83SPfM8T8/Eb4R7rRKes1KpFPpoNBrp8PBQh4eHGo1GmkwmkqRWq6V+v6+dnR11u13V63W1\nWi3V63VlWabFYpF+8jzXfD5Pz5HneXrGWq2m2WymyWSiyWSSnqNarardbqvVaqlWq6Wf2H+z2UyD\nwUCDwUCnp6eqVCpqNBrq9/tqNBqF56ZPFotF6jPuxzP/0i/90lrpzV/+5V9+Itr39kapVCpPHOt4\njYuud9H5ktKceNI94/3iuWVz+Xsh3g7mELJYLAr9x/9IpVJJduSjSBwTtzVl7XKhjd622K4n3Q/5\n2Z/92bXp9q/+6q/m2LQyvfPvsixTtVpN8x374cKx3pd+jfl8rsViURgvPwZ7Uq/Xk82bz+eaTqea\nTqfJ5ktKfR6FNmHv8jxXs9lUrVZLdm48Hms2m6XjKpWKqtVqwZfQJv/b27xKoj9x27m3t6ednZ30\nHHmeazKZJBt+enoqScln4TvcB/A37XBbmee5Wq1W8hvIZDLRdDot9L/7JMa13++n/qXfR6NRei7v\n78ViUdCLarWa/pakn/7pny7tpLWAE1dIHsIH1X/TQThHHKJUBAeu6NVqNSkeA7JYLJIiRwDkk4iO\n5LjJZKJKpaLZbJaMigMAfrvB4brT6VSTySQpXLVaLYAkFMCvw/O4cG2fqP4MgKdqtZqO5X7dblfT\n6VTHx8eFezl4wtEvFgu1223V6/UCOKH/mcDcs1qtponLM/mz0GfxWfgNMGPSTafTdJ2Tk5PC9eKk\nbzQaaVLw+6rIk8DCRd9F57nqWv4Zf18W2JSdf9G5ZQ6pXq8XxiuOzyq5LJC66LndgWHoIjBxo4qU\nAZPLtCeCCZ8HZeevumbZXF91P5ePMq7fK8EeRr0ps9m1Wq0wZ7FLZcdK520ZgRzAbjqdnutvArvF\nYqFGo5HscrvdVqdztq0HNoa//VkcmEhLH9BoNJL/wLbQ/2778RMuEZSUjRuflQE7+rjZPFvpe3p6\nqmazmYJnAl138LPZTPV6PbWTfvdAzn0e7SSoZKzoW9oT/Ztfl/H0gBj/io9+EhB3gLdK1gZOVhkz\nV1SUCCVzpFn2wN6pDlBwgigyqDse32g0JJ05xvl8npwm1+ZcQI63l78BOKenpwUk2W63VavVkqJE\nR889eD4HMPzmud1ReLQSjW+WZUmpa7Wams1mgb0BOTP5K5WKTk5OzvV9jHrimM1mszQ5nOHiWb1f\nYl8xrovFIhmDarWqk5OTwiSj78oM+3Q6XRkdfdKyKk16Wad8GWCyShys87dfo8xYOuux6l7+OYbp\n9PRUW1tbOjw8TON8ERtQdq3LHFfWB2VO0Z8FvYlOpOy8y/Y38yu2LwYL8RnLQJIf/yRgcxWASZmU\nAVYPPNzxuO2SljrkOkp/wELX63U1Gg01Gg2dnJzo9PRUs9ksARJ3qOieO7xKpaLT09Nzjpd7u45I\nSmxtp9PRcDhMPsCBQ5n9KdMld9jeXxEAcGxZH2APZ7OZxuNxsovYSreLbi+jTjMePkawHwBAAnhn\nx+O8oG3z+TyBwQjEvH1+Lvf38XeGfpWsDZz4gJYZT0fdPKArFY4Qx9hoNAoUn6QU2U8mk5Q6kKRO\np6Otra10DgKFxjVRjizLEtXnk4BJ522ez+caj8c6ODjQu+++qzzPde3aNV2/fl27u7vn0iX+TGXU\ncewvZy3cOPj1XHmhODudTkqz0KeuhDAPtMEpc48cmCTev0wg2A4HElGBXQBP3q/uaGIqD8PlkYBH\nFFdB4qS+rJQ522j8Vzkqp7WlYpRYq9XOgcuyNrtReVLKIcsytVotjcdjtdttNZvNRAf/eaUsOl/V\n5pjCicwg7fUo8qLrMqcjOPC+LRunVePs7Yu2bdVzl9nCqwBQyvTanRbHEM27vfbghPnbarVSSt0D\nT/qZQKrVaqnT6Wgymej4+Lgw7gQ3BCcEYNgp/IPbGA/wuFej0VCn09He3p729va0u7uru3fv6lvf\n+lYBxHiKxO1wTJdIRV2LrLrP5/l8nnwLwIE+nE6nqX9w6O7geVaCT+4V2X8HAbBSLm4jYGHo0zKW\nCyAzHo/TfX3uR9CBXjAGjUaj1B9EuVLb18fJyISI1BPoF6Wez+caDAZqNBqpbgKFAlkS7ZFGWCwW\n6vf7Ojk5SU6PHwbdmQRH85ERoO0MynQ61d27d3V8fKw7d+6o1Wrptdde087OTjrfjZ1UrKvxSYRE\nJ1+WYuI6brABE51OJ01CFB82yettOp1Oaluk3SKIoH8iwPJnojYIVio6UcaI7xnvmNPlbyYq+kBE\ntSpCXaesAiqr2IB4jn+HnkfnBT3rn7luODApc3CrAoVVMpvNUkqtVqtpMBjorbfe0quvvprSPE8C\nQ2XyJPamTFzPpdWgCp2LYCY+r9sc7AzzJ9okjnlSn5UBi4vGn+8Z2wgA1iVRT8qYImcxnBXlWBwZ\ngQ/znDmM7s7n85TSkJapFmdxZ7NZsj1+H2cYCJy4p1RkNTwYqtfr2tra0u7urgaDga5du6Z+v6+v\nfvWrOj09TYGXAxO3ac7oRJ3it/cZ/9NWB0C03dPtMWW/vb2tWq2m6XSqo6Mj1Wq1VHsSWQq/N+1E\nt/BZHijzTA5u6DcPDGkn13Uf7deJ9gUfi/1eJWsBJ2URR9mAOgBAkRytukIvFosEOnB2gJp2u63h\ncJgAyng8LkTmlUpF3W5XnU5H9Xo9gRMv4qxUKmo2m+doMB9Q/j88PNR8Ptd7772XFPj1119XpVLR\nj/7oj6rVap1jI4j+YW98Mrlxc9Dlfed0JROW8+r1umq1WipO5RiM9sHBQUpB7e7upmO8FofnRdEA\ngEyK2Fan1RlTV+qYMuK5pSU4WZXC4nz6bTwep/OugpQ5H5fLOOA47tF5oSuec45swpPuA7BBd570\nLO5QpLMxxaB/61vf0o/8yI/o3r1758ahjBW8yDnH+z/pWdyOxGfxttBni8UyP17WRgciRJVlrN+T\n2hgDDI6Nx7lee/uvCjApk7JncGePrXb7TP8zt3k2D0QAGpPJRNVqVVtbW0nvtra2ku12Z0ywhW5i\nv6QlYI3sh7c7z3P1+33t7u7qzp076vV6Go1G2t3d1WuvvaY/+7M/03Q6PcfoxYJsrue/yySCOMTZ\nFQcMkgoByNbWllqtllqtVlqEcHx8nOYzz4ofAQB6mguB8fB+cWDjwMnH1ot30XPaXMZ+wtjAiNVq\ntQS6VsnarPmqCer/0wH+PYpdrVbVarWSg3MmAjbAacTt7W0dHBxoPB4rz3MNBgPl+VktCJG9MwMY\nfgdHeb4szooUtg/u0dGRPvzwQ12/fr3Qtq997Wvq9/v63Oc+l5AmE9cLQF0hHAVHZfYJ6MoelcfT\nIdKyGKler+v4+Dj9gIxZJZPnZ8VZAMHJZJKcGSxVZLicQncj5Mg6gjoHOzxPjGzoF66d52eV9JPJ\npJBvvQqyiv34KOITvuxaDhgwzBgmJj46EZ0wv8uKBS9qN846y7IEZmu1mkajkV599VX96Z/+qV54\n4QWNx+NzLAXyUZ1tBFyrjnHdiM/qc4h+jaDM+9vBAde5KMXyUcbWxy2yZdgDTx84wL8K4kDK7ZMH\nE94fMZCEiYYNKWOhOe/k5CQtSCAVlOd5Ykyk5Xh3u10tFot0DHaGa/mqFPwCz1Ov13Xjxg29++67\n6vf76ZkePHigl19+WScnJ/r617+eGENJKYD1e5WB8LL+879dD8rAidtM2I56va5er5f+h0Vqt9vn\n0usANoABLHNkEGMaKjInDsRoN230AFM6X/vJMVmWJQbfGaxVsjZwsspI8dBx6arTS16Y2uv1VKlU\nCstxpSV9RwdDhTmDMhqNCoWwDBJ1DL6s2BXaC7OkInXGtZ3+ZnBarZZ+53d+R9vb27px40YadBSG\nNsQIRFqmfWLNhRsCn3z+nSs9AmKdTqcaDoc6PDxUp9PRaDTScDhMFP3Ozk6KMukPnE+r1Up9FyMU\nV2D6gHbHHG1kg5xyjODQQd14PNZwOLwy9SZl8iQW5Unn+AQuG/PIapQxIe6cy6K9J4m3oVar6fj4\nOBW08dnJyYlee+013blzR7du3UqRZtRJb0vZfSKYiueW9eWqzz0d48EEn3n9lKfDykBVbIvfO0Z/\n0dCXtdXHN9a5OO1+VdKVcT6uAiZxoYGk1M8EjNgRZ6W5hzu6xWKhg4ODlN7xAAfWhBWJsGHScixd\n19BFZ2l4DtrMKh/sb7fb1f7+vl555RV98MEHOjo6KqT5ELdJDty2trZUqVRSAOX9dhkh0KDfZrNZ\nWlgR9YVAsd1uK8uylLL3gBOGibmAzXCGJj6b+1//oZbRAX0ZuOY3qRxA6WX6YW1pnbLIAeXE+Xva\nxsEJBbCgQtIxdLIPihubfr+vyWSiBw8eFFAvUTv0GPdm8HyZFuKpE58Ep6enGg6HiUrziKDVamk6\nneoP//AP9VM/9VPn2JgITBBWDh0dHanRaKTalWgovD9daWjrycmJ6vV6YovI7boBBOgx2Q8PD9Ne\nLyg97WWyMOl9nBAin0j3emU99CMKHp2rK7IDwNFolIAg7MtVk4/D5jggpT8w5mV7zUhFJ4lOO+D1\n65RR2xe11Z02UZh/x/UfPXqk3d3d0rlS1h/cNxbVMedie1a1z/UdXYxRnt8vgodY4M6xq+7nNmVV\nSuxJUSHXdwaRz914XwVggkR2QzrvgMrASWREAa5ZliW75Owv18uys4J59AqbBLCsVCppXxIvJPW2\nOcDzFIQX7WOXHWQ6ADo8PNQP//AP6/d+7/cKwTHX9lU93BeGoNlsqtPpaH9//9z2CNzHWTmENDwB\n7uHh2bsGm81mYkAkpS0Y6NsI0PCVnkWgX13oE/ezPgdIoTN2+BS3KVy7bCybzWYqZ/Ag5KLAcq1L\niVehJx7cnbsPIP+jcKDySuVsVUpc0uQFkxQ8eWEQyBy2hOO5NgPEPWO+2qvOAREuXItamLfeekvv\nvPOOXnrppcIyZ681QbIs02Qy0cHBQapYn06nunXr1rlVNyhEBGW+YinPc21tbaW86nA4lHRWdNbt\ndgt0IaCIPCGrMlBO9nBptVrnDKwXvcYo3pUVwxLzm4yBF5pxzOnpqQaDQUoz4bivas3JR2FPykA7\nfTmfz9P+ItFpAdiazWZhXwhnL8qcs9/Hj4/HLBYLNZtNjUajwjgxF/is0WjoO9/5jl599dW04V98\nNn9GB/+NRiPNhdimi8TBEPoSlyo6EPJVYJGd8H5dBS4uSvEgzPkI2Mv6N4I5b3ds/7olOmHXmfgc\nHIPNpJ/ZyoDjDw8Pk+PmGH9e3wQNB+k2we8LK+sRvKTkVNEL199Op5PASWTJmUfb29u6fv267t+/\nX7DRzqxH0Oo1H1tbW6XLoMv6F/vY6/XUarUkSe12W6PRSJ1OJ+ksAdpkMkmZBEAHARvp12azmZ6b\ntK+3x5/BWVZp6fdIZVHbSb8yn/Cl/DgAAqh5H/sqrTJZqzWPhqfMCDnIkJSQNx2G8wbZbW1taW9v\nL6FbV1KQGh2HAjlz4jUidKSnI/icamPoOtqJQfYVC6BZihdbrZa+8pWvpHbGCNcVnPMpaJrNZikF\ns729nZzCRVEW4Obg4CBVd3e73QSMqtWz3Vy3t7fTzn/0qRfH8hyHh4eFfQdA0J7S8bFzOpDnYh8B\nIgxQPUYBgAIThbHnc3ak9Wr8i4qrPilZxWRdFqCUHeOpEXbR9XtFiWm9+Pmqc5/UvhhdMlc85ZPn\nuV566SUdHx8X5k+kvLlfTK3Ezy4q1uX7sr99eaWPCY7Ha9T8ntHJuETgclG7uGYZExjnt9uKSKl7\n361TygJJt3s+z6MNkJbzE6aKbRxwjMfHxynY8Wt3Oh21Wi2dnp6mFIdUXDKLzYTNRTxlBiPMs/h4\n++IDxNkZ2JMvfvGL+t3f/d0CsF0F5ofDoba2tpIPIvijPs4Z0LJgIcuytLqStpElaDabyR/AZE8m\nk7RNBM/M3PQlvzGthb67/+Bzzxigf/gAgnEHNwQYgBDf0M6fId5/lVyNUNMEJXZnE42aU2mLxUKD\nwUCj0Uiz2Swhyd3d3YQ6XZmOj48LkbbXr0Sa2Vf8OICIjIu0rDRnJ9ZoVFBQPj8+Ptaf/Mmf6Id/\n+IcLhYw+mRBSOe+//356DlYE0RaYmbjXC0rHLrGj0UiDwUDtdlvHx8eJTen1enr++ecTc9JutxPQ\n8sIqKEKvDi+bpB6JxsjW0f1wONTp6WlaLeVGmPF0YOaOmijJXwmwbomsR/xcejLl71EM53q06sAc\nAbhBkT8JiFwkcQw5lw2x/BgvHsdYtdttPXr0SM8++2xiQlaxpW74ynS/DCz4/57Omk6nqQZqNBql\n+3r/cB8cV7wHz8hcjP3ubeBzZ17KWBX01schGu04Xv78H3X8vhcSwQbithEpq2vydFWlUkl7Lrl9\n9GthTyuVSmFvJt/XiPGmj5xhdJAEQKC9nuIAADjzTHs4D1A+GAz06quv6s0330w6X8ZM8kz7+/sp\nJQ6r74XAnirx1D7PwHUbjYZ2d3dTkMozTadTHRwcJFs4HA4TewKj7PpJCkwqrsjxOci9OZ82tdvt\n1D+AJHyu22KCXeyTL8xwpsTLA1bJWpcSl004qDaQ7Sr6KyqxVyk3m820vNTpYwbWV/I4OHFjxiAA\nTkCMzgI4WvfNfvy+7mRQqkajoa2tLb3++uu6deuW+v1+ul803NKZcet0OnrqqafSUjqiDBRDWlJn\nbEGPOFW3WCz04MEDnZycpM1wFouF7t27p1deeSXlKD3qJf+JEd3e3k60oufH3WH4+nX2hwE48R4d\nH7/T09NEsUpKBV6+F4tPaqhPwIkX565TLkrp8PdFwEQq7o0AG8H5q4SUT5wzq0DRKsATgaY7UV/O\nyLnOJjIWJycneumll/Tw4cNzqbYyRwujiLgD53tvY+wrImCCBQASbWw0Gjo6OirQ+R51e99Sl+Wp\nhCjOEHrQMpvNEqiJ876MUYvpZ38mB5/rBiYuEUxI51dv8NsBXOxzfzaPvCWd00NAijN1HuC4TvC9\n38tTOlzfA1Lvf+w97cGpSmfg9/nnn9fdu3cLjtl/JBVekwJrwfJZxhVg1Gq1CiUBrh/cd3t7OwVp\ngJLpdKper1cAcWVsHs93cnKSGJdY08RvbDx6zSZwzI12u53OZ9556hI/DChxP8L96DPf22aVXLnt\n66UiQHGH51GEG9o4URgkf3g6BJoVFgCwUMY2oJwMJk7RC2ERFMyLr/xYR7A+Ed9++2392I/9WFJo\nABGTxCu1O52OKpVK2s6Z1UZ8P5lMVK/XNR6P1ev1CuCD9h4cHKRnRrj3/v6+nn766TQGTq3TNz6x\n3QBHQwwy5jsKcaFyffURE86NuqN1trEm1+lRASm2uNvvumQVY/Jxo98IMhyo+GcOImKUGKN6b5tT\nu9L5FwF6VMw9ABOuV5JSpIkOHh8f69atW+eMbhlw8g3c+O3Ax9sICPD2NxqNwl4U7nRI/zEfvG/L\nAIAHKPRFBEURKHoBL4yi93e0d6tW4bi9uqoSmWGeDefjDGZkFaRioSo/Dgax7+48qesjHYy4j6Bt\n/oONQDexydPpNNXSRUDlLI37FnZFvn37tu7cuVNgDZwF85qmyWSSbC2+hPNgfTqdTgqsmSv1el3d\nble3b9/Ww4cP0xJpQMRisUjpbhdf0eR9DuD2eeNz3/2cAyfPKMzn81SM66wR/ppnp46x3W4XFq64\nT8SGx/a7XLm0DsIDOzXkfzttRCdRiOObvDAIsTgny7ICzcY93Vj5oPG3G2RP/9Tr9UJxKYjfn4ff\nLL2sVqv69re/rdu3b+vll19ONTAMphsBT62gVBhjVu7AZjBZAQNHR0caj8e6d+9eyr060oZmRbiH\n52IdEKKYPE80PB7d+KQfj8eF/Uz8xVqknqD+nA2CeiUv7crtIPMqgJMyWQXGL5Pe4XykzJl6hBnv\nW+YAy66HlLUHZ4tRpWao2+1qPB4n44ue0vann35a+/v72traKgCmMom0fAw+vO/cAWCsvQg49h9t\nj8yaj4kDLP6OgRESGQ9vqwcnDq4cTHsA5e2MIOiqAZQYbTswkc4v+faatchaOePktRNeJ+Q6je45\nW4E4YPe+5b5e4O/to/6j0WikGinpzC7B9jrIwOa88MILun//vg4ODs6x/FzfAwVYBHyXB7L4ljxf\n7inVbDa1u7ur559/XoeHh2l5sLOZ9F2z2Ux+h89cnOHwANzrSdwX4GM4lwDR0zP0ny9p9g00fczo\nex8b/MOVBCceOV3EnjiSc5Tn1J2kROujBCA+SYU8FwrCeTi+sloFH7iYX3ZE7jQ6VB/sgkdSRAcY\nyXa7ndIZ3/72t/VDP/RDkooRWKQ8QbTOklDVTVThWzqPRiPN53MdHR3p9ddfT0oDWHNWhWv6+MS/\nHZzEfCbfuUGu1+upDZ5j9T5lzGjbYDAoGHtH+uRcPcp3pukqpHXKpAxQSE92Pm6I4rER2Di1G89d\n1aYyBiMpQdUAAAAgAElEQVRKHAvGuFKpaDQapT6fz+cJqKCzTmdftE21t8FX7EjLZdHoBzaA4IDi\n9gh+ot5GEH1Rf3hQ4Ncs+wyJKdnoqNwplPUvziPeOz7LuqQsRcX/rt8OJImSfb5zDvOYfndWzuc8\nOuWpg9gfDkZivwFwPGrnOrPZLNUlwmjgNwicSMXQvsFgoCzL9OKLL6b3taGzrj/cx5lt6kHm87n2\n9/fV7XZVrVbT3iWz2Uy9Xk97e3t68cUXdffuXXW73QKTT78yt2GkAQ8O+qL/igDK/Zr7VUBPzExE\nNorrkIp33XYw6j7c5wHB9CpZ+4v/yiI7JKZAYuEZD45BJO/u1LMXQ8WqbCJul4gIIyXujhgDjBFm\nmS+5716vp8FgUIg8fQK1221Np1O98847+vDDD7W7u5vaTsWztHQKTFhf3VKtni1T29nZSVvzHx8f\nK89zPXr0SKenp/rGN76RlNHBCG1xqg0nAOJ1YIiB8NwvCsh5fAfz1W63NRgMCikemB6vIXHnw1h5\nvtdRdqTLYc2uCnNykdO/bHrHDXd0WDGSLxO/x0XO5KJniMbeAwKPMom2+v1+Ai/V6tk+P+TLy+4f\nGSUciUdfnqpxB0kRNE4h2hFsA33HRl2rntX7w+/v93XmA/12QB7lMixIZFw8TepA6KqwKBH8+f+u\nn3yHfYm1fW4nqtVqAgkONKXivksXAcxoE/gd9Y3jvJ5jOBwWbJ37iUqlkuqouOZ8Ptf169eTrUfX\nCL4Wi0XaJsHv529Pr1arqT5jOp2mgPrll19Ws9nU3bt3Cyt1XAf5jX8jpRkDFPwshcduR5xJIUMB\n++gMPudFEBHBhjMiMDDebr837QJQrZK1bsLmNGyM5CJ1KC2LVBeLRVpKykP7Rl7QdSBSp8F8NYsb\nD1dkAIpUpOgioHJkyvlON2NonXImpcNkbLVaGgwGeuONN/TZz362sKKHIk/qSxaLRYpUccSOoKG4\nK5WK7t+/r4cPH+rtt99OBhXlYZK6YsGcOE3vRcAcByUZx8z3KaB+gD5sNpsaDAYFI+4Rtiu2KzXg\nj/YRdaAL/HaEfhXE9Tu26bLARFrSomUOq0w8aoq6LRXBwJPa5OeQOiMf7oC+VqsVGJTZbKatrS0d\nHx+r0+no9PRUH374oW7cuJEcE2CWZ+FllDyDt6fMwWdZpt3dXR0cHBScSmy/Py+6VdZ/Hr0zTyI4\ncFra+xsQ7f3vQUCZeFtjPU0ZEI39sA6JtvgiHYr97qwvgQnBCk5SWq6SJEjyKD8yBj4WHvn7/z5u\nnOPpNlZl+UoZT7VUKmepaGwiQPzk5ESj0Uif//zn9eabb6bnunfvXmJ5YPPzfPmqFBy8B7oA7Ha7\nrS9+8YvK81zD4TAxJqukVqulnWdJvWAn6QNP53hKB7DowTttJlB0hhq/6+Prfe/pe0/NegDt/hHm\nijm58hlXfvMJSYzkfEBQSJ8QGKL5fJ5e0+479oEevXN9MxxJBQcdGRE+j4Y8IvM4MSJFyXf8Pjk5\nUafTObcklxc4feMb30jfZ1mmnZ0d9fv9VGtB5XSv11O32y0t4kUxxuOxPvzwQ925c6ewj4qkQkTo\ntB3KNBgMEmJ3JgLwEAvSJKV0kjuvGDWBzmezWWFVldO49JWPlwMo/5GWS5yvyjJil4sAyqrPOM/1\nCIYrpg1W3bNM3LhfxOqUtQ+DHR2In9/tdhMobbfbaVdh6PTnnnsuFW878OZ6MCtPYiCoc9nd3dWD\nBw9KgUlZH1FM6cxLdKb8dqAgFanwstSaf+cAIwIWF783ztCvWcYCXDXxKNiBoAc9/p33PfOdBQpc\nz5lSaWmnfTxiH7s9iAFmBCnxtzPnvm0C1yWAOjk5KWzQSZp+b28v2avT09NUMEvw6YCYcwFlgB3p\njEF/6aWXCqlRB3VefO7MPas+T09PE+jzVZA8j+vofD5Xt9st2EzGIII0fGmWZYXNHdFZZ7kARowL\n13f77cdh3y7S77W/ldjpprJjyj5zcACLQirFQQcTBcV3JOiV1g4wUEhvG2jPAYBPNraAZw8Vvuc8\ntnBn4rVaLY1Go5RzbLVaevDggb7+9a+nDdB2d3fV6/XUbreTIsxmM127dk1bW1tqt9vq9XqSzsDI\n1tZWymV+85vf1P3799Vut5OyQI26ktN/GODBYJB2t6W9zkr4JkYwI0xGJhrHepU2+xSwp8l4PE5M\nCjseSsV9ZRws+q690pItisDEt4e+KrJKj1exKjGl4MeUGWHpyamiaLQv2+YyQx/vhaFZLBapQHZv\nby/VGvV6veSEfI4DumDDKGr1wm8MPgWK3W5XzWZTH374YWqT0+QxWsaI+1b+0Un587pNkc7vRRJp\naY4p6zsvGr9IcHS0fVVNyrqZE6l8pViZDvtxCEtZ3f4QWbte+1g4MPE+8L7y+7ueRlDiwAldhZ3w\nqJ40PIw2tvH09DTt8orNabVa6UWB7EUlKYEF7CXpTna13t7eTun8mzdv6vOf/7yOjo4KBcHurxys\nxr+r1WoqpPUVb85wRh9G4O7jQ0Eyxx4fH6vRaGh7e7uQPpKWQI7+xwZ4MXwZ0wJj5u27qE5wrW8l\nXmUsLzK2lUol0cV0pufb3LBG+imiT0eUfh6f8T8d65/TFpiAR48epTf7gmop2MMAAVLq9br6/X5a\nCsY1j46OUq7ywYMHevDgQWIQyNEeHBykXRO3t7fT81+/fl2SdOfOHd2/f1/dbje10YEJEz72P0rG\nToP0qae3EGeh/D0PvnqJCnOcTa/X08HBgR49epQUlHvQTq7N2Hhxb3TMXggLEIzvi1i3lIGHyGJE\nI48j9vyxg2K/9mVYkHj8ZQEN4gXftMVXnAAQK5WKHj16lPZtIFgYDoeFVCX39iDCgTBAHIDrugtw\nR38jHc08jlE8+ua1BJ4CKBsjaclq+BzlnAhCaKuzQ2UMTexzZ6ViWkhaOqCLVjV8EhIZ7chWxb5z\nACcp7c+E7cLBe996MMkYx927nen2NFxkzOhDHyv0jF3EnSUhTeLMPLUg6OpoNNLOzo46nU56gSw6\nOZvNUl0TOsdqUFI8BwcHGo/Hab+Rev3sbchHR0eF9jq45n/3QV6r4/0FUPEl0PSXrw7ylBrP6nOT\n3ycnJxoMBtrZ2UnAif5kPBwwck3aHcGVA8wry5wgbpjjJEbhPXJgoOr1unZ3d1Wr1dIyWe9wp/Nw\nbjhfN3w4uGh8+Jx2uLLQPo/W7969q8FgkJwpqSYcNxR2o9FIK3qgC3n3Qa/XS89AAZazP7Az77//\nfkL2/X4/PQ/LbA8ODgrpK/rOWROcuhtxd4ZQmBRsuRIxoWNE7wW0gJc4eVDymHsFxERlZgLE8XGB\nsmd8r7qsmowOWkiHRAcaj7/oeh+1HWWO06Mfj+59LwV0r1ar6fr166kYG0cN5Y3RIjXDZmdOfTOv\n/EVwXgzutDPHe+2VR8HofZkRjMEIDqqsb5yBuagfcTa+Gs1ZFq93KTPaZZ/Hvl+nRHbJ+5r/Oc7t\nt5/PuOLcPOXs58OqYBcYW16p4e2QVKh3iixJDFCr1eUW8JVKJTlfzqemDmACi02qGyDSbDZ1fHys\nnZ2dpMsAFrfZbGM/n8+1s7OTNjID9LRarXQe/YD+kGr3vsc2AERgaTxzEAGHVEwvwmDAeESw7+eR\njoUB9Wu5T3Cd8Pussk+XWVl5JWpOLkrpeNrB0THROIM1Ho8L18IwudP1LXMXi0Vh+2R+s7wJRfbv\nYtQonRklgAHtYldA0jYUUR0eHqpWq6U3THJtEDaK4G/JjHUfTPLBYJCQO9ElEUTsz7KoGwNAH52e\nnqrX66VJwcukKpVKWjkU2Rb6wHO3cZyYaDBGe3t7CaBgtGNeP0YKMfqJUZIX+l5FuYgddFDi4kWZ\nZZFz2TVXOTc/J0bA3hb+xlnHdnlRrBt+xvDw8DBFkNKy4FBSMsLs3oxjYjwx1G64HRDx0kFJhcLa\nOObxfwcyZRE+fRuZJSQypav6lmNX1YysoubLzr+qgm0pAx9Ska3wc/gO0Opg1HXJbbczJw5Q2fnb\nARu2OjpX7ulMMedi+6j/o8aCN8pLyxqn8Xic2Hr8xmg0Urvd1nA4TLuosq/U8fFx0mX07+HDh9rb\n20uF4tPpVDdu3NBkMkkpTKkYEABMYD1gL7AJ7veYi7G2kj7gu7Ixc9uLuC6T3nLgF1nQMvvlvjKC\n1jgPy+RK7hDrx0jFqI3vpOUab5TT9/Hw853aoqCTDkP5nSHxIs1IrcXrMYCgUBSY+1AIyq6trDBC\nydnvBDDjRaJOczabzYTqfe07hpsUEbvIes6btnt9DUoMWzOfz/XUU0+lGhcYJU/vuDEo22/AgUlZ\nZC6dpYF2dnbSpHIWqwykeiQR9aLsvldBLtJr5CL9jxP4spT+RY7T7xsdSAQpzlxR5MffjMd8Pk81\nBLCDRJvHx8fJUPMZG0Wx/NLrBciBE7F2u920AohrA0x4TvonRuw8B6ADXY9sifetBygXjd2T+tf3\nybjseCDObn6ce3+vJQJS16OyzxH/HsaEwMf3hHL76nrI+fRrDMY8gCxzkFyHe5AC5Lvt7W09evSo\nsI0COt7tdpPtARhMJpP0Zm4CLJ4DRoLXamAzAeXxeHSaa8d5Tjt4dtrNrq2NRkOHh4eFNCV9hs57\nutT3j/Hr099ufwEWznhGm+TnRabQWZkyPWLMrhw4cbS2yhCUfe6Dx0N3Op0ETBx4+HVitL+KGfBc\nXYww4yT0CB/jCZvB24aZeKenp5pMJglJ93q9lN6BOoQBAnF7kSnUHceDZllr74WEgCFHql5j48vE\nPNed57meeeaZ1EcODKiDAD37mn3GwiPUaLwAN0TDtJH2eZ5eOq/ADtQ8Gohg9aoAlCcBE5eLnOFH\nqTPw9GdkkWI/rWJMytpBv3e73VQsyPGMKztsbm1taTqdam9vTw8ePNDW1lZyJjgj0oaMv+95w28v\nbGZextVKOKYItjjGI7dovMtSxfTLRWP3JNCJrn8UiYCwTJ7E2HwS4vMxMiPuhJ4UcErLvUwcmOBM\no47CcuCkfZmx1+hEhob7wS47WJCUmGI+w45mWZbS8u6U8zxP9YOj0Uj9fj8Fb7B9HvTyDNSxtNvt\nxLAA9LHx+C/u7WlGZ9NY9gzYIPWOfXYg7rZdUtoXCB11QOW/y/rUt7BnjGKhuAf3PLsDTtcjZ7Iu\nmk9r3SGWvy+KHpFIr/rDoTSkTzyq92ugiO6kWbZFDpJzy6Io/xsUy3UlJeWrVCoFxQe1eoFpp9PR\nwcFBUhKuQ3SaZZlarVZSWgAF92S7eYw3IIt2xpwubfZUjkcs9XpdN2/eVKWyLKD1zY+IgJlI9B+5\nT+7rrJKn43hOJrAj6viaAq7hNKRTvlCUPoGvGnsiXc75X/azy4g7Ywd68bplc6Psnm602JHY91Hw\nczudjsbjcVrl8NRTT2kymaTjOA+WLIIHxg8g6rl2dNyXRbrBdMfouhVZIOYiALmsRiGCPBfXxY8C\nFvy6Zd+tEmc2r4I4o7cqOFh1jLS0Sxzrf2MvYhDoq0wcYPi13A54m3CcXvsmLW13v99P774hsHTW\njWCSth4fH2t3dzcxKg5GHJg6U0ib40v08DOkOZ3BJ/h1W0cKp9vtajKZ6ObNm2lxgfs7MgnOtrhP\ni0xiBJ2eiikLCH1MnIkpAyc+Dv75YrHcr+vKMScurryeM3dlipM6Oi83RnyO4vi5rthScdc+BnQV\nq4OCMtherYxzxjgTITIQAAVSM7Sh3++nzdV4LpSRiNKZCNDybDZLG8yRs5RUoBQpFAW00S9Ohzpw\n6Pf7aWmyrzyAxYGy3traKuSMQfoRPfOdgzNH2/QvYxiBifc/Ch4NkhuIyBZcBYlRedn3Uvlqh3jM\nk8Rzyu4sypzEk67rjpSxAhz4d6RtML5ZliWdGwwGBRaPa0GrMzdOT0/TCgNnyDD06KB/54aUom2u\nT/uYG9S/wDgins7x/lgFTGJfP6nffNwvut5F3zmFv27mRDrPUMYoOR4XdTo6ObcV2DBnyNARt+Ow\nLewT4sWZzoRxnAc12FRJqViVZb3UMXkAiW/wPakcsLhjH41GyQ9EIE/bKY7lM3SQ15BQaOsbEqLz\n6DI+ZzKZ6M6dO2m7CPoWf+DbLPCiWOaAsx0+l7zvHdTgT+IGb5H5cAadcY/gB9DHWFw55kQqN45P\nmoD+MJFG9EIg74gIMDiOgZ/NZim/Tb2FVFQMJhMrZvJ8ufeGdAZ62u12KrCFVfD8M9flPQjSkmnx\niQWLsb29nZbfwWYAAlA8FJxzcBL0I5PXKVQMHudyzvb2dgIqcXI7kyIt30nkII22OzCK6R4mBYDJ\no5I4/vSxT4BoAHnusvqXqyBPmnzIk0BKvM5F142OzKPZeL/LCPrebrdTnRDX9b1tpGVQ4Pn3WKcC\nYOUaRFC+2stX8qCnOHEAPufgvAHcvuSRa/imWFFgAr2fvEgWiYAt9vEqoyxdbNcu+i469XXKKtC8\nirGMnzlQZpylYv0QOuLP6oGkM8ySUv0eQvDlRaWIA2VvB2kZbCvMntsdD5YJPvkcxh37SOEsjp5V\nPezc7bpCyh8gDQDHXvL3cDhMby5mKXS1WtV7772Xns/7jDlLX+ELR6NR2qoCJt99Zhwjntn3BJN0\njvWgn3xbB+ZkZGdYVu3nrZK1bsJWZpQR7+zIoETk5wgwrhpxY4rik3/DuVLkhLFzh+vsBfcmP8kx\nAJ5ut5v+n06nqeDJ834MNNvv53me3lBcqVTS2zEpbEVhQdQs6WLlhLTcCt7TIEwWFNEn3WJxtmaf\n5W/VajWxOBH88Vyj0Sg9U6fTSREAtTNuSJl4DkgiW+KTxkEef3uBWpZlhaVnPrb04WWBwDrko4KU\ni/6O/0dAs4p5ig7uojZFJo/8uBcxSstISSpuWEa0yVyLbAS/iRTRHWfYMHCkDQGiq5yPAxOeHdbu\n4OAgLRctEw9mmN+eNvQ2x7+9n1dJBDNl//t13J7F/ZvWLatYkzKWMDo7ByU+Z3H+XuDKddArB46S\nErssFdM53ja+A4QAJObzs51YDw4OUt+SivegFXDBWJAmRyepccQmHx0daXt7O92j0+no3r17un37\ntqTlTso+T/EFgHgCxHgcz+8rO9l+wrePR2ByAFNem8jzMJ+kJfjzNqzyzz5OPqbMN77jGq7vHvyX\nzWOXK8GcXIZFKUPTHq04nYdT90nhk4Gljy5eKOoTMHa8t9UroXd3d7W/v5/W4g+HQw2HwwQMKpVK\nYkJms1kqlIKNYRBxuIAJIrvF4uz11ZPJpJAf5b0KPmkdXBEx+DMTvbIMjt1m2TAr9g1g7uTkJD2T\nMyBMJp4hKqOnG8qYgXg8EzNuj8x5XCfWr1wF6tslPjPycYFU2XkOJi9ygKuu59fwaAn9yrIs0d4U\nCqKPUjHF6uABnfS0ZbPZTICaSNJrqTqdzjnGArDCNXzpOfl2GEvmK20mMqVoHr33fsRIeqrzo4xN\n1OmyICuOC32MREaG/igbq3VJBCDIRcA5SgwkY82Gz3G3Gc4Ee32a109wnrQEK86IwQB4vQV2BhBN\n6poiWAJGQDOpCIJB7xsYPIpUt7e3dXR0pGvXrhXS+ZJS0Ij+NhqNtGzZ00gEtwSasB71el1f+cpX\nCr7OXwXgfVCtVtOKUPqY4NHbDyBxkOMAxD9jTDwQ8DQSx8Sx8jovr31cJWuvObms+ASWzqNjqfge\nAf7ne1dEvvPqZ2m5ix4GxH/8etXq8i2a7hwZTF8JxL29AHc0GiUFJU/INsgc78W6FGxVq1U9fPgw\nnVOr1Qq7DfqKCPoKpZ9MJundPc7gnJyc6KWXXtKNGze0WCxXUrix98nMe3RwLkx2B4kOXGKajGM8\nokIwJvQV+9JgaLyv+Zxr+9heFYnMHvLnASurQMeqlIOfdxG48b99nKTiC+58fjgFj0Nx1gFwzByD\nIcSQslvmfD5PdSc+rr5TbtRFj6K5J22nLVDisKGRoUA8beTR/GWkrP+ik/Bcvh8bGa0I+K4a6C4D\nSB4suK2M33GuB3vSsnCeMfaaDw8U6UPArL9Kg3v5+EXWfDqdpqCPdna73WSjfFsIGAq/frPZTMzJ\n0dGRut1ugUXMskxbW1uJbcnzs4JRtq5Hr7HnnItuVqvVVEDO/XkdCiuBTk9PC6uE6J+oY8wNB1je\n3+6vmMdS0UZ7n8eUEcEqNoF7SCqAHnQ7gkjaGEFSlLWmdZ4k0ZnxmS9L5TOUyhEgiNdzaxxPkarn\np924cLwbLYwyaJQIbTabpSVdZR3uu6JS40I6qVarpRQOk4Tc4mQySe9zkM62t+cNlr46idoWVzzS\nQJXK2S6I/X4/0aCAB87p9/uFCNwnNhMLNobPcABc0wuLnamJURH3ROgHJhy/OY7zPFqJxtrH/ipI\nZCSepO8XgZVVTvMiQOMGK9K1q9icVeLREzVKDgYd9PiSxvl8nhg+38tHUgF0Y8SZp1mWFXLavtTU\nGQV36g6YYEwAUkTDHkFf5vk/Lrvl4NCDk7LUtB/jjCO/rwooQdxRrZprZdF2DBhdB9FTZ8M4x1N4\neZ6nFAVsHPrAvTy9g+33PVHYdZv3NO3t7Wk8Hms2myXWDp1DCLZYgXNycpKCRWqqnMVGz2u1WtrX\nylND/loHBxBcA+Z6OBymYI1AFtDCPiv4ntgXzvhwXR877ycPNPhfUiEdy+cOQtyX0E8OLl2HncXx\nY580v64cc+LG2HONvrTQaSgGzjvfd9tDKdyQSUpvNN7a2kosCPd3IEM7UEoUj/zlaDTSZDLRw4cP\nU1vYbEdabjmM4Cyazaa2t7dTzcbOzk6qQwGhc42jo6N0HRArUaFTZIAVBx9Q8bQ7bnglLTezI/2E\n0A++PDv++CSICk/7vT+YbL6XBeMYqWyPhnmmVVHRVTPkZRNv1YS8iFlZlapxwy0VV6gxHpHGXgWW\nIgDieg5AB4NBum+lsqzLiECM/+fzedoskPomSYWiPwclTnsTSeJUInihnZ7W4xlhYnguj8ovAxbj\n80Qm6knfl6Vo4jh6P3LN2P84m48DkL5X4kyGVEyroytebO/Pgi4CXklre2TPGNI3/uwU4XPt8Xhc\nGNfI3viSWuwX86VWq6UFBzAdXA/QzDVhPL773e/qww8/TGDjpZdeSky7gwdPYfhnMB29Xk+DwSC9\nc8fZM3SAII2XosKMw6awGg4d8XmIX4S98RoQZ+s8PeO2nLZEW+DpVOw540nw7eO4WCw0Ho+TL2Y8\n8SOM50VyJcCJd4Qr+3g8LtQsRNoe6l9aTgBf5hqjO67Npmg46Nu3b6fB9UIg2sObd8fjsdrtdmFn\nTElpTTsG1NMZPpnzPFe/30+vrH7mmWeS4X/77bfPpVycBfJ184AIPtva2kpLKh2hSkuAxhIz6gAA\nB61WS71eL1GcPgYxveVOg+MYu0jX8n1kNHgmLzhD6HtnsRjDVqulra2tc3Qw4814XgVZ5VAu62i8\nT70g8iLWJEayGC4MEQ47nkv/lZ3rKbSYI+Y6vuwR4+VV/b4HD0YaHZpMJmq324Xoy1NH/rlfP+qc\nz3mYRNrBSjwPUlYBvstI7PdV59Ie/i7r5+iEow0k8Lgqeh2BWaTpoftd7zgPR+3Le+N4eGTO9+5g\n2SyzVqvpvffeS9fzY/mbtDN2L8/zlKpZLJZbsnc6neQHsEfs6IrOHB8f6+7du2m7hfv37+uP//iP\n9dprr6ler6etF2ifB7j1ej0x5dvb26mehQ0LY+EpOsGqzqOjI7Xb7UI9yuHhYSpQBwhhDwGFnoql\nb5k3/rcHmnGOeHG6v7xzFUiNjDhz0Oc03w2HwwQIV8nawcmqaJJNa3BMdAAdSt7R99mIOXBfT01n\nYhhx0MPhUL1eLympOwOuNR6Pk5ITAbLja7VaTecDlDzSY2ICiNrttp555pn0Tp7r16/r2rVrmkwm\n+s53vpNQLytnWKNOesXz5+QiDw8PUx945MizMglQVq4Jc0TKJ8/zlPd0gOMvIkRZMUDObnku08Gg\nVCzQ4v84/u6AmQyMG5PQoxPuzTLuq2TEPyoQcXEn7RGNf+8GpUwwNN4nGPoYqbkzdEeBzsQVI+ga\nbCJtxOiwm+y7776rF198UdevX9f+/n7STSJBzvWi2Pl8rna7XajLQhecKfE5DdChpspTIjgqvwfP\n5IbYDbvLZYCLsyM+Vt6fMU3jUWaZHjgI59UA6xR3XogDAkCD70Lq5zmblWVZYkH4H/DiG08ylgBc\nD1xY+cJxpLG9MJ9xrVQqaZ8plu26ffO+JcVMbcWNGzf01ltv6fbt23rjjTf03HPP6eHDh+r3+3r4\n8KF6vV4BRJ2cnCQQ5Kzlzs6OHj16lEAMthpdRtc5j+fZ3d1N/cfxb775pnZ3dzUYDNTtdlMQQNvp\nS57d7UkEc4xfzD4wPtw7BkfYBNh8AmBshjOoi8UilSLgQ1khe5GdvBLgRDpPZUMVoZDecZ5KIOXi\nUT7OHHCQZVlSQOiuo6Oj9NnBwUFqA/uXgFIlJVTtkSPpHKebmSgOFKTlAKE8eZ7r1q1bunv3rp5+\n+mm9++67eu655/T2228n2hBFopaFNtBPtVpNn/nMZ3R6eqqDg4OU+sG4O4iAYaIynPbwHhNyo3me\nazAYaD4/2z3R33SJcXVQUubcpCUDwuf0mdfxOCDxFRmkthh3vvNxB7iyz8H+/r76/X4hPbdOicVf\nUXj+mMZySjTWPMVjPOLw6BqDzrWjM3FnKZ1n9kif8T15dahmZ1PcKANWtra2dHx8rKefflrb29sa\nDodpGSdOixefsVkVhpPrEyT0er1CatB1340gTs2fqdFopP0hKGCMYxMNLp99FHAZAUYZ4PAl2D5n\nvJ8d4DN+6MBwOLwwwvwkxB0tvyOY4ofniCCS60hnzs8jZ9e76Fh5fk/N+EaRnEtRtYMeBzr4Ed+b\nBL0m6CMAbLVa2tvbS6vLHjx4oPv376clvF/60pf0W7/1W3rxxRcLgB8g7Xat2Wzq61//evIXtVpN\nO7tsnBgAACAASURBVDs7unbtWkGXnfEg6OY53WYSrPtKTEmp/ZGlcxbSg3fsjAeWUnFDRw9IPbD0\nNB1tYMsBSeeuSWDMpnHb29sp6F0lawEnh4eH6Y29TmlJZw9OjhADy+C4USbXhXKhfOTDYDF8tUee\n50nx/B6wGjgG1q+DtGkbqN5Xk3hBZ7PZTO1i0pAWIl+5u7uryWSip556Srdu3VKe5/rGN76hV199\nNTEhGEicsG9RvL29reeee05PPfVU2iL84cOHOjg40DvvvJNYGi9clJQAGn06nU7V6XS0s7OT6l7o\nP69TcSqdAkP6pSzKZMJzz0qlkgxGs9lMfQQAZCKgtFyHQjUHOCcnJ2l1Eud6cTFb+q9bYgSN4Mil\n8++J8sjaDQmfx4nu7BjAZLFYLjnEgMT8sUf50SHyN4YLA+ibSmFAY5tY4k4kR8rxxRdf1B//8R/r\n+eefT5sOOqMYgRjOodfr6eTkRKPRSLdu3Upvo+W5HESg4x7FM0/j/j7+3FzPo2g+9/SE9xvnlElZ\nH0tKqV7GyW2e972L2x2PRNclOzs7CWhGUIJN9b6LKSv6nufyMWDuR+BAWkY606+jo6MEWEl90AYi\nfVgWZ3F9/B1E4VjRl8VikcCyAxnY2Z2dnWSb3n777bQNA/bLyxB4Zkn65je/qdPTU/3ET/yE/t7f\n+3u6detW6qNnn322MPeY217/B0hpNBr60z/907QCk5dy0q+k1bgOwV2/30/z2Df29BU39LkXtLo+\ne/qH7/GdtN0XQdAmlj870Gb7C16HskrWAk4Gg4FOTk4K0bm0ZExgC6SlU3KEiZMEnKCY5Jw5l85j\nkOmkavVsKTDvVvCqbpBirPp3qtKdPk7CC0zpcAykU8kffvihXnjhBd27d09PPfWUsizTF7/4xULa\nhAnJczJxms2mrl27phdffFH37t3TM888oxs3bqT2fPDBBwkY0RaPEpm49OnNmzd148aNtINhr9cr\nMBNeGAZoIZ3FVsxx1ZKnxqjNcYbLAYhTmp6qAhy6k6xWq0kvYIFY6QRrRL+tUzzyLUvXSOdXmrhE\nx8k1/douXksiKekyRgrn65FPZAn8mk6FM6e4j7fHDSnXbrfbevrpp3Xnzh3t7e2pWj1b+s7qCF+m\n7sCHe3o+/s0331Sj0UhzBeDuTtpXAEUn79vnOxPhgM/ZO4yug3f6M/a5j0MZAI2sGHOOKNiP9fnp\nTjWCkcsyOd8rabVaqtfrOj4+PrdZI4GMs9e+ygqnzzi7HvEdNsOvQf/AbDiwdSAdGXW+YwxwwlKx\n9kJa6rUDAvTy6OhI/X5fr7zyij744AM999xzOjk50Y0bN3Tnzh299tprGo1GhTe5+7Pmea7Dw0O9\n++67unnzpn7zN39TjUZDOzs7SQ+w8bQNn0NJAv6KawLO8Ce+pBow4i+dBbQA5lh15N87MHFGPLKo\n+AY+A0iyRQD3935mXBgTFkPwve+iHmUt4GQ4HCZFu379emHpFvtoOI3ra8Sl5YubcIiSEhrDsXGc\nKz3Rvnc+CuBK71G9O1sMPwrBdWLxEeyJ5y4p5vzggw/U7/d1eHiowWCQHOvt27fTLrXxWaWzAe71\nenr55Zc1nU71kz/5k6pUKnrnnXdS5TdKAENBPzJhvU30zXA41OnpaaK9URb2MolAkLTLeDxOb+ck\n5+n7VDDJPDVE7Q59DPihXUQibDaH4/IIObIJTAqe6SpJWR2B051OkyORSXSJDiqCFa4VDUMZMFnV\nVj/ewQ3O2hkEDBXOd3t7W2+88Yb29vaSsWcDtP39/TRHfG5xLwBvv9/XvXv31Gg09Nxzz+n+/ftp\njwneiuz9ik4Q1TGXmQ8+F2OqzOt6Yt+UgZgI5Lw/0EdPffnqOMCX0/dRygCPB2XrFBwPAaTXMuFU\nHRB4wEefABKwCW5b6ZcsyxKIc9tdqVTSHh9E5d6HjD22wPXfN5ZE//J8udmls/bOumVZpsPDQ3U6\nHe3u7qZnPzk50ec+97kUMHmNIcEVYOrevXtpB1l05Pnnn9e3v/1tzefztPs2/s7BBH3sATzXp68J\nADkX2x/HzmsVve98paf7Bc6hPV73A6gA/JOF4NrOurhvl4rbgpQFWy5rASdEuFmWqdfrJVQuLfNm\nvkTYKWyUeDKZJOcLymTSewEP/7M9MMp9eHhYeHcLRs4jQo9ypOWyYlJADDaoVVoCKUfznh7Jskxv\nvPGG+v2+jo+PU3+88847kpTe7grFTd1JvX72cqi9vb20E+3W1pb+6I/+SC+++GIaZIpcSZ94eqTZ\nbOro6KhQIU50QF9KS+p8PB4XwAypJgwudTA4Mq8bQNygwWp5HRB7A/AzHo/V6/VS9EKbvAiLZ/XU\n0kepE/ikJLbJUxBScQlvrFPx1BDXKQM6fh83Ur40153mqjQGOhvb68CbucP/njqp1c5eJb+zs5Mi\nOgD/cDgs1ELhgHz+MEcBQq1WS7u7u3r48GEynnEHWq878P7yfo4MqEfmHj171B6Bg+t0HBPEU20I\nKVDf6BHH6mwWoCfeAwdVdu1PWtxGEpljRxz0uZ65TSA1ApChaDSu2PCUEA6+Xq/r2rVr6V4OSLEt\n3D/+po85F7uCA+Y47DTPStuuX7+u8Xic3meDHt69ezc9J2CEZ6fG6tGjR9rd3dU777yja9eupdTP\n7//+7+uFF17QYnFW18hqS1+4gH7GeU4hOS+ZrVQqhb+jfkYGhGt5RoD54+DOQYikQhDCcxIQYMPd\nfpWlr2P9EZ+tkrWAE2coACpQtDj9GEWQi3bDyOez2UyPHj1KqM/BCdfFiI1Go7SMyZXcgQmgyGlt\nlBCA4imjWENB2gRFc6qc9elHR0c6ODhIwGNnZyetNvDVOkQagK8HDx6o3W7r/fff1+3bt/XjP/7j\nev/991NOdjAYKMuyVOXtyyo9lZJlZ8W/g8EgLYEjvQMAo/LcJwoK6n0DuHQAwSTFMPlvV243OIwr\n+wAgtN0NeJk+rTsvLxXTaDEi98/8WSJ7EqlnruHGir50sOZO39sirY5SympfPLWAcfJnwYFznLMD\nfo0sy9JSSNKCFHsDdKncr1arOjo60tNPP63xeKxGo6G7d++eCy78uZi3MYVGP3hhtqcSor5EnXIw\nEMeJzznfr+MADhDiu9cSnfLcTplHBsb70VMX6xJAgDtx1ynG3qNi9NidEscDcHwlJud7bU6tVtPe\n3l4BTDrA9nbxP/3lNgv7AsPR6/VSG7HTjI8X2mIfSdfgY2Dy+v1+quPwFDXB3s7Ojj772c9qMBjo\n+eefV7VaTanoz3/+8ymI4EdSAvn0G5/DzMCOHx8fazabpTovfAdsPMDcwTf+JAbf2GGvtyGAdH/o\nLAzj5sCFQILxYX6UpSk9iC+TtYATBh7QgEL6qg06F4X0CIKHAmGChH0vD2db6CAKX0kr8T3O1pc6\n+YRxx8jkigW7pKN8MnB9KN1a7WzXQFYKsawMNAy74bsK8pykTv7gD/5AL7/8csrns4x4f38/7emw\nWJwVbXU6nbSMMoItwANMyt7eXlIi9nSRzvZQ2dnZKeRDmeBEyETFHhECcHxnV4CcGxlfukzbvDYF\nJ8bfkd5m/KKDX5d4e5yVkFYXyl4kXkgZNz6L1/I0hTs39Bew4QyUzxXO8+8jEHHn4REqlfcYSKem\nqYHxqJe0j9PxN2/e1Lvvvps2BORzVv0Afn0Vnb9i3qM5jwi9H2BmGCdSiHGJpfdtGauBrAI6ztT4\nMQ5guJ+nbiNIiX+vSzwFE/eNwcZKSzbTwYY7X5/XOEoK5r0ImFUcDnqd8fDI3NORzjL6XPE5OJ/P\nUyCGnjrDg83BrhFwYl99bjx48EC7u7vpGjwLYz+fz/WZz3xG+/v7mk6XL4P9whe+oDzPE8AAiNE/\nvAzQnw2bzbt4YJmcjeRZASj+PePEPRxs4acoMuY85pzbfknn5rKPNWPp9i8GWj4/VslawInvnueG\nxYFJGXXqA056wEEOSND3+kCR3aFyHQAAhXh+X2lJZflqmZjPzLIsGVNpSdsSFUhKSNcpN/ZlQMko\nZKI/eDZPZQEa/uRP/kSdTke3bt1KTMP9+/dTbQsRG+DAl4L69UHGrIrgGTDcgDloWJxFrMZnHKEp\nuf7JyUkyAgAsj064B32M+OR3xF3GZhHZew58neJRfFma6UnAxIEt/eJOCpr6IpqfsfHUgVO1fkx0\nxDggDwrQZ8CGM13OdFYqZzsmV6vVtJLAa46cSYyMB/rNNuLsA1SpVBIAB+TTfoCGR2fohzNN6BI1\nAqy6q1arhSJqgIKPW4zI0X2eqQxExH6WlrUOOFokAs1o5CMYX5f4/MLmOiNEu2PahWfF/knLmkEc\nPfVxbu+d0ZBUGNcY+EnLlEEMSr0WBpBB+3G0jCO+AYaClYGVytmGlzdv3iwEWKPRSN1uN6UrmXOw\n4YzjdDrV7u6uJKXNMg8PD5MP8PS2z1X6LabK8jxPzA/PDcPTaDS0vb2d7uX1MzwrYzifzwt9IC33\nBcLfuT1wEoH/GUfGlWs7uPG6k2q1mlhUGJpVsva3ErtSoFxMBI8u3EjCVETamS3pSWc4clxVVBYN\ns1NdKLFHrtwrFvr48lnAENFfrKEhHzkYDJTnZxXdvV5P0nLrfSYTO2iiYL4Py3vvvZeQ9M2bN5Mj\nkpZFuR6NlTl3BxEYhF6vlybmaDTS/fv3tb29XQAobiAc2HlkSyrMDZFTrCg3zoPr+P4X3tdetEVf\nY9x4tqsi0WHFtNSTGJRI9/s5MZ0jFd9p4oYsRiZlxsANl+sKgrF0YOtACbbOHQIggLdYR8AFa+JR\nlxt01wFfAk/huzMS3NeBHM8VozYMa+w/N7re/3EMo5P2Yz1S538PqHxO+nX529Mg9DMOZN3g22l6\nxt37SyruCM24eMqEMaNPsHXoj5/vTpPxhpGDVYhMatRtricVC5xdfxCCOWfZ8SXT6VR7e3uJvWZM\nFotFes8PO2wDZggQfT76LtaAVH+1gz87eoHNi6k9mBJqFKln4d6wW9hfn9c8H3PV3xbOPKxUKikQ\n8HStM1OrBEDjY4feeLqU41bJWsAJnc2EhGrG6WI8fJJiDCWd25QMFO+Rm6cPnC3BAHA/irToLCYV\n9wdBOoKXllXgEZHSdugx2tjr9dK1BoOBjo+PE8KG/bh586Y6nY4ePnxYOBZAEukzwA8Tl36JhhUA\ngALGaJBCVO/b7e1t7e3t6eDgQPP5PBXHQoU6TY0TAdy5UXDAhxFytgxWx8GJR0keaXp7eX6nES9C\n4Z+UuBFELprIUSJbJJ1/s+0qwOKfScs3iK5ybPRbDAa4Btf2JbBu5AAtrEIBqABG/T1P0PEYdliQ\nXq9XqE3ytIu03IAMBsaZEGeDPKqkfRzvDp95QHvd4HqUGMfRDaz3v38fI0dnr6Rlca6PR9QNT/M4\nUFo3g+JpLwcW0vll3P4bJ+6BCc/o6S4Hv4wNfcr40gb00VkWqViXJRXfVs33UnG3agIb9JR24qBP\nT0+1t7eXtlngOo1GQ7dv39brr7+u/f39VPB6dHSkGzduqFo9qy1xANrpdDQYDBJL7bUd9BUCs+CB\nM0wxesk2DrVaLe1Bc3BwUNjw0H0b+o3v9D5xPeWa3u/O6tCvzD/muzM+HhhH9tv16coxJzjLSL/i\nnGIEIhVrD1xhMWieciCSosNw7K6UODnqPpgYbiS5L2jWQZTTrjAbOHDOZ4DzPE9AZDAY6IMPPijs\nepjnuR49eqR79+7ps5/9rCQlULK9va379++nTa24P2iZ/SN8Z77FYlmvg1NwFoq+caBANIRytVqt\ntEmb061MEKdtuT6GiB8fR0CTT1ZJ54wubfA+lnTu7zIHcVVkVdtWRQn+bBGARKcbrxmdm/c5feuG\nJ7anjKGJ90GnAKZHR0ep4I8NnlgOyXujYCJ4Jwg7DwOo0V/moVR8W3GsL0BnKVb06BtjylxyGpr/\neX6Oc3DtDnMVEPC8vDNXXM/ZFe8/Z8oceDqA9PtFVsyPW6fEqJ05T6rWAa7bRbcdnBtBh7O5UjHy\nduZsNjtb/dRqtXR0dFQAks6GeK1RTEtij2EPSN/Qxvl8+cJKnmkwGBRqMQiMRqORXnvtNb3xxhtJ\n927cuKFWq5U2t/SxJRUEsOd+tVotMYX0dSyYxm85AHAbycaLgBQfg8iWOwHA3OZ/dNnvKS2DEfoQ\n8OH2nftFW+L1pD7O3HuVrAWc+EYukRKlvsNzmihZZFK8M1FAf5WzU74cB9BAIdjVj9xejJZQIpCu\nTzofePLsvkS51Wqp1WppNBoltOr7L6BsTJZ3331X9Xo9FVgdHBwoz8+KpmiT/87zs132vEAQEMVq\nIZ4dytInoeeH3TjPZrNEx+OQMA5EgyihOwJfigzoi5PAxwaaDxTN94A6Z4uYuF6z4LR9BD1XRdAh\nf2Y+p71OPUcmQDq/gsMlRj5+fcakbHUF142fex9iqGOhYLVaTcvP2cyvWq3q3r176vf7Ojo6SjrJ\nPjq9Xi+9UG2xWKQ3rjLGtNXtwNbWVnp7baz5oo+IfD2v7oAOI+0sCc/moA1b43Pe+8aPpY9wys6y\nuPGNQMZtEp9H4FlWa8GcW6fQZrd/6IKDQ0kFQMnYOVsCoAF0RLbUI3ie28fJAUbZnOA8Uv9ScYVX\ns9ksFEKz1B0fwsaBW1tbGg6HevbZZzWdTgsgxdP0165d0/7+frKXnU4nvRIFPwbIcp8Vi2djKpvg\n2hkKntltB8ujfUECfYY98eDE2RrYfx8vjvc+4/oRfNBeBzi0i/s7K+ZgO86XKGtjTtyxz+fzQtW2\nVESKjpy9aphruJOD6pJUMEpc2wfG84uei445tYg2uS9g4OHDh2kSOfU4mUzU7XZ169YtvfHGG2mv\nEkn6zGc+k9iRbrebjPC9e/fS1sS1Wi1tXuXbM/tkd+fjL5Ki3UwI0jYYB/o3rohiZcVsNkvUom+l\n7JMD5XblZFkdq3hwUp4C86JZ71s3Sp739PF0AyktAVkcs3VJdPKuz25M/bhoQCNo4btV0bl0nh5l\nbDnGjRbihsIpeNJ3vlcJ+XY3thRwP3z4UJK0t7en09NTdTqdlDqpVCppTpIfJ5XnYBrwyfPgPHyT\nLEmJCseAx1oOB9/SMmdf1keMh3/nDKC3hz6NEagzI+hxGfsbGZCY3vEINdbo+NxYl7hDon/c4fDs\n/I/d9a0MfBx4Lq8n4hxsvbPP9CkLCLyuzZ2wdH7HX28XAIAl7thdtm9gkUKj0dD+/r4k6dGjR6rX\n6wmQE6xRb+JgzVffUKOHTtRqNe3u7iaGnT4YjUYFfwg75G33ue86689JcI5fYe54Gh4g7zrqdjwC\nbrdBTg5gbxkzacnqe8rG/XScKzzXKlkLOHGnifHDwdCB0hJ9oYBS8eVkXiCJkSK941E4g+ab9XgE\nJamwBS/3ccVgUBlY8ovNZjNVXAOQ+Pv09FT7+/vq9Xp6/vnndXh4qHq9rldffVX3798vOKlr166d\nc2i8kA/Q4I45y7JElbfb7aSATFwHH4eHhwVFZsLwjBgC/uYdNlSTO4JfLBZpySh9xfcYLU/heH7S\niyDdALtyM76wWTESiCk9ZxWugpSxHU5rXnRsBB9lkWFkVFaJ1zfECB7x6IbrAVIYY/TGgTuAFkYE\nVnBrayuttpGW0VG1Wk10tjMisB6ka6Ix9JUKDl4crMb+jfrggYszbG4cI5CINTrOfnkEHEEmx0WQ\nHO/BuT4HYnqHc5wRWqe4vZCWO8A6ve/94cDP0+jOlKJTETjH1IuDE2dfsCWeUpCKK85YUFCr1dLL\nWre3twsBEuc1Gg0dHx+rXq8X3gR9+/Ztvf/++3r06FEBCACw0V1W4jA/RqNRWvlCehMg5PtgbW1t\nJRYSVsmDLfeX0tJ/YbO5Hz6xWq0WXtsCWxkzD9gH37CR8cOHon8UIMcgC10ASPKeOXwRUsYyogOr\nZG37nPiD4vCgkF25XBkciNAhoFhp2bFujJwa9E3c3MgNh8M0CHQqiukGg1VC0lmn7uzsFKqc+Z3n\nedrpFGaFHQ7v3r2rra0tLRaLtMxXUlJaUkHsporx95c8eRTi+wGs6ueYSqDGhueNNCAgBYDlG+/4\ne3C8z1E6JqOn7ubzeWJAIgXuQJRaBCYxoIu2eWqOcUFHrgpzQt+5I+SZ6cMYNayKJDwalIqbfEXx\nz9xISyqMb2QSYrqC6JT5546BOUg+3PPTpBiHw6GGw2GqLYFBAWiiP6Q7+S0pGVfvv/F4rH6/n95h\ngpPziG1VSoZ2OTChH/jfdTCCOAdlZaxIHDdnbvyzOM5x/CJb48/gAGyd4u8gw+ETwDAenl5jLB18\n8JuUr1TcFM/HCGE+sacN4JAxdb30FJCDKXSM4+PeSp1OJ62aIbj88MMPtbu7q62tLX3ta1/TrVu3\n0rmw4pJSMMg29Owe3u1203j7bsjUB2IXqtWqhsNh6jMWaNB+T/14qs9tOyDX57mkBOzcVjtjg067\njvoSXx8Tgvsy4M1LCambgc1nVV7Ue59zF8lawInvM+Bpm0ajUaie9sLLsvfZkKP2yCvStC44CC+U\nhZr2NeG+PTMKxKQAuaPcPMvJyUnaVZUUFe8LoYBrPB7rxo0b6e2tktJeEN1uN+Xnh8NhMug7Ozs6\nPj5OeVFpiWpB+L7k0tNhtElaGnAmOIjb2QsUDwTvyl6v19XpdAoOwDcwos/5ATgywbiH9yPiY0VE\nTr97FMwKEHcEPsGugrgRZkLzf5zYZcyJS3SAq6Jov04EM4ALqbgMNBoymDf0JdKwgHbmXHzeVqul\nDz74IL1SHjDqG3b5VvPT6TSlf3ye43iYn/V6PRXPkmZicyrmIYYvpgf4G/EAIj6jU/8OTOh3ZwEc\nlHlfenTv4xL7mZo0HIuDHwf+Mehap9C/9A1stKf+vL0cTx9iE7Ar9JWkBGCRCMqjA/UFAdjCyWRy\nLk1HX1IDRSBHaob9R3hJJMCh1WolJvvOnTt69tlnNZ/PdXh4mBgVxo3/YZjRDYI66Txopm0wLfg8\nNs2MfYg9Rkcig0UfMReYK/Szv0gWYOh22hkVabnSxus8AdBxPJ3d8nHDtwBY6Qc+KwPpUdYCTqDZ\nJKVcHyCFKB2D6KkWAIhU3OwKB+YdhtGMBt1pas7tdrvKsixVOns1P9dhMvlyL2n5ojxPGfkSNopT\nJWl7ezvlOgEH0OC+pT4reaSzHVo93eXGAWPPM/veJExgULojXGdImDhcH6ViddBkMkm5WIq04hj6\nWABkHGUTRftEig6AZ4IxidG8O3oU2yNZANdVkRgtSuep+yhlKZwYua86DvE+8VQEuhxpWfTMUznR\n2fr3jJFveLhYLNKS806nk/LdUnEHVc5lHCeTSdqBk/s5a0nE65/hAHwjQHQJ48lz+1x3wwlL4cDN\nwa6ncbxvPUjiHHeiDubKxgZg7sytAypnspxxuwriAQK1dLQTe1nGbkvFeitsss8NZ6h8IUQMeqRi\nioi0s9sQrjObzdLeH6xo9IAzyzJ1u109ePBAe3t7qlQq6R06bg9feeWVtIs2tXgAHXS92WwmsEKK\npl6vF5hfdM7rIt1e8a4hxNlhDyI9+HQd53t8J+3xNwAzv7wfuY6ngRijWMTq7XW2DIbTx9uZc/df\nzNPoh8tkbe/WibQpoABE7lQW7ABRB+I5LxQZWhjUCPJkwJzKRcG9AIlIgJUmGHTPlznNCqhibxBW\n6zDwi8WiMNC0FWfNJGYwKVyl2Orhw4fpGWBjuHae5+lzvx4/kbFwSjwiWJTIlYX7zefz1E84E4/W\nPYqEzkVR6TeiXQdGPqG4BnsDuPPwqAFGiWdkEse6gXUJho+2EaWUAQOpCABo/yomxK/v/3Ms14hR\nKDpW5ugwth7N+zWYCzgg3/PEi/Yo4KZNkWL3dgJoJKW5yT1dt5zFZM44W+JGjzw+OuL9A1PjdW2S\nClF8ZEK8HoR2O5DzovR4jKfDfIz82Jju4V5loOgqiI8Pv9FlZ00iw+ERt4N0T62hz85QY68BzFyH\nH+wS4IAxZgxarZYGg0EK7tBdauV4v40Hj9h1d8osB+Y3vsB1GBsPoOR1Hw64uJ7Pf+pQXNccIPtq\nJvc9jIEHKwjLo6lpwUc4+0eb+d83yvQNNj3odFvk4A0ggg13sOl2niADPbiMfq91h1iPpjwv5XlE\nR7t0nDtTouroYOkofz03n3M9nL60fK00gMVTFgy4p3VgGRaLRTKwvmLA8+DVajVRbTgBR6m+Ex/f\nocikijAAfn8iOc7BaDD4kcZG+RAHiLSZ+9Bfk8kk9QfGn70saI9PHNrRbDYLbxyFvvT35nik6Xl8\n3/DLz3fnTQTtOdSrIPSDRxXO8pQdy3HovQMN71eu5UbGgbK0HGscLN+5YXO2yYtmnVlwY0X0NZ/P\nNRgMUlQGeJWUisK97RhmB2c+T7y93g/+bMxXH3//m2LY0WhUqC1xI++6Tduk4kZRzsgSLETG1QOT\nuKLHGVVPzfhY8T8GOqaEfJykYsooXueTllj7h02i/7yo3leHEPU7uyGp8L4lAlNsrgNP9MVX7bju\nuG/w8SY9E+0S7aDGER3FefIc6CO6AJvr15OKWxugq7GepowlcJCBfnuQQiDpab+YHo6AnTQ/fQNo\ngBViVZBvN+/Pw/3cV8KEO1sjLQNKxsrnegw0fK56DSHtXKlzH19dP754wV2WZYnmipMRxZSWyL0s\nQqZjSItADeK8iJDcOLkT8S2Hm81mSrG4gfFIwRkRZx74n8EkDeWokgGMlcuR+WEQXRH9f8Rpc56R\n6Nap1LhduAM5Z1K4FudT4EU/kB/mfUKeanOnkGXFF1kxfl5T5BEWz04/k7LCAMUlpYwN11238Ubc\nkESn5SmasvZG50xfer846EXnoqDzzB1pmTLhezc+buydHWCZZdQbfmq1mvr9fgGUuPPl/1V95IDa\nv3M99M/cGdIGdBDw7xGbp3hoB/dCP7FF3g5/YWUE3/7b03T8HR1QZFcisKKdPiel4ovVvN3raEGS\npgAAIABJREFUEuYfOoWjY+7hFLFxMaXIc7vN8fPdPsLUuf1H1+NCBwIplu3CqMR30jhw4l7MHfaK\n4jgKZrvdbqon9OJenyN5nqe0oxfs4icc8MegDNvmPs1ZE0mpXoQ5SDtInTqL5Wwec6GMnfZaEvqF\na/OsnONMErruQNPtugP7arV6Ljhl7Dz4vojtXtsmbBhQImqMDBMZA+FpgIi+fVAADCBmIj6AB6DF\nIylyhp4CYrAoZGXwcdbSkipHwZm4IFSWXcEuOP3pyuD35TiUAQVjsnqE6cyPVKTOPZJ2cFWpVAoT\nFmTNMzu4AszQZnKqCCkj+seNsOeJiaAcvHj6KU5OnsmL2zyCANjE/iPHexUAikfgGBFfpRP7wh2c\ngxY3ZO6YHKwg8RjG1j/HaDImGChPM3r6h4jJ2+eGEPBf1k7GmPb6/PXjI0j36MxXDXCver2eVo24\nrrdarcTuuX57ZB3bGZkVdC8CyAgm/VoYbx8bd8gRUHgwJC1XEjEHGLtVY79OcWYPXUJfcHxeS8PY\n8DzuyLxQlN9uw6NDo3+4L6llUjXMCZhexJkIB03YUk89S8tdrLkvb8L2FSfoFHtKAQDKarK8VlIq\nroZ0fVsF5F2/ou9zJsn/pw95fxvPCHhykIjQj/ge/57r8lzYA/wm53JdHyufzx50xA1IV8naCmLd\nueR5nhz6YrFIqNEpTwwSkmVZWmeP0vmD+rIwruP0oOfGJKVrcV+Mna8SKGMayiIbb6+DBaedI2vi\nLIykhEZjJbz3gVNmOATahIPneXgmz5W6o/eoBkq7Uqmk1RL9fr/w9s08z1OKhvMYPxQYw4XS4yic\nuYpLAgeDQRpfn/yc646ZtkoqGPh1igM1npHCT09XuQNy9sJZCeYGn3sfl9GhTqdKq2sjPJqi3zyF\nKp1/10aZvsc2u+PiHjgPJBpH12HqB4h8eU5nZnxHWHSGueoAnWvHokwXn2+AaS+gj88YgYrn8f0Z\nGSPvQ9oQmRhnsSLLEHVlnRJTVWyoh91y5xMdkqTCSkJ0xAE0feHpIbf5zt5Jy8JQCmIJUNyeuZ9B\nfBWig3P+Z+yxvXxPigib5n4gjqvrBXMqgk/XG19aTTvoczZDlIqr75xB4rquv6RiB4PBOcaDWjHv\na+y2tFzN5Asg0EkCU56XdlDX4sGur4p1RpB+ATCukrUtJfbohY4hx+1gwzsFhfV8l0d50nJDGAwc\ngpIy4DG/V6vVNBgMCq95j4Yalod2OdvAPaRiLUe8B4ZVWg44QMKRsCNyd9DOyHBPJgvHMbH8PlzX\nt12ODs8/Y4L5tsVUmvNMTh/6TpCMQzQuDv5wJEQlzgShA05h8uxMTO9n2uxGaF0SGQSPNBy4SOVv\nUeUa8Vox4nc63A2Ui4N/9IiIDgDh0Zm329uBOEvAvQFA7jSkJSiBAfHAwJ895viZNwASoiyKILk3\nx0Clk250IOXsm7fN54Pbm1g0uEqnIliIgQnX8HP9uh69u464M4y6sm7g7eOEbQag+J5Q7vwdHAOc\neU7pvL3kPogHMP45fzsTjaDPnlrxrd1h5Pk/6hU/vgmhrxQ8OTnR9vZ2wT5LZ2kuavF8rLmH2zi3\nafV6Xd1uV4PBIPlFr3dy4O9MC3rt98MudDqdtGqIOj8YqzhPOY/+5B7uv7DVkgrZDQelPA/j6syS\nB7xcz9N1q2Qt4ITB9j0TcD7tdjtF6OwqGY2Eb5kund811iMPp3opPHUWxB0dvwEwoMR6vZ4oMe5N\nu9iYDeOGOHCJkZgrlztekCfRo2+6xoTjuVFEEDxGnEI1p1g5hr5zytqNdyw8zPOzzeQePnyoTqeT\nHAHH0g6MPffxqJjP3KA46IvgSzqLWlBu0mZs1OW0phv46EjXJZHWjnSrR/YxdSAVNyh0NjBOYhya\nj188zh1bo9FI+yq4XnhaLTpWxo/reg0Q18Wh8wzM6bgB0//X3rsst5UkWbsLAEmRBAHwJqWUWdWV\n1tWTfv8n6KfoQVn34M+bUuIFd1IkAfwDnC/2t0PMLLNj5xQ0QJjJKInA3nHxcF++3MPDFSedP2SA\nZS/aawrz1O/3SxzbwGiz2bRqqyRt4MdaeK5gpkxPO/ZfP8PsksHFH7ErfrcdjtqLtJPh9ah13q6B\nNwYeNhp9C8NVMyteU+bfOjtpwLMBODqM37umTdIYa2TIR2UpZlY7XDyXd3gPAhD4LH848eIj8bDZ\nDlkQ0mB+fHmlHVL67nckKaXsOfbc7XZbzqVrkliOXhunGWhOoM5ms9Lvbre5kbjeJ3X4iDVljMwX\nJIDzfBySS1JspfNvWCPbSDM/r7WdaHQvMoM3q1CfisEzB3ERTsAAsxHIjUAQ2TAUtzGFZMPJBJ2d\nnZX8l36/Xyoh2vMyg2H2wErKeSwgYRKr8PD4ztPTU6mySAljPkfNEo4L2xOmP4yXOeBGTeaUeaDP\nDw8PBRB5bGYzrGxfXl5KP3w6iWqIZk34bg06WHMSz9wvmBfmAaXHJgNUoqyRAZR4zYLtutVMRu35\nWhnz7yRfzXud52HPmTnCGwGovDZ+5Nx1CFCeNCs++vAa4PNaJm2AlDT33hj0o+A8J8iKn2slxak2\nPn9yclL2AUmAlrsk5Qh+rXx5fu1ho18Mcl+rgHl8fNyioL0W9kLt3Lj58wbodlK8tn6WDem3AL7R\nWYBCQjM4R97vSZPDAYiBMcDJS/KV0TXQdTi4Ds8kzUlHDOlisSg6CSDFu10MkD2BXMDq8Zk6x8S2\nijASjjQ2BEMNMOD72AuD76enpzw8PBRZTtp6AXC22Wwyn89LH2zMnVeIDPH7N2/elL1hvXx0dJTR\naFTGAJAycDR7yPqxzn/E0hpw+N9mtW0b2aP/TK53VueEY08gK9e3cF0SAAAG6+npKdPpNOv1ulQs\nZaPUNSUwZISLzs7OSpYzjAqTzh+U1dHRUTmJwIQiyPZoLXQII5vYpdwpve1iQNQyeXl5KQAFRoLv\nwcrwHkJOvIt5dFIXmw+BhP1J0nqH45ZG1QgUz5xOpzk/P28J2ePjY2azWZ6enjIcDnN+fp71et1K\nnvXcYGS73W7rWB7CbwOCEUC54MXYCyGc5Qx5NvUuG2trg1MbLCsuFL2ZBeTc+TU1yIGVIV7Nv2tF\nmDSxdeYfZez+ITNWenVYhL/zDgMiG56kfW8PFZOdkGgDTR/t/dkTwxDQDLapMkuf2Nf2DP13jwnZ\nr6lz5srsJ+NzSJVx+99mof6o1QCIPtWsgvfOrpmTJEVP2dAAdCkmSbVVnEFARb/fLwwo+x02D6BS\ns77r9bqUnTfbAEuctMOIhDCS9rUFNfBFv6NLkS2D6i9fvpRrRpbLZa6uropM8hnkhPpbNfOHTXO9\nKOTW4aSk7dSwp7vdbgHHrgdk1sT7BjmCnTw5Ocl8Pi+OLxXHT05OMplMcnNz05qzpH3akJAnDBU6\nhvA+TiLfc80YrrBIGt3FvDCPh4eHLSepbjsDJ574ZCsYGNnxeJzNZpPLy8si3NQxAG1x3Hc0GhXg\nYqDB0baHh4c8Pj6Wd/GThQbM2Iv58uVLKXnsDYbi4nsg7ZeXpvy9lQiZ4wcHB+XoHUqOI7oGFe6P\n6WcMCieGzs7OWhRqTf8yBhQlmdgYDJ6JoNTMkNfEXiVzBNC7u7vLr7/+mul0mm53W3wIBcM4UCIG\nWoAK1gsFhDJxnxxSY3xsRjaRQeyu2x9R+vzbhgZAggwhn8yhPXkrINbQ+Sz1SYQ6xGAlyl4x2OTz\ngBTLup9Jf/27mtIF9PtZHrdpaOaFOXD+FM9Ejvm+HRFocVdqNjjmfYy1VubIE+927ke32xydd3jC\nxpN1dI6D2SSzZx6rw2ZmpAA2NnQY7F02+mWnxmDu5OSk6EHmnRwHHLGknXsDSLCjuNlsL5CELbPx\ns2fPHwz+yclJzs/P8+nTp9Yx26Rx2JIU55c5RzfiGCFbHCN+fn4upzp7vV4rfGjGwCyecy+QRd/F\ngzx6DyDvPAP9bfmAYa4TcutIgUMosNMAlZOTk1xdXSVJZrNZqVtkB9IOfpLC0iKP3l/IA7YQW8X8\nAF49X0RLPFevtZ1whWx+aHl7cSic8XhcBskC4SENh8MMh8PWEd7VqrlLBmXJxXs8D8UEkkya0uos\nMKeGxuNxJpNJoQWTtJAifcWzB+myeCjPpIkr1hvcFFfSgAH6b5qTRGDTlXjK9rIAPwgtxeWMuu2h\nooTdN97NZ0H7SQozcnx8nLdv36bb7eb29jY3Nzet9XXIis2MgDKPfKauycL6cImc69DYUHhc9jx2\n2ezt160GkihaU7t16IJmNgQ5drGnOixDcwgCI85nbPBQTO67jSR9wuO0MeY7PplVZ+GbvXN+AuNE\nOdrD5DPIH33BmD08PJS9S9+d7O3QsD3vmvVM2iEpzykOk/NsalYEQGSWqw4duBmk1YCtDg/VRmtX\nDX2RNKG6pM2wOVmefY18T6fT3N3dFbYFAzUYDFqsno3ja/KNgfZckWuC00Qfa5as291eVUKpCYdf\ner1eOQxh9psTm85XxBjzbOtwO5zot6QNPM0O8sz6WgSveT0OO5BmL+28UwWXiwTZU58/f87t7W26\n3W2pfYAZc227UTvE2Ft0sKMF7NE3b95kMBgUAGXAn3wdOv0zhnFngcw6KZJNDyqHQkxSyqZbiK+v\nr3N+ft6iwyeTSSsfBG+Iz2AUvdEx+izq8fFxCTsQwwQUWPGv19v7BObzeau0PIvFcSwotjphlf/D\nIDmkwR/6zhxxN06tBJlPPsdcoezt7dGshO1N0hBmfgIUHh4eSp8uLi7y17/+Nf1+P9PpNLPZLN1u\nt1C3vAcgAohbLBaZz+ctRsreODHT1WpVnlsjeoOUbwWYJG1vvQYMhAbxLpxMZm8cpW5g6rwaDJYr\nnFo50uqN79CQmZmaQUNZ0+ekDRAwBniT3W63dd2D9xNj94kL9oUNM0oXAFMfc6bPprMJlRq48HuP\ngfnzvvfnaGYjn56ecnp6WpISTbVbZ/Fun2pjvgzI6+e7ua9189h22XxhqHNBki04oKYUexjG2vkF\nDpWdnp7m6uqqsBKsPaULADfowqRhnWqQjy6mUQeFPplhYAzIO8zYbDYrdznhdD48POTi4qKAT/ar\nQxHdbrfkvbhMPEwQDig2xHuXk0113pMdSjMr7MWkCakh8wYz1jvuN3N/f3+fp6enDAaDXF9fp9PZ\n5nWxh60XsFcHBwctsJM0ABBnEr3f6XQK6ERecLitr+wcvNZ2FtbxZgd0MLmj0agMCEOOUJHwZPT4\n/Pyc33//PUnyl7/8pQgjSt7xTzMHbAh7qfwffSIhyRUQnaTFIq7X67LR7DX5hMFkMslqtc1AZzNZ\nuSKwPMuCwTw4PJOkZexq0IJyt7CZ+UFIeL49EtOJvOf5+bnUSkHw/vrXv+bx8TH39/f56aefihJa\nLBaFnnW+CRubSwl5tt+TbC88PDo6yu3tbbkIsd/vFwWDwtq1R1m31zxrxs66oBBZC5SUq7rWBjZp\n19XguXg4VtoYdCt1+gZFmzQlyfnsa7kkSTs0MZ/Pi3d5cHDQOiHDM90Ps4T0l/+jnw41GiyjHLlo\njZMh9M2gFgVt1oI5qvd8zSYxDu+Hbreb2WyWwWBQQryLxSL9fr8YIxwI1sWAlHf/Eagw2GI9DF7M\nJDhMtMtWM3cGl7PZrMiFQ8BJw1Cx9uSokMxeh/3MRtghxQ54jxAaShoGi/fBwjHXAN+kAcuup4Ms\nwcqNRqOiE2FPfEKF77BnDIScFIr8O1xq+4EsJU3Yh/6anWD+HRZhHSzzjCFpcnHMIn769CmHh4cZ\njUa5vLwsfSbBl0rrOBrL5bLMk2WA9R0MBuWCWIAn+8x2nLlCJjzu19pOwAkCtlqtChVYK1KEyqEJ\nFIizt1erVUHonHCBirWyQchNpTlWzqQjYBYSUDXvBlUT22SSqVdC4hdCMhwOC1PAM8zusMFchZZn\ngtRr78tGi88yHtOKjq0naWWkW9hqmpaGEAKA5vN568TN8fFxfvzxxyTbGOZvv/1WhPPp6SlXV1dl\n7lgLCgsx90bi3FA9GAwKQMMLYS7qPiavF9naRbNhdm6Bc4HwxGmAUtbfe8EsB+vld7mQFYaMdXdy\nLH0hodEx8qShq53Ma08naY5v813H6tmL7FPT2O43cg9I4nkoXCrAoszYh0lal8CZ2fFeZfxmEpN2\nPlXNsKArzMowXupXAJABSxzzr/UHCYZepzpU5PwiJxE7pOPvfwtyzdpyoZ4Ba7fbLbkmBlroHdab\nBEiMf5KiQ2t2CF1qjxsmPGlYSEINOIVJU5CMPpo5MHimn91uN6PRqOQNPjw8lMv7aNPptLAB6Fdk\nDPkzqERODLjq/WkHgn4jxzWg9sknjDzA2HmQvMtrQRgLFufNmzf59ddf8/LykvPz8/T7/cJU4SCZ\nmWL8rjDr8Azrt1gs8ubNm8I+Yb9xYLBjzJEdhdfaTuA4tzGCtGoKuNfbZjo7KxvlhXCs1+tyKR1J\nPu/fv0+SknvCZUcusgMrYsYBAwLNt9lsyoLybgAQzInZGIQNYZzNZkX4QJyM8+joqJzdrz1eK05A\nEoKIYDAn3vw1GjVt6dAAdKLDIg7dAFIQchtH/r1arXJ/f19O6qzX29ye//iP/8j19XW+fPlSlBdM\nF54mG2g0GuXs7CzHx8cl1MNczmazkjeEkYNFcSjQuSrIyLcQ2sHjAVBZGRlwJmltUN/fYUVDjk7y\ntdFiTu1d83dTxQ578T4rfZqVrZWjARaghH2C0UE2AVo29PboanbPgAJgYuVlT5HfMZeAAffdgAej\nRJ/dH36SH2adYG8Vuv7s7KzsJxLsWSMrWIdoaWZJvL/QSzUL6BDft8CYJE3VUMoBMJ66fAMyzFyy\n/y2PvV4v9/f3+eWXX3J7e5vk6zouHP81m5akZS+QQ/QXIWxkG5aGfpMAagCF/uUE6GazKZdYbjbb\n2inoc4fhsQ3ezzgd3tPoXN5lwG9GPPn6vieHzniX9zfyyNzwfOsX7AN2z3P422+/5fPnz8WJAUSc\nnp629kXNShoE+bb5WrckjY5CHmx32PN/1HZ2K3Gvty2ty7XWGHbH1RgwE85mcAiDCbq+vs7T01MW\ni0VeXl5ydnZWkPZ6vS7I3hQpiogFgNZKUsI4fM4erD176i8YEYI+UWCgd55PbNO0lgEAfQDcoJyd\nL2Mq0nkbPMv0m2lNCzBKmz+vGXcAnecBpeDEsOFwmL///e/5+eefi5fJBqX5JAZCbqrdiL1mHmoP\nOGkbU/q362aAmTS5B2xanyZB0bB2zrFgLK5uiaeE3LmxT6wETT/72TXbQvPvDRhNufNsji9D+fp7\nzIPZSwClT9kAYpAlxgFwZ+/WHiJePPLCPrOSRkH6Ejnvj6R9/xF99r6xgl0ulzk7OyuyDQh7zcu0\no4EMADj4WXuRXgMzJX8UFtpFA5TZaQOU1Dk26BvkdrPZFF3A3HAP2nA4LMma7O/Dw8PixLK2lll0\nGBVfsQvIDwYedp71n06nJXcCGYLNRM8AqqbTabEjgAYzNYzV7DaOJb/DtrA3vObsK8t2p7OtVYVM\nERKCkQNsmY1L2owke5F9jk0F1HBSdL1e5+PHj6VvsOlmZer5d4jGa4F981w6fIxMOILxz8D3zi7+\n464RkBMUEAYbwQHJUonVnv/5+Xnm83kR5sPDw3z8+LHcJ3BxcVEW/e7uLsvlssU6bDabkuhFVna/\n3y9on4kjsxtFCJq+vr7O5eVlxuNxQcmENJ6fn7NYLAoLZAqPvBkLq8M7pp0RRv6OssJztZCYQTGI\nMVXufBo2kGPHfJ9/0z/Ago3F4+NjBoNBQc8kLt/d3eX29rZlBJ0M6vABf1B01L6ZzWYFwXvu2Mhc\n9mZU/i0ocp9KcTjNRr6m8W2QDAIMUpJm7NPptMwH62pDyk+vVR3br8MRKG+MDUqfz/sIZNKEWBzf\nBiQgVwaoVm44DYAnn/7BwCFrHovfjw6pCxvCaKJw/R2eZcXOvJOAiJya9qcfPJM1BmjWeVwAsz8K\nbWH88JgNUh2O8nN33WysYH/pL/k4nHY5ONheBYL8JU1CJ04b84gskHBpRpTvO9Thk1rIFM+wswYL\n3uv1slgscn5+3rIjyC3POzw8zN3dXQaDQZKUPCeO3yaNkTUjUQNZO1wG96xvzQwyN94fSQPuGRtH\nfgmHmblzw04AqqbTaZJtjh85exTQJCxGBOPi4uKrkPNr4X7GzN4ghaHuk3WAdRnj/LN8k2RH4MQo\nCiOXNPUCfNwLAANASJp4I4ILc0DZc46hgrwBLlZ4AAiy8k29omAQWhQ2i4nC5cw5qBMljEANBoNy\nXw+KHNSKQPM+/i9p0LJzD2xoWPQ6b8ZGxkLCGHq95g4SmCGAg5u/v9lsslwuy7Hqi4uLcpppPp+X\nSrpJE/46OTkpNCog0JuW9TJK513O3jc74FCDWR7mraZAd9WIN9f5BEnjsZs9cX4Km5c1wUtjnQeD\nQSaTSYslqVkyyzh5LA6JAUCsaGgYGrNryAo5FuRz4SFyooU9wx5gXIAOG3z+Tt9gUSyzjMWKnn4x\nt4Q+GQP9JAm7VviM3z+9r/z/zCmspT1IanA4NEvzfPq9rC3zbwDH/zssVyfJ7rqx/0gypa6JQ1Do\nQyeNouvt3OCAouc+f/5c1skhQcLGzA3OG4ADPeJwh+cRY0y4x3uE3BJYlcVikYuLiwJeuJuGd7KH\nALGMNWnCLWb/AJ8+Zo48GvAawPLv15ws7jECTDEOmllB9g1sBn3APmIDYKeYm8ViUcZEfx2OYy7Y\nuwbiDpPa+Wftea5DYHX4s247ASdQegio2RAmhUVYLBa5u7trHY+lmbbiRMxf/vKXzGazIrg25mdn\nZ0XhsCBfvnzJ/f19rq+vW/kB/LG3RCG1Xq+X4XDYyv6GmnQoaT6f5+LiIvf39y3602yACwQxNwYY\nzIU3OayOF9YCwb95n71e2BqEq45b0oekEXjmC+YKAXcs+fDwMOPxuLzb9QscxkGh1YwPc9/tNsdS\nHS7DiHiDwxpgqGp6fReNsJ6TW+u59Xw79GJwXDfWHY/VANeeNw1gnzRg3sCFviBTDkEkaa3dw8ND\nYbRMzQJGkpTwDvvWILMGy8gVwAS2EoDCs63QfZzTuSkYBZwNF8xij9XhHBpj8RFYx9XZqzWb4vmo\nPWav8WvsVG2IWHuPhfd5Lr6FxtyzF50/Y10MC83eRQ5xPliTfr+fN2/e5O7uLt3u9jRep9Mpzo/3\nNMm0yOBgMMh4PC65SN4/nrNer1ee63oonU6nJPc7ZI9jQCI0uo5+Pz4+luex3sxF0oTlLeNmdy0n\nyBQ63uCJ7zgM62P27HlaDebpB7reoK/f7xcbhO5lvs2UJ+2aNowvafJjzFbxfdbEibrYUsbBuF9j\nf2g7AydsyvPz88xms21n/h+FQx4DKOzp6amEakCA/D+5JLPZLF++fMnFxUW+++67LJfLkg9CApNp\n1IeHh5yenibJV7FzI0F7M0lKYixH4VxA7OzsrNRaIQnr8fExFxcXmc1mRcFyAyWVVZP2aSEzIRjp\n+hSAE14t1LVSR+klTbKmvdTX8lVq5gR0nKQ1h0nKunCjJgq20+m0CuwBEO1tIKSMkXeDtE3D4o0A\nDm1YvBm/hcbcODZPA1glbQBgBozPGbihpNgbSVNDxrQp84BnV+cTmZ3hGZYbhyCs4E1jm85mz6Cw\nGRN0OM4A64aCQjly4Rmy4P7ZmAP4kLV63JYHvFyHaWg1UDGDC/sKMDF97wYoYxyeMzsXnlOPC8Nj\nx4N9w7o4n8PrtatG6BYHwd55khJi8/wyPwBr1pFTWRwl3mw2JTRuBhsmPWkcWnQn76yNtMOi6Abe\nYzYNRw0QAjB9enoqpyt5hpOuT05OCpCuWbhku6dns1mRZxwtwpc03pc0Cf7IIQ4I3wM4sb8sP+6D\ndTfAjn6iP1gDPzdp8gkBZ3Z+vC/tEPlyVpxXO0/YGu8Jvksf/6ztBJxwkgaQYu9ovW5yJJxklbRR\nHII5m80KNTeZTPL4+JgffvihKE0mwFQYhjPZnhzivaakvdjEsvv9flFYRvYoQICTz9vD/mC8fUTO\ntUxQ+owZxYZx5x3O43CyZdIkRdlj9700SQMueJa98JomtJJgY+E1Esqxt2RmA6FHCeC5G/igMLy+\n9NEljzmeSpjEdCNzhwx8C835A/aW+Il8edz2zB3qcZVNA1XnkNgw2itPGian9ubdHLrAwzF4xcOv\nk7JZF8CuT5+5qqaPA9dKCnYiaZJtzRog34At64zXGBEDhKQNAGn8nvmmD+wDlD/j90++X/+s38v/\nA0CSpqAdY6XvHu9rOUe1Ad5FY10BZi8vL2XfY/yRac+Hx4ze22w2Bdz1er1SqZQwO7rRjpbnxbf3\nmvVjLkmQpaJ4XZsHPYXugxkBMKHjFotF3r17V5zXJCVUUoe3GDtOnNkQ+s0e8lzUDsrBwUHJVcTA\nW4/w3VqneA+gg3kG6QWLxaIFprFdyKLnBl0LQeC+Mv8AE+wnzigy0O/3c3p6mtlsVsZe5yH+Geje\nCThxLJ4wAZckkafAImP0+ZwZFehD0DnCfXd3l+FwmG63W042+MiwjQKby4wFz+d5bCaOvjoHBZqT\nCT87OysABAFAufN8WBAnlNWgiGfWeRQWJH7PRnVRMyvLfr+fpJ1RXytihNsCbzBB8hoesalBknYB\nFPQRlgkQgkA6fOexf/nypYCMugLily9f8unTp3KHBgarXstdN9butTAGgAJjY48EEMZPK/I6ORLW\nIGnnRtQGuDZqBgj8G8XjMtt1SMI5JrWxZB1IhERpYzgMovCskZvValVAO8rZ/UShO/GWd3N6ok64\ndKsBcf2TseHIeJ55Xx0qqMGHGaV6TnkO/XutiBjfOz09LTF/+vCacd5VW61WxUAfHm4Lc3HJH+tL\nf2E4yUfiz3q9LqcpMWrIAU6ic/KSRh8RjkAmYLpYD55F+fokBTh0u93y3KSdRA7YIW9fRaVLAAAg\nAElEQVSQ/LzlcpnRaJTxeFyqqCIjTvB2CI69ioPq0EfNQvAcGiGjGihTm8ThSebF85O0i2qyD52Y\nzZxjTxeLRetZ7DM32wLARLfbzbt370rkgfIPdtyxT+S8jMfjVt4g4O3P5HpnR4nPzs5aniBC5QJH\nSVpn1KG8EKLXBsfmnk6nJanWCgVjTJ4KaDlpZ03jSSJkhBUQZJ9QsbFIGkWTpLVQ/JuNxLMcrzV4\nwsCwqGxIxgFAQ/AQQgTE5/WTtMIsDl/VSNyNTUU2NmACkIbBOjs7a3kKrxno2gMCzMzn89zc3GQy\nmWQwGOTt27ctg87zHh4eSoEk5+2wKf8ZTfivaA63Je1L7vhjEMXcWnGYQUIRO+RphgSP1qybjWzN\nxKFAauCAkfV+sjGn8WzABuDUIIKGl83zUVTsQUAoa8j7DX4I57AHTPX7OC4nBRwOShovk797XIzH\nx5qTJhfEAMXryZ6rGZmkCeUBfNgjNEJTPKvX65UrHTBQvpeL/u66kU9wcHBQQALXSiyXy+JUsF6M\ngzXnfjGAHLoCxvrk5KTlqNTsFH9H58HYmWE22EM3oBORdwMgwCb6czgctsrmv7y8lCTZzWZTLv3j\nvRhskkSZH4fqXGTOsuhkaN7P9+i7c7aQFduHmi1BHs24UweGvz89PWU0GrWAjAEeesOOMk65w0AA\nabNXziVinpOUFAjGTB5M8ucJ3zsBJ46bY8Sh1pgE8kkQCo6o4i0xSCdFMll4ecQ5kzbNjQK3EFkx\nW0GS/5A09xmQ6c1zCDEx4ShS2BZT9PaMTHMlTUVKKqryPhuTpO0Vm9qn7yhwV7O0ENoztsKm1cqQ\nTbxerwtaPjw8zHA4zP39fUuRAIxQIqwV/w8bRe4AoKfe+KwL8dvhcJizs7NMp9NMp9PWJWM2LN9C\nAwywLovFohwnN/NQU6pWXjXbhtz5CK5ZIzMSKGP/vVb2KD/+rzaYKGj3mXchmz6NQHM9B4wV/49M\n1Bf2MR7mjv1cy6wT562MGYt/x1j+SJnbg7ODwd6xPDl2bwXtUBzjqZU788T3PM/M22QyycXFRTl+\naxaH8e9avjudTs7Ozop+4WfNusIckDD/5s2bjEajVj0SA0N0tMEE82ZGPEmxB+iNuoYNewT5BDgg\nY4PBoAAn9hZsFiALMMAxdIe9AQ7IHseikWf0nJ0JH2YwE8begnVk7pK07APzw/o7NFSz3F4HLkxl\nTxwfH+fTp09J2kd50VNm4n1K6e3btxkMBlkulxmPxyWPM2lCV+PxuLV29PXu7q7koBjcM78121u3\nnYETJv3+/r5sRAwRwrbZbMrFb6enp3nz5k0mk0nu7u5yfn5ekB4CzHc5qcAkAkT4N6EgNk2v1yuX\nVxEeccEhTiEYyd7c3JQNdH5+XsJI1DCBWeHZ9ma5KM//h2KDxcE7Sxoa38JnWh2aESBkqtGVZtlU\n9kBgqSzkyddeJnkgLlPN/5kNYnPSJ451Pj4+5vb2tgjy27dvy2VTbM6Tk5OyzlwkhZLp9Xr529/+\nVvqLEdxsNuWc/beQc+LcD+hTQhdJ+yivE1aTJg+oHoeVSb1OBqcoUmQV8OB+8dMXU9ahHFPWNuh4\nW8gOBgejAIPhEJEZNN6LwjJrw15ARjE8VuTuZw2meQZgAaXLe0191+Fdym47ZICD4bmjOaeI9aQP\nPqX12ndZAzNQh4dNHQvH4+vQ0i6bnbj379+Xu7Mw2knDGpKXARtoet9z4bW1E2l2CueP8hDsjeVy\nWULJrl6L48g6I5voeeoy3d3dZbFY5Pn5OcPhsPTJIcPz8/PC/CWN00HIBFk/OTkptsvsGzowaVeD\nTtosEM+mhgngjHlHLvj8a7JQs4PsMdh5bNvj42PG43GRW54L2HQOEbZrsVhkOp2WPKHRaFTqoWDv\nCPlTvoO8xF9//bXcVJyk5F9anv6o7QScIKyr1So3Nzfpdrs5Pz8vCh1kbsqbDcDv7L3Y4wE5M+F1\nvJaS9tfX160kURSnwyYk26LEQbnHx8e5uLgoAIWaKp50noERJ/sbpQ5qB4w4Ucgeq4+lMi7mg7Gh\naAFfvMcGkX8zn9CX9ljd/G824cHBQekPioey3gChmnrlrpz1el1qopydnRUwN5/PS80IQCDgjrAA\nd16QYEWpacZX5wLssmHYam/BFK7zY+rEb8BeHa7EINZ5Nc5dQalDqWOUUU42AE5oNFOALFnRE66p\n84ZgBBwqAlgk7evRWdc6Dp40OWhmkQyqHeJ1bROvOc830LYhpKEQ6Xev1yvhT37PT8uX2Zo6P8X7\n0IUSAZP+nJ/N3GO07HHXa/zaWP6VDSPPvjQD4BMayEeSwhQDHgBvBpgwF/P5PMnXYUpygi4vL1ul\nzm3MeRdyi6dPTuL9/X1eXl6KUeb76H/LIbrLOSDr9Trn5+eZTCY5OzvLbDbLcDgsa8iFgDC/fr7X\nzkCf+at1ugEL+525Muv/mmFnHybJp0+fMp/PMx6Pc3p62tpbdujdJ+wbtogaXbbLyCpy7VweGBvW\nF/v7/PycyWRSdAPOLev1R21ntxI7CzhpCrBxXw4NdIey5OdyucxgMCiIFKV+c3OT6+vrEsNk07AR\nENLJZJKrq6skKcKVpCjjTmdbVpubhLldeLPZJmadn58naWJvCDZ9ZtOS2Y7QJimnf0DfPDdplABC\nTaKRKV7ewf85Ju4cEQtIHf5h0zmmbg8zaViTk5OT/Pjjj3n79m0BWgA2U4q8s/YSki0ABMD5lBDv\nPzzcFgTC4LEZh8NhS6H0er2iGJKUo4UHBwdFwe2ysVY0jyVpjKOTHvl9/T0bYf6Nwsdw8516zS0v\ni8Wi1B4w6wTIR+E5QY93+hZeg8A63o2TgOJhPC5G533quXGYludbQfNdxkr/6VM9B47R1wygmUEc\nEHujzhlJ2vfiuLFXTNvzf95nNRPpUBZ70kDRoSj6+Boo/Vc3OyWcisTweK87/OCj/4wZ1gigcnh4\nWMAEpwrRATwf58S5HehNGMKkSYq9uLhohd4xxBwdRkbQRewj9iFsg/MLyauhpD17AVlz3aikufaj\nBhgwOOjL2sHk/8zA8lzn17mZNUEf9/v9UqV7Op2W+Tw6OipgBZl1/hhrRk0r+oCsYutgdxi3c34I\nY9MX5qcuKgkI/KO2E3CCB9Xr9TIajcqxMdcvcUweBWZPI2kqbcJCHB0d5fPnzzk6Osrl5WURIMeq\nnc3NJK9WqyJ8gJ+Dg6ac8nK5bCVrsRimwDabTQEiSXN8kskHiXNMmveafkSgjYpZTN5pDwpa0zQ6\nY+Vn7SlaaQIMkvYJiZrKx+gz1qurq5Zw1rFDMt95LkJNOI21A4SRaMuJD9aYNfDmY62TFIbgNW9z\nl405NPCiOXSDgVqtVq0Mf5RyHYNOXk/w9O+Yz5omJ7wENetEPj4LeHec36EgK0uHUuhTt9s+HQeA\n4CfA254zMlon5nrMZiMwILA3KEf2t/vn/tKs7DebTTlCyvxYlmsQVQMUgIXBC+/lHQ7LmOVinvmO\nAY0NkB2PXTYcxm63W8LUm82m0Po+8p40Ce+Mg3HDMFhnDofD9Pv9Uu8KUJA0BySo5sq6dzqdXF5e\nZj6fF1Z3vW5KqZOT4svreC4sM/NqJoI9RO4fn0U+ABDURzGw4BkOTfkdzAtHm9nvZoutk1erVetE\nGvPL+F4D26wFNsBgnbHDgNA3l9BAXwEeASs+pZo0trMGFzj3SftaBuaYxGjWxvu8bjsBJyjPzWZb\n6Y/YFpQhVI9DMi58wySRF4KBBzCAtEHhpnvX63VGo9FXSUqcxBkMBsUzXa1Wuby8LKd6iGWywCBv\nb0gEpKZq6R/lglerpgAVVBuK0YKO0sZgma5mk+IFmxUxI4JCJ9kURYPRchjBSpgxUAbazUesMS5O\nDgRU8Lmjo6OSfwKFyXhZb7wXQJ3Xj2fbwJFF7hyXXTcbWPeZubQXjGJjHn0S5bWNS3gGZez3Je3q\nrrXn1ev1Sv0Gx7MpxW6PJmnAtRkTP5P3YGQZ7+3tbZG15OuifnXo0r+v6WszL1bGBtR4wyhe7xe/\nL0nrPcx7XajL4MTPq2n4PwI9zImpfINVAzOU92vNc1yzartozq+pveCkAcGeb7Nm1imsF59D/gwa\n0c8OH74G2M7OzsrhidVqVUo5rNfrcvO5nT3XvUoaxp7cEQNlmEROpKDLeQb949kGA8iwwcp6vS5F\nQQHFZop5LzqZOUiaECHvrfdCDY7m83nRyci8WXk+x5idgM34GBd2ot4z2FucSdgXOxyev8fHxwJs\n0S91+NptZzknLPZwOCxIdL1ujgQ6wZFFgFFhoyTNwBH04XDYin+aXvMCARaMcslwJmzDZ8mkH4/H\nX93vw79Z1E6nk9FoVBK4eL69McANFF/SZj2S5iQP40aY7KXCSDB/VhA29DBTSb4CBt48tXGnL8Ph\nsMSGARJQtKPRqCStcmytNjQ+6cDY2AAcC2eu+Q5KCc+FsZMke3R0VPJWxuNxWc9dt3qz4TGY+uYz\nKHKzG/y/4+32tpmbOgznsJH7gPzzbIP81WpVvLg696WmlpN2PgBrTEPuXA7czgXf5Xm1MXKioOfH\n/ajZBhRiv98vSa3Mr2lzGu/h/YASktNfAzTMtefQHmG9Dih6xoF3CWBxWOw1JsbhTjzvb4EVZN2f\nn5/LHrSjyO9ms1krL4X9yzOQc7632WyK08c8OefOyf0YWwzlYrHIcDjM1dVVFotFbm5u0ul0cnp6\nmru7u8KIo9ORJQNjwCZhIPaGk7DtdL3mCBtoGczbOUTu2MPj8bgAK/aiTx9Np9NSS4p38Szm0vuV\n/sG2UHUZ1sesFnaHP8ik5ZXPG+h77wKccLQs28yRc2g4PYrtYy7+zKHc2cV/LN6bN29Khm/STkQj\nDOCbi5O0QizE0lEc0IYog1pYHGbxM8ni/vnnn8sdMTAjxEY5XeLnI9CEpFg8qsk6y7nf75fjgsRv\nTW/buJpm5N9JO3YNdQk9DeJO0jJ0zBkAEBDhvBPmx4aITTUajVoxcjaTGQ4E7zU6ns8CtuxtORRB\ngSYAqr01GxVAIYDQRnHXDRlzSMBMSR0CsPKjsT/MvvB55sThEcB3kq++hwdIkUPytQCZBjVJE9ox\n64NiJAaNguH5Tua2gYUxg2qu995rXh+KzeOm3DlAjX4hi1D+ptiZR4Mt5sXgy9dWeD7quamBxJ+F\nW9A7jMf5RV5L//Tf2We851sAJ4Qe0L3j8bgwqhg79rqPoaOn7dXjYPIdEklxVJLmxIoveSSPjzWm\n+FeSXFxclBwTnNwkBZiQm2ZjDONqJ9WeP418F4d8krRyMpJ2CfgkX4EfO9s4dIASf+f5+bnYRDOQ\nSVqVs5Ft5BkZ73a7xS5RHZb+sS5mwWwja/adubEdOTra3kLtCwRZH+bODjDpDKwF9rpOB6jbTsAJ\nAmyqExCC4TFlhgCAIFFuNYXr0zYsPr/j0j8W3HQehpOjw9PpNOPxuBxXNuJz38mbAM1zmgTlykZM\nGi/w5OSk1HbBsENDsmgIitkNhM+gxSEmFCDjZ1PDLPm6ANN1r8XYaZwmQkDn83lBwvTFng1eS705\nraSZK5QSf7+8vCxIH7kAePBObqd2PhBskzf4LlsdzjGFn6QFXJB9mj0Pb1pkgVBikuKN4inVe8Js\nQ6ezzaKnUJY/WxtYclFsUJFnswcvL+1TSabtMSpWUJ4fsyrev8yTQYGZFL5XG2z64T7YSEC3I6M1\nyHsN/DI/Bkx/tNZ2evh/K3uvR31U3PNs5oY59X7dZXP/0XVmm+inwz0YUoeO0ck2dj/++GMxlA41\n/PTTTyXxlpLxOC/dbnNBKO8HxJJcb0YLucGIUr12PB6X/CiHaJIGePtE283NTS4uLpK0QauZgtp2\nAdjrOZhMJjk/Py/MkcEFwJ659zvMRNfRhfF4nN9//711whPHF0fJDiI6wpEK6w3mmjk8PT0tegTm\ng/3oongAReacsJmZliStfVO3nYETBmGEy6QATpwYSWIqyZZ1YhAK5B//+MdXl/o9Pj7mw4cPef/+\nfasPGEMUoMEMGyVJOZ3iWD7Ik8v9MNy+J8JGGg80aW7KTFIQLuOndbvdcsmUq+kh+PSFXA6O1bHJ\nHPtkU5iWs6JOvmZqkq3S//vf/15OPzFvGBCE0LVZGCPALWkE0HONR09hH7MhSTuUR1iKTeZ7imC1\nmOdvqaGYrCBrqtifsTJN2qEQvE3WGS/NNDdejJkp3luDHcCwlaa9Hj7HOry8vLSO8dJ3/0Qhwzii\nCD02xod8Grj57/5JBdEaCKAjrDw99zyz9kx5BrUf2FMwsZbvOizk/eJ1qoEx64W8m9mxh8q7DHTM\nTmIYd80KAkrJA3OIgpoX9JMxHhxsa5RQRgH9hRO5Xm9DzhRgZE8TipnNZi3g6DvXHMKGOTs7Oyt6\nGO8eveokcbOHw+GwMCE0h1iQF3IDGevBwUE5lcK+TlJkyswZdgU54d0AMj6TNBcs1o5kt9tthcuS\ntPQenzs5OSmXKGKD+v1+kSPWxc4ln7P+sGNbyzeRAW55RmZtp6w77GTyrDoZ/7W2E3BitAQaxrA/\nPDxkPp+X41oGDPP5PI+Pj3n79m0ZGEg62RrmH374IYvFooCBwWCQTqfJA0EBEA7qdrvluPB6vU2W\nXa/Xubu7y9nZWU5PT3N7e9tCkWwO+nV6eloWfzabtSrgEgayUibL2wwLWel4rTYgKEkWlfnjWFjS\nJDBipMjLQDBcPhhWwsJUhwoMvi4vL4uXwp0aCJtPQpGgyhzB1lCQablctvJKSF4jNmwDCUKvlbir\n59JflIiPa++qOW6Ol+PTVIAJnzCycTQgMUPAnJi9SBrji+K3geP3BkJJkx+FrDhJLkmLGbM3Zwqb\nZ7vZi/b72ItmA01z41wYxHk+/e71et1yHPwMK/LaY3YYhd/55EfSJFfWSe4GOjQDTFP3ngvCReQP\nvRYGslfMO9z/Ogl+V409TCVvDh2sVquyJ22k0S8YW4f6ABaU7f+v//qv9Hq9vH//Pr1erzDLPv4O\nUEu28sVRVY7K9nrbAmvD4fArJgJ2mTmFSZ5Op0madWXdyXHju4AR1pe9c3Z2Vu5xe3x8LGwNehEA\nbCeAED9A2KEfxsb+pK/0n9AaMu1wC2P7+eef0+l0ynsHg0GxQWZbfDQc4GcdZGBkub+/v8/Z2Vlx\nsK2DDfJ8mitJOZHJ8znl+WdH5Hd2KzEThGGjABf0fdJ4fklasUcm2YmuKIC//e1vpZLs77//XhgP\n19ZImrAP7wdVQglOJpOStFQfhURw8BDxEBA6jFKS4lkAUAA1pjVrYIDQ2ICgtGAPMEpJY4AYj4ur\n2Ztljgxc2CQYPDb0mzdvcnV1lfPz8yJUT09PmU6nBX2jbOy9stEYvw0fY+X/Dw621XWn02nr4igQ\nPIALQ8lmfHp6+up69Tr5cVeNeSfsQWlrU6woBDMmNdPlcB7KfLFYZLPZFG8PltGJmskf5y7QJydo\nWmFgWFy7woCmNsgoYmSBMZldYE/5HT6uD9BwX2t2ws/lXfz0HjYLQeO7BoJJWvkJyCw6Ablz43Nu\nrwG2+v/sELwmy2ZpkubUi9fyNVCzi4ZM+oQJMu4+moWlKBe/96kPchoosOacBDN2yJBDEg4bdLvd\nwmKQW9fpdArbi/OHc4t8oEMoJIksole95hhS5Isxc5CCPlAtFtZ7s9km8dvZJMfG+XXMpfWfnRUa\njik2yGtze3ubxWJRWJ7T09McHBwU/cqdSMy/dRIy6WRkQCE2zPkqjN25OmaMWGv2LPuL39e5LK+1\nnR1vYNCc8mAxT09PW7dZMmlJWrfr0jBKeIDL5bIYBMI2KD8WwwlWKF0r8Tdv3pRLoJjg1WpVEL2Z\nBaNYgBV1UQ4PDwvFRr//iMLFM4a6dOU9NrfpUnsFVmyAILM7q9WqVXYeJQmar9E3LMh3332X0WhU\ngM90Oi2nZJKtgrcX7zFCx9M/0/HUlXDVUmKksAC+0RiF7qRO5pHnM5+7bsiaDSfr7Lo9fK6m+R32\nqY0860fhMIy7jWzNNBkQ06c6edugo+4nzYra/alDSIyJfwPeUbaUG2eN6xAX7+D7Bi6uaUJugR0U\nU8n2+gz2aDaA6A47DXWrGRqzWu67f+K9Mp9ea/4YjLJe3s8e2y6b8yVms1lWq+3Fosxdt9ttXfDG\n/sUDR48DHLx/X15eyr09m832BBVJnITdAAacgHT46Pl5W6jt5uamgI2kCaPaLsDkuAQFoIFnJe3a\nM96PrI3B8dNTc5ke80G4BFnBJgHOYJWYW8JOyL0rPHe73VKDq9vttu6Xo+G83N/fJ9kCr6urq6Kj\n+/1+2TOuP4N8Ol0A+waAcp6cw2r0FZsDeATIuTI1wM9lOlxA77W2E3AC+Dg8PCxHiaGW8NrJ41iv\nt/FCMsMRLhaNRcSTpJAN5YsREt+DwOZJmnPuKABO3rx7964V/+T39MPC+fLyUrLBWVhOSJCTwsaz\np5Y08c2aumRzgzCNNvmML9FC2aGA7R2b7cFDmE6nRch8OiPZbszT09NcX18XtI1SIDGN00sodYMQ\n+uNcCCdS4snwXl8Uxtx6w9BH1itpjlrbc67DDLtoVmTOF3C+hQ2ywZyNEXNqehlGDuAOa3VyctIC\nCkk7J8JABGNtTxYlPBqNMpvNinzacLsvGCpCfVaivLs23Ky7nQGeC3BlXKbg8a6TNqBAwdpwm4F0\niInwCuMh7Ov+MBeMz3H518Ju/DTA8HgNQgwybYj8O3/PToWdn102mIJut1sMrQtKIluu8oqeRW9Z\nPpOmiCT3p71586YAHwzxwcFBbm5uiuPK2tr7h/0jN5DwMmEHvk+1V9YZHYKuAiyQs8j77ehhqAEz\nvjMIefa6scbOQ0Q3ADRcfI1cF/IpkW3kkXoutVF/enrK3d1dut3tCbR3797l+fk58/m8HB5g39dO\nMe92agDrSmidEgSU9OckEPaSvjNfgEGckJOTk6KvsJlOaH6t7azOCZML9cZEoLgQMiaVBfHGTtpX\nT+NZDYfDVmweqmu5XObs7Czv3r3LfD4vTAiKh4mlL1CEm80mt7e3mc/n+f7770tsdDweF8ADs0Ll\nu9VqVSoeAlBYYECH47NsEDYB3wEEobQstD51YaXMhqUZRAFOjG4BQqzD8fFx3r9/3zp5tF6vCzCz\nd4wn7FAW68Ia1YXaSGp23PXu7i7L5TL39/eF8YI94r4MU7vr9bqEAgE/NdW5iwZbBMuRpKXMknYd\nDRtSGziavWg2N4ZqvV4XeXI5d472JQ1ITZq7adg/yBZrOh6Pyxoa7AAWHfo0M9Dtdlsgy8weDS+K\nz6IY+cm7MHQ0lKi9M8+NqWaHKGFaANBeH3vdGFEfdTaAM71tI2yGw142fbYz4jli3uh/7bHyOdqf\nnWb4Vzb0CuOHZWBvr9fbPD30X9IGacwfYQzGvtlsT4CgS5Ptnri4uChl7ZOtDl8ul+XaEDPi1kNJ\nAzL7/X7+7d/+LUdHR/nll19aFaXn83lxdjH42CLnObKm6BkYAJhxHGISSPmugb3HnzRhS+fQWK7Z\nE3VtE8CRGRl+9/PPP5fcSWp0Mac1W8uJPOfFuW/oGv+byIZPqCKzXNaKHDA3R0dHmUwmhVAgvAR4\nNYPzWtv53TpJAzAwvMk2hENGcJ3tmzRHHK3goP2Ojo4yGo1asa9ff/01z8/POT8/z/Hxcd69e5f7\n+/vWZU6wE/Ymid2t19sblP/7v/876/U6//7v/96qLeIy2NyiybhMVTMWUD4hGJA1G6Tb7ZakYFfk\nIyZqpgCjD6gDgNCsZBEue8EWzMPDw1xeXubDhw+ty99IjCUkxvwAMhiDDTNKiA3OXDFOxgEljPH1\n0eqnp6eMx+Ny/xDvOzk5yWg0SpIWw7XrVlPwKBEbHMBp0lDPNuT+vsMyAAZ74tx9Azvn8AnPNaC0\nMUcOVqvt3VHj8bjE7k1Rw2aaAncYyt4+wMTG3UDHgCppco/s1RkcOB5vb9RHVpFF5M5MlNk8vs/p\nDowLfXBoir7WjIXBsdcIT5zfYVgMZvAw+T3yzv5xmNlMlIHPrprXHXBiIJikhFfq8RPCQH48fozn\n6elp5vN5AffT6bSE1mFAyO04OjrK7e1trq6uCoNjxmS5XObx8TE//fRTye/gRBA5RT4skDS62Sww\nHr9P6cCe83eSVNGTPAfnEjtl5xE7Q+4chp1TUMg/z/HJNqqi26j71KLBl0O1Blnsm263W5z92rFA\n/ngm/aI/dSLrarUq4PDk5CQnJyd5+/ZtyXt5eXnJu3fvis53GsUftZ2AE5BajebcMFj21j15TDYL\nmjR3ukAnUhr/+vo6s9ks0+m0CD6K03TzbDbLaDQqz4AJOTo6yvX1dY6PjzMej/Pysr38yuf4bZh5\nJgbItPrLy0vrfpMkJXHK4SEWmL7gpaKoURCcQGKxXQ+AMYL0if3xXgML+nl0dJSLi4t0Op3iUbA5\n8TApwYzw41WTQ0DfADHMyWq1at1jwsZwAi+XeZkd6vV6mUwmJeRgj4E+7Vp50wwA7H3UoAWAYhYF\nIFJ/1h55kpZ8PTw8FOXtuUZWFotFicED3pAfU72Pj4+lEjIhSYAFMW76YWXm/rOPzNr4plbWDuNk\nJsXxfa8lex85RGc41EQ/7N3jjda5Oy8vLxkMBgVA0U/mhWd5/g1yDJbYdw77OBxjZqXum/OmzKok\nbQaNPrs/u2ibTVOdmfkw8EvSMjoAZYwuupKQu4GBQ3sYwfl8nl9++aXldBGe4B1c8GdWGtaZI/c/\n//xzer1ezs/PW7VPyLuyDYJZRk+ZuQC4cFIJmYYp+vLlS7nFl+fU689ewy6ROEufHBZN2se3yWc0\ns8F8/vbbb5nNZkUPECoCCJCrSWQC547nEqojnMX+5fsOowH+WU/6ip62PcFusK99ei35up5T3XYC\nTqDqoeXxlOv4K4thpYbnDrVFAi3AgEUDZYJQR6NRzs/Pi0HnOCyeJyEFjudi1EiSfa8AACAASURB\nVDHCnKO/vr4ux2lZNIBM0tBgIE5QMoiesSUpRoES2syDj6oRWnmtzgMLPZ/Py2YhMzxJy0ggHIvF\n4quYpY3BcDjMxcVFnp+fc3d3Vyg6AACK5fn5uVScRUEAMnq9Xuv+IjxUxue4a5ISx3S4wmCU9bTB\nR1mgEM3Q7LLVRq/Oi0iaUAVjxMB7szuUSaufiwH48uVLOZaNPNb5ESgW1hKamoZB4H0oe6+J85gY\nh40Q8sizeYZzXKzgWNt6Hth7AHhCePTb4NR5AM4jqBvvYe5qw8gYUZzOQ2OsdijsMAGWeK4VukNA\nrLG9U5prsfjk2bdSw4f5Zw/CZDIvjJN9jyyQP8J8wUJzZUCSMnfkhKD7zs7O8ssvv5TwCyG6+Xye\n4XCYXq9XShCQIE4jwRZWdTab5ezsrADDpMlhYg0JL9nBShqn4fDwMPP5vMWmsD/QQThLXn+zpDBI\nZsmT9u3kyJOPGicNs22W5enpKWdnZ+Xfw+GwsHE4tOxZ1sqX35IzQ8KtIwWbzaY4F94HXnMDMOtg\ngKYLxmGP0GN/BkySHYETFs+5A0mjuFhkKyGEg88lW6GeTqflCBkxZqO82WyWT58+5f/8n/+T//zP\n/8xgMEiSkg9CbQMWltMp0It4p5PJpCw8TASTjWBMp9NCuVGrxUfKfLKChSPEg2GHhWGRYTEQ6IeH\nh9bpn6enp5JgnLTvbUnaxdAAATaEzHfSCNSvv/5avO46zwBFwXq4P7PZrBgLvGLWimdYaQFubOz4\nPJsAT9OeAs9DhpyAtutm78fy62ampO6z859gLZApswBWhm/evMnnz58zHA5byXvkohggeV4NCGBH\nYK1sfGuPB8DPPgB80D9T9zBEZh2QsxrkGKQl7Vwd53y4/6aok6bsNrLGu0xV86euX0FfcDDMCnmO\n6IvL3vPd5Ouj0PZ2a2+a+bIxNMjmd/9Mkf//3egreRW14U22sn93d9eqb9LpdHJ3d9cKpbhx9BZn\nEq/83/7t39Lr9fLbb7+1gB0s7ZcvX/L7778XJxOGhdMp19fXSVJYE8CzD19QhI2TjMlWtn17uoEF\nzAL5LbAesNtcxcI1HEkK++95JFyEPq1BOo4tgIk9hq5w+MdMUK/Xy3Q6LeUwcNwdmfARbPQLDbBj\n9tDF61wegs8zJphMA6mkyQO1LcDu2TF4re0EnKAg8Q4ZGIaTDiM8PpWB8OC9Y3CdK8LGMJvAGfea\nOgedkqOC1+eMafcNj8FKmLL3bA4MAhSfE7WShskgPou3S3P45/HxsSBdh1VYWAwA36efbBrQLz/N\nrPAulDaeMmvCpgKkEM5hrDBfjIHicgYkfgdUqzeikycxnklz4gPa0bkWPNPPr3+/q4airL0uy7Vz\nJJg/fm9Dz3zAeNAMLJyMZ2aBuXXuBobVzbWGkGmHjQA5vIe+Wrm436zVH8mYDTTNjAb/9nN4lgEp\nSX027H424+ffda6OmTr2OR4nc+4QE2EAJ63TT+bN4JG+oEu87vzdfTYw5xlej12HdZhj5/kxX2Y4\nWS/rNeaNeTg7O8tgMGhd4wHgob158ya///57qXhKCIIkWfY8a4jsM5fUHDFgZ47RmQ6zoO8BMk9P\nTyWPizUYDAYF2Jgt9EEKZIm9gt5lDgEe/BtnDp3qvetogNlAy5vvzQH4mZlyWQCHgp1AzN6DyQE8\nmJUkB8YOCn1l7XBasZNms2z7cGjZnwDJuu0sIZYBseBJWh4iAm4lzWKzkCS1LZfLVu0EX5+OYA2H\nwxJrRtFgqK1kOYLMYtugcLSZvtXJdCyUT98gvFRyRUAAGNDb9rq8oGZnmDMfPXbSI2NwXgm/h5JN\n2hUtrdwpuFavFaj86OioVcb+5aW558LrlzT3HBl8mvaHSQHIOUGLGCjKgSQvh3KsAJnbb4E5Sdpl\n59ncZgf8b7NC/p4NMvuD79vTwhvlmB8nmnyCyh4QILQGzEkb8PlEGSEay2gNqF8LW7E3bHQNGmx0\nzaoYrKL42fc8g1AhPz2HNkTur3Ma6j+vGTAD/DqB0CDIFD5j593+Wcvua8+0DJutqEHlv7rBYKFv\n2cMOH6LbACPPz8+tmlXodp9qZF0Ya6+3rVPy6dOnTCaTUjMK7x+QPhqNyj5I2ndKwerCohHyZD19\nGMOna2CLkQHrddbg4GBb1Ozq6qrFHJjdQkbRs4CRJCVVgfnjj8NLDqHQN4dJkLenp6dysMJsnp1s\n9q33Ms7u8fFxsZu9Xq8w+wAa9i9JrXwfe4uuwY4C6gGPdXjSziPMkZm3r2Tu/xPJ/X/RmIDlcpl+\nv9/K+kaIEWRiYb5nxiyKQxCcrWazQ2Nx8Z+LABG/5D2155I0RpiFMRUHyHJcznF8H6mzx4miWa1W\nGQwGhdZ04R17TbyTvmBQDKLot5WhPTOe5TCJQwUYeU4d4fGhaBxjxih1Op1y/wXvNYB0QhS/e3h4\nyGKxKJsHBgYjxHFxnkdCMnRtnW+Dcf4WqO+kWQN757UhB2i+BkoMRGqalFNgBhmwgev1ulV+HaoZ\nGa1Bda0AUUT8zrKbtK9op5ltqWUNA2C2o2YV+J3XzvPEv82esHf4npNpeY/77eewj+p18hg9PkA9\n78MI1ODRTKFPfBhcJE11VN7Dd2vWlHcz1po520VDhg4PD3N+fl6OndvIek7JY0AOanZzvd4eQSWU\nzj4xE8GJD+Ydufzw4UOSdrI04IV5Y3+5sjiMOv3w7cIYSzONDvWgY8bjcVnPxWJR2GKMO/3CFsDu\neU+ORqNi7wyazUIbUPNMg2vmk8hC0gAAHMNOp1McN++bJAUwnpyclDGgS11LBTCHvUUHOyGWvUBY\nzqFkAx3WnLnyd19rOwEnRmyfP38uA2JSXJqcxUgaJoEkK9PAj4+Pub+/z93dXf7nf/6nRUGb6mOR\n6gRNK1Umn36xMIQsMC6msqG0UH7kU5jmRPgxyPaI+DtGwIiSRXYSMMJudsc0HQjdIMfeFwrRyJY8\nAvrg3x0fHxcAmKS1Pg7T+eg34JECPKyvjbWBEkKMR0z/GDfJZKwHsuC8hG+hISvINKcEnMdRb0qU\nkQ21P2MK2Zudz5lC5/c0e/O1ATbI43nIDN6P2UHWnLyOmhkwnc+znNthJsgGwMAaAIz3y3fZW2Zb\nmFOHavkdzyX3hjE5Zm6QTD/tHGAU6JNZQAMgGxjez3gd1/f46U8N8vic5aAGW//qhuwlKXl7GPfD\nw8Ny2zqshfPLjo+Pi3OIbjg6OipHgdEBTjDFsfEJM9bGsgTbwncAHcvlsoB5O4027qwh+nS9XhdH\nmM+ancap4uQiSbe1zLHPqCHV7XZLDiIGnn4Q5vD9WDBUdnRfc5xJGyCEUx+N5hQR8mXG1s49dhEw\nSZkH9CkX/LH/AVnYtOFw2JoH5hUiAAcTmeCzTrh/re3sVuIkBY3++uuvefv2bQuRJs1RVz6XbA3w\nzc1NASgIyu3tbWazWWsToyAw7jAcfMY1P8wu1Emjjlvyf0m7ZPd6vS65MCwg5+2TJiHURsJ9eO2E\nQdJmXQ4ODnJ2dlYUIEdrUcAYBdPTgBL+JM0dGY6R867BYFAMPXPBM3zMzMaI76L0B4NBAXLMOxsD\noOON5DDeYrEo68/vicnCFJCBT3P9lF03xkk7ODgo7B8G3UesARL2tg2yzD7Q6vCBQTC/N4PgdTZd\n+xowtvJOvr7ML2m8OP+ecdMf11PAcJthcz/piz1MKztk38/i72aXaAZtBu7sP7xkgyje4X1P/7g1\nm/U0kKidKOdg4EB5jeo9mDSshMMNfv8/8zD/FQ3n4ODgoLDdm82mhA87nU7Oz89bYAFHp9PpFA98\ntVqVat528Lx/qTyaNGuIHfCRdkLjABzXg8LrX62aqztsGHu9XmHsefZgMCjHcXGIcIAIqWBQzXDX\njoENOwmqgAcDm6R9bJxnWNaYe/YGMtLpdPLhw4dSHHS5XJY8HmSFAwqj0aicXEUXw5ZQ9DJJiSIc\nHR2V5zhXZLPZtHSXw3JmSLDjHOZwHpz3q+3ga21nOScvLy8l+zlJxuNxRqNRUSaLxSLj8fgr42eg\nkWwVKwlNFlyEm9LIbCAEwMmuCJ6peNA/DAt9cJKPY3mbzaZFa1HREM8hSXkmmwZDW7MVTpxz3slw\nOEy32y2Cg8K2N+6Yueun1BsTQ290TF+vrq6SpChYNsXh4WEmk0lZQ/6fRKk3b96U43qOIaN4AGje\nyDSU3GuGOUlr7VH4GNM/ovJ31Uzzkk/g/AvnDzFe+o7HxJpi1A2Ka+NrA8nvMbQGDcylEweRXX/W\n1HLSPk3jMJQNrcfN352QSv/5jD04Pt/pdEqBK37PPgbM0l8Uqve9wbnZOPa5w3/Ik0Mu1McwhY7c\nQ8PX4TbWyawp4Z26VoYdBIdCoMENRgziasC6i8baPT09Fb1G4cSLi4ui28ziWU7MxvlOGXSbHVDq\ncfhwgkEPup938Xz0KBffUXTNBtDOnNkv1o/3WVeaLYZlTJpwEP2z04DMOPdmtVoVloE5RcbsEDAu\nyxiOqI8Bv7y85Pz8vNQyQhe4+dAFz16tVplOp8WZvru7K6zRcDgsZS04rm1WxkeDCTeSc4IcW18B\nYJO0dJLX44/aTsAJx9Gge05OTjKdTjOZTPL8vL1g7suXLyU0Y3TsBpK7vb1tAQrAg9kJKDDTw8lW\nUbtGRK34fILFl0Xx+6RRzgizvUBaTdXzjF6v16Ie6SPAw/+G6kT5+bkWCAw9v6cvbMyTk5NcXFzk\n06dPrRozX758yf39fYvGxkCgRKEh/S5/niNsoPlut5u3b98W+pF5hklwkiuGhDUDxDh8ZvapNqbf\nAjih/ygsFBj0qgFH0sgD6+gxYGQBZK7IiTGzwfNzUHR8x4rB1LhDQABHg2OaZRuQxRq4DLWBDcCY\n33s/0GeHUBhPDdDpk0MLgC2zIwYOyI4ZIBtU1goFy1iQZYM2DI+TC5FLwgKcrMMwAZjsecIgsNeQ\nAYdQzXDV87nL5vwO5KTT6ZTilDhRPkHFOlpeAHusB/Wf0C+Pj4+ZTqetMvgOsxB68y3v1puACRtj\n5r4Ob6Dn0WOwCbWcueFMU6jQjBwAC9lF9jlhyf/zf87hsDwDWJgzfkfVXAMZ5ufx8bFUPsZZ9x1I\nw+GwOMXUBasdysFg0Dr5R2gMJxWwZ/sKI2OWxfoY0I8tYS+9xhLXbSfgBOFhstjk0+m0RemjQJMG\nZTIB9eDW66bcsCvQvnnzJu/fvy+VR/0s3k8SJrRh3RAE0CNelz2zOnwzn89bXpE9fzYZYSjeDXXK\ngidNTgCl7Dudbc7J4eFhPn78WASQd5v65vuuoGoQMRgMSr9RfvP5vFRThN2B1rZiYZ34DgLJuFer\nVdm4HIeG+bm9vW3FmJkPBBow4pwDxk1irpXPrhW3G/Rm0jYwyJqBCc0eZm2M+Ls3P7LvBDzmnO/A\nKFhRYxzNeNhDA5g7pOKcDjyg2nDTLwCMmRrHvW2MDbT4ye+RMd7B/nKOCyymvXIciLu7u3z48KEV\n+ttsNqXgFHJGHspyuSwKmdMlljH0jC+NYz9tNptMp9MkTeEvvEU8f8snesO5KayF6XEML47IrhNi\nmX/6iaOSpDhXLnOetAvLGRADRizrOKo1s9HrbU9Jmv0yo4KMozu73W4xvkla8unQGWwuurHf75dS\nB9gRnLzBYFBSCBgHxhog471pBytpnIx+v9/af3WBM5pzdmCCkGvfrMweuby8zP/+7/8WRodIQa/X\nKzklPAdQslqtWiUqut1uxuNxYU8cwq+jDOxz9AlADOYsSZlfwm1mUrHVq9WqdZKpbju7lZhTB6Ax\nBuBEqjqkwyQmTc4DwubENT5vhsG0k424gcN6vS5389CHpNl8LAxCYDaHeCSbBQYHhZWkJEg+PDxk\nNpu1jnuS68HnWMzaG6QPVEa0wgclJ41woFjtQZIHwVFssxd4JDxzvV7nu+++K7cGI6BO2GVzLpfL\nAmBQYNRGQeE7nGBPHmPrNTVdyvtIXrNnzTiceLirxlrZ+0GR8DszBQYezqEw+HXIBkXHXjFl6zwW\n/pC8lrSP8CdNMSfTywZIhDRN69a1UljjpM0KIpckSdI/My98lvlgTgxOeJaPHpKXgqzwDE51AWLN\nuPEOAAh7HObUoB4ZRTegdA3eAEckzDNX7pdlnXGwLg4TeU39e/d/12Ed+oUcms3wsWLGyHolaSVQ\n+u8OY/Ms7pqB4WIu2Qsu2AlgA6zA6mK8zVpRD2S5XBYHkXcQgmXNALXIkp0BqodTMXw6nRYH0bko\n6/W6nCRlXQ8PD0v+Gf1NUhxPnEY7D4TB6PPnz5/z448/lvfQ5/Pz81Y4HZYFUI0NY2/BXPd6vbJf\nkNebm5skTZ0xWG+zYOgRKtQmyWQyKfk+7K1Op1NO0OKwAozqpN+67Yw5scLivhU2qBcYL4QFc0JO\nTcXWIRdimy6PjsGz0fYEdTqdElNN0gIlzjWp7yZA8fF+cg36/X5B8aB01zhhfNBp3LvgvhntJk21\nQhrje3x8LDcwY/AYL2MzQOAkDV5esjUWVKxlUyDgMCnMNYARJYA3vF5vj7US+3XxovF4nOvr6zL/\n9TFoh5KGw2G53t5AldM/yRaYUGDvWwAnSdtIO7Rg4GHvGAWfpDXW2kM3I8H3AA4opZp1oD+mjU2D\nW76ReYNuvCYzmA6/8NN0Nv2Greh2uwWgAjwZj49U+h29Xq8Y+k6nU+6ccnVk56Qw32dnZ7m7u0uS\nVn0Le7x2KDgOyvcZP8bIFVEd6nJdGeaU7xh08EzXhEhSEiMdRuLd9op5758p8X9FGw6Hmc1mJUy5\n2WxKkqvZMhto1gfd69Cs5dyfJYeCtaQmCrqcvVXX5EC/wDyjV/l/dDcgBEYXgE4fkNXlcllkpd/v\nl/L5sDfkkgyHw1aelKteU0UWUNXr9UrBzrdv35YwIDbC+sEMFHqXMAxgZzqdlitZzs7OSv0Z9Kjl\nE3uArvHlqpxuury8LDbr4OAg9/f3JVTnveB57/W2dwS9efOmBUjJZ+HvvHcwGLwKcl9rOwEnnz9/\nTr/fL2iPP6BaAwyDlKShrI12ASgsJhsCOtDPYlFhSEzvooiTtJQSRWhcCdbxThe/SppLr1DO5+fn\npdwyZ+5ns1kBEaenp2Ujfvz4sVCmLCZAwgmuxLjn83kmk0mZIwStjgEiEMfHxxmNRuW4GPPF3T3c\nO3R2dlYE8Keffiq3FFMcDU8Vj9pJbvydPB+El9COAUntTQN+mCeML5s3acJIq9WqJHMxzl032Cd7\nGVbYSfs4oA1RDVpQlvbqa1Dz9PRUkvKShir2RY3OR0raV7S79oYZMbMF9ioBI6w962+Wx8wO4MPH\n3h2GYXzsQQx90lT9pLAiYT7mzewac8++ZmwYiX6/X/aPy3ATggWgmBkaDodFhwBuksZhcSVNAwh+\n8hzXj2Bf2UGzoq91nR2UXTbWh3Lx3PtVh9/xnM0+ITsvLy9FVuv8FQ4KPD1t70FjvgAcAG+cV3IW\n0Q/sDYq2wWZ0Op3i/QP2AA3kglCUkLmfzWaFJfnhhx+KnGIrGM98Ps90Ok2/3y/63g7acrlslX1w\nQUTfccZ+MTto4Pb4+Jibm5tiG3HYnp6eyjFiElw7nW0+0+XlZcnhZG7Ze+hnbBB26NOnTzk8PMz3\n33+fu7u7HB8fF0cVVopnUUmX577G0HNaDeaSz7FnHeJ6re2MORmPx0lSQMR4PC5F1rxRQXiOjZvG\nht5DWSXNLb945kwu8WQMt5Pb8ITYXH6uQydsQsCL2RnXRGHTQf8dHx/n48ePxXNC0blEMpsd4UZA\nEQjmzoyFQRKgzciYhtBQcwCUfH19XWKNeCv2EjFyHz9+zPn5eZImj4XNxm3Px8fHuby8LEqm3+/n\nl19+KYoVNgvwiFKj0qGPBvJ+EDveDO9PmvhunQS3y4YiQ/EmTQjSYQoa4AEZ9+ZmQ/Nv55JguAlz\nEB6ELQAI1AnEVob2ZpN2QTbei8dXn1hg/9gr9NUIeMfsE+hf9iYyYYbH7JCBFP0E0Fg+aFDNPl2W\nNLebG5TwDkAJ7Bs5KXyGXJSaZsewEqZFfxjAoFdQ6hh0M0WMDwfLYUz6jmLfNSuI8cTYj0ajYswZ\nJ/92bRIcDfQD7LLHjXMEeFssFnn//n055cKNu93uti4WenY+n7cYKBwjJ2EDihxKplihw5jonV6v\nl6urq6KDAVKcvgLIEBq8ubnJxcVFK+zOn9PT0wIeAPuAIhwIZMrH570/Op1OPn/+nPF4XJ738PCQ\n4XCY4XCYzWaTjx8/5t27d+U4NKCF/YYz++HDh9zf37dOz7BPqeHS7/cLwHt5eSml5X1IBObMR7sn\nk0k6nW2CdK/Xy6dPn1q5nIzdJ19J6fijthNwMhgMSt7F09NTbm5uCvrlj+lr0DHAAAEETePZIHAk\ncHKSxLUxKCDExPEs2BEjffrB7/HwQLPkzjjc4mS/zWaT+/v7Et4ALSYp5+kxNMvlMm/fvs3nz5/L\nAkKJs4kAHpQPpnQx2esYqTpxkr4lWyF7//59AWPX19c5PT0tawCFCWLebDZ5+/ZtZrNZJpNJTk9P\nyy3K1DsYDocFyXNjKP9OmsQwMvHxWBw2QkE5qZlj4N1ut8RPURjOjUEJ7pr6TtoFwjCSDnuhMPlc\nHXKjAaIdiknSAiYAcudEGZSaIsZYOmQEPVwrGjMUzDnvR4adz4VhN8vJZ5zUx0kznu2wBrLGH/pn\n8PRa+Mvz1u1uk60vLi6KHDnEQj4CMkSuCUaNPlIKoAa9zAWGxSd96I9ZKEIAyRbkkZPlOeCEBXOF\nIXe4Gcp8l+3Tp0+FPUYfAdDQw4BadJ3HgV4y0HOeHpemrtfrsn7owX6/n/l8Xhi4pDkpCWvtyuDo\nAoA/evng4KB1uzuAK0lhO9mz2AUDVBg/O8Xv378v8orMJimMG+9dLpelRIMdu6TJ2XM+DXOUJD/+\n+GMrD/Ho6KgcTe52u7m+vi6y9+nTp7JGT09PJRyXbCMWRAfMmFIlFnZytVqVUNN0Om2dwOL3OBn3\n9/fFeXz//n0JeY1Go9ze3rZyFZ0bhP3+M4dyJ+Dku+++K7TYr7/+WjqL0kgaj9LCTAO4cConScnb\nICSBkpvP50UR2asajUZJ2ln/UL/2+Jy7AbpF+AEdJH+SBwL9hbInDJI09CheII2wj+OMbHKus2aT\nOyuevroUM58zAqdNJpP84x//KHkfk8mkABs2J8d9uYob2pnEKI4Ls2G92Z6envLx48eSCIuRhgHB\n0Jo+RxmQCAf4OTg4KN5Bp9Mp8UpAJCeG2Ji7ThpMGjk1aE6aM/7IJYbNORxJmxVE9lA0DpMgQ47z\nOhHbMV3nhdigO//EBQSde1IzBKbwUdJJu0gbYyC5EfBppwOljVK2PHNywgAC+XPisOccY0FxMIcc\niZGzHvZQ+Sz5AA6vAeBRog6T4iwBTviMw2f8ZDzkaBBenUwmLZ1E+BZmgXlCXnbdmAtqIpHUiPw6\nFMnet4PnUzaEN9brdcbjcZHjs7OzfP78uYR/Tk9PMx6PWyA1SWGH6UfSsKxm28m9MPuHLHW73cIS\nmLGm/0lzMs2hO4AHSfiLxSKnp6dFpwNIkVvrcoy6k2/Pz89LMbTXjPUvv/xS9udqtSoOdpISImQP\nMKfsOeqHPT8/l9OhLy8vubi4KGF8n8Kibz6B9do9XHzeckG43Swm15sYXGNz7QS91nZWhA1BPDo6\nysXFRW5vb1sdRQmy4fGYLKQYxaSpEooX4qIxxMdM3dm7TBqBtQDxfxyrYvFJuqqpexI/AS0IOooL\ndsBsAcoZANTtdkvlQk4S1OEBjJSFg8UG6fomTNPmg8Ego9GoeIar1aqAKTw1gEeSFoBIthvu8+fP\nubq6KgqVvBLi9ggx3hWKHqEEpGCcaYvFIhcXF0lSlHTSnNYidstJJYcn/hlF+K9qyAwN42dPHw/P\nXrkTWVkXvGg2sY2ewyWsncEPtCxzxHdsKMy0AIJhEgz26pwXJ2w77OmTdvwfYzXLBYhHNvjpEAaG\nhr4sl8uMRqNScdPzTZtMJrm4uCh7ybk4zJvDSfSJtcDR2Gw2OT09zWQyKdU1ofQZD/uZvzNG1pk5\nA4DhicPOcPoBz3U2m5VcBOfcOMS1yzYYDAowpl/MDSCXELqdsSQlodWMUtLkFMGUJMnNzU0Bw0dH\nR5nP562SDxjT2WxWTjlyMov5dWiEhHz2EuNItvoGubYjYUehrqjqPDj2HQw6DDi3KBP2ZI/A5qH/\n2XOE0y2bBilHR0e5u7vLd99918rXSVLsEUCH7wFmcGpJnrXNcZ4abDe6wuweugDGmj7DRB4dHRXd\nP5/PS07jeDzOu3fvWhV6Aeg885tLiP348WNRMHjNzupmEEwkE5Y0J17wPhDMJC0PxJOQpEXRouz4\nO54TeSMu9wuNdnCwPY5GXJTEJP7/+Pi4MAoOQVlJIYwO19ze3rb62utt70R49+5dRqNRPnz4UKh8\nDIKVHQ3l7rABPxk7Xs+HDx9KaAUBwRsEYZOEljTH9mArhsNhbm5uWsi609leioXxQTGAmA3WACn0\nlY2aJPf39y1QyO9B/z7yZgDrmgq7bDXrUBvnpDmNkjTGFeBlWh+ZQZm8dk8GzwNY21t1sjYso3Mq\n6BO5Tj5tZrmh1TkUdUiKPpghq8t3A9wdtqjngmdgAJLm8jV7sRg6gPxgMMh4PM7FxUUxaABjmFiO\nc8IoEXIyo4G+waj1+/3MZrMis/SFNbChMmAz2+Kj2OghJ3QCqiwDzLe91102xojedM4HoMsAbrPZ\nFIBJGBwASSjNdZTI5eBZSaM/ky34xOg6rw7bwXoljZPAmlBuALnjQAAev/P1zDwjH+PxOC8vL6Ua\nK+NBFjnhOR6PW3dpJSmOw3Q6bbFi7DUcXeRms9km5cIU+7Z4HBWiA9ieTqdT7u6pWeRud5t35Rvl\nYYmQL4clfaACZ55QO/ucd+Dk+D48F+D01SWbzSa3t7e5urpq1V75/vvv1OWGhQAAB0pJREFUX5W3\nnYATx43tUUEvYby5cMjUdz35hDTY2CBFBDFJC2UmKQuOADrhFDorSVFahIpQEiBkP5cKfYeHhyVx\n9MuXL61kOLyAwWCQ09PTfPjwoSR64Tk4J+Xy8rIIvFmE1WrVqvg3GAxyc3NTKGIy3AFyKF7m8/ff\nfy+KmYRUo1o2MJRcst1A5+fneX7eVvDF2wQ9A/LwDii0hDJzKGKz2ZT5s5Ew0+R7G0DpTjhcr9et\nBOdut1tYl103gwN7ZfbW+R39xyuELqY6Ixsb4A34cTIfQAjDh4G18nC4wkYWeWLdYFoM+vCE8fr6\n/X7ranlACUod40GoDoWKh2baHcCdtG8ato7g+Un75l+AweXlZYmrQ7UnDciBYTKNj9FjfIC3yWRS\nZI81AHQD2jkh5fViHv1+jDN7xeARGWAPcdoCZy1pEvAnk0nryPQu2nw+L3OO7rSjQWgFXYisEVbB\naTk/Py96EvYoad9Sj1zBUqCTLy4uCvhwAiynYtBhsM/cNO8QJmEQ2DuSe7nUFF1yenqaT58+5d27\ndyU/0CEtFymDaR8MBmWPAAR4H2kMLy/bAyA4pTA/7FHk6/T0tFWXi6qvJAkjQ5wGAnDAFpL3AThg\nj7E35/N5FotFAfTMD3PEQQ7W4e7uriR0w5ACKNnH2DnmfDKZZLPZlr0YjUY5Pz//in0i4fa1thNw\ncnV1VSaM4jhJQ7ViqJ2sljQhHbwLNq+pLZgTFtWeFsbw4OCgCG7SgBc2DcYaQ4+yxgvgvfQJQHJ3\nd1c2MXFBAACb9OHhIYvFosTjfWw4Sen/8/NzPn782Ep2xQNFgfk+BeoDQCPzPd7NnxoYYFxgZCz4\nrrY7n88zn89zcXFRjt8ZvEDrnZ+fp9fbFvIhhHN0dJTb29uyxtfX1616AEmK0XTYjjoUvV6v3POA\nYb25uSnH3fg89/7sutUhPQy+ATbKjo3uvAVySepYuZtzTvA0ydZH2fEee58wDSgj5ARP1EYXdi1J\nAb5JinfI3vLeSFJqLgCOXTDw/v6+gC4YIcsgbGmv1yvAnT3W6WwLOhmooexrFpS97QrK7I3n5+ec\nnZ2VfIHRaFQ8Z/bAly9fSrgBDxanpPbGyX0ajUa5v79vlTj/8uVL2af0t9vtFiaQ5FrCITBT5N5g\n/P+M/v5XNHTjwcH21CNrw3ohT9TaQP5IyiQnDpkGPAPcmDP2RX0a7fT0tIBIdD/rBWipLyZl3gk9\nOS/GuYKwIVS3dn4VsgPwHI1GrRQAZIk54EQN4MRHj7FB3333Xfk8cmpW/fDwsFzpgj7GmYHlZP84\nXw02nWdiW9EvHz9+TK/Xy+XlZbrdbs7Pz8spOoOZ4+Pj/PLLL7m8vCxsliuXA3jQbTAqnz9/bpXb\n4HQXoR1OZDqfaDKZfFvMCYlnKLD7+/s8Pz+3PB6ED48JQUSg7UmxaLVnjreBR8+kOp/CoRDeSzIX\niwyz4eqDgA6EebPZFBTpOKVDTHiBJBk6Ix0vL9l6f6enp1kul63jYHim9JvNnzQ5ISh2F2QzM5E0\nFLNDKhgqnuXwEMfS+Dx06A8//FCOgHNyiPlmDh4fH/Pdd98VT4BTRskWyPB3DC1z6hg13/Emd0Ip\nNQr+rBTyv7Ixz05GTRoQ7JguSof8KR/DZHwOESUN64IHBFMC24hS9f6pQ172dmAnqQ/B+mPweZeP\nhLKPzF45yZCGbAHCkHufdKvDr8zXcrksoA2vEqAH43lwcFBAlStR8j3+wN5gbCaTSdlnzLlP+5HM\nBwgnTOky5svlMrPZrCTX393dZTAYZDqdlpu5fS0GLOJ6vS61fyaTSVkrG3fndlgudtW8fpeXl2X9\nfcqF36PHAQUYNWQQZhemD6AL6+cCk+RNwMSyXrwHMIcXTz/ZYzCEDnOik5ElwvROCH16esrV1VXZ\nG5YT7wdYOGSfvtEIj3D6hfkARMPS+fQN7DQ6HrtBCM3MInIOuwxYBGytVqtSa4s9CghbLpf5/vvv\nW3k6zMn19XU6nU7r9ul+v5+PHz+W00Aw5AbzgCaYxufn59ze3ubgYHsIBSLBl9f+Ues4Fr5v+7Zv\n+7Zv+7Zv+7brtvuqVfu2b/u2b/u2b/u2b2p7cLJv+7Zv+7Zv+7Zv31Tbg5N927d927d927d9+6ba\nHpzs277t277t277t2zfV9uBk3/Zt3/Zt3/Zt376ptgcn+7Zv+7Zv+7Zv+/ZNtT042bd927d927d9\n27dvqu3Byb7t277t277t2759U20PTvZt3/Zt3/Zt3/btm2p7cLJv+7Zv+7Zv+7Zv31Tbg5N927d9\n27d927d9+6baHpzs277t277t277t2zfV9uBk3/Zt3/Zt3/Zt376ptgcn+7Zv+7Zv+7Zv+/ZNtT04\n2bd927d927d927dvqu3Byb7t277t277t2759U20PTvZt3/Zt3/Zt3/btm2p7cLJv+7Zv+7Zv+7Zv\n31Tbg5N927d927d927d9+6baHpzs277t277t277t2zfV/i+IAQDEy/wsagAAAABJRU5ErkJggg==\n", - "text": [ - "" - ] - } - ], - "prompt_number": 3 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Raising the bias of a filter will correspondingly raise its output:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# pick first filter output\n", - "conv0 = net.blobs['conv'].data[0, 0]\n", - "print(\"pre-surgery output mean {:.2f}\".format(conv0.mean()))\n", - "# set first filter bias to 10\n", - "net.params['conv'][1].data[0] = 1.\n", - "net.forward()\n", - "print(\"post-surgery output mean {:.2f}\".format(conv0.mean()))" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "pre-surgery output mean -12.93\n", - "post-surgery output mean -11.93\n" - ] - } - ], - "prompt_number": 4 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Altering the filter weights is more exciting since we can assign any kernel like Gaussian blur, the Sobel operator for edges, and so on. The following surgery turns the 0th filter into a Gaussian blur and the 1st and 2nd filters into the horizontal and vertical gradient parts of the Sobel operator.\n", - "\n", - "See how the 0th output is blurred, the 1st picks up horizontal edges, and the 2nd picks up vertical edges." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ksize = net.params['conv'][0].data.shape[2:]\n", - "# make Gaussian blur\n", - "sigma = 1.\n", - "y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1]\n", - "g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))\n", - "gaussian = (g / g.sum()).astype(np.float32)\n", - "net.params['conv'][0].data[0] = gaussian\n", - "# make Sobel operator for edge detection\n", - "net.params['conv'][0].data[1:] = 0.\n", - "sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3))\n", - "net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel # horizontal\n", - "net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T # vertical\n", - "show_filters(net)" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAAicAAACbCAYAAAC5xzv6AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWuMbNl13/c/9eh6V7/uvT1zHzNDzgw5HNIWNInpMCEi\n2wkCwYElBFASBTLg2DCM2LATSAkSJ5GlWDJi5EMAA0ngL/EjkQPFcuIQgREEcCIbAkJD9JhDgdJ4\nyOFjHnfuq2/fflV1VXc9Tj7U/e3+1+pTfe+MqOkmWQtodHfVOfvsvfbaa/3XY++T5XmuJS1pSUta\n0pKWtKTLQqWL7sCSlrSkJS1pSUtaktMSnCxpSUta0pKWtKRLRUtwsqQlLWlJS1rSki4VLcHJkpa0\npCUtaUlLulS0BCdLWtKSlrSkJS3pUtESnCxpSUta0pKWtKRLRT804CTLsk9nWfa1LMsOsiz7C1mW\n/fUsy37+8Xd/KMuy9y+6j0ta0kehpWwv6QeVlrL9w0s/NOBE0n8q6f/N87yb5/l/l+f5n83z/K8U\nXZhl2TtZlv2R36uOZFn2lSzLXsqy7JNZlv2z8N1GlmX/R5Zlvcf9+Pd+j/rwX2VZ9iuXtb0lfSj6\nfpHtP59l2etZlg2zLPtbv4d9WMr2Dw5detnOsmwly7K/8fj5B1mWvZFl2Y//HvXhh0a2f5jAyfOS\n3nzKa3NJ2Ud5SPaYzvm+Kum5PM+/JelfkPTPwiX/g6ShpGuSfkbSX8+y7NWP0pcl/dDQ94tsfyDp\nlyX9zY/y/CX9UNL3g2xXJL0n6V/N87wr6ecl/VqWZc9/lL4s6THlef4D/yPp1yWNJQ0kHUh6WdLf\nlvTLj7//Q5Lef/z3r0iaSDqSdCjpP3n8+b8k6cuSdiV9TdKPWfv/WNJfkfT/Pb7vk+f05Ucl/frj\nv/8bSX/WvmtJOpb0kn32P0n6qwvayjRbCO9Iuv/42m4ck13/jqR/TdKPP37OyeMxvmHj+KuSflPS\nvqQvSVr/qO0tf5ayveC6X5b0t54wrqVs/5D/fD/Ktl3/W5L+raVs/y7m/6I78DEK+j+S9Kfs/78l\n6ZeKJlDSdyX9Efv/hqSHkn788f//+uP/N0043pH0Gc2iUZWC5//7jxdI//FC2JU0erzoHmnmIfyo\npH647+ck/Z8LxvSnJL0t6QXNgM3/Lul/Pkco07gk/SLX2vf/WNJtSa9Kakr63yT9ykdtb/mzlG1k\nO1z/V/RkcLKU7eXP951sP75nSzNA9akFY1rK9lP8/DCldaSzIb+nDQH+cUn/V57n/7ck5Xn+/0h6\nXdK/+fj7XNLfzvP8n+d5Ps3zfBwbyPP8b+d5vq5ZOPALkn5E0m/ns1zqRp7n70pqayb0ToeSOgv6\n9TOS/ts8z9/J87wv6T+X9NNZlj3NvGY6O/5cM0F9M8/zI0l/SdK/c1648wntLenjo8su23O3PEW/\nlrK9JOj7RrYfp3/+l8ftfnNBv5ay/RT0wwZOnkYpFtHzkv7tLMt2+ZH0r0h6xq5ZWDX+uMh1L8uy\nPUn/smZI9y1Jn37c3n/0+NKepG64fVUzgFJEz0ryxfGeZvnPracbViH5ON6TVJV05XfR3pI+Hrrs\nsj1321P0aynbS4K+L2T7Mbj4Fc1qBv/8Of1ayvZTUOWiO3DBtEjo4+fvaRYm+zMfoS3lef5I0lqW\nZf+upD+U5/mfzbLs70v67/M8/3W79JuSKlmWvZTPCq+kx0h9QdN3NAsNQs9plqO9L+mmZiE+SVKW\nZWVJV5+iv8+Fv0eahUL7H7G9JV0MXTbZfqr2jJayvaRFdOlk+3GU4m9oJjd/NM/zyTnPXMr2U9AP\nW+QkC38v8uDuS3rR/v87kv5YlmX/RpZl5SzL6o/32N9Y0PYi+hclffXx3z+qsJvhcYjv70v6pSzL\nmlmWfVHSH9MMjRfRr0r62SzLXsiyrC3pv5b0v+Z5PtUM6NSzLPujj0ONPy+pZvfek/RCCP1lkv54\nlmWfybKsKemXJP29fJac/CjtLenjo0st29JMMWZZVtfMKSpnWVZ7rCyLaCnbS4IuvWxL+uuSXpH0\nE3meHz+hvaVsPwX9sIGTPPwd/4f+qqSffxy6+7k8z29L+klJ/4WkB5oh8v9Y84L9NAj0NUlfzbJs\nU9I4z/P9gmv+nKTG4+f8HUn/QZ7n/3xBe39TM+DyG5K+o1nB1l+QpMdt/zlJ/6NmxVI9zYf+/t7j\n3ztZlr1uY/gVzSri70pakfQf/i7aW9LHR98Psv2XNJPR/0yzeoCBpP9yQXtL2V4SdKll+/GW4T+j\nWZT7XpZlh49/Fp1RtZTtp6DsccXukpakLMv+kWZh0OU5FEv6gaKlbC/pB5V+UGX7hy1ysqQn06UL\n7y1pSd8jWsr2kn5Q6QdOtpfgZEmRlqG0Jf2g0lK2l/SDSj9wsr1M6yxpSUta0pKWtKRLRReylfiX\nf/mXPxQi+l4UEtvpeGfazbJMpVIp/Yam06kmk4nyPP/QfeBZ3uZ0Oj3TjyzLvifjK3o27VcqFVWr\nVZXL5bm+TCaT9JPnuabTqabT6VxbRf0rlUpnriuVSiqXy+kexjkej1OfYr/4XS6XValU5u6jX4uI\nccT+/cW/+BcvNLz5l//yX84jbyAfX6lUSr+n06mazaam06mGw+Ecb5gX/5+/ua5cLhfyyuVtOp3O\nzRs8529Jab6yLEvXweeiflerVY1Go9Q+c12tVueeNZ1O0zrgb6fJZKJqtTonUz6+eN9oNJIk9Xo9\nVatV1Wq1M/KDXMGDRqOho6OjOf75syLf+Iz7K5XKXNv8TCaTuXmhvVKplHglSZVKRePxeE6XwDe/\nP89znZycpHbK5bLK5bKyLNMv/uIvXphs/8Iv/EJOP73/UV9K8/qUuapUKmd4xby6jDk/aB9eO4/h\niaQk+ycnJ6l/9Xo9fRfn3NumH1wHv1038rxoI9w2xPXha3g6napcLs/pQO9TlJ1qtapGo5H0Af0Y\nj8caj8eaTCaq1+uqVCo6OTnRysrKmfF4H+Ap+n1lZUXj8ViNRiPxjOuOj4/nxsrYfc6yLFOr1dLx\n8bGGw2Fq9+TkRK1Wa279MD7ajDIwnU71S7/0S4Vy/X11zokb1g9LRQLuFBUjxMT4Z24wznse90fl\nU/Sc7yXFcUZgsAgouJJY1EdfAJEHvlD9+vOMNb/9B+H2RSHNz5ErtUV9vSiKRpIx+f+TyWQOHAwG\nA00mE9VqtWTEJpNJUgZx/viNMq3X6xqNRkmZwp88z5OBQElmWaaVlZX0MxwOdXJyMqfEHDi5UpdO\nZWA8HqtcLms0Gmk8Hqtarc6NnWdyL226IZCklZWVubl1OULxOQ8AHZubmyqVShoMBmnso9EoAWVA\neZZlGgwGid+AKJ+LaPjgQbVa1Xg81mg0OgNQHNRFw8146E/kp7cT9Uu9Xk/f05eLlu0iYCKdGr4I\nlv0+QIobTV8PyGrUA7QPOT/8ueVyWePxOM2PdCqb8LEI6LAGef5kMlGj0dBgMJiTi2q1KumsfqQd\nN7r+OeCBeV8EZGgzGm3nn+uOWq02txbH43EhiItrlzbQKycnJwmIwYNoY+Gp95U2mGvmAz4w//45\njo9/F/V7pO8bcBIN0Ye9F8TrCq+IXAjcO+e+85gZnxeVl3u3LsgfZhxPC8yiZ+jjdo86Kka/flHf\nIjCIisXbLvL645giL3yeoKJ7fUFdtPKGFoFc/x/ejkajM3JG5AQlRGSiSKETLeDzer2uyWSi4+Pj\npLSyLEtG2b0YPPTj4+M5Beny796YG55KpaJOp5NADWDHjYwrT1fgrgD5PkZ9sixLAMLXkCtI/qb/\no9FIx8fHGo/HWllZSUo3rv0I8uCR95HvmE/4SkQD2UQRF4ESnkVECM/SjaGvEY8yMT50iBvdi6Ki\n56PTisBIlFf/znkcwQ5tMf7xeKzhcJgAZwR7gADvAzxENlwXE+2jP/C/Wq2q3W7r0aNHajQa2t3d\n1crKyplx+frjcwf19AmjHQGNrwPGALjwSJKP8/h4dmxKjHRCHkGBt75WHMg56GCN8dvnhGf5unA9\nwryz/llj/iyupQ+sFXfYz7NnlwacROF3xc4gFqUTuDYqFtqBca7Mx+NxIUiJIaloYCJDI9JEyEij\nYGCGw+HcuD6KMX1aYOJ88v6hUKEIYFjE5wlNVKLc6wClqC9xfn3huAHi99Mq4+hpXTRFUBdBm3TK\nEwwoAMRD1e5t1ut1TadTHR0dzS1wT8dhaF2pYFS5ZjQazaVfUPjIBe3iKUbPB2OxtbWl27dv6w/8\ngT+g/f19PXjwQL1e70xkwI2+K6ToxbkCc7BZJIfOH9YwkQ1JarVac6mQqDjpD8DQAXEE3bVaTZPJ\nREdHR0k5cx26RFLqh4M5riPszbPwHh3ESPNpCj5nrhnbRdKiSKkbX8YICHMg6voB+RoOh8rzfA4E\nwFt0AtEv0hu0U61WU7rD9UdMcbrM+Xc+nlqtlu7f3NzU7u7uGcAcwWSUG5d3f6ZHKhzIuEwCPhmL\nr1kAMSDV1yL30Sb3+bwAvIbDYeI7AHllZSVFBI+Pj1OUMc414+M5AG70E5FeB94Q8o7Me8rX136h\nzC385mOmIhAQ/3ePP97LZBV50QiagxSurVaraTF4LtGjDPGniOJCLJfLajabqtfr6nQ6arfbSaEt\nmhDa+N1EiWJbruQJfcdxRJ48LUUvtIgWjdX5EOf2w3iJboAuC0CJER9XiDGU6/ld8skoGa7t9/ua\nTCZqt9tqtVqqVqvp++Pj47k5PTk5SXJHRMO9UffQAM1e41BUW8GzWC8ffPCBrl69qq985Sv65je/\nqS9+8Yu6du3amVqTouhJBOcobE9HkVuPawpyw4DiJ5oT8+WNRkOtVkvNZjOtP4xATEm44pekfr8/\nxxf66j/0GyPK355Wqtfrc+Fs5ou2I3h13qCfAJ0XRRF0S2droKIT6Y6MO4fMJTJ6cnIyx5PRaJTk\nNs9z1Wo1VavVVA+Bxw7Ydu/d142DEgyvdLYmg3sajUbS1Z1OR1/72tfORJv9N8bYdVcEJi5j8X4c\nlLgmWP84EZVKRbVabU7HAehYKw5OfL7grxNy6vchn57mYd5YU643fC24fuHZ/PYoD+OmRgadt4gu\nDTiRnhxNcIEr8qgQfPecIPfeWShENxyY8ByUJMaiyKAvMuT0gZAcqHR1dXXOS3CKSvV7Qb74ouDy\nG8ULP6LHUzSuou/8WfwuimB5lCXSkwBKfK6H7L8XgO57QUVK3EGiyyB9RvnU6/Ukk0QDfBH3+30d\nHR1Jmnl7ROaoueAHZTQej3VycpI8G+TAUyYYAQpLiZ5BrjBRYleuXNEbb7yhwWCg8Xisv/bX/ppe\neumlBCji+GL0zuc2epIuY+5U+Lp3xc+1tVot3cvYAVrwxQ09hp9n1mo1NZtNtdttVavVVJToBcIo\nZXQLfXej616wh83RHcwv97txdwPjYAj+XyRF+ZXmo9YOYD0S5pE3vHSIQmj/DJk/PDxUpVJJha0r\nKyspCh2vJfpImzyfuXbADbmHPx6Ptbm5qevXr+u73/2uBoOByuWyfuInfkKvv/66arVaKqZ2AOS0\nqPAWPvG/f+eRFo+sMKaYCeDaRqMxB95wUJBVl1F3ODzSQYGy9xlZc7DhEZMIPH1uvZ9Q/Jy16jJ9\nXjbgcmj0xxQXatGPC0VEred53M4IhMsRYRQoyNFqUYgyeuz8T9iR9ih2JIISDWpEzx+Wbx8m2gAh\ndB4yBKzxA7nXzfUOVNwbiF535A3t+e8i4/Mk8jmMsnFRFCNCEaw5Hx0YYticp+PxWMfHxwlgoAjI\nw2MsG41G8iLxJD0SwU+WzarsW62Wut2ums1m8uxj/5ljB/yMYXd3Vzdu3NDq6qqm06l+3+/7ffrS\nl76kP/yH/3AyvihYV7juVHgaxPkUDbb3Cz56SDiCXZdRrqfO4Pj4OAGabrc7F3mZTCYaDAbp+na7\nnfjAs6h5iLIe++QGhz4xx0Qvfe59/Hjj0qkXGz3QiyDkx42RpxGgqIsjsMNALtKdrg/6/X7y8svl\nshqNhiSlQk6cR7xxUm8RyDkQwIgzDgBSs9nU66+/rmazmcDh7du39VM/9VP6B//gH+jg4GBOn/Mc\nN7RRlqX52g8fK3PqNinqDbcJDvzr9XqSWeTy+PhY9Xo9OS+kU+A1axKnh/56dMbnBZ5Hfcb10e6y\n7rmOsdMmc0Yxb1wjRXTx2tyoyOC7Mo+KXZoPAxcpQSeE2ZkXQYkrPE/DeH+ipxf/RuH4fQhYvV5P\n4UwoCsV5qaOnoZhWcP7ymY+3aBwOQiJf+HxRGm1R+4yhCJjEuYsetvOgSBaiorsoikrFveEIpJx3\nLl947fCeYkwACUoRXmH0SP8MBgMdHR0lEIMiajabKpVKKXrHNkIiK6whgI506ulzP+sNw9lsNjUc\nDnXt2jX93b/7d/XKK69I0lz0kv56CNejKbVaTVtbW8n4eMrDr0X5OeCBx3iSfMe1k8lE+/v7CZzA\ns16vlyJHRJgcPFCYzNiJLnnks16vq9FozIH5CK4whi4XGAieG9cl0QSPeF103Un0+KOedPAa16Lv\nDvE0BPPleoBnYXxHo5FWV1fT+gHgABJLpVKqraIWwo2mry/66s8C5Gxubia559nValWDwUA//dM/\nra985SsJ+EvzoCMWlPo8ukGnHw5MIqCJvPAx8X8cBymmPJ/V7xwfH5/RMURamQ9sITyNO9ikU0Aa\n+xejOi4bbjt8jdLfCNDPc6ovBThZ5Pm78EYD6r+5H6YvMs5efOio168vOhOkqL9Q7J8XcxVFQ6bT\nacqNR8Md23aiXy5kTkWGOSL4WFfiHmbkhQtZfJ5714uiSBHMoNyLUm6+oGO0oYiKnn3e9R8nORhx\nBYnMRa8u5rQdyFAr0W63VavVknL3KAnPbLfbyQhIOmMUXQFhGDHkHoHiGnb9uDKeTqdqNBqpPfo8\nGo3U6/W0ubmpt99+W61Wa86ro09FnjJnlOzv76tSqWhzc/OMTEYD7V5qlmUJ8G9sbKjT6SSgUq/X\nUxTp+Pg4pbyYBzeQHnXy3RE+f+5les4fuVtZWVGz2Uw7l2K0AWNADYWDGp5Tq9XmtpNfBsANFUVF\n3MD4mof4HzkrlUpzdU5EUtx4uZywAwuj1m63UzsrKytz9Ti+o0qa9+5p1+Ud+apUKrp//746nc6c\nHOT5bEfb9va2vvCFL+jRo0eprw4YptPpXB0YbTqY3d/fPwOYuJdxR7vCWSYUZUun9SKDwUDSaYSE\nowQAL6R6vYgbOYvR6Qj0pZksu/MUASF6ivn3eUcH+v3IuQcHiDYuoksBTqLCks4KaYxWuHHy3KIr\nGhdOCIYUGcMYFTmPzgMo0fhyPYKRZZmazebcuPyaSD5ejHxUAEX3OWDwugKvbfB2HNxFYLMoolEU\nOYlgpSjSEZ/NvAAaYxGzp+BcqT9tNOnjpKKIjisCohNey+QG04EMhnd9fV3PPfecms1m2l3gSlmS\nNjc3U/rQFZJ0qjC8NsXlMXqm/C8p9fXo6CidFwK4IT+PPO/v7+vGjRtpvD5Gl2FoOBwmQ0y0J9Zl\nuZGPvPXoHbtE1tfXkwfpEaY8z5Ox43mHh4dJ7tmVw3W+U8Lnj3Sbg8Q8zzUYDHR4eKjj4+ME4rhf\nOq0Bcg8dAMpvQKQb/uhwXSQVOVJRZ3vdR9ThjJu0d5ZlOjw8TPdHvT+ZTNTv97W2tpZ4Q4oIAOAy\n7ODaQQggyHUIstNoNPT222+nKBZpP3azVKtVPffcc/qH//AfJqDkZxHRF8YuSe12O60j0qb9fv9M\n3QWG3deqpARSGeNkMtHBwUEaG2uS59Xr9bnUFnLpEZEYNUXv0CfmM25dz/N8Li0J/yPQKZVKyXl2\nHsHvPM/TDi2cjfMcykuxlTh6youiAItQuv/2XOQiQ+/PiwDFnxc/9+cjFBEknNd3aDweq9VqpZB7\nBGcuvNLZA3lA7+4lnOdhwRdH0R49YTzON/f4fMz+nRve6FGd5/XxvSuzIl65QYpeWgRFi4Ddx03O\nM5SOg0+POERvie9dSeEZkbPtdrsajUbJc3J+oPQxbu4teorAFQrrhXoKvBuPCGRZlkLFHp3Jslk6\nBS8P5fn2229ra2tL29vbSbF5JCE6C71eT61WS5J0eHioer2eUkgOLvhxvhEJIbSPfAPQWCNey9Hv\n95Nn6NsqUZxxB0FMM/nfbgBcVo+Pj1MEBcPoUSd4xzM7nU5h5Pey1JxALs940IsKTn2dMy95ns+d\naAovqC+J5+VwiikGkfM8oqfutRCSEljwg78g11flcjlt0XcQSRQGh246nepP/sk/qTfffFMvvvhi\nenae53Nrjn4dHx+r0+mo3++nPnvE0vvsa8r1qzsB7DZy5wJHoVKpaDQapXoOn6OjoyO12+20zqXT\ng0U9Aun1Iq6fIHdeAEWAJo/++RogyhN1huuW85zLSwFOnIqUuit3yBWFpwq4Lxo9v9YFwZVffM6T\nDF5RiLMokhDvkWYTtrq6qoODg4UAyckP7EII/EROH09E5d4/DJLn6KXZoqYvfj/fedqHau/oFRUB\nPO5xPkVA50K/qN8u3P7b5+EygJMiQMtPTMXE8Cq8ByRg2LmeuokYnYNvk8lkziA6CPVnS6cnO7oH\nCDDBu/J+UXTna4o+4TmenJxobW1NR0dHc+uQ53hdAu3WajUdHx9rZ2cnpTvoCxFCV6wnJydzx5QD\nJg4PD1UqlbS5uamNjQ1tb29re3tb7XZbm5ub6vV6yvM8GRwMheublZUVHR0dpV1LhMVjvz36w7gG\ng4FOTk4SKHLDjHcP8EC5D4fDpLv29vbS9bF439fkRZGvX+SKOS46RIwx+LqF7254vSDZD1uDV6TB\nAL4eWfNn8T/8d17SZ98t5LpwY2NDt2/fTp+z7pBlIl6NRkM7Ozv65Cc/mebR0zKu54jisc7ol9se\n6RR8Ai7oe1zL5XJZ169f187OTroPYNtoNObAFHwk6kPdzu7u7plIU0xrQh7tBGQcHx8nvZ/neYqa\nwudSaVb/w5le6BOADOOg/9JpmqqILkVMPBqiGEkpMj5FBiB62R7Gk86erOftegoogpain6J+F0Ub\nivrL/z7xT2NcY2QDwfCtwPw4cPBxudJnESBYLihxvN6Gh+r9uwhWPITqCtsNl/MvzouneWKKzucp\nzstFUwSnkT8+fucByszlNM9noVDSESghH6sbQNINGHg3ENLZd+owN767LK43PGPkhvbH47GazWba\n9bO5uZmA67vvvqutra1U7yGdPeMDD5pUI9dw/WAwUK/XU6/XS/NLvQcRD9cX6+vrunPnjt58800d\nHh6q2WxqMpnom9/8prrdblKmWZalrcMYBJQuqRUHdRGQ0YdYF3TlypXULsXHgCfWnusowObR0dHc\nWR/uibo3epHkMu2AQJqvDfJ+xj57ZCtuKeb6aAN8h5U7O74d1q93z57/+XHD6ilLP5wQfeXgcTKZ\nqNvt6uTkRD/+4z+uN998c64WC4BGP5FrT+MQgXFdiw6nloo23HGRpCtXrkiS7t27Nwf0X3rpJT18\n+DCBEQextE27h4eH6nQ6CRz6kfxuT5gX17kArXq9Pgfo3dbx7PF4nNqG7x5BcSB5nhMvXZLIiRsc\nF7SIvIvuO2+AcUFFA+gTEJ/H/7GPi/qyyMDGBRqNa7PZfOJOI0fbCHdE4TzbkWz0zN3wubH0diMV\n8YB7/Hf8PipUlFKMiPl8u6cDwRsfcxHw8nm8aCoCovQ31kV5usFlIypkf0cHCtT5RtqFe5l/B5wo\nWv52Im3k/UFp4r36rjiupf4Cj5aQOy8AIySOjNHfLJs/HKpUOi3GzfPTY+lRaO6ds8MGg0PE5N69\ne+m9NJ7b39raSpEZ2vFIBn3Gw4N/bsyguIUUXrTb7eRxHx0dndlN4YoanlDzwjjgp8s5vLloQpch\ny16X4PojAm7IQXnRSx7hd9FnyEmUF+msjXBHwKMmvu7oFwe78T9zyX3UdlSr1XS4IeCfefUUo9ed\nIGMHBwcpAkKUkAgCab1Go5E+J/qAnK2urqYic+l0DY/HYz148EC3b9/W1tZWeh59j4AtHtvPvDnQ\nAGRlWZbW2HA4TGkjb8f1yHg8TnVwOADMkRd7e9TkPLue5vg8gfy4KHbQ/4/hzUURhuhFx+tccBdF\nQnhe/Oxpn+nPiQYnEn1ESD1SE+/zz2IUo8gwI2x4iVRzg+i5xqM90Vv035Gi0vH+RpDmz4g7oRZd\n6/fQDwxJ0RwXzfdFURE/XVlHhRBD/ShPL3aNZwsURbJ8J4qkFF2Q5j3aWESMFwWwoJ/MPZES8vIo\nXyIRXH/t2rUULaCY9N1339X169eTMiLiUBTZ41n0kVA6hxiylfrk5CQZ/0ajoVqtphdffFH3799P\n0RdP22BAut1uMvxRHqfT6VxxHuOMCr5UKqX6BwzldDrb2dHv9zUYDBLYcZ6j1DmfAmo0Gtra2tLa\n2loqfC7ayXNZIoLS4vOMfC7ROXENu2MU1yv6ynnvxsyNrl/DHBU5lTyfmhB/NxLAh2jgyclJ2t2F\nQa3Vamq1WslQA8i/+MUvpq3FXmvh43HekKJxsAT5Rg523NC/Gzdu6N69e+p2u6ltr9fAKfCXdhIx\nZfwOIvjOgRXXMQYH9dimSqUyd64S68Vf2On3eaG46zzn1dPQpQAn0uJiUunsdkvprPFeBEyiwERD\nsGjhn2f4i54bjQi/mUxftDHF4Tt3/PtFPPDaDIQBpemFwBg5ThR0PsfoBc9GeJ4kQDFKVPSZG+Ii\nMBQ/cwXlB4Dxcx74uCzgBIpKyOc78pbxudx4sZyDVk7KRIE5yPW0j3v3TnweUziS5jwi+sj25aOj\nozSGRqOhR48eaW1tLX0/GAzU7/c1HA5TcepwOEx1H3meJ0VH2x7hIzrj/SHqc3h4mIw/r2nHC3zh\nhRf09ttvz+W9pdPIEGum1+vNnc7qz8GAeWjcQ9YeZfLDpsitRxBNG6SnmBs/R8bn1SNO/n4TX4dF\n9XMfJ8UIUgQj/p3rJwfi6Jaow1jrtOv6gdRIUbqAtiNQihEvl2lfSxSUAkyGw6FWVlbU7/cTKK9U\nKur1ehrW5XRPAAAgAElEQVSPx+r1eumVBq+++qq63W46D8fXGvYFYz4ej1MxNPO+v7+f1oNHuxuN\nhqrVqj71qU/pq1/9amE0xNO/nU5Hq6ur6dkxosV683QKbbL9mWsdaHhKmDVA/z3KRxlB1Fk8qyha\n7wXn58n1haV1okAXUdFigGHREDpKQ8DPi0Kc542cZ+hoPz6b53vo3Ptb9Ey/p16vz70h1vt/Hm9c\nqFDOCBJ5QhdCVwJOrhAZf1EfXHl4P4rADuPz633eI5/d03I++5xGHsQIwkWTh43jWKFoRH0OpdN3\ns6AckS88Pdp2hVAqlVJOG2XiYXjuQem6d+XrhjHQNxQb6Uc/7Gk4HKbi0U6noyzL5nYkdDodHRwc\n6ObNm9rb20uFrMgDyhjgjOIjIjOdTtVqtdLaAHwBuJ9//nn903/6T1Wr1eY8U49AuTy5EvXP+fGX\nybmsOf88rVMqlZJBc17FCEwEO8iqp46IclHwzPh9Li+aXM9KxccgeK2TdPY9Ng5IvSjSPfJYc+V1\nDp4yiMDat6BHpwj+kTqhn1evXtU3v/nNuWuQt0ajkXa7IHsrKys6OTnRzZs39frrr+uVV15JkRn6\nOp1O0y4j1szBwUGK6CFDnEjsa3djY0ONRkNf+9rXEqDhmuiolEolra+vJ7mODhw8ZltzUVrXHSPA\nNLt7aMPllXucXz6n2B8+90hUUTDgPBt3YRIfjfzTXO/RjKIohBuxmLeN0ZUPS4v6+jRj8IXi9/iE\n8WIyogaOOPkMpO1eeARgLkQUKjqAKIpQOZgoSrcUXRvBm0c9YjqjKAR83o/zbRH4KeLxZaMir1I6\n+xoEUgzRCPGel0qlks4PcCXgQNRBCafLesG1NF+g5usjAr8YwcJzRYkxL+TrO52O7ty5o4cPHyrP\n85RH530kAOROp6P19fX0MsxGo6F2u53qaYoMMAa81WppdXVVrVZL165d05UrV/Tmm2+mAw09ZeMA\ngXngDAsUfVTyzg8Hbhi8RqORdkWUSqW5tJmnn2I6A6OGJ03qRzrd3ux9JoLiHvNlASZRZqJzIJ11\nOpFXZDjLsgQ0pfnDHpkTj/ph+Fjzrv/8OR4diA4S17mMcF7K1atX1ev1Utv0tdFoaDAYzPWl2+3q\n6OgoFVr3ej3dvn07FYYfHBxoOp0mkMvrHTjLh0gKMsazWCdbW1tqNpv67ne/m6KIyA9zgKyUSqVU\nd4Je8JOdnbd8RlqLqInz0qMZfjpyBCE+V/AaZwQeuwMSoya0g+1a9K456YLTOk9jUIquiUo1eub8\n/jDg57xnezvntRk9iaJ7QKEOLpikWq2WlCCH8OCV4TUTpuP+IuNNuC8KUVFfI2iKAMOvj8oJcqGN\nURVXIg4gFwGM+BNTI0XXuyK7aCrqY5GiLIrOOeDm1FX47qAAxeRnl/hceoE1oVeUoSs6nsmPH2jm\nUSjeNQMoIv+OsalWq+p2u3rhhRdUKpW0sbGhnZ2d9OK8d999V+vr66rValpdXU31FZubm+r3+2q1\nWmkcACrm0iv9pRlg63a7+sY3vpFSqRh1D0W7LACQ4AWASzpNxWRZloADaVIHj4T02WHDlmbfHRdT\nS3k+/wK/6XSainIxen4Sp89P1GexgPkiyKMmEfxFUFK0vvmMuSCKwXceMYopyxiB4bn+WTTi/I1+\ncKeOZ+HAxSjXeDxWu91Wo9HQ/v6+JOnb3/62Op2O8jxXq9XSa6+9ps3NTeV5nuqgqBva3NzU/v5+\nKqZFpgDJABgOC+x0Oup0Ovr2t7+dapMAx0RcfJ3zd6PRSLuKWO8uOzzHSww43A2ibZ9H+EBqxiOG\nzLPz1aOtFHxzjTvQ7lx5Oq+ILsVunSdRFHoY5p8tMr5PIldiRSmMqCTOa+e8PdtS8YL2v3npFH0p\nMr6er0YxuwDW6/VU2R1DsHEcUal4SK5IEUTQwT2LFBY/GBGMnxe+RYMZjbYXv0U+LgJoF00xxege\nu6Qzf0MoOQeB7u2xqJl/VwoOYmiLfvgr5d17gSIYcY8ryzKtrq6mLZ3SbE6oLyHkvbm5qbt376Yc\n/bVr15JnmOe5PvjgA5VKJd25c0dra2upePbGjRuSlA4k9L4hL+wIWl1dVa/X03e+8x01Go1k3ON6\nYPx4iHyPUXT+MX4Al/MhplggT29xwixGJAKkWO/AgVxe8Bwjix758fV9keQA2yMXvvadf1FnezE+\n12JIoxfuhhPiHsjr+5AzqfgEW08B+W6cSqWi3d3duV1S3s7x8bH6/b6uXLmivb09ffrTn9adO3e0\nsbGhjY0Ndbtd3b17N0U5Op2OJKWibV7pgLNA9IgDBt999139/t//+/XZz35W+/v7+u3f/u3EJ/gT\nX5ToRa0e4QOgwBPptP4E2cahwWnwaCvpFwcTlUpFzWZTo9Eo7YLz4tsIVvmbscbyBtfPHpVdRJca\nnBRFTfiMgrgYOSny0p8mQnMePcnoUSAUc6VuIPCKHWzgIbohkzS32GiL7wg9YvC5H3544dV5/Xc+\nOblR8ghNETigr35t/OF6H3NRBKVojtyrWhQ18fFdBnAC8HAA4t9FEBG/q1QqqegOmUIxFM2r16AA\n5LyCH9Dsnkw0HG4kAAO0t7m5qcFgkDw8j+Ksra1pNBrp7t27Oj4+1tramvr9fgIopCIBXOyGODw8\nTFEKP1SLtM10OlW/358z1Ovr63r48KEePXqU1hmnbjIOz31zDbUIKFaIHD98oZAYHjH+uFOK+YC3\n7Fhg3IAi6RRYk+9n94M/w4Fn9OrRK7EG5qLoSXo0ppil02hFr9dLL3b03VpehySdvpLB6xe8wJLr\nmBtPdfqOFAfu6Arko1KpqNvtqt/v6/DwMNV3wX/08mQy0bPPPqt33nlHq6urOjo60gsvvKA8z/Wt\nb31La2trarfbOjg4OFM3+PDhw7TLplKpaGtrS++8845Go5E2Nzd1eHiYouXb29sp8uL6nK3ObvO8\nPge+OPjx9c24PApCfYx0un3fnV/kEh3AuT882wt9mV946uvOHQB3uD0yC6hbRJcanCwihI3iokWe\nqBv9mI7wlIrn1NwzeBpC+Xvoscjbkk4LIYuMludVvU8uOIyB0DRj95Coj4G2FxnvaNz9f8YCuXFz\nQiFEfkWQyPjPi4JA/v2i6FURELkM4CT2MRoW5tprRTxaVS7PzsygfgGj6vJQFBL1iCKGlTbxihyo\nuIfrbfs5G6VSKR0QRr0EtVGHh4c6ODhIAOT555/XN77xDT377LOpaHB/fz+BFHLrtVotHRrFzh5q\nVw4PD9Vut7WysqJGo6Ht7e3k5XEYGzwplU6L7QAWrAd2YLhOGAwGc++U8vCyA0PuZx2hwKX5NzTT\nLnKLkfIcOoaWMHcE4w4KXVY8MoFR8JqAi6AYCZTmXwUSx1aka46OjhL4RI+5nHvxtztzGFl4ESMH\nADiXe/rkfefv1dXVFGV79OiROp1OknPp1Dms1+v64IMP9JnPfEbb29spRdlsNnX16lU9evRI6+vr\naZs9RbBZlqW6r8lkovX1dR0fH6dtyScnJ1pdXU1Ag+gNssXuIYA1AIL0Dueg+FooOgbAX9QHT5Fx\n1+MAZ7dBzO3h4WFyQpgj7oenXjsVo2vu9PiaZE6/byInRUJ9HlCITD7vWhjn3ngRFQGUon55uyhy\nR6zSfL7NF07c7iXNv2mVe/k+9qMI6eJlRPBQ5HHFyAb984Iq+PW0Bv+8KJePB4V8XjrN+1TUtivv\n8+bmIol5iukT6XT+ixQyvMGz9+hBBGkxouVpBBSj70yJIXNASpFCl07fXgzguXr1qo6Pj1WtVlOq\nh/6/8847KYqwu7ubvCvAjG9FrtVqGo1GarVaSW7xVvf29hL4wqvc2tqae9mYe4+sK093+Qv7PDXE\nOSnw19Ok8N4jXyhzNwSACnd2uJYQONd64aJ0NprrwJ4x+Prjt6erLoq8765/FgGROE7677s3Yo2T\nA2v0hPMBHeVOoPPR0zX87YA9ptmm02kCDb1eL8k7RdqVSkWdTkcPHz5M87q6uqp2u63d3V11Op0E\n1G/cuJEOajs4ONDq6mra+n7r1i29++672tzc1IMHD3Tt2rUE1Pf29jSdTlO6Tzp1+Oijgw1qWIim\nAlRcTohGcr3bJS8XiM6wyxipyvF4drpspVKZO5TR5QDe0lfa9+exzqNNOS9defGxQp01wNCTIhiA\nE5Ss52+9zSjggIkIIiLQKeqj9yca/hhxWNQHNyr+zDi5Hg3xNj3MyWdFudaisRQBDj7zQr0oeEUU\n+xUpGk7a8blyBF7khfnnEbhdRvJ8bJEc+DUoEul02y5Kg8XuEQ4PmfpcIRNe2Q8xr34GgRuXIs/F\nPVN2VjSbTe3s7Gh/f1/3799PzxsOh+nY9itXrqjX66XCbtIYk8ns0Db30Djbh1DxcDjU3t6eqtWq\nWq1War/T6Wh7e1sHBwdz50YAmtABnFLpB5nBN2n+3VSSUj4fDxmZo4+ef3c9g7Hzc3jgIUXsXO/f\nAcB8pxRtY5zj8xyEXzZy4xQ/j/pWOtXVXgTru5HifQCMWEuCIXf95GvDvXr0ImCS38PhMJ32Wi6X\ntbOzo+l0muYOmTg5OVGn00kRvt3dXfX7fR0cHGhvb0+S9OjRI1WrVT169EiS5opTKTy9ffu29vb2\nNBgMUj/yfHY6OClPf5N4BL6sb+QV4EFxroMaZBMggby6foGfDjb52yMn9MXfZeUAMkZzXQY8MglF\n/fYk+34pwEmkCFDOIwcmfgCSg4kiAxoXhLe36NmLDDDPY4G4MXBh8Jw4HlNUPrEYzp/D2Lyv8f/Y\nx9h+EViifxyh7AdGuSFzKuLfIkErAkdR4S+KvEQD7wAlem2XIYISCyJ90cNL9ySl+UiQdDqvflKo\ne5duOB2cuGFDMXtEzdeKA1/IQS73EOUYDAZqt9tqtVqpgM6jHVevXtXdu3clSbdv3067UDjOmjoa\nUku9Xi8p2UajoTyfFRSym8W9YbZuNhqNubfTxl1vR0dHc4WBPhf0gR+iKOy8cb5JSkaAOYxpMD+N\nNxpVThb1GgCMalxXDnJIJ/h8XBZ6UhTVDU0EGtEgeRTNgbBfx7XS+duNixzEGHWGpxhVQFKj0dDG\nxoYePnwoSXOGn9cMfPDBB6rVajo5OdGLL76ow8NDHR0d6eWXX04vI5Rm9U+DwWCu8J8i6StXruiz\nn/1s6kuWZSlaA6hGlrFjTkTvqtVqAjiM88qVK3PRR8bqLw2Evw7MPTLFevHaNJ9P6s2k+WitrzNf\ni1Hn4VC4bMQIYRFdCnCyqIMx/LToWlfsi4x+UXvuSS0y4ovaKopU+IIqMqxMiEcOpPmdOd6eLzL/\n7cas6CeOORqyRUrPt7vF02adT867+Hlsu8iLiv1b1O8iKrqWti8DOFkkr274fQHHcKqHoFEcnlbw\nNICfVuoy4VuJoyfk6TWPBKysrKQQsXQaYeFtqZ1OJ9WPdLvd1FdqSO7cuZN2K2xtben9999PefjB\nYJC2xWdZltJBHELlbzt2Rea7awiTk9JpNptz4X/4wuFw7iHjMbfbbdXr9RTVIdKysrKidrs9B/BQ\nxvFlmvAR4wWvYkqGfpOi8iiK9xfZyPN87mVsDv5p4yIp6k3vu4OE6JnH610HFTmCRW2zJqRTwCnN\np4ijTme9RaCEnCFT3/nOd1IhN8+aTCapDqrb7aZC8Pv37+vVV1/V6uqq+v1+OlW2VCqlc3sAJBw6\nmGWZer1eems1ckTqhQgOaRppvobGdQNpo9FopI2NDQ2HQ7VarbndY35elsuZF867I89n/qwYBYlz\nhmx7eifaTuagSFd71OQ8AH4pwMmTyJHYede4sY8gJTIvKm5vI3q1/I6LIAIRb889hggmYhTEF/J5\nnrA/w+sQioyhG+yivjoCXiRI8RkeOnUqmhtXsJFvRfNS5GktIh9TUQTlIqlIzqTzAbhTTKn5fbEu\nATmJ9/s9ePIxhcN8RkXB/XhqpdKsKHZ7e3vOgyLsjbePDOF9Pffcc5pOZ8fGk6bJ8zwdPe/e73Q6\nTVuDvX2MXqUyO2GTQ+m8OPLKlSvKsixtoffCWEDG0dFRqhXAgFQqlXRwGump+J4d38kAEflxeXXg\nyLZn37HjxJxyvobPq7c5nZ4e3oVBuGiKssX/nqIq0p2u2xi/R7ljhCTyIepVCH7HKJRHyxyUAzzg\nJXNAZAIZ5kRVj7CwY21nZyc9KxpsUjakVJDvWq2mwWCQZIzD3dBbRTIiKTmJjJs0zv7+fvru6tWr\niZ9ZNr8zjD4SEUfeiorp0QFuC3CiIe+v82uRQ1bkhOV5nmpYnkSXApwUoadFnvR5SEsqTm3Edvx7\nL9RZ9OxoJKLRdSF1Y74IGPiE8TmTR1/8gK0IvBAcF6oIYOKzPXriIWrGGt9YCbmH5+Mo+ryIV/Fv\nH8+T5ioqq6L+xe8vA7msMseLgGzRPX4tcuLjdOXioVWXhQhaUNQo4ghivXjW2yiVSsl7ROlzD8Ye\ngEI0gpf0cWAaY+BE2GazmeozJKWzF3hXDRS3le7v76edESjcfr+feEf+HrDhp2y61ww/SCG50gWA\n+dpwj9ZTOniQXmTJdUS2mJOi9cIzfY0y15VKRRsbG+mguyfpvd9r8r7HtCXfF+nJeK80X3Pl/9N2\nkV7wKBTy66k9AHXR+gGYeJu8o4niV98R5dvwXS+zc4waqGq1qvX19bkaD3bvcJZJtVrV/v6+Op1O\nAiu+TgHHRFLyPE/gGzmiiJgxX7lyRcPhUF/60pfmgCG8lJTOEcKWeGG7g7bo7ERn0mtOWEcAuuho\n+y43lwGf/yzLks5wGSmiCwcnH8br/bALNHo9LrQ+oT5hRdcXUZHxBzV71XiRV+T38Bl98fwqffX+\nRiUXoxBxbA4EihY+/TlvW5eHAKMR9H5HIOb9L5o7n59oCJy/RR4Vvy8jOIHiwuez8/rrCtXBb0yh\nOT+j8kcJujx67Yo/w71Pwtm0hRG4c+dOUkhEU0gBofQIZ5dKJa2trSnLZgdDDYfD1DYeI6+Op5Cv\n2Wxqd3d37oh6SekMCt5zkmVZMtal0unbjxkz/ZhOp+n9IKVSKaVviiJF0ingx/D5IYa+3Rd+Mh6P\n/PB3rBPyNe6yEL1KijABZOVyOdXFeI3QRZHLFmMpchijA1PkuCxyBF0PO7D3M0gcvKG3nDzS4k6d\nryFOXyYt6O9GIm3EGEktOgDpdrtzZ5jUajX1+33t7++r1WqlNGCn09G9e/e0tbWlLMu0trY2B/4l\npTeC8xlgws8NAUhTd8U9bMOPgAfQk2VZqteaTCYpYuF89jcN+7zl+emuONfDrjd4lqegXbdEZ5k5\nKJfLC51hp0tzfP33wsBEIy2dLRr16yJA4XdRX6Kxj8bBQYNXR7tn5BEXvyYqniJDVCqdHiBEWM2/\nX/S3C1Y0anzuJxEWgTTaK/L0igxljI5EAx2ByiLQ6cCEvoHOPRR7Xr8/bloUOYqRr6ioHTRI86k2\nvH7kIPK/KLzqgAMjGz0d/9+VjR/Pnuezw9ZQiLQ9HA7V7/dTGoU0iTQzYEdHR5Jminw8Hs+ladg2\n6UWN1Wo15fAhL2hlCzOeV6lUSufBcFBUls2KLdkBBLDa29tL70/B0CMrfhgbz4L/yFUsfKWduDuK\nNVpkLLx2yAsQpdOCR+aEtFOlUlG73Van01G9Xv/IMvm9IDfuyFuR/Bat7Sfpdl8bXOu1V65HPOoX\nt5Tz7FjP52ke+iQpvbRRUjqXBH3o6wfwwk+n09Hu7q6Ojo706NGjJNt7e3sJxE4mE927d09XrlzR\nysqK9vf3leenKRb4SD+JtOX57K3fvV5Ph4eHCYB5lGc0Gumf/JN/os985jMJELBWkTnebwVgpzYG\nXnkkyfV41E3Rlsb5cZvnTqTrY/RLPN6gCFw6Xbw2N4pCXIS0z7sv/j4v0lIUaYhebRHQiUY3evpu\nIIuMlAOUGIY8bzwOap401gjSokHz36DY6OW4AvfURAQoi7wm+OS1M5GXsV0foytC/4mRFF8IlwGc\nRPDqHpF70UXki93nF1lxBemK1tslUoCxo8gZ3rvH7/1AOaLAULC+RRelN52ebuekMBTDHD3Ou3fv\nJtBAf/M8T9EJPNJOp5N2s3CCLCFy6kl4IRk596Ojo3QYFm1LSvIszYzOs88+qyzLEniCV74LAsXp\n9QKctBvnkX77tmAP/wOq8jyfe3cL0SWAJrUp/X5fR0dHiedHR0fpjIzhcKher5f4d1HkhsrXLt/F\na6Ri3eBrHfl0EEIRqXR6CizgDrklYse8xfXFfMR0mzt3AGIOOotbzev1+txamUwmCbAfHh5qdXU1\nRUNoYzqd6t69e6lwVZoVhz948EAHBwc6OTlRv9/X5uZm2rGEgUYesmwWcXz22WcTSCJFc3BwoE6n\nk05lJr1TpEuJinhqBx4gqw7GpLM1Iu4cYEdcl8NLnksU1a/jXn/FCvqMeV1EF/pWYmkx4o50nkKP\nvx3QuDEsui+GZYuiL0UUowLRc4+G3o2KG1JfgDHCEMfmExk9F7+Pv/1/9yai0iiKgEReLeJ/EfCI\nzygCIQCcGB2J6aE4nqL5eRr5+bjIPUzp7M6LOLZYpOq8imHVmOpxkMi9fI6njcyhXOmT98HPBYke\nU6VS0bVr1yTNe5ooGIwrO2kwuJz/8Nxzz6WTZAHCKODhcKjNzU2VSqW0DZOICHUuKL3BYKBut5sA\nRKVSSR4m/UbxEvLmnJSjo6O5l5sRHaI4t2gNxiJJjzK5zLJrCIIv7AJizjC8Hl0imuURKQAa65Ww\n/WWQb9c5UX/zO8pW0f3cxz3+Sgs8fOTdjR9RN4xp3HYNOHbdCu+Yfz6v1WrpLcPUiEiaq81gLkul\nUtruTirlgw8+SLUTgM/r16/r8PAw1Zdcu3YtHW0/mUxUr9e1vr6ufr8/dzgiO8okpWfxpmLfYlyr\n1bSxsSHpNGoEeYGuO3ExKoWti2Auzi19dpvG36xlj8byDJ4D6HHZcH1FuvQ8upD9aYuMb1yA0fDG\n6xalH/z+okXtBiJWJBdRNCoeQZFODQDfETKL4+R7vDhfZCixaMiLnk/bHlpDKIv4WMRXAAJ9cvDi\nfY33FfGzaJxQkbLyMfqiWAS0fK74zq8tuv+iKBp3yD0QDBJKx+fNlTz3uyLzKIl7J3yHwqZYD+PN\nZ76LBBnAw/cIGm12u11tb2/r5OQkpUD6/f5cmofnjkYjvf/++2lNkU75g3/wD2p/f3/OwPD2bXbh\nYIQBLRsbG8nT9cgMb3ClhgVetlotHR4eJi9zf39ftVpNh4eHqfDWPUw3bBgkpyIdw9p1WaN4Ns4h\n7aInACruzQO82OETI4vU0fhbmi+SXK4jSHH9BQCl/66vkGu2k2dZNlfPAb94hhtBapXYAeYGlvbp\nCwXWGEkcROaKLbzMzWAwSP2gTiPPZ+mSVquVonW8WfuFF17Qm2++qfX1dR0eHiYwSpqTNN7e3t5c\nSqNer6fzUTxq4zxCrr32qtvtqlQq6eHDh4nHjDcWjkunwIx5ijbOdZHLrvPRwZzrHX+NhUe/IlAl\nAuvyQ1v0+bxdaBe2eb7IM47fu9ItQuvS6YFW8RpHjUXPZiHBLPdwInlUxA2FKwz64OHEeI10CoZ8\nuyKLxT07N+DufcOP6Mm5Eigyju6NuJeNgXSvMfY5RmnO84yehiLQKmrHPVT66HxljNHYXjS5B+9j\n9PmM3mD0KF0GIyCMBtJBjD9rOp2mN6CywyXLsrl3vyB7pVIphYclpZ0Gw+FQb731VjrnhFoTFC9p\nFvrIAX7PPfecbty4oYcPH+q73/2uvvGNb+iFF15Iio0UEUdxYxjYNVGr1dTr9RLA4pnc67spqF3h\nSG/kmJ0VhNg93eK89uPUnefS6XkTTu5VuqKNkUnalWay6UYL75/5Y3uzyzP1BYPBYO61EhdNLqsx\nFUy6RTqVraiTJKVI2erq6lxaQZrXbfDVgTRRil6vlw4g8+gAOp2oR7/fTwDHU4PT6VQHBwfpfBHA\nIYcOEokYj8d69OiR+v1+SuXs7++rXC7r5ZdfVrVa1c7Ojvb29tI7dNABFDNzjgkRkclkkl7dcHR0\nlNJ9nETrB6iNx2Otr69rNBqldtyZZA34sf5Eo1yvu+53W+k2gLVEhDOmZwDTfk8En6wJd3QoYve0\nnUdgF9GFp3UWkRtl/ywqZj73ayA3EB4Oh9kIMALPAiCkG9Mwblhi7p8xxdSJF855uCvLsrSjgDyz\ng4w4hggOPGRZlBbx+1iwRX8jSB7OPs/QL4qCFM3tIkBzXnSrKELk98fP/EVul+EsCF/I/O/E+NwT\niTzykCgy6n8zh4R28UJGo1GqV2BHAsaP9rmPMPF0Ok1pD+YCw7KysqLNzU2trq4mxUXhINcAGlut\nlmq1mj73uc/pzp07ajQaun//froXWWs0GnO7glBSKHUMFtuE9/f3kzLloDTGgVIHxHBOCYdmoeyb\nzWby9FZXV+dkiJRWkW5xAwtoIAXB5zzT15MbS/hD2og30cbIDb9rtZra7bakWb3Myy+/rI2NjXTw\n3UVRlNEIqF238L0Db9cvEWi5gwVI5TN457qY9CGAD6BLtAO96G+hRk9Vq9W5N14jy76uPBp25coV\n7ezs6BOf+IR6vZ4+9alP6Rvf+IbK5bLW1tb06NGj5AgAbjgYDd1KnwAApFw9YkEfkG+Koj0dBZh/\n//339dJLL6larardbieeSfORPPhCNJ+2PZLC/+604Fy5HAOK3BnBVtE2KV23y8g2c0f92t7eXgKB\ni+hCwEmR4StS0i700QieB1SikfY2PVLgxsB/IkCJz/britIWHup0bwEEXyrNdh/s7OzMHbftk180\nFv9uOBymfCmvn48RlsijojMuvEgsArHz5s/nJF4bn+s/MaLDuJ1XPm/xejcEjCeebnqRxCmoHs2L\n88l4ve7AIyA+fjd20nxBG4rB594L96ir8LeB4rW7lxu3MuLhoHT5XzpNgfC8RqOhW7duaTAYpBTS\n1eWWozkAACAASURBVKtX9Zu/+Zvqdrv62Z/9Wb300ktzR33neZ68SJQ4qYuvf/3runPnTjpR8733\n3tPDhw81Ho9THt69MdYBRbHOt263q2eeeSalTkhJURMC7+gDPCiKpLrhdZ3gesRPVfb14S91Y9cR\nxZy+tvkfoEbhY6PR0Orq6vdIQj8a4fG6M+E61XWke/VcBy8dpAIipeID2CSllFej0Zh7942ne4g0\n+Bt8MepZlqV0DGeZlEqlBFDoL/2iAFtSSufcvXtX/X5fu7u7euedd/TZz35Wt27d0pe//GVtbW2p\nUqmo1+uluhWijr5u7969m87pqVar6na76VBBZJNzd+hLlmUpagJYLpfLevDggZ555hkdHBzMOTl+\nngm8jRFo191FTiQ6hO9ZHz43RP5xOtx2YM9wcAFBDv6IYh0eHs6lnSNdCDiho0UEo2LBpBtXX9Qx\nQhCNZszZAk58u5g0D1CKDHUEQtHg0PYiTzmO2Y1OPHHSjb0/n/v6/f6Zo+Y5TTIaOG/LxwDqBdg4\naCkCX9E7iv3yZ3h0Jj7XFVpM08WIUXxOnN8I/CjqvEgql8spZOvbvaXT00UdjBbx3fmEsXOD554l\nbaPA2FYLCBkMBnOyg4fjBaHsfHAlBnDJ8zwpUn+Tb7PZTICQN6s+88wzOj4+1tramq5evapKpaI/\n8Sf+hO7evZuOtncAhVfHmiRK8oUvfEH7+/va2dnR5z//eVUqlXRGCooTuUWWnQe1Wk0HBwf64IMP\n5p7JmFHk8JmxU0+wsbGR7uF737YO3x2Elkqzw+oATfSVehj6j+GUZjtCeHa5XE4Rrkpl9qZb6hWe\npMQ/DiqXyyklEQE36xT947sQ4RFGyg02c+Db0Nm2iwzD24ODg2TAJaVIGH3w57tBBbR4Kh1wQ92f\nR7/pO8/Z29vT0dGRDg4OEgjKskw/+qM/qvfff183btxIYJ/TY0nBoOPff/99lUolbW1taW9vT51O\nR3fv3lWz2UyHuBH9oOAW/cFYJpOJ2u22vvOd7+jWrVspWgg/vS4E3npND/0CQLPuPFKOrongm7Xg\nsoDt8KiuR75cLkgTMT8UEhMhXEQXAk4IycYiNGle2KHzAEI0kE/y2iH3knwhFYGEJ4ET/03bEaQw\n8Xip9Xo9CSDG4LxnQyhl0LkrvachN/Iu2M5zT034b36IVng43EFdEahhHh10+vxyfYygFCnCaLzp\ncwRKF0Eo71arNZdvl+ZPW4zRIOYQUMA9Mc2H8uBZbiT5vbKykiI4XBcVNsDItxP6czwiQ4rIdxmg\nzIiYbG9vq9FoaHd3VxsbG7p586Y+9alP6Sd/8ifTibDuIcInjHie59rd3dWP/MiP6Bd+4Re0tram\na9eu6Xd+53e0ubmZ+pRlWdoZ5Ip7OBym39PpVGtra3rmmWcknb4tGDDgRM0BkSQKcldXV5PhKJdn\nu5+Itvi8MSfw1aOu/q6ca9euJWBC6J25xnnylEi9Xk/bq10uLooA1aQMIQC3e+YYxaLUDrKPbHn0\nz42c1+T5EfJ8X6vV0n3+GgCvUUHOmTeOcPc0CQabSICkVHi7v7+vq1ev6uWXX9YnPvGJFBlYW1vT\nr/3ar+nVV1/V3t5ekhGijAAnANeDBw/02c9+Vmtra6rVatrZ2dHLL7+st99+O609BxJem+G6dTAY\n6NGjR0nHsq05rmXpFGQRpcLeElFFPt258dQ4fISXHs3zdDIv9kTuvQaFfnhJA5FcrjvPobwQcOIo\nKxqU6B1LZw20G8sisFAEKiCPlETA4u0vut/BkxtgJiRGBNxAsKjJ2+N9TqfTM9Xn3g/+RvlKpyAF\n9Oy5VhS+K083lBEYOKjwsXsEJQKMeEZEETDxZ/vc+vkQcQG68Szqh6faIsi6DOQL0M8BkDRn2N3A\n8517FxhMN3Iuq26s/H0ylcrsxXwU4BF6paYEGXRvKqYiMESsQQAwnwOMiT5m2exgtK9+9av63Oc+\np+PjY929e1df/vKXNZlM9MUvfnEOVBNRINxeLpeTR/no0SP96T/9p9XtdnXz5k3duHEjhYoxJuPx\nOIElFDMeLcrw4OBAWZalaxzExh1S8JC/8zxPdThEi1DqRFuZL0+ZwTvfygyf+/1+2kkkzYNqDLSf\ntYGecYV/0cRYom6J6535jbIMePA1TjvutXOd83YwGKjT6cztsPG0EO0iC64TAEHU1rEeAJPIFsaS\nqCPv2PnMZz6j3/qt39KVK1fUarW0u7urvb09vfDCC3rrrbfmrqUAHb22vb2dzjr56le/qkqlop/5\nmZ/R0dGR1tbW9OKLL6b0vOvP6NgAxpvNZqpbgvy0VeQG+eRz5o417rwAPCDXRDWZM2p84CvtSzNA\nSHppUaTbeY+M0J9LV3PiCtoNHxQBhhNGmO+iAV3049e7IS66zhXBouhJBEVF0RLvc8zTNhoNtVot\ntVotdbvduaLB6Cnz2z1ljB6nCMJX0DPXR4ASxwQfPB8Z+VQEFiOoWZQGi6AoAotFcxR5Ge+Jvy9D\n1EQ6relAMXi6iTnxhepzIJ3m2B1g+ntouA5gPx7PTl/Nsiy9XXc4HKZ1Eg1CBK/wlIgeJ1jyLC/i\n4zpOaC2VSmq1WukkyldeeUW3b9/WBx98oJWVFX3iE59Qv9/XG2+8kUC17yqg/5x+ef36dVUqs1NR\nS6WSfvVXfzWNtdPpqFwu6+rVq+kcFC/0ZXcOfKUNxoScEPGjfsGP//bCVebJj5B3J4C6EbxtT7Oy\ny6Zarerw8DDN6XQ6VafTmZs7wJ0bcgdx7oFfJLnuQH6YS4pRMXTwOkbjvBAUEOnvbIoRKOk05b2+\nvp54yxxxj58RQl9dL0SnCXANf7nHU6ij0UiDwUBra2va29vTzZs3UwSGqMIHH3yQdtAwlna7nVJF\nyMB4PNbW1pYODw/VaDT0G7/xG2l3jgMwaVaTxPuopNPzdlg/8HB/f1+lUiltmXYg69FInAoveGW8\nRYdvOliWlNaVz/1wOFS3203rAWDNsxx8+NjclhGpOk+2L3y3ThGwgKKH7/fG6ARGgLbckHG/X7co\n2uIKoogWgZUiY4txhmJOD6+YsXnY08eIssqy03QB+X5vw0ODvhDjGBkn18Y3c7o34REmH4vzMwLM\nRQAu8nERuHAwtAgUxrm4TGkdV6DwzV8CB4+lU3lDyfMOGa9NGI/H6Z0apNR8Kx4RGVITyBkKwz0x\nFJbXXOX56fHXblgIiXe7XW1sbKQtuVyLES6VZrUupH3YuXP//n0dHBzo4OBA6+vraRzULjhwqFQq\neuWVV5Ky/vSnP62f+7mfU6vV0sbGhvI8V7/fT4daAeAwGC4X8IVDqwAxLtP+t9cjsEuIufGws+fd\nJc0ZNue3R2HW1tYkKdU9jMenL3gD7MV172tWUoqAXSRh8OKpuA4IkQkIAAKA8HXLbhTakE6PfyCF\nAH/ZPs7ntOeyGx0cwAuf+9t6qe1gXBBRkzzPU1qUF0hmWZZASLPZ1P7+vo6Pj3X16lXdu3dP7XY7\npQi5ZjAY6ObNm2lXCjuujo+Ptbe3p+eeey69t4o1h/H3LcPIICkl5JJ0lNsfSXO7kTyq59GWmA7H\nvnikgwgLgN7TRx6RxDlhTXqBPVE/LyFgbTxpt86FbG9wYXZwIRWnUvzaaIRcoRaFw3ieRwYcYXM/\n//tvfy59g6JhdIpgxz8n1Otj9+vcmDivWGws6hhOxpsFyLinHBdq5J3zyHlJn/2ApNivyBvnl8+H\n3+/9iLyP/HV+xnqUIn5fNLHoUSrwEgXjvJROFbLz0UPinF7JwWe+u4t7UAK+o4a8MvxFMaNg3Et0\nkIhXiMFgyx+RChQfHv/Kyor29vbSoWoUndJfFBDvFsnzPBXt+vba8Xisr3/967p165aeffZZvfHG\nG8qyTDdv3lSWzcLj/X4/RW6QdwoVY3gfJQ94gue9Xi+dGkuhKnPjRjKerYKihgA4eZ4nrxbD5DUX\nw+Fwbr3DDzcCzE+RAr8sKcto1D0lEl8LIJ3qAebD66QAWhg9drBISsXZpBYoBKUWSNLc26aJaHGQ\nmhcYs67QlR71idFExuf9Hw6HqV+sPa5fXV3VaDTS/fv3dePGjQRue71ekmfk5vOf/7x+53d+J9Vd\n7e3tpcje7du35wrDXRfQDv2GH51OJ70UE74y3nq9rv39fTUajZQ29XXucsZ8+P2ARUC02848nx0M\nB588Pcuac2AjaU7efX55zqUDJ1Jx9CMCk0UARTobSXGv2g1nBAfx+ecZRf8+euuxX0zsovbdCHmF\nOH30RbIIWPE94TtHu5LmlKf3tQhgRT575Xusy0Gp+pgiqOK6+H302ov6tyiysug5Rfy/LOAEQxSV\nAEbSgQFGKPadOcXb46Am0gl4kM5jP6eEfhBiBZAAPh3wkqN3BUnaiXQK52+gcDFGKJe1tbUEiCiQ\nIy+ObJ6cnKQCT/rhnrg0MwD3799PRmZzc1N3797Vzs6OxuOxut2uqtWqDg4OtLq6qnK5nA4rc8Pu\nawaPF7DHCZ0ArY2NjdR3UqQu/36qLqeKOo8ARn7GA1ECgIqnIfzcCfhC5MYLS+kD25AvWr4BWZLm\nTkL1vro8QQ5+Me7oE04lpn2ihsg9ckk7yBtpjlqtluQt1re4MfY5ybIsgWYKszHyDk7L5XIC21k2\nezdUo9FQu93Wo0ePEnBoNBpzu3OyLEt9Aij1ej09++yzc0C41+vpwYMHiTfoBUk6Ojqa2x02GAwk\naW69Az4AJA8fPtTW1lYC3f1+P8klER3uRW6RV0Anc0f0TzotjKUvjJE17E6yrz1kwKOTkfe+1oro\nQs9EdjAS0yKLrpfmd3cUGT43aNFTj8/2vxelIGK73nb0PB1sgOr9b1CpA5GYgor9W8QD90YweB4i\n9nsieIpteqjWx8BzvLgzth+f5UayaK7iffH+2M84x0VANgKfiyIMkEcq3Fh5TQOeH+SREOl094F0\nasTY/st2YPiLt040Y21tLSncKLcobZQnoXSPnHnf8/z0BE1PWUizmifSLSidWq2W8snT6TQpsPfe\ne087OztzhhsvejKZHURYKpV0//59jcdj3blzR6PRSFtbW3OGnXfssDuC8UenBD5xjDnPrFar6vV6\nGo1GOjw8TMWIDqaQdULn/M0c+xrx4kCiAQBGalGYQ+acwkP3XH2N8DlA4KJ360jzp0zHrefwnnlw\nHnGvpCRrfuYIJxLHHYsQOg2Qxv1cB8h3HnkE2OucMLYAE4iCbdp0PU0dyLVr17Szs6Nut5te5Ac/\nOOPEoy/0o9VqqVKpJDDFYYmeKgEcIKPwlIMHIecncwCYYf3BJwChn3nCGof38MKjX0QlkU/nr9eV\nQMyfrweXDcCNg9jj4+O5iGehvD1BHn9PKIKBaJSlxSdr+j1+LwrfDRn/S2fPzeAzD+35c4oiDG4g\n+RvwEfvroCgaHdC/gwIWNcLECYg+0TH1BYp1JRB544s0GqnIV69hoX0vmoogg7E7+CoCCQ58Yj+K\n8u3R83LlF2VkEUi6KPK59ggBxh1eOpjwNKc0v0WY00TdCMRthxHEcr4JCoXIgAMTftNnvHdfMw5O\nfYfZ6uqqarVa+syff+XKleTJ5Xmua9euqdvtqtvtpvfroGwPDg5S+ocQ+GQy0fr6evIYV1dX07t4\nUNzscsPDKzr3xQFiuVyeAyikhg4ODtI5Fr1eb24tuC4BaHndBHNBBAFQ5uvK61WIlrILyIGOA/9o\nEKTT9MdFEoALWfXjzeOaRVb523WXdFoQ6cXARKiQ66J6IIwi/PHICjKFfOX56TkckK/JPM/TTjHa\nxKhHp7LZbKrVaqX05cnJiTY2NlSv1/XOO+/o4OAgGVvmql6vq1arpTOp/KwS1hkAGwBLZJLoKGDG\ngbdH3ugv0bnt7e0E8IlMUFDtUSFPFSF/yLM7h0ShmG90lad8yuVy2okGj7PsNPrFmnSHgbXpEc8i\nulBwEv/2/xdFDRaBGSh62UUGyw2iT0Q02N5+UVTC23bDzGLkB6PvR1cz0b5rAgXNhDko8LSL99FB\nwXnRhUXA5LxrvY9utCIoWwQknzR3RXNWNI5F1/P3eW1/3OSgkb4x33gpKOqihelz77wgCoK8EnGI\nxpkdJsfHxyqXyyml4Uo78s5BET/SqTePEWZXzdramtrttgaDQTr1stlsajgcpkLB1dXV9C6Td955\nR/V6Xa+99po2NjZ0fHysl19+WS+88IJu3ryZABTKinqZtbU1bW9va3t7O0Vt9vf39eDBA/V6PR0c\nHCTDRpQGoOCHTVG854dw1ev19PqIRQ4RXiKesSt2+MghiPCN9eLgUTo9CI4CXgwZax4gidKPBdWx\nGPcyECDBAbbXc3i0wmVKmt8KD5BzB9OBN/z0KAPy7R46upPdI/THHTRpPmKLbuVlf/TF3yjtu7B4\njw7plFarpXv37iUZYS2QbmQsgCtOSiZKd3x8nI679xNi4ZHLlO+KcvvlBh9ZdBnzKK2DRa4nzeOR\nDYrvoxPqgJR1QDveLvPt10P020FsEV2aF//x2XlGtIhcwbqHXWQ0i+5zI78I+LAwPFIRDXtM6/Dj\nnqJ7Q9wbIzlsDUNgYsTEyQGC9yn2M0Y9nD+umP160HgRD4uiFbFvMSJ13j1P+r9IJrzP8fOLpEWg\n2MPYfO+7qvgM7wKljpLw8zWk+ZQPyh+P09MKcdcVis5DrgAn5ozryuVy2iq8s7Oj/f19SUpbeYkA\nrK6u6uWXX9aDBw+U57m+/e1vp+3Mk8lEzz33nL71rW/p5ZdfTuHsb33rW1pbW9Pdu3fTZ+12O8nM\nycnJXFSFAkmeT7/39/fT+3IIbVPfE8PPFG8CArxQrwhoO7/9HkAfR537gWFRDkg9SUovaEPXRD2B\nIWRsGIfLkNJxsI3xOj4+TsXNDtx8Z1qRc+fn5Xh9CfqRa2kHOSMFRH3QxsbGnE6lD/F5UX+4LiyV\nSkn+RqOR1tfX1e/31ev1tLGxIUkJLB8cHGhra0uj0SgVnlJHxcF9zzzzjCaTiXZ3d1PfSKtwsNut\nW7dS3+JWYKLngGG3BfDAZYe1y/hpy9Ml7oSjJwDLtVpt7kRXaRbZjzVyABN3ZCLYi5Et6dQWwV/X\nMzGKHulCwEmRQfTw9CKDyWf+A4OYsCiQRdEE/o/P9O9if4uAUxH684XihY9uqPH2JM15Bb5gYjuQ\ng5W4ldH/9ol/GtBXFEWJ+eQoSEVeuIM0R/j+3NjHJ4GQ2OcIxi4LMJGU5ts9NmkeMHq0y5WyKx3p\nbK1Qls3v9qJd2nDgQl4ejw4P39v1Cn28H9rZ3NzU/v6+7ty5o2vXriVFCgC4c+eOXnvtNb3xxhv6\n+te/rldffVVvvfWWhsOhXnvttXTcerVa1b1795Rlmd566y11Oh2trKzo4cOHqe+3bt2aiwAR/uYs\nBWTp0aNHSZFSdHjt2rW0gwhjh1cbPTzAP8bCdzehsPFeAQWsvTzP0xZPzrHx+gqAJP1gHh1oYBhI\n72AQfAt/rNnwGoSLJE9Jx7SXgwIP5QPo/PA66dQpg//u+aMPPUIlnUZpANfw0wG1A37klD4Actrt\ndkqbS0rR7Dyfpe92d3e1tramlZUV3b9/X594fDLs7u6ubt26pXv37qX+MJ5Op6N3331X165dS7uK\nSIOgo0kbAX7oE7JCGoa/PfqDrJHmQV49mg1fWfukGeEV7yfyKCrPJ2pCX4hKYadcN7FT1O0TfOC5\nDjK53+uE4AfzvIgu9E1pRcAgGqhoEBd5/xFYnBchiIwtMqTenwhuojcQEaR7pY463bMo8tIgFh1K\nkv45avYIhxs7FxYMkIeuiwxgHKPzOqJ0+kL78KDICHt7Hkk5b079Pgd0RQBx0TxdJPli8/mm6DJG\nk1CazK90GhXxPLqf2OgF1Q54pfmXruGBsQXTyb105Mp3CLz33nvqdrtaW1tLioXajWvXrunrX/+6\n7t+/r+eff17ValXf/va30yFqHgafTqfpUKkHDx6kMDYnG2dZpnfffVfT6TS93A3ZR8FLSt83Go20\nqybLsuTV0kciLByyhUJtNptzfKNGgAgPfPSdUF7ULJ2+9dm3ZxKZAgj6mqTfkubOy3Dj6anbmH7L\nsizVtJznYX4chOfrY4R30UsHYAFIWMsAOOkUxGC0OU9DOjVcRAzdiaHoU1KKRhAlc6NLv3zbcaVS\nSeuQ97v4561WS71eT71eT61WK23ZBQwTMcGQE+kj8kI9EXUy1E71ej3V6/W5HU9e8Mzc03fO5aHe\nBLlG1hyYOUCD74ABwEi9Xp97NxWggahJ3NLtYA9g5CDddY/rNPjEGqZ/bjOYF+532Yl0ITUnUTlL\nOmPwIrhwigDGFT5tFRkyro0/tFlkjKMhh+neTwcWcYyek/VQM4uVPngVP9cXhZr5P47XozhusF0o\nfPyxTedV5J/fuwhcxL45f523DnCKIh8YBTfWkeL8F8nIRZEfxOVy4SctSqdnYpASQB7gLwvcZYT2\nuV86lVsvAIV31F5IszmlvgH+OQji2RTOkjNniyx9H41G2t3d1Y0bNxKwWFtbSwCFKAsRm1KppO3t\nbZVKJV2/fl2f/OQn1e12tbOzk57Xbrd15cqVOcDCy+6Ojo4S4Njd3dX29nZS0rw4DIPjWzU50K7Z\nbKZcvUc5ABrk5sfj2VuP3QtEscLnlZUVdbvdM/LN2sV4u3PghpJIAd/7WiUS5A6GR2MumhiLH2YG\nQKGmjrEjL4BpN26+K8yjwPz2KLHrEk8JwBdkkigIsu3nbLhOxMAD8PkNCD46OlK73dbKyooePXqU\n1uCdO3f0uc99LoEGTitmnLVaTc8//7wODg4SeNnY2NBkMntpo+tnJ4C8p6IAXgA/PwfH02XOH0+/\nkM5FJr1wtshuoosB+YA/TwOTYiNqCT/9O58PACVRGfrru6YASF4oHenCwEn0umP0IhrEaIDc8Ppn\nizzo+Bn/u1HwNrxNro+G3z93kBO/p6+gzAhsYuErCzrLTo+ALgIqtB9DrFLx7qTz+BB56IspRoqK\ngF0R7+IcxmfEn8hnb8v5ViQTlwWgsBg9PE9/PUTqcy2djVJxvXvNXgNUJH8AEPeuJpNJUt4oC+4n\nVEze/ejoKMmTK/XpdFYQ2+v10q6b0Wikg4ODVOD67rvv6sd+7Mf04MGDlP44OTlRq9XSzZs3kxJD\nWX7605/WeDxO2yzpKx46L74bDodaWVnRO++8o2q1quvXr6ter8/VA0hKQKrVas05AM5vQs6dTkfN\nZlOdTidt4fQXo8EHxs468nSMFyCyG8o9YCcv3GQLqr8XBqPZ6/Xm0m+SUvsXXXdCesBTZkQ8Iuh2\nz9r1CH+ztRxjS7QKcr0ToyHSKbhjcwHANBpUogkeZUCWHcADxh2McvDhgwcP9PnPf17vv/++Dg4O\n1G63dXh4mPrCSbCs452dHbXb7QR0m81memkhMsbhboPBIB13j8zDH+ehR5yQazfqHu3AnkinQJDo\nG/qDrcIANGpPkNF4pL5H4P3ZtOF1UpKSY4b9Qpd4DZFft4guNK0jnU2FFIGLaASh6HkX3Uu78Xlu\nUBGAWLlf9LzIzCJjEfviUaHYTjTCDiaYXD6PQGPRGN17jtd7CqYIREWQFnOb7g1G3kSe+P9F4LKI\nHLBG4AHfiiJiRc+9CPKwLZ4Zn7HwUai+w0M6BQyM3UFYBL1ed5Rl2ZynBSAiWkPBnhdnAmbyfBby\nbrfbSd4wCP4ulN3d3RRNaTQaWltbS4a8Uqnok5/8pN566y21Wi1JSodWYcw5sGpvby+FzzmYyj0t\nDufK81mNx82bN5XnuW7dupVy6BToMt+8MdhfUIjMwg838AAePoOXeZ6nz1HWzIvXj8E/Qu7OS//e\ngSNeMsWGjUZjzsCR3nD9wmfu/FwUMT4MbbPZ1O7uriaTSTK2AAB3Lly/OQhlfTA/1NV4vYIXyyIf\npCIAu34oIPd7xJaoG9GReGR6r9fT6upqAg8uO91uV9evX9frr7+uzc3NZCdqtVrauTYajdTpdNJn\nvJm72+2m55DWAbCsrq6m+gwHah51lU51MM4qtSDOt2gjkElpJoOkdpFBP5rC01r+Ek2iL/5s/o58\nBXSwpgAqpIt4jgceOFeFsSyiCwEnLrzRYC36P6YHitpzA+jILxpd/9tzZjCRRebXu6JyilEfnr3o\nefS3yLh7H5g0XuoWx+5GKoaJ4zVuvIvGswgocE9MuUVFWRQtiW27kfW+0G78PLbp/Y5g9TKAEgjv\nxFNjeBvR2OCFFoX7fSeOG74YLVpUf4JMssMExUfkhGvK5dk5BZwbwlwTfalUKnr48KFarVbanTEe\nz47glmZzsL+/n5Q7yp/zINjNUiqVUuHq3t5eWmMocA8Ru2L0k1f5nDNcMGhZlun9999Xq9VKBZjO\nD5S5F3PSFp4vY+33+3NKnDHCMzxT+MBz3HMtcq6YY+aD+XMPlvs468T10Hm5+Y+LSqVSihKQ2pBO\ngbinsL1wm7nkb9IhXn+CjDPPyCkRphj1y/M8ySM7XphjjONoNErvInPZZn739vbSCwXRo6TmpFkU\n5OHDhymK0mq1EmDHIGMvABt+/o6nVvwsF9fx6ADWsUeeiAQSyfNiY+e9R2C95kOayR4OhKSUNoLX\nrmtcB/CZp+Qc0DBn8MzBpV+P49Hv97W+vq7hcJjkgfsXytvvQlY/MhUZzUUGx41ykQfu3skibz5G\nQaIxxpC4AfVajUUGPeZNFxn6ImO96O84rvOASex/jLoUUeRFUUSDtoqKaV3oY3olzkWRko7GtQhY\nLAJuPp+Lnn/RRLgfZeMH6rlXh7LNstnx6ngZzmPpFHy6N+TFlkQe/FwMvJ2joyNJp2emoCxJY0gz\nQ7i6ulrYxng8Tgeh8fbswWCQ+thqtVQqlbSxsZE8RT/Hh5B5tVpNxtzXOZEXjLenMzD2nNyJwe73\n+wl84IH3+31dv349bWv1gjyIiIgrc4CTg5XV1dW5VC/f4Z1Sy+Lbhz1a5lG/LMsSUIu7QtwbAAYo\n7AAAIABJREFU9TVDv2q12tycX3RaR1J6VwtGJctmLyItSvGSAgKUObBmXaBjIt8isPHdOdPpNAFl\ngAzRAuaQPhDF43/qjySlU5Rpv9frJf6vra2p2Wzq6tWreumll9KBa0RepNOXMZIW5D1NpJccgLis\nui3waI5Hnfg7y7IEgHxt+DX+tvIsy9K5Jg6GJaWieNqFJ8gx/eB+L4D2aIp0GjX09CU6wUEgn7Oe\nACrww0FWEV0IOIkph1j0xzXS2foEpyIjFkHGk6ICRQaU5/v9RZ5LBCb+u+hZ/rtoTP4MB0VFbSwC\nYou+d+8uRje87diWgzS+cwR/HiApAg1F9SI+bzyzaN6LAGgc40VTlmUp3OzFjMhVuTw7O8TD/75Q\n4a0reRSqe0gocLb3ugI4OTlJkQ08FVIGeHL0BSPgZ09Mp6dvhh0Oh+r1ejo8PEwnq0pKCh3jDYjw\nuhiveSqVZmdU+Iv7JKXj5ZEHV+IoP89Rk/LxWpBms6kHDx4k4w94ATSQHqDmhBx/v9/X3t5eeg7p\nL3jqc0rI3Ne8A3XqCeCfRxFQ8hiTuEUZoObgCR3JQVoX/VZir3/gRFN4gU5gfbpxdeAhnaYekHe8\na9aHnx7Kc5EhUooeGaMfGGn6R7qNPjvIHwwG6dC+0WiktbW1BLqGw6Fu376d0ofb29va29ubS82S\nLpGU2jk8PExABdDmYIQUiMsVET36jPz6+if1g0FHFj2tgxwS0YB/jBee7+/vp3uQPyKktEekEp3A\n+Ogvcx3nm4JYIj6eegKQF4GR80D3pTi+XjoNh3tEYhEokeYjAEWGrCgKUtRGvM4X0aJoS4w+xHHF\nZ/j9iwBVfG6MEjgIgBZFLuL951FRVKfoe55X1GYR8Ip9j58XtXFeXwAvtOd8izy7SMLwSUrhfwAL\nyoI6DK+LQP75mwWPMpA0p2ylU6+Hdvmp1+vprAaPfG1sbMzVEPlbVv14aun0QCe2HKLUPRwb+e+h\natr0KA/RkcFgoP+fuTf5cezKrr0XyWB07IOMNlNKlUqlaqBCGTDgkQHP/Wd74omBMuxB+ZWsLlUZ\nTbJvoyX5BoHf5uLJy5D8fX5iHiCRDDb3nnuavddeuzmj0ShiCGhYmzQPNuU6k8lkLVgcC+/4+Fi9\nXi/AHOsJhfHw8BCsD5/n83m1Wi1Jq+A8gMTh4WFcy8fRgwJhcxgzZ46k1fpnTSC4vaGwuAdji0JF\n8bpS20bDWkZRUumUZ3Z2A4XEbzzmQFqPDWGePBbLlSX3llYuRMAbSg9Xxv7+viaTSawr7geQZ8+5\n+yKXy2k0GgWo5ViGx8dH9Xq9iK9xcIjidlkM40OfYTyYe/rq9WBYX8TNYEjQl0qlEicMO5ijOdPE\nenZw7q5JScGKsBYdLACaiQ1z4AngdtcboMVBu7tz0iMfGD93r/6UbtoKOEmVHILNFVT6vzf/zEHE\nS1a7C9KUufHPGDjeS5VrFhuQ1c8s0OCbzNkC71f6e4/z8Pc2ARPGNwUE0odsVFZ/NzFVLmhdgW0C\nkll9TYGEf+6BdOn4+fyk453O6bYbyg0hvlyujlfH/wvA4NmJd/DxcBfL4+PqoD8EI58DEvL5VaZO\nur4J8ET4Iyyl1YnGgBHmAtqcPZDLrU6mdsDkz+JWFPPrbgsEHsoYgMF16BtWIn0mkyIriJW6I5PJ\nJFgVaT3Wi+d2qhoLnZRR3GCwF54i6+sL4Y4y9rWZpnpS2TRVxsRHOE3O3KEQuBfg7CXf/C/RGEP6\nxenOxFGkDCeK2fc8gMtZPMBZypZIq/N8pBVjwTUA4tLq0EjWGSn0WP6S1jJg/CA87uuyB8DFeUvE\nTjkr4bEwLuNYn647iPl4enoKJZ3P54PBGY1Gajaba8xJsVjUeDyO1HWPXUFm8F1nVFOXzGg0CuAL\nS+L9dgDHXJFiz5r273h8FK5ongXWE3DHMyM/YBg9g+ejc+uk9HyqeFzo+nd+juWdotms+6TC25Wg\no9OfUrbuisoCO+lr74d/tokleokNyAIP6TNvep32IwsAZvWH72aBrJeYlyyAuIld8fd8nPn9x8SS\nbGoACJQ9EfNOiUsrIcx6g6VAWaYK390+KDvuh8UE2wCgwcqfTqcaj8eSVtYLlWNx43At7uunrqKo\nAQkIO6wlL6DlwbfME8Cf/gBQDg8P43mxWnHV8Ky4XnZ2dkKJu3uK2BqEPS4S3DRQ8KwbwNTBwYHO\nzs6iT41GI1KRPXiRceZvdzXQRwCdAzKeEQsZRQf4pM8IdxSBU91eQ2WbDSCWz+ejgFmlUolzZgAc\nAGeAKM9DUTAHGYAamlvorG/XDcw54MAzZSaTSSh3H0t+h8xHqTp75syctIqTYL0UCs8p5Nzz4uJi\nrd+AI1jAQqEQrkpnlqgVwrosFouq1Wo6PDzUzc1NxMiQir+3txdrm/GRVnVEvM/S+qnR9I/ibzA0\nqcx3A9kZGElrzC1j6IfRMlfMJ0CJMeYaboAwD27kblxzP7Em/580t6T4m0WYBVyyXmf97da2X9ut\nRGc/+JxreZ98caZKwH/n101bVt+z/mfTOKOQXncTm5EFsHzBpfdPQUDWOPr1oORcaTl4S3/j/c+6\nV/rsWe87K/QSSEr7+jE0D34jvZU5JqVPWs9QQng4IHFr210HLvA44E9a1VdxYeDWFNk0KGqsI3eB\nACCcDuZ5SB10Xz5pv+wbshrI2pHW9xUgi+dgHOinsya4mebzuer1umazma6urqKuCoqIANbhcKhG\noxGZN57x4YwHio+YGRRrsViMs3xQgF5BF3CUji3P4LUhHLxx/g4AhDGWVhY62VKz2SysUUAM13zJ\nwvwlGqnoxeLz4XWVSiVYN8AHqeq+flm7gBtpvQK2xwYxb6wJ1jzzARCEgcDlxfWp+gsL6LLQgZID\nF4AA/fJ96fE+xCJRGBDwSlYagePISWqdpHFPMBeSwlio1+uxhufzeZTYf3h4UKvV0ng8XmMq0Rce\nlMp93XXDXidwHVDsYM/dvO5q4TNpxZB7are0iqNZLBaxXx2YkFoMKHMXrj/PprY1HtyVT2o9Zylh\n/74DD77vf6ef+e9fUmRusUOp+mLdxHRsUtIvsSe0NGjQX/8UqyJ9ePgfLXX90K8sl1ZW2/ScPgbp\nPPjrLPCQ9uelMdoEUP3vrHWz7caGhgJFGd/e3oYVtFyuAj9Rlu4q8FN1CeREqSFkvWiTAxLKtkvr\ngZy1Wk3SiuEYj8drNSZgbRB60opKRymQXSOtZyW5MkAhE/CKNQ2ocVcG/XFB+vS0OjYeS24wGGh3\nd1etViviVvDR397eajweh5VJ312RueXo7M3BwYGq1apms1kcWsjY7e/v6+TkJCzCNEWZeBPWs48Z\njBWAhf6yPrwyJooO5Z5awwCUbYNvntULiaEMPZ7DlZuPcy6XC9cLay0N9JRWAaCMG4yMMykOaN+/\nfx8W/t3dXTApLldh/dIUWGkVh+EuPNYpAJF/0+lUlUol+spawvVIv7yysq83gsu5N4cEEtcC4HPg\nTnVlaRXcTuN77gp0Q8PZIq6Xy+XCJUW2mjN9ktYMEH7DNbPACf1iLJwB4/eMP3sDY+yjc+tIHyqW\n1DJnUJ3BiE7nV/UbpOwsnU33c6o2tczT32UBJv+XAh4WCIvFFby0bv2Aov0zX1AODPzv9Dn4m2fk\n/7SPaXsJMKXf82fyZ8U1gND3PvJbv5d/7myA9yUFWg4Ys4BIuo623dh40ipgFVbDA12d3i+Xy9rf\n31+by+Pj47DolstluD588+MuYs+gAD3YjT2CBZPP51Wv13V0dBQAAEWLkIEpwY+MgnH/MusVgQ+1\nns/nVa1W4364JbgP15cUp7/mcrkopIaiRvkVCoW1k18RgFDi0PmM1d3dnabTaYAg1uVsNltLZ1ws\nnv3xw+EwzlHBiiRYl3vjxqBc/v7+fgRJMjeHh4cxplDby+Vz0TJn0zxYlvcAZvQZZoBr4QbcZkMJ\n+TMAHt0N6e95kDVriJRd1idMG2MPGzeZTLS7uxvuEq7hLAhrQVK4XtyFwP8AS1gZroU8Zn48Xfz2\n9jbqmsxmM9VqtbXUc+K4iFdhbNx95UybtHKNEfwLG+Muv1wuF8xUuVwOoAWAQ9axjp1tdXkLs+PB\nsdVqVeVyWTs7z4UAqTcEY4gx7nuE78KmetCyH/EAU4hr05kcgEoul4vxQ056OEfatl7nxBWLDzIL\nMDpqljoC3N/Lsuidrvq5bAv394Hz97MYHQcHLH5vfn9pHemyQbJYEb+Hf5YFirKUcwr+NrFR3vz7\n6YL3e6NgiAPgmfy7znLxfroYmesU+Ph3/yfM17YbFp+07uZCoUnPwg8LjEBMhGy/39d4PNZgMAih\n4Osd4Uw2gLTKwimXy3F4Ht/xgl5YpLlcTuVyOeYR0OMBbLy+vb2NsuowBgCAu7u7ACLSSsiTPQE4\ng+nxuV8sFlGFFUbm4eFBo9EoBGqpVFrz1x8eHgaQuLu703A4DEt+Op0GzY9gvr29DZDEmAAosBQZ\nfxQMmR29Xk+dTmctVRsXHQoHyp/7UGSO/Y6bgrEB0BUKhVA8KHYUHn0jZkFaFcfaZgNMMxcAMBg8\nYqpwdeTz+QiMfHp6iqw1AlJTme9xG4VCQdVqVaPRKFxqKHCPNeL+yOpyuaxerxfgA8sedxpGgbMJ\nyCzXLbe3t6pWq5rP56G4vZIr5ev9AEoYQBgz9hAgBODOXALac7nnDCTWPrKCQwTZR1RcZX0yBqT8\n+zixXhkv3F2ML+7PyWQSQJjvpG5pADYAmjFAntDHdE3QH5gy/macAEwvta27dbKsYbfU/bssBGi3\n9OGcypM+BDvcw0FGqvAc0HgGBS0LRKT995YK5JRd2PS79Nop25PF6PhzpOzDTynudFz8dQowuB7B\nUSxA/34WQHGLKes+6ZgwB+n9/VofAyDxhoBwZi/1CzslDoBA8VWr1agrgvDyKH/Gm2BWFDtjlBVw\n6+uGoFHcCE5jexGr/f19nZ6eRoE15oLiVADSyWSi+/v7KMmO5eXCFUYHVxcCjLEBVBCfsbe3p3K5\nrOl0GuOay+U0mUw0nU5jrBqNRghTt9x5dgJsPY7FXWcI45OTE5XLZTUajbV6Jf1+X2/fvl1jiljb\ngJ/FYhHKhJORSf3E2nTA5+MFsGdsCVgEOCGH2DPbbE7T0x83DB00My5ulN3c3MTYeFo9Lh9A9P39\nver1uqSVe8FlJmvIZQzVR4k3ccaGFFnkBkyA6wiYQgANwez0odFoBAjzAGfAF4CKPe2l3TnYEuVM\nqq60yuzzNQyYwkB29xMy0d0n3Je1kuoI12G4VfL5vE5PT1UoFMJlCmjCYFoul5G2D2CiX2msi68F\nmBK+6/FkkgJkuR7f1LbGnDiISAEASsfRrLsBeM8VtrtJUFjug+N6qQLmft7c3eA0bKoMAVH+LP4M\nfi3+d0bIAYa7aJyByWIN/Hc/xX6kbM+m7/2UayR1LfEbp/bSACpvKTPibqAUoKRAzN1fad8/toai\n4ZmcKl0un+MnOKnUhaxbk61WKwq1LRYLnZychC/b/b2cnYPF5cKA77A3EJy5XC6qspJNAljxfUGR\nsuFwqLu7O/V6vQA+ngqLsD4+Pg7XTaVSCeGFtcSaIKCS+UegTqfTYBdwZeAGQBATNwIA7PV6wYrQ\nf7fUAGMp3Y8y4r3BYKDpdBq0+uPjYwQlUlr89PQ0ADiAApk0n8+jUirsC1k/HiiLBQ/zhKsJoJey\nZC7PXqK/f4kG8PSaPMzVdDoN0HF3d7fG2mF9o3QZw2KxGIXKYOVYr71eL76DbGEcWS809s5isYg+\nEvPhwbIuS0i1Zd1QGNCDw2G9Go1GsIQYZO5mkhRsDXuJfYSRwHqAQZJWMVuwLtJKX7VarbWYNEnB\nOPl9AFOpPoNdYtzYr+z9arWqYrGoZrMZaxBQ5wY/BgHHQtAH1qnHmrjLGjYQIJS6NJ0B26SbpC2e\nSpxlAaOUvHgT38lyl9DYyExyymQgQPy+mxgUvx+fu4/Tr526irKek/tLilgE+pyOg/fNr+n9yrqX\nK32u7ddxZiW9Jm0TUPPrZzFaKCyEC9Y4NN+mMWH8stxtbiWlACUFsh9bc8oWAY5lxGYlBsKBKwod\nxYrg4RTg5XJVV8BZK0lrygAlTiwKa8796whXMnjq9XooGoQM4AnLE9cRgnt/f1/ValXdbleSAsRU\nq9UIQPVAX2ciid3w4DhcAZ5pgNKn5gjrDvdMpVJZqyuBMiC+wKlqru3gjXXtpxBDxS+XSx0fH+v6\n+loPDw+6ublRrVYLWVCr1aL+CjQ24wr48PTsnZ0d1et11ev1OC6AgnQeU+JAhDX0kuz7pRoMhwev\nQs9j/RIvAguCPCYeCLAuKbKscrmc+v3+mrzwefLMGg/8hlGRFPE5pMiXSiU9Pj6fms0+Ys0AXgCu\nZJAAgmezmQ4PDyOeazqdajqdxtENhUIhGC5nMwGze3t7qlara/sNgIZLw10szkYAEABCDkxdhzib\n5EYAspPYD5ieQuH51Gba999/H2D7/PxcvV4vZA4l+B0wTSaTNTCVxtFwPhhMZ71eD3aFfzxjuv8+\nupgTZyakbIDiAZ8pcHBgkQIMBCuTl7oUHGBkMSnuz/TvbYoNcWXqin3T+7RNrIYDiixmI4vBSa+Z\nAh3+dyYqfZaXmv+WMfK+k/aGEOZzFHN6H1feqeDl9yhhXwc/t7/bbAAQBxnu6kGhSiultb+/r/F4\nHODAi6b1+32NRiO9e/cu3B77+/sqlUoqlUoR4IblDWjBx00DfKBAPcvm8fFRBwcHoVBQnG7lknp8\ndHQUKY+9Xk/NZnOtqNZgMIh55zmxUB0AeCwI848/3dcO7g+Uy3w+j0BEYnWg4FH6WO8oRQQ/v6dh\nCAGMJEVcC5b0xcWFrq6uNJvNIoBwNBqp3+8HRQ27Ij0rOQA7c+nF67g/YA/WivWAZSyt19vYNlOI\nQnbGCEW1u7urbrcbVXV9/t2t4sYZsSleG0ZaGSkEJbMeYUX8pG1JEbd0c3Ojp6enCGz2M2ZYv9J6\nbInHZtRqtcj6urm5UbPZjBRcaZV6D4MJs8iegpmgSu1wOIx1AehBUcPa4ErCJQgrg2JnvKgoDVAD\nXDGOyBH3JuC+Amzn8/lYazs7O/ruu+90cnIS690BCACOvjN/fqAgWXXs+3w+r+FwGPf3lG3PCPI5\nduMpq22NOZE+jBFJmQQam8KVslveuBec9ubzTQyJsyNpQyj4tdLrORuT9peWukq4JwLKn2mTANp0\nff9+FhDx36eAJgVNDubS8fA++7ylsSBewdSfy+fX5yAFW8wFffHfpi69tM+bwNo2WrlcjgwWBCpW\nITVBqtVqHByGICIAEKvR008nk0mACOIVZrNZWN1kq6AomS/PQOC7TvO6ewNlAfXLScNOJc/nc717\n9y4sP5SR08YpcEW4QUUzTwhnLFisbk/XJWaAbB2EMBYqboT5/PmYgE6no93dXdVqtaDlPWbDrX5p\nVcsBa3Q2m0WmD8BisVjo7OxMNzc3Go/HmkwmUdiKuBVpdUbJ0dFRBLrihnK27P7+XuPxOMbEA27z\n+XyUMncrFRZxm42MFeYLZg3wAHMhrWf49fv9UNrI1FwuFwHH7IvUoPI4oVxudeAewB+5wO9gLwCc\nzrK5CwGZAQsDGJ5MJrHOyMrZ399Xr9cLWcTcwEoUCoVw0wCG+/1+BP8yBnd3dyqVSnHfNHaHGCWC\ntcfjcTzLdDoN8MPYALqlVXqwZ/NIq9g3+u01Sbj3zc2Nzs7OIi5rMBjEYYhkFHHtyWQiaZ39Bxgy\nPzwDwNLdbABG1jLz89K63mpAbKrwXeFI2ZZ9VmwKD5wqxzR2IY0RSZU4wtzjPiR9oBxRygixFORk\nKX36nuWuyuoL13CA48+Ttqy4l5TxyGJ2sgBJVtv0HQcXFAVDaKTMU3qvTa+zxiHrs/TvbVPfkkLp\nk32C0sJyQVjhu8XKf3p6Uq1WC8DgAaWNRkONRkO1Wk2np6e6vb2NeBC+JymUJamtrHtAO1kBh4eH\naxYwwIUMoZOTkwAT0qpSpGcx1Go11Wo1jUYjjUajuObh4eGaJQigwEoCIOVyuQA5WKhkIHlwYC6X\ni3RTrx0DLV6tVmOdHxwchGJ5enqKwEqUQUohU8iOrAsKahGo6GD+/PxcNzc3Go1GETtALAJ7i+wm\nB98wUICop6cnHR8fBw3u6a3Q+ihl4hq2nakjKZ4P0OVp68ViMdwizKOzErj0CBjO5/MR8H14eKjz\n83NNp9M1Iw3glrKmKDbkNXEsFLNzxpU1hMXfbrcjQHW5XAazAQswHo91cHCgRqOh0Wik9+/f6/T0\nNIAKDAAybjwe6/7+PornwVZ4TBSGHM8E2GRMYIAc5NIXd415PB/K3uOZGBPe9zLxHtvmOmg+n+vq\n6kpnZ2cqFArhQvWibXyXeXSASdo06xTAB7hEHqfuOgczL63trZ1K7HQQEwhtlrIkWTEjbBYHBkwO\nEdcpU8G1/T5ueWe5k3jtFC2WsQt4acW40Hjtz8HzZ73vv3+J/UhBRtqcbUktEu73c9kGB5D8nQJL\nHzOC0diAKWhyEJHVjzRWI2W80r/T8dl2Y+0RzMfrnZ3VCcLSqjIqqbIAFT5zQeDFw66urlSpVFSp\nVHRycrIWqMh18PljfeFSqtVqa64RFAXXAFAAQqDIEUSHh4dhKQ6HQw2Hwyi/PRqN4nuwOE7rOjD1\nNYFARAjDmiCApQ8VEkBnPB7HAXQoluVyGSmSlFZ3xsT34HK5VKlU0uvXryO1m6BhSuvTp0KhoN/8\n5jeh6FA2rH0swDStns9gip6enjQej6OOhfvykV8wUlyDGKJtNj+SgPmVFHEYruQlRYAz1jVrFEWJ\ne0ZSBMd2Op3YB4xj6pZfLpeRDcTaxjWX6gyYM2KhDg8PA6C78sYIYMz7/b5KpZJms5n6/b5OT0/X\napB4n2DQlsvlWir009OTTk5OIn6ENe8l7l0/sOb9sE2MGUAY7zljwe+llWz2SqywQ9PpNLKGptNp\nPPt8Ple73Q55UCqVwh3k3gPPusHFiosLtokMO/qF/CMTj/ly3fBS22q2jrQ5hsStCd+8zh64sGJA\n2ESpAuVeaT/82t78b++jpLVJ4/O0H/Q9vc7PaZv68tK1NgEWf52CI373U/1iHF2Q5nKrACfG2AHF\nJgDkc/ISg+ULOAU4WWzPz1nsv0RD8QNkR6NRFBtLgyQXi0UEVgJgGGOYCMD3zs6OLi8vI9sHUOjp\nh55xgiA7OjqKuWE9ovQxCnZ3d3V0dKTXr1+rXC5rPB6v+ej7/b6k573Vbre1s7Oj8/NzdTqdtbLm\npEgi8D0DhfXgQm9n57meSLlcjvRqrC83LhCCWJtYyKVSKbJ6Dg8PI27HC6dJ625MX3P0rd1uB0DC\nVYEl2Gq1dHBwoNPT03DbdDodzWazsG7Z62k8A6CCPQGzAGtCf1Do9I2AXgcD226kkgKAUYB+VAHx\nDAQKo7gYG5iFYrEY7k+qrnrWUi6XWzt8zo0g5n00GkXMCnPpzDigfrlcnTkFqwhIgiEAGBDbQiA1\njNf+/n64YUulUrgfmWdcs/xP3weDQewvZ+QlrYFs1nmlUgmw5YYz7mAK0/Gs3lzHIX8BtGQHMgfz\n+epgy8PDw3AX49bCFUvDYHD3HZ/73vKUZGnlsnE9gaHxc2T11mJO6Jy7AFIkyUQ6s+KfO5jxhydz\nwT/jXm7lS1oTGlluDmcMvB9Ob6V0Gc/nz5HlcshiAbieAx1p/SyWrOukCt5f+3ilcTQ/p3mfUtCH\nEHbFyn3cb5+2rDlM2Zm0r+n3eM5N8TLbaARDEvMgac26xipB2HKo3f7+viqVSlwHoYTi39vb09nZ\nWYw97AjWDu9zz16vF2nGzEs+vwoKdX8+1DQCClcJ/mUvtFapVPT4+KirqytdXFysxRT0er3wOSN0\neVbALWwNbq7j42NNp9NgZaDQU0DjLGW1Wg3l7dT83t6erq6uQgG51c7+cZct4wXIub6+DkubAEZA\nFgDm9vZWp6en4cbE/YJbGSXHa/fdM5/MAVYmJey5N/LKD7KDVdtWI/CTDJh0D6JQUdhkzCwWizWg\nC8Phhsx3330Xqa0EiM/ncw2Hww9kDe6Vg4ODNbeJz7G7OZHbMIIwJMvlUu12W7VaLWKtSqWSbm5u\nIpUXEPX9998rn8/r5uYmMlJg49hL6Jvlchnpx9VqNZQx96SPsO+s6WKxqOFwGN+TFOzrbDbT0dFR\n6B0MH28pI0gNFhpxJRgmXjSPuJB2ux3zgsyg0Sf0H2AsZXl5VuQxe4igYFhJD5DfuOb+P67V/1+N\nBQcac79Z6mNMFVjWQLjCZDBoKevCNfmuKzgHCDRX6lhxaR9o7mNN75VOBH3yZ0/vze83fe6MT/o6\n67cOVLKYpU2NOUndYPx+U7wL92RcHISlr1PQkQXYXgJnP4cB+iUaAimfz8dBYAhjWhpLAH2bz+fX\nSmLDfuBmKZfLkcHC+sP/DWhAaRYKBfV6vUj5Zey4N6CE115NFgHi6xsrVFpR0g8PDzo9PVW321Wh\n8FzVc7FYqNlsqtfrrVVlhTFyhoHYA863IXMHYITVS1+xIEejUQQn4h4AFJJ5ICnqlbgxQmPt3t7e\nxomwjUYjxj6XywVo8tIGrLVqtSpJUUOF+UW4M9Yu41BK7hZmrlAU7r4hm2PbwbDSqvbNcrmMmi68\nv1gsAhACQgHCPD/zj9uAAFbcb+VyOcZ0Z2cnWDAHGpLWzrdhrtwo9MBZwDcgG6AprVwOgC32ArV1\nDg8PAxywrk9PT4M18/oixWIxjI6DgwNdXV1FcGylUlk7HZyS9awFCjE+Pj6GEVAqlTQej6P6MLFR\n3BPdxfi7HkHewHLs7OyEsfD09BR1TmAqXRcDIKRn8OLxMakeBqQw/riT2Y/5fD4AkOtP1yWuf7Pa\nVsCJA4Y0/iKNN/GWZWmnQpff8+AgO+lDhSmtKlT6595SpekDysR68/vR3ywl7Ao76/mDW9nQAAAg\nAElEQVSyhGlWS4HIS6/T936uQvcxcICSRZungOKn7uNzlLJN/h02hLT5UMOPAZxQnAyKGSAhrao8\nkh3CuKEI3bJZLBbhTsnn81GqHTcBcRUwBE6XkvYJPetl6z3+gmqs3P/29jZqTvh5KcvlKsaDfUYt\nCIIBPdiVzB/SKgkKdcODfuMuwLqVVkrZ/dXcj8A9Mpomk0kAAmJ63KID3LDn+Iwx3tnZiQqgjAVB\nxjwzAMGtQU+9Zh4BMhgngBqvTSEpsnlwH+zs7ETNGJ4XCh7Fj3LfVuM5sOR5ZndnIBsZAz89GmXK\n39fX12q1WuGmGA6H4QYjaNtTZgnqdiYG5cfe9zAAmBdq07A+fC1Iz2sAlgewm8vlIq240Wio3W7H\n+nj16lW4aLmuV8SFMcMooC/oOhgNwADsHzVvAOLsXZhBlxWSIijZdYUbPbCr7p4qFAoB1mnuQgJM\nelFGD7xl7QM+PF4OGcG1GS+CpYl9ISiW/eHxMWnbGnPiSsxdIh7TkC46ab0UOJPg3+MafId/afxH\nqjy9OTPAPwaU+6TX8gXi7h4HTy/52TYxKylT8VJLwY2zPlnX3XTvrOv6tZ1id6Di4+LzmMXw+Liw\nCdwiSGOFENbp+POdnxqbX6rhz0WJkqWwWCzCGpJWligxG+VyOVJqpVXAN8IWoYxfnO9LK6HE2GC5\n4hvHwuS+xFfk8/moh1IsFlWv16OYGvcDNCLEPQDQBae0yjRAKHY6HdXr9TUrj/7i2vDgOxQ+Cg6/\nOKyNW+XdbjeEP2OJT97dtIwL93d5gJ+fIlRY1s5kMF5ck9+STutWJcoUNqtYLOr09FTSqlDY9fV1\nKB7fl51ORycnJ3r//n0oJZgwYgO22fzcIU9vJh7D9yTfA5yyXjgc0vc88w/jSNYPFrhnllDozEug\nS+sy9+DgIOrfOPPg4A45AggmPdzlGSxFu91Ws9nU/f19VCTu9/ux/wgy9/kk9orrs5ZQzM7wM8/o\ns8PDw4jLce8CwJk16UYi/7OPdnaej0mgH5xFRWA8wB+d5CQB1+CejAX7kixDXJsYP8TveE0fjIub\nm5voB/MNm/WSQbm1mJM0kAzBmga0+nedqXD6yq1oz/ZwMJA1CJtcEln9ZQLT6wKsfHO6/y9Vxs7S\npACB9/k7BThZQjYFSemz+r1dsfv7P/X8PAfPynUQHg5OfFxShmMTI8K4QkO6PzpVMOn4+3h/DA2B\nRL8PDw8jU4D6ByhBT/ldLp991QhmSkZzPgxCAqsGBY+QcaGAi2M6na5Zk/QL0IQFisChgBrBf5LC\n3eNCzFlHZ0G4TqFQCD85+xbQhKWFYeGl2zn0D+XnhaoQvrA3zWYzrgUgoEAdY+/rM907PAOZM3t7\ne3GgG0CDOapUKlH0rlQqSVK4tegrcQfEAJFxc3Nzo3fv3uny8lI//PCD7u7uIn6kXq9rf38/Yg16\nvV5kP/kYe6G2bTVYscPDw1iPBPUSVwPbRio8MTbEHLi7EtcCTCHl/lHiyHXmHjcRABW2AuCOwscF\nhuJ3Vsf1ibt42Fs8F7KN92azmWazmY6PjzWZTHR8fKzFYqGjo6NwMXqqdKPRiNTxQuH5ZOu0lgtu\nU+Qj4AP3JQwe4yqt13hxQznLbYlrCMDMOiVdn/nwWE7XN8hlxphxhP3hc4+fkrQGwIjl2t/fjwwh\n5hhm8iWDfavl63lA90PzubQKQnWB6ErLgUUWg+LXdbDD36mLZhMYSAEIgtIXd9YC8b44EEgVtH/u\n/dv0efos9DEFKllAZxNweak5s+XWBe+l90xdPz4ffo6Fu+AQfq5Q/FousDb1+WNgT9iUtVotrH+o\nbyq7OvPQbDbDZQENzriSscN3AW8IGhiGXG6Vosj8UrgNgUCUPcFqHniZz+ejLsvt7a329/fVaDTW\n4lVQADAjZAwBFIbDoer1etQBWS6XAX5ggLrd7loRKmf1KEzn4wi9vVwuI3DYs3CwwLCqOYBP0pqf\nX1qtDYQna3WxWGgwGOjh4SFiGRDAFFAj/oXnqtfrcaowMqbVaukPf/hDBC7O53ONRqMoZNfv9/Xj\njz+q1+sF4CN1tlqt6vLyUu12W+12O/bCYvGcccG9ttkIcsXNJK0OoUTRu7zF/UOslLtBmKNcblWA\nDyXs4BDXBNflmAHuhbvC5QigGpnlgdmcrkuwMy7AfH5VCwS20M+HAUjAamL9z2YzHRwcaDQaqVwu\nxzEQMJMAq8lkEvdz45uGrIS94Tlx8VF1OmX9XD8CQFi/BOSyVyhS6CnTjDXzcn9/H6wpoI9qyQTz\nM1/EscC0kGnkzOPXX3+t4+PjMLCYZ+YQlmfjmvvfX8Y/3Vwp0lk6zmAyeU5rO13kFfNSq8iBQKqQ\nnf52sOEgCMrRQYffnwXCwBLkBrJ0xOl9cX9g2md/nQVMsoBP+ltpvUaGC+D0en6dn2pp35mTlJpO\nWSF/HhdeCBL3xSL8fbwZYzYcBbLcr5zO6bYbFs9i8RxAh6Bwf720iiPBZUHxMqx1D8J0NwiBbbwP\nPcrrg4MD9fv9KOXOukSQME4cm+4WH4Dg5uZGR0dHITCxdAjgw3WSy+WimuXBwYG63a4ajUYISkkR\nJHd/f69qtRr3WiwW4RqRtMYMMBbEFJDV4zEguLiq1WqwHygSrzXhbiesa99fy+VSn376aQAUhCvZ\nRygq+ozywJ2B0L2+vtY333wTypI6NFSTHY/HqtfroQhvb2+jGNhy+XyOz2AwiL4j8xaLVdDzNptn\niuzu7ur9+/d6/fp1GG2uZDgPyt1S0spVyZ5HNqXBtMgAXDPScyAse6VWq625/AgyzefzGgwG0Q9n\n27kmv+V7xGF4GvRyuQyQvr+/H5VU2Uf0m7lvNpv65ptvdHZ2FnILlxJgmj3hx0fgDkLf4M6RVgdm\n3t3dqVKprAHy1IPAPfkbdpT1RGaTFxcEgPs1AGH0zfuKTGNM8vnnEgMAGK5BWQCO6vj222+1s7MT\nxewwYvw8q01ta+AEAYHQppOe1SB9GKvBd7MUubsA/DPpw5RghLazACmY4btu+TMJULgOQLyxgNOF\nlLb02j/X+k/7mgVwnDFJFXcW+7SpIeRTMOL0Yso6pffgOg4IpXU3TfoZwjllqPwZfU4+BubEQdx0\nOl1LpcPSL5VKQeX70ewoIKwyBMZyuYyaJQh9ytYzPgjrQqGgZrOpfr8fcSfD4TBiVVB2i8UimA6s\nKazHnZ0dtdvtAERuXcLUULW1Wq2q3+/Hcy6Xyyhc1W63I5ZAUliDCORGoxGBvr5e3Vr19TYYDNZ8\n8oAtTrf13wAgAFyMVSrEp9Op/vM//zPWcqVSCQFcrVbX4gOQLWQKUTQLvzsgA1CFC8c/90qg/X5/\nzbWHmyCXew5O9HohR0dHv+Qy/qBR7wPXijPZ/E1AqbSqiwIwATwiD1xBUmUX5gpggouDNfn27Vt9\n8sknwUgShE2sA4odoMF9YDYWi+e6QtKq0qqn4cNQ5HK5AB7sj4eHhwBkvV4vlD/rtV6vazKZRMaR\npKggm+4h1h5AGcaPezvDgovPDdrlchnB48wB44keQ74gi3AxszfSEAWuSeVjjCxncdGHsKcE8MJ0\ncZ3Dw8NgbnE940ZirwKAmLOsttWAWP4hUFKXgH/XrWRpZQXR3NJ2JZoFZJyN8Ws5SHJ2xBkZX1R+\nH9+wriz9WVIAxLU8L9yfmetmgZYUfHn/0s9dIPgc/Fxlzqby1L0UlHDNtH/e99SNln7XY4vSefeG\nsvONxXxtu7E5sRIkhQBeLp/rD/C+pMhSgfkoFothJUrPzwjVSultDtiisBQsIsqQrB4UI6m9pGfu\n7DwfKEh/cEEQr0EtDqhed2EiWGBwmEcCNu/u7lSr1TSdTsOHz/4ikJEGNcxa4jVgi7VArA0CDkuX\n2AOvnYKlxxxIq5Nccem45f7rX/86XCmwP5Iik8iZPvYBLrFcblVTA0AjKZ6T4l+sZ34DU1AulyO7\nC/DEHACAyOaAcdtWA2zwXF6/hXFgbGHZmFPfwz5/Hk9xc3MT68xrdHjGCsCW/tzc3IQ7j2wXzjbq\n9XrhinIwj5uk1WqFiwLZ74wsCpcCf/P5XCcnJ5pOp+HeIgi03+8HKMNw9aBPZ5Wo0QIAd73D/mbO\nPY6MuA/Gmf66/vH/CfJlHeFOJ5CY37u7ibgxZ2gB1Bg36AHXdQ5YyV4i/RkjHpDnpRKQd5vaVg/+\nk7TGYEgfnh+TKuAUZCBkstwt/hsaQt1ZE0lrwpc+eLS4uywAH1ngwMFV+s+BA8/uzEUW48M903+p\nYua19yvr2ik78XObK/7U5w8CBiHzHbd6HYT6c6Vj5N9HKaSMTdY1/ydg6/9lIxiQvkNNo+xIU/Wx\n4IhxgAbCAeEB1Y8w9dLwbpGjkIlfkVZr59WrVyHIpedTcQeDQWRPYPEhuNxnj0JmfAEY0M4ooMFg\nEEqEg9QI5iRyH7CBhYg17a5arn14eBisCs1ZDBTcYrEIqh6GD6sv3eOpLPjb3/4Wlm+hUNDJyYlO\nT0/XXEMoLa9+ydwR0OsAiz55bASKgecEgKHIYbMAhhR8k/STgYO/VAOM4PKAbfC96jE6HuMEGwFD\n5W5a3FiAXSxyxo+6NScnJ2HgoGx7vd6aq4IUYAf3vq88VsUrGvuhlPShVqupWCyq2+1qMpkEW+Lp\nuE9PTwFW6C9rgvWSMtej0WjNPcPzo/xd7klai0NzPUffkROMw+PjY5zf5HK10WisudbpFwwv1wYg\nOcuTpm17kVMYzFwuF3PF34wV90X2URQuLSbnbSsr3gUzDwztJX1odTMZKUDwICGaT6Jfi/eyfiMp\nQE0qBHyROFXIc2SBkPTa6Wf+/K6gsxiILDdJyrD4okmv5X3w/joI/Cmg4vEhWWAoBRwpCPN7ZzEu\nm1xDKMgUsLq/OmVRtt08IA+h46mg1OgoFArxXTJNACJkCBC8R4Go8Xi8llGCUPB/HPXuAuz29laD\nwUDNZnNN6J+ensZhf9KKISOmBLbHmZLJZBL0d+qnptrmaDQKQc6zko2FUmYuy+VyWN4oddYbIMuf\nk/vCNAGaUAoE5iH8vVaGGxCsZ0+rpBLrYDBQv9/X3/72N3W73agMSjVcwKa0MjA8UwX3F8oB9mRT\nqXeeCcbED3sjSJR1vq3GesbSJbaAZ+RzACdrfjAYxNohoBmXg/Q8fmRD+fWYM8aGeKD0b4JtuRaV\nVtNqvMR0cE3WH3EsXJv5lBQxFa9fv1apVFK73dbR0VEcE8EzEPtCn2DDPBjbAdR8Pg/mhnXD/WFS\n2X++dgEQACze93FCjwH8fC8wRj62MGEOGj1+krmAHQG4AaQxmthr3ieeiecjoBZmiHttaltx69AY\ncG+pUnUl7r/jM/6xoJyF8e9nKX82FQuGzYdA8cUEgEoVa9p3LE/pw7LqHheR9TxZ72cpewc0KRjb\nNIap8v+5TAP3YB7S+XDWIgWQWc/gzJNfw8fvJYDnAiprPLbdoO0BGghcfL7SszAjtY6qkCh4LDgE\nG/+7heZjARvglpOn8y6Xy6g1sb+/H0IRv/KbN280HA7jaPhGoxFuFKhlD1ymmqunOXJPQIUH2tE3\ngn3dtYL/G9DPZ25NEZ/j6bX8lvidbrerX/3qV3GGiI8Zc4Hy5PestcFgoLOzsygshsAmXgda2un3\nfD4fisXpbJhExgwXM2wP90bOwB7QcOF5PBtjtG1W0BUmlq9b4SgaWDK+T0CzK02ChpkrAsYZA9ht\nnhngjkuBAGXYht3d3Si9zlxdX1/rzZs30adGoxF9ocEMMl+4OCWtncHT6XTimXG/cShmt9uNzDyu\nybPhBsMgwB0mPQf4NpvNOCfKjT/pue7NmzdvdHl5Gc8qKfa2tJK9i8Ui9hDrhWBtWE836LzyMKUO\nPFUckAcjybh7yARMaKFQCPY1NRB5loODA43H45hTCklS6mBT2+rBfw4ooJC8ucUOCOG7bHoXWKll\nnrp9UuXHd3jPfdQe7Eo/HQihGFIA4nSZswqbmJWUIXpJyaaAImWMfKz8mt4v/5f2fdM9nZ3wBZgF\nJl5ik9L7cg3/XRZTxOc01oF/n+fednOLjIOwUOL4xVH8XscD4QDVj5Xn1pnXG6FWiVcYJVMGId1q\ntXR2dqYvvvgisngIQAWE397ehvukVCpFJolbw2REAJxIBcflgv99Op0GGCDd2EGBzzkWrbQKlE4F\n7c7OTpQSdzD79PQUQcOz2SyybebzeVSklVaH0mFY+BwBkM7PzzWfz9VoNFStVoMRYe2hXGB1UGQO\nynyPM1ekIrsrjWdjjnyto9Bhb7i/P/c2G2NQLpeDhSBo11kCl6EoYjJxcElStt6tddgHfo8iWy6X\nUXUYxoCxef/+vXZ3d1Wv19VsNjUajdayyjgzygsDSgpWAVCFQQkDgKW/WKwOzzs4ONDt7a0eHh70\n7t27tRg85h+w6mAY+QnTAKPEOVR3d3ehrImpYT/3+/0oYoisA0x5zA9Aw8FAr9dTq9VaY7Jo9I1Y\nkuVyFe/F/f2ZMCrQA/SBE8HZS8h25kDSWlzNcrmM88OoZ/TS2t5aQKy31AJOYwiyPk/f5/8UkPji\ny2IZ/LvO4nhglCvh9DXXkFYAIVXim57TNyefu9spvY//1lt6Dd5z4JAFRNJrp83dKVzPwRx/p/dK\n+5mCEx83roew8Of0+ffxcJCYPvfH0BBO4/E4/NZsRKwSBCBWZblclrQKzgaceKbL3/3d36lQKMTJ\nwFiOg8FA7969Uy6XCzAEa0J/qtVqWPC9Xk+S4vvSOgtJH1EmKFmyD3jP0yA5NJDnXi6Xa4oL1nF3\ndzdAGMGf3N+BLPvAA3BdydH/YrEYVUW9FDfZSxgZKErW1Ww20/v379VoNLS7u6tutxtjx72RAfTD\ngzs9+JN94EIcgQ8j4IwI+yVlx3iNhc71PSB5Ww0lg9vK00xZJ4yBpwVLK8DCOHrlUa7nAJaiXe12\nW2dnZ3Fd9grjTAG7k5MTSYpA2vl8HooXF9xsNotAXGmVPdTr9VSv19eU8N7eXhypQAYOIGmxeC4R\ngDHB86XMhJ+fA+PkMUyPj4969+5duMekldxwhp7vL5fLyIaCuQAguvwnEwc56UGurGncOP4ZrBHP\nwDrH/ULfYLEmk4lyuZzq9bokRYwZMSVkatFv2CbG2NmzTW2rbh0pm/Z3xZYCj/S3qUDDqvNYBfdl\nS+spwalyS+/rVlaqPCWtKckUWGSBCr8vCPalfvj9U3bFFTXP5a/93mlf0mfIYlByudxaRDv9SZmP\nLPbCv+Nj6s+JcuFzR/9ZIMfvkQIlf55tNq/DQq0Q6FwKauF7JVASdwaCQ1oVYOPsmPv7e3399dex\nVohFYbypXAp1DWDwuBO+32g0tFgs4tqSgrKFMmd9jUYj3d3dqdFoqNlsBuhxgMj/uVwu6pnAbGJl\nQzFzP7IoHLR4MDBKnswfPzgQBc/4YdFKq7NbsJIZjzSwtlKpqNls6vr6ei0bqtFoSNIHz+fWqwMd\nVzbuapzPnzOg6C8yCFACO+W+fGdWuB7MGHEV22qpocG6Zd0ABjHyYEMAgrh4pBWDxPyWSiV9++23\na0wV1jfX4iA9Ulr5h/wsFp9PEB4MBjo/P9fd3V24DTzFmPUFI0gcFUGgzLfvHwA3VYmZ0/l8HjFT\nGBOAy1RnuBuL1PDZbBaMI24e2CMv+4+MA3ABInDBIPfo2w8//KA3b95Ef9jT7AN31bpBzjjB9HgG\nEtemfguAnnpKzt4AUGFrYHy5Pr93eZDVPgpwIq3T+iwIpzRd4XsQTcouULnSYxMQPlkK0wWCtAqo\nhbb2Q4/cbeD39UlOmQlXpOn73g8sTPrpz5QqbK6Z+il9nHjtQMw3tTMpm1gHru2shlf4pPk9eN/H\nm/fSDYvicPeZsyNZTIz/7QDJx3SbDYsJ94yzSoABXB8Ezs3n80gfdmsd6hXQ/d///d8aDAZqtVpq\nNptRhyNNnweoLJfLtawEhAyFne7v7yPlEEsWAYSVhr+43W6vgSlcq1j1rMeUGSBFkbgJUkM9JVLS\nGlDy9Nzb29sI8JW0JgMeHh50f38fwg5g6AYDAtD3TqFQiGqZBE0Wi8W4n7vPABWAKBSD19IAnLBP\n6Etawh2WCreeM4jufvJgSErE89ttNTKwYDZgTIrFYsQhsa4A03t7e6rX65HCzrNC6e/t7WkwGGg4\nHEbJ/+FwGPVDzs7O1gKxl8tlKGPiue7v7zUcDlWpVDSfz6Omz+7ubqSvshZZ4wBRMn6YO+YZ5sVT\n9InLcLa7UqloMBhEufpGo6HxeBylAxgH4mJwJ+HOgaFhrVGUkb3schX2ETewA2FJ8WzUYEEuLJfL\nyMxjLyAvCE5ljJ2VonEvntuBXKvVCtACq4ZRgEyBFYKB4bkZ75faVmNOUJ4IFDa/B/c53cz7rlSz\n2AwEv3+WZWUDYFLLm3t7rAnXcEvI3RJQnL6oUrZDygYa3I/fg0AdwKRjkQKllGHKAjZ+nbRPKUAB\nlDhtjb/XY3183NLX6dz4M6VMjbNPqWsuZWf4Ox2Lj4E5QXFj2VD0qVwuB42NJSkpAmWxPBC8jLW0\ncgl88cUX+qd/+ie1Wq1gndK9IX3oBhuNRiHU6AM1RlDOpVIp4iPYF36mC/2Bzsdq9APTEGTua5dW\nh4nhk0epsdddQLJm+A1ABmUNa8Hz7OzsBC3PIWysVUBduo4AB3yOZT4cDvX+/fu1UgPOGhIvwFgB\nanzNwtweHByo0Wjo4OAgik+5C8QZE54ZS5lsCL5Lhtc2G3NJGjGHWKLkAFUen4b8QGEBaN09BoAc\nj8dxBhVxGJIiriOfz6+tH2dEAIK1Wi3S9QkAx/AkuBlQCeCRFO5HQClj77rBi6Q9PT2fA8XexV2H\nOwcmVFpl72GU1Go1LZfLyE5yBtNZG9Y6ip97M+YeR8Jev729jfHl3nt7exqNRhqPx2sg3wEO93AZ\ny+f8o+4R8+s6mO8CUJDN6RlgxKwhI16qcSJtsUJs2lLGBBTH9936z7qWC+RNqYcpc5AiT3/tSJL+\n+b0duW5iJFIXS3rPTfd2Ab0JRDjY4O9NIAHFkY41/cpS6ghJrHi+70rQ3VFZ983yJzq4yOqj59an\nv0+BKb97if35pRtMEBtVenYh5PP5UDKkCyOAKJzm7AfMQD6fjwqiHORVq9Ui8HN3dzdYF8YHC2s+\nn8c9Eeqz2UyTyUTFYlEnJyfhIvI+cw8AELUKoO9hflACnp0EW7JcLsMq9cqQktasWd8THrTnmTaA\nEFxhZCM4g0rQYy73XHmWYEiYDE9zxF/vIJIU6dPT03hO3zdY1MwPgtWtbGQN4BNFSd+k1UGDrFmY\nKtaOK3Dq1my7AJukYCoAbACxQqGg0WgUh9tJK/eJB3HDbsGEuUzmkMTLy0uVy+VgJAj4Rg4/Pj5q\nOp3GuLgrHUCDAmZvoNilZ6DDWVGsPW+AnkqlotFo9IHRw/ol7qPb7QazeHFxoU6nE+AXdoA1zLrz\n1HjcvHwPVxagB3ewsyAOcL0YmqSQCa9evdLDw4Pq9bra7XbEvnm6PnKWv53h5zmdQaVfuVxOo9Fo\n7dRo9qob6R4wXigU1tKikTUYKZvaVsCJU92eNphaOG5ZeCaKNwSbMzAgM2cJuD4I0ZV1lssFwcWE\nuOXvAIrN4ddz5erfow8pU5P1PCnocIDm4+hj569TMObf8f5kuZ0khYJxdM3fWYDP++8AhPt5H5jP\nlLZkE6bsioMwd/850k/B2bYaQhyFlcutykBjZeNOAajwTNCiKF18ygiVSqWidrsd6YcINeJVptOp\nZrNZBKzSB5QFJyNLz2xMr9eLVMJaraa3b9+GO9PvjRCjj3t7e2o0GppOp0FLQ1F7jQ4XuoCKUqmk\n2Wy2JpTc0sSqRYCxdgigRQ5AkXNtBGGx+Hz+DrFSBEH6PgUEHB0dxfoiawmmxNlPAgelFbhBuDN/\nKCHcMMQNADYoh88hfmQg4UbiRGbO7Lm7uwsFSbbXNhvZIex9mCDfo8wxQALlieJCkfEe4zgYDAIM\nE4tD7AZjBAPpLnh3Ly8Wi3DjLJfLMBL6/X4Ec3M0AfEcAHECl5k31loa9+LPiFtHUuwT2BxcOs4i\nciwBOgXQ0ev14m8H0hgZPCfj5rE9DiQkRYD3bDbT6empJpOJptNpAJPHx0c1Go1YkzT2jfede7Af\nceEir5En7KXHx8co1AazA4BkXUsKA5R1/ZJrZ2tunSxLN3XneAqSlJ2OmwIaJpSFxaRDO0orq9QX\ndupy4H4eCOclqlOAwYZyxsSpOleeKRhAADtLQz8dIHh/N7l1vG/+dxZQ8XFM54HFJWltTL0/L82D\nP5/f0/vA/LoQwFqHkuQfY+kuH4998Ptss6X0rJdBZ85gQO7v76OgExYkjAc0KoAGCrjZbK4FVO7s\n7Oj4+DjAy8nJSaSkLpfLOCCPOAbp2SKq1WqhdDudjmazmf7+7/9ep6enkhQH4S2XS02n0wAaktaE\nv1dS3d3djZNoHZTzDDBApVIp1pfT/J76m84lhekQdMSI+FrwmhUEJKNAnZ6/vr4O5URm0/39fZwg\nLSmYKAdAsEjul0cws1cbjUb4/AGDjFen09FkMlG73dYPP/ygm5ubADjL5VIXFxeRtYWSZPzciNtG\nc+UKeOT8J0+zBsCg7JkbD2yVFPIXRUgaMHvk5OQk7tHv9yNuBVBEbAYMBNf04PJGo6GzszPd3NwE\n68aa4eDFYrEYgbOk5zso9Mwfz4LjPY8hgSnkvB5irWAVJUXfWB/cI00GgC3K5/NrtXZYt6QYs/7f\nvn2rcrkc6485c28DzJW0SntOQYfrHrwH6EsAp7vkDg8PI5vPjUx+A4PGXDJ+7KWX2tYDYqV1Bc/f\nWEIo7ZRRSH/rn6EIXGm5j5fBk7QWDOQUs8eSLBaLUCAoTfzCLhxTHzLK260wru39T10h/vtNgbbp\nePnfLBJvzuakLEvaAIaS1hYb4CFlgXxRpyAl7TPvZ8WKOABjvLi/F6XyZ/Q5/RyrqEwAACAASURB\nVBgaQtAtSmha1rIfxrdYLMKKZtNiSSEYeebpdKpKpRJVRaXnYk0eT1KpVPT69euwgNg7AAesQs73\nGI1Gms/n+rd/+ze9fftWrVYrghiXy2UIc5QlWS+j0Sj6xj5YLpdRsRPhyjryIk6e5SEpmA2eQ9Ka\n0PQ4BrKYqEnB7wFIAB9PkXT25/b2Vl9++WXsS2o11Gq1tb0H2EEoSwqZ4srX05v9cEGYK8YDpbBc\nLoN+LxQKEWB4fHwcmUknJydRTp9x8WJt22oem8A8eXXjyWQSrB2yCzAD8GROPU4BGcxRCADL29tb\nXV5eBpswHA51fn6uxeK55DqnHy8Wq9NwYV92dnYic6fT6UTgLWufjDkPfJVWp3sDtnDJYThIKxdi\npVKJWj7D4TDqlPhZP8g6YrSY93w+H8yaH8iH7PD1BIDCPeJndbn8ZU4oXEiWHP1y17yzI8SFuIz3\nfcB3mR/YUUA54JQA8FKpFDVdYFZJX/bzqkgj39S25tZJUWKqJFOF5cI+pft5L8vNgXBDODoq5XO+\nm2Y98D36SzomQt+VK6xJFhhx4f0SMKFPaVxH6pZxgJYq5tTl4d93MJU11twb4IVic6Tsvlq/L8/v\n1/F54vv010FICnjS3/nYpCAn7f+2G9So+1NRjDQsQBgqgC3CCKBBsKHvFwJZOVTv4uJCP/74YwSG\ncvZHLpeLIEPiXUhJxYLB572/v69araa//vWvenh40KtXr0Joe+wHghOhh9Chb25tMf9+ABlMEmX5\n3f/te4rXuJeGw6GKxWKkXQLipBXoeXh4CKscEOWsp6Qo1MYhh7hqqtVqGB300ZkVSWtF87xuBGBP\nUsTsMDfMe7PZDEEMMMNifnx8VLvdDmVJo6YMwv0l+vuXau6WSCl/FA8xNrBqzpowFx4gDMMA4ISJ\nePfunY6Pj1Uul2OvkJ1TLBZ1dXWlzz77TKPRKNyTBwcHsRboxzfffKOjoyPV6/VIG9/Z2QlwjkzL\n5XJrqffSSuawTlHSkgJMeTgBVYNxpXjQKsqfZAqehzgUSeFWxJgAiDBuKHJiQZiTXq+nh4cH/frX\nv5akSDtnj7L+eTaMHi80yrOXSqUIdmYfOiD36/ncSor94fLMA969xIe7tja1rYATj1lwt430YexE\n2nm3/gEkDiiklQJDuXJNj/zmunzuAiOrD1zfB9s3nP9L4zKklaWfXjtlNHjt/lkfFwcojpb5LIsV\nSZmctPlvEBhejMp/x/3cveJ98ziStK9Z/UrBafrddG2kQMTB6ccAUsiAkVYCjHXAmsEiQzg4XYq1\nDgXNepNW51DxvAiDf/iHf9C3334bawX2hriP/f19tdvt6ANpxFg91WpVo9FIX331lSaTSQAXrBv3\npTsd3el01hQx9wYc3d3dqV6vhxXK3iKNFoYFS4zxk1br7Pr6WoeHh6FsfJ7z+dUhnyg7LGqAgLRa\nQ/V6PWIXyAyhIit9H4/Hse889Zc9uVgsdHR0FGCDeg9Y7cgQD5hlvljr9C39HsD/+vr6A1Z229k6\nWM7u7pBWGUbS8/4kGBnAiRuPViwW1+q/YDzO5/NYl1jyFBvM5XKqVCrB2BUKhWCWYMlIb0UGeTB1\nv9/X0dFRlIt3txtgCAMAdpKibbTpdBrA9fHxUbVaLeaQe9J3QHy1Wv1g7SCzWS97e3sRsOqynmdi\nLzqYcR3FPFxcXEQsDwHysFoewwJwwPDwSs0eX8j3MCJcD7ph7gwZsh/WB1bIwwHQ+ezdl0D31tw6\nrpDTDroASj9LlZFPkvvOpJWljtU1m83WAAq/ow/4wX2g+S6L0pUv12BSPYDJla0H1aaKmXu5snfa\nk+s7cHFQlLp2fJy8L6nV4wvRf8OiSZWA3yMrONnnk9+81LcUWPn7/tqBj49ZulY+BmAirTIVcONg\nFXq2B0KWAD7qkMzn8zgojTWA0EIw4mKk5kSlUgnL0dNeiRNZLpex7huNRvyWWAq3YJ6enkKgeuEs\nqmQCljjQz0915R9pls1mM6xhd025yxBAAUNBPQj6PR6Pw4XFfsAy5jfOWCLM3UXg1vpyudRgMNDe\n3p7evn0bzwvVT1o1QngymQQIYYwKhULEijjgJAbC4wPcTZWyOPyPq8MpdbdWGZeX6O9fonlGEuyF\n9GGdKQ/+Rp7ALnn59ul0GinxMFme9XF2dqbLy0t9//33+sMf/hDKejgcRqBzv9+PIE/mjXk+Pj5W\nt9uNOiScVox75u7uTq1WS8ViUYPBIIwFAHValp/4kZ2d5xL+nU4n0oKR+yhk4pd4z90bkuJ+HiQK\nkIIxlZ7XBuDfD8xzoxcWBObPARB9J86DfUUBRMYMdvPu7m6NDeL5keNe/t/dQ4AtXnvJiZSNcp3v\n+iurbTWVOO1klqWPJe9AwMGHsyBck8b3sLj8unw3CwSlLhkUSpYSTJVmyoakuehZLhl3ofj104Da\n9L5Zr1M2w/uVunTSZ0mf09Ex1/ZFL62Qu89VKoD9dTrXzrD42Gf1LWu8/Rk/hgYTQJYMdC4Cgvk8\nODgIIeEguFarhfJlrIlNAIBAmcOQcGw8wrFYLEYMBW4Ip7xxOcGsPD4+hvuHPmDdsAb9YDXYl93d\n3cgIABgREMghfJ5iK2nt3g60F4tFZKwQZEs2gINo3D3ud6cRoOllvlP5AmAisBU3F+ABMElBNgfz\n7rYBlHjsDPE4ABoUghtCKDNiUrBqeTaCLAF6Xjxrm40xdMXD2ABGYMjcmPO4E9YfqfXT6TSClmlk\nrJ2cnOgvf/mLPv/8cw0Gg2BKAKgAQQAl65Vr4WKh4i8MJaAfxqJSqURtksViVT2ZfcFz5HK5qOpK\n1hmZVqQvs748sJc96QatuwF9f8CYeI0jGCtn3Uh9z+VyUXeGmDIYP0AB8ge3J64xlynEaaUl7N2F\nxdi7XGI9s6c9xsxjFmFX+NsNe97Pals9Wyd1M3hHnW2Q1svEuxXGIKZWPs2pJqevsHpSRQfVlDIa\nWb4xBxdZz8f9s1xOL4GiLFfGJjdJlmLeBBCymn+eplFm3YuNml7XmSaumwI2bz6H6Xh43IGDniwX\nltOKH0PL5XKxHrHEsFDoJzQtQsozCRC4WGBUfETIuNI7PDxUu93WZDKJVMydnR31+/2ImyAqHoVB\nlL+7GZ+enuJ8mvl8rm63G25OrCVSKBlnCrsdHh6GYELwLpfPgbSUt2aOUP4If2mVxlgoPJcTh93x\neizOjiG8YUYADSgoQI1n6cCccP4HLMty+Vw9ExCHAmUPnJ2dhZLyNObJZLJWAyiXy8V3OIeF50yz\nNVCeADpJEaPBWHgV3k8++WTNRbWt5q6c8XisVqsVGTweuIucxB2I0nTrmvFwA8hjrSSp2+3Ge5Ji\nTCljj2vNg289jgWrHTnvQAP2Z39/P5g0ZwkADfSL9UWwa7vd1ps3b2J9AiDQHfyeNSCt5BX1der1\n+hojAWjGYGDfkUmEnvOq5YwxDCZyBzlDBV5q5mAsAQAB+IBl1j2y3DPj+B5yDCOFNQ4D+fDwEMxq\nvV5fS8VmDIil8RiezDX3v7qCf2ZzS4iFwyJPLX1HY5uYDoQKloyDFb+uRyg79eXg4yUg4e+7snUA\nkirnVKikDI5b05uAht/H2Q+/p7NLKaBI3TJcK32mFHS4Ukj/9nv6WKRuIxfgjDnuN2ennFlKI8bT\nZ0sb8/sSRfhLNVKDl8ulTk5OIsuF01yXy2X47vnuzs7Omi+ZeYWCXi6fT/OsVCpBhUvPTAHpgq1W\nKwA8qcRck+A5XE4oU0/hhomBPgcIEKToaxZKGmHkYJ/r+lkkzmTQAEWwTAi6fr8fghlqXVrNMeuf\neANeu08fEMffDw8POj09VaHwXDDMLe9+v698Ph/z46npnmGD779YLIZ1fXt7G8IfC90L4j09PcX7\nWL8on1wut+ayIn6BGBgYgX6//1HEnJB2TtExrG4HA5wbhQVfKpWCYcLiBhzAXuCCgSGDWfnxxx8l\nKdJmHx8fY23A8mG4IMMBGswf4+3uBcAPgah+QCBz6udUSSt3hvScHUcc1Wg0itfu/gBosl48nmw+\nn+v4+FjD4TCYjWKxGOX5YafcZeiBtZ5qn8vl1kAzz0Z/ACH8TlIYTV5iA0bFgSLvM/e+L/gtHgnk\nN6wfhR/ZkzA+/AZg6s+V1baWrUNzP5srMGc0UvbBGRQmAgHPJk6DT5kIBitVtJI+uDdUHL9P2QpX\nJA5EssCAN79+Cgb4P1W07qvz370EZvg7BRlZjetmATLeTzNm6AfzkMac8L9fE+WRgke/Xhrz4iDH\n7+Eg7aee75dquGVgDs7OzjQYDGLzkx1DWiJBd8PhMKwNrJz5fB6xEIvFQtfX1yG0GQeEEkAH2tTn\nxil4fPsIUKx1qF5OFiYWAncR/u9cbpWBg58bxcp9AFP0kd+j/H09Eo9AITKAAGm1uVwuMiuYX0/r\nT/ekgxQ+29nZ0XA4XEt/JnaAOg24lKQVEKJ+htd7aLfba0fcf/bZZ+G3x0XEPFCKHaFOfBGuJ/r6\n/v37CMSsVqs6OTmJQE9Sc7ddhI2A0Lu7Ox0dHenq6ipOA6YUe6/XizXi2Uh+2JzLBVwjHhyK24R1\nhysBmc9rP4IgNVRTgwi5AVPO/AAYyQZCFmXJF+YKhvL+/j7AGcwKlXwdWFA75+HhIcA2fzebzTBS\nYD9IPwYMejyKr/Xl8jl+6vj4OM7+kVbsN3FvPC9yGMOEa7ZarQBGHu/FuFLbhfguZ7WRE4AXdDUg\nRVoFTLOnXFf81HlRW8vWcWsY5eSxFY5E0996EBwPDHp2IcX3+U0KBrLYAulDhsHZD+9TChLS5/D4\nDPrpit37t6ltYosAW95PR/DOmKTXyWqwTk5p8n2eAYbKr+mAwp89C8il7EYW0PIYnZ8aDxr9+xjA\niWddoHio0FgoFNYOOcPtsru7GyWn/bTixWKhwWCgwWCwxl543Q6sb66PYPCsKwJEocQXi0UcVIZV\niiIlkI/vEMCIAQGzgtDEIkqLHfIdLESEmbRiwjAqsHrz+byazeaav19a+bxRIIC2LOPB3WduXVar\n1TigjmsOh0PlcquTtzncLZ9/Ls3ebrdjfBHUrVYrFCjZQdyb+AOuT+EwytWnqdPSs1X+5Zdfxu+4\nbrFYjBRn5nybDWANqD4+Po60dWIwCKYkboffeQwKTGIu95ytxfrB8nYA7ewX4ABWgXF0d7K7X1D+\n1CFxtxTzQHo7MWGNRkO1Wi3WA6DdS9ATCE7NFGJbeCae1dk3+uSGA/EruEyIP4PFRJbTd1fqPHOj\n0VCn04lzoVibjB+yvFarBeuEnK9Wq5pMJlFN2TNnYDYkhSsOo/L+/j7cZ8wFz89cOpDyjCBneLje\nS4zg1tw6CA4HEbyPUk8VkTMKDgRSl0oW6+DXTpWk/85dLH7frGfwZ+G1AwN3cfjvHJh4SxmTlPUg\n0MrdIj5G/pzpNfxeKR3K93wD8x5CgeYpgmnfHYCkzFQWWPG+p39vcuP8FJjcdsMK6vf7kQmAlUDt\nABQNUf2eaocAl56FBMoxBbesK1wPUMa8pqIq/l1YBizIRqOhwWAQ1CruDoQUKdEUasOg8NRDt1K9\nSN5yuYx6LIAeZ994Bn4rrRgkzisBDOEGgbImawn2hH75tTillt/c39+r2+2qXq+H6ymfz4cwR5mg\nSFjz5XI5DocrFotqt9sajUaqVqs6Pj7WcrlUv9+P8a3X6yGkPXsCgMYc+pEDXKPT6YSV7ZYpv3uJ\n/v4lGrJhPn8u0e9W/9nZ2RqT4NY+INeroXrqPGvC63Z4wTZnWlGIHjuIvHalDQsAkGJ9AmaQLaz1\n5XJ16jPuEGS41xshoBe2P5fLrWWBejgBwGY6nWo+n+vk5ESTyUT1el2j0Shcgyh7AtUBTAAW2EMv\n4Y98KBaLajaburm50fn5+VosCvoRo8VdNhgrBNYyVgAvWD1YRsbOSx8g173AG1lJqV5hTny8/bON\na+5/ae3+jxodoqOgY6yolyxhV7QsSqeM3JriO6BQvzcD48qXhY6wywIqm9gWWrpZfqo548H10zgO\nn0T6lvbBn+ElZe1WRlb/WMg8twcN+xi7a8ybsyHuF5YUwWObruNrIWV9sp4hZcE+hkZAJ6yAB+7h\nXsDC5JmdYfNCR9DXgA2CMKXneep2uwECcDMwf9DI+MYp3EQdBAI1UQR8lz0oKRgKFAtgqFAoRF8Q\nSlDfMBewQghMF1wONsiawIXi1D1KbrlcrlXrRBhK64HVBEdKq+BAB0Kz2Swsxf39fTWbzRgPWCdc\nKChEAlLv7u50cXERgpx4nEajoXa7HQATBU0ALm4Zd/WgINyNhBLhux7UuO1gWBrujJ2d52J+9Xo9\n1mlqOLH33W2PnPOgaOYPNwQFxFiX7qLzOCNcQLjYiFnxfQRw93omHjuBjGbNHR4eajweS1oFLqOU\nnaFfLp/T3DE4nDWXVqwj7KjLtUKhEDVZWAMYAs4+k3EDK0l/KUro8nMymcSeRxawRpfLZRyWiAwA\nJPJ99iUxQwTtHh0dBTsE4wXoB0jDliA3nG1nHQNy3BD9OXFUW8vWSRed/4/iSRVQqkxZKM6+AAy8\npdZ/VsuiiPl70yCm13Sr3wGT38Of2Tcw10uZFq7LMzhIyWImuM5LLp0UoPhnWKr+fK5I0+ulTEgK\nrFDSCGTeS4sJcd2UMXFryIGLA6ZNjNk2Wq/XW2O2UEgwENStALQBRhCazmBxwBifQxmzdk5OTuJ7\n7AEUHkKLM3Tm87mOjo6iCJWXxnYBjFsHwPT4+BjxGg8PD2q1WmsBh7gzyJaoVqtrdTnu7u7Cp43A\nhb0gEM/ZFFxCs9kslJOnetJP4kQIGkawTyYT/f73v9cPP/wQSow9hZAvFouq1+u6vb1Vu93W7u7u\nWqAxwl+Srq6uVCqVVKlUdHl5KUnhbjs5OdH+/r7evHkTIMfL/hPUiuJDmJN1wZziwoLmLxQKQcEP\nh8Ngd7bZvMgce/Dp6UkHBwehHFlLgErWHd8FOOMKkRSH8rnimkwmobxgIJhfDmj0wxy5HjIFAIjC\nBAzzPfoGWIf1GQwGIWM5+RtgTUwjawPXBsXaWF/EbC0Wi7Vqube3txEXwvOVSqUwWtwgZE17Zl6t\nVtOf//xn/fM///NaNeFcLqfPP/9c7XZb5+fnMd6Hh4fqdrtxWOjnn38ewdWwg5QCIPYHME2to8vL\ny2AYiT2BGfYwCcAdTAqynjk/ODiIU6YxNnZ2diLAelPb6qnEtCzl5/9L6wBhkzJKYxj4TpblneUa\n8H6gbLNYFwdE6XX5rVuR3j+UtYMUaf3QQwdnfo20ZkrWOG1yl6TNGZI0IJVN71aFWxlZY+h/O8DC\nopa0ViMhC0ikNF8KPpzt4l5pHMO2G8Iml8sFQ8IYYEGgJBG+7trweBEsLNaSp5i6tUN6LPPmcS8e\nJDscDlWr1bS7uxspiqw7ikQVCoVw95AGncvlAuSkwITfE3w3GAzU7XbDUj47O9OrV6/07t27YHBQ\nFh7Y6lajjxFKJ5fLheLGmnSFzm+Ojo70/ffffwD6+R/XTrfbjbNvCoXneidYhC5r3rx5o/v7ew0G\nA5VKJZVKJXU6HX366acaj8d6+/ZtxLQQi4DlS6Ay92csOWsHFgHQVq1WQ1HPZjP1ej198sknkYm1\nzYZsfHp6CmXMmKcup8ViETVf/ERaruPGBkGfXAOWke+5oZTLrbJTkEUO5mEGiH9h70haS0vf2dmJ\ntHKKs0mrUgmz2UzHx8fBOEgK5o+znUjxf3x8DPbSi6DBRsIgIJs4gwfXln8OaCI+DEAIW1KpVAJw\nYESUSqUoFog7i2DqV69eqd/vRyl91iVz51lKrpPG47EKhULIClytHk8DaCNry3UlZQf8qAr2Li4j\nYrGQJ1ltK+AkK+7DLe9UmWaxAKkyZBFmgQ7uKX2Ydst7/n8KnJxRcfbCrfn0d/7dtG+u+P2+WNz+\n/RQEpQwFr3k+rIV0fLwx9vh1vQ+MlbNbXrUwZU1S3yKbzClePnMLPQU3jqi9j1lAMp3HFJBtsx0d\nHWk8HmswGITCdqHg6w7Wg/GhUiYC9OnpKYSB09x7e3sRLzEcDiNldzQaqVKpRC0T9+mjCFG0lUpF\nj4+PkQo7Ho8D1FSrVdVqNZ2dnQWFjyXocyophBBWEC4ghGe73dZf//pXXVxc6Msvv9RisYgDz7D+\noMBZgzANWHEECRP/QcMq9do73333nY6OjiQpDv1zS53rN5tN3d3d6eTkRJeXl5GBBLVNBsTNzU24\nXCaTibrdbox3qVQKN950OtW3336r169fx5hTeK9QeM4MIYC02Wzq7OwshPZwOAxlDnja29vT73//\n+yi2BVjbVgOIeDwMY+4ucNxjACxcKq68WNvuKuHvarUa7gP2ByySZ/nhpvHD8nBTEKcBIwIIhj3x\nGh2sO+aYf6RzM8/OuuDS8ZgmSQH6fbxINfbzcCRFsC5MJowa+8zl2tPTcyXj6+trDYfDqNHDs0uK\nDC/W2/v37yOQ3c8kIr4M9olaJ9PpNOKoCJAfjUY6PT2NoorVajX0kdc9caa3VqutBcvyTMwba5tr\neJp22rYCTlKa3xVsVpDoJqXjIMVBhLQOaNx6BxQ4LeX+u9Rd4BZilgvBWQxX8B67wqbyw5Q84Dd1\nJ2UxNamLxccMheHtpdgT/I1c29kIZ48YQ4+udjDibhbQONd3PzrXg4KV1ovscQ2fM5/PFJBuYkg+\nBnDS6/XCioaNcJ8rQph4Dg/ckxSvsQTxgTM+BLrOZjPV63XV63VdXV1FiifpuMRJEMSHAPQKrtIq\nELVSqQSAQshDaxP0JyliTVLgWq/Xw7pE4RwcHKher6vRaGg0Gumvf/1rHCdPaiL7CuAD2wPgefv2\n7VrMCowSgcO+NnBd4c6iSBgKBcVVrVY1nU5DaOOeQaGR8nt/f683b95oNBrp6elJvV5PhUJB4/E4\nqPJOpxNj9Omnn0atDVwzuHdS9x3P0u12Va1WQ8AjE+/v73V9fR2gctvZOoeHh+Gy5EgF9rC0OoDR\nDSVpxQjjXkFJA3B2d3cjBkhSuPsA6ovFcw2fXq8XMUH5/HN1WeSpx+DBWHAGD/dmfPleuVxWr9cL\n1xsxFcQtwQgQFE0GjPcdNwgKHtnJ3BLU2u12Y33xTNSD2dvbU6lUisJl8/l8rToxcrXT6ejo6CjO\nH2ItMZ6j0UjHx8cBPur1ui4vL6O+j8e7nZ+fx2GhBOQ703V+fq52u629vT1dXV2pXq+vMVboacax\n0+lEgC/PhPwH3HmsD2Powc1ZbWs8OANGJ1HYTtdlWcbOBmQpb67t10FIMNn8HmXBAvB/ruydyXBw\nk7o6UpDiAMcngb+dqXF/XKqsX+qLK35Xzi8BEw7mInCLDIPFYhF0o48/izIN1iTYy5+dZ8ByT8EE\n8+3X8YBQd004NZ8FTHh2B3cfQ/OUSihwrHFfC6PRKPzADr6wcDgYz4NLAYrlclmDwUC3t7c6OzvT\nn/70p/ju7e1tlLQHbLhf/+npKep7IHQXi0WcRZLP59XpdMLXTroshZlarZZarZZ+85vf6Kuvvopq\nmaPRKGjx2Wymq6uroJwBDAAyZ/d4Pmjsg4MDNRqNteDfQqEQTIVboG4wTCYT/fjjj8rlnl0nw+Ew\nhPrFxYUkRQbT4eGhzs7O9P79e3377bcBAPr9vqRVTZL/+I//CN95rVYLmfX5558H68VaPjg40Bdf\nfKHf/e53uri40NnZWSjw2Wym9+/f6+bmRt9//30E4ZLRdX19HbVWcHdgFQMMt90I1iX+AorejRNn\nirCK2cueKeUxKVTVxdKeTCY6OjqKa/7444+6uLiI2kFY5s4w4jJjHj1biv56HCCFEXERSgogT5+l\nZ4aD+Ke7u7tIdYcRefv2rY6Pj6OEPXPHeOHKBATxOQBcUmTqeFq165G3b98ql8vFsRKMYaPR0HQ6\n1XQ61W9+8xstFgtVKpUANhcXF+p2uyFDOYOo3++vudGkZ7kLY+W6kdL2zDdsO2wSbEmn09HT01P8\n3gvmET/j7BGuXN/LadsKc8LgwhI4kyF9mJYqfRgMK61b/XyH67pVz2/9tVv6fi2aU/BZwa300/vN\ne/4cXhjOn8mZGpRGCsx4rrRvrqjpVxovs8kd4jEv6XP461Tx81tnmWBIfEN5vrz7MRFU6cZzMEJD\nWfvvU7eS9x8q8mMAJ/1+P9wSrVYrzt8gMBMhvVyul3KHlfJzeTz1bnd3V69fv9a7d+8ivbHRaCiX\ny4XCI5J+MBjo4OBA+/v7kVUBhYy7wdcY1iL+9cfHR7169SrcL4+PjwEKYDRms1lkTsAOMIdQ3vyT\npC+++EK9Xi/YNAQejAZKBGZjMBiEUnMWkNoh9N3X+unpqf7xH/9Rf/nLX0Lh5fN59fv9YCg+/fTT\nOHSRM1twnR0cHKhSqYTVPRqNwn1GX87Pz+OZ8MMfHByo1+uFUH779m3EC2EF39zcxLx9+eWX6nQ6\nYZWyfofDYViv1BSBweGe22wAWlfw7H9iJCStzbu0XpSSNePZHZ6BtVgsQtFjYR8cHOj6+nqtjIHH\nmQCaYFIA2/l8PpSfByJLK6UMI4ALSFply8xmM3U6ndiTXq2ZPsDUcQ8qJj88PB+0ScAswbLj8Vj3\n9/dxDZhIDAFYT3ednZ2dqd1uq9lsBqNUqVTimAqP/bm8vNTR0VGwIrBOvIdby41swC9Gw2g0iirR\nuNVcxzw9PQWI9/pInIPkxlej0Yj78Jywj8zXprYV5gRKlsnG7eJgwpVk6m7xQD4EEA/N4nVWw906\n0oq54PceMCd9eGZOmu3D7/htGoeSgiC/b8rKpIrd36Nxf/8/C3hgtbiPL6s5WwGC5fspG7XJtYSw\nTt1NMCxc1+fTx8/nweeGuAqeFWoQJUSgJtaGuxheWui/VGs0Gnr9+nX4wweDQXzGeCKgsaAAIxRc\ng+YGoBA09+c//zmCSbGAfvjhh0g1Zu5JYSRmBaq8UHiuQAvzUa1Wg+2YsFi22wAADWlJREFUTqca\njUbqdru6v7/X+/fv1ev14gyYq6srdbtd9fv9oLMlBQWOi4U5gCXis36/r6en5zN8EIaTySQoYGqb\n0DzgD5eKp0f63mdtjUYj/eu//qsKhUIAEtYLFvl//dd/RezE8fFxKH+Yi1/96leRXYCV7qmo7XZb\n3377rTqdjhaLhcbjsa6ursKydBcQMT29Xk9fffVVgM93796p0+moUHg+4RiL9/j4WL/97W/1pz/9\nSa1WS+fn58EGVCqVX2T9vtS80qikqGPjChUlCShg78IKAGSJayBWCtnBKdsYraenp+p2u3GWEi49\ngi6x7mHscGfSl2q1GvLm4eEhWGHqhgDUAdcwPtwP0AMrub+/r5ubmxiTu7u7AAnIfACcMzruOiHm\nA7fi3t6e3r9/v1b7xWXew8ODLi4uAtTQb+5RrVZDhpAJyDEKBIkTmFooFIIxRXeRNbS3t6fhcBhh\nCG5ISCsjHYaT6rLz+TyK8h0eHmo2m0XW0mAwiGBz9AxxJx9lhVi3zKVVFTnpw+DUTZS9MyQpFeaM\nQvrbl6zrrDgNp23dFZMKRmdiUus+VfDeP6xjty5YmMTl8Gz8QwGkFfZApi81V+ZZAMb77KDQWR7G\nOWVbUsBIywJLPm9cw/uV3tvfp39Z7227Ufthd3c3UoclBQPBZ71eT5LCYsrlclG6HgFaLpcjFTeX\ny+n169fa39/XZDJRr9fTfD7X6elpVOiEyWKs/QTS09PTAAfEddzf36vZbIbVCM0Ne+FxApSX393d\njb+hwWFTiAOAHeJZvXR9pVKJyqCk0GJBISjJoEBIk/rrtD1z7UC92Wzqm2++Ub1ej4yG6+vrKKB1\neHioi4uLqJJLHIefZfMv//Iv+uMf/6hqtao//vGPury81O3trY6Pj6NIG4qFsZzP5xFrQ0zE7e2t\nptOp+v2+Wq2W3r17F0phMBjoV7/6lRaLhU5PTzWfz9VsNiPgcTabxanVpHW+lNXwSzRfCyhGGDkP\nZoeFICbCrWZiDPxMG2JYUK65XC6YM9Z6tVoNQCEpXIAEzRLbRRCzr5t+v69KpRKsBHFHFEr0gwM9\nxdtdant7e3GIXj7/XFARUFutVgNk4eKCrUQu+fpZLJ6L+1G8j/TltL4Q7Goul9P79+/12WefRVow\nz0BtFFL1cZUtl88l+bne3t6e3r59q4uLC7VaLV1eXkbRNggCmD5pVWPGSQJJwfDiqvHxA7jc3Nyo\nXC6HO5tn8Zon6C3A5Ka2FXDi6ZNMgFvgmwJbXaG628PjFVIAkbZNjEIamOMLRVov+OYUV+qC2hTg\n432C+oMuywJSLAQWT/rsLAgUkt8ndev46zQQlbHPcofR3K3Ftdi86fy5cnTwkZV1lILB1DUF4Evj\nbAAsCIO03P42G1aUpFCKT09PoUQRsKenp8ECuD8WxqFWq+nm5kaVSkWlUilcEWSfIHyI2O92u0Hx\nYtl4ZkSn09Hr168j/gWLsl6vR+VTBLRXpnSL99NPP9XXX38dwj6fX52PQiT/eDyO4nD7+/uhyNk7\nxNFMJhN9+umnEYfgRdY8HsOVEJYj5bax0HnObrerP/3pT+Fa29nZ0fn5eVTwvL+/V6fTUaVSWStY\n12g01O/3Va/X9dVXX8UcYOFyCrFnrXlmBcIXt9rt7a3q9bomk4larVYoDdjixWKhXq8XVvWrV6/i\nmVAsxMWwZrrd7tbWtLQ68wVGgrmRFFa2pKgxgkzCzSIp3BiAbsAL9UBcxrIfxuNxVEmGxaAWEwpU\nUoBC9hQn5Z6cnES2WS73nCZLphouV0AzbjT6y29wb1IBuNls6ttvv430+sfHR71//z4y9WC8GCti\ncAB0gKSHhwcdHx+H0k5dH7Crr1+/1mg00sHBQZSdpz4ILKYXg0MWIxOurq6igCCxIP1+X9VqVf1+\nX+Vyee0cH68l49eFQYRZcQAHI8O65VTp4+Pj6AcpzsTLHR0d6fr6euOa2wo4cWbALWRJHwCTLPbB\nFbLTjJ5RwwZJGQAHAmm8iLtiUjeBMwNpbItfA2Xgv3Ol72NAcJQzJ/6cAJAUfEirA57obxpM68+N\nEseqRXk6IOTaXkCM/noKn7uN/He8lz5nyibx/fR5s+aZ7/m8YKGgHF0QfgzghAP4cK84W4JVKK3O\nrECY4uJxVowsGQIEPf0WAUVWEAF6i8UiWA9Ja/e/ublRqVRaYzzI5EHxelYPcRdv374NVxCR/Chn\nr34qPR98+Ic//EGFQkHff/+97u7u1G63w8J6fHzU8fFxpNyizKi4ijXtacOdTidSJXHpQcGfnJyE\nP75YLOry8jLie7gmbq5erxeWLe6mYrEYfcFaB0y12+1gL/Grt9ttffLJJ/EsuVxOx8fHkp73xr//\n+7/r/Px8jRFjHlAkJycnAXZms5n+z//5P9rZ2VG/31cu91wS/Xe/+52Gw6G+++47lUol/fa3v/1l\nFvCGhiyBoveyALhy2ZMEyuKuIw3XwbKf3IzCZf0SQ/H4+BhxPShygCfxSpzwS00RScGAPDw8BPAh\nrklaxUIAvKlSzN7EpdTv99VsNiN+YzKZRDGxcrkc1xyPx2q1WgHW5/PnQnC3t7dhfIzHYz09PUUK\nOewDWTycrQTr9vDwoM8++0zfffedarVaGBMpkEHmjkajuAbjQNwIGTXHx8fBXj0+PqrdbqvVaqnd\nbqter6vX6wUAy+efT+qm0ByB7GQuSc8kA/Ep6DBcRLjGOp1OABp0C0zv9fW1Wq3WxjW3tYBYaaUU\nWQy8RkE58EibB2O6i4j//Tog9JRtcSYCpYqC5n3/nMWIsqehnL1YlIMgSWufp0yI99vZF5Svu3/8\nu84s0CevnAiwQUg6COS5UpDg93A3C699Q3g/nd1wYOMti0lJmTKfo/Q56QuWhYM67/s2mxcNKxSe\nT/2EhaCeAIGqOzs7QQ/DMmDJSSsQR0YN1gen1+Jnr1QqYT2NRiNJq7gurBssc9wXgChX3jAmrNX7\n+3udnZ3p/Pw8zn85ODhQq9WKYmOklxLV//j4+H/bu5ue1LUoDMDvFTgqWFMkCEIwAXVi/P9Dh/4I\nYqiJaEqjhWoVP1J7BifvctN7z/S6B+8zMTEqUNu9115rf+Dq6srKFWVZYjQabax+mM1m1kkxc1KW\npS2p5cTEVqtlO4hGUWSTFFlX5zVI09TmCARBgDzP7fnmCJAd1tfX10aa/uTkBNPpFLVazZZis2yR\n5zm63S6GwyHiOLZM0/HxMZIkAQDEcYzVaoU8z1Gv13FxcWGBCBtvNso8ZPHm5gZ5nuPw8BDNZhOT\nyQQAMB6PrTFfLpdI0xSDwQDb29v/2uPl/8bMFu9rlm3YTjCY5QRPPgPsiHivMhhlW8XSAveX4fPM\nElG9XsfDwwPOzs4se+Q+829vb9ah8vX5/2WwzeDFfYbSNMXe3h7CMMR8Psf+/j6SJLFdVtmOMRAr\nisLue95HRVHYEngeHMh7lJu1MUhmWxzHMXZ2duz1mVXjnBVmjhgcu2VeTrTn5+Z9zdVILy8vNieH\ng471em2bnTEw6HQ6GweSdrtdCzr4laWp5XKJwWBgc6QYJHFwxLYoTVPMZjOcn5/j+fnZ2nVm/5gt\nzfPcdqvlBnJ/86OnSXHU687cdoMNtyNzsyBu4OGO8quBDP++W4JxV5fw9/l9t9ThcrMQ7ByrnTw7\nSzewYWdZ7XyrWYIqN+gCNiebVssg7nt0rx8/t3sNq+UaVzWoqAZs7s9Ur4v7XqrZE14XBojV12dq\n1v271WyTG6Qx6OJruV99CE6YIfj4+LDDvjhSAb63Pg/D0HYK5ZJDjhIB2Kiv2Wyi0+nYrq08iTQI\nAmRZhjAMbT4IJ1YyK8Dab1EU9j5YvgG+Z9qz7MFOhUE4/2+slXMZJScsukuD2aE3Gg30+31kWWaT\ndFmmKYoCcRyj3W7bKO3u7s5q54vFAo3Gn8PMtra2EEURgO8SJ1dAMbOSZRmSJMFyucTR0RHKskQc\nxzZ/jW0FU+gsD7y/v2OxWNiW5aPRCFEU4devPycEJ0mCwWCAy8tLALBSQL1eR7vdxnQ6xWq1Qq/X\nw+npqa3E6ff7uL6+xnA4tH0mmPpvtVq4vb3FZDJBu90GANuwi/uZcE5LrVazn+H15qFvP4VbnvOz\ncGUR2wTOdeA2/rwX+Bn5DLME4GYBWBbic+IGLNyXZz6f22ohzm1ie8jyEjtkZlYY1PF7LK8wU8Hs\nDOdBuFkaljFZVuPnYhmZmbMgCGxSLU8qZtnIDeA4x6LX61l5lgNR7jDLVUYciLJcyu9z1Q8DRban\nHARwPhcACwhZvmIWpt/v2yq0x8dHHBwc4P7+Hru7u7ZUmXufrNdrdDodO/n46enJBjd85jnp+PPz\nE+Px2PaKeX193TgLrNFoWNvIoNTdb+m//PO3DlJERETkJ/z8YSQiIiIiDgUnIiIi4hUFJyIiIuIV\nBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUnIiIi4hUF\nJyIiIuIVBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUn\nIiIi4pXfPRZNtgyLF3IAAAAASUVORK5CYII=\n", - "text": [ - "" - ] - } - ], - "prompt_number": 5 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With net surgery, parameters can be transplanted across nets, regularized by custom per-parameter operations, and transformed according to your schemes." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Casting a Classifier into a Fully Convolutional Network\n", - "\n", - "Let's take the standard Caffe Reference ImageNet model \"CaffeNet\" and transform it into a fully convolutional net for efficient, dense inference on large inputs. This model generates a classification map that covers a given input size instead of a single classification. In particular a 8 $\\times$ 8 classification map on a 451 $\\times$ 451 input gives 64x the output in only 3x the time. The computation exploits a natural efficiency of convolutional network (convnet) structure by amortizing the computation of overlapping receptive fields.\n", - "\n", - "To do so we translate the `InnerProduct` matrix multiplication layers of CaffeNet into `Convolutional` layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 \\times 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding." + "Roll up your sleeves for net surgery with pycaffe!" ] }, { @@ -243,17 +37,101 @@ "output_type": "stream", "stream": "stdout", "text": [ - "diff: imagenet/bvlc_caffenet_full_conv.prototxt: No such file or directory\r\n" + "1,2c1\r\n", + "< # This file is for the net_surgery.ipynb example notebook.\r\n", + "< name: \"CaffeNetConv\"\r\n", + "---\r\n", + "> name: \"CaffeNet\"\r\n", + "4c3\r\n", + "< input_dim: 1\r\n", + "---\r\n", + "> input_dim: 10\r\n", + "6,7c5,6\r\n", + "< input_dim: 451\r\n", + "< input_dim: 451\r\n", + "---\r\n", + "> input_dim: 227\r\n", + "> input_dim: 227\r\n", + "152,153c151,152\r\n", + "< name: \"fc6-conv\"\r\n", + "< type: CONVOLUTION\r\n", + "---\r\n", + "> name: \"fc6\"\r\n", + "> type: INNER_PRODUCT\r\n", + "155,156c154,155\r\n", + "< top: \"fc6-conv\"\r\n", + "< convolution_param {\r\n", + "---\r\n", + "> top: \"fc6\"\r\n", + "> inner_product_param {\r\n", + "158d156\r\n", + "< kernel_size: 6\r\n", + "164,165c162,163\r\n", + "< bottom: \"fc6-conv\"\r\n", + "< top: \"fc6-conv\"\r\n", + "---\r\n", + "> bottom: \"fc6\"\r\n", + "> top: \"fc6\"\r\n", + "170,171c168,169\r\n", + "< bottom: \"fc6-conv\"\r\n", + "< top: \"fc6-conv\"\r\n", + "---\r\n", + "> bottom: \"fc6\"\r\n", + "> top: \"fc6\"\r\n", + "177,181c175,179\r\n", + "< name: \"fc7-conv\"\r\n", + "< type: CONVOLUTION\r\n", + "< bottom: \"fc6-conv\"\r\n", + "< top: \"fc7-conv\"\r\n", + "< convolution_param {\r\n", + "---\r\n", + "> name: \"fc7\"\r\n", + "> type: INNER_PRODUCT\r\n", + "> bottom: \"fc6\"\r\n", + "> top: \"fc7\"\r\n", + "> inner_product_param {\r\n", + "183d180\r\n", + "< kernel_size: 1\r\n", + "189,190c186,187\r\n", + "< bottom: \"fc7-conv\"\r\n", + "< top: \"fc7-conv\"\r\n", + "---\r\n", + "> bottom: \"fc7\"\r\n", + "> top: \"fc7\"\r\n", + "195,196c192,193\r\n", + "< bottom: \"fc7-conv\"\r\n", + "< top: \"fc7-conv\"\r\n", + "---\r\n", + "> bottom: \"fc7\"\r\n", + "> top: \"fc7\"\r\n", + "202,206c199,203\r\n", + "< name: \"fc8-conv\"\r\n", + "< type: CONVOLUTION\r\n", + "< bottom: \"fc7-conv\"\r\n", + "< top: \"fc8-conv\"\r\n", + "< convolution_param {\r\n", + "---\r\n", + "> name: \"fc8\"\r\n", + "> type: INNER_PRODUCT\r\n", + "> bottom: \"fc7\"\r\n", + "> top: \"fc8\"\r\n", + "> inner_product_param {\r\n", + "208d204\r\n", + "< kernel_size: 1\r\n", + "214c210\r\n", + "< bottom: \"fc8-conv\"\r\n", + "---\r\n", + "> bottom: \"fc8\"\r\n" ] } ], - "prompt_number": 6 + "prompt_number": 1 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The only differences needed in the architecture are to change the fully connected classifier inner product layers into convolutional layers with the right filter size -- 6 x 6, since the reference model classifiers take the 36 elements of `pool5` as input -- and stride 1 for dense classification. Note that the layers are renamed so that Caffe does not try to blindly load the old parameters when it maps layer names to the pretrained model." + "The only differences needed in the architecture are to change the fully-connected classifier inner product layers into convolutional layers with the right filter size -- 6 x 6, since the reference model classifiers take the 36 elements of `pool5` as input -- and stride 1 for dense classification. Note that the layers are renamed so that Caffe does not try to blindly load the old parameters when it maps layer names to the pretrained model." ] }, { @@ -267,7 +145,7 @@ "\n", "import caffe\n", "\n", - "# Load the original network and extract the fully connected layers' parameters.\n", + "# Load the original network and extract the fully-connected layers' parameters.\n", "net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt', \n", " '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', \n", " caffe.TEST)\n", @@ -285,27 +163,27 @@ "output_type": "stream", "stream": "stdout", "text": [ - "fc6 weights are (4096, 9216) dimensional and biases are (4096,) dimensional\n", - "fc7 weights are (4096, 4096) dimensional and biases are (4096,) dimensional\n", - "fc8 weights are (1000, 4096) dimensional and biases are (1000,) dimensional\n" + "fc6 weights are (1, 1, 4096, 9216) dimensional and biases are (1, 1, 1, 4096) dimensional\n", + "fc7 weights are (1, 1, 4096, 4096) dimensional and biases are (1, 1, 1, 4096) dimensional\n", + "fc8 weights are (1, 1, 1000, 4096) dimensional and biases are (1, 1, 1, 1000) dimensional\n" ] } ], - "prompt_number": 7 + "prompt_number": 2 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Consider the shapes of the inner product parameters. The weight dimensions are the output and input sizes while the bias dimension is the output size." + "Consider the shapes of the inner product parameters. For weights and biases the zeroth and first dimensions are both 1. The second and third weight dimensions are the output and input sizes while the last bias dimension is the output size." ] }, { "cell_type": "code", "collapsed": false, "input": [ - "# Load the fully convolutional network to transplant the parameters.\n", - "net_full_conv = caffe.Net('net_surgery/bvlc_caffenet_full_conv.prototxt', \n", + "# Load the fully-convolutional network to transplant the parameters.\n", + "net_full_conv = caffe.Net('imagenet/bvlc_caffenet_full_conv.prototxt', \n", " '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n", " caffe.TEST)\n", "params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']\n", @@ -322,23 +200,21 @@ "output_type": "stream", "stream": "stdout", "text": [ - "fc6-conv weights are (4096, 256, 6, 6) dimensional and biases are (4096,) dimensional\n", - "fc7-conv weights are (4096, 4096, 1, 1) dimensional and biases are (4096,) dimensional\n", - "fc8-conv weights are (1000, 4096, 1, 1) dimensional and biases are (1000,) dimensional\n" + "fc6-conv weights are (4096, 256, 6, 6) dimensional and biases are (1, 1, 1, 4096) dimensional\n", + "fc7-conv weights are (4096, 4096, 1, 1) dimensional and biases are (1, 1, 1, 4096) dimensional\n", + "fc8-conv weights are (1000, 4096, 1, 1) dimensional and biases are (1, 1, 1, 1000) dimensional\n" ] } ], - "prompt_number": 8 + "prompt_number": 3 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The convolution weights are arranged in output $\\times$ input $\\times$ height $\\times$ width dimensions. To map the inner product weights to convolution filters, we could roll the flat inner product vectors into channel $\\times$ height $\\times$ width filter matrices, but actually these are identical in memory (as row major arrays) so we can assign them directly.\n", - "\n", - "The biases are identical to those of the inner product.\n", + "The convolution weights are arranged in output $\\times$ input $\\times$ height $\\times$ width dimensions. To map the inner product weights to convolution filters, we need to roll the flat inner product vectors into channel $\\times$ height $\\times$ width filter matrices.\n", "\n", - "Let's transplant!" + "The biases are identical to those of the inner product -- let's transplant these first since no reshaping is needed." ] }, { @@ -346,13 +222,33 @@ "collapsed": false, "input": [ "for pr, pr_conv in zip(params, params_full_conv):\n", - " conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays\n", " conv_params[pr_conv][1][...] = fc_params[pr][1]" ], "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 9 + "prompt_number": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The output channels have the leading dimension of both the inner product and convolution weights, so the parameters are translated by reshaping the flat input dimensional parameter vector from the inner product into the channel $\\times$ height $\\times$ width filter shape." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "for pr, pr_conv in zip(params, params_full_conv):\n", + " out, in_, h, w = conv_params[pr_conv][0].shape\n", + " W = fc_params[pr][0].reshape((out, in_, h, w))\n", + " conv_params[pr_conv][0][...] = W" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 5 }, { "cell_type": "markdown", @@ -365,18 +261,18 @@ "cell_type": "code", "collapsed": false, "input": [ - "net_full_conv.save('net_surgery/bvlc_caffenet_full_conv.caffemodel')" + "net_full_conv.save('imagenet/bvlc_caffenet_full_conv.caffemodel')" ], "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 10 + "prompt_number": 6 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To conclude, let's make a classification map from the example cat image and visualize the confidence of \"tiger cat\" as a probability heatmap. This gives an 8-by-8 prediction on overlapping regions of the 451 $\\times$ 451 input." + "To conclude, let's make a classification map from the example cat image and visualize the confidence as a probability heatmap. This gives an 8-by-8 prediction on overlapping regions of the 451 $\\times$ 451 input." ] }, { @@ -401,7 +297,7 @@ "plt.subplot(1, 2, 1)\n", "plt.imshow(transformer.deprocess('data', net_full_conv.blobs['data'].data[0]))\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(out['prob'][0,281])" + "plt.imshow(out['prob'][0].max(axis=0))" ], "language": "python", "metadata": {}, @@ -411,33 +307,33 @@ "stream": "stdout", "text": [ "[[282 282 281 281 281 281 277 282]\n", - " [281 283 283 281 281 281 281 282]\n", - " [283 283 283 283 283 283 287 282]\n", + " [281 283 281 281 281 281 281 282]\n", + " [283 283 283 283 283 283 281 282]\n", " [283 283 283 281 283 283 283 259]\n", " [283 283 283 283 283 283 283 259]\n", " [283 283 283 283 283 283 259 259]\n", " [283 283 283 283 259 259 259 277]\n", - " [335 335 283 259 263 263 263 277]]\n" + " [335 335 283 283 263 263 263 277]]\n" ] }, { "metadata": {}, "output_type": "pyout", - "prompt_number": 11, + "prompt_number": 7, "text": [ - "" + "" ] }, { "metadata": {}, "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAAXMAAAC5CAYAAADavt/0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWnMbWl23/Vbz7T3PtM73aHGrqrurrbdbbttuulYVhqT\ngcgycQwIGVlYFiCQhSCRAUu2IyHExxAhWSIKcsRoRSJBfLAi5AgMIU4CiWxIYqc73XZPRVXXcOu+\n953OtPczLT48b7dN1O6qxHVz7e7z+/See/Z99nnPfe561l7Df4mqcuDAgQMHfn9jnvQHOHDgwIED\nv3sOxvzAgQMHvgE4GPMDBw4c+AbgYMwPHDhw4BuAgzE/cODAgW8ADsb8wIEDB74BeCzGXES+X0Q+\nKyKfE5Gfehz3OHDgwIEDv4W813XmImKB3wD+KPA68KvAj6jqZ97TGx04cODAga/yODzzTwCfV9VX\nVDUBfwn4ocdwnwMHDhw4cMvjMObPAq/9ttdfvv2zAwcOHDjwmHgcxvygD3DgwIED/5Rxj2HN14Hn\nf9vr52ne+VcRkYPBP/BYUVV5Evc97O0Dj5vfaW8/DmP+fwMvi8iLwBvAvwb8yD960dPf889gMO3Z\nwCjGG7QX1IM4gxGhqiJiEFMINdMZIe935H3ECRgpqCkoiqpiUgUUjGCtRUSotVJrJidlt1NSugEs\nxhi2byaWzw7YXlgOM+YLz2J2zKyb4Z1jHEfOLy94eH4BCMMM7GBx3qA2Y5zFONCijGOmZAUpCFBq\npVah1szqeGC2MtRJmR5Zbq4i41bJSTHGMF2NPPOtc3zvEWvJ2aLVUKtBFFQV1YK9/X1s8IgD64Qa\nKkpGfaWkSiqFGoWyVXStCLC6f8S954/50Hc/z73nnmV5Z4Hzls1mw9tvvcErX3yDh2894pW//oDh\n2TnbhxNmctSaMQiOijWK85XQO5wFJ+0zxpTJxdAd9/izwN17c/ysYK0jJxi3mcu3R3YXyngZmTax\nrest4iriwBnLyQt3+MC3PM9v/p3P8x3/wrcwTYnz1895+PoFXgQ1ghqotQKGUguhh5mtuOAIXQUs\nIPy1v/Cbj2Fbv3t++Id/+Ou+/6lPfYpv//Zvf8d17t69+47X/Mqv/Aqf+MQn3vG6H/qhd05b/fzP\n/zw/9mM/9o7XffCDH3zHa372Z3+Wn/iJn3jH6y4vL9/xmp/7uZ/jx3/8x9/xOoBf+IVf+Lrv//Iv\n/zLf933f967W+qVf+qWv+/5rr73G888//3Wv+Qpf+tKXvu77m82GxWLxjus8ePDgd3zvPTfmqppF\n5N8H/hfa/67/+mtVsli5jfCIIKIoFVUB45rxEgGpGGPbBzWCFWGxGtA+cLO+xqpSKxgxxJpbfEcV\nLQKqON+MOlSMqfTDiNoeTQVnLGIi3nswlZgifoI9O5wK2VpSSogIq6M5OSeWqx58QZwSKVQBtUqt\nGRcsWjIUoYpB1YKOrFaeo/tbqnSIeqaq7DaVkgzWGayR26/BtN8FEHGUmhEsIgDtUCsoYdGjoiCK\nmgLGYL1CqLi5IkmpYyEboVgLO8fFo0v8mSNZwQwGO/dIFYyxqAW/Uvyo2F45fb6naGH3+oQ3gnMG\nq5ngPc4YnBSMCtYKuRScM4g35FQwpVBToVbBSDtgfTB0vSW6hHUVqF/dA2IF7yzzbmC1OKLrB4IP\nHB0ds9tu2Q0di9WcqoVaK6qZUoVCRXMmRcu2E7pswLcDepi/1zv6wIHfHzwOzxxV/avAX/161xhv\n0aqogGIRCoJQVBERVBUrjpQzM2dxttI7Q60RkcIw88QxIhVyyph2XxADBapCpmCMQcXRzZSjowDG\nsL0ZyVHZPkq4AM1/hf2UyGWHsY7ZfMD2hpNhgbVHiBiyTkSNxLpDCqhJZAtiLMQC1qElgSjOVUzX\n0d2PDKce3SnXo7BPE0LFOoOxig2CiJC0YtVg0fawYhxFBWpBMAiF0DmcETKgCrV4XNnBzFLMhErF\nDoI6wRmD6SCZyiCeo9MFpWbiOLE53xBLZXP5iPXVmpyu6GYV1yvhbmERlY7A9jziLVh1WGswGtFi\nECeots0zJaWaQph5LIZJMr3pySkDQh4rVjzGZ5SKmAzVoFWRJJjOYhfhq09n1js67yhDR7+cEbY7\n8tgO+lIMIhWyImLbvimCCWCtoeulHc4HDnwT8liM+btCoBq5zcDehhlFUASp7bE6N4tFTIXBCkJB\ngVoLaYrUknDWEmYdu92eqgpqUKNUMpotnRe6vtLNPMuzGasjTy7Ko4eRMgmut8SoWBVUKzllilZE\nDH0fmM07jDFYa5ninov9I0wVSAYlItZhXKVWgzUV7S0qMD+Z03fK++4McASP5JLVdsXDPAGCCPhg\nsFaZn4XmgVdBjKVYEOMgtsOIWvHBIEZQzYhx1KqIFDRZyiYjnUE8MGS8GIorlB3Y5OkXHgZhSpWr\nyw16fsPF+Zr99hz8SLEJFzJ3PtIR5tDdUbwVclbMFqQCVERamEPEkWLBqqfWQqmC2J7l2YzF3GOs\nIghxqoxjZLPJpCkiGIxxVNHbEFih7JWyKoTeYZ3wvo88hwSPLwXvDLNZz75mam0HbvPQlZQStRR8\naJ9TzMBsPsf7J7el3y337t17z9Z69tn3rlDsox/96Hu21vd8z/e8Z2t97GMfe8/WeuGFF96ztVar\n1Xu2Vgjhd73GE9v5VhyqGWMMSRWVSikZqR1GDDVXrAqCRWtkjyEMilQl50wpFS0VrMEFx+n8lKuL\nG1KKgFDVgShjivQnM/yicrSA49MeGxxh2OGs5fpiIt/sMcYg6qilkFLz0AsDIh3GCVUnqikYa6gx\nMdWJSW4IdU5vA84pyVvUKt3Ms5jB0dnAP/ttH+F8+xZ3ujt86vwKTR3iMkEc1uptvFdQgWocWVyL\n96sgXtCoCAqYduY5jxGYxh0yQSqKDmC6FmZxwaALcNViUYoU+sUMYxLjuOHBw8T64Y7t248oLtLP\nCgwb8gxOP9SRdc+wEqKFcGmQWDFVKFOkytDCYbVCgTFWijp0Jkjn6JxnOczBF/b7PdhE11kmImNp\nYZWcKrXqba6k7YV9zhgrIMLT33KXlCcymbHuESKqGbRAVsiZFEdyrNTJQhb6IeC9IziPd/ax7lsR\n+X7gZ2khxP9KVf/MP+4aB2P+j8fHP/7x92ytF1988T1b6+jo6D1b6/e1MVdVrLGU2sIrBsGIJedC\nNi3UQKmoFKRC2k5siyA1oTmSU8JQ29+UwtB3DM+c8uh6y2azQycBqxh1bDd7Fkcr7GDpF5750ZL5\n6oT9/g3GGJmvjsmpUEplGiP7fcLulSxK6IQQAiKVsYys95fs40RmxPeCtS1ujRpMZ6imYoJhNu/5\nnhe/lSN/n3/3j/8kf/vv/R+sv/Q/8WgO67qjWnDOIaI4X8g1YJylH3rEQsnajCgVQdBSMM5DqaRc\nsDmAVNQKOSayGjCKhoLrCmIVMxN832F6pdrKzc3I9uaKR6+vsVHp+ogLFqrBaWXoF5g+sWZL0Up3\nBuMuY64CBYuUiHeOXKTZ1ipkKRytThhCTymFKUaCNRhjWvzfecLSMZsiV+sbKpWqisuGTMXOPNYL\nu7RnO22RfcZ7Ty4JzbcJcBFyqeRcQAw+BFQTtQpKwrkZw9BjTUuAPi5uu5v/HL+tu1lE/sqhu/nA\n7wWemDGvNA8aFDGGioGiWNviqaRMAUQrVSoinu31FsqeNG6ASt8ZhsHhXQ9iCH3HU13P9dAxjiNX\nm4laM2lSri5HnnrfDNcJR7NTzHJgfN5g5SHee6YpsdmM1EfCuLlknBzGCo92kYX1FAO7ccc+ZvZp\nfxvr7rHWY51BxFGL0puAx7GczdnulT/+iR/k+guv873f8oeQ7tvYvvqn+Zz1rKcrTAsqgfUYFXxv\nmS0ctbZvJ08FRKAAtwa+ilIqqFEwhhozUgyahWwyMhdAcN5iqYgWnAuMaQe7yvlbG/YPY4uBG6WU\nDFV5+vh93Dk9wc48b9RXGMcrfDAkDFEyWg1ilUrCucA0ZWoRUipcXWyYnwzU6igUjPGoEYrpqBpx\nRtFQMcHhbQCrFAOzIdAvAqG3LRGuEKfYnrxyJpYJlYL1jhQzIhXNFckgYvFBSDninGWxmDPvbfP6\nHx9f7W4GEJGvdDcfjPmBJ84TM+ZSFRVt//G1NrMmUGMBUUouOBXEpmaUbg1YTQI44n6iTpk+FOZz\nxTlLk5nJdL1HELwTdrvEOI5sb/acvzVy5+4xetoRwqw97npHjLHFYM8rm+sNKSW2+4ldvsbkxEIE\ncY4coRiwPuCD4vr2TGFsRZTbckhPqpljf8SJnvDrf+tvc3P+AHHCR77z4/zMT/7n/Cd/9mf43AWI\nte3pxLYY8mI1ox8CKWa0Vvaqt0ldoaZMLrQKH+dQa6i1VQGBoeSC5hZeqVVRlRbr9xYjmYphTJG4\nGTHFUHImRc+0zZzcPeXe0XM8+/Tz2M4iainxdd68fpNynJG1QWzFiLA8mmOXjgG4fG3PtANjDSlN\nLIMHDCllKhWt5TYkBhaDteCXLfzVGUNYuHYoBsF6ZUwbyujw3lOrUmrLYeQYAUvXOSKZnEdUlJKF\n2bBkPp8xny+YBUeO+XFu26/V3fwHHucNDxx4tzy5bFFJqLW0gjvBqFC0Ai0RaSg4cVhjMOa2TDE7\nsq9EIi5ZJCZ2u5FlmjXj7xwI7XDQRNWKdY7QdaSUub6cGHeGFJUuwHy5YJ4n2BZC77m63IC0KpiS\nJorCMBN8V2+NmSPngNaKOI/3ihiBWlvBnSglR0LXk/Ke7376O3n0a/8Q38+ZdwOf/X9+lR/86T9H\nSn+SMBvAteIbKaAWul7AZQLCGAUwGK1ULRQFi6BiKa2UvoV2xJPqRDUeam6lgcW0Wm7rEIFcEik7\n0l7R6Kk6UqtBkyEnxVXD7OiI+WKF8cJyfsrRfMPN8ZZ6ueZmscPtPFaEcGSZny1IVTFhRn17i6On\nkFEVxrwjiuCdx1iLxbVSSpcwTvChPYmIE1xooSkR15KhGTCFWpVaC8E5sJmorX+gZEOtFmPAZ08m\nYbww6wJWDcEHRB5rr9C7cvs/9alPffXne/fuvacx8gPfXMQYiTG+q2ufnDGvGcRSpMW9KxFTDUkz\nzkJwCjoSPFgPBovthJwNLhrGKoxF2F5FLswNUi39ImOtxVRFSqHGQsotPCEus9vtePRwzbPPVGqs\nLBdHpGMl5RHjA+Jyq2HWyt17K6Tbc3wi2LlSipDUNi/TWWzvGfoOY1vZ4LTPVBTjPFITi7JCrze4\nrmPoO0BZnt7jH/7FP8Nf+rm/xvf/5B+iGKGKEqeKN4YuCMYaCo6cb24972a5BUvRSlVBChTbnmZU\nAGORotSk6OjJU6F0CefbwSYVSorN221dSIgYUs7ECfpwxJ2zezz33HNsxjX78YybmxuWyy1H7zvi\njf3bXHxhTT/v6JceNxhWqyNWdyKLM8fmagTjSbrHpEoGYpxweJRCqe1gNcHgnGAcOO9wwbdyRdeS\nP1VbYrVVK02UMaGlYD04K+ziCDjsVyp7KvSD5+zsmNVqhnO21dw/Pt6xuxl4Vw1BBw68G0II/7/k\n6Ha7/R2vfXIx85RBLRXbaocVqihOlOAqnVeMhy4IzjZPVFSheqaxIyAYI0zrxPpmonLFfBNah2Jn\nyLmSs1JiJmvGWoeYwlsPL3nmrUvs0wus3eI6h+06ttsbprhDidy/v8LayvJsRlgWJt3jgydmENdq\n2YMPGGvpuo7gAr3fMu0mjKm4rBzNe24entP5GUYqRiymZG7qwPWX/y6f/MC38n+dfwESJN3R9wHj\nKgZHLIUSa+tsNRZRqKqE3vPMM0/x1oMH7HZbEItoRqQlP41aarG4CSgeVaXeVv20sFZlfuTJ1jHe\nRNKYGWqHdZ75YoUag/vq7zVwtFhRpHLn2US9UnS/RzvHsOjpVoG5n2GGgJt35F2iACZknLXkDLUU\n6BRvelwQfMiUVMHS6v+rRcSgKNMmoqKEmUOAuB/JcUJSpuTW/Wo9FNOcADGVzlnOzo5YrVYMQw/W\ntPUfH++qu/nAgSfBk6tmqRVyppaKWIsaizEF68BowQUIoTIbHM4ZQLHib2PT7tbDM0QXWd8kNtcT\nNSvx0R7rhdC12rdaMniLDQklsr2GT3/uM5ycnPAwXbE4XqApsltvKRl8B8enCzqv+Hkmm20LWyCI\nMRipVIFMoqPHuYCIYdZ3zIZmgJfO8cE7z5FfiQzHAzWNmOBQEeI0svn0b/Azf/K/5F/+T/8wMXhc\nMayGHmOEmoUYEzlWqCDavFBjhHtP3eXO3WOeun/GF175Eg/evrh97hf0tktUkpJGMLsO6xJqCyIR\nwdLPA3fvnRL3HW9/+Zy3395gZnB2dgcqpNIe6a5vLsh5x2oxZ6uR+0/fI75duX5zT14X7P3AbD6A\ns9xddFxd7lhfbyklotVhRNBSsUDv5y1x6QLHqwDO4kQopXJ9ecN6MyKikJRxP5Gl4BDiLmOKorWg\nWimSwRuMozVdUZkNMxbHgTBz+L4DI5S4f3x79l12Nx848CR4YsbclKZfYu1XGoUUNYAWnC94I/gA\n3hWsLxjjMCimGuzCs3WAqzgbqEwM8xOCc+zGxMO3r4lrwFSMEexQEPFY40ET2/2eXVrjcOzHDbv1\nFZvdGiOW2SIwzD3DUDBB2UZBEGpsIQ/jhJoqacpElwhTjwkZ2xmGWYcw8ezpKXfK81T/BqKCsZ5h\neUTcXGEqbEXJn/s1nrfHfCFu6X2rU7dGSEUZp4zxYKppnrVahlXgzumKWedZLgbG+BTnDy8p6jCS\n2ncnSlWPTBUtLQlqb8uu++BYLu/w1J2nWPhjNk8/w+de/QKzU8/Z/adACtNuz8XFFW+9+YCb3Tmr\n+QnOCRPK6Z0j1o8u2F5s2G6OCGPCL2HoAovjQNHIzdUepFXZ9J0juIDSId4RjG+JAeG2+UkJ907p\nhh1XVxtKzZRcyfuIxsK0jzjN9J2wWPR0/RysMKbUmsW8cPfoDvefuks/C1jXEqulPtYE6Lvqbj5w\n4Enw5GLmClag5Nza4GtFrFC0oMpt7bmCqcitfgt6G/P10ElL7hU1zBZ3GYYFs9kMb3pOTx/yud/8\nInlXUTFgKsWBmQxVhPmsZz2+zZ3lHaa6Z8qRNE5oNfjOYAdzW/pnGXOhqCVmi6ug2u4bY2GT13jp\n8LYDMZSS6OcdN48uKekGEY9oa/nZ3WywoWec9sxqYfvoDX7qR/40f+rP/yRuPuCsAwzGtQoe1zVv\nu0aFquzHiWE2Y5gFQh94+pkTXntjzvmDK1CPaV8PSivnlNw6I0PXPm/nLad3Vzz37HOcnJxx/eCa\ncGzJLuONJcXIozf3vPXm69xcPuR6d0GeCst+RTKVyWQyhjoaHrxyRb+a0816MGBdYJh59ntH3GVS\nrhjJGBMwtWBtQKV10dr2r0qtlVIqi8VANZWHFxeUXKmxMO4m0r7SB0vvLN4tuH/nHvPFjKnsidM1\nYg1DF+iHHj/zGG/J45YxPz7P/MCB38s8QWNeqUURNdQ6UkWwxpEnpXihdIlqleybYFapE025BFDw\nYglDILuMdXOOV6esliusCbjOk3LhzVffYooRcRatQk2GMLccHc8ZmchmR0wT1ELOlRgjxllKVSCz\n3+8Yd4liLLUaHJ5SKjWCE89uiqxvdnjx+FCxYtnvM8/YpwkV/Lwj54oYYbfboGPg6adO2U8Tj87f\n5qOf+D4++f5P8A+2X8B4xTlDHismFBxKqQ4yaDGUmLjYrHnq3nOEmWXcVJ5+7g7VavNoS6JGSBVy\nghRbW79qxYhBpeDVcud9d1nOZ7gQiDaxmgWeCTOuLrdsdxt2l1dcnp+zK2vWmw359FlInl2ZULGk\nKbE/j1yf7+kXjuJ6fAC8UC2kW2EcY7rWH+CESsGZgSlljAjeW0CxpnWCzmaBUz3m4uqKNCXqBDaD\nN47QzTk7vk8XFlgJHM1XzO4+w1jXWCksjga64FAtxBQp7zLzf+DANxpPLmb+FcEl6q0nbimxIAbi\npEyjwTrFdJXswRghaWsW4VbO1ztBcHReODqac+fuXazxWB/YryO73YbdbgQFpVI0kZJhOTvheNkS\nk/v9yH5XIFd2+x2hs+x3GbHbZuCzRdVRi0XwxF1EFYrRVnanypgjPnYEP7Cwx9y/8zK8HolR6Pqe\nWpX5fM7q7B67zQ29D1xfXvDF//2v8B/9O/8h/8Ff+GmSm1rFeJ5wvQHNpH37blTAiONLr7/Ciy+e\ntQNGCt3ScsesMMaS4khVZbct7LeRXblpf/dWuKyUStd3+HnHYjkjRGHVH/Hhco9n4oydFP7f6S3+\nuwe/RoqJEjPRRG52F/g6UERIktgXRafCl3/jTfpjxyCV+aqn1fc70uQg5qZKiZBrZZx2bPJESorm\nQugDi1mgc5YpRay1zAdHSUt21xM6ZfKYmZKlFkE0INrhTEfvPEpL+voQyLliyagqcUrE8WDMD3xz\n8gTb+cttw4vHUFsiVE0ro1OhDJmcwEYlmYoNIDiK5qaqiMWIQ8ShBoZFz/JkQQg9+5QZlgOL5RIx\nTQCkCsQ0MQxzpMzw5gjRTE6P2F5PbG+2pP0ExuCcJcdIzg4VzzRB2kVEM3Uq9F0HHlRbq791nq4/\nInSn9HpEX+6R6hprCqVmnBnY7jfkiytyGrnRkTurI/YpEt7e0psN2nWkPJJ0RKRJvpavaklWhoXQ\ndYFXH77G2ckx3sHR2QI7twTnCf6p25K+wtXlhqttzzpdAptWd38r6bu9uGKohg+/scC9Hbm6fJXP\nWIvUxLd910d55tWBq+rYlALGcb2/gTQy3Sj7apj2rZlre17ZXkdMB5WmsVNzi1dbY1GthG7AiGBn\nc3KEm/WOqWQuby7YJcu9u3fxvaOMBecsvQvMfMdms4Fo2ZuR8zd3HM12eAkMDpIprXzRgTphuxnJ\n3lJK4uZyw3b7eGPmBw78XuWJSswZo9SSbzW75VaeTykqxKSYMWG9xxulSquCEGmlaxhDTc1jx0ir\nmNARYwPOg+scznlC6NpjvbOsd4aT1Sk1CXnblA/31y0uvd+OaCmkpOypWOfJsVKyJcVK3NIagpxn\ns53wnSC9aRozt1oky+EI63oWXYeyYRoz2UaGobbSOWcY5ieQR9I0Erzh9S/9Xf7Ih/8o//MX/yZj\nSa2ccl+Jm8J4AxVhNnhc77C+cPXoEiMwW3gWbsbJ6Qm5ZAyW3g/YI8swX9DfePobyz5BqiNSK2Wa\n0HXlfa9nysNXicXgwpycEpoyn/u1z/Kddz/Mr169QlVHKqklnL0g3lJMJlfQJKgUNtdruuMlk1Gc\nGEquaLVNz9w6+sXAarG81dypLE9mnD+6wOwzxkGVieXpXcbNRNyOTXjMwTB07NNIRZm217z95pcx\nFGJcEDphftzh+ogTxUhlt5vaQbXZsd2OT3JLA/DJT37yPVnnC1/4wnuyDsDV1dV7ttbp6el7ttZL\nL730nq0FcH5+/p6t9fnPf/49W+vdNv68E/9Uh1P846AIYpp8bEvdtUYYK4U6VZKBFEpr0lFuq1cM\nznr01mutmpn5OSJKrhPjuCWXiawTSsZbh/MerDDvZ3R9jxRP3CrjLnFznllfXZGnTEHREUgKXqA0\nTzRHJU+QciXFiJiKDwOmKlorU2wyvcYFFrMT9g+ukN1ErULQgkigimNzc8Pp8phdjMyCBevxYc6/\n9L1/jL/xpf+Tqwj7zch+W9lvlZIqNvQU07pAq1imXDi/OOcsz/FWWHQrrHXspoxzinGGfnDc6+8S\nOsPDR5Gr7Z7OzrBm4NvfEI5zItkeCZ5geiiRuN9wtdtQUsGXQtKCLRZNlegjxRowrkn05kqYWYy2\nA4JgMC6QKmhuE6I63xFmPf1sBhR6ZxDjWNzruLi8IseEw1NJdDOHFM/65gY0Y13Lk4TO4zrDZnfD\n5z9/w7AKHB+vOEsL5suOOHX4rnW77raR7XpL3h8SoAe+OXly2iy3E3QUEClN8U8VY2yrXKlCqUJO\nhXE0hKJkJ+AsWEUdhOAxpiCqVC3s91uSj1yvL7i6ecB+v8aZBWib5OO84/rmguViyWadMN6xudmS\nUmEay22oQClScf52wMWopLGQpkzVinOG0Jk2vmwspD6iIuxD4PLqkiO3pE89qBKnDH1ls73Cd3PO\n7jxNzCN+CGzHDfEiUqaRk8//fV7uPsBn3vpV4tZQYkRLAflK4tZTklBtRyqJdLNn1c+pkzJuI1PK\nbGNElzNk2aY1GRQjlvt3n2Ffthz3Z/zznPGMOeImb+ntDGsD1jhqyfSzjpBG7vcrKg5RS9aKKRYJ\n0kbT0UYhaYFuFrC9Ilaa+qMRco4YCTjvsbaNuCsmMswHOuPIonhZ4DpHLhNpn5AEg5vzKEWqKazO\nujb8winDzOKcIeVCmipKYbO7Ydg5xGaETC8dpSTimMil9RscOPDNyJPTMze302P+ke5r1TYEAVEk\nV1JyZDI1O4xX1CneWyyeJJEQDFkTN/sr1C9gB/v9DTFuSbWVG1o3x0hrF5/yyG7a3Hq9le00UvaZ\nXDK2WLBtis5+bI1NOSk55ta4UgpGWtGk1Saxm2NkihMOx0PewEzCxxbfyk2aSFOb/rM6OWPcrVmb\nwNHZPY5PT7i5eJtxfc6wOCVvEz/6J/4N/vJ//LdYzRZsRpB+B7G1A4lCHgt5vyNNe2qKzBc9Z8tT\npu3Idp/4wmtv0s8sJ2dLju8cs1rMsGpIJfH86lleckd81+l3sN9eM3NzbOiwxmGswbm+jbQbE8/e\nfz+7X29PNLHGplVTmvSsOJqSThVs57C9w3qPrQasRbUpH1oxIJUp75mSxUyGsPIEH4il0AffFBe9\nhyxIqsyWA7OjnnE3MnOFMGvt+VpaYnPaT1AtIqU9Gflwqz1jMDhKuiGnCuV3rwt94MDvR55c0xC0\nJhenaDK/FTen1Zhzq9mSUmkj4UqGDCUEiiqehNiOUkA1M047ylXCGU+piWEW6OaWvImkbDDVotag\nOfHw4k0kKCqFcb8lj2PzRLPAVFBxjFMEdZTaDh2RipiKSGnDmo3BuExR2G0Labwgb5V5GuiOPUUC\nxhbyuGOVply1AAAgAElEQVS9vqEAc2d48OABcZrwpuK6gQcP3mIcL/iu+8/y7afP8xuPXicEh3U9\nmERMlZInxm3Gux5jlG5u2Kw3aLKMWtGqXL19wRS3bE6PePvNt3jppZeoBjotnIVjfvSpP8jN+gpj\nO/phTjWewXvEAnmkjBlnHPMwYxEWFANjviFKoro23i0sCvtQKc7RHVtC72+rhG6nIZmAOIezFuc8\nvQ3ETWWzPmc265nPFswGSzQDcdxjaiJqZNqO5DrhByFVpTcWOkvNLbzlS8XMeozO8KFQa8QYxVgl\npZFaLIXAfjexPyRAD3yT8gT1zNsUewu3nYvNm5PbSexVviIwJeTSNEaktvhozoWZOFansFh61NBa\n4FPF2YgxjtVyzjQW1mlHzolCATVtRGiaiGmkilJVm2pfNkgBiqFqRZMlpYqUgjrFhtqGH0imUCjW\nolKhNjGoaW9ZT1u6E9jvtjiEJC0RaNzA2ck9osILz99lt9+z3W+Qkig5M4Qz4sUF/94P/9v82f/x\nz/PWuCbRBL2u1iNxLMy7gdO7S1x/TayVPCX2aU/d0xKgUiFXLh5ew5VjZpa4heUD/ogf/Y6Pc755\nhNWmIFmNbx21Yql5QtQQuo4+TUzjjsWwIuuIStd0bWhPPyVZjPf4U0NYCNUIEtpAjrhLLOwCNUrw\nnuVyRZHCfrvHmMDVw5HF3OG6wLIb2BmDlh1pyjgfqL7gvKOfGdRbSi5MO5jGxH4smGro+5bwns1a\njqSURAiekjN5LNRJmabHqs1y4MDvWZ5caaK06UJNgISW/9Q23LhSEdMqRbjtGCyqTRmQTCmWYSmE\nLnB2eowIrDcbgNY09JVJ7ctAHTO7bSZVJaepraMVNS2xqgC21a6nUpDqMFqxpTKlBKXivcfYirG5\niTlZQ9GCcc0Q+t5TimEct9T1RC4VrYJiqKrEGCkF5ssVDx68zTSueekDH+ThW68yLBc8urrkbH3E\nyx/7A7w4W9E7T5HCjd/R24EH0yXPP3uH0V5wcrpgo2t2NxO7eEnaBIpmck2I0MZBj4V0tSbfRP7N\nH/pB/DZRaNrpVS2LoVWwlJKQmls5pjGIs0QnLO6ckKaRsq/sNyOaU5MyMIJxyvFywbB0mK4Zc2ol\nBEeH42g2Z9YfkV1iTBPDbCCnSpoS66sNp/0puWT6oSOOE6Hz1FQRbZvAWEGrZbsfyUlJ+4mahBIr\nvoc+dDjf1Beda7NQ06RNhrh0VN09tj0rIv8N8C8Cb6vqdzy2Gx048E/AE+wAbYMVWvu5Ymit9vW2\nJ11ra5YRade12nIDVdsw5L4nhA4xhvmsw1jL9fUNIq32O+fMEAamPkGc0KmQNREzGCq+s1Aqwd0e\nGlZw3hNLpaR2f8nSkn42I0bwg6UGcCHjvLk9ZhQjhiIViuXZe0/T9zMu315T04Q1luXxGWE+xwfP\n6f07PHgj8ulf+/soGWeVxczz6PqGu3HLR+7e530pc391wt/44mf40FHHpybDVXzEC0/Nee6Z5/jS\nzWehGHbbPXmXMaFiDISuY7ed6HTGEOFf/94/gouJbRwRcYT5jH6+JE0T3a06oqb23brOotuKdIan\nTp/mYrdGnLIeR7bpEoMFDG5Q3EwwvUFsIZsmNXzs55x2A7N+hnOtbFSppDhRSm3H9jRRpja4ousC\ni8UKTbeKibf/7nlS4rQnx0TceqZtJe4y89Wcvhe8N2htyeFaKzXDtB9Z30zs9hV1jzXM8t8C/wXw\n84/zJgcO/JPw5Dxz4Ldc8qbDUm6n4yja9K9v29Fb9NygtbaKkzYxFME1I6/mdpivp7MdNUPNipE2\nJFiMQV2hxISKoVZIU8YHwXnB945aDLkUrIFYaxv8gEU7xYUm1sUg2FDb0OCQqVgY25AHSRC6wKo/\nwTiDoBQMq6Mj+vkRPnRcXV2RcmS5WLG+Ouel97+fz3zmHxD6BVUr4+U5n/zgyzy43nDsLOPd55i8\n5fn+iHPd0R05Xrj3ItXu6cY3GesxD9aP2I4TT89nJCwPs/JMWPCvfvx7+ehzL7G5ugYRrLGI75hi\nZN71eGcoKSICwXt2+zWd73CrJbN4hiLE3Y5gBrxsyVOk5IL3Htu3XEISQeIOj6VzrVSxVbSAcYW8\nn4hxpNTMbr9BUYo1LM4G3MyyWh3h1FF2kNcRSyCnG0oGEd/Et8jYbqCbO5wXrCsYZ4BKViHHQi5Q\ns6HoSJibx7dnVf/mrfztgQO/53iCHaBCkWbGkeahG2kmsCVAv3JhS4i26WlC1owgPHp4zb27M1Ly\nBN/i2Z117VgoisNjS25ecynkkkm3E+G9WFJO2CxUZ5FesKJMouBashNpioPVCSYoYQnGQ7WK9K1Z\nRUQoMVCjRbXwwnNPcbI8olRHzi28cnn+iGm/4cWXXiYMHc/de47f/PRnMc7x6pff4N5z72fpEq7v\nCBjmyzPuZ4jjjo+9+AJZhJqU6xTx88CdxV3yzZYXTp+i3O/4crdg3I3MuxnGDfydz32eT9x5jj/8\nke/ien2Bn80Y1zuG4yXDMGs5gJSompGi5JrZx4xxQswJMBwv5xAz/dUaa8OtTjrtew0W60urKU9A\nTiiF6DKp20MS1BamHLmZLoj5mjxW9mPk0eacXR15n3sW41vdv/eWzndtdqcYqIY8RfZjRFxgtupx\nKjifcN424TUytdo2yDop+3FLjC05bb19Ulv6wIEnyhNsGmrW+ithleajl1uFRGmva21zP2tLfOWq\nGNOmtU9j4fJyRzdzBO+wnSd4j1Ylp+bLN4ldQ+c7YlGoCe8szt5qbmtCq1BKgeox4XaEnTMw1jbw\nISi4NtjBWI/6iliwOGLKOOnIBp5/5g7P3L2HMxVvHNY7TBTG7Z79+pppv2V5csZ+v+N6uyaNW9DC\nh7/zO8lXb5J2G/bba6xp5ZkpV+7fPSPXgpPASUkYKg7lu4+e59IPfO7BQ77t5CnsMkEYuN5HPvrc\nff6Vj/8x6rhjv5+oueB9U3XUWkGVoesoeaLU29r52++61NrCQeqxzuKCx7n2XVkxiCj2dphEG3zR\nGpvUwtZFgilkK+T9xCau2cYdWjMlCzVX9nHk3D3i/v071FiZ9olh6OmHpgmfU6bWdnBrbUJc1nms\nLfjgMU4x5nawhdYmX5BaPM55A0no5/7JbelbfvEXf/GrP7/88su8/PLLT/DTHPj9zHq9Zr1ev6tr\n39GYf62kj4icAn8ZeAF4BfhhVb26fe9ngH+LNlP+T6nq//q1120iUKqKoUmiqmlGvNbaHtURklS0\ntjn2apRSBaltcOZrr15TTaaqcrQIDNaTcxsibIwFVWoBsHhjW3ghBOb9gGplO67bMOdscNbQ+0CU\nTMrAREtymtvhFs5SXMYYRcggDqk9McPgPR944f08e3KPo9WAicLTzz/LG69liusInWd5NGe73fDZ\nT3+GP/jPfZLPfvrXoez4/Kf+Ht/1bS8x9D3T1Rpbt3TzGV/60is8/cwzxJjY18r65oJ7p6fMjzru\n18SLd7+FYwZeu3yL02GJdp7dSeBZt2S1KFy9fYHmdghaaTNQVZWhm0PJ1FoYpw3iPON+YjZvQmHJ\nRkJncZ2lm3uOFwsqRzzcRrwIVWDMCRsL+WZCRZiHjvW0xVeH2jVFCpFCLZmSWyVSKRWNwuZqy/n5\nFcvlMfubLVKVaRyxJjT5g1RbD4Cx7bO7Shc8zoH3BmtNm06kELwjThljPP1QICzx3ZMX2vqBH/iB\nJ/0RDnyDsFwuWS6XX3391ltv/Y7XvhvP/GslfX4a+CVV/c9E5KduX/+0iHyYNkrrw7RJ5v+biHxI\nVb9mvZhIG+LcRpoF5LZ+W1QpVVFuJVwNZCquGrJmjG3VMDEWHj3csOh886xnM+IUqdU2ZcNSsFrp\nHFQxGO/ou8DMdnQhUEUo5abVkIshm4JxhfncUVKmFIeIYkwF7ZCYKLYgxVN9RUWwtdANc56+c8Ti\nZEVyCc2KEVgsl3zxy79BHNccn54yLOZ86EMv85lPf5rTu3dZBDiee8acePD515ktPcuuxztHP1/x\naLOj7HdcXl2Sa221+VbZTRP9ouel9z3P2ekxr375FZ45u8ubF2v6O3fQ4nBujh33iHdt5Jz36JSI\ncUO/mFOqwZqezc0GMZk4OVzX8+buglIStoP5vGOxmrMbO2bDEuYTeW8pgKYmk2aDQ6uScuEygYaM\nCwlsxXolRaEUocQ2o9SUxKtffJXlbIVm4eZyhymV/X7TSlMtiBX6rsdgEWrLlUhoM1FvK55aDkVA\nK2IyxQp9b7B29i629IED33i8ozH/HZI+fwL4vtuf/3vgr9MM+g8B/4Pq/8fem/7alt53Xp9nXtMe\nznDHGm6Vy44dD22bzJ3QoiGgRrSEFAGiBShITV40gqCWQLxA4g2oheg3/AMRYpAQYQpE3YLutJIm\nUcc2bsdxbJeryi5XuaruPXc40x7W8Iy8WKcqgcTB7VT1DfH5Svecfc7ZWmvfc579W7/1e75DCcAb\nQohvAj8OfO67HBslFFy5BM4jhjS/YfM8ly5FIoogZzH7Y4t5NCOFhJLpLz3bpaeIiCyF5P08Yomz\nShA5b/6pPHd1lTF0dYs2mgOuNs/SRPABKRMg5mLSFIJXjMWTsmDYZYzVIBRBClQ1Qgk4bchqQC8k\ndinx20DnGqp1hRSW+4sHeN8z9QMpZU6ePGZhHT/0mY9y8eornG4vqfcFbRsuLy+IdpwVn3k2kFJW\nopQhiEzKUIpivTzkO995g8bU1G3LUbfi8vIMKzTPfuhFHj16RAgR17aEUlDFotBoFLZZEoc9KUR2\n+x3j2GPcvJGsjeM7FydcpHP2456UJ4wtNJ3l4kJSroqykJkoI/IqqENKhYyKZDy6KJAK0iyoKiVR\nooIiyTkggHHneevbb1JCwjpLDondbkMucY75A3IOV/NxiVGKECNFZKyes0198MiiiGU2IzPOIq5G\nMB8UhBD/3dW6PxJCvAX8x6WU//IDO+E1rvGPgO93Zn6rlPKufddD4NbV47v8Pwv328wd+h/GleJT\n5PKepH8OhZg7dinnxBySJpOuPL1n+qIoBZULMSfiJNid9xAkqR9QzqKFolCIeaCU2TNdKcHkA845\nnDM4ZyiuZcyBizNPCoVUEqbJCJFRToMZEd5BP2+IhkEjlUeYgCgSbQvWZkyViER82HIwLaEKnJ5u\n2G08U7+hZEm9PGbvt6yXjtZZXvvC51neWFIPYWa9LBpi2iJMxfbyjJAi9XLFc0c3uTzdUleOxxdn\n+OQRRXBy/zv8xE/+ed566ztUVYXEUjvNNO4QpczjJj9QVR1SGrSpZmEWAtsuyDnhrCP4kTAF6OYx\n0+uvv84TcYbSkiH2hOSRKBbNkt5GvJ9IWlNQGF0wWYEX5BzRUoKGGGa+UQ4RJoHMEpJg3puUlATb\n3cDp+RlOzOOUadox+YEsQFkFZDRz159SIqUMRiLSVXZszsiSEcVQciKLjJHmXZrUB4JSynV48zX+\n1OJPvAFaSilCiD/uLfRH/+yqKBcpoEhI5SqkYuYn5wRFFkRJiKAwenZINBrmSN/CVTonpw/3hF4z\ndJK6yVSVRmlFFABpns0LgRSCcZpYdBlhJK2s2FU1Wk8MlxNjyFSiYGwmkZBaIMdCHt7lk8NYEq6S\nCKnQKhBK5sBZtuMZrTa8sH6W7ZMnPHr7EbvdQImRYb9nHC557oW7xOjZ+ku0hKU54rXtJTZlbty4\nRxs35AyPHz9ktboBRTKkgLGWpq6onKGu15ydPeb41i36aeTy8pLdfsdifYCTkml7yeb8AmNaZDEI\nocg54v0c6pytJhZFu1wxTHsQZbbnRbC4ccT/9etfQnaKuqlJbkI7yRRHYvbUdYM3mT4ElCxYYaiu\n2CMxR3IEIwTSFYScBVlON/gU0RSKkuQExhpa1xJ2E4GCUhDjSIgRpRS5JIxWiJzIcV5BIUZEkRSV\nSSW9F7ghESDnBiDnjNIfXGd+jWv8acb3W8wfCiFul1JOhBB3gEdX338HeO4PPO/Zq+/9IVzcvwQE\nFLCNw9QzCyFfRetoCSJpkogUIRBJgpSUlFBKEMu8sUbO5CEwpEjMjlICOYFzBVSmMBfeUmaVod/3\n5NUaiUAJRV1pnLP0QhC2mZQCzVJT1EyflBSKnzfcpuxBaIIq2CSQosIiCVIQ/AUh3GHq9yzqJafm\ngsfvvHo1909sNxu0vMu23yHCHi3hc5/7LT716U/Tn2/46tc+x4v3XiKHAWSiCIFtHNvLPTlM+Elg\nrGHTb9DG0hysyWLutMdpovaBoC2XTx6jnGGKA9ZV7EaPoqBzJluJVhWCic3ZjrZpOL84o23XjKHH\nVjXvvPaYndxjGkV3w1EtJbYzhCljZUPfjyQhkAqMTLSmgDBsp4EpztRSgwYlMaUiEmeVgM6YYphK\nQAmFKpLoZ9FUjIVhjIQYMKaglEJETchpDmm+8rzPSaKkIqWrIPAiyEQuTkYu39wjr0Ktr3GNH0R8\nv23M/wb8/NXjnwd+5Q98/18VQlghxIvAR4Av/FEHWN9dsH5mwfpOR7UwvCcjEgCzOAhRKFlCyoiU\nETmjEIgiUDNzkBLT7OwXBGGX6DeJ/Tax30f8qEiToHggCMKYSQlCeFf6XjBCY4RECkGYAn4r6C8z\n434ihgIkcsxXFD9Ju9B0rcLZCqtmlktJgr3PmElze3GMVJH1esHy4AAfPEoqtDHsdjvG7TlKGHbb\nCyiZ3/nyl7jYbzk4us3p2flMuxxmp8D95cBmvyflSLiS3z98+BCpJEVoHpycsO+3tG1DLpkYI916\nxbjZIUshxJH1wRIpLdvNgEgVl/ffZvfoCTpH+n2Psy2jH7l98zaqa3j7ySXbx3OB3Dy8ZLOZ4+eq\nyuJMzZ2jO0xjZAoeY0GIidoWKmswolyFhcwxfCpnKlFhdY0zDikztXVz3JuPiChIQTGOCT8mZBLI\nolEoUinIAt5HYsxwdUEOKc0boRkEBorm5r0jXvrJYz75s8/zmX/u3ve5pK9xjf9/43uhJr676XP8\n7qYP8J8BvyyE+KtcURMBSilfF0L8MvB1IAL/dpkNVv4QCrMl7cyJEAgpKHFmiIAkhXkzslx1Wu/R\nFaMkqTILjXJB5pmTHFMg5ky+OnhJkGuwlfx9LruQRC9Jscz0RS2wyuKkpq4cVWXph8zUJwgCIQNa\nSaQVlKJwnaRZGKSWaFWwtcPWGVVJ+rGwLz3TzRGRC34c0XWFULPXTE6eYRzQJbPdnXLz+Dbn5/c5\nbhas1musBp8tIRZQmmHa89bbr7Jc36BzsDnbsj44QElF1bZMY8/l6YOZIonCth1KSk4fP2F5sGCI\niqpp8WOkaY/oFpo4bZHaQolMXrC93HFweMBmd0Fda07DiJAJZy3aitnAKhQudzuWbYtVULkKKzXD\nlQFYrQTZeqSKqJwQelbnGglO1ShpyGiy0UgfmaZIjHHOek2C4DMhjSgBunZoqZFFXPncS2pniGmm\nmxqnr9bBfA4pDELC5eNLKIK2m1fTNa7xg4jvhc3y3TZ9fva7PP9vAH/jezl5ARSKrGZGgpDlXXX/\nux+ujjl37EJIMrNIJCcBUaCKZsoTqkhKTKQBhpgRncUIyFIj1FWwMQnl5sCHMAVk0Egk1jiOVmvO\nz3fsNxMhgKg0VTPz1YXLQMbUEmkKSmeMTTS1w7QZYWenxCF7tvs9cb+lqmcDrKquWK5qku8xorDb\n7ehqxaOTt3nm7rOM2y3FR7CKrr3D5cWbHN24hVGWB/ff5vadhmG3IYSJRw+fcHzrBg9PHrBuF5w8\nuM+tOy+CqRhGD2HCykLvM1XbzZz6gwMuTneMu0zse6TwpDCx3Q8cHd5ic3pGTCMhZ37rt/4+9cJh\nxWwvW4qiRME4DQgdWeqbOCuxqmLfj2wfRepnBTZNGAu+JHLJWFvhsqYSmpwKgjmwW4iMUgqZIfae\nGApj70EWmoUlF0ixoKydXTKlwFlFLQo+RKY4Z40qaWc2U8pIpTg4OkCpgJIOIZ6+Be4fFA39SfC9\nikW+F1xeXr5vx/rqV7/6vh3rwYMH79uxAL5L7/h94f18be/n7/+74Sm7JkISQC5IBEXNRlo5ZdJ7\nz7xyVkSQYkZq5uqOuGKsxHkDFTXznaeMKQo/gbMFVQo6z/PVuaNWpBSJsZBTwlUGbRWCmtVyxfbx\nI0IpSCdwtZtZFHJ2HBRzagYlC4SyyFqhHKAiVlgOaNnvdoybS/wUqauKVAmk8FSHDVJGCh6fJc40\n7Pc7lssDtv2eo+M79OM5xi6onES4itW4ZQwJnyMZwebynOM7dzl9+3XknReoq4aq6ajbA/rpglQg\nhjmQYz88wrgLDqnQRjDtAv1uRyIggsfHCFIwklgerFC24u3tlrqarYYhk0RBFIEQiTFJ9lwikmFV\nWbYbw0UfaLYGrcBqjbVi5t7HecM5CIlM891XDPPdUCnzPkkIHu/nODxBQSgHZb5byzlfpU5FtOa9\njFW4unOhUMocISjQKKVxlZ03xMVTTUK8xjWeGp5iMS9XDoiaLBPEmc1SeFeuPV9h5zf1/LUQkPPs\n2xJzpCDnwGHmhBtKQaPJvpBtJASDniI4QUETk0ApBcx2AClFpKjJUSGFxFpHu9TspoBUAqkEOUMO\niSwLJUpSgpwEky9IEa6YLR3TbsOrm9e4197BCY11iTZLVs0RMXi2u3NSkjRtg9UVi+WK1eKQNO1Y\ndQd4HzHasrl8AtQYXbNarYlhD2XujlXV8ej+CbZtWR4fcn62plus6fsBowTb3ZbKtuz8iCqBdnmD\n4LeMfWG76ZliIA8TMUds25BEoD06wFYas1ziG8ntW7fZbp8QQ8Yz56CmWMgq0HNOW61ZHiq6nSAV\nTUgKj0Z5ha6vWEZ5LtyUERE1oghCzkzTNDNq/Py3N0Ix+h7X6j+g2oUY4xxFBzgnkCohKkGRYLMj\n54hzljAmhFBYqxCyMHkwWnz3RXeNa/wZxlP1ZskIREmzmo9ZhSmuAiNkEVcFXF45IxaKuJL/C4ks\ncu6aAZCg5tR4nxJCFUxSTFNAazlHx6mEdVeba1HgS8RPCVKPEHNIhkZQ1RU+J0hzN0mcuc0pCeIo\nmcIsU2+ipe8zdVchlSAVy4aefugRSrGoairb4P0WazUxRfw0cbi6xfZyw2qxQOIJpdA1a5qjG0yX\nD+mqjpBGbh8s2TKho2K/Gbi4uOCHf/izvPrK7/Ezf+FnoWRu377Ldrshjj0hekxdsx3OiT4gmwNO\nLy44vtHiqgZpPMfr27zzyjcxbU3O8+9Zi0xXN/zek0e8fPYKqlZ06nDumvc7tI5EaVAls/UTnfW0\njWK10jy+zEypEHJGIqnRlJKIJNIUScUTxkBOc/h2ygmjBKRy1X3PF26BIJSApKC0nT3rZcBqR73Q\nOJtRyuGsgOyQai7YZQFd113F2imEkFcX62tc4wcPT5WUKxG/zxcWAqETvDtguRIQSSlARKTK75lw\n5fxuEZ/zMaUq2KqgXJkDh7NA29n9cBgCk0/4KSPkHGYw+2vPkXSbzY5pyuz3PSFklNTMIxwog8Dv\nM1MPaYRpFxg3iXGT2Z4H9heR3TYx7DJ5MHRuha0cdV1jlCbngFGGlNKcvtN1DH3kzu3niKHQtSs+\n/vFP8eTshLt37xCLxocJU6/oug5KRsoOKQvt4oC2bbn7/Ieo2262GRAGbRwYg65W+HH+fzmzYL/b\nYYwgxMg4bGk6hzECYQ1nTx5TSqLfXGKcY337Fq+cvYXXAeMa0IoiBcrZWYGbwShHi8LoiKsC3Vph\nqkKRkXGMc6ZnyPPehZ+To6YxMewGNpc7dhcbxt1E9DMjpZQ0K0zNfKelVEFZiaslbatoF5b1UYWr\nA+1CUNWSZiGpF4a2M7hKsFrVWCNQVaZZGA5u1DSr6878Gj+YeIoWuOVqUxKUnh0RBSBkIReBLAJK\nQcjy3px77tTnW/HEzBsXMqKMRpmMk5HoM04bjJvVJv02Y6Qia5hSpMoWskAKjRKGcexJk2CaBqZx\nP2/CpitPkRwRk0AISBoUmZIEOQp8KGyfJLROVHWhVh2VrGi7Q+Swo5SErQyxj8QQqauaplkyek/T\nNRwcHrNarUEann/pY3zx81/kxq0V/aMRy+FsAFYstZ3VnMM4sNlteeaZe5iq4uLsnXlerCP16hid\nRt7Z71BCIaXHOYdEEvxIKYKj2wc8euOMzfacupL4aaKylu2jE7p/9l/g7d/7W4isZnfFNPvKK52R\nxuKTx6ERImGUQsV+7o5VISrJJMGRMb4QzEwdLUkSpjw7WJZCTAmnLSKBkYpiJabMNrZFZbquoesM\nzgmsmQNGtJJoLRFkwJOCIqSJLOdgCqkL6Nm7HpGJJZJK+uMX3jWu8WcUT60zL2kenSCYrWZV+f2o\nilJAFIQUaAPGKKw1V9FizOpRId9TL7YLiTHgGo2SBu0SVS1wTkEupJBm462Yrtz3NClFlssF6/WK\ni+0F+8stIYQ54m3MpK2g7AQxFHKQaAHrlcE5NY94JkH/uHD+zsT2NBIn+PSNjyLDQD9uKCKj5TwK\nss4ilcGHgBQgqiW2XXC53fHqN77C2aMTnnnuBX7tN36dJB26bohZUDWOnA1n5+fce+6HyHnmko/j\nSCkNWcJ6cYPDG7e4uLikkokY92g1R6vttqfst2e4WpH9wMXZI6bdFpTDDzuQkmefvYdeHCGiYjvs\n6YceHzzDNCKAPvmrsZPAFoMWiVTilUAnozQIImPwM5tllOAN3gtSkEihAEXjOhrTzN70uVBbQ20y\nba1YrWua2tBUDmcNRjlkkRQPaZJEn9BYnJ59XKYpMQxhfr1jTwgj/bBl3++ZfPjA1qwQ4jkhxK8L\nIb4mhPiqEOIXP7CTXeMa/4h4ap25YKYaCpmvnPDkrNgsCkS+UhkWFitDjorgM1rPAqGSr5SiQlF1\nsFg5zi8DSkVMLUEIrIUwe8BSCoScqEpE6oCQEWNa1l1FWdVs+55HDx6iXCFPfvbqiwmUREmJFJn1\nDUx/IjEAACAASURBVEO1iNSLBbtTwfZih99E6soSleT20YIcJcFPWG3Q0nF5ekoIsx9MVdd471HC\nMm4u+NK3v807b53wnXdOiD5x6+Zv89P/1M/y8PEFK6XZb3eM48Sqrjg8XKGM4M7t27SrI6QRhGI4\nvzjhY596kTRN5BjRKnO8PCJFxeVuQ+ssGMO0n4hTZIw7Fsc1Qhpu3V3SHqzRFUDkzTff5uzynP3l\nlhQ8VgtEbcllB0nT+5HaVYiS0cphrUBUEl3EbE8cExlDZv4sUiKjUUIgmSmFRRSUnC/IdWuRWWMr\nw1QiOQXiIOiqg9m6OCZ8mC+8gYAKnl0vudwmzvc7gvdoZRBKIaREykzbVjT1B+pnHoC/Xkr5shCi\nA/6hEOLvllJe/iBPeo1rfC94imMW5u4OgZBAmcMRZgikTNR1R7cQ5GTYbnfYpIk5gypYofCTpzt0\n1F3mcueRahYJJS8QOmJEmSPjckEWiTEWZSYymkV3wHK1oGtramsZ04YxnqMryCiUEQTkHPzsBKtn\nBetVy+5UkoZC9AqtLKvqiCwD/S7y+OItDo6eQ0vH2fkTQr+nbVtyKWTfUxvLlCUPnox8+2Tg62+c\n07bHfOvh63ztrXOE+QKrO7dZbT3LznD74CaTv8QohTOWUEAbS1KKMSZM3WLqms2TR9y+fcyYC1ZX\n7PuBQzWbku03l5RqwJmKo5tHqGy4f/8xybQsDhYEDDlcMEw9+7Md425EmZliWMjMHxM+Q4gjUneI\nNMvztVZIK1CVnlOLUpij9kqAcrUnIgVSSYydPWKEsOQ8kUukriTWZsiFKQikVMQY0KJm2sPQB/wY\nWC8DsfT03rDdeDabhLpSwQoKj8/OmKae5YGj69oPcM2WE+Dk6vFOCPEys7ncdTG/xlPH0xuzFK5S\nZWbDLBBz5idXc3KjaLqBeqFp1gphFaYGYwXWKqrKoI3ANhLpIqqefVRMJZCygNW42rI8NkiTkGSE\nzQhd8HGLTxNNW7FYrXjmuTs88/xNbFeoDwT1KnPzw4I7H5HcfNFx9JxjeVNiXcLUBbfUVIeCG8/d\n4PD2khs3jqGDumkZQmIIeza7S3q/53Jzzsmj13nw5re4/+bLlHHLzXuf4rTP3L73Mf76f/SfcPve\nj9Pdfo5f/fUv0rUrXvrQR9mkQiqBNHlyhLqtaJoWt1igzIptSNx65gUePtmiukO6Wx/m4MZzuGaN\nvOLhay1pGk1lFU1bce+5Y4a0o6oNt28foESgbiVEwcmjx+xPd4gouLm6zc31XfSVyCeL2SelFEOh\nUFVzrqpUAqFn62JjJULKmbXi0+ycqCNCBJTKQMFaTSkTMQr6foeQI4lxvttKIIpEBYUOmrpUKCnw\nfWLyA0VItNpjZCD0e/w+ESePypo7Bze5d/vD7Dbw5GL8x7J+r2yhPwt8/h/LCa9xjf8PPFVqokAi\nZHmPepjzTE8suWCMxtiAqebuUCuF0LNLohCSLMXMzpAzP7lpKnyMSFVQpuCqubjXqwJhzug0misB\nS2C/mQAwTqOsZH1wyPngSN6j15p2UcjK0o8JbWpMNZB8wDQdevLoArqG5bomqUKYdsSYUEaSUsAo\nzRA80zQRfWSzH1kdLnj2pU/xe2/3VOsFv/mbv8NfevCQ3335y+B33Di8RRozw+QRORNjxgfPrVvH\ndE1HVTuKWZJT4BOf+AQP33yDy7MzXKtZr45xpsG0EVlusdme46eBAmilqLsFRle89JEPcf/Nd1Bq\n9hpfLI44+84blNHTqArrLE4ZFlXDflqwLwOlgBEOLQuyFJQxs6DHaLQps5d8nv9uhVnipfWsDZBa\nzX47JWCUReVCibOMH5XnvZGi8FPA4BmyQtkaZxtsHJl8IgSPkhMpRXLhalzlOOw6rFC01YpaV3Su\n4c3zNz/4lTuPWP5H4N8rpez+3z9/7bXX3nt8eHjI0dHRB/6arvFnE8MwMAzD9/TcpzczlwXkPEN9\nd7yilJo7dQWmEigrkMbPGZRWYFxFCuGKmjirNp0DY+fZ+uP9Hi38nFWJZLmQIDUpjchc4axGC5hC\nZH/6kHvP3+Ho5hFSypm6V0kaUyM6S7t0uGrJdjdQRMFWI2TLZAeyKzircI2gsZbJjmhqkvLENJL3\nI+M04oxh22+RxbE4avnYJ3+U+uhZfvf/+NscH3c8PH3Ev/VX/xrKSv7mf/43+V//+/+Btx4+4p90\nFu80phhk44l+RNuK7Cz1Yk1Ie377N/4eb72x4fHZEyoyTRv5+J97iU9+8rPk0sD2DJEK6+UxiYBV\nljFusFXL8y89wzQFchFUqwVvvvk6n7r3MT73yjeoXUulDV1jaahJacl53qHyQMFBsbP2VoCWCSUN\noObov5JIRWC0QaqEzHP82xxMUagbGHeJEAOJkRAFlZ2pj8NlYBCJZ247pBVokVnUjogm5YGSHaJE\npIiUFJAYrIwcrI5Z1YfkAqvlIXVt+F1e++OW3p9s3QphgP8J+G9LKb/yRz3nOvPzGu8X6rqmruv3\nvj4/P/+uz31qxfzmnYazs2FmtMgrKiIzZdHognURYzSuBiESbTu7E+q2wXuPlIrtpqC0x1qHJGOc\nJHmJNRIhPNlUVG1GW02lW7S0KBmJvnD+YOLRozNu3tlDkWgtaOoOFyxK1KwXFaqai9boRwyWojMq\nFZSdMJXCmoJwYCporzY493FCJY9VmnGaCDGhhESaBbZZsugafBg4eRgIQcwOgjny5/6Jz/LFz30R\nGZ9giqTf7UnrI6qqRtYVi/Ux1eoIDygheO31ntf7yHMf+Qz7y0te/drLNO6SxfId1ouaqu7mqDgE\nhsK+P2PZHSG1xo8TSmba5QpVN/yD3/ttvv3Wm9w9vMOTzQnGLbBO0RXD1BtU1FSqhZSwUlK7hoVN\nKLnDpJn/n8WcBqWFQSlJUye0TIyDR1ChBagyYFRi8hFtLSEktMzkNLNicnL46ABBYh7xLGyDLy1C\n9ahSqJSirgO1AKH37Mf73Dq6hZwcojbcEOsPbM2K2c/hl4Cvl1L+iw/sRNe4xveBpzYzv3XrJkdH\n7Wy8VOBdvw1BYbGsuHFjQWUdUhaUKtRdwVYZ12TapWOx6FisLFW1QBsBQiB1IaPp1hbjLLJI6lqy\nPnTYOqIriLFQpowymfOHPScnJ1xcPpqtZN2C2lmcW6JlgzKGwogU6mojz6BRNK1m2TUYbSjSY/Q8\n6z+oOiolcUhyyYRhS1N1rA8WHB0uaaxCpolPfuyjPHv3eQ5aSQqeGCb+w1/8a3zrtW9AKkgjWC8X\nZBUIKWCqFm0NarVAS8eXP/cNXrnY8wv/zi/ya7/1Ff6Xv/V3+PjP/Bjf2GlyXlMwswpUaiBhbYPV\nDTFGnFA4K+mahvXqgPMnZ7xx/1VuH92iqizdomb0Txj9ntrOkvulckgm6txhjKZpHXfvHPPx5z9C\n69azJW3M1LqiUi3domHR1CxqTW1riKBFQcmAtRU3Dw5YOIcUiSL3ZCJV3dDWx+z3ge04h4MoEzE6\ns+hWtHWLkAXlMstO0q4VdS2wVeJbD76CdJmcB6z7QNksPw3868BfFEL8ztW/v/RBnvAa1/he8dQ6\n84ODlraqeb28wZPHe+awIomrLE0XqVuFM7M/SikDTbNgFwNVbfE+YlVmIRuqOqCNZRz6WRGpE21r\nQQ5oA0p1WFdISgKBfjfMxQdNTIVxFyBFJr9DGD139bJGC4vOBsUJqMLCLRmypzBhbcWqPiCNFm0K\nWk9IZiHSOASKH8k+EjNUtkYZy+VmxyuvfImXdM1PfeJ5/tNf+mX+mb/407z88jdZHSz5zGc+y3/z\nX//P/OU//y9DAKMN1gj82ROqm88QU0ZEjVOO3/jCV7hz6xb/0l/5BYb9jt3mlNe+9TovfviTiO55\nLPcZ/Ejoe1arAzKZAvjQE0KLUgYpE+29j3H68AGNMjS3V5xf7NHNim1+CLKnMhlnDUJHEBVOKBZV\nRdUaOtfhuiXLgwUPn5zx6OFjFtbQNWvapaaULSJJor8kjbPLYSoB5yQq16TSIOQeazNTibSdRcSG\n9eqIfurZRU/jwNYCZQq2lrM2WCuObuhZ6eoKmoqkCg8u3uR4fUjK/Qe2Zkspv8VTVk1f4xrfDU+t\nmK+7jugEQzhm8JFhIyhMFEZcbWma2TQpCD17ZauEGgQhDnAVAt3UlqYGZxTJO870lhw1tpKU7JAW\nrBA0rmVkYgg7Yu4Jk6Xkkefv3sVlS0mJFKB2hpQTMvdQWpQaaCvJ6CFHkNIgSqZSFeuqJasKY0Aq\nSchb9sOGKmnksEcrS9Jqnh2XQJ4mii5882tfoD6+w7//b/wc/9Wv/Cqf+sxHGS96fv1//z/5d//N\nn6O1mu3mMbURKGvoDtYoaXBVja4b7n/1Tf7OF77IT/74j7C7eDgHQZ8/4qd+5Ee5fedDvP7OW7RH\nO95+9XfQtePi4oIXP/RRLraPWS2P6Ydzbty4OxP9nUGWyIdv3uWN0xNuHt1i2hum6QwpQQqN1QGn\nKiqnERmECiyWNVV7TJcs7XAIpmNRdaSpp7KOdpFIpaWkicsLR4iZaTdyeKuwWtVMynK8foExHRHV\nG1zEDU1zQBxgvezohOPNh2+AtNh1oqo1rlbEHNCqxlIjxYqcJZpu9upJI6VMFK4VoNf4wcRTK+au\nUmhVaDtD22mII/v9TEusW4F1nqoS9JeeuqooaqKIQko13o9EFTk8XKCNZdGtEUi2yx2ncY8xcwqN\ntBlNhVIOa6EPp+gqoUzi8HjB+mBJSZkkRiqtMVoSQiamCR/3ODFH1GmTISiEsDiTsVpRm4osa5xd\nkuVjfBJs44RTFmsdQz9g644cC75MTOPIZnvGxz9+g0ZmvvP2N/jX/vmfmROBkiD+01vGfWIYMxdn\nD5iMQq0aVnWFFwXhE+LJCSdPzlCq5vDoGGsUOXh+7l/5Kzz/4rOk4lm4hhd/+Bn6J69z/8Hb+NJz\ncvKAtm3Zbs9plwtSyXRVA+NE3j6hqRx1lXkUnqCMoC0dRe9o2jXrKy947QS6gJQDPlccryqKXmMu\nBnwyVHJB9hZBxlWRlHp8XzAqIbMkhow2hcyeu3c+hUZRpprl4hYh9KR8TtPVZHYoA8uFYkobsgCh\nPUp52qXEDxIpJ8gDUqxnewUpUEiK8NTNtTfLNX4w8dSKuRSRIDOucrSdRBQ9Gy4JiSgjyEyREm2a\nOShBBZrO0G8nvI+MKVK3kuWhwlQVK9Fy6yiR8n3qNlFEjyxH1HpBSCMFxWKxIIYti8PCvZvPY51F\nicK+97TNksSIEBNDv8HompB6IGKkJMmMVYbKgTWSLDLaKGrTUuxE3gdMcYyTRwtJRHNwsEThsCIi\nlGLzZMfDh29xQyheOlgSxOzF/vHPfIr733mLh2+8yjuP3+LG4Q8hSqGuDP1+ZFVPhJSIMVE3lh9+\n4S4Rz2c+8XE+98Uv8/jkm/zYj/4Fmrbjo/fuUh8sef6lD7MdtiipGIctWkvatkZri59G5PEhOQyk\nYaRbrDj2NzE5cx5GRLBEt2LR3ODgqOLh+XfIGKbtFmMjRQpKtWC5WJAZuYFFlsDU7/FjAenRMlKU\npTKOysKoa0qZZm65vI8WL9A0HdJMNE3Ffjui3ewiqRIcLisutiOIRBEOsLRtwuiEsZnsJTkFsk8o\nE1CiRjIRy/UU5Bo/mHhqxTykgXHqESJSN4I4FdrWkMuEMRKlZq5yZRVCFUoWWKfwg8SYRNGKnAMh\neEBQV45bt55n0R6R9RuUolCyQ8oWXyTjtEUZT91K6grW3RKpBrRr8MNAY5coJRn8E8Z0QZ1aNpsJ\n1EAOFdY2IBJSFIwqyJKwTqKNQ6oDhBMcHd3B5UzY7GkXgVIEpq64ODshTz0Hh0fU7QH7aUsXaiqx\n5+j4Ng9e/Qq77SUxDtSu8PD+m9y5c5NHb++puwXy/BSpDb4/w1R3+Q9+4S/zS7/89/ipn/4xXnjp\nFs8982E22wvW+5FnP/sc0/YNchg4PDjidLNnHD3tyiB1g5AapRXSLCmP38EAd82CannM65sTQiOJ\nMiKaIw6WN7Gmw1UNb73zMlY7dttTUjKkpJmip20awjBwdGg5ixbSOSVOGD0QZY+rC3UrGQaL0gUh\nEyGeYu2KVDzeP6btKhp7wDjM7KScPMZYVk1NCBfovKRxHdYcQThHuj2eQg4RISW5RLSKkA2iXFvg\nXuMHE0+tmPfDJfvpEqE0TQM5OGoTMe4Q7UaUHRBa4mpNChasIDMhpcJYjSwWa+b8T8QcH1a5ioVb\n44VhH75B626TkfTTSMieKQqqRqOpcN1jvNdsLs8hZ6ysQM92u0papBLEFJGAjxNKWaxpcZWgspoi\nIkbPdxOowAP/Dba7S55Rd9AxEEPCWss0DmQMRSgenDxGKmi7Q4QynF5ecrHbk/2AwLDf7Nj5wsX2\nlJtHNyhtjRQK70f8xSVn5zvuPjNi3B1+/l/8LG/eH/jK+Rq76fns7UN+/Cc+TT1e8tZrX+Xi4hKh\nLYuVxWjP5eUZXVcjVYdUhrI+oGwfkaOnNYreVFTtAUZc0MojdmLLsruJUZqD9W3OH53Qp1NClvhh\niyeh/Z6SEipfMHiFVY5iWrb9OVZNKG1oOiDVTNMZMUpCiOzSOePwNs5VaFNom4qsEkYvyamg5BE+\nPqatD+n9JUoJpFBo0WBUJqWAUgktWko2mDSQgqeuFsTx6aeAvl+xaovF4n05DvC++rzfu/f+hWb/\ncbzp7wef//z7J8g9OTl534612/0hbdn7jqdWzKdpJDNCKlhjcJXE1g1GW5yJWAwiBipaQlGgIOsJ\n4wIxaoyU1E0FAkLa44dvc2P1I1gcXX2TdH6BkjVGKQafUDoi0ohUEik86MTYP2Cc9kjRIXVE6Qpt\nE00ryGJEFMPoBWlMGAWrZYUYJ+rKMYUzfJk3Wg09UgqcrdhPAzZHiHMYRZhGLnfnuKrm+eef48nj\nJ5ydnnJ+esYLH3qJySeOD28y7C9IRChwcHiTdnkXciAWRX95wXZzRt119OenrG7d4N6P/gziH/x9\nXnrhkywOj1gfdGzOT/mHf/fXSP1Ad7AkxoQWgsWy5fRiR9M0UBIHd+6BnS1ynZLEXFg0HbeZndy3\nYmQhF2zHM+4e3aPuOj790Z/hm298nvPdY0b/DtP2Q8j1ElMkIdaI7NFoKAfYzrDtXwMZMcrQLBLH\nYcVuSJSUGUNAiz0gUVpBVjhnibkhZYHUiVY+h5Q9wt3AGojJMwWPDwMxBYzuEMkgRIWsA33uKWJE\nq+ZpLelrXOOp4inGxgFItARkoW01hgqjJaZyUMIcMQYIYygloqVEqoy2ic41LJyhiMI4PqE2R/TT\nQ3T9LEaC1oYx7FBKgInY4q6KhSfR4/PEYnkHRGCcLjEKsvQsmmNyeYiiYkwjMRqymhPhU8wY2ZHi\nnqrSpHJJCTsQE3VpWVVrtFToEtluHxCGkc1FT4iBEuCtb72OVIp+v2PXB1586UPce+4OzimeZE/T\nRoYycXjzGZrlISkEtJEkAmV3gZQaKT2rpmb/xivcPuqYpsDDb3+Zt78xYbWiajqenD9A95CpsG3N\no0ePoRTOTk6495GPIg4PYL8hl0KYBnLwVM6xkJqhtIxJMurCfveI/aLjWByij+7SD5/A1hO7+E38\n7hTrHEVacmnwuzOEvzH7uKuGTj/L5fQWuXhKtrSdQrJiCAHvN5hWzZF0ITFNPUquKGJEihY/RW4c\ntvT7fnZljJcgFVPOxBTIscK6A6RJJF9ANCB6NttHrJrnn9aSvsY1niqeHpvFVBgkQkVyziThca5G\na4GQhpQsOZer8GaNcgKtK5QYMarGGIvWFUVuKezxCcKkZl53gJgz07RFKYgx4eyCXKCEQsgj1rRo\nIVgf3eTB42+QxSXQopTDWEHyPQWNFookKpRKTGPAmRqEAenJKRHjAEiCz0wmgBCU7On3I9N+zzBM\nFG2onGNPprYGkSW6XaHrQ/ZJc/K45/atFwjuki/92ud48s7L3P+a4/kXP4lt1rQHN3HNEW2d0WrJ\nfvOY6vmP8PhLLyOtpnhPUzlO3nkH6wTt8piqrtntdmhabhyt8DFSYqLplqRhjx52kKFtWvbbCxyZ\nThtyCEhZYaeJLGqenH6Zo8VtKnFIVTWsOSD3Nwn9Ewa7wgpPCJGkMpv+DTpzE6EUOSW0aAiMsyLU\ndSTfs91PKNlhdY0UhlIKu/0pUnaQO1IvUUXiB4+1ljwZkoc+epKYQHugINQaUQy59Axjf0Vf7dlM\nrzytJX2NazxVPLVibmyFwKK0x8c9U8ngPFJpyIWSAwKHFHr2OC+FgsVYgUgdlbEok0FmpH031WbL\ndn+C0GsoE1OaEOM8AhFR0Jo1wlj2+y2qWAoRZQztQrPzG5wtc06ltCinKaMkaovKc2pRyp7oIyoV\nhErEVCAn9vs9Y+6JSXNoVySVaBer2X9GOVIR9P2Wul1wvu+xpuPjn/40BzduoZ3m2Rc/RtVUxHce\n8OwLH+I3f/Xr/MSnbvLmK7/Dzbsv8ODBt7j97A9ha8HBukFbSXV8zDOf/kne+fqXWa4XxGlg3bWE\nDALJPkS6w5tM00RVLSn7LbazsyLTtpQnJ6QwMI4Di3pOvV9rwzhOLIqmq44I5ZSTOPLm219jUT1D\n8IkYJbrUDLsdyk54uUNh2W335HLKxXTBWi9QFsomQp55+MpZmmUilBU5RtqmYZriXJCBaRwQosxe\n6qLm7PRtlgcLfLwgy4hMs++LSA7jFIKANjUxjnPEHx5TSRAfXDjFNa7xpxlPrZhroUA6hACjIyp6\nYh5x1vB/s/dmMbel+XnX7x3XtMdvPGOdquqq6m7b7cR2xwkRgcTECkJOyBVESDgi3KBcwBUi5o6b\nCHGDhJBAkBvCRUQgggARIQkOKDZOHMvtod3uqbq6qs45dYZv2NOa3pGLddzpeOiuNn36ROnvJy3p\n095rr7Wl793vfvf/ff7PU5g1/TggpcCPHhjJIWNNSRBTlmfOCiE90giUVrghgRjw6YphUMDkihhi\nQIaGsrCkGChMg5SOruupqoDImdXijIvLRySOyEkhlUJLQTKCEDKlVSAc47CnS46qrCjGTIgJIzI5\nQ0gte/chjZCU5bThOT++xe3X1pTVjKZesR899157E10oVk2BNYphGHn0fEc3POfu65/gR/9wxVc/\n/2UuNh9x+2TOL3zuIX/oj/4Ey9Ml9x+csd8dmN1/B/QdzK0z9Be/yuX1BUpkQjUjHHZ0uy1nD94i\nKs1sdkSKgUoVVJXBzSx22BOcZ+j7yRdcKXwIJJGZaUvwA5u+wyhHaSyb4Uvs2itwDcaClDU5dfTb\nR0itydkyuAMGS1EKIpcUpsCWEt8KggdyR8oDdW3wftpoRgRillirGcYDSo6EtGCMmbKyDO4CKWcY\nu0DIwK57is4KAaQ0fQF470k4hLBoaZHypmnohu9PXtlkPmm1BVJpfMws6xl92GNtQWVmCBlJYkNO\nEH3EyBVGT5O9zAKtLTJDTAEjSpQKuHFgHA2FPeDHSM6J6CUhjbTRUxQrfDwgVWDsJH14xok9Z12/\nxVAp9oc91lQIkREiYbRhkB6lzbQST5mu3eFDTyMEkmIKuxAD5JGkPXbVcFbdp7JP+Mqv/xJPP3zE\n/bc/wdXmORcX1/za53+J+XzFj/7Yj3Hn5ITF/JSybnj79h3arsMWlp/8s3+WX/yf/grbq5Gf/NM/\nzZAuePDgLuUnPoHe9iS1QjXn+Kv3ORjN9eUjaC+p56fY2QKODWk8sN/vCfMFhbUUtsBYTe4DY+zJ\nWWKLhsPmCToHEJObocgSGWApJKmQHLLCFbBvd8goUdEiRcPQP0X5Eec9QgiCj+ScUaohFz1RHSiK\nBYdeIoLB6IJ+vCZnhbYJYsCNLaYsEKkgiQ0xOIyeQ1K4w4iRFaYSVLYgREdt75Dlc0LydO6C2t4m\nJkuKEaUTwUPCvrQxK4Qogf8HmITv8Ddzzj/z0m54ww3fAd92MhdC3Af+KnDGFNH53+Sc/wshxBHw\nPwAPgK8D/0bOefPiNT8D/AUgAv9+zvnv/PbrWltOqgZtUMoSwogXCq3mCBSFbRidAzFQ2obSLrCV\nRWtDLzNKgHcO5zVWzrBa4NWO4D2X189AQsiZ6DPeRZLShHg5BSkYCURyVhi1IKUDVbFks9uTUaRo\nQB6IcTL+knIyq9qxJyUPWRJiREvD6LdUleWTb/4oh6uK4TpxefiI5BLLW29hxMiv/8qvcue1u/zw\nZ/8YX/3yuxzfvs3y+Jwvfu2rzOdbiArz7ld47f6bDIcD61v3mX/iRxiurymPSm7P7xB85gv/+PPo\nouL2/RXzlLDVnHVd8bUgMM0JV0OPdD0iJPqyIRM5u3MPKwTbzSVv/MBnCYsCuekIpsENI94HrJrk\ni0SQZUkpE30YISVquaILiZSvyFkT4hItK0JI+LTHjx3BKZLUWCGJLZw0DbbaIfXIub1FtzV4p5Fi\nwRCuyDhiMHQuUylBYwXG1FOotwRtDFloRudRtiJFQ06JkJ4TfUTZRPCZgStSWlIWS8gQ8jCV5F4S\nOedBCPEncs6dEEIDPyeE+BdfeLbccMMr5eOszH/X3EPg3wH+bs75PxNC/EfAXwL+khDiB4B/E/gB\n4C7w94QQ7+Sc/ykBsNE1WfYoHZCiptAVQz+ihcZai4tTCSQ5gZQzlLYoJZgXjsyenASHgySEgmjt\nFLQsDhwGz0g3lWxMSc6C0Qu6MWGDnNLfEbiQSVHw/PIRt8/eJsbMqjjDhRaXA1mG35LcIHJC5gIt\nR2yRSHKPkhbouXPrExy2l1xuHlLK1zArhdsLApFyvuLyo8f8sZ/4U/z9n/27fOU3/wr/1r/3F/k/\n/vb/ydc/eMgf+fHPcrQ+Yb8bWCzmfPj+B3gfCYcPmJ/dw9Qrnm9a5rMlF7sN83nNGMGNI6nbkIzg\n7O3P8Cml8NtrfPLIGHj88AOunz/h+PiYdncB9YJbr91HCI/dedxuj3d7unZHMT8nFxJx2E6ek7zd\nvQAAIABJREFU5TLTaIlCsY+SBsOzXKFGza694nhxgvCZZXGLznkQHp8Dw9DhMYg+UZY1s8UcoTK4\nTFlUSGp8nxjdBdaC0ooYPCFMKVNaSdCZupgjo6XvAloUXF0/JlC/sAIYSTli5YxKL3HhCqUkRtdE\np9Amkbz7bn02fldyzr/l5GUBBVy91BvecMPH5NtO5r9H7uFd4M8A//KL0/474P9mmtD/deCv5Zw9\n8HUhxFeBHwf+4TdfVyg9deslkFqirYbdZtqws5GUPCIWxNwjpcDayfcj6wIzOzDuHTFlhs4y2kxR\nCJQUSFnRKEXvRrKMpMA0QYYwrbpFYvAZN2YUFVLC1e6rlHKB1gtSXLDvR6Tp0XYkIYnREoJHCk1T\nzmlmx2R1xXL2Cb7y3q9wa7WmLE9JOeByz6I+Z6WPMcqQgufDhx/wyU99ioTi7/3vfxNtTvjxP/Qj\nFHWFCyO9a9k93E6hGwHGYeRLv/yr3H3wGtfbLV9jpFAe5I5bd17n3JbE0KOSQSzX3HrwKZ49fkj3\n0XsMuy3HpytkcCAz1xdbxDHce/0tRNkQ+x3F6Zr2/SuG/YZs95SuQpsSYabSlROCIA6IMCK9YJZW\nXIw79ocPWJQRjKQ0pwgsMT7Emkv2bUsUidLO2G08hW44OQuE4JBIZDJU6pyZibj8HsZoZnWF1Bql\n4otQkkBIG2pdMG9WkC1ZRvb7D7F1h9JQ2hlalwgpKNQRkBDSIZREOE9WL3cDVAghgV8GPgH8Vznn\nL7zUG95ww8fkOzKy+G25h+c556cvnnoKnL/4+w7w8Jte9pBp8v9tF4uQBSGBQCOlxdo1resZxj05\nZ2LypAAuOsZxQAqDRL/Qm0eUFvgUCWFyU8xkkBayxuoCrQRSTvFoOWcgkHJmGDyH/cgwDgQHfddB\nrtBak5PGj5ExOCBATnifiBGktpTaorXj/OjTfPDwS2hZklPA+xalLZEdu/4JIY+89967yGxAzVDF\njM3zh1TljH/7p/8CX/rSl/jw/Uf86q9+nsNhh7WG4D2PP3rEe1/9ClppLp8+pS5LYhCUzYx6vkap\nkovLZwTnCb4nuBEJNCfHLI5us5wtkEmzWCwJIVHYTFkVU85qIcjVnHa7wQ09ViuS84SxI7o9pIAP\nPdJqpFAU0lKLgjLXpChw3nI47CFP4csxRkQyCNLkKS8zIbQMXaTvPH0/acj7bkRIhdElhZ6hVUPK\nibK0iBeB3inlKbuVDmOgrmbMZ3MKOaOQK2QyyJwJsUcpi1KgtCclT06Brj/QtdekOH4nQ/o7Juec\ncs5/ELgH/EtCiD/+28/ZbrffOIbhe5NJesM/n3jv6bruG8e34mNvgL4osfwNptzD/RS6MpFzzmIy\nJP+9+B3P/e2/8Y/RWhOi44f+wAPe+qEHHM3O+Wizx+VrVJoRosOFESk3k2OfnlPqhJEGp93UJGMk\n/TBi/RaEpLKa0Wmk0JOcMY/kDELGqVsQQQwjQz+QgqEqAtZrBr/B6hJTCGZVw8ZJYk5oAzEkBjcy\n+kQzW1JYy777MnVxzKE/sO87BjGwSmvq4pST0/tcPnrCanWEP0QWR8e0+2vWZ69xdOtN/tv/+r/k\nzU9/kqHfU1iFIHFx+YRf+9zneeetTzObNVTHZwwu8oXPf4HXX7/LmAqOTwrKeUE9L2n7jkU949CN\nHK4vif2eduy4/enP8vXP/X0unz2hLA0xGVar21T33sY/fRcdIuP2wGG3JfueujAIIZG6RAoQUuMO\nW2wKpPFAlJneO2RskSgO+xYhNPPZKVLUdK0gGoXSieQCCINWJ8yK27huzzg+wQ9PmRcrpLAooYgh\noa3EVgW596TcoZQlpUnRNPgdi/Ub6FTRl9DtHdFP/0stAylfgTgmxsw4JD744nO+9htXeD/A98ho\nK+e8FUL8LeCzTL9Kv8FyufyevIcb/vnHGIMx/yRw5VstDj7WZP5NuYf//TflHj4VQtzKOT8RQtwG\nnr14/BFw/5tefu/FY/8UP/LHTyhtwW645uxIoVWJKWtkqhnHPVqBD4kYIj4dGA3oUWIKhbKT2kTJ\nRF1ZxjgSc6DUkiwzIQaE1ggRyEmSsybFgegTxihgKtskn0FkNts9IQ0YUVDpYwprsGkGaYc2AaJC\nkklhJGSHSxklMyiHlHBwGypW9OaSpV6xufqI+WLN5vkeIypySCzXpwQX2e+2/Kmf+ld5/Ojp1FFq\nDNfXVyilefONexQ2cXz/NqWeGmrGYc0HH77P2z/0Q8SYaduW9WpFURQILSnLil5KHn30NXSCz//8\nFzm6dcKDz/wBDo8/4M7rn2R97x3i/orU3Gb30Zfo2y1aakKevmO990jrKbUlC6gXDd4bblcWediy\np+DarNhpGBMM454YHTlpDu0W6i2mEAiZII1UtWTXXrPUkpQzQnfs+w+p7F2G0CKkIOcEMlKWNVIp\ncnIo6wheYK1ms33E0eIeKI9UAu8Eh25AhzQ1iZXPiAjwd3njU2fcecvS9x3Rl/y/f+vxxxnW3zFC\niBMg5Jw3QogK+EngP3kpN7vhhu+Qb7uM+Ra5h/8r8Odf/P3ngf/lmx7/c0IIK4R4A3gb+MXfcWOR\nSXlkXs/ZtdeUxQJbNlhT4X0gREdKAYMhB0Hbt/Rji3Oe4ANudCidKMoMMuBSAikJYcT5EYRESkWl\nT1iVpyyLezR6jnDT495lBBYtZ8zqOW07cLW/QFqPKkZyTqSoUaLCWk2IPaEV9NuA77e4cUSryV4g\n5ohSls49x42ZUpa0mwvu37pPYdQUjSc0RydHhJj4+X/wCwz9gPM9T589QWtN3/domYlppCklSmdW\ns5Lz20veeft1RrdhtZpTVxXDMBBDoNvvMU1JSgMnJ69ztFxy79YxYuyI3vH2v/AnObn3AHl2glo/\nYNQK25zRXT0l9heUtpgyV41Ba0Xf98SU8D4CGtcPNFlxbCpKsUBZQ1ll+mFLjJG222ELMzVfSSiN\nQqiWEK9QNpGZJIsgMXqkHR8x5gtC7IkxAwKR5qh0hJKzyTRLLdjvRoah42LzPiHvyThAEqMi9BI3\nQnvwxCCJIdJ1B2II+HBgdPvf72fh43Ab+FkhxK8wlRr/t5zz//Uyb3jDDR+Xj7My/63cw18TQnzu\nxWM/A/ynwF8XQvy7vJAmAuScvyCE+OvAF4AA/MWc8+8osyit0CqRsiTFGVkKRNYoKfCDJxOIWaG1\nwDvFOIwIu0UcMnUaCM4hpUQYS+oGfB+I0pCJU61YSCp9wjBoNJGiGEk4xqRQuZic+NIMLSyFUlBU\n9F1kv99SzgvK1uIjpJCRClLy3L/zGZx7ipVrfLpE5B4lA9YYUg6889of5vqjxwxSUKpznj57jMyW\nYtZQ1w3ee954/RPcuXOH3W7HxeVHCCHphwM5C1IeSNEhRKAwmqOzBePYs17fxczmzJolTdMwX66w\nViK1QY4Hjt94i+7h18EesX38LovidcqmRCRN9dqnyD4RUo9NmaI5Zn1yj93VQ3wYsEWBVBpjLGVR\nAoJ2mBqhlLGoJBiGkXFMLGbnDCmw2z5lHGeM7oAuHQKmPRAySieyPND2gr4FoQ9IpnKVG7a0+0zZ\nSBCRGDusytOvr5hJckSlGTkYnHeMzmELEKJkGB1CVpP9cQikBLtuZF5lQh5Ad6QUgJeXAZpz/nXg\nR1/aDW644f8HH0fN8q1yD//k7/Gavwz85W91XR8FRtWkmJFJoKUlpTi170dJ1BmtSmJMhCwwtmEc\nWwQjIgnAMpmeSzKJofcUyuBCi1BAVohU0DQzunhAG0HEIgA/JKwVhAGMqDlaHLFvN+zDgXEYqSrJ\nvKl5cjlS6QVKeRaLFc8uPmBZLzCqAhGQKVJlQZPfQuSKh1//ImVR0uUDJ0efYnj/Q7rkyFpyOAy0\n3TVam2mjNSeEkBhjGEdHVTbILAk+EFNgcJHt7hoh4OTsLuvzO+jCUhaWopwTskMqCcUcFSJls0QI\nkHc/jY6BZn0LkRLh+TPM3fvIrUfuWt794j9iVlrG0SGlQFlNTJkYE0pp+mEAIckhYLWh8x6k4qie\nE1VHDImqathuL0gkQufJhSRZTRIj5IZhDMRhQ0wj67VB6QR6T0wZHzPKl6QskHKHVDsIkbEfCTpi\n5JY8CsgWpStCmhRFQhqULEB6cpRkYLPtiXGPUhGpPMErkn9lfXA33PBKeWUjX8lE30WUUuQM3a4l\n5kmjrNoZznV4HFYarKhRWiO1YQyB4VAgVaRsJDFGfMhsDx11UyBlJsaBoYdlWUDQHNqRohjQZURo\nPzkwaoPQipQsVjZY5TA60nYtzRysLCgLi0grcr4mxB60ROsKhMSqY8a4Y2HvgoDbpz/A5bMvUY0C\nPZzz5GsfoBDMmob5cokLidt3jhFC4pzj8vISRCanhLISKacvtz7uud5cYZSmrCqkzOjCIKXCaoOU\nGiEFRhVkkUlRI2OPPX2APrTohSW1W0bXUTSnqNUZ8cmXSboits+4fes2Tz74GmVZkMgIIRBC4Jyb\n4uGUREpJlBIdBad1QYh7LtOeGDw+HljOz/D9no+ef4QyHTpopMxIHRFY3KBI2SGEZr93LEQF4oDW\nK/o2kGNFNVMIIn1/jRaGJAJWNuQ8kJMneMOsLhAoYnRY62jHjIoGqQpyigTv6YeBsupQUTG0Nbx6\nO/MbbnglvLLJfF2dMaiETJHdMHC5efxC7uYRuaGwEKMDDbW05GRRRhNSC9EgxOSqKKRBK41AobTC\nqojpIA6JcQwsmorlcsU4XGK1Ycg9wY+IrAhxJKXEZrOlampgD1Gw3e45PXqdxTwhXElQk1qjLmvQ\nkeAFpalQSDRLFtWazaPHzOwRLgXOTtd0Tzt8ijjnuXp+QTmfsd8HmmbGbrcDwOjJmyalhNISoS2L\nukDlTFNVxBxoFie4lOnHESEF4zgihMCUhjA60A1KJnSM0MzI4zNkveDw/ldAlVi3J519EvneP6Kf\nrdj+xs8hRMI5hy4LfICZMQgFmclvxiqDVImcJV0/+bMsy5q4eQgiYeya9cyw2e65OrQUMaOUxccR\nIcYpPEQpUgKRZkSnyXkkxoTVmr4PZJlRUiNFRKgSLRR10dB2F8TUUdfltDkqMzkOxNyTc2YYph6F\nLJi+2ERAyEAaC3IoyPEmNu6G709e2cg3VCzLM1IWFMbw9OJ9Nt1DQkiUZsWqvMfZ8nWshnLmsFag\npEHJRJYJIRtELlFCgEjUsxqix5hIXRX4EBhdACL1TFLYAiGmlXxInpgg+8Dl1XNikBy6jtJamvkx\n47BA65Kz4zcABzljjMKWBmMkQllyBmuOqMoTiuqYpDNu7DFxhFaR8tRyLjKEGOm7gRAmvXtKCaUU\nQghyhqqqMMZQlhVVuaRZHVMuT1ge32W2OmN08YWroCDnTNd3DJ0jDgPD0JNGBykRxp7+omd78ZjZ\nfE6VA5mEvPoqYz/CsGcXIkenayLTrwIkU5KRiyhtKZvZpJSRGh8CxhS4FNj2e2RODENPzhpyRCLJ\n0aCocGNNiIaYFAiPzJ5aL4ljxeWzRGxL4pAolCClSPCJvsuEEDFGIk0gpQHve0LaYapIU88gKVKa\nzLWasiZ4zaHPdEPG6oqyNCjRsFq8QVWeYMxNOMUN35+8ssk8pIiSlqJcIJKgqY/YH64RoqMplgQ/\nOSMqIcniElVcMboDKQfQnkRk9AkpFLNyhpaKUhlEFtPmphQ4v32hYQZVSqQQGLmgtDWFVRSFJocB\nox1aOIy2lHbN8fIB1hxTmxOivCbEHT612EqSUQglp6SklJB6xtj1yOwQfiCOGe9alFSkMND3B2Ca\nwK+urhiG4Ru18pTSJJFMmcJWlGWFtVOTUMyS7aFlu93SdR3jOLLZbBiGnvZwIAMuQh4O9LsDXTuw\nv7okSYlqe8TiHN91sGhI0mKrms3D9zk9u8XF80uKogAh0FIiyCgpCCGSfCaGgDGG+foIIRUxw3p1\nRIoZPzoOhwt2/SWyGDhaawor0Ri0rJEyIZPCyhnJWdxQk/yC611mGBVJRAqrUUpSFACBffchPl1z\nvb+iHQekiRSFBDESYph+rWlFWUiq0hOiJ0dL9Bo/ZqrilKwMy9kZ8+ZG433D9yevMNA5YSXEkFGi\npC5KhmGJj5FDd01dNoQph4AQHda0KBswo8XTE7xGCUuIisJOdd6+T5gYGF0CBS62dH5DZRoQgbKa\nkYaRM2NpD442RSyGPF5DWeOd5s6916iKkn13jRINpmzYXF1ibUIXkWE/MjqHEoFcW/r+mtLUKCvw\nbqTKFnKe4tDCSBghScO8qBik5Orqapoo57OpaSoENpsN8/lyKp9YCw6kkDjnUWqkruopQzAm5Kwm\nJdhuN9P7fPYRq7O7RNdhNDgMhYRn736e88UctetxfUuSitW8pB8dZWVJCYIPSKlJOVMYjdEKJTPa\nGJIAkTNl02DjyOHxR+QccJ3j8dW7WGOADmU9Si2x0jBmBbmikJbsSlwvGcf+hV7esVppCmvRxiKV\nQctIyokhXNN76A8GkRJN9U+Sg9qDYnSBWVUQRKQuJVJa2p3CSE30AyJXVHZFAAIv15vl4/Ddyo68\nvLz8rlwHmCIDv0tUVfVdu9Z3Ky/1t3j27Nm3P+ljcnX13bPd8f7l++y/ssm8d8+oytUUnlwmcg6c\nr25xfXhK7wLO75AWmrokp4IQHGVZ4bJjHCNZbVDpCC2rydcjS4bekVG4YSQEgy0io+9QWaKUIBIo\nyxqEJ4VEjBnhBVEMCBSZxKyuqYoGFxwCQRFPWViBH97jEPeQNIfWI8UAtsB1T1iUK8qypFCO+eyc\nHC2KCDIzq2dkW0DMSClRSpFSImeB95Hlcon3nqIwVFU9+dCYqWGoLiuWq9W0+WnsFLKBQAootEGk\nwHy+YPADxvccNldk7wlaMjx5l8fjGefDJebeG/jYs/OZcb9DGzkpX6Sc3AjDNNhihLIsMVkhqwKQ\n2LihEBJrS8YAgxRUakY7XFHbjNaGqtAoWeJdh5AVfkzoUCKyIOWOEBxKKFKIFHVJlJGisJA0WRVc\n7yRJ9Hg3ZcEiAn5oGYYdh93UF7DZRNbrmihH6jqRhgofHIYjZNbI7InCk+RNOMUN35+8sjLLYXzG\nOOzRsmG3vyCFgJGKUlpiHGiHDSEMSCzkFZIlQjmUDS9WkRKjBVooclIIKqIQ9H0meEdyCZUbok+0\n7YFu3NENLSkFRFqDtiTtUAXsx4RLI7aS7LpLQgBlDNebZ6hUoZLh4Bb0PXTDQNt1jC7g0xYpB4a+\nw2JYlifEMEL0jH2HmvrjsVYjlGa1WFKW9sUE7qiqAudGZrM56/UapRTWWubzBdYWlGVJ0zTUdUVV\nF9RVRVkWrFZLpABNmgI3hIQYsELQP/sS3gea0/vUp8cENzJ+7TdwlxcUJnF8dkyKAiE0+UUNXghB\nCB5lLdLUZKOIo2PY7RjGQFPWfOrsDT5z+oOs5BrnI5U9xsgGKzUpRVLwSCwhOqQY8akj5oEYw+SZ\nIxI5Z3rXEn1Pzh2FVZAMUBKGGikMs6rCao2LnkN7jQsdPjpUnqPyOU31AJEMzSy/UL5IgpOM447E\nFiHCqxrSN9zwSnllk/k4Kno3TE03RcXBP2fXXWLLBbWdUYiSIhlkNhjd4J1g6AJJHJjNFIWp6HxL\n58ULP5CK3gl8zoxjnjTHTjPT53RuoO1Htt2OlAMxJlKqSAR8bolqai8Pfs+jx+/RjR3DuOfJ5W/S\n5z2BPYUq0OqUnAuqas5idTJtAspIbSWFMIwxgBBYI9HCIIUE5fDBo01BcJGmaV6EOEx182EYODo6\npmkajLEsFguWyxVVVVLXNU3TsFqtSSlR1yVVWU1yRiUJfiSnkfawYf/8Qw6bJ1SzI1qXWaws/cMv\ncbh4DBL8cI01K9yw4/btuxijKasaLyGQMbokJ8847nDDgMiZommo50sgc/A9lcicL1fMixmFKWjq\nM+ryDm03MroRckFOCp9HhtjTdR1aSyKJbAZ6d8AnR+96nN8R04CSsLT3EWFFJStqs6As5wxDIsSA\ni5Ex9tRVRX8AFVeEkMliT1Fpcta0B08/DlMqVbpZmd/w/ckrm8xVsad1zxlDj1GWMQ54RgbvULmh\neWHMFNpIHAUql/ggGHuB0Zp5vWa9vEM/PEVLS9PMUGJO8BCTQaaIlmekJCjNMX0/pQ5tdlf4MDJ0\nU0BzlOGFkgJk1hRa4n2P8z3b/j2e7n6ZNj2bYtB0whaR5dpitGZWNQgGlBix2SKSIoSRvmvRlWS1\nPqYsaqqyoa4MKU97BKenpy/KLZnTk3O0NpRFTVlWzOfLb9Q3F8vFi/r6nPPzWxhb4GPADQO+7ygK\ni1aSHHo2F4/oN89xQ0cWOy6fXyK1RC5WbLdbZvMVfXdFTop9e6CsGqqqokAiU8LHEWKitnN0WaFM\nTc4SKTIzW/HayTGiUGy7A4dwjdSS9foep+u3+OF3fopV/YPkqKjNbYYw9QREIRl6wXDIGK1JMnHo\nPe3YMfiWbfsEFx05VKg0J0tDQuHGxNinySVTJJQSDGNHqZe4PkI4meIGbUYg6Hs4bEeyAyVeXgfo\nDTf8s8wrq5lnRkJoaccrpJrc+vbdASUENs/RcobMEXe4IAwF8/UMkRy2qPFOcLw84nx9i97d52r3\nLhlHXdZcXO9Qac6isIzdNaZYYJSG1BDdji73RPGYQzfifUKbnqqYMatWCD9iTCDHHiMrUk6I3EMS\nJDWnEGuETrh0TcUSIzJd6PB2wd7v0C5wtLpNERua9ZJu3yL0gC0X9D5QlCXHx6fs9xuEUJyfn2N0\nMXnRBKjrCq0nL5emaUCISUoZJsuA0hqCj+zGHjcc2F/uifsrTFNTmcyYEjkllmXF5uIZsh9Yv30L\nu0k8fPgh50crDruIUXpS02SPKaYUH2KcJIOEacMYIERyCpRVxdX1BQs7Z1lVqEMmBkfXD9y5/w4P\n7n6aW0fP+fDZEU+vv8zobtP555R1QRg0KVn6Q089t1RVjaXA9zt6OWnPiQ5rNGUx6d5DFORcQ9Yo\nlXGuY8tTvEsIPMiI1gKpAqaIBDI+wHa/4fTo9Zc6boUQCvgl4GHO+U+/1JvdcMN3wCtbmUspGeKO\nXf+cIbRkCVFkrg9PEVlydnSfVXPOzgMUiFBx+/gtrD4mh4YQE01dsqxOqMolhTKUBVhtmBU1RsL2\n4n367oBQkvOjO4hsEUrRDVcIlVGyIqcarStyBpQmpoGYO7QSvHn+rxBCJkbNOB6IyZHZItgi2ZHT\ngIuO1reMwWNkxW57YPQju+sNUkJVlSQSVVVxdLSmbVv6vufBa69T2MlD/ezsDCkzy+W02SmEoGlm\nCCG/0fo/jiPBeYQAlTPdboPbPsMdrtg9fUS/n3TgspC0uz3zWc2tB/cZdwd6N7CY1XRdO72fHEgC\n2sOBoe8JIaCUQSoLCIauxfmBnNJkpxAipa04PjrBKkN0jhh7nl9+hFaak9NjfvwP/hgnxyuEcNiy\nQss5xhiqQjKvS2o9ZXWWuWaml6zMOTkavEsoqTlaLybjLy/oekhpgUgFRhXEOJLoyHJDCIHRO1JO\nZKboOm0SznuUiuy7r77sofsfMPkOfSvL5xtu+J7zCnXmCp8cXXjKvn9CSiO6iEjrqErN2eltzm+/\nRlPPGGKLyBUiW+bNKdrWfPjofS62Fy9kiglkj9QH5tWc+bxCK4cQkcvr50gKZmbN3aN3kHGJUTV1\nXVAXlugs2+sW7xJQkUXmcvtl6rLi3q03acwD3BgZxhGpYNmcU4gFOb8IZ7CSy27H9fgR+9hO8WdC\nkFLi+fOP2G0vUVpRlPZFt+ckE8tZ4tzUEWqtJeWIkhol7YsgDUFV1YgM+90U1tH3Pe2hRYpAe/mM\nw9UTri+fUBWGan6KqRt812NFRMXM5vo5dVHQ7w+MXUs/uBe5q5oQM9Mic1KyuOBRymJsQ1E12KpB\nKEsQGo9EyZKtcwxa0HvPZnNJzh3vfvgP2e4umDUNb7/5gygaYtpTlwadNVYbqsrQlAu0kFhqGBQ2\nzyjDAuEsZVmhSzBFQYqG0AUqPWNZ3mJdvc7p8g209qTUkeWWKA7EKMhoQkx470hEhFFYG1/amBVC\n3AP+NeCvAOLbnH7DDd9TXt3KPBVIMgSP8y0xDUBCG48yEWs1R+sjbNNQNoE+bthteiQFOiuSMHzh\nvZ/j0bNfRxAZ3BZjBbdunzOrTvAIhJLEsaWQhpQis3pJqWsK2ZCBnAM5KMYh411i9IkUM/vhCdpK\nKltze/1Zcqjou5HoMwRLyhVSzEEI5vOau6enrOx95nLNfL5CSoOxllt3H9AsTol5UtQAOOe4fese\nMXqGoSfGSEqJ+WzJ84tnDEOPcw7vPdZO1rjTRN4xDCPGZLa7LfP1GmMNq6NbdKOnWi0RerLHvXr+\nhN3+CbOyZL+5Yr1c4NxkKVyXc4SySD2FUsQQURKqwk5NVDmSsoAsUVpTVQvK1THVcsbx8hbnJ29y\ntpiTGMgy8OjZe3zhvV/kqx/+Ko2cM1sek8JApQxVXaFziUwag6Exc642T+h9h46GW8V9TC7xIVGX\nS1IUeN+Tg6bb7PBuwKiKs6O3OFt8CmUUQQS0hnpWMgyB4GEcPcpmlEoI81KH9H8O/IfcOMDc8M8g\nr2wyt2JBoWqkfuEB0gZi8IgXpZbej8SUaeolUsOuvWR/uMYNnhQhB/BxRxi3HPpniNhwfvo282aF\n1gKsRZeCwiouth/h04gLPUZrNAoCGKMxKiJyouv3CD19Rl10XGyeopSFLDlZPSDmERlhdAdkhhw1\nRi8orGRW1+ScsErT9h1d1yKN5b33v0ZZ1hRlgbGGrt9jjcaamrbbU1UVbdsSQkBKRYqJmDxt22KM\nYbPZ0HUdUkq8c+TkuXj2mPKFCsb5ESkU0Xu6riUliZCK2XJJDtB1HSpB13fMFgs6NyC0JCTIMeBj\nImVB8CP7/Z7t9SXRj0iREClBUZCtJPQdYezQEpRUoDKZjPcOhOALv/k5ujFCCtw/fgMmjwlCAAAg\nAElEQVQfBM18gS4MQQ7E7BAqIynRlWbMgSwypSk5qtfMyjVGlBhq/JBJXhHdyL67JsjJXXI5f5PK\nHqPEFFEXQyAGOUXO5Txp08MUmPEyEEL8FPAs5/w5vs2qPMb4jSOlm3n/ht8/KSVCCN84vhWvbDIv\nOKNWJ/ihwLtEcILdxpG84tn+Me144PHFh1S1IeWBftgypAPX2w1aLhiHKwxryNOm4P3br/Pm3c9w\n5/wBBz/ikkcvC5p1pPVb9uMFl/sniBwxKWNSRCpB1j3WTi6MOW0IeaQ0xyQ5MMaEFJJVfU6t7vH0\n4hH73QV99xxyhDinLs5IemAQB3LyxOgp64qL5x+hlODLX/48w34PUnJoO9548w3KqkApxXK5JMaI\ntZayLNhsNpTl9Jx80S2qlJrCqGPEDyN9u8fvr7BWU9gGqSWg2D7/CNduqNcn+P2W1eoIVKZqLO04\n0MznrFfHpBgxRpNzRiqJEBkpBCm+sBbICbIgEXHbDbnr0RK67RXD1RWLouD89C5alTjfMw4j7fCU\nrz/+In1IvPPgR/jkG3+C0tzmnTf/CMdHdymakjZcYEtJlrAZ9qCgsIb1fMV6vkC8kJLmYOj2HiVr\nDl3P+4/eZUyaWXmbu+sfZWHuIbLm0Pb0w4YYHdqAVAIhFDnalzVk/yjwZ4QQ7wF/DfgJIcRf/d1O\nVEp945Dyxvjrht8/Uk77Zr91fMtzv0fv6Xcg8oKU5qRYEcaSsZO4Fq6fJ/re8+HTX2fTfkjXbXHR\nM6bI9vAMGQ1hsDTFLSpdEpJmuTilqdYcr844Xt5GSIWWDTJbslkgdeIwXLFpL2nbPZ6A0DVlOePs\n+Daq9BgrCB6kKClNxdht2R0uqGcNOWvurD+DMceT85/UjONzNEfUxetkVaN1phdTLmmKmbFrOT+/\nzdHREUJknj95zOn6lJyh7w809ZzNZoMQAmtL2nZ44XVuqKqpq1VrTYoRP47sN9dT4k7MjDkx7K5B\nCEJyiHSgmS3xoedw/ZQoLd4HYsjknDg5OqE9DJRljU+w71qCd4RxAATtGDBGE7wnuoG+3xN9RGsL\npQapWR/dxY8d7fU1plyzWCwRXpBTIPrIP/j5/5nnmyeENPLpN36Yo9WbNPUxb73+Y5yf3GYxP6IN\nHdrMaMqCp/0VwxjwOJI/4NNIZjIgWx8dc7L+AY7nn2Z/2PPkySNc1yJINMUxMpZEp8hJInUg4xDC\nI03mJS3MyTn/xznn+znnN4A/B/xszvmnX87dbrjhO+eVTeZ9H6jKGcvZEa4zyGRxo2bop9b+Rxfv\ncnn4kI37Iv24JYZEJiLNVI4pzRpBxdHqwbRSdyN953j67BKjJcfLOcezY1b1kpP1Gq0zMTo8PV3o\n6XNHGjNGNTQzizYFMUhmsmFVntEeLgjjNSLvQUSsaThdf5IQSpANQp5OP+99Q6PvUpQ1STl2fosQ\nnrOzW1xf7TBqxocffB1b2Bf68UzXdTx/foG1lqIoKcuSEDxVXXE4HF4oWDJVU7PZbtEvfFP80KJk\nZOhbYkr0Y0tOClOv6MeBLAKlEizPTqCyLFZr3BAZhumLIqeEVpr18Qn9GLne9+hy+lLLGWxdImyF\nEAqpFFIr0uiJYSTGQGUthbBUpuTurXOOV0csdQVKYIj87C/8j2z2GxbVkgf3HuDbiCoM9fwUW2qs\nVfg0lVj+P/berMeWLD3Pe9YQc+whd04nzzlV1dVd1d2kSYomRdm0RIm0LAK+sOwbD/CNLnznP2D5\nD8iA/4Bh+IoQDAGCIdoCfGGRht2WmzIpm02y1VPNdaY8Oe0p5jX6Yh8SbbPJ7ha7dEh0PkAiA7Ej\nYgGZa3+x4ovve98xOJ75Lb0bGdxIP3b0pkfmCWen77I4WnF68phEL7B2y7r7mG1/hVeQZUdYZ0my\nHCWzP1r9BgzR/Sub0vfVLPf8ueK1BfNmu2FRHHG8qFnU6SFdEqCsSiQpeZETfECIQKIUZaFZLo55\nevcd9vYGGydmVY0Mkln9gCeXT/nN3/p1Pn7yB4TY4BOHSgSJTtBCUZcZVTlHqBwTPMZ2DM4zToZM\nVBjXIEJKP/UgAlJFxvGavrsk9Fu0DAQjWS6+SFF8gfniEVd3z7m6eULfOZAFg5goypooEjyCo6MV\nL68+wLsGHQUhHnLMiMB68xKlFMfHJwA4ZwivcqzTNGGNIREK4QNt16ATzeXVC8zYI7zH2wktC+Bw\nc6vnC/Jiho+CYA11XjBNE3mZE5RgMB37tsM4R79vKaqSqihYbw/CX6vTc+rlQ4oqoSgrRJIyDSMx\ngG17unFi01t2zQ3Nbk9dzTm7WKLKGViBo+Du5RW/9bX/jX3T4yaHLiRXm0uKWcby9A2klCRxROcD\nMh3pxw19GDDOYLqBfbsjyTXVckFRzdn3a+q0Zlas2LV79v01LvTEYHBGo/yco/IhuZzjncQNKWH6\n7Kd0jPErMca//ZkPdM89PwSvrWlIRHGoFS9OWR3vGKeRFEmVZaR5RhgNOp2RqTmNNWilcM5S1hVW\n3DHLF1g7IZXHO8vd7pZd+4KiUDw4epu+nUB6YtCHR3AhqHMN6mBZNowtWboHmxPjhHY5/djSj46J\nPTqL3G4+QMkZaVKy3n1CsDWZyiiygkLVLGrPpy8+YDm/YFbknM6OwWbYqWe92XNUH7NaLhAyBQH7\n7Ybt7R1BK/IsJ4RA1/as12tWqyVpluK9PbT5W0dnerIso2l2yDyh0AX77R3CTZSZRMoEaw+iVVpG\nlExJT1YE29L3PUIryDQn84dcPf+Ui7MH3G02LOoZu2YHwPHxMUk83NWd61Ayw8mArkoylRKmgegD\nU7PlqK5IhCJxEZDEPKAyQ+wlbrR4H/l/fv+rLGcnPFid0fsOaQzjDqpsxdHynIYNk9kz+IBKAplW\niGlARUeZKYJwKOkBQ4iW7f6W+VFCnip8dPg4YK1DxCVhkBgdQUB0GWMniPqzK028554/z7w+oa3m\nUy5fbPBeoXTN0XJBXZRk2QwhDlZhxhlCzICK29sBYxwET51qrBuZYsfN7gVtt4OQMHYGXOT8+A3+\nxi/8p1zfTdysd3z64pbru0uyNOF4seRoccZ8CfMqkBc90zBhfMpgJ3rbcbNf0w8TIRE05pbWtHRN\nw93mBfv9jvXtHW2/Zp7UpPqI6+srjA/EWBKUpxnuyNICIaGcrajqOev1M+zQsVguyZWmns0I4bDq\nVkqgtCBJDp2f2+2aptvjnKOY1fjJsds1ZLnE+YAWka7rSbMEKROm0eC9R+clxlmi0CRFztiNTP1A\ns2u5ePwuzTCxWC5p+gbcxGxeMnQdR0dHBKUOphtKo5MCbMRlCfLRQ9TqhHRW8/T2im7siCZhnCaU\nFGRlgrOOwUXadkSh+T9++3/ho8v3sOOeenlMXq4QISf1RyiT46YZfR8YjMd4S5ACEwMqVRjTMdoN\nXkbm5YrT1RHjtGNwLT4qhmkCJSlUwjhZumFCB42fAlMvaLf32Y97fjx5bcF8OZ8jYosdJYv6TY6q\nYx6dnFLIyKp6RHAZkxmx1iCUwobAvm+xYcCMB02Otu242nxEPzbU5QnEAjM5ThZfwkwjP/H458jz\nGdFJoq+ZjKd41R1al4KssszmnrKscdYc1BeVpe9HgsvxzpFmMDqDC45m13D38pLnN+9zc/chOY6F\nTOiHgX17Q5KXJNkSKzPQEnTGy6sn7JuW46PHtO2Wq5fPXpUbHQSh0lSTpinDMBBC+CNPTji093dt\ne6jA0XB9c8M09uw2a2JUmMljraWsC6TMAUGR5wQi+13H6vSMtKhxpmfYXpPVM4a+Y7FcYaPixcuX\naKG4Xd8gpEblKaosIJFEJRH7hrjdo7DUWc67bz1kVZ6S5Ss0pyg1YzZbsDjK2O1u6DvDsBuo0pSP\nnn+D0U0EZ6nzHEIgBkUkw1tNDOWhQUpbRjfRu5EgPFpG+nHNvn+OkAPHq5w8tyRKAZYQxEFoTEpk\n4okYopQQC4iWNFOva0rfc89r5bUF8/myRhYWT8c4OOpqgQQWs3MyPQeRs2tb2vGORa05PSkJwbDf\nDuxaw3bXgZCMdmC3vyHBkec5u13k8urbNP0W53rKDOpFBlIiDDS7Dm9HcIoYPTLRPHr0eZROEHLC\njAdl83FscYODqEjTCRcldaKZZyAj7NuOm6tPCK4n4CFabtefIqSmPlpiR8ftzXNOTx7ivCFGQ1kV\nVNWhq3NWzymrHOct1jqG4WALF0IgSTLGccQ7jzc9Wnpc21JkOYnQJGWNkgprJ4oyw3mJcQYfHZMZ\nETJlvlxyd3fHNHbMV8e4NMWPDcY6dtsdq+UR9WLJECLeetxk8SEQzQSyIEqNzBJie4e3jnR+jJgC\n75wseTCboUWOcnPOTt/m4uKCxWmFUIqTk3OO6hPmecnLqytkSJkGhw4eFwa8CKRZTpVUlJkmkxWz\nuaaelcToDl6ipuPm7mM6d4MuBIvlA+bLJVW+QCEJwaC1RWtHmhdIIUAY0pmmWLx+c4p77nkdvLac\nedAJJ8fHbHZrJqeQ81NGEUiDJwP6bsILxTgZlsuRNy9KtoWk38EwdeQk4AR1fnpozEkjRVEwtpKv\n/s5XODk9Ic0DuYbjxcFrcjf2iFbj3EheOYryiNn8DfI058uf/3n+72/+7wgSkgSS1CPkHJ1EYhhB\nGOqy4qhSLDhiDBnNtGN0gUwFlPS83H1KNTuljBXTdMvF6UP6fiC4ESHmXD5/gk7uOLt4m8Ia1us1\nx6sTpJQYHwjhUCIYfCDPS5x1ZDKhGbdgR2i3xCTHdD3l6SkhRIwxZLmmyA5130pqlFa0TU9RzhB4\nttstVZHhjSN4j3EjKnqkSJjVOfiR6AM6nRFxxOgRWkMxA5WjgiVYS7o8ZXf7gjJPEX1kMX+LZb5E\nn0fW7Q0b3fPw/HNkWYUxI/10ySfPfp8qnyGFQ0pPXiQY16OTQGTE0VHlFd54Ji9QSYYPllzD1K0R\nZUKqZkghKbMVRIvxHVFZZtLShR2jCcyyCqSnrH507jz33PMXidcWzMuyJApHMw1MnWVRLbAY3OTp\nGYgiJbrI1DmsbcnzOUdlTRUVPY7JWcZ+pC7PyKQkTQWPT89ZZoKPPvqQp09uWZ1Jjo9T5lVK9ClG\nS+6aFi0EBkWWZ1xdP+PR2SNibJiVC5x0qKwhLwTBHRzmbQgIBU6MCErm1YqT5Iypu+Hj5x9Qleog\ngKUmds0T8uqLFCczNs0e8OybAeg5OzshSyom05Nnj5nP5kgpKYoCszN4J5B5Ql2VCH9YZacJuLEl\n15Ht0LEqSkQ1R7iA1uJQi+48MRUkukQpQdM05EVKkS/YbjcUuaZvenSao/ICaSSb3RaUIEZNVc+R\nWhCFAJUjgiBIDTGCMvhmT4yeAsHRyUOePn+ODYLJBfCKMj/ltK7I3okIPyJiBVgQnslMdH2HcwN1\nnWOMZxpHgoBgHKnrSMaEIDR5ek5eBpyfwI5EobnZr5n5llX2JsYPqNRRxhMMd4zGE6ykTAs0Oc55\nJvf6V+be/2hewn6/jr8fht/93d/9kV3ra1/72o/sWj9q/jBF+aPgoJH0F4c/Nc0ihMiFEL8thPg9\nIcQ3hRD/1av9KyHEbwgh3hNC/BMhxPK7zvkvhRDvCyG+LYT41T/p2jmWRbpkkZ+TqYgWEm0CVZaT\nak2dJCREtBeYJkPFJYKEUsMs08wqDQqULHn7jX+LYBXzRc0bjx7xM1/+BdApfSuxRqFVitYpgoSi\n1AQtMK7m5m5kvd/z5PLrrDfPmKeKsi6Y1Uck+qArbk1gGix53dGFls14h/WGKgsUKkEFRaZyJBKp\ncrr2hilOzFYrjO2w3iC1R0hD2+y5ur6hKmd47xBC8OzZM/q+J8ZIlhWM03RoHKpnByu6PCdJEvbN\nlsXqjGGwzGc1xluUTCiLGQDjOEKMtN1AmmdkacXV1cGLsp8ceVURMHRty3a7xRpLu75jGvZ03ZrZ\n8QVCa/w4wbIikiJ0CkWOms+YvORyf8XYtCyqORGFnw4t62MfWZ2+TZYKvFzjxXO8vKaoAlnZkRcZ\n/eTZtZZ2GhkGh3cJUOCMABKM9QcvUr1gPjtF6Tn9CF0fMK6n6y4Zp5dIkZPqc2R4g5PFuyRJwTQp\npFTEKBibz6wD9J57/lzzp67MY4yjEOJXYoy9EEID/6cQ4q8Bfxv4jRjjfy2E+C+Avwv8XSHETwL/\nMfCTwCPgN4UQX4wx/jGBijqVFIVGiYBQOV3bM/aGqAbqcs58ucRKA2FAiwJrcqIQdFNDVitEVKRZ\nZF7mrx7RPcq2LBYXFMmKo5cz+u4O0xbc+sis1GQhoRM7MpkTY4qZBM527PsPKFRBLgqy5BgRC6IP\njHZHkiSU5emh/nu+oe894/rbGB+QUYFSzKpjlHCMcodMZ+ztNekEZJJp11LVx7T9HiUyiqoAIt55\nrm9eUlYliIiSMA4N1gUytSDTiiDiwZIuLciWxwTnyPOMvh9IEoUPHust+axiGAzt0FHkGmsCu3F/\n0EQH+rZFRIn3UM9qtuOEEa+qX4xlVmeYsUU/fAgx4AeHPDkh2B758o5oRnRRcBHPuY0bapdT5S8Y\nx47NYPHeEJVHxhTXdUzBQKzwk+Xs+BhnBXOTY6yFwVAFiQsBLw1JkjKODZtmIJEnnK48IVjG0TFF\nQ1lm6DTBMpHJh5T6Td48/yLjquPZ5QecloEp3aBUgjGBTXcfzO/58eT7vgCNMfavNlNAARsOwfzX\nXu3/NeA/eLX97wP/IMZoY4yfAB8Af+V7XjcEmt0NUvlXQlp7nry84ur6jt61IC2pztBFTj/uGMcJ\npUo6M3G9tew7MD7j6dUHbJtLurbD+Y522tJ0az7/4ILTozOmaWLoe4TNyHRCCrgwYIynLo85rj5P\noY/QgIwOH9cEJtpmpGsNSuY4K8jTisenJywWc9LsmHby3PQ9vkhQWUImFMoNxNhxe/sMT8RHhUoK\nRIzM6iXDuOX05BTrD23rf/g4XpU1RVng7EQqIwSDGRuyRJJISLTEBU/X9/hwWNG3bc8wjoQQGMeR\nqsjJ05ymaVFKveoqdWw2G2azGS9fPMGMDS+evyArUqy1SJ2SFBlBCcahRQ4GLUDpSNzdEXY9ZAnI\nBJTm5e6aXEmib6nSCZt8yN3uG+yGb9OuG/rbGZvbhLtrz83VjmiXTINAKk9WGnzoSKVCaU+SjCQq\noINj6EecL3l0+pc4Pf5ZrJc09payFhwtJEWSEEbBsjpnUZzy+Oxd7BBpui0yHQhMCOkpq5S6mv9L\nfRHuuecvOt83Zy6EkMDvAl8A/psY4zeEEOcxxqtXh1wB56+2HwL/13ed/ozDCv2PMcaUu6tLdFFx\nNF+x2XrWTaQb13htqKoMhEIqGAbBuvmEL7z1JZanb7De3OBjxn67JpUjlzffIShD0zuMeUK/V5RJ\nwenROXfS46PFhAkVC2SAVEW6yZKmOXWeU+c1m923maIhkynB90ilaXaOOvEsVkt6e02RK+aLU2RV\n0OwtfWwQRaB1G8ZxwqQeySU+KdmZW46KI+bzE6x1vHz+hEcP3+H2bsPnv/RlpBCslqfMF3M2myu8\nDaQqIa0O/5I0FbS3axKpuGtuiVHjw0SiMsqypMhSnDE45w6ljq9MLfLskMIpq4y2bfHOcX19xYPz\nx2x3WzKt2G+29F2LmSR1PUeKjKJcQrREWQAp+BF9dIp40RIXCxLjWa3Oubn5lM53qLSj3TynHywi\nrsil5G4zsNl6lBJIJLc3WxI1J7F7rLuBtMBMEUGOk4YwOZpQEaXgeP6YL3zup3nn7bd4fPIlPl3+\nc26uv4r1OaPNOT1+gxeXT/jcX/5FqiTnwdEF33yvJdAgdY91FhsFaX7fNHTPjyc/yMo8xBh/FngM\n/HUhxK/8/z6P/Ok6Fd/zsxaHSSqCdDxYvs3p4jFSOaJXbO76g8Sra0icJlcnmKGgabYURcHDs3cI\nqidN58zUnMAeokCKFC80+2HPaCNFekpVn5AWc6QusEHgRQ4cNEzHYcA6i5IFSXaMTkq0PCLVR0gZ\nkUKTkDJsBdNWsl13ODEhdECmltlMkmYGY3uuh1u6YYsPlijvaJqXr6zZcsZxIC8ymrbj9PSU3d0d\neEeSwDB2zOo5hZaMQ0PEMKsy2v2WrlmjhEDGESUMaTrj9u6Ou/X1IWUhJfaV2cQ4jK90XQ5qfbtt\nQ5ZkzGYVWgS6fkeWJOy6icEY0qwAIEmTQ0lkkkGUkBeQp8hiSdi0+EwQxwHcRDe1SJ1wefcBSufI\nbs7TT3v6vTpIz8qJJDlU4pTlDJ0kdO3EZtvQdgHrDU71uNJyMa946/SCVVWQMSPENU9e/B5X1zco\nLdg3W4SNFDLyePkupaqoy5yvf/gVggocH81YzTMUCVoUDF3Hvtkz2e0PNPH/ZRFCfCKE+AMhxNeE\nEL/zmQ52zz0/BD9wNUuMcSeE+J+BnweuhBAPYowvhRAXwPWrw54Db3zXaY9f7ftj/PZX3idP5php\nQvzcHcw8i0WKaUELgQw5WeqR1vLg+JS6fIPN+D5FMaNUFzzfv0eaWGblCRJP1P5QZ97cEuIr9wCZ\nkZIgk4rBjGQqp8ofsG+f4OkZzMTm+Q0XR4+JQVPmR2g5Y3KOurxgt/uAbX/HMj+nsxLb7/HG4pKR\nyQrOT96mLAs++eQbZHJO144oFSlqQVJNOGHo7j4i0wUIwWKxZLtd8+47b/P82VPKxRLfN2zHnrpM\nqQrN2DV80m45P14xJhGhInkxYxgnyjJBMgcChIDOMqQ61MtHAbPFgk+fPeHB8RExKpyPlNXs0B0a\nQArBos6ZppTb62dUqaZrt9SzGUIKQhQIAlRzEHMkLxFjTkg1od9xtbtj6wekWiBjIFFLBvMpVbJA\no6nSgk57hBbkaUYljnBiYHI13bSHxHFaPODfeOttHpxVXO17Pr5uudze0N7dsW3uiGywoeE7z36L\ni/mMk0wz2mvK/CFZ2fHs8kM+fPpXCGMDWmGd4cX7Lc8+2OKjJIofXQXIn0AEfjnGuP6sB7rnnh+G\n71fNcvKHlSpCiAL4W8DXgH8M/J1Xh/0d4H98tf2Pgf9ECJEKId4G3gW+5+rlF//WY/7Nv/kFfvnf\n/df5t3/lb6ISwdFpYH7sAEfwAtNIlCwp05TT5ZLz5RtcXz9BxsiXL/4aVSnQcjxUfOgCKVKiP6ww\n+2mLCI5cLRFBIIMkmg68ZLV4kzSpQAxYE3FecrvbIMmp8xWRhlTUvPvWL5CXJbt4wzB07HeGrnXc\n3t6wvr0Gd/Am/Zkv/CKzVBMmz+464EZPVD17d0OxWKGznOXynIjnc5/7HJeXTzk+eczN9TPssKPM\nJG4aaZsdZZEhvKfvtiQU9H2HsZblckGWJehEIqUkOMN+e8vY7djt9ugkZ7SBs7MLbm5uSZKEbuwZ\npgmd5Fze3WJlYDSert1wcnKKzjO8DwTvkd4gMMTZMX6I+H6H0AUxSkQcUHnBO4/eAVmS6zc5W7zF\noj5Gq5zN/pb1bsPgLLM8ZV4IssSTSMGquuBk9pPM1BkPZMW7eck8AdN17LZ7rtZXdJs7Uhuodc7g\nXrJv70hUznpcczvtUaVEyZbgJk7mS/7gW1/lvauPKOrHLOt3uHh8wi//jS/x13/p8/z8X/+eWb0f\nNfeWcff8ueP7rcwvgF97lTeXwN+PMf6vQoivAf9QCPGfAZ8A/xFAjPGbQoh/yMHw1gH/efwTijXX\nt3fkqWBZPuTN8y/z1sU1m83vUAWNSTN6Y5Fpgczm7NuWxWqFQCNlwmgMZpzI9QIXLXjHZAJJGqnS\nilHtDsYTsUfJcKjGIBDZovQxeV7xbvk237j8lCTLCTHBOkkzWhbVQJYIEBNSzDl/8A4vn3+LNDQE\nk5HrE7we6doN337/D/jpn/h5vJqoU40fU0yIRJsd2sqdBAvWWGQiqOdHh5eTRUXT3PH4rTe5ub4k\n2W/ph4HlrMANPVUmGdsWhEfJhE3ToJKSYEemcTwYiSaa2XwFMaJFJIgAeIa24eHjN+m6w3vrbt8g\nlWJWz9he3SGSgLUT1mqScs7i+ORgHl3NEUISEMiqRHYW129QyxNoGsbNJVfNlixqiBB8JEkFqVJs\nmitud56izJnVEF0ORU6DpCpXpFPGw+N3Kc0HNI3l9977mNFHnjYdbdgzLxIezBXFyjGanmlyCFfg\n/cimmXh44kiTQIqlC7C7e87R6jHFbIkdJdpLGB1WGEad/wi+Fn8qkUOVlgf+2xjjf/dZD3jPPT8I\n36808evAz32P/Wvg3/kTzvl7wN/7fgNLKozpkUWkqmqk0AiRkEiN1inL1SPa7mCF5pRk6vfYyZIW\nOevdBkVOKhb0/hlh0kQELljybM5RlrENl0y2x7sUEonCIULG5O6oxZLj4zeo11v2ZkRIiZYLrm6v\nWGQFeZGx3X/CYlahUTw8+gle3H2F4/KYqjyn8ze4cE2/XvPkybdI3/wCCIGSCcINmK4k2ADsae2C\nVAryMqefRqarK4Zhx9HinKfPn3NxdsLL50/IspwYM0bTk1U11jpCPFSunByf4Z2hmtVM1mHGgSxJ\nsMFTFYfyQmcswXu0VDTdIV++3x7yzirTpHmGlxE3TAipQEryPMObEXV8jkoSXAQlNVhBKOdIYcE6\nxPKU9sn7XK1v+ejqBe9d3fFTb7/JZAfSzBOiw40BZx1tHyhyULoi1Ybe3lFlp7jeEoWgdY6rbYNT\nKV5qFBIhICsiUgXGYWLoIt5rXJBECm5uPyJdnSEJeBPx1tA0t2hK2mHDXEISAqPwJHH5/aben5W/\nGmO8FEKcAr8hhPh2jPGffvcB320V991aO/fc88MSY/yBm5demzZLlc/xNsULxba54Xb3IU1jCUlC\nPT9HyJFymaA0SFHRj5a72yuaQRGRZIliWb+J7RVydDg30po78kzyuTc/hzeOl3cfsBu36DSSZJ4p\nSvqp5aZ5RphyHh6/RSo6QvAcLc7x3nO9bdi2O7wf+PjJV4gIyuyUB4/+Kt2+Y08GwLgAACAASURB\nVD6fU89OcF7gRsvtzR3vvfxd1lwyO4a60khyhm6k9dd4NmR5ydAb9u2OfduQZjXlvGQxW+CdYL/f\ncXZ+jlSaNM2IMlLVBVmWMZvPqWdH5EWFj5osL6nmK6JOMCGyaSaskPT9oSM2Ks28WiJ1QqlSxv4l\n0XRIIcmKGXW9ZL46IRz+sBSpRkaPHSZkPkeIFD/uQVhEWUFaEqcNpz/1l9l2e9778Ftcb/4Fv/f+\nP+PFzXOsG9CFZX5UkeucQs9RaoYfJZ4G55+wMx/RuJ6trtlEReMlXipcsKQa0lLQKcmmH7m53HPz\n4pZ+PeKninEU7Pd37JqnuCmQG42zA8ZseXb9TfrxhihaTDXilEGkn23XXozx8tXvG+DX+R6lt1LK\nP/q5D+T3/FkQQvx/5tOfxmsL5tZOKCUxZuR685z17SVKZ+z9xF7dEvIt7fSSIAxCeYie3jQ4uyXK\nHVY0RDyz+k0UjrJyWD/S2VsWy1MeHL8LQYD3jP2E9QFZjpikZxgaPrn9Dt34jDxN6c0tMfYs5iU3\nm5dcXu3Y7HtGY/n2x7+DU5GsekR5+nl8IkhJGbtIN0RwGdqc4myGLAPJkaeoL5iJt7G+wbhD52cI\nBiUiZVlytDqirudMbiTEkfl8jhSCKBSTjQyTp2l7usnSND3GdiitCTEyOUte1NTVMbPlGSqf0XYG\ni8KQ0E8T682WYAxGRo5OHzJfniCkRGmF856xH5EonPGgcqTQxNEgvSAIgZq/gdjfICmI6Qwax/Th\nN/j8xZfpgiV6uO7uWHdwfvzTvHn+s7z71pf53MOf5q2HP8VJUpGGwH4baJqGfnqGLhOsqNl2HX1v\n2TcGJSuKvCZKQQSGfmS/G5gmRxAOISB4yWY/crW27NxEGw15polOYaYJfGBynt4LjBAUdfOZzVkh\nRCmEmL3aroBfBb7+mQ14zz0/BK9Nm6Xb34Je4oLn8uqaRAiq8phdt8a5Pa25IQ0PcDHiwgiq5MHZ\nQzq/R8qa0R8c5o/rx+yixcdbEnLaYU3QE2dnp7xYf5NoI007sQgZoSrQWNKs4NnVhyyKHIRCSEWU\nhovztzhdRD78+D10NqMfNqQ0fPDsn/Nw8TPM8oTz1UPWdzv8lBCMYb1tOD5Z4EIk5hYnJrJa4UKE\nyeGkQyaRpuvIyxrnDE8+fcrzZ08oypy9t5yfXjCMIwCLRY2zIzKryPDoVEOIjKMjTRIW82O8kKis\nRAiF8iO1THCmZ7IG4SQ2GkwSSZMUyHAcKlmcc2y214TBIpUnzObo4KjTlOrhW8QsQYSDJjyzC3x7\nhZzNEQ8uuHz2MXfjHo9HJhUIR1GckicleaopkuSgFmksy6Xi08vvYPqRKTOkcSLGlmmsDgG7H8gL\nzypPyKscMYVXDVCCVEMsJWmVESxEC7e7HTF2HHmJt54YKqSLpC5idh3XvcRJT3WmKOafaZ35OfDr\nr1bbGvjvY4z/5LMc8J57flBeXzDfNaiyQK00m90LkqTgLH/MMN6B2pH5Gd1uR6GWZIs547hHhznz\nVODdSFHUOK+RMXCz3pFXHi89sjJcrr8JPiNNI84YwjijjR7tIUmX6LKmzHu0kljTIvMBKRVpXlBl\nmkePTnnx/A4TFcfVCWHa853nX+VULVlUF3iTMKkRmSis7Vmvn5EvxKEVv1zRTh/T2ZQkS6jKHi8m\nnLVMg0IjmS1WEEbGYUBpuNtuOFbH5HlBURS4JEEIaPY7hM7QQpJlCmTCMAxkOifqhBhAKsdmu6dr\ntpweLdjv7yi1QJqUXMdDA48U7Pd7lNaM/UhCQlHnxBgwbsJaR9OsWVRLYi3x+0tUqRH1Ejvu0VNA\nFSXP3r+lSGruts8Bg0HQq5zhpWe1yEhUzbysEa5Hy4lEeRTxIBrmdoxG4iO4wWCVwDpL9JJxmOid\nJTrI9ZIsTZjchHeC7WakaSPLVc44eMxgSFTOMo0sVc9tC9d3nsUDxTzXxGn6zOZsjPFj4Gc/swHu\nuefPwGsL5jJX+Gi5fHFF8ILR3ZCqkixPUFlOkBKxiEyTZNe0SDGQaInpU4y3SNXhPQxmz6Z7SW0h\nyTweyXr/AkVFxKCzAWdnOCkI3hC6FOIeERVWJExC0myfMqtz2mcdF/MHvHVxwWy+4l98+E2IkmV1\nzt1H38SeV4ymYz9sODlOYCUJXaBaPsC5PXY/kBVz5nXF85efILsKIS45lSVlesQ07dmv18z7iTSN\nr/w/A6uTE4QQbLc7Ts4fEmQgEpFphkcio8J6T1FVKOsRSUqaFnTdhLGR9eaWTMGTp09ZLmeoPDm8\n1M0SovMgFEppnn70bWbzFUV6UGZM0gIlPEFCkhY4M+A//BbpW2/jmg26qtHaErodHz79FCN6erFl\nNGuwBUJ1qGgYxh1958hLzW6dcJyXeBlIUkWzH5mJDBlGlJSUuqZPW6KI9F1HqhOCF/RNZOzhqJIU\nIqW1EzEcGqCyLIPgGYYR02fE3DE7Elg7MtxGgrCoIiEEQzskr2tK33PPa+W1BfNsUaFsxvHyIVIn\nfHp3SZ2uyLM51gV0luBsz8v1R+zGipPlkt44zHhoOE3agx9kWUhUIjDeEE2KlIK+ddR14PjoiG5r\nUXnChCYKg1CRIlW0zQAiI00Ux/oxcVyy6TZ8uH+fx4++xF96tOL67iWZSkjSkvnxKU5Zrm4+oekH\nklSQF55kVbAqj2m3kjAZZF/g9hWL5Cd5evcROpNUxUgJmHEgSQuII1pXWONJkgTnLG3bsjo6x3iQ\nUeOdI0kzohAInaITgVQ5OpMImYFOSVOJcxv6fmTAM00TWV6gZMAwQqxYzWc0TcM4jATnKfKErJgh\nM025PCZJNLOTBxTVgqg1Ion46yckjz9HHG4Q+QnS70gzxWbzlGV+Qls9Y7OFKi2QUTEMBePUIBXM\nZjUDAouBKPGjZkSRLmucCMRXkgn5IlKkmhg0UuekiWA/NhgNqZBEn+CDpSgLQgw4GzEe3ASzWWQU\nhnbwtN4yP8opUnuQ17X3Lxzv+fHktQXzKp8TtSJPJciUaeeYnQuUmnO7u2NWBLqhJykkIU5M8pBA\ndQLavWe4ukEKSVJ4gnSkmSChxDHy8rbhNAiWVc7ZaU2avkOZ5jy9+RZVrsBEprbHu8NqV0+QF59j\nkT3ik91XsVNKmmoenp7TOYPSmrzO0IlARIEYBal6CO4SkQhQE3USKY4e8Oxmz9h3FNUZJ4sL/NCj\ncoHHk0rFOOzxdqSoj5mcZ3V8xtXVC07OLqgXC6ZhZFHXhBDph55ZmeF9RGnF6AM6Kw83AAsueIRS\nqCTDjj1VuWQaGzJKkjRhbHvi7IgYLXjLxaO3kBKqIiHEwLC+Qh8tMWNDnuVMw0R1dAz9lrBfE6oF\n/tnvo+ojpnZk1zQU+ozj5QWIHu8tWuasRM1m15AmKSLNWbctQhjKPCGRBcEJ3JiBaonRkCYjZZUi\nZSDPZgeRsCSjbSb6PpKqhHmSsZ9G8mTFbHnMMF3j7IidFFHCzky8uLaQ5eRzSFVg6MC/vil9zz2v\nldc48xURz25omcYNq6NzsrQAOZIlKW23J0lShDZEPOO4YV6cUFc5SvdkyQwzeoapxWGJviYKy0V9\nwr/3q/8hZZ2zbl+g9cSj88+hkop/+tsTH3z4dQiODI3z0HYdp+kxQgyY2DBL5uTZChFmFNJy3TZU\nRxlnJ1/AuGt6uyEmCRKN6JZ45WiTW1Yqo1ys2FjB9fNrvLyjKEqyrCC4HJVoRJIx9AMxBHwwyKD5\n8OP3OTo6YZo83kW8dEzWgAg4H9h3E2VZkacpMYA1gUQrpqnHGMs0OVyQPH32jHfe/gLWWEY14sYd\n3hiUCvS7O4iK5dHiUPoYI0RIyxxPgpQKR0a1OsJ3O+R8jjAD6ugtxMLzP/2jf8Rl9xH7bkNEYKUm\nnV8TpiO0LBC6pDcVUmTYyePs4aYrBSRFQaITfLQMdiKfSVZHJ6hZgReQJhmPHr5LkZ5ihq9wfbkH\nBdZbpmFgXmVoUtpO0HeRRKbEGGg7h8dzVDrmicCOEYLmIOx5zz0/fry2YD4OLcFL6kVNUhsYHCIK\nQrDMqoKt39G6G5TKyFJNKj1ajZwefQklX9LpDjMWZL5i32wZpw7nc7TMOZudki8SHD3XN8+55IoQ\nM54/v2W3NzAGTpOUbX+LX+bspxHlPmRIAqZp2d3dMq8CZ8u3+eDFP6M3AxcnX2S93zCGK2woyMqI\npj6o9U2CKwUP7J6z8xwhjpnaW6RUSGa4GBBKE0VE6xyRRG6fP+Ps4Vu0Tc/iWCOFZrPbkeY5xEhd\n15jRUpYlfTdS1wusd2R5hnOBvu+RUjFNhs32hqIseHH5glkaGLc31HlONDs2MpKWGaujFSFahmHA\n+5QqLxAyR+cKpWcM3R60JNf6IGxTzohmg5qfcHS24je/8g/YTDsm+5KuG3j7i6dQe+KYoIqCYpjh\nnMMER9f1eCEhSqpZQpqkdMPEMPUs6opidYZSKWbaIrVkvniT6AQXp49p+/eZjCXRKUrk7Pe3CAp8\njEQvkamCCH3r0CqlLCRSCVIpSVXOYO6D+T0/nry+nLkQGCMZxxGtHbiRTbslyoTjkxllVjE1LfhA\nqhV+mBjTjnldM7k5l+uPkDGHuEQpgSClLEs+efKCX/sf/j71yYzFbEE3XtMOVwzbwE2z4ez4hLLW\nBDMxTRE/tYTB84VViZUJd3LBi6fvUc/+NR6cHjFPa6beIlUgxMDYTwgKjB1xWoHy6LxAGMHWWXKV\noArQusZMIy4acgWDH1HB402HGTxFOWOzvkZKjfee9d2afhz44pe+yNX1FUodFBG994QAZjKEEIgx\nZZoM1jqE8IzjwGRH7GSYFSVFpRk3e8rZDDHNURrqcsH6+gqpBDrRYDOkP2jgHB8/RvoBnVbQNoSj\nGQRJdI5Ypahxy4sXVzRbw93aYqNhtThnHt7EpH+ALa9hqkmynCgNU9fTTi0yKqZB4k2FzSRD19K5\nhizLGcaBIPa4cIN1PV9/r+PR6U8ymDvKKiUUCqJlMop2P6DlIWBnSUkiFV3n6ftAkjjSNMdrQ54X\njENP13zmQlvflx+V3diP0rbsL5oF2p8HfpQNX/8qmsdeWzCvS8GIYLe/5M1HX+Tl9TOsteyGPUoZ\nlIpgJIMxSD8RRo0NA5vTK8y0P1Sz0DINPSos0CqSVTNknPGdjz9h/bsbfumXfpbFbEFvJGkpObI5\npRQEKaGqmMeUqMCnYDNFmqXkFnrXsGnWfP7sjAfLR3xw/R369inSQR5WOOd4cPQWWb3ibvOEIghs\nqdhtb7kxd+RVTaYUk4QgDKMdGFNDGR3BDgQXCEJhjadeLJn65qA5M/Y8f/oMnWiuhETrlGEc0DrF\nGEuMkcvLlxRFQd/3JEnCbrcjenB2YnF+Riot1CW5zvERYvRMU0OSaK5fvke9OMHrnGmQyLBkamdk\n1RlN21AXCbqJiHqOWh4RXeTl+x/zye1ToisQtmdeFLx18RCFpGktWW6J7CjKithD8HtEEEy9xw4W\nMUV8ZfHOgIRxWtO5lihbUqVIY2C7uWKePub49CHb7jssVgrweFcwNB1KWNT/y96bxdq2pfddv9HN\nfq52d2effbpbdetW7zhuExuch7wQkfCCAi/IIrwhIBICxUSCNyLIAxCekECKIkTAAQnEQ4QAS7YV\nB6fsclW5XL63bn/6vc/uVjP7ORoe1vZVxS7KVaaury3v38tZa+251tj7aKxvjPmN//f9SYijBBdr\nmn6N1Jr5zIOxeATbqqbvDcjbnPktfzb5xGa+lrsy1TzSGFGymD7kyfNvkkk4e/mKOM3pWo+KNF3n\nkMHjreH09AVFMUGPnijy9L5Fygl+6HH9gIoTjo7uECcZr148Y/GZEgZFiCLiaYWvR0ahiFxElpdo\nZamlZetG5DDg05RYONbtmm2zIc8SkGu8vGJvfh/nI6SSxElMFJUMzRWh3zCEhqbbUG02SF9xcnDI\nB5ct1jmO70yQTmKDJE5KXHdN13dkxa7l7eAsUsVkacbjx0947bXXuLy8IssyDosjjImomwYpJV3f\n07QtXduSpinee7y3xGmC946quWBSTmitZzlZcH3+kjgZieOY6XRvd2iZZGit6J1ExgUynSDbS7yX\nyChCZCnepMhR8M6Tx1TXHf1YkeaaR4/uo5XgnQ/fwieBvX2Jjq8gaAIjfrCY3qONZhgFkTIYpZEi\ngBKMotlVscaBNDIYEdMGR1HGeN9z5zhn2z8m1nPi1LLYjxnbgAgQa8U29OAscQqmzLGu2xlkaE2i\nMqLydgd6y59NPrFgbmvP6CyXlxfMJp9ie33NLEtI04inlx4dYo4XE7JFRpSmrNcXbFcX9OECaQe8\n0yjrsK1kCDUhCMbBIsWASmIO9vfRwWIbyb3l67y4fMFmK7hzcB/COeVsRRokr4ZX5Dk43WNUiQwZ\n61XLph157/QJKpbcf/SQKHZY/wR0gfeSqh3oKsX66pLMSPJFQ9ELVBIxSxOIRuKoZXtu0MJQTEq8\nGnbqk0jincX5kTiJyIs5V+tLQlIymczxUnDx6pLDQ4WSO+OJKIoIITAMA03TYMfxJpA79K4F1a43\neZwggkArRTMMu26Tcmd+vVje3x0W2wGC5+69R6TTA0ZriZMEhUckBSGKEaPDVyPfeP9D3j37GgHY\nti2R0ZxfXRM2Dtopa1VTLmuMrAg+xriROIoYrSXKoCwycq24ajrQkthH9JuIPPV4FLKAg4MF2/YD\nFrM7OB9TtxlPHr8kVxmFgSEonDTEdiQ1hotYoXRE3w708UBaahIj8aFhe/0H7GZvueXPBJ9YMD/f\nrBEoQhh45+3fYHA1ZZ6jQoxRKVEUUajAJFfEWYGwirGGzeYSqQeIOlAxY7DU65pJPifJJPP5kg8+\nfIfZYs6yXFDZDlNDcAqjInRUsFq9jTAVeZKRKo83ZyQ6Io6gHzSZueDCWd59ecZsep/5coKUjjKf\n8d673yTLFBfX19S9Yy4OmZgpvfQcHEw4fWmwTtA6TVrmzPXAy/qUJJ+TxzHYgThJaOuaWEcYAb7f\noAOsry+I05LttSRgqesaQvioyU7f94ibsvy6rhmHkc1mg3WegKcoJsRipG7XLCZ32W6usMIzKyfE\nMsZkKUU0wVrPdDrZ6cuzBRerFbFRZJMZQkeIfIIdPZcvHmPtiPMxl9eXfPq1Y7QSbFY1s3RG5Rqu\nXjniLBBPK6Sx+MTRd9D0u+rTOAiGqkF5iyoEkSiJ9BR7vWa97bGzgSZao5TBDS9YzqfcmR+SDAWr\niwsmkwVxOUcEGNY1V3VFZiNWmy06DxSTgCJgm45upQnX2Sc1pW+55RPlEwvmPREMDUIN2LHBaMW6\nbRiDoh8cYazpgqK96sjHDi0zBJrQlmzOO6K0YAwjyiiyuWZot0QqJdYpJycnfPjsfertFUmS4rzj\n/smnKItHfPrR5zj7ypt86xvnbKbPmCxi+nSfQnikkaz9NbnU5GJgZVuuqnNG13N8cMLB8if40b/8\n1/nNt/4xz1/8En3vIHakpDSjR+jA0f4hV+sVm8qhdcL+QtF3gWaoSbUiS3KaTYdWCmEHxOgQzlHE\nGV5KZsuCdVWxnOyC7OXVFVEcU223GB2zrTcIIej7jr5v8bZDYCmzFO9GrHDEImK73SIFjM2WIdJo\nbdBKE8UpSS4IdqeM6YaGavuK8vABQimCiQlBo4eWD86f8a0n34DIUM4Kyizi4uwVzgq61NA7Dy6i\nq3vSvCJNYBOBkQHf7n4nOQRkUESRxEcabVKi0TEO0PU9UhWs64p267j3yGP9hpO9fe4dlnzptc+R\n7e8RxZar6zXvv/8By3xGf14RpS2TzLBZrdk0I2OvEBvFUZp/rPP2xqzlvwO+wK63+d8IIfz6937X\nLbd8/Hxyp0ViiUgzgt0pWKIsxdcdVTOCU2wZuXxRka8MyzuBLOsZnN1VFQ4JXe8IkSDNPEIKetOh\nshUmHpmqPRTvc3F+TeASrRLuH6Z84bN/kUkxQQZDbFK++a1rPv9wyvxwwYXdsrd/gpQCN1ZM8wc0\n7bsY5RiGns11x+XsFX/hx36Oo8Ofp21qvvLbv8ZlX7F/0CC6CZfDisP5kjSOWdc9UR8T6RgXHALw\nkaTfdMgAcZpSJBOq7ZoQLJPcMDrFxEiILLFqWZQRm9UFQkUYbShLydD3SClpmhqlBd5bhFAEtzOE\n1q5jvXqF9zMODpaEsDs4Dd5igkcKQRZnuCjgvcWHiCyd0rYNyXyBKEtsvSFSMf/st3+L9foCgefB\n/SWjdLz1wVPcWHJwoBgRJBk0a0WcefK8IzJgHBwtJ6jBE+yACIE4kXgMclSIrmEyj4n1jMtNy3gW\nsw0V225FMcScXZxzkD6gHh3t5hVFHrNYHDGd3OXyckUnvsZR+jp91bA6FTRXklikpEaRlXPgzY9z\n5v494B+HEP5VIYQGPt7V45Zbvk8+OZ15PTKbTBkGgZEtkckwxRQte5zzFHZkS8z1ao31DXuHChki\nMIG+t+TaMjqFwzMqT1EaBllxsX4LM5xwZ+8BVfUm1+cOKTrefOd3+OyjHyGOYlAjD04WbF90uK5n\nL2ie1JqklBzuP+D85W9QyJL97BHz6YRX2zOquufXf+tXMTrjU/de42D/AfeO3+X0+RUuDEzzY17V\nG+p2S9M6pBSgDZebFolCxTWrBibeU5qESVEw9BvSLEcpSRxp1OgRviHXLZvrDXUfYW1g0DEyOLx1\n1E0NgHUdwxjoRosioLVHigrnR4auYn5yF6MTetNCcNhxYLu5QCeGyOXM50uUTri8WpMmKVmkQCq8\nd0Qy5v233iKWEbkWFKUhFoqXlxVWGJS0CBmjXEBqgTaWph2Rgp31nDZICUbHRE7hGfFBQW/wsSLf\nP6BMY5pxZHN9RTbTHO4tKA80OsSEvsSbh5xf9Kzfu6ZcOh6+5jk8OOBYFZTZIZICN8R8+e7ILM3J\nspw0itBa8d/+/V/5WOasEGIK/AshhJ8HCCFYYP2xDHbLLT8gn1gwH901Zxc1tmkZx5bFvkcoRd9Y\nUDFhcJRE+CJhGAeqVUuexqRJiXcVOlfMEsPmvKUbBvq0J9jAQI1y53iVE5t4p5oRnqbZ8o33vsKd\nzT26cU2u5xztLRBOUg3X7AXIo30e3P08q+ff5NXpKdl8n9kkpogKtr3FaM3/8av/M/iBz75+l8Xy\niGrbYN2IEXBv/0tIzinMyNg7rtsGoTxhbGmtRasAskQJtfPdlJKqqjg5ucfLF0/Zn0/JE4Uu75Cm\nHW8/eYKvFd0AVgSqTU0cxyil6Lpd/jyEgIkVy9mURFouN69I4pz19QVGatJyinKWoW+Rg2Jzcckk\nn6BEwAdPlme0TcMkWyCiCDlarp6+5Otvvs1X3/k1pguwoef0wwaXxszKJWVccLx/B+Ele8cHIGCw\nI3cPDsijJXlc0DUj5+dPQAryLOd6dY0xMZPpnPlkSpxEhCCpNzXj0BGlGbOlwY0KERRxnFAkOSEE\nxjDigiOLc/JZip0PdH1Pmkwo4xKRCMZhJDIR3n+sapZHwLkQ4u8DPwJ8FfibIYTm4xz0llu+Hz65\noiEtuK4rVpcbNAItL/FSIKTAiQhawRgkKpG7Ev4wYrRldCNFNqEoDYXRJMUrxkvBeuUZ+oEodozd\nOaPo0FpRzlKGZkuWG56ePuZyfYr3DbUzGCkI3tLZiCjuOdy7y/HyhDeDJkci2kBbO9Iyw40tbW1Z\nTnJOX17y/OV7nNx5gyw1hKFFRQOSOVKsSLzEjReUU9isKryUdDWEsOXOcon0Chc8bVMxnc45PX2J\nCG4nvzMZWZmhVcz+4pCXlxu22w6VJHRtR6UkZTHBOcvOmhX2lksmWUpdXZIVGcQBjQU3oqXGO4cg\n4MeRwa1ot1fIgyP60TFaTxRplIkQWoFQfPD+Bww+8PDgdbqwZRwh3u8wcYydG5blHpN8wmQyZ3+5\nRAqFlIFJPiXKNLNyhnOWD58WGCNJopSu65FaMp9NcYMHLTBKM84GmqamLOYUkxQ3SpzvcM6SJDFp\nWtB76NoaNw7Y0ZEkBUlaEMUxSRwxDgNd1zCOI0nysXqAanY2iv9OCOE3hBD/FfALwH/ynRf9/gKd\nW7ehW/6o/CC2cZ9YMC8nBUEHGGc0m4aqtowOIh0whUYJSVAwX+YoExi3nrHZ0rmWPH+ADBO6MTBR\nnvv7kt9+GuMHTWUbxlYy+i1FkjFbGMQcismcn/nJf4lf/covUW1iosgzyaeYwXJ+vWF6pMgzyfq6\nobpqIfYs5lOmxZyRa2aFoSgSnp9/wKwoqFdXVPkpeRIxdoHN9hkqKskmJ4z+BVEUcbgouRIFT16t\nmaYFyuYIbxB6Zzgxn+6hlKDyjv3lkiwpiFJz09MlxgtJO1heXW4Jg0UbAwSGOEHpBILDGEWsY86v\nTtFK0/cjRZrvrN+waO8x+ZQOCONAHCfgBmSwpFHGiIGhR2U5XhqE9aTTJYvBw8mXWOxN2V8ec3V2\nTh8c221FHAnaukUZRb/dsNjbI0sKEIHM5MQmoR23PLp7l36wxFGEdRYpFV3X4qylzAqkUhitiZOY\nJM1I0hTvLH3ryPKc4D29HRjGEaUEWid474mimDiO8N4x9CMhOJQSoMDzsVaAPgOehRB+4+b5/8Iu\nmP9z3AbvW35Y/H4P2e/0l/39fGLBXDiJkoY80Wiv2fQdRkGaRUQxJJMUKQRD31Ove9za4hlRWlNV\nH9A2SzITYbUg6A11K8hCwiJfcmU7Rueo/QDKURYj9x494M995kd5/OQxX391ho8MeZ7R25r19YiM\nLJdXT3l6fsplf8VUzmjqDdYFAi33D+8xPzjk7Opd9g5Sxr6g70/JsteI51OabUPfr9nbO8EPNVoK\nlDB42yOD5WBaIIaCWKfkWc7YbqmrLc717O3tI/yA1hFZkiOlJBBIkphJWVIWW07PL5iYBd57JuUc\nk6YI71FS4IaOtukxsSMv5kgG0DHDMLLZXrIXG7K8JDYxaZqyd3hENDugNLqqXgAAIABJREFUbsB2\nNYv5dOe4JAJh8BhtyCKNnM+Yl0sUgcOjA8pygrUWrTWXlxdsNhuur6/ZrLdIGYiimNlsijGGrtvl\n9kPwdH1304ogYO2IlIJhGJBKkec56STFAzY4xn7XpTKKIrZNjRACYwxRFCGlxDlH37eMY78z/zYG\nGSeUN56mf5hP4v8fQginQoinQojPhBDeZmdq/q2PbcBbbvkB+OSkiU2LDz0iRKADkZBIBVGqsENN\nLyASiusXLU/O1izKgjSR2MSx3WzZrteU6YwsUmR7CcuDlvY6oOMJOgHVb3GjZLvq0Uby2r3PkZYl\n1q1x9Aw+RUUGO/PEY4LzcH5V040XqIlnYCBIyXZ4Tj0E/AB1fcmkzNBKYNMYhMZ7hSdG+Ix2NWKO\nNWV0jA4LosGwiJbMDzOKJGLQYISiqSuU79GpIY9nxEawWm85vjPF2hGlA2PvwI9kSUyWxIzjQF1X\nxHFClCRMspzReYTvuT67ZH86RceKsamYz0pErlHDgJDQNQ3FdIbSknw+JZ8tCAiC7dE4PBavHaFt\nqFYdfd/jXGBvecDl5RXjODCbTWjbljTNePDgAXFsuHv3mKbpuLq6QsrAOI689947lGWJMTuTCO8t\nSu2aXymlKYoCpRQhBLTWH7UmCAi01gghGIeBYewRWiOUpu9avLW40YIQ6DhCCBiHjrapUPEu0GdJ\nuqs0/Xj5d4H/QQgRAe8B/+bHPeAtt3w/fHLl/C4mZI6h7aiua5I0YbPu6CsPwhElFY0KJCIn8RGE\ngEXhRosYwdmEy65iJRSHY8pkHtPpmihJKG3OZrVGKtAyR0jPJCl4+vQxZ6u3UNKzrTqkiTjZf8ho\nHhO6EadGlBNk8ynr1TULjpDE5D5BhCnG7XE3SxnDiDGaJEqQ1hCrkiF1DPM1qkvIKOmTHonm5O6n\nMFKzqTecnp4j8hgrBzbrLYvpguAFKs6wHqQSDENPbCRNvUFHMWm06/meZzmbzRopoWkauqYlTmKK\nJGYymyBkYOx7cANVdcn+7ACpY4zRu3a6dmBSThgGi4pTvId1XRH7gLA9YXlEOLvm+fMP2VZ2Zyk3\n9Fg3ghRUVY31jknf81ZbIaQgyxIImqapGUdLnme0bXvTPE1TliVZliGEYDabfHRgG0UxbdvuPEmv\nr6mbmuADeVHu0jAhsNluyLKc4zvHRFm2W1TsSJqmmGAQQDt0aGVQIXD6/Bl1XZOm6cc6b0MI3wB+\n4mMd5JZb/gh8cuYUE0UfT0nihhdPV8TakAjJer1FKcW0zBACdKI4OlnQD47KbtkrU6yGetuhdUQ/\n9FxeDXghySe7isokiqhWHSLSlJFnkd7hg+ffQvmSzaonySXrq2sev3jJp+MHBKeI0wX72QE6naOV\nwmaQmCkaRVRqZosDpNY01YbSRPjRs73eUBQp6TSiTDXDmNB3HYGAkBJCYL1a7W79hSDPU4wxeB+R\nFXsE70mimA8+eJ+9xYzBWqrNNVrMMEpircf7QJJq3NjjrUNLSZZNSBNDCJ6m2rJdnXJnb5+h2yKD\nRxDR91umkxnee4pyikQQlCbLM1ScEkTCcq6xtiWez/EhIRRHPL/8Oh9+8DYHBwc0TUuSpjtFSlRC\nBFmcUDcViYkRGKSSHB0d0Q093nkmkwlVVX20M6/rXbql6zp0ZGibljzLODi8Q7WtkDqQpDFnZ2c0\nXU2S5SAEry7Omc8dZVnQVBWnp6coE7G3t0ee7XLraZpgjN4tEIkhTmZ4/7EaOt9yy59YPjlpoteM\nnWVSBr7wxWO++ZvXLBeG6MQgBgitZbpYomJBc9VSVS3SejJv8EWAkFCtepxXjMbRdYG95YRJNucL\n91/jL7zxIxgdMQbHdJaTFYpVHfiR+3+JMjdEIiXRmuViyZce/DSpUZRlgbcwDD1j37EdOrDgvWMc\nWqQVZHmMiWPOXp3jhdhJ4YQgBM/qJnCP40g39CymM7q+x0uBDND1A0JKTJTj+xHvKy6uLohis9tR\nBsMwdPgAUkUoBKJ3aKmJtEFqxWy+JEtjlNoFscv6GUIE6rqmyDPKNCaMA8Eroiii7Rqq1RWzxR4E\njxKBkCWE4ph4mRHhIS6QQjCOF+zvL5B8mkAgBFAqQimFkz3aGuTUMInmJNpQ1zVSQZ6kaG3w3tP2\nPQdHR8ymM9arFW3X0jYNp69e4b1nuVygIkVR5sxmM4ax5+LynEk7wboBqQKb7Zq22WCU4oV3hODJ\nspjJfEaSaLquRmtF17YIIZjOFuRZyjCMNM34SU3pW275RPm+grkQQgG/ye4k/68KIRbALwIPgA+B\nvx5CWN1c+x8BfwNwwL8XQvg/v9tnnj4/JSkduJyjoynn97YEJDayHGYZSmuurjckaYrsepLeIdOI\nbqwxIiLNot0OeNshMku6N+WLd36Kh/deJ89ykjRmudhDCokgYN3IPGm4V2SMzoMKTIspxmiUVvRd\nRxSlRJFmtI662mL6nZWa9W4XoLuGOI4Z2p0DfJ5nRNrQNA1JktD3PY+fPaUoCmazGU6A0IokTolM\nTD7ZyYzGvscJy9jXuL7GOY8Umm11RZ4XWA/TIuN6swHAO49SgSTNkFHC+fkFgwuUeYq1I5O4wDmL\nsyNDL8ijhKA9bd+ilcEkKUoqotiQ5zNENsFve8RiD0kEAsDxwTvvcb2uqKoNAcd8vkeRpzjnGK3k\n5dUlJyohzfTOws4Yuq6jLEvOzs85OztjsphxePcOIgSMMaR5iptNidOEEAJpmpJmMS+fP8eYCG0U\nSgr29hYIYLSWse85PrqDd4E03aWK1qsVl5fnDMNAmqSYKCKMFhNFAKw3G5zzH7c08ZZb/sTy/e7M\n/ybwu0B58/wXgP8rhPB3hRB/6+b5LwghPg/8a8DngbvA/31z8v8H9DS5yphmESYzSBfx5T/3ab75\n7feJrWLQA/eXE6Q0uLaBhaCYFFifkuQF+5M5B3uHoKBuO0bbY7Ti+PgBUaaw9DS9RW12u+qymCOF\nYL5Y0LUR9XqFQODdgBeOLCmRaYaUOxmQVoLl3h7r1TWTyZTLyyuePnvGwcE+683mow6GAXZ53CTF\neY+JIk5O7pKmKX0/cPrqjHv3HjCbLHHWY51jGDp63+MdrDdrYhnIsgJjIpIIvB3QRjGOA1prQj8g\npWIcPUU2R+mEdvC8OnvJJi24e5gSXI2Rjmk+Z311wfwoo2rWWKPIpwV22C0+WhuivSVjMOjlQ2jX\nkEYQJEIKDu/eJyjJi+cfcH15yTvvvs98PiEyMdO8IC4Krtan6MqQ58WuJsA62qYhS1O+9MUvMjpL\n33Y8Oz0jyzKyPGXoB7RW5FmOHUf6qqHtO5x3hHqkrRukVvR9T5QkaKm4d3yX0Tq88yACy71DUJK6\nrhidYzKbk98EcmkUOkkQUmAi80f+Mtxyy59m/tBgLoQ4Af4K8J8C//7Ny38N+Lmbx/8A+GV2Af1f\nAf7HEMIIfCiEeBf4SeAPNCLSkSM3C4wZeXXZsr835Xh+yNOnL5AyRcklD/dTgotJ8gKpUoxJiOMc\nKSBLExbLfUZruTg/53J1hvcjXXOjyxSCvqkRUjJJMtI8J4kURmUksWEYRrquwwXJYB0hQNeNWGfJ\n85J2W7FabxiGgclkxny+ICtyiixjW1d457B2ZPSBbhh2O848w9qOYWjZbhuKYsJssk+apmzWK5QU\nVNuKartmbAfSbMo8T4mUpm073OiJpMTZQB9GggsI4YhMYDqfkmYL3n7/A4be8elPvcb11SWr65r9\nYmet5+xInuV0o0OJgBtHvB9R2hClMVFkEHGJzA6gu2Tz7Ixock1y+ACCZnHygNnRMY+ffZtxcGhT\nk6QZq+srvHc0Zy8QUhBHKbP5HOctWZJydvqcKErIsgylFHleEBvJ2Lc8OT9FCEFRFDTVFqUkl1eX\nu/8vm6KFxHoHg2cyW5BlGVLKmwWx36VumgYhFVJJ7uwfEicp5XQCAvq+R0pDFGWIxMCto84tf0b5\nfnbm/yXwHwKT73jtMIRwdvP4DDi8eXzMPx+4n7Hbof8BqnbLrJa0Q0+9rgg2MM3nzLKeNx58mcXy\nmMLkaBPI0gLnHYP1RLFgGEYEAmsbvLMURYyODhmGgaqqiKKIpm2RwROs42i6wEhFLBVJkaFkhJQ9\nWmuUUFR1Q13XlIvZRyL9YehRSjGfz7m6ukbJQLfZUHct4UbDP3YDWkukUNTbCk+g7Sqc9YBEaklR\nlECg67qbQ0GBVBJrHWWeUbc1RAn7ywlNtUFqjx17MhXhlSQSEi0VUu7SGnXd0tQ9r16d0/ctyzKj\n7TqyuMA6MErSdy2zyQSjY6wTxEaSpgV5URD2DnEvniOzKb/97pt85t5DksVdRGQI7HL1r9/7HMbv\ncuJZYpjlE642aw4P74KA1fqKNElompq+H8nznNVqzfX1isPDA373d9/k4uoVh4f7LOZLDvYPePud\nt/Desre3B+za+VbVhmk5JU1yRudo2w6lFEop6rreadDTFGUMWu4WB7yjbxuGrkMIQZZl9KLHjz10\nCik+Pp3598sPS+v+w7R6+2EWMv1JtqD7Yf6dvyep/WHwvYp9flif8z2DuRDiXwZehRC+JoT4S9/t\nmhBCEOJ7inu/689iPSUIT191jG3LZTWw98ZD3vjUfU72HwCewe4KSJwfgYDSisjEpGmGMQbrLATB\nYm9GbCK6vmVbVVRVRdu2JElC0zScXrziUAlUbKDTWDd+JJNbbVcM48j1+optvaEsS4LddRqsqhrr\nLH3f4pxnCBbLrn/2pChQQhBFGu/BOkccJbR9RpTEXFysmE53OXkfPEIIoigiigybjcWYmL7fkErD\nwWLJdnNNHhdkuWazOmea3mfd1hgdE8c5ZZKw3taUeYmShrZtUSpQZDFFGpEWBU21QsWCSO8mtA8W\nSbgp1hnQOsaPCn14QnN2yvbqGndyD8JICAl4h1eC6cFdhm9/k65v6HrHvXsPme3t0dTVTk54BV3X\nM5nOmRRTtFEcH9/FWov3ji996UtsNteIINk72MO6EaVjnn74gjQt+OxnPgNSsF6vubpaM19o5rPZ\nTTfIBufcR8VCUkmG0aPSlCTftQno2oY0TRmGge22IoojlBLYMPJdMnq33PJngj9sZ/4Xgb8mhPgr\nQAJMhBD/PXAmhDi6qYi7A7y6uf45cO873n9y89of4K03zzFhTRAje4cZh4tDxGg4PDxA6UDT9Bht\ndoHIO+xo0VGMiTTGmN0XHkGapeAtVTMwjiPL+YK+78myBKM01juuVyvatmU+n6O1IopipJREUcRq\nsyaWO2VI0zSEEDg7PUUbQ2DXP0VKuXuf1MTBgYDUaIQyNzt4g8ChleTk8JiqqSgmJUcH93DecnV5\nQVEU9MOAtRYTJfRdQ55NWOQxbd/grcUav6uwlJp+6AnOIyO5e4/WbDdXrNdr2n6g71tSLfnSawvi\n2NA3G1w/sh1G9mYl69WK+XROW23ZPzpCCo2e3AUjsFdX2LGhbRs22zVHbkAI8M4ihWDv5B6Hxyf0\nT97j4mLNs2dP2JstOTw85Fu/8y1UgLHrmR3dwclA27bk+e7MwTlJluXkeYJzjuPjE/q+Z1LOmS2W\nfPvtN6n7lk89/BTHR/fIsyl931JVFXmef1TpGUUR3ns2my3L/X3qbU0Sx6zXG7x3GBPTtjsrvV/6\nlV/l//nK14kis8ux33LLn0G+ZzAPIfxt4G8DCCF+DvgPQgj/hhDi7wI/D/znN//+bzdv+d+BfyiE\n+C/YpVdeB77y3T774NOCqU9Rg6QsZ+h8RplkDLZD9AJjFElyU8JNwNyUtjs3ArvS8DhOsdZSVRtM\nHO1y4OOAHQfiOKYsS6LKoPLAMI40TY21ljzPGceRKIpYr3cHmmVZ4vG7cedzttstZbE7mJRS3BSj\n+F2pfQj0/YjWu0VFKUldt4zjwPtPPqRtGtLpjKZtcN7dHKTO6LuWy8tLZtMJx5/6NCd3lmRJSpLG\nvPzwd6hfvSA4h1QprbVExtD2PUkWI6RiGBu00URYlEw4mOeYKKJvamy9Zrm/oN62yChDS0XQGXmZ\nMdjAfHlESA2SfUxZc/r0CT/+4z/N8+fvcv3qJcsHS6RWdOfPSA4eUs73WH/ztyjLKVkWc359wenV\nOWkWk8S7BXWwu6KirmkZXvRoLXnw4DVWqxV932FMxDe+8Q329/eZLxe89vAhi1lJ2zYs5nOk3O2+\nQ3AkSYLWmjiJGMZh12vFyhvJpkdLqKoNUsLV1Zo4jhnHESklP/vTP8mPfunznJ29ZBwtf++/+Qc/\nhK/GLbf86eIH1Zn/XsrkPwP+kRDi3+JGmggQQvhdIcQ/Yqd8scC/Hf4/EmzeKlRuGHxHPXoOohyh\n5U0Xv51Vmve74CmFYDopyPN8VyEZxx/toqWUZFm2U0aEQD8OEAJxkhBpw3ldk0cxeZrupIKw20Ha\nAXxgMimJ4xjnPGWckqcpnkBZ3qEsJzvHHimYz+eMY0/f9ygh6PuR69UVfdviBfQ9JIkiSEmcpwTv\n2Gw2pGnK1eUV42Cpmi1CeyIjOVjO6UcYfMf92T6f+/M/x+rijGfvvk19/ZyhrklMhI4MwQswMVGc\ncxhHCAl9X7M3yzFCoGRMMpvTjxE626OcP0SlCbEC27e7Hbp1lMkE7wXj9orl/gEvnz+n7zrGvgM8\nCEH94pL44AF5Pufw8ICqasiykg/e/xDrR47v3CMy6saLtEYaQ5oWLOZLhsGy3W4wRrFcnvDkyfvs\n7+8OgN9+802qZsOnHj7ijdc/w7vvvcu5e4XREWmaMo4O7wNRrLH9iJUO7xxVtWVSTNCR2Uk4pSBJ\nNRcX54QQKMuSx48/wLqdlHI2m/0Rvga33PKnn+87mIcQfgX4lZvHV+yaDH236/4O8Hf+sM+bRkv6\n2uEAFadoJdDakKU5xmjyPP/odjsEdxPAa4wxCCEoyxJrHVIqIKEbetqhBx92QX8c2VYbFpMpzo6k\necZiucR7z9XFKybFPrPJDJNESKVp+w7b7UrRFQJnR1ary5vbfsX5+cWuWCUyNFXNarXaybOV5MWL\nF2TZhNUmMJ/PEUIhgG11hZL7OOd5/vQdeu8xUcqdgwNW6zX3Hz2kLGbEScLV9RnXFy/4whe+iDc/\nxre+/lVcv2G0Oxmj84Khd+TzGC0U5WxCpiS1dUyLknxxwMHRPbTSPHjwgOlsTr3ZkhU5GQ7bbAlK\nI6Vn3DY8u7hkbLe8OD3ns1+wBCTV2WPmJzMEAq1GPv3oU1xcX6CV4qd+/Mf55X/ya4AnzSaI4HDj\nSNtb7t69Q1VVlOUUHwbmiz2ePXvO9eqag4NjlNKkWUpWJHzrrTe5c3KXOEq4eHVOkgV0pPDB8vTZ\nc6pqy/2Hj9ibL+m6Yafy8Q6ld3cDhN87A3BY70hGy/0HjzDaEN8sxB8XQog3gP/pO156DfiPQwj/\n9cc26C23fJ98YhWgn/2xL/PWm29y8eqcxSQCJZnNZ8wmC9IswVqLUgJjdk2VtNZovft1x3GnoGi7\njtm8oGs6ijxns92gI03fddi2ZX9vgQ/Qti2r7RVaBiwCpQ13Dg9JspwgoCinXJyfESJDnuc473He\nc319zTiOhGBxbrfTbvqOqqmJpaYd+p1+fOjxUY80MVVVfXS3sK22SKE5v3jF4/c/ZLaccfd+wWK5\n5Gh/ycHRCVEUoYBuLFFBcvrsW8wPHnF4chchH9FWWz547y2azlJ3DeNV2Jldi5x8f4/ZfMFy74DP\nfvZz9P1AtVmhjMHEMXE+Mp0vCPMluh8gbABF0Boxej58+oTV9TUheMS44eLsBc1kwuHCsjef8Pid\nbxJFu51zmsT8zM/8LF/72lcYBsukSImMYbNZEUWa/YN9zi+es16vefHiKRcXl6TJhNPTF4QQmM2n\nrFZX7O0t+cVf/EU+97nPUzcNg+0Zx5Y0y5mUe9w5ekQ2KTi/uGB/b07wAR8Ce3tLhJC7Xj7dwP2D\nuwitUEpjlMK5XXDPbrTnHwchhG8DPwogds3knwP/68c24C23/AB8YsHciIT7Jw8YnCWJCu6dnLB/\ncIciT0jilL5vCSEghGBvbw/nHN77jwwIhmG4yV/vjJg3mw3jMCIMREoh0l2Ofb3dcnx8BwKM3vH2\n229xeOcuNjgOjg6oqi2Xl6fYocOGQBwnrC6vGYaeKIro+xHnRoZhIEkShJLMZjO89WyaijzJef48\nUJQTjImomhaA7XbL9fWaatvw/pP3efzhYxbbAz73+S+TJCnWw2Z1wWKxx3J/D+8HkgcPOX33q1w8\n+4B0NkdEBclkQpxNcPYJzllC2MkcizwjSnIcAqUUTdPS1B15UbKta6wP3Dm5C8UEGRSYKYQFQXii\nZMrZ6Vd58t5zQPDhh08ZrefZ0ydMZiXj+hWbas1kMmfbbLHWsq0H7hwfUNdfpG0rlvNdOmN5cMjT\nly949PAR773f0zQN5+cXNE1HZFK+9vWv8tqj13n06FNcXp4Tgufk5GR3N5PmDG2HHQJluWQyKdHa\nkJiIh/cf3LS7dUxn0xupqWMxWVLO2QV5AXmaU7c7BdM0L1B/fNLEvwy8F0J4+sc14C23fC8+sWA+\n9JZ5sc/DezH7asLDk9fIEoUfPaYwBL8rygnwkVytKIqbsvmBEHrKsmAcBpqm2h16mgg3DvRdx2df\nf51Xr15x994Jp6dnfOFzXyBNYg4Pj3j/6ZNd06e25dnzJwxDR6QSnp8+486dOyD4qMdKmiaMo8SY\nXdm+8IFuUyGjiOVswcuXp+R5SZblWLuT1HnnGUfLbDbDxBFf/tKX2V8cMPQ9680FVbXFaM1ZXe36\ncd9IY/PZknXdc3j8iOXxXZyy9K1EqIIQHAJFuLljODm+R5rkfOb1N+i6gb4bQQiSJMVozdGdI7TQ\nBASCGEQA4RFBErIpv/xrv8xgBwzw9d/+TZ6+eJc3PvNFZtMpV5sNv/rr/4Sf+6mfZT5fsFwu+Wdf\n+adY+yH3773GaqW5OD/n3r37HN45Js4zvva1bzCZlMyme3St5+zsbcaxJ00zlNKcn58jiIgiRZaV\nPLz3iP3DO4zBgXUYs7sDGMaeJElYr654/ORD9vYPWB68wfj8JderC9brNQd7B3jn0LFBxQGjDZGJ\nqKqaovxes+6Hyr8O/MM/ttFuueUP4RML5i8uXjJJU3SSUCQT8jhitCPeOfpm53XprWX0DmstIgSU\nUmitsdbuJIkBxmHEjY6x6/HjTgGyf7zEB0GaZ1SbDYvFHBNr1tWG7XrF/mRKU7d06Rbbj2hpEAKK\nYkLTdDjnPiryURrqeiCKIiaTKVfWYaKYvh9o6pr5fI6UgaZtGQPIsJNLbuqGut5SypIiS3n48IS2\nbZlOSuwYsDbgrEVpjQi7tgNRnjGZ3yEuFwgdU6Rz6vYKEWms9dRNzTCOxHFE19dE0e4g2HvP6EZM\npLF2pGsbgoNqdU4WxQjtAI24yScnScZP/tS/yNOn7xLC7m+7c3SfPM8Yx5FyUvDnv/zjqDjG+YH3\n3n+bSTmnLAuc80zKCRLPwcEd3nzrTQ4PDpjPp1RVhdaag4M9NpsNIXju33/A4eEhxkTM5wvm8xlX\nV1e7gi0l6Zue6XSCkprHTz5EysDh4RH1tmGzqfjMG18gjJ4sT7hcQd20mPX1rpgoaJq+pm07vPOc\nnT3nnXevP/a5e9PL/K8Cf+u7/fw7Czt+v1PMLbf8IPyeqcv3wycWzN/+9jfBC+7dOeTktUPW22uk\nUBwcHpCk8UfXlXEMN6oW5xwaj/SedrNh6DvGwZKkGU0IHB8fs1gsSJKED588Js0zlosl4zjStu2N\niYLeBeu25eLykmEYEEJ8lMbp+548zxFil7rZbDZkac44Oq6uzkiShDRN6Loe6yyR2vUUaeqGOM0p\nJxlSG/bzhLLLuLy4wrmd0iZJEtp2p6ku8pw4NozDQB12Pp5KGu698WUcBhnHeLFbuJwXvDxd07Yd\nm82WNE05OzslyzKMiYDAaEeUhDgyPHjwgGa9ZrG/T4hTBAHXrpBpgUAihOT47glDv0EIwfHxMWma\nUZYTNpsNruk4WOxhreXs4gznHJHJePnyJcvlEuk922bg2++/xXSWE8SuU2GapkRRxDAM/MRP/OSN\n5LMAwP2/7Z3Zj2T3Vcc/v7tW3dqrq7urt9k9nvEksRN7Ria2WSIICUJxhJBYJAiLeEICCSlA8g+A\neIEnXoAgCCgPBIjCIhRHSUQestnj8T6x25merbuqu6tru/v24+HWOBNrxp7p7ukyk/uRSn37Vtf5\nfavu6VO/+1vOSVJmZmYYj8dvXVtNU6nVqgxHQ5BQLlf4waU3uHptjWatjqpm1Y/eeO1FKqUaVaPI\nwN1ird9jdXWVSqXM0tJSVii6WsU0ixyuz3MAfBx4Tkq5dasn72W1o5wfL97uS3F8+7KIUxwzB88P\nCd2EkT/g8rrggaMnUDUFTc0KHgBYho7j+QS+j23bk8nIFM/zME2TUqXMcDBCUxRM08DzXDqdTtY7\nktmbX19fR1UUkjgmSiN0zcQPfLzAzcbFw2xduhIq+J5LpWxQr2XZ91qtJkEQ0els0GrN0el22BkM\nGQ2G1Go1fM8nmJRA6212iaVAVRWazSaOk62+yXZGplnKgEqFRE6WAY7HjC2L2WPH6ff7zLdnUY0i\nBbPA2uUrtGZmiMKQ3nBAEPqkabamXVVVFDXLStjr92g2GhiaQZpAd3ObRrPF0HFozNZBMZBpgmJO\nkiMCnmczHG9SNAuoujZ5ny1c18V1fUAyGF6jXq9nm52CkCROKBaL+L4PEp564km+9dy38dyISz+4\nysLCIsvLy3S7XaRM8XyfIImpNxpEYdb7H49Hk9wtJYbDIY7jYDsOxaKJqqoMB0Pac4u49piR41Aq\nN1ldvUTgj9jRbWqNKqVKGdsb8/DDH8Qwsk1lqqoRxwlJKigUD2Sr+a8BXziIhnJy7pSpBfOlY3P4\nToqmlnBdhzBIOXnsBJ7r4LnZEIIEOusbuFEAKZP6kWBZFuXyHIaIn/ryAAAQTUlEQVSuU65U0DWd\n8XiE49j4fsCZM+9nbI+JPJ9Ll9YoFEyEqpJEEbquE4URqqKSpNFkvbRLr9ejUCyxsrKM7/v4foTr\n+Kz21qg1G5QLNaIoQjcMqrUGpaKF7TrMzc1hlUvEcYzr2lzvbJJKiRTQmmvhuh5RFDEaDLEsC9/3\nuXr5KvVqBW80oF6vI1DZ2e5hlbI85cNxnygMGQ6HDIYDNrodwiDCskpomoppGszPzeH5HiN7QKFQ\nQDcMSlaJjY1rFM0iw3gHoVVIhY8iVNIYUEARUCgUePPiRY4cPU5rdhaEpLvZpV6ro2lZAYuZ1jy+\nZ7O1vUWaSsIoJApC0hSWVlY4/+xzNGdmqJebPProWfo7A8rlCorQuXZtHV3XsWoV7PH4ra33QmS3\njWEYvnX76Hs+l9cuE8Uxs60WjuNjmCZRLJip12lU62z3NpiprFCrFVD1mPZCm9HAwfFHFMwixWKB\nUqlMoVjGtu176rdCiBLZ5Ofv3dOGcnLukqkF80pjHtWIsCIdmUKxoGGPxygCTNNgY+Mavp/lSGk0\nmyiKwLJKQLZb0DB0FKnx/R+8TqVQpl6vIaXk5MlTvPzSy5CGGKUiiBTP9yhYRWIhEanEKhWQEnTd\nIgxjZmdn8TyXwIvpbW3TG/RpNRsULRPDNImDgEsb67TqdQ4fXpnUyAxoV+aoVMqoqqBYK2PrGkIq\n9Ho95mdmiKMEvZQNDzWrVcaOjUAhSGLOv3A+u5uolFHVV+h0NihUSoDEsX0Mq8irL13kjUurPP/d\n7+GHPsVSgYKqE0QRcaIQRgGL7WV6wz6WVcAPFSTgBQ7zK8skSoDqxUjNAD8CvYSUkIYjCkWLMPTY\n3lzHKlZQBMQli1qtzE6vR63epFKpTm7zBK1mE7NYxB6NcG0PpZXt3kzigAsXzlOtNfCDrBTc4tIc\npmkgZTZm7DjO5M4ky+1yo2RcqZQVqMiGSvS3vlhnZmYYDseUrTpxGmPbPp3NZxFxyvr6Omfe9wHc\n0YjWbJvR0GZrawtVVVnvXOfw4SP31G+llA7QuqeN5OTsAjGNDGhCCPnrv3WOOFAwU4tapY4hVI4c\nOUKtVidNJGHkoSoG1Vo5mwxNU6IopNmsE4Yho5HNwvwsb169zqHFRUql4mQseQuhqoydIWkQYJUq\nBH6QFaFQNQajHQwj2/ofBgnVWok0kdl4tOMxM9OiWquyfv06J06coN/rkQYuaAVG9gjT1FEUBd8P\naLWaWEUTz/MxTRPTsIiiGMdxuHR5jUYjS7RVq2WFmp2xS9Es4EYeg8GQi6+/ThBFPHD0JL7jc2hl\nhSQRhNLHMAq88eqrXLl+BUjRFIVCvYIqwSwUUUVKpdygUW9hWAWKps7h5RUura1x9rGzLLTnGY6z\nsflCscjqm5eYaTZYOn2WqL9Gb2MTNwhIZIKhF2m3l3DtEY5nUy5ZmJbFdrdLo9ZAKApB6ANwfX2d\n2XYbkSRZeb96C103GI16uI6DlJI0TWk0GkjIvqAVhY2NDRzHfmvuoFyuUKtVcV3vrQRbpVKJIPCp\nVLJ6od3ONjKN8UOfcq2GORmyGjsuIo7RdJPxeEynu87Zs2fZ6Q/Z2enxsV/+XaSUU5l1FELI/cq2\nl2dNvHvu96yJcRzf1renFsyf+PAySlmnpJY4MnMEqQgeOnmaWrVKYTKGmuVfMTANC9e1ieIQyyqw\nszMgjmMajSalkkUSRkRpjKpmvWBF0ehtbRGmAZVyGU3TaDab9HZ2svqecYTvRVzd6PK+UycRQuC6\nLv3hiAceOMlmr4euawgE1VoJXdMomgU2utvs9Dcpl0tUihalcpkkjvGikG63y3BnxPHjR1hcXKTT\n6XL58hqVSiXTaZm8/NJFHjv7CK5jkwpBzaowdl2e+cY3WVhYYLjTx9R00BWGg/FkJ2uY5Q23ijQq\ndWzbxvbGRJHCznaH2E9RNEGpVkcVKnPNeY6dPEx7rsX1a1ewSmVURScII0bjHk9/8pewhzaKIjBV\nhUSCF0SYpskLL1/ksUfeTxC6jEYjFEXFdWxKeoEwjpiZbzMej5Fpwvxim+2tbdoLi+hatkN17dIl\nPNvBdmy2elscOnSEGxkgVlff5PTpU5P0CUU2NjbY2Njg0Ucfpd/vZ/lywoTvnn+eB08cQdN0zFIx\ne860SOIE1x4TBB6u5/HG62u05mY4d+4cxaLF6xe/j2oqVCs1zn7kk+/pYH5j/8S7cSf/m3dq607b\nO2hdd8Ld2Hq3v7sbW+92HW+kG7kT3i2Y36mtdwrmUxtm6Yx8KkmAXjEpFgosrqxQq9ao12okMpsw\nNAwDVdVw3DGappKkWR7whYV5rl/fwHWzWpB6sUBZs+j3tomiBEVRaM5WUYQOStY7XFtbo9lsUq1W\ns4k4xeDl197kkfedIUli5ubmiJIU13WoVirU6w3SNCGIPApGdmfQmq2zuDRLr9djq9OlaFkYZoGR\n69Cen0dTFBYWFhkOhzSaNZrND9LpdEiSBNt2SCe92UZzBtu22e5vUSqV6G4OePrjH8H3fQqWheeF\njIZOVtezXsUej+lv73DixFFc1yNMYjwvRIgH6A9GjMdjqtUqg+EYL7K5fOUS29tbWeKwYonZ2SZL\ny4ewqgViL6A4ySgZeC6abrAwM4OMY5752jf40MMPEsdxVj/U89FUHS8MWFxZ5vKVKyDANExs28EL\nPF568QLD4ZiHzpzGtm38MKHWbFGp1mkvLOB6DvZ4xJkzD7G1leVT2dxc5djRk/hewuW1q7Rm6nQ6\nHYLQ55mv/y8njq5gj12UQZ9XXn2NuYUFDFNH07J0woZV4tyHH6fVbNDv91ldXWV5eQWhSpLkvV/Q\neVqBLre1v7buJpi/G/vRqZ5aMFeVENcXzFgpC4eWWWgtULQMUCRKIhCqQhgGFIsF0jRhMBiyON9m\nZ9gnTWFubi7LkpjEpK5Lb5xlP1RFQprGCFEiCD2iKKVolTHMYpYbfVJ0IvBDojAk9G0GdpZKtVAo\nZDlhNB1JSCpjqrUqoZ+tnCloBltbXUzTZLY9T384pGgWUVMFTQiqlSqbm13m5udQFEkSZ9WLymUN\nxwmYW2ijmwYCQalUojXTwHF9VF0nihKsUgXbGRL4KSoppXKRoqHT9z0KBR3f9wgDD1XTKBRUoiDk\nA6dOopsFVE3FHu9g6EVs16WgFzh06DCNRoPu5iZJGHH+Wy9w6vQZlg6fIRluo2s6iq7juR6qIjB0\ngziIaLVmJ2lrJVHgougmq2++iSJMlg4fpt/bZGfgoBsmiply4vQh4jjh2PFjrHc7HDp8mI3L6wxG\nfaLAp92eYzzOvpw0TePkyQfZ2dlmcWmWzc0tLjx/geZMnbE9RqYpa1ev8cgHHkamKYmiU6nUmGvN\nE4cenc51Qt+FJMAPPYqWSV3WKFgGWzs7iPdAcYqcnGkwtWBeqht4TsLDDz7MkYUVUpEwGAwAMDQN\nFEEcx9mSQUUllZLVtUsUzCz3hqZLhKJmOa8HfVqtVvZ6TUchJU2zhEzlSo0gCJBSEoYhCTqqVCmV\nKkipsLJyiLLrkKYSzTBxHAdVNybFETRMM0HTtEl5MoV2u43vB1QqFUghDLJVHp4/5tDKCmPbpmCa\nWUEGM8WyClne9GGfU6dOQypJNUGqSBzXBQG6pjE33ybwszXv1YrgytoVZtrzrF+9RhAEHD9+nCDM\ninVomkarXmdpaZFr16/gOEMSoFQos7K08lZecMuyGI1GLC0s0tnc4MFTZ5hdWMbtbaGbBaIoQAlT\nLKuIYugohk51ps5oOMLzXLa3t4l9h3KjxcJCm972Dooe4acRzXoVRSTYtouqgBemdNcuc3j5KDLV\nWD60TLezwcrKUa5fvcLs3Dw/8XgTz/NIZczy8iGee/Y5Hjt3jv/8n//micefZH7hGEXrOaqlGrVq\ni/F4xJFDx9jobtBolOl0xmi6he0FjIYOplWlVqtRa7Wp1aoI3UDbx3HOnJz/T0xtzPzAG835sWKa\nY+bTaDfnx4f31ARoTk5OTs7+kg8w5uTk5NwH5ME8Jycn5z7gwIO5EOJjQoiLQog3hBC3zDq3T+18\nTgjRFUK8dNO5phDiGSHE60KIrwgh6jc995mJpotCiI/uo44VIcTXhRCvCCFeFkL8wTS0CCEKQojv\nCCEuCCFeFUL82TR0TOyqQojnhRD/MS0N02K//P9W/r0HW7f00V3auqWf7VHfj/jLHuysCSFenNi6\nZW3iu7BVF0J8UQjx2uR9Pr5LOw9O9Nx4DHf9+ctJGa6DeAAqsAocgSyVNnD6HrX1FFlVmJduOvcX\nwB9Pjv8E+PPJ8UMTLfpE2yqg7JOONvDI5LgMfB84PSUt1uSnBnwbeHJKOv4I+Gfgy9O6LtN47Kf/\n38q/99tH99PP9tNf9mDnEtDcp2v5D8Dv3PQ+a/tgUwE2gJXdvP6ge+bngFUp5ZqUMiKrp/j0vWhI\nSvlN4O3JrT9BdhGY/Pzk5Php4AtSykhKuUb2D3dun3R0pJQXJsc28BqwNCUt7uTQIAss/YPWIYRY\nBn4B+Ft+mMjxwD+LKbFv/n8b/94Vt/HRxT3Ye7uf7ezW1m38ZS/s2YYQogY8JaX8HICUMpZSDves\nbI/Vqw46mC8BNwu9Njl3UMxLKbuT4y5wI/n14kTLPdUlhDhC1pv6zjS0CCEUIcSFSXtfl1K+MgUd\nfwl8Grh5f/NUr8sBMm3/f1fe5qO7tfF2P3t1D5Ju5S+7RQJfFUI8K4TYS9bLo8CWEOLvhRDnhRB/\nI4Sw9kHfnqpXHXQwf8+sg5TZfc076dlXrUKIMvCvwB9KKcc3P3dQWqSUqZTyEWAZ+EkhxM8cpA4h\nxC8Cm1LK57lND+mgr8sB857WPvHRL5L56K5zCd/Cz356l3re1V/ukieklB8kKy7y+0KIp3ZpRwM+\nBPy1lPJDgAP86V6EiR9Wr/qX3do46GB+HVi56fcVfrTnda/pCiHaAEKIBWDzNrqWJ+f2BSGEThbI\nPy+l/NI0tQBMbgn/C3j0gHV8GPiEEOISWXGHjwghPn/AGqbJtP3/ttzko/90k4/uiZv87LFdmriV\nv/zjHvRsTH5uAf/O7ofsrgHXpJTfm/z+RbLgvhfesXrVnXDQwfxZ4AEhxJHJN9GvAF8+wPa/DHxq\ncvwp4Es3nf9VIYQhhDgKPADsabb7BkIIAfwd8KqU8q+mpUUI0bqxSkQIUQR+Dnj+IHVIKT8rpVyR\nUh4lu6X8mpTyNw5Sw5SZtv/fknfw0d3Yup2f3TW38Zff3KUuSwhRmRyXgI8Cu1oJJKXsAFeFECcn\np34WeGU3tm5i79Wr9mNm9y5nbD9ONlu+CnzmHrbzBWAdCMnGKX8baAJfBV4HvgLUb/r7z040XQR+\nfh91PEk23neBzKmfBz520FqA9wPnJzpeBD49OX/gn8nE9k/xw9UsU9Ewjcd++f9N/h3c8O/99tH9\n9LP99Jddvv7oRNMF4OW9xh7gYeB7wAvAv7GH1SxACdgGKnvRlG/nz8nJybkPyHeA5uTk5NwH5ME8\nJycn5z4gD+Y5OTk59wF5MM/Jycm5D8iDeU5OTs59QB7Mc3Jycu4D8mCek5OTcx+QB/OcnJyc+4D/\nA43ph1xlbAoPAAAAAElFTkSuQmCC\n", + "png": "iVBORw0KGgoAAAANSUhEUgAAAXMAAAC5CAYAAADavt/0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvVusLVt63/X7vjFGVc3Luu3bubbdbcdOupMYbIidODE+\nIjdbIJnEyImDxHsQL0gIJAhgQBYOT0ggRSjcZKSQvEQRoAQeiEniRJhEYAvbiS9Ju7vdp885e++1\n12VeqmpcPh5GzbXmWmefPrv7nM1OzPqkWjXqsmrWnLPmf/zH/7sMuLM7u7M7u7M7u7M7u7M7u7M7\nu7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7+8fefgj4B8CvAf/2K76X\nO7uzO7uzO/smzAG/DnwWCMDPA59/lTd0Z3d2Z3f2W930JVzze6lg/htABP4i8CMv4XXu7M7u7M7u\nbLKXAeZvAV/Z2/7Nad+d3dmd3dmdvSR7GWBuL+Gad3Znd3Znd/Z1zL+Ea34V+Mze9meo7PzKtA1W\nhvgSXvrO7gxmJ4Htsyiv4rUf/ODvsCd/4x+8ipe+s/9f2PcDf+e5z/bLeOA98CvAHwTeBf5P4MeB\nv793jr3xe78HRevYQEE80CkWQIKgohPFF0QTbck0IsTtmtxHvIBKxjRjGJghsQCGCTjnUFXMjJwj\n42BsN5mUV0A9tv5a5PDtGa4VlvM5i0VgOT9m3s4J3jOOA0+ePeODx08pxehmQpgprlNMEi541BuW\noe8TORlIRoBcCqUIpWQODjvmR4KNEJ85Ls4i23UhjYaoMjzreevzS3wXEFVSclhRSlHEwMwwyzgR\nSim4JiDOUK9YUygkJBTimEnFKAPkFbApIMLxG8c8+swx3/Fdb/PgrTc4fLDEB89qvebx+1/jS198\nlw++9oQv/sx7LD6zYPWkx9YeSkZFCBRUDR8KTefxDrzUexxjImWlPe7wJ4EHry1o5wV19f6HTeaD\n39wwPDO2zyLDusesoMEhviAevDpOvvUB3/adb/NrP/cP+d1/+LczDJHT957x/pcfV8ahQtHdZ6EY\nmbYT5i7jGk9ocn30TPjrf/5XX9az/SJmP2r//dc94Zd/4i/zHf/unyDFQIqBmPxVO6VAjNfbcdp3\n3fZ1vTv+F38S/yf+DC4kQog4P61DwoeIDwnvp/Vun987trf9j37yL/CF//CPE4h40o3l9r6P2/6r\nP/F/8SM/8bvJOAp6Y/lG9/2tn/jf+b5/748wppZxbKZ1y5gaxtgwxvaF1kNs6f/ST2H/4n8EW6Dn\nev1R7a93fPUTUH6CKkZ80uWngH/rBR6vB/ARz/bLYOYJ+NeB/5Ua2fJfcxPIAXCiYPVtqICJYWaI\nuvqDFTAMp4oATgUvsDyak9rIenWJA0oBFSWWVMHfwEwogKqhouAc3hfaWQ+xxZLhVBA1nPeIGjFG\nxgG2ssWbkL0nxjp6ODiYM8aBg4MZvoPsEklKvUdnlFLwXrGcIQtFFDMFG1ksHEePtpjzFGlYUdhs\nMmlQRAXvlUEAhJINNRDx5JIQHCL1TYko2Yxm0WE6PQCu1GsEwxqjnYPGQm4KqkoMimw8Tx+fEu45\nohfc3OEWDWKCcw4chANotobrjOPPdJgKl1/cVtD24KwQvBKcx0tBDZwTUi54L0hQUixIKVgslKKo\nClbq8ab1RDeirlB7J0BAvBC8Y9HOOFgc0nYzmtBweHjEZrNhO1uzPFyQS6aUjCOTChQrlFwYBqN0\nSpsU84pzjm7xEp7oV2KfZl8kX3fzzn5r2MsAc4C/Ni0faRocVgxk99veMVpDVDCrQJxSYu4dwRmt\nV0rpca7QzQNxGJECKSaEytoQgQIlG4mCOjAc7UI4OjwkZWFzOVCSsX6SCI3UvtGMfozkvEbVMfeK\ntsJRM+dYl6gKhcRoI31ZVQbsYgUlcUCB6LEcAcP5graB2aNEd1+RXriMMKQRy4YqiDNcAypCouBw\ngCGAqiebVHaMAnlixUICzKAUjys94hVcJLuEC4p5AVWkgSSFmQ8cHC/IJRGHkfWzNbEY62enrM5X\nxHhGOy+4zmjvF2Y5YmtHPC8EB86U4B1qI5YV8YIZeIwhQtFMmHk8Qm+RVjtyKhhCiYWgHudHVECl\nUETqZxAF13rcskEcuOBw3tEGT+lausWMdrkmbgtmSilAyqRcEFEMQXJ9n845mlYIIbykR/r/a/s0\nXU/2dTfv7LeGvSww/3gTKCqTB3Z6ukQwASmCqZDNoGRiyhVcKWSglEyKkZIT3jn8rKHf9hWUi1SW\nTyElpRElNIX5QcPBg47lgafvC6dPRtIg+M4REzgDK0YsiVQyiNC2LbN5g3MO5zwpjTzrnzKOBlkx\nEuomuSALzhVMFVNhcTynaYy3HyTciePZ+RnH4zGP87Pa8Qj4oDhnLO41iAiWAVGyA5EAMaGqUAoh\n6NTJRUQbSskIBaKQNwkakJki84wTUA+jA81CuwzIXBli4fxsxfmzFc9O12wuH2O6pfiEC4mHv3NG\nWBSaY7A3DHJGR4cUMDIi9TMSUWLMuOIxM0oG0ZbF8ZzFMuC9YDkTR+i3I+t1JMYRo468KIYI5JzJ\n/UgpM5rO44LwLb/rM2jbEMxogmM+79jkRClCTkJKCQxSSpQCIlpHNG7OfNHSNP/4g/nDdz7FtIvP\nv/OpXer+O7/zU7vWb3/ntU/tWp9553Of2rXk8z/46fVl+g6UT+tiv/8TX+GVgbmKwywjIiQExMgl\ngwV0khxc1TEoeaQXCF2GXEgpkVPGSgYnNE1DNzvi4mxFTAkQigmI0seRsGxol8bhgXJ8f46J0s62\nhBC4eNaTLgdEasdixer1c0+hQ7WrbNdGslYmXooxWE+SDYE5rXicN/AOnNHMAsu5sjz2/LPf+d08\nGz7gXnOff3D6DE0topnGKd5D8I7m2ChIZeMaqvxhICZYNASDSZbCBVQhjj02FGIGawztDC+GNuBn\nghRQlKKFbhkQTfT9ig+eZFZPNqw+OCXrQJiN0K3JM+P4OzxmmfmRYVFJ54qegxYjx4xJS7FSR0AJ\nhmhkc+QWtA10bcPhbIE0mXEciXkgtA5vA32hfsepUIpRhyaGFKNPCalvmTe+8yEpj2Qiow2IRMwi\nWIFcIGfiOBCHQomKZKHtGkJQ2tDg3csI0LphPwT8Z1QJ8b8C/uw3eoGH73ye+LH+/xfUQr7wDlXZ\n/Aau9RGXrmD+cdd6Matg/unA5re88zlS/lQuhX7+Bynjp3Mt9J1P6UIAf4BP+nm9OmZu4NSRS0YQ\nBCqwZyOrIWZgBbOMFWOIAy4JYglLkZwSYgVBES3M5y2z2Qmn52vW656SFCioKNvtyH13RDMLzA5a\nZgcL2sWS7fCYMUa6xRLLVkG6H+mHiO8ha6FpFQsBVSPmyDau2YxbMgOuKzifEGnAC9o6TDOuc8wW\nDd/7ue+glSX/xh/61/i5/+dnWP3G/8IH+pTUFoqA9w5RcN7IJSDe0bYtOKEkQ4uQKQiClYy6AMVI\nKcPoUau6fU6ZPBSkrw7RMGnmOs+ErkM7MMlcXvRs1hc8e3cF20RoBpaPHBIEVxJNOCDMFXEj42C0\nJ0bfJ2TlyaZoSTjnSMkoWUhFSWQODo/p2o6SCzFGgldUFSdCdp6wWND1jrGsqt5thk9C9oY2injo\n08B23OKGgveelCJ5+gWLamXxKWOAD56cI2qCWSKEBW3boqKIvFQwd8B/AfwhatTW3wX+R57jE/rk\ndiez3Nk3Zq8MzAvx6pkSVQzFcnVMWi5QqpNLxDCDjHJ5tkWtZ9yugMxs5mm7iHctokrTdrzWdpxf\nrtluei77kZIzFgubTUKc4jvhaHHC0aKlXwlOntGGlu0wsl4NgHK5PacfM+qF8+GcRWgwZ2y2Pet+\nyxAHtFWEFhGPOEEIWDG8Nngc86Zh2Bo/+nt/jOai8Ed/z7/CuL7H+Zf+O74oA6u4xmkFalwdjYTO\nMVt6ci5ElDRWuYdc/YY5G0WMkg1TMFFKyhAFS44YEs1cwTLeC2qGkvGNZ8hb+lXh9IM1q/d6vDoW\nB0bJGTHj3uJ1Ht17hJ87HvMu64un+M7jVEmSsKyYM+rNKKUUcjJiylyeb1iczMnFk8h0zjMWo6jH\nJNMEYdOBOIdTj1ehOOhmDd2iIbQTAJsRx5Gc68hrTAMmBfUKoyBSRxySwDsHKNlGfHAslnPmjXvZ\nj+1+djNcZze/BDC/szv7xuyVgblYoXoPHWJGRlGBnApIoaSMKwYuo1SmXijEVJ1hsU9YjLShgQNw\nzgOC08J80eKc0rSO7aZnGCOnT1fcO53x4KFHtaNp5rzxxpu4tiPnzHwckdNzNustKUXOhp4+r9CU\nmBfQ1pF6IwEaAiEUXKeoaNWIAZxDxZNL5v78AQfumHd/6Yu8/+V/iFnhe77/D/LGn/4z/Cd/7s/y\nxbN+irhRsqtO3+XhnHbWkMYEZvQ2hVyKkGMmFwBDvMOcVLmC+jmWVLAsWHEUM0BxaqhTVArZlH4Y\n6S+2aBKSJVLnGTaZg3tLHh6/xVuvfQvNvKXxcy7PM6fLc/K8wEqnyCBjdtDhloEUM/lrI2b1vcc0\nchACgpJzwahRPjHGGvViimsc4cCDgFOlWQZcK/hG0WAMcYP1nhACKWVSzqSUKSkBSvANJUdizNjk\nRJ0tlsxmMw4WB3TBkeOnIxN8hD0vu/n7Xs5L3UWz3Nk3Zq9OZkkRcw5VMLRKCjIxUTPUMh7wKqhW\nh5loIIkxSqKkTBlGLi82zBYtbRsJruqwVjJYAgo+NBhKzCNP3tvw9tuOsYfglZN7JyRnrNbnzKVj\ntd6gCmNJ5HHEdcayE8IsIy4j6pGhxVvBNR2hBXV1JFGESROOtE3HdnXB93/hR/jKL/wyLsw4Wh7w\ni3/zr/PP/fif5nv/qe/hg5//WQaLoKBJUC/M5oqGjBNHPybMpDJyq/KCK9VpXB1/gAkinmQjZXKY\n5jHjkpJLnmQcI+aROFbgzluHlYFSlNwr4xBprOHegzd4+OhN1AspJ+4fn5F7QVYrLrcbdOMJTpg/\naFjcOyAjzO4VTk+3SPQUjTUiKK0ZtjLF+bsaoSMFnOEaoRWHOI94oe080gjiQnVqp4INkZwLKUa8\nKqgQzTASOTtKcZWVJyNLoekCR4s5jQt0bUsKLzUZ7YUEil/8D/7KVfvBD36BB+984UPnZPPV34BS\nzNWRKVJjmaT6kFBD1BAtqMu4orXzLMIUuwsYzuUaK+5qnLnzCe8SXjNOM85Na82olGkxRKZXFJtC\nYG+aTHeklBvt/cVRQ0b327tY8901bsePK6W27XqfI19d8frcPMWbK2qlvmeY1ns3LFKj4kQwBbQG\nUJgTrExBEVPIMgZSCnipaQme57Y/tO2oUWJuajvATdsvYt9smk/5WbC/fb39dZ7AVwfmpbKtIoZQ\nKJpRU5IlnBrBG0EyPhScVwRBW8hJ8aPDmbLNyvY8cioXYMr8IOG9RwqQEhYTKVZNWXxmvdrw+P1z\nXnv0JmWEg8WCeFQY4xacxyQSLSFSuPdggZ+NHN4Dv/BkM+LgKVrjwH0bmHUtzjuwxKBxisTxOIu8\nNnud/v2n+KZl1gVSyRwe3+OX/qef5t/80/8xf+/f+WHeHwpRCmkozJqWphFElOQcMa5r+CGVtQs1\nusdEoViVPKoCM3WABYtQtp7YF0Kb67drhmQjp6E6Lr1gfXWojjkSeiH4JceH93n99Tfo45YxjiyX\nB/SHAwffcsTXto85/UdnzJdzZgcNYeY4PDzk4GFifs+xuRgwa0gy4KaQ0JhBrT7pViLmDN86pBGc\nF5wP+MZjFMTV3IKUEyXWSJU4DpQhIimBZkIjxDwiqqgTvNb3Npu33Lt3xMHBHOcUVz618ILn2cdm\nNwP8tn//x29sp+f8AFOpYF5sD7xEr3FqwnRVQ7VgO0DfDcYMxGoXoD7hfa4g7hPOVTC/WmvCuYxq\nqYuU2kFIuQJ0rrqS5y+3QXwH3ru1J31oDVDqHU7gLc+5Sl3MbgJ4mbqQ3f9HK4jVPq7616bO5noD\nVBCdYgVqlO/0We13AlRfUxLIgiWwLJDkCryv11Pb1bU4uQZ0LxOgP+cpkQ81nnMMrpFZPvJ0+IFp\nmWz7nz7vJOBVauYxgdMqEugUG66Gk0KjhbYxfFNoGiUoqE69ag70fUODoCr0l5H1KmKcs101NRa7\ndZRipGSksQK09wEj8d6TU9784Awnc0Qv0VbREFitzhnHLSKR114/wrnC8sTTLBODbWtGaKxJQrg6\n5FfnaNsOr54urInbEdFCk4XjRUd/eUnjW1TAiUGOyPyA3/z5v8qf+qN/ij/3Mz+NDIlcBtq5oprB\nHCll8jiBUg2Up5jRzlvefusNnp4+5dmzZxgOLCNapSopDoqiMWHZ1c4gFazUZCQTY3kciD6wPevJ\nvWEWaNsZ8+WSguFDAIGuaTlYHhGJ3H/zkHKesWFL8Yd0y47usMWFOdo2+MWGtInkAhJ6WudJuVSn\ncjC64wOavjDOCiXnClQIpdT4fAPGVcTEaOYeAVI/MA4DkuooTIXKVJuEoOCMtvHcv3/E8mBJ21XH\nMcNLlVn+HvAd1PLO7wJ/gprdfMNS+fjwyGzuarkCtCtmzgRKEytXwVyeoruovwPsmmVPzNz5jJtA\n3Lm8t86o7i97QD4h5G0cuQbyD7PyHYjvs/H9ZZ+Z2wTIxfZAHbm1Xd/7PlO//X/OSn3GuYWVu45P\n7SqbvKZr1FFtxcv6H2ZT6PMOzNMOsAULgqVp8TvQntj9DWA3bGLk4qjt/fu50bAPA/Rzh0DGRyD5\nN2SvkJkXzFJNHHKOIg7RjPoqsThnzDroWsG7+oA7CTXZxAWclzpUd57V5Ui/qT/ys7MeVGjbXRed\nMRVoR8wKm4tL/v4Xf42jwwO2sbA4XJDHnvVqTc5CaOHo3pwuFPw8U9yWHOsQTp3iFDKZTELoUPUg\nwrzrYFYzHA984NsffSvx17e0xy0lDbhmRhFh6HvKB2t++J0/zk//jZ9mCEIjnmXXVv08wjgkSjKs\ngFiNL3fe8fC1+5zcO+D1Ryd86atf5ctfeRewSbe2CnKjkbaK6xqcS+AL2IiIY37Y8vCN+5Rxxge/\n+ZR3v3aGWyr37t9HUMYUyTlydn4GNrBczNgWR/P6Q4bHhcv3tqRVxr0eaGct4j0PFi3NecPF+bqG\nSxaPiEBRRGHmF9WJ7TtOfAVcJ1VXvzi/5OJiO0liwtCPJKmgFfuIpvoDNiu1bEOofZtIxgscHhyy\nPG5oZp7QtSBG6l/qU/tC2c2xfPzPqhR3zcztGsBqrkWVWWSSWdRNEoNLE6hWiUml4CYwdxMzd1Nb\n97d1f9kB+W7ZSSw7lniTqe/B6h5X/vogvlt2Ektl3VN3sN9Grhj57lVunnvdhTirjvorZo6hcHX/\nItSyIFMnWAHd9mSJqTiI2BUzr4tiWShJMK/XoO6VEgSLE5A7rX4qL1V6cZMMk7mFw5NEtrd5k3nv\nsXHh1nmfzF6dAzRDkfpVGb4+QNPQ2YVMECEEI4SMOsFpPUed4J0Hb4ivGYpFBuaLExofGGLh8ftn\nrE7HaVjpkMZwTQsqpGFgvVmzSWuCeoZxxebynPVmhYoyWzbMF4Guy2gD26SIOXK0ynC9kMbCMES8\ni4ShpW0LzgvdrEVl5K2TexzFh1jzPpSqazfzJePqHOccz/oNh3//7/KHPv8D/KVf+Bk676uzUoQB\nY4gJDaCmlVmbo1k4HpwcMm8DB8sZsTzi/fce16gbSZVFYBQ8OtZIIKjDdBBmBA6PHvLGwzdYhCO+\n5fU3+PUvf5GwNE4ePkJdYdz0XFxe8v7X3ufZ5SkHi2PUCVGE45MDLp+esnq64vLRAf5wTnMIs6Zh\nduiJ2RPPyvTsCk0j+NwgBNS1BOfAwsQEBe8L9+8f07QNz85WpBwpyWq255gYtiOuZGYdLJczZvNm\nCmFMWI6EoNw/useDB/fp5i3ee4xMLi+VmcMLZDe/CDMvtgM2dyUl7KATmBCrSizVYQL7UoiKUqRQ\nJF+B+TWoT21XUE04V6pePgG5akbFbunm1y979fI3AP2j5JY8VVH5MKgLNjHrm+C8Y+HXAH4T3J+3\nv4J5vbMPM3MBrakIO2ZuttOz5eozNSZmjlGSYkmu10GxqBQ/AbkviFOK1ytWXnwldMXZpJtPr7F/\nQzsgvwHizwHwfTZ+BfKfDNFfbZw5kFOq6YoFcNXZV3L9MgTDJNd0eclgkEWwAK34Gv1iynz5kNls\nyXw+p3Ez7t17yq//2hfZnCcsC06NPCbEK9J4ZrOGbXpKe3hMXzaMOZKGEStKCIp2UxieKNuYiEUZ\nk8OZIMUTrDD2kXVa46UhuBZaR84jzaJl9eyCPFwiOHT6AjcXFzTdgmHcMpPM4/ff5cf/4L/K//x3\n/zfCoqtgZIo4qRpoywTOBsXY9iNt1zGbtzRdw8NHhzx644ivfuUDrHgUKFNihZWCxCozhcYjAm3j\nOHlwwFtvvcXxyT0uPrggHCqjRDofKCnx7INTPnj/a5w9fZ9nq8eksTAPc5LCoJlsSu6FD750zuxo\nSTtvQcG5htl8ZLv1DJtEzhmVjGjA2yQTqFZNe4p+yRmMwmIxI0vhybNnNXJlzPSbgdQXmuCYBY93\nc+6fPOTg4IihrMhpjTpl3nXM5h1+5hCvpO2GPm1f2SO9sxcC8zIBl10Dm12x8kkGVpuCAWrHvIPC\nMo3DTJQigrqJnbuM+lLXk0Z+JbFcAXqepJuJmVPZ+ZV+w77E8jwQvym3uKk72gfyfafobfepIVPt\npB1YT/tNboC5XbH2er6zcoV5Yrv7vMZM2WnmxjSKuf6sd0qLTaAv1KS4EpQyrS1N20koXhGvmDfE\nG8UpxYM4rSTS2Q1n6I0ekP2bug3ixs1ec297//+/SXuFYF6TR8SUYkPNgDRHHgrZKaVLtR2qtz6X\njOLYFdMK4mi6huQSzs85PrzH4cEh3rWEriWmwrtferdGqASPZSGPNcri+HjB1gaWsmEYB5giKcZx\nRL2rei+JnDYVWMRhWVFzlS1HITjHdkisLrc0GghNRsWz3RY+49+icYaft+RcEFXGYSThODlastkO\nACzjyO98+G18lTM0GM4pIxltM96MnD0ksKyUGHlycc6br5/QzB3jpvDg0RGDjcQxk3Mk9tXxmLIn\npkjA1QqFojXeG8+9z9zncLEgNA1RI/PG8brr2Jxu2PQb1qfPOHv6mMvhjM22597xG9ig9CVSxJGG\nSP9k5PzJlm7pyH5G0wKhxo4nMlhBaDAxstbqliod/ZBwsutgDDfpnPN5yz2OOT07YztEygAaIZgj\nnMy4d/SIWXuEdw3L5SFNC0O5xKsxO2hoQy1MNsaRPH5a6X3fvMUXAPMaWSHXgGY1qusKobRKCmgN\nPXVXAF5QqdEa5LpWLRXQfb4C9rrea++Y+Q2Zxa6AfJ+/wm1Av5ZXburmOzaePiSzBCI78W8HzDsg\nv17rre2P3p9KRq80c7si5DLpLqLTrdtNb/MNB+lOiqFQgqPkHYC7a2D3Sgl1JF68XUW2VKcnFCfI\nBOhXoH71QvZhIH/e/ucNg/bX36S9MjA3TVAqz6CAiKOkWv9jHGDohdAZ2hScqw7QaPWx3r3r4AXB\n0wbl8HDOw0cP8S7g25Z+NbJZr9BWa3gSuYbw5Y6D+QmHiw6VzHY7sN0kiIXNdkPTOtxWEV2TcmaI\nU+hY9gQa0iZiGVIxVGr336dIiC1taJi7Yx7c/074aiSOQtt1lBLpupblyX3GYUsILZvVmq/83/8H\nf/KH/jh//m/+RZKLFDPUJVyjkDNjX+qPXEDF8+X3vsLnPveQsMlky7SHjvuyRNST4kjKmc0qsd2M\nbMsuRG+KU8+Fpm0I85bFckYYYNkc8IXykNdlTi+F98Yn/A+nv0IcImnIDHlN2J6hKZAEokS2GWzI\nfPVX36M7fpsZBQ5nmEXazjP2Hkh4UWTiZuvNhpx7xrFAKrTzhuUs0DjHmCLOOeadIy+WbM4HbEik\nPqNOSRHEGsgNjkDQBrOIqic0npQKkUQphXGIjP2rB/NkL/CzMqmywOTUvOaaXLNzraBluyG4XB+j\nMAE6qCvIDrx3wH57346N78stN5ygXN3BTjPf181va+d6g5Hf1NB37Lx++9ddAs8FboF9oN87xt45\nbopmuZac9294+mz0+vgVpMv0TqR+XpavwTxPIJ5ToSSleEcODkm1NIfsg7mvoYniy3VI4n40y41h\nwh6Qy/V3d8XEn7vvBR6sj7FXB+aWK5ATEAqWS3X2SdW+8iikVEgj1WkTqlaWLdWiVNQEHRFPUaNb\ntiyPF7TdjKEY3cGMxcEBeaolUgRiGpg1B0iZE/QAykiKT9lcDKwv18TtAKp470gWa11xaRgHI/WR\nbYyQofEBGsBqXRjnA213RNPeYybHdPkBsVziNJNyJLiOTb8mnp6R8ohqYtm2bFPke978AvMusVFh\nHHuSjahkxpLJBrXYQWG2FNqu5ctPvsKj+/fwzjg4XiBd9Sc0YQaWGfrM2dkl55uWVXqG2bqOglSJ\nMbI+PWNWlM9/bUHzeMnZ2Vf5VRFKHvn8d30X3/r+Ie+XwFmpWv359hmlD/SXMGRl2Ga8CasnhfX5\niLZU3Va1RihNeq5ZZeAqjuVsydBnLlc9Y8k8Oz+lj54HDx8QnCcPNSa+dQ3z0HK5voRB2UqPvR84\nmq9oZEYXjOQFS4USjOxg6HtScKQUuXi2Yr35lIp4fAJ7EWYuU3jh1fZuQ66XWpmgXGPE3jHU6jWU\nCtpq07o8f9uV6kyd5JW6nnRz7Pr1AdhnwOVq/TydXD/C+blj5teAzF77JrCzNzp5/n7BTw7QGx/R\nzvmpcKM7nP7IrtObKqmy+7zEyMHhUgV0TWUCdkOSkf0kp3h3E8yn6BaZ5BduO0D3v58bLHwfsJ8D\n6P+kM3OozrmS09TJytUTXkyICcZtInglSy2Q66U6E6X+MyVOIYsiqBhWelSbWsCqdTjnCKHBewXv\n2PSeo+UxeRDSWiiDY3tu5NHYrvta6S8aWwrOe3IspFjZ4bipunvbNKzHEW+C62p8tHOKc57l7Ahx\nDYumwRCEBMymAAAgAElEQVSGPpFcQWZG17XgA0eLY9KwwYYNxeDivX/EF+59Bz93+iv0JRGTkbeF\ncZUZL+tPeT4LNbU+ZC6fneNVmS89Czfn+OSEVBJijrZdcHCkzBYLZpcNzy6UIQmp9Egp5GHALguf\neTdhH/wm6yw4P4OUkJz5lV/4Zb777e/ib7z3i2CBlEdccUhTs16TRFKu3n40szq/pD0+YFDDi1Jy\nwUqNSDBxtIsZh8sDnHpSMg76yJMnT9CxwXkFjSyP7jGsRsZNj3OG98Zs1rIde0yMcXPB4/fexSuM\n8YDFsmd26PCzhBcQMpvNwDhGNqsNm/XLDWd5EXsRzbxGZtiVBnzlhLTJKalWR6w6bU+FykQNKZME\nU+o1rhOL6lqugPz62NU5Mm2L3ZJa4JqNf1hmuXbR7hj5bXb+4WUKPJ6k0T0hZ49111HJteT0oe3p\n/Gz5mmJzi3nLbi01YWh3fBrV2kQQy5RwJWKk4CjJIdEjvqC+kLzBrmDeHhuv0SxSfW6TxMLkBL0S\nCj4O0G+D93PP/2T2SsHcEETrjDw18bMOy1QKZTTSWBh7UOfrMM8XnCreBYwK8MUS8zCvvS0jfb8h\npZ5UBgojwTmcD0ijtCnTzmaQPOMGYhm4fJq4PDsnDYmMYT0QDYJAFobeyGMhDTDGQowjIoWDrkNK\nrW0yxrq40LKcnbD94BzZDFPmWUZoKOLYrFaQjDFHWjUQz+rpJf/SD/4Yv/xXfoqL0RjWA9t1Zrs2\nUqyZplkLxTJFHEMqPD19guUFXmE5O8C5lu1QoAXxynzZEWYPCI3w5HTkfLNl7uc4nfGFd+EkRUbX\nEJoGJw2SI7F3rPo1JWZcycSSUGo0TdFCdjW5R7QOSxvnUKsdBI2ivqkTR6T6gwxtQzPr6BZzVIx5\n8BjK8mHD6bMzSs54q7Vb2oVHSuDy4gIs4zw4LzRtILTCdljzxS/+Ot3C8/C1+xzHOYtlw2LW4hsl\npchmPbBerUnbfzIcoDLFTVcdeM9FuGNvVCBXs3qu2FUHoFqu2mITKOuk434IwG8v1wAue6z8Sntm\n9/r7zs8PSywvEp64c9xeqdy2a3OFstftaz8CcAXsu07gCsyvPp7pZo0Klrr/GhOw78BcZfotyhXK\nq/dkX5BgdUKXiWnfYN1ewOXrWHNXKE4nvZxrQL8eKuyBtt0E6hsM/WMA/pu0VxeaOD209bsoU8sQ\n0av3mbMSU4beEbKQPbRTmVlz0DShJtpgFMtst2tSiJyvzjhfPWazXeHKbIpQUkLjWW8uWSwWrC8S\nxcHqYkOMVZ5QVUoysha8B8ORByNuM+OQyLnKPe3M1ZKwZsRuBFU2zYrzizOO/JIuNmDGMCZKW1ht\nzmnaOccnD8kl4QuMY+TxkyfMGsebaeBhOeZXV08Y19TaLKWAKFaEEgM5CsU1xJKIlz3LZkZZGOM6\nkgqs+h4pC2ZLEBfQLHgXePjgDfr3NxzNHvADdsLbesxFWtO5OaoB7xrMDL/oGNLIzB8g1OSjDEim\nJkdYuYoGsAztvMF1hjipWbcqpBRRcXgf8L4W4yoamR0sCJMcFuyQMAukPBC3CY3Q+Tmn40jRzMFJ\nU19DC93cEbwScyYOhWyRy/UF7UJBIkKmpSGnSBxGUkqszodX9Uhf2YvILEqp8c5W0ElKU8oUOld/\n+BXA63lCjWpRK1f/K1NbxKYY64nR78WoX5UE2Ekz+/v3kobgNlncB/LnAXr+EKBXrfwazN2kQdy4\n+hWg7kB7b9/esZv7qx9ip5lf36fdzMGZQpuR3XXrvl37ah9GajKaPBILGnzVySeZhQgWpBaw84L6\ngnmheEN9oexkmMkp+pHgfQXaPF9yed7yCeyVgbnTMg2fbh+53mEpk0YlW6IkhwbD/DSpQxOIcaRp\nlFQSl9tzaArab9hszun7S1LuKcXQvMDFWphqtb5gebCkjAUJifXQk7eJnGvWZI3Xhr6nVgaMkMaI\nFcNKnSUHanlXJJOGkWEY8Xge8y5+dHxP+21cppGx7xGFdn7CsLlEfMfi6ITXH32Wi2dP2Jy9TyyB\n9XuP+dEf+FH++n/5kyzaBfSGtP3k+KxD6tRnct8zDlssjrSN48HRfYbNQD8kfvO9U9r5Ew5Plhzf\nO6RtA86UUoS3j97i28MJ333vuxg2F8z9Ag0NTj3OKeoaFM+8ixzM7zEUIzhPJIJonaxD6wxD2axO\nOdd6XOdR73GlTs1nU1JH/WwKMQ8M0dPEhmYZ8M5hpdA1gZKFGDIkQWMt4DU/6uh9zyIUwnw+TcUH\ncYwM/YjkBrOaZet9Wye6QKAIaexJsUB+9ZNTpBdIGnIUjHylJ++yz41cv3d2YFUB3e2BqLNrMFUr\newBOBY1JF67gfXsf1wAu7DHya6fnh6WW50Wz3I5o2QF6vgnmto9Qe+09oL4J5LeP1fbOAXqbnMNu\nDDGdekN3ryeWSYcpV9KNodFIwZA9AL+SVgJXyUI6LeYVvWLmTMycW8ycPca91/564H67/Qns1U1O\nASBGcYaliY1L/QJ0AkoMYixTDLUhSciN0JgQSIg25AyQ2A4bynnCa9V6m9bTzj3jZSSmLUimiJHT\nyNOz97FQww/77YbUbxFzpCQwFgzHECMURy42yUAZpCBaa20EBfWJVAb6bSENp8SNcZAWNJ/97SQC\n6hLDZkPfbolWaC1xcXHBdtvTOENcw3pzyeMnkd/2z3wf3374iC+dP6FrmzrZhUvEWMh5ZNikWupX\nCqETNpstJSqDFVLKXJxecPmlU45PlizvLXj7rbdJkplReNDc48de+31sVxeIBtpuhrmGzgfECaQe\ni4XgApYKQRu6VnFlxUiqSVuN4GcJaaB4T3vkCV2o36EBpeClQbyvsz85T+sDcQ2n41MWizldM6dz\nRtKOYbtFSyJaZNxuKRYJMyEWaKYJRyxTJ722ydGVO9qZUcqI9wtECzkPpKIUAttNz3b10pOGPtaS\nvUBoIhlFJva6+73bla6M7BjxHojf0KN3USSp/vMVSLPrCa6BZH9bJ4K4zySnO9rZTSC/1spvJuCX\nvXu4GZ7objBzJr/AbfC+xrx9IJdb5+zS8pPVJKSbzHzvM9tdbLfaI4m7dtndi+xAvMosEg0CdYns\n1WWpIG6+UFyZJiC3qqs7u+kAfS4r32vrrr377D+CnX8Ce3XRLFRnp5venDHNPKSCSZ2JHZm+yDSx\nA6eUXGPC5+JZHhuLZQCFGCMlF5wbUbRmSY6Z87gmxcSQypWEMw5rxn7AtLJvwVOyIpk6IbPVWWzi\nmJFcMG+4UOoUbmSKJbLWDDzLA3mEvBXcsGZ+Xxm2PR4hIog6UhaOj98kAfdP7jOMI6XUmizDZsWD\nowP07Jw/9v1/mL/wt/8az+IGZ4Y6x+V6YOwTnW85ebAgdKsa6RIjfexhrJqxWCb1I6fvnXF2tsLl\nlrBQ3uge8mPf+U9zuT5DzNO0HUUDTjwqHssjIorzgeAdkjOH8xNKWbFJI8nKNG9nxoqiQQjHSrME\nVNBGKWqM25GZzkGM4AOHBwcUhX67QWLDxdOe+eyEEDyzpqb0b8qWNCTUeZyf5v+cCY1vak33bWXl\n/ZixJHShSjpd12GW6qhLHaUk4pApI3VKv1dsLxRnjuBE9n7/OxlD2GkHu/3KlLq/Y7+SrgFU8kcM\n8a8dg7eXj9zPDk927Hyfld/sWG4C+c2QRL/XhmuMei5Ht1vtHdAXboC+u4Kq6Q7lWqa5aiM32895\nJ/UcrkHccwvEmSQVmTJBtZaRnoB8F1/+Yc18D8D11vaNOPfbQH5r+xPYqyu0JeAq556cnzZlfQqm\nBZHrL22qfA7ZKERycXRLT9u1PLh/hJmx2W4oxQjOEZqA84XZQST3mfXlSCxGjMPUO2dMM0KtoS7e\nYSaknJFSZ/BxuabVS7FJigB1dYKLokKWiNMBESW0gbwWNpsVbCIxxim1uGrwxaoEfvLgIZeXl8Rx\ny70HDzh98ozFfMH55YpnTx/z+77n9/Ozv/B3mKeWbCMXvsdLgGHFW68/IDYXHB3P2SCsL9Zs4jPy\npqGYMcYeqHVrxtXI5mtPmR04/uU/8sO0gzGUCqAFx7xbkFOiWERL7UC9c7gQoHHo0Zz5aJQ+0W9H\nyFXbFXGIh6OjJd3SQyPVUWwF75UWx+FswaxbYsGqBj+bk1Oh34xsVlsO2iVSjG7eMQ4joQ2UaDgZ\nEQHvhWKOftySciH2I3mAMho5gPMeH/w0f2otqJZHmypDBlJZvczH9r8B/gXgA+B3f9RJLyKzUEf7\ne45IRVFMCuxG3FPEiVJrsDjZAXjC75aJmdsEGjerxMpetdjrtu2DPddBiVda/dWyz9BvSi3u1mjB\nP8cB6sgfAvIrBX2fnV+t7aoy4u1jVyz/Fpjv3s+1OFT3Fa731eMTmO9i+EPVyK+APHBdcGuXPOQV\n9Q6dol10x86dXWvm+zKL7rV3IL6Tua7OeQ6Q7//fJ7BXmAG6S1avYFBjzKcnfHJWlGLTl1WdgYhM\nvYAQupYmNKhTFvMO55XLyzWqjhACOSe6pmXoRnToKdtILJmYDYfhG0ENGjd5SJzUSZvVanXebGgW\nSkm1BK0qoat6musSzlEzGItRnGBqEIUHBye03Zxnj1dYGhGUdr7EtQ0pRZpZy3bY8Bu/9it085Zx\ne0mat/T9yNvHh/yOkwf0uXBvecDPffkf8tuOGn5p/ArbdMqb91o+8+abfHX9RSw7hn5L2uQ67JvC\nKUsyXPIcmeNP/q4fZAEMcQR1uGZGM5uTpkk9nBqWqoThg2K94WaBb3n0Nl+9fIp64WLYkPOIUCNZ\nmjk0C8HNBBcKxRlpKByGGSftnGU3x/uWIUeiCEMcq1SmPWnTkw/nmBSaxrNYHGCjMUqtYS5WZZU4\nDsRxZFw7hnVh3ERmszlNq2jYEQCllEyORr8ZWF0ObPtSi76/PPtvgf8c+Omvd9KLRLPsfrz7YGnT\nqNSmH/ouDrwm+GScZLxUZh4kToAeJ1YtN4FtCko3ucVYRa7OqfkaE6Awta9u77bMYreA/MOAfjui\nZQfmV0GJxlWxrKvXuAHa07Fy3d6BvDd/HWfPnigk+8Au18dvgH09v8hOa59klp20MgH6DsgrM98B\nep7Yubtm5lfhidTlCoxlj31zi6XzHPlLbp338Y/N17NXKLPAjguI1DosuQiqilEr5dVUf6sVC5Fa\n6EqVnMGpRyXAlPrbNR2DHwnqa5pzFrx6VF0tteugxIxJ1cDTWJBGCU2dpci8oqUmL42xzmakOCwU\ntBV8V6AD1xWaUL3ZJg5LCsmhCXxoOZgf47xDqCX2jg5PmB/cQ31gs63ToC3nS7YXZywXC778la8R\n/AlDGsibC37gs5/lbEjcbxryesvWw1thyalsmR03vP3obeRsQPt36e2QJ3LOZhw5aB3OOrabzKNu\nzh/73j/Ad7/9Odbn55gIXhosBFIpzJu2xu+nWOcgFaUfexrn8Ys5rd3nMEVyP9C6OUMZGcdIjpnQ\nBKSp5XUjhRQ3eHN03uMxcsp1xOKMGHvGcSDGkZRH/GmAzjM7bAgzz+HhIZqFtC5E3yPmSbGQooF5\nckzEPILr8DMlNELja/x0ja5xjGON5slJKQw0By916ri/BXz24056odDEqUaITsz7GjJ3J0ys/Spb\ns9ZVcZLwGvESCRIJGq/A+wp+99u3t6/a14AP1yz9+ujzWPl+RMuHpZZ9EL8Gc9vDrz22P4E33ATu\n67Zds/VSK5XCteNzt74elXAdUy5Q9jq1MnVguzXKtaNzF0se9gG8ViktXskTK5crZr4H5DXZ+RqI\n90H9lp/iQ+D+oX1XX8U3ba8wznwnn8ikn9f3uGtf6YZWlTyzCvrF6jQ7Z6crHj1a1PC3XCeH9upw\nEqCA4nF5qqlcMsUySTI4RYBcCi4Z5l2dVFiENPWWMkk/IuCCwzUFPy9oVzU1uoxz1MJYY0vcVnno\nzdcfcu/gHoYnl8I4Fs7PLwlN4P69h3Tzlvms48u/8WVQ5YPTJ9x/7TMcLmtNcU3Cw5PXmF9eUMaB\n3/Ntn2UUJQ+J0+0Wf9DwcPEaeXXJw8NHyJHjV/JX2G57HsyOeLzuee/0knc+93n++c9/FxeXp0jb\nYmOimS2ZzZdTh5UpJUPJ5BIpJmiAUjIaWo6Ojxg2G9YXG5wGyAK5SmEuKL6x/5e5d4u1LcvPu37j\nOm9rrb323udWVaeq2477krax3YE4MlGDLQLkARGQEC9EiggKSEgkQkIiSIhLAgGigJKIhxDkByRE\nFBIkFAukJJZoy0Gy46STttPtbqq6qrvrds4+Z599Wbc557jxMOZcl33OqTru6vLxkIbmmGPOvfbe\na831zf/8xvf//vQpEn1CRE9MHm8cQfUID0JHXPK0fkEXrmnblnXneXx9xobfwz1uISw0ZUNV1VRl\nx4JLhNSkKAnO0bcOtKaeloggsGW+kSBD5kmjxHc93gfW6zVt6wGP1J/wG/FDaO4FFkBFzECtUl6P\nGKFyvPByZuMoK0womY2yMpiP3WFEfwDW4+sc8sRybyxIYtzftXQDSW7cDp6pNX9ewtCOatkD8y2o\nDwva2/EI7ukZYJ6BPJeVzGUht08c46uKG10eAneS7MBc7ubSaG9rxHacHRIzoCsdCFoN3jZqy5nL\nkTMfF0H3I/NnbsVHHONpUP8E7aWCeSIXXWD4QPOjZb6oZALIqfgZvwU+paFIhWC56Hjw4VW+Sx4L\n6tJSFQUxJFyfbxPe50VLo0uECIjksj+JVIQQiKknBkEIAZEMosiyOu0kXRfyh1bGLbemjCRqjzQC\nlRSdS2gKlIIf+8wr3L/zBqVNKKHQ1iL7DZvrJe8trjh/8B6mbrh15x59DFwvFsjo+PwXfy9VWLBe\nXtJePEQpgZKCZddz//X7hJRQybD2HUaCEpo/dOvHeVQ+5u0Pz3jtjS+Q+o5mMufNB2d0dwL/2pe/\nQlgvM9+dQOuKJOTwJRKUxhB8SwiOSEQlCC7gQmRaVpRJY2x2Z7S2wHQKjxgStjQxh0qkXhCcx2vJ\nlWiRKWCUxS8v2IQly25FSjnhyneezcIh5PvMZjVNM2W92lBXJWVlgVzCbrjH4J1HSYGyBq0C1hq0\nAaHIXHmK+BDou6x6soXCJ0U9+9SLOn9sU3/xP92N/8BXUH/gn3vqHCN7bHQYekzqMdJh98dpGKce\nI4b9uHdsGBvRgzgE6l0C/j5w741Ha1mxg+pxbHAY3FPuh1uqZA/eR5JlhG6H2QK9IOEHMJfp5s2B\nfHMQO2CX2/GgXBqqKI2JTQGNFzudzPYZIA06GqG2+we3lqgJQhGkIsR8XoyKGOXQb+ynIYM5im2i\n0ViqL8VhwTUNdO+4SDta7+5vd4sEH31s3D4vBjn7Kjz66gtddy8C5s9a9DkB/jrwGXKl8n8TuByO\n/SfAHyeLdv4k8Hee98IpJUYxVozkLK7h0QqRH0UT5DdSDo9PQzgRY+LDDxagMy/np5ba2FxdyHuk\n1JlzjyCRGCmRVYW1lqaoiCmxWF/T+w4ZZS4wrA1eeHwA2UlccvkmYHKpsyjjcJfJNQwlJc4lZs2E\nN159g7vzW5SlQTrJrbu3eYynlwalJc20RhnFo4fnvPraK7j1Gmg5//D7vHZSU0+mXJ89pNKJom44\nf+e73H/9dVbLJT4K1strbp2c0BzV9N2GL772WU7NlIfX55gY0FXFxFT4VmCtY7HsECGvSkRPvnmF\nQFHUpJhvkr3rSAg2XUtVlxhT4FXACI2qJOXEcrSeAi2PVuc4kWudbpxD94IQe5KUNFhW/QYVHFIt\nidrj8IToiQGCz+b/wiWun1zz5PySaTPDSJUtb1cbpNC4PuBd7lpphJQokbBKY6xAm6wsgGyeZpTG\nSdAGZB0RpcbYT5Uzf6F2+h/8ycOJtHjqHBMzEBuGbXK5i3445jAyA7kR/ZZS2Y73jmdOWO6i0b1x\nRG6j1nG8BXGxd17Kx8zgqzJC5ihJHInRhNgSLA69B/Rpa2MbkXh0vhGktAf/I4Cze34Qw3zaG5Of\nSGQcfw48ik5YOgo6Ctph21HQpYIuWjpRHHYKOmHphaWPwxZLHyy9tzhv8c7gvMZ7Q/Ca4BR+8GyJ\n2y6HIhZyW8wiDcq3rTRxlAKNoL0/HgH8edTKR9Esxz+X+9i++V8+97p7ETB/1qLPnwb+LvDngf94\n2P/TwJfIpbS+RK5k/kvA54d/56kmhMh0R0xEYRAxgczWZylmmBcygoSQsvrFJ49SiiQEzkUuHq+Z\nFIaYLKKucqQYgCQJwSNSLmBRlBpIlEVBJUu00QQBcXnNuKDmZYAUqGpDcI4QNFGErN+NBlyOSGNQ\nRJONf7QIFLXi9q0JzdGELrVIjmhEzWYy4+F738ZtrrGFZXo85/5nfoTNasW9z7yGiT1WOTyJsw8f\noG1e+IwhUE+PObtcIFzHxeUFfcg3rSQCrXOUseLkeEbdlDx48D7HdcP19SV3X3kF1yVizHdCWeQs\nT6MN9IEQWlRV5PUJYVguFyA8facxheGaHudabCGo64LJpGTTGsqiItUOv8mOG6HPKh1lFcSED5GF\n92AjSjrQAW0lYZ3XQoJLBCeQoed7b3+fqmgIDqRYIkNis1mCiAiV6zgW1iJR+Ys9yEdDSPlzIuVr\nIEJMgYQnSUlZKpSyL3BJf7ptlq4/9hyDQ28BfODA49542/th32PoD46N432aIQpJkiPNsAfw+8A9\n7G+ph73IXBPy08JedD7G/aM6JCaJRyExB/M7Q1xDj0WlsAXnvRIVBwC+nRcfcR6JgBrAuaDD7kAc\nS0dJh6VNBf14fADzfrwBiIJ+2O99gXMmd2/wbugjqDtN2AL6YJG7D+Qe8AOQj2C+D9jj+CZQv8j+\nJ2gvAubPWvT5V4F/fhj/L8BXyWD+R4C/Rhb7fBd4C/gZ4Fef9cIpZUoCEYbFTQgx+wQCyJQrleTM\nr8yXjzJGISQpRtZXPauZQ0iPjIEUhqyzmGVsiJyEpEXmxQtjmFQNUkuCCHifCKHH9X22BhACoRJF\nBX0n2aRc4qxdpxwtqkBUWdEicFhtiKrFTBRmIghXicZWmInBO7D1e/Tdmhih3XQ8OHvIUdNwfDql\nf/SQ1fKa4vSEJAztZsET9wTvAkkIGh9QViGVyU8aSYDQNFXJBx+8T6ELJtMJGsn14pqT+ZTbJ8c8\nevwY5wNFMyEkgUgKiUZLja2nhG6F7x2r9Yq23WALgxACbQrea694dPGItl/jfIs2iaq2KK0Irid0\nCWTEC49GD9yuQjpJsh6VJEJICJ4QsncN3pBiVp8IFJtFx/vffw9CxGhN9JHV8oqQ3GC8JgjR5QxG\nJEYNtFgccwUEvesRSW3jQlMYhByM1z699teG6/4UeBf4z8jBzkGbxqcj8ZttBGg9gjl+C9CaZ80N\nY9xTcyNXHLfbPUCXe6A9AvresRiH/SFqV4QtkOsDmgVIDJG3IhDxe9F62lsSdRgMbhvVj9H7oZXu\nLhJ/3nEpB8BPkZDULhLfRuiWPuUovU8ZwPsB4HsK+gH0e5HH/TgfLM4ZvN8DdK+HrghB5Sg9DIAe\nZAb10WRuG5nzdNLQfmS+D/LPAvrfJQugd4GHw/jhsA/wKofA/R45Qn+6javnMW1T+kMcFjsZA/SA\n8IagQ66oMixgkBIqJnwMhF5w/WRNchK3ajGVRaGySiP4wRQ/5htFCFhrsVZhC0uyiY13XF30BJfw\nMWLqiBQgC5WLB7ca0WZppEMipUeUEYVGmoC1AWU6fOrouyV3wylet1yfX3F1scFtlngXqZo5pimY\nTQtunzQ8+M7/x2tf+Az92xdcPHnMUTXPj7rKslo8JgKN63nt+DYXj6+pSsv51SVR5KeWJ48e8JM/\n9dO8//77We2jNNYK2nZFCjn5pGs7rK2zUZmyJBQxQtHMIEW87+i7Da53yEZhreX9Dx/w8OxDTKHY\n+DU+OpRQTKopm8LjLjuS1uAkxoBOCtxwE1aCpBLBZ+oshYhwueyeiAKjBGL4G1arlvPzcwppkBK6\nfkXft0QB0uTkLp1y9D0CeZQC8BmcYkTECFGTi0InlPzUl4CeKt78rDZ9Bq2S2/g8DjqNOvEBOIXD\ncLg/WsmOevKbxzKw+xy7yAHAh6pOW9A+mB+OsUe7yIFDH7j2MatUDzH2aBkw8ggj1AbU8N+ILcB7\nFD6NLLo9APMDkN5un54TZGfHQ5CPRNQuImcH2F3aAXuXcvS9i9ztIYgPAO+8HSJyvYvMBzAP2+0Q\nlW+BfKhGNCjlDgD9Jjg/D7CfNX+zf4L2w7j6d4Ta848/3YTIShU5/Kc+DWm3eQEkRTEcCyivkCo/\nUuvB2CYr1LPy5OLRirDRFBNJ3aT8uK0VToCQOYJPQ0TfO0eaJKQRNKZkWZWsFh2bq462zyn/1kYC\nufaoTJG4AYhIoelSpIwSpESpSCRRFZJFd0mF5f7RPRZnjzh7/4zF9YrgHe16Td0UTJoJ09mUR4/f\nozKGx9/5DovNGtE5Tn70VWBDSvD48Rnz47uAZO06jLVUZYk1CltMubp8Qj2dsnEdi+WCGAPTozmE\nQLu4Yr1ckWSB1hYhVDaicg4pIiZqXJ8omwmbboVQgsIUkKCZz/n6//t3+O7195nMakLRoa2g8x0+\neoqipNWe1nuUTBihqKyCBC55epdziJTI2asxCYwqQYTsPaI0KUBZWGpb49cOT49SOaHLOZ8lYSn7\nm4sYiR5SJJeUG4yU0iBUjmG4aYz7Mddifdlt9lww3zWNQ6U9D/C9sdoH7hv9Wcf2wTrKofDwuK9u\ngLzKC3pRDtApxK50XRIDkO5MtLKx106umI8mQO+APan8fcFsI/nxJvC0DcCY0fqsUnQjgD89F4Xa\nAnefDsE6g7jd0i6Hx3fbftjuqBU9ROga7/QW1INTezTLjjPPkXmuGZoCmWrJtcg/vu/ryJ9KLuKl\ngvlD4B7wAHiFvDgK8D7w+t5594e5p9rlB1eAyEqE2mIqm3OChuhckhBBEmSmHESQoCQpBJTKypYY\nIv8k8DwAACAASURBVMRI3Hg2IeBTNmKKwVIU5ASSXK4bABGhW65J0yMkAiUUZaEprGEtBG4ZCcGR\nZpqkBk5YCGKfF2b75EloQiEgSrTQaKHwQtJ3lyT9Ov16zbSe8cRecv7gzeFLElgul7xh7/Phg/eo\nC8H55RMenW34/Be/wOrJFQ8ffpeT+ZzQbbKxWAJdWJbXa6Lr6LusfV1ulkilObp1j4DAu0DnNtT1\nhN4kri8fIawmRlCmZNX1KBIhRmKhUMIihWd5taYsSmKUNJMZnW+pqorvv/Mhb37wXcojQ33LUM8N\nulQ4Jyh1wcONIwiQGowM1DqB0Cy7ljYALpCSAqXQFJlRFZLMeGlcCkihkFHgg0erhHOJtvO51J3J\nfDhe42POCwjJk8gePiiF9xGlJDEJBIHLBy3X318dFCl4me35kfmubYE8+Z2vSdoD7LS/DQfnavzw\n8wGVfLZolSIDtRI5ilTDfpS7+VHFMkbYYqxBups/lCEeCBp3nPmARjGNQdVhFH2gd0mjjHEPvEVE\nphv7N0Bf3dgPqAGw7Y4+GYE73QDuYa4fI/NxnApcsvRDVB4GjnxHseS54HecedhG52Jv8VM8zZl/\nXI83xjznvE/QflAw/1vAHwP+u2H7f+7N/2/A/0CmVz4H/P1nvcD81SkIkVP0Y46ydr7mKUfUKZHC\nkPE3VCeXUmTXvkHSGHwAkRUTLCObFIneE3zCFCZTJjJz7M7HHAX6XFItiYQVBi0kUghc5xBB5IzU\n0uGdgRSIId90tJXY0lA0EWMtRieUyrKlTZ8oS8OtZs5ydcHR0ZTpfM6js8dopdHGsF6vCX5F6zQh\n5IIRb37nLe6e3KauJlxcXjGrimHBJbG+XhOTRyaPcz1Iw/n5FXdv30Zqw/n5EzbdmtksV1Ty3jE5\nnnN19ghVa4QIHM3nrK87los1J/qU6wcfoJSmKjTrLqJVgQue46NjZFnyW997j+V1pOs6QloTpGVe\nnlCVBcJX3Du5y9sP3ifKiLEVQnSUVuGTJoQ+fz4qLxRLEdCUKK3xydMFTyEtWmtC71FCEIKk9x19\nF1BCZBdG5FAfFnoXiGHwxRHZckEi8ueKJiXFnTdqbr1S0cxqtIJv/coHP+Bl/cNps/jxC6Aq7QBc\nDdSESuEApFUKWzA/3B/PDbvIfARvlZNeUhTEKHcgnoYofQvPNxZE05YZBwayc5QMs8OZDNMqfz75\neWt7bDyXUW54A9D3s0f3gVylQ1DfnZO2x0OShyAeR9DegfXBeDi+D+wj0Dtns2rFaYLTA7CrISrf\nB/Ucme8ol8yZ58j8GWAOHw3oL3L8E7QXAfNx0ecWu0Wf/xb434F/h500EeCbw/w3yQ8g/z7PCZWy\nxjxzcQkypRJHf3NB9EOVkr1HaAlILwkqgYh50TRCigkfHD7GfNNLiRQ8MYItsn4UAkIoXAfBJ3zw\nSARGGQqpqcqCsrSsN5Fuk61ZhXRoKZE2RzimltRTjbYKpRNFpdBlRBeC9SaySEu64w6REn3boqsS\nqQV9GyB52m6DCA6fVsyP5iyXTzi2BZOjGYUReJcLUSehWG2uef+DNZPZKZMCrp8sOJrPEYAuCvpu\nw/XFw8HPRlFOJkghuLq8YjKfEESBtiV9F6mbUybT2/huidQWkRy9kyyulsyPj1m3C159xXJN4sni\nMUVZoFV+n3rnuV4tmdeCQlUUtsCgaBeevg1UShCtQ2qPitkMjSTQUmCVxUhLxBCNQnSKvvdZ1w/4\nIPAu4kKLEqCrImftDrawQihKawgx4J1HGpWT52R2ABciuzZenF2ilaGZbFPOXmp7sch8B9AqBeQA\n1HLY34L1/lwMT/2cSrviCVGJnPQSx/EA6Dei731FS5SHx0ZIHiH6cDskGu2XgdtuxeDEsTtfpLSX\n+h+eCd4q7YP4QO+kp4E9Ig8i85H/3s4lSx8L+mTo4wDge4DeR4tLBS4OfLlXNyJw/dTcIZDv1Cwj\nX37AmcOLgffHzX2C9iJg/rxFnz/0nPk/N/SPbYksN4wyS+nEsLiX/zFxeBdIZD8OIiSRo2UvUEnT\nxQ6VJMkHYgutD9AUGJGIUmWb1yEm0UqTosC1DqUMMgoKW3J6JLm4WLK67nAORKEpm4CUCmFzAUFT\nSaRNSB0oKqjKAt0EhJG0mzWb2LNar3GrBWVliQTKqmQ6LYl+g0qBtmspTOL64jHz4xNiiFyePeH4\n1oz5/HUuz7/L8a3bGGk4O3vAnbs1m+U1znU8fvSEk9unXJyfUxclH77/AbfvvIouG9o+QN+iUmDd\necpGYaylnh9x9WTFZhEImw1S9ATXsVhtODm+w+Lykr5f47nHd978FnZWUAiJ1IkYNckn2n7NUidq\ndUpZCKwqWW86Fmee6r7Ahm5YDA2EFLFFSREVlbTg8/WO0EgZ0UoRXcJ3Pa5PdJseZKKeWmLKNxBl\nbabQJBRGo2Si144+xHxTFybXGQ0RrTWnt0/QOqBkgRAv3wJ3BPNRejyO2duXcQfGh+O4A+oYDoBc\nHgD67txMrYzgPWQyxjEi36dVdhH5doE07cD+cJlyP5s0y1ETQwLNTVIl3VziHP0f0w6gb4C62gJ5\n2IF3ymZi+9ml43gL5sLu6BMOwXofzPPY0keTx3EP1J3JvPjIjTtFdDtQD1tFy6BkOdCX7xZAtzrz\n8YN9HmDz2zj2CdrLywCV2egqZJxFAklJpMhceH70yxmhWZImCD4iNTmpaGDxfPTkstyKFBOui5go\n8Sabz8syYEWW9Skt0FoRgsN7i+sDxlq0UQgqjmZHLB6d4VJCFgJbWmKM2aRKKuRgoUuSCK2QtURb\nQZI9WhiOqFgul7TXV7g+UBYFvhRIOopJiRAOH9ZIJSl0jXMdt+68RhKB2eQWq/YCpWuqZgrGMOtX\ndD7Sx0BEsLi64PjOXc7e/z6nd17FaI22NfV0zmqzwMfsh6KU4frqgnW7Zk6J1oIuetarFSH1CNfT\n+5xs0qVAM59ibMXZk2vqkqzrluRMz5gXode+ZyOvIWhmhWW5MFyuHfXCoBVYrTEm12nVPvvp+CQR\nkZwF6iIhhvxZxYDrezoXs1xRJoTKi7AxZcDOstWA1gKtZX7vnaJ3ibHqgFa5IpI2BlsUQ7T+Uish\nAjB9AZ35FozjsMgYw3asYj6WQX48FrdgLrfjfCxpsVVZxDAAuX4GRz6OZQb9LBcdovi9pceAIqbR\neWWoOIUipnEBdA+Wk9rbV9sF0Qzm5L/zAJj3wFqELYDvTLv2AT9sAT8lmQGZZwD5XndxAPYwAvxh\nd9Hi3B6F4nLkHfwO0KOXW6CPN1Qt2wXQ7B5xuADKx4w/7vhL4sw/cYvkL6UQaoi2RzVLplm2Frgp\nm2uloUBBjAIE+OhJyJy9idg6sSk0yUHoPcEaYp+140nkC1SpbHfbtWHQPZfEkIHa2oJmpll2Lhdk\nyGbr+N4TZfZxCV4SRUT2CaU0thQgJrTLC76zepvX6jtYFNoE6kowq07wrmO5uiQGqKqKqpzQTKY0\n9YzLq3NOT2/hXK5vutgskWqCtg31ZELoV6QEXd8ii5qrJ5cILZjdOuXy4ohmOufhw3OaCjabJUbX\nbNoORaBsTvBuyWYZuL5a0fc9sc3KFFNXJBWo5lOKpsBMJlymd3nl7l0Wy3O8CyAkIgqCByd7NvqK\nqpwyO1FcrQQRjQuSPmlUr9CVIIqU10FCoPMd0msIEhf9VhseXEAkMEKx8husVUPWblbGeO9zKToS\nRSGQylMUAtqENgUpBYxR+C4hpcEYhZCR3gnM7wI1y4vozDMQZzCWzxirp47dPL4bZxAfAHpQpmx5\n8jFS3kbjA/c7APrNyHwrMSS//35UraQsItiXJvo0ChiHJdq0c2YZpYs7V8Wdj0umV8IW0HNy0Q7U\n920ExnFE4hiyOMUO0F0yh1TKHpC7aHagHvbA3GuiU0QnBypF7pKDRmAPkhjGY4OiJYw3TXZR+bNo\nFp6zfZFjn6C9ZKMtgUh5DV0MqodcqGKQnA3mWlsnxe0NQCKHZJ7cJChIDnyMICMqWlrnUV1Bl0Cb\nTLHopAhekKLPHi5xk3XrgEZQViV9DKQYcS4hokDE/AXwElzMafFJWDabSFGV+akhWS7jkrbdgBBM\ny4qqqOn7BSkYYoq0mw3z6YzVcklTV8jUMZ0dcXpyF1lPaC/PKHRFSoGjacN13wAB59ZcXl7yhS/8\nFG+9+U/4p3//H6S0mtdevc+mbzHS8+T8Els3rNorgosU9RGX19ccm4aiqtGFo5nNePDWO+i6JKWE\nD4FKC6Z1wwfB87UPv46oDRN9ymrZ4vs1SkWQOTV76ToaWzKpBbOZ5nwR6QL5SYZEhSbFQCAQO49M\njtCFLI2MgUREiUQK2egspcFQCoFLDklCKUsUgSAcRheUjaYoBFpbqo0i+Gx7nFJOKGqaGh8CkC16\npXz53iwvkgGao/Ghh4iIKYN1SDfmd+On5oe5pAXRDPz4ll4ZQH1vkXObTCRvcOlpF7UHNHIA8pGc\nTOP3kz1NedoXSRpcGlXywzaZgWbJ0K+3KUV7IM5AGe2D+Hb/EOwjEifMDsSHyNxhDiPyaLaA7sJA\ns4zjkAHeOUN0MvuW73e/15063PdjZC620sRncub722fNfdz2E7SX+kyaq6pknlQLATJAHLI7Q9ou\ndiU8UgzRBmN1IIDBYU0ltIEkE77NGnWlBUJE2o3PNrpRUVbZzVCGgJKaruvw/YqiKEm+x/sx8SQv\n5tAKnA94l1U0MQS8yFGnkJ7VpccWES0kOEttpyijaYoSKxTO9RhtaF2LkpLpZELfe+7duU8Mkfnx\nKdPphEdnD/mpH/08b12eZ6fGYsKkabh6HJGyRqkNVTOnrhtO7rzK0fEJwXVoZSkKxfnyQ2bH97i+\nPCemiNEV6/Wa+UlNiJHgVlS1xRqNtJariwuOTuZ0qxXN8YTju/f4tSff5YolZVmxSQFpNAoLqYMo\n0KKgjAJtHUWlmM4VV30u8tF1CWMNzkWMkgQXCFHgeo9bRdpNIKbst14UxUCjeHyIGCNBZMWRMhJb\nZz8WZQyzpqCoPZNGItC5cIXPpfNSTFhboERe3C5sQVEaQnAv8YrO7UUWQGVMO6COCTEAswhpD6yf\nP7+9AYSYo8WbvHc6TOnfRuWj5lyJwVhqBPP8s55cIB32EoKSQuzPpZ3BlsNkuR8DsA7RsiM7Rz7P\nWXHbxa6wxSGQH56T4gDmYgBy9oA8Wfo0/B0jmA+RuRt9WKLJ22DxTm+lhjuQzvLDnaZ8J0d8Fm+O\nfwZnvr991txHHbs5/gHay/MzT2mIrkBqgd+ueyaiFKihZIqQmX6IMX+gYnDf8gMrJ2RAaY0ygUJl\nbtYKgykiUgX6jdiuVbgYKJOFIDJYCUO3bgmdoO3WdJsVQkREBNcLQvLQ5d8XtcgyxShyav5FYjHx\naBUoCrCioqprmukpssuSQlMo/LrHO09VVdTVlN55qqZkOplyND+hqics255v/OZvUlWKZbdmPjki\nxEBSFUUhuL5wtN2G1WbFa699Bl2UrFdP6HtPGzec3r1Pt74mSk30JQ5HVVbEEOnaNSTByb17nL1z\nznJ9hdGZs/Y6sDw/Z/KHv8B77/w9ZMqValOQSCnQFoQyED2lMAgCRimUXCO1QOiIV4pWCGwK6B68\nEVntEMG3Cdd7iImQIkYqRBBoKcFYlILgPEklJpOaujFUpcAaibUWrSRahyHtuycGRe97lBCkmAuM\nMKyxCJmtZMP22/Xy2jR9fLUjGTMYizAAdUy5JmVIGaT9cNzfOB4T0g8/FxPCp+zFfQOUd9SK3CUQ\n+b2korjHm2/VLoIeO2D5Pu0Shyj9aaMtl8wgGRx13juVCbAD5HRYkUhtbwfPAfYb50SRwdyJvZvG\nHs3i0gjieeuCxQVDH8wwzvpyFyze62x/6yXR7RKBottF3mMUnkFcHCQNPZXOn9+uXfsk40/QXh6Y\nB0EiIqTcVlRJMRMuW9dEBNqInCASBGHI+08CclqRpygMpoo5TVBqunVEKT/YqgralcsfSJIEF1FK\noI0mpsB0OsFIxYcPzoltP5hqRXwX8MvhMZSIIlfmnk40qzayWSh8hNVZQsSOZma4NZ/xE7c+hw49\n63bBUVOjpcIlMEajtcWHXLNSlUdUs2NcSLz7rW9w53TOSim+/s1f5/bpPVRR4SKUdUV0gcura964\n/2MEn9U1PgRIJaoInNhjgrVcXTyBviWknqqcEZNntXiCKRzz01uEbs3F+UPaxYLJ8Zx+s4LJnNOT\nU2RzTPKK6/WSTbvBeYcPHiGhjZ4YPHUsscmiRfYvlzLLApUGgaPz5Ci7MwgM3gWCz74wkP1qtDSk\nFCFCVRhi8CRtUI2iqi1NbdEmZV96JMILYq/woqewJUYqVq3DC0WMASfWWKNJybHpEioafmjfjGe3\n18mGc3fI2PZXgb9886QX0Zlnr+7BRGwAZRH2ur+x/YhxvEmtjAueI5jvpfbHMGrQb/LlOwUKsOPF\nB+56G60PypXMkZttlNxR0qWCljK7GqYS2IG5HuqV6mcA+b7d7n4d0QMwlyKDNDtAd2kPzEcQjwa3\nBfDsu5IjcnOQxp+c2AL6bry3PeijwZbYbgkiL37ejB1uXn6/3f1P0F4amAvIQC5H7lSCyHdgISJB\nCIyGyWywpXQRpTqElMQILibQimoimcwU16uAELnCe/KSogQvIuJaZb47RgQObRxCWrQumE8q/LTi\nerXifLPEWEjOoaIgulzIQolM10xmiuYIpiczLj6MLC/XuOtArzUywq2TiujA6RYtBVJY2uWStu3Q\nuqCsKrwPiKRoF1e8/fiM777zPd7+3rvoJPi9P/F5MFO6WOGMwW06YgpURnF8PCUSOb11h8n8FKUk\nLlmulw957Y07lGXNpX6XTjmOJ3Ni0KzbVY6ijaVvHa6/onVr6iOFNhXNkaQ6OmJ+OoXQ8+DBIy7O\nL1heL3Bdh1GCVA6GV07hfE+pLTIJhLQYK6CQqABJC4IPWU8u8hMXnvx4LgVSDIubQqJVvlGXlUUm\nhSkNfQoQPWEjmBRHSJFfrwsglcJLhUmJbu25WgaWXYvre5RSKJ3NvpSCuimpyk/1knbAfwj8Y2AC\n/EOye+hv7Z/0IjSLiNlEipgQIe8TUi7uERL4EbDZG+f3dTfO5488+ehZHrccudim8Ec/qFaC3C2W\nxt1i6RiZ5xBrR6OotLO5ZbxJ7MHsGCWPQL6hok0lLSU5rWgEZv/RW5G3NysV5RuAz3LivcjcMW53\noO62oD5G53vA7s3Qc8LQFrhdBuURuBmUKgf7g1PiSK/s5Ik8DebbD/gF5z5q/rfZXiLNMsgREdlU\ni5iBYLhohAg0zZRmkkuCrZYbjFdEEkLmxVLX9zQnmqqJLNsOIROm0LgAQgWUDJlzH2hApSTIDoRm\nOp0zm02pqwKjBL8RF/RpgfCJMmWjLpckSQaUkRy9rpmfSMJa0y88wSsKo5iWx0gZSb3mev2YubxF\nrQuuri5w6xVFUeQ1AO+wWtF7wZOrjrPrjr//rTN8qnj/0UPeefQb/Ms//zM8ulrQLE4oK8FpM6fr\nr5AIpk2D0AZbVGAMy/YJHkk5nbI8f4RVcHL7NWISOB+pZSIE6NfXpNBilOXOq7dJ/W0ePnyCrI45\nOp2z9pD6S1rXsbpYsllsiNFjJsVwkUWSSLQBKjqkbhBeonRCa4kqBKIQyCTwwSFRe+LqQYmkJMoI\nwCOEIiWHVJFCC8oS8BHnQUo1uGaWdOtEu/F0G8982hPlmmUruXjSs2pzRqhVhuA7Hl9eEULP0bxk\nMm0+zcv2wdABlmQQf5UfBMxT9tonjkA+gPgWvPe3z5rbAXtMh4qVg4VONWR+Dlr0DORyLzoXu59P\nebF0jJEdfptWL2CbFDTKD7dgPnihtJRsUsWG3HdgflhObgviYh/QPVrswH1razCCuRjAfATxNCy6\nbimWvch8iM79XnQ+ArlzhuAVuFFeKEiOrc/Kdu7GdgR8wgjqu+0LgfEP65yPaC8VzGPMHPlYhUIg\nt2CtpKRsHPW0wgXJupfYlAYFixx8JTxFI1GVQ5Qg+4QpwXc5WrTGUM0U/dINlYQSqIAPGxKRZlIz\nm84o64qz1UM+OL/CSokgMjnJPiAhGoJIzO4KJqWgTWAnihLB6cldmqIk6YSzWQK58QHJhtXikuQ2\n+FDR9QuKwVe9md7h5N6X+aV/8Lf5zE/8M/yzX/kX+Ct/9RdYXD/iF3/pV/kj//q/xP37P8qTh29z\nFHtC75BJUpSWsqooJ1N6r9gEwd1XX+fiekPVHHFSzwluzWaxxF88QqIwlYTUYlSiqDS3ZxO+/eYH\nlJXl5HRKYRNNo0AVvPW9d1g8viZ5wa3bJ5hSsRHXCJHwUhKjI4iGJKAsgD4vMqNzIpCSAuGGBbXO\n4/rsfClSQmsBQuTFUR9wTrBpV9RzTSAghSaFgEgS3WuMtKSo6FjRrQKtaWmqAqlaFB7fJkCC7ihU\nxb2juyw2HZcXT1i369+pS/izwJeBX7t54EVolhHIx+hujLq33d0c7wH7jWMxjhHzwHPvlUmL46Ln\n6M+i5U6TnnaLoGNkngZFi8MNzoeHfuYRQRj8zB2aPpnsJU7BJlWsqXNPNQmBFv4A0LfALvbmxB7Y\nC39j/gaYi7F8Ru5+4MpHQPd7NIvfA/Jsd5sj9ODU7n10u/dxH9Sf6sPnlMbjNznzF22fIgv4EsFc\nIJBZjShEVpwMSUIpJYzVFHWknCaMg8WVQpEIIVMyUWS6RemsST6aNawvHCpFbBEpa4nRgnQKG6lI\nMVAWecFsvVpyJVe88RqUjcWUmrt377GO7xG6nvKOoW4EwlqW64SyFfVkSYweWStEs6IqFfVUc3o0\noxM9Ia6HN1TSdx0SQY9itV7hfc9m09FMSz73uZ/k7XPN8f1X+LVf/xp2dpdvvvUmKq15dX6M6DVC\naSSgdF74ffX+PermiHrSIJtb0Dk+/8Uv8PZvfYMP3/1NhIL7r3+Gu3deY3qkKa3l4vIx3nUYpTHG\nMD0+obYTvvRPzXj4/gOqSkEKNNMTHn3vbfrFiiN9RFSBiW04ns95fxNIcZXVLNFibEKlrDRROmKs\nQBUJREBFSIOdQiQNiVggh6cckTxWZ4vU5ATtpiceZ+gA2Kw6pBeoWjIpSirT4KPnsl/R9S1abwix\nBxKbVcTqI2bNjFJZ6mLKjxyXrGLLW2dv/U5cvhPgbwJ/ihyhH7T/+hd2ipqf+3LuT7X4ET1wAPQH\nY7+3HcYi5AU7ocXAw+dkoFx9BYYKx6QkcwSVJDFxo8gysJUg7lwSx7aftp/ELhN03/twvwfUnqSR\nbfk3QcoB3PhqYu93CZ46J8+n4YlhUJ4P3/+03YqndNxCpCxmEEN26VAUW+VK2tuei0ELkEPuody9\n1nZfjueKzApISOP8zbqd6anB4e5Nc5P0rMn99lXglz/i+K69NDA3Nmf7KSEIxJ2efEgOMqVAmYQ0\nPQiDGkyugnODtE0iqLE2YSw0U81qETEyQJNQQjCpNUhJwiF9Q2EMWkDbOx6+f8Zn3rjL8a0TAKqJ\nwRSKRjcIYWmOCspqSrnYZNN+2yI7uFRrZBkptKKoJKU2YB2qN3RiQ+c3iNWGEDxaKpb9NUTF0fEp\nX/jiT3B05w2+94//AbePjzl78pj/6a/8z8TY84u/+Lf4C//Vn+PBk3OsEpmW6A2FLfCup6hqRFlQ\nNlN82PCr/89XefPbD3l0tUC4DW+/ecbv+/1P+OKXfhLne6QQKCT19IQkPDIKOn+NLmte+8w9+j47\nM1bHUx49+IA37tznfPVdYgqU2lJXmioUzNyEleiQaU2igGSzVpyUeU5hAIUUkAiECEZZpApokasx\npagQIlFUiRQCnfeI5PFeomRWD/UrT7daU9kGWQqUDNSFIYiCPnWkaAbDpp7oA0ZalOiZHp1wVN0i\nhsREzVE68Q/51qd66QL/B/C/sjOYO2j/xR+7MfGs6O2jwDzd2D5rLj09zoWuxV7tSrnbslscjUO8\nnZIk7GnMRxDeqbzlFrBHL8UDJCTfEbY3BbHnkjgUqd430JLEPVCPTwH8Pqhv283xIJYYAVrJMPi0\nDwCr8tOgjLmohUxxq6TJy7mOkNQNTpyDhc20pVT2Fj2HRKGbfHmKW07xBmA/Y39v8xSIPxfUvzL0\nsf2ZZ5yT20sD8zuvNDx+vCZlroWcip2jcq0SZRUxRmFtIplIXStEkqi6xvtcjWa5ECjdYW2RZXNW\nEnpJVWuUCngpKCqFKRSFrNDaAj2xiyzOOs4ePeH0zjUgkQKaaoJ1FiUqZrXF1IKQEm3fY4QkKY0y\nkaJyiEpidAQTsZVgUpZE72ldPxghgQ8h84ZKoYuSajJjPp8jZWS18nSbkJU1UVLVNfPTu5jSoJPE\ndx2xnFJXE7QRNLMTbDPHK03oHf/kW+d8Z91z9/6PkmLLm98/Y/btSyazB8yaAlvWJGuynWyErr9m\nZk+RUtP3PYLAZD7HVBN++Wu/wgcP3uf27Bbn1w/RVlJYxaQsWQeDCp5CNsgYsUpSlzXHDh4tFyjv\nsjxQSELIGbhKS5oyoZWnax3RFTktH4dWCe8TRml88CgvSDGijSD6irbXpCTwRJRS1LrAcIw0l9BH\naqup6kRTCJTdsOkecmt+G+1KRGE5FbNP87IVwC+QjeT+4nPPehG3r48C84/rN4E8DnkRY/Fh2BYi\njux48UimVEKSBwA+gvt+Qv1hpL0fke+9E3tyYrlXgHkEdOAQyDmsKjQCeX65w+3Y9k2/xt819hxp\niyFKFqDEUJlokHemuPW20YQtGx+SJGlJMoMMMYihvucA3mZfijgcC6NkUUCQO848Dn/l+Jnsj8c3\nK+0De3oGkKft+7mbF88ZP7/dfEj4HWt379zhzp0ZWutcI5lESlnPOpkW3DptsNbkauwmUk0SRRUp\nmkg9tUwmEyYzQ1VN0SZzsspAEpp6ZjCFQQmN1YKj4wJdeXSRXQBdm1A6cvlow9nZIy6vHrFewL+w\nOwAAIABJREFUL6nthLIwFHaKFhO0KYAOKSRSGZQyFMLSNAVHVYOSElSP0ZBEYmYaCiWx5IzVdnWF\nxNBMKqZNAb4nug2f++wbiCh5/dVjCA6RPP/9f/NneevNb6NR6EIym8wQOuBjj9Il0ijU0QwRNd/6\nze/yzvWGP/pv/wm+/c4Vf/1v/t/c/ZFXeLer0PYeKalsQiVNXszUJVbXOO+wUmG1YFLXzI+Ouby8\n5r0Hb3Pv1h2a0jCfz2jdOa1fUhuBlYapLpCip2GKNYa6KXjl3gk//tnPMZ/eBhSESG0qGjtjNp0x\naSqmtaUqisyFp4CSkbIsuTOfMbFFjsr1mmxINqGpb+McXG88IUWEcWiTqKuKpsyl/nQVOZpAdSSp\nKlC2552H30DYSIgbjP1U45M/CPxR4OeBfzT0P/zUWZ8EqD8OuJ8XrQ+Red6OoD5G53KnWhmhNe0n\nyx9G5AcuKmIfgvfhdwyaDyPr/Uh8NM3adrEH6PtR+XM5iN1v2e0NS7AiImVEyix0UNKjpUNLj1YO\no3qs7ih0T6E7Ct1S6pZSb6hMS2k2FKalMB2Fyeda02OMw5isetPGo01AGY80AWki0iSEiQiT8jPa\nQU85PNZp19XYc2b6QReRQcrEIacWP2L8/PbSIvOjeU1TVaT0Pc4eLrKcDYGxinoSKZuAtZnzEtJR\nVZrNylGWmr53WGWYqoqy8mht6NoWgUFKR91YtNmgdUKpmsJKtBSk2NF3LTEmcgm1rENPAXq3QhiN\n1QpEgZYlJjgUDiMjjanpYo/AUxQNk2pKaDOPLHWHRCOjovWO1LXE3hOTZFJPUMawXrd88OHbeF3y\ne1495W//ygN+9md+H2X5De6/fp+mOeZrv/517tz6OfzGo5XOEsTFhuru68QIPhms0Pzyr32du6+8\nwh//d/8UXdtSGMPX/tFv8LM/+/N4fRvhL+hWV7SxYz4/BSkI3hH6Hu8btLIIGale/zH81RXTosLY\ngnLZY+OUx/0CIVaURmY6SVmkqCmkZFJaqolB2QlFM2V6NOHs/JhHDx9RKcm0OaKZGoRcQ1BEf0l3\nDSCJdFTFFB1rIhOk3GBMJMVAMzEIX3B8dMLGtSx9R2WhLBNagxl8X3xS3LpboITCGoGkIgg4u3qP\nk6M5IWw+zcv27/EiAdAPIzJPH7H/LIolil1kvvVo2UXnW5pl7Iw1O+UNUy1FEGovej8E8lxwYABY\nsc+7p63z4QjcaZA0fBTVwt6NQAyveRPUx8g8/7IhKpdxqEaWP5Fs1pajcYUazL4UIT/j5bFQRDHc\npA5qe459Z6qVi1LkfRFk1vNvFz5lrnIVEykNT0VxjMiHu2p8znjc34Xyu7vwJ2wvDcznkwm+gHuv\n3GLd9SwvIglHSj1FVVBVAmsVQYKQAVsHuk7gfEsia9LrylKVUFpN8hVP9IKoDbaQKFUgLRgEpalp\nU8cmLIm4rDONLffvvUKZDDE6ohdURfbOFnENaYJULXWpaB2kCEIYBFDpmqOyJsoKW4BUEh82LLtr\nSq+RmyVKGoIEKT0yges6WjwP07cJ5YQ/8W/8i/yN/+vv8uNf+jEWj6959903+Y/+vX8LHT3L60eU\nBoRWNEczhFDYokTZgg9/612++rWv89Nf/mn85pKYNH/5L/2P/KW/8OfpXc/1eoXxSz54+xuoomC1\nWvPqa59ltbzk6OgWq9UTbt95NWv8jSb0G96YnfDOxQPuHN+hW60oKZBKoJWgMImiKCmszokuytNM\nKqrJKdNYUK+PwTRMiprQbTBKMZlCpIBYsFpYOufp1j1z65nNSpyuOZm9RhvnePV9LsM1dTPHbwSz\naU0tLO89fpe4EdjjTGMVlSLJgKSg0jWCKSlpZGxQQiFjS0od6XeBBe4Lg/l+wPWigP4RoJ4iA7iI\nXXQ+2uAegPPOGXEH6oe+hQclIoTc8u5PQY7IAHXAfaex5LPcgvhNbny3P1Iru4XXjxJ8bF9DxFxV\nUrAD85SyDDNFlApP00VCZeEEcueaaAYzLa0IWhGNJASF9IoQYj4nqJ3aaFS0jDLSg89qD6xl2u2L\ncX6PYol7j1PiGdTLD9BeGphnwE3UjaaeaPAdi0VCqoQtE7boKUrBYu0otEWILtuq+gLnOoLznJzO\n0MYyaY6QSG7Nb3H2+AprQWuTZXNUGF0SUmDjPLYKmEIyPznlaD4lhYAXgVJn7w/nIiH2uLCmwOeM\n0eRJvUQKizWSwhhKXZBkTVk2RPGI3q1ZRUelC4qiZLPeYOsJMQgcPc45VqsrPjudcasueOut3+Bf\n+cqXMVLS+YRrr2k3nvXGsbh6TK8FalrRNBVOJGTnKc/POHtyiVI109mMqioJi5b//M/8WV6/c5Qz\nNSvNG3c/R3f5PmePHnC9XFE+eUxZViyXlzTTGSFGJmUFXY9cX9FMGpoWHnSPUFpyVNZ4taKZNJzI\n/IVTWqKJIFo8Fc1RCXqOvlzTB0MhGkKvESlSlIEQl/hNRMuICOB8wFiBVD13734WjST1FZPJbfp+\nRYgXVJOKkJaoQjKdSDZumSFBe6TsaCYKgUCpnhRbpJjnaEnl8n4IR2k/1QzQF2svAubPAvKb4L0P\n2M8C8WFuG9gNUeI+kO+SgvbpltHidqzfubfd0i4DkG+3exC8x++OYhCRdjSL2gPwmzTL4f4+Z/60\nam//39wKVQQ7IM++fLlO7wDiuSi6OPzbt4U4BgVMkngzeJYHPWx3XYb/n7o3ibUtS++8fqvb3Wlu\n99p40WWkM9OZttO98aCwXaY8KFMqBkggJAQCZgxgSNUMJggYMGXCBJUEooRUgFQITCFlUaaqMJCU\n0850pp2R0b8XL957tzndblbzMVj7NPe+F5mRkRUV5JKW1m7OOffce87972//v//3/ywxJFQ0xCDX\nZIkSFSqMbSyfA/PtHMF6C+jqALy3xzk4J4e/6acfnxuYaxXwKlKURdY6R42QS7z1WFSirUGbEm1A\nqUA9Kdgse4Yh0kVNPV0zPzW4qmKmG26fQkhQNwPabVBpjtMTUspWqU1T4/2G+Znw+ksPKMrcQXy9\nGWjqGZEOpXrazRJna4bQARGrhGgErS1lmVuuiRacdTRugjctaRVxqaT3EastSRdMjucYShwe9IZN\nv2GxeEYzm/PFsynKRox1fO1Xf5nv/skfkx69x8On73M6fx1tGqrC0K575lXPELJhmLOaX/u5n2G1\nWfPrv/R1/tdv/CHJL3jplV/EILz68n2a2YYHX3iDdb9GRLFeL3IysamwtmAYetTZKSn2+NWC+fyY\nW/1tdDVwFQeehAFpjpk3d5gdw5OrDxBxDKsFRRmy10c5ZT6bkui4hUOLo9u0DK1HiDgXkUFR2ILK\npXyHrJaIXpPMR8AD6nqKci1NU7FZ9VjXExC0F46nBXptgYGkCvRYdaqmnqEIpEEjKRBJBAkYVaPo\nifK5pYH24yehWW4C9486d40n57mIfMebHyQ793Mfgd/kzm8mQNMu+TnC7kix5CMj9UGmWbYg/iKa\nZU/6PD93L3htbH/eKFdUkpVTKo2MT5Ytap0Qc3jRGbeV2oH5djuhc2s4awk2EK0lurHTkLPZdmPr\nk3NDYy5bGehopfAcgN+cSkZO/ACwd8fHc+oQ1D/9+NzA3MeWrt+gVKRuIPQKiYaYAsYqtGGMgi1a\nRyQpikIzWJV5VmtI4nNvTHLXnzt3XmbanBL1O6AMWk+x+gRPy7prKVyibgyzVyxHsyYXobiCoW1p\nijnGaNrhKX26YggNy4XHlC1xMBhToY2gfHaw0BIpCo1zFVqd4MqCk9M7lCnhl2uaaUBE4eqKxeWC\n2K6ZHx9hixnPrp5RFxWl2jCZNLz9J/8X3eUz2m7BpFZcPP2Q+v5dnn64pplMURfP0MbSPzrH2Dv8\n63/9N/jv/ud/xOyrX+H2vWPOzu5itOaNcsLtr9zHv///Ero1x8enPL1a07U9s2OLtjVKG4w1mGKO\nPPsQm4Q7usHNzpDlE2IRGXTEzu5xNLuNsxXWljz+6E2MLlivLonREZOlDwNNXTO0G05PCi5jAWEJ\nacDoDq3WlFWinhri0qJMQhjw/inW1SR64vCMybSiKU7oWjB2IKUBVxQc1RV9bDGpoS6mlNUZEp+g\nyxavsme9LRUiCWsCJDd6wXzO458WZ/6jQP25uaVYbkTnL+LLJfPi15OfN2mWQ3ni9VQlMGLsTdpk\nr2YR1AvULC+QJe4oFrgWnY7h+HMM+hbfddzTSiiQcf9AfUNkx/Vv9egZzB0hWkywhBiJMaKDIxx4\n32z1/Fvpog4aiXpvp7CLzOWANpNMwWwB+kV82I5eGYH82rlPPz43MN+0V6z7BcoY6lqIg6O0EWdP\nKaoWbTvECEVlITUkQ+6HqB2usGgpKCxjm7msU6/KmombE03BcvgzmvIeUNENkc0wIGKo6khpGorp\nOX1vaDdPUaIodAVW5QIY7dB6dE0MQu97KlXibEFRKJwDTMIZmy0IVM/D4TssVle8ZO5igyf4SFEU\n9F1LiBqlHY8fP+P27cBUnbDpBq5WK67WG0K3gmToNj2XG4+Rlru37mDqCqUMw9AxbHourzbcvr2h\nqO/zr/3VX+LtD1Z8c7VArTrunk35vb/2m/DkHT5687ssVmuMq5jNHYX1XF2dM5lUaDNFG4ccnyAP\nn5FioLJC7Wrq5hirLpmqE1ZywdH0axhtmE3PuHr6GJynHWBolwxE7LBGYsSkK9pBYbWjKmas2g8y\nmFtN2VhOz0qiXBBCwvvIMjyjVZqqqrAO6qYimYizc1IUjDlj8E+oqmOkv8Ka/A9raCitpx08Rits\nNUFiQUwdKQSqckboPwmSfsbjk1QF/rhqlsP/9+cep3Z0y3NAvqVXDlQs++TmDzOnvZkAHYtz5Aao\nb0FZDmJ/tefMn3sl9Rxps+POD1Kd2xc/IGHGI7skqNrx5WzvGGT3NLZ3D7t5kCxNSuNDIESbK0Oj\ny43CQxpdLFO2SxgBe98uTpOSRkchbQFc2AtR9AjwW1oljmAdb3x4IlnNIuO++ikH877vSXQQE85a\nyspgqwKrC6oqURkwqccyJYhCbCDZJbYYiNHhtKFusn+ID2sG33J7/ktYSormFvHqPtY0aFXgk8K6\nSBharANtPKIHWv+Irl9j7Rxtb2NshS0idaNIukOLpRsUvhesFo7mJXoI1KWlD5cM4iBZlF2jlKYs\nStZ9S5EChIDvB4ahY7G6wBUlD+7dY7G4YrF4l/Vqw6uvv46xBc3JLfrNirj0GG04mp9QT+9CCgQx\nbK4uWS7OqadTuuUlpw/ucevLv4Tob/Ly/a9RzY64deeE1eW7/PEf/xGkSDOf0fuAcwY7NVwu1zRN\nAxI5ufcqlDVaFKXVDAmauuGOgpQSlxislPSx5XRym7Ka8stf/R2+9+Y/YGme0Q2P6JdfQB/NcKIJ\noUalDoeFdMRJY1lu3gTd4rRjMhNEjlmuB6IHLx6rNmht0EZDspRlQUgNMSm0iUzUyyjdYao7FC6S\nJDKENW2/JiahcBOIDqVKVFWyWXeI7rGm/ry+0vvxTyMy/2EJ0RdE5YeJz73e/IaqRa5TLbu2b/IC\nIFfbBOkNVbhS1zBnz2Vvteb7nwDcULAcAvmebtlGp3vIvhmLH+Bzls/sVpSg9MF7UdsLA2MEPz5+\n/BsqDQmFcXEs90+5yUdMYzQuN5KdmuT0aFC2fVzaPy4xXiQOgFwxAvgLrsKyvepu6ZWbV+iDIqTd\nb3XtKvWx4/Mr5x8/HasNaKFuDI4SZwyuLNASkFSjtBrNmRxWg7HgisS0tMxKl02g+mfU7oz18Jij\n+lVQgrMFvW8xZSCZFldkT+MkgURLYGA6O0NUj/dXOANJD8yaM5I8RlMyxCE3eFUepXSOtk1O0hWF\nJnJF3x9jU0vDlHl5hNUGmzzL5YeErmdxuWHwA1LDo3fezVRO13K12HB8esIXXn6Jo+MZH30UqOo5\nPZ7TO6/QHJ0R+wFbaKIMyOoSrR1adUycYfP+m5wdlfjOc37+Ft//4DsoSRTNhIuP3sE5TRSHa0qe\nfPQURDh//CGv/cxXUKen0C5JIoSuQ7ynLBxTDEd6Qh8UrQtcLt6jaWrO7Cn65B4P7v8CTxcDbXqP\nYXVOURaIKoipxq8vYKgQAWcq5sWrXHVvk8SjdcWkEUin9KHH+w2u0SQP0SaGocXoI0R1aDVh6AO3\nTqd0myH72ocNooRBhJQCEmuKagZGSEFhU4PSHcvVE+b1K5/XV3o/PgswvxmZvxDQeQ7Idw2YbyRA\no5hd/84bPYC4pmS5IU3cJ0APSvUPKBbFnmbZyRUPXm3rIHP9OXvN+g64dri1P3Pt5ynJCpbDn89+\nHzLIZ3uD7cz7gsaEiLcxa8bHaHzrUqlu8ONb6aKOZgf6e5pFduX+HFxg9jTL9sM5APKURorlANB3\noP7px+cG5qUrsczRJpJSIiqfKQyrUNqSKBAfiCKgHKbsQVcY3SOmxLlsYyt6hbBhiOB7TeUmmKAJ\nKTL0ayAR0kBZzFDk/pNBeqytKVTBydkdPnz65yS1ABqsqXCFIg4dgsEoS9IVWkW8jyircM6htB8b\nQ7fZkncI9DZ7coh42k1Pv16x2fQkbbBKszEWq0DXFlGGYnqLVMx5+/GKl1/+Emse8u1/9E+4fPwm\nj8qaV9/4eVxzRDU9pqhPqCvQTGhX50y+8LOcf/sdEpHYdjit+fDho1Gnf4wyBdK3OCbcOj1iCB6S\n0EznpH6DaZeQoK4rVkswEpg6R/AeUSUqKj6KHU/Pv8XZ5A5ON9TVhFN1m2ebNX7zlK6Y45Qn+EA0\nicXmPSb2Fko7RAJGJkRWWbU0neH7FZtuQKuGwtZolf3NV+tLtJ4icUJqNVoMw6bHOUtMhughhIGk\nI2I3aFXkxt7R4NOafmhR2qP1mmX/F5/XV3o/flIwfxFgvwjQn3vOdb58F4nLi5KfN/nybcH7QTn/\nqPw4BPQ81A5sD1UsN5Od1xKg1y4PslOyPA/E1+PPPc0yyhgPdOYqja+jxwvFrjI0ofTY0EPL2NFJ\ndmX+SSl0KDAx7qLyDNI8n+yMemx8rYkxoqLZWxMfRuZbyudFIH4zItfj/hbUr32In358ft4srqRQ\nBdr2+LBhSBFVDmiT6ydD9CgKjLaI2DEDXOCcQscplSswLnNPulD4PhHCgsXqEcadkFLPkHpCv8Zo\nqHSDLY4Qa2i7Fi0FqIC2imbqWPslBUJM2fdDFwbpDcFYtFJoLYhEwhDQCKlKxCioGGn7lj72RO1o\niiOivqCezEgSmOiSJIoQB7Rr2IQBY2p+8Wtf5eXXvsD0aMbrX76FaNgEw+z0A77197/Db/zCXX7w\np/8nt196lQ7N3Qc/Q1kLx0cNrjCUR3POvvx1Hn33W8xOjulWS+aTKUkUSmu8UtTzUzrfU5YTSGvK\n2lKVNco1yNPHRN/SdS2TyhFCxClNL4FjNM4es5aey/YD3vngz2iKuwQ/EKPG0dCtWkzRM6gVGsdq\ntSamZ1z2l8zNFGdGU6cI2lpMUdDMKwIKUqKuarxPII6YhL5rUUqI3uOouLxYMz+Z4+OSpDxKBcIg\nmLLGlBZFj7EzBr1BENAeV2mE7vP6Su/HTwLmP5Qb54VAv/NkucGXX1O1yPNJ0MNE5/W2yzchOMM1\n1wB9HKNO/Cao73Tm115lLAK8AeLXIvLxNWFPr+R1C+j7aH8L6rtCJdl7tmhJ+2SsTrk9n+R9UQod\nUm7LFwRl8xSndsnONHYZynLFmIF/fM5O6bJNgL4oEr9+u7QH8i2YX4vODz/gTz8+NzC32oCyKKVw\nNqFDT0wD4iKFPUIGj1IQhoBIjzWKwlQELXjtAJujMacw1uB7AXqCXND1NheQCFntYhp0VZCSYE2F\nIjB0A6qKWFUynx1xefEBidOcrTYGZ7PncwhC6QxK9wzDkuB7alXiVCIJWBIxJgJr1v4xE6UpK4c2\nlmp2wumdOXU95+T0Dl2Ao7Pb1LOGymrK2YSnlys+ePQ2tqx47Ytf4Vd/U/P+997i6eUF92/P+Qd/\n9Ba/+pu/RTWb8PLrd1leLZjcfQPKBxQPFPV7H/L++3+BVQLNnGG5gBQ4ufUK2jlq45AUMaaiKBSh\ntth2QRwGNus11mjYdnvSilpbQuxZDlcUOuKs4qL9LhfLJ6ihwZYaTYPElu7qEdoZJLmce8BQlBA5\np7AlroTBa8KgiGZAqZ5JY8cuRvl+NomiKCxdv8bogRBn9FEoK0vbPwHlcEWFUpHWf4QWC6MVsg9r\nYvREGVA4jLEY7T6vr/R+fFY0y8cB/Q471I3ofEutHHizcKgv3ypa9s0mwjVw19mVEL2nbm4oWp6j\nWEjXaJadZJE9+/4C4mZPj3ysPHEPkFv+3WyBXEbrgJQwsgXzuDtvJO4aaJtrYJ52bfmIsjPQkrE9\n3BbM0wjou+fEMcF5SHPrg7f63JVW9iX8cgDgOuYkKT/lYF64zMspawhRmNVT+rjBOUNdTXJ7tPSM\nFB0pJAxTrOkpnEMlhTEOLUJMAadKjIkMkhOOQ7HMbctSRKJmiJ5NWmHtBOUCxiTaNtCGc06LOxxP\nv0q/+T7rtqWwNWa8XXPW0OuItRZlE0MvrLuOqAoqAU0FpiepHpGOoNfYWc1ZdZ9Zc8rb3/sWHz16\nyPHdgWV7RdsGhre+A8rxi1//OpPiZY6P79I8mHFy9x5Pz5/x4I0v8mu/9Vv84A//J64uPL//L/8b\n9HLFF7/0GvVrr6Eu1gR3TNHcRTbPiPMZm3aBv/yQqj6hns8ZVKJbPKPvO5rZDGsMzjjKqia2kSQd\nCkNdTVkvnqBTACX4GFGiMEmYSuS202yweN2zDhfoEFG6RNsJffcUHRaj6ZkhDAMi2T5BlSuiWVNW\nE9reovxYTScdqBrrEhI8fuiwZYGWgihXBG+wdgIJ/CYwmIKi0hRFSZKBafMyUR4ShewAqc4IMVfw\nGrONpKrP8mtbkf1IS6AA/gfgbz73qE8L5j8qGn8hgPMcblxXsxzEyzcjc3meZkm79QY5Mtre5nEz\nSccOyA95ckFdP84hX77tXnQgSdyxNzcvF3LgICDXLgq730BlQDeynWlnspVdE9NuW5QCdyPRuTXb\nGqmVXPmpiTEQo8XEdKB2kd0FICc9uQ7icrCdthH59gIwgrqOB9F5PIjOP/34JGD+cX0PT4H/FngN\neBv4V4DL8Tl/E/i385+Jfw/4g5sv6lxJHzzOOLS22OSJaAo7QytHWRhit0KrQFFMKYs5Rd1hTEun\nc1Pg4AM+OgozwRnNoJcEP3C5OAfASyAGwfsAVrBlZBgS2iiyXaumMCdIamnKMxbL97ObWnQksx67\n3iRQgaKoWK83hDAgEUJIWF0wyCV1XXL/7Kuo4S7DAp6tH8OQcLO7HJ3Bm3/xFxir+Plf/ufxQeMl\ncnLnJb731lvU1TmFayjfe5d7dx/Qr1d85Vf+Od753reRKLhZyd3jW6QIP/iztzFFyZ0H91HKYGyF\n6T3LTU8xOWXZrlhddKgIm7KiKBwn5R0Krbm6esYbX/s10kmNXgwEG+jXLcMwUJsSozuSQFmWRCO0\nfUCLUMsJnVwiskAoiNGCKvPfNC3xfUfwiqQsTmkCwq2mwhYD2m44MXdZPqtQytHLmhCXROmJoWDT\nC40RXKGxriL0uaGFdQUJYRgCtihJ0QKJGC8ICaxJ9EMgmQUiU+riBBHofI+kz7RoqCObbG3I/zt/\nCPylcd2PTyNN/GGA/SLwvnY+K1ieU7PcmHvufJ/8TNd8TG7KEs31+Fkd0CzbXN+NCPvwEnATzA/V\nLDtVizqM95+PyrdUy3bkitPR71HlSHvripjBOhxsjx2Lts2kx/OiVO7SNFIrO2vboEluDArGMv4Q\nIyYGdLToEHeR+Y5m2YH5+N6v0SojeG9X8wJ6ZcfVbNdPPz4JmH9c38N/a1z/M+A/AP7GOL8G/Kvj\n+gD4e8CXb75TZ2swGm08TjVIinSxxyiHcyUxeVRISNBYXVO4GmOE2dQjsgHRrDdCiHYsoTcY3bIe\negZZ41NH4UoQzRA0Xd9RBoMrsrVu73Pv0fPLh9w6fQ0RYVaeEKPHh4RoD5K7q2iVsDTZlc0MRLWh\nVAUhrrl79jM4l7i4esrMNTTzY1TrGHTP6b0HvPfmX/CzP/91Pnr8jG/8wd/ht3//X+Tpkyu+8fe/\nwe/+7u9iVIEfhKou+cH330Sh2VytOH31K0gY2ETPqW64WC6whWUYYHW1pJhcoErL7S/+LF+Wge7i\nKSEGku959vgh6+UVejaj2yww0xn3X34FTMCtOoZnT+n7BW3fMTm+T4gt2hsc2bvd+ZapsazFMNEV\n56FE/JJNu+RocoSKmok7ow8DUVpSjPTBE7DQRapqQj2tcVaB10yaCX1fgCzo+iusy4mpECI+KGrA\nWQtRqIoGHR2DCEqVLBZPSWoY/e83+BApyimOkhRbtDY4WxMGhbGR5D9zb5ZtK6MCMMD5c4/4JP+T\nP64vyw87fxD8cZM3P4jId9tbWeLHJT4/Ljo/JEUOEPY5ID/wZnleV/68znwbnT8flTMeUePP4Tqd\nw9gjVEaSSLZdiuJue398f0y02vda3VErjDpylWmVYHNPgpj16CbGHJ2nA6plx5lv36js3vEezMcP\nJ26pFjlwTDwE9H82YP6ivocPgL8O/PZ4/L8it8T4G8C/BPw35IvA28D3gd8A/vHhixpbEEIAEbTR\nOGfRmw1+CFAlRAKSCiI92lhc4VC6QBeOYpboVwMxKfpNxVBAWeqxl2hJaTUyZC6VpOn7SAiRhCVI\npB3A9wqrK1arBUX5HpaK0s0ZhsC661G2x7iBhCLGguAznVCXU46OT/HyjPnkZZ5cvM1xXTOZ3CVF\nTxtXFO4Ot6Zn9F3LK1/4Ih+8812S7/nN3/49/u9/+H9Qz+/zl//KX2W92YBsiF5xcRnwg0cCpBh4\n83vf56VXX+HRh09o24qmiFhXcOf+EZPZnBQ7bF+gZ3Puv/5VPio+4Ol73yMMA6e3T7lzSW9sAAAg\nAElEQVRMEesMF0/PMSjuvfwaKEv0G4rbp/SP1vj1gs2wpCon2HJGtDVEj06J0IVcjq8ctRzxpL3i\navmQxj0AF5kU97C6Jsb3cfaC5foChaMqT1leJkrXcOuOzxa+usRRMy1ew/cQ5FG20q1qrHMovf0e\nRERtcEWNUVOUFDBErhYfUDYt2kaa6iQrjlyFUGfVjBEkJUwUkvF8xkMD3wS+CPwXZG/z6+Mn5cxf\nxIl/kmj9ZiTOXmN+COTXqkB3FMxNuuVjQPzGeJ773tMsHEbgL5zpGqA/D+Q3NNY7PfrhHcC2GXSe\nbuxMugXu3f7Btii1S3ambYPrbTRux5L+YLHRYmLYA3lI2QL3pprl2vve0ik35jYqv2aBe9P29p8t\nZ/46+76Hd4HH4/HH4z7kBreHwP0+GfyvjZB6SAoPVEXWkzfVXS43j3DdeZYdpkDwiV632K6gmRQo\nCkqXPRWsswwpuykWfgkktG1IUaiLCYPa5FsqPILk8v8g+CGwWnrqMmLVjOXiipPJMc45hk4zdJFU\n9ExsNtcafM6IK+WYVI7Sddw9/gW+//Z3kFgRi55hWFAVR6A2LNYfcTSd8vSDj2jsjLM7r9N3a95/\n6885Ob7NX/oX/hrf+e6fMp+eYQvNyfExMSY+fPSIzXrD5nKBc44PH37A/OSrxKCRWlPNbqFNxdNn\nH3GijqmKgNbZt3x6coRWX+Lqne/Rd0uO5scsl5dMK8XR6SmumCCFkCjprq7YrJZIGHLT55QgDWjr\niBJQzlCEkokkUkyUHqJXDEPBanXF0fwMVxakXqFCiVZgC0UIiRCXhNWE2aRicSX4YU1qNzRuTmlK\nanuLjVwhCE1TESJoE4hR0CaR0hXazpnOjyh0w8VFJLZrxA9oHUmpQ9sTjM3+98MQCNKz3vS063Ns\n+ZmngRLwS8AR8L8Av0MOZHbjPzzoP/Q7X4bf+crHvMoYEfKCKYf/5zeP3ZghQowQQ17TOGVsUJwi\npCA5+tw1LeaFDaNzV7Xr9MeOFrkWFV/3Lt8RMyqNMXlCjTTLdarlOpBfA3u5eWz/nG0fUbcFZrkO\n0NdAW64fOwR4hx8pJzO+q/HCpQxRG5LOazCWaMbVWoK1WGexKYO8TRYrdswn3ATmvApxr2BJCVIO\nOnKULruKVNE7zur5If8PmQz50ePH+eZPye2y/n3gZvvxFxNe189fG3/wd76JcxYfer729Vf5yi+8\nwdGkoh2u6MMlSs0Z/EDvO7S6wCRB2ymlSVm1YCV3vSlsplD8Aq0NlbMMlMTUo5XFy/a226OoMVrR\nxh4/DGgqhjpiPWz8Oc6VuNIwa2acd4ZERNtEDJF+6AlRKOtj5rNbXK3+DKePaMOadevpGTDxhLq4\nxYMHb/DRux9wfHSGXyeUVYjumJ894PZLr/GNb/w9Xn79VZIMPHu6pO9a2rbjw4ePefXl16mMxZmS\nhOKdt97m9u0TvJwituP0tqaoTFaEqIohJhbLC4bNFReX59z58q/y6Hv/mMunb1MUDkxDUU0ozl4i\nXXyA7jfQelaLS8QPmDKrPxIaoxRGG1h7XIzEbkk0isFvMKlHiWa52uSLWjxBUdG2Gm8V1gq5K66l\nLCZYdQbe4/vHDP0TDHNINVppJAlWG4q6pOsDSMLoipSWxDQQpcM5oVCOogQzJPoI0vWI9tRSEdOc\nEFuGruS9P3/Km3/6lOCHgyTdZz6ugL8L/Bo3wfz3bzzyBRy67BJvL1jDwflwsB9efD54CAaCE4IX\ngk1Ek2eyCTEjX2siSiu00WitMGOjZ6sY+13mdcuNi9qbVSWlsdttNEkFzGgnm1Qu209KE4mYnQrm\nRgJU0vX9F62yLy46XLf89x6UD2gVOQTxjzkm++cSFWHwRG8J3o7OiZaQLFHGyTAC/EDSJjfCdiY3\nwE57+2CFIDohI3WyW8dIW2S/ioyAbnJEL5Gxx6jOf/8XfXXVrwO/vt9P/+XHfiE/KZhv+x7+LfZ9\nDx8D98gUzH3go/H4B+Sk6Xa8PB67Nn7lL9+idCVrv+R4IjhToa3BMaMbnqDUavQTTgRZM4jB9gZX\napRJQEAbTV1p+ihEiRRGSCoQgs98vNYQLAqNpJ7k8xVRAdYWpJCrxJaLFSF2OEqm5W16pynjDIkX\n2CKiUvbyDjHSDkueLs6xxoIOKJ1Y9lc0nNK6p9yZvMTTx+9TVVMW51cocegoHB3fIgZhvVrzc1//\nOZ48fgqFomka2naDMZa7d04oS5hNTphPJsQYCbePeefddzi6excBNpsNR/M5zlpC9GjnKJzj8dOH\nqM2a7/+Ttzk6m3Pvy19luHjGK69/mdMHXyL1K8L0Lu35t1meP8MoQxybaIcQsGUaaSpNPW2wXnO3\nNLBZc+IMV8UJa2fwCdpuQQg9kizrzYJUXVHWCqUTKXaUVaIPa8pkEARlO9btQ6w9ow1LlFYkiYhO\nFEWDNhakx9ieGDTaKBbLjziZl4jpUQboDes2YumAt6iqiqQKZLjHq186494bhrbdEH3NP/y7Dz/h\n1/rHHrfIcewlUAO/B/xHzz3qkyRAdzwtO1Deze25GyAuNx83ngsGYshAHo0QbF6TEZLJACMHnW6U\njqisSMVsmxVrcvm7UQcOgwf2sSqM+5qkMnDbcU3KjD0/DYZIVGaXAD2UKj7n3Sg3yZybxw45+JQB\n+RCcJWBGYDeSgdvcOH/z8VYCkhRuCIQh4ELIjVuiJSab7YF3RVNjtG40yejsd57UCOb574KSEcAF\nUSm7OJLIXXK3IJ4jckkyzhysK6PGlne5ybSonywQ+SRgrnhx38P/Efg3gf90XP/7g+P/NfCfk+mV\nLwF/dPNFjVYIPbNmwrodeVJdjKqRRDB99tHGEL1mHVdgLLavsAR879HGUFZC3waGGCnRxBRypD5R\nGG0p9RRTQKOnJFpIfZbS+YTTJUZNKBvHprviXF8wmRxhKw9dJAaLKSqM03RdT+gts+IYwgfEBNYU\nWWKkBGMKogycX55zx73M5uoZt45eyc+ThMJwfDLh4cMPee+dh8znM9abK0IQjuZHbDYbGqsZ/Jo7\np3OMUcxdg3KGsnyNnpb57D7TyQQRwQ8DWEtdFwzDhvsPfobV47eY1rkoqiwnPPjV36KpStTZCSpa\nUtrQnDzg/L0foFJHVUyBUXqpFW2bbQ9SEhBLGAIzLGfO8L6aYIsNCmG9XKDrM9p2jSssgxaUStnn\nRXcEuaQ0Bd47hmFA6wKllmy8p48bFEOuMUCjZIZJEdFZ6WL1hM064NTAk/MfoG0gSZerbINC95rO\nBCQJ2ilUELq4QZQnpHVW2Xx24z45P7Tty/63gP/tuUd9QjDnBQCebgB2Cs8DeArXgT16IRqIFpLJ\nQB61EHUimUTSCTERtBrtMTJ+iMpReU4sgkbAjKCiRzDXB6Cu85p0jryTjtgR6I3WRBXRegR0DKi9\nN8uOhZd4/djh/g7IR9b+ANiNxGsAbsf9w22bbuxLuPa87b5ERfSB6HN0vlWuxGhGoB67EmlN1BnI\nk81VoDIqgrYNpJVOJC2IltGOdw/lIkIaAyaRbM4lY0SejEIMYDQSJUfoL9TYf/LxScB82/fwW+Se\nh5Clh/8J8LeBf4e9NBEy6P/tcQ3Av8sLaBZlNNZAigrFdLzS5yKiEHx2IxSTI9BB0/YDuAVqnWhS\nT/BDjrxtSZKe2EWScSQJxJiVKrU5padAJOAKITHQJ4uNFcZYrJpSmJrCOhQ13SawXFwxOSqpXcFm\niMQAxhpEOm6dvEa/uWA2uYXnHKU6rBaUq0nJc/fO12jPL+lTzaR6icXqCqKmnExwRUXfD9y//xKb\nzZoQAilFlFL0QzsqZwaQQEwDVkeOTo/p+57XvvAybjpjMpkxmUyZzo9yJay2qLDm9htf4eIH32H+\nyuusnzxkfvs1qukElSzVnTeQJERZoxZLbDHn6OQeV8/ew4cue7pbR1GUmKpBa82qXSMqYZxDotCG\ngRgNs8kduuRZXD2m7yeZftE9SAZzEIyBJEvWLWyWClf1WLXElif0V0u6VigbjTaBENcU+ojoI0OI\niB2IcYIES+87rNMUlaB0iQ8erWuM7lHREEQYVp5pHfAExGyI4ZOg6E80/gT4lR/5qE/wNiRen+kA\nqNMNIP/YcxGSh6RHELfbaDxH5DmiHG/9jdol3XJUft1VUOkRzO0WtHUGbZ33M4AHkta7nptGRopF\nm6yB0VkOGFUGdEHlgp1trCuHQH1Qbyr7lOtWRrhVxOyObaWGEjBpC9ARk8JOcmhTHJupxxHY4+7Y\n4WMlKYL3OG+yr3kYm1Qkf810LClN1Cb7pCeVAX1L/IyNpJVO2bRSMcbijAC+zX3KSJnnpKmknL/A\n5LuhpMdG9kp+4o7MnwTMf1jfw7/yMcf/43F+7IhRIboce+kpDA6R3PBBks5FU6oceThwtmEYNmzw\nqKiAAqwBq0kS6dqB0lpC3ICR0QKgpK5m4NcYFxFdIDEwJItzGoLCUnM6P2GxukTCiqH3TIJl2tQs\n2pbkZxgTaJoZz84fclzdprAlKQ0UNqGwOPUqVTHn2YfvUjhHS8vpdML64gOCKlHe0fWBtltQlhVF\nkRtEpATGaPq+p6knENf0nSeEAa0si+UVWsOd+cuc3LmPdo6qKinrCYmA1gpdnOC7NUf3XiMNPdZN\nKJWimJ2hUyJeXuDu3kMNA2Yz8NZ3/3cmVYkPIdsUuFzQE0LClJa279HakGLEKoOyGmscp+WEoFZE\nFE0z5fLynChZg0upiLYgmQFFjR8gDktC8MxEM5kpMEtiTAxeYXwFaLReovUSCZGh6wk24fQCGRSS\nCqybEOJATAZUrkdQFiRGkiSulp6Ylhgb0CoSvEXC51YHtx8/Js2SDubN/S2AX9u+CfBm3Pd7MBcj\nY0SuRppFZSBX+fZeq7gDcq1kdNEVCCp37RkBXEYwj0ZjdY7Gk475QiF5ewd/yux031FlX/md2FGy\nE4zdgfeNYh4Oj73gnERMOgDttD+2BW6TDsD94DE2hYPHByTpkV4ZaZYYSNFnH6CdAdmeZhE9RuaY\nkWbK3YzEKNDbvx0j0N8Qsmyl5VGhxs8Umz//ZFROOGtQWvOTpns+v05DOtG1+RY/SaJdrUEbjHE4\ne0Tv12iTMNpRqBLjLMpY+hAZNhVKR8xEZV45wmLd0kwrrIaUOvquRtUOnRzdEHGpQxcDyiSSBJwr\nAU1KBZaawvRYE9i0ayaD4ExJXRZoOUbkEmMGjCNHqxicPkaLpymPAMOt49dZCNSDwfV3+Ojd99Gi\nqCcl08mEzkeOju+TkpBSJIRAWdbEGHBOY4yhcBP6fsNytaAuK4qywlqFLgxKm1z9qjQigi1cvuVN\nDuMC4kpMXGFu3SKuzvG+o2jOMEe3iI/fJJkCv3zEvXsv8fjdH1AWuTBHK41SCu97tM53e8ZYjFI4\nq3FmYPBLniRBUsTHNfPpHfxmwcMnjzBugwk2GxvZhFaOoTMkCSgsm81AWVXABmuPaNcBJTXVRFEU\niba7xChHUoFST0jS5rsTX2GbAmscXegpXM9mUEgsUAgpRvwQaU1HVW/QydGvzYHW93McPw6Y36BP\ntjP+kP1r5/yYtDTjHNUReY7dd1TcmUHtKBa2Pic7dhdIKJujRTE5OSo6g3tKI8CbTD0Y0RhzAL4q\ng6dWESM5Mgd1rRRpV8zDCLhso+1R6X5Y/DPqwndAfgjQHzNtiuib25J9VbZRuk4jzRItKWWufOuK\nGJMZqZR91WvSI3BvtTUq0yskEAvKJKLKnZgSEMfirLxCTIqUFDFuAV1BUDkyNwo1AvroIvYTfe0+\nNzA/qe/QW0FFzzIMPL14mMu2UwCpKVwkJg8uUusCEZcbJ6cNEl0GgCQo7TDaoJVFa50bEHeKoU30\nved4UnI0P6Ft11jjiNIRw4ASSz90xBi5ulpQTWpgCUlzuVhx5/Q1jmZC6guSNWA8TV2BCsRY4Iop\nxERpTmmKKesnz5i4E4YQuHN2zDqs8Sk7LS4ur7B1RdclJpMp6/WKlFIuanIFSXIiVrmaSV1igLKp\nESXU8zN8gk3XobSCYUAphdIua99Ng1WSPSeqmuDPwU3oPnwHfb/GDgvS2RfQ734TPzvl6tt/iDaK\noR+wVYmPQuMcxihiynI/bR1K54TNMAQqVzIvSoarK5RRODvheGK5uFpyvlpTRsHaAh8HlBqwzmCd\nJQZBc0LsDTF05I5Amk3rEZ0dKbWOKFNhlaEuJ6w3nhA3VHVJ00xQSpBYEMIGUHStUGhLgnx7bxLa\nJMQ7JFZI/P9B27hPSLNsE5vpxowHgB1fdGy77fOKEcRLXnVeMWlMrqUc8ekx2abU3v97LBwFwYyg\nLjbTCWI0yRiSCXsFh4lECViTlV6RAyDfTsnHNdvIfF+hmcF5y2nvQd2yTVzGPc+9A/wbkfYWpGO6\nfuzGvomjV8u4vXteSkhS2dMnGVzypGQyxbJTqYyc+ZZqQu0j8vHilpOYCmWEqBQRRRSNGYFcJ0VM\nGhVVppKtItrcNzRz5Wq861GjouWnGMwdNWU5YbF+TGEt51ePcbVi6s6o3THazCjqyLJ7n6oeCK1D\nlMUaIUkEPUdJQI+ewPWkRiWfO9fUBe1K6HtPrANVowi+QNGRUiSmrGuWELi4fMakfJX1pqUqCnRZ\ns95YjKm4NTvl6UePEAxKQ1lZLA4Z81/OzrF2TlEdsV5fMPQDLkFaKZIkoheUgRADsRuo6gqlFMMI\nyHkoJk0NKlEWNdYayspSlg2z2ZzJ/Ih+iLi+Y9LUiAht15IAFT3JKCDiVI4C+mcbVCNMmgY7dEQD\navEu3aaFOLBKwr1bcz581KJjxDhN8D1JCqq6xDlL6QrabkOIiaIoeba+4qpbYYGrrsUZA0Q02frA\noBm6kqhz0toVEZUSjTsl9I7LzjObliQJFMYy+IgfFK0WyiJSV4qoPSkFvG8JKWCrKU3d0LV9vmgr\nxaSsOb/q8KJQWnC2oigGrJrRTF/NjaOHz7xo6EePTxKZHypU4vWoOx6AdwwQ/Q/Z9+zSscqM+mWT\ncrJTpREk2K0KYPQQ33YJyrq4XKmoHEjQiM1RuFiVgd3mqDUmnXsCyJb9HkF9C+Z6n8jc68O3ssKb\nVZp+X5n5QhVKPudGgN+Csd5VY15ftwCu4xbQR3OtGMdj+ZwkTZRAEr+XGY4+73LgXbOLzMdbGtFq\n51CZN0DFRFS5e1MUTRCFTno/oyZEBdsG0FHnvERQKENOSGuN2t4W78tJx/VG4dQPGZ8bmIcUqW1B\nWc3xq3Mm9TEX6/eYHdcUdk5MEU3AaAX6Kab0bNaOmDza5i9R7xOT0jCtJgzDhlJblOTnKAVtf0WS\no5xYdRqtNFZmlEXEiMUERfIbnB0QFVC2wJk5lTuhcDOaokH09+n9JUWxpqmPiV2Oovu+pbQzrJ0z\ntB06DSgfSUNBUBuMNsSwYRg8rp6gUmK5XFLXuROO1tkf2RhD23bMZlPKssIYQwyRVGoWqzUJRVmU\nOOu4urqiLAsYNK4oCT4RVucE4/AKhr4DowhPn2Jv3Uf7Ner0FnLhKZsJ5997k1u37/LsySPKskQA\nqzVIQity+bJ2ePFUZYUuHMurBSKKk6Mz3nz4Pn3bo3hG2y7RZcfpyfYrZFG6AX2JEo1TE5KvCH2J\n9wNR1hRFpmOKwmKtoSpA8KzaD7DWsV4KXd9RFYqqMqD6fCEMA9pZnDEUG89mIzhTIETCMDCb3EKp\ngqPpMX1/swTicxifgmY55MgPAT3c3A7Pb6PJvt1mu8ooQcwJOqUyaOyTnWMBkDJj9aUeC4I0RJUl\neFETRwVHsoYogZg0dgTx5yLyFEc72r1LehbHbOmUgyrNHWD7nV58rwf3B+cP9OQp7ErpdwA97t/0\nJTdpdDhM189tt1PSOHKy0+HzRUm264FYctTUi9lX1ArsaxlEQRTCDsgNOmlCzKuKetSN6iwFsQox\nghjQJtM1aQvkW/L8JxifY0PnRKGFEARrGpwr8OEWQwj4sKSpjgg+IjERdI+za4wrkeQItAzeoZUj\nRENZZE151yWSeHofEQNRWtpwSaOnKJ2o6xlqKDGmoF0HNhIpcchwCWWNHyz3H7xCU9Ws2iuMarDl\njHb1BFeAKgZINcvVBqMC1Atcv6DQFl2AH3qaVIAIxhoIPaEHMQW1LRhS4uLiAoDJZA/qq9Wasmww\nxmCdI3jQKLwPtG1HXdWsViuIGXQlwUJfUZc1m8tzju++QuzXOCMs28AUz+L8IWeFw617fNcyCBxN\nS7rBU9XF2FDDo3T+ChhrsMailWCdBa1wYqiaGjusWX/4CCWJoe358PIH2UuFDabwGDOn0AWD9CSp\n8kU1ThhaQ9d1xOhpQ2I2N5TO4oqsoLEmEmKi9+dsBkW7sjmirxpEbViunrBeObre0xwVJB2YTTRK\nWYaNRStNig5NTVUcEVAEfjoi88MioXQQnR/SKuEAxEN4wbbP20pncFA+e95ovff93urJ0eyaN2Rb\n2m2VpUaPYK5Fg1OZNx4bMqRkSCmMkec+Eo/qOphvrWZzAjTt/cQP+HIrcQTqDOJuWwC0BXC223nf\nyb5qM9Mke28Uk/bgrQ9av+2PxRccy6uMFySHz0lO/K74aVevOm6nrSzzkDPfgroCYsKIyUAeDSGZ\n0VnRoKLJhl5RwCuwabzTEbQZE6BmTGJo89ML5u3wEXV1hNEO5XJ4cjY543LzmN73DH6DLRNVPSXF\nnqg8ZVmixDMMATEX6HSK1Zl60Bj6rgdlGPrcRKFqhN5v0GLGK6FQVQ02aoibrKQJiqQ6sjArMWlq\nJtWMkAKIUMkt5qXH92+xThsMDcvlgLYdqpzgLz9kVk2pyorSRGbTO0gscmSihWkzIdlMRSilcuQd\nI9YWDMOAc5b5fEZZOqoq0zBlURBjpC4rjo6P0dpQuCJHWgJKKQrjUNEznx9nZ7fUcvX0CcaVbNo1\ny6dvo2+/zHF3gbn/Ck56LoPQLxdYZ1BKMCYnDEMUtPcgBqU0Nil0M0EJOO+plUFbSx+EXhlK3bDp\nLmgKwVhHXVqsrghDh1INwXtsKNEpN4vwoUNrg4RE1ZQEnSgLDQmULbi8yJ+BH6YUpQYVCH1LOyxY\nLrMe/eoqcXJSEfSa2VSz6AuSRKycoMSgJeSqPf3TAea7QqH44uTndvoDEPcjreJHII8BvN8WEcpz\nU/4/6t7lV7Y0PfP6fbd1jdu+nEuezMqsqqyyG7uNobtFSy2LNkKipRYtZiAmDPgDQEIgaCSEGDBh\nwpAxTBATpB4waLCQLVqy225s2qa66KpyZVZmnjzXvXdc1+27MfjWioh9MrMq7XI5O5f0aa1YEXHO\n2XFiP+tZz/u8zyumRrCT2sIRxBOgK05j3mRIUkDwkmBGSSXIU1RudMkFLlSSFkTSlrVMcdN+LDhO\nAswks0xJ6andfgTyI2hbsun4jfPnzyeZZZwu9CaIj5nk54+n8W6f97oYxNFHHkZtfOpyTd76SSdP\nK8lUI3hPpvzpwhhiKvhGhQwaGRTSJ1IiQhwHQ2ui8Wl6kY4EnbBbKnGSWYSE0QH0592+MjDf9y+p\nu8cYfcXd7hllXpHJnELl7NsDnW2Z6RIlCmK4QMQBZSwmCrKgCRFMEGihGIJCUOClpesCzg34IUMW\nJcFJeteASVfhwii0uCCqnmgalNTsekeue8oqZ9/ekZs5Shvu1k/RsUCHjK2bE7wn+AP71lIUgcGv\nyRH0nWGeF8yKArcdUGGgH/pRAlPkmcYLyXK+IBKSxBEDWWawdqAoSpbLJcNgKYqcLMs57PcURUFd\n10ghyYwm14Y8zyiKMkkkBDo3oKUiOEewHftnP+TBW99hdfEEuZzj9mviJz+kHzxaOmYPr7h5+Rpj\ndHI5QOoCtQMmL5GmSuxsGOi7Hm8982rGrz76Lr0zbPbf55XbUGaXaNGSSUcMnhCG5GTxW4xwWH8g\nBIP3KRdnGrTbDS1Sa0QmMKaisxkxFrhBIDDMSkVmwHnPbn9LP1QIkZPrGcJfUBclbXdDPRNsNj2C\nOX4Q9GJLEH0a9PtVb39Gn/m94qf/rJRizxi5PWPk0/kE3gIlQcmIGoE80fKQQFwCRISQSDEBeUCS\ninYKmZh5SLklCcRHm150I79OAH7Sx8d15hyRIzuX+ATmU7Fz3J+Y+AnEsziMYG7JwriPFhOH42t0\nSHUY6eNxPw2XSBODAtLF42PpwheeC0FipB199Pbopw9SJekjJnfK5CU/X4zNQnHKVgkRGTUy6JGR\n6zEnfcy9MRCtSNHaOnn/pToR8aSXC44nf47tKwPzvpe0eUeeebK84DDc4WNBns8phx58JPMaEQ1a\nCaz1WNcitWVWX9C2knZoMW6FUhlalewPB7Ry2D7iBknoDVV5ya57TQwDfejJVYWLgKjw3AKRKHKy\nGHB2z6fPPqDMHtC5Ha/ufkRVXjGEbRolJypCTIA7XygIHVoFSiHJhGZwAxpFbhTO51ilcWLABU9W\nFkQfqOYVznkgUpYl+73n8vKK2WzGfn9gNptRljUxBsq8oKoq8izncNhRVwVZloYmSyXxQ4vAMvQe\nt36JjAPGSD69ecU7D+e0rz9G9AP16gJiR1le0W4/4vFbb7Pd3KAzTTv0aAG5LpAChn4HMcdkOXlV\nEVxGs3tN4zpKCY9WlxyGAbSgymcoHbndvSBTA1FlxKixoU1SWGuRWhJkQOmBpvcEmZiL1AO6VGgp\nWObvcNPcUBioTEmRB5qmx3nJ4D1S9VzXb9G3lrm+xNnnKA1ZoXFWcNhbAo4oPeJfBGui+3KviWfs\n/Nxy+HkSixtZuHWn5eyJmWsRCaPnOYqAStOMQQgkpKaUNIkZgRgn9Eh0DEcwVzF5oX0YmXlUx8Ke\nF5P67Y6M3EuPlw6v1OjjDkcgP3V5TjLLKcXQYMc1kGHJpn2YgD2BeHa2V8EnVh5GFu7jaVCEG4Ha\nnh27zzm26TjEyakzdnce7YenIRzH/HY5dmrKyYWSNG+hxtpEDMho0r/PJzYuXH6BKxgAACAASURB\nVDwNvbCCaGRi5lYSdExLicTMJ5nl68zMZXbgMLymdAuUMvShRQSPtxkiFlSZhtDj9h5RSAgGGwXS\nQzU3GLlECUnbvWI1X1GVNbtdh7O3eK+QCJS4IMacIr/gbv8xOsBW3JHrjPbgGYaBLEu+6hgNMkqM\nBGsbrGvZtB+y6T9ASEFdPMLIVETN8gyjNVpHRN+jpSULRRqGESzdcEAbw2J5QWsjURXkuaYfHENv\nubq+Yrfb0vc9lxdX43DqjKqC+XxBluVstxvmizlZlrFYLKjqEhUidhzMLL2gzDLwA83+wP7VR7im\nZbZYUNaaw/7A0O9Rpma72zFfzNndvEQIQ9M2FGVNxOMHO0otA3KA+ewKmWu0KdJts4BFXiGM4cO7\n56wPW7qwp9JLVst3yLXh0YN/mRfPXrLpn1Llj9k0HVIoHILQC/oe8mvw0dP0AqVbtFOEQyDLluBW\nyDCA7Ilj1MLQRQQGIQJKKZp2x6p8iO0GFNcgbskyRdtD04APHXVtUObr0zTEefv+uQXx84D8zTUC\n++BSc2eQoOU4kUcCIiCESHUFAlIkjSAVOgUyirEDU6AQ6JjkNRHEsXHGMM0IdScwFwo3MXLvk9fc\ne5Q+83WP3ZsnmeXM0TIVNkdmfloDGQN5HEYQPwF5HgdUTGA+yRfTgAg5AvW9ZRMbPx1HpD09n4q6\npzb9qE/6eJgA/B4zB3TylaPicY9OmeWT7IOLoz4OuORYSUA+/n064HXSy4Vi9JiL0cnyNQZzIXqc\nO9D2a5RWCKnZdy1aaJQvyXSN9Aa33+IGTb2cM/Q9ShWEIFnOLnh08Zhts2XXPCMKS1XWvLrZIP2M\nuc7p9hvyQmMyjR1ypOzZtRtcITg0PS6AMR1ZPqPMZyjvyLJIcA2KjIAj+ENy1OgFWuRI7fE0mFii\nJQyhYVAde7tBO89y9oCKJeVyTntoCPsDRTXHC8Xg4yinpPmkjx8/Js+q1NrvJXVdo7VGa01VlQgp\nCSEF9eRZQa4VdnBs16/x9sBu6KHbI/ICEVqMERACi7Ki2azRViOvlrib19zd3lDnBfttS50ptNYg\nJCY3KZaTdDvuhUebOn25ggUCeV5wt224KJesypJP947gHb11vPXgu7z96Lu883DDR8+/z4vbHzKY\nh+zaV+RFjhskzmpc5ykqQ5ZVqCAI7YGDPGBdDcGSaU2eR5wX+AaczSAqtBIMQ8s+vkZGQwg9QkeU\nFChjyfKCtvekm7kt1xfv/qK/ugr4J6Ro57/3ua/488gsk2b+hqPF+c8C+vDGsRqDspKmO/YsTA1C\nYgTyMyusSKk4aUWBjmIsTibj+QTiZ32ZaJGKnl46tEqDG45DG9Sp23KcMHovf+Wom8dzdj7JLcMR\nsNO+v/84pL0OLg2NTp05Z9N+7gM5lhOI23j/2CbGnMKypq7O01TSY8fr5FwRI5CrtKImIaYBxiHQ\nMF5gphF0bmTjThxZeTAKpVPcglLgxwJoIuSTZj4i/BdaEn/2HedXCOaCIezYdq/I8yK1HktY71+x\nMt/g6vIxzW7Lq91rajVDhYKHF3OaYYvtPMw0dV2RmyUu7IlhgzUOozSFKcnxHG6e09YFyhRcL56w\nbj5CqsCueU2UObiCEBxaF0ipgYgPDYEWrWveffAb/Onz3wIv6Ps9SheYvMG7BsESESWdG1AcKOWS\nWlb0rcMYS9ztkVIwm9U4JFIILi8viDFgreXRw8fkWbIizmYzIFLXs5SZARRFiZSpM9RaS/AeQ47S\nitxkbPc3yZa4v8OrjNA26LzCS89+vYbomK0WuCGF8Sug61vqWZ00bjTtbs9gB6QQx47YGGBoW1Se\nqvLTxUQpw8XyMrV0+wHvW17dvOCX3vtXefToEd969z3a4QWvt548n+GCBW/JpAEfECJZTWtZUeYz\nrK1Zu9c4IhmSi9WCKPcMg6UPoMQCFSVGQ9cf8AZa9wrpKwQOYVLAWRAHtMkYOocSPbvmJ7/or+5/\nTModmn/hK/4sBdA3GofOi5+fx8wHm9b5sYYx5Iljl2ci6DExzZjEFRgL6AhEFMjACORgosCMISNh\nlFb0BOZCouUkrSi0crhpH9S91vlJalFjc/ux+HkE8rPCJvYMuHvykMA8jwN5GPfjYxVcGpU5Tvg5\n16UTiEeE5bgXNn7uMTbJLDGq4xi9o6SiRLLbw/hBxhOY68TEhYmQxTRD1HwBmLsTmIdBEYxPS2uC\njsixACqUQB495qOj5esI5gFN73s8r7ChRhuDNAFhBrJMcHnxgKpacNs/TxOD/JJcFwilaYcDHz/9\nkFyvyMwc70CoAal31EVFZXJEu0MKz+ubZ8wX3+Vy9ggtAxv7FKn3SUYQ0A+S3bZFzysyU4KwbHYf\n8uTRX2M5/2Ve3P5/bNqPIXbMLkiWyd0AQuCDI2rBbbdDxudI9YCVzIgCuq6jaTaUZYUqDYvVJV7o\nFOsbInlWYa3j4mKJ1or9YYvRGc55pJEYk5FlBhGha1uKoqBtW2IExcDu1TP84TWuP/DwyTdp+hVB\neHzfk+sKrQzW9aByDrsdhRFoY+icpTTZMdrUO0dnLVUtKco5JktJjTrPsG2Hi6lwmemanTvgjaHp\nB7r2lsVc88nzP+bb3/wu7z1+wnfe/5f40Ud/jI+vMVrhXUQrjZ5Lglc09gaTV8hOUag52jVEn1HM\nS7LCEWVBvwHbHChnc6q8QoocLSva8DEChSNd2GQQKCXp7MCYpo3UGq27X+TX9h3g7wL/LfCffOGr\n/gwyy7Fh6Gzv/X2p5TMSy8jIBwe9g5AsTqPtUKQCp4hIIcbOTsZ2TxKIM/YWxXSRVxOgByCK0ban\n8MKdOVbcOLRBpZA65XHeo73Hjc08KoRjEVRGn2Z0nneAnuWKfx6g56PMkseeIvYjkPcUoUfF5FsX\nIWWBH8e2eY4adRq0MbJzO+2BYTwe96eJS2fWQ5na7mM418w5Fj2P8oqJxAwwAZGlhsV0dwCMjDxY\nSbSSOAG59kle0QE5yiwTmAslTnr519WaGL1G0OFtS2BA6jkiKrR2KN0lJ8dsjnppiH7NEBYcdoJi\nWaCxDD7wzz78PR5cvI0ROW1/h9KRR48eEzvJYTgQZcT2TQqLA+b1ima7TsUMUhEyWEPnIjYPEEFm\ngV33nHezv06RV7y1+hvsmlu6rklRlV4h4gwhKhB7FvMKUcwphiWVWpLJkhggyzKuHn6X9XqLyTL6\nvgcN+/2Bi8tLlBZstju8v0BrxWKxZLNZUxQl1gb6vmc2q2kPB7TWDEOPj4Ki0Gy2G+YXK7bdHXkx\nZ39omF9c0ux3uG7H9uYZeWmoqwXBB5bzir5r8MFxMb9MXbAotDJIqShLSVEkXT8SEGQgFCYvMVme\ndEU7cBEky/ljrmYVr7e3BAb+9OPv8ejxNykKxTy7YLl6wIvt96lUCWWNb8BaS6ZyhKnZ7F4z0xes\niiXX6m1uRotons9oulus64hOYvcNcVagi5pH9SWNL9h1L3DRkueavNA0+5bgJdb1SB3QJqLMz5lW\n9NO3/x74z4DFT33VnyM1Mbo3JJbP8ZbfA3J72iewPskrSoAXoI4jEkZmPpI7wTjoJpJklsBxgbgH\n4GY6VhonPVr6xMi1PrbJqzMny5Q9fl9iOcksZmwamhj6qdA5svMRyIvYJTAPA0Xs0MFzTLHynAb7\nTGsE8SSzpD3DtJ/APJ0LcfKRn4WJjUOdJ0Sc7nKi4KSPGxIbNyDygMiTSws/BpTZUVrJJHGQhCyN\noPMmoExA6YjSEakiUgnkFEk86eZfV828lA9Qekfv7/BOsB8ceZHGoK2bl7TuwDK/ZjZfsjvccrt7\nStctuTYPUvKYFzTDS/qDoUGg45y33v4WuIy7l7dsjUaUgkUueb1+islmKO3ItYaY0dmkMXvvsFay\nb9ZcXF2lPBI/8HL9Kd998hClch5f/hV+8vIfM7QdnYiIYIgxgZ+pAnmcMdwqCmVo+w7tA/XyET/+\n4Ed84+1vY0dfatvtMcZwffmYu81L6rqmaRryPMfonP1uj9KSQzNQ1zWvX78i+sDq4iKxc6W42T6j\nNoa7Q0MgUOgC6x3b3Rqja0LcU9YzXN8SvCPaSOcHqrrmdrtmSSCSYvZcDAyDh9DirMcOfbr7yPP0\nS5PnxDjgDg3BO4xM8o8uFHETGGxHZgr+4A9/hyePv8Oj+opfeuvX+NHHf8h8sSRGwdbdpQRGHTGy\nogsHOhwIwapaoOOMWM4oJNhg6Zs7XKvoVcNtcFxkMwpdczn7FbSZ8+ruewiZgraGAYiBEAVKO6yz\n5Cb/RX1l/23SAJY/An7zp73wv/6j0/FvvgW/+dZnLzCjPM00oit8Zp8YdUg1tnv7KYkv+NP+6IyR\np/Fwkz86SebxZJHmFMiuGFk6Yz0PmYYkiwTcTpwAXEuFVwrtNNo5tFZ4p9Baob3DeIX3bsw8UUh8\nYuDBYsII4mEE8TDaEMfns+O5EdjHc3lIurnyfiwscnSKHI+nx/7s/JtgP61Aauo7ykI+hYaRQsKk\nUCksbLJeqtSlrZRHKo/UHmXS3bM0HiUSUEvjj8xb6IAYQVvo5HoRMl1skWfduHJi5ccnPvtl6n8b\nht/+aV+34/YVZrNcI6WmaQdsGAhe4KwnywV9v+F2/4q+H8hzzd2u49B2gGC7q1jOlvTdn5JlFxAN\nMXqePHqH99/+FbyXfPLp/0Vre8y8RviWKAPb9lX64KVHhRRMpbKcwe8o1AJjAsFvcDKjzC6JYmDb\n7zBGU5oF17Nf5Xb9ESLmSCHQWYGMl5RZGqvmZMQ5i9E5RTHn9cvnICI//uAHPH7rXVRZ03UDv/xL\n38ZkCoFgPp8zDAN1PUMI2O62XF1fHtv89/s983pG33XIGPAhAW7b7lIWvMrT4IzDAN5hrSevF/jD\nmuXyAhcHijLj9vWWqwdXzF1g6C3GpKKn9+nbbYwmxEieF2NXZUgDIQ4blNKYTLFdr+kOlnlR8fDq\nHV69XmOHjugFIfN8/0e/z/Kv/h0eP36PX//uv8Wr1x/y5O13uMme8cmz79F2d9RljUCzO+x5VF+T\nG4PUBuoKP/QpvtYZ+rZhcXlFYzs+evpjTL5iNX9Ivag5bF8yhB37w4Gub1EiR+kMpQAhCf4X9pX+\nW6Qh5n8XKEjs/H8C/oM3X/hf/a37DOvziHqQacWxeMk4B3LqS5FRjDEr8UgMjwCuSGxwfH82LiNS\nbc4kSXeSeafcrWlyXLrF92MBzo0AP6UqqrMCok4hZtKl9nilPMp5tHMYm4A9KEWwY46LEgQn0oVE\ngZT+CMZZGDBh9JOH1J4/rVM8bTg2Bk0rXcE4gbY9W9O582HXcMaox/PnX4mpEz8TxxVMsmImx4kc\nZ3+OS2qc1FhhsNJghWFITniGmNGLtB9iNhouNS4aXBwTaKYxdHG8XxmzX2KUo6TD/Sv6m5v5zbSm\n7fDffM6L0vbV+bh88lLHkNwqfkjBQM3WE1Xgg6f/D29ff4vGbxicZQge0a25qr+B7Qyz4hFRbokU\nzGclVbliMbuiax11OeNgZuA6gjFIEWn6LSEMlApMZhG6xmQZ1+WM9WGDksmzXFUVUgXaww1ddSDP\nM9ZbwdX8XYzI6PufMK80dtii6vcp8wU2fozSaV6pioq+6zjs17z97rfZbnZ4b7l98ZyrB+8c42ar\nqqZtW7JsbBI6HJBSIYSgKAq895gsgW70nv1uw3yWE53HS7DtAedd0kZjl6INhgYfGowsE7PzghgC\nVxdXHPYtZVUjpaRpDgjvid6jlabpB+rS4L3HDz2D2lFUS2RRgUmdmsvVA17sP6bfbMmKJavlit1d\nk9IVfeSf/vH/yVuX7/Pk4Tf41tvfoipnCOl5+8mctr/hbutobYsUBUYPvGpvyWSZmEufdFcfHEor\nLq8ec7X8K1S24YOX3+eTpx9yXV2idaA0V+OUmA5iYkc+pHtrZWJKuPzFbP/luAD+NvCf8jlADuDL\nn/1v8IqUPS5HMJ+anc5Y9EnbThJIUKRsD8+opSRANxIyTsuMS/NTAP0sDkRMfxmMgV3JRy1tRMqA\nUgnItfLps1cKryTGySQtjCCOFTDZ7mR67wTmR1D3NoF6sOhg7wH6MUgrhJNnewLqc9b9BiOPR8Y9\nrjdvPaZzUycngphDyMS4Rl+9UXg9LY1TaVk5LjGCuUjO+IE8AfnklI/TGv06UeOiGv36p6z0BOij\nNn9cfJka50/dvjIwPxx6Vlc1F+qCjz+5QwlD1+/xUSLo2LVrNt0zel4z2D3EnIgjqB7vHZleMoSB\n1eIdEJGmbbi7W7Ned4BnNa8YhgwvAzpT3KzT7b4TAestgojuDfk8p6oyolf4QVJSkGUlm+45frjD\nB5DCYtSMWfmETVijdIazM3wIxKGklG/R5zf4YGltw0JVvPX4G2w2O7TIefHsKfOrR1RVRQiRfujY\nbjasVqvxXBg7QXOapqEsS5yz5EXB5uaOx48eYrTC9Qek8Bz2W4wQOG/RSmGqFX3fpIamoKmuZkQl\nqIqSoekI3lJVFTEEAoKLyytePnvBenugKjOyMRcmryp0USOEBikRMRIGm+4IrE/xuEGQq5wnjx5x\nK7a4xtJIRxYVv/N7/yv/5m/8+1zOL3lw9YCb169wYqCaX3Donyf5svcgA02wfGrXXLgZijRe7jA0\nqCLjav5NVvMLimHGJ7cfkxm42X+A0oDRqFgz7J9TlDUSMf42W3y06C/TsPMXs33hr54vfnYhKxlH\n4kluEZOHIqYiZYzIEXwnWUX7OA5FGHXjEYj1OTvns0CuwwnI5bQmQJ9ci9PnpkgukXvMPKCcR9kk\nO2jn8PbUeJOaYZJYP80lFSplw2QjK8/CgPFnkotPQyb0WUTtFGUrj52e8aiR3wP0s+Lxffnk7AOe\nAF1NNYXTuSh4g5UL/MjMvU6zPhMz10dm7iZWPgE69wHdxlMrlEOPQJ7WcXpRPDHzNwGdrzOYN7st\n33j7ffK84bA6sF4HYtTkOSgjyDKdXAsmYrSiLCRlMeP55sesygGtM+qqRkZFVS/58NN/ygcf/4B5\n8RAvDgRjx/Q+jSCyqGta5Qn0ON8RvaOPFbIzZKZkZ2/R4ZJDfyArNUJEdodPCE4S+x5RVUQrqKtv\nUFYzRIi8fP0UF3rKeomSOb08UOUrVCzonWU+m/PRT35ECBYjJM6nARDe97y+ecbFxYqyrPA+jY+b\n/jOHYSCGQK4MOM9uv6E2hueffsTFrEDFSN/u0TIn+HSfmecFwij80CGCJ8/KNACjLtgfHE3fEDzM\n53P2mx15WVA2OU3XMMtzFqtLqsWDcVi2QWiDPezTnUTb0gKHIXA4rDmEHVU9Qz6C1y874n7LQIbf\n7vhH/+Qf8q//jb9HlgtMoXnx4in1fIaNbzO8+CFR9ojCEnykHwRd1GQ+4pqOQ7OlWNZU8xlFPef1\n5gMymTHLVzTtAHJHXc+J3kOoka5iUVfsw45+sLgoMOovJc/8d8b1uZsvfjYzD2Jk5WOOeNIG4mgb\njMggkOEE6HEEclQcw1iSc0WImOSVCcg/R2qZWPkxHdefybVndkYA1OjNnpi5Ckh7xs6tJyifcs6d\nJE6DFizjhSA5SU5gbsn8kFawR0BPGrtLk4HGeFp1DuQjmItzIA+nFc+OPwPmk8wiT4+PLN2Pz2eC\nmI17MzFzeWLmSp3klpGZHwGd7IydH/tXE6BHfdy7qMfS79kEo3OZZZRajqldX1cwJwT6bqBeXrNc\nrjkcGoKV1EVNVgZc32IWmkwu2Q0OJTUhOPIyI6gNOp9h7YCQFmcHtoctt5ufUBYf8ejyXaIvaPo7\niBqBR0nBvMqIoiJEQT8cyMwB6StCHNC+5NDuObSOLm7IC8V69wlSVBR6xu3mY/xQUcwqtMjIdEZZ\nDPzk0w+4Wr7FvCp5OLsGm9HuNxx2B5b1iovlAqmylOp42PP0wx8jMkNmMkJMGvZms+by8oL5YkGM\nqeOxHyxd35LnOYfDHplrZnnF5u4V0vVUuQSpiVEiREArkLKgqJdE32GtRSgJRvHg8RNefvoxDy+v\nWW+3zKua/WGHEILlYkGuFFIIYhgQZEQZkWVFpjNC3+F9oN+vmeU5GoHxCQFCHlD5QDxIXGfxDn7w\nw+8xr694/53vMNBhYsA2kSq/YLV8xC7e0Q07Wh/QJpBphWhbRPSUucLHAaU8xAGpoGm22IuMUifn\nsgsNzgVkXOHbiNMiTcrxOd1BEvSX8QX+Yjdf/uwLih/BPBBPHvGRnok4AnoEFUSa6H7ORFU8piHK\nFI99H9AZmTln7PwcyOUJ0EepPG3x3CU3WunsSStXVqJHEA82DWlIs0XTXybsyMjHC4AUEePHoqZP\nQJ6AfQJ0mwDd+2Pm+ImZh2NTED7ek1HieaX47PieVDH9YFNNIpJuh/TEzCEYkdbE0CdmrhROq5PM\nogxOTqw8w54BeR9P8soR1Cd2zsTM9ZnM8llWHoM4XZx+ju2rY+aHT3j16oKsWmLMisVC04selSmE\nSF7h3vXkOieEktubLZeXFbkWVIXCe4sNB15tnrKoIiJmDK2n0IqHF9/gvbf+Jr/1u/+A7f4VXd8w\nrzzffOd9irLEx5rGHsgI+OHAfusZYknnWiIdftewpKaqJE13h0XQtT273YbaXeH6htViySpfcSt2\nvHj5guyddwlURG3Z92u0rpBaUZYrpJA8e/GU1eoxq6vHdENPPZsRfGB/2KW7kOjQWqKUYbtdH9l6\nUVc0r3bs+o5FmWJxZ1qw3x+4vLpKVjbrCMqi8wrnPTJCWRR0+z3EiB/g8dvvs1vfsFgu2W032O7A\nfFGxXq9ZXV8jshykBq1RKiO6QCgK1MWKbL0mCwPPPn3KoW+JTtP1e1QuyauM8KqltZGhSemTf/Qn\nv0ug53q1oqhmxBjpO9BujrQdtg807Y68DFhnUVJgo0dlis42tMMdVXlNVcx5dH1F12+QRYnSirZv\nkTrHRE/TDygTMcbg+wO2ldjwc9Kbv4DtSzFz4ji0fGTmo3VlKvxNUog6B3EXj3ZkKQVSRtQosxwb\nE6f9GyB+XOKMmfuTzHIE9LEAKlWSWqRKmSZSeZSSBCvR2iW5x4kkzyhSQXJ678joxQTmI5Abb8nc\ndDwy8tGrrqaBE/6zQH5k53AC6zf29yJ5piLnZN9543VRCqI5rQTk4oyZS7zS9wugZ4XQ4Z7McgLw\nc0Y+FUE/C+RqBHL5Wb3868rMV4sl0W+w3Yq6eAyzDdJ07Owd9fw9Xtx9QNt1qEyitMYGz7Y9sCpy\nhq5GZpHDoWEdN2gxpy4uIJb0Xc/l4jt4a3n/0V/le8MfYHsH0WCHwGpV0Q9bqiIipUMXEPycw11D\nDAq0pW0Ds+ISZzuyLNJ3A9Z7ml3D0Djuqg7CI967+C4LmbHub9g3N+iH76NDhRUHijwDZXj+4iMW\n84dcrJ7QdDv86+fk5QyZmTTENXqEMHRdhzEmOU0ixBipqpJmd0BEhzHw+uaG6B3b/Ya8qHBD6tCs\n6lT2SvG5BucCu+2O1eqSED32sKfbOLJqRrtbs1xd8Kq3fPr8ObOi4G5zx1WWo4ocoTMQMt34r9fE\noUTgmBcV77/zhJfblrbviJSg1sznOcuLjhcvn+JaTfRw9WjJxy9+QF39GnleUFczumZLDFn6MlsD\noUp3W9riBmhcj84EWkba/o7bzU/IRMVqmdE4UFITsYQoETGglERnkRAHkIYYcqAjK35h1sQvvX2h\nZn7WyOdHME8WxDNAH/XyECLKR6KPaD+6TtQpXM/LiJJp7sER0N8A9c8DdCnGAqi4R15PYDJWXMWo\nlScgDygXiNanmaL2FEA1FUuTA2Z8rfIom3J1jE9Dk40fpZUzMDcunVPeJeY/AroY42vFBOTu9Lkd\nhx6fuT3fPBdhnNrzxrnpdRKiIY3IOwPyoBXBJFbuz5n5OYjLk8zSk9OTp77WaLAxw8U3Qf1MN4/n\nurk4snO+7pr5fFnjVMSzx/UzZuWcduiZVddoWZKbCzaHj1CF5WIxp+tLDs2O7ToiigGpB1CS3nZs\ndq+os5qyKNhuO37y8R/z9uNfxfkDVRkJUaeurD7QbFtQAygFwqIyw9tPvsXt7gcIu2foBUZF+m6P\nESArjcl7mn1OaTRlFtkF2Oy23LhPIAqC8MTQcbt5yturbzFbrRheHWh3z3hw/YSm7ZCho55VCJEx\n9APLqyvy3IwDduWYbW6IMZJlOev1gbIo8EODlgG7O1DmOW3bQJanDk87UJQ5EUOIAeeHBHbCMFvM\n2e/3aBWZry4Y7IDqDgzW4dYbLlcrrGvp2uSLt4PDh4AaBihnCJOBGwiHNZQ1+fwS9eol71+vGDYH\nPhkCuCUX13OiV7y8W3P7iWO5uuB6eYUqLC9ePuNi9pihGchVZBNbvIhkeU4dJXnmKVSNWvRIWdG6\nHUpluKFl3X1CUSwwBSz0Q1Se44aB7faGSI/WERUcJk9zQoW05HNDVQxf1Vf6uIUv4WYJhATkMa3J\nRD45OOTIxpUbG2T0CJrqJK8cm4POgHwCcXUO5OduFnHGyjlbI5jcsyaOkomygaB8cq1YQRyLnceu\ny/G16QIQUSOoSyL6COIuWRp92ut7+yTjnLLH4zF7nCmBEN74B/NGxvj545PHM8r774mCNPhaC8Jx\njRKLuV8A9WfM/LwIasUp83EYAdydFT/vySz39HJ1rwB6zs6/1pp50IbLy0vW2zWD33M5v6YXIWWQ\naGialsFKur5nuTB880nN+k7TbCNd35CjwQlmxQPyosJkkbIq6RrBH/3JH/L0xceozFIYycUiMPSB\nu+4wXuR78ipQVUvmy2+Qm4Jf/tZf4//+/u8gMGgNWe6RcokyAUEPoqcsCx4Uirm6xIaMzWFDGzy5\nSjasZ+sPqOpLSir6/jWPr9+i63psvyfPHvDpJ0+ZzZZcPnwHO/R478mKFGnroyMEjxCMw54LvPPk\nUrPreoQfGG5ukMWMbtdRz2ZEBM4l1l6WBdZ2JINvoG0ixhRI4dlut1S5UQ6qxQAAIABJREFUSXKM\n9wyuQ0aPVjmzOkeEHmJEmzK1L8cISkK1QpgS4XsiETW/ZHvzjCo3yAYWi3dZFSvMo8jt/hW12vPo\n6j3KcsEw9HT2BX/64R9wMbtCSI8QnqIwWNehTQB6HA1VUeEHT+8FyqSh3pkC166RpUGpGikLSnNJ\nqHdYtyEqB7KnCQFcYFHMEMpTVvuv6it93L6UmyUmrT/GMN77nyQW4UMC8zEPW4zukKBEyj+T6b9n\nirxVZ0szNgBN+xHEJ/ydHHufy8oj962JbzJzlwB9siDeK3aOEbNKnzR2QbwP3KNH/c1zaloTmJ/F\n1t5j5vL+im8+huMUjs8898Zx1AnQoxYEdQbo94qf+uhosedA/kbx0x0dLOYeGz/KLGMPbHiz+Hnm\nZDnWAX6O7SsD86IsQHpaN7Bbr5lXcywD1jp8hN4HgjN0hz1u1VBncy7qGTWSRjgG6+jajln1kEIq\nMiN4+/oBcx35+ONP+PjjVyyvJFeXhmWVUWUZTSPYtAMyOoYoyfOcl68+4fHVY2DHvFripEPlO4qS\nVOjxKg09UA4bPZGSZf2A3Fyx3zxl/fIjqloDASFatvtPyKv3Ka/nrPc7hAgcWksIOy4uVkgM+80d\ni9US2w8UeUWWGYa9TBkzaGZVjXAOa3uMEbhuT6FhP3RcVDXMlkgfUTpNJRr6njzL0KpASmi7FqUc\ny8WCzeaOMtc0hw5tclRRIgfJertJHmWVUVQ1OtNEoUAbhI8EUyBEiu0LzZ4QHLWSLC8f89HTp7go\n6V0Aryjzax7MZ2TfduD68W4j4MOAkiUvbp8To6Wuc/reMfQdQcQUQewadKcJQlNkj8jLgA89wvU4\nr7jZ3jGrK1aZJsYeZSDLHnDYPUfojNBE6rxCU+Cco3f9V/WVPm5fSjOPSWAJIUlqMUTwYcwdkQnQ\nJ5ufiUgnCDr5zJPfnKNHXU5gzn1AP2fjxz0niUXGxMjFsUBIAvM3mLm0HqVOud5o7rPxiYkrhbKp\nMU8rlyJwnT+CuLq3/+w55fyxQekYWTtFyjL9UG+sKZ4Wjvr4ka1P1sSz10/BWWl828jMx1zzpJVP\nBdCkmVuZCqAnMM8YONPMY34C7mMmZALxyWd+9Jpzzs6TzMJYAJ06U3+e7WeBeUGyYOWkfoR/APx9\n4BL4X4D3gA+BfxdYj+/5+8B/SCrZ/EfA//55f3CJZ5EtOeSSTvwYjUTbFPUahGRmNAcryILBNQUy\nW6Cw1CaNXOuySDeAkiXffOdv8uzlHzKba5azJavqAb//z36f/W5gXknUsiBEiZISYwKDVwxuzs1d\nj1QD3v8zgpUsMkWnDSbTGB2QpmBwLdY5TNly2AnWzvIwOEoTiLpABUUmC5RQoDP2+5csy29weXHB\n7fopCPCxoygv2GxuKYoLrh5cIaVktVqx3mzHZiFJVdUM1lLmhqKeoXqZbtqMYbt9yeLiAU3bs1zO\ncENHJjPyvEaInr7vyYxh3/VoJSnyilevXqK1phk887LE+p7DrmFoDkRn6ds9ZV0QKXn06FcQWuN7\ni1zNiHG8ny8LVJzR3Kx5vn3BcOhY1As+Xu/xvcd7T99FLq7eo7vd0cU7XLBYBsraQ2yQYsmr29e4\nCCE42tajswKhIm4AtGGwjoiiMEtUFml2W3Z9w751aNPQNM+RSpBnj9FiSSYLilmgcy8YBkVWKGL0\ndLvs5/qF+IvYvgwzn+oi0ZOA/Jg5EsEFpJNEFxCW0VsOUo0+c5nS/FLudkxgzhlYcyatxFPjkQpn\nZHUCcsb9xMqPmnli5tGJBOTOo86KnW/q5MmLrnDOj0PJ07BoZf0RvI9e9fFYnx1PQK7syMzHQRJM\n0baMoDxqSTHxp6M7BcSJnZ9LLON77r1XjUCuTkCe1omVp6VTIVSe2PkwNQ2Jc2Y+AvlRG3+z+1O/\noZe/KbPwl6KZd8C/ATTja/8R8Buktub/A/jvgP8c+C/G9SvAvzfu3wZ+C/glPucGYpYJilInW102\no206urZHectqcYVfXtCGAyE4ZCjwtsTFiB8sxsg0FzOPzMuSYAM2RITbslo+pjbXXD2/YrP7lL4V\n3Lz0rGYlBYJOHijIiTGj6yJW7Ng2P6BUJbkoyc0lgpLoI33Ypnb+4oIQl/jVlsPO8mz9Q2zwSeNU\ninl1iRSWXu0QZsbOvSIbrokZdOsdi8U16+0NRpe4YFEqgferl8+pF/PEzHtH3yXbXSYVmZJAoGsb\njCm4WF4SvCPPMvp+QGuJD47e9mRVYrxt31AUGc4KttsddZ2idZvDAREl3kdm8xmbfqD3HlUUDIOj\nrjRDdyB/8ja0DdFG5EWNb/eoTYdvW/LFgkcx8jpumLmMKn9O3zWsu+c414MOSFIU797fQZjhA1xe\nzPFKMrMGZz2h6ym9wMdAVBatJV2/Y73v0OKKB5eeGBx97+lCaqQymcEJi47XXM+/y+XsCf2w5+NP\n/znXJfTmDq0Mfe+5O3w9CqAxCghh7F6MKVPeyRQX7CW4gLBizNAWRxBKTDP5zCcGKsWJbR/BnLMO\n0jMQP57nDWZ+tqZY2cmZElVAOTGC+Pi8jWP+SJJgvAt4OwL52OIvSBeEBODhCOTSnjUhuZH5u6TN\nSzuOejsOk+DEzA33PeVHX/kI5GdgGM9+4KjSe+NYGY56lFomUJcJ1L0cGbo895iffOaf0cvJ6Mnw\nUROiGkFdncD7+HgC8nOpZbIl8pdaAG3GfZY+Gu5IYP63x/P/I/DbJDD/d4D/mfTxfwj8CPjXgN/7\n7B8babY3SV8zsG/2fPrsObO6Qhc5QUSMypEqo+l3GLOkyGasb27xLiNqxeAzPn39AbPqIc3hgNQN\n+35NaB3vPXrIM2Xpmg1t03KRX5LnCuN22NjhrWZWXyPlgkP/HBkHJA4f75DM6A4hNfssS7wPlGXJ\nkwcZa9XQ7RSbdmCwA7406DwjEwFnW2Iuef36E1aX1/iokg7tA4vZBa9vPuXb3/41mrajdA6l0/AJ\nEGRZRt8dqIoSES1uaMi0wEvIM01rA+2hpagyhJC0bQsmTSey1lIVJbYLqYO0mKNUTte1WGtZzOc8\ne/oT5vM529ue1XJO0x7QOkNlmiAFfXeg6IaUlKqBoQUbiFIgs4K267jZ31KZgv3QUGc9+/ApN+uI\n0A5l53TrOXd3A8PQEsKGuloyFJqstJjcMQwDuVI406OMJ6iIjo62gcHmvPPWr3CxXPD09R+xG16R\nVwV5JjHK4JynLBdc1G/xcPUOP/jgT+htg9ANIlikyqjqjNp/ccz4X9b2Zdr5iWFsehFjiycJwCcg\nNyL16ZswTnZnpN5xBN0E6MgzMBf3gXoaUzkBupiOJxAf/4hpXKgY63DTSDQ5ajNTQ5BQEX+UX2QC\nZSkJyo85LRLv5JjVkoaFq1F3VzaBeALyEbSPYH7/NWKSWcYccizpH+9IKHTuOT+TU+6dE3wWyE16\nfwLzNHgiqDSUwks57tUx6veolUuNO89mESeP+RDzcXz1Wds++ljwPBU/p3yWzwH0OF3cf77v3ZcB\ncwn8IfA+8D8A3wMeAS/G51+MjwGecB+4PyEx9M9sfci4ffEMWVSs5ku2W7jbBfbNGit75osSACE1\nh6Zn137Ku0++yeL6CXebO7zL2G0tmo5nr/45TrbsW4v3T2m2mkIXXF885E5GBt/goiV6j4xplNUQ\nHHleUWVLZsWM9fYH9LEnF5rgW4RQ7Pc9pY6sLpb09o6siNSzBfN8wXY3cPBbyAMHt6EfOnrjkOI5\nTtVs+htW5ZKqWhEj3L56waMH73FoOh4+eYc8z1AyY14vcPaA7V1K09QOURRkmeBwt0YLSbO7SZNR\nGJAYqqIkN5rgEkAKmybGOOsxusLagdl8hm8sXdvgneXB9Vvs9ju0gPXrGw6HPVpL6nqGUgVFuUAE\nSxQZQpWEdo+qF9BKmC2oixllu+fZ3TPa0KGyA4ftU9qNRbCiEILbu47bO5eiPZHYvqHMM0LcEMJr\n0DP6TiBFgRM9oXfsfUmQguvlu3znm7/Ot957h2+99et88PQf8/Ll7+JDRh9y5vVjDts1ebni4fIh\nN7Mr/vTDHbraI3WL8w6LIC/+RfCZ/2yZRYxALnw4RbpaEC6AkQhDyswePYdCxRFQk0AuRqQWkzvl\nDMgnfBuzu46gLsKZJTGO7wtnbxCjzOJSQTPFyobRP34qdgYXCU4i3ShPOIFy/hi4FUY/uiAegVra\n8+Mko3zRc9PzYsojn8D8vDnoqIuPdyvTc9MHPBU9J0BPYanpYjAx83FIcxiXlxIvErB7ObFyhRMT\nMz9j52de81TcnABdnh2PQH4WsHXeAXqvnf/NOII/x/ZlwDwA/wqwBP4hSXY5337WDcLnPrfH0amS\nXDjeWr6HwaH0/0uwsL7t08DeHGaUtCy42W5oLw5cLB6gVc2zzUdoWTOLERfWRHIEBTZKtu2GmCvm\n1SOGmUfaLYiMKBQ2aiQDRBi6jkIbjJqjs0vwO7S6AAkhNiiZo6Km30p6JF27pS5TaqLJA7WJWNfT\nWWj6W3Q2UBlPNA27/TOuZpdoNadtbtFG0bQtDx9fMrQtdZGTZYLB9tRVhXSOpusJUVIVK+5u9zS7\nWy4XD1KQlsrIzIzNZo11A2VRgRAE78nyHDuknyk1gSg2mw2zqk6Z6Pstg5XkWcbt7R4ZLcbkeN+j\njQEBQafPh7wAo5H5JX6zRuoIXZta6JVAmoznr76H1hnsKz55+pSL+QVmoUAOKB0wusIojQ+e5jDQ\nDluc8LjYEJVB6ZyrskIVc6yTtH1GZMuL1z/g+vIKo9Odh3SCzMCqfA+yOaKwvN7/hO88/iYPry6Y\nV4KejBgt+8Oeg3MI8wsPZ/kQ2JJu+C3pzvPe9mWYuQgiTaiZrIejlzzZ/ZLEIoxAGoHQZ2vs/ry3\nuA/qIp5AXUDK8xdnr4mnx8dsFn96gxirp0KFNPzmWOwUSBsJKoyac5pqH5UkqHHvRMpqUeNUoxGs\nxTCx7RPrlkM47u8/F5DD2ai3gfQDfVHLvv7i5+4x8wxiPjYLCUEQiZUHIcf9yMpFAnAvNU6cWRLl\nSS/v0ygNevJx2IU67Sfw5rxR6L7MEqIgBvmV+cw3wP8G/HUSG///uXuTGNu2NL/rt7rdni66G3Hv\nffc1mVmZWVWmqrKMoYxBWMiFhBgwMANmSDCzkBCMGCGBEIgJgqEFkpEoQCohYUqWYYDBZVSm7HI1\nWeVMsnn9e7eJG91pd7c6BnufiBPx4r53M1+lH8WSltbaa69z7om4J37nO9/6vu9/ArwAHtLXeQZ4\nCjzZecwbw9pn2u/+9rskaoSzLfzKJcleQlka2rVHiYiKOYlq0T7ycHpEnh4zX33EowfvcDA64cXm\nPZJUMM5nvdJ4EsnzhNXmghAFNkCUmkxmiMTRuBbT5YyyY5abT4g0rJuKxeaKB5MTcIYim6HFCOs9\nRTZiIT9g3SyZpRmVF7SbDa6zpNJiveDxg28QY+DFiw+RoaStGpSOZCXIrCHqwNXFh8hBJuDg4Ajn\nLEeHM85Pn3H81tcJqyXYhjwxGNVXLXzx/GOmZc5GB4SKKJ3SWUeaakIo+vTuEDBpilAS7y1S9vBc\nLtbMygItE5wPlKNxLxPXebQS7E8K1hvL/PI5ZZJQb5aUZYmUQ0UiAaQJgRxVtAgbiImmW17y7PwZ\nFR7rExKl0MxYV59wPB2jScl1xkLVSC1JEkMpJwThqDpNHSNCWQ7KCb/6+C0eHBScb1o+Ottwsb5k\nM3/Kcn2Jc5dE1fDDT/9vDnLFYWKw7oKynKCzlh/88H/n8fgxXbUgKknnWp6/W/Hxuxe9xSN/5un8\nkb6e+eWrNryOZS4Ha1xabkBuYw9x20NcDgCXulekkYMAsLzOAr0VUn1/3z3kFDv775n3L4xr5R7p\nINrB1eL61xZVQG5l0dxu2dtBmGHwQ0fbP+GN/uYW6sP1nVG+Yp2uf47d84ZbtVY011UTtz7zOPwc\ntyxzA9GIHuhm4OcA9H4cLPJBjMMPQL/pt2PMd+PMA4Olzdbq3oH2Z+7Je/3m/yRCEw/pPVVzIAd+\nHfiPgN8C/k3gPx/Gvzns/y3gfwD+C3r3ys8B//C+J/61X39Ero8RIeEXv/7P8Yfv/T6TfUubeJpK\n4WzEdQmkitloTJpnLNqWzeaK2f4jvnn8F/lR9zskymHyEW4oLB98SggdbbdCesj0BNs1uLYhSfsC\nTdPRm1yuzoi0VJuGNgssqhUn2SGj/IDLzUcYccI7j77Dxfw9lpxTN45N0yGiovJnEHPEXkGepXzz\n8YQfvv8POJ97Vp3AKA95xdKeMS6PoPPsTw6o6oo3nrzJcrlACMN6M6eQkOqE6DvaumJU7veFraoO\nHVPqpkYIyXQ6wTlLCJYYIsFZ1s2GPOstg8lkRlaOcCEyXyw42H/ApqmJAkxacrZ4zsH+HqurFttu\nmM326Jp1ry8aAtJ3QCDmY0JQKAkiyYltAzqQjsYc75/w8vwF0/KbTNKG6ahGqYSqWULsCNoyznJM\n0mt3plGTpjPybEyc/wljAj+XTzgZZajgaDYVl8sL1leX4CymENT+Oc2mReC5apbIRPNgClJVCCq0\nsPzxD34XlaaMJm8RVjmP30z52uN9bIjUquKP/t7Fl/ur+OImPu/m64QmxgGW2C00I9LKa6kz2cVr\noF/3bQao7FVqtn7y7YvZAnkLcuLOnCFiZQf+d+fADcy3hbNURA2JQlFxozpvtweyW4gP10M9862e\n5o2gMtBtIU0v4bYz7yXdIqLbubcF+dYyH17fDcjFjRDFnRK423rm1y6WLcgHV0sUW2m4HuSB3kL3\nQvbFscTQ2bpX+mSg2yVw+x6voS2IW2izC+zb8eXb9dsZoOJnbpk/pD/g3P4K/zvg79Crrfwm8G9z\nE5oIvdDtbw6jA/4ar3iJy/mcThvyZJ+3H/055o3l2Yv/k4nK2BhF2zmiHPeHb1VFPpnQxJK23VA3\nHbGFIp3hg0d4R+MDaSYpTM5GtoNYQYWRhthZMhmx9pSs6P3YuVF8dHWGSXJiTKkaS2MjM9mhpEOr\niFIFJyff5OXZ9xn5hroCXU7xcsPqasnHT9/n57/+y6TGMs1ynte9f2x2oEkySbARLTXB9jqeR4fH\nff3wuqLIJ0yKks3yis1yQessRaJxdYUWjmpZ96FowMVyzqE+xncdTV1DCBitKEZjog8kqUJoSWdr\njJKMD4+x1iGAelMRI+RZweWLM3QmadsaQSQd7zHem/VVIMtRfxAnFCrNYLEhiI44mSHmc7pqjc5K\nCpkTXYvKUtLUkKqE86tTlBKkuaHII8QEKTPWQZDne+RdzpP9b1L6p2zWHd9/70MaL3h/PmfeLhgZ\nxYN9Q3EQ6NoldWuJXUKQFZerlsO9iBEeIwJIw8fPf8zDh99iks7ALZFdRDlFK2rq8DNPnYj0UVoe\n+OvAf313w+sU2mLHtRJ3ZM+2IBdGIg1I3ecTSA1K9WGCUvVJQ1IK1BAjfvfTRez+1d13/xXXQvTh\nj9vQQ7bQtgxuC3ETsz3oXt6dX8dzR3qpti3I70i5iXaYtzv32gHgwz3RDfPt5+OWRLshiv5+n/l1\nNIsWvWWeAGk/RkQvOS16UWdPD/JbQndCX2dy3rLIbxXaSvsPhQHk947xVeM/2dosfwL86j3rl8Bf\necVj/tOhf24TYURVrSnkjOlkitEZMSpSlZHmGXJvRusdPioaAWF5RWMbsrLk7PIco0py+YBl9wGd\nEyAkra3J0xmPxxNO64/ZNFfgJ8hEDXWcDa07ZSy+xRtv/AoX9e+zbGuQksw84KOnn5AISVlkLFY/\nIM++TsGUNw/+PJ+e/x8cpTOmkydcdB9h/ZJPP3kPQ+TxoyOidCQyp7Ub3Hqf0DqEmjNvSzJSpsWI\nqmtx8yuatiXPIovFiv3pjKcfv8v+wRGIQNNumBQl0BfVUkpx/OBxH+kyHtFYR7AdKkvwMZKXJTEq\nnHPYuo8xr7uGRCcszs8waITWZEVG1IJqU6GTFKENWZYSupb08CEmK+hC6P8fbEDsHSKa3gUkHjzm\n6o8/5ntP3+P7n37E++cv+cW33mLTLjFZSxSWrgEaj3OCoggIClLTsuo+YWweQfB4qbioW16erxFJ\njkOgpEBryWgcEcazWWxYLgKtN4Rg0C7l2YsfYo4WSNfgaonrPBcXH1OlNev6JcdGoLqOCkciH3zR\nW+/Ltr8EPAeO6MNzfwD8X7sb/vp/cnY9/86/OOJX//JnI2yi6hNxGNLkGeZIMQj9+j4bVzGICu/c\nl/TuFtlnhN4LgnjPZXz19XW7T2ptKwShd+b2xrd+d34d/xi5o8V5z7wDOkHcuY6d6AG+3TvA/NrK\n1tvoFHFdhGzX1bL92fqYc3EdphgHH7pXQ1naXpkUdz3eneub9Pyob1L2g76pXe7NIAIzaIgOY9wZ\nCVtVIXFd8vY68TfEAebxFTD/u3xOteVb7avTAE1KVl2DE4KLxUsu5h9TVQGZCMblDGkCETBIREyp\nu47zy5dM5Zvsl5oskSh9zNX8E3LhsGlk1S2Y5GOOHn+Ni/dOuZh/jNGPKEcRlXmaCmJXc7H+lP3y\nTQ4nh7TrHxBCx2R8QN0942y+pHEW7xrOzv8Bb7/9z5OrPd54+1/ie9/9bY7LnELOaLvn0ETOz89w\n5hKhV4yPBHppECKjqQI+P2PEGG0e0TYdLjQkaU6ejUjLbFBiD9jOkxcFAk+a5bjoSTND10WKsqSR\nDiH6r2VZVhCzkhAsTggWG4vSFh0lWIsoC8ZpgYuOMslZzJ9RlnvEmJFkIxKTgBBY14GQlEUJIeDq\nBn1wDLF/c4noIRsjhILNJcff+EV+/4Mf8sEnP2YZrviTDy7oOocPHlNAWY6IrUZphdEG4RVW1SRx\nztJGRBDUomAeVtROYEzER49JBDqFpRSEdcP5C89i0aK1JCkLOtHQVA1X86fkakpiM6LvaOoFy/WC\nIpN4YbF5xLaWNP2ZH4A+H8Yz4H+mPwC9BfN/499759YDNsvPPonaBOQmoIYu64CqA6rtu+z6BBrl\nbup8q0EsWQm/owAUbkPgVfMvut7Otxbt3X1bn+5dyNudx4g7jwncA+7PWbsrCber58lg/W/B7W6+\nDWwFMaLtO90Ae0v/IWGGbxTDB4GLmjUjNpRsYklFSU1BHQtqcpqY0ZINwhPpDbiHLM8+9HBwrThB\nXENcAxXEKhJriDV9lk4biV3sP6A6rt1UDDrGN4efuw6x3fYXh75t//E9e/r2lcG863pfsHOWl5ef\nMr98jjEZVfT4eEUua+oGRLJHrkpE9LS+wvsFHkETWkTImU7eRtSfIrOGTXSsu3O+dvIXeHLy5/jB\nj3+nT0CpLGLsEWmLpQ9/fPf0HyPECqMNjVuQmYLJZMTV1YKq9qSJo3Pwgw/+Ad/59r9CqmfsPfkF\n2uAwMaOrFXYT2RtnaDullRUi69DCU2bHTFTB0n2fzrbUfkOuDEpqjDGkWYoxGav2iufPz5jNZiQm\nQUjoPGgpcU2Hj5HNpiGGDqkUIUZcjBR5f2AptKZuHa5pcEQ8CaFr8V2fRWol7B8+Qpu8D1aQAtt6\n6mpNlmXYtiWMRv3hp/cIG0AZpJnh5+9jpg9w6QiuLqgufszR/iOiVAivWXYVwc54dPAdsgzKIsFu\nCpI0oV1/wqpacjH3eNeQlc+ZFN+gqy2rzRmb2mO8ZTwtKHJBjDUQaduOzbrXMjWZIBIQJFzOl8RQ\nMhm11IDWAmfBOYuTgkpELH10RT5a/SzftgW93bkCSuBfpj9DutWqVfaFTySrgKriLZDLa5DHa5BL\nF66l1HqYRyQBJcM10F8L5q873w2D2b1/Vwximw16d//2Mbsw/zzr/D5Nz/uEmRlcKX4H6tdj7waK\nw/NEzQ3I9TCqwW+uwEbdQzwWVPRjD/OcJua0ZLQxvdH1vC6mtavlqXp3iRPEDQPItxCP0EBs4843\njOFDx9IDfVds40/BXw5fIczXizOE2cPHyPnFJcTAZHTMYnNBjAvm1RzZHhISiYsN0WScHJ5QhxVK\njbCxI4TIXvmIS1sj4zk6JtTtkpYl40mCzANY2FQerRJIA6nyiCTj4uoZkzwDKXurVzQ8PH6T4z34\n4Y//HwwpdWNJC8l7n/4ej/d+iVxGHh+9xenFOdEqbNdytVgy289wDkLqcMKi8kDrW7CeqANCeKrK\nY5IM7TrOzxqaeoNSgtA1pNMDlqsVaZoyGo3xrkOlJTI4TGogQNM4jNZMxns4BDorECiEq5EGrs5O\nKYqE0ASE8lgFSZINWeIBKTXOOS6vTgm1pVaeSTlCB0ehE4rxBFJDJEB9iRof4us5Ki/gwQkXzz7k\nvFpR+5aoUoiOLBljZI5SBo0hHU1wNrCfn1CtrvBtg809RrRYv6SqDetNzbpqKETKfqZJ8hRaT1XX\neCdItYDSoHOD8JJ2bVmvHND2OqdtgDjBhN6na5ctp6q3xMbHkmz0JUMCPr8d01vj0P/t/PfcU67i\ntWBexx7iVUDWsYd5E29g3g0g93EQbOhFyCUBJXoVHykjSofPulninfFV81dZ5p8H513Qqp29XwTz\nu1B/Vb+j73l9uAm3XSpba/x6FNfWOWYY9Q7Qd9L6XdRUsaSK+c1ID/ImZrRxC/MbSbi79cnjEKUS\nnYDNAPKKHup1JDY9yGPLjQvJbj+A4u2f7U/pLfuVwbxeVaiihLHg/OoTYoSD8SOsW+O4JPU5ra3B\nSnRR0IUVWk8ZSYl3LXle4qWC0HE+n5Pmjqg9gsDTi++Ryn2SFLxviDaj2nhEq0kSgykK8rRBSUFr\na0ReIZVGmZzcJLz11gnPPr3AItnL9khj5Ecf/y7H6SH7s7/EfF3hkhZlBJ1bM18IVBEIDtJyj8p+\nwqoRpFlCYVY4NSH6iOgEzVqQ5mNs11G7jrJM2TQ1eZ4RQhxk5BIgslouiFJjNEySAqShrhsylSCS\nhBgkSjsWiyWXyzWj8pjOtygp0UGilLgOO1sul0ilaKqGXOfkY42KhfllAAAgAElEQVSSCh8D3nuq\nasmk2Qdjib5CCIXIR7hmidxYvEx4cXlGkZQ8mz8nxo6WyFqm1M8ss3GCljnT0QSiQ4qWzIAIjuha\nWrGmsyUAvrV0RtB1ljyVtFVL5RzBRhK1R5IYatvgg2C16FivYTzNaBtPu+4l8aaZRKk1LxaCy2Vg\n77FknKb4pvlZvm0/oM+5+Ny2WeZf+ESyicgmoOp4PZd1RDUR2W4TaYYMSh93YB57EW8ZUSogVfx8\nOL9qfNW9z3OZbLvjJjyQO/t2ob/1mb8K2q8Ddb/zXLfgzQ28r61x8VmAq+HezuiC7q3xmFOHvB+H\n3sScJtwGuo0JNtwAPURFGGLEoxPX7pVrkNeDZT4AnTYOrzMOP1cEHwd1oeH/70sW2YKvEOYyV/ho\nuTi/xOiCxp1iZI5UgizJ8S4SbWBTdUhdoVWDFBJfGVrnQKzxHjZ2wbI+I+88SRbwUbDaXNCqDiEc\nytRIk2AliOChSUCsIWo6YWilYHX1jPEoY/7RikezE9548JDZ7AHf/dGf4F1gPD5iefEhlVyzrlfM\nV8/Z39eIg/5TuZgc07VX2I0lK2eMi5xnq4/YbDKEOEOokpE5xLYbqtWayQy0iUynM5IkoShHCNGL\nbdgAyD7NX5iE/os1tN6TpwnKBtAJSVJQ1xYfJKv1kjQ1nF9dkmeGNJP4GHvZON9HqAgh+eT9H7C3\n/4BEScajETrJMBKCkuikIFhLePEh6uQhoWqR2QlSdzi34YPnH9OGDRVzGntJtAVKtSgCbbPkRd2R\nFZpqbThIS7wO6CSy3nRoo9HRo6Um0wWZqRCyj/AxShG9oFpFmgr2S0lOwtp3hCDRWpGkKQRPXXua\nJiMxgTLvqNuOpgWpLTrTeN/R1l99oa3XsszbAeK7YxtRbR9zfZ0Z6eM1zGWMqDjAfAC61JFblvmX\nHXct821s911Ib4F/93hiu28XwHfB7b5g7S7Ed9wsuyLO91rnFtDi2s1yHe3S3VjmPcxV7x8PWQ/w\nkFPHjDrsWOdhAHkYLPNgeqs8DJmdoY8lx4ke5PXgL6/o50289pcz9DiUJ4gDzHs5vAhbtakv6Wv5\nymCeTkZon7E3OkFJzcv5KVkyIk9ybNeiU01UKy7mn1D5C/ZHe71F1wIxoFcdgYY8kwgV6byDLkEK\nwXrpmU4D0/GUZtWii4Q6BIRwvTi0kaxWNVompNqQmSfEdp95dcX76/d449G3+PnHDzi7eEH0kSQt\nKGd7+NhxcfUUFwJppsgyT7I/YZbOWF45onOwNlidM9Hf5tPLDzBZpCxaihhomwYpDN5XmKSAqPAO\nFstLtDLM9h7QejBC4VzAJCkoSVQJWguEytCpRKgMoRO0kXi/pKpaCK7/e3OOLFFsmjVSlMxGI9br\nFW3TIoDESNJiDIkmm8xIEsP06IS8mBClRhpBPH+KevsbxOYSmY4xLBDKUVUX7BcPqMoXzBeCJMlQ\naOo6p20tSoMcl1RR9oW0kITWYFeGRKXY0CFwpNqQTwS5UQhSZKLIUsl6vqLVkUQIREwIoSPN0t5V\n5ANNHbGdQBnHJrbM144qeiZ7KZmxtC209jXCAn/GbfMaMFddRAzwFt0QVz4AXW6vh7T6rbCzjLG3\nzBlAPlRSfG2Yv86eL3KzbIF+357d+3dhvhMF85n53T33AZ3+ueOOm+f2QSi9e2V7KHodJ39joW9D\nJn1Q1CGnDekA8Iwm3PQ2ZnQhpQsJXRj85UHjgsaHHcs8DD7z4bCzh/rgYmnCDtDDUGMmXFvlfbnj\nHuQ3WUN/RmFeZhOilxgDWifUc0t2BEqVLNYLEtvQdh0mFwTR0YgWosCLyHrlac4uEAh05oiqj4ow\n5Dg6LhctIW44nJYcHpVk5msUacmHL75HmSWExtJuGoIL5EWB6RRZ9iaT9IQPF1e4LsUYw8nhEefr\nBSo1JKUhxEiMniwU4KcItxwK3XdMUkmZnPDp2YKmrijKI/YnJ4SmRmayT96RkrbZMG8rjp98E4+i\nTErWm3Nm+0dkZYltWorxmBgDVV2j84SAQGlF6wM6LXrNSyfwIeBjBKXp2orMpHTdmm5jSJSgWVeE\nYoJ3LfiO45MnKCUos16ZqJ2fYaYTunpFahK8yEnzHOEFYTMn6hT77IdEmdCuKharDbk+Yn/6AGSD\ndxYtM/ZnJVeLJUYbpMlYVBWxbSjzFKNSvDXYShPYILBkqSMvFEpFsnREmWdkJmW9athUgVQbxgnM\nXUWW7DE2CXV7TmsXeO8JEuZtx/MLhxkXZJM+Br2qIHyFGuXbVi1fBfObNEY5xFDLjt4CHwAu7HC9\nHR0IvwtzBqucXnRZ8frgfp21V0Wz3AX5q+4NqkjXha/ug/RP2rcw3wH4rkW+e+h5De/toed1pUmu\nD2x9UDQhpQ0Zje/HNqQ9yEN6fa8NvWXehcHNEgbLPOzCXEI9gLthcLHsAL0NvVXehb5wnevFVKKP\nfdXMELhRc/4zCnPo9S/XbY1dbZjNDvsoDW1JE8N6syRJDMJ0xGhpuzmT4pCRzFB6Q7oa0TWeutvg\nsEQ/ImI5zmf8q3/lr7J/NON8/hSTWPYmh+hkwuZ3Lnn+7H2868hQdC6wqVoO9R5RbOjiirEZk6V7\niDAhFY5qs2acFRzuv4W1l1TtBVHnKFLiaoJPHRt1yYFOySZjriycPXtJkJfkedEfQvoUZRTCpDRV\nPYQGtiyW51xdveTBg4d4L3E2EPB0tiMSsM6zqjqKoiRLEmIA2wW0knRdS9dZvAfvBaenFzw42KNM\nJOt6jQkdwTmUClSLS4iS6WxCkqSDTBmYIiMIgxAKJzKy0QxbzzGjEtG2MH6boCr+1t/+X3m6/iHL\nzRUQsUKRjF8S2j20zBE6p+pGEBWuCzjn6AJIGdFZTmJSXGhooyWfKHKzjxrn+NhLyD1+/E0yfYht\n/h4vns5BCVrX0TUN48KgYkqDoakiyaCRut44kIFJ3jHWgq6NELbCaV9t27pZIp8NCrlOzhkyP6Xt\nk2Ru5jcwF65P+5c+9vVW4mA4DzXMpQKhue0K4Z75fWuvmt8XzbJrmQtu4MrOeqCH5e7h6Bbmu7Hq\nd69fNb/PMt+1yHet8l2g6535AO+4U+w9SghB0oSUzqe0Yeg+oQ0ZXUhoh/UupHR+C3KD94NlHhTB\nqz523Alo+rj42LDjK489yNst0HuYYz240FvmPgwVM7dfOb7cSehX9s5v6jXBS4pxSToKvZpPlFjb\nMipyrFWs3Tlap71rQAaMcTyYvoXgKUbXdE1O6ktWqwVNu8L5AqLhsDzgYLqHp+H09BPa5iUhrjk9\nvWS+7JBtINWKdXNBVDkr3yLd+9Qm0K3WLC7OmZRwOHnCjz75lHWz5tHRN6iarhdDiA3FWKNrg/Ud\ntoWXOnLs1hw9yBDigHZ9jpQSJSb4EEAphBQonYLwLE5POTh5TNt12CDwLnK1WJAXBZuqpihybOvQ\nuaHaNIzKKdY70izF+8hms7kuhbvaLIgEruZXUGjoVoOob8NCgU4Ns+mMGB11XeN9QpnlCJGiM4PS\nBW2zIQrIsxLhPRQl+CuKoyfoQvPjH/4R59UC6xOquuadnzuCkSc2GpUX5M2Iruuw0bNZV7gYEXiy\nQpNoxXpd4UJDOppQ7B0hpaJt5gitGI+fEB2cHD5mud7QWYtWBiUylosLpCzx0RO9QOUSbwP1OqC1\npsg0QgZSo0hUTt39f8DN8hoHoFtQC9dD/dZ4d93TVzzclrEVO7VZlPgsmD8P2l90b/cAdBfou5y5\nz+1yt5C64ra1fjdu/CdZ24W53wH4fRb6XVWhndfU64EKgpe0vre4O98Du4f4dm1YH+Z2C3S/Bboi\neEkIg8+8BVrRw7zdBfkwdr63ygeQR+fB+QHkOxlPtz4lf/L21fnMhaCzgq7t0CEgfMtydYGNgv2D\nMaNizPryChEiqdK4qqUtNoxHI9Ztyen8I2TMIE77qA1S8jzn+ekF/+Nv/SbTowllOcKGNfPVh6zO\nPat2w+HBAdkIfNvSdRHfrgm152t7OVYaLuSU50/fpRwZ3jiZsV/ssa4a1FCHqqk7gk3pXIvTCqEC\nOsuIHcytJU01KgetR3Rti4sXpHKf2jeY4Ai2wlqHSQPzy3MiCuscq8WSl2dnvPO1d7jYrBHs45wj\nhEAI0HUdIYRBVKO9zg5t25a6rnDBU6YJJk1oKst4kmFiiZCRshhzdXaGVKCNBpsivSNGy8HBqK/l\nrsZ9dUQDUeUgQl8/Yn3OxcWC+VXL1ZXDxo6jvcdM4iM6/SfY4gzaESbNCEJQVzWbbkP0kaauKcuc\nUEiq1YY6biiykqrZEOhw4Qy3avnej2seHn6LurugKBNCUIDDWsVq1aHQSBVJVIaIkvXG0jSBLHOY\nRBJMR5rmNHXFZmW/6K33M2/V+ot95sKBCGJwowywdqKvLe5v5tL190QQO4wVA8i5LqL1Sli/DtB3\nr+8LNdzd67kN/jCMuwejcufefRml92WYft79bSi9F/cDfQvxIUN1119+7WLZgbr3ErsDbusNnTf9\n2vV1P9phdMHgvMb7wSr3su9O9BmsQzx5P+74ygerPHZ+sMo9w9fpvhLpFubXXzd++vbV+cxzgUaw\nXL/g4fHXOL94Qds0LJoaITqyTCE6SeUtwglc3Vt3i8MzXLdCqjWeNW1dIeMErQJpOUIhePfjj7n4\nowu+851v8/jkmMYa0rL/cVMRiVqDKRmLpP/apcGmfdREZqFya+brK77OCSezx3z/0z9itfwAgkXb\nCcHDyf7b6HzCYvEJedS0uWC5OKe1l6RlQaoUraT397ua2rfoGIiuw9sWhaANgrSYsFnOEVIRnOP5\n02ckaYK3niRJqZsGrc1QECtyevqSJEnYbDYkScJ6vUYg6NqGfG+KBpI8pcgKRAjE6OjaDVpLzl6+\ny3h6iG9S2loi/JR2PSYpDlltVhSpwtcBiUDuTxEu8P6P3uX5/AW+TVDOUOQ5bx6foAaoJqklsiDL\nR0SgWq3BR2zlsY2HBpSLdE2DSCJVe4nKFwSx7isvxsDV5XNG5iF7BydcrVdM9voC2tFnVKs1Co0i\nJ0kNPtFUTYVODLNZIEhLjILVekPbGpBfvZvldQ5ApRfXkL6G9fba37ne7omir1EuhpBT2QP+C8H9\nqrX71u/CfNcKh9sRLndqod+a78L8Pqj/hGs9zOmBPsD/Mxb6F0A8DnHxQUusMwOs73TXW+Cd3xnd\njlXuda8L7NVQOVL0B663yhHEHuh2cLPYAeTWDda5B+8GkO8eBPwZdbMYJXBKkSpBrqcczN7hRx/8\nHoWSnL88J8sLmiYitKSOFhklvhWcnj4jTXOMDxgVaEOFFGOi7fBth85yjh4ck2YFF2fPeXywh3IG\naRTlpCNUDo/CiISiHKNloFKWdXAoa4l5ToJj1a5YVQuKLEWqGhvP2Ru93V9LSTmegtDYzQXK1zS+\nYV0tqdYr9sKax4dHfHTZYL3l5PgNVJDYEDGm1+J0zmKUQstIs5njPIxGE549e86TJ0+4urqiKAqO\njh9gkpT1pkIpRd00bKqKarOhKAqapsG5DpMYQvC0bUWWFTgUs/GYxcVLkrQlSTMm4z2C69BpjlKS\nLkiEKVDZBG2XBA8qSRFFTjQF4WrB8/MLLs8WBPrQw3fefoKUkXc/+hEhixwcSnR6RZ8/7QjWobuA\n0hqlAonUaKlwRiETgWNNYxV5FijSvK8Xj6AcpcRoOXlYsLFPSfWYJHfsHaZ0VX/wp5VkTYvwgSwH\nlWfY0NCsPUJqUlWQjL7cIdKfRqtex80ywPlWD0MN8FvX2z6AHdHDXIqhPvkr3CyvC/P7/Ov3RarA\n7SiWsLPn7gfAbljjLqS/zMgO8zS9e0NxOzJmF+A77qK4+wEDBC9xTmO9HkCth2uDcxrn9QB1fc8+\n1Vvnbscyt9wA/VodKfTlg3ddLNaDc+Ac0bveQg+Dn2ibzvol2lcGc7cJ2OBYLq443KvYLBdMs5zx\nKOKvPISEk/1HlPsl0iiW83M2qyuacA5+hLcajcc3Ehur4XDQopVGyoSjg0MIY7pGcjJ9m2eXn9I0\nkv3ZCVJcMp2uSaLirHtJUUq8ajBqhIg5q0XLqrnk/dNPSLOEx289QSeeKE+RqiBGwaY5Z1UH1osl\nZSYoxjWjmcTkCdMihaQjMRWrRYI+1ozGE4KySKmQGoIP+OiItBTJPhfzc1w6IstKbAysFwuUUmhl\naOoGYwwhBJqmoWka/OCC8d4hAygpsV1NUQ7iGSbBxsjs4AAhe7/y/sGbg+KShRg4fviEYnpEpN+v\nRECYAkwK1hI8/N0/+Ee8WL2HD57OW9LU8PJ8Tlg4Yj1hqSpG+xVGrgkxQ3lLZgzWeWIWKcuMQima\nNiCkRLkMu9aUPhKQZCPNeDxi03zI3uwEH1KqxvDp81NSkTFKoIsKi6IEEqFYJApjFG1n6fBkhSJL\nJCFUrBdfPcxfxzIXcQAz4mZ+Z21wqNxe38J8B+qfC+mfBurXL/LOfvE5e171uC2M75YD+EnX4NbB\nZw91ceMz34L9urojt8vlbottCYheXEPbux7Qzg1zvzN36hrofrvHa4LrYR6cvKm1ch1mOcSTuwHq\nbhfkg3Xu3WCZOwiWeOsrxk/fvjKYn6+WCKEIoeHdd/+Azm7IswQjChLjCcIwSyWjUYJJp0hn8I1k\ntZ4jpjWYBqEzuuColhtG+ZQ0k0xnMz599iHjyZi98T6N7/BVhWslWmfoZMzV6l1M2pInCbm2eH1G\nrhPSJNB2inxzzmVwfPDyJdPxG8z2RpgkkJqc508/RquWy8WCqoMZJxSqwGnP0dGIs1ON85I2GLJR\nwVh0nNbn5ONDyjRBOEtWltTrNalMSCW4ZolBUK0XmGxEtVzivO11PmNESomUkq7rALDWUtcViU5Y\nrVf9AasQFEWOwNG0G2ajkvXyikQKpuMxaZJispJRoodEqBGzo0cU4wfMVxuUjBSjKSiNGE+JXeC9\nH79LmmZs6sBqVfH1rz1CiMhqsWGaTalCzeXLQJJb0skGqRxkAVcJNm2ftJRISahaTHBoZTBiRCpG\n2Msldt3im5ZNGjE6JbgXzKZjHkwfkHUT5hdn5GlBenQIEdy64WrTkbuM9XqJLqEcBZQU+LqjnivC\nvPiq3tLX7dWhibtN7kC5Nx1798nOfNjzmb3iZq/gJ4T5XaiHnfl9Vvru/O4h6N353bXdQ9Ld/qr1\nz7sPt5KGdq3xW+V5XwFw4PpDJiqBd+oz3dl+DP5m7p3GO7kzH/Y41YtzOMG2hjuDqEc/BqIfIlj8\nAHI35KI4B972fQB67yP6MwrzNmpiW4Nsse0SKSSrtiasEtrGE7FURtMuWoqyResUYopbj1i6BpMW\nWO+RWpJNJLbdkJiCPM159PAxHz57n816wXg0onMdb73xdVKzx6OHb/PJsz/g6ccb3pqdkk0lXXaA\nEBFlJKswJ9eKInYs3Aa3OqWzLQ95yJPH3+GXfu1f47f/0W+wXm5w0WFSRx5TLl0fMnh0cMRitWJV\nBZTKeXAo6RpH1a7JzZQiy6mXDWmSYOuKKCJSdpRJQdSR6V7Bcr1mOjpgvlwyXywwSUJdVQgUVVMR\nY6RtWlpqvG0JviPVGoHAeU8mE6qqghhx9YZWg1EGPdIkWZ9lKwKkRU7TVaxWL9mbHiKkhDQnoHDr\nJS+XF/zo6ffQec5YjhkXhsuzc7wXdFkvQB2cptlE8nJJlk9YJhGhILaWNMnQDmQQZEYhEkNiMpIu\n0jZQNRYhCpbVmmY958k7h/iw5tHBAaOjnG89+WcYHZ2QZIHFcsEnH3+KXBjWp1eYqaHMYL1oWDfQ\nNQqxMhwlo5/1W3cG/DfAL9Jj59/ijmD5a1nmQvZWtZA9oOUAZ7mF9SvWBoDfWoPbMA58Psjv+sh/\nEgv+Veuv6uGe8adZg9uHn0Md8+swyF2o34rIEbf9+AKiFL3wtJN421vZ3qlew9SpQZR6Ow737uwJ\nTvaiHE4MB7PDt4RtZqeLN+GHfohgGQ4+cfbaMo+h47omwZ9VmEexT8wqvJ0TpSHJCmLdsG5anIOO\nyPvPFpSrhIPjgNYGFxzRC0KT09QOkVqKsj9Ob6oamc7RScdUH5DIj7g4v+Tq8oKI5O2Tf4qf/9Zf\nQKcaI0t8t+KP//EZ3357j/JwyoWt2D88IAqB9xXj4jFV9QFaejq7YTVvuCgu+Wd/+df5q4f/Pr/x\nN/8z3v3oI65cxWzfE6uCeag4nB7QZR2Ly4YyZuR6BjISYyBogWscidZ03vHg+IjV4gohPKNcE4Rm\nmkhi4khlzbQ0zK/OkDpFS8VoNOqzSKWkqjYgHCH2mZ9aSowEvKOqVsCY2ThHiD7MwHuLIaKEJEuy\n/m8terwN5OmItm2JxiBGJaFucBF+57t/n/nyDCklb765hxOeH33wCSFMEAeaDsgySbWEtIiUZUVi\nBEmI7E9KVAvSW2IIpFrjo0FagepqJrOMVEuuVjXNqWHhLeXqkqw2nJ53nJTvEISmsktkknB4eMJs\n9pB33/2ITajJJwdUi5rzp55qqdA+ZaQNaTb9Wb91/yvgbwP/Ov3fT3l3w+uk8wvZxxYKNYxSDjXL\n5c09KfvruHPN7gfA8Pi7AN0ePv6kwN29hs/CO9wz/6LxdedfdG8b3z7AO+5CfLffjai5Dknk2mfe\ny9rJAcySYMUwyn7dbe+Je9aG/VYOljn3HOD2QI9hC/QhamXXveK37s5t31Yg++nbVwZz28B4tE/s\nDELUpCYjM1Mq2WE7Rx46VlZx9nKF85G9gzFEjUgCdWMpTaBrJTHxBO0oJoaYrLlav0/inrA/fshq\nteHqbIOICS/PTxFR41qHSSOPH+/zw9MOV9XshwOeriEdaw72H3N19sfkSrKXv83eZMTF+pxN3fHd\n7/8eo3Kfb7z9Td568ktcLi6pFy0hNkzyEy4v32e9WdG0HoEEnTBfWYgCPe1YdWuC82QODmYHSDqy\nLCfLM7TSfe1wv6aUNetlRWNTnAs4nSHps8XqugIE1jX44AdFIYmQgRg22HZNUy053JuRZiWdawnB\nErylWl1hsoSMksl0hhCaqm6IEUaZRghD9B6lEz758EdkoiAnMp0KUmU4vbqiDQrlLGGIMJEGlDY0\ntUNJj4gWlERKQWoSdJBE2Yc5CqvwRpHNpuS5QXQN8QJMInhymLF/lCBCSleXbDhmtRZcvPuc6Szj\nna+VHB4e8HPvfJsHR29DKLC14dtHnmmek6U5RZJijOJv/MbvftHb76dtU+BfoJdKhB4ti7ubXivO\nXPWgRg9AVoN8kBqu9Rbkwzpbq1wOQebq5jngs/CTfLGVvr13N0Jl+3y7PvLPc4t8kcvky3R2xl0X\nyw684y68xZ35PWGWUQ5Sd7d6D+lba04QOnmjb3rfXhchiJ2fOQ493OkD0IMjbl0roev7blnFL9G+\nOjdLd0l3lWHrmrbdgAdlWrrGEtFEF5mYFJe3tLVjs6jJ0hlZmmHtCpnDfqpZX6peoKFoaIh0cYl0\nz0GNMVojSfE+8NHTD/jBB39MXuS4UDNORhwfzBAYnFsyDR4j93h88k0WL37A5fk5STlBTQpGyZiN\n9yAkf+fv/y/8w++mPHljn+OTx5zaj/ExYmTGyd7PocWCTFmsDqzqBqECsbNUwaN8QIgRaZr2LhHn\nCTEymUy5PD9lMioo0hSZH2Hqmvc/fU4iJY1d4/DUm/paXKJt++qA1jkSI5mOJ2Sqo16siD6yXl5S\n5iXZeIp0FussoqnZXF1R5iNE6CNOskyzWi0xegSmfzu05xf87nf/kB89/wP2jwwhOs6fXtBIGI2m\njJKS470Doo/sHR8gVMR6x8nhPqU+IP2VkmDh4uxTnHeMRiWrzYYkzZlM95mOSvKiwHtYLzY422Hy\nlOm+JgaNCIY8LSmyjBgjzluC9IyLMdOjMW1d01lPqkcUZoQuJCHEviSB/3LhXV/Q3qEXpfgbwC8D\nvw/8u0C1u+l1LHO0RGjVQ1tLuJ73owjD9VaVeAA4UfYf+tusIa0+3xq/z43yKojvwvNVGaD3hRve\nNw/3POdPM99es/Nv7EL9jn/882B+7TuX3NRv6baA3lm7jky5c6+7s2frHdlqeF5/K4lDlvUA8bgb\nT+5uIliC+/+HZZ6nkvlyyeXZajj5PSXI0PsRVUpsFBrBaJSi8xThW7RosV6xPz1kNM7IVWRmz1BX\nNeeXgtrUSNMS3SldXGGkZP9oQrNeMRoZfvzR93FxQ2SBNJEiSQjOsmwNozLw1uOv887xN/kgpgTf\nIhqJbTXFaIIIK+rWMS41p88/wNqnPDp+h7IwWLsgSUak4iFQMxaG1fyUw6OE+WVNrT12E/FBcnJ4\nhAmKSMC7jgdHD3n+/FPGmcFIRZGNycuC0cjROcnHp5dcLWpUlrJerlBaUpZjrLPDGz1yeHDI4axg\nfv6M8WSCTwWJimBb0mIPHwXONkTnqOZnVIXh4OgQ6zw20JcFTgswCqlLPnj3D5Ey4Wsnv0AUDusE\n+1mHIOK94mB6RJZmTKf7HB8cIIVEKUWRFZhUM5lMIQZenL4kxMBkNKWqK7RW5HmOsx6hJUbrvvxt\nvWEynjGajAgOnG0IMZAkhjTLsRHquuqTLLxkNnkAApI0xWhN09Sslmu8d2TZ6xw+/tRN08so/jvA\n7wH/JfAfAP/h7qZ6/d/uXP154J/+7DMZNfQByNt5UBCuc9BvIL67trsutyEa3IQMXoNwIOG1mk3k\nOsnnriW+u3ZfwtAuyF838ec+65rXXIvis/fu/nu75QPugzp8NmRyO94nmnG33vrr3HNh5/e7+/rj\nTg/cKve4DUW8lkbafdK77cOhf3H7ymA+GhUE6YndhGpRU7UO5yVaRbKRQKea3GhG+xlEj1072mpF\n6ypkWeAzQ+0lE2l5eOh48YHCiBJPS72JOF9DmlGMFOOy4M23v8Ev/8Kv8b/99t+i3hTURrI/nSGb\njrOLK6YPBVpbzs6vuHq5QuWB/aOSLEnxrmOcj9mbvcHT8/xNrgcAACAASURBVI+ZlhnNesWmfEmR\n58QQ2axPUcke072vUXUfkpqCg70xJmR8/HLJfjkhNCnBif5rnpCUxYzF/Jw8SynKjDwfkeZ9JmuS\nek6Ojlg3HWeXK+pFR5pmeBvJsrKvuhgCWkORpDx78QllllFXLUVa4l2Fjx0ET1GM0VISbceoLFBS\nkig5RLUKtNAIY0AlxKYmG+1zvL8hN4ZHj0+YlHssL6+onaWqa5QKbJYboohcnL7g4PCIMs2Ig2BF\nnuas10tOjo6o6pY0MaSJIcZIVfUC0+N8hBACPS4oy4IkzUjzFO8cQhhM0lvZnbP4EDBGodKEEPo0\nfmMM3ju6tkPQfyChJPFLxup+Qft06L83XP9P9DC/3eRf++JnkoO7hP4g85o6kRs4hAG+PnJdnEWE\nO3Aa6HGfVf4qN8fn7b3l9+W2xf257pT4WUt6d35tHt/zu4h3Pz3uedzuc0Z2fld3foZbLo+d17+T\nPXrtf/+in+/u7+ren+tn3d4e+rb99it3fmUwDx38v+y9WawlSXrf94uMyD3Pfs7d6tbS1dt0z0zP\nxsWkLZKWBFmUZenNy4MhyIZfbFgEBMukDBgQYHgRH0zYD/aDARu0bFOSZWhgQbAgUeIMRYKkOZzu\nIWfptaprv+tZc9/CD3nurXNv3Vq61hZdfyCRkZGZceLkifOPL7/vi+8TQuKaEhmYzNIE2xQ4roll\nNn9OJQ2iecosjDEijZYlUkkW4VXCRYfAcUklGHZMXmnIXQZWD01CVFfEaYkWmqCV8+rrX+DNi2/x\nbv9dPp4cUFkSy7GIkoTZtEBaNYeHN7ixv8OcGUHZJoxC0rqZ+jd6QzZ7Q3Ym1xit+xR5SVlNcL02\nKvBYTEPqaMJwuIVlDqmlQlcSXWaoGnqej2EHeGYLz7TRRUwaRSAk/V6HKoswTRvHsjGMo3yfJoHv\n0wo8dvYOMC2nIcJ2F9cP0GWFNAR1mVLmFaVVE7T7iDLGEDVVWbGYH2BZGziuh9Xu4rk+6+fOYbaH\nFLGmyiLavRaGVM1q2DhHKhPT0Iz6fUxM0jii0+twLgjQGizL4vDwgDCMmE4n5HlB4aYow8R1HZSS\nGIZoVtvVFWlWL0MR6CZIlCHIsgxlmgRBgKWsxvBcVw05G42knxZ5o0EQAtM0MZcrYdM0pizUck2B\ngbQa46eQjQvnM8QOcAN4A/iQJqn5D+65ypD3VN2DpWfKMaEj7r6unyAofXer6lM64OVFj0riq23e\n79w9hjzOILv73H/EcveQ32kiFyfPnyD608R+n+OjSe+YdFcnwFVCF3f7fLR69Cj8wOnvdXS8+iZy\n5vd8VnjQpPZwvLgcoElKTYYhbGpRYlmCWtQoS1BVKVleUtYG01spO+OYbtvFcQ0KXRMlMdEiJHLa\njfQ+chgMExYHFdLysDxFGo0RpSKJSqTSXNx8jcLQSBVT1Bll5aNsGzGQBCIAXXMwzcjLmwi/Iicl\nUIqsOiRMasJ5Qh5HOI6JMgNqb5mjsgoQwscz+8TzEjMxsewtLAZ4lYl0EoZbNp5tkmQlpiHJ8gTy\nhFpW2GYbwzBZZBlbXkBe5JimpMxydJXjOza+61IUBXHcLOE3pMI1LUqpEXVBNE/odfrUlKTRgmEv\nwJQOqizQaLIkJeg0Komg38b0ArSQ6DrDFJqySDHtHjpNyOKSKAxRyqLdbrOzswvUdDpt5vMZnudz\n8eJFoMdoNCBNNxmPx0BNWRZcvfoJnU4Hy2qSREgp0Fo3C6CUiW1bSKnQWjeJJ5YhCQxDIpVCCEFZ\nFGRZimGZKGVRFjllUZDHCRpQjoXWmiSOGkndMgFwLLsxCj5b/Mc06eIs4BPgL99zhXiEPpwg89PK\nXU4S55FkviqVH1/4AGJelZbvS/Cn7j8mtVNEfkTsJwx9q+3qewnvLEI/QeRH5dUvdVonckbdahLk\nYyLXK+S9JPQT0rdovsPRIquzVEWf1Xj7WDhrqezTwYtbzq9t8DRxkTLbX+B5DnGYkoc1wqiwnAJT\nCjzTxjGalZOVrqmrGlFAXTmMsxgpDEaFQ6dvkdopyrZpGz32d/eRssZUDroWUFV8cuUjDmdXMUTJ\nIk4QUrHVv0Amr6HTClSJrA2CbpfZbIrWFVK7tA0LzxniygFdy0GLEtMysaWDUSs8s0XiZRTdCFm6\nuNLEc5ookFvr5zGlySJekIcHGK5FIUqiOKHtOkipqABpOk1C5yzFtRRxNEOaNo4lsRT4nsdsPscw\nWmRpxkF+iO3YOKaJZUuKOsNAI+qcLAkJugMMZWGaCoyaqiqwnC5pVmE6HnWtmYULzKpCYqA7A5hn\nXL/2IQezmIqaJImodQVCEEUxpa6h1nzwwY8wpIHnuaAlSRKRZQVB4KO15vDwsIlo6HkEQYBpmvh+\nq8meJARKmWRZRpqmhGFInMTUVY0ftMiylBqYzqa4rsfW1jlM22F/f58kTen1upg1aCqKMscybQyt\nuX37Fmma4LrPfNHQ94Aff+AVjySZi7tEvkroq5L5ESmJI6n8yMpJc4E27iXRs4j1tPT8IFI/Tear\nx2cSub63DZbleyyPp8j7M9WvEPjRNaukfkTkJyZCcVcir5b71ZgxDyPys0j9sfAopP3kpP7CyNxr\nSXKng2sr9u4sqPMaTyqmYYwhJJ5j4VgW0jcZqi5pVhKXEaOWRyEt4jBDSYusyBlPCxASt2eiqBDK\npEw0tSkwqVhvj7h664/Y340YjxMczyRZhNzZO+Sicw6lXdygy2ZrDVmPUEpRtUBJDxMTpSWtdg/b\ndYnDOZ7tEscxcRrjByaWq3A6FnnhkGcZta6bBR3AdDpFCIEhJY5joZSi1haeP0AZFe1Wi6tX3+fc\n5iZVrVks5thL42AT7ljjuCZlkaGrGmVIXC+gFXhUVUmRJETzQwatNnUZIZeqhqJICPyAuq7xvQBl\nKGrDoOU5SMdFGx79vkVVpTj9HjU2eB0mqeCTT68yHI7IssZ1UkhJYAdgNe6GcRJiOy6GUAhDsL6+\nQVbk6Kqm1WqRJAmW1ei3oyhGCEjSFAQUWUGrFbC+sUW4CJFmCYZmZ2eHJItxfR+E4HBySKssCOYB\neZqwv7/f2BkCD8e2EELgeU7zW1UVfsslaHmU1ZMtvHgqeBQyNwTHCVpPkBV3yUnQkNHRJdVR5fIa\nrU+qMO5L5Ksk+5BrV4m60nfJ+5jwVupW7zndHpozyfi+lsmHWSxX31xOT3wr5dNqloqloXQpqR/N\nm49iD3iYVP7Y5H4aT0c6f3GuiaVBEZcErYq339rkD353j0HPZLgZIAqN0hrf9hHKYDGOiGYJltB4\nKKqgGcThLKOqDKRZkxbQNtv0W2ucH73GT775DpbpUOka13fw24o2GU7l0PJsFDaOqej3+nzFMHFM\no5Esq8btr8hSoiKjzCvQUFcFWZxj2QoUhFnc6IURy3FcM5tOEctl91mRM+j2yIuiEQjKgrysMKRE\nWS5VlpPlU65dv4oyHUxlkqUlRZFR1RopLbQGQYkyJI5lEcmMTreP5zoIoZDSYBbvU5UFeZ7jWCYt\n32niEFUa0zRJs4R4PqPTGyC0RgnQroP2N7D7PhoNlochBHk0ptUKeOXSq+R51vRV2Wg0uU4QicTx\nXVpmF0eZRFGEYRjYjokjmrylaZ7TGwwY9AcsFgvSNGE2nXL7zh2EEAyHA5TT6NYDv/GDPzg8oN3p\nUNcFUgkW4YIsjTAQ7InbgMa2FX6nhWlBtnz2SRxjmiatdgfbspt46vnngMwfSc1ymszhHslcL0l0\nFcceEmJJWmKlnlNkc5qszyL1M645IYmf3p9Rd9zGWaR+mshXiHj1O59J3GfU6dWNk8/heOMucRsr\nJF6yNCSL+4fdfZARlJX9Q/E4kviTkfqjkrkEvkNjyf83gD7wd4CLNH4z/yYwXV7712mWOFfAXwH+\n8VkN7t3Zw23V1KXL1nrAK68HhFGOtmp6noNvWhzOm/gjRprhV2C4JkkWYWHj+XaT7GGegFfiDzu8\ntfHjXL7wBr4bYLkW/W4fpRSGgCLP6VoJ51stiqpGSOi0OphKYihJlqZYlotlKcrSI44jzDQjT3Py\nsqQoc9I0IbBsirwAAwLfx5SKOI5xHIckTbl5+zZBK6Db7VKiEUriWA625RBoTV3XlHlOJjVlXVBW\nOYZuVvSlWYjr+pQ1+IHLZD4HaFRLosZxPaTtsr9/SIXAsS2qssCx3EYvbRjUZY2QJsLS5GWONBS2\nbWEqE9Mx8YIuwmtTRzmiO8AQ9nIM1dy+fpOD8YRwMafWJd1uH8duAnyVFRyMx9i2j+1I0qpGmSZp\nmtKSkslkwu7uLm7gs7a1QV2WCKDdaeO4Dq7vYRgGjmNj2xa3b93EsmyUKVHSYDjogdaUVUWZ5Wys\nrVNXGsexsSxFuFgwmYzZ2cnwXR/TMqGssGybqq6Zz+dorXGchy/YeeZ4JDULSyJf2R9LnNwlEWgI\nqLngpERusEJq+viSM1UeJ4j+AcR/TNL3IXN9xn6VxI87sfJF9SmCPkHyp889gMjvIfVTq1/vJ2Ef\nEzv36swfJpWfpWZZLX8mPH0CX8WjkvkvAD8EWsvjXwL+CfDLwC8uj38JeBv4t5b7c8Cv01j+61Pt\n4SuPjmtitRx0pXjzCxf54cfXqPOSrEo5t9GhqgS6zGGo6PQ8Ku3h+gGjTo9Rfw0tIU5TiiLDkHBu\n6xKWKylESp0XzBYaXde0gi6WZdHr90hik2TRkGRVZkihcJwAw/UwjCaAkZSCXr/PbDqlFbSZzmbc\nvnKbXq9LGEVIKREayroGXeLYNmVVYTsO2+fP4TgOWZazu7/H9vZFep0BdaUpykbyzuqcuoJFGGKJ\nmm6rh2FIXMeiKjKUKSmKHCkVNTmGISlLTeB1kMohLWom4wMMITm/EVBkGUqD1+6TxBFuOyBPYwol\nabXa1GWB1hpTKqzhgEJbqP4FSObgmqAbdclwY5tSa753sMPtWzf56JMrtNselrLpBi0s32f/8BaW\naeN5PsIQ1FVNHIVYpsnbb71FXhaUWc7O3j6mZeIHHnmaY5kmjuNQFjlpGJHmGRpNHGYkUYyhJHme\nYzkO0jA4v3WOvCip6+Yf1e+P0MogXCzAMOh0OrjKhOWEaS/7Yy6NoS8Uj6RmYYWzVv7Qq6oLBMes\ncUSYhribku30QqHV61bLn+XcsSFxZX+i7pQ0fk/dqnT+IKJ+jO2YwI/K+u7xCcncOEXk4tGSZjyq\nquWJcVqNdLr8eHgUMt8G/hzwXwJ/dVn3F4CfXZZ/FfgWDZn/ReDXaDzgPwU+Bn6CU4GIAJRZ4ak1\nbLNm7zBmNHLY6I64fuM2rrLQVYvLGxcR2NhegGE4mKaDZXsYAnzPpdcfUpQlhwcH7I93qOuCNFlG\nmBOCLI7RNH7YbuBimgZmy8exTYpimUJNG024Vg1pWlBWJb7fIlmEzBcLkiSh0+nR7w9wPAsTn/Fy\nMijLgqLWpHmG67o4nks5T8nzhMUiJghadNtDXNdjMZ9hCIjCiDCck4YxluXT8y1cyybPciQGSkBd\nQVrnUIMhKixT0+m1sZ0en1y7TpoUvHr5MuPxAfNZySiwMMgB0bjv1UBVUeQ5UKFMC8u1sSwLYQUY\n3hDSMfNbe5j+Ie7GJdAmrbUN/MEau/vXmUymOFTYtsV8Nm0STO/eBgGeG9DutKnqCs9x2d25hWna\n+L6PlM3CIMc2qfKM2zcOl/ptl3AxQxgwmUxwXRevKjA0VLqmKjTtbh/PO5LgHfI8b3zTowixjE2y\nMVzH83y8VgBo8jzHMExMy0PYakU6fIF4JDXLyraKVcm61mecOyL15b2GPnlutY1VCfxRy8dS9+ny\nCnmvkvsJ8l6V0I8mo7OI3Dij7uj4tHfP0bmj+hUCP2H4XF6zOhEdEfmRzv+I1AVPR2f+SDj9Az87\n6fxRyPxXgL8GtFfq1oHdZXl3eQywxUnivkkjod+DMJnTdbbJqoRkHnE7Kxh01+j5A86PXmW0dpGe\n20Eqjee2KKuCstaYliDLmpVSZRlTVyW+byHNdfI8J4oilFJkeY6oKuqqou+3CGwHMwhwPBdDmEiZ\no5RCCkkYxcRxTNDrHE8EeZ5hGAa9Xo/FYkFd5ZRJTZimlLpCa02RZiglEUISzhfUaJI0pCproImx\n4ftBkwgizzGMZtn50d42TeI0wcSg02pR5imGMijyFMdWIBWWMFCGxDBqTNMijlPCRcLOzg5lVVD4\nDkUlsB2boqyRyqAsCnrtLkpalJVo/PcdH7/dQg/Xqe7cwXDb/ODKB1ze2MLtbiA8q+EIQ/Laq+8Q\nTRbkeY5tGvRaHaaLBevrXWpdswhnuI5LHDdeLL7vM53OmE6nrK2t8cEHH7J/uEuv36XX6bG1eY4f\n/ugHWJai2+0d5y6NogXtoIPrNBEwkyRdujA2qivXdRtylxJpSKoipa5K4nBBGscIIXBdl1xkVEUG\niYHxKFLxs8Yjq1lOqSX0su7o9f7YeUWfcU6fbGOVmOFeKfy+dWeUVwn7iKTvIfMzrjluf1l+KImv\nevEcleu7507cq1eu03fPa5akbtAkRl7ua6Mh+SN3xKOPO03mNfeS+hFhP8ib5ZnIDE9G6g8j8z8P\n7AHvAj93n2seNledec6SHbTQpGFKHieEs5RR+xKvnP8y50YXMaWirHKUsKjqvIlkKQ0s08Z1vcaL\noa5AC3qDDrZpkeUZi3BBGIbkeY40myTKu/t7BK0AVdnoVFDVJUI0/s/TxZQsz5nOpiyiOUEQoMsC\nIQRhGOG4JUkaU1U1cd0QuTIaclSGgWkq6rrR9VqmTZK5WI7N4eGUdruNZd997bcsC9u2CMMZSpoU\nRYxn2Qy6XebzA9peG8e1CGf7tN1t5kmMUja27RPYDrMwatLBIUmzDFMJXNtEKontBcR5hG2UOMqg\n1jZlXeBio7WmKnOktKgLhVo7R7y7QziZUYzW0JTHsSRqQxN0R1RlSRjPSaXB9vYFOv0BcRRSVRXz\n2Yw0zWi1u7SDDqal2NjYpKoq6romCAJms03KomJjc528yEFIbt3ew/fbvHr5AhoYTyZMpnPoK7qd\nDoZhLBNON5OlYRhIJcmLCseW2G6bsiop8gzLsknTlMUixLItlBLkdQ36Ho3e88cjkblebsvy0d9I\nr6hW6uU5vXJtfeq+YzI/i9TvQ+gPuu5Y2l4h7NMkf/rcav3RdvwZq5L2EUE/Qt3xbLZC5kfP5lhX\nfkTmRvO718aKVK6bCe+oLMRyAhT3SuZnxZR5kN78vmz3ICn8fueen878p2lUKn8OcGik879FI41v\n0KyI26QhfIBbwPmV+7eXdffgg/cPuMIcTcFw3WPUG0GuGK2vY1uKNM1QUlJXNUVVUlUVpulgWkdL\nuSsEAtdzgZo4jSmKgn6vT57nuK6DAeRlQZQkfHL1Kt1OB6Vkk1VnKQHOwwW2oZZudM0ilL3dXdRy\nuXgchcDdRS91XYEhcE0FhqSqGuuKIQS2ZTLsnSOMQ7J2m831C9R1yXSpVqjqZiWkMi3yusZz2/Q7\nDkneBM2q6oosyzBEozOvqwplGpRlEzY3WkxZLObEaUaeZyg0r5/7Ar7XIksWWMIgTmL8fpc4ivAd\nlyRcMNrYQCkLGWwgTCjHh1RVSp6nTGYTtqq8EXqqCuoCv9ul2x8QJwvGkwlKKQadPsPhkA/f/wDK\niiov6AUtaiVI05R+v4fWirKssG2bIPBJkpitrW2yLMP329zu7XDl048I05hXLrzC9tYFpt6MPE9Z\nLBYEQYBS5nLSswGYzxf0BwPSNAOtGY/HmKaJlE1MFiEE/+zb/5zf/f33UGbzO75wPJKaRa+Q+ApW\n1UTHJL68Ttzn+DQxH+/OOHc/kj9Wt5wi5fo+9afJ/ZjEV8ur0rhx8viB9fWp/Yo0zt3u3nVHpCFy\nrVekcn2SxI/8zY/mhvsR+INULSuP957yA3E/wn6+OvP/bLlBoyP/T4B/l8bw+ZeAv7ncf3N5zf8N\n/B/Af0ujXnkd+H/Panj9NYNO7WHkOUHQRno9AtdD65wsSzAMA9drouZpQ2Aqm3YroKoKhGi8QizL\noSxLomiBspqFKHVZUOYZlmU2QZ3yAsOtyMtmBaXW+tgH2jRNZrM5lmURBAE1NVIKOp1OQy6+j207\ngMZ1XbSuUEpRFCVHD7+udeNHnWSkWcbB+JA0TbCDNmmWotHMplN83yfLcg4Px3iew6XLl1kfdWm7\nHsqSjG/+kHC8j65LpPKI8hxTKrIiw3YtMCRZESOkQJkCkPQCB8uxKfMMkcVY7YCyAGV7lEWGsNsE\nLZe81AStASLwEAwwWym7N2/yxS9+hRs3PmJ6sMcgWEMYBtn+PvbaBYRSRFFCrzdASsGd/R2MscTz\nHUyzcUfMy4yiLMiXRmgpDba3LxKGIWmaopTiRz/6EZubm/T6fTrdDmujHkWR0261kbJZASqExrZt\nTNPEsk3KqkRJgyKvcF0X0BhokqRxhTw8PMQ0TcqyxDAMfuonvsGX336D8XifPC/57/7HX32sP8NT\nwyOpejSIU+KfXjl3xIVHBClWRESxcs+R5L563ypZn0Xk8IB7lsS3SsqfdTvxlnFav7EavuCsMIfL\n8vFkcPS9jTP6dJY3y5F0vrKtkvgqmT+KAfSZe7M8qP6z4bP6mR99hf8G+LvAv89d10RoPF7+7nJf\nAv8h9/naVSUQniSva6KiYmT5SGWSFzWmqVFKUFVV4zkiBJ12gOd55HmGZVlNSjU41ptWdfNqnmQp\nWmtsx8FzPfYWeyitcW0HbYjl/6OJW1KVjW/5EbkHVuP7rIXG99dptVrkeUGeZ/R6PbSumgUxSjGZ\nLUizlDxr/LGjuMBzLWohUJZJXVXM5zNc12MyHpMmKVmeUtYJtukx7HcRwiSuNZfXthkMh0z2d9j5\n9AqLcI8iSbFNC2k2ahphOZiWy8bAxpCCNA3p+DaBbYOusFyPrDARVhe7dY6W52PLmixPWW8PKBAY\ndkCtJcXigG5/wP7uLkWeES7mDKhAGEQHM+xhTRD0WVsfEUUxQdDlyiffRSrJcLiGY1tL+0SItCxc\nN6DT7lIUJVEUYhiC4XDE/v4O7XZjavmj9/6Aoip4/dVX2d7a4tNrnzIeH6DkctIta6BsJok0o5Tq\n+BnmiYdlW+R5M5Hbjsnu7g5SSnzf586d29S1Jmj1ONftfsYh/QzwSGR+9AaxSlZL5jhWJ5xmjxVC\nP71/GFGfdXwmoZ8hYZ8pedePcO1pMj8K6Xuq7rQErh8gkZ9+HMePRZxB4vrk28wRZ94vNstTM34+\njJyfDal/FjL/NndDdo1pggydhf9quT0QgeyRJ5pagGE5KNUEV3JtD9Ns/uBH0fGOJLckibFtG6UU\nQRBQVY1eVeuKrMhJshRq8H2/Ua9EIZbRrKZsd9oErRZlVRHNp5idDoEXYDpNrJM0z6iylCzLlkNQ\nE4ZzpJTYts10OqMsM4RhEEcR48NDhGFQ1BU7d3Zw3ICpkrTbbaRq4pKE0RRDKKqqZuf2VdKyxHE8\nhDIIw5D1rU3WhptkpSZLQsoy5e13vkKY5Fz54I+gzprkEqVBVYtG6vYllqFwgjUCUxEWOb5lY7QG\nnL/0Jlpr3nj9DfxWm2QR4gc+ZplhVBnakBiipk5LxtMpeR6zP57xSpGhkWSLMX7bBAFtT3Jx+zz7\n4wMc2+TrX/kav/fd72BZFo7rQV2iq4osLWi32sRxQrvdpaoz+r0+k8mMw/E+W5sXEMIgaLcoy4L3\nP/yQn93cxJQW0+kU23FQpkTKitu375BmCRsbmwyHI9IkYzKZkGZJo36xrUZtXFVUVUVeNLaNwXAN\n23Gw3WfuY/4m8LdXji8D/znw35+46lHULEfQK0R+LMmekqyPCa4+VT6yETyIvFcnBB6N6I/aPIvA\n70f4nCL341eLIwn8YdvyOehVEj+1rerjVx5NQyL1vaQujkj9qCvLe1fdFp+7a+LT05GfxgtbAfqF\nr32ZDz/4kMnBIW3PBMNokjD3+nh+oy81DLAsm7qujklVSklRFLiuS1Ek+C2fLMlo2RZhFGKIxpsj\nCyM21kZo32+i+y3G2JYiTptQsutra1i2gzQVjuNxeHgArkNZFNR1Ta31sb44yzKqqmKxmDEPF8wW\nc0whl7r8GkGNKZv8m0mSLF3xPBaLOYZQHI4P+fSTq7S6LdY3bbq9HmvDPptbF/BcD3RNns6I5gvG\nxYzO8BW2Lr+OQBAv5ly7+jFJVhKlMRZQWxa2YeF12nQ6Af3hBq+//jp1LSiyBNv18fwmxGynN4BO\nD7IURAwYlFpTJBk379xmsoyjQrFgsnsbXRVsAaNhn8P9G7TbLZRSDIcD3im+ykcf/5BuZ0i37SMN\ng+l0glIG3V6Pvf0bhGHIwcEOk8mMujKIOzFJktDrdZlMxriuyze/+U0uX36NOIkpqpyyyrBtG9sJ\naHdGuK0WO7uHDAc9Rmsj0jRjNBogpdnYMLRibeMcldZYlo2lFLquKamxlhPpM8IHwNeWZYPGHvT3\n77nqUSTz1VWP+siDQ5zxKn+aPGua2NhLIterjHN0PQ+oWzl3Tx2cIOzjff2Yx6cl8AdsxyQum/uP\nnenPYNITEjlL90MaIhf1XS8WcUTkK4QO9+rMH2QAfapqllWcNoA+e535M4Nt+pzbPkehCyzps742\nYm19k06nhWM55Hm6NHoq+v016rqmWBKtYTRGwVrXjX3DNAijkDwvsE0Tgcb3HSxTEScJW1sblEXJ\nIo7Y3bvFue1LVLpifXODyWRMHM+pioSy1riex3QyI8tSHMchjo+8K+rmzcFx8YOAuqoJ4whL2ty6\nfR3Ltmg7LrNFiBCC+XzOeDxlMY+4dutTrl67RmfWZ/v8ZXrdPkI1CZE7rYB2p4uSFXYdM7n5Qw5v\nfoLV6oA7xO0oLK9FWdyiLAt01hgCbctESBNp+7iuR1VpsjTH9VzCJKEChqMRwm8jDAucFmCgRYXj\n9Tg8/AFXPrpGVdbcurODZX/I9RvXcRyTIg1JkxDXBPIQ5wAAF8pJREFUaZGWGUmSYEh49bVLjSGS\nCt/3m2BarTbj2ZSvv3KJGzc/JctS9vb2mEzmDPoDvvvu7/P2W+/Q6/VI01sYhiAIAq5cvULbDzBi\nQeUFeOsdBv0Otu1g2zbDTh8hYL5Y0OkOieIQ1zEZdtfJqZDLxAyO7RLGC7Iso+V6p1/InyX+NE3U\nxBv3nHkkMuck6Z4OLqXhhCfJsWpjdatOke+Jhk/uH3Ren65bIecT5RXp/JHKq2R+RoLOEwS+ypir\n6pXVc6vdXHFL1Mu3mrpeEnvdGD5XiRy95Et999yDDKD6jP3q59+DxyHjp+vR8sLIvCxqBq0Nqk2b\nrgi4fPF1Oi0PanBsG6UMsrTxO87znKqqcBznWFLOsox2u0We5yRxSJHmWFJRFwVFlnFxe5s0Tdg6\nd469vT2+/MV3sGybazevc3PnDn4rIE0TxpO9Rh0jHfYO9xgOB5RlRVk2HjSu65KmCVobZFmGFII8\nSrADj0F/yOHBIb1eH9/3KcsKy7KoqqrxrOn3MaRk+9w2QjchA9IsJIwWCNFid3cX3/dJswRbmbQH\na/zovd/mlddeoT0coW1JmgQYqkVdFwgkQsD+/j6j4Rq+32Z78zxSKrK0QAO24yzjlbRRpg3KBGwQ\nFaAR2qBQDr/x7X/KLFzgWYr33v0ON29f5fz2RVqtHkkW83vf/T2+8ZWv4/tNaIL3vvcHaF1x6dJ5\nxuNDwvmctbU1zl+4xM3d2/zgB+/jujaO0wIdk2UZOzt3sB136bdfYJk+RZkxGo24fKFNdzgkK4tl\nRqkWnucSJ8nSzzzk5s3rmJbFm+vvoEyH69euMLNsBoMBlc4xlAJpNT7oZXnsEfOc8G/TGPvvxSOp\nWU4TqT5JUMfcqE+5BK4SedUcnyk6Lvf3JfEHlPVZTLZK0qcNt/pU/aru/yy9+Gli16fOPUQUPhG/\nRtyVyI8WCK3qyauVe8+SzB9G6Pfb7ov7SdyPQtjPT2f+VPHJ9U9o+T6e59P3+3R8n7IoQGvyNMG2\nbUpDUpYlZVEgaBIUKCmbSICBh640WZxQZgVpHEOt8VyX4bltqDXSLEnjhLW1NWzXYjqdUGQp22vr\nzCYz6qIgizNMw6KuK1zXYz4P0Vofe2OYlqQsCzwvaF7z4whDKvKsoMhLer0udV0QJyl5VaOEgec7\nREnaREC0bUaDPt12i7Js0pplSYFr10hRo9FYsgmaZXs9zl36El5/C7vVxrZd4nQPw7bIsoowCpGp\nQkmDOF5gWzZCSLTWhFFIr9dBaE0cRwx6feKDAzzLAdsGLZchQARe0OFP/9m/yPfe+11MU6CUYmN9\nm9FojTRNCVo+3/j6T6BMk6JM+ejj9/HcFr7vIaXBcDAEXbO2tsnHVz5qPFNaPvP5vGlrY30ZgqBg\nNFpjfX0dx3EZDke4rtuonwwDrTUqV7RaAaYyuXHzBmkaMxgMiBYhk/GUr37jxxAVWMrADwL2d/dI\ns4wg8DFNkzxvJnYhBLfu3CSKFs9j+Fo0MYp+8cyzs//ibtn5OXB/7t5rjuyfYlVXLk4SVc1dMj8i\n8aMEwcflkkcm6Ucq34fI7yuu3m9/WjI/InN5qnyazM+SyOW9XTyWyle2WjfPU4iTPvhi5eajbt3P\n8Ln6Nc4i9aeCz0LaV4Crj3TlCyPz69c+pi40a4MuG2+O2D/YwXN9BoMBQatJFiGEwFDq+GctiwLK\nnCovmU0nFHkGNY3u25BsbK4TBI3Xy+2dOwTdDq7lIASMDw+ZLxZ0uz2iKFr6lJfH+vCqqih1kw3H\n8zwMIYnikMlkgmlaTCYTyrJoVo1KRVkmlHVFVdcURcZ4MqXfHyDQYEDQ8rBsxWw6J45jDKMJTZtl\nGWEY4vs+htn8gQ8OD9ncXEcIi/VX3kQbFloqsqWEnxU1u/tzwjCirits22F39w6O6yz93yWLcEG1\njMq4tbXF9HDM1sULaMtFaE2dz8EOltK9QVWXbKytk5cFF86fJwhaS/tETTSfY0lFnqYcTA4QwsBU\nNvv7+wz6feqiwFAWH1/7CM+xSbMItKTT6dBkExIMBmso06TX7QAgpcK2beI4OlaXeZ6HMhubQlEU\nSCnJ8oz3P/g+ptGsNZhPp4TTCXWh8QIfqoqrVz/h+vVrrK2tMRqN8P0Wtm3jum1Gw/MPGnZPCz9P\nk8x5/8yzvb/xPPrwEv+/wOXldoTfuO+VL05nLg2SNMeoTeJ8wY2dW7z1+psoy0BoTbZc/m4azSt6\nmqYkSUxVNZJImmb4vo/X9pmMp03YBUNQlgV37tyhrirqoqQQjX95lqbkecZsPqEsGv17lIQopRrp\n2HWQJcxmc3zPxHEs0iyj3Q5QymJn5zb9/pDbd26TpCllXmDbNlVRUtU1gR9wuL9PVlaYpkm73SbP\n86X0mB+rilzXJc2bmOdxlBIu5rTbHeIoxnEshDKxbZubN2/T6/Uo8pzxbEZeZBiGwDBUs6jGMTFN\nk8lsvDQa+wghuXr1Kptb55iECzapGsm9KhCmQCwlgjhekOULTKXwA58kSdjePk+eZyRJhmmaHEwO\n8b2ATqdLlmaUZUWn06EoS7SGr3/16/z27/8OulYcjKe0Wi22traZTCbEcURZ6sZYu1RVlmWJEBz7\nlsdxzGw2I45jpBTYjkO4CFGGRScYkJUlo/URiyhlMTvAd7tUBjiuQ6/XY/vcBaSSaF03WYqkRJk2\n0nwuWvN/hyYG0Uu8xOcGL4zMt1/bII1rhHZIkoQ8ralerYnjmCxNME2bsiw5OIio6hqBoK4rDKNx\nPex0eiipcDwXaUiSJCbLUrIsZ2trizzPKLKcGzdvYttm82KnFIbR2KeUVli2PNaP7+/vIaXF1uYm\nWZ6TJClVWXHn5l5D9MJujGytFt1en/l0QlFWbKyvYygDgSCMFtzZ3W+W9js2QcsnihqV0WK+wLZt\niqJg984Ovu8hiow4ThgMhhzu7GLZClOZhMtl85PxhIPDw0b9kKS4rodpqsYovFzpWlQ57XYXx7Ea\ntQtgSoXvOgjTB1JAoKvmjcEQjU3iygcfcO78BfqDAUpKZsvMPlIauI7L4LW3ODzYI0lqEIqyKsgW\nIVVVc/HSK7z33Xc5d+4cnuXzxhtvUlca07QQKNKkxPNdHN9bkrhYEnqx9EYqKcsmhMBsNieKIoqy\npNftIqXCsh2KZMHa1ggpJDXQCUZsrg2J0wlRGJGlJYtoiu8H+L7XGE4dnzRNn/XQ9WmMn//Bs/6g\nl/gseGo6kOeAZ9PXF5hpaIAwSrzCbIyejiIOQ2zTxLIU4/EhUZRiWk0uSgGYpkLKpe5cGRha8Mn1\nKwSWR6vlYxhNHJEP3v+AIo+xfQdhaPK8QNlWoxbJ8kbF4dho3azkbLfbBEFAlhTs7x8wmc8Y9fso\nU+K2fCgqdqZjJJqLFy9QlgVBO8C2mljbQeChpMI2Faa0ODw8pNtqoWuNrZrQA912izAKAUFR1/zo\nR9+nriqCbpciT9nb30erRqpMkozaEHzw/Q94/+P3+f733qOoSmzXwpEmRVVRa0mWp3Q6HSazCW0d\n4Dg2tdYkaUR3MKCqElSSo5WFjnNE26NGossFg+EIdM34YBfPayPQKClptQIODvcoq5Jut7fM5SlY\nG40wLZMojJgcThmuryEQFHnKzZtjbMc9tmeM1vpIaSBlk9MzjmO0bsIllGUTrbIsSzzPY21tDc/z\nsKxmIVKapvR6PabTOY5ykKbBzRsLbu/c5uOPBePxmFdfe41oNmc42uDwYMLhYaOm2T/c48KFi896\n6EbA8Fl/yEt8Vjw7/+2nj2fT1xdG5sXhAlFIihKMbqOKiJeZYwzRuBQ6tofnO9iWTbnMpjMY9Miy\njIODMZsba+Rpid/1cRwH23a4evUqpmOTVinhfEGr3SWOEhQCQ5ksspQ0TYmimKrUtFoeSZw2+tko\nYTgcceHSgN2dHS5dusRsOqXOEl7Z3iZOE2azGZZtYhgGwTKcbhhGBEGLoNXGcX183+f6zRu0Wj62\nbdLtDppwsTMbx7LJ64JB1+eDjz7hn//Ot9la2yZeRBwczBoVEDlKWXz68Yfc3rlDu9vCMU3sdoCh\nNZbtYFBx/cY1WpMOrW6HMJxjK5PD8SE7O7sM+j1uXm3CCJiWxcdXPqXf63LpSz9JHh7y41//MZI8\np6obVcxwsE5Z5ORlxqULr2KYZuOxMlxHCEEUR5TzBdPFnLWNDfI4xrIsut0RUknm8zHpkqSLsqTf\n71NrmE2n2LbNeHxIkiRIKWm1WriuRxD4FEVJXddMp1M8z1+mvCtoBT4HB2PQFa4lGa1dwnNsirJk\nMQ8xTZOiaFRXs/mEt956i/5gxHh8+KKG9Eu8xAvFCyPzax/dwfAVnvLoul0KQ1AvrdKub2O7JnVV\n0263qcoaXdcIYRBFEdPpnKqqGE/mnFtfb/TKadIkS6gr6hqi6QJt1BTlAUIIWu0hs9kMpRRJEqNr\nuL23z5f6b6BUY+KO04x+r8t4NqPd7RBGEe1eCyW7eI7L3sGERThFKYFnOXie2/i9WybXbt5gMYt4\n443LnL+4heUobty4jmlKtIZW4LN7e4+tc+vUVYE2+rz9xhuMp1N+63e+w4WLFxiPD7CUotAVs9kO\ng/U1Wv0uZVUgDYP14YgwjAiTkDzTjKcH3Lx+q4lhY0g+eP8j1ocb/MD5Eb1Oh/39XVzPRxoKjeDq\nlQ8ZDHtEsxAEtFwLLQyyoqIoE77z7h/y0z/5Y8TxgnQxJ8tyFrMZRqUpdMVobQPf9Ti4s8Pm1ibT\n6RTHdZGFxrFtrn36KeXSPvC9996l3+8RtNqURcHu7i6vvvoqnueilMWtW7e4evVT3n77LaIoalbt\nZgXf+e4f8ebrl1DKwvYcwmhBELQQVc1k74C9g12iKOFgf8rmuQ3efvtttrfPc+P6dYSCbudzsJz/\nYUi+BfafeEqNvQt89Sm19YfAl59SWx8AX3hKbX2Xh+XQfmTVxfxb0PmTT9ifJYpvgfyZx7jxjL7q\nT4ALT9SdF0bme2FGq85RgYVtWWxdOE+306Hb6aFFRZ436pCiKKiqEqkkVFBVNRsba9y5s0uaJo0O\n2XVwpcP4YJ+6biIctjoetu1S182qzN3dXVqtFr7vU1Ulnuvw/d/4Hd55+y3qumIwGJAVJVmW4nse\n3W6Puq6pdI6tmtRpna7P+kafnZ0dwkWI47o4jsssXLC2NsRUkuFwxHQ6pdNt0e2+w+3btyjLkvls\nTp5lOI6NlB5ZlnF75xaDwYCd/Sn/+r/2M+R5ibJM0rRkMY+I45hWt818Pmc+mXHp0nmiKKEWNMZj\n8QazeUgUxbRaAZPJnLLKORgfMJvP0AKCTpet9Q02traxPUWZZLi+2wTxSmKEIRn1+lDX/LNv/xbf\n+OpbZHlOXWu01tS1pqhKzl+8wCdXPmkSXZgm80VIGIfc/sNb5HnJK5dfAaDQgqDTw3Jctra2KIqC\nyeSQjY0NJpMJURSxWCy4cP4yVWlw5/Yug36H3d0dsjzjn/7mb/Haq+fJo5jZdMz3v/8D1je3cDwb\n07SwAh+v3WXr/CVGgz7j8ZgoilhbW8dQjaH1c4/0Wy/J/DPhUcj8YVi6fs6//Tkg87NwhX9hyVwa\nOXEm6Lo169tbbI22cD0LqSDPK1zXaaLrtduEYUEcxYwGQ8IkQmvBaDQiTVOqqqSKIsZRiG3b6Dqn\nKkss26UoUqbTkG6viyFNlFRUeYZtO8RxRp5lFFnEPE6PM+R4noeQCk2BpsTzfYosxVQmnvKZz6eN\nS51SLMKIPCtR2sA0FIEfcHCwz2hthBA1VXn0XVzm85BWt0NZ1ziui2EYvPH660RJE++lLBv1SV4k\nFHmBpG5UTGbTZ8uSjRtlmSOVwjYFdV3zxisXcb0AqSRxOMEwbOIkwXd9zp3bptvtcnB4SJUXvPf9\n93jr7S+xdeFVqvkYJRXG0leb5dL4Mi+Pjatog8V8gu35fPTxRxjCZbh2jsVsTJyWmLaDHQjWu12q\nqmT7/DZ39nY5f+EiOzdvs7O3gzIEo9GQJEn5zne+w/b2NufPX2C+OGRjc8B4POHdd99jMOwRRiF1\nVXP10xt87atfQ9cVphtg2x6b6xsUecqNG1cp0hQv8MmKFNsx0cLFdBTT+Zyqqh8++F7ijyEepod+\nqo7iT4g/Zjpzv2uRxhVffO1LXD53CW1UTKdNTmglJcRNMocwDFHKJMmmXLl+DddpvFyUqRFGY3Cb\nz+cMBgPCMKQ2JIIm1kVRlIxGG2R5htZQ1JpSSKQwCHwbkGyfv8AsaZJPBGbzedK0msQIUmKaJUo1\n4VbrumY0WmM6a1zxqCFLM6qyJM8Tzm9vEycJlmlS1SWGVeG6NoaUVHXOG2++jlKSoi7RQpOk6TK+\nuslwtEaRl8hM4jpw/dPrDDc3uHPzFmmacvny5cZdU0oMw2Ct32djY53dvTvE8ZyyqvFsn0uvXMZ1\nXWzLwrIsoihiNBiyu7/Dm1/4IoP1LeLxAabtUBQpRqGxLauJKCkNOsMei+mCMFwwn8/QZY4MPTY3\nt7hx4waGWZJUOf1WAFoShjGG0ORa8Mn1m2yMNpHS5cLFixwe7tMfrHHr2nUGwzV+7mf/FFmWYkiB\n4zi8//4HfPmdd/gH/88/5Ke6P81orZmYOq0unfaA+WLO1nqbWTjD8x32oymm6TOdx0S7h5y/cIFO\nt8PIa/TvprOPIf5FMoS9xNPD54WoHwXPpq8vauS/B3zlBX32S/zxx7e5f2asZ41vcTc/7ku8xNPG\nixzbL/ESL/ESL/ESL/ESL/ESL/ESL/ESL/ESL/Fs8GeB94GPuF/UuaeD/5km8fQfrdT1gX8CfAj8\nY2DVKfmvL/v0PvBnnmI/ztNEx/kB8H3gr7ygvjjA79HYK34I/NcvqB/QhMF7F/gHL7APLwpPa/yf\nNb4fF/cbo4+D+42zJ8Hp8fK4+JTG//Jd7pOb+DOgC/w94Ec03/Nfesx23lz252ib8WTP/7lBAh8D\nlwCT5gd/6xl91p+gyQqzOth/GfhPl+VfpMllCvD2si/msm8fc0/iwcfGBncdgQMaB9y3XlBfvOVe\nAb8L/CsvqB9/FfjfaRKA84L68CLwNMf/WeP7cXG/Mfq4OGucPQlOj5fHxVUaweFp4FeBf29ZVkDn\nKbRpAHdoJtfPPX4K+Ecrx7+03J4VLnFysL8PrC/LG8tjaKS/VSnpH/H4M+3D8E2aQE0vsi8e8PvA\nF19AP7aBXwf+Ve5KWp+H3+V54GmP/0s8HTI/jW8Cf+optHM0zt5+gjbOGi+Pi6vA4AnbgIa4rzyF\ndk7jzwC/9bg3P28p5xwn02zdXNY9L6zTvJqy3B8RyNayL8+6X5dopKnfe0F9MWikwV3uvlY/7378\nCvDXuJuegRfQhxeFFz3+HwWXuDtGHxenx9kPn6Cts8bL40LTTAzf4cmiXr5CE8v+f6FZnvo/cfdt\n5Elw/+xVj4DnTeafJ8/+hy0Je9p9DYD/C/gF4HQ6nOfVl5rmdXob+Bkaaed59uPPA3s0usH7rXF4\n3r/L88Tnve8BjR74F4DwCdo5Pc5+7jHbeZTx8lnwL9NMVD8P/Ec0qqrHgQK+DvwPy33Ek2sYjrJX\n/Z+P28DzJvNbnNQHneek5PWssUvzGg+wSTNQzurX9rLuacGkIfK/RfMK+yL7Ao2R5R8C33jO/fhp\n4C/QvO7+GvAnaZ7Ji3wWzxMvevw/CEdj9H/j7hh9UhyNsx97zPvPGi//6xP0585yvw/8feAnHrOd\nm8vt95fHf4+G1J8ED85e9TmEoslofolmJnqWBlC4V6f4y9zVwf4S9xraLJpXqE94eqtjBc0A/JVT\n9c+7L0Pueom4wG/S6EVfxDOBZpXkkQ70RfXheeNpj/9LPB2d+f3G6OPgfuPsSbE6Xh4HHtBaln3g\nt3ky76jfBN5Ylv8G8DefoC2Avw38pSds47nj52ms5R/TGLieFX4NuA3kNHrKv0xjyf51/r927d4G\nQSiMAujtncIBjAvY60I2DuUs2DmEC9BZfBAspOGRvOackuqGXH5z/0/gHlOmd5Lrjjkuqc/OIcv8\n6NYhyyn1f29IzbPu0/Ee5ySpi3NeJ/TK0MNe/Z/7PWbp91ZrHd1irWetfvuyxTGVaUjNL1vvPefU\nm/kryTNta5ZDkk+Whw0AAAAAAAAAAAAAAABA8gUCSawI/1yNXAAAAABJRU5ErkJggg==\n", "text": [ - "" + "" ] } ], - "prompt_number": 11 + "prompt_number": 7 }, { "cell_type": "markdown", @@ -445,7 +341,7 @@ "source": [ "The classifications include various cats -- 282 = tiger cat, 281 = tabby, 283 = persian -- and foxes and other mammals.\n", "\n", - "In this way the fully connected layers can be extracted as dense features across an image (see `net_full_conv.blobs['fc6'].data` for instance), which is perhaps more useful than the classification map itself.\n", + "In this way the fully-connected layers can be extracted as dense features across an image (see `net_full_conv.blobs['fc6'].data` for instance), which is perhaps more useful than the classification map itself.\n", "\n", "Note that this model isn't totally appropriate for sliding-window detection since it was trained for whole-image classification. Nevertheless it can work just fine. Sliding-window training and finetuning can be done by defining a sliding-window ground truth and loss such that a loss map is made for every location and solving as usual. (This is an exercise for the reader.)" ] diff --git a/examples/siamese/mnist_siamese.ipynb b/examples/siamese/mnist_siamese.ipynb index 8e076663ca6..5abd0469ba6 100644 --- a/examples/siamese/mnist_siamese.ipynb +++ b/examples/siamese/mnist_siamese.ipynb @@ -3,8 +3,7 @@ "description": "Extracting features and plotting the Siamese network embedding.", "example_name": "Siamese network embedding", "include_in_docs": true, - "priority": 6, - "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" + "priority": 6 }, "nbformat": 3, "nbformat_minor": 0, @@ -56,8 +55,10 @@ "MODEL_FILE = 'mnist_siamese.prototxt'\n", "# decrease if you want to preview during training\n", "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", - "caffe.set_mode_cpu()\n", - "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" + "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE)\n", + "net.set_phase_test()\n", + "net.set_mode_cpu()\n", + "net.set_input_scale('data', 0.00390625)" ], "language": "python", "metadata": {}, @@ -104,7 +105,10 @@ "collapsed": false, "input": [ "# reshape and preprocess\n", - "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", + "caffe_in = raw_data.reshape(n, 28, 28).transpose((1,2,0))\n", + "caffe_in = net.preprocess('data', caffe_in) \n", + "caffe_in = caffe_in.reshape((n,1,28,28))\n", + "# pass data through network\n", "out = net.forward_all(data=caffe_in)" ], "language": "python", @@ -139,9 +143,9 @@ { "metadata": {}, "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGyWaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9prMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkpD/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEdd05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenqOT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRgnDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMPn8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShNd/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAGu38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dccQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBLpcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNRp6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2vpw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2BFWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\nivQHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxpxBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufGBQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXhu\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdOtWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+JDVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/dM3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhxq1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJdB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9KwDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBedlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQTB7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEblyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DWE5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm27XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1NEBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKwZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4PrfBuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQGCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrlT5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\nUmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nGr2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+CbbaUo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t64aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBXvIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PFv2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGsmkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VNZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZPCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTMjJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2Mrn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbWx74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PIRpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYaW9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq25dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/ABjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8wRJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmzL19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tgCW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GWE+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp75xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcXfh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssGJDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxIeBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgYofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIBaTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdLFVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHkPmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwTrm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0mPK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBhHFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncbkNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HOr/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/QgP1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYlywm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzcz\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF78Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZVGsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erqhDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eLnLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YNxTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4bGmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fayklln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHLlnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBRf+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4HZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27EDo+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRcbHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ldu7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKqTT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1zeXM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n41u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7DYn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rvXvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNOIRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4DfmFPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAAACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", + "png": "iVBORw0KGgoAAAANSUhEUgAAA6MAAAIXCAYAAABpSojLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXt0W9Wd9/3V3RdZlm05OI5jxSkkGAjYwQlpkxTTkNKa\ngEWLO4PplDClelbpdOiaNcmzuqZM553CmllP2unM2y7omzKTUAYBThhCQhNCnMRO4oDzALmVpJgm\nxMU4iuO7ndiybOv9Y2ufi3R0l+Uj+fdZy8uSztn77HN+un31uwEEQRAEQRAEQRAEQRAEQRAEQRAE\nQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRBE\nkskC0A7gFIBzAP5ldpdDEARBEARBEARBzBVy/P/1AN4DsGYW10IQBEEQBEEQBEGkAdokzHHd/98I\nQAegPwlzEgRBEARBEARBEBlMMsSoFixM9wqAw2DhugRBEARBEARBEASREvLBwnRrZ3kdBEEQBEEQ\nBEEQhMrRJ3GuIQC/B1ADoIU/WFpa6uvu7k7iYQiCIAiCIAiCIAgVcQHAjbEO0iR4UBuASQCDALIB\n7Afw/wA4KNnH5/P5EjwMkQw2btyI7du3z/YyCJAt1AbZQz2QLdQD2UI9kC3UA9lCXZA91INGowHi\n0JaJekbnA3gRLG9UC+AlyIUoQRAEQRAEQRAEQQSRqBg9C2B5MhZCzDyLFi2a7SUQfsgW6oLsoR7I\nFuqBbKEeyBbqgWyhLsge6U8yqukSaUJtbe1sL4HwQ7ZQF2QP9UC2UA9kC/VAtlAPZAt1QfZIf0iM\nEgRBEARBEARBECknmdV0CYIgCIIgCIIgiAAKCwsxMDAw28tImIKCAvT39ydtvkSr6UYDVdMlCIIg\nCIIgCGLOotFokAmaKNR5xFtNl8J0CYIgCIIgCIIgiJRDYnQO0dLSMttLIPyQLdQF2UM9kC3UA9lC\nPZAt1APZQl2QPdIfEqMEQRAEQRAEQRBEyqGcUYIgCIIgCIIgiBmEckaVIc8oQRAEQRAEQRDEHKa/\nvx8PPfQQzGYzFi1ahFdeeSUlxyUxOoeguHr1QLZQF2QP9UC2UA9kC/VAtlAPZAt1QfZIHj/4wQ+Q\nlZWFnp4evPzyy/j+97+Pc+fOzfhxSYwSBEEQBEEQBEHMUa5du4b/+Z//wc9+9jPk5ORg9erVqK+v\nx0svvTTjx6acUYIgCIIgCIIgiBkkYs6o0wl0dAA5OYDLBVitsR0ggfEnT57EmjVrcO3aNeGxf/u3\nf0NLSwt2794d1XlQzihBEARBEARBEEQ60tEBtLYC+/YxYZnC8aOjo7BYLLLH8vLyMDIyEvs6YoTE\n6ByC4urVA9lCXZA91APZQj2QLdQD2UI9kC3URUbZIyeH/a+pAbZuTel4s9mM4eFh2WNDQ0PIy8uL\nfR0xQmKUIAiCIAiCIAhiNnG5gIYG4MCB2EN0Exy/ZMkSTE5O4k9/+pPw2OnTp3HbbbfFvo4YoZxR\ngiAIgiAIgiCIGUTtfUYfeeQRaDQavPDCC/jwww+xYcMGvPvuu6isrJTtRzmjBEEQBEEQBEEQRNJ4\n7rnnMDY2hnnz5uHb3/42fvOb3wQJ0ZmAxOgcIqPi6tMcsoW6IHuoB7KFeiBbqAeyhXogW6gLskfy\nKCgowBtvvIHR0VFcunQJf/mXf5mS45IYJQiCIAiCIAiCIFIO5YwSBEEQBEEQBEHMIGrPGY0Wyhkl\nCIIgCIIgCIIg0h4So3MIiqtXD2QLdUH2UA9kC/VAtlAPZAv1QLZQF2SP9IfEKEEQBEEQBEEQBJFy\nKGeUIAiCIAiCIAhiBqGcUWXIM0oQBEEQBEEQBEGkHBKjcwiKq1cPZAt1QfZQD2QL9UC2UA9kC/VA\ntlAXZI/0h8QoQRAEQRAEQRAEkXIoZ5QgCIIgCIIgCGIGUXPO6K9//Wts374df/jDH/DII49g27Zt\nIfdNds6oPtYBBEEQBEEQBEEQRGawYMECPP3009i/fz/GxsZSemwK051DUFy9eiBbqAuyh3ogW6gH\nsoV6IFuoB7KFuiB7JIeHHnoI9fX1KCoqSvmxSYwSBEEQBEEQBEHMKk4AtQDqAAzOwnjMShgx5YwS\nBEEQBEEQBEHMIJFzRmsBtPpvNwBoivEIiY4Hnn76aXR1daU0Z5Q8owRBEARBEARBELNKjv9/DYCt\nszB+djyjJEbnEBRXrx7IFuqC7KEeyBbqgWyhHsgW6oFsoS4yyx4uMI/mAQDWWRgveDdTClXTJQiC\nIAiCIAiCmFWsiCe0Nhnjp6am4PV6MTk5iampKXg8Huj1euh0ugTWEx2UM0oQBEEQBEEQBDGDqLnP\n6D/90z/hn//5n4Me+8d//MegfZOdM0pilCAIgiAIgiAIYgZRsxiNBSpgRMRNZsXVpzdkC3VB9lAP\nZAv1QLZQD2QL9UC2UBdkj/SHxChBEARBEARBEASRcihMlyAIgiAIgiAIYgahMF1lyDNKEARBEARB\nEARBpBwSo3MIiqtXD2QLdUH2UA9kC/VAtlAPaWMLpxOorQXq6oDBwdlezYyQNraYI5A90h8SowRB\nEARBEETidHQAra3Avn1MmBIEQUSAckYJgiAIgiCIxKmrY0K0pgY4cACwWmd7RQShGihnNMR8SVhT\nJEiMEgRBEARBZDqDg8wjunUrCVGCCIDEqDIUpjuHoLh69UC2UBdkD/VAtlAPZAv1kDa2sFqBpqaM\nFqJpY4s5Atkj/SExShAEQRAEMZuksvDPHCgyRBBEbExMTOC73/0uFi1aBIvFgurqarz99tspOTaF\n6RIEQRAEQcwmtbWs8A8AFBcDJhNgtwMWC+ByJdfTKD1WRQVQXg7k5CR+HKeTFTBKxlwEkYGoOUz3\n+vXr2LJlCx5//HGUl5fj97//PR555BGcPXsWdrtdtm+yw3T18S6aIAiCIAiCSAI5Oey/2Qxcvcpu\nd3Wx/5WVwPnzyRN3/Fg1NUz0cmHqdLIQ23jhlXSTMRcJW4JIKTk5OfjpT38q3L///vtRUVGBDz/8\nMEiMJhsK051DUFy9eiBbqAuyh3ogW6gHsgVSF9LqcgENDcCqVey+xSJuc7vR4nAk/1gHDojHqalh\nRYcSQSpyE51LxS1i6HWhLjLJHk44UYta1KEOg4j9/SbR8VKuXLmCjo4O3HrrrQnNEw0kRgmCIAiC\nIJRIpihyOoH584HCQmD9erm45YV/duxgQvHMGaCkhG2rqQH+/u/l84QSyNGIZ2mRIakwTdT7mMy5\nkils44Vya4kU04EOtKIV+7APTsT+fpPoeI7X68Wjjz6KjRs3YsmSJXHPEy2UM0oQBEEQBKEE75tp\nswFLlyaWwynN1QSYcGtqCh2SGqpNinQePkc021Id+prI8dTQIibctSSIOIiUM1qHOuzDPtSgBgdw\nAFbE9txPdDwATE9Po7GxEaOjo3jzzTeh0+miPg9q7UIQBEEQBJFMuLdv6VKgrS0xDyn39gFAdbXo\n8QvlfZV6MKVeOoOBbVfyGobzKKY69DWR46mhRYwavLPEnMIFFxrQELeQTHS8z+fDd7/7XVy9ehWv\nv/66ohCdCUiMziEyKa4+3SFbqAuyh3ogW6gHsgVEUZSM3EqXC6ivBxwO4NAhUWhFIXpaTpwQhZ3Z\nHDoctriYeXGVRNyFC+x/fj6wZUt85xAL6S7mQoQd0+tCXWSSPaywoglNcQnJZIz//ve/jz/+8Y/Y\nvXs3TCZTXHPEA4lRgiAIgiCIcCQjH9JqBXbtAt54Qz5HNHPzL4Y1NcC2baG9hp2dQG8v0Nwc7I3k\nFTGHhoAVK+S5kDORH5nMHNLZQA3eWYJIEZ2dndi6dStOnz6NkpIS5OXlIS8vD6+88sqMH5tyRgmC\nIAiCIJLBTOVlRptDyXNca2qCRSDfZjYDo6PssYYGUXQNDbHHiovZ+FDrp7YrBBEXau4zGguUM0oQ\nBEEQBKFGpHmSlZWxeRnDeSetVvbncISvouv1slBgLkSlcz7/vLx9DA+f7egQhahez/qchsvzVHHb\nFYIg0g8So3OITIqrT3fIFuqC7KEeyBbqgWwRI04na8nCcbtDizUl4blnjyjyNm6U7d7S0iIXgcuX\ny8fzbc3NgNEoeiulY+65B+jpYY9LBSvP7SwoAFavZrfD5Xmmey5ogtDrQl2QPdIfEqMEQRAEQRCJ\n5k12dAADA+L9cGJNybvo8YjbNQqRblIRWFoqHx8oEPm5fPRR8JhAwcpzOy9eZDmtkfI8lXJBQ/VQ\npV6dBEFEgHJGCYIgCIKYG4TLd0y0ryTPyayuBsrLge3b5WJNetzGxuDczvXrmVCsrpZX2+VI80YD\nxwPMW1payir/Dg+zVjQAUFYGnD2rfMxkEaqHKvXqJAgByhlVRp+ENREEQRAEka7MpYI03CMJsPOW\niqNEw09drtBFhgKPq7RvaSlry1JUpDw/LzTkdDKxWVIC3HgjyyPNyWHjuQDV+gPfdDrg979nY8Ot\nLxrCPU+kPVSrqsTrN8dDegmCiAyF6c4hKK5ePZAt1AXZQz2QLWaBEAVpZtIWR5xO7K6txd66OnhS\nGb4ZThzF24qEh6IuWwZ0dzMPZGCYqjRclovBwIJEYdqyyGzR0cFEp9sNHDwo2o73ETWbgelpdntq\nCvj619ntwFYlfG0LFwJr1kQOpQ2Xs+pysXOprwcOHw4OAU7X9i4K0HuUuiB7pD/kGSUIgiCIucws\neK8GOzrg9nsKjzqduDdV4ZvhvINcrMWK1OvZ1cX+c6+rdFtZGRNlmzezx8+cEXNMQ+V9ck/kk0+K\nx5Pu193NbufnA/v3A/fey6rhSqmqin7dlZXA+fPKwlF6XJMp2MP8xhvBY+K9pgRBzBkoZ5QgCIKY\nVY44nRjs6IA+JwfrXC6YMsSDkjZE28Myieytq0PXvn2w1dTg/gMH0tvmPFc0P5+1SKmpAW65hXk6\nP/qIeTuleZqB+ZVVVcybCITO+5TmWw4OivudOMHauQDMM+nxsLVw8vOBS5eU7crXrdMxDyonVG5n\nuJzVdLYfQaQIyhkNMV8S1hQJEqMEQRBESHbX1gpessUNDanzkhGzhmdwEEedTqzdujW9hSggirQt\nW4BNm5hYczjkHtGzZ0WPKBeonKws4M47gwVodjYwNgYYDEx8FhaKuZqBghZguaYmE2vfMjnJ9jt1\nCrDblfM9BwdZ3qm0iq/NBixdytYSLn94Fn7AIIh0h8SoMpQzOoeguHr1QLZQF2SP2UXvD/+z1dRg\n+jvfmeXVRM+s5T2miJl8XZisVtzb1JT+QhQQQ1HtdjEPVJojevYse5yHxfb2MtHIGR9nAnTfPuD4\ncfZYVRXL/QQArxct7e3ynF4eMpuXx/7n5gJ9fSxsd3KSPabTAd//vrwP6b59wE03Ma8oAEi/UJaU\nMCHK1xKqR6r0nDPBfjFCnxfqguyRPL797W9j/vz5sFgsWLx4MZ599tmUHJfEKEEQBDGrrHO5sLih\nAfcfOAAj/wKeBvC8x659+3A03Bd3IjOIpmemVHDyHNFVq5ho40Kzpgb4+GMm/gDm+eRwcbhokSgq\nOdKcXpcLqKhg4cAlJWwbwMJyAVZNt6+PicrKSvEYZjNbGxeb2dni/KtWMY9o4LHiuQ4EQaQdP/7x\nj/Hpp59ieHgY+/btw69+9Su8/fbbM35cCtMlCIIgVEM65Y9mVN7jXCOedjbz57MKtgCrGrtrV/A+\nPA9TmktptbJcUgDQaIBPPwWefRY4dw744ANArwdGR8U5eA5pQwOrrFtQwMYtXw7s2CGuVbqeujrm\nHd2yhR1bGgbM12s0soJJzc3i+vgxeG9Tfm2Uwm/5NZMWXqLeoQQRNekUpvvxxx9j3bp12L17N5Yv\nXy7bRn1GCYIgiIxl1qqsxsE6lytz8h7nGtJKsjfdBKxYgSPFxRjs7Az9Q4g0t1Kj8H1L2v9z505R\nzAV6Pu+5B+jvFwWqdE6rleVtAkx4Op1snT09TDQWFjLvZ00NyyflGI1s7GOPiY/p9cy7ajYD//f/\nMk9qdjYLI962je1fWgoUF4u9TaXVbwMFu/SaAWwdW7aEv84EQUSNE0AHgBwALgCxfqokOh4Annzy\nSbz44ovweDz49a9/HSREZwIK051DUFy9eiBbqAuyR/wkO2/ynP8Ltq2mBmtT1GYkXjIq71GBjH5d\n8JxLSdjq4N694cOu77yT/a+uZmIuEGn/z02bxHDWW28Vc0RrapgADBSiABOt3HP5+OOiMPR60cL3\n8flYaGxzM3DtGnvs9ttZeG1TkxgirNUCd9zB5hwdZbmkbW1snM8nCuXOTtYKRqG3aVD/WX7NcnPZ\n/6Ehdp5zjIx+XaQhmWSPDgCtAPaBCctUjweA5557DqOjo2hubsZPfvITnDhxIs6ZoofEKEEQBBE3\nyc6bXP7000L+aKaKPEIFuFwsxHTVKna/pgZ6fz/OkD+E7NjBxhw6FLkP59atophrbQW+9jXmmfzz\nn4H29uCxer28vcrx42LF2unp4P3NZjGntKODhQxLBe70NAsB5m1fpHCvrtPJQm4BFhoceM6B58Pz\nVLmnV2lMJJxOFl5cWAisX085pwQhwf+KQw2AeH6KTXQ8R6PRoLa2Fg0NDXjllVcSmCnK4834EShn\nlCAIImOZC3mT6ZTHSsSIpEWJBwgdds1DVi9cYFVzeesT3q7lwgVgwQLmaXzvPbZPYP6otN0LwMRY\nayswMaG8toYGFp4b2MKltJQVLmpuDn1ePES3qop5RXt62OO3387m27wZ2L5dFKvFxew8pOet1L5F\n2lLG4QDeeCPMxVUgsCUN5ZwSc4hIOaODYB7NrYgvxDbR8YE88cQTKCkpwTPPPCN7nPqMEgRBEFET\nj5CKZUxG9YsMQTr0QT3idGJwzx7oPR6su/NOmKSFboj4USraw2loAN56S567CYgiLVDMcXEKsDDa\ntWuZl7G5md2XekAtFnbMNWuAri5RXHJhCwDz5il7PgHghhvYfHfcAbzwAvDUU6zn6NgYG8PDfaVU\nVADl5cFFnaS5o16vvABSrM8x6TXgRZPoeUrMEdRcwOjq1as4ePAgHnjgAWRlZaG5uRnf+ta30Nzc\njBUrVsj2JTFKxE1LSwtqa2tnexkEyBZqg9tjpoXbbCAVUnkVFcgtL4+41tkUX2p8baSD91dmMwD3\nJsHjpEZbJJWbb2b5nQYD8P77zJsZSKAnLz+fhcOazSzE9/BheXgtwATXggWigCsuZh5Tg4GFzkpF\nrdnM+oxOTTGBKBWlxcVM/A0OogVAbVkZ61nKvbHvvsu8qmYzq7j72WfK52mzAVlZrLjS8LDyPtXV\nbK1tbeKYFSvYGt58UwwBdjjYeShV242GwUFg40YWKsyLKKUZGf+6SDPSyR5qFqO9vb14+OGHcfr0\nafh8PixZsgQ/+clP8OCDDwbtS9V0CYIgZoh4Krmqvfqr3p/3Zaupgc5kimqt0jFqLyKUCtKhaq5g\nMwBr48nlm4u43aLIWrqU3Zfa1+lkgg9ggu/LXwaee05sndLczIRZoBhtb2eCkotOqcDUS752VVUB\nFy/K+4lKvaNXr4q3s7LYtptvZseWHnN0VN4vVIpWG9zmBWBFiHgBpNJS5qFsbBTPlfciNRhE72tB\nQeIC0mpVbolDEMSsYrPZZq0YVKKe0YUAfgdgHgAfWJjy/xuwD3lGCYJIC+LxgKXSaxaPF1YaRnuw\nsTGqtc6F0NtMwzM4iKOPP461Ph9M27enpccp5RQXy4VaYA5kqPzGhQtZ6KxOx4TpiRPMq6nEsmXA\nH/4QvL20FPjoI9ZWRroGqfiLFpuNiWo+TqMRj1dcLBe1AGs9c9ttTEzbbEyIWyzM68ur7fb1ycfk\n5wOnTyt7j4H4+rYSxBxDzZ7RWFBbmG6J/+8UADOADwA4AJyX7ENilCCItCAeERavcItHWCYaPksi\nk5jzSEXTv/4r807y7yj19XKvnTS/MS+Phcja7SyPk4ezAnLxp9PJvZalpax4kNT7yUN8L10CrlwB\nRkbY41p/g4PA6rkWCwuv5f+jRacDWlqAb3xDFKRWK/Dgg6zgEi/GxKv72myiMDaZ5H1VIxUrkgp3\nKkpEEIqQGFUm0dYubjAhCgCjYCK0NME5iRkik3oxpTtkC3XB7RFP38hYxkh7cvafOxdzS5REw2ej\nXWuye4fGAr021ENG2kLaO/O++1joLcBE6fbt8n1dLqCoiN0eGRF7alos8v34l7KqKiZUOVotE528\nL+fNN7OWJqOjzAN56ZIoRAEmQqVCdO9eJuzOnEHL3Xezgkbl5dGf69QUsG6duL6CAlbEqLNT7Ifa\n2cm21dSw9fPbH3/MPKj8/rZtYt/Uurrg4keBbWAymIx8XaQxZI/0J5l9RhcBqAag0ECLIAhC/Rxx\nOvHS/PnYXliI369fH5cQCyfkpD05hy9cABCbsFzncqWkB2eye4cShGrwv+4AMDH2hz+wfEuTiYXU\nrlkjii2rVczFzM8Htmxht10u5imcN4/dLyxkorWnhxUj4nmh09PA7t3yvNS77mK3a2pET2gofv1r\ntobHHmNVcPPzQ7eBCcXEBPN2lpWx/FS7XbwGFguwfz8TvAcOiH1UDxxg+50/L963WoE9e0Qh//jj\n8uPwvq3xVNglCGJOk6wCRmYAOwE8BeYhlbFx40YsWrQIAGC1WlFVVSVUvuK/aND9mb9fW1urqvXQ\nfbo/2/f/Y8MGjHZ1obq0FJ6qKhw9cQL9bjeWAvi8uRm/djhw5z/9U0zzt504geLTpwEgaPy5sTFc\nBbC6pgbrd+7E1scew6K//3tBWEaa/91Tp6B/8km0b96MwY4OnBsbw/Knn8ZXN2xI6vXhHtiLFgt6\nzp/HRF0d1rlcePfUqaTMH+k+Z7afH3P9Pn9MLeuJ+77LBXR0oGVsDMjLAz+7Fq0W6Otj99vb0QIA\nXV3svtOJliefBEZH2f2hIbQ8+ijwzDNs/uJitExMADodaqem2HYA6O6Wzz85ye5rNGhpbgZuvBG1\ndjtgMrHxgLi//79wf+9e4X4tgJZFi8T5lPYPdd9iAY4dQ8upU8DPf45af6hvy/Aw8Ld/i1r/dWpp\naQGefBK1/P3o1Cn5/dFRcX6fT369N29GS0cHcN99qN2/H7Ba1WN/uk/3VXQ/U2hpacGpU6cw6P/R\n/dKlS3HPlYzWLgYAbwHYB+DfFbZTzihBEKokMAdzYnQUXf4csaLqamw4dChmD2S4gkbhcjZjySGd\n6dYrfJ3XurtxxZ8bp9b+mgQRkdpaMZ+xpIR5RI1Gsd1JXh4Ll+U5mdIemoWFYlVcad6kdE4Ob/si\nrVQLBOeRJkJWFvO+xsr69cA77wSv++67WZ5sqPcbaY7t9etsbFUVcPvtLMSXFyxyOChnlCAiQDmj\nymgTXQ+A/wRwDspClFARmfarTDpDtlAH3APoXrIEa7duxTqXC/b6etgdjriEKCAPpW3fvFkWshsu\nZzOW0Nh4c0ejzQXl6zT4c+NS2eKFXhvqIR1tofgc5/mMNhsThQaDKEQBYO1aoKICqKxkYnXnTtbL\ns7ZWDKWtrmZ5kxw+J8Aq0zocrNpsQwPwxS+yx4uK2F8SwlZbADGcOBq0AV/vjh5l/6XrBpiADAy5\nBcT80J07xdDcggJ2focPMyHKH3c6KWeUmDXIHulPomG6qwF8G8AZACf9j/0YwNsJzksQBDHj8P6R\ni77zHUEg3pdADzypd/P4U0/h0ptvwuvPF4vUgzQWgRlv38tYe6KmQ3/NOclstNFIk9Ydis9xl4ut\nv7tbrILL26DU1AAvvyz37G3axPI/+f2yMpZP6nCI5+9yMRH34YfMI+rxsP9NTSzf1OlkOZbReDG5\nZzYSY2PsLxoKCpiXdnKSVfutq2Pi8pNPgvdV8tTs2cM8yBxexIjbXUl8Op3stkqfGwRBqJNkhOlG\ngsJ0CYJIOxJtvWIqLobH307BWFCARy5eDDtHKtquqL0nKhEl0lDLVIVEJuuYMyxqwz7Hpa1ali0D\nFi9mFXStVnEbD9FtbJTfl4rVkhJW3MdqDX1dnE7gt7+VL46HugYibQ0Ty7ZIFBWxcOH2duBv/zY4\nrJivyWplnmGLRbSJNDzZaGRVh3fsEO3FBTeJT4KIGgrTVSZZBYwIgiAyikAPi9FqjSiupN5No9WK\n7uZmGAsK8M2TJyOKMR4aGw1c6I1cuIBcux1GiyUqwZdKT2esXlgiBmYjJDJZx+StVQAmZqTPi0hC\nNQohK3uOb94s3597M30+UYRyuPeUi6viYhbWG+gJBJjHcPly5qXkfTm1WuDQIeCRR4DLl4F335Uv\nbNky1lNUSYyG+3IazRfXrCzgK19hwlMqOPv6mEC+/XZx/Tyv1WhkIcvXr7O/7m62feNGlkN6552s\n/QzPf21ultvLaqW8UILIUD755BMsW7YMDQ0NeOmll2b8eInmjBJpBMXVqweyxcyilDcWLl9SyR6B\nYbPR5HRK80XX79iBxQ0NeOTiReTZ7Uk9P76Wa11d6Glri7oFS7ic1WT2Fj3idKL/zBkArBBUtPmm\nR5xOPFtVNSv9TdOKFLXRkL0uknXMcKJW2gNU6fkcaruk/6UJEJ/jgftbrawAkVLBHi6u+OOdnUxo\nNjcDS5awsN6sLHH/Tz9lonRykt2fnmbir6mJHTOwBUt3d3yFh/y0hNs4OclE64svspYsvLVMbi4L\nS16zhq3fbGbrNBqZOFYqqnTqFLuWAFBfL+a/1tSwnFX/dQ7qMzqHoM9vdUH2SD4/+MEPsHLlSu7p\nnHFIjBIEkXEoCcdYe2cGFiLi4qqwqiqkuJKKvXDC77Wbb8Y2qxUvFhdjhDedR3R9TqVCz5CXByD6\nAkPR9kBNtLfoYEcHJvwhfuby8oheWL6uizt3ov/0aepvGonNm1lOY2Nj6kRBoFiLl3CiNpL3NdT2\nUCI1EW8uH2s2A1evil7CSITyZPb1iQWTwuF/TcfE5CQ793vuYaKXC+Rr18Q82eZmJoZHRphQ/sMf\n2D4FBcDq1ex2dTXLj21tZfsbjfLeo4FFiwiCyDheffVVFBQUYN26dSkLKSYxOofgvY6I2YdsMbMo\nFQMKVyBvj4r2AAAgAElEQVRIyR5SMSkVVxMDA9jvcCTkvbvudsM7NARPby92r1kjPD7Y0YExtxsT\nAwP4vLlZUZBJ11Kydq0gmEMJPqkAHTh3LqTgjLdCrxLSuWq3b4+4PxfCEwMDWJqkNWQ0kTyISWJG\n3qfCidrA0NhAQgnZUKIzGm/uzTeLYbmSH4aEtfDqtbwSbziWLBG9kvESopBRbeADvFouF69mM9DV\nJRZmUoJ7OXJygBMn2LW5eBF46y12+9AhljcKiNfSamV/Dgfw0UfybXMU+vxWF5lkD0mQR1y/MyY6\nfnh4GD/96U/xy1/+MqW5rZQzShBExqGUG5lIvqRUXOlMpoRyIY84nZj09yDU5eTgwWPHgo4DhA5v\nla7lKy+/LJxLqDxSae5mdkmJMDZw7mTmk8Y6Fz+noupqmMvLUbt9OxU8CkemttGQhsYG5pMCofMU\nA/M9I+0vxe1mOZQAC2f97DP5WgAm4iYmQns2NRomQj/9NLwYTAbf+AZw5Ahw663s/LKzWfuV0dHI\nY2trWeGlY8dYOK/02jQ1sWs4PCy2t+HXUprnW1YWWtynScVlglAr4VLqUzH+6aefxhNPPIHS0tKU\nhegCJEbnFC0tLRn1C1I6Q7YIT6KVWJWKAYUrENTS0gKtX7gpFQWSiquDjY0AYvPeSc/HOzwMnz+M\nrrS2VpZPus7lQsvGjYBGg9pt22TnzefQGQyw19cHCTap6LzW1QWAiWWpeF2/cyfaN21SFImxFFCK\nRKi5QtlVen3fPXWKhGgkQomvJJPy96l4RXYixXQMBvHYkh+GZAWLfD5lIarXsxzM6WllEWowJE2c\ntgCozc9nPT4HBtg3zooKoL9fDMsNh07HqvuGy1/v6BBb39xzD1Bezq4Dv0a8sjB/zgWKz0S/CacJ\n9PmtLjLJHon+zpjI+FOnTuHgwYM4eZJ16iTPKEEQc5rZqMQaSszxUF2+Bi6cdNnZ2O9wRCWYQ3kn\ns2w27K6tlc0Rqs+pdI7FDQ1Bxxu+cEF2X5rbKvVShruWR5xOdO7ZgymPB8V33ol7d+xIqjAMZddk\nCuE50VImUyuZpkhky3j/feYR5d5C6VoqK8Vem0ptWSKJwCSKUQCiBxdg3zZNJuaNjYapKdY/lRd2\nyskRQ5MvXGDn/vHH8rm5sKyvZ8LXZGJ5ytzrGSg+M9VjTxApItG3wETGt7a24tKlSygvLwcAjI6O\nYmpqCufPn8f7778f+2JigPqMEgShOlLZDzPwmIb8fHiHhiIeW9pTdHFDQ1gxJT0fqXdyv8MR1xxK\n63pzzRpc4V4NAHaHA/e98QaAYIHWvnmzomCTnlM0a4qV/164ENe7umCwWPDwmTNJrzIMxGYXIkOJ\nJlw0mn0GB8VWMLy9SSzodPI806IiVsgoUYqKWLGhc+dYgSKNhrVvOX069Jhly1h4r7RfanExK84k\npawMOHtW7LNqswFLl7Jj8b6jvJ9qYG9WgHqPEkQY1NxndGxsDCP+nHWfz4ef//znuHTpEn7zm9+g\nqKhIti/1GSUIIuNJZT/MbVYrvKOj0Gi1WLB+Pb7829+GDGWVEk3BHy4CtQYD7A6HEHrLBVIsRYPW\nuVz4n+XLoTOZcLCxMcjrZ+CFRwDo8/LwpX//d+F+oEfyek+PcP+1ykos/OpXMdLZiQFeoASxtWSJ\nljy7Hde7uuAdHkb7pk0zIhSTWYiJSFOiCReNtE+gWH3qKRaWG01ILIcL0awsJky5mEuE/HwmaKXC\n2OcLL0RLSpgQtVrl3kurlc3DPbj5+cxDbLWKLhZejZcj9XoquWHoxx+CSEuys7ORnZ0t3DebzcjO\nzg4SojMBVdOdQ1AvJvVAtghPuLYoSiTSI9M7OoqPp6bg83rhPnoUeXZ7VMeWtn4JtS8Xgd3NzdAZ\nDEH7ZRcXw2SzyYoQBbZ24ed2sLER2aWluOLvK/rqkiWy813nckHrr/w5OTKC4089JczZ8+67AFgr\nmLu2bJEVShp3u/HnvXvhbm2Fp7cXuuxsmIqKYJqBDyAumMMJxURfG9HYhYiOtH2fiiZcNNI+gRWL\nOztjE6JSdDrmWZ2ejm88gBb8HYDDwJALQL58o7SvqMnERKXRyB6rqWFFizZvZgWMTp9mnlWrFXjh\nBeblXL6c7Ts0xEJ5ATEUnP/IVVXFvKrSnNFktftJM9L2dZGhkD1mhp/+9Kf43e9+l5JjkRglCCLt\nSaRHpoa3SNBo4Ghvj3pcNIKZ53EaLBbctWVL0PaRzk54enuFNi6BrV1eX74cF5qahHPj82n0eniu\nXkXXvn2s4JF/PVKRyds4DHZ0YHpiAgDgHRlB+6ZNWOdyIUuSu6rzf3E1WCwovP12ePr60B2itYyU\nWH8ESIVQjPWHDCJNCNWzQOnxSC1dlKrGOp3A/PlAYSGwYIHoaayqYmJV+triGI1MaHKU2rosWybf\nBwA2bIjlzP0sBGvwUgfg/xMfzs8HHnyQHfvaNcDjYaJyYkJe+XbPHiauu7tFz+qmTUxMFhayucxm\n5r2VXl9+LQ8fBt54Y84JT4IgZh4So3OITKk2lgmQLZJLIqGZ5Q88gKUaDW740pdg9ifuBxKv55Xn\nRPKw1EjrDmztklNaCq+/aImxoAAPvfceFjc0iP0CAfR++KGwtqLbbxfG1m7bJjsGwIoa9Z46Bdei\nRfB5vVhYV4f7DxxAXkWFsM4Rf6/FwGupdA1i/RGgffNmXO/pwcHGxpDXMR1fG4l45tXMjNkinmZ4\noXqrKj0u7Y2pdAxeNdbtFj2B//3f7P7AABNsfMxnn7H8yeefZ6Ls7rvFeSYm5DmhgZ7TggImArmX\nEmDey7ffju6cOXo9ag23+e+cAPC/xG21tUxc8mPz9wZpyC3ARKqUwHDb4mLWHoa31eEk4v1MtOmh\nSknH96hMhuyR/pAYJQgi7UnE4+bp6wN8PlxpawspqMKJrnBChIelmmw2XOvuDtoncN3rXC7klpcL\nYbIGf/6GsaAA3zx5UgghNuTmCnNcv3JFWFv/Rx/B7nBgw6FDsrYpdocD9vp6PHD4MMZ6euAdGoKn\nrw/9Z87AZLXKwmcfeu89mCsqoPXnpvJQYamHll+DWH8ESMSDrWYy9bxmjFDCMhROJ3DmDLvNPZWc\nUOG24Y6hNMYfPSDDbGZCb98+YNUqtm+IateKDAywirx2O8AjMK5diz3cd3IS8D4MYAeArwKQVNVt\naQHeey94jDTk1ukUQ4QrK8VwWx6629gI3HEH2x4qbDmZPyAQBEFIoAJGc4hM6sWU7swFW6SyxUYi\nrUH0OTn4GMDqMIJKSXTxNihjV68K3pHXly+HubxcOGdeiOlad7dQ6TZcSxOT1Yq8igohzzRr3jyh\npyivgDt84QKm+RdLgwE+yZdoT2+vMI90Tl5VFwC0/p6BGp0O2QsWYG9dHdY+/7ysaJO5vFwocPQ/\ny5djvL9f5qFdu3Urjjid8A4PI7ukhFUIllTozS4uxkhnZ1D1Xl4gKVLOaLq9NoTnh60Ga7u3skhK\nF4A0j2hsaWlBLe8fGa7qbKzE2gKko0Ms/rNokXwNoXoZ8GPYbMzTWVcnrl9pjNUqr3RrMgF33ikW\nOXK7xUJHJlOwpxEA8vIAfzVKgbEx4IMPIp+jFK02KL+0BUOoxbeC95W2ewFYMSNOTw/w2GPAK6+I\nLWYqKli4LSAv4uRwMM9vqEq40n2XLGHXwG5nOaWhnhcZ2uolHd+jMhmyR/pDYpQgiBkh2b1CZ0rc\nrnO5cN7hwP27doWcU6m6L8/v5GiNRoz39WHU3/ePn/O9TU3YW1cHQBRh4VqtcLEIAOM9PdAZjTBZ\nrbLrKaDQw7Bzzx68WFyMb7z/vmLrlG+8/z52r1mDyfFx9PpzZI//6EcywSoV3zqTCSP+c9IaDPjm\nyZMwWa1MiPvP//iPfgTPwICwPlNxMTz+lhGB1Xtzy8oyrriQ8Pzo3gpTm/+8nAAyobBoNJVpYyXW\nZnhSUeMPPxcI1XOVH6O1VawG+/jjYt5j4JgPPmAtTLjI9HhYD1Ip+/cD69cDt90mCkyTiYXjvvce\n8MQTsbd/AXAEwCDYF7J1N98MU34+EEP+uqIIBti5B1YAlry/BF3XcLbg+5rNYjsYfz/mkM+L2egb\nSxBE2kF9RgmCmBGS3St0tvpHhhLB/PwAwGi1wrJ0qSDuACC7pASWL3wBo52dyF6wAKOdnXjovfeQ\nZ7fLziWrpAT5X/iC4Dk1FRXBOzKC6YkJ2bUT+qBaLPAOD8NWU4PekyflOWsajeAZyS0rw8Kvfx2d\ne/ZgyuOB7c47sX7HDmH92wsLMeH3Ntnr63Hfrl3CuQ5/8gkmx8dRvHw5fAC6m5tlocJK46cmJgR7\nG61WdDc3C+s/2NgY8bkgvc6BnlWT1ZpST3vc1AHYB6AGwAGkvWcUQHAvydm47oOD8YuawkLRq1pe\nzjyDoby8g4MsjNXtZufb0cEKHQVSUsL2MZnY629qihUpys0FxseZ+JO+LpWQ9CDdDYD/rLW4rg73\n+nzsmnNMJuDECeC++9hxufjMzwdWrwaeew740Y+YQL58WRSfZjPLA+XwXqP8vJWua6j+q3zfgQEm\nuC0Wdm1m83lBEGmGmvuMxkKy+4xSzihBEDNCsiunzlb/yFD5gOtcLtjr62F3OPDIp58ii1ek9DPm\nduNKWxuudXWht70d4243dq1ahcOPPYZ+nv8G1lqFC1FdTg48fX2YnpgI8iDy6/nwmTPCdZ2/Zg0A\noODWW2Gvr4fJvwZdTg4ePHZMVp2321+dd3dtLV5euBDT/i/C2pwcTF67Bs/goHCu17u7MdHfj8+b\nm6EzGrG4oQGPXLwo87Ta7rwTACuKVLt9u8ze63fskOWdrn3++ajb4HTt24c/79sXdM1D2UFVxYNc\nABqQOUIUiFyZNhXEUkQnMLfR/zxFVRWwcKGYw7h8eXAOpNXK2qDw81UKxTWbmQfV4WAicXKS/QA0\nOclCZj0eUYgaDMxrGojBwNbj91LyEDUbgLUGA7vmUnw+4Be/YP8NBiA7mx0bAE6dAh59lB23vFwU\noqWlLM+Vn1ddnVyIhrquofI8N29mYb8AUF/Pcnj5deK5pxlWqIggiNRAntE5BMXVqweyRex4Bgdl\nobLJ9JSFs0e0Hl7P4CCaKitlobuKSDwiAGT5YVqTCdMeD0w2G/KXLsVoZyemJyYw7fXKPJs8X3Vy\nfBw6oxGlX/kKLre0YHJsDJNjY7Bv2IDxvj4MfPSRkEdaWFUFfW4ueqQN7CXkVVTAMzjIvJ2SNZmK\nilC8cqUQTsw9rYW33w5TQQFqt21TvCaxerL5dXYvWYLl5eUyz6rUMxxoh9nymM8Fkv4+Fcrrlsz5\nm5rEPEqeA8m9f42NopfXZBLDdxsalMNMV60Sw2VLSlhYPM8r5e1O/K8vAWm+Z0GB6JXlmExMiPJ5\ny8rgmZzEUbcbay0WmOrqmHezrU0WXttisaBWyUsrZd48JhjNZrZ2mw04eJAVJ9qxI7rrHcoTXlsr\nhmsHXq/585nHFmAiXRLyn4nQ57e6SCd7kGdUGfKMEgSRFgT2j4y1gmm8HjTu8ZsYGoJr0SK86A8h\nVVrft86fR9a8eeEn9AtRo9XKvJl+z4kuJwcPnTiBxQ0NyF+6FD1tbbje1YXxnh7Bs9lUWYnDjz2G\nC01NGHO74R0cxHhPDz59/XV2f2gIvokJXD5yBO7WViZEtVoYCwow3tMjFBAKWntREcb7+oSwW6EI\nilYLT18fuvbtw6s33YSLO3cKnlZ3ayt0BkNIcR6rJ5tf51W/+AXW79gR5EkN5WmfLY85EQczXV21\no0MUogUFYvgpb/Pi9TKv3oEDLMwUCF9c5/PP2f/8fJYTunKlfExgTqnBAHzxi+y2ViuGyErzND0e\ngL9/2GyA3Q7T2BjuBWAaHmbisbVVnuep08kLGuXlibe1kq9xPh9QVCS2aDl4kOV3BrZrCUcoT3i4\nYkRSD3IGfNEmCCK1kGeUIIi0JNac1Fg8aELu5IULyLPbYbBY4G5rw6TfM5FbVoZHP/tMti/30AJA\ny8aN6P3wQ3ivX4d3eBg+XmjI7zXh+Zcnn30WfWfOoO/UKTx04oTQJ/S/Fy7EdV4cJABTURFrRxMC\nQ14ebMuX43JgsaNAFCp2hkJvNmNSmnsG5ml94PDhsJ7iwKJPM0GqjqNW0iKXlhNL/mk8XlQ+f0EB\ncPIkq/YKKHv1eA5kdjYThzk5rNcmv+1yARs2iN7TkhImSDdtkudYGo3yQmL19cDvfy+KSS4k+fcg\ni4WFuG7axKr8SiMVqqqYZ7O7m+13003ySrylpUwQ//u/sxxRn4+dh/S1npXF8lYNBnYeQ0Ns3sOH\nE/NEP/YYu7ZKXtb165ngTcZxCCKDUbtntLa2Fu3t7dDrWfJAWVkZzp8/H7Rfsj2jJEYJgkgqqfpy\nHKsIiVa88p6a3sCWCX502dn41vnzyLPbg/Y1V1QIrV0mhocVQ2Jzy8rw8Nmz2LVqFYY+/lh43O5w\n4L433sARpxN/eu01QfgGojEY4PN6YcjPD7lGY2EhvKOj8E1MCAWP+H9O1rx5GOc5YCHQ6PUo37AB\nk6Oj+Ly5GUXV1ciZPx8DH32E3LIyGCyWIBtHKkQ055jh0FTVhClHc56xFCIKFxYailDzhxPB0uPY\nbGLYbUkJq5orrY5bUcHyMqXnaLWK3tj8fODSJSYie3vZfjk58lDekhIWhut0Ajt3ysN4HQ7myeTv\nG9nZrDUMIC8+JL3Wzz/PQnJ50SWdLrgSbzJCZ8PZI5ECUwQxh1C7GL3nnnvwV3/1V/jrv/7rsPtR\nmC4RNy0tLbO9BMJPJttCGj7LC+bMRHGZwLBdIHwobriCSlJ7DHZ0hBR5AOCbmsKhRx/F3ro69J87\nJ+u/OXntmnDuw598wgZIw+g0GmQvWICDjY0YvngxYGKfIG5DCVEA8Hm90BgMmLdiRch9Jvr74ZuY\nQHZJCR4+cwZ5FRXQGo3C9qLqajx04gSyS0pCzgEApsJCjF+9Ch+YWC5ctgy9H36I0c5OXGlrQ9e+\nfWjZuFE2JlIhokhk3GujowNHWluxe98+7K2sTPrrYCbDlGOyRTQhuLEUIoqnR2Wo+V0uJiRNJpY3\nKrWB9DhVVeLjbjfLveSvkZoa5pnk53jTTUzk8jH5+cDXv86En17PPKYrVsjDbTUa5l0F5L1TATbP\ntm3y8GF/pATAckaFQkEvviiu46mnWNiuXg98+inzjALyeQLb4cRDOHvEYtcMIOPeo9IcskdymQ2x\nTGKUIIikIv1ynFNaGrMYSYRweaRK4lUJvn5jQQFuWL2aPSgRlNMTE4IQu3riBADmrbQsWYJx3n8P\nQNEdd8Bks8lDYX0+9La3o2vfPjF0F4A+Lw+127ejc88eRSGs0euh0YttoX1eLz6Pop+hRqtFnt2O\n3PJyoZARAOTMn488ux1lX/0qE6kajVw0+xnv6cGVtjZ0+4/V9c47QQWa3MeOYW9dHV696SZss1px\n5fhxAMz+RXfcIdyORigdcTrR9qMfpbQy7oxX483JwSBY644utzvpr4NkV62Om3jEYziireIrrZ77\n2GPKVV2tVuZhbGtjAm7jRnGcNI90xw65+Ny2TV5dlws8s5l5O3lIcEUF86Lu389EotsNTEyw25If\ngVBYCFRXs7BWaR4pwI6zeDFw/ToTtAcOAGfPituHhoA332RzTkyIj2s0LLR3cpIVV/rkE7ZeabXb\nZDwv1FBVmSAyHSeAWrA2YfF8HCU6HsCPf/xjFBcXY82aNWiNlO6TJChMlyCIpCINn42mv2QySUZv\nU+n6AeCo04mxnh4hB9OQlwfvyIg8jzKwsTyYQDXk5WGivz/oGLaaGuhMJlxpa4MhPx8Pnz6Nk88+\niz/+9rfBC9JoYH/oIXQfOgRvjGLphtWr8bW33sLLCxZg8vp12ZwanQ763NywXuDAdYQrTqLR6+Hz\nXwNdVha+ffkyAMRUAXk2Qk5n/JiDg9hbWYkutztlr4NZIdmhmtGGN4cLsz1/Xhwn7TfqcLDbgWGn\nTidw7hxw4QLzYEpaGcm2LV3KxtbUALfcwkSi0utIo2EC1mRiglUaPltfD7zzjhiGK4WvJz9f3ufU\nYJDnp1ZXA4cOycOCz50Dnn12ZqsWEwQRFxHDdGsBcP3XACDWj6MEx584cQK33norjEYjXnnlFfzN\n3/wNTp06hcWLF8v2ozBdgiBUjdQDmWqvTTKOJ10/v/3VXbtgdzhgr6/Hw2fPYnFDA+bxHn5abZAQ\nBZj3cqK/H7llZTD6K+ZqsrKwsK4O9x84AMsXvgCtv1dg6xNP4NKbbyovyOeD++jRsEI0q7hYvKPT\nCTevtLWhqbJS9hif0zc5GVGIGrgnyD8mFBq9HnqzmR0+Jwff+uMf0b55M/Y7HJiQFD6KVAE5npDT\nRD2boY6ZNI+p1Yp1588n/XWgqv6qQPJDNSOF/XLPJq8QrRRmKx3H+41WVzOPp5Int6ODeU/dblZg\nKHA9fFtBgeglfOcdZSGq07HXzNAQ81z6oygAMC/q9u3yqrjScVu2sNtSr+rttwNf+pI43uFgQtRq\nZVV9y8qYELXbZ75qMUEQM4P/bQk1AOIJMElw/MqVK5GbmwuDwYDvfOc7WL16Nfbu3RvHQmKDxOgc\nguLq1cNcsUW0obGpOl6oL/Ch7MH3P9jYiNpt23Dfrl3Is9txb1MT6/kZEIary82VjS9YtgwPnz2L\nb548idyyMlQ4HPBeu4aDjY0YunAB0x4PvEND6G5uhkeSP6bLyxNaxOjNZkwofNnV+b/ImoqKkLd4\nMbRZWTAWFgbtN+Z2Y3JkRPH8NNnZyFmwgHl2FfAODyuG7/J1cXyTkzDm5UFrNGLeihUw5ucrCk8u\n/PRmM8YHBgQb8Ovs83oxuHp1TKIt1hY/gYT6ASPReaXMxOsgmesLRUrfp6ThtoODcrGYnR0cfssF\nV28vE2JKYbbSHzR27GACkgs4pbDTcKHG0m3btonCW9rWRIpG4hwwm+U/5tx4Ixsr9XJypqZEIfz+\n+yxPta4OLc8+C+zaxdZ89CgrSMTXbbcDn30menKTHTIdiUDbZThz5fM7Xcgoe7jAPJoHAMTzcZHo\n+FmCxChBEHOGaL/AH3E68dL8+fjjf/1X2BzU4oAiQr6AL5eWxYthslqRZ7fj0c8+w/XLl4X5rko8\nJYVVVSjhXg+dDvOWL8dDJ07AVFyMSX9V3CAmJ6E1meAZHMTV9nZMj4+zkGB/H1PuDZWKxkB8Y2MY\nu3JF0bMLAPrc3JDj53/5y0IBJFtNDczl5ZiemMDl1lYcdToVPY7rXC5oTSZMjo6iu7kZr954o1AI\nyt3ais+bm6HT62MSbYkW8AklFNXev1Tt64sZqTdv+XIWnlpSwirOdnYGe/qkguvsWbGyrTTHU2rT\nQM+tkic3XF5kqG3Z2crnw19T69ezarcc7pl1OkWBys8FYN7Q7m4m7PLzgfvvB65dA372M7ZduuZQ\nIjCa/M5kCkjyxBJEcrCChdbGKyQTGD80NIT9+/djfHwck5OTePnll3H06FF87Wtfi3Mx0UM5owRB\nzBkCc0rbN29WzGGU5hECrJjRIxcvBgkWz+AgXqusxLg/H/Du//xPvLFyJaY9HthqalBwyy2y1iY8\nh1aab5pTWooGf6jhqzfdJBQayiopQeFtt6G7uTmoLQs0Gujz8sJW3c0qLsb09DQ0QNi+pNGiN5uh\nz87G+NWrwvV7Y+VKjHz6KYz5+Si87TZcbm0VtgFQbL2zvbAQE9IqohDb1Uh7l0bbImim+oyqvX+p\n2tcXM9L2KyaT2N6koQEYHQ1uzZKqdiKRclfXrBHXqpRXXVbGxPLGjWz7tm1sDmmu67x5LJS3oIBF\nKfBCaPX18j6igXmw8bS/4SQyNpBY+scSxBxGza1dent7UVdXhz/+8Y/Q6XSorKzEz372M6xbty5o\nX8oZJQiCiJPAkMxQnlK9xFNhtFrxzZMnFb/wm6xW/IUkH7Do9tvxV263cH+ks1M2Pz8+zzfVm83w\nAXh7wwYcbGyUFRkad7vRf+YM7A4HHj5zRsgvBQD4fGGFKACMX72Kib6+6ISoVhsUYqwJCN2dHB2F\nT6OB3eEQhPzwn/4E3+QkPH19uHz8OMx2O7QmE3YsW4a3N2yQ5YvyUFx+jhqJ55Z7lPMWLUL75s3Y\nXVuLizt3RuXFnqlQcJPVCqPViv0Oh3ryMiWkOgR+xpF686RtSbZuVfb0paqdSCSvn3Stn37KhBkP\n0dXpgAULWDuZ7dvlobVSz+6JE+z8Ll4MbgUj9ZoG5sEmEo6bzFBeqrRLEGmPzWbDiRMnMDw8jIGB\nARw/flxRiM4E5BmdQ7S0tKC2tna2l0GAbKEWuKfUvWQJ/nd7u/DF3jM4iJaNG9F36hRyyspgtFhk\n3rlovXb/vXAhrnd1wWCxsH6f/pwuz+AgXiopwXSofDMJerMZUx5PUAjwjKLT4YZVqzD4ySfw9PTI\nNvGKs4HeYwAwFRfDI2lvA7Cc1uKVK+EdHsYV7kHyk1tWhvybb0Z3c7PgUf15bS2KT58W9gm8dvES\nrc2kzEZ1XzWR8vcp7oU0GFieJfciziaRvH7cQ5udzYoZeTyswu6HHzIvJ8/XllbsDXeO69cDzc1A\nbi7wxS8CL7wArFqFFrcbtYFrkHqHN2+OrYJuqjzLGQh9fquLdLKHmj2jsUCeUZUzx/L4CSKt4Z7K\nVb/4hUycmKxW3LdrF8yLFqHH31NU6p2TelSbKivhGRxULI7EBZR3eBh77rkHL82fj+2FhWhuaICO\nN6cPg95igUarTa0QBYCpKVxpa8O8mhqYioqEh7VGI0a7u7G3rg66gD6JxsJCTPN1Sooeefr60LVv\nH4YvXAAAGPLzAbBcx9KvfAX9Z86wNjh+z6zO7wHmuare4WG0B1Y29aN0zXm+7/bCQvx+/Xrh8XgK\n/mRcXmYipOLDjXshm5uZWItGJM30uiJ5/biHtrOTeS4HBljYrtksCtGCAnnF3nDnuGMHC1O+do3t\n831Wm+AAACAASURBVKMfsdDcu+8OnwcbTd6m9FoBqfEsEwRBRIA8o0kmmWkYBBFItK33iMTgXrSB\njz6Cp7c3qD8k96hyjEVFwPS0kAvJvWgvL1yIa11dMOTnw3rzzbgq6TOYNW8exnt6xPxRrVZWmRcA\nsktKMD05KeSRAmChf7xIUbzo9TDE0mPUj/WOOzDo91ra6+sBANNeL7QGA8Z7ewXPp8lmk63ZWFCA\nb548ifZNm3DXli1o37QJa7duxX6HQ+ZdXdzQgLVbt+Ko04nxgQGZx1Qpj1Q6PpTHlj8eTw/ajMvL\nTIRUfLgpeSEjvemp5UN34UKgq4vdvv12lgfa3MyE6MmTYqXbaPIrpT1RCwuBu+6K/IYfzbxquVYE\nMUchz6gyyvX8ibhJdUV1Ym7Bf/wG2Hc0+i6RGKFCN7kXDWChpFLxcsTpZMWEJMVKJqR5mTodxnp6\n4BkcRK7djmtdXfAODaH35ElhF73ZDI1WC1NREQpvuw3GggJMDAzgcmsrNDodfFNT0OXkYH5tLT5v\nbhbH5eZi8to15ZPR6aDNysJ0qO0StHo9NNx7qSCCQzF0/jwA5ims3b5dJtBeXrhQWEf+0qUY1mox\n3tMjCFHeEgdgebhNlZUYlwjWwqoqQfTd29SkKASldglVsVea72sqKhI8uWuff14QwdEKS74WAqn5\ncHO5gkNHI73pRbOucII23m2B2O2iGK2oYDmiPHz3sceACxfYPtnZrDDR9u2h57vzTjFUt7+ficyb\nbgJWrAi9RoMh8rz0BYUgCBVCYboBJBrxo+Y8/ozqxZTmxGsL+i6RXLi4ORiigJGtpgYPnz0b1H/y\nSlubWDWTizpetGRqSmhvYuTFTQD4JiaQU1oqtDYZc7tZ4Z/WVvSdPo2pyUlos7NRcNtt0JpMcLz7\nLq5fvizzMOoC2kjo8/Kg4eGyU1NRCVFoNNDn5IgVbUMJUU3wj5vGvDzY6+uDxPnu2lpM8JDEqSn0\ntLXB098PbVYWLEuW4Oj3vy8rADTY0YExtxs+f7GWnNJSoYouf20oFegJFJ88zLrglluEQkNrn38e\n5vJymIqKMD05KYRZt2/aJMwXqt8sIUf2PjWTH278g7exMTiHMfBNL/BDOnBdSh/i4UJYlbY5ncD8\n+cB//Vf0LUukhYy4IOThu62tTKi2tTGRaTSGv4a8J+oXv8jua7Vo6e0V295Iz08a9htpXjV/QUkj\n6LuUuiB7pD8kRgNItF1Wqgr8EXOTTPouMdOCINz8fNuAv6WKwWIRPGiewcGgqrtSuCAqqq4WwnMB\nyFo6FFVXC2JJ6xeQBosF9cePyzx3fN+c0lL0tLVhemwM/adPY9rjwclnngnad97KlaL4BDB/7Vpk\n33BDbBfG52P9SCXosrJgu+uuoP0C8fT14Yok1BgQBX1gyK9vchLT4+PobW8PW624sKoKDR99FJW3\ncp3LhbyKCuhMJhxsbAQA3NvUJKta3L5pE8wVFfD09QlrCsz5jCd/dM4zkx9u4T54A9/0AvuROhys\n9Uu4ucL9iqe0raOD5X/ycHhpzmcoQr058/n9udKoqWHe0XC/evNrvWMHYLOJ7zEFBUBpqfz8YvmF\nkr6gEAShQkiMBpDJnqd0qTY2F4jXFvF+l1CjJ2imBUG4+fk2T28vtCYT7qqsFDxory9fjv0Oh9CW\nJPDacaG64dAhzFu5EgATmgALP11YV4cNhw7BZLXCZLWiePlyAKwQz1v33BOUZ2EuLxc8qHwevdkM\nt9+7yD2Uhrw8rHnuOZTefbewz9TEBHIXLJDNZ7RaURwoLMOh1cLR3o7rn38ue1ifm4sbVq9GdkkJ\nsoqLhcfH3e6QwjIUBosFd23ZItxf53LB7nDAXl8veEQ54V4bJqsVueXluBJQVIqvQW82Y3xgAMOf\nfMKOm5eH3IULofWLV/7cp8JE0ZGyz4xwH7yBb3r+QliwWFheZjTCM9yveErbpM9pnY7lgEZi82bW\nK7SxUS4w+fynT4vH4d7SSL96W62Av9BZrV4PtLRE1/aGmFHou5S6IHukP1TAKACqdk5kImpsURFP\nQZlkzR9YgCi7pARjbjdsNTXQmkzo8RfiWdzQgOs9PSGvHc9rlBblCTwP6Tp0JpOsvQlfG8ByIKHT\n4dOdO4Xw1UCMhYXQZ2VhrKdH2EdrNGJ6YkLYZ2FdHb7y8sv43bx5UVfhNdvtuPb554rH5UWFXqus\nxLj/Gkmvp2dwEL8rLpaNNRUVwdPXB11ODqb8fUWT9bwLtGv75s0YOHcOPSdOCOfLjw/I283wNURb\nmCiedjBEHAwOMi9naSkTW+HyM9esYeGuAMuT9HqB6mrg0CE2hn+InzwJXL3K9nn/fbGAULTr2bgR\nOHYM4PngkQr+SIsDVVQA5eWh81B37mQFiqqqgMOHw3/ZkJ5vQ4MYqkxfUggi7aACRsqQZzSATI5i\nobh69ZBqW6jRExQuFHam51/nciGrpAQAuyZFv/ylsC/3UvJrFe7a8bxGXpxH6Tyk6+Cez6LqapjL\ny4PCTS+3tIQUogAw0d+P693dsn2kQtRgtWJiaAg7ly2L+joBgHd0VPm4Wi3+vH8/XiopQe6CBcgu\nKYGnvx+/mzcPW7VabDUY8EpFhWzsA0eP4i//9CcsbmhAyZe+BED52oXy1re0tIT15AfalefwciFq\nq6mBrbpauF10xx1Ba1DKR1UiGu/9azffjG1WK14sLsZIZ2fY+RIhVBubmYx4SNn7lNXKxFtbW2Rv\nIfcMms1MiAJsLLcl/xC/ehUYGgJ6e5mgi3U9u3YB/siHqEKlpB7ZwFBaKR0dYqXcRYsif9nwn2/L\nkiUsvDcwLDkaEi2GQT3rZNB3KXVB9kh/SIwSxBxgpoVfPEQrCJI5P//yfrCxEQ+9955wTXJKSoR9\nA69VtNculDAwWa0wWq3Y73Bg2uuF3eHAhkOHYK6oCAo3nfJ4Qs6v0fuLn+t0svu8HycAaHU6XGlr\nw7Wurqi9ogaLRejtKXs8P59V7x0exrTHg74PPsCY242RixfZ3D4fMDmJiYAvpwcaGnD8qadwvacH\nPjCvq9Zkwo5ly/DmmjXC9encs0cQei2PPy6bI5wIDLSrNIeXF1e6d8cOwWbrJbdjfa5F8yPOdbcb\n3qEheHp7sTtW0RMDStcko3Jfo82R4WGpq1aJ+2/fHrwfz63OyWEeznCEEluxhMBK9w0MpZXCw4zz\n81kIbiSRx+f9xS+iD+8NJNFiGImOJwgibXj11VdRWVkJs9mMG2+8EccivX8mAQrTJQhizjCT4crh\n5lbaphRG/Nb69eiWtHLhaPR6bDh8GO889JBQXXdhXR0uHznCepQC0OXlwZibizG3W2gPAwAag4GF\n04TwuOaUlmJidBSTw8Oyx+0OB9zHjrHjxdD+BZCHxkpb4HAWNzSgq7lZqOhrLCrCvJUrhVBYfm30\nZjPmrVqF9Tt2hBSSoUJupSG22X6vZWC4baQw3GjCeV8sLoantxe6nBx869w55MUSDhoDSs+XmQ51\nTymx5shE2r+zk3lEjx2LHKKb7P6b4dYmDbu12ZjnNtrjRtNLNJnjkjWeIAgA6g/TPXDgAL73ve+h\nqakJK1euxOXLl+Hz+VBaWirbL9lhuiRGCYKYM8Ty5T2UUOGPj1y4gFy7HUaLBetcLhxsbIyYo6o3\nm3HDqlW4d8cOAAgSOp7BQbx6441CvqOUxQ0NmBgdlR3DtWiRrIqtsbBQqJSbU1qK4hUrMN7bK8tT\nDUJBLAIAdDroc3IwPTGB+5ub8c43viEKzDDYampgtFoVRTXAPK4Pnz6N1ieeQHdzM3S5uZjyt6SR\n5nS+umRJUK4nJ5pcTukPACabTRDxeRUVyC0vhz4nB97hYeHa8GPEmic60tmJ3WvW4MFjx2ZMiALK\nwjja3FciAgsXstYrFgtw5kxs+aWxIhV2VitryRKtyIu3qEWixTComAZBJAW1i9EvfelL+N73vofH\nA6KVAqGcUSJuKK5ePajdFpmaIhQq5FbJHqFCIPnj17q6hAq8R51OZBcXw2SzhcwbNdlsmBwdxefN\nzXitshIAhPBdHrravnkzfBIPpDTHlLeKMVdUCNVhtTx0FyxcV2c0snH5+ag/fhz37dolzKGE3mxW\nFqIAMDWFyZERTHs82PvVr4q5qTodNP7j8JBhANDodDAWFMBktSLLZhNa0Bj8YcS63FwAgHdoCLtW\nrcLdL7yAxQ0NuMHfS1FvNsMzMIB33noLJqtVCB0OrMQrtUG48FRpiK2tqkq4nVNaKowd9odMSsNw\nYw19zbPb8ehnn82oEAXE8OT2zZuFcHAAMxrqrvb3KYFIb1iRtnPbDQ8DmzbN5Erl4by8n2gUQrSl\npSX+ohaJFsPI5GIacaCK14UTQC2AOgAZ9BkdD6qwR5JItA5AIuOnpqbwwQcfoKenBzfddBMWLlyI\nH/7whxgfH495HbFCYpQgMph4RWW6pghF6i0qbdkSiVD5gvxxg79vIN8+0tkJT28vPm9uRlNlZVDe\naPGKFcJ93h4lUPgMdnQIoaumoiJ4/aGzI3/+M3YsW4bXli7Ftc8+E0TwvLvugtYvDCdHRzHtzxP1\nDg2hfdMmHHE64R0ehtbfHkLKwro6zON5dwAKbr1VaCMTyNTYmOCBtT/wgHguU1PQZWcDGg18ACYG\nBvB5czOrCOxfS8mXvywTnQAw5najfdMm3NvUhPU7dkBrMglC/dS//isACOLOOzyM9gCBILWNLjtb\n0ebSHx54DmnBLbdg4Nw5AKy/qUOSNxyYg6qmYl9SMipPNFlEesOKtD1cjmco4n1zlQo7EnlEvHQA\naAWwD0yYEhlBou/viYy/cuUKvF4vXn/9dRw7dgynTp3CyZMn8cwzz8S8jlghMTqHoF5M6iFVtohX\nVKZrv91oeosqbVOyRygvKn/84dOnZdul/TbHAnpx8nHSCr5KlXql94v8FWH1ZjMm+vpwvasL45KW\nLgCw8l/+BaXr1gWN4SLt4+3bcaWtDdMKv2wacnORW1oKY2EhsubNQ8GyZbJiSFxkAszrCbBw17Gr\nVzH08cfCcWzLlzPvKs9R1euFNWr0eqx57jlBdGYHnD+AoGt3h83G1hdQ1RgQf2zweb1CsaKRzk7B\nrq9JfgSQFjrit0c6OwWxn7dokWIVZDUW+5KSSrGcNp8Zkd6wIm2Pp1dnsn+xiyBu08YWc4CYbTET\nXkz+llkDII0+o2eCTHptJPr+nsj47OxsAMAPf/hD3HDDDSgqKsLf/d3fYe/evTGvI1ZIjKqYTA2V\nJFJHvKIyXfuoh3ojPuJ0ov/MGQDMIxbNm3Soar+h2rmsc7kUxZZ03F+cPx+2Uq/0fm5pKUzFxZjy\nh8dqJCGxnDdWrsTa559XrBw70tkZsqKu1Js70d+P8Z4efPr665gcGQEAGAsKWIsbfwivb2qK9Qyd\nmEBPWxs8vb3ILSuTeRoB5i2+YfVq4b5vclLw0DZVVmK8rw8agwHXurrw9oYNgjdT7/8Q1Fss+NJ/\n/IdwPQNFIf9B4fPmZuiMxiAhO+5249WbbgrZ/kTnDx221dSgdtu2mOweiplurxKIKsSy2j6cIr1h\nRdoej4cy2b/YpWs4ChGZmfBiugA0ADgAII0+o4nwJPr+nsj4goIClJWVxXzMZEBiVMUk+7Mpk+Lq\n051U2SJeUZmu0WOh3oil4a95ixYFvUknwx4mqxXfChCbSvtIhU5gHuDBxkahGM1IZyc8V6/C5xej\nvqkpISSXM+3x4K177sH1nh5Zv9JAkQaw4kb2b3wDpuJiGP3HH+FtJgBBuBoLClB2330Y41U+wTyc\nBcuWCRV3tUYjpiYm8MnLLwvXNae0FIvq6+GbnBTWyUVv5549GHO74fN64fN6MeZ2C21tXqusRM7C\nhQCAyeFh/J9Vq4KuBUfpxwapx1lvNsPT2xuy/Yk+NzfpQi7VYbMz0hIphLgM+bpQm3CK9IYl3Z4s\nIZ3sX+wiiFv6/FYPIW0RygM6E15MK4AmkBBFZr02En1/T3T8448/jl/96le4evUqBgYG8Mtf/hIP\nPPBAXHPFgj7yLsRska6hkoR64N/B5gr8jTgQqYgJ5RGLFaWqq6GOHwkuaABWYffepiYxN9VigXd4\nWF6l1l8B11ZTA53JFDQWYCLtRZtNCJ+d6O/Hn/fsgc/rRXdzM446nci123GtqwsAawFjtFhQVF2N\na599JowDmIdz9OJFAEys5i9Zgqvt7cJ2Y0EBGj76CPsdDqE6bW5ZmSD6AvunGvLy4PV7YcfdbuFx\nW00NesbHZedjtFqF67z2+efRvmmTTKRyj/NRpxMef84qF6tSj3hRdTVqt29PujdR7TmmUcHFJcDE\nWqTncDp/OMV6rqFI9pury0UVa9Md7gEFmDDlTw+X//5WkHgkVM3TTz+N3t5eLFmyBFlZWfiLv/gL\n/MM//MOMH5dau6gYqqZOEMkhkRYYoVp9JLNnqVLLGb7mu7ZsEQTY8aeewp/37UPBLbfAVFCA2m3b\nFFvK8DVfOX5cMVTXZLMhf+lSDH38MTy9vTAWFMCyZAl6/QIzu6QEYxKRCADQ65Fls8GQk4MRvzAF\nmNcUOh18k5PQaLXweb3Qm80wWizIq6iAwWLB1PXruNzaCkN+PkpWr8aa555D0803Y2p8HAaLBQ8e\nPYqTzzyDtVu3Bp3Pfocj6uscaGepjez19bhv1664bRTtMdOSWPtIpvOHUyzn6nQy8ZqTw8Riup0r\nkVrqwEJxaxA6fNYJJlpzwEQqPaXmFGpv7RIt1GeUIAgihXBB89K3geHbbFiwbAVc61w4/lDovqKx\nEknQcHHZf+aMEBYr7ckZOFYqwnRZWZiSFDDKLilB3he+gB6JB7P0K19B5549mBgYQFF1NfKXLsWl\nN97AdIBHEwCg1wOSIkoao1EIJWYPBPcttTsc0BkMsjW+uWaNrMcn94DqDAYMdXQgd+FCGCwWTPs9\nufFc52T0lY0XPt/whQvIs9th8PejVaVgTWdxGSuxnOv/z97bR8dR3mfDl6SV1pLW0q60MooqLGzC\nZ21HwgsmMXQ3D3KIBanUNMoHpQL6VHtOeHLaPjmx3z7tyfvmNP0mz5O273lL7Saxoa3S2lDbOCCo\nFUuyITGEFAwFEiUQOxgjjM0KIdv6sP17/7j33rnn3ntmZ3ZndmeluXzmeDVzz/05uzPXXL+PREJT\nUfv7l5aZiQ/7mEJuBTQBTT3th6ae+lgS8MmoGr7P6BLCYrKrtwuvxdtYymvhRZitBzfDfO/KEF6K\nnMbwiWEkDycdDSSTy8/j+P79mBwfzxBRORqtfK5oOnr322+jWji2YsMGFpwoffwzL7+sizKbeu01\nvPnEE3oiKuYTldO/CHlRAZaSRkRNWsGV+/jB8eMAtDyi3FT54MgIFs6fz/iUVodChvNsFDxIFXU3\n1xo57fvJ6zt34kRmLJ5NxWLgc7kof6fsOMR7yBx5Ua5FmcJwLaz4cfpRcB2H/90of/hk1MeSgNfi\nbSxWeI30OwFOOkPLGIG7KtWI/3fdA+4EkjGA6HNZVVuLqmAQ37/rLsPorbzPkeuvx1N9fahMk8ma\npibMnj6dRdLe/dGPMudemp3N5DcFmH/nZel8pMHmZjStW5dJ+dLc1YVWIXpudTiMhiuvzAQw4sGQ\nnurryxDGQ8kkvl1Xh3NpX9WF6WnsvfnmTKTbxquvRvNHPgJA8/EV51kkoKlXX1USSFXU3Vxw2vdT\n9Pl1st4lj2L+yDgdpMhq350YoxvpRHwUDj8Krg8fWfDNdH0sCdh1ifKRHxazVdvQplvwf1Y9g7v/\nGVhzZ2E+okYmoUb7v7dpE06OjKCpsxPV9fU681beD9W5orluZTCI5s7OTOAh8dwdjY0aAU2b2VZU\nVYHSQYxCHR1YOHsWdOEC5tMPx/Xt7fjMyy8DAMbuuw8gwjs//CFmT50CwKLr8qBGos/nuVOnMn+L\n6OjtRVVNTYawcdPjZ7du1Y1LrI/7tspmuCrz3FxmuE77fqp8fotpouu02bFnUOofmUL8SK323Ykx\nJuCbg/rw4TH4Zrpq+MqojyWBcs2bWW7wkFWb4whXNyD5j8DKXy1c4TIyCTXaz/OHfmp0VKm0HUom\n8fquXVnnTgupWy7NzWEmbRobjEYxc/JkRq0UU8aE161D1bJlCNTXA2DqZ117O+ZOn84QUW7eyyMI\n375nD27fuzdzHAAWzp/H9++6C5VpxROVlXjr4EHlXaq6sREf+9u/zaS5eaqvD/MzMwA0E+UTw8MY\nu+8+nYLZd+SI0oS3tqUFwWg0K72PmRmuVaXbal5Ro3y0xUKxU84UDU7+yOSjQBZiZmO1706M0TcH\n9eHDR5nAJ6NLCEvZrt5reTMX61qUI+k/lEzizzo7c5ILJ31EjUxCjfaLREnVj6mJCSy8/z4AZhrL\nz13e0ZGpoyYSyZC3xmuuwSnBj/HTzz+P+vZ2RDdswNTRo7gomOqGVq7M+Jg2d3UZ+mAeSiZxSQhs\ntJBKZXw+KwIB4NIlzJ05g1M/+hFqIhFUBoMIp81xF95/H89u2QIAOPzcczoSpUsLQ6QbvxHR++D4\nccydPo230ilszObWLsqF5JmO1yIJ8+TvlJM/MvkQy0KIotW+K8rZXgsjc9DF6EtRZHjye7GE4a9H\n+WPJk1H/d9k6/LnykQteI/1WMDUxgfeOHs1JLpz0ETUitlYIr1nAoppIBL/5wguZY1xF5fs5eauR\n1NXlHR34rTffxLKmJt15gVAIC2fP4tYHH8Tq/n7cefAgbt+7V9m3qYmJTDCjikAgU39ixw7UpMtX\n1dWhae1azKdSuDQ3h9l33mFjikZxNq3Ucv/WikAAJw4cyJgCNXV2IrFzJ57duhXnTp3S+czKaiWf\nj0AohNlUCnNTUwW/TOBtpF55RTd3XoXpeMvZid7gR8aqYq1DPsTSDhmWb5py341uqk78kBoF0ynn\ntffhw8eixJL3GS21+0k5wZ8rH4sRdtJ/lAr5+jta3c/rr6quxvs/+xkWzp3D3LvvZsovX7UKF86f\nx8W5OUTXr0d9Wxs+OH5c1x8+jzWRCD41NpbJGxoMh/HB8eN47JZb8OtPP43DX/wiTgwPIxAKoWX9\netSEw5g9fTrjB9vR14fJp5/G3OnTmfa5f6rsB8v9XuV9t27fjn+9+urMGArNAwvo0+WI/bG7Vk6h\noHYWoRN9Xnl/3U5pk+umWYqb6iJce8/Dzy3qIw3fZ1SNJa+MLmYfN6fhz5WPxQgnzW/dgplp6KFk\nUudjKcJIzZX3i9FnF86e1RHRmkgEdW1tOD85iflUCidHRvCzhx/W/DjvvReANo9feOMNNK9bl6n/\nUDKJ0XvuQWTtWtQ0NjLi1NKCCzMzeHt8HFU1NTo/2MSOHWi58cZM+02dnTripzI/lfcFw2G0xGJZ\n5QqB2IYRERXnMi8zXhvmJwW1U4729DmQlxm226YcuW6a3Ke7oQF44AF3+iBjEa695zEBFkxqGIyY\n+vDhQ4clT0YL/V0uJ9PVQu3q/XuYc/B9HLyDYDiMwP33e5aIAuYP2kakxI7ZIq8/GI3q/D5RWYnm\ndesQqK01Pjmd5sWI+MoBiFREUXwh8MMXX8RtQ0Po6OtDR28vPjU6mtOU2eq+QmC1voJ8U22YUBbU\njkUSVk6/U558qZTrpsl9uqengRtvNH2QcGwtytGXwi5cTmtjey1KFUxqiaT3KaffKS8jFAph+fLl\nmS0QCOD3fu/3itJ2oCiteBj8dzlf8GcHgD07LGbT1ULnyocPH/nhtqEhw7QjRqSEk1SApUkxM1vk\n9c+cPIlTaXPZiupq0MIC3h4fR01zc+ZvEc1dXahpaMBjiQSmX38dyzs6UN3QgNuGhjIpWWYFc1uk\nzXpU4xH7xyP0qsBJbz77CoHV+szWKidsmJ8U1M4ihNPrDUCfxqWlBTh+3F5Kl1w3zbRFAEIh4N13\ntZcQ/o22MHAlEmCErNTTOZTux3YU10TXa/Pgw9OYEayrzp49i9bWVnz2s58tSttL3me0UDjtflFI\nCjMfPnwsbqj8BI38QrkPZzAaRUVlJS4tLCC6fj027d5tSF5E/9lgOIy3RkYQCIVwQWECXLVsGe5+\n+21d3k8OVT7Rps7OLJXTaExLFoX6MDp4A0keSmJiagJ1gToM3TaEcHAJrovo09nSwggj4Jx/J1/v\nVAoYGfGGH2cx/RvdeuDpATOJjSE7mrDb8JJ/qJV5yKe/XhpjmaFcfEYfeughfP3rX8fPf/5z5XHf\nZ9RjcNp01Q9058PH4oHTZvwqk1wj81hutth4zTWYPXUq4+9p5l8omjp2796N0KpVqEhHt23u6kKw\nuRkAi4r72Z/8hJk4p9U8Of8p38/TwXAiKpsPl0uqlKKgUBNKB28gE1MTGJ8cx/CJYSQPL9F1EZXq\ndBoiR4Mm8PXevds7PjBm/o1O/6C59cBjlNamGPCSf6iVecinv14a4yLDoUNJPPZYAk880YO5Ofvf\nsULP53jooYcwMDCQ9/l24ZPRAuG0+4WbQYK4Xb1X/Vy92i834Ps4eAturYfTz1q5/ARFogdAl8YF\nYOqkmX+hSGyD4TBCK1dm8peGVq7Ep3/8Y9S3t+Ozr76ayWHKCexnXnpJ57PH98vpYGTyKY/JaC3y\nSt2x1ODgDaQuUAf8FIhFY9h+6xKNWCe+bXaTMFp4kCjaPcPMv9HpHzS3HniM0to4BNO1KJV/qApW\n5iGf/nppjFhcz1NTUxOYnBzHiRPDOJzHS8BCzweA48eP49ChQ7jnnnvyOt+rIB/WkUoR9fez/53G\n6OgoERHF40TMeYu15RV4tV9ugK+FD2/ArfXYvJldz7GYM9/p2VSKDvT306xBZQ+3ttI2gLYB9GRf\nX+acJ/v66MneXsPzVBgfHKQdkQhtA2h3Z6etc43q2xeP085olLYB9GgsRrOplG5M44OD9Kcf+Qg9\nvnlzVnv74vHM2A4s9h+IfOHgDSQ1m6L4N+KUmnXhZlRuGCSiOBFtJqISTUfmN2pwkN0sN29234Yt\nRAAAIABJREFU50EhRUT9pB6n0z9obj7wOAGDdTe9X5jNnxeRT389NsZyep7KxYkef3wzbdsGevTR\nGM3m8dtb6PlERF//+tcpkUiYljEaB4C8bJB9n1EBySSwfz8wNwesX89ehJbaYsYNcD/XUAi4+Wb1\nOEvhu+qnP/Ox2OB2GkMZO5uaMJ9KAQA6entx+969edcl5m3s6OszDCiUT32qPJ2Hkkm8vmtXRomV\nc0WWQz5YL8D3wXUBCWiBYPpR2kAwpUz4XewfNDMUw28xAeN1L6R93+dyySKXz+jc3BQOH07i1lu3\nI5iHn36h5wPA1VdfjT/6oz/Cvem0bSr4PqMC3HBfmJzUYgksVveloSEWi2Fmho3zqquy57AUvqt+\n6hgfiw3FzqJQlU7BEli+HB/727+1dI6R+atoPpvYsaPgvuXK0zk1MZEhopU1NZg5eVLXJ0+m7jBB\nqcyKfR9cF1Bss0SzlBylTPjtxg9avulHiuG3aGqyXED7vs+lDwMEg2F0d+/Km0gWev4PfvADnDx5\nEv39/Xmdny/Kmoy65b4AAJ2dxf+ddxvcrj4cZvcxgKmjp09nz6GV+53TLwOWQvozjsXk47AYsGjW\n4+JFAMCFDz7AD37/93MW52qkirwUO08nJ6tvhEJo7urCqWeesRSoyasoFSksKP+ohEXzvSgUxQ6I\noyArmbVw4q2tlwI05EvMivGCwGDdx8bGCmvfib7nS+IXYe5R/3fKOTz88MP4zd/8TdTX1xe13bLO\nM+r0C8KhIeC++5jX4s6di5sUDQ2x+9EzzzCFtKEBeOCB7ONm1jhLKceqDx/lgIvz89ofFbktZUQ1\nsiYS0ZGXYuTpFE1Kb33wQTy7ZQuuGBjAhb//ewD2CJXXzFNVpLAY6VJ0+Ue3bgUmJnDo9dcx1dGB\nQDoHbKnnpuzAA8EUC2ZkxYmE3166eedLzIqRu9Ns3Y3at2KC60Tf880h6uce9WGCf/iHfyhJu2Xt\nM+ol94VygugP+t57wLPPsv123U98H08fPtxDPuTq8U2b8NbICJq7unDnwYOm5xxKJvHGI49gPpVC\ndWMjPnP0aCZCbrEg+pEGo1G03HgjbhsaAgBl7lSrdcn+pqWAKv9r4rEExidZH/tX92NXt8t9TPsX\nPgZgMr0rn7lJIokJTKAOdRjCEMK+k5u7mIK7RMtLN2+3x1psJFAc/2Ixh+j1AI7Dmg9qKXOw+iib\nPKO54PuMClhKZp1OQjRvPn6c7ctHXfZ9PH34cA/5mHl2796dSaeSi8RNTUxkgh21ffzjRSeigKYe\nBkIhzJ0+nRlrPia5TpqnWoWZX6hqDHUB1seipUvh8yvlgLWLCUxgHOMYxjCSvpOb+1Cl5HDSvDLf\nm7cbJp4up2EpOtwyH5bnXjQhPg7rps6lzMFqhEVoOuzDHsqajHodbrhlFFInt6sXzZuPHMmfUPKX\nAVu3esf9pFzg+zh4C15cj3zIlR0S53SAonzA/Ugvu/nmTF8u5ZlouxQBjuy+MBi6bQj9q/tx4I4D\nrpjoykj+z/+JxF/+Jf52xw6s+MIXbM9N5p6RfsKOIYbtxUwsmAQOfSiJx5oSeGLT0s4vO/bcmHNB\nb/J9k2/Fv7PciIXV/grlxr43ZlzOLbInz71I4u0QYDPyX6q1KzCgkxfv3z7swSejLsKNiLSF1plM\nAtPTQGsr8MgjQEdH4eqy2Kerr/ZJqQ8fMvKJrOo2ufJCdFpOnrmie8eBA6gJhQqqq5hj4YQ+GI3i\nrBT9V4VwMIxd3buKQkQBYOL8eYxHIviP06cx9Du/k/fcDGEI/ejHARworonuBDA1OYHJ1DhOjCzx\n6MDB9P/FiuirghXS49VIsUZES+zvdTAmYWK5b5i0w8neVoP28oXZ3JsRYDsEs1RrV+xo1T48h7L2\nGfU63HDLKLRON1KUiXlLZ2acrdstlCKPqo+lC6/5M/pwBtwv9OzJk3jnmWcAFLi+Dv8w9TzxBIZP\nnEAsGsWBO+5AOBjMfZKX0AM8MdyDExhGtCuGOw4W/8WJK0Gn8llnL/hWWumDV30SE1D7cvL+QnFM\nhF0fTaP28kW+62+nH6VaOy9c20WC7zOqhq+Mugg3fCoLrZOb6IZCLJ+qFZEml2kw71Pa0q4k6c/s\nohR5VMsBpcqNuNhRCn/GpYJSXrNcja0u0CczA4d/mIZuuw39q1d7m4ia3WCGgNt6h7C6r986EXXY\n1HBiagLjk+MYPjGM5OESmTglAfQBmHGm+bxhxb/Tiz6JgLH6NgSgVThWC/X1Y9dH02m1L1/fWrEf\nRmPjKNXaLTa/YR+eBPmwj8FBonicaPNmolTKmTpHR0cplSJqaSFiCWyI+vtznxePWyufSrHjTvXX\nTWzezMYTi5Wmv6Ojo8Vv1AL2xeO0DaBtAB2wcnEsEri9HrOpFB3o76dZFy628cFB2heP0+ObN7tS\nf7Fhdy28cM06tr6l/mGSUJTfKas3GMv1ERHSmwPVbX58M2EbKPZojFKzDq2J3XWOE41i1LExlRyD\nxNZpMxEV6zIfIKIWIupWtJkiNq8psnT9jN40yo7HFHWp6ix0vIWcb3NsBbdXAnj1eUqFxcKJjMYB\nIC/Zd9Epo17K5VwI3PLDDIeZcglYVzCt5nMtp+jGfiRgNXwFzx246c+YT9RdJ1FqNd0L16zZ+tqa\nn6X4w+REwnBRDa1O73NCkUomMfTNafS/1YoDtzzinK+v3XUud586Wa0uhW/icQDvAhhRtKkKBhQF\ncBJqFfGryK0ginUWOt5Czs8n0FGh/bVrnVBuQa98eBLfAfAOgJcNjheVrTv9krVU4C9OQyHnxyMq\nmLICq1Jk3VA8nVB+3VCPlzrcVPB85EY+KufjmzfTNoAejcWKum68rzsikZIqk16/Zr2g3HoaTtxg\n4qQpPr2kKUFGsHrz8MoDhahulSPipFfkNlNuZdFp5GqTq4HdxK6hjeScwl7oeJ2aL6vXUaHtxcne\n3InlV1FZqbJ2UWxO5BaMxoE8lVEncCuALniEjDpp5VRKssPvz93d2ngGBpzvj3yvzffea3eurLST\nq043nhNKseY+qfbBkQ9xKRUZE/taCjJcLijVy4IlBbsPz1ZvHh4zm7YNr5hbyutjh1w7NQajNnn9\nESqMMJv1s9CXCcV+GVFoe3bnTizv5EsAD6LYnMgu3nzzTbrzzjupqamJWltb6Utf+hJduHAhq5zR\nOFBCMgoAV8AjZNRJFU+8X61aVRrCII6nUPKlsquX77X53nvt9s1KO7nqdOM5oVgvwsW18MrL96UM\nr/iclBNx4X19pKuLnuztday/XlkLp+B15ZaIDB+k7/ibOyi+L06bH9/snL+kG7D78Gz15uGhQAh5\nfS/iVNwHeyNCVgi5iZO7YxDrt0iYlWvhdj9FWCXopXoZYddfViyfhypbTveMYnMiu/iN3/gNuvfe\ne2lubo4mJydp7dq19Hd/93dZ5YzGAd9nlCGX36Idn1LRlaWtrTTRV8XxOOFaI4O7rlx/PdDXByws\nsP/tuizZ7ZsVl5lcdQ4NAatWAcEgcNddzvjUujHHxW5zsfhNL0V4IfenVfC+3nnwIG7fu9fz/S0V\n8vUXlr/HyUNJJB5LoOeJHkzNOfzFNvAROzFzwvlIsm7AbjROqz6bbgZCKMYPdbF9TY18DQuJlurk\nGFS+ibz+TrCIxdwP1KjP1wK4E0ALmB9qof1U9SmXD6VVn05VuWL4Z9r1lxXLezUa8xLBK6+8gs99\n7nOoqanBZZddhk9+8pN45ZVXXG834HoLAO69915cccUVAIBwOIzOzk4kEgkAwNjYGAAU7e/nnhvD\n0aMAkEAyCdx//xi+8Q1gZiaBujr2dyjEyg8NAX19Y/jKV4C//3t2/tVXj2FgQDv/uefGEAwCTz2V\nQDjsbv/F/oTD+ZyfwB/8gb6/d989hhMngF/+MoFUCgDGEI+z+pNJ4JFHxrCwANx8cwK7d2vl29pY\nf158kdU/NMTKDwyM4cUXrfVn167CxhsOAw0NY2Dp/bT1LGS+779/DOfOAXv3JrB1qzPra3R98eP3\n3w+EQgls367NZyHXy3PPAUePsr/7+sbwta8V7/vl/13Y3z988UUE7r8/Q1yK0f7Rb3wDq2ZmEKir\nQ+D++1ETCuU8v3JoCFMTE3j1/HlcevppfOLOOx3rzzeOfgNfm/4a6gJ1uD9wP0I1ufvj5b+/cfQb\nmFk1Y3s8LIgd+zuZTODU3RMYT+eqTdYksat7l3P9rUv/ffUYMAAkwP4OVgWBnwKxjTFsv3W7J+bT\nsb93Gc/fUOUQJqYmcP7V8/jqDV/FnZ9QXN/JJMaeew4IBpF46ikgHLbX/sQExtLrmUgmTfuTSCSQ\nSCTsj/f+MeAckNibAFx+PgGAsfPpv2MJYHue9X0DSMwkgLp0/+8HEqF0ffL98c4x4ASQaEsAQ4rj\ncv3PjQFH09f3dcDYPyrqN3t+uXMM+BmQuJQAzgJjsTFgd/r4EDDWNwZ8BUjw55WVY8BJIFGRADYC\nY18ZA6T7P54DEun79dh1Y8AckJhKAASMYQy4E0g8LfXH4PtquB6hBJACxr4ntdc3BnzN5e/b+XT/\nYsDYwBgwZuH8XS72xwN/m6HQvMaFnn/77bdjaGgI8Xgc7733HoaHh/Gnf/qnyrJjY2N48cUXMZV+\nmXbs2DFbbbmBK+ARM91cUFnnWDGTVFnriOe1tLhnwmvVp9COj2U0yspt3Kjtk+eltVV/rBCf0lzj\na20likSYj6ydOXTTpcepsRZSTz7+pOXu5uSjuMjHT9XNoDzxfXHCNhC2gfoPlL/dujielp0tlk1e\ns1wo3EgxwmFgkpiaTVH/gX5vm+i6AEvXYKE3iMX0Qy0G/+mjwkxH42Td3FVV1qz+zUL5fMxpxfaq\niOhYjvKNFtoz8pfkWwdlj8eq2XOKWDqbUgaPKvfgWw4jFycq9P5X6Plnzpyhrq4uCgQCVFFRQffd\nd5+ynNE44PuMWoOKVFq5J6hIgd2It6o6rJAN1T1PdZ7Kx7W9nRHOzZuJbrhhNKu/1dXs/3XriHp7\n9X2IRLRyjY3sWHs7+7uhgeiY4oc4H/Ik9tvufd1Nlx6nnhVU9Vj1ccjnecdDbk5lg3LyOXEa+fip\nuunbetNf3OQe6SoBOIkMfTtk6yFB/h6Xghgu1e+FJeJf6A3C5g+1p9ciToURSBF2yJKqrFn9KSJq\ntVG/QXujVaNERy2Ujwp9WWPQnspfkm+VpCe0Vgi3jDrSyPNRMiaHdv1LvRIcizz+3ZCQixMV+tKx\nkPMvXbpEsViM/vzP/5zm5+fpzJkz1NvbS1u3bs0qazQOlJCMfhcsG9McgDcB3Ccdtz2ZxYaVe4KK\nFKgi3lqtg5NFkfC1tqrPt6rmiuVkxRMg2rhxVNdfkZT29mYTSV6uuppowwa2f8MGc3JkRJ7MSCrv\nN0DU1ZU/iXI6Km2+pE7uh6oeqz+ebr485/38X+2D9OhGe+lEFhusrsdijHycT4AdN4Py7H9qv+Ok\nK5+UOU6Bk8ju/d1lR7LL6SHPSVgi/kV+6+eJtTAiIIUSSBF2lLQUsVQgG4U+mdU/mC7bSrlVTaP2\n+olG949mH1PNzTEiaiOiHqkvg+k+RIipyTzQz0YiWkFEYcpWSMXxtAr7VyjGKaJBKNtuUi4ulLOb\njqXEBiye+G5YRC5OVOhLx0LOP3XqFFVUVND09HRm3549e2jNmjVZZY3GgRIro2awPSFehBkpsHpP\nykUWjQieVTVXLMePNzYal5NJtEwkeTmxrzU15uTIaJ7MFL5UipHhvj51nWYEQDwm9rOUUWmdNGV2\n83mH9/PL8PMgWoUf+bg84YVcn0vV5NUu7L44GKRBilOcNtNmSrkl0XhIBZJR1BctcVITEE4KV5Ce\nYKmQj9mm2fzLfTKrXyzrdD5LuR9Wy/Ly4j6RbNYRm1eRPEcU5xshSJrKaqbmFpKOxWPfCS/Dy5zo\n0qVL1NbWRn/1V39FFy5coFQqRX19ffRbv/VbWWWNxgGfjLoLJ0iBiix2dREtW2Zu+ppPf/jxY8eM\ny8l1GBFJUbkEmKlurnaN1E9etx2FyYwAiMe4j2upXXDKxRWI9/OPG8onnUipUS5r60MPbla8Mxql\nPRs3LmkrAK9j01da6aovg371S6A9X+jNWT5OcUL6X79bEk2cPKMCySjqixYzAiISKLvzJJLNAcom\niXGTeq2QIl5/VCgr+me2mpxrFXI/ZPVzQPi7WWi7K12+Pf13AzHS2CuVE8fdLexfTuYq7waDOkQM\nEiO81cTmyYoJbz4vFczqWyLwOic6cuQI3XLLLRQOhykajdLnPvc5OnXqVFY5o3HAJ6OlQb5meyJp\nu+wyjVA1N7tnAmhkysDH0N2tVidTKY3oRaOaD6qVPop19/YSDQxkmyfnun8aEYDBQa2eri5z4l1M\nWHlx4QWzEt7Pd46VQR5El2F1Pf5jYJD+Mhqnfd0+mXELbnw3uFnxno0bS66QlhNK8Tu15s8iGd/a\nTz/el7P8ZtpMIFCMYu4pox5QgYzWoqi5ic0IiKjWNRqUMUJcOFf0s+RfUaP5t2p2K9bfTmr/TBs/\nB6Ojo9mESp4bsU0QUUD6O0h6893LFOX5nMrjTlE2UeX9aSeiZenzm4R+xEhN9FV9lecibnKMKPfL\nBBlG9eVJUr3wPGUVi4UTGY0DPhl1F0akM5fZnhWyKhIzvvWZ3IPzjT5r9IVVRdk1MkUWTWGDQWa2\nGw4b90P2k+Vmw3yzogYPDLBoxXIbYrTfnh5rc+AVuPXjuRh9GosBq+vhBXPPxQ43HyyK+uC+CFCK\nh7xP7GO+tet3dVkyaU5Rivqp3z0iyhopeURQo7WYHUjRgZZ+mu1OlVZl4mpdI2UTw1wkQySb3cLn\nXCpcnKyRSV5/gJj6F06fu0LRlgq8//VE1EA02jCqVxxbFGMTyW698FkmfbxumayKpFX1jCTO2QBl\nR+4VVV8+d6J63UHamohKa6diLnK9jIkL56teJsjrb1RfXHGuBfhktPgwGgd8MpoNJx/MjUhnLrM9\nkTAZEUzuu1lZqZXtNbFOkqPP1taq1Uqr41dFBc4VTEksa0bGxflZsSL7HLN54f03UlHF/WbkvRDw\nPohRifOZ42LB92l0F5zM/J9ojLo3pjyz7j7MMTg+SPF9cfrEvm7a84XekhJR3herKV48CRfN7Hzf\nWpuIU14P8I5DRRj5dSKqpiqTWPFc/tmKwsZJTSUxpdDoxXaK9CSJb72KPssYJEZgVSSRty3uixAL\nWtRCjGC2kLZGjaSlWokSI8NGJFTcRP9WPi9t6fbCpA94JNfXJIwvIu0X56Ev/b9q3nO9jMn1MoGP\nn1+jA+l5kH2LPWCB4DZKyYmchNE44JPRbDj5YK4inYODjKC0thqreyJhMiKYqRRTDRsaWLlIRCM+\n3KxVlVJGtYlmvlaIMG9fDGgkz5lIuLgprFyWp34xqjuVUivAZqbJMumWCT/vQz4ReK2SSLkP8rXk\nNfLn+zS6C27u2b0x5al192EOL+Uudbsvg+OD1PpwK0V2RKh7f7c7pC5O3iBAPuw9wBfbVy9OanKV\n2xXYminnMdKTr3ZFGT5O2Sy3i6zNl5Hi+ATpc3hWGZQDMaInk21VTlFxLDWkratYtsWknRoiWq/Y\nz+ePE8VOUpNGovy+26qXCWYk06iNQi0QysAXtZScyEkYjQM+Gc1GoQ/mKhJm1USXn9vczI53dhr3\nYXBQb77Kz+Fms3IbAwNETU1aZNuqKjVhkomwaMqgUvyOHVP7hqqi1KZSmtLZ2JhNxlVkTyawAFF9\nvfEc8pymy5czM9x8oxirIK+dETnl1xB/USC/jOBznGt9xboHB4k+8pFRV1S1ImcbWDSwa+bjk357\nsBPx0w2Tq0Jzt5VTX0SyWyjhNVwLJxQMr5mVeByGa2HnAT5OxX2JwK+TTtITrQ6yrnrK15hoatpH\nmuJZR5oyGqfscQ4QU0+jlJ1qRYZK0RW2UYwy4sv7WEN60iqqpVWkji4sk2Nxi6THwlVETkyXC2MT\nU7asTc+FiuCKZrcD6fF3C/XbNcm1C5Vfr8NtZL4bcSru9Z0HSsmJnITROOCT0WwU+mCeS/UyeyAV\nzzWLPiuX5X6gvN62NvZZzPUpksP2do1AcjPfWIwRVk5w16xh7Y+Ojmbu/5zIygRWVmm5ya5qnAMD\njLS2tWWbsIqq7IoVlMm3GQho+9vazHO0iuPkeVmdem6R185orc2iEovnmCnPct3s79Elp6p5+dnz\njjtGbfXNJ/32YOZrKxNVN8iol0w/3e4LJ7vYBurc3alrx66JsCMEyAheMyvxMgaJRj8yakzerCpC\nuXJwWqnDDsTrhCtxXaQnTUYBcTYSC8TTQvp0MTXCuT3ECE4dMR9O3ncembaKtOiw8RxtipAjAzcQ\n0eVEVEEaGT0qjE8cTy1pxFRUS2W/0hQxArlCaEMkohxiv/mY+9NluHmtrP52pcv1kn4txbpUfq5E\nesIqHhMDJIl5XXNdN+JcckXcid8PAZnfqTIw8y0lJ3ISRuOAT0adRy71w+yBVDxXNLU1M7utqWGE\nU4w8K5JCvlVX69U4mZzK5EokSiqzU3mMMjk2ilKrqosHQAqHs4kukUY+ed85+VWZJYtEVRxjNGrs\nw2kV8tpxFdZOeh2r6phcziuqWrHJoZefPZ3qm5cJdylhFjjIDwrlENIXX+rXu6lvfw/1PtmbRTi9\nZK7smR/CckCczIlUruMcZiTAah1GyEVKxLbtBMQRtxbSE7xeRXmVCWx/jjblvsuKaA1lm+GKvq+8\n7ijpFUuuBofIeG75vBwT5kfsjxhcSByzCrwuMY1MG2nksY2ySbKcb1WeSw5xv3jcqDyHOJcuxfbI\nQL6+PWi2W86cSITROOCTUedRiPohnitHqxUVx1SK6KqrmGIoqoaagsY2fkwMHMRJpuqenitnKCe5\n4TAzC25rYwpmJKKppoEA0VEhSbL8oM3r4gqs2DcxWJFowsrnhZPO9nY94Q4Gtc+XX87mq7tbI6ZW\nAyfZhdhfq8GQrF4fcjk3VbVC87e6Saa8/OzpVN+8TLhLCe5rqzLRXSwRbksemMjCxeeEibBj4zT5\nISz5XHoNucibE4qQ1TqMFLI4WSezuZQxrmyKpr0ioQMx81NOuni5rvQ+kQAFiCmVcdKriCLEvq8i\nfUAgs41Hw+U5OnMFI+Ims7lIktifZtICKDWRfs5FiHWqzHV5XeLfsj+qGWnn+xul47muG1ERt3pt\nOkUi42T9miwSypkTiTAaB3wyWnrk8juMxdRBgrjSybdQSE/AamqI1q9n5JU/b4gPzap7uqg4chXx\npptGMyon/19UHFVbb686qi1PtdLXlx3UKBBgBDUaVft6EqlV1UjE2He2o4PVx8kq95N1itiIY3Mz\n1yuHW6HI7ZAhFQFzk0x52bR1//5RR/pWCKnVBZ75nkuBZzwImaiWU5h+EaVQHXWk7ddNfB7SsGsi\nrFqLYozTVhseVD8cR4poND5qPD4nzB6t1hEnNSlz0kRSJEjVxMgeN/dcTsxkdiNlK5i9Uv9qKLuf\nKoh9l4nc8uzxjmJUHYwn18ZfdIvnVBNTLFek+1shHKsTPgdJn05GjrDbKBwT6+BblDSSvY40E155\n3YyuA5WCSybl+feym4xfAhhBnB8LPzGu+rY7jMXCiYzGAZ+MFg92c46KD+FilFv+zFBRoZ0nqqN9\nfdmEkZMyVV5PuV/ZhG8047/Jwc1TxU1MMdPXp/f/lNVJUVG77LLsumTFjRNjrhBz8snNgcUIuaIa\nKpv9cpJuJc+qFbWPt2UWUMkqVAGL5PbdeuC2Q4ZU5NDL6qWbcGo9Cgqq5WDgmXJGuZLRUgRJ0pG2\nx/vyvviMlEjVWhRjnLbaiJOtB9dyhWe+F/wBn5uj5iIxhbQhBgISVT1VqhWuGvKARmKaEfF4u/Q/\nj8ormxDXEyOIR4kRN0H1HK0ezTbRVW2iUioqg+0m54ibTKb530bmwCB1VN9lpCfZoqlvihix5XNh\nJaWOFcSF9ux+L22SyNE7RtV9dvKadAiLhRMZjQM+GS0ejEin1Qd5+YFVNFNdtkwjadx3kdcbjerL\nymRJDhpUV5dN4OTzRPPUQIApmXx8PGWKHOyIn2OkqPGtqkobg+p4W1u2P+rAgKa6HjumN2sWFVFx\nUwU3EgmgKhqw0ZqYBVSyCnXAosIIrlUUqj56Wb1c7BADz3Q90rVklNHFglIESXKKGNpRIosxTltt\neFD9WNQwUsicbkPMC1pF2SamIGb2ejkxoima1gbT/UuRlj9zheJ8sTwPknRMars6vV8khpeRRmij\nxMhglDTCV0eMADaTlmv0mDA+IzNaedtE2cGUasjcjLhB+ruSmKIqknR5zeJC+RbhcyHPK4V8L+2S\nyDgZ99ljlhNe50SvvvoqffzjH6fGxkb68Ic/THv27FGWMxoHfDJaPHByyM1p8/UF5ISpqYnV19nJ\nAhjJxIXXK5KqSCS7HTmPp6iy8o1H1uXti2V6etj+D3+Y7W9qIvr857PrOHxYGycfg+jrqSK+fM5k\n1VWcBxVx5Od1dTGTYVml5TlZ+d8tLdn1GEUDVsEJMiZfH04Q3HJGLmXaD/rDkJpNUe+TvdT3ZN+i\nI6Ki8jZwcMBVf8Cl5G/ISVuhc+q22unqmnhQ/TCExx6MPQ1OZkSlL0gaCeWmn3HhuLhFSa9+NkrH\nqxX1czKjUjubDNoRN9G/U944YV1Fxr6llVL5DcSIMCfSst+s2baCmPnvBql+HnVYVEC5ghwV5qme\njCPqWrl+i/m9NCO+cXKGXDsEL3OihYUFuuqqq+ib3/wmXbp0iQ4ePEj19fU0MTGRVdZoHPDJKIPR\ng62TD7ypFCM9hapdomJWV8cIFCd1qqiunORwk1YZoj9qfb3mi6oRwFGqrtYItEjsGhq0oEIiQVWl\ngGlvJ7rmGqbeqgivijBzksfbXL5cG4PYD26yzI/L0XbFea+uZmVE8if3OxYzjgZsFXZU/aC6AAAg\nAElEQVSvnYEBvel1T092+1ZMrhYLSculDHsh6E8pTeDk1CaLkUyJylt0ZzTzOfEXq7Lyjxa6Fm75\nNBa6Lm6ua6FjNlIinfpeFNQ/rxK4fPoVp7wfjD1jplsscDLDiVKI9EGI+Pxzxa+TzEkbP7YmXccG\nRZnKdJ3HSE8qeYqY9N+jtaOkVCFVZBek9uG0s/F0LjwabqVBOTF6LyeSRmbEYv96SR2FGKT3Tc2V\nkqdEGN0/akx8VUS1hL8pxeZEdvDyyy9TKBTS7fvEJz5BX/3qV7PKGo0DeZLRgKO00wOYmADGx9nn\nZBLYtct8v4xkkpWtqwOGhoBwOLtMOAzEYsDwMPt/+3bzPhnVWVfH/o/FgGAQeOYZ7ZzpaWDLFn0/\nh4ZYXdu36/vF6//Zz4BIBJiZAc6eZccqK4GPflSre2EBGBlh58zNaXXccgtw/Lg2RwBQVQXMz+vH\nUlsLPP008JGPAO+/bz7udevY/9deC0xOAtXVjHIAwAcfADfdBPz0p/p+iMevugqIx4G2Nv3ccGza\nBPzZn7G5am0FrrlG6//8PDvvwAE2V0brLUO1VlavHY7jx7VxAEBNjfX2Rdht101Y+V4YQbzOVd+V\nXMcXO6YmJjCZXujDySQm7j6F8Un2d/JwEru6S7jwJkgeSmJiagJ1gToM3TaEcND4oqgLsEX+8JkQ\nQhcJp1cAsWgMg7uDurF3O3CR87Zi0Ri23+rcBTUxNZFZlxv+/QasrF9pOnZ5fsTznV7XQsccDoax\nq3tXVp890b8JAPy+lATgla9DPv1K/9YhBiDfSzOZbrsOwBAAG7/FRa2zUITB5nQKwNUA3gWb79b0\ncXH+2wGMpj8nAaQAjACoAnARQBeAPQC2gM17GEBPunwIwPl0uUvpOrcAiKfrQPrYfwFYAbZ2vw3g\n39P1PwLggtDvRgAfB/A2gGelMVWm2+CogLVH9yfS/y9P/8/rUNUHADPpvl8H4Ej6//PQ5gMAFtL/\nxwDsBLAV2niXA/ggfSwIbZ5bhXPsXL9uX18hGH//htLt83UHrH93S/C9SB46hImpKdQFAhi67TaE\ng8Gini/j0qVL+K//+q+C6vAKXGHwRsiV0iSXqaRVlcaOOadYJ0/nItchp1yxYtKpinKr2mT1srOT\nqXc8KNC6dawtHswoFGL9FE2G+bZpE2tb9uUUN9GvUxUtWNw6OrR+qPxBRTNbUfkMh5k5sXhOX5++\nrd5es9nLxjXXZPedyH5QH16ez/2GDfmpm14KJlSIepnru7LU/VTl1CalCISTD+z6GW78oxb6Zi3o\nm7WgX/uTdkrNppRpXawoiGZlCvFpNDMnFtdl496NOccuz4+b66oacz7zmGtN81V3C/Iz9apPqNwv\nK2qLYLqYt1IeJ+fVKbFOMX9mvjCbC/GY1WA5srrXT1oQoAbS+2NSui7R91H1LCCX4VuEmIoaJk09\nFJVWI6UQxExceV/kPi+jbP9PebOjoIaIBVfqI7U/Ld+ipFZplxNTXPm8x6U5aCWiz5MWsbiL8vcT\nFuv2gqJq9TclTo73Oxcniu/bR9i2jbBtG/UfOGC7/kLOn5+fp9WrV9Nf//Vf0/z8PD311FNUU1ND\nn/zkJ7PKGo0Dvpkug9GDrdkDr2gO6YZ/n0hOOGky6ncuk1Ij/0qRzK1bp5nmqgheOKyPTsvTt4jB\nkVpbs9PQACz3ZzzOtra2bKJZXa31i/tMmhFl2axX3tfZmZ06pqqKtS/2t6KCtcv9b+vrWf+WLdP8\nX7lZsJH5q5hWpro6f1/gVEqflsYtEucWVPPjJWK82CCnNilFIJx8YJdcqYinKv+oFZLrlimuWG9g\ne0DXxsDBAWrZ2ULd+7up+3vdOccuz08x1lUkOBv3OE+YTefdLdM3r/qEyv2Kk60H17yvYTfIuUyc\nLObaNkScjOdCPBY1KSciRRqR4+MWiaBIEPmcyPN0DTGyGCVjwgjK9udsI81UWE79woMahUlP7AbT\n5WqE8wYo25+U/92QPl/2a8218Si5QWk/J5wqc17R91Wcc07uRR9a2ZQ3X/C6q0nzWS3l99nqb4oL\n37VcnGjz448Ttm2j2KOPUmp21nb9hZ7/0ksvUTwep+bmZvrkJz9Jd999N/3u7/5uVjmjccAno/lD\nVH16e50nAKmUnrQ1NeXvByirrAAL7iPn5BQDBWnbaNY+HjFXFe125UqN3IlEViRXKvW0ulo/XlGZ\nFfslE2VZbRVVZD6PYhnuF1pVxfKwisRURXbb2rLnUEw9w/tZWUl09Gj23Nvx4eTkjRNcmcSV0v8n\n1zhU87PY1Usn12Ox+Prmgl1ypSKeKtz0FzfZJnpOgdcb+nYoQxQiOyJZqmHvk705x54P+ZT9h1Uw\nU9TEPrY+1FowYZa/F6bz3voTgczMWR7zooHNB1e713BmLdwg5ylyjngQmc+FeKzbpJz8ckMeN68n\nSHrSFaXsdC1EeqLXnt6XIqJaYb9q65Xa3pzOMyqWaSa9OhuX2kpJ+1RbE2WTSk4sK8g6UZWDFoG0\nXKldijnn82wWobdQMqbyRXVQIXXtecqF71ouTpSanaX+AwfyIpJOnC/jox/9KG3fvj1rv9E44JPR\n/FEM1ccsj2U+JEcOzCPulxVTTrDWrMkmo9yUVVZv6+vVpI6ndYlG9flCRSInE1OuYDY0aEQnEskm\nwC0teuKqMrPlpsRVVYwI8/Qwcv9VG48WLM4VD9okqrK8nAyZpJmtWy6lu5RkNJfJ7VJUQZ1cDy8E\nZCpn7H9qvytETwWZ2PF6u/d3Z4josWn2lMnJQ+W2Smre2UyfP/B5UzPLfMwwN32lla76MuhXvwTa\n8wU1IzBT1ESCc2z6WMHzKH8vTMtHfph+0HyWqPces2EuTth8cLV7Dbt+z+AkpZPMx2BFAR8gZgIr\nR2Ml0uZpgBhJaaVsM1siPXlTKZ+8HqPItOJXQyRyFem6OBFTBSHidfLcpFK7o5FRylIe64Q+ikGX\njAIJGQUhMtpUJr7Lpbo60/PKTWt5sKZjpF2b8nVqZjrcRRoZLwTy2CMO1CmgnIJ7eZ0TvfTSS3T+\n/Hk6e/YsPfDAA7R69Wqan5/PKmc0DvhkNH9YUX0KVTvM8ljaeXi1YoYsE7OqKi1vp0iGxTQvcoTg\nXJuscIZCjLxypVT07+RbX5++n6mUPs8pJ6+A5tcqz7lcnqunAwPauXV1rJxsdtvQwMjz0aNaH+RU\nMXK6HhEySTMjp6q+O418U6bkIpuLXQV1G0uRzLsFtyMLy8SOt9e9v1uXYmdwfFDnJ4ptoJp/rDE1\nsxTrbtnZYmkMa/4skjnn049rtpKiYvqJfWoTYd7H1odbMwS6qOj+DBH+lagz4V/4boKTQTFtiRPT\nbZVMi+RlhUH5uFDGKO9jxKQMkV5BNYviKpLRZuEcsV9y+hZeT1yxH8SIdK/ULvel5Sa4rcIY6kjv\nf9oj/b0qfQ5Xnyul8efaYsQIZS/p/UN7SG/q3CeNqZrURF+Eqh9OkVCOFGnXTcRCnxYxvM6JtmzZ\nQpFIhEKhEPX09NDrr7+uLGc0Dvhk1F04pXaoHvSdfnhVkSzeb5GgVVToiZccRMnKFovpy1dXs7b5\nfBkFZOJEqbs7m2C2tTEyJ/aVk06VX2l/v9rMuKmJpcrZtEnfx/Z2rR9ifaJpsUr5lNdOzid62WXa\n+aLC65Y6ZhQYS3Vc7INPNu3B7ouokvn6Fpp6JMc4OSG6/X+1062PbnQ99czg+Dg1fudPCNu+RNhW\nmyF8ThJU2VTSSHUU92MbqO5bdZl9RmaWKpPfXL6BnGiu39Wlq3NfPE7bANoGppiu+pdVtHGPfg3c\n8qO1jEIvfK+mcLGKYvU/TmpiVSzI5CUXkZTnIi6db2QCKpnGEogRrxXpPnQTUwG56smVSZFE8TWR\nAwNFiZFDTuQilG1Kqso3KhPYFmJmsZ8nPSnuJX2AJTnXp0gWjfKS8m2ZNB4xoFCKsv0xo9L5lemx\nGb286CZ9Wz3kzvXrgslrOWKxcCKjccAno+7CacIoPvh9/vNMsTNS5MTy9fWawqfKNcohK52BANF3\nvzuayWMqbqtWacSwt5fVW1trTkKXLWPmrKmU2j+1vZ21H48zRXRggBGmSIS1I5oS9/ZmR8FVkcvq\nao08crLH10Mm0mKfWlr0qumxY9p8cl/b+nrtMzffFcmwilDKPqyqqMHRPxinjY/uo/a/epw2ds9m\nHvRFs5J8VXdZAZf76Ct01mFm5lMuZreFkpFc4+SE6Kovw1XSw9dCjAoY2bHVFdIlm0oa+fHx/Wv+\nbQ21/VMbHZs+ltPMUjb5zeUbqFI2OfGOfjNIq78C6vrjBpp855hyDtzwo3Xa/M30RUKcSkOunEKc\n7Pffxo9/Zi04QeLmpcWOMiySFyOTXpl4iERdNAfuI2sRdXl9MmFURZEV515UcSuI6FPEyFpY2h8n\notXCvnrSfC95ECJuctst+YzWkF55rSBGDMV9YiAj0WRYPi5vVek+cCIpvgjoSPc7F5lVbeIcpYgR\nUE6q46StRT6Rj0sA30y3+DAaB3wymj+s3A+cVjvEBz+R0Bg97KrIWTBo3h/uX8m3qqpRWr5cv6+r\nS69M9vVlR9blROvwYUYoW1v1RFiuE9AHVOrvz1Zqxb8DAY3INTSwuuW+i1tNjUZy+fivuorVE4lk\nB10St/Xr1ZGIRRItm+ByM+dcprviOWvWsP5tfFR7oMbggcx8iD+e+ZKdVEp7acDnTT7uK6DWYHYz\nKxdSXygZyTVOHg23648bXE09w9eCRwWM7PhrOjb9jtbPHOMcHB+n+L59tPnxx20HcUjNpijxF6vo\nX7o36oIIycRzcHyQWh9upciOCHV/r5tSsynDtDBWfDeJ1CRbVmTNot66Ea3X6Yc80xcJJUzhUrDa\nLipWuXwuRdj48c8KYHSM3FeaVGpvihiJtGPGGSeNCPWS5jMap9zmuiJU6VJAmirJo9byPotErYfM\n/SONfE9lsrhSEcDIylZDjMzGFftF0ltHzFe1lbJVTr7JpsvilssXVZ4jcQ3FvqlUYNU6ecCiwSej\nxYfROOCT0fxRCuVDfPCzkk7GKECPqr+cXIuRb+WtsZEpmwMD+kBFPT3ZxLe6mvmCtrczU9SaGrY1\nNrK+c/LH1ciuruwxyea1ZrlRW1uN/VdFn9fqatbOwIC+DFd/jfKW8nMBvW9rJJJtgivWoTKFlX1g\n+/qYspupJ/1AjT98lFA7q2tDdS3YJTsiqfayalfOKBdSXygZyTVOHg138p1scuWGb6dRVMBc4yw0\nT5toEnvA4Eslk8T+A/1ZPqIqX1Sz+VERzPZ/bidsA1Vtryp6mhg3YPoioYRmfAWr7XHSHtTtpEUp\n1ZsuqwRCJG+FpHtRvWiIC3WDNNPZXAqpSDBF9VEVtVYs20TGhLOestVJUXk2Su0iblWUbcLMiaGY\npzQgfV4v/C2aE/cp+iTmBm2XjlUTU4p5/bWkj84bFM4V50i83OV1shL52KguH0qUAyeyAqNxwCej\n+YPfD3iE2GKkZJBJTK6HXV5GJB+dnWz/4KCxCSxXHvnndev0ZEkmnn19+rQkPT3GKqJIYNvbiT79\nac3cmEeR5YF8VHk3xU0mjqLJ66ZNGslT5S2V6960SR8symzjqnAkolcWUylmviya6lohfFl+prOz\n1Pp/H8gQUbEN0W9WVHntoFxUOx+LGyX3VxRQaJ41VT7U7DY2Z8bb9Qjz7xSJlmiaO3BwgBq/05hz\nflQEU8wV2v7P7fn5Alsgwqoy7rxgyB6j20GqrKBgE+d8VV1L0RPJeeUpTtYIhEiumiz0waiv/EWD\nqMhxcsOD5ZgFKRLRJpTjmzjvfC1CQv/rpfKdlE1MxbG2kzoCrRipVhUUiZNC/vlyYiSbmxOrVEvR\nDFj2O1WlW+Fzs0LaL0YFXpbuvzinEdLMbWUVn69bN+kVb/EF0UB6LHKEZCcsGjygrhYL5cCJrMBo\nHPDJaP5QET2vqEyqIDqy+qZSMgGitWs1H9DeXqJly0az/E1FxXXdOspEpuWkcmBAUzFVvqFVVSzg\n0MaNerUzGGR/i2Suvd2YlG7cqNXP07bwegIBtsXjrN+yya9oZtzYqL1QUJWtqtLWmafHWbVKO0eM\ngiuTcKMgTOLLC3EtZF9cOc+oE4q8VdVuqeS+NEKu8ZeTmY8X4aS/YqFrkSvPWi7yYyUfamo2Rb1P\n9uoi7opES/wsEvWqbVUZs95cGBwfpMgOFl23c3dn3vNq5UWBoYnwl91/wVDIiwyniGzBSrMbqq4U\ndXYUo84pT1YJBCeMIpkz60M8RznxODfXlUmk3KdB0iLXdlN2Ds02qXyK9Oat7aQR2EpiCqGcz7OB\njJU/Rf9HMZpNBjnh4/VESU8Q28mYlMoBljhJ5HXxFC5iqhijaLyiwiymmBGP889c7Y4L+4zW16iM\nE9e+lfZNUE7373LgRFZgNA74ZLRweFFlMiIs/OG6vV1P+ESz0xUrNOU0HieqrBzVEcPBQUa4VqzQ\nghHJbYr+rOJnlZqp6gPfKipYeVWQH5WCW1vLTHVlAtzczMgd70tXFwsA1dzMxiGa965YoZHN9esZ\nsT16NJvAiYRVJMvLlrH/ly/Xz49qbaJRdv1wNTYa1fvdymTzjjtGM+SdK9yFwoxwlUsQHreQa/zl\ndDPzIpw0G3V7LXKRHyeVOpFQVm2ryrS7amiVYRu8fTF1TM8TBsmPLcDKiwJVmc2PbyZ82T3fYDv9\nM4KXFHnHEScdyRi9ejT3A79VhckqgeDlRIJlZkqbi+TKx8U0NRuImZZukOqPk55siabD64RyqiBJ\nvB0j01q+HZXmRCTAbZRF7kavHmXKo9iXMLF9vB6RrDam9x9Lj/Uo6RVUURGtEdrsJi1PqJwqRg5c\nVJluUyTrfaRfPyNzWysvJ9xUQFV121BLy+n+XU6cyAxG44BPRgtHwZHpLapPdlQqI4KsCmjU1pZt\nmtramq3w1dUxAidHi+X94kQvGNQI57p1jNjJZI8TTZHMiX1bu9Y8oFC+W1+ftlZie2JfeLlcRERU\ndDkBbWxkBNbovMFB7TzRj5X3S5xzlZ+o2Ke+QnxxDOq0GlnXzrVYzuqqF180lRu8YE7pBHKRH5Hg\ntD7UamrayoMYtT3cRhv3Zqe7Eeuq2FZB2AZa/p3ltOHfNxiSKFXQoo5/6ch77q28KFCVsXKeE9dE\nIS8y3Igg7BnwB3QeddbK8OKkEREnuTknWLlMaXORXPl4XKhP3lpITyz5XBwjpqrKcyLWVZHeNpLe\n9xGkVytFcieqr0bklZM7bm7cTcwPU7U+onLJ3yWJBIv3t1P4LCqY4jyLAbLqSW2+yzdOkEViJ5rY\nHqPsNbLycsJNBVRVt1HZXPC4yW8kEuGEray3SCSiHB98Mlp6WFWf7KhURgRZ9OsUH7JVOUb532vX\nMsJ67Ji+DzU1ajNbMUpuXZ2eqEajzJS2tTVbCe3tJero0FRMkSCrTH35JvtnypuczoVITwo7O7OV\nV25ubEZE+PGuLs08WJw31Xni/HFSLvoc8zplP1EOHi24sdE8RY8dmBEuo+vIzrVYzupquQQh8jLc\nUqGKTXIHDg5Qy84W6t6vNpcV/UGtmrYalVflGsU20LJvLSNsAzV+pzGTxkU+Z/l3lmd8UkXfUTfy\nrdoFb5urvvK4jSILO91PTwRycuvhN5+Hf7cjEhspm1bHLqcK4QSLm6FyoiiSspXEzFtXECNSRulG\nOGmVTWD5HPal6+K5OMV0KzL5FBXP5ZQ9p3GprNgP/pmb5HYZnLeCNBKbIkaIGw3aNIsC3CjUGyON\nbIp9suqPaxd219/O9ZnvtRwnd8bqwxLgk9HSw6r6wsuFQua5Rc3AH65ln0eZkHK/yP5+ov37R7P6\nEInoCZi41dSw/6uq9MS0vV2v/IlqZCiU7avZ08O2tjZmJsvrlTfR95NvXImtqGB9rahg5q983uR2\nVIS2t9eciPC5FP1ju7q0eVOdx8lkdTVTUFtb9fOYq801a0YdJ3b5EC7xmhX9ZXOlsclV1k24odCW\nk5lPqZCPCmUlMI5Mco3WQjzv2+s+TN9pbKT4fw/Sxl0bsoiOGVHTKZ8PZyufqdkUtT7UajjWTO7P\nndFMPcu/vVxZXs41yrfAtoBhH/g5YkoYkdRyEt36cGumjpX/stIVwme0FjIRl8ctHg9sDygJ66JB\nnIry8GvpN6pQ9SoXuRggplhyv8U46ceeKy+lWF40O+0hfboao+i1croR0VSVkzsx92iI9AS2RtHm\ngLR/bfocnr6G90kwHx0NjeoJoeiHKY6rXZiHdspWNLn6K88l7xtvU+UfWkNadNxcyqJKLTWDVZIp\ntmHl2rdzfdooq/tulDBFlA+fjHoCVslAKmUttyiRtQdvle+iikjdccdoJuqumKeTkwwx52g0qvlZ\niuaq3ORUTjUjEtKWFr3/Z0eHNoaBAbWvaUMDMx0WzxMJsWqTTXIDgex0Nl1dxsRJnNuBAT2R7e3V\nRymuqyNd8CdVhGEzJVVe01CIkdGmpuJFcFZBvGZzKZ9GZXmgpmKNwQ2F1iejuZGPCpUrd6aYMzP0\n7RB1f6+b9j+1P2ddsfsDtA2gq76sViSNVFzRh9NM0eve360LTmTUj9pv1VLzjmaK74tnyqtykKZm\nU7TioRVKJdUKSUvNprLSxYjjUBE+K+okL9P+T+20ce9Gav/ndtq4RzM3Nvpe8DXr3N2pnCeVIhzZ\nESkvU1o5cI5R1wt9+LX44J9ZCzfNEOOkJzBy/eLxfsoeu3g8KpUlqbwYXEeeX5GIGKUbUW39RPR5\nxb64oix3j5FVR9ltRr4ONkp5RmU/TB4sqSrdrpHJr6j+quZSBK9fjgAcSM/zMcoGT/3SQMxH1c5L\nirjUNyPwPpv5EhfBbFb3O+WEObGPvAGfjJYXrKqoVh68RaVVVZaTLjm3J/dV5GlMNm7U0oxw30lO\nTEXSyFO3iCaxvLxMNLu69MRNFcBo+fJsH9ZcRNRsq6hgJsKXX55tfizOi6iqim01NmYTLrlffE54\nkKJQiJWXo+aKUK2DGDCp2KROhh2/SrFssaNQ+/6f5QPDwDjb9DkzRZXRiJiJ5/1/7U20DaA1v1+p\nVObEsiIRE81dcyl6qvygcoTbjXs3ZpUX07iI+y976DIdMaveXp0xw7VC0uR56/6eXm0VlVM5nYxI\nZLkfrKqv4hbdGc1WWtMPlqlfT1H/48YvJjKKcLqPkR2RLHNkzyNO2aRGhUIffsV2rPx+2i1vhwzI\nRE+uXyZM8thz5aUUy6dIryJanV/RbLWTtEBBKkIcIT2B5eavoumsqDqGKXuOxPpALMgSiJkDcyIo\n9lEmn9VCeRALutRL2WT8GGVfR3LKFTmSMN/aFPNWiGmu1Rcs3BdVVHzltuIW++FxX08f1oA8yWil\nw8TTh0UMDQH9/cCBA0A4bFyuro79H4sB27ezz8kkkEgAPT3A1JRW1803Z5cFgIkJYHwcSKX0dVP6\nkgmHgZUrgWeeAUZGgOpqYGFBKxcIAC0t7PP0NCsTjwP19WzfmjXARz7CPl+8yP4PhYAVK4A9e4CG\nBq0uXm9Vlbbv/HngySeB999nf1dUsD7xuuyCCDhzBnj7bTYmPu5QiH2emmJ/z81p58zPa583bmTt\n87kXUVnJyp4+DbS3Ay+9xOZmZobNcU2N8XrK6xCLAV1d7HM0yvo8Pg4MD7M1Lhb49XT0KNDcnN1/\n+XoD2DW3ahUQDAI//SnbJ193bsHqd8cOVGP0UTiGbhtC/+p+HLjjAMLBsHJfOBjGjS03AgBi0Ri2\n36q+iMTzfvvp/0R9ezue+NILWfXzsqtCqxCsDGL/8f0YnxzH8IlhPPfucwCANeE16Ah1IFgZxNrd\na3HLvlvQ80QPqquqAQChQAip2RT2HduXOfe+sfswMTWB1Dz7Al+x/Ao0VLMft2gwipMzJ/HQxEN4\nf+H9TD8qUYlT507hsWOP4Z3ZdzL7P5j/AAvEfgxXhlbq+m6EltoWRIPRTNnd3bvRuqwVANDZ1Inm\nYDNmLsxg5OQIvvvz7+r6cYEuZD5Pzk7i8n+5HP/4k3/UlQGA5dXLM+M/PXcawyeGkTws/BhNABgH\ntjZsxeHXD2P1d1dj0/c2YWpO/6UJB8PY1b0Lu7t3o391P974whvoWN6Rc4yegvj73wXA6LctDGBX\n+v9C2omZtFFI+fSaYRhArvvKEIBWk/qHAPQDOAA2Xnns4vHdUllI5cPpNnKNZSuAUwDuAjCVPu+1\ndN2jAH4qtcPnpwbA1enzHkyXeTn9/8F02SS0R+cGAC+m20sA6AFwD4CXhL7UA6hOf74E4DSALdK4\nGqTyC0J5AFgFYG96fqIAZgCMAPi4NE5AW7uR9HiaDOaoMz0W3u8poR/y3MrlVJDX2QjHweaA16Na\nR9X1quqDnevUx6JDRRHaSJNlH/lgaoo9KG/frj14f+hDwOQk+9zbC+zda1wWYA/Yw8PAsmVjmJ1N\nAGCE84YbgKYm9nB/112sTCzGHvI//GFGjurqgFdfBb74RXaco6kJeO899rmjg5HUVIqVn58HLqSf\nffr7WX+uu471ORRiZa67jhEvGZEIcPXVwLPPGs9JTQ07/yc/AX7t14Dnn88m2vX1wNmz7HNVFSO4\nYp927QI2bWLEOhRiZBJg8/aLX7D/p6aAa64BTp1i7b3/PnDVVazffJ7CYW1+xX0q8HKdnUB9/Riu\nvDKB118H/vM/GeH/4ANtDt54wxmilUwyElxXx9ZZVWcikb0WfI7k40b729uBl192jhwWG52dYzh6\nNAFAP0Yf7iN5KIlX33sVr0+/jiO/cQS/+PEvkEgkCq438VgC45PahR0KhDBzgX3R+zr6kJpL6Y4D\nQG9HL55880nMXWJvqqorqjOksbejF/MX5zF8YhihQAg3X3YzvvVr38LH938cZ2bPYPrCdF793NCy\nAU3BJgzdNmRKSsXxrAqtwsrQSvxs+mc4M3sGl+gSCKQjnTIqUAECobOpE2988P58GxwAACAASURB\nVAamF7L7u6xyGeoCdaioqMCZuTO4evJqPPt/Pav1qwfAMJD4fxIYb9Pmrn91P3Z1e/dLkzyUxMTU\nBOoCdTnnOYMpAPeCPSXtgOlDeV71i+0kwR7UTU4bGxtj34tc5ZNgD/Z1YKTiLrAH/Bhykwsb/XEE\nVtpKgJEUAGgBEATQAUa2hhTn8TpPAngmva8fjCya1d0HYI+0LwCAf6WqkSGWYxhDAglGAkelPojX\nDSeajQDeB1uD68FIXF26vpH0/qDU3zCARwCkoLXD50D8mq8FcCjd/3Hh/O1Qz604vlYwYp/vOqd/\nD9AFYCWAnYq6VGss9oGvDa/L6nUqIPPd8FFyVFRUAHlwS18Z9TjCYfZgLD7ki4peRYV5WYCRkJYW\nYHZW27ewwAgfV+FktenHP2YEZu1aRkQffJApnQAjUzfcwD5Ho4wIcjJ47pxG+rhSFg4Dr72mKYin\nTrF9ra1af+rqWP0vvMCILsBImYy6OmDZMqbizc0Bhw4Bly4xgsrVWUBTbSsqmMIq9+naa4GxMbaP\nz0sgwMhVOMzmpK8PWLeOEf4bb2REtLqake9gkBF4UZnmc2eksvFy69axedi/nym3589rRBRg/cqH\n1Kna5WqsmdrKFeDGRv0cycfN9rtBRIupVgaD7P9iqbs+NExMTeCZU89gcnYSW57doiyTPJRE4rEE\nep7oyVLhZFz7b9civCOMH7zzAwBMNezr6MPNK5jpSCgQwszCDKormcTB1c1YNIadiZ2oC2iSWGMN\n+1LUVNRg3/F9GD7B3sjNXJjByFsj2PLsFqwMrcwQ0UBFQNkno/0A8Oy7z2L4xDCu+7frMDU3ZThW\n3q9QIIT35t7D+OQ4Tp47iblLc1igBVMiWh+oB6UloNR8CucunNOOVdVnPs9emsV78+/hzNwZtNe3\n43/f/L/1xCqtmNTdoM1RZ1OnoZrtFUxMTWQUbp3Sa4YwmIK1BzkfjPOqX2zHjrKaq7ysMFlVufLt\nD4dK7cqlwllpi19qIQDvAjgBRtpUCloSjJTNAKgVzksZtC+qdjsU7YlfqZvS/3cC2ACgF3oiysd6\nFxgp2wNNHT4KbQ2OQ1ufemk/wEjvL6ERUQC4ApqazNurBZvXQ9ArwlyBNJpbUfGfhHUVUrWW/No6\nCPZdUa2jqh8qtdTudepjUcFXRssQXNHr6gIOHrRGArgq19DAVMzGRqb0hULMvHf37ux6ROWrpYWR\nvVAI2JH+0b7hBqaeTqdfsAcCjPR1dTHVtL2dtccVOVlBBIB77wWefprVAwC1tYysHT/OSJ+okFZX\nAzfdxAicCq2tmvq6fj1rc3xcIzJtbUB3N6v76afVZsDBIDPzvXhRO97SwsbFCbdoxsxVNFGBnJ7W\n+qhS2WQlkq8JR77KnKxghsPAI4+wfnd2AqOj6muFK+oPPABs2ZKtrIuK+9at2jgffFBd3ikYKbJu\nwMiqwIf7uPyfL8eJcyfQUN2Alz7zktKcU1QFW5e14rXPvWaoPoV3hDPmp8uqluHtu99GOBjG1NwU\nrv7Xq/Hu3LsAmDpaXVWNBzY8gC3PbsH2W7cjHAxj0+ObMPLWCLqau7DnE3vw8f0fxy9mfpHVTlNN\nBNeFr8dP3/8pTs+dRk1lDdaG1+I/3/vPDPEDmLpaW1WL6QvTqKusw7lL57Lq4ujr6MORU0cweZ6Z\nvgQQwIraFbhAFzB/cR4zCzO4IDwhN85X4f0a7YcsUhPBB/Mf6MpUV1SjoaYBZ+bOIBqMYv7ivE7F\nXd+8Hi+nXsb8pXk0Vjfi/YX3EYvGskyfRUzNTeG+sftAIOxM7MwqV5BS6AJ6nujB8InhnOPyav32\nOoO8FaaCkEC22qXaZwRZ0eX95spaCkxFbAAwDfX4xPZWgJHJ90zaV6l2cnsctWDksROMZG6V+isr\nk0ZjvRyMUDeAmQDzn7tboCmjIuRxHk+XfVo4l/f7BgBtMFaNebnrwIionWskAetrmQvFVN99FBW+\nMrqEsHs3ezi3SkQBTZV76SX2/+bNjDzOzDBiK6pmXJF65RX2dygEvPuu5k8aDmt+ppxEVVczNZX3\n64orGCETFTlZQQyHmYnxTTdpbZ8/zwjo5CQjjRyVlcwcV/Q/FdHQABw5wpRa0X8zlvZJ6epi4zl+\nnB1TEdGqKqa2zs9rxwMBNnbRDJgT0ViMkedEAnjoIU2B/NnPtOMqlY0ril1dTHV96SVNJTY6x4pK\nKCuYExNav6+4wvha4Yp6R4daWRcVd1Fp3bJFXd4pGCmybsDIqsCH++Dkc3phOqOMyuqgqFZOzk4i\neThpqCByxbMSldjQsiGzPxwMI9bCfhBi0Rh2JHYgXBPGPaP3YGZ+JtPu+Qvn0Vrbimsar8E9o/dg\nal79hVuYmcEzp57JENHaqlr8+L0f64goAKxrWpchf/M0r6oqgwMnDmSIKABcwAWcPH8Sp2ZPYWph\nKkMym2YDaP4A6HzjInrebUV9oB6BigA+WNAT0VAghAVayCidlRWVWebEPz7zY8xfmkd7fTuOfuao\n0gdXRjgYxp7b92Dv7Xux9dmtWetQkFLoAkR/Y1V/C0ISGPrmEPrf6seBW0pMRAF3FCYrfoYqtcuO\nf6uRzyBX1rjK+BKMxyeqfqfA/CzN2pdVO1FZ/Rb0j9RBMP/IEWjEWeyv1bFyAjkN5m/KwZ9thHga\nqEH2GDsAvAk9EeVjWQm9aqxatzA0n1s714hdX2Uz5Ku++1i08MloGSKfB+ebbwaeeGIMsRhTwN5+\nWzNdjUTUAY94kB6jwEicLEQijICtW6f1ix8TgwaZmRFzMiaaih45wshXXR0zk/3DP2RqnGjey3HL\nLYxM3Xijvq8ycef9WrOG1cMDM0UirA4RFRVa+bVrNTNlTiIPHNDIrRgAKRYzD7AzNATE42M4eJCR\n8Y4OZsZsdo5sbmsUVEisQyRzXM0uFMUkiG4EKjLCGLfZ9lF0iGay22/djrGxsSwyM3TbUCZYDy9n\nRHie//TzqKmswSVcwvjkuO6YHDxJrmNiagLPvPMMJs9PYuStEYxPjmcCFgHIBDu58lQdbmz/GABG\n+OYvzWfU2ErptvrWubcAMJ9NbkobrAxmzcOVoStRVVGVtb9KeDoNBULoubwH150N4cxyYPxa4Ee/\nsoDZC7O4QBcy9cumybFoDC9/5uUsYt1Y3ag73rG8A7u6d2UIldn3InkoiQ/904fwrZ98SxfkCdBM\nis0CUhUTPKCSas0LxgQQHglj15/sQvhL7v1QGa6FTDjceNC3ElxGRYLNiLHc71xkh4+rA8bjkwMw\nHTFpXwVxnFsA/Fp6/xoA/B5fBYzdPpbd31wvAfh4fyqdx9ECFtCIk9L6dJsjYEpmrvcmSWjBljrT\ndeci+HauEQ+b0fr37/KHT0aXCCYnWUCf06cZ6RKJ5Asv6H0duSIaiwH/7b8xtbK1lZl8iqSAk4U3\n3mCESsTQkKZSjoywAEBTU8xXs7KSEb2mJuALX2C+mWvWsP+PHtUISEcH8Cu/wlRX7t+6ZQsjbn19\nrH6A+Ye++CIb18KC3qcT0BNg3ufDhxkhn5jQxsCJIY/0S6Qpv1deyaLGcmK7d6+e8C1nQSjR3MzM\nn3lAJBXCYeBrX9PPJSfqW7eyAFVNTcwcmxNNmdy/+mq2L6hM9q2SuWuvZcdbWvRqtArFJIhLUa10\n2k/Wjq9lqaCKuMvJTDQYxcmzJ3HX9+/Ckd84oitnRHg6lnfgtrbblMdEUpI8lMQPT/0QADNl/eX0\nL/FK6pXMeZ3RTgBAV3MXei7vQUeoA801Tai6WIEPXd+FFaEPAUAmKBLHpUzYTFbPkb4jWBVapVNM\neYAkEb88+0ucv3g+a/9FXER1RXWmrfrqery4Qnv79e78GVwEM+Woq6pDz+U9WNe8Dqk5RqL7Ovpw\nfeR6XLfrOsxf0s4LVgZx6NcPWVJCVZiYmsDk+UnduF448wISjyWwQAvo7eg1rDff6/JQMonHEgk8\n0dODOTtfEIH41MFhoiz6GBr5JrqJYkQhzUUURUVRhBHpSab3F+LbqoKs+onElV8Dl4OZuapUXnmc\ne9N1HQYLZgQAFwH8oaK/ZgRPHO9pAO2KcfLItKn08Y8KxyaRm5BOQPMxTYGtxyvSeFT9SsBc8ebw\n1UwfLsL3GV0iaGlhRJRHx21szPaRU0VH5VFwAUYA9+yx3ib3EeXo7wf+4z+0FC4AM6XlqmIu/0o5\nyuzUFIu8++67+nOiUTZWozrNoIosaxYlV/a3PHnS3F/UCNzn9KWX9CbBvI6pKRZAiY+L+62a+YJa\nRTisrUl7O/Dmm/nX5aMwOO0nK/paej3iqYipuSkkDydx8uxJPPMO+0L1r+5HuCac8UV88NYHdb6e\nqvNVxzjkaLsc7fXtePkzLwOArg6j8ipEaiKoRCW6ol3YvWk3+p7qs3wuj3qrwrqmdRj/1DhaHmrR\nmeNe33g9js0cw9qmtWgKNuHHp3+MU7OnADDSeZEuKgMcideE6OfZUtuC4x8cN/X55H6SHGvCa9AY\nbMysV2ttK177rNq31851Kfbr7m9OY2aE1b+6vx/d/AuSK2x4Ahl/t6kvTCH5O+bXhi1MgaUQ4feh\nQv3p7KIYPqJTMPfxS8CeP6FYPgLgDYN6AWNfUrsQ2+SQo8lOwdjvsgWMLNYBeBXZZrJm/RTbNhqv\nvI6A5tvJoZrbJID96b5dAFNF66H5nwbB1FhVZiWxX8W+bn0sSuTrM2oc4s9H2cHsfvz880w5fPpp\nTcWUH3RFE0xOvsTIveI7BSspQ1pa9KRp+3ZGqDiWLQM2bNBSpchmn8kkI2cAq/+FF/QBdIaG2HnD\nw1pApliMlR0Zya7z2msZsa6uZvPB54GP5fXXsxXN9nY9EVWNmwcwOnXKfs5N3qezZzWzaY6uLq2O\ncJiZIA8P61PRmPmCWkV1Om9aXR27PnyUDk6bQXvNXNIquILZ80QPAK3/IqmL/XsMN7bciN//we9n\nESd+PqCRmR+d/hECCKCmqgbPf/p5nR9qQ6AB0xemEQqEcIku4c7hO9FQ06AjYmL5XJien8ZFXMTI\nyRFEHsoOC14fqMfZC2eV51ahSkc0Rbz63qu46/t3ZZRQjnfn3kVXtCtDBLmSCqhVWICZ6FahConH\nEqgL1GF6fhrPnNLO5yltkoeTSrI4dNsQ7hu7D/OX5lFdWY2diZ246/t3ZY5Pnp80PNfOdcnNagHg\ng4+14rdHgGgshltVviUA+zHOurml/48B4b8PY1fYwafuMKs3QySK/TUbgpooOkXiAE0VM4Jdf0Je\nPgLgBZj3myu//JjdpeP1cZWQp1kBtGiyYaHNNmhETmyvG8C/A7gxXYcMs36ajZdDtY6vQR9sqBaM\nQIprOgE9YW0DdO+y5sBMjlXzVqgfqJPXmI8lDd9MtwxhZMpnlsajowP4p38ayzKnFaEywVy/nv3P\n83aapQyR+3X8uBbsh5Om559nJr+trSxP6N69WpvcRDUYZCrorl2aShiPszHI7fI+i+a93E9UVjMn\nJxlh5abK8rydOKGfT1XKkv37tfbvuy+7Du5nm8uMlfs48D5xIrpmDZu/vr7sAFV8rNyHNxplqrBV\nk06j6+b551mfX30129y63JCvmatXfE6cNoNWmcB6CSpzTXEteP+vj1yPvqf6Mma0oUAIp+dOY/jE\nMJ745ROmPoCczJy7cA7TF6Zxeu40PrbvYxi6bQh9HX3oCHXgush1qEAFZi7M4OS5k3jm1DMYPjGM\nFQ+vwPEPjmf6smLZCkvjkskiR2ttK1aFVunIooi6yjodEZX9Ty/gAoZPDGcpp+/OvovXp18HwEyb\nxTQwRnh/4f2MX+zwieHM+QAyRHTZz5chNZtSmtLyIEaPb34ce2/fi3AwjJbalszYupq7DImmnetS\nJK6P/I8jWN3fjzsOHEBQ/ILkeovjtr+bE/XnMJk0/I0yMp8shvkuh5Xxi+N7MF3+DWQrdvkGBhLb\n+RCAJgCbwFRM0Tz2KPR+pbJv5evSMY63wXw4x4GxjrHsdTLrJ5+fTwG4B+o1Vq2jbHYspoW5AWw+\nX4EeNcj2nTWat0KvW7euMTvmw/DO/dtH/vCV0TKE0UtgK6qKmaLJVT4Ru3drprDc9/O119Rtyf16\nPf2j3tgI/M3fsM8dHcxXUwRvc/9+zSRYDgi0c6d6jFu3MkXyi1/Uj0dl3sgVwKoq5ova08PO4XVW\nVWlRdNva1IRApRSLCq6Z2aw49/ffr+8Tx4c/bGwKzdeHmwaLJsEqMUCG0XXT0bF4THNzCSReh+o7\nWFB9gkLoJJxK3SGqXrKKJrZxcv5kRrVrr2/HteFrMfLWCGLRGMLBcOazSH74+UdOHclq9/yF87jr\n+3ehpbYFU3NTOD6jdpZeoAVc+a9XohKVIBA2tGzImL+aoRKViAQjODN3Rrd/am4Kk5cm9YUJqLwI\nLF/WiEBVAOfmtNQvov+pGbqau3BN4zXYc2wPUnOpDBmOBCOYvTirNNGN1ETQGe3MzN0jmx7BzXtv\nxuT5SXQ2deLNs2/izMUzGDk5Yro24vof/+B4hsiuDK00Tr9j47ocum0IycNJ3PNoLZ79u3sQ4D/Y\nukJD5rmZcil7hcKJ+gtVAGUUqnrZgZXxi+O7EaxfQLa6puq3mYmwqh3+FRuBRspCAK4FUzVfk+oU\n23wETEmU2xN9g6ehETA+bisK9UnoVVdRkTVSFsW55US5ASx1DZ/PSgCXACwH8DfQSGyueRPrNlM5\njY65dY05/V0oInyxOD8sWp9RK2ak5Qo5X6foQ5krV6IqF2WueVL5fm7fnt2W3K8777TnP9nUpPeX\nXLsWWL2akdnjx9W5LcXxtLYyoiyPgV8LAIv6e/nlWv5ScSynTrG6zAglz/EqlhH7YORXy4kR983k\n83H8OHDNNYzkRqPss5ib1QxG14FT5csRS2GMXoBTvqhm+Rnl3KKTs5OZcoDmz8k/11bV6sx1rfhp\nNgebswijiApUoBKVhkqnVxBAAJfS/zgiNRHUVNbgndl3lOV//oWfo7GmUecXK/rarn1kLU6cPYHG\n6kYc/cxRXf5Xo/W3km8z3xcZjyUSmEz/0Or8RRcLnPb9zOXnWWzw8YWgBTrqB0vBIvotbof1fque\n/Hk7ANAFYA8Y+TXz6TWbK95GtdD3EVhfp4Qwvlbo83v2AbZ8NsVcpGJdVQB4TnZej11WJPZT7ovR\nMbeuMae/C0VEAkvbDdfPM/r/s/f20VHcZ77nR+o31HprSS2MZaANmRgbx47A8svYJnQChCDbQYmt\nZEOycXLPuO8NO5k7c+fCnpnZ7OTcvdnZe5yzd7LnnsnCzF7jOFHMi21MYhgGYYRkjOWxxxhf40EJ\n2NhCFiBQSwhJrZZU+0f1r1RdXdVd1V3dakF9z+Gg7q76/Z7fS1fXt57v8zwapJOsznUYSfnMZB8V\nD5WDQdmztmdP5nlSl14RtTVbWuD4cZlwCkmk1i5RE1T0lUk6KSTBHo8s073pJtkjKsqn6NW2VD8k\n7+/Xr5cqxnj0KExPz2S+Fd5VMW9CMpzOs/nee7J91aqYET0PsBY9PTNEVF1KJxSS7V6yRPYGi9qs\nK1emyk21ElSrkk4rx9ud1TWXdq2cox3jXMgmOxdhVyxqOrmmkI1Weao4+MjBpOOEZ21r91ZaDrYw\nMjHCmaEzSXLdM1fl80W5lEp3ZUr/4rMSg99OCck0EXXhosabGh9aCEwymeJFjU5EdYkowJcXfZlQ\nZSgpuzAkZxsOlcvkcyg+pNR/FTBafzPy22xLqwiPaEq86PWCbCST6eSMRvLd2YIYXyLMRPGmab1r\nVuzWk4m2ARuRid6ryDJg4YU18uBtRSbFm0idR9FHOzIhFfVOza6TenzaUjNWPYui7Iu2rVqddqxK\naNPZYvRZvvaY3d+FAqKQgoTrCdetZ9TxkKSio6ODxsZwisQT9OdJ7V1WeyRbWlIzzup5PvXkpEuW\nwOLFcpuilIjwyoo+tcdHo7LHdMWK1DjKaHQm4696DFpPpBobN8pZfMvKkvtPl6QIUjPtijE//HBm\nD7DYj6KUzocfdhAOh5XPtVmDly9PbdPuTKvpkK++rLZr5FE23Z9JD15HR/J6XG+IdHbSE43id7tp\nW7OGgC+1zmX685O9WUDGbLXZ2Rnhza43aVjRwJXYFbovyY/7jdYunff00QOPJkl7X/vqa9z74r1c\niskukoA3wInHT7Clewv/9Mk/KXVCQZbZBucFuTh+UTe7bQklVHuqicaT73qaFzXTfr49qYRKscGF\ni0pvJX63nyUVS5ISNanXOT4dp/1IO00PJXs5b995O2eHzxKX4jx000P89iu/tbQHzHhP9RCLRumK\nRFi1fXtyvKhJ2CUtzxsyeLJ0r1Fh5p4bRutNy8W7ZtaDlqmPm5mR9rYge1PT9GHp9yJd31bHbnS8\n3vvp5kZvr9lpZ4HR0dFB+MfhovguFPlU5R1ONl0NMoWQ3KgQXsDmZvl1Y6OcXOiZZ1LnSR1/JzyS\nMOONVGew1cuEKwhdWZn8XlOTnJxItKkuwSJIh9o2cfyHH8qvFy9OtTEQkKW5Yq1Ftt2TJ2cITHW1\n3M7Fi3Im2mvXZI+rmlSr4wszxeRCcqZb4QFOF6tbXy//+/znk72qAtq6rz/4QWqbag/s00/r92MX\n7M7qmm27Rh5lLYweIMzVbLJ2oyca5WgiIDvS1cWutWtNnxvp7GTXmUUMxX3A/6fED+YjFrUn2sO7\nV97l3d53WTBPlmOkW7uk5Dbr9rClewtlrjJaDrZweui08tmhRw6xtXurEjtZ7anmxOMnZO+gNyB+\nQBVMM82UJHtEBRF14VK8pBKSbjbcC2MXqPJUMRAbyDhW0Z67xM2kNEkJJdw3/z66L3ZnPDcXTDFF\ndCJKdCJK32gfAAueW0BTfROnBk8xODFTn3R1w2r2PrI3ibj1j/YrcaHHLhwzzJirh0hnhOH4MAvK\nFrBn3Z4UApyOJPoCgZykuenik4sC2cTJpXPD5Dt4Ldv2hTfN6LUVGMVpZupTiyuqv7XPkeqBYIb2\n00Gvb7vmLt376eZGb6+lm6Nc1qhQKBKX5FyYqmLEdSvTNSNZvdGgfpIn5IxHjsgxjnrzZEQc9DLY\nGhFZUYpEHKcmbo2N6dvXHi+SGGmhXmvRr4g9DQRkO0+flsnvyIgc8xmJ6I9PnYxITTiFXRs3pma6\nTZGGRuSswLW1coypyCwskkDJWYDDivT0ySdheFiWQr/zjizd1ZPUiky3Q0PywwGBdDLWbOW2dmd1\nzbZdLUlP8tx3dhLet4/m/fs59WFMV25uNmvn9ewVBfC75eeOTcEg21et0j3GSNLcE40yFL8JuIsa\n7w/zSur9bj8skwnkG197I+Paqdc3VBli19pdnLt6jqP9RxmIDeB3+/G5fGw6vCmJaH2x4YtKDGRP\ntIfohDxed4k8T3W+OianZeJ6V81dbAxt5My3zrCgTCbIFe6KFAIL8PbA26aIKMADNz3AxtBG7gnK\n8QkSEgvKFhD0BU2d787iWbKeVBnk8i/HLhxT5qfCXcFIfIS9f7w3Ze7Hp8aVv++sudPSfuiJ9nDs\nwjH6x/oV6W+2sl2rKPoHUxlupnWvUenkjPnKdFqo9s3ALplomepvr+azc8iZeNuBzwLNEG4M59Zf\nprkzIzlVH/OkzvHp5ka910S5mFmWt+aCcDic/4zZDvKK61am6yB3mEmIZAQzSZYgc/tWbdAmW9q4\nUY4FBTlxUW+v7Fl89135f23bahlpeTm43XKM6Nq1chZgMwmx9OS8IyPJ86H2ytbXy0RVHGv08N9o\nTtNJX83IYos52Ve69Q/v26d4+xacX0r/f1rryPINEI3FiHR1sX3VKkOJrnFSmv0c6O2lxjvEO48/\nSajSXHmT7OyMZiX/TZGX9skZYn0un1J7c0HZAvrHZBnv8prlSrKjo58eZXRqNKm9Om8dlyfkxEae\nEg/1ZfUsqVhCmbuMrv4uw9qdVlHhrmBscowppvhc4HN0bezi37/+7/nV735lKlZVXQ/UDGpjbtxV\nNUwjJZHmSnclVyevUu4qZ2J6QmlTTx79wEsPKPLpjaGN7F2/13T/QqIb9AVZVr2MKm+Vsl4V7goe\nmP8Au9fttkVCWyhpuW2wW9+X7yQwdrYfAX6DXBPzHuS4zEIu0TpksrkCOc5U3bcYpzbxUi7ur0Sb\nHzTB/34I/j6gGW6YzJJT9TFBZMJs1jb1Xmsx0ZcDBybhJDBykBFWazEZeZfNeNuELFVPVivaNOO9\n1h6TqW9tsiW1N1XtWbz3Xti0KZXkqL2lLtdMXdIXXzSfEEst521slPsQHsHly2Ui+vrrHYBMiJcv\nn+lTT4YqxhyPy+eKNsQcpJPvZpLFChmyGFtDgxwDa3fiomyRbo+ovX1v/C+rkuZ3rtYZzRcCPh+7\n1q5NGytqnJRmDa1Ll3L2W3+SVyIq2xlgs3uzZbKg9q5VeCoUb2mVR5ZWNAWbeKNlxtMqvKcHeg8Q\nm0omlivqVrAiuEJ5HZfiSu3R9r5224goyF5GQTp7hnvYdHgT//jJP6YlosJ768JFtVdH828A1xRc\n8U1yMXYJCYn5vvmsu2UdG0Mbea/1PYK+INemrilEtCnYxLHOYwSeCVD/bL1Sb7XWV6t8viO8w9J4\nhSfbVeJSarl6XV7qffWMTI4oZWTsgNbjqk3WVHTI4OWzfI3Kt6fIzvZFOZZBZFJYaE+rSEqkJaKg\nm3ip47sd2fUjvJlx6NoIf3gI9gR0hmtGcqo+ptHE8Wqo95peX4VOBpRjf9f77/eNAIeMOgCsyTnN\nZCpWy1IXLEiWrGbbr5m+t26Fz3xG7nPPnmQSIyS/FRWybQcOyMmP1P2qZaTehFzH74c//EP5bzOx\njm1tMiHauHEmM68gVSIzcDzhzBgago8+kiXERqRcjLm9XfbSqrMLRyIzNVmHhuSMxOq5zCSLVcdk\nAoyNzWT0tTsLda4ZetWy3GgsppCkQ488Qmi+L2l+r8cs2vmGkaTZDJGdSCvNSgAAIABJREFUbaiJ\n9DPhZxTSoSfjDfgCScdrycnN/pvZvW63Isk1g3mueay+ebUlmxtrG5VsvgAT0xMc6D3ApfFLac5C\niXudYiqjJNhTMlPIuCSRbLfCVcHl2GUuxi5y9NOj/OzBnxGqDHFv/b2ATMY3hjayvGY5/df6GYoP\nMRAb4Ladt9G8v5mfr/p50j6JdEa4+bmbqd1Ryy2/vIWHX37YMHu1IITqBE/eUi9N9XK605wktJob\nWjOyXCNpeoQIYcI000x0rmoX7ZKwFqJ91QNcVmAt5s8O4iTGsjW5rQgQDkDzLhhSZ9GtyLIfVWZe\nyQtDAQP+aIboq4+xmuE3U1+FlmAXg+TbwazCkek6AOQYx4TiMUnaqod0mYqF5PONN+TamS4XTKke\n8mcjIzXbt7a9JUtkchWLyUmUFi2aqeupltHq1ScVsaMnTsCbb8rJk7KRLGslsJs2yfZXVclxoiJJ\nU7psvNoxizbE66VLk+uzGrWjB9G22w2Tk8lJqeyWu+aaoVcty21dulQ3CY+TRfvGgJEEU1tr1MgL\nppYDD00MseTXS5RERS2hFl5a/xLRWJTvd3yf3577LZNMGtpS5a6ieXEzn1z7hH++9M+mMukuLF/I\nlxq+xEtnX+Lq1FXlfbVs1wpEEiQ1Kj2V3F1zt5JRGOQswFWeqqTMwd5SL2sa1nB66DQfj3xMtbea\ndbes48AnB5KOExDzI6CWd6shJL56CYrWvbKO9vPtrKhbwauPvgrYIKENkyQ3jD6XWfJtJE0PE+Zo\norFWWtnlaBfziyjwPeQ70mewRqjC2Ccz1bQV3mWx6UyJiVTS5qFD8FSgSLOu5kPinW5u8i0pd1Aw\nODJdBzkhplKe6eTlSEI6b5vw4on21ERUSFbVMCMjNVtbU5t8qKFBJtiDg3K5mO5u+XVNzYyUF+T3\n/uAPkj12PT3y8bEY/Of/rC8XNePp03pyhf0nT+onadKbA+2Y6+uTPamiPqu2fqoZiLZ///vMSamM\noPVYGkGsdUWFvCYpXvIM7ZhJwpOvxEvFDrNrkOs5xQIjCaZafnvHzjt0vXORzggtB1s43n+cR//x\nUX7Q9QO+cPMXANkr+Ez4GeWY2FSMap8shS01+Lkcnhxm59mdHLtwjInpCcOapWpcGrvEL373C4WI\nllJKCSWMTI5YIqLeUi8uXFR4KlISGl2NX6W+rD7pPQkpiWCWUKJ4ZD8e+ZhJaZLLscu8+NGLynEl\nlFDpqkxqA2a8iu8Pvq98VumRj6twVzA4Pkg0FtVNULR77W5al7by6qOvJtWNzUlCq5EbmmnTyHvq\nTzTWRBPbb4RqgTbLMi03FwD2IpdUsboF7MyiqmnLctNqD98dpA5e5YWsDqRxLGsnsNCy2XxIvNN5\nP53kQzc8HDJ6AyGdrl4QmhUr5DIv6ZA2ji9x9RblS1askMmaWrKqhhkZqZrMpeu7p2fGQ7h48QzJ\ngxmiFgzKEt3PfU72Bgpcvpws7dSSZD3iaUaurG1H2P/hhx3KODLNgXbM//RPchxrezt873uwe7d8\n/nvvWSdiou1QKPl/K0ROlA050NtLpKvL8Li2ttSMxlbaUctyjSSj2WbRNhNzkqvMOJ8wuwa5nlMI\nmFkLPRIR6Yxw/OJx5Zj+8X5WvrgyRYYpyFHvaC/HLshxizXeGoUcbe3eyq4zuxQCdTl2GW+pl+ZF\nzdT56nTtUdchlZCSCKm31JtynjbudJrplFqmWohYUYEFZQuYVzpPKdUyzXTScUKyrO67wi3rC++u\nvZsGf4NCwpuCTUp8rd/l5w/ny3EJFWcr+PBbH3L/TfcDUOutZWB8gOb9zZy6ckrJWtzgb5BjT594\nLyX+U2+t8hK/mcUNrZE0vY02WmnlEIcIFMndsa1xcVpyY7NM0rbmzJAwO4mMpi2jpg3XQi037id1\n8GalzdoJ/I3q9fczjiJ35EPinY7Z59ifEzM69+GQ0RsU2htrQWjUJUuygbbsy6uvwiuvyLJfM0RL\nCyt1KdXH7tiRHLv5yCOyRzEelyWx7e1yHCvoeya1BFGPeJqxzYynziqB0nqxtYSy0B5BMx5LkO26\nVw5L052zTO3MduyimYcPswWza5DrOcUCPRLRE+1JksjWeGto8DekeOUEOVInN1LHmfZEe1LkqRPT\nExy/cJzLscsZbavz1Slti3MrPBVK/GaVu8ro1LSYkqZwIceYLq9ezgff+ACvSw5s97v8rKiTky5N\nSpMsLF+ozM3bX3+bBn8DzYua+R+t/4MlFUuodFfy+drP82z4WWUe/+Xxf2Fh+UJOfeMUe9fvpXVp\nK7/+0q8JVYbYvU72ZHpcHoXA/27od8r8vd/6PnvX7yVUGUqJ/zQifEbxmlkjixtaI1IcIMAudhUN\nEc0WhlxOS3bs9C7a2ZwZVmsncdK0Zdj0T4GbgVrkTLxictsAobrKJSGQdgLVz660z6wK7TXNFo73\n00EaODGjNyhyjd8rFLSlPdKVIUlbBiScHCfa1CQnONqyRc5Au2VL+nhQbVmYUMh62ZlIZyc90Sh+\nt5u2NWuyJlXr1slkesWK3B8e2AEzZUOUY9PMmZV2ZgOFjke1UnJHPXdbu3+YEqNndM7KF16gwe+n\nyuvNaU9mHItO3KDdEGVDAALeACceP8EPun7Agd4DNAWbFDIk4kWfvv9ptnRvSYkpVLejhiijUuGu\nYGRyRDdO01fqo7GuUSl9ArIEt8JTwXB8GID6efU01jVy6PyhrMcq4jY/8+vPcPbqWWq8NdxddzdH\nPz2qWyJFPf/DE8NKHKle6ZZ0qN1Rq9Qi9ZZ6kSSJB296kL3r97K1eys90R48Lg/l7nJ2hHekr+1r\nEK/pIA0yxSRqEMYg5lEboyfatimAUShLc26uWGMJw8xMLCRPrt7g1cdbLb0SYKb0TCNwhOR5sNq2\nAwd5RLYxow4ZvUExVxO9WCHRt98ux4N6PHDnnfJ5K1bIEt4dO5LHrD72rbdmysAIPPywfpIhK4TB\nTAIeM8il/utsoJjrmFpBoec92wdGVm7y7dqTdtqULaKxKN/r+B4llPBM+Jkk4mklMU40FuW252/j\nUiw1q+3C8oW89tXX2NK9hafvf5rNr23m8PnDSbJbb4mXCSl9EqPWpa0c6z9G32if7ueC+LpL3JRQ\nQlyKU0op00wnEevAM4Gk2E4XLiXZUlJCHtX8L5i3gP7x/hTSauaBgUg8JAi5ejwXRy8qfQR9Qe6t\nvzftg4ekmqOBZVR5qkw9qFDs/Fc/bYfaCHgCpojZdYEwloiHIZczyRYtcl/7YRurtRliYkG/NqnR\n8ZlItdGEp5uHYiXsDm5IOAmMHGSEWlc/VxO9pKupqUV//0yd0N/9bkY2rCcZVh/78MOpbRklGTIr\n3YxE4OTbsixyRU2Q705PZxipcTstLXLc5VxBMctbBczEnGQbj5otrEjUk84zUdJi5tjCSHWt2GQ2\n/kcr8wz4Auxdv5eX1r+kEJpsYhO3dm9VPJ4BT0CJwWysbeS9J95TSsSEKkO8suEV+v/nfhbMm8mI\nNiFN4CtN9jD7XX68pbKkNugL0jfSx7X4tZS+Xbion1evJAKalCaJS3FKKOG3X/mtInfd2r2V8L4w\no5OjyrkSkkJESynl4thFRf6qnv83vvaGbl1PvURD6rWIdEYYi4+xYN4C7gneo7zfWNvI9lXblT7c\nJW4GYgMc6D3A9zuMA9yEfHdZYJki/TVTY1SxM3CAyJLIDVMOoqOjw7L+1VAZaVLaOqsVNyJAC1AM\nv3UaKWzH5g7YiGxfJiIK5iWqRhOebr0c+asTM3odwCGjNyjsuLGOROSSMEY1RK20YzYxjPBYDg3J\n0tp08CRK7Pn98Prr6cerPva111I/NyLvgjAEg3LGXqMx9PTA4NNr4K2lLH7pESpEEVOLmAvETots\nSdWNjmwfGBnF6OkfmzkplB2wYpNZ6JEnu9oVUlRXqUshprdW3qpre8AX4INvfpCUtOgLN3+BCneF\nEuM5OjXKxPQEC8sXsqx6GccuHkuKSy2lFG+JlymmuDR+SZH0CkhItPxTC8cvHOfm527m7//17zna\nf5S4FNcdwzTTHP30KEt/vTSpNujymuU8sPcBZXyCSELmBwY90R6OXTxG/3g/H418RJ2vjvnz5rN3\n/d6kmq4VnpkijOmSMokHBerYXTM1RhU7B5vY/svttsU5zglYJB65hlPaHEpqDcVUe1Jty0rgfwMm\nMF+GRm8h9GI9s5nwbBdZ9L8IeJjijzl1cF3Dkek6yBraOMzW1plkP1YkmVbkiFbkxefOyV7O115L\nld1mOtastFRIN/v6UmW86jbicTnOM1dZ9FyUV881WbGDuQEh81TLVu3Aol8uone0FxcuqrxVDE4M\npu1DyEaPXTimENeNoY109HUkEU7RxqbDm9LGpILsPR2IDdgyHpCTKt1Xf19SvKjAgrIFfKbyM3w4\n8jCXx12UuV00Bd9l97pnk+S77w++z0BsgKZgE75Sn2Hc6brfrqO9r53G2kaOPHYk47pYlVIrx9+9\nncAfB4pPwnkdYVZVssUkP1Xb4gPEVyiXGM0wqZLrQk64un8BJ+bUQY5wYkYdFByCGMFMMp2WFutx\nblYIVqGIjZYgZyLZ6jEsXy6T25MnZ0rNbNwIXm/udjvEzoEDGXokJptESdpzHv3HRzl2YYawLSxf\nyHtPvGfYljoeE1BI2Gef/ywDsQFKKCHoC/LPX/9nfvLOTzg1eIrXL7ye5DWs9lQzzz2PC2MX8JR4\nuLv2bj659gnjU+NJXtJqT7VCcF24eGD+Axy7eIwabw0dj3Vwz4v3pCRVUkPEi+rjPwDLEn+/RdD3\nEvfW38twfFiZD5G19/TQaYWYakm60br85txviE3FuCd4T1JyJQcOdJGOmBU6mFVtyybsIcmzTbZF\n/9XA0Cza4eC6gkNGHWRER0cH4XDYtvaiUbnOZUmJXJs0EMjOc5cPgpVr0hztODKRbPUY1MeC/lzY\nvRYOcoOzHsWDXNYim0RJNz93M/1jMkHbGNrIxNSE4rk08u7dvvN2+kf78ZR6uLPmTo72H6WxtpFb\nK2+lylvFuavnoARe739d8XZqk/zMc83jzsCdvH35bUD2Xs6UjvkOMB9vaQkT039HY+0ybq28lb99\n8G/Z/NpmTlw+wesbX6faW51E/ESCIXeJm3vq7qF7oJtKTyVX41dpCjaxZ90e7nvpPi6OX1TGIhIj\nVXr+iqvxxcCHlLv+gWtTA3AaPLfLHtsVdSvwu/yKR9Tv9rOidgVV3vSJhyKdEXad2ZXkJc41iVUh\nsjPPBtKNy7lGqRBm9jLIRqGjpYPw3nBuxG22kzOJ/p8GtsyiHTbA+W4UD7Ilo+7MhzhwoI9AQE4G\npEZb2wwp27rVHCEU8at2QsRWAtxxB3zwgcW4u7Zkgpwp7lE9BnFsYyPceusMUXeQDLtK3djd1lzE\n9XpzbhVWEiUJxKZmMuGWUELbmja+3/F9JCSlPIl2fvtH+xVy9bvh39G6tFUhhHqZa4U9mw5vAmQZ\n7rLqZZweOq3YG/AFaD/fnrBkPrCMiWnwlf4Re9f/OaFKOX7glZ5r4P88PFoNibhLYZ+n1EOoIsRC\n/0LKPGVsDG3kZw/+LKmEzelvnubbr36bfxn4FyamJrgycQWAVQv+FW/pg0h0cG2ykfbz7cxzzWNc\nGgdgccViJqYmFHt9Lp/iMY10RQzJpbZuqzpONVuIeOFMfZvCbKaM1fRt67iuZ8xmMGsA+DG57xMR\n6zlbUPevZ8esp1J2cCPB8Yw6yBtms5apWkJsR//RKKxcCQ0NcmZdM3GksymlnQvlVOwsK1KoEiXF\nCqdmo4wnjzzJ/o/30xhsZPfazFLQSGeE3Wd3E52Icnft3Rx97KjuOdr5PXL+CAOxAfwuP6e+cUom\nigmoY1n3rNuTRASFhLVvpE/xMAoZMMAdO++gf7yfEv4EiTuBD4Gf4Sudxu/2c89H0zR8NMS5OvDX\nLaDtzz9IIcDqeNNMe8Eo7lbYOTg+SHtfu/I5oHhiRexrpphd0UfAG+DBmx7kV1/6Vc4PS2yNFw4z\ne142Td/N30uM61oTh/YfurHK1ljBbHsVC4XZJIRhnPqlDizDKe3ioOgwm1lU29pgwQL7+g8E5Pqk\nx45lzmRb6BIgetmI50LWXTvLihSqREmx4sywXPOoylPF0/dnqHl0HePc1XMMxAZoP99uukxIdEL+\n0iypXGJIaoTHNegL0netjztr76TB36AQUVFmZtEvF3EldoUF8xawZ90epQyMKMVy15676BvpS/KI\ninjUgC/Alxd9maAvSKVnF/AW8DNcTBCbjjE4MUh7wxAHPgdHl8GBYL8yRrVH2OuSM3Wb2QtGWY5F\nptvd63Ynfa4ulWM2Q7I47sNvfcgrG16xxWtva3Zmu7xsetlRLfatjGv/IQLtgeLIJFuMyDVNsBlk\ns552tzGbGYVnNZWygxsNDhm9gVDoWkyzWcs0EJCluXb2b7aMixnYuRZ6xFPYWlEhJ1F68knz5XMK\nBTvLiuTa1lyvUyY8c8PxYbZ0Z6h5VOTIZS30ZLqRzk7C+/bRvH8/0VgsqT6pp9SjHL8jvMOwXW1N\nzKOfHuWhBQ8p8y7klb2jvXRf6qZ/vJ8t3VuUvvac3SN/fq2XYxePMRAboMHfkEKmBJkejvcDf0+N\ndx4P3zxT+Lix+i4+H5+fNMZIZ4Th+DBlrjJcJS6l9qiZvaAml9q6rQAnjp8wrNNqtoar3nERIoQJ\n00wz0Szu0rOpH2sIu+o09kBkUYTwHWGa/25mDs32HTkZoeVgCyMTI7pEYK5fo2YN2RLCNERQWYtM\nbedKJmfzQckcql/qfDfmPpyYUQd5Qz5iQfPdfzp5q4gjVZdxiURmd4yg74Fua4PbboNLl+SSMsEg\nDCQqRRSDzQABn88WOa06XtTo/es9jtRqrcZigzomc7N7c9bttK1pS8nk2hONKhLuSFcXF0dn4vJa\nQi1J8Z5aW0T8rSA/zfubgdR5FiS4ylPFcHxY+bzlYEtSpl0XLqaYAuC++vvY2r11Jsts/T0KOS53\nlVPuKefNr71Jtbc6KYZVHsfMGEVZGYDuS91KX1obM8UVFzJesYcejiY0gBEi7CqEBtBI8mhX7J4f\neub3cHSZhTlU9Z00//82wi7frjkvQy2KsENBCIVBZtfaDBFM13YEOJn4uzFNG+nQhj1y5GzmwMz3\noigW2MH1AMczegPByTaWGenkrYLcVsn3/TnJf+1cCz0PdCAg2wfy/42NM3/nIlnWkwTPNgTZONDb\nS6SrK+P7epjr3w3huVte83/ScrBT8QLOFYgb8QO9B/hF6S+ybkfPY6aVcKu9p8+En0k5Xm2LVupr\nJA8V75984iRLKpfgc/nYdHiTQi5X1K1gY2ij4uVcUbeCZ8LP0BPtoX+sX5bgnm+nwlNBva+ea1PX\nuDh+kS3dWwj4Ary0/iX2rt+bIpWVxzdDhEFOENQSakmxMd241O2oSWy+vhf+xJ1+E01st1EDqOfd\nVZBvyWMb+OusJ9ASSJr/NdtTZKhz8Ro1mypTBdl6F9N4BpW1SNd2D5Ao7catqW0YIcmJaZccOV+S\n26JY4Ln53XCQDIeMOnCggpk419mUH+vBKEZVbefu3fbYnG0saj5JrFG86I0URyoIyrmrY6YJeDEh\nmyy4WhgREbWEe2t3N8PxP2JB2U/Ys04/flFri7pdIIkIis82Hd7E9lXbCVWGWFy+mGMXjnGg9wAV\nngpal7by6qOvsnf9XvZ+ea/yemv3Vk5eOan0KwhqU32T4VzojVEQ4eZFzfhKfXx49UNG4iNpx1Xm\nKjNsx5Y4zAxoo41WWjnEIQI2ulPSEu7EtT3ypxHC/5MBYc0FAWj78yzmMME+2ra10bqwMPNfKBQk\n7DCTBDVbuakZIpiubfXgnzHfbV74Xb4kt05cqYM5BMlBceDIkSOzbULRY3BQklpb5f/zibm6Fhs2\nSBJIUlOTtTlavVo+D+T51eKpp+RjNmywPveD4+NS66FD0uD4uKn39TBX10OLDa+8IrFtm9T0wgum\nxm0WTx19Slr98mppwysbpMFx+78cg+ODUuuhVmlwfDCrtXjq6FGp+r//J4ltfyyxrUxqPaSzySRJ\nWv3yyxLbtkls2ya1HjqU0Rb5nNUS25DYRkq76s8W/GKBNDg+KG14ZYPENqSmF5qS5krM4cJfLpQe\neukhqeaZGuVcz3aPtPY3a6XB8cGU/o36S2cL25CCO4JJ66VuN107aphdi3zvD7MwmntJkiRpUJKk\nVkla/YK5sRcMqyVJIvEvjTlz8RqVmHIprztitWRq/uyEqbXIcvAbJHkoQUmSHkq8nr1vVAYUZIEz\nYy5+N65XAFmVT3E8ow4cqKD1MmqTnxjB7HFzAem8mNl6hTN5nHPJ/itiT7UxoQGfj4DXS8vBg7O6\nLrkma7ECO5NCqZFJ4pkOZsafa0KanmiUofhNwF3UeH9o6F0V3vKg7xp9I/9F1ztmJIPV81SKzwD6\nx+TstkYeRiXJUSKJ0eCE0PB9h7j0Q9r7bufzL/zhTBIbXfv1bYl0dnLyypeBPwbKKHeVMxAbSFov\n9bjs8ESrkWl/pJXP2oi03t2Ep8s/z96x54zr2LtUiKS3RTt/WQ5eODGXAceYdQVsehRkgR3cCHDq\njDpwkAZm61darXNZzHVA81EfNlPtVVEXtqnJXvlzMdQfDRNWkrW00lqYZC02I5e6joUYf/P+/Rzo\n7aXGO8Q7jz9JqHK+7nHRWIxIVxd9I/+FYxfbZZsy1OKMxqKsfHElDf4GqjxVScl/orEod+y6g/6x\nftP1Nqs91QzFh2isbeSTa59wOfYk8q0n1Pk+4nLsbwztEvU/1QmXIHmfN/j7WF5zjPbz7YY2GbWj\nRaakR9qxGfU3m3VwtWOA5ARQZseYN0S5MWpm5gvX6fw1IxPRJuZEQlsHDgCnzqgDB3mB2bhDq/GJ\nxVwHNB/1YTPVXs1XHK6VdTHybufq9c5XspZCIpd4wkKMX3iEz37rTwyJKMx40au8MyVdtHGhep5S\ndRyo2vMX8AX44BsfWKq3+e4T79K6tJUjjx3hvvr7gAkAyl2XkKTnkuxKtV/fg6ze5++3/gW71ybX\nB9WOL5MnWniz90T3KB7PO3bdYejVzLQ/7PbEWoHWa5tczqaTXWcWcbT/Lg70dlj2+tuCOeRdKqTK\nwzTm0PxZwRyqrOLAQc5wyOgNBKcWk3UYyR61Ular8sixsQ7AXsJnF2YjQVMmspotzK5LR0eHLVl5\ndW3IU7KWdLBbFrm1+yQXR7/DpsOvWybkVsefzXXKSKptaJOGPGWTaXamb2v1NkOVIeX4+rI/w1sS\nAIa5NvV/c2Wil4XlCy2TfrHPl9ccouXgeiWh0kx5m9TxpSMWovTK4KlB5T0hQ9bD1u6tXBy9yKbD\nm3T3WyGTI2mRbu3MyruLAcXw+y32xQEOECle8WjeUYi1uE45dl5QDN8NB7nBDjL6FeBfgd8B/6sN\n7TlwUBCY8XgZ3eRqPZtWb4Z/9CN7CF8+stRmGzdbjLCyLvnKyhsgwC52FYyIQm4xnvrtZSbkRgR4\nNsafyTYrcaGQPzJ17uoYE9IioAp4HK/XS+iJEJt8myx5nsQ+P3f1A9111xtfOmJxpvMM7IMyqYz6\nefUp52qRab/lGhOcDnrXJ/V6/3zVzw3XTny3ZXn3n183WWwtIVM2WhWuB5WHAwcOig+5xoy6gNPA\nWuA88M/At4APVMc4MaMOihK5xBPaFeOYa+yoEt/5nU58oSh+l5t7Tqxh97O+6yruMtLZSU80it/t\npm3NGluT8wiIeMLtq1YltW/0vlUUYgwCucR46rcnx2Q2BYOGXmYzcYGzFZ+nZ1uECD304MfPz2M/\nZ0vXlowxlEbIdlxiXuFDqj3PcvsTi+mu7JbtzCK+Vl73Oircn+GBm1aye+16Aj5fUozo1u6t9ER7\neN/9PgNrBmjyNaV4rR/e9zDH+o8BsDG0Ea/LmzbGMt1+iyCXq/AjSw/tXnG965PZGFW7vttzGmFI\nhHTLutA0Wy5KlAgRtrN9Vh8u2YJ8b0wHDm5AzFbM6H3A74GPgDjwPLAxxzYdOCgIsvF4CU9kPA4t\nLbNXt1NAxHdWfCZKbHE/g7f00n5Tl61xqHbEXeYKtWfutuefz4uXNl1WXitebyPkKve1Ars9eWbk\nzmbiAtN50PKZcTWTZ3CLb0uO2Xyz80S3rVlDS2ghG0Pn+GjTKWora2U7s/Q8ta1pI+j7HCOTDbSf\n71f2mdozKWwd6B1gYddCXfl0lbtKtiPYxI7wjqS50Rtruv2Wl7qJKuhdn8zGqNr13Z7TsJCNdrZV\nDhacuJmPz/fGdODAgWnkSkZvAT5Rve5NvOegCOHo6pORTRkMQR7b28HjyZ6IirXINVmQiO98YKV8\nQ8aHQRrfX6XbVrZE0TBuVqe9fBEuccNZClyKxWxvvxDfjVzlvlZgtyzSzE27GQKcPn4vQXIO209U\n9WyzU3J4ZvhB4D9Q5fkrnr7/v5k+L+Dz8dL6Zvaulz2M2cYXi/nZdHgTjcE7AON9pl6D91a9l9SP\n+E7HpX/HxtC3+OuKv072cnZGOHnlJAAr6lZQ5nqK8L59bDr8OttXPaefvCjxf7aVNzKtvd71aTZj\nVLOBmcRAuV6jDOdxDmXKEfyxJQLSzUAtsA5DZpqWb+awMefqvZRVMj9XMFfXw8EMciWjjv7WgW3I\nR/xjOmTzVNyOTLORCPzpn8rj/PnPc4sdFfGdu9evoWXhUjaee4Qjr+hLdLMlioZxszrt2UW4tES3\nbc0a6n0+phOf13i9thO6fMfG5qsGaLHADAFORxIESbqt+jZjopplDKyebXYmlgpVNgHLGI4vZkv3\nyazbydbzpJ6fcvfzafdZWi9m4jvdfr4fr+vfUOGtSCIxp66cUuqjLq5YzLmrYxmvKblynczxqKnX\np3zGqOYDdiUG0pJaNfk4ZTSPhcqUYwMTEvzxnh6o6QcGgXYMPZusTotNAAAgAElEQVRp+eYcIuF2\nwXEGOyhWuHM8/zywSPV6EbJ3NAnf+973uPXWWwEIBAI0NjYSDoeBmScazuv8vw6Hw0Vlj/a17HWU\nX0ciYXbt0j/+pz+FkZEwfj9s3txBRUXh7N28uYPRUdi7N0wgkF17b74J774b5t134cknO/jxj2Hr\nVnn8Y2Md/OhH8Oij1u17qXktHf4OTpzQ/9zvdsPp09xWXc32J5/MeT702tvsdjN69Sp7n3ySgM+X\ndfs9w8NyHNjp07R88AEdf/7nNNXXc+DwYSo8Ht75q79S2tfbD22lpfREo4ydOsWPVq7k0S9/OWP/\nP963j6OJAqsRr5dda9faun8CPh+b3W5OHD9eFN+32Xh94vgJNrs3KyRB/XnbmjZa/lsL//GB/6jE\nNY6dGuNHK380Q1T7b+O7t34XgZzWgwCbOzZzghPJ15d3f8rIkhH8bj+b3Zup8FZkbK/KMw+Aeb//\nZ85cfpboqiYCvkDB5lc9P3906yYeTcR1Gx0vYii1n4+dOgWXLtH00ENsX7WKE8eP82bXm7xb/y4A\nNR/WwAQ0PSTLd9f/1/836XjD/nIY39ipMaiVPbnfnf4uHR0dRbOfbVu/cGL9Om7ju3xXJm2a49tK\nS/nTn/wEn8vFwT/7M93r65sdb/Iu70JYJqY9HZt5Vz6ABW4/nE487Hlye+HH2wMdid/3cCQMBr/v\n6V5v7uhgFLjbn/icDvgMhLenP35vOExA+3kAOjZ3gMHv5fX4eizxuikcZnsR2OO8nvuvT5w4QTTh\nPfroo4+YLbiBM8CtgBc4AdyhOUZy4MAMNmyQJJCkpiZJGhw0Pm71avk4kKTW1oKZZxv0xlmIMQ2O\nj0uthw5Jg+PjkiRJ0lNPyf1u2JB+vtVQn/PRheT2ssVTR49Kq19+WdrwyitKWxteeUVi2zap6YUX\nlPcGx8elJW1t0kN79yYdq8zdd45Kwf9Lbuehl16S2LZNYts2qfXQIVN26PU528hmjSRJf07nEla/\nvFpiGxLbkFoPtUqD44PK/4Xu2wwGx8el+h1/KbGtzNJ5dsGu+dFeIyRJkja8skFiG1LTC03SR8Mf\nJfWjd7zdsDK2p44+Ja1+ebW04ZUNGY+3cqxZPCVJ0mpJkjZIkmSlxUFpUGqVWqXBNGetfvnljNe0\nDdIGCQmpSWqSBqVBaf9TknRktSS9tkGSPrpQuO+QgXGShCRJTZK1ydHDoCRJLZIkbcy9rWzXbC5i\nUJKkVun6H6eD2QOzqJjdgJxR9/fAX+h8Pttz4yCBI0eOzLYJaTE4KBOxTDfdZklrIWHl5n9wUJJW\nrz6SZPtsjCkbApwtaU43P3o3WUY3uXrHirmr+OuZzxY8+6wlYnnkyJGMN9azQfCyne9MN67FTFaP\nHDmSRIDM3jzbRS6s9v2U9JS0WlotBV8JWra52CF/L2aZxFiAlQcJ2Tx0yNi/JPMtJPmm307c9zd/\nk/GapiW18XwaZBVFyoRWSzNTtEQyR0yL/V7qRoOzHsUDsiSjucp0QZafH7ChHQc3OET8Yya0tclx\nl9u355bJ1k6IWCuASFeXbvkTdWmP//iX7iTb8zUmbTmRrd3dymtP9RrAZyn+NduYWfX83LFzJx98\n85tKjJdenKmIA0vpX+dYMXeDK92098uf7Vm3ji3d3ZZKNhj1qTcGozW2G34/8J1OKj4TZXClm2jM\nXEmYTLG7hRpLtiVP2ta0KaVIzJ4jYgsBIl0Rw3Iedvct4v1Yg5yddtXcSJpjFiL+ci7AbBZdq8ea\n7j/xf7bJmtLhRytX8ovS0rTXNBFzLODOp0FWIWJTZxERUqu5qKfIx0yVmwizbq4DBzcMcq0zagYJ\nsuzAwfULc3UYC1+vU9vnxdFR5XXLwqV4dqy1RICj0exI80w9RRRbxPijsRgrX3iBBr+fKq83pQan\nmlD/fNUqQ5JptmZgtvU+zaxxLn3onRONwme37WOgxtq+yTQXVsaSC8zWe8wFYt7eH3yXgdjf0BS8\nM29ZVHVrbNLMAQ7QRGq9zmLBbNV3LbQ9Sj3VF7YT+CCQtoakuvaqbf0jk5jt+l0WHkVn0OwiTGpJ\nVfUUbUL2rDRxQ+U1cuDANmRbZ9Qhow4ckD1BETBDhApFANL1uenw4YLbAPL83LFzJ/3j47p9pyPq\nVkl8prXM9qGAdo3T9ZNNH0bn5GPfmCXuuaJ5fzMHeg/QFGyyjSBqiUzLwU5l3haWX+C9J36YN7Kl\nR67lrKURtrPdNBFVj6G+rJ5zV8/llSgW4qFAUdkTJpV1FBDqa0N92W84d/UDy+ub629SwaHndiwy\nNJOebDrcPRlzYEkdFBmyJaOl9pvioFghMmE5SEWu9THN1WGcKe1x4vhxoPDlROwqL2LV7oDPxwff\n/KZh3+lkpVbLxWRaS732zHw3tGucrp9sStwYnWPHmmlrDGZT1igbZFPvMdNaaEt9qOftvSe25tXr\npyftzKYci3oM+z/en1PZmmztBjKW28jXb0Y+JLLJHST+TyNPzee1V31tOPBxbVbrq72+FP3vd4Hq\nhmTYsmnRRvpqLmar3Dza0ZG1DbMFo3lLN59GS5rLGuQDRf/dcJARdsSMOnAw52FXfcx00ItJzHfs\nnrbPdHGRVp7EZ2N3ur7b1qwx9NSl+0wPmdYyU3tm5yFdP6KPMpeLloMHTc2pkV2ZYlnN2G1XPKVV\n5CPeMJXIlOXk5bUiGa0v+zOCvtUEfHcAZVmOIHkMAV+A9vPt+SNmpImDFXebkPcgudt3/oT+0Wk8\npXD40f+HCu9fWpbIml6rNjK6uPJ57VVfGwLeV2jvs068tdcX8QCzaFGg+NRctmymsFWznsBekEvm\nZGGDGeTikTQ612je0s2n0ZIW8LLh4AaBI9N14IDCyRa1mA3pLugTFyvSUqt2F1Jylutamp0HM/3k\nM05YO6ctBw8a9pUPuexswe5YPyuSUbvWUz0GwPbYRdPIpFtMA6txn4Fn/g+G4jcBspz6k2//yLK5\nucp71TbHpX9H+/l+e669Ggbw5DtHOPDxx3y+ro5/WH0fW7r/2PL6ztZvUtYokMY1hy2bEWHMqbvz\nZkNiH73hh6+0wVDAuso8jP4YjGxONxajJc3nGjiY28hWput4Rh04wLz3yS4IIuFxudgYCrEjHLbt\nhsMM8dPzCljxDut58dJ75rL3Qoh2zwwPE6qspMrjSUtoc11Ls/Ngpp98ety1c5reUzvjGdvafXJu\nxaJpYLe31VoGVnvWUzuGgDdAy8GWwicYMuFBNIJVb7snERTkd13kta/+W8umQu7yXrXNG0MBWpf+\nG3vInsZVdO47V7kUi9He18eW7pNZ7ddC/ybljAJly81hy2aEWedu3mxI7KMHgG0R+Oku605mozGo\nbd7KzLOTnwNb0B+L0ZLmcw0c3JhwYkZvIBS7rj4SgXAYmpvlrK3XM97s6uJofz/t58/jdblSssfm\nEstkJv5Vt0SKhdjErd3ddH36KUt//WvW/fa3RGMxUzGUQZ+PvpERS2MT7faOjnLswoWs43rTQf3d\nEPOwvKaGloMHc4opsytGVw/aNUzXlyA/AV8g5/jofKPQ1ykzca3iOxmXJDaGQraspzqO99SVUznF\njUaIECZMM81ErURxZQiSS7cWZomhGOedtcdp8Pdx6hsRQpXzzduoQjYxyEY27wj/nX1x0xoGkI+H\nUMX++20r0gQlmo3rzNSOHjLFlAqc6Ogwb4MVJPbRZBMc2J6d19FoDOp5U8eCbsHCfOq0VQy4ob4b\n1ykcz6iDokFPDxxNPF2ORMzVHJ2r8LlcgP7NipEXMdtYRr3z9DybVuJJe6JR+sfGAGjv60vyzAV9\nPo729VG7Ywf3BIPsXrdO6a9vZIRjFy+mjC0dRLtVHg/D8Xhe43phZh7UksxsY8ry6d3QW0Mr85nv\necw37CoPYsbTqv5Oti5daguBUXvpFsxbAOTg8RO1TpGJ6S6zLqocgtPM1GONdEbYdWYXQ/EhAFqX\nVmRNRCF3r3ham3MJ1NO4iqzGuWeLCBF66MGPnzbairKsUFZIE5SYaZnUn+/rAbeF4EatJ9DsltA7\nLqvtlNhH7u2wQ3WCmbas9Kd+dlKGzNfPACGgyoq9DhzYBCdm1EHRoLkZDhyApiY4dMhaHcu5hnTx\nQEbxmNnGMtoR56ZtY2RiQqkbWunx8N4TT1Dt9cqE89o1jl24oJzbEgrx0vr1acemhpb4gkwGn77/\nfsMao7lCj7ALW4PXgizb/whVHh9tbcW5L63E5M65WDQDFLJcST5iu9VxvHvW7WFL95asE/q8736f\ngTUDNPks1joNk9cSKOo1qvHWcPZbZ2c1XjntA4wws1oOJhuECSsPIVppNf8QotiRJigxTPplUn/+\ndjOszCG4MVNf6Y4ze65ddljpTx0L2qI6T8CMvU7ZFwd6cEq7OJjzaGuD1tbrn4iCLHO9ODrKpsOH\nUySgRnJLq7GM4tx055mVBOtJQusS7V+Nx9nS3a30e25kJOncielppa/heJwFZWXsWbfO8IZeLSO9\nY+dOQPb4/eSddwznTItIZyc3P/cctTt2sO6VVzIeryddFeuwbP8jHGv3ceCA7LG3ikxzbEeJCSvS\n20KVdck38l4eRIV8yK3VktNQZUiRUVuB8K4O9A6wsGuhNSIKec+AKtaoxlvDO4+/M+uJs7RlgZKQ\n57nIRzkMf8LoJprYns8UtoVGGr1sumWKACcTf68APmNWd6tpI4y8Th6dvvTWUc+mXLdTJju0sNKf\nWmYrzquycD4UrJKPgxsEDhm9gVDsuvpAQJbmXu9EFGZiRvXIQ8DnI+D1psQram+IzZKYdDfSeiRG\nr11tGwGfj/vq64FUkhsqL0/qw1NaqvR17MIF+sfG2NLdrXyu7U8QX4D+8XHFLiuES8iIBycmaD9/\nns8+/3xaMvj2668DsKKuThmLIG1VHnnOmppgexZ3FWq7V77wQsrc2hHDeb1Ib8H8dSqb+EFtvVWz\nyAeBV8fxZgs1IX9v1XvWZZoZbtZz/c0Qa3T2W2cJVYZyassOpH2AkQVxsYJcb9711qKNNlpptf4Q\notiRJigx3TL1AIOJvxcD1VkEN6rXqUKnrx7gaEdH0joKm5YjexqbkRMDmdlORg8pMtmhRbbbV5x3\n0uL5BarkYwrFfm/rIDMcMurAwSxAxIwaJfRRE5T6X/yC+mefZWhigl1r17K1u5vwvn3sOXvWFIlJ\ndyOtJjFlLpdhu3ptGJHcKq9X+ftzgQA7wuGUvtSESUvG2tasYcG8eSnHWiFcakJb7nYzEIsZzlNP\nNMpIPA7A4oqK1DqnOh57K8m21HY3+P0pc2sHkcxnoiQrsMPLaxbZkLkZz1gdn31+W0HszCdyTeiT\n70wkuRLurBMzGaBtTRtLli7B94iPTb5NyW3meS7ycfMeIMAudl1fRDQD0i2Teo53ZNm+uo1ndPrS\nW0dh0zmsJwZSk87PMkNK1f1UAReBTRh71bPdvuK8kMXz8/zsJi3yoTJwMLtwYkYdzAoKWXeyGCHi\n9tQJfdTxnCJGrRSYTpyzsLycT7797aT4TSCnODZhR5nLxcsffcRQgpSBTJSXVVdT5fVaWqNoLMb3\nOzp45/JlxiYniU9Pc08wyD+sXs0Xf/MbGvz+pDb14vH04hqtxDpGYzG+19FBCTAyOUn7+fOG85RN\nPGA4PJNsq7UVAn88s5/ry8o4d/VqSszr9lWr2HT4sKmxzlXks66qFnrxf5mSGok4zQr3XzMy2VAQ\nOx2kwmzyqXzERM5WnGWBynDe0LBjjjO1ke7zbOpvinMqABHg0ppoXy+uc46EM+cVYZz5KFZkGzPq\nkFEHs4JC3rQWM4yIkCAoh3t7uTIxgd/l4tQ3vkGoslI5Z0VdHYsrKkzVKM1E/tXrUe3x8MWGBi6N\njekSZbPQkuZarxcJGJyYSGpTS8bselCh1HItLaXC4+EZg3nKhgxqk221dM6Mtd7n41LC26adt+uJ\neOohH4l+jKCXwChTUqNoLEqkK8Jg7Du0n+8viJ3FgGJ7+Gc2+VQzzRzgAE1YTMyUBvlos1C4UZPG\nzJVxq4mqupZnOpvFOYNAO/pENhuSm2/YvSZW2ivG+XAgw0lg5CAjiklXfz3FuGUDsRZG8kohi/2X\nxx9nYXm5QkTV57z66KPsXb9eOc9IIhnp7GTH6dOKPHTZzp0p0kSxHjVeL+8+8QQvrV+vyG21a2Q1\n6ZHAlYkJhYiq29Qmc0qKsfyvXablsMKuRb/8JQ+//LIiN27v6+O1/n7DxEcBn4/NbnfaOFzt+1rp\nrno/f76uTnfeRF/XQ/IgI9ghFzZ7ndKL/8uU1EjIRnevXV8UsuZskE3ca7ZxyUZrkat81mzyqXzE\nRM7VOMuOjo4bNmlMtuPOl5zT6HthVMsznc3inN2kyl6F/XFgI8VFvOzei1ba00qEi+ne1kF2cMio\ng1lBrjethYxNyycykZNQZSWffPvbChFNd47RDWdPNEpcpU64qEoKJNC2Zg1LKitZXlPDD7q6ePLI\nEYYnJlgwb56S+VbMudlY1bY1a5ifiP2s9Mj5AMtdLuaVluIqKdEnny++yMkrVwA5mVDD4VUcPUrG\nTLaRzk52nTnD0f5+ekdHOXbhgkJ83SUlSszo99L8aKnb0JtDJcPvrl1QFktKtqXez7vXrZuzRCcd\nzJCgQpJtvXhJszGUxfxQINO1LW1GWAPY/fBP1DU9wAEiWdyKml6nPMREzuU4y2JKGlNIZDtuq4TJ\nTvJq1Wa9mE9hfzvgZaZ+qdZGM3bfnji/Hjm2NVfYvRezzQbs4PqAI9N1MCdxo8h8b9+5k/7RUTyl\npbz19a8nkVItjCSS4n2BcrebP7zpJnYnkiEJ+d5wPK7UB/WVlhJLlGTZGAqxd/36rGJVhSz16fvv\n594XX1TkqwLqmqVNwSA+l0uxYWMoxMTP1hvWnlVLD98eGODi+DggX9Qk5CdttT4f8elpJRZWXfNU\n287JK1cUAlvj9XL2W99Sxrbol7+kd3Q0yW7tnis2KaTdKGRdTy0KObf56MtsjCRkvrap65OaTVxk\ntzzcrNQ1QoQeevDjp422WSeAc0XuaYQbNe7UaNyZ1tOsnFO0c5KZbLyZYhGN+hbve4By5ERK2a6V\n1v6tCZuGNDaGSY6hFJ5ZtW0B1XkLgU+ytEnA7r14o+7t6w2OTNfBDYVilPmm82hk68ntHx1lKB5n\nIBbj4X370h5r5G1uW7OGjaEQzYsWUefzcS2R0CfS1ZXk8TszPAzIczqteoDU+emnNO/fjyeRAXhF\nXR0bQ6GMRDTS2UnLwYOMTExQ7fXSlCgFU5Xwkqprli6prMTncnE6ocVtCgbZEQ7T1gZLftSJ7y/2\nsel144zDg6r3RfvTwEAsxnCCiJYCg7FYyvyLdtSe1I7HHksam/ohQI3Xq1uv1cirer2gkHU9tbCj\n/M1s9mXFm5np2pZNBl27PcFmpa65elDthh3SwtnM5HmjeoSMxp1pPc1mfBXtCCJqxjtn1LeeN1MN\nK/tHa38PM4SyhpkkR8cT71UCTxvYJmqV+oHXsrRHDbv34o26tx3IcGc+xMH1go6ODsKJMhvFjEgE\nenrA75fLaujVHW1bs6bgiWAyeUzETSzI2VMDXq+u1zHS1cVmt9vUWoganX6Xi9e++tW0x4obTr33\n9ya8gWrvqcjuCvKN755169jS3c32VatY8utfE02Qs8GE53JxeTn1Ph91Pp9h0iT1HA1PTCgJkO7Y\nuZM3vvY1tnR38/T99yv9iDYWl5crc7ewvHyG6PpgcdPMvDb88pdUeb0Mjo8rWYbLXS48paVE43Ea\na2sJzptHe1+fYpOg1dPA0f5+Fv7qV7hLSvCUlrL2lltkWfDp07BsGQCTksRfv/VWkgdVEFyAq9em\nufVvDnHvO+vY/ayPQEBee+F9rfF6lTI515OXtG1NG5GuCNtXbc+pNmYm6F2nCvXwKdIZ4eSVm4Fb\nWFFXo/vQIRuvqRUin+naJuJe7YTwYJ7hDCFCVFFFG22c6Dihe50SUtdM8CfEd000sb0IhKV2SAvF\njT7IN/KF0gfMxu93sXuSM62nIDhm22kEbkUu6ZJurB0dHfgTaxEE+oBFyOVRTmts0s6hlf2jtV/Y\nWQOsR86yexKYSLx/FbmkjN68vAU8jExE1RV/rdiTzX7I5EG2Y2/NlXtbB8ZwyKiDokNPz0zZjEgE\ndulcHY2IV17t0pBNbf/aG+aWgweV4xeUlSV9duL4cczgra9/nYf37eO1r341yTtndFOc7mY50tnJ\ncDzOgrIyJQ5Ue+MrxtRUX0/7+fNKaZkVdXX4XS6OXbxIe18fka4uTl6+nCIhVs+RqBUK0D8+zpbu\nbqV9o7nzlZYyHIux4LnnqPJ6mZyeprRkRvExNjXF2NhY0rnXpqZgaoqF5eUceewxAOqefVYhq1rE\npqa4lvD8vvjRR0xMpx6pfk/Mm6ekhLgkMemZZKihj3b/Tr63+ZvsbfMlJYB65/HHefLIkbR7JVfM\nhiTYLhKUje2FevjUE+1hcOKXwHdYXDFCwNeq+Tz9NcAIVoj8rFzbEh5MgF5kSX+ECJvZnFO7bbQR\nIcJ2tudVomtWDtxG7lLAGyluU0tU9OSfswk71jPbduoT/+LAscR7IhhmIcneTPUc5rJ/1Haqy70I\nrFC1qR1PCH1prhV71GO5A/iAzPNlRHZn66GOg+KEQ0ZvIMyVJ0f+xNWxqQm2F9GvfWb5XPINs/p4\ntdcx4POZXguRwEh7A290U2z0vpCRCu+dIIZGN767167ltuefV2I8379yRSF3jbW1bF+1ilvb2pT2\nlu3cyaKKCj66ehWAu2trWR4I8MKHHxKXJOUcYUtPNMqZ4WEmpqeJT0/jc7nwJuJURazqpUQMqBm4\nS0q4vboakG/mK9xuhicnAZjnclHp8XBpfJwVdXV8PDLC5ViMEuT42YmJCao+9zlFzqtFTzSqeLWT\nEBin5DtdwNq0a68dt1kSlk4hkC0pKgZksl3XE1cggiZ7MMdoCr7DjvAhnc+z89Dmw5tpJ4QHs5pq\nhhhSPJmBcG50I6MH1Sb3iJpMR4gY9mnWUyba0SO4dhEgq5iN328tUVEToHx40axCxEK25NiPlX0B\n8lr8GLikeq8aWUKrjU/VI3tG+yfTnKntVHtzG5Alwe8je3Y9yJ5QM3NhZT/7VX/3Y45EGpFdOx/q\nzJV7WwfGcBIYOSg6RKPyjfj27foS3dmC1SQgdiYN0SY1USf9OfTII2z9oY+eHnh/zX4GbkpNYqQ+\nX5ucR02S6svKOHf1Kn63m/j0NO19fVS43YwkiB1A/bx5bFi0iLbf/55J1XfbXVKivG7w+/lsVZXS\n5/x58/jKokWcu3o1KVFQLvCWlDAhSbiAqcR7vtJS/G43I/E4cUlS6rNWe73KWgxNTHDbzp2K93Nh\neTkP33QTz589q7Qdqqjg1ooKeR4kifbz51lRV8fNfj+He3uJSRKVbg/vtT6hm1RKb+2tJt0Kh2cU\nAq2tyQoBq/U8iymRTLa1SDs7I0SjPbjdftasacOXB6mwqENq5MG8XuvERokSIcLTPM0WtuTdk6kg\njC3V6/NROzRMWCG4rbSakiVrUUzfu2ygTSpjpb5jmMxLawdhTdePtn2ztT/NQMyFkPb+LbJEVsyV\nXiKjTP3fjEzyQC7lsjdN/1FgJTIRrUq0dyv2JinS6/OOhI1ma3waJSZyEhZdn8g2gZFDRm8gOLr6\n2YcgfmOnTnH7/fcrxC+Tp0zcwAd9PpZVV1PmdlPh8fBMInZTIS5lMRb+RRfvbU2+WRbn13i9BHw+\nroyPMz41RWNdHT1DQwo5rPf5FG9oqKKCa/E4EnBZk/Sn1uvliopQ1vl8RGMxpoCy0lI++OY3+UFX\nV1IWXzVZ1UJNKEuAu2tq+HR0lMuJNrVoCgb5g6oqDp8/z5QkcWViIoU0u5DlxrUJObLefAhCFP7p\nT3k3kWDJDdxTX0/3Jfm598ZQCK/LRZnLxbmrV3n3yhUlntZsJudIZyd7zp5lcGKCFXV1vProoxnJ\nzKK/7KQ3FqWqzM3JP1lDaP7M8VZJUS431lYywZqB2vat3T9MadvoOrVvX5j+RDbfpUtbWTtLnsa5\nnjXZiv15/82wqXq9INN2kmg7CK4dhFagGH6/rRAIM0sbZoZI1ieOtUoSFyHLY6uQ4yfV8ZBachfF\nlmcfdHR00BgOp52LsE5feu+pUctMEqU64D7Sz4e2vSPAAInfvkR74ny7PNV6e2C2Y4vV343ZtuVG\nR7Zk1JHpOnBQQCgSxUuX+PDjjxXil0lqKWSgfSMjSlKg1qVLlRtJIW0O/ttuQneOctfu3YQqK6ny\neGhbsyZJRqqW1wrCBVDt8bC8tpajn35KhdtN37VrSn1SUS5FYEQjaZ2WJIU0fvGWWwhVVlJfVqbE\nWZa73VxLEMWA18vqm2/mSF+fIo390i234He7KQEujY0pY9Re0cpdLsamppAkiYO9vQqJXlhezu2B\nAO3nzwNQ5XZzR02NMr6VL77I4vJyxfsraqj+QVUVLQcP8uHVq5Ago5PAu5cvK3PyswcfJFRZmVLa\nJujz0TcyQvP+/Rlv6nuiUcXWxRUVhsdGOjv5zblzxKammP6sBPE4w8CWk8n7w0i2auQ9zCWRjMgE\nCxDpiuQsN1XbbqVtdyIJUDDYxKoCZ/NVQy0z/uzzz3Nvff2cIqW5SLyz8/SluT20SfNqNqGSFdgR\n71rIBE6F8MJakbOaWVoh1axAlryK7K9WVjKETEaHkT2Tu5jZcQOq40qwTxr6U2CEZNmqFnp9pes/\nwsxvbClwmczzoW1vCDlJ0S1Ad+IzEdspYjS3ReB8DwQysDWjb63YA+rP3wYuJj7/PvCSgb1m+7CK\nnwI/TrQzzEwMrxOLOnfgeEYdOMgBVr0kao9cwOulva/PUK6oJib3BIPsXreOTYcPJ8tzE3VCPbgp\nf34NA08c5NhAf1I7rpISyl0uvC4Xb3396zS9+CIDKk+nSGVsYNwAACAASURBVFIEUOf1MpyQuKqh\n9oSWAu88/jhNL76oHDd/3jwujo8rntsqrzcpm674XGSx9ZSWMjY5SVySkjyFag+igJYIa1HhdlPl\n9bLI7+fty5cV7+uCsjL6x8bwlZYSn55Wxqj20AZ9PmUu1P3UeL2KDcL7KdausbaWWysrk0hz0Ofj\n3vr6JJmz2A9WvKI3P/cc/ZoETVbkrEbew1w8R9nUtcxH27FYlK6uCKtWbeeHvq2zJn8U+0DtiV9S\nUcHihKy72IlptjJpyNbTF8aKP2quS1vVyIfH1gh2emHNIlcyIbxsg8ilULJxkOt5YMMkJ/dZAbyq\nsjnbZx+Z6pGq5+PnwBdJltEa9S9I0xDJqAHOprHVyFMt5kSgFZk8HwDeCsM9Jr6OYdJ/a9Wfe5AT\nOUFmebGVPrQw2m/qdhZgTUbswF44dUYdOJgFWK1JqK4FunvdOt26oDCTcKh/bIzBiQklg622lqjo\nv72/l5OPvcip4SspfU5JEsOTk0qt0re+/nUWlpdzTzAIzBDRCrebyxMTCsGsTJQzqXDPCCg8JSW8\n8/jj3F1Xx+qGBkDOtPvm175G69KlLKuu5tjFixzo7eXNhFeysbaWN7/2Nep9PiYlibGpKYXw+kpL\nk8iZ2oMo4Epk0xX/i4tWnc+HCxiZnKRvdJTugQGFZDYFg7zR0kLr0qX4XK6kzLrimBqvl8bEHDQF\ng3zh5puV8dyjel8kqhFzf+Sxx3hp/XqqvF5lfgZiMQ709nLg449T9oN6TD3RKLe2tVH/7LOcSyR7\nUiM2NSNKrvZ4TNVzVcPIeyg8R9ncEGdT1zIfbft8Adau3YXPF5jV+pViHzxw002AvEca/P6014EI\nEcKEaaaZaMGrUybDqB6xGWTn6bPmjyq22qS5IJfvnVXY4YW1WnPSat1WbfvCy7Yb43qgRjaJ9+PI\nCYz0kgZVAPORPXUi2VEutSwz1SNVz8cWYDGyl07Mj1H/6vqhJar/lwKb0F+LCPK4RzTvhZHnZL7G\nxjbkOb7b5Ncxkxf3ZOLvRuBB1d87MF6z25ETLXkSn4uCaeoyOHrnCRjtN7Wtb2CutqyD4oJDRm8g\ndHR0zLYJRYNIZyfhffto3r+fqCYe0gqsZtcUEsUTx4+nLUSvrlsJMxlsteeo+2/w+xXSo6e/d5WU\nEJucZMULL3B7dTU1iTYq3G7m+3zck5CpNtbW0hIK8ciiRbhLShiZnOTKxAQLy8u5+N3vcnddHSBn\n3G1dupRXH32UUGUlu9au5dyI/NNYCgqpvbWyklBlJT6XK8WmGp8vafx+d7LlLmbI45caGlhYXk5L\nKISvtDQpntSdIKor6urYGAqxvKaGJ48cYWRiQqnVKtoTWF5Twz984Qu0Ll1K7blzfDA4iKekhCqv\nl39YvTrlhl0793qk5POJuVFLeEX/TcEgrtJShuJx5cGAGpHOTsUzW+Xx8O4TT7B3/XpLhGHNmjaW\nLm3lkUcO2ZbgR2SCzUdt0YAvQGBtgBZfi0LUzFynZrN+pdgHYv8feuQR5cGE0XWgmAhWuuuOFtq1\naKONVlotxlCK2+DU20M9km732hbTg4BckOl7oV6brQQskUoBq+TSquzVqP10JNHoHPF+OzKpUZ/b\nhhyDOoIsH91iwjYzOJP4v6yjg2ZSd7R2PrSvjciWOK4GOc4TZJXO28jj/raOLdp5Ed5VMScPAksA\nHzKhJfG5x/jrmIR0h/UwQ8hvRfaEipjVgI5tAv3IRHky8blX00em/We03zZ3dCjthMjtgYOD2YET\nM+rghoRdpTFyrX9oJPMVpCzg9fLgTTfxqy99STf77c9XrVJKxmw6fBiQb4jPDg9zZWICF/Cbr3yF\nSFcXt/j9Sgxle18ftV4vngTZHJmcZOrKFRaUlXF7IMCno6OcvHIlyYtY5nIx/xe/QAKC8+bxmaoq\n3rl8mdCvfkVseprGujouJsqxqCWxR/r6WPfKK0leP4B5JSW80dKSMp7PPv+8QmRrvF4GJiZoCgaV\nG+jwvn1K+ReBLzY08PvhYfxuNxNTU5wZGlIktDVeL6WAv7SUEdV5xy5cUErcNHZ3c7G8HICjn36a\nVBM13VrtWrs2KSEPkBLb2xIK0bp0KdsTYwPwu1y89tWvJrXfE40qiZG+1NCgm6U33Z6BGe/hXIK2\nLIeZ2paFql+ZDurY10zXgdkkz3YgN+mscaShXkkWu9fWbNmXuQ513Gy2NRytkkurIb/ZxGxmUxok\nkHj/gM7nVqTF2mNFfOoYcobcrcBvgBhwD/APJGfUFfNThhy7OYBMxETbYl3U83iTjh0dyMmYRD+7\nmSHGVcDTwJPMeFdrgGdILsWzEtlT6w9A267MmX3TxQer5/6ZxDxcRCa9bRivjYdkiIcILYlztJ5S\nLbT7TazPGHBQZwwO5g6cmFEHNyRyiZuyE0blPtJlSzVzztDEBA/v28drX/2qQmrEmEFOBHRNRQ7V\nr32Jep8C3tJSVtTV8dalS7qZbY3gLS1VyqcASjIjgYfmz+e3GzbQcvBg0ngGx8dp7+ujsbaWvevX\nK2R7a3c3vzl3joHx8aSsvC6g0utFkiTFm6wkTtKMU406n4/bE/GtoowNyN7Vu2pr+afe3qR4Xa2d\n6R5gGO2vc1evpqxLpnO0sFoiptiRj7IchUam2PFCxQ7mK94yX/GIhVj762F/WUW2SYrzXW4jm/bV\nJUzOIRPCKuSYTDXxM9tXGPNxiupjFwCfIzm2VU32wDgjcFhznBv4PbAe2Vso6oKGkL2F6vSAfuAu\nZhISCbuPMpM0SJ33QZ1VWOyDCmQiK9ptQZaz9qtem006BKlzqx5fKzNeYO3cnwNuAyaQJb1HSJ7D\njcjjN7s/tP0K76qWYDsZdgsHp7SLAwcWkEu9QDtLO2hLtpwbGUnKgqtuWyQ0EmSssbaWI489ptu/\nXu1QT2kpXpeL/3HlCsMTE0RVMmCRYKjC7WZsclIhnd7SUiQgrvFEGiUVqvR4uBqPs6KujjqfTyF4\nFW43ntLSlHhQvZqpIHsXy1wuhRCWud1cHB1lUnXugrIyLo6NkWxZMua5XIxPTSn2uktKeHjBAgJe\nb1ICopZQiHcuX2YkHmdFMMhYPK58BrC4vJyr8bhuEiLtftja3c2+jz7iSizGgwsWsPfLXzZdl3bl\niy/S4Pfrrr9AsTxIsQu5EjW7S89kg2J5QJCv2pj5InR2kfR0N5uFTCJULMg3qbQbmchCmGQyB9mX\ngklH1LV23IXsCRVoQSaO2rqrkEwItSRXm1AIZAntx8yUNBN1QWuYkfD6gNPAD1TnCxJXp+pPjQbg\nfOLvKDL5u6Q5phk4zozUVi/pkBUCZ/bhRwQ4hezVfYNkwtwELEcmrKLPTN5bbb9qYqteg7DB+w7s\nh5PAyEFGODGjM7ASN6WF1aRFevGpYi1E3KFI/NM7OsqxCxd02+6JRukfG1O8gg3l5Yb2q238xe9+\nJyc56uuj3ONhSWVlEhFtCgaVBEMjKiJaCkxMTycR0RLkeMgHb0oWE1W43bSEQrz3xBO0Ll3KXbW1\njE1NMX/ePGq8XkYmJ1OIaLnLxdP33099WZkcYzk6yqP/+I9sOnyY7atWsfvsWSWBU5+GiAa8Xr68\ncGFaIgrgS8SSCuI8KUnUl5VRX1bGqaj8k99YW8voqVNEYzEux2K0nz+vJF8S+OTaNcX+4Lx5yrzf\nvnMn//1f/zVpP/REo1wYHycuSRz99FNTewTkPbm4vNxw/QWySUBjV4x0PqBN8mJ0nYp0RgjvC9O8\nv5lobCbiSpSHOdB7gEjX7MRjWo0dN4NsYh3tkAOr41tbOlqAbGNFM8OuBD/pYs30+7Caqmf2YeX3\nO9dEPYWG2VjB6sT/2lIwkLqiRitsFAsZQU6+I+z4Psk1S2uQvY4XgfUdHUQTbXkTn4vfoiDQp7Fj\nGDmhUL3K/ivMEFEX8Fri76bE/43InstQ4ry6RBt7MV7XMuB11WshV9Yijiz3Ff1UkzpXZuKHI8jy\n4dcTtu0hdU5Fu08i78ljiXGJON565DkLIJNUdZ/pbBDzOg+41tHBJoxlvmbl4XPvqnD9wIkZdeDA\nIqzeeGrjUwNeL28eO0bD6Cj1ZWVcHB3l9JAc7VHl8TAcj+u2fWZ4OOm1OjGPkY1qiDZFbKkoUVLl\n9fLA3r0pZFFbCkU8+R2IxRi6eJH/n713j46ruvM9P1K9pNKrJJWMLGwLOwlgAo5lBCExvi6QHdom\niQVBHUJ6AT3rUneS6Tvpnhu7+951+/asNcnMrKHvTPesmcvF6Y7NdKOADTGY2L60BdYDE0xwsE3H\nNEpwUCIbWZatkvxSSbL3/LHPPrXPqVNPlV72+Wp5uerUOXvvs/epx/d8f7/fN1BURFwIvEVFfGXR\nIs6OjfEdo+Jv6+uvc/D0aUCqq6r/j2Ixzht2GBevXGHzoUP0fPqpaa9y6tIlAGqeey6tnctrDzxA\nW0eHZZsHksKIRyYnLc+bw2FKPR62f/SRGTLcUFbGz8+cYcTIGfVAkrWN/uxXw8Pm44FLl8w+fcXF\nlvkFKPN6GY7HicXjab1FlbLqM4o8qXFGdu9OUuBTeYymQ6FypGcTqTxJg0YF4eZwM1tnwH/USTls\nb2lhVc//RWDNTh4L/F1BQmTzyXUstDfm9/k+kJ2P52xasuSei5hvVqWL6UCm9VO5gk8jSYxuBaP2\nt6/oIM4rrKrqqjxFpbj1Yg2P7Sah1lQD7yMJlT0HUz+mHvkdqXwu/9gYq3q+CUnc9FudPuBrRttB\nEnmnKs9UkVb1PaO8VKuRXqSlwBeN8f8rrR11Xmrudmp9vonMefUDz5EIFwZ4EkksVbXcJpyr6dot\nbjD6X6r1r69JHYmcVo9xXAyphA4h17PeeF2t62O25zp6Sczrh8a/TSTChO0FrbKJFHA/FWYPbpiu\nCxc5ItcQX3tYpZ57WBcIcMYgYovKynjr6183cyTtbd/76qsmwavy+fjkscdMH0t72HAsHqf6uefM\nYyMLF7LLCBe1j18PMXSC7iGWCrpfZ6C4mCtCWPI6g14vFT4fpzUPzaDHw5dvuIE3Tp1KSzyzgR4i\n5YRKn4/7GhrYHomwfMcOi5dnXUkJE1evmsWD1JqoNksNX9TRyUlKi4v58Jvf5Ifvv09vLMbbp08z\nIQQe4JeG5U0sHufJzk7eGhjgrDEn6UI39fnf1NiI3+Nh65o1OeWoZsJ0hfbOJAFJ5Ukai8eI9kTZ\numZryhDdQo4zVShsofMqF7OYfvqpooqjHKXRotNMH/INa50Nn0twDv3LjHyzKl1MB3INK3ba376i\nj5F6hReSIF8LkKGwan+QZOqS9rgc+R14CZl7WYUkcvbw101IEqsImheppp5Dqn+3GH0NIX98+4Bf\nYCWDjcgKtT9H5lbqKAIqjL6/hCza8yWkWroFq1ep1zhnlYua6sf+IiRxV30tJlH1FuS87rEdEyE5\nbFr1qW7/6t6mYaO9Eaw3jZci13IYSXp3Yc0D1tfZHrKr1qvKaLccuAdJuvN9NxfyU+F6zVN1w3Rd\nuJgh5Briaw+r1JVVZQXSHA7zwSOPmBYpTr6jxw1FLuT3c/SRRyzenPaw4VAgQM/XvkYx0PO1r1ly\nS+3jtyuuOprDYYI+ew08K7xFRSaRA4hfvWohouFAAC9YiCjApStX6CgAEYVkIloEfNGwqvEXF7M8\nFGLcKGRkr+p7ZmzMVJmbamv5xcMPs7SigqZwmEBxMe889BDH2tpYVFbGh9/8Jo0VFfyjEfqsFNR7\nFy5kSXk5IOf3lQce4G6j/0wKun49bI9EzLUpZOjnVLwl02EmLUtSeZJmYz1TyHGmCoUtdMVcRT5H\nGGFzFuYUhbIwyTd0NpfzV2PdF93HZGRySnFxSiHRQ/8yI0t/i2sMczUMMdewYqf97StaZ/xzalNP\nVBhEzouqlusnQcyajH+DSMKkjhshef5UuKvF0xpJRAPI76SDSCIKMtpGFfLRbwWPIImenYiqY0aN\nNn5m7NOFVGB1r1LV9xBwr/FczUMJUlUFSZTeIqFMgyTc+s1n9e2vrp0yEiHFVVgr5E5q259Gzn8A\nOQdqbLXG/83I/FZF3P/Z+F9fV32d7SG7ar2PIsnuBaS6OpVP90J+KuRqk3S9wyWj1xHcnNHZgZM/\n5drz59n/4IPsXL8+K5LQG4uZYbRrFy60VGJVpKVcCwkFSZCuRKPcu3AhkJw3qJ7bSSLA+htvZGl5\nOYHiYjxFqW9y+Y0Q3kmH6IdKn4/G8nLGr1xh1BYumw1Ki4tzur3mKyoy8w4EcPjMGfxFRdwRCnHo\nzBn29fdz8wsvUOxwPvHjx2ltbDQ9U5eUlXF4aIj41av84P33aayo4Pff/rY57+M2QuuUG5otAXTa\nL9rdzejEBPWlpby0fv2UCWSuN1BS5WfaUXBPyO5uVv7wh465rVPxOy3kOFPlTxY6r7KSSiD7MedK\nuLMhr7l8Z+Ry/mqspb2leLu8sA+i/2d215wd+diFzL+syuzWIhPZvJZ/IOvhtxuRSvkZnAnKndpj\nFYYaQiqS4yRI1RIw3oUS6lv35s5Ovmw8LiORy9mHlRAqxElN/u3fnOrbWPWV6jtQ2B6r94Hdzfui\nMa4jSBX0X5BKrFJm/zXwBWPflcj5UPAhiyXVIUNyu5BkVX37jWAlrmXa9nuQaxAnQdD9SDW0Hvgs\nMqJBYYIEcXaCOr9yEgR2B/IGwjLjvZHbZ0AyCvmpkN/n0vULl4y6cDHDCAUC/M933kkoEMiaJNjV\nMx3tLS2EjeJDHSdPpix8oyuoq15+mR0ff2xR9xQ8wAfnznF2bIyDg4OcGx8n4JCfGiguZjxNCH6g\nuJiTFy8miKht10xE8/LVq0lf1KUp8mTLvF4mhLAUOZoExoXg8LlzgDyvM0aRonqjaJLC6MQEApJ8\nXu0EH2TRIntuqqeoiP/Y1GTZlu3aOu3XG4tx8PRpBi5fZvOhQ2mOnh5kWxSo0ASsNxbj6LlzWRcH\nyxaFHGcq5dC+fapKZa5jVoS7nHKGGc7YZ7bkVT+PJ3gi5TnloqiqsfqChq7SDL0r8ytEdX1qnM7I\nthDQtfoDWT9/5cPpdK47kSG1rciKuYrA6ipfBfC3JBTTWmSeaCvwn5Hksw5J9gaNft7Rjl2PJKlq\nDEoRzITbjT4+QBK38iyOeQepPtaTTOiGkeHBVchqvY3Ap8iv4xEkWQ8Zff0Lknz6jPHrSuyRNP1X\nIefzS9q2AeDXtn2akBY1A8hiR3q+qVJpFew3VtpJrYAqJTyf93+20QK5RhW4n0u5wc0ZdeFiGqBs\nWOJXrnBnXR0786zcq2DP87z1xRcZuHQJX3Ex7z38MN/p6WFffz/lXi/3LFjATkNN0/NJlZdmudeL\np6jI9ORUSGXX4i0q4vDDD7N+zx4Gx8a4o7qaz1RW8ubJk46KZ5HxL10Op2fSwxVvLq6lzlh/4438\nZmSE/osXk0i1HaoQE0iP0abaWjo//dTc1trYyK4HHgDkfH/uhRfMPFg9Z9P/ox859rWorIzff/vb\nlm352gDNtn1LqvzM6e/32rGtmWoOZXd3lFisF683SEtLO4EMaxAjxs3czBkjky1Tn7pdy23cRh99\njjm1C1nIgBFIWEMN5ziX9znpY40S5UexH1EVrYKtsPHt2bnmriVkynnLNTczG8yl3Dj9/F8ivQ+p\ngp4/uhFJls4az8PAXUhCporlLEWql3GkmnMWSRgv2Nq1+21+VWtDh1PhvaVIVdZeICgbLDDaO2vb\nrud+6lYzK0n2+wRruDJIcvo+iTBjfdyq7RhQQ+J3hBe4D3ltbCdhlaP/1qhEzt/bJPK9b0Xm1iqo\nPNpfGf3br+8I+Vu32I9VIcH263kqfVxPcHNGXbiYQ1A2LMPj42nVymwQ7e5m+Y4ddPT307Z/P7F4\nnIFLlxiZmGAoHufe3bupKy3FW1Qk1dFTp8z+dDW03Ocz7VvsRBSsRLRcq8Y7KQQ/eP99PvrmN2lb\ntozur3+dXQ88gN9jDQgKejyUFBUlEdEiZMguAL+rxjdaRnnp1D96PMAHZ89yNh63kMNUn4Ihv988\nt7PxOB2nTlFjkJ2m2lq2GYpztLub1tdfN49TOZsqrFnvy2uE/BYBn6msTAotzdUGSCFV6G6+9izZ\nht0m+nfOz5xuTFdu62xgqqHBsVgvAwNd9PfvoycLpTBEiGbDyCGbPnXltY++lCppXMuw8xqB8FMN\ndw4Zf5tCm9i4YyOxUGzWrrlrCZnUmOkITi5E6G+hcln18/8hUrF8LEOb+iepH7jbeFyOJD77SNil\neJAkbwBJElU1W/VtqUJrlRqrhw7r5EqHnYj6jHa7SBDRKqAHmX/phDLt8SAYt4us6MCqMrYi1cwD\nxhiDtv3tOaujJGxsqpBhuKrvcaPdLVhJxSSSQL5i9KHIpv5b416kL6peeOyEre+TyPkYMsZgv37V\n2J1sdSKkv67s0QKprudrPapgtuGS0esIbs7ozEG3VmmqrU0qQJPLWliI7alT/HFnp1lwJ+jx8NbX\nv07f+fOmwlft95u2IMqGpDkcZlskQrNRVKfcZv3iteVRfumGG6gvKTGPLfV4aH39dfb87nc0Pv88\ndc89x66vfMW0bQFZkGhMiCRFVCDDYAECN1xm1dJyRzKcLRQFvgIMjI2ZbYO0V3HSR6t8Pt57+GHa\nli3jngULzPN696GHWFpezm/fe49lP/kJ63/2M44PD9M1MMBQPM6isjKTGOn2KAAramo4/PDD+I0+\nuz79lOUvvmghinp4tVqTbMhkqtBdO7HNlqDm6sWZS35mIT1MQ4EA3/V6p5WIFqrQTyZMNTTYa1jW\nhMPNrNEsa9KNP5c+9bDaVMS5s7OTO40MuyaaeJd3WcpSAgR4jMfM/vOZU3uYsH7NFXqNokRZyEJq\nqGE966d13fU+C3kO2XxnzEYm7FR/pCsLjWwJbTqCka7gTarjVf5oE7CNBKG9RztGfadcQZIytLVQ\nRAxgDdabAfq5DZGZTBYh59NeUjAO/AUy1NfJj7HE+Gcfr0KRMc59yKJBXzXa3K6NM3UZQ4lDSKIH\nMrxXFSe6iCS6dcj5s5Prlcb/quo1JEKPm4DnSV4T+/gntePGjf5WGccsBg52duIzXjtonOdyo79M\n15X9Bk6q69kNu51euGTUhYtpQHtLC5saG82iOFP5cW33DBXAew8/zKKyMo7/4R/SWFFh7lPt9/P+\nN75B3/nzjoSqrrSUukDAQiKLgRIbGQ16vXxoKKH7H3zQbO/S5CSjk5MMxeM8sG8fuSIeGOO3F2WJ\nh5U1NZZxZINSjydt9V2n1qqN6sOqUrFeNKqxooIl5eXExsdNsv/u4CAgbyJ8oFUtVnO8sqaG1sZG\nur72NVbU1tLS0GD2NTA2ZlFAdaVPzWG2Kqmd5DlV181WeZ1OL8581d9c1dpCYaYqAOdblVahpaWd\nZcvaePDB/ZYQ3XTjz7fPdCR2Jztpo403eZNGGlnCEg5ykH3sYznLiRHLa07TKcev8ZrZ3pM8mdO5\nOKGXXgYYYJhhOugwxz2dmMlK04VCPgrlVH+k61Vgq5FKY7oxZKvEpiIV9uN3GuN/k4SSucPYrnwv\n9Rgg+3eMIl/VSGK1A6kQRpChwurc/EhiZo/caUD6jHpJ5HGClXSOIUnWKcCpFOBZkpVNHfp35mUS\nhO2zyHl+FecQ4lSoQobX6rceJx3GpmxXFiOJqlJ6v0TCj/Qxkknjals75cY41Q0CVYm3Cxn2O4os\ngKQT6gHS5w0r2G/gpLqe51/Js/kFN2fUhYs5gHS5hbF4nFtefJHBsTFW1tRYbFoUnjhwgH2/+x1f\nqK2loayM1/r6GB4ft+wf7e5mx8cfm6pk0OOh1OslFo9b7mY69VH24x9zyZYfWuH1ct62rRjwFhcz\nfjV1xmjI7+f8+DhBj4fzV7LPG202rFYOGmRRQeWfVPp8IISZx1oEbFi8mHBJCX3nzyfNrZrzdwYH\niTuMd1NjI68YOaSQ2l82Fo+z/MUXGRgbozkc5rbq6qT+ot3dvHTiRNKapIPuP9q2bJkMFbb1n22O\nZTZenAq55rnmm+cZ2R2ha8DIqVzWxo51M5OFo+dKFqrw0kxitsev+ldoo40LXMhpTFGiHOc4H/Mx\n7/COxUM1SpQf82OuGJ9KrbSyi11TGrPybNUx3V6os7lO+eZxRpj5vDiVv1iNzEl8IsMYsvWCTJUf\nm+l4fe6eQeY8nkUSnWIkwbLncaqxN5JQQ0dsr9+MVBd1hIDfIlU8FXNTDPwB8F+Q5GsASfxGcc4v\nVQiTyOcsFMqQ1XPtv+A3IhVRu9doCTJFR4X42vNOFTYh10cdr7zMm5A3BUDeCNDJbimwAlkZOAL8\n1Ghbzz9djlwrVVAq27xhF4WDmzPqwsU8Rjp1KRQImPmaqUhM3/nznDHyIPf+7nemDcxNFRUWP1JF\nRKv9fprCYc5qRLQIqCsp4ZUHHrD0Ee3uZsyhUJGdiILxRZSGiALExse5AimJaMjvZ61hRwNwR3U1\nmxobua26muMxea+8yufj4cZGwoGAmY86OjFBiaEe+oqKOPKNb7BnwwaLIrnqpz811UYVjutERL1F\nRfztl79s2ZaqOm4oEHBUkfW11K159DVJB7sS6tR/tjmWmcJudRX2+LlzOSmd+eZ5Tqdamw65hLIW\nMgS5UFDjv43baKU1YxhoocNF22mn3tCMlKqZa0hyL70c5CADDCR5qPbSaxLRECG2sW3KY260ZKQV\nzoooHQpdaToX5JvHWci8uGxVVqVEnUCSOacx6G09Q7Jy5dRXKiUrk5Krz91mZCEhpbhdRRJR9SnX\nhCRWaux2IupHFvS5k0Q+qarY6zX6CWHN8bxqHNcIfGiM9ZjRZ7pbt3eTuWKvP8PrkCAFHuQa/BZr\nCHAx8L/hrMSGsOavOv0SuB1J6I8Zz8tJ2MOcQxLKG7ASUZCKrqrE+wYJkqsT5VuQublqfRtx1cz5\nApeMXkdwc0bnDuxr4RSCqSOTTYg63ltURMwgPXpRouIxMwAAIABJREFUHn0fFcqrSFy1UdhHAGfG\nxrhn1y7zh7dSU52+VD4fCrFx8eKU51ju86V8Ld1ts/sXLmTyyhUWlJSwcfFiur/+dV554AH6zp83\nCd19DQ28PTjIUDxuEuxwIMDS8nLqS0r49aOPsqK2lltffJG3DHXxjupqGoJBk2h9PCp/YlT6fPDR\nR1Rp450Ugtt/so3m5/4XTp8fJBP09XFaS32bvibpkA3Jy9U/NBX0myFqXlJdi4UaQ6qCNdP9OZVL\nKGu+IcjTCTX+dEWHIEFCX+KlvMNFndYiRIgP+dBCtHIND04Xoqteq6aaIxwpCJFTnq1NNLGJTTNC\nEKcapm1HLu+LfEllIfPisiXE2YRJ2gminWDkQr4zhVva5049r9S2b+/sNEN7VXEeNQ5FRH1IsnoO\nmeN4DFk0aJXx+iTwA1ufCj1IYo1x3OdIJmf6j/fbkSHCYynOSUFXKYuxWtno271I4tuFnG/da/Uq\n8rzesx3nQVrMlBrPK4EvauPbiCTuPUh1U6nL+nwvQpJNJzVVn/8vaNsrADo7aUaGAutFo6ZaEAsK\nV1zLRXq4ZNSFizmAOsP3Ml9SobxGJ4UwCxktKS93VNFOfOtbNFZU0N7SwtLycq7YlEE97/G1vr6k\nYkMlHg8bFy/mrU2b2LNhg4XEgSS3tYEA5V6vI+lsCAap8ae+R/vTvj4ODg4yODbGe2fO8L2333Ys\nxhTXlNWGYJBbqqo4NDTEwNiY6c05cOmSeTf5bDxuEvDmcJjIwoWEAwGawmFW33ADRx95xDLeC5Rw\nOH4Df7j72ZRjdYITidS3bTl0KCu1rVBEMxvoZPmdhx6akYq2uRRJmi1kukk03UinaqYjdFGi7GAH\nXXQxbPzsy6QG5qKg5kK0nNpNpxqq105wwlQ0C+XZ+iZv8gqvzLvQ7FyRL6nUiZr9R3iuP8rTEeJs\nixBl05b99Uw5p5mg5u42JKGZQJKoYyTmtN5hjPo4qpEemzXaa4NI8nfSeF4JPG07TuEskljXIJU+\n/RvYgySL6lu7AUnwtiDVQx36t2wRCXUW4/gJ41z0T3k997MWmad6xtbuVdu2IuCXyArGyuJmFKlk\nLgB+BtyIXI/HSJDgZuBdZP5ogOSKwx5k/qg+/zHjGD/SbuYDYC2pb17UGf/6bG1nez0Xolq0i8xw\nc0ZdXFfI1/dxuqHnBy49t4zLZ/3Eq2PcebuXnQ9kN87yH/+Yi0bo7PLKSt5+6KGUx6l5OHbunKk2\nKui5fzXbt1teV69tOXSI3liMj0dHOT8xwcjEBJVeL1WBACPxuKP/qMLGxYvp6O9nPMvPhdpAgLMG\naVtUVmYWFVr/s5/RceoUZR4PX6qvByHoOHXKMv66555jKB4n6PFw/A//kCq/38y7bH39dUtO5o51\n6wht22Yh30HivP+Nh7i5dlFWY80G9lzQHevWTem6LMQ1nSof9nrHbM9LOq9S5dW5la1J5Eo/rooq\n7uM+trEtLQlL11eUKL30OvqQpoI65hjHTEKcb57mVD1brzXMhLdnBGvu5iC55ZOmytl0ansqbdlf\nb82xbSfYw22V72em+baP8wngJ0jStxJpo6L7jarx3UtuBYQUwkjCNo6MbNJJldcY70US4b0lSPW0\nGEkow8jQ1lKgk+QCRF6HbU5YjSSc+tzrKEUSUBXuXESCaL5iO85vvH5F61uf/4NaO4uA3xuP9ffE\nBFKNVuep+vwtCQuZCNldJ9nmKLuQyDdn1KlKtAsX1yx0e45oTw871q0raPv5EgNdgQnsXMPBFa9D\neICOgeRxpupDr/g3MjlpEkansdhtSnzFxayorubkpUu8tH69uW+p18vw+DjeoiIqfD5L/ql+PMgv\nj1MXL6bNa1F9BX0+xsedgnGSoYjo7aEQPZs2mWPYuX49n3vhBYbicTpOniRQXEwRcPTsWW5qb+eu\nBQt446tf5cH/9t946+tfp7FC3hdWc+mket1VV0fHqVMEi4u4dFVwiQD/8f1/Yce6wpFRe7/2wlK5\nXpeFuKaVCuvCitmel4+NepBVVPG0qaNIKHUy3XFevHTTzQpWZOwrndKqKsMCfI7PcRd3ZSSl+jGp\n2s2EW7mVAQa4xKW827gWodQaSBCnQkNVIlUK3neM59mG/iqF04k45xpGrNoiRXt6XyoXcWWWbSvo\n7Y5irWq7gPTznermQB8JVXMYmQ+pigzp49PDYFOhBvkdq8bVgCRpTiRWFQTSq8s2I38jHEIStADw\nGe34BcgbDjoUGdRJndPzg0j1MdV3/2Wsqq0w2u5Czp26HspJKKsK1SQq50JC7Q0Cb2n7vUaiCNRG\nEhWSz2p93kuCvGZ7DbaT/kaIi8LADdO9juDmjE5/2F22OWb2tdDDOCt9ARiX42yqTh5nqj78HklH\ngx4Pb2/aZNlPL9yj24QolHu9HD57loHLl80Q12h3NxcMwjgphLQ+OXmSVS+/zLFzsuRChRb2Gp+c\nzEhEPx8KsT0SoTkcthyfDc6Nj5shrmU//jGNzz/PiDG+cq+X+NWrCGBCCEYmJug4eZK/eu89fv/t\nb5tEVC9I88yaNeac/9F//a9Edu+GoiI2NTby5YUN5nkV+jqxh/HaC0vl2t9sh5JOFfYiQe7nVAIq\nTHWEEe7jvpShqvYwVnXcJJP8wMxMSw+n0Fm1FoqollPOEEOW/FOnENooUY4ZtGAFK/LO0xxggBFG\nmGCCYop5iZcKEmI7U16zhYT+vkj9Qzq3YNp0eysFaRSZN5hv6K9TmONUclP19pTXpBp/L4lcxJum\nMM6Pte2TJEI81Xzra2H3SV1OYi5VO1XIcNgBEgTvOLAMGWr6DFIdXKD1e5txjCJf50iEt9YiiZ+9\nOm8qNCDn+qS2LQ4cNR5XGq8vxapQfd4Yl/pWUdedUw2JSazFhDxIgpoOIeR82r1d9SJPnSTIehi4\nA0mkf07iGu3s7LTk1PqRa7Je21YM7DEeK1/VemTF3XTXiWvpMjNwlVEX1xXaW1qmNewuX2KgKzDt\n7fDkd1soWtTDtpbkcep9lHo8RHbvJuj1JqmAFrXV4zHVs/p/+Aeu2kJkfYbfp97msXPnksJtq/1+\nLk5OmqG7xUB9SQmfrazkvaFEYfnVN9zA+2fPsryqin8eHmby6lVqSkq4o6aG1tdfx+fxsKmxkb/9\n8pf57ltvsff3v8cOf1GRGcqrCPYTBw4kKbIlHg+Vfj8XHEKDez79lMX/+I80VlRQ6fMxOjHBwdOn\nAdh86JA55/0XLnBUC53dHomkvE7synQ6BdoJdrXNXlgq90JA03tNTzfsyu53vdf215JTyGuqMFhV\neKeZZgIETKVR5YQq6CpklKjluGyVxHRKazvtRImaPp16u3rfy1nOh3zIa7xmhuYuYhGv8Era808F\nn1Zi5SpX2czmgoTo2udrvoX9plZrctNM0+2tF4xR/eQzS07EOd+29PbKkcTzt8ZzXWFTxWzsSBfe\nrB/7EglblUwWIXrRIoxjViHDSs8b20aQ1i86xo1/HcBmDrCLp4nxE56kiiISxXhqSBT0uRtZrfYU\nVkXUrlTqOablwNtGW41gGhxVIwnjGJKY3W+0oX+L9iIJXBVy7g4irV6KjeOHcSamGP39Ajl/6pdB\nJZJInjFeP0IiP1nl1LYCnyKJtirypK53/bwfQFYbVutxJ3Ium0is/afaeK4abe0wzku1owpiuZhd\nuDmjLlwUEIXIMcsU6qv34ZT3qO+36qc/pSEY5KNYjKF4nHKvN4m0Vfl8HH3kETYfOpTUph1rFy7k\nyNBQUlGjukCAM0Y4baXXS9+3v83yHTsYuGwtqVDj93POILJLy8u5fOUK8StXkvJWARaXlXFhfJxL\nV67w7kMPsaK21vS09BYVMSmEmQt6965dDI7JWoIqz6XM4+GizT6mvrSUgcuXLXmlugdoU20tb371\nq2kJ6Oj4uOl12rZsGYOXLiXyfSsqWFJWllOYdi7XzFzNeZ4K8vUpnU+IdkfpjfUS9AYZbRnlYED+\nFFL5j6lyIp/gCfaxjy8Y9SMVEbSrjHZPSyBlPulU4JSn6uQ72kGHSUbtPqG55H/20ccylnGVq1RS\nyTGOJdm05INsPUDzyZWdKqbWZ24Zbun2zpSnmS0K1Y7e3uewemqq8WPry04+9dzEehJkJopUKj9G\nVoRtzGHcag7V904YSR5HHfYNIBXJChJEtYle7uBt+riJIJW0s8rs71bgN0gV9DYkgdqCJMfD5vFS\nCUynkrYZ5/CcMTYvcBipKts9U+1oIKGo2nNbU/mI6v3+nAQBVjYuQSR5bECqzse0cejHVCHVW/WO\nV3OtsAnM21xO66XvrzxMQ7h5oNMJ12fUhYs5gGwroKbzL8wU6pvJRkTfb0lZGQdPn2YoHmdRWRn3\n3HBDUnufrari3t27OXXhAo+98QY+I9y3qbaWBbZ81K5PP+WyQfBUiG2518sV44ZTtd/PsbY2thw6\nxJnL9tp+4NEU2IZgkIHLlx2JaK1RCGl4YoL41av84H15X7m9pYWlFRV8obaWQHExP29tpbGiwjKH\n9914I23LlvEl41z1CrrvtLYmVYrVPUDtFYgV0lmf6GugW8fY1y7VmudSNXcuWo1MFfn6lM4n9MZ6\n6RroYl//Pj7ukcF7urqYKl+zjz7OcIYOOiinPGMFWierFRWSupjF3Mu9UwpNdaqiq/uOevGaZBik\nlco2tlnCYpXa2UwzpZSmHVsjjXyJLwEwyih3cVdWYcqZkK0HqFJQ87HFyRdT6zO3ANh0excqPDGb\ndnIJLg4BdxmPlc+nGr+9L3uIsF61doBE2LBSygbAdL3N9vzVHP7G+P8WEkQ0RCL0NowkoT5jHNXG\na7v4IX3cRBcR9rHKHFMUWV1W3U79NTJE9UUSxK0BSbBU1V4/iXBH9eNeKdK9JIjjJPBXJGxYFG7H\nWgEYZG6rGs9xbXutcT5FJMJq9Wq9ADtJEFGQ+aBxY/wdyHXp0s6nzHh8o/F8hMR6qNBaHUXG9oXI\nkOeDyAJR6jpqRyrUtVg9WAtpYeSiMHDJ6HUENxdr7uDdnp6UpCKXUN9MP+T1tj545BF2rlvHpsZG\ni7XK4aEh+i9e5ODgIPv6++kdGaEuEKA2EODdhx+mbdky7rvxRnP/8atXzaq2geJiLkxOcm58nIZg\nkK81NvLEgQO8dOKEY/7oXXV1idxYbQz6bTQPMDI+bgkRHrx8mVg8bhLsw0NDFpIa1HJPe0dGGLx0\nycz/PPbII2afjRUVhPx+Wl9/ncX/+I/c++qr0jLmo48IFBdz4ORJ6p57jr7z59GRzvrEmu+bIOjD\n8biFdOpE8uYXXsho7eKE+Z4f6gQ7Gb8WP6eCXoNshpt5Z807SUTIiRzpeZeK1KWyU0lntaLITT/9\nHORgTiQnm7VQvqNhwkwySYwYwwyziEW8yZuECFkIVhll5rn+E/+UcWwq5Liccs5wxnEfO4HLRE6z\ntaZJV9RpupCqz+zeF7lRyLmSD5erfYYiE3afTx16MaMmErmJ9cY23QbmV9q2rdrxEZIJchRY2dlp\nbt+CDDH9jnGsCm+uRoahfkSCpA4hlcHTSNI1CGzmRwTNa1xuV/mvOiaQxE4RMi/wr5Bq71Ek2QqS\nCLNtwUq47PYxPyO5YNESEt6gIHNGnycRwq1IY7Hx+CwyT3QCWdn2A6ONbFBNss/pRSRJtefogjW0\nFhLhuK8BA52dDJMI41XXUQiZB3vWaFddW3PluneRgEtGXbiYBQQM9dGJVOSiFKkf8qm8K+1thQIB\nXnngAb64QN6v9cmQCrzG/83hMIvLyjgTj9Nx6pSZV7lz3TrqS0rMfT545BF++P77TGoepXfX1fFP\n/f10DQw4qp0ra2p4/v77TeLR3tJCY3k5NX4/X77hBhqCQSq8XlnS3Rba3/XppyZpdyJkqiBSjd/P\nyYsX6RoYoOPkSYqAxooKC9lRpLD/0iVTNQ6XlBAoLmZ0cpKheJx7d+9OuSb29nQyVVdaireoiAuT\nk3ScPMnnNNKpxl3u9XImHs9L3bweVMRrEe0t7bQta2P/g/tpDDQmEaEtbGGQQR7jMZM89dJrhrou\nYUla4pSNH2kVVUB6YpWNwuiktH6P73FFu/3URBMf8IE5Zp1gbWe7ef5xrDdjnMamiPo9RnkTp33s\nBK5Qiqbq+63oW4QiofzNK/PoM5+CT/MVufqEZkMm9GJGS4x9tyAryKrCNX1IEjyEDKFVxWzsRYl0\ntXIHkvypYkXHsRZUUoVx3keGl6qx6hVzlYIoyZafdlZRh1QOO5BhyL8iPSaBN4y+TyEJl5qvcuP4\nLuAGJPF7H2uRGN1PVGEvksyFkfP/ljF+e17sVay5oncgiWgj6cN2FTzACmCxtk2NrRkZKq17vW4k\nQVxXGttUyG3coQ2d1OdSuTlXL10XhYObM+rCxSygkP6F0e5utn/0ERPG+2xBSQkfffObaT1Gjw8P\n8/HoKDcaKiMkPDwfe+MNxxw++5h1v8yQ389vv/Utlv3kJ45EtCEY5FdtbZYx2S1NllZU8PsLF5gU\ngpLiYsp9PoYMYq3ncsbicVa9/DINwSCVfj/tLS2yvZ4eTl28aBYoAtjU2MiC0lJLnqU6P4Vqv58T\n3/qWaRNTBPyrhQt55StfyXlt9DnR0bZsmbRx6elheGwsyQ81G1yL+aLXK+x5ga20JuVRZpvXCNn5\nkT7N02xmsyXf0z6O5SxnwDBI2MQmS+Ehp74U6qjjDGcAaKCBX/Ery3hTeaKuZz0ddLCCFSxlKdvZ\nzha2OOZMxoixilVc4hLjjHMnd7KTnYQI8QRP8CIvUkIJdxlBnKnya/NChJzNK2cj33S+Qs/30/M6\n8/UJBee8wIit7QtYcxBVf/p+fmRYcCWSaNqtVFSV3GYkoVWv271JAZ5E/uj+G5ILIi1GKp8eEqG5\nDUjCOEiy52c1iaI9CpXGfucc5iNXFBltlSFVq3O2selYD/yT8biGxE2AWuScqQoTVUabym5F5ZxW\nI6vmtiLPuRI5Z8ux2rWUIedsC5Igf4zMvR0x2u5GKsIqn7jN2H8VUnWdQM7ZThLzbrf0sXvAusgN\nbs6oCxfzCLnkCWZCbyxmElGAwbGxtIrba319HDx9moHLlzl2Vn4tKLXTHnaqj88+Zr0K7JFvfIMt\nhw6hbjyFfD6qjJDVFTU1SURUjVu3NGkIBk1F9P4bb+TXjz7KpsZGWhsbLUWFQoEAS8rLzbDiaE+P\nObZKLVz3dsNGxp5n2d7SQn1pqdmvqmD73sMP4y8uRmBVYlPBKQfUbpmj5lYR+JDfz+XJSepLSkw/\n12h3lMjuCBv3biQWT30/di7li6bLeXaRGaZy113L53Y/y6/2tkC81KL65aKQpQsn3cIWeuihiSaG\nGeZ7fM9UP49z3KIg6krlHvZQRx19ZtCctS+9Yq8qsFROObdxm7mvUlHv4A5OcYpFLMJn/EWI8Hf8\nHW200UUXr/BKUkivrmqGCLGEJZzmtFnVdxWriBDhNV4jTpwRRuigwxIKXBASqMkr39v6vYzqMcxO\nvmm2mGvWNrrSmasHqR1K3ZrAmk+KQ9v2sN2ttv2qkeGgKvRTWbXYVbylSCL6nrG9koQ36T7j8eeR\nJCmOJE66sqvnQyqyFzbaXYEktdXaOVYhq8kew4oAkniB1Xc8FUpJTQJU+G0MSUQXAfel2LcLeAI5\n76q9lcg8WlVSsRrYQKJwE0gi6kcSxP9s9KPm2l6kStm1KKW2C0nelWIbQc6VyicOIxXjx5AEd5BE\nrqr+bnSy9Mn32nORP1wyeh3hWszFmq8o5FrYCVBTbW1S6K9OHi5ruZgTQrCorMxCPLMlyoq0nvjW\nt2isqKA3FiNmkEtPcbFF8UyXz6oIoV5o6Pn77zdDinc98IB5vDqPXw0Pm/vq59re0kJrYyObGhvp\n2bTJschTKBDgK4sWEQ4EuLOujiq/n87OThorKmhpyOwvqsbw0okTSeSwvaXFDGduqq1lU2NjUrGk\ng4ODDIyNmX6uenGbaE/qH62FzhedCqGcTmI83z+nsvmhb/p2xj7D0EA1Q/03sKjn31vIU7Z5jZCe\nuPbSywADJoHbxz6TJP2SXwIyhPdpnuZO7gSgmGImmWSoc4h7udexr41spI46QoT4O/6OYoq5wAVT\n6YwQ4SVeoovH6OcfOMh/4CJeJo2/LrpMqxZ9zOmIdVDLfCummHOco4suM5wZZIiwHgpcEGgVT46G\njmZFMgudb1rI98VcJspTLS6jyEUHkqypME9V0EZvO4SsqKu2bSFBZFuBEyQK+ujho//Q2WmGkT5h\ntH2QRMjoKBjvLIkJkvMZ7WPWix5tQuaYHjTO41Mw4g4k7ja22XM+zxjnrHxIU2EFkqB9SILkFiOJ\nuZOx1kpkGO5OEgWBdIyTKEZ0Fkkcw8Zr7UhSfRvwOslhvMreZg8JYulBElH1S2UFVqse9SmgQp9v\n7uxku9afytG130RQ56K/G/UbFGp93cJGM49r29DNhYvrAO0tLfxxZyfjV6/iLy5mWyTiqEKq8NEF\nJSVgEMWVNTUc+NrXclZo9ZDR7739Nn3nz1sIYigQoOPkSZrDYbZHIinHrYf9pvLLVH19fP48o/G4\nWdjITqIBM2+zNxbjsTfeoL2lxbHdvvPnGYrH6Th50uJtmY1npz6X6nwVOQwFAnz4zW+mbMOJUOrF\nbbauSf2jtdB+onZ/T90WKBOuxUJKhUI2HpZ13XWEY2HEcBEXkPO4f80ThMh9XfVwUKfXjmn6iSKZ\nIEmSBw+HOMQII2xmMzvZySpW8VvDvbGIIvaYVvESiiQvJMgZLtNBB9/juxRpkVmnOc3vUd7BNyN/\n4gM8CzwKwApWmCRNP4dneCYpnNicN+rw4OEKV7jKVUaMn69NNLGQhfjxs41tluPs4bKpwoDTQkl3\nZE8ylTdroe11CoGZLsyUzt/TDm2q84JOLgIk+6ja29b7031Xw0hV7RmsIbU7kCGl9nBekIrlCDJn\n8UKK8TmpbroSq3JNN2qvT9j2fwsoSdG+8utU7cWQKifGeOuM/u8BvgBcNvb/MpL82ZNMGoADJNZs\nKdZzBhmTqchyEOlFqhTIHUgCaz8mhMw7VSRc/V+NJJ/6/ouwWvX4kIT9b5Fr87g2PrVGav6UT+yf\nGvOwHev1Z/fsdUNzZwduzqgLF9cBlJdjOBDgM5WV/Pb8eZrr6kwFMhVS5SnquZHhQMDM7VR5p997\n+232/u53+D0elpaXm7md2ZAoe59Ovqcqz3PLoUNJ49PHZvdetc+HPW8z1fnq2yeuXqXj1CnKPB7K\nfD7efeghGivsRe2d4ZQrHIvHiPZE2bpmK6HAzP1onYq/ZyFznjNhvuXeZZPrGdkdoWugCyhlUdm/\n54NHtuQ9j/Z8URXqGiTIKKMcTMpyg0UEuJ+HeIGfMs44FVTwAR/wQ37IDnaYJE+16USoa/AybOgv\nrTTQQ5yznKWUUu7mbrroYiUr+RVPM8E64F2K+AMEw9RSy2EOm56h6c5Brbki9vrYbud2Pstn0xJQ\nfQ7aaGOQwax9Tp2QKv91PmGmzyFCYfJAs4Gef/oYqf0knQiyyjPVyWS68ar9y4x/+4EfkAgHLSZR\n6KfK+LeYRE6kGo8acymyqFIQSYLvQZLDJqQyOWnsU4Y1hFVHGTI/EuQPfP3XdyuYjr8hrEWJMNq+\nTCI3NIxUGPXxOs2RExSh3ELC37TcmIMhJJlWVXkVVEVekKHNioSrcUewXkfHkPPjQ4ZI6w7E+nUw\nP9+l8xNuzqgLF/MY052Dp0JqbwmFOHTmDINjY5T5fEkKpH0MqcIxlddmpc/H52tkIJOed6qUx1OX\nLllyO7OBvU+lwqkcVD3P02l82ah2qfJiU52vvr3c56MuEODilSsMauG2OnLxFA0FQuxYt2NGiShM\nrTJvIXOeM2EuhxQ6IZtcz4Qa/nk+eOTf5j2PdvsXeyXZj40AtUqtlqf8YRvnn9jFuBE0d57z3Md9\nSWRP+YbeyI0WH9AoUYTxe2MFJWzjbdazHj9+7uZunuM5yinnBP9MOY9Swkt42Igwfnqe5SybDQdB\n/RzKKGOYYUsu683cbOa3qrEVUUSIEPXUJxFRwHEOlAo4VVUwl/DpuYqZPoep5oHmAj3/NF3Ir5Od\njNr/nizH245UGi8iFckfGP0otVGvONuNVBWdQnWVPcxr2pg2A18x2q8FDiPJ2odYQ3Dt9ihqrsux\nEtFKrKGu6jjdj7TMeKza13M4U81RFc74NZJEvkQiNPcCibzZESQRVZ98zUgiqsKn1xrblYWLfm5q\nXQaMdobAlkyQmNPHSK6Mm6lqbrrX3Yq70wOXjF5HmO+5WNcS7Gsx3cVpthw6xOClS3wUkx+fTkTN\nPoZodzfHzsm6fPY8VKUEjk5MEPL7k0iNnUBmCufUyZvPZnujSNNRwy9U5ajq/ejtZ0OyUnlbpiKy\n+vZtkQjNdXXm81KPJ2sSP5cwk4QyF9jfG7Ph9TgVZPNDX7d6mcpNCN3+pZdebuIm3uZtQM5XhAh1\n1NFEExvZSCsLDEuEZuKa5X2IEA00mGTPi5dqqpnslL6hpzhl+oAuZznHOU7MCPcdoILv8Z/Yxz7G\nGTdzQT14GGWSYc4Sp40rZg1NWejoaZ42xv2aeQ4XuUgHHfyaX5v7KW/Rd3kXkPmtd3M3MWJ00JF0\ng8JO0N/B6uuaj3VKvgV/ClkoaD5/f081DzRfpLOAcSLIav+dpB+vWouQcbzejvLDtIfW/iBFn5Ag\nxkolbEKqlK8iw187jON/j1T/7jT2W4lUBHUZ6i6sZFHhXhKhrhFkQaUGZFiwOld7nqki0+nm6CiS\nmAVJFE0qJaGM6srnSuM1hSoSPqz2uVbtv0lyLq/aVxHqQGcnb9nGns67NpOv7VSOdZEfXDLqwsUc\nwHTn4ClyNBSPO+Za6mMIBwKcunCBl06cMG1alpSXW/bXiw1tj0SSSI2dQGZS33TyVub10rZsGbdV\nV9P6+us89sYbbF2zJsnfU+/HqQBTKu/VdEhVVv/iAAAgAElEQVRFZO3b9ed958/npc5OF7q7o+ze\nHWHv3o3E01TnnS+Yi76L+ZAM/RgCFEQN14m6Bw8jjDDBhKkcvsmbnOEMXXThw8cuPiJk/PS70/gZ\nXUUVRzhiqqfVVPMbfmP6egJUkAhDH2DAVBsBBjlDO+0mkfXj5xSnGDWzwKwKDcAFLpjKaNAo+6JX\n/2ym2eItWk45E8bP+/u4jxqjrIzTDQq7P2sjVl/XfFTBfNX5+abqTxfSkcLZgiLIupeleifnMl47\n0VaEcyUy1BSs1XudiJc923sJMlxXxShUYyWvO5EqaxnwF0gvUZD+pf+FBFlU/a8EnjceKzLVhSSb\nyoO1lWRCUIKcF1XcaSGyoNN64/UdSHK8B0mCFZktJVE0qAypfvqAT5D5pAoB4NtIxfR7RvsB43yV\nLYtePbfDaEfN3XtItXg71hBdSCb+uqLps71mRzolX51XFRi301zMFwgXLlykx/DYmGjbv18Mj41N\nS/sb9uwRPPusaH755ZR9qDGs3rVL8Oyz5r/KH/9YfDI6Kp7q6hJrX31VbNizR3wyOpo0Xv31XM/D\naXxrX33VHEP9c8/l3KZ+fNv+/Tkdmwucxj7d65kOr766Vjz7LOLZZxH797fNeP/XA9aKtQLjr01k\nN8eZjnlKPCXWirVig9gghsVwVm0Oi2HRJtrEsBgWYREWCESxKDb78Qmf+bhVtJr91It6ERIhsUAs\nEJ+IT4QQQjwuHhd1ok6sE+vEsPG3SWwSraJVfCI+EfWiXiAQzaJZfCI+EUWiyGxb/VULj/iiqEra\nHhJVYoFYYD6vETVitVgtNogN4hOxVrQJxFpRLhCIJtFknr86v3VinUAg/qgL8eevesQLe1aLW8eW\nmG3o87VBbBAIRLkoN89lqlBtNovmnNrL97jpxFNCiLVCiA1CzJERpcZMjHWtEALjXyE+LYeNdoZt\njzMdU2+Modl4vsF47hdCfFFY5+ApIUSVNu7aFOdg7/8pIUS1tq/af6323C+ECAghfMb2x43/nY5T\nuEUI4TW2F2v7FNmOSfevzmFb2Djvdba5yQb2c9fPcZNIvy7p1u0GWzsurCD53mNWcAsYuXBxHSCX\ngjOqsE2Z18tFo3Jt27JlDF66lLYwUDaFg3IZ3+Lnn6f/4kVzn1zb1Is23RIKUenzmUWJUhUqygcz\nWcwnG+zdu5H+/n2Ew808+OB+AjOci3o9IJsiRbkeYy/ik2thnT76uJd7+QyfoYsu/rQ7TE1sgkHv\nCLtaqrkpcBuVVPJLfslpTpvHqb70/uuoo5lm02dUVbm9j/tooIFKKjnIQVP99OChAQ9LGOc9wB6H\nsJa1PMdz/Cl/ikAwxJBWVKiVHfh4Ag//Hy+Yx1RTzUUuUmr8DTDA/7QbbjFqmR1eBlvXWc8hSpTj\nHOdd3jWVVKdzybUQlr3gT7rqsE6VgUspzbvvQiPCzBUTmioiFGas6dZLFeRxKnI0U4gCx5Gq2ztI\nle8JY1zjJBRSNQcREvNSjQzb7cC54JAO/TiQeaUq/sFecEmhDqutDMAdyBxYvYKtvRiSH6mUprOY\nUVDhzh3aNr0QUytSzZxKMSKndc6lyrNCDYnQY70gVDbIp7/5hnwLGM0EZpuouzBw4MCB2R6CCwNz\neS2UqrfuZz+zKH6Z1NVs1NdcoCu0nq1bxbrXXsupXVPpfeWVJIXUrprO5fXIFWNjw2L//jYxNjbX\ndQ9nzIe10BXJQh1TKBVN9fPyq6tNhfy7+72mKqkrmlWiyuxLVxTV65UHKs3HYREWfuE3n9eJOov6\nmayTWv9qRa14XDwu1oq1poqrn6uuHKf6+5M98nz+4mVE6RhJSqq9Df1cVJ9qe5WoEmERNpXhXLBW\npFbTnBTwfJR0Owr1vlCKWy4q03QjlQJaqLGuFanXK1v1Uh/ja1NYi1uEVDUDIqF4rhZWNXCREKJS\nWFVCNQe6uhkSQnyinYPeTptIntdFxmsVwqp0NoqEson2uFkkVMky7fVW27zYVdFq49zU/pUiWVnV\n/200xrfJeLxJ5KaGZvPecFrntSL5usikxqtxNWUxLjuc+rvWQJ7KqJsz6sKFCwtUzuXOdetS5kk6\nKYBTqc7qhEq/33x8RQg6Tp3icy+8kHUOqDqPSociSteyT2YgEGLduh3ToogWsiBLUttGEas/f+ed\naakonbLfPM4pn7zDTMf8WXcd//vuMH+5N0RpitN3Gqt9m+qnxCtzQH8bhm1rZIRDJZWW/M/VrDbH\no3Jz9TxNhXLKGWLIrL4LcBd3WbxF7b9Aimw3x89ylj3soYsuhhgiSJAAAR7jMWLELHmoqfD3LfDe\nMvjbB+Gy8RGzhCXmOagc2pWspJVW81yaaWYlK83HxRQzwghDDHFvUh1OK5zmPFVOmVN1Y31czTRT\nSum0vYeywWwVE0qHVEVhCjXWdDmA2eaH6mP86xz6tldfVRVg48Aho71fG/uWIyvD9pPw3azCWrTn\nNRLK3JeRKqo6B1UzWy+mpM+ryqs8j7WK7SIwypFJBfIwiXlXhYS+pO2vV+Xt1Y5tMfY9AUZWt8wF\nbyJRdAlkDuta7fHzxjm8gsw/fYXMRaRyhdM6O10XmQoU6YWVch3XTFaVdpGM2SbqLly4mIcYHhsT\n4e3bE+qolseaSw6oU/7m42++KcLbt4t1P/vZrOR1The6up4Sr766VuzZs2FalNFCKDwp256hHN+k\nfqfxnFLBKb9az/X9T/vrTbXvFnGLqeJ9UXwxrepWLxLH/fdjj4v/Yb9PlI4hPMKTpGiWiTJLTqXK\nJdXzTFXeaUAEBCLRTq2oFavF6oxKZjqV1CsSam2raLXklKp+60SdRdG0/60UKy0qsl191p/rObG1\nolYgEEERzKiMqlxZBGKTkSWWSk3T12KTllGmj2M2rre5jafEBvFzQwWbmBa1Nlv1MxV0NTJXRWyt\nsKphYeGsDLaJhOrmMf6vElL51NW6kHbcAttY7OdpV5b1558IIZYKqaaqMVUb252Qag5TqdfDtnPd\nJKSiuknklk87nXAagzqfciHXo5DjmwvnPN3AVUZduHAxF5Gvh2ooEOAuw0Kl2u/n3oULAWc1M10f\nThYmyge14+TJOWu9kg9isV4GBrro799HT0/hq3dOp83KbKnVs2Edk2SjRJTDXqmo/TYMT68ZMKuv\nDjBgqnhHOGIZq67EqX2Xs5wYMT4M9PH/rpvgcgCuaJlbQwzhwWPaqKh+eullgAEz11JhggniRhbo\nFa7gw8cVrpg5n/lghBEmTT0FBMKx3zOcQSCSVFaAeupZwQqWs5waalhv1PhMVTm3jz7OcIYOOggS\nxI+fu7iLKs0p0UkFjWsZsGocSmXZYttfv5a2s908Th/HfLMqmn700s4f0MaL7OdfT4taq7wr7VVz\ns0UvCTVSVZ/NFnY1TFWAXW1sb0Iqg6oCbphEnuV9SDVTV+sS8ULSR1P/lLerf3XGP/VcryD8BHIe\nDiLV2EVIRdNelVZhC9ADLENW01VzqKvXyoJlo/HaXdq5b0fmV75CYj2ms8JyNn6gTmNQ3rEXkDms\nTt+i+XqNzsWq0tcTZpuouzAwH3KxrhfMhbWYSvXbXJCv4vVUV5dYvWuXqH/uOfHJ6GiSwqmP3ykv\nNB3s+a1zYT0KgT17Nohnn0W8/HLztCij+eRKZt22sb6vvf56wdtO2+80nlMq2K+/elEvSscQT+2X\nuZB6LqVSBoMiKI6Ko5axpsqzXCKWiGpRbSqgqZRFr/CayuAisSh5jwP5ap/Z/1WJKlEiSnI6JiRC\nYrVYbZ5jNkqjnpOrq7r6MU6qparkq+empto/m2sp3+vtWvmMSsbMZLGuFfnn69lHmGktdCXzE+Gs\nhuWiNOpq3VohFdFsZmytcD5nfXu2M28/xmkO7f0VUglMl8t54MCBpNftY3Fq53Hh3GamKzJV2y5c\nZdSFCxc5wq7OTBfyVbx6YzEODg4yMDbG5kOHkhROffwfj47m1Eeh81vnClpa2lm2rG3aqujmkyuZ\nddvG+pb7/Zl3LmS/2jlNZ06sDvv1FyfO5QD8aB1UBxos1Xbf4z0WsYjjHGcFKyxjVaroClbgxWu2\nf4pTptdmGWUpxzHJJJvZzK3cyilOOe5TTTXF0/hT4QIXGGMs7T6VZjachAcPBzloniNIL9Snbc5/\n+no+wzOmX61qz65OOqmWO9lJG228yZtJ1719/y1sYZBBMw/WCdP5HpqfmJks1qnk66UboZNKpiuZ\nm3FWw+wqmWpnAthk66sdmeN5wWi3Oc14dKgs7Eqsnpi6F6qej5oOuhdqE9n5cxZSCcyUy2l/PdV6\n6/vtTdFmpivSzf2cn5htou7ChQsH5FL9dioqar6em7lU73XyPZ1LmCkV2sXUMFv5fEp9U7mY2XiN\n2vMTG0SDQFh9RqtFtXhUPJqUB6r+lAKb6nWf8ImgCFpyTmf6r1gUi6PiqJm7WS7KzZxP+1+dqLPM\nnZ7vuUAssOSSLhXlYrWoFBtEWAwb6rBSLVXV30ViUdr1sKuchbl+5pMT6PzBdOXrrRXJKlk+Wq9T\nOzr0arStIrurxF5dVyGfuVDVbhuNdp36nc6cyExzan89G+U5H/9SkaZtF/krozOB2Z4bFy5cOCAX\nkjgbxWUyjS9fkjsbmK3iPC5yQ6HsVXKFIjWpwkedYB+rvaCQR3jEWrHWUhhI2boUi2KLrUmRgzGL\nTmpn+2+pWJpU4EgVVaoU0n5Gt3EpFaVitVhtKZJkn9O1osrcXi8ClvV2Cn/OhlwW5vpZK9wgwPkD\nJ5KUD1nJVDjHbimyVqS/Sm4RCcuVFTmORYjUZDdTv9OFTHOa7Zzr+7mksvDADdN1kQmdnZ2zPQQX\nBubCWjgV9kmF2Sguk2l8+uv5FklSmO71uJatZAqNQq1FPteEsjfRw2RnAip0M1X4qBPsY1XHrmQl\ntdRyhSt00WUJZRXG74SrXGWIIT7H51jPeovdi8JVrkJn5rHfwA1ZnmV+aKKJBhoYZNDcVkQRt3M7\n9dSzjnUECHCZy+brl7nMQQ5aiiTVUMMpTmnFhnzmawPEzUJOkAi/VcWNwoQtx6ZCYa4f5yDAufCd\n4UJCXwunkM5cwlOjwELg54CX1IVzdEuRLWCWLVuJc6joAAnLlaEsx6IjVVjsbIWo6nNqD43u7OzM\nes71/dyCQnMHLhl14cJFRsz1HMuZyn/NF3N9/q5F5HNN5JrPV+gc0zrjL5v+7WNVROgAB7ibuwFJ\namupTdnGBBN00GHxD80F5ZRbqs3mgjrqLM99GjnU8Tt+x0d8ZNkmEBzmMAMMsJvdxIlbKgarHNfb\nuZ2NbGQTm1jOcg5ykH3sI0qUdt6j3nBbtJN/NZdHOUobbdzCLZZjU6Ew+aBz0Ql05pFvxdKZxlQJ\nTS+SOMZIkMdMfqh6dd+bUvSt3k1B4O08xpWKdDpdnTO9Vk5Eeb5cLy6ckVwvvfAwlFsXLlzkgmh3\nN72xGEGvl/aWlnlBYmZrzBv37mVffz/N4bBL+FwAM3NNRIjQRRcAbbSxgx2z1l6UKL30EiTIMzzD\nfdzHJS4xxJBJ1IoowoPHohjezu2UUcYhDiW16cOXZLlSKBRRZCq1AF68lnHZ0UADk0wyyCBVVDHC\nSMZjWmllF7sA2MhG9rGPZppN5TJGjChRtrLVJJD6PLbTToiQ47EuphcRMN4JkvxM7Z01d7ERSaoA\n7kBap2wnPblVxzST+pZFH3Av8Bap7VrSIYYkeKoQkROiSGJ4jAQ5nom1cjr/CNfH9TLXUVRUBHlw\nS5eMunAxRxHZvZuugQEA2pYtY8e6dbM8osyYrTHH4nGiPT1sXbNmxolovgS8uztKLNaL1xukpaV9\nWqrfXs+YiWtCkZQwYW7hFiqpNAnMVNqzkx4ngmSHTmSXspRznGOEEfP1IoooptiiIIIkbHHi7DN/\nEmeGB4+lHeW/KfJLF6KYYhkWnAaP8ihv8ibDDHM3d1NHHXvYk5IsV1PNCU6Yc6WIZyml9NGXci71\neaynng/5ECCJtM51ZHPNzGVkQ7iuBcSAP0Ym2m0nu/PMhijOBCIkCCDM3Fo5nf/1cr3MdeRLRt0w\n3esIbs7J3EE2azEf8wxna8xTzR+dynsj3xDhWKyXgYEu+vv30dOTOvTvekOhPqecco6j3VEiuyNs\n3LuRWDy7YK50obgqnDPbMM5MSJVz2EsvXXSxj30sZ7ljSLBuM9JAg4WIgiSKdiLaTDPb2MZBDprb\nLDYqnc7jtLczVazB+llR5PBb5mVeZpBBJpjgIAc5ytGURNSPn5u52WKxokJo++gz59JprYKaicUA\nA0SJzgk7llzfF/o1M5VrcrYwl4OVC/lbKgTsAl4h+/PMJzR4OsJY1TuliWQ7mumE/fw7Ozvn9PXi\nIjOmQkbbgF8BV4BVhRmOCxcuFOZjnuFcGPNM54/mS8C9XvlVHg43s2aN61amSN+f8+fT5vHZG+ul\na6CLff37iGZ5AyDdj/p8Cg+lQ8j4a6XVQn7tBGkVq4gQYTGLuYEbCBDgIAdZwAJe4iVzPKnIUyWV\nbGKTSXovctF8bZRR87EfP6tZnXHcquRsNtD9UEGGAr/CKyxggfm6U1s68byDO2igIWmflayklVbu\n4i4OcchxzT7mz4EDVPIWT/OjpDbaaaeeemDq65kKM5Hf5uSZOp/gFpcpLDL5dOYDRQDfJDcyPR1w\nr5frF7cCNwMHSE9GZ7fOsAsXLq4r5OKfWgjkazEzNjYs9u9vE2NjbmF5IWbG43PDng2CZxHNLzeL\n4SznPRu7DrvfpBBCPCWeEmvF2qw8Q3U4zcOwGDY9M5tFc5KNi/7XKlrF4+JxUSfqxDqxTlSLaoGw\nWrV4hVc8LB42x5fKZ7RIFAmf8KXtL9c/ZcWi91ElqsRasVY0ikaLp2mxKDbtWfTtNaJGhEXY0o5P\n+MRRcVQ8JZ4yz3mlWJk096vFhGlNsVT8wnGNnNazkFgrpt8eY7rP4drDtePv6nQm+XifunCRK5hF\nn1GXjLpw4WLOYD75j+aCp8RT4vtd9eKHr1aLV/esu+ZI7Ex4fA6PDYu2/W2ORDQVecz3R32+5DrV\nPOjjKBNlKcneRrHR0vdGsVEERVDcKe5MeUxERNISyIAIWMigIop7xV7hF/6Uxy0Xy3MiqnWiLuVr\nTl6oDaLBMq6ACFiIc62oNdfzFnGLqBJVwif2mz/KV4sH81qj/JCgCBvEeE7EIN8bGy5ywVpxrfi7\nrhXJZzKfPDWvndsC1x9wfUZdZIKbMzp34K7F9CEX/1SF+bAevfRyNTZAeGCYgf6Oay7PVOVL/lXn\nX01bbl4oEGLHuh2EHIpFpQrHzTdfUA+TLKU0awuYVHmjegjvGGPm9hJKLKGvC7vf4J7dB/mTvfDl\n+Eqe53nu4i4Oc9jcp4gii7focY7zMA+buZpmWHCn/C9MOCmP8ypX+RbfMttxyvP8mI/TnquOGmpS\nzo0PH8L2GydMmCvGn0KcOL/gF4C0nTnLWXM9T3CCEUaY4BGKeYmXGKHSKJpUiFDWzDY/iUDJdp7K\nKb+tl166Oudv/uf8QPYOmnP9+8LpTOZTGGuuIcVzfT1cZIY3w+v7wUiesOI/AK9l28mTTz7JTTfd\nBEAoFGLlypVEIhEgcRG5z93n19Nzhbkynuv9ucJcGY/T8yBBThwHzsA9q5tYs2brnBrfVJ+HCPHd\nzu9y5MgRmVA3w/0HCUIn3MzNbI1sTXq9uztKT8+7eDwB/uzPXicQCKVtr512Wjtb+T7f568jfy0r\ntHbK6rWdkc6049kR2ZH0epQo7Z3tMr/TmJ9AZ4BtbOPvI39PBx2UdJYwenCMuw0Lzxv/nzKO3HmE\nYET+PF3SuYRP+ZT3Iu+xgQ2c7zwPwGBkkN3sRnRKwncpcgl4Fo4co5ibGI3839JCRQ7H7H+kc8R8\nLhBJr493jkt7mIiR72l7XX8+yiiTnZOOrzsdP844o52jSfuPM86iyCKucpULnRcIEuTpyNPS4qUT\nYISrkTbuYyllnWVUU81LkZcIkX49Mz1XhBEgGomygx22/YPIpzcTifwNO9K01x5pp5deLnde5i/5\nS3P9bu68mcd5POX746udnfQDDZEI7cCROfT+nvvP2+nsbAW+TyQSSru/wtwav379RIgCj3d2cmQO\njCfX50Hj+c2dnTwud0i7v8JcGf/19PzIkSPEYvLm2yeffEK+KIS1ywHg3wG/TPG6ody6cHF9wrXw\nSI356KU6W4gR47vxJ/mjniJa1mxzr6MCw8lzUsfu3REGBiTZWLasjXXrrE526d7nhfCpjJCwHKmi\nijLKeJu3aaTRHPsww9y6t4M7+qE63MTXH3yTQCDZT/NWbuXX/Nq0U7FbtchtPVzhXuPZi8CjOY/Z\ng4dOOrmf+/PyK1WWL3ZPUieofcKEKaaYs5w1z2kpSznLWbM4UxNNBAmaVYQL4RGbeY2TDSlS2a/o\na91GG1vZmpW1TATXa3G+IopUBIPIwkDX86f7XLGucZE7ZtNn9ADwfdBigKxwyaiL6xqZfsRez5iP\nXqourk/s3buR/v59hMPNPPjg/qSbAene55mIbjZQZKeaat7nfRodrOxT3bCIEuU1XuMc5yillAtc\nSGvPEiLEFzhJF0HgXeArYLOKyRZBgvjxT1uFZIX1rCdEiFOc0qxqngVuwcs4k7QBIwQJUkEFZznL\nJJOsZCUHODDl0PBc1liR0GMcY5hhwEqI87154Xotzl9EcG8kuJj/mA2f0YeA3wP3AHsgB8dsF7MC\ne0iDi5mBk4WHuxYSc8VL9XpYj+7uKLt3R9i7dyPxLD02ZwNzaS30PMDmlmdYtqzNkYhCaqueKFFa\naeUCF6Y0FpVLeoITjkQUJIksDyzg/1g3zGcDd3Av97KRjRznOAMMMM44I4xk9An14MHHE4Q6/wb4\nCn4uW/xHiygiRIh66imnPG1bl7lsElG7rctU4MVr2s4ECSIQbGUrffRpe90MrGWS9cCzVFLJHdzB\naU7LsGOggYaC5Cjnklus8pMVEbXnrDrlDWfzvmjH9VqcCUzHZ1T2Gasu7Ojs7JwRuyQX04epfDPs\nMv65cOEiDVpa2unpibJmzdZ5HVo5HSG17S0tRHt62LpmjRuiO82IxXpN5a6nJ+oq9FlAkQaAPwls\nZkeaOUv1PtfbiBLNOxxUkZ1cxtxPP4Dp4alwG7fxG37DOONJxxdRxFnO0sFLrOZTGviKTW2U+aEx\nYjTQQAklaYm2Hl6rCKAHDxvYwM/5OWc563hcGWXEiZvH6PDg4TCHWcIS6qnnEpfooIMneZJGGs3z\nhkvG/+8C/4ZRRnmf9y1t+fClHLtCdzRKrLcXbzDIa+11fBjqSwqtzQWqQNRKVnITN7GNbUnFqvK5\nTlSRGhfZY66Ex7bjhqZOBaroEch5dN8H8wuFCNPNBDdM14WLawBuSO38RqYwUxfJyBQumU0+eCHy\nRVPBqX/Vn471rKeXXkYZxYePd3mX7/Ad9rGPSiopoYQv8AUOc5hznAOk8vgbfkMjjWab5ZRbiOd6\n1vMjfsQ93MM5zjmSW5X3aYcXryPR1FFCiaVycIAA1VTzDu+Y6nANNabC2EorceLsYx9VVBmBxc8C\n/wY9zDhAgDhxM0R3C1scczcVdkciDHTJn7p9bXX8rzvOAPnnmhYibNtFYRDBDY+9FuCGqM8NzEaY\nrgsXLq4jzJWQWhf5oaWlPW2Y6XzEdIcep7JZUVBqc3//vpRWO5nayBdRonTFdiT1r/rTw2rLKOMm\nbmKYYQYZZDObzf366OM0p7mJm5JUzM1sBqCOOoopTlJAP+IjnuAJmmiil15aaSVMGJDqXi21VFHl\nOH5FRJtocgw7rqXWolp68LCSlTTRZGnzTu4029nGNvO8jnKUekqBR1nJUlMdbqaZj/iINtrMXFEn\nWx89RJugHEe4uZl/3voFs5187WDytQtykR75hGrOx/BYNyQ1GW6I+vyGq4xeR+js7DRLMruYXczH\ntYjF49dsSO18XI+5gHwqRWc6Jpe1cCoaNJPVq2dCbbZXXFUq3jGO8e29w9zRDyPhav7HB09Y+l/P\nejrooIkm3uRN7uAO+umnkkqOccwkgE7FdED6eNZ31rM4sphRRi1hugC3cztVVJnb66nnQz4027SH\n9tpRSSX3cz/b2EYrrWZoMUjVtIgiswKvDx+llJrVcHVFMkaMVayigQYqqbQom7oCqcZVSil9WMNs\nndRrvaLtt2Kb+O+iftZs3crlUOp2Cgk9NLilvZ2fHznifkZlQITcVc58KrfO9vdFhOtbzbWHVh9x\nv7/nDPJVRgtXTcCFCxfXNEKBgBuaez0jGoXeXggGob0dQqGMeahOxLCQuatORYNmMjc213zwVEQ5\nlcUHJOecDjJoPv/7Fvi3PdX8uzXvJ/W/k51EifJUd+n/z97bB8dV3vmen37Xu1pvtmyMhZUAcSaA\nDWLwEnxpkIwvhsQKoCTDbA1ka6arbnZ2Zrd2TN2X2qm5W8mtqUvuztzaqUmNZ7J4QtCAbYLDm6+D\nHMtyDCgD4WUCjEVsMEhyW5attmVbarWk3j+ePqdPd59+71afln4finL3Oc95znOep0+rv+f3xo+C\nG3nEeY6/64ZLnkvcwz2sZz011CQJzTrqqKGGDWxgmGF+w29oNyk3/kW+yFu8pb8PEOBxHucAB9jL\nXmVRTEArv+LFy7u8Swcd+PHzPu/HtUt0362jThfKDhxMMUWQIF68PMETXOACn/AJABvZSD31TDCB\nCxdv8ZY+n3vZGycytRjefvqT3Ga1uM4uuvhb7x68e9V2T4p+NAtrscRpcGREdw0+5vfj/O53C+pv\nJZCPlbMS42wr0ZpbTBLjQ+XOqHzETXcFIU+OrIOshbXw+XwVk222bIyMwNGjcPCgEqakziCrYebG\nmumYXO4NM9fjTP3ngtFV06w0icfjpadnb9YW0VRuvWZuohpGUbSb3fr7zWzmPs9O/kPPKVZ5kt1c\nvdH/3gz+jPpAgI2jC/zPx6CJJtayVrcF5ukAACAASURBVD/fSU4CKplOCy1c5jITTPBrfg0+dZ43\neZMd7MCNWx9LAw26pVLjl/ySa7mW1azmDd5ISpyksvS6uIVb+HP+HB8+9rM/ziKrUUutPi7NFVer\nhzrAABvZSJAgI4xw0RAPGiDAJ3zCRS4yySR36bVSzedTm6tEt9lM7tWJ/aRbw3xw1kQ/x11dbN29\nW/5mZMFSuWqWey1Wuktqohgv93oIhSNiVBAEgezi/7JlWQrb6I9jurpgt/oRnykO1UwYFjN21UwM\nFrP/oguMFELZTCBptNFGK626IOqnnw1soIYa04RBGprVb8KphNonrfDy1kbe4R09nrSLLnz4aKWV\nCSb0ki+11OousutZTwcdXMM13M7ttNPOfvZzmtNxYtSOnfOcZ5RRJpggSJAJJmihRb/GeeYJE+Yo\nR3mFV+LKm2xiU1zpl3rq9bjOfeyjjz5Ws1rfHyCAH78+d9qxXXTpMaU11PBLfhk3L4kiM9UDh0xx\nnYn9pFvDfHipv43TfW3842teZlai4sgDzcq53KdrpVxnKla6GF+OiBhdQVipft9KR9bCWgwODhbV\nolZMYVtMChLJ/f3Q1wevvQbeqMtiGsvg0JCfublLVFW1s23bfr1NJmtiofdGrtbKdOQiMDJZUSG1\nUE5nhTvNaSaZZIAB3R10Pes5zvGUCXeMFsMfdcOvO208/UATv+O5nUYa4853hjNMMsk44/q4Navk\nDYM3sIc9gBLmxzlOgABb2KInF7JFw4O0jLkOHPrY7dj5Cl9hJzu5kzvjrkvL2ltPPTvYoScT0vro\noENPmKRZeRMTKJ3iFGHC7GQnv+W3+jW9zdusYx1f5+s8xmNxa5IoMo0PHDRrazYk9lPsRFUfeU/z\nX/ae40WvWnf5m2EdZC3KS6IYl/WofESMCoIgUFyLWjGFbTEpSCR7vbB3ry5EsznXxMRxZmcDDA/v\nymO05ScXgZGNFTWVUDazwmni8gM+AOIFsZlITjy/1qbK08S7Pb/LmGeKAQa4nut5lEf1+EitndFa\nWked3mcnnXqiHo0AAeqoo4++uOtw4WILW4BYSZejHGWYYf6Bf4izfGqZe6eZVi7BwFu8xTrW0UUX\nwwzHzaVWmsbI27zNAAMMM0wjjfocdtDB53zOGc6YrolRuBsz9mrW1swk5zMtdobcYltarYXkgxUE\nIYaI0RWE+NVbB1kLa+Hz+YpqUbNqGZWlFMnauTyeVq5cGc/aGmuleyMXgVFs8aCJy0kmWcc6vsyX\n6aWXHeygkcY4112z82tC+hSnaKYZUK6sk0xykIN8h+8AMcH9Pu+zgQ148HCa06pTH0wxxZ3cST/9\neiKjLrp4iqfw4o0rBxMmTCON9NHHvdyrbw8QYBe78OEzvdYAATaykUYauZ/7GWEEUBZaLVmRUQyb\nHW8mIlOtiVG411EXd13ZrZ2WQuUgFMF924zEByFWui8Kp/TzV0qW11pUPrIelY+UdhEEoaJYytId\ny41QKJhT9tdinOvKlXHOnlXZWrXyK9YlsWiAmqNsPnPGMiLFsI4llhsxlj9po41znANiZU7SnV/b\n9xqv6W6oO9nJAQ7EtTNmiE0cyyu8EneOJ3iCPezRY0s1tH6DBNnIRgIE9GsAuJEbmWCCm7mZAAEm\nmNCP7aOPYxwjQCCuzw1sYC1rOclJ9rGPHnoIEcKJk3nmqaOOLWxhLWt1K24bbfwP/ocupg9wQJ+X\nxLkFcly7HSgh1YVEruWDzJ8gLEfyLe0iltEVhPjVWwdZi/wpdjzm0JCf739/07JKNpQqNrSY1t9M\naOdyuZT7Z7bW2PLeG+YWm2w+c8V200yXIOcWbtFfp8sImzi2LroAlSxIiwU1op3jJm5S7quDqp7o\nMzyTdI4RRpKEqLHfLWzhKldx42Y96+mll0d5lF/xK/ro4yhHOcEJ3SqplW+ZZTauzy66WMtaPV61\njz5OcII++niER3Di5DKXGWCAgxzULZ7P8RwTTOgJk4yW08S5zX3tlj6FyvL6m1HZKWiW11pUPrIe\nlY/UGRUEoaIotqvp90438uGFG/COruZ7g9+ld3t/Tsdb0VK7lLU2zTDWzdzT/UPcx3YtiTW2cMwr\n+JXCvdk4R//HUBszwdNxnyFNIGkYa2Fqx6ey5KWqW6rVHjU7zo+fS1yinXZe4iUaaaSX3jiLohGt\nJAxAI404cdJKq74tQECP8XyZl/XMv/dwDzPM0Eknt3Ebb/Imt3M75zjHAAN6OZibuIkv8AWe4ike\n5dG4fu/hHgIEmGFG3+7Fyy3cwgADdNHFb/ktIUL6+Izut4lzmzuVWJ3SSsj8CYIQQ9x0BUGoKIrt\nanrTnv/Cb+bUj+iHOtbx/PYdOR3/4os+Xfh1dvbhdnvLLk5ffXUHo6MHaW3tKkvcqtHdU3MjTSSV\nYCovQZRFdDdGi00p3JuNc/SXL7bSFJgEiuPKvIY1uqtrL728wAv6vlTznuuaXeACwwwD0EIL5zkP\ngAcPNdRwmcuECVNDDbdzO0c5ShddePBwnON6n3308QZvMMooDhxsYQujjLKOdTTQQD/q4ZDR5Tex\nD1Cut8/wjC62++hjgAEaaeQ93qOD5Fqsyw9zN3NBEISlQNx0BUFYERTb1XRt21cAuK2liR/5unM+\nPtFqZoWyLuVOoJRNMp9i1/AsDuYV/Erh3myco43OTUBhlldjhlijxdCYYAhSz7s2nlZaGWfctESN\n8VgtyVEXXWxmM6BcbUOEmGKKMGGqqOJDPuQAB3S3WC1rL8BmNrOb3bpQXGCB4xznKlfjStd48fIR\nH5n2AfA7/A6ttNJLr17+RatN+imf5iREsynRY13KmxhoyO/nRZ+PV3fsIBSstLkTBGE5ExGswZEj\nR8o9BCGKrIV1mJqdjdz9gx9EpmZn8zp+dnYq8tprfZHZ2anI0aN/FHnqqabI3/0dkR/9qC7y0ks9\nkdnZqSKPODVHj/5R5Gc/uzvyyiv3L+l5E5mKTEX6In2RqUjqMdwfuT9ChEhXpCuuXSH3RrbXb4V5\nMs6R8TOUL3dH7o4Q/W9VZFWECJFNkU1Ja5Bq3rXxfDXyVb2fu4/cHfmjyB9F7o7cHbk/cn+kJ9Kj\nH/tp5FN9/Nqx2n4iROoidZGeSE/S+aciU5GdkZ2R3khv5A8ifxC5O3J3pDXSqh/nirgiDZGGCBEi\ntZHalH3siOyItEfaIzsiOyJTkam46++L9BVlHgvpp9hkd1/cH4lEiEQiXZFImnuvVPzs7rsjfweR\nv4PIa33WmbtiI3+/rYWsh3UA8nKFlZhRQRDKglViLb0eD39x2214PZ68jtesZqBiNefmpgCYn7/M\n+PgAg4OPs337gXRdFI3EWNFyuQxniskbGvLzh8FL3OVs5w+79+Mt0riyjZUtd0wtJMyRh4LHYLS0\n7mc/u9hlGhtqjD017tPGs4Mdej9/xp/xA36gu+/uZCd99OnHGtdYy+j7OI/zS37Jec4zwAB+/Oxl\nr6l7sNE12IWLsOE/gCtcievDONZXeCXl9RdSWqey63v2Y+ZmvlQ4a6LW9a4utu6utLkTBKFcSMyo\nIAhLiiZCL1x4Xxdu1i/5kR1arKaKgFgEoKOjl+3bYzF7Q0N+Tp9+iYWFEG1tt9HTs69oIjExVvTQ\nod64eFarzHFinG2xxpVtrGw27Sot+i7b0jJf4ksECODCxVu8leTCGiTI/zN0K5uCa/mts4G/7v6P\nnPFcYhNPcoQXsortTSydkig8tZhUYzsvXj3G8yIXaaCBS1yK66MY15+JQvuxZix0thT2qQ8Fgxzz\n+9m6ezcebyVdtyAIxSDfmFERo4KwjLCKtTEdRiEClC3JTinQEt3MzExw5sxRWlo28+CDv4i7tsTr\nL6YYS0y0U+5ERqko1biyTTRk1i7x3tnu8eoVN/tIzv2Z7b3mHxpiJBikxumkv7s7bwt8sfDi5SIX\nAVjHOj7n86Q2xs/oW519/H3PXnqZ4wXcSW3NxJeZoDMTqMZ2Wl9P8iS72KX/W6y6rUtFNomgrIsP\n0n7qBUEQUiMJjISMSC0m61CqtVjK5DmpallmQkv409y8iY6OXkuIpGKth+aye999B+js7EsSohC7\nfoCWls1FKxViPL92zkyJjPJdw0JJN65C1iLbRENm7RLvHfMiL6Rsn4qRYJCjgQAHR0fxHzuW+0UV\nGRcuQLmj/pJfmrbRPqNvBW7gJ1t30wU8lUKI7mVvUkIks7qdibU9E9tprzvoiPu3koQolM7Nd2n+\nfmf61Asgv6WshqxH5SNiVBCWEaWoh5iKfIWvJkS+9rUjbN/+QtmFaClIJ4q6u/vp6NhJR0evqVhd\nqnHA0j68yGVc5SDx3ulH2YZew9xZMdt7rcapUjN0tbaye+vW4g46R/z4+QJfwIOHN3jDNMusHz9/\n1X2Jsc527t7yn3nQ4005ByOM6FbWJprSii8zgbocMRPdxeEHKMvlDihZlt9Mn3pBEITiI266grCM\nKEU9xFRY1QVUyJ5c17AS3MDzJdd7J9v2wVAI/7Fj7N66tewuukYX0g1sYD3r86o3qqG53jbRxDu8\ns0JqeZYLH+JCW3wqLTZcEKyMxIwKgrCkLKXwNSMXYVRqEVXM/os9VrP+nnvuS1y9GsBmc+By1VJb\nux63uyHj+UqVeKjYLGfRXAjGuE0PHo5zHIgXnWaxnanIJ9nP8v3xX+or24GqH9qFWC6Lhw+R+IJQ\nLCRmVMiI+NVbh+WwFuV2tczFxTSxbWKspHE98omjNPa/d+/GpONy6bPYrrNm/V29GiAcvsjc3AVm\nZs4yMXGc0dGDPPdc8tiNLIUbeKFrAeVzP84WP358+NjBDoI5ulzmcmxiW6MLaQMNQHJso7HNu4Pv\npu0/H9fbEdSP/4OA9VamEEp7ZYOD30VcaItPPlGyy+Hv93JC1qPyETEqCEJFkoswSmybTqyY7Usl\nirTtU1Mf6NtmZgJZ9VmM68oGs/7sdpXExmZzYLdX6W1nZ5PHbrz2rVt/mDYhUqrj8k2OlGreMgmy\npYydzocRRpKS/pTi2MS2RvGYKrax1LGd5UyRU8hDgMyU+srqUHY7EaLFRKJkBaH8iBhdQfh8vnIP\nQYgia1E4mTLFpmubKFaM62EmZFKJIm17KDSJ3e6JHl/H7OxUnADLRRzlcl3ZYNbfQw+9RW3tOlpa\nupifv6S3NRtfvNX3S1y+PM7hw4+WzMKbaS0gsyAr9hwWW8QUknE1l2PTtc1GdJbie6qcP/4LeQiQ\nmWJcmZ9USYpSr0XqY4TMeMld4svfb2sh61H5SMyoIAgrjnTxrmb7UiX6MW7ftm0/P/3p7YRC54D4\nmMpyx9emQht/c/Mm6uuvw+d7Kml8WptEzGJGjbGai4thxscHCkpwlWreEuMan+CJpFqXxcSY1Ked\ndj7io4LOkU+sZT7HFnKe5chSf25yx0fuEYz5HCMIglB8JGbUYpSrfl86xK/eOshalJfEeFfjepjF\nwqaytBm319d30NbWBcRb8oaG/Bw61Mvc3OWsxlbq7w4zt9t0ZXa6u/upqmoHwOVqBFJbeI3WUJer\nLi/rZKa1gOTyGaW1eMUsjAABAgWfI19XWD9+eunlMtl9lgp1uS3291S5/y4u9ecmd1K7+qZeC6kN\nutTI329rIetR+TjLPYDlivajDODYMb8lsk6+994PuHTpLyTD5AqikrKKWnmsmijKtL27uz/Jkpfr\nd0GpvzuM/f/0p7frAjoVHo+Xb33rI44d83PHHU8yPLwrpYXX6FZrZmUtFprI0rhz6CS3BaHa2cCf\ndD8J0QoqxfpM9dPPRjYSIJCXa22x0MQTKGGaruxKOvz4kyyC/qEhRoJBapxO+ru7S1KGJtfPttk4\nCyHxc1OIu3Rp6Ee53e4me8fRfI4RBEGwDmIZLRFWTKCxYcNlS2eYXEksVYyD1bOKGkk31lJbVIq1\nHmaWPO27wCyW1IzE745Crt3sWON4QqFzWX02tOuqr+9Im0G5GLGa+axFV7CDGwOwfvQS7x/bpW8v\n1uffi5eP+Mg04c9SYiaetDX+r69ey7bQXWniWmOxhSN8mGQRHAkGORoIcHB0FP+xY0Dxv6dy/btY\nastlqiRO5SN1BGPqtcgn6lEoBIlRtBayHpWPiNESUewEGsXAigJZKC2VtObpxmoUFc8/f6vlXODT\noSxybczPX2Z8fCCjKEr87ihEUJkdq/W/atUWILfPhpm4NW4DylLup8qpypQkXksxP//5urwW80GK\nmXjS1tg7OsqGY8fTCLdY6ZEaTgLxorbGqRylulpb2b11a0HjTEWufxdLbbksdeZgYSmRRE6CUKlI\nAqMVxM9//jJ2+48tl0RlJTI4OLgkT/OsmjjHjHRj1ZLoeDytLCzM6RlgzZLo5EM265HO5dNsn3Fb\nJBJmbCy/ZD6pkidlckEdGvJz6tR+5uamaG7eREvLzUxPn9bbAzl/Nl580ae7WWpzb7atEPK5N4yf\nneHhJ/R52br1h2ndipeCbOcnH5dU4xoHmxv4i69d4nc8XbpYje8zjJcBoIsg+/GzKy6xUTAUwn/s\nGLu3btVddJfqeyoVkoApRrnXwvr4WKpETrIW1kLWwzpIAiMhI253XVmsFkL5SJUAZqnJxjqUbqya\nRaWx8UZdiLrdTUtq7c21Nqlxm9NZm7enRHV1Gx5Pa9JxmSymweAIc3NTANTXX8f09Om49vl8Nsws\njdo2j6eVy5fHy2KxNl6LcV6Gh3eV/fOfrXU2H5dU4xp/uf4uHvTEW03j+6xDKz3ipSPJIuj1eNjb\n01OSWNF8EculkD2SyEkQKhURoysIeXJkHVbaWhQau6eJDbdbuWO63U08/PA7RRMZ2axHOlGRTqSp\nZD576OnZy/DwEzm7bI6O/pxQaJKxsQEGB7+T1XgS97vdDVy48D4Azc2b8hbxZm6WxgcFExPHC47P\nLPTeSDUv5crk2t3dz4XODfzwAQ/f8Dyask5pPi6pxmvd5nsmSbjF9/kUucYWrrTvKStT/LVYbm6t\nS1fBVu4LayHrUfmIm64gCCUnlatprpTK7TibrKu51iZNdB09ffolZmbOAQuActl0u70Zz7tnT7Nu\n/ero2Mn27Qeymgvj/kOHenVX0Y6OXqqr24qeubhYa1wIfvx8EvqQO4+d5H/d+iarPB36vmK7E2vn\ny8a11lintI8+0yy4rw09xq+Dr7LRuYnt3fuymr9MnwFxcxVS40PqkwqCUEzETVfIiNRisg4rbS2K\nldCrVG7Hx479KmOCpHTnNtuX6Do6MxNAE6I2m5M77ngyK4txa+ttgLJo+nx7shpP4v5EK+nJk3uL\nnmW5WGtcyL0xwggDnuP83z0B/tizK25fKZJ5Zetam43VcyZ4mqbAJIHRzEmuNDJ9BqxWZzQ/lpsF\nLz/UWhRzLsStNV+scV8IGrIelY/UGRUEoeSkqtNpFRwOFSfX2tqF3e6Jq4WYznqZbR1LTQhpRCLz\nDA/vykogbdu2L876lU/tTGP900OHegmHLwL5xd2mOr8V1jid6DOrAVvK8xnppz+jhbKSMl8vLVoW\nYFBizLrfI6Wn0LnwR/uoAX4I7ELqkwqCUG7ETVcQhGVFPmLN6O54+PCjce6mRhfXRPfOp59eE7V4\nKvfX7dtfSNn/4ODjnD37BrOzE3rfgGkW2FTZcc1cfbXxZHPdQ0N+Rkb+kcXFOWw2Jw899DYtLTdn\nnB8jpXB3LRbFdEvNxgW3mOerpMzXS8sO4CDKglf6eMBcMf+cGEVfP8Ubc6Fz4UNccwVBKBX5uumK\nZVQQhGWF5voK8PTTa2lruxWXq8G05Iq2bXj4Ca5eneDw4UeTyoGks1gtLIQM71I/dPN4vFRXrwLA\nZnPhctXq2zUxZxy3mUVWCdGA3meiVTPxeDORGAyOsLg4p0Ybmeedd76Xs5i0sgVPc0stBpoLLijB\nYdavFy9/PORlKNhbcPytFSzL1qQfJe6sacEz/5yUyppb6FyIa64gCNZDYkZXEOJXbx1kLUqH0SV2\ncXGGs2ePpyy5om0zxowmlgNJFwvpdFYD4HLVc+ed/z3tuILBEWZnJ4hEwpw5czQuLnBoyK9nu/V4\nWrh8eZxTp/bHjdMofG02Z1I24UuXTkbH0sAddzyZcW5aWjbnJSaLFRuaCqvcG9m64J4+/ZK+ToOD\njy/R6JYGa6yFl1yzAJvhx48PHzvYkTKjcT6Yf06KL/rUWhQ6F0uXcXY5Y437QtCQ9ah8xDIqCMKy\noru7n+ee28jsbACXq4Fw+FLKkivaNmPMaGI5EM06aUZ9/QauXh0nHJ7WRazxmOrqNkZHfx4VkjHP\nlcTyKsZ6kZEITEwc1/e1tnbhcFSjWV5drkYeeeQ96us74sYYDl8GIBy+pI/FbG6UYLLh8z2Vl5hc\nKRa8bOI8IdE6vhSRL0KuaBbLi1yMe18MzD8n2VgwS+XKmw5NzAqCIFgHiRkVBGFJySemM1e0+Ls7\n7niSl1++h5qatbqrLpC2DItxPMb4yOrqdr75zY/i9mvlTJzOOlat2sK2bfviYkw9nlZCoUnDyJxU\nVTWzdu29XL16Rp+D/ftv4sqVUVyuRpzOamZmArhc9bS3b+Xee59JKM0SK++SOEYgbiwSe1h6Xnll\nG2NjA7S0bObBB38hc25BjKV1mmjiFKcsUOrGh8RvCoKwnMg3ZlTEqCAIS8pSJ8DJ53yaYJ6a+iBO\nTNbVbaCubr0uIgGeffYGQqFzev9zc5cZHT0I2LHZHEQi4aT+PZ62uGPGx4eYnT0LqFhQzUqqjTdd\nDU9tX3PzJq5c+ZxQ6DygrLa1tetLIvqX4oFCpSCJh6zPDnZwkIM00cQ7vEMHHZkPKhmaRfQDYJKl\nS8xUDkusIAgrCakzKmRE/Oqtw0pei6VOgJPN+QYHBxka8uv1RaemPiQQOEooNIndHnPhralZq8cH\nPvvs9Rw+/CgtLbfE9V9d3RbtdTEqRNXXrMtVHx1PHbCovw4EjjM7GxO8drsrabxmcZpDQ35+9KMa\nRkd/js3mor6+k0hE9dvcvClurLnUEjXOw5Ejj5nWXM2mPmq+VNq9Uarat1bAumuRW73Nfvrpo49T\nnCqzEIVYcqNJYB3ZCtHC10I770FIUxNXyIx174uViaxH5SMxo4IgLCmlqPdoRCuBEgpdwOGopqVl\nEx0dvRljJI3ZaKur2wElCLdt269n1z18+FFAichQaJLR0YN0dPTS2dmHw1HNoUO9eiIihY3W1s1c\nuTLGjh2HePnlbkKhSebnLwN25ucvR18r3G4va9fey9jYAB6Pl9df/1Omp08zPX2S2toODh9+VLdE\nBoMjLCzMABCJLPD55y/rmXLr669jcvItQMWY3nHHk1lbM43zYLTg/vjHq1iz5m62bdtn6Yy6S4XV\nrMP+oSFGgkFqnE76u7vxejxlHU9pyS1bbTGzLBeOMblRqSyiZlZQyaQrCII1ETddQRBKzlL+cE+M\noYTs3HONrrANDV9kbOwwLS23xMVeai6ZodAUY2MDenxmbe1afvvbf4pzybXbXTQ338zk5Nv6GDQX\n3tbWLi5e/Jhw+KLe3mZzsn791xkfP6xvt9lcSW6+dXUd1NVdF+dCbLM5cDrrCIcv4nY34/VuZGrq\nN3o/Dkc1NpuL+flLgBLb69bdFyd03W4VU6vVWXU667Db3czNXUiay61bd5fsgYL2WflX50le6+7A\n5WlIWeeznFit3qrvxRc5GlClf/o6O9nb01PW8WSisO8Ea9ceTU+Q0peq8ZEcj7oU5xUEYSUjMaOC\nIFiWpfzhrolKDZerkUcf/TTux65mPV1YCNHaehvbtu0DVGIjh6OaTz/9mS7k6us3MD8/k9TWGCtq\nt3tYXIxlVa2pWcs11/Rw+vRLzM1N6clttHNs3bqbgYE+xsYGUG68yr3WaImMx9imRY8LBaiqWkV9\n/QbOnRs2HUsqkpMrxYRmqmtrbt7E1752JEk4FPNhg/Gz8lYn/H0P9NGX1rJVDitlujjecrDj1Vc5\nODpKV2srrz3wgOUto4V9J1hJWBUai5nr8dm0r2SxLghCpSIxo0JGxK/eOqy0tVhKt87u7n6qqlYB\nyu31kUfeSxIKweAIMzMB5uamGB8f4G/+pleP/ZuePq0LUbe7iZqatXFtBwcfx+Px0tbWpV+T5h6r\nrrWOvr4PmJ4+rScimp7+THfx1eILa2rWYrM50USm292kx58mfpd7PM2Aqg3a2ro5bt/cXFCvMarK\nwFQZ9pr/TVDt3HHbtHIzidf2rW+doKOjl46OnaZCVJtPYwypMe7UGGuaDR9+qFyPp1ob+cnWzHU+\nzc6/FJS63mqu9Hd309fZWVQhWsrvqcK+E4pTe7Q4FBqLmd3xsbXIpr3UEy0lK+3vt9WR9ah8RIwK\nglBylvKHu8fj5VvfOkFnZx+/93uf6PU4jRjrhjY3b+Kmm/5MF1BTUx8AYLe7sdmcTEwMxx179uwb\nhEJBqqvb8HjacLu9GL9K5+dnOXz4URwOlYjIZnMyN3ee0dGD0RqfilOn9hGJzGsj4uGH36G2di0e\nT2tUpKIf/+CDh+ns7OPBB39BT88+bLaYkFxcnCMUmsTprMFu92C3a8fa0WqTGrHbPWzbtp+6ug1x\n2+vrr9PXxrhe9fUdbN/+Atu3H0i5donCIhdxmChcb731/6Kzs4/vPPAeD3r6eI3XMrroliOG1WqJ\ni7weD3t7eixvEdWwmpjPn0JjMXM9XmvfCoxjnsTJSmJdEAQhPeKmKwjCiiMUCjI4+B0ggs+3B4/H\nG+c2WFu7jrq6Ds6ePW56fGdnH1evTujtY7GdNjQB6HY3Mz8/E7WaLgDQ0dFLdXVbXJIg7fhrrulh\nbu4SExPJ57TZnLjdXh566C3q6zuYnj7Nc8/doFtk3e4mHA43MzNnE8ZjTnwJmtTut9mSWN4kFxfW\nYrhwF1pexY+fEUaoocaS8alCIlYqU1Koy3Cux2vtxwHtu0LqlAqCUH4kZlQQBKEANAGlJSWy2YjG\ndCo0gacJLGOin5aWW5ie/oRItHoX6QAAIABJREFUZJGZmYBp/263l9/7vU84dKg3KcGSRlXVKmZn\nJ3A6G/RkQ0Zqa9fx+7//edx4bTYHVVVtzMycQxO92ljdbi+rV9/F6OghACKRMB5PK42NN+J0VuNw\nuLHbXbogLxa5iEMrxF768HE0mvAlU3yqYAV8aAl6/GxghPUr8EFCrnGhZgLeSqJeEIRKR2JGhYyI\nX711kLUojHxiElMdMzTk5/vf3xQVby3Mz19mfHwAp7M2zl02EglTW7uOpqYvc+hQL5FIGJvNzfz8\nZc6ePY7d7oqrFxqPg+ZmFQ9qdBFWxL637XYXHk8bbW2bcbma4lvZHMzPh9izp5lXXtnG1q0/xONp\nIxJZiArgBf1cmlV0zZq7CYeniUTC+vgbG29kYuI44+MDuFy1ad1v88XowppprRLdNctxb9REXR+z\niU9dSVjje8qspmjMtXWEtRzlKAc5iN+S9TNzq4maiuS1yDUu1CzWtFi1R4tzjZWCNe4LQUPWo/IR\nMSoIRaSQxC1C9mQTk2hci2efvYF//df/z/SYYHCECxfeY2xsALtdxXm2tnbh8+2JxoMqnM5aGhu/\nxMWLJwkEjkatpjGvj8uXPzPEgCayQCBwlH37bmJu7hLxDw5jfVy9epZQ6Bxnzhxlfv5KXA+RyAKh\n0Dnm5lRZmRde2GJIeKSw2VysWXNX3DUY4ykfeeRfcLsbotdTx+zsVNzn1I8fHz52sINgkX5UZlqr\nbGIvS31f9dNPH9nFpwpLjZlgigmxGtTn2boPEool+BLJNS7ULDbVbFsuwlJru5/SXKMgCCsBcdMV\nhCJitdqDy5VUrp3GEh+p4i/ByWOPndOtdqdO7dfLr9x33wu8/PI91NSsxeVq4I47/pIDB7awsBAG\nlNDUXGBbWjZz9eqZlG65xcBmcxOJzKXZ78Lj8TI7ew63u4mHH36Ht976cz777KBeIxWIc5kNhYJx\npVu0z+nQkJ+jwb1MOC/yo2540JPaXTWXGMtiuOHKfbWSSe+OGiSIHz+72W3RBwlWKbOSGJv6JeAM\nMAv8M3BztJ2P5BqlqTC2hfJfoyAI5SRfN11n5iaCIGRLObJ6rkS6u/tNYxKNiYGqqtpNj73mmnv0\nY4LBEb38Sl3deurrO6itXa/34XbX0db2u3ExnpoLrLKELlAcYnVEjaxb18Pk5K8NgteGzebUx6Bc\nhR16fdDh4Sf0Gqnj4wM8++z1LCzMMD8f4pNPXmD16q+yffsB2tq6dIGofU6DwRFWBy6yGvjfjjXx\nH3pSf35HGNFjLP3408ZYplqrXJD7aiXTT7oEP168Fo/x7QduBTzAo5QvNlOzpGqcArQkZ/cDY9HX\nuWT31dpuAq4DnkKEqCAIuSJuuisI8asvPdmWK5C1iJGPC2Yq106jaPnGN96kuloJUperDoDm5pvj\nrGpa+1OnGjh79g327GlmYuJNQFkdp6c/00u9JBIKnWdurnCX0erqNbrrbCJnzvwSr/dGamuvBRxA\nJClLbnPzTbjdXg4d6mVk5B/1GqlqjJNRd995IpF5AoGj/PSntzI5+TY2mwuXq1Zvq83FxdYm/s+t\n78RZmRLXKJcYy1xLoJjdG8unDEhlYY3vqUovU+IF1qMy3+bvxpp6LfKN1zR6rG0yvM4lFlVrewR4\nIYv22WLtGFRr3BeChqxH5SNiVBCKiNVqD1YCudSkzERifcxvfvMjOjv7eOSR39DZ2cfXvnY0bm1U\nrdBWIpEFZmcnmJubYnExBCir47lzw4RCk5Tyq3JxcT6lqJ2fvxSNH71KLEFRPE5nrT6HWqmXVDid\nDdTUrGV2doJIJMyZM0f1Odfm7k8eOMUqT3xt1sQ1WuoYS7mvhMqm0Fqk6TDGpG4ke/H21ei/XwGe\nMWzPRfyX6kFBqeJsBUGwIhIzKghCWSlVaQ9j/Gh3d79pv8ZYxHS4XF7C4fgfecpdVktYZKeqqoVQ\n6GLaGE9znMB8Qn/xVFW1MTt7Lmm7x9MSTap0Iiqa01Nd3c78/IxuPdXqiw4PP5F2rqxQfkUQKpdc\naonmWm5Fi0nVyLbmaKH1UTNRSNkYq8TZCoKQC1JnVBCEiiSXmpTZCEyNxKQ3brc36VhNZKWK2QR0\nd1bNeulw1LN27VYWFuYYHx9AfY1m/o5LJSijZ8HlaohzsTXicjXG7WtouJ6mpt9hZuacnqTJbncn\nWEad2Gy2JLfe2HW52bDhIa5ePcOFC+/rsbNmCYJyWSNBEArBR/YJhECJyo1AAGuJNx+5XYeRUgtl\nQRBKgdQZFTIifvXWodLXIt9SG2bH5eKCaXQX/elPb005hqEhPxcuvB/tv4UrV8Y5dWp/kjuwctNt\n48QJ8/PZbC4cjqo4N9qFhWnOnn2Ds2dfR4vjzIbUQhQgorsHK+wYv88TRer09KecO/cWwaCKZ21p\n2UxVVatx5Dz88Nt8+9sfU1u7jjVr7la9RkvXAEQic3zyyfMEAkd1IZoqQdBSuslW+r2RK1YuB7XS\n1sIamLv0pl4LL/ARsTjPJ8gt3jLX+Mxs22vXUQdMZdm3hrXjhOW+sBayHpWPZNMVBCFnjFlrjx3z\nZ11qw3jcs89eT1vb7RktnEaMCYquXPlc7+snP1mH3e7Ebnfx0ENvcfr0S7rAikQWOXs2VuLFKLim\np0/rJU4cjioWFkIkisv5+emkcWh9FwunsxabzcHCwmx0i7mVVmEnEglz9eqovmV6+nTcMddeez8f\nfPA3BIMjNDXdxNatP2R4eBeXLn3G5OSw3k6zmra0bKaubj0+356MLru5kos1uxjHVRr53kvCciV9\n9mBzjJlytXhLov3sNbw2c5tN1T4V2bbvB64HJoEBVEbh9SbnFwRhpSNuuoIg5Ey+MYTaccb4yI6O\nXrZvfyGr4zV3UYejmo8/foZYUh8Vdwlgt3twOKp0a2JV1SpmZydMBdfExJssLoZwuRq59tr7OXVq\nL0ZRt3r1V+OEbPFx4nbX5yVuHY46FhYu43TWMT9/Wd+urcmhQ726yKmr20Bd3Xqmpj4gFJrU57+5\neRP19dfh8z2lr2Gxa3rm299KqS0q8bj5UkhMYiWQ7/Wlirf0Ye42m2t8pta+DtgC7EtzjLGtk5h1\nNFe3XUEQKgFx0xUEYclIVWojk8uhdpzTWWfYmv3DKs1dVFkClRB1u71xpVEWF0M4HG5AWfy+8Y1f\nUVe3AYejhoWFOV5//U85eXJvNPusco0Nhy8yNjZAokVyauqjrMcGYLdX59Qe5rHb83NQWVwMUVXV\nTlvbbYBKRtTR0auvidGKXFOzlkDgKKHQJLW16/j2t38bzS58hO3bX4hbw3xreqZa+3z7Wym1RaVs\nTb4s94yr+V5fG6qm6cco0afFX74f3b8ZqEaJ02uBC0A7sB9zUZnoltsfPcdllMUz1dj8wCXAFW2r\nfSeYZRS2dikXQRBKi4jRFYT41VuHSl+LVDGEmcq0aMe1tXUBSkD5fHvi2mQTQ6cJFbe7iYcffpeH\nH/513P7FxQU6Onp58MFfUF/fQV3deiYmjjM6epDPPjuYFIMZCNxAa+smEpmbu5B+IhKIj/vMjlDo\nIjZbOkHqwOmsT9imXHVnZwNMTX1ER8fOJGFpFDmaWG9t7eKRR/6F+vqOlDGg+YqjVGufS39DQ36+\n//1NvPrqDrZu/eGKEGlWLltj7e+pUpZLyZVSiKn46zNfC7PzngZCwEViYnEEFbep7X8JJXRHgWFi\nyY92AI8Z+nwMZcHURPGtQC+xB4ja3H8JJWTbov0TPedxQEugtil6rJn1tbIeLFj7vlh5yHpUPhIz\nKghCXpjF9GVrzdq2bV/K7KyJMXRmWXC7u/uTjjdmnJ2bu4DD4dLdcaemVKIfj6c1qRan3e6hq+s/\n0939b3n66dXR/Q6qqlqZnT2b46zELKs2mwu3u4FQ6HzaI1KVgnE663C7G9i583V+9rM7mZ+fxums\nY3FxHrvdzfz8JQBmZydwONxJ86iJnKEhP+HwJaqr29m2bT8ejzdu7aqr25iePh03v/m4xKZa+1z6\nCwZHuHDhPUZH32N4eFfWx62U+FLBSD6xlaUi17jLbEi8vh8Af0G8267ZeWsMfWyOHv9o9H0dyhKq\n0YCyXtahYjsPoiyZmoBsQ4lagCZgreF8HmLW1ICh3V3A54ZxbAKuA54i9TpZ6cGCIAhLjcSMCoKQ\nM0NDfk6e3KuLv+rqdr75TeXSWmgJkMQYOmPsY6oSLQCvvLIt6mobq59pPFaJJacu4ozU1XWwsBBi\ndvY8kcg8VVWrCIevsrCQnLwoG6qrV+t9JZNdKZhrrtnGAw/8HIADB+7SS7h4PG160iUgY6yhMfbS\n42nD6fQQCl3S58HYn8fTGpdUKheRd+TIY3z22UFaWm5h27Z9ea1/vvGTKyW+VLAqS1EX00dyzKfZ\neYPA46jvmacM2/wo6+gAMYH418Auw3YjLag4/IvRPt4F/h3xNU2bgTuAX6CssRjaNpL9wwIp5SII\nywGJGRUEYckIBkfiXF1nZgIcO+bP2uVwaMjP00+vYc+eZl55ZVucO26iW2eixS2VO2hPzz46Onp1\nl1WPx8ulSycBcLka8Hpv0gWYy9UY5xp75coYMzOBaHbZCLOzZ7MSojabi5qaa5K2z8ycNRWidnsN\nbW2/m7FfiFkaAd3NVsXaKutrYoxopn6czjpCoXNcuTKqz4Pb7aWl5RbD/sm4ec3kdq25VD/zzLV8\n8skBQqFzjI8PmLbNhnxdhFdKfKlgVfqJlVYplZgysx6andcLHABeSNi2F5VsaAMwjhK2fxjtax8q\nbtRIkJi184rhfMbfmRdQ4rQm4bg7SV2excy1WGuba1kaQRCWAyJGVxDiV28dKn0ttB//mqDLVQQE\ngyPMzASYm5tibCxevHg8XtxuL4cO9ZrGDqZzB92+/QW2bz+gC5n6+g4AwuFLnD+v4krdbi+PPPIe\na9f6otdSx7/+q5kFUyP1Q75IJBwtB5Mdi4tXOXduOGm7zebm2mt30N6u6oEaY2mHhvzMzV3CZnMx\nP3+ZUOg8tbXrTJMPmaEJvFWrtkS3OPR9q1ffybZt++js7GP1arXfOK+ZRJ4mVuMFblPeglCt73dz\ntqpKEqDSUOnfU0uHmfAqbhzp4OB3UULSg3K7DaY4rxlaTOf1wCpggpg11E+sVqnxu27B8DqMsqQ2\nkezV0QXclrAtOf5eobkSa/Gh1xM/P5UROyr3hbWQ9ah8JGZUEMpIpca6aTGbd9zxJMPDu5LccjNd\nl9Hq19KyOUm8GONGX3hhC9/61kdxiXnMXIET4yBHR3/O7Oxk9Hyx8idudwNHjjzGxYsf4/G0oIUR\nuFyN2O1OkxjP+B9fDQ3XMzd3kdnZiailsvAwhPXr72f79gNxpWt+8pO10bqj8f23tnbR1PRlDh3q\n1ec3XW1QzVqt9T0zM8GZM0dpadnMvfc+k7TfOK+p5lpDW0ctXlcllHpnyT/H+ca5CkLpMMZzFqPG\nZl20j1xqiGoYYzqNHh/GzLo1QD0qhjQbGoB7gD3R9y3EYuaPAdtQMaY/R1lQI9H9RpGrxakmxrtK\n7KggrCQkZlQQyshyjXXLdF2hUJDBwccBW1yNSw0tdlDDrI9EwWuMD02Mq7TZPEQioai77sYk66QW\n8/r663/Kxx//OOP1qTqdEeJ/WKVrq1leHUnH2GxOVq26A5erQReWxnhcDZerkbVr78HneyopjvbM\nmWPMzAQA87qtxrnauvWHpg8Q8kETsKkeSgjCysUYz+lBZZaFwmpsJsaIPoESoe8Ty5Zr1n8bSvjV\nAG8Af476+dcA/BOxhEVaveZqYCbDWJqBk8SEbyPJQtZDLJY0FYnxrhI7KgiVSr4xo2IZFYQyslxj\n3TJd1/DwE4RCwTgLqZHu7n6ee24js7OBaCzjFKFQME7oJGbdNcZGJgu5GubmQoTDl7hw4b2k883N\nXYpaIjP9AFOYJyZK11YlLXK56giHL6MJUperAbvdzdmz6ofqs89+kbm5adMMuzabjTvv/Os4V2Wn\ns47Z2Snm52eNZ0w61jhXuWSpzYTRIrlcHqQIQoxMFsd0GLPhatlsC7X4JWbYNVpfU/XvB76Asoi+\nAdyMiikFWENMiIISogDmGb7juUDMxXeEZCF6M/AZqcVoS/Rf45xqbseCIKwkJGZ0BSF+9dZBW4vl\nGuuW6bqyqUeqXHNbmZ+/zNjYAM8+e31c7dFEwavcU9uYn78cV77FZnOhCbTm5k1R11ojdsbHO0yF\nqNvdTHEcSNT5w+GLOBxubDYXVVVtPPLI+0QiMUtpKHQ+ZamXubkgzz13I6FQMO5ax8cHdDHqdNZz\n553/PenYSnroId9T1kHWopAYRmM8Z74JjmJxp4ODL5McI2osn5KuhucwShR+L2GfmVB0k43Hh/pe\n3Af8PfGCWMMY7lBPTHx+BehAieDzqLjVjVRSwiK5L6yFrEflI2JUEMqIlQveF0Km68okjoaG/Ozd\nuzEuflPL9Do4+B0gWfB6PF7a2roAZXFU2IlEFpibUz90pqZ+Qyg0FXcup7OOK1fGTMZYx9ycFuuU\njDEbb/bYcThqiETCzM6e44UXfpfm5ptNWzY3b2Lt2p64bYuLIY4d8zM8/IRunW1u3kRrq8qIOz8/\nzfDwrqS+cnnooWXINQp/QViZFCuGMdtEQ4kYxfAPTPZrIvcI8dlzjWjX0IrKonstqhboDpT1MpFF\nk21mZAq/GiMmMKuBt6NjPYZKhmS0pAaALyJZdAVhZSIxo4IglBSzZEZmyXKMGGNOE+no2Mn27QdM\n9yXGMH722SHTuqKZsNs9rF69hTNnjGPIrj6ohtPZYHpum80VLSGjuPbaHbhctXz++SHCYdXe4ajB\n7a5nYSGEzeYgFFKi2OVq5JFH3uPIkcf0+eno6GVhIZRXfU4zlmscsyDkTrljGFPFiJ5EWRcbyOw+\nrF3DUVQWXSMuVKKhI2SOES2UVqAKNe63SO2+q8W8fgklUl3R9h0lHp8gCIUidUYFQbAkZi652VpO\nY++Va62x5IkZWr/19R309OzF4XBnNcZE193FxRCTk+8R/xWZvRD1er/C6tXJ9UQ9ntaka7Pb3fT0\n7NXrjzoctTgcbmZmzjI3FyQUOo/drq4jHL7I66//73H1U++886+prm7D42nD7Y6fT62e6z/8g4en\nnmpKqulqRiW59ApCacnXopkL6UrAJLr3apbSUVRCJM19OF0f2jWESSaMSn6UbQx8pp+MtSm2O1EJ\nlLRxpxKiXpTw9wO/RWUAngRuQKymgrB8ETG6ghC/euuw3NfC6OrpcLiA3MRNd3c/HR29XHvtDjo6\neunr+w2dnX187WtH0lr9El1MH3roLWpr1/Hww+9RV9dBfM42B2vW+Ojo6OXs2RuS+gqHg2TvshZP\nY+MXqalZm7R9bu5iXHKllpbNuN0NvPiiD5tNxaguLFzR3Yq1Ng5Hlf7+/Pl3dAtqOHyJ4eFdTE+f\nJhQ6x/h4fM1WrZ7r4uIc4XCQsbEBnn9+U1o33HLHMS/3e6OSkLVYCtLFpcbEsFoL7UFWY/RfzX04\nm9jWxFqgRPv7Jdk/aPOSPu/llRTbsxW7QZS77vvEx63OkVvcbnFrvCYi94W1kPWofAoRo0+iqiS/\nB/yU2LejIAgrnNOnX9KtoXa7O6W4SRWf6PF4qa5uIxy+wsJCCLe7MavYWqMV9umn23n++c00Nn6J\n99//b9H+jT+KFjh79nUWFkLccssuOjv7WLPmbgBcrvpoG0fSOWw2ta25+WaqqlaZjMLOmTNDTE2d\nSNpjdM+trm4nHL7Mxx8/QyBwlLGxAd0CqlFTs5YHH/wFbW23R8+5idradboYdbub2Lp1d0prplm2\n4rm5yxmTR7ndXg4d6i163KjEo+bPypm70goJ63Ey+m8D6mdVOjRL6XvEW0zTxbZq8wnxP9OqgK8D\nj6ESDGXDBbIXlvlyHng3+toFfDX6Ope43UISTwmCsNQUEjO6DTiMMh38ZXTbvzdpJzGjgrDC2LOn\nmbk5lSjIrO6lxtNPr0lZH9MYu6jVATUTs1o8anV1G6dPv8Tc3BQORy0LC7Gn9Ha7h8XF1PXubDYn\nbreXBx88zM9/3ktV1SomJ3+ti0e73c3i4hx2u4vm5pu5cmWM3t43qa/v4JVXtjE2NpBhRuwkWlk7\nOnYyPj6oW0ptNhff/vbHHDiwhZmZQFz8pzHG9vDhRxkdPYjb3cTDD79DfX1HUgyuNi8Ohwu73c3E\nxK8IhSZpbt5EVVUr4+MDaeNLSxU3KvGo+bNy5s5HLDtrITU5K4W7yK4GaboyM2axrVp7Yw1SF8o1\ndxMqTnQjKi4TVBbdefL1BikutwBnUZ+Fz1GC3QecIXOZHT+wH3XNm4FfpGkrCEIxKUed0dcMr4eB\nhwvoSxCEZURb222MjQ3Q0rIZn++plO0WFowCMf6hldGqNzMT4Ngxv/4DfGjIz+nTLzEzcw7Nncso\nOF2uGl2MNjdvYnr60zRi1E4kMk8oNMnPfvY/YbO5mJ7+xDCOOtrabsPtbmJ29pxeE/Sll+6hrm49\nDocLj2cVoVBichANG4k/8Lzer+B2NzI/HxPMzc03c+zYv6O3902Gh3fFJXcy1vPs7u5PSv5k3A/x\ndUU7O/v49rc/1o8B0iaPUtdcmrhRiUfNn5Uzd8XKYFspaJm/M12vsaaon3jR6o3+vxbl0upFubsO\nG9o4iMWNXodKhnTOsD9dbVEnqiTLu2naFBOtFvTzxMZ8mNh4E6/fyAgx8X0eVfImUcAWUj9WEIRi\nU6yY0f8FeLVIfQklQvzqrcNyX4uenn10dvbx4IO/iBM8ia6Gra0qjsmYmEhrE4mEdTfYxB/gWiyk\nJkTd7iaczmp9/8KCOra2dh0uVy02m/lXncfTisvVwIkTKoNtU9NNuqXS7fZis7mZn7/MmTNHCQSO\nEQye0MdTU7NWd69tb7+Tzs4+Vq/+alz/bneT7tYLYLdX4fG0UFvbzqVLJ/XyLG53E+fPv83o6EGG\nh3eldUk2S/6UOK+JwsV4TDblhEoVN5pNv8v93siXcsTylmct8q3JaQXycTHO5nr9DA6+HX29GXPR\nOoLKiLtAvKurlpxNi8HsQgngvWRXTxTgHmBNlm2LiSZEu1DWUoiVqNHmOHHOtYcZdajyMWbuuoW5\n8cp3lLWQ9ah8MllGXwPaTbb/R+Cl6Ov/hHqk1p+qk8cff5zrrrsOAK/Xy6ZNm/D5fEDsQyTv5f1K\neq9hlfGU4n1Pz96k/ceO/YoLF97jxhuVdc7t/lOmp8M89tgBPB6VpOP48V/R1qaejAeDX8Xh2Bi3\nH2JWohMnwOWq5T/9p3c4evQPOXJkAJvNzg03BAmHYWTESSQyyo03Atg5cUJZKNV7eP/9SVatugOH\n412am29iePgjwmG46aYm1q3bzsGDe/X2odB5TkTDQF2uj7HbnZw4AY2NN/DYY09x4MAWfvWrj/X2\nbreXVav+lnff/UtaW9+jqekmTpyIMDX1G268cYDq6nb9+NtuW8/Y2ACBwA1cd90foJHtfF+6pCyh\nJ07ARx/18sd/fIBjx/wsLv4Bb7zxbtHWr9D3b7zxLk7nd3UxZdb+3XfzG+9yf+/xeHE6v5v3eubz\n/t133y3T9e5d4vMV+r4fGEEJxsuo3X4GB79bpOsdAS6j3tbg85ndPzVof158vhrgDQYH/wSYxuf7\nNbCZwcEa4N/j8/0AuGhoT7S/VO9fA5w5tC/kvR2fzw7MMzi4AViLz/dydP9G4BI+3/Ho+14giM/3\nnuH9n+Hz/TMwaehfWZ3VfP0An+/9aPsvAn9gOP9gtL0v7XsN63z+VvZ7DauMZyW9f/fddwkG1YO3\nTz/9lHwptM7o48AfAd3AbIo2EjMqCBbFrAZoKft49dUdpvUwjX1EImHGxtLHNIZCQQYHHwds+HxP\nxcVVjo6+pmejtdmcuvVRuanFWwJaWjYTDk9z6dIn+r7a2nU88si/cOhQb8papxpaW4/Hy1NPeeMy\n5Wr1UI3xnPv23cTVq6O4XA18/evHeOed72XtOpuOVPMqCMJS4CPmQguxuqDFug8T642a9RsEfh9l\nEf03wCCqhMrNwKfAOmJ1SR+N9qeRWw3lpaMNZcXUrtdHbJ6bgFPErsU4N9p8bQbWA3tS9LETMK9Z\nLQhC7pSjzui/BXah7uZUQlQQBAtjVgO0lH2kcjU09uF01mZ0R/R4vGzffoDt219Iiptsa+sClNBs\nb9fcZu3EhKgNcODxtHHffS8kxJ26dHEZi1lNzqir9a+11Y7VcLubmZ2d5NVXd0Tfq+y08/OXAVWS\n5Z13vpeT62w6yl2ORRBWNtp3xWbUT6Jiuxj3k9mV1wu8AoyhEv0EULGTR1ElV4x1SfuJd3qLoLLr\n3oK1OIcSnWtQ38OaiHSgxPYTKFfcdlTSIm1utPn6BUpsGufMGJO8p2QjFwQhewoRo/8vyin/NeAd\n4G+LMiKhZCS6NAjlwyprUYykKLn0kUp0Gfvw+fbkLcyGhvyEwzNUVbVz330vcN99B/B42ohPIBQB\nFgiFzjE8vAu73aW737a1/a7eqrq6DY+nDbvdGM0Qe51YNuWaa3qw2VzRREphzp49zujoQZ599npO\nndpPIHBUt9hqc1Wsch2FilkrYZV7o9RUQqmWlbIWhZNO/BQDL8rlN9t+jd9Nm6L/QyxJkhdVma/d\nsP0MKgGSFQkQ/x2+APSgBOjx6P5dhv2x+qzJZCPs0yP3hbWQ9ah8ChGj1wMdqEeBm4HvFmVEgiAs\nGcWwqBWrj/r6DTgcHg4ffjTvH+fB4AgTE8eZnQ0wPLwLj8erW0rd7iba2+/W2zoctYRCUzz44GFd\ncJ49e1y37k5PnyYUOpeQhTdWY8/YFuDq1TNEImEWF0PMz09rZyEUmtTL3LS0bKajY6c+V0aL8N69\nGy0rSoTiUwyvBMEqpBM/S4Ef5X6qJfHpR2WR7QBqgQ+BloTxaYLUKMz6KazIwlJRg7Kaallzc8m8\nXO61EgQhkUJjRrNBYkbzISLlAAAWz0lEQVQFQciIsY6ix9NKW9vtcTGo2cSmarGTHk8rXu+NuFwN\nbN36Q71UCsDzz2/i8uVRNNfczs4+5uYuJ8Vcan2ZxZoC2GyOaNmX29m2bZ9e/9PYvqqqjdnZc4AD\nm83G6tVfZfv2A/rYY+dAH0ti/chixPUK1kPifIXi4cO8Nqtxu0YbSrz1Yy7ItgHGuslNxERfOalD\n5cpsAyZQmXbrUPGxz1AacelHSsAIQvaUI2ZUEAQhJ1K5Jg4N+blwQWU4VBbLySSLUTaWJM1K6/Xe\nqLvJGkuleDxe6uquw1gSZuvW3UnW3aEhP3Nzl7DZXJgLUReRyALh8EXGxwc4dsyv99Hefle0jZOF\nhTm9j0hknkDgaNzYu7v7qa5WrnKp3JzFgrY8kThfoXikqs2aGPdeh7IompU0+RJKbP3CsK0elRLE\nCtbSWZTwvI5YyZfLwG9QVuBcyulkS2ElYARByA4RoysI8au3Dit1LVIJq2BwRHdlXVxUxddbWjbH\nibNsYlO12EmXqyFlW60ft1uVbzl0qJe/+qvtcZlsNXffSET96Glu3qTXPHU663C7G/T+mps3xdXy\nrK/v0LP4hsMX9T6MbY3j/eY3P0orSooR11tJrJR7oxLifFfKWlQC6dciVRxkP8qSqD1QM2YWPwbc\nRUzEBYCLxMdmTqNql85TfuZRFtt/Tth+hZhgvJV4d+VCMRf5cl9YC1mPyscKj7sEQVghpBJW2nan\ns07POFtXtz7uh3p3d3/W5U/M2mrurg6Hi46Onfh8e/TyLefOwY9/vIo1a+5m27Z9+niamzdRX38d\nPt9TADz77PWEQpPMz1+mpmYtbW234/PtYXj4Cd2VNhy+ZCgnA83NN1Nbuw673YXPtydp7JooMWJ0\nzTW6GVtZuAiCUC60OEiz7V3ESp84gGGUOA1E/wdl9XOZHG9F5gB39F87SkCDuj4PMbdkP+Zzkgv9\n0X60pE+CIJQCiRkVBGHJMNbcNAorbfvs7BTj4+lrjObD0JCfkyf36nVAtdjMxJhNbd/WrbtNx5kq\nzs8Y71pd3c7MTACXq5H29q9y773PpL0Os5hQY39mcaRC7kjsrbAyCRITVDcBo9HtdSg3V60+50WU\npfSfgLtRFlInqoz8oRR9l6s+qQs1Ps3iW4XKBqzVHK0DtgD7iBeREgMqCKUk35hRsYwKglASzH78\na1ZAs309PXtTitV0fWZDMDiiC1EtThSUBXXv3o3MzCgLgZZhFzC1VobDl6iubmfbtv1x5zZafLdt\n25+TJVNzXQY4dsxPT8/eFeeauxSYzbMgLH+MVtMOYmL036Ay7WpWPy/weXRfKypJ0DzxMaSJlMvQ\nEDa8rkFlC9ayAV8PTKJcehOto1oMKCb7BEEoFxIzuoIQv3rrsBLWIl3iHeO+n/70Vj2pEZA2ji6x\nz2xrNRrjRB9++B29fy1m87PPVgNOFhauMDY2oI93aMjP00+vYc+eZk6e3MvZs8eZmVFlY7T9L77o\nY3ExTEdHLw888Br19R05xQKaCc9iJLephDqWZpTq3hCBnzsr4XuqUsh/LYxlX6qj27pQGWhTlTgJ\np3htJVqAdcDXgcdQ1wdwe/RfJ/BzVHZg7fsvVaKn3JD7wlrIelQ+IkYFQSgJxh//Dkd1nDAy7qup\nWZu1wEwUFNlmmq2ubouWi7kNt7sxbp/H441mtFVxnkbLaTA4wsxMgLm5Kd2y6vG0cvnyOK++uoML\nFz4kEDjK+PgADocrL+FoVmO1GMltJAtvPJK9VliZGDPC1mGe6CiR2wyvbwZ2Yr2fizcB9wPPE7u+\nFpQABfV9fhFlIf0iSqz+EHX9F1FZeduA00s5aEEQTJCYUUEQSoLR5VZLFATJMZlafU4tDjOxrdGd\nMhQK8vzzt1JTsxa3u4FIJMzYWOYY00wxmMaaomvW3MV99x1IqDUKTU030dDQyczMJBMTxwGoqmpn\ndjaQd4yr5nZ84cL7ejbhYsWISh1LQRCUCNMSGBlFqB/4CSoRkBd4G+XG60e5vX5MzILqRSUNspqV\nNNeYVa0Gq5dY4qN1xNyTBUEoBKkzKgiCpTBa9xItmsZ9iRardO6Uqk7oeiYmVA1Rp7M2K2tXJhdN\nFX/aCixw5kysFmh3dz91dR243S1UV6/G59ujl3Vpbe3iG994syBr2+nTLxEIHNWFaDFdSMUSKAjF\npTJd31OVfRlBlW1ZAM6jkhdp24+jYkZro8f4URZSq5FOiLoT3jcRc83VMgfXAL/M4jxGV+dKWXdB\nqBxEjK4gxK/eOqy0tUgnjBJdUjOJKKOw9Pn2ZOXOmqnPN954l7a22/V+NUGoxO91zM2dZ3xcxZIa\n+8o1PjSRhYWQ/rqmZm1RhWMl1LE0Y6XdG1ZG1iKecrq+578WWgKjxO+BmoTXd6EE1wfRbca4yhGU\n5VTjJpbGsS5fGoDNxAvSBZQoDwJvoSyiH6KswWYYBeiHxFyB/XJfWAxZj8pHxKggCEBpn/qnEkZm\n5xwefoKrVyf0+MlE8rH4ZSPMVFxpG253fJt0Vt1CaW1VsVnNzZvo6/sgZZ+VaZERhOXF8kqC1Y8S\nWmtRYusMSnBNooSa0ZKqCdfNqPjRIWCNoa96YFXph5ySRGF8CVVPVatJqm3TMux2oFxzUwlRiI+1\nPRndVljiI0EQzJGYUUEQgMxxlUt1znLV2Ex13kzlZgoh276l7qgglJ9SfheUn1SxpRCrVVqNSvhT\ng7IunlviMeZDEzAVfb0JOEJ29UWN87Ef2EWsDI4gCGZIzKggCAVRjqf+Zucsl/Uh1XlL6e6abd/L\nyyIjCJVJpbq+m+NHWTebUeVPtEyzmhA1uqmCcvU9TcxaWAoPjWx/kq6Kjqsq+r6e5BhRjSnAE22f\nrRCF+FjbDlKXwREEoVBEjK4gxK/eOlhxLcpR29LsnKVMvJNqfIODg2VL+JPNnKUa23J037XivbFS\nkbWwDmotCkmkk3jsCBBAibUBlOVvL/BEtN1+jHGSCmOdzjujrxtQNT3N+LLJtibgloRt9UA7sRqh\nmdiIyvKrlaCZJj6ZUaJhJgT8Oot+zQR48t8CuS+shaxH5SNiVBAEoDhP/XNN8GF2zlJaH9KNr1xW\nj2zmLNXYpJaoIKwkjHGMud7viccaExhtIj5Z0VFirq3GOEmjtfBA9PVplIBLZFX0WCPtwClUnCrE\nYlA/Q8WsNpv0Y+bxdxS4AThh2GYsOxNBZcx1GbYFUCI2nYgvZH4FQciXVI+zhGWIz+cr9xCEKMt1\nLfJ1J9XqbTqdNdEyK6URhKnGV871KMQFdzm67y7Xe6MSkbWwDmotjJbJXO93s2O/gxJue4hZALVk\nPS6gjnjLoJaZF5RYmwAeBf4B2IISfBoLwEuG967ouVaj3GurgFHgU5So3YcSuzcQH4tqlnOk1tBG\nqzV6c/T8E9FxX47u96Aso0T3+w3XkEim+fUDI/h8NSgBL267VkC+pyofSWAkCELRyDfBx1Il6AmF\ngjz//K3U1KzF7W4oqfDNZUz5JkVZ3glVBEGIR0sklE8inWyPvQtVZ9RIH8kCzoeyImr7d6NiUGdR\nTneLhrZOlGuusTxMIk5iFtYBYiLT2Fc7cAdwJdrGKDp3okS1n5jrcRfwReB5lOV0E0q0akmY+jFP\n1JRqjhKvWRLJCYIRSWAkZET86q3Dcl2LfF1dl8rCNzz8BKHQBSYmjse5tpZzPQpxD15eCVUUy/Xe\nqERkLayDWotUNUOzIdtjG6L/ugzvnzRpl2hF9BKL4dTEY23033lgLMN554kJzA3Rf419dQEfodyD\n96HE4BbDvj3ErlHb/xrK/Vdz4b2O+CRMia64meZIXfPg4A1IiRfrIN9TlY+IUUEQys5SJQ86ffol\nwuGLALjd3mXj2ioIglActLjQW6PvL6GSG6VqZywDczr6rx1oJRYz2gW8ibJeahlw64CWhD6dwF8D\n61FJiYj2vTPhPEbRuQEVc9qJygocJF5UGkXzUxTm6qxd839DXHQFoXiIm64gCCuGPXuamZtTiTmu\nvXYH99//SplHJAiCYEXS1R1NRaKLby/Kwmp0ezW26QVeR8V5auwE5qLnbgLeQZVWUfGaye61PmKu\nsxDvPusHPkTFwb4Z7acQV2dBENKRr5uuJDASBGHZkpgYqa3tNsbGBmhp2cy99z5T7uEJgrDiSCWq\nrEY/uYu2BsPrWlQ8576E47U2mqUSVOZdzZXWluLcWqZbovu80W1vGvr+CvHWzhFiwlcrXWNMwiQI\nghUQN90VhPjVWwdZi6UhsfRJT88+Ojv7ePDBX8S5A8t6WAdZC+sga1EK8isfsvRrkWt8qh/l0rsK\nZdHUkgwlXmOie+8TxGJLb0IJVLNzJ7rXvoSax5ChzRczHFMc5L6wFrIelY+IUUEQli2JiZGWY8If\nQRAqidIIpPKjWSEnUEIUzK8xUWiOEKv9+QVSi99EEXshYb+WWTeY5hhBEKyIxIwKglB2SlVnVEqf\nCIJgLZZrzKIWY6qxDvgXMl9jqtjUTO7MTcSEp4uYm6+UXBGEcpFvzKiIUUEQys5S1RkVBEEQciWb\nONcgsBEIkH3SI7MEQ9r2vcDF6PsNqAy7xvNvQ7kBb0Jl7tXqiooVVBDKhdQZFTIifvXWQdYinqWq\nM5qKpVyPoSE/L77o49VXdxAKBTMfsMKQe8M6yFpYh/KuRTZxrl5UHdBc3GI1194A8eVjRogJ0SZg\nrcn5tVqiR4ivK1osIepHZerdQbzrb7nXQkhE1qPyETEqCELZWao6o1YgMamSIAiCtck2zjXXpEep\n+tW2a6VdGkzaGc+V63mzIb9EU4Ig5I646QqCICwhr766g9HRg7S2dq0I8S0IQqVTqjjXVP0mbk93\n/lKVysmnzqogrGwkZlQQBKECkKRKgiAIuZBOcPqI1R8tZvKi5ZpoShBKh8SMChkRv3rrIGthLZZy\nPaS8THrk3rAOshbWYWWvRTqX2VKVyknt+ruy18J6yHpUPiJGBUEQBEEQBIuSTnBKLVFBqHTETVcQ\nBEEQBEGwKOIyKwiVgMSMCoIgCIIgCIIgCEuOxIwKGRG/eusga2EtZD2sg6yFdZC1sA6VvRapa3ZW\n4rkqey2WH7IelY+IUUEQBEEQBKFELGXNTqkPKgiVhrjpCoIgCIIgCCViKWt2Sn1QQSgXEjMqCIIg\nCIIgWIylTEBUzHOlq28qCEIiEjMqZET86q2DrIW1kPWwDrIW1kHWwjpU9lqkrtlp7XOZu/xW9los\nP2Q9Kh9nuQcgCIKw3Bga8hMMjuB01tDd3Y/HI0/UBUEQKot09U0FQSgW4qYrCIJQZF580UcgcBSA\nzs4+enr2lnlEgiBUBuIaah2kvqkg5EK+brpiGRUEQSgyTqd6ot7a2sXWrfJEXRCEbNFcQ0EJIXmQ\nVT40l19BEEqJxIyuIMSv3jrIWliLYq9Hd3c/nZ19PPDAa+KimyNyb1iH/7+9ewuR5CrjAP4XEwO6\nYgjqasyGxRsqglEhBi+wYCKJ4O0hj4II4oOgb0ZdwQcRZH0IiORRjEgU8RIUE3CVPIjiiphZL7gx\nBkeiJlHRlUgERdeHU2GGZrq7+lLVX9O/HwxT01Uzc5h/fdN9us45JYtNOHpoqCzqkEUt8th+OqMA\na3bFFVfmxhu/qiMKLOiuJLfGbUmAXWHOKAAAAEtzaxcAAAC2hs7oDjGuvg5Z1CKPOmRRhyzqkEUd\nsqhFHttPZxQAAIDRmTMKAADA0swZBQAAYGvojO4Q4+rrkEUt8qhDFnXIog5Z1CGLWuSx/XRGAQAA\nGJ05owAAACzNnFEAAAC2hs7oDjGuvg5Z1CKPOmRRhyzqkEUdsqhFHttPZxQAAIDRmTMKAADA0swZ\nBQAAYGvojO4Q4+rrkEUt8qhDFnXIog5Z1CGLWuSx/XRGAQAAGJ05owAAACzNnFEAAAC2hs7oDjGu\nvg5Z1CKPOmRRhyzqkEUdsqhFHttPZxQAAIDRmTMKAADA0swZBQAAYGvojO4Q4+rrkEUt8qhDFnXI\nog5Z1CGLWuSx/XRGAQAAGJ05owAAACzNnFEAAAC2hs7oDjGuvg5Z1CKPOmRRhyzqkEUdsqhFHttP\nZxQAAIDRmTMKAADA0swZBQAAYGus0hn9ZJLzSfaSfD/JibW0iMEYV1+HLGqRRx2yqEMWdciiDlnU\nIo/tt0pn9EySVyW5LsndST6xlhYxmL29vU03gY4sapFHHbKoQxZ1yKIOWdQij+23Smf08UPbx5L8\ndcW2MLCLFy9uugl0ZFGLPOqQRR2yqEMWdciiFnlsv8tW/P5PJXl3kieS3LB6cwAAANgF866Mnk3y\niyM+3tbtP53k2iRfSHL7ME1kXfb39zfdBDqyqEUedciiDlnUIYs6ZFGLPLbfum7tcm2Se5K88oh9\nv03yojX9HgAAAGp5KMmLF/2mVYbpviTJg932O5LcP+W4hRsFAAAA03wtbcjuXpKvJ3nuZpsDAAAA\nAAAAMKLPJPl1kvNJvpHkWVOO20/y87ThvT8ZpWW7p28WNye5kDbs+rZxmrZzbk3yqyT/TfKaGcft\nR10MrW8W6mIcV6UtlvebJN9NcuWU4/ajNobS51z/bLf/fJJXj9SuXTQvi1NJ/pFWB/cn+fhoLdst\nn0/yWNoIwGnUxHjm5XEq6mIsJ5Lcl/Y66pdJPjjluI3Wx005WKX3093HUX6X9iKE4fTJ4qlpi0yd\nTHJ52rDrl4/RuB3zsiQvTSvgWR0gdTG8Plmoi/GcSfLhbvu2eM4YW59z/a1pixQmyeuS/Hisxu2Y\nPlmcSvKtUVu1m96U9gJ6WudHTYxrXh6noi7G8rwk13Xbx5I8kBWfM+bd2mUZZ5P8r9s+l+SaGceu\nazVfjtYni+vTnvz2k/wnyVfSFqRivS6kXfnpQ10Mq08W6mI8b09yZ7d9Z5J3zjhWbaxfn3P9cEbn\n0q5eHx+pfbuk7/8ddTC8HyT5+4z9amJc8/JI1MVYHk17oyxJ/pk2AvPqiWMWqo8hOqOHvTcHPeNJ\nl5J8L8lPk7xv4HYwPYsXJHn40Nd/6B5jM9RFDepiPMfThl+l+zztCUttDKPPuX7UMbPeaGY5fbK4\nlOT1aUPf7knyinGaxgQ1UYu62IyTaVesz008vlB9LHtrl7Npl2knfSzJt7vt00n+neSuKT/jDUke\nSfKc7uddSHvng8WsmsWlgdq1i/pkMY+6WI9Vs1AX6zUtj9MTX1/K9L+92hhG33N98qqDGlm/Pn/T\nn6XN2XoiyS1J7k6bdsD41EQd6mJ8x9LurPKhtCukk3rXx7Kd0Zvm7H9P2njhN8845pHu81+SfDNt\neIoXFotbNYs/phXwk06kvYPB4uZl0Ye6WI9Vs1AX6zUrj8fSOqqPJnl+kj9POU5tDKPPuT55zDXd\nY6xXnyweP7R9b5I70uZS/23YpjFBTdSiLsZ1edptPb+U1vGftPH6uDlthaVnzzjm6Ume2W0/I8kP\nk7xl4Hbtoj5ZXJbkobRL7U+LhVqGdl+S107Zpy7GNSsLdTGeMzlYNfQjOXoBI7UxnD7n+uHFKG6I\nxVqG0ieL4zm44nB92vxShnEy/RYwUhPjOJnpeaiL8TwlyReT3D7jmI3Xx4NJfp+D5ZXv6B6/Osl3\nuu0Xpv2T3UtbFvijI7dxV/TJImlDGh5IWzhBFsN4V9r4+X+lXQG6t3tcXYyvTxaJuhjLVWlzQSdv\n7aI2xnPUuf7+7uNJn+v2n8/sFcFZzbwsPpBWA3tJfpT2Qo/1+3KSP6VNcXo4bd0NNbE58/JQF+N5\nY9riqHs56F/cEvUBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAi/g9WwoSRDa/NUgAA\nAABJRU5ErkJggg==\n", "text": [ - "" + "" ] } ], @@ -151,4 +155,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/examples/web_demo/app.py b/examples/web_demo/app.py index c667ea94c11..e456526fa55 100644 --- a/examples/web_demo/app.py +++ b/examples/web_demo/app.py @@ -10,12 +10,11 @@ import tornado.httpserver import numpy as np import pandas as pd -from PIL import Image +from PIL import Image as PILImage import cStringIO as StringIO import urllib -import exifutil - import caffe +import exifutil REPO_DIRNAME = os.path.abspath(os.path.dirname(__file__) + '/../..') UPLOAD_FOLDER = '/tmp/caffe_demos_uploads' @@ -81,7 +80,7 @@ def classify_upload(): def embed_image_html(image): """Creates an image embedded in HTML base64 format.""" - image_pil = Image.fromarray((255 * image).astype('uint8')) + image_pil = PILImage.fromarray((255 * image).astype('uint8')) image_pil = image_pil.resize((256, 256)) string_buf = StringIO.StringIO() image_pil.save(string_buf, format='png') @@ -115,18 +114,15 @@ class ImagenetClassifier(object): "File for {} is missing. Should be at: {}".format(key, val)) default_args['image_dim'] = 256 default_args['raw_scale'] = 255. + default_args['gpu_mode'] = False def __init__(self, model_def_file, pretrained_model_file, mean_file, raw_scale, class_labels_file, bet_file, image_dim, gpu_mode): logging.info('Loading net and associated files...') - if gpu_mode: - caffe.set_mode_gpu() - else: - caffe.set_mode_cpu() self.net = caffe.Classifier( model_def_file, pretrained_model_file, image_dims=(image_dim, image_dim), raw_scale=raw_scale, - mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0) + mean=np.load(mean_file), channel_swap=(2, 1, 0), gpu=gpu_mode ) with open(class_labels_file) as f: @@ -210,9 +206,8 @@ def start_from_terminal(app): opts, args = parser.parse_args() ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu}) - # Initialize classifier + warm start by forward for allocation + # Initialize classifier app.clf = ImagenetClassifier(**ImagenetClassifier.default_args) - app.clf.net.forward() if opts.debug: app.run(debug=True, host='0.0.0.0', port=opts.port) diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 472cc1841f7..6ccbb8bf645 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -1,17 +1,11 @@ #ifndef CAFFE_BLOB_HPP_ #define CAFFE_BLOB_HPP_ -#include -#include -#include - #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" -const int kMaxBlobAxes = INT_MAX; - namespace caffe { /** @@ -21,20 +15,21 @@ namespace caffe { * * TODO(dox): more thorough description. */ -template +template class Blob { public: Blob() - : data_(), diff_(), count_(0), capacity_(0) {} - - /// @brief Deprecated; use Blob(const vector& shape). + : data_(), + diff_(), + num_(0), + channels_(0), + height_(0), + width_(0), + count_(0), + capacity_(0){ + } explicit Blob(const int num, const int channels, const int height, - const int width); - explicit Blob(const vector& shape); - - /// @brief Deprecated; use Reshape(const vector& shape). - void Reshape(const int num, const int channels, const int height, - const int width); + const int width, DeviceContext device_context); /** * @brief Change the dimensions of the blob, allocating new memory if * necessary. @@ -49,133 +44,35 @@ class Blob { * an error; either Net::Forward or Net::Reshape need to be called to * propagate the new input shape to higher layers. */ - void Reshape(const vector& shape); - void Reshape(const BlobShape& shape); - void ReshapeLike(const Blob& other); - inline string shape_string() const { - ostringstream stream; - for (int i = 0; i < shape_.size(); ++i) { - stream << shape_[i] << " "; - } - stream << "(" << count_ << ")"; - return stream.str(); + void Reshape(const int num, const int channels, const int height, + const int width, DeviceContext device_context); + void ReshapeLike(const Blob& other, DeviceContext device_context); + inline int num() const { + return num_; } - inline const vector& shape() const { return shape_; } - /** - * @brief Returns the dimension of the index-th axis (or the negative index-th - * axis from the end, if index is negative). - * - * @param index the axis index, which may be negative as it will be - * "canonicalized" using CanonicalAxisIndex. - * Dies on out of range index. - */ - inline int shape(int index) const { - return shape_[CanonicalAxisIndex(index)]; + inline int channels() const { + return channels_; } - inline int num_axes() const { return shape_.size(); } - inline int count() const { return count_; } - - /** - * @brief Compute the volume of a slice; i.e., the product of dimensions - * among a range of axes. - * - * @param start_axis The first axis to include in the slice. - * - * @param end_axis The first axis to exclude from the slice. - */ - inline int count(int start_axis, int end_axis) const { - CHECK_LE(start_axis, end_axis); - CHECK_GE(start_axis, 0); - CHECK_GE(end_axis, 0); - CHECK_LE(start_axis, num_axes()); - CHECK_LE(end_axis, num_axes()); - int count = 1; - for (int i = start_axis; i < end_axis; ++i) { - count *= shape(i); - } - return count; + inline int height() const { + return height_; } - /** - * @brief Compute the volume of a slice spanning from a particular first - * axis to the final axis. - * - * @param start_axis The first axis to include in the slice. - */ - inline int count(int start_axis) const { - return count(start_axis, num_axes()); - } - - /** - * @brief Returns the 'canonical' version of a (usually) user-specified axis, - * allowing for negative indexing (e.g., -1 for the last axis). - * - * @param index the axis index. - * If 0 <= index < num_axes(), return index. - * If -num_axes <= index <= -1, return (num_axes() - (-index)), - * e.g., the last axis index (num_axes() - 1) if index == -1, - * the second to last if index == -2, etc. - * Dies on out of range index. - */ - inline int CanonicalAxisIndex(int axis_index) const { - CHECK_GE(axis_index, -num_axes()) - << "axis " << axis_index << " out of range for " << num_axes() - << "-D Blob with shape " << shape_string(); - CHECK_LT(axis_index, num_axes()) - << "axis " << axis_index << " out of range for " << num_axes() - << "-D Blob with shape " << shape_string(); - if (axis_index < 0) { - return axis_index + num_axes(); - } - return axis_index; + inline int width() const { + return width_; } - - /// @brief Deprecated legacy shape accessor num: use shape(0) instead. - inline int num() const { return LegacyShape(0); } - /// @brief Deprecated legacy shape accessor channels: use shape(1) instead. - inline int channels() const { return LegacyShape(1); } - /// @brief Deprecated legacy shape accessor height: use shape(2) instead. - inline int height() const { return LegacyShape(2); } - /// @brief Deprecated legacy shape accessor width: use shape(3) instead. - inline int width() const { return LegacyShape(3); } - inline int LegacyShape(int index) const { - CHECK_LE(num_axes(), 4) - << "Cannot use legacy accessors on Blobs with > 4 axes."; - CHECK_LT(index, 4); - CHECK_GE(index, -4); - if (index >= num_axes() || index < -num_axes()) { - // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse - // indexing) -- this special case simulates the one-padding used to fill - // extraneous axes of legacy blobs. - return 1; - } - return shape(index); + inline int count() const { + return count_; } - - inline int offset(const int n, const int c = 0, const int h = 0, - const int w = 0) const { + inline int offset(const int n, const int c = 0, const int h = 0, const int w = + 0) const { CHECK_GE(n, 0); - CHECK_LE(n, num()); - CHECK_GE(channels(), 0); - CHECK_LE(c, channels()); - CHECK_GE(height(), 0); - CHECK_LE(h, height()); - CHECK_GE(width(), 0); - CHECK_LE(w, width()); - return ((n * channels() + c) * height() + h) * width() + w; - } - - inline int offset(const vector& indices) const { - CHECK_LE(indices.size(), num_axes()); - int offset = 0; - for (int i = 0; i < num_axes(); ++i) { - offset *= shape(i); - if (indices.size() > i) { - CHECK_GE(indices[i], 0); - CHECK_LT(indices[i], shape(i)); - offset += indices[i]; - } - } - return offset; + CHECK_LE(n, num_); + CHECK_GE(channels_, 0); + CHECK_LE(c, channels_); + CHECK_GE(height_, 0); + CHECK_LE(h, height_); + CHECK_GE(width_, 0); + CHECK_LE(w, width_); + return ((n * channels_ + c) * height_ + h) * width_ + w; } /** * @brief Copy from a source Blob. @@ -186,25 +83,17 @@ class Blob { * of other (and die otherwise); if true, Reshape this Blob to other's * shape if necessary */ - void CopyFrom(const Blob& source, bool copy_diff = false, - bool reshape = false); + void CopyFrom(const Blob& source, DeviceContext device_context, bool copy_diff = false, + bool reshape = false); inline Dtype data_at(const int n, const int c, const int h, - const int w) const { - return cpu_data()[offset(n, c, h, w)]; + const int w) const { + return *(cpu_data() + offset(n, c, h, w)); } inline Dtype diff_at(const int n, const int c, const int h, - const int w) const { - return cpu_diff()[offset(n, c, h, w)]; - } - - inline Dtype data_at(const vector& index) const { - return cpu_data()[offset(index)]; - } - - inline Dtype diff_at(const vector& index) const { - return cpu_diff()[offset(index)]; + const int w) const { + return *(cpu_diff() + offset(n, c, h, w)); } inline const shared_ptr& data() const { @@ -227,7 +116,7 @@ class Blob { Dtype* mutable_cpu_diff(); Dtype* mutable_gpu_diff(); void Update(); - void FromProto(const BlobProto& proto, bool reshape = true); + void FromProto(const BlobProto& proto, DeviceContext device_context); void ToProto(BlobProto* proto, bool write_diff = false) const; /// @brief Compute the sum of absolute values (L1 norm) of the data. @@ -246,7 +135,7 @@ class Blob { /** * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the - * data_ of Blob other -- useful in Layer%s which simply perform a copy + * data_ of Blob other -- useful in Layer&s which simply perform a copy * in their Forward pass. * * This deallocates the SyncedMemory holding this Blob's data_, as @@ -255,7 +144,7 @@ class Blob { void ShareData(const Blob& other); /** * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the - * diff_ of Blob other -- useful in Layer%s which simply perform a copy + * diff_ of Blob other -- useful in Layer&s which simply perform a copy * in their Forward pass. * * This deallocates the SyncedMemory holding this Blob's diff_, as @@ -263,18 +152,26 @@ class Blob { */ void ShareDiff(const Blob& other); - bool ShapeEquals(const BlobProto& other); + /** + * @brief Return the device context to which this blob and shared memory belongs + */ + DeviceContext device_context(); protected: shared_ptr data_; shared_ptr diff_; - vector shape_; + int num_; + int channels_; + int height_; + int width_; int count_; int capacity_; + DeviceContext device_context_; - DISABLE_COPY_AND_ASSIGN(Blob); -}; // class Blob +DISABLE_COPY_AND_ASSIGN(Blob); +}; +// class Blob -} // namespace caffe +}// namespace caffe #endif // CAFFE_BLOB_HPP_ diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp index 3c829f2f9b0..d7c7984697b 100644 --- a/include/caffe/caffe.hpp +++ b/include/caffe/caffe.hpp @@ -15,5 +15,6 @@ #include "caffe/util/benchmark.hpp" #include "caffe/util/io.hpp" #include "caffe/vision_layers.hpp" +#include "caffe/greentea/greentea.hpp" #endif // CAFFE_CAFFE_HPP_ diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 6cf80a37bc1..af29060c054 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -5,7 +5,6 @@ #include #include -#include #include #include // NOLINT(readability/streams) #include // NOLINT(readability/streams) @@ -18,6 +17,8 @@ #include "caffe/util/device_alternate.hpp" +#include "caffe/greentea/greentea.hpp" + // gflags 2.1 issue: namespace google was changed to gflags without warning. // Luckily we will be able to use GFLAGS_GFAGS_H_ to detect if it is version // 2.1. If yes, we will add a temporary solution to redirect the namespace. @@ -66,7 +67,7 @@ private:\ #define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet" // See PR #1236 -namespace cv { class Mat; } +namespace cv {class Mat;} namespace caffe { @@ -150,6 +151,22 @@ class Caffe { // Prints the current GPU status. static void DeviceQuery(); + // Get the default device + static DeviceContext& GetDefaultDeviceContext(); + + // Prints info about all devices + static void EnumerateDevices(); + // Prepares contexts for devices to use + static void SetDevices(std::vector device_ids); + + // Get a device context + static DeviceContext& GetDeviceContext(int id); + + // Get a device OpenCL program +#ifdef USE_GREENTEA + viennacl::ocl::program & GetDeviceProgram(int id); +#endif + protected: #ifndef CPU_ONLY cublasHandle_t cublas_handle_; @@ -160,6 +177,15 @@ class Caffe { Brew mode_; static shared_ptr singleton_; + vector device_contexts_; + + DeviceContext default_device_context_; + +#ifdef USE_GREENTEA + vector ocl_programs_; + viennacl::ocl::program default_ocl_program_; +#endif + private: // The private constructor to avoid duplicate instantiation. Caffe(); diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index cae1c3e4ee6..551d2706755 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -25,7 +25,7 @@ namespace caffe { * * NOTE: does not implement Backwards operation. */ -template +template class ArgMaxLayer : public Layer { public: /** @@ -37,15 +37,22 @@ class ArgMaxLayer : public Layer { * if set, output a vector of pairs (max_ind, max_val) for each image. */ explicit ArgMaxLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "ArgMax"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "ArgMax"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: /** @@ -60,10 +67,11 @@ class ArgMaxLayer : public Layer { * @f$ (for @f$ K = 1 @f$). */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /// @brief Not implemented (non-differentiable function) virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { NOT_IMPLEMENTED; } bool out_max_val_; @@ -74,19 +82,26 @@ class ArgMaxLayer : public Layer { * @brief Takes at least two Blob%s and concatenates them along either the num * or channel dimension, outputting the result. */ -template +template class ConcatLayer : public Layer { public: explicit ConcatLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Concat"; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Concat"; + } + virtual inline int MinBottomBlobs() const { + return 2; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: /** @@ -99,24 +114,24 @@ class ConcatLayer : public Layer { * - K @f$ (N \times C \times H \times W) @f$ * the inputs @f$ x_K @f$ * @param top output Blob vector (length 1) - * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or - * @f$ (N \times KC \times H \times W) @f$ if axis == 1: + * -# @f$ (KN \times C \times H \times W) @f$ if concat_dim == 0, or + * @f$ (N \times KC \times H \times W) @f$ if concat_dim == 1: * the concatenated output @f$ * y = [\begin{array}{cccc} x_1 & x_2 & ... & x_K \end{array}] * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the concatenate inputs. * * @param top output Blob vector (length 1), providing the error gradient with * respect to the outputs - * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or - * @f$ (N \times KC \times H \times W) @f$ if axis == 1: + * -# @f$ (KN \times C \times H \times W) @f$ if concat_dim == 0, or + * @f$ (N \times KC \times H \times W) @f$ if concat_dim == 1: * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ * with respect to concatenated outputs @f$ y @f$ * @param propagate_down see Layer::Backward. @@ -133,14 +148,19 @@ class ConcatLayer : public Layer { * @f$ */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); + Blob col_bob_; int count_; - int num_concats_; - int concat_input_size_; - int concat_axis_; + int num_; + int channels_; + int height_; + int width_; + int concat_dim_; }; /** @@ -149,29 +169,38 @@ class ConcatLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class EltwiseLayer : public Layer { public: explicit EltwiseLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Eltwise"; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Eltwise"; + } + virtual inline int MinBottomBlobs() const { + return 2; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); EltwiseParameter_EltwiseOp op_; vector coeffs_; @@ -190,17 +219,24 @@ class EltwiseLayer : public Layer { * and in Backward, the diff pointer of the bottom Blob to that of the top Blob * (see Blob::ShareDiff). */ -template +template class FlattenLayer : public Layer { public: explicit FlattenLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Flatten"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Flatten"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: /** @@ -212,7 +248,9 @@ class FlattenLayer : public Layer { * the outputs -- i.e., the (virtually) copied, flattened inputs */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the concatenate inputs. @@ -224,7 +262,13 @@ class FlattenLayer : public Layer { * gradient is (virtually) copied */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + int count_; }; /** @@ -233,29 +277,38 @@ class FlattenLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class InnerProductLayer : public Layer { public: explicit InnerProductLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "InnerProduct"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "InnerProduct"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); int M_; int K_; @@ -269,27 +322,36 @@ class InnerProductLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class MVNLayer : public Layer { public: explicit MVNLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "MVN"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "MVN"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); Blob mean_, variance_, temp_; @@ -301,29 +363,40 @@ class MVNLayer : public Layer { * @brief Ignores bottom blobs while producing no top blobs. (This is useful * to suppress outputs during testing.) */ -template +template class SilenceLayer : public Layer { public: explicit SilenceLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} + const vector*>& top) { + } - virtual inline const char* type() const { return "Silence"; } - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 0; } + virtual inline const char* type() const { + return "Silence"; + } + virtual inline int MinBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 0; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top) {} + const vector*>& top) { + } // We can't define Forward_gpu here, since STUB_GPU will provide // its own definition for CPU_ONLY mode. virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; /** @@ -331,31 +404,37 @@ class SilenceLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class SoftmaxLayer : public Layer { public: explicit SoftmaxLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Softmax"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Softmax"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); - int outer_num_; - int inner_num_; - int softmax_axis_; /// sum_multiplier is used to carry out sum using BLAS Blob sum_multiplier_; /// scale is an intermediate Blob to hold temporary results. @@ -369,25 +448,25 @@ class SoftmaxLayer : public Layer { */ template class CuDNNSoftmaxLayer : public SoftmaxLayer { - public: +public: explicit CuDNNSoftmaxLayer(const LayerParameter& param) - : SoftmaxLayer(param), handles_setup_(false) {} + : SoftmaxLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNSoftmaxLayer(); - protected: +protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensorDescriptor_t bottom_desc_; - cudnnTensorDescriptor_t top_desc_; + cudnnHandle_t handle_; + cudnnTensor4dDescriptor_t bottom_desc_; + cudnnTensor4dDescriptor_t top_desc_; }; #endif @@ -397,27 +476,36 @@ class CuDNNSoftmaxLayer : public SoftmaxLayer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class SplitLayer : public Layer { public: explicit SplitLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Split"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Split"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int MinTopBlobs() const { + return 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); int count_; }; @@ -428,34 +516,46 @@ class SplitLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class SliceLayer : public Layer { public: explicit SliceLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Slice"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 2; } + virtual inline const char* type() const { + return "Slice"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int MinTopBlobs() const { + return 2; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); + Blob col_bob_; int count_; - int num_slices_; - int slice_size_; - int slice_axis_; + int num_; + int channels_; + int height_; + int width_; + int slice_dim_; vector slice_point_; }; diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 2bb9d948169..5cd48d3a3f8 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -171,8 +171,6 @@ class HDF5DataLayer : public Layer { unsigned int current_file_; hsize_t current_row_; std::vector > > hdf_blobs_; - std::vector data_permutation_; - std::vector file_permutation_; }; /** @@ -193,8 +191,7 @@ class HDF5OutputLayer : public Layer { const vector*>& top) {} virtual inline const char* type() const { return "HDF5Output"; } - // TODO: no limit on the number of blobs - virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int MinBottomBlobs() const { return 1; } virtual inline int ExactNumTopBlobs() const { return 0; } inline std::string file_name() const { return file_name_; } @@ -208,13 +205,14 @@ class HDF5OutputLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - virtual void SaveBlobs(); + //virtual void SaveBlobs(); bool file_opened_; std::string file_name_; hid_t file_id_; Blob data_blob_; Blob label_blob_; + int current_batch_; }; /** diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 880356601a4..cbc1baa7036 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -16,7 +16,7 @@ namespace caffe { template class DataTransformer { public: - explicit DataTransformer(const TransformationParameter& param, Phase phase); + explicit DataTransformer(const TransformationParameter& param, Phase phase, DeviceContext device_context); virtual ~DataTransformer() {} /** @@ -107,6 +107,7 @@ class DataTransformer { Phase phase_; Blob data_mean_; vector mean_values_; + DeviceContext device_context_; }; } // namespace caffe diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index bb18e8e1e28..e483fe13c7e 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -21,7 +21,7 @@ class Filler { public: explicit Filler(const FillerParameter& param) : filler_param_(param) {} virtual ~Filler() {} - virtual void Fill(Blob* blob) = 0; + virtual void Fill(Blob* blob, DeviceContext &device_context) = 0; protected: FillerParameter filler_param_; }; // class Filler @@ -33,7 +33,7 @@ class ConstantFiller : public Filler { public: explicit ConstantFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob) { + virtual void Fill(Blob* blob, DeviceContext &device_context) { Dtype* data = blob->mutable_cpu_data(); const int count = blob->count(); const Dtype value = this->filler_param_.value(); @@ -52,7 +52,7 @@ class UniformFiller : public Filler { public: explicit UniformFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob) { + virtual void Fill(Blob* blob, DeviceContext &device_context) { CHECK(blob->count()); caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); @@ -67,7 +67,7 @@ class GaussianFiller : public Filler { public: explicit GaussianFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob) { + virtual void Fill(Blob* blob, DeviceContext &device_context) { Dtype* data = blob->mutable_cpu_data(); CHECK(blob->count()); caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), @@ -79,10 +79,11 @@ class GaussianFiller : public Filler { // These have num == channels == 1; width is number of inputs; height is // number of outputs. The 'sparse' variable specifies the mean number // of non-zero input weights for a given output. - CHECK_GE(blob->num_axes(), 1); - const int num_outputs = blob->shape(0); + CHECK_EQ(blob->num(), 1); + CHECK_EQ(blob->channels(), 1); + int num_outputs = blob->height(); Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); - rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); + rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int), device_context)); int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); for (int i = 0; i < blob->count(); ++i) { @@ -103,7 +104,7 @@ class PositiveUnitballFiller : public Filler { public: explicit PositiveUnitballFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob) { + virtual void Fill(Blob* blob, DeviceContext &device_context) { Dtype* data = blob->mutable_cpu_data(); DCHECK(blob->count()); caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); @@ -145,7 +146,7 @@ class XavierFiller : public Filler { public: explicit XavierFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob) { + virtual void Fill(Blob* blob, DeviceContext &device_context) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); Dtype scale = sqrt(Dtype(3) / fan_in); diff --git a/include/caffe/greentea/cl_kernels.hpp b/include/caffe/greentea/cl_kernels.hpp new file mode 100644 index 00000000000..c07c31db47c --- /dev/null +++ b/include/caffe/greentea/cl_kernels.hpp @@ -0,0 +1,13 @@ +// AUTOMATICALLY GENERATED FILE, DO NOT EDIT +#ifndef GREENTEA_CL_KERNELS_HPP_ +#define GREENTEA_CL_KERNELS_HPP_ +#include "caffe/greentea/greentea.hpp" +#include "viennacl/backend/opencl.hpp" +#include "viennacl/ocl/context.hpp" +#include "viennacl/ocl/device.hpp" +#include "viennacl/ocl/platform.hpp" +#include "viennacl/ocl/backend.hpp" +namespace caffe { +viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx); +} +#endif diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp new file mode 100644 index 00000000000..31426616f0a --- /dev/null +++ b/include/caffe/greentea/greentea.hpp @@ -0,0 +1,80 @@ +/* + * greentea.hpp + * + * Created on: Apr 5, 2015 + * Author: Fabian Tschopp + */ + +#ifndef CAFFE_GREENTEA_HPP_ +#define CAFFE_GREENTEA_HPP_ + +// Define ViennaCL/GreenTea flags +#ifdef USE_GREENTEA +#ifndef NDEBUG +#define NDEBUG +#endif + +#ifndef VIENNACL_WITH_OPENCL +#define VIENNACL_WITH_OPENCL +#endif + +#include "CL/cl.h" +#include "viennacl/ocl/context.hpp" +#include "viennacl/ocl/device.hpp" +#include "viennacl/ocl/platform.hpp" +#include "viennacl/ocl/backend.hpp" +#include "libviennacl/include/viennacl.hpp" +#include "viennacl/backend/opencl.hpp" +#include "viennacl/vector.hpp" +#endif + +namespace caffe { + +#ifdef USE_GREENTEA +template +cl_mem Subregion(cl_mem in, size_t off, size_t size); + +template +viennacl::vector WrapVector(cl_mem in); +#endif + +enum Backend { + BACKEND_CUDA, + BACKEND_OpenCL +}; + +class DeviceContext { + public: + DeviceContext(); + DeviceContext(int id, Backend backend); + Backend backend() const; + int id() const; + private: + int id_; + Backend backend_; +}; + +template +struct is_same { + static const bool value = false; +}; + +template +struct is_same { + static const bool value = true; +}; + +#ifdef USE_GREENTEA + +#define GREENTEA_BLAS_CHECK(condition) \ + ViennaCLStatus status = condition; \ + CHECK_EQ(status, ViennaCLSuccess) << "GreenTea ViennaCL BLAS ERROR"; + +// Macro to select the single (_s) or double (_d) precision kernel +#define CL_KERNEL_SELECT(kernel) is_same::value ? kernel "_s" : kernel "_d" + +#endif + +} + +#endif /* CAFFE_GREENTEA_HPP_ */ diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp new file mode 100644 index 00000000000..d428f453627 --- /dev/null +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -0,0 +1,53 @@ +/* + * greentea_im2col.hpp + * + * Created on: Apr 8, 2015 + * Author: fabian + */ + +#ifndef GREENTEA_IM2COL_HPP_ +#define GREENTEA_IM2COL_HPP_ + +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "viennacl/ocl/context.hpp" +#include "viennacl/ocl/device.hpp" +#include "viennacl/ocl/platform.hpp" +#include "viennacl/ocl/backend.hpp" +#include "viennacl/vector.hpp" + +namespace caffe { + +template +void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_im, const int channels, + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + cl_mem data_col); + +/*template + void im2col_gpu(const Dtype* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_col); + + template + void col2im_sk_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_im); + + template + void col2im_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im);*/ + +} + +#endif /* GREENTEA_IM2COL_HPP_ */ diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp new file mode 100644 index 00000000000..9d0329d112a --- /dev/null +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -0,0 +1,134 @@ +/* + * greentea_math_functions.hpp + * + * Created on: Apr 6, 2015 + * Author: fabian + */ + +#ifndef GREENTEA_MATH_FUNCTIONS_HPP_ +#define GREENTEA_MATH_FUNCTIONS_HPP_ + +// Define ViennaCL flags +#ifndef NDEBUG +#define NDEBUG +#endif + +#ifndef VIENNACL_WITH_OPENCL +#define VIENNACL_WITH_OPENCL +#endif + +#include "caffe/greentea/greentea.hpp" +#include "caffe/util/math_functions.hpp" +#include "viennacl/ocl/context.hpp" +#include "viennacl/ocl/device.hpp" +#include "viennacl/ocl/platform.hpp" +#include "viennacl/ocl/backend.hpp" +#include "viennacl/vector.hpp" + +namespace caffe { + +void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, viennacl::ocl::context &ctx); + +void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, viennacl::ocl::context &ctx); + + +template +void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); + +template +void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, + const int K, const Dtype alpha, const cl_mem A, + const cl_mem B, const Dtype beta, cl_mem C); + +/*template + void greentea_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, + const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, + Dtype* y); + + template + void greentea_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, + Dtype* Y); + + template + void greentea_gpu_axpby(const int N, const Dtype alpha, const Dtype* X, + const Dtype beta, Dtype* Y); + + + + template + void greentea_gpu_set(const int N, const Dtype alpha, Dtype *X); + + inline void greentea_gpu_memset(const size_t N, const int alpha, void* X) { + /* viennacl::m + #ifndef CPU_ONLY + CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT(caffe/alt_fn) + #else + NO_GPU; + #endif*/ +/*} + + template + void greentea_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X); + + template + void greentea_gpu_scal(const int N, const Dtype alpha, Dtype *X); + + template + void greentea_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); + + template + void greentea_gpu_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); + + template + void greentea_gpu_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); + + template + void greentea_gpu_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); + + template + void greentea_gpu_abs(const int n, const Dtype* a, Dtype* y); + + template + void greentea_gpu_exp(const int n, const Dtype* a, Dtype* y); + + template + void greentea_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); + + void greentea_gpu_rng_uniform(const int n, unsigned int* r); + + template + void greentea_gpu_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); + + template + void greentea_gpu_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, + Dtype* r); + + template + void greentea_gpu_rng_bernoulli(const int n, const Dtype p, int* r); + + template + void greentea_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out); + + template + uint32_t greentea_gpu_hamming_distance(const int n, const Dtype* x, + const Dtype* y); + + template + void greentea_gpu_asum(const int n, const Dtype* x, Dtype* y); + + template + void greentea_gpu_sign(const int n, const Dtype* x, Dtype* y); + + template + void greentea_gpu_sgnbit(const int n, const Dtype* x, Dtype* y); + + template + void greentea_gpu_fabs(const int n, const Dtype* x, Dtype* y); + + template + void greentea_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y);*/ + +} + +#endif /* GREENTEA_MATH_FUNCTIONS_HPP_ */ diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 2d13ef97c05..1a0d1bba737 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -11,19 +11,21 @@ #include "caffe/proto/caffe.pb.h" #include "caffe/util/device_alternate.hpp" +#include "caffe/greentea/greentea.hpp" + namespace caffe { /** * @brief An interface for the units of computation which can be composed into a * Net. * - * Layer%s must implement a Forward function, in which they take their input - * (bottom) Blob%s (if any) and compute their output Blob%s (if any). + * Layer&s must implement a Forward function, in which they take their input + * (bottom) Blob&s (if any) and compute their output Blob&s (if any). * They may also implement a Backward function, in which they compute the error - * gradients with respect to their input Blob%s, given the error gradients with - * their output Blob%s. + * gradients with respect to their input Blob&s, given the error gradients with + * their output Blob&s. */ -template +template class Layer { public: /** @@ -32,18 +34,21 @@ class Layer { * layer. */ explicit Layer(const LayerParameter& param) - : layer_param_(param) { - // Set phase and copy blobs (if there are any). - phase_ = param.phase(); - if (layer_param_.blobs_size() > 0) { - blobs_.resize(layer_param_.blobs_size()); - for (int i = 0; i < layer_param_.blobs_size(); ++i) { - blobs_[i].reset(new Blob()); - blobs_[i]->FromProto(layer_param_.blobs(i)); - } + : layer_param_(param) { + device_context_ = Caffe::GetDeviceContext(layer_param_.device()); + + // Set phase and copy blobs (if there are any). + phase_ = param.phase(); + if (layer_param_.blobs_size() > 0) { + blobs_.resize(layer_param_.blobs_size()); + for (int i = 0; i < layer_param_.blobs_size(); ++i) { + blobs_[i].reset(new Blob()); + blobs_[i]->FromProto(layer_param_.blobs(i), device_context_); } } - virtual ~Layer() {} + } + virtual ~Layer() { + } /** * @brief Implements common layer setup functionality. @@ -59,7 +64,7 @@ class Layer { * This method may not be overridden. */ void SetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { CheckBlobCounts(bottom, top); LayerSetUp(bottom, top); Reshape(bottom, top); @@ -83,7 +88,8 @@ class Layer { * adjust the top blob sizes. */ virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top) {} + const vector*>& top) { + } /** * @brief Adjust the shapes of top blobs and internal buffers to accomodate @@ -98,7 +104,7 @@ class Layer { * accomodate the bottom blobs. */ virtual void Reshape(const vector*>& bottom, - const vector*>& top) = 0; + const vector*>& top) = 0; /** * @brief Given the bottom blobs, compute the top blobs and the loss. @@ -118,7 +124,7 @@ class Layer { * Your layer should implement Forward_cpu and (optionally) Forward_gpu. */ inline Dtype Forward(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Given the top blob error gradients, compute the bottom blob error @@ -142,8 +148,8 @@ class Layer { * Your layer should implement Forward_cpu and (optionally) Forward_gpu. */ inline void Backward(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); /** * @brief Returns the vector of learnable parameter blobs. @@ -155,7 +161,9 @@ class Layer { /** * @brief Returns the layer parameter. */ - const LayerParameter& layer_param() const { return layer_param_; } + const LayerParameter& layer_param() const { + return layer_param_; + } /** * @brief Writes the layer parameter to a protocol buffer @@ -182,7 +190,9 @@ class Layer { /** * @brief Returns the layer type. */ - virtual inline const char* type() const { return ""; } + virtual inline const char* type() const { + return ""; + } /** * @brief Returns the exact number of bottom blobs required by the layer, @@ -191,7 +201,9 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some exact number of bottom blobs. */ - virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int ExactNumBottomBlobs() const { + return -1; + } /** * @brief Returns the minimum number of bottom blobs required by the layer, * or -1 if no minimum number is required. @@ -199,7 +211,9 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some minimum number of bottom blobs. */ - virtual inline int MinBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { + return -1; + } /** * @brief Returns the maximum number of bottom blobs required by the layer, * or -1 if no maximum number is required. @@ -207,7 +221,9 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some maximum number of bottom blobs. */ - virtual inline int MaxBottomBlobs() const { return -1; } + virtual inline int MaxBottomBlobs() const { + return -1; + } /** * @brief Returns the exact number of top blobs required by the layer, * or -1 if no exact number is required. @@ -215,7 +231,9 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some exact number of top blobs. */ - virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int ExactNumTopBlobs() const { + return -1; + } /** * @brief Returns the minimum number of top blobs required by the layer, * or -1 if no minimum number is required. @@ -223,7 +241,9 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some minimum number of top blobs. */ - virtual inline int MinTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { + return -1; + } /** * @brief Returns the maximum number of top blobs required by the layer, * or -1 if no maximum number is required. @@ -231,7 +251,9 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some maximum number of top blobs. */ - virtual inline int MaxTopBlobs() const { return -1; } + virtual inline int MaxTopBlobs() const { + return -1; + } /** * @brief Returns true if the layer requires an equal number of bottom and * top blobs. @@ -239,7 +261,9 @@ class Layer { * This method should be overridden to return true if your layer expects an * equal number of bottom and top blobs. */ - virtual inline bool EqualNumBottomTopBlobs() const { return false; } + virtual inline bool EqualNumBottomTopBlobs() const { + return false; + } /** * @brief Return whether "anonymous" top blobs are created automatically @@ -249,7 +273,9 @@ class Layer { * blobs to fulfill the requirement specified by ExactNumTopBlobs() or * MinTopBlobs(). */ - virtual inline bool AutoTopBlobs() const { return false; } + virtual inline bool AutoTopBlobs() const { + return false; + } /** * @brief Return whether to allow force_backward for a given bottom blob @@ -271,8 +297,9 @@ class Layer { * for all parameters, but possibly with wasteful computation. */ inline bool param_propagate_down(const int param_id) { - return (param_propagate_down_.size() > param_id) ? - param_propagate_down_[param_id] : false; + return + (param_propagate_down_.size() > param_id) ? + param_propagate_down_[param_id] : false; } /** * @brief Sets whether the layer should compute gradients w.r.t. a @@ -285,6 +312,12 @@ class Layer { param_propagate_down_[param_id] = value; } + /** + * @brief Returns the device context this layer runs on + */ + inline DeviceContext device_context() { + return device_context_; + } protected: /** The protobuf that stores the layer parameters */ @@ -300,17 +333,20 @@ class Layer { * the objective function. */ vector loss_; + /** Device context */ + DeviceContext device_context_; + /** @brief Using the CPU device, compute the layer output. */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top) = 0; + const vector*>& top) = 0; /** * @brief Using the GPU device, compute the layer output. * Fall back to Forward_cpu() if unavailable. */ virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // LOG(WARNING) << "Using CPU code as backup."; - return Forward_cpu(bottom, top); + Forward_cpu(bottom, top); } /** @@ -318,16 +354,16 @@ class Layer { * for the bottom blobs if propagate_down is true. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) = 0; + const vector& propagate_down, + const vector*>& bottom) = 0; /** * @brief Using the GPU device, compute the gradients for any parameters and * for the bottom blobs if propagate_down is true. * Fall back to Backward_cpu() if unavailable. */ virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { // LOG(WARNING) << "Using CPU code as backup."; Backward_cpu(top, propagate_down, bottom); } @@ -340,39 +376,38 @@ class Layer { virtual void CheckBlobCounts(const vector*>& bottom, const vector*>& top) { if (ExactNumBottomBlobs() >= 0) { - CHECK_EQ(ExactNumBottomBlobs(), bottom.size()) - << type() << " Layer takes " << ExactNumBottomBlobs() - << " bottom blob(s) as input."; + CHECK_EQ(ExactNumBottomBlobs(), bottom.size())<< type() << " Layer takes " << ExactNumBottomBlobs() + << " bottom blob(s) as input."; } if (MinBottomBlobs() >= 0) { CHECK_LE(MinBottomBlobs(), bottom.size()) - << type() << " Layer takes at least " << MinBottomBlobs() - << " bottom blob(s) as input."; + << type() << " Layer takes at least " << MinBottomBlobs() + << " bottom blob(s) as input."; } if (MaxBottomBlobs() >= 0) { CHECK_GE(MaxBottomBlobs(), bottom.size()) - << type() << " Layer takes at most " << MaxBottomBlobs() - << " bottom blob(s) as input."; + << type() << " Layer takes at most " << MaxBottomBlobs() + << " bottom blob(s) as input."; } if (ExactNumTopBlobs() >= 0) { CHECK_EQ(ExactNumTopBlobs(), top.size()) - << type() << " Layer produces " << ExactNumTopBlobs() - << " top blob(s) as output."; + << type() << " Layer produces " << ExactNumTopBlobs() + << " top blob(s) as output."; } if (MinTopBlobs() >= 0) { CHECK_LE(MinTopBlobs(), top.size()) - << type() << " Layer produces at least " << MinTopBlobs() - << " top blob(s) as output."; + << type() << " Layer produces at least " << MinTopBlobs() + << " top blob(s) as output."; } if (MaxTopBlobs() >= 0) { CHECK_GE(MaxTopBlobs(), top.size()) - << type() << " Layer produces at most " << MaxTopBlobs() - << " top blob(s) as output."; + << type() << " Layer produces at most " << MaxTopBlobs() + << " top blob(s) as output."; } if (EqualNumBottomTopBlobs()) { CHECK_EQ(bottom.size(), top.size()) - << type() << " Layer produces one top blob as output for each " - << "bottom blob input."; + << type() << " Layer produces one top blob as output for each " + << "bottom blob input."; } } @@ -384,10 +419,10 @@ class Layer { const int num_loss_weights = layer_param_.loss_weight_size(); if (num_loss_weights) { CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be " - "unspecified or specified once per top blob."; + "unspecified or specified once per top blob."; for (int top_id = 0; top_id < top.size(); ++top_id) { const Dtype loss_weight = layer_param_.loss_weight(top_id); - if (loss_weight == Dtype(0)) { continue; } + if (loss_weight == Dtype(0)) {continue;} this->set_loss(top_id, loss_weight); const int count = top[top_id]->count(); Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff(); @@ -402,59 +437,64 @@ class Layer { // Forward and backward wrappers. You should implement the cpu and // gpu specific implementations instead, and should not change these // functions. -template +template inline Dtype Layer::Forward(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { Dtype loss = 0; switch (Caffe::mode()) { - case Caffe::CPU: - Forward_cpu(bottom, top); - for (int top_id = 0; top_id < top.size(); ++top_id) { - if (!this->loss(top_id)) { continue; } - const int count = top[top_id]->count(); - const Dtype* data = top[top_id]->cpu_data(); - const Dtype* loss_weights = top[top_id]->cpu_diff(); - loss += caffe_cpu_dot(count, data, loss_weights); - } - break; - case Caffe::GPU: - Forward_gpu(bottom, top); + case Caffe::CPU: + Forward_cpu(bottom, top); + for (int top_id = 0; top_id < top.size(); ++top_id) { + if (!this->loss(top_id)) { + continue; + } + const int count = top[top_id]->count(); + const Dtype* data = top[top_id]->cpu_data(); + const Dtype* loss_weights = top[top_id]->cpu_diff(); + loss += caffe_cpu_dot(count, data, loss_weights); + } + break; + case Caffe::GPU: + Forward_gpu(bottom, top); #ifndef CPU_ONLY - for (int top_id = 0; top_id < top.size(); ++top_id) { - if (!this->loss(top_id)) { continue; } - const int count = top[top_id]->count(); - const Dtype* data = top[top_id]->gpu_data(); - const Dtype* loss_weights = top[top_id]->gpu_diff(); - Dtype blob_loss = 0; - caffe_gpu_dot(count, data, loss_weights, &blob_loss); - loss += blob_loss; - } + for (int top_id = 0; top_id < top.size(); ++top_id) { + if (!this->loss(top_id)) { + continue; + } + const int count = top[top_id]->count(); + const Dtype* data = top[top_id]->gpu_data(); + const Dtype* loss_weights = top[top_id]->gpu_diff(); + Dtype blob_loss = 0; + // TODO: Greentea backend here + caffe_gpu_dot(count, data, loss_weights, &blob_loss); + loss += blob_loss; + } #endif - break; - default: - LOG(FATAL) << "Unknown caffe mode."; - } + break; + default: + LOG(FATAL)<< "Unknown caffe mode."; + } return loss; } -template +template inline void Layer::Backward(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { switch (Caffe::mode()) { - case Caffe::CPU: - Backward_cpu(top, propagate_down, bottom); - break; - case Caffe::GPU: - Backward_gpu(top, propagate_down, bottom); - break; - default: - LOG(FATAL) << "Unknown caffe mode."; + case Caffe::CPU: + Backward_cpu(top, propagate_down, bottom); + break; + case Caffe::GPU: + Backward_gpu(top, propagate_down, bottom); + break; + default: + LOG(FATAL)<< "Unknown caffe mode."; + } } -} // Serialize LayerParameter to protocol buffer -template +template void Layer::ToProto(LayerParameter* param, bool write_diff) { param->Clear(); param->CopyFrom(layer_param_); diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index d3eecd2e510..1a80c7f6f05 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -19,7 +19,7 @@ const float kLOG_THRESHOLD = 1e-20; * @brief Computes the classification accuracy for a one-of-many * classification task. */ -template +template class AccuracyLayer : public Layer { public: /** @@ -31,15 +31,22 @@ class AccuracyLayer : public Layer { * correct if the correct label is among the top 5 predicted labels. */ explicit AccuracyLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Accuracy"; } - virtual inline int ExactNumBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Accuracy"; + } + virtual inline int ExactNumBottomBlobs() const { + return 2; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: /** @@ -67,25 +74,20 @@ class AccuracyLayer : public Layer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - + const vector*>& top); /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { for (int i = 0; i < propagate_down.size(); ++i) { - if (propagate_down[i]) { NOT_IMPLEMENTED; } + if (propagate_down[i]) { + NOT_IMPLEMENTED; + } } } - int label_axis_, outer_num_, inner_num_; - int top_k_; - - /// Whether to ignore instances with a certain label. - bool has_ignore_label_; - /// The label indicating that an instance should be ignored. - int ignore_label_; }; /** @@ -96,17 +98,20 @@ class AccuracyLayer : public Layer { * LossLayers are typically only capable of backpropagating to their first input * -- the predictions. */ -template +template class LossLayer : public Layer { public: explicit LossLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp( - const vector*>& bottom, const vector*>& top); - virtual void Reshape( - const vector*>& bottom, const vector*>& top); + : Layer(param) { + } + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumBottomBlobs() const { + return 2; + } /** * @brief For convenience and backwards compatibility, instruct the Net to @@ -114,8 +119,12 @@ class LossLayer : public Layer { * they output their singleton loss, (even if the user didn't specify * one in the prototxt, etc.). */ - virtual inline bool AutoTopBlobs() const { return true; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline bool AutoTopBlobs() const { + return true; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } /** * We usually cannot backpropagate to the labels; ignore force_backward for * these inputs. @@ -149,16 +158,22 @@ class LossLayer : public Layer { * d = \left| \left| a_n - b_n \right| \right|_2^2 @f$. * This can be used to train siamese networks. */ -template +template class ContrastiveLossLayer : public LossLayer { public: explicit ContrastiveLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} + : LossLayer(param), + diff_() { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 3; } - virtual inline const char* type() const { return "ContrastiveLoss"; } + virtual inline int ExactNumBottomBlobs() const { + return 3; + } + virtual inline const char* type() const { + return "ContrastiveLoss"; + } /** * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate * to the first two inputs. @@ -170,9 +185,9 @@ class ContrastiveLossLayer : public LossLayer { protected: /// @copydoc ContrastiveLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the Contrastive error gradient w.r.t. the inputs. @@ -200,9 +215,11 @@ class ContrastiveLossLayer : public LossLayer { * propagate_down[1] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); Blob diff_; // cached for backward pass Blob dist_sq_; // cached for backward pass @@ -236,15 +253,19 @@ class ContrastiveLossLayer : public LossLayer { * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve * linear least squares problems! We use it only as an instructive example.) */ -template +template class EuclideanLossLayer : public LossLayer { public: explicit EuclideanLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} + : LossLayer(param), + diff_() { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "EuclideanLoss"; } + virtual inline const char* type() const { + return "EuclideanLoss"; + } /** * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate * to both inputs -- override to return true and always allow force_backward. @@ -256,9 +277,9 @@ class EuclideanLossLayer : public LossLayer { protected: /// @copydoc EuclideanLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the Euclidean error gradient w.r.t. the inputs. @@ -294,9 +315,11 @@ class EuclideanLossLayer : public LossLayer { * @f$ if propagate_down[1] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); Blob diff_; }; @@ -344,18 +367,21 @@ class EuclideanLossLayer : public LossLayer { * outside the InnerProductLayer and no other losses outside the * HingeLossLayer). */ -template +template class HingeLossLayer : public LossLayer { public: explicit HingeLossLayer(const LayerParameter& param) - : LossLayer(param) {} + : LossLayer(param) { + } - virtual inline const char* type() const { return "HingeLoss"; } + virtual inline const char* type() const { + return "HingeLoss"; + } protected: /// @copydoc HingeLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the hinge loss error gradient w.r.t. the predictions. @@ -385,7 +411,8 @@ class HingeLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; /** @@ -420,29 +447,39 @@ class HingeLossLayer : public LossLayer { * \log(\hat{p}_{n,k}) * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. */ -template +template class InfogainLossLayer : public LossLayer { public: explicit InfogainLossLayer(const LayerParameter& param) - : LossLayer(param), infogain_() {} + : LossLayer(param), + infogain_() { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should // be the infogain matrix. (Otherwise the infogain matrix is loaded from a // file specified by LayerParameter.) - virtual inline int ExactNumBottomBlobs() const { return -1; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int MaxBottomBlobs() const { return 3; } + virtual inline int ExactNumBottomBlobs() const { + return -1; + } + virtual inline int MinBottomBlobs() const { + return 2; + } + virtual inline int MaxBottomBlobs() const { + return 3; + } - virtual inline const char* type() const { return "InfogainLoss"; } + virtual inline const char* type() const { + return "InfogainLoss"; + } protected: /// @copydoc InfogainLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the infogain loss error gradient w.r.t. the predictions. @@ -477,7 +514,8 @@ class InfogainLossLayer : public LossLayer { * gradient computation is not implemented. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); Blob infogain_; }; @@ -511,20 +549,23 @@ class InfogainLossLayer : public LossLayer { * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) * @f$ */ -template +template class MultinomialLogisticLossLayer : public LossLayer { public: explicit MultinomialLogisticLossLayer(const LayerParameter& param) - : LossLayer(param) {} + : LossLayer(param) { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "MultinomialLogisticLoss"; } + virtual inline const char* type() const { + return "MultinomialLogisticLoss"; + } protected: /// @copydoc MultinomialLogisticLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the multinomial logistic loss error gradient w.r.t. the @@ -555,7 +596,8 @@ class MultinomialLogisticLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; /** @@ -587,26 +629,29 @@ class MultinomialLogisticLossLayer : public LossLayer { * \right] * @f$ */ -template +template class SigmoidCrossEntropyLossLayer : public LossLayer { public: explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) : LossLayer(param), - sigmoid_layer_(new SigmoidLayer(param)), - sigmoid_output_(new Blob()) {} + sigmoid_layer_(new SigmoidLayer(param)), + sigmoid_output_(new Blob()) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } + virtual inline const char* type() const { + return "SigmoidCrossEntropyLoss"; + } protected: /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the @@ -639,9 +684,11 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); /// The internal SigmoidLayer used to map predictions to probabilities. shared_ptr > sigmoid_layer_; @@ -654,7 +701,7 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { }; // Forward declare SoftmaxLayer for use in SoftmaxWithLossLayer. -template class SoftmaxLayer; +template class SoftmaxLayer; /** * @brief Computes the multinomial logistic loss for a one-of-many @@ -684,35 +731,44 @@ template class SoftmaxLayer; * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) * @f$, for softmax output class probabilites @f$ \hat{p} @f$ */ -template +template class SoftmaxWithLossLayer : public LossLayer { public: - /** - * @param param provides LossParameter loss_param, with options: - * - ignore_label (optional) - * Specify a label value that should be ignored when computing the loss. - * - normalize (optional, default true) - * If true, the loss is normalized by the number of (nonignored) labels - * present; otherwise the loss is simply summed over spatial locations. - */ + /** + * @param param provides LossParameter loss_param, with options: + * - ignore_label (optional) + * Specify a label value that should be ignored when computing the loss. + * - normalize (optional, default true) + * If true, the loss is normalized by the number of (nonignored) labels + * present; otherwise the loss is simply summed over spatial locations. + */ explicit SoftmaxWithLossLayer(const LayerParameter& param) - : LossLayer(param) {} + : LossLayer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "SoftmaxWithLoss"; } - virtual inline int ExactNumTopBlobs() const { return -1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } + virtual inline const char* type() const { + return "SoftmaxWithLoss"; + } + virtual inline int ExactNumTopBlobs() const { + return -1; + } + virtual inline int MinTopBlobs() const { + return 1; + } + virtual inline int MaxTopBlobs() const { + return 2; + } protected: /// @copydoc SoftmaxWithLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the softmax loss error gradient w.r.t. the predictions. * @@ -741,10 +797,11 @@ class SoftmaxWithLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - + const vector& propagate_down, + const vector*>& bottom); /// The internal SoftmaxLayer used to map predictions to a distribution. shared_ptr > softmax_layer_; @@ -761,8 +818,6 @@ class SoftmaxWithLossLayer : public LossLayer { /// Whether to normalize the loss by the total number of values present /// (otherwise just by the batch size). bool normalize_; - - int softmax_axis_, outer_num_, inner_num_; }; } // namespace caffe diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 323215134c7..0c306fb41bf 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -433,8 +433,8 @@ class CuDNNReLULayer : public ReLULayer { bool handles_setup_; cudnnHandle_t handle_; - cudnnTensorDescriptor_t bottom_desc_; - cudnnTensorDescriptor_t top_desc_; + cudnnTensor4dDescriptor_t bottom_desc_; + cudnnTensor4dDescriptor_t top_desc_; }; #endif @@ -516,8 +516,8 @@ class CuDNNSigmoidLayer : public SigmoidLayer { bool handles_setup_; cudnnHandle_t handle_; - cudnnTensorDescriptor_t bottom_desc_; - cudnnTensorDescriptor_t top_desc_; + cudnnTensor4dDescriptor_t bottom_desc_; + cudnnTensor4dDescriptor_t top_desc_; }; #endif @@ -601,8 +601,8 @@ class CuDNNTanHLayer : public TanHLayer { bool handles_setup_; cudnnHandle_t handle_; - cudnnTensorDescriptor_t bottom_desc_; - cudnnTensorDescriptor_t top_desc_; + cudnnTensor4dDescriptor_t bottom_desc_; + cudnnTensor4dDescriptor_t top_desc_; }; #endif @@ -654,90 +654,6 @@ class ThresholdLayer : public NeuronLayer { Dtype threshold_; }; -/** - * @brief Parameterized Rectified Linear Unit non-linearity @f$ - * y_i = \max(0, x_i) + a_i \min(0, x_i) - * @f$. The differences from ReLULayer are 1) negative slopes are - * learnable though backprop and 2) negative slopes can vary across - * channels. The number of axes of input blob should be greater than or - * equal to 2. The 1st axis (0-based) is seen as channels. - */ -template -class PReLULayer : public NeuronLayer { - public: - /** - * @param param provides PReLUParameter prelu_param, - * with PReLULayer options: - * - filler (\b optional, FillerParameter, - * default {'type': constant 'value':0.25}). - * - channel_shared (\b optional, default false). - * negative slopes are shared across channels. - */ - explicit PReLULayer(const LayerParameter& param) - : NeuronLayer(param) {} - - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "PReLU"; } - - protected: - /** - * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times ...) @f$ - * the inputs @f$ x @f$ - * @param top output Blob vector (length 1) - * -# @f$ (N \times C \times ...) @f$ - * the computed outputs for each channel @f$i@f$ @f$ - * y_i = \max(0, x_i) + a_i \min(0, x_i) - * @f$. - */ - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the error gradient w.r.t. the PReLU inputs. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (N \times C \times ...) @f$ - * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ - * with respect to computed outputs @f$ y @f$ - * @param propagate_down see Layer::Backward. - * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times ...) @f$ - * the inputs @f$ x @f$; For each channel @f$i@f$, backward fills their - * diff with gradients @f$ - * \frac{\partial E}{\partial x_i} = \left\{ - * \begin{array}{lr} - * a_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ - * \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i > 0 - * \end{array} \right. - * @f$. - * If param_propagate_down_[0] is true, it fills the diff with gradients - * @f$ - * \frac{\partial E}{\partial a_i} = \left\{ - * \begin{array}{lr} - * \sum_{x_i} x_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ - * 0 & \mathrm{if} \; x_i > 0 - * \end{array} \right. - * @f$. - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - bool channel_shared_; - Blob multiplier_; // dot multipler for backward computation of params - Blob bottom_memory_; // memory for in-place computation -}; - } // namespace caffe #endif // CAFFE_NEURON_LAYERS_HPP_ diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 4dcdc3dc20b..7805bcd2a40 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -14,7 +14,7 @@ namespace caffe { * Requires implementation of ComputeUpdateValue to compute a parameter update * given the current state of the Net parameters. */ -template +template class Solver { public: explicit Solver(const SolverParameter& param); @@ -25,31 +25,39 @@ class Solver { // The main entry of the solver function. In default, iter will be zero. Pass // in a non-zero iter number to resume training for a pre-trained net. virtual void Solve(const char* resume_file = NULL); - inline void Solve(const string resume_file) { Solve(resume_file.c_str()); } + inline void Solve(const string resume_file) { + Solve(resume_file.c_str()); + } void Step(int iters); - // The Restore function implements how one should restore the solver to a - // previously snapshotted state. You should implement the RestoreSolverState() - // function that restores the state from a SolverState protocol buffer. - void Restore(const char* resume_file); - virtual ~Solver() {} - inline shared_ptr > net() { return net_; } + void StepPrefilled(); + virtual ~Solver() { + } + inline shared_ptr > net() { + return net_; + } inline const vector > >& test_nets() { return test_nets_; } - int iter() { return iter_; } - - protected: + int iter() { + return iter_; + } + void Snapshot(); + void Restore(const char* resume_file); // Get the update value for the current iteration. virtual void ComputeUpdateValue() = 0; + + protected: // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be // written to disk together with the learned net. - void Snapshot(); // The test routine void TestAll(); void Test(const int test_net_id = 0); virtual void SnapshotSolverState(SolverState* state) = 0; + // The Restore function implements how one should restore the solver to a + // previously snapshotted state. You should implement the RestoreSolverState() + // function that restores the state from a SolverState protocol buffer. virtual void RestoreSolverState(const SolverState& state) = 0; void DisplayOutputBlobs(const int net_id); @@ -59,28 +67,34 @@ class Solver { shared_ptr > net_; vector > > test_nets_; - DISABLE_COPY_AND_ASSIGN(Solver); +DISABLE_COPY_AND_ASSIGN(Solver); }; - /** * @brief Optimizes the parameters of a Net using * stochastic gradient descent (SGD) with momentum. */ -template +template class SGDSolver : public Solver { public: explicit SGDSolver(const SolverParameter& param) - : Solver(param) { PreSolve(); } + : Solver(param) { + PreSolve(); + } explicit SGDSolver(const string& param_file) - : Solver(param_file) { PreSolve(); } + : Solver(param_file) { + PreSolve(); + } - const vector > >& history() { return history_; } + const vector > >& history() { + return history_; + } + + virtual void ComputeUpdateValue(); protected: void PreSolve(); Dtype GetLearningRate(); - virtual void ComputeUpdateValue(); virtual void ClipGradients(); virtual void SnapshotSolverState(SolverState * state); virtual void RestoreSolverState(const SolverState& state); @@ -90,55 +104,61 @@ class SGDSolver : public Solver { // of gradients/updates and is not needed in snapshots vector > > history_, update_, temp_; - DISABLE_COPY_AND_ASSIGN(SGDSolver); +DISABLE_COPY_AND_ASSIGN(SGDSolver); }; -template +template class NesterovSolver : public SGDSolver { public: explicit NesterovSolver(const SolverParameter& param) - : SGDSolver(param) {} + : SGDSolver(param) { + } explicit NesterovSolver(const string& param_file) - : SGDSolver(param_file) {} + : SGDSolver(param_file) { + } - protected: virtual void ComputeUpdateValue(); - DISABLE_COPY_AND_ASSIGN(NesterovSolver); + protected: + +DISABLE_COPY_AND_ASSIGN(NesterovSolver); }; -template +template class AdaGradSolver : public SGDSolver { public: explicit AdaGradSolver(const SolverParameter& param) - : SGDSolver(param) { constructor_sanity_check(); } + : SGDSolver(param) { + constructor_sanity_check(); + } explicit AdaGradSolver(const string& param_file) - : SGDSolver(param_file) { constructor_sanity_check(); } + : SGDSolver(param_file) { + constructor_sanity_check(); + } - protected: virtual void ComputeUpdateValue(); + protected: void constructor_sanity_check() { - CHECK_EQ(0, this->param_.momentum()) - << "Momentum cannot be used with AdaGrad."; + CHECK_EQ(0, this->param_.momentum())<< "Momentum cannot be used with AdaGrad."; } DISABLE_COPY_AND_ASSIGN(AdaGradSolver); }; -template +template Solver* GetSolver(const SolverParameter& param) { SolverParameter_SolverType type = param.solver_type(); switch (type) { - case SolverParameter_SolverType_SGD: + case SolverParameter_SolverType_SGD: return new SGDSolver(param); - case SolverParameter_SolverType_NESTEROV: + case SolverParameter_SolverType_NESTEROV: return new NesterovSolver(param); - case SolverParameter_SolverType_ADAGRAD: + case SolverParameter_SolverType_ADAGRAD: return new AdaGradSolver(param); - default: - LOG(FATAL) << "Unknown SolverType: " << type; - } + default: + LOG(FATAL)<< "Unknown SolverType: " << type; + } return (Solver*) NULL; } diff --git a/include/caffe/splitnet/splitnet.hpp b/include/caffe/splitnet/splitnet.hpp new file mode 100644 index 00000000000..ca5349f458a --- /dev/null +++ b/include/caffe/splitnet/splitnet.hpp @@ -0,0 +1,29 @@ +/* + * splitnet.hpp + * + * Created on: Apr 5, 2015 + * Author: fabian + */ + +#ifndef CAFFE_SPLITNET_HPP_ +#define CAFFE_SPLITNET_HPP_ + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template +class Splitnet { + public: + Splitnet(); + private: + +}; + + +} + +#endif /* CAFFE_SPLITNET_HPP_ */ diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 1b726de9564..68fda2ca626 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -6,13 +6,15 @@ #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" +#include "caffe/greentea/greentea.hpp" + namespace caffe { // Theoretically, CaffeMallocHost and CaffeFreeHost should simply call the // cudaMallocHost and cudaFree functions in order to create pinned memory. // However, those codes rely on the existence of a cuda GPU (I don't know // why that is a must since allocating memory should not be accessing the -// GPU resource, but it just creates an error as of Cuda 5.0) and will cause +// GPU resorce, but it just creates an error as of Cuda 5.0) and will cause // problem when running on a machine without GPU. Thus, we simply define // these two functions for safety and possible future change if the problem // of calling cuda functions disappears in a future version. @@ -40,12 +42,23 @@ inline void CaffeFreeHost(void* ptr) { */ class SyncedMemory { public: - SyncedMemory() +#ifdef USE_GREENTEA + SyncedMemory(DeviceContext device_context) + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false), device_context_(device_context), cl_gpu_mem_(NULL) {} + explicit SyncedMemory(size_t size, DeviceContext device_context) + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), + own_cpu_data_(false), device_context_(device_context), cl_gpu_mem_(NULL) {} +#else + SyncedMemory(DeviceContext device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), - own_cpu_data_(false) {} - explicit SyncedMemory(size_t size) + own_cpu_data_(false), device_context_(device_context) {} + explicit SyncedMemory(size_t size, DeviceContext device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), - own_cpu_data_(false) {} + own_cpu_data_(false), device_context_(device_context) {} +#endif + + ~SyncedMemory(); const void* cpu_data(); void set_cpu_data(void* data); @@ -61,9 +74,14 @@ class SyncedMemory { void to_gpu(); void* cpu_ptr_; void* gpu_ptr_; + size_t size_; SyncedHead head_; bool own_cpu_data_; + DeviceContext device_context_; +#ifdef USE_GREENTEA + cl_mem cl_gpu_mem_; +#endif DISABLE_COPY_AND_ASSIGN(SyncedMemory); }; // class SyncedMemory diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index b531dd5fa7a..eaed7333df8 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -50,45 +50,41 @@ template class dataType; template<> class dataType { public: static const cudnnDataType_t type = CUDNN_DATA_FLOAT; - static float oneval, zeroval; - static const void *one, *zero; }; template<> class dataType { public: static const cudnnDataType_t type = CUDNN_DATA_DOUBLE; - static double oneval, zeroval; - static const void *one, *zero; }; template -inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) { - CUDNN_CHECK(cudnnCreateTensorDescriptor(desc)); +inline void createTensor4dDesc(cudnnTensor4dDescriptor_t* desc) { + CUDNN_CHECK(cudnnCreateTensor4dDescriptor(desc)); } template -inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, +inline void setTensor4dDesc(cudnnTensor4dDescriptor_t* desc, int n, int c, int h, int w, int stride_n, int stride_c, int stride_h, int stride_w) { CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(*desc, dataType::type, - n, c, h, w, stride_n, stride_c, stride_h, stride_w)); + n, c, h, w, stride_n, stride_c, stride_h, stride_w)); } template -inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, +inline void setTensor4dDesc(cudnnTensor4dDescriptor_t* desc, int n, int c, int h, int w) { const int stride_w = 1; const int stride_h = w * stride_w; const int stride_c = h * stride_h; const int stride_n = c * stride_c; setTensor4dDesc(desc, n, c, h, w, - stride_n, stride_c, stride_h, stride_w); + stride_n, stride_c, stride_h, stride_w); } template inline void createFilterDesc(cudnnFilterDescriptor_t* desc, int n, int c, int h, int w) { CUDNN_CHECK(cudnnCreateFilterDescriptor(desc)); - CUDNN_CHECK(cudnnSetFilter4dDescriptor(*desc, dataType::type, + CUDNN_CHECK(cudnnSetFilterDescriptor(*desc, dataType::type, n, c, h, w)); } @@ -99,29 +95,29 @@ inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) { template inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, - cudnnTensorDescriptor_t bottom, cudnnFilterDescriptor_t filter, + cudnnTensor4dDescriptor_t bottom, cudnnFilterDescriptor_t filter, int pad_h, int pad_w, int stride_h, int stride_w) { - CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv, + CUDNN_CHECK(cudnnSetConvolutionDescriptor(*conv, bottom, filter, pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION)); } template -inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, +inline void createPoolingDesc(cudnnPoolingDescriptor_t* conv, PoolingParameter_PoolMethod poolmethod, cudnnPoolingMode_t* mode, - int h, int w, int pad_h, int pad_w, int stride_h, int stride_w) { + int h, int w, int stride_h, int stride_w) { switch (poolmethod) { case PoolingParameter_PoolMethod_MAX: *mode = CUDNN_POOLING_MAX; break; case PoolingParameter_PoolMethod_AVE: - *mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; + *mode = CUDNN_POOLING_AVERAGE; break; default: LOG(FATAL) << "Unknown pooling method."; } - CUDNN_CHECK(cudnnCreatePoolingDescriptor(pool_desc)); - CUDNN_CHECK(cudnnSetPooling2dDescriptor(*pool_desc, *mode, h, w, - pad_h, pad_w, stride_h, stride_w)); + CUDNN_CHECK(cudnnCreatePoolingDescriptor(conv)); + CUDNN_CHECK(cudnnSetPoolingDescriptor(*conv, *mode, h, w, + stride_h, stride_w)); } } // namespace cudnn diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index 0051e2fa067..cbd4f8686d2 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -15,12 +15,25 @@ void col2im_cpu(const Dtype* data_col, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, Dtype* data_im); +template +void im2col_sk_gpu(const Dtype* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, Dtype* data_col); + template void im2col_gpu(const Dtype* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, Dtype* data_col); +template +void col2im_sk_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_im); + template void col2im_gpu(const Dtype* data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index cd0ab8babb0..3899e6a7d92 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -16,45 +16,103 @@ namespace caffe { +template +class DataRandTransformLayer : public Layer { + public: + explicit DataRandTransformLayer(const LayerParameter& param) + : Layer(param) { + } + + virtual inline const char* type() const { + return "DataRandTransform"; + } + + virtual ~DataRandTransformLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + int NUM_; + int CHANNELS_; + int WIDTH_; + int HEIGHT_; + + bool apply_normalization_; + + bool apply_mirroring_; + float prob_mirroring_; + + bool apply_rot_; + float rot_min_; + float rot_max_; + + bool apply_blur_; + int blur_size_; + float blur_max_var_; + + bool apply_contrast_brightness_; + float alpha_; + float beta_; +}; + /** * @brief Abstract base class that factors out the BLAS code common to * ConvolutionLayer and DeconvolutionLayer. */ -template +template class BaseConvolutionLayer : public Layer { public: explicit BaseConvolutionLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline bool EqualNumBottomTopBlobs() const { return true; } + virtual inline int MinBottomBlobs() const { + return 1; + } + virtual inline int MinTopBlobs() const { + return 1; + } + virtual inline bool EqualNumBottomTopBlobs() const { + return true; + } protected: // Helper functions that abstract away the column buffer and gemm arguments. // The last argument in forward_cpu_gemm is so that we can skip the im2col if // we just called weight_cpu_gemm with the same input. - void forward_cpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); + void forward_cpu_gemm(const Dtype* input, const Dtype* weights, Dtype* output, + bool skip_im2col = false); void forward_cpu_bias(Dtype* output, const Dtype* bias); void backward_cpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* output); - void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* - weights); + Dtype* output); + void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* weights); void backward_cpu_bias(Dtype* bias, const Dtype* input); #ifndef CPU_ONLY void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); + Dtype* output, bool skip_im2col = false); void forward_gpu_bias(Dtype* output, const Dtype* bias); void backward_gpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* col_output); - void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* - weights); + Dtype* col_output); + void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, + Dtype* weights); void backward_gpu_bias(Dtype* bias, const Dtype* input); #endif @@ -80,20 +138,24 @@ class BaseConvolutionLayer : public Layer { // wrap im2col/col2im so we don't have to remember the (long) argument lists inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { im2col_cpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + col_buff); } inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { col2im_cpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + data); } #ifndef CPU_ONLY inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + col_buff); } inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { col2im_gpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + data); } #endif @@ -111,6 +173,53 @@ class BaseConvolutionLayer : public Layer { Blob bias_multiplier_; }; +/** + * @brief Convolves the input image for pixelwise classification. + * + * Layer introduced by Hongsheng et al. + */ +template +class ConvolutionSKLayer : public Layer { + public: + explicit ConvolutionSKLayer(const LayerParameter& param) + : Layer(param) { + } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { + return "ConvolutionSK"; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int channels_; + int group_; + int height_, width_; + int pad_h_, pad_w_; + int kstride_h_, kstride_w_; + int num_, num_output_; + Blob col_buffer_; + Blob bias_multiplier_; + bool bias_term_; + int M_, K_, N_; +}; + /** * @brief Convolves the input image with a bank of learned filters, * and (optionally) adds biases. @@ -127,7 +236,7 @@ class BaseConvolutionLayer : public Layer { * be filtered. col2im restores the output spatial structure by rolling up * the output channel N' columns of the output matrix. */ -template +template class ConvolutionLayer : public BaseConvolutionLayer { public: /** @@ -159,20 +268,27 @@ class ConvolutionLayer : public BaseConvolutionLayer { * kernels + stream parallelism) engines. */ explicit ConvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) {} + : BaseConvolutionLayer(param) { + } - virtual inline const char* type() const { return "Convolution"; } + virtual inline const char* type() const { + return "Convolution"; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual inline bool reverse_dimensions() { return false; } + const vector& propagate_down, + const vector*>& bottom); + virtual inline bool reverse_dimensions() { + return false; + } virtual void compute_output_shape(); }; @@ -190,24 +306,31 @@ class ConvolutionLayer : public BaseConvolutionLayer { * padding is removed from the output rather than added to the input, and * stride results in upsampling rather than downsampling). */ -template +template class DeconvolutionLayer : public BaseConvolutionLayer { public: explicit DeconvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) {} + : BaseConvolutionLayer(param) { + } - virtual inline const char* type() const { return "Deconvolution"; } + virtual inline const char* type() const { + return "Deconvolution"; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual inline bool reverse_dimensions() { return true; } + const vector& propagate_down, + const vector*>& bottom); + virtual inline bool reverse_dimensions() { + return true; + } virtual void compute_output_shape(); }; @@ -225,19 +348,19 @@ class DeconvolutionLayer : public BaseConvolutionLayer { * input and filter regimes the CUDNN engine is faster than the CAFFE engine, * but for fully-convolutional models and large inputs the CAFFE engine can be * faster as long as it fits in memory. -*/ + */ template class CuDNNConvolutionLayer : public ConvolutionLayer { - public: +public: explicit CuDNNConvolutionLayer(const LayerParameter& param) - : ConvolutionLayer(param), handles_setup_(false) {} + : ConvolutionLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNConvolutionLayer(); - protected: +protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, @@ -245,14 +368,12 @@ class CuDNNConvolutionLayer : public ConvolutionLayer { bool handles_setup_; cudnnHandle_t* handle_; - cudaStream_t* stream_; - vector bottom_descs_, top_descs_; - cudnnTensorDescriptor_t bias_desc_; - cudnnFilterDescriptor_t filter_desc_; + cudaStream_t* stream_; + vector bottom_descs_, top_descs_; + cudnnTensor4dDescriptor_t bias_desc_; + cudnnFilterDescriptor_t filter_desc_; vector conv_descs_; int bottom_offset_, top_offset_, weight_offset_, bias_offset_; - size_t workspaceSizeInBytes; - void *workspace; }; #endif @@ -263,29 +384,38 @@ class CuDNNConvolutionLayer : public ConvolutionLayer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class Im2colLayer : public Layer { public: explicit Im2colLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Im2col"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Im2col"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); int kernel_h_, kernel_w_; int stride_h_, stride_w_; @@ -295,50 +425,62 @@ class Im2colLayer : public Layer { }; // Forward declare PoolingLayer and SplitLayer for use in LRNLayer. -template class PoolingLayer; -template class SplitLayer; +template class PoolingLayer; +template class SplitLayer; /** * @brief Normalize the input in a local region across or within feature maps. * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class LRNLayer : public Layer { public: explicit LRNLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "LRN"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "LRN"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void CrossChannelForward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void CrossChannelForward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void WithinChannelForward(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void CrossChannelBackward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void CrossChannelBackward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void WithinChannelBackward(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); int size_; int pre_pad_; @@ -373,41 +515,107 @@ class LRNLayer : public Layer { vector*> product_bottom_vec_; }; +/** + * @brief Pools the input image by taking the max, average, etc. within regions. + * + * For whole image processing, reducing redundancy. + */ +template +class PoolingSKLayer : public Layer { + public: + explicit PoolingSKLayer(const LayerParameter& param) + : Layer(param) { + } + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + virtual inline const char* type() const { + return "PoolingSK"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int MinTopBlobs() const { + return 1; + } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return + (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) ? 2 : 1; + } + + int max_top_blobs_; + int pad_h_, pad_w_; + int channels_; + int height_, width_; + int pooled_height_, pooled_width_; + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int kstride_h_, kstride_w_; + Blob rand_idx_; + Blob max_idx_; +}; /** * @brief Pools the input image by taking the max, average, etc. within regions. * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class PoolingLayer : public Layer { public: explicit PoolingLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Pooling"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "Pooling"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int MinTopBlobs() const { + return 1; + } // MAX POOL layers can output an extra top blob for the mask; // others can only output the pooled inputs. virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 2 : 1; + return + (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) ? 2 : 1; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); int kernel_h_, kernel_w_; int stride_h_, stride_w_; @@ -424,35 +632,36 @@ class PoolingLayer : public Layer { /* * @brief cuDNN implementation of PoolingLayer. * Fallback to PoolingLayer for CPU mode. -*/ + */ template class CuDNNPoolingLayer : public PoolingLayer { - public: +public: explicit CuDNNPoolingLayer(const LayerParameter& param) - : PoolingLayer(param), handles_setup_(false) {} + : PoolingLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNPoolingLayer(); // Currently, cuDNN does not support the extra top blob. - virtual inline int MinTopBlobs() const { return -1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int MinTopBlobs() const {return -1;} + virtual inline int ExactNumTopBlobs() const {return 1;} - protected: +protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensorDescriptor_t bottom_desc_, top_desc_; - cudnnPoolingDescriptor_t pooling_desc_; - cudnnPoolingMode_t mode_; + cudnnHandle_t handle_; + cudnnTensor4dDescriptor_t bottom_desc_, top_desc_; + cudnnPoolingDescriptor_t pooling_desc_; + cudnnPoolingMode_t mode_; }; #endif -} // namespace caffe +} + // namespace caffe #endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/models/bvlc_googlenet/readme.md b/models/bvlc_googlenet/readme.md index 061b6d74530..2e22dfcf59a 100644 --- a/models/bvlc_googlenet/readme.md +++ b/models/bvlc_googlenet/readme.md @@ -5,6 +5,7 @@ caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel license: unrestricted sha1: 405fc5acd08a3bb12de8ee5e23a96bec22f08204 caffe_commit: bc614d1bd91896e3faceaf40b23b72dab47d44f5 +gist_id: 866e2aa1fd707b89b913 --- This model is a replication of the model described in the [GoogleNet](http://arxiv.org/abs/1409.4842) publication. We would like to thank Christian Szegedy for all his help in the replication of GoogleNet model. diff --git a/protoc_generator.sh b/protoc_generator.sh new file mode 100644 index 00000000000..1c94ccb5530 --- /dev/null +++ b/protoc_generator.sh @@ -0,0 +1,3 @@ +protoc src/caffe/proto/caffe.proto --cpp_out=. +mkdir include/caffe/proto +mv src/caffe/proto/caffe.pb.h include/caffe/proto diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index df0401daa1c..a2f82089cac 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -4,7 +4,7 @@ if(NOT HAVE_PYTHON) endif() include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) -file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp) +file(GLOB_RECURSE python_srcs ${CMAKE_SOURCE_DIR}/python/*.cpp) add_library(pycaffe SHARED ${python_srcs}) target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index dff7f627016..03967a21029 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -5,7 +5,6 @@ #include #include -#include #include #include @@ -164,10 +163,9 @@ struct NdarrayCallPolicies : public bp::default_call_policies { // the shape information from the blob. void* data = PyArray_DATA(reinterpret_cast(result)); Py_DECREF(result); - const int num_axes = blob->num_axes(); - vector dims(blob->shape().begin(), blob->shape().end()); - PyObject *arr_obj = PyArray_SimpleNewFromData(num_axes, dims.data(), - NPY_FLOAT32, data); + npy_intp dims[] = {blob->num(), blob->channels(), + blob->height(), blob->width()}; + PyObject* arr_obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32, data); // SetBaseObject steals a ref, so we need to INCREF. Py_INCREF(pyblob.ptr()); PyArray_SetBaseObject(reinterpret_cast(arr_obj), @@ -176,20 +174,6 @@ struct NdarrayCallPolicies : public bp::default_call_policies { } }; -bp::object Blob_Reshape(bp::tuple args, bp::dict kwargs) { - if (bp::len(kwargs) > 0) { - throw std::runtime_error("Blob.reshape takes no kwargs"); - } - Blob* self = bp::extract*>(args[0]); - vector shape(bp::len(args) - 1); - for (int i = 1; i < bp::len(args); ++i) { - shape[i - 1] = bp::extract(args[i]); - } - self->Reshape(shape); - // We need to explicitly return None to use bp::raw_function. - return bp::object(); -} - BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(SolveOverloads, Solve, 0, 1); BOOST_PYTHON_MODULE(_caffe) { @@ -234,9 +218,8 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("channels", &Blob::channels) .add_property("height", &Blob::height) .add_property("width", &Blob::width) - .add_property("count", static_cast::*)() const>( - &Blob::count)) - .def("reshape", bp::raw_function(&Blob_Reshape)) + .add_property("count", &Blob::count) + .def("reshape", &Blob::Reshape) .add_property("data", bp::make_function(&Blob::mutable_cpu_data, NdarrayCallPolicies())) .add_property("diff", bp::make_function(&Blob::mutable_cpu_diff, @@ -261,8 +244,7 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("iter", &Solver::iter) .def("solve", static_cast::*)(const char*)>( &Solver::Solve), SolveOverloads()) - .def("step", &Solver::Step) - .def("restore", &Solver::Restore); + .def("step", &Solver::Step); bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( diff --git a/python/caffe/classifier.py b/python/caffe/classifier.py index 49f8003ce9d..94dd063a2c7 100644 --- a/python/caffe/classifier.py +++ b/python/caffe/classifier.py @@ -28,7 +28,7 @@ def __init__(self, model_file, pretrained_file, image_dims=None, # configure pre-processing in_ = self.inputs[0] self.transformer = caffe.io.Transformer( - {in_: self.blobs[in_].data.shape}) + {in_: self.blobs[in_].data.shape for in_ in self.inputs}) self.transformer.set_transpose(in_, (2,0,1)) if mean is not None: self.transformer.set_mean(in_, mean) @@ -83,7 +83,7 @@ def predict(self, inputs, oversample=True): for ix, in_ in enumerate(input_): caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_) out = self.forward_all(**{self.inputs[0]: caffe_in}) - predictions = out[self.outputs[0]] + predictions = out[self.outputs[0]].squeeze(axis=(2,3)) # For oversampling, average predictions across crops. if oversample: diff --git a/python/caffe/detector.py b/python/caffe/detector.py index a67b818b93f..4ea07fb7b36 100644 --- a/python/caffe/detector.py +++ b/python/caffe/detector.py @@ -24,7 +24,7 @@ class Detector(caffe.Net): Detector extends Net for windowed detection by a list of crops or selective search proposals. """ - def __init__(self, model_file, pretrained_file, mean=None, + def __init__(self, model_file, pretrained_file, gpu=False, mean=None, input_scale=None, raw_scale=None, channel_swap=None, context_pad=None): """ @@ -40,7 +40,7 @@ def __init__(self, model_file, pretrained_file, mean=None, # configure pre-processing in_ = self.inputs[0] self.transformer = caffe.io.Transformer( - {in_: self.blobs[in_].data.shape}) + {in_: self.blobs[in_].data.shape for in_ in self.inputs}) self.transformer.set_transpose(in_, (2,0,1)) if mean is not None: self.transformer.set_mean(in_, mean) diff --git a/python/caffe/io.py b/python/caffe/io.py index 6ae2cf13cc0..f51e3a64d36 100644 --- a/python/caffe/io.py +++ b/python/caffe/io.py @@ -7,7 +7,6 @@ # Python3 will most likely not be able to load protobuf from caffe.proto import caffe_pb2 except: - import sys if sys.version_info >= (3,0): print("Failed to include caffe_pb2, things might go wrong!") else: @@ -238,20 +237,12 @@ def set_mean(self, in_, mean): mean: mean ndarray (input dimensional or broadcastable) """ self.__check_input(in_) - ms = mean.shape if mean.ndim == 1: - # broadcast channels - if ms[0] != self.inputs[in_][1]: - raise ValueError('Mean channels incompatible with input.') mean = mean[:, np.newaxis, np.newaxis] - else: - # elementwise mean - if len(ms) == 2: - ms = (1,) + ms - if len(ms) != 3: - raise ValueError('Mean shape invalid') - if ms != self.inputs[in_][1:]: - raise ValueError('Mean shape incompatible with input shape.') + mk, mh, mw = mean.shape + in_k, in_h, in_w = self.inputs[in_][1:] + #if mk != in_k or (mh, mw) != (in_h, in_w) and (mh, mw) != (1, 1): + # raise Exception('Mean shape incompatible with input shape.') self.mean[in_] = mean diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index 3c19261f690..d662d6cc282 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -41,12 +41,12 @@ def _Net_params(self): @property def _Net_inputs(self): - return [list(self.blobs.keys())[i] for i in self._inputs] + return [self.blobs.keys()[i] for i in self._inputs] @property def _Net_outputs(self): - return [list(self.blobs.keys())[i] for i in self._outputs] + return [self.blobs.keys()[i] for i in self._outputs] def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): @@ -85,6 +85,8 @@ def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): # Set input according to defined shapes and make arrays single and # C-contiguous as Caffe expects. for in_, blob in kwargs.iteritems(): + if blob.ndim != 4: + raise Exception('{} blob is not 4-d'.format(in_)) if blob.shape[0] != self.blobs[in_].num: raise Exception('Input is not batch sized') self.blobs[in_].data[...] = blob diff --git a/python/caffe/test/test_python_layer.py b/python/caffe/test/test_python_layer.py index dd99f6f15b9..383c283959d 100644 --- a/python/caffe/test/test_python_layer.py +++ b/python/caffe/test/test_python_layer.py @@ -11,7 +11,8 @@ def setup(self, bottom, top): pass def reshape(self, bottom, top): - top[0].reshape(*bottom[0].data.shape) + top[0].reshape(bottom[0].num, bottom[0].channels, bottom[0].height, + bottom[0].width) def forward(self, bottom, top): top[0].data[...] = 10 * bottom[0].data @@ -20,16 +21,17 @@ def backward(self, top, propagate_down, bottom): bottom[0].diff[...] = 10 * top[0].diff def python_net_file(): - with tempfile.NamedTemporaryFile(delete=False) as f: - f.write("""name: 'pythonnet' force_backward: true - input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } - layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } - layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } - layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") - return f.name + f = tempfile.NamedTemporaryFile(delete=False) + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_dim: 10 input_dim: 9 input_dim: 8 input_dim: 7 + layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") + f.close() + return f.name class TestPythonLayer(unittest.TestCase): def setUp(self): diff --git a/python/classify.py b/python/classify.py index 4544c51b4c2..81d06369341 100755 --- a/python/classify.py +++ b/python/classify.py @@ -96,30 +96,23 @@ def main(argv): if args.channel_swap: channel_swap = [int(s) for s in args.channel_swap.split(',')] - if args.gpu: - caffe.set_mode_gpu() - print("GPU mode") - else: - caffe.set_mode_cpu() - print("CPU mode") - # Make classifier. classifier = caffe.Classifier(args.model_def, args.pretrained_model, - image_dims=image_dims, mean=mean, + image_dims=image_dims, gpu=args.gpu, mean=mean, input_scale=args.input_scale, raw_scale=args.raw_scale, channel_swap=channel_swap) + if args.gpu: + print('GPU mode') + # Load numpy array (.npy), directory glob (*.jpg), or image file. args.input_file = os.path.expanduser(args.input_file) if args.input_file.endswith('npy'): - print("Loading file: %s" % args.input_file) inputs = np.load(args.input_file) elif os.path.isdir(args.input_file): - print("Loading folder: %s" % args.input_file) inputs =[caffe.io.load_image(im_f) for im_f in glob.glob(args.input_file + '/*.' + args.ext)] else: - print("Loading file: %s" % args.input_file) inputs = [caffe.io.load_image(args.input_file)] print("Classifying %d inputs." % len(inputs)) @@ -130,7 +123,6 @@ def main(argv): print("Done in %.2f s." % (time.time() - start)) # Save - print("Saving results into %s" % args.output_file) np.save(args.output_file, predictions) diff --git a/python/detect.py b/python/detect.py index 691098f5c53..d395bd97abf 100755 --- a/python/detect.py +++ b/python/detect.py @@ -107,22 +107,19 @@ def main(argv): if args.channel_swap: channel_swap = [int(s) for s in args.channel_swap.split(',')] - if args.gpu: - caffe.set_mode_gpu() - print("GPU mode") - else: - caffe.set_mode_cpu() - print("CPU mode") - # Make detector. - detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean, + detector = caffe.Detector(args.model_def, args.pretrained_model, + gpu=args.gpu, mean=mean, input_scale=args.input_scale, raw_scale=args.raw_scale, channel_swap=channel_swap, context_pad=args.context_pad) + if args.gpu: + print('GPU mode') + # Load input. t = time.time() - print("Loading input...") + print('Loading input...') if args.input_file.lower().endswith('txt'): with open(args.input_file) as f: inputs = [_.strip() for _ in f.readlines()] diff --git a/python/requirements.txt b/python/requirements.txt index 7bc164a42b5..908373bf452 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -2,6 +2,7 @@ Cython>=0.19.2 numpy>=1.7.1 scipy>=0.13.2 scikit-image>=0.9.3 +scikit-learn>=0.14.1 matplotlib>=1.3.1 ipython>=1.1.0 h5py>=2.2.0 @@ -13,4 +14,3 @@ python-dateutil>=1.4,<2 protobuf>=2.5.0 python-gflags>=2.0 pyyaml>=3.10 -Pillow>=2.3.0 diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index 0e8c37861b0..82f386cf029 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -67,3 +67,4 @@ export PATH=/home/travis/miniconda/bin:$PATH conda update --yes conda conda install --yes numpy scipy matplotlib scikit-image pip pip install protobuf +rm /home/travis/miniconda/lib/libm.* diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 6d2b3f502d9..7304469f078 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -1,132 +1,112 @@ -#include -#include - #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template void Blob::Reshape(const int num, const int channels, const int height, - const int width) { - vector shape(4); - shape[0] = num; - shape[1] = channels; - shape[2] = height; - shape[3] = width; - Reshape(shape); -} - -template -void Blob::Reshape(const vector& shape) { - CHECK_LE(shape.size(), kMaxBlobAxes); - count_ = 1; - shape_.resize(shape.size()); - for (int i = 0; i < shape.size(); ++i) { - CHECK_GE(shape[i], 0); - count_ *= shape[i]; - shape_[i] = shape[i]; - } + const int width, DeviceContext device_context) { + CHECK_GE(num, 0); + CHECK_GE(channels, 0); + CHECK_GE(height, 0); + CHECK_GE(width, 0); + num_ = num; + channels_ = channels; + height_ = height; + width_ = width; + count_ = num_ * channels_ * height_ * width_; + device_context_ = device_context; if (count_ > capacity_) { capacity_ = count_; - data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); - diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); - } -} - -template -void Blob::Reshape(const BlobShape& shape) { - CHECK_LE(shape.dim_size(), kMaxBlobAxes); - vector shape_vec(shape.dim_size()); - for (int i = 0; i < shape.dim_size(); ++i) { - shape_vec[i] = shape.dim(i); + data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype), device_context_)); + diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype), device_context_)); } - Reshape(shape_vec); } -template -void Blob::ReshapeLike(const Blob& other) { - Reshape(other.shape()); +template +void Blob::ReshapeLike(const Blob& other, + DeviceContext device_context) { + Reshape(other.num(), other.channels(), other.height(), other.width(), + device_context); } -template +template Blob::Blob(const int num, const int channels, const int height, - const int width) - // capacity_ must be initialized before calling Reshape - : capacity_(0) { - Reshape(num, channels, height, width); -} - -template -Blob::Blob(const vector& shape) - // capacity_ must be initialized before calling Reshape - : capacity_(0) { - Reshape(shape); + const int width, DeviceContext device_context) + // capacity_ must be initialized before calling Reshape + : capacity_(0), + device_context_(device_context) { + Reshape(num, channels, height, width, device_context); } -template +template const Dtype* Blob::cpu_data() const { CHECK(data_); - return (const Dtype*)data_->cpu_data(); + return (const Dtype*) data_->cpu_data(); } -template +template void Blob::set_cpu_data(Dtype* data) { CHECK(data); data_->set_cpu_data(data); } -template +template const Dtype* Blob::gpu_data() const { CHECK(data_); - return (const Dtype*)data_->gpu_data(); + return (const Dtype*) data_->gpu_data(); } -template +template const Dtype* Blob::cpu_diff() const { CHECK(diff_); - return (const Dtype*)diff_->cpu_data(); + return (const Dtype*) diff_->cpu_data(); } -template +template const Dtype* Blob::gpu_diff() const { CHECK(diff_); - return (const Dtype*)diff_->gpu_data(); + return (const Dtype*) diff_->gpu_data(); } -template +template Dtype* Blob::mutable_cpu_data() { CHECK(data_); return static_cast(data_->mutable_cpu_data()); } -template +template Dtype* Blob::mutable_gpu_data() { CHECK(data_); return static_cast(data_->mutable_gpu_data()); } -template +template Dtype* Blob::mutable_cpu_diff() { CHECK(diff_); return static_cast(diff_->mutable_cpu_data()); } -template +template Dtype* Blob::mutable_gpu_diff() { CHECK(diff_); return static_cast(diff_->mutable_gpu_data()); } -template +template void Blob::ShareData(const Blob& other) { CHECK_EQ(count_, other.count()); data_ = other.data(); } -template +template void Blob::ShareDiff(const Blob& other) { CHECK_EQ(count_, other.count()); diff_ = other.diff(); @@ -135,324 +115,387 @@ void Blob::ShareDiff(const Blob& other) { // The "update" method is used for parameter blobs in a Net, which are stored // as Blob or Blob -- hence we do not define it for // Blob or Blob. -template <> void Blob::Update() { NOT_IMPLEMENTED; } -template <> void Blob::Update() { NOT_IMPLEMENTED; } +template<> void Blob::Update() { + NOT_IMPLEMENTED; +} +template<> void Blob::Update() { + NOT_IMPLEMENTED; +} -template +template void Blob::Update() { // We will perform update based on where the data is located. switch (data_->head()) { - case SyncedMemory::HEAD_AT_CPU: - // perform computation on CPU - caffe_axpy(count_, Dtype(-1), - static_cast(diff_->cpu_data()), - static_cast(data_->mutable_cpu_data())); - break; - case SyncedMemory::HEAD_AT_GPU: - case SyncedMemory::SYNCED: + case SyncedMemory::HEAD_AT_CPU: { + // perform computation on CPU + caffe_axpy(count_, Dtype(-1), + static_cast(diff_->cpu_data()), + static_cast(data_->mutable_cpu_data())); + + break; + } + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - // perform computation on GPU - caffe_gpu_axpy(count_, Dtype(-1), - static_cast(diff_->gpu_data()), - static_cast(data_->mutable_gpu_data())); + // perform computation on GPU + if (device_context_.backend() == Backend::BACKEND_CUDA) { + caffe_gpu_axpy(count_, Dtype(-1), + static_cast(diff_->gpu_data()), + static_cast(data_->mutable_gpu_data())); + } else { +#ifdef USE_GREENTEA + // TODO + //greentea_gpu_axpy(count_, Dtype(-1), (cl_mem)(diff_->gpu_data()), (cl_mem)(data_->mutable_gpu_data())); +#endif + } #else - NO_GPU; + NO_GPU; #endif - break; - default: - LOG(FATAL) << "Syncedmem not initialized."; + break; + } + default: + LOG(FATAL)<< "Syncedmem not initialized."; + } } -} -template <> unsigned int Blob::asum_data() const { +template<> unsigned int Blob::asum_data() const { NOT_IMPLEMENTED; return 0; } -template <> int Blob::asum_data() const { +template +DeviceContext Blob::device_context() { + return device_context_; +} + +template<> int Blob::asum_data() const { NOT_IMPLEMENTED; return 0; } -template +template Dtype Blob::asum_data() const { - if (!data_) { return 0; } + if (!data_) { + return 0; + } switch (data_->head()) { - case SyncedMemory::HEAD_AT_CPU: - return caffe_cpu_asum(count_, cpu_data()); - case SyncedMemory::HEAD_AT_GPU: - case SyncedMemory::SYNCED: + case SyncedMemory::HEAD_AT_CPU: + return caffe_cpu_asum(count_, cpu_data()); + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - { - Dtype asum; - caffe_gpu_asum(count_, gpu_data(), &asum); - return asum; - } + if (device_context_.backend() == Backend::BACKEND_CUDA) { + Dtype asum; + caffe_gpu_asum(count_, gpu_data(), &asum); + return asum; + } else { +#ifdef USE_GREENTEA + // TODO + //greentea_gpu_asum(count_, (cl_mem) gpu_data(), &asum); + return 0; +#endif + } #else - NO_GPU; + NO_GPU; #endif - case SyncedMemory::UNINITIALIZED: - return 0; - default: - LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); - } + } + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL)<< "Unknown SyncedMemory head state: " << data_->head(); + } return 0; } -template <> unsigned int Blob::asum_diff() const { +template<> unsigned int Blob::asum_diff() const { NOT_IMPLEMENTED; return 0; } -template <> int Blob::asum_diff() const { +template<> int Blob::asum_diff() const { NOT_IMPLEMENTED; return 0; } -template +template Dtype Blob::asum_diff() const { - if (!diff_) { return 0; } + if (!diff_) { + return 0; + } switch (diff_->head()) { - case SyncedMemory::HEAD_AT_CPU: - return caffe_cpu_asum(count_, cpu_diff()); - case SyncedMemory::HEAD_AT_GPU: - case SyncedMemory::SYNCED: + case SyncedMemory::HEAD_AT_CPU: + return caffe_cpu_asum(count_, cpu_diff()); + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - { - Dtype asum; - caffe_gpu_asum(count_, gpu_diff(), &asum); - return asum; - } + if (device_context_.backend() == Backend::BACKEND_CUDA) { + Dtype asum; + caffe_gpu_asum(count_, gpu_diff(), &asum); + return asum; + } else { +#ifdef USE_GREENTEA + // TODO + //greentea_gpu_asum(count_, gpu_diff(), &asum); + return 0; +#endif + } #else - NO_GPU; + NO_GPU; #endif - case SyncedMemory::UNINITIALIZED: - return 0; - default: - LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head(); - } + } + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL)<< "Unknown SyncedMemory head state: " << diff_->head(); + } return 0; } -template <> unsigned int Blob::sumsq_data() const { +template<> unsigned int Blob::sumsq_data() const { NOT_IMPLEMENTED; return 0; } -template <> int Blob::sumsq_data() const { +template<> int Blob::sumsq_data() const { NOT_IMPLEMENTED; return 0; } -template +template Dtype Blob::sumsq_data() const { Dtype sumsq; const Dtype* data; - if (!data_) { return 0; } + if (!data_) { + return 0; + } switch (data_->head()) { - case SyncedMemory::HEAD_AT_CPU: - data = cpu_data(); - sumsq = caffe_cpu_dot(count_, data, data); - break; - case SyncedMemory::HEAD_AT_GPU: - case SyncedMemory::SYNCED: + case SyncedMemory::HEAD_AT_CPU: + data = cpu_data(); + sumsq = caffe_cpu_dot(count_, data, data); + break; + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - data = gpu_data(); - caffe_gpu_dot(count_, data, data, &sumsq); + data = gpu_data(); + if (device_context_.backend() == Backend::BACKEND_CUDA) { + caffe_gpu_dot(count_, data, data, &sumsq); + } else { +#ifdef USE_GREENTEA + // TODO + //greentea_gpu_dot(count_, data, data, &sumsq); +#endif + } #else - NO_GPU; + NO_GPU; #endif - break; - case SyncedMemory::UNINITIALIZED: - return 0; - default: - LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); - } + break; + } + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL)<< "Unknown SyncedMemory head state: " << data_->head(); + } return sumsq; } -template <> unsigned int Blob::sumsq_diff() const { +template<> unsigned int Blob::sumsq_diff() const { NOT_IMPLEMENTED; return 0; } -template <> int Blob::sumsq_diff() const { +template<> int Blob::sumsq_diff() const { NOT_IMPLEMENTED; return 0; } -template +template Dtype Blob::sumsq_diff() const { Dtype sumsq; const Dtype* diff; - if (!diff_) { return 0; } + if (!diff_) { + return 0; + } switch (diff_->head()) { - case SyncedMemory::HEAD_AT_CPU: - diff = cpu_diff(); - sumsq = caffe_cpu_dot(count_, diff, diff); - break; - case SyncedMemory::HEAD_AT_GPU: - case SyncedMemory::SYNCED: + case SyncedMemory::HEAD_AT_CPU: { + diff = cpu_diff(); + sumsq = caffe_cpu_dot(count_, diff, diff); + break; + } + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - diff = gpu_diff(); - caffe_gpu_dot(count_, diff, diff, &sumsq); - break; + diff = gpu_diff(); + if (device_context_.backend() == Backend::BACKEND_CUDA) { + caffe_gpu_dot(count_, diff, diff, &sumsq); + } else { +#ifdef USE_GREENTEA + // TODO + // greentea_gpu_dot(count_, diff, diff, &sumsq); +#endif + } + break; #else - NO_GPU; + NO_GPU; #endif - case SyncedMemory::UNINITIALIZED: - return 0; - default: - LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); - } + } + case SyncedMemory::UNINITIALIZED: + return 0; + default: + LOG(FATAL)<< "Unknown SyncedMemory head state: " << data_->head(); + } return sumsq; } -template <> void Blob::scale_data(unsigned int scale_factor) { +template<> void Blob::scale_data(unsigned int scale_factor) { NOT_IMPLEMENTED; } -template <> void Blob::scale_data(int scale_factor) { +template<> void Blob::scale_data(int scale_factor) { NOT_IMPLEMENTED; } -template +template void Blob::scale_data(Dtype scale_factor) { Dtype* data; - if (!data_) { return; } - switch (data_->head()) { - case SyncedMemory::HEAD_AT_CPU: - data = mutable_cpu_data(); - caffe_scal(count_, scale_factor, data); + if (!data_) { return; - case SyncedMemory::HEAD_AT_GPU: - case SyncedMemory::SYNCED: + } + switch (data_->head()) { + case SyncedMemory::HEAD_AT_CPU: { + data = mutable_cpu_data(); + caffe_scal(count_, scale_factor, data); + return; + } + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - data = mutable_gpu_data(); - caffe_gpu_scal(count_, scale_factor, data); - return; + data = mutable_gpu_data(); + if (device_context_.backend() == Backend::BACKEND_CUDA) { + caffe_gpu_scal(count_, scale_factor, data); + } else { +#ifdef USE_GREENTEA + // TODO + //greentea_gpu_scal(count_, scale_factor, data); +#endif + } + return; #else - NO_GPU; + NO_GPU; #endif - case SyncedMemory::UNINITIALIZED: - return; - default: - LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); + } + case SyncedMemory::UNINITIALIZED: + return; + default: + LOG(FATAL)<< "Unknown SyncedMemory head state: " << data_->head(); + } } -} -template <> void Blob::scale_diff(unsigned int scale_factor) { +template<> void Blob::scale_diff(unsigned int scale_factor) { NOT_IMPLEMENTED; } -template <> void Blob::scale_diff(int scale_factor) { +template<> void Blob::scale_diff(int scale_factor) { NOT_IMPLEMENTED; } -template +template void Blob::scale_diff(Dtype scale_factor) { Dtype* diff; - if (!diff_) { return; } - switch (diff_->head()) { - case SyncedMemory::HEAD_AT_CPU: - diff = mutable_cpu_diff(); - caffe_scal(count_, scale_factor, diff); + if (!diff_) { return; - case SyncedMemory::HEAD_AT_GPU: - case SyncedMemory::SYNCED: + } + switch (diff_->head()) { + case SyncedMemory::HEAD_AT_CPU: { + diff = mutable_cpu_diff(); + caffe_scal(count_, scale_factor, diff); + return; + } + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - diff = mutable_gpu_diff(); - caffe_gpu_scal(count_, scale_factor, diff); - return; + diff = mutable_gpu_diff(); + if (device_context_.backend() == Backend::BACKEND_CUDA) { + caffe_gpu_scal(count_, scale_factor, diff); + } else { +#ifdef USE_GREENTEA + // TODO + //greentea_gpu_scal(count_, scale_factor, diff); +#endif + } + return; + } #else - NO_GPU; + NO_GPU; #endif - case SyncedMemory::UNINITIALIZED: - return; - default: - LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head(); + case SyncedMemory::UNINITIALIZED: + return; + default: + LOG(FATAL)<< "Unknown SyncedMemory head state: " << diff_->head(); + } } -} -template -bool Blob::ShapeEquals(const BlobProto& other) { - if (other.has_num() || other.has_channels() || - other.has_height() || other.has_width()) { - // Using deprecated 4D Blob dimensions -- - // shape is (num, channels, height, width). - // Note: we do not use the normal Blob::num(), Blob::channels(), etc. - // methods as these index from the beginning of the blob shape, where legacy - // parameter blobs were indexed from the end of the blob shape (e.g., bias - // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)). - return shape_.size() <= 4 && - LegacyShape(-4) == other.num() && - LegacyShape(-3) == other.channels() && - LegacyShape(-2) == other.height() && - LegacyShape(-1) == other.width(); - } - vector other_shape(other.shape().dim_size()); - for (int i = 0; i < other.shape().dim_size(); ++i) { - other_shape[i] = other.shape().dim(i); - } - return shape_ == other_shape; -} +template +void Blob::CopyFrom(const Blob& source, DeviceContext device_context, + bool copy_diff, bool reshape) { + + device_context_ = device_context; -template -void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { - if (source.count() != count_ || source.shape() != shape_) { + if (num_ != source.num() || channels_ != source.channels() + || height_ != source.height() || width_ != source.width()) { if (reshape) { - ReshapeLike(source); + Reshape(source.num(), source.channels(), source.height(), source.width(), + device_context); } else { - LOG(FATAL) << "Trying to copy blobs of different sizes."; + LOG(FATAL)<< "Trying to copy blobs of different sizes."; } } switch (Caffe::mode()) { - case Caffe::GPU: - if (copy_diff) { - caffe_copy(count_, source.gpu_diff(), - static_cast(diff_->mutable_gpu_data())); - } else { - caffe_copy(count_, source.gpu_data(), - static_cast(data_->mutable_gpu_data())); - } - break; - case Caffe::CPU: - if (copy_diff) { - caffe_copy(count_, source.cpu_diff(), - static_cast(diff_->mutable_cpu_data())); - } else { - caffe_copy(count_, source.cpu_data(), - static_cast(data_->mutable_cpu_data())); + case Caffe::GPU: { + if (device_context_.backend() == BACKEND_CUDA) { + if (copy_diff) { + caffe_copy(count_, source.gpu_diff(), + static_cast(diff_->mutable_gpu_data())); + } else { + caffe_copy(count_, source.gpu_data(), + static_cast(data_->mutable_gpu_data())); + } + } else { +#ifdef USE_GREENTEA + if (copy_diff) { + greentea_copy( + count_, (cl_mem) (source.gpu_diff()), + (cl_mem) (diff_->mutable_gpu_data()), + viennacl::ocl::get_context(device_context_.id())); + } else { + greentea_copy( + count_, (cl_mem) (source.gpu_data()), + (cl_mem) (data_->mutable_gpu_data()), + viennacl::ocl::get_context(device_context_.id())); + } +#endif + } + break; } - break; - default: - LOG(FATAL) << "Unknown caffe mode."; - } -} - -template -void Blob::FromProto(const BlobProto& proto, bool reshape) { - if (reshape) { - vector shape; - if (proto.has_num() || proto.has_channels() || - proto.has_height() || proto.has_width()) { - // Using deprecated 4D Blob dimensions -- - // shape is (num, channels, height, width). - shape.resize(4); - shape[0] = proto.num(); - shape[1] = proto.channels(); - shape[2] = proto.height(); - shape[3] = proto.width(); - } else { - shape.resize(proto.shape().dim_size()); - for (int i = 0; i < proto.shape().dim_size(); ++i) { - shape[i] = proto.shape().dim(i); + case Caffe::CPU: { + if (copy_diff) { + caffe_copy(count_, source.cpu_diff(), + static_cast(diff_->mutable_cpu_data())); + } else { + caffe_copy(count_, source.cpu_data(), + static_cast(data_->mutable_cpu_data())); } + break; + } + default: + LOG(FATAL)<< "Unknown caffe mode."; } - Reshape(shape); - } else { - CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)"; } + +template +void Blob::FromProto(const BlobProto& proto, + DeviceContext device_context) { + Reshape(proto.num(), proto.channels(), proto.height(), proto.width(), + device_context); // copy data Dtype* data_vec = mutable_cpu_data(); for (int i = 0; i < count_; ++i) { @@ -466,12 +509,12 @@ void Blob::FromProto(const BlobProto& proto, bool reshape) { } } -template +template void Blob::ToProto(BlobProto* proto, bool write_diff) const { - proto->clear_shape(); - for (int i = 0; i < shape_.size(); ++i) { - proto->mutable_shape()->add_dim(shape_[i]); - } + proto->set_num(num_); + proto->set_channels(channels_); + proto->set_height(height_); + proto->set_width(width_); proto->clear_data(); proto->clear_diff(); const Dtype* data_vec = cpu_data(); @@ -487,8 +530,8 @@ void Blob::ToProto(BlobProto* proto, bool write_diff) const { } INSTANTIATE_CLASS(Blob); -template class Blob; -template class Blob; +template class Blob ; +template class Blob ; } // namespace caffe diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index af96cac40aa..332c4c7cf69 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -1,10 +1,15 @@ #include #include #include +#include #include "caffe/common.hpp" #include "caffe/util/rng.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/cl_kernels.hpp" +#endif + namespace caffe { shared_ptr Caffe::singleton_; @@ -18,8 +23,8 @@ int64_t cluster_seedgen(void) { return seed; } - LOG(INFO) << "System entropy source not available, " - "using fallback algorithm to generate seed instead."; + LOG(INFO)<< "System entropy source not available, " + "using fallback algorithm to generate seed instead."; if (f) fclose(f); @@ -29,7 +34,6 @@ int64_t cluster_seedgen(void) { return seed; } - void GlobalInit(int* pargc, char*** pargv) { // Google flags. ::gflags::ParseCommandLineFlags(pargc, pargv, true); @@ -42,9 +46,9 @@ void GlobalInit(int* pargc, char*** pargv) { #ifdef CPU_ONLY // CPU-only Caffe. Caffe::Caffe() - : random_generator_(), mode_(Caffe::CPU) { } +: random_generator_(), mode_(Caffe::CPU) {} -Caffe::~Caffe() { } +Caffe::~Caffe() {} void Caffe::set_random_seed(const unsigned int seed) { // RNG seed @@ -59,19 +63,18 @@ void Caffe::DeviceQuery() { NO_GPU; } - class Caffe::RNG::Generator { - public: +public: Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {} - caffe::rng_t* rng() { return rng_.get(); } - private: + caffe::rng_t* rng() {return rng_.get();} +private: shared_ptr rng_; }; -Caffe::RNG::RNG() : generator_(new Generator()) { } +Caffe::RNG::RNG() : generator_(new Generator()) {} -Caffe::RNG::RNG(unsigned int seed) : generator_(new Generator(seed)) { } +Caffe::RNG::RNG(unsigned int seed) : generator_(new Generator(seed)) {} Caffe::RNG& Caffe::RNG::operator=(const RNG& other) { generator_ = other.generator_; @@ -85,12 +88,14 @@ void* Caffe::RNG::generator() { #else // Normal GPU + CPU Caffe. Caffe::Caffe() - : cublas_handle_(NULL), curand_generator_(NULL), random_generator_(), - mode_(Caffe::CPU) { + : cublas_handle_(NULL), + curand_generator_(NULL), + random_generator_(), + mode_(Caffe::CPU) { // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { - LOG(ERROR) << "Cannot create Cublas handle. Cublas won't be available."; + LOG(ERROR)<< "Cannot create Cublas handle. Cublas won't be available."; } // Try to create a curand handler. if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT) @@ -102,7 +107,8 @@ Caffe::Caffe() } Caffe::~Caffe() { - if (cublas_handle_) CUBLAS_CHECK(cublasDestroy(cublas_handle_)); + if (cublas_handle_) + CUBLAS_CHECK(cublasDestroy(cublas_handle_)); if (curand_generator_) { CURAND_CHECK(curandDestroyGenerator(curand_generator_)); } @@ -112,88 +118,262 @@ void Caffe::set_random_seed(const unsigned int seed) { // Curand seed static bool g_curand_availability_logged = false; if (Get().curand_generator_) { - CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(curand_generator(), - seed)); + CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(curand_generator(), seed)); CURAND_CHECK(curandSetGeneratorOffset(curand_generator(), 0)); } else { if (!g_curand_availability_logged) { - LOG(ERROR) << - "Curand not available. Skipping setting the curand seed."; - g_curand_availability_logged = true; + LOG(ERROR)<< + "Curand not available. Skipping setting the curand seed."; + g_curand_availability_logged = true; } } // RNG seed Get().random_generator_.reset(new RNG(seed)); } -void Caffe::SetDevice(const int device_id) { - int current_device; - CUDA_CHECK(cudaGetDevice(¤t_device)); - if (current_device == device_id) { - return; +void Caffe::EnumerateDevices() { + int cuda_device_count = 0; + int greentea_device_count = 0; + + cudaGetDeviceCount(&cuda_device_count); + +#ifdef USE_GREENTEA + typedef std::vector platforms_type; + platforms_type platforms = viennacl::ocl::get_platforms(); + + std::vector> platform_devices; + + // Loop through devices + for (std::size_t platform_id = 0; platform_id < platforms.size(); + ++platform_id) { + typedef std::vector devices_type; + devices_type devices = platforms[platform_id].devices(CL_DEVICE_TYPE_ALL); + for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { + platform_devices.push_back( + std::make_tuple(platforms[platform_id], devices[device_id])); + greentea_device_count++; + } } - // The call to cudaSetDevice must come before any calls to Get, which - // may perform initialization using the GPU. - CUDA_CHECK(cudaSetDevice(device_id)); - if (Get().cublas_handle_) CUBLAS_CHECK(cublasDestroy(Get().cublas_handle_)); - if (Get().curand_generator_) { - CURAND_CHECK(curandDestroyGenerator(Get().curand_generator_)); +#endif + + LOG(INFO)<< "Total devices: " << cuda_device_count + greentea_device_count; + LOG(INFO)<< "CUDA devices: " << cuda_device_count; +#ifdef USE_GREENTEA + LOG(INFO)<< "OpenCL devices: " << greentea_device_count; +#endif + + // Display info for all devices + for (int i = 0; i < cuda_device_count; ++i) { + cudaDeviceProp prop; + CUDA_CHECK(cudaGetDeviceProperties(&prop, i)); + LOG(INFO)<< "Device id: " << i; + LOG(INFO)<< "Device backend: " << "CUDA"; + LOG(INFO)<< "Backend details: " << "CUDA"; + LOG(INFO)<< "Device vendor: " << "NVIDIA Corporation"; + LOG(INFO)<< "Name: " << prop.name; + LOG(INFO)<< "Total global memory: " << prop.totalGlobalMem; } - CUBLAS_CHECK(cublasCreate(&Get().cublas_handle_)); - CURAND_CHECK(curandCreateGenerator(&Get().curand_generator_, - CURAND_RNG_PSEUDO_DEFAULT)); - CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, - cluster_seedgen())); + +#ifdef USE_GREENTEA + for (int i = 0; i < greentea_device_count; ++i) { + LOG(INFO)<< "Device id: " << cuda_device_count + i; + LOG(INFO)<< "Device backend: " << "OpenCL"; + LOG(INFO)<< "Backend details: " << std::get<0>(platform_devices[i]).info(); + LOG(INFO)<< "Device vendor: " << std::get<1>(platform_devices[i]).vendor(); + LOG(INFO)<< "Name: " << std::get<1>(platform_devices[i]).name(); + LOG(INFO)<< "Total global memory: " << std::get<1>(platform_devices[i]).global_mem_size(); + } +#endif + } -void Caffe::DeviceQuery() { - cudaDeviceProp prop; - int device; - if (cudaSuccess != cudaGetDevice(&device)) { - printf("No cuda device present.\n"); - return; +void Caffe::SetDevices(std::vector device_ids) { + + Get().device_contexts_.clear(); + +#ifdef USE_GREENTEA + Get().ocl_programs_.clear(); +#endif + + int cuda_device_count = 0; + int greentea_device_count = 0; + + cudaGetDeviceCount(&cuda_device_count); + + for (int i = 0; i < cuda_device_count; ++i) { + Get().device_contexts_.push_back(DeviceContext(i, Backend::BACKEND_CUDA)); +#ifdef USE_GREENTEA + // Dummy to have same vector size as device contexts + viennacl::ocl::program program; + Get().ocl_programs_.push_back(program); +#endif + } + + // Initialize GreenTea devices +#ifdef USE_GREENTEA + typedef std::vector platforms_type; + platforms_type platforms = viennacl::ocl::get_platforms(); + + std::vector> platform_devices; + + // Loop through devices + for (std::size_t platform_id = 0; platform_id < platforms.size(); + ++platform_id) { + typedef std::vector devices_type; + devices_type devices = platforms[platform_id].devices(CL_DEVICE_TYPE_ALL); + for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { + platform_devices.push_back( + std::make_tuple(platforms[platform_id], devices[device_id])); + Get().device_contexts_.push_back( + DeviceContext(cuda_device_count + greentea_device_count, + Backend::BACKEND_OpenCL)); + // Check if this device is really used and initialize + bool is_used = false; + for (int i = 0; i < device_ids.size(); ++i) { + int device_id = device_ids[i]; + if (device_id == cuda_device_count + greentea_device_count) { + // Setup actual context and compile kernels for this device + viennacl::ocl::setup_context( + device_id, std::get<1>(platform_devices[greentea_device_count])); + viennacl::ocl::context ctx = viennacl::ocl::get_context( + static_cast(device_id)); + viennacl::ocl::program & program = RegisterKernels(ctx); + Get().ocl_programs_.push_back(program); + //viennacl::ocl::switch_context(device_id); + //viennacl::ocl::switch_device(std::get<1>(platform_devices[device_id - cuda_device_count])); + is_used = true; + } + } + // Device not used, dummy + if (!is_used) { + viennacl::ocl::program program; + Get().ocl_programs_.push_back(program); + } + greentea_device_count++; + } } - CUDA_CHECK(cudaGetDeviceProperties(&prop, device)); - LOG(INFO) << "Device id: " << device; - LOG(INFO) << "Major revision number: " << prop.major; - LOG(INFO) << "Minor revision number: " << prop.minor; - LOG(INFO) << "Name: " << prop.name; - LOG(INFO) << "Total global memory: " << prop.totalGlobalMem; - LOG(INFO) << "Total shared memory per block: " << prop.sharedMemPerBlock; - LOG(INFO) << "Total registers per block: " << prop.regsPerBlock; - LOG(INFO) << "Warp size: " << prop.warpSize; - LOG(INFO) << "Maximum memory pitch: " << prop.memPitch; - LOG(INFO) << "Maximum threads per block: " << prop.maxThreadsPerBlock; - LOG(INFO) << "Maximum dimension of block: " + +#endif + +} + +DeviceContext& Caffe::GetDeviceContext(int id) { + return id == -1 ? Get().default_device_context_ : Get().device_contexts_[id]; +} + +#ifdef USE_GREENTEA +viennacl::ocl::program & Caffe::GetDeviceProgram(int id) { + return id == -1 ? Get().default_ocl_program_ : Get().ocl_programs_[id]; +} +#endif + +DeviceContext& Caffe::GetDefaultDeviceContext() { + return Get().default_device_context_; +} + +void Caffe::SetDevice(const int device_id) { + + std::vector devices; + devices.push_back(device_id); + Caffe::SetDevices(devices); + + Get().default_device_context_ = GetDeviceContext(device_id); + + if (Get().default_device_context_.backend() == Backend::BACKEND_CUDA) { + int current_device; + CUDA_CHECK(cudaGetDevice(¤t_device)); + if (current_device == device_id) { + return; + } +// The call to cudaSetDevice must come before any calls to Get, which +// may perform initialization using the GPU. + CUDA_CHECK(cudaSetDevice(device_id)); + if (Get().cublas_handle_) + CUBLAS_CHECK(cublasDestroy(Get().cublas_handle_)); + if (Get().curand_generator_) { + CURAND_CHECK(curandDestroyGenerator(Get().curand_generator_)); + } + CUBLAS_CHECK(cublasCreate(&Get().cublas_handle_)); + CURAND_CHECK( + curandCreateGenerator(&Get().curand_generator_, + CURAND_RNG_PSEUDO_DEFAULT)); + CURAND_CHECK( + curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, + cluster_seedgen())); + } else { +#ifdef USE_GREENTEA + // TODO: ??? +#endif + } +} + +// TODO: (FTschopp) fix this for the new backend +void Caffe::DeviceQuery() { + if (Get().default_device_context_.backend() == BACKEND_CUDA) { + cudaDeviceProp prop; + int device; + if (cudaSuccess != cudaGetDevice(&device)) { + printf("No cuda device present.\n"); + } else { + CUDA_CHECK(cudaGetDeviceProperties(&prop, device)); + LOG(INFO)<< "Device id: " << device; + LOG(INFO)<< "Major revision number: " << prop.major; + LOG(INFO)<< "Minor revision number: " << prop.minor; + LOG(INFO)<< "Name: " << prop.name; + LOG(INFO)<< "Total global memory: " << prop.totalGlobalMem; + LOG(INFO)<< "Total shared memory per block: " << prop.sharedMemPerBlock; + LOG(INFO)<< "Total registers per block: " << prop.regsPerBlock; + LOG(INFO)<< "Warp size: " << prop.warpSize; + LOG(INFO)<< "Maximum memory pitch: " << prop.memPitch; + LOG(INFO)<< "Maximum threads per block: " << prop.maxThreadsPerBlock; + LOG(INFO)<< "Maximum dimension of block: " << prop.maxThreadsDim[0] << ", " << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2]; - LOG(INFO) << "Maximum dimension of grid: " + LOG(INFO)<< "Maximum dimension of grid: " << prop.maxGridSize[0] << ", " << prop.maxGridSize[1] << ", " << prop.maxGridSize[2]; - LOG(INFO) << "Clock rate: " << prop.clockRate; - LOG(INFO) << "Total constant memory: " << prop.totalConstMem; - LOG(INFO) << "Texture alignment: " << prop.textureAlignment; - LOG(INFO) << "Concurrent copy and execution: " + LOG(INFO)<< "Clock rate: " << prop.clockRate; + LOG(INFO)<< "Total constant memory: " << prop.totalConstMem; + LOG(INFO)<< "Texture alignment: " << prop.textureAlignment; + LOG(INFO)<< "Concurrent copy and execution: " << (prop.deviceOverlap ? "Yes" : "No"); - LOG(INFO) << "Number of multiprocessors: " << prop.multiProcessorCount; - LOG(INFO) << "Kernel execution timeout: " + LOG(INFO)<< "Number of multiprocessors: " << prop.multiProcessorCount; + LOG(INFO)<< "Kernel execution timeout: " << (prop.kernelExecTimeoutEnabled ? "Yes" : "No"); + } + } + else + { +#ifdef USE_GREENTEA + // TODO +#endif + } + return; } - class Caffe::RNG::Generator { public: - Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} - explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {} - caffe::rng_t* rng() { return rng_.get(); } + Generator() + : rng_(new caffe::rng_t(cluster_seedgen())) { + } + explicit Generator(unsigned int seed) + : rng_(new caffe::rng_t(seed)) { + } + caffe::rng_t* rng() { + return rng_.get(); + } private: shared_ptr rng_; }; -Caffe::RNG::RNG() : generator_(new Generator()) { } +Caffe::RNG::RNG() + : generator_(new Generator()) { +} -Caffe::RNG::RNG(unsigned int seed) : generator_(new Generator(seed)) { } +Caffe::RNG::RNG(unsigned int seed) + : generator_(new Generator(seed)) { +} Caffe::RNG& Caffe::RNG::operator=(const RNG& other) { generator_.reset(other.generator_.get()); @@ -206,29 +386,29 @@ void* Caffe::RNG::generator() { const char* cublasGetErrorString(cublasStatus_t error) { switch (error) { - case CUBLAS_STATUS_SUCCESS: - return "CUBLAS_STATUS_SUCCESS"; - case CUBLAS_STATUS_NOT_INITIALIZED: - return "CUBLAS_STATUS_NOT_INITIALIZED"; - case CUBLAS_STATUS_ALLOC_FAILED: - return "CUBLAS_STATUS_ALLOC_FAILED"; - case CUBLAS_STATUS_INVALID_VALUE: - return "CUBLAS_STATUS_INVALID_VALUE"; - case CUBLAS_STATUS_ARCH_MISMATCH: - return "CUBLAS_STATUS_ARCH_MISMATCH"; - case CUBLAS_STATUS_MAPPING_ERROR: - return "CUBLAS_STATUS_MAPPING_ERROR"; - case CUBLAS_STATUS_EXECUTION_FAILED: - return "CUBLAS_STATUS_EXECUTION_FAILED"; - case CUBLAS_STATUS_INTERNAL_ERROR: - return "CUBLAS_STATUS_INTERNAL_ERROR"; + case CUBLAS_STATUS_SUCCESS: + return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: + return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: + return "CUBLAS_STATUS_ALLOC_FAILED"; + case CUBLAS_STATUS_INVALID_VALUE: + return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: + return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: + return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: + return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: + return "CUBLAS_STATUS_INTERNAL_ERROR"; #if CUDA_VERSION >= 6000 - case CUBLAS_STATUS_NOT_SUPPORTED: - return "CUBLAS_STATUS_NOT_SUPPORTED"; + case CUBLAS_STATUS_NOT_SUPPORTED: + return "CUBLAS_STATUS_NOT_SUPPORTED"; #endif #if CUDA_VERSION >= 6050 - case CUBLAS_STATUS_LICENSE_ERROR: - return "CUBLAS_STATUS_LICENSE_ERROR"; + case CUBLAS_STATUS_LICENSE_ERROR: + return "CUBLAS_STATUS_LICENSE_ERROR"; #endif } return "Unknown cublas status"; @@ -236,32 +416,32 @@ const char* cublasGetErrorString(cublasStatus_t error) { const char* curandGetErrorString(curandStatus_t error) { switch (error) { - case CURAND_STATUS_SUCCESS: - return "CURAND_STATUS_SUCCESS"; - case CURAND_STATUS_VERSION_MISMATCH: - return "CURAND_STATUS_VERSION_MISMATCH"; - case CURAND_STATUS_NOT_INITIALIZED: - return "CURAND_STATUS_NOT_INITIALIZED"; - case CURAND_STATUS_ALLOCATION_FAILED: - return "CURAND_STATUS_ALLOCATION_FAILED"; - case CURAND_STATUS_TYPE_ERROR: - return "CURAND_STATUS_TYPE_ERROR"; - case CURAND_STATUS_OUT_OF_RANGE: - return "CURAND_STATUS_OUT_OF_RANGE"; - case CURAND_STATUS_LENGTH_NOT_MULTIPLE: - return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; - case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: - return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; - case CURAND_STATUS_LAUNCH_FAILURE: - return "CURAND_STATUS_LAUNCH_FAILURE"; - case CURAND_STATUS_PREEXISTING_FAILURE: - return "CURAND_STATUS_PREEXISTING_FAILURE"; - case CURAND_STATUS_INITIALIZATION_FAILED: - return "CURAND_STATUS_INITIALIZATION_FAILED"; - case CURAND_STATUS_ARCH_MISMATCH: - return "CURAND_STATUS_ARCH_MISMATCH"; - case CURAND_STATUS_INTERNAL_ERROR: - return "CURAND_STATUS_INTERNAL_ERROR"; + case CURAND_STATUS_SUCCESS: + return "CURAND_STATUS_SUCCESS"; + case CURAND_STATUS_VERSION_MISMATCH: + return "CURAND_STATUS_VERSION_MISMATCH"; + case CURAND_STATUS_NOT_INITIALIZED: + return "CURAND_STATUS_NOT_INITIALIZED"; + case CURAND_STATUS_ALLOCATION_FAILED: + return "CURAND_STATUS_ALLOCATION_FAILED"; + case CURAND_STATUS_TYPE_ERROR: + return "CURAND_STATUS_TYPE_ERROR"; + case CURAND_STATUS_OUT_OF_RANGE: + return "CURAND_STATUS_OUT_OF_RANGE"; + case CURAND_STATUS_LENGTH_NOT_MULTIPLE: + return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; + case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: + return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; + case CURAND_STATUS_LAUNCH_FAILURE: + return "CURAND_STATUS_LAUNCH_FAILURE"; + case CURAND_STATUS_PREEXISTING_FAILURE: + return "CURAND_STATUS_PREEXISTING_FAILURE"; + case CURAND_STATUS_INITIALIZATION_FAILED: + return "CURAND_STATUS_INITIALIZATION_FAILED"; + case CURAND_STATUS_ARCH_MISMATCH: + return "CURAND_STATUS_ARCH_MISMATCH"; + case CURAND_STATUS_INTERNAL_ERROR: + return "CURAND_STATUS_INTERNAL_ERROR"; } return "Unknown curand status"; } diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index b0b98e478c1..c42eb012705 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -12,22 +12,23 @@ namespace caffe { template DataTransformer::DataTransformer(const TransformationParameter& param, - Phase phase) - : param_(param), phase_(phase) { + Phase phase, DeviceContext device_context) + : param_(param), + phase_(phase), device_context_(device_context) { // check if we want to use mean_file if (param_.has_mean_file()) { - CHECK_EQ(param_.mean_value_size(), 0) << - "Cannot specify mean_file and mean_value at the same time"; + CHECK_EQ(param_.mean_value_size(), 0)<< + "Cannot specify mean_file and mean_value at the same time"; const string& mean_file = param.mean_file(); LOG(INFO) << "Loading mean file from: " << mean_file; BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); - data_mean_.FromProto(blob_proto); + data_mean_.FromProto(blob_proto, device_context_); } // check if we want to use mean_value if (param_.mean_value_size() > 0) { CHECK(param_.has_mean_file() == false) << - "Cannot specify mean_file and mean_value at the same time"; + "Cannot specify mean_file and mean_value at the same time"; for (int c = 0; c < param_.mean_value_size(); ++c) { mean_values_.push_back(param_.mean_value(c)); } @@ -61,8 +62,9 @@ void DataTransformer::Transform(const Datum& datum, mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { - CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << - "Specify either 1 mean_value or as many as channels: " << datum_channels; + CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) + << "Specify either 1 mean_value or as many as channels: " + << datum_channels; if (datum_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < datum_channels; ++c) { @@ -102,17 +104,17 @@ void DataTransformer::Transform(const Datum& datum, } if (has_uint8) { datum_element = - static_cast(static_cast(data[data_index])); + static_cast(static_cast(data[data_index])); } else { datum_element = datum.float_data(data_index); } if (has_mean_file) { - transformed_data[top_index] = - (datum_element - mean[data_index]) * scale; + transformed_data[top_index] = (datum_element - mean[data_index]) + * scale; } else { if (has_mean_values) { - transformed_data[top_index] = - (datum_element - mean_values_[c]) * scale; + transformed_data[top_index] = (datum_element - mean_values_[c]) + * scale; } else { transformed_data[top_index] = datum_element * scale; } @@ -162,10 +164,10 @@ void DataTransformer::Transform(const vector & datum_vector, const int height = transformed_blob->height(); const int width = transformed_blob->width(); - CHECK_GT(datum_num, 0) << "There is no datum to add"; - CHECK_LE(datum_num, num) << - "The size of datum_vector must be no greater than transformed_blob->num()"; - Blob uni_blob(1, channels, height, width); + CHECK_GT(datum_num, 0)<< "There is no datum to add"; + CHECK_LE(datum_num, num)<< + "The size of datum_vector must be no greater than transformed_blob->num()"; + Blob uni_blob(1, channels, height, width, device_context_); for (int item_id = 0; item_id < datum_num; ++item_id) { int offset = transformed_blob->offset(item_id); uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); @@ -182,10 +184,10 @@ void DataTransformer::Transform(const vector & mat_vector, const int height = transformed_blob->height(); const int width = transformed_blob->width(); - CHECK_GT(mat_num, 0) << "There is no MAT to add"; - CHECK_EQ(mat_num, num) << - "The size of mat_vector must be equals to transformed_blob->num()"; - Blob uni_blob(1, channels, height, width); + CHECK_GT(mat_num, 0)<< "There is no MAT to add"; + CHECK_EQ(mat_num, num)<< + "The size of mat_vector must be equals to transformed_blob->num()"; + Blob uni_blob(1, channels, height, width, device_context_); for (int item_id = 0; item_id < mat_num; ++item_id) { int offset = transformed_blob->offset(item_id); uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); @@ -210,7 +212,8 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK_LE(width, img_width); CHECK_GE(num, 1); - CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + // (FTschopp) Fixed for float data + CHECK(cv_img.depth() == CV_8U || cv_img.depth() == CV_32F) << "Image data type must be unsigned byte or 4 byte float"; const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); @@ -230,8 +233,9 @@ void DataTransformer::Transform(const cv::Mat& cv_img, mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { - CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << - "Specify either 1 mean_value or as many as channels: " << img_channels; + CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) + << "Specify either 1 mean_value or as many as channels: " + << img_channels; if (img_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < img_channels; ++c) { @@ -276,15 +280,19 @@ void DataTransformer::Transform(const cv::Mat& cv_img, top_index = (c * height + h) * width + w; } // int top_index = (c * height + h) * width + w; - Dtype pixel = static_cast(ptr[img_index++]); + Dtype pixel; + if(cv_img.depth() == CV_8U) { + pixel = static_cast(ptr[img_index++]); + } + else { + pixel = static_cast(((float*)ptr)[img_index++]); + } if (has_mean_file) { int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; - transformed_data[top_index] = - (pixel - mean[mean_index]) * scale; + transformed_data[top_index] = (pixel - mean[mean_index]) * scale; } else { if (has_mean_values) { - transformed_data[top_index] = - (pixel - mean_values_[c]) * scale; + transformed_data[top_index] = (pixel - mean_values_[c]) * scale; } else { transformed_data[top_index] = pixel * scale; } @@ -344,14 +352,15 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_EQ(input_width, data_mean_.width()); for (int n = 0; n < input_num; ++n) { int offset = input_blob->offset(n); - caffe_sub(data_mean_.count(), input_data + offset, - data_mean_.cpu_data(), input_data + offset); + caffe_sub(data_mean_.count(), input_data + offset, data_mean_.cpu_data(), + input_data + offset); } } if (has_mean_values) { - CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) << - "Specify either 1 mean_value or as many as channels: " << input_channels; + CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) + << "Specify either 1 mean_value or as many as channels: " + << input_channels; if (mean_values_.size() == 1) { caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); } else { @@ -359,7 +368,7 @@ void DataTransformer::Transform(Blob* input_blob, for (int c = 0; c < input_channels; ++c) { int offset = input_blob->offset(n, c); caffe_add_scalar(input_height * input_width, -(mean_values_[c]), - input_data + offset); + input_data + offset); } } } @@ -379,7 +388,7 @@ void DataTransformer::Transform(Blob* input_blob, if (do_mirror) { int top_index_w = top_index_h + width - 1; for (int w = 0; w < width; ++w) { - transformed_data[top_index_w-w] = input_data[data_index_h + w]; + transformed_data[top_index_w - w] = input_data[data_index_h + w]; } } else { for (int w = 0; w < width; ++w) { @@ -390,15 +399,15 @@ void DataTransformer::Transform(Blob* input_blob, } } if (scale != Dtype(1)) { - DLOG(INFO) << "Scale: " << scale; + DLOG(INFO)<< "Scale: " << scale; caffe_scal(size, scale, transformed_data); } } -template +template void DataTransformer::InitRand() { - const bool needs_rand = param_.mirror() || - (phase_ == TRAIN && param_.crop_size()); + const bool needs_rand = param_.mirror() + || (phase_ == TRAIN && param_.crop_size()); if (needs_rand) { const unsigned int rng_seed = caffe_rng_rand(); rng_.reset(new Caffe::RNG(rng_seed)); @@ -407,12 +416,11 @@ void DataTransformer::InitRand() { } } -template +template int DataTransformer::Rand(int n) { CHECK(rng_); CHECK_GT(n, 0); - caffe::rng_t* rng = - static_cast(rng_->generator()); + caffe::rng_t* rng = static_cast(rng_->generator()); return ((*rng)() % n); } diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp new file mode 100644 index 00000000000..03c9facd444 --- /dev/null +++ b/src/caffe/greentea/cl_kernels.cpp @@ -0,0 +1,25 @@ +// AUTOMATICALLY GENERATED FILE, DO NOT EDIT +#include "caffe/greentea/cl_kernels.hpp" +#include +#include +namespace caffe { +std::string activation_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void relu_forward_s(const int n, __global const float* in,\n __global float* out, float negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void relu_forward_d(const int n, __global const double* in,\n __global double* out, double negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; +std::string aux_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void gpu_set_s(const int n, const float alpha, __global float* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}\n\n__kernel void gpu_set_d(const int n, const double alpha, __global double* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string channel_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_channel_max_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_max_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double maxval = (double) -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_subtract_s(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const float* channel_max,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_subtract_d(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const double* channel_max,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_exp_s(const int count, __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_exp_d(const int count, __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_channel_sum_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_sum_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_div_s(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const float* channel_sum,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_div_d(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const double* channel_sum,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_dot_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data_1,\n __global const float* data_2,\n __global float* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}\n\n__kernel void kernel_channel_dot_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data_1,\n __global const double* data_2,\n __global double* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; +std::string im2col_sk_gpu_kernel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string pooling_sk_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global const float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n __global int* mask,\n __global float* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string softmax_loss_gpu = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MIN 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data,\n __global const float* label,\n __global float* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global float* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((float) (prob_data[n * dim + label_value * spatial_dim + s]),\n (float) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}\n\n__kernel void softmax_loss_forward_gpu_d(int n,\n __global const double* prob_data,\n __global const double* label,\n __global double* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global double* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((double) (prob_data[n * dim + label_value * spatial_dim + s]),\n (double) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}"; +viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { + std::stringstream ss; + ss << activation_kernels << "\n\n"; + ss << aux_kernels << "\n\n"; + ss << channel_kernels << "\n\n"; + ss << im2col_sk_gpu_kernel << "\n\n"; + ss << pooling_sk_kernels << "\n\n"; + ss << softmax_loss_gpu << "\n\n"; + std::string kernel_string = ss.str(); + const char* kernel_program = kernel_string.c_str(); + viennacl::ocl::program &program = ctx.add_program(kernel_program,"kernel_program"); + return program; +} +} diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh new file mode 100644 index 00000000000..8829ac408db --- /dev/null +++ b/src/caffe/greentea/cl_kernels.sh @@ -0,0 +1,57 @@ +#! /bin/bash +# This script converts all OpenCL Kernels to C++ char strings and defines the helper function to +# load the kernels to ViennaCL/OpenCL contexts. +# Outputs (overwrites): cl_kernels.hpp and cl_kernels.cpp + +CL_KERNELDIR="src/caffe/greentea/cl_kernels/*.cl" +HEADER='include/caffe/greentea/cl_kernels.hpp' +INCHEADER='caffe/greentea/cl_kernels.hpp' +SOURCE='src/caffe/greentea/cl_kernels.cpp' + +echo "// AUTOMATICALLY GENERATED FILE, DO NOT EDIT" > $HEADER +echo "// AUTOMATICALLY GENERATED FILE, DO NOT EDIT" > $SOURCE + +echo "#ifndef GREENTEA_CL_KERNELS_HPP_" >> $HEADER +echo "#define GREENTEA_CL_KERNELS_HPP_" >> $HEADER +echo "#include \"caffe/greentea/greentea.hpp\"" >> $HEADER +echo "#include \"viennacl/backend/opencl.hpp\"" >> $HEADER +echo "#include \"viennacl/ocl/context.hpp\"" >> $HEADER +echo "#include \"viennacl/ocl/device.hpp\"" >> $HEADER +echo "#include \"viennacl/ocl/platform.hpp\"" >> $HEADER +echo "#include \"viennacl/ocl/backend.hpp\"" >> $HEADER +echo "namespace caffe {" >> $HEADER +echo "#include \"$INCHEADER\"" >> $SOURCE +echo "#include " >> $SOURCE +echo "#include " >> $SOURCE +echo "namespace caffe {" >> $SOURCE + +echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx);" >> $HEADER +echo "}" >> $HEADER +echo "#endif" >> $HEADER + +shopt -s nullglob +for CL_KERNEL in $CL_KERNELDIR +do + CL_KERNEL_STR=`cat $CL_KERNEL` + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo "std::string $CL_KERNEL_NAME = \"$CL_KERNEL_STR\";" | sed -e ':a;N;$!ba;s/\n/\\n/g' >> $SOURCE +done + +echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) {" >> $SOURCE +echo " std::stringstream ss;" >> $SOURCE +shopt -s nullglob +for CL_KERNEL in $CL_KERNELDIR +do + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo " ss << $CL_KERNEL_NAME << \"\\n\\n\";" >> $SOURCE +done +echo " std::string kernel_string = ss.str();" >> $SOURCE +echo " const char* kernel_program = kernel_string.c_str();" >> $SOURCE +echo " viennacl::ocl::program &program = ctx.add_program(kernel_program,\"kernel_program\");" >> $SOURCE +echo " return program;" >> $SOURCE +echo "}" >> $SOURCE +echo "}" >> $SOURCE diff --git a/src/caffe/greentea/cl_kernels/activation_kernels.cl b/src/caffe/greentea/cl_kernels/activation_kernels.cl new file mode 100644 index 00000000000..db44980e154 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/activation_kernels.cl @@ -0,0 +1,24 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define FLT_MAX 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void relu_forward_s(const int n, __global const float* in, + __global float* out, float negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} + +__kernel void relu_forward_d(const int n, __global const double* in, + __global double* out, double negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} diff --git a/src/caffe/greentea/cl_kernels/aux_kernels.cl b/src/caffe/greentea/cl_kernels/aux_kernels.cl new file mode 100644 index 00000000000..68f4dfd9e3f --- /dev/null +++ b/src/caffe/greentea/cl_kernels/aux_kernels.cl @@ -0,0 +1,22 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define FLT_MAX 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void gpu_set_s(const int n, const float alpha, __global float* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + +__kernel void gpu_set_d(const int n, const double alpha, __global double* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} diff --git a/src/caffe/greentea/cl_kernels/channel_kernels.cl b/src/caffe/greentea/cl_kernels/channel_kernels.cl new file mode 100644 index 00000000000..028b488bdc2 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/channel_kernels.cl @@ -0,0 +1,176 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define FLT_MAX 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void kernel_channel_max_s(const int num, const int channels, + const int spatial_dim, + __global const float* data, + __global float* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); + } + out[index] = maxval; + } +} + +__kernel void kernel_channel_max_d(const int num, const int channels, + const int spatial_dim, + __global const double* data, + __global double* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + double maxval = (double) -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); + } + out[index] = maxval; + } +} + +__kernel void kernel_channel_subtract_s(const int count, const int num, + const int channels, + const int spatial_dim, + __global const float* channel_max, + __global float* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void kernel_channel_subtract_d(const int count, const int num, + const int channels, + const int spatial_dim, + __global const double* channel_max, + __global double* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void kernel_exp_s(const int count, __global const float* data, + __global float* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void kernel_exp_d(const int count, __global const double* data, + __global double* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void kernel_channel_sum_s(const int num, const int channels, + const int spatial_dim, + __global const float* data, + __global float* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void kernel_channel_sum_d(const int num, const int channels, + const int spatial_dim, + __global const double* data, + __global double* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + double sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void kernel_channel_div_s(const int count, const int num, + const int channels, const int spatial_dim, + __global const float* channel_sum, + __global float* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void kernel_channel_div_d(const int count, const int num, + const int channels, const int spatial_dim, + __global const double* channel_sum, + __global double* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void kernel_channel_dot_s(const int num, const int channels, + const int spatial_dim, + __global const float* data_1, + __global const float* data_2, + __global float* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +__kernel void kernel_channel_dot_d(const int num, const int channels, + const int spatial_dim, + __global const double* data_1, + __global const double* data_2, + __global double* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + double dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} diff --git a/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl b/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl new file mode 100644 index 00000000000..df18bfa8c22 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl @@ -0,0 +1,84 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im, + const int height, const int width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, const int height_col, + const int width_col, + __global float* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global float* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const float* data_im_ptr = data_im; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} + +__kernel void im2col_sk_gpu_kernel_d(const int n, + __global const double* data_im, + const int height, const int width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, const int height_col, + const int width_col, + __global double* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global double* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const double* data_im_ptr = data_im; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} diff --git a/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl b/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl new file mode 100644 index 00000000000..46937131d34 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl @@ -0,0 +1,103 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define FLT_MAX 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void max_pool_forward_sk_s(const int nthreads, + __global const float* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, + const int ext_kernel_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, + const int pad_w, __global float* top_data, + __global int* mask, + __global float* top_mask) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + float maxval = -FLT_MAX; + int maxidx = -1; + bottom_data += (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data[maxidx]; + } + } + } + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void max_pool_forward_sk_d(const int nthreads, + __global const double* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, + const int ext_kernel_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, + const int pad_w, __global double* top_data, + __global int* mask, + __global double* top_mask) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + double maxval = -FLT_MAX; + int maxidx = -1; + bottom_data += (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data[maxidx]; + } + } + } + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} diff --git a/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl b/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl new file mode 100644 index 00000000000..aee2c312dc4 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl @@ -0,0 +1,61 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define FLT_MIN 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data, + __global const float* label, + __global float* loss, const int num, + const int dim, const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global float* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((float) (prob_data[n * dim + label_value * spatial_dim + s]), + (float) FLT_MIN)); + counts[index] = 1; + } + } + +} + +__kernel void softmax_loss_forward_gpu_d(int n, + __global const double* prob_data, + __global const double* label, + __global double* loss, const int num, + const int dim, const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global double* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((double) (prob_data[n * dim + label_value * spatial_dim + s]), + (double) FLT_MIN)); + counts[index] = 1; + } + } + +} diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp new file mode 100644 index 00000000000..d824b37f0d3 --- /dev/null +++ b/src/caffe/greentea/greentea.cpp @@ -0,0 +1,72 @@ +/* + * greentea.cpp + * + * Created on: Apr 6, 2015 + * Author: Fabian Tschopp + */ + +#include "caffe/greentea/greentea.hpp" + +namespace caffe { + +#ifdef USE_GREENTEA +template +cl_mem Subregion(cl_mem in, size_t off, size_t size) { + cl_buffer_region* region = new cl_buffer_region(); + region->origin = sizeof(Dtype) * off; + region->size = sizeof(Dtype) * size; + cl_int status; + const cl_mem out = clCreateSubBuffer(in, CL_MEM_READ_WRITE, + CL_BUFFER_CREATE_TYPE_REGION, + region, &status); + std::cout << "Subregion: " << status << std::endl; + return out; +} + +template cl_mem Subregion(cl_mem in, size_t off, size_t size); +template cl_mem Subregion(cl_mem in, size_t off, size_t size); + +template +viennacl::vector WrapVector(cl_mem in) { + if (in == NULL) { + size_t size; + clGetMemObjectInfo(in, CL_MEM_SIZE, sizeof(size_t), &size, NULL); + viennacl::vector out(in, viennacl::OPENCL_MEMORY, + size / sizeof(Dtype)); + return out; + } else { + std::cout << "HERE!" << std::endl; + void* ptr = NULL; + viennacl::vector out((cl_mem)&ptr, viennacl::OPENCL_MEMORY, 0); + return out; + } +} + +template viennacl::vector WrapVector(cl_mem in); +template viennacl::vector WrapVector(cl_mem in); +template viennacl::vector WrapVector(cl_mem in); +template viennacl::vector WrapVector(cl_mem in); + +#endif + +DeviceContext::DeviceContext() + : id_(0), + backend_(Backend::BACKEND_CUDA) { + +} + +DeviceContext::DeviceContext(int id, Backend backend) + : id_(id), + backend_(backend) { + +} + +Backend DeviceContext::backend() const { + return backend_; +} + +int DeviceContext::id() const { + return id_; +} + +} diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp new file mode 100644 index 00000000000..23be6964ed5 --- /dev/null +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -0,0 +1,67 @@ +/* + * greentea_im2col.cpp + * + * Created on: Apr 8, 2015 + * Author: Fabian Tschopp + */ + +#include "caffe/greentea/greentea_im2col.hpp" + +namespace caffe { + +template +void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, const cl_mem data_im, + const int channels, const int height, + const int width, const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, cl_mem data_col) { + + int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; + int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; + int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; + int num_kernels = channels * height_col * width_col; + + viennacl::ocl::kernel &kernel = prog.get_kernel( + CL_KERNEL_SELECT("im2col_sk_gpu_kernel")); + + viennacl::ocl::enqueue( + kernel(num_kernels, WrapVector(data_im), height, width, kernel_h, + kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, + stride_w, kstride_h, kstride_w, height_col, width_col, + WrapVector(data_col)), + ctx.get_queue()); +} + +// Explicit instantiation +template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_im, + const int channels, + const int height, const int width, + const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + cl_mem data_col); + +template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_im, + const int channels, + const int height, const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + cl_mem data_col); + +} diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp new file mode 100644 index 00000000000..e0ec822f792 --- /dev/null +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -0,0 +1,584 @@ +/* + * greentea_math_functions.cpp + * + * Created on: Apr 6, 2015 + * Author: Fabian Tschopp + */ + +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" + +#include +#include + +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/greentea/greentea_math_functions.hpp" +#include "viennacl/backend/opencl.hpp" +#include "viennacl/ocl/context.hpp" +#include "viennacl/ocl/device.hpp" +#include "viennacl/ocl/platform.hpp" +#include "viennacl/ocl/backend.hpp" + +namespace caffe { + +// Copy from OpenCL buffer to main memory +void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, + viennacl::ocl::context &ctx) { + if (Y != NULL) { + cl_int err = clEnqueueReadBuffer(ctx.get_queue().handle().get(), X, CL_TRUE, + 0, N, Y, 0, NULL, NULL); + } + ctx.get_queue().finish(); +} + +// Copy from main memory to OpenCL buffer +void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, + viennacl::ocl::context &ctx) { + if (X != NULL) { + cl_int err = clEnqueueWriteBuffer(ctx.get_queue().handle().get(), Y, + CL_TRUE, 0, N, X, 0, NULL, NULL); + } + ctx.get_queue().finish(); +} + +// Copy from OpenCL buffer to OpenCL buffer +template +void greentea_copy(const int N, const cl_mem X, cl_mem Y, + viennacl::ocl::context &ctx) { + if (X != Y) { + cl_int err = clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, 0, 0, + sizeof(Dtype) * N, 0, NULL, NULL); + } + ctx.get_queue().finish(); +} + +// Explicit instantiations +template void greentea_copy(const int N, const cl_mem X, cl_mem Y, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, cl_mem Y, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, cl_mem Y, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, cl_mem Y, + viennacl::ocl::context &ctx); + +template<> +void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const float alpha, + const cl_mem A, const cl_mem B, const float beta, + cl_mem C) { + + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + + ViennaCLTranspose vclTransA = + (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + ViennaCLTranspose vclTransB = + (TransB == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + + ViennaCLOrder vclOrderA = ViennaCLRowMajor; + ViennaCLOrder vclOrderB = ViennaCLRowMajor; + ViennaCLOrder vclOrderC = ViennaCLRowMajor; + + int offArow = 0; + int offAcol = 0; + int incArow = 1; + int incAcol = 1; + int offBrow = 0; + int offBcol = 0; + int incBrow = 1; + int incBcol = 1; + int offCrow = 0; + int offCcol = 0; + int incCrow = 1; + int incCcol = 1; + + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + int ldc = N; + + /*GREENTEA_BLAS_CHECK( + ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, + vclOrderC, M, N, K, alpha, A, offArow, offAcol, + incArow, incAcol, lda, B, offBrow, offBcol, incBrow, + incBcol, ldb, beta, C, offCrow, offCcol, incCrow, + incCcol, ldc));*/ + +} + +template<> +void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const double alpha, + const cl_mem A, const cl_mem B, + const double beta, cl_mem C) { + + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + + ViennaCLTranspose vclTransA = + (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + ViennaCLTranspose vclTransB = + (TransB == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + + ViennaCLOrder vclOrderA = ViennaCLRowMajor; + ViennaCLOrder vclOrderB = ViennaCLRowMajor; + ViennaCLOrder vclOrderC = ViennaCLRowMajor; + + int offArow = 0; + int offAcol = 0; + int incArow = 1; + int incAcol = 1; + int offBrow = 0; + int offBcol = 0; + int incBrow = 1; + int incBcol = 1; + int offCrow = 0; + int offCcol = 0; + int incCrow = 1; + int incCcol = 1; + + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + int ldc = 0; + + GREENTEA_BLAS_CHECK( + ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, + vclOrderC, M, N, K, alpha, A, offArow, offAcol, + incArow, incAcol, lda, B, offBrow, offBcol, incBrow, + incBcol, ldb, beta, C, offCrow, offCcol, incCrow, + incCcol, ldc)); + +} + +/* template<> + void greentea_gpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const double alpha, + const double* A, const double* B, + const double beta, double* C) { + // Note that cublas follows fortran order. + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + cublasOperation_t cuTransB = + (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + CUBLAS_CHECK( + cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, + B, ldb, A, lda, &beta, C, N)); + } + + template<> + void greentea_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const float alpha, const float* A, + const float* x, const float beta, float* y) { + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; + CUBLAS_CHECK( + cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, + &beta, y, 1)); + } + + template<> + void greentea_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const double alpha, const double* A, + const double* x, const double beta, double* y) { + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; + CUBLAS_CHECK( + cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, + &beta, y, 1)); + } + + template<> + void greentea_gpu_axpy(const int N, const float alpha, const float* X, + float* Y) { + CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); + } + + template<> + void greentea_gpu_axpy(const int N, const double alpha, const double* X, + double* Y) { + CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); + } + + void greentea_gpu_memcpy(const size_t N, const void* X, void* Y) { + if (X != Y) { + CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) + } + } + + template<> + void greentea_gpu_scal(const int N, const float alpha, float *X) { + CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); + } + + template<> + void greentea_gpu_scal(const int N, const double alpha, double *X) { + CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); + } + + template<> + void greentea_gpu_axpby(const int N, const float alpha, const float* X, + const float beta, float* Y) { + greentea_gpu_scal(N, beta, Y); + greentea_gpu_axpy(N, alpha, X, Y); + } + + template<> + void greentea_gpu_axpby(const int N, const double alpha, + const double* X, const double beta, double* Y) { + greentea_gpu_scal(N, beta, Y); + greentea_gpu_axpy(N, alpha, X, Y); + } + + template<> + void greentea_gpu_dot(const int n, const float* x, const float* y, + float* out) { + CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); + } + + template<> + void greentea_gpu_dot(const int n, const double* x, const double* y, + double * out) { + CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); + } + + template<> + void greentea_gpu_asum(const int n, const float* x, float* y) { + CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); + } + + template<> + void greentea_gpu_asum(const int n, const double* x, double* y) { + CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); + } + + template<> + void greentea_gpu_scale(const int n, const float alpha, const float *x, + float* y) { + CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); + CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); + } + + template<> + void greentea_gpu_scale(const int n, const double alpha, + const double *x, double* y) { + CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); + CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); + } + + template + __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = alpha; + } + } + + template + void greentea_gpu_set(const int N, const Dtype alpha, Dtype* Y) { + if (alpha == 0) { + CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) + return; + } + // NOLINT_NEXT_LINE(whitespace/operators) + set_kernel<<>>( + N, alpha, Y); + } + + template void greentea_gpu_set(const int N, const int alpha, int* Y); + template void greentea_gpu_set(const int N, const float alpha, + float* Y); + template void greentea_gpu_set(const int N, const double alpha, + double* Y); + + template + __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] += alpha; + } + } + + template<> + void greentea_gpu_add_scalar(const int N, const float alpha, float* Y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_scalar_kernel<<>>( + N, alpha, Y); + } + + template<> + void greentea_gpu_add_scalar(const int N, const double alpha, double* Y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_scalar_kernel<<>>( + N, alpha, Y); + } + + template + __global__ void add_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] + b[index]; + } + } + + template<> + void greentea_gpu_add(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_kernel<<>>( + N, a, b, y); + } + + template<> + void greentea_gpu_add(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_kernel<<>>( + N, a, b, y); + } + + template + __global__ void sub_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] - b[index]; + } + } + + template<> + void greentea_gpu_sub(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + sub_kernel<<>>( + N, a, b, y); + } + + template<> + void greentea_gpu_sub(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + sub_kernel<<>>( + N, a, b, y); + } + + template + __global__ void mul_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] * b[index]; + } + } + + template<> + void greentea_gpu_mul(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + mul_kernel<<>>( + N, a, b, y); + } + + template<> + void greentea_gpu_mul(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + mul_kernel<<>>( + N, a, b, y); + } + + template + __global__ void div_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] / b[index]; + } + } + + template<> + void greentea_gpu_div(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + div_kernel<<>>( + N, a, b, y); + } + + template<> + void greentea_gpu_div(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + div_kernel<<>>( + N, a, b, y); + } + + template + __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = abs(a[index]); + } + } + + template<> + void greentea_gpu_abs(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + abs_kernel<<>>( + N, a, y); + } + + template<> + void greentea_gpu_abs(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + abs_kernel<<>>( + N, a, y); + } + + template + __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = exp(a[index]); + } + } + + template<> + void greentea_gpu_exp(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + exp_kernel<<>>( + N, a, y); + } + + template<> + void greentea_gpu_exp(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + exp_kernel<<>>( + N, a, y); + } + + template + __global__ void powx_kernel(const int n, const Dtype* a, + const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = pow(a[index], alpha); + } + } + + template<> + void greentea_gpu_powx(const int N, const float* a, const float alpha, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + powx_kernel<<>>( + N, a, alpha, y); + } + + template<> + void greentea_gpu_powx(const int N, const double* a, const double alpha, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + powx_kernel<<>>( + N, a, alpha, y); + } + + DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) + - (x[index] < Dtype(0))); + DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); + + __global__ void popc_kernel(const int n, const float* a, const float* b, + uint8_t* y) { + CUDA_KERNEL_LOOP(index, n) + { + y[index] = __popc( + static_cast(a[index]) ^ static_cast(b[index])); + } + } + + __global__ void popcll_kernel(const int n, const double* a, const double* b, + uint8_t* y) { + CUDA_KERNEL_LOOP(index, n) + { + y[index] = __popcll( + static_cast(a[index]) ^ static_cast(b[index])); + } + } + + template<> + uint32_t greentea_gpu_hamming_distance(const int n, const float* x, + const float* y) { + // TODO: Fix caffe_gpu_hamming_distance (see failing unit test + // TestHammingDistanceGPU in test_math_functions.cpp). + NOT_IMPLEMENTED; + thrust::device_vector popcounts(n); + // NOLINT_NEXT_LINE(whitespace/operators) + popc_kernel<<>>( + n, x, y, thrust::raw_pointer_cast(popcounts.data())); + return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, + thrust::plus()); + } + + template<> + uint32_t greentea_gpu_hamming_distance(const int n, const double* x, + const double* y) { + // TODO: Fix caffe_gpu_hamming_distance (see failing unit test + // TestHammingDistanceGPU in test_math_functions.cpp). + NOT_IMPLEMENTED; + thrust::device_vector popcounts(n); + // NOLINT_NEXT_LINE(whitespace/operators) + popcll_kernel<<>>( + n, x, y, thrust::raw_pointer_cast(popcounts.data())); + return thrust::reduce(popcounts.begin(), popcounts.end(), + (uint32_t) 0, + thrust::plus()); + } + + void greentea_gpu_rng_uniform(const int n, unsigned int* r) { + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); + } + + template<> + void greentea_gpu_rng_uniform(const int n, const float a, const float b, + float* r) { + CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); + const float range = b - a; + if (range != static_cast(1)) { + greentea_gpu_scal(n, range, r); + } + if (a != static_cast(0)) { + greentea_gpu_add_scalar(n, a, r); + } + } + + template<> + void greentea_gpu_rng_uniform(const int n, const double a, + const double b, double* r) { + CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); + const double range = b - a; + if (range != static_cast(1)) { + greentea_gpu_scal(n, range, r); + } + if (a != static_cast(0)) { + greentea_gpu_add_scalar(n, a, r); + } + } + + template<> + void greentea_gpu_rng_gaussian(const int n, const float mu, const float sigma, + float* r) { + CURAND_CHECK(curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); + } + + template<> + void greentea_gpu_rng_gaussian(const int n, const double mu, const double sigma, + double* r) { + CURAND_CHECK( + curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); + } + */ + +} diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index d6a1cac5090..eccbfb2caa4 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -11,10 +11,24 @@ namespace caffe { +template +shared_ptr > GetDataRandTransformLayer(const LayerParameter& param) { + return shared_ptr >(new DataRandTransformLayer(param)); +} + +template +shared_ptr > GetConvolutionSKLayer(const LayerParameter& param) { + return shared_ptr >(new ConvolutionSKLayer(param)); +} + +template +shared_ptr > GetPoolingSKLayer(const LayerParameter& param) { + return shared_ptr >(new PoolingSKLayer(param)); +} + // Get convolution layer according to engine. -template -shared_ptr > GetConvolutionLayer( - const LayerParameter& param) { +template +shared_ptr > GetConvolutionLayer(const LayerParameter& param) { ConvolutionParameter_Engine engine = param.convolution_param().engine(); if (engine == ConvolutionParameter_Engine_DEFAULT) { engine = ConvolutionParameter_Engine_CAFFE; @@ -29,14 +43,14 @@ shared_ptr > GetConvolutionLayer( return shared_ptr >(new CuDNNConvolutionLayer(param)); #endif } else { - LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + LOG(FATAL)<< "Layer " << param.name() << " has unknown engine."; } } REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer); // Get pooling layer according to engine. -template +template shared_ptr > GetPoolingLayer(const LayerParameter& param) { PoolingParameter_Engine engine = param.pooling_param().engine(); if (engine == PoolingParameter_Engine_DEFAULT) { @@ -53,20 +67,20 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { if (p_param.pad() || p_param.pad_h() || p_param.pad_w() || param.top_size() > 1) { LOG(INFO) << "CUDNN does not support padding or multiple tops. " - << "Using Caffe's own pooling layer."; + << "Using Caffe's own pooling layer."; return shared_ptr >(new PoolingLayer(param)); } return shared_ptr >(new CuDNNPoolingLayer(param)); #endif } else { - LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + LOG(FATAL)<< "Layer " << param.name() << " has unknown engine."; } } REGISTER_LAYER_CREATOR(Pooling, GetPoolingLayer); // Get relu layer according to engine. -template +template shared_ptr > GetReLULayer(const LayerParameter& param) { ReLUParameter_Engine engine = param.relu_param().engine(); if (engine == ReLUParameter_Engine_DEFAULT) { @@ -82,14 +96,14 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { return shared_ptr >(new CuDNNReLULayer(param)); #endif } else { - LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + LOG(FATAL)<< "Layer " << param.name() << " has unknown engine."; } } REGISTER_LAYER_CREATOR(ReLU, GetReLULayer); // Get sigmoid layer according to engine. -template +template shared_ptr > GetSigmoidLayer(const LayerParameter& param) { SigmoidParameter_Engine engine = param.sigmoid_param().engine(); if (engine == SigmoidParameter_Engine_DEFAULT) { @@ -105,14 +119,14 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { return shared_ptr >(new CuDNNSigmoidLayer(param)); #endif } else { - LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + LOG(FATAL)<< "Layer " << param.name() << " has unknown engine."; } } REGISTER_LAYER_CREATOR(Sigmoid, GetSigmoidLayer); // Get softmax layer according to engine. -template +template shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { SoftmaxParameter_Engine engine = param.softmax_param().engine(); if (engine == SoftmaxParameter_Engine_DEFAULT) { @@ -128,14 +142,14 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { return shared_ptr >(new CuDNNSoftmaxLayer(param)); #endif } else { - LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + LOG(FATAL)<< "Layer " << param.name() << " has unknown engine."; } } REGISTER_LAYER_CREATOR(Softmax, GetSoftmaxLayer); // Get tanh layer according to engine. -template +template shared_ptr > GetTanHLayer(const LayerParameter& param) { TanHParameter_Engine engine = param.tanh_param().engine(); if (engine == TanHParameter_Engine_DEFAULT) { @@ -151,7 +165,7 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { return shared_ptr >(new CuDNNTanHLayer(param)); #endif } else { - LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + LOG(FATAL)<< "Layer " << param.name() << " has unknown engine."; } } diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index 90aad675ed3..db65383af81 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -14,30 +14,19 @@ template void AccuracyLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { top_k_ = this->layer_param_.accuracy_param().top_k(); - - has_ignore_label_ = - this->layer_param_.accuracy_param().has_ignore_label(); - if (has_ignore_label_) { - ignore_label_ = this->layer_param_.accuracy_param().ignore_label(); - } } template void AccuracyLayer::Reshape( const vector*>& bottom, const vector*>& top) { - CHECK_LE(top_k_, bottom[0]->count() / bottom[1]->count()) + CHECK_EQ(bottom[0]->num(), bottom[1]->num()) + << "The data and label should have the same number."; + CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num()) << "top_k must be less than or equal to the number of classes."; - label_axis_ = - bottom[0]->CanonicalAxisIndex(this->layer_param_.accuracy_param().axis()); - outer_num_ = bottom[0]->count(0, label_axis_); - inner_num_ = bottom[0]->count(label_axis_ + 1); - CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) - << "Number of labels must match number of predictions; " - << "e.g., if label axis == 1 and prediction shape is (N, C, H, W), " - << "label count (number of labels) must be N*H*W, " - << "with integer values in {0, 1, ..., C-1}."; - vector top_shape(0); // Accuracy is a scalar; 0 axes. - top[0]->Reshape(top_shape); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + top[0]->Reshape(1, 1, 1, 1, this->device_context_); } template @@ -46,42 +35,31 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, Dtype accuracy = 0; const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); - const int dim = bottom[0]->count() / outer_num_; - const int num_labels = bottom[0]->shape(label_axis_); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); vector maxval(top_k_+1); vector max_id(top_k_+1); - int count = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; ++j) { - const int label_value = - static_cast(bottom_label[i * inner_num_ + j]); - if (has_ignore_label_ && label_value == ignore_label_) { - continue; - } - DCHECK_GE(label_value, 0); - DCHECK_LT(label_value, num_labels); - // Top-k accuracy - std::vector > bottom_data_vector; - for (int k = 0; k < num_labels; ++k) { - bottom_data_vector.push_back(std::make_pair( - bottom_data[i * dim + k * inner_num_ + j], k)); - } - std::partial_sort( - bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, - bottom_data_vector.end(), std::greater >()); - // check if true label is in top k predictions - for (int k = 0; k < top_k_; k++) { - if (bottom_data_vector[k].second == label_value) { - ++accuracy; - break; - } + for (int i = 0; i < num; ++i) { + // Top-k accuracy + std::vector > bottom_data_vector; + for (int j = 0; j < dim; ++j) { + bottom_data_vector.push_back( + std::make_pair(bottom_data[i * dim + j], j)); + } + std::partial_sort( + bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, + bottom_data_vector.end(), std::greater >()); + // check if true label is in top k predictions + for (int k = 0; k < top_k_; k++) { + if (bottom_data_vector[k].second == static_cast(bottom_label[i])) { + ++accuracy; + break; } - ++count; } } // LOG(INFO) << "Accuracy: " << accuracy; - top[0]->mutable_cpu_data()[0] = accuracy / count; + top[0]->mutable_cpu_data()[0] = accuracy / num; // Accuracy layer should not be used as a loss function. } diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index c4040cdcaaa..79cec2e09d6 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -23,10 +23,10 @@ void ArgMaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { if (out_max_val_) { // Produces max_ind and max_val - top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); + top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1, this->device_context_); } else { // Produces only max_ind - top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); + top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1, this->device_context_); } } diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index ccb3adc7e89..421949cddc8 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -8,11 +8,10 @@ namespace caffe { -template +template void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; + const vector*>& top) { + // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); CHECK(!conv_param.has_kernel_size() != @@ -22,11 +21,11 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, (conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; CHECK((!conv_param.has_pad() && conv_param.has_pad_h() - && conv_param.has_pad_w()) + && conv_param.has_pad_w()) || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; CHECK((!conv_param.has_stride() && conv_param.has_stride_h() - && conv_param.has_stride_w()) + && conv_param.has_stride_w()) || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; if (conv_param.has_kernel_size()) { @@ -35,8 +34,8 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, kernel_h_ = conv_param.kernel_h(); kernel_w_ = conv_param.kernel_w(); } - CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; if (!conv_param.has_pad_h()) { pad_h_ = pad_w_ = conv_param.pad(); } else { @@ -51,16 +50,15 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, } // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. - is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1 - && stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0; + is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1 && stride_h_ == 1 && stride_w_ == 1 + && pad_h_ == 0 && pad_w_ == 0; // Configure output channels and groups. channels_ = bottom[0]->channels(); num_output_ = this->layer_param_.convolution_param().num_output(); CHECK_GT(num_output_, 0); group_ = this->layer_param_.convolution_param().group(); CHECK_EQ(channels_ % group_, 0); - CHECK_EQ(num_output_ % group_, 0) - << "Number of output should be multiples of group."; + CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; if (reverse_dimensions()) { conv_out_channels_ = channels_; conv_in_channels_ = num_output_; @@ -73,7 +71,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // - blobs_[1] holds the biases (optional) bias_term_ = this->layer_param_.convolution_param().bias_term(); if (this->blobs_.size() > 0) { - LOG(INFO) << "Skipping parameter initialization"; + LOG(INFO)<< "Skipping parameter initialization"; } else { if (bias_term_) { this->blobs_.resize(2); @@ -83,47 +81,45 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Initialize and fill the weights: // output channels x input channels per-group x kernel height x kernel width this->blobs_[0].reset(new Blob( - conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_)); + conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_, this->device_context_)); shared_ptr > weight_filler(GetFiller( - this->layer_param_.convolution_param().weight_filler())); - weight_filler->Fill(this->blobs_[0].get()); - // If necessary, initialize and fill the biases. + this->layer_param_.convolution_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get(), this->device_context_); + // If necessary, initialize and fill the biases: + // 1 x 1 x 1 x output channels if (bias_term_) { - vector bias_shape(1, num_output_); - this->blobs_[1].reset(new Blob(bias_shape)); + this->blobs_[1].reset(new Blob(1, 1, 1, num_output_, this->device_context_)); shared_ptr > bias_filler(GetFiller( - this->layer_param_.convolution_param().bias_filler())); - bias_filler->Fill(this->blobs_[1].get()); + this->layer_param_.convolution_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get(), this->device_context_); } } // Propagate gradients to the parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); } -template +template void BaseConvolutionLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; + const vector*>& top) { num_ = bottom[0]->num(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); - CHECK_EQ(bottom[0]->channels(), channels_) << "Input size incompatible with" - " convolution kernel."; + CHECK_EQ(bottom[0]->channels(), channels_)<< "Input size incompatible with" + " convolution kernel."; // TODO: generalize to handle inputs of different shapes. for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { - CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num."; + CHECK_EQ(num_, bottom[bottom_id]->num())<< "Inputs must have same num."; CHECK_EQ(channels_, bottom[bottom_id]->channels()) - << "Inputs must have same channels."; + << "Inputs must have same channels."; CHECK_EQ(height_, bottom[bottom_id]->height()) - << "Inputs must have same height."; + << "Inputs must have same height."; CHECK_EQ(width_, bottom[bottom_id]->width()) - << "Inputs must have same width."; + << "Inputs must have same width."; } // Shape the tops. compute_output_shape(); for (int top_id = 0; top_id < top.size(); ++top_id) { - top[top_id]->Reshape(num_, num_output_, height_out_, width_out_); + top[top_id]->Reshape(num_, num_output_, height_out_, width_out_, this->device_context_); } if (reverse_dimensions()) { conv_in_height_ = height_out_; @@ -142,22 +138,23 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, // overly large memory usage. In the special case of 1x1 convolution // it goes lazily unused to save memory. if (reverse_dimensions()) { - col_buffer_.Reshape(1, kernel_dim_, height_, width_); + col_buffer_.Reshape(1, kernel_dim_, height_, width_, this->device_context_); } else { - col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_); + col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_, this->device_context_); } // Set up the all ones "bias multiplier" for adding biases by BLAS if (bias_term_) { - vector bias_multiplier_shape(1, height_out_ * width_out_); - bias_multiplier_.Reshape(bias_multiplier_shape); + bias_multiplier_.Reshape(1, 1, 1, height_out_ * width_out_, this->device_context_); caffe_set(bias_multiplier_.count(), Dtype(1), - bias_multiplier_.mutable_cpu_data()); + bias_multiplier_.mutable_cpu_data()); } } -template +template void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, - const Dtype* weights, Dtype* output, bool skip_im2col) { + const Dtype* weights, + Dtype* output, + bool skip_im2col) { const Dtype* col_buff = input; if (!is_1x1_) { if (!skip_im2col) { @@ -166,42 +163,47 @@ void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, col_buff = col_buffer_.cpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, - (Dtype)0., output + output_offset_ * g); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, + conv_out_channels_ / group_, conv_out_spatial_dim_, + kernel_dim_ / group_, (Dtype) 1., + weights + weight_offset_ * g, + col_buff + col_offset_ * g, (Dtype) 0., + output + output_offset_ * g); } } -template +template void BaseConvolutionLayer::forward_cpu_bias(Dtype* output, - const Dtype* bias) { + const Dtype* bias) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(), - (Dtype)1., output); + height_out_ * width_out_, 1, (Dtype) 1., bias, + bias_multiplier_.cpu_data(), (Dtype) 1., output); } -template +template void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, - const Dtype* weights, Dtype* input) { + const Dtype* weights, + Dtype* input) { Dtype* col_buff = col_buffer_.mutable_cpu_data(); if (is_1x1_) { col_buff = input; } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, - (Dtype)0., col_buff + col_offset_ * g); + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype) 1., weights + weight_offset_ * g, + output + output_offset_ * g, (Dtype) 0., + col_buff + col_offset_ * g); } if (!is_1x1_) { conv_col2im_cpu(col_buff, input); } } -template +template void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, - const Dtype* output, Dtype* weights) { + const Dtype* output, + Dtype* weights) { const Dtype* col_buff = input; if (!is_1x1_) { conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); @@ -209,24 +211,27 @@ void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, - (Dtype)1., weights + weight_offset_ * g); + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype) 1., output + output_offset_ * g, + col_buff + col_offset_ * g, (Dtype) 1., + weights + weight_offset_ * g); } } -template +template void BaseConvolutionLayer::backward_cpu_bias(Dtype* bias, - const Dtype* input) { + const Dtype* input) { caffe_cpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., - input, bias_multiplier_.cpu_data(), 1., bias); + input, bias_multiplier_.cpu_data(), 1., bias); } #ifndef CPU_ONLY -template +template void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, - const Dtype* weights, Dtype* output, bool skip_im2col) { + const Dtype* weights, + Dtype* output, + bool skip_im2col) { const Dtype* col_buff = input; if (!is_1x1_) { if (!skip_im2col) { @@ -235,42 +240,47 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, col_buff = col_buffer_.gpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, - (Dtype)0., output + output_offset_ * g); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, + conv_out_channels_ / group_, conv_out_spatial_dim_, + kernel_dim_ / group_, (Dtype) 1., + weights + weight_offset_ * g, + col_buff + col_offset_ * g, (Dtype) 0., + output + output_offset_ * g); } } -template +template void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, - const Dtype* bias) { + const Dtype* bias) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), - (Dtype)1., output); + height_out_ * width_out_, 1, (Dtype) 1., bias, + bias_multiplier_.gpu_data(), (Dtype) 1., output); } -template +template void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, - const Dtype* weights, Dtype* input) { + const Dtype* weights, + Dtype* input) { Dtype* col_buff = col_buffer_.mutable_gpu_data(); if (is_1x1_) { col_buff = input; } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, - (Dtype)0., col_buff + col_offset_ * g); + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype) 1., weights + weight_offset_ * g, + output + output_offset_ * g, (Dtype) 0., + col_buff + col_offset_ * g); } if (!is_1x1_) { conv_col2im_gpu(col_buff, input); } } -template +template void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, - const Dtype* output, Dtype* weights) { + const Dtype* output, + Dtype* weights) { const Dtype* col_buff = input; if (!is_1x1_) { conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); @@ -278,17 +288,18 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, - (Dtype)1., weights + weight_offset_ * g); + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype) 1., output + output_offset_ * g, + col_buff + col_offset_ * g, (Dtype) 1., + weights + weight_offset_ * g); } } -template +template void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, - const Dtype* input) { + const Dtype* input) { caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., - input, bias_multiplier_.gpu_data(), 1., bias); + input, bias_multiplier_.gpu_data(), 1., bias); } #endif // !CPU_ONLY diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 352200915d7..e9f18097923 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -24,7 +24,7 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, // The subclasses should setup the size of bottom and top DataLayerSetUp(bottom, top); data_transformer_.reset( - new DataTransformer(transform_param_, this->phase_)); + new DataTransformer(transform_param_, this->phase_, this->device_context_)); data_transformer_->InitRand(); } @@ -64,7 +64,7 @@ void BasePrefetchingDataLayer::Forward_cpu( DLOG(INFO) << "Thread joined"; // Reshape to loaded data. top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + this->prefetch_data_.height(), this->prefetch_data_.width(), this->device_context_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 775f6c47f7e..5cc956c147d 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -11,7 +11,7 @@ void BasePrefetchingDataLayer::Forward_gpu( JoinPrefetchThread(); // Reshape to loaded data. top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + this->prefetch_data_.height(), this->prefetch_data_.width(), this->device_context_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 1cac8fc3387..90630f3b9d3 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -6,86 +6,99 @@ namespace caffe { -template +template void ConcatLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - const ConcatParameter& concat_param = this->layer_param_.concat_param(); - CHECK(!(concat_param.has_axis() && concat_param.has_concat_dim())) - << "Either axis or concat_dim should be specified; not both."; + const vector*>& top) { + concat_dim_ = this->layer_param_.concat_param().concat_dim(); + CHECK_GE(concat_dim_, 0)<< + "concat_dim should be >= 0"; + CHECK_LE(concat_dim_, 1)<< + "For now concat_dim <=1, it can only concat num and channels"; } -template +template void ConcatLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - const int num_axes = bottom[0]->num_axes(); - const ConcatParameter& concat_param = this->layer_param_.concat_param(); - if (concat_param.has_concat_dim()) { - concat_axis_ = static_cast(concat_param.concat_dim()); - // Don't allow negative indexing for concat_dim, a uint32 -- almost - // certainly unintended. - CHECK_GE(concat_axis_, 0) << "casting concat_dim from uint32 to int32 " - << "produced negative result; concat_dim must satisfy " - << "0 <= concat_dim < " << kMaxBlobAxes; - CHECK_LT(concat_axis_, num_axes) << "concat_dim out of range."; - } else { - concat_axis_ = bottom[0]->CanonicalAxisIndex(concat_param.axis()); - } + const vector*>& top) { // Initialize with the first blob. - vector top_shape = bottom[0]->shape(); - num_concats_ = bottom[0]->count(0, concat_axis_); - concat_input_size_ = bottom[0]->count(concat_axis_ + 1); - int bottom_count_sum = bottom[0]->count(); + count_ = bottom[0]->count(); + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); for (int i = 1; i < bottom.size(); ++i) { - CHECK_EQ(num_axes, bottom[i]->num_axes()) - << "All inputs must have the same #axes."; - for (int j = 0; j < num_axes; ++j) { - if (j == concat_axis_) { continue; } - CHECK_EQ(top_shape[j], bottom[i]->shape(j)) - << "All inputs must have the same shape, except at concat_axis."; + count_ += bottom[i]->count(); + if (concat_dim_ == 0) { + num_ += bottom[i]->num(); + } else if (concat_dim_ == 1) { + channels_ += bottom[i]->channels(); + } else if (concat_dim_ == 2) { + height_ += bottom[i]->height(); + } else if (concat_dim_ == 3) { + width_ += bottom[i]->width(); } - bottom_count_sum += bottom[i]->count(); - top_shape[concat_axis_] += bottom[i]->shape(concat_axis_); } - top[0]->Reshape(top_shape); - CHECK_EQ(bottom_count_sum, top[0]->count()); + top[0]->Reshape(num_, channels_, height_, width_, this->device_context_); + CHECK_EQ(count_, top[0]->count()); } -template +template void ConcatLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { Dtype* top_data = top[0]->mutable_cpu_data(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->cpu_data(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, - bottom_data + n * bottom_concat_axis * concat_input_size_, - top_data + (n * top_concat_axis + offset_concat_axis) - * concat_input_size_); + if (concat_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + int num_elem = bottom[i]->count(); + caffe_copy(num_elem, bottom_data, top_data + top[0]->offset(offset_num)); + offset_num += bottom[i]->num(); } - offset_concat_axis += bottom_concat_axis; + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + int num_elem = bottom[i]->channels() * bottom[i]->height() + * bottom[i]->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, bottom_data + bottom[i]->offset(n), + top_data + top[0]->offset(n, offset_channel)); + } + offset_channel += bottom[i]->channels(); + } // concat_dim_ is guaranteed to be 0 or 1 by LayerSetUp. } } -template +template void ConcatLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - for (int i = 0; i < bottom.size(); ++i) { - if (!propagate_down[i]) { continue; } - Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + - (n * top_concat_axis + offset_concat_axis) * concat_input_size_, - bottom_diff + n * bottom_concat_axis * concat_input_size_); + if (concat_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; + if (propagate_down[i]) { + Dtype* bottom_diff = blob->mutable_cpu_diff(); + caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num), + bottom_diff); + } + offset_num += blob->num(); } - offset_concat_axis += bottom_concat_axis; - } + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; + if (propagate_down[i]) { + Dtype* bottom_diff = blob->mutable_cpu_diff(); + int num_elem = blob->channels() * blob->height() * blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel), + bottom_diff + blob->offset(n)); + } + } + offset_channel += blob->channels(); + } + } // concat_dim_ is guaranteed to be 0 or 1 by LayerSetUp. } #ifdef CPU_ONLY diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index dbadb5aeb30..88fc090025f 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -10,18 +10,29 @@ template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, - bottom_data + n * bottom_concat_axis * concat_input_size_, - top_data + (n * top_concat_axis + offset_concat_axis) - * concat_input_size_); + if (concat_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + caffe_copy(bottom[i]->count(), bottom_data, + top_data + top[0]->offset(offset_num)); + offset_num += bottom[i]->num(); } - offset_concat_axis += bottom_concat_axis; + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + int num_elem = + bottom[i]->channels() * bottom[i]->height() * bottom[i]->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, bottom_data+bottom[i]->offset(n), + top_data + top[0]->offset(n, offset_channel)); + } + offset_channel += bottom[i]->channels(); + } + } else { + LOG(FATAL) << "concat_dim along dim" << concat_dim_ << + " not implemented yet"; } } @@ -29,18 +40,34 @@ template void ConcatLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - for (int i = 0; i < bottom.size(); ++i) { - if (!propagate_down[i]) { continue; } - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + - (n * top_concat_axis + offset_concat_axis) * concat_input_size_, - bottom_diff + n * bottom_concat_axis * concat_input_size_); + if (concat_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; + if (propagate_down[i]) { + Dtype* bottom_diff = blob->mutable_gpu_diff(); + caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num), + bottom_diff); + } + offset_num += blob->num(); + } + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; + if (propagate_down[i]) { + Dtype* bottom_diff = blob->mutable_gpu_diff(); + int num_elem = blob->channels()*blob->height()*blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel), + bottom_diff + blob->offset(n)); + } + } + offset_channel += blob->channels(); } - offset_concat_axis += bottom_concat_axis; + } else { + LOG(FATAL) << "concat_dim along dim" << concat_dim_ << + " not implemented yet"; } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 0692c11c257..9bb02688d01 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -20,11 +20,11 @@ void ContrastiveLossLayer::LayerSetUp( CHECK_EQ(bottom[2]->channels(), 1); CHECK_EQ(bottom[2]->height(), 1); CHECK_EQ(bottom[2]->width(), 1); - diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, this->device_context_); + diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, this->device_context_); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1, this->device_context_); // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1, this->device_context_); for (int i = 0; i < bottom[0]->channels(); ++i) summer_vec_.mutable_cpu_data()[i] = Dtype(1); } diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index c0c9f6f3371..0d41c47424b 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -8,7 +8,7 @@ namespace caffe { -template +template void ConvolutionLayer::compute_output_shape() { this->height_out_ = (this->height_ + 2 * this->pad_h_ - this->kernel_h_) / this->stride_h_ + 1; @@ -16,16 +16,16 @@ void ConvolutionLayer::compute_output_shape() { / this->stride_w_ + 1; } -template +template void ConvolutionLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* weight = this->blobs_[0]->cpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); for (int n = 0; n < this->num_; ++n) { this->forward_cpu_gemm(bottom_data + bottom[i]->offset(n), weight, - top_data + top[i]->offset(n)); + top_data + top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->cpu_data(); this->forward_cpu_bias(top_data + top[i]->offset(n), bias); @@ -34,9 +34,10 @@ void ConvolutionLayer::Forward_cpu(const vector*>& bottom, } } -template +template void ConvolutionLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); if (this->param_propagate_down_[0]) { @@ -44,7 +45,7 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, } if (this->bias_term_ && this->param_propagate_down_[1]) { caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); + this->blobs_[1]->mutable_cpu_diff()); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); @@ -62,12 +63,12 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_cpu_gemm(bottom_data + bottom[i]->offset(n), - top_diff + top[i]->offset(n), weight_diff); + top_diff + top[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->backward_cpu_gemm(top_diff + top[i]->offset(n), weight, - bottom_diff + bottom[i]->offset(n)); + bottom_diff + bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp new file mode 100644 index 00000000000..3f486ad656d --- /dev/null +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -0,0 +1,158 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + + ConvolutionParameter conv_param = this->layer_param_.convolution_param(); + CHECK( + !conv_param.has_kernel_size() + != !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK( + conv_param.has_kernel_size() + || (conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + CHECK( + (!conv_param.has_pad() && conv_param.has_pad_h() && conv_param.has_pad_w()) + || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK( + (!conv_param.has_stride() && conv_param.has_stride_h() + && conv_param.has_stride_w()) + || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + if (conv_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = conv_param.kernel_size(); + } else { + kernel_h_ = conv_param.kernel_h(); + kernel_w_ = conv_param.kernel_w(); + } + CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; + if (!conv_param.has_pad_h()) { + pad_h_ = pad_w_ = conv_param.pad(); + } else { + pad_h_ = conv_param.pad_h(); + pad_w_ = conv_param.pad_w(); + } + CHECK_EQ(pad_h_, 0)<< "pad_h_ must be 0"; + CHECK_EQ(pad_w_, 0)<< "pad_w_ must be 0"; + if (!conv_param.has_stride_h()) { + stride_h_ = stride_w_ = conv_param.stride(); + } else { + stride_h_ = conv_param.stride_h(); + stride_w_ = conv_param.stride_w(); + } + if (!conv_param.has_kstride_h()) { + kstride_h_ = kstride_w_ = conv_param.kstride(); + } else { + kstride_h_ = conv_param.kstride_h(); + kstride_w_ = conv_param.kstride_w(); + } + group_ = this->layer_param_.convolution_param().group(); + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + // TODO: generalize to handle inputs of different shapes. + for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { + CHECK_EQ(num_, bottom[bottom_id]->num())<< "Inputs must have same num."; + CHECK_EQ(channels_, bottom[bottom_id]->channels()) + << "Inputs must have same channels."; + CHECK_EQ(height_, bottom[bottom_id]->height()) + << "Inputs must have same height."; + CHECK_EQ(width_, bottom[bottom_id]->width()) + << "Inputs must have same width."; + } + num_output_ = this->layer_param_.convolution_param().num_output(); + CHECK_GT(num_output_, 0); + CHECK_EQ(channels_ % group_, 0); + // The im2col result buffer would only hold one image at a time to avoid + // overly large memory usage. + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; + int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; + col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, + width_out, this->device_context_); + // Set the parameters + CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; + bias_term_ = this->layer_param_.convolution_param().bias_term(); + // Figure out the dimensions for individual gemms. + M_ = num_output_ / group_; + K_ = channels_ * kernel_h_ * kernel_w_ / group_; + N_ = height_out * width_out; + for (int top_id = 0; top_id < top.size(); ++top_id) { + top[top_id]->Reshape(num_, num_output_, height_out, width_out, this->device_context_); + } + // Check if we need to set up the weights + if (this->blobs_.size() > 0) { + // (FTschopp) Silence this output: + //LOG(INFO)<< "Skipping parameter initialization"; + } else { + if (bias_term_) { + this->blobs_.resize(2); + } else { + this->blobs_.resize(1); + } + // Intialize the weight + this->blobs_[0].reset( + new Blob(num_output_, channels_ / group_, kernel_h_, kernel_w_, this->device_context_)); + // fill the weights + shared_ptr > weight_filler( + GetFiller( + this->layer_param_.convolution_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get(), this->device_context_); + // If necessary, initialize and fill the bias term + if (bias_term_) { + this->blobs_[1].reset(new Blob(1, 1, 1, num_output_, this->device_context_)); + shared_ptr > bias_filler( + GetFiller( + this->layer_param_.convolution_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get(), this->device_context_); + } + } + // Set up the all ones "bias multiplier" for adding bias using blas + if (bias_term_) { + bias_multiplier_.Reshape(1, 1, 1, N_, this->device_context_); + caffe_set(N_, Dtype(1), bias_multiplier_.mutable_cpu_data()); + } + this->param_propagate_down_.resize(this->blobs_.size(), true); +} + +template +void ConvolutionSKLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + LayerSetUp(bottom, top); +} + +template +void ConvolutionSKLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + LOG(FATAL)<< "Foward_cpu() not implemented for ConvlutionSKLayer."; +} + +template +void ConvolutionSKLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + LOG(FATAL)<< " Backward_cpu() not implemented for ConvolutionSKLayer."; +} + +#ifdef CPU_ONLY +STUB_GPU(ConvolutionSKLayer); +#endif + +INSTANTIATE_CLASS(ConvolutionSKLayer); +REGISTER_LAYER_CLASS(ConvolutionSK); + +} // namespace caffe diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu new file mode 100644 index 00000000000..630474df139 --- /dev/null +++ b/src/caffe/layers/conv_sk_layer.cu @@ -0,0 +1,199 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + +namespace caffe { + +template +void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + + if (this->device_context_.backend() == BACKEND_CUDA) { + // CUDA backend code + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + Dtype* col_data = col_buffer_.mutable_gpu_data(); + const Dtype* weight = this->blobs_[0]->gpu_data(); + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; + + for (int n = 0; n < num_; ++n) { + // First, im2col + im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, + stride_w_, kstride_h_, kstride_w_, col_data); + // Second, innerproduct with groups + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, K_, + (Dtype) 1., weight + weight_offset * g, + col_data + col_offset * g, (Dtype) 0., + top_data + top[i]->offset(n) + top_offset * g); + } + // Third, add bias + if (bias_term_) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, + (Dtype) 1., this->blobs_[1]->gpu_data(), + bias_multiplier_.gpu_data(), (Dtype) 1., + top_data + top[i]->offset(n)); + } + } + } + } else { + // GreenTea backend code +#ifdef USE_GREENTEA + std::cout << "CONV GREENTEA BEGIN" << std::endl; + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + for (int i = 0; i < bottom.size(); ++i) { + + const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); + cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); + cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); + const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); + + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; + + for (int n = 0; n < num_; ++n) { + + // First, im2col + greentea_im2col_sk_gpu( + program, + ctx, + Subregion(bottom_data, bottom[i]->offset(n), + channels_ * height_ * width_), + channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); + ctx.get_queue().finish(); + + std::cout << "After im2col" << std::endl; + + // Second, innerproduct with groups + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm( + this->device_context_.id(), + CblasNoTrans, + CblasNoTrans, + M_, + N_, + K_, + (Dtype) 1., + Subregion(weight, weight_offset * g, M_ * K_), + Subregion(col_data, col_offset * g, K_ * N_), + (Dtype) 0., + Subregion(top_data, top[i]->offset(n) + top_offset * g, + M_ * N_)); + } + ctx.get_queue().finish(); + + std::cout << "After gpu gemm" << std::endl; + +// Third, add bias + if (bias_term_) { + greentea_gpu_gemm( + this->device_context_.id(), CblasNoTrans, CblasNoTrans, + num_output_, N_, 1, (Dtype) 1., + (cl_mem) (this->blobs_[1]->gpu_data()), + (cl_mem) (bias_multiplier_.gpu_data()), (Dtype) 1., + Subregion(top_data, top[i]->offset(n), num_output_ * N_)); + ctx.get_queue().finish(); + } + } + } + std::cout << "CONV GREENTEA END" << std::endl; +#endif + } +} + +template +void ConvolutionSKLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + const Dtype* weight = NULL; + Dtype* weight_diff = NULL; + if (this->param_propagate_down_[0]) { + weight = this->blobs_[0]->gpu_data(); + weight_diff = this->blobs_[0]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + Dtype* bias_diff = NULL; + if (bias_term_ && this->param_propagate_down_[1]) { + bias_diff = this->blobs_[1]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); + } + const int weight_offset = M_ * K_; + const int col_offset = K_ * N_; + const int top_offset = M_ * N_; + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = NULL; + // Bias gradient, if necessary. + if (bias_term_ && this->param_propagate_down_[1]) { + top_diff = top[i]->gpu_diff(); + for (int n = 0; n < num_; ++n) { + caffe_gpu_gemv(CblasNoTrans, num_output_, N_, 1., + top_diff + top[0]->offset(n), + bias_multiplier_.gpu_data(), 1., bias_diff); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + if (!top_diff) { + top_diff = top[i]->gpu_diff(); + } + Dtype* col_data = col_buffer_.mutable_gpu_data(); + Dtype* col_diff = col_buffer_.mutable_gpu_diff(); + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + for (int n = 0; n < num_; ++n) { + // Since we saved memory in the forward pass by not storing all col + // data, we will need to recompute them. + im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, + stride_w_, kstride_h_, kstride_w_, col_data); + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, K_, N_, + (Dtype) 1., + top_diff + top[i]->offset(n) + top_offset * g, + col_data + col_offset * g, (Dtype) 1., + weight_diff + weight_offset * g); + } + } + // gradient w.r.t. bottom data, if necessary + if (propagate_down[i]) { + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasTrans, CblasNoTrans, K_, N_, M_, + (Dtype) 1., weight + weight_offset * g, + top_diff + top[i]->offset(n) + top_offset * g, + (Dtype) 0., col_diff + col_offset * g); + } + // col2im back to the data + col2im_sk_gpu(col_diff, channels_, height_, width_, kernel_h_, + kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + kstride_h_, kstride_w_, + bottom_diff + bottom[i]->offset(n)); + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionSKLayer); + +} // namespace caffe diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 104d2b9d669..4a69ca20d0a 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -24,8 +24,6 @@ void CuDNNConvolutionLayer::LayerSetUp( // Initialize CUDA streams and cuDNN. stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; handle_ = new cudnnHandle_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; - workspaceSizeInBytes = 0; - workspace = NULL; for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { CUDA_CHECK(cudaStreamCreate(&stream_[g])); @@ -45,10 +43,10 @@ void CuDNNConvolutionLayer::LayerSetUp( // Create tensor descriptor(s) for data and corresponding convolution(s). for (int i = 0; i < bottom.size(); i++) { - cudnnTensorDescriptor_t bottom_desc; + cudnnTensor4dDescriptor_t bottom_desc; cudnn::createTensor4dDesc(&bottom_desc); bottom_descs_.push_back(bottom_desc); - cudnnTensorDescriptor_t top_desc; + cudnnTensor4dDescriptor_t top_desc; cudnn::createTensor4dDesc(&top_desc); top_descs_.push_back(top_desc); cudnnConvolutionDescriptor_t conv_desc; @@ -106,12 +104,12 @@ CuDNNConvolutionLayer::~CuDNNConvolutionLayer() { if (!handles_setup_) { return; } for (int i = 0; i < bottom_descs_.size(); i++) { - cudnnDestroyTensorDescriptor(bottom_descs_[i]); - cudnnDestroyTensorDescriptor(top_descs_[i]); + cudnnDestroyTensor4dDescriptor(bottom_descs_[i]); + cudnnDestroyTensor4dDescriptor(top_descs_[i]); cudnnDestroyConvolutionDescriptor(conv_descs_[i]); } if (this->bias_term_) { - cudnnDestroyTensorDescriptor(bias_desc_); + cudnnDestroyTensor4dDescriptor(bias_desc_); } cudnnDestroyFilterDescriptor(filter_desc_); diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 4a1a4c4f4f2..071014e1b48 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -19,70 +19,23 @@ void CuDNNConvolutionLayer::Forward_gpu( Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); - size_t workspace_limit_bytes = this->kernel_h_ * - this->kernel_w_ * - this->channels_ * - sizeof(int) + 1; - // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { - cudnnConvolutionFwdAlgo_t algo; - - // pick the convolution algorithm - // TODO(shelhamer) this should be done during reshape - // TODO(shelhamer) the choice of automatic or manual algorithm picking - // should be exposed in proto - CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], - bottom_descs_[i], - filter_desc_, - conv_descs_[i], - top_descs_[i], - CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, - workspace_limit_bytes, // memoryLimitInBytes, - &algo)); - - // get minimum size of the workspace needed for the desired algorithm - size_t workspaceSizeInBytes_temp = 0; - - CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], - bottom_descs_[i], - filter_desc_, - conv_descs_[i], - top_descs_[i], - algo, - &workspaceSizeInBytes_temp)); - - if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { - workspaceSizeInBytes = workspaceSizeInBytes_temp; - // free the existing workspace and allocate a new (larger) one - cudaFree(this->workspace); - cudaError_t err = cudaMalloc(&(this->workspace), workspaceSizeInBytes); - if (err != cudaSuccess) { - // force zero memory path - algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; - workspace = NULL; - workspaceSizeInBytes = 0; - } - } - // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], - cudnn::dataType::one, - bottom_descs_[i], bottom_data + bottom_offset_ * g, - filter_desc_, weight + weight_offset_ * g, - conv_descs_[i], - algo, workspace, workspaceSizeInBytes, - cudnn::dataType::zero, - top_descs_[i], top_data + top_offset_ * g)); + bottom_descs_[i], bottom_data + bottom_offset_ * g, + filter_desc_, weight + weight_offset_ * g, + conv_descs_[i], + top_descs_[i], top_data + top_offset_ * g, + CUDNN_RESULT_NO_ACCUMULATE)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); - CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, - cudnn::dataType::one, - bias_desc_, bias_data + bias_offset_ * g, - cudnn::dataType::one, - top_descs_[i], top_data + top_offset_ * g)); + Dtype alpha = 1.; + CUDNN_CHECK(cudnnAddTensor4d(handle_[g], CUDNN_ADD_SAME_C, &alpha, + bias_desc_, bias_data + bias_offset_ * g, + top_descs_[i], top_data + top_offset_ * g)); } } @@ -115,22 +68,20 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], - cudnn::dataType::one, - top_descs_[i], top_diff + top_offset_ * g, - cudnn::dataType::one, - bias_desc_, bias_diff + bias_offset_ * g)); + top_descs_[i], top_diff + top_offset_ * g, + bias_desc_, bias_diff + bias_offset_ * g, + CUDNN_RESULT_ACCUMULATE)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], - cudnn::dataType::one, - bottom_descs_[i], bottom_data + bottom_offset_ * g, - top_descs_[i], top_diff + top_offset_ * g, - conv_descs_[i], - cudnn::dataType::one, - filter_desc_, weight_diff + weight_offset_ * g)); + bottom_descs_[i], bottom_data + bottom_offset_ * g, + top_descs_[i], top_diff + top_offset_ * g, + conv_descs_[i], + filter_desc_, weight_diff + weight_offset_ * g, + CUDNN_RESULT_ACCUMULATE)); } // Gradient w.r.t. bottom data. @@ -140,12 +91,11 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], - cudnn::dataType::one, - filter_desc_, weight + weight_offset_ * g, - top_descs_[i], top_diff + top_offset_ * g, - conv_descs_[i], - cudnn::dataType::zero, - bottom_descs_[i], bottom_diff + bottom_offset_ * g)); + filter_desc_, weight + weight_offset_ * g, + top_descs_[i], top_diff + top_offset_ * g, + conv_descs_[i], + bottom_descs_[i], bottom_diff + bottom_offset_ * g, + CUDNN_RESULT_NO_ACCUMULATE)); } } diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp index c92c4e477b5..dd90195637b 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cpp +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -13,13 +13,15 @@ template void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { PoolingLayer::LayerSetUp(bottom, top); + // Sanity check: CUDNN currently only supports pad == 0. + CHECK_EQ(this->pad_h_, 0); + CHECK_EQ(this->pad_w_, 0); CUDNN_CHECK(cudnnCreate(&handle_)); cudnn::createTensor4dDesc(&bottom_desc_); cudnn::createTensor4dDesc(&top_desc_); cudnn::createPoolingDesc(&pooling_desc_, this->layer_param_.pooling_param().pool(), &mode_, - this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_, - this->stride_h_, this->stride_w_); + this->kernel_h_, this->kernel_w_, this->stride_h_, this->stride_w_); handles_setup_ = true; } @@ -38,8 +40,8 @@ CuDNNPoolingLayer::~CuDNNPoolingLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensorDescriptor(bottom_desc_); - cudnnDestroyTensorDescriptor(top_desc_); + cudnnDestroyTensor4dDescriptor(bottom_desc_); + cudnnDestroyTensor4dDescriptor(top_desc_); cudnnDestroyPoolingDescriptor(pooling_desc_); cudnnDestroy(handle_); } diff --git a/src/caffe/layers/cudnn_pooling_layer.cu b/src/caffe/layers/cudnn_pooling_layer.cu index a952b855a48..1c113aad75f 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cu +++ b/src/caffe/layers/cudnn_pooling_layer.cu @@ -15,10 +15,7 @@ void CuDNNPoolingLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, - cudnn::dataType::one, - bottom_desc_, bottom_data, - cudnn::dataType::zero, - top_desc_, top_data)); + bottom_desc_, bottom_data, top_desc_, top_data)); } template @@ -32,11 +29,8 @@ void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, - cudnn::dataType::one, - top_desc_, top_data, top_desc_, top_diff, - bottom_desc_, bottom_data, - cudnn::dataType::zero, - bottom_desc_, bottom_diff)); + top_desc_, top_data, top_desc_, top_diff, + bottom_desc_, bottom_data, bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer); diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index 759d83984ef..0b8a6bc3248 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -35,8 +35,8 @@ CuDNNReLULayer::~CuDNNReLULayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensorDescriptor(this->bottom_desc_); - cudnnDestroyTensorDescriptor(this->top_desc_); + cudnnDestroyTensor4dDescriptor(this->bottom_desc_); + cudnnDestroyTensor4dDescriptor(this->top_desc_); cudnnDestroy(this->handle_); } diff --git a/src/caffe/layers/cudnn_relu_layer.cu b/src/caffe/layers/cudnn_relu_layer.cu index 21d14857dd2..862508707a0 100644 --- a/src/caffe/layers/cudnn_relu_layer.cu +++ b/src/caffe/layers/cudnn_relu_layer.cu @@ -18,11 +18,8 @@ void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_RELU, - cudnn::dataType::one, - this->bottom_desc_, bottom_data, - cudnn::dataType::zero, - this->top_desc_, top_data)); + CUDNN_ACTIVATION_RELU, + this->bottom_desc_, bottom_data, this->top_desc_, top_data)); } template @@ -43,12 +40,9 @@ void CuDNNReLULayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_RELU, - cudnn::dataType::one, - this->top_desc_, top_data, this->top_desc_, top_diff, - this->bottom_desc_, bottom_data, - cudnn::dataType::zero, - this->bottom_desc_, bottom_diff)); + CUDNN_ACTIVATION_RELU, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNReLULayer); diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp index 32637873d46..67bd9c373b0 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cpp +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -35,8 +35,8 @@ CuDNNSigmoidLayer::~CuDNNSigmoidLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensorDescriptor(this->bottom_desc_); - cudnnDestroyTensorDescriptor(this->top_desc_); + cudnnDestroyTensor4dDescriptor(this->bottom_desc_); + cudnnDestroyTensor4dDescriptor(this->top_desc_); cudnnDestroy(this->handle_); } diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cu b/src/caffe/layers/cudnn_sigmoid_layer.cu index 7a06cf721da..31b094e25d4 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cu +++ b/src/caffe/layers/cudnn_sigmoid_layer.cu @@ -13,11 +13,8 @@ void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_SIGMOID, - cudnn::dataType::one, - this->bottom_desc_, bottom_data, - cudnn::dataType::zero, - this->top_desc_, top_data)); + CUDNN_ACTIVATION_SIGMOID, + this->bottom_desc_, bottom_data, this->top_desc_, top_data)); } template @@ -33,12 +30,9 @@ void CuDNNSigmoidLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_SIGMOID, - cudnn::dataType::one, - this->top_desc_, top_data, this->top_desc_, top_diff, - this->bottom_desc_, bottom_data, - cudnn::dataType::zero, - this->bottom_desc_, bottom_diff)); + CUDNN_ACTIVATION_SIGMOID, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSigmoidLayer); diff --git a/src/caffe/layers/cudnn_softmax_layer.cpp b/src/caffe/layers/cudnn_softmax_layer.cpp index 77a3225adcd..83a5b69a626 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cpp +++ b/src/caffe/layers/cudnn_softmax_layer.cpp @@ -26,10 +26,10 @@ template void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { SoftmaxLayer::Reshape(bottom, top); - int N = this->outer_num_; - int K = bottom[0]->shape(this->softmax_axis_); - int H = this->inner_num_; - int W = 1; + int N = bottom[0]->num(); + int K = bottom[0]->channels(); + int H = bottom[0]->height(); + int W = bottom[0]->width(); cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); } @@ -39,8 +39,8 @@ CuDNNSoftmaxLayer::~CuDNNSoftmaxLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensorDescriptor(bottom_desc_); - cudnnDestroyTensorDescriptor(top_desc_); + cudnnDestroyTensor4dDescriptor(bottom_desc_); + cudnnDestroyTensor4dDescriptor(top_desc_); cudnnDestroy(handle_); } diff --git a/src/caffe/layers/cudnn_softmax_layer.cu b/src/caffe/layers/cudnn_softmax_layer.cu index a9e2fcefaf7..f328afdd831 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cu +++ b/src/caffe/layers/cudnn_softmax_layer.cu @@ -17,11 +17,8 @@ void CuDNNSoftmaxLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE, - CUDNN_SOFTMAX_MODE_CHANNEL, - cudnn::dataType::one, - bottom_desc_, bottom_data, - cudnn::dataType::zero, - top_desc_, top_data)); + CUDNN_SOFTMAX_MODE_CHANNEL, + bottom_desc_, bottom_data, top_desc_, top_data)); } template @@ -32,13 +29,9 @@ void CuDNNSoftmaxLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE, - CUDNN_SOFTMAX_MODE_CHANNEL, - cudnn::dataType::one, - top_desc_, top_data, top_desc_, top_diff, - cudnn::dataType::zero, - bottom_desc_, bottom_diff)); + CUDNN_SOFTMAX_MODE_CHANNEL, + top_desc_, top_data, top_desc_, top_diff, bottom_desc_, bottom_diff)); } } diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp index 376faad324d..b1d2b86384e 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cpp +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -35,8 +35,8 @@ CuDNNTanHLayer::~CuDNNTanHLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensorDescriptor(this->bottom_desc_); - cudnnDestroyTensorDescriptor(this->top_desc_); + cudnnDestroyTensor4dDescriptor(this->bottom_desc_); + cudnnDestroyTensor4dDescriptor(this->top_desc_); cudnnDestroy(this->handle_); } diff --git a/src/caffe/layers/cudnn_tanh_layer.cu b/src/caffe/layers/cudnn_tanh_layer.cu index d287f6fee85..bf9ec7cfac4 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cu +++ b/src/caffe/layers/cudnn_tanh_layer.cu @@ -13,11 +13,8 @@ void CuDNNTanHLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_TANH, - cudnn::dataType::one, - this->bottom_desc_, bottom_data, - cudnn::dataType::zero, - this->top_desc_, top_data)); + CUDNN_ACTIVATION_TANH, + this->bottom_desc_, bottom_data, this->top_desc_, top_data)); } template @@ -32,14 +29,10 @@ void CuDNNTanHLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_TANH, - cudnn::dataType::one, - this->top_desc_, top_data, this->top_desc_, top_diff, - this->bottom_desc_, bottom_data, - cudnn::dataType::zero, - this->bottom_desc_, bottom_diff)); + CUDNN_ACTIVATION_TANH, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNTanHLayer); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 0f2d66776a9..f74a775fa24 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -16,14 +16,14 @@ namespace caffe { -template +template DataLayer::~DataLayer() { this->JoinPrefetchThread(); } -template +template void DataLayer::DataLayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // Initialize DB db_.reset(db::GetDB(this->layer_param_.data_param().backend())); db_->Open(this->layer_param_.data_param().source(), db::READ); @@ -31,9 +31,9 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, // Check if we should randomly skip a few data points if (this->layer_param_.data_param().rand_skip()) { - unsigned int skip = caffe_rng_rand() % - this->layer_param_.data_param().rand_skip(); - LOG(INFO) << "Skipping first " << skip << " data points."; + unsigned int skip = caffe_rng_rand() + % this->layer_param_.data_param().rand_skip(); + LOG(INFO)<< "Skipping first " << skip << " data points."; while (skip-- > 0) { cursor_->Next(); } @@ -43,40 +43,39 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, datum.ParseFromString(cursor_->value()); bool force_color = this->layer_param_.data_param().force_encoded_color(); - if ((force_color && DecodeDatum(&datum, true)) || - DecodeDatumNative(&datum)) { - LOG(INFO) << "Decoding Datum"; + if ((force_color && DecodeDatum(&datum, true)) || DecodeDatumNative(&datum)) { + LOG(INFO)<< "Decoding Datum"; } // image int crop_size = this->layer_param_.transform_param().crop_size(); if (crop_size > 0) { top[0]->Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); + datum.channels(), crop_size, crop_size, this->device_context_); this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); + datum.channels(), crop_size, crop_size, this->device_context_); + this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size, this->device_context_); } else { - top[0]->Reshape( - this->layer_param_.data_param().batch_size(), datum.channels(), - datum.height(), datum.width()); + top[0]->Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), datum.height(), datum.width(), this->device_context_); this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); + datum.channels(), datum.height(), + datum.width(), this->device_context_); + this->transformed_data_.Reshape(1, datum.channels(), datum.height(), + datum.width(), this->device_context_); } - LOG(INFO) << "output data size: " << top[0]->num() << "," - << top[0]->channels() << "," << top[0]->height() << "," - << top[0]->width(); + LOG(INFO)<< "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); // label if (this->output_labels_) { - vector label_shape(1, this->layer_param_.data_param().batch_size()); - top[1]->Reshape(label_shape); - this->prefetch_label_.Reshape(label_shape); + top[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1, this->device_context_); + this->prefetch_label_.Reshape(this->layer_param_.data_param().batch_size(), + 1, 1, 1, this->device_context_); } } // This function is used to create a thread that prefetches the data. -template +template void DataLayer::InternalThreadEntry() { CPUTimer batch_timer; batch_timer.Start(); @@ -100,10 +99,10 @@ void DataLayer::InternalThreadEntry() { DecodeDatumNative(&datum); } } - this->prefetch_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); + this->prefetch_data_.Reshape(1, datum.channels(), datum.height(), + datum.width(), this->device_context_); + this->transformed_data_.Reshape(1, datum.channels(), datum.height(), + datum.width(), this->device_context_); } Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); @@ -126,7 +125,7 @@ void DataLayer::InternalThreadEntry() { cv_img = DecodeDatumToCVMatNative(datum); } if (cv_img.channels() != this->transformed_data_.channels()) { - LOG(WARNING) << "Your dataset contains encoded images with mixed " + LOG(WARNING)<< "Your dataset contains encoded images with mixed " << "channel sizes. Consider adding a 'force_color' flag to the " << "model definition, or rebuild your dataset using " << "convert_imageset."; @@ -150,14 +149,14 @@ void DataLayer::InternalThreadEntry() { // go to the next iter cursor_->Next(); if (!cursor_->valid()) { - DLOG(INFO) << "Restarting data prefetching from start."; + DLOG(INFO)<< "Restarting data prefetching from start."; cursor_->SeekToFirst(); } } batch_timer.Stop(); - DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; - DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; - DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; + DLOG(INFO)<< "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO)<< " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO)<< "Transform time: " << trans_time / 1000 << " ms."; } INSTANTIATE_CLASS(DataLayer); diff --git a/src/caffe/layers/datarandtransform_layer.cpp b/src/caffe/layers/datarandtransform_layer.cpp new file mode 100644 index 00000000000..03bb543dc79 --- /dev/null +++ b/src/caffe/layers/datarandtransform_layer.cpp @@ -0,0 +1,235 @@ +// Copyright 2014 Julien Martel + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +#include +#include + +namespace caffe { + +template +DataRandTransformLayer::~DataRandTransformLayer() { +} + +template +void DataRandTransformLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + CHECK_EQ(bottom.size(), 1)<< "Data Rand Transform Layer takes a single blob as input."; + CHECK_EQ(top.size(), 1) << "Data Rand Transform Layer takes a single blob as output."; + + cv::namedWindow("Test",0); + + // Bottom[0] cause there is only one input blob + NUM_ = bottom[0]->num(); + CHANNELS_ = bottom[0]->channels(); + HEIGHT_ = bottom[0]->height(); + WIDTH_ = bottom[0]->width(); + + // Announce for the top blob layer + top[0]->Reshape(NUM_, + CHANNELS_, + HEIGHT_, + WIDTH_, this->device_context_ + ); + + // Read the layer parameters + apply_normalization_ = this->layer_param_.apply_normalization(); + + apply_mirroring_ = this->layer_param_.apply_mirroring(); + prob_mirroring_ = this->layer_param_.prob_mirroring(); + + apply_rot_ = this->layer_param_.apply_rot(); + rot_min_ = this->layer_param_.rot_min(); + rot_max_ = this->layer_param_.rot_max(); + + apply_blur_ = this->layer_param_.apply_blur(); + blur_size_ = this->layer_param_.blur_size(); + blur_max_var_ = this->layer_param_.blur_max_var(); + + apply_contrast_brightness_ = this->layer_param_.apply_contrast_brightness(); + alpha_ = this->layer_param_.alpha_c(); + beta_ = this->layer_param_.beta_c(); + + /* + LOG(ERROR) << "\nRotation: " << apply_rot_ << ", min: " << rot_min_ << ", max: " << rot_max_ + << "\nBlur: " << apply_blur_ << ", size: " << blur_size_ << ", var: " << blur_max_var_ + << "\nContrast/Brightness: " << apply_contrast_brightness_ << ", alpha: " << alpha_ << ", beta: " << beta_; + */ + return; +} + +template +void DataRandTransformLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + //TODO?? +} + +template +void DataRandTransformLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + // Transform the blob data in an opencv image + cv::Mat img(bottom[0]->height(), bottom[0]->width(), CV_32FC3); + cv::Mat imgTransformed(img.rows, img.cols, CV_32FC3); + + // Normalization + float val; + std::vector mean(CHANNELS_); + std::vector std(CHANNELS_); + + // Center of rotation + cv::Point2f center = cv::Point2f(img.cols / 2, img.rows / 2); + + // Mirroring + cv::Mat map_x(HEIGHT_, WIDTH_, CV_32FC1); + cv::Mat map_y(HEIGHT_, WIDTH_, CV_32FC1); + for (int j = 0; j < HEIGHT_; j++) { + for (int i = 0; i < WIDTH_; i++) { + map_x.at(j, i) = WIDTH_ - i; + map_y.at(j, i) = HEIGHT_ - j; + } + } + + for (int n = 0; n < NUM_; n++) { + // Reinit for normalization + for (int c = 0; c < CHANNELS_; c++) { + mean[c] = 0; + std[c] = 0; + } + + // Transform the data into an opencv structure to apply transformations + for (int c = 0; c < CHANNELS_; c++) { + for (int h = 0; h < HEIGHT_; h++) { + for (int w = 0; w < WIDTH_; w++) { + val = (bottom[0]->data_at(n, c, h, w)); + + img.at(h, w)[c] = val; + mean[c] += val; + } + } + } + for (int c = 0; c < CHANNELS_; c++) { + mean[c] = mean[c] / (img.rows * img.cols); + //LOG(ERROR) << "Mean" << c << "="<< mean[c]; + } + + // Normalize patch-wise + if (apply_normalization_) { + for (int h = 0; h < HEIGHT_; h++) { + for (int w = 0; w < WIDTH_; w++) { + for (int c = 0; c < CHANNELS_; c++) { + val = img.at(h, w)[c]; + + std[c] += (mean[c] - val) * (mean[c] - val); + } + } + } + for (int c = 0; c < CHANNELS_; c++) { + std[c] = sqrtf(std[c] / (img.rows * img.cols)); + //LOG(ERROR) << "Std" << c << "="<< std[c]; + } + + for (int h = 0; h < HEIGHT_; h++) { + for (int w = 0; w < WIDTH_; w++) { + for (int c = 0; c < CHANNELS_; c++) { + img.at(h, w)[c] = (img.at(h, w)[c] - mean[c]) + / std[c]; + } + } + } + } + + // Double mirroring + if (apply_mirroring_) { + cv::Scalar color; + if (apply_normalization_) + color = cv::Scalar(0, 0, 0); + else + color = cv::Scalar(0.5, 0.5, 0.5); + + if (float(rand()) / RAND_MAX < prob_mirroring_) { + cv::remap(img, imgTransformed, map_x, map_y, CV_INTER_LINEAR, + cv::BORDER_CONSTANT, color); + imgTransformed.copyTo(img); + } + } + + // Rotate image + if (apply_rot_) { + float angle = rot_min_ + (rot_max_ - rot_min_) * float(rand()) / RAND_MAX; // [-rot_min ; rot_max] + cv::Mat rot_mat = cv::getRotationMatrix2D(center, angle, 1.0); + + cv::Scalar color; + if (apply_normalization_) + color = cv::Scalar(0, 0, 0); + else + color = cv::Scalar(0.5, 0.5, 0.5); + + cv::warpAffine(img, imgTransformed, rot_mat, img.size(), + cv::WARP_INVERSE_MAP, cv::BORDER_CONSTANT, color); + imgTransformed.copyTo(img); + } + + // Blur image + if (apply_blur_) { + float s = blur_max_var_ * float(rand()) / RAND_MAX; // [0.0 ; max_var] + cv::GaussianBlur(img, img, cv::Size(blur_size_, blur_size_), s); + } + + // Contrast enhancement + if (apply_contrast_brightness_) { + float alpha = (1.0 - alpha_) + (2 * alpha_) * float(rand()) / RAND_MAX; //[1.0-alpha ; 1.0+alpha] + float beta = 2 * beta_ * float(rand()) / RAND_MAX - beta_; //[-beta ; +beta] + for (int y = 0; y < HEIGHT_; y++) { + for (int x = 0; x < WIDTH_; x++) { + for (int c = 0; c < CHANNELS_; c++) { + img.at(y, x)[c] = alpha * (img.at(y, x)[c]) + + beta; + } + } + } + } + + // === DEBUG + //cv::imshow("Test",0.5+0.5*img); + //cv::waitKey(1); + + //Fill back to the blob + Dtype* data = top[0]->mutable_cpu_data(); + for (int c = 0; c < CHANNELS_; c++) { + for (int h = 0; h < HEIGHT_; h++) { + for (int w = 0; w < WIDTH_; w++) { + *(data + top[0]->offset(n, c, h, w)) = img.at(h, w)[c]; + } + } + } + } +} + +template +void DataRandTransformLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + // No GPU implementation for now, just apply the CPU transformations + Forward_cpu(bottom, top); +} + +// The backward operations are dummy - they do not carry any computation. +template +void DataRandTransformLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + NOT_IMPLEMENTED; +} + +template +void DataRandTransformLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + NOT_IMPLEMENTED; +} + +INSTANTIATE_CLASS(DataRandTransformLayer); +REGISTER_LAYER_CLASS(DataRandTransform); + +} // namespace caffe diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index ec1256fd2fa..63ae058e25e 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -27,7 +27,7 @@ void DropoutLayer::Reshape(const vector*>& bottom, NeuronLayer::Reshape(bottom, top); // Set up the cache for random number generation rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); + bottom[0]->height(), bottom[0]->width(),this->device_context_); } template diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp index 6b0d617464c..cbb7aa737b7 100644 --- a/src/caffe/layers/dummy_data_layer.cpp +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -16,30 +16,18 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, num_data_filler == num_top) << "Number of data fillers must be 0, 1 or equal to the number of tops: " << num_top << "; you specified " << num_data_filler << " data fillers."; - - const bool legacy_dims = param.num_size() || param.channels_size() || - param.height_size() || param.width_size(); - if (legacy_dims) { - CHECK_EQ(0, param.shape_size()) - << "Both shape and legacy fields were specified"; - // Using deprecated 4D output dim specifiers. - CHECK(param.num_size() == 1 || param.num_size() == num_top) - << "Must specify 'num' once, or once per top blob " - << "(" << num_top << "); specified " << param.num_size() << "."; - CHECK(param.channels_size() == 1 || param.channels_size() == num_top) - << "Must specify 'channels' once, or once per top blob " - << "(" << num_top << "); specified " << param.channels_size() << "."; - CHECK(param.height_size() == 1 || param.height_size() == num_top) - << "Must specify 'height' once, or once per top blob " - << "(" << num_top << "); specified " << param.height_size() << "."; - CHECK(param.width_size() == 1 || param.width_size() == num_top) - << "Must specify 'width' once, or once per top blob " - << "(" << num_top << "); specified " << param.width_size() << "."; - } else { - CHECK(param.shape_size() == 1 || param.shape_size() == num_top) - << "Must specify 'shape' once, or once per top blob " - << "(" << num_top << "); specified " << param.shape_size() << "."; - } + CHECK(param.num_size() == 1 || param.num_size() == num_top) + << "Must specify either a single (1) 'num' or one for each top blob " + << "(" << num_top << "); you specified " << param.num_size() << "."; + CHECK(param.channels_size() == 1 || param.channels_size() == num_top) + << "Must specify either a single (1) 'channels' or one for each top blob " + << "(" << num_top << "); you specified " << param.channels_size() << "."; + CHECK(param.height_size() == 1 || param.height_size() == num_top) + << "Must specify either a single (1) 'height' or one for each top blob " + << "(" << num_top << "); you specified " << param.height_size() << "."; + CHECK(param.width_size() == 1 || param.width_size() == num_top) + << "Must specify either a single (1) 'width' or one for each top blob " + << "(" << num_top << "); you specified " << param.width_size() << "."; // refill_[i] tells Forward i whether or not to actually refill top Blob i. // If refill_[i] is false, Forward does nothing for Blob i. We use this to // avoid wastefully refilling "constant" Blobs in every forward pass. @@ -75,19 +63,14 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, } } for (int i = 0; i < num_top; ++i) { - if (legacy_dims) { - const int num = (param.num_size() == 1) ? param.num(0) : param.num(i); - const int channels = - (param.channels_size() == 1) ? param.channels(0) : param.channels(i); - const int height = - (param.height_size() == 1) ? param.height(0) : param.height(i); - const int width = - (param.width_size() == 1) ? param.width(0) : param.width(i); - top[i]->Reshape(num, channels, height, width); - } else { - const int shape_index = (param.shape_size() == 1) ? 0 : i; - top[i]->Reshape(param.shape(shape_index)); - } + const int num = (param.num_size() == 1) ? param.num(0) : param.num(i); + const int channels = + (param.channels_size() == 1) ? param.channels(0) : param.channels(i); + const int height = + (param.height_size() == 1) ? param.height(0) : param.height(i); + const int width = + (param.width_size() == 1) ? param.width(0) : param.width(i); + top[i]->Reshape(num, channels, height, width, this->device_context_); } // Run Forward once, with refill_ inverted, to fill the constant Blobs. this->Forward(bottom, top); @@ -101,10 +84,13 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, template void DummyDataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { + + DeviceContext &device_context = Caffe::GetDeviceContext(this->layer_param_.device()); + for (int i = 0; i < top.size(); ++i) { const int filler_id = (fillers_.size() > 1) ? i : 0; if (refill_[filler_id]) { - fillers_[filler_id]->Fill(top[i]); + fillers_[filler_id]->Fill(top[i], device_context); } } } diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index a80700736bd..b9f9792c494 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -31,14 +31,21 @@ void EltwiseLayer::LayerSetUp(const vector*>& bottom, template void EltwiseLayer::Reshape(const vector*>& bottom, const vector*>& top) { + const int num = bottom[0]->num(); + const int channels = bottom[0]->channels(); + const int height = bottom[0]->height(); + const int width = bottom[0]->width(); for (int i = 1; i < bottom.size(); ++i) { - CHECK(bottom[i]->shape() == bottom[0]->shape()); + CHECK_EQ(num, bottom[i]->num()); + CHECK_EQ(channels, bottom[i]->channels()); + CHECK_EQ(height, bottom[i]->height()); + CHECK_EQ(width, bottom[i]->width()); } - top[0]->ReshapeLike(*bottom[0]); + top[0]->Reshape(num, channels, height, width, this->device_context_); // If max operation, we will initialize the vector index part. if (this->layer_param_.eltwise_param().operation() == EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { - max_idx_.Reshape(bottom[0]->shape()); + max_idx_.Reshape(bottom[0]->num(), channels, height, width, this->device_context_); } } diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 80efa31b22c..9d2fa229c08 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -11,9 +11,11 @@ template void EuclideanLossLayer::Reshape( const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); - CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1)) - << "Inputs must have the same dimension."; - diff_.ReshapeLike(*bottom[0]); + CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); + CHECK_EQ(bottom[0]->height(), bottom[1]->height()); + CHECK_EQ(bottom[0]->width(), bottom[1]->width()); + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width(), this->device_context_); } template diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index 745f271ea45..1d6c5f2535c 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,11 +9,12 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - vector top_shape(2); - top_shape[0] = bottom[0]->num(); - top_shape[1] = bottom[0]->count() / bottom[0]->num(); - top[0]->Reshape(top_shape); - CHECK_EQ(top[0]->count(), bottom[0]->count()); + int channels_out = bottom[0]->channels() * bottom[0]->height() + * bottom[0]->width(); + top[0]->Reshape(bottom[0]->num(), channels_out, 1, 1, this->device_context_); + count_ = bottom[0]->num() * channels_out; + CHECK_EQ(count_, bottom[0]->count()); + CHECK_EQ(count_, top[0]->count()); } template @@ -28,6 +29,10 @@ void FlattenLayer::Backward_cpu(const vector*>& top, bottom[0]->ShareDiff(*top[0]); } +#ifdef CPU_ONLY +STUB_GPU(FlattenLayer); +#endif + INSTANTIATE_CLASS(FlattenLayer); REGISTER_LAYER_CLASS(Flatten); diff --git a/src/caffe/layers/flatten_layer.cu b/src/caffe/layers/flatten_layer.cu new file mode 100644 index 00000000000..42abdad4499 --- /dev/null +++ b/src/caffe/layers/flatten_layer.cu @@ -0,0 +1,23 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void FlattenLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + top[0]->ShareData(*bottom[0]); +} + +template +void FlattenLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + bottom[0]->ShareDiff(*top[0]); +} + +INSTANTIATE_LAYER_GPU_FUNCS(FlattenLayer); + +} // namespace caffe diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 8a782f7e524..c32a6446113 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -14,9 +14,9 @@ #include "hdf5_hl.h" #include "stdint.h" -#include "caffe/data_layers.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" namespace caffe { @@ -36,7 +36,7 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { hdf_blobs_.resize(top_size); const int MIN_DATA_DIM = 1; - const int MAX_DATA_DIM = INT_MAX; + const int MAX_DATA_DIM = 4; for (int i = 0; i < top_size; ++i) { hdf_blobs_[i] = shared_ptr >(new Blob()); @@ -48,25 +48,11 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { CHECK_GE(status, 0) << "Failed to close HDF5 file: " << filename; // MinTopBlobs==1 guarantees at least one top blob - CHECK_GE(hdf_blobs_[0]->num_axes(), 1) << "Input must have at least 1 axis."; - const int num = hdf_blobs_[0]->shape(0); + int num = hdf_blobs_[0]->num(); for (int i = 1; i < top_size; ++i) { - CHECK_EQ(hdf_blobs_[i]->shape(0), num); - } - // Default to identity permutation. - data_permutation_.clear(); - data_permutation_.resize(hdf_blobs_[0]->shape(0)); - for (int i = 0; i < hdf_blobs_[0]->shape(0); i++) - data_permutation_[i] = i; - - // Shuffle if needed. - if (this->layer_param_.hdf5_data_param().shuffle()) { - std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); - DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) - << " rows (shuffled)"; - } else { - DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) << " rows"; + CHECK_EQ(hdf_blobs_[i]->num(), num); } + DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->num() << " rows"; } template @@ -95,33 +81,16 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, CHECK_GE(num_files_, 1) << "Must have at least 1 HDF5 filename listed in " << source; - file_permutation_.clear(); - file_permutation_.resize(num_files_); - // Default to identity permutation. - for (int i = 0; i < num_files_; i++) { - file_permutation_[i] = i; - } - - // Shuffle if needed. - if (this->layer_param_.hdf5_data_param().shuffle()) { - std::random_shuffle(file_permutation_.begin(), file_permutation_.end()); - } - // Load the first HDF5 file and initialize the line counter. - LoadHDF5FileData(hdf_filenames_[file_permutation_[current_file_]].c_str()); + LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); current_row_ = 0; // Reshape blobs. const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); const int top_size = this->layer_param_.top_size(); - vector top_shape; for (int i = 0; i < top_size; ++i) { - top_shape.resize(hdf_blobs_[i]->num_axes()); - top_shape[0] = batch_size; - for (int j = 1; j < top_shape.size(); ++j) { - top_shape[j] = hdf_blobs_[i]->shape(j); - } - top[i]->Reshape(top_shape); + top[i]->Reshape(batch_size, hdf_blobs_[i]->channels(), + hdf_blobs_[i]->height(), hdf_blobs_[i]->width(), this->device_context_); } } @@ -130,29 +99,22 @@ void HDF5DataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); for (int i = 0; i < batch_size; ++i, ++current_row_) { - if (current_row_ == hdf_blobs_[0]->shape(0)) { + if (current_row_ == hdf_blobs_[0]->num()) { if (num_files_ > 1) { ++current_file_; if (current_file_ == num_files_) { current_file_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) { - std::random_shuffle(file_permutation_.begin(), - file_permutation_.end()); - } DLOG(INFO) << "Looping around to first file."; } - LoadHDF5FileData( - hdf_filenames_[file_permutation_[current_file_]].c_str()); + LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); } current_row_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) - std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); } for (int j = 0; j < this->layer_param_.top_size(); ++j) { - int data_dim = top[j]->count() / top[j]->shape(0); + int data_dim = top[j]->count() / top[j]->num(); caffe_copy(data_dim, - &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] - * data_dim], &top[j]->mutable_cpu_data()[i * data_dim]); + &hdf_blobs_[j]->cpu_data()[current_row_ * data_dim], + &top[j]->mutable_cpu_data()[i * data_dim]); } } } diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 5e3e4ced141..02e3821d104 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -10,9 +10,9 @@ TODO: #include "hdf5.h" #include "hdf5_hl.h" -#include "caffe/data_layers.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" namespace caffe { @@ -21,29 +21,22 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); for (int i = 0; i < batch_size; ++i, ++current_row_) { - if (current_row_ == hdf_blobs_[0]->shape(0)) { + if (current_row_ == hdf_blobs_[0]->num()) { if (num_files_ > 1) { current_file_ += 1; if (current_file_ == num_files_) { current_file_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) { - std::random_shuffle(file_permutation_.begin(), - file_permutation_.end()); - } DLOG(INFO) << "Looping around to first file."; } - LoadHDF5FileData( - hdf_filenames_[file_permutation_[current_file_]].c_str()); + LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); } current_row_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) - std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); } for (int j = 0; j < this->layer_param_.top_size(); ++j) { - int data_dim = top[j]->count() / top[j]->shape(0); + int data_dim = top[j]->count() / top[j]->num(); caffe_copy(data_dim, - &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] - * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]); + &hdf_blobs_[j]->cpu_data()[current_row_ * data_dim], + &top[j]->mutable_gpu_data()[i * data_dim]); } } } diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index f63375c3dc6..5de5de6d65d 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -11,17 +11,18 @@ namespace caffe { -template +template void HDF5OutputLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { file_name_ = this->layer_param_.hdf5_output_param().file_name(); file_id_ = H5Fcreate(file_name_.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(file_id_, 0) << "Failed to open HDF5 file" << file_name_; + H5P_DEFAULT); + CHECK_GE(file_id_, 0)<< "Failed to open HDF5 file" << file_name_; file_opened_ = true; + current_batch_ = 0; } -template +template HDF5OutputLayer::~HDF5OutputLayer() { if (file_opened_) { herr_t status = H5Fclose(file_id_); @@ -29,41 +30,24 @@ HDF5OutputLayer::~HDF5OutputLayer() { } } -template -void HDF5OutputLayer::SaveBlobs() { - // TODO: no limit on the number of blobs - LOG(INFO) << "Saving HDF5 file " << file_name_; - CHECK_EQ(data_blob_.num(), label_blob_.num()) << - "data blob and label blob must have the same batch size"; - hdf5_save_nd_dataset(file_id_, HDF5_DATA_DATASET_NAME, data_blob_); - hdf5_save_nd_dataset(file_id_, HDF5_DATA_LABEL_NAME, label_blob_); - LOG(INFO) << "Successfully saved " << data_blob_.num() << " rows"; -} - -template +template void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { - CHECK_GE(bottom.size(), 2); - CHECK_EQ(bottom[0]->num(), bottom[1]->num()); - data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); - label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), - bottom[1]->height(), bottom[1]->width()); - const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); - const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); - - for (int i = 0; i < bottom[0]->num(); ++i) { - caffe_copy(data_datum_dim, &bottom[0]->cpu_data()[i * data_datum_dim], - &data_blob_.mutable_cpu_data()[i * data_datum_dim]); - caffe_copy(label_datum_dim, &bottom[1]->cpu_data()[i * label_datum_dim], - &label_blob_.mutable_cpu_data()[i * label_datum_dim]); + const vector*>& top) { + CHECK_EQ(this->layer_param_.bottom_size(), bottom.size()); + for (int i = 0; i < bottom.size(); ++i) { + stringstream batch_id; + batch_id << this->layer_param_.bottom(i) << "_" << current_batch_; + LOG_FIRST_N(INFO, bottom.size()) << "Saving batch " << batch_id.str() + << " to HDF5 file " << file_name_; + hdf5_save_nd_dataset(file_id_, batch_id.str(), *bottom[i]); } - SaveBlobs(); + current_batch_++; } -template +template void HDF5OutputLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { return; } diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index ae497c34fc2..e8b797824d8 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -14,22 +14,7 @@ namespace caffe { template void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - CHECK_GE(bottom.size(), 2); - CHECK_EQ(bottom[0]->num(), bottom[1]->num()); - data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); - label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), - bottom[1]->height(), bottom[1]->width()); - const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); - const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); - - for (int i = 0; i < bottom[0]->num(); ++i) { - caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], - &data_blob_.mutable_cpu_data()[i * data_datum_dim]); - caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], - &label_blob_.mutable_cpu_data()[i * label_datum_dim]); - } - SaveBlobs(); + Forward_cpu(bottom, top); } template diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 1c802714e33..6cddf0008cd 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -50,15 +50,13 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, template void Im2colLayer::Reshape(const vector*>& bottom, const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); top[0]->Reshape( bottom[0]->num(), channels_ * kernel_h_ * kernel_w_, (height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1, - (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1); + (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1, this->device_context_); } template diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 38ebbd5ec14..0ad1c70fefa 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -69,21 +69,20 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const int crop_size = this->layer_param_.transform_param().crop_size(); const int batch_size = this->layer_param_.image_data_param().batch_size(); if (crop_size > 0) { - top[0]->Reshape(batch_size, channels, crop_size, crop_size); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); - this->transformed_data_.Reshape(1, channels, crop_size, crop_size); + top[0]->Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); + this->transformed_data_.Reshape(1, channels, crop_size, crop_size, this->device_context_); } else { - top[0]->Reshape(batch_size, channels, height, width); - this->prefetch_data_.Reshape(batch_size, channels, height, width); - this->transformed_data_.Reshape(1, channels, height, width); + top[0]->Reshape(batch_size, channels, height, width, this->device_context_); + this->prefetch_data_.Reshape(batch_size, channels, height, width, this->device_context_); + this->transformed_data_.Reshape(1, channels, height, width, this->device_context_); } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label - vector label_shape(1, batch_size); - top[1]->Reshape(label_shape); - this->prefetch_label_.Reshape(label_shape); + top[1]->Reshape(batch_size, 1, 1, 1, this->device_context_); + this->prefetch_label_.Reshape(batch_size, 1, 1, 1, this->device_context_); } template @@ -116,9 +115,9 @@ void ImageDataLayer::InternalThreadEntry() { cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, 0, 0, is_color); this->prefetch_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); + cv_img.rows, cv_img.cols, this->device_context_); this->transformed_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); + cv_img.rows, cv_img.cols, this->device_context_); } Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index a1e0b40de0e..2540443d05b 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -20,7 +20,7 @@ void InfogainLossLayer::LayerSetUp( BlobProto blob_proto; ReadProtoFromBinaryFile( this->layer_param_.infogain_loss_param().source(), &blob_proto); - infogain_.FromProto(blob_proto); + infogain_.FromProto(blob_proto, this->device_context_); } } diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 89e0c8fbad7..323850ab324 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -9,21 +9,20 @@ namespace caffe { -template +template void InnerProductLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { + + DeviceContext &device_context = Caffe::GetDeviceContext( + this->layer_param_.device()); + const int num_output = this->layer_param_.inner_product_param().num_output(); bias_term_ = this->layer_param_.inner_product_param().bias_term(); N_ = num_output; - const int axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.inner_product_param().axis()); - // Dimensions starting from "axis" are "flattened" into a single - // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W), - // and axis == 1, N inner products with dimension CHW are performed. - K_ = bottom[0]->count(axis); + K_ = bottom[0]->count() / bottom[0]->num(); // Check if we need to set up the weights if (this->blobs_.size() > 0) { - LOG(INFO) << "Skipping parameter initialization"; + LOG(INFO)<< "Skipping parameter initialization"; } else { if (bias_term_) { this->blobs_.resize(2); @@ -31,91 +30,77 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, this->blobs_.resize(1); } // Intialize the weight - vector weight_shape(2); - weight_shape[0] = N_; - weight_shape[1] = K_; - this->blobs_[0].reset(new Blob(weight_shape)); + this->blobs_[0].reset(new Blob(1, 1, N_, K_, this->device_context_)); // fill the weights shared_ptr > weight_filler(GetFiller( - this->layer_param_.inner_product_param().weight_filler())); - weight_filler->Fill(this->blobs_[0].get()); + this->layer_param_.inner_product_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get(),device_context); // If necessary, intiialize and fill the bias term if (bias_term_) { - vector bias_shape(1, N_); - this->blobs_[1].reset(new Blob(bias_shape)); + this->blobs_[1].reset(new Blob(1, 1, 1, N_, this->device_context_)); shared_ptr > bias_filler(GetFiller( - this->layer_param_.inner_product_param().bias_filler())); - bias_filler->Fill(this->blobs_[1].get()); + this->layer_param_.inner_product_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get(),device_context); } } // parameter initialization this->param_propagate_down_.resize(this->blobs_.size(), true); } -template +template void InnerProductLayer::Reshape(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // Figure out the dimensions - const int axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.inner_product_param().axis()); - const int new_K = bottom[0]->count(axis); - CHECK_EQ(K_, new_K) - << "Input size incompatible with inner product parameters."; - // The first "axis" dimensions are independent inner products; the total - // number of these is M_, the product over these dimensions. - M_ = bottom[0]->count(0, axis); - // The top shape will be the bottom shape with the flattened axes dropped, - // and replaced by a single axis with dimension num_output (N_). - vector top_shape = bottom[0]->shape(); - top_shape.resize(axis + 1); - top_shape[axis] = N_; - top[0]->Reshape(top_shape); + M_ = bottom[0]->num(); + CHECK_EQ(bottom[0]->count() / bottom[0]->num(), K_)<< "Input size " + "incompatible with inner product parameters."; + top[0]->Reshape(bottom[0]->num(), N_, 1, 1, this->device_context_); // Set up the bias multiplier if (bias_term_) { - vector bias_shape(1, M_); - bias_multiplier_.Reshape(bias_shape); + bias_multiplier_.Reshape(1, 1, 1, M_, this->device_context_); caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data()); } } -template +template void InnerProductLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); const Dtype* weight = this->blobs_[0]->cpu_data(); - caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., - bottom_data, weight, (Dtype)0., top_data); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., + bottom_data, weight, (Dtype) 0., top_data); if (bias_term_) { - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., - bias_multiplier_.cpu_data(), - this->blobs_[1]->cpu_data(), (Dtype)1., top_data); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., + bias_multiplier_.cpu_data(), + this->blobs_[1]->cpu_data(), (Dtype) 1., top_data); } } -template -void InnerProductLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, +template +void InnerProductLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight - caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); + caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., + top_diff, bottom_data, (Dtype) 0., + this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias - caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)0., - this->blobs_[1]->mutable_cpu_diff()); + caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype) 1., top_diff, + bias_multiplier_.cpu_data(), (Dtype) 0., + this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bottom data - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., - top_diff, this->blobs_[0]->cpu_data(), (Dtype)0., - bottom[0]->mutable_cpu_diff()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., + top_diff, this->blobs_[0]->cpu_data(), (Dtype) 0., + bottom[0]->mutable_cpu_diff()); } } diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index 3496a5c2a8a..471adc5c56c 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -24,8 +24,7 @@ void LossLayer::Reshape( const vector*>& bottom, const vector*>& top) { CHECK_EQ(bottom[0]->num(), bottom[1]->num()) << "The data and label should have the same number."; - vector loss_shape(0); // Loss layers output a scalar; 0 axes. - top[0]->Reshape(loss_shape); + top[0]->Reshape(1, 1, 1, 1, this->device_context_); } INSTANTIATE_CLASS(LossLayer); diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 36c1ace4c99..deaca6b2db9 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -69,16 +69,14 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, template void LRNLayer::Reshape(const vector*>& bottom, const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; num_ = bottom[0]->num(); channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: - top[0]->Reshape(num_, channels_, height_, width_); - scale_.Reshape(num_, channels_, height_, width_); + top[0]->Reshape(num_, channels_, height_, width_, this->device_context_); + scale_.Reshape(num_, channels_, height_, width_, this->device_context_); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: split_layer_->Reshape(bottom, split_top_vec_); @@ -115,7 +113,7 @@ void LRNLayer::CrossChannelForward_cpu( for (int i = 0; i < scale_.count(); ++i) { scale_data[i] = k_; } - Blob padded_square(1, channels_ + size_ - 1, height_, width_); + Blob padded_square(1, channels_ + size_ - 1, height_, width_, this->device_context_); Dtype* padded_square_data = padded_square.mutable_cpu_data(); caffe_set(padded_square.count(), Dtype(0), padded_square_data); Dtype alpha_over_size = alpha_ / size_; @@ -186,8 +184,8 @@ void LRNLayer::CrossChannelBackward_cpu( const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* scale_data = scale_.cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - Blob padded_ratio(1, channels_ + size_ - 1, height_, width_); - Blob accum_ratio(1, 1, height_, width_); + Blob padded_ratio(1, channels_ + size_ - 1, height_, width_, this->device_context_); + Blob accum_ratio(1, 1, height_, width_, this->device_context_); Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data(); // We hack a little bit by using the diff() to store an additional result diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 24aa6a30130..58c39926c72 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -26,24 +26,26 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* in, Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values - while (head < post_pad && head < channels) { + while (head < post_pad) { accum_scale += in[head * step] * in[head * step]; ++head; } + // until we reach size, nothing needs to be subtracted + while (head < size) { + accum_scale += in[head * step] * in[head * step]; + scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } // both add and subtract while (head < channels) { accum_scale += in[head * step] * in[head * step]; - if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; - } + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { - if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; - } + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } @@ -141,19 +143,26 @@ __global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, int post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values - while (head < post_pad && head < channels) { + while (head < post_pad) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; ++head; } + // until we reach size, nothing needs to be subtracted + while (head < size) { + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; + ++head; + } // both add and subtract while (head < channels) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; - if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; - } + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; @@ -161,10 +170,8 @@ __global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, } // subtract only while (head < channels + post_pad) { - if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; - } + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index 42de4198bc4..d5e90174994 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -19,11 +19,10 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, CHECK_GT(batch_size_ * size_, 0) << "batch_size, channels, height, and width must be specified and" " positive in memory_data_param"; - vector label_shape(1, batch_size_); - top[0]->Reshape(batch_size_, channels_, height_, width_); - top[1]->Reshape(label_shape); - added_data_.Reshape(batch_size_, channels_, height_, width_); - added_label_.Reshape(label_shape); + top[0]->Reshape(batch_size_, channels_, height_, width_, this->device_context_); + top[1]->Reshape(batch_size_, 1, 1, 1, this->device_context_); + added_data_.Reshape(batch_size_, channels_, height_, width_, this->device_context_); + added_label_.Reshape(batch_size_, 1, 1, 1, this->device_context_); data_ = NULL; labels_ = NULL; added_data_.cpu_data(); @@ -38,8 +37,8 @@ void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { CHECK_GT(num, 0) << "There is no datum to add."; CHECK_EQ(num % batch_size_, 0) << "The added data must be a multiple of the batch size."; - added_data_.Reshape(num, channels_, height_, width_); - added_label_.Reshape(num, 1, 1, 1); + added_data_.Reshape(num, channels_, height_, width_, this->device_context_); + added_label_.Reshape(num, 1, 1, 1, this->device_context_); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(datum_vector, &added_data_); // Copy Labels @@ -62,8 +61,8 @@ void MemoryDataLayer::AddMatVector(const vector& mat_vector, CHECK_GT(num, 0) << "There is no mat to add"; CHECK_EQ(num % batch_size_, 0) << "The added data must be a multiple of the batch size."; - added_data_.Reshape(num, channels_, height_, width_); - added_label_.Reshape(num, 1, 1, 1); + added_data_.Reshape(num, channels_, height_, width_,this->device_context_); + added_label_.Reshape(num, 1, 1, 1,this->device_context_); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(mat_vector, &added_data_); // Copy Labels @@ -98,16 +97,16 @@ void MemoryDataLayer::set_batch_size(int new_size) { CHECK(!has_new_data_) << "Can't change batch_size until current data has been consumed."; batch_size_ = new_size; - added_data_.Reshape(batch_size_, channels_, height_, width_); - added_label_.Reshape(batch_size_, 1, 1, 1); + added_data_.Reshape(batch_size_, channels_, height_, width_,this->device_context_); + added_label_.Reshape(batch_size_, 1, 1, 1,this->device_context_); } template void MemoryDataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset"; - top[0]->Reshape(batch_size_, channels_, height_, width_); - top[1]->Reshape(batch_size_, 1, 1, 1); + top[0]->Reshape(batch_size_, channels_, height_, width_,this->device_context_); + top[1]->Reshape(batch_size_, 1, 1, 1,this->device_context_); top[0]->set_cpu_data(data_ + pos_ * size_); top[1]->set_cpu_data(labels_ + pos_); pos_ = (pos_ + batch_size_) % n_; diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index b74d7b4f300..4650cf9033e 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -7,26 +7,26 @@ namespace caffe { -template +template void MVNLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); - mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), - 1, 1); - variance_.Reshape(bottom[0]->num(), bottom[0]->channels(), - 1, 1); - temp_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); - sum_multiplier_.Reshape(1, 1, - bottom[0]->height(), bottom[0]->width()); + const vector*>& top) { + top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), + bottom[0]->width(), this->device_context_); + mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, + this->device_context_); + variance_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, + this->device_context_); + temp_.Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), + bottom[0]->width(), this->device_context_); + sum_multiplier_.Reshape(1, 1, bottom[0]->height(), bottom[0]->width(), + this->device_context_); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); } -template +template void MVNLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); int num; @@ -41,55 +41,57 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ caffe_powx(bottom[0]->count(), bottom_data, Dtype(2), - temp_.mutable_cpu_data()); + temp_.mutable_cpu_data()); // computes variance using var(X) = E(X^2) - (EX)^2 caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + sum_multiplier_.cpu_data(), 0., + mean_.mutable_cpu_data()); // EX caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.cpu_data(), - sum_multiplier_.cpu_data(), 0., - variance_.mutable_cpu_data()); // E(X^2) + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), - temp_.mutable_cpu_data()); // (EX)^2 + temp_.mutable_cpu_data()); // (EX)^2 caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), - variance_.mutable_cpu_data()); // variance + variance_.mutable_cpu_data()); // variance // do mean and variance normalization // subtract mean caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_add(temp_.count(), bottom_data, temp_.cpu_data(), top_data); // normalize variance caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), - variance_.mutable_cpu_data()); + variance_.mutable_cpu_data()); caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_div(temp_.count(), top_data, temp_.cpu_data(), top_data); } else { caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + sum_multiplier_.cpu_data(), 0., + mean_.mutable_cpu_data()); // EX // subtract mean caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_add(temp_.count(), bottom_data, temp_.cpu_data(), top_data); } } -template +template void MVNLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* top_data = top[0]->cpu_data(); const Dtype* bottom_data = bottom[0]->cpu_data(); @@ -107,45 +109,47 @@ void MVNLayer::Backward_cpu(const vector*>& top, if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_cpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); + sum_multiplier_.cpu_data(), 0., + mean_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., - bottom_diff); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + bottom_diff); caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff); caffe_cpu_gemv(CblasNoTrans, num, dim, 1., top_diff, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); + sum_multiplier_.cpu_data(), 0., + mean_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 1., - bottom_diff); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 1., + bottom_diff); caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. / dim), - bottom_diff); + bottom_diff); // put the squares of bottom into temp_ - caffe_powx(temp_.count(), bottom_data, Dtype(2), - temp_.mutable_cpu_data()); + caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); // computes variance using var(X) = E(X^2) - (EX)^2 caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + sum_multiplier_.cpu_data(), 0., + mean_.mutable_cpu_data()); // EX caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.cpu_data(), - sum_multiplier_.cpu_data(), 0., - variance_.mutable_cpu_data()); // E(X^2) + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), - temp_.mutable_cpu_data()); // (EX)^2 + temp_.mutable_cpu_data()); // (EX)^2 caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), - variance_.mutable_cpu_data()); // variance + variance_.mutable_cpu_data()); // variance // normalize variance caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), - variance_.mutable_cpu_data()); + variance_.mutable_cpu_data()); caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff); } else { @@ -153,7 +157,6 @@ void MVNLayer::Backward_cpu(const vector*>& top, } } - #ifdef CPU_ONLY STUB_GPU(MVNLayer); #endif diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp index ba67b43878e..1a55a1c84bb 100644 --- a/src/caffe/layers/neuron_layer.cpp +++ b/src/caffe/layers/neuron_layer.cpp @@ -8,7 +8,7 @@ namespace caffe { template void NeuronLayer::Reshape(const vector*>& bottom, const vector*>& top) { - top[0]->ReshapeLike(*bottom[0]); + top[0]->ReshapeLike(*bottom[0],this->device_context_); } INSTANTIATE_CLASS(NeuronLayer); diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index c8d41499455..a20e7afee8b 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -13,28 +13,28 @@ namespace caffe { using std::min; using std::max; -template +template void PoolingLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { PoolingParameter pool_param = this->layer_param_.pooling_param(); if (pool_param.global_pooling()) { CHECK(!(pool_param.has_kernel_size() || - pool_param.has_kernel_h() || pool_param.has_kernel_w())) - << "With Global_pooling: true Filter size cannot specified"; + pool_param.has_kernel_h() || pool_param.has_kernel_w())) + << "With Global_pooling: true Filter size cannot specified"; } else { CHECK(!pool_param.has_kernel_size() != - !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; CHECK(pool_param.has_kernel_size() || - (pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "For non-square filters both kernel_h and kernel_w are required."; + (pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; } CHECK((!pool_param.has_pad() && pool_param.has_pad_h() - && pool_param.has_pad_w()) + && pool_param.has_pad_w()) || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; CHECK((!pool_param.has_stride() && pool_param.has_stride_h() - && pool_param.has_stride_w()) + && pool_param.has_stride_w()) || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; global_pooling_ = pool_param.global_pooling(); @@ -49,8 +49,8 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, kernel_w_ = pool_param.kernel_w(); } } - CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; if (!pool_param.has_pad_h()) { pad_h_ = pad_w_ = pool_param.pad(); } else { @@ -65,7 +65,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } if (global_pooling_) { CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1) - << "With Global_pooling: true; only pad = 0 and stride = 1"; + << "With Global_pooling: true; only pad = 0 and stride = 1"; } if (pad_h_ != 0 || pad_w_ != 0) { CHECK(this->layer_param_.pooling_param().pool() @@ -78,11 +78,9 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } } -template +template void PoolingLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; + const vector*>& top) { channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); @@ -90,10 +88,10 @@ void PoolingLayer::Reshape(const vector*>& bottom, kernel_h_ = bottom[0]->height(); kernel_w_ = bottom[0]->width(); } - pooled_height_ = static_cast(ceil(static_cast( - height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; - pooled_width_ = static_cast(ceil(static_cast( - width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; + pooled_height_ = static_cast(ceil( + static_cast(height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; + pooled_width_ = static_cast(ceil( + static_cast(width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; if (pad_h_ || pad_w_) { // If we have padding, ensure that the last pooling starts strictly // inside the image (instead of at the padding); otherwise clip the last. @@ -106,30 +104,29 @@ void PoolingLayer::Reshape(const vector*>& bottom, CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_); CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_); } - top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_,this->device_context_); if (top.size() > 1) { - top[1]->ReshapeLike(*top[0]); + top[1]->ReshapeLike(*top[0],this->device_context_); } // If max pooling, we will initialize the vector index part. - if (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX && top.size() == 1) { + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX && top.size() == 1) { max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + pooled_width_,this->device_context_); } // If stochastic pooling, we will initialize the random index part. - if (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_STOCHASTIC) { + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_STOCHASTIC) { rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + pooled_width_,this->device_context_); } } // TODO(Yangqing): Is there a faster way to do pooling in the channel-first // case? -template +template void PoolingLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); const int top_count = top[0]->count(); @@ -140,98 +137,99 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, // Different pooling methods. We explicitly do the switch outside the for // loop to save time, although this results in more code. switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - // Initialize - if (use_top_mask) { - top_mask = top[1]->mutable_cpu_data(); - caffe_set(top_count, Dtype(-1), top_mask); - } else { - mask = max_idx_.mutable_cpu_data(); - caffe_set(top_count, -1, mask); - } - caffe_set(top_count, Dtype(-FLT_MAX), top_data); - // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_); - int wend = min(wstart + kernel_w_, width_); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - const int pool_index = ph * pooled_width_ + pw; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - const int index = h * width_ + w; - if (bottom_data[index] > top_data[pool_index]) { - top_data[pool_index] = bottom_data[index]; - if (use_top_mask) { - top_mask[pool_index] = static_cast(index); - } else { - mask[pool_index] = index; + case PoolingParameter_PoolMethod_MAX: + // Initialize + if (use_top_mask) { + top_mask = top[1]->mutable_cpu_data(); + caffe_set(top_count, Dtype(-1), top_mask); + } else { + mask = max_idx_.mutable_cpu_data(); + caffe_set(top_count, -1, mask); + } + caffe_set(top_count, Dtype(-FLT_MAX), top_data); + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_); + int wend = min(wstart + kernel_w_, width_); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + const int pool_index = ph * pooled_width_ + pw; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + const int index = h * width_ + w; + if (bottom_data[index] > top_data[pool_index]) { + top_data[pool_index] = bottom_data[index]; + if (use_top_mask) { + top_mask[pool_index] = static_cast(index); + } else { + mask[pool_index] = index; + } } } } } } - } - // compute offset - bottom_data += bottom[0]->offset(0, 1); - top_data += top[0]->offset(0, 1); - if (use_top_mask) { - top_mask += top[0]->offset(0, 1); - } else { - mask += top[0]->offset(0, 1); + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); + if (use_top_mask) { + top_mask += top[0]->offset(0, 1); + } else { + mask += top[0]->offset(0, 1); + } } } - } - break; - case PoolingParameter_PoolMethod_AVE: - for (int i = 0; i < top_count; ++i) { - top_data[i] = 0; - } - // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height_); - wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - top_data[ph * pooled_width_ + pw] += - bottom_data[h * width_ + w]; + break; + case PoolingParameter_PoolMethod_AVE: + for (int i = 0; i < top_count; ++i) { + top_data[i] = 0; + } + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_ + pad_h_); + int wend = min(wstart + kernel_w_, width_ + pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + top_data[ph * pooled_width_ + pw] += bottom_data[h * width_ + + w]; + } } + top_data[ph * pooled_width_ + pw] /= pool_size; } - top_data[ph * pooled_width_ + pw] /= pool_size; } + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); } - // compute offset - bottom_data += bottom[0]->offset(0, 1); - top_data += top[0]->offset(0, 1); } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL)<< "Unknown pooling method."; } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - NOT_IMPLEMENTED; - break; - default: - LOG(FATAL) << "Unknown pooling method."; } -} -template +template void PoolingLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (!propagate_down[0]) { return; } @@ -245,73 +243,72 @@ void PoolingLayer::Backward_cpu(const vector*>& top, const int* mask = NULL; // suppress warnings about uninitialized variables const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - // The main loop - if (use_top_mask) { - top_mask = top[1]->cpu_data(); - } else { - mask = max_idx_.cpu_data(); - } - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - const int index = ph * pooled_width_ + pw; - const int bottom_index = - use_top_mask ? top_mask[index] : mask[index]; - bottom_diff[bottom_index] += top_diff[index]; + case PoolingParameter_PoolMethod_MAX: + // The main loop + if (use_top_mask) { + top_mask = top[1]->cpu_data(); + } else { + mask = max_idx_.cpu_data(); + } + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + const int index = ph * pooled_width_ + pw; + const int bottom_index = + use_top_mask ? top_mask[index] : mask[index]; + bottom_diff[bottom_index] += top_diff[index]; + } + } + bottom_diff += bottom[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); + if (use_top_mask) { + top_mask += top[0]->offset(0, 1); + } else { + mask += top[0]->offset(0, 1); } - } - bottom_diff += bottom[0]->offset(0, 1); - top_diff += top[0]->offset(0, 1); - if (use_top_mask) { - top_mask += top[0]->offset(0, 1); - } else { - mask += top[0]->offset(0, 1); } } - } - break; - case PoolingParameter_PoolMethod_AVE: - // The main loop - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height_); - wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - bottom_diff[h * width_ + w] += - top_diff[ph * pooled_width_ + pw] / pool_size; + break; + case PoolingParameter_PoolMethod_AVE: + // The main loop + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_ + pad_h_); + int wend = min(wstart + kernel_w_, width_ + pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + bottom_diff[h * width_ + w] += top_diff[ph * pooled_width_ + + pw] / pool_size; + } } } } + // offset + bottom_diff += bottom[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); } - // offset - bottom_diff += bottom[0]->offset(0, 1); - top_diff += top[0]->offset(0, 1); } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL)<< "Unknown pooling method."; } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - NOT_IMPLEMENTED; - break; - default: - LOG(FATAL) << "Unknown pooling method."; } -} - #ifdef CPU_ONLY -STUB_GPU(PoolingLayer); + STUB_GPU(PoolingLayer); #endif INSTANTIATE_CLASS(PoolingLayer); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index d1d48501af3..9729990594d 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -15,7 +15,8 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, int* mask, Dtype* top_mask) { - CUDA_KERNEL_LOOP(index, nthreads) { + CUDA_KERNEL_LOOP(index, nthreads) +{ int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -52,7 +53,8 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) { + CUDA_KERNEL_LOOP(index, nthreads) +{ int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -84,7 +86,8 @@ __global__ void StoPoolForwardTrain(const int nthreads, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* rand_idx, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) { + CUDA_KERNEL_LOOP(index, nthreads) +{ int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -126,41 +129,40 @@ __global__ void StoPoolForwardTest(const int nthreads, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); - // We set cumsum to be 0 to avoid divide-by-zero problems - Dtype cumsum = FLT_MIN; - Dtype cumvalues = 0.; - bottom_data += (n * channels + c) * height * width; - // First pass: get sum - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; - cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; - } + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; } - top_data[index] = cumvalues / cumsum; } + top_data[index] = cumvalues / cumsum; +} } - -template +template void PoolingLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - int count = top[0]->count(); - // We'll output the mask to top[1] if it's of size >1. - const bool use_top_mask = top.size() > 1; - int* mask = NULL; - Dtype* top_mask = NULL; - switch (this->layer_param_.pooling_param().pool()) { + const vector*>& top) { +const Dtype* bottom_data = bottom[0]->gpu_data(); +Dtype* top_data = top[0]->mutable_gpu_data(); +int count = top[0]->count(); +// We'll output the mask to top[1] if it's of size >1. +const bool use_top_mask = top.size() > 1; +int* mask = NULL; +Dtype* top_mask = NULL; +switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); @@ -187,27 +189,27 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTrain<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, - rand_idx_.mutable_gpu_data(), top_data); - } else { - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTest<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, top_data); - } - break; - default: - LOG(FATAL) << "Unknown pooling method."; - } - CUDA_POST_KERNEL_CHECK; + StoPoolForwardTrain<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, + rand_idx_.mutable_gpu_data(), top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTest<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, top_data); +} +break; +default: +LOG(FATAL)<< "Unknown pooling method."; +} +CUDA_POST_KERNEL_CHECK +; } - template __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, @@ -216,7 +218,8 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { + CUDA_KERNEL_LOOP(index, nthreads) +{ // find out the local index // find out the local offset int w = index % width; @@ -262,7 +265,8 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { + CUDA_KERNEL_LOOP(index, nthreads) +{ // find out the local index // find out the local offset int w = index % width + pad_w; @@ -299,81 +303,80 @@ __global__ void StoPoolBackward(const int nthreads, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { - // find out the local index - // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - int phend = min(h / stride_h + 1, pooled_height); - int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - int pwend = min(w / stride_w + 1, pooled_width); - Dtype gradient = 0; - rand_idx += (n * channels + c) * pooled_height * pooled_width; - top_diff += (n * channels + c) * pooled_height * pooled_width; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff[ph * pooled_width + pw] * - (index == static_cast(rand_idx[ph * pooled_width + pw])); - } - } - bottom_diff[index] = gradient; +// find out the local index +// find out the local offset +int w = index % width; +int h = (index / width) % height; +int c = (index / width / height) % channels; +int n = index / width / height / channels; +int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; +int phend = min(h / stride_h + 1, pooled_height); +int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; +int pwend = min(w / stride_w + 1, pooled_width); +Dtype gradient = 0; +rand_idx += (n * channels + c) * pooled_height * pooled_width; +top_diff += (n * channels + c) * pooled_height * pooled_width; +for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff[ph * pooled_width + pw] * + (index == static_cast(rand_idx[ph * pooled_width + pw])); } } +bottom_diff[index] = gradient; +} +} - -template +template void PoolingLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { - return; - } - const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); - caffe_gpu_set(count, Dtype(0.), bottom_diff); - // We'll output the mask to top[1] if it's of size >1. - const bool use_top_mask = top.size() > 1; - const int* mask = NULL; - const Dtype* top_mask = NULL; - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward<<>>( - count, top_diff, mask, top_mask, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - bottom_diff); - break; - case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolBackward<<>>( - count, top_diff, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolBackward<<>>( - count, rand_idx_.gpu_data(), top_diff, - top[0]->num(), channels_, height_, width_, pooled_height_, - pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, - bottom_diff); - break; - default: - LOG(FATAL) << "Unknown pooling method."; + const vector& propagate_down, + const vector*>& bottom) { +if (!propagate_down[0]) { +return; +} +const Dtype* top_diff = top[0]->gpu_diff(); +Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); +const int count = bottom[0]->count(); +caffe_gpu_set(count, Dtype(0.), bottom_diff); + // We'll output the mask to top[1] if it's of size >1. +const bool use_top_mask = top.size() > 1; +const int* mask = NULL; +const Dtype* top_mask = NULL; +switch (this->layer_param_.pooling_param().pool()) { +case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); } - CUDA_POST_KERNEL_CHECK; + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward<<>>( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + bottom_diff); + break; +case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolBackward<<>>( + count, top_diff, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); + break; +case PoolingParameter_PoolMethod_STOCHASTIC: + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolBackward<<>>( + count, rand_idx_.gpu_data(), top_diff, + top[0]->num(), channels_, height_, width_, pooled_height_, + pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, + bottom_diff); + break; +default: + LOG(FATAL)<< "Unknown pooling method."; +} +CUDA_POST_KERNEL_CHECK +; } - INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); - } // namespace caffe diff --git a/src/caffe/layers/pooling_sk_layer.cpp b/src/caffe/layers/pooling_sk_layer.cpp new file mode 100644 index 00000000000..3fb90285d05 --- /dev/null +++ b/src/caffe/layers/pooling_sk_layer.cpp @@ -0,0 +1,139 @@ +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +using std::min; +using std::max; + +template +void PoolingSKLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + // Set the max number of top blobs before calling base Layer::SetUp. + // If doing MAX pooling, we can optionally output an extra top Blob + // for the mask. Otherwise, we only have one top Blob. + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + max_top_blobs_ = 2; + } else { + max_top_blobs_ = 1; + } + PoolingParameter pool_param = this->layer_param_.pooling_param(); + CHECK(!pool_param.has_kernel_size() != + !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(pool_param.has_kernel_size() || + (pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + CHECK((!pool_param.has_pad() && pool_param.has_pad_h() + && pool_param.has_pad_w()) + || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!pool_param.has_stride() && pool_param.has_stride_h() + && pool_param.has_stride_w()) + || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + if (pool_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = pool_param.kernel_size(); + } else { + kernel_h_ = pool_param.kernel_h(); + kernel_w_ = pool_param.kernel_w(); + } + CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; + if (!pool_param.has_pad_h()) { + pad_h_ = pad_w_ = pool_param.pad(); + } else { + pad_h_ = pool_param.pad_h(); + pad_w_ = pool_param.pad_w(); + } + CHECK_EQ(pad_h_, 0); + CHECK_EQ(pad_w_, 0); + if (!pool_param.has_stride_h()) { + stride_h_ = stride_w_ = pool_param.stride(); + } else { + stride_h_ = pool_param.stride_h(); + stride_w_ = pool_param.stride_w(); + } + if (pad_h_ != 0 || pad_w_ != 0) { + CHECK(this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE + || this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) + << "Padding implemented only for average and max pooling."; + CHECK_LT(pad_h_, kernel_h_); + CHECK_LT(pad_w_, kernel_w_); + } + if (!pool_param.has_kstride_h()) { + kstride_h_ = kstride_w_ = pool_param.kstride(); + } else { + kstride_h_ = pool_param.kstride_h(); + kstride_w_ = pool_param.kstride_w(); + } + + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + pooled_height_ = static_cast(ceil( + static_cast(height_ + 2 * pad_h_ - ext_kernel_h) / stride_h_)) + 1; + pooled_width_ = static_cast(ceil( + static_cast(width_ + 2 * pad_w_ - ext_kernel_w) / stride_w_)) + 1; + + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_,this->device_context_); + if (top.size() > 1) { + top[1]->ReshapeLike(*top[0],this->device_context_); + } + // If max pooling, we will initialize the vector index part. + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX && top.size() == 1) { + max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_,this->device_context_); + } + // If stochastic pooling, we will initialize the random index part. + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_STOCHASTIC) { + rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_,this->device_context_); + } +} + +template +void PoolingSKLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + LayerSetUp(bottom, top); +} + +// TODO(Yangqing): Is there a faster way to do pooling in the channel-first +// case? +template +void PoolingSKLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + LOG(FATAL)<< "Forward_cpu() not implemented in PoolingSKLayer."; +} + +template +void PoolingSKLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + LOG(FATAL)<< "Backward_cpu() not implemented in PoolingSKLayer."; + return; +} + +#ifdef CPU_ONLY +STUB_GPU(PoolingSKLayer); +#endif + +INSTANTIATE_CLASS(PoolingSKLayer); +REGISTER_LAYER_CLASS(PoolingSK); + +} // namespace caffe diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu new file mode 100644 index 00000000000..b4cdc83c4e3 --- /dev/null +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -0,0 +1,374 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + +namespace caffe { + +template +__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* top_data, int* mask, Dtype* top_mask) { + CUDA_KERNEL_LOOP(index, nthreads) + { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + bottom_data += (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data[maxidx]; + } + } + } + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +template +__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) + { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + bottom_data += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +template +__global__ void StoPoolForwardTrain(const int nthreads, + const Dtype* bottom_data, const int num, + const int channels, const int height, + const int width, const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, + const int ext_kernel_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, Dtype* rand_idx, + Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) + { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data[h * width + w]; + return; + } + } + } + } +} + +template +__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, + const int ext_kernel_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) + { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +template +void PoolingSKLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); +#endif + + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int count = top[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + int* mask = NULL; + Dtype* top_mask = NULL; + + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + if (this->device_context_.backend() == BACKEND_CUDA) { + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, top_data, + mask, top_mask); + } else { +#ifdef USE_GREENTEA + std::cout << "POOLING GREENTEA BEGIN" << std::endl; + viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_forward_sk")); + viennacl::ocl::enqueue( + oclk_max_pool_forward(count, WrapVector((cl_mem) bottom_data), + bottom[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapVector((cl_mem) top_data), + WrapVector((cl_mem) mask), + WrapVector((cl_mem) top_mask)), + ctx.get_queue()); + ctx.get_queue().finish(); + std::cout << "POOLING GREENTEA END" << std::endl; +#endif + } + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == caffe::TRAIN) { + // We need to create the random index as well. + caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTrain<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + rand_idx_.mutable_gpu_data(), top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTest<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, top_data); +} +break; +default: +LOG(FATAL)<< "Unknown pooling method."; +} + CUDA_POST_KERNEL_CHECK + ; +} + +template +__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, + const int* mask, const Dtype* top_mask, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) + { + // find out the local index + // find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff += offset; + if (mask) { + mask += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } else { + mask += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +template +void PoolingSKLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + caffe_gpu_set(count, Dtype(0.), bottom_diff); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + const int* mask = NULL; + const Dtype* top_mask = NULL; + + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward<<>>( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + bottom_diff); + break; + default: + LOG(FATAL)<< "Unknown or unsupported pooling method in Backward_gpu()."; + } + CUDA_POST_KERNEL_CHECK + ; +} + +INSTANTIATE_LAYER_GPU_FUNCS(PoolingSKLayer); + +} // namespace caffe diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index b8924c855e5..e3654920c2f 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -4,27 +4,53 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, - Dtype negative_slope) { - CUDA_KERNEL_LOOP(index, n) { + Dtype negative_slope) { + CUDA_KERNEL_LOOP(index, n) + { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } -template +template void ReLULayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); - // NOLINT_NEXT_LINE(whitespace/operators) - ReLUForward<<>>( - count, bottom_data, top_data, negative_slope); - CUDA_POST_KERNEL_CHECK; + if (this->device_context_.backend() == BACKEND_CUDA) { + // NOLINT_NEXT_LINE(whitespace/operators) + ReLUForward<<>>( + count, bottom_data, top_data, negative_slope); + CUDA_POST_KERNEL_CHECK + ; + } else { +#ifdef USE_GREENTEA + std::cout << "RELU GREENTEA BEGIN" << std::endl; + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( + CL_KERNEL_SELECT("relu_forward")); + viennacl::ocl::enqueue( + oclk_relu_forward(count, WrapVector((cl_mem) bottom_data), + WrapVector((cl_mem) top_data), negative_slope), + ctx.get_queue()); + ctx.get_queue().finish(); + std::cout << "RELU GREENTEA END" << std::endl; + +#endif + } // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data @@ -32,19 +58,21 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, // << " threads: " << CAFFE_CUDA_NUM_THREADS; } -template +template __global__ void ReLUBackward(const int n, const Dtype* in_diff, - const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { - CUDA_KERNEL_LOOP(index, n) { - out_diff[index] = in_diff[index] * ((in_data[index] > 0) - + (in_data[index] <= 0) * negative_slope); + const Dtype* in_data, Dtype* out_diff, + Dtype negative_slope) { + CUDA_KERNEL_LOOP(index, n) + { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } -template +template void ReLULayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); @@ -54,12 +82,11 @@ void ReLULayer::Backward_gpu(const vector*>& top, // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<<>>( count, top_diff, bottom_data, bottom_diff, negative_slope); - CUDA_POST_KERNEL_CHECK; + CUDA_POST_KERNEL_CHECK + ; } } - INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); - } // namespace caffe diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index 8d044ee7307..1952edf6c65 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -4,21 +4,44 @@ #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template void SilenceLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // Do nothing. } -template +template void SilenceLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { - caffe_gpu_set(bottom[i]->count(), Dtype(0), - bottom[i]->mutable_gpu_data()); + if (this->device_context_.backend() == BACKEND_CUDA) { + caffe_gpu_set(bottom[i]->count(), Dtype(0), + bottom[i]->mutable_gpu_data()); + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel( + CL_KERNEL_SELECT("gpu_set")); + viennacl::ocl::enqueue( + oclk_gpu_set( + bottom[i]->count(), Dtype(0), + WrapVector((cl_mem) bottom[i]->mutable_gpu_data())), + ctx.get_queue()); + ctx.get_queue().finish(); +#endif + } } } } diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index e4418c9cf9c..affed529586 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -11,8 +11,9 @@ template void SliceLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { const SliceParameter& slice_param = this->layer_param_.slice_param(); - CHECK(!(slice_param.has_axis() && slice_param.has_slice_dim())) - << "Either axis or slice_dim should be specified; not both."; + slice_dim_ = slice_param.slice_dim(); + CHECK_GE(slice_dim_, 0); + CHECK_LE(slice_dim_, 1) << "Can only slice num and channels"; slice_point_.clear(); std::copy(slice_param.slice_point().begin(), slice_param.slice_point().end(), @@ -22,27 +23,18 @@ void SliceLayer::LayerSetUp(const vector*>& bottom, template void SliceLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int num_axes = bottom[0]->num_axes(); - const SliceParameter& slice_param = this->layer_param_.slice_param(); - if (slice_param.has_slice_dim()) { - slice_axis_ = static_cast(slice_param.slice_dim()); - // Don't allow negative indexing for slice_dim, a uint32 -- almost - // certainly unintended. - CHECK_GE(slice_axis_, 0) << "casting slice_dim from uint32 to int32 " - << "produced negative result; slice_dim must satisfy " - << "0 <= slice_dim < " << kMaxBlobAxes; - CHECK_LT(slice_axis_, num_axes) << "slice_dim out of range."; - } else { - slice_axis_ = bottom[0]->CanonicalAxisIndex(slice_param.axis()); - } - vector top_shape = bottom[0]->shape(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - num_slices_ = bottom[0]->count(0, slice_axis_); - slice_size_ = bottom[0]->count(slice_axis_ + 1); - int count = 0; + count_ = 0; + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); if (slice_point_.size() != 0) { CHECK_EQ(slice_point_.size(), top.size() - 1); - CHECK_LE(top.size(), bottom_slice_axis); + if (slice_dim_ == 0) { + CHECK_LE(top.size(), num_); + } else { + CHECK_LE(top.size(), channels_); + } int prev = 0; vector slices; for (int i = 0; i < slice_point_.size(); ++i) { @@ -50,64 +42,94 @@ void SliceLayer::Reshape(const vector*>& bottom, slices.push_back(slice_point_[i] - prev); prev = slice_point_[i]; } - slices.push_back(bottom_slice_axis - prev); - for (int i = 0; i < top.size(); ++i) { - top_shape[slice_axis_] = slices[i]; - top[i]->Reshape(top_shape); - count += top[i]->count(); + if (slice_dim_ == 0) { + slices.push_back(num_ - prev); + for (int i = 0; i < top.size(); ++i) { + top[i]->Reshape(slices[i], channels_, height_, width_,this->device_context_); + count_ += top[i]->count(); + } + } else { + slices.push_back(channels_ - prev); + for (int i = 0; i < top.size(); ++i) { + top[i]->Reshape(num_, slices[i], height_, width_,this->device_context_); + count_ += top[i]->count(); + } } } else { - CHECK_EQ(bottom_slice_axis % top.size(), 0) - << "Number of top blobs (" << top.size() << ") should evenly " - << "divide input slice axis (" << bottom_slice_axis << ")"; - top_shape[slice_axis_] = bottom_slice_axis / top.size(); + if (slice_dim_ == 0) { + CHECK_EQ(num_ % top.size(), 0) + << "Number of top blobs (" << top.size() << ") " + << "should evenly divide input num ( " << num_ << ")"; + num_ = num_ / top.size(); + } else { + CHECK_EQ(channels_ % top.size(), 0) + << "Number of top blobs (" << top.size() << ") " + << "should evenly divide input channels ( " << channels_ << ")"; + channels_ = channels_ / top.size(); + } for (int i = 0; i < top.size(); ++i) { - top[i]->Reshape(top_shape); - count += top[i]->count(); + top[i]->Reshape(num_, channels_, height_, width_,this->device_context_); + count_ += top[i]->count(); } } - CHECK_EQ(count, bottom[0]->count()); + CHECK_EQ(count_, bottom[0]->count()); } template void SliceLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - int offset_slice_axis = 0; - const Dtype* bottom_data = bottom[0]->cpu_data(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - for (int i = 0; i < top.size(); ++i) { - Dtype* top_data = top[i]->mutable_cpu_data(); - const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - bottom_data + bottom_offset, top_data + top_offset); + const Dtype* bottom_data = bottom[0]->mutable_cpu_data(); + if (slice_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + Dtype* top_data = blob->mutable_cpu_data(); + caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num), + top_data); + offset_num += blob->num(); } - offset_slice_axis += top_slice_axis; - } + } else if (slice_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + Dtype* top_data = blob->mutable_cpu_data(); + const int num_elem = blob->channels() * blob->height() * blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel), + top_data + blob->offset(n)); + } + offset_channel += blob->channels(); + } + } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. } template void SliceLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } - int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->cpu_diff(); - const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - top_diff + top_offset, bottom_diff + bottom_offset); + if (slice_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + const Dtype* top_diff = blob->cpu_diff(); + caffe_copy(blob->count(), top_diff, + bottom_diff + bottom[0]->offset(offset_num)); + offset_num += blob->num(); } - offset_slice_axis += top_slice_axis; - } + } else if (slice_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + const Dtype* top_diff = blob->cpu_diff(); + const int num_elem = blob->channels() * blob->height() * blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, top_diff + blob->offset(n), + bottom_diff + bottom[0]->offset(n, offset_channel)); + } + offset_channel += blob->channels(); + } + } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. } #ifdef CPU_ONLY diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index e6e65677bd8..b5c5e61533f 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -9,42 +9,58 @@ namespace caffe { template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - int offset_slice_axis = 0; - const Dtype* bottom_data = bottom[0]->gpu_data(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - for (int i = 0; i < top.size(); ++i) { - Dtype* top_data = top[i]->mutable_gpu_data(); - const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - bottom_data + bottom_offset, top_data + top_offset); + const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); + if (slice_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + Dtype* top_data = blob->mutable_gpu_data(); + caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num), + top_data); + offset_num += blob->num(); } - offset_slice_axis += top_slice_axis; - } + } else if (slice_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + Dtype* top_data = blob->mutable_gpu_data(); + const int num_elem = blob->channels() * blob->height() * blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel), + top_data + blob->offset(n)); + } + offset_channel += blob->channels(); + } + } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. } template void SliceLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } - int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->gpu_diff(); - const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - top_diff + top_offset, bottom_diff + bottom_offset); + if (slice_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + const Dtype* top_diff = blob->gpu_diff(); + caffe_copy(blob->count(), top_diff, + bottom_diff + bottom[0]->offset(offset_num)); + offset_num += blob->num(); + } + } else if (slice_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; + const Dtype* top_diff = blob->gpu_diff(); + const int num_elem = blob->channels() * blob->height() * blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, top_diff + blob->offset(n), + bottom_diff + bottom[0]->offset(n, offset_channel)); + } + offset_channel += blob->channels(); } - offset_slice_axis += top_slice_axis; - } + } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. } INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer); diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index 04712c9e653..4e01c76bc4e 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -10,18 +10,14 @@ namespace caffe { template void SoftmaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { - softmax_axis_ = - bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); - top[0]->ReshapeLike(*bottom[0]); - vector mult_dims(1, bottom[0]->shape(softmax_axis_)); - sum_multiplier_.Reshape(mult_dims); + top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width(),this->device_context_); + sum_multiplier_.Reshape(1, bottom[0]->channels(), 1, 1,this->device_context_); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); - caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); - outer_num_ = bottom[0]->count(0, softmax_axis_); - inner_num_ = bottom[0]->count(softmax_axis_ + 1); - vector scale_dims = bottom[0]->shape(); - scale_dims[softmax_axis_] = 1; - scale_.Reshape(scale_dims); + for (int i = 0; i < sum_multiplier_.count(); ++i) { + multiplier_data[i] = 1.; + } + scale_.Reshape(bottom[0]->num(), 1, bottom[0]->height(), bottom[0]->width(),this->device_context_); } template @@ -30,32 +26,34 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); Dtype* scale_data = scale_.mutable_cpu_data(); - int channels = bottom[0]->shape(softmax_axis_); - int dim = bottom[0]->count() / outer_num_; + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + int dim = bottom[0]->count() / bottom[0]->num(); + int spatial_dim = bottom[0]->height() * bottom[0]->width(); caffe_copy(bottom[0]->count(), bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. - for (int i = 0; i < outer_num_; ++i) { + for (int i = 0; i < num; ++i) { // initialize scale_data to the first plane - caffe_copy(inner_num_, bottom_data + i * dim, scale_data); + caffe_copy(spatial_dim, bottom_data + i * dim, scale_data); for (int j = 0; j < channels; j++) { - for (int k = 0; k < inner_num_; k++) { + for (int k = 0; k < spatial_dim; k++) { scale_data[k] = std::max(scale_data[k], - bottom_data[i * dim + j * inner_num_ + k]); + bottom_data[i * dim + j * spatial_dim + k]); } } // subtraction - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, - 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, + 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data + i * dim); // exponentiation - caffe_exp(dim, top_data, top_data); + caffe_exp(dim, top_data + i * dim, top_data + i * dim); // sum after exp - caffe_cpu_gemv(CblasTrans, channels, inner_num_, 1., - top_data, sum_multiplier_.cpu_data(), 0., scale_data); + caffe_cpu_gemv(CblasTrans, channels, spatial_dim, 1., + top_data + i * dim, sum_multiplier_.cpu_data(), 0., scale_data); // division for (int j = 0; j < channels; j++) { - caffe_div(inner_num_, top_data, scale_data, top_data); - top_data += inner_num_; + caffe_div(spatial_dim, top_data + top[0]->offset(i, j), scale_data, + top_data + top[0]->offset(i, j)); } } } @@ -68,18 +66,20 @@ void SoftmaxLayer::Backward_cpu(const vector*>& top, const Dtype* top_data = top[0]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); Dtype* scale_data = scale_.mutable_cpu_data(); - int channels = top[0]->shape(softmax_axis_); - int dim = top[0]->count() / outer_num_; + int num = top[0]->num(); + int channels = top[0]->channels(); + int dim = top[0]->count() / top[0]->num(); + int spatial_dim = top[0]->height() * top[0]->width(); caffe_copy(top[0]->count(), top_diff, bottom_diff); - for (int i = 0; i < outer_num_; ++i) { + for (int i = 0; i < num; ++i) { // compute dot(top_diff, top_data) and subtract them from the bottom diff - for (int k = 0; k < inner_num_; ++k) { + for (int k = 0; k < spatial_dim; ++k) { scale_data[k] = caffe_cpu_strided_dot(channels, - bottom_diff + i * dim + k, inner_num_, - top_data + i * dim + k, inner_num_); + bottom_diff + i * dim + k, spatial_dim, + top_data + i * dim + k, spatial_dim); } // subtraction - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1, + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim); } // elementwise multiplication diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 1f9c3a41203..895977ea795 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -8,12 +8,19 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { -template +template __global__ void kernel_channel_max(const int num, const int channels, - const int spatial_dim, const Dtype* data, Dtype* out) { - CUDA_KERNEL_LOOP(index, num * spatial_dim) { + const int spatial_dim, const Dtype* data, + Dtype* out) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) + { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; @@ -24,28 +31,33 @@ __global__ void kernel_channel_max(const int num, const int channels, } } -template -__global__ void kernel_channel_subtract(const int count, - const int num, const int channels, - const int spatial_dim, const Dtype* channel_max, Dtype* data) { - CUDA_KERNEL_LOOP(index, count) { +template +__global__ void kernel_channel_subtract(const int count, const int num, + const int channels, + const int spatial_dim, + const Dtype* channel_max, Dtype* data) { + CUDA_KERNEL_LOOP(index, count) + { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } -template +template __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { - CUDA_KERNEL_LOOP(index, count) { + CUDA_KERNEL_LOOP(index, count) + { out[index] = exp(data[index]); } } -template +template __global__ void kernel_channel_sum(const int num, const int channels, - const int spatial_dim, const Dtype* data, Dtype* channel_sum) { - CUDA_KERNEL_LOOP(index, num * spatial_dim) { + const int spatial_dim, const Dtype* data, + Dtype* channel_sum) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) + { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; @@ -56,22 +68,24 @@ __global__ void kernel_channel_sum(const int num, const int channels, } } -template -__global__ void kernel_channel_div(const int count, - const int num, const int channels, - const int spatial_dim, const Dtype* channel_sum, Dtype* data) { - CUDA_KERNEL_LOOP(index, count) { +template +__global__ void kernel_channel_div(const int count, const int num, + const int channels, const int spatial_dim, + const Dtype* channel_sum, Dtype* data) { + CUDA_KERNEL_LOOP(index, count) + { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } -template +template __global__ void kernel_channel_dot(const int num, const int channels, - const int spatial_dim, const Dtype* data_1, const Dtype* data_2, - Dtype* channel_dot) { - CUDA_KERNEL_LOOP(index, num * spatial_dim) { + const int spatial_dim, const Dtype* data_1, + const Dtype* data_2, Dtype* channel_dot) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) + { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; @@ -83,61 +97,132 @@ __global__ void kernel_channel_dot(const int num, const int channels, } } -template +template void SoftmaxLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - Dtype* scale_data = scale_.mutable_gpu_data(); - int count = bottom[0]->count(); - int channels = top[0]->shape(softmax_axis_); - caffe_copy(count, bottom_data, top_data); - // We need to subtract the max to avoid numerical issues, compute the exp, - // and then normalize. - // compute max - // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_max<<>>(outer_num_, channels, inner_num_, top_data, + const vector*>& top) { + + if (this->device_context_.backend() == BACKEND_CUDA) { + // CUDA backend code + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + Dtype* scale_data = scale_.mutable_gpu_data(); + int count = bottom[0]->count(); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + int spatial_dim = bottom[0]->height() * bottom[0]->width(); + caffe_copy(count, bottom_data, top_data); + // We need to subtract the max to avoid numerical issues, compute the exp, + // and then normalize. + // compute max + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_max<<>>(num, channels, spatial_dim, top_data, scale_data); - // subtract - // NOLINT_NEXT_LINE(whitespace/operators) + // subtract + // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<<>>(count, outer_num_, channels, inner_num_, + CAFFE_CUDA_NUM_THREADS>>>(count, num, channels, spatial_dim, scale_data, top_data); - // exponentiate - // NOLINT_NEXT_LINE(whitespace/operators) - kernel_exp<<>>( - count, top_data, top_data); - // sum after exp - // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_sum<<>>(outer_num_, channels, inner_num_, top_data, + // exponentiate + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_exp<<>>(num * channels * spatial_dim, top_data, + top_data); + // sum after exp + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_sum<<>>(num, channels, spatial_dim, top_data, scale_data); - // divide - // NOLINT_NEXT_LINE(whitespace/operators) + // divide + // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<<>>(count, outer_num_, channels, inner_num_, + CAFFE_CUDA_NUM_THREADS>>>(count, num, channels, spatial_dim, scale_data, top_data); + +} else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + const cl_mem bottom_data = (cl_mem) (bottom[0]->gpu_data()); + cl_mem top_data = (cl_mem) (top[0]->mutable_gpu_data()); + cl_mem scale_data = (cl_mem) (scale_.mutable_gpu_data()); + int count = bottom[0]->count(); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + int spatial_dim = bottom[0]->height() * bottom[0]->width(); + + greentea_copy(count, bottom_data, top_data, ctx); + + viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_max")); + viennacl::ocl::enqueue( + oclk_channel_max(num, channels, spatial_dim, WrapVector(top_data), + WrapVector(scale_data)), + ctx.get_queue()); + ctx.get_queue().finish(); + + viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_subtract")); + viennacl::ocl::enqueue( + oclk_channel_subtract(count, num, channels, spatial_dim, + WrapVector(scale_data), + WrapVector(top_data)), + ctx.get_queue()); + ctx.get_queue().finish(); + + viennacl::ocl::kernel &oclk_exp = program.get_kernel( + CL_KERNEL_SELECT("kernel_exp")); + viennacl::ocl::enqueue( + oclk_exp(num * channels * spatial_dim, WrapVector(top_data), + WrapVector(top_data)), + ctx.get_queue()); + ctx.get_queue().finish(); + + viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_sum")); + viennacl::ocl::enqueue( + oclk_channel_sum(num, channels, spatial_dim, WrapVector(top_data), + WrapVector(scale_data)), + ctx.get_queue()); + ctx.get_queue().finish(); + + viennacl::ocl::kernel &oclk_channel_div = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_div")); + viennacl::ocl::enqueue( + oclk_channel_div(count, num, channels, spatial_dim, + WrapVector(scale_data), + WrapVector(top_data)), + ctx.get_queue()); + ctx.get_queue().finish(); + +#endif +} } -template +template void SoftmaxLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = top[0]->count(); - int channels = top[0]->shape(softmax_axis_); - caffe_copy(count, top_diff, bottom_diff); + int num = top[0]->num(); + int channels = top[0]->channels(); + int spatial_dim = top[0]->height() * top[0]->width(); + caffe_copy(top[0]->count(), top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_dot<<>>(outer_num_, channels, inner_num_, - top_diff, top_data, scale_data); + kernel_channel_dot<<>>(num, channels, spatial_dim, top_diff, top_data, + scale_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<<>>(count, outer_num_, channels, inner_num_, + CAFFE_CUDA_NUM_THREADS>>>(count, num, channels, spatial_dim, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); @@ -145,5 +230,4 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); - } // namespace caffe diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index ba312f67fbc..0a8d2db071a 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -35,18 +35,9 @@ void SoftmaxWithLossLayer::Reshape( const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); - softmax_axis_ = - bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); - outer_num_ = bottom[0]->count(0, softmax_axis_); - inner_num_ = bottom[0]->count(softmax_axis_ + 1); - CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) - << "Number of labels must match number of predictions; " - << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), " - << "label count (number of labels) must be N*H*W, " - << "with integer values in {0, 1, ..., C-1}."; if (top.size() >= 2) { // softmax output - top[1]->ReshapeLike(*bottom[0]); + top[1]->ReshapeLike(*bottom[0],this->device_context_); } } @@ -57,18 +48,20 @@ void SoftmaxWithLossLayer::Forward_cpu( softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.cpu_data(); const Dtype* label = bottom[1]->cpu_data(); - int dim = prob_.count() / outer_num_; + int num = prob_.num(); + int dim = prob_.count() / num; + int spatial_dim = prob_.height() * prob_.width(); int count = 0; Dtype loss = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; j++) { - const int label_value = static_cast(label[i * inner_num_ + j]); + for (int i = 0; i < num; ++i) { + for (int j = 0; j < spatial_dim; j++) { + const int label_value = static_cast(label[i * spatial_dim + j]); if (has_ignore_label_ && label_value == ignore_label_) { continue; } DCHECK_GE(label_value, 0); - DCHECK_LT(label_value, prob_.shape(softmax_axis_)); - loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j], + DCHECK_LT(label_value, prob_.channels()); + loss -= log(std::max(prob_data[i * dim + label_value * spatial_dim + j], Dtype(FLT_MIN))); ++count; } @@ -76,7 +69,7 @@ void SoftmaxWithLossLayer::Forward_cpu( if (normalize_) { top[0]->mutable_cpu_data()[0] = loss / count; } else { - top[0]->mutable_cpu_data()[0] = loss / outer_num_; + top[0]->mutable_cpu_data()[0] = loss / num; } if (top.size() == 2) { top[1]->ShareData(prob_); @@ -95,17 +88,19 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, const Dtype* prob_data = prob_.cpu_data(); caffe_copy(prob_.count(), prob_data, bottom_diff); const Dtype* label = bottom[1]->cpu_data(); - int dim = prob_.count() / outer_num_; + int num = prob_.num(); + int dim = prob_.count() / num; + int spatial_dim = prob_.height() * prob_.width(); int count = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; ++j) { - const int label_value = static_cast(label[i * inner_num_ + j]); + for (int i = 0; i < num; ++i) { + for (int j = 0; j < spatial_dim; ++j) { + const int label_value = static_cast(label[i * spatial_dim + j]); if (has_ignore_label_ && label_value == ignore_label_) { - for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { - bottom_diff[i * dim + c * inner_num_ + j] = 0; + for (int c = 0; c < bottom[0]->channels(); ++c) { + bottom_diff[i * dim + c * spatial_dim + j] = 0; } } else { - bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; + bottom_diff[i * dim + label_value * spatial_dim + j] -= 1; ++count; } } @@ -115,7 +110,7 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, if (normalize_) { caffe_scal(prob_.count(), loss_weight / count, bottom_diff); } else { - caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); + caffe_scal(prob_.count(), loss_weight / num, bottom_diff); } } } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 7e0f3da4552..dc13a0b7961 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -6,15 +6,23 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { -template +template __global__ void SoftmaxLossForwardGPU(const int nthreads, - const Dtype* prob_data, const Dtype* label, Dtype* loss, - const int num, const int dim, const int spatial_dim, - const bool has_ignore_label_, const int ignore_label_, - Dtype* counts) { - CUDA_KERNEL_LOOP(index, nthreads) { + const Dtype* prob_data, + const Dtype* label, Dtype* loss, + const int num, const int dim, + const int spatial_dim, + const bool has_ignore_label_, + const int ignore_label_, Dtype* counts) { + CUDA_KERNEL_LOOP(index, nthreads) + { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast(label[n * spatial_dim + s]); @@ -22,55 +30,67 @@ __global__ void SoftmaxLossForwardGPU(const int nthreads, loss[index] = 0; counts[index] = 0; } else { - loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], - Dtype(FLT_MIN))); + loss[index] = -log( + max(prob_data[n * dim + label_value * spatial_dim + s], + Dtype(FLT_MIN))); counts[index] = 1; } } } -template +template void SoftmaxWithLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); - const Dtype* prob_data = prob_.gpu_data(); - const Dtype* label = bottom[1]->gpu_data(); - const int dim = prob_.count() / outer_num_; - const int nthreads = outer_num_ * inner_num_; - // Since this memory is not used for anything until it is overwritten - // on the backward pass, we use it here to avoid having to allocate new GPU - // memory to accumulate intermediate results in the kernel. - Dtype* loss_data = bottom[0]->mutable_gpu_diff(); - // Similarly, this memory is never used elsewhere, and thus we can use it - // to avoid having to allocate additional GPU memory. - Dtype* counts = prob_.mutable_gpu_diff(); - // NOLINT_NEXT_LINE(whitespace/operators) - SoftmaxLossForwardGPU<<>>(nthreads, prob_data, label, loss_data, - outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); - Dtype loss; - caffe_gpu_asum(nthreads, loss_data, &loss); - if (normalize_) { - Dtype count; - caffe_gpu_asum(nthreads, counts, &count); - loss /= count; + + if (this->device_context_.backend() == BACKEND_CUDA) { + // CUDA backend code + const Dtype* prob_data = prob_.gpu_data(); + const Dtype* label = bottom[1]->gpu_data(); + const int num = prob_.num(); + const int dim = prob_.count() / num; + const int spatial_dim = prob_.height() * prob_.width(); + const int nthreads = num * spatial_dim; + // Since this memory is not used for anything until it is overwritten + // on the backward pass, we use it here to avoid having to allocate new GPU + // memory to accumulate intermediate results in the kernel. + Dtype* loss_data = bottom[0]->mutable_gpu_diff(); + // Similarly, this memory is never used elsewhere, and thus we can use it + // to avoid having to allocate additional GPU memory. + Dtype* counts = prob_.mutable_gpu_diff(); + // NOLINT_NEXT_LINE(whitespace/operators) + SoftmaxLossForwardGPU<<>>(nthreads, prob_data, label, loss_data, + num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts); + Dtype loss; + caffe_gpu_asum(nthreads, loss_data, &loss); + if (normalize_) { + Dtype count; + caffe_gpu_asum(nthreads, counts, &count); + loss /= count; + } else { + loss /= num; + } + top[0]->mutable_cpu_data()[0] = loss; + if (top.size() == 2) { + top[1]->ShareData(prob_); + } } else { - loss /= outer_num_; - } - top[0]->mutable_cpu_data()[0] = loss; - if (top.size() == 2) { - top[1]->ShareData(prob_); + } } -template +template __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, - const Dtype* label, Dtype* bottom_diff, const int num, const int dim, - const int spatial_dim, const bool has_ignore_label_, - const int ignore_label_, Dtype* counts) { + const Dtype* label, Dtype* bottom_diff, + const int num, const int dim, + const int spatial_dim, + const bool has_ignore_label_, + const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; - CUDA_KERNEL_LOOP(index, nthreads) { + CUDA_KERNEL_LOOP(index, nthreads) + { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast(label[n * spatial_dim + s]); @@ -87,12 +107,13 @@ __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, } } -template -void SoftmaxWithLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { +template +void SoftmaxWithLossLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[1]) { - LOG(FATAL) << this->type() - << " Layer cannot backpropagate to label inputs."; + LOG(FATAL)<< this->type() + << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); @@ -100,22 +121,24 @@ void SoftmaxWithLossLayer::Backward_gpu(const vector*>& top, const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); - const int dim = prob_.count() / outer_num_; - const int nthreads = outer_num_ * inner_num_; + const int num = prob_.num(); + const int dim = prob_.count() / num; + const int spatial_dim = prob_.height() * prob_.width(); + const int nthreads = num * spatial_dim; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<<>>(nthreads, top_data, label, bottom_diff, - outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); + CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, + num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { - caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); + caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } } } diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 272cb59cd37..e58fafb78af 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -18,7 +18,8 @@ void SplitLayer::Reshape(const vector*>& bottom, // some strange effects in practice...) CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not " "allow in-place computation."; - top[i]->ReshapeLike(*bottom[0]); + top[i]->Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width(),this->device_context_); CHECK_EQ(count_, top[i]->count()); } } diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index c127d56bc46..62192792401 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -25,14 +25,14 @@ namespace caffe { -template +template WindowDataLayer::~WindowDataLayer() { this->JoinPrefetchThread(); } -template +template void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // LayerSetUp runs through the window_file and creates two structures // that hold windows: one for foreground (object) windows and one // for background (non-object) windows. We use an overlap threshold @@ -48,24 +48,24 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, // num_windows // class_index overlap x1 y1 x2 y2 - LOG(INFO) << "Window data layer:" << std::endl - << " foreground (object) overlap threshold: " - << this->layer_param_.window_data_param().fg_threshold() << std::endl - << " background (non-object) overlap threshold: " - << this->layer_param_.window_data_param().bg_threshold() << std::endl - << " foreground sampling fraction: " - << this->layer_param_.window_data_param().fg_fraction() << std::endl - << " cache_images: " - << this->layer_param_.window_data_param().cache_images() << std::endl - << " root_folder: " - << this->layer_param_.window_data_param().root_folder(); + LOG(INFO)<< "Window data layer:" << std::endl + << " foreground (object) overlap threshold: " + << this->layer_param_.window_data_param().fg_threshold() << std::endl + << " background (non-object) overlap threshold: " + << this->layer_param_.window_data_param().bg_threshold() << std::endl + << " foreground sampling fraction: " + << this->layer_param_.window_data_param().fg_fraction() << std::endl + << " cache_images: " + << this->layer_param_.window_data_param().cache_images() << std::endl + << " root_folder: " + << this->layer_param_.window_data_param().root_folder(); cache_images_ = this->layer_param_.window_data_param().cache_images(); string root_folder = this->layer_param_.window_data_param().root_folder(); const bool prefetch_needs_rand = - this->transform_param_.mirror() || - this->transform_param_.crop_size(); + this->transform_param_.mirror() || + this->transform_param_.crop_size(); if (prefetch_needs_rand) { const unsigned int prefetch_rng_seed = caffe_rng_rand(); prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); @@ -75,7 +75,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, std::ifstream infile(this->layer_param_.window_data_param().source().c_str()); CHECK(infile.good()) << "Failed to open window file " - << this->layer_param_.window_data_param().source() << std::endl; + << this->layer_param_.window_data_param().source() << std::endl; map label_hist; label_hist.insert(std::make_pair(0, 0)); @@ -109,9 +109,9 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, int num_windows; infile >> num_windows; const float fg_threshold = - this->layer_param_.window_data_param().fg_threshold(); + this->layer_param_.window_data_param().fg_threshold(); const float bg_threshold = - this->layer_param_.window_data_param().bg_threshold(); + this->layer_param_.window_data_param().bg_threshold(); for (int i = 0; i < num_windows; ++i) { int label, x1, y1, x2, y2; float overlap; @@ -144,62 +144,61 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, if (image_index % 100 == 0) { LOG(INFO) << "num: " << image_index << " " - << image_path << " " - << image_size[0] << " " - << image_size[1] << " " - << image_size[2] << " " - << "windows to process: " << num_windows; + << image_path << " " + << image_size[0] << " " + << image_size[1] << " " + << image_size[2] << " " + << "windows to process: " << num_windows; } - } while (infile >> hashtag >> image_index); + }while (infile >> hashtag >> image_index); LOG(INFO) << "Number of images: " << image_index+1; for (map::iterator it = label_hist.begin(); it != label_hist.end(); ++it) { LOG(INFO) << "class " << it->first << " has " << label_hist[it->first] - << " samples"; + << " samples"; } LOG(INFO) << "Amount of context padding: " - << this->layer_param_.window_data_param().context_pad(); + << this->layer_param_.window_data_param().context_pad(); LOG(INFO) << "Crop mode: " - << this->layer_param_.window_data_param().crop_mode(); + << this->layer_param_.window_data_param().crop_mode(); // image const int crop_size = this->transform_param_.crop_size(); CHECK_GT(crop_size, 0); const int batch_size = this->layer_param_.window_data_param().batch_size(); - top[0]->Reshape(batch_size, channels, crop_size, crop_size); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); + top[0]->Reshape(batch_size, channels, crop_size, crop_size,this->device_context_); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size,this->device_context_); LOG(INFO) << "output data size: " << top[0]->num() << "," - << top[0]->channels() << "," << top[0]->height() << "," - << top[0]->width(); + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); // label - vector label_shape(1, batch_size); - top[1]->Reshape(label_shape); - this->prefetch_label_.Reshape(label_shape); + top[1]->Reshape(batch_size, 1, 1, 1,this->device_context_); + this->prefetch_label_.Reshape(batch_size, 1, 1, 1,this->device_context_); // data mean has_mean_file_ = this->transform_param_.has_mean_file(); has_mean_values_ = this->transform_param_.mean_value_size() > 0; if (has_mean_file_) { const string& mean_file = - this->transform_param_.mean_file(); + this->transform_param_.mean_file(); LOG(INFO) << "Loading mean file from: " << mean_file; BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); - data_mean_.FromProto(blob_proto); + data_mean_.FromProto(blob_proto,this->device_context_); } if (has_mean_values_) { CHECK(has_mean_file_ == false) << - "Cannot specify mean_file and mean_value at the same time"; + "Cannot specify mean_file and mean_value at the same time"; for (int c = 0; c < this->transform_param_.mean_value_size(); ++c) { mean_values_.push_back(this->transform_param_.mean_value(c)); } CHECK(mean_values_.size() == 1 || mean_values_.size() == channels) << - "Specify either 1 mean_value or as many as channels: " << channels; + "Specify either 1 mean_value or as many as channels: " << channels; if (channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < channels; ++c) { @@ -209,16 +208,16 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, } } -template +template unsigned int WindowDataLayer::PrefetchRand() { CHECK(prefetch_rng_); - caffe::rng_t* prefetch_rng = - static_cast(prefetch_rng_->generator()); + caffe::rng_t* prefetch_rng = static_cast(prefetch_rng_ + ->generator()); return (*prefetch_rng)(); } // Thread fetching the data -template +template void WindowDataLayer::InternalThreadEntry() { // At each iteration, sample N windows where N*p are foreground (object) // windows and N*(1-p) are background (non-object) windows @@ -265,9 +264,10 @@ void WindowDataLayer::InternalThreadEntry() { // sample a window timer.Start(); const unsigned int rand_index = PrefetchRand(); - vector window = (is_fg) ? - fg_windows_[rand_index % fg_windows_.size()] : - bg_windows_[rand_index % bg_windows_.size()]; + vector window = + (is_fg) ? + fg_windows_[rand_index % fg_windows_.size()] : + bg_windows_[rand_index % bg_windows_.size()]; bool do_mirror = mirror && PrefetchRand() % 2; @@ -278,12 +278,12 @@ void WindowDataLayer::InternalThreadEntry() { cv::Mat cv_img; if (this->cache_images_) { pair image_cached = - image_database_cache_[window[WindowDataLayer::IMAGE_INDEX]]; + image_database_cache_[window[WindowDataLayer::IMAGE_INDEX]]; cv_img = DecodeDatumToCVMat(image_cached.second, true); } else { cv_img = cv::imread(image.first, CV_LOAD_IMAGE_COLOR); if (!cv_img.data) { - LOG(ERROR) << "Could not open or find file " << image.first; + LOG(ERROR)<< "Could not open or find file " << image.first; return; } } @@ -303,12 +303,12 @@ void WindowDataLayer::InternalThreadEntry() { // scale factor by which to expand the original region // such that after warping the expanded region to crop_size x crop_size // there's exactly context_pad amount of padding on each side - Dtype context_scale = static_cast(crop_size) / - static_cast(crop_size - 2*context_pad); + Dtype context_scale = static_cast(crop_size) + / static_cast(crop_size - 2 * context_pad); // compute the expanded region - Dtype half_height = static_cast(y2-y1+1)/2.0; - Dtype half_width = static_cast(x2-x1+1)/2.0; + Dtype half_height = static_cast(y2 - y1 + 1) / 2.0; + Dtype half_width = static_cast(x2 - x1 + 1) / 2.0; Dtype center_x = static_cast(x1) + half_width; Dtype center_y = static_cast(y1) + half_height; if (use_square) { @@ -318,16 +318,16 @@ void WindowDataLayer::InternalThreadEntry() { half_height = half_width; } } - x1 = static_cast(round(center_x - half_width*context_scale)); - x2 = static_cast(round(center_x + half_width*context_scale)); - y1 = static_cast(round(center_y - half_height*context_scale)); - y2 = static_cast(round(center_y + half_height*context_scale)); + x1 = static_cast(round(center_x - half_width * context_scale)); + x2 = static_cast(round(center_x + half_width * context_scale)); + y1 = static_cast(round(center_y - half_height * context_scale)); + y2 = static_cast(round(center_y + half_height * context_scale)); // the expanded region may go outside of the image // so we compute the clipped (expanded) region and keep track of // the extent beyond the image - int unclipped_height = y2-y1+1; - int unclipped_width = x2-x1+1; + int unclipped_height = y2 - y1 + 1; + int unclipped_width = x2 - x1 + 1; int pad_x1 = std::max(0, -x1); int pad_y1 = std::max(0, -y1); int pad_x2 = std::max(0, x2 - cv_img.cols + 1); @@ -342,25 +342,25 @@ void WindowDataLayer::InternalThreadEntry() { CHECK_LT(x2, cv_img.cols); CHECK_LT(y2, cv_img.rows); - int clipped_height = y2-y1+1; - int clipped_width = x2-x1+1; + int clipped_height = y2 - y1 + 1; + int clipped_width = x2 - x1 + 1; // scale factors that would be used to warp the unclipped // expanded region - Dtype scale_x = - static_cast(crop_size)/static_cast(unclipped_width); - Dtype scale_y = - static_cast(crop_size)/static_cast(unclipped_height); + Dtype scale_x = static_cast(crop_size) + / static_cast(unclipped_width); + Dtype scale_y = static_cast(crop_size) + / static_cast(unclipped_height); // size to warp the clipped expanded region to - cv_crop_size.width = - static_cast(round(static_cast(clipped_width)*scale_x)); - cv_crop_size.height = - static_cast(round(static_cast(clipped_height)*scale_y)); - pad_x1 = static_cast(round(static_cast(pad_x1)*scale_x)); - pad_x2 = static_cast(round(static_cast(pad_x2)*scale_x)); - pad_y1 = static_cast(round(static_cast(pad_y1)*scale_y)); - pad_y2 = static_cast(round(static_cast(pad_y2)*scale_y)); + cv_crop_size.width = static_cast(round( + static_cast(clipped_width) * scale_x)); + cv_crop_size.height = static_cast(round( + static_cast(clipped_height) * scale_y)); + pad_x1 = static_cast(round(static_cast(pad_x1) * scale_x)); + pad_x2 = static_cast(round(static_cast(pad_x2) * scale_x)); + pad_y1 = static_cast(round(static_cast(pad_y1) * scale_y)); + pad_y2 = static_cast(round(static_cast(pad_y2) * scale_y)); pad_h = pad_y1; // if we're mirroring, we mirror the padding too (to be pedantic) @@ -380,10 +380,10 @@ void WindowDataLayer::InternalThreadEntry() { } } - cv::Rect roi(x1, y1, x2-x1+1, y2-y1+1); + cv::Rect roi(x1, y1, x2 - x1 + 1, y2 - y1 + 1); cv::Mat cv_cropped_img = cv_img(roi); - cv::resize(cv_cropped_img, cv_cropped_img, - cv_crop_size, 0, 0, cv::INTER_LINEAR); + cv::resize(cv_cropped_img, cv_cropped_img, cv_crop_size, 0, 0, + cv::INTER_LINEAR); // horizontal flip at random if (do_mirror) { @@ -397,12 +397,12 @@ void WindowDataLayer::InternalThreadEntry() { for (int w = 0; w < cv_cropped_img.cols; ++w) { for (int c = 0; c < channels; ++c) { int top_index = ((item_id * channels + c) * crop_size + h + pad_h) - * crop_size + w + pad_w; + * crop_size + w + pad_w; // int top_index = (c * height + h) * width + w; Dtype pixel = static_cast(ptr[img_index++]); if (this->has_mean_file_) { int mean_index = (c * mean_height + h + mean_off + pad_h) - * mean_width + w + mean_off + pad_w; + * mean_width + w + mean_off + pad_w; top_data[top_index] = (pixel - mean[mean_index]) * scale; } else { if (this->has_mean_values_) { @@ -418,46 +418,46 @@ void WindowDataLayer::InternalThreadEntry() { // get window label top_label[item_id] = window[WindowDataLayer::LABEL]; - #if 0 +#if 0 // useful debugging code for dumping transformed windows to disk string file_id; std::stringstream ss; ss << PrefetchRand(); ss >> file_id; std::ofstream inf((string("dump/") + file_id + - string("_info.txt")).c_str(), std::ofstream::out); + string("_info.txt")).c_str(), std::ofstream::out); inf << image.first << std::endl - << window[WindowDataLayer::X1]+1 << std::endl - << window[WindowDataLayer::Y1]+1 << std::endl - << window[WindowDataLayer::X2]+1 << std::endl - << window[WindowDataLayer::Y2]+1 << std::endl - << do_mirror << std::endl - << top_label[item_id] << std::endl - << is_fg << std::endl; + << window[WindowDataLayer::X1]+1 << std::endl + << window[WindowDataLayer::Y1]+1 << std::endl + << window[WindowDataLayer::X2]+1 << std::endl + << window[WindowDataLayer::Y2]+1 << std::endl + << do_mirror << std::endl + << top_label[item_id] << std::endl + << is_fg << std::endl; inf.close(); std::ofstream top_data_file((string("dump/") + file_id + - string("_data.txt")).c_str(), + string("_data.txt")).c_str(), std::ofstream::out | std::ofstream::binary); for (int c = 0; c < channels; ++c) { for (int h = 0; h < crop_size; ++h) { for (int w = 0; w < crop_size; ++w) { top_data_file.write(reinterpret_cast( - &top_data[((item_id * channels + c) * crop_size + h) - * crop_size + w]), + &top_data[((item_id * channels + c) * crop_size + h) + * crop_size + w]), sizeof(Dtype)); } } } top_data_file.close(); - #endif +#endif item_id++; } } batch_timer.Stop(); - DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; - DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; - DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; + DLOG(INFO)<< "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO)<< " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO)<< "Transform time: " << trans_time / 1000 << " ms."; } INSTANTIATE_CLASS(WindowDataLayer); diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index fd00b122630..7f56865b2a3 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -18,12 +18,12 @@ namespace caffe { -template +template Net::Net(const NetParameter& param) { Init(param); } -template +template Net::Net(const string& param_file, Phase phase) { NetParameter param; ReadNetParamsFromTextFileOrDie(param_file, ¶m); @@ -31,7 +31,7 @@ Net::Net(const string& param_file, Phase phase) { Init(param); } -template +template void Net::Init(const NetParameter& in_param) { // Set phase from the state. phase_ = in_param.state().phase(); @@ -39,33 +39,24 @@ void Net::Init(const NetParameter& in_param) { // the current NetState. NetParameter filtered_param; FilterNet(in_param, &filtered_param); - LOG(INFO) << "Initializing net from parameters: " << std::endl - << filtered_param.DebugString(); + LOG(INFO)<< "Initializing net from parameters: " << std::endl + << filtered_param.DebugString(); // Create a copy of filtered_param with splits added where necessary. NetParameter param; InsertSplits(filtered_param, ¶m); - // Basically, build all the layers and set up their connections. + // Basically, build all the layers and set up its connections. name_ = param.name(); map blob_name_to_idx; set available_blobs; - CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0) - << "Must specify either input_shape OR deprecated input_dim, not both."; - if (param.input_dim_size() > 0) { - // Deprecated 4D dimensions. - CHECK_EQ(param.input_size() * 4, param.input_dim_size()) - << "Incorrect input blob dimension specifications."; - } else { - CHECK_EQ(param.input_size(), param.input_shape_size()) - << "Exactly one input_shape must be specified per input."; - } + CHECK_EQ(param.input_size() * 4, param.input_dim_size())<< "Incorrect input blob dimension specifications."; memory_used_ = 0; // set the input blobs for (int input_id = 0; input_id < param.input_size(); ++input_id) { const int layer_id = -1; // inputs have fake layer ID -1 AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); } - DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); - // For each layer, set up its input and output + DLOG(INFO)<< "Memory required for data: " << memory_used_ * sizeof(Dtype); + // For each layer, set up their input and output bottom_vecs_.resize(param.layer_size()); top_vecs_.resize(param.layer_size()); bottom_id_vecs_.resize(param.layer_size()); @@ -81,11 +72,11 @@ void Net::Init(const NetParameter& in_param) { const LayerParameter& layer_param = param.layer(layer_id); layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); - LOG(INFO) << "Creating Layer " << layer_param.name(); + LOG(INFO)<< "Creating Layer " << layer_param.name(); bool need_backward = false; // Figure out this layer's input and output for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); - ++bottom_id) { + ++bottom_id) { const int blob_id = AppendBottom(param, layer_id, bottom_id, &available_blobs, &blob_name_to_idx); // If a blob needs backward, this layer should provide it. @@ -100,8 +91,8 @@ void Net::Init(const NetParameter& in_param) { // ExactNumTopBlobs() or MinTopBlobs()), allocate them here. Layer* layer = layers_[layer_id].get(); if (layer->AutoTopBlobs()) { - const int needed_num_top = - std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs()); + const int needed_num_top = std::max(layer->MinTopBlobs(), + layer->ExactNumTopBlobs()); for (; num_top < needed_num_top; ++num_top) { // Add "anonymous" top blobs -- do not modify available_blobs or // blob_name_to_idx as we don't want these blobs to be usable as input @@ -110,28 +101,32 @@ void Net::Init(const NetParameter& in_param) { } } // After this layer is connected, set it up. - LOG(INFO) << "Setting up " << layer_names_[layer_id]; + LOG(INFO)<< "Setting up " << layer_names_[layer_id]; layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); } blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id); - LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string(); + LOG(INFO)<< "Top shape: " << top_vecs_[layer_id][top_id]->num() << " " + << top_vecs_[layer_id][top_id]->channels() << " " + << top_vecs_[layer_id][top_id]->height() << " " + << top_vecs_[layer_id][top_id]->width() << " (" + << top_vecs_[layer_id][top_id]->count() << ")"; if (layer->loss(top_id)) { - LOG(INFO) << " with loss weight " << layer->loss(top_id); + LOG(INFO)<< " with loss weight " << layer->loss(top_id); } memory_used_ += top_vecs_[layer_id][top_id]->count(); } - DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + DLOG(INFO)<< "Memory required for data: " << memory_used_ * sizeof(Dtype); const int param_size = layer_param.param_size(); const int num_param_blobs = layers_[layer_id]->blobs().size(); - CHECK_LE(param_size, num_param_blobs) - << "Too many params specified for layer " << layer_param.name(); + CHECK_LE(param_size, num_param_blobs)<< "Too many params specified for layer " << layer_param.name(); ParamSpec default_param_spec; for (int param_id = 0; param_id < num_param_blobs; ++param_id) { - const ParamSpec* param_spec = (param_id < param_size) ? - &layer_param.param(param_id) : &default_param_spec; + const ParamSpec* param_spec = + (param_id < param_size) ? + &layer_param.param(param_id) : &default_param_spec; const bool param_need_backward = param_spec->lr_mult() > 0; need_backward |= param_need_backward; layers_[layer_id]->set_param_propagate_down(param_id, @@ -148,6 +143,7 @@ void Net::Init(const NetParameter& in_param) { } } } + // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. @@ -156,21 +152,23 @@ void Net::Init(const NetParameter& in_param) { bool layer_contributes_loss = false; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; - if (layers_[layer_id]->loss(top_id) || - (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { + if (layers_[layer_id]->loss(top_id) + || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { layer_contributes_loss = true; break; } } - if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } + if (!layer_contributes_loss) { + layer_need_backward_[layer_id] = false; + } if (layer_need_backward_[layer_id]) { - LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; + LOG(INFO)<< layer_names_[layer_id] << " needs backward computation."; } else { LOG(INFO) << layer_names_[layer_id] - << " does not need backward computation."; + << " does not need backward computation."; } for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); - ++bottom_id) { + ++bottom_id) { if (layer_contributes_loss) { const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; @@ -185,16 +183,16 @@ void Net::Init(const NetParameter& in_param) { for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { layer_need_backward_[layer_id] = true; for (int bottom_id = 0; - bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) { + bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) { bottom_need_backward_[layer_id][bottom_id] = - bottom_need_backward_[layer_id][bottom_id] || - layers_[layer_id]->AllowForceBackward(bottom_id); + bottom_need_backward_[layer_id][bottom_id] + || layers_[layer_id]->AllowForceBackward(bottom_id); blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] = - blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] || - bottom_need_backward_[layer_id][bottom_id]; + blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] + || bottom_need_backward_[layer_id][bottom_id]; } for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); - ++param_id) { + ++param_id) { layers_[layer_id]->set_param_propagate_down(param_id, true); } } @@ -202,7 +200,7 @@ void Net::Init(const NetParameter& in_param) { // In the end, all remaining blobs are considered output blobs. for (set::iterator it = available_blobs.begin(); it != available_blobs.end(); ++it) { - LOG(INFO) << "This network produces output " << *it; + LOG(INFO)<< "This network produces output " << *it; net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); net_output_blob_indices_.push_back(blob_name_to_idx[*it]); } @@ -214,13 +212,13 @@ void Net::Init(const NetParameter& in_param) { } GetLearningRateAndWeightDecay(); debug_info_ = param.debug_info(); - LOG(INFO) << "Network initialization done."; - LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + LOG(INFO)<< "Network initialization done."; + LOG(INFO)<< "Memory required for data: " << memory_used_ * sizeof(Dtype); } -template +template void Net::FilterNet(const NetParameter& param, - NetParameter* param_filtered) { + NetParameter* param_filtered) { NetState net_state(param.state()); param_filtered->CopyFrom(param); param_filtered->clear_layer(); @@ -228,7 +226,7 @@ void Net::FilterNet(const NetParameter& param, const LayerParameter& layer_param = param.layer(i); const string& layer_name = layer_param.name(); CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0) - << "Specify either include rules or exclude rules; not both."; + << "Specify either include rules or exclude rules; not both."; // If no include rules are specified, the layer is included by default and // only excluded if it meets one of the exclude rules. bool layer_included = (layer_param.include_size() == 0); @@ -248,24 +246,24 @@ void Net::FilterNet(const NetParameter& param, } } -template -bool Net::StateMeetsRule(const NetState& state, - const NetStateRule& rule, const string& layer_name) { +template +bool Net::StateMeetsRule(const NetState& state, const NetStateRule& rule, + const string& layer_name) { // Check whether the rule is broken due to phase. if (rule.has_phase()) { - if (rule.phase() != state.phase()) { - LOG(INFO) << "The NetState phase (" << state.phase() - << ") differed from the phase (" << rule.phase() - << ") specified by a rule in layer " << layer_name; - return false; - } + if (rule.phase() != state.phase()) { + LOG(INFO)<< "The NetState phase (" << state.phase() + << ") differed from the phase (" << rule.phase() + << ") specified by a rule in layer " << layer_name; + return false; + } } // Check whether the rule is broken due to min level. if (rule.has_min_level()) { if (state.level() < rule.min_level()) { LOG(INFO) << "The NetState level (" << state.level() - << ") is above the min_level (" << rule.min_level() - << ") specified by a rule in layer " << layer_name; + << ") is above the min_level (" << rule.min_level() + << ") specified by a rule in layer " << layer_name; return false; } } @@ -273,8 +271,8 @@ bool Net::StateMeetsRule(const NetState& state, if (rule.has_max_level()) { if (state.level() > rule.max_level()) { LOG(INFO) << "The NetState level (" << state.level() - << ") is above the max_level (" << rule.max_level() - << ") specified by a rule in layer " << layer_name; + << ") is above the max_level (" << rule.max_level() + << ") specified by a rule in layer " << layer_name; return false; } } @@ -284,11 +282,11 @@ bool Net::StateMeetsRule(const NetState& state, // Check that the NetState contains the rule's ith stage. bool has_stage = false; for (int j = 0; !has_stage && j < state.stage_size(); ++j) { - if (rule.stage(i) == state.stage(j)) { has_stage = true; } + if (rule.stage(i) == state.stage(j)) {has_stage = true;} } if (!has_stage) { LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) - << "' specified by a rule in layer " << layer_name; + << "' specified by a rule in layer " << layer_name; return false; } } @@ -298,11 +296,11 @@ bool Net::StateMeetsRule(const NetState& state, // Check that the NetState contains the rule's ith not_stage. bool has_stage = false; for (int j = 0; !has_stage && j < state.stage_size(); ++j) { - if (rule.not_stage(i) == state.stage(j)) { has_stage = true; } + if (rule.not_stage(i) == state.stage(j)) {has_stage = true;} } if (has_stage) { LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) - << "' specified by a rule in layer " << layer_name; + << "' specified by a rule in layer " << layer_name; return false; } } @@ -311,24 +309,26 @@ bool Net::StateMeetsRule(const NetState& state, // Helper for Net::Init: add a new input or top blob to the net. (Inputs have // layer_id == -1, tops have layer_id >= 0.) -template +template void Net::AppendTop(const NetParameter& param, const int layer_id, const int top_id, set* available_blobs, map* blob_name_to_idx) { - shared_ptr layer_param((layer_id >= 0) ? - (new LayerParameter(param.layer(layer_id))) : NULL); - const string& blob_name = layer_param ? - (layer_param->top_size() > top_id ? - layer_param->top(top_id) : "(automatic)") : param.input(top_id); + shared_ptr layer_param( + (layer_id >= 0) ? (new LayerParameter(param.layer(layer_id))) : NULL); + const string& blob_name = + layer_param ? + (layer_param->top_size() > top_id ? + layer_param->top(top_id) : "(automatic)") : + param.input(top_id); // Check if we are doing in-place computation - if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id && - blob_name == layer_param->bottom(top_id)) { + if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id + && blob_name == layer_param->bottom(top_id)) { // In-place computation - LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; + LOG(INFO)<< layer_param->name() << " -> " << blob_name << " (in-place)"; top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get()); top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]); } else if (blob_name_to_idx && - blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { + blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { // If we are not doing in-place computation but have duplicated blobs, // raise an error. LOG(FATAL) << "Duplicate blobs produced by multiple sources."; @@ -344,17 +344,13 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, blobs_.push_back(blob_pointer); blob_names_.push_back(blob_name); blob_need_backward_.push_back(false); - if (blob_name_to_idx) { (*blob_name_to_idx)[blob_name] = blob_id; } + if (blob_name_to_idx) {(*blob_name_to_idx)[blob_name] = blob_id;} if (layer_id == -1) { // Set the (explicitly specified) dimensions of the input blob. - if (param.input_dim_size() > 0) { - blob_pointer->Reshape(param.input_dim(top_id * 4), - param.input_dim(top_id * 4 + 1), - param.input_dim(top_id * 4 + 2), - param.input_dim(top_id * 4 + 3)); - } else { - blob_pointer->Reshape(param.input_shape(top_id)); - } + blob_pointer->Reshape(param.input_dim(top_id * 4), + param.input_dim(top_id * 4 + 1), + param.input_dim(top_id * 4 + 2), + param.input_dim(top_id * 4 + 3), Caffe::GetDeviceContext(layer_param->device())); net_input_blob_indices_.push_back(blob_id); net_input_blobs_.push_back(blob_pointer.get()); } else { @@ -362,22 +358,24 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, top_vecs_[layer_id].push_back(blob_pointer.get()); } } - if (available_blobs) { available_blobs->insert(blob_name); } + if (available_blobs) { + available_blobs->insert(blob_name); + } } // Helper for Net::Init: add a new bottom blob to the net. -template -int Net::AppendBottom(const NetParameter& param, - const int layer_id, const int bottom_id, - set* available_blobs, map* blob_name_to_idx) { +template +int Net::AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { - LOG(FATAL) << "Unknown blob input " << blob_name - << " (at index " << bottom_id << ") to layer " << layer_id; + LOG(FATAL)<< "Unknown blob input " << blob_name + << " (at index " << bottom_id << ") to layer " << layer_id; } const int blob_id = (*blob_name_to_idx)[blob_name]; - LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; + LOG(INFO)<< layer_names_[layer_id] << " <- " << blob_name; bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); @@ -386,7 +384,7 @@ int Net::AppendBottom(const NetParameter& param, return blob_id; } -template +template void Net::AppendParam(const NetParameter& param, const int layer_id, const int param_id) { const LayerParameter& layer_param = layers_[layer_id]->layer_param(); @@ -404,8 +402,9 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, params_.push_back(layers_[layer_id]->blobs()[param_id]); param_id_vecs_[layer_id].push_back(net_param_id); param_layer_indices_.push_back(make_pair(layer_id, param_id)); - if (!param_size || !param_name.size() || (param_name.size() && - param_names_index_.find(param_name) == param_names_index_.end())) { + if (!param_size || !param_name.size() + || (param_name.size() + && param_names_index_.find(param_name) == param_names_index_.end())) { // This layer "owns" this parameter blob -- it is either anonymous // (i.e., not given a param_name) or explicitly given a name that we // haven't already seen. @@ -417,48 +416,54 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, // Named param blob with name we've seen before: share params const int owner_net_param_id = param_names_index_[param_name]; param_owners_.push_back(owner_net_param_id); - const pair& owner_index = - param_layer_indices_[owner_net_param_id]; + const pair& owner_index = param_layer_indices_[owner_net_param_id]; const int owner_layer_id = owner_index.first; const int owner_param_id = owner_index.second; - LOG(INFO) << "Sharing parameters '" << param_name << "' owned by " - << "layer '" << layer_names_[owner_layer_id] << "', param " - << "index " << owner_param_id; + LOG(INFO)<< "Sharing parameters '" << param_name << "' owned by " + << "layer '" << layer_names_[owner_layer_id] << "', param " + << "index " << owner_param_id; Blob* this_blob = layers_[layer_id]->blobs()[param_id].get(); - Blob* owner_blob = - layers_[owner_layer_id]->blobs()[owner_param_id].get(); + Blob* owner_blob = layers_[owner_layer_id]->blobs()[owner_param_id] + .get(); const int param_size = layer_param.param_size(); - if (param_size > param_id && (layer_param.param(param_id).share_mode() == - ParamSpec_DimCheckMode_PERMISSIVE)) { + if (param_size > param_id + && (layer_param.param(param_id).share_mode() + == ParamSpec_DimCheckMode_PERMISSIVE)) { // Permissive dimension checking -- only check counts are the same. - CHECK_EQ(this_blob->count(), owner_blob->count()) - << "Shared parameter blobs must have the same count."; + CHECK_EQ(this_blob->count(), owner_blob->count())<< "Shared parameter blobs must have the same count."; } else { // Strict dimension checking -- all dims must be the same. - CHECK(this_blob->shape() == owner_blob->shape()); + CHECK_EQ(this_blob->num(), owner_blob->num()) + << "Shared parameter blobs must have the same num."; + CHECK_EQ(this_blob->channels(), owner_blob->channels()) + << "Shared parameter blobs must have the same channels."; + CHECK_EQ(this_blob->height(), owner_blob->height()) + << "Shared parameter blobs must have the same height."; + CHECK_EQ(this_blob->width(), owner_blob->width()) + << "Shared parameter blobs must have the same width."; } layers_[layer_id]->blobs()[param_id]->ShareData( *layers_[owner_layer_id]->blobs()[owner_param_id]); } } -template +template void Net::GetLearningRateAndWeightDecay() { - LOG(INFO) << "Collecting Learning Rate and Weight Decay."; + LOG(INFO)<< "Collecting Learning Rate and Weight Decay."; ParamSpec default_param_spec; for (int i = 0; i < layers_.size(); ++i) { vector > >& layer_blobs = layers_[i]->blobs(); for (int j = 0; j < layer_blobs.size(); ++j) { const ParamSpec* param_spec = - (layers_[i]->layer_param().param_size() > j) ? - &layers_[i]->layer_param().param(j) : &default_param_spec; + (layers_[i]->layer_param().param_size() > j) ? + &layers_[i]->layer_param().param(j) : &default_param_spec; params_lr_.push_back(param_spec->lr_mult()); params_weight_decay_.push_back(param_spec->decay_mult()); } } } -template +template Dtype Net::ForwardFromTo(int start, int end) { CHECK_GE(start, 0); CHECK_LT(end, layers_.size()); @@ -473,22 +478,24 @@ Dtype Net::ForwardFromTo(int start, int end) { layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; - if (debug_info_) { ForwardDebugInfo(i); } + if (debug_info_) { + ForwardDebugInfo(i); + } } return loss; } -template +template Dtype Net::ForwardFrom(int start) { return ForwardFromTo(start, layers_.size() - 1); } -template +template Dtype Net::ForwardTo(int end) { return ForwardFromTo(0, end); } -template +template const vector*>& Net::ForwardPrefilled(Dtype* loss) { if (loss != NULL) { *loss = ForwardFromTo(0, layers_.size() - 1); @@ -498,25 +505,24 @@ const vector*>& Net::ForwardPrefilled(Dtype* loss) { return net_output_blobs_; } -template +template const vector*>& Net::Forward( const vector*> & bottom, Dtype* loss) { // Copy bottom to internal bottom for (int i = 0; i < bottom.size(); ++i) { - net_input_blobs_[i]->CopyFrom(*bottom[i]); + net_input_blobs_[i]->CopyFrom(*bottom[i], bottom[i]->device_context()); } return ForwardPrefilled(loss); } -template +template string Net::Forward(const string& input_blob_protos, Dtype* loss) { BlobProtoVector blob_proto_vec; if (net_input_blobs_.size()) { blob_proto_vec.ParseFromString(input_blob_protos); - CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size()) - << "Incorrect input size."; + CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size())<< "Incorrect input size."; for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { - net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); + net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i), net_input_blobs_[i]->device_context()); } } ForwardPrefilled(loss); @@ -529,74 +535,80 @@ string Net::Forward(const string& input_blob_protos, Dtype* loss) { return output; } -template +template void Net::BackwardFromTo(int start, int end) { CHECK_GE(end, 0); CHECK_LT(start, layers_.size()); for (int i = start; i >= end; --i) { if (layer_need_backward_[i]) { - layers_[i]->Backward( - top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); - if (debug_info_) { BackwardDebugInfo(i); } + layers_[i]->Backward(top_vecs_[i], bottom_need_backward_[i], + bottom_vecs_[i]); + if (debug_info_) { + BackwardDebugInfo(i); + } } } } -template +template void Net::InputDebugInfo(const int input_id) { const Blob& blob = *net_input_blobs_[input_id]; const string& blob_name = blob_names_[net_input_blob_indices_[input_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Input " << blob_name << " data: " << data_abs_val_mean; + LOG(INFO)<< " [Forward] " + << "Input " << blob_name << " data: " << data_abs_val_mean; } -template +template void Net::ForwardDebugInfo(const int layer_id) { for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const Blob& blob = *top_vecs_[layer_id][top_id]; const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] << ", top blob " << blob_name - << " data: " << data_abs_val_mean; + LOG(INFO)<< " [Forward] " + << "Layer " << layer_names_[layer_id] << ", top blob " << blob_name + << " data: " << data_abs_val_mean; } for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); - ++param_id) { + ++param_id) { const Blob& blob = *layers_[layer_id]->blobs()[param_id]; const int net_param_id = param_id_vecs_[layer_id][param_id]; const string& blob_name = param_display_names_[net_param_id]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] << ", param blob " << blob_name - << " data: " << data_abs_val_mean; + << "Layer " << layer_names_[layer_id] << ", param blob " << blob_name + << " data: " << data_abs_val_mean; } } -template +template void Net::BackwardDebugInfo(const int layer_id) { const vector*>& bottom_vec = bottom_vecs_[layer_id]; for (int bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) { - if (!bottom_need_backward_[layer_id][bottom_id]) { continue; } + if (!bottom_need_backward_[layer_id][bottom_id]) { + continue; + } const Blob& blob = *bottom_vec[bottom_id]; const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] << ", bottom blob " << blob_name - << " diff: " << diff_abs_val_mean; + LOG(INFO)<< " [Backward] " + << "Layer " << layer_names_[layer_id] << ", bottom blob " << blob_name + << " diff: " << diff_abs_val_mean; } for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); - ++param_id) { - if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; } + ++param_id) { + if (!layers_[layer_id]->param_propagate_down(param_id)) { + continue; + } const Blob& blob = *layers_[layer_id]->blobs()[param_id]; const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] << ", param blob " << param_id - << " diff: " << diff_abs_val_mean; + LOG(INFO)<< " [Backward] " + << "Layer " << layer_names_[layer_id] << ", param blob " << param_id + << " diff: " << diff_abs_val_mean; } } -template +template void Net::UpdateDebugInfo(const int param_id) { const Blob& blob = *params_[param_id]; const int param_owner = param_owners_[param_id]; @@ -605,65 +617,69 @@ void Net::UpdateDebugInfo(const int param_id) { const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); if (param_owner < 0) { const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Update] Layer " << layer_name - << ", param " << param_display_name - << " data: " << data_abs_val_mean << "; diff: " << diff_abs_val_mean; + LOG(INFO)<< " [Update] Layer " << layer_name + << ", param " << param_display_name + << " data: " << data_abs_val_mean << "; diff: " << diff_abs_val_mean; } else { const string& owner_layer_name = - layer_names_[param_layer_indices_[param_owner].first]; + layer_names_[param_layer_indices_[param_owner].first]; LOG(INFO) << " [Update] Layer " << layer_name - << ", param blob " << param_display_name - << " (owned by layer " << owner_layer_name << ", " - << "param " << param_display_names_[param_owners_[param_id]] << ")" - << " diff: " << diff_abs_val_mean; + << ", param blob " << param_display_name + << " (owned by layer " << owner_layer_name << ", " + << "param " << param_display_names_[param_owners_[param_id]] << ")" + << " diff: " << diff_abs_val_mean; } } -template +template void Net::ShareTrainedLayersWith(const Net* other) { int num_source_layers = other->layers().size(); for (int i = 0; i < num_source_layers; ++i) { Layer* source_layer = other->layers()[i].get(); const string& source_layer_name = other->layer_names()[i]; int target_layer_id = 0; - while (target_layer_id != layer_names_.size() && - layer_names_[target_layer_id] != source_layer_name) { + while (target_layer_id != layer_names_.size() + && layer_names_[target_layer_id] != source_layer_name) { ++target_layer_id; } if (target_layer_id == layer_names_.size()) { - DLOG(INFO) << "Ignoring source layer " << source_layer_name; + DLOG(INFO)<< "Ignoring source layer " << source_layer_name; continue; } - DLOG(INFO) << "Copying source layer " << source_layer_name; - vector > >& target_blobs = - layers_[target_layer_id]->blobs(); - CHECK_EQ(target_blobs.size(), source_layer->blobs().size()) - << "Incompatible number of blobs for layer " << source_layer_name; + DLOG(INFO)<< "Copying source layer " << source_layer_name; + vector > >& target_blobs = layers_[target_layer_id] + ->blobs(); + CHECK_EQ(target_blobs.size(), source_layer->blobs().size())<< "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { Blob* source_blob = source_layer->blobs()[j].get(); - CHECK(target_blobs[j]->shape() == source_blob->shape()); + CHECK_EQ(target_blobs[j]->num(), source_blob->num()); + CHECK_EQ(target_blobs[j]->channels(), source_blob->channels()); + CHECK_EQ(target_blobs[j]->height(), source_blob->height()); + CHECK_EQ(target_blobs[j]->width(), source_blob->width()); target_blobs[j]->ShareData(*source_blob); } } } -template +template void Net::BackwardFrom(int start) { BackwardFromTo(start, 0); } -template +template void Net::BackwardTo(int end) { BackwardFromTo(layers_.size() - 1, end); } -template +template void Net::Backward() { BackwardFromTo(layers_.size() - 1, 0); if (debug_info_) { Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] >= 0) { continue; } + if (param_owners_[i] >= 0) { + continue; + } asum_data += params_[i]->asum_data(); asum_diff += params_[i]->asum_diff(); sumsq_data += params_[i]->sumsq_data(); @@ -671,54 +687,56 @@ void Net::Backward() { } const Dtype l2norm_data = std::sqrt(sumsq_data); const Dtype l2norm_diff = std::sqrt(sumsq_diff); - LOG(ERROR) << " [Backward] All net params (data, diff): " - << "L1 norm = (" << asum_data << ", " << asum_diff << "); " - << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; + LOG(ERROR)<< " [Backward] All net params (data, diff): " + << "L1 norm = (" << asum_data << ", " << asum_diff << "); " + << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; } } -template +template void Net::Reshape() { for (int i = 0; i < layers_.size(); ++i) { layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); } } -template +template void Net::CopyTrainedLayersFrom(const NetParameter& param) { int num_source_layers = param.layer_size(); for (int i = 0; i < num_source_layers; ++i) { const LayerParameter& source_layer = param.layer(i); const string& source_layer_name = source_layer.name(); int target_layer_id = 0; - while (target_layer_id != layer_names_.size() && - layer_names_[target_layer_id] != source_layer_name) { + while (target_layer_id != layer_names_.size() + && layer_names_[target_layer_id] != source_layer_name) { ++target_layer_id; } if (target_layer_id == layer_names_.size()) { - DLOG(INFO) << "Ignoring source layer " << source_layer_name; + DLOG(INFO)<< "Ignoring source layer " << source_layer_name; continue; } - DLOG(INFO) << "Copying source layer " << source_layer_name; - vector > >& target_blobs = - layers_[target_layer_id]->blobs(); - CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) - << "Incompatible number of blobs for layer " << source_layer_name; + DLOG(INFO)<< "Copying source layer " << source_layer_name; + vector > >& target_blobs = layers_[target_layer_id] + ->blobs(); + CHECK_EQ(target_blobs.size(), source_layer.blobs_size())<< "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { - const bool kReshape = false; - target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); + CHECK_EQ(target_blobs[j]->num(), source_layer.blobs(j).num()); + CHECK_EQ(target_blobs[j]->channels(), source_layer.blobs(j).channels()); + CHECK_EQ(target_blobs[j]->height(), source_layer.blobs(j).height()); + CHECK_EQ(target_blobs[j]->width(), source_layer.blobs(j).width()); + target_blobs[j]->FromProto(source_layer.blobs(j), layers_[target_layer_id]->device_context()); } } } -template +template void Net::CopyTrainedLayersFrom(const string trained_filename) { NetParameter param; ReadNetParamsFromBinaryFileOrDie(trained_filename, ¶m); CopyTrainedLayersFrom(param); } -template +template void Net::ToProto(NetParameter* param, bool write_diff) const { param->Clear(); param->set_name(name_); @@ -726,7 +744,7 @@ void Net::ToProto(NetParameter* param, bool write_diff) const { for (int i = 0; i < net_input_blob_indices_.size(); ++i) { param->add_input(blob_names_[net_input_blob_indices_[i]]); } - DLOG(INFO) << "Serializing " << layers_.size() << " layers"; + DLOG(INFO)<< "Serializing " << layers_.size() << " layers"; for (int i = 0; i < layers_.size(); ++i) { LayerParameter* layer_param = param->add_layer(); for (int j = 0; j < bottom_id_vecs_[i].size(); ++j) { @@ -739,76 +757,80 @@ void Net::ToProto(NetParameter* param, bool write_diff) const { } } -template +template void Net::Update() { // First, accumulate the diffs of any shared parameters into their owner's // diff. (Assumes that the learning rate, weight decay, etc. have already been // accounted for in the current diff.) for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] < 0) { continue; } - if (debug_info_) { UpdateDebugInfo(i); } + if (param_owners_[i] < 0) { + continue; + } + if (debug_info_) { + UpdateDebugInfo(i); + } const int count = params_[i]->count(); const Dtype* this_diff; Dtype* owner_diff; switch (Caffe::mode()) { - case Caffe::CPU: - this_diff = params_[i]->cpu_diff(); - owner_diff = params_[param_owners_[i]]->mutable_cpu_diff(); - caffe_add(count, this_diff, owner_diff, owner_diff); - break; + case Caffe::CPU: + this_diff = params_[i]->cpu_diff(); + owner_diff = params_[param_owners_[i]]->mutable_cpu_diff(); + caffe_add(count, this_diff, owner_diff, owner_diff); + break; #ifndef CPU_ONLY - case Caffe::GPU: - this_diff = params_[i]->gpu_diff(); - owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); - caffe_gpu_add(count, this_diff, owner_diff, owner_diff); - break; + case Caffe::GPU: + this_diff = params_[i]->gpu_diff(); + owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); + caffe_gpu_add(count, this_diff, owner_diff, owner_diff); + break; #else - NO_GPU; + NO_GPU; #endif - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + default: + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } + } + // Now, update the owned parameters. + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] >= 0) {continue;} + if (debug_info_) {UpdateDebugInfo(i);} + params_[i]->Update(); } } - // Now, update the owned parameters. - for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] >= 0) { continue; } - if (debug_info_) { UpdateDebugInfo(i); } - params_[i]->Update(); - } -} -template +template bool Net::has_blob(const string& blob_name) const { return blob_names_index_.find(blob_name) != blob_names_index_.end(); } -template +template const shared_ptr > Net::blob_by_name( const string& blob_name) const { shared_ptr > blob_ptr; if (has_blob(blob_name)) { blob_ptr = blobs_[blob_names_index_.find(blob_name)->second]; } else { - blob_ptr.reset((Blob*)(NULL)); - LOG(WARNING) << "Unknown blob name " << blob_name; + blob_ptr.reset((Blob*) (NULL)); + LOG(WARNING)<< "Unknown blob name " << blob_name; } return blob_ptr; } -template +template bool Net::has_layer(const string& layer_name) const { return layer_names_index_.find(layer_name) != layer_names_index_.end(); } -template +template const shared_ptr > Net::layer_by_name( const string& layer_name) const { shared_ptr > layer_ptr; if (has_layer(layer_name)) { layer_ptr = layers_[layer_names_index_.find(layer_name)->second]; } else { - layer_ptr.reset((Layer*)(NULL)); - LOG(WARNING) << "Unknown layer name " << layer_name; + layer_ptr.reset((Layer*) (NULL)); + LOG(WARNING)<< "Unknown layer name " << layer_name; } return layer_ptr; } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 5b21cf20028..b36e9491880 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -2,21 +2,13 @@ syntax = "proto2"; package caffe; -// Specifies the shape (dimensions) of a Blob. -message BlobShape { - repeated int64 dim = 1 [packed = true]; -} - message BlobProto { - optional BlobShape shape = 7; - repeated float data = 5 [packed = true]; - repeated float diff = 6 [packed = true]; - - // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; optional int32 channels = 2 [default = 0]; optional int32 height = 3 [default = 0]; optional int32 width = 4 [default = 0]; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; } // The BlobProtoVector is simply a way to pass multiple blobproto instances @@ -55,15 +47,10 @@ message NetParameter { optional string name = 1; // consider giving the network a name // The input blobs to the network. repeated string input = 3; - // The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "shape" instead. - // If specified, for each input blob there should be four + // The dim of the input blobs. For each input blob there should be four // values specifying the num, channels, height and width of the input blob. // Thus, there should be a total of (4 * #input) numbers. repeated int32 input_dim = 4; - // Whether the network will force every layer to carry out backward operation. // If set False, then whether to carry out backward is determined // automatically according to the net structure and learning rates. @@ -259,7 +246,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) +// LayerParameter next available layer-specific ID: 131 (last added: python_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -289,6 +276,33 @@ message LayerParameter { // included/excluded. repeated NetStateRule include = 8; repeated NetStateRule exclude = 9; + + // The optional random transformation variables for the data transformation layer + // Note: parameters 70 to 90 inserted by Fabian Tschopp for datarandtransform_layer.cpp + optional bool apply_normalization = 70 [ default = false ]; + + optional bool apply_mirroring = 80 [ default = false ]; + optional float prob_mirroring = 81 [ default = 0.5 ]; + + optional bool apply_rot = 71 [ default = false ]; + optional float rot_min = 72 [ default = -10.0 ]; + optional float rot_max = 73 [ default = 10.0 ]; + + optional bool apply_blur = 74 [ default = false ]; + optional uint32 blur_size = 75 [ default = 7 ]; + optional float blur_max_var = 76 [ default = 1.5 ]; + + optional bool apply_contrast_brightness = 77 [ default = false ]; + optional float alpha_c = 78 [ default = 1.3 ]; + optional float beta_c = 79 [ default = 0.1 ]; + + optional float temp = 90 [default = 1]; + + // Parameters for Greentea + optional int32 device = 95 [default = -1]; + // Parameters for Splitnet + optional int32 buffer = 96 [default = -1]; + // Parameters for data pre-processing. optional TransformationParameter transform_param = 100; @@ -323,7 +337,6 @@ message LayerParameter { optional MVNParameter mvn_param = 120; optional PoolingParameter pooling_param = 121; optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; optional ReLUParameter relu_param = 123; optional SigmoidParameter sigmoid_param = 124; @@ -368,16 +381,6 @@ message AccuracyParameter { // the top k scoring classes. By default, only compare to the top scoring // class (i.e. argmax). optional uint32 top_k = 1 [default = 1]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. - optional int32 axis = 2 [default = 1]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; } // Message that stores parameters used by ArgMaxLayer @@ -389,13 +392,9 @@ message ArgMaxParameter { // Message that stores parameters used by ConcatLayer message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [default = 1]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. + // Concat Layer needs to specify the dimension along the concat will happen, + // the other dimensions must be the same for all the bottom blobs + // By default it will concatenate blobs along channels dimension optional uint32 concat_dim = 1 [default = 1]; } @@ -429,6 +428,9 @@ message ConvolutionParameter { CUDNN = 2; } optional Engine engine = 15 [default = DEFAULT]; + optional uint32 kstride = 16 [default = 0]; + optional uint32 kstride_h = 17 [default = 0]; + optional uint32 kstride_w = 18 [default = 0]; } // Message that stores parameters used by DataLayer @@ -472,15 +474,13 @@ message DropoutParameter { // (or constant) data generated by "Fillers" (see "message FillerParameter"). message DummyDataParameter { // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N - // shape fields, and 0, 1 or N data_fillers. + // num, N channels, N height, and N width fields, and must specify 0, 1 or N + // data_fillers. // // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. // If 1 data_filler is specified, it is applied to all top blobs. If N are // specified, the ith is applied to the ith top blob. repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. repeated uint32 num = 2; repeated uint32 channels = 3; repeated uint32 height = 4; @@ -518,13 +518,6 @@ message HDF5DataParameter { optional string source = 1; // Specify the batch size. optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. - optional bool shuffle = 3 [default = false]; } // Message that stores parameters used by HDF5OutputLayer @@ -585,11 +578,6 @@ message InnerProductParameter { optional bool bias_term = 2 [default = true]; // whether to have bias terms optional FillerParameter weight_filler = 3; // The filler for the weight optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; } // Message that stores parameters used by LRNLayer @@ -650,6 +638,9 @@ message PoolingParameter { // If global_pooling then it will pool over the size of the bottom by doing // kernel_h = bottom->height and kernel_w = bottom->width optional bool global_pooling = 12 [default = false]; + optional uint32 kstride = 13 [default = 0]; + optional uint32 kstride_h = 14 [default = 0]; + optional uint32 kstride_w = 15 [default = 0]; } // Message that stores parameters used by PowerLayer @@ -694,14 +685,12 @@ message SigmoidParameter { // Message that stores parameters used by SliceLayer message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. + // SliceLayer needs to know which dimension to slice across. + // Currently, SliceLayer only supports slicing across num (dim 0) + // and channels (dim 1). + // By default, SliceLayer slices across channels. optional uint32 slice_dim = 1 [default = 1]; + repeated uint32 slice_point = 2; } // Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer @@ -712,11 +701,6 @@ message SoftmaxParameter { CUDNN = 2; } optional Engine engine = 1 [default = DEFAULT]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). - // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [default = 1]; } // Message that stores parameters used by TanHLayer @@ -816,6 +800,9 @@ message V1LayerParameter { TANH = 23; WINDOW_DATA = 24; THRESHOLD = 31; + DATA_RAND_TRANSFORM = 70; + CONVOLUTION_SK = 71; + POOLING_SK = 72; } optional LayerType type = 5; repeated BlobProto blobs = 6; @@ -951,17 +938,28 @@ message V0LayerParameter { // the other dimensions must be the same for all the bottom blobs. // By default it will concatenate blobs along the channels dimension. optional uint32 concat_dim = 65 [default = 1]; + + // The optional random transformation variables for the data transformation layer + // Note: parameters 70 to 90 inserted by Fabian Tschopp for datarandtransform_layer.cpp + optional bool apply_normalization = 70 [ default = false ]; + + optional bool apply_mirroring = 80 [ default = false ]; + optional float prob_mirroring = 81 [ default = 0.5 ]; + + optional bool apply_rot = 71 [ default = false ]; + optional float rot_min = 72 [ default = -10.0 ]; + optional float rot_max = 73 [ default = 10.0 ]; + + optional bool apply_blur = 74 [ default = false ]; + optional uint32 blur_size = 75 [ default = 7 ]; + optional float blur_max_var = 76 [ default = 1.5 ]; + + optional bool apply_contrast_brightness = 77 [ default = false ]; + optional float alpha_c = 78 [ default = 1.3 ]; + optional float beta_c = 79 [ default = 0.1 ]; + + optional float temp = 90 [default = 1]; + optional HDF5OutputParameter hdf5_output_param = 1001; } - -// Message that stores parameters used by PReLULayer -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope paramters are shared across channels. - optional bool channel_shared = 2 [default = false]; -} diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 096980dd7af..da0783ae3d3 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -158,6 +158,71 @@ void Solver::InitTestNets() { } } +template +void Solver::StepPrefilled() { + // Prefilled stepping can only do one at a time because the memory layer has to be refilled + int iters = 1; + vector*> bottom_vec; + const int start_iter = iter_; + const int stop_iter = iter_ + iters; + int average_loss = this->param_.average_loss(); + vector losses; + Dtype smoothed_loss = 0; + + for (; iter_ < stop_iter; ++iter_) { + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization())) { + // Currently can't do testing with this solver method + //TestAll(); + } + + const bool display = param_.display() && iter_ % param_.display() == 0; + net_->set_debug_info(display && param_.debug_info()); + Dtype loss; + net_->ForwardPrefilled(&loss); + net_->Backward(); + if (losses.size() < average_loss) { + losses.push_back(loss); + int size = losses.size(); + smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; + } else { + int idx = (iter_ - start_iter) % average_loss; + smoothed_loss += (loss - losses[idx]) / average_loss; + losses[idx] = loss; + } + if (display) { + LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + const vector*>& result = net_->output_blobs(); + int score_index = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + const string& output_name = + net_->blob_names()[net_->output_blob_indices()[j]]; + const Dtype loss_weight = + net_->blob_loss_weights()[net_->output_blob_indices()[j]]; + for (int k = 0; k < result[j]->count(); ++k) { + ostringstream loss_msg_stream; + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * result_vec[k] << " loss)"; + } + LOG(INFO) << " Train net output #" + << score_index++ << ": " << output_name << " = " + << result_vec[k] << loss_msg_stream.str(); + } + } + } + ComputeUpdateValue(); + net_->Update(); + + // Save a snapshot if needed. + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { + Snapshot(); + } + } +} + + template void Solver::Step(int iters) { vector*> bottom_vec; @@ -349,7 +414,7 @@ void Solver::Restore(const char* state_file) { NetParameter net_param; ReadProtoFromBinaryFile(state_file, &state); if (state.has_learned_net()) { - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + ReadProtoFromBinaryFile(state.learned_net().c_str(), &net_param); net_->CopyTrainedLayersFrom(net_param); } iter_ = state.iter(); @@ -420,10 +485,16 @@ void SGDSolver::PreSolve() { update_.clear(); temp_.clear(); for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - history_.push_back(shared_ptr >(new Blob(shape))); - update_.push_back(shared_ptr >(new Blob(shape))); - temp_.push_back(shared_ptr >(new Blob(shape))); + const Blob* net_param = net_params[i].get(); + history_.push_back(shared_ptr >(new Blob( + net_param->num(), net_param->channels(), net_param->height(), + net_param->width(),Caffe::GetDefaultDeviceContext()))); + update_.push_back(shared_ptr >(new Blob( + net_param->num(), net_param->channels(), net_param->height(), + net_param->width(),Caffe::GetDefaultDeviceContext()))); + temp_.push_back(shared_ptr >(new Blob( + net_param->num(), net_param->channels(), net_param->height(), + net_param->width(),Caffe::GetDefaultDeviceContext()))); } } @@ -563,7 +634,7 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { << "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; for (int i = 0; i < history_.size(); ++i) { - history_[i]->FromProto(state.history(i)); + history_[i]->FromProto(state.history(i),history_[i]->device_context()); } } diff --git a/src/caffe/splitnet/splitnet.cpp b/src/caffe/splitnet/splitnet.cpp new file mode 100644 index 00000000000..3197b97a6e6 --- /dev/null +++ b/src/caffe/splitnet/splitnet.cpp @@ -0,0 +1,18 @@ +/* + * splitnet.cpp + * + * Created on: Apr 5, 2015 + * Author: Fabian Tschopp + */ + +#include "caffe/splitnet/splitnet.hpp" + +namespace caffe { + +// TODO +template +Splitnet::Splitnet() { +} + +INSTANTIATE_CLASS(Splitnet); +} diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 7617ccfb27f..4dfbeb3c2e7 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -4,6 +4,13 @@ #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" +#include "caffe/greentea/greentea.hpp" + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { SyncedMemory::~SyncedMemory() { @@ -13,55 +20,100 @@ SyncedMemory::~SyncedMemory() { #ifndef CPU_ONLY if (gpu_ptr_) { - CUDA_CHECK(cudaFree(gpu_ptr_)); + if (device_context_.backend() == Backend::BACKEND_CUDA) { + CUDA_CHECK(cudaFree(gpu_ptr_)); + } else { +#ifdef USE_GREENTEA + clReleaseMemObject(cl_gpu_mem_); +#endif + } } #endif // CPU_ONLY } inline void SyncedMemory::to_cpu() { switch (head_) { - case UNINITIALIZED: - CaffeMallocHost(&cpu_ptr_, size_); - caffe_memset(size_, 0, cpu_ptr_); - head_ = HEAD_AT_CPU; - own_cpu_data_ = true; - break; - case HEAD_AT_GPU: -#ifndef CPU_ONLY - if (cpu_ptr_ == NULL) { + case UNINITIALIZED: { CaffeMallocHost(&cpu_ptr_, size_); + caffe_memset(size_, 0, cpu_ptr_); + head_ = HEAD_AT_CPU; own_cpu_data_ = true; + break; } - caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_); - head_ = SYNCED; + case HEAD_AT_GPU: { +#ifndef CPU_ONLY + if (cpu_ptr_ == NULL) { + CaffeMallocHost(&cpu_ptr_, size_); + own_cpu_data_ = true; + } + if (device_context_.backend() == Backend::BACKEND_CUDA) { + caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_); + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context ctx = viennacl::ocl::get_context( + device_context_.id()); + greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, cpu_ptr_, ctx); +#endif + } + head_ = SYNCED; #else - NO_GPU; + NO_GPU; #endif - break; - case HEAD_AT_CPU: - case SYNCED: - break; + break; + } + case HEAD_AT_CPU: + case SYNCED: + break; } } inline void SyncedMemory::to_gpu() { #ifndef CPU_ONLY switch (head_) { - case UNINITIALIZED: - CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); - caffe_gpu_memset(size_, 0, gpu_ptr_); - head_ = HEAD_AT_GPU; - break; - case HEAD_AT_CPU: - if (gpu_ptr_ == NULL) { - CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + case UNINITIALIZED: { + if (device_context_.backend() == Backend::BACKEND_CUDA) { + CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + caffe_gpu_memset(size_, 0, gpu_ptr_); + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context ctx = viennacl::ocl::get_context( + device_context_.id()); + cl_int err; + cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + size_, NULL, &err); + gpu_ptr_ = (void*) cl_gpu_mem_; + ctx.get_queue().finish(); +#endif + } + head_ = HEAD_AT_GPU; + break; } - caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_); - head_ = SYNCED; - break; - case HEAD_AT_GPU: - case SYNCED: - break; + case HEAD_AT_CPU: { + if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (gpu_ptr_ == NULL) { + CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + } + caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_); + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context ctx = viennacl::ocl::get_context( + device_context_.id()); + if (gpu_ptr_ == NULL) { + cl_int err; + cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + size_, NULL, &err); + gpu_ptr_ = (void*) cl_gpu_mem_; + ctx.get_queue().finish(); + } + greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, ctx); +#endif + } + head_ = SYNCED; + break; + } + case HEAD_AT_GPU: + case SYNCED: + break; } #else NO_GPU; @@ -70,7 +122,7 @@ inline void SyncedMemory::to_gpu() { const void* SyncedMemory::cpu_data() { to_cpu(); - return (const void*)cpu_ptr_; + return (const void*) cpu_ptr_; } void SyncedMemory::set_cpu_data(void* data) { @@ -86,7 +138,7 @@ void SyncedMemory::set_cpu_data(void* data) { const void* SyncedMemory::gpu_data() { #ifndef CPU_ONLY to_gpu(); - return (const void*)gpu_ptr_; + return (const void*) gpu_ptr_; #else NO_GPU; #endif @@ -108,6 +160,5 @@ void* SyncedMemory::mutable_gpu_data() { #endif } - } // namespace caffe diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 6cbf51df45e..fa59fab1e8a 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -19,24 +19,10 @@ template class AccuracyLayerTest : public ::testing::Test { protected: AccuracyLayerTest() - : blob_bottom_data_(new Blob()), - blob_bottom_label_(new Blob()), + : blob_bottom_data_(new Blob(100, 10, 1, 1)), + blob_bottom_label_(new Blob(100, 1, 1, 1)), blob_top_(new Blob()), top_k_(3) { - vector shape(2); - shape[0] = 100; - shape[1] = 10; - blob_bottom_data_->Reshape(shape); - shape.resize(1); - blob_bottom_label_->Reshape(shape); - FillBottoms(); - - blob_bottom_vec_.push_back(blob_bottom_data_); - blob_bottom_vec_.push_back(blob_bottom_label_); - blob_top_vec_.push_back(blob_top_); - } - - virtual void FillBottoms() { // fill the probability values FillerParameter filler_param; GaussianFiller filler(filler_param); @@ -47,11 +33,14 @@ class AccuracyLayerTest : public ::testing::Test { caffe::rng_t* prefetch_rng = static_cast(rng->generator()); Dtype* label_data = blob_bottom_label_->mutable_cpu_data(); - for (int i = 0; i < blob_bottom_label_->count(); ++i) { + for (int i = 0; i < 100; ++i) { label_data[i] = (*prefetch_rng)() % 10; } - } + blob_bottom_vec_.push_back(blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_); + } virtual ~AccuracyLayerTest() { delete blob_bottom_data_; delete blob_bottom_label_; @@ -117,89 +106,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { num_correct_labels / 100.0, 1e-4); } -TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { - Caffe::set_mode(Caffe::CPU); - this->blob_bottom_data_->Reshape(2, 10, 4, 5); - vector label_shape(3); - label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; - this->blob_bottom_label_->Reshape(label_shape); - this->FillBottoms(); - LayerParameter layer_param; - layer_param.mutable_accuracy_param()->set_axis(1); - AccuracyLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - - TypeParam max_value; - const int num_labels = this->blob_bottom_label_->count(); - int max_id; - int num_correct_labels = 0; - vector label_offset(3); - for (int n = 0; n < this->blob_bottom_data_->num(); ++n) { - for (int h = 0; h < this->blob_bottom_data_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_data_->width(); ++w) { - max_value = -FLT_MAX; - max_id = 0; - for (int c = 0; c < this->blob_bottom_data_->channels(); ++c) { - const TypeParam pred_value = - this->blob_bottom_data_->data_at(n, c, h, w); - if (pred_value > max_value) { - max_value = pred_value; - max_id = c; - } - } - label_offset[0] = n; label_offset[1] = h; label_offset[2] = w; - const int correct_label = - static_cast(this->blob_bottom_label_->data_at(label_offset)); - if (max_id == correct_label) { - ++num_correct_labels; - } - } - } - } - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / TypeParam(num_labels), 1e-4); -} - -TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { - Caffe::set_mode(Caffe::CPU); - LayerParameter layer_param; - const TypeParam kIgnoreLabelValue = -1; - layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); - AccuracyLayer layer(layer_param); - // Manually set some labels to the ignore label value (-1). - this->blob_bottom_label_->mutable_cpu_data()[2] = kIgnoreLabelValue; - this->blob_bottom_label_->mutable_cpu_data()[5] = kIgnoreLabelValue; - this->blob_bottom_label_->mutable_cpu_data()[32] = kIgnoreLabelValue; - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - - TypeParam max_value; - int max_id; - int num_correct_labels = 0; - int count = 0; - for (int i = 0; i < 100; ++i) { - if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - continue; - } - ++count; - max_value = -FLT_MAX; - max_id = 0; - for (int j = 0; j < 10; ++j) { - if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { - max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); - max_id = j; - } - } - if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - ++num_correct_labels; - } - } - EXPECT_EQ(count, 97); // We set 3 out of 100 labels to kIgnoreLabelValue. - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / TypeParam(count), 1e-4); -} - TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { LayerParameter layer_param; AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param(); diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index 7da6423b67c..e0678061173 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -1,5 +1,4 @@ #include -#include #include "gtest/gtest.h" @@ -32,7 +31,10 @@ TYPED_TEST(BlobSimpleTest, TestInitialization) { EXPECT_EQ(this->blob_preshaped_->height(), 4); EXPECT_EQ(this->blob_preshaped_->width(), 5); EXPECT_EQ(this->blob_preshaped_->count(), 120); - EXPECT_EQ(this->blob_->num_axes(), 0); + EXPECT_EQ(this->blob_->num(), 0); + EXPECT_EQ(this->blob_->channels(), 0); + EXPECT_EQ(this->blob_->height(), 0); + EXPECT_EQ(this->blob_->width(), 0); EXPECT_EQ(this->blob_->count(), 0); } @@ -52,59 +54,6 @@ TYPED_TEST(BlobSimpleTest, TestReshape) { EXPECT_EQ(this->blob_->count(), 120); } -TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { - BlobProto blob_proto; - - // Reshape to (3 x 2). - vector shape(2); - shape[0] = 3; - shape[1] = 2; - this->blob_->Reshape(shape); - - // (3 x 2) blob == (1 x 1 x 3 x 2) legacy blob - blob_proto.set_num(1); - blob_proto.set_channels(1); - blob_proto.set_height(3); - blob_proto.set_width(2); - EXPECT_TRUE(this->blob_->ShapeEquals(blob_proto)); - - // (3 x 2) blob != (0 x 1 x 3 x 2) legacy blob - blob_proto.set_num(0); - blob_proto.set_channels(1); - blob_proto.set_height(3); - blob_proto.set_width(2); - EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); - - // (3 x 2) blob != (3 x 1 x 3 x 2) legacy blob - blob_proto.set_num(3); - blob_proto.set_channels(1); - blob_proto.set_height(3); - blob_proto.set_width(2); - EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); - - // Reshape to (1 x 3 x 2). - shape.insert(shape.begin(), 1); - this->blob_->Reshape(shape); - - // (1 x 3 x 2) blob == (1 x 1 x 3 x 2) legacy blob - blob_proto.set_num(1); - blob_proto.set_channels(1); - blob_proto.set_height(3); - blob_proto.set_width(2); - EXPECT_TRUE(this->blob_->ShapeEquals(blob_proto)); - - // Reshape to (2 x 3 x 2). - shape[0] = 2; - this->blob_->Reshape(shape); - - // (2 x 3 x 2) blob != (1 x 1 x 3 x 2) legacy blob - blob_proto.set_num(1); - blob_proto.set_channels(1); - blob_proto.set_height(3); - blob_proto.set_width(2); - EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); -} - template class BlobMathTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index 662a50fa23b..f14f1d2fa4f 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -19,9 +19,9 @@ class ConcatLayerTest : public MultiDeviceTest { protected: ConcatLayerTest() - : blob_bottom_0_(new Blob(2, 3, 6, 5)), - blob_bottom_1_(new Blob(2, 5, 6, 5)), - blob_bottom_2_(new Blob(5, 3, 6, 5)), + : blob_bottom_0(new Blob(2, 3, 6, 5)), + blob_bottom_1(new Blob(2, 5, 6, 5)), + blob_bottom_2(new Blob(5, 3, 6, 5)), blob_top_(new Blob()) {} virtual void SetUp() { // fill the values @@ -29,30 +29,30 @@ class ConcatLayerTest : public MultiDeviceTest { FillerParameter filler_param; filler_param.set_value(1.); filler.reset(new ConstantFiller(filler_param)); - filler->Fill(this->blob_bottom_0_); + filler->Fill(this->blob_bottom_0); filler_param.set_value(2.); filler.reset(new ConstantFiller(filler_param)); - filler->Fill(this->blob_bottom_1_); + filler->Fill(this->blob_bottom_1); filler_param.set_value(3.); filler.reset(new ConstantFiller(filler_param)); - filler->Fill(this->blob_bottom_2_); - blob_bottom_vec_0_.push_back(blob_bottom_0_); - blob_bottom_vec_0_.push_back(blob_bottom_1_); - blob_bottom_vec_1_.push_back(blob_bottom_0_); - blob_bottom_vec_1_.push_back(blob_bottom_2_); + filler->Fill(this->blob_bottom_2); + blob_bottom_vec_0.push_back(blob_bottom_0); + blob_bottom_vec_0.push_back(blob_bottom_1); + blob_bottom_vec_1.push_back(blob_bottom_0); + blob_bottom_vec_1.push_back(blob_bottom_2); blob_top_vec_.push_back(blob_top_); } virtual ~ConcatLayerTest() { - delete blob_bottom_0_; delete blob_bottom_1_; - delete blob_bottom_2_; delete blob_top_; + delete blob_bottom_0; delete blob_bottom_1; + delete blob_bottom_2; delete blob_top_; } - Blob* const blob_bottom_0_; - Blob* const blob_bottom_1_; - Blob* const blob_bottom_2_; + Blob* const blob_bottom_0; + Blob* const blob_bottom_1; + Blob* const blob_bottom_2; Blob* const blob_top_; - vector*> blob_bottom_vec_0_, blob_bottom_vec_1_; + vector*> blob_bottom_vec_0, blob_bottom_vec_1; vector*> blob_top_vec_; }; @@ -61,115 +61,61 @@ TYPED_TEST_CASE(ConcatLayerTest, TestDtypesAndDevices); TYPED_TEST(ConcatLayerTest, TestSetupNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - layer_param.mutable_concat_param()->set_axis(0); + layer_param.mutable_concat_param()->set_concat_dim(0); ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_1_, this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_1, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), - this->blob_bottom_0_->num() + this->blob_bottom_2_->num()); - EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0_->channels()); - EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); - EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); + this->blob_bottom_0->num() + this->blob_bottom_2->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width()); } TYPED_TEST(ConcatLayerTest, TestSetupChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num()); EXPECT_EQ(this->blob_top_->channels(), - this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); - EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); - EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); + this->blob_bottom_0->channels()+this->blob_bottom_1->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width()); } -TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConcatLayer layer(layer_param); - // "channels" index is the third one from the end -- test negative indexing - // by setting axis to -3 and checking that we get the same results as above in - // TestSetupChannels. - layer_param.mutable_concat_param()->set_axis(-3); - layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); - EXPECT_EQ(this->blob_top_->channels(), - this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); - EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); - EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); -} - -TYPED_TEST(ConcatLayerTest, TestForwardNum) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_concat_param()->set_axis(0); - ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_1_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_1_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_vec_1_[0]->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { - EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), - this->blob_bottom_vec_1_[0]->data_at(n, c, h, w)); - } - } - } - } - for (int n = 0; n < this->blob_bottom_vec_1_[1]->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { - EXPECT_EQ(this->blob_top_->data_at(n + 2, c, h, w), - this->blob_bottom_vec_1_[1]->data_at(n, c, h, w)); - } - } - } - } -} -TYPED_TEST(ConcatLayerTest, TestForwardChannels) { +TYPED_TEST(ConcatLayerTest, TestNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0, this->blob_top_vec_); for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) { + for (int c = 0; c < this->blob_bottom_0->channels(); ++c) { for (int h = 0; h < this->blob_top_->height(); ++h) { for (int w = 0; w < this->blob_top_->width(); ++w) { EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), - this->blob_bottom_vec_0_[0]->data_at(n, c, h, w)); + this->blob_bottom_vec_0[0]->data_at(n, c, h, w)); } } } - for (int c = 0; c < this->blob_bottom_1_->channels(); ++c) { + for (int c = 0; c < this->blob_bottom_1->channels(); ++c) { for (int h = 0; h < this->blob_top_->height(); ++h) { for (int w = 0; w < this->blob_top_->width(); ++w) { - EXPECT_EQ(this->blob_top_->data_at(n, c + 3, h, w), - this->blob_bottom_vec_0_[1]->data_at(n, c, h, w)); + EXPECT_EQ(this->blob_top_->data_at(n, c+3, h, w), + this->blob_bottom_vec_0[1]->data_at(n, c, h, w)); } } } } } -TYPED_TEST(ConcatLayerTest, TestGradientNum) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_concat_param()->set_axis(0); - ConcatLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - checker.CheckGradient(&layer, this->blob_bottom_vec_1_, - this->blob_top_vec_); -} - -TYPED_TEST(ConcatLayerTest, TestGradientChannels) { +TYPED_TEST(ConcatLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradient(&layer, this->blob_bottom_vec_0_, + checker.CheckGradient(&layer, this->blob_bottom_vec_0, this->blob_top_vec_); } diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp index a23034f284a..441d2313c48 100644 --- a/src/caffe/test/test_hdf5_output_layer.cpp +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -88,6 +88,9 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { LayerParameter param; param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_); + param.add_bottom("data"); + param.add_bottom("label"); + // This code block ensures that the layer is deconstructed and // the output hdf5 file is closed. { @@ -103,13 +106,11 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { this->input_file_name_; Blob* blob_data = new Blob(); - hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4, - blob_data); + hdf5_load_nd_dataset(file_id, "data_0", 0, 4, blob_data); this->CheckBlobEqual(*(this->blob_data_), *blob_data); Blob* blob_label = new Blob(); - hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4, - blob_label); + hdf5_load_nd_dataset(file_id, "label_0", 0, 4, blob_label); this->CheckBlobEqual(*(this->blob_label_), *blob_label); status = H5Fclose(file_id); diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index c9b027f88cf..8d3b3d1e987 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -77,13 +77,15 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { EXPECT_EQ(this->blob_top_data_->height(), height); EXPECT_EQ(this->blob_top_data_->width(), width); - EXPECT_EQ(this->blob_top_label_->num_axes(), 2); - EXPECT_EQ(this->blob_top_label_->shape(0), batch_size); - EXPECT_EQ(this->blob_top_label_->shape(1), 1); - - EXPECT_EQ(this->blob_top_label2_->num_axes(), 2); - EXPECT_EQ(this->blob_top_label2_->shape(0), batch_size); - EXPECT_EQ(this->blob_top_label2_->shape(1), 1); + EXPECT_EQ(this->blob_top_label_->num(), batch_size); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + + EXPECT_EQ(this->blob_top_label2_->num(), batch_size); + EXPECT_EQ(this->blob_top_label2_->channels(), 1); + EXPECT_EQ(this->blob_top_label2_->height(), 1); + EXPECT_EQ(this->blob_top_label2_->width(), 1); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index c4e2f8ea7f2..07425df9b3a 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -138,22 +138,6 @@ TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) { } } -TYPED_TEST(LRNLayerTest, TestForwardAcrossChannelsLargeRegion) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_lrn_param()->set_local_size(15); - LRNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - Blob top_reference; - this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, - &top_reference); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], - this->epsilon_); - } -} - TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -175,28 +159,6 @@ TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) { this->blob_top_vec_); } -TYPED_TEST(LRNLayerTest, TestGradientAcrossChannelsLargeRegion) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_lrn_param()->set_local_size(15); - LRNLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { - this->blob_top_->mutable_cpu_diff()[i] = 1.; - } - vector propagate_down(this->blob_bottom_vec_.size(), true); - layer.Backward(this->blob_top_vec_, propagate_down, - this->blob_bottom_vec_); - // for (int i = 0; i < this->blob_bottom_->count(); ++i) { - // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] - // << std::endl; - // } - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); -} - TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 08106e79274..1680a3f28d5 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -63,19 +63,18 @@ class NetTest : public MultiDeviceTest { " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " " data_filler { " " type: 'gaussian' " " std: 0.01 " " } " - " shape { " - " dim: 5 " - " } " " data_filler { " " type: 'constant' " " value: 0 " diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c9d52f247a6..ad10720116d 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -1,4 +1,3 @@ -#include #include #include @@ -100,23 +99,6 @@ class NeuronLayerTest : public MultiDeviceTest { GradientChecker checker(1e-2, 1e-3); checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); } - - void TestPReLU(PReLULayer *layer) { - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Now, check values - const Dtype* bottom_data = this->blob_bottom_->cpu_data(); - const Dtype* top_data = this->blob_top_->cpu_data(); - const Dtype* slope_data = layer->blobs()[0]->cpu_data(); - int hw = this->blob_bottom_->height() * this->blob_bottom_->width(); - int channels = this->blob_bottom_->channels(); - bool channel_shared = layer->layer_param().prelu_param().channel_shared(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - int c = channel_shared ? 0 : (i / hw) % channels; - EXPECT_EQ(top_data[i], - std::max(bottom_data[i], (Dtype)(0)) - + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); - } - } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -410,184 +392,6 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradient) { this->blob_top_vec_); } -TYPED_TEST(NeuronLayerTest, TestPReLUParam) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - PReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - const Dtype* slopes = layer.blobs()[0]->cpu_data(); - int count = layer.blobs()[0]->count(); - for (int i = 0; i < count; ++i, ++slopes) { - EXPECT_EQ(*slopes, 0.25); - } -} - -TYPED_TEST(NeuronLayerTest, TestPReLUForward) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - PReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(layer.blobs()[0].get()); - this->TestPReLU(&layer); -} - -TYPED_TEST(NeuronLayerTest, TestPReLUForwardChannelShared) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_prelu_param()->set_channel_shared(true); - PReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - this->TestPReLU(&layer); -} - -TYPED_TEST(NeuronLayerTest, TestPReLUGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - PReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(layer.blobs()[0].get()); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); -} - -TYPED_TEST(NeuronLayerTest, TestPReLUGradientChannelShared) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_prelu_param()->set_channel_shared(true); - PReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); -} - -TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter prelu_layer_param; - LayerParameter relu_layer_param; - relu_layer_param.mutable_relu_param()->set_negative_slope(0.25); - PReLULayer prelu(prelu_layer_param); - ReLULayer relu(relu_layer_param); - // Set up blobs - vector*> blob_bottom_vec_2; - vector*> blob_top_vec_2; - shared_ptr > blob_bottom_2(new Blob()); - shared_ptr > blob_top_2(new Blob()); - blob_bottom_vec_2.push_back(blob_bottom_2.get()); - blob_top_vec_2.push_back(blob_top_2.get()); - blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); - // SetUp layers - prelu.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - relu.SetUp(blob_bottom_vec_2, blob_top_vec_2); - // Check forward - prelu.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - relu.Forward(this->blob_bottom_vec_, blob_top_vec_2); - for (int s = 0; s < blob_top_2->count(); ++s) { - EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); - } - // Check backward - shared_ptr > tmp_blob(new Blob()); - tmp_blob->ReshapeLike(*blob_top_2.get()); - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(tmp_blob.get()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), - this->blob_top_->mutable_cpu_diff()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), - blob_top_2->mutable_cpu_diff()); - vector propagate_down; - propagate_down.push_back(true); - prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - relu.Backward(blob_top_vec_2, propagate_down, blob_bottom_vec_2); - for (int s = 0; s < blob_bottom_2->count(); ++s) { - EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); - } -} - -TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { - typedef typename TypeParam::Dtype Dtype; - // Set layer parameters - LayerParameter ip_layer_param; - LayerParameter prelu_layer_param; - InnerProductParameter *ip_param = - ip_layer_param.mutable_inner_product_param(); - ip_param->mutable_weight_filler()->set_type("gaussian"); - ip_param->set_num_output(3); - InnerProductLayer ip(ip_layer_param); - PReLULayer prelu(prelu_layer_param); - InnerProductLayer ip2(ip_layer_param); - PReLULayer prelu2(prelu_layer_param); - // Set up blobs - vector*> blob_bottom_vec_2; - vector*> blob_middle_vec_2; - vector*> blob_top_vec_2; - shared_ptr > blob_bottom_2(new Blob()); - shared_ptr > blob_middle_2(new Blob()); - shared_ptr > blob_top_2(new Blob()); - blob_bottom_vec_2.push_back(blob_bottom_2.get()); - blob_middle_vec_2.push_back(blob_middle_2.get()); - blob_top_vec_2.push_back(blob_top_2.get()); - blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); - // SetUp layers - ip.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - prelu.SetUp(this->blob_top_vec_, this->blob_top_vec_); - ip2.SetUp(blob_bottom_vec_2, blob_middle_vec_2); - prelu2.SetUp(blob_middle_vec_2, blob_top_vec_2); - caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), - ip2.blobs()[0]->mutable_cpu_data()); - // Forward in-place - ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); - ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); - prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); - // Forward non-in-place - ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); - ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); - prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); - prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); - // Check numbers - for (int s = 0; s < blob_top_2->count(); ++s) { - EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); - } - // Fill top diff with random numbers - shared_ptr > tmp_blob(new Blob()); - tmp_blob->ReshapeLike(*blob_top_2.get()); - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(tmp_blob.get()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), - this->blob_top_->mutable_cpu_diff()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), - blob_top_2->mutable_cpu_diff()); - // Backward in-place - vector propagate_down; - propagate_down.push_back(true); - prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_top_vec_); - ip.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - // Backward non-in-place - prelu2.Backward(blob_top_vec_2, propagate_down, blob_middle_vec_2); - ip2.Backward(blob_middle_vec_2, propagate_down, blob_bottom_vec_2); - // Check numbers - for (int s = 0; s < blob_bottom_2->count(); ++s) { - EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); - } - for (int s = 0; s < ip.blobs()[0]->count(); ++s) { - EXPECT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]); - } - for (int s = 0; s < ip.blobs()[1]->count(); ++s) { - EXPECT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]); - } - for (int s = 0; s < prelu.blobs()[0]->count(); ++s) { - EXPECT_EQ(prelu.blobs()[0]->cpu_diff()[s], - prelu2.blobs()[0]->cpu_diff()[s]); - } -} - #ifdef USE_CUDNN template class CuDNNNeuronLayerTest : public ::testing::Test { diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index e9964e7f0b7..435caa8381e 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -976,6 +976,9 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { EXPECT_EQ(this->blob_top_->width(), 2); } +// This test and all following cuDNN pooling tests with padding are commented +// for now, since cuDNN pooling does not currently support padding. +/* TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; @@ -991,6 +994,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { EXPECT_EQ(this->blob_top_->height(), 4); EXPECT_EQ(this->blob_top_->width(), 3); } +*/ /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { @@ -1058,6 +1062,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } } +/* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; @@ -1102,6 +1107,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); } +*/ /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { @@ -1169,6 +1175,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } } +/* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { @@ -1187,6 +1194,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { } } } +*/ #endif diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index ccd03646d19..395be280089 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -62,7 +62,7 @@ TYPED_TEST_CASE(SliceLayerTest, TestDtypesAndDevices); TYPED_TEST(SliceLayerTest, TestSetupNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - layer_param.mutable_slice_param()->set_axis(0); + layer_param.mutable_slice_param()->set_slice_dim(0); SliceLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_); EXPECT_EQ(this->blob_bottom_->num(), 3 * this->blob_top_0_->num()); @@ -91,7 +91,7 @@ TYPED_TEST(SliceLayerTest, TestSetupChannels) { TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - layer_param.mutable_slice_param()->set_axis(0); + layer_param.mutable_slice_param()->set_slice_dim(0); SliceLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); const int top_num = this->blob_bottom_->num() / 2; @@ -166,7 +166,7 @@ TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) { // Gradient checks are slow; reduce blob size. this->ReduceBottomBlobSize(); LayerParameter layer_param; - layer_param.mutable_slice_param()->set_axis(0); + layer_param.mutable_slice_param()->set_slice_dim(0); SliceLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, diff --git a/src/caffe/test/test_solver.cpp b/src/caffe/test/test_solver.cpp index ceabc9cdd2c..1c2c9bbb740 100644 --- a/src/caffe/test/test_solver.cpp +++ b/src/caffe/test/test_solver.cpp @@ -55,15 +55,14 @@ TYPED_TEST(SolverTest, TestInitTrainTestNets) { " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " - " shape { " - " dim: 5 " - " } " + " num: 5 " + " channels: 3 " + " height: 10 " + " width: 10 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " " } " " top: 'data' " " top: 'label' " diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index c90f93eb67b..5acfadd9a18 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -8,6 +8,81 @@ namespace caffe { +template +__global__ void im2col_sk_gpu_kernel(const int n, const Dtype* data_im, + const int height, const int width, const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int height_col, const int width_col, + Dtype* data_col) { + CUDA_KERNEL_LOOP(index, n) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + const Dtype* data_im_ptr = data_im; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +template +void im2col_sk_gpu(const Dtype* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + Dtype* data_col) { + // We are going to launch channels * height_col * width_col kernels, each + // kernel responsible for copying a single-channel grid. + int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; + int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; + int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; + int num_kernels = channels * height_col * width_col; + + //LOG(INFO) << "ext_height = " << ext_kernel_h; + //LOG(INFO) << "ext_width = " << ext_kernel_w; + + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_sk_gpu_kernel<<>>( + num_kernels, data_im, height, width, kernel_h, kernel_w, + ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, + height_col, width_col, + data_col); + CUDA_POST_KERNEL_CHECK; +} + + +// Explicit instantiation +template void im2col_sk_gpu(const float* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + float* data_col); +template void im2col_sk_gpu(const double* data_im, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + double* data_col); + + template __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, @@ -59,7 +134,6 @@ void im2col_gpu(const Dtype* data_im, const int channels, CUDA_POST_KERNEL_CHECK; } - // Explicit instantiation template void im2col_gpu(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, @@ -70,6 +144,82 @@ template void im2col_gpu(const double* data_im, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, double* data_col); +// Support of stride_h and stride_w greater than 1 is not implemented +template +__global__ void col2im_sk_gpu_kernel(const int n, const Dtype* data_col, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int ext_patch_h, const int ext_patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int height_col, const int width_col, + Dtype* data_im) { + CUDA_KERNEL_LOOP(index, n) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w)+ 1; + int w_col_end = (w >= width_col) ? width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = (h >= height_col) ? height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + h_col * width_col + w_col]; + } + } + + data_im[index] = val; + } +} + +template +void col2im_sk_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_im) { + if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) + LOG(FATAL) << "stride greater than 1 or pad greater than 0 not tested in col2im_sk_gpu()."; + int ext_patch_h = (patch_h - 1) * kstride_h + 1; + int ext_patch_w = (patch_w - 1) * kstride_w + 1; + int height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; + int num_kernels = channels * height * width; + + col2im_sk_gpu_kernel<<>>( + num_kernels, data_col, height, width, channels, + patch_h, patch_w, ext_patch_h, ext_patch_w, + pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, + height_col, width_col, data_im); + CUDA_POST_KERNEL_CHECK; +} + +// Explicit instantiation +template void col2im_sk_gpu(const float* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, float* data_im); +template void col2im_sk_gpu(const double* data_col, const int channels, + const int height, const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, double* data_im); + + template __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, const int height, const int width, const int channels, @@ -112,6 +262,8 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, } } + + template void col2im_gpu(const Dtype* data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index 77ef7f257f4..1532b320901 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -252,11 +252,11 @@ void hdf5_load_nd_dataset_helper( CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name_; CHECK_EQ(class_, H5T_FLOAT) << "Expected float or double data"; - vector blob_dims(dims.size()); - for (int i = 0; i < dims.size(); ++i) { - blob_dims[i] = dims[i]; - } - blob->Reshape(blob_dims); + blob->Reshape( + dims[0], + (dims.size() > 1) ? dims[1] : 1, + (dims.size() > 2) ? dims[2] : 1, + (dims.size() > 3) ? dims[3] : 1, blob->device_context()); } template <> diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 38a06026adf..9f1aa250b27 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -77,9 +77,9 @@ void UpgradeV0PaddingLayers(const NetParameter& param, } for (int j = 0; j < layer_connection.bottom_size(); ++j) { const string& blob_name = layer_connection.bottom(j); - if (blob_name_to_last_top_idx.find(blob_name) == - blob_name_to_last_top_idx.end()) { - LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j; + if (blob_name_to_last_top_idx.find(blob_name) + == blob_name_to_last_top_idx.end()) { + LOG(FATAL)<< "Unknown blob input " << blob_name << " to layer " << j; } const int top_idx = blob_name_to_last_top_idx[blob_name]; if (top_idx == -1) { @@ -95,17 +95,14 @@ void UpgradeV0PaddingLayers(const NetParameter& param, << "Padding layer input to " "non-convolutional / non-pooling layer type " << layer_param.type(); - CHECK_EQ(layer_connection.bottom_size(), 1) - << "Conv Layer takes a single blob as input."; - CHECK_EQ(source_layer.bottom_size(), 1) - << "Padding Layer takes a single blob as input."; - CHECK_EQ(source_layer.top_size(), 1) - << "Padding Layer produces a single blob as output."; + CHECK_EQ(layer_connection.bottom_size(), 1)<< "Conv Layer takes a single blob as input."; + CHECK_EQ(source_layer.bottom_size(), 1)<< "Padding Layer takes a single blob as input."; + CHECK_EQ(source_layer.top_size(), 1)<< "Padding Layer produces a single blob as output."; int layer_index = param_upgraded_pad->layers_size() - 1; param_upgraded_pad->mutable_layers(layer_index)->mutable_layer() ->set_pad(source_layer.layer().pad()); - param_upgraded_pad->mutable_layers(layer_index) - ->set_bottom(j, source_layer.bottom(0)); + param_upgraded_pad->mutable_layers(layer_index)->set_bottom( + j, source_layer.bottom(0)); } } for (int j = 0; j < layer_connection.top_size(); ++j) { @@ -151,7 +148,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_inner_product_param()->set_num_output( v0_layer_param.num_output()); } else { - LOG(ERROR) << "Unknown parameter num_output for layer type " << type; + LOG(ERROR)<< "Unknown parameter num_output for layer type " << type; is_fully_compatible = false; } } @@ -163,31 +160,31 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_inner_product_param()->set_bias_term( v0_layer_param.biasterm()); } else { - LOG(ERROR) << "Unknown parameter biasterm for layer type " << type; + LOG(ERROR)<< "Unknown parameter biasterm for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_weight_filler()) { if (type == "conv") { - layer_param->mutable_convolution_param()-> - mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); + layer_param->mutable_convolution_param()->mutable_weight_filler() + ->CopyFrom(v0_layer_param.weight_filler()); } else if (type == "innerproduct") { - layer_param->mutable_inner_product_param()-> - mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); + layer_param->mutable_inner_product_param()->mutable_weight_filler() + ->CopyFrom(v0_layer_param.weight_filler()); } else { - LOG(ERROR) << "Unknown parameter weight_filler for layer type " << type; + LOG(ERROR)<< "Unknown parameter weight_filler for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_bias_filler()) { if (type == "conv") { - layer_param->mutable_convolution_param()-> - mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); + layer_param->mutable_convolution_param()->mutable_bias_filler() + ->CopyFrom(v0_layer_param.bias_filler()); } else if (type == "innerproduct") { - layer_param->mutable_inner_product_param()-> - mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); + layer_param->mutable_inner_product_param()->mutable_bias_filler() + ->CopyFrom(v0_layer_param.bias_filler()); } else { - LOG(ERROR) << "Unknown parameter bias_filler for layer type " << type; + LOG(ERROR)<< "Unknown parameter bias_filler for layer type " << type; is_fully_compatible = false; } } @@ -197,7 +194,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, } else if (type == "pool") { layer_param->mutable_pooling_param()->set_pad(v0_layer_param.pad()); } else { - LOG(ERROR) << "Unknown parameter pad for layer type " << type; + LOG(ERROR)<< "Unknown parameter pad for layer type " << type; is_fully_compatible = false; } } @@ -209,7 +206,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_pooling_param()->set_kernel_size( v0_layer_param.kernelsize()); } else { - LOG(ERROR) << "Unknown parameter kernelsize for layer type " << type; + LOG(ERROR)<< "Unknown parameter kernelsize for layer type " << type; is_fully_compatible = false; } } @@ -218,7 +215,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_convolution_param()->set_group( v0_layer_param.group()); } else { - LOG(ERROR) << "Unknown parameter group for layer type " << type; + LOG(ERROR)<< "Unknown parameter group for layer type " << type; is_fully_compatible = false; } } @@ -230,7 +227,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_pooling_param()->set_stride( v0_layer_param.stride()); } else { - LOG(ERROR) << "Unknown parameter stride for layer type " << type; + LOG(ERROR)<< "Unknown parameter stride for layer type " << type; is_fully_compatible = false; } } @@ -238,33 +235,33 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, if (type == "pool") { V0LayerParameter_PoolMethod pool = v0_layer_param.pool(); switch (pool) { - case V0LayerParameter_PoolMethod_MAX: - layer_param->mutable_pooling_param()->set_pool( - PoolingParameter_PoolMethod_MAX); - break; - case V0LayerParameter_PoolMethod_AVE: - layer_param->mutable_pooling_param()->set_pool( - PoolingParameter_PoolMethod_AVE); - break; - case V0LayerParameter_PoolMethod_STOCHASTIC: - layer_param->mutable_pooling_param()->set_pool( - PoolingParameter_PoolMethod_STOCHASTIC); - break; - default: - LOG(ERROR) << "Unknown pool method " << pool; + case V0LayerParameter_PoolMethod_MAX: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_MAX); + break; + case V0LayerParameter_PoolMethod_AVE: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_AVE); + break; + case V0LayerParameter_PoolMethod_STOCHASTIC: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_STOCHASTIC); + break; + default: + LOG(ERROR)<< "Unknown pool method " << pool; + is_fully_compatible = false; + } + } else { + LOG(ERROR) << "Unknown parameter pool for layer type " << type; is_fully_compatible = false; } - } else { - LOG(ERROR) << "Unknown parameter pool for layer type " << type; - is_fully_compatible = false; } - } if (v0_layer_param.has_dropout_ratio()) { if (type == "dropout") { layer_param->mutable_dropout_param()->set_dropout_ratio( v0_layer_param.dropout_ratio()); } else { - LOG(ERROR) << "Unknown parameter dropout_ratio for layer type " << type; + LOG(ERROR)<< "Unknown parameter dropout_ratio for layer type " << type; is_fully_compatible = false; } } @@ -273,7 +270,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_lrn_param()->set_local_size( v0_layer_param.local_size()); } else { - LOG(ERROR) << "Unknown parameter local_size for layer type " << type; + LOG(ERROR)<< "Unknown parameter local_size for layer type " << type; is_fully_compatible = false; } } @@ -281,7 +278,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, if (type == "lrn") { layer_param->mutable_lrn_param()->set_alpha(v0_layer_param.alpha()); } else { - LOG(ERROR) << "Unknown parameter alpha for layer type " << type; + LOG(ERROR)<< "Unknown parameter alpha for layer type " << type; is_fully_compatible = false; } } @@ -289,7 +286,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, if (type == "lrn") { layer_param->mutable_lrn_param()->set_beta(v0_layer_param.beta()); } else { - LOG(ERROR) << "Unknown parameter beta for layer type " << type; + LOG(ERROR)<< "Unknown parameter beta for layer type " << type; is_fully_compatible = false; } } @@ -297,7 +294,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, if (type == "lrn") { layer_param->mutable_lrn_param()->set_k(v0_layer_param.k()); } else { - LOG(ERROR) << "Unknown parameter k for layer type " << type; + LOG(ERROR)<< "Unknown parameter k for layer type " << type; is_fully_compatible = false; } } @@ -317,17 +314,16 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_infogain_loss_param()->set_source( v0_layer_param.source()); } else { - LOG(ERROR) << "Unknown parameter source for layer type " << type; + LOG(ERROR)<< "Unknown parameter source for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_scale()) { - layer_param->mutable_transform_param()-> - set_scale(v0_layer_param.scale()); + layer_param->mutable_transform_param()->set_scale(v0_layer_param.scale()); } if (v0_layer_param.has_meanfile()) { - layer_param->mutable_transform_param()-> - set_mean_file(v0_layer_param.meanfile()); + layer_param->mutable_transform_param()->set_mean_file( + v0_layer_param.meanfile()); } if (v0_layer_param.has_batchsize()) { if (type == "data") { @@ -343,17 +339,17 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_window_data_param()->set_batch_size( v0_layer_param.batchsize()); } else { - LOG(ERROR) << "Unknown parameter batchsize for layer type " << type; + LOG(ERROR)<< "Unknown parameter batchsize for layer type " << type; is_fully_compatible = false; } } if (v0_layer_param.has_cropsize()) { - layer_param->mutable_transform_param()-> - set_crop_size(v0_layer_param.cropsize()); + layer_param->mutable_transform_param()->set_crop_size( + v0_layer_param.cropsize()); } if (v0_layer_param.has_mirror()) { - layer_param->mutable_transform_param()-> - set_mirror(v0_layer_param.mirror()); + layer_param->mutable_transform_param()->set_mirror( + v0_layer_param.mirror()); } if (v0_layer_param.has_rand_skip()) { if (type == "data") { @@ -363,7 +359,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_image_data_param()->set_rand_skip( v0_layer_param.rand_skip()); } else { - LOG(ERROR) << "Unknown parameter rand_skip for layer type " << type; + LOG(ERROR)<< "Unknown parameter rand_skip for layer type " << type; is_fully_compatible = false; } } @@ -372,7 +368,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_image_data_param()->set_shuffle( v0_layer_param.shuffle_images()); } else { - LOG(ERROR) << "Unknown parameter shuffle for layer type " << type; + LOG(ERROR)<< "Unknown parameter shuffle for layer type " << type; is_fully_compatible = false; } } @@ -381,7 +377,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_image_data_param()->set_new_height( v0_layer_param.new_height()); } else { - LOG(ERROR) << "Unknown parameter new_height for layer type " << type; + LOG(ERROR)<< "Unknown parameter new_height for layer type " << type; is_fully_compatible = false; } } @@ -390,7 +386,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_image_data_param()->set_new_width( v0_layer_param.new_width()); } else { - LOG(ERROR) << "Unknown parameter new_width for layer type " << type; + LOG(ERROR)<< "Unknown parameter new_width for layer type " << type; is_fully_compatible = false; } } @@ -399,7 +395,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_concat_param()->set_concat_dim( v0_layer_param.concat_dim()); } else { - LOG(ERROR) << "Unknown parameter concat_dim for layer type " << type; + LOG(ERROR)<< "Unknown parameter concat_dim for layer type " << type; is_fully_compatible = false; } } @@ -408,8 +404,8 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_window_data_param()->set_fg_threshold( v0_layer_param.det_fg_threshold()); } else { - LOG(ERROR) << "Unknown parameter det_fg_threshold for layer type " - << type; + LOG(ERROR)<< "Unknown parameter det_fg_threshold for layer type " + << type; is_fully_compatible = false; } } @@ -418,8 +414,8 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_window_data_param()->set_bg_threshold( v0_layer_param.det_bg_threshold()); } else { - LOG(ERROR) << "Unknown parameter det_bg_threshold for layer type " - << type; + LOG(ERROR)<< "Unknown parameter det_bg_threshold for layer type " + << type; is_fully_compatible = false; } } @@ -428,8 +424,8 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_window_data_param()->set_fg_fraction( v0_layer_param.det_fg_fraction()); } else { - LOG(ERROR) << "Unknown parameter det_fg_fraction for layer type " - << type; + LOG(ERROR)<< "Unknown parameter det_fg_fraction for layer type " + << type; is_fully_compatible = false; } } @@ -438,8 +434,8 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_window_data_param()->set_context_pad( v0_layer_param.det_context_pad()); } else { - LOG(ERROR) << "Unknown parameter det_context_pad for layer type " - << type; + LOG(ERROR)<< "Unknown parameter det_context_pad for layer type " + << type; is_fully_compatible = false; } } @@ -448,8 +444,8 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_window_data_param()->set_crop_mode( v0_layer_param.det_crop_mode()); } else { - LOG(ERROR) << "Unknown parameter det_crop_mode for layer type " - << type; + LOG(ERROR)<< "Unknown parameter det_crop_mode for layer type " + << type; is_fully_compatible = false; } } @@ -458,8 +454,8 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_hdf5_output_param()->CopyFrom( v0_layer_param.hdf5_output_param()); } else { - LOG(ERROR) << "Unknown parameter hdf5_output_param for layer type " - << type; + LOG(ERROR)<< "Unknown parameter hdf5_output_param for layer type " + << type; is_fully_compatible = false; } } @@ -516,8 +512,10 @@ V1LayerParameter_LayerType UpgradeV0LayerType(const string& type) { return V1LayerParameter_LayerType_TANH; } else if (type == "window_data") { return V1LayerParameter_LayerType_WINDOW_DATA; + } else if (type == "data_rand_transform") { + return V1LayerParameter_LayerType_DATA_RAND_TRANSFORM; } else { - LOG(FATAL) << "Unknown layer name: " << type; + LOG(FATAL)<< "Unknown layer name: " << type; return V1LayerParameter_LayerType_NONE; } } @@ -526,24 +524,48 @@ bool NetNeedsDataUpgrade(const NetParameter& net_param) { for (int i = 0; i < net_param.layers_size(); ++i) { if (net_param.layers(i).type() == V1LayerParameter_LayerType_DATA) { DataParameter layer_param = net_param.layers(i).data_param(); - if (layer_param.has_scale()) { return true; } - if (layer_param.has_mean_file()) { return true; } - if (layer_param.has_crop_size()) { return true; } - if (layer_param.has_mirror()) { return true; } + if (layer_param.has_scale()) { + return true; + } + if (layer_param.has_mean_file()) { + return true; + } + if (layer_param.has_crop_size()) { + return true; + } + if (layer_param.has_mirror()) { + return true; + } } if (net_param.layers(i).type() == V1LayerParameter_LayerType_IMAGE_DATA) { ImageDataParameter layer_param = net_param.layers(i).image_data_param(); - if (layer_param.has_scale()) { return true; } - if (layer_param.has_mean_file()) { return true; } - if (layer_param.has_crop_size()) { return true; } - if (layer_param.has_mirror()) { return true; } + if (layer_param.has_scale()) { + return true; + } + if (layer_param.has_mean_file()) { + return true; + } + if (layer_param.has_crop_size()) { + return true; + } + if (layer_param.has_mirror()) { + return true; + } } if (net_param.layers(i).type() == V1LayerParameter_LayerType_WINDOW_DATA) { WindowDataParameter layer_param = net_param.layers(i).window_data_param(); - if (layer_param.has_scale()) { return true; } - if (layer_param.has_mean_file()) { return true; } - if (layer_param.has_crop_size()) { return true; } - if (layer_param.has_mirror()) { return true; } + if (layer_param.has_scale()) { + return true; + } + if (layer_param.has_mean_file()) { + return true; + } + if (layer_param.has_crop_size()) { + return true; + } + if (layer_param.has_mirror()) { + return true; + } } } return false; @@ -588,43 +610,43 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) { if (NetNeedsV0ToV1Upgrade(*param)) { // NetParameter was specified using the old style (V0LayerParameter); try to // upgrade it. - LOG(ERROR) << "Attempting to upgrade input file specified using deprecated " - << "V0LayerParameter: " << param_file; + LOG(ERROR)<< "Attempting to upgrade input file specified using deprecated " + << "V0LayerParameter: " << param_file; NetParameter original_param(*param); if (!UpgradeV0Net(original_param, param)) { success = false; LOG(ERROR) << "Warning: had one or more problems upgrading " - << "V0NetParameter to NetParameter (see above); continuing anyway."; + << "V0NetParameter to NetParameter (see above); continuing anyway."; } else { LOG(INFO) << "Successfully upgraded file specified using deprecated " - << "V0LayerParameter"; + << "V0LayerParameter"; } LOG(ERROR) << "Note that future Caffe releases will not support " - << "V0NetParameter; use ./build/tools/upgrade_net_proto_text for " - << "prototxt and ./build/tools/upgrade_net_proto_binary for model " - << "weights upgrade this and any other net protos to the new format."; + << "V0NetParameter; use ./build/tools/upgrade_net_proto_text for " + << "prototxt and ./build/tools/upgrade_net_proto_binary for model " + << "weights upgrade this and any other net protos to the new format."; } // NetParameter uses old style data transformation fields; try to upgrade it. if (NetNeedsDataUpgrade(*param)) { - LOG(ERROR) << "Attempting to upgrade input file specified using deprecated " - << "transformation parameters: " << param_file; + LOG(ERROR)<< "Attempting to upgrade input file specified using deprecated " + << "transformation parameters: " << param_file; UpgradeNetDataTransformation(param); LOG(INFO) << "Successfully upgraded file specified using deprecated " - << "data transformation parameters."; + << "data transformation parameters."; LOG(ERROR) << "Note that future Caffe releases will only support " - << "transform_param messages for transformation fields."; + << "transform_param messages for transformation fields."; } if (NetNeedsV1ToV2Upgrade(*param)) { - LOG(ERROR) << "Attempting to upgrade input file specified using deprecated " - << "V1LayerParameter: " << param_file; + LOG(ERROR)<< "Attempting to upgrade input file specified using deprecated " + << "V1LayerParameter: " << param_file; NetParameter original_param(*param); if (!UpgradeV1Net(original_param, param)) { success = false; LOG(ERROR) << "Warning: had one or more problems upgrading " - << "V1LayerParameter (see above); continuing anyway."; + << "V1LayerParameter (see above); continuing anyway."; } else { LOG(INFO) << "Successfully upgraded file specified using deprecated " - << "V1LayerParameter"; + << "V1LayerParameter"; } } return success; @@ -633,8 +655,8 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) { bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) { bool is_fully_compatible = true; if (v1_net_param.layer_size() > 0) { - LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' " - << "fields; these will be ignored for the upgrade."; + LOG(ERROR)<< "Input NetParameter to be upgraded already specifies 'layer' " + << "fields; these will be ignored for the upgrade."; is_fully_compatible = false; } net_param->CopyFrom(v1_net_param); @@ -643,7 +665,7 @@ bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) { for (int i = 0; i < v1_net_param.layers_size(); ++i) { if (!UpgradeV1LayerParameter(v1_net_param.layers(i), net_param->add_layer())) { - LOG(ERROR) << "Upgrade of input layer " << i << " failed."; + LOG(ERROR)<< "Upgrade of input layer " << i << " failed."; is_fully_compatible = false; } } @@ -676,32 +698,40 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, layer_param->add_blobs()->CopyFrom(v1_layer_param.blobs(i)); } for (int i = 0; i < v1_layer_param.param_size(); ++i) { - while (layer_param->param_size() <= i) { layer_param->add_param(); } + while (layer_param->param_size() <= i) { + layer_param->add_param(); + } layer_param->mutable_param(i)->set_name(v1_layer_param.param(i)); } ParamSpec_DimCheckMode mode; for (int i = 0; i < v1_layer_param.blob_share_mode_size(); ++i) { - while (layer_param->param_size() <= i) { layer_param->add_param(); } - switch (v1_layer_param.blob_share_mode(i)) { - case V1LayerParameter_DimCheckMode_STRICT: - mode = ParamSpec_DimCheckMode_STRICT; - break; - case V1LayerParameter_DimCheckMode_PERMISSIVE: - mode = ParamSpec_DimCheckMode_PERMISSIVE; - break; - default: - LOG(FATAL) << "Unknown blob_share_mode: " - << v1_layer_param.blob_share_mode(i); - break; + while (layer_param->param_size() <= i) { + layer_param->add_param(); } + switch (v1_layer_param.blob_share_mode(i)) { + case V1LayerParameter_DimCheckMode_STRICT: + mode = ParamSpec_DimCheckMode_STRICT; + break; + case V1LayerParameter_DimCheckMode_PERMISSIVE: + mode = ParamSpec_DimCheckMode_PERMISSIVE; + break; + default: + LOG(FATAL)<< "Unknown blob_share_mode: " + << v1_layer_param.blob_share_mode(i); + break; + } layer_param->mutable_param(i)->set_share_mode(mode); } for (int i = 0; i < v1_layer_param.blobs_lr_size(); ++i) { - while (layer_param->param_size() <= i) { layer_param->add_param(); } + while (layer_param->param_size() <= i) { + layer_param->add_param(); + } layer_param->mutable_param(i)->set_lr_mult(v1_layer_param.blobs_lr(i)); } for (int i = 0; i < v1_layer_param.weight_decay_size(); ++i) { - while (layer_param->param_size() <= i) { layer_param->add_param(); } + while (layer_param->param_size() <= i) { + layer_param->add_param(); + } layer_param->mutable_param(i)->set_decay_mult( v1_layer_param.weight_decay(i)); } @@ -729,8 +759,7 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, v1_layer_param.convolution_param()); } if (v1_layer_param.has_data_param()) { - layer_param->mutable_data_param()->CopyFrom( - v1_layer_param.data_param()); + layer_param->mutable_data_param()->CopyFrom(v1_layer_param.data_param()); } if (v1_layer_param.has_dropout_param()) { layer_param->mutable_dropout_param()->CopyFrom( @@ -745,8 +774,7 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, v1_layer_param.eltwise_param()); } if (v1_layer_param.has_exp_param()) { - layer_param->mutable_exp_param()->CopyFrom( - v1_layer_param.exp_param()); + layer_param->mutable_exp_param()->CopyFrom(v1_layer_param.exp_param()); } if (v1_layer_param.has_hdf5_data_param()) { layer_param->mutable_hdf5_data_param()->CopyFrom( @@ -773,28 +801,24 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, v1_layer_param.inner_product_param()); } if (v1_layer_param.has_lrn_param()) { - layer_param->mutable_lrn_param()->CopyFrom( - v1_layer_param.lrn_param()); + layer_param->mutable_lrn_param()->CopyFrom(v1_layer_param.lrn_param()); } if (v1_layer_param.has_memory_data_param()) { layer_param->mutable_memory_data_param()->CopyFrom( v1_layer_param.memory_data_param()); } if (v1_layer_param.has_mvn_param()) { - layer_param->mutable_mvn_param()->CopyFrom( - v1_layer_param.mvn_param()); + layer_param->mutable_mvn_param()->CopyFrom(v1_layer_param.mvn_param()); } if (v1_layer_param.has_pooling_param()) { layer_param->mutable_pooling_param()->CopyFrom( v1_layer_param.pooling_param()); } if (v1_layer_param.has_power_param()) { - layer_param->mutable_power_param()->CopyFrom( - v1_layer_param.power_param()); + layer_param->mutable_power_param()->CopyFrom(v1_layer_param.power_param()); } if (v1_layer_param.has_relu_param()) { - layer_param->mutable_relu_param()->CopyFrom( - v1_layer_param.relu_param()); + layer_param->mutable_relu_param()->CopyFrom(v1_layer_param.relu_param()); } if (v1_layer_param.has_sigmoid_param()) { layer_param->mutable_sigmoid_param()->CopyFrom( @@ -805,12 +829,10 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, v1_layer_param.softmax_param()); } if (v1_layer_param.has_slice_param()) { - layer_param->mutable_slice_param()->CopyFrom( - v1_layer_param.slice_param()); + layer_param->mutable_slice_param()->CopyFrom(v1_layer_param.slice_param()); } if (v1_layer_param.has_tanh_param()) { - layer_param->mutable_tanh_param()->CopyFrom( - v1_layer_param.tanh_param()); + layer_param->mutable_tanh_param()->CopyFrom(v1_layer_param.tanh_param()); } if (v1_layer_param.has_threshold_param()) { layer_param->mutable_threshold_param()->CopyFrom( @@ -825,11 +847,10 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, v1_layer_param.transform_param()); } if (v1_layer_param.has_loss_param()) { - layer_param->mutable_loss_param()->CopyFrom( - v1_layer_param.loss_param()); + layer_param->mutable_loss_param()->CopyFrom(v1_layer_param.loss_param()); } if (v1_layer_param.has_layer()) { - LOG(ERROR) << "Input NetParameter has V0 layer -- ignoring."; + LOG(ERROR)<< "Input NetParameter has V0 layer -- ignoring."; is_fully_compatible = false; } return is_fully_compatible; @@ -837,91 +858,97 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { switch (type) { - case V1LayerParameter_LayerType_NONE: - return ""; - case V1LayerParameter_LayerType_ABSVAL: - return "AbsVal"; - case V1LayerParameter_LayerType_ACCURACY: - return "Accuracy"; - case V1LayerParameter_LayerType_ARGMAX: - return "ArgMax"; - case V1LayerParameter_LayerType_BNLL: - return "BNLL"; - case V1LayerParameter_LayerType_CONCAT: - return "Concat"; - case V1LayerParameter_LayerType_CONTRASTIVE_LOSS: - return "ContrastiveLoss"; - case V1LayerParameter_LayerType_CONVOLUTION: - return "Convolution"; - case V1LayerParameter_LayerType_DECONVOLUTION: - return "Deconvolution"; - case V1LayerParameter_LayerType_DATA: - return "Data"; - case V1LayerParameter_LayerType_DROPOUT: - return "Dropout"; - case V1LayerParameter_LayerType_DUMMY_DATA: - return "DummyData"; - case V1LayerParameter_LayerType_EUCLIDEAN_LOSS: - return "EuclideanLoss"; - case V1LayerParameter_LayerType_ELTWISE: - return "Eltwise"; - case V1LayerParameter_LayerType_EXP: - return "Exp"; - case V1LayerParameter_LayerType_FLATTEN: - return "Flatten"; - case V1LayerParameter_LayerType_HDF5_DATA: - return "HDF5Data"; - case V1LayerParameter_LayerType_HDF5_OUTPUT: - return "HDF5Output"; - case V1LayerParameter_LayerType_HINGE_LOSS: - return "HingeLoss"; - case V1LayerParameter_LayerType_IM2COL: - return "Im2col"; - case V1LayerParameter_LayerType_IMAGE_DATA: - return "ImageData"; - case V1LayerParameter_LayerType_INFOGAIN_LOSS: - return "InfogainLoss"; - case V1LayerParameter_LayerType_INNER_PRODUCT: - return "InnerProduct"; - case V1LayerParameter_LayerType_LRN: - return "LRN"; - case V1LayerParameter_LayerType_MEMORY_DATA: - return "MemoryData"; - case V1LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS: - return "MultinomialLogisticLoss"; - case V1LayerParameter_LayerType_MVN: - return "MVN"; - case V1LayerParameter_LayerType_POOLING: - return "Pooling"; - case V1LayerParameter_LayerType_POWER: - return "Power"; - case V1LayerParameter_LayerType_RELU: - return "ReLU"; - case V1LayerParameter_LayerType_SIGMOID: - return "Sigmoid"; - case V1LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS: - return "SigmoidCrossEntropyLoss"; - case V1LayerParameter_LayerType_SILENCE: - return "Silence"; - case V1LayerParameter_LayerType_SOFTMAX: - return "Softmax"; - case V1LayerParameter_LayerType_SOFTMAX_LOSS: - return "SoftmaxWithLoss"; - case V1LayerParameter_LayerType_SPLIT: - return "Split"; - case V1LayerParameter_LayerType_SLICE: - return "Slice"; - case V1LayerParameter_LayerType_TANH: - return "TanH"; - case V1LayerParameter_LayerType_WINDOW_DATA: - return "WindowData"; - case V1LayerParameter_LayerType_THRESHOLD: - return "Threshold"; - default: - LOG(FATAL) << "Unknown V1LayerParameter layer type: " << type; - return ""; + case V1LayerParameter_LayerType_NONE: + return ""; + case V1LayerParameter_LayerType_ABSVAL: + return "AbsVal"; + case V1LayerParameter_LayerType_ACCURACY: + return "Accuracy"; + case V1LayerParameter_LayerType_ARGMAX: + return "ArgMax"; + case V1LayerParameter_LayerType_BNLL: + return "BNLL"; + case V1LayerParameter_LayerType_CONCAT: + return "Concat"; + case V1LayerParameter_LayerType_CONTRASTIVE_LOSS: + return "ContrastiveLoss"; + case V1LayerParameter_LayerType_CONVOLUTION: + return "Convolution"; + case V1LayerParameter_LayerType_DECONVOLUTION: + return "Deconvolution"; + case V1LayerParameter_LayerType_DATA: + return "Data"; + case V1LayerParameter_LayerType_DROPOUT: + return "Dropout"; + case V1LayerParameter_LayerType_DUMMY_DATA: + return "DummyData"; + case V1LayerParameter_LayerType_EUCLIDEAN_LOSS: + return "EuclideanLoss"; + case V1LayerParameter_LayerType_ELTWISE: + return "Eltwise"; + case V1LayerParameter_LayerType_EXP: + return "Exp"; + case V1LayerParameter_LayerType_FLATTEN: + return "Flatten"; + case V1LayerParameter_LayerType_HDF5_DATA: + return "HDF5Data"; + case V1LayerParameter_LayerType_HDF5_OUTPUT: + return "HDF5Output"; + case V1LayerParameter_LayerType_HINGE_LOSS: + return "HingeLoss"; + case V1LayerParameter_LayerType_IM2COL: + return "Im2col"; + case V1LayerParameter_LayerType_IMAGE_DATA: + return "ImageData"; + case V1LayerParameter_LayerType_INFOGAIN_LOSS: + return "InfogainLoss"; + case V1LayerParameter_LayerType_INNER_PRODUCT: + return "InnerProduct"; + case V1LayerParameter_LayerType_LRN: + return "LRN"; + case V1LayerParameter_LayerType_MEMORY_DATA: + return "MemoryData"; + case V1LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS: + return "MultinomialLogisticLoss"; + case V1LayerParameter_LayerType_MVN: + return "MVN"; + case V1LayerParameter_LayerType_POOLING: + return "Pooling"; + case V1LayerParameter_LayerType_POWER: + return "Power"; + case V1LayerParameter_LayerType_RELU: + return "ReLU"; + case V1LayerParameter_LayerType_SIGMOID: + return "Sigmoid"; + case V1LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS: + return "SigmoidCrossEntropyLoss"; + case V1LayerParameter_LayerType_SILENCE: + return "Silence"; + case V1LayerParameter_LayerType_SOFTMAX: + return "Softmax"; + case V1LayerParameter_LayerType_SOFTMAX_LOSS: + return "SoftmaxWithLoss"; + case V1LayerParameter_LayerType_SPLIT: + return "Split"; + case V1LayerParameter_LayerType_SLICE: + return "Slice"; + case V1LayerParameter_LayerType_TANH: + return "TanH"; + case V1LayerParameter_LayerType_WINDOW_DATA: + return "WindowData"; + case V1LayerParameter_LayerType_THRESHOLD: + return "Threshold"; + case V1LayerParameter_LayerType_DATA_RAND_TRANSFORM: + return "DataRandTransform"; + case V1LayerParameter_LayerType_CONVOLUTION_SK: + return "ConvolutionSK"; + case V1LayerParameter_LayerType_POOLING_SK: + return "PoolingSK"; + default: + LOG(FATAL)<< "Unknown V1LayerParameter layer type: " << type; + return ""; + } } -} void ReadNetParamsFromTextFileOrDie(const string& param_file, NetParameter* param) { diff --git a/test.txt b/test.txt new file mode 100644 index 00000000000..ec78c958209 --- /dev/null +++ b/test.txt @@ -0,0 +1 @@ +PROJECT := caffe\n\nCONFIG_FILE := Makefile.config\ninclude $(CONFIG_FILE)\n\nCXXFLAGS += -std=c++11 -Wno-deprecated-declarations\nLINKFLAGS += -std=c++11 -Wno-deprecated-declarations\nNVCCFLAGS += -Xcompiler "-Wno-deprecated-declarations" -Xlinker "-Wno-deprecated-declarations" -Xarchive "-Wno-deprecated-declarations" -Xnvlink "-Wno-deprecated-declarations"\n\nBUILD_DIR_LINK := $(BUILD_DIR)\nRELEASE_BUILD_DIR ?= .$(BUILD_DIR)_release\nDEBUG_BUILD_DIR ?= .$(BUILD_DIR)_debug\n\nDEBUG ?= 0\nifeq ($(DEBUG), 1)\n BUILD_DIR := $(DEBUG_BUILD_DIR)\n OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR)\nelse\n BUILD_DIR := $(RELEASE_BUILD_DIR)\n OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR)\nendif\n\n\n# All of the directories containing code.\nSRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \\n \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print)\n\n# The target shared library name\nLIB_BUILD_DIR := $(BUILD_DIR)/lib\nSTATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a\nDYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so\n\n##############################\n# Get all source files\n##############################\n# CXX_SRCS are the source files excluding the test ones.\nCXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")\n# CU_SRCS are the cuda source files\nCU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")\n# TEST_SRCS are the test source files\nTEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp\nTEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp")\nTEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS))\nTEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")\nGTEST_SRC := src/gtest/gtest-all.cpp\n# TOOL_SRCS are the source files for the tool binaries\nTOOL_SRCS := $(shell find tools -name "*.cpp")\n# EXAMPLE_SRCS are the source files for the example binaries\nEXAMPLE_SRCS := $(shell find examples -name "*.cpp")\n# BUILD_INCLUDE_DIR contains any generated header files we want to include.\nBUILD_INCLUDE_DIR := $(BUILD_DIR)/src\n# PROTO_SRCS are the protocol buffer definitions\nPROTO_SRC_DIR := src/$(PROJECT)/proto\nPROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto)\n# PROTO_BUILD_DIR will contain the .cc and obj files generated from\n# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files\nPROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR)\nPROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto\n# NONGEN_CXX_SRCS includes all source/header files except those generated\n# automatically (e.g., by proto).\nNONGEN_CXX_SRCS := $(shell find \\n src/$(PROJECT) \\n include/$(PROJECT) \\n python/$(PROJECT) \\n matlab/$(PROJECT) \\n examples \\n tools \\n -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh")\nLINT_SCRIPT := scripts/cpp_lint.py\nLINT_OUTPUT_DIR := $(BUILD_DIR)/.lint\nLINT_EXT := lint.txt\nLINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS)))\nEMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT)\nNONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT)\n# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT)\nPY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp\nPY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so\nPY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp\n# MAT$(PROJECT)_SRC is the matlab wrapper for $(PROJECT)\nMAT$(PROJECT)_SRC := matlab/$(PROJECT)/mat$(PROJECT).cpp\nifneq ($(MATLAB_DIR),)\n MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext)\nendif\nMAT$(PROJECT)_SO := matlab/$(PROJECT)/$(PROJECT).$(MAT_SO_EXT)\n\n##############################\n# Derive generated files\n##############################\n# The generated files for protocol buffers\nPROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \\n $(notdir ${PROTO_SRCS:.proto=.pb.h}))\nPROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \\n $(notdir ${PROTO_SRCS:.proto=.pb.h}))\nPROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc})\nPY_PROTO_BUILD_DIR := python/$(PROJECT)/proto\nPY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py\nPROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \\n $(PY_PROTO_BUILD_DIR)/$(notdir $(file)))\n# The objects corresponding to the source files\n# These objects will be linked into the final shared library, so we\n# exclude the tool, example, and test objects.\nCXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o})\nCU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o})\nPROTO_OBJS := ${PROTO_GEN_CC:.cc=.o}\nOBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)\n# tool, example, and test objects\nTOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o})\nTOOL_BUILD_DIR := $(BUILD_DIR)/tools\nTEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test\nTEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test\nTEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o})\nTEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o})\nTEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS)\nGTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o})\nEXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o})\n# Output files for automatic dependency generation\nDEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \\n ${TEST_CU_OBJS:.o=.d}\n# tool, example, and test bins\nTOOL_BINS := ${TOOL_OBJS:.o=.bin}\nEXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin}\n# symlinks to tool bins without the ".bin" extension\nTOOL_BIN_LINKS := ${TOOL_BINS:.bin=}\n# Put the test binaries in build/test for convenience.\nTEST_BIN_DIR := $(BUILD_DIR)/test\nTEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \\n $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj))))))\nTEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \\n $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj))))))\nTEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS)\n# TEST_ALL_BIN is the test binary that links caffe dynamically.\nTEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin\n\n##############################\n# Derive compiler warning dump locations\n##############################\nWARNS_EXT := warnings.txt\nCXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)})\nCU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)})\nTOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)})\nEXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)})\nTEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)})\nTEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)})\nALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS)\nALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS)\nALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS)\n\nEMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT)\nNONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT)\n\n##############################\n# GreenTea backend related include and lib\n##############################\n\nifeq ($(USE_GREENTEA),1)\n # Find a valid OpenCL library\n ifdef OPENCL_INC\n CLLINC = '$(OPENCL_INC)'\n endif\n \n ifdef OPENCL_LIB\n CLLIBS = '$(OPENCL_LIB)'\n endif\n \n ifdef OPENCLROOT\n CLLIBS = '$(OPENCLROOT)'\n endif\n \n ifdef CUDA_PATH\n CLLIBS = '$(CUDA_PATH)/lib/x64'\n endif\n \n ifdef INTELOCLSDKROOT\n CLLIBS = '$(INTELOCLSDKROOT)/lib/x64'\n endif\n \n ifdef AMDAPPSDKROOT\n CLLIBS = '$(AMDAPPSDKROOT)/lib/x86_64'\n CLLINC = '$(AMDAPPSDKROOT)/include'\n endif\n # Requires valid OpenCL library\n LIBRARY_DIRS += $(CLLIBS)\n # Requires valid OpenCL headers and valid ViennaCL\n INCLUDE_DIRS += $(CLLINC) $(VIENNACL_DIR)\n # Requires OpenCL compile library flag and librt\n LIBRARIES += OpenCL rt\n # Additional flags\n COMMON_FLAGS += -DUSE_GREENTEA\nendif\n\n##############################\n# Derive include and lib directories\n##############################\nCUDA_INCLUDE_DIR := $(CUDA_DIR)/include\n\nCUDA_LIB_DIR :=\n# add /lib64 only if it exists\nifneq ("$(wildcard $(CUDA_DIR)/lib64)","")\n CUDA_LIB_DIR += $(CUDA_DIR)/lib64\nendif\nCUDA_LIB_DIR += $(CUDA_DIR)/lib\n\nINCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include\nifneq ($(CPU_ONLY), 1)\n INCLUDE_DIRS += $(CUDA_INCLUDE_DIR)\n LIBRARY_DIRS += $(CUDA_LIB_DIR)\n LIBRARIES += cudart cublas curand\nendif\nLIBRARIES += glog gflags protobuf leveldb snappy \\n lmdb boost_system hdf5_hl hdf5 m \\n opencv_core opencv_highgui opencv_imgproc\nPYTHON_LIBRARIES := boost_python python2.7\nWARNINGS := -Wall -Wno-sign-compare\n\n##############################\n# Set build directories\n##############################\n\nDISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib\nDIST_ALIASES := dist\nifneq ($(strip $(DISTRIBUTE_DIR)),distribute)\n DIST_ALIASES += distribute\nendif\n\nALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \\n $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \\n $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \\n $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR))\n\n##############################\n# Set directory for Doxygen-generated documentation\n##############################\nDOXYGEN_CONFIG_FILE ?= ./.Doxyfile\n# should be the same as OUTPUT_DIRECTORY in the .Doxyfile\nDOXYGEN_OUTPUT_DIR ?= ./doxygen\nDOXYGEN_COMMAND ?= doxygen\n# All the files that might have Doxygen documentation.\nDOXYGEN_SOURCES := $(shell find \\n src/$(PROJECT) \\n include/$(PROJECT) \\n python/ \\n matlab/ \\n examples \\n tools \\n -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \\n -name "*.py" -or -name "*.m")\nDOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE)\n\n\n##############################\n# Configure build\n##############################\n\n# Determine platform\nUNAME := $(shell uname -s)\nifeq ($(UNAME), Linux)\n LINUX := 1\nelse ifeq ($(UNAME), Darwin)\n OSX := 1\nendif\n\n# Linux\nifeq ($(LINUX), 1)\n CXX ?= /usr/bin/g++\n GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)\n # older versions of gcc are too dumb to build boost with -Wuninitalized\n ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1)\n WARNINGS += -Wno-uninitialized\n endif\n # boost::thread is reasonably called boost_thread (compare OS X)\n # We will also explicitly add stdc++ to the link target.\n LIBRARIES += boost_thread stdc++\nendif\n\n# OS X:\n# clang++ instead of g++\n# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0\nifeq ($(OSX), 1)\n CXX := /usr/bin/clang++\n CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d')\n ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1)\n CXXFLAGS += -stdlib=libstdc++\n LINKFLAGS += -stdlib=libstdc++\n endif\n # clang throws this warning for cuda headers\n WARNINGS += -Wno-unneeded-internal-declaration\n # gtest needs to use its own tuple to not conflict with clang\n COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1\n # boost::thread is called boost_thread-mt to mark multithreading on OS X\n LIBRARIES += boost_thread-mt\n # we need to explicitly ask for the rpath to be obeyed\n DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so\n ORIGIN := @loader_path\nelse\n ORIGIN := \$$ORIGIN\nendif\n\n# Custom compiler\nifdef CUSTOM_CXX\n CXX := $(CUSTOM_CXX)\nendif\n\n# Static linking\nifneq (,$(findstring clang++,$(CXX)))\n STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME)\nelse ifneq (,$(findstring g++,$(CXX)))\n STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive\nelse\n # The following line must not be indented with a tab, since we are not inside a target\n $(error Cannot static link with the $(CXX) compiler)\nendif\n\n# Debugging\nifeq ($(DEBUG), 1)\n COMMON_FLAGS += -DDEBUG -g -O0\n NVCCFLAGS += -G\nelse\n COMMON_FLAGS += -DNDEBUG -O2\nendif\n\n# cuDNN acceleration configuration.\nifeq ($(USE_CUDNN), 1)\n LIBRARIES += cudnn\n COMMON_FLAGS += -DUSE_CUDNN\nendif\n\n# CPU-only configuration\nifeq ($(CPU_ONLY), 1)\n OBJS := $(PROTO_OBJS) $(CXX_OBJS)\n TEST_OBJS := $(TEST_CXX_OBJS)\n TEST_BINS := $(TEST_CXX_BINS)\n ALL_WARNS := $(ALL_CXX_WARNS)\n TEST_FILTER := --gtest_filter="-*GPU*"\n COMMON_FLAGS += -DCPU_ONLY\nendif\n\n# Python layer support\nifeq ($(WITH_PYTHON_LAYER), 1)\n COMMON_FLAGS += -DWITH_PYTHON_LAYER\n LIBRARIES += $(PYTHON_LIBRARIES)\nendif\n\n# BLAS configuration (default = ATLAS)\nBLAS ?= atlas\nifeq ($(BLAS), mkl)\n # MKL\n LIBRARIES += mkl_rt\n COMMON_FLAGS += -DUSE_MKL\n MKL_DIR ?= /opt/intel/mkl\n BLAS_INCLUDE ?= $(MKL_DIR)/include\n BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64\nelse ifeq ($(BLAS), open)\n # OpenBLAS\n LIBRARIES += openblas\nelse\n # ATLAS\n ifeq ($(LINUX), 1)\n ifeq ($(BLAS), atlas)\n # Linux simply has cblas and atlas\n LIBRARIES += cblas atlas\n endif\n else ifeq ($(OSX), 1)\n # OS X packages atlas as the vecLib framework\n LIBRARIES += cblas\n # 10.10 has accelerate while 10.9 has veclib\n XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6')\n ifneq (,$(findstring version: 6,$(XCODE_CLT_VER)))\n BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/\n LDFLAGS += -framework Accelerate\n else\n BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/\n LDFLAGS += -framework vecLib\n endif\n endif\nendif\nINCLUDE_DIRS += $(BLAS_INCLUDE)\nLIBRARY_DIRS += $(BLAS_LIB)\n\nLIBRARY_DIRS += $(LIB_BUILD_DIR)\n\n# Automatic dependency generation (nvcc is handled separately)\nCXXFLAGS += -MMD -MP\n\n# Complete build flags.\nCOMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))\nCXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)\nNVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)\n# mex may invoke an older gcc that is too liberal with -Wuninitalized\nMATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized\nLINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)\n\nUSE_PKG_CONFIG ?= 0\nifeq ($(USE_PKG_CONFIG), 1)\n PKG_CONFIG := $(shell pkg-config opencv --libs)\nelse\n PKG_CONFIG :=\nendif\nLDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \\n $(foreach library,$(LIBRARIES),-l$(library))\nPYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library))\n\n# 'superclean' target recursively* deletes all files ending with an extension\n# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older\n# versions of Caffe that do not place all generated files in a location known\n# to the 'clean' target.\n#\n# 'supercleanlist' will list the files to be deleted by make superclean.\n#\n# * Recursive with the exception that symbolic links are never followed, per the\n# default behavior of 'find'.\nSUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo\n\n# Set the sub-targets of the 'everything' target.\nEVERYTHING_TARGETS := all py$(PROJECT) test warn lint\n# Only build matcaffe as part of "everything" if MATLAB_DIR is specified.\nifneq ($(MATLAB_DIR),)\n EVERYTHING_TARGETS += mat$(PROJECT)\nendif\n\n##############################\n# Define build targets\n##############################\n.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \\n py mat py$(PROJECT) mat$(PROJECT) proto runtest \\n superclean supercleanlist supercleanfiles warn everything\n\nall: clkernels $(STATIC_NAME) $(DYNAMIC_NAME) tools examples\n\neverything: $(EVERYTHING_TARGETS)\n\nlinecount:\n cloc --read-lang-def=$(PROJECT).cloc \\n src/$(PROJECT) include/$(PROJECT) tools examples \\n python matlab\n\nlint: $(EMPTY_LINT_REPORT)\n\nlintclean:\n @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT)\n\ndocs: $(DOXYGEN_OUTPUT_DIR)\n @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen\n\n$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES)\n $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE)\n\n$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR)\n @ cat $(LINT_OUTPUTS) > $@\n @ if [ -s "$@" ]; then \\n cat $@; \\n mv $@ $(NONEMPTY_LINT_REPORT); \\n echo "Found one or more lint errors."; \\n exit 1; \\n fi; \\n $(RM) $(NONEMPTY_LINT_REPORT); \\n echo "No lint errors!";\n\n$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR)\n @ mkdir -p $(dir $@)\n @ python $(LINT_SCRIPT) $< 2>&1 \\n | grep -v "^Done processing " \\n | grep -v "^Total errors found: 0" \\n > $@ \\n || true\n\ntest: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS)\n\ntools: $(TOOL_BINS) $(TOOL_BIN_LINKS)\n\nexamples: $(EXAMPLE_BINS)\n\npy$(PROJECT): py\n\npy: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)\n\n$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME)\n @ echo CXX/LD -o $@ $<\n $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \\n -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \\n -Wl,-rpath,$(ORIGIN)/../../build/lib\n\nmat$(PROJECT): mat\n\nmat: $(MAT$(PROJECT)_SO)\n\n$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)\n @ if [ -z "$(MATLAB_DIR)" ]; then \\n echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \\n "to build mat$(PROJECT)."; \\n exit 1; \\n fi\n @ echo MEX $<\n $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \\n CXX="$(CXX)" \\n CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \\n CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@\n\nruntest: $(TEST_ALL_BIN)\n $(TOOL_BUILD_DIR)/caffe\n $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER)\n\npytest: py\n cd python; python -m unittest discover -s caffe/test\n\nwarn: $(EMPTY_WARN_REPORT)\n\n$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)\n @ cat $(ALL_WARNS) > $@\n @ if [ -s "$@" ]; then \\n cat $@; \\n mv $@ $(NONEMPTY_WARN_REPORT); \\n echo "Compiler produced one or more warnings."; \\n exit 1; \\n fi; \\n $(RM) $(NONEMPTY_WARN_REPORT); \\n echo "No compiler warnings!";\n\n$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o\n\n$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked\n\n# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link\n# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it\n# exists and $(DEBUG) is toggled later.\n$(BUILD_DIR)/.linked:\n @ mkdir -p $(BUILD_DIR)\n @ $(RM) $(OTHER_BUILD_DIR)/.linked\n @ $(RM) -r $(BUILD_DIR_LINK)\n @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK)\n @ touch $@\n\n$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)\n @ mkdir -p $@\n\n$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)\n @ echo LD -o $@\n $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS)\n\n$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)\n @ echo AR -o $@\n $(Q)ar rcs $@ $(OBJS)\n\n$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS)\n @ echo CXX $<\n $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \\n || (cat $@.$(WARNS_EXT); exit 1)\n @ cat $@.$(WARNS_EXT)\n\n$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \\n | $(PROTO_BUILD_DIR)\n @ echo CXX $<\n $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \\n || (cat $@.$(WARNS_EXT); exit 1)\n @ cat $@.$(WARNS_EXT)\n\n$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS)\n @ echo NVCC $<\n $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \\n -odir $(@D)\n $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \\n || (cat $@.$(WARNS_EXT); exit 1)\n @ cat $@.$(WARNS_EXT)\n\n$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \\n | $(DYNAMIC_NAME) $(TEST_BIN_DIR)\n @ echo CXX/LD -o $@ $<\n $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \\n -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib\n\n$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \\n $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)\n @ echo LD $<\n $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \\n -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib\n\n$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \\n $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)\n @ echo LD $<\n $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \\n -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib\n\n# Target for extension-less symlinks to tool binaries with extension '*.bin'.\n$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR)\n @ $(RM) $@\n @ ln -s $(abspath $<) $@\n\n$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME)\n @ echo CXX/LD -o $@\n $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \\n -Wl,-rpath,$(ORIGIN)/../lib\n\n$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME)\n @ echo CXX/LD -o $@\n $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \\n -Wl,-rpath,$(ORIGIN)/../../lib\n \nclkernels: src/caffe/greentea/cl_kernels/*.cl\n src/caffe/greentea/cl_kernels.sh\n\nproto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER)\n\n$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \\n $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR)\n @ echo PROTOC $<\n $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $<\n\n$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \\n $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR)\n @ echo PROTOC \(python\) $<\n $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $<\n\n$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)\n touch $(PY_PROTO_INIT)\n\nclean:\n @- $(RM) -rf $(ALL_BUILD_DIRS)\n @- $(RM) -rf $(OTHER_BUILD_DIR)\n @- $(RM) -rf $(BUILD_DIR_LINK)\n @- $(RM) -rf $(DISTRIBUTE_DIR)\n @- $(RM) $(PY$(PROJECT)_SO)\n @- $(RM) $(MAT$(PROJECT)_SO)\n\nsupercleanfiles:\n $(eval SUPERCLEAN_FILES := $(strip \\n $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . -name '*$(ext)' \\n -not -path './data/*'))))\n\nsupercleanlist: supercleanfiles\n @ \\n if [ -z "$(SUPERCLEAN_FILES)" ]; then \\n echo "No generated files found."; \\n else \\n echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \\n fi\n\nsuperclean: clean supercleanfiles\n @ \\n if [ -z "$(SUPERCLEAN_FILES)" ]; then \\n echo "No generated files found."; \\n else \\n echo "Deleting the following generated files:"; \\n echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \\n $(RM) $(SUPERCLEAN_FILES); \\n fi\n\n$(DIST_ALIASES): $(DISTRIBUTE_DIR)\n\n$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS)\n # add include\n cp -r include $(DISTRIBUTE_DIR)/\n mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto\n cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto\n # add tool and example binaries\n cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin\n cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin\n # add libraries\n cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib\n cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib\n # add python - it's not the standard way, indeed...\n cp -r python $(DISTRIBUTE_DIR)/python\n\n-include $(DEPS) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index eb9e97f5e27..f04e28a3674 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -5,7 +5,6 @@ #include #include -#include "boost/algorithm/string.hpp" #include "caffe/caffe.hpp" using caffe::Blob; @@ -77,19 +76,6 @@ int device_query() { } RegisterBrewFunction(device_query); -// Load the weights from the specified caffemodel(s) into the train and -// test nets. -void CopyLayers(caffe::Solver* solver, const std::string& model_list) { - std::vector model_names; - boost::split(model_names, model_list, boost::is_any_of(",") ); - for (int i = 0; i < model_names.size(); ++i) { - LOG(INFO) << "Finetuning from " << model_names[i]; - solver->net()->CopyTrainedLayersFrom(model_names[i]); - for (int j = 0; j < solver->test_nets().size(); ++j) { - solver->test_nets()[j]->CopyTrainedLayersFrom(model_names[i]); - } - } -} // Train / Finetune a model. int train() { @@ -126,7 +112,8 @@ int train() { LOG(INFO) << "Resuming from " << FLAGS_snapshot; solver->Solve(FLAGS_snapshot); } else if (FLAGS_weights.size()) { - CopyLayers(&*solver, FLAGS_weights); + LOG(INFO) << "Finetuning from " << FLAGS_weights; + solver->net()->CopyTrainedLayersFrom(FLAGS_weights); solver->Solve(); } else { solver->Solve(); diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index 364c436dfd8..f86ff96ca82 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -147,9 +147,9 @@ int feature_extraction_pipeline(int argc, char** argv) { int dim_features = feature_blob->count() / batch_size; const Dtype* feature_blob_data; for (int n = 0; n < batch_size; ++n) { - datum.set_height(feature_blob->height()); - datum.set_width(feature_blob->width()); - datum.set_channels(feature_blob->channels()); + datum.set_height(dim_features); + datum.set_width(1); + datum.set_channels(1); datum.clear_data(); datum.clear_float_data(); feature_blob_data = feature_blob->cpu_data() + From 3b4b138306a622cec5e97651e3745259467c9eb7 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 19 Apr 2015 02:30:33 +0200 Subject: [PATCH 002/254] Compability to new BlobShape. --- include/caffe/blob.hpp | 176 +++++++++++++---- include/caffe/neuron_layers.hpp | 316 ++++++++++++++++++++++--------- models/bvlc_googlenet/readme.md | 1 - src/caffe/blob.cpp | 111 ++++++++--- src/caffe/layers/prelu_layer.cpp | 12 +- src/caffe/proto/caffe.proto | 32 +++- 6 files changed, 493 insertions(+), 155 deletions(-) diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 6ccbb8bf645..3c274a93b9e 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -1,11 +1,17 @@ #ifndef CAFFE_BLOB_HPP_ #define CAFFE_BLOB_HPP_ +#include +#include +#include + #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" +const int kMaxBlobAxes = INT_MAX; + namespace caffe { /** @@ -21,15 +27,13 @@ class Blob { Blob() : data_(), diff_(), - num_(0), - channels_(0), - height_(0), - width_(0), count_(0), - capacity_(0){ + capacity_(0) { } explicit Blob(const int num, const int channels, const int height, const int width, DeviceContext device_context); + explicit Blob(const vector& shape, DeviceContext device_context); + /** * @brief Change the dimensions of the blob, allocating new memory if * necessary. @@ -44,35 +48,139 @@ class Blob { * an error; either Net::Forward or Net::Reshape need to be called to * propagate the new input shape to higher layers. */ + void Reshape(const vector& shape, DeviceContext device_context); + void Reshape(const BlobShape& shape, DeviceContext device_context); void Reshape(const int num, const int channels, const int height, const int width, DeviceContext device_context); void ReshapeLike(const Blob& other, DeviceContext device_context); - inline int num() const { - return num_; + inline string shape_string() const { + ostringstream stream; + for (int i = 0; i < shape_.size(); ++i) { + stream << shape_[i] << " "; + } + stream << "(" << count_ << ")"; + return stream.str(); } - inline int channels() const { - return channels_; + inline const vector& shape() const { + return shape_; } - inline int height() const { - return height_; + /** + * @brief Returns the dimension of the index-th axis (or the negative index-th + * axis from the end, if index is negative). + * + * @param index the axis index, which may be negative as it will be + * "canonicalized" using CanonicalAxisIndex. + * Dies on out of range index. + */ + inline int shape(int index) const { + return shape_[CanonicalAxisIndex(index)]; } - inline int width() const { - return width_; + inline int num_axes() const { + return shape_.size(); } inline int count() const { return count_; } - inline int offset(const int n, const int c = 0, const int h = 0, const int w = - 0) const { + + /** + * @brief Compute the volume of a slice; i.e., the product of dimensions + * among a range of axes. + * + * @param start_axis The first axis to include in the slice. + * + * @param end_axis The first axis to exclude from the slice. + */ + inline int count(int start_axis, int end_axis) const { + CHECK_LE(start_axis, end_axis); + CHECK_GE(start_axis, 0); + CHECK_GE(end_axis, 0); + CHECK_LE(start_axis, num_axes()); + CHECK_LE(end_axis, num_axes()); + int count = 1; + for (int i = start_axis; i < end_axis; ++i) { + count *= shape(i); + } + return count; + } + /** + * @brief Compute the volume of a slice spanning from a particular first + * axis to the final axis. + * + * @param start_axis The first axis to include in the slice. + */ + inline int count(int start_axis) const { + return count(start_axis, num_axes()); + } + + /** + * @brief Returns the 'canonical' version of a (usually) user-specified axis, + * allowing for negative indexing (e.g., -1 for the last axis). + * + * @param index the axis index. + * If 0 <= index < num_axes(), return index. + * If -num_axes <= index <= -1, return (num_axes() - (-index)), + * e.g., the last axis index (num_axes() - 1) if index == -1, + * the second to last if index == -2, etc. + * Dies on out of range index. + */ + inline int CanonicalAxisIndex(int axis_index) const { + CHECK_GE(axis_index, -num_axes())<<"axis " << axis_index << " out of range for " << num_axes() + << "-D Blob with shape " << shape_string(); + CHECK_LT(axis_index, num_axes()) + << "axis " << axis_index << " out of range for " << num_axes() + << "-D Blob with shape " << shape_string(); + if (axis_index < 0) { + return axis_index + num_axes(); + } + return axis_index; + } + + /// @brief Deprecated legacy shape accessor num: use shape(0) instead. + inline int num() const {return LegacyShape(0);} + /// @brief Deprecated legacy shape accessor channels: use shape(1) instead. + inline int channels() const {return LegacyShape(1);} + /// @brief Deprecated legacy shape accessor height: use shape(2) instead. + inline int height() const {return LegacyShape(2);} + /// @brief Deprecated legacy shape accessor width: use shape(3) instead. + inline int width() const {return LegacyShape(3);} + inline int LegacyShape(int index) const { + CHECK_LE(num_axes(), 4) + << "Cannot use legacy accessors on Blobs with > 4 axes."; + CHECK_LT(index, 4); + CHECK_GE(index, -4); + if (index >= num_axes() || index < -num_axes()) { + // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse + // indexing) -- this special case simulates the one-padding used to fill + // extraneous axes of legacy blobs. + return 1; + } + return shape(index); + } + inline int offset(const int n, const int c = 0, const int h = 0, + const int w = 0) const { CHECK_GE(n, 0); - CHECK_LE(n, num_); - CHECK_GE(channels_, 0); - CHECK_LE(c, channels_); - CHECK_GE(height_, 0); - CHECK_LE(h, height_); - CHECK_GE(width_, 0); - CHECK_LE(w, width_); - return ((n * channels_ + c) * height_ + h) * width_ + w; + CHECK_LE(n, num()); + CHECK_GE(channels(), 0); + CHECK_LE(c, channels()); + CHECK_GE(height(), 0); + CHECK_LE(h, height()); + CHECK_GE(width(), 0); + CHECK_LE(w, width()); + return ((n * channels() + c) * height() + h) * width() + w; + } + + inline int offset(const vector& indices) const { + CHECK_LE(indices.size(), num_axes()); + int offset = 0; + for (int i = 0; i < num_axes(); ++i) { + offset *= shape(i); + if (indices.size() > i) { + CHECK_GE(indices[i], 0); + CHECK_LT(indices[i], shape(i)); + offset += indices[i]; + } + } + return offset; } /** * @brief Copy from a source Blob. @@ -84,15 +192,15 @@ class Blob { * shape if necessary */ void CopyFrom(const Blob& source, DeviceContext device_context, bool copy_diff = false, - bool reshape = false); + bool reshape = false); inline Dtype data_at(const int n, const int c, const int h, - const int w) const { + const int w) const { return *(cpu_data() + offset(n, c, h, w)); } inline Dtype diff_at(const int n, const int c, const int h, - const int w) const { + const int w) const { return *(cpu_diff() + offset(n, c, h, w)); } @@ -116,7 +224,7 @@ class Blob { Dtype* mutable_cpu_diff(); Dtype* mutable_gpu_diff(); void Update(); - void FromProto(const BlobProto& proto, DeviceContext device_context); + void FromProto(const BlobProto& proto, DeviceContext device_context, bool reshape = true); void ToProto(BlobProto* proto, bool write_diff = false) const; /// @brief Compute the sum of absolute values (L1 norm) of the data. @@ -152,26 +260,26 @@ class Blob { */ void ShareDiff(const Blob& other); + bool ShapeEquals(const BlobProto& other); + /** * @brief Return the device context to which this blob and shared memory belongs */ DeviceContext device_context(); - protected: +protected: shared_ptr data_; shared_ptr diff_; - int num_; - int channels_; - int height_; - int width_; + vector shape_; int count_; int capacity_; DeviceContext device_context_; -DISABLE_COPY_AND_ASSIGN(Blob); + DISABLE_COPY_AND_ASSIGN(Blob); }; // class Blob -}// namespace caffe +} + // namespace caffe #endif // CAFFE_BLOB_HPP_ diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 0c306fb41bf..1d3e191aeb8 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -22,16 +22,21 @@ namespace caffe { * each element of the output depends only on the corresponding input * element. */ -template +template class NeuronLayer : public Layer { public: explicit NeuronLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } }; /** @@ -44,24 +49,31 @@ class NeuronLayer : public Layer { * -# @f$ (N \times C \times H \times W) @f$ * the computed outputs @f$ y = |x| @f$ */ -template +template class AbsValLayer : public NeuronLayer { public: explicit AbsValLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "AbsVal"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "AbsVal"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int ExactNumTopBlobs() const { + return 1; + } protected: /// @copydoc AbsValLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the absolute value inputs. @@ -81,9 +93,11 @@ class AbsValLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; /** @@ -103,20 +117,23 @@ class AbsValLayer : public NeuronLayer { * \end{array} \right. * @f$ */ -template +template class BNLLLayer : public NeuronLayer { public: explicit BNLLLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } - virtual inline const char* type() const { return "BNLL"; } + virtual inline const char* type() const { + return "BNLL"; + } protected: /// @copydoc BNLLLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the BNLL inputs. @@ -135,9 +152,11 @@ class BNLLLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; /** @@ -151,7 +170,7 @@ class BNLLLayer : public NeuronLayer { * -# @f$ (N \times C \times H \times W) @f$ * the computed outputs @f$ y = |x| @f$ */ -template +template class DropoutLayer : public NeuronLayer { public: /** @@ -161,13 +180,16 @@ class DropoutLayer : public NeuronLayer { * Sets the probability @f$ p @f$ that any given unit is dropped. */ explicit DropoutLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Dropout"; } + virtual inline const char* type() const { + return "Dropout"; + } protected: /** @@ -187,13 +209,15 @@ class DropoutLayer : public NeuronLayer { * @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$. */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ Blob rand_vec_; @@ -209,7 +233,7 @@ class DropoutLayer : public NeuronLayer { * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, * and base @f$ \gamma @f$. */ -template +template class ExpLayer : public NeuronLayer { public: /** @@ -221,11 +245,14 @@ class ExpLayer : public NeuronLayer { * the base @f$ \gamma @f$ */ explicit ExpLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Exp"; } + virtual inline const char* type() const { + return "Exp"; + } protected: /** @@ -239,9 +266,9 @@ class ExpLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the exp inputs. @@ -261,9 +288,11 @@ class ExpLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); Dtype inner_scale_, outer_scale_; }; @@ -273,7 +302,7 @@ class ExpLayer : public NeuronLayer { * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, * and power @f$ \gamma @f$. */ -template +template class PowerLayer : public NeuronLayer { public: /** @@ -284,11 +313,14 @@ class PowerLayer : public NeuronLayer { * - power (\b optional, default 1) the power @f$ \gamma @f$ */ explicit PowerLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Power"; } + virtual inline const char* type() const { + return "Power"; + } protected: /** @@ -302,9 +334,9 @@ class PowerLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the power inputs. @@ -327,9 +359,11 @@ class PowerLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); /// @brief @f$ \gamma @f$ from layer_param_.power_param() Dtype power_; @@ -345,7 +379,7 @@ class PowerLayer : public NeuronLayer { * @brief Rectified Linear Unit non-linearity @f$ y = \max(0, x) @f$. * The simple max is fast to compute, and the function does not saturate. */ -template +template class ReLULayer : public NeuronLayer { public: /** @@ -355,9 +389,12 @@ class ReLULayer : public NeuronLayer { * the value @f$ \nu @f$ by which negative values are multiplied. */ explicit ReLULayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } - virtual inline const char* type() const { return "ReLU"; } + virtual inline const char* type() const { + return "ReLU"; + } protected: /** @@ -372,9 +409,9 @@ class ReLULayer : public NeuronLayer { * the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$. */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the ReLU inputs. @@ -405,9 +442,11 @@ class ReLULayer : public NeuronLayer { * @f$. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; #ifdef USE_CUDNN @@ -416,23 +455,23 @@ class ReLULayer : public NeuronLayer { */ template class CuDNNReLULayer : public ReLULayer { - public: +public: explicit CuDNNReLULayer(const LayerParameter& param) - : ReLULayer(param), handles_setup_(false) {} + : ReLULayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNReLULayer(); - protected: +protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; + cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_; cudnnTensor4dDescriptor_t top_desc_; }; @@ -446,13 +485,16 @@ class CuDNNReLULayer : public ReLULayer { * Note that the gradient vanishes as the values move away from 0. * The ReLULayer is often a better choice for this reason. */ -template +template class SigmoidLayer : public NeuronLayer { public: explicit SigmoidLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } - virtual inline const char* type() const { return "Sigmoid"; } + virtual inline const char* type() const { + return "Sigmoid"; + } protected: /** @@ -466,9 +508,9 @@ class SigmoidLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the sigmoid inputs. @@ -488,9 +530,11 @@ class SigmoidLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; #ifdef USE_CUDNN @@ -499,23 +543,23 @@ class SigmoidLayer : public NeuronLayer { */ template class CuDNNSigmoidLayer : public SigmoidLayer { - public: +public: explicit CuDNNSigmoidLayer(const LayerParameter& param) - : SigmoidLayer(param), handles_setup_(false) {} + : SigmoidLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNSigmoidLayer(); - protected: +protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; + cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_; cudnnTensor4dDescriptor_t top_desc_; }; @@ -529,13 +573,16 @@ class CuDNNSigmoidLayer : public SigmoidLayer { * Note that the gradient vanishes as the values move away from 0. * The ReLULayer is often a better choice for this reason. */ -template +template class TanHLayer : public NeuronLayer { public: explicit TanHLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } - virtual inline const char* type() const { return "TanH"; } + virtual inline const char* type() const { + return "TanH"; + } protected: /** @@ -549,9 +596,9 @@ class TanHLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the sigmoid inputs. @@ -573,9 +620,11 @@ class TanHLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); }; #ifdef USE_CUDNN @@ -584,23 +633,23 @@ class TanHLayer : public NeuronLayer { */ template class CuDNNTanHLayer : public TanHLayer { - public: +public: explicit CuDNNTanHLayer(const LayerParameter& param) - : TanHLayer(param), handles_setup_(false) {} + : TanHLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNTanHLayer(); - protected: +protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; + cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_; cudnnTensor4dDescriptor_t top_desc_; }; @@ -610,7 +659,7 @@ class CuDNNTanHLayer : public TanHLayer { * @brief Tests whether the input exceeds a threshold: outputs 1 for inputs * above threshold; 0 otherwise. */ -template +template class ThresholdLayer : public NeuronLayer { public: /** @@ -620,11 +669,14 @@ class ThresholdLayer : public NeuronLayer { * the threshold value @f$ t @f$ to which the input values are compared. */ explicit ThresholdLayer(const LayerParameter& param) - : NeuronLayer(param) {} + : NeuronLayer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "Threshold"; } + virtual inline const char* type() const { + return "Threshold"; + } protected: /** @@ -642,18 +694,108 @@ class ThresholdLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /// @brief Not implemented (non-differentiable function) virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { NOT_IMPLEMENTED; } Dtype threshold_; }; +/** + * @brief Parameterized Rectified Linear Unit non-linearity @f$ + * y_i = \max(0, x_i) + a_i \min(0, x_i) + * @f$. The differences from ReLULayer are 1) negative slopes are + * learnable though backprop and 2) negative slopes can vary across + * channels. The number of axes of input blob should be greater than or + * equal to 2. The 1st axis (0-based) is seen as channels. + */ +template +class PReLULayer : public NeuronLayer { + public: + /** + * @param param provides PReLUParameter prelu_param, + * with PReLULayer options: + * - filler (\b optional, FillerParameter, + * default {'type': constant 'value':0.25}). + * - channel_shared (\b optional, default false). + * negative slopes are shared across channels. + */ + explicit PReLULayer(const LayerParameter& param) + : NeuronLayer(param) { + } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { + return "PReLU"; + } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times ...) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times ...) @f$ + * the computed outputs for each channel @f$i@f$ @f$ + * y_i = \max(0, x_i) + a_i \min(0, x_i) + * @f$. + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the PReLU inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times ...) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times ...) @f$ + * the inputs @f$ x @f$; For each channel @f$i@f$, backward fills their + * diff with gradients @f$ + * \frac{\partial E}{\partial x_i} = \left\{ + * \begin{array}{lr} + * a_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ + * \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i > 0 + * \end{array} \right. + * @f$. + * If param_propagate_down_[0] is true, it fills the diff with gradients + * @f$ + * \frac{\partial E}{\partial a_i} = \left\{ + * \begin{array}{lr} + * \sum_{x_i} x_i \frac{\partial E}{\partial y_i} & \mathrm{if} \; x_i \le 0 \\ + * 0 & \mathrm{if} \; x_i > 0 + * \end{array} \right. + * @f$. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + bool channel_shared_; + Blob multiplier_; // dot multipler for backward computation of params + Blob bottom_memory_; // memory for in-place computation +}; + } // namespace caffe #endif // CAFFE_NEURON_LAYERS_HPP_ diff --git a/models/bvlc_googlenet/readme.md b/models/bvlc_googlenet/readme.md index 2e22dfcf59a..061b6d74530 100644 --- a/models/bvlc_googlenet/readme.md +++ b/models/bvlc_googlenet/readme.md @@ -5,7 +5,6 @@ caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel license: unrestricted sha1: 405fc5acd08a3bb12de8ee5e23a96bec22f08204 caffe_commit: bc614d1bd91896e3faceaf40b23b72dab47d44f5 -gist_id: 866e2aa1fd707b89b913 --- This model is a replication of the model described in the [GoogleNet](http://arxiv.org/abs/1409.4842) publication. We would like to thank Christian Szegedy for all his help in the replication of GoogleNet model. diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 7304469f078..33e38794062 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -13,16 +13,27 @@ namespace caffe { template void Blob::Reshape(const int num, const int channels, const int height, const int width, DeviceContext device_context) { - CHECK_GE(num, 0); - CHECK_GE(channels, 0); - CHECK_GE(height, 0); - CHECK_GE(width, 0); - num_ = num; - channels_ = channels; - height_ = height; - width_ = width; - count_ = num_ * channels_ * height_ * width_; + vector shape(4); + shape[0] = num; + shape[1] = channels; + shape[2] = height; + shape[3] = width; device_context_ = device_context; + Reshape(shape, device_context); +} + +template +void Blob::Reshape(const vector& shape, + DeviceContext device_context) { + CHECK_LE(shape.size(), kMaxBlobAxes); + count_ = 1; + shape_.resize(shape.size()); + device_context_ = device_context; + for (int i = 0; i < shape.size(); ++i) { + CHECK_GE(shape[i], 0); + count_ *= shape[i]; + shape_[i] = shape[i]; + } if (count_ > capacity_) { capacity_ = count_; data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype), device_context_)); @@ -30,6 +41,18 @@ void Blob::Reshape(const int num, const int channels, const int height, } } +template +void Blob::Reshape(const BlobShape& shape, + DeviceContext device_context) { + CHECK_LE(shape.dim_size(), kMaxBlobAxes); + vector shape_vec(shape.dim_size()); + device_context_ = device_context; + for (int i = 0; i < shape.dim_size(); ++i) { + shape_vec[i] = shape.dim(i); + } + Reshape(shape_vec, device_context_); +} + template void Blob::ReshapeLike(const Blob& other, DeviceContext device_context) { @@ -434,19 +457,40 @@ void Blob::scale_diff(Dtype scale_factor) { } } +template +bool Blob::ShapeEquals(const BlobProto& other) { + if (other.has_num() || other.has_channels() || + other.has_height() || other.has_width()) { + // Using deprecated 4D Blob dimensions -- + // shape is (num, channels, height, width). + // Note: we do not use the normal Blob::num(), Blob::channels(), etc. + // methods as these index from the beginning of the blob shape, where legacy + // parameter blobs were indexed from the end of the blob shape (e.g., bias + // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)). + return shape_.size() <= 4 && + LegacyShape(-4) == other.num() && + LegacyShape(-3) == other.channels() && + LegacyShape(-2) == other.height() && + LegacyShape(-1) == other.width(); + } + vector other_shape(other.shape().dim_size()); + for (int i = 0; i < other.shape().dim_size(); ++i) { + other_shape[i] = other.shape().dim(i); + } + return shape_ == other_shape; +} + template void Blob::CopyFrom(const Blob& source, DeviceContext device_context, bool copy_diff, bool reshape) { device_context_ = device_context; - if (num_ != source.num() || channels_ != source.channels() - || height_ != source.height() || width_ != source.width()) { + if (source.count() != count_ || source.shape() != shape_) { if (reshape) { - Reshape(source.num(), source.channels(), source.height(), source.width(), - device_context); + ReshapeLike(source, device_context_); } else { - LOG(FATAL)<< "Trying to copy blobs of different sizes."; + LOG(FATAL) << "Trying to copy blobs of different sizes."; } } switch (Caffe::mode()) { @@ -491,11 +535,30 @@ void Blob::CopyFrom(const Blob& source, DeviceContext device_context, } } -template -void Blob::FromProto(const BlobProto& proto, - DeviceContext device_context) { - Reshape(proto.num(), proto.channels(), proto.height(), proto.width(), - device_context); +template +void Blob::FromProto(const BlobProto& proto, DeviceContext device_context, bool reshape) { + device_context_ = device_context; + if (reshape) { + vector shape; + if (proto.has_num() || proto.has_channels() || + proto.has_height() || proto.has_width()) { + // Using deprecated 4D Blob dimensions -- + // shape is (num, channels, height, width). + shape.resize(4); + shape[0] = proto.num(); + shape[1] = proto.channels(); + shape[2] = proto.height(); + shape[3] = proto.width(); + } else { + shape.resize(proto.shape().dim_size()); + for (int i = 0; i < proto.shape().dim_size(); ++i) { + shape[i] = proto.shape().dim(i); + } + } + Reshape(shape, device_context_); + } else { + CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)"; + } // copy data Dtype* data_vec = mutable_cpu_data(); for (int i = 0; i < count_; ++i) { @@ -509,12 +572,12 @@ void Blob::FromProto(const BlobProto& proto, } } -template +template void Blob::ToProto(BlobProto* proto, bool write_diff) const { - proto->set_num(num_); - proto->set_channels(channels_); - proto->set_height(height_); - proto->set_width(width_); + proto->clear_shape(); + for (int i = 0; i < shape_.size(); ++i) { + proto->mutable_shape()->add_dim(shape_[i]); + } proto->clear_data(); proto->clear_diff(); const Dtype* data_vec = cpu_data(); diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 7119a274dd3..8d482e1ce4a 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -20,9 +20,9 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, } else { this->blobs_.resize(1); if (channel_shared_) { - this->blobs_[0].reset(new Blob(vector(0))); + this->blobs_[0].reset(new Blob(vector(0),this->device_context_)); } else { - this->blobs_[0].reset(new Blob(vector(1, channels))); + this->blobs_[0].reset(new Blob(vector(1, channels),this->device_context_)); } shared_ptr > filler; if (prelu_param.has_filler()) { @@ -33,7 +33,7 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, filler_param.set_value(0.25); filler.reset(GetFiller(filler_param)); } - filler->Fill(this->blobs_[0].get()); + filler->Fill(this->blobs_[0].get(),this->device_context_); } if (channel_shared_) { CHECK_EQ(this->blobs_[0]->count(), 1) @@ -45,7 +45,7 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num())); + multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num()),this->device_context_); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -54,10 +54,10 @@ void PReLULayer::Reshape(const vector*>& bottom, const vector*>& top) { CHECK_GE(bottom[0]->num_axes(), 2) << "Number of axes of bottom blob must be >=2."; - top[0]->ReshapeLike(*bottom[0]); + top[0]->ReshapeLike(*bottom[0],this->device_context_); if (bottom[0] == top[0]) { // For in-place computation - bottom_memory_.ReshapeLike(*bottom[0]); + bottom_memory_.ReshapeLike(*bottom[0],this->device_context_); } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index b36e9491880..9e02939b1ca 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -2,13 +2,21 @@ syntax = "proto2"; package caffe; +// Specifies the shape (dimensions) of a Blob. +message BlobShape { + repeated int64 dim = 1 [packed = true]; +} + message BlobProto { + optional BlobShape shape = 7; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; + + // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; optional int32 channels = 2 [default = 0]; optional int32 height = 3 [default = 0]; optional int32 width = 4 [default = 0]; - repeated float data = 5 [packed = true]; - repeated float diff = 6 [packed = true]; } // The BlobProtoVector is simply a way to pass multiple blobproto instances @@ -47,6 +55,10 @@ message NetParameter { optional string name = 1; // consider giving the network a name // The input blobs to the network. repeated string input = 3; + // The shape of the input blobs. + repeated BlobShape input_shape = 8; + + // The dim of the input blobs. For each input blob there should be four // values specifying the num, channels, height and width of the input blob. // Thus, there should be a total of (4 * #input) numbers. @@ -246,7 +258,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 131 (last added: python_param) +// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -337,6 +349,7 @@ message LayerParameter { optional MVNParameter mvn_param = 120; optional PoolingParameter pooling_param = 121; optional PowerParameter power_param = 122; + optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; optional ReLUParameter relu_param = 123; optional SigmoidParameter sigmoid_param = 124; @@ -481,6 +494,8 @@ message DummyDataParameter { // If 1 data_filler is specified, it is applied to all top blobs. If N are // specified, the ith is applied to the ith top blob. repeated FillerParameter data_filler = 1; + repeated BlobShape shape = 6; + repeated uint32 num = 2; repeated uint32 channels = 3; repeated uint32 height = 4; @@ -963,3 +978,14 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } + +// Message that stores parameters used by PReLULayer +message PReLUParameter { + // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: + // Surpassing Human-Level Performance on ImageNet Classification, 2015. + + // Initial value of a_i. Default is a_i=0.25 for all i. + optional FillerParameter filler = 1; + // Whether or not slope paramters are shared across channels. + optional bool channel_shared = 2 [default = false]; +} From 5437f11a523c53c031eb25324c45a3cfc4d1e128 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 19 Apr 2015 15:46:21 +0200 Subject: [PATCH 003/254] Almost working! --- Makefile | 22 ++++--- include/caffe/greentea/greentea.hpp | 3 +- src/caffe/blob.cpp | 35 ++++++----- src/caffe/greentea/cl_kernels.cpp | 4 +- .../cl_kernels/im2col_sk_gpu_kernel.cl | 4 +- .../greentea/cl_kernels/pooling_sk_kernels.cl | 3 +- src/caffe/greentea/greentea.cpp | 31 ++++------ src/caffe/greentea/greentea_im2col.cpp | 10 +++- .../greentea/greentea_math_functions.cpp | 4 +- src/caffe/layers/conv_sk_layer.cu | 2 +- src/caffe/layers/pooling_sk_layer.cu | 9 +-- src/caffe/layers/relu_layer.cu | 4 +- src/caffe/layers/silence_layer.cu | 2 +- src/caffe/layers/softmax_layer.cu | 59 +++++++++---------- 14 files changed, 103 insertions(+), 89 deletions(-) diff --git a/Makefile b/Makefile index 966e263a011..0144cf179d3 100644 --- a/Makefile +++ b/Makefile @@ -180,26 +180,32 @@ ifeq ($(USE_GREENTEA),1) CLLINC = '$(AMDAPPSDKROOT)/include' endif + # Use AMD clBLAS, TODO: Not implemented yet + ifeq ($(USE_CLBLAS), 1) + LIBRARIES += clblas + COMMON_FLAGS += -DUSE_CLBLAS + endif + + # Use ViennaCL BLAS + ifeq ($(USE_VIENNACLBLAS), 1) + LIBRARIES += viennacl + COMMON_FLAGS += -DUSE_VIENNACLBLAS + endif + # Requires valid OpenCL library LIBRARY_DIRS += $(CLLIBS) # Requires valid OpenCL headers and valid ViennaCL INCLUDE_DIRS += $(CLLINC) $(VIENNACL_DIR) # Requires OpenCL compile library flag and librt - LIBRARIES += viennacl OpenCL rt + LIBRARIES += OpenCL rt # Additional flags COMMON_FLAGS += -DUSE_GREENTEA -DVIENNACL_WITH_OPENCL # Viennacl runtime debug output - ifeq ($(DEBUG), 1) + ifeq ($(DEBUG), 0) COMMON_FLAGS += -DVIENNACL_DEBUG_ALL endif - # Use AMD clBLAS, TODO: Not implemented yet - ifeq ($(USE_CLBLAS), 1) - LIBRARIES += clblas - COMMON_FLAGS += -USE_CLBLAS - endif - CL_KERNELS_CPP = src/caffe/greentea/cl_kernels.cpp CL_KERNELS = src/caffe/greentea/cl_kernels/*.cl CL_KERNELS_SH = src/caffe/greentea/cl_kernels.sh diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 31426616f0a..037dc453a05 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -34,8 +34,7 @@ namespace caffe { template cl_mem Subregion(cl_mem in, size_t off, size_t size); -template -viennacl::vector WrapVector(cl_mem in); +viennacl::ocl::handle WrapHandle(cl_mem in, viennacl::ocl::context &ctx); #endif enum Backend { diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 33e38794062..316f3c3a55f 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -69,6 +69,14 @@ Blob::Blob(const int num, const int channels, const int height, Reshape(num, channels, height, width, device_context); } +template +Blob::Blob(const vector& shape, DeviceContext device_context) + // capacity_ must be initialized before calling Reshape + : capacity_(0), + device_context_(device_context) { + Reshape(shape, device_context_); +} + template const Dtype* Blob::cpu_data() const { CHECK(data_); @@ -457,21 +465,19 @@ void Blob::scale_diff(Dtype scale_factor) { } } -template +template bool Blob::ShapeEquals(const BlobProto& other) { - if (other.has_num() || other.has_channels() || - other.has_height() || other.has_width()) { + if (other.has_num() || other.has_channels() || other.has_height() + || other.has_width()) { // Using deprecated 4D Blob dimensions -- // shape is (num, channels, height, width). // Note: we do not use the normal Blob::num(), Blob::channels(), etc. // methods as these index from the beginning of the blob shape, where legacy // parameter blobs were indexed from the end of the blob shape (e.g., bias // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)). - return shape_.size() <= 4 && - LegacyShape(-4) == other.num() && - LegacyShape(-3) == other.channels() && - LegacyShape(-2) == other.height() && - LegacyShape(-1) == other.width(); + return shape_.size() <= 4 && LegacyShape(-4) == other.num() + && LegacyShape(-3) == other.channels() + && LegacyShape(-2) == other.height() && LegacyShape(-1) == other.width(); } vector other_shape(other.shape().dim_size()); for (int i = 0; i < other.shape().dim_size(); ++i) { @@ -490,7 +496,7 @@ void Blob::CopyFrom(const Blob& source, DeviceContext device_context, if (reshape) { ReshapeLike(source, device_context_); } else { - LOG(FATAL) << "Trying to copy blobs of different sizes."; + LOG(FATAL)<< "Trying to copy blobs of different sizes."; } } switch (Caffe::mode()) { @@ -535,13 +541,14 @@ void Blob::CopyFrom(const Blob& source, DeviceContext device_context, } } -template -void Blob::FromProto(const BlobProto& proto, DeviceContext device_context, bool reshape) { +template +void Blob::FromProto(const BlobProto& proto, + DeviceContext device_context, bool reshape) { device_context_ = device_context; if (reshape) { vector shape; - if (proto.has_num() || proto.has_channels() || - proto.has_height() || proto.has_width()) { + if (proto.has_num() || proto.has_channels() || proto.has_height() + || proto.has_width()) { // Using deprecated 4D Blob dimensions -- // shape is (num, channels, height, width). shape.resize(4); @@ -572,7 +579,7 @@ void Blob::FromProto(const BlobProto& proto, DeviceContext device_context } } -template +template void Blob::ToProto(BlobProto* proto, bool write_diff) const { proto->clear_shape(); for (int i = 0; i < shape_.size(); ++i) { diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 03c9facd444..45a9a4960ca 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,8 +6,8 @@ namespace caffe { std::string activation_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void relu_forward_s(const int n, __global const float* in,\n __global float* out, float negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void relu_forward_d(const int n, __global const double* in,\n __global double* out, double negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; std::string aux_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void gpu_set_s(const int n, const float alpha, __global float* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}\n\n__kernel void gpu_set_d(const int n, const double alpha, __global double* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_channel_max_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_max_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double maxval = (double) -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_subtract_s(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const float* channel_max,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_subtract_d(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const double* channel_max,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_exp_s(const int count, __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_exp_d(const int count, __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_channel_sum_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_sum_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_div_s(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const float* channel_sum,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_div_d(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const double* channel_sum,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_dot_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data_1,\n __global const float* data_2,\n __global float* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}\n\n__kernel void kernel_channel_dot_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data_1,\n __global const double* data_2,\n __global double* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string im2col_sk_gpu_kernel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string pooling_sk_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global const float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n __global int* mask,\n __global float* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string im2col_sk_gpu_kernel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string pooling_sk_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global const float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n const int use_mask,\n __global int* mask,\n __global float* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_gpu = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MIN 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data,\n __global const float* label,\n __global float* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global float* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((float) (prob_data[n * dim + label_value * spatial_dim + s]),\n (float) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}\n\n__kernel void softmax_loss_forward_gpu_d(int n,\n __global const double* prob_data,\n __global const double* label,\n __global double* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global double* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((double) (prob_data[n * dim + label_value * spatial_dim + s]),\n (double) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; diff --git a/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl b/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl index df18bfa8c22..6e9971da326 100644 --- a/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl +++ b/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl @@ -35,7 +35,7 @@ __kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im, for (int j = 0; j < ext_kernel_w; j += kstride_w) { int h = h_in + i; int w = w_in + j; - *data_col_ptr = + (*data_col_ptr) = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; @@ -73,7 +73,7 @@ __kernel void im2col_sk_gpu_kernel_d(const int n, for (int j = 0; j < ext_kernel_w; j += kstride_w) { int h = h_in + i; int w = w_in + j; - *data_col_ptr = + (*data_col_ptr) = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; diff --git a/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl b/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl index 46937131d34..3dc7b9fdb81 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl @@ -20,6 +20,7 @@ __kernel void max_pool_forward_sk_s(const int nthreads, const int stride_w, const int kstride_h, const int kstride_w, const int pad_h, const int pad_w, __global float* top_data, + const int use_mask, __global int* mask, __global float* top_mask) { @@ -47,7 +48,7 @@ __kernel void max_pool_forward_sk_s(const int nthreads, } } top_data[index] = maxval; - if (mask) { + if (use_mask == 1) { mask[index] = maxidx; } else { top_mask[index] = maxidx; diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index d824b37f0d3..dc4a0ec010c 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -19,34 +19,29 @@ cl_mem Subregion(cl_mem in, size_t off, size_t size) { const cl_mem out = clCreateSubBuffer(in, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, region, &status); - std::cout << "Subregion: " << status << std::endl; return out; } template cl_mem Subregion(cl_mem in, size_t off, size_t size); template cl_mem Subregion(cl_mem in, size_t off, size_t size); +template cl_mem Subregion(cl_mem in, size_t off, size_t size); +template cl_mem Subregion(cl_mem in, size_t off, size_t size); -template -viennacl::vector WrapVector(cl_mem in) { - if (in == NULL) { - size_t size; - clGetMemObjectInfo(in, CL_MEM_SIZE, sizeof(size_t), &size, NULL); - viennacl::vector out(in, viennacl::OPENCL_MEMORY, - size / sizeof(Dtype)); - return out; +viennacl::ocl::handle WrapHandle(cl_mem in, + viennacl::ocl::context &ctx) { + if (in != NULL) { + viennacl::ocl::handle memhandle(in, ctx); + memhandle.inc(); + return memhandle; } else { - std::cout << "HERE!" << std::endl; - void* ptr = NULL; - viennacl::vector out((cl_mem)&ptr, viennacl::OPENCL_MEMORY, 0); - return out; + cl_int err; + cl_mem dummy = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, 0, + NULL, &err); + viennacl::ocl::handle memhandle(dummy, ctx); + return memhandle; } } -template viennacl::vector WrapVector(cl_mem in); -template viennacl::vector WrapVector(cl_mem in); -template viennacl::vector WrapVector(cl_mem in); -template viennacl::vector WrapVector(cl_mem in); - #endif DeviceContext::DeviceContext() diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 23be6964ed5..7404e22bc50 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -19,6 +19,10 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, const int stride_w, const int kstride_h, const int kstride_w, cl_mem data_col) { + std::cout << "DATA_IM: " << data_im << std::endl; + std::cout << "DATA_COL: " << data_col << std::endl; + + int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; @@ -29,11 +33,13 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, CL_KERNEL_SELECT("im2col_sk_gpu_kernel")); viennacl::ocl::enqueue( - kernel(num_kernels, WrapVector(data_im), height, width, kernel_h, + kernel(num_kernels, WrapHandle(data_im, ctx), height, width, kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, height_col, width_col, - WrapVector(data_col)), + WrapHandle(data_col, ctx)), ctx.get_queue()); + + std::cout << "END OF IM2COL" << std::endl; } // Explicit instantiation diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index e0ec822f792..65e026e6973 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -106,12 +106,12 @@ void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; - /*GREENTEA_BLAS_CHECK( + GREENTEA_BLAS_CHECK( ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, vclOrderC, M, N, K, alpha, A, offArow, offAcol, incArow, incAcol, lda, B, offBrow, offBcol, incBrow, incBcol, ldb, beta, C, offCrow, offCcol, incCrow, - incCcol, ldc));*/ + incCcol, ldc)); } diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 630474df139..850ef41de03 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -99,8 +99,8 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, (Dtype) 0., Subregion(top_data, top[i]->offset(n) + top_offset * g, M_ * N_)); + ctx.get_queue().finish(); } - ctx.get_queue().finish(); std::cout << "After gpu gemm" << std::endl; diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index b4cdc83c4e3..337e792ca75 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -218,15 +218,16 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( CL_KERNEL_SELECT("max_pool_forward_sk")); viennacl::ocl::enqueue( - oclk_max_pool_forward(count, WrapVector((cl_mem) bottom_data), + oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, ctx), bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, pad_h_, pad_w_, - WrapVector((cl_mem) top_data), - WrapVector((cl_mem) mask), - WrapVector((cl_mem) top_mask)), + WrapHandle((cl_mem) top_data, ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, ctx), + WrapHandle((cl_mem) top_mask, ctx)), ctx.get_queue()); ctx.get_queue().finish(); std::cout << "POOLING GREENTEA END" << std::endl; diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index e3654920c2f..498138a2eac 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -43,8 +43,8 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( - oclk_relu_forward(count, WrapVector((cl_mem) bottom_data), - WrapVector((cl_mem) top_data), negative_slope), + oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) top_data, ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); std::cout << "RELU GREENTEA END" << std::endl; diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index 1952edf6c65..1536ca15cea 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -37,7 +37,7 @@ void SilenceLayer::Backward_gpu(const vector*>& top, viennacl::ocl::enqueue( oclk_gpu_set( bottom[i]->count(), Dtype(0), - WrapVector((cl_mem) bottom[i]->mutable_gpu_data())), + WrapHandle((cl_mem) bottom[i]->mutable_gpu_data(),ctx)), ctx.get_queue()); ctx.get_queue().finish(); #endif diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 895977ea795..a7c61e530ea 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -159,8 +159,8 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_max")); viennacl::ocl::enqueue( - oclk_channel_max(num, channels, spatial_dim, WrapVector(top_data), - WrapVector(scale_data)), + oclk_channel_max(num, channels, spatial_dim, WrapHandle(top_data, ctx), + WrapHandle(scale_data, ctx)), ctx.get_queue()); ctx.get_queue().finish(); @@ -168,24 +168,24 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, - WrapVector(scale_data), - WrapVector(top_data)), + WrapHandle(scale_data, ctx), + WrapHandle(top_data, ctx)), ctx.get_queue()); ctx.get_queue().finish(); viennacl::ocl::kernel &oclk_exp = program.get_kernel( CL_KERNEL_SELECT("kernel_exp")); viennacl::ocl::enqueue( - oclk_exp(num * channels * spatial_dim, WrapVector(top_data), - WrapVector(top_data)), + oclk_exp(num * channels * spatial_dim, WrapHandle(top_data, ctx), + WrapHandle(top_data, ctx)), ctx.get_queue()); ctx.get_queue().finish(); viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_sum")); viennacl::ocl::enqueue( - oclk_channel_sum(num, channels, spatial_dim, WrapVector(top_data), - WrapVector(scale_data)), + oclk_channel_sum(num, channels, spatial_dim, WrapHandle(top_data, ctx), + WrapHandle(scale_data, ctx)), ctx.get_queue()); ctx.get_queue().finish(); @@ -193,8 +193,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("kernel_channel_div")); viennacl::ocl::enqueue( oclk_channel_div(count, num, channels, spatial_dim, - WrapVector(scale_data), - WrapVector(top_data)), + WrapHandle(scale_data, ctx), WrapHandle(top_data, ctx)), ctx.get_queue()); ctx.get_queue().finish(); @@ -206,26 +205,26 @@ template void SoftmaxLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* top_data = top[0]->gpu_data(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - Dtype* scale_data = scale_.mutable_gpu_data(); - int count = top[0]->count(); - int num = top[0]->num(); - int channels = top[0]->channels(); - int spatial_dim = top[0]->height() * top[0]->width(); - caffe_copy(top[0]->count(), top_diff, bottom_diff); - // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. - // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_dot<<>>(num, channels, spatial_dim, top_diff, top_data, - scale_data); - // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_subtract<<>>(count, num, channels, spatial_dim, - scale_data, bottom_diff); - // elementwise multiplication - caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); +const Dtype* top_diff = top[0]->gpu_diff(); +const Dtype* top_data = top[0]->gpu_data(); +Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); +Dtype* scale_data = scale_.mutable_gpu_data(); +int count = top[0]->count(); +int num = top[0]->num(); +int channels = top[0]->channels(); +int spatial_dim = top[0]->height() * top[0]->width(); +caffe_copy(top[0]->count(), top_diff, bottom_diff); +// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. +// NOLINT_NEXT_LINE(whitespace/operators) +kernel_channel_dot<<>>(num, channels, spatial_dim, top_diff, top_data, + scale_data); +// NOLINT_NEXT_LINE(whitespace/operators) +kernel_channel_subtract<<>>(count, num, channels, spatial_dim, + scale_data, bottom_diff); +// elementwise multiplication +caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); From 8217233719b8abc5e61e811231a96b0c8fe19490 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 20 Apr 2015 00:11:03 +0200 Subject: [PATCH 004/254] Greentea/OpenCL backend now working for Conv-SK and Pool-SK, as well as ReLU and SoftMax (forwarding only!) --- include/caffe/greentea/greentea_im2col.hpp | 4 +- src/caffe/greentea/cl_kernels.cpp | 4 +- ...gpu_kernel.cl => im2col_sk_gpu_kernels.cl} | 16 +- .../greentea/cl_kernels/pooling_sk_kernels.cl | 15 +- src/caffe/greentea/greentea.cpp | 1 + src/caffe/greentea/greentea_im2col.cpp | 18 +- src/caffe/layers/conv_sk_layer.cu | 3 +- src/caffe/layers/pooling_layer.cu | 372 +++++++++--------- 8 files changed, 224 insertions(+), 209 deletions(-) rename src/caffe/greentea/cl_kernels/{im2col_sk_gpu_kernel.cl => im2col_sk_gpu_kernels.cl} (84%) diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index d428f453627..57e1c0712f3 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -20,8 +20,8 @@ namespace caffe { template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, - const cl_mem data_im, const int channels, + viennacl::ocl::context &ctx, const cl_mem data_im, + const int data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 45a9a4960ca..5389cb1e0d3 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,8 +6,8 @@ namespace caffe { std::string activation_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void relu_forward_s(const int n, __global const float* in,\n __global float* out, float negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void relu_forward_d(const int n, __global const double* in,\n __global double* out, double negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; std::string aux_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void gpu_set_s(const int n, const float alpha, __global float* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}\n\n__kernel void gpu_set_d(const int n, const double alpha, __global double* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_channel_max_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_max_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double maxval = (double) -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_subtract_s(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const float* channel_max,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_subtract_d(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const double* channel_max,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_exp_s(const int count, __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_exp_d(const int count, __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_channel_sum_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_sum_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_div_s(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const float* channel_sum,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_div_d(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const double* channel_sum,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_dot_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data_1,\n __global const float* data_2,\n __global float* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}\n\n__kernel void kernel_channel_dot_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data_1,\n __global const double* data_2,\n __global double* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string im2col_sk_gpu_kernel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int height, const int width,\n const int kernel_h, const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string pooling_sk_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global const float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n const int use_mask,\n __global int* mask,\n __global float* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n bottom_data += (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string im2col_sk_gpu_kernel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string pooling_sk_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n const int use_mask,\n __global int* mask,\n __global float* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_gpu = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MIN 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data,\n __global const float* label,\n __global float* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global float* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((float) (prob_data[n * dim + label_value * spatial_dim + s]),\n (float) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}\n\n__kernel void softmax_loss_forward_gpu_d(int n,\n __global const double* prob_data,\n __global const double* label,\n __global double* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global double* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((double) (prob_data[n * dim + label_value * spatial_dim + s]),\n (double) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; diff --git a/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl b/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernels.cl similarity index 84% rename from src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl rename to src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernels.cl index 6e9971da326..c9ccf80da27 100644 --- a/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernel.cl +++ b/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernels.cl @@ -9,9 +9,9 @@ #pragma OPENCL EXTENSION cl_amd_fp64 : enable __kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int kstride_h, @@ -29,7 +29,7 @@ __kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im, int w_in = w_out * stride_w - pad_w; __global float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; - __global const float* data_im_ptr = data_im; + __global const float* data_im_ptr = data_im + data_offset; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ext_kernel_h; i += kstride_h) { for (int j = 0; j < ext_kernel_w; j += kstride_w) { @@ -47,9 +47,9 @@ __kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im, __kernel void im2col_sk_gpu_kernel_d(const int n, __global const double* data_im, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int kstride_h, @@ -67,7 +67,7 @@ __kernel void im2col_sk_gpu_kernel_d(const int n, int w_in = w_out * stride_w - pad_w; __global double* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; - __global const double* data_im_ptr = data_im; + __global const double* data_im_ptr = data_im + data_offset; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ext_kernel_h; i += kstride_h) { for (int j = 0; j < ext_kernel_w; j += kstride_w) { diff --git a/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl b/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl index 3dc7b9fdb81..a66c7ee3bbb 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl @@ -10,7 +10,7 @@ #pragma OPENCL EXTENSION cl_amd_fp64 : enable __kernel void max_pool_forward_sk_s(const int nthreads, - __global const float* bottom_data, + __global float* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, @@ -23,7 +23,6 @@ __kernel void max_pool_forward_sk_s(const int nthreads, const int use_mask, __global int* mask, __global float* top_mask) { - for (int index = get_global_id(0); index < nthreads; index += get_global_size(0)) { int pw = index % pooled_width; @@ -38,12 +37,12 @@ __kernel void max_pool_forward_sk_s(const int nthreads, wstart = max(wstart, 0); float maxval = -FLT_MAX; int maxidx = -1; - bottom_data += (n * channels + c) * height * width; + __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += kstride_h) { for (int w = wstart; w < wend; w += kstride_w) { - if (bottom_data[h * width + w] > maxval) { + if (bottom_data_ptr[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_data[maxidx]; + maxval = bottom_data_ptr[maxidx]; } } } @@ -85,12 +84,12 @@ __kernel void max_pool_forward_sk_d(const int nthreads, wstart = max(wstart, 0); double maxval = -FLT_MAX; int maxidx = -1; - bottom_data += (n * channels + c) * height * width; + __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += kstride_h) { for (int w = wstart; w < wend; w += kstride_w) { - if (bottom_data[h * width + w] > maxval) { + if (bottom_data_ptr[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_data[maxidx]; + maxval = bottom_data_ptr[maxidx]; } } } diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index dc4a0ec010c..00412ede1af 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -19,6 +19,7 @@ cl_mem Subregion(cl_mem in, size_t off, size_t size) { const cl_mem out = clCreateSubBuffer(in, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, region, &status); + std::cout << "SUBREGION: " << status << std::endl; return out; } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 7404e22bc50..784772c50fa 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -12,17 +12,17 @@ namespace caffe { template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, viennacl::ocl::context &ctx, const cl_mem data_im, - const int channels, const int height, - const int width, const int kernel_h, - const int kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, cl_mem data_col) { + const int data_offset, const int channels, + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + cl_mem data_col) { std::cout << "DATA_IM: " << data_im << std::endl; std::cout << "DATA_COL: " << data_col << std::endl; - int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; @@ -33,7 +33,7 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, CL_KERNEL_SELECT("im2col_sk_gpu_kernel")); viennacl::ocl::enqueue( - kernel(num_kernels, WrapHandle(data_im, ctx), height, width, kernel_h, + kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, height_col, width_col, WrapHandle(data_col, ctx)), @@ -46,6 +46,7 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, viennacl::ocl::context &ctx, const cl_mem data_im, + const int data_offset, const int channels, const int height, const int width, const int kernel_h, @@ -59,6 +60,7 @@ template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, viennacl::ocl::context &ctx, const cl_mem data_im, + const int data_offset, const int channels, const int height, const int width, const int kernel_h, diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 850ef41de03..e3e5c2a5739 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -76,8 +76,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, greentea_im2col_sk_gpu( program, ctx, - Subregion(bottom_data, bottom[i]->offset(n), - channels_ * height_ * width_), + bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); ctx.get_queue().finish(); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 9729990594d..4ee7d119245 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -8,15 +8,17 @@ namespace caffe { -template +template __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, - int* mask, Dtype* top_mask) { + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, + const int pad_h, const int pad_w, + Dtype* top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) -{ + { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -47,14 +49,17 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, } } -template +template __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, + const int pad_h, const int pad_w, + Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) -{ + { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -79,15 +84,17 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, } } -template +template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* rand_idx, Dtype* top_data) { + const Dtype* bottom_data, const int num, + const int channels, const int height, + const int width, const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, Dtype* rand_idx, + Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) -{ + { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -120,106 +127,110 @@ __global__ void StoPoolForwardTrain(const int nthreads, } } - -template -__global__ void StoPoolForwardTest(const int nthreads, - const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); - // We set cumsum to be 0 to avoid divide-by-zero problems - Dtype cumsum = FLT_MIN; - Dtype cumvalues = 0.; - bottom_data += (n * channels + c) * height * width; - // First pass: get sum - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; - cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; +template +__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) + { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; + } } + top_data[index] = cumvalues / cumsum; } - top_data[index] = cumvalues / cumsum; -} } template void PoolingLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { -const Dtype* bottom_data = bottom[0]->gpu_data(); -Dtype* top_data = top[0]->mutable_gpu_data(); -int count = top[0]->count(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. -const bool use_top_mask = top.size() > 1; -int* mask = NULL; -Dtype* top_mask = NULL; -switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); + const bool use_top_mask = top.size() > 1; + int* mask = NULL; + Dtype* top_mask = NULL; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, + mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == TRAIN) { + // We need to create the random index as well. + caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTrain<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, + rand_idx_.mutable_gpu_data(), top_data); } else { - mask = max_idx_.mutable_gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, - mask, top_mask); - break; - case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - if (this->phase_ == TRAIN) { - // We need to create the random index as well. - caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), - rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTrain<<<<>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, - rand_idx_.mutable_gpu_data(), top_data); - } else { - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTest<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, top_data); -} -break; + kernel_w_, stride_h_, stride_w_, top_data); + } + break; default: -LOG(FATAL)<< "Unknown pooling method."; + LOG(FATAL)<< "Unknown pooling method."; } -CUDA_POST_KERNEL_CHECK -; + CUDA_POST_KERNEL_CHECK + ; } -template +template __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { + const int* mask, const Dtype* top_mask, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, + const int pad_h, const int pad_w, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) -{ + { // find out the local index // find out the local offset int w = index % width; @@ -258,15 +269,17 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } } -template +template __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, + const int pad_h, const int pad_w, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) -{ + { // find out the local index // find out the local offset int w = index % width + pad_w; @@ -294,87 +307,88 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, } } - -template -__global__ void StoPoolBackward(const int nthreads, - const Dtype* rand_idx, const Dtype* top_diff, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { +template +__global__ void StoPoolBackward(const int nthreads, const Dtype* rand_idx, + const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) + { // find out the local index // find out the local offset -int w = index % width; -int h = (index / width) % height; -int c = (index / width / height) % channels; -int n = index / width / height / channels; -int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; -int phend = min(h / stride_h + 1, pooled_height); -int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; -int pwend = min(w / stride_w + 1, pooled_width); -Dtype gradient = 0; -rand_idx += (n * channels + c) * pooled_height * pooled_width; -top_diff += (n * channels + c) * pooled_height * pooled_width; -for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff[ph * pooled_width + pw] * - (index == static_cast(rand_idx[ph * pooled_width + pw])); + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + int phend = min(h / stride_h + 1, pooled_height); + int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + rand_idx += (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff[ph * pooled_width + pw] + * (index == static_cast(rand_idx[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; } } -bottom_diff[index] = gradient; -} -} template void PoolingLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { -if (!propagate_down[0]) { -return; -} -const Dtype* top_diff = top[0]->gpu_diff(); -Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); -const int count = bottom[0]->count(); -caffe_gpu_set(count, Dtype(0.), bottom_diff); - // We'll output the mask to top[1] if it's of size >1. -const bool use_top_mask = top.size() > 1; -const int* mask = NULL; -const Dtype* top_mask = NULL; -switch (this->layer_param_.pooling_param().pool()) { -case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); + if (!propagate_down[0]) { + return; } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward<<>>( - count, top_diff, mask, top_mask, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - bottom_diff); - break; -case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolBackward<<>>( - count, top_diff, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); - break; -case PoolingParameter_PoolMethod_STOCHASTIC: - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolBackward<<>>( - count, rand_idx_.gpu_data(), top_diff, - top[0]->num(), channels_, height_, width_, pooled_height_, - pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, - bottom_diff); - break; -default: - LOG(FATAL)<< "Unknown pooling method."; -} -CUDA_POST_KERNEL_CHECK -; + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + caffe_gpu_set(count, Dtype(0.), bottom_diff); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + const int* mask = NULL; + const Dtype* top_mask = NULL; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward<<>>( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + bottom_diff); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolBackward<<>>( + count, top_diff, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolBackward<<>>( + count, rand_idx_.gpu_data(), top_diff, + top[0]->num(), channels_, height_, width_, pooled_height_, + pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, + bottom_diff); + break; + default: + LOG(FATAL)<< "Unknown pooling method."; + } + CUDA_POST_KERNEL_CHECK + ; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); From a16499485770acaf11c1e184706be3bce299a4a6 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 20 Apr 2015 03:09:54 +0200 Subject: [PATCH 005/254] Fixed some bugs with memory. Still WIP. --- Makefile | 6 +- include/caffe/greentea/greentea.hpp | 4 +- .../greentea/greentea_math_functions.hpp | 7 +- src/caffe/greentea/greentea.cpp | 4 +- .../greentea/greentea_math_functions.cpp | 128 +++++++++--------- src/caffe/layers/conv_sk_layer.cu | 37 ++--- 6 files changed, 91 insertions(+), 95 deletions(-) diff --git a/Makefile b/Makefile index 0144cf179d3..d4e13a4f50a 100644 --- a/Makefile +++ b/Makefile @@ -180,9 +180,9 @@ ifeq ($(USE_GREENTEA),1) CLLINC = '$(AMDAPPSDKROOT)/include' endif - # Use AMD clBLAS, TODO: Not implemented yet + # Use AMD clBLAS ifeq ($(USE_CLBLAS), 1) - LIBRARIES += clblas + LIBRARIES += clBLAS COMMON_FLAGS += -DUSE_CLBLAS endif @@ -202,7 +202,7 @@ ifeq ($(USE_GREENTEA),1) COMMON_FLAGS += -DUSE_GREENTEA -DVIENNACL_WITH_OPENCL # Viennacl runtime debug output - ifeq ($(DEBUG), 0) + ifeq ($(DEBUG), 1) COMMON_FLAGS += -DVIENNACL_DEBUG_ALL endif diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 037dc453a05..99551091925 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -31,8 +31,8 @@ namespace caffe { #ifdef USE_GREENTEA -template -cl_mem Subregion(cl_mem in, size_t off, size_t size); +/*template +cl_mem Subregion(cl_mem in, size_t off, size_t size);*/ viennacl::ocl::handle WrapHandle(cl_mem in, viennacl::ocl::context &ctx); #endif diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 9d0329d112a..8e4b7b101fa 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -37,9 +37,10 @@ void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context template void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const Dtype alpha, const cl_mem A, - const cl_mem B, const Dtype beta, cl_mem C); + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const Dtype alpha, + const cl_mem A, int offA, const cl_mem B, int offB, const Dtype beta, + cl_mem C, int offC); /*template void greentea_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index 00412ede1af..08baa0f962f 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -10,7 +10,7 @@ namespace caffe { #ifdef USE_GREENTEA -template +/*template cl_mem Subregion(cl_mem in, size_t off, size_t size) { cl_buffer_region* region = new cl_buffer_region(); region->origin = sizeof(Dtype) * off; @@ -26,7 +26,7 @@ cl_mem Subregion(cl_mem in, size_t off, size_t size) { template cl_mem Subregion(cl_mem in, size_t off, size_t size); template cl_mem Subregion(cl_mem in, size_t off, size_t size); template cl_mem Subregion(cl_mem in, size_t off, size_t size); -template cl_mem Subregion(cl_mem in, size_t off, size_t size); +template cl_mem Subregion(cl_mem in, size_t off, size_t size);*/ viennacl::ocl::handle WrapHandle(cl_mem in, viennacl::ocl::context &ctx) { diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 65e026e6973..b527826e58f 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -26,6 +26,12 @@ #include "viennacl/ocl/platform.hpp" #include "viennacl/ocl/backend.hpp" +// TODO: Remove: + +#ifdef USE_CLBLAS +#include +#endif USE_CLBLAS + namespace caffe { // Copy from OpenCL buffer to main memory @@ -43,7 +49,8 @@ void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, viennacl::ocl::context &ctx) { if (X != NULL) { cl_int err = clEnqueueWriteBuffer(ctx.get_queue().handle().get(), Y, - CL_TRUE, 0, N, X, 0, NULL, NULL); + CL_TRUE, + 0, N, X, 0, NULL, NULL); } ctx.get_queue().finish(); } @@ -69,35 +76,22 @@ template void greentea_copy(const int N, const cl_mem X, cl_mem Y, template void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); -template<> -void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const float alpha, - const cl_mem A, const cl_mem B, const float beta, - cl_mem C) { - - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); - - ViennaCLTranspose vclTransA = - (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; - ViennaCLTranspose vclTransB = - (TransB == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; - - ViennaCLOrder vclOrderA = ViennaCLRowMajor; - ViennaCLOrder vclOrderB = ViennaCLRowMajor; - ViennaCLOrder vclOrderC = ViennaCLRowMajor; +template +void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, + const int K, const Dtype alpha, const cl_mem A, int offA, + const cl_mem B, int offB, const Dtype beta, cl_mem C, + int offC) { - int offArow = 0; + int offArow = offA; int offAcol = 0; int incArow = 1; int incAcol = 1; - int offBrow = 0; + int offBrow = offB; int offBcol = 0; int incBrow = 1; int incBcol = 1; - int offCrow = 0; + int offCrow = offC; int offCcol = 0; int incCrow = 1; int incCcol = 1; @@ -106,22 +100,7 @@ void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; - GREENTEA_BLAS_CHECK( - ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, - vclOrderC, M, N, K, alpha, A, offArow, offAcol, - incArow, incAcol, lda, B, offBrow, offBcol, incBrow, - incBcol, ldb, beta, C, offCrow, offCcol, incCrow, - incCcol, ldc)); - -} - -template<> -void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const double alpha, - const cl_mem A, const cl_mem B, - const double beta, cl_mem C) { - +#ifdef USE_VIENNACLBLAS ViennaCLBackend backend; ViennaCLBackendCreate(&backend); ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); @@ -135,32 +114,57 @@ void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, ViennaCLOrder vclOrderB = ViennaCLRowMajor; ViennaCLOrder vclOrderC = ViennaCLRowMajor; - int offArow = 0; - int offAcol = 0; - int incArow = 1; - int incAcol = 1; - int offBrow = 0; - int offBcol = 0; - int incBrow = 1; - int incBcol = 1; - int offCrow = 0; - int offCcol = 0; - int incCrow = 1; - int incCcol = 1; - - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; - int ldc = 0; - - GREENTEA_BLAS_CHECK( - ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, - vclOrderC, M, N, K, alpha, A, offArow, offAcol, - incArow, incAcol, lda, B, offBrow, offBcol, incBrow, - incBcol, ldb, beta, C, offCrow, offCcol, incCrow, - incCcol, ldc)); + if (std::is_same::value) { + GREENTEA_BLAS_CHECK( + ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, + vclOrderC, M, N, K, alpha, A, offArow, offAcol, + incArow, incAcol, lda, B, offBrow, offBcol, incBrow, + incBcol, ldb, beta, C, offCrow, offCcol, incCrow, + incCcol, ldc)); + } else { + GREENTEA_BLAS_CHECK( + ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, + vclOrderC, M, N, K, alpha, A, offArow, offAcol, + incArow, incAcol, lda, B, offBrow, offBcol, incBrow, + incBcol, ldb, beta, C, offCrow, offCcol, incCrow, + incCcol, ldc)); + } +#endif +#ifdef USE_CLBLAS + clblasOrder clOrder = clblasRowMajor; + clblasTranspose clTransA = + (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; + clblasTranspose clTransB = + (TransB == CblasNoTrans) ? clblasNoTrans : clblasTrans; + + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + + cl_command_queue queue = ctx.get_queue().handle().get(); + + if (std::is_same::value) { + clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, + offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL); + } else { + clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, + offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL); + } +#endif } +template void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, + const int M, const int N, const int K, + const float alpha, const cl_mem A, + int offA, const cl_mem B, int offB, + const float beta, cl_mem C, int offC); +template void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, + const int M, const int N, const int K, + const double alpha, const cl_mem A, + int offA, const cl_mem B, int offB, + const double beta, cl_mem C, int offC); + /* template<> void greentea_gpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index e3e5c2a5739..8a64e7b97b2 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -73,32 +73,23 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int n = 0; n < num_; ++n) { // First, im2col - greentea_im2col_sk_gpu( - program, - ctx, - bottom_data, bottom[i]->offset(n), - channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); + greentea_im2col_sk_gpu(program, ctx, bottom_data, + bottom[i]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, + pad_w_, stride_h_, stride_w_, kstride_h_, + kstride_w_, col_data); ctx.get_queue().finish(); std::cout << "After im2col" << std::endl; // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm( - this->device_context_.id(), - CblasNoTrans, - CblasNoTrans, - M_, - N_, - K_, - (Dtype) 1., - Subregion(weight, weight_offset * g, M_ * K_), - Subregion(col_data, col_offset * g, K_ * N_), - (Dtype) 0., - Subregion(top_data, top[i]->offset(n) + top_offset * g, - M_ * N_)); - ctx.get_queue().finish(); + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, + weight_offset * g, col_data, col_offset * g, + (Dtype) 0., top_data, + top[i]->offset(n) + top_offset * g); + ctx.get_queue().finish(); } std::cout << "After gpu gemm" << std::endl; @@ -108,9 +99,9 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, greentea_gpu_gemm( this->device_context_.id(), CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., - (cl_mem) (this->blobs_[1]->gpu_data()), - (cl_mem) (bias_multiplier_.gpu_data()), (Dtype) 1., - Subregion(top_data, top[i]->offset(n), num_output_ * N_)); + (cl_mem) (this->blobs_[1]->gpu_data()), 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., + top_data, top[i]->offset(n)); ctx.get_queue().finish(); } } From 974c386a2ea96b270bc40026b2a198cccf679e0b Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 23 Apr 2015 01:10:23 +0200 Subject: [PATCH 006/254] Updated math functions. --- Makefile | 28 ++++++++++++++----- include/caffe/greentea/greentea.hpp | 1 - .../greentea/greentea_math_functions.cpp | 5 +++- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index d4e13a4f50a..7b551e63534 100644 --- a/Makefile +++ b/Makefile @@ -211,6 +211,12 @@ ifeq ($(USE_GREENTEA),1) CL_KERNELS_SH = src/caffe/greentea/cl_kernels.sh endif +ifeq ($(USE_CUDA),1) + COMMON_FLAGS += -DUSE_CUDA +else + NVCC = +endif + ############################## # Derive include and lib directories ############################## @@ -576,13 +582,21 @@ $(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) -$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) +ifeq ($(USE_CUDA), 1) + $(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) +else + $(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) +endif $(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ | $(DYNAMIC_NAME) $(TEST_BIN_DIR) diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 99551091925..e64fe1b2e99 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -23,7 +23,6 @@ #include "viennacl/ocl/device.hpp" #include "viennacl/ocl/platform.hpp" #include "viennacl/ocl/backend.hpp" -#include "libviennacl/include/viennacl.hpp" #include "viennacl/backend/opencl.hpp" #include "viennacl/vector.hpp" #endif diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index b527826e58f..6cf3f4a8d61 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -26,12 +26,15 @@ #include "viennacl/ocl/platform.hpp" #include "viennacl/ocl/backend.hpp" -// TODO: Remove: #ifdef USE_CLBLAS #include #endif USE_CLBLAS +#ifdef USE_VIENNACLBLAS +#include "libviennacl/include/viennacl.hpp" +#endif + namespace caffe { // Copy from OpenCL buffer to main memory From 434b5b146feab79a1a7896db4f5eab986980d0d2 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 23 Apr 2015 02:50:38 +0200 Subject: [PATCH 007/254] Successful runs with libviennacl and clBLAS backends. --- Makefile | 25 ++++++++----------- include/caffe/greentea/greentea.hpp | 10 +++++++- src/caffe/common.cpp | 9 ++++++- .../greentea/greentea_math_functions.cpp | 14 +++++------ src/caffe/layers/conv_sk_layer.cu | 2 +- 5 files changed, 36 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index 7b551e63534..4c6d46c3ff9 100644 --- a/Makefile +++ b/Makefile @@ -213,8 +213,6 @@ endif ifeq ($(USE_CUDA),1) COMMON_FLAGS += -DUSE_CUDA -else - NVCC = endif ############################## @@ -582,20 +580,19 @@ $(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) ifeq ($(USE_CUDA), 1) - $(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) else - $(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) endif $(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index e64fe1b2e99..739583f036e 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -64,9 +64,17 @@ struct is_same { #ifdef USE_GREENTEA -#define GREENTEA_BLAS_CHECK(condition) \ +#ifdef USE_VIENNACLBLAS +#define GREENTEA_VCL_BLAS_CHECK(condition) \ ViennaCLStatus status = condition; \ CHECK_EQ(status, ViennaCLSuccess) << "GreenTea ViennaCL BLAS ERROR"; +#endif + +#ifdef USE_CLBLAS +#define GREENTEA_CL_BLAS_CHECK(condition) \ + clblasStatus status = condition; \ + CHECK_EQ(status, clblasSuccess) << "GreenTea CL BLAS ERROR"; +#endif // Macro to select the single (_s) or double (_d) precision kernel #define CL_KERNEL_SELECT(kernel) is_same::value ? kernel "_s" : kernel "_d" diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 332c4c7cf69..a86f29ca30d 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -8,6 +8,9 @@ #ifdef USE_GREENTEA #include "caffe/greentea/cl_kernels.hpp" +#ifdef USE_CLBLAS +#include +#endif #endif namespace caffe { @@ -280,6 +283,7 @@ void Caffe::SetDevice(const int device_id) { Get().default_device_context_ = GetDeviceContext(device_id); if (Get().default_device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA int current_device; CUDA_CHECK(cudaGetDevice(¤t_device)); if (current_device == device_id) { @@ -300,9 +304,12 @@ void Caffe::SetDevice(const int device_id) { CURAND_CHECK( curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, cluster_seedgen())); +#endif } else { #ifdef USE_GREENTEA - // TODO: ??? +#ifdef USE_CLBLAS + clblasSetup(); +#endif #endif } } diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 6cf3f4a8d61..41f96f56f14 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -29,7 +29,7 @@ #ifdef USE_CLBLAS #include -#endif USE_CLBLAS +#endif #ifdef USE_VIENNACLBLAS #include "libviennacl/include/viennacl.hpp" @@ -118,14 +118,14 @@ void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, ViennaCLOrder vclOrderC = ViennaCLRowMajor; if (std::is_same::value) { - GREENTEA_BLAS_CHECK( + GREENTEA_VCL_BLAS_CHECK( ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, vclOrderC, M, N, K, alpha, A, offArow, offAcol, incArow, incAcol, lda, B, offBrow, offBcol, incBrow, incBcol, ldb, beta, C, offCrow, offCcol, incCrow, incCcol, ldc)); } else { - GREENTEA_BLAS_CHECK( + GREENTEA_VCL_BLAS_CHECK( ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, vclOrderC, M, N, K, alpha, A, offArow, offAcol, incArow, incAcol, lda, B, offBrow, offBcol, incBrow, @@ -145,11 +145,11 @@ void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, cl_command_queue queue = ctx.get_queue().handle().get(); if (std::is_same::value) { - clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, - offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL); + GREENTEA_CL_BLAS_CHECK(clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, + offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); } else { - clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, - offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL); + GREENTEA_CL_BLAS_CHECK(clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, + offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); } #endif diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 8a64e7b97b2..33e396beaf7 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -89,8 +89,8 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, weight_offset * g, col_data, col_offset * g, (Dtype) 0., top_data, top[i]->offset(n) + top_offset * g); - ctx.get_queue().finish(); } + ctx.get_queue().finish(); std::cout << "After gpu gemm" << std::endl; From 23c9f86e38f81ac67a194d39822cc19cab8564a2 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 23 Apr 2015 22:16:18 +0200 Subject: [PATCH 008/254] Started implementing block-GEMM for Conv-SK. --- docs/development.md | 2 +- docs/install_apt.md | 21 +- docs/install_osx.md | 6 +- docs/model_zoo.md | 43 +- docs/tutorial/layers.md | 28 +- examples/classification.ipynb | 13 +- examples/filter_visualization.ipynb | 109 +- examples/hdf5_classification.ipynb | 1257 +++++++++-------- examples/hdf5_classification/solver.prototxt | 6 +- examples/hdf5_classification/solver2.prototxt | 6 +- .../hdf5_classification/train_val.prototxt | 4 +- .../hdf5_classification/train_val2.prototxt | 4 +- examples/imagenet/make_imagenet_mean.sh | 10 +- examples/net_surgery.ipynb | 386 +++-- examples/siamese/mnist_siamese.ipynb | 20 +- examples/web_demo/app.py | 17 +- include/caffe/greentea/greentea.hpp | 3 - .../greentea/greentea_math_functions.hpp | 30 +- include/caffe/vision_layers.hpp | 3 + python/CMakeLists.txt | 2 +- python/caffe/_caffe.cpp | 30 +- python/caffe/classifier.py | 4 +- python/caffe/detector.py | 4 +- python/caffe/io.py | 17 +- python/caffe/pycaffe.py | 6 +- python/caffe/test/test_python_layer.py | 24 +- python/classify.py | 16 +- python/detect.py | 15 +- python/requirements.txt | 2 +- scripts/travis/travis_install.sh | 1 - .../greentea/greentea_math_functions.cpp | 165 ++- src/caffe/layers/conv_sk_layer.cpp | 7 +- src/caffe/layers/conv_sk_layer.cu | 61 +- 33 files changed, 1330 insertions(+), 992 deletions(-) diff --git a/docs/development.md b/docs/development.md index fe54864bd35..ccb6a29701d 100644 --- a/docs/development.md +++ b/docs/development.md @@ -30,7 +30,7 @@ Similarly for IPython notebooks: simply include `"include_in_docs": true` in the Other docs, such as installation guides, are written in the `docs` directory and manually linked to from the `index.md` page. -We strive to provide provide lots of usage examples, and to document all code in docstrings. +We strive to provide lots of usage examples, and to document all code in docstrings. We absolutely appreciate any contribution to this effort! ### Versioning diff --git a/docs/install_apt.md b/docs/install_apt.md index 89bc9a00aef..75f8bec0e95 100644 --- a/docs/install_apt.md +++ b/docs/install_apt.md @@ -8,12 +8,24 @@ title: Installation: Ubuntu sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libboost-all-dev libhdf5-serial-dev +**CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions. +Install the library and latest driver separately; the driver bundled with the library is usually out-of-date. +This can be skipped for CPU-only installation. + +**BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance. + +**Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface. + **Remaining dependencies, 14.04** +Everything is packaged in 14.04. + sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev protobuf-compiler **Remaining dependencies, 12.04** +These dependencies need manual installation in 12.04. + # glog wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz tar zxvf glog-0.3.3.tar.gz @@ -28,17 +40,10 @@ title: Installation: Ubuntu export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1 make && make install # lmdb - git clone git://gitorious.org/mdb/mdb.git + git clone https://gitorious.org/mdb/mdb.git cd mdb/libraries/liblmdb make && make install Note that glog does not compile with the most recent gflags version (2.1), so before that is resolved you will need to build with glog first. -**CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions. -Install the library and latest driver separately; the driver bundled with the library is usually out-of-date. - -**BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance. - -**Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface. - Continue with [compilation](installation.html#compilation). diff --git a/docs/install_osx.md b/docs/install_osx.md index 55b098731fc..39cb02fe232 100644 --- a/docs/install_osx.md +++ b/docs/install_osx.md @@ -18,7 +18,7 @@ In other `ENV` settings, things may not work as expected. brew install --fresh -vd snappy leveldb gflags glog szip lmdb # need the homebrew science source for OpenCV and hdf5 brew tap homebrew/science - hdf5 opencv + brew install hdf5 opencv If using Anaconda Python, a modification to the OpenCV formula might be needed Do `brew edit opencv` and change the lines that look like the two lines below to exactly the two lines below. @@ -32,7 +32,7 @@ If using Anaconda Python, HDF5 is bundled and the `hdf5` formula can be skipped. # with Python pycaffe needs dependencies built from source brew install --build-from-source --with-python --fresh -vd protobuf - brew install --build-from-source --fresh -vd boost + brew install --build-from-source --fresh -vd boost boost-python # without Python the usual installation suffices brew install protobuf boost @@ -115,7 +115,7 @@ Then, whenever you want to update homebrew, switch back to the master branches, # Update homebrew; hopefully this works without errors! brew update - # Switch back to the caffe branches with the forumlae that you modified earlier + # Switch back to the caffe branches with the formulae that you modified earlier cd /usr/local git rebase master caffe # Fix any merge conflicts and commit to caffe branch diff --git a/docs/model_zoo.md b/docs/model_zoo.md index ad30d0acd55..06dc0a49ec7 100644 --- a/docs/model_zoo.md +++ b/docs/model_zoo.md @@ -3,28 +3,30 @@ title: Model Zoo --- # Caffe Model Zoo -Lots of people have used Caffe to train models of different architectures and applied to different problems, ranging from simple regression to AlexNet-alikes to Siamese networks for image similarity to speech applications. -To lower the friction of sharing these models, we introduce the model zoo framework: +Lots of researchers and engineers have made Caffe models for different tasks with all kinds of architectures and data. +These models are learned and applied for problems ranging from simple regression, to large-scale visual classification, to Siamese networks for image similarity, to speech and robotics applications. + +To help share these models, we introduce the model zoo framework: - A standard format for packaging Caffe model info. -- Tools to upload/download model info to/from Github Gists, and to download trained `.caffemodel` parameters. +- Tools to upload/download model info to/from Github Gists, and to download trained `.caffemodel` binaries. - A central wiki page for sharing model info Gists. -## BVLC Reference Models +## Where to get trained models -First of all, we provide some trained models out of the box. +First of all, we bundle BVLC-trained models for unrestricted, out of the box use. +
+See the [BVLC model license](#bvlc-model-license) for details. Each one of these can be downloaded by running `scripts/download_model_binary.py ` where `` is specified below: -- **BVLC Reference CaffeNet** in `models/bvlc_reference_caffenet`: AlexNet trained on ILSVRC 2012, with a minor variation from the version as described in the NIPS 2012 paper. (Trained by Jeff Donahue @jeffdonahue) -- **BVLC AlexNet** in `models/bvlc_alexnet`: AlexNet trained on ILSVRC 2012, almost exactly as described in NIPS 2012. (Trained by Evan Shelhamer @shelhamer) -- **BVLC Reference R-CNN ILSVRC-2013** in `models/bvlc_reference_rcnn_ilsvrc13`: pure Caffe implementation of [R-CNN](https://github.com/rbgirshick/rcnn). (Trained by Ross Girshick @rbgirshick) -- **BVLC GoogleNet** in `models/bvlc_googlenet`: GoogleNet trained on ILSVRC 2012, almost exactly as described in [GoogleNet](http://arxiv.org/abs/1409.4842). (Trained by Sergio Guadarrama @sguada) - +- **BVLC Reference CaffeNet** in `models/bvlc_reference_caffenet`: AlexNet trained on ILSVRC 2012, with a minor variation from the version as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Jeff Donahue @jeffdonahue) +- **BVLC AlexNet** in `models/bvlc_alexnet`: AlexNet trained on ILSVRC 2012, almost exactly as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Evan Shelhamer @shelhamer) +- **BVLC Reference R-CNN ILSVRC-2013** in `models/bvlc_reference_rcnn_ilsvrc13`: pure Caffe implementation of [R-CNN](https://github.com/rbgirshick/rcnn) as described by Girshick et al. in CVPR 2014. (Trained by Ross Girshick @rbgirshick) +- **BVLC GoogLeNet** in `models/bvlc_googlenet`: GoogLeNet trained on ILSVRC 2012, almost exactly as described in [Going Deeper with Convolutions](http://arxiv.org/abs/1409.4842) by Szegedy et al. in ILSVRC 2014. (Trained by Sergio Guadarrama @sguada) -## Community Models - -The publicly-editable [Caffe Model Zoo wiki](https://github.com/BVLC/caffe/wiki/Model-Zoo) catalogues user-made models. -Refer to the model details for authorship and conditions -- please respect licenses and citations. +**Community models** made by Caffe users are posted to a publicly editable [wiki page](https://github.com/BVLC/caffe/wiki/Model-Zoo). +These models are subject to conditions of their respective authors such as citation and license. +Thank you for sharing your models! ## Model info format @@ -44,7 +46,7 @@ A caffe model is distributed as a directory containing: Github Gist is a good format for model info distribution because it can contain multiple files, is versionable, and has in-browser syntax highlighting and markdown rendering. -- `scripts/upload_model_to_gist.sh `: uploads non-binary files in the model directory as a Github Gist and prints the Gist ID. If `gist_id` is already part of the `/readme.md` frontmatter, then updates existing Gist. +`scripts/upload_model_to_gist.sh ` uploads non-binary files in the model directory as a Github Gist and prints the Gist ID. If `gist_id` is already part of the `/readme.md` frontmatter, then updates existing Gist. Try doing `scripts/upload_model_to_gist.sh models/bvlc_alexnet` to test the uploading (don't forget to delete the uploaded gist afterward). @@ -56,4 +58,13 @@ It is up to the user where to host the `.caffemodel` file. We host our BVLC-provided models on our own server. Dropbox also works fine (tip: make sure that `?dl=1` is appended to the end of the URL). -- `scripts/download_model_binary.py `: downloads the `.caffemodel` from the URL specified in the `/readme.md` frontmatter and confirms SHA1. +`scripts/download_model_binary.py ` downloads the `.caffemodel` from the URL specified in the `/readme.md` frontmatter and confirms SHA1. + +## BVLC model license + +The Caffe models bundled by the BVLC are released for unrestricted use. + +These models are trained on data from the [ImageNet project](http://www.image-net.org/) and training data includes internet photos that may be subject to copyright. + +Our present understanding as researchers is that there is no restriction placed on the open release of these learned model weights, since none of the original images are distributed in whole or in part. +To the extent that the interpretation arises that weights are derivative works of the original copyright holder and they assert such a copyright, UC Berkeley makes no representations as to what use is allowed other than to consider our present release in the spirit of fair use in the academic mission of the university to disseminate knowledge and tools as broadly as possible without restriction. diff --git a/docs/tutorial/layers.md b/docs/tutorial/layers.md index 34bb48050e8..839939f5ad6 100644 --- a/docs/tutorial/layers.md +++ b/docs/tutorial/layers.md @@ -453,20 +453,20 @@ The `SLICE` layer is a utility layer that slices an input layer to multiple outp * Sample - layers { - name: "slicer_label" - type: SLICE - bottom: "label" - ## Example of label with a shape N x 3 x 1 x 1 - top: "label1" - top: "label2" - top: "label3" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } - } + layers { + name: "slicer_label" + type: SLICE + bottom: "label" + ## Example of label with a shape N x 3 x 1 x 1 + top: "label1" + top: "label2" + top: "label3" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } + } `slice_dim` indicates the target dimension and can assume only two values: 0 for num or 1 for channel; `slice_point` indicates indexes in the selected dimension (the number of indexes must be equal to the number of top blobs minus one). diff --git a/examples/classification.ipynb b/examples/classification.ipynb index 6f8fa4252e6..0babf79f304 100644 --- a/examples/classification.ipynb +++ b/examples/classification.ipynb @@ -4,7 +4,7 @@ "example_name": "ImageNet classification", "include_in_docs": true, "priority": 1, - "signature": "sha256:918b797b1b7d78125c8f1e3c84756b0679120cbe1071ce7fee7aeafef0fbae55" + "signature": "sha256:a2b12abaa1eb252f436d59833c08ab97948c8a7a0513197f31afad0a0690e318" }, "nbformat": 3, "nbformat_minor": 0, @@ -18,9 +18,9 @@ "Classifying ImageNet: the instant Caffe way\n", "===========================================\n", "\n", - "Caffe provides a general Python interface for models with `caffe.Net` in `python/caffe/pycaffe.py`, but to make off-the-shelf classification easy we provide a `caffe.Classifier` class and `classify.py` script. Both Python and MATLAB wrappers are provided. However, the Python wrapper has more features so we will describe it here. For MATLAB, refer to `matlab/caffe/matcaffe_demo.m`.\n", + "Caffe has a Python interface, pycaffe, with a `caffe.Net` interface for models. There are both Python and MATLAB interfaces. While this example uses the off-the-shelf Python `caffe.Classifier` interface there is also a MATLAB example at `matlab/caffe/matcaffe_demo.m`.\n", "\n", - "Before we begin, you must compile Caffe and install the python wrapper by setting your `PYTHONPATH`. If you haven't yet done so, please refer to the [installation instructions](installation.html). This example uses our pre-trained CaffeNet model, an ILSVRC12 image classifier. You can download it by running `./scripts/download_model_binary.py models/bvlc_reference_caffenet`.\n", + "Before we begin, you must compile Caffe. You should add the Caffe module to your `PYTHONPATH` although this example includes it automatically. If you haven't yet done so, please refer to the [installation instructions](http://caffe.berkeleyvision.org/installation.html). This example uses our pre-trained CaffeNet model, an ILSVRC12 image classifier. You can download it by running `./scripts/download_model_binary.py models/bvlc_reference_caffenet` or let the first step of this example download it for you.\n", "\n", "Ready? Let's start." ] @@ -44,7 +44,12 @@ "# and the image you would like to classify.\n", "MODEL_FILE = '../models/bvlc_reference_caffenet/deploy.prototxt'\n", "PRETRAINED = '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'\n", - "IMAGE_FILE = 'images/cat.jpg'" + "IMAGE_FILE = 'images/cat.jpg'\n", + "\n", + "import os\n", + "if not os.path.isfile(PRETRAINED):\n", + " print(\"Downloading pre-trained CaffeNet model...\")\n", + " !../scripts/download_model_binary.py ../models/bvlc_reference_caffenet" ], "language": "python", "metadata": {}, diff --git a/examples/filter_visualization.ipynb b/examples/filter_visualization.ipynb index 0bfdb5caf68..7125907f35e 100644 --- a/examples/filter_visualization.ipynb +++ b/examples/filter_visualization.ipynb @@ -4,7 +4,7 @@ "example_name": "Filter visualization", "include_in_docs": true, "priority": 2, - "signature": "sha256:44536e4f82eb5748b6a3bb6fcfca01bc6c5815dad2641c994dab031f452b7606" + "signature": "sha256:64c88129e2eeaa956e4c8a26467ff6119f24ea3d7ef15f8217326249973bea8f" }, "nbformat": 3, "nbformat_minor": 0, @@ -24,7 +24,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First, import required modules and set plotting parameters" + "First, import required modules, set plotting parameters, and run `./scripts/download_model_binary.py models/bvlc_reference_caffenet` to get the pretrained CaffeNet model if it hasn't already been fetched." ] }, { @@ -44,7 +44,12 @@ "\n", "plt.rcParams['figure.figsize'] = (10, 10)\n", "plt.rcParams['image.interpolation'] = 'nearest'\n", - "plt.rcParams['image.cmap'] = 'gray'" + "plt.rcParams['image.cmap'] = 'gray'\n", + "\n", + "import os\n", + "if not os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):\n", + " print(\"Downloading pre-trained CaffeNet model...\")\n", + " !../scripts/download_model_binary.py ../models/bvlc_reference_caffenet" ], "language": "python", "metadata": {}, @@ -55,7 +60,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Run `./scripts/download_model_binary.py models/bvlc_reference_caffenet` to get the pretrained CaffeNet model, load the net, specify test phase and CPU mode, and configure input preprocessing." + "Set Caffe to CPU mode, load the net in the test phase for inference, and configure input preprocessing." ] }, { @@ -63,12 +68,16 @@ "collapsed": false, "input": [ "caffe.set_mode_cpu()\n", - "net = caffe.Classifier(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',\n", - " caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')\n", + "net = caffe.Net(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',\n", + " caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n", + " caffe.TEST)\n", + "\n", "# input preprocessing: 'data' is the name of the input blob == net.inputs[0]\n", - "net.transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # ImageNet mean\n", - "net.transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]\n", - "net.transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB" + "transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n", + "transformer.set_transpose('data', (2,0,1))\n", + "transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel\n", + "transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]\n", + "transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB" ], "language": "python", "metadata": {}, @@ -79,25 +88,36 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Run a classification pass" + "Classify the image by reshaping the net for the single input then doing the forward pass." ] }, { "cell_type": "code", "collapsed": false, "input": [ - "scores = net.predict([caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')])" + "net.blobs['data'].reshape(1,3,227,227)\n", + "net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(caffe_root + 'examples/images/cat.jpg'))\n", + "out = net.forward()\n", + "print(\"Predicted class is #{}.\".format(out['prob'].argmax()))" ], "language": "python", "metadata": {}, - "outputs": [], + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "Predicted class is #281.\n" + ] + } + ], "prompt_number": 3 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The layer features and their shapes (10 is the batch size, corresponding to the the ten subcrops used by Krizhevsky et al.)" + "The layer features and their shapes (1 is the batch size, corresponding to the single input image in this example)." ] }, { @@ -114,21 +134,21 @@ "output_type": "pyout", "prompt_number": 4, "text": [ - "[('data', (10, 3, 227, 227)),\n", - " ('conv1', (10, 96, 55, 55)),\n", - " ('pool1', (10, 96, 27, 27)),\n", - " ('norm1', (10, 96, 27, 27)),\n", - " ('conv2', (10, 256, 27, 27)),\n", - " ('pool2', (10, 256, 13, 13)),\n", - " ('norm2', (10, 256, 13, 13)),\n", - " ('conv3', (10, 384, 13, 13)),\n", - " ('conv4', (10, 384, 13, 13)),\n", - " ('conv5', (10, 256, 13, 13)),\n", - " ('pool5', (10, 256, 6, 6)),\n", - " ('fc6', (10, 4096, 1, 1)),\n", - " ('fc7', (10, 4096, 1, 1)),\n", - " ('fc8', (10, 1000, 1, 1)),\n", - " ('prob', (10, 1000, 1, 1))]" + "[('data', (1, 3, 227, 227)),\n", + " ('conv1', (1, 96, 55, 55)),\n", + " ('pool1', (1, 96, 27, 27)),\n", + " ('norm1', (1, 96, 27, 27)),\n", + " ('conv2', (1, 256, 27, 27)),\n", + " ('pool2', (1, 256, 13, 13)),\n", + " ('norm2', (1, 256, 13, 13)),\n", + " ('conv3', (1, 384, 13, 13)),\n", + " ('conv4', (1, 384, 13, 13)),\n", + " ('conv5', (1, 256, 13, 13)),\n", + " ('pool5', (1, 256, 6, 6)),\n", + " ('fc6', (1, 4096)),\n", + " ('fc7', (1, 4096)),\n", + " ('fc8', (1, 1000)),\n", + " ('prob', (1, 1000))]" ] } ], @@ -138,7 +158,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The parameters and their shapes (each of these layers also has biases which are omitted here)" + "The parameters and their shapes. The parameters are `net.params['name'][0]` while biases are `net.params['name'][1]`." ] }, { @@ -160,9 +180,9 @@ " ('conv3', (384, 256, 3, 3)),\n", " ('conv4', (384, 192, 3, 3)),\n", " ('conv5', (256, 192, 3, 3)),\n", - " ('fc6', (1, 1, 4096, 9216)),\n", - " ('fc7', (1, 1, 4096, 4096)),\n", - " ('fc8', (1, 1, 1000, 4096))]" + " ('fc6', (4096, 9216)),\n", + " ('fc7', (4096, 4096)),\n", + " ('fc8', (1000, 4096))]" ] } ], @@ -180,7 +200,7 @@ "collapsed": false, "input": [ "# take an array of shape (n, height, width) or (n, height, width, channels)\n", - "# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)\n", + "# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)\n", "def vis_square(data, padsize=1, padval=0):\n", " data -= data.min()\n", " data /= data.max()\n", @@ -212,8 +232,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "# index four is the center crop\n", - "plt.imshow(net.transformer.deprocess('data', net.blobs['data'].data[4]))" + "plt.imshow(transformer.deprocess('data', net.blobs['data'].data[0]))" ], "language": "python", "metadata": {}, @@ -269,7 +288,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv1'].data[4, :36]\n", + "feat = net.blobs['conv1'].data[0, :36]\n", "vis_square(feat, padval=1)" ], "language": "python", @@ -327,7 +346,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv2'].data[4, :36]\n", + "feat = net.blobs['conv2'].data[0, :36]\n", "vis_square(feat, padval=1)" ], "language": "python", @@ -355,7 +374,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv3'].data[4]\n", + "feat = net.blobs['conv3'].data[0]\n", "vis_square(feat, padval=0.5)" ], "language": "python", @@ -383,7 +402,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv4'].data[4]\n", + "feat = net.blobs['conv4'].data[0]\n", "vis_square(feat, padval=0.5)" ], "language": "python", @@ -411,7 +430,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['conv5'].data[4]\n", + "feat = net.blobs['conv5'].data[0]\n", "vis_square(feat, padval=0.5)" ], "language": "python", @@ -439,7 +458,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['pool5'].data[4]\n", + "feat = net.blobs['pool5'].data[0]\n", "vis_square(feat, padval=1)" ], "language": "python", @@ -469,7 +488,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['fc6'].data[4]\n", + "feat = net.blobs['fc6'].data[0]\n", "plt.subplot(2, 1, 1)\n", "plt.plot(feat.flat)\n", "plt.subplot(2, 1, 2)\n", @@ -500,7 +519,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['fc7'].data[4]\n", + "feat = net.blobs['fc7'].data[0]\n", "plt.subplot(2, 1, 1)\n", "plt.plot(feat.flat)\n", "plt.subplot(2, 1, 2)\n", @@ -531,7 +550,7 @@ "cell_type": "code", "collapsed": false, "input": [ - "feat = net.blobs['prob'].data[4]\n", + "feat = net.blobs['prob'].data[0]\n", "plt.plot(feat.flat)" ], "language": "python", @@ -576,7 +595,7 @@ " labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\\t')\n", "\n", "# sort top k predictions from softmax output\n", - "top_k = net.blobs['prob'].data[4].flatten().argsort()[-1:-6:-1]\n", + "top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]\n", "print labels[top_k]" ], "language": "python", diff --git a/examples/hdf5_classification.ipynb b/examples/hdf5_classification.ipynb index 51d854fa142..19d27372754 100644 --- a/examples/hdf5_classification.ipynb +++ b/examples/hdf5_classification.ipynb @@ -4,7 +4,7 @@ "example_name": "Off-the-shelf SGD for classification", "include_in_docs": true, "priority": 4, - "signature": "sha256:c3b84add3bb83e91137f396a48f46d46bf7921b242fc42c58390b30806e5a028" + "signature": "sha256:741422697d76b1667287180dc7c6360cf105ee774b1e2def800dc8fe80f78f67" }, "nbformat": 3, "nbformat_minor": 0, @@ -15,32 +15,48 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Classification with HDF5 data\n", + "# Caffeinated Logistic Regression of HDF5 Data\n", "\n", - "In this example we'll use Caffe to do simple logistic regression on a simple binary dataset, showcasing HDF5DataLayer functionality." + "While Caffe is made for deep networks it can likewise represent \"shallow\" models like logistic regression for classification. We'll do simple logistic regression on synthetic data that we'll generate and save to HDF5 to feed vectors to Caffe. Once that model is done, we'll add layers to improve accuracy. That's what Caffe is about: define a model, experiment, and then deploy." ] }, { "cell_type": "code", "collapsed": false, "input": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "\n", + "# Make sure that caffe is on the python path:\n", + "caffe_root = '../' # this file is expected to be in {caffe_root}/examples\n", + "import sys\n", + "sys.path.insert(0, caffe_root + 'python')\n", + "\n", + "import caffe\n", + "\n", "import os\n", "import h5py\n", "import shutil\n", - "import sklearn\n", "import tempfile\n", - "import numpy as np\n", - "import pandas as pd\n", + "\n", + "# You may need to 'pip install scikit-learn'\n", + "import sklearn\n", "import sklearn.datasets\n", - "import sklearn.linear_model\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib inline" + "import sklearn.linear_model" ], "language": "python", "metadata": {}, "outputs": [], "prompt_number": 1 }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Synthesize a dataset of 10,000 4-vectors for binary classification with 2 informative features and 2 noise features." + ] + }, { "cell_type": "code", "collapsed": false, @@ -51,17 +67,8 @@ ")\n", "\n", "# Split into train and test\n", - "X, Xt, y, yt = sklearn.cross_validation.train_test_split(X, y)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 2 - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ + "X, Xt, y, yt = sklearn.cross_validation.train_test_split(X, y)\n", + "\n", "# Visualize sample of the data\n", "ind = np.random.permutation(X.shape[0])[:1000]\n", "df = pd.DataFrame(X[ind])\n", @@ -73,13 +80,20 @@ { "metadata": {}, "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAImCAYAAACB54oCAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWmMXOd57/mrfd+33vdmd7O5kyIpkRJJiZKdyLKvk2sp\nUZwBJmPMnQBBYCeTDJB44JkAjpMbw04ugmCQwcxFPiS5jpNJcm3HjmRbJEVRIsWdTfbC3pfa96pT\np6rONh+q2WKLpERKbC5S/YBGV51T55y36lSd83/f93n+DzRp0qRJkyZNmjRp0qRJkyZNmjRp0qRJ\nkyZNmjRp0qRJkyZNmjRp0qRJkyZNmjRp0qRJkyZNmjRp0qRJkyZNmjRp0uQTwHeBE8Cfv2+5Ffh/\ngJ8Bf/GgG9WkSZMmTZo0+XSwC/jr1cd/Bey5ad3vA88+8BY1adKkSZMmTR459Bu4733Aa6uPfwo8\nedO6Q8DngTeAlzawDU2aNGnSpEmTR5yNFCNeoLT6uLD6/Ab9wA+BF4H/HTBsYDuaNGnSpEmTJo8w\nGylGCoB79bEHyL9v3XGgAkwDkfdvPDIyogHNv0/R36FDhx56G5p/zfPe/Gue9+bfhv0d4w4Y77Ti\nPvA28J+A7wPPAf/1pnWngO3ABaAHSL5/4/HxcTRN28DmNXnU0Ol0zXP+kMgvLrJw/DiyKAKgN5lo\n37uX8Ojohh/7k3beU9eusXz6NKokAWC02eh+5hm83d0PuWWPFp+08/4gqBYKzL3xBpXk6i1Tp8M/\nMEDX009jMG7k7fz+oNPpDt1p3UaOjFwAqjSyaWTgLPBfVtf9KfBN4CTwf6+ub9KkyUNA0zTS4+Nr\nQgRAlSRS4+NINy1r8uFIokjy2rU1IQIgiyLpZueqyX2guLz8nhAB0DTy8/Prlz2mbLSU+ur7nv/2\n6v848JkNPnaTJk3uAlVRqJdKtyxXqlXkahWTzfYQWvV4IlerKPU6ZpcLnV6PVKmgShK1UglVljGY\nTA+7iU0eY+rl8i3LVElCqdcfQmvuL4/+uE6TJo8BqZTAzEyOcrlOW5uL3l4vNtvjceMxGI242tsR\ns9l1y60+HxaX6yG16vHE4nJB6yCTV6OIokRXZwd+TwV3S6gpRD6FFApVZmdzpNMVgkE7fX0+PB7r\nR96fIxRCp9ejqeraMpPDgfkT8DttipEmTT4mqZTAT386S6nU6J3MzeVIJgWefroLg2EjZ0LvH8GR\nEaq5HOV4HE1Vsfn9tO7ejf4xmId+lEikRN6dqLE8lkKqCFwfj7Pv2S0MbtnysJvW5AEjCHWOH18g\nHm+MZszN5VleLvLss704HOaPtE9XRwfBzZvJTU8j12qYHQ5aduzAHgjcz6Y/FJpXmiZNPibz8/k1\nIaJpGtVimWtnc/S0GOjoDWO0WD5w+2o+T3FlBblaxREO42prQ294sNnuNq+X3qNHqaTTa2LEbLc/\n0DZ8EpiezqIYbIS2jCIJAuh0ZGQPFdnE7frDqiwjJJPUBQGzw4EjEnng5/5eqFcqlFZWqObz2AMB\nXO3tH/r9/rQSi5VJJNZPq8TjZRIJgb6+uxcjkihSWFqiuLyM2enE292Nf3AQpVrF7HRi8/nud9Mf\nCk0x0qTJx6RSeS9YsRSNUlhaQqeqJAYMKEsTdB44cMcbeyWTYf6NN9amSPQmE607d9KyY8cDafvN\nGM1m3G1tD/y4nyQEYTWDxmzGaG7ccFR01OvKLa9VZJmVM2fITE6iShIGs5nA0BDtTzzxSI5ISaLI\n4ptvUlhcBE1Dp9fjHxig88CB5hTUbahWZd4fs6xpjeV3i1yrsfDmm8z9/OeUYzH0BgORbdvoOXKE\n8JYtj7RwvVcejzHkJk0+AE3TiMVKXL6cYGIiTaFQfaDHb2tzodfrqAsCxeVl1LqEL+TGplXIz81R\nWFi447bZ6el1sRo3sliq+fwdt2ny6NLZ6UanW7/M47Hg8906LlKOxchMTKxl3ij1OumJCcrx+Npr\n8nmR8fEUly8n1ob7HxallZU1IQKgqSq52dl17W3yHoGADYtlvViwWo34/XcfEF6Ox4lfvIiYTqM5\nfBQ1J7PXFsnHM1TS6fvd5IfKoye/m9zCxMQEb7zxBg6Hgy984Qt4PJ6H3aRHiomJNGfOrFCrNXqf\nfr+Nw4d7CAYfzDRDd7eH0dEQV8+V0etUgh1+9uxpQytFAaikUjAycttt3x80Co2MDEkUsXq9t9mi\nyaNMX5+PTKbCwkIBWVbxeKzs3duOxXLrpbZWKKDK63vJqiRRKxYBSCYFjh2bI5+vAWCzGdm3r51N\nm4Ib/0ZuQ61U4v1dfVWWkSqVh9KeR52WFie7drUyNpZEFGVsNiNbtoSJRBx3vQ9JEKiXSlQNLqYn\nMohCo6Nl37SCobWXwVvsQh9fmmLkEaZcLvO1r32NH/zgB3zuc58jm83ye7/3e/zd3/0dzz333MNu\n3iOBINS5fDmxJkQAslmRyck0wWDXA2mD2Wxk//4OulvNRLtVzHIZtbiCXGvcRKwfMKfrbG2luLS0\nbpnJbr8liyWTqSCKMk6nGa/XSiolMDubo1Sq097uoqfn8cneeRQQRYlsVsRg0BMM2jEa788gsc1m\n4uDBbkZGKkiSis9nveN5MTmdKKpGJiWQzYoYTQYirS5MjsbNanw8tSZEGm2WuXw5QWen56Gca5vf\nj85gQFPe+60ZzGYsbvcHbPXpRafTsXVrhM5OD5WKhMNhuqtMmuXlInNzOSRJpT9kwuz2Eb0YRxSq\naIDeaEJUjVy5lqFvtPuxCZL/MJpi5BElm83yi7/4iwwODjI1NYV79Qd/7NgxvvSlL/HDH/6Qffv2\nPeRWPnwEQbrtHGwq9WB7azqdjtbuMLpCnOJCEc3jQdM0zC7XBzpv+vv7KcdilKJRNEXB7HTSumsX\nZqcTAEVRuXgxzvh4mlpNxm43MTgYIB4vEY3eiNLPkUg8Xtk7D5N4vMTbby+Ty1XR63V0dLjZv78d\np/P+BGLq9TpCoQ/v/bpaW7F09mOUVvA5FAr5KiWDn6rB1TCiS9/6HRZFmUpFeihixNXWRnB4mMzU\nVCPGxWIhNDqKIxx+4G15nPB6rXi9d5fOOzeX48SJBWRZJeQzkdAUvJuGCMymyefK1GQdbTt3sJAx\nokoCu5YK9PQ0A1ibbBDVapUXX3yR/fv3893vfhfdTZPQhw8f5q//+q959dVXuXLlCvZPecaDy2XG\n4TCtGxkBaG11PvC2aIqCUq2Sm5+nkkph8/kawavOO7fF4nLR++yzCIkEiiRh8/mw+f1r65NJgfhy\nFodNj6LoKZXqvP76DMPD7w3VaxosLOQZGQkSiTz49/04IUkK774bXSdWZ2dz+HxWdu9+sMG7ialZ\nZs9eoxJbAU2lbcc2TIPbmZgu0NLhp6XFSSaz3gHX4TDjdDYCY+VaDZ1e/8CCRw0mEx379uHr7UUS\nxUb2TziMTt8UwPcDTdMYH09Tryt0hw3kLr/DlFile7iNLZ85xODzR5ldqrCSN7AYk+jyGZiayhAK\nOT5yqvCjRFOMPGJomsZXvvIVuru7bxEiN/jiF7/I97//fb75zW/yzW9+8yG08tHBZjOxc2cr77yz\njCBI6HQQDjvYtOnB592XYjFS165hslrxdHYCkJ6YwNPdjaulZd1rVVVjfj7P7GwOaMQadPd41o1s\n1IpFlk69Teb0OEablbaREbIWP1NTGSRJQa/XoaqNOXxJUpGkWzM2mqynUKiRz98a4Ly4WGDXrtbb\n/t42gmqxyNKZd1mZT6EoJgwGHZl3phj2tKP6G9+d4eEgqZRAKlVB08DpNLN9e4TEcobFsxfJLywQ\nCLnoe2IroeHhB5KBozcacTUzrjYERdGoVCTcbgu1lUmcdiOVXIbZ164S6Qpj3bSduNzC2GySlhYn\nw8MBYrEy+Xz1nsRINisyPZ0lk6kQiTjp7/94Rmz3i6YYecT4y7/8SyYmJnjzzTc/8ML4p3/6p+zY\nsYOvfvWrhEKhB9jCR4/+fj9er5VMRsRk0hMOP5yegpBMUo7FUCSp0WO1WDA7HNTy+VvEyNRUhlOn\nllAUFb/PwtyVLGrBTddQB0arDTSV5dOniV++QmIxg8moJ7eSYOizzxOJODCZDGu1TqrFIk4LGMQ8\n9bLhA0di7oVsVqRUqmE2GwiFHPctruJhYrEYMJkMiOL6qT2n04ymqgjpNKosY/P779kGX0gmqWQy\n6I1GnC0tH+heWy8W0ctVfF4r4uo0o6pqlGIxtj2xDUkUsShlDh9oI1tSkWWVYNBOJlPh0msnmHn7\nIgBzBh2lZIpt6HH2DKyNmtwtqqqh16+/zlQLhbXvsSMcxhn5BEVJ3kc0TSObFRHFRjaU12u758//\nZoxGPZ2dboSigM9sR1xJomllpGqO0myeYiLDs7/+P9PZ5cVo1JPP1zAYdJjNd5/eWyxWOXZsfm0K\ncGmpSCxW4tlnex96zFlTjDxCXL16lT/6oz/i7bffxvYhF8LOzk5eeeUVvvOd7/Ctb33rAbXw0SUQ\nsBMIPLwpK7lWo5rLUVxepl4uU4pGMVqteHt66D50CEWWG+l5gNnjY2oqjcmkp7/LjjB1iYXJWWJG\njatBP6GdT+AJulg8O0mhUKNalciJMj6fSmF+lr17D+B2WyiX6+SWY+iELEPbwyTfOUHB7ab7mWc+\nthHSxESac+eiCIKEyaSnt9fH/v0dWK2P9yXD5bIwNBTg/PkYitIQczabkS2bXMwfO9ZIzZZlrF4v\nHfv3425vv6v9ZqeniZ47h95kQgfk5+dp27173ZTbzRgsFopllWRKYGmpiKZptLW52T3Uht9QZvrH\nJ6iVShhX4zLCo6OIVZmVmSip6QW8kQAmqxmhKDBxLYlifRfdkAmbzcSOHS0f2tNdWMgzMZGmXK7T\n1eVhaCiI221BSKVYOH58LcvLaLXSvm8fwaGhu/+QPwUsLOR54415ZmezuFwWXC4zNpuJzZtDa5/l\nR2Fk0MPUzy+Tu3KGzPXrmOx2WreMsHjyJHazjUoqQb3eQi5XRaeDwcHQPWUNxmLlW2KRbpizPezY\nk8f7yvIJolar8eUvf5k/+ZM/YWBg4K62+d3f/V3279/PN77xDazWhz/M9mmmHIshVSr4+vsZ/6d/\nQlMU6oJA6969VAsFrv/bv1FdvcCbfQE6gwM4xSzK5CLR4ycIdLYRT9epFgVqdQ1h226uXEngsEIk\n4qRalbFajXR1uhl9oh2bzURLyErsShFjVUYrRZGrVWRRJDs9TfsTT3zk95LLiVy4EFsz8JIklevX\nM7S1uR7K9JemaSiKitF4fwyetm6N4PFYWVwsYLEY6OvzocWmyc3MrL1GzGRYefdd7MHghzqMSqJI\nYmwMg9lM7Pz5hjmVyYSQSDD8S790W8M72exC8bZhNEZpa3NRryt4W4KEejuJnn4bSRRRjHakmkp8\nfAqb34/qCKAzGOgeaid7fRoxtoQnGMHpa0eSFCRBYnGxSL2u8OyzvXcMZo5Gixw/vrAW+J3JiOTz\nVY4c6SU9MbEu3VyuVklcuYK7s7PpyLtKLFbihz+c4t13o2QyFVKpCrt3t9La6sLlMlMu1zl8uOcj\nTflVY4uULr9DdmqKxOXLmB0OzHYbvc89R2lpiVDEic0VJJ+v0d7uprvbs+44xWKVcrmOwaAnELDd\n8pu5XbC/qmrU6+otyx80Gy1GvgvsBs6zvoLv/wH8ByAH/PfV132q+fa3v01HRwe/8Ru/cdfb9Pf3\ns2vXLr7//e/z67/+6xvYuk8XN6Y/7nQxqddlFhYKLC0VsdmM9PX5oFRCEkVcHR1s+tznqGQyOEIh\nfAMDxM+fR6pU1kYrtHqV0oWTZLIy9eUZ5FSM5YUZHJ09VBJVbAaJ6sBWRIMLtZQhkSgzMtIotCZa\ngxw/Ps/gYJAWt0I8Po10U7l6aEwXfBzK5Trl8voqoJoGiUR5w8TIwkKehYWG0Vt3t5eursZFdmmp\nwPh4mmKxRlubi5GRID7fx6sibDTq6evzNc7bKpNnlm55Xa1QoFYoYPyQbJG6IKA3GklcukQllUJn\nMCAJAotvvUV461Zatm+/ZZtisYbo7WXkRT/lWAyLy4nB30ohJyBV6wh1A3MnTlFOZXGGfJjcXoZf\neBa/18T4+Bjz71wAIJe/xJYjT9B24ItcjTfOWTxeJpcTCQZvn9EzN5e/5aa0slIina40PHHeh1yp\nIFUqTTGyyvx8nkxGRJZVSqU6mgYTExn6+nwUizUURSOXE/H77/7zUmWZ3Ows8UuXWD59muzUFHK9\nTmllhVqpxPN/9mcUlqMEOloY7Lt9zM7SUoGzZ6NcvpxAUTR27Gjh8OGedSZroZADs9mwzhHYbjfd\n1pTvQbORYmQX4ACeAf4K2AOcXV2nAb8L/GwDj//YMD8/z3e/+13Onj17z2r6N3/zN/n2t7/dFCP3\nAblWIzM1RW5mBp3RSHBoCF9f3y2WyxcuxLlyJbkWPDo3l+OZ7bZGXZp8nvz8PEabjdz8PM7WVkrR\n6Lrher3RSOn6VZxtg8gBD+kzGSqlCkanE01vI70Uw6PUMfaM0uvMYhyfp26w4uwaYKlkR6iUiMcF\nnjnQjq2jB7FYwaRVqRfyaIqCYzWGqC4IjV6uToc9ELjrGAiz2YDFYrzlhvVRh57vhp//fA5JavTO\nZmZyPPVUJ16vlePHF9bs9rNZkVxO5OjRvtuaiH0cbhdnYzCbMdxF3RWzw4HF7aa4uEhhaQmj1Yoj\nEsHm9VJYXLytGLFYDBQFlYxkxxYcpqSoiAmZrlYbdYudq//6I4qxhqgsp7NMvH6c8OhmXEoBg8VK\n+3A39Wyq0VM2q2i69zLKzGYDmsZqYKMJk2n99/fG53wzqtoYfXK2tNzi7Gl2OjE77t6o65OOJCmY\nTPp1sTaSpGAw6HA4zMiyes/X8VI0SnpyEnQ60hMToGkYbXZURUWuSYj5Aj1HP0PVePs4JFGUuHAh\nxg9/OIVOp6OtzcXERAqv18Kzz/atva611cnu3Q0jtmq1YRWwY0fLXaWibzQbKUb2Aa+tPv4p8CTv\niRGAP6UxMvK/Apc2sB2PPF/96lf52te+Rk9Pzz1v++KLL/KVr3yF+fn5j7R9k/eIX7pE4tKlNZfJ\nSjKJpmkEN21ae82NSPQbQgSgXJZIVrwEOjqwuFzkZ2eplUpYvV5MdjuOcBjTzdNomoZBr8Pv0mNq\n6yV/OYRxJY5Jp6K3WbD1DZGLpynWPRT9I2z+wlauTeXJ13WEK40bjt1u4tpkjnzcTGJiAX/AzrbN\n3biMVfyDg5TjcZZOnaKSyaDT6XCEw3QeOHBX1T1DIQf9/T7Gx9Nr7zMUstPdvXHOvzffICVJZW6u\nkW57c90faPT6U6kKHR3312grMDREaWVlzU1Up9cT2LQJ6x3cjrPZ90zN9AYD9VIJIZVCzGbRG43I\n1Srujo47BhMHgw4GBnxcu5ZeG4UKBu209LUzMzuxJkQAbB4nZVGlGE9iMUJri4OsaEEy+VEUlWw0\nRY9SxWSyYzYbCAbtnDy52DDashvZvj1Cb997572ry8PMTHYtZgbA57MSCNhRbcOUk8nGCImmYXI4\niGzffs/BvJ9kOjs9+P1p/H4bpVKNel2gp8eL12vDYNATiTjuylekVpPJ56vYbCbSU1Nc+fu/Z+ur\nr+IIh0ldvYrF48Xq89GycxeaBoo7wvj1EpH2wC1TcIIgMTaWxGw2MDDg59q1FMlkhWRSIBJxMjra\nGN27YcTW1dUwYnM6zbhcG9fJUGQZTVHuqpjiRooRLzC7+rgAjN607r8A/ycwAPy/NEZPPpWcOHGC\ny5cv873vfe8jbW8ymfjlX/5l/uEf/oHf//3fv8+t+/RQK5XIz86us7tWZZnM5CSBgYE1L4V6Xblt\nzzKeqrH1+X3MTcZxHwlSmJ/DYQPvwCCOngEu/9sxdJqC12vFYTQS3rKlEVdSyNN94CkKS0u42jvQ\nu/wkczKaScdnPjPAykqJ2ZUql8Yy+Owa1WQcf8CB19XBsWPLbNoUxNLeS14QmMzY+fwX92Fx25l9\n/fW1Hq6maZTjcZJjY/QcOvShn4Ver+OJJ9pobXWRSJRxucx0dnoeaPpfva5Sq92+B6+9v/rYfcDd\n1kbv0aPk5+ZQajXcHR14biPuazWZ8+djzM7mkGUVn8/Gnk1mZFGk58gRpv/935HKZVRFQanXCQwO\nAo1pmeXlIoJQJxx2rAVcd3a6yWZFuro8bNkSxuOx4uvtJTLUR61QwOJyYPIFKYgG9HodznCEWqFA\nPdc4tzpNw+v3U81l6WgL4fbaGRtLItVlyvE4QjLJ/LsOPv/L2wi1+dGbTHR2etmzp42JifSaoNqz\np60RnGz103/0KEIy2cgqCgQ+EeXp7yednW5GRoIUizUcDhNPPtnB3r3tqKqG02lmdDR825GRVEog\nGi2h04HRaGBysjH92NXlxnDpKkIiQfb6dUa/9CXig4PUhQqRbVup12ScXX1cy9qQlMb00PvFiMmk\nQ9MaU5xvv73M0lKjpIBer+PkyUWCQfs6DyKPx7qhv2dVlslMTZGenESVZbzd3YRGRz9wm40UIwXg\nRvfFA9xc+Su3+n/6g3bw1a9+Fe9qfY7h4WH279+/1vufn58HeKyfa5rG17/+db7xjW8Qi8U+8v5+\n5Vd+hT/+4z/m5ZdffqTe30d5/rDQFAVVuU1lVUlC0zRuXFq8Xisej+UWh9feXg8zMwVOnUmjKEbc\n/i0IZgNyyU6hUMWxZT/FxQXKFiMDmzbT/oSD6NmzJK9cIXHlCr7ubmShTPStt4ns3MHAoaMIkkQc\nFaNOYfeQjQuvvwNeGy3uThYmNKxGI9msSDpdxWAwouWhKGqYjI3pGZ1ej85gIJsqkkwK2NIKcssQ\n7V2BD03jM5uNt8RVPEjcbvPqZ5pdJ/58Pus9FRq7F1wtLbekYMP63t3iYoF4vIzFYqBWU4jHyyzZ\n9KixOFKpROuOHeTn59dGFBRJolCocvz4PJmMiCSp+P1WNK0RKGww6LHbTVQqEhcuxKlU6nS3+ujZ\nu5Pk/ArZgkKhqDC0vZNQbycWp4Pwli3Uy2UkQcAWCNC6cyeKWmV0Z4DllIKiaJQTCfJzc2iqRjyb\nZeJtyNtFbH4/FpeL4b17GRjwU63KeDxWjEY91UKBermMyW7H19d3m0/o8eCGYL1fbsSaprGwUGB2\ntvFd9HgaI3b9/T6MRgP1uoSiaASDNsJh521tBZaXixw/Po8gSIRCds6fj2GxGAmHHRTyVQIGA9Vi\niav/7b8x8h//I672dmqCgKbX0/3cUaaFAMl0mS1bwredovR4bOzb186VK8k1IWIy6Wlvd6FpjRHF\nB2mImJ2ZYentt9dKB8RzOeTqBxcw3Ugx8jbwn4DvA88B//WmdS6gBAQ/qA1//ud/fsedv39K4nF8\n/tprr5FKpfjyl7+M4X1xCfeyv2eeeYZXX311XSDjo/D+Ps7zj4okNW4Q+XwVl8tMa6vrruILLB4P\nzpaWdRkV6HT43xczYrUa2bOnjfHxNIJQRxAkOjrcdHa6+elP55Dlxo2zUKhRqdS5eDHO9u0tzOUs\nOFu3UAOWSzaeGG6l97nnEDMZrD4f5ViM6PnzBAZ66dy1g8LlMyyPLyCIJvr27sYcNGO37OPs6SWi\neQO9I17Ks0mWlmREUcFg0CMIEvl8lUjQgzncTiEnkc+LVC0uarpl9JqZS2Np4qkaBw50PTCDr7uh\no8NNItGwtw+HHWzb1oLPZ2Xfvg6uXUtSrSr4fFZ27Wq9bx4y1apMLFaiXK7j89loaXGu81JZ17tT\nFDybt3PuUoUrY2nsdhObNgUaQYyyAZsGC8ePo9br2INBVFluVOV1OlE7zKhqY2rN4TBht5v44Q+v\n097uIhCw43ZbeOedZQqFGkcOtjB35hpGnQ2r3YpbEnBuGcE1sgXVZMdgMq1NuaFp6PR6asUiJocD\nu8uOPtMYlRESCbTVKTZVqmNyuBDECm67ndzsLCabjc6nnsLhMKNpGsmrV0lcvoxUqWC0WgmPjhLZ\ntu2xcleVZZXp6SxTUxkURaW/38/QUOBjxxfNzuZ4881F6nUFVdUYH0/x9NNd2Gwm5ucbwcCplEip\n5OLixQRHjvQCGsViHbfbTDjsZGwsuZadBg1xYrMZ8fmsKPUaztY2/IObyF2f5No//RN9R48yePAg\n5mCEpaUCdau2VpDzTjz1VCc6HZw6tYSqarS2OhkY8KPTNaY+BaH+wPyXMlNT62oYARSWbg0Sv5mN\nFCMXgCpwYvXxWRrTM78N/BmwBdAD/9sGtuGR5lvf+hZf//rXbxEi94rBYOCll17iBz/4Ab/zO79z\nn1r3+KEoKmfPRhkfTyPLKnq9jt5eL0891fmhIwE6nY7W3btBa/QqdXo93p4eAjf5KyiKysREmvHx\nFOVyo/DVU0910NPjo1aTb0mPq9dVKpXGyIqqahSLjaJn2WzD4rteLhM9f57omTM4W1pwRiK0bNvG\ntX/8R2yBAJrBRm6mhFWp4Nv1FKd+dAZFbyFXL9EVNpLPVGjrCJJLFqkUBDYP9SFXq4h1D1diFq4c\nv0p0MYus6njm+RE6tnUzGZOoVPIMD9+bP8FG8/zzfWQyDafRQMCG2dy4NG3eHKK310u1KuNyme9b\neq8oSpw8ucjCQgFV1TAa9YyOhtizp22tR31z787iD3Dy51OcOFekKDXmvxcXC7zwwgA11UDftu3k\nJ66Rn58HnY7wli0UFhfxbN/HyZ9PcfnMHBYTuIMentjXzZM73FjtNiz1LJpYpRBLEewMoSXmuPiT\nt0gkKzx1eAiDz4WrpYXFrJ5AtMTQUBB/fz/FpaW1+Ba9yUR4dBSTzUZLC3i8VuKrM1l6mwOrx8/i\nUoGlqRTbTJ0MhDsorqwgiSImmw0hlSJ2/jyy2PheSoJA7MIF7OEw7sfIbXV6Ostbby2uxcKk0xVq\nNZknnrg7r5jboaoaExPptewTVVWx200sLxeZn89z7lwMWdbYtatlTZy+8UajU6IoGgaDjt5eH+/X\n/SaTgXq94ZpsqFXRTCqOoJ9KshGjlJubwxWJ0N7ejrWa5cCzT+Hz2z/w+2+3m9m3r5NkskIiIWCz\nGRFFiXSn7nLUAAAgAElEQVS6QiolMDOTZWDAz9at4bXf10agaRqaeu+pwhud2vvV9z3/7dX//8sG\nH/eR5+LFi1y/fp2XX375vuzvpZde4jvf+c6nWowkkwITE+m10QlV1ZidzdHT46W///bmUzdj83ob\noxW5HHqD4ZbgxaWlAqdPr6ztv15XuHIlSXu7G5vNRHu7a53VuNVqpL/fj16vQ6d7LxylpWW1CJ4k\nEd66ldjZs5RjMcxuN4nLl1ElCb07gN5gY+CJTqrlMgvLAunFODodmPwmpk/E2PHSZ6iJdWoLSbr6\nQkT0KYSpKhPydlay4B8cQmCFak1hoWAjrLqQ5dJqT2njrOMVRaVeV7BajXc9+mIyGWhpuX2mgM1m\nuu/ukDduJjfOiSw3hGZ3t2etHdlYBtXXjoU6stnB3PgMXoebck5B1RkQBInFxTyHD3fjsqmERkdx\nd3Y2RkUSCWyBAEvxGlePnUVI5ojFExitVnS1/ewcsmGXNMbPTOIaGCF+7RpP7T2EOL+Ex21BECTS\ny0l0eh1ipYZv37NrTp/ujg76X3iBwtISqizjbm/HtWrO5vPZOHy4F5sqsDSxgMUfQMkluXLsHLZw\nK5fOzCFtibBn1Id+taZNNZdbEyI30FSVarGEySfd03l8WCiKyuRkmmpVplaTMRoNWK1GZmZyjIyE\nPrIzqqqq69x6jUYDvb1ejh1bwG43oqqNjkY8XiaZFHA4zJw4EWuk4ht0KIrG3FyOgQE/fr+1EUzu\nMLN5c4iZmSxGnUo4YGXhBydQy3k6n38RGRNqPkFqchLfpk1E+toJhe/s5nszTqeZo0f7GBtLkkwK\nQCOOJJWqUK024p1umLNtFDqdDv/AAJVUap0ocbW2fuB2TdOzh8Rf/MVf8Fu/9VuY7lORq+eee45f\n+7VfI5fL4fuY7puPK+Vy/Zbg0hspjneLTqfDfgfXzIWFwpoQuUEmI5LJiLS1udi6NYIoykSjJQC2\njvgI2WssXo/h8tsRdE6sDiv9/avnR1UJDg4y+vLLZKancbW1YfV6kdq28tbbswjlIoHOVp48uhtE\nGy29bWjlHGoxjRLsIZ8uMuQv4/XnsDkMCCtlJmI25LSbd99N09XlITjQy+Jigflole2rvTuvd+Pi\nLhYW8oyNJSmX64RCdrZujTwSaYPvJ5er8v442FpNWRvJGh9P88aJGPlYGm/AyY6nWrHYzNTLZYaG\n+8jmJep1hUjEwchICL3iwdPZyfwbb1CMRtEbjQy+9AV+9m6cQl5ElhXMdhtisURybhHnnu0kz76N\nJ9yKzaxy6MXdpNIi+ZkcicU8Ho8Vf8BGqVTH4TTjclvX+YY4wuHbVsutFgrY6nmOHOmmuq+NN14f\n58p0DFukHZvPS61UYnkxz4EXtmFYrWVjtFjQ6fVrNw6j1UrV1cbJc3mkS5P4fDa2bQvfUSw+Ctyw\nZr96tTGlZzDoaG9343CYUJSPbuhlNBro7vasjWZCw/bdZjNiNhtxuy1YLEYiESfxeJn2dhcOhxmD\n4T3xpigaOp2OdFpkZiZLMGhn//4O9u/vIJ8rY1XjtIwOsZJW+enrk8Rmlgl1t/KZV49QXFwkNDJy\nT20OhRwcOdJLKiXwzjvLJJPC2miRpsHMTPaexYhcr1NJpVAVBZvP94GlDgD8g4Mo9TrZ69dRFQV3\nezstO3Z84DZNMfIQyGaz/PM//zMzN8cnfEzsdjvPPPMMP/nJT/jVX/3V+7bfxwmn03yLoY9er/vY\nJlk3uF1tFr1ex41pdbfbwpEjPWSzIjpVoXjtHOWJKG4ZKjmJSFc7gwefxupoDPM7wmEK0Si2PUfR\ndRykbtbhDli4/Ff/SmIpTSWVInp5HFmBz/xPLzF8cAfF5RWMlq0YLFYCXiPlyUuIK1GMlq2MT+So\nKXr279iPzyYj5nKU8mZAT1ubG0VRUVWVlhYnb7+9RDYrYreb6O310dvrxWQyUMlkUCWpkZZ8j+mc\niUSZEycW1nqShUKNQqHGCy/0P3JVRX0+K6JYJ5msUCrVcTrNdHa6cTjMxGIlzpxZoapaEIUqlZKI\nhIneLT2MX14hEHYTiugwGHQ88UQ7VqsJMNH/wgu4u7spzM9j8gXJiGaU8hK5dJl6qYTDacEf9hIM\n2PAFXaibt+Boa8NlUTlxReb82WV2DfYRX0wiywpGvUbIrad/ay8OTxV9OUlVtHJtPM3ERAa73cjw\ncIihoQA6nY7c3Bwrp09TK5XQGwwERjZj9EZo2+ekLlQQ4nHMbi/29k4Umw9RlLDZTDgiEVzt7RRX\n5/RVd4TT5zIYQjZM1trqeazywgv9j0RBtdtRqUiYzQZKpTqqCuVyjYWFAt3dHpLJ8pp4gIZp4dxc\nnnS60kjdD9oZGPDfcQpkZCSEomgkEmWKxRqRiIMnn+ykUKjS1uYklxPJZquN1OwWJx0d7wkXRdEo\nlWokk2XK5TrBoB1RlDl7NsrBg51s39FG9koCoW8f16bGsA/tYNvWrSxenuTidJU9vUa87xu1uluM\nRv2aCdvN3Gtgb61YZPHUKcrRKKqiYHG76XzySTxdXXc+ttlM686dBIeHUWX5Q8ULNMXIQ+F73/se\nn/3sZwnc55S5G3Ejn1YxEg47GB4OMD7eSFm8MV/b1nZ/enTd3V6mp7Nr5lLQ8N8QRZlTp5Zwuxsp\nsKGQg8LiIkKpSkwOEIsW8XjduFQ9tVwaq6MDAL3BwHItwL/8f1dZnEliMJvo6Q0ycuhpUtFsQxT4\n/chGO/Vigf1PDzA+7iWfzNHa6mTXsJ0LP5vG7vGRTZUoZfJs2txGxCFhrWY4dXyKtq4g3bu30N7u\nwuezsrRYYOxyHB0q755P0t3tQSjXMShVTOlZ8gsLjdosHg/t+/bddW0WgGi0dEsBunS6QjpdeeTE\nSDBox2w2Eo2WkCQVUZTYvDmIw2FiaipDva5gD4dQFRkhkSCXzHHghW1YAiGiiSrlskRnp5taTaFQ\nqOLxWNEZDBQWFqgViyREOxPji7SFLQwMR5i8JFIpV+nsaWf/C7u4Op7h6oU4Dm+FruFO7CaF1oAR\nW9cAL/yPQZSV61TTCbq2DbL05gmSk1O079mFPHCAN84LFMsKFoPKpYtRvvTyVgZ6nETPnaNWbGRS\nqLLM/NnL+Hv3MDYp4XQ6sHf1kUoLWH1+roylsM/m6Oz0Eg7b6Xr6aXKzs4iZDEk1iD5gX+eNk8tV\nSaWER1aMZLPV1RiuTs6fjyEINUZHQ/j9Nt59N0qpJLFrV2Oa4OLFOPF4mbGxFPPzeUwmPQcPdvHZ\nzw7c0nFRFJVkUqBcrmM06tm2LUJrqwubzcj4eJpaTSEQsOP1Wjl4sBOr1UgiIXDlSgK73UShUKOn\nx8ulSwmi0RKRiJNCocr0dJa+Pi8eLUe8bOJf/+UyE2MxqvkcLe1+jn7uMMvj81S7g3g/YnC/12ul\nq8vD+Ph7JnZGo/6eXZQzU1MUFxfXnldSKRbefJOhz3/+Q0XGvXRommLkIfA3f/M3fOMb37jv+33x\nxRf5wz/8QxRF+dhBsY8jBoOePXva6ez0UCrVsdmMtLQ475tbZ0eHm2ee6ebatRSVikR7uwuTycDP\nfz631vsIh7McOdJLVaxxcUJg6moUh8tKdCnPypIdf18Png4orqwQnZrnx38/TjZapKWtnbJqZSUu\nYDLpaBvqIeVwYrBY0WQZfVUg5IH6cBtL1KnmcsxeL9L/i79IMZ4kO1egrSvI8IEdJFcybNvRxtCW\nViqFMh29NuYKdc6eWUZMxHh7IsfI5hC9AQM9gTq6+fMspzTK4+fxd3dicbmopNOsnDmD7bOfpVYq\nIZXLGCyWD6xme7MR3M1sgC3IxyaXqxKJOPjc5zYhitKac2YiIaxVQTUYjXg6O1er1mqEu1sYCdga\noyZVBUGoc+rUEi0tTp57rhcllyQ/NweqSsHqJr6SJRhoYe8OH1t3tCOWKozu6kZCz/i1JGavn9jE\nBKlYjv4dmwi2+qkqRozt3Wi1Mv5whOs//THzp86g0+lpe+ZZ/u3v38LbP4C5lKeUziHMG7jYZqQz\nuJl6qYTBYsHg8pEp60jlsoRKaZ7c183khRnysTSj/X4GW2VSNYl//dcZnE4zW7aE2bYtwuatW9Hp\ndFSuJjHP1G/5zB7F83gDg0FHJiPi8VgZGQnR0+OlWKyRy4no9XquX88wNNQIME2lKszN5deybioV\nOH16hdZWF4cP96zb78REei1OLJsVee21GQ4c6MTpNLFpkx+Hw4LTaaalxUGhUOP11+cwmw3s2tW6\n6pUj4/fbOHlyEU2Dyck04bADnQ68dtCqIhd+fhGDpCEkkzhCQTJFmVhOw+OzEd66lbo9uCZ47wWd\nTseuXa3Y7Sbm5vKYzQaGhgL09nrvaT/F5eW1x+VEguLyMkarFXswiH9gAH9//z3t7040xcgDZnJy\nkoWFBZ5//vn7vu+Ojg4ikQgXLlxgz549933/jwOKKGAV4hjrVeyeEGbz/XUN7e310d3tRZYVisUq\nr78+t1aG3eez4nboySVzyJKZclli584WjGoVq9uN4vCRLhuIFAosvvUWBdlOOpqlmCpRFUSCIyPU\njA6qso6Ax002VcZotdDWHcKmE4nGdSzXK+QFEAsqizNJPv/KbgZGKnh2Ay4/7xyb5OqFGWamswxt\nbuPpA63Mn3oH346ncLW4UVu6MLk9GHQqAyGFqX//MW39HdTFJLpMGp1cJzgygtFioZrPk56cpByN\nUoxGETMZAoODdB48iPM28QptbS6uXk2ts5H3+22PVNYONGzy1Xwcr76MoNkQVG11yB5EUaatzY3H\nY6FQaGQ/GcxmWludhMMOkvEiWi6Okk7jdjhwhgKsxPPMTyxjSV9HzOXIz8/j2B1EEkWujSXp39pF\nR1cQg0FP60ArP31tGld7O5VUisimfoKdrYS6I9jMGg63A4Mqkjc78LV5WBAaGTMGA8hKo6demJkm\nmxVQtIZomrk0jfhMBHswSKZm4+y7UeKxMm0DbdgtYXxmie29eqbLAuWpZcamFPRdIyiKnXS6Qi4n\ncv58jFDITjjceJ8Oh2ldKqrbbSEUerTO482EQg4iEQdLS0UmJzMIQiNmyeu1kUwKmEx6FKWRvWKx\nGBqVmreEMBj05HJVMpkK8Xh5XfprrSYzOZlBllXqdZmlpQKRiANBkFZHQCps2+Za8+I5fXplzU3X\n5TJTKNS4eDHGc8/1EQzaSaUaU4J9fT6G+xz0hDSKC2lChjz9ezrZtecwb/xshkTJSKmisuPQPtI1\nO5d/urAqJIJs2xa57XTxnbDbTQy0aIS1KqgqLqsVHfcWU2jxehGSSWrFYsMUsF7H7HQiCQLL77yD\nxePBEQze0z5vR1OMPGD+9m//lldffRWjcWM++ueff57XXnvtUylGxFyO+ePHqawWijNYLLTu2kVk\n69bbvl5RGrn3VqvxrlPdCoUq2YyAVJepiAr1ukJrqxO/pUp1+hz5t6dZ7G4jvPcAYb+R6Jl3WJzP\nEF/O0DbUy+5X/gP2movs9evo3X7aW+1USgJiRaSeyxKvWDh0eBs2xUNNgZ4eL32ddpRSHsEYInp1\niuz0NFavh9z8POd+IrOj34CldwvLBZWrl1dQJBmX08TiTIxLDhjtCqAZVC6fnmHh+gpFzU2k1cVo\nT4hKMo1t+yZCw3sQsoMYjDr0LhM6uYqqKNRKJaLnzpGZnAQgeeUKtVKJLa+8gvGmYXxNVQn5zRw+\n3M3Fi3EEQcLns7FjR+QjZzJsBKVYjIU33yR2fYmJyTTBvm5adu5nIdmoFxMM2vD7bRw+3MPERJps\nVlyrVmwyQvryOcb++RgaOqRqFU/AjW/7HpYvJvBVl2jdtQtJFKnOjzM4PMz1hSrLeRP6kJ2dO1vx\n+gwEQk7Eheu0hJ0MDoVQotPUx84T6OtCM/Xyw3+5zPLFa3RtG2J097OEKwK565Po8nG6BrqYuXid\neg3MFj0Wm4m+Xi9LV6Zx9gzy4//rGLGlDMN7hzl9OsrrxxJ0h1S8RoHNW1pJRWNImkpxbIJw/5Nk\n8o0Kr6Iok8tVCYedhEIODh7s4uLFRuVmj8fK9u0teL2PpiW8KErIssrBg11MTmZQFI16XV4rXGcy\n6env962KCAO1msyJEwtcv57DYjGwY0eE7dsjWCwGTKb3bvT1ukKt1hDWlYpMd7eHmZkcFy7Eb5r6\n1XA6TYRCjnWB8pKkYrMZqdVUMpkKzzzTTTxewuez4XYY8No16skVMmMX8LmMLL3+I3RmM7905BDX\n8l56Rrvxhdxcv96w7a/VFC5ciOHzWentvXsxkZuZYfHkSZR6QyStiCItO3bgam/HEQrdsdzBzQSH\nhylHo5RWVlDqdYxWK5Ft25BEEUkQEDOZphh5HPnRj370gWZuH5cXXniB//yf/zN/8Ad/sGHHeFTJ\nLyysqzqq1Gokx8bwdHXd8qOLx0tcvJhYDeI0smVLhIGBD07/nZ3NcuzHV1icSTK/UKCrx0/XYAQx\nI5JdOUfx2nmqmpWzJ6fYuZIDp59cWSORljDanaRX0kjJZeK2MInL1zAZNEaGD1JIGliJCtTEKi1u\nC3algsFk4tAvbMOlr+ILOBCVdk7+wyXiF6+Qm5uje+coQ09tp7XTg1xdwJhPIIjwC5/fhhhdQENH\nuqgxO5Nl6NlnuHwpQSEWR85nae/3ozPoGZ9IM/zUVnTeMOcmKyy+ew1NltjyzE6e3NuBzaggiyIG\ns5nI9u0UFhao5vMkrlyh5/BhvN3dQMPMKHX1KrVSCXsoxOF9m9HZPdjtpvvmgnk/UGWZpVOnWHzz\nTeR6nYDFTvTCJWxeL22Du+jp8a65VEYiznWOleVkksU3x0i8c4pybAWdxU6tVKK4tERwoBd/0IdS\nlqkWCnQceAajw4EsK2x7to2yYqPNXUedfZep69cJ2oLUe9oolGWm//01Zk+cpLPTw/JbJzEE23n2\ni6/wg0SC9Pwy1+QQu57YT3FuGnF5liOvPE9FqJG7ME8uL7K5J0JmOcHFjIFNLaOkFRfDT7ayGKuz\nFBOxWIyIigNdTSNfNdIy2EHs2jRyFcxGaG11odPp0Ot1WCyNkZa6IGDOzrHZnkP1WAn1ePG1P3qZ\nNPW6zNhYiunpLIqi0t7uZufOFrZuDXPtWoqpqQwWi4GWFg+zszmuXk3R0uJkbCyF223FYjGgKCpT\nU1n27+9gdHS9/0Zj+sVJqZTF5TKTyVSYnMzgcpk5fXqFZFIgl2uYK/b1+Rkc9KOqGjodlEp1PB4r\nQ0ONa8qNVPJ4bAGllOc3/odRXBRx+H0osSSugIvpt88jxKM88ZWv0Le/k3/793lEUcZsNmA2G1AU\njVisdEcxUhcEyrEYcq22ZuOfunZtTYjccOUVkknadu9GU1W6Dx1aK6x5J1wtLfQ9/zz2UAhHJIIj\nEkFTFKr5POh06O9Tx7opRh4g8Xic2dlZnnzyyQ07xqFDh3j55Zcpl8s471Ck65NGKRajuLJC9vp1\n7MEgcrVKvdRIr5WrVaRKZZ0YKZfrnDy5SDZbXXv+1luLOBwmWltvf9EVhDqnjl9n6eo00Swsz8SZ\nHZvnl351N4GwjktvXqU7YiO6UKBSrhEdv87IM3spCxJ1TYfLYsbrMiEszRNTZOyhCOmzp3CrGgeH\nR7B+7gC6YAf5eJoz//hjJKOdzb/wHKPbuunudpB76xTt7U6mjhXoGOzA7PUzPlujUBOxSBae2zFE\nMJbl3f9+AipFlJqItyXEZ754CDWf5PrPjhG7Mou7NYLPLqPXFTGavAzt282bx+cpZisER0aQymVS\nBY2cMYzTnGHs7/6O1LVrOFtb6Tp4kPTkJCarde0CJ6RSLJw4gSQ0PA2quRzVfJ7+55/HYLCsVTLW\n6fV31QvbSGqlEsmxMaq5RjUKh1miv9WBT02z86k23L7b/14USWLl9GlUWUbNxenu9hGNC1TEMk6v\nh5DPiNOicu7kGUwOJ7JQxGg2M/KFl0hNreDv6WT69AQunUBuOUpeTpFVnGzeN8KJvz2H1WYGoxmr\n20ZqcRklvUKkM8zy5CKiKNFx+CihiKuRUhn2cPRQBwN9HqS6zPLkIud/tsS+X3mJsbE0uaoFyeon\nX1wmlyoSDNqpl1Ti596BXJRfemUnttEeZEeQ+YqCbbV0fFubi0jEiaZpRN99l8zU1Nr7r8xNoj9y\n5CMHUm4U169nOXcuuhbLMjGRRlFUDh/uYc+edvr7/UiSwptvLlIsNr6v1arC+HiKTZuChEIOstkK\nZrMBp9N8S2dEp9OxY0cLoihTq0lEo8W14o0rK0WMRj2LiwXGx9N4vTYKhSpXr6aQJIXNm0NYrQZe\neWWUZLLCP/7jtYZ3TLLIkX1BWsx5Fl87Rub6FHK1iqe7h71feJb05CTmSgIxn2diIk2pVMdsNtDe\n3jg/d4p/q+bzLJw4QTmRaBTitFga1aJX/WGUep3i8jJKvY5UaWQRidksubk5pEqlkYIry3h7e/H1\n9WF4n+WEIxSifd8+lHqdciy2lgZuDwZvm2L+UWiKkQfIT37yE44ePbphUzQADoeDvXv3cvz4cV58\n8cUNO86jQm5urtHTrVYpLC5SyWToOXIEg9mMUq9jstluqZyayVTI5dZ7j9RqCtFo6Y5ipFiskV5J\no6kqhVyVUjqHKktMXpyn/akQsiSj2SMsRJOYLWZqqglZVmhtdSHVZRxGCa1SxOrxMHt5huee24yJ\nOmImg7maoSNo4NS5S1z4+UVURWXboe206BIk3xzDvuDG7HKxY9RB8Lc+TyJd5crlGP8/e28aJMd5\nn3n+srLu+z66+r4PoNFA4wYIHiABUqYl0RqNLVvhmRiNx+uYWdmxduynCX/b/TJryzEbnghvxEg7\nY8tjWaKsGzxEiSdA3EADfd9d931fWZlZ+6HBFimSNrUiAFmjJwIRlVVdeN/o7Mx83v/7f56nv0tP\nbHUTxexkJapgjq6TiuaxWnV4PF7qlTrKzhK6oJljJwcYG3GiCFps3Q4EjQZrwIertwdJjaDRaNBo\ntfinD6C3mElECvSOaOg6epRqKkU1kSC9sIBndBSTx4PpnpdNORbbIyLvoJ7JUM9mUWWZxPXrVBIJ\nBEHA0ddH8OBB9OaH03vw0wF7iiSBJKFHwmT88IbvRqFAI5/H2tVFYHoaT6tF/34tsbuLGAxawgMB\nYnOL6MwW5GKGwnaEdktG7ogMnj+PrlUid+0tmnYTotFMOlEkEdti5lA3FpOIXgOl7S0Uqx5NR4vZ\nakBoJmnk8wSGenaP3W5soRDL1xexe+2YNG1iy6vUKypjZ08zH9NgLBSYmQmQydSw2g043FZ0Qhuz\nARS/G1/ATmXpDj2PPo6trw/DnRgdp4Fwv4+BARcmk456Nvs+22613Sa/tvYLRUY6nQ5ra/n3NdXG\n4xWKxSYulwmXy0QiUdlzPr73TcxmPalUlakpP+F7FZ+BARcazfuN3VwuE+fODVIoNHG5zGQyDS5f\njuJ2mzCZdLjdRiRpdwtFoxE4dCh4L4NIYXjYzcCAm2KxRbMpUy63OHWyh19/xMj68/+D7MI8HUVF\nkVpU4zG8oyMIg72oikouXSEctjM3l0KSFDY3iwQCVnp7P5jQFzY2qCaTe8dKq0VmcRH34CDVRAJF\nklDvxYXYurroqCoanY56Ok1mfn7vs3I0ilSt0jU7+74xjHY74aNHyS4uUstksAaDeMfGPpJs96Pg\nV2TkAeLChQs888wz932cc+fO8dJLL/3SkxFVlknfvbsXwGT2+WgUi2Tm5wkcOEC7ViM4M/OBF4sg\nCPfCtHZL1O22+j7L5nfDYNBiMukoi1rCXVZ0SpB6XcLhNKGa7BhcLqrlOuVCjU6ng8Xnxdw3ij1R\nwV2q0UhlCY8NoA32M2B1snlrHqvdjdPjxWi10CoVaJWqqIrKsfOz9LpkYj/8Dk6fk9W4iVKpwdSz\n5/GGfWi8erYiNSLJGv2z+9FZbJRzJfTNOgcOBCiXGthsBhzdZmxqiUosT/XOKrLeSXi4m2b0FsUa\nWEsO9KMORk4dIhktgkaDqEjklxexe1RW1+bRaLWMf/rT7Lz5JhqtFt/kJN7RUcz39og/zPZZEEUS\nN26QX/tJFmZmfh6dyUTo0KGf46z//4feYiF44ACVWGzPSt3odOKbmnrfSvDd0IgiFp+P0vY2xUiM\n9PomRo+PvqOHENU2WpOJSq5AR2oQn19GltqYXU6Udpvc0hJSV4hKKoNSM1IvlfGPHyQWAb3bR3i8\nj1vf+A56ox6zaEdr99AqlejfN0BXv4+xo5NITRnFFiJ6e4HVS8vILRnrwCDDTz7Ba5dyXLteQavP\nMTrmIxj0Ew7b0YkdLFqZVjYFrSqDhyaZGNSSn38b4/omFruRPmuF3uMH7qmFdqF2QHZ2ozq1GI0C\nOrmGlN8llr9o+CDyYDTuuqKmUlUMBi06nYhWq9kzQ5QkhZmZIFtbhb3vBINW+vo+XGGSTNaIREq4\nXAaGhpwsLmbIZGq43UYeeaQPnU7Dvn1+mk0ZVVXJZHYN2DY2Cjgc+r1elFDIxumJDko5T+zKFZr5\nHOq9HoxWqYSo1+ObmEAw29jIdRgddeP1molESjgcRo4c6fpQA8Hau7an34HcaGDy+bD39lKORjHY\n7Zjcbjyjo9QzGQxOJ+VY7D3uuh1VJb+6imd09APvmxafD4vPtxse+jG78v6KjDwgyLLMyy+/zJe+\n9KX7PtZTTz3F5z//+fs+zsOGfK/k+A50JhPe8XE0ooh/chKT2/2BJUSfz0wgYEaWO/ea1BT8fgs9\nPfb3/ew7cLtN7D8ySGJxjfrGJqWdAsH+IMePdxOviEw8cYryxgoHT2uotgR6T53k5TmVoaEjPPO0\niXZLoq2zEllPIZVLXP/RImaLnpnjQ+w/OEt1e4OJPh39+84R9OiZ+9rXaRYKRBa3MFoMGEL93L24\nQODUo1y+tE3H4KYm6vi7b+9w/GQvwwNWTHYrumYet9jGYgbR6sDkcrDz9hUEg5Hw5ChXv/oNDGYj\nfb6+QyYAACAASURBVMdmKUdiRK9dY/r0OYyCgiK3qVc7OEaD9LvbpF7YfQhp9XqGnn4aAeh79FFM\nDgfNUol6NovWYOA9XvfsPuA1Oh3Ve0nU70ZhcxP/9E/cPx8ktAbDboy5IFC555JqCQTwjIx84L53\nu9Ggo6qY3G4QRaK3F4nmVNqyC02yCfEKE7/+a+gNuyvvbCJPuSxRr9QQMlWGzz5GcXMD78gQRrcH\ntd1E7ojIzQZDR6bYyBvpfeRRqqkMrXwat99J36OPITfr9IY9JLMpbn1tGddAH0aazN2IEQi5qZTq\n1DI5hESS8GAXeqsFm03H2JifdLrCk08NICU2GH+ul50VqMRUXDboFNJs7aTpPt4hcfUKXY88Traq\nodgu7yllri9UuHalSCRWRe3AkSNhwu4upoYGHui5+qcgCAIjI549d1FBgEDAgtNp5BvfWKDRkPF6\ndxuoJya8bG+X9hYgo6NuHn20j0KhiSCA32+mUmnuKXDebX62vp7n4sUIjYaMTqfhyJEuvF4L6fSu\nBHxnp4her+X5519hdjbEqVM9+HwmenvtFAotXntth3DYxrFj3ahqB0N7mXqlhMFiIbe4uFvBbbWw\ndXejs1pxDQxg6h3khX9Ikk6n8fvN9Pc7MRq1/2gkgsXvp7S9/Z73tCYTZo8Hx5NP0shm6T52jFIk\nsrvN0ulgcjqRm03k+nsTyFVF+SfJ5/2IB/gVGXlAuHz5Mr29vXQ9gOCpgwcPks1m2dnZofcfccn7\n5w6dyYTZ66VVKu29p9XrcQ4M4Bkb+9ALxmzWc+BAkOefX2RpKYteLzIy4mZoyI3P9+F9NhMDJuQn\n+tjwCwinwevU09ye48CRE/ztl2/RPT3KxG+cAr2Zt6+nqNUk2gE93/rmPDajjGtgiCsv3ERs1zn2\n+c/iCvkJ9gfRhUOMH9hHZmGBuinA0htXScZKNMtVKpUWwz3dTD5xnFZLJnXrNrMT3awkRez1Dsl0\nlJs3k3z6U4/SSbVYvjiH2CwRHvBhsFsx2AKUJR2KxUs5mcaoA1FQKefKmDw+1pdTBI5UWF7JcvNm\nErMBDkzYqSVzDEwcpHTnKrV0mu4TJ3BOHiBVUKmubaJmIpCLIIgi1mCQVrmMIAgYHA5CBw9isFgQ\nPsDrRqN9uDkn7qEhNDodZr8fAbCHw9h7et7zM7IkkZmfJ7+2RkdV8Y6N0a7VqHQsLC1vU28qCFo9\n6/ktpO4Ep0+HsfQO4RxIsz23gkbU4hsewOjvor2VoJivE3j8GQxSiVxdS0vvomdohHobkpEOY598\nFp0q0WnWSM3fRRS1INVZuXSXhmjF3RMgsbyM0+2j2VJpKSKKLBMWKgQPhrlxI47fbaTVkhBQuH0j\nSo/XQGlxiY4k0T0SZv0H38XZN0DP4YOYzVrykRaRtMrqwg4ajUBXl5Vw2MHKSp68bGYrkkCuN6jV\nJM6eG8OS0xAYeTjn7MMwPLzbMLq8nMVq1SNJCt///iqxWGXPDt5qNSBJMrFYBVlWGRpyUSw2mZ9P\n43Sa6HQ6fO1r84yPe7Ba9fh8Fk6d6sFs1pPL1VhezmEyadHpNMTjFTKZGuPjXvx+C9HoLsF56aV1\ntraKhEJWtrZKtNsyi4u5e1s5Iisr2V01kl2Hq+mlsV2i59RJ6tkMpe1tBEFP6NAhnH19tKpV9IKe\nVkulUGhQKDTY2ipy4kQ3er1IMlnF6zW/T97rGhigHIns9Yy8o3jRW62UIhFqqRQ6sxnv+PjuNXCP\niOeWl0lcv/6e/8vs9WKwf/jC7H7hV2TkAeFBbdEAaDQannzySV5++WW+8IUvPJAxHwYEQSAwPU2r\nUqGRzdLpdDB7PAQOHPgnH3iJRBWLRc/0dACzWYfDYWBzs0A4bMNmM3zgd+rJONZ6gpkxM7nNLeI3\ntsgn8vgnJgiMDZHMq+y8kWJywsfGcgp/yM7c9R3a2TzeYyMsLmaRDC5mTk2zURVQ1gWCtRpHTHUG\nDodxWf2s/+g2O9tF/CMDrFy8TnBsiCOfOc/aq2+gMZrpWILc+t4qY6cPMXp2ApvNwOxsiOW7UeZ+\ndJvRvjF8bg0dhxNHf4hqpUZF42ThepIjx/vIlxWCXQZ8QQf5qoTXaWR9o8hbL99FUiBbq5NZkXji\nsT7Kbj/BQ4cQtVqsEzO8PVcmEY+RujuPQZA5frIPh5Sg0WrhHR/HOz6O3mLZqzK4hoZI3ry5VzUR\nRBHv2Biah2jIJ2g0uPr7cf0j/Q+55WXi167tzTu7tITSbtNWBQxmIxIq5YqExtRhK1LlQLVDJp7H\nPHWcJyanqKZSVDJF8ukS+//lc0TWM+RXV+ib3U+5reHWSpO3v3aRM48N8ZufPUV+J0KrVia7fh2j\n1YPNbaVZbxKeGEDr9uMf6UduNFlezXP9eop6vU0gYObA2aN897uLIEvUS2a+//dXGT80RL9X5dvX\nb/PkuXFWX/4hLreFR3/3U5gtOor5GoooY+m30RZNaLUdTCYdzabC/HwaWVbJFdpY/UGUtkRTFDF7\nPWxtl9m3/8FF0H8UaLUaJid9jI662dwsMj+f2QuHU5QOBoOGq1djSJLK4cMhisUmCwsZhofd3LqV\nolqVGB/30m4rrK0VOHYsTDpdY3OziMNh4Mc/3uS113aIxcq43SZGRz1cvhzF6TSi14vEYhW++c0l\nMpk6Q0Mu/H4LX//6PP39Tl56aR2NRuD3fu8QbreZFy+scmKggeCD3PwyrqCX4aefRpHaGOw2/NMH\nSC8u7hKEbI0jR7rw+y2kUlVsNj2BgJVbtxKUyxLd3XaOH+9+j2TeeC/ks5pMorRau1XhQID8xgal\nrS1a5TJqu43e4SAwPY1Go6GRy2ENBvFPT5NfXaWjqlh8PrpmZx/KNXq/yciXgFngBu9P8BWAm8D/\nDfzX+zyPh44LFy48kC2ad3Du3DleeOGFX2oyArt7mEPnztHI5aDTweT1fqQGyXS6RrvRwCBICJKR\ny28laHc01GoSp0/3vi8UrFBokC4o3Lgew2rREwi4GDhoxj3QIDDSz2+O6ail05RrCorNjsE0zvJi\nhnxJi9HuoFmu4PS6MdvNrEQkfAEH2Wiepbsxlm5u0CgfYHzCT6WtQ28yYeua5KmD41jcLtbefItK\nIkk2lqZaVTj4uecorq9RFNzcvLyJx6xw6dVFPGaRjViL5ZUa8dg6n/mdo0weG6MkGSgW6ggGI86Q\nF5kOhVQOndzAMf0IGwkJWQWl2UJnMVNMlqm3oVpVGB3vxTM6SjTbIZGo0m7sqpOaLYmbNxM8dsyF\nkk1Q2tkhODPznu2OwP79aI1GCmtrCKKIZ2zsY3NrvF9QFYX82hqqLFPPZmnkcgiiSNeRIzitIvmd\nKIKoxePxED44QbYqUpINjJ2cYenSbXDZCOyfwlxuY+ntJxvfYv57L2JxWnnl9hr23n5CI0f5xCcC\neANWNrbKJCMya4tFThx/FLdDxmWGF//yqwQnR+lUK9SiUUzhPuT5FFJbRWrJdHQm8oIXRS5iMeuZ\nX8zR0RkR6JDdiVPMVFlazmHzeImsrFPLZmlm2jz/n/6ayf3djJ89xcDsEcy6EunVVSxuJzpLiNw7\noXLCrtnbOw+8TucX14FVqxWpViU6nQ56vbjXH2Kx6FlbS/HYY/3EYmWuXUsgigJer5nRUTeXLsVY\nWMhw5kwfb78d5cKFNXK5OisrWQ4eDFGvtxFFyGTqxGJlbDYD+/YFSKer9PY6abUUNBoBvV7DzEwA\nSVLo7XVgNGrp63OytJRhc7Ow25sm1TArNbbfXkYpF9AadJgcTgS9Dq3JTL0mIWo0mLwBYmWFdFrC\n77cQClmYn8+wupojHLYjSQobGwU8HhMHD743BVdvsbzn+mpWKsQuX2bx+eepJhJY/H76zpxBbjRQ\nZZlmPo/WtOvyOvzssyj1+u4Wzb1/D5qQ3E8ycgiwAGeA/wIcBq696/NfB9LAL+if+MeHByHp/Wk8\n9dRT/Mmf/Mn/FNbwerP5Z1ZouO0iN5eW6R4K8O2/e5tyoYp/dJBtvwGNRsP587vhblKtRilf5dU3\n41hNJkweL8mtOOVKi+FhF5pwN9Vkivnnv8nG3AaKIDJ19iShI+dRR1yst+t09w0R9mgYHfNQlIy8\n8VacpcUUb7++TsBvohBp8A+o/MHvH8LZyaN49IQCFjquEHd/9CbLl7Zo1mp0h/1Q3CK/ME87NIkd\nCX0tTTFtZfXWJtp9IXSVBHpPgHKxTq7YYiclM/TUk5x57hRKvcyhJ4+wenWBdjFHuMuKb3iQ1763\njdlhp1TPoLRlrMEgNr+f/gk34ekgRrebiy+s796g9HpEgwGlJVEu1JAIoAGMDgei/r2rZq3BQGDf\nPvxTU8D92Wf+eaEquzlDGlGkVGpSKbfo2H2019f3VotStUq72WT6c58nJ1mIrMXwjY5Qdw4xHvIQ\ncig0MkWchjYrP/oxnQ7s+41PoxFFrrx4Ha2/F7vPTKqaIhPPcfQEvL5cJZ8tQlhH2G+i57F+vn9h\ng+OnBxjoNoIjQC26zfbVOboOH0YzMEPX7CznRpvEsgpGf4iryy1sLhPdYQcbOzVkrQmdFtqSDDoD\n1aqEx2TA6HTQzKQQdAYOnN6HfyDM3bkEqfILNKxhXGZIvH6ZiUeP4HP58HrN1GoSGo3A1JSPdlul\nt9eBxaKjuLOzG6gnCDh7e7F3dz/kM7gLp9NIpwOjox5u3nxHVSIwObnbL7K2liMardBqKUiSytmz\nA5hMWpxOI/F4BUlS9iS7AwNO4vEqfr+ZwUEXc3MZEokqy8tZnn12lP5+J3a7AVlW71nC6/nEJ0Z4\n6aUNNjcL6HS799upKT8Gg45KRWJ80E4rvYpOr2fik89Qj0VQWg0cvX2Y/X6KLR3BmUlEg4mtqJ5O\np0y53KJSaRGNlnnqqSHq9Z844u7slPbIiKoouw6ppRJasxlrMIhWr6cSjbJ24QKVe3bu1WRy1wBN\nlnEPDgK7Ta655WVEnY7MwgKtchmNVouzv5/w0aM/c1jmz4P7SUaOAS/de/1D4ATvJSOfA/6O3QrJ\nLzVefPFFzp49i+4f6dj/uNHd3U0oFOLatWscO3bsgY37zwUhm0T/gJOdSJ5cIofJYsRratNMxUgY\nNCSjLuydIrVUiqqsJbcSJYKRidkTuAaTNApFArMjaKw2Fr78VyxcXiKXraPVa7n6nVf5xNQ+dnbM\nOGwi7fXbVGMS21sabD3dHJ4Y5vpbRbrD1l2JsE7DyLCL5Zd+hFzMks62aSSjGN1xvF1eNswmkhtx\nhI5Kb5cP2i1cPhtyB3TtCoIs0TMUQFY6mHRazDYTozMDdPd6MNstZBYW+cFbVyjEkhgsJh557hHG\nzj1O7EcvYM+mCDigZfSwLWioF8v0DPoYOzTM/tkuquuLJOfm0BS1pOdTOPsHsIfD5JvrWGwm9Mho\nrFZ8U1MfSjZ+EUmI0m6TW1kht7KC1mwmrwuzGpVptVXkQhWv6sUaCFJLxGk3GhhtNkrLC+x/bBbn\nyCiNhsTJIyEsVNn5/jdZ+YdvYvL5GJk9TrnSInnxVfZ/5jnkQgZVb6RpFdEobbwOLdVylcGwh+03\nrnLp1RjFbJXHnt7Hs48dYznVwGKA47/1aySXNzCPzBAeCbO0UWdxfofh2XEcbgsrawXGRi2YzRrU\ndpvhES/5bB2dwUBHb6DSFBib7qW2XsXXG8Tq87J48RZCucDGlRzWQIDsxiZNn4HVgsSJQwOoqQ2O\nPTnA8HiI9fU8BoMWg0GLw2FgejpAbmWFyMWLezLQ/OoqvadP/0JUu7q6bCQSFZxOA8PDbqLRXVKh\n0Qh85Su3yOUaiKKAKILDYWBrq7DnrDs3l6LdVrl+PcGxY2HqdZl/+IdFenocSJLM44/3sbKSY2Ym\nSLnc5NKlCJVKi099apz/+B/PEImUuXhxd/um0ZAxGnU0Gm0mJrwcOhSkUmnR1WfC5+qlnU3ygz/6\n39CoMo6Aj+4Txxh45tcQHT2UVCMWW5ADBzQ0m/KeY6zL1fseRRDs2vPDrvolcfMmmbt3USQJQRRx\nDQzQc/IkrXIZ+V2Jv4JGs9u4LYrorFYEjYYOYLDbiVy8uFf6UhSF3MoKFp8P3+TkAzuH95OMOIGN\ne69LwNS7PjsHvAoo93kOvxB4kP0i78b58+d58cUXf0VGPgBKJsKBIR16i4/YTC8GUYFiFEKT5NfX\nKG0bmP/6f6VZKmEbnsLStmJwdbO0mMHbE0YX7gWfD7OUIb2dpFJpoSgqOjpYXA7uvnUXa/9pPNUt\nsvFtknID2WMkFc8x/HQ3T5ybIJMsEduxo7aaBA1lMrdWMZl1+F0WajWJ7Ss3OPIbT2G2GPD3+qjm\nyxgdXnpmJtGOjvDKj3fIrO9QDlt47PERUjmJxJ0SzqCbJ8/sxxOw0SqVqKwvk0sVyaXKWIxVNi9e\nocupMH/hR/TOTHJ41s+ArpvUdC9Go5b9+/0MDrqoRTZJ3boFnQ69gQEiNi2FzQ18U1N0H5pmZtJJ\nV5cOi9e7J/V9UMjl6qTTNURRg9+/q6L4WZBdXCR25QodVaXj6+f2/AL+kJOg20nF5GZrLsPsxGE0\n4g0CBw4gN5tkl5cx98iI2SoBk5b2eoObr7wMjSrWQID89jbl7W1GP/VponeWMQgS0/v9FLJVXHYQ\n/CKFYg2r30fy1jo7CxvodSLlcpMrbyzj8toZPXIarU7g7RsxXv3qRZRWG29vkP/1//gdRqa6qKSy\n+FpJeic1hKZsRCMVErESU4MOHNYhSsUaBu8g+w71IFazqGg49anT6ASZwtoaBoOWQrFFrVBm37kz\nxHUG4hsxHK5uTOUMmnKG2dNjzM52Ua9LqOquE6ncarHzLj8KuOdlMT+Ps6/vY3Ph/Hmg1Yqsr6dp\ntVRGRz0cORJiaSmHz2emXm+j02nwenedcYNBK/v2+e+l9Ha4cydNMGjB5zPx3e+uMDHho1RqsrSU\npVBoMjDgpNGQ+d73Vjh2rJtsts6FC6s88siuQKCry4bPZ8Js1nLzZpLubjsDA05cLhM6EZLxKuP7\nwrz1t/+NVrGA0aChtNMAQcDWN0DwTBeb8QbJ+U0eeaSXM2f62N4u0el00Go1LC7uSnf1ehGHw8D4\n+G7ybi2TITM/v2dC2FEU8uvrOPv7MdhsOPv7qWUyKK0WCAImtxv38DCNXI7EjRuosox3fByTx4NU\nLv9Eqt/pUI5Gf2nISAl4pyXXARTf9dkXgN9ltzryofijP/ojnM5d/ff4+DjHjx+n/17j2dbWFsAv\n/HF3dzcvv/wyf/zHf8zW1tYDHf/48eP8xV/8BX/6p3/6C/P7+KeOHxSsPh+pWy/jM3pxqlkK8RyW\nQACpWiUYstBYvkF6bg4EAXMghM5mJeA3Uku2iccr2O0GvF4LupaMPeDD6U6DpgFtCZPdChYnXruA\nKZfFrqnTkCWs3jC2/Ud440qKVFXH3LUdjhzvY9gnYzFKLKbq5HYSIEtYe/oxCwqy0sHb7aOjygQH\nQxx65jD+iTGWc2ZKVZXQUIhmLofbMcHsI2M0np7CZDUS3S7g9ltpZVPo2yUOHAjSGnag1Co0MimK\n8RSh6Sny2xHCU2YOPTOBigadTtyrZMS3tn7SfJqPcuZYkEJLjznsoXswQChkfShVj+3tIm+9FdkL\nJXM6DZw50/e+Pp8PgyxJ5O5twWi0WrQeL11BmUo2h1TMoVVaDPZ7ENxuek4YyN/L9wgfPUp1Y4nQ\nwCCbb13Cc/4pmpkkJqcT0ddNYGg/UnSDVrGAIxRAFrSET5xCuniZSnyTrlCAyadmyJqt6JsFXA4D\nkZ0iAh2sVgPZRJaTow7SZRGbp8Fz/+E55l69yeC+fq5cibG9EkdRVLwOHaPhNjs/fJGrW1rowKB/\nmCeOdZMsuUjsZHHadTht3fT0PYHFbmH79ddxBT2ozQZCScLksODq72F7qYQot9AZdLuJv0bjnoeE\n2fyTbbdmtUE+XaJebGC16vZs09uNBkq7/dDJyPJyjtu3k3Q6Anq9yNZWEVlWcDiM9Pc72d4u0Wwq\n7OwUmZjw8fjj/djtu1s7J070UKlIVCotNjdLjIx4GBlxYzBosVr1iKKGkyd7+MEP1vj856eZnPSS\nyzWwWPTY7Qbm59P88Ifre6qXz352kkZDpt1WeOWVdT73qR4cqoJUqWP1unF3+ZEbdQStDlmS6Mht\nFK2JWKaO3WEklapx82Zyz5jRaBSZnPSh17RRcyl0cg4xryLZ+pBqNZRWC41Oh95iQdBqkQQj8XQT\nT8hD8PAR5FaLWioFgsDA2bNojUaWvv1tVEkCQaB4z4XV0du7504MoP+YzMw+Ku7nneQg8PvA/wL8\nJfAVfrJNcxNIAuF7c/gMsPJT3+/8tGPiP0dcvHiRP/iDP+D27dsPfOxGo4Hf7ycSieyRul9kvOMD\n8HGi0+kQiZSIRMoIAvT2OgmHbbTKZTZfeYXYtWu03YOsRproAt1YqTE9E2brm39DdmkJ39FHSEhO\n0nU9mBwMHJ5CFQ14PLs9KoODTpqrt7nx37/K9lKUcq7MoefOw9QZTA47moVXefPCTcweN5PPnufC\nyxEKhSbegR68Pitqo8rZx3twm1Re/s9fprgToZrJ4R4dx+Cws+/scdbm44wfn6Srx4MiaLi7UKBr\npBunw0g1nUavhWu387zywgINWYvTbeXcJ8Y5sM+FoRzn0le+RlvQU25pqZcq2CwCn/j950iuxzDW\nUgw/9QSDZ8++5/emyDKJa9coR6M0SyU693or9HY7g089hcXj+djO0c9y3iVJ5sKFNVKp97q+Dg66\nOHt24CORI7nVYvk736FZKGAKdXM3aeD5v/wetVqL8Egvx04NYqfE2COHqd96g8LmJp6xMfRWK6nb\ntxF1u83Kgelptm4vk9IEWb0TRTRb8YccPPHJWfQmHatRhdW5TQaGA4yOeTE6HWQLMka7jRvffplr\nL12h0VDQakUmp/wMTPYQeuwc/+n/usL6Wpq+Pje//duTNFsq3/zqNbKxDIJGoHfAy+Mn/XRJ64jd\no1y7leX8E72Qj4LZjmQNMrcDvf0eKok4PX0ehNgCLreZYiRGLlvHEgzRUkWWN2vodFpmQjWUQgpb\nVxfho0cJHz68RzDy+Tq3b6coXH+TyNwyFouO4SE3NrsBz9gY/Y8++rOfdD6+612WVb773WUymff6\nZSjKbk/HykqetbU8kUiZUMjKk08OsrFRQFU77OyUcLmMjI56UNUO7bbCtWsJSqXWvfTpDt3ddp54\nop9Uqk6j0WZgwMncXAqdTiQWK3PjRpJAwMrychaA/fsDHDsWxmAQsVh09NglGvEt+vtsbL74ItFL\nl+h0OrRrNUSzhYkv/HsuZbr49g+22b/fx759fkRR8x6/E5dDx2y4RmHhDnqbDa3BgNnnwzU8THpu\nDkWSqMTj6Fw+sPuIFPXUBAsjPQZ82hKNTGrXU2d0lPWXXiJz9y7teh291YrR7aZVLOKbmtrL9tLb\nbAyePfuxWb2/g3vX5wdepPeTzt4EmsDr915fA/4z8EV2iQrAvwJE3k9EfmnwsLZoAEwmE6dOneKV\nV17hM5/5zEOZw4OGqnbIpCuUsiX0gky9LfL29Qzy7rOU1dU8p0/3MjTkZviZZ3CPjFCORhk7bEZj\nstBMxmiU0kiqiGN0ioWEluVbdzD5fBiCfTjKMv6Ahe99bwVJUhgd9TA4EOT4F7/IxMYqWqMJa08f\nKzGVoE9PJDNApniN/ccnWd+qsjAXwRUKkIzmSW7GGA5pWbxYxBOwM3L6GOuvtqgkUpiNGibOnsDV\n182k00PP9BitRhtZ7jA666NYVogkmhyd6ef6lW2u386RKQk0m03y+SZrS2nM7TwjfhWzx8321ev4\nxye4E5M59dzjyKoWY6eJozuE3mqlnssh6yzEYhWaqRi1jSW0rQpKtYh7eBjYvZGY3G5Q1fviwPhR\n0GjIexWRd6NQaCBJyodmd7wbWoMBz+goxe1tKgY/t9+6hKAR0Or1FHI1Ll/a4tlnBnA49HT6R6mr\netqCASmZxGCzUY7FdvOPajXk7mne/MqPQKvDP+QhmhdYLtgYD3gRbBX2PeahGd/ie99fIb6dxzsQ\nxmVROfvrpwmEXUiqFlGjUIyl6D92kG9e2MRm06E06mwu1dnZ6aUmdcjlalSrLcwGDSs3V+kJGQn1\n6RCqWZ59zMfN//4V2qUcBocDvcvLE1/4N9T1Bux+LxaPm7YhQDkaxdsdQLQ2SUYLdM0ewjxoxW9q\nktyOsxrTIG50kP0F7D0pHOEwAHNzadbXC/SNTBGsN2hlU5TzRdzhYTxjYx/3Kf6ZIQjv70uq1SS0\nWs2ua3K5idNp5ODBEBoNXLoUpdFo02rJlMsSsViFdltBp9vdoiyVWvzgB6sEg1a6uqwcOBDA77dy\n+XIMjUbDzZtJrl6N87nP7UMURcxmHRaLlsOHu6jV2oTDNo4c6WJjo8DSUpaF1BZesYhV48McDGKw\n28mvr2MLhxl45llqjn4uX4iiKB1kuUOlItHf78Jk0tLpQDJZJb6VprvTwOX3U0kkKGxsIGg0jH3q\nU0i1Grf/+q8RdQYSa9tYA0Gmfvt30Ot9LG668J7sxWk1U45GKe3soBFFbD/ld2Xo7cU7Pk7N5UJn\nNuMcGPgnA/Q+btzv2tpPy3m/+FPH/+0+j//Q8YMf/IA///M/f2jjP/3007z44ov/U5ARWVa5di3G\nlR/eJL26jSPgITAYxmYWEexORK1IrdZmfj5DX58TnclEYP9+vOPjtMpltEYjaZOJ9M1lPAcOU8zV\n2HxhCYPTjcHpwRLupS3Da69tYTBoqdXa7GwXkKpV3LYeAtPHkQUdQqfBoeEa2baZa8sSpz7/Scp1\naDdVAoM9lJsiYqXOzp0Vup7Zj8WoIbMZJ50oMv3UMwRmZlD0dpZv7/CJUR+5Rptv/M3V3Sa5oJvZ\nw2GcXicarZb59RqY7IgGA9V6m0ZNItTtIpvI0fJbyVWT+HsDuLufxWjScfL3DpPO1Onk1xHqU109\nXgAAIABJREFUeZoaiXomQzFb5ta2gNdnY/uVl6lVang9Fnq7dNTzeQxWK4WNDfQ2G4XNTXwTE4Q+\noh+BLEmokoTOYvm5CYzZrMNq1VOrtd/zvtttQq//aKqxjqruZhltbbFWKe6uCj1GZK2FVltFo7ax\nOGxsr6Sp5SosvD6Py23GKufRyA0C+/dT2N5GqjeJlQWsg6NYzFoEm4dcGb765bc5fiSAy2tjdLIL\n1erg9vU7GPRaOguLFDQCwcE+SkIXa3c3kVtNDp85SBkHHTnHsZN9jI17uPbjBaqVBhq9CbPLQS2X\nR68TMJqcmGxm+g8G6Qha1r/zLZRqgY7ezNZWCWUtj9b7AvrDTzPcZ8ZezxI4cYL03ByVRAJPwIFr\ncpqlnTbZWJJtDTQzZbZXYth9Lq5eTWAJ93IwHKbZlEmna6hqh1hBZOL0SVo7y7SqNToaLfGrV+k5\nceKB9wy9G6KoYWTETTZb37OCf0dtsrSURZIUxsd9PP/8Ar29Tubmkng8FpLJCkajllZL4fbtNO22\nQixW5sSJbtxuI4uLWc6fH0arFbhyJYYoalhbyxONlrFYdKiqSjpd49atJDqdhoMHQxw6FGRgwIUk\nKSiKSq3Swu9x4NdJvPxXf8fjv/kko5/8JKgqtnCYtn+Yb/z9/G5WlFXH/v0BVFXlwoVVOp0OOp3I\nwYNBzBYdpWKRZmmH7I3Le+aBmcVFcsvL2AIBYjfnqGYLVPNlAnfnaNq7mXziUbZf+D7Zt1+lWSgQ\nmp3FPz1NKRJBZzbvuiWLIq7BQXwTE9jCYZRm81emZ79seEfSe/LkyYc2h/Pnz/OlL33poa1kHyQS\niQo3394gs7ZNR1FQOgK5gkxxo0w0GccdcDI9HUQUBSRJ2XMxFHU6EAS233yTzbvrdKplnOEgrskT\nhBImJNGEIxwkmlboMuloNGQymRobq1ncdg31LhOBoJVMoogs6Dh8rA9du04lkadUVXhzI8e+U/tY\nXNigu9fNeqRBJZdGoxHweMzYzSpOg4l0osjKRgVFNiLnoxyb9VMXbNy4scnd9SZtVWDxdhSHzcCQ\noOX67QwmvUAotLsSq+QrpPNt7HY9Vn0bpZghvnKJ5J15+k8cpeEN4Ng/SzYv0XvgEJbJIwjtOu1W\niZIssn51DsO4i8TCCoJGQGm4CHUNYHY6ya2vY3K7qcTjJG/cIHb1Ku1mE9/4+IeWcjudDvm1NdLz\n88iNBmafj+CBAz/XikunE5mZCb6vZ2Rqyv+R/74riQTZxUUMdjt+uxfrTpP25iZ2iwat0YKIjNag\n585ygUaxTizZZHUxybFZHw6jgKWri+DMDLVsFt3NBti8+A+M8tqLd9lZjRMI2qhkdMy9PsfE/n+J\ntp5hf7dK1/5hFq8uc/DJI9y6lcRo0FAuVClkKkT//hq/9W8f5/qVHb77/Q18fhuTUwMEez309Dh5\n62IEd28PBqWKWs0zM+Vi+43X8Y8Oo2nXMVpNxFJtcrk6FrOWVi6DUCnw+g93OH/CgcmRpO/MGaR6\nnUi0xsXLcUrZGvW6wt1rGzz++AAnHlHJbkaxGGrUy7sPdp1OsyvpLTaxWHREr90gMreE1arHvt+P\nrBNJzs0x8PjjD/X+MjTkplRqcvdummpV4vDhEIGAjXS6htGoRavVMDLiwes1Uyo1SadrlEotjEYt\n8XiF6Wk/Kytl/H4LX/nKTc6c6ScYtJJIVHA4DJhMWjY3iyiKit9vxek08K1vLXP8eDeBgIVcbrcy\nZ7MZmJ/PcPFihGSyypFpF0GXnvpqBm+Xn4v/798TDNkw2a0IWi2hf/HvkOs1enp76O50sFp1zM2l\n6KgqkZ0CuXyLYrHJH37xCI6mho2XbtJqSMiVNBafD1GrpbS9jcXvp6O0EXV6VFWlls4g2row1tPc\neull1NQWnrExohcvsvXaa4x/+tPUczl07Tbho0fxjI2RvHWLzOIiiiRhcDgIHz78QKXbvyIj9xEP\nQ9L70xgfH6fT6bC8vMz4+PhDm8eDQD7foFGpod7bk/GHHLz4WgStCO22QqXcIra8zb/6N4eR0jFU\nd5B0tk4xXyfy+o/JrG5gbJcp7WwhaPXM/msfjpCPdFlAwkA6ncbtNmCx6Mjl6gQCZoyaNvFUg1S2\nxVquSLNYxixlCepLNEQ34wMmfrRSo1WtUq4qGC0tfvtz+1iZs+EwjTLgVnj1by/gCbl56tOn0IcH\nKMcSOMzduJwG1isaam0t9XINjcaATuywtV3C6bFQyZURTJBS2yhSi4E+G225jMmk4eCxPtylRXZi\n2zRKZdILi4z9i/0Uc1V6x3q4tpjnzZfuYLKamT0Spn/QTLtcRKroABW5IVFNtVGFIaRGg44sU9re\nJnb5MlKthiCKBKanKSUzTPza0+gt7w/wqsRiRN56a6/TX6pUaNdqDJ0796H+BaVIhPzaGkqrhaOv\nD9fg4G7+zbvQ1+fEatWTydTRaISfWU3TKBT25uT3C4R7XLsVHkGDRhSYOTqIxmblu8+/jlbs8Mjx\nWaRMlMCREdz6BrmFRQorKwSOn+KR3xgi+j/mKRQaxDdTNKpNxsaHiG1GqaSzZKJZrKU8a6+/jbae\n4+nf/S1SxQ7Xry7hdJqQW220HRVR1PL2W5sEgzayxTKpeIFaQ+HUI/24nHpmZ0OIYgiLXKQ7OIGh\nuE3yzh1sXheWYIhcardhUyNq0OlFHEMj3N1qsbXdYGxmEF2mhM68g9xqkVgpIKoCZqcNndnEMbcD\nc2WV/NoS0eUYgnYNjSqxf7YXezDA5KSPXK6BUZTJptKIWg1dYRvae34a9UyGdqPx0NKYFUXlzp0U\niUSFoSEX2WyDdLqOLO9WFkZGvGxuFkgkKsRiFQ4f7uLSpQhdXVbq9TYazW7Ozfx8mu5uO9evx7l9\nO8nIiIf19QI9PTb27w8Qj1dYWcnymc9MYjLpyGRqvPVWhCeeGERROgwPu9jZKRGPV0ilqnjtAi9/\n/Q1+67cPYdBpqdUqiHo9cqNBPpPGPTSIQ6wzOBZkLqLS3W2jWm0hyk2CbpFmUSDksRMKG6jFoyRu\nvolYyhGY3k9lawNRr0fQarGGQqCqaLUiRpNIo7kbe5CTZNRGHaFeQms0YnQ6Wfne99CIIumhIWzh\nMKJej8ntpp7NEr9+fa83rJ5OE3n7bUaeeeYDr+37gV+RkfuIh9kv8g4EQdiT+P6ykxGLRY/WYNjN\nRFFVBFFLo9agq8dNPlullc9icJqRqlUSy3kW0wkEq5P0VoLIq/MEnBpK0QiNYhWrVc/aK68xfvwx\nHBUdqwmJ4WE3hw+HkRotXnmlTaNpoFqq0N/toNloQ7OOySjQrlQQvdCJrpC9FePR432o1Pj3XzxB\nta6STRY4/egQ5nqcF//q69TrEr1WIy6XgcjFl8gvLqJOTqIfH6LRMlPMllHbEm21g91pRe2oqB0B\nk1ah3VKwGYz4HQJHjo5yqiThsGlx6VskXklh9nrps9vxjI7iHR1FsjpZWC7y49eimEQL8Y0cgtFM\nGz2+bh+C1YnD56EQT9I/PYwn6EJrMGDv7SW/vo5Uq0GnQ1uGQrpEYTGO6h+ie2rkfYmipWh076H/\nDt5xNdV9wIqruLPD1o9/vCtDZJeYtMpluj9Amu7xmPeaiD8KJEkmkahSKDTRyib0viBSNoWSi3N0\nX4jcoBPRG8bjtaDTKNy+m6PQ0NJp1fnOd5Z4/NcOgFbP2uIm6UQdg0FLrnWdiSctfPIT/SytV6g8\nNozVpKEQTZKJ5TAa9YS7HWRXIxx/dIx8oU4iXiWZk3C5zbTrDcp1lVBPCIfdgCq1MOvhsbOD5HMN\nPD4r+XSRjtTk5MkeSoUq7o5Ke/MO2bmroCisvvIGj/yH36NebVKqrGDo99I7ewDT2EFu/PUaFoue\nufkMFsyUFi5QqbR4+3KM3hPHubkmcOV6hj/61/0k3ryDy6IwPOanUJbpFFJkF+bRGvQMDLgwGETi\n0SLaHi+iV8Tt/gmZ1JnNaH/K8E5VFJqlElqD4b4/zN7pzRAEAadToFxuEY9XGBvzMjjoJJGoMD+f\nZmurRK0mUa1KPPPMMG63iVpNIpWqkUrV+MIXDnHpUpTeXicHDgRwOAzU6236+1202yqnTvWyvJzj\nzp00n/3sFI8/PoAkKUQiZQwGDWNjbpaWslSrEmaTiNusUDTqiWyk2DcSxusxUL57jdyd2+itZkIn\nHmHt9Yuc/o3nGJ/ZdWHtaI2U40kWl3N4XXosRhASy6gJP3IuhdVpJ7+6wtBjj5JZWqKRyzH+qU8R\nu3IFayiEUZIIzBzC7PeiF/S4urvwjw1RicepxOOIul3VVLvRYPv11zG5XHs+Me8QkXfQKhZp5PO/\nIiP/3PFOSu+f/dmfPeypcP78eb785S/zh3/4hw97KvcVXV1WBifC1LIZaskUAh2CfiO9ISMBjx6p\nZkDfaaHvNCg1DcRWtlFcKn67nsGwHiWXwu41UdJpEKRdD4BadIeDJw8THrWjSBK1rUWKW5scdenw\njQ0huIf51nfWScZLeM0aAg5wCUW2XrmM3mLmiacP8+YPFxg9th+/voKYyeBxiaCxIXv6+ez/+b8j\nSlUsDgs6uUr86hWMbg8aRaJY6ZDJ5ugd7UKj07NyN4o94ObQ8UEiOwWsJgGd0YKxmaW+tIYukGTY\na0cqKhQqMvbDZ6l1Hcbp0OP36NE7XWyULMwvJlicTzE44IBGlfhKBL+pzpnzsyzejdP3yClGlQpi\nKU7y8kUsXd1kN7aw+7z4DswQv34T59Q+Uptx0Okolppsv7XD+fPD6DQqCML7Hk7vwYeU83PLy3tE\nBIBOh8L6Ot6xMYw/hxpMlhUuX46xspJDUTqochuXxsikP4BSKUA5S18oRO/pAa5djfE3/88bPPbM\nfhw+F4WclkfPzXD2mSlSi8uo7h7kdIPYRha5XkUqfAvzzEn2DfUTsozw0jfeIh9L0TcS5PBj+3AJ\nRcShPqy2FJ7x02SNbix+Aa3ZwsJyHlu5SrVQ5uDMKF6/hZuXt/n/2HuvIEnu+87zU5nlvfdd1d7O\n9HT3eI+ZwWBIDEASAkiKkERK1F5QsXtxoVhdSKd7udi4h4t72FjFRUgrKk6hoKSTyAUIkoIjMBgM\n3Hg/3dPed1eX9zbL3kMBQw5AOAEgIAW/T52Z1dm/yMyu/zd/5vttJTexKvXk4hLrhTgXVyK4B/p5\n7BvbKdyZo1GrYOnuolnIkEvlmb0+h2XPMR5+9FGKlSbZpo7nfnoXu7zMg6fGqNWbXH/hTUa6VMRS\nNRxeKzPTMVbniuwZC6Ipx1i/cYeIvIXTqqLrwC68vQHW33qL+NQUnn37ycqd5Ap1jCMTZBbnqcry\nqKgiV6txjIzcN9pbiMUIX7tGOZVCUCiwDwzgGBlpl0M/ZSwtpXjuuXmuX287RAeDZvbt8/Ozn80S\nibRLLBsbWZxOHbVak3q93edx7VqIJ54YJpEo4vUaCARM5PMSx451YTKp6O42k89X2djIcfVqiFAo\nz+nTffzZnx0km60QjxfZu9fHpUubzMwkGBy0oVCIDAy0FWCdDg1CMURXj40jj+zCIyapppr0bP86\nPPkN8tE4uXwNe0836eUVbr0xjVyhoO/AGFaXGX20hpTP4jDI0asrrP/8X6gmY6gHuzA4HUiFQrvR\nNBjENTGBweul68QJ6pJEOR5HY7ejd7vZOH8emSAgiiL+AwdIzs9jDgapv53tVJvN7ReGXzHVJBPF\nX6sk/G/IyGeEK1eu4Pf78b3dkf554sSJE3z3u9+lUqmgVn88cah/S9BqlTxwrJuuLjOR9ThOR9tC\nO56sEJ+eRiiWMDpNuKxyovESrWYTi1lJK7VGqwUb00u4LCJmtxNrIIh/716qhTw6i4mFC2lKi5Nk\n19fx+kyo5DVayS3kCgGTUUmz2sKmqXLrqZ/hOh5AioTJSRU0Bi0nH+yiJmV47f95mUxWwh7woPV1\nsl53Ick0DAw5OLxPyeI/P4V/ZJhyS8nqhcvIdVmuntlg7OgOdn1rF+nkMFaXCYfXyrPP3KGsrKKR\nVUhcuc7o4R6m/+779Dx4gt5HvsrajShv/eANBFFEUGvo3jOGwVgmHEnQ1W3l9RdvEwvD8HA33UEd\ngz16OjptOAxNysUqRp2LtZcXaLm7uDOfQVYz0EgX6dxzAHP/COHFDSKTsww/fJJiU0toNcb8pTLV\nmUsoDQZco6OYAoH3EAydw4H2fcaCa++yMgdo1us0arVf8emPjkikyOJiikaj/YUryBWkqgZa7g60\nqmUEpRJrXx/lmsCZV9dYW01z/uVbHDoURGvSotRqeOrHcyxfuoZCaHH4gV5G3BZCFy+i0QSwIjF9\nY5lgwMiJRyfIlVo4vWYszSTJG5cI37lLvZCjEA7T7D/A+dkWOouZng4t+YKSriNBhjoEwuEMOnmV\nhevT9O2fwBt0UoiESSxvsP/kOH//g1s8+Ug3Ji1UCwXkAmiSWRQWG5EbVykYtbjGJojOLtBlKmMf\ntrF67lUmjo/TcttoKRqUQ4u4x8cplk3s0uro6dOyObdGYDBAbGEJk9WKxapj4cwr6LwdpJMFsvk3\naDm6mCm6WVrO4DBq2D7gZPc2IyaPE4PnFx4pdUkidOkShUjk3r6t69dR6vX3prI+LUhSndu32+qp\noiij0WixtpbB4dBw8mQ31WoDh0NLLlchl5NwOLQsLaVRKkVEUeSVV1a4eTPMd787zrlzq9y5E0Gp\nlLN7t5dise11k0yWcDp1GI1qpqZiyOUCp0/3cfNmBK/XyeOPD7N9uwuNRo5KJdwT4YvGJfTVBmPb\nzOiyy7z5N/8dWTmPXK2k59Qp/AcOokJDtZBl+uWLRCMlDGYdokJOj0+F3eIjF9PQ022iOpfmztVl\ntPImmeVFKknTPbl2Ua1m7qc/pVGpUIzHsXR1oXO5KKdSbJw/T3ZtjUajgVyrJR8Oc+jP/5z5554j\nHwqhNBjw79uHlMth6emhks1SK/5iZN7g9aL9lEd7Pwi/ISOfEb4IJZp3YLFY2LZtG2+++SYnT578\nvMP5TKHXKxnZ5mZkmxuAQH+FubkkM0IFVSNPb6cBkpuYjV4aoopej8jP/vkCnSNB9v6n71Eu1qjU\nRQzbekg1ZagsSmoqE157nlpWRr0p485Lz5NP5hjb38PgyQd44rHjTE1FKM5NQq2KpcODeTjA6muv\nkZydJXBgPzMvvkIhV8bstJNKVbj42ktMPH6a6ekyucVpTNqj2HUa1l5/nc6Hv4LW7qDVqmI1ynnp\n78/wwFd30+GQM387StqqYt9AD3JDF+nFRTS2fnLXXoOaRCESJpWusBwq07V3Ao1exdpmmZ/+eIrj\nxztZvr3IztOHGBoL0KhIPHKyg0pZYmE6QibfZGQsgF0j0cimEY027txNs7KSoV6rs65o4NhrQBIa\n1MoVhk49gHX3YW5MpYhPTxOVqSlcu4BcraYQidDz0EMEjx4lNjlJrVRC53TiGh1F/j6E2NzZ2RZn\n+iWorVbUFssneiZKpdp9UtoA1WKRrcUCHpLUKxWKkQjG4QkaLRk6p4vFpSi1co0vP3mIv/rrG+j1\nKqxuOyu35zl7ZoE/+M44ldoF3MODFMJR5OhJxUElq6OtFBCTaWx9bqKxKCavi0IUZDoTd9+8zeHH\nv8bthSKh9QQDI27Gxz2snnkRi9/NE09OsDRsxqCTYXcpWbNYCM+bqUkS+wbl1NfnWHrzDYRaGYvL\ngn14G5GshLpZIbWWIn73Lgq9gc1ra2RtJgLHTrK0XiRbNZMK53AP70Rj0OAoRaknE6gsLlxeCwrv\nIeTUsXfZiC6uIeiMLK/mkOrQKTexMXMH1YiedLpCJgPxHAzsGsTvub8ZuZxKUUom79vXajTIrK9/\nbDJSrdaJRosUizWMRhUulw5RFO67r+VyDYNBhdutJxIp0Gi0WF5O43BocTp1FApVfD4j6+sb2O06\nJKmB3a7FZtMwPR2jq8vClStbvPLKMgaDkkymPdb76KP97Njh4rHHhjh/fo1CoYrLpWPHDifLy2l6\ne61oNHJEUaBUqpJMljEalRSLVR5/fIibN8P0d/Wyu09g5u//BrnYQtBp0NlsrJ0/j7Grh6ajk9xG\nmKXNEtW6jH3Hh4nPz6OS1WiGtrDrNcy/sYjFaWb45BHmnvkxCpXinj9UfHYWSzBIan4ec1cXgiCw\n+vrrOIaGsA8NEZ+bQ2ux0CwUEORyCpEItXKZ/kcead+jZpNSMolMJsPgdmPq6CAxN4eUzWLq6MDa\n14f4axSz+w0Z+Yzwwgsv/Fpdej8MDz/8MC+88MK/ezLybphMavbs8TE2bCI6OUlqaQkAp0PDngc7\nEQpJAn4dKqOBt66maSj1hNbjVM5EmDjYj9OlI3ktRSlbYFBT5/JPz2LWi6AXiS2uQ+U5dvaN0tdv\np6Zwos30oZK3uPkP/0CzKkGzSXJuHqQS7m4flWqL+HKESqGElM/R1e2hmUsQXo2y65vfZfXuGpOR\nIoYdD+F3KDndm8b84g22VuPUVP0M7OuhUcwyt1ZGV1zCo8xSmbuOXK1C1OlJLy3jrjbwebQoymnW\nbyyibSn48uEA5VIBk6xIbivKt35/N4Z6hs3FEBfOTCEr54jOGQmvxXjom0cY3NXL1vIW0egmjUYT\nmSBQE5WsJOXITX3s/497CedEplbyZDa3MGtaiLl2qrxeqZBdXyc5P8/AV7+Kye+nUat9qOmWrb8f\nKZcjs7pKq9lEY7W23/4+4ReiwaBELheQpDqiKNBqtShFIxh73DQy7Z6WerlMYnqK4YF+1tfzyFVK\nctUCoY0sUqkC+QRanRL/QJBSOoNgMHHsj56knMkhS0bRFVaxKJzYJ/bSrJnRyBuk8y2StnGK5Sb+\nQwfQChI9pipzcwleeTWCnDrVcgUjOSxNCVkpT/LqW0SvLaLs97I5l8c30Mu3vr2TltVDbvIak88+\nR2wtTEtQYDGGySUy6Ed2EQ2lMNrNyGkSD8UxqJo4gx7C6RaL86v4h3tYvbXEus3GwfE62dkp8mUN\n02+F0CjBNbGHwS89iMVpJHRjkmQ0TTaTQOd0kMlIlIo1tG/zgFarTQSi0QLDw/eTEZkgIBOEd9+C\nj12ikaQ6Fy9usrycpl5vYrNp6Ooy09nZlliXyWTodL8Y835nf7lco7fXSkeHiUSihF6vxG7XYrX2\nMzMTx+PRc/hwgNnZOEajmsOHA7zxxtrbfjUZVCo5lUqdy5dDnDrVy1//9VWWljJ4vXoEQc/8fJru\nbjNra2laLXjppbYAnyjKcLv1HDwYYGkphUIuo6NDT2VzkvDVa+3RXZeT2N272AYHaNRquIJ+slsx\nIutxjjy6h9i1KxSaKpZv36CQSGNzmtDZLOTCUbxH99Kxfz8yUWTo8cepVyrUJYlms4kpGKSUSDDz\n058iyGRUUimUBgMak4lWo0EpkbiXqeo6dgxLTw+phQWy6+uojEYCBw9i8HoR5PL7sly/bvyGjHwG\nCIVCrKysfK4jve/G6dOn+eY3v/mFIki/Tij1evx797bfzlotNFYrQUEkuqykub+PZMvC4uoquZKM\nxGYUjaeDy5e3ePRrQ5x/fRZvwMZoAOrFIvm6iMWkRKMWaMrk1NJR3rixxviQEXeni61r15FKEmqt\nCr3NgsqgRyYDn1vDxlYRuayFUq2iY7iH8nyCUEoiUG/yzA9vMj2dwN3lZfXVKaxWLcdPdHPsyYew\n2HSsbFZ44V+mWJ0L4w+Y2b/dQ2UrRrVUJLcZgkaDjkOHsTj0iLdnmTl/g0pNxvrcJkbbFF/5k+/w\no5deRuV0MzJwkMr8GteWQygbRVApqKRShC5f4ZrdgPexYTzD/ejeWiefl1BpVfTtG2M1q6bXqUPj\n9pKPRtGqBAYGHThpkrp04971rlcqNOt1aLUQ5PKPJBeu0GgIHDqEY2iIZqOB2mJ5T+9JLlehWm1i\nNqvuU6h8P7RaLSSpjkolcudOFK1WQYdPR/+gHYOsQLX5i4yJUmgw0GNgY8vG5maBRl2DL2jDbpKT\njjRIlksgKFHpnViDHSjlUJ++RosaiUwOa8BH9NJ5NCYdkTLciWiZPfsmlUwWUank+LcfQWv3Eb6d\nxePWYxGLrF24jDzpYv+ElfWfPEPP8QdQ2Z28dnYRpdDAHZI4/h+eQNYokc4lSa6sojaYSacr5OUy\n/AoRg0GFbt9OmqIKpBJXzv4zap0SW38fC5MpjFYjMkGGf3w7pUyBZL6Fvb+P7GISi8eIvBAh6NcQ\nWYlSzFVwjI6x/Ozr6N1ulFotMpkMT08HoVybZMhk4HBosVjUv7jGuRyCKKK12zH6fKSXl+9dV1Gl\nwtLV9aH36pcRCuVZXEzRbLZwOLTEYkUuXw7R02NhYMDOxIQHo1HF+Hh7zDufr2KzabDZbAwP27l5\nM0Kt1iQcLlCrNXG7dRw6FMBq1ZJOVwgGzVy8eJdGoz2+nEiU8PuNZLMVmk2R/n4bqVSZbFbCatWQ\nzVaZmopSLjcwGpVks1U2N3M0Gi3UahGTQUkyWeaHP5zi8EE/87MxymsLnD7mod6UkZpfoJyIY+vu\nolmt4h4eQshHCIz0curxPbjcBmYuzeI88ADhagOZIJIMJ7AF/aQ3NiknEwQfeIBKJgOtFgvPP09y\nfh5bfz+mQIDAwYPYBwcpxePIBIHs+jrlVIpCJIKoVGIKBqmV2qXpUiyGY2gIx9BQu6dnaOhzl/OH\n35CRzwQvvPACp06dQv4FuMHvYGxsjEKhwMLCAn19fZ93OJ8LZILwHo0Ld7cfoTJB5PwGRreTRrpC\nZ68LmVIglanSajSpSzVEAWRWL56BLorRKJ5OJwajhnxVoC7VGRh0E85VmNg1gbQ+j95iRKnToHe7\nyayt4x4bI7URpjNoxuIwUVLaKEqQjJcIrcd5NLifH/y3F8iWIF+RoaLC8o11+oNaJJcBqd5idT6O\nTK6g1FSyGpb40ldH6d9uotBtQxAE9D4vcrWGZjJCIGihJe5l7voiHUMKDKoGucVZHH5Twd0uAAAg\nAElEQVQn3f1OxHyU+MoGlUKZJgKNWpNEKIHRakAqFMnG06jdXTz8J99lYyFEMl1jLSnQQqC/30ZH\nhwmv10ilUiUzfYfIzeX7ekMUOh22/v73jOV+6D2SyX6lgFat1uDOnSjz80nq9SZWq4Zdu7y4XPoP\nPN/aWpZz51bR6ZQcPRqkUqnT2WnCKxNJ3Ln5zh9FY7Wi0OlQNJI8dsxKngB6owaDGhYPdPDi0xGy\n4RQGq4EjX9qFLB9jMtTi1OEJ1pEQzTZEtRKp1kQ0u0g3BHKZCBqbnapUpVZtsLUWo7e7n0IhiVpo\nUCtksbtNFFJZ9K5BGlYr67dmKTmH2AgVCPgNrK8kWNso0Ntvxze2jfzGAQrhMBpNEZVGjtQQaZo9\n6HxBrv7op/QM+Bh68BCV2Bb2gAdxbo1kosDc1RmCnRbsXQHqciXxWIxWvUa2AB2+TpyDg5i7e2hV\nytRVBvY++RgLF25RrVTpGAqg6d7Gyqvhtxd8Ld3dFjo6TFSyWcI3blAIh5EJApaeHty7dqE2m8ms\nraHU6bAPDn5svYp0ukyz2UKrbWucvPXWOvV6E4VCIJOp0Gg0OX68i0DAjMGgIpEoIYoCDoeWt95a\nZ3Y2weJiitu3o3g8erq7Lfh8Rg4fDvD666sUiwJKpYjRqGTXLi8Oh46trTyzs3HUajnj4y7S6RI9\nPVbOnl2hWm1gNKrI5yVsNi379/u4fHmLDq+WpdkMTalOaDWD0WpAr1fSG1BhaRaQyhV2fud3yIdC\nhG9cR9Ro6Pvyl1EYjJRKVWqZJO7de9HWMlh7e7C4rZjddhbeWkZvMSJTqejaPYZv924QBKRslo0L\nF4jeuYPe5UImisSnp6lXq2jsdjRWK0q9nuVXXmHbN79JfHaW1Nwcnp07sfX1sXXtGmqTiVKiLV1v\n8Pk+UXP4O6jXGzQarY+kgPx++OKslv+O8Pzzz/P1r3/98w7jPshksnulmn9PUzX5SITcxgatZhOj\nz4fB5/tQ8aVyOk0mXaZYlaEx6HD0DdKdlXPl5R+TiqRYmw8jarSMnjyA26HCZ6phFIpEC1aCRx8g\nszCD1iAjHU2i6+knr/Vx9UIUQQZWl43eR79GvVggsbxKQ8wQr6gJ+LZhODBGORFn3G8mlZZYj9QY\n2O5n25CFerFAPieB2kg+W6JzVzfRRJWF5QyrW2X2G80UsgVGRz0MDzuoV2tIpTKrdQ3+3lEUpQRS\nNsfW5Ss0FFpunb2KZ/sIo/v6Sa6FUAp1zC4bx785gFnIs/zCszT0DtwuLWvTKzSbLZotAUFvxj/Q\nwY2rW4QTK1gHh9Hr7bj6lOi9dRwOLX19NmQyGXK5DL1eTdPno5LJ0Go2SS8tIddq6Tp2DOfIyAfe\nh4+DlZUMN29GaDbbTaihUJ5GI8SpUz0f+AW4uJiiVmuSybRNx0RRxvJymu6DXrRbG5SSSbQ2G8mV\ndRLRDPEcqJRyth3aTlMnJ16pstOSx/6NPsKpBjazCp8hS2Q+itHsYe75nzP92jVsgwPYHXoknZtQ\nREEiXSNTqKOwOAj29FKv1qnpzTjcFnQmHfpmHVGuQKaxoDHpqdaa1DVmCrEERWUJgRZKjZKenaMU\n0PPimQ3U9QyuwXFKVQGhukUqW8JzZDuC2c3F63HGHz6BmNzA87WTWJ0mKpUmtmCDjdA8Uh2yxSYd\ndhtma4tr59bw2JQopBoOj49SLMztN6fwuLTEEyXMHR30nXqQfKFOR5+HjoAZo8tOKJTDbFbT12fD\natWweu4SqcXFe9c7cusWcrUa765duMfH22Wbf4UYmsmkRiYDrVbOjRtxyuU6mUwFi0XN5maOUqnG\n0JAdr9eIxaJ5230XNjez3LoVob/fRjRaoKfHQq3WIBAw0tNjRaEQOHIkyPR0nG99axvLy2lef30V\naJfz/vN/3o/RqOLGjTB9fVZyOQlJqiMIMnw+PUqlmY2NLDIZGPUKXptcp1hqELAb0WgK7Byz47Qq\n6NBrSV2+zJ2nN7EJaVR6Hf2nH8YQ6ERhdzN9dZ5KKsHY8Z2QkcgXakg6J8n1LXr3bENZzVAvFdHI\narSktk6R1m7HNjREJZPBvWPHvd6r6OoqdUnCu2sXsTt3sA8Oonc6aTYayJVK7ENDaB0ORLUajdWK\nXKNBodOhczrxTEx8ojJovd5kfj7J3FyCer1JIGBi2zYnOt0HTNO9D35DRj5lVCoVXn31Vf72b//2\n8w7lPXj44Yf5/ve//++GjGTW1lh74w3q5TLQtoX3HziAvb//V36+1WoRn5lheSHOzGwSvaqF3qCm\nc8iPCYnuLiOVZAKrTYfKoGffLifa7ApuTYmNm+usXRf5yncfZGK0F8oZVHKQRC3X724hFWsUYnFe\njiT4ym9tx7tvP6JOT0Z0kK3pmNxSsHBpGbFZ46GTemxWI8O7TGiXU5z50RvUWgKO7g4W5xOI5hbh\nWJGRsQCHdttYX8+ysZ5heGcvN69vcOapC1jsRh77vT34lTnO/39n2H1ijKmfvEC9JeLePoyo1nL7\nlcsc+QM/a/Ob+P1GrD4H6y8+S1Eqou8IsjqfZvDgGCrdbhbuhui02Oka7aFeKXP97DWc20cBGel0\nhd5eG16vHqtVc5+bK4DR57uXCWnUaqjNZvSfsq/F2lrmHhF5B8lkiVSqjMfz/u6ijUbzXdtt/w+Z\n1kTPqVMU43EK0TiT5+8SidcoptIU43E270zzlf/1O5RTEeaf+QlSuYLe5GR5q8Bmo8KB//l75Go1\nlE4fGVuFWxcjHHtyFxdfm2X30W10jxiJJiS8pjpWTZ1quY65187W7ApWs4Lbr97GatUx2G/l+MlB\npPgW9Qf2o9KquXQpRPewH4PdgrxzG0//ZI5OnxavXU9Mbse808yODi35CpyfqpB/I84Dx3twdJox\niB1k1lZ5/m+eJTI5hXfPfob6Lfi6XVBv0NnrwmuucfBID6tXbyO32dG6fWxev0n/oA+z04q/v0V0\nPYpJIdG1tx+NRsHqaoZyucbAgA2Px4BOp6ScTpMPh++/4G+PYzu3bftEY6E+n4HOTjP5vIQgCGSz\nFRwOLY1Gi0ajRaHQ9pTxeu+XLS+Xa8jlAteuhchmJfbu9eN26yiX66TTFV5/fRW9XoXXq6daFZmZ\nSdwrtSiVclZXM/T0WOjrs+LxGDh6NIhKJadaraNUylGr5bz22ioDA3ZsRpG+ASfnnruJQqwzMuxh\nfNTFxnKEk/vN+JXbMZp2k5yd4c4//YhCLMXR//J/sLKUJLMVwePWUy0UCZ8/j2TwUdfakZOFVpXg\nwQMIcjnVQoF6Lo2UyaAym9G73VQyGSI3b4JM1v5fc7sxer3INRpazSaJhQU6jxyh2WiQXl6mFIuh\nd7nQWq10HDiAd/duaDZRGY2/sr/n42BpKcXFixv3JtXS6QqSVOfw4eDHJqG/ISOfMl577TVGR0ex\nfYqupp8WTp48ye///u9TKBTQ6z84vf1FQa3WYHMzRyxWRK9X0tFhwmhU0Wo278mMv4NGtUp8agpz\nMPgrywPFWIzY8gZz8yU0lTiL565CvUZ5rBu7U89Yjx6fOUgTOQabCSE/R6vRYM+Agt0H95JMFnGb\nWnj6gyz8yy0i0SiZmpZkqMT2fTt5YblFPprln384zR/+wSH6xg/xxlubzM7kmXkphFanYf9BP9fu\n5MimI5SkJgcPBcHsYmszw85dHehsVuqiloEuPT26FKtnX2FrK4vD76RqOYg/YGH/g9u5OxXDIG/w\n1jNv4nKYyJVhYyVGU64mJS0S3DaAw6pElosxPOqje+8E2UIT+0AfsmaDrZUIky+9hZwqu07tYWTb\nODpvgPBGghefvoYgiPdk3hUKkVxOYs+e9x9T15jNaD5iureSy0Grhdpk+sjPgUr13oWt7Wz63i/T\nXE5ifj5JOJxHpRIpFCT0+l88Dw6HDqtVg1wuotTpiK6GiSQk9HolapmOeiZBLpYgsRHBpFOh9XXg\ndVrJbm4hNmsYHF5cfhsutYYzzyVYno8yvr+XSrmG1WUh11Azd3WLEw/1c+epn3Jlch5/pwOxEEPX\nNYAoiZz88jD9E73oChss/svTLNxaxtrhY/zrj9J/4gjqnhRXbyXRFGRIlTrFUpXkZp5rm2XqTZg4\nNEginGYjXMFo1hDayJFKltjWKSd84w7NFvj6/EiRFZwuNwdPHUFlNDI9GSHaVKM22QgcOojUVCAq\n1cilPJtnf07GbkPv8eCfmMCgqmM0qjh3bpVQKH/v+gUCJo4eDb5vs6rsU9Cm0GgUHDoUIBotYjSq\nyeUqlMsNisUqMhn09FgIhXKMjrruy4xFo0XW1jJ0dVkQRZF0uszu3V4WFpI89dQ02Wxb1v7hh/sQ\nhDrJZJlwuO1RY7GoSSSKVCoN1tYyHDzYQW+vlbm5BF1dbi5dCrGwkMRub/vaTF2N8fCXuvjOH+6h\nUpTo3uYnk6tzaLuKlWf+iejtW2i1CjqPHObwn/4JGzcnSWwmePEvfoDJZkY2aEIsJXF2d1BXGYln\nm1i39SMkVlk4+zrxuXmoSujUoHe7qUsSM08/jW1wkOTcHNVCgWIsRvDIEby7d1OIRLD29qI0GKhL\nEvlQCFMwiG1gAN++fZj8fkydnZ+qUu7CQvIeEXkHGxu5t7NYH9yw/m78hox8ynjuued45JFHPu8w\nfiUMBgN79uzh7NmzfPWrX/28w/lQNJstrl3bYno6fu+BdzqTPPBAF3qN7L6Z+HdQr1SoVyq/koxI\n2SylqgydssnmpUlsBoFavkIptEqmqMM3Pkp58iJ1ScLz0ENEVpZo1BrounrJJGNk51ZoukTW3loF\nuQK9P8jm3S0UcqiGNzhwaJgXnrlJbAumrq/i7PZy/lqK828so9YoMTlViCotc/NpbCY1KlWNlZlN\ndh3spVisI7TqfOfUBEqVAml9gTd/dBOdRqSSzbORL+B269H1jjG8I0Cj0SC0EiG8nqBeM6E0ZxH1\nZoRaFVldYnExidNqQ9M5SGwlzsbMCtuO7yNNgaZUZev1u1i8LnRCmalnnsV/YD+1phyjrxtXfxKZ\nSovul3o3tNpPLlhVLZWI3LpFdm0NaGdUPBMTH+l3u7utrK5m37Z1b6Ojw/geFdZqtc6FC+usr+eA\ntoGe06mnWq2jUslxOnXs3Om5r/lV63DQuW8XszMxBIea0WMPU4hFabn8aCygts6yOb9G774xXC1Y\nuz3Dxp0ZnNtHiIbSHP/KBKnpO0RzFWSCjkvnZjA6LITuziOIMnY/MIJGKbCykaMan0YRHGL22gpW\nh474+gIVuRV9t4KteJHc/3iN/b/3GDqThq8dt2HW1/A84mD6bgxBa0ZbVpMvQ6PeYCNU4Ob1TXbs\n6ebO3RgKuUiP3cb01Tk6er0kUkaWZ1bJX9ugWBUJHjuGYHEQDodpLW9Bo47eYkIm5UlO3qJZr6NQ\nq6gtL4NMhnt0lHC4wNZW/r5rvLmZIxot0NlpwRQMEp+aundMJorYBgY+FZ8ajUZBZ6f57RFd6e2m\nVDUul55CocbCQgqvN0p/v7UtrV9vEokUGBlxcvbsMpLUIJ+volK1DTKLxSqHDwcoFtvu181m656d\ngEajQKdT0NtrJRg0cenSBlevblGptDMiiUSZ69e3UKvljIw4mZ6Og6hgYSZMPRUlthEnX9zP17+1\ng7WnnyW/OEthZZGSKJBd3+DI//7nOIYHSURS7PnSXlQKGS6Hmo0bd7ANDGBx2/CKKXKLBerxDbRG\nPbagD61WhUDb2NHg8TA5O0vk1i36Tp8G2tlelcHQHqePRmlIEs7RUQxeL0avF2Qy7MPDuHfs+Ezk\n+t+drWzH1I7r4+KzJiP/DdgJ3OB+B98/A74MaID/ArzwGcfxa0Gz2eQnP/kJr7zyyucdyvvinb6R\nfwtkJJEosrCQuo95x2Il1tczjI660TmdVNLp+37nnQaud6NardMS5SiEJnqNiFaQiM/MIpXKlB1W\naiYR745tOLdvRyaTsfrGGzSrVaR8nsU7S/gOHaVvrIfQhQukV1aolCRUdgfW0d3cevYmqxsljv3h\nOHK5iEEjQ2vWI6tVeeRUgGSqzOpmmZaoIJaoEOi0o2oUSM4tEd+oQk3iS799iCvnl3n6H69i8Vix\npafRahUUskUSqQoarRIqBWy6OnK7jhd/nCCfkLPv8AjpzTCLs2H6Dx8geusmGquFu7NpBnbv59yZ\nOSrpFIf2e8nM3UXr7UBudbJHZyY3fRukPBlRzcbsGuYyOFyd9O4cYnV+i2qhgFKvR6dT0N39ybQ+\nAOJ37963cCVmZ5F9xHq132/kgQeCzM0lKRSqdHaa6e+3IQj3L3rxeImtrcK97VSqjE6nYOdOL8Gg\nGaNRhSDIqJXbCrsKtZpcU8eZC0lW7qyA3sYPn5rlt797EEupxk/+7hk8TiW1ksTkT54jeHA//sNH\nufA/XmaPRk/Xth4Kc9eIzK0weHqAFFYyMytYHAY2FjZZmUoALXweLUaThrWVDA886Mfqc2E0ybl0\nN8Lc3S1kMhlDQ3aKsQiJxWUsKoHom2fIatVEKnrEYouuEydo1OrUSmW2dwoMBwPs3+tF57Dzyqtr\n7Byzki21CPT5aDZbFHIVLB4bVp+D4K4dXJ3KsXenl1xCh6C1UI5HCHrMFDdWMHZ0tBVTRRGZINCs\n1xEUCorp2nvEOZvNFuVymxR6xsdRaDSkl5cRFQps/f335MU/LWi1CoaH20aI0WiRyckojUaL48e7\nmJqKkc1WOH68Pa1jNmu4dStCsVhFqZQTCuWQyWTEYgX27vVx+3aUGzfCjI25efDBbjQaOWq1SH+/\nncuXNykWq6ysZNBoFBgMSkKhPLdvR9m928ujj/bz7LPzbGxk0euVSGUFex8Y5NqLWboHPew90ou6\nUaC0sUytWESp11OIx2nWaoRv3mbnH32PyD/+hKUzr+F0aAhnknjHx3EO9LJ8+Tbx1RDB3TtoinJE\nuYDJaacQ2qCazaA0GNi6fIUd3/42S2dfJTI1g72/h3q5jLmzE7lWS+DAAfQeT1vtVqmkViwiqlTo\nHI6P3Uj+UdHbayMWK91HSrxew8fOisBnS0YmAB1wBPgrYBdw7e1j/xX4v98+/hL/TsjIpUuXsFgs\nDA0Nfd6hvC9Onz79b8bFV5IaSFL9PfszmfbUhnP7dqRslmI8TqvZRGuz4R4bu69WXas1mJlJsLCQ\nxGIU0TfAFXCwVikhFcuICgWCKEdtNLTHf/fvJ3L7Ns1GAymbZevmbZJbcdyFDK1YGVGjQeewE729\nRCu5gsnnZ2hHkGwZltbLPPDlEXxWGbGrb9GyaqhU6jz5UDfzmSDTs3GOHvYzd2WWQnwLT8BBoyVi\ntGgRRZHXzi4QW4/Tu6Mbi1NJLithMeuwWIrYbWqWltL0nbYhSSUCPj1f+cYEdlWJ1fMXufLqJLmC\njvHv/A4thQZbNItep8S6GEVjtSGTK6gjsLWRoDW9gMdrRrF9mGw4iVKTI7u8jFDOoKrmCFoUeHa7\nWFmM4/ZZ2baz7WD6SVCXJDKrq+/Zn1tf/8jnCATMBAIfXApq9xTc3ydSLNYolWqYzWpq5TLR+UU2\nNrOEQgWCI51cncyTbZkI7p5gbiaGwd/BzGqZTnuTza0CgsKM1tqBVKkzc22R3Z09WPUyZJkwHf5t\nTF4vUpGaTJ99i32/8zh7D/UQipRwdQeILGxQKtVQ6zRUKjX2Hu5DbdCj0tUoVGs0miAT5RQLFZbm\n4wz3m3B6Lcy89CqRW1NUKxLbv/Yo6VQZv7mKzFFlcNcA1USMteuT9Jh0pDf1PHykH4NVx8bSOvsP\n7mDlwmWya2vUkTNwcIx4RcX5F66hl1UY6jdjHdiDtDaHz2+kmqhTs9sx9/RSK0vIRdDY7bSabX0P\nhUK4TzROpRLvjfUqNBo84+P3SPxnJR/e19cWGTt3bpXhYQdud9vk7p1sSDpdxm7X4XbrSKUqiGJ7\n6sZkUpNMlrFYNG2fnjtRQEa5XKNQqLJnjw+XS8eNG2HkcpFmE2Znk4RCObZvd9Lfb6NabWdYRked\n2O06UqkStVqDHd8cRqNscerJI/T0WDFWwpSTGVq1KrV8FpPPi8ZiQZC1cAwN0KiUya0u4vKZoVyg\nEE+SmJun6+FHuPD8FZqNBtFUlT0nd6JWQHJ2BlEhx9DRgaBQcPX732f0d38XU3cfC+dvIIWLBPp8\nWLq6cO3YAfBrNyvs67NSrTaYn0/SbLbweg2Mjbn/VWvLh5GRIeCrwDvF4k3gX4CZj3DuvcDLb//8\nCrCfX5CRd1YYLZD5qMF+0fHUU0/xxBNPfN5hfCD6+/tRKpVMTk4yOjr6eYfzgdDrleh0yntW8dDW\nOHC52sZNWquVnoceophI3NMOebep0/x8kitXQlQqdZaXawQ8dvYOmBk+sY+WlEcuiBhMGjzbh1Fb\nrRj9forRKNm1NSrpNNa+fnZ0D+Ea6SeyuMHFKxG2D9uxdRTZml8jEwrjHd9LsKOf63czjG+zEr50\ngcjMAj0PbScvypFimxzbZWPbcD82ZYkOt4KQ4OGNl6eIhtLsPjKIyrrMth1+No1KRJ0Wz4iPzNo6\nAi2atQrLizn2fO1BUhUVEzuDGOxWrp2bJJMuMDS2k2/8n8dIhmIk62auX1nn0E4rubkpLHYDK4tR\n7GY5yUyL+dt30SvrpKduY91/ghuzNTamQ3QN9dHZ08XiuTeJTM+y+9g29u/ciSDGcdg/BXL9PgvV\nJ22gezcsFjUmk/re9AyAXC7g9babXJNzc9y+GWZuLo5aKaeBnPmZCptbRTqDRkp1BVuhDHK5nPyQ\njWSmRqBbAQIUsyXKJQmxVkGoFilKLfoCVurjPeQ2N2k1qtQWr3Jk92GuzwoYgzYcwUXcTi12pwFE\nOR2jQwjyFnWVhZtTaQydXRjjRRotGd5+N2OPTGB2mrFrqtTtWmoNLTplE62ljiIyS9DmJb4e4eLf\nP00pnUep09G/axCPVc7mmgWdRk4mVcC3axcnt+2m2lIiyTRMXl9Do1MBLd588RZqtZyTj+2k90gv\nxVSGW8+eZe7uJuVUEqNJw+7+IaKTk7h2jDE66mJmJkG5XEOrVTAy4sTpvJ+cftZKnTKZjI4OE4GA\niVAoRyJRupexkclkCG8/R3p9u9QSibT7QEKhPLduRfje93aytJQmGDQjijL6+qy43XpeeGGBRx7p\nJ59vC4hpNCoajfbouAwZer2CWq1BKlXhZz+bw+XS8ZWvDCC06li0LWZvrrC+kkBT0HH+rUuMHB5j\n4PGv00Agt7JMqyZh7u/DOTJCJZPF6PGQWlpCpZSj0qrReHxIxTImq4HoyialdJ7X//F5Hv/T76B3\nOankcuQ2N5l79lkqlQalTA6L2cLBP/o2lZqIzGjF4Pd8bo7JCoXI2JibgQEb9XoTg+Ffn4H5oCfo\nz4BvAT8ELr+9rwP4Z+BHwP/1Iec2A+8o32SBd8/5/RXwGPC7HyPeLyyazSZPP/00P//5zz/vUD4Q\nMpmM06dP8/zzz3/hyYjFomFszM3Nm2GKxRoKhUAwaCYQ+EXjo1ytxvQ+Ggb1epPl5TRKpUAmUyOb\nlTgzlyBdaHFq/wFsHd62K6XBQK0pkM1K2FpKjIEAtXPn2q6jDj0NjYVQVs7KVoNEpsHcYpq+gW56\nHQ66D+6hqPOzHpbYNt6Jw9KiqIWR39pDq5glu7FKrSRR1tWxj+ylUZTocCqYWijg7/bg9xkhEyUT\nsZDNNAl2WVlaSrEQ0uM7cBhTM0FJbsC7bZjJDehsNFmaXOfs02+xePkONpeZTrPEpStx3GawO+38\n9ukhonOLVDcXsDoc9HxtDw6/g431LJ6+DtLJEoae7bz03F1Udhc1uZ6VpST5isDYoBmTXiR66xYN\nScK3eze1cplCU0Gp1Jbe1uv/FWN77/i/pFK03hEak8k+db8Sg0HF/v1+rl/fIperolSKDA878PuN\nVEslUtEMlWoDaytNan6dTM7DxNghQtEyBqMamVxEoVER6HXi9NuxOQ0o5AKFooR3xzaMqgbFfBHN\n2BFCkprMpWV6ens59HCBzOI8pXic1YtXmThylKwk8lv/y9dJT90kubiExaZDnlzBHAgQlxk589I8\nO3b0su+3e+hwy2kkNiEboxFrIlJFp1OiMZtQqQTKxTpKgwHJ6mHt9WskVrdoNZrQarC5sInZIGPs\naB+paIbkwhpTFSMbGSWzC2lqtQZjuwI8+e1dzN5YJB7Lo1LKkYsCRp+P5biAfGAPlqIMjcmAKRAg\no3CjLiWJ3bnN+KlTBIMmyuU6Wq0Cm01LXZLIbm1RSafbEx0ez4cq7H4a8PuNLC+n7ysd+f1GLBY1\n5XKNGzciDAzYSCRKZDJl5ueTaDRystkKfX1Wtm930mi0KJdr/MM/3ObEiS58vnbvkctlIJutoNcr\nMRrVIGuRz9e4di3M+noWn89AOlXGZFRy7KifpalVXvynNxgc9RNbzaI0Gll+4wJWsxL/rp3oT38Z\nrdWCqFKxdfs2SoeXYiKJZ6gPmjVc20doCQrMLis7T4ySDPmYvb5AYn2LUiZHZvouifl5GrUaWrsT\njSCgNRtZe+01UKiZu7OGY3CQoX2f3gj9vxYazSfvKfsgMvIfgGHg3S5V/xWY5sPJSBZ4Z+7KxHsz\nIP8R+N+AM7SzKO/BH//xH2N+u0N/cHCQffv20dnZCcDq2ynfL8r2z3/+c7q7uxkeHv5CxPNB26dP\nn+Yv//IvWV1d/ULE88vb78bwsAOXS0cuJ6FSiTiduo+kvPkORFHGlSshtrYKOJ06hoedrK9nKe3p\nQWVIUqtILM9HqTSU2MZ38fKZVXZss9Fx4ABSsUSibqTQ0CBTOvHsttBotpDVKhhMOpQdHuI1A6nZ\nFXq7rfgcOQS1imQrh6wCS6+80lYhFUWi1/L02IxgdiFa3Ey9eY5mS4ZJKyCngSfgRO+UsXr1FvnF\nDcLVNMFH9pMqaym6nfz4bITtOzwUc0VkyRRz52+hU7fYsdPPnR/+GJlKQ2u4g0+L4B8AACAASURB\nVGo8TOruLRomP7liE4WuhLKaRW8MkL7xPOV0Ho3VDdYxmsoQokYLKgNKnUCm0EBQa9G1itSlBqV4\nHJXFyuxSgZmZBJVKezEaH3fT3/9eYbIPg31oCJkgkFpYoNVqYe3txfY+Y9ifBB0dJpzO9jOjVsvv\nva01ZTJErY7szAUWL96iWm2gjOfZtXMPAb+BYrmBz2eko8PI0JCDly4kGH3oEOWNFZSCnIrWwNHf\nO0kilqOynqI+N0P45nkSl9TsfuI0al8nlUKZos7H3/+/F7CY1WQ7W5iFIq1MjKXLd8mHQuz4zh8g\nWbvYM+HkR0/PcPrRQdILk9QzMUZ71cz87DzmQBBbXx/5RIp0KIZ3dARzTy9rSQGzRY8MQACVXk8+\nXaRWktBplUwubNIxMELs9gojfjm79wyzHod0ooDXb+St5xM4PWZ27Axg81qpVussLKRIxlpI2MFg\nIrxcRBtb58RBB618nnqlgt3+i0xIvVpl8+JFUouLbWIpk2Hu7CR4+PD7+g59WujqMtNoNJmdTVCt\nNggGzQwPO5DJZKRSZSKRAq0WOJ06Dh7sYGLCSyxW5Ny5FU6e7OHo0SDXr28xOxtHp1MiigLr6+0e\nkAsXNrBatej1SuLxIkeOdDA/n2RxMQW0hdgEm4Zzryxy8kSAra0cao0CrcVMcMhEZW2OqtaC0Kgx\n9dpV7EEfh//TH3Lzx8+zuJgiOCRHbzOz9OZlvKPDVOslzMEgS2fOUi0W6RjqRe+0k0luQ2uzIvT3\nI9do2Lx0GZ3LhXt8nNjcIhqXm9k3LiMo1ZTjUcqxKKIyQDLZniy02zUolf/2ZlM+KOIG7fLM6rv2\ne98+9mG4CHwPeAo4AfzdLx1TARJQAd43T/sXf/EX73vydxaxL8r2q6++ypEjR+7Vyj7veD5o++jR\no3zjG9/AaDR+pM9/Htu/DJtN+56piY+CVKrE7dtR7tyJIUkNQqE8mUyFxx8fZCNW54HDR4ivh0kZ\nEsjqKjbSDWq1Kleuxzi6cxj3w1ZC18MIKgOvnF1DrdfxxKMPIc+HMSjr6CxGarU6wuICG8/McTca\nY/d3v0Pn0cPM/OMPqJfLyJVypGwag9VE+OJbbP/9PyQjN9G7vZvblxco5Jt0DAaQq5VMuMqYCwY0\noge9RkaXOkbL5cNgNVKrtR2GncYWVlHB//SnD5MLbSFvSuSdakp1AYNJg0ZZJXxrmsEntpNNpAit\np8mkiuhcLiJrEbRCnValjJRKYuzqQUKDfcCI8v9n702D5LrPc79f9+l937fpnp6efcfMYAcIEgBJ\ncAFpWhQlWVdW6ZaXG/uWJd+qVPIpNzdVzoekEldyXZWqW7E/JM61XaZoSaa4k+IGgiAG6wyA2dee\n6X3fu08vJx+apESL2kjRoFT8fcE0CoN655ya7uf83/d9nnaVWmQHrdAkl8+idzjQ2O2IJh835uM0\n6k3ESoVyTs682P5E90RQKHBNTOAYHQX4TOPJ1WoFTudH396UWi16vYJStOuNIZOBa3SEp//Lq/zu\nnzyGwWGlVGqgUinY3sohIWcpZ0FtGKNvWIfOqGU3BTrk7L/zNqn1TbQWE8mNXZ77z/+Vh7/zTba3\nIzgPjrC/m6E/GGL14jyHpq2kbt2knsvRrNVIr6+jGDQwO+JB/2+nGelVsfFsBIe2TTmSBr2NWlvO\n4Jn7Se/FkFptVD0h5DKwKitoh3s4ef4wsUiR/UiRYjKD3nOM8F4Jc8CPTmgSfvtNmnINHYWOvoOT\n6MwhBLWa3//jk6jkoDSaKbb1yOWy7nq0XCCbLJLf3kIml6MdCSKXddBYrSj/RQugkkiQ29r68QmX\nJFHY3aU0OPgr277/qgiCnJERBwMDNjqdzocfuisraba3cxQKDba3c2i1SqpV8UM7+Eqlyd/+7SJP\nPTXGk0+OMz7uIpXqpjnv75fQahU8+OAAyWTl/bVvOclklXZbolRqADJUKgGNuom/xwgygTo6LMEg\nQzMDWMVNLn3v76mm0hh7PASPnaTcgOTaFjduxEhECphVIgaNxMTvPIRnbITs1jabr75KdnMLSWdi\nrqeHgcOTpLb3yd6+iXVggImvfQ3/sWNk1taoFkq0myIKk43ohTexBXz0Tg5STOe4st4mFusObrtc\neo4fD2CzffYnVb9Ofp4Y+Q90Zz02gL33/y4ADAF/9kv83zfoio233//6KvBXwHeA/xMYpStK/rdP\nUvjnCUmSeOaZZ3j22Wfvdim/FBqNhvvuu4+XX36Zr3/963e7nM+MWKyMXq9ieNjO3l4BSeqelFgs\nGlQqgViyRiQjZ2lPQqn88QGgKLbJZyvsxNugNuDT15jxlBiYdFDY3mJnYYmgAwxGPZHLFzE77SRX\nN6mWqyx87zmO/Ol/g/vAAer5PHq7DbfTSS2fx+R109aY+dGLG8w+cAilzUWpIqI1GrBpW2y+eQGT\n08p9B43EdhMsvvgW9/+738PsM6Np5Aj5NRTCm5RrWa4//TyCUsHwvceI3Fhg7P57sNl0lPYzVPMF\nasUKib000Z00ckkkePIkkw/fT/TKPAadgNMqwyd3cWejSinXYTRkxaasYjcrMJ06hVKvZ/Chh4jU\nFeRjScRSiVajgdTpUI7HSc25PpFAhE8nQlqtNvF4hXJZxGBQ4fEYPtZr5GdhsNsYGPHQqInUqiIG\nj4fxI24SqTrff3GRAwfcSJKMhYU4r7+2gcOhZ3bGRT6d4PHzgzgMEsqOjE6jitbuoFgScY2NoVLK\n0ChBOzDF0nKaf/MHJ2nVa7RqDpTyJtTK6I0aTDYT7pAftddAYu0axw8fQSxmSS/dQTLLyRVTNGQa\n7MEelitV4uEUCjnMfcXF+qU3aTbbJCsqLD4nQw47OncVe++9aL0edrJtXCY1iduL6Dw9bG+kEIQq\n4VurzHxpkHazzcD4ILsJkd1oiZMnTUhSd/vhxuUtOnobGkcZsZBloN+CRqdGOzhFpdr6SGuuWal0\nT/x+AqnToVEsfuw1lySJaiZDs1JBodGgczg+tRDt3vPufc/laly/HqNWazI25qRabVIsNiiVugOq\n8XiZ3d08jz8+hNWq5fnn1wGJt94K0253cLv17+fXdFfAW8027XYHlUpgaMjGiRN+FhcT2O1anE4d\nh4/6SSVLDIz5CO9k8RhF4q9dQ6VVIxl1tEolihtrDD75NRpiG5tZhRmoxmNsLizQM9pPz/QkC3/3\ndxT2Y5jtZrxz06RvLZBbuYNKp6VZLmMJ9VMVwdA3QD2fxz46SrsjsTN/DYtdj9NlwGTWkqsK7O0V\nP2xdRSIlFhcTnD7d96mu8b82P0+MvASMAEfonpBIQISuqPjpFYeP5z/8i9ffef/PP/0Vavzcc/Xq\nVVQqFVNTU3e7lF+aD1Z8f5vFiEwGGo2Cvj4LarVAuy0hl0OzKbGxkWZjI4fZrGZ9PUt/vxWNpvvr\nIAgyjDYTznyZyMWrPD+/TihoJnNxDUGtZnyyF00jg6AWyG9u4vTYMNsNSO0WvqFeOqKIZeYY5UqT\nZhuWlqLozE4Cc6fYTCu4vlKh0k5w6HAPEpDNi2i0cm4tRKhWd/GHnPR49fQO+xErJbavLJFc3SUS\n22HizFEEZRO1WKCcb6FQK5g8ewxlp0Ijk6BSETENjrC3todJI5E3arC6veQqEhatDvfoMDuRKuuv\nrzD91CiZisDQoAWfV8fhqWmUhX06gwHMvb2Y+/rYeXuZzOoq2c1N2o0GBo8XS28P7UoR+NdN+BTF\nbprq+nr2w5ySkRE7hw/3oFT+ch9uBreb4IER1CoZ5UoTx3SAOy/ssXwlxvZOmf39EocP9+B06hkb\nc2DUQCe+RbGYon1Ug1hvYBocpCo3sJ3MoNNq2dopYnMYUBkMhBdyvPTPtzh00Mvjj/TRNg9T2V5F\n73LSyKYwB/wo1ErCr72IvneQjX/+J3xzs8w9cJDMrRvIFUbS0QyuwCwdg4PK/CL3/em3aOWz1NJp\n8vEUGneAWqSGOjTAkS/dz/5GlHCkyna2g3lUT7XWQWfUo9ZXaLWhWmqgUbTZDZfYDpfoHfBw6lSQ\nnh4jV65EiOzl8LuUbG2DY2KSQ+M6HFYlV9cqtFMplMosIyN2pqZcXaM4oxG5Ukmn+WMBLxOEj804\nkSSJxOIiicVFWrUaglqNbWiom7XyKWg220QiRZLJKq1WG51OSb3eIperMTvrQS6XEQxaKJcbCIKc\nvj4LMzNeLl3a5803dxgctDE35yGfr3HkiJ+RETvf+94yt24lefDBfjQqgVisjCDIePLJMe65J0in\nKWKzaRgYdpFLV3jumetMHuxDno+RWlqhns9Tz+eRCQrKsShqtYwiekx6GTdeWyA0PYTBYsQ1NoJS\nr0elUqIzG1AYDWitFrZ/9Bq+uVm0NhtqTw9vPfMWnqNN8h0Dc6M2CjdvojfqsFvVyFUqXP0BHJMH\n2KhqkKSPei4lEmWqVfGn3JI/z/yixlKbbrvlC34OzzzzDE899dTnflX2J3n00Uf5j//xP9JutxE+\nw+Pyu0nXtlqJ06lDoZCTy9Xw+020Wm0EQf6+OJHjculJpyv4/WZksq7DpK/PhRjfY3dhDbUCdhbW\nmJp2U9vfxDzmJ76yzdDpU1jdVmr5PFaPHc/UBMmqkvJOFUx6nGNHSCysYhk0IFicXN5RERpsoRVE\n3DqR8Bs/whjs40fP38H0zZN4B4Ncu7BC7HIY0wNDNGVNNHsZ7rx5lb4BJ/uFIvt31hgY9uDqdWOr\nVWnFd5l54hyxxUVaMhWagIve2Sku/sMLGA1axu8bw9jXz82re5w+7qYgtnH3WLGonVyej6JTyxkN\nWBj0NoldfJt2o4LFpKEci1FJJrGqlaibBcqRCNB9Ku4bcqERc7SbTer5PG1RRGM2f6y/y6+TWKzM\n2tqPHR+bzQ6rqxl6ey34/aaf+X1dX4wmarUCQa3G6PeTWV9HWctCs47YkqPRqPD5jCzciCI/7KNU\nrNHj1WOQ1chvFXn4G/eTyBVYWa9x0i1gtFkxpOrEE2WUCjkGlKRFLT1+iYMn+qlVWyzutHA5xhh5\nYAB30EcpHsfs72HvvUuoVQLyWg6jJFK4/DrDjz+Gsp6jVaviHOxn+KEHCd/ZYPaxs3Q6Esk7d4jf\nWUJCTlmUIxcUqKQGWZOBmszB8nYWhaxMT18f4dU2PrcGvd5HPlfB4TSiN2h46YVlvAM9qA0Ghoft\nJJMVVlYyVAslyuEo/W4jSimJUT/AtbeXWby8jv/EPRjcbq5fj2E0qhkctGHweHCOj5NeXqZZr1Op\nSZhCfVRkRrSN1kccUSvJJImFBVr17nZTu9EgvbyMqednu/n+IiTpo2aI+XydalVkZsZLMlkhne7O\nTgwO2jh0qAeXq4BOJ6DXK7Fa1RSLdS5c2KW318wjjwxisWhYWUnjcOj42tcmeOutXdRqgelpF0tL\naSbHbByaMrO9uIGhkUeKFdnblcjEMvT4JhBkaTLr63imJ2hks5QTCVR6Hba+Pt76/jKTR49SSGRQ\nCHDfd/4dCEqyW5v03XcvWz/6EQqtFq3FhNHjwuT1Us4V2b2ziNZsRm9Us3YrRTQ0zfjJ+2iXcvgO\nHUJlMGANhdB5e1h8aeunrpFGo0Cl+s16X//Nm3L5nCFJEk8//TQ/+MEP7nYpvxK9vb14vV6uXLnC\nsWPH7nY5nwlOp55Tp3q5dSuJUa9ketzCyKiLtc0CDocOkJAkiVDIDMgwGrt280NDNnQ6FTpVh/Ep\nL6lYnkRFhlqjRK5oITUbNOodqvUWE1/9Ks1iHkmuYG27QqaupL7b4NryOmOTHkbHDmK3G7j47j6X\nXprnj//9MR48YSd+dZ5SsYrW48Osgys/usmhs3OEagqy0ST+8QEUDg9XX34PkJOMl9AZVOiooTIa\nESWBZqVGI1+gsLeHe/oASoePTDxLNlVAFRym78gkmaaOmwt7TJ+aptSucmVLhqBXMHsmhCNeQyqk\nyd5cY+1KjEIkztgjD6D2+KjWJRI7cTwDPcyGwGWeIZOp4vOaCPXJEFMxYjdukFldpS2KqAwGfIcP\nY+vv/8zuZ6HQ+Cnr6Waz835P/+OJx7umVbFYGUmSGAiZMIjgOXgIuVxGUduDwVyDBpjNKgxaP9G9\nNOceHqFcrFJLJxk+/zBXfrRI7PYy+ZochdGGZWyOgHIduS7K6KSX/tkx5LImQZ+G40cP0WpJNNoC\nrUyUfLqC5J1h8IyP0u0raM1WOkBybRNLX4hOW6SUzCC3ODF5lejcHmQmB/1ne6lEdth5+wLWvl4K\n21vEt/ZQyzUUWhoCc25aGiOptAxVo8DkqQPY7DqyRg21YpnEToyJcQ9Ks47dG0t4xCyjXi9KrYL1\n9Uw3cLHVQaHRoLVa6NAmnOwgW69SV1mZefAYsUiGjsMBCOzu5hkctCEXBHyHDmH0+wmvxygXJLZF\nDTde3SEUsnLiRODDU8bG+wOwP4nUbn+YGvtJSKWqrK9n6XQk7HYtDoeWbLaORiMwMGBlb6+ITCYh\nSfDaa1vU6y28XgM6nZLV1QxPPDFKNFpiZsbDwkKc7e08CoWMra08Dz88SChkoV5vY7Xqus6ulRKp\naAup1UZsNyikRBoFicBoP1qlhM7pYOjcA2y++ho6qwXn+Dj+Y8dQaLWMz4aoV8rovH6cDh0Gj5ul\nZ5/H3t9P4OEnMIxMISZiuMeHESsVIteugsqATBAoxlMcCLhRhdusr2c5/uijOC0CnXb7I1EK4+NO\n3n1370MvmA82yH6VQf/PA1+IkU/JBy2az/ua7Mfx6KOP8vzzz//WihHoGmWZhAq5/RJSLU/p1gZq\nyc6lBbGbSaFVEQpZcDi6IVw7O3l6e02olDIEtRq700C7mEVlk5DyCZyjI8gNZsqCmZX1AhOPnEVF\nh045z9iYwOVrKS6+HaYhGHn7jQ38QRtyVRuHRYm/zwmSDIuqRbFdxN1joG/cwdrVFZqNJq+/voPa\nM447OIFytJ+FK1uItTpSs0U6XmFs2Mfu0hrOQ0dRBoZBoULtcNIoltCLItf/378jfGuZua88wclz\ncxTkNhr7FcYGzcg7Ii9+7zqlGmQSCZbupPjWnz/M8o3b5KQyUiNKMRZn67IT/ZyV6/NhBEnkpMEP\n+QSm6B52rZZ2pEVNCCIMBcmur3+YDdQoFIheuYLO4UBj+tmnFJ8Gk0mFXC77iNujQiH/qVXjer3F\nXjhPLlXi8tUYjabE1laOYlHkilbG8WktwX4HlTrEVxMgEyil0ohtOaERN+VCFbtFwCrVGD8xxNWL\n64QvXkTSGCkV2lTyeZb2wOkdZfjcOI3oFj/8mxcY8Cmp5rLIHz5JPpmhx29hJynjlX+6TLWtxhbq\n5dw9TrKZCjJBSV7lo5JqMnDwAEWFk5paRG6xofWG+Lv/6yW03gDDg1bMXj9lUY7/zP0UK69QzhVw\nTo6jG5zANTWNTdAjnZ7E41RTWlvENzWGyWGjZzpMu1xmZ3WP2t4OnYqcpWef594/+Api1YFOp0AQ\nZICAbzhAeHEDdXGf9KVV8rkKwycPMzIZIFbtgCB85ElbLgjUBBML+2mq1RbQ9QLa3MwSDJoZGLAB\noNRokCsUH50xkck+1Slao9Gi0WjhdOpZW8uwspIilarS32/l3nuDaDQCoZCFZ565QzRaZmDA9n7Y\nowWZTMbly/sEgxai0RLXrsWYnfVSLrcQBDnr6xnGxpzs7haYn49QKtSo5Ao88cQwqUQJrclIKNjD\nmEdOqCkhxDdoGIzI5HIGH3yAWjaDzuVCZ7dTzeXYffmHjJw7S82iJba4gD3gxjQ0Tts7yFtv7VJK\n5bFbzXhdQez9CWqpJLGlVbwHj6F0+YlvRQjNTiHKtSiUwsdet+FhO1qtgnC4gEwmo7fX/HNPCj+v\nfCFGPiVPP/00X/3qV3+jWjQfcP78ef78z/+cv/iLv7jbpXxmFFNZrl1YZW053o0B9xqIrd3ArvVw\nZWkfUaai0wkxO+tlYyOLy6Xn6nu7ZPciuO0qhs/eg1o7T92qpN0U8Rw9QVXvpRMUsB6Y5q//6xbb\nW2mGx30YVG3MBjUWg0BZJge0GFQdLK0UUmWdrzwWpN2ugqyDWqvE3eeBepmjj51gZTlBsaEkUWzR\nG3IT3c8xMOhCE7iXaiKGStYivLrH1OPn0HkDBF1e6sVJ7HY9uxcusPLiqwgKFce/+RSVeIzE5XeR\n27wcHBtAZXDw1//7S6Q295DrTbQbIqLYpJbLIol1rF4jYkRJx+Dg+sVVZnxjyAU59YbAjZtxDh0+\nSznyt9RyeTS+IHWTn2JDSXgpgtdr+HCjoVEs0igUPjMx4vUa6e+3srWVo9OREAQZg4PWj7jDNptt\nLryxzvzzl3AGnLzwzCI2vwdfv5diEfKlJmqTh8vX01y/FqVZLDJ37xh6u4VmukRiP8u5B0L0ejRI\nUoP84hVy+w1sg4M0OwKFZpZiWWJ82kut0WH/9iLRGzcJ9BhJxIsYNQrE2A5Oh5O2So/CrOPQ/bMk\nIzl8oz4C0/3YLUoW3rlDo1zAM9aLcvQIYnyPdq3GxvUl1LsZpk5Oc+XKPtdyFR4+P86tFy9gcDiY\n+qM/wWTRE81JaIO9/N9/v0Wt1qTXLkOW3mF6SIsQX8Xu96DyBpl/5odUKk2y6TL+A2MURDWV8Bah\nIyZMhu7129rKozXq0IoZdhaWsFmUFItt3vm75/n6f/pD8jI1MpmMUOijkQBdZ9t/McgqdS34P3CE\n17vdWPr6yG5u8sGEpdHnw/QzvIF+GYxGFTablmKxzuJigkKhjk6nxO3WE4kU6e+3srycptHoUKu1\n2NsrYDarSSSqnDwZIJmsUK+3EMU2Pp8Rm03L7dtJtrfz+P3dFtaFC7sIclCpFcTKLcoNOVpvgB/+\n41XUr21z7wNjhLwKmqUCW5dWqcTiNPJZFCoVlVQKhVqDTKXGqmrQ3F+n9+QxJh45Qy2yS3J7n9V3\nf4i9L4BeaeKNf3iNXFXGyUkdgZMnCZw8iczk4ubFZZTaCvKAFoWgZG0tQ7HYIBSyfmRouytAfrE7\n8eedL8TIp+CDFs1zzz13t0v5RBw/fpydnR2i0Sg+n+9ul/OZsHgzypsv3qbzvkX4lTeWOHQsSMAp\nMDLiIJerYVI1qdWaWK1aYrECiswuRFbY348ydHSKoYPTVHs9lNNpsrt7JAwG9kp61t7a5tr1GB2x\ngd6Yw+3UkE43GZkO8t6lMCODduy6NrWdDUhtY7S2aPumuLOa494HTpFbmCeysYF3eoKx3x1HaXOx\nGWmyvlXAb2mwuZ0jmW/Tappx2jQEzowSyYtEE2rMjQiv/tXf8uV//xirL72K0eXENjxMNZVi6/JN\nVE4vZSHD7WtbzD50gpnT04RX99EqOliG3LRVBnR6NX6fgYX31pid8YCuSjUaoVZrIlMqsPoCqMxm\ncBuY/uY3yezFqSgsaIPD5Io5wrsFms0Og4Pdp2BBpUJQfnrzo5+FWq3g5MkAoZCFYrGBxaLB6zV8\nOLwqdTokEmWuvjJP/NYtzOZDNIpFNm6UMVt06PUG6nUZcp2e9dUtGqJEo9Hm2vUEWjXc/8AQ2WQe\nuVhm6aXr6JspTDo5gUCQ5Zs7WAdG0PQNs7NXpbKa4Pd+f4ZwZQFN0EgsWkQpSAz0aHnvn17l9Hf+\nGJ1agS6yQ8itYWq0D5neyrPfvYl/wEPbN0ZgyoTG7aGT3ebO95+lWa2h0mnI7kZxOPQoVCrkRiu7\nsTr9YwES8QLvvHIb/72nUZst3L6c5PLFHUYGLWSKaRS1HLeqBX73K6dY+fv/j8FHrKilBoJWjswk\noGwUcJqcBII27Ooaa7diXLqU6Ppz7MuwWxwERwMUdnewmnXU9RqqsX2Gjw7T02P6qadtnU6JSiUg\nij92epDJwGL5sdeIoFTiP34cUyBANZVCY7FgCgR+yin5V8Fi0XLggIdnn12l0WjR12ehXBZ58cV1\ncrkG3/rWAaamXFSrTSKRIvV6i0aju/o6Pu7gsceGWF5OMz3tJp+vs7aWYXjYhtGoYm7Ox9pamt3d\nPO1WB6NJzcGjfRRyNdbWM8h1BppiDbEucvW1Rb781QMUr2xi6vGhHAhRy2aRyeS0Gg3KqRTF8A7V\n6B4Hhgao7cVZ/uHzVKstjHIlses3sIT66D88yeqFq/g7boytDPbRUSQxiqaewj11HLVHSbmtYXk5\nzdpahkajxeSk+xNfv88rX4iRT8H8/Dw6nY7Jycm7XconQqFQcO7cOV588UX+8A//8G6X82unWhXZ\n2MzRed8Pod0UaTYaLN+J0z/qxapMkS9lqMbaKBSTyGVtlM0KzZ0lwjcWQSbQfvk1tl5uMfs797P2\n1mU6Vj85QxwZWvbDpe7qnsPcTYT12UjHsgyNunH1OhkLqshfv4i2UUXSqrj4ym1GzlrwOy3sX1+A\nZofgeJDN119h/dlncU5P4ZudZebRI9yY3yW5n6Gt1LG0lEZtNCAz2NCbjER2szhGQkzffwyZDGyh\nPorpHD0jITKROFqnG41RR3Q7g2AVWLu6ytCjj9A7M04uW6bWUeEO9qK02Kk0BRpNiVimSWCgB1fT\ngsLbh86iQm81I5PJcIUCjIzMcPGdbUrxGpFkg4DDicXrIJvNU6s10epUWIJBdE7nZ3pP1WrFTz2h\n58Ph7jBltUrdOUx2d59mo5sPEuhzsLGRo5jKYQ3qMbn0CAolKrMZl1ZPXq+lXJPYWkrRG0hg0cuQ\n6k3iK2sEB1xozAZktQSTcyG2wjmUFiXHjwfw9zvIxLI43GZyOwL2wwNYdLD39pvo3S70ditr33ua\nzZvrqCxWps4/gG5uGEmTpoWC6ytV2s0Sv/8HPtZ/8BaJ7SgqlUBjP47V60DMpggMjYPZg2/YRirq\nwuCWOP6QlVSyTKlUI7xbpFnM4fL4qa+to9SrqJs8pNtW0h0zzkwJ/8QAycVF5O0CJpkOg9GE1Sin\n2pTz+rubLK115zlkLQV3Fja49+QEYq2OUiHHZzMS6LUyfaL3Y++Fy6VnMTPaSgAAIABJREFUZMTO\n8nKaVqvz/gbLT7cIlFot9qEh7ENDv9K9liSJQqE7D/STAqdaFel0pPddYZtkszXeeWePcrmB329i\nfj6KIMjp77dw/LifjY0sSqWcYrGB2azhhRfWaTTaVCoiOp2SWq1JJlPj0UeHqFZFBEGOzdw1RMvl\n66QzVexWC1aTAkVTicHhplrvUC03aMrVKKbPsvLOdRxaE54+Fzv//DSeuTmyq2tUsnlc4yPUczm2\nXniOWjJBuQqVpgzPyDB1CXoGekht7EFbpBSPM3T+PPFbtxg9PEZRDjuvv47z+Gmgm720upphaMj+\nkUHh3wZ+u36af2V+k1s0H3D+/Hl+8IMf/FaKkXZbwuKyEpwI0SiWSO6n0GsFtGYjTZkai03H4YAb\nd58HvUpCoWzQElNceu11WnURa7AXWbtJLhanUa5Q7agxtqr0uJRU02r6BrTsxlvIZXLk7QadapFD\nh/xMD6mRSy2+9z/+JRafl3C4gNdvZX8/j2UvRd+cm9Wbm/QfOcDtt29STdegVkC+uoraHUCeqHBz\npUS8rEKrE0glSpyc6kUvlWhtrNCOFqg6R7BOH8RkLjB49j5qLYGE0s1edJ9UVo5Tp8feayBXlkCj\nI1uR88gfPcbijX129yrYvBa296vI+mbplXTQrCOzeQkN9rGw02FgwIRMJsNqVuJ1d/1E0hmRXK77\n4RXNyem55z46mSg2twJnfxBLX99namT2cRSjUXbffPPDIUlB50SlkFCoVOwtbXLkyDQOjwWT04bR\nbSXYb8dgUNEXsgIyNjezNJMVXH4bQ+M9qDsVOp0GOocTg1mHQqOhmslgzoU5OTuB3OHEaJQjd5tZ\nvLTKQLAX73ETCzeitAwa/Ocexx9ykbl5jdTiDXoGhtDMnOHdhTyFjQVaSgN+lZLJUTOvPruAsj5M\nWxSRyboBdHK5Hq1KwmFTI9c00Uph9haT7GaVRONlZEKCYMiGTiNn+oCHUqmBTq+i2oGypCcXLVAu\nN7AMDLFwZYvDZ2do1WpkN7fwzc1hDgapFQq0dH7ypRIAOq2ATK7A6XPQaMuwep3oVB16gzZ8k6M/\n89oLgpxDh3z4/SZyuTpGowqv1/jh8OqnoVwWuX49yv5+t0a/38TcnBedTsn8fJStrRzDwzYSiRKv\nvbZFsVjH6dTj9RooFuusr2dQKmU0mx3Onx+iUhFRKq1UKiKhkJXbC1GuX4uxtJRifNzJkSM9KJVy\nvv/9Zc6c6cPm0JOK5dFoFMg7TUJ+Lcvv7pKNpDAr/LTUvRw/f4S1zQKrb19FYzayvrzGyPEDHPuT\n75BduMLCP/4TwVOnGDp3jkp0Hzpt6pkMJqcbqSFQyWQRehxEUi0mHj+Hz5Gn6dCgdThwj49TrzbY\neGsZsQU9iED3d6vZ7NBsdviMgnjvGl+IkU9Ip9Phu9/9Li+++OLdLuVT8fDDD/Ptb38bURRRqX5z\ndtL/Ja16ndz2NsW9PZQ6HdbBQdoaM7WGxEZKiVpmpP+wF4s7wuTREfQ6OcpEm8h+nnI7T6vdIaBM\nYzQo6DSatOo1suEIrgO9KBtF5IKc/oMT5DbW6dUVGf/qcbJVBSbHNqlYDrdVQCjE6VVIVBf2cc/O\nMXP2ENcubVPMVekb7sFkNSIYLeztZkGtR6VVk9iKYDHKie2lGP3dL3HpapKAcp/1lSRXL64zc3IU\nX9BOyNlh5cVXUdBkbyeDPLvP7LljKOwe9BYjqYKVleUUwek5tqLvkVovMTjiwhPQMnbPQTZyLeLJ\nOsFhHweOannlxWVWbmXp6THTUgY5fqafcLSGRaXjoYcsNCo11K0SDiHJ2j++i3NsjIFAkFSqgiR1\njeG2EzAxMc3YicBdE+T57e2PbGuoa2lOnJvh4gvXyEYShG8ucer8YaaODKP1BRGbbcxmDU6nnjt3\nkuh0SlqtDqGQBZtNQzZVRKXV4NE3UJe668xGrxcUarQBF1WVHvtggN35m7S2VogyxOpunbbaxF6+\nQ1RmwDkToFG8QCFdwnyklytX98lF02hGbEQLDTbeW+Br3zrCY1+epZQtMHX6IMpKilw8jcOpx2AQ\nMLpdRG5vEY7nWV9LceyJ0+zVZdy4sU0+H+C+0wMohA6zhwNEt6J4xoeJRgqcOTfKws0woaEZxkeG\niawv4xsdZ/CprxOO1Lk6v8rMg6MEPV6M9iY9rSzVRJz0Xh6L287odACbqECtEXCOjmL9BdtRSqVA\nIGAmEDD/3H/3q3LrVoKVlcyHrxcXE4hii8nJbpyDzablzTd3OHzYx/i4C4VCjr/HRDZbJZOpYtCr\n0GgU7O8Xsdv1PPnkKFeuREhE85QjYaYHu74weztpKmWRzc0cdhNMDuopRWOcOxMglnYT2c8zN24k\n5IKZKRfL7QadWhmvrkrvUD/v/PA9FCoFSo0G29AwhbqMmt6D1uVh4qkncY6OIpfDzltvYevvJ37r\nFvVsBnugl7qgxndkBlVeTnZ7m5s5NY9+6QFKkQiNfDc9xWLVUqhAo/Njke/zGT9RPtTnnS/EyCfk\nvffew2g0MjFx90OKPg1Op5PR0VEuXLjA/ffff7fLAbrT8tVqE4NB9UuZWUmSROTqVdLLyyBJ1PJ5\nEuubbIi97KdbtJETy8sot5t85al76TXViewVKMRTWG2W7lF8LsbCtXc5+uQ5Ju4/xsZ7N5ErVVh9\nLpQeA1i87C1sMzQ3x/ADJ9B7nYQvXOBbD7ooCj1Eri/QiWYpv/4yWwsL9N5zD4f/7M8QVTewroVx\nD/YQuPc+nnsljNWjY3hqnHq5hi/opJlP4R/wUlVaSUfDyKIlBvstrN7SsLsW48v/5hDZ1SsYdTIy\n8QpWPYiJfXav3mBg5itEbkS4fn2fHBZaKi8j586SXN3A6DNz9NFjvPBehZXVXYrFGgemPRw44OF3\nvzxNMl4iuRPFZLJh99gQpTK9vWYOzznJ3F4gv71NJZGgFI+TuHmTkS9/hfFBH/vJ9odx4VNT7rt6\nMviTxlsA1WSSkMtF33ceJRNJopR3cLkNOH02TD0/bh9YrVoCARPVajeJVqhm2bx0HV08jdXrJHhy\nmkrMTiEcRiaT4ZmdQ+YdoNqA8MVLPPs3LzN6IMDmcoS95X16pkYRTDZqpRoLtxIcnpxF/eobyJ0B\nYu/cRN5u4dYLNBRars8nCW8lkeoVjszNUL6xyMz500Ru3aHdkvCMjVAu16mnk4iVNm6HBqla5MDM\nMJIMpGYDXafEzkqMU08cR3fKTyWTQ64xcOvaFuG4yLuXFxk7PMwjDz+O3Sbxj3/1HJVqE9vgIDt5\nLdaajF6fjuTiIpVkFpXegMkgYNd36DtyFJPPh+IuPXpXqyLhcOHD14lEmf39IpFICbVaIJOpIggy\nVla6sfWzM262NtM06k0UCjlKBRw65CGTqWM2q1lZSRGP+xkZsaGopnn9zg71msjBk8M8dJ+H9XAN\nq65NJRrjzMlBXvvna7y0kkBjd3D6dB9SKsz/8hc/4Mj90xw71Y/ZpGR0wke1miZ29QqCWo3B46Ga\nyVDY2yPVp0GdzFCMRLH09xNdvIPnwAHQmTj63/73pG8v0qiLTD36OHmFg+iVi1TyNRpyAx1HL4pa\niQbQqLfQGTXoh4a4vVFDEORMTbk4cOC3b14EvhAjn5gPWjS/DXzgxvp5ECObm1lu3UpQqXTFyOys\n5xdOiVczGfLb2/C+7XR2bQ2twsrKnTt0jC4ENDid3QAsuaDAPzXKRmoTy2A3L6XdbNFpCFRLDeLX\nrnDk4cMEQ1aqySRGpxnfqSfA6MB3YJKeAR8mn5dOu417YoJCJIKpvMPetRfI7+5Sz+dRW20UYjGk\ndofB+++l44khyjSoLVomZ9rkKmAdGsOjqyN6VGQXb6D3uImWGqjNJmSCCpO2w3/3P50nHisyPOKg\nJVhZv1Wk4zDSKohIHSWdep3s8hKrV1fJxhQkK2kkmYItdEwfPINnyEZNq6NcWcdq1TA+7qBYrHN5\nPkJ/2oLXa6B/wEKf30SiLjAz48ZsUhK7cZPV7/4DiYUFlBoN/hMnAMivr9I7p+fA+Tk6nU432fQu\nY+7tJbu5idT+iSFKSSI0PYC/340kSahNJrRW6099bzdfBwp7e6x8//vUsll0JhNSrkX8Wp2Bc+dw\nTU3RbkssbZTZuRyn1ynRqeQ4++Xj2D0Onv/BApE76zSrZcbuO4bRbaXeUWAdn2T40UfAYUOj06A2\nW1AZjQQcWrSPzDJ5eACbsoZaLiIOHaLRllBPmDFrQKORsfnGBXw+L1pNnZrGx15OQSSSJpUsc/q+\nXpRSDUEhI769z8EjvTQqGm4uxskVOkwcGcLXm+X2UprbK3a0cy60fUMYFApURiMKtYalxX0mQwJx\nUx1ZsYnL2ebQERdiKkYposT2GWfM/DwEQf7+yjEUiw22t/OIYhu7vTtw3Ol0yOW6K+Vra1nknRa/\n//UJUpkaDVGiXLJz82qYxaUsNrsBo1HNrVtxnDYt6USZB75ynNhWnMROjNmTw5x7dBapUmDnapzC\nyk1Onuwl19LT7kh4vXreeeEWZpOCrYU1hGoWjVeHsqdF7voSsnqJzM4O5Xic3hMn0Og0mO0m0psd\nRp58CtvYFKmVFfaKGlaursNanqHpewmFLCym1CzeSTF3z2F27+zQbrWQZAr6z5yhFI2yuRJD19HS\n0loJGrutvLExO81mm729wocJyr8tfCFGPgEftGhee+21u13Kr4Xz58/zjW98g7/8y7+826Xw7rt7\n1GrddcFKpcnFi3sYjWqs1p8d+tQRRTqtFpIkUY7FaDebyBQK6o0a+4kMhUIdU08PSrWS6enuU4XJ\nokNQvG//rlRQl9npPTSNGF8lNn8ZtdmMf3YK19QU5UgUtbxD36lTCCoV+b19tt+6wP78VZwjg2hN\nBsrxOEafD6XJSksU6bREkhvb1OU6ZIKVne0c2VtZRoctnDnrJLUbIZJqEgwOozKZqeyHGRzuvgnq\nesw898MVkt9dZXjUhctlYGB0mPh2jGisRKfWwW7XMTgRoLC7haJZ4qGvf4k7awU0Bh1GjwubRY3b\nrqSaL2CT5fAFHJSaEtevx7m1mCDUb2Vk2MY3vz6O0mzBqpVz506KXCyJGNlmKDiNemeHWirF/rvv\nMvDww0iSRD2XQ6cVkAufj2Nic28vvkOHSK+s0BZFtDYbvoMH0dpsaG22j/2eTkcilapQqYjIxCqt\n3WUi8/MAKDQabIODQNc91D40xOpqmtXVNF51iVKiyaWradaubzAyE6T3wBixSAGTQUEtFSe7fIeR\ne2aRC258955FY7JwpGVnay3O0vwqkkrHocM+DFRAY+Tadod4pERiP4NCkLj33hD6wg7IBbbXEoyc\nmOHtm1WiqQx5tZp8usSdhRiOM30UIit4TvVz68oOVxYy3FipkogWCPXb+OrXp0C1i6pZohDrUM9k\n0Fqt1FMJNBYLtUyGdFNEn7zDhFmLmNln7Xu3GXv4/n/1uZ9/iVqtYHjYwfx8hHJZRBTbKBTdgLx0\nusrgoA1JAqMxTbPZJp+vsXs9ytyJfjQOD//r//wjIrEa7WZ3m7hYbFCptHj7zWWmeyXCa1EOP3CA\n+x6bJRots71bxmXXMHBwlDu34lTrYNDKia5t885+EZO/B2s4gtRqoVR0cId8FCJRMpd+xOGj93B9\nXqKYK6NRyzj91QeRR5fQPPAIi0kl5p0O+YSOZ/76NRxeK3qjjs2Xtrjn0Vmu3k7x8vNLqJWzeA0K\nFDoLTp8VtdFI1dXL3q0W8XiZcHiHbLaO12ugWm1Sq7Uol0WMRhVHj/qZmena3/+m84UY+QS8++67\n2O12xsbG7nYpvxZmZ2cpFApsbm4y8IFBwF3iAyHyAaWSSDpd/bliRGO1ojGbqSSTtMWu+ZKyWcLR\n18PKW7vQkdBpFcxMWvBoiqTW1un1WAi7dIhiB53QRJJb8B98AGtzkkoiTqfVQiwWiVy6hMHjoVUu\nU47HaVTrrF28zuL3fkglU2B7aZsHv/1v0VitNMpVipkiSB00Fiu5TJVYLMK+6ObWzTg9E0NcuxYl\nn6vx5d8J4c9luPBf/hZBkGF2mJE2FpmYPsLzF1JISi0WrwqFVsvLL63xR398mNFjU5idYVTacdwD\nvchVGmI3rjN67iyvX42ytVfB4bPTq7eQjOWZfytDS6bEbjcyPmTn2Re2uXw5gsGgopCr8fabW0yN\nWbEYZbx3cRe1zUEplScXyZOPNpibmKNx8XXEahW5QoHB50PndN71D6ufRK5Q4DlwANvgIK16HY3Z\njFzxs9/W2u0ON27EuHMnRa3WpLyzydCABWMwRGm3O39SjES6OSvv+2JEIiVseonyTpibGw2i8Tpi\ntcLCa1c4+pSFQw8dJbsTprCziS/gZLxXwcbb72KePcnFt9eZmfOTiWUYGPUhl0uMzA1z43aUnkEn\na+EsFpuLYZeNO+/c5JWXVvnaN2bxmwwkXrtCSjTSUcvpO+CmIsowqTvkCw0i0TKnv3QCl03JlSsR\nbry3TUtjRtZpsbkaZ3HRzcERHbm1DexqPy6LDI8PBLUJjB2qSgPV3DadZpPkXoxOp41SrUalVWIO\nBv+1bt/PZGzMgUIh5/LlfQCGhmyo1Qqy2Rq1WouzZ/uw23WsrmZo12vUo0WsRiUby5s8/NAAF+cT\nZLIN7C4jJ0/6UakEegIWvIN6oltRBKWCF55dYf7CClqrlZGpHnp7jGTCNaLhDBqrBY/biVel5fLF\nNAfOnsakbuEf8uMIuEh8//9B0JuQdq9x+sgQuv7juAeDaPLLNFUy3r6eIJ2t4xQVbG3WUeoN5LMV\ndrfS9A56ubNexu0z4+11sh0ucfgrY0zNBnC4uq1ESeqeAkWjZaLRMjIZeL0G3ntv/0O3WUGQE42W\ncDp1v/aZnbvBF2LkE/Db1KIBkMvlPPLII7zwwgt8+9vfvtvlfASZjF+o+pVaLT1HjhCZn+/2btNp\nzC47U0E/6XSdQqnNmXvsdHYWuf3POZrDDsxeJydm50huR8nux1AJ4NGMonZ6aTfq5DY3Ke7tUUkm\nqZXrmMamsbYk9hZXqVVEqrkSyGSI1Sbrl24w9Y1vsPPWBZrrOziHBtAH+7l9YQG5f5i1q8vUsg3S\nKjmlSpvE1j6njjjRJdfwmltUM1lSNzdICCpCniEsRjkHDwe67qu1ApVCk9s3wowErBx8fICVcItX\nF3M0M/uolW4mlG4q6StYhRZHjx/k1Tf2qBSriIUC7U6HhMPCwaN9pFNdUWc2qyjmqigFyGaqFPIi\n6c1tnCoNCq0WuUKgWpehCIzSNtxCYzRgnzqA1qDDMfqztyvuJiq9/pfyrkgmK9y+nUIU23TEJqVM\nnqV6g8MjY8j2w0jtNq16HYVWi87hAMBgUEKhQaktEN6Io7fbCB07SCWyTzKc4tSTI7gO6MluKFC3\nSrTXr9Jo6UmubrA8v0qP18CRg05qxQoao56XX1nEHvBREmUsLCTZ299gdMLL4SMH2dlKs5ZSU0jY\nCJx5GJvbzrVUjI4o0szEsRjUhEaCDI1YMaZuksqo2LidJ7+1iWN8HItNRz6eIZ0oYJr1YHU20YsZ\nBhxNrv7gWQSjFc/IIIcfOcbFpSwqew8WuZxSIo6zvwfX4ABKzc9vv4nlMqVYjGa1is5ux+D1/toF\nqlLZtTTv7TVx+XKE/f0ilUq3NWM2a7DZdASDViYmXNRqTSj3UI9sc6tQRFaXOHO6j2pThstlYGcn\nz87OPs1mB5XKwwO/d5r1lTgXXrmNXNbBb+iwenWDi8/neeKxQcRCBqPNSLWt5sCQnUIqy+ZWiqFR\nBw25BrVKTt/ZMyQ3tjA4XWiUHRTpbRQ9JmLLy7RNHnJJDRaPh0Q0RywpUlQ4CITMGFtt0qU2nUQV\npVZH/4iH8XEn04dCDA87Pvz5rVYtbreet9/eBUCrVSKXy9nbK1Iui3i9RpRK3v/Z8l+IkV+C/wM4\nCFznowm+/wl46P2v/wfg9c+4jl8bnU6HZ555hjfeeONul/Jr5fz58/zN3/zNXRcjJpOaYvHHWSM2\nmxaX6xd/yJj8frR2O57ZWQrhMG1RZGM1SX+vjuD0MK2NG+xGkljMGlRqgXI8TvG5H3ajzxMJHMeO\nEb38HpVkkkoqRXxhgeCpU5iPP8Q7r69QvX2TkYIVh1qHSlsndPoUTYURSQaNeoFyqcbMH/wB8a19\nBLWaN//6adK7EezuQeQyiWathlwhUMllcfd5KcciaKUWGqu1+8ZezFGudaBaxGYPUGs2yW+vkljd\nRKXT4T1hJ3l9hUKsl1dfvIPa7sE9MkQxleWdi/tMHRkkEq+RyYsk4kVyyTx+j5bs3v/P3pvGOHZe\naZrP5SV5ebnvO2PfMyLXUGZKKSlTUlqSJdvyomrbPRrUgmm0awboMQwU6tegFgxQmAGmMD2YqXKh\nqzHuKQNV3Xa5XJItW4u1K5XKfY+IjH1hcN/XS/Lyzg+mQkpJXiQrlbas908EGZHkiXuZ33e+c877\nvln0Oo1WvckdByMUy222Ngu0Gk1iI158PhmL1EVtKr3KgsuJ7HKTjadRBAuGod34ZyZZLLkZCzjJ\nLy1hMJmwhsO3TGn1VqJUUqhUFNLpGo1Gm24DGvUiHN6Nd3KSaiKBa3CQgWPHdto8g4MurmUzGPQ6\nvGEvudV1as0aFquEXjbRKFVJv/qv5BcXcY+N4erro7JawRlR2b8/xNyTP0JvNOB0m7nz8c9gC3bQ\nG/WcO73F5lqOekvgypU0xWKTzz40RCBg4/XX17m4IHDf/T6aHZFcTcTu6sPn0mM1donZm5z59n9j\n+J4jDAyPceXUEs1MClcsinPQzcy0n4C+QFkp0sy3yb7yUyJuO97JIGW1y/U3zrP7zl2cf+Y16nWR\n8Mx+9jxyL/mrF6gszzHx5S8jvU9y1yyVWH/5ZaqpFGgaOoOB4J49hPbvvyX3y2qV2LMniMmkJ52u\n3dA28WKz9YZr3W4ZkAE7StjDPb4Cr5+IU2kKaFqHeqVBNtNzsM1m6zz3XJ2QE6rFOrVcjsHpQeqJ\nBMlEm0K+hoYOrVFHJ+pROx3ERoGpYJs7947hsnSZe+lFfvaDFP1jYQ7/j99g+aWXKSfSxPoDFDc2\nQBR7wmeqD0VpUyppDI96mb+6TWutisVuotvRmN4T4tpCiWqtzeioG4vl5ranIAjMzoZYXy9x7VoG\nWdbT12dHUTpoGuhuCLCKoo5Op3tLrv3HjVuZjOwHLMC9wN8As8CZGz/7L8BfAA7gSX6LkpETJ07g\n9XoZHx+/3aF8pDh+/Dh/+Id/SK1Ww/JrqCP+ujh6tL83u1Bo4PWamZ727yw8vwwGWcY1ONhTe8xm\nOTKsceZSga7aIRdPI8sGorGekFdxbY1yPI5vaopqMklpbY301asYzWZESUJvMpFejSPKg2yvppCc\nbvJ5ha1CkcOHYqwtKlz4ySmUSoXxg5OMfXaKomJkK6ng9Js4/PiDPPu3/4jfYyTYH0RncYHVSrSv\nw659YZS1qxgmB1FqdbA48U756A8EGJ6dpLrc5vyLF8lcOo9okBge76N88SQmWSRfaKHW6iiNZVp2\nA1aHh3pNjyXgJ3nyJK6AE4NRj2yRadaaiKKOrY088xfX2H1onI3NMh63CZNBoz9q5o4pC7ZuCW/I\nhU6vR280Yh8cwjsYxRWSsAUeooKNxbNLFFZhb6zTmxvx+3sb9vvYxv8mQ5JENjdLbG9X0ekEPDYv\n5e11DDoN78QEwb17Ce7Z06Pz3oDPZ2H64CgZm4rkq/HKP61TqrfotoB6mbCjTUGno6V0SFxZQDTb\n8PdFaeoMbL7xBhuXFxEEgcBwjPAbp9kzMsVmXkRtVHE4TXTLKh63Ca1axGfvUrpyir39Vs6vw8pK\ngcOHo5RKTS6e2UCOuDky60K3fg5Z0hM/dYq9j42TOz7DdlrBHnIQirqY3uWhXqphCoTplPI4+vrI\n5es0OiLpTI3sXJbPHLiTQ196ALVeo1lrkrlymfLcRQwWC87+fiKHDr2HUVNYXaWaTO487rbbZObm\ncA4M/NwZnV8XXq8Zr/dm4bVms0MyWaFWa+N0mggGrUg2G+MzNpx+N8VEmtL6Ki++sko3nkMW7Qjt\nDpGQnrnXz7H7yDRqvYaotsitb2Jzx9C6MqLQQalUaJRK2F1utuaWeOWffsbdj85ybXWO+FYZyWJi\naUFPOfUMtpEJlLYd1R6gvb1C+tJVfDO7GR52M79eZnBskq1kk8ce30uh0MDpMHD3vQOkUzXQ9xKt\naNSOz/feQVSn08zRowO43TKaBhaLkakpH8vLBUSxl42Mj3vxeH5+C/u3CbcyGTkEPHvj++eBO3k7\nGVm78bUF3GzD+RuO733vezz++OO3O4yPHA6Hg9nZWV544QU+//nP37Y4QiEbwaCVTqf7K9F63w+i\nXo8tGMQGPBTxUSzUidcHaOdT6A0inWaTRj6P7HKhKgp6SaJVrVLd3sY7OYnJ6cTq91MVZLpKT/xM\nb3NgtFrpigaSBVhbLeIeH8dkNmKNBjl5scygN09pK87qi68wcGCCr/9vf4LW1ehrWjjz5hrlzTiC\nQ2K6DzzGEHqLBcfwOF13A4PZjHNymjNPv4bsCzG7x4VLHEcSu0xNO7n4j88wcGA34cEARl2H4vom\nVo8Tm0HCI5lQikWyWykKMQ+H7giylaizenUTQdSz/1AEpSOQzdfZO+Ohq7pxmjpYGin0yydRvR4e\n/uIe0i07xXILh8ONKOqo1VooikpqK0clkaCIE6G/tznV02mKKyvIt+hEfKtgMomEw3ZSqRqqqtEW\nZfY+dDdy2EXXoOEJWXfaM++ErGshC03cucs8+EA/ivkOsrkabq1A7uUfE7n7KE21519kdPkY+uz9\nXDgxT2ZlA0mWkPQQ8eiwCVUkXZ6azU8+UyEQsNI/ZEbWd6kXFIR6mbM/PYXaFbjva4+B2U61qnD2\nzXWahQIbokIi0GXA5sAWidJuNGhcOclnDsxS7HqRYmN0lCaXf/p4qjNvAAAgAElEQVQylXSO/hE/\nI26JZrFAcHI361loKSp6k4ntbAd3t0x1O0lT1eENhHBNqjSScRr5PNVkEue75kea+fx7rk2n2aRd\nr3+oZKTb1Wg02phM+p0N9pehXm9x4UKStbUS1WoLo1FkZsbPzISDeqGE0BWozZ2jkimiVWskF9eo\n1lVG7ppFUsuU4wl8Yh/HvzjL2noBrdslErZw+OgoueUVjHpwB9wMz8TYfm0Fk9dLt1kntbyBzRdC\nUXU36MerHJjegylgJ3H5KrJeY+j+Y5i8PqwtjaEj91BoGIj0t2m1OrjdJkIhG4GAhcOHTVSrLfR6\nHX6/5eeudQMDTlS1y/XrOVRV5StfmWRjo0Qu18DnsxAIWIhEfvsqlO+HW5mMOIGVG9+XgPcT5Phz\n4Nu3MIaPFN1ul3/+53/mueeeu92h3BI8+uijPP3007c1GYFeifLDJiLvhiTpCQTtSHcdYOO112jX\nagCYfT7cIyPE33yTbqeD4UZFRHa5eslIKITaENG5PRhyRpz9A4gGPSadDqPdTHT3OI16G6vLRqNY\npJ4vsHrlApdfvYjVbqaQziIZRKz77uH0hQRmoUV0XwybpBLfLLEtB5geGwG3Qmp1kcDABHNXk1z8\n8RuMHt6LrpQgYlBo16oULjqx9A9RVSUiQRfuWJj0tZ6misEk4fGZGRx08rX/+THWl7P0D9rYf7CP\nxV1B9EKXfKZMudTk7KvXuTZf4K67+5g6GsWh1NGVqoQPHMA1NMSkLFMoNHjjjU1++tMlSiWFWMzB\nnikXCbpEY3ZUpbxzbRvvszH9pqPT0ejrcxAO22g0eroUuWyVf/nuG5h1LSanQ+ydshPaNYnlhrS9\nUqmw/sor1NNpWsU81bU1nENDDO3Zw7X/9hMq+QoOwUYlcpByrYspHESR3IxM97P38ChKXcEmg0mv\nkry+wqDbxVDQy30PTvLGGxtEgxLp+QUCThOblxdoKwrBoI2xMKQ7On70k2WyiTzplU127Y4wd14h\n9MAAeqeHzOoF0qfnsF9d4rP/6//CWlGlqzfgjISJZ7pslmVmjs3iyyRRdSrUqshmO7GZ3YhWF5HB\nIE9fKXD59TlcXjNDAw7ufeA4mtalVa2+5/qZfT7yS0s3PWeQZQwf0oX3+edXyOcb2GxGZmYC9PX9\n4tmHdLrGG29sculSikjETiBgIZ2ucfrla6hxkfmXTtE3NcDmydNIdivDsTDXLkmkE2nahRwGWcfo\nqI/Uq89yV7Sfu48eoJgawEINQ6dI1mRnat8A4/fs40fPbdKWwlj9afRmE61ag+m7vKxtN4mvZRgb\njuKw6Tj1gx9TzpbR6QXUSoHpf/M4Vm8Mz/gQmUydF19cJZmscPlyB1Xtcv/9gzzwwCCxmIPt7QqL\ni1n0epFOp3tDQ0XE5zPT19ezmRgf9zI25gF6RpD9/U5yuTqSJBKJOG60qn77cSuTkRLwVsrmAIrv\n+vmXABfwTz/vBb75zW/ivFEGnpiY4PDhwwwMDACwtrYG8LE+vnLlCg6Hg8nJydvy/rf68ezsLP/x\nP/5HNE1jfX39tsRzq+Ds78dgNvdKzIJAu9EgfekS1kCAei6H2ukw+aUvoSoKjUYLzRVhYHaITTWI\nkxYmuVcR8HrNBII2cvne4tDtapQzBexyk/WFNTqtDvVqA7vDxfbiJrFYhq1UC52i0ey00JltdA0G\nrF2Fiy+eY2Q8wPgRK1euZdGatZ4fRrbK6Pgg688/iy9go1RuoQhWbC4/z/znp9h3/0Gm9vVTbYJ3\nbJTVc9f4/n9NUGsbCPklfvD3z3Hsy3dx4NgMTruBy2dr1Ast3nx9AbPPg9bV6CoKZpMOvTmEe3gY\nvcmEpmlcu5YhlaoRidhRlAIbGyWsFpF990wx4FJopd7eoCyB3z7xJafTRKfTpVpVCARsPPvsEpQz\nUEiis0pcObNGNDBB9/RpRh58EJ1eTy2Vop7JAGByu5ErFaqJBO6REcIHDmAYnOapJ+e5eGKhJzd+\ncA/JtptDBwKMHT1IdWme9QtzJLJFQv1+RKef4txZjt37IIoSRK/X4+j68FlV4peuMjXpI7OdJ75R\noOX1kErXQRNxuCzs2RsksbxFtd6hXr2hhVJrETl0iPObJq7N58jl6rRKBcJeBy8+fYlatcW//cpx\nTIJCeG8dU3SIjOalWOlwYaFKuiFj9riQrSKFushSRs++ARWT472JgXNggPLWFpV4HK3bRS/L+Hfv\n/tDturW13rZQLisUi03uuaePQqG5M6QZjdowGvU0SyWSmzmef26ZzUST7ULPo2V62s9gzMyVEwuE\nLX3k02WiYyr1YhGjWaK1epXHvjzL6TMJ3ANhpveEKZx+hcT8MtL6BiOSSMws0dWJlFIJwqEoer+L\nZgc6bZWXThWIBMfx7vWzX2yjq2aI2tvYdwXZ89ARslevklpPopfNeLxuJG+A9PIG9vAeBu0SJ05s\notMJeL291nel0uLixRQHD0ZZXi6yvl6gVGoiCDp++MN5KpUWo6Mejh7to1hsMjsbRhCEHWFBo7Hn\nzfRuf6ZPAm4lOXkf8O+BbwD/D/D/8nabZjfwfwCP0mvVvB80TfvN6uB861vfwm638+d//ue3O5Rb\nAk3TGB4e5sknn7wt5n+CIPBx3fN2o0H6yhWKq6tomoZ7dBTv+DilRJrrVzZpdCXqOiuaIFIo9Kh0\nwaCNqSkv7XaXl15ao1ZrU6u16NRrTLorPPt//38YjXqcATe5QguLy8HMFx6moPPw+uubHJwN0VHq\nbJ94nWsnriDqYHDUx9f++CE6Dj9nTqxhrW2ipJOMTQXQCxrZjTjGQAxddJLNrQrr81s4vA7uv9ND\nWxN55WKTyy+fp23xc/lKisERL0NRGUsghEXWcfdBLy//+BxbGyUMLi9mp4Mv/94M4e4m1WSSyMGD\nBHbvplxWmJ/PcPJkHIdDwmIxkExWqVbbuN0mHjnqo3D2dVrVKoIoYo9EiB05gmSz/dr34lbe93y+\nwfp6kWq1RTBopa/PQSpV5cKFFJWKwptvxjHkVjHSRCf02gT3PTRBwFBk+PhxzF4vmbk5Fp56ikau\nJ0+uE0UKKyvE7r4HpdmiIMU4cSbDyrVNgmND1EQHme08v/9HB+gP6KktXGD99RMEBiP4xkbZvnwV\nySQx9rnPcWJJpFlroCyc4el/eImAz4TTJqLqzez9yqOYw328+toGmxtFvvj5Ear5MpfObfLZL0wR\nJM21p58lcMdhrpec/Oina/iifhYWsph0HabGbHTyGSx2mYePBZkcc1BPpRD0elyH7mM72+GZZ5bJ\nZxt47F0o5+jUa5itEl/5t3cwuG/ifV2Y240GtVSKjqJgcrmw+HwfSoVXEAT+7u/O7Dyu1VpEoz1V\nXEVR0ekEdu3ysWdUZvPkSVYzIq/+bIFqXUVz+KjpesnSg/dFmHvpFNO7PLSKRayxAdZefZVcPIPb\nJeF36vCPjxA8cIDC6hqoHSSLmfzCPM1SiYG7j9DQjJhlA6lSlyvnt3GFvYzvH+HMxTxvvrrI+KiL\nvYMCpmqcxtJVzP0jqK4YGy+/TKNQwOJ2YPG6MFosmJxODv33/wbBKPH886v8y7/M3Wh5dohG7Rw8\nGOUznxni9OltdDqBQqHB88+vcPp0AovFgMMhsXt3gM99boxDh6J4vZ8cYbMbn5P3/bDcysrIeaAJ\nvHLj+zPA/wX8B+B/B/zAM/QqKF+8hXF8JHiLRfPb7kXziyAIAo888gg//vGPf2udiH9VGGSZyB13\n4J+eRtDpdgb1ipqdxaKNdruLpjURBHC5TNx1V4xw+O3e7IMPDhOPl2k0OrRaHRzdAv6+IAa9jvnr\neaxOG0abjeubbTZzSUZHPRgkA+1UlpWz19B3m6B2SaymWD0/x/3/bhqh1cLQddHZtrD4/EvY/D5C\n9x7nynyZ0nqH1HaTRqmO0WYFs4P50yuUyyYCuybJ5BS6apJUssqDX9hLvtQmeeo1lP7dzA7rmJkc\nQnbaGdk3gkOoUs8K9N19N66RESoVhR/8YI5z57ZZXS1RKNQ5dmyQXbt8eDwQDRjxhDx4Hn6Yei6H\nXpKwBAIfWC68Va9TyZUw2OzYnbd+SDqfr/PCC2vk8z1K6Px8lqkpH3fdFcPns7CyUqBYbFK4nqGR\nu6FPY9RjNeuhDbp3bMTV7W3q2Sx62UIlEcdgsVMXbSSzaRSLgqpqTB0/QmIjh1or0alViS9vs3S2\nyOe/uAejqKIDXvk//4Z2qYAt4KNUruPZdy9NXwx33904nBZyG3G66PDPzHAtLnAgBl/83DBz19Kc\nOZ9i7mqaYNDKayfTjI57OfD7f8DiUoGFpTQWScBAi0quiOC0Umro2Dc7QXlzA8x2OrXaDm3Z7TAQ\nGQyQTteJxyugdWnZTZhlkWDMS9cRJJ1t4veL75nlMMgyzhsVzY8SmUwdt/vtjbfb1UgkKrjrqyil\nEo1mL/E1GaGUy2COOihWVPR6kb139KEVUkihPtaWkgzdfQjPZpxCKkdwxIbVLrHy46ewuJ3YojG2\nzp4nH0+j14u8+eQrWPuHmH7sUU489VNatSYWu4zHqefwuMigI0SrKyILDfwjAxhGo7SKBRShi+HQ\nbpKLq1jdDhwOE0ajnsj+GTAYOXlyi6eeWuDEiS3abZWxMQ/z8zkOHYphNvfYM6VSk1qtTTpdR68X\nMBp71zuVqlKrtWm31fe9Vp9E3Gpq7zff9fg/3Pj68C1+348cp0+fxmKxMDU1dbtDuaV49NFH+au/\n+iv+9E//9HaH8rHAIN/cb63V2rRab1PlNA3y+SaVys0FPJ/Pgs/X21DPnt0mmbHyyDd/n/mXT9HQ\nbxOMeXGMT/PcWYX19SJ79gTo73dwda6EUi6jdlSMRhHZqKHWK5SXF4j/039BcvvoVCsMHT5AVxCI\nL6f4yXffILxnN7bBEbRihUq1Q6msIEf6kYUulaKK3w4TbSN6XRen20olGyc2HEJIrSKmVrBJEmJG\nwnfvCP5dh276W1ZWsrz88iqlUgu320Sh0OD11zc5fDCAmFqCYoHljIxraIjAzAz6X6JD8W5omsb2\nxctcfO5N0okiVo+TiWN3Mrx/HFl+78n7o8LqanEnEenFAcvLBUZHPfj9FkZH3eRydS7WyyjlMqIA\nU3tjWLQK7uFhTA4HWrdLJZEgcOAOrr/yJs1Wh65kx7N7L0o2TXnuEt47bLTTWzQNerLLCaqpNKOz\nExh0GplChWalhmewn9WfvYAr7CevEzH4Iqydm8dc0AjdeS/XdH6uF0IMjfej1iqcvlykUGwT9Rs5\nn6xx5P5xNhN19uwNgSBSqXd443Sa4fEZjB4Jp7vGxvVtuooJh81IrVzF6QoRHQ6SVBv4PTKN7BYI\nAs6BAcweD4IgsHt3gEKhSbPZwRfzo6oac4slttMKRqPI8LCLgwcjO3b17bZKIlGhVFJ2XHo/Kit7\nk0mPw2Ekna7f9HwlnUXXaBD0BzBKvc+Lyy5g95sYGXeydzaGsFWmmNdx9c15kq9fJH1KYvrRB7jz\n6EGKl88w/7PXWF1IUKt3ePAPHkZoNbE6zGznYXW7Tl97i75ECos/gC6dZNfsEIntKnM/eZHp3WGM\neh317U3e/E8/Qi8Z8Y6PM/Lgg4zf9zC5+Xkq29sA2CMRxNAQL7ywzupq/oZsux5FESiVFGZm/HQ6\nKna7hMEgYjYbKZUUfD4z1WqLWq3V001BQ6cTcDpvv+XCx4VPRc9+RXzve9/j937v926rKdjHgWPH\njvHVr36VQqGA6338PD7p8Hhk9Poed7+3YOgwGnXvqwDbaLTJ5xv4fGbsdolUSsJ/zwMYx4sYLFYy\nxQ59fRXsdiO7dvkIBi3o7hji6tNWGrUmdruR/j4nbpuud/LejmOUjGy98jJbr77EyOP/HZnl6xw6\nNslWUaNa62AdHOXQPg9to4rH72azkmImZkJrtxCVMh6PGSW5RcjS4I4HdlO9eILqDaqyPRJ5X8bD\n9naF1dUijYaK1WogEunZwJvUGrrSJma7EaXUInnhAjpR/MCaEpV4nDd/8DM2VrIAFNNFasUqosXK\n5N6BD3WffqX3rby3A9xqqShKT+XXaNRz+HCUgQEn6c1+DJ0qNqGO3e/BecOtVu10qGWyLFxLou+b\nwh9y00wn2JxfhHKOUqqEbekCd953mPPnE8T6vZRNXcYiIiN9Mn7Zz8aJE4SCZmqJbexeJ9ZgkEw8\ng0UCn03D2C7h9IZ56KER5t+4SHojg6iTmRo0cf6nr4E9wGWnmWKmQsTZYXD3KBoCrVYHWdIhBSwM\n91mYP68nsZmlr99NvqAwNmwnU2iz//49hFwVmvogzv5+3CMjKEqHfL6BwSDywAODrK8XAYEzZ7Z3\nqPStlsrCQo5w2MbwsJt2W+XUqTgLCzk6nS6iKDA46OLIkdiHTkj6+x0UCk1sNiP794dYWMju/EwQ\negJnTmOY/JU01m6Bg3cPcfVSgg46hsaDHLyzn4EBJ0pgP83nX2D78jyVahvBZOD0U69i6RSpbm+T\nT+bR6QSCQSvZy5ewBfw0slu4FJX+mRi4HYh0OPLILJraodtuUc7ViU0MUVy8jKC2uP7DH2Jy2pGd\nTnQ6HclLl+i75x4G77uvN8wtCFRViTNn4mxvV9jY6FVORVGH221EFAX8/t5aIUk9UbeVlQKNRpuJ\nCS+lkrIzVL1vXwiv10y1qtzShP03CZ8mI78CNE3j+9//Pk8++eTtDuWWQ5Zl7r33Xp599lm++tWv\n3u5wPnaEQjamp31ksw2Wl/NsbZXo73cxOFjF7e4lKtBzEj1xYpN8voFOJxAIWLnrriiVipPvf79M\n8loao1HE5ZLZvz/E4cMxJEmkmQnyhT+4n7XTFyin89hkAd/wINmlxR6Dp93GGg5T2tyiWa1RWt9E\ns9Z57H/4fSpSiBde2uDslRKCACsrm2iagCSJ7J208+CxKH0BHfV0iuZWhtQzZ3ry7QYDtlAI79TU\ne2irmqZhNusxGvU0GirVaptarcTsPh/GVhGL3fjOXya/vIxv164P1KLJrW+R2r55fr2QzFHYztCa\nin74m/VLEApZWVzM8c5xFKvViMPxduxGo56+PufPNWPUG43oXT4y25dQ1TyptRR9ExFcATfLq+sY\nJT06TcVvKHP3uIAQClLMGElcmiMXcnLq1DYjgw76QxGcfUk2T5zAOz5OKGhFlGSC+yaRvVZsu6Nk\n1uJE6tcY2D/B6deWuPjmKmqni33QTLerYXdIDIz5uHZxi5MvL9BstLj/4Skefmwauxk+89AYly6l\nMJok/uAP9+GSVVomD4GQHWffIO1hFVk2cHUhx7lzSSRJ3DmhHz4coVhUMJlu3hK6XY10usbwsJtU\nqsb167kdkS1V1VhZKTA46PzQA5XHjw/RaHQwmfS02yqVisL6eglV7RKJ2KhUWsTNNipNGf3iNpF+\nL9HP9GOODREeiSLLhp4/TblFvVTBPxQhWU2hKCqyXiC3vIJvMEpKEDGbDdjEJu2qAOEgSqlCenGJ\nVqnI8EMRNKuHp55cYHW1iMWiZ9++EPtGhrnw7E+QxTaVUp12o46m9P7Pi04f8bU02cUmwaCVaNTG\n+sUUuVwDq9XA1laRAwfCPPPMEg6HEbPZiN9vYWLCiyjqmJjw4vWaKZUapFJeHA6Jer1DX5+dWMzB\n5mYJj0fG5/twTKXfNnyajPwKOHHiBBaLhZmZmdsdyseCtyi+v4vJiF6vY3Y2zA9+MMfmZolarc25\nc9skEhW+/vUZpqZ8N/xNkmQyvXKyqmpsbZVZWSliMOiIRu2UywrVagtJEtm924/JpGd5OY8mQHBq\nFIvXib7bQegodHV6kvPLWO0WGtk0nrExRMmE7HJhtNvx7ZlmebvLYjJOJlNncNDF008v0u1qtNsq\nhw6GSVxfYzIUpbkWp7i4SC2Twezx4BoYQG02sQQCRA4efM9AYqulIggCjz02zlNPXadQaOLzmTl8\nZwxzd+0910fQ6XpH1g9yTY0G3q3oL4o6dKJ4Sw2++vudjI9Xd1xfbTaJ2dnwB3Ybdo5NEp5cI7W8\nSVft0lJFxh75HMbQAEaDgF5rs/jUUwjtKpHjn0PJNFnbLBM6ZKCSzXMhm8Pq93Lk6DHq2SzmUISG\nLUa+YUAwBPHKDjrlNnZjm/ziInajja3FTUwGgY7Wxe02oe8q3H80RjxZ56WfXgZBINLnJrm8xUv/\n2uIrX9/L0qVVrLNeZElg7ifPEhwbpeESqTU6LC3l0et1FAoNTp6M76gcj4562LMnwLVrWQYHnRiN\nIq3WzXMKdnsveavVWrTbN6t9drsapZLCh4Uo6rBaewmvXq/jnnv6mZiooapdTp3aJputk9UJ+Pv3\nY9LVsEatBAdDmD09Nls+X+eNN7ZAbdPerlGvtxkYcJHJ9Cj8gYEQXcmMPRqlnU3QyhVwRMP4p6bZ\nePMszVoTBAHvzG6eO5Nhc7NMvd7CYNBx6lQcpzlCdM80Qr3A5okTmIM+dF0Foy9MoqJHt9VgLdPk\n2rUMQ0NO2m2VM2cS7NsXxO+3YTaL/OmfHqFSUXYG4IeH365O9gTdzIBANGrHYBBRFJXl5cLO9fld\nwafJyK+Af/iHf+CJJ574xLdo3sIjjzzCn/3Zn6GqKuJvkCnax4VEosqlS2nm5rJkMnU0rTf8+Nap\nplJp7fD83yppC0JPjOnq1Qz1epuxMQ9Go4iqdkkmqzidMufOJfB6zMgthaUXXqNZLOKPeokdnEWO\nDKDr1nGYTaDTccc3/j3y4CSWA/exkNLzL/+yiM9n4dix/necTLs4HBI6sSfCVC7WiHoNNItFVEVB\n0OuxBIMYLBZEo3GHtfLOz7HRKNJudxkZcfPNbx6mVuuJSEUiNnztNulLuR3DOAShRwM2fjDHXs9g\nP/1jYeYvbexIHPqH+/D2h9Drb93ny2TSc+RIH+PjXlqtXp/e4fjgPXhP2Ivv8L04xnPoBIGaZiLb\nMeDy2cmtbWBoVXF4bWgdEza3g+TJVSYOTdMwumiLJkySyPZ6jsqEg4kvfZmVioNTLy1QrdQRNlIM\nzpjZY2lRTdew9/ejZeNMHxhi8eIKRlnCEQlhstvYPdtH6icLTO+JQLdDp1qlnC6yVK+iPXEHsZCJ\nysY8+VKNkb3jFKQwc3MZdu3ykcv1ZmfOnk2wvl7E6TQhCAJLSznGxtykUlX27QsyPOxifj67c8sD\nAcuO74nNZsRg0N2UkPTmGj7YIPMvgl6vIxSykUxWdhKmblcjmW2REgzo3FaGbiQimqZx/XqOQqHZ\ni7VvCHEtST5fYaDfgaAXce3ah85goFZqYOyP4vaYEY0Sy6+9SXDPbvruuhNJNtK1eMmnU1gsRlS1\nSyJRpVhsMjXh4ejMNOVEktHPPkr8wgWCQReaO4J3ZJpqV8br7VFv19aKTEx4d1yhR0ZcOBwmTp+O\n75jaFYtNDAY9sVjv761UWphM+h3hs7fuE4DDIREK/W5UReDTZOSXQlEUvv/973Pu3LnbHcrHhv7+\nfvr6+njxxRc5fvz47Q7nY0e93rPofucgXbvdq34UCg3MZgNms4FqtbUj2mS3Gzl8OEaz2eH8+SSy\nrGdw0InbbcZuVyiXFfR6Hc89v8L+cTs1OUBbs7NWVCm/cg6r183U5x7CZjdhDQYx+YK8fjLBWqFD\nqdZg9+4gNpsRUdQxPu5he6tEq9FElnRoOhXv5CDhqJHK6hKtSoV2s4lnZITE4gbFShu330X+X3+K\nxazHPzaKZ3wcgywjCAIzMwEuXdymUm3T6XQRhN6JLeTfg95gIL+8jKDT4R4exvshnKrNXi+HvvYo\nzug5EqsJ7JEIg7O76Rvxf5S37X0hijoCgV9vQTca9eyaCbG2oFLP5hh0dhAaWaSgE12rTiGh4b7r\nM0SnhhG1FnfYB3jjbIbUhRTeiA99p4Gm0XOCdnpZmFdYKVopFBtUqwo5JYlJMjAUsGMfGqeR3GLK\nZSMydIRCSyI8Ncqe2X7CMSde9zql9fWbqNB2txWDpKfsHUM3YqUWrzK/pmCzNTh4MMK1axnW10vc\ndVcMVe2iaaAoKiaTHk2DSkXB57MgSXoOHowQDttIp2vY7RKxmH2nMuL3Wxkf97CwkKPd7s2MDA+7\nCYV+fXr3++HddG9NezsvbuTzJK7Okzy5iMvlReeLUldD7Hr4PtLzCwiCgGdsDGN0iOERD7ZYj2Uj\n66vUFy9Rz2aIr66hNFvs/erjKA0Fr89OMldgZaVXlQh5DYj1PKV4gW4pz9Dx+xg9fhSLrKNmChBv\nONCJIufPJ9jYKJHJ1Pn858f4whfGOXNmm7ExD2fOxLFYJKxWA6VSkzfe6Bn2eb1mPB6ZZLJ6wxTQ\ny5EjfczPZ8lkathsRiIRG9vbPcn7UMj6ia+SfJqM/BL85Cc/YXp6mr6+vl/+y58gPPHEE3z3u9/9\nxCYj3a5GNluj1epVF97pf+NwGAmFbqaeSpLI4KCTWq2F3S7h9Zr54Q/nmZ/PIQi9U6PTKTM46ESS\nRGq1NqurRaxWI+GwDUHoLa6bm2VsVgP2UJQLP3qJRqXCkfsmcPT1obPY8U+PYbLb6XS6NBodgkEL\nQ0NO0uk6qtolm63hkDpMRbosXdqilVMwmc2M75+lb0gikXPT98CDdGpVtq8tUNOsOGIRNs5coNA0\n4nSZGCsUaNVq9B05QqtWo7M5jyezhK0r4h4dxzs2uDOwG9q/H9+uXSAIH7gi8k64YlEOfz1Kp9Nr\nC/22LaxGpYi0eR6dCoVraxRXV9AHYlSwYzJbcYxMcnmpzJC/C7UK9a113A43stOK3uxndNzP4L4o\nJdVK/MfnWV4p0O1qjAzYECtp4pfKeIdEghE3aU2gVqmye78P3/go3qGBnWrWrr0xZu4c59KJeQDs\nLgsPPX6IhmrkZz9b5tVXN7BYDHi9ZhKJPA6HDPQ+d4lEBbtdIhKxUa3eoDIbdPj9VkZG3Oh0ApKk\nZ3jYfVMr4a1qml6v4447IvT1OSmXFaxWA8GgFaPxo99GPKEErPAAACAASURBVB4zfr+lRzu+AYNB\nx8CAk2a5zNpLL1FOpEgvpajXrhMcG8A8fYiVip3RYw+zZ08ATWOnBeSYidBVmpSuraIU8/TP7mXk\nwQfpIKJ3+clk60T6Q5y9mAEEomGZPksVYy3Fj374M4b7rNiWtzj6776O0CjTlJ2oDYG5K2mWlwuo\nahdV7XLhQhK9Xsfu3YEbw+8ikYidubnsTjurx0ZqkkwaGRpyoWkaGxtl/H4Lx44NUCw2uXBhmzff\njKOqGq2WyvS0nzvuCGO1fnRVqN80fJqM/BJ897vf5YknnrjdYXzs+NrXvsZf/MVfUK/XMZs/OaI7\nAIrS4fTpOCsrb80SGJmdDe8swE6nicOHYzeGWAuYTHpmZvx4PDIWi5FksqcB4HCYGBx09obZRIFW\nS0VVNYaHXVy9mkHTNCIRO0NDLkRR2HHmXLieZ2Kkj+mvPIYsqgxPhfE5DTht+h0nXL1ex8yMn2vX\nMjz77AoLCzn8fjPHjw+TnV9AKia4/4ERavUOdruEy5Ln+ptpVq6kwWAiEnOimX1Ex0doZLOU2xKt\nVo9BUa0oSOvr1KemSF+6RG5hofeeQPXaGbx+K7gGd67XB9UT+UW4lW2ZW4WuqpK8eBGlVMLs87G1\nsoxSqVGqbZDWPKwsZhneaKDvm8DYKGCrrXP3vQNsJDsoSoeRCQ9HH7uj1yLKN5AkAxaLAb1eQCgn\nSWxuMxwZ59obC+SDVmYePkq1bcA5EcU3FLoploERP0/8T8e5fnSSWqmGfzBIOqPwn/7zRdLpGmNj\nHk6dipPPN/D7LTSbHYzGXuK3vJxndjbC8nIBTdNQFJVdu3w71ZB3o1hsMD+fIx4v43SamJz0EQ7b\niEZvvReKwSBy550xLl1KkUxWkSSRyUkfsZiDwvJST/PFIBIKWVldLZJe3mTX1C7ymozPZ36PC26r\nXqdy9QwXX72G3SxgMzSgmSJ091HKipFKw4TQ1Xj00VHW1oqMD8i01udYfPUUFpuMbDHSzmyz9Oxz\nhPbuYXB3kKZY49VXN4CeiV0kYqdUarK6WsTlkmm3u0Qidmq1Ns1mZyeWQMDCxkYZnU7AbNZz+vQ2\nuVyDZLLKww+PUCo1eeaZVZrNNpVKi2q1xeJinmazw8iIm9FRzy2//rcDnyYjvwC5XI7nn3+ev//7\nv7/doXzsCAaDHDx4kKeeeuoTN8i6vl5ibu7tvnippHDmzDZ+vwWbTcJo1DM15aNYbN7o2fb64hMT\nPjweM+l0pmc/39WwWo1omkY8XkYUe4vLyIiLiQkfBoOOgwcjO6ez3qBgmKWlHOU66C02JKlL34Ab\nj0tCJ4okzp9HL0nYIhEsFgNra0UqFWXnJHvlcpK7BvXQbeOkgNfSRa0rpJbyKAYn2+tZ8vkGmwsy\nTp8Dw6CZfLZNo9FbDLWuhtrV6KoqrXKZ8ubmTdem226TX1rCNTjIp+ihXa/TLL7NBuqqKhoa+WSB\nmmymK4hogp5ksoYiNLh7zIVDqjNwwENXE7Da2zutDrdb5t57+0ina3TqNUpLJe59YBJTt4zJ70A0\nSSRW4sjj+3GF3mvYBzAwEqBvyE9qdYurJxfIrxcZi9qYm2uQzdaZmfFz9myC/ftD+P0Wut1eW0DT\nBKamfNx9dx+tVq9N4/NZ3neIWFE6vP765k5lIpdrkErV+Mxnhnb0dW413G6Zo0f7qdXaGI0CzUya\n1MULNIpFLH4/zWLxRmVGJJtt4HLoGT04uDPj8k7UMxkMrTJDQ06ymTrJKuRyFeRphWTXQaPRQZYF\nwmEb7bZKyNXh3NOX0apFwkEr5k6RXHILTZtBNVppCUZ27bIwPZ0hk6n3qPAmA9lsDbdbJhCw3Ghf\n+TlxYhNJElFVjaEhF8GglQsXUhw8GOHVVzdYXy8BPYbeK6+s43RKxONlZFnPtWsZBKFXyW02O5w5\ns43PZ8bp/GT40bwTnyYjvwDf+c53eOyxx3b8cX7X8MQTT/Cd73znE5eMbG9XeLf6eKXSolhs7rRr\nPB4zDz88QipVQ1E62GwSLpeJVKp6Q/9AYnLSy8mTW2ga2O0mYjEHHo+Zzc2emdyuXb6bpJyDQRtf\n/eouLl5MUi4r+P0WZNlAw2Ags7lKdekaqD0zLcwOTOMHaDQ6jI97EQSBclmhVG6BO0xpo8K1pIGh\nAQc2MUdHaqMzylQqLTStx3zw+DrUOiK4wmhrcQRNw2wxYrEYsPj9GCwWtO7N7AjoJSSf4m3oZRmj\n1UqrUkHTNGzhMLmlZXSSREsFg2TAMTTI6qqGRTIhB3x0cklauTRoGuahO24aGj54MIIgCHSbDSpr\nJqROGb1tgPnFEsliE6fNy8xM4BfqS5Q3Nzj//R9z7uQq6XQNs8PG1x85xrf/cZNg0IbHU8ThMOHz\nmclm60xP+9izJ4jNJtFodHA6Tb/QYC2TqZFM3myUV6222Noqf2zJCPRUoa1WI+mrV9k+fRq11aJZ\nLNIoFokePIiayeD1Wgj1+RjeOwhmM9ev56hUWni9MuFwz9tGU1XQNLxeC16vhVKpSaul0mq04Ubh\nT6cTmJnx4/WaycYziHY3znCXoE+mkhfx3hHDObWXcluiVOp5Gx04EObcuQSqqtHtajeSzX5GRtw7\n98/tlpma8rG9XaHV6pJOV5mZ8WOzGXfWCru91+ZNpapIkh6HQyKTqaOqvYVKknoVxUqlRbmsfJqM\n/C5B0zT+7u/+ju985zu3O5Tbhscff5xvfetbLC0tMTIycrvD+chgs7139sFoFDEab24hGAziTkm6\nXFZ44YVVkskqnU6XVqvD9LQPt1smHq/Q12dn//4QpZKC3S4Rjdrp63O8h4EVDvcsxHO5njvu4mKe\nqFdH6rXXsRja9PU5WF8vksttM9i2kkhImEziDounUGhQqGikkxU2qzWW55Pc99Ak0UOjpHNtYuOb\nJNfTGE0SI3cdwBAKU+wU2fvgXRTXVjEZdVgi/YRnZzE5HJi9XspbWzvxCTodrhuCX5+iB1GvJ7B7\nN0q5jFIq9czxLBaMFY1WUmNgeJTNkgmj1GF4/xiWiEhWtKEh4PHb8YyN3fR6druJw4ej5DNVNjNX\naetdPPmjZTLJIjpRh2p2YwomeeCBofedrel2OqSvXCGXKJBMVimXFTY2yhhs5/nSF+/C57cyMDDN\nsWP9VCotHA4T0aiNxcU8ly6lgN6A8p49QQYG3v+g9dbm+m7cDnlypVIhfeUKaqtXHZQcDlq1GtVU\nCtnjQVNVgnv3oskOXn5pbeewIYoC4+Me7rwzhuzxINntKOW3Nn+J0Ykg5kiQbFrD7Taxd28Ih8PE\n2bMJ4tttBo7ey9orrzO3kmB8Vz+24XFyxgiBsAezuZdo9JIKiaWlHNlsHYfDxPx8lqWlPIcORQmH\nbTidMvv3ywwM1Emna0xP+3E4JFZXC/h8ZmTZgNcrI8t6ms02kiQyMuLeqWY6nSb27w/faLmJH5mj\n+W8aPk1Gfg5efPFFTCYTd9555+0O5bZBlmX+6I/+iL/5m7/hr//6r293OB8anY7K+nqJra0ysmzA\n5zMTCJhJpXpsGUGAwUHnLzzxXbmS3imn9iCQzTY4fLgnvOTxyL/yIiGKOrLZOqlUTwtBT5t6uUpN\n7WCWDSSTtZ6zbq2EzzfAwkIWp1PG4ZBQ1S4mq4x7fAKlVKLbVUkqdsYGoly4dB5TbISZXTPorA4s\ngyE6nS733DdCodDE4ozSaXdYF80IiQ7THj2Rw4cRzpyhnsmgE0Xco6M76qOf4m3suD6nUgiCQOzu\nu8nl6hjniyRyHWJab4OfnvYxP59ldamIBnjreuRoh9i7PloWixGLxY37cw/w5H89QyZdRWeUsAWD\npKsGTp2Ks2dP8H2ZQB1FoZwtoHW7yLKBZrOD2WygnC0w5DWye3/4hgX924nGxYtJ4vEKLleP0ru6\nWiC1lefOfQ6srQyOWAxHfz+ivrcluN0ydrt0k4aIwaC7ZcyZX4ROo0Gn2dx5LAgC9kgEyelk4OhR\n9CYTks3G/Hz2poFXVdVYWircYPw4iB05QuLcOVqVCnpZZuDYNKbIAGO1NjZbT5Rsba3A9XNLlJMp\nik4HndB+XJE2asDBU69tU24scfCQwhNP9Fpoen0vcVDVLoVCk2SyupMIXb2axuORKRabJBIVOp0u\nXq+FaNSOXq/DZOopAL9FTQYIBq1YrXpiMQfj4x727w/SbKrIski12mJ01P2xVqY+TnyajPwcfPvb\n3+Yb3/jG74y2yM/DH//xH3PgwAH+8i//Eqv1t5Pzfv58kosXUzsnPavVyMGDESKRt6zKrQwMOH+u\nAFer1SEeL9/0nCjqUBQVt1v+UNoVxeLbC1ALCavLTiGRpdFso92I0+j2sdsTQNM0rFYDAwNOhodd\nJBJVjBYzRouZdlslla5z9fQS4bCD/j4H1UoLnd1OOl0nFLKwsJDj+vUs4bAds1kmk6hRKvcs2j0e\nN0MPPEBqPUWp2qFkkDGWO3i9H54580mFxefD4vPtPDZ7NHQWB/JSgUajRbOp8vLLGwgCxEZCZLN1\nKpUW584lCAQsO6yTWq1FPN5jVHg8JioGN46xKTR0NDoCzVqHVKr2HvGxt2CQZQw2J/VGh/ExN8lk\njXJFYfLwCINjQfr7HTuW9W8hkahisRio1Vq88soGlWyBVjFPIT3GXYdCXHhujthkk4kDo1itRmw2\niSNH+jh7dptyuafMOjXlIxK59cOr74bRZsNgsaAqN4uruQYGbrofpVLz3f+UVkvdqTA4YjEsgQCt\narV3DW/4Ur2ToVLaTpJZuI7W1Wi0dWxni2wXBT7zkJ2RqRDxeBlN4z3tqqWl/I79gNNpQqcTWFzM\nY7UaSSQqxOMV6vU2/f1Odu3yMTMTwOEwcc89/Vy5kiafb+B0mpie9qPTCZw6FadUajI25qHT0dDp\netUci8XAxkaJSOSj8wT6TcEn66/5iLC6usoLL7zwOzm4+m4MDAxw/Phx/vZv/5Y/+ZM/ud3hfGAU\nCg0WF/M3lZyr1RaJRIV77un/lV5DFHXIsp5C4ebnjUbdTmunVmuRSFRp/P/svXmQXPdZ9/s5ve/7\nPvtopBlZ+2o5kiw7JPFCChPIUgnJLeAm5IbVcAPckPu+RfGmKhAIwQRC2F64EByomBCcgLFlvFu2\nbMnaR6PZ96339fTp093n/nFmWmrNonU0WuZT5bK6+/Tp35zfWZ7fs3wfUcbrtRAK2ZZVF/X7LXPl\nvpDMQ3jnDoRjxzCbQKvXEuxopWwPMTWVpbPTx86dIWw2A8ePT9VEp+bluP1WmVOTMeLTKXYc3ERF\no+PMS2+y7UP7+cEPLjA1lcPns3D69Cy7d0eIROwkk0U1r8RroX8gyVtvzSJJ6sPPZjNw6FDLqjx4\n7iQEQWB8PEMqVeT06RkuXIiRSBTnvGwWtm8PEY2qBomahKmjUCjx2msjjI+rD7VQyIrBoCddUKhW\nLxof8+GTM2dmMJv1hMO2OQXVIjqdhqZd2+k9Pcr08BTBkI2uHW2E778fq9u8wBABtfN0Pq8aRpIk\nU8yk8QVc9PWlyCZy+C0lTp+cZCol8P4PdGC1GmhsdBAIWMhkSpjNugUVKrcKvdlMeOdOJt5+m1Iu\nh6DRYA2F8FwW/vL5Ll5T85jNurqwrM5gQLdIfyYAWRTRZKM43DbS8SxaKuiMelx2BatVz9NPn0WW\nq5w/H0OrFejq8mE26ykU1HJ/Saqg1QqkUkVee22ESMSO223kn//53Fxyu4Hu7ii5nERLixOHw0Qo\nZCMYtCJJFYxGbW3x+9hjHTUjUBBUsbqBgQR6vQ6DQcvkZJb772+4q0I2K22MfAPYBbxHfQffnwf+\nX+BN4DMrPIZr5utf/zq/8Au/gMOxdjMG+J//83/y/ve/ny984Qt3nHdEkiqLrjDn1R2vhCyKpIaH\nCeiz9I6NobO7MLlc6HSa2s0ok5F47bVhpqZUF63BoGX79hDbt4eW3G9Tk5OODs9cg7oyWW+AB/6P\nn8ZhLBGcKDCV1jERUxNJ9XoNfX0JJiYyNSNHEFSPTShoI2DM0jekhpCy+TIX+mME/Q5mo3nicRFR\nlGsNuHp6YjQ3O+ey/3UUi2XOno3WDBFQjbXu7uicPsq97Rlcjni8wOBgCoNBy8BAAkEQkOUKyaSI\nLFe47z7/3ENIX0tmnJjI1gwRgFxOxuMxsXt3hP5+tWlaa6uL3bsjvPHGKLGJOIVEDH+Dj3DExcSM\nhNFqYuNGLzs/8QSTfWOUShVkvY28xsaW9sUftB0dHpLJouolqIJOq8EVcDHYPYpF7ybs1FMtVxgb\nTTI5ma2VjxoMOny+1V+zetrbMbtciMkkGq0WazC4oON2Y6ODDRu8DA4mkeXqXEl+sC6JfDkqsoy+\nmGT3zgBvv54hEx2nvXMrwbYQL786iixXMRg02O1GJiayTE3l0Gjg2LFJxsczjI9n2b+/iYGBZE3V\neHAwxfR0HptNj8VioFJROHcuOieUqHpUBUFY0BNIr9fi9arjfvnlIf7lX86Sy8kIgjqXO3eGiUbz\nRCJ3zzNqJc+ynYAVeBD4FrAbODb32b8DrwK/u4K/f11Eo1Gefvppuru7V3sotw2bNm3i4Ycf5pvf\n/CZf+tKXVns414TLZcLhMBKL1bclb2q68kWsKAqTx44R6+nBbLPxvu1+xqZF7KEAGzY30tKilhAO\nDSWZnLxYeVAqVZiYSKPVakgkClgsaojlUreuyaTjwIFm1q/31qp1VG+JgK2phGM8Qygt4XQaGBlJ\nMzSklpaOj2fxeMysX+9Bp9Nw7twsw6dma2qVglZLKlmgtTVErqi+Z7GoK0ONRqBYLKPRCGzYoMae\nU6kihcLC6pl0WqJSqd6RuiC3AklSQ3cXLsQIhWwUCmWMRi1Op3FOxVY91largc2bA7WHTSYj1a3c\nc7kSBoOGhx5qZefOMIKgdpZ9770p4lMJYj3nQWfgv09Huf/+BirFIuXGVk6cmOHBB1vYdGAbqZSI\nTqclELAueKjN4/Va2Ls3wsREhoGBJIZmI4VsHlks0dTkJJ+JYXS50BlNtTLy2w2zx7No1+l5jEYd\n+/c30dHhoVgsY7cb8PutV21QG+12NFot8qmX2NsaotLlxxksUQg6eDZbqiWbut1GAgEriUSB0dE0\niUQRi8VAMGglGi0gCLB+vQen08jUlJqALMtVqtUqhYKq+xKLqeXAodDyOTiplMi7706Sy6nXqKJA\nX1+CpibnkmG8O5WVNEbuB16Y+/eLwANcNEbiwK3PhLoK/viP/5iPfexjhEJLr2rvRb7yla+wb98+\nPvOZz9DYuHKdVm82JpOOPXsiHD06TiqlSrI3NTnqFCaXohCLkRoeBkWhlM1i0OTZ6LXhjkg0tF/s\nUnppPwlQy37Hx7N0d8dq+ST9/Ql+7MfaCQQuGiSXVutcitVqoLNTTZAbGUkyOJiqhXxU5dgCbreJ\nrVuDHD9ewRIIUhKLFBNJlIpMe1cEweKk0efgzJkZNBpVPVYQBJxO9XtNTY5a2aTaLVRGq1V7bJTL\nVQIB65ohsgTVqsK776pNC0ulCvm8jCSpDwuzWU9zsxO328S2bUHCYXtdEqrHY0arFWolmwD5vEww\naGPrVvWeMz6eUdvJJxJqBYnFQ74YJ5Eu49SVkLJZzC4XAwNJOjt9y5boXorHY+Hxx9fz1lvjzE6l\nSU7D/Qc78Lp0TOWMuJqa0GoFvN47t2xUp9Ned3hREATsDQ3YQiFSw8MIGg2KrgN/63oe3N9IMlNG\np9PgdBpxuUyYzXqyWakWGnI6TXi9ZqrVKhqNhnRaorPTS3OzA1Esk8/Lcx4TL5OTWXp64jz0UOuy\nInL5vIyiqAuJeV0jh8NIpVJZtCrwTmYljREXMDj37zSwaQV/66YwMTHBX/3VX3Hy5MnVHsptR0dH\nB1/4whf44he/yD//8z+v9nCuiaYmJx6PuRZv9/ks6HQacrkSGs1Fz8HlVMtlquWLyolKtYqUyVC8\nLHnE77fQ35+ovdbrtfT1JWhouGhvZ7MlBgYSdcbIUqTTRRIJkURCJJ+X6e2NYzLp5jQTVANBreCx\nsH17kFOnQGtYj6ZSYv0mP3t9Dk6fnsVo1HLgQDMzM3lCISsul4m9exvqqiz0ei07d4Y5e3aWVKqI\nokAgoFaFAJTLVYaHU4yMqB2J29rci4pK3e1IUpnp6VzNaIvHRTIZid27I/T1xdm/v5nBwSRer4W2\nNhctLc65hMYcZrMeh8OIoig4nUa2bQvS358gk1GbpG3a5Mfvt9ZaFIiiTCRiIz+lo2r1kspWSKTK\n6C1msoUqzjnXitW6tA7JUoTDdh57rINEQjWg47MZjh0bx2cLYTLr2LDBu6ga60oiSWUkSX24rnZY\nsJTP425vx9PRgQIolQrKzAD79mxmcFwNc5lMOjo7vRiNOgwGHTabek0mEiLptMTu3Q2MjaVJJESq\n1SqPPLKOqakck5NZGhsd7NkTmfOa6RkYSCxrjFitamuKWMxaa9EwNZXFZjOyiETQHc1KGiNpYP4o\nO4HUZZ8vLGK/jCeffLImONbV1cW+fftobW0FYHh4GOCmvv6jP/ojPvvZz9LU1LQi+7/TX//Mz/wM\njz/+OD/84Q/ZsmXLivzeSqGWUqpGRzYrcfKkWuqo0Qh0dHjYtMm/IDvd7HZjcrkoRKMX3xQEnPMt\nN+dobXUxNpapZdrrdBpcLtOChL+ryVPp6Ylx4sQU58/HSKXURmc7dgQ5fHiw9ltWq572Oc/M5s3q\n6lt196uueq1WQ2Ojk0xGwmJRm6GVSlXcbtOiQlparYbZ2RxjY9laLkpHhxePB86dm+XYscnaSn5o\nKMWDD7bQ1uZesJ+7FVGUOXJkjOHhFJWKQj5fwm43YrcbyOVKdHb6MJl0PPxwGz6fWS2bnSmQy6l5\nPBMTGfbsiXDmzCyzs3m0Wg2trW5CIetcqbm11qJgeDiNLFcQRZlgeyMnT01TFGUOPtRGMVugp3uW\nPQ/78Zh1dHRc2bu3GGaznoYG9TyIROyEG90UCvKyiqwrxfnzUc6fjyFJZXw+Czt2hK86x2MlcEQi\nxHt66sQATS4Xe/Y2sn6Tei6YTDqi0TyvvjpcM+I3bvTT0eFBliu0takVM7FYgaGhFA6HkZYWJ7GY\nSDwu8r3vnaNcVggELBw61LrseFSNkTAOh5Hu7ijHjk3i81mYnc3z8stDfOAD7Xg8d0e7jpU863YA\nnwf+L+DPgb/jYpgGoBX4XyydwKpc3rVxJTlx4gSPPvooPT09uN33zo32Wnn99df5+Mc/zsmTJwkG\ngzd13/Mt7lcSRVF47bURLlyIX/K7sG9fI1u2LPx7spOTjL/zDlIqhUavx93eTnjnzgX9WkRRZno6\nhyiWcTqNHDkyVqcfALB/fxObNi3dqTYWy/P88wMkk2p1RqGgCiB97GObiMdVXZKdO0N0dfmuGGu+\nWhRF4aWXhhgYqPf2hMM2Dh1q5rnnBuq0JkDNt3n00Y6btoq9FfN+I/T1xXnlleGLHWNFmeFh1SiL\nRi9q1Tz0UCuKAq+8Mlz3fUVRaGtzMzKSqu1Dp1PzROaNyu7uKG++OVr7XFEUyuUqDQEDyckodruO\nZKbMVFzhvm2NvO99jXd88qIgCPzv//1erToMVJ2NRx5Zt2plq5VymekTJ0j09VEplTA6HIR378Z1\nSaPU2dkc//VfAxSLau+hWKxAKlXkiSe62LLFj9+vhuVisTzPPdePKJYJh21MTmb4+78/jdmsq3mf\n9u5t4HOf27ms2q6iKPT1xfn+989TqaiNPOfLxPfujbB9e3jJ795uzN0zFr1xrOSMnwCKwGtz/z4G\n/Cnwq8CHgd8G1gHfAz62guO4IpVKhc997nP8/u///pohcgUOHjzIz/3cz/HZz36WZ599dtXdqtdK\nJiPVCSOBGu8dGEjOVT/UK17aIxHWP/YYxWQSrcGwZAKd2ayv8xbs3dvA0aMTZDISer2GlhYnbW3L\ntxVIpyXyeTVjfv6wSlKF0VFV2nvTJj/79jViMl27e34pSqXKgpwXUMNK2WyJcnmhL7hQUPvyaLV3\n1txfL4mEeFm5qNoVV6/XotWq3Wzb2900Nzvp6Ykt+H4mo7Ya0Ggu5oqo4a9kzRiZ96rNIwgC+XwJ\n9A5EwUw2XsLqcrKxwcq2bcE73hCZ51JDBCAWKxCLFVatrFyr09GwZw+ejg4qkoTR6VxQtVMolGuN\n74xGHQ0NDhoaHFgs+pohonLx+pDlCpKkeidlWS0BdrtN6PVqufZyxsh8tY3Xa1nQxmKx5PM7lZU2\nP5+87PWvzv3/R3P/3RY89dRT2O12fvZnf3a1h3JH8Lu/+7scPHiQr3/963zxi19c7eFcM4vZT6oB\nsPjDVWc0YrvGhOaWFhder4VkUsRg0OLzWRaV9r4Ug0E794BXty8U0iiKGmISRZmODs9NNUTmf9Pt\nNtWJsAG1NvRer4V8Pl33WXOz84p/y92E221eoF/R2Ohg82Y/27cH5+ZXDW/4fBYMBm1dpYPVqsds\n1i04xpeGQxbT8PB4zJhMWnQmMzqTmYqiPryvNmH1TkQQuKVhoqUwL7MoNZt1GI3aunJ4QVC1XC7F\n41F74wwMJJGkCgaDhtZWF16vqm5rsxmw2QwL2lAshtNpwm431oV6tVphVRRxV4rVLyBfZY4fP87v\n//7v89Zbb91xq/zVwmAw8L3vfY+9e/eya9cuHn744dUe0lXjdJpoaXFy9uzFPBCtVmDDBu9NvwnO\n32yulkDASlOTg+HhdE3jw2zWEQhY8XjMy4Z4rhdBENi8OVBLvgP14bltWwijUc+uXeE570kBjUag\nocFBV9fi3WTvVhob7bS0OBkdzVCtKuh0Gjo7vTQ1LTTKwmE7O3aEOHcuSrFYxmxWtS4SiQJTUxfL\nv+eTgedZt87N8HCqVlYrCHDffWoegiAIc00cDWzZEqzpT9wNXP5QDwZtt301j99vpavLx7lz0bky\nbjVJ/vJEVI1GYM+eBkwmHRMTGTo6vBSLarfkea2gOC4VSwAAIABJREFUdevcV/X3Op0m9u6NcPz4\nFNmsmh+2YYPnqiQK7hRu56fviueMpNNpdu3axVe/+lU+9rFVjRTdkbz44ot85jOf4d13370p5b63\nKnegUCjR3R1jaCiJTqdhwwYvnZ3eayplVRSFVKpItargdptvmiGTz5cYHk4xM5PH5TLR1OTA4TCu\neAw9mVRbxFcqVYJBW10SYalUJpEQ0WgEvN4re3iulds9ZwTUPJGpqRy5XAm3W1XOXE79MpkUEcUy\nNpseh8NEOl3k7NlZJiaymEw6Nm701QyNeWZn8wwNJcnnZRobHbS0ODEadXPt42WMRt1NP/aZTJFS\nqYrLZbzlpdyCINDfH6e7O4oololE7Gza5K9VjdzOlMtVpqdzJBIiNpuBcNi2bKhFFGWq1Sqzs3km\nJnKUSpW6Ob5a8vkS6bSqzHonesiWyxm5Z42RUqnE448/zqZNm3jqqadW7Hfudr761a/y7LPP8uqr\nr2Iw3Fjd+61+KElSeS7mf2034XlZ7dHR9Jykt409eyLX1aNmjTvDGLlZqOecBp1udcNcpVKZ06dn\n6OtLUC6roZ89eyIEArdOYXl+3iuVak0x9W5ldDTFe+9NkcmoxmxX10Jj9F5gzRhZuGN+9md/llQq\nxfe//3202jVxp+ulWq3ykY98hIaGBr71rW/d0L7ulIfSsWOTvPfeVN1769d7eOihVgRBoFAoMTyc\nZno6h8tloq3NdUes9laLO2Xeb5RKpcrYWJqRkTQ6nYaWFteyGhMryYULMV5/fbSuZ1MoZOPRR9fV\nKjVWmjth3uev5ZkZtb9Tc7Pzmhcd6XSR557rr8v3MBi0fOAD7as2/6vFalXT3JYoisJv/MZvcOHC\nBV566aU1Q+QG0Wg0/MM//AP79u3jL//yL/n85z+/2kNaUWS5wvDw5ZI5MD2dI5uVMJv1vPXWeF2p\n7NBQkh/7sXZcruVvYuVyldHRNMPDFwXG7rWb1d1MT0+Mo0cnahVK/f2JVdNsGRlJ1xkioFYNpVJF\nBEFgcDBJNlsiErHT1uZaNgRxt1IuV3jrrXGmpnLYbHrOnYsiCPDggy10dnqv2mhLJMQFGkOlUoWZ\nmdza9X0J95QxoigKv/7rv86bb77J4cOHsVjunkSw1cTpdPLss89y4MABurq6OHTo0GoPacXQaAT0\n+oUudo1GmBMPyzMyUl99Eo+LjI+nr2iMXC4wNjiY5NCh1loH1zXuXERR5vz5aF2ptCRVOH8+SkuL\n65ZXkBgMC89hrVZAFMu8/fZ4LZl5aCjJ7GyOgwdb7qkKKlBzeMbG0jidJl58cahWDRWPF3jkkQ7u\nv//q8uS0Wk1Nzv1SFruP3MvcM0dDURR+7dd+jSNHjnD48OGasusaN4f169fzne98h0984hMMDQ2t\n9nBWDK1WQ2enry7mrzbG8mK1GpDl6qLaHPn88noA+XyJnp5YXc+S+YfVpa7s6eksr78+wn/8Ry+n\nTk2rWhRr3PZIUqWuamSefF6mUrmyrne1qjA4mOTw4QGef76fvr74oufZ1bJunQejUbvgvVisUCdy\npygwPJwmGs1f92/dqZRKVcxmPcPDqbqy7EKhzMBAkmSyXp9nfDzDK68M8Z//2ce5c7M1LRK/30Io\nVN8Gwm43rJqWyu3KPeEZqVQq/NIv/RInTpzg8OHDOJ33Xm+NW8EHP/hBfud3focnnniCN998E7v9\n7qmBv5T16z1otQK9vXEqFbVV+Lw097x+wKWdT7Va4Yo9aUqlypICY5WKgk4nMDub46WXhmv7npjI\nEosVOHSoddUTItdYHrvdsKhmS2OjY9mqnHn6+uIcOTJWEwkbH88gimW2br0+FeSmJicPPdRKT0+M\nQkGmtdVFZ6eX48enFmwry5UF4mT3Ag6HEbvdSDZ7Ua1ZoxFwOIzIcqVOS2Z8PMPLLw8hiqoBMjGR\nIZUq8r73NWE26zlwoJkLF+JMTGTxeNQE1rupRPtmcNcbI8VikU996lNkMhkOHz6Mw7Fmja4kv/Ir\nv8K5c+f4yZ/8SX70ox9hNt99iZtarYb1672sX+9d8JnHY2bXrjAnTqheC1UPwHtFPQCHw7jow6qp\nyVkzNEZG0gvau4+NZYjF8jdNHn6NlUGr1bBzZwhJKhOPiwiC2hfmvvv8V/xuuVylpydWZxBUKgq9\nvTHWr/dcdz5HS4uLlpZ6D3E4bOfChXhdSMHhMOJ0Gi//+l2Px2Oms9NDJiMxMJBAq9UQDFpxu021\nzr3zDAwkaoYIqB6loaFUzehwuczcf38jiqLccxU0V8tdbYzE43F+6qd+ikgkwn/8x39gNN57F9St\nRhAEvvWtb/HpT3+aj370o/zbv/3bDZf83ml0dvoIhWxksyVMJi1er+WKN6BLH1bzmh6RiL1OYGze\n7Xsp5fLiYaE1bj8CARuPPtpBLFZAqxXmukdf2StSqVTrVuHzyLJaEnsz7f3mZgebNvnp708gy1Vs\nNgN79kRwOO7NsvV167w4nSYsFj0TE2kMBh1ut5k9exrq9EEuNUTmKZerCzxKa4bI0tzOR+aGSnuP\nHz/OT//0T/OJT3yCr371q2g0a27sW4ksy3z84x8nl8vxzDPPXFVo7E4o9Vtp5lfOWq0qMHZp+GVw\nMMnLLw/V5ZV4PCYee2z9AjnxXK5EqVTG4TDd9iGce3XeC4USxWIZh+PKgmNvvTXGmTOzde9dWk5+\nM1EUhXi8gCRVFu0+fbO4k+a9WlWIxQqUy4t3v7680SFAMGjl0Uc7rluwMJ8vIUllnE7TXZM8fE/p\njJRKJb72ta/x1FNP8Rd/8Rd89KMfXYGhrXE1lMtlnnzySV555RWeeeYZurq6lt3+Tro5rQblcoWT\nJ6fp7U1QKlVwOIzs2ROhqcl5yTZVzp2brbn1PR4zu3eHb6mY1bVyr817tapw/nyU7u4oklTB6TSy\na1ek1sl1MbJZiXfemWByMouiqA+6vXsb7mj9mrtp3iWpzPHjkwwNpeYMFjN790auK3wqyxXOnYty\n4YJ6Dft8FnbtCuP3L593didwTxgjoijyj//4j/zhH/4hGzdu5M/+7M9ovqTt8xqrg6Io/PVf/zVf\n/vKX+Z3f+R1++Zd/Gb1+8Rj33XRzWkmSSbG2ar1ctXIx78mNrtBWmntt3sfG0rz44mCdC38pD9el\nVKuqx0JR1HyG293jdSXuxnlPJERkuYLbbbpu8bj+/gSvvjpcdw3fakG6lWI5Y2Slz+ZvAK8Bf3LZ\n+xHgKJABTgN/ca07lmWZEydO8Fd/9Vd8/OMfJxwO8+yzz/K3f/u3/Pu///uaIXKbIAgCv/ALv8CR\nI0d47rnn2LRpE08//TSl0lpJ6vXidpsJhWyLymePjKTqbmKg3iATCXHBtmusDhMTmQW5BMlk8Ypz\npNEI+P1WAgHrHW+I3K14PGaCQdsNGQ3Dwwuv4Xi8QDx+d1/DK3lG7wSswIOAAdh9yWf/D/B/A2Eg\nDhiBHUvtSFEUBgcH+e53v8uv//qvs3//ftxuN5/+9Kd56623ePTRR+nv7+dHP/oRDz744FqS0G3I\n+vXreeGFF/jzP/9z/uZv/obW1la+/OUvc+bMmbtudbSaLCXIdju0ZV9DZTFDYm2O1phnsWtYq9Wg\n1d7d58dKGiP3Ay/M/ftF4IFLPtsMvAHkgSxgBxZqbAN/8Ad/gN/v59ChQzzzzDOEQiG+8pWvMDk5\nyblz5/i7v/s7fv7nfx6f795qa36n8sEPfpCXXnqJF154AUmS+PSnP40sLy8ItsbV09rqXiBmFYnY\n1zQNbiOampxYLPWhynDYXtcpeY17l/b2hddwQ8Pdfw2vpKn1JeA94Hngx4D3Af9r7rNXgUPATwD/\nH6rR8onLvq/Me0RMJhORSGQFh7rG7cDdGENeDUZGUpw/HyOfL9HU5GTjRh92++1b1n4vzvv4eIbu\n7ijZrEQkYmfjRh8u152bjHo93IvzfrUMDSVrgnQtLS66urzYbLfvNXy1rFajvDQwr/TkpN7zMR8w\nfRY1pyQBfBA4PL/Bxo0b18It9xiHDh1am/N7kLV5vzdZm/d7kvRSH6ykMfIW8Hnge6iekb+75LPT\nwAHgBKrBcgY1r6TG+fPnV91qzmYlBs+OkJqcRqPV4W9roH1jw4plNCvVKkMvvURycPDim4JAw549\nhLZvX5HfvJ1YWyndm9yp816tVMjPzFBMpdBbLNjCYXSXCStmp6YYeOEFKtIl7ePtdjoeewzzMv2x\nYrECJ05MEY0WMJl0dHX56Ory3VV5JSsx72IySSEaBcASCCx7jFeK5NAQwy+/TLV8UQjN4vOx7tFH\nMSzTnHV8PMOpU9Ok0xJWq56tW4Or0tF5JREEYUnBqZU0Rk4ARVTPxwngGPCnwK8CXwP+E2gFhoFG\n4LkVHMs1oygK/cfPc+IHh8nEUiAIeBr86D75YTq2rVuR3yym0+RmZi4fCMmhIYJbtyKsCbetscZt\ngaIoTJ04QfTsWSqlEoJGg7OlheYDB9BfIomam56uM0QAStksYiKx5INSksocOTLG9HRO3UeuxNGj\n41it+gXy7WtcJDMxwejrryNlMgAYHQ6aDx7E0dBwy8dxqSECqpFUTCSWNEZSKZE33hglk1HPlVyu\nxJtvjmGzGe4KfZGrYaWLlp+87PWvzv1/Ati2wr99Q6STeQbefEc1RAAUhcT4LP1vvUfb5rYVUcQT\ntNpFDQ6NXq+2hl0CMZFATCbR6HRYA4G6m+FylPJ5BI3mqrdf49YjyzJ/+Zd/ydtvv83Bgwf57Gc/\ni1Z7ZQnxNW4uUjZLIRYDRcHs81EuFol1d1OZK1FXqlVSw8O4WlrwbthQ+55mMU0dQUBz2RyWJYmq\nLGOw2YjHRWKxQt3nslxldDS9ZowsQbVSYebUqZohAiBlMsyeOYMtFFpwvFcS7SJzLmg0CJeMYX6+\n9VYrgiAQjRZqhsg8hYLMzEz+phgjdeev14vpNmwWe2crqKwg1WKBQiq74H0xFkMpy6C9+clEJocD\nd3s7M6dPM68rrNHp8HV2LhlbTQwMMHH0KKVcDkGjwRoK0XLw4LInWymfZ+bUKdJjYwiCgKejA/+m\nTQtczGusLsVikSeeeIJqtcqnPvUp/vZv/5bDhw/zL//yL2sGyS0kPzvLyOuvIyYSAJhcLnwbN1Iu\nFus3VJTaNvPYw2GMTidS+mKo3BYMYvGrDfKqlQqxnh6i58+jlMvYwmFMLZ1otQKXLa6XW4/c85RF\nsc4QmaeYSlEuFjFYb513wdnSQry3l7J4URfEHolg8ftr8x2/cIFKqYQtFCK4bduS4bebMee5mRlG\n33hDPTcVBZPbTfP+/dhvs6KQNb//Ejh8LsLN9R01BY1A4/pGtCvY+C20fTuN+/ZhDQSwNzTQfPAg\nnnWLh4VkUWTqxAlKOdWdq1Sr5CYniff2LvsbUydOMHv2LFI6TTGVYvL4ceIXLtz0v2WNG+O3fuu3\nsFgsPPfcc/zcz/0cL730ErFYjK985SurPbR7BkVRmD59GjEeVxcIikIxmaQQiy28DwgCJnd9jN/i\n9dJy6BCe9esxe70ENm+m+cCBmrs+NTTE+NtvU0wkkDIZ4hcukOk+ybq2eg+I0ahd84osg85sxmBf\nKL1udDrRmW5tkz97KETbww/jamvD7PUS2rGDpgceQKvTkRoeZuLoUQqxmDrfvb2MHz1KwGfC46kf\np91uIBS6sTYOiqIwe/bsxfMXKCaTTJ08SbWysPniarLmGVkCncHAtkfeh1zIMT44i0YjsG5TMxsO\n7F7RDHCd0UhwyxYCmzdf8XekTAY5n1/wfm56esnvFDMZMmNj9W8qComBAfz33YdGt3ZK3A689tpr\n/Nu//RunT59GNzcnBoOBp59+mq1bt/LJT36SDZeEA9ZYGcqiqN7IL6OUy+Hu6CDR10dVlhE0GhxN\nTTgaGxdsaw+FsIdCi7aPTw4Oolz2UBBjs3Ru2gIaP1NTWcxmHRs3+ut6EK1Rj0arJbh1K1ImQymr\nerQNdjvBLVtuaYhmHkdjI47GxgVznhoaWpBPkp+ZQZByHDzYwtmzs8TjIk6nkc2bAzesLVIuFtXw\nzGVI6TRyPo/R4VjkW6vD2pNnGQIbOnjo/3SRmZpGo9Vgj0RuWXb21Rg8eosFndG4IEHO7PEsvd+r\n+G1FUZidzZNIiBiNOoJB64p17lxjIYqi8Ju/+Zt87Wtfw33ZSjsSifDbv/3bfOlLX+Jf//VfV2mE\n9w5aoxGj3V4XZgGgWiWyZw/u1lbEZBKD1YotHF42/2rRa3qJ69xiMXDggB9JKqPTaW5519ZSqcz0\ndI5cTsbhMBIMWtHrb+/QoLOpiXUf+hD5WbW7sS0YXPZeeCuYn3NZrhCLFYjFCqQTqrExP6eCICAA\nwaCNQMBKqVRBr9felMoprcGA0eFYcP7qLRZ0t1mu4JoxcgUsPh+W20zdVVEUpEwGrcFAcPt2Jt5+\nu5ZIZ3K76xLoLsfocOBqbWX2zJnae4JGg3fDhppX5Pz5GMePTyKKZQRBvUgefLAFl+vWujvvVb7/\n/e8jyzKf+MTlOoAqv/iLv8jXv/51zp07x6ZNm27x6O4tNFotgc2bEZPJmhdSZzYT2LIFvdGIfm4F\nfL141q0jMzZWXwYaDKLR66nIMkbj4k0lVxJJKvP22+P09yeoVBT0eg2dnV727m287XviWLxeLF7v\nag+jDlmucPToOAMDSRosXvqHTuJ26Glvd6PVqnl+5rkxC4KwoKFlKZ+nWqlgug4vRu38TSTqzt/g\nli2LJtquJrdzStQ1de29VyjE40y88w7ZyUkMNhv+jRux+P3kpqfRGgzYGxqu6L0pFQpEz50jNTys\nGiLr1+PbuBGtXk8mU+Q//7N/QWb37t0Rdu4Mr+SfdsfqTdxMFEVh165d/N7v/R4f/vCHl9zuq1/9\nKhcuXODv//7vb93gVog7Yd7z0Si5qSkURcEWCmELBm/KfpVqlXhvL9GeHqqlEnqrFb3VSnZyEqPd\nTnDbNlxX0fRTURSkdBqNTofBdmN5BkNDSV56qb7zs16v4UMfWkdDw81z698J834zGBtLc/jwIOVy\nFafTgLsaJ95znpZGC40b1xHYsqVWcFCtVGoLTY1Ox+yZMyQGBlCqVeyRCOEdO64rtLJS5++1sloK\nrGvcZMqlEr0//CFjb71FtVxGb7GQnZyk8yd+gvCOJfsMLsBgsdCwZw+BLVsQUynkTIb06Ci2UIh8\nvoooLuwVMzu7MDdljZvPa6+9RqFQ4PHHH192u8997nN0dHSQSCTwrLIr+l7A6vdj9fuvvOESlCWJ\n3PQ0pVwOk9OJNRhEq9cjaDT4urpwd3RQiEaZePddEn19oCjIuRylXA6j3Y7ZvbT4lZhMMnX8OLmZ\nGTQ6HZ6ODgJbtqC7zkT7TEZa0DVWlqtks2udtq+HTEaiXFZFx9PpEkWjC8e2B4ls8eCxC2TGxykm\nk+jMZqZPnqQQi6G3WjHabCQHBmqhvPiFCyjVKq0PPXTNeYs3ev7eCtaMkTuI5OAg40eP1txtFUki\nfuEC8d7eJStuliPR18fUe++pOSeCgC0cxr9rHyaTDlmuv/H4/Xd3k6bbhW984xs8+eSTaK4gcOfz\n+fjwhz/MP/zDP/Dkk5fL+axxO1GWJMbefJPk0BBKpYJGr8fX1UXD3r215EqtToeYSJC/LPm8lM1S\niEaXNEaqlQoT77xDemSk9t70iRPoLRb8Gzde13gdDiMajUC1etEg0ek02GxreWPXg91uQKsVagae\nJFVQbDryQ70kJwapyjLVOQ+R1e9HzufRmUyMvP46Wr0eayBQ21d2cpJiKrWscXqncnsHAO8ySvk8\nyeFhkoODFC9PiLsKpFSqVp41T6VUonIdXW+LqRSzZ89eTH5VFHKTk0jTY+zYEap1jVRzRqy0t999\nJ//tRn9/P2+++Saf+cxnrmr7z3/+83z729++J1zdt4JCPE5iYID06Cjly5LCb4TsxERd1UxVlon1\n9JC/TG153jARNJr6xNZlDFMxmawlbM6jVKskBgaue7yRiH0un0Edg1YrsGGDh1Do3lACvVmUCgVS\nIyNYK1lam6x1x3NdSIM4NkB17t4tZzJMvvMOSrlcE0cTNBoKl1VyCYJw1ypxr3lGbhGFWIyRN95Q\n+yYoCkank+YDB65JqlhntdZE0UxuN/rG9chmD5VQJ6IoYzZfOSGpLElUSiWkfB65UFjweSEWo/MD\n23G5TMRiIkajlnDYvrYqugX86Z/+KZ/97GexXqVA04EDBwA4evQo+/btW8mh3fXE+/qYfPddVTxQ\nq8UeidC8f/9NKX0splIo1Wrde1VZrukDzWMNBtFF2knnquj0WmyaArpqaVn3ukajWdRlr72BEn2j\nUcf+/U20t7vJZiWcTiPhsB2d7vauprkeKpUqMzN5crkSNpuBQMB6U5J0C4kEo6+/rhqKikJjUyuN\nezopCSYcDiOW4iyjlTKVUgmlWlUNEEVBTCbRWyyUslm8GzYwffJk3X6dLS23pXrqzWDNGLlFzJ49\nS+GSFYyUTjN98qQaO77KG4fV7yeweTMWv5+YZOWddyap2iqM6dOMxod44IGmJduQX6r0WJVlHC0t\ni25n8fkQBIFQyE4otFBEaI2VIZVK8Z3vfIczl1Q5XQlBEPjUpz7FP/3TP60ZIzdAKZdj6r33LooH\nVipkxsZIDAxcUy7WUphcLgSNps4g0ej1dYmmuWiUC6fHePPdFFO9w2j1Otbv6uJDP7l32YePye3G\n0dxMvKenbt/LVdRdDUajjtbWu1tkTZYrHDs2SU9PDFmuotdr6OrysXt35IbLmGPnz9d5vnJjw1jL\nEhsfeQSdwUB6Ik8hFiM1MoJSqWCcyyMyu92Ucjmq5TJGp5Ouj3yE/MwMVVnG1daG7zpDb3cCa8bI\nLaCQE5kZGkfKl+r0OoqpFHIuh/YqtUvsoRCVzZvReUO888oEhkgLgbYGGv06yEwyfraAZcf6RaWP\n0yMjTBw9WishzAwPY/Z4EJNJlHJZzRkJhXC3t9+cP3qNa+Jv/uZveOyxx2i4xqZen/rUp9i/fz/f\n+MY3auJoa1wbUja7qJdwOfHAK6EoColYFjEWxYCMxecjNzuLgNriwdvZWcsFkEWRidPdvPXSGLNj\ncYwOBxqdjrRsYDJWIbz4ugFQDdLIrl0YrFZSw8NojUZ8nZ24Wluve+z3CjMzec6fj9WSS2W5yvnz\nMRobHdclMJdMihSLZVxOQ925I2g0aulupcLsmTM4GhqolsuYvV6Sg4OUi0Wq5TIN+/YR2LyZaHc3\nepsNX1cXrpYWlEoFRVFuu1Lcm81q3r3uB/4YqALvAr+ximNZMcbHM5w/HyUbqzJ9fha/30JzsxOd\nXovebL5m4RlXczOS0Y3Br9DcaqTFFGf81ZcoJNLkwkEMmUmaHnhgQYJTdiZKxd1IBQ1milQycbQm\nEy0HDlCRZXRGI7Zw+Jb2cFhDpVwu881vfpNnnnnmmr/b0dFBW1sbL774Io8++ugKjO7uR282ozOZ\nKF2We3W9gln5fInzZ6eYOf4uoye7MRsEtu3vwtfVhcnlwux2YwuHa7o+YjxOLiORTuRRqgpyvkCw\nwUOrSyI3cJ64I6/2EdEbicdFqlUFn89S06MwWK1Edu2a63GiuWtzCm426XSxZojMUy5XSaclmpqg\nWCyTSBQQBAGfz7Kkt6RcrnDixDS9vXFKpQoNDXb8wkVNJrPPR7S7m8z4OIH77sNgt2O023E0NmLx\n+5FzuZoImSUQoL2hgfzMDFI6TXZyElsodENhtzuF1fwLh4GHgRLwHWAzcHYVx3NN5KaniV24QDGV\nwtHQgHfDhgXxZVGUOXp0nFSqSPO6TpLjM0xMprC7LLR0Bgls3boglnw1WCwGrDYjrd4qvd/9AbHB\nMZq2bcRqVEgND2Px+Wjcu7duHCf6ipw9MkhJknH7ndx/fwPaUgyjy4XtkmztNW49P/jBD2hqamLP\nnj3X9f2f+Zmf4emnn14zRq4Tk8uFf+NGpk6cqCUUmj0ePB0d17W/3t44qfFJBt89Q0UuUyoKnD/W\nR2chy/pHH1mYJyYI6IUyFpuJQlbEG3ThNxcYeuVlGttCDGeGsHRspi9pI56SURTw+Sw88EBjXUfX\nyx9Y1UqFsiiiM5uvKIleiMeJ9/aSn5nBGgzi7ezEsoQxVojFyM2FIGyh0G0nMna1OBzGuioXUJNL\n7XYDs7M5jhwZI5Eoqkn8AStbOwyIA90AeDo6cLe3o9HpGB3NcObMDGazHr2myshQEktLBLMxgZYq\nUiZDoq8Pd1sbWoOBiiQxPThIaPt2xHgcndmMLIqUi0UqksT4kSPq8VUUNHo9oe3bbyhcKIsiuakp\npGwWk9uNPRy+Lb0sq2mMXJpKLgPlpTa8XqpVBVGUMZl0N1VOuRCLMfTyy7UeCPmZGfLRKG3vf39d\n59tkskg6rdbsT+dNND38Y5jJ49RLVHJxpo4fZ3YyQbZiwezxEG5w4fNduYTWYNCye3cEcaSXYipN\n+/3biZ56j9mZMUxWM6nhYUxOJ77OTgCGhlIMT0iIooxSqRCdTPDuMYHHf3wDFo+HcqlEbmqKYiqF\n0eHAHg7f8uZS9zJPPfUUv/Zrv3bd3//4xz/O//gf/wNRFDHfZhLPdwqBLVvUUMrMDDqTCUdDA6br\naP1QLleQ5Qpmo4CvrRmdUqISG0ecHGG6Eiew6b4FxojF58Ni0bF1e4i3siJNzU7KqRLeLbsxeS0Y\nW12cOx+nf2gEncWCRqNBzDoxm3V84APtiyawZiYmmDl1CimTweBwENq6taYUW60qZDISBoMGi8WA\nlM0y8uqrtR4m+dlZctPTtH/gAxgvaz6XGh1l7I03avk1BpuNlgcfvCEV2ltNKiUyPZ2nWCyzZUuQ\nRKJANFpAkiq0tbkJBKy8+uoIs7MXQ3c97/UjzZrZEDajZKKMvPEGFVkmsGkT8XieRmuB+PlzKKKI\nv7GZ0XEfDz54CJMiMvXee/jvu69usaozmdDc98sNAAAgAElEQVTo9Vj8fsqFAggC3q6u2rGfpyrL\nRLu7cbW0YPZ4UBSFQkHGaNReVUKxLIqMvP66WvqtKAhaLb6uLhrvv/+260N2O4xmK+AHeq604bUw\nNZXl9OkZkskidruBLVsCNDdf+81lXnpdo9fXOm2mx8drhsg82clJCtGo2hypWkUWRbQatT6/XK4i\nimVmBT1hTZHhM0cxSAl07dt46z9eQbF5MXs8NNzXwYEDTcuOc2goyfnzMbrPzRC0lWh+6CGqU/3I\ns2PodRqqlQpKpcLM6dNYAwEqksTg+WkMDieejg5y01NUZRlZa8EQaUMBJo4eZeKdd8hMTFAtl2na\nt4+Oxx+/ZX147mWOHz/OyMgIH/nIR657H8FgkJ07d/LCCy/wxBNP3MTR3TtotNpac7N5FEWhEItR\nymbRmkxYAwG0Oh2lQgExkUDQaLD4fOgMBsZGk7x3pJdsLIUr6CU6I3Khv4jXIdDkCiD2DyDqJfKz\nsySHhnA2NVGWpFp/qcju3ZiHhgiEXVRNDv7rX3Nkx5JoemNEUxWmYzKZyUn0ZjNlsYjBZsNm1ZPN\nNuBw1C8cxESCkddeq92jpEwGKZ2m45FHyFdNHD8+SSxWQK/X0tXlo8EuLighnfd+XGqMVMtlZs+c\nqasCKuVyTJ8+rYadVqEh3aUoioJcKKAzGpd80M7M5Hj11WGSySKxWIFstsTu3REiETttbW4iETvF\nYpl4/KIhIhVLTE7nyWaKmI1eink7bQGHqu/U0YFZjHLyv55FyKcABXG0n8j7DiKXW2i8r5OyJNU9\nL5RqFbPbjT0S4cIPf4iUSiEIAvnZWfQ2G1Iuh/GS5OZysah6N6J5Tp+eIRotYLHo2bTJz7p1y4cS\nsxMTNUME1OTseG8v7vZ27OGVVdS+VlbbGPEA3wQ+ttiHTz75JK65B2JXVxf79u2jdS4xa3h4GGDR\n1+l0keefP0Y2W8Jo9JHJSExOjrF3bwPbtnVd8fvzr6VsFv3sLPmZGWLFIo6mJrY/9BAVSSImSQga\nDZFAkKpGTzyXYnR8nBatlmh3N5PRKAaHk7C3keFJKBZj2M1aCoPdFAbOkXI7GTvWy9CxUfybtyBY\nq0RnDJw+bSQSsTM+PrZgPIlEgfFxLS++OEBDQCRXytLgM6EPhrHt3UN6eBivwYA9HGZ0fJzUf/83\nzkoFOWlltm8MR0MDgc2bUapVdIY8yWwCx1iezPg4s/k8eUnCnE7T//zzZE0mwtu30zaX0Ho1x+tm\nvL7XeOqpp/jlX/7lG04+/ehHP8ozzzyzZozcRGbPnmXm1CnkQgGNXo+7rQ3P+vVMHD1aM0ZsoRDW\nzffzo39+h/EzF1i3tYNnvvsedpcVh05Hz3sXmHCaef/e3QQ8eqRCkbGjR8mMj5OdmMAaChHcsgW5\noOYmeBxa+kdjJAYGkMQStnCYQqFEJi1iCYYp53MgqEaAUsgs6GMCqvT35YulUjZLLpHiaI/M5OS8\nMSHz7rsTsMmKRqut7yarKFRL9cKH5WIRKZNZ8HulTIayJNUWa6tBbnaWmdOnEefUSwObN+Nua1uw\nXXd3lFRKIhYr0Nsbp1JR0GoFdu2KMDWVpb3djaIoGI1a8nk1ZJecTRG70Iel0Ub81BAGt5dpx3q6\nWtxUKxWIjkBijInuQaqVChaXneauJoyyWvnibm0lPTpKbmqKiiyTn57Gd999RLu7Ucpl7A0NVCSJ\nUi6HLRwmNz2NoNHUjqfeYkExWHnzzdGatyaTkUgmRcxmHZHIRY9LMZ2mIsuYXC60Oh3FRbSpqrJM\naZFu76vNahojOtRckS8Cs4tt8Cd/8idLfrn1smzxS1/HYgVKJQeXRExQFBdarXvR7Rd73dzUxOB/\n/zfJuYekAxBGRkgODGCPRIh4vYhmP8d6kiQTKQKNHto8zcR6eijE4yhDQ1QtFtq3WAjtaWdi2kmT\nDyZ73yA+O4XT7SE9lqUsihRiUdytLRj0HrJZCVEsLzo+SYoxPNxPa8SI3HeO06d6mXBouK8RIu0N\nNM5JTBtsNuyZDG6NBjGZpDnsYeCCkeJMAatTi6DTct99HUQMBS786EeMvv46equVts2biV+4gFwo\nYMlmCVyyKlrqeI2PZxgYSCBJFVpanJRKZQwG3RWP75Ve3wtMTU3xwx/+cNnz/Gr5yEc+wpe//GUk\nScJ46Ym/xnVRiMdrhgjMCZXNXRvzuiFKtapud36MxOgEJrORqs6IkRIzpwcwro/gb2tBZ9Tj3b2N\nVH8vx/7zPbw+K1t+vAXB5mYiIdD/ry+RHh7G7zUiTk8wnTOxeU8H7xw+SaKvD7vfw64H1pGYSZGZ\nzFHQ2hGUKh2t1kWNkaVI5xViMbHuvUpFYXymRLvXW1eKqjObMV/WIFRnNmNyuRYYOia3G/0qhnVL\n+Txjb7xRCzNJmQzFVAqd2Yw9FKptVy5XicfVv39mJl9rAmg263G5jExP50gmRdxuMxs3+jl6dAJJ\nLDFzoZ+KmKcl4mPoRDc5sZdHvtDCzKTIyNkfUIzO0rhnL0arnWhvLyaDgDQ5jFJShfOMDgdtDz9M\nbmqK3Ows+WgUg9nM6JEj2EIhLvz7vyMmEmi0WnybNtFy4ADFOaNvviljVhIWzJ0kVRgfzxCJOKjI\nMtOnTpHo66NaLmNyu2nYswezx7OgrFxrNN5w/6KVYDWNkY8Bu4Gvzb3+EvD26g2nHjGZXKCQOK9s\nGDr4foybHuDUq+cZHowhaA3oS2bGh2K4MkmSfX1oTSbSo6Mk+vrY8bnPseXRbZRLJcTjYaJ2O5V8\nGl+glelBtSGWzmxGazDgdBpr4mWVSpXBwSR9fQkURcHtNhEIWMkOjHPi7ACCVksiW6Ggc5ONxmh9\nYC+J3l5KuRzBbdsQ43FQFLSpSd5/qInpNJgaHDQ2unBq0sS6L2B0OqnIMunubqqyjLeri2R/P0a7\nvZbMtxSjo2leeWWYYlFdUY2MpEini+zde+fEj1eTb3/723zyk5+8Kb1lwuEwW7Zs4cUXX+THf/zH\nb8Lo7m1KudyCcl+5UCA9NobBZlN7Q1mtmFwuxMw0Wza6sHvdSForx0p5tBqBQiyGTikhaLTkshvp\nOz1MLp7BGQ7TN1KgWNJSSU0x/fZrGJ0e9DoXGlGklBGxywUat3YhV7X4AnbSiTwFsYzG6mDbVjdN\nTS7cDl3N+L8UayCgto2/xIthsNsxOe0IQr0hAWB12Qk0b2Xq2DHkQgG9xUJw27YFie0arZbg1q1I\n2ayqBg0YXS6CW7euagVPIRZbEGYqiyLZiYk6Y0Sn0+D3W0gkRCqVKk6nkZYWF9msxLvvThKJ2JEk\n9V7W1eXDajXQ1z2BJh7EuTdA9OxppqYLNHU2k37vTU5PFEilJbylccolifDOnYTlLOLECGaHDdMl\nFY1aoxGt0Ugpk0Epl6lWKoR27CA3OYnOYsFhsVAplSjM5Yyse+QRDFYrJrcbayDAxMTFuZSyWaRM\nBkGjodSpliCnhoeZOXmyZnTkRJHxt9+m7f3vx71uXU0BWGsw4Nu48bbsU7Oaxsh35/676fh8Ftxu\nE8lksfae1aonGLx6a3AxZcOyXGF6pkD/G2OcPx+jVLSy5QPvI56UaAkbiR99hd4jL1JNzWJ0OGh9\n6CFy09PkJieZCrQzNpYm6d5C5CNe8ideY1PQh2y6n7LRidnjweEwsqnLjZRKgMNB30CaI0fGatne\n/f0JWlpciMpFI8FsNaKz2nH4rNjmYtqCTke8pwdXaytyLqdKvs8O0bVuHZE9QSbefZdzr75KbnIS\nWzhM+4c+RM8zz5AeGaHh/vsJ79iBweG4Ymljb2+8ZoiA6g3s70/S2enD6VxLgF2OYrHIt7/9bV59\n9dWbts/5UM2aMXLj6OYeHpVLZOE1ej1mrxc5n0drNKLR6+l//nk0ngbQmBh+/Xka9h+A5BQOqw1n\n0EdVEjFqqlgECYPZSNPWTgRPhFSyyMBIjt2dRoKbNpFR7EyWFZo6gjQnx5ieHqeYNuDrWMfwaA4x\nF6U80U+1WmW2x4H7o7so6to49Vw/O3eG63QxzG43zQcPMnvmjJqU7nQS3LIFa9hHQ0OOwcFkbVuD\nQUtbmwtPs1pVJxcK6K3WJUMujoYGOj70IfLRKAgCVr//jlIEve8+P/G4iM1mwGTS8/zzA/j9FioV\nhUgkSyRiJxCwodVqaG11EbBXMI2f4MSRXrJZGY3JTMv6EJnRbhIpE+m0hD/sh6khdFoBxR0iHPTS\nuG9f7YFfLZeZeOcd4j09pEZGyM/OEn7/jyPaW+ifmMHQvA2mLlAVRXwbNpCPRjE6HHjXrwfURbBN\nX8FpExgfmCU5MEilVMJiM2EpOMlOWUmPji6ozBQTCUr5PE379+Pp6EDO5zE6HFiDwVXP71mM1c4Z\nWRGcThMHDjQvSGC9tAzuSpjcbhyNjcR7e9U3BIFMUUATDDE9nSebLdHfn0ARNGzdGqA83UduZrbW\nV0BKp5l8911aDh0iqTh56/AgxWKZYkainFJ43+OfJmQtsv6QCVHvwuRyYyolqQyeYlwU0TncnBo2\nUKlcPGmMBg2ZVIHtBzcxfPQYGkEgHLLS6NeSm5pidmSKqaEpLFYjxqqIbnoao8OhJuAZjXg3bCA9\nPIyUSqmJcMUiqaEhtAYDnU88QXp0lPDu3VRlGavff8WeJ/n8wi6e5XKVUqlyVcd4XmzKYLPdc/om\n3/3ud9mxYwddXV03bZ8/9VM/xe/93u8hyzL627B0706hlMsh5XL/P3tvFiTHfd95fjLrvu+7qu+7\nG2g0boAHSBAkBUoiacvWWDP22Duy17G7sfOwYUdo7Y1wbNiOVYTtJ9t6mJjwhNY7trzWTck6SJEA\nCQIgDgJoAN3o+6r7vquyMrNqHxpqmeYhSsvLlL8ReChEITOR1fXvb/7+3wOtyUQ1HsfgcKDV63H2\n9eEaGSFz6xaWUIjk1auIOh3hsRir5y+TW1nFZtHx2ONTbOy0MEZCuD0WhvssmB1mZj/zaWqtLouX\n72L3auhICjpXhBvnN9hcjWMy6bD1KsweCHLi9CyjGieiM8CL/3QHjV6l4/PRQ6RWa1OSTQRMFkqb\nOS5e3OHsWcMbhKz2SARrMIjSbu86N+6vS8eORbDZ9OzsVDGZtExMePeIjN5ieVffQ6PT+XM5jd4v\nmL1ezB7P3jYN7LpVbOHwm97r81k4ciSMx2Pi3r08waCVUqmFxaInFLKRSNTI55v4/bv3weh0Euz3\nM5zOsrFRAsGJ32+lXHfTSjYIDYfx9Tsw7RvBNzaC3m7HrFRwDg1hdDh2p+GFAoWlpb3tE8PAFK/e\nKFMqZ1AkhdJKhf0HDhGw3UNutYgcOoTtvpC6VS6TvHqVVqHAVHCI1mYF1aHHbLYyOeFGzK6TvdN+\nwxTmxxA1GkRRRKvX44jFaJVKVHZ2KG9uYg0GscdiHymL78eSjACEQjYsFi31uozbbcZo3P2vKopK\nu61iNusQxbevYRYEgfCRI/R0BrKJIk3BSsNqQu5ZMJu72O0GDAYN8XiVkydjSAYDholD2AaHEXOb\nlG9fRZYkHKNj3MlCu6OgygqNTIZaIsHFap5Th+0Ep8YZmI6x+eKL3P3BD1DbbWyRCOGHHqGwtY3g\nCiOIGqqJBK1CHrffge/IQX7td8+QX99CLWdRFQlD3yiXv3eNcraILRQkFHUzPWjDNzWF3GrhHBig\nqpi4dmWBzGYVj7MPx6SW5uYyglaHd3qG8JEjVLa3aVcqFFdWyC8tETt58m1zSAYGnGQybxRCud1G\nXK53nor0ej3yi4tkbt/ey0EIHjjwM37C/3rR7Xb5sz/7M/7yL//yPT1uNBpldHSUl156iSeeeOI9\nPfYvCjrNJluvvEJ1ZweT2413fBxVlgnOzqL1hulp9eibIjubWcp4UTQdGrUWnWqJgdEgSA1cnRRW\nRxurz4ZBV0LftKN1DbP58mXa1Sphv4/QuB93xE+uJKHa/KhCiUqtg9EE27keJ2wBcl/7Mt7Hfont\n8y+htCUMTiemcD+GyDAbWQFZiDMUNCELWioV6U2uGlGjeRO5sNkMHDsW5dAhFVEU33ENfDu0WjK1\nWgeLRfeGROkPC3qLhdgDD+wKWAuF3cbimZm3dYuUSm02N8v3tYUqLpcJg0GDLKuoancvCO3H+r3Q\n4aOIRjM94wL5soI+OoJeEXhyn53S0l2yV19F1AgYbBZGxobxBkYoS1qe/6c7ZLNNXG4z0eGD6HOr\nWIJBVjIiq6+9TCObvU/sXCwtpvEfiKLvlBl87DH0JhO9Xo/U9euUNzYAMGp3GNQk6I/p6TazNC9d\nptHtEjp4kNGpKfJGI0r7J7sB9lgM833dT6tYZOPFF2kViwDkFhbwTU8TPX78La3hHwY+lmREUVTm\n57MsLe12DjidRo4cCSNJCrdvZ2k0ZNxuE7OzgTds3aiKgqjRIAgCitJlYaVKU4jw8rrMynKBUlmi\nWGzz+ONDjI15kCTl/hSgx+uLdbL3VtAJCjaTgWOPPYOmsIUt2o9SkWlXJTqlAqJUx+q00dVoES0O\niisraA0GNs+doxaP01UU5FYLSyjM8NgEC6sVavEE2YW7mKxmjp44RSGewjM4Qmj/NOkbN9C4g1z6\n+o8oJnY1Lu1KhbzJhM4fITg7C0A+3+DKK6ssLpXotXvIXZWWLcrA6Unu3s6ytaol6FYJGGwIQpWe\nqtLMZsncvInlzJm33BMeHXVTLrfZ3q6gqrualiNHIj/V/97IZEheu7b3xVE7HRJXrrxHn/5HH889\n9xxms5nTp0+/58f+8VbNv5GRnw/1ZJJqPA7sLuCiTodjYJCsZCF9u0C73uL5v38JQYBaPIHXY+TJ\nMRvtloxG6BKIeFFaTfJ372Ky25CMRswuJ/EfPEct2aSYLaNWQ3gH++jg4d56gVrPxugDh9ELMqIi\noXTapFe36coq9cUbuNwWUps1BMHIxo0NXIMCoaiTr/7DXU4/NkI45tnrV4lGbW/SkMiySqUiYTBo\nsNl2xc0/b+nd2lqRmzfT1OsdzGYd+/YFGB/3fOi/0KyBAJbHHkNuNtEYjXsBcIrSJZGokk7XMZm0\nxGIOTAaBVLKGwaAllaohy11MJg0DA06cTiNOp4GbN9Pcu5dHFKG/34HBMYzngSjaqsROSaKq6adw\nfRWzasIzNI5WrqEVuuiVOnXCvPjD23zl/7nB+nIKg8XCqUeH+NxnxwhoyyTvLSK3WgiiSHFlBbPP\nR+z4UbyzURyaBsb72+NSpbKXOZLPN5DLIppqnfXnn8cVi+w9YKuShN5mo/+RR3YNCI0Gjv5+PGNj\ne+t2aWNjj4jAff3j6irukZGPjH7kY0lGtrYq3LiR2tNapNN1XnhhDbfbRCq1a2urViVqNYknnxxB\nq7bILSxQSyTQW614JyepCg42N8vU6x2+851Ver0esZidTkfl3LlNRkc9eDxmDhwIEo9XsPq8yJUy\nUrVCU1FJSk7O/vsHMPcP07l5j0Kuxqi9STVTxO6xEZoYwKRp0ajXqSWTu4K5dhu9J0AiL7H93asc\n+Z39xAY8LCW2cffHOHzmENvJEjevbGD3ZTj65CHsOgPJdANzpB9XJEM1X6TX7eEbjGIID+zdkxs3\n0vzDly/TqO6m8fkCdp74laOsJppURRddWcf5564wPOxmJizQldpoDQaahQJSvY7xLdpLzWY9Dz/c\nT6HQRFF2yci7Ufc38/k3MHjYFZz9IqDX6/HFL36RL3zhC+/LAv6Zz3yGo0eP8qUvfenfump+DnQa\njT0rpMZgQHFEeHWxy/zSLSZno6TWU9y+vonRZGByJsLm7UWWVyMMHDpEev4uZp+PwvIyvqkpggcO\nIDeb1FMpSou3iU1M4faEMI3M8tzfXyQ8d5C5/UG+/9XXyO2UsFj0WHwefF4b1fUFTFYL6QsvcuzE\nJ7gbDLKTauPyGzl2OEAmU8QfcHDxtSzixR32Hxlia6vCxISHEydieyGP8Z0SF3+0RHItgcGsZ/bo\nIAcO9yOo8p4+5F9WR7wdCoUmly/H9yyvkqRy5UoCp9PwkSjVFEQRvdWKqnZJJmu02zJbW5Xd7fTe\n7vabKFWZHTVjzS/j37efs2dHuHo1idmspb/fwYkTUXK5Jq+/nsJs1mI26/nWt5ZZWysSDts4c2aI\njY0Ki3fSyNsJtGqbU5+cYywkoG3mKK6soJEN3LmdYnUxAWqXdq/BhXNr7Nvvxz7QwufRI2pE9BYL\nVUWhnkqhlZsItRymwQjVnR2EXg9Rr0fUaKhW26yuFrF4YGx8CKPHS7nUwuO34Bnoxx6L0S4WcQ4M\n4Ozre8t781aWbLXTeYMm6sPGx3K12tmpviHiF2B9vYzV+kbLY7HYolys01q4QnV7G9h9Gmpks4gj\nh9BotBQKLWS5e//9bcbHPVQqEqK4u/8aDttYWytisJjxTowjVat0FQVN0Ifi6uPFFzdR1S4D5iJ3\nv/M8jWR81zFTGsE4PYnaUXAbDJhnTlD1zlBHT3C8h0Ujo6NDxG8j9uwhKuksN15fY/4Hr6LttjE7\n7dQaHc5+eh9OQ4ubr4PvwByhXgezz4c2NLi3yDSbHW7eSFGvNKHXwxoO05BlFleqnHooSkOv0iqV\nkGo17l5MEDnTj7x5D8/o6G6o0ztYRXd7G342vYdG/xaj3Y/IqPD9xiuvvEKhUPj/FXL2ThgYGKC/\nv5+XX375fZm8fNxhcrsRtVq6ikLPEeDVKxlW4go7BRCNJsrpKlabgcROBY/PgntskpbOyfCZw8RO\nnKCdidPqdNGazSRu38U32L9LvHUG8iUZUSOycmUVgwai/S5MQouZ/SFef6VEKZlGruT51KefRlpI\nYDU4kIoFqpd/wIGHHmdsoo/N9RxCNUup5MBst7Fwd5u5fW4EunS7PdbWSoyOeggErDTqEj/82hXu\nvXKdRiaDKssk7o6gaR9Hk7iDKIrozGb8+/bhn5n5qeS4UGjtEZEfo91WyGQaHwkyArsOxKtXd5t4\nHQ4DL7+8hU4n0hexkLx7j/hqGqE9itgss/GjHzH51CcYGZnB4zExMOAkGLRx8eI2itLF4zHxd393\nl/X1IlqxR3w9w+ZqjjNPjJBJ19HUVbQIdFsNls/dwIiEweWmNx9nun+MxdlBNlZzdOo12s0W2UIH\n8/Ew4XSW2WOjLN2J44xFsdmNPPbMQfT5Ne784z/iHR3FHovhm5rCOz3N2mICudNFqjdp1NsMPfEk\nKlqcbjMWm4l2tfpT01StwSDF1dU3ZI7oLRb0b/GQ+WHhY0lGDIY3jyC12re2nqm1MvVU6g1/p7Tb\nKKkEihijv9+JXi/S6XSp1SQaDSNTU16OHo3Q1+dEUVSMOkhlsvRUFZ3VisVnwxO0k0hUWVkp4LF2\nEQs7HJgLU/Xt1pNvvnSOriwz8MgjFLRBfnTuAtVGFxkdTpPKmdMDLH/1K5j7hghPDOGNRNn6b+dp\nlKsYDBqsOh3p+TtszkQYHfcyfXiQu6/cIrWZwuLrcDwU2xNhtVoKeoMWo81Ks1ACRUFnMiF3xV0O\n0OtSjcex+P004k3UnkA5XcDg9TNydvodycjPA2sohNnrfYPgzPIL0o/zxS9+kd///d9H8z6q2T/z\nmc/wta997d/IyLtApdKm0ZAxm3U4nUZsoRC+mRlKa2vkmiLFfB2zK0AjWaXRkKnLWjwRP9l0HVnu\ngdLB1Gtw64evYtUpuMcncIxNc/G/fJmuLDN9SiY8M0ElX6FZrmILRug/8ijxwq52YXFpjWatyelP\nzhLyaEheuUpvZ4HAyAClxTt4x8fJ3rmD5u5tGuYCnboGy75D9CptJEnG6TLh9lr2CjclSd1zuGXi\nebbvrNIqFlFlGQQBWepw58I8I9YCFo9nN0H1xg0sfj/WQOAd75VOt7te/Etd+8+SdfJ+I52us7iY\nQ5a79HpQq3XQaASsI1bCQ2E8ET9Wt5FiJkE5k0Go5NB4B8hkGuzfv2sD1us16HQijYbC8nIem0mg\nXihjsluIxyuUym20GgiN9WHu1imv3MNnEllfr2Ks6NAZ29RWaoyPzZDeziM6ndj0XbxO/Z7de8CY\nZ+iTE9QLAaxGMJQ2ef1v/gZHNLq7Vd9okLl1i4HTpxl+5CGw3EHUaonOTbL24ktkltfoxDy4wn7C\nhw4haDQ0crldMv0Wa4tzYIB6KkV5a4uuLGNwOAgfPvyWE+8PCx+dn6L3EAMDTlZXi7RaP7Gdzsz4\n+ZeyB4/HjMWkofAWZXU2ixZRErBYdDz77ATPP7+OKApEIjZOnRogFNp9EuhUysQsNVbzScqFGjqL\nmb7940xMDBKPVxkbddPM5bhzL4NJLhEJmHFHAmhFP66BfkyxQV54LYMYHsXaapK+u0Sh3uPW6zsY\nMkV0Fiul2xUcs3poVHC4rZg9brqKRKPSopQt8dLyFoVql+jYGM9+4giaVhmHuQSqDDoN+p6Ez6oy\nMOwhqdfQrjdxOE08+ugALaWHKtVR2m00Oh1DJw9jtDcxju7HOrmPkujFpXbf024fw/39zeLyMo18\nHqvfj/u+je3jjBs3bnDz5k2+/vWvv6/n+aVf+iUee+wx/uqv/upD38v/oNBVVbqK8q6Jc6/XY2Eh\nx/x8hlZLwWTSMjPjZ2bGT+TIEZyDg0jLZXwTBjroMW+02Nwss2+fH0G1ojFZsZkEHNoGXqeGbqbC\nwvI21XOLHP/1zzD3m/+R3PxNRLON4OwszXaPytY6pr5RXr2Y4Btfm8doMTE44qcv4OTl717nN/7j\nIXyaIrV7OdzuY9ijUUStlkf/9P9CbtQolhVGfQM89/0tquUWDz05jV4v4vZY91wRFotuTxci9LoI\nqrKXF6TR61HaEmK3g3q/tl7U6WhXqzQLhZ9KRvx+C36/5Q2idbfbRDD40QnQqtU6KEoXs1mH0agh\nEDDj8Vi4cTPDtXO3UWSVJz89w5G5cfLxNJVSA4sHjhwJ7z289fU5WF0totWK6PUi3U4bWQVRUugf\ndGO1GQjHXGTSDR4+PIivVKFRkBAtLmyfg70AACAASURBVKRajXqiQvjQHKpRg8utR+lqeOSJKfZP\neyhc+x7u/QeJDI5Ti29j6OoxWU10ajXcw8PorVakWg2L30+90iC/k0EM9FMO6HBYtGwuLGL0B/Eq\nKgZtB9/UFO1ymfUXXgDAEggQPXbsTXZrnclE38MP483lUDsdOvU6nVqN8tYW1lAI7VtNqz9gfCzJ\nSChk49FHB1laylOrdejvdzA66iGfbyKKWWq1Dh6PidnZIA6biMm9Sxh+DFGrJTwxhFPvYWkpz4kT\nMR54IIaq9ohG7UQiNjQakXalQvrmTXT5JJ96dpKiakfpiVhMIl2phV3Nc/VWllyujtViYvv1ZRTZ\nTbAvQjJbRdPzUo93yBU63FutMxQzUZfAgERiq8RDhybJ3JoHr4XA9CRjc8PcfHUBUYCeRovW5sIX\ncnHhYoJCtkaupsFpUplw1xC6ZuKXL+/qMySJIYeHqr2GZ8TI0PggQZdI/6CGtta+G56TcWC16fFb\nVDaW0siyiKEmsnk9idGofUOOwXsBs9uN+fjx9/SYH3X8wR/8AX/4h3+I8X1OqxwfH8dms3H9+nUO\nHz78vp7rw0av16O0vk5ucRGl1cIWDuObnv6pvUrZbIPr11N7U4RarcPrr6fwes2EQjasPh99PRP3\nNlo0mzJHj0bQqBIjoR79USeKPgJoqK7cobSxRSHb4PbNBBqhy87iBis7Mo9/8mFcJpl2pUKr2SZ6\n5AjPfWuBhR2QGhKdtspKq43Lv5/Q+BDbRYG+iYO4DBIGm41Kvcu9ezWa3S4ev4WQx4jYrnHm0Riq\nrNA/7iUaMHDnwm2MXjORsSjhsG3PzebzWxma6aeUziG22yCKeGIBBgYsSGvbqLJMcX0dpdXCMzKC\n1mDAPTz8tvfMYtnViC0tFUin63i9ZiYnveiUOvl7mwhaLdZA4E3leh8k7HY9gYCVQmG3d+app8b4\n0Qtr7OxUsNqNWK0mUutp7qhmjj4yTeTIJCNzQ9jtPymY9PutnDo1QC7X4OjRKNcubZBN1+h2K0xO\n+xkZcrG6lCESteGNBZicmuOH//1FUpkGotLCSBddT+LIsSgPnh7HZhKxO00YlBLt0REqm2vYw2Hc\n0TCNco1CPI03uBul0Gk0sIZC7OxUSKcbFJ01bmwuMjzsQi7nuPDVV5kc9zB9sB9PxE/lfm6J3mpF\nEMVdd6LBwMCpU2+6NxqtFqPTyfaFC1S2tuh1uwgaDZ6xMaInTryp9fnt0FXV3V4lo/E9Dbv7WJIR\ngGjUTjT6xhGU1aonGrUjSQpms27vqTF28iSp69dplUp7CXX2WAynRkM4/OYvllStsnP9Okq7TWZ+\nHq0nwEayx7VbmxTybWL9Dg4dDqHpalDVHqpoIDB3kJBXi6DIpLJNTJEB7sZFbN06rbaKN+JF0KsM\nTPahtpoM91mweVS6EReFfIvXvnOB8cMHMOinKVY62Fw2QoNhsoU2TVVPp9lkbSGO265h5pNe4hdf\nxez1UkxmMfv9mCwtDvSLWKNhEjdeJ7cmsX2hx8iolxMPnGTfyFEqWzvc+MFFtu6uEZ6ZIK/Y2V4u\nMjrqfs/JyC8azp07x/LyMr/zO7/zgZzvmWee4Zvf/ObHnoxU43G2L1zYE+K172ufhs6cecfFtVhs\nvSGwD6DTUSmV2qTTdRKJKj6fhX37/KytlRiJ6shfvYi0kGXh9Q6BvgCDR/dxb3mbbqtFJtfC5XdS\nL5ZpdaDbg1atxnDMTzmRxh32srmcYmO1gNbsxWDU0pG7CBotiUQNt1mlWm3zarrJw0/MoJR2ePGF\nZe68fAu9yYizvw/BYOLZX91PefEW+dUtmqMxDvzK00x+/hQ37pS59OoOWi0sL3o4+WA/6YzE+Gw/\nakdiayWF32fm0MNTqDuLdIxWaqkUzWwW39QUnWaT5PXrKDoLGG3YbPq9JOh/DpfLxPHjP0lYLq2v\ns3bpEnKjAYKAye2m/+GHPzSHRqulMD+fYWkpTzbb4InH+hHpMj7ioNtvolcrUS9VaUlmwocP4x3s\n2yMi7bZMMllDFAUMBi3Npswv//IELqeBmx4DfQNuHjo1xLXLmwQCVh58eBjkFomkRHisn514Daku\n4xqNYR2fZfW1Wxx7cJibr26TSlZwRiNMDFnwe/xc/Mr3aFYb9I1HcUcDmLwBvJOT1FMplK5Is1Ag\nMjrCdl7g7s0dmrUGn37YRcBnRlFUkJoorV0zQuW+3hFBwBoIoLNYkGo1DDYb7UqF8tYWUqWC3mZD\nazBQ2tjgx/PSnqpSXF3FNTj4rpqXq/E4mdu3kapVjPfTd9+rwr2PLRl5O2i1IrIssL1due8xN+IN\nBBh+4gmahQJyu43Ban3HhLr0zZuU1tcx9o3S9Q6ynBe4sJjg4qU49bpMVxB4Jifx+BMj2Pwubi+u\nMTY5grv/CaRSiX6li2i109lpoOnJnD7pY+VejvhaksrmDgG/kaGIA0fQS/KOnoWbS3Q7HUSzjaGx\nKAf2e3ANDHBtReG/fukack9DOBrDolUQBIGOIiJotaTSdQyuMLd/8BKtXI4Dv/osmYUXKKYL1LVe\nHHYtm9fnKdcUCpYB4jsNBNc0j/7nx8gkS2xuFdgpiCjKO4ef/RveGb1ejy984Qv88R//MfoPaBz6\n7LPP8tu//dv8yZ/8yQdyvg8KsqySzTao1ztYrXq6meybHAH1VIpmPv+GKPB/CYNB+yb9g8dj4vbt\nDJXK7vGSyToej4mHH+4jd/UircI9zEoHQSMiFlpUFuGhTx9lc36VfKGJXuMjONLPzAMzBBcWqNyd\nJ0EfOhT8Bw6hy3VwRfxk8h0GxiPsbOSwucwMDPuJxay06i061gCXzi9z7HCQra06llAEpV6mEk/i\nHJ8kWzdw7eV7tBotDIMTZCsi2wtZvvGNZdR2C6XZ4MZFK8Vim3Sqys5mjlNHPfzKbwxQ2dpi9dvf\nxDMQBasHhytG8MABdGYzqiSRUZxc+uotDN4gVqueAweCDA29vdNGkSTSN2/uEhGAXo9WoUB+cfF9\nJSPttsLGRomdnSoWi47hYRfBoA1V7XLvXp5WS6bb7eF2GdhajiPJIqok0avm0Op1+MbGcATcJKo6\nVi/tUKm0cbuNfP/7a9y9m0OSFEZHPZw8GUWjtjl93M0Tj/gp1wXK5TZdQUOp2OHipR1iYTMv/XCb\nJ548zpnxfUhthfWNAgnJzehckG99Z4mFu1miQwHUnRRF/wjJK/NsbRRpFEskNrMceHAaV8jH6Cc/\nSS2RYPvmEraQA4Pdilis0cykWcxnOfvQA9hdFlBker0eepsNqVajVSyi+3Gj/PY2tkgEjU6HVK2y\nee4c1XicajxOu1zGOz6O3G6j0Wox3NeLdGX5TdUHb4VmocDWK6/8pA26UqF9vw36vUjh/YUjI9Wq\nxCuvbJFK1el2e1gsOo4fj+AWqySvX6dTraLR63GPjhI6ePBNCXVSrUY1mcTUP8K9lIAnMkGlkGZ9\nc7dR0WIzYbMaWFwsMD0TYGomSCJeY3GpiJxLs3l3g0aljsfv4PP/21luf/8cK+sdxqaCxPbbMZ46\nTWN5nsS55xn4nz+Pc3Ifw4UKgagHs1HCINfwHXiUsmxE1ecJ9nlZvhNnbVPkgQdiHDreR7tVRBca\nIOYPUt3aInZgH/6JEbqOEMvzm9iMJkJ2gWImjynoIbmRQuz30u6o7OR6bGSSeFwadBqRhx4awGzW\nUSy2aDQ6GAxafD7zL4wW4b3AN7/5TVqtFr/2a7/2gZ3z6NGjFAoFVlZWGP2Y6HFkWeW11+KsrBSR\n5S46nUjI0iHictMpFd/45rfQgf1zhEIWhoddrK2V9giJw2FkZaWAKIr0ej2kapWNZJKJQSPNrTWU\nehWtyYQoCuQWFqju7GCLRAiFrJz81HEyRZXBA6NU1tbQarqMPXqY0tY2itTg1qt3cPZFOfKYn7/9\n8uu4/Sb+p//lONQKhEIygk3DrXUD2m4Vow4EnR5Nt40qarBHYhhNIrOPTGPXSZw4e4RkokKp5yKZ\nqnPhwg6VUoNOuYjBZkXu6Lh0OcGjpyLce32dhXsCuRtX0TYLtJI7rF69g2VslrFDo+yfHSJ/+zaq\nd5BLL62idXpwmT2UUjmya1t84hMjBGJedFY76XSdYnE3Sj0UstJr1t+y/bWRy9FV1fclcrzX63Ht\nWoLFxfze57a5WeaxxwZxOIxUq21qtQ6y3EWRZJoamJrxsbyYRtUbUXtdtrcrDE7GKBTayHKXxcU8\nsqxy/vwW3W4XQRCoVNq0cjmU5Bq1ZIJqTcY/PYklPILDbWVs0oKiKAR9Zv7T/3CAZqOD1DVi0erx\n9gfJbOUopEukSgKR4Qh9Y2EsNgOD/SLLV/JE3D00oeBuuJnSoVOtsXPxIlK1iqNvkMxilrWvfI/g\nocOEIi5KhTrZXIvB44fRtQo4A2Y0JhPhI0d225Qrlb1tF2swiNZopLS+TiOToVUoUI3HEQSB4vo6\n7uHhXfv59DQarRaNXo/+XWytNTKZN5UkSuXybnjbv3IyEgK+C0wCFuCdV4/3AL1ej9XVApKk4vGY\nUJQupVKb7dU0lcwtavkikqSg1Yp0mrcwe71v2kMVtVrM/SNcW+3y8rkN/IHabtyyVovDa0MQRBLJ\nOmq3x8pyAafDwKnTQ/z3/3oJJbmNy6RS2KrQMmm59sNLxKwygiqTu3SecqmBfnCGuqJHawiR3c7h\nmxjD49LT3FmntLmDde5h/vHLL5ONl/CNDPDomREeOTPG9laZw4f8GIxart8sMTkT49o/vcLa5dex\nO4xIV7bZ/8nHcEWCrP/oRxj1InKrTWHVwOzTT7BZbdCpymhlgVJDRzTqZ3Upw8xxHfl8g/n5DM2m\njMGgZWzMzeHD4Z87OOkXCYqi8Id/+If8+Z//OeIHWCYmiiJPP/003/rWt/i93/u9D+y87ydSqRpL\nS4U9274sd1nbbuIedaMRSntjDrPHg8njectjdFWVyvY2la0tol0IzwbISWYcDgN2m55apUWjpZJa\n3qSyvU046iJ39TLdRGJ3Gup2I9fru845826nST6eY+SJx7G39WzeWkFsymjrec79n/8NncWK3u7E\n98gnSNWNXDq/xqHjg0wMmIhfuojNCF/9RhKnx8oDv/wod5s24qke1fMb9B07ipRP0ZYFDh3pI3f7\nOo1mnna5gm//ARrhEDqzCbPNitRMoLM5yBVbtJMZbBYd7XaXgwcDlDc2SCzeZXTEhXc4xPKtFvlk\nAVVV6T84g95mI17qIEkKdo+HWjJBeWubnqqy069FSa6RMQ6xvF5HlrsIwu42+APHw+gtljdlBJm9\n3vet+6RQaLKxUX7DRKvRkFlfL3HyZB8mo4ZEokoqVUduSxR1KkceGOKZz+ynXKwjClAqt8hmG7hc\nJur1DlqtyO3bGXQ6kUjESaHQZG0xxe0fvsLsbIhYyMP8taskvr3AwV/9FFp3kMFBB/HtMquLCV74\nwRJqR+bg4TBqR+bomVlyqSJGQUIspxibmyOdqzI+EiG7skxqM4W1W6WcTOLqi1KWBWKjEUxWM+7h\nYVaefxHX2KFdV2cxzcj4ERD8eGxgEXq4/R4EUaCRSlFaWyN06BCdapVOo4FraAjz/alUp76bqdUs\nFBBEEY1Ot9tzEwySX1xEaTTQeb27xXnvws34tvUgP6U25N3iwyQjReA08I0P4mTdbo+bN9N87WuL\nbG1VsFh0HDgQxOezIEh1ttdSJOMVOpKKVrfb7uge29kjI+1KhUYuh1Stkixr+PrfvUY608RgtfCr\n//4ggaCNttTl3lIeVe1y6FAYs0XHCy+s8dnP7sNhgZogU8nl8PvNOAN2RFnCblBB0LFd7aChh65V\nxOweQurJ6J1ulGwck91GtlTFEo2yHW+QuXGTeq2FWddj8foqR84e49TDUXTNPJ1MnWMPj5Jd3uDq\nS3fwe60oooalO0kMrpuc+exDrJ9/hVyugtMMWrMZrclE+e4KmR2JakdHZHqUoX4bqDKlUotEoobZ\nrKPXA41GIJ8uk97SERkK/NuE5Kfgb//2b/H5fJw9e/YDP/ezzz7Ln/7pn35syEi5LL0pP0hrdSC6\nLJi7FZRWC7PPR/DAgbd11eQWF0leuUJX2dWLaAxbTD/4ID1VInn9DvLdNPZAGK1XQ8DkY3TITuG1\n89i9DpyDgyiyQvLOMo6hYcxTR9heSaLpdXn9yhZr8TYbdzbweCx4ETF5vRRXlgnOuillyqylE3zv\nBxuc/eVZPCtrLN5OEg6aiPi0tBpV6utLxAYPE0/USGxlUNEwPBXl4LiX2sodTCYNPZ2Hnmhj9V6S\nA7F+3N4BhkZcNCthtEqDoqPLdqrDwf0ekitbrC9niDkULF4PUqlAKZFnaHaa1++UGDs8yU5WYdzv\nw1LdFa9q9HpqySQ9VUXUiGg0AuWWyPXXFjAGdzUFvd5ullNy0EVwbo6dV1/dG/Ob3G68k5Pv289A\np/OTuPYfQ6cTMXQbbJw7j70pMdvfw2k00uw6GBq0o9GK3LgRp7S+xfHH95PMSRjMWpLJKrlck17P\nSbfbw+k04vOZefnlLWyaFq1smZtX2yw77HhNNoReiVoqQ6msZ24uQD5T4Rv/7y3u3YmjqhDfLvDZ\n/3CIjY0SLo+F8tI2n/tfz/LqlSxL9wpY2zmq2xs8+NhJ0hdeQm42KW7H2ffsp2ikkiy+fJ4Tv/d7\nRA/OYomEqRydQOdwMvdgFL9Lx865F2k1KyR6Pcqbm4Tm5tDq9excvMjQmTMEw2HalQqOWAwAs8+H\noNGgM5uRGw2kahWb1YotHGbs6aexBYNYg8F3XZxnCQTQW617JAfAYLdjfo9iGT5MMiLd//OBIJtt\ncPt2BkEQMJm0yHKXK1cSfOpTY6g9kWarS0faLXhT5C7VqoQkdWnkcnQaDRKvvUb27l3o9VgSJjDr\noduocfSxCbRakc9+Zpy7iwXCYQuDgy4MBh0IsLFZoVhq8eDpSc6nNih2OnTVNj7/EIcfnaBTLiHo\n9Iy4vCz88ByRaABdNEI7p6EVX0PogaZvEMPUcewWLdU7y5x4cAgxOMi122WGwlZGB50EtHkacont\nG1dpLplRpQ5TE06aXQNSq0NH6dEoVylmy/hPPEJAaRPwmdDo9Sy8dBlLcIh6NkHoyFFiQ0EaHQGt\nw83EhPe+lU/AadehrSTYuniTOytGlCMTBOfm/lW1dn6QaLfb/NEf/RFf+cpXPhTSdvr0aT73uc+R\nzWbxfwxyXGw2PaIo0O3+hJBodSL+gRDho4MonQ6G+66Ct4LcapG/d2/P0qrR61E7HeKXLqGzWJAr\nJfxuLet35omNhqm0GtTXEmSvX6ZqtzNw5nFEuxuNN4ItHGYnWaextkD/Qw9xez5NNZ2hvROnIvmo\nqjJH9x0jM38bx+Ag5rk5lq5V+Q+fP4bHayPz6k0iIQt9PoFWOk29VaGTMnPozHHKNRcrwjBmbQet\nQc/+IwMsZJbo6Q2kUxKhkIVIS0bMLmMouDgxE6RfVbj8/XksSpdf/s2T6DxmvvKlVwgf2M/IPi9B\n537a118gfSOPxmJj6kQMY/84gt1L5GgUlwRpeZP0VnaPqEUGfVjFJk2slLNFgsE3ChyLxRYTJwfR\nW627QZFaLdZg8H1dD9xuI4GAlXj8J4mifqdIY+Eaqtpgc6HAsNfPgakoltgA339+m3PntpkcczIw\n3U+x2uXgkQHK5TbFfAOzUcTrMXH4cIQLF7ZZXi4wOekl4NawLGUp5GuUq0VGToYo78RxeW1U2wLF\nosS9xTwaswWj2YQsddCZTGSTZSZcRg7MRdDtc1CvFBHWr3P6QBS3oc3L14psJBzs+8STBKanELRa\nYnMznP8//neCs7N0qlVyi4tYcznGZiJ4p2dIra6x/MoKi9/8FsGZSXQ9GVGE7N27jJw9i8HpJP7a\nNbo6A1qHG9HhxxIKY49G8U5MUFheJr+0hNntxj0yQmVnB8/oKN6pKXQ/g7PP4vUSe/BBsvcFrAaH\ng+Ds7E91rr1bfKQ1I/V6h2KxiSjupnz+OIv/50G1KlEuSyiKSr3eQa/XYLHoqdUkhmdD1DeiqPES\nGlHAYddi0XWo5MrEL12isLxMr9ejkcmgs7tAaEKrxqOfnqNS7RBfjrPeUZicG2RifAqp0WBrq8r8\n0m4ltUYjILe7fPq3z5JcWmNzfoOjTx1gaSVPbjPFxLgHYxdO/ebTBKYm2bh+m1QyS6duQpU65Ovz\nmA0CV56fx+6xc/faGq3OFfqf+CQvXIzTF7XhCTaZ//KXKceTDJ56kJ7ZTWllGdvwOBafA3vAi28g\nSkc08fy3bhAKmBFmXBjKWwSDXpSwj5kHrYw9vB+11aTZaeJyuXnllR2y2Qblcpt+b49hn4rc7qC2\nuhSWl+kqCoOnT7+nFq+PC770pS8xNzfHyZMnP5TzGwwGnnzySZ577jk+//nPfyjX8F4iHLYxOOhk\nY6NMt9tDFAWGh90Eg1a0ei3an7Kwqp0OaqeDzhuk1DFRKDRxuUxYlQIGm5OmYqMst4gdiSDm1wl4\ntOgQEO5/99N37yEHJ9BbnaTXkggdCW/Uj+ofYuXVJZS6itXmppnP0SpX0Dz8KLO/9VssFyyoFxdx\nBMa5u1pl/k6BuUCAbn6DretxtHINuavB5nMjV8ocHbcip2votAKDU2HsPhc+e49qtUAs4qSwtEx8\ndYuBB0/SVTrU716lMX+D0ZiNarHGwje+Tf+DD2Jz7+oAVu5l2Om1eODIMXzHTpGpiaytNXn+H5aZ\nOdJleMzPyIibRx4ZYOGOkWW5jN+lYyCsp5PdBkcUV+iNglRB2BX8Alh8vvfdPdPr9UgkaiQSu6GP\nAwNOSqUWsqwSc/dopRpotCI2s4bM5jalzW18R3qYLUa8fhvlRo/+kTCxmIPLl3e4dW2LaJ+TWJ8T\no0Ekk65hteq5fj3JykqRx04PMHFsghsv3UQVdHTaHaJjfei8IdLX8ruuq0wTVeni8LlQpSaC1KCn\ndIgNeKiXq2Ru3GD15irJrSo7d1d47NOHmJ7yInW6aEQRlDZqsUzxnsDg6dMonQ6l9XVEUcQcDNO2\nhygpVjo9PRqtBoPXR6XSwdhrYDZqEDUajA4HW9dusrWwgXN8ily6yMrad3lca2Ds+H7cIyPEHniA\n4OwsPUGglc+zde4cla0tqjs7hA4fftsI+beCs68Pezi82wZtMr2n23EfaTLy67/+O6iqAUEQmJ2d\n4rOf/QT79o0DsLm5CezGX7+b17lcgnh8i1RKS7fbQ5YLdLsGJiZmkRVYV8w4D+/D3VXwOjUkilW2\nt7ewj/dR3tqiYTbT0Wqx18r4Y9A3asRkV7A6HLzwnXl6VFjf3KbRdnDigX46jSThsJmJ4Sim/DJr\nN++QEnscPLqPo8cf4TvP36Utw+iAlfmvf4+OSWRkOsJErki62GWnJmGlx50LcT79G31sJzYp9AR0\nHQFVY6Sta9JT8xw5HObOuas4nxqm1G5Q3kmiefUSsV/7LL6TB9E1JGRBYfbxCayDoxQVK/uPDuMO\n9Og0yvhDAQKT4xQQ8O2fQq/TsHZngbLSxjU8SnJLRGM043S2KCQT5FcFHn8kRldNk5cktKkUrVKJ\n7H1h07v9PN7u9ccFlUqFL37xi7z00ksf6nU888wz/P3f//3HgowYDFoeeKCPwUEXlUobp9NIOPzm\nYri3/fc2G9a+QV69lGRpfoNer4cgwNjhMYJmIxe+cxVV7aLKEiGfgQeP+6ncu03k+HEyt27RaTRp\nlqoETx4marfQWl9kKaenXlJYuXwLX9RDYMRP36FhNi9dwRvxU9bpWHjxEuGpUQ5OB/nGP22iqj3O\nPDSOWUyw+IM1Jo4dwOG1Q6PEva//I/59+7FKMt//x3lMT09T8XfRmkx0Wm3q6Q0WLt1GZ3eSq0H6\nOxcIORTUdpNarcHScglVljEurzIxd5RQv59X/u9vUdraxq49DZ4oGwmJfKVL/1gMh8PI3btZYjE7\nfr8V/+kRDk6YSV69SnJ5iWSqQWDKTf/UGMtrNex2A6Io0NdnJxb74NI7l5YKXL4cv19MuuuK3K3j\nsNIrJNlYEhFEkaGZPuyVLtubRSxuF1e/s04m00CrFUmna5w6NcD6YpLBqImt7RLf//YC++ZibG0V\nefJT05w6NYAsd7n8WpLP/btJQpMNRoYcaLsSksHFd19IcOxomEKhgc1loat06EgyWquIWkxz9JFp\nWi2ZpSvzxM+dxz82hL/PTyFdQtEamTk4wL1XrlFeThOcGsUWDLHx4o9o5HL0Pfggla0t3LOHeX0D\nEsk1AiMdzE4H03PHMdy+RzlVwBbzINcKWC0WZEli69YKeqebaktAVbqoSpfVawsMHppGabVopNMI\noojJ5WLj5k1USUKqVGjm88QvXdptDv4ZklhFrRa99b0PuvuokJG3nGE/9dR/fsPravUnwTQ//iX2\nbl+HwzFCoQaZTBqdToMguAmH3UiSgiyrTIwMUNrcxODQ0ihsosnmGZodxWC3E5idpVUs0nW5Kclm\nunoLp8/O0RKsXL64Rb3SRKPRsXy7hjeopV7r8IlPHENrsdG89zqXn7tMuVBHK3S5Va7z+H96GkEf\n5vCUhdXvfZdWq00j04KYn/idFUyBECGrhZ1kg30PzmDVdug0KuybGkIWDDjzDXT5Hi6tDkGv0lBU\nzFobhmKBgYNTGGKjOK1u+h8aweLzUqyD6PDR6IBFD/tnP0MjX8CiVmnvrFHJFmnhQGPS8Xd/812i\nY1EOzPbz+q0che06kQMz+PwxMvkyClV8ES/G0i75EEQRBOFn/jx+2ut/7fiLv/gLzp49y/T09Id6\nHU899RS/+7u/S6PRwGL52TqEPoowGrXvaDd9K9RqEqlUnU5HRbXESOY2QRQRBTA4HGwXRJReFUEU\nQAWNTofW5kbrDeOdkNGazUSOHgWdjoY5RryspSd00UUneP3bL9A/LvHvfusE1Y1Vyju3aCkeHn96\njpF9MV46D0OnH8EfdqHTaXjollrVwwAAIABJREFUgQilsoSEDl/fEHO/HiTi07H8ne9S2dnGZNTR\nkSFy+CTP/I9nMdVTrL/wI1xD/Yx+6tOsXJ5nn7eflgy3Xr5N/3Q/+Z0MkeEIgljkxEOjFMptHGEP\noWMTJFN1ovvH0RiNGEMxmhobwahK/6QVo0lPsdjkzp0MYZ8WXT2HQa5g93vw7p+laooQGlZoCBYk\nRWR83E0gYMXvtxIKWd+TGPhEokq1KmGx6AgGrW8ilj9uRt/YKO0REYBmpcbCa/ewjIoYbVZ0Fgt4\nIsRzPUpqE+uQn2RJwGDQoNeL2O0GzGYd8/MZhse8dNUeyy/uYHWYabRUGvUO8zeSHDvZTyhkxes1\n4/Xb6B4YYWjYST7fQq0rHD2mo15rsblVYf+Mh4zLwNikQChgJuYVCPfZefGHS/idFsaPz2CzCBi2\ncsw9M4fV1GPj6i1CQxF8fT6a+TT1ZBKD3cG+x86g6izovEE2NiosXNym78gc127kWV5e4pFH+jnx\nwGk01y5SKxaYOnkURyyG3JJwxMI0eyaa/6w3qCcIqGoXg92+e2+AenbXBi+I4p6tV6pWaZdKH4lY\n+A+TjGiB7wOzwA+APwDesUc+laqjKOrP5eJotRQGBhxEInby2Rp2mxZRFKlWJRRJoj7/GonFNSpB\nH0F9lYDXQieXZvX1q3RVFZ3FgmbuCV7+u1fpqHVcBQeHT1qot3tYnHYMegGfUQf0EHoKClo2bm+x\n8fw1dL0uTqNCq1xFlfQkF5aZGh9FU83Q3lmjP2qla4pidVnJrO4QNBhxx/oYfHCA5RfOs3NrGzWf\n4ua3r/LI5x4nMhwl2arjH+knfjODxWHBYdcx9slP0rBEydZgu25D39UwGvJgDJkRVYmtW0u8tpRm\ndMzLxLib5O0V3HYN6VKXbGYHS6LAyP4BLry0gmgw4PY7uHJxk9Bkm1bLSHR8gE4ugVH7EwGZPRJ5\n162fvyjI5/P89V//NdeuXfuwLwWn08nBgwc5f/48Tz311Id9OR84isUWL7+8STbbRKsVsVp1NI0B\n/PuC0OuhM1soltoYXWbGjkxSL1XxBaxIO+ssfPu72IXdaO7osWMEDhyg1DGRvpXCpP3/2Huz4Mjy\n88rvl5k3l5v7viORSOwooFbU3l1V3dUbqebSYpMiNWNJY0vDcVhy0NKEw69+sib04JA8YY1GdtAh\ncWSyOaLIJpu9L9Vb7YUqFPYtASSA3Pd9uff6AcUiaTbJJt1UNTk+TwhUJeKL+8977/l///Od06Td\nAqvTiiwr9FtqrFUTSIKM0yTTSayQTUQJj0VRhCSLd5PE13KUKwr9/Q40UgfR4aBezpN47yo7V65g\n7wuhG5hgbbvBavYmjumH8LvC2BsNOqKb9WSPjd0me5s58qkCzXIP7W6Vc48c2jcpmzq9HwgaFug/\nPMz/9dwK2VyTz3/+OHpvGBU9rNSZu3mHwKHDdHtO0okc58/3U12YYeXaPG67gNeuRrC50IydJlHR\n0Ol07l/PYNBKNPrRaAQAXnllnW5XRqPZP3I7fTqMXi/QavVYWMiyvl6g1eqhKODxGMlmG3SbTfLL\nK0h2PU2vm2Yui23yCO9czXP98iaSouKhxyZILJeIRGw4HCKiqGFgwImiKAzHzGyvZbAaZFD16AuZ\naTW79CQFm82A2y3isOkZG3Gwfv0u33nhDbz9PkzhKLtbZRpthVqjx5X3KkT6nTz2xCi9SoHMlbcx\nGs+hFs2YQ24EXZfV736HVqmE0ykiWrV4AjaMbiuZ27eJv/4q9v4IlomjXP/rbzP02EU2dxq4Qz7G\nztp549I2dbUFo8PC1SsJ7M5hTn36c1g0DRzefet50eHAMTjC5tuziLb9ddGJesKToxgMWjA4CRw9\nSm5pCandRqPVYvR60d/TeajU6p8bsvfPhQdZRQ947Bf5gNWq/6UzUpxOA9Vqh1q+gFQuklgtI0lw\n+ukTpLZSXH9zFmSZSq3HyKePoa7ssHnpEnqLBUEUsYwd4u2317GOHaTZU5NPFrh9qc7JE6PsJkqs\nLaVAaTI44sPksLOwVKRebJDN1mnmchyZsGGRSyiVXVTlAAajlZW5TTDZCMR82J1GdEYjGp2B/uNH\n2Mqr2F7aYmV2k1DUS6AvRn9NprC2yuHPfRpXyEsy2cDtsTF2wkdmfRP3o88w/84W88t7dCgQ6HfT\n1KQJBa00mx1e+s5t0okcqZ0QYsuBsZqj1jNQrSsIOj3F3RQD58d4qyezPJfkM89McuLhIXL1HtV2\nheB0gOFjQey6LK2eA3t/P557O/9fla/AryP+4i/+gi984QsMDAw86FIAeOqpp3j55Zf/iyIjiizT\nqlRYuJsjna6jUqno9WS0Wg2lchun00a3lKeWK1DuijR8RlbiPbxeDzG7luytHGG3HYMsIEsS3VYL\nk8eDRaNBo4FEPE8xXcN38ACHh0V2X/422laRvqAf0WZgZqHESu5txn7rKTqNNn3WFvVKAf9gBK3L\nSi5dZa6iYiLcTz0Txzs2jDk6RDyjkE2X0Tp0kK2wvVLm2d85wUayR2EvSeTAMNffWaZcaGEyasns\n5HEf/z26ipZ/+up7LC9l8cbCWLa2ifWbic+tk9txo6rm0clton0WMk4t5fgajoEoU1GBiUCX5cs3\n8GmatNNtMlmZVmWWgCQQHZpmbU/5MdHwR4kfJKJLksLaWoH+fhsDAw7m5zPcupVEUfYnIRcXsxw8\n6MNk0pJOpeg2G4SOD5Jpq8mle+jp8f7NPMW6Fhk1L722zdiYm1SqxuCgA0mSefPNOKJBTbdmx+m1\n0egJlPM1gsUaR44G2Es3yefr3Ly8uU/Qdndo5PNoNCoKqQLJoopjFw6gqNSs3l6nUawTC/mR6nW+\n/fUZPvmZU9ycSZFKN9mZzSAVkpx9/JPoqruEhvtpZNOYjBYQtJTi66g0ApJaT3xpl835HdwjIyiG\nPu4slPjEs8cJ74HbYyLgM9HMZfG6FDR2J067lfzcHIosU0ulGJw+gMpoIZ/MoxUNRKcPEjv6w4km\nz/g45kDgvv+Lcs9LBcASCmF0u38la/uL4uNBiX4KdDrN/dacyaRlYsLzS08l+HxmRoftvDK3ye5O\niVDIztmzfWzNbWAyqJC6++y7Wm5SKLXwqfeFWVqTCaPLhdYXxpKro3N7qGYL5FYKvPLWBodPxQj5\n9FRKdrq9Hla7GbNVJJWukUo1GT4wzPaVIjsbKfocPZqlAs1aA2ltkchQFMsBDwvf+DrpbGp/LPHI\nUfQeL7ff3+LCsJWyT6CyEydRtjD10GE0chtPnwe9U8LX7GKxipiMAlWnmvlUh3dulGi0DJTyFe7e\nzVA7P8jRoyGe+w+v4POZcDhNOGxaKrkCtXwFh09DpSzRaPawGE0Ighq7w8BAzIHd2OUzn51gKyOB\noOPwyX6iUSdazT75EPR6Stvb5N59l06thjUcxjM+fr8F+F8i0uk0f/u3f8vs7OyDLuU+nnzySb70\npS896DL+2dAql9m7cYNes8nKfIdCroMtEkHQ6+l0JA4f9lNM5amms7QMLgSNxPV3N9jerTEy7qOb\nrXHs0DA2qYAgOJEliUYut5+4anOxtJTnrbc2oVXh9AEjS5euUt3Yo52vopK6yArU2gI2jYzT0OG1\nbz0P3TZaeowczuEKnmel0ELbq7GtNxOdOokgmukoAt1cHp3ZQv+RCRLlNmq1hrraxo1//BrlYp2D\n/+5POPelT7Jy7S4Wh42RU1PUsHBzJo0xOkLIGKbT6pDcSGETfZw6FSbgVBOOOTFLZXbfeZ3jQ2NY\nRkZB6lKOr1NYylLY3MJgMaG0WmgdFszRMDqVhLGbZSgSZCvZuRcU+qu7t2VZoVRq0Wr12Nj4oRmd\nWq0iHLaxs1NhasqHSpGJTg2i0ah49du3CUQ9rNxZYW+vjdtnIR4vUomXGB52MDHhodXqcft2BllW\nOD0dIJ0qsbCwziOPjzBzeZ25a6u4/HZOnQyxG0/RHzSQ3ExyvbLL0797hhdfjNPsQjTkIjbgQJ3f\nxBiW6AVEhqxFtq/HOTzdx/s38ghSC005Sa1UJbmcIDwW5aGxCJtvvI5GpdBpNBj/7c9h9nhol4uI\nvgDr81kcYT96hxNF5cA1oMeoh0BzEXdBhappptdQmL1TwNw5QOBoDOUHpn6yTHV9iYPnTqP1hlFp\nNNgdxp/YtIt2O6Ldjt5iIb+8TKtUwhoK4RoZ+chT2X9ZfKzJyGOPxdjbqyAIasLhfYHVL4tarUNm\nt0DACeFAAKndoZwv4/OZKRQa+CNeytkidpuBbq0Gdg3B6Wn0Ntu+Ct/iYGk9w9Jrd9hbjBMZ9PDU\n504xc3ULbS3LJx7fT1yU2y3e+P4djk37qWdztA9EOPiIRHd3nT6nhOA4SiVbYOPKDNNf/iOW3ngb\n0eXBNzyARq+nUShRXV/lzJkhlPQSnUoZum06+Rb5JeibGKRTqTH/t/+RejqFa3AQczBM+Nx5Eokq\niixTyNeoFurY7CK5ZIFauU52cxerETpdhao2QNvnQNDm0ZpMOJ1d0ks5wiODtEU3/n4v02cGkUpb\nvPHKVdKFLqcfjpHRFSmXJtDpdTgcBsyaErvvvrt/vYBmPk+rVGLg4sUPHbr0m4Z//+//PV/84hcJ\nf4ich38uHD58mFKpxObm5m+cNueDkJqZobi+jtZkwuOwsX53G7VWiyMapVxuE/AbmR7yUGv7iG9X\nef+tFVSdHl5NkepaHr1VwHnxDOqdGqWtLao7O1hCISo7O3RKPXZ2KsTjJfr77WzdXULudBg5eYj0\nzWtIXYmNu3EET4zokUlSs3dJLa0jGnU4xS71lEjz2g18vjF2L99lYU1FczxCLDiEo1fC2xAYfuQs\nNSwIy7vQUSHotSiKTKtQZOnGKlsphYOf+QSekJfCXpbV2U0qJYXETp3VlRI6TY9g0Irda+fiaTem\ndpqFr38L8fzDAKilNrW5a9SKVVJ7JfQWC71GjWa7SSmZwRvx0TWK2AcGmPuHf8B58ChjJ84SGB34\nlSb0qtUqbDYDiqL8hI+WzaYnGDQzPR3g0LiV/PYeL3z9yr4IWaNBpdGwm6xhsYskEpV7br27/Omf\nnqZabZNIVIhG7eh0MHttnVymht+t5eQxD/qH+7AHXOhFDXffm0dqdxiZDFGuSmSyTcbGXaRLCgvz\nKYb8Cje/+RoD42H6AwbKS2vY1WqGLkwizSusv/0e1d1dXLEoks9Kp9kmtbBF6s5trMEAtlCI3NIC\n3oOToFah9vgwF/V0NQaqion3XrlDMGyhFGwScatZfvs67VqdA5+8iCZgo5TK0q770ZrN95+7gihi\ndjux+PZHqmVZIZ2uUat1EEUtXu/+JhPA4vf/zJiEB4mP9Rvjg8Luflns7VWJbxTILm6h3Gs57ug0\nPPmFs+Trai783qdo7qwjt9o4g04csSHmb26QWakQ7A9SXS+h1unRatu02z0217KcPTeIz23gjbc2\naMsaBLlLIVcFrDRKVYprqyTtRjpaHSdPTaOsXCNx6y5atUzfkUl8QSdFiwrR7ELSWkglK3RaGizF\nAlpNio25LXxHjrJzYwZadQxGHeHTp0jdvkV2fh73yDC7165hGShgO/kIo+MekjtF1Go1q9UmBp0K\nj1OPwaDBNjhEciuDyajDrNOxttvli//ys+RWVgmYWgxOT6D1RSiUu3zuv7mIoVfhe9+4SaenIhxy\noJa67KUarC0ssJXqMDHhxa2vMxl0wY+Y4LRrdRIbGSSNAaNx/0b4ZY/Wft3QbDb5m7/5G959990H\nXcqPQa1W88QTT/Dyyy/z5S9/+UGX8ytFu1KhmkwC0K3X6fO52R3ykUkXkMJhzBYDQzEHQmKGQlVP\ncrNCo9amvr1OvVzD6HRg1xmoJ/cwSRKppXXQm/DFxtndyiL4Texu52k2u6hUUMiUuHvpDmefmGT8\n/ENs3JrHoVfwnj4NjgDp61ew2gxotFr0VuO+zXy5iH1QzYZaJFdoU5wrYfYPc+7UEOIxB+tbNap7\nOaiXoF5AszfP5IXjbDlW6PNoGBgLE0/1KK/k8LsEJg+Fqc4UqM8kGBlx0mz2yKaKlJM6XvnabYIu\nFeFIGK3RiH1oCGd/P5vvvossOmiWa+gNOvrOPERyfhHTsB+9z4rDLtIuFXF7jDitKszVOEHfxEe+\nXhqNCknaH9MeGLATDFrui5RnZpL3SYlKBaOjbiIRO4piY2czCxoBlQyNnoA/bMe+WkOr1aDRqNBq\ntZw8GebNNzcZGLBjsehZWcmhUdnRm0Sq5TSpnSLxTArfYB8NtQlJqiO4AthNWjo6PbaAlWpLTb4m\nIMldut0e64tJwsN9BMNOqqu3qe0kqBb3RaDBh54hJQqY+1w4Qk6iYxcwCRLt629jMFtQqQUyCwu0\nq1XO/NmfohYECjspgmMDVAQ3t5eqREZDRFwy7e1VrDYjXruack+hl9ri0IVzrCylURuMhCcPUE/v\n+2bZotH7BEOWFWZmkszPZ2m1emi1akZGXBw/HvzQU2cPCh/v6j5CNBpddGYLerudVqGIzqDFHXQi\naAUe/eQEmdl5qoUMOtpozQO8c7NIfF0iv1VhNSkTGgwTGQ4gGAy4jV2kdptOqcDQ8THUXzyPwWqn\nurdHKV/l9LQXp0lm164QsnU4dCRG/eqLrL/xHhabCcvQAAPHD1DbjqOR2qQWd2ipRPaSNbpocR8+\nxmDIw2t3ltG5A4xcuEi4z4au10Ct1dIqluHebsB74hRJbYzv/tMs2v4esiDy1KciWBwidpOaU8f9\nzL51i8GYnZ3NPMuLGY4/epCHPjlJSjay0lUwigpOk5VhTRWLagcUhWouT3/IhMFkwOY0oXE6uDKb\npqay0u4Z99upzTxG2cCw00KnWkVndxAviey+uIpKNKPTaRgZcXL8eAit9jdfT/L3f//3nDx5kpGR\nkQddyk/gySef5Fvf+tZvPBlRCcKPed7IuR3OHPLR0kexRAdwuU143CLxjB5SJTx2LV6njrszZbRG\nEbVazeDkAGKvjMrsQApOoHL4ubahxqAkCTt28dv6KSb2SFoFjvX3oVXfoLixiTw6TejMw0z1BUl0\nXKTTJSStiaNnR+l2epR3d5AqeaYunGKpAT1BZORQANFqxRvykG8bWFrJsrSYpYfA8YtncWRusfL1\nv2P6v/vvcXYsZO/OkszexGAxIfYN8tKruzz9pTOMDDtZWnCg0WpJJyvEghqMcoX4whZb3Raf+u1J\n2s02ssWLMRjCPTJCYjOPymSjo2h4+ZvvcfDRE7iiMYqZMka3CXn1OtZQCJ1eS7dapV0uI3zE5nmP\nPx6jVGpjsegIBi33J3QOHPCgKArxeAmAgQE7Bw7se5moVCoGDkQJbjRoN1rU2mq6XXjqqUHK5TZn\nzuyPLFerbW7eTDI87OTcuQhmsxa1SuGhR8dQqdWY9AqhqX5kq5fRUQ/f//4qq2slGpU6R07FcPjU\nGKxmVm/EQaXiqU8dIhw0kl2Lk7n1Htpchu21JM1ag2qty9mzFzj4xBle/MZlLj8/T/TAAM/87jRi\n7yBqlUJuZZVOMY9KUSht79vthw8dIBg9ypUrSUKBDNE+K9b2Hqvff4OR6XFkWaZTyqGRImhFPUMH\no9R3E3SLOQw2G97JyR/rdGQydebmMjRqbXqtJl1BYHFBxusQCHp1iA7Hx1bb92tFRhRFIZutU612\nEEXhXvvpw11Yj8eEwajDOTiI7CthtYusbda5MV/FulEmYuvRF3WDSsXyRpG1Wym6jgiCL0K1I3H9\ndo6TJ0O0eyqsRhXF1B7Rx85y68oGpaaGSrcJvTbP/N5D9NZnmH/lCgd8bqantKhrG6gdVuw+J9V0\nlpHhIbJz85h9PkInT7D33AvEb93EOzVJUzbQlAQy+TZnHptkfa3A3toOcrVAtVjBdegoaq323vFM\niLKxn7tvrOI6fZHEegFZo8dZtPHF3z2CVWhS2Uty+e1ZzH4/R44FOfvICM6wn2yxy//xF2/x+OMx\n/HYV737nfarDIgd9NbStOn2Dg9RcUNpaZm9dxn3iYZqlHtqgD7m8v10xOBwk0znGAnqoVmlqHcwv\n7GEdGEbDfiT74mKOUMhKf/9Hp8D/OEJRFP7qr/6Kv/zLv3zQpXwgnnjiCf74j/+YXq+H8Bt8hKYz\nGnEND5O8dQtFllEkiV4hTeyhYdyj+y/S1O3bdBsN7GKXQk3m/MNhRPkA9a6asXEvh8ctFBbm6TpN\nbNVM7K7lqGRWKO/u8MX/+iwWd5lPfWqU2cUiPUuAZ/7bp1GVUhQ6IoWWjtuZOianwIGpAObRp0i8\nc4mNS+8RcBkx2yyE+u10uy5qtSGuX45jtnTZ3i4RHPBilGvsvn2J8NQ4mwsNzAETKp2R2uYaQiNL\np2NEKzepbu6iU/UQtUZe/Pp7XHj6CANBHUOHhqiUAiRuz3Pz0hwauUu7JVPXuqgVS7jPfRLcNjyy\nzNr6JVSimXypTn43R77Y4ebaCigyK/Myj54doXvv8arR6dD8Am6dHxaRiJ0P8twSRS3Hj4eYnNxf\ns25XolBoYjL1sNtFgkErR0/0s7ycw9DoUii0OHjQx61be0xPB1layrG8XOSxx2IkEhVu3UqSTNaw\nGeHxxwZw2saZuZFgYDKM0WqiXm8zNuZGp1OjVqvp77eh0ah47/1dJg8G+Po35lhdK3HiRIiLZ/1Y\nMxZmb6TY28ygNxpwBPU0K2VSHTO+fj8mUYNO3eTGa7d49OEjZL7+ddSKjN5mJXbxUaRGk1J8k/jr\nrxP91330WRto4nGSN1o0DT124xmsNpHQ5DjVXAnbyDiy1kRlaQaxz4vS7dAul2kWiww+8QTGe1lM\nlUqbUipHaWuLXrOJ3mqlW6+zJgzQNlUw2O2ETpz4yFxTP0r82jyVFGU/W+bu3cz99tPgoIOTJ8Mf\nat49EDBz6JCfxcUs5qiba9d20ZqsaPR6thaW2G7XefzxGMZeiUqlTbfZRDL3mJnJARCN2qjVungt\nMo22jkO/cwG1aKLZzEGrh8/jIbvbYO7aCr56gqOPHUPvCWHpH6C0nUAMCbhOnMdr0GMKeqhsblC6\nehmDy0v4+DHCJ05g8Aa4enmTt1+4wfFPWzl94RwrW6+RWNvCFfZy4l88QXy3TeDYGbZSPXRRP+Wy\nlcAjQ6xtVJEV0Bg0xONlTp6KMDzi4M3NHIWawubVJbxDbcqSiNbdZGDITTZbZ+bmLp8678QgSJTV\nTjZbBjJrWR6OWbD4vTSyGVzRQVyjY0QMCpWWio5q/wxXqxOJDjvRmfNI7TZVwYToD6PR6e5fd0lS\nKBZb9Pf/qr4ZHw/cuHGDVqvFo48++qBL+UB4vV7C4TC3b99menr6QZfzC0GWFXo96UO3mT2Tk2gM\nBgpra6g1GpzDw/czppqFApn5ebr1OnqbjckxB7LBgrllR6vXYhC6SNkEisFEqa5iJ54D0UwtlwcF\nRF+YeqPL2JCVRx+LYXbYSW27aDc73LqT5+adPUqlEsX8Kg89Ns7nnu4nduoojoAPtUomu7RM4up1\n2rFzrC/uompWSGcyeAYjvPL9ZX7v94+g2MOkigoqqwz+Yc78j/+WbL6NYBvGU8jQfPMN5PQeWo+J\ng0cf4p+eu4No0CCrBHK5GqtLGQpbFaxeJ+VMAcFkoNPuYQ55oddi5U6ekfEhrINJ1OIuPUOL81+e\nIlWC6nYKW8CH3CnT0drYWNxg8mCA4PGxB+JFYTAI3L2bYW4uQ7PZRRS1TE56mZryMj0dxOkUWVrK\nAvueJF6vmdXVArGYg0cfHWBtrUAmU6fblSmXGiSXU5T20jx0NszZsxEiUSevXdrj/WtpcrkGweC+\nf0ouJ9Bs9tBp1bz00iqtZg+VCm7c2MEoqvns1CjXnvseE489TOzxi+itNnQ2Bxt//y5avQ6jDgxm\nK6LXQ0U28eSf/zmdahVZltAaDJR3dykn05gDQUIhG916k9vvzNOotTj25ElO/e5nyC8tobG5OfWV\nr6By+BBNAvWOBkH8od9Wp1qlnk7fJyN6QaaZzyHLoBVFGrkc5c04urNBOtUq7VIJQaej//z5j12u\n2K8NGcnlGszN7RMR2B8JW1kpEA7bPpQJkkaj5ujRANGone3tEpubJURRu/9vWi3VbJNsoYNDaeJ2\nOTFYjOwUW4CCSqXCatVz6piH3GaDnkki4u/y7jt3ya1sg1qF1qDF6vfRkWuMXDjL8luX2XpjDtFq\nZuDMKcotI5mVDFvzG3zp336BzMYO6kaBVrlK/LXrOKJ9hJ/4NLffmccUDOLvc3F9ucVszsqZZ55G\n53Bw6UaVgQE7t/MupIFj1M0qXG4n828n0GjVFIsdeg01EbuK+EaRdquDb7APx8QU04MuYgN2JElG\nLZrQWmwIgorcXoFCuoBOFCnk66iqdfbiRbJ/9z5PPn2MgSMXuHotyc5SBdFiJp/M43Rb2dnMYzCL\njI/HIBxm6ISeTElhMRNHrVahUqnodCRUqv2R7N90fPWrX+UP/uAPPnY3+I/i/PnzXLp06deKjGxt\nlVhczFGttvH7zRw44MXpFH/mZwSdDu/EBN6Jn9Q4dBsNeq0WAL1mk25iC43BQGg4TLtSoVuvY/D5\ncJ0cJXUlw8HzR5BUAkOjfgwWI/M7EF8vES3qCBTUCIY6boeOrizw7e/fQqMGUa/FHXBS3kuzeLmO\nEr/J+pXbDJ85hiviRynu0Wu1yGbqGJxeRLGD1iCiEhok0zXUei1SJc/auxtMxfRsNRXSNZGXn3sX\noyBz+NAU5bl1NPFdRg51OHA0RnFtlbMPnaDS1lJvynRbbYyqDr6BMD67CqmYQjYcJXn5XbwTE3zv\nP19nN61hbGiK8EEn12ayyFRQ22Vk0cbE+cP4+tVorTbcU4P4Dv7wWjbyecrb23TqdSx+P9ZIBOFH\nNiAfJVKpGjMzSdr3csNqtQ4zM0m8XiOg4sqVHer1LtVqh/ffT+DzmZEkiffeS/CD07ofxIiYzTqk\nmojBZqFvrJ/562tIjRpDse97AAAgAElEQVTVGpSKTTptiTt3MrTbPf7wD49iNsmIRoHr15MoQMBv\nBkUhk66ie6KfT/wv/zM3Zsu8OZMhNTfDuWfOUsWCejdOr14hcvoMqYVVDg4eJDkzQ3V3B2tfBGsk\nQt9DD2MdnSIzO0d+fYvI8ABT01GK6RJUc1TVVnSDU+gmppnfbpOcT3PxtIdSrvKB16mez9MqV6jk\nJIxOF3NLJZxuC0ZVnQOnD+CyKGix0czn2X7/fTQGA9ZQ6H6o3scBvzZkpFpt02z2fux3sqyQyzV+\nLhmRJJl4vMTaWuFepoLxx7opRo+HXq1CvSGxu7SDNSgxfGSY/N0GKlUNu11PKGSlnk5jM6nRDwbR\niRp06i5bly9jDQZo1ttogjEmTh2guLfO3etx1EqXWrFGsXqF2BOPM/rMswxfSNKTu5hiE5DbhF4b\nm62O2edD1hkxev0cfeI0keEQ3/l3L2G0Wbhyq0ilnKJdLWP4win+7/90hwNDJk4esuK3Sli8bqh3\n6RsygCzTP+hCliU0GhVGk5ZnPz+FupoluTxHrdZEJ6ixDo1hFkWsMQdOoc7C7A6xkMzu2g7WSBSd\nycROXiG7lKRSqLNz7Ro2m8jUhYfQO13UilV8gz4SiTKJRJnHHosRCBqZmPCwuVmi15Pxek04HAYC\ngV+dAv/jgFarxTe+8Q1mZmYedCk/E+fPn+drX/saf/Znf/agS/lQSKWqvP321v37vlhsUSq1ePzx\nwQ+dU1UqNUkkKtRqnX33UKuI0e2mJotkqzJavQa72MPi9xN56CF6jQaC0UhXUuHa1TCzUGEvUWRq\nyk+u0kORu4yOeYhv13jvWoa+qJtMusYf/esjfPozo/TqDdq1OnK3Q3onj6KyYO6PMSHq6bWa2AaG\n2FM7CI/38+TnnTQaEiqli9yT0Io6VL0OWrrsrW8yODmAQd3jrf98mdjhEQJhB/OX5zFadPQfO4zN\n5yKzss6Z08fpFPPMLhR47/Ief/hfjeFSdGQSFQZjdsI+HT1dPxvz6+TffhGLUcBqDbO+nSddhmKn\nzt3ZFIOjPlLtLoaKDkeqy8q2xOHDQ3RE132dQSOfZ+P112mX9rUc+aUl3BMT9J0+/Ssh4oVC8z4R\nAXA4DAiCmp2dyv7GSn3PL8OiJ5vd37CePdtHsynRaknIskwu12B7u4xWq8bqcqDRa5m5mSC5mmJ4\n6gSGdInTZ/q4eTNJMGRhbMxNNGqn3e4RDln59rcWCQattKo1qjUJq0VLQ9KR6jp492YcVaVIJlVh\ney2NJ+yirlcoL9xGarfQWcwY5Bo3Xr5CqN9De3mFSjJFu1rD5PMTnj6C4AlCq4rVYSa9sIxi7Ucy\ne3BE++mZvBhsFaw9AYPNxujRARLrKSRFhc1pwRv2oADx118nLzt47YVZ3EEnFx8foVBoMDQcI6JJ\nsv3K9+k1mxjsdnyHD1NLpcgvLxO9cAH7x6Rt/WtDRgwGLVqt+r5JDuwrrG22n7/rXlnJc/nyzv3o\n6R8lNRaLDo8nQMFmxuwWsDkMaHRazOEo1kCLsTEX5XKLRqNDvqHj6gu3sTtFzKKawdF+xp98hFq5\nTqXUJCT2ODhmYeW7CerNLlargUyuRWl3B8Ngkt2CmoGIF4e0jTvWh2ksiNSTGHzaQbPWRIwF+MJX\nPot3IIwvaORf/v4xJLWBtXiZhZkt7t7IojPoabd6dCQZv0ekk0nyW589x3P/cId3Lq1htejZSRQ4\ncTqKyWCnsBFnNAC3vvcihc1dtGYzlkPHWL98i5PPPMGt+TLHL4xjoMnM996i3uhi9PgwxWKoDTqq\n9RrVZJJ2pUayo6JwdQt/uIq+V6PV02ByOwFIJquIBjWFQpPt7TLlchuv18j4+ND9DtRvKp5//nmO\nHDlC5BcInHoQOHfuHF/+8peRJAnNx1TE9qNIJCo/sQHJZOrkco0PNWVXLDZ54404+XwTgPn5LMeP\nB5FdI7z2zfep5quo1Cr6xqN86pgPQadD0O1bpKdSNeLxIoJej9Fuwep2sLwSJ+RWoxYMvP/uJkaL\nEZPFRCBoIblT4b1XF7j+zhKBkI1nPz9FLKzj6HSIpW/fpLGzhTs2QLnYJFtV0Zjf4+7tKgvLRTw+\nM4cmXTzz2XGa5TKUUgxFDjM+6mLx/Vs0Gj121pIMHDrMXqqOrDcz/TvnKCzMou01cLr0MHqKO68l\nOXTIx61/eoXFmTgaQU36joaQ38DQ2RP4nDpkt5W5l95Ac/AikWgIlcHE3l6FZ549QDTq4uqVbXZT\nDa5e3cXlEqnXOxw/HmJvr0qr1aO3vUSzUEB9r+2gyDLF9XVcw8M/NSyvWSySX16mureH6HLhHhvD\n7PN9qO+AwSCgUoGi7Afz7exUmZ9PE4s5aLV6DA46MZu1pFI1Mpk6arUKq9VALGYnn28yMuKi25Vo\ntXrU613KpS6nz/hYv7tJKl0nX+qhFQTarTanT4eYmvLx3nsJbt7cQ6fTkExW+f1/dZS11QJrq1lk\nZOwOEcVgYS++RHY7RcCjI3ZwkOzGNqOH1Bx+dIzKpBeX345WapK99gaSokYjipgdZvZu3CS3vEzw\n6FEiD5+jrm+zFy8x/tlPM/7EOW7dyrC6XqSWLTDcdTJ1MMDZc3qkQpKyOUxmJY/UbNDVWggOTFLd\n20Ot0bCyVECSVazfWMSRqWHzudheqqNqze/75GQyKLKMNRLB4vcjtdvklpb+fzLyi8LrNTE05GR5\nOY8s7wdcBYOWn/lQ2jfRaTI7m75PRGD/ITU46MBgEMjlGty4sUe3K3FjpoPbbWR42EH5ZgpR1NFs\ndrlxI8nkpJcbdwrIag3lTJG2RkLXLvG5f/MJ0ts5CqkCJr1MwCazJghgcoCoxerU4e03Eh1w0bO7\n6BX3yGzNs3H5BqHBIKWmisETh9F6w2yVjaAXWVivsrldI2Dt0aqksIt6Dp8exul3otNrGJ3wMjLu\noaXSUa3ukrmzw8p8AjpttlfzpHZNWK0iZ0/68Q1Yib/1FvlUkW67S3JtgdxOlvDFT9ApFzk85cOk\n1LDbjcgGC56oH9fwML4+L+E+M7l8i+qWCkm0kcs18Hi7tBttkvE9+uwhRKeC166mvbnIjfcS1Otw\nbGSUbNdLtdphcTFHNOr4SHIsPq745je/+WthKub3+/H7/czOznLkyJEHXc7PxQe5fsqygvL/NqH4\nKYjHS5TL7fvpsrBPmjOZNsa+GDpXA5UgIFusrMVrBPtcpNM1rlxJkE43qBRrjPQbUDVLGHQgtRo0\nWibK8QLZZAFzo8PQRBC/38zltxZxWDUMjwdAVrh7N8u/+aODJC69QTGxg8tpptVsk33/KgOPXOB7\nL26gdQUYn/CgM2ipNBSMFpFGqcqBAT3JxW023l6k3dGhVilIikKxrrCb7uDtt5BaXIPdLZxHDtFU\nGUmsV9jaLHJ80sxGIo1R06XR6NCUBEwxM2ahQ9s7Rn1Uw8iomw4GvvNmHo2hg2gUKOQbRDxqRgIy\n9ZKE07k/mj8+7mZ5OUcqVUMUtRiSW/QyZSIR2/2RfanTQWq3P3ANOo0G2++8Qy2VAqCRy1FLpxl8\n/PEPtYZ+/34OTibTQJIUbt3aw2LRYTLp6HRk3n8/waFDPprNLpGIDYtFh8Gwn8heKLRIJEr4/RbU\nag3FYpMLF/qxWvUkdmr4hgy0Oz3OnAnznedX0Ok0fPWrtzGZdBgMAul0nZERJwNRB+NjTtrNFgfG\nIthEhXyqhEGvQdDpcPhctLeWsXlNqHUCfX4DjrAVwWxkd24Hnc9D33ANq8/L7b/7O9rNNrZgAHnm\nDt2uhOX0J7h6dQ+pVODQI4dZWEjTrZQJHxihtbvJjY05vE8N0Wp32d2rYpk6Sast0TNbiKcV+o0a\n1IAky6hUakweD5Ik01Xr6RTyOA+Noq7lUKlUiE4n9XQa6Z6Gqtto3AuMfPDHy782b4gfpDSGw1Zy\nuQZWq55w2IrJ9MFnlYXCD0iGzNzcvvteKGRBEDR0uzKFQpPHHovx6qvrRCI2EokK1WqNzc0yrVaP\nU6fCvPjiGuVyi/FxD9NHPHz32xl8g0Mo9TITMQONpVvM/cPX6RSzGENhuu4Qid0GsckYrXqLRqNL\nqqegNtvItETIN3DKbQo18AzHyKeyVKptksvr9I1Ms7na4O7dLQ6OmKnevcpip4rHZwXRjOXACY5M\n9zHz9jyPX4zh8lq4+85dDg65WEgWqe3sIFhs+P1WOl2JeqmC0awjPZvm8mt32V7YwOq2E47FUNcL\nuPUN9J0S66/dIfz0cQ5MuunwKJWGwtCoDaW0hl/xMzzsplKIkUiUUAsdHGY1sZiDVHyPbElGtDdQ\nNpZodLMkk1XUKhWF7V2Gnnicel1Lvd6l0ej+xpKRRqPBK6+8wl//9V8/6FI+FH6gG/m4kxFFUQiF\nrCwu5n4sIM3lMv5czYii7B/fNptdwmErs7NpdncrWK16zpzpo1Jp74sxf0SQmcs1kCSZS5e2ePPN\nOBajivmbcfrCZoJuHeXNOAcP+dndrRMIWjGZ9KiR6QuZqNXadBstWqkdxmIeuooGldTFpG5SLGWg\nuEdyu4reE8ASiODoC5Jq59i8tIZgEDlyIkI1l2N1Totb7NEzWKimM7RaEn0DNlK5Nr7xEZKKlTNP\nHeH4pAX17gLWiXFWltJc+vPnOfsHz3LsyCCVQhmrTc/2bAqLw0xswIla6ZHYqbB14yrbd5eJ33Zz\n/HNPMDEV4PnvrXHiiAdtapHFlQyqXheXWseppx4h0zbj85lZWckjilpUqh5OX4CVO4s47Absjv11\n0JnN6G22D1yLRjZLPZP5sd+1SyWqe3sf6ntgMuk4dy5KIlHmzp00kYgNt9uEXi/c65TsG1dubpbI\nZBqIokC3KxGLOVhd7SDLAn/zNzcJhSy4XSJbaoX1jRKlYpNStkyu2GZi0s/5C1F6PYWdnSprawUW\nF3O4XEbUKNgtAs/+VoiI3CY5+zqlpoy9HCEyNsmRM0PUciWixyZ5/+1VHK0q/f4tVpZm8DoEOrkM\n3slJDEcPk711E3pd9AYBs8dFbS+Fye0hpJeYPn+AuZfeYua9BRRUaLo1crO3EUwmurUaxWNuaiuz\n9B15lJde3aRWbVFpyPhjYf7VvxiD9DZDQ34Sa2naXXAEfdj7+3GpbTjsRQxTUxSMxn1L+E4HjV5/\nv0v1YYhIZWeHwsYGUruNvb8fWzT6keuEHvQb4n8FjgG3gK/8vP+s0wkMDDgYGPjZGpFeT+bq1V0K\nhSZWq55g0MLlyzvo9QJ+vxlBUDMw4CCZrLG1VUatVuF2i6TTNer1DtlsA0HYTz1EUTgYbKLObdPa\nmKdSczB+ehJR3iOTyeKwaKjlChiGD9Gy9XN1tsypc0c44vawPbeO4pIQQ1HuLGSp1ns8Mm3B5LBw\n4/UN+gbD9I2YaauMrKyXSKV6qBQFQ2Wb2ZkF/H0elubTOJ0GhjGwV5ji6ENj+HwizUqN5PI6NuMA\nff0RRg5GSO2V0erA77dy8kSQRq1NudjYN8bxOskli5gsIv02HQaTSK9SopfdYf3tLpMXTzE9IaJ1\n+sjOzSPbnCyvFNDZ7Fx4fJyBqI1uvUHIL6KoBWqc5PZilaMmiepyioYgI0kK9VYXjUZNdTuO2T+J\nKAqYTL+5xzQvvfQS09PTuD8m+Q4/D+fPn+e5557jK1/5ubfbPzsURaEYj5NfXkZqt3GOjnJi2sfS\nSolms4vDIXL0qP+nbkB+8DdmZ9P3u6FvvBFHkpT7mgKXS8Rs1tHr/Xh3xeczkc83WFrKosgyNpOO\n4w+PkM3UGDgYorqxgkFdZOxsgPReif/hf7rInZsJkmu7nLg4hbrToCqXScyvU0wXMZhFVH80SSWd\nRW41QS0gtdvkl5YZ6TZp5POopA6Rfh+ZZBnUGuxOK35LHcvkJBNHByjH16llsoyGR6hr7XhUIjoh\nSCe/R9MUJdOEVreK2QCtnQ0C4TA1owOL8wi59U0sZgGXTSBXg36Hg603rtNDS7PVpVqXuXF1B0WW\nMbdTXHn+bSx2kQsXYiQ2UmRv38R79iKCoLqvz+l0JFo2N+HDB+i1c6BSobdY9p2qLZYPXA+51/uh\ndfmPQPqR8L2fB6tVz4ED+yO+tdoPPydJCkNDDur1LqVSG7NZh0aj4s6dNDqdwJNPDnH9+h4njgfJ\nbu7itxuYuRFHQYXHaSbg8WCziawtpjjzUJTvvLCOz2fmrbc2sZj16HVqOp0uY/168nN3ee1/+yqN\nQpnoWIiiRU0pX+HY1ClEa5AX/mkOfzTA9EE766++wPbtBU48eQKbTkN2aZHxz38BnUamsrmG1mim\nUSrTUwvUu2qKlQ6vvrXIiTOn0Bl0vPbyCp6pEOW7N6DVwep20ErtkN9MYIsUsdoMSIIR74AJm9dB\noa7CZzRibZYYnQqzvFFBbXVhNms5NT1M9vKbqDQazH4/vXabwLFj9JpNStvbaE0mDDYblmDwp17/\nciLB5ptv3hd+l7e28JXLhI4f/9Br+GHwIMnIUcAEnAP+d2Aa+NAxp5Ik02z2EEXhJxw+y+UWvZ5M\nudwmn28yOurCYBBYWysiivvufuvrBfR6gXK5TTJZpb/ffi/7BqamfDgcBiIRG0eGBFa/+Z+QO12G\n+2PcvTLHY48N0JhbxCyXEdVm3M9+kee/PU+9XcA+doD/8z9e47c/f5B6wEyxmue1F1Zp11v4Qk7y\nZRGdpMJsM1Ir12m1JR56dhrDyBjdy7s4TbB36xoen41Mukan00Nn0LKzvInNEmH2WoGRYSf+sAOd\nWuLlb17mk39gIxC2o1X1cLit2A0SB4eNXHn+XXRyC/9QGIPVjNntRtAohE4ewhoM8MZ/+BpWiwFR\nDKEStIRPn2Lv6lWaBhfvvBanVqmjSDJaucmFTx5h/ep7bC1p8Z17DHQi58+78Fo6rKdrdJotRkZc\npJLV/fXpdDGbdUxNeT/2zn//X/CP//iPPPvssw+6jA+Nc+fO8Sd/8icfm9bsj6K0ucnWpUvI3f0o\n9Ho2i+/gQX7rt47RavXuvWx+tptvNlvnzp00nY6ExaIjna7RaHSZnPQhigLttsT0tJt4vES9vu+i\n6nYbGR1102zuv9RMRg2zs0lWlvOgFshmGjx63EJETFO98yp62czomZNEAkN0FS2BiAu3x0Rh0Ekw\nusnsu3eZODqAIIpIZi/miJpSIoFSL+MeGKZbrXD0aIi1zSq7u1UK2RLTZ4dpN1os7+Yxzy5Ta8pU\n2hqsniADFjvajsTNd+6wfmcdbbOARpGwRGMEw05UBiNyt8f8O7ewhYKc/Mwhon1mNm/exREK4jV4\nefubbyHodfTaMirRgs5kYH0xia/fR2VrlXq5itLrIHU6RGNuenQIezRUZYFyuYXdvt8FSWU7hIYP\nMzm8r5vTW60YfkpXBEB0OtFZ9k0RfwCNXv+hNSM/ir4+G35/kVTqng26oGZkxM2tW8l71ucCjUYP\nnU5Do9aiW6lg76Xpi/Qo2A3UJAmjUYfFbkRQJPaSFWbeX2X1jgmt1ODIVJh6W2FpKc/6Wh6NGi6e\nD2NoZWhWkhjoIOlU9Nod9jbS1NopHjl2DK3JSa9SoJguYpzSU8/msDisVOsSys46nWqFwLETOCcO\nEzqZIDlzG/vQICaNEWtflJ1UB4vdzLeeu8PTnxpDQOL21XXcdi8Bu8zAoI3a0jt4Dx3l2nyZF747\nD4IeYzDC2UdM6IwiNleU7NVZxqI+Dj08wV5ZoFbr0FCZ6H/4YfLLy/TabSLnz9MulUjPzqIzmahs\nb9PM54k9/jjmn2Jol19evk9EYF8nVFhbwzUy8jPX/hfFg3xLnAReuffza8BpPiQZSSar3LmTplRq\nYbHomJryEYn88KKk03Vefz1OLtcA4O7dNE8+OcjTTw8zOupiZSVPtdqh15N5+OEI83MZ8oUmgiBy\n8mT43lFQk2DQgo0UUn4PZAWfVc+Jr1zEqm+wXckhdOt02gZuvzpHKV1AsDgxWc3spPJ8/7vzeLwW\nXv3ePLHxIH1TIkadjKQSsMcGkZdzuGP/D3nvGSTJed55/jKzvPeuu6raezs9Mz2uB2YwMDQ4iNJK\nS1EnyqxOp927jY3buI8XQd6ni71QaLUKKbQX0koh7ylBADUACDMw423PtPddXd57m1X3oYEBQQAk\nQPA4xN4/oiO6Kyu73+g3K/N5n/dvPIzP+jFoRVrhNaZ6TOypjbSqXZQSKro7UC03UGuU2AMe6g49\ni3eiGLQCOrOB88/NIX17kTsX73DimWMsPDaMXq/ALJXpVEpMnhyhsLOF2tuL5oSBQqFGtdLA4rfy\nrf/0x7TrVbRKgWalyvZ+mZQ+jbrQ4P79FMVMHrnRILO5SSWVwu4y4XVY2VveRbmxTmD8NOVyg1xD\nydB0L/GtPVqtNoNDDixWLb1np3AM+X9gS/3zjHq9zre//W1+8zd/82EP5ROjq6sLvV7PxsbGT5xT\nbHp9/UEhAkCn8+CmZ7Z+sBvaarXJZqsIgoDNpn2gqigU6tRqh54QkiTgcumpVlvodAoCATM6nQqX\nS0/QpyZ+kEGhFPH67TgcOpLJEkajClFQc/tWlGqhhMVuwKRps3n5FoFTbjS+IEurVYqLMTxeI2aH\nhTf/ZZ+SrCGVa6HqGPjl/+PnSe+FiWRFqj0naJXLeGY6mOQUFqeFZCJDb8DI1HwvS/eS6Ixa9Hol\nq7e32bp2j+OTRiqhbWZ+7qfZjtRRaDQIQoPl6xuYjWqUopZGLkNxdwvNyFnG53qwDA6xfnGHoRkt\n9b1V7v/5X6JzOJFsGg4OZArFGqJKh95ioFLr4LCqGB7zoDSZMbTsmBwWPG49SpWCfK5ES63m7v0M\nSqOMxaKl0ZCxWjW49Q0MzRhCyYLC2vWxD6NGqUQtl0NSq+manyd+5w6NUgmFVotrYgKD1/uprw+T\nSc0jjwQJh4uk0xU0GgWiKHD/fgKDQYVaLVGttqhWmmze2yN0a5HwTpwjxwMYKhE8E5M8+WQ/giix\nux4hH6tQLRTpOuZjZyWEvduNICh49tlB4FCNMupXcO0v3uDUqQBqrRKtTonDbSYPqNVKUqky9WoF\njcvHaMCFwetC4Q4gNBrozAYUDSMCHRrZJOtvJrEE+pgYHaVRrqI0WdB2Bbny7SsM2n3UutTUK03G\nutrMTU/SEpRMTPuQVy+xdjWN7qiddLGO0WpCVKmw+sxEIgVa5SI79+6wsZqieTeMWrdC75NPUkJB\nLFZm6Gw/tv5+Op0OpViMjcXFD3SymuUyhVDoY4uRRrn8odfazean6m59EjzMYsQCbL/7fR4Y/yQn\n5fM13nprj1zukDBVKNTJ5+s8+WQ/Dsdh9sPubu5BMBAcqmdu344xN+fDZtORTh+g0SjwGhpUN1YY\naOWZHvLRc2wQd8BNLFZibS3N9LQbW6XC8NwIjVIRk9OGUU6x953buEcGePPGKmML/TSLIkqFhH1o\nEFlSE4lWaFRqHJsPUi2VuXt5ja7npmnls0yMD5NuKrHNP0qXOc/6tWVsZonoXpLxLzxJvqRBaekh\nsxmnWq7hcuqotwRUvn5eeeuAaqHM+o0VjhclfuqcjflRJWqLE60Qxi4XUbT0XHtpkVJLhdOqZOmt\nW9h0bTpI9J85Rt+JY+y/8iJKmhjsOvR6BaVSHUmhZXnxgAGPmVRi55DYxGHWh0KnI7UfY3CwzXC/\nEZWihH/MztZWjldf3cFj6qahKKChSVevm/75aVxT4//dh+VdvHiRsbExPD+hwVMfh1OnTnH58uWf\nuGLko25unXabtix/4LVstsrVq2ESiTKCAF1dJo4f92EwqNFolCgUIq1WG0kSCQQshEJ5LBbtg0JE\nTO2y9cYrZEMxlEqR2mA/ncceQx/s5+TJbm7ejGKymtBrBLq7LTQKeYq5MoIzwOv/cBm9WYcqWyae\nUbBYN7J6ZwdHwIt7dJi7lw+4UMlz5pF+/vnFLfI7mxikGrTbjI27GVPksPd3c/fFe8SSDd58eQPJ\nYEYptrEaBCSjGWevj51wiPBOnMUtgZoYY+GEm56gmVgohcasQ6xUaDcbdHcZcHUPYhkeIjjRx8bl\nO9xZ3kPV1Ue9WqYld5hfGEAWJEJr+8iSmkC/B4ppJqa7uLeawzw4QHcoRLtWotZok8m3sU8GWNqq\nkkymOXs2wMKCH0UpQeLGLQSFTDQhkVo5XHWbv0dFltvbI3z1KvViEVGhwDYwQO8TTyA3Gii1WlR6\n/ae+NprVKum1NbI7OwgqLRq1i/ubh8ecTh0HB8UH5OZ0PMfIEQtLl3cpl+rsbyZ4+sl+PF4Fm4UO\nF1/fJLST5Oh8kL4RL2a9SKEo87d/cx+T3QSCgE6nJBi0kiq06RvykM63cQa8NJIRlM0i3h4/iaaF\nzYMm4fg6x04PsbOb48I7WbyuIGtvXKVRazAx7MfZK9KqVUldvY7t6WfYeulNJKuL5H6MTDzD6Bef\n5ubFyxh0Ovr9faxlOkQjWaLxGrFIFqdCYPLnvkZFYUbZ2UejkJFpoVGJ2MxKhHoZ2m2alSqNhozT\n70JXCJELpaFmoRBRYvL5EAQBudmk024jiCIqgwFBFGmUy8jfvQj4HliCQcrx+Ade01itaH7ELq4P\nU983BcjAMjALqIBr33X8G7lcjitXrvDGG28Qi8UwGAyUSgIrK2nq9RSyXEGh0NFoyLRaGQShjsFg\n4t69OJ1OFo2mRaulQq9X4vXK+P1q3G7HoVQ3s8P+9Uuk1kLkknkyuTgaqcTA1BhOtwm1uoQk58he\nucTm898inoiQCu0RmJ4hen+Fdv8Qks+PXG3gm5pCcFnQ+900GgoymSp9faCQcwyM9mJ1Wxka09E/\nYsKo05HIygT7RGLrK9TTRSRRRHCZuHLrgNhuAdHipmvciWRQY3H5OPqlBV65EUVrlHC6XQiSRFuq\n4u7W0drdQNkoUGhWSWeyCDoPb1/coGfCwebly1BvIxnMuMY9tKmjU6nJ10Qkh5aGJOHr9uI+coxL\nb98lvrfHwMwEej8/gZ4AACAASURBVIsRWVFDa1bSKLcRRInucQcaRRU9Mn1nTxKLx9m+fp1hj5K+\nPgvGgAXLYA+Dp04SmBxif3+fXC6H2WymnEyyvrRENpPB8W57dnd3l1wuh+XdC3p3d5ff/u3f5hvf\n+MaP9SL8LPjd3/1dpqamOHv27MMeyqdCJBLh1q1bfPnLX37YQwHgm9/8Jt/4xjdot1oUDg4+cMzg\n9eKamHiQN9PpdLh0KfTAy6bVOiSjS5JIV5cJrVZBsVgnmz28MQ8M2LDZtLjderxeI5N9SnZe+heu\nv3yL7bUIyWSZVr2G1WHC7PMgNCr4nRJOrwWDWU9bbmG165HqRQKDXl7+67c5fX4CZSGCza5jc7cE\ncot2rUyn3UGlViLRwmQzsbGRYvjIEEpJoFkuUq22mfvyWTqZCI6hQfJViY3tAnqLkXIqQy5dZPJY\nPz22JomDFN2jvaitNgRBIBXJMn+6n53NBGajGv9IgKFjw0xOdxO+cYuV1y/RNTbIznYGuVzk4O4S\n/rkZTH0DNOMHjE14OPLEcTw+E1aLgrWDNn3jQZKxIvuRKifPTzI21UW+rmDokVNs5PTsh0p0OpBM\nVjj3aID23j2EWolWpUyzXKZVq9Fut7H29T2Yn0a5zO7Fi9SyWeh06MgylVQKrcWCJRj8gDvze/P+\ng9DpdDi4u0x4L0NJVhMLpbn4929idtlYXC/jdOoxm9U4nXq0WiXDAxYi6ztsrkRoNmRMRgXjEx5S\n+3FC+zm0Rh0KtYZ8rsaxYz7SiRy3F1O0RA2NWgOzWcfOTo6BfhuSSkWhAmIhTlefh2q1ic5mpWd+\njr6FU6TyHVRKkdEJLwfhEqFwGW+/j5kTQ7TqdXrmjzD21OOklhYR1FrKsSiJpSVMdgupvQj5ZB6d\nUYc92I2iWaSr10VZYWNpMYZSqcBkNdLRGhH0FgJ+I2uLu5itBrx9fuw2NYOjHnpMFTTtMjq7Dc9g\nAE2rxMGNW+RyVczKOo1kDJ3TidpoRBBFqpkMkkpFMRymkkxi8Hhwjo19bJdLZTIh12o0K5XDQs3p\nPHQM/wFbNJ1Oh2omQ6NUQlKrEUWRb37zmwDf/Kj3P8yl62Xg14G/Bc4Bf/S9b/jP//k/f+ikzc00\nAGr1BwmDLlcXPT2HbaauLhPZrBuDAbq6DmXAh5yQPoDDZMsdkXCiiCgJSJJAj9OEuVGnnExi9vvp\n6elh7623WFtcxD0xjmp7m2o2SyEcpf/Rs1x6Z5daQ8Zk0tGvVzA2O83ORhJdp8rcpI2hQSvr60k0\nkszRcQOPPDJEtaXg26/FKBVl8vEMqy+sUy+XOT4fYHasl1u3t0kXUsgGB+ubQFPHyGyQIYOLteX7\nFJIZdOoozm4HLpcTZUXGNztGuwO3/+EFXD1+IskGVn8Xjf0oyWv3EVUqZK+b8UkPyys5HH6Z0H4e\njcbF5MI4nn4fr3zrJtHVKCPHR0mma0SKKnazbqxGBfM/P0Xs1k0mgxpC//gvmE+epByJULhzH593\nhOTdq4Rf3Mc7Okjb6acY9AM+enp6kJtNDq5cIbOxQatep6PXk5AkXGNjH4qy/zxG21+4cIE/+ZM/\nedjD+NQ4deoUf/AHf/Cwh/Eh2AYGaJRKZLe3abda6J1OfMePfyDYq1CoE49/uG28t5djZsaNSqXg\n5En/A9WdxaLhiSd6kSQRpULg4MZN7l3dJLJzKDUtFypUynWmnlWw+/rrlLIFYpE8ppqCYxOj3FYo\nMKrqDEzOYbXrmTh/itGZIK/9p+dp6Ww0RD/htT0cXXY8Jg2aepP+US86o4KRARPVyB6SUom1tw+1\nyUit1iS9uorhqB+9tsEzz81y916CTkmDVq3l5KkgmTtX2NjM0AlUeftWlPPn+xE1Mmazkq/92hnW\ntsrsb6fQNNXcee0WYipFs1wivbuP68gczVwvp37+WeL3Frn5B3+ExaZH++RZdO4e1mMCkTgcRKuI\nmjDh9T1MJiVvv11mZCZAXmkkExLYD73v9Gm1alBJbYrFIvm9PUqxGHKziaRSIUgSrUcfRaXTAVDP\n52kUvscltNOhGIng/AhH3E+CeCjNyy9vElqPkMtVkNQaBoYGKYV2UakGuHhxD7fbwLPPDiGKAi/8\n0zKpWBGVSsRoUNMWFKgNepqilnY+jyBJ2F1Gbr6zya2bEoNDDgwO6Op1k4nn2N3JsLySYmDARrsN\nN25k+Y2fHqGcj+B/9BzVRpuSyskr/+0tWnonqWQFl88KHRmDUGb9VoSoUYWn/xgZnYt4ooRnagrW\ntlh75SL5dAmfJGG2aFFq1WilJha/neuru3SUWpKRDOmlu5isRuJREcfQINuxBt2SmblpJ5vRDpVG\nC6mWZsDTjdtrpKb2kHn7HQSjlfDly1j8frxBKyazlla1SmZzE5PPh9poxBwIsPinf0ollTqcv0YD\nS0/Px7qxqnQ6gmfPUkmnabdaaG02FOrv7+/VqFSI3LhBYX+ftiyjczjonp//vuc8zGLkNlAD3nz3\n+0/EF3E69Vgs6gfbNHBo8+t2v+/yOTbmIJerEY+XaLc72GxaZmc9D8hvZrOWbp+ebNBCrS5jMqpw\nOHUPgrXkVotaLkcpHj+sJLNZzIEA1r4+qpU65uNTmLfKxC/dpGHQM7UwQY+1iFLaQTPQRbWtYnl5\nk+zV24cEuflBVv7uPl3HjtDX182f/9ldxofMuPxOMvsN9rbjzJwYYGzUSaymQ9IIJBfjJGN5Bkc9\nVMtVVK0i1UwaSSdxkIrjc85iO+pk/1sv0nfuHDavC/fMDGqti7WlMM6pcXwTa5SyRRQaLTSqjI87\nsPT10WPuxWkW6HIoufhPV9lfDWG0GfFOTfKXf3YbtdmCwaThIFLCaFbz5V86R/LNCww89RR6t5v1\nF15A0Bqwu/PEMlFUSoHCwQGtqkR+9R7t+UFESaJwcEByeZnOu232RqlE7NYt9C4X+s+J+uTjsLe3\nRyqV4siRIw97KJ8a09PTbG9vk8/nMf8ICWifFQq1mu75eRwjI7RbLTQWy4cSRpVK8QNbsO9BrX6f\nyN5qtanVWlSrzXcdl7W4XAbqxSLVRgdZeN9IC0Bt0NEoVwitbVOTRUxGFRaTgKRJMvTVk5TLDcyd\nDOSTjE76aFSqZPJ1qqFNxp8bp1Zyk4mmERQqavvbDD7Vj6RVc/+tKOnNbQqRKK1Gg54jE4izcww8\n+zOs7Ld45XIElVrFxKSHricCZPfD6KiyX5aZ/eLjrMSViK0G3/mbN/m5X1nAZDdz+1aYrTubBAc9\nqDUi6ZSJvqPnCCpLGKwGhEKC9WiO5UyY7P372AaHMNuNrC3HGDKvcGJhjo2tPFJ7m06lSDSUIdSU\nMevBpGpg6fLR5dBzsF5Gozfg9FqZmHBh81jJKpUUwuEHn2e5XqfTbtOsVB4UI6JKhahU0m590KxO\nZfjhXZiXV1Psb0Rpt2SKhTqNZoVmq8OxWQdSRSSXq727AK1hMqkZm/ShEpp49vZp1mqMHwni7XWz\nEldhokYkUqQtdxibH+HIcR8ej5FQbp9isYFCpcRsFfEHLXR3GREkiWSyTDKeR05kiF+4RO/cBOH7\nOW7/yzsEjx9h89YOk9Nu5GqHVjpGaCeJ26GmFg1xdPwpSIRQ2A0MPH4WrdlI/N4SereHWCiFTtdG\n7XCxu3qAUjw08cweRLF7bLRzcdBYCV26xNCjJ4ncWaSRCDN3YgG1N0hmfY3ivaschCW0JiO1bAa7\n24Vep6RdSKNouejIKqq5HMLmJgavF4PbTSkaRd8dQLK60OhUqLWHOU724eGPzR8SRPFjTe0+Cum1\nNdKrqw9+LobDRG58/0f8Zy1GfpmP6Gh8CnxqfaHZrGFhIfghAqvDoXvwHotFyxNP9JJKVWi3D29G\n3+0CWq02yct6Ioka9XKNhEKkWmsxNBk8DM67cpWyrKHREqnVZTR2B51GnUI4jNsXoKYyk1N5CTz+\nJM5uNxpDh/t/8ReEF5fRujxMPvs089Nuep0zGLQCte1lIneWUKkl+h7vwSOlye1mePQLp4ncuEk5\nlaaQLTD5yEnKS1VCmzHG5vrY3UwwPGTl1ht3mZtxo5VkUnsh/IM+5o4H8E90U7gZpJItcPrf/RtW\n37yB2IiQ29nlXqPI8PlH2b56GKKVztQIDE9jdxgYmXWhMxtY+8s/pd8DLIwTHO+n2JHQyQUMgohK\nUtBQqFi8fcDjJ53k9vYoHhxg7e2lnssx9MgjJFbWaOfiCJIKtdWITIdUKMruWpjeUT+VZPLBjes9\nNCsVGoXC574Yeemll3jyyScfOFF+nqBUKpmbm+PatWuc/4TmUz9OfL/2r06nYmjIzo0bkQemaEql\nyOioA0kSaTZlLl8OPYieB9jby3PuXB9Ws4aOQoN/bppCNI7RZsLe7WT07HHyiSSbO3kQJHg3MmJg\nRItcLmFql8hvrlPIlhgY7KWczjB8boHQlesUFq/w+JkTiK4TmAJB5Ckz8UgGZ5eDo4+O8/r2Biql\ngH9kgJMLQVLrW7SMPmRRyfmnhrlxM8bF17fxefWcP9dD15CeeL7D25fCrL9zk3o+j0KjRanVUM9n\n6aDg2a+dJLS4THQzhCfYTXCyn+W/+xvqqSSSxYFQaGEc7+fORpTASIC2qkM2WcC4tkO95qSuMKGW\nOlRqMo99YZLFa9s4LApUjSKjvSosigxJfRbBCMMzvRw/04NWq8La34/B46EYiRzmdfn9WHt7qWWz\nDz7POrsda38/yaWlB9WeymjE0tf3Q10LzaZMKttEZTRSz+WQlEokuUO9WscUCJK5lMfrNXLkiBet\nVsHmZoZQqIDebsPitjE5aiUYNLG+U2Fr+1B1Mzxg5sa10KEVvyCyvpljetrN/fsJEgmZYqlOMGjB\n7tRRr7c5dbKbai7LwUacriPHGT0zyJ0/fAeVTkuz1WH0SD+NdIq+gAXztAerHlqlAhNzPbjdWg42\njTSbetShGHqrGUmC9MYmXf1eFDoDbVc32bVFRs6dJroRYmbCyhuhEHKzRatSRaFWMTHbQ/3qPcqV\nKnvP/y2+02dpVmUaghanTk0ul6X71CkklQr74CDZ7W0a5fJhkF4qdZhkff065kCA8HaM9eU09UYL\njUaBv9uMW6ul/SMipHY6HXK7ux96vZxMft/zPmsx8n/y2YqRHwper/EBS/6jpL1w6Eni8310lRcO\nF1mPifQtnCJ27z71YpmWxoJ1+uihZ39Zwfqrr0OrjsbqopmO4ZuaZjDgR+d2Y+i20nl8mM2lME6X\njsiVV2nXq+jUAkK7STFXoHMQQp1Lc//NaxQKDXwD3URv3cI+fxartk0xleHW2zVmHjmNr8tIvthi\nPdLGQJGAR0nPqIuv/MwExVKD3c0UlfgeJ47Y8f3MGOWawMa9PeKhFL6hLzFwIkBse4cX/+YqVpuB\ns+cGyZbAEAjyzJmjJLZ2ydcUvLXeJnflGr/4b04wddxI9+nTyBcvMj9pAClDZjdOKxMnlc+j0Sew\nDQ6RyDSQ221K4TCVRAKN2UxgYYHc7i7NQo52KU+xUEOtkvB0+6nUYH2rgM5W+sjVkKhUIv2AFt/n\nARcuXOArX/nKwx7GD42TJ09y6dKln8hi5AdhYsKJVqtgZyeHKAoMDtoIBg+5R8lkmYODD24T5PN1\nQqE8DocXV7eDdW+Q8//7/0zmzg0ye/tkI1Fa1Rp2s4JMCeSWTHQnhsupoW9O5CAs8up9kbU7KYID\nCp77uRna9zYZfHwBg0lHqVClngijHwzw//z286gdLrq6TIwN6DnzSB80vTQTYXJXXqdh76XsypIt\ntekbcqM47mN2yonVKNAsl4lmtKwuhjArmgxM9ZFPZPAEnQz2GLB3eZjRl3npv/wpS+/cRWu1snp9\njXoyxuj8LDvX7qKVWth1AjqpzOM/NU85GqEk1+kbPezYmp02PBYDbscoq8txfHYdC48NIBRSNPaW\n8eoyNDJp5rprKG1NvNY8Nuvh51VrsdB94sQD3xBBFKkVCh9o2QuCgG9uDr3TSTEcRqnXY+np+VSr\n6u+GQiFitmgx+/0URRG3pKLWEvCPBPAN+HlEaSEYNONy6Vlfz/DOOyEODgp4PHoEQSASLTM+7mB/\nLYJWpyZfLXPjao5TC31E9pLsrB4wOttDvdEhGLRgMqlZWAig0Sh48cU1fvZfjfPSv6xjMasQVRpu\nX97A5nNw7PQAOocTUaOjFtpEKxhQVRrMB6HgtqEUbSj1Bv7pzy5RSmcZGzSx8eobHH/qGF3Hj1Op\ngdpkwDs+ypW3NtEPz/DO1RgLx51UVm9z4kQP4bAFSRLp6zXR62xzbWcLo9OB4LRg9ncTXTygy68D\nQaZZqaDUaqkXCnjeNTSU1GpatRre2VlajQYCUENNXtY+8GtpNhpsbWfxDPei/hERUgVBQKn9sIpS\n/AGChk9SjNz7Psc+Wgv0Y4AkiRgMP5wDXDpdIZ+v09A6sM0/iqLTpI6GqmQinS6SjuSIrO/j8Jrp\nmhzDYDxCJZlk/eo98tVlJN11Rs9M4XcpiNy6g17ooNFrkXwOFAYj+8t7zD1zmvXnn8fkcaOyQ0tU\nYjDrKWysMTLt586rccK7aQrVu/TMHyEWzhNdXEIqJ7EGfNy/H2e0Tw8qLfZuN+FdLcnwoWPra69s\nMDLmISG22VoKoTAYsJbSZCKpQ6dEnZlCPEPxXpi22sT2QYdLV0JEwjkMBg0vPL+KQq/HqNbTkVTU\nsllit26h653FYVOTq4rUSlVq6SRTcwPYTQrKs7NsvfQSud1d+s6fZ+355+k6fpx8PEVHWaHegkq+\niPfxBSI5mWi0xFh/N3q3+30mtiBgCQbR/xAeAz9JaDQavPbaa/z+7//+wx7KD41Tp07xe7/3ew97\nGJ8KkUiBjY0M5XKT3l4Ljz4aRKP5oKHee6TW78V7ad/Wbh+PfFHD1htvUc1m0bndFNJFDC4nUjOM\nVmskuZ+iVS5i7F5A1Oj4q798h3tX1qgXCkR2E9i7nZw8dZw3/+uf0GqD2eXgyFd/it3NGLKoRmmy\nUqk2yGeqRC6/hV3IktyL4pkYxdSt5GAjRDhaodhQsXIvxMxsF//19y8RD2c4/9wcHr+H3P4BvUET\nZWOTowt9aDxevnNhjUKxQcPkwztcYufWEl1jAyy/fZvxhRlCuxmquTy9QSMdrQnr7EkERwwPRQqF\nJnr/MCqzkY2NNP/wN3eQ81kcdjU6VYeTk3oG1Um2XnibVqWC/8wZ2rkk5ZgeudFA1GoxeL0kV1cp\nRaOH/9ROB0tv74c+zwq1GvvgIPbBwc8854IgMNRnZOXNJOVUCrleR2cwMjvXRb3VYXraTX+/DaVS\nIp8/ND7zeo28Z6FzZNrB0qX7yPkMDWTahSKu/hGK+Sp0OsQSFeJv7HHtepRMpsrkpJOtrQy/8AvT\nzM56SabKWA0CW3fWGRhxY5c7RFIy554aY3v1VRwuC7jGSJcllC2B5l4Yc/0ArdPG7eU0paJIJR4j\nLWUoZUus3d5GJba5eWMPp0mi3mjz1uv7bG8ksVo1mLwTbH8njbC3i8tuwaRXYKo5Sa80kMtFMvk8\nw1/6Ao22iEVRRlVpoDB7sQ8O0pFl2s0mtVyOrhMnMHV1Eb5+nfT6OqVIBFGlwjhdRukcpmdugsTG\nDnJLxuJzYhqe/JGqH+3Dw4fcove6LYKAbWDg+57zSf66C3gayH7EsUufcow/EbBYNAjCoeS3WgUQ\nUKlapLM1Lr6xg7qW4ua1PRa+dJzdN9+h5+wZ3vr7t6lLOtbX0ggaHWvrac790pcYGCshVnXkNmoo\nvU7qpRJCTo3W34tjZo7itTsUozkGxgfwjfYSXrrLwInT9HztFGubWbwBO7s5qJdK7G9GGBtxUG4o\nWL+/w0DXMMm9JB2lhvlHhnDbFLz92gYTM124/S7imSYHoRz37sb44hkfvYNORr94npdf26ctg9Xa\nIfxWFK1Bh0qroiMoqAlaDhJVbt+JMeaosPHWLfrGAoeMd3GRs6fnWYt0SCSrDI47efqr04jhZYKP\nPYbJ7ye7vY3GasV79Chaq5WZr/8iodV9GrUGgRPzJNQ+qrEKSuWhIVLPo4+S29ujns+jd7kwBwKf\ne8nv5cuXGRgYwPUxuvzPA06cOMHXv/512u3252KrKRIp8uqrOw+C88LhAqlUhTNnAh8wb7NYNJhM\navL59zllarWEzablzTf3CO8mULVKuGiiaWaR5DySt5fwboLeo8dAqUJtNmH2dWEOBNndzRMO5ZFo\no7dZMdrMXP7Wm0wNf4Ev/MdfpQ2Iah2likxL0jP26DF2N+JIFgtFRPrPnkKR2EDXHcAeDGAcn+T2\ny3sYnXYK5SYOi5LLb22TjGRAbnHjzRX+1VdnGFoYQm/SoqQFOgvP//0S2eW7iCY7y28v8ciTYzgT\nCYRmHYVSolauETnIolHCblwmrWihVmUIH1Tp67Wz8MUZQhkJvVImlSghqbV0DB2KlQr5dInkWDdj\nfgO5ly6AIKAyGNB4u8HiJV9uY9d0UOn19Jw9S25/n1omg87pxBwI/EAy42eFphxl4ZiNeJ8BWe7g\ndmowt0L0zD4CKi1ra2m2t7Nks1VGRhzs7+cIhQqH14KqRT4ax6hX0m7J1LNZ4itrDE8HEAQjx7qd\nXHwrRE+PBa1WQSRSwmrVceNGhJMnuwkfFKmVyjicOsKJBoW8REsso9aoeOYXzpKtKPinv7iKQmjR\nKkgsr4c4cqKPniEvQrFEZukW1VSBM888RjmdpZIrkg9H8FlE9HYLka0oXlMT3xOjRA7yJJNVRs4/\nQimVg1oRs1lLq1ZFpdXSPX8cjc2GbWCAdrNJqZ6lHO9g6elh4KmnKMXjFEIhNFYr9uFhipEI4atX\naR0+5OjIMqm1DQzWXlrdkwSCQ4i0qaJFMNp+pHNm6elBEEUyGxvIjQbWvr4fuFX3SZ4KLwIGDkmm\n34uLP8Q4Hzr8fhN+v5lQKE+nc9gKDATMJJMVWqIKvcGMqFIhiAJtlZbEQRJRo2F3PU2l2kTRqbFy\nN8RUqs7w9HHk1St452aRRBGlwUDA3M39O/vY3EFUAzA4paZRqbB1/T59xyYJ3VmimK+wvlWEk9OY\nB6bQ6lWASKkloRRUDE8FqJdKaMxmdg5qROL7/Pt/e5Qtt47+fhu5gwiNUoTghIdmu0ZDaeD0LzzH\n3/7jFq+9eA+lWsnkwhS9owZuX9tkdtZLLNMmcpDjsSdHCO9n0EXDbKwn8fV50NrtZNZWsbdlzp15\nFMHoIzgxgGfQRStooZpK4RodRW0y0W61kGs1arkclUQCUa0FtYWy0kYsXsFoPFydwOH+v2dq6uFO\n+I8YFy5c4Omnn37Yw/hMcLlcOBwOVlZWGB//RBY/DxVbW5kPJPh2OrC7m2N01IHD8b5vhcmkYX6+\nmxs3whSLDdRqBRMTTu7fT5BKVUitrtGsVtF0ahwbGCd/9ypGcwaz3kI0lMR2dIGqQcZpMkA2DKIf\npcGA1mal2QS1TkOjJFBKZancvIuuy8/lG0nSmQo1fTeiUsmTP3cGuVblxtvrzDx3BrdigtReBJPX\nQ7iiZ2/zKgFBZGZ2mMVkgmy+jiSJtKpNrDYdq5cWaWV9fPXfnieWavJHf3SHdrNJOV3AH+xBJbbZ\nWIkyOeAnfG+Z/ifOoFAradVqmD0uEpkG1rle9nczYLARly2U21qUifscbOyi3C/y2GSQzZybbLJA\nNQVIChSKNrbBQRBFZPcgl5eLtPVl9Ptb9PZaOHrUh9pkwj0x8WOd+2I4DIkwXVotgiDQTFWpKBS0\najWWVgrcvRuj0zlUWi0tJfD7TRQKdaxWDZpWnr5BN5lYlmKtQ6kqoxKrBLsN3FpMk87U2VjPEIkW\nsVi0WCwaKpUmoihiNKrp67Px4nqEnXtRavkiarsLi9NDpq5idzNKNi+T3T+gmi+Qt+qx+Xwk21bs\nsyfoqW1x84U3OPHlBUxmFQOjXjQmE2qhTvyd64x94QlWbm+ze/U24wsw3mtFIbRIhVPEYmW63Gr2\nb9wmH01g/6WfQVQoqKQP1aSBU6cYefZZOoBKr0fv8WB496tZLiNwSDA2BwLk9/YeOKgaDErMVgPh\naJ0MoNGoGOwCIbFFJHUopTd6vZ/ZnVl4twv+aRKBP0kx8ivf59hPflTpR0CnU/HII0EikSKlUuNQ\nuqaS+Od/XiNZlNB6HZz46ScQWyUU6NA6nCg0asqVFnKrjUqS0JiNlCoy6zdWCCibpLJNmuYuzEYX\n3VYLzq4KsWQNQZNmZ3EFjUJmbOEoLYuTwvVNJJ2O4Hg/CcEDByVmj/XQTCfIp/IMjHow6QSiuxH0\n3UGqyRIOk0QxkWBq3MJbf/c6l56/AgKYHGa+/OvPUcyVKAo20CTwj/WjNuhoyiKlQoXZ+R66fXoi\nsQpzJ3rJl2X6PBKVfJ10LEezo8B79gkEi4d2uQC1EvZgF9bgodRLoVI9yC54z0bce/QoG9/+NuVw\nCJVSjXfmGLis9JskRked2O267zcFn2tcuHCB3/md33nYw/jMeI838nkoRr67EHkPrVabZvPDWzI9\nPRbcbj35fB21WiKXq5HJVJHrDRrFInKzRVWlpNDRoNTpqKZS9J4YwHlkHtHqot+noL67Qi2Vxdnf\nRW+PmZBWSzZbp62TGDqqx6iSSRcKhOtF4ntxLL19ePxuDlZ2ufrSTY6O6nnq8W5qa7d58YWXcXY7\nUdg9CCYHI6MuNDqRXjeUB5xEki3k/gA6RQuHETQqiYFTR3jnZo5yW43Kaqcjyxh0MzSKWUYWjlHL\nJjE62sw8vcDEkwuoLRbGzi/QNdhNa6XAOzdS9I10Uao0UDdFolthLOktxFIGdaPO9pXLHPvyU7xZ\n1iKrJYYGrOjVMXxHj6JyeLkXl2jqnSjUaiqVJsvLSfR6JcGg5d3O8o8vSuA9/sl7K3zgkB/RUbK9\nHafTOUxzDocPuyE2m47f+I2jSJKIV53CZhC5fF0isxila7iP6RkPBr3E+nqGVLbG5JSb/VCBRKJM\nV5cJu13LNyu/NQAAIABJREFUqVPd9PZayGSqDI51Ua/WSe2GOXKmh5OPj1KuyoxM+bn45gH2vgB7\n1+9QKjXQVBo0RC2vvRPjxPwQz/6vP0vq1lVe+L9eYmRukC5vGePIAI/+x39PRZbomZsim8gR3gih\n0qVw9vkJnj5J45+f5+D6fYZm+hg8c5TU8n0K+/to7Xa6jh2jnEw+sNgXfT467TaRmzdJra7SbjaR\nNBqs726hqQwGWtXqYT6N14tnMkDTUCeXq9HvbFNZvUGyfdhJlL5L0fZpUCo1kOU2JpP6h742Pt/9\n8s8ArVZJf//7ralKpYFOp6ZUanB3ucbw0AiBHjV+TZZaR03f0Wlu30nSpoXKYGT6/DypXBOXq8lm\nWslKRM3yC8u0pU2e+NIEx+Y8/NMr9+nUdATHjuPpthKpt4mvVOibPY7SYCKWhXuXUqiNNYxihV/9\nD+epJqKIHZnlzTKN/n52Em0qpRaDvSaSuRbdxiZOfZu+UR+iUoXLYyaxtITN7yGV0TJ2pJelzQrN\ntkw7lSbRLDHzZA8TAZn+nhGu3i3SqFYop/PsLIV57Nd/nlC+zp1X1ug/MkX3cBBnwE7P1BCS8v39\n+FisyOpqikymRk+PBWs9hfHdKhpBoFWpYGiEOfr4oz/+yfwxIhqNsru7y4kTJx72UD4zTp06xaVL\nl/i1X/u1hz2UH4hAwMzeXu6BHBcOt2Ss1o+OG9BqlQ8UdOl0hU4HBIWEqFLRqtUBJaZgD0Z9GbXZ\njL2/n9LuBq31JbQ2G0aPm1JbRtsu8PVfnefyzTSL95N4PAaeecxDbfMOiuF+YjE9kuKAajpJ6WAP\no8OJRiFhsmiRs3FKoT3sY+OEDvK08iWczhZf+dqXuH3hbW7/xV8y94v/I4HRAG9f3KFWq2M2KPF2\nW9neyfPKS+tYA36OHfPxykubmPQqlA0tLpvI2efOMDtmpFausnl9ibTk5eZymaZF5MLLW/TPDLMT\nl0mnKqiiEQZ7h7A5e1FvbGHVKhkK6lAU4xybHyfYNcKEr0Zps4mpqwtF/wzyrTwK4bDQq9db7O/n\nKZUaDAzY0OtVHD3qw2T68RDRbYODFCIRKskkdDooNBpck5NIas0DRVW12qRabSHLbTY3M+h0SpLJ\nMuZ5O3YxybgfPLZuFBJ4/Q5aKgMag46D+xlGrUaefLKfRKLM0JCNiQk3zWaL117bxeHQUa40Of+F\ncSyWI1SqLdLpCpVqm+XlKnqTjkxNg3NyGgQwepwY7VZsNi2376UZGrAjbYvoHhlHqBdp5hvsv35A\nUFKSjmawzZ3kS//bL9Nstbm/lELvsLB3+U0Ksg7HzFFs436kchKlVos5GKT3scfQezxkNw/tZ0Wl\nEufICOV4nNTKygNJtVyrUUmlsPb0UIrHket1lHo93tlZHD0+PD2H+W57r79Gs/3+lqZcrxO/fx9z\nMPiRJNTvRb3e4t69OJubWdrtDm63nrk574Mso0+D/98UIz8oGEynU3HqVDd7ezlWV1Ns7+RRqhyY\nZwdwmtoYPB5+cWCUrdUostpEW2vCrqihMwpcvbTDylaZaqWJwelgdT132PIzSNRaTaRqkd231/GP\n+Jk/OkRoP0eqoESphl/5taMYdBI+XYl6aptGSyCWg/7JHjphmXQtTbfPx0EsSyKaZWGogdOppbfH\nQqsjUa+USSYrHHdYqERr3Lu5wdS4lYOtOIVcmdFTfsa8HRa/9c+c+OknOT4X5I3vbLAVyWPSmZDc\nASzKOFJBTW5rk0atSTQLucYedm0Li01HW2fl9dd3KRYPyUhip8X+6i3c5g4q1fs+EOVolHqhgPpj\ntOr/PeDll1/m3LlzKD7nvBeA06dP81u/9VsPexifCL29FrLZKtvbWZrNNhaLmuPHux8kyn4/2O06\njEYVxSIYvV6ylQp6gwaL3YDKdRyLRUvyzvUHuTj1fB6Dz4dreprs5iZmTYR//VPDPPn0AM1CnvL2\nCnI2ibPbTUCnY/WqSD6cohyLYLAlOf4zzyA0KtTyefKRGKGqibXFEHaXhcReDe9sGO+RWWrZAvFY\niXSlyuSkA4NGxKAV0Og0/Lc/3EBUqrFZVBQLNbxdJirFCjIiKp2eLreaV//v/4KoVDPx7BewuIfI\nVBWkk2l+6T88w5VrMdaWUjRkgaP9Jm7dDJNwazlz5DjylTcx2ywEJh0EH59AljuEQznafSasPisq\nrRaFoki9fijLj0RKRKMlgkELpVKDaLSEQiFy9uwnb8F/FmitVvrOnTskRNbraB2OB0F7Xq+BjY0M\noiigUkksL2dYWAiwuHho/VAuN/lf/qd5BFuYg3e2qMh6tlY7dDZDiAqRmRkPuXwDjVbJkSMepqc9\nbGyk+Ou/XubkST+Dgzb8fjNLS3EmTTreeGsPtUrixW9volaJfPVfTzAw5CAZL9Lba2V61oOGOvdX\nQkSyAq5ZBZ1mnfzuNkarkfjaNs1sGs/MJL7BXsRiiNhrd2iLKiz2ATqlFpmNNVQKAVWhQ/jFy8it\nFhM/+7NY+/vRuVw08nlEpRK9y4VzbAxLTw/JpaUPebtUkkmcExN4Zmdp1WofCjUU6FDL5T5wDoJA\nu9mkWa1+omJkczPD7duxB4uEUqlBu93h3Lm+B3lRnxSf/zvqD0AqVWF1NUU8XsLtNjAy4viAJ8l3\no7fXyte/Ps3SUpJqtYksdyhV2kzN+LHbdchaC1XdYXZNuyUT39rENuVHVhmxdRlQJVIoqKGQRLb3\nipw4O0R28Sav//nb0GmjbecxiyUaqiB//w/3kAT42tc6mDQxbr3+Mh0EAueewmjrIxRr8Kd/co9c\nsYFSkHFaJI7NunD1+0iEluntsxKJlimE0wTG+1FpVDhtHSKRMt3eDuMDWmwnxun3Cmy9+h1UCiWl\nSBjLUDdGkwoRC6dPT2NUtbj2Vxeo1toUGkoKF9fomVhHr/4CV1dDnD5mpy2qEMX3yZodBGqNDqVS\n4wNBeIIoIkgPM2Hg/3tcuHCBZ5555mEP40eCsbEx4vE4yWQS5w8pvfxxQa0+dFYdHnY8CG1Tq7//\n7asty9DpYLVqOXXKz+3bUbTaLrx+B06njuWNBEa7BfvWIsn765TKMpWWhNVpxp7MQ6dDKRqlFI2S\n392l6/g8919+gct/+x1K5TrHzo6h7xrAP9bH+uU7aAx6zE4rowMGtIUsJreBWLtDPZXA69ZjtOvo\nNJVkirC1d0DXWD+h3RqlVIYjk1YOrt4iFU7TO2DnK2f7uZey0Vao+NY/rjE54eLMvAe5pKGYyrKx\nuEvP8TksXheWbjeFg7vMj1iJp22olBWOzLhwd9uoVVtkMmW+8/oafX02Zv7dDJLyMnIli3eoh1Kp\nyRtv7D1QG0krJR59NIjBoOLKlQNUKolYrITNpqW720gyeRg+Gg4XKBbrGI0/nu6I2mj8QLjbezhy\nxEuncxieWi430euVSJJIPl9DoRDRahUkch3aei8XNw6o12vkMmWiB3lOnQlSKNTotDusr+eJx8tY\nrVr++I/vcPy4n8ceC1KptHjppU16ey1cuxZhby+PQiEhCFAuNXjppU2e+x9GGBjxMNsL0ZuXCe+G\nqSRauDx+2opZRLlB79FJBLmJnE2A6VDu3CyXuf6Hf4BraoZCPIGk1nHsN36d5lg3zUyCZmyfeqOB\nxmolv79POZGgls8fBkcGg6i0WgweD8K7hGNBkhAE4dC8s91GkCQUKtXHyqrFd7dtqpkMCAJa2+FO\ngahU0qpWP1Gy9+Zm5gPdSjgMqs1mq596q/5zW4wUi3V2dnKkUhXsdi29vdYPtQ0LhRoXL+6STh/u\nNabTVeLxEufP92EyaT7y9x7uGerI5w8JPxbL4U0vlSqzuZkhHi+TTJbZ2soQsBooV9vYutzsLl2l\nUijTFhWg0tHfO0o5laaRijE+3U2jXCbQbWTz1jrdCz4ee3oSlUZJYfUWy8UwFkFALhZY/Ku/ZujZ\n53DZexkfs/PGq1vUmzUCXg9Bu0wxtI/Z4yK5soJbBcGFQdyzs1x74R3iDTNnFwLYXEY66TCl2D5X\n31jEZxMpS3os2Rb5lQTaZg6zVUn4/hrD/Wby6SIKq4tCpkKn0ya+uYdGqBHdS5EZddKIbmLpO0yq\nBCiUWniGhhHCS7RqNRqlEoIk4Zqa+qFCsD4vkOX/l7s3C5LrPq88f5n35r7vmZVLVda+oYDCvgME\nBIqULFGSLavDUozd7bElhyd69GBPTEdMhMMRfpiwI9o9bzM90zMxbffYrZZsa+MGbiAJgthRQKEW\n1J5Vue973tzuPCRYIsRFlE0RIs8TcJG36p83E/ee//ed75wOFy9e5K/+6q8e91I+FgiCwNGjR3nr\nrbd45plnHvdyPhI+Sgp0p9Uit7JCdmWFbqeDfXAQ//g43qdHqFabNBptXn99i67ejlMvUVrNcOfm\nNp2OjKhWUcxXyWiVBA4d2P2ZolZL/OYN4rduIXfbPaJSKFFdfp6z3/gWswe+SCOXo53Yors1T3Rh\nDuvgIDO/+WW2/49/oh5NYAxY6Tt6gILUZvZgEM+Qh/WNVSweB2/9wyuoOxVsijI7b6+xdfltJr76\nVRSBMdQqBbWqRDaaITY3h8PvJbAviDKdZuvSaxTW/LhGh7n1g+dwT4xREMzkBS+vvrRONi9h0Iko\nNDr0Zh3bsTqzR4+itVqxhsNceiu5S0QAVCqBW7cSgMzx40EymRpOp56+PhPlcnP3wSMI7++E+0nD\nYtHyxBMDFAoN0ukqL764xvp6HoejF4xoNKrR69VEIgXcbgPLy1ky2Qa1eptrVyP80R8fpVSWeMqq\nR1SJlEoNvvOdQzSbHS5fjiCKIsWihNWqJZGooNGISFKbWrFKo96k7TOy+iDFqQMWNl64zO1nL2G1\n6rA5bKSXsxjOT6IbDrP4j/+AotvB5PcTPHaMSjJN7sEDujLI7RbKlkRpc43tV14kNDvL2sVFHGOj\nKJRKKqkUjtFR4rdv067Xid+4gc5moxKLYXkoEjX6fDjHxihFoyiUSgS1Gq3VivEXhHi6JiepZ7PI\n3S6Z5WUKm5tYBwZo5PN49+3Du3fvh57/ft8BpVLxS1dF4FNKRur1Vm9ML9oT8KyuQiRS5Ny5MAbD\nz7xHksnqLhF5Bz1CUv1AMgI9VfbiYoZstkYgYEavV3HnToIXX1wjEikyM+Ph7Nkwd++mEKxOZr1m\n0ksW4psNNHotR870I6haGNRK7s5vEt9K4g04UGq0eA4cQra4MYo6hrwKNtcqNJttBLVAaiuC2uYi\nevc+FV2OE2PT3LkukEp0mZm0kbl9Ba0mR//kAP4D+3s32uERkisbbC9HcE5M8tLFO/hCDiYDsHTp\nOv19WnQmPRVJR6ZlJh5vUIjVaORzzBwbQW82IDr7KFXa1BpdtIKAL2ClWe3tgOq1FiaNAtTyw1Tk\nnlJaY+1ncNhI5NJrtCWpJ3iSZaLXryOo1RjcboxeL5V4nOzqKu1abddSX9R+8LX/dcaNGzfw+XwE\nAoHHvZSPDSdOnPhUkZGPgtzKCttXruy6/0azWTqtFv5Dh9BoRBYW0pRKTdwONbE7d/GPjNHgLvlC\nBQUd7F0R60SAaleLUhRp1etUUym63S4olL0k4XabbqsNyMTn5pEEI4ZmmnYmg2H4KEW1BqnexKg1\nMfWlL9Ktl2krVBRVLpwuI46AHbfPysCgHb2UodHNYTfKpNfiaNRKRNok5u7hVop863f2sL6RQ2cC\njcnI9KSF7I3L1Lce0KlXUEpVRDrMfu4wGytJps4dQKMVMSq8bMTaXH07AgKMjbtR6oy0+iZY2Gqw\nfS2NQtEbfX6nJWM0qrh3L4XFosFm0zEw0DPCevHFVSYmXKjV4q7R3LtdrT9pSFKb7e0SyWQFo1FN\nKGQhFLLi85mYn08/NPWqo9EICILi4YayRjpdBYUSd5+VbrfL3L3eff7IkT4OHHBSLErcuBEjn69z\n7FiIt9/eJpWqkExWCIUs3LubZGrSyeJcFLVFh8WsoZDOMeKz8ubf3SQbz1LJCfgqJRx9Psoba+gF\nBYPnnsDkslPa2aHZaCArFLSlBjqHk06zSaNcxtTXR6fRwDowwOgXvkBubQ2N2YzB7Sa/sdHTgaTT\nqI1GmtUqok6325op7eyQW1ujvLNDq9HANjhI4OjRXzh2rbPZGLxwgeTcHOmFBZzj46h0OjqSRGp+\nHnMggN7h+MDzR0cdJJPVR7x9+vstH6jl+jB8KslIIlEhFiu/51g8XmF4+Gei1PdT2n/YcUlqE42W\nefnldRqNNhqNiEKh4P79JFarjr4+Ey67hoC1iaO5wzOnbPjHXFQXbjIV1jA8MIggd9Em7zJ49ADG\nkI/CoRH0WgVGDZgGBim3XOSK0G7VKRWVqLUa/C4bxftb6CwWWu0mRosBSWqhzG/xnT86yFtvx9g7\nbqBa7CCtbJDXdll98UVKOzH6T59E6/Jw+slpbry9ydNf3c/Keomu3czv/Ltv0YqtkS20CLrCLKVE\nitkiKrMLs8mMaPWwkW0SGvLQ7cpYbUUsJgGTQUU6IyGqRRx2DbqGAdFn580XoiSTVex2Lc98eZR2\npoQ5EKBV7QWX3f0v/wVBFHFOTCBqtbgmJylubfXKgEBxe5taPk/o+PFPVI3/ceG555771I/0/jyO\nHz/On/3Znz3uZXxs6LbbZJaXH40hkGXya2u4JiZQG43odL1sGiVdWrUGVZUN94HDKO8v0mlKeCaG\nEPsHiEfzeJFpFIvYJ6eQOzLuyQqm0ADprTiVZBy9P4xlcg8rNxZotGTCx08ha0SajRZL3/shg4k0\n9jO/wd3VKqLZyt4zfXisoGrmiby+iF2CqqTAbBBopiPoNeBw6ZHbLQZmgkitIrPDApJkotlRcuyr\n53DV19iZL9OWFWQKHdrqFrpsmuEjh7DY9aTvXydTqWIstPGVm/zhNw8QTbfptiQEpcy1KztU6l0K\nkppyucnIiJ1Uqrf5UKmUWAxKWrk0qUyHotmMd8DDuXODgIwoCgwP2xkZ+Xh9KX4ZdLsy169HWVzM\n7FZqVldznDsXJhAwc/iwn3i8gtPZS2t++eV1JiZc7NvnIRIpkU5XMJvV7N8fwGjUMDPjQa1WEouV\nGRmx8+STQ7RaHZxOPZFIEanRZsAq4fEIVI7Y0Zn1/P63DxGLFhkMW5kIKBGbeawOI5MHh9lZjdFp\nSjQSUcwWLfVoguhbb1KO7mDp78cSDBE6exad0cjOrTnahSxqkwlBrcJ34AC1XA6FSkUtl0NtMLDy\n7LM0SiUsgQDNSoVyNIr/yBFMfj9aq5VWvU5ybq5nCPewJdNttcivre1qaz4MKp2OTquF/uciOlq1\nWu/e/iFkZHDQhizLLC9nabU6DAzYGB//4Nd/GD6VZKTRaL+nTyXLvYrJu+F06tHrVdRqLTqdLuWy\nhFarQqd779vO5+tcvbpDLlfnjTci6PUqhoZsOJ16dnbKCILAnmkX7a37LL95k7Io43FqkePD+CeG\nWbn4vyN3ZUZPHiQw0Y+6lsLY0HHyS0dRVPMU8zVK2hA//fE8O4km1XKdr//OfvZODELkHsVEGiUy\nBo8Hh99NM16hIwMKgS89PciYr8vl/7aIUqWiuL1No1RBbTKh0ulYffElRr+k49BhP/pBByfPDKEx\nm0lEc8QUPuqWOsV8A5dfRzDsZnOrREsSuPRWjNPH+5g6PktxZZHwsIua0ohgcRLZzHPo+CBmsY5x\nbIrbOy2MRjUajYBGIxKPZChvb1JcuI1Kp0OWZeI3bmAKBLANDaFQKNi6dAnbQ6MbtcWCpLYRy7SR\n15IEw+73tfH/dcbzzz/PX/zFXzzuZXysOHLkCLdv30aSJDSfAZt+GXbtyh85Lsu9ygbg9RoJBMyU\ny03sA0FK+Spzax08ob0IdLiXFzB0uzwzoGXlTo6pkwfYWtwkc/cOUiZJo1AgsG8a98hx9P3DPCjb\n0e5x4DdUqCSjmGwmzAefIOQbx2jRosms8NT5YUSDEUVnm+zNbbqihmp0C93QXnxDPjzqWXLzAvVC\ngZWFGO5wEI3FxuZrV6i0BIwtAc/UFPawk8LdHbKSFqPRSbPTm3Kpl3sPjsrGBp14HKPRhCTVqNbq\nCPFFQv5xpK5APZthY2GLbrWEoNGiNtsBBSaTmkajTZ9Vpmurc31uHVkGpapXGTp2YYZjx3qj/o97\nI9Frk+cfeQbs7JS4di3K4mKvKhIImFGpBNbXe6+LRksIgpJvfWsP6+s53G4jxWKDtbUC169Habc7\nnD0bJhar4PEYGBqyUSw2GBtzMGirc+uFK2x02+w9OobJq8fo9WJQu1C1q1z7/rNUFBKyQqC2tcbY\nnjEK2zt4x4exBzxEyhKa2XPoJiu0thZAqaSLkuAT5xGMZuqpBHKnl1JtHx2lkkyyc/kyq889h2fv\nXhyjoyz96EcoBBFzfxhTMITc7WJwONA7HFQzGZqVynuuU+Ud5+uPgHf0Iu+GqNUi/gIRqyAoGR11\nMjzsoNuV/0Wtu8dJRp4G/j2QAU79MifabLpHSosAarXwHsGM223g6NEAN27EWFhII8u9/IE7dxLU\n620mJ38m7FlYSJNMVul2ZbrdnjgzEikyOGjHaFTT6XRx6prcub9EOpbF51QTX0mR2Ihx8psmnvzO\nN9he2sTt0DL/t/8PRouRRj5L3759HPntf0VZsPJf/3GTUq6K3WpAb9Ty1mtL7P3DWcITAbQGDaXt\nbXx7Z0hn6rz5wh0OfO0pqJaIpCQ89hDhs6cprq8TvXWLdkfG3h/EPbOX5PI6gqjCaLPQzUcI7w+Q\nTeaoNAW05QS5WzewqWHy+AwFfYhWy8zGepapCRfdyAL/+Ldv0+dWo1K26Z+dZvqJwxw4a8SgaqM2\nGEhXRRI319BoxF3RYL2lRCl1adfr6Ox2ipEIyHIv9bjVQtRoaBQKyLKM2mJlq2xk7uYG9VqT4JaC\nPQebHDrkR6X6dAheM5kMCwsLnDr1S31Vf+1hNBoZGxvj1q1bHDt27HEv518MQRR76dq5HO9+Wpn9\n/t1EUp1OxcmTIba2CigkHZEHUY6fDHH7RpRorILV62DQrUdpsuE8dJKWWsn85Xu0cxnsVj2mfivN\nVoexUyf44fduUKtucuYLe+jEEnTVKt6erxOLNakmJPpGvZy5ME5l7TqJlS1kvZWW2kzoyB68Y3u4\n+jff595LVzj6W5/H6Quhtdk4MLkfSann5k9fJ7hnhDdeuk8hW0b38l2+8Mf/ilxdR0fUEklUMdpc\nNKolDP4ASkGg3WhQjsdBmcQSGqBdryLWs/icUG93ef21Fco1JY1iE0Msjt/jJhAwEQpZkGUo3H6L\nfnONzrFBNlazdLsyA16RiVHrYych76DZ7NBsdh75+8pKFq1WRKcTuXEjzvZ2mYkJB0qlEoNBxRtv\nbBONllCrBcJhK6IIXq+JN96IUKm0OHDAx9///Tw2m45z5wbY2irQ12fCYlbxYDVDWWFmbI8PW78H\nrUFPc3uV/PoS5Y1VCvEyYtCNbWgY/54JlM0Kw4dnsA4Pk25b+OEP7pDfjqMSFRx+YpqR2RDp2zfp\ntNpYA35qiVjP2A0o/PjHBI4fRyGKyJ0O2eVlzKEQY1/5Gp1WG8fsIaSOQIf6bvaX2mBA1Ot3jc3e\ngd7pRO52KUWjlKJRBFHEHAy+b7XE4HajEARiN28iqtWYQyG8s7PvqZZ8EP65OpF343GSkSvAXuDl\nX/ZEj8fA/v0+7t1LUa+30GpFpqbceL3vDWYbHrb3HohqJaCgUmlSLErU6wn6+oxYrTparU4vVror\nEwpZGB93EImUqNVaD/uJfjKZGo1KleROFhUddGoFW1tFHG4La7eW2XPhOPv6g9z5T/8RpaCi0+7Q\nlSTWXnqJ0BPnaLkdDI/YEWixcGMNrUrL4VkH9e0NclUl9r2HsY7PEL1+g0zbyOyXziPpPfzj39xC\n0GiYe1Dld377AkN70nQAWVDjnZlhcyWBemgP2pE9bCSaaN0+ggorhUaNsCHHa5d+jJQrkcuXWb/0\nBie+9WWmDn0ehdxmwKfm4r9/i8x2CmXXTrtapt28R/+YH8WR85jtRvQmDVR7gWTvzPQDFMttwpMT\nxLfu06xUMPp8FCMRjG73bh6BOdSz6m6obdy+tkqjJqE2GekKahYXM/j95t2As193XLx4kbNnz34m\nqgc/jxMnTnD58uXPBBkBcI6P02k2KWxsIMsypr6+3fCwd2AyaZie7t2ULW47qecXOXpGTzYvkU0U\nqRUrPLi9jqDRULcpqGaylHcSFFJatAYtCkHAczxDajPJqaf3sfTCayTSEsGDs7z9wtuYPG5c0wco\n1SrMzWcZ0ekQAuPcupdjZ2kZ10aHg6d6InetVuTmxWsIehOnv3iAwN4Ztu4sYQiP0dWYSEVuozIa\nadbrtLMx5retnH7yPLeefQOrs5+x/cN4pycQpRLi6iru6WkEjY5KJos94EVns5B8+UeoA8N4R6dI\nvLlApdqlUm6gVivp6zNj0XbJrqyQuH6VYiSCd2KCgaNDKAQRnSjRqVVYWmoiSW3cbgNer/GxkROz\nWYPRqN61GiiVJOr1Dj6fCYtFTTRa4e7dJBqNwJ49Lur1Nul0lUKhQSZT4+bNGN/85gx6vcjcXJLT\np/uJRAq7FfdMprYbvOe0q3nlufso5DahiQHuLEs4HQritzYZmxqEzTUMjQSh8UOodRrK21vYx8Yw\nhAYpVOHVl9fQ9/VTl5SotWpSsotcXdlzSZVqzP3nn1LY2MDk8yJodQhKBfHbt/Hu28fmK6/QrNbI\nb0ZIr28z+sWn2Uy0Wbl5nxNnR9DabECvzeLZs4fotWu7xnA6ux3n+DjpxUWi167tjq1nlpYYOHsW\n87t0b3K3S2p+Hp3dTvjMGZq1GnqHA7Pf/4l+xo+TjBR+8UveHwqFgj17PASDZiqVFnq96kNV9vF4\nhUzmUSFrtdqkXG5iteoQRSVutwGlUsH8fAqNRuTAAR/tdgeDQcWxYwE6HZnIUoT9h/vZurdKbDMJ\nXZl2OzI3AAAgAElEQVR6VcIxMkytXENnlCklM6j0WlQqJZIsEzz/JPfjatYfpLhzLYJSqWDfyQns\nGonI65cIz9jYvhMlG89w5Pd+h+mv/xZrKZlKrsLcXBytGqRmg8hKgu//XY2v/+5xgk99jfjdee68\nucTLLyzRPzNCxNYglyrRoIHF68LqNBG7fJG1q3N0OjIoBVAoWXnjKof3HeXOXIrpgT5MRpG8SolC\nqWAg7MBuFcmnikTvp1lZyfHEE2HcbgNut4FE4melQI1GwDk0gOaJJ6hnMhg9nl6FZHMTUa1GbTLR\nd/AgjUKB9FZPpKs2GbGFwwgqFZ2OTC5X/9SQkeeee+4zM9L78zh+/Djf+973+JM/+ZPHvZSPBSqd\njsCRI7gmJ5G73Ue8Fd4PgaCV008Mc+faOuVqG3/AjFkHm/Pr6M0GxkZH0BqNtJ0upGqd9HYKe58T\ns93AwMwwjVyOlfkIjoF+VtdyVNtq6okShv4mar2FpXs7TH9liK2lDMViHJ3FhMFiJp8qkEjWsLqt\nFKoKysUab72xxvmJgyiHZll68x/o7yshatUUkxlC/Ta6jQa1aoO1jIOBs2dBoeBWRub6//ICx08N\n8cT5L7Pyg++x+PwPcXosNEolhp/8HN5DR+jUKhze6yObyFPMNwhOj3LgQB9er4GtS5eoJBLonU6y\ny8skbt/GVixi9vup6i3ElyusbPR0elqtwIkjXqzKMvVsFq3NhjkQ+MQm6SwWLfv3+7h1K95zPdWK\nHDzoI5mscOtWmaEhG8PDdrrdLuGwjfX1ng/JO7EfExNO7HYt4+MORkcdCEKviNbzrOmNs3Y6XWq1\nJtFGG6vbxuGTA2ynWrz11jpGgwptJYMzPIB3cASNRsXqixcRRSVSuUxhfZ3g2XPEOh4e3FnH7TVT\nbSpoyU02r88RNA4zNhCmUS4jAy2pgcHlQqlUINNrj9gGB5n+xjfIrG9QrXUYODeEoX+YN398H7XB\njHZo+pGRXefYGFqrlWoqhaBSYfT5UKpUpBcWdokI9HQg6cXFR8hILZOhtL1Nu9FAIQgoRZFKMklu\nbW3XefuTwKdSM/IOrFYdJpOGfL5n92yzvb9NscXy3t3su1sOCoUCjUbglVc2yOcb+HxG4vEKhw71\n0ddnIhrtCZskwcDoqYMsXV9Cb9RiG7LTf2AaSyjEyLgTSlk8YT+NQh61Tovs8dB2j7O4UqbYahIY\n9pJPFRH1ekLmGpYJK63tFdau3qXdhfnv/4AD3x1keaNDPlnAYlHzr//gCGubZebuxOl2Zd6+so1W\nrjM1vp99U9Moh/eTl1RUGkpEsw2v2Yha0Ual6KJExmg1kcvVaNYktFoRrajAqBdRdRusrmSY2NvP\n0KANvUGDoppFARgHhokVGrRaXZLJntnRqVMhlpYyxGJl7HYdExNObEYlzYiJdq1Go1TCPTXFwJkz\n6BwO9HY7OnuvKiWZE/i2FSjUOgTVO9ecT8yj4F+KbrfLCy+8wJ//+Z8/7qX8SnDixAm++93vfiRf\ngV8l5IdtlY9rDe/nS/HzqFQkNjby3LuXYnsxQa0BOq0Sk1pJq9VGUKlw9VmZeuIwi69fp1SoYrQa\nOfTl06RXNuj3aDBoRCw2AzqdgGhRUynVQJZpSB2Uqg7Hz47g9ZtYvL1Ou5SnmEii0ojsOXKBaF8f\nA/1mKhWJ1Y0SDr8TCTVv30wze3wUZSWN267G67Dj8pgptVQoGmX2zYxQTBX46ffexuBw0D89zHqy\ng/9uhHymjM4/QNdswD/jIraRwHPwOKUH96ks3OILv3GAdENDpS2yuJghl6ngF7S063VMfX3YR0bI\nr69TS6dxjo1BcJyttdruNTMbBJYuvopNLqDRCKBQYAkG6T9z5iMZZX0cGBtz4vEYKJUk0ukazz+/\nytJShlary6uvbnD+/CCnT4dYXc3hcPSGDw4c8FGrtbh0aYsHD3I0Gm2++c09JJMVBEFJKlXF5dIj\niko0GoEjRwLk83W8rsMEAwb+7795ARkFNoeZXKTBT//xFv/T/3yWKhLLP/0J5tAAstqE3hdg4+JF\nJr/9PxKYGqKSKWAP+RHo4A062Ht+GEV2G7odXGMjbFy8SDkQoJbLYwsFMPn9qCxW9EOT+PyDFIsS\napePTNdC4IybiiSQU7y3fWL0eB5pwdRyOTrSz9xV3/ESkbtdCpub6BwONCYT3W6358lDL0yv8/DP\nj5z7CeCTICMe4O9/7liCj5Br893vfhertbdzHh8f5+jRowwMDACwublJpSIRj4skEhUajQwul4EL\nF/ZjMKjZ3NwEYGBggP5+K/fuLZPPN9BonAiCAodDolpNAWGKxQavvnqHcjlNtaonFivTbGZRq8to\nNFNkMjUSiSgmk4qqOczX//zfMn9vkWxeYrViop7u0G0vIRdTjDz9eZZ+8APysozt4EGq5gDNikA8\nEUVZyJHNqvGGHMguCa1dSX2rjifsIy9oEIYGWZmPYjN4MTkLLF6ZZ/X5IgeOhjl1vI/bD5qYbToc\nOhVLsQyBIS81g4pUrkqXMm6viXpNyeKDIo5SHvvoMIGwE0jTNNgxW7UMHZpE7/Hg99wilSgyNTLJ\ng9eu0Cjl0BmNHD11kKrWTWlnBwBJ6rksFotJfD44cmSCYrHBG2/Mkc83mBwbwj8bJp2L09Hr8e/b\nh0Kh6F3/UomBgQECYRe+gU2i0QICThQKsFgadDo5wL77ef664tatW9hsNsLh8ONeyq8EoVAItVrN\n2toaw78g5vtXhYWFNCsrvRCwkRE7IyOOX7meqN3ucOtWnJWVHK+/voXdINPOJVEbDZhNbs59YQ++\ngJ3ClZfo87kI/dv/jnJJwuYys3N/hcvPXqfZhqd+8xD+fhdX3o6w93NevCE39baC2cP9tDfvo4lv\n8eDBNuqGiemTe7hzw4IYGOT2YoX+/XvZunUTQaPlyKlh3FNTxCUFT14YJNg3RebmNZR0KZS7mMJD\nbKfbXPjcIIpcjKuvrFKqwep2HLWxTFdQM+mz0kZDvKqhkmzgKORQ1Ms41lN0ZAvhkVE2ijqu3epl\nupTLTTqtJueOu5gJDdFIbeLZswfPnj2o9Hr8R47wxvUszWbPZUihAJOiwoO7D9CP2npkRJbJb0Ww\nRaM4PsHvj9Wqw2LRMjfXE2rq9SpKJQmdTkW53HxoDy9Tq7U4dSpEu93h7/5uHoNBhdGo4o03IszM\neDh4sI9UqsrXvjbBwkKa0VEHktTm9u04t24l0GoFnnlmDN9QH8l4hXKti9YXIpdLkKspMBmMTH7p\nN6g0IF9qkigq6BsaQimVmZ3tY3nVSG47hlZoc/xEP5EXfkJq6QGiANNfeooj/8Mfk1pcoLm9g6DR\n4BifJF3X0XENIzdqNIQmr9wuUyqn6XS6OBz6j2QopjGb0VgstGo9Iql3ucivriKVyxQjETRmM4Gj\nRzF6vejsdqrvErwqlEosv0TI3ceBT4KMJIEn/jkn/of/8B8+8N/6+/t57bVNNjZyD49YSaXg/v00\nhw/7d0kL9IzLvvzlo2xv95TnbrdhV20NvTZOsaij27Xi86nZ2SmRy4l4PLrd3VqtZuD48TA7O2Vi\n8RarCRudpoTJomd7aZvt3DYjxgIth4mZf/P70Gpi8HhYzuj40VtzRKIq2q025Uwcm1nNV48NkF26\nQ7FWx+x1YxK1aI0OVrcKHDvjZWMlhcegpyAlaRRLJNayHPz8k4wFFJDewe/WYjarCZ7185//dh6z\n1cv8fIZ2PU8qIjC+tx/9niGO/ve/x/ILF2lVK3jGR/AePgaZLU4M6dEHJrmzUGTkiZOoaCNarEQ6\nViySEo3GidWqQa8X2dwsIAg2XC4D1WqTV1/dJJNRASpu382yYdXw5JP7H8kjePf1V6tFnn76ENFo\niWy2jtWqJRAwoder3/f1v274LLdo3sHx48e5fPnyYyMjb721vatJSqdrtFpd9u79cMOmfylyuTqd\njszWVpFORyZfU2Ky9WEygahS0ec1IG3MYw2FKOzEubtSZyNWp3/PKHLXjn5ogqBNizI4yojJTanz\nJvm1Nb76jePYh0fQlHZIbSXQdJXcu/oAg8tDy9TCPznC5asp7I4KJ799gsNPTFLN5hEUIGpVpG7e\nZelyAs9vnWLgyF4cQ/3Eo0VWFhL0WZQM2+qUYnGeOB3EvCzx1lvbSG2ZeCxHtePBaDKTLeyAQqDV\n7BAI9xGYHqUpCxgHAqQuRdHpVEQixYcaXyVzd9PoMDHmslLPZ1AIAq7JSXQ2G1ZrDR5aHgqCkk6j\ngaCQ0WrFnj18rEypLNFy7xA2eHYTuz8JyDJ0OjIulwGLRUO93iKRqCLLXSSpjd2uRalUMD7uZG0t\nx9SUm2azTanUJJer8+BBFlmW+clPVpicdLJnjwe/38QLL6ySSlXR6UTq9RaFgoTNpiMSKdHJ16Al\nse/gDN4BH0p9jehKhFIuTSmRwTDST1fjQak3EWoWcE2pKIaDjM6E2XjhWQSjhdDBvVR3tlj+8U84\n8sd/hMpgIHjsGDqbDamrQqjnyW7nSGZbVDBi0KvRCG3GRq2YjCr6+n5xS0wQRfyHD5N4sE69o6Ih\nNWkrNtE5nSDLSMUi0evXGf3iFwkcPUrsxg3quRxKUcQxOor1E74nP842zQHgfwWmgReBLwEfuS5U\nLkuPaBjewdZWgX37PKjVj741s1nD1NS7LM1leTdAq9XqEA5b2dkpAT323+nIBAJmOp2fjQlqNCL7\n9/v44Q+LuPy9nVsqmiW7FaUUiTDyuTCLL73K9o07zH7xNHq3G7tWwu23c38hS7vZwmAxMTHlpdNV\noLE7aZQrxNejBA8fYuTcGViIkbt5mdyd23iGBvB9YZZOOY/VqmF2TM0L/9v/idcuUkkk0OjUjD3z\nVZ45HSLTtbBwd4e21GKj1MIWbKJ8UGDmG8cZOr6fYq5Mtdbh/rMXEbMR4qvbOPcdxOkb5/nn1mi1\nu+w9NoLBrccC2GxaBgdtXL68TakkoVQq8HgMjI46yWZrj1zbQkEikah+aDiSVisyNGRnaOijfsK/\nPnj++ec/U14c74fTp09z6dIlfvd3f/ex/P53i6O7XZmVlSzj485faPn+z0Gt1mRpKcvdu0mazQ56\nverhQ6dNvgwagxEFMlI2TbtWQ263sew7Am9H6bPpqQtGEokC82tqhoetiGsl7i6W2Hf0DKMGGZNJ\nS+zGVXTFLeIbCQS1jnJJIpNeo39Wx8kzw+htNg7M2Khv3efmj++ys7yFp99HMZ3FMzqMf7CP1Rdf\nwqTp0lGKWCdn6TSqWIU6az+5QyqeQ+cfQFHUMLt/nPtrVQI6HR2DHdvENAOpOsVskYMnhlA6A9xe\nayF1OrgreYaG7OzslB8ZjdVYLQgmE2qHBZWo6JX8H+oFRkbsJBI94692u4vGYiE80ruXrq5mKZea\niBoVNVnHa69tcuHC0AdGbnzcUCoVDA3ZSKerqNXiw/u+AqtVi16v4saNGKKoJBi0YLVqqdXa5PM1\narU2Ol2vklIqNUkmK9TrLQYGbMzPp8nnJVqtLrlcHZWoZG0tx8iInUpJQu52MIoSs1M2pEYDldqE\ne8BHaW0FRbOOWtnBv2eC+EqEnRu3CA66GZjZRyWboyrr6FQy6LtV9HY7BreXlRdfInP/Ht1mk6Pf\n/S5Svcza9bvcvBGjVJLw9HsxTx/AHtRhamUYNJswtzLAe1s17XbnoZdKFYuyTO7+PVbvR0ApYPIH\nqAthpkIa2skIAM1ymUahgMnnY+jzn0cqFBA0mo/U4vy48TjJyE3gwj/3ZFFUvq9PhVotoFR++Kxz\ntdrk+vUYOzulh5M2AqOjDur1FltbRUZGHIyM2BketlEs9vhRMGjGYuk5h5pM6t2qSjVfQqHWojXq\nkds9dXetXKNZ7fVf6w/WePJzYRRSlVy2wtCwiwOTBq794EccOtLPgX/ze9TKdVI7GdSyRHl1gUYi\nSmo9Qn4nxtDhGZwDflS0aWaT2FQSykadSr5MPqtEfP0ylpk2Wt8gRruF+HaBitSlWu2lKZ45049k\n01FuGmmu3cBlAllhRqo56Bod/NP/e4kKRtKxHNFEnad/+ygzT09is2l4/fXI7vvvdGRisQoKheIR\nJfs7aLc7fBaRy+W4d+8ep0+fftxL+ZXi3Llz/OVf/uVj1428g05HfmQj8HFBlmVu3oyzuJih2eyw\ntpYjGDSj1Yo0mx1kuResNzphRpm/37PVttkori2jjEYw+4dJlruoFS36gyZaUguDQY3VJDB/ZQGN\nSom1EaFeKLDvQIhaeRNLn5Xw/inyWxHSkTi1bIbBkJHl169hNYosX5lDqVRQESSMTieNYhGDtsv2\nm29hMmuQzW7y8Syf/4N/zd2/+6+o9Dqq6Fm+8gCD3cbg+BDFuonjZ0e5ej2OGPRy4ve+jl7VpVyX\nebDTQVQJGFQC1WqLcrlJX59x1zhSEBTMzPhw6SvklxfolvPk19cpx2IMnDmDzW7nc58Lk0xWabW6\nOOwaJK/MgzdvUC430Rp1BPZNU+zoKZebxGKlT4yMQM8FtNFos7bWS44dGbEzMGDl2rUd+vutiKKS\nO3cSDA/b0Wp7+S0qlRIQOHRoAKtViyxPsrKSpVisMzBgRaGQ0elUpFI1ut0O0WiJ73znAOGwBSVd\nwq4u2mYGUk0Sy0skF5fpP3mcaZeDSkfF9maW8PEjZCUDLZNAQVJhMynYfP0Nits7CHKTPqcatU7N\nxFeeYfvSq/QdOoTB7SZ3Z55yqUH3YcZM9P4D9h8bIrF0j5WtLZRBNVmPC93v//57BKa3bye4ezeJ\nz65k684bLN3ZQqUS0GlFcls7+A4dYTMhMGgy0qxUENTqXUdsQRQ/8ijvrwKfWgGrXq9mdNTBjRux\n3V2VKCoZH3d+oPHKOzfa+/fTPHiQ3T1eKklIUpupKRehkBWrVYPRqCIarWCxaOnvtzA9/bOqysiI\ng+3tEs1mB6vDQCMrcuw3jiFGbqLW67D63fSfOoFjeBilKJK+n8GubyN2RMqZLLmVLKpanvU3EijV\nImZ/AIXWSGJlC4XcRWt34ur3UcsXKEUiBGcmMDksZHbS2AeCxO4vIXUUVKpt7lxdZW//DDqiSA0N\nDo+FgK43b98zbCtx9WqUQJ+B8p0NmqU8I0NWvEMa3l7N0ChX0TitPYMbtZpUoozZrKbb7V2Xn0cu\nV8fp1D9CRnQ6EZfrs5lJ8+KLL3Lq1Cm0n1IL+4+K8fFxWq3WY9WNvBvBoOWRNt7HhUKhQSTSazmo\n1QJ9fSZUKoELFwaJx3uV1uPHgwz4tSSuRhFUKrbefJNqNk8hJbL29ibhvaPUZDdyvc3nnh5nIKDH\nZRP54X+6i97nod1oUk5lCRz4MhuraVZXs+SyNSb2DvLUl0/TtPgxiG3uvFihKvYyVWx2HXq9CqvT\ngmVggPzSIqIIkiTTrneIbEaYTKXpiDqS6zGqTR2tZptUoohuZYOueoQrb+/gcBroH7Dx45c3GRqy\nMzeXoN2WGRy04vf3fFa0WoHpaRfpdB2QGR114LaLNBfvUdlYQCr1KsTlnR30Dgf9p0+j16sJh3/2\neciO/bR0DrrBGB2lmkJTSz7/s43Lx4FOp0sqVaVeb2M0qnC5DO9LlDUakUOH/IyPO3efBd///gK3\nbsURRSVjY07Gx51Uq02+8IVh7t1LUau1aDTaKBTw1399heFhB4cPB7h1K8bnPz/MwICVRKLC8LCN\n1dUs58+HuXhxjWS8xOxeN9HVOFMTdiKXXsXps6Fp5Fl5/Qr5fI2+6SlKHS0NnZe5uwnazSY2l5UL\nJ52YDQoyDYmO3CKXbeHpU2EJBhh5+mlUZjMolWiNeozGBk6ngXJRwu0xUFpbopGXcFhUdOpVipE6\nyfn5R8jIz9pOoOlUiaVyFIsSGk2PjLTaQKNMtiUw6tLSqtdxjI6iezgi/LjxqSUjAFNTLrRakbW1\nHIKgZHjYTjj83lHRej5PdnmZciyGPtDPyv02737rKpXwMG/BiSzLmM0aHA49zWabbpf3xJQHgxYu\nXBhkZSXHllpJKGjGY+4iaSexDA7hCrrw7Jmm3WiQvn+f+vU5hixmqkY1rqCHfp+OiqGElMug93jY\nSUjEM1UMAYG1jSLlTIF9M0M4KnmatQquiXFsY+NkX3iDZHqJ1aUkokaFyaTDZHeRSlYZ9bp45ivj\nXLsWe5jLAKdPhx6avUGzDQa3k+xOkkKpRf9YAOV6jNCwB6UniNFVw2bT4+/rebVotSJaba90/W70\n9ZkYG+sJvMrlJgaDmpkZDx7Pez1ePgv44Q9/+JnKbfkgKBQKzp8/z8svv/xYyMj4uHO3TRoImJiZ\n+cU21v8cdDry7gOrWm0+1ALAiy+uMTTUi4uvVJooRCNdR4hcPEVLqcfoUjJs11FuJMls7bDnqVFy\npS6Re6uINTsjM/2c/9oRHiwmCY/NElfWKdfBMH0EW/c+oQMWFEY7L99ucO6cjJYGh/Z7sQd9GJp5\nyokEKys5Rmx9mFQ6VCYTGlGB3uOkWKwzMjtCsdJBMnhoqmvYTSqabZluF0J7J0juGHA6DVy4EOaV\nVzbw9xnx2sB01MuNOxkSiQputwGVSsBs1nLwoJ/+fgs7O2WKxQYhr8DmpW1q6fTutaokEiTv3aP/\nfaqCCoUCq99L+l7lYfW0R0Q0GuF9/Z5+WTSbba5fj7GykqPZ7KDVikxPu5md9X5g5e6d6bw334w8\n9KBqI4pKfvKTB8RiZfr7LXQ6MgaDmlSqyt27KbRakaeeGiESKaDVCnzrWzN02x1Ghm14vSamprr8\n5m9OoFYrERQyalFG2W0ze24/+0b13Ny5hyU8SKtawdBS4CxX8e2dZm01RyoSZ3tuEZ3FhFKlotly\nYQ/4EXRGSvEkNqcRg7JB9MoV1i5eZPLrX8ccCFBLJunv7zzMB9KiaJQxOyx02jmsohKtwYUl2HPD\n7bRaCKpeRpAktR9W9x5OpaFArRZ2409UWi1GtweL3YNlQMC7bx/mUOhf/Fl9XPhUkxGVSmB8vMd6\nPwitep3Im29SiccB6MhQi8s0tc5dBzvoiSzd7t5YVzJZpVjModeLOJ3vv+Mvl5tsbhaQpC6ZfJOd\njSJHZkO4LeCfGkNnsRC9u8D8G7cpra0gizr0Kh02S5FKVUFu/i7ZyA7VphLL5Cz+vftpWhxI7Tmy\nxQ5b0TpjITMGt4voeox4SaCq96HvH8Ee2iG9lcDgMjH2xEmuXo9z5KtDxKMSFouG4WE7zWab+bkY\nfS4ViY0YcanM5NgotmSaer2O3Gqx98QE9RtJuq0GFrcCUdNlcMKPzaZDEJRMTrq4di26+2U2GtVM\nTrrw+Uy4XAYqlR4Z+Xmy9lmBJEk8//zz/PVf//XjXsongvPnz/Pss8/y7W9/+xP/3adP91MsNpDl\nnuD8VwWbTYvbbWBzs0AkUsTtNnD1ahSzWcPdu0kSiQpKZYDt1STrt+apZPJQrXDkWD/GVoaBfgtS\nq8vQiJPctR0aUpuRCS/p7SQ+r5FQ0EKjXCEUfJqNTIe17QrqwB5K3SbRhW0OHQsjb81x7cYKa1du\n0jc6wL6nTvKj/+s5lPouBn8QvT9EeiOGaWya1RtL6B0OsskuiqIObd8gtXsPqOUaeLwmzIEApmCI\nSbOSmRkPdruO2TE9W2/fYPlWjFDYyf5QiLWikXZbxmgUdjdxwaCVYLC3eatmMkQU761oCGo1bUl6\n38A1i0XLiRMhbt+O706xTE+7PxYBayxWYWkpQ6cj0253qNVk7t1L4vebPnTjU6s1iUZLOBw6ms0O\nDx5kKZebLC9nmZlx8/zzq0xPu7lxI0Y8XsFs1hAImAkGLeTzDer1Jt/7/+5TLjex2Ax4fUb0Bg1j\nYw7CYSsXngih04lsx2psJxoo9GZ0Bh1yu8X9//YT6uUy3XqFwVPneP16jlypSzefJbKZ48tfP4jJ\n7SCWrCKbHFgCTsR6Dp3Lxf4/+AMGPvckLaMHfXicVmMOh6DE4TQgmKwIooiQfw1rOEw1mSS7tESr\nVsPU14drYgKlIGCxaDCZ1ORyDRpKI0aPC3+jRSHf89gyO60YgyHG9oXxej95TcgvwmfzKfIuVFOp\nR0aW2pUyI2Efc2sVeEhGlEoFo6M96+DLlyMkElXW1/PEYmXGxhw89dQIIyP2XUbeaLSZn0/t2tGr\n1QItrZZsXc3ktJ12vUZbkli5s0apocI5McnW7UUMqjIm4wCRK29TLtbp2IOY1Srq1TpqlZkfPxvl\nS184S3D+NsN+NYJKgWwNsHpriZazw81rEU5+7Qx7/3ACoZ6nVKjTUakYOBaipXdw+/Z9QEG12kRb\nz1De2UaYDjAc1BLPZllelhk6doZhL1gtGsION/qBHEv3duh2wR/2cOzkwK4WZ3LShcWiJR4vo1Ip\n8fvNu+2Yd/u0fFbx6quvMjk5ifcXxHB/VnD+/Hn+9E//lG63+wt1V78KvKPJ+lVCEJQcPuyn3e6y\nuprD5dJjMPQ0YJVKk0ajTb0q8eaL9/HoJIJjITQ1LW1ELOFBmmKKRFqi2NRiGQgzHehSWp5j4eoW\n9Vobz2AfwxfOU5RUlPIJNmMxgmGBTjyGUyvB9n1inQJSU0DrC1EuVNi4s8SX/t0f0RG0ZGsCy9kG\n7gMnMdV2KDUETIEAsaaDVy6ncdjUHDp3rvf/sd/N5laO7QfbjM8MMGBtYDAoWVid485rdyiVJDaW\nYoSHY5z/5jNIWgvBoJVw+L0mcGqDAe/sbC/npNzTkpiDQWzhMArhg0esAwEzXq+BcrmJTqf62DYm\n2WyNSqVJLFamUGigVvcqLsWi9KFkRBSVqFTCbitZktqIogKHo9cGSyarjIy0CQTMCHKnF5YoSQiC\nklarQ1vqsLyYIhR2sLWZo1Yooui0mB4/gs6mR9Tr+fsfLFKttjh0wMPw6BjF7U3iN2/g2zOF3mHD\n4PEy99JVFKZBmlKTdktGUIvkcjX6Tz9BR32VVqVCX8BC38HfxNzXR1NjYWVui+byIqLJjG3sCBDV\nyCIAACAASURBVE5TF61Bh9HrpRyPIyi6JO/epbS9vZsZE7t2DbVej21wEL1ezcGDfq5e3SFdahE6\ncIS+wQjUi6DW45qexjXY/5HGgh8HPttPE3omLu8OzupIEg5dkdNnJklUepkzw8MOhoZsRCJFlpdz\nLC9neOutns/GwkKaSqXF178+iSgqKRYl9HqRSkVClmXK8TiFzU1K29s0+t2M6oZRdNvkIjFsPhft\ngJFGfJuhPf2YfV4EtYjKaKaSqLC+EgeNDt9IP914hma5QhcrzpAPUSyzNf8AU1+bwZkhck0tOrFL\nPFrmeraDQqGlP+RmYtyDqd6GbherRYvLraddyJC4twDtJmLHQfHBOiabl05bg85sIjTbv0skTrod\n7JkN0m53sVq1j4iCFQoFgYCZQMD8yX5ovyb4p3/6J77yla887mV8YggGg9hsNubm5pj9Ofv0zxJ6\n+SNhRFEBKLBYUmSzvd2j1aolnqigVomEx3wI+R22bs/xIJVEODvFyOnDTH1hgrrGwX6dgvgrP6W4\ns41JC5VSh8xOCtNWkjfuNRjziyjqRRbejOL3qLE5VGjUXfKrCTRWF22VjobWjv3/Z+89g+Q4r7vf\nX/fknPPuzOaEsMgEwJwpKvmVr2RJVr1OJdmSryzLZd2SZVt6ZZe/yFWW6y2VZdnWdb2uUskqOcm6\nFCVmUARJgCDyZmyOk/P0zPTM9P0wwBCLBUASALkAyF8Vi9iZ2d7T093Pc57nnPM/A0NkYynmZ2KE\nuoI4tQYmxpMMbe9EM2QGn5+X//UMNSpUyzom9QbquRQuv4tIl5twrYYmM0bunI283kR2bg6f14gk\nyVQqdVaXMuSXl1B3Ojl3rin2dccdbeu+E43BgKu/H0GlopzJoNJo0JjNWNvaUKmvPk2o1aprahl/\nOS5USVarNcbGEuTzVQQBikWZarWOLF89UV6rVdPf7yKdviAhYEWlErnrrnbK5RqplMTKSp6BTiPn\nXh+jWq7SEzHS3emkb7iDeKzI4FY/TpeZSjpFeiGNUleYOTOLoNFSLodbi9DTp6O03efHWktiDwUR\nzXZimTr52WWMBjPmiAsFMNn0fOTTB3GbZKZPTBDu7cJqEtBaregtFkSzg9d//BTzJ8aRK1Wsbgft\nB/Zh29WLnhK587k7vuFhkpOTOHt60NntKPU66dlZVFotGpMJk9dLR4cdl8tAJlNGrRZxu7cjNOqo\ntNqbIjH9atz2zoje6URrsbS8fYB6qUh3t4Odl2QiR6NFymWZ8fE3kltLpRqJRIlDh+aw2w2kUhIe\nT9PrrkslyrFV1I0K9UoZj0vH6mtHmjLo2TT1YomVlw6xNrNMOlWibWsPj/4//zcLY4tYQiJurUw8\nKRNPVXh0uB1nSKJ09gg+p4a1iXOsHh+hnlgj1G6nVi/j8FhYnouxklSw2I30PNzNHfuCSCuL5PNJ\nguYSibUS1bVFSokEyBJ+YztE53D4Tey4axeRHd4NOxpvd0WqKApyqYRKp3vTgepWpdFo8JOf/IQX\nX3xxs015V3nsscd48sknb2tnBJrN8oJBK5OTSXp6nGQyK6hUzfJ1u12PUvFCOcGhfzuESq3C7fEg\n6nRU83m8HUHUBgOrJ08SPXmCYiyGyeEhFPZhD0dIZUu49HVGfn6IA7u7WY1bsHqdbIlocDQSvPTK\nKyiZMkN7hnFEOpAWp0jkBWLzKRZ/8VPCO7ew55FHSKZLnHhliqE7DZTKdeIpiZ4eHWqjGYEGC7MJ\njj97jLokEeoO8PCHXditWlIrcUw6A4MDbqRyjXS6zMpqEZ2xiNNp4Ny5FH19rg0OhKuvD0EQSM/P\ng6Lg7O7G+S7W4mezZV54YY5YrEhXlwONRiSbLZ9fJAkMDropleQ3Pc7AgButVsX4eAKzWYvPZ0YU\nYXExx4EDbTQqErOvT/LYY71orXZCIRNWq4Ezp1fR6FTo9FoK+TJOl5G1bBqnx4zLoWNpJY8auZVz\nJAowf24NR8SJyeOhEIthrckoBhuuLhfqTge/9jv3YjZp8JiqHP7BTyml84xXMrRZJAZ/9VcRtm9H\nypVYOjmCUqtjMog0ihnKkydYq2eI51ZBUdCazfiGh7EEg9QrFaR0mtTUFLVyGYPDweLhw7gHB/EM\nDmKx6C5Rt741xuhbw8rrwGC3037wIKsnTlDN5VAbDHi3bMESCGz4rNmsPS+ic7G2iApQiMVKrUk7\nkynT0WFHlBRKDT3FeJ49u/bhtqtYefYonqEhTD4Ls889T0XrwBBSoXZWqNTUzEys0HfvfuZfO45g\nyFFvpOm9ew+nJvJ0exXU6jJKchWHXY8SNqNSlamm4ugENb/yawdJNFzEkhV0GhWFZIrUApTGTxJL\n1RjsDDMXrbMUl7HYjWzd1glzp0hPjKG3mrEYRPTm60suKyUSrJ06RSkeb32Xju7um97rfrscOXIE\np9NJb2/vZpvyrvKhD32Ib3zjG3zta1/bbFPecYaGPBiNGgIBC/t2e6GQwiDKWEJ2XlEaLD57klq9\nQb0BJpuJfEWFLJUpxmJI6TS5xUUMLhcNWcbW0UZ+dRX5XByVMUCPv5O8UcvxnzyDO+TGYuulp3sb\nGo2TO37n18nLOvIVEY3bRHZxEbtNwdCuIY0ZSzWBoxFH57QQ6g1x8vUFdu1pY365iFWvYDBq8UeC\nnH7yBRBEanKN+fFFxoc6uT/spnv3AEeeHSFf05JMltCZjAQHupmOS7hcRmq1xrqutxdQaTR4hobw\nDA29+xcDmJ1NE40WASgUqoTDNiIRO1qtSCBgQaUSKRarb3KUZiiuKc/gIpstMzGRZHk5h9WqY9eu\nAIVYjJGXE7SHbRQLZQwNOPr8NHNzafxdIR5+uIszJ5eR7Wra/fqmIms+y85tbhRqqFQCSkOhPWLH\n6xMw9XgRDUa0CxOkzp4kObfI9v/ro1SMHkrH5/C1dTN38nXWVvJ4zTVoVCmsrhIfGcHZ08PK8Zew\nC1kEjUJ8IYqAQr60SniomwtnWy0UyK+uYnS7yS8vU1hbo1YuozYYsEUiSKkUsZER7B0d75ok/43m\ntndGAOyRCGa/n0o+j8ZguGJDp+5uBxMTFrq7HRw/voZKJRAO29BoRLq7Ha0qFVluoBcqGNPjFOfP\noitLFOZfwzTUjzUcRq3TkZycJJ8ucuqlMyiAM9xGpVTEePQs9o9/iD2/+gEajQalCqwmawRkgZC1\nwviRZZRaDbXdTvu+fcRHR9HoNBi87YyOZzl5apKVrJq6oOHBB5pdcRtylezIKaTZCe54/GH2DO5k\n7lCB+sIpMsuLIIrY2tuxRSLX5TTUymUWX36ZwtoaAJVcjoVMBrVev67x0u3AD3/4Q37t135ts814\n17n33nsZGxsjFovh9Xrf/BduYVQqke5uJ5E2E4svv0x6cQa5Xie7MklPYCvuuwdR8nFsVh0ajYpU\nuows10nPzbF4+DC5pSVcfX1Yw2FiZ8+yePgwrr4+dL1Gzj37HB2dncwelVganSE81ENuZhLX1u2s\nGgd57fmzSPkibpeB3nAvgdI4KyfOYCgWKawUSPZ1I+qMPPChezlxbAVBq6M7YqSSL9A7YOfEK1OU\nKwpaowm50CxJzkoq0mtxhu7ZgznYxpmj5+hUaQnvHiZRNdLWpqNQqGK363E4br5S9VSq3Pp3s1Gd\nzPJynuFhH7lcBUEQ2Lnz7eVv2Wx69u0LASFGR2O8+uoSXfY6EUeVXDSJVCiSKwucPXSaclWksBbD\npN7O/jtCaBplhvoHKaYyUDUgxWMYh7rJVtUUkln6ug3MLRaY/+lpVCoBq6OPXb9+kGFVkbnDhykX\nT7DzsY+QiuaQpTIhr5ZqSSbU7kRaSNKQZaKnTqHWamnkkqyOnUNtcdJAIF/J0ihl0dlsVDLNnrKF\n1VU67r8fnc1GfnUVnc2GZ2CAuiyjNBrUJAlZkt53Rm521DrdZTPCL8Zk0nL33R34fGb6+lwt6fLd\nuwOkUlKrqkSvV1NPrlJOp7Ho6hTSKaRsipVjx9j6qU9RSiaplUoYPW4EpU5dKpNfWcXi92ENhynU\ntAR37wYgEc1S1sQoZYsYQz4GP/YxoqdPozGbKcXjGD0eDA4HybqB//y7n6Cz2fHvuYPV5QynX5vj\nwFYjK8eOkV+ONZ2bRJw9n/gQJqeNasmNqzOCZft+BE+Y1YxA0NisgLkWSskkxYtK/6CZg5NdXLyt\nnJFarcaPfvQjDh8+vNmmvOvodDoeeughnnjiCX7rt35rs815V8gsLjJ19AyFXBmDXo3dUWP27DNs\n/eAjhPvbSCzGqFRkzFY9tqCfUiKBWq9v3vsLC7QfPEhifBzP1mbTTLWhgdcmotFB755BPJEAPb4G\nCy88R9nRxfhLM9TyBQRBT70Bo0fGCN7tb4Y9ZRn30BCOoa1kawaqdTWlTJrFpSK5ZB63XY3F7WhW\nVZTqSLk8Rr8Kg82CxapDbMjotCL3/eavMPholky+xtJyEV2lRqVSQ69Xs29faINC9c2A12vk3Llm\ne490usyOHX5sNh16fbNSZHDQQzh89Q7MVyMSsRONFqmWSphddop5AZtQIjOfxOa2U41L1OQasZkF\nDA4HkYAWITaNGI8jqtV0bO3Duz2ET5NBqjo5NS2TWkuDoMHptiPr9Bw/J7PHnWbsp0/i6urEYHgS\nb6QHQ7eTaCWGqBMxed1oQl3Ydm6nVq8T6vSTnJpCQxWjAdRmM+HhQbJzs3gGBlr2q/V6DE4ntrY2\nNEYjmdlZqvk89WoVQaXC2t5Odn6e+NmzmLxerOEwWuPNmax6OW6+O3KT8fvNuFwGhoY8FIsyLpcR\nrVbkyJFlcrmmRLDRqMZcqaCxGlBM3RicTqqlEqIo4t26lbnnn6ewtkZ4z37659PMvn4Wk9tN9wP3\nkNL46fV5yWQkqtU6LxxaJJM5X6O/muWu/Xeyfdu2ZmLs/DypqSnmXngBqfMgZouBfL6AGpn8yjJt\ngV6i5+bIrsWpKGqqxQJmZ5304grmUBjXYJh03cpzz0ygGCVs7UXaurzcc08Ep/PWuUnfbZ599lk6\nOjpuCvGvzeDDH/4w//3f/31bOSNLSzkmJ5sJkeGwjb4+FyaTllqtzvTIEmMjMZSGAgI4nQZ8PhOZ\nVBHPvruxda4gF0v07+nFrm+wcuwYOosFk89HYW2N1PQ0SqOBWqulXm6u7k2VOO2RbfTv6kEnyEz8\n5Cc4u7pIpitkZqcpJ5MYXC7qDSsNlQ7MDmzt7eidTvSD+3jxpSVi8QoqwwxaiwWnvoaxzYnBpKNh\nsDPY52VuuUxJzqC1gjVoYecdEYLtFqzBIKJaTTDsJggMDDZbX8hyA6fTcNOW4kcizZYcS0t56nWF\nQqHKBz/Yh9drQqtVYTBoruv4JpOWe+6JkEyWaGzzkE3mOfbvP6NcKLLn4DAvPj9NTZZRUcdsVrP3\n/q1YxBKVQgGt0YjJ621WZ8ZiNFwdHHl1htFTi6hUIsHeNrbs6qRWyRNfy1EoC/idHkZ++iTqRoU9\nv/kZhHY3k0fOUEGHEo4wdSxPuSZwn1VNcP9dmP1+NHo9zu5uapUKsZGRlu0qnQ7ftm0t58LR2Ul+\neZlGrQaCgLW9ndziIqnJSQASExM4uroI3303au2NFw98J7g578pNplkaZuJiZdwDB9ro7HRQLFax\nWnWoUgKzz81TWF2lWiyComDv7MTW1oYl0sXS5BKNiRnu+ORHidx3P7mqiqzoxOV1Mjufo3A+EeuC\nIwJQqdQZmchw7wEvTpMJUaslPTuL1mJBZzfi8Rpx662YbQaGDw5iMamhVqGYTOPv7yOZ1GPv7ETn\ndNF3107WRkY5fmSJutaEqDTIra4yUW42uLrrrjBm89V3ii7F6HI1E7XOh2mg+ZBcEOC5XfjBD37A\npz/96c02Y9N4/PHH+dKXvkSpVMJ4C62srsTqap7nn59tCfhFo0VSKYn77usgGi1SrGsRRZF6ow5K\nU8nSH7TR2e/nXFREY3UQDje1KHJzMyAICKKIo6MDg7OZvKizWEhNTyOXStTzeexd3ah0WlZPnia4\npb+pl1Gt4nAYqFcq1CSJSjZLJZejc+cg7cODuB/eh1SF0ZEY3Vs0uDNFFpcK5KsK/Xu2EF+M4ds6\nREeni0Khyu4dXhYmKgiKgtshUiukycwlsV2ySymKwmUVkrPZMisrecrlGh6PiWDQgihuXu6XxaLj\nvvs6iUYLVCp17HbdFVVXr0QyWWoptl6uV5ZKJeL1mqnLOixWHfWH7uClH/4MbTHKr3xsCLkOdpeV\nwTv7qTcgKmlxOENYnAZyy8vMvvhLlk6MILtiBIMepsZUyJU68WiBeLxIf1iP3+bEHQ7g7Q4j5mPk\nZs6x/NoxLDv20/fRHqSaitlakJf/8zW8bW5eeOYcj93tIB3N0HXHDqqFAo1aje5HH8XW3k4lm8Xg\ndK5rXGdwOOh66CFKiQRKo0E5kyF7cedzRSE7P0+pv/+W2bXeTGfkc8CFpdf/Bn54o/9AqVRFpRLf\nVA9DkmRisSKKAk6nHqt1YzxVrVatK3EtqNzUZZno6dPUazXcAwPYwmGy0RRJQ4SqM8Lo6Qns0SLD\nH7gPbd2GXjFQLteRpCoul4Hl5fy6v2GxaDGVVjn3i1OY7SYyMzPoz7d5VjR6hvb0EM+L6PQK6XQG\nm9+HLJVJSWocWguyzYmsNtC9dxsGixGVw4dUiREbGUVtspAVHBQaeUoVBVEU6Oy0o9WqmZ3NoNOp\n6OiwX1WwSK3X037wIGunT1OKxd5IBg6F3uaVuXnJ5/P89Kc/5Vvf+tZmm7JpeDwe9u7dyxNPPMHH\nP/7xzTbnupmby2xQEl5aypFIlCiVZPKKmbbhQZZOj1OXa6i1Guw9fQR7w7QPrV+N29rbcfb0kJmd\npQFoHU40bh+iVUYbTyKKIuaObgzd2yg3NKQWapACxRGiodZiKEfpHe7i6H/NYvZ52bK7i54uG9W5\ncWqmIWqVBsuvHGZ1ZByzw8aWXbuYi9awm1XseqyH9p2DRGNFRkbipGdmUAppGg2YmC7hs/Wj0WUp\nJRKYzuf7KIrC0lKO+fkMigLhsI32dhuZjMTzz8+1Spo1GpHdu4PvmPLt1ZDleqthoV6vJhLZqKJ9\nJVZW8szNpVGpRAqFKsvL+daxtm/3smWLd50zoygK2YUF1k6eRC4WsbS1ceCBLWSiCbRCFEvIg73P\nz/hMgdnZRep1BbNZy8FddlJHDjE9MkcxmuXUL37B9v/xOJ09HqbGY2iNBmq1Orv2hjEXBfZ/8oNk\nFhbwDfThHeijLtcZOzqOJOgwD+zily9PUW2oySSLuPQygnMrkUc/iCClUKsEHN3d6CwWVl8/TnIt\nhSKqad+xlY79u1uKqxqDobUQXLkkfA7QqNWoVd5y79lNZzOdkV8A/3Dehle5gc5ILlfhzJkoS0s5\nNBoVfX1OBgY8l+1Zk0qV+OUvF4jHSy0p+DvvDF9VW6NRr7P4yivIxSKdDz7Y1DERBESNhvFXTpC0\nbaUa3I7PFUYAzuVsRDrsUGxgMmkRRYHp6RSK0iw38/tNaLVq3IYKay+/jv+ObmaefprE/BI1qYyr\nrxff1i189DcfIFszsHB6EvU2O+OvTxIvC/Q9eC+VmoBUqtNx4A7cAwMU43HEcp5KbIV6pULD0cbM\nyXm0Viv2+wdJpcqMjEzR1eVoZbCfO5fi/vs7r3ruRrebzvvvv21Le3/wgx/wwAMPvGeEzq7Epz71\nKX74wx/eFs5IuVzb8FpT2bOBxaIjW2xgDW9lMByhViyisVgJ7uptDfoXo9brCd91F67eXnKpPInV\nJMeeeY3k/DKegI3e/gG0oW7i6gDPPTdPvRqkx21nyyNBJp58ivLxk+wd3saWnb+NoNRR4nOUzr5C\n3Okgt7RIKVfC4Q2xeKJOYm65mfh49/1YdXUcZhVWm55UuowoCtQqVUqZZgdtUSViMmqolSrU5TfK\nX6en07z00kKreubcuRQHDrRTLFZbjgg0k/JHRmJEIrZ3RXzuAlNTSUZGYpTLdXw+E8PDvrccQl5Y\nyHDo0Dzlcg2Xy8DPfz6Nz2dqSfqfOLGG12vC42kq7k5MJMkmstiVFB4dJOZirC3EadvSw9AHHkIQ\nRXQ2G3PRBufOLQJNR2l2No2htErq+DS1WgOXz4fZUWDymRd5+Hf/J1v39aLS6RkYcDO0o53ppyeI\njYwgF4uo9XoElYrgvjuISpO4B7fw/z27zNLEEg0E2jt7Mbm1aOxuljMO7n1kPyaThnq1yth/P8HY\nyTmSiRL1usLEyAr3ac3037F1w3dh9HgQVCqU+htVUhqjEZ311tGI2syZZP78/+vAxtHiGlEUhWPH\nVlqJUADptIRGo6K/f6Ns/OhoojUZA2SzFU6cWMXnM7U6816KlEpRWFkhMztLXZYRRBGlXqdWklBC\nWxBEAVljoSIaqUkSpZk4hXiSoy+MYol0kCuLbNvmJxg0s7ZWYGEhx8CAC1UlR1vESXF1mfj8KmvL\neQTqGIsyi7MxbHt1DO0I4yTFyIuvo+SSREsCWx6+m6KkI6hT49k+gFqnQ2+3U4mvMDzso1yts5Bp\nirRFuty0BQysxppyyx0db6xCJKnGxETiTUXOBEG4YkXSrYyiKPz93/89f/3Xf73Zpmw6H/vYx/jy\nl79MNpvFZrv2pMGbgfZ2GzMz6XUN3Ox2PU6nAZ1OzdCQh5GROPGqDoPBRNhlw+O/8jOg0miwhEKM\nLy8w9cvnyKwmUGl15PNVZqfjGBsuXpwscerkGhZNhdmTVRL7wux54BHcujILL78MCnTu6GN5fhJq\nNRr1OtV8nvz8HN47O7C3B0nPL5FPpAn6jITarLj6+gAIBMxEIjbyaw5KiTgC0DUQwKoqoTKZMDid\nQLOT9uhofF0Zryw3GB2N4/FsnPDL5RqlkvyuOiOHDy+27MvlKhSLVR55pPstJdiOjSWQpBo6nYpM\npkKpJLO2VsDjMbX6amWzFSSpxosvzlOp1MktLXF0dJJwXxBHQyS1HGV+PsNjkS7atg8CMH90svU3\nlpfzxONFQrqmfbOzGXbs8NPW2870dJJEvEBFZyJg09HbZSMxOUn01Cm056+D1mxGLpUop5IMP/4A\nE7N59HotZpsOrcGA1QAemwrKeUwmG1ZnM1RWjMdZnY8SixXh/G0rFSpMnzqHt697g0aMJRTCu3Ur\nyclJauUyWpMJ3/AwJo/nBl2pd56bYVn7e8B/3aiDpdPlVmvsC9TrCufOpTY4I7VanbW1woZjZLMV\nCoXqlVUFBQGNyYTebqdRq2H0NrcCLcEgmt5OFs5Vz39MpBCNUawKiMUySqPO7NgSxZqOYNBKKGRh\n2zYfarVAV5cdh2IkM5EkNRUlnSw0S8qUBrlcmcTSDNYdK/g62wju2UPd7MGyNUVV0XBuUUJvUeHx\nmbFYmslKOrO5WdI8+iQPP9jPWsNLPFbAZNKSTBSRZfVlna2Lu/G+1zhy5AiFQoEHH3xws03ZdOx2\nO/fffz//9V//xW/8xm9stjnXRUeHjWzWz+RkElluKg3v3RtsJUTu2hWkrc3akjT3+02o1VeWQIdm\naDeTqSBcpFis06mJxcv4I1rKpTIkF5k4t4TDZSI1OUlP4DFUbgO5RJbUuVmsNh1SOt2sknA4qJXL\nNKpVDKLM9t0R0u02dBqB/l1dePvf2KnR6dTceWc7bSEz82cdGIQyTl0FdV0itH9/q7RTlhtIUnOX\nxGjUYDWCihoqg4jdvjFfzGjUXCKW9c5zqd5JPF4ikZAIBq/eO6XRUFpSC/W6gtGoQRDe2PECUKkE\njEYNExOJlmqqgkAqJZE/ucQD97bBUpRyuUGu1KCUTqPWaPD7LSwtNXNp4vFm12BMbpw+F+fOpZif\nz9LT42DXwV4cXW1YvG4G+l0UxpsCeKnpaar5PFqzGUtbG8mxMSzt7c1dNbMNx30eHv3QEOlkAVGW\naGTjTeXY4WArZ0dUq5EqSssRaaHWXXZuUqnVhPbtw9HV1XRGzOabphvvW+XdcEZ8wL9e8toq8Gng\nDuAx4LKa23/4h3+I3d5cuQ8MDLB//346zifxzJ1P1rn0Z6u1GfOsVBIA6HRNBySXizI3p219fnp6\nBlEUsNt1pFLSus8bDGpisWWyWfVl/57R6UQym1EPDmLM51l5/XVyWi0hu53hoAPjSo58PoosSQhK\ng55uN4vnRukasqJZ0bK4lEeS4hQKApWKBYNBjSQlQdNAazRibWujbNKjeB3YBRXlmog+4mJpdYVh\nqYLgMFBR11lKppk7uQZ1GdHYwGmJ4Hb3tuyt2O14t28nt7CAO6BnLZolGjXh7PGjVpIMDqpbD+6F\n849Eglf9ft/pnzeTb3/723z+85/flL4sNyOf+tSn+P73v3/LOyNqtYrdu4P09DhbiZEXr7xFUcDv\nt/B2InNarRoEEVdvH8n5Veq15mSXzcv0uBwYdatko3EUmpoZWquBSj5HWShg79+C0W7DGgzSyKXQ\nW61oTCakbBZFUaiVJapLi4TCYZw9PXh6ulBpNOcn4Ao6nRqjUcvgkI/BIR+lVIp6pYLebl+nMWEw\nNAXdREHBUl4jdmSUqlSmZ1cvoe0H2brVw9RUikqljsmkYedOP2bzrVF5IYpNDahkUqJWa6DTqejq\ncpBKSRgMzWsbCllxu42cOdMc48rlGoLehKDVUZPrIIioNGq23jPM2omTSOOvozPosLd30hWxMzLe\nDKUbDGr0Die+e+6iIKupFPLY29uw9m0h0N9OX5+bQixGenq6Kcvu81HN56nk8zTm5iilUgT37kOu\nyphMNXKjp9HbbPS43RQqEu49fXQf6Eerf8MRNHk8tA12sTj7hhq43e9G7wug1V7eURYE4ZbaCbmU\nzZTNDNF0Uj4CpC/zvnKhFfLbQVEUnn9+bl2YRqUSuOuuMP39bmKxAqOjCRKJEm63gVDIysmTa6TT\nzZI8nU7F/v1tlw3pXEylUGD12DFG/u3faMgylkAAs9/f3C47cC9LqxKlbAF9JUWq0KBaAfeuSQAA\nIABJREFUKFLPpckXq6wVdNQsfjo67ESjBSwWHcGghXq9QcABViXL7AsvMntyHFlRI6uM+Hbupqa1\n8OHP3InJYaecyTD38hFyFRWVGtisekyqMs7BrQgmO0ajGq1WjZROk5mfRy4UUJwhojmRotQgGLRg\nNms5cmSZdFpCrW42wtu/v23TBiRBELiWa34jmJqa4uDBg8zMzGCx3HwdLTeDcrlMW1sbx44dazmN\n7wSbed2vh/n5DCOnV1Fll0lMTGIyiAjOIK6hrTz9n0eZPtrs/qo3aPCHbNy9143NZaFYBX9/N91D\nbUizEywcPkxhdRWtxYJv+3YAsotL2PsG6LzrAAa7nViswMmT0aai6vnQUn+/a0OliSQ1e7iYzVpU\nKpFMRmLu1ASn/vNJ6rKMf6AbWWsjmpJx9/YS7nDgchmxWnXvegM1QRD4538+sW53JBg0v+UwTT5f\n4dVXl0gkSphMGlwuIwaDmny+WSDQ0eHAbNZy+vQa//qvZ4nHS1itOuqFHAGHwmCgjslhRY6vEF+M\nMjDgplqtI6hURO65h4zo5uzZWOveLJVqWEwqusJGfCEXOoMGt7tZ7ZNdXOTcz3+OqFajt9tZOXaM\n1NQUBpcLQ/cWchoPY8dn6B0MMry/G71GAEXB4HQil0o0ajXMfj/WtjbE83l4mViasZdPE52aRTRZ\nMbV1YPF72bMntKlVT9fD+fv1ssZvZpjmzwEv8B/nf/4AUL7yx98agiCwZ08QnU61LoG1u9tJLlfh\nxRcXSKWaiVuplEQiIXHgQDvJZIlarUEgYCEQeHPJdJ3ZTKNWa7ZvFkUElQpZrhOfX8W7I8+2bSGU\nhofJ10osja3y6hOvIgqwdTiEtRFn6M5OkrKacNh2fju1uSX4xBNr7Nrl566HHsU4uJv4app8SUFG\nYHibC72laZuUSlFcmsdgs1HXeJiczlDTWjBnV6mrm4p9w8N+Ojoc67brIpecxwc+0Oy3o1aLuFzG\nyyb5vhf41re+xRe+8IX3HZGL0Ov1/Pqv/zrf//73+cu//MvNNuddIRYrEIuV0GhE/H7zVfMnIhE7\ner2aeNxJZOdWPB4jC8tFpqaS7L+7l8LiImZrlWDIztbtXuqJVaxdu2nUjXi6nNSTy+RWVnB0daEx\nmZqVOIEAaxmFBY2Bs6ckZuvLbNsJZ85EWV5uhpTz+Srj43EkqUo2Wz1fgWJrJe5XKnVcLiO7d/up\nVhusTUwTj+YIdQVIlHRMHJlCEEUk0UomJ3PPPZvXyfXgwXZGR9cnsL5VQbZmKXAHZ87EOHMmSjpd\nxu83s3NnAK/3jZw2WW7Q1+dGFFPUag26tkfoiFhRajW0Opm5Y6cJek3Mz2dJJJoJwTXzGGL3btra\nLMzNZVldzePxmAi12xnaEdigeaKzWNCazc0dkWyW4O7deLZuQwz18/TT08yMr6CuCaz87CQqh5cP\n/9aDyLkcM88911JYjY2M4BseJrRnD42GQlYSkJ0dWHe3Y7frcbuN+P3mW9YReTM20xn5vXfqwBcq\nYi4t7V1bK7QckQukUhKlkszw8NuvnhA1mlYcNx4vsrSYw2Q3wWiCxZdSBAIWqpKG+TNTKBWJYlXh\n7Nkov/rJXThJow34+Pd/HyOVklhczGGz6dm5089LLy3gMrURFAuEvFrqDbCa1fj7wq3qFUGlQmM0\nEq/Z+eXTY1Q1FsYnp7F4XDz+sZ0Ui1UOH17AYtFedaAxGDSEQtcnJnSrMzc3x3/8x38wMTGx2abc\ndHz2s5/l0Ucf5Rvf+Abq26xy6lKmppK8+upSqwzY6TRwzz1hvN4rL058PvO6lvY2pwmHw0CpUOY3\nf+8u0surxOZWSY1PEL5jN6LNyZ2DHjRKlamfjVLJZmnU68RHRqgWCtRdHRx6NU4unkZjMlK3BYgm\nmyv9C1itOuJxiaNHl+noaC40XnttmUjETjpdRlGgWMyi0Yjk8xW0qLDb9dT1Fn55aAaxIeNymxFE\ngVqtwcREgp4e5zv0rV6dvj4XnZ32Vjnu221XEY0WOX062tpdmZ/PUirJPPpoN0ajttUF2GrVEQiY\nqdUaGAxqLFY9ZrMWo1JCGHCTXM2wtJgDwGjSEIuXwVRkba1ItVqns9NBX5+TbdsuP0/o7XYCO3ey\nevx48zrKMhVHJ0/+bJGjhxeoZnNY7Eb6B4dJVo3kCzVK09MtRwSaHeaTExM4u7qYjzV45ZXFluq3\nTqfirrvCN6Vy7o3i9j0zwGhcH264uAHeW3n9zXB0dZGZmyObzDMzk0atFhFtHn7xYozl1RKdnXZi\na1n2bOmllJdQEBBFFfFkmfawnfHxRGvwyGQqxGIl2tosuFwm5tZquHcMMNShRWk0MLhc6ypYTF4v\nxrYORp+eQ5YblBGo1qDSUDM2lmDrVg/JpEQsVty0Vc+twp/+6Z/yxS9+Ebf76qG59yJbt24lEonw\ns5/9jI985CObbc47hiTJnD4dXadHkkpJjI0lruqMXIparWpVqNX63Zx4aRy70Yt5m4FkRcv8ySgG\ng4ZOn0j9vAaEqFJh9vkoqdUsLWWQpCqCKGDy+lBptSQSxXXOiFarYmQkhl7/Ru7AwkKWel0hEDCT\nzTaPWyjITE8nObg9wtEnj2DXlUmnSpQKEia3C8358aRWa6AoyqY1u9RoVFesXHwzlpdzG5Jgk0mJ\nZFLCaNS2zulnP5taV+K9spLnD/7gDswmH6rELKOn3mj9oDcZsHd28k8/HgUE1GqREydWufPOML29\nLvT6yy/e3AMDmHw+Krkcsqjj1ddi5NJ5lHodnd2BrBKRRAtaq5VGQ6F8kSNygXq1SrlQYnQ033JE\noCmIOToaJxKx37a717fnWV0Bj8eEybT+RjKZNJdVJ3wrWNvaiNxzD1q3H6vPQ89d+0jowkxPxpFK\nZSqVOtl8jVhKxuFxIIrNB04UBRpGK9VqA5/PjCCAWt18aOLxEj5fU3ekUhextrVhC4c3lNJqDAZs\nfYOorQ50Vgs6qxWDxURDKpCJZ5raJzQVB9/nyhw7doznn3+eP/7jP95sU25aPve5z/Hd7353s814\nRymV5Mu2p08kStecz5LNVRlfqjOTNrCQaIZXFAVGTy5QSiQo5/OtCcns82Hv6MDs8aAxGHB0d2MJ\nNjuLWyy6DeNWrdaUdr+AKArkcpV1z7sgKHR0OFhKQN8Dd+MLWNm+r5tAXyc4gkjlOoIAXV2OW7br\n9uXMFoRWbgJmsxZJkqlU3nBERFFohkGyFUSViuC+fXTftQ932E9goIuBRx9gMqpiebnQOr6iwMxM\ninT6Im0WSaIYjyNd5FQYHA7skQgNrZl8WWDr3h70NhsqjbqpipqRcJhE7Hb9ZTvHa81mBL15nb0X\nkKRaq+DgduS23hm5FLfbyIEDbZw8GaVUkjEaNezY4cPtvradA0EQcHR2EhTtrJlXKCoyz/3sl0jF\nMiq1CnVPU0CoodZjsJoglcdoMdA14Mcd8mOcWcXjMSGKzVXM3FyGvj4XDoceUWx2FL0aLr+T7l0D\nyBoztbU0Rktze7i3y4qcTWK3u/D53tzRatTrlBIJ6tUqWosFg/0N7ZF0WmJ+PkM+X8XvNxMO295U\n0fZWoV6v88UvfpG/+Iu/wGx+66vf9xqf/OQn+epXv8ro6ChDm9Re/p0gmy2zsJAlk2nmGmi1qg1K\nrc3FwrVN1KIobFgMFGMxNBqJpEWFxe9n9dgx6rUaZp8PTyRCeHCY5eIkhaxEQ64hqlQEAma2bWvm\nUqytFXC5DOzeHaBcrpFKlSgUZNRqkaEh97pJrD1kpLY2z3MvzzFxeomOgTaG7tyCabnM0lIOi0XH\n8LCPvj7XNZ3fzUB7u42JieS66+bxmNaN6aGQlUjE3soH8flMBAIWVKrmdTU5bAT37KVgjiDXIV7T\nUK0msdm0reunUglNKf/zmzCpxRUmT82xNJ/EbNHTu7WNju29rTC62axFoxHROSw8+PgWTrw0TqVU\nZfdODz2uEpmZaXQ2G0aPh1IiAYqC1mIhsGsXNo8NjydJPl+lUqmRyTQXtqGQ5bbdFYH3mDMC0NXl\nJBSyUiw2u9feiInV4TAg5UsUVpfxeU3EYgoOtwVVOU9HyM2OvRFScRehwW4GBz30DQVQ63QMDzc4\nenQFlUrAZtNz8GAbAwNuGg2F3l7Xm4qPiaLAjh1+lqdXOTYVo1AR8AXsVKs1NDoTB/b53lTAqFap\nsPzaa6Snp5vOiNlMYNcu3P39pNMSzz4728qzGR9PMDjo5uDB8G2RRPV3f/d3aDQafvu3f3uzTbmp\n0ev1fOELX+Bv//Zv+Yd/+IfNNueGkMuVeeGFuZbg4exsmmDQSqVSb5aACuByGRkYuPbQncNhIBKx\nMToaR1GaTn85FadnfwApudhsZrdvH1qzGXd/PxqrldiZM2z1y4yk05QSKcK7B9lzZwS324jP11wx\nq9XNhcq//dsI586lUKlE+vvdmExavF4N0WiR9nYbPn2BhZVJ+nu9jJ2cZXFinkqxyLaH7mD7dh+7\ndgVu+RBuIGDh7rsjjIzEKBZlfD4T27Z51zUD7OpyMDDgJpcrIwhNB/FC2e8FenqcKIrC6moBtVrg\nwIF2crkq8XgRURTwes10dztxOPRUCgUOPzPCa4fGWyXdY2dX+FWjifBAGGg25du+3cfRo8s0klH2\nbrfj9NnZ2mNESa9x9sWnMPl8WAIBXL29WIJBjF5vayG4Y0eATKbMyy8vks9XiUTsKIrC8eMr7N0b\numV3sq7Ge84ZgaZo0I1c3Vuteg7udnHqlQQf+8Q2jr98juzyKupSmeHBLu6+O4yigFotrlsp9fW5\ncTgMJJNSK3vfZHp7ZbUej4mONgP33t2GgEC1XCa6tES0bEZ7b+eb/n52YYHE2FhzHxKo5vOsHj+O\nyedjbk5al/CrKE156d5e17qkvSvRaCjUavWbMulqfn6eb37zmxw+fPh9XZG3wOc//3n6+/v5q7/6\nKzy3sJbBBZaX8+uUl8vlOolEiYMH26jXFdRqEa/XdN0iYLt2BTCZtMzOplFRZ1ugA2t5lWqt1uwd\nUi5Tr1YxeDwkx8dJjo3RKJXo1zWQVQqeioBebEeWdWg0qta4pdOJ9PQ4W31c6vVmD5oDB9rYv78N\nrVbN3AsvoFHqdHsVPvyxHUyNRZHlGopcZWjIc8s7Ihfo6LDT3m5FlhuX7Ujs85m5774Oxsbi5HIV\n2tqsDAy4141LarVI0FhCXZ2klpVwDAzx4P1h5haavW5sNj179gQxmbQsTsUYPTHfckQAEitpxs8u\nt5wRgP5+NzaLhilTFqFexaqWULIFVo8fJ7+ygt5moxiNUoxG0VnfUNiF5i7+tm2+1nzRaCjkcs1c\noGSyhNt9+ylg33yzxC2KzQhbIirio4fZ66xT9ZoQ61V8xQnqxa4rquF5PKZrzlm5wNJKkenjk+te\nK2hEauLVB1JZkpDOtzKv5nLUq01Fw2qhQCWXI5/fGCuvVusbEsYux8JChtHRBPl8hUDAwtCQZ12M\nezOp1Wp85jOf4Stf+Qr9/f2bbc4tgcfj4eMf/zjf/e53+frXv77Z5lw3l1MazuUqiKJIT8/1KVdW\nSyVq5XIzj8ugYccO//kGdApzL7xAOpZd93mjx4NGrye3tES1UCA+OkqtXMbcFmZG4+Ho/3kVUyBE\nd6+bwUE3RqMWSaqxsrJRPTqTqbQmWfV5AbRqYo1+t43OB9ppqLV07+3EEbh1epa8FVQq8ar5cW1t\nVtramomjl9vVzS4sMHfoUCupWHrpEB27djHw2BDVah27Xd8q563VBcrSxvyiYmnjuOgP2pD9auKj\nk8iNBmq3m1I8jtHtxnqh27kgkF9b49IMklisSDzeDC25XAZkucGRI0skEhJ79gTo63PdlAu9a+X2\nOZNNxuTzkZyaYu3EiVazIq3VSskwQH5l5ZqleavFInKphNZsXqeueDEdg+0sjC9SSqZQGnXUej1t\nQ93YXVfWzEhOThI9c4bUuXPIxSK+4WHkUomaJKHSalFrtfj9TSnli/P3zGYtVuvVnZy1tTyHDs23\n4rjpdJl0WuKRR7pvinyT//W//hcGg4GvfOUrm23KLcWXv/xl7r33Xv7oj/7ols+x8XqNqFTCun41\nRqMGq/XaBf+URoP42Bjx0VHqlQo6m43g7t1YgkEacpVKLod7YIB6pUJ+eRkEAaPbjX/7dgRBQGe1\nImUy1Mpl1AYDZUcXh56dxBquYq+oyeRkyuUad94ZxmjUYDRq1iXdCgLres44Ojtbz3clmwWyuHp6\nsHlv3RyR6+VK4eXU9HTLEblAcnycns5OXIH135cr6MTfHWLh7EzrNY1BR+dg27rPlbNZ6rKMa2AA\nKZulsLKCKIo4e3owejysHj9OJZtF73DQef/9G2zyeEyMjycwGjVkMmUOHZpHoxFxOIy8+uoyjQab\n0mX5nWLzZ4bbBK3JhCUUwhoKUcnn0ZnNGL1e1Ho9DXmjF/1mKIpCcmKC6Jkz1CQJjcmEf+dOnF1d\nGz67ZaufRGInSzNRanINp9fOgfu6NwjzXKCwtsbSq69SK5dR6XRkZmdZPHyY8N13UyuXsXd2YvR4\niDgU+vtdzM5mWqqOe/YE3zQPZXExtyER8IKX/2Z5MO80Tz/9NP/8z//M8ePH3w/PvE0GBgZ44IEH\n+M53vsNXv/rVzTbnugiFrAwNXZBDr51PZvdf1/Z3bnmZ5aNHW8+7XCqx8PLLBHfvZu3ECaqFAiqd\nDld/P77t21EUBaPL1VpkuPr6WD5yBGiW7p+azWJwe2nIMkqj6TTNz2fZsqWM293MjTh1Kkq5XEOl\nEohEmuGKC5i8XjoffJDU1BSVfB5bezuOri5E1bWV0d7O1Msb9TYb9TqN2saqFqvVwAP/4w4O6Y1E\nZ5cx2iwM7eujb0uzlUZdllk7dYrU1BQNWUbvdBLYsQO2bwdBwBIKcepf/oVyuik8LksSyakpfMPD\nGF1vOD7hsJWODjvFYpVTp6KtXBezWUujoTA1lWRg4PbZHbk9zuImwRoM4t22jWr+jUZ9Kp0Ok+/t\ne6+FtTWWjhxpeeu1cpnlV19Fb7Otu2GhmSj38CM9xGKBVsnf1eLBhWiU2vmHT2s04urvR0ql0Nls\neLdtw9bejqhSoVPBnXc2ZfSr1TpWq+4tdfRsNDaGdxoNZdMlv6empvjMZz7Dj370I3zXcE3eB77+\n9a9z77338vu///u3tFqtRqPijjva6O52UC43He3rDSPmlpc3LDwKq6skxsebFRM0n+O1Eyfofvhh\nbG3rV9KWQICBX/kVDC4XGqMR67yB3GwMpdFAY2o+z4qi0Dhftr99uw+/v6kroter8flMG3YeLX4/\nlrfTdOc9ir2zk9zyMhdvAxtcLgyuy+8i9fb78AVsZLMVNBoRt9vUqnTJzM4SPXmyJa9QWFlhpV6n\n57HHUOt0SIkElkAAjcGAqFZjcDoRBAEpmVw3thuNWu65J8Lqap7l5fz5HKY3du6aY+o78W1sDu87\nIzcQg9NJ2/79rVWQ2mDAt20b5msYDIqx2IZtw2qhsOGGvYDJpKWz861tMYuXKGlqTSa0ZjOOzk4c\nneuTXlUq8S0lq15MKGRlbCyxLrfE5TJuas5IOp3mQx/6EH/xF3/Bfffdt2l23OoMDg7y0EMP8Z3v\nfIc/+ZM/2WxzrosLVRI37HiXUaitSdKG57ghy+TX1rBe4owAOHt7URSFzNwcXYKa1aUUllBba/fE\n7ze3OrYKgrBBAfZ9rg1HVxeVfJ709HSzE7vLRXDv3lap7uWwWvVYrRsXZ9mFhZYjcgEplUJKp7H4\n/ai0WoxuN8aLRRYFAeEyO1Y6nZqODge7dgU4cya27r2ODvtNEfa+Udw+Z3KT4OjsxBIIUCkU0BgM\nG8TK3ioq7WUcC0FA1Fy/dLslGERns52PIzcxejyYvN7rPjZAKGRh//42RkfjSJKMw6FvVRVsBrIs\n84lPfIIPfOAD/O7v/u6m2HA78fWvf5177rmHz372s++r1l6Erb2d5Pg4cqnUes0aDm/oAg9XeL5p\nOhju/n6sbW14imXMnX3MzmVpNBQCAQs7d/pvy7LOzUat09G2bx/u/v5maMVuv6xz+VZQ6Tbm1Ikq\nVSs8ZvL70dnt66TgjS7XVcff7dt9NBoKCwtZBEEgErGxdeuNGa9vFjbzrv6fwO8AOuAfgP/3kvev\nqWvv7YKUyTDz9NOtuCI0k2S7Hnromh2ci8mvrREfHaWcSmH2+3EPDKz31G8A5XKNSqXW6iD6ZrwT\n3VsVReFzn/scS0tL/PSnP73t+6u8W/zBH/wBsizfEGXWW7Vr7+XILS0RHxujksthDYVwdHezcuwY\nucXF1me0Fgvdjzxy2R3Oy1EoVGk0GlgsutvKEbmdrvvF5JaWmH3uuVYoHMDZ10fHPfcgnM9TK0Sj\nxEdHmzvdXi+ewUFMb6FkPperIAhcd8n5ZnG1rr2beWergRpNSfqjwJ5L3n9POyMAxXi8FW82+/24\n+/sxOG9sQ6tGvX7TJLS9E4PT1772NZ555hmee+65W74C5GYinU4zMDDAU089xfDw8HUd63aclC5+\nrsrZLMmJCXLLyxicTtz9/dcUur3duB2v+wWyCwskJiaQi0XsHR04e3svu4i8mcbfd4Ob1Rm5gAH4\nOXDvJa+/552R9xo3enD69re/zfe+9z1eeuml98MJ7wD/+I//yPe+9z1eeeUVNNcRPrydJ6X3uTLv\nX/f3HjezM/J14LPAnwH/55L33rPOSDJZ4ty5FKmUhN/flCF+M22P24EbOTj9y7/8C3/2Z3/GSy+9\nRDgcfvNfeJ+3jaIoPP744+zbt49vfvOb13yc9yelK5NINMeCdFoiELDQ3e24ZbfoL+X9635lisUq\n09NpVlbyWK06enocNzTZerPYbGfEB/zrJa+tAZ86/28t8CzwAeBiSUHlS1/6EvbzWv0DAwPs37+f\njo4OAObm5gBuu58dDj/PPDPDykozxqzTuWlvt9LdLaLVqjbdvnfy587OzhsyOP34xz/mi1/8Is8/\n/zyDg4PXfbz3uTKrq6vs3r2bf/qnf+Lxxx+/pmO8PyldnkxG4plnZte1ZAiHbdx/f8dtUUXx/nW/\nPLVanRdemGdm5o18QatVy0MPdV9zU9ebhc12Rq6EFqiet+EF4ENA/qL335M7I6OjcV56aWHdayqV\nwMMPdxMO2zbJqneHGzE4XXBEfvGLX1x3LsP7vDVeeeUVPvrRj/LEE0+wd+/et/37709Kl2dkJMbh\nw4vrXlOpBB55pJv29lt/LHj/ul+e1dU8P//5OWR5fXnwnj0Bdu0KbpJVN4arOSObKUH5J8DzwGHg\n31nviLxnKZc3qrXW6wqy/Ob9YN7r/OhHP3rfEdkEDhw4wPe//30++MEP8tRTT222ObcNkrRe6l2n\nUyEIwoZJ6n1uL2S5Qa228Rpfqmp9u7GZe33fPP/fbcPaWoFotIBGoyIQeEOc6O3g9ZrRaMR1A47J\npMFuf3Pl0/cqiqLw7W9/m7/5m7/hqaeeYvv27Ztt0nuOD3/4w/z4xz/m05/+NJ/4xCf48z//c5w3\nuPLrvYbP1xwLTCYtarVAJlPGZtP//+y9aYwc53nv++uu3vdtept9575TokSRErVQsrM6RozEH2Ij\nyLEdI7GViyBGhMQJnFwguQFu7HsAx3EQGIgNO+fmHOUYtnUdWZaolaS4DoecjbP0LL3v+1JVXfdD\nk0MOh6RIihKH5PwAAepiV3VNv13v+7zP8n8wGu//EM3DRipVIRZr7bd9PstNm6M6nQZsNj35/BWx\nPEFQEQzev4rHt8K9TmC9GfdVmGZqKsWxY2FqtZb16nDoefLJnttWR2w2FUZH44yNJanVWj0zdu5s\ndWh80LkTt221WuXFF1/k3Xff5ZVXXllPVr3HpFIpXnrpJV5++WV+53d+h89+9rPs27fvpn2A1t31\n10eWm0xMpDhzJsbbb88D0NlpZ+tWL0891XNLrRnWMg/LuC8s5Hj77QXK5Zany2zWcuBAF11djhue\nEwrlOHkyTLHYQKcTGBpys3OnH43m/i4DXqs5Ix/EfWOMVKsir7xykXS6uuL44KCLQ4d6b3DWzcnn\na1QqIlarHovl3iiXftzc7uR0/Phx/uAP/oAtW7bwne98B7v9/o+jPyiEQiF+8IMf8KMf/YhkMsnh\nw4d54YUXOHz4MN5rlCYflkXpTshkqrz88jjZbBWDQbOcuLp3b5CdO69tOn9/8TCMuyQ1+a//miYc\nXpmF0N5u5fnnB5b72VyPSqWx3HfoTrzsa5G1mjPywFCpiCtaeV8mk6let2ncrWC3GwgErA+NIXKr\nKIrC22+/zWc+8xk+/elP87WvfY0f/vCH64bIGqOnp4e/+Iu/4MKFC5w4cYKDBw/y8ssvMzQ0xN69\ne/mP//iPe32L9wWViohK1ZoPrq6gyWZXd5ldZ+1Rr0sUi41Vx4vFxrIX/UaYTDoCAesDY4h8EOvB\nx7uAxaLDatWtSjDy+Syo1WvZ+bR2kWWZcDjM/Pw8oVCIUCjE+Pg4b7zxBg6Hgy9+8Yt873vfw3wX\npPHX+Wjp7u7mC1/4Al/4whdoNBocPXp0XQ33FrFYdJhM2lULWlvb/V3i+bBgNGpxOg0UCiubJTqd\n67k/17KWV8r7JkwDrRjf0aOLFIsNVKrWZHHgQDdu9/qkcatcdtuWy2VcLhdtbW10d3fT09NDT08P\nAwMDPPXUU/T23lnoa521ycPgrv8wXLiQ4PTpKNWqhFqtoqPDxv79nfe9+NnDMu7RaJF33llY9mY5\nnQaeeKKLQODBTki9HjcL06xZ0+zJJ598oJpCrfPBXDvm4XCYcDjMe++9dw/vap2PmvVn/eFkfdwf\nSvI3+oe1/Eu4rzwjt0opkSB05Mhy+2hBrye4Zw/ezZvv8Z3dex6WndLtED1zhtjZszTFVk6SyeOh\n56mn7nrDxHvJ+rg/nDwo4y6LIovvvktmehql2QSVCnt3N90HDqA1Phz5HrfKWk1uTplyAAAgAElE\nQVRg3UxL8Owt4MP3Ib9PSE9OLhsiAHK9TuL8eRql0k3OWudhpJrLkRofXzZEACqpFJmZmXt4V+us\ns87VlGIxsrOzLUMEQFHIz89Tikbv7Y3dZ9xLY2QS2A8cBPTAznt4Lx8b1XR61TGpVkOsVq/z7nUe\nZqRKBam2umqimsncg7tZZ511rodYLtOUrqmMURTqhcK9uaH7lHuZM3L16BmB3I3e+CBhDQYpJxIr\njuksFnS3UV3QlCTyCwvkQiFUGg3O3l7snZ13+1bX+ZgpRqNkZ2eRqlXsXV0Y29rQms3U8yvDrBa/\n/x7d4TrrrHMtersdQadDblypeFIJAgan846u1yiVyM7NUYrFMDqdOPv6Hqiw7I241wmsvw78n8BJ\nYO4e38vHgmtoiFI8TjkeR2k20VmtBHbuvK3YYnJsjPCJEyhyq19Nbm6O7oMHca5Xmdy3FCIRQq+/\njlipAJCdmyOwezf+HTuInDyJWC6jEgSswSDOvr57fLfr3A3m5+d59913CQaD68mc9zFmr5e2zZtJ\njo0h1+uotVrcQ0NYg7ff1E6q11l4913y8y3F3dzcHPmFBXqfeQbDA66ltFZ+/f8P8BPgF1cdU776\n1a/icLQkczds2MC+ffvWVMv7O30tVqtMjozQlGUGN27E6HLd8vlBr5eLP/0pS/E4AB59q7yvYrXS\nsW/fctnrWvp7b/V1b2/vA5HQdieE3nyT9OTkimNas5mhX/kVmrJMNZtFo9Nh9vnQ6O/vks5reVAS\nGW+VRqPBSy+9xPe+9z2efvppJicncTqdvPzyy7jdD37bh8s8SOPelGXKySSNYhGt2YzF60Wtuf29\nfn5xkZlXX13eaF6m64knaNu06W7d7j1jrcrB64DLfq2/BY4CP7vq3x/IapoPSzWb5eIrryCWyyuO\nG10uhn/zNxHu4AFYKzxIk9PtMvWzn1EMh1ccE/R6Bl54AYvPd4/u6uPhYRr3er3Opz71KQC+//3v\n43a7aTab/Mmf/Alnz57ltddeQ6vV3uO7/Hh4mMb9VsnMzDD3y1+uOh7YvZvg7t334I7uLmu1muYF\n4AjwJtAB/H/38F7WPIqioCgKepsN03V2T7bOzvvaEHnYsV+nwZ/ebsfguHEzrQ9D85qd1zofPYqi\n8LnPfQ6j0ciPf/zjZS+IWq3mH//xH9Hr9Xzzm9+8x3e5zt1mucrmFjA4HGivUZVWazSYr+nn9CCy\nVsI01+OB84wUCnWWlgpUqyJtbSaCQdtNGyVBq9ImPTVFdnYWlUaDZ3gYo9PJ4nvvUUmnUanVWINB\nOh59FL3N9jH9JR8N98NOqdlUiESKJBIl9HoNweDd6R3RqFSInDhBfn6epixjsNvp2LfvjuLON6MY\niZAcH6eWy2ENBPBs3IjxDhPt7hb3w7jfDb71rW/xb//2b7z77rsYDKs77k5PT7Nv3z5GR0cJBO7v\nJni3wr0Yd0VpPb/xeAmdTqC93faR9X4phMMkx8ep5/PY2tvxbNx4S3kfqclJYmfOIFYqCHo9ng0b\n8O/YgVq4vzv2wtoN03wQD5QxkstVeeONEMlkK0FRo1GzbZuXPXvab3re4rFjJEZH4dJ3odZo6Dpw\nAHtXF9VMBpVajcntvqP45FrjfliURkfjnDwZQRRbux2Xy8BTT/Xg8Xz4HjlKs0kllaIpSRiczrsu\nmFRJpZh59dUVmjaWQIC+Z5+9p+JM98O4f1jGx8c5cOAA77//Pn03SUD+6le/ik6n4x/+4R8+xru7\nN9yLcb9wIcGJExEajZZn0OHQ89RTvXi9d7fHVSkeZ/a111aE020dHfQ+88wt5XzVi0XqhQJao/GB\nqqRZq2Gah4q5udyyIQKt1tJTUxlSsSyZmRliIyPkFxeRrxK4quXz5Obmlg0RaJX1piYnEXQ6rIEA\nFp/vgTBE7geKxToXLiSXDRGvS4dTVWDu2Cmyc3NI9foHXOHmqNRqzF4v1mDwIzEOCuHwKnG9cjxO\nJZW665+1zhUUReEP//AP+au/+qubGiIAf/qnf8q//uu/kr6OHtE6H45SqcH584llQwQgl6szOXn3\nf/+FpaVVeX3FaPS6z1qjVCJ98SLxc+cohMMozSZ6qxVbe/sDZYh8EOur2IdAURQqqRRyvY7ebkdv\nvXHjo+u1/HaaFRbeehMlnwRFQa3R0LZ5M+1796JSq1GazevGGxVJahko15QCSo0GiiyvSxB/RFQq\nIvV6Sx7H49KhCo8zdvYCFpOAEGuV3Hbu3/+RVbvU8nkaxSIao/G6eUMfxNWG7mUURVmVuX8jmrKM\nVK+jNRhQqdf3MbfKj3/8Y3K5HF/+8pc/8L2dnZ38xm/8Bt/97nf58z//84/h7h4eqlWRWk1adTyd\nvnPByaYktbyZsozR5Vqee6/7rDWbq561Wj7P/JtvUorHQVEQdDp827cT2Hl3NUCXn12jcc2WkK8b\nIzehXG4QiRSpVETcbhN+v2U5x0NqNIieOkVmehq50UBnsRDcvRvXwMB1r+XzmZmevqKcqdWqMYpZ\nKtEwRmMre74pSaQmJnD09GDx+TDY7Vh8PrKzs1cupFLh6O1dsRhc9pakJydpShK2jg6827aht1hQ\nmk2keh2NXn9PFhCp0aAcjyNVq+hsNixe7327kNlsesxmLY2GjE1dYeLcGLIoYbebUZpNsrOzOHp7\nb1vvRaxWKcfjyKLYmtBMplXjlRwbIzYyglipoNHr8WzceNtxZGsgQPIacSa9zYbB5SKTqRKLlZDl\nJj6fGa93pQhffmGB+PnzNIpFjE4n3m3bsK6Lr30giqLwjW98g7/+679GuMWx+vKXv8xnPvMZ/uzP\n/uyWz1nng7FYdFiteur1yorjweDqTeTlub9cFnG7jQQC1lX5ffVSifCxYxSWllCaTQxOJx2PPYbV\n78fW3r6qlYPB6Vz2dEj1OqhU5JeWKMViy++RGw2SY2M4uruX39uUJErxOI1SCZ3Fctve8Nz8/HLL\nEaPLhW/btjVZobdujNyAQqHOW2+FiEZLKErLeNi61cvu3UFUKhXFpSWSFy4sey7q+TzhEycweTzX\nrYDo6XEQDhdYXCwgywp2uwG7NkPTuLKMT67XkS5Jw6vUagJ79qAoCuVEApVajaO3F8/w8IpzMjMz\nLB07tmx113I5mrKMs7+f5PnzVLNZDA4Hvq1b73pC5M2QajUWjx4lNzdHU5IQ9Hq8W7YQ2LVrzVrn\nN8No1LJrV4BTp6LI1RiyKOFwGvD6WvFmpdmkUSze1jXrhQLzb79NLZtFazaTnZlBYzTiHhzEu3Ur\n1kCAcjJJ9PTpZUE0sVIhPjKC2eu9LeVdazBIx2OPET93DqlWQ2+zEdi1i1xFzRtvTFMstowUk0nL\n44930tfXSmytpFLMv/32stu5ns9TKxToP3wYw32eNP1R8/Of/xxJkvj1X//1Wz5n7969uN1uXn31\nVT7xiU98hHf3cGE0atm508+xY0sUiw3UahV+v5mhoZWhkFKpzptvzhOJFJfn/i1bvOzZE1wxb6Un\nJlZsFCvJJJGTJxl44QVs7e20P/IIybExpFoNg91OYPdu1Fot0TNnlvtLqQUBk8ezInwj1WqIlQpG\nlwtZFImcPElqYoKmKC4LqrU/+ugtVU+WEgkW3n57ee6o5/PULz27N/Pk3wvWjZEbsLCQIxK5El8X\nxSbj4ym6ux20tZkpxmKrQiiNYpFaoXBdY8Rs1vHkkz0kEmUaDRmHQw9pgVBsdkVOiMZoRGsyLb82\nOhz0PfMMtXy+JTFssxGNFllcTFOtinR02GBxcYX7r9GQKWdyZGZepXlpF1zP56nlcvQfPvyxVU8U\nlpbITE8v/31yvU7ywgVsnZ1Y7tNStb4+Fw6HkcKSDm08iEmvoikrFAo1TGY9+htky5fLDRSltTu7\nmszMDMVwGJPXy8Lbb1OOx9EYjSiyTDWXY+DwYWr5/PJkcpmmJFFOJm/JGCkW6ywuFsjlang8Hrqe\n+wSCXEdntSJotRz/xeyyIQKtcNToaJyODis6nYZiNLoq/l3LZqmmUuvGyAfwz//8z3zlK19BfZve\nwC996Ut85zvfWTdG7jK9vU4cDgPZbBVBUOP1mpc905dZXCwQj5dxOo2o1SpAIRotkk5XsNsNVKsi\nJpOW/OIi0MpFyWSqSJJMWw3aM1ks3ja8mzfj6OlZNvwFrZbo6dNETp1anhOL0Si2jg60ZvPyM6Y1\nGtFeag9SjseXDRGApiiSnpzE3tV1S89+KRpdNXdUMxkqqdS6MXK/kMmszvGo1aTlmKPOvDr7Wq3V\nItxEsEiv19DZeWWxEk2dOHt7yc3Po8gygl5P26ZNmNrakCSZaLREqdTAatXj99vQaARmZzO89toc\n4+NJ8vk6Xq+JQ9s02GQVAjKRSJF0tkGfykR+YRFP0LX8sNXzecqJxMdmjFQzmRWGFlyy+ksluE+N\nEQCXy4jd2oO6uJ0Lb55iYS4Ngobe3ZvJy2auNkWrVZHR0QRzc1kUBbq77Wzb5sNsbhklpVgMtVaL\nVKlQvqSqKzcaSPU69VyOfCRGTVITT1Yx6FTYrHpU6tbuTHeV0XojyuUGb701Tzjc8tioVNDf7+KJ\nJzrR6DTU6xL5/OrfeqnUoFKR0OluMEVc0r1Z58bEYjHefPNNvv/979/2ub/7u7/L1772NRYXF+lc\n7zt1V3E6jTcs500myywu5vH7LaRSZU6ditJsKuzY4WdhocDi4iKVikh7uxVbU0s+X2NyMk390rpQ\naeoJxGsMX5redGbz8loh1Wotj8hVz43R6aQcj+PZsAGxXEZjMODduhXjpQ1tvVBYEeqB1kaklss9\ncP3I1o2RG+DxrJ7oTSbt8iJi7+oic/HilQ6qKhWOnh7MbW23/Blao5GuAwdwDw0hViroL+WISFKT\nY8eWuHgxgyQ10WjUDA+72b07wPnzSWZmMsuVObFYmXGbkc12A/VUgvn5PBqtFq3ZTCInU27kGN7g\nueJe/BgXEL3d3lr9rvpMQadD8wAk2ApaLYb+rRjTOnq6iwhGMwXFzDtHI1jsJtzu1u9nbCzJyEhs\n+SsYHU2gUsG+fa2JxOTxUIrFVizsglaLoNNRr0uEQjnSkoWKYGd2bIaODhudnXZMXi+WWwi5LS0V\niESuhI4UBUKhHIODLjo77eh0Am63cVWCtc2mx2JpGbEWv3/Fzg1a8W+Tx3NnX95Dwg9/+EM+9alP\nYb2DHajZbOazn/0s//Iv/8I3vvGNj+Du1rmWyckU778fZmEhz9mzMaxWHRs3tjEyEqdYbPDTn07i\n97fG8uLFDFu7g6TzF5cNEUGrwbtxA5NzFQLdNWy2lVoyiqKsmn81BgNGtxv/rl3ItRoGp3OFwJnO\nYkElCCs83ypBuGWvhsXvR2syrfCOrNVn96E3RhoNiaWlloiV1aqno8OG3W6gu9vO0pJ9OcfDYNCw\ndasPl6u1kBqdTnqefprc7Cz1QgFLIICjp+e2y2w1ev0q9c3oUo6pqTSy3PrhSlKTyck07e1W8vna\nioVDkpqkKxqcj2wmlC5h96pwdnei6xxEcy5EPlegXBaxWHTorFZMt2EsfVhsHR3YOztb7kxFQSUI\nuIaGHhg1wWi8wsW4GkFwIleaKEqrtDedruJ2m6jXJWZns8vzj0rVCtNkMlXK5QZmsw5Xfz+FxUVQ\nqzF5PC0xskulvfmqmkZDTzhZJ7hhFwPBduqZFM4t/QQ2DV03RKIoLZdyOFxErxfI5eqr7E9Jai57\n+FQqFVu2eMlma2QyVRSlZYjs2OFHo2klT5rb2uh64gni5861kuCcTnzbtz/wjbs+LP/5n//JSy+9\ndMfnf+lLX+K5557jL//yLx8aifh7Rbnc4OzZGNWqhF4vUKtJLC4WCAZt9PU5UakgEinR1mZBEFTI\nskKqYaXt0QPo/PPIYgNbRyd57JSyVWo1mWsfT63RiKO3l9iZMyuOu4eGcF7q0QUr1ySPS4+pvYvK\n0jxKs4lKrcbZ24vlFkXxLD4fnfv3r0xg3b59zYVo4N4aI48C/zfQBE4A/8fHfQPNpsKJExHGx1M0\nm60Z2+s18dRTvTgcBp58sodYrEStJuFwGFYJ45hcLky3WQcuSU0WFvKEQjk0GhW9vU46OmwrEqMK\nhcayIXL1eZLUxOezYDZr6QwacRplapIaq8eOYvfifeJZdNkKVUlgKirTuf8JihfHMNoFrD43vm3b\nMLpcFAp1dDoBg+HGw59Mlslkquh0An6/ZVVc9VbQmc10HTxIKRKhXixidLmwBoMPhJIgtMJuitIa\nm8uoVK2Et8s0GjKNhozLZcRo1LCwkCeVqtDb62Rw0IXR5aLv2WcpxeNY/X6ys7MoioLB4UBj62Jk\nromiNAknRPT6NozBALquLoyOKzNdLZdDFkUMDgezoTzvvbdEvS6jVqvwes0UCnVstivlxgaDBrv9\nymuv18Lhw/0kEmWaTYW2NhMOx0rvlaO7G2t7O3KthsZovOEYNhoSsViZSkXEbtfj81kuxd0fLhKJ\nBKOjozz99NN3fI3NmzczMDDAT37yE37rt37rLt7dOtcSjZYYG2uFvq1WHT09DrRaAVGU2bHDT6Mh\no9GoVqgp5HI1tG1O0iYBo1HDfEFElht4PKYVz5eiKJSSKfL5Goq7C1NfFTEZRn2pIMG7Zcvye5tN\nhZMnI4yNtdYklQoGe7sZ3t8DtRIGux1re/ttyQc4e3uxdXYi12poTaY1W814L42REHCIVrO8HwBb\ngPO3enI6XaFSETEYNHg8pjuqzkgmy8zMZJcNEYBEosLCQg6Hw49er6G7++72BhkbS3LiRHjZ2Jib\ny3HwYDe9vVfyOKxW3bL1fRlBUKHTCezc6UNVSnH+1bdIRNO426xs6N+L096F3W7g4lwRWW7tepdE\nPVufeZYtww60JhP5QoPXX58jkSij1aoZ6nfgMxTJz86gs1pxDw5iDQSYmEhx4kSYalVCpWqVvj3x\nROv616NRKiFeStLS6FYmaOpMphuWO9/vBAIW3G7jsk6BwaCht9OIQ1cjnSgwOZ1HpVIxNZVix44A\nb70VolwW6ey08+67i1SrErt2BdDbbC0p/8FBnFu2k43nkFRasskahUIEna618F82MPT61mup0SB2\n9izZ6WmakoTGYqNg6V3+PTebCrLcpK/LzMLEPIVEGnewjV1Pb6GtbaVhbbXqsVpvPsEJGg2CxXLD\nf69WRY4eXWJuLossK+h0Alu3etm1K3BfVk99GH7yk5/w/PPPo/+QmjNf+tKX+Kd/+qd1Y+QjJJks\nc/FimnJZJJWqkE5XMBg0eL1mhofdOJ0GrFY9yWSFXK6GyaRFrxcolUR6e3Vks1XGx0v4/RY6Omzs\n2RNAr28trY1ymaX33+f8uxdIxIs4OoIE9j6CdbCLrk4TlViUuddfx+Tx4BocpCzrmJ7OotGocdkE\ntEqdYqFKNhhg087BO/4bP+jZXQvcS2MkftX/i8BqNZrroCgKo6MJRkfjy8bIxo0edu4MIAi3Z/HV\n6/KyiNXV5PNXlDRTqQrhcIF6XcLvt9Debrvtz7lMtSoSi5UABVkUUQsa6nWZsbEk3d0OlKaMXKvh\n91vo73cxM5NBlhUEQcXAgAu328j0RIzyxFkMUpFAm442j4C0MI5jdx/Ovj4kSWFyMoUkNenqsrNx\nkxeDWYcoyhw/Hl7+zhRF4dzxCfr9Avp0DCUcprC4SODJw5w9G6dalS593xAOF5mdzbJz50rXYFOW\nSV64QHJ8HLnRQG+zEdy9G1tHxx19P2sdWRRRFGXZ4LLbW96zqak05XIDnzaHuHiKeMnKqVkViZJA\n14Yunnmmj+PHlyiVRHp6HAQCFppNhampNAP9Tow6BY1eTzxR5o03Qpw6FaFQqHPwYDcqFRQKNSwW\nPY2GTH+/k2KxpYGgzYfJnDmJ0dh6jIvpPAUpSfeOg8yEWiXpehoI2Ske22RF2dqDViViL00jVtqu\nm4T9YVhaKjAzk1kOCzUaMhcuJOnosOHzre2J8G7zi1/84q5Uwnz605/mxRdf5OLFiwwO3vlitM6N\nmZvLkU5X2bHDz7vvLlAsNhBFmT17gnR22i9VoiUxGjWEwwUqlQYbN7ZhMqgJL+V49NEOFEVBqxXo\n67HS5m2FQAqFGnPvnGDyzeNMT2daBky2hFqjIdKxBTkTpzl/DhSFYjhMMRzGvvNx1GoImqvEz5yh\nlM5hsJoJmB5B2dh2x0a9WK22CizWsFr3WrizbUAbMHErb04kypw9G1uOeVerEqOjCfx+a6vM9Taw\nWHSYzTpKpStljSoVyxNnPF7i9dfnlssetdoku3cH2bbt9gRjxGqV9NQUCyOTiNkGAx2dRBJ1QlMJ\nTD4/VZeR3GKY5EhLS8Lo8bBjy3Z6ehwUCnWcTgM+n5n5+TypxRiLFyOIDZF6vkApFsM46CB2+hS2\n9iCbNrUxNORClpVl6xwgk6kuL4DT0xlqpQp2bQ3HrwzT53BQTacRy2VS4QTV6mr1wHi8vOpYMRwm\ncvIkTak1FlK1yuKxYwy+8AK6NW6F3w5So0Hm4kUK4TC5S7oCvu3b8W7ejMdjw+MxUYrHmXn1PahW\nKZmdzE/OU6u31HB7h/1YLHqCQStdXVfyLEqZHEtnz9GMzWFp7+RM2MjoaIZYrPVdv/baHL/2a0N4\nPEZKJZFmU+HChQQ//ekUhw71wswEsYkEw8NuVNUChaUw2XKTjcN99Ld7WUhI6Bp5UtPTeHV+BIeX\nkmwgngNjOEX70N01Ri7nnFxNrSZRLq/+PT3IKIrCkSNH+Lu/+7sPfS29Xs/nP/95vvvd7z4U/Wru\nBZWKSKUiYrXqOHy4n0KhgdGoYetWL2NjSVQqOH06hk4n0NFhZfNGN816mUgoQW+/m4u/PIO+msJg\nEGCwi7zHjH1wIwvJJpELs6A1IisqqhURh9NIbjGC3dVBqlHFo9EsV8tUUimshTSdbQKzr75HPt4q\njqgWy6TPnaa4tQtb+817mV1LNZcjMTpKMRJBYzDQtmkTroGBNempvNfGiAv478BvX+8fX3zxRRyX\nSpw2bNjAvn37qNUs1GoS9XpLJEav9yCKTSYmppEkFz2XEoFCoRDAB77escPPmTNRMpkoGo2ajRsH\n6ey0EQqFGBmJUSxeKoutp6jXYWxMS2+vg0R0nkomg89qxeBwkKrVUAvCqut3d3cTOXGCo0eOEQnn\nCZ1aApXAjt95BpOlyvybb7Jp8NcYffs0oizQZrVQSSaJv/sW9u5uur1exFqBs69dJF8TsNn8aLQa\nivUCNaVKm1ZAatRZjEQo/fKXbD/4JAablaWl+RV/bzS6SCi0yIULLe0Una5EOp3i3KiTwYM2UpEI\nAH1KA4NBQ6kUWf5+G+UytXyWs29FGNiwAYvXSygUInHhAsIlQyR1qS+LJ5ulms0SuSTic7vjsRZJ\nT0yQnZsjdOQIlWSypZy4sEAtl6Pr4EEKCwskx8ZYOnYMi8+H2dWBSqVCrjdolEpks3W8XjOJxBWD\nTqxUsDRzNNNNsnNz5AsNZi5CMmXich8pr9dEPZfB4nGSqTUJRyuMjMSp1STOn09waGMQVVOk0chR\nX1zE0dODx2KjNHsRwRhl4/BupHyJYLsdxdnBWyfixBbSgEJfVM1hk/OWDXhFUSjF49QyGTQGA2af\nb5VnxeEwXFs8hV4vYDLd62nm42ViYgKj0bj82/6wfPGLX2Tfvn38zd/8zXW7/a5z6yhKK3R5OTkb\noL3dysWLaYrFBsViA51OQKNpNfFTlJbuCEC1VGX8bBZ1OY3ZqGHT1gDTR95GpwW5XmJ+YpLY6TPs\n//ynWTwdwmTS4vI7yYk1HtvfQyGRIbMYxu10IRh1mLUizUxr/rzcGM+ZjtPlcjFZbs3TgqDC7TFh\nM6kpxWK3ZYzIksTSsWMUFhZan5HPU81kEHQ6HN3dd+srvWvcy1lCQytX5E+BxPXe8M1vfnPVsfn5\n3KW4+ZXSJJWqtej39Fzp13HtRHCz1z6fmWKxC51OwOs1odG0jIqRkRrQWkAuf169LpFKFBFjOTSZ\nGOloFJVajTUYxH9VP4HL16+kUiSmZskvlhGTRewWgXS6RuzURYJ7H2HbIwIdXg3vv9YgnpbQGhs8\n9kQv6qUxCouLHI9EMHs8uIeHiZwbh44KA49v5/z/foVOnxUKSWrpMv5tWzj/P18nOhrDs3MPbR4T\nHV1X8l22bBniF7/IAK0feV2006jXkEWJUqWJ55L8eFvAxRazgVOnWomXtXwefS1Nj8GGPDHB7MIC\nnY8/Tk9fH9pUilg4DIDnUmxcLQio1Gp6rqmBv53xWEtIjQa5hQUqqVTLEAFQFMrJJLn5eUxjYyTO\nn0drNFIvFFpy6W1euvt9XDgzDyoVyWSZnh4HBw50USo1kKUmZlOTDT4T1cgUakGgnk4g5QQESYVB\nb6G/14atFCJ/Ms7Zs000ZiuBTbt4PVtFFFvJzGNhFeffmMeklXn6uf2U585RHhvD6HKTCccZFCQ2\nPHuIhJLmzGyR6HzLQFTrtJTqKkZGYvj95hUT841InD9P9PRp5Esy1pZAgJ6DB1u5Lpe4XHa8uJhH\nUVqdqYeG3KvyUx50jhw5wqFDh+7a9fr7+9mzZw///u//zuc///m7dt2HjVAox/h4knK5JRbZ22vH\nYNDS1WVn8+Y2ZmayiGITi0XL7t1BFAVkuYndrkctN1CqOfQqFYV4hYHdnXjsAqNYiIcyxGfj9AwM\n0d2rR63VkDjyJsmxcQyChLs7SGQpjs3roSk3EdTQ7tNjE5rkMq01IjMzg6DVUs/nEapVBrsMiCo7\nRpsFl9/ZagugUiGL4k21rK6mmkpRvkpqHlqiablQaN0YuYbfBvYA/9el138OHPugk/x+C11dNkKh\n/PKxQMBy3f4C11KtikQixUuhDyPBoAWdToPbfUUX4mra220rwhONhkytJjJyMsT08TF6hgN06tIk\njh5Bo9dTikbpOXSISipFYWkJtVaL0eWi2pCpSSokUYZGBY/biMNpQmOxsrXeREIAACAASURBVGOb\nnYnJNO//8hwGh5PgUBfTR8+gSoXwunSUIhFKkQiCTkd7j5+xkUl2feoF1If2UFkKUW1W8A/1sXBu\niky6wsIrb7BVa+L9SJUnD3TQvbGbto0b0WgENm1qI5mskM/X0Gj0DPYP4TZV0WkVsFhwDQ5ibW9n\nS5eA220imSxRizbRV2so6ZbRIVYqxM6dw9rRgb2ri/Tk5Ar9CUsw+LGWD3/kXFaPvaqfy2WkWo1K\nMklTFNE4ndg6OigsLpI8P8rmZzvRmzcynzdQKjVQqWDPngAGg5Z8OELx/Dih//cVcnNz2Lu66D50\niD2PBfE3XKTyMkOuMmf/9wiBNj16s57Q2UWs2QrtbYMIRjNnz0ZJWqporS4KqSjxVB1tvo6YL6Ax\nmtBbTDTSCaRSEfeW7WTOnECtEVBrddg7OzDYrOTzdUolEYfj5sbIZVevfLkrsaJQikTIzs3h3759\n+X0tleFuIpEipVIDp9NAMGi94xyr+5UjR47wyU9+8q5e8ytf+QovvfQSn/vc59aki32tEw4XeOut\neSqVBpVsgbdePU+g3c6GTT50JhPbt/vo7nagUqlwOPSYTDoqlQYOhx6Px4TVCGKjicGkRVMv0OmB\nRlPgzdcmsepl8pE0uUiCnoFnSIyeJzN+gchECKfXTi0Roe/pQ6RnZtj76U+RvDhL7fxRTP2tKpfU\nxAQGh4PAzp2tnjWKgtFswGm3ozWbWXrvPeqFAr5t26im03Q+/vgt5XvdSFFqrYoV3ktj5EeX/rst\n9HoN+/d30dHRKpF0OPQ4nUbK5VZi5o0mvmpV5J13FgiFcshyE6QGG7YEeOKJ7hvuDIeG3CSTZSKR\nIrKsIIoyXV125kanKFVEjr1ynOyQm163h8LiAk2VwPzJs6TmlpClJkajGsUtk9N3gF+Ht72TxLlz\nhOeTOFQmCuMh5IyRcMnYqvtWmthsejJjIbRqGbet5cJryjL5xUV8vgBOmwZBqpJO5BnYuY2Z//o5\nE2+8R7Um42jvJp8UKScTxKeSzHmgGZ9DFkXqhQKDAT/JDU5KFRlBENDrBXbt8NLXoW0ZTlcpswaD\nVrxuHRdnjq5qey2Wy0jVKhavl56nniI1OUk9l8PW2YlneHhVRc1aplCoLeuwXK+aRKPXYwsGaTYa\naAwGpFpL48XodKK2OJHNbgyeOrVCgY59+8gFAi3viMVEd1sATbpV+pvP13njjXmefypAY+o0jXRi\nuZ2AoNdjaAswF9OwFM6gNpgIn5+kq8uBILR2Z52DQfLFCo/utTMdU5gTRUrJDKpqFptFj9VuJjpS\npByKgs6IYHUSz8oE0ln6Dm6md3eVpsWDoNNdUoKcxdDlRlULADd3/YvlMmJ1dWfTZU/RVRiNWvr7\nH56259fj+PHj/O3f/u1dvebzzz/Piy++yDvvvMOBAwfu6rUfBJLJ8qXigNaG9VpvXCiUa1XKxNKk\nEmUWQmmioQQWoU4o0mBmpoOhIQ9btngxmXQ0SiU0ajVbtvj46U8mOPruAqHZLC63iWcOBrBqJcrZ\nHO1dTsqZPDa7kWIyhUCTSqGMWK2gNeiRGyKpVBb7Ugx7wE+jkCN+/gJSuxejzYJ/5076nn0WqVpF\nrFRoFAqIgoB/504MTidzb71LTdYitHVSrSvk5uaw+Hz4tm37wO/E5HZjamujeMl7DaDWaNakVwTu\nfc7IHWE269i0qY1crsqJExHOnImhUqkIBq088kj7Ck2Fy0QiRUKhHNV8gcLiImK5THZmhnanisHt\n1++yajZrefLJbjKZGo2GxMxMttU4T91SyKzWJCancwQe7STY1UMhX2PkX35EqVDG3d1J4Llf4/X/\nMYraZCaXyFGr1XnqyQNY+5NUtE6cVgFRbSAayeMe3oJSzIBWh9HlRFAktMYmpXgcuVZDMJhIRXMU\n6wLlYg2VLJJNFbl4YYlSvgJqDdu3WWlXa7G4HZSyM8hNFdmpKZr1OiqNlrJ4gZ7ODaQsTlSChm3b\nfPT1OZcNuGKxvkJrwm43oHc4VhkjepsN7SXLXGXzoB+2YtUJd1xifS9QlFYy6OhoglpNwmTSsn27\nnw0bVisTejZuRCUI9D37LPHRURC00NaNFNzM+bkaGslMX7uDSmQWi9dL29NPIzq6eOvnF7mmfRHZ\nRA6pXsfa3k77879BUG5SW5hk/mKMsyNFZKOd/h0b8JqcvPm/LtBswlIoiSfgYue+AdqCTty9VhKp\nKmZZhVdjQJVdorQYQqc00HcFUesNGCwmwqkaDW1Lw2bH7g7yJZnIVIjcXAizRU9vm4Olt15H9fjj\nN62A0prNaI1GGqXSiuNrUcXxXpNMJsnlcvT399/V66rVar7yla/wrW99a90YuYb5+RzvvLOwnCht\nNms5cKB7RbJ4Pl/j/GiMRjbNUrRKLlPGH7RTr8sk5hbR2lo9x2YmYxCfIT83g9ZoZK7hI5WqUMoU\nselFSrkip0a0dP3mMGK2RmC4j3w4jOg04en0Y/X7aEai+Pq7qFVn0KhBYzNisRioVhqMHp/m1JGL\nPPa7G9Dmm1iTSQS9viV8CKBStcQP02miY1PMj17E3tNLoaZmYTzG8LAbeyRyS8aIoNXS8dhjxM6e\nbfW8utTte90YucsoisLJk1Hm5nLLx2Zns1gsOvbtWz2x5vN1xFqd3MwsFpOA3m+nXm0QuziH32vA\nepWinSw3mZ7OMDeXpVqV6O52MDTkXl64DC4X2XyddKaOtaEQixZwBAeZ+K8jZJaiqLV6FLWGt356\nmnxdj5KvYx/agF1ukNM4GHxuG/lUlsWxELpqgd4eOxVJg2eoF0EDHb1umDlNI7WIyeWilExiau+k\nEo+y9eABUKtwdfhZijcYeO4wc0ePk1hMkcwr+LbvJtO0Mrx3E+3tVhqmQcr2VtJTMpwi/V9H2PYb\nnyRcUpNIlBgcbOXZxOMl3nlnYYUK5/79nXi3bm01RbvUZ0ZnteLfsQNBo2F2NsOJExFKpVbSV3+/\nk927gyuqeNYSiqIsG0uxWIlTp6LU6y2Z5Xy+zokTYVwu4ypxO63RiH/7dlyDg/Q+8wyLC3niuSah\nWI3z4xmkUgFRE2BX/yAaTUshMZmT0ek0y1VflxGMZkz9mzg1VmJ2voBarNNp6cHp7yL61iix8TCi\nxox+kxf0JuKhBNVKg3y+StMR5NhIls6gzPhohGee6cOeifD2T14jv6Wf/c9tZunYcQRDA0GvY+Ou\nnVQEO5LUpL3dxtNPdTKuSVPvHsDj1GCoZ1DrdCQnJmiUyxhdruu2MzA6HAR27yY5NkY1k0FpNrH4\n/Tj7+j6ikbp/OXHiBHv27Lntxni3wu/93u/x9a9/nfn5ebrX6ILycSNJTUZHEysqtsplkfPn47S3\nXwkRGgytqhVFlhEEFZLUxO4wUCzU8PltWMxajh9fQs5EGerQsf9RH3qbmXOvx5kLVcmUVagwEItk\nMZgMLCwVURQ1F5caWDQu2vr96DSgdbjw6LooRiPodw2QC8cx93lp372DQlGkMhNn868+T8nSwXsn\nY/Ts2Ii7v4daQ0GrNFCaTaRKBbEhUiiIxC+GWDo3xdDzz6I1mYlFSwwduHXlY5PLRe+hQ4jlMmqt\n9rbE0j5u1uaqcQtc3sVfy+Jinp07/asWRKfTgLpZp3PQz1K0ytxCmWC7DZPTSTEaXWGMzMxkGBmJ\nk8tVcTqNzM1lqdcltm71kUrNUVfUZKo6PEMDDHdqsetL1ApFsukKVl8b5UwBwWQhPhlHH+hGa7eT\nStdQFBCMItZ4GVHUYOnsQa7XGBz2YXeYiUaLCIKK3k09GPtMhN54A6O/g97uXhroUWplkufPEwtn\nyM7MgNWDbvNWbDseZ/NnOghXrBRMZl790Tv83n/bB9o677+fJDz+JorZgafDz5bBAcqZHLWmllAo\nz6ZNVRwOAxcuJJbFu5pNhWy2ytmzMT7xiQH6Dx+mnGiFFUweD0ank0KhxokTkWVNlmpVYmwsRVub\nednAWStUKg2mplrGpdGoZcMGD9WquGyIXKZalchkqquMkcvoTCbQ6JiNZ1lcLPDOOwuXUkrUvPp2\nnL7Ne9mxq51IpEg4XMBm02Gz6cleSjo1m7V4O1z87HSM//Xv55AkBalUYGC4jV/Z4SWWqpOvgFhv\n8NbpIruefQbH1BSldB73QD8LopdioUZTVvjkJ4cY7DEy/UqBbYd243FqSc7HMPZtYPjxHVh6hzi3\nqOH9I0vEUg22bPHisUG7o0FdztFIFNH4fETPnKGSSODbtg2dxUL7I4/gHhpa/pubTYX5+RxzYS0N\nzQAdW/S0+/SY2zx3XafkQeDEiRPs3bv3I7m21Wrlc5/7HN/+9rf5+7//+4/kM+43ajWJYrG+6nih\n0KBelzCZdMhyE1Fscui5QU6/M4HLL9Dd46Czw0p0LsbA1h7eH4mj1yhUQ4s0khrMDgv9G6yIlRqq\npoRWr2V2NoNaraerv40zJ8O0+aw88sQgEyOLZKpw6AkfbqeWutqPa2c3umYVo1mHpi1IKA5jmQyW\nDUO4TRqO/OwsKlMbCdHGqbdiWAxt2IqzKLFZchPnaYhNDN3D9O1/hMlfvkNqahLb4FZUiozzNkUk\nVSrVfSG1cN8aI1qtgE63eveh12vQaFYfb2+3snV7O//je0eZnohjsBqRNXqOnUzwq796RUyoUmnw\n6qszvP9+hGazJTi2c6cfk0nL8LCH/fu7OHMmitdnYdOT+xgIwOLZcQwOA7LBisntRqVaQK4UaO/1\n0bDYqepc1LMSarWKnh4HwaCVhYU8glaDyWKns8vJ1q0+Go3WezQagdRkFk1bOwtzObK/PI+7w0vo\njTew+b1kGgbcbZ1EZyP0P2GioPXyP49WmZ2P8t/+YDvbHuklW2gi2S0oYp3+HQMoGiPVagPZ5MDk\nbUNcbO0QasUy2XKe2FJLifZy7FWSFAqFOo880o7PZ13VyyCfr69oOw+thSsaLa0pY6TlQYswMZFe\nPhaPl9i2zbdK5ValYlnt9EYIghqjUcvUVHpFCataoyWerDIyEmNkJI4oNimXW9/P1q1eFAU2b26j\nUpGIxGtY/AHqlTolUaJUVxGJldi2t593XhujWBQxehycnW2i1g8SlbMUT0uYLGna2234/Vbiiwmq\n2QbeNhMF2UilVEYu5bG6HZRyJd5/M8JUQk17l5vZ2Syjo3F2bWtDzhnoCvbT1qeQHB0lOz2NraMD\nlVrdSk4eGcHa3r5saExOpjh6dGlZ8j6aktA6XAyvGyLX5cSJE/z+7//+R3b9P/qjP+LRRx/l61//\nOub1McBo1OB0GlfNRS6XEYOhVXUiCGosFh2CIPDYwUGUahFRgoszGfbuHyBT01Kvp0FSsNsMyLLI\n/GIZwZCjs8OMyqql1Mhy/rxEX5+Tnl4n46NVRs9GyWYusnmDAzUKbp+TQjaN2ugFt5+tW72k01X+\n+3dOcvp0jKXFHIosc+ipbjbt2cCF83GyhQbZVAm7W+bsL0/R4TcgqNWo5Drz759i46/9CsHtW9Cb\nDQR2bsbR0Y7lNgsEZFFEuiQFv5Zbcdy3xojRqGVoyLNCWl2rVbNxo+e6Saw6nQZXmw27y8TmPT3U\nJRXZeJZf/DxG+0AQnatIIGAlHC4yMZFeltSW5Zbi68CAC5UKNmzw0N3dkvOu10VOTxUxGHw4rVp2\nH36M8dPTdBx8DpPTxobeASYjMotLJaymIv42A08+6ibQ3cbmzW3U6zIWiw6nU8/YWJJUooBFVSEy\nn8Rm1bEwEWNxJolGLdPZ40YsFUhn7RicJgqVJpLWzPx0gvFkgWykzv59Q0iyCpvXg7fDgTYfY9tm\nD7pmldT8LEuxEvo+I/7gFirlBDZDlXponFwmDTmBas2CoDRxu/TE4lUqFZH5+RxerxmVStXK9KaV\n1KnVCmi16lXeBbN5bTX0ikaLnD+foF6XMejUNEolaiqIR4309jqYns4uv9fvt+D333yCFwT1qsot\ntboV1qqU6rz202kSUyGsdhO29gAmtxuzWceOjVZyczMsjodwF7Mc3BXgvfMashkr8XiZVKKMy6Lw\n27+7HbXJStdggMnRRc6eDpOOpBje2sm2ne1MXohQSGbJJIqcr+p5tKsdlVhHr7Gg13gR5CrW7j6K\n75fp7mvDZNIyNZEksRBj35Ca5MUL/Ow/Z9j51E5MUhVLMIg5EEBuNFBrtYjlMo1SCZ3ZTL0uMT6e\nWtF7RxSbjI8n6etzotWu3YntXqAoCidOnODb3/72R/YZfX19PP744/zgBz/gi1/84kf2OfcLgqBm\n+3YfxWKdbLaGStXSu9m2zbuiJ9LGjR6SyTJTUwXSiRo+t4ZP/vo2mhoNk5NZDjzqpcOjolGyMDcV\nQ2820NHrxWLR016T2b+/m09+cgBQtTwtElw4F8HjszB+LgKo2P7YEG5vkGZ6icrcBAndLo6Ni8xP\nx9E1q5jUdWRFYfpiS6hw9yPdVCsSZlUZTbmEupIln9Dh0mloFPI4zQYapRIFrAxs3YHi6aF94Paa\njGZmZ0mMjrbENC81ybP6/Xd3EO4S960xArBpU2uynZvLIghq+vqcdHffOJ6mqAR8nV4WQynmzi2i\n1gjoHW5KVYXTp6M8+6xxuSQxHr8sDtaSbNdqBRwOA01JQi7l6PBqSWdlQpOnmRyZYlyjsO/xbg78\n1pMcO52hFFZoNzXZvMXPzs016ukUZlUVJTXPmfk4sbIBq93E8LCbRKLEyEicHnuF196YYuT9Wbbv\n7WHD5i2oE2cQGnl0ZhMdO7Zx+myc3RvtmLRN4vU0bR1u0k0Rj0PDcLdMWWnSMRAkoI0xMjqGw2nk\n2H/8GJVKhWt4A9HRcZzaGuqmQCG5gNrnwrvnEfYGzbz30/cJXQhhtlt5csdWXIPtpNNVIktZmpk4\n1aVZpFoNV38/ruEN9PQ4VngIHA49PT13t5fPhyEUyjExkeTChSRGQUJJh9GrRdRqFS5jg1/97b04\nnUZKpTpWq57eXicm05VKoFpNIp+vXWosd6XipLPTxic+McjISIxotIROJ1CriZi1EhePXSCXymK3\nanFFw3i3bSPjbDK7dIzpV1+lafFQjMnMRc+x8fEnWAobQaVh564gb/w0hrohMDueRJAq7Oo3s3nj\nNnIFkUalit0GffvtxBcSuDZ5UBTY8olHkBaDJM6dJTszg62zE1FqUlicx20yUFYcxJZS7NnqZObN\ndxg7PoZer2Xy1BR9PTZ0Rgv5+Xl0l5JULcEgWlOrzF0UmzQa8qrvtV6XkaTmujFyDUtLSwB0XqOx\nc7f56le/yh//8R/zhS984b5JGP8oaTU17aZYbKBSqfB4jNhsKyvEXC4Thw/34/GYmJ3NEgrl+feX\np3G5jDy2WUfh2ClO/jKM02lkx74NGId6OXoiSTaextdmoFKus3PfIK8eCdM34EGl0eLr9mI3q2g2\ntBx8biO5skKfUeTkkTfZuaeLqgilTJ70QoRqNo9Rb6KOCr1WRZtbT2fQRDGVRhMdx9DnxVJPICha\nmg4TOosFRVHo3thFz14nzg2bcXnt6HQalGaTYjRKOZFAYzBga29H0OtRC8KKrvHFWIzFd99FrFSo\nViWKySz1YnHNKmTf18aIRqNmYMDFwMCtlRJ6PCYcXieTc2VsHe2gVuNyGbFYtCuaIMlyE5NJSzxe\nRq+X2bTJw/Cwh1o+T/j4cUqxGHq7HYveSXpqEptZjcGgJZsqEP35UWybHkUuQ6mqcPSNCXb0gjEz\nw//P3nvGSHbeZ76/OnUq5xw7VefcM8MJPYHkcGaYRImStbIsy1rJlrW+Nu4H+9PC/mLAhgFf4AIX\nuGvsDYvFtbUrrS050AyimIfDiZzUcTpWd1dXzjmn+6GpkShRwVqRQ0t+gP5QB3X6PXhPnfM+7z88\nj+ByEfAXyGSqqPtHiEbb7O/n6O014nGqKEcTLN8J0G53SETyDM4N0XCNo9cJFJQKvI+4YCBLPlui\nSQ3v0cOopHUmbVUi4TyRW2GO/9Zn8Hh1ZG/eoW/Iwc6125TLLbrdDqZSDrtDz8abl5l65jwydQ/Z\nVIHWyhYaixFFK49R2UTaLdHZX8Uy28/d9QrbdzdpZpOMjNroM4uEb96k2+1y7NgUDoeWSKSITidn\nYMD0gXotHzXarRapSIq3X9tFrdfgduvYvLZAai9Mb68Ri02NVdtl8bVrlMyjtFod3G79+wjH/n6O\nW7eiFIsHDsdjY1ampmxsbmZYW0tSKDTQ6RRMTtpYWUng67WgpYTJrKLTqCPXqhG0MnQq6HWK1Dbz\nSEURSTWPWaOh41IgyYU5c3qG3j4THamcseMTtBotJLIEXn2dd69E8W/E0CqhXS5gtOg498k5wrdf\nZu6zn0RqcbO2lmRi7hAOiQSdy0Vma4vqzjrKdpHE8jKmmSO4rTKGvHKq2SKHp0z4FzbJrcRQTj6N\nRmeg2BxgdzeDFDUzngkk74VyNRoZTqeWQuH9OXm3W/dzuTj/smNpaYnZ2dkPnSCcPXsWQRB44403\nOH/+/Ic61scZzeaB99H2dppWq0tPj56ZGcePNXwUBAmlUoPV1eR77b9qtIou6aWb2HVtVEMmqtUG\nS28vMSwaWbgRxu7Qk6+JdJGwsRbnNz8/wcJqht/+7TlWluKk0yV8/TosQpFCcBNDTssnvnASudCh\nKMgwyWt4XBqW90O0s3mkCgVjvR5cuiaKcgK5UCZaTNMtq3DPTrJ/+QpKqYl2vY7v/Hm8D/2o11di\nZYXk2hqCXI5CpzuQVigWUer1WMfGsAwPIxEEStEohXSe/f0ChUIdQQK2RBnnoQSWfyMjDxZ2u4YT\nJ7zE4yXa7S5Go5LZWQflchOZTEAQDgpjh4bMtFpdNBo5KpXI6dN99PToCb5ziXzgQGa902hQ2l9F\nJzZQm80YVG2k7TI7a3uM9Q2QWE3cFwXTJKT49AX8l64RykpJ5DoojPfoPfc4m4EGOmWHo0fsrAW7\nlMoNJg4PIZEp2NnJMjzVQy5TJZdPk6vriCahmmvSrLURw3Xm+qsUtrcw9/XSO+NkasKKRBDItNtY\nLRpyFjU2m5pOu40oaeNf3CKbLjMm07L3xitE7m0yMDuK0mTEO32EZldGKhinKyq4dWWHaEmBkAzR\nrte4U2qifWwUt0tKIRzGNjHB2Jj1A9thHxTqhQLBa9fI1BT4ry6DTMmhc4cp7WooJpQIcjmTc73U\nUzFi8Rzmk73EUy3y+SBqtQyXS0exWOfatRDNZgedTkG322V/P3/fo+IH0xaFQp2nznkp+jco+bd4\n5nOHeffaPtlQmKHDQygMJu5u1JA0B+h9fBxFcgPJ4gJakxWJUYHrrI9ytUU5W0DRLDA47KGSLyCq\nRIL+e9BuEQ0WEGlRzJQJ7OWQd5vc+dbzDD/1OI4jpygk0qRXt5GWUxSCQQQxyiPnzrOVVVNqg8+r\nwK5pcevGJZQGAyOHRoj4g2QjMZSPXODqP9+lkMwhVVTZC99AojtoI5ZIJMzOOqhUmiSTB8XiTqeW\n6el/Waj4VwXLy8tMT09/6ONIJJL7bb6/ymRkczPNrVuR+yn1tbUUnU6X06d7f4QQtlodtrbStFqd\n91S2BarVFg5Nm3ff9dPr1jI+YWVnI8bueobYhp+x8X6WV9O88OIWs4e8JMNJulIZR+Z9DHjkdItS\nShYFBmkG/2tvImnV2Ljix9TXS/9j51ArZTQSYR4/34deKyUeLtDrs3D+rBerrIxK2iRw9y7Rm9dI\nXq9h7u9j5nOfQSqXUctmye7vE1tcRCKVonO5aFar1HI5qtksrXqd7Noa1XQaqUyGbWKCSjJJMJ1G\nEEXMg4N0u1329vKkU5X78xAKFchka3xUVX2NUolOu41Cr/+pJP1XiowADA2Z+fznJ7l7N0qz2aFY\nbNBotBkaMrO0lOC559axWFRMTNiw2dQ0mx0EQUK1UKIUiyHIZKgtFmQaDc1qlbEhA9VqldT6BsgU\nCA2BeqmMqFBSSSbJB4OMPPkI8Vf/gUoiSaeqwKq3EIjn4OYCobyBIw+5WFmOo7a5+Np/fJblpQj/\n/A8rSCVw/jMKZofVKO1Wvv31a1TiccxWLdaBHoK3F2iMH8U91kKtEtDo1YgqFTK1GqXRSC2XwzEx\nyvr1ZfK5KvbZXobG1DiGBxDaDdQOF2MOK3VDD1ev79LaXGT0xCyOcT21RpdYNIdloJ9c+qAauxSL\ncfutKqpJEa3DTrf9oyH8B43U+jr5QACJbQBRJmXXH0Vv8TPeL+LQuXDaVbQTeySzBdQOB21EoEWt\n1iISOagbymarCIKETqfLzk4ao1GF16vH789QLNYRReF+iiK6n8YhZklu7KFr5mktvY2PNoozU4Sz\nDXb3Q0hkCtZXY7RaLR4728fRU4/QSkVpO4col2pU9zYobG+ikXeRqnM8fGqEVrmC0KjQbbRRq6S0\n6m0GjkzhnR6hpjhHsSlH63DT3LhNPp1Fa9Yhs/pQ2Z0gU7C6lUawqlHVszRSMdq9HlyHDxPZSyI0\nQOnup+ehOS6/ukhqL0S72aTdaFCQSFi6NcHEoX4UioPiwMcf9xENpGiUihh1IkpJA1A90Pv8ccTy\n8jIXLlz4SMb64he/yJ/8yZ+wvb3N0L+wu+KXAd1ul+3tzP0mA7NZRavVIRIpsrubY2DAeH/xi8dL\nbGykePHFTeLxMqFQgUSixMSEDe1JK+OTLmqFEvVqA5lCjlytQm0xUxPVLC+t4HAb0Zq06I0qEukW\nxVyVF68vMXVogD6blI3vXKYeDeD0eZDqfOTiWaIbOwwNTmCyG8lsLPLYmJ7mlAV5u4ahvM/WUpip\nx09S3vcz+cjRA1VUqUglX8Q1O0tqJ0xhf59Wrcb+lSvYJiZIrqzQKJeRKhTk9/Zo1+skVlZoVSoI\noojn2DGqmQyZ7W3Mg4OIJgcdUQF8n4xYvE4SJZEP2/+5Va+TWF4m4/fTbbfRuly4Dh/+ief8ypER\nAK/XgCBI8PuzaLUKensNVKsNrlwJ0Wi02d7Osr2dZWrKzsiIBUGQvfLO2wAAIABJREFUIFPKkdtc\n5IsS9kJ5VGIet8NOPXGRZrGIyayiXixz6MknuL20ByY3jUqN/lE3KirsvvkWCpOFTF5KS8zQNzOD\nqVfPEV8PlVSQYKFNtiwlly6hVUl5+LidoalelPUUqy+tceJzT1GrtwnspKjVW9hnZrDPHabcVVKM\nNxBkSrRzQ4TuLOIYHcJz7Bh7l69SSVY49Ou/hlqnIB9L0iyVqdZaiEqR9Poq7vPPcO1iiNB6lFYb\nqoKWgQEzR04MkNxoIZHK0NgdxBcXEDs1PCOTZMUWpbyAOZLCNdT7oG/nfXxPqRZA1S7gG3GwtZHE\nfy+E6+wwuVtX0NXVZLNV7C4DtvEJwoXv6xN8j7iLooBUKtBsNBkwN4mub9BsGFGZXYRCJYrFBm63\nDpNJiUreQSZABxHL1BwF/xqdnXUUvhGkbaDT5sXntmlJRCTNGiPjTsx2N0anG+9wD7Jals3oFjIa\nbC2FqRUK+PIxRHsPntFetq8vYXWZmDs5Sy2boR3coCo1YPdaWLl0C7ndi1Kup7gXQ9MukA+FMJ77\nLMlyA0HWYeeNy+RDEZrKZxl76Dyp7ZeI+DP0Hz2E3Owgmzl4WXxP6l2Qy6lW6tTr7fvt8cX9ANnr\n12iWy+QApclE/yOPoLH/W4TkB7GyssIf/dEffSRjqdVqfvd3f5e/+qu/+kAPr18FKBRStFo5er2c\n27dj+P0Z5HIpuVyN06d78PnM75GTLLlcnVarQ7FYJ5Op0elAsdhAYTShbPUjdLe4dTuGQSugtZpR\negeoR9r0DdoplttsrCfxDVm5fDWEzSKnmytz859fR90tI43uIMgVyLQ6FFotbWMv+aaM6H6KyfkJ\ndmRtavEwNjOolDqKsRh6vQKpVIbG7WHx6/8N97ETuM89TTzTJHQ7ikrtYPRzs4hKGdndXUSVilou\nh9bjIbezQykeR+d203v6NOEbNyiEQnhPnAA4UHAOF0jkBAYefxJHwE98awet3YZ+aJxS/cO3Z8hs\nbxO9e/e+nUZma+u+4vSPw4MkIy7gJWAc0AA/+Up/wXC79bjdByZfzWabf/7nDVQqkclJO41Gm06n\ng8ulw2hUUK228O8WWEvoWL+xSi6ZI19sMjBk4+nf/Brpy6+i0GnptNuonAZOOXsoCTr0FgNmsiRv\nXMI2MU61UMaqUpArwcC4l1Y9ycar22xupNDq5IyfPUlekLOyHOWLXxgn8M7bbAbT7C1v09NvwWKU\nURrwYveaiW3uUC5UmBufRHd4mkBRzZU7OY7MWmjduYPe5UI5PIegzODrMbP0wiusXN0kuh2iW68y\nfmKCY7/xBW68tU5HpsI0fYhKQ0JNoSZZkWMcGMDdKBEMFVFr1Cg1Slwjo6xu5KiEAih0WvayCp79\nihWr9cHXiQBIBAG5Vks1naaezTA75EL+a7MEwmXU3j6e/H0PSf8uvUoRq6+f9ahA4z3PGZVKvP97\nsNk0KBQCqkqM5/7L81hNMhZfb+AbcTJ8+DjvZg9E8eaPO9EX9rn2NzdJB6MUBs24h7wYvR5i715D\nprPjMh/kbwWpjOOnBwkmmqz/4xZzc04W1pZxiVkGdDJie3EMdhMKKay8fJGBC+d59NwoKqVA36CT\n5RdfxWFVci+4T7MDzk8/Srhm5PY3b9OtlfBapZx5coaZz53m1mYL0e7F7DJTHPAiUyoo1ETiBSkd\n+yBKpYLdpg1nrInb56SYztJptZDK5aisNjw+FzrdQSFvq14ntrT0Pv+hWjZLYnWVgX8jI/fRbDbZ\n3NxkYmLiIxvzD/7gD5idneXP/uzP0Ot/NvflXxYUCnWUSpFisY5MJtBstqjXWzidWoxGJel0lZ2d\nHRYXYyQSFfr6DLRaXe7ciVGpHGgMpVIVnnlmhJrKy+AJB7H6CjqnntGJEf7p9RiHD7kZnvKwt5vF\n7dJRrrRRyqvkkxn6HVpuv7rIyENjnPz0M4SuvsPit59DIojIzFYmnn0G/2aSfCLD2JyPbFRHZnmB\nlWvXkSjUNEQt7WIWjUGL1uPF/eSneeG/v0MsEMfQP0A1V+ST/8uz+IYs7Fay9HSt2EbNVPY22Xnt\nNdKbm+g9HkSVCu/8POV4nC6gMFsIlA3sveqn0Wizv5+nt9fN8IVx0rkm4VyT01Mfrl1Dt9sls739\nfvtuoBSN/sTzHqSDVQZ4jJ/BHO/DhigKWK0qDAYFUqmESqWBRiOnWm3wwgsbpNMVQvtZ/MES7okh\nVFYHhWyJV/7xNpcu7hJumFEaTZTjceKRAst3A+wEStxaSJNtKAmGSgw89SnMvR469RqeIQ8GrZRG\nuUI8VgRBIB5IEF1aZtinQyJAt5hl9+46HQSsThPJ9U0G7BIOnRlHKWmglLaYmXWT2t1ja2ELURR4\n50qQb/6PVS4t1MjWZGTDMRKJGploksuvr4FMiWukH/NAP4GNIK2uFLleR0uuZWMrRyzTYT9cJZSR\nkMtW8YhJjk1pcbgNHD7/EFXU0KiiNBiQCFLisQLr66mfOr8fFSQSCdbxcUSVCrpdGokIveocZx8b\nolKHq6s1JL1TzD5zDu/kEA6HBoNBgcej4+GH+3A6D4q6ZDIpbruSnRsLtOoNTCYVuVydnc0Y6nKY\nuVkboihh1CulEQ8TChWRak0YLHruPff8QetfNkl2d4f8+iJzR3poNjuoNHJWFmPUSlUy+xEKqQJ+\nf46qoEdPiVIqSz5bJFeoIeisZG5f5sgAiLkgDrOMRLLK4lKMZLbFXqhCuiSBTgeFSk5DIieWl1KQ\nWllcTvLc3y/x3HMbjJ07xZHPPE6u3GZxJUu0osLQP4B/O8PVO1kOPzzJ7NnDWAcHsA75mH70IY6f\nGbof4m5WKjR/SAYeoJpO0261fuT4ryo2Nzfp6elBrf7oiHlPTw/nzp3jG9/4xkc25scB+XyNt97a\nZW0tRTRa4pvfXKJQaDA5aWN83EokUuDv//4eb7yxi9msQi4XcDk16NQSxsYstFodlEopHo+Ovb08\nNqeBeNNAWjfE6xsqXrmSobfHiMWq5ty5QdweA7FEBUEq4cR8D9lMBZVWycipo+znFJS7SgJrQdpy\nLSqjFrPHTiWVRqxlSGaaRN+9TnprkzvPvUJoM8T6pZt0SnkapRLtaoWRp58kngel2cL042fom/Ch\ntFp582KApY0CK6sZXvj6RfZDRbKRGMaBAcyjo8h1OiTvuaR75+cPOuIGptiNNmk2O0gkElwuLeFw\nkWrjQA5jft7LwMC/vOMxHwyye/Ei26+8Qmp9ndYHGId+DxKJ5H4R/PuO/xRV4gcZGam/9/fAIZFI\n0GrlvPbaDsVig3C4gFotMjXlYHJQA6F7yEyjlNIF7i2XKeUqOH392D1WcqkiiVic8SMnMZ9yE85I\ncRgkvHMtwX4gQK3Uw3hvP7feXmX26BF6Hn6UrtZMPhRjbSWCu8dF6k4QmVyklknRjAeZm3PTKqRR\nKKTo9Er6+ocohcOU717jC//bf8R/p0MxmiB27waFSh3zqfO88/oanbaCoREzOoPA+l6NwVEnlWiK\n6H4az+w4lYYU/70QDqeXsWNztJAy99hhbn5jm4ZERTqSx+E2YlB2WLvtx0aCPneUiSOHqLT0bC5d\nofO9cL4oRW21Eo+X3iez/qBh7O1Feu4cuUCAdrOJzOIkUlaia1TR6RTodArkcilGo4qzZweoVlso\nFNIf0aYxagWk3RZWqxrle0J6lUqLSCCJ2yfjoYfciJ0G168FKOUaNBot4ns1NCYjkm4bi12PqiUj\nLxEYcyjY8isP3DgVEuwOLWpVG2k1h8trwtxrpRiwsH/1Duq5XiY+8RSx5RVym+vU83lKHSUbqxEy\nhTa1jpR8qcvGRgpRrUFjNlAvlxk8PkmipeXSzTQKrZazT9oJxupYrWpuL/spBvapFCrMPvYQRqed\nT/w7K4l0DfNAD5891E8yUUSQyXH1Wu+LRQHI1GpkWi3NSuV986OyWJCKv5JZ3g/ER1W8+sP46le/\nyp/+6Z/y+7//+x/52A8KwWCeRKJykEpttrFYNGxvZzh1aoaXXtqiVmuTSJTpdrt0Ol3OHlIQu/U2\nzmqVx8dMzIwMc3u1yMyMg1qthShKeP75TXw+E4uLccbGrOwF8mSyVU6d6kUUBSYn7UxO2viHby9T\nTOZ4+OwgwaxAPJrn7qVlCh0tznEPJredbX+O6q0IZ37rMCqzEXm2gNJtxjPuo10p0ZUIaO02NEYt\nlUIJ79gEyaaLcCVI6m4eT7+D6QunufnOJp0ulGIxTCYl0lKawNWbyKkjUyrRejx0Gg30PT2MfOpT\niAoFe6EKrfb3Nw9yuYjXa8Dh0HLokPPnctDO7e+z99Zb91O5+f39A2PQY8d+7DnW0VEqiQSdH9iw\nGH6KhcGv/Nuk1Wqzt5fjO9/Z5vLlfYrFBlarCodDS73aQFrcot6WsBuu8cLza1QrDXRmA6H9Oicf\nsuF0S6mbh4kJbu4uJtncSOIbcTJ/dpjWi+9y/eIa01+ZRLq6jv/mKspqgolf/zyO4R6m9sM0pQoG\nR53UCgUkjTLFfIW+ATUuu4kj8wOUm3IkopSKxU3vmB5JvcSN//evaTWaCFIBqcFCeSvG0Gg/x6Z1\n5JbepbQWRebzIfZeYHUlwqPHnYSSO1y7uIZGbBLeiZLO9DD35ClatTrzj44hKlU0ml1senBZpET3\n48ydtiCpZkivruB5+DEcvh5qtT2kcjlapxOVyYTZrCKbrSKRSDCZPh5FjTq3G53bTbfb5eLFPba2\nvu8uG4uVkEgkPPxwHxKJBLX6g1tUPf1Wpg73UQgFkXQb9PfryWbrWPp62NopUijUGfVY0BlUlFJZ\ntBo5pVwGSbGKQqslubqMY3ISeVfOzKkeKi05FquKbNpCq1QimM5z4pCZZmCVVEuGxarn4S8+RR0F\n1hEfb/+XbzL58BEqsQi+Zz7LxZdXyOdr2LxO6rUGngEXdzdrdBVa5g4PsrJVJJKvY92vUSuVGJnt\n48vPOtl44TnEbIlH50fRDY1yZzXPc89v4HTqefiRXqxWDXqTGr3tg0O3okKBc3aW4NWr91M1KrMZ\n2+Tk/e90Wi2yOzsHoVmJBPPQECaf72Ot9viLxvLyMlNTUx/5uBcuXOB3fud3WFtbY3x8/CMf/0Gg\nUDjYlddqLRKJMnL5gQAjSNjby2EwKHE6NUQiRXpNDdZfuUJoL4XXq0ciyXB4VsrYzDR3F1P09Bh4\n441d4ODdcOSIi2q1hU4n59TJXna2UxQyJdaWw0QjBWbnPCTCShpNkFsc2FodlGaBcLGGtu1g8Z0w\nxVwZk1XHdqjBkMPCbjxGdiGCUuZEEBLoxDqZSIq6VEv/mccptGBhIc7WegxBriR1J0SxreChk2Mk\nd/eJ+UMc/dIj7N1ZAUGkVckj12holsvYJifpmZ8n5/eTCwRomvpoFnKIuu8X8EokB3YoPw8RAUhv\nbt4nIgB0u2S3t7GOjqI0fLCul8nno9tuH5zbbGIaGMAyNvYTx/lYk5E//MM/xGg8CCmNjY1x4sQJ\n+vv7Adjb2wP4n/4slZpIJssolSUeecSA399lczNDs5nGqtWQuxfHc+wEb1xZYGxcwepym3ymxOC4\nGrlZxDncS6YIb715l8BuhtW7eW7dCPHwYzaGZq3EghliiSq64ycwmVW4OkX8V25gOHWKll5BO99k\noF9HS2XEOjZCO9eiGvKTs/tQjw2Suemnki/S89AQ1uFBEqEkaqeLbCmDrceCx2IlYzbhmVCw9vpL\nRC8v0m63UYWjVGoZTs49RSzdQDCITB23U8t0kKsUOMas7EQzjIyPUN0OYnVJMBrVNCJZNm8HeegR\nD9V2CRkHi008FWPosAPBYKNWa9Nopul2c4Dlvd1ICpdLy/nzR1AqxZ/7fvwiUSo17tuK/yCi0SLV\navO+VkajVKKWP9AAUFsstBsNIjdvMnpkiEY6zt7qLuMjTgx9A9A7SWKnwtxcL3c30xy6cIL+vjUa\n+TxauR6zY5ZqOolUFKkXCqi9LurVJuZmmF7bMM0JE9/+rwt86ulBtl57E6ddRbYNBquRSEpKyTiI\nPFrHPjlNoyNl+jOfRtE3wqO//Swb11cpFusMDHnpm+gn3s6j0KkwDZqJLq5itusIhTIMDlpJ+AOE\nvSUygSB7m1HahTQ+k5nnvrVGS5BTq7XIxZPYbBrOXRgmna5QqTRRKmXYbO93XzYNDCDX6aimUkgE\nAa3TieIHahSS6+uEb9y4311VjEToNJvYPsL6iQeNlZUVvvSlL33k40qlUr70pS/xN3/zN/zlX/7l\nRz7+g8CBIvSB8rFEcqAdMj1tx2xWMTpqobfXiFQqYLOpkZYTmIf7MY6M43GpyYeibC9u49b14bCr\nkcuE++Smr8/AyZO91OstwuE8l797h2SyTKPcZXbGickoZ3DIxJHDDvp7NMRjBfbrTroWFUc+fYHF\ny6u02x0MFgMT505QU9l49fVdbEKDZKjIeL+DbCZPC5FmJYvZYifZMVGVqDC5MoyfnScTzZCLRCnl\nq/QNOnjpH59DpVORCwbJBSKc+cTD5Fbv0mk2EUQRx8wMhWCQ3HvvT3kHXFolkWIeud74PguSnxc/\nHBWFgzWh/RNSNYJUinVsDPPwMN1u92eKon5cyMgHxvh/UpX49xaxn/ezRmNjfz9HNhVnZztBLivn\nxZf26e83cvSoG78/w68/1UMoESaTqZCJtSmVGsxM2RAUKhodAY3ShEKtoJqqEt6pU6so0dtklEp1\nlhbK2M1WLjx7iHqzyaWXd+h1a1DVExyem8ZqMNPzud8kuJOgkC6hpEMl3yAdiDJ85BhL9zLorW7c\n50Yx60UUWjV6rQz/mxcZf2weoZondW+VRrHAQ58eoxoJk3x3lXyhgQQwFMs0N7bwnDlPU2ai1LTS\nVOlpmevUkVLc6zB+2MDf/d0yc4e8JBJmbt5MYVe3GRx1cWTMDakAXQ66J4YmJxkXRUZGSiSTFQSh\nl2Sywu5u7j2VTiPRKGxvp5macvxP35+fF8VinWDwQFjOZFJit2vodLooFCLN5sE9VKlk7+2kDuSS\no7du0ahUkKlUGPr60DgclNNpBLmK4595jNkLNbrtJo7pSQJJMLtrvPrqDuVyg7bFjtQpR+eto7dq\nGZryEPvut7GOjSHXaFB4HCy8/hZ6nY5KoURyO8y//+oJLEKOjl1JJpFF0lSisZrRq2BozklB0NI2\ne1G69CzudigsL6JyD6Kes+DSy4jnBf76b9f59V+fJmZWoFbL8A1aKVeatLoyZHKR4FKIzhkzDoeG\n3W0BtUHL7e9cpa/HSyTVoVXIUapJWL2XxuUxcOdOjGq1hUolMjZm5dAh1/s8njRWKxrrj+rJtOp1\n0hsb72vz7rbbpNbXMQ0NIcrlP3LOLyMeVJoG4Mtf/jIXLlzgL/7iL5D+kkejGpUKHqeS6Wk76+tp\n7HYNpVKd8XEbgiBhfr6HVKpKNlthfr4XfVfH5e8uEQmGUShkTB32cvrCNHKnktArG8S6GkSp4n6U\ndGMjiVIp4rKruLge4PHfeJhvf3uVcrGC063lnUt7IEiZP+Fh4tAA4fAyLz6/xtlHexi+cI5uo4bK\nZGIvCXv3kmQyFTxHzMgUCe5c38HcN4B31oNBbHDz0j36jDlW9tK0qiWKqSK+6T6k43aK0RgaeRvX\n+AjFTB65Wo1GLVLa36P39OkD3Q5BwNDbS+jq1e/PTzbNsNXK4LgHiemgEN3l0qFU/vxLvbGvj3Is\n9r5jSrMZpcn0U8/9l0RHHyQZEYHvArPAK8CfAO9+FANns1Vu3AhhkuRZefEy/o0oLp+H//Bb43z9\n7wP09xt45JE+nD43lo6PzWiXh455eefNbTLRNKJGRwcJhw+NYu1EWA4kKKfSFNIFjE4PolyF3W3E\n7jIw0G/k//vfX8BsVFCKRSm222wm5Ez/2iTPf+Ma4XARmdHEcI8Kl03HxNRRotESerMOlc3Bu/ey\nFApZHj7Tx7BJT0PnJLiwSl+PHoXRTK1apezfRK1ToRLb2J1aVAoRrVqgUcxDrYDDZAFByu5+iVis\nhLRRRK+Vo1Yfxu3SceXKPmq1jE99apSRYRMWIU89sE5LFFGZzXiOHr3PbB0OLSYN5LJltrdrtNsH\nTVAajQyVUiSVqn4Ut/ADUak0uHQpQDhcBEAqldDTo0cQJNy7l8BsVtPfb2Biwo4oSqkVCkRv36Yt\nU1NSmanWWlCBVKDA4o6UUiGPTV9gxKdDJW1SLlRJpeD11/3Uay1mZ2z81//7Gul4AW+/lbFhKYl0\ngCeeeJpK6IDIiSqRvkE7Kxdv89DMKHKjmVtvLTHdJyUSSCFVKFCajJRrQDmNU9eiHVjjkU8cJl+T\n8nf/12t4+ix4hjzkNVquLWfwDRiZP+pAJ29yayuEXienXqlRzpQYGzVjt6swzLrxONQEFwtMTdrR\n6DRISy1sRgOlRol6qYDO5KDZ6hAMFqhWD3K71WqLlZUELpcOr/end2h0Wq335YW/h/Z7du2/CigW\ni8RisQem9zExMYHX6+W1117jySeffCDX8GGjXigQX1qiEA4jlcnoHx7G95SPUrmXcrlBJFJCJhPo\ndqFcbhAI5FGr5cg7VVr1Oi6nlkazzfbtDfrdM/S0S0grWRpI6LQlTIzbqdfbXLwYoNvt8plPj/Bb\n/+t5bl4LcGJGj9pk4JVX1zFbdUzMeAgurVMIannqqUEUCikStRLjgIu713dop4ps75Yw2Q34Bq1U\ny1VKxSrlGgTe3aNabeGVJejWW0TCedpd7YGhpT/H4u0wR4846Os3Y9SJzH/qFHTaDPRo2fynIl2F\nFFF94J+l0OuRKZU/0jLbyKQwGvUMzsz9QubePDxMLZ8nHwjQbbd/ZE34ReFBkpEW8EDkA0OhAspW\nkb0rl9i66yebrRLcinC8WeGZJ47QFuR84fNjNAolpP0jTDrrtO6GeOrTM+wGyxSyJeaPudAXtnn1\n//g/Gf3Sf+CORiC6kaKSyWHy+Tg23c/8MScbK2EGBs3E1raRGDRIdFYqGgehQJ7121uYvG586jxe\nnYTM0l1SlwvItRpszn62w2pW7yVxO1XU6m3uLceI5TSc+Mx5ohffoJDM4vY52VrYZuqxE/SNeSln\nCihkHUq5KtpBH4W2ilwgytyUGblCjnoziVph5dCcnXCkwMZ6ip29AodmrJR3t9jeyWCYH8Z5+DBK\noxGlXn9/d9tqNEgsLZHZ3qaFiF1Q4/GY6MoUtMt5wsv3wGEi1w/GX1Ck41+CSKREJFK8/9loVHL7\ndpRKpUmt1qJUaiCXC8zPH3iH1PN5Wgod1+5kCfn9SAQJo8fGiSbTZLf9ZP1+up0OkUODPDzvYHsx\nTSDRIZWqoOxWefv1NDarBqHdQNJuIBMl3Ly5z9lHe9m58i71TBKVToPa4eTMM8eQUuDwvA+vTeDE\nyX4OPzpFp9VGpVXSyOcILNyjmEjhf+0N0qtODn3pNzh5po+Mf4/QxTcwD43y+NkRxoe0bL31DrE7\nIieODFFrtBme6mFzK0MxW8Lm1HP83HHqS5cxuuxIIjEUYp0TnzjFty5WaNbbKIwWZGolY4M6gpsh\nUOpp1mq063UqCOztZel2u5jNKjSaHx/dkGs06Nxu6vn8+47rvV5kqo9HDdGHjdXVVcbHxx9oVOIr\nX/kKf/3Xf/1LSUa6nQ7hmzepptM0ZVpa3S7ptTWcSiW+4QP5romJDktLcarVFt1ul8OHXWg0Mt54\nI8pEbx+ScgZJuYpM0JOriniS+5w/76NhGSSZa3F7Mc0//tMGI6NWvF49e3s5Rkd8xDcv4nTrMQ25\n6e01Uy1VyITi5PcDtKoVJvqknHQk8J0+TiYUxakqced2hF67gfmzk2gtBl7+26u0pUqarQ4Op4HR\nUQuL/3iNuSdOEVPoGLJKmRrT4raOkco28fabmB2QInaraBol/CsBbq+2mf/cZ8nt7hC6+S5yhQzj\nwABapxOl6aCT83uQCALGn1As2u12aTebdBCQy386BZCr1fSdOUNlYoJuq4XSbP5QIp4flzTNR4p2\nu0MtGaNaKqPXK6jVWkilAplQnNmjAo4hF9e+u8DbLy/QbLSYPTnOqSdmyeSaOPrKdLIJFJlt9lZ2\nqGaydLZv89lPHuN6r4lUrMDMyVG8LgU2q4agyURFMOCaP00uWyWdqyPNtBE1Guz9XsaHNMikcO+d\nO9x4+TquXhs6gxb3cAfHgJJjx71cfnODTruDzarkzKyaUjCEXKPEOneYlkJJrxcygQAzv/ZJ9t+5\nTDWTweAbouexJ/jP//kGfWM9CFoTg0N2PG4N4UCKeKKMQqMmk6mikndRZ7a4/dY9jj89z/XbaSIv\nBZg+PcPMMR/9/Qc/vO8J2ci1WiTNClsvv4wgVyJTyuiojZQEI6n9KJJylrlfexrDh2wY9sMolxvv\na20XBAmbm2kGBkz09Hy/0CoSKWKxqJEqFESzEoLbBw+y0XzQ6pevdGnnv09q9rejVJ84wt69ML0D\ndtQ9JWIbfiZ8AwRyGq5F4jRLDVpVLYcfnWX/1l2Wrq6hUsvQ6+voM1k6opLeRx5Ffe8eNkmZYlzD\nwms3CK0eFH3OPHaCsc98lvDqJkazDrPXRm5lgdzmBusLAaStOrUry3z1P/0JjZAfpc3JpUv77PzD\ndxgYdfLMFx/moUkN/vUKY3M2hFqaQqFOfDuCc9CLZWwM9fg4h6o5zC4LrXKZx875GOlXsfBWAIXF\nRiZVJpOu0AVmJsws3inTaEs4daoHh+PHe1k4Z2dpNxoUIxEA9B4PjgeUsngQWFlZYfIHCnofBD7/\n+c/zx3/8x2SzWUw/Q/j8XxOqmQyVBqxG5QT34kgkEoZGbSjjKSzvkZFotMQrr2zzyit+KpUWer2C\nL395BhDItTTozHpQ16hGQ4gqJTsZJbe/s4RruonF18/i3SiPnRtgbS3F66/v4HBosFg0nP30Ca6+\n46exneK1l+9htuo4fXaYrs5OKpMArQm1KkIjuElueQMHSj7unkTOAAAgAElEQVT9ZA/VepvO/grW\nwYd56jOH2FjcRT7joM8poxLwc+SpUzhPnKLhT9PnVLCxEiK0l2Fg3MtMHyTfvUL45i3kGg2uYydQ\narXEd/ZpJlPILC4MJhXdVovI7dv0zM+TUSiopFIIMhmW4WGMAwMfOJf5YJDdd+8S3I4hN9twTE3i\nGfJgs/1kt3KJRPKBadpfJP7VkZF2u0O53ECpFH8mVvdBsNk0RKUSGvU2PV491WqTTqeLzaZmcNhM\noV7n+W9codPuIEgFVhbDNKVKTp+fQKOV0yHJ+ht36JkaIm3Vk9veQBrc5dHpWZRH+7FM9rK3k+Ta\nZT9Gtx2T28qtazvsrQVRmG1MTrtR1jJMeTuo6gkMQ2MsfSeL2WmGRo3Ebppms8HRHivhPZHN5SAG\no5Ixr0Dy+rtUAtsopV06UiljT15g9+YSxWCAxHWRvvmj2A4fA42exG6Q80+McPVGjNPPTrEbqpBJ\nVyhXO7jcesYn7cTCORRdCaEba4w8NM6tu0lqxQDVaov9UBl/uMEXvjCN06kl6/dDt4uoVLJ79SpK\nakhlItHtINFElaNf+CzBHNxbjmAb2fjIyYjZrEIUBVqtDhLJ9zV3ftjUrVY7SCuoLRaKTdlBxVL3\n4IHrClLqzQZytRaNQ0KzXEaqkCGIcuw6SF6/iH9pl2QkQzm0j75/kJOn+tla3qeSy2NWd9i4HkBQ\n6ygXMrSrFSQ6ECNRoguLbF+5zcmv/Xsuff15tq4voDBZ6YpK3n7hBiq3l3trKYYmjuA9NsvGWgSJ\nT8OIYwx5JYFeI8DOHdLBMJEUPHx0hJFBPRv34vzT19/h/GdPYPVYCS+u8fJ/+u/0jXhw2BxEi1I8\nNiftVJi5mV76ejR0mw0cdiWdZoOxo2O89eYum9sZCvkaR4568W/GmZh2sbbXYGEhxoULgwe2CNUm\nrVYHrVZ+v8hVodczcPYs1UwGJBJUZvPHptX7o8Dq6uoD6aT5QZjNZh5//HG+9a1v8Xu/93sP9Fp+\n0ZBIpazv1li6uXf/2M1kAY3ZyCAHO/1795J0Ot9XUS4UaiwsxHnssX5u3YrQaIi029DX7wCk/MN/\nu4ZaLSOdusNTkyNMTDtYWU1y61YUq1VNPl8nsJ/D7XTj3ysypdcxPOogkW2yvpZEQQ29Qcm7byxz\n9ISXzNYCybs3URv1VEKbdA0O0hUZ6t4I40M6VNtBZAoRpUZHY8hFQ6rFPuLEYFRxbzPP4kaFeqbE\nqTNy9t54i9CVS8Tv3EGh05Ld3eXI177GzvWrGKwmrMM+WoUsAK1KhWalgu/CBWr5PKJc/mMdeUvx\nOKsvvcLty5tUSjWkcgXZSJJc+VGOHO/7sQaDHxX+VZGRaLTI3bux99x1RaanHQwO/nQ1uU6ny95e\nDr8/Q7cLw8NmBmaGiK+s0qxU7vdfe0d7GZzu53/8P2/Rea8WQmezEk21CH1nFb3VyPWbcZ54xMXw\nw8eJLK1gGx9FpENqa5PtS1d46GuTLCzGuPvWAiaPA1bC+MYGUCkGcXuMTM84OTKuIXTnLq29VbLF\nPPV6m+FBI+vFHOGNEKIopZzOUa80mBnTEQq4GfHp6FNnufW3b2PUieTzdUxmFbFbt3AMDdFKhKjk\ncuxcvIznyByuY8cpRSLIpTFOTtkwmpRUtorI5SJPnTajqcXI3PAz7bSgtVtZ3lWh0BvYfWMJq+3g\nx9xuNtjezhAI5HA6tUhl7y3q3S61bBapVIIgCAgSCXq1jGYug1KjppTJk8/+aBfLhw2XS8fUlI31\n9TS12oF+yPS0A7VaRiZTJZutolBIOXXqgCQJUimDcyPshyvUcjm6ajUjEwOUF2KInRyFdBy9Xsn4\n6UM06k1cTg2hS1Ek3Q7ybo16ocCopox5RkM1peKRTx0lFs4RjldRaaw4HQaEchqpKGAfHyMZiqFQ\niIhyGY1WF/f4MPsbQbLxfSx9PaT2QpgsZnKigStvrXLrxSsIShX6Ph9zxydxVjeJrqwid/Sj6MZZ\n+9bf4hzzMWa3gaefTEmCx6Vl6YVX6AoyQv4otYzI2OFh9t+5hEylounMk2jqyQWC5ORFNJIyY//u\n84TidTqAViOjWqpx8aIfb7+FsTEL8k6VwM077IVr7AYryPRGegZszM05MBoPUjESQUD9Ie+cPq5Y\nXV39WBjWfeUrX+HP//zPf+nISFNQEc/8UF2SBKLZA/VsOBBCM5tVjI3ZuHs3SqHQYGMjxYULPvR6\nBclkmU4H5masvPrNi8j0RhRCjZYA+UiYk/P9rN1LMjJixunU4nbrkMtlLCwlUZlMBMJlnv3cDIlE\nlcBOikOzfXSKGWKbe4hiHyp3H4axKplQlO07O8g1MXpPzqPpFtm/e5DSyW4FKYb2kSqV9D/7eaKb\n++T9m8Tu7DHa08PIhVkcmgaXnn8BahXMfV7azRb7V2/gPnYMpUZN8J2LGOxGFHo9jWIRJBJEpRJB\nKkVt/slrYdbvJ7i8RWI7AN0ugkxGqNvGPjtHImH9NzLys6JYrHPlyj6ZTA04aNu8ciX4ns35T25b\n2txMc/Vq8L7bajCY5+zZfp742qeJLCwjadYwuOw4ZmeR6nToTQf/TypK6YgKYuEwvhEHglREoRC5\nuxDlwqOH0CscWM0KhGKCvkceoWHuZzstZT8U58jjx2jmM6C3kkhVcNjUmEwq5DKBzPYm6y++QiWV\nwuU1Ud7YxNLrweXWs3+vi0whYvXasY+NQENkaNCCt9+GshRDa9RQyhcoFhtIZCLpSBLd+Cxyg4lK\nQ0LvM88SzCm4+N003aqXYZ8Os6JKem+flZUix4YFvvEXL+JxanA51Wg0+8jq/fgmByjSRa0SaFZr\niColGpuNQqV1/4E3Dw9TjEZBEJCp1TQrFWR6HTK9AYoplEYDzb0MUlGKoefn86zJ52vs7ubIZKrY\nbGr6+40/80MiigJHj3ro7zdRqzXRaORMTFh59dUdAoEcKpWMiQkbW1sZ1GoZAwMmhkZsROMjRCJF\nul2w2HVceELH2oL8QK/A52brXpC9rRgDPWryhQZOlxaFwo1e3qays8nwsSmefmaUrkFDMVDj+LOP\nsvHaW9TEDjqZgNpmo24aIJdSMva5C9xczLIaEWk3NYyceRTVyl3y0STu4R62dksUCnlktTISmQwa\nVUrBXeTzfZQqbaR9Mxh7XWzcWCYdiiNpN2lq81jlGjpmF5lolUIqT7nSxtujY+70EOsvfYeaQ4dj\ndoblW28z/ZCPfDrHwvI6IyMWVBshrr21gdGkpJiuYnMZ6BlysBus4i3uYlXVSWgNXHp5iUa1jsJo\noFQeo15vcf687+fWL/hlwerq6gNP0wA8/vjjfPWrX2V9fZ2xn6Lp8K8JglTA0OOhWKhRzeYQpFI0\nTgcaiwmJRIIoCrhcOlKpCkajkpERC5lMlf5+I/v7OdrtLtevh+9HwCMlJeaxCVSNNH0zI4TTHURL\ng6eeHmF9PUUsXsHvz1Eo1HniCR+5YptmrcrOTo4u4O01Mjxq5drrMY5+4jQVqci9WINoVIdWq6P/\nlJPVN69xdm6Ixcv3OPPUHJFKi/V8AdRTjE67Uej03Pq7FxA6LfaXwwwU4gRTm3h+42nqxRKtchGp\nVoZMrUaQSpDSQaGVY+71kNnexjs/D8UiGrsdrdP5M81jOZ2mUaneDxl3mk0qqRSdVvO+8/GDxL8a\nMpJOV8lma+87Vqu1iEZLH0hGut0usViJfL7GlStBcqki0m4DQRRRaHXcuRPlk58cxT7so12rIdNo\n7oeWj5weZfmmn3gwRb3RRquVc+zMMN1WA4VCYH09jdCoktrbZ8Bn4+iZYZptCUvLOdK5JhuLEQL/\nP3lvFuTYfV55/u692Pd9SwC5Z1aute8LWdyKorhKdlvulmWpoy33jMPu1kzERMf4aSIc9uM45sER\nE+2wrbEdHssamRJpkRTXWsjal8yq3PcEkNj3HbjAnYcki6JJmrQsm8WZ84REAsh/4n9xce73ne+c\nzSIT037e/uE9TF43x48GMZpUqDs1rGKN6socVq+TSqGGx90mHS8w+cyTiK4QHURGjk3RFHScf2eL\njZ0WuUSOhydFHL1BiuvrKArUi2UsoSO0Wh10Hh/W3mkWd1T87V9cRBa0yK02qWPDTA6bGQw3GNvj\nJD77Hvl0BQmFgQEbSrtFdWOFia8+Tr3ZZXUpRTmTw+S1YTAbMJuN92fU7f39KN0uxViMnqNHKW5v\no7LYKct6nENDSCYrenOTwPQYntEhqtUWjcaugdDnbamdP7913xtkdTVHNFpi/34ftVobjUaF12u8\nn5j78+h0uhQKDdRqEY/nw/6nyaRhcNCOy7XrnfGBUdL8fJpw2IrFouXs2T4SiSrd7u5kycZqAou6\nzcixEW7PZtBY7LsnB40GSZERuh2c2jqhQR9d2Yze5WZ9W+HCq7eR1SaSDj0Tp87i0dUQ5CaZksJr\nr63hcJuY+8k6UreF3uVh6b07RCN5zj21j9BwBntfmPiFC0wcG+fW61sYPD7alQrNWgOTy0FNNcHF\n92Io791Gq+ph5LkhWlsLNPVOWpUSvQ6BOzNZRo5OUkhlcQdd0Kggdlp4BsN0dFZcziLxG9foPXKM\nzLoe7+ggSlfA5zUwEtJAMUVN0nD7wjbBPb3MLM7Q4zczeWoSQa2BepNWqUSjVCaZNLxPGv/pfvP/\nl1EoFCiVSoTDX3xgpEql4pvf/Cbf//73+aM/+qMvejm/NJhMGkbH/dRbAu1mC0EQ0ejU7Bnz3B8/\nHx93sbVVIJGoUKu16euzcfx4iKWlNO22QixWolBoYjSqGRzyUUnG8Q2GefHFZZaXUzz5wgHiiTr7\nDvYgdxSuX48BMDho59KlbRqymnhGpq/PysSkh+s3IuQrAsWGmp/89QydegW/306x0sQx2cvD/+Mo\nstlHeuc22YaOl384Q3ZtHUkSSRdkzvWPUkmksHgcBPs96IQay+/eYP9zj9N/+jiRt99CFAQklUTP\n3gmcw0NU02lso2MIChjcbrRmM5JazeY772ANh3GNjqLS6T71fdQYjdgcRnQGDY3arkeIPeClI+ke\niHyxLw0Z+bQW9Kfdv7CQ4fr1GCaTmtnrG6SjafZOudB069TKJszmAdrtLlqtBukf9dgm9of5rf/2\nHPfubFNtgNLpQqNCdD1Go6ZBlqF/yE2rJXPxVoqaNguiGlFQKMUTGC16rl/bpokWUFhf2MHt1PHC\nM0P83f/2f/G13ziGSiNRz6SwOi00o118U9MYrCb8B/Zi1ikohRR3VluYLDYsRZlWo0Hb0o9tuIZc\nyJOPLxGemsB16DiFtgG73Ua5YmDtVhatw4WcL9MRVUS3C/hcGkYGzPgtbSoOiVW9Bq1WhdWig66a\ndq1CIVdh/NgenhLVXHprhXgkgyq2ztf/06MMDOyW/wRRxDk8jGNwEPnoUSrxONV0mh60lBQjhVyN\ngTENA2M9JFJ15ua2abU6WK06Dh7009Pz2aOiyeRH2zs3buzQbndJJitIkvj+SSb4ER1INlvjxo0d\n0ukakiQwOGhn714fWq3qffJRo1L5qEFPrbarfZAkEYNBw8CAhky6yqUL60RuzVFOZ6nvH2JxPke1\nCRJdmuiZ/so52oktVBUtrWoN0dPLjcsbxPIChwbVmFUFJEMLyenHMTTB/EyEaxd3rZT7Jvq4sRDF\nrBfwGlUcfPYslVwJ2/Agx471cOVanP1nJgh4dLxTk2lW6mjNVtCYUSwe3n1jnYXZKE6jTGVnh1a7\nl8cffYjkyiZ6k0Qnt4PPJjF2YD9ep5r01g42S5t9Zw9iDPWyvV3CbNLQ7toxB3s5+huDODRNBG2T\nZ88FOf+XL2MLh7l8aY5WuQpuEZvDQi6RZSdeQ6dW6BoNuyO7yq4u5/9H0pBPxNzcHOPj44ifkbvx\nb4Xf/M3f5Mknn+QP/uAPvrSeI7lcjVari82mu++PMT3tRa2WWF/PIUkiw8NOhoc/bEs4HAYeeqgX\ntVoimayQzzd48cVFZmYS/MZvTPPIIwOsreUwGFScfWyIcsbJwnyafKHBvkN9lPJ1drbS9A04mZry\n0mp2MZk1vPzyCmNjbnp6zHQ6CqOjDpYXU1x5b4tf+8YUr76+QrHapZ6pkM9UEDQaLD4PtmELt5Zb\n7Dk2wcytCJJWg8XrArmF1mIkkaoTmBgiOruM02dHrzGTFrtkNiP0PXwWh89JemEB18gwlmCIer3N\n6mKKYk1h8itnyUg+jEqU2vsOp4k7d8iOjuIaG0NrtyNXq2QWF9FaLLj27MHs96Oz2Rg4fhC1xcH2\nehKt1UbP4UMMTQYfCPfsLw0ZcbkMuFwG0ukP3eCMRvUnOstVKi3u3k3SbHYQ5Ca9AS0Oi5+WpKbR\nAq9RSzigw2T65PEkSRIZm+phZNxPdCPJlUubXJmt0umIqFUCx073Uap3ee3VZWqVJsE+N6WmCr/f\nBM0KzkAfwUoXp0vP6KCJlY0SzXqTcq6A160jl28x/eyTJK9fpVWt4AgH0GoEzLouA06Z7Nw9FI2O\nPeP96DMiSreL0IJMpoZg9jP9qy/g2ruB2hng1obCeqrFb37rKOl3t6hpujS6aUpVGUQJuS2jN2gR\n9QbWbt5hasTNoQMurDYjlWIVlVriyEMTeCcCbL/2MtWVVY4E+mHEg5o2jTvvkNsXpljfjea2WnUE\nAmYa+SL5dBFRY8QR8BF22qlU2uj1KpLJCtev75IIgHq9wuXLEZ58cvhT3/MP8PPTMB/4BQwNOZCk\nXWHq2lqOnh4zo6O7+gRZ7nLtWoxIpHT/eTMzSUwmDePjHkwmDU6n/mNkxOs1odXuHv71XI7sygr5\nukji3jz1hkw6LzNlsZAupEjGS3g9RpKZGu0hD+GxPiyWEQwOB3//F+/gGdLSY66QvHad69tFGl0N\neouBp7/3LRI5mfDBfdQqdUSjBUVroqVV0VWXidyawe6xYTarMVkMHBw3onU4KSZzfO1bp3jtR9fZ\nidc4dHaSQqGBORDg7FeM2EwiK7dM5PINDL4gjpWbYBzkvZeuEJlb40ZPgP1PnmTk1AGCFpmVVxKo\nGgV8PQ5SmQZdi5dkCW6fv41aUvj6rx+kFtsk4JRw9Dmx3k2hMhioRjaxBIMIgoDWoEdQeWmmStTT\nSdrVGl63Dofj811RdbsKkUiR7e0ioijQ22v7XD4mDzoehEman8fk5CR+v58333yTJ5544otezj8L\nrZbM7dsJVldztNtdLBYtR470EAxa0GpV7NvnY3LSgyDwia1Bl8uIouxWNSKR0v1zRrHYpNtVePTR\nfiwWDZIkMjTeQ77cZWzSSyaWQ2/UUGvu2j5MT3vp7bPw5392B7tDz9pqjvFxF+NjLpYWM2yuZhEB\no82MWq0mmarisDnQim1y6RItRY0lPEBjJsPex4+Q/NE1DC4XeqOW6WN7UDfzaKsxwodH0UoKCxdv\nMHF0jFPPncKkbhO98A6B/XtxT+/F4vNSL9e4+splojs1glN7yLbNXP3by0yNOxiwmEnMzNAsFjF6\nPGycv0BqbgH/gX2Y3G7y6+uUd3YYfPxxbOEwpWiUvv2jDBzfT1elw+Zz4xv2/ttv9ifgS0NGjEYN\np06FuXcvRTpdw2zWMDHhweP5uHK4Wm3dN3HKJfMMDzt5+dUtlha3cVoELFYtgT4vnU73n+x3z82l\nefPFG0hqFT6vAb1BzdSBEO9d3OSVN2NYAn6sShej047fbmRjPY/T48WoaWOqbtNn0aMqxjkz3c9S\nRr9rzTvsIbqwzvBIkOApFbVMDu/BA8iCjlKhQn72NqVoFJXJxPzbi3StfpqGAWqKmVoeXH4n7nEH\n+kCIZF1H526SXr+KV19dpqfXiVZbRmWx49HrqJfKOHx2BibD2ANWwv1u0Aocf/IwpfUVmoUc/vEJ\nDH2jFCMR6oUCq2++g6h6D1lUAyKDZ0+RTRZY227SbHdpygJhc43E9SskI1mM6jZWu4mJpx7FM9SH\n1uZlZ6d8n4h8gHy+QS5X/0wyshsHvvvcZrODViuh16spl3fJhKJAMlm9T0by+TqZzC5BbVWrtOt1\nREliZVnP+LgHSRKZmHADH0zRCGg0AlNTHgDa9Trbs0vEYiUqshZf2Es9O4/T76DWkBkesNGo1DHo\nREasJdZfvkzHI7J4c5l9x4c5dfZRGmiZeeUOO7ECiXgFk9NOJVukuLqMqPhJVwS2ozKpWgGtSU+9\n1qbt9mIcEnEPeBiZHmBtIcL6dpVuM0Wr3UXQ6Pj13/kK1UqTVqNJo9PAI0dJrawja1VMTQZQuXrQ\n6yX0E+MYeodIyHZqxQoGuwm714mhXaCaKOGbnKQjd2gUFDRmDWZvD++9fJ5iIoO9N0SurBA0C6Sq\nMYxNPz1eLZGZeUSrFY3ZTFNjw2DWYhINRLfyOPp6GenV028u0W23ELWfrelZXMxw9Wr0/t6uruY4\nfTp8v+r2ZcWDohf5eXzgOfJlIyNbW0Xu3k3d1y9kMjWuXInw1FPDtFodEokqnU4Xj8f4kdagLHdI\np6s0Gh0OHw5QqbT4yU+WCIWsHDvWcz91OxIpkU5XaTY72Gw6xsZcmN1OrG4nxVITXw+YTCpUEmTT\nVZ766jDpVAWPx0Rvn5U7txP0BCwEQzZWZtfJp0sMDlh559U5BMWEO+DEFDAzOh1C0OrYO+GmXJHx\n9/vJxLIMjrioLd4iMrfM8P5hkjs1PDY7/d95CrVOjbWvn4WXX6MQi6M2W/G5Q9RqbfLZCnXnCB05\nwN1oG31xDaNZT2SnymCPnVomg2t0lJU330ERJXbmVpAFDb7pCZx9QdqFPIWtLXoOH6b39GlKsRhy\nvY7R68XS0/NFbffH8KUhI7A7kvvww300mx00GglR/OQascmkwWhU02p1MFoMrK5kKKZz7N/vw6hu\nI7WqRLeyZDK1T/VPKJfq3Lm2Ti1fIr+TpFSREQ1WOnKXfQf93FssEG0phIJmxqcCtNsdNjYKiBo9\njeQWI4MW1Jl1VueitG5vcugbz6M2WRF8w+yfVlFfvUu11iZ48hTFZIqNK5fo3TtKPZ7E4HJSb3QJ\n9xmJZRtMTvn4/g+36OuzYXWYWFop0h/Q0o1EuPbiBXLFLuG9I+zd9zDPPz/G8eNhZu7E0etVDIYN\nrF69w42flnj6iTBL71zG+tgh9BNjFHcStCQDGquNrs2BTedmtC0SuXKVUjyFIIJvbJTc0iL52Q06\nXRg8vp+Vi7dpFfKYxDrp+WV26i067TYjx/fSc/jQJ2o6RFFAkj67pr93r4+FhTSNhozDoae310q9\n3v7IY2y2D7/8did6BCrJFMWtTRBERJVE0yHSLIfIVeDKlSjJZJVarY3PZ+Lxxwex2/W0ajVSS2tE\nNzPEEzWiiSrJgoDN4iKoU2iWqzQLWV54ZgCn20Tz3nvslHeodgScXhuVbIH6xjyeIyexW1V0FBGT\nzYRcLRMY7EHTrfPwYSuX7qi5cCnKdqTC818bQ1EE0skyokbD6YcGmLk0y/l3tpi/uoDFZeWJXztD\nswZXbqQ5uM+FgIKYibNz+zaNpgIItCtlHvsPISyGLldeeQ2N9Dp7/923GDv52yidNoZWhvr8HFVF\nYejJpzCE+nFlC1y/uMyN8/PIggq6HfKbm6Qy+xkYNFNauINQy7N331kaGRfVpoLaZMNq7FIrlMln\nKoR69FgtOob7DJQXZ6mGvFjf10vU83lq6TQIAkaP536IVrMps7iY+QhBbTY7LCxk6O3958eZP0iY\nm5vjqaee+qKX8RF84xvf4Pd///cpFAr3s72+DIhGSx8TUpbLLSKREjMzCQqF3bA2o1HN8eMhBgbs\n5PN1bt2Kk0hUMBjUdLtdenrMfO97x0gmq3g8Bq5e3UGnE9+PruiyE83jdhmw27X09Tu4dy+Nwajl\n1HEzU5NumrksD58OUCzJIPkI99p45cf32Jpbx6IOcvBoL0fPjFIvVzDoVXz7P58gHi9TztWYPhJm\nbMiKwaDi4vVFxJCeXkuDqK6FSd1icWkViw6sUo1moUB5J8ah06doW3wsL8YgOIbfuesDFJtfZuTk\nIdq5NHatjrwooxIFKpEtKt0OXYeGak8I/4ED1DIZqvkyWrudoYdPU0/Gmf2/Fwgf2ot/YhSNyUQt\nl0NSqfBMTDyQo/dfKjICu14Qn+WzbzRqOHQowNxcGkEwklaJ6A1JTGKdbj6HZLUg6E3vZ6p8HF1Z\nJrW2xc7MPSLXblOr1JEFLe6hfhxOA+2WwrPPjNBstLAaBLbidZLpOuce78cmlUneTeEzCGzNx7BY\ntVgcZnw2uHEzTn4zx9JMiZNnxggPN1l7510WLt3GPz2O0mlTz6ZpZhKsLURptzqMf+URekacPP6E\nil6vxObrrxJZ3GLvgRDjR0Z48leOsZ1okq8oJJJV9BWZnWgRUWlj1GtRCQqrd9Zo1hpkJwysX3yP\ngf17iK/HiK/tMHF0lHikwFs/uUGr2caod7D32W8Qfe3vCe6bQlKr2Lxyi2x+lxCkFxaRs3HUGh2V\n6A6t+m7FohBPUa82SM7MEDx4hsXFDNXqhyTC7zd/LpHUgQN++vps1GptjEYV6+sF7tz5MBfB6zV+\n5AvMbtfT4zOwcn4bQaWiVakgl/JYx7WsvVlmuR0ml++iVktYrRLlYo25uztYNC62L15k4fI9br09\ngyPoZejESSrFHM2qyOPPHINOm1w0wcLlWY49NEp+dRWlWcfkMtNOpEHQQKeFI+Snd7wPRaWlkG8Q\nGu2lnY2Tnb2BXmoTFMz8z787xdJGg421LNW6wlNPDjDs85PaSTN7ZYXEdhG52aRrcPLWW2ucOt3H\nwmyEdj5FsMeMPr1E0GsgmqhjclgYPr6PzViDgmLD8fBzqDotXvyb6+iCGRrFMqJK5NjJSZTkBpde\nvIR9usbCUpF2rYpW6pCrtZFMFmqJOGZVk2axytjXvkZ2eZna9Z/xxOPnMA5P0TF5mLm8wM56kuS9\ne7jHxoh3Ovhcw6SX0jQ8MXQZFT0OSF+9eN+JVWe3EzvFZr8AACAASURBVD59GrPPR7Mp02x+3DL+\nA93OlxkPYmXE6XTy2GOP8YMf/IDvfve7X/Ry/klsbRXI5xtYLFpsNu1H/IFgN212aSlzn4gAVKtt\nZmcTWK1aXnppibfe2qDb3b0IPXEiRCpVZXDQiUol0Gm1ye2k2HcoTCZZIpavoVULtNpd/uT/uMy/\n/+Y+Rkac2K1a+vtMbL17haVrc4yd2s/dpQqHvnKY9aUUDreZc8/uQ2yVmbtwm69++zHMJjWVqszd\nmRgWo8joniEGw0aKhTqzN7I0S2V8fi/JV16lV2ngtYzR6TOglTrUtlbpdDoogorNjTyvvvgmGrub\n9Moa+48P4ZFkrLoy2+ffZuPCJWRrAHfPAAa7k5m5NAabiYnpPrZef5XeM2cwh3rRODYwelwUN1Yo\nRWPUajL1Yj+x69cRRJFiJILJ7cY9PY1rdPQzR4H/rfFFk5H/HTgI3AL+6y/rRUulJmtreba3C1Qq\nLawWLU8+f4Dt+Q0Ejx293U5P2IkkCUQiRfR69f0vynK5SX57m0Yiik7Y/QBIKhVagw53f5B3L67j\n77EzGhAwtss4TE42q22Wlwqkdwo8dURL9NpNZIeEN+imJZfQ2Cw0W23IJxGaNepthYXVKs8+t4fI\nnUWmvnKWTrVMcGqMyuoitWQG3fs5CyCxtpYnE8vSuHWXVqkIgsjWahKvVWH8zFdZez1GW+pw7Xqc\nu7MJTp3o4eobM+RqKo6c6OfAYwe5+cYtJJXIxBOnScWy6Lo17IYuGo3E7TeukFzeQtJoqGh0rHnc\nPPK//Dey0QTbt28hmu1YLCL5fINuR8GkF5G0EsX6hycIi8+LqHRpVSo4rCrOnu1jYSFDodAgGLSw\nZ4/rvkbjs+Bw6HE4dgVV09NanE496XQNo1FDMGjBYvmwMiIIAlOjZlpnBtnYLFDfKTO8LwybM6S6\nQ2ylG2j8fYBCObZDLZPGLAdZL81RjcdotruIGjW5aBLd7Aw94QlW53doZ5PUV2Z46PQoO7kebB4r\ntpOHKC7OUlqah65IPlfFLaspxFN4Dp9kdfN1thNFtPoEcjbO3seOUUlnSMwtEj4GzYqNyGqCTqOB\nTd+Hz6Nnc1PA4vVgzomUyw369gQImpv0Gss4BwVcXj3xeJZxv4f3XpvBHbATPDDGT/9+hnK5xdCJ\ng1jMVvpGe+gaS3RQoWj0GM1qEnkFncrJG393nifsYfKZJontBHsmeoktb6OxWDnwiJ8+nxql1kVt\nMuE/eIj85hbl5bvI2R10B8+RXV5F7w8CUEkk0Pv8ZJIlUtEculSJ7aUl3Pomk74P96WRz5O6exeT\n14vJpMXlMtxvtX0Av9/8uY+JBxHZbJZ6vU4wGPyil/IxfPvb3+YP//APH3gy8sYb63Q6CoIAbreB\nQMB8P1vKatUSDlu4eTNBpdL6SIu3Wm2zvV1kfj7DB9Es+Xydt97aYGLCjVrs4teVqW9HePygEcku\nUcxVSO3kQaUhlS1TKjXJpMtksnWcTj3rq0keOjRCp17B57ew79wJrtxIc+vmDipJxGLTcvz4CPaA\nj/W1NOsrWbKZCk+cG8KrbiK2oty51OHmW7eQTDYEi5u3XltknydM9NU/w2TWUo+soe8NY/R6kVtt\n2noHW+tJivEU+qaMe3yca5fm+ebvPE707/+SwPQEqm4Lq6GDKNUIDA1SrQ0z0m/E3klSczrpdjo4\n9+yh+dKbeB12SqsyersdtU1AJQkUNtYRAI3ZzNbiIq1qlW67TejEiV96vsy/BF/kSg4ARuAM8CfA\nIeDGL+OF5+fTrK/nUakkbDY97XaHaktg9NgkqVQVt9uAXq/iT//0FplMnd5eKydPhujttTE7m4D0\nFp10hIGAhtJgDyv3thndN0Ik1cTvNTNoKXP+T18lHDRyuy1RaoocOnWWe2sNyoqBw4/uJTtzk+JO\nkoOPn6FTrVCJzREW2oxO9VBsikQXN6A7TO9EPytvXySfKrJ58x5ahxOt1Yq2V6TQ0eE8fIqZpMTh\nfS7O/2madKKIqJKwjrtQW52sL8VoFovotEai0RLFbJmV9RIHTo/x+itLLMynOH1imtO/asMdlli4\nMo+nGiE8GSQ86EHXN0xBW6avUgeTnfhajIV37zB9qJeOoEa0OLl+LYrTqSeZrLCTrPPUrx2lVcij\nMRmpl2u4h3oJTI7SbZQwBAZQ6XQEAnoCAQvdrvKp7bTPA7Vaoq/PTl/fp1tcGwxqBjxd3O0yuVqG\nysIczWYT68AQGlWXdq1Ks1ikGNkGBSwWNdmF28j1Oq5AEFuwh1I8QTlfILTfRLDHiLpZoNpqYhFr\n1KxWbt+IMj46QE21jePAYZqFIoZAiJpk5d0fX8L3yFdZbgd5+DsHKc1eQRfay92X30Cq56kUmiwn\nopz45m8QWRE48tQk1UKJH/yflwhMjFEpNwj0WHCFfPToisy/dRm514KWNikUAkeOYR8ZR616HZ3N\nzu07caIrMTwjg8S3Usg2Fem6DvfoOH63luLcLbZuLaNN2hh6ZC9nHhul2VaoKRqkThNFkPjO//pr\nKHIbM0XMVgOyTkDt9BJfWqdm8OCYOoVnIEAnGydkaWB2g+fRfczPRLA7zMiFFL6JPazNxag2BbL1\nMoPPjCORur8vjUKBVrWKXKsxNaRDpbKzublbOfF6TUxOun/h4+JBwAdVkQex5H3u3Dm+/e1vs729\n/UCMHX8aOp3dMoiiQDpd48iRHmw2HbLcpVRqsr5eoFxucu9emmDQQk+PGUEQMJk01Ou7gvnd1+mS\nSlVRFHj+uWHE5BLrN2eRFJlkqoposnHusTP8uKWwE69SKNZ5+tk97Nsf5Pr1HVZX8xw86MfkceI9\neJTNaIGlCxH+8q/nkCQBg0HFnhEHb7+xTrjPxsiQjVSsiEZfRZuYp1jJYugd4upL52nXGnQSKZwj\nUJYNtIdDePdO03fyKD0TI2y8/TbdVhNzIIT71GO88U4UrcWKIEoYnQ40NgeS0YLRbqES2aRRKFBM\nZhk+a8BGngl7EWVjnoK8ezGoqPXIJi97f/UF2uUigtGOomoTGBtFziXoyjKSVns/uDK/vo59eJh6\nNovJ+2CIV+GLJSNHgZ+9f/sN4Di/BDLSbneIRksfuU+tltBqJfbu9e5m0OTqfP/7d0gmq7tlQblF\nI5Xg8H4nzWya0HCQ7bhALbLJuFdiZGACQzBI4q11BvoCbLz5Oj6PDkmjpZAqI6i17AmrCY33067X\n0feP4RQErHYD2fm7xO7Mksy0QaOnprax95knOPP0QVJ3brL6yius313H4PZTS6dR6kWm//1/oC7o\nKWbL5FI59o16aHdVBEeC9Az6sdiNaOQq0c0kDb+XuTffY//zT+DxGFlblsikypw508ueqQoOl4mJ\nfUF2NtJk2l1O/w/fwk2GnUvv0KhWkVU6+u1W+k/30e5KtI6P8u75Vaw+N1lbD+Tq1OsblEoivWEr\njZaCYvXTd+gw4YNTlKI7iEoXsV1D53RiHZ3k7t0k0WgJp9PAUK8BoZqn02yidzoxejy/9JO33uHA\nHAiQXVykuL0N7M7US0KXqSkf83EVxUgZjdGE0aylN2hGUPdSz+dRaSUOnhknnRmgWq5jD3jpMdbY\n+MH3KUYixEcO0LQE6DvwMDlZR++pEzSSO+jdZXZWImgdYHYHyGUqnDzsQSu0GTkyxea7V8jFEnh9\nFrrIoCgUVlf47u9+ldu345z/wVu0Wh3UrgDmUIjaTpTDR3vYubrJYy8cQm8xU20opNcj2B0aFK2R\nyeefQWhV2Zqr0DM1Rq2pQKVCPJbAfypAz2AQMTbH9s0ZOvUailUhc+0ie0+fRAp4kc0KS6gotiVu\nXtmko9Zx6qFBypHbJBeWsUweQDN5irVL68zcrtP92XlG+3X4PXq27t3G5PPzzH98AovTyuK1eba2\nihSyZdQmI616ky4CH6iFBFHE5PezfeEC1XQaQRQJ+vyMPTqNotbjchk+UVv0ZcKD2KL5AGq1mqef\nfpoXX3yR3/u93/uil/O5oCi7U1cnT4bZ2irwxhvrdLsKAwN20ukasVgJi0VLIGBm717f+66ruxOW\niUSFel3mwAEfDl2LKz+8xs5WmqFBO3q9mnq1QtjW5rf+035WVnPUmwJGk4a/+P4dtjaL1OoyBqOa\nyUk3itpAoV6i3mqxHSnTlTv4/Ub6nhwmn6+ztJhDbrbptFpMTAVQGgmsVj3Rewvs3L6L0WHH1t9H\nJRrBtWcMtdXJ2f/yXTbefofo1av49k6j9/golVqUyw1qghnb2BRqQaZdrTBydBJfn5em00pxO4LR\nbkHXlqmlU0xO7EFWR2l3+pGaJcRqFtueKW795A3kZhvX2Dia0YNsvnuLdrKJrtbCtWeSnqNHqSQS\nGNxu5GYTQVEQHpBx9A/wRZIRG7D+/u0i8Ev5VEuSeJ8t/zw0Ggm324BGo2JmJk4yWQV2xVByOsrq\nUhq7ehBVbot6bJvw/nHyEhSW7lFcuYLBoMYV8iNKGnRaNfPzJfrHzfTsm6RnYoSsbGBtLk0+nuZW\nJcUzz41RjsyRXFqlVGpTKtZpVnOE99upxzbJymUcQgFBEHH7bNRKOQzmSXC7SGwmyBZlls5fpVZt\n4t+zxumvP8Sx/Q7u/MM7mBQbvYcmEbUGxKEh1jfKpBJlNAYdo4MmuorC1lqSIyf6OHfWj6rbpNGG\nre0ynT4L3UaFSrGKQa+GWonLf/GXGJxOKrUuvccO8fS/+xXyDS2bm0VMrn5GT1Rp51IE+52YegfI\nYcfY1HD4oROkomky2wmQJAx6iYVLt0jFC1iCIWSdk0vf/xlOfQO9ToVKp8N/8CCef4UTuPt9x8lm\nuUyn1UJntyOqVPSHTQSng8zpKzRLBYIhGzZDh6KisPnWW6h0OgSNBvvIGId+/RlQFOb+/DL1XA6t\n2UwuVSS7muPYiTMYtCq2371NI53EblXTLpcoJrOce+5xomsJbvz9NWq5AifOjqCTy/QOeoltpmi0\nBbQmGxvraVzFKtlUiUpXRzzbYO6vbnDysSme+MYZvFawG45SbOu4enmL2MIaQrNO8LibXCSOyukF\ntY4DXpn5hTQqrRaP20ApmcI76GUgqOXWhXWsPg9aGgTsCoWlOVJWM4OhMI8esqMVu5QbkEvKiIKB\nt386w8lJA422QnI+waWb8/imphBEgVQkSTmn5Zmv76Mnl0SvqzI6YCISybE4l6D9vg5ErjfonRrG\nqJL5oBFjCgSopdO7gtb3UdpYx2C10HPkyC99/78IPGhjvf8YL7zwAn/8x3/8pSEjoihgt++admUy\ntftVk0KhyZEjPbRaHUKh3dRbp9NAsdhgcHB3VNxs1hAKWTh2LEg6niEVL2Iy61jbKnPgkYO0GhJ3\nl8qYAyaMYoupg34uvJfC4zFhseg4fjyIw2Hg3lyGvl4bw2M+AgEzyVSNa9diaFQiOr2at/6feVwu\nIzqlSnQ7h15yc3jASGllk3okgX+kl2YbsktLaK02KvEY2rqDSlKgsL5Gq1wmduMWuVwDUyCAT+4w\nNTzMT/76DiapjtGk5cjJR7AZBYRHHmH2r/4KAQF7bwjH+BSbBR3X5xoUEhkcIR+nn/46ikVPcmUb\n954RXvybawRDNnwHD+H3GekdOo3QrlONx8guLVGORhk8dw5LOIz+F9SMyI0GrWoVjcmE6nNM0n1e\nfJFkpAh8YDRgBQr/+AG/8zu/i0az64y6f/8kp0+fpO/9aPrNzU2AT/x5zx4Xkcg27XYHrdaFJAk4\nHE1SqR0CgSCiKGI215DlLjrBRDqRxuJSkAwyle0GhWIDVX+Jjt3NyHPPYTJp2S7W8adkItEOFpeD\nji2Fc+8Qt66XubqyDAYFjVHPqWNjXPzBHFdn9PQZWkgqiVK+gq7Xj9CUaTebOD1moqk4HacWlV6P\noDfT9YTYUXSUIzLT/UYuvvsusiwRcLnI76R498WX6ZkcxeaxI9RyzF+7Qf+jj6BaW+DQ0RDJap2t\njRyj4z6On+yl28mgrqWY+cFVMqkijiE3HrufN99M8/xXwuR1VjR+CzsvvYRaaSNbDFjCDiorc5h5\ngrsrCQrJPJG8mnRWxZFj4xQlkXsbGlqtJHZHmxs3iiwtyTQaMjZ1hI0rN9BWW6jUEtH1ZfrHe8nG\ns6gcElW7GppNpLt3sYRCJHK5T9y/XxSiSoV3chKd1Up2ZYV2tYqttxdzMMjWhQtYM0sUt7eJ3iyi\nOnuW7PIy7okJJJWKbreLVqfGaFDTabep53JoLBbodnH4QhQTHZKbSfr2SFx8/V1UtKHXtvt/9w3S\nqtZIzi+SjecpRSNclpucODOIwVFDvZPH1xdiO69mdMxDtihTqirEUm22FmNIGg0/+ZsrHDnRRywC\nuUiFn/1slmKxTu+gn8GQjq7BTkIWqFQbjE77yaaKbMaiLC9FUEvwzHMTjA3oMVNgaNBGxSHSSW6T\nmb+LpNGgMppYevMCrVoTmy9MoH+UedyoRRn0HmJdI/4TLlQ1gdqFt6gXi6hMVtJtE9aOlkRJxGoP\nonObEEQRt1Xi2Okh5u8mqNeaONxmTj08xMCon0oiiCCKaM1mti5c+PiHfnsb3/79H+YcfYkxNzfH\n888//0Uv41PxxBNP8K1vfYtMJoPrAc0N+mCUX6USGRqy4/fv+kbtBjHuVktarQ7pdA2NRiQctuB0\n7ur7rFYdjzzSTyJRpVBoUKu1uHw5iqbbIdDnJrqeZPTkQd64EGfuTgTv6BClTpTnnh1mUFSRieeo\nlBoMj7pRFPiTP7mGz2dmYsIDdBkZcTE87GBgwI4oQixWZnzCS63aopAro+k2aHdVNPIZNAY9NpPI\no79ymstv3aNVqaI3atl/ag/N9bvkixYQRURRJLsVJZ8soFGBVpygU0/z+JOjmE0qTFIdq0lmLQGr\n9xqEXvhtjNUYjegmjhOP8Xd/fpFGuYpaqwWDjfm1CvtNehAEBJWaaqXJnRvbHDkzTFivZvPGLPnb\nV+mWs3gmJvAfPEg1k0Gt1yP+AqZ4udVVkrOzu2TEbMa3dy/2T0kI/ufiiyQjl4HfBv4OeBT483/8\ngLNn/zOFQhNBAIfDhM32YX/rgy+xT/p5YMDOU08dYmOjQLvdpb/f9v4EhkI6XcNi0eL3h1haymKy\ndOl0OoR6woSsOlacfioNNSvrIsGJILLDjmLVE7Q1SWQS9I9osI8eRzKZWVhooNbqkEQTWzsNSqUm\nDkeRA2cPUCuWCAwGgTo7kTzJtSRKt4v2wASG0ABBgwWnqoLp+ee4+dI7lOcjyKoWrj17KDVFNu9m\nkBtNmv4W+ydt1Ja3sB8/Rvf4WTrNFupkHE2xTLHYwnNkgKWbHQpZifLyOtGVKF/7lUk23lukkMix\nspRGvrrBua8f5/DBCW7eznBszzRWocC9nTyVYgWXPoNc6yJKkF6PoAjD2M0iPodI/7CWUirLjXdX\n8E9PEujzoFY7SKdbVKv13auZzRy1WIl0tc3wkAOppVCZncMWHEZuFPFpd0eo2/U67Wr1n9y/fwms\noRDWUAhFUWjX65R3dqil02jMZvQOB0q3i1yv05VlLMEgrXKZdqGwK7icn0dxhDAdfQJrvUg7n6TU\nVNMj1Qn1O5AUeXd+36VldTWPVifRNRWplWrEIzn6e01s5ESqySSV1ijeiWnWo02yFRHFaMO6Z5pG\nV9gdPVY0aIwGul0YHu+hUu9y8XKCh04HqTVXUWk0ZIpdTj4xzI9eWkclqdCKLebX63icGgYHbIgC\ntGpVohtJYn0GStkKJtGAVswRX12ikkgROnmCtqLizo9fxdkbIr1UwjCf4NS3XmB2pcbCSo50ps7r\nC6s89OwxZI2JSqmB1eZFQaLRkOl2YWsjj8ZgwOByYXC5GCzfxm/zIisSVoeBnrEe9HY7BqcTgFat\nhvgJ4jhJq33gysO/CBRFYXZ2lunp6S96KZ8KvV7PY489xksvvcR3vvOdL3o5n4hz54Yol5sYjZqP\nRD309Fjw+UzE4xUURaFabdHXZ0OjkVAU5X6r9wP3ZEVR2FxJ0qhUiWTqPH7mKCrDHDvpFgtzCUYO\n7aGhtiHmGkTjuxduAZeKZY2C02ng/PlNfD4LWq3EzEwClUpEo1GztJRheMjBof0uRvv0zN+pUutY\n2Ikp1FMxXB4jtj1BBFHANJ4nde8uh6bs6B/dg8liIDlzm7W5OcJTw/gPHCT63nu0qy2a1RoKoNLp\nKUTibMyuMRJUoeoJcn22SMOQ4+arl+m0mpx97jAHw15iqzFarQ6tpkyrVELvdFI2mxG0vQzuH6ZU\nruL2W0nHcgS9Olau3sGua9GsN9Cp1eQ3NvDt34/caNDI5z+yD4qiwGe0biqpFJHLl5HrdQDkep3o\n5ctoLZb7n/t/CT4vGRkDAsBV4Of9up8EXv0F//ZtoAFceP/2x/QiH4TiwS4rnZ/PcOLER2Pp2+0O\nKpX4ER2CIAiEwzbC4Q9HQJtNmcuXo6yv53E69UxO7lr80mlzYtqI19iiUKgTzynM3lwhfGCK5eQG\niiwz6m6gbaQx5aqoHD6Kejfm8QOY2zG0XYmNeznKxQa1apN7M3F6nhzEJraROhUwGJl65hzSu3ex\n+V2EzjxEW9TSSWyyuXoXg7eH0KlTDD3tYLuoZTHSxZ1K4LAbqJR3DxK720I4OEbX5ObyazeJr27j\n8Lt4dN8xQoddvPH6Grdvx3d7o/kMBoOPSz+7y9HxAG//9B65TAUEkbdfmeHZ/2kauWsHU4NOvY5k\nttLNV1Bb7MQjWQxWM2M6E8XlCIntFI8+e5CAscpiR+Dck6OEpvvJ1iSSySq53O5BKUkijXIJjUai\nUmmhoCC3uwgGFWqNgFHzYSlPpdOhNvzr5iBUEgmSs7M0SiXUBgP1bBa9w4E1FMLk92PyeGhVKtQy\nGQobG6h0OrqCip1EnfdeukQxlUejhqljkwjbd/G6NLQySTIba/zqf/0Vrv7odQrFND69mZ6hEEaX\ni1algs/lwGaEQqrI5R++xn/873+Er6LH6HSynFJx7XqCqeNWjp8Ms76cQG/S4XLoOf3oMMlch/hO\nmUSmhXsgRLVUp9OWSSar3L2+wd4jg5jcdqrVFrPxMhOjVnKJHK18BpQelhYzzN1c5cheO3uGxnEW\nC7gmpwidPMG9V89jMGgw2i2U61py6TJio8TqRo1SQ4VFpadvTy/r9zY4dHaaldUcpZaK/r3DGDtF\nxFKSbquOpb8fSadDb7XS99BD1LJZBEHA4HJ9rFyrMRhwjoywc+MGyvvjDqJajXts7Be6InvQEIlE\n0Gq1eB8gAeAn4ZlnnuEf/uEfHlgysuug/XEXbZNJw0MP9bK1VWRxMfN+NUTgZz9bY2zMzb59vvuG\nlYqikJydpZNMcua4j5+9vkG6ZWb03CMUrkSYfNRKuiCzuphAUkkUC3VESUQldDnz8CCyItLfb2d+\nPkO53KDd7pLPNxgddXHkSICVe1Gy5hKhfjuVlXm8w32EjwaRD/nZiBTIFrKsLMQxSG2+/rUDaLZW\nqFbqpGJb3P2HN1GUDpPPnKPZajHy1adJLizh6AgMPf442WgCg9GEx2/BoK1SwEqxIZBNxCnlKmhN\nem5cWiX89QncagkFFeViHb3FjMpgQK3VYHcY6PvqSfKRGCNHNFy7voPVqSctimg1Ah2pS7u8K0so\nbGzQrFbx7t9Po1hEYzKRW1sju7REV5axDwx8asZNLZW6T0Q+QKtSoZ7N/puRkd8DfgdYAP4M+C/A\ni+//7o/4xckI/DPHeXd2yshyB5VKIpWqMjeXIpOp4XDomZhw/5PpvbFYmdXVHN2uQjJZxWbT0dNj\nZnrai7Fb4tqP3yHfMLAwl8Q73E+6oqJaq7BwbYHQ2SDtWITa0iblronRJx8j0tXTqLdZXk1j1Kox\naRWErkAwbCWbKDBx1IvZZqHSCtKI5Tn83cMUOwbanRbxN3+KnIvjNlqQGw0Ss3fpf+brXHlnm0uX\nd/jVF4YZPDBCZWcHq1WLxurAu2+cH/3tFeSWQr0tsbqcRn47xsRkh0S6icGkZ3MlQajHg8GkRaMT\nKNc6iKKATq/B4TKhN+qIxKoMTPXjdEmsXUxz5Le+w9bF94itxtCYrfQ9fJq62kopu04zHWdtIYZn\nv516PEajWkMeDpHNqjAY1PfH8Or1NrZwL7r1GMGgBZ1ORaPRxDk8hKs3QCcVAXavij1TU/fNsP41\n0CiV2Lp0icb7bSBRkqhls0gaDRqTCUmlQlCpCBw5wtKPf4zcaOxWTcJ7uDsbZ2d+HWs4TMfoYDXa\n4vjhY9QSCa68dRddYYt6ocTBb7yAYr9KeDSI2e0kWVXhmZ5mfXaOeldPaNKJ0Rcgu7GNzaYnK9pp\n1DLYfC6cehmjXuD0cT+xSBEEgRvXIhw9O44nYGXuXpK9Ux5++qPb2MwqSvkq3qCTdqPJnVtFRkdc\npJMlNPu9qDRq1GYdjXqLbCyJ0mqyeHsTS/gsrgNnqSWiROM1Vu+sYnfb0Dg8iNkukwMWYtES81dX\n6bTaRBZkpg4PYteIHDzai38kzOpagZE+Fy5Dh0YmieTqQWO2kpmfJ3T8OGq9HutnjLS6JydRGwzk\nNzYQJQn74CC2X1IF7IvGzMwM+/bt+6KX8Zk4d+4c3/ve95BlGdUDNMb5eWCx6HC5dr2KyuXmfdO8\nu3dT+P3m+1Eg9WyWQiJNvOUgGsuzd3+ARLICYSfOgJvb82XeuxxFI4Fe16FcaqDR6UjHc7gDAgeO\nj7G1VWB7u0i73aHZ3M3TMpnUtNtdmuUK2UiTwSEH5r5B/l/u3jRIris903tu3tz3fV9q3wv7DoIA\nCBAkmy02W+xF3S2NRh0jyxGKGMszCtshhUNWOBThHwp7RrZlSzEzYbU1bknN3tjsbi4gCZIAsQMF\nFAq1V2VVZmVW7nvmzd0/EgQbJFtNUE2xOe+firyVVflFnnPP/c53vvd9z18Is+e4BbPHwXY2i1Sv\nsb6coJLNI5fD8T1G5q/eRitvYfa5cA6GEG1eP3ullgAAIABJREFUlLIWXRSM79yNQqFk4coduo0W\nA3sGMGtaiI0qRbkRucuEUIsjiDKqhQoyQaAlqtBpZFhMItFbWexBDyqLjdCQEyG5RnR5HgCDzcZX\nf+cI5VwZdX6FRnwTLCaK5SLtZhOZQoHKYKAtScx++9vYhodpViqUEwnodqmm07QkCf/Bgx8Yjw+r\ndCIICL+kzcVHmZ3/FT0tkDLQBzx/7+e/+6VE8BAwGHq+AoWCxFtvbdzfmedyEplMlTNnBu9TedfX\ne5NLpRIZGLBQLNYfUPfL5yXyeYmRERveAT/BE4+RuRbFPFwnmm5SLJbpD2pplEtUEkmka1dIb+Zp\ny5Q05/Uc/9wTjLj7+P5LW8zfijA9aUVl0HHwUBBBKuPUN4lu17l5aRXBYOONv/wpw/tGOTylIbaR\nwK5pkw+v49u/l1a5Tnb+DicPDyAT4PrtDGdOH2NUIWHUQiEvsVHSki4KGExa9B4FZpOVy1fjBIIm\ndDoFzY4Ms0GBTKPB3+fAH7LSWLqGwaDCYtES364weLCfpUiDrjrLzRs1hgcmKCvrjH05wECpQE00\ns7BRIzu7icpkwjY8TDGRohgpou8WGXxkHxsVBdVqA4WiJ7OuVIpEo0VEZ5CRQ0W0rQJqlQy53kjf\nsaMY7RZK2z1tDY3d/olTyaqp1AMlyGa1inf/fmqZDHK1GrlGg3VkFHRmLJMbKGxuXOPDrG+3Wbnz\nDt12m0apRLVtoJCvkh/U09gM06nVQG1g9dodfPv2kK0oMBaa2Hb08b//u3PsPz7O7ucGSG7EcQbs\nBEMmts/+GP/Ro+zZ2Ud62sbN61Gk2AbrN2fY97WvoHfa2dwo4e4TGfAoKE45uXEtgkNb59kvjNJq\ntwkMelhb3GZ9PoLKaESjljE2akMu9HYlHr+dwVEPC1cXMWlBJSpZWM7h8xnp9/fTKpcYPXWMWDjN\nVqZLdKuCyWVDJtdTKdZo1huAjPBqiumv7KVYaZOKpDl4oJ9qvshrb6yi06vYOe1E1y1S2pLotFoP\nLEztZpNKIkGzVkNlMPQYUzIZolyObWQE28jIJzrmnwZmZmY+E8mIx+MhGAxy5coVjhw58mmH8wEk\nkxVWV7OUSg38fgP9/ZYHTDALBYlqtfmAem+j0SaXq91PRqSqxOxGlx++eK1n5ul34Q+YgQ6Tk3ZW\nVjLEYlbK5QahoJHTT4ywNBelmJeYuXSF4Kgfv0+P32cgco+JOTZmw2JRE93I43AbCQyK3LoeYf7i\nPCa9Ab2yzcpSCptFCbUS7XabbLrM9StRAr4xVINTTB8YxG6AeHibm1fC7D4+xe0bM+Q332F4yIJW\nLeDZuQNHvxeNWkTQ6DBbBrj7D3NILTmh6RFK2QIWrwOtUY8tFOT0E3J2HhxmYynG2IiWgClP+sbs\n/fuxGI2i0OkIHj1KN7eL+e+tobXbUZvNaB0OHBMT1ItFVn7yE1qSxMabb+LeuRPrPaov3S65tTXs\n4+OojQ96R+ndblRmM/X8e+2d2nsMyV8GPkoyIvDe0UwYOAF8Fwjd+90nhp/1KdFqFUxMOBAEgWTy\nvSOCd5HP19nermA2a7h1a5uZmcT95GN9Pc/UlBNRFO53aL/7/9+d+FanCa2lREumpNWqI4pgs2qw\nqJvIKym67Q5yrRa9osvKiy+gtxqIz87z9KlnGPaJlBpyatUGseUoGkWTmkFDqiwi0xoI9pl55PEJ\nwptl6pIct9eKohjFvXOa/MoygtaEza5jceYSTx2YQOmbQJTBRjjLnbktYnNLBCYHyeTqqHz9NFt1\nig0RrVkPCg06bReH08DG4hbFgkS11sbu0GANHEaqd6mWa4w97kfSutBVu2yE09yZy3Biv5WLf/si\nqZUN9GYDnp2TOPqG2Mq2UVoN1NIpDj8+gckAmNyUy3VK9Srb2xJTU04CARN+v5FUqkq73cHy2DDd\ncp5Op9MT3dH0hMtsQ0Of2BwpJ5OUYjG6nQ4Gj6d3U/6MhGO70UBlNOJ5/HHUJhMKrZbVzSqFuTVW\nrt0lubDCmExNW+cmu7KKxmqlnEigHnKhNhlQdSqs3JhlaLCPWluPb8hNaMcwluERoltl3nw7QqMF\nuWict66uYQ4E2VxfxPsb+9EGQujsDmJrMfKpIrGFVXyHQqgsJs5+5wKlhgyj1UhkNY/VMIXLquNr\nXxxA0y7SqNW58sot6jYVQ/0GqqUaZo+NTjrCo2cmCQ7ZcCuH0JhNdBVq9LpJlArIlbok8l0klKxV\nVAgqN6qpAK3OHGKjwN7Hx9D5Q7xxPk6gz87mcoxGs0W9VsdmUnD5Oy8hqtTYdhkxWlRMTzrpttso\nxS6i0YJeIz6QiLTqdaIXL5JbW7uvZ+CcmsKzZ8+HUrgb5TKVewwbrd2OyvDzq5m/ypiZmeErX/nK\npx3GR8ITTzzByy+//CuZjLz22tp9MbyNjTypVJVHHgkiijJSqQrJZBVBEHA6tZTLTarVJjKZgFb7\nXsKSq/Rc2utSE4vPzUs/WaJcbvA7v7uf/QdDjI3Z8ftN91k3K3c2qGxvs3e/H5tFRSFbwagXOXzE\nz0m1ArkosLFZoC61GBiyoulq6BvS8+r3v08ukUNrbFAtV5G11YRXYvS7RZxOA+lICqNJxdZKlGyy\nwL4zh4hVqtxNGzAO2UjWtAyePkl67g4KRZvgmJdqLs/b//e3SG+lcY8NEnpCxdSUi8hSlGwJpo/u\n4cCxYZqNFrWWyOorZ8lvJ1HrNBhGRLaXY3RaLUyBXvtCS5LILC1hn5jAd+AAKqOR5OwsMoWCai5H\no1Ihdv06nWYTuUpFq1YjvbiIdWSEdzuGu53OfU2Sn4XabKb/xAmSc3PUsll0DkfPJdhgoFSq02r1\nnJY/rnTDR0lGksAuYObe6zLweeA/Ap9o99bp0wNsbfXcFwMBE253rwny/f4F76Ld7lAoSCwvZx94\njyS1SCTKjIzYWFhI0+32PE2Gh633DZccDh2HDvmp11vcvBlHEARCPi2jHieNSy+gtphwqDps3bxN\no5Drcc937aK1eo1hS4DNsoaFZINmLs3up/dw7ieXiSWbXDt3h9E9g3zhG0cwmtKYnTocln7WXlvv\naW/YHVhCIXIzV1Cn88gTCgLDTrZyAqKs54GSS7tpydQM75+i3pahVki4bCInDoZw2ASuXiuwa4+P\nZiXI2JSP7eUNvv/vv8tjXzvF7i8+yXY0Q6dWRspmGbIbWIx2mRyQU1hdwiNL49vlJZ1rYO1mCXlk\ndI17qOYKPPKVg8QjGd7+8SyVOniGQhw5YyXQL+8p1eZq2O26+x3wAOjdn9yEeB8KkQgbb71Fs9I7\nD02p1bj37EHrdFJNJlFarGTbJm7OlTBXS0zsNONViYTDeWp3lzEMjVGv1ujUJRwegZ2fO0FkKYpc\nrcbq0DN9bBrl+jsInQbNfAqXz41/7xTxsohcJqKTVTl8wE1XqpBYWKG0skC71aWaybDxThufz0BN\npuHOXIxqvojdrqOaL7H7yWPM/4e3Ca/nGZrw0R8yopO38DkFrLouyz98Fc+uHZx8eppctsDo43ae\ne6YflBqyGxGW7q6hrycwhG+gMhjw7tuHQpZg4fo6BpeLkX37iOVlFHIVFldiLK9k6etzc+rkQW7P\nbuPqlMkkcli1HSYmnSDI2HOwD5ehhcupZWpPkFvffRHPvn3k4jU8TjXZ23PM3W0ycaBnqvju4lfa\n2iKzvHw/AWzX66Tm5jD6/R+ogpWTSSLnz1PNZIDe4hZ85JF/tvnyy8TMzAx/9md/9mmH8ZHwxBNP\n8Md//Mf86Z/+6acdygfws6q83S6Ew3nGx+1IUou3394kl5NYWurNl+PHQzSbbTwePR6Pnmq1gVwu\noy1TodBosLrMpLMSggCHjvYxNOKkVmsAArFY7zkyN1dDWS+xf8pCdf4GYqFOI6Vja72Eq3+AN96K\nkMtW2bfXy6nTAywupPBa1Uj5FCOTPoy7HYidBhaLgGfATr1aoVXL4rUKaA4PsP9QkMJmhCOPDlG/\nex7fnv3IBpRUlu4gJGoYhvsY+vwjNFsdlM0SK29dpFGpUisUWD73DjqnC+3IUf7tn3ye7UiaXCyJ\n3aIgEy+RX46goElLqhOc6MfkdqI16ihFowiCQCkepxCJoHO5iN+4gSUUwjU9jc7pJL+5SRfotlrQ\n6dBpNmkLAqZAgFo+T6fRQJDJ6Lbb6N1u1D/H00jndNLvdNJpt5GJIvV6i6tXt1hdzdFud3C59Ozd\n68Fi0Tz0XPgoyci/AJrvu9YEfhv464f+xIdAIGAiEPhgf4HdrkWvVz5gC6/TKXA6dTQa7Q84xkKv\ntLdvnxe3W3+P9aLB5zMgl7/XPex06njuuQkefTREKV9GiqyirOeZWV+nns+jD/ZjdFrouF3I1Wpm\n/v67GJ0WnKMSupaOrz7zCCu3mnz/P56lIajpGwtQlxuoVFvcWcjTzGRRdCQGH53Gixa3sU1+M0Ls\n+jUyK+sUUhkS16/h2rmTelZi843rLN+NMzbpps9uJhC04OzzkdhMoqjnKW0ss3Q5wpPPHqdUqzAy\nZEHRKDF3/iZKnYH5pRzTExaqiVtcfvE8KqudZLZJYDRIaGInlfUbVDfXUKmVjO2cotvKo+8UefLX\njpOLbrF2c5nXnj9PJVegmCmT3UqhMNsIjflJpaoUi3Xsdt0Hvut/DnQ7HVJzc/cTEejtCrIrK/j2\n7yezuEg4LXDxUgSty02r0OH8+U0O7PdgFkrINCKttobgk89gNwiEX3+FE48donxqF8ViHYdVRWhQ\nQdU8hlanYn0+SlmmYC6tJ7Me41Bfg/N/+Xf4x/qwCFpc+0LcLmURNRpCE/2I1Qzu8X3MRapItRba\naoylC7eYKdSYPjjMnkkPJ587gqqUIH7nLrmry5hzLhSDQ9iPPo5MJdKeuYDdZGH+b39APpXDPjZO\nsaFk4sRRDGrI5jxUigXufOe7RLdrKI1Wmpt58kKVmmGCZLqDzaZmZVXAZFSwvpLilR/N8uvPDvP1\nb+ziytmbFBMpJnb42O2vU5p5C4+sRHkhRz6exJRMsvPRRwi/+hKJlTA2t41ut8v2zAzl7W1MgQCV\nZPJBMxHe0yF4YLy6XZKzs1TT6fvXpFyOxK1bn+xE+QRQKBTY3t5meHj40w7lI+GRRx7h7t27ZLNZ\nrL9ifiTvR6vVodXqMjubpFJpolSKDA9byWRqJJNlHn98EINByZUrMeLxEkqlyOCgFceAH0Ffxt/q\n8mtfnOLqlS2uXInyzjtRvvnNXRw8GODs2VVEUeit79feoL56B6fViKuzRTvoYyNf4dChAJLUxO3U\ncf7Ndcq1Dm6PD6NLIGCa5fprNxFEkcrZyzzy66f4zd88ya23bqE36xkdd6OUd6k1g7TKeaTtAmaD\nnKXLr7Jw7jKh6RFqq3PEzp3FMjhI9PxbDD31FFpXjdhiGElqkdqIsxK5TWp9i065xIhfwFCXs5mt\nokzOYLRoGTjxHIm7i6xeuIysXqbbbmMMBilcuUKn1cI2MkI1maSaTKK2WnuWDC4XBrebxO3bQE+U\nUK7RYOrrQ1uroTKZ6NxLRLz79v3C6sa7TejLyxlmZrbvLwFrazm63S6nTg08tPL2R0lGIj/nehc4\n/1Cf9kuCzabl6NEAMzM9zwKdTsGOHW4cDh3NZhuLRU083jtZkstltNsdQiEzGo2C4eF/vOtXJhNw\nufQ04+vkiyU6Gi07f+u3WH/9dar5Av69u3FMjLP8xgVyuRpau40b55eJplvU6i1Eux+lSk4plUNs\n2fHpahRlXRSdOl/8zb1IyTivvLRAOlXjX/+bR1h69Sz58DqIIiqjCa3XR3pxmaXlMs1Sgb6Ajm45\nRyW8RCewi/MvvINSytM36sFnEzGHBJo3X8O/7zDf/+sf4BkdZHDfFFfPr2CplFCLHQrzN7FYtcRS\nRbRaDfmVJY594TBbWzUaZiOirIuMDsa+EKb+AcLhPJWNOJfeXGQznEdvVKHUaqiXy6STefrGA/eo\nb//0hrh2s/mxNCfazSb1YvED11vVKkq9Hu+RR7j2g3mso7r7N47JqCB+9TLZtTVUFhuFCuhkdVoy\nA6myiObOTcR2A6MoEn9jDe1TT6PzBdlumomUs5SKEpVElD6PiluvLaBTNAnfuI1vpB+TU8fU0Un0\nTjuWTppOZIt6o8PsuZv0B7SEZ+aQqVSg6JJO1zDqtgkYhlh4+yKlxRXMbgelTj9n/9+L2KZ2k5m9\nyfCkF08hihTfQK0x0tiOYA+GaK3eZHErh85px+r1kA+HqdUFMls5WlKFTqxK6JgDk97BkZNDHH80\nxNztLaqlKl/76jg7dvuxKcucORmgVbXSP+Zl9ezr1MpV6CrRq7qYnWbUJhM6dZdaWULvcDC0Z4j8\n+jqxW7MYvD48UxMYg4EHqJYAolJ5/4ju/rhI0gOJyLuovY9i+FnA7du3mZ6eRvyMsIJUKhXHjh3j\ntdde48tf/vKnHc4DeL8xntGoQqmU3a+YvNdQ2ut9czp1vPNOhM3N9+59rVaO12ukXG6yuZQhulWi\nWGxgsagZHraxuVnEZFLz7LOjaBRdpEQMlc9Obh6ahTSZ65c59t/8awI1G5ubJaxWNe12F7tdS7Xa\nJB7JUG1l2V6LYrKbKObLmPUKmrFl9FkvzsxNbLYR5KZRzr2yQPjWEhqtkkNP7qUQDhO/8BZquQql\n2CZ28w6NQp4jE6MUIxFufetbHPnv/nuarQ6Vagtz/wDFW1XK200iK0kUgo+dQS0Wg4QvuB+53c/S\n2xdZm4khyAQCARNOq4JqKoVv/35UZjMyuZzavXutlk6jv9fTYQoEUBoMiCoV6fl5BFFEYzbjnJ7G\n1NdHt9VCbTY/1DHL6mru/XsREokKuVztvh7MR8Vnq736ZxAKmfF49FQqTXQ6xf0Ho0Ihsn+/j7m5\nBJVKE0lq43brGBh4sOxULjdoNtsYjar7FDG4t4NLVnj1bJjYyhZypcjQqJuBg0dp5jPoAiGK0S2S\nkSQOv5N8qc3i7CZtQUExU8EXUmIw69Gb9UQuvI2UL6DQ6rA6xihdi6L0DRC7s0wqVSWf24ulL0R+\ndY12tYbG6cI4OMr2ZgqjyUJ8HRSyNt6gjuXby/RbfcQXw+QX77J+Vcc3/tsvEHnh2yj1evR9A+i1\nCi7/+BK7vuygVKrT12ciF09TzWTxD08gMzQwmlRsL6wSvrmI2hGgs7RMo1ztdZirbDR0Tu7eSOFX\ndzHajXQFgVKxjsNtBpkcEBEECAZNOJ0PN9nK5QblcgOtVo4oFUncudOjhdntOB5SyVKuUqF1OJDy\nD2rlqcxmlDod9WaXrkyOTHzPLdYkr7J58w6GgRHOvrHGykKcdqPJ53/3afY9eYboyy+gqKUQAOuB\nk5y7kMDobBIrq4in2+j0eoJDftqRuyzPp9g5GqJ8e5FmPoVT6WT/F08TX0vQTufRjg9RzaShUaVV\narC9vI7c6iIRLdA/YKUWi1BOJMmurpNYWMY2PsGFs3fYWtpE1z9CuSQxeyuO7ZEA9YaA2MwjSCVE\np5XMWg6Z0cnKpRn2/NopSuUGep+frVubNFtyjHoFOqOObFYivBQlPjOLzWHgwJ4g27dmUG4XKSvV\nTB4cpRTZJL4SxTY+QXL2Nq1ImEK6w+DkTvyHJmhLVaZPH6LWVaLSykldirCdqFETy2TbEXboDGht\ntl5S0e0iiCLW4WF0jgd9Z0SlEqXBcN/V91180jTvTwIzMzPs3Lnz0w7jofD4449z9uzZX7lkZGrK\nyepqjmazjcGgZP9+H1arFotFzeZmoXekWmsiijLMZtW9I/eejYfRqEKhkNHpQKXSIBotMjnpYHY2\nxczMNkajiv5+M/F4CZNJRSLRUz8WagUO7t+B76QcixaarTbheIsfnZ3h6u0COp2cqSkXjz4aJLKZ\np55O4e3rsB3L065W8ASsKFtdtK08jVKJ2LVrBE88xkt/8zort9eJRzL07xji4g/fwvLUEDIZONxW\nVEoZ3UoeWbdJU6qhc9hpSxJSPodjqI/B0ACSOYTbESWxuo7BHUBw9tFuNyETRTIEaEkypHIdnVGH\nVMixtpzEdHQcQSbDPjFBo1ik2+1iHhigmk4jKpXUSyVq2SyCTIbO4WDg1Cncu3bRqtVQ6vVoLD/f\n9+sX4cMsHWQy4YFn6kfFZzYZAVAq5R+6O3c4tKjVCu7cSd03XAI4dMiPTCZw506KxcU0rVYHu13L\n3r1edGKd9MICUqXG5cUOsa0iuWyZYqHOyswyJ5+YRFMt0DdgxTrtZmA7SzuX4trlTbqCnEZNot4V\naSmNPPlckMtnZ4iXq3TlCvwjfrymNrMvvc3oMyY8fU469Q1q5Trakd048xXanS4dmYqO1oxvapyz\n379CeD2P0ajE51aTiucZFEV0ZgOFbodqoUQqXUPv9yOqtcgEGZP7Bth55ihan5/gxAAGvZyNSAmF\nM8TWRoZ8TUaj2aZWlTA4LAj1ElNffo5qsYy9P0hDbeHumsT26hY6VxuPVY6n300mWUZtNKLRq9i1\n18+uaRt2l+mhKiNLS2lu3tymWm0i0sKvq+DspqgXCki5XK+T+yHhmp6mXi73zkEFAYVGg3N6Gplc\njkYOgYCRfL6nVSOKAu1SEXvAzdVbcRLxAvliC7lc5OL5NZzBgxin9hFwKRHocvZchKsvvMHQgWnK\nWj+xdIepiRHKVQlZsYjZrKar1BLaNUlo2EUJLRevZ7h1NcrYDj87Rw0UZi8xuaOfRgs0ZjNyJUzs\n7sfl1BCfXcMW9KLSqHAEXMhtHrZX30auUiHKZBh9PsqZHLmaAkvQSyWygcJoI52p4BgZoWNw4Ot2\n8O2YoJwrkNjK0mw0qFVb2IcMYLQz5JCzsZpma24Z55Fhapd/SjeyRXpFRt/h/aRmKmgddkYOTfeS\nO5VAwWlBaTCgdThox1fJtM28/HfvILVAbbbSykvsP3iEZi5DMlnh1jvzPPX7X73PQtJYrei93g/Q\nAGWiiGtqCimXu3+0JtdocP0Ki4b9PFy9epVjx4592mE8FE6fPs1f/MVffNphfACHDvkZHrbRaLQx\nm1VotT1n3vFxO5cuRSmVemu306nDYtESi5V6sgKmNuXwHarpDJYDB1iKSmxvV9izx41er8DvN+J0\n6qlUGuj1ItFoifPnN+m2mgz4lMxci/CbXxnk8gs/wtIfopDdxunUcfCgnkikyA9/uECt1uLXnx3m\nW28tYTdbCQy4oNVCr1fQH/ChUsmR+cfY/z//byi0KjZnX6Zbb+L0mKHdJLeVo6k/isI7RHQ1jN7l\npIMM58Q4apOJgVOPobHZ8OyYxnPoKGubFWYuLJHbjGL0BwlMjeJxa6G0RWZ+Dsk6zM0L88jWoqSi\nabw+A2aDjM2FdbwDh4hducKdv/97FFotoWPHCJ04gahWs/LSS0j5PIIgoHO7CR49iu7nKPK2Gg06\njQYKne4jVUhGRmwkEuUHWiNCIRNm8wd1Sn4RPtPJyM9DMllheTl7nynT6XRZWsoSDJpotTrMziao\n19t0Ol0qlQJStc6UOU1+eZGOPUT49hoyo5VaU067WabTEVkKV5gY7+Pbf3ORvokg+w8dJ3vzCkpF\nDIVaTmjPOHJnkO/+zZs8+ztnOHk8yKD9KFIuD6kN8lfDNGtNaDfp2zHM5JgFCRV5mQO5d5CFK4tk\n0mkOPTuJKjjEduI1jCYVuWyNZkckuGuKjWiFTruF0eOi06ijVkJdpcS9cxrDxBSNgJJ0uozR4cKo\nlWh35UTu3uCRYwdYevMSqXyFtsXGmX/7u3SzCRbffIOs2YB5YJCa00kmvIxzxz7eXFyCvJGRAT1P\nPDHEZqyG2e1gaNSJx9KlHZ6loxknl6iRWV6m2+lgGRjAMjj4oZbUqVSFK1diVKu91qPcdpKN6CZP\nPDWMKBSh26V2TxfkYaC12/Hu3cva2bOU43GUej3NWg3x6FF0DgfT007q9TZbW72SrtNjp9CpEHs5\nQrXa8wsSRRluFKws5wi0ishzOWzjk2QjcUS5nFQ0xeQTE2zFK0TX0wSdAqF9u6nMXSW/FcdiN1Cu\ntEiIRubvVtA7vWys5yhsJRhR1ZBWzzH4xd/A83tfYO7Fn6JQltEp9Ux/4Wm2Khq6vkky2w18KjVm\npx1BZ6RZq6ELDRLdvNaj0mmt2EeU6F1OZDYP3qPHuXYpjHrcTUPnxHP4URI/fIUDj47hnhzFNTlJ\nqirHZDNBrcyu3z5Ge/UGs9/+DmqDAYM/gMruotFq4HC5iW+mSc9cRyF2cQ4P0AWyy8vo/EFWkgra\noop2OU+n2yGTKpJqhLCaRSgkkBrQQoFzeOAXjpcpGGTwzBlK8Th0u+jd7l8p19CPisuXL/OHf/iH\nn3YYD4XJyUkkSWJ1dZXBwcFPO5z7EAQBu/2D1TGFQmT3bg+Tk70KmyjKyOclUik5o30aZp4/S3ar\n5xCtdW2iEczI6KBSydHplITDeeRyGfF4iWeeGb23MW2TzdYJBfSk0xWqbSUqo4GpU0dYzqhIZFqY\nzb2j/eXlDDqtnHpN4olndtBtt/Dt2Ul2aQm7uQsGGwVTiO88nyCfr/Nf//YoBp+f+vIqjewWKqUM\n/fAkWwU5g7/2DKXnf0RbbWL8S8/hmhijuLlJfm2NdrNFemUVmU5Pt2Oi0Wxz4HOHUHZqpGbPYdSM\nYPQ6GXz8DK9cTbE4t82x3WNkY+eJRkqYJqyYrHrURiN3/+EfAKhls0TeeQfr0BByleq+1EG326Uc\ni5GenyfwPmZVt9slu7JCcm6OVq2G1uHAvXPnByqc78fAgIVut8viYppGo01/v4WxsY9nPfBfZDJS\nLNZpNB6kJvUSj8Y9A6YeBUkmE8hkqsTCCbyhFjJApIsol1HMV5E7PBw/NUm3mMKgU6AwGAiOa2gj\nkGlqcZ94iqemd7I6v4VMoaBbr/HsV/YTHLDTDN9F2LhN+KevIGs3MTotjB0/iW/Ej61URa3SERf1\npHJV/IERlAUTGkeVCzcKHA82ePRfPketZieOAAAgAElEQVR8bpFyKoNtehy72cGF1+YRRDX2sVG8\nfiuDUyHE0W9QkOT8+KV13vjOeQS5iD3g4cmvn0Cm1TGyaxC7X8D5O19ilyQj3dDQ7lTZmrtA39GD\nVKIbxGduU08nGDj+KIKUoN+vIxbLcafexBcwc+bZPQipMNG3v0tEq8Xo91NNJknevYtcpaKWzdJu\nNBh++mmCx46h0j3Y1JrLSfcTkd5YdJCqdbL5Bm6lkna9/rHGudvtkp6f72mY3Cs1VpNJtm/dYuDU\nKfR6FcePh8jlanS7YDLIuVEpotWpMJvbaPUqHEEPmbIMhVbL6PgEytQKrVyK8UkX/aNeVtfyxJbC\nPP21E3RlSnT1JD6PBvXEaSLXb9FpSFh2HyW2JdKKRal1mxiNBjZvXMI9riR67i30FgMag4Ydn3uM\nRKKM3m6hY3TxwvO36HO68Bw8iqBQceJffYmZ83dRWW3cno3jm57COxLAuftf0pQk6HYpNBT8+Hs3\n2Yg3kelNLOWiPHLEj3LXSdwOFY3EJpe+8xIo1Dz5mycZeqyPpTtRcp5Jhr7+TQo330FGF43dzt03\nr5PqWLHajWQTOVKLK+zWabH3h2jX61TrUCxUEZQqmoISjajENTZCPNPC4lOiUCsJ7plG8xC+JzqH\n4xcucL/KyOVyxGKxX2mDvA+DIAicPn2as2fP/kolIz8PoiiQz0v31/F32ZE2mxalUMM6PIjB46aR\n2cZMAWt/gKIkY24uxfXrcZ55ZhSPx8DmZo+d8/rrYUBArVag1GronxpEZTQx9aUvEqmoeOtChO1U\njXq9jc+n5zd+YxK7XYtSrWHu4hrb22UefaSfI18bxahuEi/IuXy5wNXrq5RKdS5N2xje2+sDKZdL\ndOoVjH4/60tJwm0Z09/4HeQ0GZweQCwlaFYqCIJANZ0mNTuLfWyMkV1BbCo3ubUVMitr2BQiq2df\nQ6FRMfTrX2c7vILBpOHabIH9T52hK1UIBE3s3N9P/J03kbJZRIWiV11VqUgvL2Pq70dlNCKTy2lJ\nEs1qlVIs9gGtoGI0yvJPfkI1lUJUKqkmkzQrFYaeeOJD1VjfhUwmMDxsY2jISqfT/VjHM+/i00xG\nngL+VyAN/FJrnjqd8gGNEgCrVU04XODixQjJZBXolZgGBiwkywVk9N4rlpP4HEpSd3MERwykbl5l\n5foSk7t8lDVeNtIwv1zCf6fIU5+f4vDeAbQaOXdePkc5U8Rg1pLp5Aju2YnR78M2PkFhaQ593xDm\noVEyG1uotUrWVzK4T+/m//rBHY4c9NDuiLx+dol9hwfRiRLXl7JspXXIFQZit9v43TmOPDZBZDnG\ngE3CTB5Zt8VGOE9R5ebm+QVQqnGHnLhDTrbjOZ54JkRjJcrN//A8tXIN/cQ+vEeOsXLxJnIE3vw/\n/xONYgHfcID07Awagw7PoaP0q+IMHR2hIdfh8JqR5aLM/ue/xTo0dN+PoFEq0ahWEejteOvFIuHX\nX0dl7Lmy/myFRKmUPdCopjIYEJVK1Go5Xan3vSv1+oce52at9uFNkek0zXuukoIgYLW+t/NS+wc5\n8w01Vy+t4tujpCJ1MFkNHDroQVUKk1nuOVtKMhtFSeTgk6eYj8lY3Gzy9NN9sLDG1f/8U2LRPAan\njekzR8li5rX/7wVK2ynMdj0Otxlbfz8mdwPlo4cob0WoK0Q0bi9qi4V6V0ltO8meIyPcvboE1Tbx\nQo4nvjKG50v7iKbaOFwGdDoFiWiOly4scP2teUaO7KYkddm904lT0WRmocLMzC36ggbsLiOp2Rss\nX5yhIxMxaWH5xy+iDI7zg2+9SastYA14GN15Bo+xwcZyHKXDxWuvb2CzaTmxbz/bC2sIciXhN94g\nPT+P57BIcV1BPt0km6mRr8RxTkxw+EgAnxVcu3dj7w9gMD48je+ziqtXr7J3797PTPPqz+L06dO8\n8MIL/N7v/d6nHcovhM2mJRAwsrz8XsXU69WzvV1h5eYSifkVNAY1+w+NYlfmkLWiPPOFQ3zvh6sM\nDFgQBPB49HQ6Hba2SgSDRorFOu12B41Wid2mQq3TkKu3WQkXGBi2MzjS0yuJRkvs3Onh0CE/f/1X\n17n0zgbjY1beeP5tztPkt7+5l/MXoqxGGtRrHYqFOj98YYn/6X/YR//vf57w3TD+qVF0Lg8/fGGB\n61dy/ODlOA6XCcN3o/yPf7iLYiTCyssv0ygWsfQPkA+vo9CoKcW3Wfre92hLEt49uzE7zFRLNUpb\nEUwmNa1Wl7pCx+2VGv4BL/snQzQy67Sl3nF0p9kEQUCUy9GYzSg0GiqpFLVcDqPXi87lQqHTfeAY\nNX79OvEbN+7riygNBriXLBl/gdIy9JJdUfynyY59mo5VF4FPpAvM7dYxNGS9/+XIZAIOh4719Ty1\nWotMpooktQiHewJdgxNezA4TcpuLYjJDUF/gc782Rr+9QzOzzf5jIxgHR7j09iqx+TU8fU5MehVv\nP/8mkTsrLH7vO1jECv1+FTZ9B5nawNmza7x4ocRcvR/nF/8VbZWBl/6X/4Nrf/s8tVQaW8iPXOxi\nMGn40YvLWEMBfv9PvsSp04OEX3+No/vsaFQC6/NbNMtlXD4bue0UQb8Gr7GFStaiLqgpFiU2lrep\nFCqM7R+lo7dx+XqKazfTrK1maLdb6N1u/IcOUq3Uufv97xMYDyHlC5TTOVrNFo1SAZVOTSWTRaNV\n0kpEaNx6A3crgqmZphqPIVMo0NjtbN+8iQDUSyU6zSbVdJpGpYKoVPYcb9PpDyQILldPF+BdqAwG\nRo/sxOXSISqV6FwuAkePPvQ4fxhrA3q9COLPsbZWadU0FTqeeu4Axw44OLFLw+FQBYu0SXXpFlI2\ni97lIhQyE/AbaCU2CTjlnNilQSNIKCwOnHsPYA14UZvMWAaGqEXWkXVbdOnS7nTJR7boH7Lj8JoI\nnjiF98gxTOO7KGr8LG3LuXAlycxSHZVey/ZWnlJLRWQzS347TavRoJwrcvPCEjevxbD43Ny9skqt\nWqcrKlhbTvOD788jE0WarQ52p45aLo9X2Gb7whso82Em/TAyZCYaq7JwbZHhk8eQq9UIKj2Jjo22\nJYDN78YQHGA0qCRkB8Hk4MjvfoNqKkmrXse1axcyocPwgIlOuYDVbcfo8VDKl3C6jYjOIM6hPsbG\nfznqi58VXL58mYMfIpX9WcDp06d5/fXXaX+IoNWvGkRRxsGDPg4d8uH16hkbs+HzGXtHriodgsVJ\nU21jdr1B19GP0O0Q9Gh59NEQJ0/2oVYrePPNMO12l/5+C1//+jSf+9wQzz47yq5JK4cn1FTiUbbW\nEszdibO8nEGS2thsWkRRIJutsrFRpNnqIirk5OMprr21wOzMFqUaWK1a1m8vM+DXoJALGLUyXnlh\nhr7xAPv2eBA2Z4nfuIpK3kFqdKhV6lTLNYL9dlqSRGJhEVEuR1RrKSeTSPk82dVVVFo1CqWCeqHA\n9q3bVLJ56sU8emWbwyfHqJar6JQdRsfsjATVuPQNHCMjOCcn0TqdIJOh1OvReTwEjhwhfuMG8evX\nyS4tET53rudWPj7+wHct5fNU0+kHhM4apVJvHf+YAmYfB59mZST/i9/y8SCXixw86CMYNJHLSZhM\nKkqlOvPzKQqFOu12l0ikgMWiQatV4A9aeOtcisjdPFadhX6PyHDAQl1Tw3lmB/lklo7Lhr3PTypR\nwGrT0dgOs3l7mdgxL/m5RbTKLo7hART+Ic798DJtjQXbzt0srxXJJ/OEZGl0WjmmwQmuvL2E1Zsj\nZBvn5OkhNkZcmOR1aktz6LRNdPUU2evneWTvTnx2kWIyzeKVu4idOv2HzMQW13GF3HQ6IBNleL0G\nxnYGyNRlnHt1AYfbwpAaXv1PL7B70oxqZZXm1WsMfunrXPvBWYYUSjRWK6JKhUbVk+12jY3SbnXp\nSFX6TpwgvbCAIIqoTSbUVitb164hk8t7BnOZDPaxMTILC9Dt0qrVUBmNmAIBWs3mB9T7NBoFx46F\nWFnJkkiUsdm0DA9bMepEmtUJFDodcqXyocdZlMtxTE0h5fO07u0MRJUK5+Tkz6UL9/WZKRYlwm++\nzfzFWxj1SjxONXW5g0osgiAItOp1up0OjZUVbNO70XTMLL10kepIkODucdZSAh33BKWyRLVU5far\n5zn12BSFcpBSpcXYiBWXukg8JvVYAW+eRzOyi9f+8sc0qjWcU1PkKhJvvLxA/7CThbevYzKraCPy\no//nHBpfkHpHRmojz9nXw+w6Oc2tdxah3UQQZWxtZlEbDTRbeVwuDZpWkVJCwmaSIygVlDbDCN0g\nW+t5BF2DkpRH0Joop5I0FBrk+8ZQOx28+e+/ReTOIu12h+SdYZ78vS8i9vcj6dRUUimyaxtoDEW+\n/C8O0rD0s71dRmjUyMYzPPfU9ENRAOulEsVIhHqxiNbhwOj3f8Bc77OAy5cv881vfvPTDuNjwev1\n4vV6uXHjBvv37/+0w/mF0GqV7NjhZseOnpDixYsRJKnF5naDsqRmayNLtdqkfyzA0MBuRJ0eh7HI\nq9+bo5AsodOb2NoqYrNpOHGiD61WgU7Zprpyl1Q8z8UbBWKJGtWGgnhbx9mz6/zWb+1gc7PIoUMB\nymWJutSi0+nSqUuoNUoEAbY2MowNm7FY9Rj1MsxGBT6vjqOP+li/PsvSGxeZvTDL5KlDWL07OXbI\nRSLnYGTYgkXIE10Io9DoKEQiqM0WlGY7BreLWjaLTKnEFAqSDW8iV6vpNiRK20lkShXT00HsIR/l\nuozBQStepwqjzdgzq3Q6MXi9pBcWkKtUePbu7XnGdLvYx8epFwo9iwZlz+H4Z9GsVtE6HCj1ehrl\n93xwNVYr2oc4gv2n4r/InhHoMW1CITNWa51IJH/fvwYE9Holen3P58ZgUHH+/Aa35oooxZ6ink60\n0wpXGTTJWX/heVrNDvYzZrRClcm9/YhGLTfe3sBs1qBTC5SVClq1PPnNTdSuCfKZMraJPqRsFpVR\nTy67zdDUEOYdDa68EyYQNCNTKNAquwwE9OwKQa1co2Mbpry2gKzb5sKLl3EOJ/HsmKLelhja1Y9e\nI6My/zp9Rw+RmruDJF2hrg7h9eo5cGYv3/m7WUxWPYMTPgxijWa3xWa0zL6BQeJvn6OwOMfkyUNE\nFqPYRicZOfkIjVwGV5+bcrmBQdYlvbiI1maj/9QpnJOTaKxW6sUihY0NmpUK5r4+pHwe69AQOqeT\nyDvvYPD7MXg8WAYH6XY6aD5EWMlkUrN3r/cD1/+pDyTrwAAKtZpCNArdLsZAAKP3g5/zLlQqOX6n\ngnApjdfZq6okkxW6goBNrekpGcZiNCoVcmtrePYfYGstTLPRILq8Sf/BHQwO2bn0yk0UFgf5ioDW\noOf2a5cY3xmgXWkQvhqm/6kdNFUmls+/iXd4iJzSQj55g5ZUx5TP4dt7kLWFLUZOTjA86qR/xEki\n08Q0Mk5bkrAauoQmQmwl60zsnSS2kSa7lWB41Iu/3wVyJaJc5PDhIPrKGkqZjMHdY2xcuU46WUKu\nzWDSq+l6fczciOEO2HBabQR2DOGfHubOS2/Q6XTQmo0UMiX0yja51VVs/X3c+fFZlLIWMpnA6sWb\nhDoi1kddxOaWadSbPPbVkw+diITPnaMcj/cuCAK20VGCR458uPnWryi63S6XL1/mr/7qrz7tUD42\nTp8+zauvvvqZSEbeD4NeSS5Todlss7Utka8IGIx6cuUOL70Ro10pMH9zk+jsIhqNHGo5gpNjgEAy\nWWVuLom5ESd5/QoDB3Yxd2uObhfs/QE6LQWtVodIpMAXvjBCqVQnm63j8xtoNJpYlDqy6+u4/RbU\n3RryYo1/8ydPEysqOPJYi/ExG/pOntnnX2MrmqdabVGMp1Aac8TvpvDtnETelpi7cJ3msJWxyUlk\n7QaCKKJzOFHbbegcLqLXrmIfHun1k2SyyOQKvAcOcuf1y2gWk2woRth7uI/xCecD7tdGjwejx0Pf\n8eP3r0WvXEEQBDQWywP03W6rRbfbpZbJ0Gm3e8c2CgV9jz1Gen6eRrmM3uOh7+TJf9YNwz/HSuAC\n/u5917aBr/2iP/yDP/gDzPdkacfGxjh06BB991w/w+EwwD/6WpKarK1BIlFGpSoxNCTj5k0Jo1GF\n2SwxMGBBklqUSg08TgmzvEZ1M00jWkCxy8vi/DKCQo1Mq6arajI0aWT55ipab5PpR/swm5Q4zHKc\nzz7F0swNFBYLtuFhtN4MhXwMaWsdz/h+sk2IFUrUpTY6k55KQ6SNEmN4gfN/fwm/W0NbVqPTFTj9\nlc/TyKZRe4qU6yUWfvoyg/sm2Zy/RbXSYIehSyGRZjORQtVo09Tb2OgI2AfMHDwzwMCOQcxGke3Z\nd2g081DTQqOOYmKcTLnI8RMHyWbrLM4vYT9xGPlWDLlag8qsweh20W2pycp0VNDRzBUZcTh6k3nP\nHjqpFJN9g6QX5kk3W+j8fvb//u+jUKvZjMVIVavsOn4chUbzkcbnw15/HBi8Xgz/SALyfiRTNarV\nFuWfkaJORHJMf/VRCmtL1AsFRLUak8+HSq/DmI0jsxmpNmXUKxJ7Dg9gcdt569I2t5dKnH7uGHd/\n+jqJ6DalQo2J3X20rQGSW3VqSjM1jbO3AzJZUFtliAoFpfU1jFKOwdBBSukmm5euUpObmH3xdQx2\nC0qdhka9iXtoBLPTimFwlPpWBqVex2OnJjBbtIxPunHqJBKvJ3jzrQ0840M4du0h37iDtb+PQN8Q\ns5uQT0cJTg1jsMsZCmqwKmq47Wo2lEr0bg+juwexWrVU6gI+kwHX1Djbt2fRG1QMHdmDwtNHo1Sk\nI8jp3zvC5L7+hxqfYjT6XiICPTOu1VVsQ0MPNW6fNubn5zEajfh8vk87lI+Nxx9/nD//8z/nj/7o\njz7tUD4y3mV6KDIpVJVtxGIDk05NPi8wMGhjfj5FMV/DW12grTFRyEuACqdehZRK0HXb0WjkJBIV\ndAZQKkREGThdeqIbWWTNGnt39eHzGRgZcyIIMl55ZYUDu2186biOBW0U+f/P3psFR3Lfd56fzMrK\nuu8bVQUUrsaNBtAnm2yySapJkZRkyYcsjaSRZ3asmN31ju1d27H7ug/WTsRsrNYbfrBjxjuODYW1\nkmxLYw0PiVezye5mn+xGN+6jgAJQ930fWbkPIEFRJCVKpthsxn5fgMxIVPyQlcf3/zu+X4uVJx75\nbdR6mdr2Oi39IJdfWCCba9HpQj7Zx1OfGUHUasmliuiNeraWYpw+eZzO4TDpVptCokynK+L1WzE5\nzWhmZojfuEFX7WL2+VER8M0eobixRs+RIxg9XjKb22zcXCUZTWGoahj69CQeC2SWlvD+giZqs99P\nWqvd7yN5EzqrFa3JxNb58xS3tvYXjy4X9khk/zMnJxFEEYPLhaP/l7vP/7n4KMhIEnj4V/nDb33r\n/Y2B33qJ/bztjY08u7sbqCrY7R70epVHH93PirhcRiRpf3Vns8rIe1kWXruB2lUxyDAQ1CK2Ougn\nD2Pt7WXt2Z/gC4YIPDRGpWsg4PfhNCjc/Ou/oO++k4ROPUWyoqXYlrGE+7jz4uvkd3ZpNQ2M3D9H\nj6XKVr6K12/hjVspPnv2FDtXV8inS2jVNv0hIysXrrLgdjPxyAPMCkbisRz6gSEEdw8L33uVB8+O\ns3P9OiP3H8FvtdIS9chyBUkno600mZ4Y4IfrS+Q291DSLZRCjZGTg5TnX8Lh9dH38CMIoVEEY4vj\nkQFsFolurYhSq9GqVtlcjlNVoC4bqTc1tLoifX0ddDqJnr5h5ktW4rUGhpEAfoNCuN+NKxygXavh\nmZxEb7MdlEc+yPfz87Z/nWijxT0yTCGZQ+3uN9B6h/tAFNCaTLhGRnAMDJDf2KCwuYlSLtM74cXg\nD2J0Wli9GSU0GmFytEWzo2LwmJl4cI7heoNcvolks/Pyf73J0P1HGDk6QmVjBe9AkMjMGKnNGO1a\njUomy9DhQSS1xdW/+3uGjo6jb2Qx2cykd9IERyMk1mIMHR4kGDTTPxZGY7Jgsxl46dw21WqLf/21\nSfQGHQ3ZydraG6xGb3HmqRkch0/gO32UV5+7g8ms59/8L1/A65BQ91bpbtwgmpYJBP3MnuhnL5rC\n5jCjcfrYKaks/9cNHLogg58dQq/W0Blkdpe38PbYeWxwgKGpPry+X87c7r2Ucrvt9kFp7V7Byy+/\nzEM/tfK8F/HQQw/xpS99iWq1isl0d6wcflkUolG2X30Vtdtlut9EPmCl0NQyd7yXWKzElStxpsds\npGMpwuNmgn1Ocuky7baC1GrisOno67MRCtlo1wV0NiuNXIapmRAWuwk9LWJXryO7/XhNDpZX83zx\nt0YIqTG2zm+yvZShUWsyNNVL5NQJxmd6efrZdfbWE9QLBUSNyF45w+2Ig9HHzrC+lsMa8KF1uHnj\ndpbjT97H0R4fjUyCbXGP7PwbWA8PolYbhE89gAokVqMIQO8DDxCcmaa8E2PxB/+FZLqOqNUy+dj9\nYHHRP+xCVJrU8i06zebPzVxYg0ECc3OkFxb2DUMtFgJHj+77SS0tHRxX2dtDFEUiZ87QyOfRyDJm\nn+/nTtH8OnA3c6RHgP8NmAR+DHwW+NVmPN8H9frbjLDR6BCJOLhxI47fb8ZqlRkZ8SLLGpRilkvr\nm7hdeiQBXB4z1UoTv8/BXlJPKVOikKuQTizgrTQw9Q5hSG+jWsw4e7wUdEEuf+8KOpeXVj5HaHyA\n+77wENs37uAOeRkO6zAaTBx+ZI69nSJDjmFaOjsri3E0GolOo0mlqOAK+qgWimzsNTn85MOY1gts\n75SJrcZ48uufImJrIgw9hXdmhmS8iMVippHPk7j4Kna/k6nf/R2+8uUJXv2HHMbxKfzGOsryBRRR\ng9bhxDZ9nL//x3VSqQpBh0pv2MLsjJedF39EZi/H7dtJlLbC6NkHcc70sLmZJxKxU6+3ee21GO22\nQjBoJVNUWGsqVCWF0xENOovlnnJf9fnMJL39jDyqJbu6hihpmDw1QW1zkUYmQ2Z5mWoqdZBxkfJ5\nbH0Rao4h/vHbV7C7LbSNVV65lMLuMFDfXGbn9jL3PT7HTqbB7qVbRMYjhNUYu9cvUdtep7Z+m/sf\nfpLisTPsbCYwKhVGhmy06zUq+SKyViL66nlO3H+c+TsZrC4LwWEHhyccrNzeYf7iCv7+HpYWk3Q6\n4HAaqDZVBobcZK0j3PcVC/nYLqLFhe9QPzeX6hSzRTLxHG67SPn2TQqZIgP3H8MV8iKKIoNHR9mN\n5bEN9PPyC+sUGhKIGtJ3bjMx08dMbwd9u4BWo8GurVNL3KQqZVF9ZxDED977bvJ49k24um9Pt0kG\nA7qfsSj/uOPcuXM8+eSTdzuMfxbMZjNzc3OcP3+eT3/603c7nA+E/Pr6wepeW04iFlqINYGyfpjV\n1cr+5KQq0TsSohTb4ujRflbXi2hEGD86xAOPDOJ2mzh2rIdUyoZt3EFzZx3ZIzI0GebWhaX9rOGw\nA3snwf3jJsaPWrjxD1ss3klTr+975GwsxXGO1VC6KunoLpV8kVyySEcBb0ikXKignRji8f/uS9y4\nvsvLP17EHQlx8Y0S4o0cs8MyaqvBbjTJ5OMPIRoliqks0dtRNq7OMzgSoGdylGqrgmNggOCJEwix\nHNZwL61cBjGxTPbVEnqXm1yrjc6oxz87+74lU1GjwX/4MPb+/oO+Pq3BwNL16+86tpbNIgCuu+i3\ndDfJyDXg7K/rw1utzsEUjaqq1GptBAHOnIkwPOwkELBgs+lptxXEioWcXyG9EUcrdHHiwSI48E8e\noyntkNncRtQIBPrc+KfG0NvtSJUU9r4+TMFefvxynFq+SKcLjVKFV/6fH/LoVz/Nk5+bYPHyIunF\nBvrRI5hd4xya83LnB0vECyKRIS+p7RS1Ygm9pKVcr+EeHaEQ22MjcRODxcJEOMTDD56irXTZ2shR\nESRiN0qM3P8Q6YvnufbD5zFZjLSRWHnmOcbOnORTMxLl3U10Jgvy6fsQ9CYki42ddAdNu4I5Oc/C\n+S1SLgOmeASbxUa1FKNZqaG320lv7zE5V6Njs3D58i7pdJVbt5LU6x08HiNnzkRIJqvs7ZWp19sH\n4nL3CkIhK81mgLU1Ga+/H7fHhFVJ0Yruy8mbvF6q6TSdep1Dn/kM5p4euq4+Lj+zhs1rp9KWeOX5\nJRRFx8Z6AbO7iV6nQd+t8ujZYYpHA/jDbpZ//Dy1+C4mrw+NVkLdWWX6AS+2YpHc1i6lPTfOgQg0\nmrQ7CrlEnr2NHzIwOszwuJe9dIF8poRZb6KjCHz3/76ARqvF1eOkWjbSrgcpZ0tcvJLA7nbi7u9j\nU1XZvl3k/pN+5sZOodOKVJeus60ImIYn0Cgtln/wA9Rul97Tp3not88QqxipiimyqRRGk0zvzAR7\ne3FOnBzD3EzgHBqilsnQaTQo7exQy2Z/Ka0Qo8+HPRKhuL1Nt9NBazTim57G4Pr5PlEfJ6iqyrlz\n5/j3//7f3+1Q/tl4q2/kXiEjP01iHQ4Doiig06nYww5yDRmv14jNpsce7mFlfoe9a68SHuphYLqf\n8dk+Op0u5XKTwX4LzVIJVbKx0u5lctqNmN7E/OAAeqMek1gjt5mmsLVHMexkbTlFOlkinyzgcOoJ\n+E20qxW6Jj1Oj4WLT1/BYjdx8oFBbBYNowFILq9RU2QyTQO9s1NUyg10OomN9QImg42xE2eY/vRD\nKDtLbL58jlK+imt8Ao08S30nimzQYR8YwDsxgTUUwnpzhc3zr1LeXEVVFFZ+8iJas43Zr36J5Pw8\n5p4eLP6f75Sut1rBui+m2K7Xkd5jAlGUJMRfwSPsw8S90z32S6BabXHx4g7RaIGtrQKNRoe5uQA6\nnYZw2MahQ+4DR0GtVoPTrsNlVNE6RVRVRGwWaCe2aUiP0gpOcSgSopXeQ5X0dEsZ9q6c3x//WlvD\nfmgMj9fGYrNFt6Vi8PphJ8PijWiSi90AACAASURBVE2s2iAby0m+9GcP07H4WFxI8/r3buL02tiK\nZjh1dAa9dBWlrkdoFBk4dQzR5MBRjxG7fJWW1ooiL3Pic6exBnowCnWa5So9vVZqDYG6Lczhr3yZ\n3NoaK/M7mK1Zcgu3MHu97F15nWKrhWQwUC036HnoU9y+dQdNapvk4gqVUgej0GT5ldeZODpMYGyY\nWLKNxmRBVQCljVYrsrVVRK/XHIjZpNO1/dqrSYssa97henyvQBAEhoacRCI2Wi0Fo1Emv9EkIwho\ntFocg4MYPR66ioKttxfP+Di376QJeTRYe0IsLaZIJaG3x0LNr8HvdtB/PEBxK8oL/+n/wGizMDLm\nRWfUIVqcLNyKIkkSGl0Cz8QklmAYwROhVBfI1bQ8/O/+FVtXbzF0+iTLL11AauQpJxLojR62NzIc\nPW5gbLqH8y+tUqs2MVt1DM/5WFuK49HbmByx8dLLUbKhHnQ6DaOjLlwhP+1SgY5Gi2loHGljlx6P\nlo0fP4vcbaA1mShsbpItqeinHqBU6RLdSKMoKqFeB0OjvbgODeOSg5RisYOVqdrtvmta6v2gqiq5\n1dV9cTxZ3j+vbjcmnw+jy/VLNcHebSwvL6PX6z/ScuKvC2fPnuUb3/jG3Q7jA8Pe308xFju47mw2\nPb7hALVABK29gtW671nz6rU9DCMnmDvSwWzSspZUef6vFnjwwT56rXWkzCYGTYuuZOCBqVFyXZHd\nopbla5v4+nzozSYCo8fw5pMUmlqsLivZvQxqu0m10KZuk+kiIhn1DA3YiIyHmTkcIHnlIopGpWBt\nsLaSpf+RR7h5aZN2W8FpERBMCv0BGclgwGi3cO1HP8BEFb1WRzW+QiWRZOJLv4s4OYjebscwOMHK\nTgew4BwZInHtMm27jc1XXkNRuiBUaOZztM0y9UzmF5KRTqNB8vZtChsbIIqY/X4QRXiL5AkCzqEh\n9Dbbr/mb/Pn4RJKRaLTAxsa+BG44bKNSaVEsNvnsZ4cJBt99wquJBMETxxCuQC2TQWswYBs8RKUp\n0m21WLi6iKl/BIO2S+z55zCZtLjHp6mm02Tv3MI/cwZBq0OQDXR0VhxDQ4weDuBya/nd//G3cfSF\nSK5Eia4WSe3kkIp7hO0GSjUHZ/7lb+DQtShVFWodLZ1SjlpNIfyFz9ASZLrtNuvPv4iEwvZmhpP/\n7g/40fNxFt64RDcbR2fS88SnB7E5srTbCo1cDq3JhK23l2oyic5mw390gK1YBoe9jxsvLKJWq1gs\nduqVKsYBJ4Wt6L6MuM9EIlnGGA6gaPfNqoxGCQEVj1OmUqqDqKFa3XfEHB11v6dR0r0CSdIgSfvx\nmwMBbOEwxe1tRI0Gx8AAJq8X2WKh3WhgaqTYe+UFdupVVtZLhMcGaOkC2L1GtJoG7oid5/7iP1NK\n5TFaTNSKFZJ3luh/8AGSGzEknRatXod5YAhFcPLsX7/IxhuraPU6HvytBzn1mccpbaww8cgJREnL\n/NUNdrbzGO1Z1s9tY3eN8/XfP0k6VWFwyEmt0mR3K0vMVGE8bMb7xQkyVQ29ERfhsJWePievvVZH\np1GwN9qYHXYyKyvU9nYwRkI4+vupl2sUK3uMzrZwhAOMvTm2WynW0ekkvCE3hRsXEUQRrclEp9HA\n4HZ/4IxGeXeX2IULKK23G4Xr+TyOgYF7iogAvPjii5w5c+Zuh/Gh4OjRo8RiMRKJBP5f8CL7OMAe\nidCp18ksL9NttzH5fARmZ2lLJoxmPfl8g+FhJ4VCg5s3k2zudlDVNrduJfF4TBg0Le48+xK06kxO\nejEYGqg7CzgOneJOWcQ72MsrL66Ry1awuq2c+dQIXqudntlpTDroarS0OzD44H3U2iLesI9aMsG/\n/MZpiou30PXsa42Uouu0CgqF1QUeeHiK15+5THp+G5ImZIOB0//2SQaCEtlOBtotSvkKllAYo9WM\nzW3H5LTTaCqsLGVYWc3RViUme0WKlQ6dtopss6MgUa60KZZaqPkWEe0vlkRILyyQuHHjQHWy227j\nGRtDabVQmk3skQj2gV9s5/DrxieSjCST1Xds74/xCqjqez8AO80mSrP5Dr3+SrVFpdpGqBWJr2yh\nNxuZPjWK1ubE5DSAKOIcHqaRz2O3SZz+8uNcuxilWigyevQQM/eFEB1+nD470Vcv0ZEMtCplPFaV\nO6/epNtqYXfbsAhHGfYraAZn6SoCHkOH1/5hkcUb6wwdHqJvtJe+qSHmn7+I5PZzdb7A5XOryGYT\nVpuJnfUkl6+aeeToKKZuEYPPy9qzzyEbDXjGx8lvblKObePz96KL+NkNutnM5rGYNHQ1WmSzEXeP\nG1EQ8NshNHyI0EOPYB8ewmzWsr2eZPP6IlK3S9BupNKCiXEPw4dc9PbeXSb9YUJrMNB7+jSlWIxW\nrUZ2Y5P47UVyq2to9DpkiwWvU0smpUGnVVm/eocH/kWERFlhd2eHgMGHZLJicnWpFMu0PAb0dis2\nr52+AQ96i4HgsRMI3j7+y7eXiVeNyIFe2vUm3/3LZ6h+6SSPnnKyvKNw88Id1q/MM31sgJlBJ7VU\nFoPPzJ2tDLJOIpuuoCDRP+zGamtRj8fo85g59qn7cLpMBPq8yLLEkSMB0sur3HnxGna/m3AkgKae\nR2k06Ha7iKIIGh3R7TKCzkjbFiLk76G318aRk/04ekPo1RnSi4s0i0Uc/f34Z2ffV8PlZ1GMxd5B\nRADq2Sz1bBbtB1B1/DjhmWee4atf/erdDuNDgSRJnDlzhhdeeIGvfOUrdzucXwiNVotvehrn8DBK\nu43OYtkfWQUmJt4W3UskKrz2WoxOp0ul0qLb3fdOoVqkmM4jCPuZXY1GwCPIFKNpvCEnF14tUdeY\nEKw6Ck2RrWQHi6eDZPGjHTXw8OwJGm0Vo8NOK5dCok1blbAa9WTW1slFt8gnsgxNDeB1OmmVShx/\nxEHsikRRsWGz6fAGHChLF9AGTlDbWAJRxD00SHl3j26rDs0KtVSdqmhj87m/xTc7x/pOk7RlAMHi\nwq7t0Cj3sn5rA73ZhMbqIFfVUOqa+XkF006zSW5j4235a/YFzcp7eww/9RTyx8g1+xNJRt7LMVCn\nk/Znz9/r+EiEwuYmtXT6YJ/JF6CityCUywA0KjXy6SKSrKFVqSAIArLRiGwy0TM1Smsthf64B7Xr\nwihDQ9VTyrbp7+silRM0VQPd7C4Wr4tw2E49n8dgBKPdTLxYR4gVaTXaXHzlZewGGwaDjNqss/7y\nKxz/+pfRiR08/V6u7JVpVmpodHqMwR76dEZq9Q4Wvw+j3s9uMkVDa0fSqOhsNgJzc+TX1mjtRXH1\nuPmNf/MEN5/RUe3ImKQ2ZrGOd3ICrdGIc2gIayhE8NgxSqUGqWSFgbAJuRlkfX4TvVzlwRMRjo4Z\ncPb+6rbTH1fIJhOmvkH2XrvG68/f3k+zOnSYtQoCXXomxzBrWthtftYX47R319Gbe/GGDdhdZoxW\nI2qzRj2Xo1jrEugN4hoaRNaKWAIBAkePEa8bSKWqlCttWrkaSrVMu91hfTnJjDvD7NFZwoEZKo9P\noJO6tOKbGANBdtNZ/F4zCwtJJJOZSFhGbub4j//rj9Cb9PzW75/Fm4wiSQ46FT1ahwO3rk45t4Gp\nsIHYSaJxjEO3Sz2bxeh2IxjMyJ4AN1YarK8X6O21YTBoCR/yotHpEDpNcmtr1FIpuopCIRrdd/R1\nuz9QZuN9m1zvsaxIo9Hg3Llz/O3f/u3dDuVDw9mzZ/nJT35yT5CRt6A1GN5TcfktTEx4OXUqzLVr\ne0iSyPS0l/5+O816jq6iUig2cDprVMotshWV0EMidARKlQ7lhkippCLLUCg0iUbz3Hc8yF9/9zpq\nvcrRI37aV26RjGXxDUaQtVq03SaqIFEvVfGHXKidFrXdKLNfuQ+XUeHxY3oM4TN0RC3FWIx6dJ5a\nrsDwE49TXFtBK6mYRyIYXS5cQ0OUkhl23ojSrtWpb60yODzN8vUVTp4+iZy4Q7EGh8MRPKMjdG1+\nzM4Qd1ZL9A37379cLgjvvFcFAVGjeUcfzscFn0gy0t9vJxotkMnse9BoNAKHDjlxOt/7Qrb39RG6\n775907VWC5PPh//wYSxVDZvdJs6Qh1IyC50Wg6dPklm4s8/ORRFrby+OoSHathA17QbdTpeaqmev\nrHLimIvK0hu0k9s0ym36wkGWr9/G5HBQi+WJPHqaTK7JKz+6gb6/xROPBKjkCuidelyT04iyQKNY\nZGdxHVdfkMruBv2T09xesCIKCuV6F9nTy+ioA8tggHP/dI3myh2UWo3Jw0G0NgfFtRWSt28Teegh\ntl56CcloZPL+OZRWa38UV6+nU69Tz2ax9PTgHhlhb6/Ma69tk9hKkl1ZwR/xc/YLRxDLaeRWhsLK\nIs7ee1dr4edha6vI8pVFqpUWZovM6kKCVqOBWMszZYtg0ruxtbYZ9ncJzvRi8IfIXTmPWDMwcuow\nKxduojUawerGNzdHt93aH78TBHYuXqDnkccZH3dTyeYp5BQUVUFvszI224fBXkGRzaxvRrn16jzN\ncgVXj4uBIxFa1SJmScMjT05RrSk0ikUqe2V0Fgtmv5e6aKSRjLF64zVskQgGhwONXo+oEajvRinX\narRzKfxzcziHh7H29GDsP0Ss6UG6mqbdVlhaTGOUVfSaNifmpqnGotSz2XeM+OXW1vYF7z5AA6s1\nHCaztPQOI0STx/ORqjp+GHjllVeYnp7G+R5ifvcqzp49y5//+Z+jquo9VzJ7P1gsOj7/+RGGhvad\nZGOxEvPzSfqmPMg2G06NsG/mJmmwetw4vDZa2TY6nUQuV0dVVcJhK90uOBxGnB4TX/z6/RR299i7\neYdCro5zeJh8Ko8z4EJDi7FH7kMvNslvbSO06pg8Xqz9A+j0GhqCgef+80sUc2Ua9Q6nnphjLhDE\n02oitht0m3UEQcASDLL89LOkqjKrsQb5bA3zep4nZo5jtplJ7uU4OjfNXtNOraGwWdGgMbroJBWc\nzu67FFV/GpIs4xwcZK9QQO9wgKqitFo4BgZ+LrG7G/hEkhGHw8Cjj/azs1OiWm3j85no6bG8700n\niCKesTEcAwN02220JhOCIBB2gstlYLDXTG55EaFexhoO0f/Qg9DtIhmNWPx+JL2eUJ8RNBLRaB5N\nR2Vy1oZL12AjHscZ8iPt7tLjl+gLHSZT7HL/k3O0RAPf/r+exj04SFXRsb1TxeG20W63aHX2exlc\nQ4NYPG52t7fRdNscGbeS60xx6404haqKzajSP+zj2//xIrsrUc6eGSd66TKlQo1GqUo5HufQU0/t\nNyuurCCbTOgsFjyjo3gnJ7H19lLP5UAQMDiddFWBG+fXyOcbgIjS7hBbiGLVd5kOtmkWi5gDgY/w\n2/xw8EEeuqqqsr1dRGs2I2lF6tUmGysJjGYdvb0+cukSS4kSJwY12ANeTHqR5vYSzkgv1XSSiSfP\n4p6apZIvERrwIhYSFJbn0TscBzberfg2D53wsLUQpdPwo69ZGBhyc/rBfmQZsoqV7axA1ztMU1si\no7Oxdb3GZz8zxuLryySLO8Qy7As1jfoIjA8jyyKmZob1F1/D4TRh9vuJXbyI0enEPjCAf3qa+PXr\nVLM5Srkytok5zDNHcQZczL8UJRSyYbXoSGwlaZbLHB4xo0muktjaQmm331GWUd4saX4QWHt66D19\nmvSdO7RrNcw+H96pqXtOBv7pp5/miSeeuNthfKgYGhpCo9GwtLTE2M94ldzLkCQRSRJZWMjQ6XQ5\nfboPp9OA78ufIb+ySLPeRN/TT7quY2GlyLFjQR5+uJ/d3fKbn6BiNmsJBi37xnqCREvvQt93CIen\nTqMlYPa6ydZUULUYtHYGHn2UbjZBu1bGHDnE9oULmPoGWc8bKWTKIHRxBN2sxuocznaJ/f0/Yg/6\nQenQLJWQDAYqDYhtJNHJJgwmPWgkNjYKhIaCBKQMrfgOqiSzshAFwN7UYQuH6O+3/8K+PffYGKIk\nsf788+RWV9Hb7bRrNSS9Ht/U1K/3C/kl8IkkI7DfcW2z/XKiLZJOBz/zoDQaZYwjEXoO9R08mN/v\npRYKWQmF3tZOKO/toXa7+xMakQjteh1HLc7IsRmalh7+6UcreCamiOdhfSnJ0SOHUawe1i/fplCo\n4zSD+cQQg2dO4Ql56XTaOL06/pv/doZLl3apFGu43EZevxTj1kIeTUvgyu0ij/7GkwSdYO1xoNaG\nsQaDpObnD1azSrNJPZcjvbh40Kh5EHOxQbG4/7LRmk0YHA6qqTSpRAW1z4akb+L8qWYnVVVp5PN0\nFQWD0/kOieKPAxKJCouLaXK5OoGAhdFR9/tmyAA6HQVL3wCO3Rjrd2KggqKo9B2fI9O2oOgFHEfG\ncZrUg1FVUZbRTZzkR//v62g0AjavC1OhRfL1BUZHA6jq2xMo9UwGl6ry+18bIVkSkHUy/QN26oUy\nhVoXnSSRb0gUK21yJdhd2gFJRzhkpVRSOHrKS0OoYJANCJ081IqEentoJbYx0Mbs9++XR7pdyvE4\nBpcLnd3O+Be/SKnawTQwxrXrKTo7iwwfHmBoyMnt2ym03QbmZpKBiIN+R4NidBdJp6NSKmH8qYZV\n2Wzed/T8gHAODGDv7UVptz92K7EPAlVV+dGPfsT3vve9ux3KhwpBEHjsscd49tlnP1Fk5ObNJPPz\nqYPtnZ0y4bCNuuimHTqMoKg883KUQiGL0aglHq9y+nSI3/zNMfb29gmJTqchm60xPe3DaNRSqbQQ\ng0G2N3P0D9u4s1JieSWHonR5+MEIWk0Rh81EI51gd3GVpR+f4+SfzFFpdQjcf5pqvoTR6cBos7A2\nH8XrcyGiUk6lKMZiBI4cQbLVQc2j1kqMzkwxev8cDZ2dgUNO2kur1CstRnpDdLsRYptZLFYdk5Me\nxsd/cYZS0ulQVRWNLOMZH0ej1aIqCqn5eazhMIY3Vc7vNj6xZOTDhiAISG+aDNVy+7bWBofjHcRE\nVVWqqdSBip3e5cLo8ZDbilEsNqmXquhlGdXoYGevCBYXVSVDqVKj02oTjZXo8Y7wxP8wTWY3g9tj\nQtXqSWQUlCZ4IxE0kobNrQovvbhJKZGixyMhOzzIRj1dtU2nI2B02HD1mTEFXdRlB6lqG9PEMYSl\n67QrlQOxqfdqRDQYJIxGaf8GFEXs/f1ojUbcbhmr344tNHNgKd2qVtm7epVSLIaqqpi8XoLHj7/D\nB+FuIperc+5c9IBcZbN10ukqZ88OYDS+uwtdabcZC8JOtMn0k49g6d1A514jONZPTdWS3k5idTlw\n9gZpbi2hNRrpyGZ0Pj+rywlK6SI6g4zFbkEyGnFNzRJvdQiEHJiEGmqzhi0SYefiRZTmFmFfAIxe\nYq++SiOTwuK0IGjMzI25uKao3LwRp1ioEegzYLbquHGxwOgcDA87GRpyEF+PceYpLU6LgNvgRq33\nIogirWabutZGo9UkEB7FojVQz+co1iViywXS6Qpebx97e2X6+qw8/vgg0UWZZl8Xu75DN7uz3yho\ntR4IJnXbbWSzGf/c3MEIYDWVIru2RqtUwhIM4hgYQH4PVU9Rku4pD5qfxo0bNwCYmZm5y5F8+Pj8\n5z/PN7/5Tf74j//4bofyoaCQLbOysEen1T0w3ux2VVZXczz4YC+lUoPXX9/D5zMTiTiw23WsrGS5\nfj3B4KCTxcU0iUQFvV7ic58bIRi0EA7b8Pst7O2VCAYtOJ16LlzYpbfXxli/AalVQDZKdCyD5Itg\n6rVy4utejEYtrXaXvWgSWafH0O0ScAkMhx1oLaMUt7bwTk5SicdJ3brFyG9+FdW9jM1jw2qSiN54\ng0Khjqczg7vHQzWVopPeYbLHxlh/CP/hKXyRD26lUEun0fzMPdiu1WhXKv8/GbkX0axUiF+9Sjke\nRxBFTF4vPUePHqiPJm/fZvf111EVBUEUsfT04JqYZiNaZHNzhU61QujIYVR3H9RKuLx1DLE6R4+7\nKJcb9Pba0ekk/u7pHdJ7BcamZPq9bdqmEq/8h79k+P6jPPYHX6JUatIVNCiymeXFXWaPm/if/ueH\nadUbDAR1VDNZqsUqNzZ2aVXKaDwhNlYUJoeP4mok0dtsiFotzqGhd/2PsiwxNeXjwoUY9XoHSaej\nZ2yQ06d76e1950WbWVwku7x8sF3c2kLQaBh49NGPRR16b698QETeQipVJZWqEom8TUbq9TapeIG9\n1y9R3o7S7XTYLXcZevwxCqYIO9E0u9EUatfIkROThMYHqPV4ufp6lM2NHGwVaLcFhmYGUEUNa5tl\n4ud3yefqzERg67UL0KoxNh1CkmWsvb3UsnlWUxKd5WVu/fAZzH4f4X4vXl8NuZgnMjjOaxcsGLsi\n42NuhK7C7P3DuL0WTpwI0dNjIW6vsXluEU2+RVt00tT7MPn95FbW2EmlcUf6eOGZBQw0GOlRcHgc\n1JoG7H19yGYzAJlMnakpP0bFxubONTql+tvnJZ9n4FOfQqPVHpCTt4hmNZ1m44UXaL3Z4F3c3qaW\nTtP34IP3LPF4L3z3u9/ld37ndz4W1/OHjbNnz/K1r32N3d3de9pvByC/uUliLUZmaZNyqYk1GMTk\n8yEIAqqqYrXqmZz0s71d5saNOE6nHqtVx9xcD+vrWXZ2ihw+7OPUqTCCIHDyZBBZ3r+OQyErsixS\nLDZ57rkN/v7vF/jik37euPYGYY+GaztljA47hx46wXrOwNTYMEpiCWMrw961Nzj91CxOcYf0pdu4\nM2YkpY5rdBaN0mTo05+m2+2iExQcU0ew6LvcfvoFWg0Vf9CFpl2lHG9hHxigvLODIAj0DIdx9/1y\npXKDywVra+/YJxkM+/1tHxN8cp4aHwFS8/OUc2VKWg/1RgdrQUFeXiF49AiZ5WUWvvtdKokEstmM\nLRymvLtLXbLR6TuCU7UiSyKi0ci1awkuXd6h0VTx+OxIWpFg0MPIiJtvfesiO7EiIl3azSgr3Sp/\n9N/PYvK4yWUqrF64yeDjn8bvN9NodFDcHq5ejiFptXz1Xx/l1WdvsnRlmUS6gdlm5uTJENpansDs\nNOlamYHZEGqzjntkBPv7CDgNDjoxmbTE4xVEUaCnx4LH884Vr9LpUHgPc7tqMkmzWET/MWDbnc67\nxbnUN8sub6HR6HDhQgw1t8fCs5dRVRWPx8jgoANd8g73H3uAV7oCWqOJYNCKImpZXMrRaHS4eLNM\nqdTFqFYo5soMDPuplutotFrmbybxeWTSi1HyW0lsDhP5koJlawvXyAj4BkgsrKJJbqO3O9DoDSRT\nNTweE1a5yfAxJ43WYVBVctkqiWyHrgputxGfz4xW6NDcXsHpkFFdQ5y/lCB65w2MbheOgIdjTz1O\nYidL5vYdAMaPTrD00kXCDzyA6vbRbCoIAgQC+0Ta9KZKanZl5YBM2/v63tejori1dUBEDvZtb1P9\nACJM9wpUVeV73/veJ65E8xZkWeazn/0s3//+9/nDP/zDux3Or4x6LsfOpUuIGg3hsJ2blzfJb24i\nGQwY7DYGB51Ikkip1ODSpRhOp5GLF3e5des64bCVz31uBFWFZ55Z4/OfH2Fw0IXV+s5r3mbTs7tb\nZmkpw2NnIwTbC2ysz7OXd5LPN9HQpbq7jTU0wxvXNuknia26w9f+4FPo1CY7V68R6A2SzLSx69p0\nozuYBw8x8tn7cPWH0Gg0DGoNLD33Ij6PHpPJisOuPxCbdL7Z+yUZDL/SOK49EqG4tUUlmQRVRdRq\n8YyNvafD+t3C3SQj3wD+1Zu//wXwd3cxll+Idr1Oudzk8kKN6OomXaWLwaTjxJlRzP4dMsvLlPf2\nUJpNOvU6SrOJZ3ycXHSXtttJS2NidTOP3avn2rVN+oa9XL8W59KVFfoidr785UkymSpmsw63x4Qs\ngaa9v52KF3H2BlFlE4VMCZdZ5bEH/az2WVlZy+O09fPEYxH24lVW5mN0VZFms0M5luG6LHLqqBu7\nVUtB68F9ZBCH3fC+Y5fttkIiUaFQaOByGXC5jOTzDTY28litMm73PikRRRHNe7yk7nZKXlVV0ukq\nlUoLvV6LJIl0Om+PsVksMi7X270L8XiZRKKCtZKjq+wfl07X8HiMSFKJbipFqdSiUGoR3d7BYtEh\nigLRaJ7V1RzdrorLKqDUahQrHWStjMuuI/fiEpP9PcQuR2nV6uiENsWsFXpt1HM5LLNT9M8JqFst\nlHaHer2N2lWp1toEwm7MdgOzswFef30HjVbC5TEwPe3j+PEgVquOaiq134RmMnPuappLr6yBqmIV\nWzQcVua3FMRsEUGzX4prKSIDh3xUknuYh8LUalV8PjOgkkxW8PnMBE+cwNbbS6NQQGe1YgkE3tcs\nq12vv2tft9N5h0PovY5r164hCAKzs7N3O5RfG774xS/yzW9+854mI7Vcbp8YCwKjvUFEcYDtzRwm\nncrMXICRkf2ep5WVHH19dpaXsywtZeh2VUqlJplMjW5XZXzczd5eBY1GJB4vMznpPSDrOp1Eo9Gm\nUmny4GEXN//TdZRGg1y2QqfWoFWUyO2maevKpBNFBvotlOJxZIeTej5OPb5DTmegI5moNmScegmd\nc5DFjJFPzdgPSIe3x4EaeWeZW9RokHS6d/Ru/bLQ22z0P/II5Xicdq2G0e3eV2L9GOFukpHngL9+\nM4ZLfMzJiCCKpEoiG0tvW6HXq03uLKSJDLpAVTH7fBRjMVBVWpUKrWoV9/Q0erOR+I5Mva0h4rKS\nyW6iN+Toi9hAI5FIVFheznLyZBC/30QkYqNRa6FVjKxfvIao+pFtDmSDHo9VoBLbwriXYMpk5tGv\nHcbVF0LTbfHDf0xAV6GrtNFqoFwqEVtu0Jl108qXsHj8WCz69yUiitLl6tU9Fhf3O9G9XhO5XJ12\nW0EQBIxGLXNzfsbHvQiiiHtkhFo6ffACEkQR1/DwQQngo4aqqty4keD27RSNRgenU084bCWbrdFu\nd7FYZGZnA9jtb5ORcrlFXzvLfwAAIABJREFUs6mgt9kOUrpqV6XZVNDIMoWqyrVrcbrd/WxKtdom\nkSgDb6ftyw0Rq92N2QDBkJVaXSEUstLpikg6mVa9QbvZwqDdJzsaWaab2iZ14Tw9fS5MShGDxUyu\n2EYWFWw+F+EhD32jBk6cCO7Hp5dwuQz7QmWA1mRCMhjIV2B9JU6jVEJptzF5PdyZ38NikRh0WUlt\nZ9BoRKxmCcmhIxjwoxvzs76eo1Rq8sor2xgMEidOBDl0yI29rw/6+n7hubYGg2SXl9+hVyBbLB+L\njNiHhb/5m7/h61//+ieyRPMWPgmlGlGj2deuUVXaqR1GPTaGe1x4xyMERt/uq1BVdb+vr9oiHLZS\nr3fQakW0WpFqtY0kieh0Evl8g3q9Qy5X57HHBnE49p8XIyNutrdLNLIZfL1e9kp57G4j25s18ukS\nPTM60lUVk8eFXk4yeMhH26ijllPQasBqkckV6qxv1Ki3YO9qjJaUJxy2Mj6+P0Rg7+sjt7ZGu/q2\ncKc1FHrXOPyvMpItm8131QjvF+FukpGtN38qQOcuxvGBIOl0VLp6BFFA7ap0lC71egdTV0elo6PQ\ntNIdfxhbYJfO5jy1bAZLTw/BqRF+8Owu3/neMuVskXJdIRC006pW0Oj0+Dx67CaRiWELtUIJSdzv\ndahU2oQCeo6cmcJmEshrZQJeLY6Am3ouR7dZh2ad0p3r2Kw69m7fRii20MgytWgUo9ZASQNWuxGr\n18HmZpKZo5H3HAPrtFqo3S6FQoudnRKdThetVqRWa/Pyy5scOuTG6zVRq7V5440kgYAFh8OAc3AQ\nUaMhu7pKt9PB3j9ATe/m6adX6XQUBgedDA050ek+mssslaoyP5+k2dwvz+RyDZpNhVOnQlitesxm\n+V2x2O37neYNnYOe8UHiSxsIqJjMOpyHDpGpWg+IiMUi02h0iEYLzM31sLq6X65pNDoUVS1DhwcZ\nGrRx5bUNjhzv5fq1OFOHJ0neuIZW28GgE9DodBjdbvauX0ZqVYguNwlMTNKtlgmNmLF6bDRbKtvn\nzuGdmMA7OEgyWWFhIU02WycQMDPQa4R8HL3NRjudQKdRUBoNLB4XWpOFAY+ZwUE3YqfBwBEtfWEr\nDmOFrujBPzXO4l6VaLR4cA7q9Q63biUJhazv2dj7XrCGw3inp8mtrqK0WsgWC8Fjx+4p9+afh1qt\nxne+8x1u3rx5t0P5tUKWZb7whS/w7W9/mz/7sz+72+H8SjB5vfvmlskkwL78QI+JmmDk+efXqVbb\nRCJ2BgYcLC9n0eslFEVlYsJDJGLHbJZxOg0YDBImk3yg4F0sNkkmq7RaCtvbRRwOPVNTXnYvraK1\nhlDkGBYDeP12FK0e28g4e/Euc6cOoYnVsbodZGIbDByZoOwxsbVTZenKOtZgCM/wIOevb3Ds0cNE\nV+KYlAIGsY3J76f/4YfJrKzQKpexhkK4hocRJYlyIkF0Pc3yUpa2qOfQVJiRUQ9m8we7Zz/u+Dj0\njPxb4Ad3O4gPgvChMCtDacqJJJVcHY3Hj28gyLkLCeZ/cglfwIbdbeHIsSc47AH/3ByZisjubgmn\ny4zVaqBQaDE+7iafl9HLGjLxIj6vzO3zN8gncvzm73+Kzd0ma9EyR46GODrrRapmGZ0dQCd1qezu\nHGhWADTyeXJraxS3txkI97O7a6Y7NEQtk2XyvjCnHpvG79LitQoY63Hg7VVvp9Uiv75OdmWFzNIS\nTcmC3hQk6O+nVBdIJCp0u1CrvZ16r1ZbVCotHI79Uo9jYADHm6O+q6tZzr+8fVAWSSar1Osdjh79\n4F3f/xyUSs0DIvJ2vG1yuQb9/e9dG31r3Hd1NYtz5AhTh4ax6rsMjPdiCfawcWGX2Vk/29sFMpk6\niqLi8ZiJx8v099tpt7vUai2GhpyMjLgZGnJh0GuZPz9POGzDZjegO9VPaXMFa08Q75Fxds+/QuL6\ndVweH3a9jUIygzvgQGs280//53fodjpMHokwnE7TMbk590qMQmG/ETcey7JwPsuxYRGhVSMwNcYD\nBj9do5NsoYPRJKO16FhcSGCyGBDbCgZNHosxg06vBUGgUGi86zzUah2q1fYHJiMarZbQ8eO4hobo\nNJvo7fZ7cnT3/fD973+f++67j3A4fLdD+bXj937v9/jGN77Bn/7pn96TWSDZZKLv9Gkyy8tU4nEM\nbjdd7xCvvr6fIYX9Z9GxYwEeemhfd6S/P0c2W+fKlV30eolDh1yMj3sOFh5vYd/vJkGx2MTp1GMw\nSMw8NM2d584x9dRZ1HaTIZ8T++Ag+Y6ZU9Y0ne072IYP4RsMIV6+juT04PGHkPy7YPdhGxhmO6/F\naW8xEdGSunKOhatF5G6d4PHjBI4cof9nfJAKW1uszW/y46eXqZbqCBqR3a00heIEZ85EDso89zI+\nCjLiA77zM/viwL8ATgCfBj7/Xn/4R3/0R9jfTPuOjo5y8uTJA9fM6JvNkx/ltlZuMXJkmOVlJ7mt\nKGarDkWB1c0KjmEf3VaZarHKylYd11AYsVSiUjFisegIBjvUyjVSqwVul/M88VuD+NwypoYGRTLw\nxo08bqPKS//7X3LkNz6FtldHb0+T4VE/4CcajZJZXYU3iUim2UTUaPCaTLQbDTKNBpr4Gg+eGGA3\nYSJT1BEaDtIjJGlFc6SqVZpShLcerdFolPzmJnI6zdpzz1EURVSjneTGbUbOnkFx+jAY2ggCmEwy\nzWYGALvdj04nvev8rK9vcPnyDp3Ofk/JW8evrekYG3OTTu99oPP9z4HB8O4eEUEAq/X9Rba0Wg0n\nToSIROzUam3M5n68XtPBze3xmIhE7ExNednc3Ff1zeVqWCw6kskqs7N+gkELer1EOGxDFAUiA24K\nyR52r88TX45jsJnwzz1AqijgyFRoVSoAlLej6JxO0LqJ35ingUwpld0/HysJ3F4rymr8gIjA/qov\nsb7F2OAEulICnatGq1hkatpPOtciNBDg+o04DqmO1WEnvbjBne0qoafGEItRtl95hdDQMWKxd54H\no1H6lVZYH6cGuA8Tf/VXf8Wf/Mmf3O0wPhI88MADdDodLl++zIkTJ+52OL8SDE4n4fvuO9g+dy56\nQETewu3baZ56apihIQd37qT5wQ+WmZkJ4HAYkGWBq1fjPPJI5OB4k0lLu60cTOQJgsCVK3tYzBLe\nwTHWby+AqlLT2NAEurQLuzRjq+xtpdld2uC+3zqLwWJg/eYqhWwJvaTSe3gGS7gXOV7CNCiz+L3v\nUskVUe1tmrub5Dc29pW9g8GDcrqqqhSiUTY3ClTfnHZTlS6VeJytTT/ZKS9e790pjX+Y+CjISBJ4\n+D32B4H/AHwOeE89229961vv+6E/a+X9UW1HIi38fhN37hgJBi1cu7ZHsdikKjmJ9EXwuvToLQb8\n4SH8fvP/x96bB8dxnnf+n5np6bnvA/c5IG6eIkXSlERZknXYa2WdxHGta53dcmJXnK2K7ewfm+Nn\nx5uy1055165N9pdNbbxR8nMSZ+P4ikRTVnR4TUqkSPEED5AEiPuYATD3PT3dvz8GHGEIEAQIgLjm\nU8UqooF+++1++3je5/0+z8PwcBibTUdtbQO3eifRO9UY7Hqa6ypx5ca48fqPyVprmTl7i1Q8BQpE\nhkeI6qtRtcglx3cbjdwOBJBSKapqmwhndQQULZVmN9WVcbKhGZTAIDU6LRXmFBZVslhvp8JioWlO\nroS62lrk/n5G+/rIZzKYAUGlRnYamLp1m5oPtmC1mti3L4goatBqjWg0KnbscOLxGFGpSq9PQ0MD\nFy+mSSQKD4tOV1jjlGUFSZKXfb0fhMpKE83NDm7dmkFRCoZIba2V6urFlw4ymcJLy2bT43YbUavf\nnx3u2OEkk8kXl0oEQUN9vRWDQYvBoEUUNezeXYl9jvIdQNKaSXo7sFbuII+Gm5M59NkgGY+dZM0+\nFMWFTY4Q67sO+hSOpgZ6Tlwu7p+Kp5ByeaRcaQ2JfDaLnJeR5cLLMReaotklEVNy+Bqc5EQtxmwQ\n0VRT0C3FYiiyQiyexaTVkksmcUhhamsdjI3FUBQwGrXs3l2JwbC0AnhbnVOnTjE+Ps5HP/rR9e7K\nQ0GlUvHv//2/56WXXtqUxojfH8fvLwhPq6osOJ2GeR5SKETRZbMylZVmtFoNLS2lhnR1tQWdTsBi\nETEatezc6eX69eni7yVJpqLCRG/vNCmPDcF3EEElE9RqMU8l2FGtIZcTcZgLHhaHTYvQXI1WyREO\nGggNDqGduIpYaeXyv5xi975awjMx6psqEJPjZAWB8MAA0fFxksEgplmdiJzPI6VSZHOl5yRLErl0\nriQ6cDOznss0XwK8wA9nf34BmO8/3mAYjSL19Xb6+kJMTsaLYkhZVhB1WgS9DpNJi8VSmGVWVVnw\n+ZyMjkYxGER0HiN791UhyGnUajVGtxtR1KLVashrNajUKux1tcSmZezm9z9umYxEEhMVBw4T9U/z\n9rsTTEwWdCl9/iB1dhuNtjyZSBhBq6X26FHSkQj5bBZBr8fT2VlMVgbArJhLlt6fPUjJBF6HDlOL\nndodTkSbnSNH6vD7C9EpXq+J2tqF0+oLgoamJgczM6VRFpWV5kU9E6uJIGg4fLiW+nob09PJWSPQ\nisl07xn/0FCYs2fHiEQyaLUamprsHDhQg15feDREUWDfvipisQzJZK7k/AyGQk4Wl2t+qJ3P52Rs\nLIY/kEZRJFwuAyaNk+M/Okc2m0fJ57EaRA4feQrRYCCtMhL7yYmCLlYBh8eKrcpLdZOHmyNjxZer\naDbjrHBgErLIskw6HMZaW4tNq0WRUsSMDky19WgsFpRUHEVRELQarFYROVRYbtPp1DzxRAN+f4Jc\nTsblMswL3d7OfPOb3+R3f/d3EbZQvpT78eu//uvs3r2bb3/72xg20XJbX1+QU6cKOZGg4AU9erSB\nxkYbQ0PhucVqcToNOJ364v9nNa9FqqrM7N5dgclUMEa0Wg1TU8liqvhoNENDg514PIfNpiMazaBS\nqTh4sAK3OkwyEkGobKAiFyMdDqNCpv7IEQx2O46JCWqbvKg0GpLxKZ7/+EH0ei0Exxm82ItaraK+\nppHs2MC8aESNIKB3OKivE7h5ZRxp1igRTWY81fZFM0pvJtbzafutdTz2spmZSZJM5jCZRDweE93d\nXi5enKSy0kRHh3tWR6EvzjLvfAC1Wg0+n4NUKksy6QUpSz6V4FZfmL1ddtpefJHw4CCJWJLxkQju\nHS1gdbG/SUdVfcEyHh2NFj+YFosWl8tDQp/H1fr+R9afEenY14xXncLk8RRTvOdSKdRa7bzsexqt\nFnNFBU6fj1B/fzFduWg0UNvRTHNnXTG1+0If24Vob3eRTGYZHo4iywoVFSb27at8qOvQOp1Ac7Oj\nUDr8PiQSWc6eHSMYLNjA+bxEb+80Ho+J9vZS9brFouOJJxo4c2aMYDCFRqPG53Pg8y18HIfDwDPP\nNDM5GSebzWM2i7xxrIdULImSl1FrBeKSjri5gb2Hfbzz6kV2vfgcoxevotfCzsNtVB19inRORVOT\nnZs3Z0ilJNx1XnY+Xo0w2Utao0FrNGJvaMDV3sHli+OMjcewehxcuzaFy6HD7HXja7BgUeJkZRmN\nKGKprsZs1mE2b64aMQ+DmzdvcuLECb773e+ud1ceKnV1dezfv58f/ehHfPKTn1zv7iyJTKYgvL5j\niEDBYLh6dYonnqhjZsbLhQsTRCIZamos7N1bWUxkVlNjxedzMjAQIp9XEEUNnZ0eKistJZ7R9nY3\nwWCKiYk4slyYwP3bf7uTbDaPJCm4XAakbJZX/r8rTA5MIuq1tHZU0FZnwVJZicHhwLv/IOJMHEEU\n0EpxUn4/gXCejCwQy75LJJQoFNQUPXTu3o3D55uXxdrd3k42e4VDR1u5fmUCWa2l+ZFOPvCB+ocW\nILDWbI2zWENkWeHixUmuX58ilZKKs+GdOwsx6IlElgMHqkmlJHI5Gadz/iwzFEoxPZ3i7NlxwuE0\nuXSGmmoTe55ooG5XNd7ubqoffZT4TJhUOg+ZBM7aSmx1dSSTWU6fHil+MNNpNVeuTBEO5/B63zdG\n8mgRrE68DaWhlYuJCl2trcWpwdT164gmE1V791Lz6KMPVGPGaBR57LEGQqFUwU3pMJQ82BuNcDhN\nNJot2aYoBePvbmMECvqR557zEQ6nEQQNdvvitY9MJhGfr+AKnpyMI4tGHE3NxCcnkfMSoslESmXC\nU+Ph0JOthCa97HlqH0a9mpzJy5nrCYLBQaLRDCaTlvZ2F/m8guCwUd9agZxJF8JpbTbGxqJcvDxF\nNpvH6TTw5JONSJJCZ1sr+uAAibFhjG43nq4uLNUPR1C8GfnmN7/Jb/3Wb2FaIK39Vuezn/0sf/qn\nf7ppjJFkMlcirr9DKJRCUVSIoobqaguVlRZA4fr1KZxOAyaTiF4v8NhjdbS0OEgmJWw2HV6vad77\nymbT8/TTTcVcJA6HocTTmslI/PSnw6QEGyp9jGQywZVr0zTteQxzRQWTkzFOnRolGEwVE0gePtzO\n7TcHiMdz7Hj+eQwWC9ODwzir3bT/m+eo2t017/2rt9loOPQo3rYQ+57ciUpnxOmxIAgbqxbYSigb\nI/dhYiLG5ct+stmC5yCRyHHhwgRer5HKSsuSXGSiVs2t3gBKPo9Wq0aWRQJBidGJJF27QGexoLNY\ncPkgG4+j1mqLlU2Dk9GSlObZbB6328jgYBiv9/0XZiEsbXlr/oJOR8WuXThbW/HF42gEAd1svo2V\ncCcuf6NT0MKUCl6BRYWcgqApJn5bDEVRyESjqLVaRKMRi0XEbBaRqqswejwo+TwanUhdkxuVSkVl\nSyPu+myhIq6o5/jxQuXkeDzL9evTZLN5otEsPp+Dd98dw/ohH7W17xtMk5Mx0qksao1mtuJyoYKp\nzmikqfMw2fhOBL3+nknMysCtW7f40Y9+xM2bN9e7K+vCL/3SL/E7v/M7XLlyhe7u7vXuzn0xm0Us\nFnGeQeL1mohEMvT0BIrvbSjUpmpoiNLWVnhuRFGYV+JiIURRoLrauuDvpqcThIJJ9DYbosVCPpNB\nURTGZyS6pTw9PQGmppLo9QKKojA0FKG62oJeryUQSDKaM+F44sN4jqQRjAaqH2nFcI9lZY1Wi9nr\nZfNLVRdm88cDrTHBYKrkhgbIZPLFF/5iKIrCTF8fmalJYpOThIdHMAtZvG49Dod+ntobCpqAPBr6\n+mZ4++1hpqYSJR/LXE5GFDXs2lWBRlMwGvT6e2sXloJWr8fkdqO32zdlaN+D4nYbaW52MPeULRZx\nSUs8i5EKhRh4801uHTvGrVdeYfLiRfQ6NXv2VGEyadFoBUSjvqineeedYS5fniSezKOzWEil8gSD\naSYmYoyMRBBFDSaTllCoMLvK5WRmZpJAobhf4OpVEsO3mbx0icjICPlc4b6SZQWtVo1aENDb7WVD\n5D58+ctf5otf/CLOLRohdD+0Wi2/+Zu/yV/8xV+sd1eWhFarYe/eqqI+T6UqPNOdnR7i8ey897ai\nsGBY+4OQlyQCV68SvNZD8MZ1IsPDZBMJwoNDBPsHiPhnOPlaD1JOornZjiwXkpR5vUZGRiK0tTnR\n6wVyOZnATBZ/TEN1vWdRfdtWp+wZuQ8Gg3ae0EmtVhUFjosRn5xkoref3r44wYkZ+nv99F8fY+eR\nbroeaaS2dr61nc/LnDkzRm/vNIpC0fOSSuWK0Q7pdI5nn20mlyskXrvjYtxOhsRqoFKp2L+/Grfb\nyNhYDJNJS1OTvcTjtFzkfJ6xM2eIDA0Vt42/9x6CwUBrWxtOp55QKI0gqJmZSXHy5DC52YiZW7eC\nPPVUU8GInUly61aQRCLL9HSKpiY7O3fayOcVVKpCBAwUihWOnTmD1V2Dy6FjcnAQJS9jb2ygqsq8\nonPZTly6dIm33nqLv/zLv1zvrqwrn/nMZ9i9ezff+MY3MK9TJuXlUF9vw2ptYXo6iVqtxus1YTYX\nqqsbDEKJnkSlWrr+7X4Eb95k9PRpdFYrDXVWLr7bjzAxgWA0kszrycQSnHzvKvbWdk6+W1geikYz\neL0mfumX2mhqciKKAoODYXI5mfp6Gw0NtlXp22albIzch6oqM9XVlqKiGqCmxkJl5f0f1MTUFONj\nUc79yzn27uwiGY0z5Y8yNTyB69mOeaFlAFNTCfr7Q0XjJxRK4fWasFp1pNMSVquOlhbn7DpomZWi\n0wm0tbmLrtuVkgoGSQQCJdsUWSbU34+7rQ2324TbbSIQiHPixPuGCBTcyKOjUfJ5BZ/Pid+fYHw8\nik6nQafT0N7uZmAgREWFmaoqC/lcjplbt1BkGSU0yZFDtQyN2QnHZbp2eWjr8JbDdZfIH/7hH/IH\nf/AHm+IDvJbU1dXxxBNP8Pd///d89rOfXe/uLAm73VBS4gEK+q6uLm8xI7MgqGlqsi84AVwucj5f\nLCiZDoVor63EavRx69o4lTubyaSyjF4bwF3l5Py5YSYmCmnnRVFDOJwmGCxE5NXUWKmpWXl/tgpl\nY+Q+mEwijz/ewOBgmGAwhdttoKHBvqSXvFqjIRJOE52OornWy2OHmlDpmjC7XTx6oBqbbb7bPJPJ\nl7gXFaVQuK2mxsLTTzev6rmVWX1UavWCHirVXYK0TCZfzG8yl1QqRzRayHJ75EgdkiSTzyuk0zkM\nBoFHH62hqclR0J9kMsXaMHIuB/4BWhwW9E12Gna6EE3lZZml8Pbbb9PT08MPfvCD9e7KhuBzn/sc\nv/d7v8dnPvOZTettValU7NlTSEgYiWQwGrVUVJgWLIexbBQFeU5NpuzUJA5J4pGaNK4dRr73Ug/Z\ndA5HtYdsJobLZUKjKYhXrVYdsqyQz8tbImvqalK+GkvAatWxa1cFTz7ZSHd3BRbL0kIi9Q4H9Z0N\n6C0mouEE10+cZ+DsZWKxNBbrwiJPi0U3T4iqUoHTuTruxTJri8HpxDI3nwug1mrnFaiyWHTzhLKF\ncTZQUWEinZaYmkoSiWSIxTLodALd3V52764s5m0RdLpCKv45H4xsLFYQ0y0SDZJLpYiOjRH3+5Hz\n85NDbScUReEP/uAP+MpXvoJOVw51hkLxvFgsxrvvvrveXVkRarWKigozra0uamutq2OIUKhM7vT5\nSp47tVqN0eMhn0riqPYgWsyoBYHqxsrZ5V8H9fU27PaCVqxsiMyn7BlZI4K3bzN+9ixWs41Hn32E\nS6dugEaksr2FQ8/uvedavtNZKB1/4cIEiUQOUSzkKamrK7vzNgMqlYqa/fsRjUbCQ0MIOh3ujg7s\nd2WYtdv17NtXxfnzE8RiWURRQ2OjnZoaK/m8wvh4rLhkYzRq2bu3akFPmqezE1mSCA8MoCgKtvp6\nKnbvvmf/YhMTjJ4+TToUQqXRYK2pofbQoXWrtLzevPbaawQCAT71qU+td1c2DGq1mv/wH/4D3/72\nt/k//+f/rHd3NiTu9nby2Syh27dRZBlrTQ2ujg7i4+N0+SKcHBhgpCdC12MHMJr12F1GVKpCpE9H\nx+osCW81NrIPTlGUzZnmNh2N0vfTn5KJRgEw1dSRFGxo7R48DVULxrPfTTCYmp0Ra/B4TNvCklap\nVGzWMV8IKVsItV0sZ0s4nCISySCKhXEWhMI4ZzLSbMVQCbtdf99w4my8kG11saq5+VyOvuPHiU9O\nlmyvPnCAqr17l3Fmq8t6jbssyxw4cIDf//3f51d/9Vcf+vE3MrFYjKamJs6cOUNz89osD2+F5z0b\nj6PIMjqrFUVRuP3GG0RHR8HqJaM2oNOqsVZVoLJ7UatVeL2mJRej3IrMLvst+PEre0bWgEwkQib2\nvuA1MTYCjGA1dVJZuePeO86hkLr4wfN1FGrC5IsZB8s8fATx/i+du8V3hTBuBZ1OoL5+6er6pXg2\n0pEI6XB43vbI0NC6GiPrxQ9+8ANUKhW/8iu/st5d2XBYLBZ+8zd/k29/+9v82Z/92Xp3Z8My97nL\nxuMkp6aQs1mYHkULyEA64af1ox9ddlVrSZJRFGXVlpc2Ouv5pfp14DcAHfC/gL9ax76sKhpRRCOK\nhQRWc9AaH47uY2AgxPXrUyQSOaqrLXR1eeapzctsLCRJ5ubNGW7dmkGSZJqaHHR0uFc1GkYjiggG\nA1K6NNeCuIg3ZasiSRJf+tKX+O///b9vWpHmWvM7v/M7dHd385WvfAWXy7Xe3dnwaEQRQacjO2ci\nCiDo9Wi0S3+OJUnm1q0Zbt6cIZeTaWqy09np2fKRcevp+/974CjwAeC317Efq47R7S5oBOa85HR2\nO/aGBqAQMTEwEKK3dxq/P76qrsrx8SgnTgwzOhojFEpz9eoU77wzSjY7P3KjzMbhTsEvvz/BzEyK\nc+fGuXRp8v47LpFMRmIyKJN0tSFX+BBtBa+LYDDgbmtbteNsFr7zne9QXV3Ns88+u95d2bBUV1fz\nr//1v+Z//I//sd5deahIUp7R0SjXr08xMhJBkpYm8hZ0OjydnajnGB4aUSxsW0bRxf7+IO+8U3gX\nBIMpzp2b4MKFiWWfx2ZjI0wJDMCrFAyTuWxazQgUIhYiQ0PExscRLRYcTU0Y3W6i0QwnTgwVCy/p\n9QL79lXS3V1x77ZyeYLBVDFpz2L6kVOnRujpKc1zodWqee65FqqrN/YMeCusIS+FZDJLJFKIkHE6\nDciywssv38DvT5T8ncUi8q/+VeuSo7cWO97Jk8MMD0eRchJyMkZHs4m2ei2W6mosVVUran+lPOxx\nj0ajtLW1cezYMfbt2/fQjrsZ6evr49ChQ/T29uJ2r67wciM+77lcnnffHeXmzSCSJCMIalpbnRw8\nWLuk5RJFlomMjBAeGkKlUuFobkZldRGPF5JW3q+elaIovPLKTSYm4iXbLRaRD394x4Ii9s3ERtaM\nfBn4DPD/rHM/Vh2twYC7vR13e3vJ9oGBUEkCtXRaoqcnQG2tbcEbNRhMcvr0GIFAApUKqqstHDxY\nWwzvvJt8Xp63TZaSh1IJAAAgAElEQVQVZHljPfTblaGhMGfPjhUjaFpanOzZU7ng+CgKqzJuw8NR\nhoYiKEoh943aYmc0LtBd34LFs/0ytP7Jn/wJzz77bNkQWQItLS382q/9Gl//+tf5b//tv613d9ac\nycl40RCBO8unQRoa7NTV3V/DpVKrsTc0FL3gN25Mc/FkH8lkDp1OoLPTw86d3ntOKBUF8vn5z/x2\neIc/DGOkAviHu7ZNAv8G+GPgG8AbwA+AEnPwC1/4AnZ7oZBRe3s7hw4donE2RHJwcBBg0/08PV24\nyTOZaQB0OjfJZI5bt/rxeEwlf68oCv39MqOj0eLfZzKFcvRVVfkF26+rs3PrVpB4PFBs3+k0kEgE\nGBwMrvv53+/nrUw8nuXdd8eK9TFyOZmengBer4nmZgfT08mSsgN3kiStlJmZ0nYBUilpwYqnW53h\n4WH+4i/+gkuXLq13VzYNX/7yl+nq6uJzn/scLS0t692dNSUazcwrnClJMpFIhrq65bU1M5PkvffG\nSSQKz1kul+XChQncbuM9M8Gq1Sp8PgdTU4l574L7eVU2O+u5TCMC2dk+/Bz4V8Bc5c+mXqa5Fxcu\nTHD27HjJNrNZ5IUXWuZVu41G0xw7dotYrLTMvcOh58UX29Dp5tuSsqzQ2zvNtWsBMpk8LpeRffsq\n8XpXN49EOJwiGs2g1ZaGpK6Ejei2XU1GRyO8+mr/vBlOW5uLQ4dquXzZz+3bIWRZobrawt69VSXG\nSCyWIRRKo9Go8HiMS46UunZtipMnh0u2GQwCzz3nW/X74kF4mOP+sY99jD179vBHf/RHD+V4W4X/\n+l//K6+++ir/8i//smqC3434vA8NhXn99dsl3gmNRsUzzzTT0HD/Cr9z6eub4c03B+dtP3Cgmr17\n7700mskUvOX9/UFkWaGqysK+fVVLmphkMhLT00nyeQWn07BoBfL1YKMu0/w+8CSFaJp/oNQQ2bI0\nNzsYGYnOClcLZew7Oz3zDBEoVKXUaud/5HU64Z4ff7VaRWenh6YmO9lswYuy2jlKbt2a4b33xuck\n67Jx8GDtlld7rxRB0CAI6nnVRI1GLTqdwIEDNXR0eJBlGau1dBY0MhLh1KlRIpE0Go2amhoLhw/X\nLekFVV9vo67OyuhoFEUpaIja2914ttkSzQ9/+EN6e3v5h3+421Fb5n584Qtf4Hvf+x4vvfQSn/70\np9e7O2tGVZUZn89Jf3+QfF5Bo1Hh8zmpqlq+0a7ValCrVfMmH/crsqrTCezfX017u3vBd8G9iETS\nvP32CBMTMWRZweHQc/hw3aapf7MRBKz3Ykt6RgASiSzj4zFSKQm320hlpfmeSdB6evycOTNWtNS1\nWjWPP96wYJG9h0Ekkub48T6i0ffDllUqePzxBtrbVyZw24gzpdUkn5f5xS+GuHUrWNxmsYg880zz\nooZBJiNx/PgtAoFkyfZHHqnikUeql3TsZDLLxEScRCKH02mgstK8Kt6s1eBhjPvMzAy7d+/me9/7\nHo8//viaHmurcuXKFT74wQ/y1ltv0d3dveL2Nurzns1KTEzEiUQyWK06qqrMC3qh70c6LfHWWwOM\njESL21wuA88807wmQtQzZ8a4eLE0Aq+y0swLL7RsmFwlG9Uzsm0xmUR27Fha3H5npwejUcvAQBiN\nRkVzs2NZybBWm1isUMRtLopSEH6t1BjZ6mg0ag4erMHrNTE6GsViKVRgvp+HIhrNEIlk5m0fHY0u\n2RgxGkV8vvUxYNcbWZb5d//u3/GJT3yibIisgO7ubr71rW/xy7/8y7zzzjurHl2zURBFYdlLMguh\n1wscOVJPf38Qvz+By2XA53OsiSGiKAojI5F52yORNNFoBpdr49c2KxsjGxyNRo3P57znh0RKp8lE\no2hNpkWLo60Wd8rZp1KleUtWQ2i5HTAaRbq6vHR1eRf9u7wkkQ6F0Igier0enU4gkyld3tnqgrbV\n4utf/zrT09P88Ic/XO+ubHo+9alPcf36dZ5//nnefPNNrNbNsQSwXlitukX1IauFSqXCbtczM5Mq\n2S6KmgWXhaRMhkwkgmAwLFpC4mFSNkY2MeHBQSbOnycbj6PR6fB2d+Pp7FzTjJJutxGfz8G1a9PF\ntVCn00Bj48pnEmUKxP1+xs6eJR0MotZq8XR20rbDzYVLgaLS32wWaW0tZ8W8Hy+99BLf+c53OHny\nJOIS0vOXuT9f+9rXCIfDPP/887zyyis4ndvT47bR6OhwMzkZL0bvaLVqOjs9mEyl931kZITxc+fI\nRqPFpGyerq5Fa2g9DMqakU1KKhym7/jxktTDGlGk+UMfwlpTs6bHzmYlhocjjIxEsdv1NDbaFxTg\nLpeNuoZ8LxKJLFqtelXr/0iZDH2vvkrC7y9uU6nVNHzwKeKCk/HxGKKoob7etmUEqGs17n/+53/O\n1772Nd58803atmGW2bVElmX+03/6Txw7dozjx4/TMJtXYzlstud9vUilcqhUqvsKXwECgTgjI1Gy\n2Tw1NRZqa20lesRMLMatn/6UTOT9JR21VkvTU08Vc6OsJWXNyBYkHQzOq4GQz2aJT06uuTEyNZXk\n9u0w6bSEJMm43cZVMUY2C5FImkuX/ExMxNBqNbS3u2lrc61K1FIqGCQdCpVsU2SZcH8fvmefXZW1\n7K1OIpHgP/7H/8ibb77JiRMn1qzq7HZGrVbzzW9+k9raWg4ePMhLL73ECy+8sN7d2lIkk1l6egIM\nDoZRq1W0tDjp7PQsKqb1es2LhuunQqFiNfk7yLkcsYmJh2KMLMbGkNOXWTZqQUClnj98mjV2RYfD\nad5+e5jBwTCTk3EGBsKcODHM9HTi/jtvAfJ5mdOnR+ntnSYSyTA9neT06VEGB+dXw30Q1IKAagF3\nqUZf1ofcj1wux9/+7d/S3d1NIpHg7NmzZUNkjfn85z/P97//fT772c/yxS9+kVhsW2RoeChcuuTn\n0iU/kUghv9B7743T2zu9ojbVGs2C342lVBhfa8rGyCbF6PFgqqws2aaz2R6CVyRBOFwa2RGPZ+fV\nVdmqzMyk5p2rJMncvh26xx7Lw+hyYbsr1aOg1+Msf1SLyLLMxMQEp06d4nvf+x5f//rX+fSnP01t\nbS1/+Zd/yUsvvcR3v/tdbLb1izrbTjz++OOcP3+eUChER0cH3/nOd0jfVRm6zPKIxTIMDZVGxyhK\noYjeUgv3LYTR48Fy1zdCNJuxLje97BpQXqbZpGgNBuofe4zgzZvExsfRO52429owlMVka8paV5tX\nqdVUP/ooerud8NAQWpMJd2sr1tratT3wBkJRFILBIP39/dy+fZvBwcHiv4GBAYaHh7FarTQ2Nhb/\nHThwgD/8wz/E5/Otd/e3JR6Ph7/+67/m7bff5mtf+xpf+tKX+MQnPsHHPvYxDh48iL7s2VsVVhqc\nIIgi9R/4ADNuN9HRUXR2O+62Nkwezyr18MEpC1jLLItIJM3PftZX4h0xm0Wee8634lj2zSBoy+dl\n3njjNoOD789aBEHNk0820tzsWMeebV7ujPt3v/tdvvWtb3H79m1UKhU+n4+mpiaam5tLDI+GhgZM\nDyGMvcyD09vby/e//31+8pOfcO3aNbq6uti/fz/f+MY3ih6rzfC8ryd3V2BXqeDgwVp27bp3hfeN\nzmIC1rIxUmbZTEzEuHzZTyiUxmoV2bmzYkkVLe/HZnk5RSJpLl/2FyNb2tvdtLaujoB1O3Jn3Pv7\n+wmFQjQ3N5fDRbcQyWSS8+fPc+7cOX77t38brbZQNmKzPO/rRTKZ5cqVqVkBK7S0uOjsdK9q9N7D\nZlNG0xw9enRN82WU2XiUx3x7Uh737cMXvvCF4v/L474tmZ8mdpaNfCcs2zMyODhYLEm/1mynY+Ul\niduvvUZ0dLS4TS0IND75JI4VCCvvPtZSZkqrfS1Ws70Hacvf08Po6dPMrRfubm+n4Ykn1r1vD6Mt\nePAZ8mr1YyO1s1p9Of/GGyi3b5feV52dNDz22EPvz73auNe4r/SY5f037v6LeUbKfuUy9yU1M0Mi\nECjZJksS4cHB9enQFkHO5wn29ZV8MACio6OkI/ecQJQpsyiyJBEdG5t/X42MkL4rx0SZMhuFjeAZ\n+SLwy8DdFazKmpENQtzvp+/VV8lnSkN6nTt20PTBD67acbbbGrKcz3Pz5ZfnGXqixcKOj3wE/Tap\n+7Hdxn2tkSWJGy+/THJqqmS7aLHQ+pGPoNsg91V53LcfG9kzogN2A+U7cgNjdLkw35XTRK3V4mhq\nWqcebQ3UGg3OHTvmJSGy1ddvG0OkzOqjFgRcra3z7it7Y+OGMUTKlLmb9TZGfgP4G1bJQzP4EJcN\nHtaxFEXh2rWbZDLS/f94FVjovNSCQO2hQ3g6O9Hb7ZgqKqg7cgTbCtMHP8g1XO3rvprtLdZWIpEl\nkcjO2+5qbaXm4EGMbjd6h4PKPXuo3Lv3ofZtPdtaCavVj4fRTiqVIx6fP/5r1ZeYTkfNwYMYXK7C\nfbV3LxW7dy+7ndXoz3LbWOkxy/tvzv3XM5pGCxwF/nwd+7ChCQZTXLgwweDgKNeu5ejo8NDe7l6X\nEFK9zUb9Y4+RS6VQa7VohA0biLWhmFtfAqCpyc7OnRUYDIXwRo1WS8XOnbja2kBREHS69exumVUm\nm5W4cmWKvr4gsqxQW2tlz54KzOa1HWeNIJTvqzKbivXUjHwamAF+ApxgAc3I5z//eez2QmGw9vZ2\nDh06VFTp3rG+turP/f39nD07QTRaKECXyUyj0ah5/vn9NDc71r1/a/FzU1PTlltDPnNmjIsXJ4s/\nq1Swd28V+/dXr2OvNhZbWTtw5YqfU6dGS7SkbW0unniiYduHtW7lcZ9LPp9HluVifpXtzEZNevYN\nYA8FvchB4EvA/zvn99tawOr3xzl+vI9strQOQUuLg6ee2pp1SrbayymVyvHyyzcJh0vrdDgcej76\n0bYllQTfDmy1cb+DLCv88z/fIBAorWVkNot85CM7sNm2d4r0rTruczl37hwvvvgi6XSaH//4xzz+\n+N1z7u3FRhWw/h7wPPACcIVSQ+SB2GqakTsTp0xmes62tbUfN/o13KyakbmoVEurcbNRdR5lzcjS\n21lonBcb+81wTmvdxmbVPNy9fzwe5+Mf/zjf+ta3+O53v8unPvUpUqnUQzv+Ztt/vQWsd3hivTuw\n0XC5jFRWmku2abVqmprWrv5JNpEg7vcTGRkht4SHpsziGAxafD5HycdHpSqkddbp1scrkonFCA8N\nERkdRborVLvM6qJWq2bLBJRaH/X1Nmw2PVI6TWRkhPDQEJlYbJ16WWat+J//83+yf/9+PvGJT/Dh\nD3+Y7u5u/u7v/m69u7Vh2ciLltt6mQYKNVCuXAkwNhZDp9PQ2emhpcW5Jt6R2MQEw2+/TToUQqVS\nYfJ6qTtyBKPLterHuhdb0W2byUhcuzbF7dshAHw+Jx0d7nUxRqKjo4ycOkU6HEalVmOqqKD+yBEM\njvUt8LcVx/0OkpTnxo0Zbt6cQZJkGhvtdHV5UGUSDL/9Ngm/H0WW0dvt1B0+vK2qM2/lcc/lctTV\n1fH666/T3d0NwLFjx/jqV7/KqVOn1rl368dG1Yzcj21vjNwhnZYQBBWCoFmT9mVJou+114jNpnvX\nmkxoDQbMVVXUHjr00IR2W/nllM0WQrOXU+RKzufJxuMIev2KoyGkbJb+V18lPjlZst27cyd1hw+v\nqO2VspXH/Q65XB5ZVopG6Mg77xC4cqXkb8yVlbS88AKaFQgdFUUhE4uh0WrRGgwr6vNas5XH/dVX\nX+U//+f/XGJ45HI5vF4v169fp/KuvE3bhY2qGVl1Nrre4UGZnBxdM0MEIBuPkwkXQk9jOh35bJbR\n06e58fLL3H7jjTVLTb6dNCOiKCzLEIn7/Zz6wQ+4dewYt44dY7q3d0Uv7r7r1xccx9j4OHI+v8Ae\n96asGVl+O1qtpmiIyPk8sYmJeX+TjkS4de3aAx8/FQwy8Oab3Dp2jJP/+I/4L19GllaWn6isGXmw\n/b///e/z8Y9/vGS7Vqvlueee49ixY2t+/M24/5YyRso8GILBgKDXoxFF5FyOwZ//nNj4OLl4nFBf\nH2NnzqDI8np3c9uQTSYZeecd4n4/2Xic5PQ0o6dPlxQqXC4anQ5BPz96Q2+3o9asnaFbZj5qjQad\nzTZvu6DXo3lAD1hekhg9fZpQfz/ZWIxsNMrY2bOEBgZW2t0yy0SSJH784x/zq7/6q/N+99xzz/HG\nG2+sQ682PuVlmjIATN+8yUxvL5MXLzJz8yYaUcTV2orB6UQwGNjx4Q+vuX5kK7ttl0NkdJT+V1+d\nZwB6d+2i7tChB2536to1Rt99FzmXAwrLcY1PPom1pmZF/V0p23Hco2NjDP785+QShbBfjShS8+ij\neDo7H6i9eCBA3/Hj8+pH2Rsb8T377Ir7uxZs1XFfaInmDrdu3eLpp59meHh4HXq2/iy2TFNOdFAG\nANeOHejtdhJTU0jZLAa7vVjHQqVWLy0WtcyqoLpH7O/dtUaWi7ujA53VSmxiArUgYK2txeTxrKjN\nMg+GtaYG37PPEh0dRZYkLFVVWFZgFKpUqgW1Xaqy1+uhs9ASzR1aWlrIZDIMDw9TX1//kHu2sdlS\nyzRbVTPycHKaqDB7vWja2nA2N5cU1LLW1q5JxMV20owsB6PbjcnrZXrOLFcwGLDV1T1wm4ODg6hU\nKqy1tdQcOEDV3r0PbIiUNSOr047J46Fq715qDhzAWluLSqV64L4YnM4SY2Y6kykUs2xeWYLEsmZk\neeRyOc6ePbvgEg0U3rNHjhzh5MmTa3L8zbx/2TNSpgRLVRVWt5vpGzfIZ7PYGxrwdHVt+9TVDxNB\np6P+yBFCsow+m0VrMuHt6sJSVbXeXSuzQVFrNNQ8+iiiyURkZARDNkv9o49iny23UObh8MYbb1BX\nV7eo1+Pw4cO8++67fPKTn3yIPdv4rOcXpgv4X0AeuAp87q7flzUjSyQvSaRmZoDCrHo1BImyJCHL\nMoIorritpbLV1pBToRBSOo3OakU0mR6oDSmTQaPVrniJZiOzlcZdliSSwSAoCgaXa10KSkrZLGqN\nZsMLk7fSuN/hN37jN+jq6uJ3f/d37/k3r7/+On/8x3/ML37xi4fYs43BRs0zIgB34s7+Cvgz4MKc\n35eNkSWQCoUYPX2aRCAAUMgNcvAg+lm1fnxyksjICHI+j7WmpugO3ohs1pdTXpKIjowQm5hAazBg\na2ggPDDAzI0bSJkMosVC9b59K3aZb1U267jPJZdKERkeZvjkSVIzM+jtdqz19dQePLjuSeU2Klth\n3OeSy+Woqqri/Pnzi3pGZmZmaG5uJhQKod7Ck4yF2Kh5RuYGwBuA8Eob3Go6jvsdS1EUJi9eJDoy\nQj6TIZ/JEBkcLCZTioyMcPv115m8cIHA5cvcfv11pq9ff39/WSYbj5Ofk4tgI5zXau+z1u1NXrjA\nwJtvcv3cOSbOnWPk1CmGT54kG48j53Kkg0HGzpwhGQoVti0xr8dG1LOsdlsrYaNoRqRslskLFzjz\n058y+NZb+C9fZvLyZYK3buG/dGnBfbLJ5ILp+DfKOa1mO9tFM/LGG2+wY8cO5PukQXC5XNhsNgbu\nEXa9Wc9/pfuvt2bkReBrwHtAOSB+mWTj8XkZNQFiY2PkUimmrl0jl0wWt8u5HIFr17A3NZGJRvFf\nukQqGCxoErq7cTQ1PczubwlSwSAzN2+izBoYgsFAdGiI+MREiSckPDTE5PnzJKemEK1WKnft2lap\nv7cyCb+fuN9Pwu/nzsJIJhIhHQ4Tn5wkG48jmgt1prLxOP6eHiLDw6g0Glw7duDp7FxR1tUyG4PF\nomjuZs+ePVy4cAGfz7fGvdo8bBR//Z8CLwP/Mmeb8vnPfx673Q5Ae3s7hw4donFWkHXH+trOP+ez\nWdKXL5MOBouRF26dDlNFBUJHB6OnTmGZ3X7n91VOJ01PPcW5114jE43ink2yFAJq9u+nY9++dTuf\npqamTee2jY2P0/eznxVzdwh6PXI+z+TFi3i7uoBCAcJgfz/NTz9NcmoKANFioeW55zA4nevW943C\nZnfXz9y8ib+nB/+lS4Ru3y5utzc2UrVvHzs+8pFiOv+hX/yC6d7e4t+o1GpqDx8u3ivbic0+7nNZ\n6hLNHf7oj/6IfD7PV7/61YfQu43DRtWMiEB29v9fBU4Bc/PkljUjSyBw9Sqjp08XZ+ZqrZa6I0dw\nt7YydvYs0dkKvHeSK1lqaqjYuZO+n/0M7rq+1bMhn+vFZnw5ZRMJbv30p6RDoeI2o9dLZHgY7WzG\n07jfj6DT4fD5ikJjgPrHH8fT0QEUlswy8TiCTrfiOjSbjc047gBSOk0+l0PKZBh4802UfJ6BN95A\nSqdRaTR4d+2i9YUXionM0pEIt44dIxuPl7Rj8nppe/HFLS1SXojNOu4LsViis4X40Y9+xP/+3/+b\nV155ZY17trHYqJqR54GfA/8XqAWOr7TBja53WO1j5XM5dHY7no4O9A4HtoYGGp98EktlJUMnThDo\n6SFw7VohIkOvR6XR4OnqWjSB2YOe18xMkqGhMH5/HFle2gtmK2hGRJOJ6kceQWe1Mp3NotZqMTgc\ndP7Kr+DduRNbfT31jz2Gq7WVVDBYsq8kyYyNRRi9McSNV39WUocmPjnJ+ddfZ/rGjVUpLz/3PBVF\nIR4IEB4eJjE9vaK2Vpt0JMJ0by/+y5eJTU4u+rFaL12ELEn4e3q48cor3HzlFQKXL+Pp6CCkKPie\ne47q/ftpfvpp2l98EVdb2/s7qlToHQ6MHg9Grxf9HWHrnOdxKX1JJLKMjEQYHY2QySxce6asGXm4\n+3//+9/n137t15a8f3d3N1evXl2142+F/ddTM/LPs//KLJO8JJGJRglcvkywvx+9zYbOakVrNqOz\n2Zg4f56ZmzeBwsdy+vp16j7wATSiyOTFi1Ts3Im5oqJEbyIYDFiqq8mkUsvqi6Io9PQEuHzZTzKZ\nQ6fT0NrqYv/+arTajR1auFIysRj5XA5bQwMGlwt6e2moq8Po9aIRhGKa9VQkQv/x4yWeKEmjZyAA\nwb5hohdOkJyaosXnwGLVc/2HP8Tb3c3U8DDK7dsYvV4ajx5dVlSGLEkkZ2ZQZLnQt1nyuVzh/rhx\nAymdRms04u3upmL37nWPskpMTTH0f/9v0WgT9HpqHn0Ud3v7uvbrbkIDA4V6TbPeyGAsRj6bxdvV\nRaXTidZoxOhwzPN0JPx+Ji9cYOr6dbQGA+7OTuyNjTh37FiyV2R8NMjVS2Mo6SRqjYbbdifdu6pw\nOjd2hd6tTC6X4yc/+Qlf+cpXlrxPU1MTk5OTJBIJTA8Y9r/V2CiakYUoL9MsQHxykvFz51AUhf7X\nXsPd3o5KrWby/HlyqRStL75IYmKiWBQtPDhIZHgYh89Hxa5dpGZmMHq91B0+jP/yZSLDw8i5HA6f\nD2t9Pba6OkSjccn9CQTi/Oxn/aRS78/QNBoVzzzTTEODfVnntlnctnlJInD5MjM3byJLEka3m6pH\nHlk0o2l0bIxATw/pcBjRaiVuquXKkEKNJcO1fz5GPifhchvZ0WQm0NND1b596Gy24vKaZ+dODE4n\nkcFBdFYrzpaWex4vE40ycuoU8clJFFnG6HZTd/gwRreb8PAwA6+/XlLNVdDr8T33HOaKCrLJJNlY\nDK3RiM5iWd0Ldw/ujPvwyZNM3VW1Vu9wsOPDH37gPC0PSjYeJ5tIIJrN847d/9prhO+a/Ql6PS3P\nP4/J612wvXQkQt/x40THxwn19REbH0djMPDIZz5Dw+OP31fAqigKM7cH6XlvgJuv/5zg2CQmt5vq\n7nbannqcnfs3n/h8szzv92O5SzR32L17N3/1V3/FI488skY923iUa9NsEXKpFCOnTpGcnkZvtxc+\nVIrC9X/6J2RJQqXREB8fJzoygqW2FtFoJDWrZZg788pGo6jUaqofeQQ5nycXjxMdHSU8MICzpYWG\nJ55AvcRkTdFopsQQAcjnFaamkss2RjYL4YEBJs6fLxayiwwPk8/l8D333D2TxFlrajBXViKl00ho\n+OnxftLpNFiLDyjxeJZ0Kks+lytx3Sv5PP5LlzC6XEVtSmR4mOann8bods87VuDqVSJDQ8Wf4xMT\nTFy4QPPTTxMbHycdiaA1GovHldJpMpEImViMyQsXyCWTCHo93u5uPJ2dK/aYKLJMXpLum0AvucCS\nUS6ZJJdIPDRjRFEUpq9fx9/TU/QcVe7ejau1FVmSUODey5yLXKdMNEoqFCIyKzw3V1YCEOrvp+bA\ngRJjJB2JoMgyeru9eO1jExNMD44xeuYso5cLAth0LEkuI2HzOGjbVYcoll/n68HcJZrl0NXVxdWr\nV7eVMbIYW0oxtRF0HGt5rFQwWPgYKQpqrRZ7YyOJqSlkSUJvt2Opri4YKg4H0uyMWms0otHrqdi9\nu6BncLnQGo0Iej2hwUGiw8Okw2G0JhMGl4vh8XESsxEfS0Gv1yIIpbeRSgUWy/0zt25WzUh8chLR\nYkEz5+Oampnh1mx+l3uh1mgQTSZEnYhOV/hwJGQD7oZqAERRg2jQFeqMVFUxMWt4ZOJxTB4Pgl6P\nweXC6PGg1mqJ+f3zjiFls0RHRwGQ83mSwSDR0VFuXLrE5OXLxCcnCVy9ysyNG+Rml+RUGg2yLDN2\n5gzpUIh8JkMmEmH8vfeKS3mKopCYmiIyMsKNy5eXfK3CQ0P0vfoqN37yk0JCsDlC37u584Gei2g2\nI97DQ7MWuoi438/Y2bNkIhHymQzpUIiR06eZuHSJGy+/zM1jxxDNZu6ez1uqq/FHo/c8hqDTFXLO\nhMNI6TS5VAqVIGCtqysYP5cvEx4c5PxbbzH8zjuMvPMOo6dPk5ltMz4xgVrOkJx+/9mUs1nymSwp\n/wRqSnNbrKVmRMpkiI6OEh0bWzBXylLaWM2/X8/97yzRzK1Fs9T9Ozs7uXaXJ3C5x1+Izbp/2ZTe\nRKgEAaPbjXU7H6wAACAASURBVJzPo7NaafzgB/Ffvoy9qYmE38/wyZP0v/46HR/7GI1PPgmKQuWe\nPRhdLqR0mvDgILKiULl3LwogJZNFwWWwr4/E1BRpm410KISlqgpJyjM4GGFoKIwoamhqclBbay3p\nU0WFicZGO/39waIkorLSTE2NdV7/twKBQILeSYGpES3VVQ481jSJkQES09NobTZGczmcO3ZgXCRk\nVxDU7Gm3YJJCJOJxnG3taPVa3CYZd1MlDYcPER0bA0VBpVbj3bmT5NQUI++8g2AwYK+vR6XRcPu1\n15AzGdwdHWgNBc2ARhDQGo2kZmYIDw4WlmryeTJuN8G+PsxVVTQePUpsfLzgAZltT6PVkrsryiOf\nyZAIBDB5PExcuMB0b2/hPgLsKhXe7u5FvSaxiQmGfvELpFmjJx0KkY5EaH7mmQUjhlxtbYV8HYEA\nKApak4nKPXuK5/YwSAYC5O/6wIb6+hB0OrLxOEo+Tz6dxt3aSjoSIZ/NYq2pwdnayuDAAHGzGZVG\nQ/DmTVLhMJaqKpwtLRg9Hhw+H+PnzqERRTSiiKejg7EzZ0hMTeH0+ZBSKdI2G/FQiFQwiNPnQ63R\nUH3gAGpBQEknqa134b89QS5b8J6JeoHqBvc9084risLoaJTBwTB6LXhMORwOfcG4XWAMYuPjhXtP\npSpqnkquz/Q0I++8Q2JqCpVKhdHjoe4DH8A4R5e0nbiT6KzuAYpYdnZ28td//der36lNypYyRu7k\nrNiKx8pEo0yeP8/w7Lpkxc6daEQRc2UlVfv3c/K//BfUajX5TIZkIEByaorWj34UtSBw7R//kZvH\nj5NLJPC0t6O32VAB5ooKDNPTjL77LtGREQCEYJDRM2ewVFdz7XaKCxcmyOcLVsbAQJijRxtKll+0\nWg2HD9dSW2tlaiqB3a6nvt6G2Xx/z8iDXMPVvu7LaW9qKs61cwPIiShGTZax4RAhmxF3Iomo12OV\nJPyXLhEbG6P5mWdKKh/PZbq3l+DZsySv9pFISFh3ddPw2B5M2hze7m5EoxH71BTeUAjBYCA0MMDw\n22+Tz2RQqdUMvvkmjU8+ibWmhvFz55CyWeoOHQIKy3Hujg6C/f0k/H6UfB61KNLs85GYnGTi3Dly\nyYL40d3eTt1jjxU8bH5/waV11xq+oNMRm5gg0NNT1JnYgcmLFzFVVGC+h0YCCktJ0l2C6ITfT3J6\nesEPncHhoPmZZ0gEAsi5XMELtMhHbrXuhbntaO76QOclieT0dKHOy+y1yUQiCDodTR/6EBpBIBUK\nMfyLX5ANh7n+3nsoilLwUgYCxEZHSQQCNH7wg9Q8+ij5XI70zAzZVAqjx0Pc78fs8aAzm0mFQkyf\nPk3t4cMkZo0yQa/H3dGBtaaG0MAADV1NZMMhZoIJRIOehu5mfIf2zhPA3jmngYEQJ04MU2mH8cvv\ncXZknPp6Gzt2NRWMiDnLfMG+vuJ9BjB9/Tr1jz1W/L2iKExeuvS+t4yCx8Z/+TKNTz55T8N0ueO0\n0nF9mPv/0z/907xEZ0vd/84yzUqOvxCbdf8tZYxsZQJXr5IKBqnavZtUJMKlv/kbUKmw1dVha2xk\n/+c+x+ipUziam8nG4wydOEHj0aOEh4cZfOut4qw3cOUKKpUKS1UVgl6P3ukszHI0GgS9HntDA3I2\ny8z4DDdvpoqGCEA6LdHbOz1PC2IwaGltddHaurVnR/5bgwy8/hoRf5BMIoHFbsb66AHM3QcwyfHi\n8lZyeprY+PiCxkhyZobxc+cK+V/GBzAIArHzQTxOkel4HEtlJWJ9PSaPB5PHg5TJMHr6NPb6ehKB\nAMH+flCpSAQCVOzdSyYcJjoyQqarqyg4dTQ10fDEEyDLyPk8tro68rkcPX/3dxicTjSiiKIoTF2/\nTuNTTxUEyxUV8yKsDC4X5upqQv39JYJXACmVIhMOL2qMKAukxVZkeVHRomgyIa5jJmBLVRUGl+v9\nfDCKgrWuDpVGU3I+UiaDavY8Rk+fLupdYhMThG/fpvlDH0IwGJBSKaKjoyQDAay1tXh37WLy4kWk\nXI7pa9foO34ctVqN0evF2dyM0e1GEEX0DgdqjaZQVyqXw+h2U3vwIKGBAbosFjKRCEaPB3d7O7Z7\nzMplWeH69Wn0eoHkwFUmbhSSXI+PhHDZtQjGizQ//TQqlYp8Lkfg6tUSr5A0m8XZVleHWhDIJRLF\npH1zSQQC5FKpZQnftwK5XI4f//jHfPnLX36g/X0+H+Pj4ySTSYzb7NotRFkzsgmOdfPaNeJ+P9HR\nUfKSRGR4GI0oImcyoFIR6Okh4fej1mgIXL1KqK8Pg8uFzmYjNjqKLMsokoQiSaAoxCYmgMIL1VJV\nhberC293N97ubpKzH7RMTiaXK62hYjRq0WhU3LgxTX9/kHg8O6+vy2EzaUbyksT4+YtE/IWwU53J\nRDankJ4OoLPZSQQCTKfTxb+/11p6JhIhl0iQz+UKws5slmwshqIoiGYzY2fPMnHhAqlQqNA3lQqV\nWo3R7cbh82F0uzF5vYVMr2qRpLGaGbWHqZl08SOvUqkwV1Zira/HWltLNpHAH40WtUGyJCFLElqD\ngUwkAoBoNFL/xBNU7t2LuaoK786dNB49iv5OxeE5s97pTKZovC6GtbYW9V1RIganc9Vc+muhi9BZ\nrTQePYp3507MVVXUHDhAy/PPz8sRY6uvRzSbSYfDxWs4nckUM/GGh4aKy0tKPl805lzNzVi8Xowu\nF5HR0WKhtGw8zvj58whtbZirq9FZragFAYfPRzIUInD1KoJeT/0HPkDL88+z85OfxPehDy1oiEhS\nnrNnr3L9+hQajRqPQ0t0bHzO72UkSSE5NVVMwCal0yWlI+4w5vcXBNUUvEaaBcZcazCU6KcWu75L\nYbNoHt566y18Pt+8jKtL3V8QBFpaWuidk5F3Ofvfi826f9kzssFJR6OFglv//M9MXb1K49NPM/jW\nW9jq64uuY4PdjkarRWs0ks5I4KjEe+goWZ0No9eLwekkNTNTzItgqqhAMBgwOBzFpZ7sncRamUwh\n90SNE+eowthYYbtOp0EUNVy4MMnt22FAwWbTc/RoI5WV5nW6Og+PXDKJWkoSjRWih0RRg8kkIqfi\n6Mky1/TQiOI9w241ooh6VtehFgRkScJaV0dqeprx8+dxzmZpDfX3o2lrQxBFnC0tjIVCmLxe3I8c\nJnjjGt7Dj3HynXF6z93C3dnFYMLBnj0yu3ZVAGDyeLDW1jJ19SqyJCGazVTu24eg06HWaDB6PJi8\n3pIoFYPdTs2BA/P6bKmunuc1sdbVYaqoWPSaWWtrqTt8mMDVq0jpNEaXi8q9e5etAclmJTKZPCaT\niFq99tkIjG53yfJFLpVCmi1CqSgKtvp6KnbtAgrLWBqttuhR0FmtqDQadGZzsSCiaLGgt7/vTbT7\nfISHhhBmn71UJEpOrUdjVKN3uRk7f4HJs2cKOp50GpVKhUqlInD1Ko1PPrmoNyqblXj33TF6esYR\nhDRjY1Gammx4m2sJTRS8N2aziEEvFPo+a0SIJhNGl+v998AsOru9aHRqtFq8XV2MhMPks4WJiEan\nK9TWWWL03VZioSWa5dLV1cW1a9fYN1uGYzuznnfQQeBbgAycBf5/9t4sSK4zPc98zsl937fKqsra\nV1QV9o0Ed7JpqZvdklrutmSHbMdceUZjxcT4YkK3czXhC4U9F57whT0jta3o1S31xuZOgiR2oFZU\nFWqvyn3fM0+ezDMXWUiyCBAECJAEW3ojEMFM5n/+c/6Tlef7v+/93vd/e9gD/i5yRsqJBK21NVRa\nLZr9VN7t4ME9Pk6r0UCWJEJPP43W4SIXjmH0BSjqfPz2tS1OjIwQPHkSlUZDMRZDb7Mx+s1vYuvu\nxuT1IpVKBI4doxiJUEkkCOl0bVE0t4vjx3XI8h6ZTBWn08DKShqdTk2pJLG2liEeL7G3V+D55/s5\ndMiLWv1gImePE2ekuu+oq9brMbrdd9a/1VoErRGTSUuhUKdcllCrBYam+nGFuqjF9/CWy52WWHMg\ncGB4U5Ypx+M0ZRmDy0VTkrAEg5SiUZxDQ6Rv3cLkdqPfL+3ENiPY9B4uVffo8vXgPmXk6ocbrK3m\nsfueAKWLre01TP4AaoOBcrHG/Hyc7m4rTqehTXw8dgxrMEi9UEBjMpF0u8nv7HSIsSafD8tduBu3\noSgKtVwORVEIPf00+Z0datksvR4Ptt7ez5StFwQB99gY9r4+5HodrdncDqDvE4qisLaWYXExQbUq\n43IZOXzYh9drPnDvHhafdRyNwUDP6dNt/xhFOVB+09vtOIeHic/O4tbpUDQafNPTdJ85Q6Naxej1\n4hocpF4sUs1k0DudWPx+3GNjhC9dwjE8jKqsUKk2sPk9eFw+Lq2/g713ALVOQ6MlEJ5bZPDZZyju\nbpNeWblnMBKNllhdTaNWtwnUTqeR1dUMA0+PYppfRS00CYXsqHRaPBMTnXsoiCK+mRmkUqmTBTK4\n3Yw/+eSBvwXn0BBqvZ787i7Cfpn4Xt+h+1nfh/38VzFelmV+9rOfcfny5Yeaf2Ji4g7eyNfh+r+I\n8V9lMLIFPEvbn+ZvgEPAvXsjfwfx8dT63SCVSjQlCVGrZeSVV1DpdOhtNuJzcyAI1AoFgidO4J2a\n4tqrF8hnS8jLMdRGE96xw6zEdJz9/VcIHDlCvVTC2tWFtbcXrclEfG6O1M2bNBsNtBYLgWPHsPb0\ndPQgfD4zL788RCZTpVZrkMlUKZUkZmcTLC+nkGWFGzfi1GrtneupU19PF9rE4iLx2Vmk/WDCNTJC\n4OjRA9oPqUwdfAP4euLYbDpQwOJx4JqYxNHXh8njQSoWURuNtCSJ8KVL1PN5zIEAlmCQ+Oxsu5tJ\nlrEGg7hGR3EOD7eDTJOpTSAWRaRymZqiZXU1jV8bo1p0Eg4XqddlMpKVptZKXRaYW0iCtx9dq0gt\nn6fVkKhU1JTLUkeNU1SpMAe6OmRRo8tFbmuLciKB3uHA0df3qcJmUrlM9OpV8ru7oCiYAwG6TpzA\nNzX1wOur1us/s6RzN0QiRT74YJd6vZ1hKBYlymWJb3xjEKPxswnSjxqftlb+w4cxOBzkd3c7gUr0\n2jWq6TSO4WFyGg2F3V2kYhGtxUL3qVN4p6YY/da32Lq6QDydotXU4vSFCG9naNqC6L1aYtevk9na\nwRLw4RwexeL2HPA2uhvy+foBnpfBoKanx47R5eL3/tfvIyUiiKKAvbcX6ydKPGafj8GXX+7MYXS7\n78hi3Q5APo2n8g8F77zzDn19fQ/94J6YmOCv//qvH81Jfc3xVQYjHxdJaAB3N1l4AGxtbX1pGYuH\nnavZbLG+nmFlJU2zqTA46GBkxNXRn7gNvc1G1WrFtK8Zsffhh2iMRnqffBKtxUL/M8+AIFDY20Mt\nV7EJJUr5GBZTN+ZGnFzNQSEv4tlX7Lwd9GQ3N4lcudIp3cjVKrEbN0g3Ggx/TH5bp1MTCFioVhuY\nTJpONkSW2z94Ho+BXK7GjRtxJic9mM33b/L2edbwUd/j5dlZpPn5Tr1crlZJLCxg9vuxh0Kdz1Wr\nMtGqGe+ZZ2mVMu11NDkp0n5AaU0mIskkPp2Ozbfe6oiT5ba20NlsiBpNh09Q2NtD73Aw+OKLNPbX\nvV4oUE4kUGm1CHYvVlULg0uN3gwNROauRdDJJSqpJKJsxtHVxepSjAG/gFqnR6XTYTBoMBrbAVSx\nWGdnYY3I8gY6vQZdwMqpZ862d/f7DrGtZpOmLN81xZ5cXDzgLptdX0dUq+l7+ukv7e9sb6/QCURu\nI52ukk5XMRq1j+w8HvY4Ko0G59AQBbUaTbXK5f/0nyju7tKSZbbeeYfec+foOn4cqVikWa+TXlvD\n6PXSe+4coqebknUZqSaxm2siV/bIbYRRZWRa9TIGkxary05iaRn11Bjdx+9M5yutFvndXQqRCDQt\nNEoFWhoJna5datLpVPh8ZoI9QRi7t2W91mg8QER9FGv8oMd42Dm/jPE/+tGPPrVE8yDz3y7TfN7x\nDzv/4zT+cSj0TQMeYPmzPvi7hLW1DO+/v4sstxn6yWSZWk3mxImDKU+5WqUlyzRKJVIrK3SfOkUt\nlyMxN4dKp8Ps9dJsNFDpdGQXr3Hzt+92WhAnvvUyod/7p5TWFsnfyBM4erSzsy3s7XUCkduo5XIo\nn2LKZjBomJ72E4uVO+fc32+nu9vGjRsxBgYcSFLzrmMfZ0il0h3EPaXZpJJMHghGbDYdiqKwlVDQ\nat0oioJSh/GjB3f8+d3dAw6+AIn5ebqOHz/w3m3NjfTKCtV0muDJk4QvXSKzvkE9XcB/6hzR9TAa\nOc7oN16gsLWBUCujQmZ3c5Npj5WxqW5axQyWrgA6vYbxcTculxFZbrL03jWu/Y/XqVfapFqNz0jQ\n5cRsM1KKxahms1TicTQmE46BAdxjY51dsCxJ5D6m4HobxUikI8L1ZeBu/BBBuKfQ6VeO6PXrpJeX\nqRcKqDQaavk8m2++SeDwYfQOB0qzSeTSJUqRCJ6JCay9g8QKq1x6c5VCrs6xpwL0Hx1HunUNtSgg\n2myYg92svv42Sq3I0Esv3DFn8uZNwpcu0Wo00Lh8uNQi0XI7GBEF6PaIKLE1trer2EMhrN3dX7kP\n0dcZzWaTn/3sZw8s/343DA0Nsbe3R7VaxfAl6uk8jviqgxEn8B+Bu4aYf/EXf4F9n/g1NjbG6dOn\nOxHXbcbuJ1/fxqf9/0f1+vZ7n2d8q6Vw9eoS5XK1s3up1VLMzxcYG3NjsejY2tqi1WxSvHqV8qVL\nSB4PeUWhNTeHb3q6fbxWC8v16wSOHSOayRDe3UKrFWlITRouO7u7OxzS1SklCsQLBTIXL2Lt6cFg\nt5MoFsnU67j3a8apeh1RreZ0d/ennr9WC3/wB2NYrTr29nYAWF5OYTCo8Xgk0ukoTufgA63HJ+/X\nZ+FR78hDvb1srKzc0bqqNho7HBJRrcbjMXH4sL9jCKjXqxkZcREIfJS67+vrY+/ixTvm+GRbKNA5\nbi2fp1GpoDEa6T59GsfgINVak4bFT+7dWQwWI0Jik75eK5GtKqKgxxIIELm1wx/9+bdR6/RITRGX\ny0gg0OZSpBMFNi5c7wQiAEqmxubb7+Ab7KGWzbLx+uugKDiHhqhmMjQlie5Tp9rnK4p3dMFAu+wj\nqNVfWvaxu9vKzZsparWP7o3bbcTlau/cvyzOyIMc5/Kvf90O2FotlFYLlVZLPZ9HURQEQWD99deB\nNqk1duMGjkKBmWkfC5eMlIoN1uYLHPmzU5jHA1STCcrpLPndPXwj/biGBqlls9g+xtGQKhWSS0ud\nrFsjk2Ay6GXEFkLl6UIjlVDCyySvtzvosuvr9D75JM7Be2dIHuXa/K5xRt599126u7sZGBh46Pk1\nGg2Dg4OsrKxw+PDhBx7/sPM/TuO/ymBETZsr8r8Dibt94K/+6q8+dfAnL/jr9LrVUtDr3eh0HwlC\n6XRu9HotrZbS+Xw5mSSWSLQfhkYj7tHRjoGXTVGoF4voHQ4K4TBd4+PcKpXQ2vTIjRaCIOMxaqln\n2mqWbp0OQRBolMsY7HbGjh5lM5PpZAXcOh22UAjjfhfIvc7/xRcHeOMNWFhIEAxqOHrUz/HjwTse\nzA+zXl8WTD4ftt5eshsbnffUBgPVVIrE3BxaiwXf9DS2nh6mpnwEg1ZKJQm9Xo3HY7xjh2n2+Uio\nVAeyTo6+PnR2e0eLQlCpcAwNYfb724q6soxKq6WWyxG9dg1TsId6y4BKJaDRa8lubXByKkik28r2\nZhatTsXYmJsenw57d9cd1yTX69RKH3239AY1dpNAJRFHd2Sys4uGti6GweUiu7GBZ2ICncWCSq3G\nPTrK7sc6sARRxDk8/KVqSQQCFs6d62VpKUmpJOH1mpia8mEw3NtU7quEa2QEnc1GPZvt2DSEzp3D\n4HIRu36dZqOBPRTqZKGK0SjuXj1PnPQi6PuhIZHf3MTcZWL1/BUMJj2tSgmdVoXBaiG7vo57dLTD\nZ5JrtYOKsYqClI5jFFsMPTHJ1rtzNJoSBpeL1r7bd/LmTex9fQ9EJv5HfIQf/ehHB+TfHxa3Say3\ng5F/qPgqg5E/Bo4D/9f+6/8DuPAwB/y6cEbUapH+fgfpdPWA4GUgYMZq/YhzoSgKZp+P3MwMBkUh\nOT9PJZmk9+mn8c/MUIxE0JhMaAwGDG433adPdzQP5HodWZLQmc3U9tnxGoMBzX4rp8Xvp++ZZ0it\nrFAvFLD19OAaGWF3b+8zrysQsPD97x8iHi8jCGC36z8XoXBlbo6A04nGbMZgvz9TvUd9j/ciEbrP\nnsUSDHYUL2u5HOlbt9oy6sVipzXW4HDgdBo+1a59a2uLnmAQ38wM6ZUVmvU6GpOJrmPHMPp8WAIB\npGIR437brahSYe/vJ7e5SXJxEZ3dTu+TT2J0uynG4tTHHTj0JgZOTJNdu8WQXcvQaTuC0kIUyujM\nd5rHleJxxHoRm8NIYmMHj9+GUEyQKlVxm3qp5/Ptdk6hrbbalKR2wKEoNBuNjtOvra8PjclEZm2N\nRrmMc3AQx9DQF3IP7oX+fge9vTbq9SYGg/pA8Pe4cEY+fhznyAgT3/0uycVFBJWK4MmTtBoNYtev\nI6jVDL70UseJGWiLqgX8qBuL7C0s0zAI+JwevL/3NC8N9pC4cZ3i3i4Wn5fY7Cwmrxfn8DDeiQmg\n3UqsdzjuKDWWdDrkep1qKkX40iUalQq27m6svb2dQOl+gpF/5IwcRLPZ5Kc//Snnz59/ZPN/kjfy\nOF//Fzn+qwxG/vv+v689UqkKiUSJQqGO0ailr8+G1Xrv7oGxMTe1mszWVg67VY3f3MDnqlFOJDC4\nXBT29ignk8iyjEqnI/b++2Q3N6mmUohaLb1PPIF2f7ftm5rC3t9P1/HjrL/6KvViEcfAAMGTJ9tm\naIqCSqfDOzV14KFv7e7G2v2JDphPiDt9GtRq1V39ZxRFIZut0mqBw6FHpbpTV09ptYjPzbF3+TJl\nUURjMOD9HF0ajwpaoxHP+DiefRn1+Oxs+32LpS2HHomw9+GH+I8cwfKJlt1PQqXREDx+HOfAAHKt\nhs5qRWtul0/04+MoikJua4utt95CbTCQWV2lViigMRpRZBm5XsfgclGStehtXRjcvSTVZuReGyax\njpCLolWrcAwMUE2nqeVybb0Qo5FiJMLmW2/RajYZP3uIVqVAJbxNIZ7EdXwce08X66++indmhtzm\nJtVMBq3ZTKNSwTk0RHJpicLODlqLhXIyiUavxzk0RODIEQwOxxd+Hz51TVUiRuOj1WdsNltks9W7\nfj8fBha/n+CJE+hsNnRuP3N/9xq5ZJau4V409RxyvYY1EKBRqWD0erEODFI3ehj91jcJPZUnl4sT\ntJvJzl5EqJfQmQyIPd1Er13D4HCg3/eRcvT3U0kmkRsNXKOjyLUatWwWQRQxeDzUrVY2XnuNYiSC\n1mwmMT9PYm6O0DPPtAMkWYbPaM3+R9yJ8+fPEwgEGNoPzB8FJiYm+MEPfvDIjvd1xePMYlLuJRv9\nWZAkmXC4SC5Xw2bTEwiYv5D0bjRaZG4uzsWLYTY2soDC8eNBvvOdsTtM5e6GfLpI9PIFipvr5HJV\ncmVwDQ/hDflIn38dpSVTTae5+dOf4h4ZaWuMlMvoHQ4GXnqJVqOBJRhEyuep7nuZKM0mgijiP3oU\njdFIo1RqS3v7fF8oca1clrh6Ncrubjs709dn59AhLzbbwcCsGImw/tvfdoSToM2hOPxnf3ZPqfDP\nwu26/MMgvbrK1ttvtzkdGk1bSr9SwTk8jGNggNBTTx0gtn4Wavl8m8yo1WJ0u8ltbbH97ru0ZBmD\nw8HqL36BNRRC0tpJpSrYPDYsM2dZ3aywsJikUof+QReK3KAlSZw6082QX6Rya64tUCUImPY7M+Kz\ns2Ru3QLa62kfGqYQiaI1GlBrNVQzGZKLixj3HYBj16+3HYK7u7H39ZFdW8MaDLL74YcUw2EMTifB\nU6cQ/IM0bN3oDFq6uiw4HI+WaCcIwkPd9wdFOl3h0qUwsUgejVZN/4CTo0cDd/19kCSZtbUs6+sZ\ntFoVE6N2zEIFRW6gt9vvUJNtNZtsvH+RVEVD8tY6a2+9S7MuUWu0GB72YlYKdJ06RWZlha6nnmen\nGWBzLU29VsfqtHJoxELhzR+S31hDazJRjEYZfeUVjG43tVyOZr2Oc3gYuV6nGA6jNJvobDb8hw+j\n1utpShLlRIL43FzbJykaRVCpcI+MsPP++3gPHWLiu9/F2t2Ne3T0y1ryu+LLvu+PAn/+53+O3+/n\nL//yLx/ZMZeWlvjOd77D6urqIzvm44r93+e7/kh/1QTWLwSy3OTChTC3brXbZgWh/WB88skeKhWZ\narWB2azFbn/4H9WtrRxraxlWVz/q/5+fj+P3m/n2t0fRaO6dCq1Ht9l98zVSuzFyhQYmfxc7+RI6\nYZJEvIC/2wGiiNHlopxKYevro7y/q5WKRRrlMnqbjVu/+Q1ak4l6oUCzVmv7l8Tj9L/wAt7JyS+l\nPry8nOoQWs1mLR9+uMe1q2FmJuwMDVhxuszobTYq6fSBQATate/Pi2q1wcpKms3NLHq9mtFRF/39\njs8VmBi9XnR2O6JKRfTatbazrV6PzmKhHI+z9utf4xobQ2s24x4Zuav/TDWbpV4sUs/nKUajbeVS\nRcFz6FBbnr/RQFCpaO2TWpOJMhmphUUrITWsvPV312gYXaTTNapSi+XFCN94rpcbH6wyPWbh0vuX\nCXo1mExaUBTK8Tipmzc7ip+311Mq5KlnUuy+M0+jXEZrseCdmkJjMuHs78fe14dUKqExmdh44y1U\nBjPoTW2ZcqHdtpJu2fnwp3MYggVMbhc2m46nnw7h999dc+NxR7PZ4sN315k7P0+9UESlUZPa6cJs\n1jIz47/j83NzCa5fj6Io0O1Vc+G/vY1dVcNuVaGzWgkcO4Z7ZKRjVFfKFrkyn2d5u4a5GGd5KUNv\nrw2dXBTTiQAAIABJREFUIrG9meHQIS8mt5tWC5J1AwvzO1RTSVqSRDVrJX2zyGGPta2s7PHS6j/K\nm+8nsA7ZCQV99Ha3y2uFnZ3OOdbzefYuXOiQkddefRVFllHr9QiiiFQooLPZ6Dl7FrVeT71QOFgq\n+kfcF1qtFj/5yU94++23H+lxh4aG2NnZoVarof8cejy/K/idCkZu16pisTLr65mO+I+iwNJSEkVR\n2NnJY7PpMZk09PU5mJi4u2z3/cwVCoUolxtEIu122Far1SaJNlqk0xUKhXqH+S/LTVQq8cADstlo\ntLU9VlfZ281RLUnozSbGT0+iESSis/Po5RCtYFfb60RRaJTLFMNhQs88024vbTYpJxJ4xsZQ6XQs\n//znqFQqDC4XUqlEbmMDo8tFQbGwtZVFlhV6e22EQra7pqg/b71PlptsbeUAsFh0vPnmJjQlguY6\nscYucthAqM+Ga6C/zVsRBFK1Wqeb5377NT95foqicPVqhKWlVOe9WKyEIAj09392aeGTxzPY7YSe\nfJL0rVvt7IXT2VGYTC0vo7fbacoyyfl5PNPTjHzrFfItM7FYmXB4h26DQG1jCR11BKWJ3m5Ho9cj\narVUUinKiTZXW63TYXC56Dp1inhWpstmoJRIoekboLQepiEVqWe3yCZU1AsVykUvBk2LRqlAYm0T\ni7oLk8nZOe9SLIZvepri3h5NSULYL3/ltrfJbm5Rdzqwp1KEL15k6k//tF3yy+URBQHFYCZfgUY2\nQ11tIp2qYHfocQZ6mFtMUc5X0PvbgU4+X+eDD+b5wz88e1/364vE5/muplMlbl1Zohxv3we5CpnW\nTZZnXUxPH8wcJvcSzH6wQjFfxe53oSkkEMo5KsioFTXVWpPsb96kL5PD4nWTV6mQakZ29krsrmeY\nGrAgKC12d/NMHfJSKlRpKrB2/iLR9Qjl7irxaJ1yNgelDOauLgRtjVaoF9RacoZu3vrtOoqi4DeX\niYXzOL//BEYhicHtplEud1yRsxsbncC4USqRV6kw5XK0ZJliJEIxGsXodGLt6aEpSRg+Jnf/IGvc\nkuWPjPFMJkxe7x2OwZ91jAed80HxRY3/4IMPcLvdjIyMPNL5tVptp6NmZmbmsb3+L3r871Qwchvl\nskSj8VErZaUicf16jGy2RjxeolqVOXkyiCwreL1GHA4D6XSFZlPB5TKg1d7fsgiCgNdrxGrVddRJ\nVSqB3l4bVqsOg0FNJlNlaSlJLFbCatUxMeHplG8q6TStVotqvoBao+XoK08gF7LsvPUm3adPMfbc\naeJXLiKgMPHHf0xmbQ2t2czIt79N3zPPIJVK3PrlL9Ho9ejsdlrNFoGTZ4heuoAgijgGBqjl88R3\nElzaSFMutzso1tcznDgR7PiYPAoIgoDJpKFel0kmyxSLEjMDKlQ7t1i6sAyyzLEnhug7OsngN76B\n2e8n9bF2XkvXnV0h94Nstsr2dv7Ae4VCnevXo3i9pnb24C5QFIVMpko6XcHrlQ4QcC1dXRg9Hmr5\nPKVIBEEUye/uIlerqLu6yG9v02w0yK2vE7m1x2paz6XraZzmPPPXb6BR6jz54iGyF98itbyM99Ah\nVBoNoaefxt7XB6JIJZEgevky9oEBLNl1dt9+n/xehME/DWK1aEhEM7RqFRoVVdsdVythtRtRqwRE\njQaBjznI1mo0ZZliIokpEEBptVCbzKR3ImwtbKDVWamW61g0Ina/HwWIRstsr8YQRRG9MYt3aJhS\neA+T1USrViG3l8N3zkl5t4Zar0Nr+qiLpliUkOUWavWdD6HG/sPxQf1nviy0qmWaterB95otlHrp\nQCCSWVsjurxFem2LfKaEWQgiOirc+s1vqWRzWNR1jB4PM3/yffI1FcVoma10AUVwkdgMI0oK125I\nTD97hujcIga3G0e3Gv9UHxd+8HNUag3Bfh8f/OLv0drsWKxG5EoFq9cGrSZNi5elpSzFTBGr10Vd\nVhg/PEg5sks4ukQtncQ5NITGaiezF6NeqaG0WjSqVQSNhnouh6ZaxdbXh1yrYevtxejxYOvpQWsy\nHeA+VXM58tvb1AsFzPvdZXdTzG02GoQvXSK9ukprX9vIMzlJ4MiRfxCdOfcSOntYTExMsLS0xMzM\nzBdy/K8DfqeCkdvRmNWqQ6tVdUS40ukq2WyN6WkdW1s5ZLnF/HycQMBMPt9WDw2HC7RaCm63kdOn\nu/F47uxUuNtcvb02hoedXLwYplyWMBg0mM1aTCYN+Xydy5cjxGJtZ8xMpkoyWebFFwfa/hr7WgSe\n8THcWgOxa1ex+b24hoYo7IUxu12MvfIKl/7Df6Dne9/jxL/5N6Ru3qReKpFcWmL3vfcoJRKodTo8\ndieKRkchU0DTM4Z3agiN0UglkyFXUqhUGp1zbzYVlpdTDA05Og/hRKJELFZGEAwkk+V7Xn8mU2V1\nNU0sViIYtDDUb0Ws5ugzZnHTpOq2EovZcKljzF6+jkHXzgjVS2V233+fruPHCT31FLaeHirpNEaP\nB8d9RtKfjLibTaXTDq0oCpFIkXC4SLEo7ZOJ7YyNuQ/wSSRJ5saNGKVUhlY+xaW1KAPTg/SOf3Rs\npdXCNzNDq9HoiJjZQiGMDgfJdJa8YmZ7V0JMVMmXRZLJCsM+M7FMkZmzY7QqRaqFUltnolBAZ7EQ\nn59n5l/8C/YuXODWr36FJRikUa+z+urruIeH0Vht5GcvEvQeJrdXxufpwuVR43X0Md5v4OhhPzuL\nGww9cQJ1dofrH65id1rQCRJWnZNL77yO1arFN9KPrrcLWVVEkkWKuTJGixHMRlR6PZkiSHo7NWmP\nSqlMKp3m92cmGRkfYm92kfE/+A6F7S3UUgm7w0HN4OmQcAH6+/vvCESkSoXE/Dz5fbE0x+Ag3kOH\nPtO/5jZWV9OsrbXJ04ODDgYHnXcNdu71XbgfmA0iw6MeriTzne+NFiMjwx9xPxrVKvG5OcRmk0C3\nnXymhE6USS4ukouncdj1SPkcrWiEajqFEZHN5SQ3t2qcOWsnm6lgspkoVtR8sCgxMf0Ew98Yw6KH\n7Pu/weG1IysqfC41/VP9lCQ1GlULpdng+KmjDAZbRDUyGsmMSiWQrWspJjI0NkrMbm1w+EiAwt4e\ne1dv0Pvs80iKBuv4DHuLt1BKORwT06Rfe4taU8Tv8TExPY2lu5tqMolreLgjelYuS0Q3oyz/+nWq\n6RQejxGLtW2H0PvEE23F3Y+tcTESIbW83Gn3btbrJBcWsAaD9yR2P+h9etjunS9ifKvV4sc//jGv\n7+vEPOr5JycnOx41j+P1fxnjH+tgpNVqd2aIovBApDmv18TEhJulpRSS1KTVUjh9Okg+X+2oh94W\nrtrczLOxkcVi0aLTqWk0WqyspHE6DffFtFepROx2A//2354iEikSj5fQaFRcvRpBEATW1tIHJNLL\n5QbpdAVRFGhhoNUCSyCAc2gYRZLQ6rTc/MlPyUViOHr8PPnv/h0v/vt/T71QoBAO72dAmoiiSCWT\noZxMond52F1awzF5FHtXkJ5Dh8gns5gUDZ7JSdZKZhSlXUpSq0V0OhU6nUguVyMWK5HP17l2Ldop\na5lMGs6dC9Hba7vjestliXff3SKRqBDw6NDkdtn4zRaxix+gUqsoZIoYfEGePfMchb0Wel17DS12\nIxpFolGp0qrX0e+T7j4LstwiEinsS4BrCAatmM0fZTEcDgNut5Hd3QL5fJ3t7bbAVF+fnVSq3AlQ\ncrkaDoee8XE3lUqDYjTO3vn3yEaTAOxcusaL/+qb+CfHSN+61XG7NQcCeCYnCZ45Q2Jujt0LF8jU\ndKTzdYweH5EM3NxIYTCocPtsfPOPDkMhSXa2vXs1ut0YnE4ElQq1Vkt2c5PNN99s+5SYzWTX12lV\nK5TicfQ2J5LcYiykY/TwKWJ7WcRGBU0tR+Lt9zBOTTAyOoK3P0gxHyDkz+Cw66gWS4iNCn1DHmav\n7LJ08wqTLxmJZVr0PvEE22+/hVzKEd7dwH3oEKmcTMPgZuaVF2hUKsTSTer2Hm5du0gmVsJkt+Ac\nmMTdY+XUdDdLYZFKrc29crmMdy1tJubnO11I0PZmEUSRwJEjn3mPAd57b7vz/YtGizQaTQ4dunfm\nrlKR2pkd/f3/jOntdiZHbJiMY6yvZ9HpVIyMuBgc/phLb6WCVC7TrNeZGu2h2QqiERrIcovQaDfV\nZIwm4B4apJqMs3vxKpvXbzE4EiLgGeDs8+NceGcZs16DfcDP8JgHVSUDlSqFZAb/ydNYR8axWfW8\n8k9VJFNV6nWZvok+yvPvc+XvLmBwu6lgwW9zoFQ1SC2R3dl5fG4tQquBpTcEFieJjV1Czz2Hq6+H\nnSvXScWLVG/m8IYm6Ds6icrpw+bRU1heQGezYXC0uVSy3GR2NoaSiZDPVSlkJDLpCmPjHpSbN9E7\nnajUaoxud8eBuprJ3KHa3JQk6oXCZ3aZfd1x4cIFHA4H4+PjX8jxJyYm+Nu//dsv5NhfFzzWwcgb\nb2wQj5cRRYFQyPapjPfbuF2rUqlEjh3roqfHRqFQp1Lp4ubNJOfPf0T6cjoNeDxG1tYyiGLblKvN\nK2lnO9xuA2Njn84nuT2XoijE4yV0OjXz83EKBYlCoY7JpEGSmjQaTYJuERVNaooOUaNldjZOtVyj\nsL3Dock+tNYKtXwOi9fF1tvv0JRqGG1G+p56ivClS0heLx69HrXBgNnnQ2sykY3HGf3Od4jOzrNx\neR6bz4Z9bBJVcIiGK8DyTYmNio4BsxuHy4hqp4TDYUCWW+RyVbxeE6+9toHZrGVhIUE0WmJ42IlW\nWwTcLCzECQYtdwRkmUyVcrnBYL+VoBilkUmi1qkZfOEFVn/1S+rRHUS5SmuvC7fXj6fbQ1Oq0xs0\nQ72ALRRC73QeWMNPg6IoXLsWYWEhiSy3EAQwmcp885unOnosarXIyZNBRFEgl6vicOgZG3NjNLY1\nKS5fjgDg95tJpSrEYiXGxtwU1lfIRpPIBgF1VSGbzLN16TpGm5m9Dz/sCEnVslmMbjcDL71Es15H\narRobcYxuqrYRya4slNFo9Pi0CsIUpjswnXCV64QnBwhvbJCa2CgLZoWClHY3aUcj6PSaDB6PBg9\nHkxuN76ZGSx9g+zltSzd2GVvrsTkKRGtEifxm19gdVrxjc+gcXrx+a3IOjvvXl+FJpwaFyhv3KK0\nsYos6unqCrG5WyaZLPPzX+7wzVfGOfWv/4y1y+dxaPT0PPU8P/77DWav7CKIIofPTdLVYyOVraOt\n1igks1TzeRxaF+VEDaOi8NILT1OoKKhUIg6zSHRvFZtptFOKkcplcp9U0FWUtpDa5GTHePFe+Li5\nW7OpsLKSZnj4Tq8maAfE8/MJlpZuYbF4GRpyMjHhuetnPwm1TkfXoTHU4hJ9bgVRq6Wo0WDt6mq7\nFGezyLUa5kCAws4OreQux4ccaL0DJAw58haR5KYZjVqgWa8jq00IhRS1zZukDFVqaz6muxQG/3AI\nwWxHLhXYefeXpAeGKfhDZLtOI4kOElEF7c01iqkMelULt65M4fwCO3u7SDt77NxYInj8GIVslN4T\nT5Kta9DJFoang1jdNvLhKI6gBWuol53dNGtLUaa/933e/2+/RFeu0vD7eP+9TQr1Hb79Z8/i9Pux\n9/d3MlyZZJFGeJ387EVa8SS9w0Nk8w1K2TzF1V1MPh/FWJxIRWboxBMYu0MY9caOPs1tiGp1x038\no1vfJlVXMxnUej0pSTrgd/VZeBw5Dw8idPZ55v94ZuRxvP4vY/xjHYxsbuY6/724mMRo1HDkyP1F\n4CqVSCBg6Zi85fM1Dh3yEo22A4dnngkxOelhfj5BoVDn5z9f6QgrKQpcuhQhGLRisdw7zWy16gkG\nraRSZZLJCtWqjCC0ZaudNg0D+gTxdy/QqEt4B3vQjR0lU5NpFrKktsO8cWuDI0+OcXzCRqNapV4s\nYvb78c/MkNvcoJbLoZmcZPvNNzn0ve8Ru3YNwWBm49IsNr8XR3+I8e8dYmm7wfkrWeSdHOl8klOn\numk0mmxuF8gVJA4f9vPBB7tcvx5leNjF9naefL7GE0/0ksvVKJUkIpEit79DxaJEpVhBziQoJ5Oo\ndDoalQrh9TBDnl4MxW22LrxPcW+PyOXL2HuC9D71FDqbnejSLZS1HQa6ezn7rTMUtzdoVSvoHQG6\nz5zp7LQ+C6lUhZWVdCebpSjtktvOTu7ArtnlMvLCCwMEg1ZWV9NIUpNUqorHY2RrK8fQ0EdEz2JR\nolKuU8/nDsylUgkoUpViNHpQ0ZL2jrCWzeIeHSW6cJPY+i674RKJV2/Sf+oo3vGT9DqblCKrNHN5\nnEE/Fr+f0NNPU47HMTgcbL7xBlKpROTyZfzHT9AS1bz3X37EoZefRahX2M1pefs3CzSbLXrOnGVh\nKYtFVBh8/kV28wbmV1JULq3Qc1jFzIu96DQirUadZirN1f/3B6hooegt1IRlRl55hZLWTEVW8+Mf\nL+H5n09hCg3gdTmJJGqUyzKiKJDNlrl2ZQ9UKl55ZZS8MQXVIk6XEbu9zRnQ2+14Ag48QOrmTfYu\nzRPLZmmtrRE4cuS+ZcUfFLLcQpZbd5XCmJ2NsbCQpF6XkOUaV65EUKnE++ZAGd1uQufO0SiXEbVa\n9iIR5FqN6LVrHWdlALPfTzEWoyKr2VzNYgkeorCTR+dVMBpFaqUael8XG6+9hs1hwu628eH//f9g\n9jhQ6Y1ojEbGv/kyytHDqOweVCYrXq+RosZDOtfg1de3CN/cwOYwcuKJAY6HtFTmZtGpBTRqkfzW\nBlpRR9DepM/pwnbiLLV4mOv/5b+ye22WRl1i6IXnGP2jP6ZRltmJVFmLCVTSFeTwTYwaGya/n52F\nNVSmLMr+NZk8HjLLSyy+9i5WoUxscYXk6jqT33qZSqSIXrfvw7SVYydbpph+H/sJNS6nHruvi2o8\nAorS4aWZ/Qe7kBKLi8SuXWt3yAkCZbOZnkAAve3OTOvXAYqi8JOf/IRf//rXX9gcw8PD7OzsUP/E\nb88/JHyVwUgA+CUwDpiA1r0/DpubWaamfJ9aS/60aMxg0PDkkyFGR91IUhOLRYfT2eZGBAJmbt3K\ndNxBJamJ3a6jXpfJ5+vIcrszRqUS8XpNqFQipZKEz/eRP8TRo36Wl9P09lrZ2Sng8Zjo6bFCLoYm\ns4XYrNGSm7i9ZrLrc2jSOZw2M2MvDxHbjNJIxWhpfPinp9npfZdKNovJ4yG1sozWakWdziLavZQy\nOQStgVJFxtUfopjOo602MU6fpJzaQlZbuLkYYzcu4fOZOXasi0ymSrHYli9vNhXGxjz09dk4f36X\nXK5OoVDH7TaRTFb2Cbg+NBqRYMDE5o1VUosL6FslRFHBPz1N13AvDanJxtsfgCwRvniBWj5PORZB\n1OmwDw5h8vvQ2t2sXt9i5PlzDB8+jNioYnS7sQSDiPsusZ8VPddq8gFfEmjL5mcyd7YBq1QioZCN\ntbU0hUK9s4EzmzUHVG0BNFo1zp4uYuth1FUFhHamzOZ1tJVWXa52V1S12m6BFAQQRYrhMHK5SEUw\nEc+XELQmwsubHJ+ewmPTITZs7Iky0aVFKpFdPJOTjH372+2dts9HZHcXUadH4+9j9/y7GB12mi2o\npHPc3NSiILR3kisruI+epKz4MMwMsvyff4Vcq6HS6CjXBOZm4wyOeDE3tdRurjH27Fl2r86hMhip\nFUGUykQTZWwWDd3dLvLZElNPnsVMgSu/vUhPPU/v0QBZuhCNVia6FcyNNF1np/D7TeQ2N0FRMPl8\n+A8fRhAEitEo4cuXadbrOESRWibD3oULHa0No9tNbHYWFAW9w9FWqx0cPJAVyedrpFIVBEFo8xPu\nEegHg9a7ko+LxXqHsHzb10lRYG0tzcSEG7X6/oiUgiB0sgR9fX3EFxZILi11dv2KopBL5rBMn2Vl\nJc3W1h7yWpaRY09j0TUJ9dpJrW9w690LSJUqE89Okby5jNluQlCpMNlMlHIlKtkCQu8U0UQZZWcb\nt7FGQqiytFElsh4hHU1TrdR49ccppv7P72Ku1cFgxDXgwD48iqg34p8YpVWvYrIaSV27gFQq4uzp\notlskrp5E9vFCwz9k99npyrSEgRS4TSHnjtB19gQogh9IzasLRMtSSJ67Rr+o0ehlGFoIohKrcFo\nMbJ16QaZtVt0j/Zi93vIJnIkEmXUjRaVXAF3q87iqsTzTx3FNz5CLZfD6HJhCQY70vTQ1tVJzM9/\n1KqvKJiKRbIbG/ddsnvcOA8XL17EbDYzue92/UXMr9Vq6e/vZ2Vlhenp6Qce/7DzPw7jv8pgJAM8\nB/zsfgdoNKq7OnneD9Rq8YB3CrRt4avVBocOeVlby9BstnC5jB0r9my2ynvvbVMsSthsOgIBM4WC\nBCjodGpCIRsjI27MZh3Hj3fhdOq5fDlCpdLA6TQgpLfxe410+U2oLXZqqV12zy9j6umnVoxz8Sf/\nHa3NgcZqozxqQSoWCRw7Rn5nB425LfOudXpIrG6i93gRVWoiN2bZ3khhNOvxD3Rj7+5i9laevKSl\nnK/RbDRwO/UUCrUD3bKtloLR2L4utVrEZmt3AEUiRQ4f9pPPV2k0Wuh0Kvr7HUT3Urzx6tugNdI9\n4GXMJ7H805/iP3oUg8NBNREns7YOTRmD1YJUFqnn80j5PN4jJ8k1DOhUejYSAkavl/HDD95CbTJp\nMRo1nS6g2/B6706utVh0PP10H2trGdLpKoGAmWKxfsCGXqtV0dVlJeQ5gVDNE98MY7NqCfb78U1O\nUoxEiF69it7pxD44SEtvRUZNDT3NYgy1SmSg30EuVyOXq6FWC/g9WlQGI3qHHXt3kPjVK1SSSXbe\new+zz4eo09FqNlHpdDTdfawt7JDYSNBz6hSFVJJKpUVdatEoV0AUqKtMSDtpzCYtV6/skWsYUeVi\nGCxmFLWW5asr+FUOYuvL5K5+iN2hZ+i5p9CYzdgiecKxKgmKaGoZRnucRFd3KPeBXEmRWl0lshlH\nbVjlqX/5R+RiW4TfC2NPu7G47HSdOIF7fBxaLQxOZ6dLopxI3JExapTL7axRLkejVsPR309mbQ2p\nWPzoOPsIhwucP79DPt8OFJ1OA+fO9baJ3MDQkJNIpIiiKHR1We6Z5bibfszDit3dDsCgrUWys5Mn\nFt6kp+ljayfH8IibbKrE4uU1XGPjOIc89E4IFJYXaLh0+IYGSF29hFQo0mxIWCwGNCYzs+cXESed\nbM2ukgtHmTkzim/czY0fvIHNbiK9K1IvVdHYDGxupjj+J3/CzrvvUC1WiMwu0HPmLBvXbxIYHURJ\npIgvLbeN9xDQmM3kwltU0mnMDhs+k56X/+XL1OTfZ+1mjPfeWsbhMuF3aTA7RFZ/8Qssfj/FcJjN\nt9+mUqhQlNSEzpxg7OXnsJg1DD51htTiIuVMAXm/I9FoM9MQdCiKRDwjM3Bm+FPX8ePtxh/HbV+m\nryN++MMffmFdNB/HbY+ahw1Gvq74KoOR+v6/T8XHy5NqtcjYmPuewciD1qrMZi2ViozFomVkxEky\nWaFWk9Hr1fT321ldTVMsttt1XS4Df/M3c2xu5nC5jAwPC5w+PYXVqsfvb/+gDgw46eqyks1W0elU\nFBeTpG62xdAMJh251TRTT81g9PqY/+nfI5UrGG0WBkf9LP3wh3QdP465bxCN3YXeYsQW6qMpaFAN\n9WPXGVHrdEhViWqhhNZsZmtpm+BTz4DWgKgXyEUTTEwF2YnWAIFGo/0Q9niMeL1GDAY11apMsSgx\nNeUlm61hNGpoSXW+/WIXTq+VTD7F5maKW8tx3JMzCIUEl3/2OvlhB4e61Nz6xS8Inj6NymBAUIlU\nkimMXg96mw3X6CjWvgEMvQPko2UMvSPsJWqdroUHvV9Op4HpaR83bsSoVmVUKgGbrUpv76cr2zoc\nBk6cCH7stZ5r16IUixI6nYrJSS/BoAVBsPL0v/ojVubm6PJ4MLpcpFdXydy6hWNoiNzOHhf/83/F\n3j+IaWiCkn4Ln7vttOt0GRgaclKvyVgcJrx+GxgsSGKOgZdewtLVRfjiRTRGI/7jx6nE44S3t9H3\njhBJNVDECka3l71YFa/ZQDm9Q9/EGIlYAZXBRDKSwm4P0t8vIjRlYmmJ6WNnKCxeI3JzDYPVQnwp\niVnbxDE0yNxv3qU7Xab33Dl8p88x1BtiNFNld05k/co1Jk4fIhvbwCbVeer732D5+ialUp3S1gbl\neAKv24FOp0YqlYjNzjL8e7/XMcSTJYlSNEqz0aBeKKDW68nuGy8iCAiiSHxhgUo8jtZspvvUqbb+\nzb6wG7RLLjduxMjn23/ut8ttCwsJnn22HVg++2wfmUy1cw8/7e/cYtERCtmYn09Qr6fQ6dwIQjuY\nud+sCLR38AgCequVra2tA63IuVyNcLiIRStgFsrsvPEae28oHHtiiKFQPxmpTqPRwjIQpO/J05gc\nFqqZFA2fl9zcAihN5Fod76lzOEYmWdvcoxrZpac/QEPUExzwoTVoUVkdDB7X06xV6BntJTQzTsOY\nZfBP/icSuymcpQKZ+WvoK1US+TjOUBDX4ADRuQWkuoxR26Lr+DG6jh1BFjW8/84aG2tJIjk1Hr/C\niXMjzP/2fX76H6/zrT8+gtrZFvRb/dWvEFUqRLmG22altLrAyMsvoTO31ZrLiQQaJPQGNbJZh29m\nhlip1Sk/34ZUqdDYF83T7nteaYxG1AZDWx14H6l6na771DWBx4vzcLuL5kFKNJ93/pmZGW7cuMGZ\nM2cem+v/Msc/1pyRs2d7WFvLoNG0Ge8DA4/WH8PpNHD0aIDt7Rxnz/Z0duADA3YCAQvr6+2WTodD\nz+xsnPfe20GjUdFotKhWqxiNCYaGnJ1gBECvV3cyMOrBQXLb26g0GsxdXXhqNXI7O5S38hRW5uke\nHMTqdSEXsogaLdpAH+/8f79k6/oS/SenOfzMEZIrq5h9XroHBylFI/ScOUVFXKSaK2INeNBa7Yxq\nkavxAAAgAElEQVSHXKg0BRxmKDUUvF4LQ0MOFhcThEJ2AgETigJHjvi5dSuDUajhdgqc/F+maBZz\nJOeu04wXKG4ZSGLm0gc11tdyVLJZDk246BoIsru2yfEjU+xdvMjGa6/R//zzGOx2MqurHSt6x+AQ\n5qlT5LUBaj6IJ9rBjs9nvnPx7xOHDnnx+Uzk83V0OjW1WuqBTPl6e+34fGZKuRKCXMXitHR20Vqz\nGUtXF66+Pmr5PNmNDVRaLSqtlp0bi5RKTZrhBJmmFWOugea5F/AOjCDubBAKKdTKNUyBLuRMDINq\nj/DaKjVJoiXLTP/zf045m0Wt0eAeG6PVbLK2sE1DqmJ3Owk9d5rNDy/j7g+yfXWOPinNmRemuTaX\n4Og/mcDuc1LNbqIVdVhdNop1NVg8iKLIQMiMtLxEXq1D7/HRd/oo3VOTrKX1hF/bxT+uJ7e9y/iA\nkZmZAKO9sL28Cc0mol6PKrnGoWeeZ+/SVXxeI1pVnfzuLlqzGalSIb+9jS0UopJIsPHGG6RXVzHv\na6/ktrZoms2g02H2+doicNUqCAJyvY5ULoOi0JJlmpKESq2mUpE6gcjHkUpVO+33giB0BAI/CzMz\nPlQqkcXFAhaLnqEhF6Ojrs8eCNSLReILCxR2d2lJEiafj7rLRdfICIVwmGa9TqkooVKJ9B8ZYfmD\na5TzJWrlGvF1HbpknqHv/gnBLhPx2evkNjdxjoyy/PO/w9rdg+bwFIVIlEa1jjXYRcFgJbY8h294\nkPXtIqZyCv9Iglf+2RmuXw1jNHhppcIYpAw2Ocn2doWdjTTz71xFqFc49eQAIVOV6NWr1DNpgmfP\n4tmOsvHu+6hk6D96Et+Js9yKCaQqWnR2N9nZeWp5BVVVoXfAw+5yhVJDy7GXXqAS3kMURbRmC+r/\nn7s3DZLrPO/9ft19et/37ulZevYZzAxmww4QIEACEClKlC3Tpn2vl1u+cRJ/iW8lt8qVLzeVKiWV\nOFVRua5Tjq+s8MqSbVqWLIoUJe4kCBD7NgBmX3vf971P98mHBkekSFogaYmU/1X40N3n7efMeQ/e\n8/Tz/N//X29ALrVpSxIKhQy5XE5mbQ2L349MLqfriIvFaIlU24YoigwN2ejp6fA+0mtrxG7fplku\nI2g0uPfuxTE2hsZiwTU5SfTGjU4lTSZDa7Nh7e9/oPn5vOHKlSsfq0XzaTA7O8uf//mf/8LjfF7x\nuU5G/st/+d+w3Dd2KxTGEIRDuxnX9n32/s++fhcf9fnPvlarTQSDBWKxICqVwOjoIAMDNhKJMO12\nFug8vGKxEBZLjXJZT6FQR6FosLW1Rbs9+qHff/v2EqVsia7pw4jhNa489xyleByfx4Olrw/NcB81\nhYTHbKAUj9NwuWkY1Gh1AmqrjVStwc3FAFN7xlCYbBT0aqLFLTweH4f/YIpALEY+XaBUaWF3Fhnu\nLpNxm0ik1Hi8RjKZGIVCnkBAhsWi4fbtVfbt8zLbU6OaTBJa2qEc0uPQamkmwuyEQpTSBVq5Bo9+\n6SkSoQ10PjV3r2/z5a/sQWxlyLeaSO02+UCApRs38J88ydmvf51KKkUOsE5M4BmaJrWQpFQKYjAI\nHD06hcOhe+D5+ll0hOUMu+V8+PgkuHI4QOLWLRrlMoJW21k476sovns+9ZpIq9VGLgikVlcRKxXa\nyBBFCSWQj6aoZvNYj81g8Tgox+NIQC2ToVFKsvr664i1GjK3G6ndZuO11xh94gnyOztsvf46OoeD\nua9+ka1QDZXDTXY7gGtqkmyxybE//vektoNYdDJ+/98fpt5SIG9WSBaHiWUlTp3x0JYpufd2jIe+\nOIujneSNV8OIGguBS+vsP3uQvLabUD7PzlYC71AVnbxGuShxYEBH4uoFTFot1uFhdDYbvfPT1CNb\nDMyPE7hwgWIuh9poJL2ygt7tphAKkVpeJvjOOzSrVeSCwM4bb+yaMeqLRTwzM5h6elCoVFiHh8lv\nb9OsVtFYLB3lWrt9t9qg1SrR65WUSu+3ADCb1T/XLuHDoNOpOHDAx9SUC7lc9kC7aACazRaxxRUW\nn32WyLVr0G5jGxlh4qmnUPf303/qFOm1NRqaNNZpO410kuC1W1j1RioqPSqNEo/fjbO6QfbyKmKt\nhlyppFKqofX4qK0u03vkCAqtlkalhmlojOvnInhH+nD0utnjV1EK7KAJ3sQzNsHY7+zl9k/eBlsd\nr1kkv7PD5Vd3ULu66JudoBCL8c6rd/D+3lEMXV6MfX0kohnGf/O32fu7/5ZGoUhZsHBjuUwkVeD6\n7RRTMz5MXjeZnTCBepSeOSulSBiZOEby7l00Rj35VB4NKtpmL2aHGZtJidpsphgKdVSeKxVUJhN6\nnYIDe/toqs0IGi1utx6lUkH5XSfg+5LyrXqdyNWraKxWjB4PrslJdA4H1XQaQaPB4PXuVk4eBJ8n\nzsMnadF80vizs7PcvHmTvo/hffUvGf+zHv95SUY+tCb79a9//SMH/Owf/Ele12oiL764hiSB291D\ntdrkzp0ESqWCo0d7OHzYwqVLYdptCZnMAhRQKjtun7kcjI7aMBpVH/j+hRtBzv3wHvl0EeoV+txy\n3HIlNUlLIlHGPKBidHqG3PY2ocuXKSZSeOfmkIfCjB/eS6PRZnNhnWYkjVMs07VvH3dvJbFrzaRi\neWR3F6iks7j37MFs0XP5h2/T1efGNX+MdK7EwkKca9diNJttdLoAU5MubAYrRkmAaonkzWvoNRrs\nXjeVZBK1yUS3y0UkW+HOrRvYR0eZHPPz6stryNU6VEYjE3122NnB2t9Po1Ijc+cukUSC+sgI41/9\nKiNDQ5h7ewE47TZSrfai0Qi7ZOOPOz8fB41Kher9nrTWbn/fwldJpwldvLi7cIrVKuHLl9GYzRjc\nbrLZKrdvx2k0RIxoSd+4iiBrUkvGkAta7BMjJGotVDo1ap2KViLEyg++D3R2lxQjUQZOP0roynW0\ndhuGri6QKToaIpub3HrmGdqiSCEUwr24jvnUb5IIplh+8XV6Zie5eS9PrVCgx+9kfs7E63/9Peq1\nJjaHgarKwVrRQl2m4atPTRIVSixfWuL0lyZxDvRSrrZRhrIo5G3WVtPUSg3OPtKHVZ1CdLbROXSo\nXSasAzkSd+4QiEYpxmLM/Lt/h9RqYR0cJHrjRsf9N58nFwgwcPo0sZUNJL2dSl2imU4jViqI1Sqh\nixexDgygMhgQdDq233wTrdVKIRwmtbJCORZD0GjoO3ECz/T0bgVKqVQwNeWmWAzuiu8ZjSomJ12f\nmAMGPLDxpSRJrK2lCW4mibz0Mql7cVyeHmqRHdJLS4SvXsU6MIBzfBxzTw/ucoM3X7hBejvSWZlq\nBYb6XEztG6RVKVBLRIlGkqy9/CqCSolrYhz3kB/13gkKgRBGjwur34+528X0jBKZVk/uxnne+vvn\n0Chl6A0qRh85ztTjjzCiCVPKxihtVWlZHqWQzuP2dpNZXkSSwDs1iaS3M/nkl9h86zyXX7xIS1Lg\nPzRD78On+e63r+NyGRjdP46iWSO8GsTqtpNYXsPUY6ScyWC06HA7VFRjEWILd/DumyN2b4WKVCVd\nUTJ3cqbTO5OkjkGkILD1yitoLBbMvb3oPR48x47tJo61TOYD3jZirUYlkcDo8SCTyTB6vb/y2iOf\npEXzadDV1YVMJiMSieDz+X7+gH9l+CyTEQH4CTANvAT8z8CVT/OFH7dXVSzWd3+tFQp11tYy96Xk\nW8jlMvbudXPmzACpVAVB6Ci6rq5mUCrljI0pOHXqp7EaDZFUqkKl0uTOtQ3KmSwtUUZmK4hW7qLo\nGuXahefQu13U/BpOnn4M4+ICglaHvd2mnM5x9W+eZfzMSfpGu9hZ6ehAWDwO6vI6Rx4eIZvM4+l1\nUonHaJZLhBNNfvTsJQx6gXqtjqHdWXR1OhVGowqVSs6JI172DqpBlMhXROrRIrquHtRqgXv/8A/E\nbt1CJpdj7O7G3D+I8/AslXSKiUOzrKxYcfd5mTvQR+XaKplsm6EvPkk9n8c5O4dcBj375rAPDWHq\n7t69FgqF/H3CZP9S8/VhY8rJJIHz56mm00iShEKjwTE6SrNcxtTdTbvZ/ODCWa1STiRQWWxcevMi\nFoMdtdaAzN6F1j+CUdPGFQzTRIFMUCCTy+ieHGZ0by9r3/or7v7t3zJw+jSBCxcw9/TQajQx9/dR\n0+sRK1WSq6s4hoc7qrPVOrRETN3dlAI79EtJ7L3DvJPM48pkMJv17KxGmZ3zceuF1zE5LEwc9bO9\nHCaZjvHQ8XFurDd4/eU1jjz6ENfevEulKvHoH/8bmtUawwvbqI1G8gEZbnOC1MJ1AuEguXCUqROz\nTI48xuL587S7u0n9+McMfuEL1PJ5NBYLxXCY3lOnqYSDFCNhBk6fpm7u5eLrK9SaKUo7MvbO7Ee6\n8wYKtZp2u41Yq7EdCHTaM/U69XyenXPnMHV14ZycBElCrFZpNztJhyRJ1HI5uuxyHntskHC4hFwu\nw+s1PHBb5tPeP4FAngsXgjj1TWIbIaIbUeo+B/4uH6LYJtGUqIs/TYqEdp0eQwnHgXEa4Q3kjTIm\nnYjZZaUQqdKSZDREibbBwfrCMq7xUeQKObFMgXYiTvT6NXQOB6NPPonPbMHY7eb5/+82OiUIggQK\ngci9ZUaPztKWK4kWlURjNfwjLdp2E82WjHqhQL1YpFku4f/vTlHZWqBUbuIZGySfKpAJZ2je3UGw\nOvH26ej2qNl/sJtoKEPPoBWbaQ8To33IMiFOnt1D+u2XMHvdRBZX2HfoCKahPZRqMrB6qJocNKWO\n+J/SYCB8+TKNYhFTTw/JahUpEiGzsUHX/Hzn+mi1u95X7yU1Kz5CbfdX1Zvmk7ZoPml8mUzG7Ows\n586d47d/+7c/9vhPG/+zHv9ZJiMi8OgvMkA2W6Xdlj6SEKfVKtFoFNRqItFokXK5k5jY7Try+TrX\nrkV47LEhPB4jjUaLkRE7TqcelUqBTlem1eoY41UqDe7dS7C+nmFrK8va3QgT4704tCIUUzTVZgrR\nIntOHSV4b503/vr7uExfQR1ZI3r7FmKlSqOQp9kEtdmMf2YK98wMWoMeWavB1sY6Bm+VdGwDxcQo\nGrWBO997kVuXA3T12vF3axnf14/GI2fI0aRUKDA/JHD8oW70hQDxV64iGIw4xsZQymuUigViV+6x\n8vzzKNRq9E4n5VgMZDJ8M/MoGiryWxtMjVnZ//AABnkZYe8hDPuNhNMN8I4hdIHTY8I/2/1zZbt/\nUZAkifjCApVkZyGt5fOklpcpRSJYBwbIbm5iGxr6gFDTu2PXXnuTxNI6a2UT2RJkIwlc3XYeeXSQ\n6d/zEL1+HUkmZ+7kaSwjo7SyMWrFIiNf+hJd+/ej93ZRiETxzM5QjIS5c/ESLaUapVaNwdtFo1JB\nb7N2iJIWMyiUSJUCNpsOl13D8js36d2/D5fXhN2hp/vYFFIhSWX1DjaNEeNML7VYgIluD/19Nmzm\nBrbHxzG77chUTcRMkqkTs2yvxjh+zMrtH9xBIVaR6mX84z143TrCly6isViQORyM/S//K6Ebd7j4\n7I/xToyj1Klx9veh0GjQud2k01We/6tnia11KmAGr5cr7+xwZHov9dXr6Oz2Tgmz3e6oACuVVLNZ\npFaLajaLqbcXuUKB1G7TqFSo5fNEr1/v2NjLZJj9fsZnZ3/pvjWBQJ5ms01LocXS00V8eZVGo0nD\nPER4O4G9bWBho0ZNm2Zw0Ear2SRz/R2qySRTB0fIh0IYvV5Sy8tsX7iETK0jvrpJ36lTNBsili43\nqcV71FQqihubNMsl5EqB6MJtnHv2IvjcHPrqo8jVGuIr68RXt5Ar5Bg9HsrlOsaCHElTxCA0mHto\nnK1NCe/UHmzKCqOzg2hqKWKpLHfevoN3YoiqICewHKNHtc6eI4/Q54btYJmqqMBgtzE65kTelWbA\nnCdfSbL2zH+lEAww8/u/i6TUE1oNImqtSL49FLMabEqB+cMTZNq1TjJZr2Pp70djNlO6n2wUo1HE\nRoP06iqpxUVyOzsYu7pQ6vXUMhm0dvuvfCXkZ/Gd73yHp59++pcac3Z2lrW1tV9qzM8LPi9tmn8R\nvJuNVSoNbtyIsrOTp1JpotUq8fvN6PUq+vutu4JOBoOKyUk3ly4FKZUayGTg91vYs8dJPF6iVGpT\nLjdRqQRsNh0+n+m+ZHwLm82CTieg1QrEYiWWl9P87d/ewenQcfVSkEvnt/hv/vtDHHr8AJnNHVIr\nW9Css2fERMJnJ5Fq4DdaKewEaCOnVigxcPIY7WqJrTfeIr68jrxVwzY8QjEUptXtY+zUMVRakXS9\nyaGvnsXtX0TdKuEc6qews0Xy3l0ePXiQaFpLz5CX2Ms/4N5rL9Os1dGYjAyePoPJ56WWTVNJJmnV\naigEAbFSQWUwoBCUjI0Nk9jYpn96Ao3VhtJgwtTdzdtvbhDfKoJShbJcQyaTka8UGJ8QEYQHJ5R+\n2Hx90jHNavV9WwYrySStep1yIoFteBip1aJRLqPU62mWSrvHae5LYidXVtHofVy+HaMuqcklRcrU\neOXHyxwbbaMyGHDPzKCQiVDKUEkmyW5u0iyVCF69hkKlwdTXy/VvfJPRLz1Buy2RD0cRlALdRw4T\nvHQVlc1JIRggHwgiU8gZOnOG6FaUuTOHWb+xiNXUYv6pYfrnRrn9zH8lux0kn6sS3YnjHR/ikT/9\nD2y9eY7QC2sUTCpMPT3oLPtpygTSy6us/f23aYsiE089hVuRxdpvpmnx0kgnqG0tIRw+xNDZs2yd\nO0+k0mblJ69iG/RTyJUI3w5wyKjDOzlBNZUgk6pSKnQM1wqhECqzGUN3H5LTSL/P1PE6SqVwmUzI\nFQoa9XrnWsrlnZ0196+vQq1GZTQSu3mTzPr67nVP3r2LUqPBOzf3sef9590LD4JcqUXvkaOUkhms\nZiWBnTQqjYaB2YNshUvcXV3iyJEe+i1VWvU64StXCF+9in10FIVSSSXVsZdo1YtItTKpOwsMH5hH\nIcjIbW8jaLVU0ymktkRmbR3H8DDF7Q1alQIL3/42MqWKid98mlIkTM/sFLmdLSJ3V5FV6oxP99Es\nF9G1KowcG0Kv97L2o+eJ/NN5sgYNrokpBo/MEri3hbPLicPWR9eUH+eUjds3gkSycuT1IoN9VmTN\nCl02C5FLb6I16uh7+ATlZILUyhq+qTGsQ0NUDT7aOhs2pRyzAarxKDqnE7XRSL9MRjkaBcCh0YAk\nYfR6Sd671+HaSBJyQSC5tIR3dhbP7CzW/v5d1+BPO0+fB85DvV7n7//+77l69eovNf7s7CzPPvvs\nJx7/aeN/luN/5ZORdLpCItGRjHe79VgsWpaWUiwupqjXRZaXU+TzdebmvPT0dMiqjzzSvyuoND7u\nwGxW43DoEEWJZrPNzZtRnE49fr8Zna7Tl/Z6DYRC2l1lyHa7zcCAFZtNRyCQ5+rVMGaTEo9TzaNn\nRwkHM/T1GjBV11h640coBAGFRkNwaZXRLz5GWVLinpqka/8+UotLKI1GfPv2k1q8h9ZmJXblIgq1\nGrFWY+jMabav3MQxKyDKlPSO9SHI2piePEN6O4Qg1WgLavRGkY2/+X+xjY6iMEwTePUnNCsVavkC\n+e0tlFot47/+a4jVjty1ymikWamgdTgQtFoc42O0JBl9hw+gdPYSTEuUEgIjljoyQcXqVolaTcRg\n6HjE9PdbHrhv/3HxUa6w74WgUqHU6ajnOyJY7wotqQwGJEmi1WhQTaXoOXaMUjTaUUN1OHCMjRG/\nfRuL3cR2SsnqappcpozBpKWQLuJ396KxWEhfvk01k8E+MoJcLid06RIyi4dybodMOINeK8czOUG+\nVmfxhZcY/+IZ7MM5lDo9xVSWwTOnadVKNHJpTF43vn3z1IolSskY7tk5HHYVG+cvEboUxmiQI9Wr\n5PM1jAaBqs2AVquilY6RXrpHZnmRXLuBweNBQMQxOY1teJjQm69i7vKQDwQwmFSUwmEK68uUMjmK\nSPQeO0q9UECU5IiZLJVMZ4eYaY+FXCpPcCnA5BNn2QwGMPu86OwRBI0GsVajmk5jHx7Gf2Qf2vg9\nkvfuYRsZwTk+TjEaRWOxIFcqce/d23GLVSiQ5ArUPj/ZfIPY2jbCewwKAbJbW7j27kUh/PKWnr4+\nC+vrHWHDjMrBsT/6t5S215EM21gHBnjpxWW2dvKMzY+QiempLq5j7O6l9/hxEnfvIpPLaZRKiI0G\npp5uUlsh3HvGaDfrWD1OCrEI7qkpUktL1NJpTD3dtOoKNFYr2a1N5IICqS1RDAZZff6HHPqTP6Ge\nyxK8co2KqKd3fJCVHz4PbRHB5sHdbtMoJhEKMeqlHC2ti0IowNjRk9RrLWLrAQxuF0b/IDa3lf7h\nJsbbNwhubJKMKune38d2LIJJ2SK1E0HndDD6G79Du1FHqZBI1zU0JCM7oTLXL+9gNws4WxHmxvQ4\nHBp0djuWoWHS+RaNlgyDSYOpu4edt97crTCq7m/nlclkuKenH0jy/1cJP/rRj5iYmPjUD+aPi5mZ\nGf70T//0lxrz84IHXRGO0REpWwQeBvYBN4HXfjGn9WDY2clx4UJwl/ehVBZ4/PF9uzLyhUJHYfTd\nY4eGrIBEKJTHbtdhNmtQKhX4fCbqdZFvfesW6+s5bDYtuVwdh0O3682iVgscPtxDLlejVmsSiQTx\n+YyIYgutVkm/34TVIGNlKcHQoIWHvzqAVwyw+eLzmIUakXAehVKJq8tNZmOd8TMnidy8wcRTv4mg\nUSMXBHKBIJLGRKNawzE8gChKNFpywk0lC+9sc2rvEeqlAjqHlVo8RvbKOQSNFp3VhF7e4Nbf/S1y\nhQzD/ZJyNZVCYzYjiSKSBPlogrYEMqUKncPJyJe+RPjqNSSZDNfUXtzHThGqgMfo58Uf7rC6WUKh\nkHHqVD99fWb0+o4QXK0mYrPpGBtzfCqxqQ/rLcZiJRYXE2QyNZzOjinbex2E3ztGLgi4Jiep5XKI\n1Spam42WKGIbGiJ67RqVVArb0BAKpZKeI0fI5+uoVHI0KjlIEo1yCUFnpJQt0mqImB1ejhzrRZdY\nZPviGum33kCs1XCOjzP/R39EOFQgtJlBTBZIpevULHr89QamoRHSWwFS1Ro6UaS4toqgj6BUqbCP\njCEXVKh0WjRmE5VkCqPJQLuQ5uJffhNDdw+JopzuaJx6rc74sVkqiSjWbi8FnZF6No3QLKHTqVAb\nbVQyWSI3bmDs7cO5Zw/9J46js9tQG434Dh5k+bkfUslkkavUWIZHUegtJNa3oN+PQybgGhmg1ZbR\nbtRQG02o9ToquSLmgWGSdxfoG/WxtRRCoVIh6HTYbVo01SRam42+48dpFIvcu3IFbaGA1Grh3beP\nkS9/GZlCQb1YJlmUsZqSI19Ikd/MYRCadHebdu8TuSB8aoGyf+7++TD09Jg4erSHxcWOx5FC1kZS\nallZzzHilqPQtXj0sJPM1g2yF9eRGyU0HjuSJOEYG0OSK5Cr1DQSabKBKFqXF5nFRv+YH6xOlCIY\nzVqyyHCVqrSbDZwjQ+hcLlqNBkqdnpHHv4hcJVAt1cFgJXhjhWpNZOLEXm49+13Wz11EY9BhPKBl\nxKwmencLlVZDU6tF6/ZSaOlpRcvop49w8PhxzDoJZCLZ5Xukb29w8R9fZmBqkJH9Y6y//gpqrwWt\nUUs+VyVTTCBcu0k2XWJg/yQlVKQqDWqVJhMTLu6ev0P3pI10Jo8sE6I9ICIMzRMqlckVEhgaVpTR\nKq2fMckDaLdaH2iBftJ5+qTH/yLGP/PMM/zBH/zBLz3+8PAwWq2WbDaL1frJpCw+D9fvF8UZ+d+B\nk4ACeAM4TkfG/T8Bc8Cffeyo/wJoNlvcvh1/33bBUqnBxkYWpVJOq9Umn68Ti3UIcz09JrRagatX\no7zyyiY2m5Z9+7zMzXXh85lIJis4nXoGB+0kkxXK5cb98UUMBhUbGxm2tnLUai2USjmRSIALF0pM\nT7uZmnJitWr5/rMLOOxqVi/eJrGq5zef6EWqValGgzhMZrL5CjqNgMNvpbR8k3Y6jM7uIHzjFtVc\nDsfQEPHVdfoPzFBHg9qkIZ6oQCpPSwKUaoZnhlj//j8SeOcdpDa0RJHJp76KQqmgUSphdLuoZDId\nQzqrFYVSiVwQqDXaGJVKNA43t37wY4aOHKD3+Anc+w9h9HqRWxz8zT9sUKHBFx7vJp5uotMpUSrl\n3LvXMRA8dqx3V55doZDRbv9cBf9dSJJEIlGmXG6i1QofqaJ67tw2uVwngcxkqqTTVU6fHvhI6XBr\nfz+CRkMxEsE1NUWr0WDn7bcpJxLoXS7sIyOE18NcvF0gnW8hVkr4zCJ+p5rc1haqAStTM10Igoy9\nE3Z8pjzRe2uoPQa0PYOks3UCKYlJmZYWCmRqDRqbFSmSoioqqCtNNBtFrH09YPUQCm/h8A5i1EmE\nLpzraFl0ddGq17n5139Nbnsb/6On6Tp4CP/cHlqNBr4hJyargVSrSTmZYu2ty+gNasxHDiGXy6hn\nU+itZiK373R2/TgsBC9dRmsyYe7rJXnnDjtvn8c6PIRtYJDp3/s9coks7VaLRChGZKdAuW7FZdDi\nPvIwS29epsfhYLTbwuSJvRQjEYy9fVidFg73WnE5NCTTDfomB/Boq0ReeR7/yZPQbnck3+8b/LVb\nLeqZDIYjR9CYzWxv57h2fZNWS8JoVGEdGCRw5RoWiwajUY1MocAxOrqr6vpxUak0iMfL1OstbDYN\n0kc8BAuFOhsbGaLRImq1cF/csMH4uAOXS09te4XqSowTT+xDbnFRa2ZZev4cdRGUkhOFrkq7lMZq\ntxO/d49iIs3EU0/RbMsJ31lC5ekhkZcz0DPCq//5W7hcetyz86QFD6NP/yH2LgeCSomSOhqLheXv\nfY/IzVvI1Fr6T55CJghk8y0C6zmGH5ZQiFXMdiNKnQ6jSU0znepU9poNlCYzK3dCFCowZuIQHbsA\nACAASURBVOtifXGFxWqNwwfc5K++jv/M46jEEvO/9STBYImWxoTGbEGQt8jHEqy+eZXxEweQ1Sv0\nDPvIikbevhRjcatKtdZmeMTJmV+bJ3/zAue+/QKHHxqiqVCjyF5GLbZoZsto5RLZnAGbzU3zPWJm\nAJa+PoSPIK7+qiKZTHL+/Hm+853v/NJjy+VyhoaGuH79Oo8++gulVH7u8CDJyJPAXkAFxIFuIA/8\nX8BlPqNkpFxuUiy+X0hJrXYQj5cZHXVw40aUUqkjBS6KbUZH7bz1VoArV0IYjRqy2RqJRBmFQoFG\nI1Crifh8Zq5cCRMIdMr+a2sZNBoFg4M2XnxxnVyuSjhcYns7yyOPDFCvF6hWmyiVci69E6CFDKXU\nZHMlzJGTe1hfTzEwuYfotSsYFQocNiMms5qeqRFygRBdx79C4OJlln/yGpIEtv5+DHYbhXQRU28v\nxUSa4YePIhls/MZ//D3MLgPlwDabr73ekeHO53CO72HlhR8x9dtPY/APkU6VyK5E8J98hKHHH6cY\nCqHQGdB219nz61/l3isX6TpwGL2/H53PRWEzQrWi4bVLBW6tNRgYcRGMdgzqHA4tExMuajWRcLiA\nViuQy/3UF0aSHuwXbrstcfNmlHv3ktRqIiqVgpERGwcO9Hzg2HcTkXfxbhvu3WTkwzLu924jzAcC\nVDMZXBMTHQVQs5vXLmVZWgtgNKoRiglubC7zxaeP0DU/j76p4uwTE6iqae69coGBExMUtjfR4CVe\nUdNWCEgqHZFoiaETR1C//QZiTUerVqPckCPqHWjUKgxj07z12hrbV5cYO74fD1GErUWQ2ij1OjJr\nazQqFRRGC1JbInzjNuqufm69chWbt4HSaMZ//CEK0QRqs5muqVE843uoVWq4JyaJ3byB1Gqh1qjw\nzsxSiEZJb6yjs5hZ/O53aZQriI0GYqVC7NZtXAePUogmyOcqjB6d48qFdc6tbzEw0cvM7zyNfbCX\nQKhCWSbHrKyTunkN58wcckHAaFRhcDoQq1WSS+sIajVqk4l2s4nB49n1IpErFNQKBcrxOGqTiXi8\ntOu+Wyw20Dn89D+kRt1KY3AYsI+MdAjFnwD5fI23394hGi0hSaDVCszPd33guFpN5Pz5AKFQgUKh\nzvJyCqdTx8GD3aytBRgasjE/4sRg1JC5e4uqY4TK0ha5nR2G5sagDTWFCZ0Eiv5pGrI+hse6sdrV\naG12hh4+Rl2hZzsO4XwZa08XktbIW+ejiOUi4jEb2e0aoe0Q//F/PEjkhR8TvnEbuVxGGznldIbs\nxiaOiQlSoQjFZAbX2CiFzXUMXhdSPEas0cD/8HFufevb6PpHKFXrOPw9yMwugj98kVK2hEkYx12u\noFbLsYyM8uqzC4S204CErirHKZegJaK3mpCqRcxdHkLRApeDO7z0/F1kBhtNmYr1rQL79lqRNatM\nnNjHwNFxdE4nl7/7E5RaLU2lhY3QNfpqNfq+fBKdRkExHAaZDHNvL86JCcR6nWo2i0wuR2e3fyDZ\n/FXjjHz/+9/nd3/3dzEajT//4F9A/NHRUS5duvSJk5HP+vr9IjkjDTo7X0Rgg04iAlDlAcztflHQ\n65UYjeoP+JYYjWq8Xj0zMx7W1zP4fCZMJhUKhZxAII9G0yGdQmfBjMdLpNMV/H4LwWBgNxGBjkLr\nzZtRWi2Jl1/eoNVq02p13l9cTHLwoI9aTby/8DWQA1qdEptVR6VUJ5tsoB4w4j/5MNVMFrlGw+hj\nZ2m25ASv3aTelNi4voJ9fIJGvUEqWabr+EmMehVmlw2dw46EjLU33yF64U1WggEGTxzrKIWq1Zj6\n+mmUyx0vmz/6b1F3D2LUZGnLBN7+xzd45I+ewjU5RaMlQ6XTEF8PgEqFaHQRKqpQWQbZUZu5dilK\nPF7B53fichtJpcq7lYuOk68Fq1VDs9nercharRrs9gfbFZFIlLh7N/E+M8KlpRQ+n4m+Pss/O1aS\n3m8v/8HPO1tHJUlCa7XSajZ3d9cIrm7WN6ucf22NqihDarcxqkRcejOLN7exzhjQa7UM+Fzc+uEd\ndPI68rbI8L49xONFVAYtwWgVj9WAyaojW1fR88jjFHe26D10AIXZSbMlobfb+btvnKOQyCI3mNm+\nuch2PsmjDw2TuX4BvdNJ8MJFvPP7yMXSVGsipWIdszpLCxmbt9YppPI88T88Tf/YKG1kFGMxNt66\ngHV6HyO//huozRYMPRtY+gcopNOdB1s0hsagR6FSodboUZktNCpVFAoFSpmIXGpiHxoi29DiH/Ji\n7/aQKKsQ3UPcDbdYe+suI/snOHNygpp7ksubJQqVFl2uHoYkFbJsBI3JhM5uxzk2RjmVIr+zs3vd\ny4kE5VQKS18fhUgEnWngfXMTTzUwGr3MP3YEh6PDL2i3WpSTSZCkjvfNA3JHNjezRCI/JSFXqyIL\nC3G6u42YTJr33GtlIpHOr/dcroootgkGC0xMNFCpFASDeWZmhmmLItlIknKsSv+Am3rMh82mIpst\n0DbZWNlukHHWCCdVXLt+lfEBHebkAjqvF938I6yERVq1Nn1HH0Vv1LDz2jL2njlEk4lzL6yg1qjY\nWgzSkAT6Tp1CJjZoyVSIChWRpXWkPSeYe/JRlNUMereH2T/8Q2K3bgFg6unF1DeA76GHUTncMKRC\nVJtZuLxOWy7QarURW50kvFkusxOXU8lkcTs1ZMMx+uZGqEZDqIUySrWSrrkZmpKcukzH+nK8Y9xo\nBbEpISEnk28wNdlLcuEW6dV1Apeu4PA6aCn1xAMVNBqB1OYOtJr0HT9OvVAA6Oy2SSQIX7pEJZ1G\nJpdj6u7Gd/AgasMnV13+LFGpVPjGN77BxYsXP7NzOHToEM8888xnFv+zwoOsBHVAB1TotGXehYXP\nMBlRKhVMT7splRrv44xMTo7QaLSpVpv091vvE1Cl+667OpLJCuFwAY1GidGoum++J8fnM2Iyqe67\n27bv63QoyGY7rZ5Cod7hXeTrZDJVjh0zIAgKTCYF6XQFg0lLNJTBpDYhtiCXyjP9G0dIvPodxEYT\n/6lTqOwuZBod5WIbhcVJIlqgkMySjSbpmd/LhYtRjig1TB0cRmPQkbyzQHYniGJ4CGO7zvJzz2Gw\nmfDs28+d7z1Hv9NFo97E0NtPVVSg6J+i1VhGbTZj0Bt551KYPeMOlK5u7kbbeAe9lGUp7uwUEYxG\nbAWRUklEFNt0dRloNts4nQ10Oiebm3nqdRG328DYmAO3W0+xWEevV6JUKpid9fyzrqvvRT7/frM6\n6CQYq6sb9PXNv+/9d/1z3oXRqHqfH8Z7+5GNcpnojRvkAwGQJAxeL86JCdRWK81SiUxFIJcsIlfK\noSUhSRLJWA7nHgf9Y07aYobt7W0GlUrmHpogfusm5eXreKenaepTFAJ5hme8DE35uXk9wve/dR69\nUcPcyRkspjb7D0gYlTUWFzu+K2qbilS4jqRoIpbryMxOVAYj1VKFpthC0BnIxVcw+odYur7GVx4/\nS13tILG8jGd8lJbeSb2YxepzUYuG0I8PYDMqqGVzDJw+i9Jxi+TSCjtLEbRqGa7hAdQWC7ViGc/8\nPJn1TVrNOr59+9HYHVSDGXRaEy/81U+wDznZc/wkN95IUrwcxemxUBUsJLcClE9P8NLby7z63A1K\nlTYqlYInf2MvXz07SmttAfvICHqXC4VaTcpsJpxIoK9UyG5sYB8ZoVmrUQiFMA4r8bqtROMdjxm5\nXEZ3twmrVdfRXCkUCF+5QjESAUDvduM7ePCB7qF4vPyB97LZKOVy324yIkkSzWaLRkMkm60Tj3eI\n7SZTx4VbpepoBdUrDVQGA5a+PhLLCRQeK2OjVjZuLtNuSpTlTTR2H+tbZSSgEovw+q0kv/b0PIvh\nOqtffwHT+DTb0QbpPiNnvzhE25Sl1KoSuF0lGsqi1WuoN32sL2zTzsbpHnQTDwSoVtuM9gxRSJWJ\n7mywd64bhclGKRyi+8hRKnodfX4/5/7zN0ktL9N34mHCjS7SpSwiSjRWO5JcxdhcP0bRRDlfoooL\ny8AgUqWAWq9hNQLdg4NMzRzHtx3E2u9n5+4qgqTG5jR2ZA5qZby9fVQlFaMjNkyZIHWbiWKhhqmv\nl50b9xg9c5ql+CZSVaTPpsVsUiGTydCYOyrIbVEkev06pVjsp3OysYHGbKZr374P/T/7IPgsOQ/P\nPPMMp0+fZugTVvA+bXyA3t5eLl26hPQz5O9fVvzPM2fkBPBubf69yYcA/P7HjvgviL4+CwaDimSy\nglwuo9FI4/EYqddFdDoliURl13xrcNCCw6Gj3W5TqYhUKiI2W+fXvcOhQxAUTE97CAYLVKsiWq3A\nwkIcuVyGTCYjHi/R1WVCrVZQKjWw2XQ0GiL9/dYOIdah4+QXJohGckw8fICpITWGegxhYBDXnnGa\n1To777xDu1LB89AjDD3+BNGtCM5eD8FrN6mgw9erwe3SYfJ4OPe1r1GMRMiHYxgPHuDQrz2J/+gh\nqrk8PUeOsfLjl2k3mzhGRvA9+kWCJT3BjIB79jgyQUk0UqLVbKJ091JuaYikCtxbzxAOl3A4DIx0\nmbh9O8bDD/dz5EgPjUaLTKbK2toGCoWcL3xhCIVCRihUYH09QzRaQq2Ws3evh7173bvE3geBTqdE\noZC9r8Ihk7G7U+m9OHy4h4WFOJVKE6NRxcyMB5vtwyswycVFUktLu6+zGxvIFQr8x4+TuHOHWFii\nKbbYe3iMi68vIbU6CcHQWBf6WoiVV16A/n7yKhW1fB6FXEb81i2SKxtM/PF/QD6uQd6skCrX+cHf\nvEKj3kSQS4QXNyi67ExPOoisXsXoP4Bj0E8huYVvbBAqeVoGAZPdjPnIURRaPYNnz5IJRuk9epRG\nW8HoaR/3lrMsvLWM2awjv1OlK5bFLGUR+vcyNTnNlee+z+ZPXkSSwD4+jm1kDLlGS9dwD75jx8Hk\nJK1Usf8//R9UV2/SajTQ+fx4p8ZZOneDnet3sM+IuPvclKtVQoubjIwN47LIWF+JkAhmmRvtIpko\n88bLK+QrMur5EnWViheeW+HhR8eYPHMGo8eDXBDQ2e30nzxJ+fJl5OEwvceOoXM6dytR5e1VDp46\nS6bqoljsEMB9PuPurqj4nTtkNzd35yu/s/PAfAOnU/e+qiVwX+9HST4YJLW8TKNUQj0wRSZdYXUt\nS6FQJ52u0NVlpKfHzM5ODqtVg9mmp+ZwYLXHMdhqtCUZBl8Pmq0kGoWAdWyESE4gGU6jVUM10/HH\nMvcPErl0nkapglbo8C2q9RaFZBarskx0J4LbPojZbkKpkFjbLDHxpbOs/uCfyG1somhD7+w+DEOT\nbG6XGe81Ezr/Nkuvvo13oIu+2XFawxME8hrcZ34d2+g9arEwB4762choSexEwO9jen8fXSaRfFBF\n3/wUlZ02qytJvEM9GEw6SrEoLUSauTQqalh7vMSieUpbAQ4dnCBfaBJPlNEZ1Az5XYz1KLn94yus\nXbhGqVTD3e2g58B+qpUG3X47+obI1OExjK73O2/XCgWq6fQH5iq3s4N3bq6z5ftXCJVKha997Wt8\n85vf/EzPw+l0olar2draYmBg4OcP+FeCB0lGah/xfur+v0+D/xuYB24Af/IgAxoNkVKpgVarRKtV\nYrfr3qPk2DHLUqsF9u/3celSiFyuhkrVqX7s2ePA6dSztJRCqxUYH3fQ3W3c1R3x+Uzs3+/j/PkA\n8XgJpVLO9LSH9fU0J074WVxM4HLpmZvzcvz4JLlcjdFRB3I5pKI5VNSYm3bj9JjoH7HgUqVI1svU\ni2UWn/sh6fUtHEN+IleuUNNvI4zuJ19TMv7kE1DKYLCZ8Y4NsfHySxQiUdotaNTqNO7dI+Cw4//C\nE1z9p1fo7xvnoa/9nxg0MoTeMZ5/bploMs7yrQAGq4HH/81xgoUGA4NdrCTVXLgQ4ORJP1pdlUKh\n4zhaq4kEg3mKxQbz8170ehV37sR54w0FCws7OBxaJiddxOMlens7rZR6vc3ycor+fgsWy4MLV7nd\nevr7rWxsZJCkTiLS02Nmdrb3A8d2zLhMlMsNDAYVKtX7b9F3M26x0SD3IX42xWgU79xch3C5mSby\nVoAxhx69TsHS9U1sHhsnHu5m6+9ewXZfwl6h0VAPBjH6fAgWO5lImnpwE7X/CPdeuonSZKVeayIo\nZLs8CZPbTjZXI7UUZHr6EP39Vu7myuRTMQb2jHH06CO4VEUKKTvliohxfD+ycZF2uYDFaaGxEOHF\nv/weOpsNhcOH2WnGN+wjGrFQzCjJrK/QShQ7Yl31KrVcgdTSPdzThykrrbz2TohaIYJzuB9Zpc6h\n00/TUN0gnGkQTxpx9w0ju7WIt9dOuhDFhIaHn5wn09Ty3b98CdHowd9vZnLaRyjXoF0pIsjltHV6\nmuUS+XSObK6Kpef9dvF6l4uDX/oSsdu3Sdy5Qzke/+mHkoRGkBgb+6BDq1irdbgGHzJfD4KBASvB\nYJ5ksoIkgVqtYG5uAkUtz8abb+7a1je0EfpccjIpAbGQw+xVMDPvot1q4/Ua2bvXjU6n6uzCymYZ\naiuoZvNEl9ew7NlLq9kGk5NXvvkSXXMz2F1O8m0Jk1WPIGuRy9eJxwpIGzHUDglBanLphSVm9zpp\nl41oTRIH97mRFErylQbi5DSH/qcRxNgmOiXY90yQbwhgEXEJGV57dpFWU8TQ3UPFN8et1zdR25tU\nKnVcnn5OPH2WVLLE1LwbzdEBbr9+lcv/+DImi5bpY1PcWSnTPdrLo7/1ENcuB7lzK4Xf7+PEySGk\nZopEI057M8XAgRlyBRGhmubMmUHSNRUGu5kTDw/QCt8in8qitLsw6Bs0ZALJ7RAjMwdpxrJ0j3TT\ne/jQB5ILQaX60DabSq9/37G/KpyRv/iLv+DQoUOcPXv2M4n/3vEHDx7k0qVLnygZ+dfMGflFYQ7Q\n09md8//Q2S587Z8bEAoVuH49QqFQR6sVmJx0Mzpq/9BSls9n4rHHhshmayiVchKJMuvrGcxmDadO\ndezKV1fTKJUC+Xyd6elO28FoVDEwYEWnc5HPV9nYyLG+nkWnE3jssRF6e82MjzuYm/OgUimo11us\n31xDVY7R5zQjKCvE7kVwW8bRl5bYfP1NBKWC+MIdJr/6FUzdPvQuNwZvFyafh2oqRTYYoaJQcPcn\nb5FeXkKr1yCXyVBZTaiTOlptiVy6gClTReHs4aWfrGHxeTl4ZobrN8LcWYjQ3W2mf3qQXFXBpesp\njh3rw2hUkc3W2L/fx+3bCTY2MiwsxAkGC3R3m/j9358mkSixvJzEbtexvZ3j8OFudnY63JpSqfkB\nye5yuUmp1PhYyYhKJXDkSA+9vWbS6QoWi4bubtNHuu+q1cLPNT+Ty+UoPkTbQCaXIxME5AoFvf0O\npvNNlpfT2D02znzZhMdQR4quUYnHUer1lBIJDG43rXodhVJJ3+kvYA7FCF27TpdKx4FHp6lVGrSe\nPkA01CHpeVwauof7GNzTC5kptHoV+/0NRr1d3LvVYu+8G72YYOG7Pya5voPg6CJe0WKdO0xG08MB\nt5uZ091IaiPx7RBk4+ybsXDu+WssLqXoPTBP7eo1pGoL//A8rVKFpk6N0aLHPtDL9mKRdlvC6HZR\ny+bJxdP80z8sMOBVs371JqlkkbH5Ieaf+CKJWJpyOMTBL5/EYtVw8TtvcfTUOAaLAavdwLWra/TM\n2Zg9Mowkybl9K0psp8rQuBezsVO5qpfLVJJJ6sUiMqWSptKMTG2k2Xg/X0vncKD7CKt4mSAg/5D5\nkisfTKfGatXyyCMDxGIlqlURp1OHx2MgePHibiICUK212L56h8k9I0x0O2k1m6SCawjjJk6emUSt\n7qjG1rJZ7ON7kJx9tMsFpHySUi7Cxt0QNn8v+09OUNVYEJQKho/OM+Rq04xsojHoMXRpyZUlBvwa\nQrc3GJx3cPFb/4R/0o9WkeWhyXFso2O8fW6Hcz+5QyOwgqfPxcEvP0Tl7irlRALDvkco5VVoewfx\nzU3jm5vhe8+8RTlfQu9MYuz1E061eP3VdWRaHQNKB4urSdQyD0d/qxdtM8fW7TUq5RrNRB+Cf5JM\ntobdbsDlNnDz/D3GegUKkSjZrBWt1cLhrzxELluj2ZYha7fRaeX4esyEVquYXU7isSLlSotWroZg\ntqLxduMbmWF4sgeNWfOBOVEZDNhHR4ne6JCroSN45xgff99x7VaLciJBvVBApdejd7t3SdCfF+Tz\nef7sz/6Mt95667M+FaDDG7l8+TK/8/9z915Bkp3nmeZz0ntvK7OysrK8r/ZoAzTQcARAgqIROKKW\nQ0qiViFppdXsRGzEXmyEeKEJTcRqRzEbWuliZXa0GlJDUhRFAiA8iO5Ge1ve2/Tensw8eTL3IhsN\nEY4AaJqc9yoz6/z1n8r/nDrf/33v975f/OK9PpWfG+5lMHIMePHO65eB43xAMFIs1jl/fveuFbko\ntrh0aR+zWUMg0FX+e2etymDQ3H3gdXdT3W6QW7cS7OwU75QBBBYXM3Q6cPRogN3dAs2mjCDAwkIa\nl8vIU08NkcmISJJMT4+Zw4d72NzYRJZMGKpRvPI++9om0ZU1NAY9Dp8DnyrPzvV5LH1hkOrMPPM5\npFqNWipJq1JBa9CxeuUi688/j0KtweD1YvGF2Lh4g8lHToLOgCyKWEMh2sEg7uEJjOEhzJogq/MF\nbt+qYwgUuHAti+Top2bQkIqLaHRqPB4Dk5MeWi2Z9fUcXq+JhYUUS0vpu99VJlNDljuoVAK3b6do\ntdq43UZisV18vgB6vYpisU4mU70rEAfdHek7A4VKpYlSKXygAJpOp2Jw0MHgoOPuZz+JN41CpcI1\nMkItk7n7j1BQKHAODaExdAMolUrBoUM9hMM26nUZRaNM7uo5lDod1r4+xGyWdK2GvtlEa7Ggs1i4\n+vVnqaSz6IQmvukpPCEf+3PX0Key9Bs0TJw5xtKlJaJXLmInS9+gj53XfwjVHKlKhdlTpzHqWkTP\nXmfr0m1a7Q7KchNPZJDs5iraMS+NaoPVUp3Hnp5i92yBym4KWRS5/uYq1ZYGczxNeHyci6+eRR0t\nk1teoi42OfXMI4TaTXLxPLGbt9DotDgHI4gtBbvXVuh9bAhnpJ+2LktdacU8e4za3AJhZy/KgQA1\nWYOxL0IuX0dvs7G4VSdkN1C++AIjgky+ruTXn5ni6i0np073MzDoIr20RHppicSNG0hiHXUgQlJt\nQeMYJuAbQ1PcR6OQMXo8eKen37fsolSpcI+OspfL0W51OUGCUolrdPRDr313s/D279/e3obmjzoB\nq9t1WrUqe2v7NMsVDCYdgeEQlUqTSxf2cNuUEF2kFt3rdsflwHt0HF3vAHp3GVNvGCSRWb8flb+P\nRr2Fsu3A39PlU/zqmJVzF5O88eoqBr2KIycHUBb3EcsVVla3UBXmeDJoolPpIX3hVdooUZksZEtt\nzj13lYcfidDCwtf/03cZHu+hUlcQ6OmlUq5TyuTR2BQISiXpxUVsw2OYxyeYPNhHtiQzMWyhHt+j\nuB3lhb/5r9gcRgaPTNBsylx97gqJmMDwmAdrfZ+t21cpXJI4cLAHi1VH7NoNxEUZ71AYk1pJbCtJ\nvAbphhFDR4vR46J3tE1ibRulzoFzaIhiScJpK2CzdbNjstymUmmi0729WfBMTaE1myns7HQdnPv7\nf8SnqtNuc+2ll1AnEsjNJgqVCvvAAMH77vux9/jHxccZ/2d/9mc89dRTjI2N/UJwLo4dO8a3vvWt\nezb/Lypn5GcFG/BWAbkIfKAbUT5fvxuIvIVGQyaRqNx9wH4QVCoFfX0Wrl9PIMsdgkELU1Oeu2TJ\nQqHO+nqO1dUcoigxMuJiYsLDD36wTk+PmSNHug/o6REThbnrLJ+/Qa9Jg2Won3ZmlyFNifCIAZPH\niMNvIr+/g/3oacSKSM9wH5UbZ9l943U8k5OkE3NU4jEaxSKZ5RV0Tiel/X0cYyJGp5tmQ2LqmS+w\n+fyzKCxWVBNTWGdP88Mrebx9Ply9BhTmOoJC4MTJED/4wQZ78QbZbIORESNarYqtrTzXrydotdr0\n9dnIZmuEQjZkuc3hwz3Icge/30yx2MBu17G+3v27TSbNXbE4r9dIINDVa0mlatTrEvff34da3c1E\nFYt1bt5MkEhUUKkUDA05GR93oVJ9PB2JjwrH0BCCQkF2bY22LOMYHHxX66ggCLhc3c6gzEqcRrGI\n2mgkePx4VxitWkVrNuM/eBCt1cqRX9eTiedx99ix+L28/h/+FHv/ABpZwuXzkjj7GsPTs2xcX0JQ\nqVn83g+Q1UYEtRqt2czeay8x+PgnaNVquAYjrNzeolMTMbnz9AwHCEw7WFhLUyuU2J+X8HRy6ON7\nGA/0o9BbSG6XMaequHxBAmP91PfjCAoB/1gETXicYlOLtceHOTxIeWsNQalGazXgC3bIbmxRLjax\nB3uQDB5WNirkCzqmD48iCTVuL+YQHAGOHLNTzlVI7G+wPP8m1d0trE4LBn8P3uYWf/g/PYIj6MWk\nFIneWGb9hRdIz89TkzVIinm00zP0zNhYavQwPHiI8QNutHfUOD8IzuFhlBoNuY0N6HSwRyLYfsKU\nsDUUIrex8fbOvJrh2OlRlrZE8pUqoYkIi8t5tFsZ3ANNSrvbTI076TObEYp1ivEU2mgc38QJrp5b\npRxP4vIFOBjuo7lxi6v/8C36Ds1ie/pTNIt5Ni9eImJ3MPFbkyj0FlwWB9/7z+dRGc3YQx68w1p8\nQyFu3VggNbeAymhC7faTTpQYOjyM3unm9RfPo1JBW6Vm/IGjaDtVKskkzoCLpkqmtBdFZ7Ui5TOM\nj7lQqhRUKiKr8wk6YpN+v5H+w1PsXLyCopTCMDBGYj/G8PAgM4N6Ln79BzQ6DQJWA6XNVbbeOEf/\nmQeJLs5Rj20RPP0Il8+tE3ngFM18BqXYQudwYKrWGDxxEIPFhO/gQYROjWIsTadzJrcCJgAAIABJ\nREFUmGSywvXrcfL5OlarjolRG36PDq3Z/J733VuopFIUt7ex3ynbtFstcmtrdx2+fxGQSqX4i7/4\nC65du3avT+UuDh06xNzcHPV6HZ3u3Vmp/x5xL4ORIvBWFGEFCu884I/+6I+w2bp8BY+nj04ngFrd\n5YU0Gl26ikrVtVrefgd/4K334XCYvb0iL7xwjU6nQyjkx2TSYLPVKZeztFoWNBolhUKCV17ZZn9f\nQS4nkkjsc/BgD1/60jTZrIhSWcTl0FOcWya2vIlcKrIVK5Cbu44l2Etsfxtbb5Dq7XUCA59jw9HL\njdeuU9kv4Z+WsVusOB99nNZWl22+n0x2Teo8Hpq1Gu1AkFSlgs8/SDZbRz8cYuD3fx+3t5eG2syL\nb6zgCuqZm89waz5DZEBBu6Nifr5BKGSl3c7jdivw+cx4vUZWVtaJRrPcuFHHbtcxNaVhd7dEtWqk\n1WojyzlaLStWay9Wqwartau7EggMks2KNBoZyuUKjz12gBs3Emg0FUymrhz+q6/uMDqqZGkpgyDY\nUauVlEoJbt/OYDCoGRx0/Mj3/871+KD1+nH41xG3QqnEOTyMc3j4Q43VWiwoNRqkahWVXk/vyZP0\nCgLuiQns4TAr81G2gZwhQEVlYQQRo81Kq5ij0bFQTSVY/sErzJgtpBYWGT00xPr1ZXRODyaPG+VG\nlHKtgGNwg0ZLQWlvB0/Ix8bcNtlsleHBXqqSgu31JKX1FSwOM0vRbc48NIRNqhIa8pGrdOjoTVye\nrxLyzXDmkfspJ+JUO0ZeupAislVCZzEhWN2o+nWIpTLhQzO4Twxw47k30FoM3TKg3cT4iAXLfQGi\nsTI3zyfQ6HUYLTVupTMMTvfjdet56eYWXq+JFkpaTYlOIYUuv4nG2ibbFJB1NioiGLw+0uspGvU8\npmyOZi6BIuBna6fM6LgXnenHs/4FhQJ7JIL9p0TKC4fDtFsteg4dIrO8jCxJ6Ox2Zid6CEVqKA1j\nrGyJ6IUYCoWCjiBQL1VYmmvSc9qP0VjA6bVit/p4/WwUGQuWARsGl4mbiyn6VRr6jhwkcmyGG3/7\nt5T391B5gmwurqJb3mDgsUcxjozwyd/+JCuvvI4stQnNjuEcGiJQ28XSG0CWOygFGU/ATmgsglpo\n4R7oY2FrjfVX9zh2vI9HHhyF5k3ue+wgr/231ykn0hgcTqYemMHuMPDi2Tjf/Zc18ru7uFxGcv0a\nQjYvQ9P9KKQazWyCySkfeVmglsvQqjdRdmD4iIPCzVXapTIWgwK13UxyYRHX4RPMfP5XKMt6xOw+\nBoeFmnIKQd9LLZej1hHI/fAa5b0dpn/lSfKZMufP75PNilgtGtTFfS7+l5cIBYz4BoL4Z2fftzzX\nLJfvBiJvodNuvyfx9V+v6096XXwU/Omf/ilf/OIX7477ReFcjI+Pc+3aNU6ePHlP5v95j7+XwcgF\n4HeAbwIPA3/7zgP+/M///O5rUZR46aVNEomu1oBW6/qREs07vwC3u4d4vMzNmwmy2Rpms4disUE2\nKxKLlYnHYWKi61RqsWhIp43odCaCwRbVapOFBYl8PseZMxZmZ/1MTY1T3N9n4+Zlms0WqlKVrTdv\nslWtcN8XAji1WnxuF7rhIfKVDnOvriDIBhTKKtVcge2z83z63xxFSsYRs1lsQGlrG2u4j+z6OoZ6\nHUFQorM7SORkjHObjH3xaS4viexsrdE/1sc//dMSi/NJFFodCaOd69dF/H4j09M+otFu+aNalXjx\nxU3sdh3NpgWNpsWLL27yG78xQ7O5d8fzRcDpDHPhQpnDhwvMzHix2XyMjpruaKl0cDqHmJ72UijU\nKZfrGAwuHA4DWm2XY9Pp2LBYlOzsFMjn67RaqjulnhyDg453rcdHff9xIYoSW6tJoltJrFYt4UE3\nnl4v0CVfuicmSC8udm3uWy2cw8MIVi/XbiR59tktNBolNpuZaKJKPVOl9/Ah8gu30BrVFKJJBIUC\nhUKByaimXq5iMuvQu2zoNR3KlTI2lx2t3QmiEr3bi85spD0aIHLiCN7ZWb793D61YpVcsoBS6OAe\nGKBiCiDUSnjCPoa0HlRGM5ZGG61OyYWLMbaWdjB4fWT2s5gbMuWGAv9gCM9IL+GQBWUpjsbmgE+c\nJFYQyKZKjPUbSV86T2n4IH/5H5+n3lEj6MxoVW0ee2IErUZArVWhszuRVErUJj2NQg4x3aAYS7G3\nvIPa5sRgt6L2hWg3fTilVeJr23QApVqDQqlEpVKg0fx8MmHvBYVKhW92FvvAAHKj0W11zufJrr5C\nYTFJdF9PWwJbeAiVRoNKr6cuirQVGrT+PtxmJW2rj7FghsrOKq14BZvaj8rjQa4Y6B0JUd5cQ9zf\nRKXWoWmVCQfsWEJOXG491y/v4vL7OPxbv4GYzlAUBf7mb27x0OOjzH7qEdaudV2kQxN9nHp0go5U\n5eKbu9y6vkdLksllStREiacfGkJ643mOH3LSOh6h7/AMRrOWaKLOpTc36XTauNxmxFKZhmAlV6gS\n6Q9SWp3H3qxw6MAI0ZaXWnQHjVaJ36OnmslRzFZoV0u0qhXQGFAGRtjcKbOtkrCbWgwE3WQyZW7f\n2Ce1sMDGlTmUWi1HHzmAJp4jvraHMFBif7+E0ajBqS6z+so5mmIDtWRBJzRpiSIDjz32niU6tdGI\nQq2mLf0rfpEgoL3THnyvEY1G+bu/+zsWFhbu9am8C6dOneLs2bMfORj5ZcW9DEZu0O3UeePO6w8k\nr+r1ak6dCrG0lCaRqGCz6Rgbc7+nBkWp1OCHP9wmkajQaMisrmY4cMCP2ayhXG7i9RppNmVsNh0K\nhcDIiJNGI0m53EQUuwRNq1WHwaBGr9dw61YSn8+EWpZpt2SqVYl4vkApV0FAplxr4zp4jNr+JvV8\nnpSUYeW5VzD7/YTPnKEci2FwuUnuJPHq9BRrdXzTU6BSE781h396GksggP/YcWJ5UFX38R86QKph\n4KVvPI+srNMQWzQqIkePh6kUqqjNBhYX0xw44Gd+Ps32dh6zWUsqVeX27RRms5rjx3vp67PSaHQ7\nZwIBExMTbl58cfNOFqiIWq3k2LHuDs5u1yNJWY4fH7mTLarz2mvbnD+/i9GoIZcTmZz04HYbkOU2\ntZrEG2/sEo9XMBrVzMx4iUa7qrQfxkDvJ+GMvBeazRY/fGmZS98/T7PWbQKLTAR54rOH6BnpR6FU\n0nPoEJbeXpqVChqDgc1UgRsXYsRiZVYW48j1BoGQjf6QmWxOZGBoDF18j3IshqolcuSZp+m0atit\nGhIbu4w+fIpCpoSuVSHeaWFuQ6slU04kcU1MMHD8EI2OhljNyHJSzd5ukbYoIhZLJFoSJp+fZAmS\n2TJqpwGvz4XTb8frs7Kyvsn8loxap6WNkqMnImxcvkVL7+S1b58lPORh+Hce4Nz/9X+jNhoZPX2E\nR5/8FV5+tcTrr2/z6c+d4vLtEti8GNVV1uazNJsyff0u7jsNTZeDiSMRyukcJkMbk9NBtdWh3NaR\nXr1NJXcN99gYu2+cxdzjI3joIFKzRdPpxBweoFiWGR93f2i9mZ82/vW1oDWb4Y5iZn6zex8q1Wr6\nIm6i57ZolkroLBYsgQCaRh6j1cT8SgGzVQ/1fZb/+VmK0QQ2t53cbhyzz8Oxzz+GKqWgWSogtRW0\nahIbSysMnzjAwoUlLCUtN5a6LbLh44cZGdfz7LNbxBI1Li7c4rOfGeYzvzuCyaBAqGZpbt9iXbRh\nMysYm+kjES+h0iiJZyR0/gC2SD+VRIqRsJtqZg/ZOMD8QgqTUUtxMYPZrKHd7gq7TZwZx+3MYNTI\neKYG0fUHWLyUQ5YVHHlwkoWr15ByLQxmC70HR4mv79ESRZQOP3Z/ABIy3/ovVxgfd9FSaDGrZDoK\nFUaHnWq1ydpSnNPHhkmLVZwNmf39EgcP+invrdEUu+VyxZ1kWC2TQcxmMfe8WxHX5PHQcLnQpFLd\nUpogYO3r+xFeyQet6096Xfw4/Mmf/Alf/epX8d9RcP55z/9B4++//37+9m/ftUf/uc3/8x5/r117\nP1Q771twOPScPBlCltsfqHOxtZUnHu9mUNTq7k72xo0EDz0UplxuolQqGB628cADIQRBgUqloFhs\nMDeXolJpIssdNBol/f12Op0OtZpEPl+nz+9AZTaTT2zSLFVoVioER/tQWuzs7xZor29i8zqQpRpK\noUNxd5fc+jpStYrB46Z3epD8m7t4jx4neHCa0P33Ez69BzojpvAw6ymIZaJUOmbkWBtDLU21IuHu\nM9FsSqzN71CXOqgUHZDVCEJX6CmbrbG0lCEctuH1mpBlmVKpw/Z2AbNZyxNPDOHxGBDFFn/91ze4\ndi2OJLWZnfXS12fl1q0kktTmwoU9gsE2kUgLrVbFzk6BcrmBxaJlYyNPNiuSz9d5/PEIyWSVs2d3\nWVnppltLpQaFgshXv3qQcrn5M3Pz/SCkUjVuv7lwNxAB2FqMsjXuwx3yodbrERQKzD7f3Z9vX9kl\nkVBSL1epxOK0WzK7lSI2fRANTcyDs0iVCuZwik6riXt8lFKqgMpgoKm24j9xHE8hRWFtmbDbjbZQ\nYvl7z1JSufBY3URbLr7zvS20pgajkxq0Bg2VloXgzASlZIpyVWJAIbK8naK612T5xhtMnLmPgUE7\nJbHIQ08dJOjTkE6UOPfqKsbIGJlomp6xQTwDfor7CbKpInqDyN6lq6DS4VD6GHlyBtloJZ1LU28p\nsBg1CEolCkFGQoFSCa+/EeWRh+/HLMZYv7ZENi/hnJgkWdNSqwu0mhKirMYYClNNxDDYLRz94qfJ\n6h0Y/AFOzRiJRD6emdfPCs1KhcLW1l1J8j6vlvxsH/vRAnR68PX7OXZommSizH65jjO7h9FcRyoW\nqNeaZOMZ1JUmdQkalQpVhQOVO4jZ52VzbhOrx0kHBaVSBaHWJroRI9BrZ/v2BlpTH29ez5EvNJFa\nHbajNzl21M///JuDvPG//a+ETz9AxjaLot1isN9EZKyHjkqL261DqopUGwqsw2M0lXragkRB5UWl\nLuL1W0hGbyN57Xg9HiTaBPtcKPRGDM4QueQWjfg1dnZtzF3Z4LOfn8Q71aC5n2XmwScxaTvc/G//\njKO/j55Tp4nj4PUXrlApilTFDrLQoiG30JtM6D0+lDURZ9CLymKiruoK1jkceur1Fvo7JRe1RoHN\n/q866t6HL6S4Q1x2Tk8j5nJoLRbMPT2o9R++G+9nhWQyyde//nVWV1fv9am8J06ePMlv//Zv0263\nUfySabZ8HNzrYORj4f0CkbeisWz27VY/QRAIBCxsb+fv3i8Oh46ZGR9q9dt//sSEh2pVolRqYDJp\n6O21MDTkoNVqE/aAVsojKHvomZ1h+eo67oaVyOcewT01SzJdw+B00EyaqWSLyOU9Zk6OcfvSGpIo\nYu4J0D8VQWU143/4SdavLKKq6mka3MjBfubnU5Rv79GsVLoeJm2BXzs1Rb6m5tRnHuD21U36hvzM\nHqtTE2VCgz7Wt8qMjbkZH3cDHUqlBul0lUJB5JOfHCGfF7HZdNhsOiIRG81mC41GRy5Xx27Xo9Eo\nOH68l2azTS5X59Klfer1FjMzPqzWFJGInWKxcbeD4a2OmUZDwuUyIkky1WoTh0NPtdpEELrdDuVy\nE5PpwwUiHyd6/qAxtWqTWuFHjbw6nQ7FXBVJFH/kH2Cn0+nqntTVWNs5eoJ65v0WkrECUrOF3Gqj\n93lYuLVPuWwnMjSCFNtk/+wyqnaddlVk4JET1NVmtEE7WlOQ/kaB+MVzaCwWfBYT4089wrNvVkkU\nBSqxIjuxGo8+EkGsNWmKXnTCIANDLvYXNtAF+insphmeDGI0abFYjdTrMkIpQVNvQWqCKGjZWN5F\npdFisZuJDDhYe+kfqZVF7G4rBosRoS0RHrCwslckeSuDWG/RE7CSztToHzXTrtd46OEhClWBmUNB\nLAEbGxt6Kn4D0cIeb35vDYcJjh0KYbOb2dotk0urcbqGMXp9TH/6MdQ63U/NefcnwXtdC+80z2um\nohyIOJmaHsI+MoDDaUSS2nzzOxtIYovoxg7hgJ4enx6dTkU6XcNsUBIOmymXmogaJ0YhT+ThM1Sb\nKhTI2EK9BAN2fvjyOgpFt8tE6MiYDC4mBmLkKnq2dmsYjBqUSiXKjsTpf/8HqA16vLYhlmMLJLNV\nAh4b8WQVjbrJVlIm1RlEk5ZwWAS2t0WiC5v8yq/OcvH8Nl/6zWNs7pRp1hs8/sQwOoOG5//pPFqT\niVNPHMBQy3Nm0E+p1GB+vYqqpKNYsJC/kCPibmEcP8zYEyeJ51WsrZSQOwrQm9jYqfLEpye5cnYV\nb1+AYr5GutBkOuiikCsweugoiUSJBx7oI58XMdhDuHe38Lg0WO+0+5q83vfljAAMDHW7cWx9fR97\nXT8KPuz4v/qrv+KZZ57B7f5RMbdfFM6Fz+fD6XSysLDA1NTUz33+n/f4X8pgBLop+Wq16yz7zlZT\nt9vA+nru7nurVcvhwz1MTXmpViUUCqFrXNXudKWRAYtFy0MPhRkedrK0lO4+fDsSnegS6Z0NVHNt\nYlYz3okJemYnackTqBslLv/TixQTaWY/8QBjn3yS1//jn2HQK/HKq3zmyw9gmjiKrLexv7LD//N/\nPEdwuI9jDx9jOa1ibm6TpSvLjB2McObRYRZv7uA6OkZfxM3AWICX3ojz7e/vINUabMfmOfOJCQwm\nHcWyhNNjpr/fTr0uMTTkJJcTSSar3LyZpNGQefrpUTqdbutuPl/j9u0UTzwxxFe+MsPycpaeHhM6\nnRKVSsV3vrNErSaRTFbZ3i6g16s4diyI3a5FEMBk0jA87ECWuwJmNpuOZlMmEDAjy23K5SYKhYBG\no6Svz/q++iE/a9jsOhx+N8m1t6XDVWolnh4rmnd4ZSSTFWLLm9RuX+Dm+WXMBgWnTpxkN2wjkxG7\npEy9hqXtBns7FeZ3khw5FKHneC8+l5pbl7dY2GpQjxa5divFxpvXGBrx8tCj9+EemqG1u0zbYGd1\nfZ/lxQTVShOtQU82I/LlfzvF2CEnuxspelxqrmzFaCuMFNZXkZpNwkM+Dk4O8ebyRVa/l2BdpWTw\n0Ciff/oIq6M2qtUmw9NhXNIe39/cJDzkIzAURO/yoHX7WduucXk+g6QycPj4ANlcA3tHoDdoYXDQ\njtllI1dskq+0Qa3j5df3aDVa6CQ1lZqMUa+iipnIWIAr3ziLVqNErVWjstpJXL9Os1zGNTr6E3fC\n/CygNZux9vWRmpvrftDp0MhlCQxE8IW7vKrV1SySJNNBoCO3UeoM7G2l0SnbmFUgV9vsJJs4tRZe\n+WGUx88MYdMUmf2CC7QG2q0Wz//lWdLxPIGhAFqLGYdDT8jZJr+5Tr0mcWR8gOB4hMNhmdzl14nf\nXkQyuFCHK0wdGOD6tRilisT0bA9aQeLbf/0qGqsDsa3i8OEgksqGRtXGYlRhNylwu5wcOtaHJNaY\nu7ZHYqmGo7eHxbUSt/7zBRyaKjNnDvPpZw5y8/I2eVUHZaWBXS9z6dlLDB+dYERlQ60TsdoMVBoC\nsWiRwWEP1VKdUw+PoVN3iAx76ekxY9J2qDYV7O/lcYUdrK5mMJm0aKwORh57EEV2F62yjcnnwzM1\n9QunG/LjUK/X+cu//EteffXVe30qH4hTp05x7ty5jxSM/LLilzIY2d0tcONGousMauhyFQYGHHdr\nVV3TuxKxWFccSqdTcfRol9C6tpYjk6lRrUpMTro5cMCHz2eiWGzQarWx23XMjFlZW4pDMcX+6i10\n9SyZ3SxGt5tqMolzZJyF1U3it7bYi9ZwBfpZX4rj6PUx/YXPk1tfxRYK4R4apKDRcu7yLlsxiZ5j\nx1EpBdajbeymJj0OAd8nZxgcD6LWKXni84fptFpkMjX2k3WWljLojDocbhlZtpDMNnn8cC+SJHPu\n3C6FQp1KRSKTqWAwaPjsZ0eZnHRTKjX4h3+Yw2hU8ZnPjJFOiywspO/oqvhRq5U4HDoUCgUXLuyT\nz9dpNGTa7Q5DQwpqtW6mo1ZrEYnY7/j/SKhUCgYG7CiVAjqdmqkpD0aj5u53Fwxa3tNJ9f3w0+aM\nuN1GTjw+ww/rIrn9JHqjjukjESLjvajeIbhVKdXIzt+mXC8S6LURXd1m8bvf4+Hf+iy+T82gFSS+\n+YMEa3vNO+aAbV47F+fppyKkt8vcvBHFN9jHm5eX2dyr4bR5iCai/Mu3qswM6bj/wUmqNRmXy0i9\nJqEQFOj1GgqlBjqDjmypw9B0GKNc5FP/w4Ncefk6uYCbwECA++8P0dqew6CtILWbCJKCeipBc2+F\n4emDONxm2pU8BsHDp//33yOztUc8KaIJB4klaijNJjQmI5mCgpW1ArMHA1gsIn5/EKtVh0ajZH09\nSzxW4drlPcxGFQVJZi+vRB8aROsx4hryUcmucnTKQrNUxNZnp1xXUG4qaBWzVJI/JKJSfWDt/2eN\n97sWvNPTIAgUd3YQBAH7wMC7hLisVi3ptEzvxCjR+Ar+YyfoJDZJ7qYITE9RdE2xut8lXUptBTWM\niNSoF9tQF3nsk1OsjfZSLtWwB31MRAxU9m7ye79/jNvzGaqVJg8ds9Jau4pUrRFNNUlktsm9us4j\nv/frDPZq0Zt0NFoiCq0ay8AQYq5IqE+Nxmzh0IyXdKJCXeqwvFZk6eZtLE4zNVFmfNzDfQ9NsrWR\nI5pI4nAaaFTKZON5LP46T35qjCtX5nGeOElHLOMfjZCKl7h+YZ1PPXMUzE16eu2EgmaMRjUbayns\njhB9fXbknIjb0mFzV2RlvYAzqKXRalOvt5GkOvl8nZhGw+jwESZm3WgNhnd9/x92nX5ax3+c8d/8\n5jeZmZlhfHz8nsz/Ycfff//9vPLKK/zu7/7uPZn/5zn+ly4YyeVEzp/fI58XKRYbiGKXnPn5z799\nUZnN3SzHWwRWu11HrSaxspIllxNZWspQLNaZn0+SyYiYzWqGB6xUk0kchhaUcoT8TlJb60jLlyhk\nMigUCiRRRJXJEDhyFCm9j9etp1SzI4l1ZFFm/fY2x776ZYxTKaJXb5C8sUcakbUruxh8fmJpI7G1\nPZwuE//j792HMh9l6NgkhbaRa1dj/P0/LCA1Wzz+5AiHDtlptTp4vUZcLhWbm9yRYrfj8RixWvW4\n3UbW1rLMzPh5/fUtrlyJ4fWa2N4uIssyk5M9TE66SSar9PfbkaQOxWIdQRDodMDjMeJy6TGbNXQ6\nTQYGvNjtTeLxKsViV0/g8GE/stzG7zeRydRYXc2yuZnnS1+aJhSy0deXY3+/iMWiZWjIidd779w6\nBUHg8H39BHttFDIlNKoO/qAd/TuY+6IoUU4X2Fzao06DsMuE3zNKOZWG2AqGXj1KuxeFzoAgSHdT\n/waDGpdDyzeeW0LnGWQ726Ha0SHToGOyYHHoqBUUeA6MoO2xs7cdxes18sgnRtnZK6PVqhgatKPT\nCmh1Kv7x/7uODhGvW0f/ZJjDJwZYubyIVMiSfPMNtKoONqVEMVcmt16i3FRhVIR47Y19KgURm0NP\nIDRK+MAopniS6PI62wvbPPqVSZIKNfWVNDaLirrYZHMjzvq6jCx3GB11UyyKtBoNqpUOjzwyRDor\nIjVatDuQyYkYXC68XqioZbIFNytbOSrXvsXMoyeQdBpcJshvb9/TYOT9oDEa6b3vPrzT0wiC8C5+\ngk6npFSqd1tVwz5Csyo2L20RPn4/kac9XF7v8L3ntvjCF7z0h604bWqahTL2YC8WMyi1RnL7CRw9\nJQSlgt4+O7eee41sR4nF0eLoyQiKVoOwp8P1c0kKRYmWxkK9U8Zg1pFf3+TCxSRmj5NiDR54MILX\nb2e9oSZTqVDfLiOoFGjVAi/81x9y8MgkWqOO2H6B4SkXBw4HuTkXZXMjh8JgpWfAi0JyonG7uHQl\nht2hZX4xy8rcDqEePQ6Xgb6hPnrDTnZSLXZ2Cpw+HUapFKjVmsT2S1y6HGV1NcvMmI18pcLtKxs4\nQz0YLXqWl3M4HHqUd3g4zabM1k6J8Ukv94a6/JPj7//+7/nqV796r0/jx+LUqVN87Wtfu9en8XPB\nL10wksnUKBTqbG4WSKWqtNtdoubAgJ1HHx24e5xer6a//21y3Y0bcWS5QyrVfdAmEhVarQ6pVIVz\nZ9P4f20IbWGP6LVVjC47lv5+CtkKuXwDtaCiGI2islgRxCYYbThcAeqZJOEeHRurKRr1Jg6fi/n5\nJNpKgvlrW2jMJppmK/VGi62ra1j7IkgKLdlMmdtXNnEq22ytJsh0mrxxbpdioYHFquW559bJZkVM\nJjXz8ymWlrq+GgMDdq5ejfL66zscO9aDXq/C5zNy5UoUnU7Nyy9vEok4OHjQh8djoK/Pxs5OgZWV\nLC+8sMHv/M4hdndLbGzkyGZFPvGJQY4dC5LLdXc8yWSFvT0Fs7N65uZS6PUqBgbs7OwU6XRApVIS\nDFrRaBRIUhuTqWtkNzvre6+l+rH4aXNG3oIvYMcX6K59o9Eimayg06mwWnV0Oh2uXo2RjtWQBTXR\npQRZXZmpKQ9Wr5PA+GA37Wy28mA9htuqoKNUo9BoCfdZqZRr7CdE6pJIMGhFoVLT7KgoliTS0Rou\nt5mSCKVskXatSjpeZS9aJhCw0Gy2UCpBSZtzLy1y68YevX4D1UyOmy/ucOz0CDtvXqVv+FMUMiWo\nlUjnmpRLdRpSkhPjU0RzEhcu7NMUm/gCNl5+dZdPPj3KsdkIK7dj3PdrnwJnEFsuR7NWR61xcP7N\nKE+e6aFTyVEu1qmmYWI2wuSYAxB45eUNNraK7G7lmD7g5yu/eZhKqcrKm3OkdpPE97JIlTI+r55m\nOo5lfBaVRfzRds17gB93LWjeY9cuihLz8ylGR90IgoJYokYoPITvVD+3V7MsXkqjVit5+OEIJ04E\n2d3O8epz8ygbJUp7O5x68gj5ioRRo0RvUKNWa0hlami8Id74xwXWV1ZRKhXpcDw8AAAgAElEQVTM\nHO7l9/6XM2RKUMrVie/lMDkdVPIVWiiolKrojRomZseIL61iGZlGrVFx65bI6dMCW9slHn9skNJ+\nnNvXdjB5XIxOmRgbdXP1eoxKrorPb+PWrRS511YZjFhJFmTUej2lkoTB0kNbvUvPUB+XLu/x+tVl\nHnygj16/loOHg/y/fz/PmUcG+e53V4hGS8i1Cr1BM5lMnSefGiSaE9jI5phS2RBFCa1WiUbzdlD3\nYSwbPuw6/aTHf9Tx8XicK1eu8N3vfveezP9Rxg8NDSGKIru7u4Q+pFDcL9L5fxT80gUjSqVwl6zZ\nbnd3rO12l8CZSFTo67O95zizuct/aDRkyuUmrVYHo1GNRqPComtT3Vohev0c6WuXMfQNkk0U8Y2P\noLU7iV+/ik6rpZ4vMvPlf8vyag59ZJJcssDK+TcxuV34p3oJH5nh5Td2UBaj7G3nKIsZjjwZwBdy\nU5XLuH02VAaJIwdcbNxYIPjoKPFkg41cnnJZolyREBQKZLnDzk6RT3xikHpd5jvfWeLUqV4yGZFy\nuYHH01W73N8vYrXqWVnJ8vjjA3zuc+PcvJmg0ZAJBi0Ui3UsFg0bG3m++MVJqlWJCxf2OHDAh9tt\npFptks+LfPnLMywspDl/fhePx4hGo2R3t0gkYieTqd314FEoBPT67iXzTqLgLyL29opcvhxle7tI\nudxgfNzFzIyPzc08CoWCmUePY1TL1Mpd1+fwZITw6QcwuN1EL11CWtvBpHIyvxhHoTOg1ysRy3Wc\nbhPZXIMjx4JsbORRa5SUS3X243UOPDpAq1Ilut9m48o8p3/lUTRmK9FoGZvNyAOn+1HR4sqrt+i0\nwTQVxGFsgdwkkW7QMzlBPpEhcuIIy69dRKNto9GpkVVKfJNjPP9SGr1OhZoWao0KhUrBlctRwmEb\n1/dUFLU1nPEYi1dWOXZqFKmt5BOn3ZRuvsn6rU0azTZGh4UDY1+gY/bw3e8s8i/fvsXIVBCXQ8fu\nZpYLF/Y580CAlkmgoOzaBJj7rVj6Imzm2yhXKminbUxN9N7rJf7IyGbFu512AwN2hoYcKBQCExNu\nrFYtDocetbqrJiyKEnNzKSwKmUI0htmgRmF1osptsvTiWVRKBS2NgcHJEPrwKBvLcZoS1Ko1CqUW\nt7ZljMF+SoVFOioNNquW0JCfdFODWqdmYDLMiQfCiLUe6kojiWqer3xlFp1GycJ8kgsX97HZXFx9\n7QY9w3DwoJ/VtSzRaIUet5ZgwIReLRPdLTA128PWRoYHHhljcy1JNlvn3/z6QV56aR29UYfZpCW+\nl+Xsc1vQOsaTjwQpNVvcuJHAZFITCjjQKRvE72STBYUCtUbP3l6JmRkvtZrE8nIaSWrj9Zo4eNBH\no9H60AHJLxK+8Y1v8OlPfxr9L0BHz4+DIAh3eSP/vfvU3Du1oh+PP/7jP/7jd32oVitYXc2xs/O2\nnbjPZ2RgwEG1miYS8b9rDIBKaJFNFCkUG2xtFxgcdPDEE4PIcof7ZiwkL59HrpQxmbVU23oqmRw2\nnwvnyAg6ox6t3UnoyCHcs4fZLWpYTVcZnBhCbnfFtFSuAPpAiO9/d45QxMPe7RXKtQ6pvQxf+vdP\nozRbqUkKenrMWO16wsM92D02zC4H61sVLl2KUizWkeU2nU7X1TabrTE97WV8XMX4eIibN5MsL2do\ntdrYbDokqU2r1aZalVheznD6dB8TE+475RcjDz0UptGQGRpycuVKjOXlDNeuxclmawSDVoaGnJRK\nTYaHHUQiDlQqgWYziyxrCQTMGI0aenosdzwp3t4F6/UqZmd9mEw/GVF1e3v7rsIuwNe+9jXea80/\naMz7oVJp8tpr2ywvZ1haSpPNimxu5vH7TWxs5HG7jbR1FlpGFQaLC9dAhNHT9+EMBShHo8QuXwab\nl2tX9rHaDSiFNp6Ak3xJYvZAgFxe5NnvrzI66mJywsuRowEePOPErIJXvn0RlUZFNVvAGuihgZbR\nUTdKpYJKpYFGCds7JZpiA7/XQDpbxzM0iC/kRhTrOPuClDI59KMRBsb6cQ/0M/nEGQz9I5SbSpQq\nJdU6RKNlKpUmrZaMLMn0h604HEZiuxmEXBStVoXN56IV36LaqmBze1FrlOgUEnZ9G9/4EP/8T/Nk\n0jU0aoG23ELQdtufHzwTwa1vIJWLSC1wTU7xwnNLFKqgtCgoVxRonW5CIetP3FnzYdb9Lchym1Sq\nQipVZWtrG6/X9ZHmL5cbrK/nkOUO9XoLUWyRz8fx+12cORPhwAE/U1Me7HY9tVoLqdWhJdYQanls\n/f2o1UouffMFEjspWgiodVqy0QzOvgAtq4F6XY0zFMAWCrGzV6F/JoLeZmXiQB+mcD9jp49itel4\n8On7QG4xf3GZFipMXh8dFJw7N8fzz+0hNlqYTRqsVi0Go5bxqR7m5tI4XUauXo2RjOcRK00OHPAS\nGXAxPumjP2xjP1ZCrLWJ7u+g1prZ2yvj9RhYXojhcpnRGTRo1FBK55k+3E8qXUOvVyMIHQw6BQ6P\nlUjExm60hqDRoVaXsNnsnDgRpNXqoNWqsFg0iGKX8F6vt3C7P9gK4P3u2fdb9w97j3/U+d7CH/zB\nH/Dv/t2/Y2Bg4D1//rOe/6OOj8Vi3Lx5k6eeeuqezP/THH+n5PSedadfurDWbNby8MP9NBot4vEK\nLpeBwcEuydLne++HY25jg/iNG/RrTXgfCjA95SYaLfHC80uUyzLOp7zE9gqM9NtIJ5XsRkuoVAIH\nbDbKJRGlw0urJoNWR7VQQoGNubkM+aQLMamgI1aoXd6hZ2YSi8PCZkrB0H0zpPfTHHriBFduZFle\nK6PSa2nkG3z323McPNzDcMRCaLSX0VEXVqsWSer6a7hceoJBC+vrWZLJCg5HE6WyiFIpUK1K+HxG\narUWpVKT3l4rnU4HWW4jSW1isQpOpx5ZbnP27C6pVJVEokKnA5OTHtbX8wiCQDRa4uTJXjodcDgM\nKBTCHeM8BUZjd8eg1Srp7bUQClm4dStJLidiNGqYmvLcU27Ih0E+L1IodMtxstzN4khSm2i0jN2u\npdPp8P3vr6JUltBonIR1VqRbZT7hb1IvFEAQaCm12LxOXnh+mUS0SO/NHJNHBjCbdZw9t4tapWBu\nLo1Gk8XhMDDUL3HpuWVqdRlvwIFjqpc3l+rMz+8RClkxmTTcf3+I9bUso4eGiG9bMVrUaO06itUO\ngbCTpUqD8z+o8sShXuTiJjtbFUZOzpLReLl+tcDQkBujUcuLP1jHbNXjcRt54qlhblzdY3QkgEEh\nEetIIEtsL2xw5NEDLEb17KfbpKMVIoNBDo+b0CkrILcYmw5RFTso6ab4G20l4bCNVqvNwNFDyGIN\ni6/I7X0Jc48f38ggZmcHk8XPzk6JiYk6DsfPZ4cpSTJXr8ZYWemKt7XbeWTZwsGD/g/UHfrXcDoN\nuN3Gu0rOABpNt/woCAI6nQqdToXZ3O0YW1pKY/L5sFuUvPrCCr/6jANaEo2mTDpVQW+zYjJqEHN5\nxI6erMKFoiWQWclz4kSIpqzklUsljh5w4vS7qSqtmLwK1tfzbMYhVjQT3e7g6+TRaJQUSw1SaRGz\nVUe93sLr0XPy/oN8/9kNFhdTWO9sQnRGLYVKi8SbO4yO2KnH96h1tKyt5LDbtDgcBjKZbnt/LFqm\n0xZYX0mSiOYYn/CiUktItSqTYw72d3M0W3r6B5047Xr6BtyM5lp3srB67HY95XITm01HIlFhcTGN\n1arjwQfDXL8ex+nUfyh/sF8ErKysEI1GOXPmzL0+lQ+NU6dOfSzxs1823Mtg5Ang/wQywP0fZWB/\nv4377w+xt1ei2ZQpFBoMDNg5eDDwrmPFXI7opUs0KxX0ToHmlecJThzn4mYKqw4sahDbCnRmE/ly\ni/h+HrPLjX1knJtJMxpzkMDpccJqkdjiCks3d5h8epRrGy1uzqd4+MAs7cQG9iOD6IwGnvjcIS69\nuUdDM8wnn3mS3f0K16/vsbNXpl6XcTgN9I/2gFqHwmzj4qUYJ0708od/eIylpW7Hy9iYG7fbQE+P\niViszPi4G6/XiFqtQJZlQiE7167FGB11MTzs4FvfWmRkxMUbb2zT6QgYDGoaDZmXX96gv99BLldn\nYyPLE08M8/nPj7K/X8Zs1mIyaThwwI/T2a2tHz/ey82bqjuiZSomJ734fCYEQcDjMVGrSXfagX86\nCbWfNmekLcs0SiWUGg0qlQKlUqDV6gYi3bKSgCAITE15WVhI026DINgwmTTo9SryebEbcJnNaIxG\nTE4HN64tkI4XUaiU1OptqpUmhUIdp9NILFam0egGkPF4mYMHhxg4aeLBHiOhPiuSrMCRiTM66kKr\nVXH0aIB2u4PZoiM04WFswsvaaoaBkA2Xx0gsVrnDc+qg6w9jIsTgSRUXr+V449lNBIWS8aSIz2fi\nT/7Do2xsZsnl6xQKNR59fIBWrYZe3eGxR8M8+/UowUgfi4sZVtfy5PZqNGotoptJhoecBANGtAY9\nT39mnGKlxdpaDlFsMTho7ZYDVnOEwwOMfvJJwsUixVf20AQktPq3KYvdAFj+yGv4cZFIVFha6mYG\nARQKO/8/d+8VJMl13nv+KjPLe1/V1dXezrSZHtcYg9EYGAKkKFBQULQi5UhJjN1QxN7Q0z4pFKEb\netCVVqEN8e4NUbEUyCUpgRQEEQRhZoDxfqZnpr2v7i7vvc19yEZTICj44QD8v8xUdZ+u05nZmd/5\nzt/cvRujrc38rh+GOp3E4cNBpqYiZLNVbDYdFoufWq1JoVB7U7evrc3M4KCTlZUMWlsbNdUaxarM\n6EQ7t2pV6mgQRYH+ASede3uZLlcZHVVCI6uVBo880ovcavBbX9jH3HwKo8PChQtbtAfN5BoakgUB\nb18QQRS5cmWTEye68fuDTN1epFSuKwRRo561tSyrqxkEUSAcLvAnf7KfmekE1Wqd8ad2IVZznPnh\nRYKj/Xz2syOcfnGaoeFBuntdVKtNXnlliXqpxMJMmIFBD/l0nrpGhcUs0dcGjYyM1qhlZNDI2GQ/\n8/MJ+vsdiKJif2A0qsnllI6SRiMyOOhiczMHKGTWRKL0tsf/o8QZeeaZZ/jc5z63Q8b9ZX/++xm/\nZ88eVlZWSKfT2O3vbDL4UZv/u8WDzqYZB155rwMlSWR01IvHYyKfV0zKfD4TavVbL7ByJkOtoKyC\nVIJAamEBleBCFUvhs3uQkRDVWvqPHSJ8/TrOgAffwYd47Wqa5dshGoTp6rLx+OO9SP4h+sZ02J1m\nHjsRRJSbaLw2Og7sYnYqxPW5Mi25Qne7lmIqw9StMKFYA7vTxPpaBodDRyyWZ3jYg8GkQ6eTcLn0\nTE1F8XqNTE4GkCSRaLRINlulUmmQyVS4ezeOLMPJk910ddlIpcocP96F12tkeTnNZz+7G5/PxOnT\nqyQSZZaXlfThZlNGEKCz00qrJdNsNpmcDNLebsVu13HoUDsm088eLl1dNvx+E/l8DaNR/SYXVUFQ\nfeBtmfuJYjxO+Pp1Ssmk4vo4todgu5nFRR0bG1ny+RptbSbsdi1qtcDu3YoEWhBUGI3K7yUIKiRJ\npKKysdb009gqobY5sPsrFEpNYqk63kSJ7m4HIyNuOjutFIt1LBYN3d02PB4jS0sZCuUWKlFNR8DE\n0JCLqakoU1NR9HqR115bx+HQYxvTs7qawWDSkS/WufDcHI8+0ouKFrKshA+uRaAVb/Ds88uoVAL1\nhoxOnwaUrcnNrTzZTJX+AQerK1kkoUUzEcLl0PP0Vx5G721jdj6N+sgIDruGlXtr5LJVbtzY4sDJ\nx2kKGsxm+P3fn2BlJYMsKx49qVQZUVR8Y9R6NWq9nuE9AukLIf4zVcjh0P/SuiLAdgZS603v1est\ncrkqgbeuQ/5LuFwGTpzoZm0tw6VLGywspFCplK7Jww934HYrKc+CoKKjw4Ykieh0Il/930+SjiSx\nDe5mqNqiWKjS3mHFGgyymtHR1WWgu9u2rSxzMT0dZW42CSq2fXt0qCWZ4WE3586V6ex20pKhUmmw\nf38bJpOGr31tH16PAa9Hz8ljAf75/36Z/acm8PqMXL2mLFz+x/+4hM2m48iRIEsrWdoCZn73//wc\nqXCMf/3WOQKdPvZMBLg9FWViwsepU73cuanjoUk/QqvJretr7Dk0QD5ToZyvcPxYO36niLqeJejX\n43J1srmZo15vodWKXLgQ4uLFELFYiUikwMSEj/3723Z4Yzrdx6PBLssyzzzzDN/73vce9FTeE9Rq\nNZOTk1y4cOFdb9V8HPEgr6K3pPS+F6jVIu3tb67Gf5G+WZQkVKK4EzHeqFQwCQ0kuQ75FCogFdah\nHuhhz2930CgXmV1vkGi2yJVK+P1aGo0WU1NRnnqinUY+y/kXbyFZRb74hVFq5TIv/tttyoUqequR\nSq5E1Wyize9kfatILpzi0KNjzM5EUalUNBogiAI6ncS9e3FKpToPPRRkfl4xWpuejqPXq7mwfeN/\n7LEebt+eRZIETCYNd+5E6etz8vrrq0SjRfbs8REMWrBYNDSbijX84cNBUqkKly5tsG+fn1qtRa2m\nbGsVCjV6euzbnI+3CvPC4Y0PXBm/W3xYPiONWo2NS5cohMM7721ePM/YsUdQCV1IkoDNqmWkS6S0\nNYuothEY6CbaY2djYx1Q3CO7uqw0qlVeenGeYhEcNlnhS3S145SVlXk0nMHrG2BhUYUkqTCbNZhM\nGlwuA4uLy9jtVmRZxcJCiv5+J9FogbNn1+npsfFv/zbP7GyCP/qj/dy5E+Pu3RgAExM+fu1YJy+/\nvMTCYorIVp6JCR8DQ2p8vgC1OtRqdYrFGuPjXmRZRq9X4/OZMRq1qGSYmY3jd6khXyOSgpatQWVp\ng2vXI4TDeY4dtdFtcnDn5gZqn5cXzyfp64PHHusll6uxtpahXm+xspJBFFUcOBB4UzHa1+cgl6uy\nvJymUIgRCAQ5cCDwCxcA9wsmk2bHrBCU5G6Dwb1TTL4XNJsy9+7FyeVqVKsJtFoXiUSJu3djHD/e\nRaXS4Nq1LVZW0lgsOrLZCplMhfZ2J9js9Lf50TfzeLv8XJqu8sw/zzEwIOL3txOPl0inqyQSRZ79\n4Sy5XJVSqcHhw+1YrDp+9MMZNreUTunUVJRmU6avz8HIiAejMY/dosYgV0jMzbKr14jJIKFRC3zx\ni2NEInni8RJWq45kssS1a2FsNh3xaJCeHhuf+sqjhBMVXvzpDTY2VIgCzM+nOHbQSTO5RSYS5/O/\nNYA5EOD2rQ2K+RJGvYhLI9Iqp2k1GlgsZopFHdeubZFKhTl3TsmwamszUS7XiUQKBAJmIpEibrcB\nv//tt2w/Kj4jly9fRpIk9u3b90A+/4OMf4PE+m6KkY/i/N8NPh4l7QeA0ePB7PeT29gAFOtiTSHG\nnsO7uDe1pahqHDb8bRYaejVbaYm7ixtsbJWRZTCZtESjRSwWLS1BzU9+cIVmvYmr20DLZ+T2pSXC\nl+5SrskY7DYks5VKqcxQd5CBoJrV2wniG0ke/cQgpVIDnU6kr8/J/LzidZJKVchmK3R12YlGi1it\nOmKx4k4aarMp8+ijveRyykptfNzPf//v59jYyNFsymxt5Xn66WEKhRrXr2+xtJSmXK7z5JMDVCp1\nVCq4cydKLFZgzx4lVK/RaDEy4nmTA+3HGeVkklIi8ab35EaD7NIskqaPEye6MNXizL10hnKhTHJa\nxUR8jT0Tk8iyBVnW09trx6vOcf30dRbOztISJFS7+xg/0MErLy8jt2RCq0kGh9zo9Yr0c3ExRbXa\nZP/+NjY2cmxuVqlWy0iSgNutdK3sdh0dHVYqFYXv8OUvj9FotHjhhQUSiTKSpGJ2NsnXv76PSLSA\nz2PEZBBpD1rJpFMMDupxuQyEQlmGhlyUSnXcbiOXLm3wgx9M8+ST/YTrDW5dWcF0tIPNu2G8gz28\n8OMF+ncHMBgUxdjr50Ls3buLdN1An9XM3FyKcrnJY4/10ttrR60WWF5O02zKdHfbCAQsJBJFNBoR\ni0WHVitx6JDCb1pbUzM01LNNCk5hNmtxuQz33SLe7zfR0WHZkZqrVNDdbcfrNb7nn1Us1shmq296\nT60WyOWqzM8nWV3N8OMfL9DXZ2d+PkU0qmy/xGJFegNqLLoUyfV1SjWYulFkcrKdYjHG3/3dVQYH\nndTrLXp77Zw61cPaWpbp6QRHjnSwtVXk/Lk1Tj3axz/+401KpQadnVZqtSayLNNqqRja5UbdqrJx\n6wrL0xuYvR52DTmRBZH2gBmVSrWtHlRsCvL5GolUhavXZ/mt3xqmt9fJ1NQ8oqgnHi/z9JMBrn3/\neQqxGLVShfzKIhOPHCS9liO2kSQyI5A/0MUnnuhDazZTqTS4dGljpztbrTap1Zp0ddl2ruVmU2Z0\n1ENPjx2LRfdhneL7imeeeYYvfvGLH4kog/eKo0ePvmuS98cVv4xixAv8fz/3XgT4/DsN/NM//dMd\nVu7Q0BAPPfTQTsW1uroK8JbXb+A/f73j6FHuXr5MKptl8KmnyK6uAlUOPdKJ1dOJtztIvZEinc6j\n06kZHnaSTG5hsehotdR4PEZ8vgbReAS12YpYKdLVaWN1+jbVTIO2Xj/GTicqUaJZFCg3BErlBGKj\nwsnPTHLuSoJ0McTBg+3YbF5eemkJg6HIgQNOlpYM/Md/zHPsmJVUqkS5bMJoVDM6qkGlUpFKldnY\n0FEsbmKz1dBqnVSrDbq6oNGQqVSavPLKCk884aKtrcWxYxOEQlny+Qhf/Wo3y8sywWCJ/fv1bG6u\nsbkpkUqVgSx9fXZOnNj7luPVasmsrKwgisI7Hu8P+vrnz9c74RdV3CpRRPULgqRUgkSp1KBWqRGZ\nukEyquxzG4xqGpUqtY0lfuPTT4JKoJyIE7owTXyrxsZamlq9QWQzw8DD+3jykwMIAhw/0YXVpicS\nKfODH9zbXskqqc7z8wnyechm4zSbMlqtiN9vpKPDwuCgg6WlNF/72j66uqycPbuOyaSlXG7QaLRo\nazOxuprB57MgN2qY9DpCa0mcbjNWq5Yvf3mUO3fiZDJl1tezfO5zI3zzm9dQq0VKpRqCICBIEsVS\nk6YMbUEHvbtNuNxGstkq3d02zpyREUUVTz01SD5fJ5EooVKpqNdbqFQqOjttO7L4SKTASy8tkclU\nkCSB/n4H/f0ONBoJu12PTtfHhQsbrK5maDRaGAxq9uzxMjLifVfn8P3ijeTu7u486XQFh6ObQMD8\nvuSl1WoDt1uPXi/RaLQjScK2sqa8U5TlclWq1RZzcwnUaoFIpIhOqyJ0r8pXvzqOEFqlmEyye7iN\noqznekjHl740RjJZJJOpsrKS5sSJ7m1vmzDZbBWNVkJv1NBqysiyCrVaWXSYTBoWFlJ4PUHGx/QY\ntBrUu3eTK7YwOBxceG2LXKGGy2mkt9fORijLlath4vEyY2MewuECjUaLTKZCIGCmUjFy714EtVog\ntZTBoavQ1u+kUatTa4lMX7xL1559bKyliacriAsFRk/Y0UYVwncmo2yJeTwB9PoS5XKDQkEhzGu1\nIvv2+fH5zO/qWH8UOCP1ep3vf//7nD9//oF8/gcdf+jQIW7dukWpVMLwDq63H8X5vxv8MoqRKHDi\n/Qz8m7/5m//yaz//C7/da63Fwr5HH9153ThwgGo2i6TXK9HjwL17dZaWiohiA7fbwMmTe9gKZUin\ny/T0uDh5xE8lneTUp8aweF1UZq6yenmRoQMnePVqgdM/WqGQr9K7q53f/r0jBIUwq6sZbs9uMDLe\njc05SKFQ41vfuq1EgO92s7KSYHDQte1c6kOWCywuJunosHPpUgpZlnE6q9y4EcZo1NBq1XA6s/T1\nOVlaUhGJFBgYEKjVmlitXvr7Jb71rVs4nXrCYSOi6GB4WGk/nzsXIp0usXdvG3a7DrPZQyYjUSzW\nMBo1dHV1Ua8r6oH5+SStltI6fsNL4L0c7w/j9XuBwenEEgiQXl7eeU/UavEO9ZPahNhmmWqhtPM1\nl1OPpBaplUo0azU0BgOlRIJcLI7B4KbelGk2ZfLJLJsLW1gdVjr7HJw+vUq+2MRs1jIy4uXWre0t\nkGOddHTY+PGPF6jXW1QqyjFTiMMpOjpsVCpNvve9e5TLAW7ejDA/n6S93UKrJbOwoPjE5HJl6nU1\nkqQis5igXq4iqRr4vAZiMQPHjnVw61aE5aUUpVIDs1lDKJTn1KkuSqUa3UNuTjw2QKkiMzUV4ebN\nLQwGNfsPtPPwwx3o9WpisQLJZBlBENi1y4XL9bMbWyql+Ni89toqGxt5BEGFKKq4fn2LI0cUw6Xe\nXodiIb6U2uGPlEp1bt+O0tZmue8cEoNBQ3+/832Pl2WZu3djTE1FWV/PEo+XOHiwjVqtwfJyBpVK\nxdmz6zzxRD8+n4lGo0WrJTM/n1S4I6KKRCbDxmaBoeEe5lZqXDq/QkWy8tzzi+h0Er/3exP4/YqM\nWC2CzykyMmDB4zGwvBDlxLEOMvk6gYB5p+PSaDRZW8vyxBO9/NtzMwwOuMilaxx76nHOnV/jzt0Y\nyWgWh12D3WnmkU8MMDcXY3zcy/iYh1dPr/CZzwwjSUpCucWi49OfHqTZlImEFqnkmuzf50arUzMz\nkyAWytLpsFGxdWB0qVA5bKxulsmVI4yMeBBFxdiwXm9y6FA7N29G0OkkjEY1IyMffTXdz+Pll1+m\nu7ubvr6+Bz2V9wWj0cj4+DgXL17k1KlTD3o69wUPMpd4H/ASMAL8FD64s/C7XV1LWi1Gj2enEEkk\nity6FaFQqJPNVqlV6vi1GR47YOALn27jE7sq5M4/T+jMK1z57o8IX77ERjJN2/g4mXSZrY00cqul\nrNAEieXVHIa2DoKjQ8h6O1vRKq1Wi1deWaa314bTqd/+w9bQ1+fg2LFO/vVf7yEI0NZmIZst0dvr\nYHDQycCAE5OpxNpamnJZya2x27VUq00CAQvRaJF9+/yATD5f5StfGeFAWHEAACAASURBVGdw0InL\nZcRgUFOrNanV6mg0AmNjPmRZplCocfduHEFQUanUyWYrTE/HefXVFa5cucv6epbXXlvj+9+/x7Vr\nWx/0tHzg8/VOYwRRJDA5iW/PHvQOB5b2djqPHcMaDDI66qW9242rsw2dXqKj04rBqGF2Nk6yKHL1\nwk3iy2uoBIFCpkwrG+PQsV6MOhCaFbxeA8P9ZpamN4lGi8TjJV58cZFwOI/fbyKZLDM1FcHnM/HI\nI3YcDv2O2uull5aQZRUXL26QzVaIRwuEQpkd9VIqVaaz04YoCtvjFAXD6vwWpXgMnz3H+ecuUEun\n+PVP9iBJAtFogUi0wPi4B4ddh9WqXEOf/exuREFAFFRsrqdxWLVkkkWmb23yk+em6eqSGRpyUirV\nsdl0PP54D4880rPTVZibS/Dii4vMzyd5/vl5Tp9eYWkpxdWrW4RCOTY38xgMahYXk9y5M/8WKW2p\nVKdQqL7l3NxPvJ/rJxYrcvNmhGKxjtNpoK3NzPT0/HaXsYFGIzIx4SOTqTA5GcDnM9JotJBlFUaj\nGp1WxGLWUSuXyTcNzC0kKYa3aPPqOXXcQmeHkhCu0QjYLWrsZoFWscBvPOZnyFUioMtRT2zS7tPz\n5Cd6OXasg+5uO+Vyg9/8zWHy+Rg/+rcFzl/YoFCWmV/OEUnU6egwMzpsx+XUEwyacTv1fONPDvLQ\npJ/ZmQiPnurCaJQ4e3ada9fCFIsxlpfTDAw4GNzXj7/NwsZmgZXVLA6ngUC3h82kTLYElaaE3WFg\nZibB2lpmu1Nm3T5emxgMGh5/vJcvf3mMJ57oZ3zc9562Ot7reXo/5/Wdxj/zzDN86UtfemCf/2GM\nP378OGfOnHlgn3+/xz9Izsh14NF3/K5fAgqFOsViHbVaQJIEPOo0hlaJ9IXX0Rn1zDz3PHq7HfPg\nCDN3EtjEMj2PHURIxNlKNIjeuondasXaFcDk1DF9O8zBA350JgOrm0sUK2VAWQEPDbmZnAyQTlcw\nm7V4PEYuXgzhcCjeIfv2+dFoRFotJVn29OlVVKoGpZJSRLjdBrq7PYpLogra260cPBggm61gMmn4\n7nfvotNJtLWZuXx5kxMnuvjDP9zHCy8ssbKiKHq8XhO3b0cxGCQOHGjjzJk1YjGlIMvnowwPK5Hf\nKysZfvrTJQYGHLjdH+2VkNZsJnDwIP69e5Vtm+2bpdGoYe/eNro8xwn5NKQ3la5EsNuDSpS48uxL\nrBl1PPTEJBa3ndlzC2jVah7e78DSPoK1PUAqUWQrlKTNZ0HSSAQCFi5dCvG1r+1DqxVxOPQYjRJ6\nvUQwaKHVanHzZhiXy0ChUGVpMUWw3UhPt5VUUpFBPvxwB3emoui0Inv3+lChJCyf/LUg616Z2IaE\nztBkPabi2X+5y29qDXgCdj71yX6uXd/CYdciN5oE2y3UKzXuziVo5VKEKiZOPzfFvoeHGRhwsbau\nxm7TIgmQzVbo73dis+kwm7U7suRMpsyNG2EkSbUdt1All6tRqzXJZiu4XAby+QrXrm0higKdnTJ2\nu5V4/GfdpvdiD/4gkU5XqFQaO68FQcXaWhazuUiz2SIazePzmSkUqqhUMqOjXkRR4MyZVer1Jlaj\nyECHl9h6GL/dR7Nex2ER2bg7h9kqYFJVKGZ1VMsWHj3VyWCbzGwohCaW5tpPztM3cQhBZcNoFujs\ncHDqVBe3puIMDDgoFmvcuRNFElXo9WoymSrpjBJSaTVrWVkJYXLaeOm523jsavo6DRzY62Oo34bZ\npObFny5hNqqpN8Fq1XHy5CgajUgqJmPt7Se/vsbtmxvsP9zL6GN7WcoYOHBAhYY6lWSY0HqaYspP\nV5edo0eDOBx67t4t4vMZ6e93/kL5brFYY2UlTTRaxGbT0dNjx27/aDmbFgoFnn/+ef76r//6QU/l\nA+H48eP8+Z//+YOexn3DR//u8R7wflv9ep1In0+mnkthsJrQyjKRy1Os/fRV/OMjJBaWsXidNCU9\nfV1BxHKK6twCapcNZ2cAz+4RNEY9DUHH3N0Q/k4f9UqdlduLdPs1vHQ2QqPRZPduD41Gi9u3o+Ry\nVUZGPMzPJ5mairGxkaOz08rCQhKdTs3FiyEmJ9tpNJo0GiaCQYFKRVnVTk/H6e21Mzzs5vXX1/mn\nf7rFH/3RfjY28gSDVvR6CVFUIYoC8XiJYNDGoUPt2Gw6ZmcT3L0bZc8eL4GAmbm5JMlkGVmWKZcb\nLC9DqRRmcjJAIlEin68Rj5fuSzHyfs7XO40RpF98STva/Vg+/Ulmby6h3VWlHN7gztkbqKp1ksUG\noalZeiYnOGDzcefVK4iSQDJdp6avsJUoc+XcEpY2P6WaivZ2C1/84hjlstJar1abbGzksFp93Lp1\ni1isyN69fpxOPfPzKQRRhcWio9Fo4HIauXxxlYlxL7/71TFcTj03r61z7/oKXUPt5LJlxYBvOMC1\nKyF++uN7WB1WLl/eJJlc5MjDnQwP2DFqYXdQpGPAz7WrWwjZCHM3l9l3cg9Gm4XL51Zxu7SUmhoS\n4TSVUz1shZNshssMDCgEyzt3Yuh0IpHlTerhFew+CznZzNiYl7Nn16nXmzgc+m05p5+pqSitlkyr\n5eb4cdNOMSKKKvr7HTuS2F8W3s/1o9NJb1LkpFJlEgktxWKdCxdC5PN1rFYtf/zH+7lyZZNkssxv\n/MYQdrtiNgYQ30xgMbVjMTTZva+bs68ssDm9js6ow+11MbrXQUevHZtQ4LVvfp9GsUSnf5JCPE3l\n6hWK2g7yhTov3w5z9IlxyjUdUzc2yJWamIxG2oMt9kz4WFtOUq3UGB52sRHK4PLbmZtN4HEb8DjU\nnP7uy2i0asZO7sXd3oWREnqDAUeHH51Ow+uvr6NWC3R22qjZ+hG0Hk4dOEKhoebmep2jR13oKXLp\n1QUsNi39/Q6MBpliVpH1jo56GR39r3lA1WqD8+fXWV39mRv22lqWU6e6sVp/Rmp90JyRZ599locf\nfhiPx/NAPv/DGn/48GFu3rz5jryRj+r83wm/UsXI+0UzvET62llCKwmaLdgz7kUtQrNaQWs2Yw74\nsfnc6K16jCao5gVqxTLTFy8T/PRvoxIEpq/MozFb0JjN7D3Ug92q5l9evcuBR/byhKGHe/NZPvnJ\nftLpMpcubeLzmTCbtbz88hI2mx6rVUtbm5lnn53B4dDx5JMDRCIFjh3rAmRUKvB6TdRqLX7wg2mC\nQSvLy2kaDZlduzz4/Was1tQ20a6Ax2PEbtfhcump15tEIgVu344Qi5VotWRu344yMuKlUKgBinfL\nG/v92Wx1R7LZ1WXbuXF/HNFotEilSsgyOJ16Mk0TuVKdaiyNs7cHmg3KqST1ao1caJ2RJz6F2h3g\n4iv3EAUVgiiiqueo12VklUA8XsTlMtJqyczMxHA49MTjRc6dC/H44/382Z8d4caNMMGghVyuSqFQ\n59q1Leanw3R2OTCatewZ91EpFJGoE9mq4nZokTQiZ15ZRKuVuHl2iVimiddrYfduD7LeRiJRZOrm\nJm6njjvn77Bnog2DxQRaDbN3N2nkSzi7OlCptYwd6OH8a4vUKjXKNZnhXW4W5+I4vFbcbsUvRpIE\nBAGi8yusXbzC1ItTGIwa7O1+Jh8+vi2XlTEaNRSLivS31VJIsCoV2O06Hn64Y6dbFwhYPhbKLJ/P\nRHu7hfV15QFaLjfw+82sr2coFhuEw3m2tnIUizVGRtzcuBHh5s0wPV0WohtJGvUmew90UG2AChlz\nbgqv18iWzUA8ViDY2cJvqqPTCtRbevo+9WlaxSypQpJcTUKfK6Dx1LF4zeiNSZZnQnT0tbFn0EAk\n3cLTZufUIz3cuxvlO9++hdmi4U//j2OMjbdRKzlo73TisanYuDuPUCtSrgqsXJliT4+IU07QrNto\ntdqYnVUsAs6dW+cHP7jH3r1+DAYNY2MejEaBzc08UqvJ8kISd4ePrUiJhVAeg1RHNlp3umZvh1is\nSCiUe9N7iUSJzc38m4qRB41vf/vb/OEf/uGDnsYHhtFoZGxsjEuXLn2sHGTfLX6lipH3o28up1Ik\npu8R8OrQCHZC61kKG2s4OtoRdXoko5F9v/MlkgtziIJIoVzEO9hLUaumXmtRuneF3/39TzK3uotM\nroGnzUqg3UY2U8ZgNnH9tbt0jA1iMGi4dy8OyMiyTK3W5MKFEHNzST7zmSHa2szbq04Zn8+kJMvG\ni5jNWh5/3Em1aiadrqDRiHzmM0NotRKxWBGzWUMuV+PZZ2cIBCzodBL9/Q70egmzWYNWqyaVKhON\nlggGLezZ42N1NUMkUiCfr9Debt32NJFxu/WMjWmo1SxoNAL79/vp7rbdtxvLh+Uz8l8hm1W8VjY3\ncySTZXQ6SeHXaHQsprQs3QthdKoYHezC2QFqowGdQYvXbyVgrtBqtrh9PUO1AY89tYeFtTKOUpPB\nQSdqtUCt1kCWVQSDViqVBjduTLNv3xBtbSa0WgmHQ+DVV1eQZZmZ6STZbIUv/s4E+54epFXKU62r\niCUrpNI1RElkZjaD3WVm36E+spkS8/NhBna1kSlCqSHj8DkQNGo8bXaMXh+jY36iKyEGd/t56ccZ\nFmeWmZtLMjbZz+99/SGS4RSyqIZ6hbVwmL5dbTSaRXK5Km63nrXlBDcXlnA5jBz+jSPMXp2Dcp7E\nzDQ6Yzf9/Q6sVh0//vHiNklai89nwmQqodVK9PS8sxvk/cT7uX50OomjRztYW8sQj5fo7bVz7twU\nL7+cQq8Xt38vG5ubOUwmLZlMhXK+RPTKXQqxBP0DLkzRMFpXL+dvFxmQyowEmuwdmyBbLSCVZSS7\nmavXY4RWEixPLdHZ4+KrXx1HK1eIrW3hnpjg2p0EvXsHGdvXSSlXor1HxWNtVlKpCMvrMj/5yRI9\n/W6CQTM3r2/QN+DmC58fYW5qnTM/nkKlsVNVm6mmkgzv8iCkN3nsySHKkpW51QqGQZF/fz6yIz+f\nn0/SbMocPNhGtdrE5TJw5fIGnV1WLl2NcfnCCmqNGr/fyOJcnFAoi8djfNtjXKs1d6IW/jNKpTen\nOT9In5GNjQ2uX7/Oc88990A+/8Me/wZv5O2KkY/y/N8Ov1LFyPtBrVSiUS6jVos4nQYS8RI1wQCC\nQPDIYeztATYvXaSaz6K12Og59BA1RKr1JpqeESIaP1d+cAHfUB/7joygE1W8/C+vozFZGT8yzL//\nv68xMKnB59NRq9VxOAykUlGSyTJ9fXampqLcuRPj8cd7mZjwMTMTR6tVVjThcAG1WqC3V8XVqyGe\nemqYwUEn58+v09lpxWzWcu7cGplMFVmGjo4CJ050cebMKvF4mcFBFxaLhm996xbJpOKqub6e48SJ\nLkBGrVbUNG/ICtPpCj09dsbGBlGp2Jadmmlre3cSvo8apqairK1lCYWyrK1labVkvF4TW1s5QrEm\npUIFWWwxfTtE78AE3SM9CKKIrDdj6B9FquYYsMncms7REHRMTioqDodDx8ZGHofDQKslY7fruHUr\nisVSYX4+zU9+ssDnPjdKuVwnlVIKSJfPis2moxhPISXL3L0d5sxP7yFp9fhGdpEqyPzayV5m5tNY\njCqMeiOpQp3BsQ4uXQ1TyZbweM2M723H5zGwvJLhhf+YQaLOE091MXagSiFfQRJVaDQC1WKJPQc6\nmbq5ycJKnuG9PsXWO5mjs93EKz9eoTNoIrq0xXrIyPC+HrofOkC9VKS/346uu5dGQ8Zm07J/v59M\npoIoCgiCCo2mhcv10eIFvBeYTBp271Za9plMiXv3Fra7jkqXpL/fSSJRRhRFzGYNHmON5QuzNOsN\njGKVQMCCqtxk9+4xyKvI3H6FxtoWJanJxORerkdUhKMlZElL20AnpWKRS5fDHNo7hrd/P6dfXkRr\nNOJuc/P9793FaNJiMWvxe3LotRnUkpfeHiUrR9VqEo8V0Rm0hCMFzG4HapePtYUwLreLY79+AJvL\nxFqhgK1QR9MMkb0xi28syK4+J9evh9FohO1IBwPlcoN4vMjwsIvZexEmhoxIagGHy0SrKeMN2Onq\ndbK6mmF83Pe2x9Fm02E0qikWf1Z8SJKAx/P20tNfJr7zne/w9NNPfywSet8Njh8/zl/8xV886Gnc\nF/xKFSPvpxpT6/VIOh31krLas1i1JOJNerv60feNkLz0Glu376B3OgnH68zPPMfxr3wKr9NDaayN\nK8/fRGe3c3cdXp+d4beeHmTkyBjx9TBOp44vfOMR4iUtn5j0oFIpe9VqtciVKxvodBJ//Mf7KRRq\nWK06trYKjI56uXMnCoBGI2y7qWoplbIUClWMRg2HDwdZWkpx6dIm0WgJm00RIt2+HcHnM3L4cDu5\nXI1du1x8//vTVKsNDAYNs7OKRbzZrMVi0XL0aJC1tSzDw26Gh900Gi28XiPj416KxTp6vfq+SjXv\nB2fkDZRKdba28tRqTaLR4s5WUzpdZmsrh87lYaTTSbOkSFhzggNbZycATqeRMgZyVZGOIQvnbt3j\nwktLeL0m3G4DTz01hM2mZ31dMd9Sq0UkSWBsbIjXX1/dUb2Mj3vR6yUkScBs1uNyG7HqW4RWEty6\nsko1lUJtUlNYEalZe9mMlHG5jBQW1xDlFhN7hiiVq8TDOSrVBsNj7VQqTV746Rp2q0RsLYIkyKhf\nXcHjt3Hy1ydw2STKNbh4JYLGbCZVVPHIr48RCJhZX4xw9CEv0UgBbS1NKpTB4rKxtFnn3/91iiee\n6OPepWlE68N85hMuDAbF2VSjkbh7N0Y+X8Nu1zE+PvCRMLp6N9dCo9EkHC6QyVQwmzX4/YovSbPZ\nIputoFYLTE7upl5fo9VSpL+zswkmJ9uJRoscezhIK3YPjQSiVk2zqXQ1NeU8Qr1CUe1APXgAIR3F\nJIJ5YITk/Do3bkQolRpUKzV8bj2jahOGtg5++vwSFZ2bPQfaef75eaZuhjBbtNgdBgRZ5vf+YAKP\ny4DdpmErlMVituO26TAY1KTTFV59dZVarcHhU7sZH3Fw+fQ016/MYBbKlNJpdh8Zw9XmJHp9kY6A\nirExL/PzKYJBK1arwgsZGnJhMmkIhwu01Do0QotgmwGVKOD3mzE7FC8RQVC97TF2Og3s29fGrVsR\nisUaOp3E0JCLtrY3E10fFGdElmW+/e1v8/d///cP5PPvx/jDhw9z48aNt+WNfJTn/3b4lSpG3g8M\nTifuXbuITk3RrNUItFtx93YhBIbIbmxRLDcQHV5yFZlcroRWI1DMFXAP7WXm/DQap5uKZGJ+IYvZ\naeHc+U3a201U6laODnYiqiVMkSLpdIW5uQSSJNLfb2dyMoAgqGg0WjQaTV56aZnZ2QSf/exuXC49\nbrdxR3o7NRXDZNLQ3W1ndjaBSgXj437C4aJCTqs1WV5Oo9dLJBIl4nFFeTM87CYaLWE0qkmnlc6I\nIKgolep0ddlYWEhup3uWUKlApVLRbLa2yZcG1OoHqfz+YNBoBLRaxcH2P3NelPAvFfl8ha4uL5Lk\nBsBo/9kNVGnlKwXf9etK0N2RI0G0WglZltnayuH3W3jqqSGq1SbJZInBQSeRSJFqtYXLZSQaLWA0\nBnnyyX7W17NYrVri8RKqfIKaxkSmJOPoCKDTCogaDZ1ddpqCCo9bR1ByQiFFd7DB9FKBow95KFRV\nJDMVpqai2BwGJEnA0e6nVmuQyrWwu2WuXY+xa5ebO3diaLUS1WqTYrHBxYshTp7sJrkRY/Vmivbh\nbuR6nXK1hdHZTja1giwr0vRAt49MRWJrq0BfnwOAzk7FkbVabaDXqz8W3BBQOntXrmwyO5uk0Wgh\nCCp6emzs2uVhakrhT4miivFxD6dO9TAzk0ClUvHkk/07RPF6vUUr2qJYqhOPl8hkqyQTJUYmBxga\nDTC3mCPbNBIpefD5TBTqaprNFqKoXH+iqKXSUGFxmCnVVaQzNbxeI+VynfnFFKWqjFiqE+xQ08xn\nqaZT2L1NnnqimxdeXsXtNZMttLDbdeTzNZLJErdvR3ZSh1fiAo5gB83oCpLFwfXLq3z604PEVjbR\nFhNM7h9jeNiNXq9cux0dVgYGHLRaCnm3Wm0ycrCfM68sImoknH4nWp2aoSH3uzrPQ0Mu/H4ThYJS\njLwhW/8o4PLly5TLZY4ePfqgp/KhwWQy/cryRn6lipH3u1fl27MHk89HOZVC0usx+f00VRJTxSJb\nghXR4oJYHIdNh683gMVhZfbKJdZvh1me3cLd2UawZ5C6oN7+g1dMrmLJKqOjNrw+My+8sIjFoqPZ\nlInHyyQSStjd88/PsbaW5ezZdXp7HXz3u3f40pdGGRvzcONGmHK5yeHDZjo7OymVaoTDhZ297t5e\nO7dvRzCZtAwMOInHS+zd69+5cYVCObxeI5VKnUJBMesymzVMTPjQ6STi8SJut1JwyDLbdtRpZmYS\nrK5mMBrV7Nrl3k6S/fBxPzkjkiQyPOwmlSpjteqoVApotSI+n5lisU4kUkSSRKrVBGaz9y38B4fD\ngNGobF+JokCrJXPu3DrpdJnRUS8dHSKrqxkOHWqnt9fGzZtR4vFN3G4DoqjC41F8Xo4cCTIx4ada\nreP3m1HHF0htxOgb9JJP5VDKJIFyscKRk1045DixuoqW1sK9OzNkFhO0XF3MLjbQu9x0eczkchUi\nkQJjYz4lZdioZnzcRy5XJZ9XHgqjo14WF9OsrKTJZqt43DVMOiuiVcDf4aSht2PQi7QECVu7n84O\nCzafG8nuRq3XUyzWfu54CkiS5n2ft/uBd5pHLFZkbi65E67XaslkMlXOnFkll/uZJ8qrr97i5MkJ\nPv/50TeN1+kkpqfjqId3MXVxHpNJgyxDtSVQtwfRmww89JCV3l47m5sFNjfXaW+3cPhwkOXlNKFQ\nDr/fjNttQKMRKBbrfOITfdy4sUW12kSnU1OpyeyZ8GAVC8zNzZOK6LhwaxOd3c5XvnScWKrJZrRC\noVBjcTFFV5eN1dUMkiQoickqATRqtAY9cqtJtVRjI1whUiwz0mVgfI+ftVCeQMBCIGBhdNS7Q04/\nerSDqakoDoeez31pL/l8DZfLQH+/g64u27s6xqDIiN+OV/agOCPf/OY3+frXv47wC9yZfxmff7/G\nnzx5kpdeeum/LEY+6vP/r/ArVYy8F8iyEipXqTQxmzVY29owt7W96Xs6hjvILg9gdluRywVS65u0\nTY6Ri8apZLIcPtZLOpFDqJXwGBsUtXoGBpyk01Xm5xPMzMS5cmWL8XEvJpOGixc3qNdb2Gw6AgEz\nGxtZjhzpwGpVwrI0GhGrVcff//01HA4Du3e7sdnA6VQC0qamYpw9u4bBoCGVKtPebiGfVyLtC4Ua\n+/cr7qr37sV2VBCHDweJxQrcvRtHkhTXTZVKxdxckocfDiKKIul0GVBuKoJQ3ZFwFos1cjklFdlk\n0uxsR3xc0N/vRKMR8ftNbG7mt91GZQ4daqdebxGJFKjXjUxOdhIMWt8yXqNRipdotMC///s8q6sZ\nmk2ZdLqCx6N0J15+eRmv14QgwK5dbtrb9VSrCtlzczNHpdLg+PEums0W1WoTi2cAfS2NztHJ6dPL\nrC3HcXa00Tfgp9On4af/ss71Cwuomg2sfi3t3g56ehzcSza4fTvCiRNd1GoN9HoNBoN62zBNh0oF\ne/e2odWKLC4mkWWZUqlOLlel0WhRq8vcXc1w9HCAbK7OwG4/S4tJ/C4b/UYLPp+Jy1NJnE4DPT3G\n+7bCbTWbCG8T3/5holis7chx34Asy6yuZt60/SjLsLSUZtcuD/V6k5WVDMvLaVQq6O21k0iI7Hn6\nU6TX19Fotei9PsI1Pel0Bb2+yaVLmyQSJSqVLJnMBlarjuPHu7DZdKyuZlhaSnPnToxEokx/v4PO\nThuCoOLAAR8ejwGfU830qzfo7vdiNUJN1SCzsYWYj3Hzag6Ty4XdrmzjtrWZOXIkSEeHFbtdz717\nMco1CYvZQStXwNNmwyDW8PqtWHr6uXorth182M7goOtNwYZer4lTp5TFilYrvcXI7uOMdDrND3/4\nQ/7qr/7qQU/lQ8cTTzzBN77xDf7yL//yQU/lQ8WD7Ld+Dfjd7f//X8B3f+7r8hsR1R826vUmN2+G\nmZtLUqs1MRjUTEz4GRpy7XxPo9Hk2qVVNLkNNq9eIxtP4xsdxd7dxcv/z79g11Sxex3ktD7WNwto\nXH5Mnb243QaeeeYOyWQZh0NHW5uZUqnBI490s7mZZ24uCSgy0y98YYTeXic3bmyxsZFnZiaG3a7n\n7/7uCgaDwtcoleq0t1s4dqwTtVrg2WdnkWWZ//bfDvOG9DKXq1Is1nf+L8syarXA8LCLer2JLKsQ\nBEV2d/NmmOXlNH19zp3gM0kSkGUZWYZXX1UyaUwmDeVyHYdDTyZTBhQjpt273fT02O9L2JRKpeJ+\nnfNWSyadLm/LUvWoVCoqlQa5nNKqttn0mM1vNQGORAp861u3uHEjzNZWDq1W2laZaBkZ8RCPFwkG\nbWQyFXQ6ke5uG0tLaTIZpQjo63MwMxPHYtEq2SQagVOTNshEiCSqNHQOii0tM/Npjj3k4Z//+kfE\no3mSiTJmiwaz1cSjnxohKXgol+t4vSYsFi2hUAZJUh4ssgyvvLLMww93cuJEF6+/vk44rHBhFhZS\nO1tvogCtRoMr55bYvduNRq1i974eOrvtPP/8AolECafTwGc+M8TRox0f6sMpv7VFfGaGSiaD2e/H\nNTyM3q50o+7XeQ+H87z44hK12s9kqm63gVAo95bY+44OK5/4RB9TU1GuXt3cUYlIkkAwaOb06VX8\nfjOxWJGFhRQul57/7eujzN8Jcf31adR6PTqXh9VIg2DQSrXaIJOpcObMKoGAGbNZS7PZYn4+yZ/9\n2RGsVi1arcStWxEykSSVcIi+bjMLF2/Q12nEYtbiGR6k2TGOWqvh9OkVnn12llZL5umnh+nqstLZ\naWNqKsbp0ytoJJjYZWa4W8/qnSUEq4tk00qgXXFp/vznRxkcO9yAoAAAIABJREFUdPFRwv38e//b\nv/1bLl++zHe+85378vMfJBqNBl6vl6mpKQKBwIOeznvC9nPjFz48HmRn5EXgf27P4RJvLUbuG7a2\n8ty5E9u54eTzSuKt223YWRGm0xUy8zNsXrvB0lIKQVBxb/EyJ37Hjc5sIp+rY2vUcBuz9BxuQxvo\n5vRUjVKpRi5TJh7N0Wo2GR52odEojqC7d7sRBNjaUnxAnE4D6XQZs1nLvXuL9PQ4sNl0PPKIkvSp\nUim+CNWqEqg2O5vYVtFotj0fBLRaifX1OMlkCbfbiMWiweczEQ7nuXUrilotMDLiYdcuFz/5yRJu\ntwm73UC5rPhfNBotHn+8D0kS2NhQvBd0OolXX13B4dCxuZknk6lw8GAAq1VHOl1Gq5Vob3+rG+NH\nGYKgetNqv1Cocv58iM3NPM1mC4tFy0MPte+Exb0Bl8tAR4fCl3jDGrxabVKtNpEkkdFRhewbDFrQ\n6UTm5lLMzyfp6LDi85lYX88yM5Ogo8NKKJSl2ZTR6SS6u4O8PLVErZYkm62iUqmwGCX0Rj25XBKV\noEKr12Jzmth1cJBUWeLs2bWdbTq7XY/FosbtNvIP/3CNhx5qR6+X+F//6yYTEz48HiPFYo1Pf7KH\noEeiki9g8vn5x3+aIldWkcjJ1BpN5n40xze+cZATJ7pptRTZeV+ffacQKcbjFGMxBEnC5PWis735\n+LwbFONxVs+coVZQeA7lZJJSMknPI4+gvo8qB4/HyNCQk5mZBPV6C1FU4febcTj0zM8nd7J11GqB\n3m4z8dUNbl1cplyU0ZgUk79Go0UolGd42MWlS5tcuxZGpYJdA1ZmXrvKvdk05WyeeqFIKpxAdHSw\nvNzk+PFuVlfTuN0GbDY9y8spxsa8TE4GABXXr21x6FCA3bs9VDt0rJxZZvq1K5gMIrNzKfbtb0Pn\nCxAttWjkimSzVex2xY9o1y4XN29GeOmlFQYHnXzhC6PbW3RVJLeTNbnG3XNxGo0cBw8GOHKk/W3/\nXt/guPyqQJZl/uEf/oFvfvObD3oq9wWSJPHYY4/xwgsv8Ad/8AcPejofGh5kMbK2/W8TaLzdN75b\nvNu9qmi0+BZ9fLGo5NK88cBqVcsk5heRWzLVapNKpY7QrDP78lkCo4Ms3bhKcm0Ta4dA94SVss2F\nKEQx61Xo1DJaSWZ02I7TrudWKMbCQpLduz0MDTkZGnLT3m7m3Ll1ZFl5+DcaMnNzCYaHXTgcOlIp\n5aFfKFTp6lK2cIJBC/39DkKhPFqtiNms3SZKqrl5M0ssVmL/fj9LS2nm55OMjnpoteDevTiyDLdu\nRVhf/5lJkU6nWJvXag0kSYPTacBkKjI1lSaVKvNGwqzVqhQlFotiIb6+nvlQipH77TPyBur1Jsmk\n0hVxOpXtlYWF1LZ7agKt1kU2W+X69TBer+lNq2Zla8tNqVTH5dITiRRJJkuMjXk4eNDPmTPrLC+n\nkSQBu72MSmVnaSlFpdLA5dITjRZotWQKhSrNpozJpMFu15NMlpiaiuHxGBFFAVH8/9t78+C4rvPA\n93e7b+/7ikZja2wEuIAbQBKURImiZFGylOh55MyMHHvieF5cfiNHznOSmky9vBfFcXnKlakoVtWU\npXmx68U1kR1viSKF1mKPJWsjJVAkKJIghZUgdnQ3et+X98cFmgQBLgAaG3l/VSqhL/uc8/X9zr33\nu+d8i0AyA5baWowDQeoaXVQ0WRgdV/HhmRBGs459+6qIRKSts/7+IDMzSTo7q6iuthCPZ/nZz3pK\nzsx791Zy/yEvudA0l35zipyQoG73QZTFHEqNBoVKjaKYx+GQqjjPGVlarVhayg/29zPy/vtkE1Km\nVa3Nhu+++5hOJJakg8joaMkQmSM+OUnC78dSU7MkXV7NzeaCUqmgo0M6P5FIGrNZjdstJayzWLQM\nDs6g0YgYdSGE4TOMRtJMX5wmHE5ia2jA4JKcmwUB6uutvPXWMF6vEZtNR0uNyEDXAOmCmanJGBqt\niN4O+VgEq7ea06fHAejrkxITTk3FS/48L/38LCfeG8JkuBtRUcTqMFC/q4nUTIBAPIbTYMa7YytZ\nnRMTSkDN4OAMarWSffsqOXlynAsXAgwOhhgcnOHDD8c4fNjHqVNjBIMpPB4TkcgkAwPSVpXTaWBi\nIkZjo33e+QkEEvT0+JmcjGG369i61bmgEm85/IPW2mfkpZdeQhAEDh06tKz2m8Hn4tFHH+XnP//5\nosbIZpB/MTaCz8hXgH9eywENBtWCY6I4F31RYGBghpg/RCKeIRZNS1k2p/KkwhGi034uTTbRfF8n\nxngMs9tO3eH7mPBnMaouo07H2d/h4SMhh6PCxM9+fg5RJeL3xxkfj3HkiA9RVDI6GiWbLVAsgtUq\nhRqKohKjUcP99zegUl1iZiZFU5MNrzfP6dN+2tu9DA2FueeeWjweAx98ME5PzxR79lTy6KNb+M1v\nhkincwwPh6mrs5QeKvl8kZ4eP42N9nnGSDqdw2zWSE55s5ESVVUWPv54Gq1WqjXicOhQq0WSySy5\nXAGVSskqrayuCsFggvffH5EiWQSorDTS2VnD6GiEQkEK0xRF6c0wEkkTDqfQauenvm9tdZJK5Rga\nCmGz6fB4jOzdW8nJk2NMTMSIxzOcOzeN2ZzkySfr8HpNBINJpqbiuFwG/H4pA+yWLQ5EUUFPjx+v\n18g999TO1oJJMTQUYutWBzWtdeSUGixmLR+duoiocdNzMUg0muHwYR81NWbsdi1Go5qPP55kbCxG\nR4eXt94aIh7PzupNycR4hPd/HWFXi4GGBjuf9EVJTk1Q5xaJZ/RXFWtUYTComJpKIIoKtmxx4HIZ\nyKXTTHZ3lwwRgNTMDFPnziHU1y9JB4XcwneNYrFIMX/zLJ8rRRQVVFebCYWS9PXNcPr0BA6HnuZm\nB7t2VSAIAh8eO0Z4ZASt1Yq31kqwK0zk8ghaiwWl+kqVYINBhVJpQBAEkokMA+eGOfDYIfyTYfxT\nUVRGNWa3mt3tlbz99iUGB0P8m3/TSiyWxes1olAIHDxYzbPf/jUCcGlohv0dHk6fvEwOJVs67sWl\nnKGpsZ5YQc/57mm0WhVbt7p48MEG3ntvBKtVx7lz01itWoxGFSAQCCSJxzM0NNgxmdTkckVyuQIe\nj5nGRhuTkzH6+oKllw+QjJS3377E1JSk30BAmq8PPdS44WrLLJWf/exnPPXUU6uylbxROHr0KF/9\n6ldJJpO3TQ6VtTBGKoAfXXNsHPgccAB4GPjfyjHQrVpj1dVmnE4ppHWO2loLLpee3t4g7747jMWi\nwdfWwJlfn8Tl0mOzqJgS49R37ERhMxPvv8TI2BR1LSn2qJU0NBjZu6eCrjc+YmdrLXfdd5j/9eYl\nvF6L5Pk+W5NmYiLKZz+7HaNRhcOhw+9PkkrluPvu2tlcDtIb3Je/3IHfL2XLDASS/N7vmXC7DRiN\nagwGFW+8McC5c1NMTcV59dU+Ojq8/Pt/v4N8Xtq+0enmG1yCUMTns9HfP8PISGS2yJ6ZqioT//qv\nvSgUsGWLk927W5iYELHZdLhcerZvd9PbG0SvVyGKClQqBbW1C509V1Nfy21TLBb56KNxRkejpWND\nQ2GMRg0mk4aengHi8Rxq9RRVVSYaGmwLfAkAdDoVBw/WsH27i3y+iNWqxe9PMD4ubbcJgsDYWBRB\nkHKPPPbYFrq7J0mnc3R21mCzafH740xNJejqGqOhwUYikSEYTNLR4eXkyXEqKw04HAYaGqw0N9t5\n9dV+NIZKFApIpfK43QZ6e4Ok0zk++SQ467BaSSiUJBZLUVtr4YEH6slmC2g0SuLhOIV8EZfbgDku\nYleryUxPcOSedrbnHRQK0u9oaXGi1YrMzKRwOnVUVpoQBIFMPD7PEJkjGQjQusQ3TlNlJdNqNfnM\nlQgdjdmM1m6/Qaubc6tzIZnM8s47w4yNSaszY2MxxsaiPPhgAxaLFmMqRQJIhcNsra8hm6lmbDSM\nWixS32Rn164Kstk8W7e6OHlyDKNRhT+mwF3lID99icP31zMdzKBSidz16H4CsSIOhx6TScO+fd7Z\nkgRJursnGR+LUuez43brmRn3k09ZOXy4jt+8O85EVEVr61ZefnOUs2cvUFNj4fBhH8Fgkk9/uhmL\nRYtarUSrVeH1mtBolPT2BgHJD0qrVaJUKujqGuPSJQGvV9pmPHNmiomJGCaThro6C9u3u5iais8r\ndAhSGYixseg8Y6QcUVNrmWekt7eXV199leeff37ZfWyGPB0ul4v29naOHTvGE088sebjr0b7tTBG\nJoH7FzleBfw34LeBRd+1/+iP/gjr7B51a2srnZ2dpR86V6Z4OZ8tFi3NzQqMRhBFOx6PAUEI03f2\nY6L+AjX2IuPhKQp6K9sO7SE5NYHRo6Hp7kaMxgrGB8ZIWXU4Kppo3OZDpdUyPHIZqy3DQw/UMJnQ\nMhMfxWxOotOLqFQiIyPDCEIGUTQxNhZhcnKEbdtcOBwWAoEkMMPevTo6O1uxWrVMT4/h9cLOnXWI\nooKJiREgQV2dm1OnxvH7xzAa40xNSQ6MFy/2YbensdkqaWiwlX6vRuNEqRSoqYHJyRH27fPS0VFJ\nPD6NWq0s+YSk034mJ0fR6drZu9dDIDCG3x9l926peqcghNDr4+zfv53qavOKzv+NPpeTaDS94IYL\nMDg4Q0ODbTaPS45UKkc+X+TQodobhihenejr6pcuvV7EbNaQyeRxOHQUCgW8XiP19TZ27/awc2cF\nAwMz/OQn52htdZLLFSgUYOtWF3a7js9/vo1oNItWqyQUklK1ezxGKXQTKdfM3MrNxESMVCqHzaZj\nYGCGw4frEAR4990RLl0KSW/IdRbMFg172l14LHmysQzbtrpIpXPUNBh4YF8bsVgWne5Kpd1rFzvU\nBgMqo3GBQaJ3ua5bjPB6mLxeqjs7mTx7llwyidZioXLvXrTmtfE7mpyMMTERn3csEEgyNibVUNE7\nnST8figWKUyPsLfBxq6dzVTs3IbbYynl2/h3/247jY02hodDDA2FufuJo4QudDP+cQ8V9R5aDu5h\nS1sN+XQSh1XDL4718vJLF0lnpYy1tbUWivk81RUigYkpFIKA06Gl2RKi7j/sYnomy8svf8I771xG\npVIyM5Pivfcu8+CDDSiVCj7zmVb8/iTJZI5EQvJTqqmxkM8X6OysJpvNkU7n8XiMHDxYg1arpK9P\n2rJVqRTk8wXOnJmkWCxit+sWXeGcC4PerHznO9/hy1/+MgbD2hZtXA+efPJJfvjDHy4wRjYr67lN\n838DbuDns58fAVJXf+Fv//Zvr9v4WuvL5/PNe6At9u9X09bWQttsWoFiscjUxxOMnvmY7vcHKAgi\nVbu2k3M3EVN7cPpa2dvuRVnIMvJRN8N6F0O9QygLeoomEdNoDJ/PR6G6mk+6LvDuS+9jdNqx2yqJ\nRfsJBKNcupTHbNZTW2vho48m0OtVmM1KisUolZVGMpkK2ttrqKoyz94sPCiVCgwG9YI9uFQqh0bj\nxOGwEgj4Z53X9FitHqqqTDQ22mczukYRRYGWFietrU7i8QzDw2Gi0QwWi5OhoRBjY9KqgUYjedp3\ndZ3nyScP8ZnPHGR6Oo5CIXD//T4EQUCjUaJWi7d0fm/l8430dT2Wsh+pUikXTdxWKBTx+xMcPuxj\nZGQYsOLxGJe0PG2366msNDEwMIPBMJfZM0o6neenP+1BqxUJBlNEoxnuvbcWr9dEY6OjFEZtMKhR\nKgXsdj1HjjRw7twk58756e6WnI6l6rJh4nEDyWQOUVRQUWFAqxXZscOA0aghEEgwPh5nYCAord4c\n8DI1EUZJgccea2ZbjUDk/CkoFplKpjAbHUSVDs6fn6a21loyRBZD1Gjw7NrFyPvvS/4egoDObse1\nbduS94QFQcDZ2oqlrk7KdGw2o1Qt3CpdKrcqRzZbWLTY41yUTcJsRme3kwwGKRYK5BNxqtp24PTO\nd9a12XQ8+GADg4MhzpyZYHgqTm+4DoXRxeCEkpHTk2hdYzBwEoVSxd6tbtTFFEm0NDRJK1Bjo2Gq\nKw04zQpaWpzUGqOoDSbqt1eSPD2JIIRKfkuiqCAUSpFIZNFolJjNWsxmKQX76dMTpfD+HTvc+HxW\nXnutj1gsQ39/kN7eQUIhLY2NNurqLKVt6HC4yOBgiNpayRE+Gr2yWqXVilRUzH+IbyafkWAwyD/8\nwz/wxhtvLLltOcZf6/ZPPPEEf/zHf0w4HMZiubJavVnkv5b1NEa+so5jzyM+Pc1EdzfFTAq7XcfI\n5QhDH5xm62MVDEW0VFTYsVZID+tiLRAcplqjQqtxkcgpOXFidNb5Uw2WCgy1jfQPhtjuUXLo3jq6\nuqT6EPv2VRGLpclmC9hsOtxuPZcuhWludlBTY8Fu1xEOpzh1aqIUmtnc7MBkmr+37vWa6Onxo9NJ\n6ZfD4RQmk4a2Nje1tVIOg3vvrSMWy6BSKUoPHatVh9UqPXADgQSnT09c95zY7bpVTQW/Fuh0Klpb\nnZw4cSVUUwp5dnH27CSZTAGVSoHBoGVmJjW7EnFrKBQC+/dXodOJjIxEZhPJGenuTuLzWbFatWg0\nIpcvhxkbi1Fba8bl0pPPX3nzFARpezCTyRMOZzh2TAqvdbsNHDlSjyAYiUalNi0tUgrvqak4RqOa\ndDqPw6HH7TYQCCSIhRN49ClqGpVQBFV8kvRInMrduykUigT7RomKVfSei5HNRujrm+HIEV9pPiyG\nrb4ejdlMwu9HUCoxVlSgMZmYjsev2+ZGqHS6VY2euR52u25BDRWNRonLJT14dVYrngcfJDYxQT6b\nxeByYfQsXpdFEATGx6MYjVLhyw9PTpLJ5Nm924NGI3LxYoAtdhfZ8UHUyRkaXXY0TgMZtRqdTuTg\nXXUUD1SjETLY9EX0Rg1GjwdBocBs1uB2GwiHi6Wkc4LAbPXtK5FgFRVGPvWpxpKRMucbVlVl5gc/\n6MbvT2C1qsnl1Jw/P80XvrALg0E1b1vabNZw1101nDo1TjSaQa9X0dbmXuDAupl44YUXePzxx3E6\nN1YI82phs9k4evQoP/jBD/jDP/zD9RZnxWwEB9aysVxrLptMUixIDwmv10Qumyc4kyIXCdHSsp2d\nOysA+OQTPy+90k9vbwCDQcXOnSI2m4ZIJE0olMJoVKPTq8iLWkIJgXfeG+XQoRq++MXd9PUFCAZT\nDA+HsNm0tLRITnEWi/SmY7frKBSKfPjhGAMDMyXZTp8e57775v+u6moze/Z4uHDBj1qtpKrKTHt7\n5YKwVKNRfd3fbLfrqKoylfacQUqV3t6+dVnncDmsts8ISFsher0UjaBUCtTX23A69Vy+HJ7dwrHN\n1uERF9TUuBlms4a7764lmcwiigoGB0OMjg7N+06xCJFIGrVaZP/+Kj74YJSZmSSiqKChQcqi29sb\n4OLFACqVsrSN8/HHk+zcWcuWLU5GRyMMDc3wzjvD2Gw6OjurgUypj7GxKIp0jIlzA6UNz7YdbhSK\nJLlUCk3DdqbOCcSDVx7Gfn+C4eHwDY0RkMol6B2OecfK4UdQDm5VDodDz4EDUoRLLJadzVDrprLS\nOK+fWw1bNhrVTEzEZrP7avD5rKhUCn79v4Kko8M8dLiSnQ1uJi+dZ3L4IhVNNajbDjE6GiUez/LI\nI02lHDFXU1FhoKGhAYUixPR0nEQiS0uLk/b2ytKK5BwKhbDg+jYa1bOZchVEo3qqqvT4fBZisTSF\nQoFstoAgQEODDb1eTV2dlAYgFpOMkWv9zK4+NythLXxGEokEzz33HK+99tqm9ZlYTvunn36aL33p\nSzz11FOlTLObSf6rua2MkaWSDAaZGRwkcvkySrUarcUC09M0NTtIJnP49tRStbMWhUIgGExy4v1L\npGMxtGqBSCTN8eOX+fSnm1Grr2wHeDxGqqstnDo1QbEIb711mUOHati1y1NK4+zzWTGbpVolcyG6\nIBVxm6s5AWA2q1EqFRw71svWrVI4cE2NGYNBw549km9IKiVFxCx2I7kRgiCwb58XrVbk8uUIKpUU\nSdHQsDKnwo2GUik9sK9N9z6XCtvvT5SquHo8xuv0cmPmzr3ZrEajUZJOX1lhkbZipAe+x2Pk4Ycb\nmZlJoSjmMWqLCMU8o6NRWpqt2FUxwsMjxBNZtJ5qCoUCUJwtyCcVPZucjHP+/BQPPdRIba0Vl0vP\nyEiEczNhFKKIgiK77msjEIWP++P4dgo02hZf9YlEMguO3a40Ndnxeo2lVYDFEtzdKrW10gO+rVmP\nmE/h8Jp46aWLKACTtkgwkOT4xBgtjR6Kl/yoDAZyOclKjMezpRw112IyabjvvjoqK43E41LxzMZG\n+7wVykKhyODgDL29AeKBEFUuJV6bgKXCgdEoFcELhVIUi6DXS3VyamosZDI5VColDQ02tm1zlfrT\naMQbbtdtFl544QUOHjzIzp0711uUNeXuu+/GbDbzT//0T5ved2Tzz8KrWMpeVTIUYvDXvyYZCFDI\n5wn29WHyerH6fCQDAdyNtVQ0+VAopCyBY30j9L93kmQsRSFRxFVjJRC14Pcn2LmzorSMKi3ZVpBK\nZZmaSuB26zGbtYyMRDh6tJHJyQSffOJnejqBViuyfburtFysUAglx0hRVKBQKHjjjX5aWkT6+5W8\n8UY/1dVmDh/2sXWrc7YmxPLPl9Go4eDBGnbvzqJUCqjV4prWHVmrPCOL4XJJWyEXL/bR1NRYljT3\niYSfHTvcnD07RTqdR6VS0NwsPQTnUKmUKGdGmDx7lvFUCn1FBTplFSffHqTnxEVsDj1eh5Kxd39D\n5+8eIRQyzTM0q6pMZDJSnSG9XsoJcvBgNZWVRvo8IjazSM+FAJfO9qPS6ckNp5hOjKDTxclkrjzU\nBIEF/gG3ymapTXMter26VIl4Rf0osziivUxP9+Ar5hBQU+PVkyeK3ewgn00TDcVRGbyYHFbMjVu4\nFJBq4czVqRkZiZDJ5DCZNDid+lIYajQ6hc/n4MyZSQYGZggEkrS1uUshuf39QalG0sg4wb5+uily\n4N4m6ky9uNvaaGiwcfFigHTaj0LhxG7Xs29fVSmdwVINj83gM5JMJvnrv/5rjh07tqz2Kx1/PdsL\ngsC3vvUtnnrqKX7rt34LtXqhj+Fqjl/O9reVMbIUIiMjJANSanaFUomtvp50LCalqm5txVJbW8rC\nGB0dJTU5CrkMolDAoMxSiExT6XbT0uJk927J2XR8PMo77wyTyxXQ6aSiebFYhmJRqnHhdBqoqDDi\n8RhIJLKYTJpSdV4Aq1VLba2Fnh4/ZrOGixf92O160ukEp04NA5SSd4mi5PtQDpa6qnK7IAgCWq2q\nbPV2FAqBPXsqZ2sGpdHrVbjdxnn9R8fHGTl+nHwmg6BQ4I8KvPHKm4STEA9HCU7OwLYqDnTUYhRi\nCBYtIGXGTSSy9PcHKRSK+HxWzpyZpLOzmvp6G1u3umhutNB7bpSZ9y5j9fnQ2myo9XqSyRwWi4hC\nIfnGqNVKGhqsZQvRvtOYOH2a+PAAJk2BtKFIRohTZc5StFgoFg04PTbyNi2+tka82xr55HIOyOJ2\n69m61ck771xmcDBELldAr1exe3cFO3ZIW8GxWIYPPrhEKCQZL+FwmpmZJA8/3ITFouXCBT/pVJbo\n+DjFfJ48cLFniuq7HAQuXqTjyEM4nXp6elLU1npoaLBtet+vm/HCCy+wf/9+du/evd6irAsPPfQQ\nLS0tfPOb3+Qb3/jGeouzbG4rY2Qp1lj2Gic8pVqN3m7H5PViqqllZCTCxPkRjEY1msAwmkyIxhYP\nF86MoNWqIA/bWqzs3u1Bp5OWQ0+dmmBmRgoIcjr17N1bicWioabGjNNpKKVcvp5vgiAI7N1biU6n\nIhRKlpJQdXdnEYQcer0KtVpJMJji448naW11lj2xz1q+8a6Fz8hy+8vnC4yORhgfj2EyqdFoRNRq\nJWaz5rrhv3N9VVQYqahYfMsnOjZWyrehMhjoG4mSDocwWZ2EkAzDdCyOu64ep1jE0mBjYGCGaDTD\n1FS8lJo/lysQjWY4dWoCj8eITqdCVKvR2aw0dmwnGs3Mq8lSV+ejrs5COJwuOW8uNwX4RlgVgfLJ\nsZR+0pEIkdFRQPL3ymYT5HR5rPoiGb0Dp0vShWerF6XFTjKYZMcODW63EaNRzfR0nIGBmZJDdSKR\npbt7Eq/XTDabJ502MjZ2ad4KTjSamc0ToiaTyVPI5ShkrySSy2fzFIoK8qk4KmWR7dvdbN/uXvNz\nU64+lvL9YDDIt771LX71q18te7yVjL9R2v/d3/0d7e3t7Nq1a8XbNbLPyBpjcLsRlMp5WSDVRiMa\ns5nu7klOn56YXeEQ8QpRxMnL7NrSgMfTysRkDJtNz5776rBapQdTPC5VzwXJEIlG05w+PYHBoOKx\nx7bgdN7akrjBoKajw0s2m8du1zE0JL0VG41qAoEkkUiKmZkUoigwOhqmunrptUJkbs65c9OcPDmG\nxaLh8uUIAwMz1NRIZdj37auiqWl5vjXX5ugoFqQwWp1RquQMoNVpUKqUmGsqcbkMPPBAPQMDIXK5\nPDt2uNHpxFL+lGg0XQrPvHgxwLlzUwwPh6mvt6HXqwiFUmg0Stxuw7xoKpnlISiVCLOOgoIgSFus\nyhh1v91KylJHOJLBbFYTDKZ4993LKJUKXC49Fy4EEASBeDyDw6EjHE6XfIt0OpHjxy8TDCYRRSVn\nz06XahvNUSyCKCqpq7MSCCTRmE3kklKYeGWNDXUhgb6iApV+daotb1SeeeYZPvvZz9I2l6fhDsXj\n8fDKK6/wyCOPcPHiRf70T/8UVRnC59eS26c6EktLnGWuqcHd1oZKr0dQKtGYzVR2dJBR6rl40V9K\n/pNM5sgbnYRTCrJTo7iKk+ypzWOzJamovvJA0mpF9HoRk0mN35/gV78aZGBghtHRKO+/PzIvauVW\nUKmUbN3qoq7OTGOjglyuQCCQwGzWolQK1NZa6eqaIBZsVu52AAATMUlEQVQrrxPiaiQfK+dY5ZZv\nsf6i0TTnz08DkE7n6eoaIxBIMjEh1Rfp6hojEkktaHcrspmrqkrbf9l4nNoaE+ZKN6bKSjRGI0ql\nkqqGCjxVdiKzhovbbaSzs5r9+6vIZPLzErlJ805Fd/cEH344SjyepVAo8u67wygU4HRKNW1SKf8y\nzs7irOUcuRHlkmMp/agNBuyNjfOy3gmAp8qGy53jwQcbyOfnnI6lsNwPPhjltdf6GR2VjNq5ek/A\n7BaewIULARKJHNlsAItFw/BwmERCuraNRnXJv2frVietrQ4qWxtw1HrYsrOGtlYLKoMBz65dpZXS\n9Tg35erjVr9/+vRpfvSjHy3YmlipzJu1/Z49ezhx4gTd3d34fD7+7M/+jLfeeotsNnvzxmUYf6Xt\n79iVEaUoUrVvH/aGBnKpFGqzGa3ZzMREbF40BMBkVKSl8x6M6XHC00HMHi8quxVRe2W5Xq0WaWur\n4Pz5abq6xigUimg0SrxeE4WCVASvudm+pKVxnU7F3r1eBCGC2ZzGbNaiUgmzKbyVBINJwuHUDUN4\nZZZOMpkjEkmjUAhcvhwuZapMJKSLOhbLEIlk5mVkvVUMLhd1993HdE8P6VAIT72dxxpb6B2M4fA6\nsJuV7N7tobLWxaXh4Xlta2ut9PfPlLZfVCqpiF+hUGBwMFSS0+UyYLFoyeeLfPrTzej1aoaGlpcb\nRGYh7rY2lBoNM/39CAoFji1bsDc1ER8ZASjl8xAEqS7UpUthlEqBbLaA3a5nejpBNJpGrVai04lk\ns3n0euktNpHIcvfdtfT0TKPRiNjtWrZtc2M2S9E/BoOaQ4fq2LHDTTazDb2QgnwOndOJqL5z7gOp\nVIovfOEL/M3f/M0dk1fkVqirq+Pb3/420WiUn/zkJ/zJn/wJAwMDPP7443z5y1+ms7NzvUW8LutZ\nSeg/AP8R0AD/A/j+Nf9eLK5DRbZEQko+FQxeefMtFouzxbKKJOJplKKSpiZHqYDW1QwNzfDTn/YQ\nDqewWLSlm4jTqeO3f7tl0ZC+W+H06QkGBoKAQDKZLeXGeOSRplveAtroCIIUubSeFItFenuD/PjH\nZwkEktjtOrq6xjGZ1NTWWvD5rGi1Ig8/3ITbvbLzXsjlSts2mUyOTKaAwaC6oR9QMJhkdDRCJpOn\nosKI12siHE5x7FjvvKReIEVuPP54a8lBeqOyEfS+HIqFAgjCAn199NE4XV1jKBQCFouGl166iNGo\nZscON2q1klQqx7ZtThwOPXa7jmAwyYcfjpXai6ICq1WqIzMxIfkJmc0a9u+vuq4v0mZkJXp/+umn\nGR0d5ac//eltXRCvHIyMjPDjH/+Y73znO+zcuZPvfve7VFdXr4sss7paVGHruU3zInAfcBfwn9ZR\njnno9Wra270lI0IUFTidepLJLH19M4yNJ7h8OcoHH4zi9y9826yrs9LR4aWmxlLqQxCgvt62bEME\npERnuVwBvz9BPJ5FoRBoarLjcNxZe8SrzcREjK6uMVpbXYiiAr1ehcWinl0qlyqvNjbacLlWft6v\n9h9Rq0WMRvVNb6x2u462tgra271UV5tRKASsVu2CHCmCAI2N9g1viGxmBIViUX01NdmprbUgCNJ2\na22tmbo6C2q1dP1brVp27HCzZ4+UqLC+3jovw2o+LyUnk5LoSYnSxsdjnDgxSiazsALyncYLL7zA\n66+/zve+9z3ZELkFqqur+frXv84nn3zCgQMH2Lt3Ly+//PJ6i7WA9TRG5q4qDVCWNeRy7ZPW19t4\n5JEmjh5t5OjRRrZtc5VquMwRDI7PWz2ZYy4iprnZjsGgwmSS3ohaW5e/lDg0NITTqeeBBxro6PCy\nbZuT++/30d5eWfaL8U73GZmaihOLZUgkstx1Vw1btjj4vd/bzb/9t9vZu9fD4cM+Ojq8i573tfBn\nWQxBEOjo8NLYaCvNuZ07K9iy5crKXTllu5N9Rm6lH7NZw5EjPh5+uIn29kq++MU9bNvmQq9X4XTq\n6eysnpd23WrVcf/9Pg4cqMLjyXL4sA+Xy7Dg/hIMJhe956zFb1rLPm70/RdffJFnnnmGV155pVRE\ndaXj3SntNRoNf/7nf87LL7/MV77yFZ577rk1Hf9mrLfPyP8D/AHw5+XuOJfLEwymEEUFNpt2yQ9t\nKaGY5BMQjfrnVbhUKgUUCq6bn8Js1nD4sI9wOI1CwbJ8CxbD4dDLKyFlIpcrMDMj+dwUi8XS/JhL\nCjVXzRekVYajR6WMpxsVi0XLkSP1hELSnF9JhlGZlaNWi6VEZQA+n4VQKEU+X0SnW3jbtdl02Gw6\nLJYUPp+D0dHIgu8olULZcuJsRp5//nn+6q/+il/+8pc0NTWttziblgMHDvDee+/x6KOP0tfXx7PP\nPotSufxV+3KxFmtcFcCPrjk2ATw5+7ca+BVS1d7YVd9Zts/I9HScEydGCASSKJUKfD4L7e3eZSf3\nikTSvPFGP+m05GiWSuWwWrXs3+/FbpeNg3KxVr4DwWCCEydGmZ5OoFAI1NSY6ejwYjCoCYdT/PKX\nAwQCydL3PR4jDzxQj8GwsR0EM5kcExNSTROrVcptsRm2aTarz8hSGB+XtnbnjMWmJjt79njm1ZzJ\n5wtMTsaJRNLk8wX6+2dK5SEEAVpbndxzT+1tszVxq3pPpVI8/fTTvPPOO7z00ks0NzevgXS3P6FQ\niCeeeAKj0ciLL76IwbD6voc38hlZz1mtBjKzMrwJPAZcvRdS/NrXvlZaimttbaWzs7OUUGVuKeja\nz9XVNbz2Wj8DA4MAaDTO2eJQAo2N9pu2v97n7u4L9PcH6epKUCwKOBxpvF4jjz66H4tFu+T+5M8L\nP9fX16/6Q6lQKPKrXw0wOBiad7y9vZL2di8gRUN88kkAvz+Bx2OkudmOzbax83Mkk1nee+8yQ0Mh\n8nkpkmvHDjd795Z/K6/c3O7GSDqd49ix3nkh2YIA995bR0uLtH2bzxfo6hrj/PlpstkCoqjA7dbj\ndErRNz6flaYm+22VLflW9H7ixAl+//d/nx07dvC9730Pk2nzVhXeiGQyGf7gD/6Anp4e/uVf/gXP\ndapVl4uNaoz8BXAYyWfkR8C1G1hLXhkZGhrCaHTxi1/0kUzOd/SqqDDw+OOtyxY2Hs/wyiufEA5L\naZrTaT8ajXPeQ2y1GNrg9WLKNdat3JxWKt/MTHJe5MmcHp1OHY8/3rrsrKTlkG0l/fX2BnjzzaF5\n24k6ncjRo1LUTzllK/fvXK4xUi45Vruf8fEor77aRzZbmHe8vt7Kpz7VCMDYWJTXXusjFptCo5EM\nFKVS4IEHGvD5lr49uJHOzfX6uJ7eh4aGUCgU/OVf/iXHjh3jueee47Of/ewtG9UrlflOa18sFvnG\nN77BCy+8wPPPP8/OnTtXbfyNGk3zl8D9SNE0i3vSLJHjx48jispFHygrfaOIRjPzDJze3m6AeVV2\nV4vjx4+v+hibZayVyqdSzZ8fc3rUaMQVb2mU+9wtpb9AIMm19/VkMkc8nllyX+WUazUplxyr3Y8o\nKhb19dBqr2zRRKNpstlCaT6ClKNkLqtzuWRZj35utY98Ps+bb77JU089xZ49e6isrKSnp4ff+Z3f\nWdLq3kplvtPaC4LAX/zFX/CP//iPfP3rX+dLX/oSx48fX/Zq5XLlv628oS5cuIDVqqWhwXp1gkQ0\nGiUtLQtzgiwFg0E17+YxONgHMC8kb7W4cOHCqo+xWcZaqXxGo5rm5ishr4ODfahUirLU+Sn3uVtK\nf3NlCa5Go1GWjPByyraWc+RGlEuO1e7H4dAvKEqo04k0NtpKnw0GNaKoKN1XQCq8OJceoFyyrEc/\nN+qjUCjw93//93zuc5+joqKCr33ta+TzeXp7e/nmN7953YiZ5Y4nt78+hw4d4ty5c+h0Oj7/+c+z\nZcsWvvrVr/L973+f119/nVOnTjEwMMDk5CTxeJxCobBoP8sdf72jaVaFPXsqMZs1DA6G0GrFUtz/\nSjCZNLS1uenqGitlaHU69TQ3L69Gicz60dbmxmBQMzAwg9Wq5fBh37KWwjcSNTVmamrMjIxEZuuY\nKGhpcaw4MZvMylEoBPbvr8Ju13HpUgiDQU1zs2NewUyPx0Bzs71kECsUAj6fFa/39vaRUCgUHD9+\nnCNHjvDtb3+bmpoannnmGex2+b66Hmg0Gvbt28crr7zCyZMnefvtt3nzzTcZHx9namqKSCRCPB4n\nFouRSqXQ6/UYDAaMRmPp/xMTE3R3d5c+Hzp0iN/93d+96di3lTESCklOiRqNyLZtbrZtK0/lyjm2\nbXNht+uYmorz/vsKHnywYdlvLkth7netBRt9rHLIp1aLtLY6aW118vrrCurrbTdvdAuU+9wtpT+D\nQc1999UxNhYjFstgt2uprDSVVoDKKdtazpEbUS451qIfnU5FW1sFbW0Vi/67KCo5cKAKt1vJ/v1V\nmM0avF7TvNXYcsmy1v3crI/vfve7ZR1Tbr/y9lLuog46Ojqu+718Pk8ikSgZJ/F4nHg8zrPPPsvn\nPve50jGXy3VL425kN/s3kTK0ytw5hIGVLWHJbEbigLyEc+chX+93Hm8hBa7IyMjIyMjIyMjIyMjI\nyMjIyMjIXJf96y2AzKog6/XOQtb35uWO1N1G9hlZDlpgeYH5S0cDpFeh3w7gIGAFQsD7QNcqjLNY\nWLcAvAY8uArj7UAqjnh13FcnsJSgdCOSjNGbfXEJlHvOLHdelEvvq6HXcuhujj1Iv28Q+BRSJuZf\nAIvHCV6fcs+FcsyD5eh+pXovp77Lpefl6rhcOl2pLpeix5Xorxy6W6nOynU9zvEU8N+X03CzGiNP\nAn+MpIR/Br4NFIFfIyVSWwteBx4qc59/izQZfskV564HkH7n18o8VpLFJ+wuoNxxdX8DuIEs4AK+\nBExxc319CfhPSA6O3wf+d6SL5GcsPVHeWs2Z5cyLcuq93Hpdru4W47tIN3od0sMiCkSAauCLN2lb\nrrmwmvNgqbovh97Lpe9y6XkpOl6pTldLl7eqx5Xqb6W6W6nOVnI9AryNdL6vtiO2A2eBe2+h/W3B\n+0hhyQLwfwAvATYkJZSbt6/z38wqjPWbJR5fCR8hWfPX8stVGOvtq/7eieRRvY+b6+s40tuDDriM\ndOELwHvLkKHcc6ac86Kcei+3Xperu8W4+vd8fNXfb91C23LNhXLMg3Lpvhx6L5e+y6Xnpeh4pTpd\nqS5XqseV6m+lulupzlZyPQL8n8D/x3zD5xe32HYBmznPyFxu9u8iKfVfkKzEcuNEslQz1xx/YxXG\nOgn8DyTLPAqYkSztj1ZhrEeRLPNreXgVxlJwpTDiGeAzwP9EsqJvRBrpTSkJ/L9c0cFyt8fKOWfK\nOS/Kqfdy63W5uluMq+uU/19X/X0reafLORdWOg/Kpfty6L1c+i6Xnpei43LodCW6XKkeV6q/lepu\npTpbyfUI8CzSysp/BL4CvMgKdluUN//KhkQJ+JGWxgBGkSZELfBKmccaACZZuAd5Hhgv81ivIf2W\nLUA90u/8ZyTrs9zEgPwix5e7V3gjziHdYOKzn5PAPwLDSEt616Mw27bAFWtdDdRw69b7HOWeM+Wc\nF+XUe7n1ulzdLcYJpLfOAnBx9pgamL7q8/Uo11woxzwol+7Lofdy6btcel6Kjleq05XqcqV6XKn+\nVqq7lepsJdfjHHngQ6RVqU6kLaPVWF3fNLy4hmP9cA3HkplPOc99ueeMPC/WlnKd73LMA1n35WGl\n53GlupT1uI7cLoXyKtdwLM8ajiUzn3Ke+3LPGXlerC3lOt/lmAey7svDSs/jSnUp63EduV2MERkZ\nGRkZGZlNimyMyMjIyMjIyKwrsjEiIyMjIyMjI1MGFq/LvfnHkplPOc99ufUoz4u1pVznuxz9yLov\nDys9j+vdXkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGZtPyMFIp6F7gP6+zLDJr\nw/eRUlJ/fLMvytw21CAVMzuHlLb76fUVR2aN0CKlYD+NlHL+v66vODIyi6ME+gAfoEKasFvXUyCZ\nNeEQsAfZGLmT8AC7Z/82ItUBka/1OwP97P9FpKrF96yjLBsKOc/IxmE/kjEyhFRs6EfA4+spkMya\nsJzS8zKbmwmklw2QiqX1AN71E0dmDUnM/l+N9AIaXEdZNhSyMbJxqAIuX/V5ZPaYjIzM7YsPaWXs\nxDrLIbM2KJAM0Umkrbrz6yvOxkE2RjYOxfUWQEZGZk0xAj8Fvoa0QiJz+1NA2qKrBu4FDq+rNBsI\n2RjZOIwiObbNUYO0OiIjI3P7oQJ+BvxP4J/XWRaZtScM/CvQsd6CyMhciwj0Iy3bqpEdWO8kfMgO\nrHcSAvAD4Nn1FkRmTXEC1tm/dcBvgAfWTxwZmevzCJJnfR/wX9ZZFpm14YfAGJBG8hn6/fUVR2YN\nuAdpuf40cGr2v4fXVSKZtaAN+AhJ72eAP11fcWRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRk\nZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGTuIP5/xRHll/wr\nALgAAAAASUVORK5CYII=\n", + "png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAImCAYAAACB54oCAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXd4HPd57/uZme2LbcCid5AgCPZeVCja6qIKrViWbcmJ\njhLLshM7Tuyce+95ro99zuPclOPYThxbyoks+cgy1TslFlFi7yBAggQBEHXRge29zOzM/QMURIqU\nREkESUv7eR4+WM7sb/admd2Z77y/twiappEjR44cOXLkyHG5EC+3ATly5MiRI0eOzzc5MZIjR44c\nOXLkuKzkxEiOHDly5MiR47KSEyM5cuTIkSNHjstKTozkyJEjR44cOS4rOTGSI0eOHDly5LisTKsY\nEQThF4Ig7BIE4ZfvW24SBOG3giC8LQjCv06nDTly5MiRI0eOK5tpEyOCICwBrJqmrQEMgiAsO2P1\n94A/aJp2vaZpfz1dNuTIkSNHjhw5rnym0zOyEth6+vU2YPUZ664D7hQEYbsgCHdMow05cuTIkSNH\njiuc6RQjTiB6+nX49P/fZQawEVgH/EgQBGka7ciRI0eOHDlyXMHopnHbYcB++rUDCL1v3U5N02RB\nELqBYmDkzMGCIOTq1OfIkSNHjhyfITRNE863fDrFyH7gW8DzwPXAE2es2wcsFAShBagBJs63gVzf\nnM8XgiBc9nO+ceNm9u8fpapq/pQ9Hk8r11xTwW233XTZ7Orp6WHfviP4fGFmzCjnqqtW4Ha7L5s9\nF5Mr4bxfarLZLEePHuPQoeMoSpaFC2exfPlSzGbzR47ds2cfGzcep6ZmMaIooWkaw8OdNDQYue++\nL18C6y8On8fz/knQNI329nb27z9KNJpg9uwaVq9ejsPhuKDxiqLwr//6GOl0OW53GQCZTIrh4SP8\nxV+sY8aMGdNp/lkIwnl1CDCN0zSaprUAKUEQdgGKpmlNgiD82+nV/wT8PbAH+E9N05TpsiNHjgsl\nlUpx6FA7FRVzpn40giBQUTGHgwfbyGQyl8Wuw4eP8NhjGxkasiCKs2hpifGb32xgbGzsstiT49Oh\naRovvPAazz9/mHi8FEWpYcuWbh5//GnS6fRHjt216whlZXMQxcnZbUEQKC9v4OTJIUKh0IeOz/HH\nxzvv7OTJJ9/B53MhCPXs2zfBI488dcHnur+/n0BAmBIiAAaDCZutlv37W6bL7I/NtKb2apr2fU3T\n1rybMaNp2vdO/x3TNO1mTdNWa5r2xIdvJUeOS0MqlULTdCSTKUZHRwkEAqiqhk6nR1UlUqnUJbcp\nnU6zadMeysqWUlBQitmcR2npDESxim3bdl9ye3J8egYHBzl6dIja2qXY7QVYrQ6qq+czMqJx4kTb\n1PsmPR7DtLe3TwnPbDZLIpHGZLKctU1BEJAkE4lE4pLuS47pJRwOs337Uaqrl+NyFWE251FR0UAy\nWcDevQc/dKyqqng8Hk6cOMH5Ll1mcx6BQGSaLP/4TOc0TY4cf1RYLBYG+zs4fmAAh8GMomlIdjsN\n8+dit+vJy8u75DZNTEwgy0aMxrPd9253OR0dO9A07UNdnzmuPAYGhtDpCs45bzZbCe3tvSxduoR4\nPM4rzzxDsLcXqygSVVVKGxu58557qKgoIhicwOUqmhory2kkKUVBQcGl3p0c08ikCLUjSWffqgsK\nyjl5so11684/zu/389Lvf4/i9ZJOJGg95CGT0jN3/gJEcfJ7FwqNs2pV5TTvwYWTEyM5cpzmSFMT\nlboUmpjAaarBYrQx5B9h3642/vFnP0QUL33BYqPRiKbJ5yyX5TRmsyknRP4IsVhMqOq5U36ZTAqb\nbdLjsfWNN8DjYXV19dT64+3t7Hz7bW6++Voee+xVVDWL01lIIhHF6+3gjjtWYjQaL9l+5Jh+DAYD\nmna+70oSq9V03jGapvHyH/5AUTxOxenvTyKWZmfzNgxGPTPrZzExMYheP8GqVTdPq/0fh1w5+Bw5\nTtOyZw83LFrAnVfXYjEN4Y8cobIwwpIZFoqKCi+LTUVFRVRW2pmY8Ewt0zSNkZEOrrlm0WWxKcen\no76+Hr0+TDz+notcljOkUsMsXjyPWCxGf2srs8rKzho3u7yctkOHqKio4KGHvkRxcZTx8b0YDB7u\nu+86rrpq1aXelRzTTFVVFS6XRjD4Xo6Hqmbx+bq56qrF5x0zPDxMZnycisL3rlnXL5nPumWFeLo3\nMzGxjzlz9Dz88NdxuVzTvg8XSs4zkiPHaRKxGGa7nZqSEmpKSqaWNw0OXpZ4kXf5ylfu5Pe/fwGP\nZxxBMKNpERYtqvjQm48sywwNDaGqKhUVFbkn5iuARCLByMgIOp2Or371Zp57bit+vwWQEIQQd9yx\nmqqqKvx+P3pBOMcTp9fpEBQFWZaprq7mwQerz/9BOT4zSJLE/fd/iSeffAmPZwBBMKJpIa69tpEF\nC+af8/5EIkFnZyeRcJisqiKd/g7pJIlls2dBcSHf+3//5lLvxgWREyM5cpympqGBkb4+Kovem4tP\nyzJJSaLojGWXGpfLxV/+5YMMDAwQj8cpLCykuLj4A9/f29vLG08/jSGZRACSej3X33038+afe/HK\ncWk4eOAA+958E5uqogCq3c599/0JiqKQzWaprKzEZrMB4HQ6EfLyiCYS2CzvBar6wmHsJSUXlP6b\n47NDSUkJf/u3D9Hf3086naakpOS8sUGHDx5k75tvYkil6Dx6lNDwMGtWraL4tPdjxOejdt68S23+\nBSNcqXnegiBoV6ptOaaHy113YGxsjGcffZRyUaTY6SSWTNIVDLLirrtYtXqym4Hf78fv92Oz2Sgt\nLf3Q7cmyzMDAANlsloqKCiwWy4e+/2IQiUR44uc/Z77DgcNqBSCeStEyMcFXv/vdDxUxl4vLfd6n\ni3g8zsjICGNjYzRv3MjyqiqMej0wKSy6ZJlv/uAH5/VaHTxwgFcff5w6h4P6igpCsRh9iQR3PPjg\nJa0LMZ38MZz3aDTKyMgIBoOByspKdLor8/nd4/HwyqOPsqy8HJPBQEd7Oy0HDuDT6bhn3Tri6TRj\nosjXHn6YwsLLM+UMU+f8khc9y5Hjj4qSkhK+9p3vcGjvXtq6u7EVFHDz+vU0NDQgyzKbXnuN3uZm\n7KJIXFXJr6tj/Ve/ivX0Tf9M+vr62LBhI8mkERCRpBi3334ty5cvndZ96Ghvx6koU0IEwGoyUaLT\n0XbsGMU3Xb7CbZ8nzvSEtLS14UgkSObnYzz9lOp2OBj0eOjt7aWxsXFqnKZpbN++i3feaSFiaODN\nzh44uY/b7/gC9zzwAOXl5Zdrlz537N69l61bD6FpdjRNxuFQue++u67Ic3D00CGqrVZMBgOhYJBR\njwe7Xs/gyAiPvPIKN9x9N9/48z+/orOtcmIkR44zKCoq4vYvfemc5Xt37WKiqYlramqmMlg6BwbY\n9MorfPm++856bygU4uc/fxxJrMVdWEpBQQGpVIKXXtpNaWkxFRUV02Z/IhbDrNMRDocZ8nhIxmI4\nCgqQrFai4fC0fe7lYHx8nKNNTQQnJiitqWHRkiUXXJXy/cjyZMaS/rTn4tPQ19fHgVdfZWVFBUa9\nngmPB3smQ8v+/Vx7443oTn+GEUgmk2eN7ejoYOvW41RXr6aqSseCBWsJBMbwRQYu2KsVjUbp7u5G\nURQqKyspOSP+KceF0d3dzZtvNlNZuRqdbvJ8hcM+nnzyZX7wg4cwGAyX2cKziYfDuE0mEvE4W15/\nHcJhDJJERUEBdatWkQ0GiUajOTGS46PZuRP+4R8gEIA774Qf/hBM58/cynGJUVWVY/v2say8/KxU\n2lllZew+eZJwODx1EwwEAvzLT3/KqcNe8oQgh/x+otksFeXlSCZ45eWN/NV3H542W8urqtgzMkL/\n0BBOScJsMBDw+Tgei/GVtWun7XMvNadOnWLT739PqU6Hy2JhqK+P4/v2ce9DD30sN7Tf72fHli30\nnTyJIAjMXLCAtTfddMGiZrIIWQKz2Tzlwj966BA1eXlTUzJFRUX4fD4Uv589u3ZRVFREaXk5IThH\nYOzffxSXa8ZZdSXy80sYGBjC4/F85BTNybY2tj77LM5sFgnYr2k0XH01N912Wy4N/GNw4MBRbLaa\nKSEC4HC48XgG6O3tZfbs2ZfRunPJ6nQ8/eyzBHp7MUSjVFksaAYDndksCZuNa1au5Njhw9TU1Hys\n7cqyTDqdxmq1Tvv3JydGrgD+8IdJ8fGzn0F1NfzLv8B118HmzXAFZV59blEUBSWdxvS+pyFBEDCK\nIqlUaurmtenll0kODOEdjjCRCpJJxYln9ZzwpXG59Gz43/vQ63Vcf8Na6urqLnrtkqqqKnp9Pqoy\nGUrz8xEFgVgqhc5iwTsy8tEb+CMgm82y7eWXmZ+fPzUd5XY4GBgfZ+fWred4qj6IeDzOM489Rkkm\nw3UVFWhAX1sbzw0P82ff+c6HPv1qmkZTUzPbtu0nkchiMAisWbOYa6+9mlgoRMkZcSB15eXs3L4d\naWKCOqB/dJQNG99EqarD0TCHW265cSpAOhyOYTKVnecTjR+Z0RWNRtn63HMscbuxnn6Syaoqh/fs\noWbmTBoaGi7ouHzeSSQSnDjRjt9fQCKhUFJScsZ3wXBZM+veTzKZ5OWXX+XRf/ol8dEBChMxaiZX\nMKJplDgcWCYm6B8Zoaa29oK3K8syb7+9kwMHjqMoAi6XiVtuWcPcuXOma1dydUYuN8ePw/e/D9u2\nwX33wTXXwEsvwapVcNddcJnaoeQ4A4PBgLu8nIlg8KzliVQKxWAgPz8fmJyeOX7oMC1tw0TiKSZi\nBsYzxaiUgmLHOxEn6IXnn9zI44+/xYYNL6IoF7ctk9/vZ1FDA5WLFtGpqpxUFByzZvHlm29moLMT\nVVUv6uddDnw+H1osdlZcDEBFYSGejo4LPqZtx49jjUSoKS5GFEUkUWRmWRmC10tnZ+eHjm1pOcqL\nL+7DYplPZeU1uFxL2bSpje3bd1Hd0MDYGX1Dgl4vc1wu5JISjqfSbOgYplueSV9/Bb/5zSG+/e0f\n09HRAUBjYy1+/9miUVWzaFroI6dbent7ccjylBABkESRKpuNE0eOXNAx+bzj9Xr51a+eYGgowalT\nIzQ3D7J9+35isRiqqqJp4Y8MXL9UhMNhfv3r3/Hvv3gFS7qatFDLuGCkTRAYEkVUWWaR241DUeg4\ndYrqWbMueNuvvbaJXbs8FBauoqrqWgShnqeeeouenp5p25+cGLmMqCo89BD8/d/D3LnvLRcE+MUv\nwOGAv/u7y2dfjve47tZb6YxGGfJ6ScsyE8EgzSMjXHPrrej1ejRNY3R0lKbj/Vi1SkrydCSzXsyS\nnYyqI5IexyjFqSlcTiAQobx8EW1tQVpbj19UOyVJQpAkFs2axZduuom7b76ZZY2N6HU6JJ3uM+Gq\nlySJ7HmyMLKqiiCKF+xtGhsaouA8wcdOgwHv6OgHjtM0jW3bDlBaugCzebJFgMFgorJyIbt3H6Vx\n7lxiDgenhoZIptP0dHcTUVXmzJtHIGvH5voitSXXUGirxO1eQDBYziOPPIWiKKxatQyzOcDwcBep\nVIJIJEB/fxOrV8/+yPl+RVGQznN+9TodmSvoaf5K5rXXtqIoFaxYcStut4YoyqRSBg4fPkR//xGW\nLau7YjLStm/fQyTiJB0WqHAVY9XbydcX4pLMlBsMFIkiyUSCQCqFbDKxaMmSC9puMBikubmX6uqF\n6PWTHqG8PCdO5yzeeWf/tO1PToxcRl55BWQZ/uIvzl0nivD730++Z8uWS29bjrOpqanhyw8/jFxT\nQ3M4jD8/n1sffJAly5YxPDzME7/+Nc/++78THxllPNRHgdWOWYwj0otGD4IQpsxSTkaBhKyRzSrk\n51fT1HTiotpZVFSEpbiYUb//rOVdo6PMX7nyMyFGCgoKsJeXM+T1nrW8a2SEOcuXX7AYyS8qInKe\nm3RMlhF0Onp7e/G/7zjCZEPFSCQ1JUTeRa83kM1OXry//tBDFKxaxdF4nD5RpHTuXAS9nkGPB12k\nh/DoDiKh4yhKGqezgr6+MOPj4zidTh5++OusWOEilTqB2TzMvfdexW23fXQWVGVlJQFNQ8lmz1o+\nFAoxa8GCCzomn2ei0Sh9fV7c7nJMJitr1txEXZ0Zo3GUQKCV22+fz1133fqR28lmswwODtLX1/eR\nXZg/DS0tHZSW1qEzGhgJDaBmxggoUeKKSlQQGNc0BrNZJux2/vRv/uaCe2uFQiFEMe+ca4Xdns/w\n8MQHjPr0TGvMiCAIvwCWAs2apn3/jOU/AdYDQeA1TdN+MZ12XIloGvz0p/DjH08Kj/PhdMLjj8MD\nD0Bray5+5HJTUVFxTjxCJBLhxccfZ6bBQEVFBdGaOk51DTM40YXJlIeolIKogeBjOK2gjAfQ2XTs\n3LmJOXPmYbdPzvsePdpKZ2c/NpuFpUvnf+xAs3cRBIHbv/IVXvzd7xjzeCCZpGfCi+zMZ15hIbIs\nX5SMkcuJIAis+/KXeeF3v2PC48EiCEQ1DXNlJWu++MUL3s68BQto3rEDXziM+3TMz8D4OLt6BnFn\njmA09qJpcebPr2T9+nVT9UCMRiN5eXqSydhZgiSbVRDFDFarFZ/Ph6yJCDYXlvIZ7Dl2FDEwykxB\nxG20o2owkgoQ97dgNl+NpmlTIsrlcrFu3c0f2ATtgygqKmL+2rUc3r6dyrw8DDodw+EwhpqaXMG7\nC+Ddmifv3oTN5jzmzVvJ3Lkag4O7Wbx4EZIknTMulUrR2nqckyd7SaVi9PWNYDAUAiKCEGXu3CrM\n5jwKChzMmzf3E2d8vR9JElHVLK4iC4lmD0utTiY0gYnkEGoigc5iQSwqYvX69dzwMVL67XY7mhY/\npwlnLBaipMR9UWw/H9NW9EwQhCXAw5qmPSQIwm+AxzVNazq97sfAHk3T3v6Q8Z/pomfbt8Nf/dVk\nzMhHPch973uTWTZPPXVpbLtc/DEUQXo/2956i70bNlDicKCXJJr3HCQZUeic8DIgOlCyZWQyetJy\nCpujDqMlTsPsuVit+Xi9u/jRj75BW1sfXq8eh6OEdDpJIjHInXeuYPXqT95rJJ1Os3nzFl595R2s\neTW43ZWk035qakz86Z9+BdMVlKr1Sc97JpOhu7ubSDhMYVERNTU1571ZfBiDg4NsfvFFUn4/mqbR\nH4iS1c9kzpzVU3Z5PMeZMUNkZl0Vkk5H/axZ9PcP8PLLh6ioWITBYEKWMwwNHWft2hmAxjvvnODU\nqSBe72Q33cDYUWqiY2RTcXRiJTpjPo6CAkbUGBFbNatX5/Gzn/3kUwc0a5pGd3c3J5qbkVMpymfO\nRE6l6G1rQ2cwsGDlSuYvWPCxj9N0cCX+3v/jP57E73fhdr8XROz1DlFaGufBB79+zvuTySSPP/40\nw8NgtRawffsWVNXN4sVzqKoqZceOTUxMhLj66qswGkV0Oj8PPLCe6upPV8o/nU7z639/hF2bDzPu\n6cAcSaClslgEiaziRbTqEGfN4r//7GcsX778Y3tEn332JVpbI1RUNCJJOpLJGGNjx/izP7vpU2US\nXa6iZyuBradfbwNWA01nrP8nQRCCwA81TTs2jXZckTz2GHzrWx8tRAD+8R9hwQLYuBFuv336bcvx\nHqqq0tfXR++pU+gNBmbPnTsVSDg4OMjLv/0tzvFxgpJEZ38/WUmiVDSh2gzIogb5KpGYH1EqJZ0e\no6ioHoPBSSIRxuFw09vbh89norr6vSh1WS5h06b9zJ8/74Jdq+9H0zSOHu1j0eK7MZnejYuoweM5\nzqFDTaxZc82nPTSXHYPBwJw5ny66v7Kykr/467/G7/cjyzKPPvoMxcVnX7xjwQCbn9jEXWtWIOp0\nNG3axIp167jjjiW8885hMhkRUZT54hcXMmvWDB599FUMhmpSKY2ammo0TSU43k9lQxnh0VPE42Ek\nvYF0WiAdH8NWJvK97/2vi5JZJQgC9fX11NbW0tbWxu8feQRjLMay2bOxmkwcePZZPD093Pknf/KZ\nmLK72KxffzOPPfY8g4NBTCYHyWQIqzXG7bffc973Nze3MDIiUlMzn8HBUxgMFbhcs2hv7yUYHECW\niygoqCORgJkzG4lE/Dz//Jv87d9+6xOfb1mWefb//B8so0MIvk6SQ6ewixI6yYBiMrDomtXcddNN\nHB4fp7Gx8ROd5/Xr12EybaOpaR+gw2IRuPfe66Y1pXk6xYgT6D39OgycEaLJv2ma9j8EQZgJPA6s\nmUY7rjgCAXjjDfjVry7s/RYL/Od/wp/+KVx77WRga47pJ5vN8toLLzBy9CjFJhNyNsvRt99m9Z13\nsnzFCjY++yyL3G5SkQhpv58lNhtjioKY78Qty9xcUYlYWUFJ/QLGxiyYTA7GxrzIskJR0SzS6UJO\nnmyjru7seehEIsLAgI8339zEDTdcP5Wt83EYHBxElq1nCJFJCgtraGpq+0yIkYuFIAi43W6i0SjZ\nrHhWbQmvd4hk33Ea7PlUFxVhsViolWUOvvkm933/+6xYsYxYLIbFYsFoNLJ581sEgzAychJZNqNp\nKoIgYrbW4E300jhjEeVlEql0inA4Rr6umu/+fz/9xNNy5yOdTvPCU0/RtXs3pt5enDYbB/bvZ+my\nZSyrreVASwvDq1ZNa/G9P1aKi4v5679+gOPH2xgb81Fa2si8eXM+8KHg2LEuCgoqAUinkwiCCUEQ\n0TQT3d1tVFRcTyqVJB6fLG5ntxcwMNDF+Pj4J87K6ejoINXXx/DQEMmxYRboRVySxJiaobC0HGSZ\n8VAIyWL5xC0ojEYjd921jptu+iKpVAq73T7t3rTpFCNhwH76tQOYynXTNC14+m/3h6m2n/zkJ1Ov\n165dy9rPSNGmDRvgttvg49xjvvCFyTH/9b/Cf/zH9Nn2WUdVVcLhMEaj8SN/qO3t7Yy1tLCytnbq\n6aJGltm/cSN5NhtaMMjchgb2DA7i8/koM5uxKwqHhoaYvXgxt163huaxMSoqChke9pGfX0d+/mRG\nRDA4wcDASaxWlXR6Mhhy0puxh8OHD+H1Bmlv7+PVV3fxrW/d+wnFw/ld4Lkn4kk0TWNkZIQBjwe9\nXs+MmTNxOAzE42Gs1knFPzF0CpcoYjZJU1NbBr2eQlGkp6uLVVddNdWGfWJighdffJOODgPJpJVg\ncJhgMEB9/XzMtjzaByYY8vqoT7hZs2w29opyZjU0TNX/8Pv9DA0Nodfrqa2t/cQN8ZqPHCHZ3U2p\nIGByu7FZLJTKMkeamykvLMQlCAwNDubEyAeQl5fH6tUrz7sum83S398/Vc3UYNARi01W73U63ahq\n/+l3qoji5O8slYpSX3/xMnB6OzpoP3qcw4cOszCrYZJMqJkELr2eIx4vgl+mOfAWf/433/7Un2U2\nmy9ZY8bpFCP7gW8BzwPXA0+8u0IQBJumaVFBENwfZsOZYuSzxPPPf7KU3X/+Z5g/fzLe5AtfuPh2\nfdZpazvJG2/sJBpVEASFBQtmsG7djR/4Y2tvaaHK6Tzr5m3Q63FpGh6PZ7LomdFIdUMDzQcOEA6H\n0QQBnyRRarOhE0VkTWPJkkV0dLzK8HAXBQVlNDXtoru7j4KCQtxuC5s3P81tt91HPB5m+/YdCEID\ndvtsqqvr8Pk8/OM//icNDfUYjUba2zs4deoUExMRzGYr8+fPZNmyJef0x6mqqsJgSJwTZDkx0cdt\nt83l846qqrz52mv0HTpEviShaBq7RZGGpcvYf+A4Tucs7PZ84tEQanycq5ctO8etnj2jZoumaTz/\n/BsYjTWEwzvx+zUikQzBoItAYBQYobj4OhIRLz7VxtP7R7lm7Uz+n3vvPZ0qvJ2dO1vRNCeQRdNe\nZ8mSmRQUuKmpqaK6uvqC3fonm5qoLSxkIBhEPp1ZY9brsakqY4EAGU3DlOv8e15isRgdHZ1EozEq\nKsqoq6ub8ggEg0GefPIFJiZUBMGMpkUxGuNEIj7s9gLy80spLjYzNHQMg0FHXV0t3d3HKCoqoKJi\nsp9NJOLH6RQ/VXrwkZajNB85RpGsYEJDp4p0KiLdSScGQzFRxUTAWMS///p1jrX2cv/9X5rWYmUX\ni2kTI5qmtQiCkBIEYRfQomlakyAI/6Zp2veA/yUIwjwmU4v/r+my4UpkbGwyM+bGGz/+WIcDHnlk\nMhW4tRXOUyIhx3kYHR3llWef5YUX9+B2z6N+zgLKKso5dqyLWOwVHnjgax97m4qi0NTZS//hVmKj\ngzRUViIlk8iCQLHbTXp0lP2trZSuXk1ZWRnf/ObX2bVrH6+88hIDA2nmzFmBpmnodDpsNpWtW39P\nNisQi9mw2UyUlZWg11soLGykr2+MJ598imhUoqtrjIGBKAZDPhUVBYyM9NLc3M5DD913liAxGo3c\nc89NbNiwBShErzeRSvmprTWzfPmyi3h0r3wSiQTBYJC8vLypTIaOjg48Bw6wqqZm6iYfT6VoaWnm\na1+7m4MHWxkZ6aC6wYGUV4HL6aCnpxefz0844OOEz8cSoxG7w8G8efPw+Xy0t3vo6YkRChlJp63o\ndBKp1ACJxDA2WzX19TZuuGEVRUXFhMMhxsebCJ7uGfL2221UV69GknSMjY2xZ88ge/Zs5JprvkAm\n04IoBnE6CzCZjKxcOZ8VK5Z/aFaUIAiUV1VxzOPBkc1O3lA1jVgyScRkor6+/pIc+09KMBjk5Ml2\nEok0tbWV01Kt+P14PB5+97tXyGQc6HRmFKWdurpD3H//l4nFYvz4x/9MR4eG213OjBlFVFbOYWDg\nBGbzEAMD+xHFfIqLC4A2CgsLMJkEdLokdruJYHAMv38MSfLy7W9//RPvSzwep3X/YWQkNEXFp2TQ\nayKerAOrVIMm6YlmNWYWz8ftdtPXN8xTT73FQw+Zqf0YFVgvB9Oa2ntmOu/p/3/v9N/pa85xhfPy\ny7BuHZyna/gFsW4dPP00/OhH8POfX1zbPouMjo7y3KOP0nuim4aC2Rh1JnqPHCERjzN7zhy6uvYx\nOjp63vnb2YsWsbetjWKXa8o7kpFl+sJhju04jqNyDZ2HN2OJZNB0EBFkopLIYoOBkUyGVCzGA+vX\nA+BwOLjZkGjqAAAgAElEQVTjjls5caKHWCzB4V0HMGiTxbtUg0BZjRFFGQNKEEUTw8MBvN4ANTWV\nCIKNbdsOsnbtNwiHx6iuXosoSni9/dTUOPD7IzQ1NXPdddeeZX9jYyPf/34RbW3tRKNx6urmUF9f\nf8W2Qb/YqKrKznfe4eiuXViARDZLzcKF3HbXXZxoaqLG6TzrpmA1mbCdLnn8zW/eD0y65X/zy1/y\nmxffIC9rxjs6RkRLUTWrijl6PTv/8Aeit9/OjPp6urs9yPIs7HYnBQV5JJNhUqk8gsFxamsX0tBQ\nhcViZceOfaRSIpGIl3/4h19RXV2G3V6DJOmQZZmmpjZcrgaiUT0g0t+vMjoaZvXqSioqZrJx40l6\ne4e4//57zjvl1rh0KR1vvMHCmhqq58+n98QJJEXhVDKJSa9n/X33nbfT9JVCW9tJnnlmK4JQiCga\n2L69g9mz8/na1+6etrR0RVF4+unXycubi832Xg2Fnp5WtmzZxoEDrbS2higr+wLZbJYjR/oIh6M0\nNjbg84V46KEvMTIyislkZMaMr0/FmKTTafbs2cOGDa+TzeopLCzn6aff5M4717Jo0cKPbWd/fz/x\nkRHSqRSDchJ3VkXWDKQFKxICASWLpWQeoVCCsjI7kcgp7PaV7Nx58PMtRnKcywsvwHe/++m28ctf\nTk7XfOUrk2Xjc3ww+3fsoEqno1vTkWeyo9cZqDYY6Dl1ipq6OkQxj0gkMiVGotEoXV1dJBIpystL\nKViwgLf27MElSRjNZiI6HXGTg5KipTidhYiihCf9IjEE9EKcr990HYgisqKQrajA4/GwZ08L0Wic\nxsYa2traaT8cptZZhiSKpFMJQrEwxw61U1RpQhCcZLMJNC1DNmugu7sPQRimrKyCZDIOOBHFSbex\n1eqmv3+ExYsbOHGi5xwxApMFwv5YglUvpHOuoih0dXXR3t6D2WxkwYI5H9jS/fChQ3Rs28ZV1dXo\nJAlVVWk/fpwtooiSyaA7IyAvkUrhDYfxh0JnFarKZDL4IiJ11z1Ay97tmMpczCmuJZb0MeIPUmaz\nsun55/nmD35AJpNBUQRAhywLSJIDi8VCNltAXp6FWCzB/v1HSaetKIpKLBbj5Mk0b799gFWr1uN0\nFuH3+1EUI3q9EdAzPDxCImGmtHQhw8ODzJq1mNraxXR0HMTj8Zw38HXpsmX0trfT1NtLkc1G8fz5\ndIVCfHndOubPn4+iKIRCIZxO5yc6T9NJMpnkhRe2Uli45Izg6zra25tpaTnKihXLP9F2NU1jaGiI\nRCKB2+0+p5rt8PAwsZiOqqqzizmVlMzgpZdewGyuRa+3I4o6dDoDBkMVvb29VFaWMTw8Sk9PL2Vl\npcyYMeOcQM+DB9uZNetmXK7J/kOpVILnntuB213wgXE7fr+f1tYT+P1hamrKmTt3DmazmY6ODtLe\nUaolM1FLPhOJGGYtj4gq41dVdAUzKS+aQyIxTiTixWo1Y7O5GB3t/8hjFIvFOHDgMMeOnUKv17Fq\n1QKWLFn8gQ8v0WiU4eFh9Ho9VVVVn1oo5sTIJSQQgMOH4eabP9123G7413+FP/9zaG7+5F6WzwMD\n3d2sKiigyDnKWCCKM68AURAxCQLRaBRVjeJ0Ounv7+e5555n8+bDWK01p2MukkR9HVRY9XT7/aRE\nkWU33shETxCnc7IzrNtdSXNKoEwxkZEjtBw4xuLFjaQlkYSs8dRTu3C7Z2IyWWhqGqGl+RjZuJ3h\njEIsEkZFh8lkIxP14e9OEk33EVKL0UkFaBKoYozGuTYkqZCmA2/i8YRRVTP5+WXIsoLP56OnR6S+\nfvJG/e6FQ1VVTpw4wdH9+0knEsyYN4/lq1Zhs9ku5+n4QF557jl6jk+Wxq+bN4+1N900FRj6LrIs\n84c/vEBnZ5i8vFIUJcTu3S9wxx0rz6nJomkah3fsYEFZGbKi0D00RCKZxOVw0N3SwuLrr6dn2zbc\nDgfHenrZd2IYVbUxGAkQfX07paWlVFRUMDAwQDZro6qqkc6jpygrkIjEAwyOj/KbDVuZW+QCUeO3\ngkBxcR6trf0MDiro9SWIIghCApfLTDzeQyikcrRliExSIBrrQc0O48qrweRysnfvERRFT03NpLCa\n7EcTIJNxYzLZEQTQtPdiVETRydDQCG63G0VRsNlsCIKAKIoYjUa++sADdHV14enqIt9mY011NXve\neos3Dh7ELIpEVJXG1au58dZbr4iaI+/i8XjIZPLOyQJzu2s5fLjtE4mRcDjMU0+9xMjIZLaLpkVY\nsaKe22+/ZWrfJ3s2netlSiZTHD58nNLSfIaGBhkfN1NVVUV+fj6ZjMhrr72Ky6XwzjujRKPN6PVh\n1q37InPnzsHtdtPd3U08bqK6umhqmyaTBZOpgqamYxQXFyNJ0lkeuu7ubp58ciOq6sZisdPScoyd\nOw9TWGjhp//tv5EIBggCAgITgCSYSAgKQp4Nd0HN6WwemURigEWLFhCJ+Kms/PAYlUQiwWOPPY3f\nb8btno2iKLz0Ugu9vQPce+/d53jg9uzaxeGtW3EAMiDn5bH+/vs/VVB0ToxcQt55Z7IR3sWIHbvn\nnsnqrE88AQ9/bie9Ppo8u514KsWSWVW8uLMdvc6A1WQjnVWYmOhm0aICXn11I08/vYmurggm0xIk\nSSQQ8GGUojg1mYa5k9VVjQYD3tZW/EkDpaUpDAYT7e29ZCyNDIVOocYjBAcV3mprpby+mrzSeSxf\nee+US7y8fCZaxkU00ooolCFlzWgk8KU6yc8GQBUQ1SIEyYGs6lElPWkthX+sD+vgSarNLlLBEAPe\nPvrzF6FlrdQWCRzt28upAxFamlq48ZYbuOmm62g+dIi+vXupy8/HZDAwtGsXTx07xv3f+tYVKUjS\nJ09y7WkPR39HB88MDvLAX/7lWcHFra3H6eyMUVv73g1Jlit4880DNDbOPutJX1EUUtEoMUFg1759\nOGQZsygynM0yKIrc9o1vMFRby5bmZlraQ7jy6omqCjWz55FI2HjkkT/w3//7908X5RIAjWCgFzk0\nghyO4A+NUiplsVjLiClxXMEwwa52fD4boliCLAcxGPJwOCQEwUhRUZKD+58hErQjqBGM2TTV+kqE\neJpoOkE0z0dbm5n8fDPp9DgeTw951gjevl5icT16h5tFi2fg949isdgIhcb43f/eRl/3CD5fGJ1J\nx7LVK7j99hu4/vrrsFgsNDY20tjYCMAffvtbTGNjzD1dbCurqrTs2cPhggJWrV49/Sf4AnnveJ/N\nZLGsT9bk8bnnXsPvt1FdPVkSX1VV9u9vprDwEFddNbnvZWVlGAwpUqn4lBBSVZW3396C3V6I3V5O\nfb2L3t42urpi1NfPpL+/laIigTVrvobXG+bYMT/RqJ/u7leYM6eZm29ejslkQBDOveAnkzFeemkH\nTU0dmEw6rrpqEWvWXI2qqvzLv/wng4NWVDWJw2GlsXEGr7zwJP4TW5BiYW4EjEAPGpVARPMT0rmJ\nWkbQtBOMjrZhNEaYP/9mCgpKCQZPsmbNl857bBRFwePxcOjQYQYHszQ0NE6ts1qX0Np6gKuvHqKy\nsnJqeXd3Ny2bNrG6shL96YcffyTCK08+yTd/8IOpSsUfl5wYuYS89RZ8jKq8H4ogwE9+AvfeCw8+\nCB/S7fxzzbI1a9j7zDMsq6nh9tUz2Huij87BMHKemVmzJnvD7NnTSyCQQVEM2GxudLo8/P5RtFSA\nhDHChv7DzHTasZtFDHYr2fIqRkbacbnq8Hrj6M1l9He+Q0F6BFsEqsx5RIfGkNNlHMzsxl1Wweio\nH1XNkkmLGJQMNt0YGTEPm6ijQA4QUxPEBAe1+hLMJgPxbAp/JkZCZyI9mmJGkY5MNES+aCCbCHOy\nbwsmVzUZi5Vqm4mFtVfRNuHlrbe62Lx5BxWGJOuXLZu6WMyurKR9cJDmpiauuwJTsWaWvVfxckZp\nKTGPh/aTJ1mydOnU8qNHO8jPrzprnF5vQFWdeDyes8SIXq/HWVzMW1u3Ms9oxHVagJVns3iHhujv\n6eHeP/szfhGIo/NFyFpcRL0hoqMpRsayhEK9lJf/jm984+tIUhSPpx1zyks2GiWcylAhStRb8/CG\nxnBWz6f71Ai2pIbNnMHlshKLxUgm+4lEQsxprEMZ76YwPIygjJNRFSyUoJMFdJIevaKAyYVODBGP\nn8BgGKT75CnydS6MYho1MoFvVGRvdAGlFUESiQkGe3cwq2QR5kQFlcbZxOQY+3ccR68vZ3h4nG9+\n8xtTT/1erxd/by9Xn1H1UxJFZpeW0rx795QYGRwcZMeOAwwMjFFU5GLt2pWXPNC1qqoKUdxMJjMp\n9t/F5/Owbt3HL7g1MTGBxxOiquq9bBJRFCktnc3u3S1TYsRoNHL33TfwzDPbkKQSTCYrg4On0DQ/\na9feTkvLKfLzFzB7toXBwTaGhnZgsfi5++7/m0gkzMaNLyOKBQiCgRMnOpg1ayGbNh1h/fpVKEqA\n8fFxotHJejR6vcbu3buYObORWCyP7u4Rjhx5ia6ubux2G8eO+amsXIRebySVirN1614mTh7AFItQ\nC5QAJ4E5gAHoR0XIhtFFJEaUBHqjhcLCYvT6IDpdPw88cNt5K76OjIzw5JMvE43qaG1tJR53oap2\nZs+ehSAIpz1tLgYHh1AUhUAggN1up+XgQapttqlrC0CB3Y5pYIDe3t4pAfxxyYmRS4Smwdatk6Xd\nLxarVkFDw2TdkgceuHjb/SyxcNEign4/+3buxAbU1JcyY9U8XCUl/P7x1/FOGEnE7JhMM8hkoni9\nxyktXYWq6giHA8SzforyZmDRFxKKR5iY6CLc72XBKgN7977O4EAaOTTAAiOUmNyUGqwkMmmGExHG\n4iG8J07R3h2lqrqeZDLG+HgQk6rQkAljkmSyQh7prEhMkAAROZMmq2TIqhqiBpmsjBE9PlkD2UKB\nvRCrQSEZGcdHFqc+y4qGJSRTKRKD4wwFjKSzKnH5JPvSKqtWLZ16Uil1ueg7efKKFCPvJ99sZnx4\nGM4QI6Ionnanvx/tvIGcDYsXs+fZZ9GfrpibymQYjURYuWQJnc3N3LJuHS6Xm8XL53DwYCvJpIFs\nNo0gKGiak/37e1ix4hS33XYVP/jWd3GHomixGPF4iBAaJqMOh8mAM9/BwEAEk95Mvr0IW2Ex4+M9\nSFKGWEym/dhhrOkAquIGnFiANGZGtQkKFAsiYEybScVE+rraCQcc1OY1IKiQTCqMZ4Posi5inhSC\nLgJkkVPlIIsgWLCYbRgUE4GxAEeajpBMzmLNms6p6rSpVArjebI3rCYTca8XTdPo6+vjt799Fat1\nBk7nMgKBEI8/vol77omxZMniT39CLxCr1cqXvrSWl17ahSSVYDCYiMfHqakxsWzZ0o/ewPtIpVII\nwrlP6kajBa83cdayefPm8t3vumlpOU44HKO6ugybzUZNzRwymQwdHU1omh29Pk4iMYHV6mLfvrfp\n7u4kHq/AYnGj0xmRJIGWljYaG6sZHZ1gcPAEJ07sR6fLx2DQE4n04nJV4PONMzaWJZnMw+9PcuzY\nM8yYYcVun3M6ZghMJivxeBJrMoKChovJhm4mwAYkgSR6sriZUboMUdRw1y4nmx1ieHiIa69dOFUf\nJ5PJcORIC01NbSiKQmdnJ1VV11FdXY7fn2J4WKC9fQSn0z4VQ5fJxHhn8yYKslnygARwpKuLL8ya\nxYljxxgfGkKUJMpraxF1OlKfojt0ToxcInp6IJOBT1m9+hy+9z34n/8zJ0Y+CEEQ+MINN7Bs5Uq8\nXi9ms5mWw4d548mnKdKVYjUrdAYC+DMhRElHNptHKjWOKOqJJfyUWiupdOoxSEa8cQNjkVqyBh8e\nD8Ri5fi8bdhUEyEtToU+i2ASQNAgk6a3v4W0uhiz1UQ2cZSh8Q7SShpRquQocVzZIDMJk9TpQDOi\nynGiJMnLmsmioaBDyYJBTBGMSFj0DjIZGU3OYFJBL5voGYqwsDrM6IgPASt5Zid2s4vsaA/hsERn\nRxcLFs4DJm/G5o9oQ3+lEEmlqHef3ZRr6dI5bNiwF6ezcEp8pNNJJCl0VqaAqqq0tbVx4MAx/KKN\n5nAEVyyGLS+P2iVLKC0vZ//pjr+zZ9fwH4++TUdLF8m4Slazomg6VJ0Hnc7Nli27uXrlPBblWyk0\nieyIy+hpRMsInFSyFGVj5GfTaJqKotMTT4YJDnhIRMZJRuNEZTf5mo2oZENV8jEKcfJQJlMzcaPh\nxa3LYzQSxBsfodS+nCK9DT1hfOE4dncpvmwFZjEfcypMaqwXq92KqLjpHx+jvqiIZDKJzxcG2cyA\nZwB/IMLAQBM/+tHfsXr1atxuN0lRJCPLGM4IMhzx+6msr0cQBDZt2onTOQeHY/KYu1xFmM1W3nxz\nN/Pnz7ukzRWXLFlMWVkpra0nicUS1NevZvbs2Z/IhsLCQkQxjiynp27wAH7/CA0N53oLiouLueWW\nyfiK0dFR2tpeAKChYTHV1bPo7j7G/v1pli//EtFolvHxUXp7Q+j1ZaiqQCYzgdWaQpKqGB4e5JVX\nWjl1youm2YnFMqhqiFhsDIPBjN1ej8+XIZUSMJlqiMdljh3bR2FhjGQS3O4ZmM0uTKY8gnIcAzAG\nlAHq6X8xIIkdndFNvsvNWDpOIpHHyEiGSETHli2dHDrUS3m5kaamk0xMyMyevQiDwUFra4Z4vJWr\nriqmunoGHs9ejMY6ensHKS0tJRYLMTTQzNpKO0vP8JAFBwbY8NJLrKuro9ZmI6tpjLW10a7Tccun\nqJ+SEyOXiK1bJ2uLXOzil7feOtlwr6kJln2OykdomsbAwACBQACdTofD4cBut39ghoDNZsNmsxEI\nBOg8eJBCqwNJdWAUZPIDcWKZJIrkJJkOkkhkyGYBScOkS+AwuPEnEgQSOiymUkYSw9jti8lmQkja\nMDr0hDMGhlI9CIrCYDaCS5MpyWgMZfuIZMIEIhlkWYckOBGEEgz6LIH0ECeVdvRakjFkrFiYIEgc\nC6AjTYYoAayqRlZ1EE2l0TIhyvML8ScESm0FBGNxWo8eBjnDhAYU1JInSSStdgS9hX7PKPPmTz7Z\nHejspMxiYfu2bcxdsICioqLzHqvLwUQwSNG7lUyDQUJG4zmdZufOncvixT0cPXoQo7GQbFZG07z8\nyZ984aw4mNdf38T+/R4cjlr0tiX45DA6R4Ib1izFZDDQNTzM7NMel+rqSib695KKglHfgKJmiYW7\n0Gl+htuzPDfxFESuZ/WSJWzcvBNXwULMEY2ImGQ8EWE0A4eOH8GiVwjry4nFMhhCYxSlEqAWkmCC\noBBHogqDsYRUeoAYGdzECGMgSBS9miSqBHBbZZJpiTyDRDKZwmW24Q97UVU96WScwjwbemseNrsJ\n/4SPVCJNKp0gFlGQdEYmIieIkSGedBMOW/jOd37FXXft4Yc//DYrbryRptdfZ5bbjd1iYTwYpD+T\n4Z4bbiCVSjE6GqSqasFZx9tksuL1SgSDwUv+XSkpKZnqAfVpMJvN3HzzKl57rQmXayZWq51AYAxV\nHeKGG77yoWNLS0uZO7eUEyeOUVragF5voqenG5erhgULFpFIJHjiia3odNXIsoSqgtFoxWLJZ2Ji\ngljsEOm0hiQtwGh0YbNp1NU10Nl5iIGBfTidV5FKiVitk9csTcuQTlsJhewkk4MMDfVRUFCA3z9K\nCJmZTJYxdzFZ3jwMeJFIYsQgmAgkwqjWciLBFFZrLaLoxWTKY3Q0wNatR7BaaygurqOjYwiTqR+b\nrRaPZxi7fR8NDctYtKiRI0eaSaWyNDcHOXVyL6mRTvqGSshOTNC4YAE2m42KggJSySS+TIY8VUXO\nZgkBksFwjmckm82iquoFCcmcGLlEvPXWZNDpxUaS4KGHJnvXfF7ESDKZ5KUNGwj19BAaGKDP40Ex\nm5k1bx4Ny5dz2/r1H1hVtaenh2w0SoHTxKA3RJGzgqL8MJFUgNGkF70hjqqO43aLlBQVUWmsRpFl\nRsIJDNYiFEEhEzUwMuIjEw6Sn1eIlEmil40MKRqZ2Dhleh36TIZ8SUIyWjGYJdriWUTRiV6cSVrL\nEk6GMGolTBBCRw8y+RgoRyZNkBHSCMgo6MU6wnoNUzaAWbWQVCOMJ4IkDaUE4inCiWEGowkUnRFz\nYR0ObyfeyEluWPc1etoPooaCHOnpYf/x4xTn51MWjTK+ezcHNm9m1W23cdXVV3/igLOLybDZTNfA\nAAJgLizk7vvuOyfQVpIkvvKV9axY0U9PTz8mk4HGxlvOStMcGxvj0KEeampWI4oii1ZdzYmDB+kZ\nTXKgrZ2CAhdJp5Nbr7sOgKHBQW5ZOIPH+/YhqyLhkJda0UC+3k1Glgj5IuzcsoXF995LACsmRSKZ\niZBKRNBUDc3sYiDmw2WSGY4OI0TTlGNAQSKlJTDrsihalIlMgDypDNCTQENAw0QchSCClmQWImLW\nTE/fCdxFC5FTaSqteRgEHZnkEFZ9IRlJwihIOKxWNJ2HTDxMKDCGqjmJZscIphJIhoWgOjEbC1AU\nmQMHArz88ib+y3/5Gg6Xi6ZduzgVCFA+Ywb3rl1Laen/z957Btl13Ve+v33Szalv54zuBhogAGaC\nYASpQFGiKYvWiENpZEkuzYytcZjnmXpTU66penaVP3jKnueSnzXzniXb8liSZYk2FShSFCNIEIBI\nZBKN1Dnee/vmdPLZ78OFIFJMSqBsWetD172nT9/Tvfep3uv89/qvNYDv+xiG+hqdRhAESOm8bXbg\nb4UgCFhaWmJzc5NYLMbU1NQPde/efPNNZDJpDhw4Srm8wI4dQ9x++wM/lAvqhz70ywwMHOTgwWM0\nm200rcWdd36AeDyOED79/aN4nkuzqaCqLTKZfoRQmJ8/wMhIkkKhhhACTWujKA6WdYTh4UkWFp5j\nc3OVUGgckNTrq7iuSyIxiKp2MTU1gW27zJz6Bl71BbbT2ZpxgZOABSwAaXzAxZQtmr5AVwfQtAi+\nn0cIC9+32Nz00LRxIEkkkiYSSTM//yzr6y+i6z20WqdYXMyxe/cVXHvtbggucObAw1wvBBuKQrJc\nZrPdplGtcss730mrVmPb6Cjh8XHOVSrous7W665jK5BbX2diYoJ2u80zTzzB2aNHCXyf4a1bufPu\nu990rH9BRt4GeF7Hwv1yZcp85CMdIvLnfw5vYzX1Z4Znn3oKf2GBAcAqFnnP8DC5Vot6sYg5M8Mj\nUvLBj7w67tt1XR5++DGeeuooc8c36Y5CqV5BCI3xLSNoEQ2vsMjkdJxf/dWPcccd+/jTP/3/eO7J\nRZJ6hHRPlpfmS9RqFwirFTZXjiLpRUqPTFjSCmo4QuL5kpgbUJMQEwpCq5IJd6M1m/hKFhl4KLQx\nJCBUhIyhoJPWRon6AkVNoMk4bd9CEsELwjQZw4ts4pt5IuEtZCMqW7t3ML/+LP2KRa8QWL6DVV7C\n9ptMjk0QCoWZuOp2UqkdhNNR9kpJTyLBMyfOc+rCIl67zqPffJjhsVFGJid55733svfWW0kmk687\n5pcbn/zt36ZUKgEdb5Q3ys8RQrBly5Y3NHBaW1sDMpdaJfv7+4m94x2cP/sSc+4Fxq++msFkmoWF\nBbZt24bZbNKdTrN1bJRzczClpekJRXBcB03x6E8kMUt5XjxxAs0IUzdDtKWGYUQJfI+kptO0daaM\nDKZfwhE+VQS2v0G/kiQtdCpuiwIL1P0sYSwCFBp41KjTIxz69DhDepxl30Y4RWq5czgiSa20gaGD\nUMpUAgevLnBKNi/nN3G9VQxRY9Wex2zbtAMLRR0kGc6Sjqfxg4BmrY4cHODo0bPcf3+LnTt3snPn\na6MAVFXl5puv5MknzzA+fvWlsV9fP8+uXWP/JLqvbNvmS1/6By5cqKIoKaQ0UdWH2bv3SpLJFBMT\n4wy+QgT9g3hlZ9H38L2cmUqlQjKZZGJi4jWeGrquc+ed+7jzzn34vs9//+//E8PonCOlJJnMMjTk\nsrFRIZPpolzewHFKOM4Svr8P224BBr6fQ0qbctml0TCJxVRM8wyeJ9A0FVW1iUYTJJMhfD+GaRbI\nrRag7RIiwKKzRZMFhuhYl68DJcA0ApJdQ0SNCcJqCNdt4DhzbN06hu8r6Ho3qrqJlJ1oAM/zKBYF\nsVgay4JodIBIZJoDBw5yww09UMlxTSLBlT09HPA8FNsm7Dj45XInnC8I2LQsxn2f4aEhJkdGyCQS\nnFxaIpZIEAQBD37hCygrK9wy2PFTWltd5e//4i/edI5/QUbeBrzwAmzZAper0jk+Dtu2daov73vf\n5bnGPwVYlsWpkyd58K//miv7+phfXmbbxTTJwUSCtWKRwauv5tTMDJVK5VU+FU888Qwvvphnx473\nUtuMEm238IJVYJ5ifYV60OA//ucP8W/+zQPMzJzhv/23P+L48XNUqxY5mSafW4dGntFIhC4Zxm7l\nKIkNGgqkNQvpVUDV2XAhZ4RISR2p6nSJGr6/hMRE0qDlK8QUBV2N4EiLQEpULYaQ0FRCBEGbFjo+\nffjE8IWFToFIbAvxvgxePYdlLZIvVhmmynWZHtrtNr7Q8LwGi+0ipSWH5578O8a3jTM1dSMXTh6h\n3w/44hMzlCoa6WCUlrOMrBaZ6moQOn+ema9/nbmZGT72qU/9TNw5v5ec+5PCMAykdF91LJFIMDg8\nxOrqAseOF9E0H89bIRJ5ln37rqENbJ/o4+z8AhFNUHZNGl6AlB6DVos0Og+9eAq7YpKMxbADn3Yg\niYT6aNstIiJKwRGUmiFU0Y3rrzGERAuK1F2VNILdBJzlu4ToQUNHp4JKmSukQBKwhE3eN3l31xAN\nisx7NUq+S0nC+LZtzJ9ZRMomuuFhe4KIMUU4Wuedd7+b/fv3Yy6fw9B9omEbVVFxPYtsNEy9WiUI\nwnie96bjtm/frVSrdY4fP4CiJAmCFlNTWd7//jd/mn278OyzzzM76zA+3gmwW1w8w4EDyxw+nGfP\nnuT92G0AACAASURBVOsIgiPcdtsO7r77XT9UEGSr1eJv//arrKxYQBxo0du7n49//ENvuNWrqio3\n3XQln/3s1zFNjSCQNBobwDDRqEmrdZ5YTEfTWrRaIep1n2QyTbF4jiDoR4hehKhTLObYvXuC0dEw\n8/MrpNPbqdcr+H6VoaHbEaLJlVdu5dEvfhFLqyEdjzowQSdxtkYnAnMQOI/gtmu2YQlBzpunXD5D\nsbjG9PQ06XSWcjmHaSaJxzXAx/Mc6vUmQSDp7e2l3V4jmcwgZZGJiSm6u6GZmyN98al2x9AQR2dn\n6fJ9/GqVl8+d41yphOG6RDc3aeXzPH7hAtM7d9JKpdi2bRsLCwu0FhfZ8wpDvuGeHlpra286J78g\nI28DvqcXuZz48Ic7NvE/r2Sk2Wzyd5/7HGxs0FUq4VkWM2fOkN6+nVHDQAKNVosTs/OUXYfjx4+z\nuJhneXmDVCrG+fNL7N79fjRN5Zobb+TooUNsboZo5PLsuX6KD//HX+Pd73kPjz/+FF/96kHm5mKM\njT1ANltgaelxMnqZvliIbCyO6/rIZpmk1QTNI6WnCcdVHK2HkaZCSJWoQoF2kzXLJqoETGZCLDZn\ncYIdOGoXtmeiijZhBWL00QzqCCmpYSFIE2ENBROpxAmEiufWkH439eYSaXcTpxkQkg5zeKQjKYyQ\nDoFgAJf5ZpHc6RfZtus2lpbCPH1gldzicUbjO7FrNfKKR8QvMxnup1K1uWFwkEqtRrRa5cTx49xy\n6z99x9Z8Ps+JI0co5/MMjI5y1XXXkclkmJycJBR66lXJu77vcebMITKZXsbGvu9RUqsVOXToJYa3\nbSPbbIK2wdl6jJDSg+3baDQIWT5Nr0ZghQnaTdasAzScHuKiiygtXHeJpCoou1GCII0SzLIVm0Fi\nCEyissUskhiwEwtJlQgaYUUnCAIqqsIIMOe0GE/2kLNaFNwmG45KLJahX3OZn53DllOM9kzTbBQJ\nC4nrBNR9OH78BbLZaylshPB9n3x5jZCxQTY5RjqeJl+/wNTUrZcyeV4Jz/M69uKtFt3d3Xzwg+/n\nzjvLlMtlEonET0Wz8dPC4cMvMTDQ2YdutWqcOPESvb23Uq+vkUoNkEhs5dlnX2DbtgkmJyff8vMe\ne+wp1td1xsa+r0vK5Rb42te+zSc+8cDr/kwQBCwuruH7EcrlFqbp0Gw2cZz9ZLPXMzCwHdetkcud\nQVV3UCg0MQwPRWmgKD34vo2m2eh6k8nJqxgbS3LXXRkOHXoZx0kyM1NjdfUsmUyaI0deZrNaQLZX\n0IAkYANFOtTJp6MfQY3RExpktrZG73CaHdndHDnisLBgUSqtEI9LcrnD3H33AwwMDHHs2BlKpRrt\n9jk2cyGGsn2kkEjfpKdnBChihMMsLS1Ry+c7lcieHgqtFovtNqlolHdu3Up/LMa548fRgoCE4/D0\nyy/zh//rfxGJRCiVSiRfp3ur+y0qbL8gI28DHn8c/uAPLu81PvShTl6N4/x8eo4cPnCASKnEjslJ\nvFyOUK3GlZkMJ+bn6d69m6fn1zhTj9COhjhX2OCZM5/lttvuZWzsVjY31zl58gCp1CoTE1uQUuJ7\nHtlojHCQYTKd5vyxY0xu3cr+/Sfx/R7icZV6vUKjVKSUs8l6PmGpIM0NcEziioGmm/iBhdmyqdsR\nkA0GYhHWGnXafohQEMOTgpmWzc5EArfHxy5dIGZsoVRpE1VMol6A4UXxWcKlgiEGSMtFetGJqjYR\n0aLqedQrPlY1QOKj4qEKA1Uo6L5CwzaJBzZbkxHyUpK3BTekxthcmGF6+gY02YVj9qLGHLKahi8t\npGXhJaIIVUdRFMxmk+2pFMvnz/+TJyOzs7M8/Dd/w6CmkY3FWF9a4tShQ9z/7/4d/f39fPSjv8QX\nv/gtisUoQmhIWSEadbjiilf/XalUN8vL81x1zx4W1os4eoim72AoRbqEIKX2st6cIxNYbA8p6Gjk\nnBongwomIYQdIalEqVo2LTeOGThMEBBCIPHQUfER9BJQAjJCJy49dMAMXGoElAKVYTVMUpi0cRG2\nRcSP0a+GaPthLD+B726iBAbF/AK9oSiaFicUjrBcr7C6tMnuq28lbkCrVcf0YnjeJqpiUG+dYfsV\nBh/60D2vqRaUSiU+//mvUi4LoONKunNnP/ff/4HXWKX/rCGlxHVdVLWzXOXzK0AWTQshRKfdW1FU\nYrFhTpyYeUsy4jgOx49fYHDwllcd7+sbZ3b2APPz8xw9+hIzM/PE4xFuvvkaxsZGOHjwEI8//gKt\nFhQKVUxTQ4gIphkwNFSm2TyP6wboepZMJko+/xS23Y1hjCIlCLFBNpslk9lLrdZEVft573vv4oMf\n/AAnTpzgT/7kc9RqAdFoN6urq2xUVhmghQ/00amMAGwAeTq6kVCki0XHpqz2E6kPk0oZTE6+D0WJ\ns7Z2nqmpPnbvHmZl5RCRyPV0dzsIsYrVbHHD+HX0ZTrVSMtxOHX4KT752+/jW88/SXNzkysNg+5Q\niEouR83z2HbnnUQTCa6enERVOsnDtWoVoShk6vVLItVkMklbyteMe63VetN5+QUZucyo1ToJu5f7\n/3t/f8dz5Nln4V3vurzX+lng7PHjXH1xn2t61y6OPvcc0WQSt1jkmzNnWTYH2b7t+k48erKXcGQH\n58/nGBvbSk/PID09A5w8eZrR0RFmTp0i6brEsmncVBc3bd/ORqnEQ1/+MtCL53kU1ucx2g0yoQgZ\nqdNurJAIAnqNGJoEU7jkfQtH0Rgf2k3ebOE01ik3Wuiih4QRQpeSth/DkU1qnkEm2kUiqaH6TZLh\nGm0zQJVRfHwEDjHqWDKgnxQRFboVMAKVrCo57dfpkQmW8UkSRhM2lqoRkmC7JmlFwyTCBcehN7WF\nVCzFRjHPN77xNfx6Cz+IcHYjT69IoEiTSODiC0GrWWVtDcIjI7Qti/jrPEG/Hmq1Gi+fOkWlUKBv\nZISdu3YRjUYv4x3QQRAEPP7QQ+zOZEhfDCPLJpPENjd55rHHeODjH2dycpL/8l/+PQsLCziOQyqV\n4nOf+3ukfLVlfgeChx9+gmYzy/QV76I+1M38zFHqzQIl0aBX08jIFF2Kw7plEkNhFyEKhAn7Fg2/\ngYFGwW+iECeKgo2GiYOKQEUSBZaBuJTYgMCni46d94YiOO871AKbLXUPjX4UIsSlwHQtTvsenlAQ\nFIh43WgxHU1TsR0Ty26iY3DqyH5iZoXheDfoOkv1Apo4SqZ7nDvedROjo682ipNS8uUvfwPLGmBs\nbPjSsdOnT3LgwEHuvHPfZZ/HHwVCCHbv3srMzDIDAxMEgY8QCq5ro2n+JZ2Toqi47ptvR0FHKyKl\nuJTx9MrrmKbDZz/790Qi2+jpuQnHsfj0p7+C6zrE40McPVqmWNwkk9lBJDKMZdm0Wh5Hjpxk377f\noKcnS6mU48KFOZLJDKbZQtO66ZCnQWIxg1hMo1xepVg0WF5eJpcr8PDDT6AovQwNhdjYeJFcbgmD\nEjYdErIL0AEViNEhInnA0g3KJKjUfEqVVTY2alx11YcJhaJs2bILz9vkhhveyaOPLlAuzxKJ9NLT\nM8HG7AUa7VXikTCqolJtFhhNtSjlNpjq68O3bVYKBRYbDdwgoBaN8sC/+lecOnAA3/dRO1kHZLq6\nOvqsVuuSyd7k5CRPZzKsbG4y0tOJzag2m6y/xVbhZSUjQog/Ba4Djv1ggq/oUPXjwP8jpfzLy/l7\n/Czx9NNw880QDr/1uT8p7r0XvvnNn08yomoa/kXDq1QqxZ477mB5cRHD9zlXDegamsaORBiZmqD8\n0lG6uoYplxep1WpkMhmuvPJannzy2xw//iJHvnsCXXoEcoP7bh1DEYKh7m5OzMxgG2Hi8QxOeY2R\n7hFAoCstPCQhBYRvdSyrg4C8tGgQodquo5g2JauGFYQp4qJqkr6IThC4BKKLiifoc2ysdp1SrQ9N\nDhIOLBxKeDTRiBAljUIdHRfV3yCE2jETkD5RwghpMIaFQhxdRmlSRioSn4BmJMxSNMqOkRHadZ1i\ns029pVGTHq1CDdfuNAqnEoPE/TCb7SJGZY0d3SobBYeoplFOJvnkxz72lnOxsrLCP/7VX9Ht+yTC\nYU4fP86R/fv515/8JF1dXZf1PiiVSvj1OulX2FMDDHV3s//CBRzHwTAMwuEwge9z8LHHaJdKnHr+\nEBuVpxgZvpbs4BDTO3eh6x1fiEolzfDwNKdOnaG7p5/l2AB1S0HRNuiP9OJVKwSKgiMhQxzvYvWj\niz5Cap2mrpGw1vBQKdNGI0IFFR+T1MX8kDLQRacq0k3HOXMJwYiqcNZtU5IhHD/McDRCygujS4Fl\n13GECWICN9ikhI6wQsQUl7bbQBMFNHcC3/PQiVCzbUKyQVq36Y0nmNh7B74fpdFovEqEWigUWF9v\nXrJHbzQaeJ5Pb+8UBw+e/CdHRgDe8Y5bmZ39MqurDqFQmEZjjiDw2LNn56VFsF5fY9eut37qi0Qi\njI52Uy7n6er6/lZUvV6mVssxOLiH/v5xoLOVV60aQDdDQ4O023Po+o3k86eJx6MYRgYpe7Htl8nn\nF+jq6qe3d4R6vcjiYhHD8Gg0HCzrLJqWwnH6aTQCYrEShw9vcPZsDtdVWF5uEYlk8P11qlULv+mQ\nwSagI1qVdDppfDri1QjQQBCWEIvtotHwsCyLcuFZjj7/bXp7+4hnB0ilFZaWltnctLn99vtJJNIs\nLi6xpa+EIpbQ1XkCCTfu6GYgey0HZ2e5YWKCrl27yG9sUGu16MpmkZqGb9vsvOEGnnzoIc6fPcvq\n0hKu55HOZJi65ZZLDyO6rnP/r/0a33rwQQ4sL6MJgYjHee+v/ir/xx/+4RvOy2UjI0KIa4GYlPJ2\nIcT/FEJcL6U88opT7gUKdMb55xZvh17ke7j3Xrjvvk6q70/bz+RnjV179jD72GNceVEUFY/HGRwf\n57qBAcbsMNnsHkKhMEEQcO78STzPQghxybFzaGiSoSGNc+eeo9EukTSiQIpvPL+OaR/k/bfsJRmP\nk8hGOHWySDaq0WxV0LQQjrNGVhUUZUBTumgI8nj0IsgEHiulWRpBQE16eHSRYoiIjLDUblH3l+kz\nohhWnWY5h+n14mMToGMQQyfAZp3thMmicgGbLjZQ0IkHHkiHlpRItQvhe4CBSQILn3ZQJRAORaDq\n+Uw7KuvrDUrNJuHYJG46hdMywWwxGArQZRNbXqAgQ7RxMJU63Ykhhnt7McNhFHjLdkcpJY8++CDb\no1G6L1ZRhoCFXI5nHnuMX/nwhy/XLQCApmn4P1ACdhyH9fV11nI5VlZWmJiYYG5uji/+j/8bvdJg\nfW6ObYFL2NqksiGJSYfvzJ9m13Uj3HnnNbzwQgnDCKOqCi+9dAi/tUqvKNBub5KzLDLCYdP2UYWB\nFBG8oI1BGIGO4hvU/RxXI2liUMRAwQQETSQ1YAPBAJIZYJQOv6wDFgElB1pMopJAlQbLlkeIEr1B\ngINAlSGywkBVMlhBnU1zDUfx6YmGGdC7mDUrdIWytK0lNGudRKAwEBXEwxkqhXMMjV6H7/uvGS9F\n0Wm1Whw9eopy2UQIFVV16esrIuXrO9r+uFhdXWX//sMsLXVs5vft2/Mj28xns1l+8zd/lWPHTrCw\nsI4Qw5TLEl332dxcY3b2u/h+na9/3eHChUVuu23vmwqi77nnHXzucw+yttYgkcjSalXwvHX6+7vJ\nZgcunbe2toSuD+D7Al3X8DyLRsPEddNAnng8ghA2sVgfKyv7SSZVwGdj4zDtdgkpMziOQTg8gG23\nqNePYVlt4vE+stlfZn7+ENnsTnp6JMvLZwiHt+O1N1DswwzgYdNZIJt08mhUOtqRCmCjcU1mFKGp\nOE6eoFpgONlNw7KJeyYbM4/STMHyadCSaSqVGoqiE4/H0SIpIvSx7+otDFzclju3tkbf+Di1YpG2\naVK3bTLd3Qz197OQzxOJxYjEYjy1fz9b6nVu9H08ITi7vs65o0f50l/8BR/9jd8gFouRzWb52K//\nOuVyGc/zyGazbxnIeDkrIzcC37n4+gngJuCVZOTDwJd5vVSknyM8/jg8+ODbc60rr+y0Ec/MwOt0\n8P2zxp69e1mem+OF2Vm6dB3T86gZBr/88Y9z6tQMR4+uMDS0FUVRmJzcyqlTM0QiCVKpFFJK1tbO\nEQ6Hufvu+/jHB78NLYOuZBeuZ/OtwwepN56i/8rt3HHjlaysPEI+WqTdstgsbqKFXNLxDMPNJmFV\n5Uyrxe16BK9tsoyCEUjSCFqim4jsp0FA1XcQIkmYacrOIo6TYxMFHx2BgWSTJg4BYRKkieBiEhBD\nQ+JjKB5tPJJCUg/AC3wkIaooaEgkNfoQJKRABxbaNhkdRlM9CE3hXHkWixBqy0R6JWJ+kelInGRS\n4eVqHndsgi2jfdz/jmuRUtKTTnPm4mL+ZgtFqVTCLpXo/oHS/2hvL8+dPo3rupfVrTOTyZAdH2c5\nl2O0t5dSqcThwydZKrWxBqf4y798lF27+jl99Ls05oukolliQZiknmRKVrngr2OVm4TNFrW17awu\nJZmZOc3i4jrr6xuI1gwTQkMTKlo0xqZTQQibOgoKITxVkJOQlRFCqk7ed+gXkm402tInjUMbH5MA\nDwMfHQuPHA5bUIgBOoIYkjbgkiVGmBoeURnG9gwco5eyt0IIBYUu1ECgoJMhSkwo6OoKw0oPZjhK\nKqHRKB2lyykwhoLug6KmifguTmEVTbviNeLVvr4+NM3iuecO4ftZuro6LbGVyhKFQpXl5eXXzTL5\ncbC4uMjnPvc1IpEtpFLXUCrVLtnMAzzxxNOsrOTp6+vi+uuvflNztWQyyR133M4dd3RI8fz8PCdO\nzHD06HGEMNi16x6i0QSnTq3z8stf4lOf+sgbEpKhoSF+67c+yosvHmd1Nc/0dBd79nyYb37zCc6c\nOUckEiMeT+N5Hoqi4fs2oVAIwwDHKaAo4Pstms1FhOiE+6lqnGZziaWll3GcDK47QDh8NZ5Xpd0u\nEgr1EInswHWXmJ3dZG3tK0gZp1TKkck4tNsuzWYNzZ0nQ4UkPhadqlovnaoIQBvBIgq+0k/DdzAs\nC8Uv0xuWpJMD1M1jrK29xKRmEDF16q0yipnl8Df/lszgToa3bSPc3U1hfoG2ZREEAaubmxR1nT03\n38yf/97vscMwSIfDzM3OciIUIj01xbt37uTP//iPuam3FwVIaBqapjERCvGdSoXK+fMcO3KE2/Z9\nv7L2o1RKLycZSQPzF1/XgEvLoxDiLuAZOuP7c6tbWViARgN+wEjyskEIeP/7O1s1P29kJBQK8cDH\nP87CwgJrKyvEEwmmt28nHo+TyWQ4f/5LLC29RDLZSzweI5MpkU4HXLhwkMXFCwjRptHwEWKedNcE\nNdmkadoYqkqrHeZbR5a4JbmdF//4H9jcPEd1c40tvVeyfXIPKxurzC/sR/V9ru/vp8f3odWiFvgE\nSMIoCCWMDJJE8AjQKQOB9Gjjo1KmF5sJ+qhjs0IRm2kEDTw28LFwL37txsdEkA8cFBEgpSQMKLJO\nR9YaJkKNrUgiqAhCdAmXiIgy65qE43G8SIJmPsBqLLEr3QOKhhEMs+7kaLR9MulBnFQfw71phi/u\n6f6wEEK8cSnzbSrHvfe++/jq5z9Pfm6OU989iallUMd2snfvPRhGmGPHDnPk6UO8d+te6tUCEUUh\nHIriui7B5hx7rxkiE+tnxnZ45O+eoGB3oRoW+bwgGmj0dsVpVdcIazqOHGC5vYAtXTQ0EsInFRpA\n98ENfGq06EajIiGOQpQwHgHrNGgRkESjSogmghIRwGIUiUJAFZUmGRyySAJWRZUuqWE7PjY2PiFC\nRC+eHcbGxQw86o7Dml9D2AHj268nUZ1jhybQAw+kjtZuc8FsMNabYXyw6zVVDsMwuOaaSR577B/o\n6dmDbTcwzRKqWuCKK27l0UefZKS/i/zKCj2Dg1x3001v6uHxZnj00f0kEtNkMr0Xrx0mEknwyCPP\nAfDccxvE41mWliocPvwlPvGJ9zMxMfFmHwl07sPJyUm6u7s5dmyWG264+ZLAdWBggo0NeO65w9x3\n3y+94Wdks1nuvvv7e9rFYpHZ2QUOHVoimZxAiLOEwy3KZZ/+/nGq1ToTEzsplY7geU10fYggMDDN\nlzAMSTq9k2i0m3Z7FdfNoigGrZbA88JImaDdnkNVRwiCJFKCaQoMw8D3MzSbDTxvjbD3NKkgRzcS\nBXmpKvICCp2NtoACghyjpGPjVEWFO64YISoabEunKZUqFFsO10Q1JrLdXFheYHy0G9frYs1v0htW\nyJ09y9g1V+N4KeY9j5W1Nca2b+dfv+tdPPLgg9y5ezeVhQUCzyMhBOVqlUh3Nz09PSzNzHB1OIwX\nCpF+hQVAVgg8z2NhZuZVZORHweUkAjU6HUnQaY+uvuJ7nwQ+Rqc68ob4/d///Uuv77jjDu64446f\n6i94ufH44x39xut0OV02vPe98Cd/Av/1v75913y7oKoqU1NTTE1Nvep4KpXiP/yHj3Py5EvMza3Q\n1dXFpz71f1Gr1fjMZ77I9PQNDA1t5ZFHvsILL5ymv387k9PT5HLzbK6dpmGukR3YwbHDB8lIg0ar\nTKGRwLLqRCMm6XQ/WmIHs2YFc32dwDQRUuKLzhOuED4ubWzaFBnAJ4mFhodEp0APKlEygMRGo4st\nlDEQF3svbM5iUUaQwRIavfgkpI0rEwRalIrXpIBLHRWLBJOsIAjwFZ06kogQ9AqdZc/CiIyxY2yM\nldIJLDeEa7XxHIeG10IRKoofIh6NUjM32bPjqktj2LIsmpr2qqjw10O5XOb4hSXmDh5juLeb6ekt\nDA0NsZDLMXXllW9Lhkk2m+WTv/M7PPPMM7xQEExN3UQ2O3BJkBiNZmnYGm3HQjfCNC5u69imQ9OT\npGMxcs0m5+sB20beQbjeRM2kWFnywE9TMDfYNjTI4lKFwIeMPsGs4mBZkqoT0NJsisKjiYmNT01K\nhlEwCKPREagawAohPIZxCBEmoEGODAnyNPGoUmcQm2EMEggULJkixwUcJAEBCXx0FDzCCDw8JE1c\nfLEbJ8iiyzSLZ1/gCtWh7fvEtSSaqqEYIfo0l3AsRDabpVAocOH8eQAmJicZGBhgbGyUa6/dhW2b\nNJtFJia62bLlLsrlPM997Uvcd/MNbEkkKJ8+zVeOHeOeT3ziR95asW2btbUSo6O7XnU8HI6yudlZ\neoaGpoFOZ1OjkeWhh77D7/7uv79kWvdWyOfzKErqEhH5/j0yyLlzJ4BOFaXRaKCq6ht66Egp+cpX\nHiaR2M1tt01z+vQCUmbJ50vEYjkGB0e5cGGeUCjNyIjK0tIivt9E03R03cUwBmg0FjAMD9dVkLIP\n2z6HlN/rTGoC4/h+L6qq4vuTCJHD93OEQmUsKw3OOQxiSMawsClRwadIjQRxMixjU8HCQyUkrkGN\nW2y9apCxsX6apSKubaLFHLYNpbh5oBvhOKyvq+wYG+bMUh6tEVBrlAhpEc6cepz/8/d+jVtuuYkg\nCFBVlXK5TH1jg1t378beto1yqQRCcG06zYvFIrZto8dizC8vEzNNYpEIuqJ0th2lJKzrhC+Kyn8c\nXE4ycgj4deCrwDuBv37F97YBX6Oz3SyEEM9JKc//4Ae8koz8c8R3vtOpVLyd2LcPHngAmk34Ce6L\nf3aIRqPcdNON3HTTjZeOPfHEc/T1XUdPT6djYNeua1hZeYa1tQ3ajWWclReI1jdJKx612adxQgrb\ntr6blyyboWgWW9ost9u85+67mdy7l3/8/CqDMYfVjQ2GDAO3VmPOdYlISUUGuJjE0HGJIolj4KOw\nSA8BCdIUKOEzgIKGQRGFBlDFI84iJmklgittAlljhCghJQSE2BSCjFRRsVggShOVCmHUQEdQx8DD\npUYgNVbWX2J6yxbSEZd8vUjVddimxtAUhzqwaTpstjze/+EPULQsRD6P5XkUgoC7HnjgUsLn6+Hs\n2bN8/vPfZmjre1myDmBvVpldfZHhrcv0X3UVd9911+Wd5FdA13XGxsYYHJq6NL/fQzgcI5TqYslq\nMRFN4hhhqmaThUYZx9B5/ty5jr01CeaYoVSHeLtNWPcJ6gGu8GhcOIu0bUwRwdc1RsNZGlLBcQ0c\ntUVN+iipEerl0+Qw2XKxIjKLSh2dAiECBohd3GxJEaeIziLLxAhTJ4FOCgUbSRQNSYowJgkinCOG\nTwYfOMsmSQIGMFkBBonq00hVYrubaDJABi6+FsPQdOKZNP0Dg6y2apzNV8iXy3zp05+m5+LifsT3\n2X3nnUxfcQXJZIjx8ZtfNXYHH/8CNw/0suWix0gyFiPVaPDkN77B1H/6Tz+SlkTTtNe1mZdSIqXz\nmvMTiQwrKy6VSuWHbi8Oh8MEgf2a45bVIpmMsbq6yte//jgbG3UgYHp6iHvvves1xmbFYpH19Qaj\no7vp6YGRkWHq9TpBsA3PO8stt+zmD/7gz6hefKQeHNyOoqSoVApYlksQtGi1KihKG99vIETzovbm\ne39rgo5DSKnTjaIaSJkGGsAatn0BDYU4XYTw0Qiw0XGIE8cEwnh0YTMAKASGx+Cwxu///n/m7770\nNebmDtHMr7N3Sw/Dgz20bJtGpUI4mSQTj3PdthDN+SW8UJ7uZJy+niQDA3202+1LBE1KeUkzEQqF\nGLhYDQuCAN/z+Me//3tiqspssciI41Cp10kYBjnLYikcZqTV4t49e36oeXs9XDYyIqU8LoSwhBDP\nAsellEeEEH8mpfwdKeU1AEKIjwPq6xGRf+7wfXjqKfizP3t7rxuPww03wDPPwC+9cYXyXwTOn19m\ncPC2S+8nJ3dx7bUrfOPrX4NckzEJEVWiyAhpvwSexkurR2i6HhGnSH9YI3d+gfktXcQSw/RlBggi\nVZxcjgONBlHfZ0VGaZPGJkSAgskiLaoIugjRRCHAwkAhQEWn0/vSoBsbBQUJ2MRpU0YRvbik4vmI\nzAAAIABJREFUOSlNSkhCgYMfSCL0EBc6NbmMjksNjSYQx6QbBUGEFSxEEEavbrCcn8VylugSJpPJ\nLQivSVRPEBMqllUhe9VePvmpT9Fut1manaUnmeQ9u3a96QIgpeTRR5+lp2cXiUSGvr5RchvzNCqb\n5NUCv/tv/y3xt5n9Dg0NoarN1yx2plnn5pt3YrbTzBdWqUZSrKzMUjQbjBseKT/O7vFxzqzWWZo7\niaenGRgYpCo8Cp5FsbVJIF0axDBlmIptMiI9Ro04OVnBFxBTNMr1BeJCEMgYZ3EpEwGGMQmQNIER\nipTpQhLFR7loedbAQCWOJEKaNjY2gjgeCgF1pjCJkSZHmG5sQuRZZZMAlQAD2/eIGSnUIM9QZpxW\nrcZIIgmKj2W3yZULvGy38AYmOfP889x/443oF9uZPd/n0BNPYDoOzc0ZvnPyGFO79zEyMk0+v4Tf\nXOXad73adTWTSOAsL1OtVl/lavxWUFWVW265iieeeK3N/I4dw685v0NSgtdYsr8ZhoeH6e3V2Nxc\nvURKfd+jWLzAPfdcyV/+5T8QDm9jdPTKizqTBT796c9xzz13kEql2LJlC5qmYVkWa2sFzp3bj2U5\n9Pd3Mz09STwe58yZw3zrW4fYuvVmzpwpkc97eF4Zy7qAEGOAjuf5OM4wrZaB78/RkZn2IEQeKXN0\npKdNoNNxIsQGkUgPuh4jkxmgUDiI18wQx0fBw6TEEJIwKVw8XCQNSkiGEWhoxiLvfvevUC4U2KLa\n3PWRu1nJ5Th28iQzCwuckZJbpqfpKpcJpKRqmniGSrK5yQsvv8jAyAjf+sxn8ONx9r7nPWyZnKRU\nKuEYBvlymb5XaD2WCwWCcJjmmTN8dN8+njYMnnnySezNzY5/TlcXN2/Zgua65NfXmZ6e/qHn75W4\nrHqNH2znlVL+zg+8/5vLef2fJY4cgaEh+DG3Wn8i3HVXpyrzL52MpFIxTLNFLJak2aySzy2iSOhN\n2gyZbbplhMDzaLUrZGSA7qtcKC+yJdqFBih+lF5bcP7Jxzjb8OkWeSKhEFl0MhGV/ZZHiwlahNGQ\nOKQxKePTJEGSND2Y6LgUMPCJESApECaCgoKggUEESY00Pqt+BYs0DiPkUYnio9EmTBhLSsoECMI0\n6WKOAkO4WHiYqBTpRZE2hl3jxMlv0NUVJ9lQSMgQrh7FjScQQrJ7yzYKro2maa+b1/FGsCyLUqnF\n6GhnMQqFIoyN78QeMFlePky73X7byUgsFuOee27loYeeJxIZJhyOUSot0dXl8pu/+Zs88siTfOc7\na+T8JNGxG4jJIyjlPOVSjYTvU96skZHdlO0Cq4tzqI0mil+nIR2ajKGTxEFHJ6DuOKQ0jyF0THcN\nQ8SwPBsFFRcHExUYJUCjD4sKoGLgEadNhU08mvgodKOi4rOCS4QGEbox0fCwkeiU6SeEj0qn3yZB\nBB0VF1BIIan5BeqOyoCu0p3qZlXpoRXSiYWibFQKzBUr6GM347ZMcjOLXMh0sWPHdoQQKEJQvXCB\nJ+bnueuaa1hgkRef/wLz2T7ec+970a/fRfgHgvGCIMCnozX5UXH77bdQLtc4ceJ7NvNtJiYyfOAD\n733NuZuby2zZ0v26brFvBEVR+OhHf4UvfOEfWVpaRwgDaPDOd16JZbn4fu8lvUoQBKyt1Xn55fNs\nbLgXDcie4GMf+yBHjpxgbm6BbHaMZDJNoVAll3uRsbEEBw7sp1RK4DhRWq087fYmUnbTIRclVBVc\n10DKMTqVDh04AyQIgs7jBjjAFBBCCAdV9YE1dF0nGtUIhxUqTXmxhdemC480Bk3AQyGJTh8eedYJ\n1CS9XSEMNeDo00+zb2wMTVXJTE2xY3yc+fV1TpsmJBLkTpzg2MsvU2u1iEjJy+UyAxd1aCtS8o73\nvY+//aM/Ij04yNaeHpx6nQcXFrjliivoTiapmCZmMknY95lIJBBCcOdNNyFrNbxqlcVmk+v27uWK\nyUk0Xee7+/ez56ab3rTC+kb4uRWP/qzxdrb0/iDuuqsTnvcvHbfeeh0PPXQEVYmxcnI/PQisxTmy\nbg3sJhndoeX6hBVB4Emark1EBERtE0tzsR0VRYtiNG2iso7u+XRFE1RdnYX2BgFZIsRQiVBHx0dD\nMgHMYSPp+KWatPHI4WPQAGqYCMKkCaHgYBOjSBcRCjRpEEWwmzZFNFxUdDZYwKVJmyFUJhF4NBhl\nmU001gkTIqVOYnEeQ28QjQTcdeutPPLY87zcLGMoKkmnwp5r9qAY0IhaP7LVt2EYGIZyqQphmk3O\nnXqO+vocrcYKD/6N5J777/+pdWH8sNiz53oGBvp4+ukDPPfc83ieAvTyyCNPsWfPVZw8ucDOnbcx\nPztD9aXv0KcKSsUS+WIeD4kqmoQTPVhBnYa7QVhWUEQvEWOCtuuhSUlVmhhkWTcvMKkFRGjj+hoK\nEXwCemiycpEwRPGIAmUkDg0sokg8mlhIdqBSI4aBYIgWR2njUMYgdNHgO0mTNiFUXCQ6EQQGUVQs\nkrQJU8NGwRJtdOHiW2UGkiokB8nZCsueh9s1hKFGaVYXKLYCvv3tF3Ecm6uuuopCoYBdLLJl1y4G\nursZ6O5m73XX8sLiIvv27eVsV4zZ48fZPvz9ysXcxgajV1zxY2UW6brOhz70y9x5Z/GSzfzAwPdb\nZ5eWXkSIBEHQortbct99P3q0eTab5bd/+5Osra1hmiZ9fX2kUin+9//+CvH49ys5i4tLrKw0yGS2\nk0ymGRu7ilJpg89+9ku023D77b/E0aMn8P1hDCNGLlfh9Omv4nnDSDlFu12k3e5DyhSwRKfZ1sL3\nAzr9Gg2+7wYyefF1m45F2RSKUiEIkoTDKlI2aDaP0W77VCoaqlsgIMsGfWh4pOm0f7epk8TARhBB\nRcMlmbS5adsoseImC4uLqK8IjdQ1jW0jIywtLNA7Nka1UMB1XRYPHsQHMpbFuK4jazU26nW+vLmJ\nEgRsLC0hJyfZPj1N386d5HSdvh07MEwTzXU5fvAgW8fHIR7H930Cx2Egm2W21WJxY4NQNMrU8DAh\n36darf5YUQK/ICOXCY891rFn/1ng6quhXIalJXib14afOizLwjRNkhcD8d4I+Xyew4ePsrpaYHi4\nl717r+OGG65jeXmFL/zZ/8uOaDe6KsnGPRKNCC82GlxoNrG8EKrQiCIoCxMVgRcEqEocPWzgSQtX\nakRlkhZp5lp5ojLCphujiYaGRhONgH4UXFxsQMEjoMxZ4jRRLxINAwOHEElKhKkigCRhUqSp0MbB\nJQU4NGmhUqOfFgY+SaCMzjgAgjohYgT0IokQZgMryKNHoIFCJJ1lbl0QTV9HyvCwnIBNc4XTcy/i\nKTa3ffwjVKvVH8n2W1VVbr31ah5/fIaRkV2cPPQw3a0aXUjGd00zAnztr/6Kj/zWb9HzI3bo/KTo\n6ekhn68yOnoLvb2dG35jY43PfOZvSSa3EY0mWDryNIbvYTdLjAuBisKG9HGkSd7KcUMmy3PFVUKB\noC2j6MIjYuiYdoMWoFInLOuIoEnIt6gIjxiDNC52N3RjUMQGwrQQpEmSZwOTFDoSSQwNF4NeHGpE\nMJAMo3KWOD59qPQSEAEKmFQAD0GOCgFgoTGODmzQFFW6koJoJGAwYzIWDXG0OM9y2WOt7uCaPsGG\nRiKk8bJbYDAeZfNb+/naoZfx/QDpNtj+inRmRVHoi0RYuPD/s/emQZad9Znn7z3r3febW+VWu0ol\nFdoXEAKhFrLEFtjY4MENAxh3dEe3OzpiImYc/aGZDia6JyY6/KG7HeC2GzzYjWkYjBAGraBdaKtN\nqj2rsirXm3dfz37OOx/ORVAWGCQklRTBE5ERmefeuPnmPSfved7///k/zxned+ed/H/NJj9eXiaj\nKFhSkpid5QMf+tCvdY4qlcrPHbP9zGf+Cc1mi0Ihz86dO1+zAFpRlFeIr2dmqpw9u0mhEF+PS0ur\n5HLT9PtnyGTi66RcnuaFF54lkSizf/9ustkcS0vHOXfuMI3GOs3miERCYzDYJIqSYyKiEOs/KsAs\ncJyYnFxHnB6zQOyT2uanqTImiiLIZnu47gjPG6HrCySTU/i+TegfJY9DwDlMYIRLDoUSFhYqGsTi\nZfrctP86PnnHjRiaxtEjR+Jxe+DFs2vU230KaY26ElEMQ3bm85zv9aioKuFwSElKKpqGFQQ0XY/G\n8jJXTk+TKxTYbxicOXyY6b17MaKIHz74IJ0jR5jQdZabTf7miSc4cP31jCyLQ6dPE7kuVV1ndmqK\n2osvcurMGTLbt7/mCulvyMgbgF4PjhyBW2+9NL9fUeKqzAMPwOc/f2nW8OvCdV0evv9+Tj3/PJqU\niFSKd991F++46qpXPHd5eZn//t/vQddnyWTmuO++E/zZn32DPXu2USrluGHvPLuqVVabHQ4vn6DT\nqjOyLCQKk0S4MuIUGkM1S0JYZJDoCFKKSbNnM3AdWkLBk4J2GJFkgINPBKQRBBTQMJGk0WnGpqkM\nCdlEwUEjQ4hGCxWFaVxcKgwpI+gzoseALhZ7SaJh4NGnh80KAoMKGjoRKSJcEgzIEzBkAGQYYdOj\nxpS0SdnQ0XVSoxQThd2kEz7Ly+dQej2ivsGFYZuPvO86dgQBf/Nf/yu/+/nPX7RL/WW49dZ3MRyO\nuO++e3HWjmHmMszOlrjyysvRdJ2p0YhDzz3H+9/ktMYTJ07S6yWYn198+Vi1OsvSUp5a7SDHX3oB\nv7kKzpARko5UGaAxRKUJ1D2NR08ss4CLH0XU6LDhdAlUjUD4aDJihA16wJnQZQ6F7dJgwBoWEp8c\nLQQWm8A8BXIo47OWZYsAgUISkzQSE0EamxGQpIDOZZgEeEg0LGxsfGpozJCggAcE5HFoI5lCYyYr\n+MgHdlJIpzl94gSPHzuB5sCiaSLUkHrUJeMLJqWCgkK9scS6mmOmO0lo6oyEwY8On2OqVMIPQ3RN\nwwsCzGSSVCrFH/zhH7Iy1ojk83nm5+d/5emWV4ufNx33euHaa6/iqadepN3OUypN4boujlMjk/HI\nZAqEYYCqaphmiiCwACgUJgiCo0hZod9vEAQZej0D33eIh7IKxCRkCxgRVz62AceIaxld4LLx8SSx\nVmQT2EDKPFG0iOc1UJRpVHULyxKEgUuGKTIc4vKxeLU2/hsSgCCiS4MBUEqWSCXy/OCZY1y1a4Zs\nucy9P36WRj/BoAnCl6yNagzCFTaLGkXbJrAskv0+IylpAH3bQwoNM5QoEWy0OhSrVWzbZk8ux5Gl\nJdYsi8RwyAf27EERgssnJvhvjz3G6Ac/4H3XXkvVdRl2u8ht26hmMkwKwcGVFdxdu35DRt5K+OEP\nYwv4f9B6fVPx/vfHfiNvVzLy99/5Dv2jR3nn7CyaqrLWaPDl//D/cNm73s2tt97Evn37ME0TKSX3\n3vswudw+8vkKS0vnWF52MIzr2dqqA2WOPPcAz2carJ8bMGxIhmGRFBFZ8owQRIBKgCVLuOYETfcc\nRbtDB4O6G/tESOmQpkkeDw8TmMSmhsUKkiwBKSQdBMuopAlJ4uPQIk2PEVVmCGgjqQAKJ1mmzIAI\nA48We0hjYuPSp4pJBp0+W7iYeDSRCASrZCmSJ0KjTZ8uggvM06OIzkBm2FfJMvA8Ov0tcpkptESC\ntfV10kaCcmWWdx44wGS1SrrZ5JH77uP3P/OZX/mcaJrGhz98N9lskuNKnyt37ryodF/KZKhvbLze\nl8IvxdZWE8P46U4/iiJWVlY5fPgc6+vHmCpvR3c18oFknSzn0AhRxxkyEh0d6ejomQ7VREDNiTjP\nOjLcjmQCQZ2EsoUXOkiZJCRggE4DnRESFZNobP8esUKDDBkgjQPMUyNC4iARKAT4OIT0UWiRwQGy\nKCQJCKmjUGMahQIdsnTpUKXFDjqUBGwqKkZ5J4fPbCC8IUfOrpDpDSlqJTQzQzqKmPPbFJQsiiiS\n0EJwcxhoeAmThW2z2J0OR04NqbUeZHZiJ37gMKDL/zXu7QohWFhYeNNbbq83isUin/vc7/Dd7z7E\nysoZDOM8zeaAMKzy8MMPoeuwc+cOcjkN8Dh06GlMM8naWpdWqwvMoeshYZglth1bI27FxC6r8e1z\nHca1szjdJA3UiN0sOsSEZB+wSRi2sO0CUWQRRQ2k1Mavu06SDaYIiTDxyZBH0mDEBVwkKl3M+FFz\nF3PVm2n02vzp3z6NPTiOK9NE4Tzz5SqZTIY9E9M89ux53OY5Fkt5aq4LQUAGeEJK+kHEjGqghBFD\nBdpewJVrG2y6Lo6i0DVNzrZafPbAAZSx6LjvulxVLOKORrSiiGq1ypXz87xYq/Hs2hoTmQw79u2j\npWmv2cH3N2TkDcD998Odd17aNbz//fBv/k3syPoqxOlvCbRaLVaOHuWW+XmEEJxd3+CB589hu2Ue\ne3iVrS2dqakX+OxnP0EYhtTrI+bnK/h+wIkT5ygWF1FVnXZ7jSuumKDW19lYcZhyfSZJoAgDW6aR\n9PBR2BAZIjmJQMGy1nAweEn6iKGKSZI0AQKTNAkm2SCJx9LY6yPHWeqM8CnG+THkgQlc1vApoLOL\nkBpdOsTjfXk8hkgm6ZJDZ8AUPRYJcRD06SLJoqJSxMNinQQNFFQ2cOnRpE8BiQ70mKHJIhptBLqu\nk07pFBN5TiwdQle3kQxDqrrO3skJOt4Jjh08SPn225kpl3lkaekfdUyVUtJsNgnDkGq1+nKbbHZ2\nltP5/Cs0BO3hkIlL4LZXrZbwvNWX1/zCC0c4eXKNen1EtXoF7d6QhB8QkMBkG30sBIIFBBoRCi49\nHFZDk1ZooWBSIIuNxMeibE7gRzY5/zTTuEwqBuejEW0y7CZJFpUuCg3SrCEx8BjgjgeuBwTsRGMF\nmzOoFFBokaJNQBuPEBebEjouAW1KaGzHQKBgYGIwQqfOgKp0EVoKzxlw8ngDQ2qkXIvJMEDFJ7Q8\nZBQwJRXCyMaP0tjukISaoaIEbI3aeKM5hh6s1rt0hha6sYCRnWF6xw3ce+8P2blz52sSqr5VsW3b\nNv75P/80g8GA733vPr70pQcRYgfpdAXL6vHEE09w4ECGTGaB5eVjbGxs0Gz6GEaV3buv4cUX20i5\niqLMEoYp4jbMBjHRMIj/p03iSshPUmS648dyxG0cj3HGLkHwzPg5k4ShB5wGPAR1VEwSlEmg4CHQ\nyCHosEWIYJEOgn2ZDAPb5shLJ0kONBJqAV3JYVs+61tb7MxmqXe7pKVPWpiYisJUKsXaYEAxiojl\n6grroUsDCCOV96ZzFFEoaRpeGPLk2hqZqSmMn7lxtIdDJg2DdhCwa98+tk6fZqFUQisUGJZKvOu6\n61BUlR83m6/5XL3NblNvfUgZk5F/9a8u7Tqmp2F2Np7quemmS7uWV4t+v09GURBC4Pk+Pzx0lkJm\nP9WCwVK/z8LCVaysHOepp57hlltuRoiQKAqxrBFS6qiqPk72jPC8EUZ6nrW1s2QUiSc1hjLAIMuA\nAbYwMOUCEoNh6JJgiiEeAosZTHRCXKCApI/JOiaLJChjAzY6KlXquDgozNIlxGCdHCNsygSM0MkQ\nUUfiIsd74AIOKpINYiv5ISYmHtPYjLAZoSCQlGiQJ85M8IAOGUwKZNDG5X5Bgy0gTzEzja6F3PKu\n6zn5oxcolUqUM1mi9TWGUYurthWg32djY4PpmRlUXf+F5fdGo8E3v/n3rK/3URSNVCrit3/7Dvbu\n3cuOHTt4fGaGM+vr7JieRlUU6p0ONeD2669/sy6Tl3H55ft46KGnaTTWECLB0tIG58+fwXFaSLmD\nTCbHyFth5PaYDk1cGuwlT5YkFhZpEpSAF+wG80RItUIqzBIg6OETSYnwkyhk8WkxiFx6KCxgINBw\niQc2F9BpY9NmDpV4jM5jFZUlVBR0ljGwMXCwSVNkAZURTVpYuIxw8JnDw0RDRccEhuhk6WESCZ8w\nCrFr57CEyj5zmpbjUCGiF1q0hxYOGrqeJAp8FNVHR8P1XZQQtHQaqScYWTYlVSUlA0p0sVRBt5fk\n8D0nWF/f4q67buW2296NaZpv+rl8o2AYBidPrvGhD32c1dUa9XqbiQmTUulqzpw5zCc+cRs7d97C\n0aOP8/jjzxBFWSYnq5w8WSSKXMKwBjSIyYgJzBFXNVziTOYscYWkSqwfGQF7EWKElEOgCNjAJIaR\nwvO2EU/dtFE5ioJHgiImCipgoKCisI5Biwgdi5IQhMMRjzz1d3i2wWXlCS50IzqjCxTkNLnAZHlp\nCUdKqpqGFUQ0bRt0nbyikIkiWsQ1m22onCJigwiMBH0ZsGXbDBIJ9h04QEsIVvp9rhxPxRiaRtvz\n8HSd7du309rYYGBZ+EC5UiGRSHB8ZYX973zna841+g0ZeZ2xtAS+D5dffqlXEldH7r//7UdGCoUC\nwyi2Qq93u/hhGlNPMLBGZPOxWdHExCIvvHCEO+54H1deuZ1jx85SKs0RRXFMdadzHlUJOfzsc7Tr\ndTwELVHCUEEKGyW06OGhyWkUoeIREEiPDB5O7KvKkAlCfBR6TBAi0cb7XociPtvQiUjj0yCizxan\nGDKJwRwaSUx8bEak0XBxkASEXCBDQBaLgIAkHUaENHCYxSU7diJxCDGJpXAlwENhgMIQFUmauCzs\nM2KCDYbsFSG+3yRfWCA/UWb73inEqM4o6FIfvoT0OqzZWYQwWBk+TXXnDNd//Pd+rijY8zy+8pV4\nimBhIc4yGI36fO1rP+Bf/ss8U1NT/O6nPsVD3/8+T7z0EoqUFGZm+O3/5RdngbyRSCaTfPazv8d3\nv/sAP/zhjzh37hiGkSKdPkA2u50oCglDyVDxWWkq6KGHisACQnQiPEChTICGIIoEAQJQyQLrXp8k\nERYK20mRRgFs8uPRy4hYECuIyKIwIgEkiPCQzAAvMc0KUwhKCM6SRTJNnAXtoOFjEuDj4+PgYI6v\nNIkggcMAgceilCyEEQ0UlAg6bodQCo6LHMgyDjpD2oRBmwlhkjMEfmAjoiYN8uQL03Q6W5RkREMd\nMpUsoHQHbJ58id7E9UzNXk8qNcfjj69Rq32bT3/6E69rYN6lxGAwIIoMcrnYQTWXy6DrGgcPHsc0\nSywtHeHUqTN4nspwWKff76MoRSCLaYLr1omiIlJ2gCni/8rs+EsnrnCYxBM0GrF+JCDeRhT5iYgV\nqhjGBGG4RRgmYPw5kQf6CMpEeCj4gI1PHR3GQYtlvUAlHQAK5/rnWWttMOmMIBzRj8Amie1M4Eno\nCI9p1aPlqXSHQ3KALQSWlMwBqCppCWYUcLSzwWIuSytIMDU5ye0338yZKGLp2WdxazVm02ls3+fZ\n4ZDfvv12kskkB66/nicfeYSz3S7vUhSev3ABc26OW34Nl/TfkJHXGfffH5OAt8L/8J13whe+AP/u\n313qlbw6FItFdlxzDUdfeIG8aYKU2J5LzbI4cM0142fJl2+kd999B+32t1hdfRHD6HLu3IOEbp89\n5VkKmQwH/cdQZRlFZCmmTUb2kJ5sY0ZDRlhEsoGFIElcnnYpkcbCQBCRwcaji0WAJDGemJlGo084\nnpDR0YEOgog+KjUEBUY4eJhIHBQGmASUOY+CjiCghE8Rh3PAWQJGRDSI918KMENsVewg8IBZtPHO\n20WSRUEnIE2bNdakhWa7bE8ssKmqfOJ//RTPfOtbXDhzhqLicqYnaXQdEvik1RzZbMh16s8vxy8t\nLdHr6SwsbHv5WDqdo9+f4fnnD/PBD/4W2WyWj37849gf/jBBEFwUUX8pUK1W+dznPomqBiwtOWzb\ndi2nTx8ek1OJ5zmoahI7NUQZGoT0UdExlBRO5ODLARCiIXBkF48ZsghAEuLg4lMmIIuCCWQJcXFQ\nSRIhkONAvAAXgwEJ1sdWdwEWGcyxT64CSBIE6PgMmMVCw6BFEoskKkN0+gwpY2OPr7c287ikgL4M\ncFEo4RNFIccxSMp5yoqJFoWEJKgrEjU1ZK7qM+h5LLsRAy2iuXEYLfBIiQE53SBl6YRKQFWfoNvZ\nZEOeIXXrZUxObuf06adZXV1l/h8EIr4dsLm5yTPPHGRjo8nc3AQ33ngtmqaxtPQi3/72A3S7Q3K5\nKtPTV7C2do5q1ePFF01SqT2sr58nmbyRRuNBlpZ+RCIxj5QCITooyhZhqBGTjyI/kZfGPy8T+460\niXUjPWAdKRPELR0F8NH1AFWtoKoQRVso2BhSRY6fFRCSJ6CPQocUQ9Lk6VM1Kii6oNeukY0strkj\n+g7oBFQIqYoBunRxpADpEckGfU2J3V6FYBQE2MCsqsapv6FCC40WOdLSpVTI855rrqE+GHCuVuOu\nf/bP8D7wAe79xjc4tLJC/rLL+PznPkf73DleWFlBAZJXX80H3/EOJqpVpmZm2Llz5y9N5v3H8Bsy\n8jrj/vvhD/7gUq8ixi23wNGj0OnAqzBOfEvgrg9/mEdSKY489RQ1e5OeOsEVN930cqrn1tY57rgj\n1idkMhn+6I8+xYULF6jVanzzf36HU89sYup9bHeTA7uSHD3RZeiphK6JofYxpUvdS+EpDpooYUYu\naelik0FjSAIFlwuETBKh0sTGoMU2BAEhNgA6ET4+GZxxRFps2dzBYJ0sKQy2GKLiUyKgi4/LBBYT\n2CRJ0iPFTnSWGTAkQkGhgqSHpES8645rPWK8z1bp4+JSIEsBwQCbHOvMYGoqD55uYOxcwwkCvvf4\nU5TVOZbaOQL1ZmqySyRGFOUE82GZH/zgMd73vve8wqCo3x8gROoV5ySVytNodC46lryUKu2fg+3b\nF9H159H1DNPTM6yvP0+n02QwCEmpSfZMZ1he3cJyh+Qji1BKhKLTDWEDk4AAkz6WOEdNTqChoeDj\nMCSPTQ6Bhs4UBht0KCEJSBIiGNKnR4IsOhlCkgQkUaiNPTTnMckC6XHmjI2NR0SNCioTqASo2ARs\n4I2nbRy6JOmSJUsLi7T0qSCwULDRcEgRsoEdGbjkECRJp3dQ3Kew7Fqkpy6n6OvIU0eoIgp/AAAg\nAElEQVRJ2k10MWRHwqTjWYS2iWdmUdQshD7VZEi/12VycgIhcrRarbcdGVlaWuKv/urvx5N1sxw8\n2OLHP/4a4HD4cJu1tRyKskC3u8HGxvcolfKsrXlcffW7WV1dwbIStNsOcC1wlCg6QRTZRFGaZPJ2\nHOcIYegTVz0CGGcvwxCVFjoKPklCysApYv3IJDEZWQW2o6oeuZzJqO8jvGVytImADD1GTI2zuxU8\nJAE9kiTRVA/Pq1ORDkL46EgaqECSJjZ7pIONg8OAOSTPATLSqGoafeJG0jyQiCIGSgaUHHYEaaqY\n0ufJzRo3eB4d12Xoeezfvx/DMLjhhhsIw/BlV1zXdVldXUVKydzc3GsyN/tF+A0ZeR1h2/DYY/DV\nr17qlcRIJuPx4gcfhN/7vUu9mlcHXde54667eM/tt/P+kyf51rceJgg6nD3bwXWb7N9f5aabfpqD\noCgK27dvJ5fLkYgcKv46yqDNO/bv5537b0H6p1lbb9IN+wSRJDDyJDKTTBV2Mah1wNEYyToOK2RI\nEJLDIEKhDggs1giw6FDCRWCikSSBi4VGGg9BgMIkaQQ6kgGCAT4ONhXWUZBcT0ALD4cTrDFNkwlM\nEgQkUDiPRglBTs1QC/tsjUPDJQoBKgqSHkMsUkCNOjUEgiwFCoaHmoqYiuDCw0+hbitS6dnUWGfk\nTZBLlJFKhoF3gl4n4nwkabeX+Iu/+Bqf//ynLtIHVKsVoujgK87JYNDkuuteaeP9VsLu3bvZv7/E\n+voKup5F0yICXyGp+uyfKrNtcorFyjzPHf0m7qiPEaXwjSLdTJXAapPzV9kpXCazNs8OTnFGaoRo\n+CyyRp0CEgOfBhKLJB7DsbmZiU2WAAMdBQUDicGAEJcOi0gkAhuNMjarNAlwaCDQqRKhAAo5EkRk\niGiTpoQ/DkfssU6OJC4mppBkpUIThzxQJgN0sHDpUGHomvRtyd0f/DzT0/v50d99iaKWoVKa4sLo\nAomoz6wS4pkdhq5DE4e5Hdeye26eVq3G7j17APtNd9T9dSGl5J57HqJQ2E82G+++MpkCx441eOyx\n5wiCOTQtJIpUdH0nnucyGq1jGDqbm6vU6x06HY0w1MjntzMceui6SxCcRIgpgkAd58xIYhFrirhd\ns0meLZL4jJhEpYCPgkeaeAR4CchhGDOoqo9p9ogiCL1TzLDGLAFVoEPIiHUGmFgo6IQY5FASSTJJ\nE3sgSCqCTpQgIQRJqZEkQw+XIzQpE1ISgrKUJIE9iQTVTIYFXWd6MODQcMiFIESPElgoSIrMqQah\nMDkblfnbo0d5/223sf/d735ZxCyEuMie3zTNN2wU+zdk5HXEww/D1VfDq/CSesNx993w/e+//cjI\nT2AYBgcOHCCZTPJXX/5zBhtblPJpgm7I1tbWReOH7Xab//If/yPHHnmEymCAoao8urLC1J49LMyW\nqTfT6LZHJWmyNtwkIo2hp5gtjahv1FEBFZcseVpoDEmhEhACDjNEJBiwQZoyBgMqDIkQOKj0cEiT\nJ4WKgYlFir14PEtERIYUE7gUkHTQyQMShxE5AuxxYHwOhTMETIcjNFS2ABdJcaxHGKFQR0ejgkkC\nlwEBK2SIqJLCtx2qjkqn5eIqHnvNDAXPpcM6ZlphaG8hxB5UNUsqNYFhGKyswBNPPM3tt7/35fdx\n+/btLC6mWVk5zvT0LlRVo9FYxTTbXHPNBxmNRrz44jHW1mpMTJQ4cOCKVwSPvdH4ReODMzMz3HHH\nNfz4x2u0Wj4vHjxB6Bl49oDltR6DgUMlP8nsxPV0OsdYdbdRnruVkpknu3wP+AU26dIfjYhkSIUQ\nE4FDmwidZQJ04rygPLHTap0cLhlMRqRYIcDBYxYPB0GLMh4hCkM8kjAe8F0hAThMkcXFQ8MnQaxe\nqmACeSaQjNggQZ0BZVQEAlvqRHSxkFRRMYlJQxGBUEPCTJr2yimWlo6TSFTJhQGVShktjCgGec5Y\nLRZ1FdXU0BMqZq7E3oU9eEGEmUxRr1+gUhFs/xlnz7cDut0unY73cmTBT+A4AZ0OeF5EJjONoihI\nCWF4BYoCiUQHyzpPq+UTBFOoagLb9omiNsNhBSkjFCWBEJuoqoIQPmF4nthfpI3OMlVy1DBQ2I1B\ndhwiMCJAJxa9KoRhklRK0u0cJ3KWKFJnhlgKqxOrT1zgGHHwX5oEkT6FktjGQB2io9H1AhgHa06h\nYxCrTiwSdBihR9HLcz65KKIgBMlUCsW2ef/0NF9ebVAUVSqkURRBJCW+qpJSM0hrgxeOHeO63//9\nN+eE/QP8hoy8jrjnHvjIRy71Ki7GXXfBv//3EEWxGdrbEbZtc/83v8lN5RLTe+IY81a/z3e+8hU+\n9a//9cvhXU8++ijrBw+yL5fjeK1GRVEwheDoCy/gpbLUai3SWoKu7ZMOIyrmkPpKF0MpUVA0EBIR\npnBZJmAnkgl8Qlw2kGxHYRqBw4g+4XhAF8BBJaLAZWSI/QckKj6xiiLCQyOBiYNNiIKDQxoFF501\n2mNlQnwzGSA5S4RGgQiNHn1cPFIIlsmSY5KdePSwGBAwIkuWLUZenrziI6RKRsBmd8iEmmLGSFN1\nezStZ/CjCSQZzKSJZW1yxRXbmJ+/nKeffv4iMqIoCv/0n/4uP/rR4zz77I8JgpDLL9/OHXd8giAI\n+NKX/pp+P006Xebo0Qs88shBPvOZj77CAfONwNmzZ3nwwSdZXa1RqRS47bYbecc7DlxETO6++w76\nvb/lT7/4X/AsgapcQTa1Hx2XjfYSw6GHH3Tj+DEp6G2dxBcOc26DoplBkSpppU4tdNhNih4RHbq4\neFj44/jDTRQMbMpEVNBIkGMDDZ0kDkUuECLQEQzJsYbCBCYGLn08pjDo4dPGpUNIEkHIgAYmEUUE\nHRwkIQEqaSBDmxZpfEYoNNDZNjZWAw+JQShsksJhxT5DSkty9OiT7N59PYoWm8uvex2WnB5mcobD\n/oAcEe/bt48rr7+BRw6fZLnlsmvblUxMpPjYxz72qgLr3gqIR9Qjoii6aErMNHWCwCGKJMNhHUXR\niTVnbTIZldGoy3C4hOdFRJFGFDVwnAtABNhI2UfKc0CRKBoSRVlihcc2oEKSLXxsIqYxSAEaYqwS\nCsgS60nKhGGHbtdDY4VZWghiC7UesdIkIq6zFInlrw45BqENYYZWLkOjeQYNFw0dkxJ1PCI8bGL3\nkwFwFbGU1heCpu+Tsyy2ul1soTJwfbJ6hEqbVjjAjTIYpJnVNHS67EgY5HSdY08+yU033fSm68De\nXlfbWxhhGJuM/cmfXOqVXIwdO2K9yKFDcO21l3o1rw2nTp0ibVlM/0z/upzLUen3OXr4MO+57TYA\njjzzDLrn0W+3ualcxur3kZ5Hc2BxvjfinbkdjCKbvD/EUByGkU3WD1kVfUZyioJqIqI+WdmmRQIT\nE4lKWinQiSCijU+fPEly7GLAJh08QuYp4uGMC/ouPeYZMBgbYqmM6NPHxUaQpk0ADJC4TALbiXdG\nNiEuCm0m8NmJDrQIEBwng4fPBLMk6TGijYZHkYCALUaUMfCRRFJFkUO6PRvF9EmIJBoauuwy8BUU\nzUBV06TMLjmzwObmMvEH7sWVhmQyyd13v5/f+q1/gpQ/FQv/9V9/E8+bYmFhcfzMGXq9Jt/+9v38\n8R9/7g2dvjh9+jRf+cr3KRT2Mj9/OaNRj69//XFGI4t3vetmICau3/jqV9l47jkuk3BZqcCh+imc\nZILhyIBghk54AUMbkPRtNENhoZzi7GYDx/cIjTRW5NLxAypoqKh08JkmokAwNklTaKOhotClzynW\n6JHAIo3BNA2G9GkzjY6NoE6ZHlOEbFJAwcFDw6OCRGdAB4eQNF0koBHRQqCPJdRFoMUMXbYD6XGI\n3nk8VonYI3QM6eOJEaHioSlZpjIHGDoBrVaHe+75S3TfY7hxFtfWKRk5ClRoMsGK26aumwyjkF0H\ntvPhm2/mxptuelUxAW8lZDIZ9u2b5cyZc8zM7GIw6HD+/GlOnTqIpm3R6+XQtO1IaSCljWUt4/s1\nTHMb5fLldLuncJxDRFEVRdk7DrlbBbJIaRIEk8A8qtomDH8iYi0RYhAyROATUwqQ2Hj0iTcn+fFX\nAMwRITFosMhPnUhM4mi9gNhabQSMRJW0gLS6ytAR2Noilu+QJE+ODjDiFC4hPhKPaWI3kxzQ1zSW\nwhBnaKGrKj1Npeu67DVThM4IgzxdpU8PwZLvM1d2WFxYYM8NN+CHIcdefJGb3vnON+nMxXhDyYgQ\n4k+JlUAHfzbBVwjxvwN3EdvT/Z9Syu+/ket4M/DMMzAxEd/832r4Savm7UpGuq0W6Z+zS8snk3Tq\n9Zd/NlIpthoNbkgkKJkmYTrNS6vruKgsSA/LH5BCMmfmGHkRI6/LlD6BqUYcsRs0A5eE6OOINIlo\nFNu9izKaomFLG0fagIKLwhYbuFSAnZisM6RDnYAkfaq0SRGyBej4QIMhOSJ2oKEhsOjRoMSQWUyy\nhJiEGChUUVjDp06fkAANhRQZDNpjIaVDmxRJZhGoRLhxqixdgsinwIgkGYLQphcInJRLUs+gJjTy\naZd8TlAMz3L11DSFzgarZw+S21PFcZyfK0ZVFAXf9xFCEAQBp06tsG3bxTkH+XyFlZVTtNvtN/RG\n9sADT1AuX04uF/+OTKaAYVzNgw8+w3XXXYNpmhx84QXE+jqLhQJntSyldJlux2bJfomBvBw/CpGi\nzjQ+s5lJmlrA+vp5rMCgL0sY7pCiAFumKY2bMxKoIEkQE60aCg4q54EcKiY9II1CdkxfZqkzRYdl\nJDMoLOBzgXXmadFAZ0CRiBIhJSIUzjIgT4oEfVYRJBEcwCBJSJcM60wSkMUkBfgI5tDYwqYnQ3IM\nMRUfoUzS1jVcx6XjBSzuvJ1crkF9YwnXstil5EmGNv3BBUI9TWXqMsLKNt71yU8yPz//moLw3koY\nDodMTpZ49NHv8dJLj7KxMUBVJ5mc3Em16tBqHSYMW6hqhiBojUlFhlSqihB5pqZ2U6upBEGaIGgR\nT8tUiElHnbh+kUfXsyiKQRSdJgz3YLFIlg6CDUIyqBi4jAhIEtcrZoEd4+8bQIICgjQyTgcfr78K\nnCPemDQwiGQeJdzE7dtE5hxCpMmIJo5UsMaZVIso48kwnwHgopNEJ/RDmobGXKFK03MYCIXFfJlU\nMkPK7dIb2TD0aEZdFnQdkwRbQcBts7O0hkO6v4Z52WvFG0ZGhBDXAGkp5a1CiD8TQlwnpXx+/PB/\nklL+30KINHA/8LYnI2/FFs1PcPfdcWjfpQru+3UxMT3NGd9/xfG2ZbHnZ1oD77nzTu79i7/AHH+o\nbg2HDPp9NBlSUTTaThdThrT9LKEEU/o0vBqBmkaROko0ixB5PLlJngERL2DLPKOgjIYgJSwqImAQ\ntehTHY/XDggpYiAZcBwTC5eQIwgsJCMS2GwnztzcREFg4JAcm5mpKOOUG4GNQEVDH1uEC3K4DNmi\nyR4MNLqsU0RnGwIFlWBsxSZwyBARcgIfkz4OClGksiubYzJpUJkv85FPfIJvfPlrXFteJJfO4zgD\ndldMKvk0zzz9NO993/suen/X1ta4775HOX9+E13XuPHG/eOEUvlzztJrs4D+VeF5HrVah/n5d1x0\n3DAShKFJvV5ndnaWU4cOsVip4Ns2nVGHkRWiEJCNRnRYwTSmIDlHVfOYKORIjtoc6pxhJGfQlQzr\nMqKLhouNRUiISwYYECIRY5niFCYZJDYtigzZGPvlCjQkESMiknhUUEkQcAaBYJI+JVR6mGSxUTAx\nSLAHwZAOF3AZkCTERXKeEWkCukzRIkeSAA0Ln5A4lj6NwhpNtikgQ51e1MbxVUy6JM0cBjWiKE3Y\nOMduvUJez5BWJAnPwxAe9W6D48+3+fFjj7HzVcQCvBVRq9X4y7/8Jo6TZ27uVk6evA9QufXWG5id\nXeTee2Fi4gaWlu4liiwajQGuayDEIu22x9bWsySTBlGkoigm0CJWcATEQtUhcRNliKbNMD19M5ub\nTxCGzyHRaTDEICBkmZAWHiHxXjsApol9garAS6hs4mBgj32FWsQaD5u4PaMS35g9TpNAI4gEoR3g\n0yVDSJo2eVpMoJGiRZGAInAMA58iOUwMBWrRCMdIU5rZRckZsk1GWL7HRLHK4rRg69w5DMtlXzbD\nCSlJ6jrnt7YIVJWrZt98ofo/SkaEEPuAjxA3xyCuIH1XSnniV3jtG4EHxt8/BNwMPA8gpQzGx1PE\ns09ve3znO/A//selXsXPx7vfDcePQ7MJl8CT6tfG7t27eWpyktNra+yYnkYRgpV6nUEmw45du9jY\n2CCfz3PDDTdw+a238txjj7FgmpzfrJH0QqQUdAERRbgIpKIgZZJu4BKpJqX0BBmviulO0g86KFjM\nIElg41FniyZJM0Fo6uRDnaMjiUafLB4JTFxGeDjAAZq0CdgiSZpNuoTMkGYHOhF6LD9EZQODNCYj\nHAI0JB4Cl2jcpdZJMT2e28gzwOEMpymg0aeDRp8ELjouBj5DTEIcVPrMoONTxmBENxAca7hct+8A\ngZXl0KHT3PqOXUzrCYbDAfPzBRYWDhACJw4evIiMbG1t8ed//i2SyV3Mze3F910ee+wUo1GbWm2Z\nmZmfKupbrU1mZvKUSqU37BrQdZ1UysBxLBKJn44db2yc49lnn8S2O1QqOdz2GrPVCsvLK0g1S7uf\nIGmUwGtiBz6KukI5PY3vnWc0KtHuNkEWqSg78dUMivQZhjVsmSTPCrOE2HgYRNRRsciQI49DhCSL\nTXU8E5MnJMNoPPKp0Blb19WJSFNCJYVOD4mLoI06VhiFgIKDjiQiwCeiSJIsYJFBG+eVOOgIIkIk\nESEGAp0eCn3KSHVANRTMq3l0LYOiZAnOv8R5w2IRhbShEqKCDBCRiumpWGGXcJDi0EOPsPvyy7nl\n3e9+w87f6wkpJRsbGwwGA8rlMtVqlXvueQAhFpmbm8FxHDKZHeh6mrW1JRYWdpJOJ4EyO3ZcS612\nniC4lcGgxmgU4vspXNfEdXtomkUQ+MRTMBXidN4ssJu4ZdPAcWxWVjYJwwHxwOw2QgJsNGLFRuwY\nErsE2cStmwRxTU1QYAdDAmx6hLjkkbjEtRdnbAXvIZkCdqGxjsRDxUWhR0AOl1kiVAIq48F/F5UJ\nTGqEeEBG0SE0OdQe8dFdZVptn8ixCMIAhEZgO+xcXOT86iqpXI6qaXJFtcoTzz/PVXfeyb5L4Nr5\nC8nIuJXy+8DfAs+MD88BXxdCfENK+R9+yWsXiKtOEGt0LgqtEEL8GfBR4C3iyvHacfIkWBa87Mf1\nFoNpwm23xR4on/zkpV7Nq4eu63z8M5/h0Yce4slDh5BRxPzevUxoGn/zn/8zSSGwpeTym27iT774\nRb78xS/SPHoUZ71BOlNE9T1qvsMOJctW2EfxBxjCwFF07CjH2tAmFAWGREgEJlO08CiTJIHGoiI4\nj09VRiRKkyTtGtXIpMcIjTIhAaCjkAHqJChikUOyjYARDi4eQ3RyaJTx6SLpEaBxHpcpTFJoWHis\noNBhigIKggCJi02RLnNIEqh0iN1QPDQMDNL4GHRYpYpKwDQKQxK4TFFiLdjEGUjSxSl0fYHVpWP8\n1kc/eFEVY2BZiH+gbn7iiWfRtDnK5TjV1zASLCwc4PTpJkKscOHCAMMo4PtD0ukRv/M7H3tDr4Ew\nDFlYqPCD7/0d89uvZmZmB83mJg88cD+7dl3Lnj23YttDjlw4xZEH/ieDlQ6KMgdqQNft0dEgl6ng\n++fZs+caLpxwKYYetlAxtCQIBYmCo9mM/AxFuuSZxlA7OBE0ZYABSHQsQtbw6XMZISYRSUDikRy3\naQQRCpKTSAxi2zp1XCGzAY01aiQYoqCQIU2EQgcdcBGMCBlSIUeGAIsJzrLCTjwqRHRQuICgTYoc\nRcJohogLTJBCRDlkJDGTWXAsVK9BtjSFNhhgez1GgUFCSzMMbDxsbthxK832gIe/9723BRkZDod8\n/et/x/nzPRQlTRT12b27yoULDRYX41uMoigIEZFOT9BoXMB1bfbt28Ezz5zAslr0eqAoZaKoDqwg\n5Q3ouo7vtwmCDnHlL0vcWomIw/AsYkKxAyGmCYI8se17GiF2IeUqsS5kknjP7RBXQgbENEMDhhhI\nkqRwUXERNNERY1eZ3rgpu0EaSTg2DPCoINhkQIkKHgoO8ZRdHoss8ayOREWgoqOiCJVuYMdtPtfh\n2PMPY6Wy1GRERVdZ1JNIF845Dvsvu4zOYIDUNGq2jcxm+dinP/26+of8qvjHKiN/CFwupbyoPi6E\n+E/AceCXkZEesZYG4rN0UQVESvkvhBD/B/AgcRXlFfjCF77w8vfvfe97ee+vYTX7RuI734EPf/it\n4br6i3D33fCDH7w9yQhANpvlgx/9KMGHPoSUkicefZSlhx/mnfPzaKpKEIYcfeIJdF3nj/7tv+V/\n+xd/zDl9FS+dISFNiv0+qwE4gcF5LMqJJKEfUg8V3CAXm18Jg6IwkFKOiYWHADIiRxjYbERdFsI8\nSblBAYFkRI9VfAxCfATnyRDRJU9AEZUWJdYoYREg6LOOyw4ieuOE3oAmaYZoY+GrRp0CKvMYBOM6\nSoCBSUAWSYBGFp0hQ1JUyCIwCNkgRZMUBWALgYJGDh2LamBTXz3IqLfFgZtu4KwdUWs0mB6bxwGc\nq9fZf9ddF73f589vUCjsu+iYEIJkcoKPfOQqVFVlc7NBubyDffsuI5V6pUna64XRaMQ3vvpVvLU1\n9il1Tj/2/3LMyNCLVLZvv5kbb4zFq6qqMbQSHFp3KZBm2szg+i4NQ2V2+w3s2bvI8vIPqdefYhAK\nlmQHMyEJFQM7aKGRJfKHaCJLXkpMIvJagaxS5Kxdw2b0/7P35kGSnnV+5+d57zfvzMq676o+pL50\nttQ6QAeCHTAQnB4OQQThMWu8G/ba4/GuZyYmdh2x4Ql717Ez/9hjMzMxnpXHgBkhBhAgECAJoW66\n1a0+q++6q7Iq7+u932f/yJRAEgiN2FG3IvSNyKjoNysr336fqnx+7+/3PWjTpk2eNjtRGCZmG0EB\nSYUYDYUsAh+V3sxd9GmNkhwKG6RxSCJpk+AyXUw0DDQ0DNrYqDSIGMdhm3Y/lyamtxWuElF7afwX\n9AeEOToEfb2WgRBV4tCn22limDp5I4tjGdihhk2Zrg9e1GYt7jA6eRvTIzNUmhWuLq79na3f/594\n9NFvsbqqMz3dW3MpJadOHWZzc4Xp6d6o0DAMxseLrK3110AIJibG2di4wupqkyDo4HmXSCYtOh2N\nIDiGlCawARQRoo6Uo/TcVTV6FPOf0hvTuKhhGYHWp4yWkHKdXgHTGw8Likgc4Hy/vFBoc54YD7Vv\nd2ci0MlSImCFBgAKOapohKQZYIMh9L6PjY7FGg4BCgpdYio0KOAS9s+wQkiLEB2binRw0TBJEsZ1\npv0Ol9s1Vm0bbWiI729uEAUBe6emsJNJCtPTfOj223E8j6JhMDg4+GYt58vwWsVIRG88s/iK42P9\n534VfgL8j8BXgHcBf/7iE0IIU0rp0Ssff6ng9OeLkesZjz3Wk89ez3jve+F3f/etmeL789A0Dd/3\nOfHMMxyamEDrqzw0VWXfxATPPfss995/PwcO3cuF46tETRdbxKSlgibSLCmQS88zPzDOyvoitnQY\nkBpXpYslc3SJsKiRQaDjE6JTixxcDJqRQqrTRchekFWOLgYxNRo0cMmyE4McbSIEghybDCFQsZGo\nZIhZ4wwBXcYJWUYlZKrv6OrTRSHGQ9IgRqKTJCZEo4WGSRbooKNioLBOQBMLnUEk65jESDRUNMCm\niYaDgcqwPUgzdPjWt77J/n27OFmvU3NdbF2n4vuk5uc5eMcdL7vOQ0N5VlebWNbLSY1SdikUCkxN\nTbF//5uz5j/6/vcxNze5aXYWZme533W5vLLCl4+d453vvO+lLs/GxiKOY6Eld9BVFFb1KVTNRsQW\nmewQGxvr7NoxRtzc4Lzikk7tJ9RVKs02nUbA9ubl3phE0wiDJgkh8f2IpowoIGkhqWLiM4nBCAoq\nPl2gjcIeBKsobBGjYWOjUMRgkzIFQlqM4pAnjyQiR4xNiqsYGOzEoonHOmn20MbGpojKcWzWmKTL\neH97W0PiIMkBTVTGUVgmwENHETZCJjH1JrEaoCoKgSEJLRUlZVOUkyxc3KIZS9rGOKPqCCfPX0Ax\n4IZd17eZHUCj0WBhYZ2pqXtfOiaEYHb2Zi5dOsrW1irDwz0O2b59N7K+/jhCNNjcvIqUbQ4cSPP7\nv/9v+cIX/ncWFlJ0u116XKcxoIqUg4CGlOPQLyF7dNIX03lVEoqFqliEYe+mIKBLr3My2f/aS6IS\nGMAyBTy0/s1GmzSCIUJCJII0BkkSCCLqJIBhyjQYo8UQGvm+saIk5AIeeTbo0mSKmClUyggEst+P\njdlCQ8NEx6aIxRZtxkWblKpwi2HyE1R2Tt2CKjwWWiWM6Wn27t3LjrExmt0uC7Ua7/7MZ96s5XwV\nXmtb+l+A7wkhLtEblkHviu8E/udf9YOllMeFEK4Q4inguJTyqBDij6WU/wT4f4QQN9BTNP27X++/\ncG2xsdEb09x337U+k9fG5CTMzPQcYl/BU3zLodvtUtnY4NilS/iOQ2FkhNn5eVKpFFoU4TgOUrpU\n2yGamiGNhyIMFoNNanGOu7NjSDdA1zUKwSqWmkONJT4+XbaxqSDQ+u6XPYGeSRYbWG0sYaDQxcMm\nT0zMABp1gr4jqoWPjsY6Y7goBEjKCCJMGhTosobgDCoeBnq/dNDwSLDNNAEep6mh0SBHSIzFNBIf\ngzQmMSXagCSDQYRkhZgWI2TpkKOFTYcBTAJCGqqBCDsMJYs0mhsEocU//v1/w4njx7ly8SKjIyPc\nddddr0poveee2/nP//lvSCYzWFYSKSWbm1cYH7ffFD+RFxHHMQvHjnHP2NhLx5ui6Q8AACAASURB\nVCzLYs+OHaSPnqRcXmNwsLeRbmyssLFxmUajSjo9juNsoetThG6VS84WinaRfYNF9s7NsrzwY+Jq\nk07gE1tpiiNjtGs+UilT76zQoMuadEkjGUAjIqRNREQXwXliygSEqHSQCKCDykCfWBwRcrXfiJdI\nmmjYaOiE+PS8ZzwUYgwCmrRx2CDDIAomAZIECjFFAkrY/Xfo0hsavEjhLeNQJWQAwSKSddoMSR1b\nWJi6QU2vcOuD92JpOtWVMhdrVS4KMLVJbh6bI2OY1NttLnklPn//B960NX2jcF0XRTHwPI9qtYai\nCAYGiui6ydzcDIqyzPJyHdPM4Lp13vGOMd71rg8hhCCbzTI7O4uqqvyjf/Rx/tk/+2OaTZ84TiLE\nIBChKDFxbCHlIvSj6ngpNaYDqMTYmEqxP9C1UWgSU+RFsqrCRWK2UWlgESLJEBGRRUeyhYJAkABs\nLlJBIcJEYmLhExLQJUeIjUUdBwhJEGMSUn6pxwZNElRIs0qIShdJ0D8XhTZduvjMC4/5OIBARVc0\n9Chkq1Hh3n134119ntEDB6gYBhsrK6SKRd798MPsuYYJr7+0GJFSflsIsRu4g16HRNIr/Y7+HAH1\nNfHzct7+v/9J/+sX3vAZX2f4m7/pdR2MX5w5dl3hIx+BRx996xcjx44c4eq5c0zmchQTCRrLyxxZ\nXWXfnXcibBvbtrl4bpVsRkf6Jl0ljxd1kSLA6q7T8hTc0GdMaZDTQ9b9daCOwQg2kjLQwUPFIKLQ\nb8dvMYtPjMMyBa5Qw6bniujSpomFwhCCAUBBcg6FEEiTokmCDpKICB35UlM9oE3ABjEWJXbSU34Y\nJMihsEyXMjoxWyTJ9JUaEmjjE3IJmxgVlV1IMqxzii4rxHQQwiNQDaaUFFHQoOUpaGnJ+Pgw5XKZ\nF55+mpTnUV1a4pHDh7n5wQe578EHX+oyzM3N8ff//n1885tPsb2tI6XHjh0jfOQjH31Tk1yllMg4\nftV7CiGYmxmnVFoglxsijiMuX75As5kkn59EVWepl48TB98jpRr4bpdCqo7X0FioVhiO6lTKLWaE\nxtr6eRYVFeE2sGNIiDQZCTkkEo0VBCY2PVaJh0kZFxeVAVR2YiCJOA8E9DJVu32L9xwOOWALQR2f\nAXycvsmdCoTYeERcIk+KmAFKeKRQSRKQJEmVPBvUGURDI+QWerqOTcDAockSLTIUkZSkT0vEpGMw\nopixqVHMhsulckx+7HYKBZV07RksrUtL97hSWSUIKuSTIaefP8bdd991XXuMFAoFSqWrHDlSRlXz\nQISmnWX37hFuuGGWz3zmY5w7t8D6eonx8VluvPGGXyhXv/vuO7j33km+9KUfADegKGsoio3vn0NK\njV6Sy9307pVVeiTVZ4AtvDiFGmRB5BCygk1Ih2V66hv6nJCejV2RUVIY1Ikpsw2oJOnSoEGERGEQ\ni54BW0CNkAo2LikkETo+Fj5tMoR0AJ8eiwVUKhhE5MlgkAKu0mSCTbK6jx/UKCIJUWijkEPBEYIR\n06a6vUrXczCMDKpu8Y//198hCAIMw7jmCc2v2bCXUkb0xi1v45fgscfgs5+91mfx+vDhD8O73w1/\n9EdvXTfWVqvFyaee4j13382FEyfYZZoMZrN0trf5ztGjfOZf/Sva7TaNusPM6BzLiz+l1ayT1Qwm\n7DRbQmXnmEG0UiIRBFSlSV1JY0kFIf2+bmKaOtMo+BhUsFDQKSCoMUOFJlUaDFAnSdhvm8MeFEb7\nYWcegmk6nCbBFnl8PAwgh0tEihwNuqQYQyOBQCeDTpMVZvARfVukGTR8TLo08an1Z8seKnUsIqpc\nRbKPEIFGlYAibQSLqGiixoiapBE3sRXATrL/jvuZmZnkG488wk2ZDJmREQDCKOLIE08wPjXFzp07\nX7rWt9xyM/v27aVSqWCa5ktOt28mVFVldu9eFs+dwwLW1kpomkqmkGX8ht3suu0Onn76MCsrm/i+\nzthYGsua4eypH5NQbFRjjLSxwYEJk4yjsHbhAuPZLEG3y66ESqfZZFZYaEGNuoxpY7Bb2oTUSJPF\nRyXEZRUISeExQx4NF4lHiMomw2xTpAV0cYgoM4eCiUD2H8P4VIlYQWGwf2/7ouamQYIWPlChCiTI\noqARUKNLnQidWdZQyRLyHGXmaZNExSJNTESdGhohczj4ikoxn2f/jXu489AhnnqhwsG5G1iqVkmO\nDXHXnjt44fKzrDS20DyDIXsaXda58OQPeERReOijH2Xp8mVKy8sUR0e59dAhxn6uK3Utsb6+ThBA\nHHdIJEYwzTTN5gZHjjzO5z73+ywuLvL0089TLtfJ55eA3u9ws9nkyJFjnDt3Fc9zOXz4MFE0wtDQ\nMEEwQ7u9ietexTCG8TwX2A1cpjeaeVHw2Uvplag48goqHSzAoI2gQUgViwTTQIAkYICAXqquSZuY\nIQyKqMSotAjJkUWiorFNSIhGgRYzxGSBBAFlHHRiXkzDuY1eN0BiMoxAsEGGNAoCjZgWKreh4KoG\nnchjAsF5JH4UIA2DvJ3CspJsVkuU44jbhnvW+K/sil4rvIXZA9cerRY8/TT81V9d6zN5fbjxRkil\n4OhReAVF4C2Dzc1NMkKwb24OU9c5s7CAU62iJ5PYY2PcdvAgi4uLdLeuYG3U2ek5GKpFGUk2kSWv\nx6wBemqQ5e4GHZkiSBSY8bvgtVlXhgmiHCYKJiY6o0RcYJAcLjZXECgkuAmDFl1OodJj0O8gxkIS\nENHAArawSVLt+21m+h6NWYbI0aFIjQYTuLi4DACSASIcxvuamXbfvzViCI9lBtExsdkihYuNThsf\nA0NpYGo2YRxiRYJAHWcxdin5SVRlmFB0MBSbnTmDmZlhttYvk/k5gytNVZnOZDh19OjLihHoKZlG\n+kXLtcK9Dz7I//bVx5BrDYrpQdq+y5p/mY9+/tM89NAD3HPPIR555CtoWgMpTU6dOktK17DibRQl\nYP94yAf37uTK5ZBTl6+wXAnYqSXpKia1UAFaZGWMIQRdKbERdFAw0XEJ0UnQRBJzAJ+YGk1idBSS\nWJxgNz4xOjEZPAQ6CVrYZEji4ZPBQVKgzAajQEwHB5syKgF7aXKBHA08LpNgnIgsAR4V0hhMk8Ki\nikeVNYr91OYh8v3QvTbDaChMEFAmFLDS6PDA5AwrWxU0JY+qqBQsm0qrSbOxSRgNY4mA26cG0BSF\nWmeAhlcl3Nzkj//gD3hw/36mMhlqp0/zleef572f/Sy7du1609ddSsnCwgJHjpzEcTxqtS0mJ29l\nft7m4sVztFpLjI8XSCQOcurUGU6c2GBg4EYGB2/A8zp8+cs/plqtcuzYAu12jjBM8eSTJ1ldbTM2\n5uD7Ftvbz9IrOPYipdNX1bTocUXW6XVGBHAD9C3cNWxUQGEFjRE00gjKpNgmBgJssphEhJTx8VH6\nkXc+BTRK6BiM0mabPD4hKgU0BkkzRatvi9Yrf1r0ei5pQEcli6DW19BNoFFCMqDY7FJD3CAm1HQG\n7DSdboP10McESrrOhJmkqWoMajpXaiWyO3Zx331vrsPqr8Lbxcivge98B+6+GzKZX/291wteHNW8\nVYsRy7Lw457l8s7JSXZOThJGEa1ul6X+rOz5Z5/lwECaU+fOMxBLkqqOHUtOrl/h9gM3oE+Msz07\nxeXjz+HUEthRh3ElYFVJkhdZPBH34ruli0KHgJg2lX7WhGAHKZJorBCiMNRX3iRQMInRiWjRZRsD\nhU0iGhh9c/kcaRIERKiYRGTw6ZBHRyKx0fD7f5IBCsv4eOg0cUhQRCVBmQ4wT4IEXa4SYxDJEWIl\nAiVHU7rkbImq7kRTJoiiGDcWKJpPPq8xOjpK7Re0Y01dp93pvFnL+LfC5uYm+am70OdStCsbJFN5\n3jG+g3PnzrC5uUkikeDMmfOcPduhUNiLlDl03WQ2O07CcLhl3MPSNFL5PCU/QcHIk9R04jAGmcGN\nJcQ1pOyVkl1CAqCDS5cEoh9nqPQj7xxET2WlDqJEabo00MiQJgK6+NiYFNCRmHgUSFHGJiLJKjYx\ngjoJYsZRSRCwRYUKg7QQdPFJ0SKBRpEkK8QEaKT7/jEedVKMYtDEYQADiY6r6fgyS8JOkXa3+cmp\nGpYFbaeNEDblep2mprFa2qDdSDNkhmiKQtfzCDWdVHqaC5cvkwsC5kd7cu5sMkmu3eb7jz3Gjt/+\n7ZflvbwZ+Pa3v8ePfnSeXG4WXR/guefOIuU673nPR7nnnp/xlpaWzvHUU8cYGDjI0aNnaTY9IGZo\nKM1/+A9/STK5G9u2+clPnqZUUul0hqhUjqHrOkJ0iOMMQrjEscAwugSBx4vb/88My0xgEMk2fl8V\nYzGCSREIEaTpcAWTbQSw3o+N8Impo5JAwcIggUBDwcYgRKeDR0BPbqojKfbf8Qo9r9YSPY3OEPS7\nKjYWETZW/3NEwTJNVF2QQMNN6HRkTGJ8B6JTwzAVtGQCTcswoCVYdzukd97Ax37zN5ifn3/T1vL1\n4O1i5NfA9ey6+svw4Q/DZz4D/+ZXCbOvU4yPj6MNDrJWLjPed3ATQnBhe5u7f/M3AbiysEDY6pLT\nFXKxCjHoQmHEUPEiyFsGG0FIIjdGMjdAde00bac38ZcywJAeUKdNF4ckghRr1FBoM0HMVQxiEpSI\nCFBQyRFRIqZAkrNkaWFSJmILC4jRMChgYCPR6CBxEEhi6giGkERAFZ8xOixgs8YgDQZQKRKxRIwk\nQEVgYmETIbAYJGCNSBSRiiRj6XQcgRuWKOq3ki+OE6kqUwMDJFMuvm9gmiY1KYniGPXnNpf1ep09\n99776gt+HeDkyQsMDs5TKIzA/E2USiVOHXuBjdXz/Nv6/8Hg5ByGsYPBwYvEccDo6Bztco1LtTPc\nOuowlettXMvtNpnRvXie5HR1iSE1iy8VgkgliA3qwsJBpYyNjkaHKjp5WjiEmETERJjAKJJlvKiJ\nJMAlhdnviZm0+zJviYdLhpCIGJNW3zt1gIAsEgWJjkcNQZUsETtQUJH4tDlPgAYYFIlRKFDDoU2Z\niCEkPuAge3RZEeJHXSJCwlBHlQncIGLP3EG+9tS38JsuXtfD0HVE0IIwpuW7XNlSGRwdZWZsjO3W\nGs16nZ3j4y+79rlUinBlhVqt9qbySba3t3nmmbPMzNyFovQUc3v33sEPf/g0KysXmZ/vSbmklHhe\nCc+D55+/DORotaDRaHHx4mm2to6xb988zeZlLl5so+s2UubxPBvP8xFiB0IE6LpOFHXwfeh1Ol16\nxcgqPb8Qg16/QkOli4ZFAo8MXj9AMSRHng5NOmRp41Ghl2+k0cSmiECh2R/YNtlGvtRDTVCmhkkH\nk95QSPKzNJsGPdGxQNLFwxQ6sRTUVJ2inqFDRBA7qBqkdbATSUgJMtPzVDodPnrffYSdDk8eOU47\nbXH/nXu4885brzlH5JV4uxh5gwgC+OY34Q//8Fqfyd8Ot98OnQ6cO9cb27zVoCgKH374Yf76L/+S\ntaUlLEWhHsfsfec7OXBTT2cgFIX69haTI5MY7TpJ1UBRFcq+y+LGGocrVTxfR7oRg+gMxU1cp4KU\n4EiTPGk2qaExh8oUPh4xY0Sk2GCFIaYBHZ0EHl0kGWJWSHCEYUzSSEyajOGzjWCTDnVapLDpElAn\n0fdNjHEYYIUGs7jEbNLEZ5nRPmk2TYSDZASnf58OOgYxLhARYlLEVzcw1DqTgxOUamUKmWlUZZx2\nFHHj3j3Ytk2jcRlVtUmn0+y86y5++swzzBUK6JrGarWKHB3lwM03X7N1fS0YhkYU9a7Y1tYWp599\nltFkEj2TZndC8Ni3fsDOWz7Ovfc+yMmTR9jeXsTK1vC9KkrC4NTWFk3XZdOyuPHWW1laDKgECkbQ\nQAkFrTCkThKHAQQpVtgg0ZdnO7SoksQnQrCMxk5CJGChsYqkSoSNjoEALFRiVulZUaXxiBFUGcKl\niU+JGm1SBBSJMYB1soTMkiFD3PcocdkgJmACg1EiIKSBzmUCHKaULiEWRqzj4YG0SCk2jtwEmaaG\nRmW7iucfw5Yha9VTFPQ8Y8URrE6DlCUoFnYQt9uMjI2haiqqqOMBozMzL7v2UkoiKTHeZIb+6uoq\nQuRfKkQAxsfHGB4ucvLkEUZGpoiikHL5KrfdNs03vnEVxxlke7uMEEkMY4C1tefpdBJcurRGux2j\nKAVUNYHnbdHTJY0hZQshAqLIRVFUgiBLr+joeY7QF1T3ChIHre/io9GgiILaZw6lCanh0uZGMkyS\nxsWnQbOfNNXT4qSRdNEIcNkgYIiQMQRq38HG4BwCC8lOet2RAmBgcBGTGgkGCAnp0kwoDKbyCN8l\nW0wxW5jkVCtHcWiInQMDeEHA8VqNsfvvZzEMOb6wxuhNH+D2Gw/S7bb4j//xv/Nbv/UhZl6x3tcS\nbxcjbxBPPw07dsArbiSuewjRG9V85SvwB39wrc/m9WNjY4MjzzzD5tIS+eFh3vXBD6JpGo7jMDw8\n/DJy5Z6DB3nyz/6C+cwIJaeFQoQbBCx0m3RbTebzQ4RhnWYo6YQGA9JkKIJFQkzKtPFwSaJSRKOD\nLiI8qRIwT0AbiYWCjU6rr7g5xiAB0zSwiKjhYyCw0ZkixiXAY5UVAgJGiYiQNPv9kG0aNFhhk50E\n1DBQyeID4OCgoPQlvh1qJPAJcHGQ+DhIkUYQMj88wg3T08yNRpTqJjKOUSOLOJY0GlukUjqXLp3i\nG9+w2LNnnts/8hGunj2L57rsOnSIW2699RcqD64H3HTTjXz7249w4fRFLp8/z2QigUhYCFFhx8TN\njCxssn7lMvO7dnPPPf8DnufgOF1OnXqCndMmvu9z6MABHnjgAf75P/8/KQzsIZeb4/LCT1DEBhtu\nCSfeiSkNNDYpopNA0sKiho9FkkmgQ4kuTVqYBIDHKgYGq8QIYkIi1ggpUmUMk4AyZQJ2YBAQo2NQ\nYZsAA4mkt9HlKTKISRUTixYOERHDJFmnTYiHREEnQYiNKdaJTI22V8PF5hwRoyikZZmU0ClFZTpW\nBksbZqORJGtmUZQFdg4H3DMjkXKc//rCJc6tB9TbKgvNMnPjGrccGGVy/zsIXlF0XNncZGz37jc9\nTt4wDF7ht4mu69xyyx48D1T1Kqoas39/Gtu2iSKHjY2zqOoebDtJuXyJbtfBtqdwnC2knCOOfTzP\nIww36RUYbcBFiGGkPEkUjSFljyTee4zRG5pcBi4BBSRVNMqotIgYxSQiQqAS0CaByjAqGiEKSTw0\nAgqEwDJBf0SjoJBBsNG3eHdJk2eKJmlO0eRWfBYR1JFomBiMMo5JiWFKCOqyQireJi8D0gWTsYlB\nfEXh7911F+lkksWlJbBthopFPv7ww3z96z/kroceIAwDwjBgcHCCet3k8cd/xBe+MPMmrupr4+1i\n5A3ia197641oXsQnPwmf+1wvOO8669QB4HkenU6HdDqNrussLS3x6Be/yJRhsCebpb6ywje++EUe\n+MQnXuqG/DweeNe7+LPdOzh+aZWBZI7TTpNlt4PWbXG7qjEoIIpDHF3nbOxgJzI4bZtiGBKjU4w9\nmlgkEBj41KWKiUZvqpvBJcImQCcDXGGYGiMkKdKLwxtCYQOBh4qNQxKwiHDoAiYKQf8OOoPfN1Tq\n0OIsHh5ZHIYRTPa7J+tABYMGESU6WHicAoZRGSCWMWG4ydJ2jGm1+cS7buX0lXWePf1T6h0bJziL\nZkEsBLfffhuOM8mTT66QTrf4/Oc/SS6Xe93rEgQBFy5cYGlpjWw2xZ49N74hhY3neUgpX7fl9JUL\nFzCbF6hWBG69Sa0J5fJRPvmBe8ilUuyaKPDjhTLdbgchJE8/9SQXTh3GaV1ia2SAW3bPsW0YXBwd\nZefOcZ599jyl1YCh9ABbkYOdGUcPZpHeaSZCC5vBvq9LhTSLxPjkydLAwybCJqZECoM5bCwczrBF\nFh+fPG32ABYxCjpDwFk88kh8TJokCRmnxwQooNDCIO7nz0h0DFxUVHRiuqi0MBAohETArKnSikxK\nms1gHJEJI7Zp4ugpFKEQazpWaoh8/i6kVyZrRoyoBk3nCh3f5+iqT8efJZMcQVXaREYHmUlx19/7\nAO9//3v56iOPcPjqVdJC0JESY3SUj12DD7q5uTlM83t0Og2SySwAURTS7a7xuc99jJGREf70T/8b\nZ8+62HaCOC5QrR4jkwmJ4wz1+nls28Y0p4jjDRqNs0AB112hx8QYoJcdowENhLCJ4wq6bhLHJlIO\nEMc+vaGJChgIzmGqOunIJ02HBm3afV6IS4O4P4oN0HBxSLDJFAoJJFkghWQFSYqIBXQK+DSpEmIR\nYKNi02WBDiGGlUANfJQoCSRJEqEqBh1ZJJYjVP2LnFAiDu2epZW1uH/HDubHxlgrl5mensZSVbZc\nl2azycmTF2g2V+jxYBzyeZM77riP1dUKruteE+v3X4S3i5E3ACl7fJFvvUWzhu+8EzwPXngBrrfO\n/Pe+9wOefvoEcayj6xH3338bV8+cYlcqxXB/40tYFplkkqe++U327N2L9gpLWcuy+O1//a/5r//X\n/013s8acOUb1/AlGNYXJVA5DN4hDj0QckYwCWtLFUnRMTaEkQ9Io6HEd6OCgoSIJiRFsoLONJKBK\nHh8bSJKnhkDDIcbGRidkgJAmgoiefdI2CjET2LQYo0WSAQQONUqsAa1+FqXODBKXCA1wMVSThNoh\nCqrcKiNiHErkaNGmSwsbHVdJoaIyM7if58/XeN+hPTiNJ9lcWmAqP0mpUkUMzTIxPkcmUyCTKbC2\ndpFnnnmO97//N17XujiOw1/8xZdYXvax7SJBsM13v/tTHn74fa9S4Pwy1Go1Hn/8Sc6dW0JK2LVr\ngve977VNb7a3tzn/3HN87qH7qTab/PnXHsPqdvBdl6ee+wlLK8tksllClimVznPiuacIV5YYDWuM\nJy18N2ZlYZX9oyOc+e53yQ2OkErqVOUStpEmk1AppA9wcfUCNiEmNqrQCWQENBhHsE2MT5M5FDQk\nNXQkNRxmMWigIdC4CmgUcDEBE4lLhxQwREyNHCUyxGSQDKFT6bvlanSBDAFdDDShoQAV2StthghI\noxEhWMWh6YYMWYPUZchYuoAeSZrSZ3p2iFQqxeELZwmCLNWtyxhmQNYUzE9NcHGhzLPLqzj+JLHI\nMVrcTcVxmLlhN53OCQ4fPs+DD97Hp//BP2B5eZlarUYmk2F6ehpVVV9rif5OYNs2n/70+3nkkW9Q\nLqeQUkWIOg88sI9du3bx1a/+Dc1mjrGxSUBwxx3v4fjxU3ieSz6fJZudoNOpomkuqjqD627R7bbo\nmYcXUJQMUhqoagJF0dD1TeJYMja2lyCw2Ny8gu+n6A1LdISwSKi3Aav4Ypt9yTS+bHC1W8aXOlBG\nx0QQEOBjU2UMgUabHBEWvWiAAXoeMUOEgGSELiW2WELiM46BQVNVKJhZpCwTRAJNiWkJ6KLjyYhY\nSAxb4ebb7mbvbYcwjRXq1S3+6vuHWdsM2N528IM6jtbg3ZgsLraYm3vHSyOvZnOVw4d/yO7dWXRd\nf9PX9pfh7WLkDeCFF0DX4Rqa1f1aEAI+8YleyvD1Vox8//sXmZg4hK4b+L7L179+lLh8gk/ffdfL\nvi9l23RXVvjm17+OIiWjU1Ps3bfvpVHDnYcOYf7e7/LsE09Q2dxErJ5jXB3EjhVAxY1C1EiihT71\nTpNhIVkNTTblEFmpkqBEmwUiZpCoqLQoUKWAR46QbTa5QhoTBwUdGKRLCpMGkiQeNUr4tIEWSVwK\nqDiMUiXHJD2hXkQRC49FSsxCP7O15/b4PClsUvjo0SoDNPEViRJb2CQZRmcLiaHqxHqWLbfF4TMn\n2T01x1d/8BSDUcSHH/4kiqryve8dgTjmp0/8v9z/4f+JbLbI4OAkL7xw7HUXIz/+8WFWVgQzM7e9\ndKzTafLlL3+bf/kvZ37lh5rruvzZn32JdnuA8fF3IIRgaWmZL37xS6/5us3NTXKAqijUmk0yAtxK\nBcNx8CsVNtpt2rbN5NgYMzMR5x6/yM1zO/C3BdOZArGUnGzXOHnmIrccPMDXvv1N4rZGNtBI+xpr\nHQeh1VGVCkkjQsMjjrtIWcek3Y8xi9iJRg4TkAg8xonZ4BKTDGGQwaXJVl/10iFEEBMTkSImg2Ab\nhQEKbOADl8mSwsNDJU8LlQwehjBxpIurhCzKHkW5hoIHdOmQpssQCpFUSKbyrEmfISWGUFIul1BS\nKerSI0WTQtImlQ4pby9zPt7A1QRr5SaqiLEzBSqOw+DEBJZl4zhpHEdjdXWVOI6RUrJjxw5SqdTr\n+t34u8L8/Dy/8zuf5+rVqwRBwPj4OMVikSiKePbZ45TLOj/96VEAUimNbDbDxYtVwKbVWiKdNhGi\nRLtdY2xsB4uLR4giD0WZJY4dEgmbVKpAq7WOEDH5fJFq9QWGh/ejqi2EKCFlHphCoBPIAIVthnI2\n9aDLQBwyqUaM6FlOuXkcWcVhCZMMCWLCvkNvkl4/lD5hXUFQQNAgIkajSIqQOovE6EywxQp5t40R\nxHjUqcY+WyRwGQTmEFobTRmlXHYJQ0kQwJnlMrVlDbduoOkFfH2CREbjiSdOYlmzNJtlcrlhADKZ\nCa5efYEPfvC916TQ/GV4uxh5A3j0UfjQh67PEcfrxac+1QvP+8M/vL4M0CYnD6BpvY3NMCwmJm7i\nqePfwfU8rJ8z51nZ2uL44cMUFYViLsep55/n2FNP8Ynf+i2y2SxCCG659VZuufVWoihivVTCff55\nbEWhWWniRxIRBmwTYyg+1cikJAZADNCSklhmieR5hDiMKg0KhBQxyJMkoEaRgArLNEnTxcSkgiRB\ngwxd6qwh8Pst3CpFBumg4zBMQMwFQiaJidBpMoVGmw4uITkcdGxCFkkTYUVdsriMaCkq0qdOB0UE\nKFJHR+JrFlkzTY4ORjbLSlcyovo8/P7fIJNM8qMnnkBurlBID5Js13n2pSPT6AAAIABJREFU8T/n\n4EOfIpHIYFmvn5R49OhpRkZeXrkmkxkqFZ21tbVfSYQ7f/481arO9PTcS8eGh6dZXm6+5utM08QH\nXN/nxIkTvHNsjCOlEqbvo+s6lWYTY2KCd+3Zw7MvvMDusWkGMkNUaxsAKEIwqKps1ZucXlhgRkru\n/8iD/MWXv8765hJhrcx2oKIoe2mKEgUdFL+FTYk0vbIwRGEQlQifGK3vKiIo0pNgg4NgAJsYhxZx\nn9mTIsJDYQmfEj4KkhQaMQsoJDGx8VjEwqdFli3ZwNIlhcwMcWWbJJu0EBgoTCIYIUFEl9WgSyVK\nMZhI0Q4bEHdY3NrgUm2RXfkBSsESqmgzHWe5cSBH2etQpYm1e45YzqJoYwwNjb3UnpfSBWy+/fWv\nY3sethC0gZve+U4eeOiha6q6sG37VRbljUaD558/RyZzL7ncbsLQ58SJx7DtaXbtmiKVStBuj7Cx\ncZF8XmVoaBjD8AkCaDQmSKdn2dx8HkXp4PshUvrMze3HtiMcZw3LslCUMqY5jO9ryLjSy6OJVlFF\nGVUOsK7ENFNZ/G5MUhlkxOiw3XgeSZOIFF2gQ4ORvmmZ0WeIVYjJoNDsa2NcbAxq5PBJIrFIMSYL\nLEcNkkLHEjGKhAksKtRYZwFdFaTTk4yPH2Jh4QzT0yaT87fQdNt40iGRyjFaGEHXdY4fP0exWMS2\nu1SrVxDCJI5dhoYy3Hbbq0fc1xJvFyNvAH/91/Anf3Ktz+LXw759kM3Cj38M11Ny+IuFyItIJNKk\nhqc4vbLCbfPzCCGI4pjHf/hDDk5OctOOHXiehxZFLC0t8YPvfpcPffzjL/sZqqpy5zvfyQ9XVri0\ntYUuQjxdckVAx0qgCR2/O0w6TuHLAKEm6CoJkHvI6ReYSLjMt9rEUqURdYmlSgKfKVzOY7HJIApd\nClQBjU0cJClGGWedNabwGCZFhwY5TFR8timhkCWNQYcYjTY6Dh4KClWmcUgQs07IClla4RiKGlEV\nJdJykxaTdFHJmjm6YQ1V3UKVJpqlMDE1RiaT4cKZMxQVBbJJQJCyEhTMBOePfo/RXXv54Adf/4eR\nlL/smde3UW1sbGOar+anJBKF13zdzMwMfirFuaUl0nFMFEUULAvDMBiYmGBHKsWypjE7NsYTp05h\nSw9N1/GAWMYoQsEPQzTDora9zYHZWQZzOd6xfyfPXnmMexIKT3sdrnjnaQcZVo0mY0qD0UihAtSR\nuAjqhOgI2viUAQ2BIAQcgr79e4aQDVoU0Wlh0iSgTpcqw6QZpo7T33xUMmxjEZAlJujbdptEqNoI\niajLLhwKaLQJaOKTIw+4NAlZlzGJWHDz4E4qnRKXW8e40TLJqArzY0MEseTE5UXa0TgJK4sRN/n0\ne+/hnOex6SbY2oowzV46daOxSDars7l2hjuyY+zrF5VhFHHs+98nk8tx+8GDr2uN3yycOnWGVGqK\nVstD113CsIFhjOO6BiMjBu9730M0m02uXp3g9OnHMYw2mUxMMjnLsWPHKZVqQIiUdVw3iRBd8vl5\nTLOMZdmsr7fwHA1VsVFxiZVtVCWHrqbwAp2rLgyk94OWoDimovlNVldPMEVMjphL1MjTG89CL1NI\nImnQY6A4SAIEaWKm8HGo0cFAkqFgSLTIIiHb7FJsFJEkRrAc1sgRURNVMtl72XnjrSQSeZaXt3no\noUNsbARoWsT0jt2oam9blzLGsizC0Gf//lswTQvXdTBNg25XY/w6U1+8XYz8LXHhAlQqcOjQtT6T\nXx+f+lTPPfZ6KkaCwEPXf9YBcZw2u/feSGGmyLNnz5JRFNYbDYRhcOehQ1y9usipU5eIY4soDvnu\n6T/l5oMHX3Wn/p4PfID1CxfwSyWOHz2KmkySjCImu10iX2ANjNPwJKttMKWFVASBkkWY83S5hK85\nZFUVw/cIhaDthaxikSOkRZ3L5KgBGh0sTPKME1ElooNPmi0giUmDDhNAkjouXTwMrhCQZJBhRmlS\nZgKFMXxsDAwKrPdb9akoi8TG4xKSCzhKGs1rIKRHxhhgs5tECbZRbp7gwvo626ur7MjlSJsmJ89f\nphIojMqYbuk80+8+wMGDt7/udbn99j386EdXmZr62V1qt9vCsrzX9aE2OFjA91dfddxxGq/5OsMw\n+PBnP8uf/Pt/z3ajge55lF2XfTMzDA0N0fI8hKIQhCHjs7O4pW0a5Q3swiib5TX0OGKxU+Xu23ex\ncPUq87t3E0URV8+dY9/wMKHrkg5a7EwKyo7NaghrUQWBQhsVm5gBdLYJSCJpouCSIaaKQ5siDj4K\nMQ5FK8tWOEBF5GmHbXyZRiPLEFOUaWNzAEXo1GSbBHWmUNGR2OiUiTmKSei0mcyOoCZMgm6XUVKY\n6CzQwURSRqejqkxKl5Xt8+jOJpNKyP1jY9TrNerr60SmyQ26ypnWOsPCQ1UjctkMe4E9O3bzxBNH\nWFq6AAgKBYuhoSJpP8He6emXrrumqtw4PMzRp566boqROI45fPgIv/d7/46VlZgwvIiuj5BI6Hie\ni6pqTE/PYlkWlmWRTicYGWnTaHgsLraxrN3Mz1ssLCziujpCVIA1stk83e5JNG2CVsukUb6AjLeI\n4xU0ZRRVmSSIXULWMQyVlC1wvAaCNBdbFXLJVcbzFtPtkDOdDkls2rj9xCho0uOJhP3HVXrU2Rwx\nETEeMWVMVKPYC2BUJXocoWomMraI/QCNbs86TTq0/Dal0gnCcJtUyuNjH/sg/+k//TdSKZ12u0Mq\n1SP8tlqb7Nmzj5WVS3S7uxgYmEdVI8rlC7z3vbeTSCSu0Ur+YrxdjPwt8eijPeOw62m08UbxiU/0\nyKx/9Ec9Dsz1gJWVE4yPH8A0bVy3w8bGKT760Xs5ePA2Njc3qVardLtdDj/6KN1ulxMnrpDLzaCp\nOmEUoVU1/st/eYx/8S/+4cv+2MbGxnj4n/5TnvrOd1jtdlk9fpwJwyAPNKI2p5urbPl5FAaJpYEh\nA5CbZK0b2W5vMZrwmRMCU1XZ8H2WEGSxGSNFB1inRpOAEQIsBpCUaIokjpwl348+a9Ghw1U8OhjE\n/QAtaDFBGoMGVf4/9t40SI7zvPP8vXlV1n13V98HunFfBMFLAEGIt0jJFoeWRIkKcSTRHtszkm3N\n7tgTlr2anZiInQlHeGPWsfaMJ2zRq4OWLIoiTUokIR4gQIIQiPtooBt93133mXfuh4YoygSpwyRB\nOvT70lVZld1v55OV9eT7Ps//345PmhQtGtiARpAufKZosSR0KoTwaSciqmQ0m5hcRdY6yetRuvu2\nEYsJQpEQs2KF41NTLI2PYwuBlUxy99VXk4lG8RckMokwTz3+OH3Dw6xfv/5n1nzs2nU9Fy48zOTk\nUUKhLJbVQIg899//oZ+rCG7DhvU8/fTL5PPzZDKrXiel0hKaVvqZ+3Z1dfG/f+Ur/Nmf/An9uo6S\nSBBw3dXzpVZjYNs2Rubn2XvXXdi2zbPf+jb18RkuVpdYalZJZjNMVCpkNmyg7jjoloVZrxNUVUby\nFbLpTjJKkENT80y5JvVLqqbtCIpwyTfGI4CHIEULlyIhsngEL3W9mDhMmAbLUhZHtIGiYdnn0bFo\nsIJHCgRUfYMoZbqQSF9q9VzEZpR2FHIIVBbrGl4ohDBmwXPR8HCFTFXrR0g6iphFC7rIUon+uE7T\ndokl4tQadVTbZrFaZ000TUwVhCNJIpkujh0bI7O2gztuuJZPfvLjnDt3jmKxSDabRdM0XvzGN96w\nHBMJBqnPvjGBvFJ8//vP8Dd/8wzN5hDRaBIhXExzDEVJ4HlFurr6yeXa8C9N4y0vT3Lzzddz+PCr\nHD9eJpGQKZdX6OjYhmGs0GqFwYdmw+bE8aNEQosoWjet5jwhbMJ0gNdEFicxpHZa4ioQBwkHNyHL\nIUKyia0HKdZkbDOBonWQZ5QBoliXbkOyQA7ovmSbeQGPJXwal0wVLTwmhEwl1IGkWhTNFhnNo9I0\nCITa8GwVW67jWBE8P4avdBEKr0VVg9RqJ/jyl79IW1sbv/7rN7Ow8AjT03N43mpNDCzQ1tbNjTf2\nkUqFGBs7TDIZ4/77b2Lz5s1XLpBvwjuajAgh/pxVf5+jr3fwFUL8H8Adl55+2ff9Z9/JcbydfOc7\n71/10n/KwMCqVsozz6zWj7wX+PCHt/P880cwTZ9QSObee29g584dAORyOXK5HJ7n8eoLL3Di+Gk0\nLYEir34ZLlTzdA3vwDRjjI2NsXXrVpaWlti370VGRibRNJUPfGAb195solarRJeWCPg+QpFxvUXq\nnk9MbcP1LQx/cVUh1fIIRfvQOqOcnptBajaZtCxkSSMr4iiySlxRCJkGo26dEk0SFHFFjGU/hMGq\nuygIZAJYpDBpMEOQBhuR6EUlSIsCHrN0kgeCWEQwqKID8mpZJaaSIiBlMEUTLdnOXKXMxdYKUVUQ\n0eI0mxdpT/cxMTLPqcqrZNNpGo0GPfE4ihBY1SrPjc/wwtlJzo+tsLYnzcX2FMfWr+cTDzzwloZZ\noVCI3/zNTzMyMsLk5ByxWIqtWz9EKvXWyyyv3/9zn/sNvvvdHzA9PX4pnlE+/el7+dM//b2fuX80\nGuVTv/M7fP/rXyfS18fI6dOUl5cJpNMELIu8LzHx/FFkWWHD3psZSR3jpuE+dm3aRCwcxnFdnj93\njlcWF1EvjnMo3+KFhokkBFtVm2qzjmLXMYkgiDBNjbXYxC4py9goXCSIIIaKBKyjwAIeMyjIVLBp\n+hKS0geujOudQ5V9PL+bBa+ARwPJtwFBDA+dCCYeFgYLhAjTSQsFG5mWAZFwjla4QavlMudYrPgS\nqgsBVaKnZ4BkwiJjtNjR0c6PpqZYyedpGAaO62KZJme9PFY0jRvP0N05yHJxgbFimd8ZHCQQCLBj\nx47Xjm2j0VitjXFdlNcVNC6VSnQNDr4hFleCYrHI/v2nMIw0g4PdXLhwDkVpR4ghEgmHZrOOaZ7g\n7Fmb/fsPUCrNEAxWaTZ3ks8X2LZtPUIEWVqSabVamA0Z14gi3AKW4YGfRGvZtOpT4KdRpQGE1yJE\nFtct47hzeKKILvk0VsZoeSa+7wEWiAEUJUwkGEWrzGOhoxOnyQomq/4ydWRaqCwgIfBIBkL4ER/H\ncsgSIJCJkVu3g3hcJz/+KvqUwZlqgawcpYWPp3VT8G0IpBCiSSIRpatrK11dq9L9W7Zs5o/+KM53\nv/sEL754FEVR6e3t5KabNnDLLTe9Z8zw3op3LBkRQuwAwr7v7xFC/L9CiJ2+7x+59PJDvu//JyFE\nHHgMeF8kI9PTMD4Oe/Zc6ZG8fdx/P3z96++dZGT37g9www3X0Wq1CAaDl632liSJuz7+cf70wCto\nNYOW51NxbMxEhu3rdrK0NEWj0aRYLPI//+ffAz10dd2E41j88IcjzJ19nk/deSc/2LePsfPnWWq1\niIogA0qNmj+KosQIBDXCqd3Y1gqy5LJ2zTCLvsuZqSlM22ZQ0jAdD1UIbMcmq6qseIKSCDDteTR8\niVUviyxlCrQh0AlgYVFkGZMUMt34CBw0Vu9/M+SZJYOOhMWPu2sagKmkUZUIdXsCtBTd3buZNA9h\n2v34joKws+QXirilZwl7TYS5gNTTgZzJ4EsSAUni4ecPs+J0sPeqXyMZjVGoLGJaKwTkUY6++io3\nfOCtjbM0TWPr1q1s3br1l4pte3s7v/3bD1Aul/F9n0Qi8QsVR65bt472L32JkbNnGbz9dpBlwuEw\nTz31IprXQ2fnGiqVAo898iSLFw5w38278C7dJSuyzDUDA3zv/CgltwPRuYfCuVEk2+CAsULSrzIj\ncnj+OiBKnRWOcpEYeWp4mGSQ6bskWqUhCBMSA5T9Fr6IYxMlHC0jqBEwoeyAovQh/HY8OvGo4VMl\nTB0dhToeMquzOzb6JZFxCU3SUYSFVS5TdVo0PRWLbiQpDVIYw1+mszPMVVft4MBjj7GmoiN5Hofy\nefpiMSKKguyrzCsqSjQJ8QwT5RUWXI+brtl92S+lcDjMVXv3cuSZZ1jf1kY0FGKpWORis8m9t932\nS8X67WZpaQnPi+D7VXQ9zNDQWqanJ7DtJsvL5xkYSDA83E+h4BAMNmm1woTD21hYCKKqcU6ePMWd\nd97F8PAAzzx1mkRgPYX6FJJbR/N1HMC0LDxMQqzFQ8ZmDpkyCi4tKmT8Ag23Hc8N06YO0PRsKn6L\ngKjT8n0u5vNIhKnRIkuMImVmcXFIIJHGQFBCp0mICXOMm8I+mUQMX9U5bqyQjKsMrOlj165B9IDP\nf/k//5K5pqDVLOCrEE5upS0Uo6Mjx65dG/D9Os1m87Vj1NPTwxe/+Nt84Qs+zWYTTdPeU627P4t3\ncmbkOuDpS4/3ATcARwB835+8tP3H7sjvCx59FD7ykffOksbbwcc/Dn/8x1Cvrzr6vheQZflnthX2\n9PTw6X/3O3z9a89hxdpoS7bT3t6LJMn4fpmurk5eeeVVXLedzs5eYLU7p79/OycPfpcT587hC0G9\n0UCRJKpCISxpmE6Viu8jojvQFBNNW2bjxo1MO7PM1ev0pFLM2DZWy6RJE8fXSApBDYOCHCKrdiO1\nDOo4+FSQGaKGhEcJDeuSJ03gkrqryqokvIGPhk6KIhoLVEnSoAefCVYYJ0TLDeB5M1iegyoE02NH\nCGptmIEEmtZCcUycpoXkyqSpcFUqQr3RYAa46e67OTcxgRFwGGpfQya+qteSTXSxWDTwHJdzR4/+\nzGTk7eIXEVq73L7Xv26cx48fx3Ey9PWtY3l5hvMHHyPXMlFaEkunTzM3Pc3eG28km0igyDJnz87S\n3TfEUMbDDc/TKhi03DhnaQJpdCWG5Wj4JJBJUycERBDI+ECTRVR6iOJg+RYImYCcBqGSToSRvSTV\n8gItO0eAAIgWplAQvs9qrOdxgClMujGw0MhjYuPioqNJOkt2hXYcDJpo9OFLUZJqiEhQo2xqrIwv\nsO3Gq6lt3syBo0fBMDGTfbxsaJSqNTRV4q6dNzFjG6hbdhEPRQk1i+za9ebumDfdfDOxRIIj+/dT\nnZ+ne80a7r35Znp6et50n3cC13VZWFjthOro6HjtZkTXdTQNwmEV02wRCsVYv34blco84JBOWzhO\nkquu2snzzz/NwMCNyLJKoTDNpk2dzM8XOHbsFdraevDsR1lsrmBZCppvYYsiYb8NhI3wxSU1mVly\nFBhAQ0ZmEYmC5FH3VAIijuSspiu+r+J4NrqYYskPEyJGAQOTMkF0jiPQSaAQwCaJQRqBwKJEszmN\nIzQ82SMeCRK2L3L99TcSicQIBgP8q4/fzj/8wxE0fR2mGcEwfDStAOhks1mWlubJXPLnej1CCMKv\nc+V+v/BOJiMJVhVjYNXrZ9Nl3vMV4K/ewTG8rTzyCPz7f3+lR/H2ks3Crl2rIm7333+lR/OL8YEP\nXM+pU2MUCkFSqXZarTorK+Ns25ajp6eHxx9/lni896f28X0fw5UZOXKETYkE6VSK6VqN54wW+WAX\nejiO44Xp6eqlUpnHs+qcO36QRr5Ij2NR9H3KXoKgGiDh+lTcAjXP56KkkZD7iNsQIYBBgAJ1ZBaI\nkMa4ZKIFy0AEiwYuChLKJX+LCtAghImumDRclzO+wzIyNZJ4/hCuDz5xXFdjuXScZDRLOCBwfZ9S\nrUC7cMCRsVUDw5Rpi0SYLq3WZGRTbcSj0hvulHQtwWJxkcGhN5+hcF2X+fl5PM+jo6PjXfcpeSum\npxcIBtMYhsGLT36DXLmMJWTMpocwbPqTMkdPn+aO3bsZm5tDDqQx80VEvU7UdckEg1SdAFW7Rlh4\nrLhlAnIYy61cEp7ruKS928QmiCAEzOEQQZJqhOUQDXcZobUj2za2BWFdZaVpEFZUNE9QcyusTtaD\nR5xlAoQoUGSBrkvqEUVahInQcAokcbCpARYRoWD4Joa1Ar7HQFCiYqp8/+BBGoUqLV/hbMGmM9ZO\ne0cXOUVlbmacpw4+S7ojw/B1H8JxDHp7NTZvvtwleBUhBDuuvpodV1/9pu95p5mcnOTv//5JarXV\nczEcdvnEJ+5icHCQ3t5eUino7o4zMjKHbWcIBALMzBzG8+aZnFSpVi+iac+hKAk2bdqCLKsoSgjH\ncdm7dxfPP///IcsNhHDRlSyy7eD5EsJvo8Ey+CYyEjUqZMiTQ0bGxQMCOLR5LlVJR/XzOERx/RZZ\nPALo6L6ERJUaFi5BTOrY6LSIYrIBCR0PgYNMCBeJMI4XQpXi5H0P2wpy8ewkDz/8ImvW7MDzLMbG\nlunoWEO1qrC0VMRxklSrCouLo7zwwpPs2JGks3O1/mphYYFTp85RKpVRVUinM3R3dzEwMPCuuy3/\nsryTyUgFiF16HGe1q+k1hBD3AEnf9x9+s1/wla985bXHe/fuZe/evW/7IH9elpfh+HF4j8xavq3c\nfz987Wvvv2QkGAzy4IOf5NChH3H8+Fk0TeWjH93G1VevOlJmMnHGxmqvyUkDFIuLJFyDLZs2MX78\nOBlZpjOVYpNco5HJsKH/Os7PL7Jcn6JVeJWU5uL5CYr1Fp7XTkkF38uQVxs01TotT0XVWth2nDZX\nwvZWP1QpoIGCywk82gnhY1Ihjk+RBpDCYxFI49JCFhUS0kXWynU2qCqmFeBZO0iBLnzW45PGp44k\ndITQESQpN5boCYdIxyMszTeJ4tHyakR8i3y+Qb5YpCoES8Xiasu0LiPHYpiOTeBSC7VpN2naBpve\npGNiZmaGb37zcapVAQgCAYt77731DdoPVwLDMIjFQhhGnpHTI/hL8/RkOhAIzGqeqZkpurIpCsvL\nTCwssOC6ROM6egOm5+dJBgK4vk9EsQl6LnFZUHWLtPBYtSH8sSy8Q4IshuRS9gL48jRqIEDQj6JH\nMzTq40SDGpIZQvJL1L0WSA0skaDq1C6VwUaQCRIihUSUOtMkqJO6NIcmpCWafhPPB48aOcoIFLKS\niiokKngUXZ+VlkW+NceRkyGGs1tw/CCK7KL7Cs1SFd90adfS5J06K4tTPPO9/4c//i9f5vbbb31P\n1w1UKhW++tXvEY1uord3deauXi/z0EOP8/u//xmSySSf+cy9fP3rj+I4HtPTZ5mevoimeSSTG6lW\nE0AM3zdYXDzIhQtn2bBhK65rEgpliMXC3HbbbtraIkyOLGHVQ0xMN5ClGCoWVXeJEEFAockEQZoo\nKBi4mDSJCRvZ9xjzWvRoURpeEcv1VitAhI/iOWRREVQwaJDDpkqDOgKPOglimICBwMBGUKXq+xyp\nlkglcwSVELOVMCkzTUfHIOBz+PARXFchFgsTDOYoFpcpl8vMz8+xdm0n5XKOr33t26xbN8g//uMh\nmk2VU6dO02oppNMxNm3qZN26JJ/61L3v6dj/mHcyGXkZ+DfAt4FbgL/98QtCiK3A7wJ3v9UveH0y\ncqV57DG44w54j8j4v638+q/D7/7uasLV1nalR/OLEYlEuPXWD3LrrR98w2vXX7+DEyceJRpNouur\n05ZTU2fpDMvs2rMHz3GQKxXioRDdQnDGdgjo0+iBRaKtOfZ0RzHqAWZXCpieR1P2MFwVXcmhYCEp\nJhUlQ0xdRuQlUIO4fouav+oJmsLHoUGSEgYyPjIWZZKX7Olb5HGYAmySQY1uGghFx0Yw6fsURAZo\nx/UjSMhohHD8Ip636iyqiEVsO42mhOjKpijPThLRlhiUBbasUmqaNHyXv/jOY9xzz4fZdm0fkjTA\n9LkxokLg2C3yjTFuuPsOtl911RuOX6PR4Ktf/S7B4Hp6e1ft41utOt/4xtN84Qtp2tvb37nAvgUL\nCws88cSzTEws4Tgm586NYBUDhIJB8MF0TFJJmfZIHxfyeWYDAXb09PCv77iDhx56mJeeOItj2wRU\nlZZjYxpLdOtNbNVDaYTQAylsO49j1gnRBsLBFg7hQAJXChANt4iHHQzbRY469GbbUVsFisuTeG6T\naMRHUkLUa+eJC5UAEjUsII9PH+BiESaIjoFJGIlBSQbXYpkCJk2ykoLtSVS9ZeIix4pvo/rguA3S\nUpU+EjRaJWQRIBFOsWwW0esF+rMdlC2DvA0DuXaSapD56an3XBvnP+XMmXO4bopo9CdeR5FIgnI5\nw6lTZ9izZzfZbJYvfvHzzM3NYRgGzz33MmfOVBkdLdPTM8TIyDialkLXk1QqyywuTpFIyGSzGRYW\nTnPPPdcyPT3P5quvZfzkeZSFJp5jYbsuOhqCZQQqQYr4NPAJoGCTwyKp6YxbPsJfYcVJ0PIUogJU\n4VD1Z4iQx0QQxUUAg0AEj1eossQ0LVRsYnj4CGaIKTXSeheeohMXIU6W85TkNk6fnqRWe5pAABYW\nlikWY2zdOkgsluXsWYVsdgulkiCRyDE8/AFOnTrIgQPH2LbtI7zwwlMkEtfQ0ZGgUJhGkjoYHa3w\n0kuH+OAHb7pisf15ecfmb3zfPwYYQoj9gOP7/hEhxH+/9PJ/A9qAp4QQj75TY3g7+da34N57r/Qo\n3hnC4dVamL9/a2Xu9x19fX3cfvt2Dh/+e/7hH/6Cf/zHv0KS5lm3ZR2yLDO8cSOuEETDYQzXZbC3\nh5t3rGdgOMeWaJihVBrFVwgqEdJyG46XR/IdPGwQChXTRgpksZVeSpSYt8uUPReLVWEjjTpRlggx\nRYpZ+pkkSGFVZFzqI6ZcQ1jZSUjagi9itJJZuvv7ORcMcUqKIoJZFFkjQJ44NnEUoujILCMpJQKa\nR73xPOXlQyyWz1P2xhjwClRqJkUjiKG24ek51LrEYl3lYx+7nfb2Bh1DIeyUidZt8aX/9EU++9u/\nfdlCt5GR85hmjFgs/dq2YDCConRw/PjpdzGSP2G1KPlbLC/H6O3dQ3//zcRiQ+TzJ6lLBpPli8AK\n29Z00N/Tg4jFeOAP/oD7P/c5urq6+K3feoBERwtDWWS+OYWlzjMQr5LTQRVT2HKejVuSXLWzg2Co\njhSII6sZLEXBUnVCQY9EtA2PAN3pIXrDObZ1bwdHJmo16PNNtuI5NTAEAAAgAElEQVQSrc/RaY+A\nO4vHNAGKpMni0wBcVBoIIC7p5ISG7BnUaSIDJSFRkYIklHZ0f4mz3hlMfwmXFWTytIkoMc9HNAqr\n7keShxRto+hLXKgWsSyD9qDHFj1MoF7hyUe+C0ChUODAgYPs2/cc4+PjeJ53RWJ4OcrlKqr6xoRJ\n08KUSrXXnkuSRE9PD8PDwxiGg2U5SFIETdMYHOzG8yoIEcU0J5iZeZJczqRYfJVbb93ANddcTV9f\nJ7mOJNt27ybT3U+6rZv29h5EQAMpQ0rpZTDaf8lNxiaJBZJg0XEoyioGPkWxRFmuUKJIxbvIRn+Z\ndXj04hIEgkA7oAO7MEgzT4Bj2LxCSLxEm3QGyVcYNxo0PZsVu87FegMR6CEaHWB+Hs6dMygUTKrV\nIlNTY5TLRXxfxbbLxOMxKpUGAEIEWFhwaTZrGIZMMLhajxUOp5meXiCXG+KVV0694/F7O3hHW3tf\n38576fkXL/38+Qwx3iMsLcHhw6sFrP9Suf9++MpX4AtfuNIjeftYWVnhxRdPMDCwi+HhMLZt0mrN\nMNUYZahSobOzk8ratZw7f57JRoO1fX2MuS5rhoYoLy6iCYGPgSzLxGI5yqU6NZpgj+NaKWpCRm/q\n5M0lApTxMWmRw0IQoYRghSSwSTJRJJuGp6J4MnnCyEoPshJBwgNJJ5yOk+6YwVY9BoIh5nwLq+Ii\nu1WCko/jBZHIoCLwWCGWUvBEiJgzzYZcDFnTuDAOsy2fFTeAqkcwFZ3ecDcrrSJTkyvs33+YYDAM\nmKTTAW66aTe7du160zXlWq2OLAffsF3XI5RKby3j/k5x5MhxPK/9Na0SWVbYvPl6lsdP8+vXdnBx\nfJyQZZE3TeZLJfw1a/jg7be/tn8mk+H/+r//G3/xX/8rL333u4SFwI9GWZPqxa/VUKNbuevDtxMI\nRDDNbzA2No/rxLGdBhHdJxUNEwkJStV55uYm6evs4+LEcTLNFp2pXubLY8Qti6ArCKOTUjqpOkss\nYOGjI7Cp0CDDquS6K1xMWSWshWi1qkxgseAHWXFDKLTQcBEorBE+CSWI7cs0fZ183UTTLdxAmnja\nYrniIiSZrkgUhQYZHbrDcSK+x8lyieeee55nnz0OZJBljR/+8CxbtrTz8Y9/9A1Gk1eC3t5ODhx4\nGej/qe2tVoH+/suL8w0P93DmzFE8b1XrNBQKEgjYSJJCV9cWBgcVdN3m3ns/yPZLJlxbtmxm//4j\nmKZGV08aWc4wNX4CqlU0pR1VtomoATQ5zZw7v1q547lUgBIhPNagBGP05YJUp1/kBgd0VFTfoe77\nOKw28tdQLzX0O3RgEpQdVhSFgKzhOBFmHcEkXRS8EPXmMnJkDapm0moZaFqaUCiA59VptV7CMKLM\nzJzANCWiUZ9MpodS6TyPPfYwxeI8lmVhGE18/yefY9/3EUIgyzK27f5cMbBtm4WFBYQQdHZ2vuu+\nNVf+LHwf8K1vrc4cvMdnOv9Z3HorPPAAjI2tao/8S+C55w7i+1309PS/ts22uxkbq/LKygqFQz+i\n1Wjhx5Pc+MlPsmv3bvr7+3n04YeRu7sZO3qcarNCqdpC19rw9SRNuQvPzWNbs+haJwqTaO4ig3Rc\nqqKvoyLj0ETFQkgSbiyGUBQQKlpdoBoBED6qFqFpVQgEdUIRmUCii3h/F8eOHMJtXiRopZFFiyBZ\nKkzS4iKSqNKj+dSbPrJVYWdPho5IhM1dXeQXF3EbPr7aQSzeQUgOYlgWciRBteXyve8d4J57HmTT\npqtwXYcjR0aoVB7lgQfuu+zx6+rqwLbPvmF7vb7C4OCbF0O+k0xPLxCN/vRaYjKZJBDvpliv8rE7\n7mCxWGSxWCRsmvzmH/7hG5Youru7+dwXvgDhNAdeOIDfqOKl0/yrz3+es+cLRKMpVFVj1649lEov\nYhglNEUnqmaRRBVBE7m5sKoGY6cp5WdItGxa4Ra5gE8VFSQZ1RP4AZ+wFCRpmRSZxCKFBCgsEZKb\njHguIV8mYJmUPJMSEdLSWlxkSp6NRZag2iQWiBORNITkYrdaWHIES67T1bcGTYtSsV6hrk7hOhGG\nkxESksLY0jzlZpmuTYN885uPs3nzR9H11WNRLKb45jef4ODBI2zfvpk9e65h3bp171YY38C6devo\n7DzMzMw5crk1ACwujtPW5rF+/frL7nP99Ts5fPgUExOTVCoxGg2L5eUKsdhq59mqzmk73/nOPgYG\nBjh+/CQvv3yS0yeOkZ+bplJoMrVQIZYYINe5ntmZUSIs051bR8WaRql5LKEwT4AoAgdBmxoiHkpS\nyM+jeBKLuoJs26gOFIE2FHwENiFCyNRxaMktEopEe38/M1WJdGQHQ47NeHEWLxhGmFFCoTiDgxqT\nkzMoSoBWqwisMDg4SCzWT71+hlQqRzq9ifPnv097+xZisS20WhdptUY5efIIkuRh2y1UNUijUWDD\nhn6Wlqa47rrLH7/Xc/78eb797acwDA3wiUZ97rvvbvpep8r7TvOrZOTn4JvfhC9/+UqP4p1FUeAT\nn1h18v3TP73So3l7OHt2nPb2XT+1TVUDNJvQEkEiA7cR1UOAx/RMiQ/FYqiqyvrt23nkr/8GUdOw\n/DSKWmextsKK6rJ587UU5qp4To6kHqHVCFF1PSpGnoC/majwkISGS4WyP0Y447FjaIhEOg2SxGMv\nHSNkV6lJKzTcFtFkFE2TqVRabN06zOysSrXQwZpuhcL8GG7TJu0tEtMkkFp0BCWMRoNlT3B1SGGj\nolCtVnnVtrlx7VoeXjlKznGRXImma2LJPmY0SaNRo7d3C/H4aiugLCv09m7mwoWXmJ+ff60q//UM\nDg4yOPgKExOnyOWGkCSJpaVJUimTLVuujIJje3uKubkSsdhPxNaEEKzfPECiw+LQwgKKECidnXz6\nIx+57MX0xImTPPzwcyQS13Df/bdRLq9gGJPs2rOHweElnnjiRwSDXbS1xdm8OczZs2dIp4dwnCIB\nxUb1Kty6ZTMdsszFyhKziyXiskVQSOi+i0mAtlCE5cYKQadJKNiDL1lYxjRlqqgCYnKRcLiPqNJL\nxaxQtVrUsdCx6dFStHwfYZZpILAQLLgV4lqKeDSJJUHJaVILBQiFdPywzR/84WeZHz/P+W8/QqO0\nSMEOI2SdWHYdS0WD2tgiO3asFrwVCgVefPE4ntdNqdSgWEzzt3/7FPfeW+eaa65MN42qqnz2s5/g\nhRcOcuTIIXwfrr9+I3v2fOBNiy+TyST/9t9+ht7eH/Doo/uYnZ1GlkM4jkZ39/X4fpbR0UUc5wJ/\n/ud/iW3nmJ8x8fNJcqEQCeccV6eyjNWb6J1REok15Ocspmrn6YwGOG2mqFhpgoSQsZFECxFw0X2P\nEBIyCsOxdmbLs0R9l6AnwAtg4yALgfAFVWVV60NJ6Vy/axdHzjQxahH0aBxVz2Imk5RKZ1HVEMPD\nV1GpvIqul1EUjXQ6x3XXbWdysoDjSKxbl+HIkUdQ1Q4ikU4mJk7h+01SqU7Onz/P2rU55uf34/sx\nurszeF6ZRMLgxhvfsjSTfD7P1772fdLp7bS1RQGo1Up89auP8qUvfY5oNPq2x/ty/CoZ+RlMTMDo\n6L/MLpp/yv33w2c+A3/yJ+9vR+IfEw4HsSyDYPAnmiWu6zAyMsqtt36S9vaffAEvL8/w9NP7uemm\n6/irv/o79l8wCLQcQpaFIoMRTdGWipMMz9A/HGB0JkitYGM7Oo4IYok4SRFcVdoUPoFACFn0ouU8\nRDpNNBrFNE06ImHisqCrZy2RSCeNRpWlpRUUxaSjo5+FhVnCSoxqrUkiqNPwGth2g6TwcDWZhiuh\nqDJ9bT0EGzUqVYuAZoHv097VRWdHO2OFFqaooYfiOJE4gdQw3twLXHWZIlUhopRKpcsmI7Is8+lP\n/wYHDx7i8OFjOI7Ldddt4MYbP0ww+Mblm3eDa6/dweHD36ReTxOJJCiVSoyMnEDXF7n7o79DW1sb\nlmWRSCQuu/zkui5PPLGfXG77686LACMjLb7wha/w0Y/ezp13bqNcblCvt7jzzk/T0fG/cebMWQzD\npqurnae/9S3WqyoXDx/mtuEewsKkOjaGaDapOQ6WLxB+gClHAmmZRKOO43vUVJ2OxHoGs0kuTD5P\nTM6gShBWYtRaLogIrm8zZtYJyB14UgjHK+F5VQwtx4hVIVVqYkoStZjHb37pQTZs2MzQUD/9/f0c\nP36cvzx+guL5PKloO51dXaghnaPFAs1mkEJhnmy2mzNnLqDrbQjhIUkGyWQboVCUH/zgINu2bbli\nrdvhcJi77rqdu+66/We/+RKZTIbPfvbTfPazn+Y//sf/zKFDS3R27kWI1djrepwzZ85w4sQyN954\nA2PHLrKmvRPXcTgweoHeDVmuCdv8qDRNV9d6QqEoczMvYZs1tPh61rga9YZJFBnHsZlrLBGywcOn\n6QjOLs8wFPAxZBnhyyygsCJkHAFNLLxABNMzEa7LzOQkphugt3eIlXyRml0mG4/j+2EWF1cYHS3Q\naGiATEdHhkDApa9vLZp2kdtuu5ubb97N44+38dxzC8zMnMXzNPr6NqNpOq2WjevO8OCDd2PbNuVy\nHd+vMjg4QKlUIhqNvqnA4IkTp5GkHKHQT5KOaDRJqZTg3LkRrr323fEm+lUy8jN4+GH4jd/4lyV0\n9mZcey14Hrz6Kuz8+T3U3rPs2rWdxx8/RX//jtc+iJOTZwkGo7S1dfzUezOZLk6f3sf4+BwjIwVC\n0RswVZ2aVcH3DXKZJEF1hYg9z7n5FngaihLGsjx8L0jJA08YxBUNw3OoWHlinUnKwuRUq8XJfJ5K\ntcao0WDTrnu4OD7J5OQkjuNRrVZpa+tEVcOUywXyy7NEZYEphQm7VTKeQQwXw4A516YRybDWD+Bq\nJqbr4psusmxQaTZJJUOsHepledmmbMvEtAC6dIFf+7XdRCJvbAXz/QbxePwN23+Mruvccstebrll\n79sam1+W9vZ2Hnjgw3zve/vYv/8iExPzpNNZ1q/fwl//9RPs2jXM3Xff8aYX3lKpRKslyGQiNJtN\nzpw5y5nT4yRT/fh+O8vLUSYmjvOpT93Cxo0bOHfuHPv2HcD3fbZtW8eGDRt4QdfJpFLke3uZmJmh\nIxbjhOPQNEwygShNx2bCtxBShoS3SIQwshwmI0k0jDE6O+5kudWN78fJ111akk/eB10Iyn4QXQSI\nqGEs16FhCRx/BSFpOGofo2YRR2rybz77SX7v97742v+1srLCU0+9xIV6iCZh0k2HmYtjJNcMcNVN\n9/Dssy+xsDBNKtVBsVgjleqkUDjL2rWrM0eBQBDLUigUCnR0dFz22L3X6e/PsX9/5bVEBMBxbHwf\nHCeCYRhoQiAQ+L6PKiWYnJ9BM3xqrkSqaz3B4CJqQMcSgngoTH3RxBMyVa+JhY3q+yyYY6i00xCC\nEc+mZri0awoTnk1JJAiqOVYkiQZ1ekMwFFZIpwI0Ck0KS1MYlqC38yo2r1tDsbaErg9x5503sLIy\nj+8rjI0dRlXj3HTT3czOnqKtzefjH78P0zQ5c2aUuTmHSiVKIBBkamqcNWvWk0gkSad1hoYGWVrK\nc/Fig2Cwm1OnLH70o8f44Ac3cvvtt1z2uJVKVQKBN4qkKUqIarX+jsXrDX/vXftL70N8f1V/46/e\nN7Js/zyE+InmyPs9GRkfH2d2dolGY5yXXhqjo2MtsuwQiTTZtGnwDV9WjmNRrVaANM2miSSpSK5K\nJNSFabdwHIeK1SIa9DHtMBFKlBtncL0EDSeGi0dLb8NwbQwxjx5ai2kFSHe1c2zhFKYhSMaGUFWD\nfT/8IaFQH/F4Gll2keUk/f07OXnyNMWZQ2Q9h5jdpEKenBYgLOI0cVDae8mZMmW7hhbqJV88jR6K\nYDarXKzWmS+VqKUGuPrqu2k2XRYWZpCkee67715isRhPP32MUChKOBzH8zwWFkbp74+956zEfxZD\nQ0Pcd1+QmZmH2LHjTiKR1Q4Cz3M5ePAVNm5cy+CbeKrouo5tNxm9cI7JMyMsT0wTEBEKldNIsRKp\nVI5wOM6TT+7n9OkRjh1bJJUaQAjB2bOH6O9/lXOzszzxyCNs7ullw/AwbqNBZnKSU2GBHu+maTXw\nKiXk0jJhP4IqB1CkAIGggo7F4WPPsn1rN+VSmJJQUYIR/JaD02rhkcfxBmjYFr7nYkoNfFkib1eQ\nPB9UmUDQR1VVHn/8+1x99TY6Ozt58skf4jhdbL/6NkbCRRQ1hGsaiFiAzs5+hofHqVQusrTUjmWV\nWV4+Ri6n09MzDKwWPHqedcVmvH5ZlpaWGB0dAwS9vd2Ew6dYWholGEzh+x6uW2FgoAvwURSFfL2O\nLnxcp0HNmKFVLdCf3UwomkWSZGZnR7GsNZhOg2INDHMRz3Xw3TgeAg8PDYmUUqBbCyJpXUxWJph0\nDdK5HLqXJhLqoGE0UZpNkqpCCR9F34jTFISCbZybnMAIaWzuvZb8RIHt2zeyZcsmhIBGo0KlcgPV\n6gn27u2mq6uddevWoWka3/7246RS21CUA8hymnC4A8MoMzFxms5On8HBazlw4DD5vKCvbyfl8jKG\n0SAc7ua5506xadP6y37W+/u7OHbsxGtF4T/Gsor09Pxy1g+/DL9KRt6CQ4fAtmH37is9kneP++9f\n9d75sz9brSN5P/L88/t56qnj1GoaExM6y8sXmZm5wG/91ie5997P8zd/8w2Wl6dpa/uJOuv8/AXW\nretmaSlAOJykVMrj+RkggCxkLLuOLK1QqTXpQmKovYeSJJhbush5bwJXbUNVE1hSA8XtBlunWVri\nyMElDAeSWo32UhXJlWnzw5QMBZHoxfcXyWZrRKM6o6deZb0eQJaTmI2LhH2fqNlkxXOItK1haHgH\nU1PLlIs1Gq6NEskxKkzmrBqBXDtrrv8gG9LX0dnZj+e5OE6LkZEK/+N/vMTOnRswzRorKy9TLMbx\nfZstW/r58Idv/4X8Yd4rjI2NE4sNvJaIAEiSTDDYyenT5y+bjDQaDZ79wQ+YP3OQ88cnyaS6kf0Y\n6ViKYn0KYa5w7NgLtFomZ8++gBAp2tt30Nk5z8aNa2m1QvzRH/4FEWeZNk/lxXOHORh6mTXbNlEM\nx7h6YDfd2X4Azk0d59yhp0gpGSJBDYGEYXtYNCACbQM5JssTGOF1+ATJdu9g/uL3CXoWME/NlbBp\nEU4k0fQhdL2DSKSdWm0Zyyrz8suz+P4gL7/8Le65Zzejo/P09OwhEAhy4cIk0eggiYRGsThGqVSg\no0PlvvseZGZmAUlKMTNjcNVVt6yK4QHz86Ns2ND1z5Lqf7d57rkXeOaZ4yhKFs/zmBx/CWP5KIlg\njmZZkO7bzPard7O4eIzZ2Qn27TvASgVGT++jU2rRq9QwWnXOLpwgs/VTjI//CMPQ2bx5D/n8aSYn\nVjCa/bjOFFkpiOIFEAjquHiiTH9yJ5oSIeh5rFglhDZMT6aNo2OnUbQ1tCccEvEIU2WV/IJDrj1D\nW1sXmaEN6DmHm27qIxwOs379T+qvIpEEiqIRDndy5523vra9Xq8zPr5Ef/+NVKsN9u07QKNRAHws\na4KdO/81lUqBM2deolrN8uSTB9H1LJlMN5LURJZLnDp15rLJyObNmzhw4CgzMyPkcgP4vs/Cwhi9\nvTpr1qx5N0IJ/CoZeUv+1/+CBx/8l1E/8fMyPAw9PfDss3D7z790+56hWCyyb99RZLmPixfHiESG\nWbduK0tLJ3nyyVdpb+/gYx/7CA899A9MTS0jRBDPq7J+fZbrrvsADz20j+7uHly3zoXyaeqtGI5r\nEA9XWNsdZGF8lo19O2nv6CCYCNM2OIh67hhzajdDG4fY//zTROQU6aCHZGu0bA/NahLWMsg0CMpZ\nVKmBJMp0dcVJpTZQLL5Eq3UKapNEAnH0SAsv3c3IskvBNnFsk3A8SzKZYGGhgBPrYgLI5+vE40ME\nuraw9bphTp89yUc+stppMjMzysREic7OPZRK47S1rSOZ7MU0z/Hggx8jEom8L/0rfozneZdNooSQ\nXrOQfz2+7/Odr38deWaGO4cHSU5NUzMWOb48gk2RzoTOYsXj6JFxLEdnaSlANBqnszPNygrs2/ci\nJ05cQDbTdEUj9ES6sO0W5fJJJkbGMbMdlKbPYDsWiUiK+ZVFHF8jqIaIhEIE9ACu6+LUTTKdKX7v\nj/8YwzD4/d//z4yPzxKP9zG8dRsz4+O4Zgw9JBMJa8STSQxDZ+PGbZTLBTyvi0ikA8epEgxGSSR2\n8NhjL+C67iXzwSzbt2/k5Mlj+H6cSmWWYtHjk5/8EJs3b2bz5s3cdtsH+d73nuTo0UNIUgzPazAw\nkOKjH/3QuxG6fzaWZXHw4EH+7u+eYM2a3WSz3UxNnEYv1NkajhEJ+SCFmJp7lfPBMrfc/gEajSqG\nUUX1pxnUPVTLRVEE2fY2suFuphvnEEJn69bbUFWVbDaLLPscPQpRkSQdDeMaJqYhEbey+LKDIxQc\ns4rrqXQmhmhlIqQG17NeH0BVQ6QCkxQnx7GNBC4KxaJJteZS8Jts6t7A+fPjnD59nlJJZv36dSST\nq4JvKytT7Nnz051Nq+f06vm+fv02ZmaqeF4IVdUxjNXl11deeY5sto/FxSKStBHTDFOpCIaGtjM9\n/QrHjp3izjvfeFHXdZ3Pf/4+9u9/iaNHDyNJEnv3bmb37hve1bbvXyUjb0K1uupFc+7clR7Ju8+P\nnXzfj8nIzMwMnpdgdHSKSKSTQGB12jkW68W2Z3jhhaPccMO1fPGLn2diYoJ6vU46naa7uxuADRtO\nUK+b6HqdweENzEyMEVJLrOsJ0dMRRKnGKBhFzpxrIkgBMnk7gB6q0te3noi0j45AFAkwXBeQSAqF\nZmsRtBABPYFh1El4No3GMm1tgyhKhN7eFPZCkGwoQDTczcmFCtHMddRKoyhujXK5QLmSR08HMEWM\nWtUjlL6agbWDXHXVejo62hgZucDY2EW2bt3CxMQ4kUjfpfVz/9IxSDE1pVOr1a6Yeurbxdq1Qzzz\nzAlcdwBZXr2M+b5PsznPxo1vPHGnp6epT01xXV8fF8fGWNfTQUTXiY7azPo1dC0OJJGI0WxCV9d6\nTFNjdnaajRu3c+7cKOWyQ0ZWkDAvLQF4QBbRmMGJaUwswdTiDO3JAiulGQJqlBJV0moMSUg4kkve\ntwiHNcrlMvV6nZ071zIzc5JwOEM4HEWWIRCIk0j0YxhzjI0dIZ2+Fl0Ps7JyFt9P0tWVRlVX1XDj\n8Qy+H6GtzWN5eZpcrp+BgU3kcn1cvHiCRKKL//Af/t1PGU8qisK99/4ae/cWKBaLRCIRcrnc+2KG\nbGlpiYce+g7Hjs0xN6exuHiEtrazOIUptibaaaoBBgaCpNMpdhgG44AkKQwMXMfWrSmee+S/s63v\nKsKhGKXSHJJUQw/0INvG/8/ee4fHVZ55/58zvWm6ujQqlixZtmRbrtjYGIyxMcamBwgGAkuAbJZk\nU678tr0hV7KbvHu9IbvJbhohJBBCMaH3jjHuRbZkFatrVGfUpvdzfn+MkRE27rYkez7X5cuaMzPP\nec48c858z/Pc9/emX9ShVCrweAaYOdOBw5HFoYaPUUbl2G0WlDItba1NCDIBdSJB72g/GiRkci0G\nnRJjuh2z1Uw4qkIUFchUCZzDe9ArHKhkGiQJwsgJijp27apBp1vI7Nkr2LlzL62t7cyeXUY4PIzd\nHqO6erwwTEtLw+GwMTjYi92eyyWXzGXHjgOMjAyg13tpaHifkpLZmM0G9u1rwG5fgEwmx+8fxOfz\notWacbuDxGKxYxocpqWlcc01q7nmmtXnaSSPJiVGvoRnnoHLL4esrInuyfnnK19JGqAFg1PPW0Uu\nl5NIxAgEwlitR9a/JSmBWq0hkVDj8XjIzMyk5BiGKrfeej3Tpu3mww9ldHV1MWdOMVkZJgrzsjBY\nrRx480227ehCIaajkKuRpDh52XNoDxzi/fdfRJeI4w31okJNLB4jjpxI3I9MHEYhaQERtVpFMOol\nHgszNOSkvX0PPl8WKkMlLinA9voajJqZpGmsuDRZxPUaJJWSNw7uYsbCK6ialsGBAw0sW3Y5RUVF\nqFQqEok4FouezZvfQKNJEAh4UasV+Hwj2O1pY7EAgqAgFoudr+E4Z+Tm5nLZZRV89NFONJpsBEEg\nFOpjwYKCY04tj46Ooj/8Y2uz2egURTI0GuZOL8XX209zXxcRsYSQ3IfZnIPDkUtLy26iUQuhUABR\nhGg0RoReZHITo7EhEokECkUawXAUtSqbhYtX0tBQT4AwSl0eIm7CehM1wQEEMUZAhKF4kEUmB7/4\nxeu0tnaSlqbEbrcgihI6XYI77rgHj2eAffu2MG9eJkuWrKS5WU447EShiJKTY8dqtTMy0jmW/SBJ\nIldeeSlvv72Fzk4ParWJaNRHdrace++970srYNtsNmw22zGfm4xIksSzz75KIuHAbk8jEAhjNNro\n69uPyuNEbZ5LWJAhCLKx7DCX04nbPYxaXYRSqUKnM2I0WJDJZMjlasrLMnB2u/AMDqPNKKW7exsz\nZlQybVoRfr8fs1VCCMTQqgSUigQzZjjYX7cFgxQiLkrECIAYQ222Yi+ehSbNTFdXH2BEFEPEDOl0\nD7egk2dj1Jqw2QqJu0aRYl1Mn16NRqPHZsviww9f5oMPnmPBgksQxSx+85unuOOO9RQWFo4d/4YN\nV/HYY5twOkfRaExUVNiJRoNs2PBV/vrXl2lt7aG7W0U4HGRgoAajsYBw2M/gYAuLFs1Eq+0jkUgc\nU4xMBlJi5BhIEvzmN/Af/zHRPZkYsrKSmTWvvpoUJlOJoqIiNJr3kMvjxGIRlEo1kiQSDDqpqCgH\nBtBqtWMOhV9EqVSyZMklLFlyyVHPxeNx3n/1Tcy2EnKtuUSiUeRyBf0BPxkhDXK5h8HRPnSChyE/\nRCQNcRJIQjsFKj3RKESjrShVCqylDvLmlrNv3ydMmzadFY0yVUkAACAASURBVCtupKnpEDWfbsMd\nSEeh9pBhziAvr5LFl69AJoP6+o9YtaoKg0GHxaKntDQ5lRuNhtm69T28Xi2hkJLt2xvweA6hVPoo\nLCxlzpz5h/sfAzxTLmD1MxKJBDKZbGzcrrpqJeXlpRw82EQiIVJRcTXFxUcHJwOYzWYCh5dvzBYL\nGcXFdLS2Eo3FcBQV0Ce6ycoqJbewiEOHRtHrLeTmFtPcvAWPR8HwcAuC0AYyGTqlBTERJxbzE48P\nEhQEHPkLsNkzMJmMDA42MGvWJXz8zm9JM2jo6U8nFk8jHAuBIsDwsIzm5j4slgpEUcRobAeiuFxh\nmpvrMJkUrFkzh7vu+gput5vf/vYFrNaZTJuWS11dHyMjw2RnWzEabXg8g1gsUFFRQWlpKY2NjfT1\nucnIKGLGjPJJX5PmVOjv78flCuNwZCOKClpaDgI2zOZSOp0fEopFiMW9ZGUlM4TC0SgJpZKqqul8\n8IETo9GGOacYt8tJhtEKBMnKKsOUkQHBIJetXcvHH+/E65UYGOgkFgtQMVPA1S4nI8uMUqGkp7+R\n/Gw1/qiFQrsFoy6XLpebxkCCrxTOQqPRUle3n8bGbVithQiyKuS6FjzhHrLTC5GrooTDDcycWTxW\nLysaDSMI6eTnF1FdvRyNRoPfP8qTT77C97//dTSHC6JlZmby0EN3U1t7kP7+QbKyZlBZeRN799Yw\nMCCgUs3AZEonMzOA369FLh/GZpOzdOkczGYdNpt9rC1Iirv6+nq2bt2Hx+Nn+nQHS5cumjCBmhIj\nx2DzZgiFkoXxLlbuuCOZVTPVxIher+e229bQ3f0YDQ1bMRrzkMm8FBTY8PtdyGRDfPe7D9PXN0Re\nXgY33LCaZcuWnlQWgUKhYN6y5TzfvZdevx85ECZKblkZPftqmDVrCZ2qNHx1e1CpBOLKKP5APz6t\nloQ1k0Q4giAbRMpKJ73MQW5uFI/HjMlUzYsvvoEkmckougR3YDdObyfWIi2rV62itaWWQzVb8A23\nUJAR55a77qKx0YnHM4jJZKet7SCjoxq0WgvXXFOJ0ZhGb28h9fXvMG3afEQxyuBgDz5fJ2vWzMdo\nNJ7wWCcTbrebd9/dTENDBwqFjAULZnL55cvQarUUFBSclEukw+FAn59PU08PJdnZzKyqokmvZ0tL\nC3MXL+bapSqcTjW5uSV0dHyKzzdMJBLFZJLh89URi3WSl2egt3uQes9O0iWQxDg+WQh14Uws1iIA\nBEHCbrdRWVnF0OB8Og61kJtTilyuoN01jE4+D6fTiVKpJhJR4ff7MJlkXHllNb29HQhCJ7feejvl\n5eWoVCocDgdf+9o1vP76xyiVo2g09ajVBjIy5tHZuR+dLsDdd9+AXC5HLpczZ84cDjufX3B0dXXR\n3NxJf7+GzEwbeXkmurvbUamMJDRmajoPsHxWETabjXA0Sm13N3PXrGHeggXs3duI09lIfslc9g90\n0tNWw3SHhV6/nxGlko0PPEBubi4LFiygpaWF7u4+jMZC/umfbuKdt9/mzedfRhaL4w+1kdCpycxz\nEFRpUdlzSC+uxNPeRWvrQex2GxqNl8zMDAyGDEZGnOTnz8VqNeD3H2D27DK83mZmz16A3+9l795t\nbN/+CYlEGhkZR4z8DAYzQ0N62traxlXINhgMXHLJorHHiUSCzZv3Ul29gq1bDxKLGcnPn0l7+wE8\nnjC5ucX093ewZ89+Zs6cxh/+8BSrVy8nPz+fDz/czNtv12KzlaDRONizp48DB57iwQe/OiGCRDhW\nsNdkQBAEaaL6dt11SSHy4IMTsvtJgc+XDGRtaQG7/fzsUxCEYwYfng4ej4fnntvEjh116PUZh6tg\n9hEOa/F6teh0efj9oygU/axdO4P77rtj3F3D5wmFQjQ1NTE4OEIo5Gfz5nas1nJisRgmkwmFQsFf\n//ob1qy5Dqs1kzde+gMdB5vQyPUEJD9Fs+aQEEXioRFmzMzgB//8faxWKw0NDXznO79AEGYyMBBC\nqTShUiUwGuU4nbsoKJiJUj5MqLWWDLkasyGB3qwjbJJx27cfYsuWA4RCBnbs2IFMVkB2toWFC6tR\nKpP3GC0tW5k7NxOvN4LRqGfBgqpjLk1NNMcbd4/Hw//8z5OIYg7p6fkkEnH6+g5RWCjnnntu/9K6\nOsfC7/fz7htv0FFbi0yS0NpsXHHttZSUlBAKhXjyyU10dAQIBATeffc9vN5hFIp0EokogcAAarWD\nqG8UWSKIGHejFdzY06zoCkvJKrgerdbC0FAnc+cWYDJp8Hh2I4p5GAw5yGQCTz/9CkplOT5fJx7P\nQWy2KlQqI6HQIe6++2bkchm5uUE2brz5qL4n42GCyOVy+vr66OvrR6/XMX369CmXjvsZp3K+b9++\ngxdf/JRdu5rQ6+cQj0cxm6G0tJCmpjpyc0MsWjCTvuZmFPE4CYWC6ssuY+myZchkMrxeL9u27eLA\ngUMIAqTbddiMadiyspg5a9YJBbrf72fT00/z3p/+Qom9ArPBwkjAS2csTMXyGxgYaGTp0kLy8/N5\n442PsVgWIJerGBhwUVPTRCKhxettprLSgkYTR5Jy2bx5Ox6PgVAIwmEZKtUgl146jSuv3IBMJqez\ns46bb549VlfnWASDQX7609+Tn7+M/v5+amoa8fsjDA+34vU2UFKSi1abw4IFV2GxZDIyMkAg0MzG\njWt54onXyc1dMhZzBdDX10pVlY7rr193coN4ihwe82MGJ6VmRr5ASwt8+mkygPNiJi0N1q5NVvL9\n+7+f6N6cOiaTifvu+zvuvDOCx+OhpqaW11+v49AhF3Z7FYIgoNfbGBqSc/DgKAcO1B7TadDlcvHH\nP27C59OiVKYRiYzQ19dEPC6SlVVGOOxlZKSL+fNzDteFULN4+bXEhByUykzMZonLLkta0judDaxa\nVYjD4SAej/POO9vJyCjA6Qyi1dpQq42EQj4ikSB2u5zR0UZ8zoMsTM/BaFBQVDQTuULJvuad/PqR\nX7NizdWYTBoGB41kZpaTk+NAJjtynqtUKhYtmv+lnhtTgd279xEOW8jPT85+yGQqHI5ZtLfvpLOz\nk6KiopNuy2AwcP0ttxBct45YLIbRaBxb0tFqtdx771dpb2/n3Xc/wO+vRBTT6e4Oo9Eo2LbtTWJR\nC3kGHQaFnJzs5fQON5KhakMS/Didb6FU5mA2K6it7UClCnH11Qvp6FCSmZlxOL5ETiDQhcfTQSSi\nxeuVkKQ2JKkDUQSfr4P16y8/Zt+T39fktH5RUdEpHfdUx+fz8frrW3E4FqNS5bBnTw1KZS4ulw+N\nppE5c8x8/esPYLPZiMViBAIB9Hr9uNgIo9HI6tUrWb362MZfJ0IURTydnaxfspjaul4EwYrVYELy\nSzQd+ITiGdlce+06VCoVH3+8G0ief/n5eaSn23G5XHR1ubn99iuprq7m7ru/gdsdwGyejiQNIJMp\nsNkWs3PnZqLRIDZbFmq1h+zs49eU1Wq1GI1qAgEvWVlZLFwIH3/8PiZTHhqNnpGRAEajBb3ehCAI\nWK1ZxGJRXn/9PQTBOE6IANhsuTQ07OP660/rYzojTv624jQQBOEXgiBsFgThv76w/R5BENoEQXjy\nXO7/dPjVr5LpvFM46/Gscddd8Oc/T3Qvzgy1Wk1GRgYDA8PEYiKCYBkXUyCTaREEPY2N7cd8/wsv\nvIko5lNQMJucnGKKiuZRXHwpOTkCmZlesrL83H77En70ox9gtwfp6NhDNBpGLh8gEmmjqmoGoijS\n39+BTudh7tzZQHL9OxxWMm/eEqCLYLCXeDwI+HG7D3DNNTeTmakn36anoryU6dMrUSpU1DrbaHGp\naaxP0Noqo7Y26VESi42OEyLBoA+1OjKWJTRV6ejow2RKP2q7IBhxu92n1aZOp8NkMh0VWyKXyw/P\nHKmpqFjK6GgQuz2ZwqvX5yFFh5FLIIoy/EEvaoUGe34+c6ZlcfWaGZSUJBAEGVlZ06moWMnBg8N0\ndOwjFosiCDJsNhMjI41IUjrp6eUkEiLRqBJRTKOp6X0WLy740qJwFzNdXV1IkhmlUk1+fimXXbac\nnBwRmy2EwTDCTTddRWdnJ7W1tWOlAM52kObg4CAGQaCouJDsbC2DQ+2MjLpIhH0M9dRw661rx2z0\n58+vwOU6cj3RaDSkp1spLbWzaNGiwzOwambMWEBhYQaVlXPIyNDjcjXi9epwOkO0tXkZHAxTU1N7\n3H4JgsDq1ZfictXi842wZ892FIppKJVplJdXYLXOxufT09S0f+w9FksGAwMjiGL0qPYikRBpaRMz\n03bOZkYEQagG9JIkLRcE4deCIMyXJGn34adfBj4GHj5X+z8d3G548kmoPf74XzRceSV87WvJ9OYZ\nMya6N2eG3W4mkWjlszTXz5CkKDKZCr3+yAnocrkIh8MolUq6uz04HOOLwuXkTKOnp49vf/uGcRe9\nBx+8i5aWFlyuQa655l76+tzU1NTi84nMnFnMqlVfGSs6JZPJkCQRuz2H1auv5rXXXkaSQqSlGcjM\nnEZamgWLRYF31I5WY0BAwO0bprbZjTxmR1RH6K9vRmY0Ysk0o9H00dERR6u1E42GkMkGuf32NRNW\nZ+RskZFhpqfHi9E4fg1bFIPnrICXQqFAFJPBsqIoIpcrMRgsxMIRYolRZJKAIJiIxbyM9jjR+bWM\nBsOMxAu5+pobxz7zRKKI4eGnaWn5GKOxEIMhhEKhwGrVotWqsNs1iKIKgyGNqqpCNmy4Zkqk155v\nkqXsE2OPLZZMVCotbnc3o+6dvPH441gEgQTwvlLJ2ttvZ/r06We1D3q9npAoIpfLWbSwmqHhYUaG\nRwnFo+TlVo5b/ly8eCFNTR10dOw55vmYSCTQaJQEg8qxwpVWq5eRkSjxeACzWcXy5YvIzMxg8+bt\nVFfPJj39aEH+GVVVlchkMv72tzfo6+sgM9NCVdW0w7FjNZhMDjo6dlNZuQhBEAgEvBQXOwgGw4dT\nhZOZR6KYwO0+xC23HB28fz44l8s0i4B3Dv/9HnAJsBtAkqQhQRDOTynAU+CRR5IBm1M02eCsI5cn\nA1mfeAJ++tOJ7s2ZMW/ebD76aC+dnf1EIrmo1QYCAQ9KZRidLsG8eZWMjIzw6nPP4enqQiWTMRiJ\n0OOW4XB8sTXhsH22iCRJeL1eVCoVWq2WGTNmjBNuGzYkX5O8oB4hKysLi0XO6Kib/PwyrrlmAzU1\nNYyMhEhPN+D31/LNb97JL//fo3QM91Fky6GlowMhriNCmLJp0yi02Rn0enD1CSxcOIPq6pm0tzsx\nGjOZOfNqrFYrU5358+ewY8dzBAI29Prkuv7gYA8WS/ycuUMuWDCTZ57ZRmFhNvX1LkymLOAgBpMB\nlaDFQJBBzxBRfys5UpA2omRb8zAmVDTV11N5eI1fLleQnz+HK67IRxQFdLocAoEQSmUWgqADolgs\nVioqijAah1NC5EsoKChAqXybUMiPQqGioeYjvN3NjPa3ICRGKb/0UsqrqpDLZPiCQd54+mnyvv/9\ns5pJlJmZibWoiJbubqZlZ2O32zGaTOxxOln+BUOm5JLf7TQ3Nx/zfJTL5SxfPp/HHvuYtLQcFAo1\nPp8fjSYNna6bdevWk5ZmOdyahe7u7uOKEUi6qFqtFsJhLYWFi8ficaxWDSMjQ4AIJDN3Rkaaue66\n1VitVv7ylxfp7OxGJtMgih6WLZvBnDmzz9rndiqcSzFiBtoO/+0BZp7DfZ0xQ0Pw+9/Dvn0T3ZPJ\nxZ13wpo18JOfJMXJVCUjI4N7772eRx99ip073yGR0KPTqamoSGfDhmU4HA7+9OtfYxoZYebh7IxQ\nJMLeve/Rkt5ASckRheF2Oykry6enp4dXXvmAwcEAkKCqqoi1a1eN83X4sgBLmUzGrbeu409/eoHO\nzj4EQUVBgZnZs+Pceuv1TJ8+HbVaTfibUX73yz/S2V5L10A7wzE7JUUzmJ6fD4DNaKSrqxm5fCYV\nFRXjIu8vBLKzs7njjtW88MJ7DA/LEMU4OTlp3HLLTeds1qeyspLm5g527+5AoXDjdLah0/mBUaJR\nOb3uPvTBXqosAkU2G+lZWbzrbCMjv4iBri4qKiuRy+VEIhECAS+Dg0O0troYGUkcrvcxk/z8PHQ6\nHRaLhc7O/Vx5ZeU5OZYLAa1Wy623ruGvf32T5vo21H1d5Bs0ZJvklJqK6G1r44BWy9yyMtJ0Ooxu\nN21tbcyaNevEjR8mGAwSCCSLRn7Z92r9V77Cq88/z6ctLWhkMkIyGfOuvprKqqPrtyiVymOej6FQ\niLfeep/WVhcy2Sg7dvweq7UUr3cUSQqzfv11nxMiACfvC5KZmYnZLBAIeDAYzAiCwMKFc/ngg7dQ\nKAL09OxGoYhwww1LKCtL2gI89NC9OJ1OQqFQUnBN4A3MOcumEQThG4BbkqRNgiDcAORKkvSrzz1f\nAPxEkqSNX/J+6Yc//OHY4xUrVrBixYpz0leAf/1XcLmSgiTFeObNg5/9DFatOrf7OZvZNF9GIpGg\nq6uLvr4+jEYjDocDo9FIV1cXr//udyz8QppoXXs7rzb0U1F1JVqtiWBwGJ3Ox7XXXsazz76HyVSB\n0WhDFBP09bWQnR3l61+/85giJB6P09PTQyKRIDc3F7VaTTAY5OUXX2TXhx9iUavRmUyUVlez6ppr\nxrIk3G4327Zt55k/PEbcp6AgYwEqpTp5PGKCPR3b+H+/+Tdmz56YO5oz5WTGPR6P43a7USgU2O32\ncz6LIEkSTqeT9vZOhoeHUKnU2GwWGurqaN+8mXhnJ+V2OxqVCl8oxIHeXg5JmejtVVy6eg3NjY30\ntDQxPFqDXGNl3qJrmV5WQXNzDZ9+uoWCglIqK2fh9/dTXKxj48abUavV5/SYJhuner739fXxyL/+\nK3PNFux2G80HD2IMh1EoFNSEQtx49dXIZTLqOjupuukm5s6de8I2Y7EY7731Fo07d6IC4kolC1eu\nZPGSJV/6HRscHCQUCmG3208pkykcDvOrXz1KV5dAWVk1crmC5uaDNDd/zPz5RQwP65gx4zJksuRd\nXyDgxevdz/e///WTnuVpaWnhySdfR5LS0WqNBAKDmM0hbrxxDWq1GpvNNqHfs4nKptkG3A9sAlYC\nj3+xXydq4OGHHz77vToGw8NJk7Pdu0/82ouRe+5JirRzLUbOB3K5/JjZCIFAAM0xLj7F2dks0emo\nXpaPyzVCXl4pVVWVvPfexyiV+WOxDDKZnNzcMjo7d9LV1TXOORGgs7OTp59+Fb9fgSDIUCiC3HDD\nlSgUcty1tVxfVYVOoyEhijTW1PBaOMzNd9wBQHp6OuvXX8toXy+++gZqWutIiBZARjjST9WcbKqO\ncXd2IaFQKM5raXtBEHA4HDi+sEY33N+PpbSUtkCAYZ+Pzn4vCUnFSFjElWjDlmZg84evERvsI9sq\nYlNZ0Kgr6D7YiCHNQmnpHGy2TGpq3iAvr4S5cy+hvLx80rpiTiYUCgWFOTmUHZ4V9Obm0ldbS67V\nihSLEYvHkeRyRoD8w685Ee+88QYDO3awJD8fhVxOOBplzyuvoFKrmfclpcvtp+F1UFOznyeffIkd\nOzoxGmfR07ONhQurKC+vwm63kZ8fYeFCM1u2bAMsQByVysvtt19zSstNJSUlPPTQHdTU1DI05KGw\ncCazZs2cEuZ350yMSJK0TxCEsCAIm4F9kiTtFgThl5IkPSQIwjrgB8A0QRA2SZJ0dGL9eeQnP4Gb\nb4aLKFvulLjzTvg//we6ujhG/MSFQXp6Oh5JOsqZdWB0lOmVlaxYsRxIpvg1NTXx1ksvE4vaiMdj\nZGcfqY8iCHo8Hs+4tv1+P3/+88vo9RU4HMkp2HA4wLPPvke6IUy5zYbusMeJXCajIj+fTxsaGBwc\nHHfhW7luHc/39bG0UkskEmE0GCRuyOO2b3wjFW9wnsgpKGDPrl3klZTw6t/ewazJQqvS4A9JmLJm\nkVtgwij2c8nsQnLsdv73pQ8IBZwEA6Ps2DzC1dfditWazfTps1m5cumUdcOdCMxmMwmVilAkgpRI\nEAwG6Rgaormnh1hmJkMeD50+H5VXXHFSgsHv93No1y6WOhzID89kalQqZmVns/PDD6meN++snFe9\nvb1s2vQhGs00TCYlFksh4XCAbdtquPLKpaSlWenrq2XjxpuZP38O3d3dKBQKpk2bdloiwmazsXLl\nijPu9/nmnPqMSJL07S88fujw/68Br53LfZ8sbW3JAM2DBye6J5OXtLSkIPn1r5PLNRcidrudknnz\n2LtrF2VZWejUanqHhugRRW5bmvQJkSSJ1196CefOnRQnIvS7u3AP9+HOKWb2wjXIZHIkyY/JZBrX\ndlPTIaJRI1lZR9aCNRo9SmUOB+veYPHSReNeLwgCBrkcr9c77qKam5vLHf/wD+zduZOB7m4KsrOZ\nu2DBlC96N5WYUVHBrvR0GnfsJqtwJvEodI24iOQVcvU1G+noqCUtcpDSvDxaursJ9rWTIXjJUOvp\ncNax56NNVC1djygGp5wT7kSjVCpZfNVVfPCXvxBqbiZTLiffamVXby+RcJgurZbVN99MaWnpSbXn\n8/nQymRjQuQz0nQ6Al1dxOPxszJjtXfvAVSqXHQ6I6JYDyTP/2BQg8vlQquVUVCQDFBNT08/YbDq\nhcpFb3r2z/8M3/oWpK7nx+eb34TFi5OxNV9Sd2vKc/WGDezIzGTfJ58QcrspKC/nlpUrycjIAJJ+\nB527drG4qAivzcZHw7tI19jp6G2jv78DUYyRn68fsyeXJIm6ujqefvplDhxw4/OFKS6egVab/ADV\naj1KnZFBj4eszwWOiaKIN5EYKyn+eWw2G6uunhql3i9E1Go1t957L//RPUCruxOdOY2s2dewdPo8\n1Goter2ZUW+MSDTKvpoarizIpsXpg4QWhy0LdSTIrk9f4Pa7rz1hanI8Hmffvhp27qwjFoszd24Z\nCxfOn7KOq2eDhYsW8dHbbzPa1oYfMFut3LRkCUa9nkOJBCUlJSc9m2E2mwkLAvFEAsXnovNHfD5M\ndvtZWzobHfWj0egxmexkZRkZGGjBbC5CEBSMjLhJJAIsW3bDGe3D6/WyY8du6upa0Ok0XHLJHGbN\nmnVKDsUTzUUtRrZsSf577LGJ7snkZ9q0pO/IL3+ZFHAXInK5nCVLl7Lk8EzIF2lvaSFdpSIQCBCL\nxZg7dzqNjW0oQqM0173JdV+5jrVrryQajdLS0sJbb71PU5MPs7kUUVTR2hqgu/sdli+/Cq3WgM/n\nYtXalTTv3IFSocBmNBKORmno6aF04cJjipEU54doNIokSccM9ktLS+P6W27gFc1BHI5KEok4LpeT\n/n4no6MdLF00h8319cgjEfLT00nE49Q6u7HZCtAgYtXpWLv2qmPs9QiSJLFp00scODCM3V6MTCbn\n3XfbqKtr5u/+7qsXXbDrZ0SjUeTRKBvXrz9KdCScToaGhk5qZiESiaBQKKi69FL2vf8+s3Jz0arV\neAIBDrrdrNp4zLyKY5JIJOjt7UWSJLKzs48SMaWlDhob67FYMpk/fzkHD+6ms3Mno6P9GI0V3H33\njUfFJn0Rj8dDS0sL0WiMggLHWFViSM7w/O53T+H1GrHby/B6wzz99KcsXdrLunXHd3CdTFy0YiQa\nhfvvh//+75Tb6snyox/B0qXwjW+A2TzRvTn/iMC+/QdRRGR4vX6GhtwYDHrkehXzFlZw003rcbvd\nPP748wwMxNi5sw6DoQy/f4TMTCPDwxI+n55Dhw5gtVpJT4+zevVVOCtmsPmttzjY1YVMpWL2lVdy\n6WWXTfThXpSMjo7y5psfUF/fjiTBjBkFrFlz+VGFwyorZ7F1aw3NzXtoaKinudlFICCg1UYYGnKz\nZFEpPQcPYhweRmO1cuOll2K2WIiLIgeCQRSK4196u7q6qK0doLBw0diPrl4/i87OGurqDjJvXvU5\n+wwmMzKZDJlcTkIUx81mACQk6YSfq9vt5s03P6C5uQeAyspipl1+OXt37SIRDqO1WFj51a9SMfPk\nnCi6urp45pnX8HgEBEFAo4ly001XjXPSraycxaef7sPpbCQzs5CystkYDCqKisr5+tfvPsqD6IvU\n1taxadN7JBIWBEGBKO5kyZLpXHPNagRBYNeuPfh8RvLzk/vUag2kpVnYtm0rixbNmzLLPhetGPnP\n/0ze7d9wZrNjFxXTpyc/r3/7t6Rt/sVGW3s3TYMxSrVGRkf96PWzCAR9jEohpPYw27fvYO/eBhKJ\nPIzGMEZjEKu1mNHRfvLyNGRn62hq8tHRsYtrr72TpUsXodVqmT59OqWlpYTDYVQq1QkvTinODeFw\nmMceewa/30pu7jJAoK2ti8cee45vfvOuccGESWOrW3n44f+ksbEftbqUiopcbDY7Xm8ne2oauGTp\nUsq1WnIzMsYExYGODqpWnrg+itPZjVxuPeruPy0ti8bG9otWjCgUCsrmzaNl927KP1fqoMvlwlZQ\ncNzZRJ/Px6OPPksikUte3nIkSaS+vh2Lxc393/sekiSh0WhOepnH7/fzpz+9hE5XTkFBUqwGgz6e\neuotHnrINiYCdDod9913O598so2amr0olQrWratk8eKFJzzXvV4vmza9i90+H40m+f0TxQRbtuyk\ntLSIsrIyGhraMZvHz6wk04PN9PX1pcTIZGbfvuRyw+7dkEpCODV++lOYNQtuvTU5S3Kx4PP56OgY\noWzJBja//BgZMjuBiJdBRCR1HuXlK3j11Y8ANYWFVbhcTiBZ+8FoTKe7u5V1664kPd2EzVbImjVX\njmtfEISLOhZgMtDY2MjwsJKCgiPOrpmZhXR1+airO3hUIUWZTEYwKFFauhCbrXBsu9lcQG9vC7qM\nPJwhD+6uLrSCwKgoYikt5ZJLLz1hX7RaDaIYOWp7NBomLW3yp2meSy5buZJNvb3s6uwkTRAIShIJ\nq5WbT3BnuX//AUIhE/n5yR9uQZCTk1NCR8ce2tra84f/OgAAIABJREFUTtkwsLGx6XBg+pFZM50u\njZGRLGpqalm16oqx7Wlpaaxde9UJl+e+SFtbG4mEZUyIQFJoGI0F7N17kLKyMvR6HT5f+Bjvjk6p\n5byLToz4fEnL91/+8sJNUz2XWK1JT5bbb4ddu+BwbOcFTzAYRBBUZOdOw1CwkKg8BwmJdH0GwWA/\nCoWKSARksjgANls2Gs1OgsFhtFoLoigRj0cZGWll3bpjV2YFGB4eZseOPbS395Kebmbx4uqT9kxI\ncWb09blRq49ef9RqLfT0uI7ankgkiMdFBGG8Y2fSR0ZNIgEPfuc7tLS0EPD7ycjMpKCg4KTuvHNy\ncujpeYpDh1ykpZkpKsrDajUTDnczd+7FPZ2r1+vZeN99dHR0MDQ4iNFkYtq0aScMOO3udqHXH+0w\nqlKZGRhwc6rmxR6PD4XiaGGo0RgYHvYetb25uZkdO/bj9wcpKytg/vzqkwpihqNnT+RyBZFIEIBL\nLpnD44+/g8lkH7MY8HgG0eujU6q680UlRhIJuPtuWL48eWef4vTYsCE5q7R+Pbz11sURP2KxWFAq\nk0JDrVai1+ehUKgJhfyYTDoggcGgRqWS4/ePYjCYWbx4Odu3b6a7O4zFoqSrawtGIzz//Nts2vQO\n8+aVs2LFpWMXpIGBAX73u2dJJDIwmfJobPSyb98L3HbbSiorT97aOsXpYbOZiUZ7xh4nEgna2trZ\ns+dTDh2SCIXCrFx56VgqtdFopKgok5aWPuBIQGEgMIhSGWfmzKSl/8yTjD/4DJ/Px1NPvYRWm83A\nQDeDg0M0Nu6jtFTDN75xe0qckgw2nzZt2inVJ0pPt9DQ0Atkjdsei/mxWstOuQ95ednEYk1HbR8Z\n6WVgIMGPf/xfJBIi1dXlyOUyPvnkEEZjERqNhQ8+cLJ7dz333//V46Z4JwNbPyWRiI8JDYDR0W5W\nrUqask2fPp1Vq/r48MNtgAmIYTBEufPO66ZUocyLRoxIEnz3u8kaNH/960T3Zurzox+B1wuXXw4v\nvQRfcFG/4FCpVKxevZgXX9xBbm42HR1NaDR5RCLDzJpVTnd3HVdeOReHI5cnnngdrzcbnc7EjBll\nRCLtXHvt5ezceQCv10JWVjGCIGPXrjZaW5/hgQc2otFoeOedzchkDrKykj82BoOZUMjKK698SHl5\nWcql8xwzc2YF7767naGhPmy2bGpqajl0yIlWq2DOnHW0tnpobn6Gv//7r475v9x5543s2/cTOjq2\nYbUWEY8HCIVaWbAgh0WL5p1WP7Zv34XHY2Tu3IXMmhVleLiPWCxKONxJefnZrUZ7MVFdPZtPPtmP\nx2Mbq5brdndjNIbHarWcCiUlJRQU7KCzs5bs7FIEQUZvbyttbTsRhMXk51chk8nZurWe3bs/ZN26\n+9DpktkSBoMZp7OR7dt3cdVVXx5DlJGRwYoVlXzwwU4MBgdyuQKPp4eSEv3YDYogCKxcuYJ58+bQ\n29uLSqU6XFxwal0vzlltmjNFEATpbPVNFOGhh2DrVnj/fUhlTJ4dJAl+/vNkMPDPf56s8HsmMTjn\nozbNmVJXV8cHH2xn5859eDwhCgtLsFp1XHppFVdccRlyuRyXy8WePftxu0coLMxhzpwqnE4nTz31\nKYWF43+gOjv3c+ONc5k9u4of/vAX5OVddpQ3gNO5kwcf3DAune9CYjKNe39/Py+88BbNzX3s3HmQ\nvLxpVFcvxmJJrkf29bUyd66B9evXjr3H5XLx178+y7ZtdahUCq64YgnXXrv6tGzDAX7xi98jk5WN\n+dF8htN5gNtvX8yMz5eFnsJMxLh3dXXxwgvvMDgYBCTy8y3ccMPVpx3kGQqF2LJlGzt31pFIJMjI\nMNLc7KOsbNnYa/r7O3jnnc0sW7acoqLCse3hcIBYrIHvfvf+4+5DkiTa2trYt+8g4XCUWbNKmDlz\n5pQTGzBxtWkmBQMDyaWZUAg+/BC+YI6Z4gwQBPje92DFCnjgAfjd75JZNidRn2rKMmvWrLFqoOFw\nGJ/PR1paGprDdu6QvJu5+urxhXyczj40mvHpoQA6nY2Ojh7mzp2DSqUgHo+iUmnGvUaSYlNqunUq\nk5WVxTe+cTdbt25FJktj+vTF42I8zOZMWlvHT81nZGTw7W//A9/+9hdbOz00Gg2BQOQoMSJJsSn5\nAzSZcDgcfOtb9zI8PIxMJjtjLx+tVsuqVVeMBau+//5H9PePjy+SyxUolSoGB0fHlRyJRiPodCcO\nMBUE4ZSXpKYiF6wYCQSSxd1++lO47z54+GFIncfnhvnzYccOePxxWLsWrrgCfvxjKC6e6J6dWzQa\nzTgRAklzoj179tHZ2U96upkFC+aSmZmJxWIkFus9qo1IxI/Vmo8gCFxySRUffniIwsIjRe9cri7y\n803HvctOJBI0NDRw4MAhZDKBuXMrKC0tnVLui5ON7OxsNBr5UcGmoZCf7OwjdzQ+n4+tW7ezefMO\nAoEwc+bMYOXK5WcU17F4cRXPPLMVg8EyNoZe7xB6fXTM3TfF6SMIwlG+MWcLs9lIPN4xbpvVmoVM\n5gGOZLyIosjgYCu33DK+FMTo6Ch79tTQ1dVPZqaV+fPnjDlAf0YoFOLAgVqamjowmQxUV1deEHFE\nF9QyzeAgfPxxMqjyb3+Dyy5LFsE7xfixFGeA3w+PPJLMVrrttqR9/Mla7U+m6frTwe128/vfP0M4\nbMVotBMMeojH+9i48Wqys7N55JE/otdXYDQmI/r9/lE8njq+9a2NWK1WIpEIzzzzIk1NgwiCEQhh\ntwvcdddNWK1HZwFAUog888wL1NYOYTLlIUkiXq+TxYsL2LDhmilRQG8yjnsikeB//ueP+Hx2MjKS\naXfRaJju7t3ce+9aSktLGRoa4le/epxPPmlEknKRy/VEIm5mzNBy//3XU119elOEoijy6qtvsmNH\nC4JgBqLo9RE2btxwQfzofMZkHPczJRAI8Mgjj6HRlI3FpQQCXjo6PsZg0CIINiRJiSR5WLiwmPXr\n144Jzv7+fh599DnicTsGg+3w9aOXu+5aR0lJCZD0NvnDH57G7VZgNGYRiQQJh7u5/vpLWbDg9GKU\nzifHW6aZ0mJkcBA2b04KkI8+go6OpPfFypXJbJlUQcyJw+2Gf/93ePLJZF2b73znxEtkU/3i9Je/\nbKK9XUFm5pGc8UDAQzTawPe+9wDd3d08++zr+HwCkgR6fYKbb14zdqGB5Ppwd3c3Q0ND6PV6ioqK\njusq2djYyJ///CGFhQvGhIcoinR17eCBB9af0GZ6MjBZx314eJhnn32F7m4fMpkauTzA1VcvZdGi\nhQA899xLvPzyHjweC2ZzIZAULMFgB3PnaviXf/nGUTNnp8LAwAB9fX2o1WqKi4unlGfEyTBZx/1M\ncTqdPPvsa4yOSgiCDK02zo03rqKwsJD29nbC4TDZ2dlHFbh8/PGn6e3VkZ5+xMzN7x8lkTjEd797\nPzKZjHff/YDNm3vJzz8SNxSNhnG7d/GDH3z9tKr8nk8mLGZEEIRfAPOAvZ+v4CsIQg7wF0AN7Acq\nJUladuxWjtDff0R8fPwxOJ1J8XHZZcl4hXnzUksxk4X0dPiv/4Jvfxt++EMoLISbbkrG7yxeDBea\nyWg8HqexsZO8vPE27nq9iaEhGBwcpKCggO9+9376+/uRJImsrKyjhIYgCOTn55/0HXB9fQsGQ864\nGRCZTIZSmU5zc9uUECOTFavVygMP3IXL5SISiZCRkTEmLiRJora2Bb8/isGQPfYelUpDIKDB50uW\nji8+g7XKzMzMVEXmKUh+fj7f+c799PX1IYoi2dnZY+f5523iP08kEqG1tZf8/PHXj2TWTWKs5s7+\n/YdITx9viKJSaUgkDHR3dzN9+tTNtjpnYkQQhGpAL0nSckEQfi0IwnxJknYffvr/A/4FaATqgeYv\na+f99+G555LiY2AAli1Lio+vfQ3mzIETlCJIMcEUFsKf/5wUkn/8YzLQtb8fVq9OBr4uW5a0mZ8C\nqwnHRSaTIZfLEMXEUbEagiCO2T7L5XJyz+KUXTLo9Wj3RVGMo1anlPmZIgjClwoChUKOTCZDFOMk\n76uSSJKIIHDCOikpLlxkMtkpnedy+WffpcQ4PxFJkpCkxNh3SalUkEjEj3q/JCWmfBmJcxnhtgh4\n5/Df7wGXfO65WZIkbQNuA9o4jigaHIQZM+Dpp5N/v/JK0i9k/vyUEJlKZGUlq/3W1sKePUkR8tFH\ncOONSTO6qY5MJmPhwpn09o7X1W53Nzk5aaed5nkiKitnEIn0jbtAxWIRJMlNWdnUvUua7AiCwMKF\ns0hLU+D1do5tDwa9KJUhsrK0Z1V0priwUSgUVFdPp7e3Zdx2t9tJYaFtLOtn0aIqXK7Wcctbfv8o\nen1sys+CnsufczNJoQHgAT4fRioXBEEJXHb4NV961fzKV85Z/1JMEA5HsmLy/cdPr59yXHHFcnp6\nNtHRsROZzIgoBrFY4tx8883nbJ8FBQWsXFnJBx9sQxDsh+/Kh7n22iVTpkDWVGXFikvp6HDy5ps7\n6ejoRZJ0qFQBFi928NWvbpjyd6opzi+rVq2gv38TnZ27EIQ0RDGAzSZy/fVHrh/z5s2ltbWT+vod\nCIIZSYqi0fjYuHH9lE/7PmcBrIIgfANwS5K0SRCEG4BcSZJ+dfi5D4EngSHgHsAuSdLSL7z/wots\nSpEiRYoUKS5iJiKAdRtwP7AJWAk8/rnnDgArgGygGhAEQfh7SZL+9/MNTKVI60AgwM9+9nuyshah\nVB5ZP3Y6G1i+PGdcBccUx+ZCja5PcXwm47i7XC7++7+fJi9v8bg1/I6OfVx//RwWLJg/gb27MJiM\n4z4ZEUWRRx75HYJQSlraEZO2wcEesrIC3HPPbRPYu1PjeFYD5yxmRJKkfUBYEITNQFySpN2CIPzy\n8NP/CeQCeuArQN0XhchUo7u7G0kyjhMiAOnpDvbvPzRBvUqRIsXp4HQ6Acs4IQJgseRTW/ul8fYp\nUpx1hoeH8XgS44QIgM2WQ1tbH5FIZIJ6dnY5pyGgn0/nPfz4ocP/95CcLfmM985lP84HCoUCSTo6\nyjkej6WyGlKkmGIksxeOjqyOxaJoNClr/hTnj89+WyRJGjezkMzcEy4Yp+UL4ygmAQ6HA70+hs83\nMrZNkiTc7lYWLao6zjtTpEgx2SguLkap9BIK+ce2iWICn6+LefNmTWDPUlxsmM1miorScbu7xm3v\n7W1h7tzSKR+4+hlT2oF1stHZ2ckTT7xMOKwH1EjSCLNn53LjjetTngMnQWoN+eJkso57fX0Dzz77\nNvG4GUFQIIrDLF1axtq1V00Jm/3JzmQd98nI8PAwf/rTJoaGBARBjyh6cTi0bNx4M3q9fqK7d9Jc\nsHbwk5FQKERLSwvhcJisrCzy8vJSF66TJHVxujiZzOPu8/lobW0lFkv6OKQcUc8ek3ncJyOxWIzW\n1la8Xi82m43CwsIplz6eEiMppgSpi9PFSWrcL05S437xcTwxkooZSZEiRYoUKVJMKCkxkiJFihQp\nUqSYUFJiJEWKFClSpEgxoaTESIoUKVKkSJFiQknlm05xmpqa2Pvpp3hHRsgvLWXR0qXYbLZxr5Ek\nCY/Hg1KpnFJpYClSXIh4vV4AjEYjkiTR0NDAvk8/xe/1UlhezsIlS8aqtKa4uBgZGWHXtm20NzSg\nVKspr65m0aJFUy5r5nSYsGwaQRBmAr8naXN4UJKkB7/w/EWdTROJROjv70epVJKdnX3M9OCtW7aw\n57XXmGaxYNBq6R8ZoV8m47YHHhir2NrW1sZ7L79MeGiIBOCoqOCqa68lLS3tPB/RibnQo+t/9Sv4\n+c+huBgeewyKiia6R5ODC33cP8PlcvHOyy8z2NkJkoTV4cBot9O9axfTLBZ0Gg39IyO4lUpuf/BB\nrFbrMduJxWL09/cjCALZ2dlT9ofqQhr3kZERPB4PFosFk8l02m389be/xejz4e3ro9fppDcYxDJr\nFvf94z8yY8aMs9zr88+kTO0VBEEhHfZPFwThj8CvDtez+ez5i1aM7N27j9de20wspkGSEtjtSm67\nbf04j4NAIMDv/+//ZXFmJqrPOfC19/Xhz8pi5dq1SJLEC48+SoXJhM1oRBRF2vr7CaSnc9cDD0y6\ni9iFdHH6Io8+mhQizzwD776bfFxTAzrdRPds4rmQx/0zAoEAf/rlL8kTRXLtdgRBoKWnh5c//pgH\nNmwYN2PZ0ttLWnU1a9evB8DtdhMMBklPT8fpdPK3v71LKKRAkiSMRolbb12Hw+GYqEM7bS6EcY9E\nIrz88hscONCJTGZAFP3Mm1fCunWrT9kZ9c1XX8Wzaxe9jY1Ig4PkZ2SATManbjfZlZXc9q1vUVBQ\ncI6O5PxwPDEyYcs00vhCLlpgdKL6crYZHh6mt7cXlUpFYWEhKtXJ17Lo6uri+ec3k509D7Vae7i9\nfv7857/xj/9439gX3OVyYZCkcULE6/VyqLaJre/voMUp0dW6h4VmJbb8fABkMhklOTns6uyks7OT\n4uLis3jUKb6M3l74p3+CLVugvBzmzEkKkR//GH7604nuXYrzQUN9PfpAgLzPiQaVQoE9Hsc1MEDR\n587FPLudmvp6fJdfzqZNr9DaOoRMpiEUcjMwMEBV1TricR+JRJxwWMGf/vQS3/nOPRgMhok4tClB\nMBiks7MTURQpKCg4a5/V22+/z/79HhyOSxEEAVEU2bVrPwbDJ6dcqb1+zx5G99bS39BEji6NhsEW\ncnIzyFCpMEoSOzdvpmDjxrPS78nIhMaMCIKwHvh3YLckSe0T2ZezgSRJvP32+2zZUgeYgRg63Tvc\need15OXlnVQbO3bsQ6t1jAkRAKs1i87OXlpbWykvLwdApVIR/dxdRTweZ+vWvYSietIzM3E4FtB9\nqJm2Q62UFxaMW4PWAx6P52wccoqT4N/+Df7u75JC5DN+9rOkKPne9+ALIT4pLkCGXC6M6vEVvZUK\nBXKFAv/hGJLPCEUiaA0Gnn32Fbq7FRQULAWgru4gDQ1NdHc/j1abDyiQpBEsFhkHD9azaNHC83U4\nU4q6uoNs2vQu8XgagiBDJnuHa69dzoIF886o3VAoxK5dTeTlLRlbRpfJZOTmzmTr1h2sWLHspGdH\nEokE+2sPkeYBs86KXmckISbocroI2w1MN5kY7Os7o/5OdiY0m0aSpFckSaoEfIIgrPri8w8//PDY\nv48++uj8d/AUqa+v56OPkl9Oh6MSh6MalWo6TzzxErFY7KTaGB72otMdK55DQzAYHHuUk5ODJjOT\nbrcbSE7lhkIyhuIxskvmAGBOzyMqqulod45ryU+y+FKKc09vL7zwAvzgB+O35+fD9dfDb387Mf1K\ncX5Jz8rCEw6P25ZpseBTKvn81oQocsjloqC8nPb2YXJySsaeCwSCeL0xRkZMWK0zsVrLsFgW0NXl\np76+4TwdydRieHiYZ599F5utmoKCOTgcVWRkLOTFFz+h7wx/3EOhEKBELh9/T69UqojHBSKRyEm3\n1dHRgdpShF+hIiiKAMhlcoKigoFgGLlcTsZJ3tBOVSZsZkQQBJUkSdHDD73AUWsZDz/88Hnt0/Ho\n7+9nx4699PYOkpeXzqJF88jIyBj3mu3b92OxFCOTHYnFMBptdHWpaW9vZ/r06SfcT0lJPh991ENa\n2pGZjOS6qmcsKBWSa2/X3X47f3viCXo7Oxnp76fO68Ux+zIcBclAp7yiWew+tIdut5u5JC90zb29\naPPzp/za41Thf/8X7rgDjpUc8cADcMstySWcC6QKeIovYUZFBdvfe49OlwvH4fO4Z3CQkkWLiOp0\n7OrsRClJtLpcRA1WhrfX0NcXJDc3PlZkUy6PkkikIYpH7rZlMjkKhRWX64JZ5T6rNDQ0AnY0miMx\nOSqVBpUqmwMH6snOzj7tto1GI1othMOBce37/aOYzeovzVx0Op1s376XwUEPxcW5LFxYnYwJyigh\npM2kfvPzDA/2oNHoCCrV5FmNdIXD3Lhs2Wn3dSowkcs0awRB+A4gAO3AmxPYl+PS1tbG44+/glKZ\nh8GQy969w+ze/TT33HPduB/1QCCMSqU5RgtKotHoMbYfIRgM0traikwmEY930Nen+v/Ze8/wuq7z\nzve3y+kFpwAHvbGAJECCRSwiKUoUJTuW5SLJkh07rrFiO5Fv6s08mdzJ8/hOJhlnnDvjJGNnYtmO\nbEm2ZcmyVSJajaTE3kGCKEQ/AHEAnIPT+673A2hIlKhKyizi74vEfdZZe529sNd611rv+38JhZpQ\nVYXp6QHa26tfd9QTDAb58h//MRMTEwwNDRF9+ijt7VvnP/d6A1S3r6PMCLvHxzEEgUUrV3LLbbch\nXpv93nNKJfje92D//vN/ft114PXCjh1w662/3bZd4+JSKpUYHh6mUCicN0Gmw+HgU/fey/NPP83u\nwUEAahYt4su3304gECAcDvPEE8+Qzweoq+1AVUucPv0rFOUQmzfPhXYGAj4EQUUUdXRdBwzS6RjB\noPMNdlOvUSyWkSTb665bLHby+eIF1S3LMh/60GZ+/vPdVFYuxe32k8nESST6+dznzp/Z+cCBA3z/\n+79EliupqWlmejrGoUMPcued2xCELEuXb6K2sY2Tx3eQmAkjqgpGY4jbv/hFGs/6/l2tXEoH1ieB\nJy/V/d8upmnyxBMvUFHRjtc7d7jvdvtIJt08/fQO7rvvS/NlOzoWsGvXGVwu7/w1XdcwzRR1dXVv\neI+RkREefPApymUPgmChVJLR9VNMT4dxOOx88IOdbN68kXA4zN69R4hGk7S01LJp0zqqq6tpaWmh\nubmZyclZTp8+QW1tG7JsIRaboKZG5o/+6L8iSRIWiwWb7fUv5jXeG558cs4vZNGi838uCPClL8GD\nD14zRq5kJicneeCBxykUnAiCDcM4yPLlNdxzz8fnfQYURWFgYIh4zoBgA6tWLWPz5o3zzu2SJBGN\nmrS3b52fxNauvZ5Dh05QVxdgwYI2JEmksrLAkiWNJBIjiKJIW1sdNpuDzs7Fl+z3X860tjbx4ot9\nwLnO+rncNG1tmy+o7mQyydRUFMjQ1fUkXq+Tzs7l3Hnn7Sxe/Pr+6O3t5a//+p+Q5aVYLBAOH6e1\ntZbq6npOnjxNZ2cDXV3Hqa1dwg033c3U1Ci6HuZP//T3qaysvKC2XglcEz17C9LpNPF4iaamc70M\n/f4Q4+OnyeVy857Z69dfx/HjfUxM9BEI1KMoJZLJEbZuXf6GmgHlcpmHH34aj2c5tbVzfhyGsZSx\nsSPcddcmVq5cCcCJEyf52c924vG04nK10d0do6vrZ3zlK3dTX1+PIAh8+tN3sWfPPvbvP0a5rLJi\nxSK2bfvddx33fo0L48c/hs9//s3L3HMPfOMbc7so9vNtql3jskbXdR5++Ams1jaqquYmDNM06e4+\nTkvLUTZtuh5N03jooUcZHCxSVTUnLvPcc4OMjU3y+c9/CkmSGBsbx2KpOmc1vWzZWhSlyOTkfqzW\nKRYvbqCt7aP09aXp6FiJxWIlHp8kENBYu3bNJfn9lzutra20t1fS23uMYLAFQRCIx8dZuNDFkiVL\n3nW98Xicf/3Xh9G0amprt+Dz5Ugmh1m+fNF5DZFiscgPf/gLZLmDUKgdANNsZWTkJMFgFX19k/zN\n3/wxdXWH2bv3OLOzJdrbF7Bt2xffF4YIXDNG3pK5lY2OYRjnHG0Yho4gGPPnuQAej4evfvWzHDp0\nlJ6eESor7XzkI1tpb29/w/rHxsYol51UV7/iUCqKIsHgAg4d6mblypWoqspTT+2ipmYVDsec4eNw\nuInH7Wzfvot77/09YC7CZtu2rWzbtvXiPYBrvCtmZmDvXnjkkTcvV1cHnZ3w7LPw8Y//dtp2jYvH\nmTNnSKcFmptfmTAEQaC6ejEHDpxg06brGRoaYmgoR2vr2vkybvdqBgYOMzQ0xJIlS7DbbRjGuU7u\noihSW9vMbbct42Mfuw2YM3R6eno4ePAkhUKJW25ZyLp1111TVn4DRFHkd3/3Trq6TnD0aC+6bvDR\nj3awZs2qd6wD8mpefnk/ul5LXd3cjovD4cbjCfDccwdYvXrl6/pjdHQUTatAll9xahUEEaezkZGR\nQZYu9WKxWNiyZTNbtlzYjs2VyjVj5C1wuVy0tzcxMDByjmd7JDLIypWLsL9mOevxeLjllq3ccsvW\nt1W/pmmY5uvFx2TZQqk052cSj8cpl2VCoXNj4wOBGsbG+lFV9YJerGtcfH7ykznj4u3MEZ/61JzR\ncs0YufLQNA1RfP0wKssWstk542JwcAyHo+p1ZRyOKoaGxliyZAltbYsRhH3nOENqmkqpNMmqVXfM\nf0cQBJYvX87y5cvfo1909WGxWFi3bi3r1q1968Jvk7nF5rrX3MeKabqZnp5m4cKF53ymaRoulxe/\nP0sul8TtnvNoF0WZeDzCxo1bzutj8n7imhfj2+CjH/0gVVV5wuHDhMM9hMOHqKtTue22Wy647jmn\n1BSadu6qKB6fYOXKuegbm82GaaqvUyvUNAWLRb7slFSvMWeMfPazb6/s3XfDM8/AqyK3r3GFUFdX\nhyjmUZRzw3aj0fF5Pw6324Gqvj7MU9PKuFxzekJ+v59PfvJWEonjhMMnCYe7iUQOcNtt112R6qpX\nOy6XA0V5vQOsaarn9cv7zTi/alU7FkuaRGKcRCJCJHKEtWsb2LJl02+h1Zc313ZG3gYej4c//MMv\nMjY2Rjqdxufz0dzcfFEiUioqKvjgB9eyffshPJ5mrFY7yWSEmhp9/hzY7/ezcGEV4+Oj1NbObQua\npsnkZD8339x5LTLmMmN8HEZHYevWt1c+FIJ16+YMkrvvfk+bdo2LjMPh4CMf2cLjj+/B6WzCbneS\nTs9QUZHnhhs+DMDy5e288MJxyuXGeTHDUqmAacbo6HhFXmnFiuUsWNDKyMgIhmHQ1NR0LWHeZcrm\nzat4/PFjtLSsmR9/Z2cjVFZaqK+vf135QCCwZzwXAAAgAElEQVTAtm2reP75k3R2tpLNFojHw2zc\nuIC/+Is/vBZYwCXMTfNWvN9y0wwPD3PkyEny+RLLli1g1apOHI5XVFjT6TQPPfQLIpESguDENLMs\nW1bDJz/58XckN385czXkqgD49rfh5En44Q/f/nd+8IM5Y+QXv3jv2nW5cjX0ezgc5vDhE6RSOZYu\nbWbVqpXnSI53dZ3gl7/cia7PZeqV5Sx33bWNlSs7L2GrLy1Xcr/rus5TT23n0KEhRNEHlPH7TT7/\n+U+cowf1WoaGhjh6tJt8vkRHx0JWrux83VH/1cxlmSjvrXi/GSNvB8MwmJiYIJvNEgwGL0iw53Lk\nSh6cXs2NN84prt5++9v/TjIJLS1zuyrvt+Cnq6Xf34p8Ps/4+DgAzc3NON/nWRKvhn6PxebyBTkc\nDlpaWq4dmb8F14yR9zkzMzPzifuampro7+uj5/BhNE1jyapVrF2//pxdmEvF1TA4TU/DsmVz/32n\nO68f/zjcdRd84QvvTdsuV66Gfn8zstkso6Oj80cvkiRxeP9+Rvr6cLrdrNq4kY6OjvedA+PV3u8X\nC9M02bVrF/t37EArl1m7ZQubb7zxipRsuGaMXEbMzMxwaM8eJkdH8QWDrN2yhUWvUcUyTfOiDEyG\nYfDsf/wHgwcO4AMU0+TAwABtVVWsPpsldDKZxKiv5/fuvfeSn1teDYPTv/0bvPTSnAPrO+VnP4MH\nHoBf//qiN+uy5lL0e6FQ4PDBg5zu6kK2WFixfj2r16w5J1T/YtB98iQvPvYYFbqOCERKJRLZLGtq\naqgLBimWywzF47TfeivbPjDnP1Iul5Hl98Yx/WKNLReDq+F9f68xDIP//o1vMPz887Ta7UiiSFTX\nCW3cyH1/9Vd4vXMCm4Iwlwvn2NGj9B45gmmaLLvuOtauW3fJx/VX82bGyKXMTbMB+J+AARw2TfPP\nL1Vb3g3JZJK9u3YxdOoUNrudzo0bWb9hw5uG2E5OTvLY975HgyTR4fORmZnhmR/8gM133cV169YR\ni8XY/cILjPT2IlutrNq4ketvuAHDMLBare94oOzp6WF07142trQgiiKR2Vl8iQSz09Mcj0QwVBW7\n2005FqPn1CnWXHdhWSyvMefz8ZWvvLvvfvSjc/lqYjF4k2Pna1wgpVKJn3z/+1ijUZZUVaEpCscf\nf5zxkRHu+tSn3vFknUwm2ffSSwx2d2O1WuncuJENGzeSy+V48dFHua6qCudZv4DMiRNEe3sJtrbi\ndjhwOxz4PR727txJRSBAz5EjzE5MIMgyHevXc+O2bRfsU6CqKvv27OHEvn0opRLNS5Zw4wc+QHV1\n9QXVezVQLpc5sG8f3QcPoqkqS1evZtONN85P8hf7XsA7Mg727NlD969/zQpZJnc2KWrA7Sb80kt8\nx2rFLggYhsHizk5i09OIkQitVVUIwMAzzzDS18fvfvGLV4T0w6WMphkDbjZNUxEE4SFBEJabpnnq\nErbndRSLRYaHhykWi9TW1s4rnabTab793/4bybFpPA4njSEvJ594gqnxcT7x6U+/4WD28rPPssBm\no+6sop7TbqfC5WLv9u00NDXxyP3302Ca3FRfj6Jp7H/sMR74wcPUL+jEZhPZvHklN910w9s2SroP\nHWJBIDDv7R1NJiGXQ52dxWaz4auoQNB1+gYG2LtjxzVj5ALJZufy0Dz++Lv7vssFH/4wPPoo/NEf\nXdy2XeMVuk+eRJyepqOlZf7aGpeLgydPMr5x49tOIqnrOiMjI/zixz+mWRDYUF2Nquuc/vWviYTD\nNC9eTNA05w0RgGg0SqvHQ2RigmAwiGkYjI2OcXzPIR579hBtVX7WLK7F5bBz6Je/ZGRoiKWdqxga\nOkMg4GXdupXvOEfJE48+SvrUKa6rq8MaDDI5NsbP/s//4bNf/zrBYPCtK7gCiUQiHD7cRSyWpLW1\nnrVrV7/uWMMwDB57+GGUoSFW1tQgSRLhQ4f4yenTfP4P//Bd+/RMTk4SiUSw2+0sWrSIQqHAM8+8\nyOnTZxAEWLasmdtu2/a2IqV2bt+OmEhgdblodTrRDIOpVIrB2VkAvvh7v4coiuzbtYu+3l6+dNdd\n8wENnS4XR0dHGRwcfFPhzcuFS5mbZuZV/1QB7VK15XxMTEzwox/9kmLRdTbfxAE6O+u4++6Pc/+/\nfZ9jByZoqlpCrihzsG+WkL9ASTzB5I03zie003UdQRAQRRFd1zkzPMzNr9EMcNhsWFSVXTt2UKUo\nNJ/9bjadJjmWwNAseFa24Xb7eOqpY4yNjfOZz9wz7+ORyWQQBAGP5/WJssqlEpazhoum65QUhclY\nDE/JoHc8hSwXMfU8slNgsLf3vXyc7wt27IANG8Dtfuuyb8RnPgN///fXjJH3kvDAADWvmZgEQcAv\nSQz09xMIBM77Pr2a0dFRHn10Oz3dYVKjI8zUuqhwuaj2+1nV0sKB/n5Mi4V0Nks8kyHg8SAIAjab\nDT2XQ1PndIUGBoc4dWqSWAp8rkbOzBQ43N/Povom3A6BYy8+xPU3FejoWMPUVI7Dh3/BPffcxOrV\nq9B1/S0XJlNTU0z19LCxuXl+kdQYClGenOTIgQP8zjvxsr5C6Ovr46GHnsVma8TprGLXrkkOHDjF\nV77yqXMiXUZHR8kMDbH+VUZpW0MD3ePjdJ84wYaNG9/0PrquI4ri/HPVdZ1f/vJpjh8fp1x2Ui5n\ncTqfAjTc7g4aGuay7g4NhfnBD37G17/+pXN2vRRFIZ/P43a753cyZmZmEHUdn93ORDLNeKpESgMh\nV0JJpDENA0mWETWNSlUlMjlJS2vrfJ0hp5Pw0NA1Y+TtIAhCJ1Blmmb/pW7Lb9A0jYcffgK7fRmh\n0FxOGdM06eo6ht+/g5d3nmBBzUrcjjmlRLejgqnEMDZrlGg0isvl4vnnX6K7exhRFFi9egm33HIj\nNoeDkqLgeNU2nWmaKKZJfHKSJb5XJOH7+oZwOmuoLOUZHR1kcHCWRCLPyy+H6e8fZ9Om5WRnpklM\nTIAgUNXaygc/9rFzXrbFK1Yw8Otfc3piihND05yJxeiNxGkTK6lERjY0RMFCX3Qat9BFX18fy5Yt\n+y095auPZ56Z29m4EH7nd+aOeXp6oKPj4rTrGufi9Hgols8VIUun0xw5ehRhcpKe3bvP+z79hmQy\nyY9+9CQeTwcWQaW1xodhKDy5t4ffu3UtTrud1HSU/eGXiY7EcBwZRVTiLG2qweX10pNI8OHrrkNT\nVU6eHGBoJMlALIFdKJNUXNitTQwpORprJURzOePjedav9+H1BikWq/judx+muXkX5bJObW2QD3xg\nM21tbef9rbFYDK8gvG63ttrvZ2R4+OI91MsETdP45S9fJBRaPZ86w+sNMj09xgsv7ObTn75rvuxU\nJIL/PMZcyONhYnj4DY2RsbExHnjgEbq6BnG57Nx++xY+8Yk76Onp5eDBSbJZF+PjMQTBSjQ6RbEY\n5qtfvWV+h7qmppVwOENfX/+8Ubl71y66du9G0nVMq5V127axYeNGbA4HY7kcSjJFseQgYA2gaDlE\nQ6aoutm1aw+yZKN/fAJ1ehrLsS4aGhqQzxozJVXFd4WkCrikalmCIASAfwF+/1K247VMTEyQy1nw\nel9JbicIAqHQQl54YQ82Rx2qbpzzHY8jxGAkxcTEBN/61nfo6SlTX7+FmprNHD+e5oEHfs6K66+n\nPxI5x2lrbGaGytZW6pubyeTz89dTqSwOh5sz8Tg7dhxlclLCNFtJp/309mb5/v/3PZS+PrY0N3ND\nYyOuqSke/eEPKRaLZDIZ+vv7qfD72TE0xo+fOcDIeJzJsXFUI0TEEDmTijGWTzJkqjgDy7CLdp59\n5BGKxbeXVvua49m5mCZs3w633XZh9Vgs8OUvzznCXuO9YcWaNZwplSgpc+kWSqUSu3fupFwuc8eq\nVdzQ2Ih7epqfn32fXsuJE93oeiUejx+700VJVXE7KlC1ACNT00wnEuztm6W+fgtWpx8tMosvIxHr\nHeLUkSP0ZrM8e+oU2w8c4OkjxzmaKKDIrcSLBpJZj2jYKJQEhsYjiKIfw7ATj8eJx+McO3aCkyeT\nqGotTU03UyjU8cAD2xkcHDzvb3W5XBTP865mCgUqrsIEbNFolGJRmjdEfkMo1Ehv75yY3G9wezyU\nDOO1VZArlfC+QWLTiYkJ7rvvG+zcmUcQNpNMLuV//++X+OY3v82BAydIJg3C4Sx+/0L8/mYcjkaS\nSQs7djxJPp+Zr8du953N+Asv79zJ6eefZ31lJRvq6mjSNF740Y/4x29+k2hfH7KucyxbYtYQGSnn\nOGNqlG1uDM3K8eMjFIsOFjauIiG7mIorHD16EoBCqcS0rtPReWVo2VxKB1YZeAj4v03TjJ6vzDe+\n8Y35/9+6dStb366k5TvENE3Gx8eJRqO43e6zf7Dnzxej6ybVdTXEhmdxOxxYpLlHODw5wWwmzNHH\nH2dkKIGtJorT6cXvD1Ffv4SxsaPceGMV6dWr2dvVRYUkUTAMbLW13HnnncRiMX62cycWoCYUwuNx\nEZ4KMxAvoOmNlEsK8dgYghCjvyfFMkuWniOnqPJ6qampoTEUYnJggP/nr/6K4e5hBNFFPJ8jP9pP\nR20b2dkJVDVH1tmM23SimTkqPA5S2LBZXFRVefCqKsPDw2+a9yIajfLii3vo6RnB4bCxefNKNm/e\neEU4SL2X9PaCKMLSpRde1733wpo18M1vwvtciuI9oaGhgdYNG/jJT3+KV9cpqSrpfJ6Pf/CD8/4d\nDVVVJMbH6e/rY/WaVzLiplIpDh8+TjJpEgjU0tDSzPHwGF5NRZYcZPIlukfGmcroJF76D0rRMdoC\nlRiKzmh0gmU1Xhb7/Zh2O8/t24daLNPkczGWPUPaMHAhYpRLSGYRwS4xnUxSMvI8/OPTuCtqSSZz\nmGaZcHiK6uoW4rOThHt6+K9/uYcvfO3LbNi8+ZwjppaWFqSqKsajUZpCIWBukhrNZvnYWxxDXInI\nsoxpvv7EX9c1ZFk6Z4eora2Nl+124pkMwbMOq/lSiYiisGX16vPW/9BDPyeTqaW5eRUALlcFHk+Q\nJ598jIULvfT12QkEluFwFLFYRFLTB7AluxnZcYxY726aVlzPlls+RTI5TS5XxfDwMHt//Wuso6M8\n9MwzxJJFrBXVpNUifT9+kGWhKqoEgZToIitUI8kSjgo/BSNGsVTG6nAgnZ2D5PpFpG1O9p0eo+y2\nkTAMOjZvJpPJEAwGL3ul7ksW2isIwqeBfwJ6zl76z6ZpHnjV57+V0N5yucxPf/o4AwMJBKECKGK3\n50kkMixadCsWy9yRimEYjI/3sWFDkK6uQTLpAEPdvaiZDNligdn4af70ng1QUJiIqCTLRc6YBjd/\n5Ks4nW4ikRGWLRPo7OxAVefyzPT3D3D69CSnTw+QyZSp8PrJnOkn5BSorA5ysH+K8ViI6IwFi1AF\nTCMQRdbSLLOkWFJroX1hPQW7ncWdnTzx3HMUU2U6l2xmcGiY0XA3Bd1GwCrhFgqE3B72ZSyIYjM2\nJUPI6yaiaYSaqvjIpkrsDgviokUoqRSYJsvWrGH9pk3zjlyJRILvfOchoIGqqgZUVSES6WfFCh+f\n+cycjnk+n0eSpHcVAXAlh/p961tzEvDf/e7Fqe/22+Gee+CLX7w49V3OXEi/a5pGsVjE5XK97cH2\nxeeeo3fnTgKSRCKT4WB/P0t9Pj60bRvCq+oYnZqi2NREbW0tFpuNYlllz55eIpESp09Hqaiw0NGx\nFJvNR/+xo8xMnSBUkWNX1xg6y3E6QviVLKaRB+EMzUaKFZUuJJ+LrmQCeypNNFsCuRK15OOMmWOW\nhciCG6tUwOoyMU0rboeL2mATJc1gJpukusZCZeUCKuyThNQ0DZ4AqdQQK9avIO/389mvfe2crLGJ\nRIInf/5zMhMTWEURxWJhy0c+co6RdSl4L9530zT57ncfIJ0OUln5iix7ONzNli0NfOhDt55T/vDh\nwzxy//1IuRyVlZVYKyu55c47Wb5ixXnrv+uuP0DX1yFJVmKxEaanx8nnC2SzUbzeEvl8iJqaG5Bl\nHVE5iWd2FK2YQLYHqa+pZbY4RS5YjdPjZMOGG0mnpzn5xP1stluJF0ExKgjno3hQCBplAk4n46U8\n42VQhEbyqFgcFloWLmFkfIwKOyzv6MR0V9B23a34fFUcPfprBCGBx7MAp7MKyNHU5ORzn7v7kmd3\nvixDe03T/Cnw00t1/9+wZ89+BgZKtLRcP38tHp9CFA8xMXEIm62eiYkpentPYrUmqK29jZtvXsMP\nv/8Y+WwS0TApFEZZ5stjUxRGkrP0nB6gxuZCLubY9cR3Wb31Ho4ff4lw2MuhQ1GgQD4/ja5XEJ8Y\nYbD3FIh+rH6DO+76Q6LREQR3klsWtfHP/7wdDBHJmsZh9WCUl1Cil5I2hWy1cTpRYHg2xX/0JElO\nT9LR2MJ0JIpa1Fni9NOfmiFTqkQUJIqCgVqOEdUVvBYXlS4PdsNCfZVKW0MNP921i3XlMstbWxEE\ngbFduxg5fZrP/sEfYLVaOXDgCLpeTV3dXLSBzeagpWUVPT376erqoufIEaJjY5iAr6GBQDBINpGg\nsq6ONevXX9WhhNu3w5/92cWr77774K//ek4A7TKRhbis0HWdPS+9RNeePaCqyG43mz/4QVa9wYr2\nN0QiEXpeeokNTU3IZ3U8PA4Hx3fvZiYaRRVFDvePM5PMMjEZpnFBLb/TuYJ4NssvDgzTueEe1qxZ\nQDZ7gHzewvHjp2hsrCKSChPN6pwcLlDMC0iEKRdmKOomAamRvCnjEdOczsDImTGcskiDIFKnqyS0\nCEkzRqVYS9ocRhHasDga0LQ0VsspRLEShEpMI0GpcJpQ6HOIIiSHeti0eh3ZXJpyMYdQKGAUCpw8\ncYKNm15JvBYIBPji175GLBajXC4TCoWumhQSr0UQBD75yY/w7//+GOHwDOAAMixYUMHWrTecU/al\nnTvpev55rquuZspiYSqTYeMNN9DxJjvDqpqnq+s/mJ2dQtMETNONKMroeoL6+qXoepx4vJ9QqIHk\nRBcSWTwUcBamiZ8ZIye5mEpP8cWv/z2apjHU34OezWOIIImVFAyVxRYLiVwKWQCzBK2yg4iWx2JM\nU2N68di8BNQsw0aW4PJttN/yCbzeALquMR7uY+9zj6ApLqyOMF5/LWs3X08kIvP887u4447L12H5\nfS969rd/+0/4/WuxWu3ouk4mk0WSJJLJU9xxxwb+1//6HkeOjOJyBZBFE6us07QgxCKfwJJAANM0\n6RsYYKFpMjg7S05V8WXLOKx+MuUiWk0zR2dnKFgbaWxcQalkUCwW6e3dR0Acpb6YwyFUYbd6GS8l\nidn9dC7fSD4/RFGLMD5UQDAMSmI1Lscq1LxJXh3BL+ylMeAn5F8DRZPj09NYDR3JWiYkagQDNdjT\nY0SzGaapxmcTiBUTiNZWogrYXVYULUltdYk/+9RH6J+ZQUmn+eSt564cjo2Nsf5Tn2LlypX8y7/8\nEE1rxeU6NwZ/YOAQ1uIprq+vpy4YZDoe58lnn8XncHDLtm2ki0Uius7Hv/QlWl/l6f1artSdkUwG\n6uvnVFcv1sLDNKGzE/7xH+ecWq9m3k2/v/jsswzv2kVHfT12q5VcsciJqSlu/sxnWPEmZ+R7du9m\n/PnnWXI2ag3mji1+9vTTWGWZhBHC62xmNpnj9PgI7Qu93La+iVgqzfb9cRJZg9b2dmqbmjndP0B/\nbxflcpigv5NUWiGf0rHiR9OHsOpuEkzgoIhMjg4pS61FIqsWKQkmrS4XnrxC2ZSIGAYDgoeotIy8\n4EEkg0NKsLa2wGzJR1FyYbNWMJ2MYshBfG43jaUx2hrqGR3qxuuw4LTZKEkqTR/axv/7rW9dVmJX\n5+O9fN8VRWF4eJhcLkdVVRVNTU3n7JxNT0/zyD//MytDIfb1DDI8mcVEZjYf43Nf/wLXXbeGioqK\nc0Kfjxw5xn/6T//A/v3jGEYQWIGuC5hmHEGI4vPlWLx4OUODh3BYQIl2sUG2sMDuxuNyYmDQk0py\nwuZmQdty5OQMUzOjeAs5DEOn0hYgbuqEVIO0XiaAjF0wkCWdQQyWWaxENAHT7qKmOoi9JsBssJWN\nN30Bq9XB0b2/YmLfk6gzCaoqFpESTXLWEEXRzbpbtlBbW+Bv/ubrl/RI/bLcGbkcME0TVVWRZQuT\nkxG6uvrRNAnT1DGMYdrb/YTDJZoaN6FMnsCRzVMuZTh66gC9QSufvOVmGmpqaG5oID44SD6Xo1KW\nWbCwkfFwhGQpj0fzIyWnEGvbgCq8XieRiW6MjIOyXsJPDk2AnFrGYYpYUmOUpmooGzPUakk0vUhZ\nUVGYIVocQCVEwAmLqgLM6jVQglQyCQ4HNQ4n2bxCSRtHyecpqWV0yY5NhCyQM2oRdImg30dNvR9L\nxXIQp8kEg7Q0NmIdG0NT1XlPbICQy8XE8DArV66kstLHyEjmdcZIbHqQtRVQf9Yh7tipU6wLBsmX\nSqiFAgvr6vBlMrzwxBPc+yd/ctkoQF4sXnwRNm68eIYIzO2G/OVfwv/4H1e/MfJOKRQKnNyzh02v\n2t1wOxx0hELsf+EFlq9Y8YZ/Y7+ZAJPZLOMzM+iaRl0oxJqVK3lwzwlcVjcZo0heN1jV1oEoKjyx\n9zh6MYGZDdFi8eLK5ejfsxvNNFnRVE+pbMcUfExNjGKjGsG0YFKJwlFaKaNRppISVsOgqJl4JJlW\nDGKahiiCVZAIaDIWs0hemMQjSdj0DB6LhZZgiOHBPJK0DJurFqdoRSZDMnqSSjFLz8kwTdWLaaqu\nBUz6Jkd47GdPIOs6C9vb2frhD7/pAuBqxWq1vmlk4NDgIEFR5Je7D9M9KmKR/Pg9DmZnU/zJ//VN\nFtQ3YLGbNLUGuP3DH6BzzRqefXYPXm8zdnuSfN6NaZYAA9MEq9UGxTNEex6jXpSwIZIxijgMsNqt\n2GxWTMOgCjDyCezxCOsqKnkmNolpmjgMk+lynqhhYlCBhAddKAASSS2PRVQoGDI2r49gbQO3/M4W\nmlpaeGZgEE07TU/3MOXhwwTLOQRvNV6HD59pMqDEcPqrOX7wGJW3L0LX9cvWv+99a4zE43EmJyfx\n+5309XUxOJjC42nEYrGhqkXi8RF+8pNn0PUqlEg3gayKQ3SStWkEchr2yRj7n3wSR0UFeiBATSBA\nPJej0udDtlpxhfysaF/M4vZ2hh+MkCjLKIpOLhdHLCvYRBBKVnSLjK6LxNRJZMPEgsnxgV2Ish3d\nZ1InlKmwOvE4fIyVswyRo85fQbAySMjVgZ6XkHWdBfX1ZGIx7KpKtlSm3jCISQ5GinFagrXMKgqi\nv5aK+jpMVAILa7nppg8yNTVAqMnFrh37KZzoZsg3TEtzDe3tS5AtFgrlMrVnNRmuv341J08+idcb\nnE+FHoudQTJTtDYsBuZWmflUimAggKEo5HI5AIJeL/3j46TTaXyvCmG+GrgYUTTn49Ofhv/yX+DI\nEVi79uLXf6WSTqexC8K8IfIbfG432fFxNE17wwF30eLFPP797yMePky1JCECe3p7Ces6muRFszvR\nBYNcOcpYdIqQLJNMjiKLJVRZQ7J4kUURl6JQME1SeoqGyjqiKR3RBEMTEUQdTc9TiUoVrcQ5Q4Ug\nY5MgqecQDA3ZbkHWdUoOF0Grk2Q6hWb1ssTto1ExqLBXkMqN0dt9ipJai84siXSUhgo/ddXtzGYl\nNKWbqoIXWZAolnIMTU0wnM9gdfowpmeobW7mV/ffz6aPz2X2drlcLFiw4KJL3l+JhMfG+Mnz+zg+\nnCHoWUbQ6+bkSJjpmQTVgesQlBzlmR5Od59gaud+ggvrSUoOBPtiAoEaZDmIrnspl3VMU6dCHaBR\nKNBksRCwVxDREmQFAcFQGElME8o40SWJWVXBYqo0WG2MZ+KY5RwVpoFThLKgoBOkLNpxGAIl0Y4k\nKJRMC7NGCY/Dg7uxDavXQX1DA4VymdbFi/jSfV/l37/zHfJiiq7JCVR0VK2MRbbhRyCHRi6TIhi8\nvDMEX9Z/lf39/fT0DGKxyHR2LqPlVeI07xbTNHn22RfZvfsU4KNYVNi581c4nctxOCopFmdR1Uk2\nbdrKSy89Tz43ha9QxC7ImIJJPj/FAsOCXZLAMGj3eBjO5cjabJSqqhhXVbymSeOqVTQ2NXHyxAnG\n41FKzgwxdZR4JkON14OBCqiIsoN0OUGTIZDGhYwfmyYyoUWJzQrYRUAuYDVlqkSRWSNJzmbhhiXr\n6BqMkso4sHi9LKpvIO52c2rgBG5R5XSyjxkgK/lJZ8oYQp7qQI7K8gRSMQHhKV58chxPTQ2xmElz\n8030hzNYHC5GRpOUyt20r1jGjGFw69lt79bWVu6550aefvplVNWGYajU1bm54+6Pkj5+nFpAkiQM\nQUA3DEqGQe3Z7QLDMNDhsrXK3y2mOacv8hd/cfHrtljgz/8c/uEf5lRZrzGH1+ulZJpoun6OQZLJ\n53FWVLzpZGuxWMAw8JkmLkFABPKKwuxUEtXbjM93HYJgMnX6B/hy07S2LEVIwBq/nz2JaUazAkuN\nShStQElNUN0gkSmkyBXtaFoRq2ggiRZMJvHhIUuGGYo4TTsO3Ypm6qRJU7JYkG020ppOMZcmLQnk\n9CILlTKSrqEbJUplFUG1YpoKqpDA1HVS6TiSK0NNXSXZdIiIojKVmoSMhVhJxu3ZgKrqvNQzxebr\nNPIDA/zg7/6OG1asoARsd7n4wB130Nraelkkx7wUvPTSbvbuH6dvsgKLuJBMoUgis59iOYNdaEMW\nLETCB+mwB/D6O0kVEixwLmRPz17MVjeBQIh0OorX20wmk8HIj1EnGtiMPA7BCloGSyaBgoVpw4ti\nQERTsEkGstVGWVGIzUaxCRqdoozdYkUzNERd54ygkKSSpJhCcjiIqxYykkxWVVkoiWhjfcQrHPzP\nfx3GdHlZ94m7mJqaYmR4mGJvL4VCDvR44pQAACAASURBVJ8kMZMfwemspWyqRGbDZNQzzI5U8+D3\nvsfNH/7wvDDnu8E0TSKRCLOzs7jd7ouWrfiyNkZ+9KOduN116HqB/fufYtu2Dj7wgW0XVGdvby+7\ndp2mpWUTojj3AIeH00xNncJm81NdXUFz8034/SFqavpJJg9SLGTw2wJk1RxeTUMWSviddhK6Tjyb\nxVR1uuPjNK1bT+PyhQQkiUAoRHd/P0dOnKBzZTtdo7PYpBpsgsHkdD9lcwJDKDJSLFBrqhg40HGh\nYgAZgoCu6SiYpA0D05EjFPSwddlKln/iExiiyNHRH6OoZ1CKFmbiTpwOL+1tHhbWbuUXz+6g2bYE\nUQiQKImkc6M4ZnpxBFvw2GQq8grjE8fYdXCS1Rs+RCiUx924hGf3bceplBHDRaJeJ/fce+85wk9r\n1qymo6OdaDSK1WolFAqRSqV4sLubaDJJyO+ntq6Ok4OD+CsrqT4bTjgyPU3jsmWX3Jv7YnPqFFit\n8AaaUxfMvffOKbIODsLixe/NPa40XC4XyzZsoHvvXpY3NmKRZYrlMj0zM2z+5Cff9BhwZHiYVU1N\n1K9YQXR6GsMwSJ6J4J9IkZrtYnD2DJq9Ek9JRdeddI+exCaVGU0o1Msi/gYrtdVpxsOnKSglwtNB\nFE2jrIbQRZmSOoHFakFmFgs+Biiisoq8EMWPlwJ+VHGC8Vyc8VwOpygiSBJWj59K3cBrTKGgIygB\nPHI9skUnUshTxIIplTEIMRt3kDcLVHrtiDYno2ULsrQMSZaxih7KQoqgbxlPvXSANsoEHA6WNDSw\nv+c0+/b18+KeYVatXc6WLSu55Zatl33I58VC0zROnDjB/fc/gte7lIpgPWMjA4hUoputFIonwGoy\nFttHg1lEEQUSagRTmJuAl9S1cXBqGG9NBaI4Qyz2MoIQwEEYl0XFY9FY3FzFyMQZdLkKr1miWzPw\n4aBSqKQsZpiW3BSdfiYK06yw2EhrRSSLBUGQ8UkSvjJYvH6mE3lmNQUDkUK5zCKLjLWUQ9M1vKJB\nKl0k5vVQ2D7KsWN/S5U5S6XXS3VtNYVohha3m6lShCldBynGx29Yzd0rVzKTSPCL++/n0/fdR+js\n2PxOUBSFJ37+c2b6+vAIAiXTRKis5O4vfIHAG2izvF0ua2OkpWXdq6R2G9i58wCdnR0XFJVx6FA3\nfn/rvCECsGDBYuLxJG1tS6mrWzB/PRiUsNuKjOciSJkMZVGlQk9RYdPJWixMY2UkY2AT7FQEm/D7\nNzCZLDFWHEXpPc3E2ATLmxv52NpVmNJJzsRGMYmTyMexCjUogpuwMYZMAisaCgIGBQRC2JjGQCWE\nA1MroydVhnIZzujDGEt6qJFEvnzzFiLjE+zbe5TBrl+RtDmxiTp79kaRZR9L2hZQV9mEbpocPx7G\nlXajlcbxeRsYj09QLqSo1ksYA8fojp7hjF5Bw4JPoKpFZmb6yAje8+bBsNls51z3+/3c9fu/z3O/\n+hUD4+Oofj+Jxkb8lZUMRCIUAKm6mk9+9KPvut8uV35zRPNeucG43XPS8N/6Fnzve+/NPa5Ebv3Q\nh9gpSRw4cADZMDBsNq6/8863jKYxDINMPk8qk2d0Ypp8YpbkQD8eawVLQ40UYkX6wwfRdJWiYaJI\nJvaqRmZVBYeog2wBBOyeZYQzk/gdHTisdkxGsbk0MNOo+TOAwgQ5NBqQkUkKVajEcJgapukgKohU\nidDsdGKKIqOFHJOKgEeuQtVMBDRspLFgRzcL6EIew2xFEu2YuoaWzWN11XE6sY+C2UbQ2US5NMtU\nKoXbU2BJ8wqOHT/B0sUhZrNZ/vHHP2Fi1kVLfQdW3YbLtZwXXxzAarVw001bfit9dinJ5/M88MAj\nnDoVJRx2Y7XOkkgOYXV40PUWbJKdUnmEkjqDqruxWA1EqiiXixTVceLxKLUNjTgLEZLJIQShhGFE\n0PUydinHkvZWxJKTiako06kSmugki4scC8khMkUEUxWprGjAI9mZjIVZK4kscLmgXEYBSi4XFRQZ\nnemmFj9+zYJipoiTRETAanVRsNgYLsvUVjUhOpwsqGrgyOAwjmqBZYsXUDQMssUiw8kYUdVAdbi4\ncd1KFjc08PNdh6lwWvF5rBzet4/b77jjHT/Hfbt3k+ntZeOrTinGo1GeevRRvvDVr15QH13Wxsir\nVziSJCNJlYyMjF6QMVIoFLFYXlnpZ7NZYrE4ExOjPP10gvXrt7JwYSsTE730H32elWaeoWor01Pj\nVAIFWWNIl5kq+jGFSmTZx4yYx64JZPIqiTMaIyOT1Nd3kCvYiScreObgEKsWVTOd7CeRnUWjllK5\nhNe0kKOZGCJBSoCOieWsLK5ADgsFwEkAm1BCsBpEyhYeeegpbm+r42SyRD5foFTO4TatTEVjhGxB\n/KoLpaAydOg5Bvx1hKoWY5MkKmw+bFYoiils5TgdbgfDmTKJxBSUBSo8DuLRfmRdQdKLZDJ2Dh48\nyq23bn3L59rQ0MCX7ruPdDqNJEk4nU7GxsZIpVJUVFTQ2tr6nqREv9Rs3/7eHNG8mq9/fW7n5Rvf\ngLq69/ZeVwqyLPOB227jxm3bKBQK5+TzeDN0w+DJwyNYShXkMzLZrIC1ZMORzyCpcTyGTJVpY6I8\nQ60YoNFbRb6g4G9qYjo1w+nBKNZRDV/VMtzVJqJUh6JqKGINHr9ENuNEJU/AGserlAihYDDJjCER\noRkoYTXCLEGk0tDIFjTyBjgFHxWym6QhIqt2RMGgZKbJkyJLAAQwzQJxNU5AgArBRiajooserJYS\nZWZQ5CyVNpFGf4BCIY+ha+waHWVxMEgiY9LubSQTjxPRdTRNp6FhBbt3H+WGGzZdle/mq9mx42Wm\np200N19HONxNINDCwMAEgpCistLD7OwUiBqKUkISFzClzGJXEggIIHjoPdnDiZEB8vU+fL520uky\nNpuJJJVQtQFeODZEi92HC4G8IZI1ZBJCEMQGTMFFUW/E5BCFgoTDBjZRJiNJOOx2VFnGabVSKpcZ\nz+VosVVgKllMM0clOpUYRHQYyeWosbloEiA1NcoZmwVf7QpERUNRXYi6TtDlQli0gMV+PzP5PA6H\nA0Vx0D9uw+2oIRIv0j02zKnUk7QsXkxDQ8PrEgjCnNFeLpex2+3z87BpmpzYt4+1rxmEmkIh9obD\nxGKx86ZPeLtc1sbIazFN44IjMTo6FvLCC2O43T6KxSK7dx9B1z0sWtRCdbWP7u6XGB/fRVO1nUY1\nS2swyKr6eqILGtg7MMBMsch02ka9pQpZspNXRLJSiHzBwDYVpVgUkFWB0uwpymWNrtNnWLmsjUd2\n9rC0aTUzgUnUeABF0Mkpkyi6j2lMZCZwUsREJss4ZVw4kRjBxCtqiKZKSdVQNSf5eJGu+FFsliok\nuYJ0vgi6gls0mJLtKIoNxZjFqUehFKEw20tRtNBicdEYsiFkMnRarZTLKnZBpEFR6DPiCJqOv5zC\nY6/E4YLiSDfPbs+/LWME5ozHVzunLly48IL66nInk5lzLt12YSeHb0llJXz+8/Dtb89F11zjFWw2\nG7IsEw6HKZVK1NTUvOF2sa7r7Np1lLqFN3Fi9zFC7ioKmQI5qZUUgwSTJQTBjaDYyJoy6ApyOkXA\n4+ZMbJKEpwI0D26PhQqryvjkJHUt7dQ2NrNr10HKZRNFMbGj4cOBEwGNIlb81FKmxBls+BBIEURH\nM0FWVQwqUAUPFgQGdA8OcrhMgTIOMtixImAYBZzkcKIScHqR/H6KhkCF1Y6VBKrSjd8qIEoWxlI2\n0iPTOANWAhosr6lhNBFHKSsY+SLlconp6Wmqqqool01KpdJVd3z6akzT5MiRPmprNyGKMpCnp+cI\nIFAuF9D1FLqexeVyUBS9mIZMUqkAZglhx2v1kDIzTGRzWKY9OBwNlEppHA43+fwU+bxMxvCRyRcI\nSHYyCGTwoZo1YEYBKwJ2wIkpVJHMjrJEsjOum1T6fAREkVQuR388TlmQcIoeBCFJi+ihqGfJAnk0\nWg0DR7GATZSoFi2YmslA70EM7wJGJ8KECnnaq6tp9vmYSaU4rSgUTQtOsZ4q39wCvqwWiZ6JkJlM\n8tg//AMD0STBRW3c+YmPsX79WpxOJ4cOHuTwjh3kUynSpRKNixaxccsW2pYsQVUUrOfxybKIIsrZ\n9ArvlsvaGDEMY/48U1XLQJzFixe943pSqRS7d+9haGgSm81CsRhmbMwklVLIZstYLGmWL19KR8d6\nNE3jxRd/xoHnH6etWCSZyTBjtSJ6vXxh40YeONVDSQjgDjQyFIljCDaCngBZtUQqNY2cmCCo5MgV\nZASxmmSpxJ7uIRQ1QTrbxVRKQzcdSKaAIFgQ8KIQZIwpKsmjUKZIDV485NERKSAZYBcErIZGLlGk\nrBuoehmHmqTALG5MdBSihgtLsQqraANitJo6IV1AlOyMKRn6S3HSmpVlpsmwpqNbXAQ8NWRzaazF\nFIam4nb6aKzxU1dbQzwT48j+3fzd3/0zVquF665bxsaNG963jm+v5YUXYNOm345k+5//OaxeDf/5\nP8PbyDz+vmFmZoYHH3ycZFJAFO1oWpzGRhfZ+CzDw2dwB0Js2bKerVs3UywWyedFauubGWvWUE2D\nQjFLUKoimZxFKSaIlX6TTFyjRBVjap7BdAq/exHN7lbK6jSz6Qym0YxVaqLv1F4KpSyaZmIRgogY\ngJtJEliRsQAyUZyI+MngxYFKEh0NOzJFVDRUVFMkrVnRWUEeG2mmMZnBQpoq4pQoIqLjwCCejmCW\nnYgWF00OsJomdjWGQ/CR1wyKchZPQyN3/e7XOPnggyTTaeLxKQTDiSZYsTv8HNx7DJtNprraisPh\nwDTN/5+9Nw+y7KrvPD/n7vftL9/L5WVl5VL7IqlKCxLakAQ0IIwYSfbYGsB2Y4+XgY7ocEdHtMPd\nMXbP9MREdxDhCAfRBgLcQLC2EIuwbCS0IYykKlWpqlQqVVVWZVbu29u3u9975o9MZDCysSRQCTPf\nvzJu5n3nRp777v2d7/l+vz86nQ6apv3CFSZhGLK+vo6u6wwNDf3EglVK+fK75MiRo5x78QL9nkoU\nCVyvQav5LOXyTpKkjGmaOI5OElm0ZIWm7GCEVfJpi0p6kvlWxPp6Hd836PeXiWONJJkCcnQp4yTT\naPgYZBCsEVEkRAdWEdTo9F1SLGFJl4Ew5IULF5CmiQHErgdIOuEaFpKaUHGJ6RPRB7KAToyT9OjJ\nFFmZp9qts9KV+OEcYcPi6KUNSlmVPRPjTG3bxpG5JrvzOlIm1Bo1zpx6hGIYo5lQvVBj/+gups+t\n8rWvHeX06Qsc2DfOhSeeYDKV4uz0NPl+nzMnT7Jx6hRD+/dTHhtjuVpl+4/oTfqeR7ilH3w9eFMX\nI/Pzz6DrQ0gZI2WVu+666cdCaH4apJQ88eijfOovPkWrYWNaJTLlMqXhHOn0It3uOqVSjoMHr2Zk\nZBIpJSdOnObUsUtMhjoVTWA5McXEpyu6zNRq+K6DZQ6T6MNsG93GUrVHw5F43gYyqTHlh6z6DkX7\nGlQUVClZaDXwsHHDAlFUJ47Vra0YH4mKxhpDKGQYoksRiywxfSwUXPI0mKMse1SDHAECl5CL5MnT\nZxKHLAZnkHTIkJU1/LjPTnxGsHHpEURtBmTADmLqrsdGIkkJFZOQTrdO3tDRvBgvClAMlcJAloSY\n5y+dJ7S24ftjZLNlHntslunpOX73dz/4L84V81rw87L0vhLGx+F979tsoPfHf/zGjPlmRxzHfOEL\n3yCKxpmYGCFJYk4c+VuO3v9lxvJZhoamqK7M8WA15vz5eX7t194NJKiqhm3bDAxsI5UdYO38c7T6\nXZI4R4pBNAzAw6VLhhEEEX1Hw1A1HLdBEgxTr/Xp9Dv0nC4Rawh2E8s0EhMJROxEMk8GiwSXKkvs\noItKF4hwEMSoeFgEhPRZo8EuVFJoQkfIARIukmIFg4AcfTr4OOhkpCTjOgh/kWpPsFvPMDW8jWwx\nz2qnQWp0kF1X7sdKpdhzzTVUL86SsXXWQ4/BgSlCVLxUgWeffYSPfeyjzM/P881vfpdGw0PKmAMH\nxrnrrnf9WI+bNyteOHWKJx98ECMICJOEdKXC+++7j/KPNAFUFIWDB6f48pe/xbN/d4GiNoJlCDyl\nhaXpJDIijpbR9EmEWCOKmiTJdjS1jJQuqJfIWBInUIljj42NZZIkhRAJUlaAzdZqEgtFGujo+Cwg\nmEDDJkElZhxJhpiXSFCZj3VMJEUZYYY9ImAdFUGeDVRGMLgkO0giykCOzayoGJBE9KVP4nk0hYcr\nHQrmDght3MDjpX6LeW+Rf713H5VRg0DP8sz589RXZ0l3Wuh6hna3xUqgYuoDbC/maQcRy8sJiy/c\nz92HD3H8Bz9gUFHIDQ9T8X3Ot9sM9Hr0Mxnm4xh/ZYWhfJ6O4zDX63H7b/zG634nXM5GeRXgIWA/\nkJZS/kT7xI985B4uXpxF1zX27n3Xq96POvH88zz25a9gBgNcO7UfiWS91cLp5EinLd7+9nHm500q\nlc1QoFqtzvnzi+iJTzGbhygk8PpIN6acEjw6Pc1Cz6flr5L3doCSQlUzeH6CGznEnXliXaBrU3hx\nQOC3sJMYXbok7AO/j43A4yQxZWJ6QAeDDmVS1AlJUcKlTUhIgxyCDAk+q/j0aKJgkecwIV161LnI\nKoIODcZRmKKHAZzEANok9ESKDJKchICYAIGuGCgoiDCgS0wmM8iy0IhFFrfqcP7732fbkIk0Roko\ncOTIixSLeXbtmmB+fp0LFy5w4MCB1zX/nucBvKl97/8Uftil99//+zduzI9+FD7wAfgP/+H/j4iH\nzQ6q9XpCKqUwMzNLt7OOO3OGYXUESwgsYZJpdTi//Cx+fBNXX32Jctmk05Eoiku9vkJt5RKza+dw\nXAdbjhPiA2CToodkjRmKsky767HRCchqKbpRl5XmCjKsIKgAbQQxEguDLDEhKjkkra1vnIXFEA4e\nY0CIZIbMVtmi0EOljoZPE4ULRNIgoY6gj4qHSkyL7QyQR0dQp0+DZW5MPM4IldjvsrG+jB/00G3B\n5GQF1XVZWlzi1PQsYnGdqw/cRC8OeWllhjXHY3LyGoaHp5Ay4a/+6lvk8wfYvr1EkiScPz9Dq/U1\n/vAPf/tN7bSZn5/na5/8JNtsm3I+z9jgIGfn5/mL//pf+Z2PfpSJiYmXWZJsNsXMzHOIxCYhpNdv\nEycrFK0sjoypV19kcMSkWCzQbDrEuCTxOopIIDJZabhoRkwqHeC6HcLQAWykXANWgSwJ80gsEnaS\nUAOyhHgIxhBEQBGFARRGiVjnHH2KsssQHTwkg+QoYXEaSURIloRhdEYI2dwA0YnRERgkuIRynVgY\nDOi7iGKNtaiPZRbRtTzV1iW+8tjzXHPrlRiWQiGfx+umqYRpUghkolFWDRpLy2SVbUR5F8PIUa02\niMMQv91+uXNxxjQJGw0qAwMcW17mf/vIRzh94gRzs7Pkd+7k7htvZGJi4nXP5+VkRhrA24Fv/GN/\nsH379ld0c/xzceypp9ASlbS1WcQIBMOFAudXVkjlJsnlcsTxHL1emUymwMmTp5mZuYDpraDagyy5\n85Q0Hen5vLSwyNHAIj30FsRGjfXmSSQVitkS2ZSPaRdwehX6yTIdv05eQhpwEYTEgIuKi0aBDAU8\nNohYI6GHwiABMT4BFh6CAA0bkxwJCi4KGSwctiOI0dAxEWRI41HBwSJFBUGRHhKFEZrMkJBGlzEp\nAkKgQRqkoCkCbFXDTgROAsuJQBm4mnSQwrYUNnpNTq06SLHBwNAuthX3Eschx47NMDiYcOnS4msu\nRprNJg899Cjnzi0iJezdO8Z73/v2H1vJ/CLg523pfSW85S1gWfDUU3DbbW/cuG8GLC8v88yTT7I6\nP09xcJDrbr0Vz/M4efI84CGETW35OUa6G6TUIvXaKqobkzFNRuKY2dMn+MTHz/F//l9/wt/+7Q9Q\nxQoXT53Faa6Slz2EGKCVQB7BNhSyKARY1HBRkrPYehoUwXJdxQktkjCNTFZRyGNRICAFCCBBUqRP\nhE5MiIJPhI6NRxoFl2Vs8kwRE1FHAywkgxhcwmcGQRaTUUJapMjQJkuKIUJCTCRjFJhGUuUStlAR\nhkWsqZxr1JFpC3mpzdnFk0Tj67z1rXfyxLn/wcWXzjI6UqY8NcH1h25j27ZdLCwc4/Tpc+j6GLnc\nJuOsKArbtu1mfv4oc3Nz7Nix45+YlcsH13X5b//vn7N+ts1SyiZOlllrPEUpN0zbV6n2Ps911+3k\nvvvuJpVKcerURa6++kaebTxN0p8jYwrcSGO9H5BNl0iZu+j1GiwuzqKqN6GINIkMSGQI6ARxF9wG\naQVgGU3bThjWEEJlM9F+YyueYQSdGgKBRCEkjUoEaAg8FFKkSaFRRWUIlSJLzDNGBwMD0CgQoGCT\nQ5CQIKlikGcFSBMRo9EgR48OfqIgkbihj6bswAttlCjBCWPOLy2Smg7RtO9Tn9/AbflEQZWdpqRg\nDWDqOiJJWFhdYv8N7wASpGUQRtGPrXbcMEQ1TUxNQwKlUol3vfe9P/M5vWxlr5TSl1K2fp5jdBoN\nBjJp4iQEwPE9jp4+zYsnT/P4Xz/Iw9/6BnfccRVxPM3Jk3/D2bN/RzYLmeIQrppCLR5kXUsza6aY\njiQDqTK91hpOVEDKcZABze5phKqzfeJKLMtgLUgoyjZZJAOoDAAWkNAiTRETGwObFFMYWOTxkDSp\nEhGQsE4LjwSJjY5AwyOFg42KRQHwcOiRIIlQiTARmBRISOgBkh7bWSaFB0T4BFLSwsQhj8UI2WSA\nbhyxrOisKimS1Dhj+QM0ww381jST0QZDTpegPYNh+GiajmmmKJUmmJtb5LUuljzP49Of/gqzs4Kx\nsVsZH38b8/M6n/nMV+n3+z+bSX+D8PO29L4ShIDf/V34zGfeuDHfDFhYWOD+v/xL9Lk5rs3nKTeb\nPPLZz/Kdh75Dp9OlUBijWBwmky0DNrXaCpqUDGQyOLHP2eos0epLBC8c539+/OOMDVlMDLQ4NNxl\n30CWayYOU7Cy5ElQ8UkRkuDRwSNHzA2K5KCQtC7O0ez2cL1xkmSChCli2gh8dHwkHWJ8JH1CwCOm\niU0HnS59miScJsMyGhsIHDRM8phbT4gEG4MJdFJErKNRxMBGZRsKeQRpPMAHDNKskSaWGudihZNe\njpVkDyvBHh578RILrT7+Wp0zzz/Fze/5IGLsGmr2bq69/T7GxnbT67XIZCJ8X5LJvJIIKU273X4j\np/lV4fHHn2JxHrYPXUmltB0vUKm1x2j2BhnOTVAqXcn8PDz00CMkSYLj+Bw48BakEoKAti+pOVmC\nuEy9t4Kdy1Mq7ccwIIoaIDZDKSU+MT6go6p5PDePSEok0TlULiBYI0udPCtYJMQ0UABd0QlwEWjE\nxICDwAUiBJvb9BJBDoM8aXKARpc2fSJCfCJUVBIkdTRSZBmhSJcU57C5RIkeNhYBA8EcWdqEyTJq\n4KAHARYmOW2YXqeDre2Cfo2r8nX2DqgIEdDxanSCPrXYZV0xGByeII5r3Hrnu7nUaKCm03QchzCO\nmW632bdnD4u1GpP79//cmiy+eTm4nwFGp6YoZC3CeAM38Hjyuedpr7QoiDRFNURcqvHApz7Nhz50\nNwcPVrjxxpvIygbDMsGrTbNWPcu59QXsxirlUMHvOUTdAiQ2UpQR6l4k+1irznP6xafoORZOUiIk\npM0CVdGjS4BFF6iTIsEgQSEhpkqahAiLDBNEjGAxjEKPFgEuDj4tYuapYJJC4tBGpYfBMh49ekh8\nfDQSUgRY1AnYQCWgxgBnSTgPnEGwgIVBDp+IEEE/UenLgEAmbHQ6zNZPMKn1uTY/yPbCEPvzw+wz\nVPqrR+j3N2vGMHSBDqXSa1NQnj17jnbbYmRkCkVREEIwPDxOt5vmzJmXfmbz/kbgjdSL/Cg++EF4\n8EHY2uX6pcD3vvMddmcyjA0OYuo6g4UCV4+O8uyjT3L48GEajRdwnDpmdoQWPlG0hq3pNL0eT86d\nI+UaDFOirJbonbnA8QcfZO6FF9E9g4KVJ3IcYq+OQYIFeIQ0CYnoMImDJQVJr8+obzNCB0ELgxgT\nA31L36VxCYUFoEPCKirnMBgAXLK0iHBxyNNkjIAxqkgW6aOj4uKh0SBhGZ82CktoeKQZwEElISFG\nINGI0OiwSWkL1aRu5KjL7ayLCm1tiF6kEoQjKDLP9kRDXDpHc+UiO3cO4vtNLlx4gYWF0zjOS3zg\nA3cxOVmh2228wn+9/6Zt25AkCUePnmFq1yG6rkucxKzUGgzm99Dpx7R8n0wmw+joHl54YQ7P85iY\nGEEIwchkhcXuCvWehR+mcaMEJ8ixutai0eihaRXieBUpe6hqHkX5oe21hYzTqMl+LK7AlDdhMkZZ\nrrKLLsM4FOgzKFw6xARJhog1IhbYfPZvoDCDQpYedRJAByIkMR424BCwhKBOmRoFZjY3lAhQqJKw\ngUcV8BjEQmVsq1wV+JQRbKeGFC36eKCEFI0BgsYCmSShlNtFgMJ1g4PsHhnBNyWngjXWM2nSI2N8\n/4nP4VTP4HW7MDFBb3SUJxsNvrO8jLVtG4mmUbVt7njPe35u8/qmFrD+2Z/92cs/33777dx+++2v\n6vyb3/EOvj4zw8HJFN858jidlkNeS2OkQt66eyeVYplz8ye4/0tfotmLaS2e423bKzQ2OgR6hmZ9\nhrzfQx8ZwGt4tP0cNlNI6dCnQZLkkTKEqE5WmSQhC8Q45FDFLBkuUcAgS0AXDZdpYiwgxEKQRcdl\nmGHyhEjq9IAUFh0iVlAoMUoaiUaVBkPUGd1iS1aIcLAI6aNgUkelg4XO0JY8rkef3BbFa1KhSYoa\neQJcInpC0MHETu3ANip0G3N4ccCiW2W4XEHIkLFsijBcZXHxKYaGptB1l4MHJxkZGXlN87m6uoFh\n5H7iuG0XWV7eeE2feTnwQ0vv3Ah10gAAIABJREFUHXe88WMPDcHhw/DII/D+97/x47/RiKKIjYUF\n9v+D7VpT1zHDiLGxXZRKI8zOXkRRXPR9u2krdVreKkcWVpCBIJ8ukbVNuv2YpXMb6BdmmPMdMsYg\nbi/EUgcoa2VW42W6qAhMAjqM0yKHSV8GpFUdpMouIvqsEmGjAQYSECR4ZLlEnWUEeRRiJNNkiQkI\nMRhB4yABFgkBCSYJK6xxlhIqwwRbT4B1FBQiQroYOGxHYQUFC4GCD3hIIpooho2hj2Cp2wn1DEES\nkQ99UmqRAJNGv4ctA8K1WfbdeQeZDNxyywh79+6iUqmgqipXX30lx449QKuVpVAYJEliVlYusH27\n/TPRAbwabOpVznPy5FkADh3ax969e38iAyVJEuI4YWJqB0cXl5CtBrHcZBq6rs/U4DjFYhEhBEJo\neJ7Hu9/9Nj7+8a+wtlYlFuMk5JCoKKKMpqtE0Vnq9TWy2TK6rhHHdZKkipRtQAUUNAZRSQijiESA\nIjV0UigkFI1h+mEfRRbREVvzHJFiBo1lDDRyWPSYRUGw+ZxPWKTNDrokwDLDeGSxEfgImpSJ6JLF\nI0eLDCo5TCJaxHhIHIbZFLU2WCEggyvnCNSrsBim6sxRVl1UGWDkyswuCf6q1WJYl1RMkz1X7mFB\nVWltnKaSFNk9NMFAtcpSEHDHvffym//237IwP4/TbjM4OsrBK674uTop3yzFyCuS3T9ajLwWTExM\ncO/v/z7ff+QRopMvsT3TYc9khanRCWzDpNtrUl9r8uADj5MZmqR25jgHD72VkQO7aDSq9BunGdcE\nDdelEdkImUYRBqZUcQUksoOgh6WmsfUsgV8lS8ggKUK1RC5Zx8OnLwUBLhKbLComGSw8mtTRyRMD\nIQkm4GGRQqNMjy51VogJkZSoUqGPhQr0GUIwS50uMTXS9GmjkCFkHckMBbqUUAm3gql1wEbDRyDI\nMEGHOUyylQP0ejFxAorYhiMbVLsOumhRyWiMprJkD06wZ89hoiikUGi+5r4Gg4MDBMHKTxz3vA7D\nw6/esn258PDDcPPNP9suva8Gv/qr8MADvxzFiKqq6JaFFwTYpvnycUVRyJbzdDo1JicPsm3bZqZN\nt9tkcfcgzbkXyU13KLoe6dAgim36fhM7CDFlhEgCyuEaUiq4gU9dGgQEuASkEBSIGUMjQKcFpFTo\nBg5gUCFkgzo+GSK6aLRJCMlSYJwebdroxAyhsB2TGQKWMNBJo6ESoQMJMQFFqpTJo+NQxkAQcFFA\nRmqkCVhE4lEgZB7IEuMznHXxgg4rURldOmhqQiBdYreDrheRmPhBFVIauj6IW69Tr68wOprjzjvf\nxeMPP8yTDzyAIQSJZXH9dVdxaX6FxcXzQMyhQ7u48853vKHiVSklX//6tzl+fJlsdjtCCE6depLD\nh8/y679+D4qisL6+zlNPPcvs7DIrK0t0uzZvufVWps+exZk/R6ezTrFS5K233IIQAsfpkskoFAoF\nSqUSV101zhe/2EHTykQMADFCZIjjBNBQlBjf30BVHZIkg5SSTWmjDWhohChEeDJCEpHCAnLErGAz\nwIBoU5NLqOTxcNGosR2TFAYBfbJsoBCwikmEhSRgnC4aMacxCYkZo0kWhS6SNRQCUhToYxPQpYxB\nhlECNqiyFxVD0YgUi0LcY024bJDGUrIkSodSIU2QGEwvzRHGguuveT++p7CytsTx3iV2qCaRZ3Dz\nxE4KmQL1WpNadZobbjzEie99j7f+8R+/oR2fL6ebRgO+AxwCHhZC/ImU8uhPO8/3fZaXlxFCMDY2\n9lPtRBMTE0z83u/RcGOOfv0xDoxtKg6D0Gd2dgY/yTKy/TCTu6/g2dOnOfrcs2SzJdzmIqLTwCYh\ndEJ0mUcnIJQtImyk3FRxCKqosgfuaQqJg0ClQxo9hiIaJ4SkKsZIkgwORTyamGwgiFCoYhLRQMfB\nJkTHJ8Chh4rDBDohKwRIRojIoWMhSROiIlCQVAGBwSoOKueIcRklS4XdGEpAI1mnyAZ5BH0lg60o\nyLhHlwgDk2ZzgyDYgbSHcb1lNBJ6UZ3hgkkvdllREvYrMd/73gNEUY+77347Gxsbr4kdOXBgP48+\neoRabZlyeRsAjcYahtHkyiuveNWfd7nwzW/CPfdcvvHvuQf+9E8hDDeb6f1LhhCCw7fcwtmHH+bw\nxMTLL8iLKyu89Z13sNHpsLR0jkymhOO0ieNVPvKR32JxYYH/508/yVrzElpkousCVbYpGVnm/A7b\ndYuKTIgDybrw6CcRLYqEDFNDo8UiHhuMCJNAscmHXSJaLGFiYxET4SCxaZEG2qTw6BKi4hIyicIk\nFllMhvHoAB0uEW8tLjTKpGhSJMSgikmChopKipJ0qVHHwkenSYpRBCY+M2iKQ8kqI3KHGBjZx9za\nMkv1dUxjB5FqE6GiKxqK6BF0A6zKODPNRfyXHuOuu97Np//7f2eo3+eWsTEURcHxPJ7/wQ94z4c/\nzOjoKLquXxaH29zcHMePLzE5ecPLDpiBgRFOnTrCddfNkkql+OQn70dVxxgYOMS2bYM8+eRf0+02\nOHDgBqx0wvHjP+Da696OlDG12grd7gwf/OC/QlVVpJScPHmW4eER+v0ucbyHOI4IwxZxLIE14rhG\nkuSx7d3EsQMsAwkggVUcYjSKSAqoRMRIoIdBhSj2MBOooLNElR46FgYWRVyWkHToEJEiIoNPmx4F\nYIyEBKgD40jEloIoDRQJOY/LKDo1FExagEMDnywhhqaQiATd0LD1As1um4gmffl91EhidIdI1Awb\nrRe5YtdBRkvbEUJhdGiIsxsjNJ1lJrI5BrKbrplctkSzFbO2soZayNFsNl8xO6Rer/PMU08xd/Ys\nVibDNTffzOGrr37dxetlK0aklBHwzldzztmzZ7n//u8SBBZCgGl6/MZv3Mnuf0YHsXe/+w4ef+gJ\nFpvrbMsP0m7X6biSmgY3XvVWmo0Wq50Oo25As/sCRhyTN9KsBgERKnkp8VWdZjKLK0sICijUEZyn\nmCRsI00RHY+ARaq0ZZdFfAJlByhlokRHYCPxcDc3SDDZiccG4KChEWNjUCYgR50cPgsMkaVLnSwh\nKWI0ElRAQ6IhidlMK1EYYJP7iJG0CYnwkx5FIchIFQ2XiSRBS0BoWSKR5ywJq22HJFlCEdtYEk2s\nYA41yrC4ruEoKpkxm2PHjmOauykU9vPVr07z2GN/wn/5L3/ElVde+armPJ1O8zu/82t885sPs7Aw\nCwhGR/Pcc8//+guRaQAQBJt6kY997PJdw9gYTEzA0aObDM2/dNx0yy10Gg1+8Pzz5ITASRIKU1Pc\nd999JEnC88+fZH5+jcHBItdddwdDQ0Ps3LmTp595kW9+3aDVrjFq2USupBV0qZPwFruMTR837rDg\nezSYRGWEGIvN9pT7WcYmlEtcFUNKKAgk4HOBNh6TmGjEKPRZYAIXiywZDFpUcbGo46MjsbHosoFD\nHpgkJiFhBpsaZXyGUeiTkAF8+iioqFtkv0oLF4cCHtemMihxyHKrS095CU332Ta6m9BwqNdbCF2n\n7ldJh3W22QbDxSEcsU5LSTFh7uFbXz3ByuIZrpg0OV8+TxAEjAwPMzoywnNPPcWHfu/3Ltscnz8/\ng2UN/1hgmRAC2x7h7NmLNJttDGOSwcFNVnb79h3cdddv8uKLf006vczb3z7Fb/3WW7lwYZ6FhdOM\njQ1w663/C1NTU7RaLb74xa9z7NgCvl8iis7hu49gKHsRUcimNbeOpo2gaddhWR5SQhwfII4vsikZ\n3gMIIiJUVtDIolAjxiGmgEwCElwc6kRkUFBwyeJykQnApkxETI0ma/hkyLJGjw4h9hZLFiBJAxFg\nAiYaGh5NYnagoGHSR2MAnyVgIQqpGDZxnLAe+5xNBgiUXSSygkwaNJx5hkwYICasz/PiwgVy2TKR\nrrPzyqs5fryBG3o0O3VymQKqopJO51lbW8fMZ19xS6bRaPDFv/xLRuKYq0slvCDgyP33s7G6ynve\n977XdQ+8WbZpfipqtRpf+tLDlMtXY9sZAPr9Dl/4wt/wR3/02z9VbLVnzx7+9f/xIb782Qd4Yfp5\n6hurdMIUt7z71wlDwUvPPkc2NYGnLJHva2RVhYaqUtdzCC2hG4d04zVCxjAMCINFFBYZYZ0p0oTo\nhKjowCgeSwQ0GSASo7hREYmDRgOTFBpj2Myj0UPFpEOdhACNXVgEGOQIGaCOgkuMwiBpzmASUtq6\nYWMk60ADHbEVmRZiopECctSYpkyPKT3PbORhSp+SKvAjBSdWQFVJywQlhkB2MaVPDw+fYdJJAcMQ\npI0M9dU+woLJqVGKxQlGRvaytnacT3ziC/z5n//fr1pZPTw8zB/8wW/Ram2KYt+sIrl/DE8+Cfv2\nQaVyea/jXe/a1I38MhQjmqbxvnvvpX7bbTQaDWzbJpPJYJomuq5z++1v+4lzVFXlN3/zbp577gWq\n1h4u9aq08SjqGgWzQuK0yKY1fMOgEQySF1fRR0MqaRRAiBmiUCVLQpeEQMZYKOjY7MJkmj4lioRY\npMmQJSYhhYLK4NYatk0RnSYuFgkVFKaIyCBQ0EkR0KZPjQIxEaAQYgN9LIxNHpYAmyySq8ghvB49\nJWa3qlEYKNNWY/xonfPdBrncDmCZVsvFDRWWfZeVahs1n6NcvpphTIZzZapumqPHLpHbafCO/fvZ\nWFvj2NISxTB8Yyf1H2BTp/GT15AkEYahMz29wNjY7T/2u0KhyMTEft73vne+HAFx7bXX/sRn3H//\nt6nXcxw+/E6OHDmPoXaIlRph9AIJNiohCQlJYmIYEb2eSxxbJEkM5BCkMcRuItklZpF4i6XKYdJm\nlBAPVa5hEGChsxOFDVwarJPBx6AACFQibMDEIMJnFJ0+GnOE5AhI0SKiiLvlu7HxUYnIAGkEPhIh\nfEoS+oAQOn4SIG2TS2GBWNmBnT+A31exRIU4KdGVJ9lh5MkGEk822LHnWixLp1bboLv2Erl8gefO\nHKFoWAyN76VQHKIvHG686qpXXCAeffpphsOQnds2mW3LMLg2leIHzzzDW2688VWFkv5D/MIUI6dP\nn0GIoZcLEYB0OkejUeKll85y0003vnw8jmMWFhbo9/sMDg4yPLxZcd999114nsdDDz1NfnI/S0se\n8/NNLk3/LUNSRSvsou4K1oI63cBlPQrR7VEGbQ836ULfxs6quN4aOWqUqZIhpoTDOi4hGjlUBkjo\nYbKGTifSSBgA0lgsAJKEdYqsUsYmRmWOiC4GJezNltEoKKiE2IQ0GKJAC5M00RbhB2tAD1BRyNHG\np0+TbUQopIkBA5sEP+6gJx4h0E7AFBqJktAXEEQGgjqCnQQso1NnmClMIvQQPOGiJ0MEUZ0gkCwv\nr7FjxwSmWeHMmaf57ne/y6233kou95Oi1J+GX7Qi5If4xjfgNTS7/JnjXe+C//Sf4D//58t9JW8c\nSqUSs7Nz3H//w7hugqYl3HLLYW6//dZXbPQ2OTnJf/yPf8Bf/MX/JJ+/idbGKGLuIotzM7SjLuNK\nicUEbHOEOJQgE2w9RhE2np/C5DwV/C0qXbJOQEKWmM30TIsMCS45bCQRNjoeMRoGKVwcAgISlpAI\nRpEEQAeBxCDGoMw6iywRkGFzU6AL9AgJCOkQUSCLRp8TtChIOCRUBiwbM2XS7KyzXcAFZ4N1/wKh\nAmDhKRX6sQveKVJC5+C4wDJ0Tp+bxe90GFLznLg4Td/1GEkVaPkO3cuc83Pw4D4ef/wUYTiJrm8u\ncKIoJAjWueKKWzhx4ixB4GJZPy7UktL/J7eVNjY2mJ9vMTg4xcmTT7O4OIsSx2iKRkgbgY6tDyLI\n4yQJrtMkTiySpIMQI8AykhyRTBCkERQAF50KaQIkOh3y2NSpMIpGhKBNmYiQDpI866Qw6QEhWQTb\ngT4BCRIPHZ0UPjFrBERUyWyZXOvEaGwyJR0SImIUGeMRkgbmpY8WCbxeREcrIopjIAUQIxAoSh4/\nCkhlFBwvpnr6e+zIpEiSiAvnjlCWAZmuTdcYZtlpcun0c4S5FPf94W/znrs235VRFJHJ/P07d/78\nefb/g4JDVRQKQrC+vv7LUYy0Wj1M8ycVg7pu0+n8fUZFs9nka5//PNHGBrYQdKRk/NAhfuWee9jY\n2OCFF1a57rp7mZ4+yfnzT1CrObRqC3jlNLq0GSpKEm+IXsMhj4orJvA0hcHRKo2lsxQyWTruAjfq\nESWh8myosSZ1dArECBw6hIR00Oig4OEhMDDoYhGgEpKhxl5UYlw8bLYBl9jAZxx1q0CJt9otbRJ1\nHikiEhSWUEiRELFJHDZRAZNBJLMssIC25QGQOEQQB5gE9NAIpEpbKrRjBUERnxBJHpUWGiEqFhqC\nPDGqqmOpCp04xg8TNE2h03FYWVlhaWke6PA3f/MSR45Mc++9b+fw4UNv0J1w+ZAk8K1vbbIjlxs3\n37wZvNZs/vL0qjlx4iRf+9rTbNt2iHI5TRj6PProiwRByJ13/qtXPOf666/n938/4NFHnyOX28lM\n0ED3lmjVbM4pAbJYYptS4uJGSFFLEWsaQeTiJy5ZuphbRkohBLFMqOHTwkLF4ocui2jrGwuCNg4e\nOiEdMgTkUVCRREgkaVRMFLoYRAgggwmoVIlY20raXEbFxGSMCkVa5NAAySnZYT1UCftQ1jQG0jkG\n0jnGzBZV/xKOsxehTaFE56nIJQZUkH6P/uJJnq0vMqSWGbNtQq9FKo4Iag7LhQE0pcDC9ConTpxk\ncLDMkaeeYn1xkdLICNe/7W1viIhxZGSEX/mV63nooSPAAJuehjo33bSHpaVlcjmdM2eeZv/+zbQ/\nwzBYXr5IJiPxfZ84jl+xIO31emxsNPnGNz5PoxGiaYOoWhoZKhhajlJmAkNYJH4D310ilkOoQiBF\nhJQe4AAZYgRs5YboBCS0cdAIiLCpkkGi4QE6CQYGCWnULV9MFpM0Bi5ldDqEZNjMbA2QrKIyQY4O\nDvsxMPBJCOki2UDQRWIAXWJyJIyyyYyMAlkkvTigk4TktQKDhRzLvUU0xSCM+tgiYo9usdpbpRsE\nJJ0VdEPh1uECS/UWNWAkO0qYHqLuVglHK+y96moe/va3mT19GkVK8pUK77jrLsbHx0nncvQbDdL/\noAD0pXzdTptfmGJk587tHDt2FPhxJ4fn1ZmcPAhsKrK/9dWvUu52Gd+ypUkpOXniBEdGRlivNjh9\neo0nnvgKGxs9hodvplwGrxez0VljW26FvaWrmG32CFKDNNw1QplgWhUaIRy+0WaoWWM50hgKJWEQ\n40Q2QqYYIo2OCRRZYI0GWXT2Am1UZkjootMkpMEePApoxAiW8BFoZIhZ27JrGcxQoo1FgrdF3kX0\nKZCiiMUyHTJABgiI8EgwsCigsY4gIkcXg5ktk2FJLeHFffpy02YYMowX91glhSBPTq1TSHLUZRXB\nJtuiCEFWVYEWSdRGVVO4bp3V1QZCdBkbq3DFFW8jikK+9rUnmJjYtNP9S8aRI1AovLGpq/8YLAtu\nuQWeeALuvfdyX83PH1JKHn30GUZGriCOI5aWLqCqGpXKPp555ji33XYzqVfoWCiE4LbbbuXaa69m\nfX0dy7qb2dlZvvu5zzGazfLizCVm17LsMCRLa21imcELHQx1gcOJQ5AIVggpSQUDhT4e6+QwSQES\nlRTrOIwQMo+CzyAqNg5l+tTpU0XiI1jHoESwlfPjAwpN0gjymOgYbOATERCSZowCaXoMoKBgUiOi\nBLSJqUUOM72Y63N5wqhHX3aIlTxqEhNFlxiiwXa1yEA2RbWzRKofQbiCOphHNXT6XouKIsiqNpf8\nDocO3oCWL/D5zz/AmBmyK5vlUD5Pc2WFBz/1Kd7xgQ9wxavUh70W3HTTjezdu4eZmVlgMyTx0UeP\nkyQ1osjixIkTPP74IwwM7EaIHqYZcM01N/OJT3ybQgHuu++uH3P6bTY9/TueffYYy8ujZDJTBEGT\nRCjoepYkMej2q4woaXRpYErwmEeTeUJCYIHNyMofZnMmSAIkAWnqmCSYqNhIHPr0yZOliIKOxCeF\npEOPEjYGCjYRDhARM0bAOhILwQQChxALkzoWFvbWaD0y+MwSYwLprWCIOpsl0gEECgo2MRdki87a\ncYx4DIWAKLZR5AaTZkxWQEeR3Do4iGpHXHv11SwfO4Z0wFEV9u7aTiIlEduZNyw+/+kvcc+hHdyy\nbRuqorDRbPKNz3yGD/ybf8M1N9/MY5/7HAO5HNpW8bdSq8HAAOPj469r/n9hipF9+/YxOnqMhYUz\nDA1NArC+PsvkpMWuXZu20Gq1SndxkSt/xB8vhGBvpcLT3/0uZ5e6dLt5PC+hWLyGXi8mDGP2HryG\nqHGeATtNq3WJIIKO0iPMDjC+6yosy2LnwDiTkx6r3/8udjrN+WqVvutRS0a25E8tChj46NQpEWNs\n7QTnAY0MZ5jY6kHQI0MDgU245arRUdARzGDTYy8OFTZb9cQ02CCmBizhYaK9XBVbWzxKjw4OCgYW\nGg0ifNKiQoYs88zjxSEasICBQ0SXGi4GUuRJ5Dw58liqgh1laDOPQZF0kibwe0i5hG6orK8fQUqB\nqroMDSncdtt9qKqGqmpAienpC9xww/Vv2P1wOfDlL8N9913uq/h73HbbZjT8L0MxEoYhrZZDvz/N\n9PQsUAQiNO0opdKmuP3QoUNor9DeHCCTybxMNw8NDXHu+HHMapUrd+1gqXqavitR8BnQHdrJOpa7\nQj7Z3N+vIVgloYfGGhDj06BKQBeDFhAyjU9IBZMMARFlDHKU6eCTByQz1HGBCSTQZwmLFnWyNGgD\nghZpPCwENio+afqoWKzjk8FlEkGAJAdUNY2FuMdwqYzTzZFddxmS63RlzLDw0dUCQRxj6AmOt0xK\nKdLoLxPJPkW5TtrOk7JTaBrUZcD4vrfw4pGvc/MN+xjb6gFWKZXIplJ876GH2H/gwCsyDz9rlEol\nSqUSvV6Pj33sM5TL12CaKZ555jny+VswjAWmpjKcP38JVd3N0NBV5PN52u0an/3s1/l3/+5/Z2Nj\ng+9//yjPP3+KmRmXTKaCojQRooJpVmh7JwjCi6iKSpw0aEZNTDWDLWwS6ZETCa6MUSgi6ZBwChhm\nM6asQ8QqaSQjBOQZIMbHYRCXOg4KafJIhnGYY5UAhxUaaPioCFS24dNEMgWEKKxhEuHTR6JS2LIM\nJ4TkSbGKQo9lEhQ2uTiAvWzmw0pi5oCdSpeWuIDoBRSESdPbwBRVhtSYi60mhiaYGBzEsyykpuGy\nmXgqkwTLtlGEYKG5jsyUiWsddlYqzCwvc2l+frMRl2Vx9Omnee/730/tzjt5+rHHyEpJKCVqucyv\nfuhDr/v+eF3FiBDiw1LK//G6ruCfCcMw+PCH7+Ppp49y/PiLKIrgne88wI033vDyA8j3ffRXsBdZ\nhsHc7CWKwzeysjJHEARkMmlsG/r9GrbtcM1tt3Ls6W9Q63eohxrW0AGu3X0l+/btoF5fYWbmDM1m\ni2Ynot+LMKOYRcYJ2UdMhnUaNKlSJmQAm0XmCYgBi5geaTxMFOoMEVIij0qdEJcqWQQ9TNII/j/u\n3jRKsrO88/y9d4+4sUdkRu6VmVWqUqlUm0q7kFglaAlrZLcHm56xDeNj+wOGMYf2mfY53TM+TDc+\nbvdpz7HbM4MbmsZuYBi2QVi0MQitaC1ttVdlZeW+Rsa+3P2+8yGCAqEFmkUF/n+KEzci6s26N+I+\n7/P8lwIOI0hMQhIIzIFqJh54hJwhHgRS97UzMUmy+ECPOh10DDL4uNIjpQ0RxVli2cNDZZIUARHz\nxMAqgUySZBKJShQ5QBKFNVpKjbZIIMOAvKZT03SmpzdJpVIUiwe44YY7SSa/R24SQsP3/TfgKrhy\nCEP4/Ofh8cev9Eq+h9tvhw996Eqv4qeHVquFlJJsNvuKY7quEwRtTpzYpFC4hkplnu3tFer1beJ4\ni0ymRLn8JL/6q3ex9/taV57ncfbsWbZWV1EMA6EoPP/Ms8ydOs3C2dPko4iwskNca5Awcwwnc+wp\nwflLfaP3EhFpEoTE9NAQA55InhUUInw0HLK0KJNiFwoWKj367A+FmBwRCxxF8iwX2aSHSgHBNDFX\ns8rqgGkWEZBliBprNAnwMYkQeAR0mUDiAqaiEugaE4bB8VaL53o9UqHkmvIYl9Za6CJBAY8oXsPr\nGmSMLrmUZM3bwCIklVQ4VddZckxip05PlzCT4mB+hKhbp/wD3c1UIoHc2aHRaPxEfID/VjzzzDOc\nO1cll5vHsjQ2N9uUSnvo9SxWV5+lWDyCEDbz80scPXoQ1+3xwgsL/Mt/+b/T6cDU1E1Uq0lcN0uz\nuUQikULT1ghDH8vqoutput1RFNUlly/TCxbQ/HlyUZcwnMSUKgYJJA4agpgaPUICEgiliBNvoyFw\naBBgIRnBIGKdBg1UQrp4QAEVnYBtikQMM0aCDZqUWL6slPEIEBjkCamyhSSBgkaMRoWIApAbXFEG\n/T7NdzVHAf2b+H4h6Vg+qckuFa9OZXMNW9XYn8uhAsvtFvVmE7VUIpvNUtq1iycXH8co7UHKmM1W\nnaphEcmIfSNZHn/+eVrLy4wnk6hCML++zte/9CXuuvtu7njLWzh67Bibm5tYlsX4+PhrynodxyGK\nopfxTl4LP2ln5KPAG1KMACSTSd7xjrfwjne85VWPDw8P42karu9jfZ/KY21nB9XOMTq6ByFMVlb+\njlZrAU3L4DhblMtjZLMZQkVw+92/z6lTp0mljrK5ucWzz34aVc3j+wGl0jDb26D0QlqhiScniFAR\n6GhkUdHpskTENhnSdBH0kEg02licBXZTpo1KF4jQaTHBNuuMMkqbNQxUEijo+CQHhYgJhIQMo5JB\n5TTaQP5lUECQRFAZNBb3Y6Ih2aBKJWyi4JHHH7SOA7ZEQEcGxBRQcAnEDorI0FUcmtEqGSKGVAPC\nNoGMacY54iDHynzEwWOVbjKiAAAgAElEQVRlKpXNl8nv+m6IO8zM3P4zPPNXHt/+dl9O+yOoyN8w\nXH89nD/fd4T9MTjEP1f4m49/nNryMkII8pOTvPO++yiXy5ePCyHIZFK4bpu5uScJwzKt1ihxPI6q\nLrC8vMnY2F3863/9H9i9exe2bTM2luf4ww8i19aIOx3WKxXOr26wK1cmbSYZ6zVZdTpYisGd2RJO\n4LLR2mIksFjwJWcQqGjYWHTR2CZCp0GJNDMkKeKyhuQ0eQw0dBTsgV27j4IYhMd38VnGpEYKwS0Y\npAgJiAjwsRDsoCIRVNkkg42kSoUtIsYGlogOMQ6SbKzixCqGbhI7DpdWmxTbMQveNn7kIfDYosMI\nPkKFq7MpssPDCEXhfK3GeifJ+NA11HsKXc0gn01Q29jiqaceIp210b/PWA763+9ASswfeP5niePH\nn+czn/mvrK0puK5kZ2eRRqNGPj+NEAq9nksqlcQwUlSrW5w8+RQXL27iunlWVzfJZLLE8RyGYZFI\nJCiXZ2i15kkmd2FZGVZWHkXXy+h6g9CtU+94xEISBgoZOUovrqCzQ0AGk2EMbHRGUWlSYwkYx1dG\nWIpbJNgij0TQQZAlwKGFzzgeQ3hoKFwgS8wMKjqrdNBQkNgcJ0AgSRMQouBgEKNjYrNFHZMOE2iU\n6RcdCjAHpOlvRjXApz9IEnFMpdeEiootBDnbZjmKiHUT1ARlq8jF7RUa0UVuuO02MhMTmNcfZXWz\nwbdXzlMY3U1xbIShIUGwkqS2tMTRYvHyb/2E5+F0Opw5c4bDhw+TTqdJJpMsLi7y7DPPkMlm2b17\n92V1Zbvd5ltf/zoLp06hSEl+fJy3/9Ivve55/6HFiBDi5OscfqUjys8IUkouXrzIc8+dwnV9DhzY\nzaFDB1/2JTFNkzfdfTePf/nL7M5kSCeTbDUabAA3v/l25uYa7Nt3FFVVePjhRzFNg1KpxNRUma99\n7ZNIaXHmzBKe57C29hVaLZtWq0OpNIOu+6wu+0SeSeiUcKmhMoKBIERFo0CERY8XGcVlmhQ1Kqwj\nqDCJxz56tBBoDCNxEDSADiUiHEBHwSRApYtLCgUXBYWIkAgdSGFhoLEbwSWaXCAYUKsi2vjMMna5\nayKZJgG06HABB2uQHOlIhyQjZMihoeIpDdrxEmHsksZnSrVIYWKpKSrCI1KnGEvvJ5XU2VrTGJko\n8vd//xne+tZfIY4jGo1FbrpphvGB1OsfKz73Ofhn/+xKr+LlME04dgyefBLe+c4rvZqfDLlqlQOD\n8er6zg5f+OQned+HPvSyHdXQUJmRkSqnT+uEoSQIoFgsoao2m5vLPPnkP1Ctmth2miBo8Zf/7uPk\n/TrjRsC4At1Ol5v0BI36FlW3R1poZBBUjYCOEAwrKsu+x1yjS5IEK2h4jA4KhW3GBt/WkAqrA4m9\nSZISSbaJcKmhoJAfPO/SRKFGHoWLhITkiAlxqSPR6E//RwaZMxKNCJUWU4wSU+AiF1jCR0EliU5u\nUOgo0majEdC10vgiheN0kXEGSQMPhzYhHi6jUcx6VzBfraJNznCpWicIijQNH00FQ7PptFTa7jIO\nNfZfPc03nniSe25/0+Wb0IX1daYPHfqRdrY/DXQ6Hb761UfYu/dtbGw8TCJhMzQ0zcZGk1ptC0Vp\nMjs7S7VaQ0qVZNJnfn6bQuEo29vzJJNlyuVrWV09weSkzdraSVR1BlV1SCQ6tFoVGo1zKMo0mibQ\nzAyd3g4GwyiUULQkUZQdkI0DQooExAjOAx1UsY8o7nvRGKTxsGmzSIY2EWVCfMboK6JqDBNjDOLx\nFAQmgik82gN7f8kMHi4qbQxUoEKGBjEpFHKkgA4CmKTPE9GBg/THNVX6SpsaYMgIG8g6DvU4ZiiT\n4YzrMdd2mSwV2PBcTml5pFLis8++yO984Hf5d3/4hwghOH36LO12l+npCWZmZvhfP/IRjF4PCn0z\ntNMbG5xaXyddKPDxP/1TfvODH+TgoUN88W//FmdlhfTA++fhQoH//n3vI5/P84VPf5pkpXKZd7JZ\nq/GlT3zidc/9j9IZGQbeBdRf5dgTP8L7fyr45je/zUMPnSWdnkLTsly48CLPPXeK97//vS8rSCam\npjB37eErTz6HpcXccsct/A/33IOUkgsXPkurVWDPnsOk03meeeZhXLfC6uoShlFicvJtmGaSVMpl\naekBikWLOC4zNlbmzJmLEGQQCAxh40qBQRrwsHCIcYnpMESLWbKE6OgYjDOMoMsmIyjEVMjSpkIO\niYaJQhcI2cIhHNS6GwSMD36aOkhWicgBKiY9YlL4jKKywhgNUqSQlNhG0GQHA8ksKhoJAppk8ZjC\nYw6DLBa7CNjEJUQoPboySxCnUVHxqdNhizEpEWYeL5LkyeMGLXZl93KptcOhQ29nZeVJKpXHiWPB\ntdfuxvd9/uRP/grbtrjttqNcd91P7sb38wTH6buu/pt/c6VX8krcfjs89tgvfjEy/n3S0rFSidry\nMmdOneLGm2++/Py11+7m/vuf4sCBo9RqDrVagGWl6PWqSJlma6tFuXwzFy+eYGWxgvTGcd0h5nst\ntrUKcbfGKG18JFkkJQJ8obAVOiybCYpmgpofASUqeEgMMqRps8QMCjbDSFqoeOSIeYqYMgE9uoQU\n8egQcokEOUICJMvk6GKQJk8PH0mHOjHDQAsoDcwTv9snLaJwFkmAggFMYBBSJcUCS+zBJImPH0as\n4LEa2kQ+WDImRZsNYgKG0Rljmy5NUccVIZPZMpfmBSYlCum95BImHW+LuLdDMTtGNlnmmsOHuO6m\nf8Jj3/oE8sUX2VUq0Y1j8rt3c9c997xh18HKygpxnCGfH2b//r2cOfMSuj5KIgFzc49w662Hue66\nN/PQQw+ws7PJvn1j1Go5Go1NhoYS9IOGJVGU4bnnTqOqGarV03S7VcKwguuuYRhdEgkLzwsJwxSu\nVHFkB8E2XUbxiUiyG51lbAQRKgEFOgTElFCJMAgQRORJ4aKisEUFnwCLHRw8yujkkPiDeDuAWQQh\nNk2ylAb6nGVGUXGJmMfGIAuYFFAxCfHpK0U9+gVI39pSMA7kkawC24BLn52oxjG6EMx1u4yj4Qc+\nj29u4SmT2NlDlPYcYa12jqXVHd6eTqOqKrfeevPLzsGd997Lt1dXuVivs9FsUqtWecvu3aiKAqkU\nL9x/P48/9BAjjsMN38fPXK1UeOBLX+LWt72NYGODq77v2EihQHtt7XXP/Y9SjDwApKSUL/zgASHE\nIz/C+18TQog/B44Bz0sp/+C1XlepVHjkkZNMTd0yIExCLjfE4uKLvPTSCW688Qagbyn8yU/+f+j6\nBNce+VXa7RrnL65yW7vNzMwM73vfvXzlK99kZeUcUsbcd9+N3H77DXzqU3+HorjMnfk2im5jJUdQ\n1TF8v4Kuhwih4DkeeStNt+eAiFClIGINQR6JAngozJEjoI2HJEVMCg2TAjFbqMS4CFQcsjhEaLTI\n00VlixZbjBOjoNAmxRw+DgENBDqjdFBYxyWNxxgRNVKMkaKFRXuwr7LR2KCLhY+HSkx6QIYbw6VK\nSA+FFFKk6ak9NJEjkjlSwiaOFVQ5Ql3YrMRzTAVNQlLoSHRdoGsmIAmCgNXVOrncAWy7zN/8zROo\nasA73nEvUWTxxS8+w/r6Fvfee/dPcmn8XOHrX4frroOxsSu9klfi9tvhYx+70qv46SNrWVS3Xx6e\nePToEcrlzzM/v4RljeF5VaKoyfBwnlbrIpaVZHn5SbrdGiKcIPAdTCWFjokXaURUcPDJCkEKBVPG\nJIGkjKn7Hk8EIetxAg8LD4lOkhbOQDExREiDmAx9b8wGCg6XyNOjQcQ0MVlSXMBgDYMGWSLKWGio\nBCh0qdJjiJgJ+tTFAgoOoKGjIvuDGOqs00PHJUVAGxWLFQ7SYAmbgJCYNjl0OY5wWzQHGcM+EygI\nAhQ0soxaU9SCOfSWSjKyMCyJKx2224KunySKHLpxBzPTYHL2WhIJm2O3vod0ep1b3nEHmUyG0dHR\nl41lf9boh9tJAPbtO0KpVGZl5RLJZIJez2RqKkmtdppjx8YolVI899xpPA+OHLmJPXtmeeml06yt\nbbO+vkk2W2Jk5CoqlS+Qy6VJJIpUqwaq2qNWO07KPIZUFKRMAg0kwwSBCvi4OIDE7jt24KHjYoDc\nJoVJCYUesINEohNisIZA0sKjgCRNiEL/L/HpG7ynkXRI0k9g9smzzjYaEjnYUoZ0sOjh0iGBgoUx\ncA3xBr5SfcfteQQRGmMoRASsD9y4O3FMU9PoxTEHRYTUDRpikrQYZssP2ZMbRdNCLlxoMD8//zJ+\n1Xdx3fXXc/Laa9mXTLLwjW9woFAg7HapKgo3zc6SsG3+8ktf4gM/kIkxMTTE4vIyi4uLpF7lmin8\nEJftH1qMSCn/p9c59t4f9v7XghDiOsCWUt4hhPg/hRDXSymPv9prV1dXgfzlQuS7yOcnOHFijhtv\nvAEpJfff/y0ymf1ks/1dlm1naTbTfO1rD/LBD/42MzMzfPjDv0OtVkPTNLLZLBcuXODCyZfoXlrF\n2mgiSVBRVXbCHAm7xdRUmXptkdiv0Y5MwtBDk9skUXHZRrKIxEJjgxSNwfyw13dzJD0wb49Q6BBj\nI8hg4ZNAxSdFl0U0kmRokUVllRyQRBDSw8VhgjwpYlxcJF0uUWSbNjpJNEIMfDJs0yI7YKj0AG3w\nCQJBRA1JBkmXNgEJ1cI0HbpeAkUmUaSKJ318VcXWx6n464zGXSxFQ5ohmWyJeq9JIptiaWkFITT2\n7buZc+cuYtvXoighp049z6FD1zM0tJennjrBrbfeSOkKGyn9tPCZz8B7f+wr/WeLW2+F554Dz+uP\nbf6xoOG6HPiB/KNkMsm/+Bcf4KMf/Wtct0M+30HXc6TTSVZXL3DxYgQkCMMUob+FJhRM2SWrxmii\nr1Sp0GVUQkpVaEYxNRmxhUYjtmmh08PGIkWODjY1KnSI0YmRgyjLDCkqpNDp4gMdHFSSPEeWAIOQ\nkO+aEfYtvHu4uMTsBrZZAbIog02IioYgTUiTJIIuHSQGKjOoSBTG8YiRtOnxDpyBti4gJiGbjNFg\niAyL1EnTxKSfltJDpx3ZhFqWnU6IH3joaYud+hLIvRhKijDu0Ao8SlaCdLqA63ZZX79Euz3P/v17\nGR4efkMLEehnien63+M4HRKJFMXiKMXiKAsLz/PLv3wvIyNlHn74O8zNrdFouNx991s4fnyeyclp\ndF3n0KFraDSeoNF4EUXJs7DwOHE8iW3PEMeCKGpjeOvMxC4p/yK90GWDJi1uQBW7ieUyggIxq/SI\n8DGRCCIm6Q9EBEkcJAoKSXrExJfTZHxiplAYRmMPMSukeAIbnTZrBGg4JAfC7gCJiaBIgIakhyAi\noIFJmzY+KSIkCQIkdfrj9745nsAjwRAR8UD8oGOxRY9CrNP2I8qJNCv4rDkeGCbJpIGM4OmnH6ZY\nTDA6Os2FCwuvWozkcjnuuO8+/o9/9a/YnJsja1msKwpmsUi71SKbzWKEIX4Y8oPOX5oQ2LZNN45f\n8bn1Tud1z/2VlPbeBPzD4PG3gFuAVy1G+mqZV/5xYRgghORb33qIxx57jkceOc6hQ3dgmkksq+85\nkM2WWF4+R2vwnyiEeBkrfHl5mdaZ51DbRXLWEK7rMCbBkav4MqaYnqB54Zvk0en568TSGeTDdPFw\nkSQIWSeHP7gcLcZRMfGoskKIoEoXSQYVmywuI0SYxPjAJiYuDRxiTpNDMIaKCdQIGSckQRWDAqBh\n4jPGHFUkFttYuKQxUFC5ihXOo1InoIFOgoAkBWxcqjRZwx9U5pHiMlQcRmsbdHsmbugSawmkkqAT\n7CBJ82zUJSl6uMESXjuFruS55dYbOXXqGd785rvQdYOtrSqmmWdz8yzHn3mcF554hIQRk8lZPH3b\nXu55A9u7Pyvs7MCDD8Kn3jCa9n8b0um+78nzz8Mtt/zw1/+8Yml7m6mBpHRle5tuOs2Ba78Xnug4\nDs8//wIvvnie6ekc1WqV2dlpVleXuXjxcdrtHqnUYYTQaTR6xGQJ4goN1UHGEj120JCcQ9IENqOA\nBrBGApVpIIEC6FSYZZssJjYeKSRrxPSoYjKMTY0cCVx6+KQZAiyaGAiuwkYQ0KPHJi5nsVDQ6PZZ\nWFiYJAlx2SZGINlGMoskJqaFC8Ts4DKCTgcFCCkgySNpI+gQ0wZMJKskqDGKjUObWSJSaAMSfYxN\njw08NEXBkx1SWp6rs8NIv0PHW6bhecSySjE7y/jEbZw9+wLNZptKJebqq/fw6KPrPPbYS7z//fex\n6/va7T9rJBIJ3vOed/K5z30DKUuoqoHn7XDNNSX27NnNn/zJXzA318ay0gwNDVGvb2IYDmtrT6Oq\nQ0gpmJgI2bVLR1EmgQymeZggiFhZOU3QmWNW1jFkEjWOsBSNRJRkjh26coJ+KJ5Hn51hERLQH4C0\nEQhMNi9LDDrExHhkOEDADoIQixw+KhE9hlmmTG7gp+oR0YOBhFcnIKRDnn7SZRcHiWCCNkeJWSdk\na8A46YcvqpQHypoEOhEq24RsoFHBQDJGiE4vjNmmSdd1KCbTIBzU0KNa7+DLDIliiqmpQ5w7d5KX\nXgp597u/N9+t1Wo8+OBjnDw5x/raGpV6j6aWZhOTfMJgMpdn7sQJikNDWKUSZ5eWcLtdut0upWKR\nsZERRCrFsWPHOP3ss1za2GBmZAQhBDvNJptSvu65v5LFSA64NHjcBA681gtnZ2fR9W9frpYB4jii\nXp/H81wuXQopFK7DshwuXmyztfVN7rjjn6DrxiBfIHpFhsry8jJPPHGc//LxjxM2HFQVDCOHaRZx\nnG3GDY9gYor6yQe5u5jnkmhR7zbB7TBHSMgEaVRa5NEYRiWiQx2fHSQeKRIEOGxQocoM0MPAo4g6\nkOQG+GwzRQ+dkAYKNWw8htHJ46DjAxF5oEt9YAwcksIngY5DiwQ6BTRCfHYQKPhoJFklxkOlQEiM\nTYiNYJMWpqiRKSXpxBEdt0MYXk0Y2yS1BBDSiddBJtGS11PYVUI1AwyrwZvffDVTUyk0bYrp6f0A\nJBImp08/y9aGgx5OsssewQs7LM0/w7/93/6M5eUt7r33nb/Q5NbPfhbe/W54FbXpzw3e9Ka+5PgX\nuRgJJid5dG4OAYzs2cOvvfvdl03MPM/jP/2n/4f1dUGxOEU2O0a1epxe+xw3H5nC6WRIJG7Ctkc4\nfvxZhNhC1xN4rkRJDOHrSXZaT5LQNcYDlbKIWJYJKmRxuAqbNA5thmjSF7jraMSDUU1AgQIrVCnR\nwsanRkwFiUEZlRoFIhRsFHQ0IrIkmEOjyy6yWNjoCFTWOD2IyRP4KKzTwsMfaOZ6hDhIhpDk8dlE\nDnwpoDcY4ZwiQQh4GGxiE6ANBgjjQGdgoBijkMDGooUre6QUh5mJIq2mi6WX0KWHDM6QMHOMJHK0\nVxd4emOVkbE3Mzyc4uDBoyQSCVqtEl/+8jf4gz/4nTe0Q7J//34+/OERzp49T6/nMDNzjJmZGf7q\nrz7Od76zTrl8jCiyuHRpm0SiwfR0kV//9dsJw4goipmauoMPfOA8cbwHzzuP63ap1TpACTPqkbV2\n4zhzRFFATNC3QxAuntgmijtAAkmXfidEpS+qrZGmzRQFaizRRiXLOOogfKOf6FtGEqHgIFhliH6H\nDByKTFKlRRKTKgYxCxTxkeTYIKCLis0Os7hs0O+zXIVCHVgkGGw5k9Tp0UZgE+EhWUIBRkkxSo8e\nPqAyxlawiutvc9fkCC/WmyjhNEJYYGoEgYPjrPO1r12k241461tv5MiRg3zyk/8vnjfM6OjtPPQP\nn6G2WSZrJ/Fw0bQUpxe3gBYnNzcJR0f51uOPc10mQzmdZmNzkydOneIDH/sYpmnynve9j2/cfz+P\nnT+PAtjDw9z3nvfwBx/96Gue9ytZjDSB7woSs3zP5u4y/viP//jy49nZWS5depGtrQyKohPHNWZm\nEiwvm4yM7EbTdKamxlhf92m1AjY3F5ic3MfGxjyHDs28zKr25MlTfO5zD5JITLK15ZPXR4jDEMdb\nwI1DdE1FUxX8IOb6yXFumJnhgOfxd99+GNP12CRFFYseOjBKgi5JdBx0QpI4rNIkZguLGkVi6ugs\nozOCwTCSEIc6Y/j0TXUDQoawsFjkEj5XI0gQ0hq4C/gojAAmKut0sLFJorNDPLDKEfTQqZMmYgyN\niBCHChE+CZI0CEjS5VgqjZkr4QRd2qMBC5tncMI9BLJDFFWQAopD4xw+eifj4zo33niU5eWT3Hvv\nAW688Qb+43/8L1Qq65RK45TLaR56aIvIKWDFPjvby6x2KsAuvG6WRx9aZW3tC/zmb979qu3AXwR8\n6lPwZ392pVfx+rjttr7a5w//8Eqv5MfHr/3Wb+E4DlLKVzipnjhxkvX1mF27jgCwtbVMd/kSydYG\n5XwG79ISjcAifdUkw8N7CIISOzsvQdjE9SWRMLHzCWaVFLNCsNZo4XuTJICINC4mKXSqdBgmxEAj\ngY+KTQ8HQYUuBk2yBOyQJIOFhYoxaJJ7A0sqHR1JgxDJMBaJQVNe4NJgHJuYBjEGCVSSwDI7dFBJ\nMYFKGYFDQJIe0wRsozFMTAfJDhodBD1MMiSYpU2XTWqUgRwWkjZtQiKRRtUMYnWTm99yL/b2IuNj\nkvNrbeoXF9EDOFAcQubLzE5McGbhJNX6FpM3FbjuuiOoqsry8jKdZpN2d4Hl5eU3tDsCkM/nX0as\nbDabfPObLzA8fBO23fdCMc009fol1tdruK7Pm9506+XXXnXVXpaWHAxDY2XlBFE0QxSFJEwbTZOY\npk4UuGjCwA1bKEIhmYiQcYGeB1Ec0e/GZ+kPR0YJWKJDhRE1w1LUwiaJpJ+r3LeiaxGTxcInpEZf\n69Kh798UkkOjS4MaaXqk8fCpEBMRs4sOSRwCoDj4tAQqZcBEsIVGTJZ40BURWFg02EbBIoNH3xu2\nhYEphkHG+F4NP5SU0oL11jJSG0aNepw69Ry7dl1HIjFCtWrzF3/xAF7v3zNcvoHrrr8Wx3FptSNG\nCgdxvIv4+ZhTtXXqOx3iuMq7Dh2k0WgwlMlg5HKIbJY909PstW3WFxbgttvIZrO85zd+g263SxiG\nZDKZH1rQXsli5Eng94AvAG/nVfxK/uiP/oj19XUURWF8fBzHcbh06RK+7zM5OclXv/pfOX78HIbR\nRoiYsbEiqZTH5qbDmTPPE8dNJiYs7r77e/rmMAz52tceplw+gqKo6NkxetVz+K0ddqSCopcxkmka\ncZdCsoI9lUfXdZxKBZOYhBJhxAZpQnxGibFxcQgH1ryQpkcCG0mMh8EaJRoUUKixSheQJAmpD1Ip\nuriDWbRGjiQtdmihkUVjBYUWGUoY1OnSxmFroAPooAykvwIFmxwZFukM3EkyWKQGOaAdmgQ0KZGn\nYwyzvNVgXO2SEj7TNqy0L4BI4cUBplaiXNiLoihoWt9Rz7ZLLCysceONN3DvvXfyyU9+geXlOkHQ\nVwJ12vNEqs5GZxtdTJIxU6xv1JjaC4XCQb761Qf5yEf2/MIpbF58EapVeNvbrvRKXh+33Qa///t9\no8Q3eMT/U8VrZVucO7dAJtOPSY7jiPkXH2ZfMoMHiCjimokytbkKjcoSoCCcJXbj4IhV0tksbdmj\nroSUzAhX6CzKBFLY+LKLICCJiYVCDZ2YFrmB7fYOCltY+BSRCKBJg5AiPgoh/aySLt3L4ZR1YiDA\nQ2McD0kTSUyIpIvEJkQCPQRDmGhYVHGxyTCEgzfogDiYpImJsXGIaaLQxSAmYpwCGRIDwa9HhnXm\nuEo1yYuQSHHxjRSxEpGyMtgyxldNdmUy/Mrbb+evv3g/lTWNTHKIZb/J8ws7SNnFTtrEoUccxzz5\n0MNoTo+ErrNdPc/nP/EJ3vfBDzI8/IY5ObwC6+vrJBJD9Hrh5eeiKMJ1Y+bmnuKBByyWlpbobG3Q\nqFRYvLDEzL67OXBgD93uF6jVFokiCycIcVhldHiWVmMDRTZot7t0hIGMU3hBEyl3EMJBymuAWUBH\nsIbKNDVqqFETH5MtXGJiHLLoWAi6qFRRSeNj02GDIVo4QBWJIEdrYBWvYKOzTRETnwCNJhoRDpLS\n4F8ESQcD0MjCgH+Yw6COpEMFDQ+DFhKJg46Ci0FSVjFoQ+zw7GaLdCKPaY4wMZmm67Uw5DWUy0c4\n+9K3aM/3SEnJXGOZat7AqQdMX3MN6dwwnYaHJrKkbYGq5FGiPE40z/UHD/L8U09xzdAQ8/U6B44e\nJZlMEscxj5w/TxzHl3/rbfuVeXKvhStWjEgpXxBCuEKIR4EXXo28+qd/+n/j+xYgSaVC3vveX+Lw\n4X4g2/z8PN/5zkmEyJPP70bKmNXVDcbGkhw6NM6115rceefb2LVr18tugjs7OziOQqmUIggCnHaN\nVquCHxtk2IWIdCqtgCA1TaEEDSVgs91mZ3ubWEriuE8ltQY/TzomLYoIKmQJCekR06aBTgODMbaY\nxUfSn0K2OU9IkogAmxgfA4cUIBF4KPTN4y0EUCNPE58mMQZDQEiTRfKEXEVmICtzaOKxRIoQhQLn\naZMgwAAUfCpodChSpAhtQVf6LAofLW5wrJijkLNIqKOsdWqc76nUduoIM+L66/tGZo7TpljskwlL\npRL/9J/eyblzF1hZcRFsY1tTWFKA9NDUUeK4hRm4bGxukkrlWFkJqdfrb6iD408D//k/w2/9Fvy8\n11ATE2DbcOEC7Nt3pVfz00cyaeH73sBr6CXWLjxHoJsoimDv/jw3HbmGxa1nOL19HC1RJtNeRHca\nJI0CeWuGYdFj0Z2n2XE4YOscLBXpNZNUuz4rbA24WAo2FbJohDjYxMxRJM0Em5gIRgZKt9OssEkJ\nGxWVNkVceoyikcYnps0OEQ26GIP4hSYRNi4+JoIs0BrcPCQmAQptapxCo4VBhMkQAUUUtgB/kHo1\ngsMaMTmSBCAUdBCYZskAACAASURBVFRiPce2n+FJWaMcg4wVYs2nouqkkjnG4ohnKsucSrjsPXAV\nbz5wFS86L/DQ6gKhtourp65hdmyE4+eOc/z5E6ytrJJwXcaGhpCxy8FdRa5OJPj7r3yF3/y937ti\n14BhGIyMFJibaxIEGTRNZ2HhHJVKDcsq4PTyfPk//C3XlpNcd3A/UdHkwUe/xOFb/zvuuOMunnzy\nBI3G06Ryu6lKF5wecdggxGTHshkZOkSrV0H4DTy/TRjmQO5CESpBFCCx8aigkqJORJ69tEjgoQKb\nBFRIY6OzREAeDZU6DVQctimgYAMxEUVMcsScZBpJkTYKAQYREX2Sqo7OMpJw0DUR2Li00aiRRkGg\n0sNlB4UubTRMLPbiY6MiSeCi02AIyWQqyfCuUVbaEWHTZdvrMTK7i4Xzz5L1O0xnUqQTCbphEd/t\nQafD4vnzpNIWXZmitrVIz0uzud0kISXX7N+DnUwSSIkiRN/LqtUimUzihyGGZf3YI70rmk3zenJe\ngFzu6OXI6Ha7zqc//VU+8pHfxrZtHnroKWZnb6FeP47rtrCsDIXCOEtLJzl0SOdXfuV3X1XNYRgG\nUgasra3z7LMv4GyuY4YmnjpDTzHRDYNU5OFZGkIb5bn1FzjhreOtLxJ2u4SAS48ObbosIcmhUKKJ\nPyhSGqTwsTAosUGRmFH0AblVcImALbo0EMyjopNADuKPfLo00XHZIKbOKFskgTTrhEQEKCTQsEnS\nATr4qKRRGSdgjQ4+DgKF/YQYOGwOONkm6UFWTss/z3W2jefaiKhLrdFBWBErUQeiLHrcwHcN8mIX\nixcvkkpZwBZHjtzJwsICD3z+86gDVvTF9U3Stk2l0caSWXwFVGJ6UYchS6JF0cDiO3wFZ+fnHb7f\n54s8+eSVXsmPhttu6/NG/jEWI8eOHeT48fs5e3KJxtmnmfQ9xlWNSqfCS+cT3P22t/Hu2w9Re+I4\njt/CVjfxFEkiMUEUdZGyQ8H36cqIQqhiZlROdDzSwiQje7S5SBfBEBXS9Ngh5jwabYaIseigI3BQ\nBvZTO9jUURADNpaGRo9V1lhHQTCBQZotWqgIJlEp0mUJj0UKOGgodDFpkGITEOhMIEkzhIGLww6b\n1ACbPJMIugRUkXgYLNIFhExTU0zScos8LqpQ2FLAlwZ+ZNKJi0wxxRMXNuk2qqzqPv/XZz9Ly3Fw\no4ji0C6G8wfZabqcvLSKT5LY22D+5FmuLU8wV5tHs3vc8+v3MFYscml5mVarReYKWf1OTU0xOmqi\n61nm5lZpNDpsbzcwjDZvf/tbqa+cZcJIcfbkMq0apFJJZs0OZ1/6IvsOX4/nvUg+P0EudyO6rlKp\nv0Q7rKEaE0xMTuC5KfbtnqLRWOLixfN43iVgcaCAFIBBQESEgsooKgER3mDQngLW6SExyQATxCwx\nhMYOu/G5CkEOiYvCJhodQkwCIuboDkS/UMWiSY6INlehkBxYXlZpsAwIUrTwsPDpEgzYgglianRZ\nRDCDjUGbZSaoE9LDd2POLC4wNlxmvVOh0QvxFp4i7nnkBXRDCyuKyBgaW91NXrpgkC0OMzQ1jqK7\njExnGN1/jKY4QSpY466bD6NrGsK2WW+3CelHNUgpObe+zuE77/zFLEZ+GL5biACk03nq9Qznzp3n\n2LHr2NioUC7fwi23mDz77JPU6yZSQhgu8K53/Y+vKSstFArkchr33/8I6fQEWbWAbyfQexG69DCA\nyNDZrsdcvNhFlRZKPSJ0bZJ0CUmhMYWFSYeYkNP03fpSZNihhCDBBBHgsIOBoIZFiEDHJYPgEjZd\nplikRxGHBB1CklSxkExg4aOyTZqQaSRl1EGlLTmFT4I2Gk0S2PTo0MEhxqeChU6OInkifBxGiNlF\nxHrfVFpASZ8mFhUsVceXJuOKZDmMGfc3ccQGQhM0DZfIj9m4uI2eWOWGwzN86i//kjPHj/O2/fvZ\nf9VVNDodnnvsCUxnB0NVidQaUdTGjSQpvYidyVDKZllZOc+NN06S/iEa8583PPAAXH017N59pVfy\no+G7JNbf/u0rvZKfPqanp7n99qv464/9ew6kRtk2NBy3ys3791CLIs5eukQml+ND/8v/zInjx1l9\n5HnctkLkQ9zbwfMaeL0WGTvNdhwzokp0fZ0tYaLJBP0s6zUSAxVFX0NjDBxGxogwiQjRiNBYR2cD\nnwwh4whsBEV6TNLhLLs4hySkgYlPmZiQmHkCdDbIIogZGfgSrdHDYZoJXAoD+rnAIoFPkU22KWJR\np889WGcGlxCTEgbr1PFFxB6jCAKmrATbocJLvkqKGWxNgVDBcxSyxiiW6nCoWMSIIp7f3OTphR1W\nKltYmoUXBjihx8GpWerho9w82SNlGvixxvLaGuOlEkII5A9RQ/wsoes6v/Ebv8ynP/0Vrrkmw8mT\nWwwPdzh69Aizs/t5/KVvI3e62PZuVNWgWBjDtos0ts/w4Q+/Hykj1tZiTpx4lFZLks+Pomp7MYwy\n9/3yu9nY2OKhh56m0WgRhqvoepkgABhGUgW2gDYxTVz24OAhUEnhAS16GAiG6dEBQpLU2CaLJI9N\nk5gOIVkC8ihcGuizEgiGsAnQaZCjgIlJCgcFiC97xmjkyNBmmh41iqxxNZI1AkqkWMKhiMMKz6GS\nQSHAo8aUplMSKuk4xnYdDF1wIAubzbMY/hBeKKkGAetaTCA2yZkl6p0aHUeHikcm0+bw4SPYdptD\nx3IEGzu8dOYMnVqNWr3Oo2trfZnv5iay02H02mu59U1v+rHP8c91MfKDUNUE7XZ/Vz46OkS1WqVU\nGueuu+6j2dwhjmPa7QxHjx593c8ZHR0ilTpDr7dML2qRUSRd00SLM6RSw2z0Wmh6jnptnnzsUVDy\nuIpBN+6hMIYkgQ3kUNhCp04Ngw3GhIomISTHFhcYxmCIGI2YVSBGRxmkyfSdEjOs0yBiDYUUNntJ\n0KGAQ0yaFA2ywCI6TdKASkQbBY8U0cCKp4dBmZAhosHFHtJCISCghCDCQEGhgyrBiC22vJBhoRMo\nEi2dptPcwQwjdusKG7rO1Ti41TPs9EzaS22Kk1lot5n2PLZPnWJ1Y4NarUax2+GwjDhHj1CLObz7\nAGvNHTba62AXqTuL3DS5i3vv/cWzB/3Up+D977/Sq/jRcdtt8Od/fqVX8bPDzMw077rlCGPJJJ47\nytLcHA3XhTDk6TNnePuv/Rp33XMP1WaLc4+dIe6tkugF5BJ5diIfV9FI9toUp0aJpGQma9Btr0Cs\nMaHBiiuJMKkSU0SjRIDFNpsohBwA7ME+dYMiDVwEPj4eGRQioIFBSIKADkNojJDu9yzwCJCsEHIb\nF3mBGlWy2NjUaQ1sE/teFhGSLjESDRWDKhEBIdvsxicLbNOgQQ6NmPHIJZRN9hSTWHFEKYwZDVSk\n1UNoaXqKy+5ykY1mBO4ShweufZcqO7hBi+r2C1iGSSGZpiALXFxZZW85x5htUUinCaKI48vLTE9M\nkJuYeNUAwzcS4+Pj/PN//rssLCzw2GMaZ8+G7Nt3PVEUUm01SakFQKLr/duarpt4sUGlUkHTNFQ1\nZmZmFsdxSKcthoePcv78MoZh4rouQSBQ1ahvAKlOE0XzxPHT9IWfG/RVNRKVS6iMAjVyFOhgYBBh\nUAM0fFYG3eoOZapk0DEQtNlihQI9/n/u3jxIsqu+8/2cu+fNPSuzKmvvru5W71paVtNqSa2WBAiJ\nRWBsjMEMHgeGeW9msCN4EzMv3gvsF/PPi/fCjrEDvxgmsJmw5RUMGGywMMggI7S21K3uVu9dXXtV\n7tvNm3c9749MtwEbAzK4JX3/qcrMupkn85y6+bvn910GDIMIikyhkUdQoYCHQ5aQLGkaQIxHRB4D\nhRzQQZCgSJsKu+kyzBW2SNEjoMXBUUHbpYtDQKxZ9KOYvpCEmkbBdRFhyJ17pvj2pQ0GA52qMyCb\n8FGNSSYz2zELKolykSPH7qXVusoHPnCc2dlZFEXh47/yK3QuXYJajdRgwF7bpmFZXG61ePjhh3nz\nv9AG+jVVjARBg5mZ2wG4774jfOpTf4lhJEgmM2SzRVZXz3HzzfMUi0Ucx+HixYv0eg5TU5Ns3779\nOnfE92PuvfetBIHPN8NVlOUNnG6brmczCPp0fJdB7EDYIyVKxFE0ktvmSGCjM5ReJRGkkSxjMmAd\nKW0kaSpcYwyTMQxMQKfDBJKLgM3kyB11B4MRPWkYlrUM1DFxgM6IzAbnsNHYjs2Q3OfgE7CJQpcA\njYgJDNIEVJFIoIuPj4IzyrMxgIAMIT0kndBjTPhoekRCj+n6A7oxzKsGVQXQbKYT4wyimHqrTn1p\nC+OoJAoCJLBYrXLy2WdZGB9nemyMtA775+ZZ3Wqy0t4kly5RnIo4uDvP9qNH+aUPf/g1R1zd3IQn\nnhi2aV4r2L8fKhXY2oLvyJd73cA0TTCM6zLx+W3bqFarbNRq3DI+zi986EMoisLeffv4y8JTdNav\nUGJA2xl2+mPNoh+02GjUccwCy12dAfP0tR5F08MMhuTYovQYEyY96ZAGBG0clvHZiaQ18knNMk+f\nVS4woIdPjgSQYkAbSDOGQkyARIwypwR5FLoYJJEjq8IE6qh5amORIUbFJ0TSIULg08TGRaePAvgY\nlHHpU6GCSkZTSJeT6EIwbZr0Y8m65+LqIBSVpu+jCkHX87h1csjXqjkOX19yCaJ9GMokupam2V/C\nYZGcoVIqZmmrKm6jga6qVDsdrsUxP/POd96oqf8uGIbB7t27KRQKXLv2hwwGfSzLxijMsLmyzrhh\nk81OEccx15pbpKduwnVdKpU6tdo4k5NDxU23u8na2lnm5kLOn/8GTzyxSBSl6PeX0LQpPG+DODaA\nOYaFyAGGMt8OIT6SLTIoODSAGgY+Ov2R+nEShR45mkxRJiYcib9V0tTo4SNIE+NjouMTYWHRJiCJ\nNzqPG0j0kdS7jYeHoIeBRTwK0lMwMYhI4KGgEqPSo4OOz6RmMBuHJKVBJpNhpd3mUrvNpKbh1Wvs\nSgT0oya5cEAfm9W+zrV+n9LCAkeO3U8+n8d1O6yvb7Fv3z5eOHGCg+Uyqm2zfOoUU/k8Xr/PS8vL\nOBcu8LlPfYq9+/YxOzv7iuf2VV2MrKycZ2JiG3Ecsbl5hR07Mmzfvh2AHTt28Au/8CY++9kvs7Tk\nkkwmOHJkP29+8/2srKzwP//n5/G8DIpiEUWn2bkzy/ve925M02THjhmuXFlkbm4fb333v+dvvvQH\nDE5+i0gushWsEakSQ1lAjSeR0iWIfaSUyGGANDEqCdUklAGGJjAZICKLVuSRIEaik8XCw8bFAWxU\nImximvg47EIlN7oaChDEIzNghQ5dmhTwMGnSAybJYxEgGEaSD70d+6wBLsO46TYCG40DhIQIYjQu\nYlPBJEFIjxQSn4g+K+zExRaQS+VY6fdpRSGXhUo5lOzUc+iaMfRBGbjMZad47rmTTOyc4/TiIjcn\nEhzSNLbrOlvtNjU1ZGpMJ5Ge4sn1JdLbxrj14GHufOMbuef48ddcIQJDx9V3vhP+lbLBfixQ1aHP\nyLe/Dd/j0vy6wNzcHDKXY6s5jLlXVZXS+DjX+n3e9Mgj19fZ/v37mN5WJtHdR9FI4HsOU1HE5asn\nafUM/qoZYKlpDGFiaRqlXIZutEXorTApbISnsR5HhJhE5LCwSLKGj49JEp0iCm1gjZtwOccifdLE\nQJKYLgYhEfYoFkLFZsBQQSeok0CiUyJiCTHSYtTJkidARxv5mzhUAJcdCDKE1EhRYRIHDxVdibkl\nm2bTcSgUizi1Gj3XJXBdmoGPK8YJ9KH/8rlWFyyPqXyKWrXO585coOfNk9fGGCjxkEKvz9Lrr7A7\n3cYiw5333cfG+jqnr1zh9uPH+cWPfORfLSjvh0WpVOJnf/Z+Pve5x4miFONTJV7a2EBNSp66cp5K\nz8UozrFtXKHZbDM9fQtx3KfRWEIIi8Ggy2Dg86u/+tM8+eQL7No1xtpakyiaQ0oTz1sCdjNskY0D\nM4A3EhkMz+SwSYg/Ckr0EOxAx0Gwk5gzTBCi4hJf9+WFPC4VYmImR426mCwmOiERIT46a/SZREHi\nEtG+LgHWsAlR8BAskiCPRkAVnZgDqMQEdCyDy0qCbXEEQuBoKknDwGk0KAFJw8DqOIR+jCsSGHaC\nrTgiziS58+1vZ9v27dez3sLQI5kcyuwb1Sppw2B1c5PtExNUNjbwm00mDYOBYeBtbfE/fvM3+div\n/Rq5XO4Vzemruhg5enScF198AVVVefObD3DnnW9AVYdy006nw7PPnsTzNHQ9jabFTE1NoGkaf/RH\nXyKR2MPExD+oNy5dOsVTTz3D8ePHuO22W3jqqVMsLb2M7/uUZnew1d4kRZKHH34fX/nyoyxejOlL\niRNBjix9qqSJcdkEpomESoTPBhEyblGOE6yhU0WQGXGjAwwaKCToYKDRBdYwCNmGMios/r6JEwMO\ndTzGUCghqLGBTZYUSVR0Auo0gQFjRIR4mNTpUqSOBA6iYKDTJKKDSp6ACpJnMUnREDpStkjQo6la\neFqGJcehXJim5IdUxXCRh90GMyJiIAQVKcg3+1xodLm0vs6YohCGIWEcoysKpThmTVW5943HUIFy\nez+/8vGPY5rmyDX3tQcphy2a3/mdGz2SHx1/zxt5PRYjmqbxrg98gM//wR+wePUqnWqV1XabXYcP\nf5epnqqqbNtW5KkvnSXSMyiKpN/vE/YTRKGFL5IkrF0gmmTVmH6nh0iM42pbDAZ9CnGIQ4TCNgQ1\nJAUEGgq7EdRRqGORRgEcBAkKmMSELGGiECMZ0EbHRtCgTURAGlhGISLJOBFVYuoM8MmSpk/IJXqj\nNJOABjptJtAZwyJCYZwqPQI8MvgYErqDARfCkP6FyyTiiFbgUQlj1iWEwSX6ShE1tQuhRCQEfOX0\nVY5kMlzrChJWmdCTSGIms1mEIqiKWUJRwQtDNppNGorC3L338nO/+Is/kjzzXwOVSoUXXzxNvd7m\n+PHbyGbTCHE7X/jCGF/5ysv0BjohAmpttMQaTz/9ApnMrdxzzyQXLlzkxImzSKkjRJYvfOGrJJNZ\njh9/G5/+9KcwzRk6nReJ4w7Dr8c+YI9+WqP7fGxsMqiEBET4SNJ4bDEgABqYmITYBLTxEQxJrgGQ\nQqAjUYlIU6WJjoJOTIEmAyJqGATU0XEI6NMlg0eCMdYYOrl26JAnoodFQAIFW0hcy2DvzAJOq04c\neghg5+6dBEKSrDVoBH2abkDGcynk8nihz6adZld5G2e6HnPzs5imiZSS9fUVrl19hq21BEtLZcbG\nx1nxfVAU+r0ebrNJKZnkquOQTSTwVRXb93nhuee4/01vekXz+qr+xnjooTfx0EP/+I1JKXn00T+n\nWk0xPX0E3w+IY5/f+cQfsa38Gc6e3WDvoRKpVP76FVO5vJNnnjnD8ePHSKfTvPe9b+PjH/8Nrl4N\nMM0smdQ8y1ee5TOf/r/ohxaOWyMMJ2kSEysrpKWLwGKNNoINlDhDQJ9sNGCfkUZoBrbvcTl2Rp3E\nLjY9Igr0RhXtBml88ihsEVMgpkPMRTTWkECbBBo3AVUiJDEH2KRJBxWNLWbwSaGjISkQUiFmi2VC\ntqGhjK60VEwCNHqYRNyHg1BDfGlQo09F2Ows7KQTBgy8TfC6KHqalDmD4uusxn0GskcY+IT5W/H1\nDGOGRj+uklFt4qRGs91my/cpl8vcZBgMgoB+EPCG++9/1Z24flScODFM6b3nnhs9kh8dd90F/+W/\n3OhR/ORQLpd574c+xP/3G7+B1DSO7tkD3S5/8Fu/xds/+EFmZmb4vd/7E9rtErff9/N0zj5Hb6OG\njEC1yjT9LVKJBSRZ/KBNY9AlIwR9v4MrA66FAxJoaAgiukgEAxbpU0CjjsslkuSIaOMTsYpKzCwx\nDSJMFAT7FZvLcZsKEh0LcEZcAp+YGZo4wDp5YnYjqRIRkWGLDA4xEcHIWXmYidPBIkEEqNgopNCx\nkNRdH0XJcjkaJxx0iUKfopLgWHE77X6Xmi9Z9C7ywLv/HcFA4W++/HncbgXVtpA+uNLAkApuv4Np\nWqhql32HDnDrm99MZvduds/Osnv3bnRdv7GT/j04f/48f/iHj+H7KZq1dRqbVymULD72v/8q09NF\ngqCJouTI5XJkMrMoisezz57mllsmsKwsly6tMTGxH9NM0GicwzBSPP30V5ma8uh0Ful0qmjaDLAE\nXGbYmhHAwqjUHPq9WChExDQI0MjhsQsoolAlpoVLhgo201hIegwt/Mdo08BkgpgNIgxiBEt0gRoJ\nGmjAfiRtYtIwYoDATcSYI2cpQZ4mARvYJGkziaCFZMKy0KMOlhwQGhoyn6UZBVy+dJWUVGml8mgh\nbMWCSr1Gw1Rxc0Um5/dyqxGxvPwEllXm6uVLdNZf5k23bCM4c4YvnjjBtsOHGWSzkE6zvrxMQkoq\ngwFNTWMhmeRSGPJT8/OsXLoEr8di5PthdXWVlZUenmfx/PNPEIaSxsYJpmnRn7KYcFXWn/4rGtsP\nsP/W4wghUBSVMPwHw5yTJ8+yfftd3HnnAs9++9uEWwpKcjft3gUmadGNq2jmTgzFRNUzDIIuNa+K\nZh9gMieQziUmuk1ysU0Yq4QyYsy06bkubbosozFBnwSLIwqaNiK2lVBoE3CNoensOguYpMmwjhxJ\nBYfh0R5ZQNIhYA6FFAYWIRF9HAxMfJK0cKkQsIWCgkGPJBE9JAU8VCXGiwe4MkBTFDQRoSYMTCek\n53t45gSeDahZ2qGPH6SoOD3M1BQ3H3of1Y3n0PQauIKNQcjsRJ6f//CHuXDyJHEQsNHtotfrTO7b\nx9FjxwBoNps0m02y2exrzlvk937vteEt8k/h8GE4fRr6ffgeE9PXDZ76u79jQVFYuPVWVqtVXM+j\nJCVf+bM/4+iDD7K5Kclm84i5m1jbXOPy0jJ+twaGh6tMoise/UEVLRZ0KdCTAWFUQ1MlbXbxEg1K\n+BioeOhsYBExhY5OgEWdHlnW0dCYxqDBGhUc0hTp4vJCPMCnjxgVEwU8QCMiR8hFYjocpMduhuFn\nHk1qpElRwEEnIsRggwwWAwwC0vSosp0EEQX6hDiiTULmmRUaV8IiLlOMi1VyhGy2+2i6SS5RYkHz\n2Vzf5Lbb7+bgHe8kii4yUcrxjSdeYHzibpxWmzDso+sNMok2t7/5/XzwIx/5vuZzNxphGPK5z30N\nXZ9l6dRXmYgjZhNZNq6t8d/+z19j3VOZnDxGobD9+jFxHLG4uMhgsMhLLzlImULXdRqNa1iWg5Rp\ner08V640gCmEKCKlTzK5HcdpIOUY0ENQJybEYIBBH4cNmkSEFBBMMAw3tNDIMkyWabGFgWSVPBGQ\no0uTNjlylIjo0eEUKn2SSALa17NnOsA2hvbkdSIiukxgEaCPdtMlOUyq9IdEZ6HgqpJACJKJBIbr\nsplMUk6ncZw+qmFT9QZYdpo40ljzJF7cQzFMbrrjrdxx9CFWV5/hl3/5HVy5cgWl9hwP3v1mEqN2\nzXwc88yzz3Lsve/lbKnEU1euUFteZnZsjNlikUu+z+HDh/GDgHSh8Irn9zVZjPR6PVZW6tTrIbnc\nNrqdNcZ8H1vaeD2XsVwOKzHGhWsv05zfS6FQplJZ4siRPdef48UXzzM+fpRqtYZXrZIzDEwtSy63\nwN6SRrddo6Z2EIyjqwaqaeJo2wniFANf4gdF0rKDI2JCICkEXuCSQZCmQx9YwkJjkgAFkwEmkpDT\ngIqFBzRQGEMnRMWiADTpo5MlYhiJpVBE5ykMeiObNY0xUkSkcPFQaZNngw7nsJkGIkxCQtrkaeMi\n0WWMiqQoDJxowOMrL5JXFJQIrnXr9M0D2IkpdBucqIowBoyPTwFNjh47ztzcFCsr57j61F9RnCky\nPT1NoVDg2TNnGMtmedcv/zI7d+4kjmO++Od/ztUXXiClKPTimNmDB3nbu951vQ/5aka7PbRVP3Pm\nRo/klcG24cABeO45uPfeGz2aHz+klJx7/nn2p1J88etfx3QcLCHoSElF15GpPFfOniQVPI3wXBoX\nTzItBdKcxLINNhyHVb+F45vklR2YmgKKg6tGDEILRJq2GKMVr6KhA/NEDFBoEXMNkzWmcJhEIUKn\nQReFKil8JiiwHZuzeGgsYDKGSRuDPjZLBFyjAowBeZSRa6sgS4jDGg0a9Ehh4TJNE9hGTB8fgywe\neWIsfBQEUSwxFIs+GgmpImMfW5ooqo0nPYSqkUumEGqI06xgGDoQY1lpjh3/GQwrxVNPPYPUVCK9\nS2E2wcf+t/+Dt73tba/q9urm5iauq1FZPsWcUMhn8jiOQylVottYorJVpbRw7LuOURQVRcnywAOH\neeyxb7O+vs7Fiz62nWJ8vMzGxllmZ4/Sbp9ieVkjjhMMBhIhEkjZANbQ6KGwxDDcUNBDAOOEzAEh\nkhaMUtlDasARYIkYh00UKlwDemjchE4BiPBpMk6bBVwGpOkh2YGCNfJZnQB8ho2hYfZRgIGBgwQE\nuqogoz5NJLfbBpO2hWuaXHYcrLk5/EqFpWoVp9OhFypYqRyTQcxWu4OUeTTyrPV9lJfOYpl5Dt6c\nZG5ujkvnznHz5OT1QgRAVRQmTJPa5ibv+8Vf5OF3vpP/97/+V1KtFlPFIvOTk+iaxvNrazxy5Aiv\nFK/elffPIJ1Os7R0ienpR1BVDbe9yqSRRA0DXLfBvfce5sSJC5j9iKXFM/R6VQoFj6NHH6bRaGCa\nJrquEccR7VYTW1UJ/QBQEcTYdpKpwhSVXhPbKKNrIZg6bgeUsEFaJtD1HLpVJBV06QZNDJEkBNqi\nw5QMiQlYBTqUMLHIERDSwUchZpOdSGJUzhMSMiBJDhWXkCZdPPqYCHwEWxQpENBBJ4GBgYeDjmQo\n/jJxEYRsjER/FkKEjMkG05pgm6KyFASUibEJWCTipjjGVtN0VQ0tSqBIFz/WyOay7Dh4iK3KEyws\nZHjggXuuQiResAAAIABJREFUm5Xt2XMYp1enqm7w1MoKoZRse+ABPvL2t19vzTz+1a9SO3GCu+fn\nr/sSnDl9mq/bNg+/4x03arn80Pj0p+HBB+E1nOt3nTfyeixGhBAgBE8+/zyzYcj4d+y6Pb64yNN/\n+zUyjYD98/t5efkct1kJGo0lOhEkRZpiELPkBShMEourhEqEqijkU3tp9Nr4gYKISghSRKwgh/oz\nYprY1MjSZ/+IKt4hokuLLCEzDPleF+gyYJYyBQI8kugkKVAjAK6ywFCFZxCTIqaCQMUiRQobjSQW\n0KVMRIVVPIKRHVqPLhUKCCIG6KpGP47pSYUwTqPi41IlGetkkhkkXbzIIzIEY5PTFItFXHeJRMKk\n3W5z5Mhb2LPnEKdPP8bP/uz9PPTQQ68JU0JFUQhDH6e6ypRmc/bsRaJIRcqIINgiayvU65dIpQoo\nypBb2Os1SSZDDh06xOrqJidPutx22xEMI0m9fpVKpYmUm4ShpFAoIcQEtVoFGEdGGYx4kaLSpBAr\naDEsEyPZSURMjTaQQFUXgCWiqEJECnBR8FExSdPFJoVHDZ8AnzQuASU6HGCAgsIGIW3AJEYHSgz3\nzMsMWSYdIEWTHhEpEkgkHdlj2oZ5I8VGFLEZS6JBTF/6yGCDUrvB4elp9EKBrWaXp50WL7oRiSAk\nFj1aIk06exfddo/Lp/+Cj/7Kb/3QZmW5XI5f+c//mb/44z+mV61yoV6nr6oce/e72bZt2yue3xtW\njAghHgJ+E6hJKX+kDr1t20xMpGm1LpLL7UAoGoNBh7Thk83mGS+VuO++DN88cRJ1KuLBt+xDVVU+\n+ck/pNeTSBkgZY92+wyWVaIWx2QSFoNglVTSp5Qb41q2xHRKoed3GMsmObdUJQo9imLYMAl8HxcT\nTemSFE2ENqDuu5QEZIhoyjQp0qPU3kl0THQmSNDHoMUOFK7ioNOjTUyODhYaOVwCOlQBlQQJDEyK\n1FkmjQY4lJHECDooTGJRoYdglpgNptAZ17oUcikq/T6eYZDudChoGhuKgjqAXYrBQNHxlSQytlFj\nn02tT6a0HT9oksslGB9Psr5+nsnJXSiKyubmIrl8xD13vglF09h/8OB3hWcFQcCZZ57hzpmZ64ta\nCMHemRm+/dxz3PemN71qt38B4hg+8Qn4/d+/0SP5l+Guu+BTn7rRo/jJYXrXLi49/ji3fcfacwYD\nJnI51jfX2ZGfZuD1aTYrTPVbzCVtrvXa6PgY0iYhBL42TsFMMJZNs96tY5vT9IMYqXTAh1huJ4hc\nJCGCFDZlApZIsIiJiYNPnT4TwB6GgfOSmMsEvEQLwTjmdYGmwCZDF40ZQlzgBQwMMqiouHgYhHQJ\nmEWSxEUVCiU5QLCEZAWJwEAlgaBAyErk0cBkS01jq0V0BareNcy4iuknSGVtBqLC8iBmu1Pjz/7o\nNzDCGmrb4ut/sYSVK7H34Dz/4T+8jyNH3nBjJvIVoFwuk89rnHd7XK01sMwChqFSrS6haQkq9Sp2\nao1m8xKKYiNlhKK0ue++fUxMTLCx0aFcTuG6TaRUURQNKV2azXV03aTdXgEy+L7EMDxyuTHc1mnK\nukDxAX2MNBoyGqMtXDQRkUiMI0Qep3t5REwdZrmr+ORpM0cKgYWBRQ6fS6xjoTCPhoVCl5AaPg4K\nF4iYAwyGbJU0wwgRGxgQk6SFTRsHha0YVGExME1EILHsMuXMOF3X5fz6BbbCJLJlYmohGSGwem22\nywy2PknOtOnELoveWWZnbuWm2TS9bheAXXv38sVvfpP5OEYd9aqjOGbT8ziyd+/1uRgfH+dDH/0o\nq6urRFHE5OTkv3j3+0YH5d0CfP1HPTCVSrF//000myZLSycJlTodvcO+uZswtB66YRALQX7XAh/8\n6P9Cu93mk5/8AuPjtzA7myGOI5aXz1GpPEexeBObbgNvEGAmVpgwbc6eeYmVfo8WGVJ2m2JBoG9l\nMZwacVzHixMINAahQdMK6QiNbuiyTYObTZMVz0MLLCSSIoI6bVxMTExiuqQZUMGngkOZARXKLNLH\nJCJWBF6skcEnxiMgS4NFIrKsY+KPIqRVdAwMksTsQuMcLaQQzFkppJ6gEzaQlsWLvo+tKHQTCRp9\nlzmhoqBiyBhNU0gZGbx+lW7rOc6ePo+pxlgqeLWAg7c36bQvUBgrEjqblIkZnDqFF4Z8/plneON7\n3sOBgwcBGAwGKFGE/j3bvJqqoo0efzUXI1/5CuRyQ3nsaxl33QW/9EvD4uq1yHv5Qbjtjjv4xqOP\nstxokNQ0vCiiryjsu/VWzj7xBAcOLHDy5Dm6nQqh12MskWAqXcTFIhEVWO1UyJQmyVp5zDgg6LXo\nOB3Q+mTtJJ1WBz8IAAeLARbTSDT6CDQKDHAQgAscIkZjWIwkUJgk5Bo9anQZw0CijKLvfCwitlA4\nSxaP7agY6IQj2e8WO3DJ0GMSj7SUnBq9xjgxNwuBiqSiqCzGkpqUrIoBYRzh+VtAiKMmqWfT9IIN\ntpXKNDo9ZjQF88LX0Ad9yjt3cv+xu0kmElze2KB4U+k1VYjAcGfk/e9/hG/97ddZba4xnVWp19dR\nVYFqTyC1Iorik0qtMD6+gKpG7Nw5x/vf/y4cx0HT0uzYMclf//WXaTTaaBpYVowQZfr9NOXyIba2\nzhHHAUJkCKIzmMmQji+Z1AtE0iMI+8TCIdAsdDR8v0scd4EtVHwENWw0InzG8dFwYRhfh0KfBRRa\nI8lCm5A2CXzShHi0aeASYzLcQbvGcGdEMmzXmCM9V0KopKSgO/CoeB7jRpqc5lCrLXGq2yOKpila\nZUQgsc0ELweXGadGJIc7gcmETtFKIwYd6q0KCv/A85ibm2PPsWM888QTTIyKi03PY++xY9/lIbK1\ntcU3H3uM5YsX0XSdg0eOcPfx4/+iguRGBuW1gFfkY28YBg88cJgvf/klHnjgbei6wfmXn+Lk81/h\nzt1TnF1Zoamq3P/ud5PP5/nSl75KOr2DZHKYq6AoKtu2HUDKNm996yHuuGOKk9/+O0x3Jy9++ylq\nToCaznFTRmLqBeqxIDc2QaMRI6JrSPqARl4ZMGUotJNFcp7HrONgC0FCCPoiJJYJqtTxyeEMTycY\nVDFo0EFhGpVpFM5TZZU820o7SRoCVTTpNNosuQYd6QEGJgY9GsxjIxDU0AkxUXGYwEBlQFvqtOIu\nc3qJajxgdt822ktLyF6POyYmOLe8Sj0akFGG3H7NShMJyVakgjLHTeWD+J0u/WCZQrBBqllj++Q4\n6bkc4ZU6h7b/AzFsbjDga5/9LAs7dmDbNqlUCjOXo9XrkfsOT4Ke6yJs+4ZlWvyw+O3fho9+9LWd\negtDw7Px8SGRdZQp+brCwsICe+68k7LrEvb75JNJJicnuby6St1xuHDiBGO6Tt4StH2F7YUs0jDA\nU/BJUbQTuOmIZDpHs9HDVUDVNygUBJZZoN9dwXdXEGwQY+NSQQFUPNqY1HHIEiORmKgMiEaPS3QE\nCTyq+AwwSKCxiYdHhSwabSaJGKfITnwCPOqo9EgyhuQqJcMmiAI6sU+AQl2mkWQ4icSkx5T0MGRE\nAhiXAwytwmK4RaiMoWkLSDPiwZ+7F81rkbh0icOzs5w4c4apQo5Kvc63n3qK9zzyCHfl83zryhXa\n7fYNd1XdGLk5ZzIZZr5jV/X7YXp6ml/+X/8tv/3//C4btU0UQ4V0iU0rw1T5FiYmAsbGYt7znuOU\ny2VqtTqPPvp5NjdrfO1rj7O4GGAYU2SzB1GUBO32ORznGWx7H6a5HcsK6fcrgIrn9Zkay9HtGjSd\nPgU9QugRA8VFF3ncvouMXRK2hRoKJlyHTQI8ioToVOkhaZGnj8UEfcCnxYCYAQpbjJOlyAALRQmp\nxQla1Mhhj/ykHLI4ZID96OiqhhtJdEXjsozYiiMOmxp9YwwjWcBwOwykjZ6YJZfK0+5vYgYRMsqx\nRZKcDql0Etu2kXGM4g3Ycs5z6eoYLz3/PDt37aJYLPLGt7yF3fv3c+ncOYQQHNmzh9nZ2etz02g0\n+NNPfpI5ReH47Cx+GHLxiSf43MYG7/3gB1+f2TT/HO6++yiKovCNbzyP50kmJjUe/vWPUSyOoaoq\nCwsL178ANzcbpNP/mAigaWnGxsa4++67+ZmfeRe/99//O51mB+dqjUrLxhnkySZVFjdfIm6dZr9S\nIhQhUVxFU4sYcTCkldpJ9k9PU7lyZRgcFEXYUZ+rkU6faSLmEWSJcBhwkU0C5inSJsahS0gf8Dhf\nfZmCHjFtmdSFoCuLROwGTAYkifkGm1xDZw6bCQQxkGUdly6baMxyKfJYaa0TGxahO85y0ET028iV\nFTJC0EbBjHzaqMRBkqq3QTPMMD+7QK/bQhcOu0oFLE2l32wxnUzypS98gZ/7Hq2rbVmkw5Br166x\nb98+hBAce8tb+JtHH2VXEDCWydDq9Thfr3P8ve+97g/znXAch263SyaTwb6B8o9z5+DUKfjiF2/Y\nEH6seOAB+OpXX5/FiK7rvPGnf5qv/fEfM1cqkbFtFisVHj9zhvsOHaKzusqYrlPO5/nrlRonnQ12\nTRSRqqCvZSlN3Uxm+zQvvvg0rbZPzBoiNnDdHSQSHgP/KpIEJjswmKFPiI+DAdSpkGZoCBigsUFE\nWijoUjIYUV27QJ8KESZtYlRcdEpUSJAiDaRGcWuCkCwxAWkEQrVJW1m8IGLNc+kYM6S1eXzXIYhj\nmtJjQ15hP4IxTCpEyNBlt1Doixr1sEmvpfKGN/wCf/foo9w+MYEEZBRhWRb5MKRVr7NRr7OtXMZQ\nFFzXveHFyCc+8VmESCOlw+xsive//6d/YI7VwYMH+Km77+ellyp4Ax0zmWcmNUGrdZlyeQ5FCbEs\ni6tXr/HlL5+iVNpDrSa5fDmF67bI5Qq02018/wJxDFKmyedLNJtX0PUypdJBms02Iu5jKBHluTQ9\nR0V4A1r9HjoxbWcFgUk61cVKRPjtBlvoRMwRYY0a6SZ9lplggEMNgzwONj2SbDFAo0QbCw8TK/bQ\nsYnYhzJqEPYJqLHKFA0UIrZFCqqqs4WkpmrMpSxUVcXzfHwlhV0uMW/Datui7jlM5fOYCRMlDKkP\ndO7cXqLR7bHccYjdgKWoQTpr8fZjxyi2Wvzp7/4u//Y//kds22Zubo65ubl/8vM/8cwzjEcRs+Vh\nkrup6xycn+fpS5dYWVn5vsf9IPzEixEhxATwJ99z96aU8ud/0LG//uu/fv3348ePc/z48eu3FUXh\n7ruPcuTIYVzXxbbtf/ILD2BmZoLFxTql0sx33R/HXfL5PDA0S1peXOTc0oDlLRtVvQlV0Wn2Wthu\nn0ykMplP4idydLsVNqINXMPC1BPcfettGB50Wm1ObW6QBlAjOpGgTx6LBOCNiKYLhPTwyaCRZmuk\nqhkDQhy0OKDqDyCUKNjYIqAvHWJq6Cj0yJMjj05IjpgO0CWBR5IMLilN0I4SyMQsh9/089wWRnz5\nzz7N4uAis6KP48WciCSxopFQWnTtJKnkBHvnElSWq8znsyQNg6YbMfDqQ1l0FOENBv/oc1W+Jzxr\n3/79mB/6EE89/jjn19Yolsu85R3vYPcoSnZzc5Nms4lt25w+fY5nnz3PcAPS5a67bv5By+Enhk98\nAj78YXgNCH5+KDz44NC07T/9pxs9kp8MDhw8SC6f5+Szz7JZq6GMj3Or63Js927WFhb4ypNPcbKR\nRB87SiaRIDE3TrO7RBT0cHXJoLLB+PhdDAbPMzn5NsbHt3HlyjlWFk+gq5MookFCTiBRSQFdBApj\nuKyxKGziyUl61cucCvsc1jTiIKCOwWWgQgLIkGWATohPkh55YvL0CBB0CMiPjK40Bqis4zIXOWw5\nDt1YUKWAaS0wVt7HpWuX2Bg0MYAk49haCy90KRCTI8STJtVIRSdL3434+Md+jXHdJy6OcdPUFE4Y\ncmZ5HSEFFU3Q6XTo53KEhvGqkN3Pzx+9/vva2iW++MXHeP/7f+YHHDPP/v1lTp26imntxLIytFqL\nZDIe09MLbGycJY5jHn/8BHNzb6DVanPtWgPD2IaqxnjeFaQsEQQOk5M34TgauVyRIMgjhE+5nEeR\nTaQXUEhPQXCeyYTghbUKIp4iY6m4YR1dlURBknY4QFBEVUqocQ6LkBwRAZKYMh5tCnRZo0cFHR8H\nD4sEZQJUNCLS+PgUsEmTo0GEZBMdj0nW6dMCLhCTjQJ83SSbz3FbwcZQFBQvxhpLMTO/g8snz1Kc\n3EW7uYGpSBqDAXFmGFbQCCMSts1Kd4O23wRD45ZdeymXSkwXi3SWlzl75gx3HD78z37+G9euMf1P\nFLFpoF6vv3qLESnlFnDfKzn2O4uR7wdN075vJX327FmefvxxFi9d4qVLLfYcfJCdO/cQhgFra+fZ\nu3eS8qi6A7i4WOHiSp8g3kZSzeC4IX23zkyokk9mGPhrpBJFLMMgKSfYsrPESYVqO8vTLy/i9ecJ\nQ8GE3Sb2u7R8CxUb8BBCoEtJiE1Mmjo60EFhHOiRZJ0pbGyZouMPuCJ9dDQyWopmsEUXSZIJdDQ8\nSmziUqdPlhiQWCSYEQp5TTChFPENj3rtGqZR4PDe21mr5lhc/AZG9ibGzBk8T6Gd0BgvBvSdJLqu\nUsqmkXHM5XqbtfYmqVSbOxtNMuUym90us5OT1z+rge/TVpTvIrHC0KZ/x/dE3Xqexxc/8xk2zp0j\nLQTPX7nGSjvH/W9+D7adJIpCvvGNUz/kqvjxotUaZtCcPXtDXv4ngvvvhw984PXtNzIzM8PMzPDi\n4syZMzx/4QIA47kcsZrjnkP34AcR52pVZGaMhJFmotjm6N0H+NM//SZjYyUsSyMcmKxcXcYy0qS0\niLHSHi5efZFELJC4SCkYoBDSRhNZkmYBI59HeA36ffi61ydCIDHQSZDHIT+6NIhHDZ0t6mxiYlDE\nZAWFJn1y+Ag8IhQcYiVPWwoi4TNA0g8U/LaD40lisqQQuMScDqtkUChhkEDwImkEB7AVhYS8QKFr\nkTElWbvPmTMv05Q24yQx4ogGGi+ceJlN3+fBf/NvXnWGZlNTOzl37lt0u91/dnfk7JkzNFevkOUC\n5y49SXnhp9i37zYWFu7GdXvYtkc6nSaOE+i6wdZWFU3LYBgN4rhAHG8ipU0y+VOoaoBlFUgmu1Qq\nAapqkEyGaOoKP7XvJtxWRLWdZPfCJC9cWEGGTRx1nqw1QxxbdLwaTnyKhDmJoqTw4pAUEh0DZZRW\ncxWDDt6oGAWfcTRSeEgCfHIj0wdBFjnKLJKE+CiUyGJSZlxotGWfNUKmEhE/f2gfz1+8yN5ikfmi\nRV9zWbryHH7Ywhqb4J77HuTCqdMMGlV0WiTSk7zcMwlaK5Riwbw9Qz5ncYtt85XHHuPQHXeQtm2q\n6+s/cJ5ypRKdc+e+qx0PQ47TvyQ24EaqaW4H/m/ggBDiq8DbpZTej+v5Tzz3HE9+9rPsLZW4Zf9+\nFqwr/OWLn6HT2c/ERJGjR/fxxjcev/73UkqqTRepFlFRUBUNVVHp9TViqZJKW2TNCENXESJJ2++w\n4ldA7uL8xQ2CcA6CASkrw5a/QuheQAgHTcZoI6+PFgmM0bLTGScgQZ9rZGiRRMPDx49dBArjSDps\n4ofTRLjozKGwxt9fvBuM4SEwRtdtGRGRtDU0QxINVIz2gMc/9yjZ+TewdyxPLpVnIzHHrskHEIpK\ny+1SHp8Dpcea9yT1oICm65y8sokfG0gGzBcO88WnN7jjjbtJb5vlxJUrTCSTDHyfzSji2Dvf+UMt\nvicefxzn3DmOzs0RhCFPnVxijBznz5zl0OHDqKrG9PT+H9fU/0j49KfhoYdgFGj6ukAmA4cOwTe/\nOXxvr3eMj4/TjmOklHT7faLIQNcMmr0me26+mf0jkvXy8hM88sjDLC93qFZU3GaPcjZC03W2GlV6\nPQdL64IS0Y8CDDRUJDERCilUWoggprt2lQUl4tbJcVqVFi/3HHpolLDYpEOBGJ2AYZmSZpIkHTYJ\nUSmRQGGTkA0cfCQKCaZZQ2OLCoZeINJ6+LFko7qJKkooUqLjEbGFJ2boyBXKGLQIiJkkpaTwWWEa\nA8vIEioWF2urmH5ITRkwSOSpi5hEJsdz61tsS2q8odlkMBhgWdYNnr1/gBACIXQ8z/u+xciT3/oW\nf/Kb/w29HbDLymEk26yuPEN/foarV5+h01nh6NHb2djYIAwdpJSoqoqqKpRKeVZWqui6ghAaENPr\nNdi+fZyHH34rn/nMn9HprFIuT5KxDxJWWriDkFiEnFhaxQcCUSAvSpi6StsJUOIUsSwRyhAvrqKQ\nJyZGkhyVqFCmjIHJGhWybGcHE7QIaNInIk9AlwQQUidDggBBFUiiozBMNtOUHHNGEW+wwmZvwF9f\nucL2nTs5vbaG1W4zZtus1+vouk4hVeHcS18gL3T27E6xY3IPL152afZNRC7NjmSefrUCccDLF69i\nGwpP/u3fImybe36IoLtDR47w2RdeoOC6pEaihOVKBcbGrmfHvRLcSALrCeCV+cb+AARBwJOPPcZt\n09PYo3+2W3buZG5ignO+z0c+9u//ka7ecRwymSwzC1NcvvgykVdAoGOkZ+lGL2NnNGYKY4yn08Rx\nzLdWajR7GUr6HAkmCIMtGv2TtJUxpChDbKDKc8RcRbATl8SItrSORjC6rRCQxWQVjcQoUElHoT+i\nMFXwZAtBhIaCjySLIGADwTQqGh2GHq2zaYtSNofXaRDikk8kGUQenbVNntnaYr4cM16YwfUcTDNJ\nN46ZzpXwPIP5+THuvHOBP/j9L7Hp+ShSJ5ta4GpTsO/AHvwwyYOPPMLW1hZLly6RTia59+DB79pV\n+n4Iw5CzzzzDG6amEEIw8P3/n7z3jpLsrO+8PzfXrZyrOufu6cmjGc1olCVLSEIJYQRIYGzABo7h\nGHzswwafPcs67O5Ze/3ar9nX3jW28S4G2TIgRBCSXoRymjw9oSd093QOVV256lbduH/0MDBWQEia\nGZA/f3XfvuGpfm4993ef5/f7fnE9hbZYnNMLC+cGRFW9+IOi46wt0fzDP1z0S19wbrkFHn30X08w\n0rd9O/v27KE7FsN1W+TKRaqSxOjZwbHVMvD5JFKpFO3tIfY8/TLxgIAouuTLDfLlBqIcZWl1HJ/g\nwxILCG4bVSxMDOJig3ZRBamGZlSQPIkps0RfIkuifpKq53ASHyniuPhoUUVCAgJnv9kNVE4hIRNB\nQaXGPAFkrsCPgqeFENRuGlYLvzxJs3Yc3A5UKUTDNWmyTBQL0fNTRqR8VhTRw4/gAdQQPQlBUkEQ\n8dQkdTGI6dSYCvSgq3Vu7B0iHYxg2YvMPPkkizMz3PfRj/7cmFk2GlUCAc4tnf9LLMviK//fXxGu\nqWSyQ4iCSCTcRWjuOMvzLyJ0bSGTuYz5+QCnTx9lcXEKUYzT3t7N+PgcPT1ZisUjeJ5Eo1Gm2Zxj\neHiIO++8hUAgyOjoCFNTdZLJXvKUeOnAMYTmIrYrYjpxXDeEQ4SSKeFi4rguBg0E4nieD1kFy2xQ\nc+vIiLhUkJlHQsTBxiJKlCAiIKMRwMZgmgYGKgZBGsh04BKgiYeCg0CeED5k16VluYTEIGVdo9BK\nEpF1IuvXo1UqFJeXuXb9enpjMZ6fmuJMvc5H7r2XbDrN6fl5JFEkqHoYuRqRjm7mp45SX10ipJr0\nt6eZsSx6IxFWx8eZnp5+xYz3T9LZ2cnN99/PDx9+GDGfx/Y8Il1d3HvvvW9JNO8XNoH19SiXy0it\n1rlA5EfEQiGcmRls235FMKIoCn19XZhmjcHhIXK5FVw3gmmWaYZEoiNtJFIJ5qenmSoUmFKCpLJb\nEQSJVukIemWGsJvGL0ao2y1aYgTV6UPiFE2KNPEjIOPHxSKDI8hY3pqXjEUTB406OrWzOn8JdCRq\neMI+bE9FPRtlN5BxKVOhjH22dMyHiVjXKVo1ghLIXp1mM0Fd1PApGjOlSeIJiOqD1ColZusVYp3D\nNJtVqtVJPvKRW9m+fTN/93ffoqPzOlQ1gW23kCSLUgVaLY1SqcSGDRvYsOFnm8GwLAvPtlHPTgn7\nfT40xcZyLCRBOCfRbxi1N9/hb5JHHoFEAnb9YlU4viFuuQU+9KFL3YqLx+3veQ972ts58OyzEBUp\ntAx27b6FYDCI49jMzx/h9tu3I4oiV1yxlR9+42G0kMbJ2Zco1fxEAxHwp5hbmiIa8KhVC5TdNYkr\nCZWU4Cck+Vl0qqRdiPuDOG6VQnWViiBiewkkwrRo4kPAQcOkgEPrrGVmgw00ieORESVedkVUKYMn\nlKk7NrLXwC/FaTVOIzbniAsCAhM03SUQbfCCSF4ICwELldOih89t4VHEFZOYroQhemA6eHKT7sEB\nDEPHwSWS3EZj7iC4ArZlEY+F2dTTw8uTk0xNTb1iWfVisrw8TSSSpFYrUatNcf/9N71m7t/c3Bz5\nmRWGerYjCmsBlCTJtGX6eWLfM9y7/VdYXV1hZmaWaDRKINCL45yhVKqQydQ5der77NjRRTicJJc7\nhSRlGRnZjm3XmJ1dIBotEQrZPPbYV3FdhXJrgVp1FVXZTsDfhS9kg2HgOHWqdhPXW1mrSpRVBKGF\nLO/Cdc/gmIfxaNKBdTavz2WZICIyKgIWFjY+RAJIhLHJYWCTAmCGeSQMPHy4tJFCRcHxQPCgLrj0\ndw5w5cYb2De3j01tYRq1GjeOjp7TBemMRsnn8yxMTdGWyRAJBHDdOYJ6G3NGnaXJQ6Qlhxouumtx\nZH4et7eXK667jlKzyZEDB143GAHYsHEjI+vWkcvlUBSFZDL5lu+Fd2Qw4vf7MQUB5yeEW2Atz0FQ\n1VethdY0jWuv3Y7rTlKpGPh8FWy7SDCoctttn+amm65l/PBhWidO0JycpHJshYYpcHTyOKnG8pqP\no5ACyyQEeEIFBBmfl2KLUqdgVZkjRkiMkfPAIYeFh8oiFRSmsBGJIBPEpUGBBTwUdDFPxqmcVehr\no4CJSLDnAAAgAElEQVRDk24cXMJUUKijSQVygkXWKYPl4RNkVn1VJJ+fkL9EVhboXreZTCaDbXfg\n98ep1QxE0cHvT3Hnnbfy3//7/8CyFAShCMiEQllcFxYXz7C62nrTGiG6rhNtayNXKpGKRpEliZ2j\nnTz68nFMXwd+v596vczS0sXXYH+nlPO+Gtu2Qa0G4+Owbt1P3/8XHUmSuGL3bq7YvZuPmybf+c6j\n7N8/RrXqw/MMbrhhM1deuSZV3d7ezlU71tGp6zz83MucnKsR0BQKtTzdfTeiqDqV6SPojWUkx6WJ\niiho5PFQXQWXFqZZJawKiI5EydVQiCEh4+GjQYW1NEYZyFPAogcPVRAICCJ5UWTaC+CQpuUmMD0J\noVWhZk/i2Q4BOvH7fNTNCglKbBZlxl2bIiZNr4Em25hKkqIlgLNChQCulMEvLqF6VVTJoLd3mH3H\nj1GVM/hMh6AaI18r4xfqDAysLVtFRZGVpaVLGoysWycxOztOT0+cq69+z+s+BD3PwxQkziqin6Ns\n1DFaKi+//BKy3ImmtVEqlbGsOS6/PM1v/dZ91Go1BEFgZSUHQH//x9F1nSNHjjI/v0Iq1c+3vnWK\nRiNGW9sorivh801zOP8U8WQX6fQIhpFkdur7CE6Flr2E6yWxvTTYJQSniGk+juCtWSemkAggIuAR\nR6GEjUMdQXFxbQfBa9HEwSOMQh2FDHOE0CigUSVKCZcALk1cVFwUWl6VitLkmr4t2LaJZSocmZpl\ni08+7zkX8fmwVZX8ygoAqWiU3qzKgVOLVCUPs2XgFyVCfpF0uoOGZaGn0wSCQVquS7VSeUN9J8sy\nbT+RR/hWeccGI0PbtnFs7142dncjnA1Mjs3Ps+Wm1468b7nlRgqFMpOTZTo6dtJqFenq8vMrv/IB\ngsEg4XCYif37uWvzZv5heQ+61snkxEv47SqG4CIIFpZg4HkGOC0sxUfFbpGjgSSIKN4yq4JIyEvh\nkx1WnBwt10bAR4F2woRxBQHLU9EYoMU0LSrUhXYUL0CRIhZBIshYNLAJUiOG7Qr4/X4a5gRR28QS\nJOxGk1ggyUAsg1ETiUQ6+NVfvYtHH32GfH6aVCpEMOhxzz2/jOu6PPLI8whCB9WqTaNRQJZnSKe3\nYRgFfL4Q7W8hqeL6d7+bh770JZqmSTISIRYM0N4l4sZl5uefIRr1c9991/Hf/tubvsTPzLFja1oc\n99578a55MRFFuPtueOihd7aT76uhqirvfe+d3HRThWq1evYt+cdu0plMhnhfHywvs2VwkIAvSDSU\n4IXj+yjYfswypHxd6IpLSDAZK05StHUEJ0laCDHnrbIeE9u0kYU6qqiw4AokRQ3FTXIGcCihUcfF\nJoZHRBCY8TzOeC66oKJJXWhqG81WC1FI4Ylhms5+IoSQZQWfX8K2HUpOC79bYNXVgBRBKUgy0UXF\nmKfpyrSkLIK8QtRfx5I8ym6BsJ5kvF7GN7qJLi/F7JkFrNoCfTGT7dt3kUqtvYM3XJfgJdb/uffe\nu9/wvu3t7YTaskwVFulPrC37uq7LVH4eR5Lw+0fR9bUlHkUJUirBsWNjpFKpc5/5X8qV79q1Vjky\nPz/P/v0TxOM3IUkqMzP7WV1dxXF8LC7+EFVVUZQI4fggq6uHcc04shzGbjYRhCiu6yIKIhJ1TAwM\n5ugUQ5guWJikxDrzgs2KNA+eH8+2sVEQ0NgsqEgSzDsKmhenjokPsGgwjYOPIi0kDMHH6ODl1Iw6\ne06cBH+C47MmNTlHfzCIfnb2udZs0jMywplqlVypRMDnY6Azxbw5Q93UqJotCs06gi5haxrb+/tZ\nchxKtRpLlQobb3hT9SZvmXdkMAJw87vfzfcsi+cOH8YvCNQ8j5Hdu7n6dUw7/H4/H/vY/Rw6dIiH\nH36McrnByorL17/+He6442aOjo3RWFnhqakpWpUljqwsIVotooqM5RSZs0+QUWPEdY0zpRqCm6ND\nrJMRFKoYBIQWeHOoUgvJ8+jyGkwRwEZGIkqLBC0sErKOhEDJ9pDpolfppeE4GI6NiIuOQQUJER1N\nkFGkNF5zFRudFCbtno4iyUyXpnmyukxy6Cpq1VUeeeAB4pqG59aIRGXu+7VfIxgM8m8//wUaJR2z\nIaCqfhzPo9VSWVp6DE2rc999X3jTQjYAfX19vP83f5OXnnmGI3NzxLu7+fT999Pb24tlWSiK8pbO\n/2Z4p5Xzvhr33AO/93v/+oKRHxEOh19VbM8wDEYvu4wffPvbVG2D6dUl8pbA0NbLefbZZ7GaUST7\nDAGWCRgGo5Qos0LRK6BKfiSnwZxlEBRk8EwKnownWhiiwrJr4+EjgZ8QLikC1CjTxGSLILPXc1j2\nNCJikJyZx3aC+OVJZKlJzVJwaCI4AnKlSY+qUHLCTJnLOHQyEE0S8vmZbTj4tS4CwhlQQwz3p1ld\n3ccV3VFyjTCTlkxOSOLV44BHNOGnQYu7brueznQagJViESMQYGho6IL3g23bnD59mqWlFeLxKMPD\nw28qcdbn8/ErH/8QX/rLr1HIzRJSNEqmgZmMEm/JVCotisUFqtUahmFjGAUSiTzj4+Os+ynTg4uL\ni1iWjqoGOXjg69RXLcJSjIzSS7G1RNM4Rm/vjayspKjXI4RCHfj9QXJLE7SMSWAIGxFZENC8BKro\noIpzJBQRyzTwISAFAiiKi11cQBNlLMFl0VHRvQEkx0LxCjjkSVMjwpqQno8mqwjUEPEkj1pxjj2r\nkGq/glBUYnTdNTz96Dd54OA4dw53U7Nt5GSSVCJBx8AAB/N5tFaLkcsv548++1n+zxe/yPZYDFEU\n2b9nD/byMmFVZaJSYWJpCbWn55yq9utRLBZ57sknOXHgwDkF1iuvueYtJUS/Y4MRTdO45/3vp3jz\nzVQqFWKxGLqus3//AfbtO4bnwfbto2zbtvW8/JFGo8EjjzyHJI2wceOaUNrs7Ax/8zf/SDV/hurx\n42zOZEhGgiwf28OKa1FHJiSZrJMXcUQPn5JGlPL0i2VUUcJAQpZlehyHk6JH2h9AsE1ynozYLCJ4\nNgJ5WthonoYlO3jiMrpbxCZFwamjCTquJCKTIeceRfHCxAU/eNC0LaIU8XAJiBqOZ9GwTARBRVU1\nWrbOnqeeZ/1tO9i5czuCIHBmaYlvfOUruKLI6b3jXNE7xNGpZcqtCi1BJxBOYJoTXHXVRq688srX\n+je/YTo6OnjvBz/4iu2XwqCrVFpz5z127KJf+qJy3XVw+jTMz/9im/+9nRw6dJhvfOMJHCeE53XR\nCAhcdkMnjUaASKST6GGFyaWn6cNgJBDCcBu0AQtNnZJnsOgK6B4k0NHRaXkFMiLkhDPIOAhiiKxb\nIYpJEBk/Kroos+ou0VRkkqJOXoviuQrN+hxpigRsCdHWSOJSFAQCwgAy4FkWgmCi4Mcva7hOi7zh\nIXopQv4MueYkQquGXQ6SUdpICE3i4TBOocbU0lHimTXtHlGssH77Zk5bFouzszieh5JI8Msf/OAF\nr6ap1Wp8+cv/xOKijSxHsO1ThMPP8NGPvrkpyXe965eIREI88shTFAoVtnakGR3t5Q/+4EtMTh6j\nWhWwbdB1j46OBH5/lq985Tt87nPJ181rSCQSaJrAwsIhGrlVuoPrcd0WVamKT2pSyE1zUvoGiuJD\n0xwymSzlpTEGfAVyVnjNpJU1F3cLh6gSp+osM6xCXlA5JYCs++gPhahaNp0olOs1PFqMM4foyaRp\nkqFONy6zrHnSpIE2PAzBpSB5nK5UcGWXYu4IKhEqqzI33vF+9r30IPtlkZ6uLlaBernMxsVFQqJI\nyXEIRSLYtk0L+D/f+Q6jfX0MrVuH1dnJ3kOHaCQSbLjjDnbs3PlTl+RrtRoP/PVfE280uLqtDdtx\nOPXkkyxMT3P/xz72phOi37HByI+IxWLEYjEcx+ErX3mQvXuXsCwNy3I4ePBJdu48wa/92n3nsoDH\nxo5Srwfp7v6xQFo63cPY2CQsTtETDBLVdeZrNa7OJIgtLzNrNRGAq3w+5s1FFuw8eqBFueXguJAM\nh1GsIFXDIRSQmNZihFoeirnCFskhTJ0loUDBFci7NTJug17dR84GlBanzCWc0DpSup+looNpCKQQ\nkbHWhKeFIllPxMCjiUZQ9ai7CrqcJOxVOTM/SVtkkH9+6hQ+ReaGy7aSjkbZu38/puuSjkRIhrM0\nGgaFikm+XsaVRWKxEJ/5zEeYmpqi0WiQSqXo6Oi46LMYbzd/+7fw7nfD27jc+XOJosDtt68t1Xz6\n05e6NW8e13WZmJjg5MlJNE1lw4Z1b2itutFo8O1vf5v9z7+MTxVZt3kzew/O0dt7DZq2NuA2myOs\nrOzjwx++juXlVSQ28tw3jzNQdRgMa5TLAcYLLSaaBqIQouiGKNHGiuAhMEW7LLLJpxOyWrS8Gep2\nkAAmDgoIEoqiIAkKMSFIuL2HhlGlC5lKfYWUmKPD8eOTY+RtgxVMskCJHBF/G5WmQcFaJq7rOIJG\nWrAYrxu4apRWy0BWQPH5kIGKKVCSZEYHBmhJNWRkRq++Ap8vQCAQYWbmMNfduZY3Jssy2Wz2onyP\nH3/8SXI5Hz09I+e25fPzfP3r33tT5xMEgSuu2MWuXTuxLAuAP/7j/8nIyGXMzLjMzxsoSgDHqSPL\nBsPDI8hyB3v2HOC22167eLOzs5OtW7v43sPPobk+LMtgoTKFS5L25AiVVgPTzJNINOnq6qFaWSZh\nrhLS4tRaAuCheiC54Cgqc3aVpCJyShUpyCK93d1MlcucMU0c16HatCh4GiYZNPzIeFSpotEgg4MK\n9EoSDc+jXRSZ8gALqq6N6koEzBY7+npxTYv548fZuHUXn/zk7aiqykNf/jLDkQiyKJKMRGjzPL70\n//wZWC6Xd7ezPp1m8cQJjhw9StfGjQzdfjt3f/CDhMPhN1QNc/jQIQKVCoNnxc1kSXpbEqLf8cHI\nj5iYmOCpp8ZZWvIjij4kyYdpmiws7GHnzi1s3rz2FrG4mEPXo684vtk06ZQVol1dnJmdpVGrgePQ\nFgxStixCioKiKPgNA9dxsFebZG2XsGfSrFqURYWaGMbzdDLdl7E8/gxdZole2aXpiZTtEk00/NTQ\nLVgtzaCLoEt+2j2LGbOIP92P4izgNnKAg+cV0YUScU/AxkPFBlEC0SMaSJGvV3G1GNn0FtqCHTRa\nFb794hjFmsFSwWZiLo8om6QifjS1wvDQAKVyidxqibzc4tY7ruWpp/ZSLsusKaVW2LAhy7333v1z\nJ5j0RvlROe/XvnapW3JxeM971tRYf1GDEcdxePDBhzh0aBldz+A4Nk88cZg779zF7t1XvOZxy8vL\n/NvP/yGLp0okAwmgyr4f/g0EUnR3/3imz+cLIAhparUGt912M1deuZOpIwep7dvHZKHAC0tL6KbJ\nKCotTAqUyQsSVUYRlSCu7wRlp0zdamGrYFFHkCDiediux4xlEhE1JMFkpVqg0DIYjmWZLs+TdDxC\nkobrNUnhguix6oKnFNCT3RRzUwxQxpQEZj2LOVsBSaVuN/GaC6TiATraB9nY2cXi6hjXXz/C4vQ8\nPjWI32ohSQrB4NpYpmkR8vkSO3bsuNBddg7btjlw4CRtbVedtz2Z7GBm5sxbOrcgCKiqyuTkJIah\nsnPnDeRy/4wgtFBVH7Zdp9lcYuPGOzHNJvPzK697PkVR+MQnPsjY3hdZLFdpuSKiHCMRypLOpJGL\ny3Su62HdugShUJXHHv4Bop2jUl7BcGUC2hA+LYZZXUJWJep2kyhNbEuhAew/PUGH6xGXVfKGhYtL\n1VXQULFxkGgioFAiRYV5ZEBxXRygAVTwEZAyqIJNNpKk6cocnphn52gfaddmevIww8O/zdNPP82+\nl45zyt8FqBQrR6k0DJqVGgMBP0tulXhc5aZbbyVXLHLKdYmn03z5z/4Mz/PoW7+e6971rtdV552b\nmCD1KjowUUlieXHxTQcjPx8F5heBI0fGmZysEYn0EI2mCYXiJBK9VCohHn/8qXP7ZbNJms3yK473\nPAs1oLNl+3YGd+1Cy2apKArxnh6uvOEGIl1diJEIq4qCT1UZFUR2KBob9QBbfD5GdR+q2KCqSPj9\nZfyhJiG1SUiGVX+GdGA9cUWlXW6iUSKKQFKwkew8XbpDlCUW5scwGpOEaSIzTYscsufhYGHjskSD\nkOaiqhKm52FKftx4G/FUH4ZpIUsqlTo8O1YnFd1KMDJCJLwFjyjzuQPkK7OIigdanf4BBdO0WVrS\nUZQ20ul+uruvYGysxIsvvnwxu+5t5Xvfg1TqnVnO+2rcdhvs2wdLS5e6JW+OY8eOcfBgjt7enWSz\nvXR0DNLRsZPvfvdFisXiqx7jeR5///cPsjID2/q20ZPppSezHlHowcyvMDM5dt7+iuKjXjcAiEQi\nfOrznye4fj3jhkFQEFivKGiCRBKXQSz6vTyydwRVDNOww7TFY4gBP7uGBhjSbOKuQKcaJStqdHgW\nOSfHtG2xZJkk41kk16ZHVfHJCmHNJRmS0FWbrE8mEBDRNJdaawbPauBTfPjlFlV7BkPWCIeTiNIy\njrBEIDlIIBxkenmcgU6dbDxOJBLAaNZoAH7/jx8Yplkhnb648u+e5+G6LoLwao+Zt2dWxnEcQELX\ng1x11S10dibp7AzT3z9Ib+8IPp+fer1Ie3vqp55reHiY3/l3n6Wt00XUG6RTGTKZDKbdwqbC5s0b\nsW2NWCxMui2KL6AQC0fpCPrXtJ9Ui0gmQUUu0xkss7GjnRU9woobQrV0goqffn+UYclPxRFJoJ71\nO8rTRpU+ysi0WEagDuQ9DweY90REOY4puIS0EJY9g6J4lKoWp86cZrV0iu6Ujud5fPvhH6KI/bTF\nh0hFOshXNEq1bkTLIh5IkEz0UCx65POrDPX1MTM2xvJLL3FVWxvXdnTgnTrFA1/6EvV6/TX/T9Fk\nkuqr2IMYbzEh+l9NMFKplLDtNaOhn0TT/MzOLp77fePG9fh8ZfL5NVncVqvFsWMH8PlakExSMww6\nOjrYdfXV6O0dHMrnScRi3Hj99YzLMg3ANk16FAVLlLEkAQnQLAOfIiAFVNraEvSNbkJJpij5NUwp\nTjqepqezF0d0iKESU5OoUoiMpiJbBVp2Dr9YJugJBNUsjrQenT5aZMkRZJUqiiIzicAZCWbFOlOS\nQnLgl8hk2zHwKNWWcR0BSUyyUCySHRjAF08QD/WTigbZvUGjI1WgZ0Dg1tuv5bkn9pMfn2Tq5Zd5\n7tFHmTh9mra2YZ5//tJIt78d/Kic918Lug533QX/9E+XuiVvjoMHx4lGu85bUlAUFYgzOTl5btvK\nygovvPAiL7zwIidOnGBiYoWUHj2nRwHQme6h2rBYnZ847xqGsUJ//4/9NLZt24YTj+MoCt3hMC1F\nIeK5hNAIoZNFIEaNZnORZtNhxnTIdHezalms01VcSWLOXKbpVZBkE0nyqKlBiqaN0PIzV2hRbRi0\nRA9FElEkiXA4jOZTKIgCDWWQmtEBwhAnhGGWfBm2do4ymIDBXpltGzq4/c6bCATqiPoCgWiJZFhj\nYmGBQCTCXGMeLdWFpq09oFZWZgmFGoyOXtwab0VRWLeuh1xu9rztlUqBWOztmVnt7OxElmu0WgaZ\nTIZoNEyl4lAs5kinM5TLeVx3kcsv3/aGznftdddxxwfuoLPNw7AXKNbOUGvOsn7zANPTC+zZc4jv\nf/8AM4sBliyBrp5NbF+3i6uG1hH35YikDdb1+Pj0Rz6A178VLXs1CWUDQXk9C1YHh1s2juASxaOO\nhY8Gw/gJoBFBZYS1Sk8HeAmYkiQKsoKryEwLIqFID5lgCNGdorS6h/LiM8SNU3hOi/n5eQQxjqP4\n8TyXSqOI64aIBNpYrZvooTW17FAoyczMEktLS9i1GqNdXciShCiK9GazBKtVxg699hi/+bLLWDBN\naoZxbttKsUjjLSZE/6tZphkc7AeewLabyPJa0pZtt3DdFTo6hlhcXKTZbJLNZvn4x+/lW996jH37\nXuL48QlCoQhDQ+tYqOd47NRpwrbN+LFJCq4G3Tv5/vF5Ojvq3PbJT7Lvu9/l1L59+CUJ2RZQZB+u\n6yK1mtgti0zAYYO0xHKwyfGwD12XaBZElho1zpTPoLaqOGjgykiSREAJU7Nz6IqOKEepmkV8Sh+i\noFF28yheiwBB6oTpD0HW7+egaVFRk9S8MNPTOSxLIJTQMAMS83MOYU0itW4dA0ND1Ot1Du/ZS27B\noCZAqLedd73rXXzna18jrQboiq+9TTmuw5kjRwiGgryNqv0XlWPH4MiRd24572tx333w+7//zgrC\nPM87F6A8+eTTPPbYfiRp7e23Wp1ieTlHkPMTFsORMIoKC8vT7NvzIqpPQ5abbN4cZ3Bw8Nx+siyT\nSiQopdNrwlG2jSYIeB4ICFiALPiABVooCAp88oMf5J8efRSjUMI66+YrKRqyoBH1XFxJW7OPlxug\nS6yik3IsZqwmOi7JZIKj5TpFIc5gdgil0UBKaUiui22VqNsm3aE0VU+ja3CQoB4holdx6scJRHRO\njo3RbDYpaxrXfuCX8QQ/MzPPAh59fSnuuuv9b1or6K1w66038Nd//Y/MztYJBhPU62VgmY9+9G5+\n93ff+vl1Xefuu6/nwQefZG7OZHV1lVxuDttewvPS6Hqez33uN86V9v40fD4fH/v0p+lbt44v/vn/\nRhQCjGzaRrlcZ2pqhVBIBkL09W3hpeUF9uUX6QkHcRGoqiE2b7uBxaPfJ1c1cOmgXl7EatpIrohi\nB1iwSkSFNddmEwNVCGFKIj4bRAEaNMgoCqueR9h1KYVC6KrO0YpKtnMzkXSW6uwsXUqUTLTB7aNZ\n8o5Dxbb54z/6Iw48M0az7jKuhBjsGcFyLGzXwgm10RDs8z7rsakp0tnsK8RBk8EgS7PnB5A/STab\n5ZYPf5j//xvfQMnncTwPNZnklz/wgbd0j11Kb5pPAB89++v/63neBV3F37p1K+vXp5md3YsgxAEB\nUayQTvtYWVnhi1/8OqKoIooN3vWuXbzvfbczMbHAHXf8KtHo2o3cahlMTT3DyeIS7dd8mMuyveh6\nEM/zmJraw8bNm6ksLCCbJrUTJ+gWBIxqg0qtznytSk6A0VKAsf1HEMwmctVgT6NKob6KRI12ycUv\nRFn2DAyrguwJLDgqrhal2bTwZBfT9VDxk/D5MZwqcbdFTHCYlZIMbWvHKrcY8YK0X/te5ucnOH58\nhUKhyi/90i4CgW5++MNFrrzmKrq7+5BlmUgkwrYrdlAsetzzG/fR1tbG0aNH6VBVlnwmlm2iyCqS\nKBFTVcaP7uN9H9h+IbvqgvEXfwGf+hRcggKeS8pNN8FHPgKTk9Dff6lb87Oxdes6jh59jlgscy74\nsCwTQSjS19fH3Nwcjz12kK6u3UiSfPbv/Rw+/EWago+4oRPS19wC86VlFNWkM+pgzz9J03WJtsfY\nvv23ztMemp6e5sTJaYpFA8MUWXVEgoCDQQuROQQUSUYVPFSfy46+Pvy6zs5t29izWsItLqN4YQRb\nwfJa5CWXulWkUzIZUVXaMx0cyi1wurSCjkssm+Sg52EmYwz7RhGQ8XSd9UNDVOt1Tk02Wa0u48kp\n9HgcuVQiUK/jlmYJ1aZp87Ks272bTDZLo9XiSD7PJz7/eSzLQhCE1/R5mZ+f59jhw7QMg4HRUYaG\nht6SnPerkUwm+cxnPsKhQ4eZnl4ik8mybdvr5yT8rGzbthXLMvnjP/7fDA93c+21W0gm23Bdl2Lx\n6M/8gAwEAgwMDbFt5xb27j3Fiy9+l/n5eYLBLO3tG1lYOIGqTpFt20SxaGB1D2PbBu7cXvLHX6Be\nXuaJFxq4jCDLYVpKFctoYjlrZnhNsYghiLiegeq1aHpNZKGJKjbJSiZeKERvOMxyocBkPM6Oa65h\n2PLT1X0l42MnmTtxnJpTIK2u8NTJHNuuuIKVqSlqp0/TbfnwSR3kygX2H3oGIdaNqgXpH76MfMCg\nVljEreTI9MUIdnXRb5qv+Pxlw6D9pyiqjo6OMvj5z7O0tPS2JURfypmRRz3P+1/CmmPRi8AFDUaC\nwSCf+tR9PPDA41SrApKkoGl+FhdPoKpXU6/XmZ2dBjwmJx/k5psvQ9O6zgUiAJqmYxgBTFOnr2/j\nue2CIBCL9XLo0EluuOsu8gsLLC8tYRSLuFaTBaPKjCTSFYqTcmHl9CQrgsRQxxChlonjjxNuFmlX\nMxRsky5bouyWcAUZPwpN28JwDDZlk5xZqFE2GriuREt0kRSJGhJdPRHWDQ2yON3ARSSRSDI0tJGe\nnqMcOLCHqakfEA4H6Ozs4Omnf0Ak0smOHVuIx0Pkcke57753n7N+bjWbBBSFazb38IP9x9HVDjTF\nR7m2jKcYXHfdZy5kV10QikV44AE4fvxSt+Tioyhrs0EPPAD//t9f6tb8bKxfv56tW09y6NBL6HoW\nx7Gw7WXuvHM3sViMvXsPoKqZc4EIrC3jbNq0m/n5MU4X51ELIo5Vo9SYZNtQNx+++WYM00SVZUzb\n5smHHmJkZARN06hWq3z5yw/ROXILS6fnwecw1Vqkho0CFAQbCz+K2CTb0UM65K4JFZomiqpypC7i\nihohDxRJY8FzKJh5BgTQXY+juVkqjkl/KErBtQlv28Bd77mdyRdeYDCZ5FtPTlMteMwtl3iqepK2\ndIy+vm7C5SKOpKL7ddx8gVPVAvX6QXb1ZeiJRDg1NkZ7RwfRUIhgocDk5OTrWje88PzzvPyd79Cm\naWiyzFN79nBwZIT3fehDb3tyeigU4uqrr+Lqq9/W057HykqBTZtuIJvtPW97pZJmfPwk6bP6Km+E\n2dlZvvKVR0mnr+Z977uNEyf28sADD6Lr28hkRpBlHxMTM+h6DEmaxTCSrJx+nkR9mZAXYNe2LTy+\n/wjLS2fo7bmMnKLQqJbwATY1pt0GBSQk0UdalvCLJlqriAaYop+phoffdXD8EX79c5/jU5/5DDDO\nAD4AACAASURBVLlcjv/xF/+TyuKztEWL7MgG0ZVBBGDi1Cnq8/N0uiK+dJalskFbOEWtOM9kYZpg\nuo5SW0exoaDG/Wy+cRcf+tC9dHZ28nd//ucsrq7SdjY4LFQq5IBbt/30ZS1FUeh6A8Z6b5RLaZQ3\nffZHB7Bfb9+3i8su20ZnZwdHjx6n0Wji88k88ojA6dMnqVaDhELrcV2H2dnjfPWrD7N79/tfcQ5R\nVGm1XtlcSZKwLIsNGzcS/jf/hicefZS9zz3HgeefRwsmCRUKXBcMI9gOAUEjJcJ07gym45Lwp0nj\nogk1fDJMenUyePjdFnO1OeYFkWu3bCLeJmEToTq9zEqrDoJI0/ORjSts6PUT9fs50SzQimdJJNoQ\nRYnBwS2EQjHGxh5l06a7iESSzMyc4MiRQzzxxNe4+ebtfOhDt7Nx448Hro7OTvZ4Hld0dxMNBhib\nWqBaX6E9Y3D7r3/0bX2ruVj87d/CHXfAG/D2e0dy333wm7/5ixeMSJLE+99/Dzt2THLq1BSqqrB+\n/XXnSnsty0YUXzmMxeNZrrqqnXg8yrFjp0gmI8yeHCfbavHVH7xEzQCwWd+bwBf2Mzs7y+DgIMeO\nHce2YwyPrOfwoZeYrY7TbAlMt3IEXJOI4MMviOSxSHgFbr7ylxhfWWF/pcLhyQVGrrqXZ595mtLq\nBCFcTLvBRlza8WgKMqOpTqZdh0osTbvu57q7bsMsFrlu3Tqa9TrF1TP4fNvozoaYWF1hueQxufgS\n1+xI42+P8MQj3yOjRulKxWjJKfJLZWKahqgo1Go1IpEIMpwre301isUiL33ve+zq6DjnGdWZSrHv\nxAmOjI2x7bLLLkBPXlhs20UUX6msLQgSlvWzPV5efHE/Pl8PgcBaMma5XCEW66Beb1Cv14jH+6hW\nV5mbO0VHR4x8fg/BxhQbh9bT3taDUa4z2p5hJp9jrllFTHZQrTXAKiN4ZcJigPXBCJNGmVUsKpJK\nWPMhyT5qko6mx4iFUiz6XFbLLTzPI5VK4TVKDGTT7M2XeObENAM+kc6gznQ+j9No0J/oJhmOIVFg\nrjhDxCvTrcl86u5340gqlUaTnG3wyU9+lI6zwkPv+9jH+O6DDzI5M7Nm4BeLcfdHP0o8Hn/LffKz\n8vOQM/Ip4KGLdbF0On0uSj569Cirq09TrerE4z9eM85kNjMzM8Hy8mna2s63RFZVk0RCwrYtZPnH\nbxCFwizXXLOmXNfV1cWv/vqvc+udd/K/vvAFpg6eodww8FwP76xssIJLxHGYAkKijiMo6JJKQJdI\n1loYjkldEIjLPtKeRWVuhuHuTq65extf+s4TTC42UMO9hMM6ZmORhVKN8VKRKVpcs+u2876YMzNj\nBIM/nuXp6VlHd/cICwsTbNsWPC8QgTWBsq5t29izdy/9iQQ71/Uxu7qK29bG7rdBAO1iY9triatf\n//qlbsml46qr1sTexsbgDQgs/lwhiiKDg4Pn5XX8iJGRAZ555hE8r+fcNLHneTSbS1x++e309fVx\n0003AvBHv/d7PL53llR0lEwsgOM6HJ2aQVAmuNnzAKhU6kiSjiAIbNy8i4UlmVCii3p9Ca/0DJ5t\n4Xo1Losl6Mvq7DlyhN/6r/+VXVdcwT/8w9dZWAhw8nSBVWWA4vJjDODgkxWano3qubSqJbqTbSxW\nVgmkgmzdupUffvObRLu72Tt+mh19Q5zMzVJ3VFStQqW5TNyv0ReJsLQ4RyqgceuGrfg1nTO5AI25\nUywurKJ3JFFkGdtxKLKW2PlaTE9PE/W8c4HIj+iOxTh+4MAvZDAyOjrASy/9AM/rPHcfuK6Laa4w\nPPyzjVlLS6sEgz8e9xuNJp2d6zl9+gjFog9V7SYWa6fVmmL37gjHj8RZ33kdnYk1y4wgUZxci03t\nJebkVepNlYhWIaGU6HfjBMQmIRxEReOML0ikvZczxXnMUIakFiao+WmGQuy8/CYajQqLi4uYpskL\n+2bpDw8TUhZJCx5yy8aUDHpEkVOWRU1WkVZmoWUQaNVwadH0JHRJYnh4rdx2fHaW2ZmZc8FINpvl\nY5/5DKurq7iuSzKZvGQuzhc8GBEEIQM88C82L3qed78gCLuAW4H3vNqxX/jCF879fP3113P99de/\nrW1Lp9Osrs6jaTvP295oVOjs3EAk0mJq6gCp1NqNmc9Ps359jIGBzTz++Mvoegeq6qNcXqCvT2Pr\n1i3nnSccDiMGg1h2nVQkQa5aIuA5mDjUHAdb9xH0BbDcJsuAKloEZYgJLp7oYes+0pEwqmUyKUmc\nGhujq7OTgc6tjPRmaAQ1+gYHzi43zdC3I8zwDRpHj84gihKSJJPLTROJmMjyELVajUqlgqIoJBIJ\ngsHoq5ZHCoLAHffcw9jgIGMvv4xlmozcdhuXbd9+wRUbLwTf/CZ0d8NFlFj4uUMU12ZHvvpV+C//\n5VK35u2jr6+P7du72LfvZYLBtQG2Wp1j586eV/iQ1ByFph3Dr6351EiiRDTUwdjiwjnzzK6uNkzz\nJNBPb+86kskXWVhYguYyw7FOAoqHqji4do14VyeJZJJQOIwkSWzduo7jx19gZKSPZ5aeJ6xGUQWQ\nXANHaBFWQBFauG4ZNRhn841Xs3nzZg698AKrlQqNRpN4KMa1iXaWSjnyRya4adPNgMVQjx8OnmBB\nDzA+d4pt/Ztoj2UZKywzvjzBup4sJcPgyOnTZLZswTCM85J8f5LXWtv3PO81fbt+3hkcHGTLliMc\nOrSHUKgDz/Oo1ebYvXvgdQOzV6OnJ8uBA/lzMyPpdIpq1aCnp5+2NhfHmaCzM8rAwBY2b+6j2ezA\nmDp83jl8vgjtmTjBdAeVikm95hD1ElBZwLUbIPmpOy3Qw+zoa6ctoiGP3kx79wiu6xCLZfA8gWPH\nnuXo0aPMzKzQ07+b3OlTtGsKbdlOKrUa87VZktkMKVHi+Mo0WwMRkj4/hZbBkiDQ7fczf+YMw8PD\nALiv0seCILwtrrtvlQsejHietwy8wnlHEIQO4E+Auzzv7GvJv+Ang5ELQSqVYsOGbp55ZgJFCaIo\nKtVqEUGo0NWV4p57duA4Hvv2HcV1PW6/fZQdO7ajaRoDA30cPHiURqPJ+vU7GR0dfYW0ua7rXHXr\nrZw+dITVfJ71yQ4KtSKL1Qp1SUUKp+nr2Myxif2YgOdT8dcWsF2DZDjA5q4ufJLE/MICuC5xQWB8\nYhIYYalSwLRkCvsaAChKk97ePn77tz/F/v0HePnlMWzb5eabh0mnL+c//scvcfjwMqADFoEAdHX5\n2b371V+T1wbXrWzduvWC9sHF4E//FD7/+UvdikvPRz4Ct94Kf/iH8Av6zHkFoijy3vfeyaZNpzh0\naC0haMuWmxgaGnrFQ9enR9AzOjOreUKqiuk4VD2P/vU7aJ7VTRgcHKS7+yVmZo6QyQxw881388//\n/DeUKzOEgim6sp2Ioksmo7J79+Ucm5k5tySyYcMGNm06Sa02RVgvEjU8amad/kgQSYtgegaSqrAi\nQt+Vu/nQJz6BoihcedNNPPr3f48e8rFaKDG3NM/+6UWqdoITM2cI6DUigavx6Rqb2gfYm5/ncGGJ\nkCggxzPMihaX7djB42fmMcU4tWmFib/6Nr29Ie6//73nmQTCWgD3hCjSNE18Z8csz/OYLha58rbb\nLnSXXRAkSeLee9/Dli0nGRs7gSiKbNnyLgYHB3/mxMrduy9n376vUij4icUydHUNcPDgA8RiXVx1\n1c3Ytsni4km2bOkim01x8qTFfCDKYjlPJhRHEASWy3n8w3184jd/nUcffZKnZg4QFXW8RB/G3CkK\nTgsr7OOGK7Zx45VXsOfAYV6cO8nWHTcBMDk5xaFDJ6lWTxIM6hw69CKXX34PK4vz2HMGBHX8AT9N\nIcHWK7exd2KWH/xwHzOWgiJVkUSLtD/Exp4O3EYDwzAQZZlVz6P/Ero0vx7Ca8QBF/7CgvBXrAUp\nC2c33eZ5XvMn/v5aMcrbyszMDP/hP3yRcjmO4wi0tSXp7EwC0/zO73z8FV/kf0mz2eT48XEWF1dI\nJmNs2LD+vGNc1+XZp57iT//T72NNTRNVNbr6+2iIAsfKTWJtvQwOdTKyro9isc7hZ55Am5qkp9U6\np3J3YnWVacPAVRRivb2cbsSpE6ar65pzLpVLSydpa1vkL//yjwgGg+e18eWX9/D7v/8lbLuLSKQX\nUZRZXj6JLB/jP//n36Wnp+cNl75dSARB4O3u8xdegA9/GE6efOc8gN8Kl18Of/AHa0HJzwsXot9f\njb/7uwdYWPBjmlDI5dB8PrLt7RSLY3z2sx88t3xrGAbPPvsCe/YcxXFchoc7OXF8nKkfPEF/IkV/\nfweDA/0IksQLs7Pc99nPkk6nWVxcpFgsUiqVeOjr32Ll2eeo1Q18zTqjmRQdmRT75ubQtm/nP/3J\nn5w3Thw7epTHH3qIB//xYZbLKZLZHViVJrLYRJVn+PBNg8RVmRf3nCYXzrBx9x3U62VqtSL9/Sod\nHRn27MnT3b225FoqlRgbe4nuboff+I1foaen57yH8oH9+3nqG98gJQgokkSu1aJ961buet/7Ltrs\nyJvtd+P/svfe0VWdZ6L+s/fpRaeo944khCii2phmwMYN9xYnTtwmmbEn8SRzp2TuFN8pmcnv3jUz\nSWaSiRPixHFwwd0xxsaA6SCKQAj1etR1mk7vZ//+OEAAgY1tIQk4z1qshbZ2eff+tvb3fm8NBGhq\namZkxEZmZhrV1TPRarUTKlt/fz+bN+/AYrEilwtUVOQhSQKtrRa0WhVLl87j+uuX4Ha7+c///C06\n3Qz6OxsYG+wkGgkhaeP84w//nsrKSuLxOJt+9zv69u8nZHNga28jxWhAzM1l3Q03oFQoaOvp4d1m\nC9Xz70MU1WzffhBRjFBdXUBR0Uy2b38HpxNuu20Nh7b8mhJBhVqlJi45ENMNnOwTsfkETKKI3elC\nEGzMKhLIVMqJBAJULVqEV63m+vXrWXLdxSsXX25OjfkFtcMpU0Y+i8lSRgCOHDnKu+/uIhbTAXG0\n2jCPPLJ+nJn3fJxOJxs2vMLYmBqVykgo5EahcPL44/edyUw5TTAY5FBdHe3HjxOPx5m5YAFz5s5F\nqVQSDofxer2YTCbaWlv55b/8C64DB5ibloYrFmNUFBO/i0S47RvfYO+BBjo7jRQUJJreBQJeQqFh\nKipSePjhhSxevOica//oR78kFMpndHSA7u4uAoEg4UCAuH+Q25YUokhJoWjuXG6/554pLfN+OSal\nBx+EZcuurhobX4af/Qx27JheRdAmSxnp7u7m+effITNzDjqdkVgsysBAC1VVWr72tU8vPhMMBtm4\nYQP092NSqYhEIlijUWasWMGK1at5+9VXsbW3oxdFPLEYuoICxiwWakwmrGNjdPf2MubxEE1L429+\n+EOysrLGXcPv9/O///f/w+vNZnDQRn9XJ7PzMslKMxAINfGNm6/ng1276RAMlJQvIR4PkJEh5667\nbubXv36b7OylyGRy2traOXmyD1HU4fHUs2hRFcuXV3LXXbefo5DYbDZaW1oIB4MUl5VRXFw8qX2n\nvsi422w2Nmx4Dbdbg1qd+Obq9T6efPLBy7KgCoVCyGSyT015bm5u5rXXPiQS0RKLRZHL/Xz1q3dQ\nVVWFJEn4/X4EQeBkYyPHDxxg3/btVKSns3zePNRKJcFwmCMDAyy+6y66uwd5992PGRsTmTfvOtzO\nAFaLhZh/jKMdLVRUzqaqugRP6yGUPhd6Q5xWtxxDzmLkCiX6QACTXo8v6EEh76a2PJfDo6Pc88gj\nVMycecH3bjJJKiOXQCAQoL+/H5lMRkFBwSVNyhs3vkFHh0R2dgkOxzCdJ/bgGOhErQ3x8FPfYPW6\ndRe1rEiSRCgUYuvmzXQePYpaFAkKAvNWrKCguJh/+uu/pufIEXJSUjAYDAhGI4tuu42v/9Ef8dpr\nb/POO+14vTJAwGhUM3fuTMJhN4sWGcY1hPrBD36CwTAfpTIR73HkwEFioyOEI0PcUJNCSXY2FoeD\nsjVruHHt2i/9LL8oEz0pdXcnLAHd3XCRUgvXHE4nlJQkao5MQcD8BZksZQSgqamZ99//BJcrgiBE\nWbiwknXr1lxSLFR7ezsbfvxjRjs7kclk5M+axRPPPMPhffvwNTYy81SaoyRJNFosRAsLCdhsyAMB\nEASCSiWLV6+moqLighOnzWbjxz9+jfz8RK+Crs4uehqOk6pSMeZtZNWCYqTMTG594AGsVivHDh7E\nNTREJBTiUGMPi1c9gSiq2LbtEGZzCaIow+k8zOrVa7DZWnjiiZu/VIXMieaLjPuvf/0yfX1qsrL+\nsNizWvvJzvby5JNfnWgRL5lQKETfqUJhBQUFqFQqWlpa2LV5M36HA+RyZl9/PStWr8btdvPOK6/g\nHxxEIYr4ZTKWrlvHkuuvB+Cll15nYECL3eZm8MQJUnU6YrE4/dZ2guE+RHNuYuEbsJKiVtDnSeWm\n27+BwWDg6J49pMTjaFRKhux1LFs+h9u+/vVpM+6fpoxMh2yaaYFGo/lcAxYKhWhu7iUvbzkej5Om\nve9QIlcxK38GVlsbo/v38/roKI9+85vnRCf7fD52bd9O8+HDNJ08SWY8zs3LlpGi1xOJRjm6dSua\nu+/mV6++ytGjR6nbvRuZKLJo2TJqZs9GoVBQUVHCjBlesrMTJkC1Wo0gCPT29pKXN/4eZs4soaFh\ngJycMgKBAI6hQcSAH0v/MQow09vcjNpsxrFjBytXr56yaOqJ5t/+Db71raQicjZmc6JfzcaN8KdX\nXrmYL0119UyqqirxeDyoVKpLDsh2uVxs3riRGwsLSZ89Gykep76xkb987DECPh+1paWkqNXkZ2Qg\nCAJVeXnsHxzkW3/1V4yOjrJ35078ra00bdnCkc2byayo4M4HHjjHvZDomholHA6iVKopLSvFYDTQ\n2XoSSa6h+o47qJ0/H7VazcfvvovOamV2Xh4C4Gru4MjHG8mqWoUo6hFFGaGQB7VaQKczEAzm09DQ\nMm0mpS+Cz+ejo2OYgoLl52xPT8+ju3sPHo/nogXeLjcqleqcbK+uri4+fPFFatLTMRUWEo5EaN65\nkw+8Xu687z4ef/pphoaGqK+vp+vECXb9/vecPHKEpWvXUllZTHNzA53NvYTsduz9/ciB0UA3t95Q\nTGNPDw9ffz01ZXfgDQT4xe/303rkMItXr2HJjTdi6elhsL+L/DlVPPTMM2RfIfUMro5ZZwqQJInT\nSn1fdyM5gPlU9LUoyqjIzcXf10dvb++ZYyKRCK/++tc46uqYazRi9vkoCIc5uncv4XAYhVzOrJwc\nDn3yCYIgsHDhQp7+7nf51rPPMn/BgjMBsjU1szCbgzgc/ahUSuLxGAMD7WRkSFRWVp4vKitWXI9c\nPsLgYAcezxij1kEGeg9zS5GJBRkZLExNRe9y0dTQQDQ6KSVfLjsWC7z+Onzve1MtyfTj8cfhhRem\nWoqpQxRFjEbj58oMO9nYSFo0SrrRCEBbayvujg5mxOMUiiJ5wSD79uxhwGYDQCGXI8RiSJLEYH8/\n3uZmlhcWMr+ggKWFhcQ6O/ng7XMrGiiVStasWURfXz1+vwcAtVpORo6Mv/6777Fs+XJ0Oh1dXV1E\nhoaYkZeHTBQRRZEV1y3AEBihu+MQ0WgQj2cYr7eZOXNqr5rFxZXEgR07KDcYMJ2K31MqFMwpKqKr\nvh6Hw5FYPHZ30717NzUqFauLisgPBvnwN79BoZCTlRWntaUOud9NplJALtm5LltkpLMTldtN/imz\npl6jYWFFNlFPH53treh0OnLyMimvMvFnf/70FaOIQFIZ+cKo1WoqK/OxWi34naOknEoX9PndmExq\nNFotKYJwTupsR0cH0YEBqgoKiMZiaGQyskwmFIEAAwMDQOLlCnm9n6oUaDQannrqK1RXqxgY2M3Q\n0F7mzdPzxBMPnUlRPJu0tDSefvprLFpkIh5vw+msY0F2hMqzunimKhTIo1E8Hs9EPaIp5V//Ff7o\nj+AKrM922VmzBux2OHRoqiW5cnCMjBD1++nv72d4eBhLaytFaWlk6PVEJAmtXE65RkNDU1Nif7cb\nfXo6Go2GI7t2UZ2bi+yUUiAIApV5efSdPMnY2Ng511m69DoefHApkUgzvb07EIROvva1tcydO+fM\nPnabjZTzFIyMjAxuWrGQvIwwsdhR0tK8rFy5nJycklNprgPU1FRc5qd0edHpdJSUZGKzDZyz3W4f\npKgofcqsIhdipL//jOJ6GkEQ0J+aE8LhMAc//pja/HwMp1z5aQYDs9LTqduxgzvuWE1Zhget2IUo\ndFFidlFo1KIVRSIezzlJCktnV7FsjgGn7QAWyw5SUkZ48sk7KSoqmtR7/rIk3TRfgttuW8Mvf/kK\nvoiPUecoYbUGudxPbe2CxIpobIxCr5dIJIJCoWBkcBDzKetGilZLCIjEYuhVKlx2O5SU4PR4MGZk\nMDo6Sv3BgzitVvJKS6lduBCz2Xzm2ikpKdx//53ce28c4DMj4M1mMyk6FSkhO3NS5HhHR/lwbIx5\nZWXo5XLs4TAVM2bg9/uvyAqrZ9PaCps2XZul3y8FmQyeeQZ+9CN46aWplubSkCTpTPGn7Ozsy1Lv\nJhaL0dTURNORI8RiMarmzWP2nDlngs9tBw4w02RizO/HZrdTYjTiisWYOWcO/RYL6SoVVquVfquV\nbr+f2x97jHg8TsjvR3teHQdBEFDLZAQCAfR6PTKZDEEQEASBBQvmM39+LdFo9IKxayazGW88Pv4Z\nKRTc9/D9iEoN27Y1EAz6GB7uIRAYZsGC/CvaRXOa9etvYsOGTVgsTjQaM4HAGFqthzvvnF7dL9Oy\ns3G6XGSYTGe2SZKELx7HaDQyNjaGMhpFrVQiSVKi/ocoYk5JwWuxEAqFWLV4AT1HjjA82E+KR06f\ndZRWvx9Bo0F+1nshE0Wy01P543vvZtnKlZ8rCSEajdLY2EhLfT0AVbW11NTUTHiPokshqYx8CdLS\n0vj2tx9n586dvPvCbyjM0jFzxjy8Xi+vvfEGVlFE/+GHNOzZw83334/BZKLnlMVDpVAwo7KSkydP\nYpIksnU6HG43TXY7ZUuXsumnP6VApSJHp2N4925+e+AAD3/rW0iSxNatu2hu7kGplHPddXNYtWrZ\nZyojnZ2dHP/wQ5YWFqKeM4fY8DBjTif7OjtZsXgxsysqaA2Hp0Xxmy/Ld78L3/8+TINs5WnLU08l\nmuYNDcGpyurTFqvVyjsvv0xodBSFIOCXy1l2660sXLz4sw++RCRJ4r0332ToyBGKzGZkosiR11+n\npaEBuVxOuUxGPDeXaDBISVoaDouFA+3t6EtLWTRnDu6iIhpPnMAZiRAuLOTelSvPZNRl5ucz6nSS\nedZiIhgO0+9ysWnT+4yOjqHXq1mxYgHXXbcEURQRBOGik0pZWRm70tPpGR6mKCvRQNDmcjEsSaxZ\ntIj09HQqKso4ebKVaDTKzJm3UFJSclW4azIzM/nOdx7jxImTDA/byM6eSU1N9bhyBlPNklWr2PLC\nC2hUKvQaDbF4nOb+fvKqq0lPT8fn8+GJRNjd0ERTr41YLE5prpm5ZfkoNBpyc3OxuFz0DwxQEg4j\n+P0ICgVLcnPZHwpR19zMvPJy5DIZ/VYrDpWKOxYt+lyKSDwe561XX8Vx4gSFp97Ng6+8QntNDfd+\n5SuTXgAvmU0zQXR2drLtnXdwDw9z9OBBZuTmsvq669BrNLh9Po47ndz71FO8/eKLzFAoyDSbkSSJ\nuqYm9pw8ycy5c8kpKuK61av55N13ma3ToT+r26RlZARXVhZ9I0GggIyMfGKxCIODbZSXK3nssa98\nalreW6++itjRQX5GBg6Hg6O7dpGpVDIQDJJVU0NIJmPubbexfOXKSXhaF2Yisipefx3+9m+hoeHa\n6877efmTP0kobP/4j1Mrx6eNezQa5Zc/+hG5oRB5pxTl06mQtz/1FGUTVMCpp6eH959/niXn1ePY\n09rKqM/H3fPm4Q+FONrUxEB/PwP9/TgiEf7iiScwGwxEolGOWCxcd//9LDiv1G93dzfv/PKXlOv1\nZJrNuH0+9rS10R1IYVbNWkymDIJBH4ODJ1m1aga33PLZGW1Op5Mt77zDSEcHIqBNT+eme+65okzz\nk5lFNRUcq69nzwcfIAUCRAWBGbW1rL31VtRqNfF4nGef+V/0NIwxs6AKmSjH7hllxH2SZ77/DAsW\nLeKbDz2EqbOT2tRUlDIZDr+f3nCYitWriRcWIobDxKJRimfOZMVNN32uZoCQyA7b+qtfsfislG5J\nkqjr6eGmJ564LJa0ZDbNJFBWVkbpd7/Lto8/JgVYeFZktUGnI8vppKu9nfsef5zNmzbR1deHIEmQ\nm8s/fec7FBYWolAoGBoaQhYIoD/PQpGfkcHmHbvJLr+FgoLTaW1KcnNn0tFxGIvF8qkfoqDPR+qp\n2Tk1NZX5K1bQ1drKcEcHgXichx59lNlz5lz0+CuBgYGE++Hdd5OKyKXw3e8metZ873twljV5WtHT\n04PgcJB31rutViopSUmh/sCBiVNGurpIVyjGKfRpajUdvb2Iooheo2HFggXEamsJh8O8uH079VYr\nKS4XQVFk/i23MH/BgnHnLikp4Z5vfpNdH35IS18f5sxMpOxSqtSzzvSLUqt1FBXNZ+/efdxww5LP\njH8wm8185bHH8Hg8RKNRTCbTpNYISfLZzKutZfacObhcLtRq9TmZU93d3WhSSiicrabHYkEpioRR\nkZK3EEQ5x44coSo9HXk4jNXvJxwMkmI0UqJWI5MkZs6ezc233048Hv/CLpXu9nayNJpz3htBEMjS\naOhqa5t0t15SGZlABEFAlCTSLlBbRKdW4xkbIy8vj6eefZbR0VHi8TiZmZnnmMPkcjmRC/iDI7EY\nY94wVaYs4vE4He3t9HV0EI9ECMTcHFt87FOVkbJZszj57rtngqpSU1MxLVlCKCeH+//08dpH0AAA\nIABJREFUT8nNzZ2AJzB1eL1w113w7LOwZMlUS3NlUFGR6GT8H/8B/+f/TLU0F8bv96O+wCSr12iw\nOhwTdh2VSnXBvzulXI7caGTM6z2TGSETRTyBAEtvvpm7H3kEn89HamoqmrMsmWczMDDAnq1bGbVY\nkMnlFJSV0T5wnNzcczMdZDI5oMPhcFxyMOZ0CtpMMh6ZTHbBDrijo1aUylTK51cRmDmTYDAESJxs\nOMqG//oFJmUMMRDApFKx6KwCmjaXiy6Xi5tnzEA8lUn1RVGqVERisXHbI7EYqinoQTZlTkRBEL4u\nCMJOQRAOCILwxFTJMdHkFBTgCIXGbbf5fOSXJBruCYJAVlYWOTk54/xyGRkZGPLz6bdaz9neNjjI\nrHk1+P1uTp44wfDJkxRptVSkppEScXNw82Z6enouKtfsOXOQcnI40dvLmNeLdWyMQz09lC5efMUr\nIi4X3HknzJ2biBVJcun8/d/Df/0XjI5OtSQXJiMjA1c8Ps6cPzI2RsEFuvh+USqqqrBJEoGz/nbD\nkQjDkQh3PfooJ+x2LCMjePx+uoeHafX5uPH22zGZTOTl5V1UEbFarbz+i19gtFpZWVDAdRkZDO7d\ny6ClC6/33EyaRLmAwLSLf0gy8RiNBiQpACSyI3U6LQ2HDhEY7GVJQTY3zZhBittN59gYDaOjjAUC\nuIJBjttsZM6dOyFWi5k1NQxHowTD4TPbQpEIw9EoVbNmfcqRl4epjGjaKEnSSmAp8PQUyjGhzJgx\nA7KyONTcjC8QIBSJ0NLXRywjg5nV1Zd0jvUPPsiQWs2R3l6aens50NODsrycRx97BJernf62ZgrS\n0lDI5Di9VrLTJK4rLGT/9u0XPadGo+ErTz7JjFtvxaJSYU9NZdkjj3DbnXdO1K1PCceOwdKlMGsW\nPP88JC3Vn4+SEnjyyYRFaTqSk5ND4bx51Pf04A0EiMZi9AwPM6pQsHACe2ykpaVx4/33c8hq5YTF\nwomeHg4ODbHwtttYtWoVDzz9NGJVFV2iiHr2bL7yzDOXFJ9x+MABcgSBnLQ0BEFAqVAwu6iIHC10\ndh4iEklMBJIkMTDQSlVVLgaDAavVit/vn7D7SzK9KCsrw2gMn0lTHh4aIuayY9B6mFWcT35+Prk5\nOeQoFEjZ2fQqFBzy+UhfuZI/+bM/+9Tg0mg0is1mw+v1fqoMWVlZLLvrLg4ND3Oit5cTvb3UDQ2x\n9M47p6Q+yZQHsAqCoAG2nFJMzt5+RQWwQqJ/xebNW6mra2Kwrw/vSDelZQXcdOd6lq5Y8blMqpFI\nhK6uLnw+H2lpaRQWFiIIAh9++BH/82//TZo2E0mKkmmWsXZBNXqNhr0jI3zvMnc6vpx8noC2kRH4\n4Q8Tqan/9/8mOtImFZEvht+fsCr9y78kevlMNp817tFolLqDBzm2dy9Bv5/S6mqWrV59WTK/PB4P\nXV1dSJJEUVHROen0X4Rf/eQnlESjZ2pJnOZEby/xsgosFifxuIZ4PEB1dQG5uens2nWcSESGIIRZ\nuLCSW25ZO64j+NXA1R7A+llYrVZee+33DA156WzrRD7Wx93L5lJ8ShEIBoPsraujB6iaOZOaxYu5\nftmyi1rhAI4dO87mzbsJBECSosyZU8Qdd6z71EaCLpfrjFW9uLgY43n1USaSadubRhCEvwf+CPhb\nSZJ+c97vrjhl5OWX36CpyUdeXtWpcswBBgaO8uija5g1QWYvq9XKb//936k2m1HI5Wf82Ha3mwGN\nhsefeWZCrjMVXMrHaWAgUR9jwwZ45BH4m7+Z/qmpVwJHj8K6dfDeezDZTT2v5knprVdfRWhvp+C8\nTIdDvb3c/OST5Obm4nA40Gq1dHV189pre8nPr0WpVBOLRenvb2LBgnTuvXf9FN3B5eNqHvdLRZIk\nbDYbB/bvx3HwIDXnWdsaLRZmrl/P4ksIhOvo6GDDhvfJzp6HRqMnHo8zONhGSYnA448/crlu4XPx\nacrIZXfTCIKQJQjCjvP+vQwgSdI/AmXAU4IgjHOUPvfcc2f+ffLJJ5db1C+F3W6nsbGP/PxqRDFh\nQlOpNKSnV7FjR92EXScjI4P86mpsHg+GU9puMBym1WZj8apVE3ad6UZdXUL5mD0bgkE4fhx+8pOk\nIjJRzJ8PL74I69fDr34FF4jlTPIFWLh0KT1+P55TLhdJkugZGUGelUVRURFqtZrc3FxMJhM7dtSR\nlVV9pqGlTCanoGAW9fWduN3uqbyNJJcJQRDIyMhg5apVuFQqrGdV5B1xOnGp1VRf4kJ2165DGI3l\naDSJqVQURfLzq+josDM8PHxZ5J9ILns2jSRJI8CN528XBEEpSVIYiABxYJy29NwV5HLweDyIom5c\nep1eb2ZwsGFCr7X+/vvZ8t577D1xAiUQVSi47p57Jsz6Ml3weOCNN+DnP4fhYfj2t+GnP52+aahX\nOrfeClu3JuqPPPccLF+eaKzndoPNBlZr4p/dDgYDFBQkMpeWL4fVq6dPF+DpREFBATd/9atsf+cd\nBLudiCSRWVbGA/fee47fPxaL4XC4KSo610QuijIEQYPH48FgMEy2+EkmCYPBwL1PPMEHr79Ou8WC\nBOiysrj/vvsuOaB5eNiO0Tg+jkkm0+N2u6d9n5qpTO39viAIqwAV8IokSVd0UxSz2Ywk+YjHY2cs\nIwAul438/KwJvZZGo+GeBx/Ec+ut+P1+zGbzVeVT9njg6acTLoOVK+Gv/iqxYp/kgoDXJPPmwb59\n0NSUcN2MjSW6HqenJwqkZWYmlA6PB7q74cCBRNO9J56Aykq46SZYuzZRv+QCbZKuSapnzaKyqgq7\n3Y5CobhgHIpMJiMnJw23247B8Id2DLFYFPBjSmrgVz35+fk89eyz2Gw2BEEg7VTQ86VSUJBFX5+N\n9PS8M9skSSIadX3p2KfJYMoDWC/GlRgz8s4773PgwCB5eQlTq8fjxGZr5Kmn1k9YcaarmdM+ZElK\nxITcdVeypPuVQjicUEy2bk38O3kykeVUXJywrigUIEmc6XS9YkUiRgWSsQOnaWlp4Te/+ZCMjBr0\nehPhcJD+/kZWrizl1ltvmmrxJpzkuE8sfX19/M//vIHJNBOjMZ1IJMzAQDNz55p46KF7p1o8YBoH\nsH4agiBMT8GSJEmSJEmSJF+IK7Ic/HRVlKY7wWCQH/1oA7FYwRmTndttx+Np5jvf+dq07co7GSul\ngYEBfvrTTaSnz0WnMyBJEiMjPZhMTp555olJbw6VJLlCvlaZiHGXJIkNGzbS1yeQm1uBIAin+vzU\n89RTt1M+gYXxknx5Ps3tdOW3cUwyjra2Ntxu9Tm+Q4MhDUnKpL5+YoNprzT27z+CWl2ITpcIBhQE\ngezsEkZGYp9awTZJkiTTj+HhYbq7neTlVZ6Z6NRqHWbzDHbtOjTF0iX5PCSVkasQh2MMuXx8BLZW\na2B01DkFEk0fRkYc6HTji/oIgg6P54qOoU6S5JrD7XYjiuMLeul0RkZG7FMgUZIvSlIZuQrJysog\nGnWN2+7zOSgsnNjMniuN4uIc3G7buO2S5L5gQ6skSZJMX1JTU4nH3ePcPS6XjaKiK7vn1rVGUhm5\nCikvLyc7W0Z/fyuxWJR4PM7ISC9arZu5c+dMtXhTypIlC4BhbLZBJEkiEgljsTRSXm6moKBgqsVL\nkiTJ5yAjI4O5c4vp7T1OOBwEwOkcJRDoZuXKZPvuK4lpnU0zXWW7EvB4PHz88U7q69uIx+NUVRWx\nbt0qMqZxruxkBTIODg7ywQc76O4eRi4XWbSomjVrVqKegrbZSZIBrNcqEzXukUiEnTv3sG9fA+Fw\njLy8NG69dSXFxcVfXsgkE8oVm9o7XWW7kohGo0iShEKhmGpRPpPJnpTC4TAymSyZQTPFXAvKSDQK\nLS1QVQXyaZ3DOHlM9LjHYjGi0SiqZLW9aUtSGUlyRXAtTEpJxnO1j/vAQKIyrdebaGWwdStkXduh\nW8DVP+5JxjOljfKSJEmS5FolHodHH4UHHwSLJdH/54kn/lCJNkmSJAmmTBkRBGGWIAh7BUHYJQjC\nz6ZKjiRJkiS5XGzalGg0+Hd/l/j5n/8Z2tpg586plStJkunGVFpGWiVJukGSpBWAShCE2imUJUmS\nJEkmFEmCf/3XRAfk02FJCgV8//vwwx9OqWhJkkw7pkwZkSQpetaPGmBsqmRJkiRJkolmzx6IROD2\n28/d/vDDcPAg9PdPjVxJkkxHpjRmRBCEOwVBOAEEJUnqnkpZkiRJkmQiee01eOQROL8dh1YLDzwA\nL744NXIlSTIdmRbZNIIg/Bh4T5KkrWdtk/7hH/7hzD6rVq1i1apVUyBdkskiGV1/bXI1jns8Dvn5\n8MknUFEx/ve7dsGzz0J9/aSLNm24Gsc9yafzadk0U5bxLgiCUpKk8Kkf3YDy/H2ee+65SZUpSZIk\nSSaC/fshPf3CigjA0qUJN43FAoWFkytbkiTTkal009wiCMIngiDsBPKBD6ZQliRJkiSZMDZtgvvv\nv/jv5fJELMm7706eTEmSTGemhZvmQiSLnl17JM221yZX27jH41BUBB99BDNnXny/N9+En/0sUQTt\nWuRqG/ckn02y6FmSJEmSTBIHD4LB8OmKCCSqsh44AD7f5MiVJMl0JqmMXKW4XC7cbvdUi3HNIkkS\nY2NjeDyeqRYlySTz+uuJbJnPIiUF5s9PBLMmSXI+8Xgcp9OJ7xrRVpMtmyaR0xOUIAiYTKbLco2R\nkRE+fPttnH19SJJEamEh6+6+m6xkM4xJw2Kx8NHbb+MfHSUO5MyYwbq77vrUMfd4PEQiEUwmE6KY\nXCNcqUhSQhl5//1L2//mmxPunFtvvbxyJZk+uFwuYrEYZrMZ4fy871O0tray/d13ibhcxASB4tmz\nufn229HpdJMs7eSRjBmZJAYHB/no7bdxDw4iAab8fG65554JVRK8Xi8v/OhHFAsCuenpAAzYbPQJ\nAo995zuf+SI7nU5isRhpaWkX/SO5nFwNPmS73c5LP/4xVXo96UYjkiTRPTxMv1zOI089RWZm5jnP\n1uVyseWddxhqa0MmCChMJtbedRfl5eVTeBeTy9Uw7qepq4Ovfx2am8fXF7kQhw7BY4/ByZOXXbRp\nx9U07peCw+Fgy9tvM9rZiUwUUaWmsu7eeykqKjpnv76+Pt742c+YnZ6OSa8nFo/TPjhIvKCArz31\n1AW/zZIk4XA4EAThU5WcqSbZtXcSaGpq4sD27TiGh8nIy+P6NWuoOJXX5/F4eOE//5MyhYLs1FQA\nBm02LJeoJFwqdQcO0Pz731NzXq7gCYuFWXfeyaLFiy94nM1m4403NtPX5wREUlNV3HvvzRQXF0+I\nXJfKlf5xcrlc/PTf/532nTtJNxopKykhKz2dPY1dNPePUVBTw+zZJdx3361kZ2cTi8V44b//G6PT\nSUl2NoIg4PR4ODk2xoNPP01OTs5U39KkcKWP+9n85V+CSgX/9E+Xtn8sBpmZcPx4oi7JtcTVNO4X\nwuFwsHfHDtqOH0eUyxkcHGRRVhalubkIgoDN5aLZ4+Fr3/kO6acWjwBvvvwysq4u8jMyzjnfwd5e\n1v/xH1NQUHDO9v7+ft54YwtWqx9JksjLM3LvvbeQnZ09Kff5eUgGsF5mjh4+zMcvvkh+MMiqggKy\nvV4+eOEFmk4tdxpPnMAUCp1RRABy09PR+Xw0NzVd8nWCwSBNTU0cO3aM0dHRcb+3j45i0mjGbTeq\nVDgusD9AKBTihRc2YbMZKSxcRmHhUuLxYl544R3sdvsly3at4/f72fiLX+A/doxlZjO1Gg3DJ0/y\n401bCYYKyDHWYDLOxuPJZMOGTfh8Prq7u4mNjFCak3NmJWNOSSFPoaC+ru6C14nFYnR0dFBfX4/F\nYrmqP+ZXGpL02Sm95yOTwdq1125GzdWKx+Ph5eefJ9DYyLKcHNICAex1dbQdPYrDbkeSJNKNRnJE\nkeNHjpxzrG14GJNeP+6cWkEYFwfocrnYsOENQqF8CgtvoKhoGS5XOi+88AZ+v/+y3uNEk4wZ+ZJE\no1H2ffQR83Jz0anVAKQbjcyWy9m1ZQszq6txjIxgvICSkKJU4rBaL+k6vb29vPjiOwSDWkAB7OT6\n6yu5/fZ1Zyay9OxsTtbVcf4CaywUougiq+y2tjbGxhQUFf3hKIMhDY8ni/r6BtauvfGS5LvWaWxo\nQDM2xszSUlzt7Zh0OuSCjIhXRywuIyhF0Ol1pKZmYbFYaWpqRiYT0V7gXGa9nsHh4XHbnU4nL774\nOqOjcRLtnDyUl5v5ylfuRX3q3UsydRw9mmiEN2fO5zvudNzI449fHrmSTD7Hjh7F6PNRVlCAx+/n\nw4PHibuhzWXH6amjqCiNRYtqMel02M/7W8/Kz8fR0oL+vDnDK0nj4s4aGhqJxdIxmf5gRUlLy8Fi\nsdHc3MKCBfMv301OMEnLyJfE7XZDIHBGETmNUacjNDaGz+cjMy8PZyAw7lhXKETmJZjiQ6EQv/3t\nO2i11RQV1VJUVENBwfXs2dPJybOczbNqavClpGAZGUGSJOx2O5u37WT7iTa6uvuwWq3jVtJOpwu5\nPGXcNbVaI8PDScvIpdLf1UWGXk9BYSFeUcTt9+MKRtHLNfRbrYgGA+np6fh8Pnp6RnnhhdfYvfsg\n/WPj+0Pa3G4yL2Czf/PNzbjdqRQVLaSoaBZFRdfR2Rlh+/ZkOsZ04LRV5PO662+6CT7+OFGfJMmV\nw8WskpIkUbd7N71tXXyyYx8bN+8gEssBdRomjRmNJgurNU5razs2j2fc3/qiG26gNxTC5nIBEI3F\naOrrI7W8nNzc3HP2HR11oFaP/34rlSnYbM4JutPJIWkZuQinJ/NYLEZGRsZFMxw0Gg0REi+M/HSf\ncCAUiSDJ5ahUKqpnzaJuxw46+vsJO50M9PZi9XqJl5Rw53n+vwvR1dWFxeKFSAvxWIyMvDy0Wi3B\noJYPPtjBrFmzEAQBrVbLQ08+ydbf/563Dhyg4UQnKVkzqV60mj17+vnNb/6CkpICioryWLVqMfPn\n15KZmU40Ot5V5PXaKSws/qKPb8qJRCKcaGig5dgxZDIZ1QsWUF1djeysMZooPB4P7R0dtGzbhkIm\nwy+B3eZn1DGG1e+jpriSBddfj8/nY9euwzidIyxZUoXbnc7Rrk9Qhg+zbH4twUCAYYeDIVFk9Xnx\nPU6nk+5uGwUFN5yzPSengoMH97Nu3ZrLcm9JLo3TWTSvvfb5jy0sTJSOr6+HBQsmXrYkX5xIJILd\nbketVp+xSjQ2NvL66x/Q3z9CYWEO9913C7NmzTpzzNat2zlUbyHbLWDUamjq7CfNkI2k1BJ3OzHJ\nRAwpmdQ3NVK6YjG3nxr0xsZGDu7YgdNqRa7VctTjQeNyIYkiFQsWsHrdunGBqfn5WdTXNwF552wP\nhcZISyvi8KFDtB0/jlyppGbhQqqqqqZttl5SGbkAIyMjvP76ZoaGPAiCDINB5L771lFaWjpuX41G\nQ+WiRTQfPMisggJEUSQej3O4owNdZSUtLS2UlJRw3ze+wT/95V9ib2lBlMnIyMykXK/n7Y0befRb\n30KlUl1QFkmS2LZlC32NLZRnVxKNxdh+uIGgzEB6ZhZNTa2Yzb9j+fJFDPT2IogiN6xeTVuvg+Wl\n92AypWO19tPa2ockzcbpVFNSUsmmTfvxeLwsW7aU7Oy99Pe3kZNTiijKsFr7UaudzJt35+V+1JeF\naDTKppdewtfWRmFqKrF4nD2/+x1dCxdy5333TWikeSAQYMNPfkLL7t20tXQiSBqsITNxTQ5l+XNx\nWbuw9A/RcKwejy+C1+snO1tLSUkNCoWSG9Y8Rd2+33D43c343CGUxlQq5tXgcDjIOCuALRwOIwiK\ncbLL5Qqi0TjxeDypjEwhR46AKEJt7Rc7/rSrJqmMTB8OHTrCK6+8h9sdBqIUFKSSmprC66/vRyab\ngUpVTHe3jQMHfsw//MMTLFmyBIfDwc6dJ5i7cD2tu95Cj4BWayIaUaFOyWM0VYMsFkIcsxJUa/ne\nY49hMpmoO3iQXS+/jCkaJQOIKRTEFApWPfIIM2fOvOj8UFMzi08+OcTISA8ZGYWAxPBwNyZTiIa6\nOuL9/RSYzURjMXa8+CJd113HHXffPZmP8ZKZykZ5S4B/B+LAIUmSvjdVspxNMBjk179+nXi8iMLC\nhPPX43Hym9+8x7e//dVzop5Ps2bdOraEQuw9dgytKNJo6WfEryIj4qO+4QNSU0Vqa0sRAhGU5ipE\nwYzNE6DvQAvmxkbiKhUGYyr7dh3AP2ajsqqMm9avZ+68efT39+OzWMgxgkIG9S3H8TucxAU5dnGM\nVTcuYdvWNg5ueZ+baiqRJIkD771Hl13ihhUrAWhqakCjKUerTcNub0Op1FJQMJ8dOw6yZMkiHnvs\nQbZu/YRjx/YSj0tUVORzyy0PYTAYJvXZTxQtLS1429tZUFJyZlumycSBo0exLFo0LpXuy/C7l17i\n5z9/C6/bTCy+CJe/h1RljJSIgoY+F2V5+cRCY9Tt/B0utxtTTiHl5Wvx+z3Y7YMM9DTS3GKhdtEd\nLFm3BJu1n97mg3z7sWeonFlIXk4OReXlLF6xAq02jt/vQav9g1nWbh+krCwPhUIxYfeU5PPzyivw\n0EOf30VzmltugR/8AL7//YmVK8kX48SJE/z93/+EWCybQCDM8HAX0agKv38MnS6dkhIRmUxJKKRi\neFjJP//zT9i0aS4DAwMIgom0tFyKFt1MR/0OXBE7sbgRMSiy9ta70Om0hMNBiopiFBUVEQ6H+f3G\njYwcPIjbZiPk8yETRQzp6QTlcub94Afj5PP7/TQ2NNDX2Ul5cToj9jEGBnoAgdmzS8nJrqHlgw+Y\nf/438NAhBhYtIi8vb9w5p5qptIz0ADdKkhQWBOElQRBqJElqnEJ5gERAp9utoajoD7EcKSlm3O5E\nQOdNN60ed4xKpeKuBx7AuXYtzc3NHH1xC1FvlO7uMSRJTne3ix3bPsaMkdkl8/B6PQwOdCOLp9E9\n2kTTf/wMTzibWUWVpGizqd/ZgrWji4H778VgMlGo16MsjfPWR2+jcASZqcnBH3Ljsg4x0pdKzKNA\nUsgozMxEIZeTaTBw6PAHOGePYjSm4XSOkZo6i1gshkwmIJOJiKKceFyNw+EgLy+Pe+9dz/r1EeLx\nOCqV6orO0uhsbib7vGh0QRDIUCjo6eqaMGXE7Xbz0/9+FYW4AJNGhyBTEheK8IXb0cpcaOQFmNRG\nSopnsKvhfQpNM/CHUmlv97D1o39GHQetqMfq8NOhOIJtoIs8MUBWXCQ2GiDiO4azxE6JXM577e3M\nXbaMvXuPoVYXotMZcbttiOIIt976ALFYjGAwiFqtTlpIJpl4HF59FbZs+eLnuPFGePhhsNshLW3i\nZEvyxfif//ktfn8eWVlVDA3twWhci883xujoEXJyFtHefhiVagSdrhSFopRjx7bw85//mjVrliFJ\nEQDy8meQlV1M1oxj7NixDX8oSl3dccJhH0qlndtuexaA0dFR9m3dSkUgQGkggFomwylJ9AwPs/3N\nN1n/8MPMOSsq2uVysfEXv0AzNkaGXo8vFCISiXDf/fcza9YsFAoFr7/0ErlG4zn3JIoiqaKIpbc3\nqYycjSRJI2f9GAGiUyXL2SQCOhN1P4JBHz09rQwPDxONBklJyb6gMnIas9nMyIidri4bWu0czOZM\nANzuMdo69pNv0FJVEGHIYsGsUiGXaem264nH45SozBCXkWbIwOoc5f1dTexp/AlzFs1BO9JF2GEn\nLzBEAAUyHKRqZBRlV9DX04IhtRIQicZiKORyDCkpVKQbaGupY8n1d6BWqwiHfXi9LsrKchFFEUmS\niMeD59Q4USgUtLS0sH/bNqyDg6RlZ3P9mjVUV1df1mc+0ag0GnzR8a9T5JSidTF8Ph/t7e0E/H5y\n8/IoLCy8qEtHkiQ2bdrEqFWOJiIixEOo1Qpi8QhyWTGjnn0U5FQQjUm0WPrQKNOpyqumsa2PlpOH\nCHpMmFVKDAYtRl0eHo8La8/7iJlZdPn8ZGpNpGt0mGMxnGNj1BQW0tXayp/8yQMcOHCUwcEeCgvl\nzJmzlI6OzlOpfFG0Wjlr1ixm8eJF07bw0dXGvn1gMsFZYQOfG7UaVq+GzZvh0UcnTrZrDY/Hw6FD\nR2hp6SUlRct1181jxowZl3x8MBhk8+aP2Lx5HxrN9Vite4hGdWi1atRqE7GYiN/vIRg0IpdLSJIS\niKFWp9DTEyYQCKDRBPB4nKSkmJHLFeTklBKL/Z709BkoFDrKyyvIzU1jy5YDzJw5k23bthEaHEQf\nj6OSy1FrtRQplQT8foZ8PvZ8+CGzZ89GEAT8fj///R//Qecnn2DW6yksLGRuZSVZwK733jvzrVZp\nNIQjkXH3F5UkFEolAwMDeL1e0tPTSZsm2u+Ux4wIgjAHyJAkqWWqZQHIzs4kEmkmGPSxa9eHBAJG\ntNpC7HYL9fX9fPLJLlatWnHR43t6LITDOjIzM5EkGBzow2uzIUa1DNtdnKivRwHIU1MJRaPYw5Cn\nM5BlMNBpHaapdz9uqxOloMYXirProw48Y82UqtzMlMuJht2EQlr8Kj3Dg0P0e+1Eh50srDq3ampV\n1QyCfi+9vYcwGBS0tu6ivHwuVVUzkCSJgYFWamoKzkkVazh+nB0vv8zM9HRmFxbi9Hj4+MUXCT34\nILXzr5wUseo5c3hr717yolEU8sQrHgiFsAG3VVVd8Jienh7effFFDOEwCuBIPE727Nnc/cADyOVy\nQqEQTqcTnU5HSkoK27Z9wvvvH0AQjQQFFRG/G7fPgyCIBCMQIUQ4EsCk03C4tZFso4nu/j6GHO04\nQyE0zMAT9KMVPAj6bILuMJqIQGVUwhONMuLoY1SbSoUmm66RERbNmoXfYsFsNlNTU0Fzcw+jo2p2\n7nydrq5RVqy4laKiUoJBH2+9lahRsmTJhYvctbW1Ub9/Pz63m+KqKhYsXozxvFULAokLAAAgAElE\nQVRUkkvn5ZcTLpovy/r18N57SWXki+JyuXj++Y14PAZMpnxcrgAbNnzAbbeNsGLFss88XpIkXnnl\nLZqbvRiNxcTjeQSDUZxOF3p9EKVSiUIhw+sdIhyW43A4CAbHCIXaSU2FlJRcWlp6+PrX7+a//uvX\n7N1rw+32M9TXgErKZYZeT5QwzoE+Cgvz8HpNbNz4Mnvee5c0uZxgKIRRknC53Sg1GuKxGDFg/9at\nRCMRZi1eTFdTE4M7drAyI4NBr5eje/bwyf793LJ6NaJSydDQEEVFRcyqreX3hw+Te1ZihS8YZDAS\nIbhnDxGrFa0o4o7HKV+wgFvuvBO5fGrVgSm9uiAIqcBPgAu2lXruuefO/H/VqlWsWrXqsstUVlZG\nXt5e9u37EJ/PQGpqGR6PA6NRy8KFK/j446PU1s696Mc7Ly+DSKQDAJdrDK/VRqpGg0urRSaLEJLL\n6RwcZDAYQZLLUGjDGLVmhj0uuvrrSA95mS3LJi6G8Pr7sHt16GSZeEJuIn43mkiMgfgAdlIJBdMJ\nRjLwBqFnUOC1HQdZUFmA3W6n2+3msb/4C0wmEy6Xi+bmNpqbBxkebiAe91Ndnc/dd992Ru54PM7e\nDz9kbnY2KdpE9YtUg4G5CgV7t2xhzty5yGSyxLm7u5EkiZKSkgvG0Ew1hYWFLLztNvZv2YJZkpCA\nMbmc1ffdR+pZhedOE4lEeO93v2OWXn+m2JAkSdQ3NHC0tJRoJELdtm2oolFCkkRmeTnHm63U1t7G\niRO/QtSmYvM40URjyGVxggyRovTjdR9myJaKOj5MkbKQ/qEeQuEociEDhTwLcCNTB/F6hkgTNUQk\nEa/bisvvJozE0Z4ACoWcgpoawqeyszweDxs3biEtrRalUs2hQ40IUjEff7ibZSsjpKWlYzSWsW1b\nHQsXLhjnstmzaxdHN2+mzGQiU6VicPduXjpyhK/+8R9ftn5JVzOBQMJFc17dqi/E7bfDn/85hMOg\nVH75811r7NtXh8djJD+/EgC93oTRmM7WrfuprZ1LSsr4FNizGRwcpL3dTlnZ9ZSW9nDy5CAgEg4P\nMjCgQafTUVychtvtxe/vIyXFRDTagF4vUly8lKNHG6ipmU8wGEQQtBQUzGbM6cFuGUBAj1qpwqTX\n4w8F2bV1KxDk6AcHMcshLIrYJAl7IIBKkggEgwwBgigijYwgGxpi98svM9rfT77BQEN/P+GxMYoA\nRzBIw65deNPSWDA4iF6vp7S0lDlr17J/xw7MkkQccCsUKFJSMI2NUX7KVR2Pxzl26BD709JYvnLl\n5Ryez2QqA1jlwEvA/5Ik6YLlQc9WRiYLuVzOY489xNGjf0s8rmBsrJ2cnDSqqxei0+mx2w0MDg5e\nVBlZvHgxRuPH2GwWnDYP0YCHbmsXStFLimwEly+GR9IgxbUYZAEMSg3DgWGcQy4KpQCpcgNaUUE0\nHkEdFokKLmRqMz5RT5fbxwylDnU0RCgikqbJYEwtkq2SIw/K+GBPF9bORgqNBrILCtj15pusfegh\namtrqa2txefz4XA40Ol04yZlr9dLxOMh5bxUY71Gg2Sz4fF4aG1pYd9773HaqLdXEFi0bh03LF9+\nOYbiS3HD8uVU19TQ09ODKIqUlpZe9GNksVhQBQKYMjKQJImRkRE6Oy3Yxtzsbv1/zCnIZWlFBSqF\ngng8zkf79tFpM3HzLauprZ3F7t170KrTiQtRvKFhMjW9rCk2MBQMMBYc5Ia8dHqcw/R6XWQaZkNg\ngEAsTKZWi0quR4o5MeijjHrG6AvJcIrZaAQjxAU2t7tYl+cho7+fmhUraGvrADLQaPS0tx1mqLOF\nHG01kTE7O199FXVqKjm5uUQVQwwNDZF/Vg0Dj8fDoY8/5vrCwjMWI4NOR1t/Pwf27OGWO+6YjKG5\nqnjjDVi4ECYiDCkrCyorE11816798ue71jh5spP09HNdynK5AklKfLMrKys/9Xin04kgJBYjS5bc\nQFfXBgYGYsTjIm53O5JkpqqqHKczQCTix2SqRKHQIIpqnM4xgsFGzOblvPfedrKyatHrTezdsYPq\nwnLa+jy0D4yysFJHJBrFbbGQmSFSlqKhNjub10ZHcbpclMtkaOVyRsJh3LEYBaEQpSoV3q4uOsbG\nyIhGCWRn02exMEOSUAgC+lgMS3c3w/39vP+rX1GXlUV6aSnr77+f2fPmYbFYkMlkmEwm3nr+ecrO\n+saLosjM3FyO7tnDshUrptS1O5WWkQeAhcD/d+oBfF+SpANTKM8ZdDodCxfOo7Iyi5SU1HNWl5IU\nQXmRZYvH48Hr9bJiRRUnTjgY6u9H8tspMkCGLsrcnELebBzFpDagMqgozszBpKlkx7GPkYsjaGJx\nIjEfUZSkaFQEIhEMMQmL145OJ0OjLaY94sURdBFQGAmlGClMzSEccCCqQeN1ojYbWbNuBampqfiC\nQT5+4w3KZ8xArVaj0+ku2gdHo9EQk8kIRyIoz8rMiMZiRAUBl8vF9k2bKNZq0arVZKemIkkSBz/4\ngOLS0mkZEGU2mzGbzZ+5XzQa5fQIt7d30NjYh1abgVymob1xL+neIB1qNSl6PTlpaVTm5fHJyTb8\nfj+rV99HNOCk9eBu4tEQFXkidy1aiEmt5nBHB0d8PhYsnIP9eCsxuxKZQoEuriAQaCcmFuIKKHGF\n/bhjI9QUptHnMWGW5RAOBvGGQqSmlLG/2UHtunyuW7aMjRs34XJFMRpdDDQdJFutQCbFUAR95Bu1\nROJxlNEwYtjJh++8wxNPP33mAzM0NIQhHj+jiJymICOD4ydPQlIZ+dz84hfw7W9P3PnuuSdRPC2p\njHx+1GoVoVAYtfrcb9ynfbPPxmAwIEmJEuoymZy0tEKMxhwcDht6vYfCwiw8nkGKi0WWLfsmH320\nGa/XjEIhA0aBOJs2vYdcnsqcOYkcbb/fj0rQoFQMMuLwEQhl0z80gCziJCfVSLqQglImY15BAdvd\nbnoFAVk0Sns8ToVazSyDgVyTiUyzGafXy6DNRkSr/f/Ze9Mgy67yTPdZez7zmPNUWVmTai6NJSGE\nLAaBEYMBmbERzbVN+7a5jW+4o319u23HdUSHHURHd1/7hruxAhBtSRYCJIQlNKDSrJKqpJqHrJwz\nT2aek3nmcZ893x9VFJIlhDAIYdDzK/NErL3OWStzn3ev7/vej4jrYigKEufNMwPPYzQSIWRZvG1k\nhPlcju/ccQe3fOELF3NCVlZW0IR4heAwNA2r03nT7QHezATWO4E736z5fxLj4/18/esPMTCwlYGB\nMZLJHhqNMrGYw+hLGtE1m02OHz/Bk48/w9q5U2zt7SGtqqSUNQTncPwKSSPKlcN9VEyH4cQE3ZrF\n+Mad6JEIeirFFusKIh2Z/naD/GKeTuDhBTIELk3PoUWVy3sm0DsBppslZ9bwVR1NgYWFwwR4CC2K\n7rpEwvGLpx4RwyDquuRyuZ+YxKWqKruvvprTTzzB7tFR5At+KadzOS655hru/da3mD10iCAexwkC\nLMPguv376dc0Jk+detPEiOu6TE5OcubMDLqusXv3JYy/pJztx9FqtThz5izVaoN0Ok4lCMjl8zz4\n9BGE2kNWMrE8Fw+Z06enWTt3inRvFiWb5bprrkERZZ5++gG6XZtm0yGqOFw/mGDrpkGSF9x4fcMg\nZNts2rqVDZs2UftfD+E36wzFksw2HWx7hpZlYRglDENQdQRGOIIeCiH8OGOhEGMDAyw7DdRQjP/6\nX7/G8nKV06eLHDt2jlGvyeb+DIfOnCVBiHDIwJcF8yunuOX9u+murrK6unpxbzRNw36VKinLcTDC\nr2ZM/xavxdQUTE7CB3+Odjyf+MR5r5G//uu3QjU/jnq9zrFjJ1hdLTEwkGHv3t0kk0muvnoPd9/9\nPL4/wdpaDtd1CYUMEgn3Fc3lXo2RkRFGRiKsrExjmiaGMUQkMoCue2zefAm+L0ilehBiActq09u7\nmaGhLL7v0WxmKBRUjhxp4vtLrK1FyaQiVAsFpFIJRUCxcpSHDy/QajUZ0UwmYvuRRIzlSoWoJLE7\nmyU+OEi1VqNZqbA3GiWuafgXkvF39fdzeGEBv16nJ5FAkmUWSyVM26YvHGa52WRyeZn3CsHGgQGe\nnp3lwe9/H6fTIZ5KsXnbNmxVpWvbGC/54ypUKgxt3PimV+G96Qmsv2wEQcBDDz3KE09MAikOH57E\ndQ+SyQg2bRrg93//sxc9HdbX17n11m+yvh5QOP4iGw2DYm2ZK67cScP1ea7RJJrZTrnr8tBMFUOu\nM7dmouhJ4o5DuNulurSEHgqRifSy1miihAaRbAcvaFN1Tea9BkPpHsJSCFd3KLbKBNEsQWcOc2mZ\nfqHiShL59jIN2WNtvsPCwsLFjrs+vObRm+d5NBoNDMPguhtuoNvt8szhw0QlibbvM3H55QyOjvLo\nN77BZfE4fRdOGqqmyRMHD7Jvzx4c26bZbHLsyBFy09PE02n2XHHF67oB/Cy4rssdd3ybM2eqxOOD\neF6X5577R975zl28613X/9hxKysrfPWr36bbjaPrMbrdOQqFGvcemcSqRIhHY5xeLdGRSxjlIptk\nlVDgM2RZlBYXebDTwXQ1OsUuodAwitJLrnOGF9QKSdfl0OQ8+VoFLarT6na556FH2dw/wObhOLNL\ngrOLebQgYFiP0o4Itm8dxysXWZ1fJKyrrLerkNnA/m37qLdaGHqEb33rAXbseBdjYxtotY6yuFhh\nZXWdiV3byUjL2IpM2zdx3Q6bBiLs3TTOyeVlWq3Wxc89MjICqRTr1Sq9F/bR932m1te58qfp7vYW\nANx6K9xyy89XNIyNwbZt5w3Q3jqoeiX5fJ5bb70bx8kQDic5ezbHk08e43d+52Ps27eXAwee5N57\nb0OIYYQAWV7nE594++sKPwgh+MxnPsp99z3Eww8/T73uI4SD53U4fXqVTsem3V5jYKBILFbE8ybI\nZkdYXp5mYeEMIyNj9PQM0O2eZHLyDEG5zLbhISrAUqnMYHgQ/Ay2aGH5DQq5PCOxEPlOB991ma7X\n6XddMvE4cVlmtV4niEYZvXCaLQORnh6C/n5OHz9Ov2kyqOv0qCpxTWOl06G1vs4jzz+P2enw4uQk\ntXyefdu2ke92Ofb44wzt2sWLR46wOZUiHomwXq2yaNt89D3veWM37nXwlhj5JywuLvLQQ0cZG9vP\n+HiYybNHmDn8MMryCmZngf/j5vtJjY7xoU99nGbLJQhGkIMcY9EEyVCM5cUz3Pm1b+CLNH1SlCAW\nIZscZi6X42T+BQxRoy88QCaRIKwbROwuLyydZnhTlIJIEYk7aJZNpVGnEo8Qjm8impBwkymcwCcy\nNkx4eYHk1LOkRBhEFtWXyXgOhcBkra7w8MPP8/73q8RTKbq6flEUvFR4hEIhjh8/wT33/IBisYGi\nCK6//jJ+8zffy7XXX0+9XieRSJBIJPiHr32N3Rs2sH7y5MV1SoVChCoVzuTz3DQwwN//7d8SbTTo\nSyZp5fN854UXeMfNN7P3n2tJ+To4c+YMZ87U2LjxCgB836PdjvHQQ4fYs2fHyxxMf0gQBHzzm/ej\n61vo6+u5sC7DPPfc86RH3s6aVMfTE0SNDaxPPsioEqIjPMKSIBYKIbpdjpybIrLvQ/zmTZ+gVCri\nuh7Z5Ac4fui73Hm6BV2dmLEBr1nA9Du86K2y2pDxu3WmV2axpChROYocNZmIOVhzRfZkMniaQqFd\nIxEapNla49ziNLmWRaP4AlkRMLO4gpbOYgxvYc+eDTyXf5q19bO8be9mUp5LLBrlxeU2XUXw7Qcf\npCwE+z3v4meXZZnf+sxn+M43vkFucRENqAMT+/e/ofv0q4htw223wVNP/fyv/alPwR13vCVGXo37\n7nsERdlIX995H6h0up9KpcB3v/sIn/zkh6jX4aab/hWdjoWqKmQyGRYWjnL27Fl27tz5smsVCgWe\ne+5FVlbWGRrqZf/+ywiCgJDsMt6jUMgt4fspWi2JdruLJMWwbYd6PY4kNUgkLBanvou5OsNOI47c\nslgqvUAg1SmtrCHZIQ62qnS9FrJk0aP102xVuWTrTsxGkzOlKfYO9+MFAYuShJlIsHFoiHQ0SsX3\naZfLnKjXqRUKeEs5qrbDxuvezud+/9/wV//hP1CfmqIvHKbRaDDT7RIKhQh1OiycOEFEVUkXiwS5\nHNrWrQwNDzPQ6XByepobPvMZjh08yGypxODmzfz2dde9oufNm8FbYuQlzMzM8Fd/9f9x7pzMuXMH\nSSZlROk4V/UPcSyXwyoucmmyl8nJRe7+L/+TFT/go5/8E7qdBvEAcrPHidgWTQsSmTR+u8nRlRzV\nto4sD6FLLWQ5x3JzhWQuymA2g+uV2D5ikC9VaZdrdCyHittCSW1gYtvbaTRWsOQ6w9d9CFXViUaT\nlL7yx/SmU3TMGJ4r8IOAntAIrl1lutEgm4pw54MHGN67nctvuIHv33sv6+UyC8tVZDkOODhOlUcf\nPkqr2UMsOUQsHubs2R9QLJb4vd/73MsSdDvNJhv6+7HrdRaWlkgZBkIIitUq49dfT35piXS7zaYL\n4atMPE6vZfHEffdxyfbtr+nt8bNw/Pg5ksnzCZpLS1OcPn0C2xY0GgVuu+1OvvjFV9rsr6+vU6k4\njIz8SKjU6yV0fRDf1xkYybC+cI5Io02mmadKAyWRIhELMdtuIykKQjUIR+L84KFvUSuuk+gdIpGM\n0fEHCckS2wYSmLbNfFcikurjio39rIdirNXLNKwCshSl7Xi43QpurcFEYFPxfVqWCnKIartBux1w\nrPoimYTE7liCED20Kg2s0hy1lWVC2y/jmvd9BmftOXqGB5g+epRSPk/bdbk+m8XsdEik0zzyzW+S\n+sIXGLjQkLG/v5/f/cM/ZGFhAdM06e/vp7e39w3Zn19l7rvv/AnGli0//2vffDP8yZ9AqwWv0kn+\n15ZWq0UuV2F09OWGLul0P4uL05w8eZIgSJLJ9LzMOC6ZHOXo0TMvEyPz8/N89avfRVGGiMc3cOJE\nmccf/zvifpF9PT28e8MGQs02f3PvA7jSFjKZCTxvif7+Hnp7t7Ow8CC6OsXutIbrj6IoKSyrjr1+\niqYWYiQcp+EFqIFLwddxvRhzZYESBCwXc2wdHqFQzPDE+johXWfZdfnIu9/DgSPTlFe6tKwk+foa\ncavBTPXM+XbQmQGu8AZ4/PFDbN67l3qtRjIWw0il8FZW8IOAfl2n4fucmp5lixHBzFd44J77+K1P\n3Ew8Hkctl4nH43zmd3/3Dd0rz/M4euQIJ55/Hsey2LpnD5fv3/+aY94SIxfI5/Pcdtv9eF4fyWSE\nWGyQxbln6G+sU+mC0+iQ7ushEU2ScGwWmy2q+SWevvu/EYRjNJeniXbqGJJMtd1gONTECxnE1DRr\nLQ1dDWgHsHdsJxuHRlksHGbHhijRUJa/ufseYi2frJvGcSAhJjAtj24HarU09foZvvvd/5d9+24g\nlUoh+S2MaBzHCWE5Dogwlge+UOgqfZy2PRSyLB+Z49TBF9g02Eu11KbsyyRHtxNoGQ498zCOM8rE\n4AaKxQarKy1iiQi33nofN930npcp5Q2XXEL+ySfZvW8fawMDFJaXcV2XeDbLp2+5hTu/8hWu+idf\naCFdJ+S65PP5iyGjnzeKIuP7Pvn8PC+8cJJEYgeRSBjPy3H2bId7772fj3/8Iy8bEwQBP0yb6Hbb\nmGYb2+4ixPlmZxGxzqhbQieKpMRI2GV0ucvYxp0MJxII4LGnnyX37MMMa72EPcHa1BTPdUoE4e3U\nRZsTroMmy2iRFKZl8szUGWrNMu1WG8MfxCaMKnQMqZfZxguEgzYrbQlH9BOOZokZ0GzM0jErZEUE\nSUmxWMqhx9IQxMnKEebOTFJvVbn5Y28n3dvL1i1bOPQ/b0XpatwztcauzSO8/7LLaFsWzz72GB/9\n1KcuroGqqj+VEdRbvJK/+zv4vd97Y67d0wNXX31e8Lxk237tOd/gLSAIgpeFXc7/HlzIeXhlOEYI\nCc/7Ua6U67r8xV/8F44ezQEyGzdu48orr6VcUGg3C3zwQuPRt+3czgMHT7NQbpPJgGvHsWttCtUX\naTU7dESeeKqPgl1hdfUcVqdOGI2iabOGjO70IUkuspNDFv043X66IozZGeLFc7OMDwo+dOONCODb\nhw7x1Mkc5WaWtu9SqU2z0RNkAw2BQsWV6ZRqPPfIvXjeh7juuu2cmJtDchz6+vvZfOmlvPDYYyy3\nWpTbFpaI4AUxrI5Crd3iwIFnef/73/mKtXs1bNtmenqaWrVKtqeHiYmJn8qDJAgCvvftb7N25Aib\nentRFIXcE08wfeq1DdbfEiMXeP75I6jqMBMTCvn8YWCAkBHCLQWsFVaRJRtNjXNwrcRSXaLl64SD\nQdZyOS7fvofDxRwDwHg0SUOXOVWcRhraTa3i4ephdDWBrzSZWi7TtWqoqkrbbHPw2aexSm2G5GGa\nVhUvSBEKZ7AaRaaOPIhPHzG9l9bCPI8s/S8mdu2kJSTOFtdImipaoOJLbWzCFJCxHZvB4XeynnuW\neGuVjZE4S88fo4HPRHaQ1ZPP0rKgXm+iGztZKdZARNAROLbBwkKXP/3T/4c/+7P/m+HhYYQQXH7V\nVdx+9CjTq6sMZTKokQiz5TLXvf3t9PX1oWkajue9rAoHwAuCN9RI59JLd3DixA9YXCwTiUygqmE8\nz0GSLHbuvIYTJ47w7ndXsG2bQqGAruts2LCBSMTn8LPfwyouYwCdIGBtdZWxjVcQ6bbZeMk25ucn\nEXILW1cZ1FRmCgWSoRDHCgVMBCPI6F2BKing6niey1Rjhv74ED2eh2nZ1KwOa16DUaPJoKdge734\nTosWJfLE0EjikmKVOkOej6+7mKbA8VtUghCqvgvfWWSpVqPsa1ADVY2y0l3Fo0omKKPNDbE8N8cP\njp/G98bYuWMPqmYwuTDJka/dw/W7R5EqlZeJkbf42ZifhyNH4LvffePm+PSn4fbb3xIjLyUcDrN1\n6xDz8wv09/8oSX19fZHNmwfYtm0b99//PI5jo6o/SuSpVpe48carLv7+B3/wf3L33S+gaeddTRcX\nj3L8+GFS0SSjcZtGp0MiEsHQNMYHUqzWYni2ilKvMBCJ07XaeIpNZSVH2Q4QwsA0AwIvi6wrqFab\nWtcjKZUZcWx6SOIGIdp+kXnfJDA1uq7NysIMP3jCp3d0lKLt8vzhc2SlMKbTIe6uMSx0RGCgCJ8E\nMvOeg1Pt8MTD95KfSjGejbNSLJJuNpEMAzeRQEmlia7LOFJAxa7SLxuYZoelpSpnZ2YIBgZeMyRT\nLpf55le/ilqrEZEkTnkezwwNcfNnP/sTfVp+yMrKCrljx7h6fPyi8LlkZISTi4uvOe4tMXKB1dUS\n0egwkUiC8fFF5ueP4wmJYreFLLWI6R6zbYuOO4TjtRmLZfBci3JQ44kTT7FTkuh0W8x1AgYG+9ml\n6DxZWMB0N+CKeazSMwRmncAS1MpFOkGdWyfbpHWdwDJwVJB8CcttYZrrBG4H4ZUZzGzCoU696xNT\nL+H4wSppI0rDVJBwGZZDmK5DSVSxQ0OAw/LCGZLlc+wdHiUajtByFAY1n5VmjY4dUA76cO0EbXsa\nPzJGNtmDaXUolYoIyebQoSpf/vLfc9112/mt37qJeDzOp77wBQ4fPMjk6dOEYjGuvfFGdu3aBcDu\nq6/m3IMPsvclJyDr1SoilXpDY5Fbtmzh6qtnefrpJ4nHM3S7BYKgxb59m4lEIpRKYb5z1110lpdJ\nCoEVBDwaiWAoCu7sM4zGhjG0MJ1uk65cZXnpCWrFGvONNp6XQJZ7aHlhzhQrtK0iVjaLvmUL2aqJ\nVISuvUzHl1ECj35DsNRaQ3XjBJLAdQMkN04sWCHlyPiA5HqoCELoNOkCPhpx6oRR0ejxbGrBMmuE\nqYhRJClMy/dxTUE6sgvP7YKwaLoZYobHJSMbaK6vMze7Qmu+TEka4IyzRK5aQ/YjeEEP3370DK7y\nHGo4zI0f+hC7du9+w8Jmvy7ceut5l9QLRVNvCB/+MPzBH8D6OrwVRfsRN930br761btYXKyhqnEc\np0Eq5fCBD3ycdDrN+953Fd///mFUtR9F0Wi3C+zYkb4Yonn66af53vdOEolch2EM4romtdp5QbJm\nlClGNO5/9ggfu34/mqryvv17eer0I5TzPuOxLMvFWebWZnD8AAONU9UlUrEMY2P7KK2XaTTXaAgX\nVdHpxUURKrYnIXAIBx3GWCVo1OkjQNh1jh49SqzZ5PCRU/S5CQbVBOu+TwaBHLTwCBN4BooaIeE3\nWLEaBLhEVlvc8qk/ZHp5mZm5OQ7Nz5Pauxf7xBkIIoz09LFYnKLeKSHpMSp1k6cWc/zZl76E67rM\nzMzgui7Dw8MvC8k/8O1vM2BZjLzEOOfcygqPP/IIH/jIR16xH69GPp8nJUmvOIEZ+Ammim+JkQuM\njPRy5EiZaDTJnj3XMDS0wvLyAqdaYQayCdZOnmW15REIGVtS0CWJGgH9A+PkFubY1ZNkpegQi4Ro\nNZsMjqaIrxdx5HOkfINau0vSAU3rwwskUkqYRnueptOk7mlkPEEgEghh4toOrmgjJAnVmsXorhNF\np2bOo/o6pmeQ8McpsIQfeMhKhIhsEPfyIHoJNRcZ1UPUyyVajSaB6+G4Ft1Wm6bWSzg8yGBUMFPz\n6Dp1bKdKrWnieEUkuUit5DEzvYIQKps3n2LPnvOOs+9673t513vf+4q1u+rqq1ldXOS5yUkSQBfo\nRiJ89LOfvXC0+uq0Wi0ajQaJROLH+p+8FoVCAU14jPcrNLozbJjYx+joDqLR8/1+CvlJeto+b9u2\n7eL7yJdK/P2DD/I7N93I+nqRdrsLhNG9GFqrxanmOuluP5LQUSWFIAhR7hqU4xJf/NznuP/+x1hY\nXGNcjCLLCqZVJyJDIhJF7ywTsmcIeVFaVpeWCND9NsJ1SUsKnSCKTJQAiXvETYMAACAASURBVDAm\nJh4eDkmi+OisEaMYdFlnEM3YRMAZLC2OZ0UJOV1EYGEHdUDCsRNMnX4BbzlBx1eIBWFKQYelSptu\nO8pQOknbqiGbDS5JZ5i653tkLYuThw/zyc9/HuON/Cb9FcZx4KtfhQMH3th5otHzjqzf/OZ5UfIW\n50mlUnzxi59namqKYrFMNnsJW7ZsuSiwr732GjZsGOXkybNYlsO2bdezefPmi2Wrt912J6aZRQib\nbjeP41TwPBUhtuE4eVw/xSMvLhAJS7z7sssQQvAbv7GVU0eXyNeLLORzaF4fvaqH7ankRBm72Uao\na0RTcRZthWhqG5HCHGq3iiaiBAIUCVSvTFoIGpJESpJQJInJpknt1Fm2dTtEZZV1a5F2YJHFO/8A\ng4aMhOd6+CjUhEDtmkS889bvm4aG2Do6SrXVYtkweGA2x7pVpt/UkCIZipEMsfRmqo0cH33XdRQK\nBb7+N39DryzTbbWYKxYZ37ePD//2b5NMJqksLbH9JdYVABP9/Txz7BjOBz7wurqDG4bxqhYCpmW9\n5ri3xMgFrrrqMg4fvoNqNUoq1Uc63Y9ltfjUv/4kV1y+i7/+8peZvv8oitfE92yWbIu+4X4828FQ\nQzStDlFVJSxJ0O1y9PRpih585O0fY2lxitP1WTKeQr2zjCVieCIg5MvUfYcWGmVhkfFDBAR0abAa\nNOgRGj3dLu3ARPPCxHFxqdPx+shgUCdOJKiSCifxdY+o0FkTCkM9WdJOCLuyil2uEeDje108T6dE\nQEwShGQfXW3hegbl+jM0ux4SJj1aLzGnh+VTK+SXphgZMdizZ89rrp2qqtz8mc+Qy+VYW1sjHA6z\nadOmH/sE7jgODz74Aw4dmkSIMNBh//6frsvY2bNnefj22xlUVW7cOMyjTx2l4DcZHf0kjmOzsnKG\nqNRm58gWOhcMfaLRKDHDIG7btF2XzZs3sVws8viBAwxpGnFdpyEn8X2HuGuDCZ5wqeBTWrb437/w\nfxGO7SCQ4xQbi0TlDEIYtLwGS9Yqm2WJqzM6SizEYqFJnxUwZ7vI+PSgUKRFFwkXHRvwMdHooBNC\noY0e6HQCBUW4ePYU4bjAMELU6yHybgPhd9FCPYSsGqJbB7eDFEoykE7R6rTwfYmKDbpsUGhWUZ05\n9iXTbOkfZ6U8RZ9hUMnlOHbkCPuvueanWu+3OM8//iNs3gyXXPLGz/XpT8Nf/MVbYuSHBEHA8vIy\nxWKRSCTCtdde86pfjsPDwy9zHv4hR48e49ixBXw/STw+TLW6iGm20bQJgsBG0wK0iMxyOc7XH5zn\nqTN5YlGPq/buZl4u0SrNMSiPMxg20AhodVWmPIk5OWC902B8aDPR1A58q0TYLKKYM+hKgJAEllMn\nE9g0CPB9D1eysUUfSWsN3VVIC52w3yEdwDQ+TXwCVCJ0MYhhBV1ydDHdISRpDavb4Rtfv41UJs3E\n8DBaPM6MUIn1jXB6vo4fHyadmWCDHqNWW0RRm9xzz6N8+S/vIK5qJLQO+1KC7ek0p+6/n28sLrL1\nbW/j1R4dZUkC38f3/de1T5s2beIxw6DabJK6ENqxHYeldvs1x70lRi6QTCZ53/uu4sCB51hYmERR\nBPv2beY97/kNJEliaNNOoskcqujHth0ss0qrWsNXXFJhWGo0uWJoiEg4TKlSZX01j+T6zJw7garr\nBIGDLemE/SQIB1noeMjYdOhFZSVwqdLCw6aLTSB1ifktPLoEfgQbHWgTxcLBQpHC6L5DmgCz3cF3\nAmzdY2xY4OkylY6J7/p4XhvFM9FkhUU/QFPHsLotmpJDPNZLo92k4xRQkNicvpyYlgUsBgYmmCue\n5eDTz8If/eS7oRCC0dHRlxnC/Th+8IPHOXhwhdHRtyFJMp7n8tRTx1/3Xrmuy6P33MPenh6ioRAA\n8VCIxw6+yAvP/D1bdu3huut28syDJ3nmwAF010WRZYRhsGX3biRZptZscmhygcVVh0IuypzqUgla\ndB2Fqt9LKlhDFT6IFCpJ4sFpbNOmbge4joZDhF7PQRUqzUDGwmG7JgiFw8gEJHQFyekyFQSUkBj3\nJbIIyqyzQogOEj20iTGII8KEVJfhvhiL66vEFAslsPC8zQgxiCxXkNQIgaehCImOUychVUkrYSql\nKqoSomlXcJQRVCOGJmVw3TnGFJ8N/UMECNpdi6efPowvqTyXKzA4PPy69uql1Ot1Dh8+wpkzc3S7\nLQYGeti1aztbt24ldGEfftW59VZ4gwsRLvLud8PnPgezszAx8YuZ85cV27a56657OXt2HSESgEkq\ndYBbbvnYq5bwvxTP8zh69Cj/+T//LaFQFljGdZuEQhEsywY8JClPPK6TSF5ONCYwzdOEYkmMIM/O\nbJaZTAZvagbTk1CEoGo51FyHkDBoo9DwTPIVHc/NMT6mo4V9iKiMREJoQmG1WcLptKkIiaTwiWob\nqHkmGaFiSTq2sEj5LjoBowTMErCMhEGAoEkRgckABikaLHOunaVXDLC22mGlnsM125RSW+jPRvEa\nMvn649RKR4j5LmHZJ18o0up7B0l9I77ZYTa/SGl9gbErE+xIp+k4DsVTp2grCuVGg0w8fnH9Vi6U\nAL/e8G4oFOJDn/0s991+O3q1igw0JImrP/hB+Mu//LHj3hIjwMmTp7jnngM4jo7vG0QiFp/4xAeY\nuHAHePbZgxw7XOC6nddwfGoa2Y7iqUkWWgsIf4Fr+3VSIyOc8X3M3Ar5ZoeykmBY0SgWbURQp+Za\nxAkRZo0E4PoSdSxa6KQQjGJQQcImjoJCIE/hBSaOa6BjYCMQJBB42Jis+wZZ6kToUvW6VF2Zjf19\nbI6pHKyus9BqE+qY6IGHHO2h5rt0tTQhOUXdNZFjSS698p2srJyiVuvQyTcJSVGEsMlk0wghSGhJ\naqWVn+taW5bFc8+dZnh4P5J0/uhUlhWGhl7/ycja2hpyp0P0JU36BgcH+fiH+3h0YYF/9b99lG9+\n8x957PFDjFdLbErEyQz1ElYUTj73HPLAAE+cncHu9DOQ2URt5QQ2gnh8gPbSAZQgTky+hMDzCYC2\nX0WnRTiQaNlzxMQECS1D1S3g08GSsyREG0c3mfM8wq6LLSksWYK2b1DEoEODQRzaCHwMNhAhTwtZ\nctCogy5YDqr09upEG+f3YsE6S8Otk0hEKK1XSCi9SF4TmwqIKm3HxbdtOmvgxuJI9jq269G2NKJa\ng0DyqNRbtO0iHQfS6W1YgUfRFHzlK9/h93//5tftnFutVvkf/+MOarUwk5MF6vUurnuOzZsn2bKl\nj89//mP09fX9VH8L/9IoFuGZZ843xvtFoKrnuwHfcQf8p//0i5nzl5Wnn36Ws2ebbNhw9cXXSqUV\n7rrre/zbf/uvf2yFiO/73H33vRw8uES9PkwmM0wk8i3q9SdR1QF838T3c6RSDrHYTjQtQbW6SkhX\n6DfCGOpGTi/k6UunaSfjnCnVabUTyHICXU/i2F3anSIea/iteTK0qa36yEZAzNCoxxX8doemJlF1\nJIaDgLLr0LIc6lhkZB1JimJLTboBqIGHjiCNzzo+VaLU0fHJoiDoModKGl0fJaZFkOwwC9UOHeEy\nbofYNTDKxniWp178PpniFONDE1TaTQbjm7BbDpXWEhkjSa+apW61eeTEOd67ZxuObTMeDtMeGeHs\nwgL9rRbxUIhKu01Z0/j4q4TnX4uxsTH+zb//9ywtLeG6LkNDQ0R/Qp36jw/ov8EIIQaEEEeEEKYQ\n4k17H/l8nn/4hx+QTO5lZORyxsauRNMu4c4778c0TQBOnpzB7vpY3SZhr0lftEx/vMGmXp3tI1Es\nSWLn0BA7evvQ1SiSkWXr0OU0NZWwKpMQIUBhjRoKNiIQCBxcPOIEeDTwgFEEEbr45DCdNoVA0MbD\nFW081umwRhkTlxYWs1jYTAqFNSMM/b1sHB9jviGjdQYYSV9ON7SNKa+fST9N5JKPoEQNGt0lFOHg\ndlY4duQu4vEaV111DYYuoSgtYnGdwPdotSuomk1vT+Y11++n5XzIREFRXn68qmmvP4dBURS8V3nd\n8310w+DOO7/HyopgW89GvMww657G5PwqS8UiR1dWKXky665B10hzfGWFs9Uyk6trzCxU8fwInj9L\nw52mERRp+fPInGUYh7BvIwc1QEaSFBRZJaIq6LKD0CN0JZloXx8Fz2Ou7iP0Xgylj6y2FYtNrGMQ\nJoROhCI2FTFALhSj3hemHFNJDKT48MQobxvawP7RMW4YGuaKwSxb+sfpT4VwvCnSyjK9SpGk26Xs\ndlkRDjNOC0cK4wgDnHUiYpaN8Ti2olIq5SiUF9iy7XIMI0yh02R8xzWEQuM8/vjB173mTzzxLJaV\npVbz8f0sw8P7GB5+O8WihW338e1vf/91X+tfKnffDb/5m79Y748fVtW8Sgj+14qDB08wOLjtZa9l\ns0MUCh3W11+1zyoACwsLnDixxsTEleh6mHA4xXXXfZ6JiWEGBlokErPoegFNk1hbW2Jm5jDl8jRd\n0+X0/BqleotWx2bH5s1gGAxETBzJRNNUNE2l4hXxMQkJn8s1h3fEsuxU02xyAxqmSW61gGFkGE4O\nYjoec26AQ4DpNGk4FvO2Rdtq4UsyDUliTficxqWDYBsdHAJ0xjCIYdBFRsf3PVLpBE1VJe95dI0+\nhNSL8FssrE5i0yYmuwwKQcKrogYuTkdC6ToQmEgCZEkQkuO4ruD08jIDAwP4vs9Afz+f/uIX6bnm\nGpoDA4zccAO3fPGL/6wHDVVVmZiYYOvWrT9RiMCbezJSAW4A7nkT3wNHjpxA0wbR9TCVSgHb7hKL\npanVopw7d45du3Zx/OghKrPPINsOfUiYWgg9PoQsJ4knPYJwmAeWl4kFEus+6LExkqEUZ+QwSiBI\neIKwCIgGGgVkfBwiSGSJoVCjSoEOTWoYtHBII5MAPBzWyOMFKj0EWLgMoaIhMICqHCETSuJoCjdc\nug1Z05k+u4ZZb+MJh4ar0pXGke0o86ceZX/fMIutJZa7NQIvS9AQnDlZYHVhmba7Sq4eILfzRKMh\nJoaHaNtttu7aycGDzzE+voH+/v6feb1jsRiGEdDtdjCMH/VDMc3Wa4x6Ob29vRi9vRQqFfpf0n14\nanWVzPg40/M+hmEgVJ1Nmy9ltZxnZnWGZ+eL9AztJeRtpNHMsd6t0inN4bldgiBAshtoAThECTFP\nDJ8EATEENRysQGFYdlj25+nYc/QFLhHJx6fLgl1jWrQJvfgijgddX2NZsukGo6Q8B5cQBRI0KNLC\nxKIHTUSQrBVCbY9o0KY0XeRQMsmwiOA16tRsE8XxsFWboewwpfI8tl0m4vmYgWAwsBkHTgc+K5ZO\nPLmBAdskmtTosowSUVmr15DbGqfm5zheKtK/8zL2DmwkCHzm559/3Wt+6tQs6fQ+Dh9+hmRyEwCK\nohMEERRFZXW1TqVSeUU36F8l7rgD/viPf7FzXnXVefOzqanzHX1/HQmCAMuyX/EAAyCEgm3bP3bs\n3NwimpbFMAxGRrIsL6+TSvWzYcPVJJM1zp416HbjBEGI2dkpHMcgGvVBGsaxHc4u5dg8HGLz8DCx\njRuZffYF+kI1Gn6TgmnRAeLhS5HMp1hxNBY9DzuwEL7PmGQRBHCumGPelJHYiA8YdGj6JgYS0EYL\n6lh2gI9LSwh8SSHpB5RQkIiTpoBOhT4EDhoVPGaXX2DP+DWYqRQdU8aq5cD20YIm6ys+5coS8ZCg\nWa0i6WE8v4jihlEUmXbgIXseptdAV1yWHYfxwUGOFwpcuX072WyWG94Ee/g3s1GeBVhvZstigGq1\nSRDA44//I/W6hxAGllXGMFzGxwX//b9/hRcfOEDWipzPFZDaDKoBS5V5qprLjbv3snXDBmaCgKfv\nexDX1qg3q5yqNQCFlgr5bh1XkenzOuhBEiPQUAAbEw2TPqCXNpO0SaMQR2UIBdAAjSlcVpHZQ0CS\ngBodYijYXptTHZt0qJeGJYjLAasd6Hhp5CBETIvgUqTj1Il6Hfxaka7nklF3kIhm8YFVs0WjUUGo\ngyhaP3gR2l2JF2YmSffYmNb7eOCBWTzved7xjh285z3v/JnaTCuKwrvedRX33PM8fX07iEQStFo1\n1tdPv+5rCCH4wMc/zre+/nXyi4uEJYm67xMbH2fTzp1Mz08RiSQoETCi6mzs38BSpU5au4Te1BiJ\nsc3YgcUTjx0iJA2RDm1HjWnU20tEWKEpNLq2wnDgEUXQwaYBdLEJeza6sBjwQyREhIgvaAc2G+kQ\nC4fIqCrHa200wqQ86GIRFmEMBD5hiqi0SSOIIfmzjPkdNlk6fakwitVldn2dI67PpYksY8keirUG\nJ1rLJDIeOgWq3Qw+vdgErNEkxioxP4TvdggFLm6nw6IZoMb62TyUYMXMMddaoze6hdHxSynWuhw9\neohMJsbg4OuvYAqFNGzbuvCE/lLDKRdZVhBCIvgVfnxfWIBz5+DGG3+x8wpx3hb+e9/79RUjQgh2\n7Jjg3Lkc/f0baDabVCoVHMdG05o/9ql9bW2NSqVIu10HYNeu7dj2cdbWZmm1ylSrk4yO7iWbHeGp\npw7j+yaq6uF5IQIklis5+uJV5pfb/O0d3+H4YofV8ChhKUYoFCYZ76Uy3cS0CyjBAF16cLw4JiYy\niyz7NhEq0HIR9GITpkfR8V0HjzWyrDFMQEgI2oHLEhALwApghRhLgIKOTJFLUBBINBEIQnSsKs/O\nHmNwdBedylFG3TYbI/1orsBqVhhyW4Q9nX3RJI6isFJepOyAGpog1dPH4uoZCObp60nhJ5McX1tj\n+/XX/9R5ZD9P/sXnjARBwPT0NMePn8X3A3bv3sqWLVtedwfC8fEhbr/9NlR1B6nUALncKuVyQLn8\nHMePP4XdEuySIhhI1F2brmdQMlfJBw5tLcX9z+S4/4VZ+jcOs2RGWG8WcOwG/QT0CkGPEcdFkBdd\noiGJSrvCHGEMApI0CeOT5nw5rAXECBjFRgJkQmjIbCDAxiMgRSBKqEBZSChBgCZsYtEUp07P4dQW\nkZpZDDwsIrT9HmQ0JH+Frl9gplWlzQDpkIEky9htE4FH1/FIRUe5dtcm1ms1Cq0WUmQfg4MG4+N7\nkSQJz3N5/PFDbNo0fjGX5qfF931M0+Tyyy9D0zQOHHiOpaUWmUycT3/6Bv7qr17/tfr6+vidL32J\nmZkZmo0GvX19bNiwgbNnzzI/9TSKq7NayuM3q4z1jVFumPT3xqn4/vlutrMzeJZC3XUInDKaJIFw\nSOiDNFvHMQOFSXwS+CSQ2IBKFpc6FlrgksbEl7pUPR8hC/p8lWKnS0OYZBGYSGRRmKFJEEToImPS\nRGEEjSEC2iRwGCaE7EvUKjUMPAZsmyUEjXoVxXFpdNrEAhDOApraT+AkCCHoBTQyLBJgUyNoN/C1\nJsWOjSynsR2bNbnCQHIfpdYL1Jodhn2ftbV5ZmdbZLMaV145yOTkJNu2bfuJ63311Xu4776ThMMK\nZ868gO8LhLDo6zMRQiKd1n+lT0Xuugs++tHzeRy/aD7wAfjyl+GP/ugXP/cvCzfccC3T03fy5JPn\nKBQsHCfAcVbYuTPNuXNT7Nr1I6v3drvNXXfdy+xsmW4Xnn/+IM1mk717r+Xqq6+gXC6xuHiQnp79\nrK3FmZ4u4XkZUqkrgYBm8zSNRo5YupfVEsw9fQRJZIgkdpBMx7CsOp4+ht1pIyjguBaCDCYhBBEk\nVFQmgDUa6HiUiJHAwiVwHUrYpPAunLw6JAMISxIZP2COABOfPDYgI9FBRtBGwgAMfNrUMADdq5Fb\nPEVc5MnGNCpeg6BZp9suMBy4zHcDAiEYjsW4rsfmO6vnMH2bdrfAVTujfPDKjzCdzzO0fz8f/MhH\nXrUC6RfJL7UY+fM///OLP19//fVcf/31dDodcrkccL4L6cMPH+D55xeJRocRQuLYscfZu3eSm2/+\n0Gt6XPyQgYE+bLuLLKusrKxSLLaRJNC0QdrNAnHXwNBM+uNpNL9MudVgOZBRxSB9IopcU1j1k8zl\nS8RFBMVPEg/OsBXQAwmzU6AkzldXPN8OcMigECaDxDwp0uTJ0mUNSAEGAVECLGQEoOOj4hPCw8fH\nDEDgkxYgC5mCJPDy01iORZ/nEqDSIkYTj7bbIMDCwCTGRiwvoBa4hD0XxWlhBx00Q0e3QlimR36l\njuXatM02fT2X4PtNHMdC10PIskI4PMyxY2f+WWLk6JEjHHzkEexWCyUU4rJ3vIMvfel3CX4Gl1Zd\n19mx40eJr9VqlQP33suEX8VqKQz2DnNyZYbJSoGi49KbTVMtVxCVCnFZpz/SR71ZgG6RcDRJQtM5\nV7ZxvR4iZFBxMEUTNcijqiqKUyGNjCMChgPBouchk8TzAjpUsYAgkIni0qbOGnHW0XFRkSgTYCGQ\nEIBEBgWBBvhOCwMXV/KxAJmAsm3jBQ0qeAwJmafX1+kGW+hHBQK6F7Jm0sQoUiISGKxX69SDML2a\niusUWVtuYScjSCKL25nn8MElQtF9GKEE+/ZdyqZNm7j99gf5d/8uS/YlycCvxpVXXsHhw0d59NEX\nqdfDCHE+x6hcVpidPcB//I9f/JlOzH7Z+e534SW3o18oN9wAn/wkVKtwodnyrx09PT28//1v59Sp\nr9HTkyUWizE4uJtarcif/ul/45ZbPszQUB8vvniW73//MWw7xJVXXs/WrRMYxjCPP/4AnU6F0dFN\nQJUrrhjjscde4Nlna6hqH6bpIYRA00aBNo5TptEwaHQSILLo+mW0rS4hR0NRZIrFF3Gd88FXmxQu\nUTroyJgI2rh4dLBQiOCxgkeVFiod+vGwiVBAxyOFwCDA9300wEPCQkEhSwB4FNDQ0Qnh4dHARKMB\nCAJkMnIDw7cJazpdT8ZsO4SEjhtIBFLA8XyeyVKJcCTC6OgA7775AwS1GqVSnbsOn6N3bJTrL9n5\nioqkXC7HzLlzAGzauvUN78AOvzxi5FXvYn/+T/77jx07zj33PIbnnU+GabdztFpw2WU3XRQemcwg\nx44d4tJLZ19X/w1Jkrj00stptXQOHDiKYWRIpzN4Xh/tapdkZBPr7VNE/DqSMJDkJoGbxhch2rZE\nw5NIRnpptVRMqUAMizQ6McLYqMxi4gRR/n/y3jRKrvO87/y9d6996a7eV+wgAQIkwX0TBYmylphj\nK7ST2PKS5MxxjhN78iVjz3yanDMzJ2fOaOIkYzlxbDmW4sgKZdESJUILCRIEFxAAAZLYG0Cj9+6q\n7tpv1V3fdz5UCxKGlERTliFK/3PqS9Xtum/f9956n/f//J//U/UcDGIcPFws5hhAZw2TImdYZjfg\nAitImpiYGCgMXCI6KFx0+lHEGASY5KXPOoJ+Cf0hNITo0XxAmxXaGJh0EeTIMIRNjKNimsql3CkT\neGk6tsFIeoBq6xoD2TFqrTpmHKJ1fa6dO8nEzhQXL56mXC7jOA7FYp4g+MG6EaUUy8vL1Ot1stks\nY2NjnD51iqNf/CK3DQ+TLhTo+j6nvvpVwjDk4Q984IfO0bvFq0ePknVd7vrgB5ibm+PKlUUOjPaz\nbMP2TJG5c3NQ65JIJFhtVHEDkz5T0a8J6iqk1o0JY4kt0mgqpkNIR2VpksILawR43G8IGtKnpsDA\nwUfhYRFi4KETEAMJBglYYgPIYnEZhU4HmwgHnUEkDh2SeCgc2jQJSUkNE4iBs4QUQ50pzaRt2Sil\n0ElgXlcTBZsyN4VGiIvCxSAWDitxG5sqRA6dRovt2/fR6LQxjVEkGZyiwfbtW0kk0ggxyBtvnOHg\nwR88Dz2m0eKjH/15PM+nWm0ADomERS5Xvt6I76cRa2tw7hw88sjNOX8i0Tv3oUO9oORnFVeuLLBn\nzwcZGBin3a5z9Oiz+H6OTmeSz3/+RVZWFjlw4IMEwU6SyQGOHXuDO+4ImJrazeOP/zLLyy/wxBO3\nU6vVOXToLXbvfoxXX/08ljVIrbaMrvusV14mjhcJQh2lDJRqYhjg+/NIGbGy0iKdThAEPpZsMFXM\nMLPu0Q4lHl2gjiCPgUIjIkOHcSJSuNhABQuTFJIuJoIADQeJhqRMjx0fRpBDwyXa3FiuUadBb2tq\n4VOgS0hHJrF8j5TuoeptErZBPlsk9mJanoeQPntTCbqAcBxWUik++Su/whe+8DRSpPjgXbtxHJvn\nn7/M/PwKv/mb/xAhBM9+4xucP3KEgU0a8Mxzz7H7kUc4+NhjP9YNx00LRoQQBnAI2Ad8Qwjxvyil\nXvt+x5fLZZ588nkGBw9g2z1Pg9On21y6NM8tt3SvO3gKIUgmhzl3buZdBSMDAwMkEiHDw1PMz1fI\n57dQq63TaJzC0mM64RoxDspv0KelaURtXArkgZyA5ciAjosMXSJWsGjTwiDG5BoakmlGMdhAx6CA\nzzpZFulQQDJMlS4ZoAhEwBopFJIJQgQ6awSsYFMlR5sa/fikSXKSFhEJ0ujo+BRUyAaCRSQpMpi0\nCNAx2YmGJEbSVR4ZobGiVlHaICrOc3XlNI5ZIfQlXjBAXWmkMnnCxgzV2Q4XktsoFrfTbnvMzBxj\n794Hv++19DyPv/zLp7h0aR0h0ijlMjGRplOe547h4eueIAnb5vbxcY698AL33Hff2+rXm80mrVaL\nfD7/rpxZlVIcP36S//iZz5OLDY6fX+SunWPsvXMPzU6H4No1VmurRF6VlYYLcZGkEREG54itScqx\noNwp48UeJuvkRY51dIQaxBaDhKpKGxOHDEtyli0q5jgGEkiQpQG06cchZJk2A/i00VjGIcEQGiZV\nKnTIIkgSs4JCp84El7nAJBGjCAwMVoABEoygOEPAhMxw3mvjME2LOj55FBYaOpKAiBpZQipU8ZWJ\nrrVBGUiVwdAjtBgqq9ewMiEpp0A3iPE6dYIgQEqJ46So11s/9Bp7nkel0mJi4va3fbaw8Cr1ev2H\n+j28X/G1r8Fjj8HNdNF/7DF49tmf7WDE8wIMo/cbcv78aaJoiEJhJ15xYgAAIABJREFUDClXqFZX\n6et7kJmZFYRIkkjkMYxbOHfuLcbHt5PLFZibU7z88imefPIQqdQ4e/cWuO22vVy6dJFEImRt7QwG\nOpY2iiddoAb0qsekjBAiTRSl0fUtBEGbMK6w3MrSCT1iWkAA7NzcHqxjMESbi9TQaNEihUU/Vcq0\n8YjooFGn97vfBZaBfnQ8HBK4DBBzFWhicQ2NAVI42LTRKOOgEWHRRyX2aChFot1CdQM8Qjoq5o7s\nELHjowPewAAf2rePJ//iCyixjX37vpvWmpzcy9WrrzE7O4tpmpx74QXunZzsmZ0B01Jy7IUX2LF7\n949VU3IzBawR8KF3e/xbb51D1wevByIAyWQGKe2eLe62bdfflzLGNN/dv5ZMJvnQh+7ia187hZQt\nzp49jut2kHKdpqvRkBtoyqJqWMzFVSLaWAgmxAABYMuIKKiRo0YRxQgxTQwuUMNjhCw2ihiFiURg\n0k9MhRwuJnmaRFTo3fbzOCSYYoMuFdZR+HQYoM0wGhZNClRZw8DHZjslCgToLLJOi3WSNBlAMoZD\nC48WXVZZRmdyMxPpIIWFroFUc9j6PMLWmJrcwtLMAjVNx3H6aYcVIqoMRWl8t0GUjel02uzYcTuX\nL2/QbDbJfo8pznfwrW8dZmbGZ3Lyu14A166dY+HcGR7+2MEbjrVMEzOKaLVaNwQjX/7y05w8OYOm\nJVGqw3333cpHPnLwB2qAjh17jaeeOk46czv5SCGE5D8+/QrFjMFgYQuvnL1MrEzu3H4n3dY1Ytmh\nEy5zt9OkwywroYGjAkxcdupJbDq8qnQ8BvGVSQyYGGRFgpZKoqsmTQxcthDi4JJgEIGky1WW6bBE\nlwIpduCRok0V2A0kN5M0fehUsPFYZASXC3TRidCwSTOIhYuPBrxBSAuTFGkiDJrMkWIADYhYo491\nfNIYGICOLV3SooUggadilLYBwsEILBbLp0AMYaULfOMbr2FZIZOTCT760e/vIaCU4urVq5w6dZaZ\nmYvABGNjE9eZSCljlAp+qk3PvvIV+Pt//+aO4eBB+PSnb+4Ybjb27NnG2bPHyedLLC+vkMvdg1KK\nOK4jhEE2O8j6epM4bhPHEaaZoN3W6XbbrKwsMTu7xNjYfVjWPoQocPToSUZGcjhOAc/zQCmy6dtQ\nKsT1yihSSDWIlD22U6mAOF6m1ZLoeoFYWTT9EI1+NJaRZBHUAYHARrCBZCsdWmSIqdEgRYM0Nimy\nlHE3UzmwDoyibTLiggIWNhHgcRxFkgnW0VEIfHLk6aPNEtClgg1qGEUBKRWBlkXKZbz2EtIQxOk0\nj+zdy9233sofHn6VvXe/3X1Z0/KsrKzitVsM2fb1QKTVatGo19EaDc699dYNwUgcx5TLZXRdp1Qq\n/cisyU9KmuaHotXqYJo3elEMDo4hxAlct3P9vTiO8P0V9uy5+11/90MPPUBfX4F/82/+X86evUyx\nuJW15SYi2oKFSUwFQp0uAUMEZESHulwnpeXQiEAFWPoGaWUTSJ1hbJoE9LqIxEgUERKNGB2DGA2B\nRR13U0Mwzgx1OmiYZEiQZxWHOinSDJAgJkUDgzSruHiMkCOLho3EQ2OIVbpswUUAPhEBaTLEdKhu\nxtgdwCWSDdKaT1LvJ6FJdK3J7JWrjDu3kLUcAiS54i5W1gzMQodksotprjI+bjI6OoDvV1lZWXlb\nMBKGISdOXGB09MYbfWxsB6+/ElNpNCh9T0OmKI4JNe1tzMeJExXGx290Zk0kXuLRRx9+x7mLoohv\nf/sYo6N3YNtVLr36KrEfIeU2Wp1V+jKCZpCiL3UrDRe2Tm6lvLjIxvo6k6KM61eJojyT9iBerDEb\ntSjikcJGMyzCWCEwCK0c5aiKHgvO0MsrRqQJKOKQoMM6GQQaJSQbmAwSY6Bw8JAICuhY9IjaJgY6\nijXyKBQ6JRwsTCQOIYoWCSJMAkIsOgQsYrEdiU3EIgkUMW2WMIiwaTOIvRm6TlgFkqbOWmcezYJb\nt97K2dUyi66PUjn0MIGtqgRRhZXZSxx8eJS9e/e8o3bn0KFvc+TIBZLJMZLJcZ599lluueU27rqr\n17djaeki+/dvfVc+Au9HdLu9PjR/+qc3dxy7d4PnwdWrsGXLzR3LzcKtt97K1q1nOH/+NVy3jq7X\nCIIWW7cOs7jYII4DNE1j27ZJZmbmSKVKKOXTbNY4c+Z5du++l+XleWYuvIweaaSMDEfeOszw5AGE\nUAhhY5ltbt8+wqFjJ/DCbUCJHm8BoAMxSs3S19fP+noOPx5C0aJAlQ4DWGjYKLqUCcjhUASuksBH\nZ5AqMT4GCXwmyLFGiyRtkoAEXEz6KCKICYnRUSRRSFpYKDwMIEWMQsemRZ0uCWyxHV0U8VDkM8O0\n3SFqcZvZbpWCGaGEYLVaZXCkhOe1gBu7L0rZJZNJ0201EfQ2IefOnGH1yhWSwFKrxYV2m9179jA5\nOcmlS5f45l/9FVq7TQwkBwf5xC/90o9kfHjTzMb+pti2bYJO50Zzm1yun6mpLFLOMj9/noWFCywu\nvsrBg3v/RnSSEIKRkRF03WLHjrtYnptB6yqG9CwlK01GZEgiCEgQaEX69ByausJq/AYtzhOwiNR1\nWo6iKRQ12hRxUIT0OIYGkgUkS3RZJqRNE0FMgxJZcpRYZIAqOjW6tDDpYGBRpFdfo6FTRMPBxEaS\noYtBhEFACh8dRYENdFrELOHSwKVCAosWVWaQzFOiyQQRe5XBNr1XBWFKyEqTMIopWCmKmk1zbY5i\nro+qH6ElLJrNNktLMceOnee1117CfYceA2EYEsc9N9XvhWEYDE9t483FRfww7B0bRbw5P8+t9977\nth312Ngtb3NmffHFU8TxO9mc9ZTznge2nWBkZIShXbs4u1QhVknm1pZ58+LzZA2HTqPM3EqFgb4+\nOlKiS4dLsUnFGqbfmCQtExSNJEmtSEOksEWLSF5FM+psHZtk+9R2QsuiSosyoOMDy2RooNPYZLh0\nfBrUCXCpU2WDKm/h0cRliYg5FDUE9c0ANU0XEw+DNUJcYmr4rGFQJ0NAm1FgApsJJCOsoRHjcyuL\nDLPGEA0G6TCOJE0OCweHiIAAgWUaJM0Mhy+WeXMtg5R70IRLUszRaZ5i+5DO/ulbufryy7x27O2e\nI8vLy7z44nkmJ+9hcHCCu+/+EPv2bef8+Zd4443nmJ9/iZ07HT7+8Q+/62ft/YbDh2H/frjZhUJC\n9ISsP+4GfT/JiKKIfD5DozFPp1NhdvYZdu7Msn//bWzbto2VldOk0wZ79tzKPffswHXfIJttMTUV\nMjaWZ3Z2niOHz5JQY0TdDKvlJRKRjVO/TDLRolQoMD28m3rLpbfJHwESm68M0AEaCGHi+8MYRha0\nChYVsqTQqOMgSaBt8px5JD4mEhvw0DYbetRoABepI/FokOIaBeZIkgAkLhUaXKXOBVroeAzjMYrJ\nFhKMsUGXJSIadOmgiSwJu4+AmFBLUO/WCeMQG5PpVJG7cyWqZ8/y+UOHuO3Aftrtazf4OjUa6yST\nLjt27GD77t2s+j4Li4tUZmbYUigwmM+jpdPcOzbGX3/ucywsLPD1z32OW2ybeyYmuH9igsF2myf/\n7M/wf0gzvB+E9w0zsnPnTqamXufatTcplSZRSlGpzPLoo/v42McOcvXqLFIqtm370N84OqvX6/zR\nH/0F8/MdksldKO8VDJUETWDpOoE0GTIK+N46Hj6aajCBxwIJYJwEDpqVwUiliOxZcJeoBi4ugjR1\nBjHIYuGzyDLtzRKwJYYYwsShjYliEJ82LlfJkkSQQtEgxEJQoYkiJtjkW1L45Gjg4RAjCOjQxiNk\nHIMhJAKdBh2uoohw0dEJgJyWJG+mieMuelwnjtr0J5Kshj4rnSr9CZu+bIK6Lqm5LkNBPwMDvfxi\np9MEfF5++RS33377DbRcMplkeDhPo7FOLvfdygzXbbJr1wT33L2b40eOYEQRoaax9wMf4JGDN6Zu\n4O3BjGU5hKHC87x31I8kk0ksSxGGPqZps2PXLs6en6O+4VLsSg7ecjun5zuQKLBQnuWtmRn0OEZz\nLArpfgx9Gn+9S8cLCJHk8jmaLUEYb6CLZTQjRa06i9aSZOQC47j0pJwgWEQBGhoeFgEuedbpw0Cn\nQUiFDUooJpAM4+MiuYbc5EgS+IxSIMBmhhmKmJj0+hCts0E/5ua8xUgaJNBJUGMDD0igMUZIB4vd\nxCyRoAkiS0c1Sdk+pUwJJXM0amsEooBBDunWiZw0yrKYW/LZZiUYzec5/dJL3P/AAzdc2ytXrmIY\n/TcEh3fc8TCl0iD9/XV27dzCwsWL/Nm///dsv+027r7//ndM372f8dxzvR4xPwk4eBC+9S34p//0\nZo/k5uCLX/xrZmYC7rzzk9x2W8zhw4c5ceIoQgQkEhZjYy36+9MsLZ1CKZd/8k8e5vHHP4ZlWXz1\nq99gZgaqaxvoUhJ5McrL0tDmmKpL4lREYnyKSmWOWvsyQWijsQ4YaFhIdCQuMEAY2HRFCphEqXUE\nFXIkaNJAsoSgH4OQLk18PEIkC+h08AmJsPGwCfBwmaOPPMN4ZFlnlkXKm/WQkhzwnVxAa1PAqgMZ\ndCwWqBECbWIVUQuqYPaTzxQJ/A2yCQNNZpEpn8V2i6Bdp53I87WvnUSpFqurC0xN7UGpmHxe8Bu/\n8YskEgkmJyfZ/sADPPWHf8hkFLHcbLIWhoxs387OiQlOzc/zrUOHGNZ1ct/zezzc18fq3BwzMzPs\n2bOH94L3TTBimia//uu/zPHjJzh58nzP+Orv7eHAgTuwLOtHoodefvk1PK+PvXvv5IUXziIiA0WL\nTtjGjwOgSYIcQgT4ss2iiEggqVMiIkEZg1Ig8Lw2yVQf+WKI78fskpJUq46jTKTqkCNmmIgVXGY3\nq8ZDbHoFuBKDNDpJ2lwgJoVFF40aGltQJLGo9cpNaaDh0CRJjI9JjEmLLfRKewUhgg5NArpkEQwi\nsXFtEyHXyQqBjCVZzadgCYRwqeLhDEygJZP4YYiyA7YOb0UInWp1CYhIJBQHD36I1dUzVCoVBgZu\npPo+/vFH+c//+Sk8b4Jcrp9Wq4brzvKpT32E3bt3c98DD9BqtUilUt+3hb3vd2/QBblug1zOIZlM\nUqlU6Ha7lEql64yKaZo89NDtfPObbzE2dhumadHXl2buwst8cPs0g7l+LOMcaH1MDA/QFAItnabc\nnKNTr+B7NbS4jzx9NGOBaAQEUUBX08im+wjDCyS7AaoD02xQFFBQcE7oDCmfReZoMEZMnj7WGSKP\nRgIdjyQGOi5LNIhIk8SmxRARTXQWSWzyIwZJfPJcI0DQBRqYNDGRmNjkEEQoNkQdWynAQZAnZA2d\nrRgiRayGaHGFgrJRaPhxFdUdw9NCfK1AHKfw3UVslSEOspjYNOMuM4vXkLKE1+m8bS503XhHIzPT\ndFiZu0ZiZZ5tg4OYjsP8Sy/xX8+c4df+2T97V6Lj9wsOH4Y/+IObPYoeDh6E3//9njX8T3EV9Tti\neXmZS5cqTE7eTxAEVCplJienyGRMUqkNnnji59m581cJgoBarUY2m73ue+O6LvPzazQa0wR+AXwf\noepEapaCbnP7WB/HO03G9k/z/PKrbDRjlBrYrG+popFE4NNT9vURxRB11ugFByE6G/gosihC1hE0\nSRHQpAkM41PCQ8MhRGcVBxOXgHFgCReTNjkUGgbraGwjZBTIIVhFkQdmUUSsEV1P5nZJAGObK1FZ\nvkZX3oLq2ggRASvkshLfMJhbKTPVP4y3scraS88wPrKFlh6S3jHAL/3qrzI5OYmmabTbbRYWFpjc\nupXx22/HKJcxHId7h4cZ7utDCIEpBGsrKwTNJi+trWHbNlvGxihms6Q0jWa9/p7n+H0TjEDPV+LB\nBx/gwQcf+OEH/xD4vs+bb77FW2/N8OyzR5mc/ACl0jie9yyh3kZGVdrEGDKLQ8BZb4aAZSY1SUk6\ntFD4pPBIYusFVkIXVAyNLguNNUYyfbQ6TfrlADYGGjERHoI2EBLTpIZLiIaJtmn3XsNlnRAPk54K\nulfoGZJknTySgBKCKgazdEljYaKzTIkW/dhEKC4SE5CmTT8GDhYuKXRUYNLSM3SMDk7soukhRSNL\nhCKK51ha6ZLecRthWGZ4vIRhDrF1ay9tYlkWxWIRXddpNAzCzZTL92Jqaorf/u1f5ujR11hcvMD0\ndD8PPviL11NmlmVRKBRuMKnbt2/XDSZ1i4unGBy8hXQ6T7NZZX39HI8/fg9/8id/wezsBppmo2ld\nDh48wEMPPYAQgocffgApJS++eIw4NikUKgzla/Q7I0SRz47+DKfmTzI4tIP19QZzixcp0SAb2nRC\nhSaqLNAkZAQ9cugSYJpbiIMJEnGLYdVFx2XYSdMJJK04IsBknSQNTJLkAZc+IMkQIRKQaKRx0NAp\nI+gt0IoOkho+JeokqFMnJkCQZwSPKmsM0kER0wdsoYtLAqElyMuA0wg8BrHoBcwQEKoQkLjCASvA\nNjQ0qdFyDFYEuLJDWri05RIhO1Bk8cM2Ml5iV/8wr5yd5WO/9naF5vbtW/n6118lDKcxzZ7IWMqY\ntbUzDMoWd+zcf50d2zU+zpn5eU6fOsUDD37/iqv3E6rVng373e9efvZjxeRkry/O2bPwHjef71vU\n63U0LUOz2eSll17H90103cb3FY3GLL/3e1uxbZu1tTUajQZCCAqFAkIIzp49h+NMYtsZKt4qlhQY\nwkYTKQy5xlp9g/6+IS6efYv+/r3E4UXcapVuZBJhorAQaPTKdnUgiSIkyTLjxCRwyOMi6VInJETg\nIjHRiTBx6UcR0GENmxI+RVw2uEIXmxR1PAIalAgpAkP0qistFEkgBHIo6jgokghMPCpsI2ZC19Gt\nBEPEzPivs+avEqiQnNNiuCswyxtMI1isLKAZJluzfTSXr2IXhymfOs3c/fczPT3NiROv89RTz1Ov\nR9Tr62ysz3HngMMH77rr+hxEcUwlDFmv11k6dYpd/f24ccyzly6x/847aUhJ6UcgBd5XwcjfFoIg\n4L/8l7/k2rWAQmGcTifFyy+/SV9fmunphwi6X6M8F5PDRtckvkwgqTKKz4RIEWMxpFlUpUBgshG3\nsAhIofAI0PFwWytE5JFYSCQOGg5JArpE+AjatFkhg0WRLC2W6NIkRYEMG+Tp4COp4ODRIomOTxpF\nlxQZDFboYwmBzRQpWuSBCJeAGkOkyeIwgoaNj0tdLDKobIJYZ8ldZsIKcNL9rHgeFa/NXjNN073C\npbPXaFJiShWItArXrh3ljjv2sXfvLQgh6HbbOE50AysShiFra2sYhsHQ0BBPPPH4O153pRRPPfU1\nXnttnmy2Z6LzxhvfNakD+Af/4CEOH36NubkGg4MFfu3XPsyLL55gdTXB5OQDm+cL+PrXT1Is5tmz\nZw+6rnPw4Ad48MH7aLVaOI7Df/g/oFhr4LYa3LqtwIc/sJuFapU3nzrOdrPJbmHTDDzKWsyG1Cgi\nuaYbxLKFrecpZbbhBxGhF6ILi6zmYlv9uKFkmRhPJZEkCVBkcDaLpy0MTDQkAaAIiLAJCLApEtFG\nsIrFNCZZQgQao/jMotGiQo4sVZIYxMSk6AllHSLasovCIU2EzgaCQSyyRFzEZpmiiMjqAQ3hs+SX\nGc4k2GgtkNUstoctItFgSKSpqAu05RIJYTOQtBDYnFtb4fcOHHjbfJVKJT7xift4+ulXEaIfIQRR\ntM727QX6K/7b1PNDuRxzFy781AQjR47A/feDZd3skXwXjz7aY2t+1oKRXC5HHLc4efIMUKRY7Inh\nW60YTSvw1a8eotl0WVjoIESKMGywZUuOT33qCRYWVlHKZH19A7QcUdRF12NMPUEku1yqNRkvDbGx\n3sBOKXZsv5eZU1/Gjy10NQV0EASY6ISY9Ep46wxTJb35lAZo5LBoEhKgsOjDwCFGJ8sqMV0k2wjJ\n0KEGDBMwR561zade4tMgg8ShtzCb9IwwV4EuAoVBgEGTiDQOOhH1uE0uSDCSsDAsMPRVjJRF2m2y\nU9mkbAtdmCS7bU52fUSrxhbTYr0yz6rZ4fDXv86O3bv50peep1KJWFpqoOsjhFGWLxz9NkEc85E7\n78ALAmbrdeyhIbYtLdEeGMCMYwZyOUpBwDeff577nniCLT+CuvpnJhhptVocP36S8+evsb5e5urV\nJjt3PkAYhmzfvos33rjCwsIGqVRMIj1J/+QeaqunMUKXki4I9TQFmWbISlPtttGEgy4qoAxyWIyg\n0yEgwQpbEMxt+qa2aeKRwEUg8HHRuboZMRfpkmeeAAMPi2FGMVhmkjQ6EolPjE+bzCYh2CEigYZL\nCoMUDhIHAx0QdJCUsbDIYAMhvQ6RCbJ0lc0qHSIV0cZDiyHXqNFQMTudPPlIse538YISJXsAfb1D\nVwasqFO0WnVsG3K5FJ63wD/6Rx/G3DTEOX78BH/8x3/J8rKHaSr27h3jn//zf/y2FA70OmieODHP\n9PQ91xey75jU3XnnVQBuv30/t9++nziO0XWdlZUV5uebTE5+12nVNC36+3dw5MiJG/KTtm1fLxN+\n9Od/nte+/GVu276VfDrNRrPJpWqVfsNiX7YP2eoQxJIhaSOBDWJSho4b3UEcX6PjVtFUH6Hop8o8\n1QgW2nVUHAI2y3TJIMmgSCBpY1JHox8fY1MXHuHTREOSw8LC2zT8D4iJNmttPDrEGBjEWDQoELCT\nBOvElIhoAwERTQQeGjr92EyhyBAxQIJXmKaKo0qYMqY/7tBJgKPr9KkWg6GHpelU4jpXaDGpbWXF\n1pBWH914DuFeY+eAwV9/9rPc+9GPct/9N1ZD3XvvPWzduoVLly4jZczWrR+gWq1y7L/9t7fNb9f3\nSefz7+0B/QnE4cO9xf8nCY8+Cl/6EvyLf3GzR/J3i5GREQYHLY4cOcvExEMA+H6LIJjnwQcf4ckn\nv8KePR8kldrGmTMXqdc7vPrqZZaXl8hmEywtreH7MVK2UZqBrzQ6co3hYpG8BQOjaQpCZ3BwJ5cv\nn6EdatjKoUsF8HFI0aVIzw2kiI1DEgWUAQOFhYXBAJIYezOR4jNGgT4Ea+TQyNJEEKKRBmwGadNF\nYGJh0qJGtJkccjZfJj3Z7FUkAh+FTQOT4c1ONf2AjCOCbkCCGEeE2Nk8WSfFZT/GCaoUIp91FPt1\nA1NGFKwMcdBlupjn+JkzHD36KqurHZaWPEqlO75nk+HwVvUNppSif3SUD//CL3DkmWcYzudZ7utj\n5tw5unNz5Pv6GBoZ4a6HH37XbVjeCe8qGBFC7KYnLT6mlGp/z/s/p5Q69J7P/neERqPBf/pPf0Gz\nmUHXczz11EssL8/z7LOvksmMMziYIZGIiOM65fIVPA8y2QJJcxqrukImjqiFEZ6n0Q164cBq3GIQ\nnwodXFKEGGRxGcYljWCBkDYmLQbJb6qpOyQps0bIMII0Xao4NGmQQzJOnZghJIqALBoKB4syBll8\nJAl0wk1rtCIGi+ikMakRYuIQYVAhRCGJAY8mFgObC58gj6JJjaKmKJJiSjPQY49V38VT0EFjWBRJ\nhDH15TVsM4VhKjrJGkePfoHf+q1f4bHHnmB0dBQpJS+88AK///v/FsfZT1/fXuI45MiRK5TL/xf/\n7t/971j/vy3lzMwslnVjPXrPpG6I8+cv33Dsd25q13UR4u36kkQiQ7Xa/L5zfvc99+A4Dseee47G\n3BzFoSG2HjjApZdOU91YZ8BOk0451NqCPqFTjupEMgESpMoQeVUcZSFRXMIiyRTDsUMdnxbrTCMp\nENGgQ0iNJDmuYXEVjyIRAp8qBhtITBpUOUtEgMYoBqMIIKSF2gwkJRohOjqQQJBAp0tECaiiU0Yj\nIkUDkzwSB5cOLjaKBC4WMZrSMFSXSd1ksVVj2HQYd9I0my3G0alJHzPRZSw9QNlf5u7hYabG+rnn\nnp0MDA5w/KtfZWh4mOnp6RuuZalUusHQrFgsckjTeP3NN7GUIpvPU+jrY851+R++h9Z9v+PwYfjj\nP77Zo7gRjz4Kv/M7ICW8i24XPzUQQvD44x/hxRdP02gcB3QSCZ377ruLdLpApdJGiAxf+cq38Twd\nx0mRz2/jmWeOEgQ1hCigaSWkdIhZB9ZJWZL+dD8BdSbvuotTh17j9ddfpNkEP3QwN3Ue0CYAIkx6\nXOVqz7MJA0EJRY2IDhVCXCLmiYkxSeCSZ2BzW2nQxSAkxCDcNHvow6JLihThpn5sjiUGSVClS4EI\nAcwBCg2JQ5YCQ6QpM49FhI5AwwCp4RoahulwcSFi/9CtjA4WWVi8yka0gAhXGUdQ9z1WVZVcqUjC\nsmislvnc577ClSsdlJpC12v09fW0NolEhmx2B3c/8ggHNpnTZ/77f+fk0aPko4hiGNLodtmYn0cZ\nxrtqv/KD8EODESHE7wC/DZwH/lQI8btKqac2P/4/6bmo/kTjlVeO02rlGByc5tvfPorvJ1FqB3Fc\nw7L2sLHhMTwMw8OSffsGeO65M3SaddJRgCYEjqmR1rJUugGNzaqULpIUMToRtwD5TVszG2ih0IgJ\nyKGxhTVc2uj06tVLmGTRyRAxS4sZ8nTxuEoXQUgHjwCFwAS20OISF1kiTWpTnDqMoolGxBANYmos\noSNpYdJEIACJSYKADhtomJuNpzsMUGbMtLkWuLSloB8wpeICAkgQyYA8bTTyGMom7XdpBQYDA9Ms\nLS2TzWap1Wo8+ed/zuEvfYXkso/InmE9aJLK7yaV2sIbb7zC66+/zsDAAPV6nVwux9TUFKapbxoI\n3YieQdE734q9RbCNlPH1qg6AWm2N8fEBlpeXyWQyZDKZG/5OCMG+/fvZtXs3p0+f5uTJ0zz91Fdp\nri2ghQax8IjDCKF0grhFE0kY2uj6GpoyEbpBO/Rps4bGCDE6y0QoFBpDBFymSEQGnwZztCiQQLCB\nQZ0EASliJGlqDKDRReKh4xIBOpIu0IfCAsqbfShaSLKcp01qvD7QAAAgAElEQVQHkw6SYUJMFB1g\ngzothhjCIcAHPLIoDBRJaiSVRaiZaFKiZIxnaiyHHRIEJAT0CaiGy6w0qpRSabZMb2HfvmnGxkYB\nmEilePPEievByMrKCufOXSQIQnbs2ML09DSaprG+vo7b6XDu3DnyUhJGEbVEgt/8V/+KycnJ9/qY\n/kShUoG5Objzzps9khsxMgL9/fDmm72S458lTExMcN99+wiCMRKJNI6TQtM0ZmfPks/n+cY3jlCt\nJshkBvH9mIWFDTqdKpAjnR4DLgIOmiaI5SLtMOKtZostW8fxpcS2TFqtU0TRViwCBCtoDOCQJmQD\nRURAFUgSkqRCzAgJdFwsQhQOZWxgCIlPnjV0rrFOgRYSKGEhiSltWse7pDEI0YAuDoOs0yXGw8ag\nDEREJIEUFjqKFgu0SG86DwUk6VX2VVWH9TCiHCq6THBhpcpAIs9Q3zjlDZ1VNkhHLjkipAntyOfY\nzBUoTrNr10PMzT2LEAXm5yuYpkE2myWK2mQyaVqt1nWmuhOG1JtNzHabjKbRXyhQ932evniRY0eO\nsHfv3vdsfvZumJH/EbhTKdUWQkwBTwohppRS//Y9nfHvENVqlW63y6lT5+nv38fKyipB0MvIGYaN\nEAqlOkCSIDDodhv81m/9r9xzzyn+t//50/QXJlmLoO16BME8kXA4HdmkEYSAj4GkxTQShb0Z20Ib\niY+GQUxEg5AkPcOcKoJdCJrE1BikRj/FTQ2AxCOkTMggEG326TVQFOhi4lNGQ9DHCiaQQiNJSEST\nDBGTSLJAHajgMkgEWFRIskGOChlCNGKsIOIWJWkpDQ04i0OLUYZJ0iTLOhKLBnacJtBCymtnqdU+\nwqFDV2i1PoMhNziQy5DtRtj5CYJAMXvpNRYzNZKpCVbqNT7z6U9z7/btpOn13LGGh3nwsceIotOE\n4RSm2WNN4jgiCFbZs+ftroBRFFEulxkacjhz5kW2bz+AbSdZX1/m0sXDxFWdr1w5hyclW++4g498\n4hM3sDErKyt8/jOf4cLRY/iNmG55cdPpNOKa18SSCk9GLBoa7WiQnNnCNou0/AWkqhFTQ6cPiwEc\nBBERGjV0uhiYCJqUsDAJMamwDvSRQtKgTUSAYoQSOTQCsnRpcpkVQjQEQ0gsFBtAA41xIqpcYRGD\nESzySGLmWMehhWSQDvnNZN884yQpIYmISRKRQCcgwgwjZpWGpxQ50yBqNqmaOnnDhNigZDtEhRyf\nfPhhHj5w4AZaNWHbVFs9a/hXXnmVp58+hmEMomkGR44cYv/+ET75yb/HoS99iXsGB/m5J55grVYj\njnu9MJrV6t/yE3zz8Pzz8NBD8B57OP5Y8cEPftf/5KcNy8vLXLw4g1KKHTu23dBJVtM0PvnJn+Oz\nn/1rPK9EIpHDdTfIZpsMDSU5d65DsbgdTdNRStJstllbWyKdHmJjY5m+vltpt7t4Xo0o2oGuu3T9\nLhfOrLJw+b/ihZJu1we1wShlBA51FmhiAn2AApL0DNxdyrTwqZInRmAQM4RgijwGEU26XKafDerE\nFMnTZg6NcXzU5loxj8M0Eh9JihYQksMgyQAaDgE6NSJcFggYQSMmJM0GFjENFJcASUwKnTImDUok\nGaIddHnmwmV2DxSJlc1qbDNpSNKZPJatUcrlWKrU6Zu8lS1b9jA+/jrnz18glbqNpaUlgqAOqs7l\ns+d45elFzr7yCgcefRTh+wSOw1y5zFgyyZrrsi4Et05OUrl0ifn5+fe8IXk3j5r4TmpGKXVNCPEB\n4EtCiEm+T4O7dwshxP8D3Am8rpT6n36U7/petNtt/uqvvsalS6sIYfP66yeYmkpgmgk0zUTTdGw7\nZmPjEkqt4DjDWJbFXXfdgu/7jI6Ocs++IRbmVmjJDUJ/DcNrEsnt+OQ2Q4cENg2SzLCAzygGCSQV\nYpbRUCiMTYlih5Cev1weQRMdF4NFilhoCARpUljkyNPCZYYWXXzSSBro6Bik0OlJJvMkyVAhxiRg\niRUMRgnpwyGJxQAB/bQ5RUABg0Wm8Slu2vBoxOxUMesIdHQa6EgGEWQIgTQedTJ0sSnLRdYJMe2d\n9PfvJwiWSaVu4cVvfJb7PnGAbDbJtWs14q7GVKrIpWiNYuoWymvXkBdj9j/88PXg4PLyMm8eP87H\nP343zzxzDOgJIqWs8Nhjd7ytfXWlUuFLf/7niGqVpFJY7VVePz7D6MQ0hh6yOxvx6LadmIZBLCVn\nT57km0LwiV/4BaCnvv+///W/pnL6TaIgQdZKcn++xJsKKvUaBdXFSSdY9RV+YppsJ0Wf9KgFF0jI\nRSaVywZpKphECGIgg0VEP10WEcRscjYU0LiMTh4dRRYDo9cviCYRbbrkN+8YGCbiGk1AIakCNlBE\nRyfGI2SYFL2eSgECjd24nMJmBybWZp3OFQI0khj4SEIiuig0TBaUhgtsIybp+9iaRhiGvOX7hI5D\nf6mPR/buZandZnFhgWqlgu04jIyPs9JosOuBB6hWqzz99CuMjNx7PWhUaopTp15jZOQVupUKpc0K\nqdHNbr+xlLz41luEv/iL1/VE72f8JOpFvoNHH4XPfx7+5b+82SP528Wzzz7Pc8+9ia73Urnf/vab\nPPLILTz22MHru+2pqSl+93c/xalTb7K+Xmd8fDu33baXP/iDP8K2j9NuL5BMlqjXr9BsXiGV2oZh\nZEmlHDodlzhuIEQ/hnEVJSWhnyefv5Vm5zzIETTOoZBYwqGhQlwEMeP0qhrTCDIoDARXEQzSZIkG\nVTR2k2eEpNCIVIRLlxY5TrJOkgwlxlC0qHEJMOn16O2JYU3kZvOPPA4xkyQAe1MvEpBGMoGOwmIY\nGCDCImJhc1QrKKroaIyRJkmaNLrIUlENzm3USaYzDBUHyY6UiGWEjCMu1Ks0DIfR0ii6bvCRj/wy\nQnyRy5dPEIYGQ4PDRBuXePyeae7fuRPX8zj+1FPUGw2m+/rAsnCDgJRhsD2X46LrktM0qtXqjzUY\nKQsh9iulTgNsMiSfAP4EuO09nRUQQtwBpJRSDwsh/lAIcUApdeK9fl+j0eCl55/nwqlTnH7jDFpq\nB3fe8zFsO0EQOLzwwlH277+bIHBpt2u4rkEqlSWV2orvX2NkZJKZmdP8xq8/D52QlbmrZJwJHt73\n/5H3prGWneWd72+9a97zdOap6tTgKteEy7iMB2wTaAyGkBA66ctw3YTciI6UCPIB3VZ/iJA6Uqu7\no1Z0OyhRmktDAuFGEC7pbsaA8YApj1Ueah7OUOecfc6e573m9d4Pe1NxAaHB4Lbh/qUj7dpnndpL\na9rP87z/4a28cOESz609jWQCiUmWJBKNmAwOdTx6XCbAJaZFbmwDXMengcssgklUNtBoYVAii0JM\nDxODiCQKCgYmoJBAMGCWLUASMcBjBkGCiINouKzTHmec2Djo6AwpoJEnRsWhj0UKQQGJSp4MCg6S\nPgajun40IIQUMatoKKRRUGmOjXZy9OgS0mMNVT+OnVrE9wdMT+cxDBMRF/j2E6eYnypw5sVT2Mo0\nqp4jivpUWufIqg2OTd1ErVZjbm60BLA8Pc13z5/n7b/+6+zfv5erV1eQUrJnzz/7IbKrlJIv/83f\nMOd5zI4v7KNLS5xeW+PmB+7i9KOPcuymUSECoArBoYUFHj91isFb30qtVuNP/uiPGJ4+Td6NUC24\nWi8TGCa3Ts3zTTdk6CUoZBMsxjGr0qHm1un7faaFy2E1wo7h6ThDQEiPDiEZFFQ0VEIcQvp0gAEx\nO6hskSbFHApJfKpoDFhCx8WlT4CkgiSJRTjuov4xHlEZs0VCKkh2M8DAR44XhVJIlgkpo7EXhQGC\nkDZl0gTsYUAPSRmDDkVcUizHPm36DLstckLDUAW2lHQieH0uT29zk6d6Pbz1dQ4UiwyiiKdOnaLw\nhjfwW697HefOnQMK1wuR8f1KNrvI2bNX+GHnkesbvdzb9zWHhx6C3/3dV3svfjTuuw8+/GHGjsev\n9t78fLC1tcW3v/0CCwu3Xzc+jKLdPPzwkxw8uP8GR+1CocDu3YuUr17ie2ee5dwzT6MKhTe+8Q08\n9dQZ4rhBGG4wN3c3zeYKqlrDsg6jqgk2Np4mn48RwqBVNzCtKRL2FJ1BGUvPEURHGQbfY0MGwBwg\n0NlPwA4xqfHs20PBw8BBoY2GT588XXWevuzgyzOMBLlHqNJGsEMFh8x4xqqhUCOiT58GOXQMOoRA\nB4PRtFoQ4qMiMdHoYyLYJmAGddy4+KhExNgEBDRIs0SGAX1C+mikSGHTigYEkcvuuSluO3yCWrfF\nwOlj9bPMRS6aOmocdN3kvvvexa5d54EyfnObd9x1G/NjrljSsjg6O8tqs0kjitit6yyPG5Fyr4ed\nz6Mlkz+0XP7T4CcpRh4cH9nrkFIGiqL8S+AvX/Ynw+3AN8evvwXcAbysYmQ4HPI3/+W/kO/1OGzb\nVIfgehVeeOpr3HrXr7F79yEqlS2uXj1Juy3xvB6qGlAsHkfTciBDHn/0G1hRh0XNIo1gITIg7vLI\n43+PZ+5DAXQUNHwCHBQyQECfgDVsVJZwmMFCjDNzn2GCLC3KwDYmLSxcIEQjTYiCgg+4jBTloCBw\nUTDGZvIqDh1Ckvgk6WADGWAXPdbxsVC4xDQKU6ikAJUI6FAjHq8wDojokGSIwwQ+DtBnxNQeEhPg\n4RMTk0RBo4OPpYTEiocmDTwlRTavYdsOhw/fyuXLK1zbbqP326QWFjCMiMBdZ7u3wdCwWMoqTOam\nGAwG1Go1ZmZmEEIghEBlJAP+QULkD6JcLhPUasy+5AGkKAr7p6d59rHHCAcDEqXSDX+jCoEJPP/8\n8/zVn/4p9toaJdclaHcJ9IAlM8FztU1sBJZtkTJtmnGfZqfGXCKLHbSpxUOiOEZRBFUZ08VHoURM\nGZcOPdLEtElwjf2ENIAyMRsU0dmPhk6f/nUKa3WcwayQQhKgUkMisNnBoYxgkZAEkiIenTETaJSF\nE2OPfUQkKmJMalUYoiOAPCF78PBRUdDRmURykJAQGdWIKeAS0ZQe+Vgho+ukcxMMai1aCZ1FVWXh\nyBHa/T5hGHLTvn0MbiAc/3BhoSgKyWQKfWqKSqvFVD5//XdrlQp7jhz5pZiKlMtQrcKxY6/2nvxo\nTE7C3BycPg0/QpH9C4nz5y+h61M3ODCrqoZpznD27MUbipFLly7xtU9/mv3ZLAfn52n3+5RXL+I6\nKd72trfzwgvn6fXaeJ5LqZTi7ruP8/DDj+E4KVS1zMREkWTyFhq1U2QTecIoQgiJEBGx3AFMXCZQ\nmEVhjYg6kggFG0EXhRCTiAI2ETpF+lzjDJ1oEZXz6MSoHBgrGW1AGcenjnygTHTSNEnTpYuOREfg\nADO4xHQZjBtVFxUPHwUXSYjAH89lVAQKMRYhMSEaKklisphsscJA5gixiNmikM4RDnRWr64SuS4y\njgnDHn5BZeC2OfvCY+xcfR7Vd+n7Td753l/D29KuFyLfR8q2mSmVsA8c4LEvfYl9rkus60SpFHsW\nFggmJ3+I/P7T4H9ajEgpN/6J9yXw3Zf9yZADVsavO8ChH7Ptj8WZF17AbrfZt7hItVrFHYYETpv1\nS5foDATHT9zD7be/hZWVCCkDrl6NaTZjGo0acVxDRE10cswYMUcyC/i+Q6XTJdJDEkOftneBBWJ6\ntDGYJqJFk3ViFoBpAhL4RKgoJIQgjkfJMS4wi0aDGvtwsAhocIUBJhGSOi5pdATzODj0iQiYBCQ6\nUEXDIUOXKiYGDh0kFh4xLWLAJMYkHlvES1wCAiIS6ECWKTwCNtgkjUGAhzc+0J6iEiPRZcSAGhr7\nsLQMUvFxwzJ5tYXQJImpHsdeN086nebKlStcuVLFzGXYGW6y2mkzuXiE8upzTC2VeNeJE9QqFc69\nuMP2YIBpWZxstTj+hjcwDAKsYpHsS8Ly/in4vo/+I7psU9cJPY/0xATtfp/cS8LZvCBgCDzxjW9g\ndLvcsrDA41vbJPyQbmuLSFExZchZ5zw7uoUTK5QMlRk7Sd6yScQhi4rgjJQ0gR6COgEeBoISkhaC\nKik67KOLCWwDDUAlT4xFm21iJhAUUdnNFo8zyRADgUEGiUIfh2OoQJvLXGRIHoc1dNrk6dGmAOTw\n8VEJUOgi2UCQRhCPKbBdHCLOYZGgj42ki4NLB5jAE4KSTOCRIasM6EV9HKmgtDr0Bgk8S2N/KUPs\nebzjJWsRz127xsbGxviB8l2CwH/JMo2k3d7gne+8m0LhLr74qU9Ru3aNjGnScl2CQoF/8da3/tT3\n7msRDz8M99772larfN9v5JelGIlj+SOJj0II4ji+/m8pJY9+/escLBQojqMH8uk0v3LkCI1nTyPl\nKocOTdNun0fTPBYXl2m3JSdO3MvW1iq6rpJICHTdw7RG53joVQiCPq63SRClEGJmJAGmw6gPr6Ax\nA0gkBtrYfkyngGQHkySzdBjwFdKESA6iIvDokEWlh0qEhUMakAyoUGSbJKlxly8oYdGkimSRGjtM\no6FhINmkQ0ADQRaTOg5TgEdAABTGzicBPVpMMI/BLIIefbrUUZRtiopNLZAML5/h+OIykYioeG0S\nyQXaO89y4bnnyWk62VKB99xzN8PVVc5vbXHb3Nz16TNA33FI5HL83h/+IUdvu41vfOlL6EFAPptF\n37OHX3/Pe155ae8rhA6jRh8gy4h5eQM+/vGPX3993333cd999/3I/2hrdZWJ8RdTtVKhubXCdG6Z\nXbZNvVLm1KOPsuvwPo4cuQnPC5maKpFOF5BSsra6yslvfAenWyWnqghFQVN1TKFQH4TosUJOcZnU\nF1GCMl26OAjEOEtmpHmxkfgjOlGcQMMhiYnPRXRMJvAxGGBikcbGJY1Bkj5tavTRaRCTISSFjkRh\nkwFJOuQAjTJZBG1UYloM2UYhg04F0EjisYlDHxDECFT65DGYxaSLTZcsdTavFyLngAkZYgF5BMs0\nucz3iKNlEkYKtAZWwmXx6OsIrGnW11dIp3exs7NNpXKRfN5gZvZtXGjt4LnXkOlZ3ro0T6/TwWg0\nuHligs1kEiuRwG80+Najj5K7+WZ+7UMf+omY1tPT0wxUFdf3sV7SrW/Wauw9fJi9N9/MP/z1X3Nz\nHFPIZOgNh5zd2WHu6FEG586RtG22yjuoIsFOMBipnGIYSIUaNhktTyeo4jhdwuGAuujQikaKk2mg\nKiVrWAgEgh2GYypxlxYJquygsIqKScgUI1OiDlsEpFDHMeKSNj0m6BKSoEESA8EEGim2uECRgGVC\nKgzoEbOExiwKz7BGGx2DCTQiJFuoVEekNCqMXEdChkyhYeEywKXKNHlyDKgQ0qaIkHWS+HiRRguF\ntaiArkwwmYy4dWKGodflwrVt3vaS4x4xklQXi0UeeOB2vvKVp9D1aVRVw3EqvO51Uxw8eBBVVfnQ\nRz/KubNnadXr7J6b48CBA/+kxf8vGh56aEQSfS3jTW8ayY4/9rFXe09+Prjppj08/PA54njXddVc\nHMc4zjYHDz5wfTvf9+lWKhR/gJdgmyY3Lczx5g+8C9/3WVjw+NznnqRWmyaVylGp1BkOK3zsYx+i\nXK5y5kyZ7amIjWuPYGOgyRx+uIEkh2CbmDyjZNs8cImYPpBC4qMxRMGixSUmkICOTkSWBiY2zjhT\nV0MS0yckjUWeDDuohCRI0SSLg0objQgFizoqGSwi+hRZoUaSASo6CSJSRDTp4aPQJ2KaEePkCnAz\nFnl6nGEHnxJJRjGeDj0Wk3mW8Fjrd9mxpjjZ2aHeqVBMGmjnLmCEPu89cYKJdJqW43D+4kV+5d57\nOb+1xXeef54js7OUSiXCOObF7W3ueM97EEJwzz33cNeYX6brOrmfg7/Qq1mMnAQ+DHwBeDPwX39w\ng5cWIz8OmUKB+oUL+L5PZXWVg/NFyo0WQ6mSz+SIYpezz3+dt771Q3S7Xc6fv0g6fQeqqhLHMUHo\nYps+QagShAG+HxBHAVHgI6WLZVokDYtJKTDDNhUsLIZjVUQJlCyK9IDL5ACFDC1eZDd9pnCoE1NB\n5SYkq2TJsoxEoTxerumyxoiuqI/VFRoQoGGgM0WRDiZJdlAZYKGS5woKDl0EBhFFUlTHlfw8YmwB\nPxjn+WYwmRqXPDv4HESSB7pADYVFDAQ+bW2TXpRBKEX6CZW6V6JdCVGUDa5dO0e32ySKLKan72Zu\n7hDK/BHC0GNj45vM3HqIMw8/xO0zM8wvL3NPOs2ltTX6rRaVIODB3/7tn3iEZ9s2d7797Tzx5S+z\nK50mbdtU2m1qhsF777uPUqmE8uCDPP4P/8Dz166RyGS4/Td+g1Qmw3fPnWNxfp4nXjiP7QX4mHgE\n9OOAa4rGhDDJRQNk7CHRkbJEBgOhJKjKJj0CQgQLZLFYYEjMFkMaFBEkaJMnyTRZVIa00CkDTWAP\nsISGjklmbE63hcIuBvikyWKh4o+VVoskxwssESFdqnjEmMyRQOUcKjYRsIhCFochFygjqGOisYsh\nKQJCUuQQmDg0KZCjiMt2nGadCJst+qTwMInJklZ2IRkSWAb1YUxKJhm4LknLojsY4FrW9XH4XXfd\nwe7dS5w9ewHfD7jppmMsLy9f9xJIpVKcuP32n+h8/qLhO9+Bj/7c6PSvDO69Fz74QQgC+CVYGWNx\ncZE779zL448/hW3PoCgKw2GZ22/fdcNzQ9d1VNvG8TxMXafb7SKEIJFM4kvJ7OwsyWSSr33tMe64\n4y6uXdug07lMHLvMzk5w+vQ50ulZZmb2YRgp2rW/YtDTGIZpJClUJLEcpaUrio6UOSBLzDPoXEES\nEaERkMEkT5uQDjuksQGVFAMCgvFTRBtHgJgYXGISHwOFbUKGzJLApIBKDwWYJ0GHRRyaSNKkiMZm\n8DpXsPGZZ2QP32dktVZkpNE0iChhUqLFFsNxxrDk5tLNlAyDXDJmzrtAV6kRiwVuKS5RVC1qOxv0\nhw2+c+oUdx44wPzEBJO+z5lLlxhUq6y5LufOnycGFg8f5tcffJBbbr2VRqOBlJJisfhjl9t/Wrxq\nxYiU8rSiKK6iKI8Cp38W8uqRW27h89/9Lma9ji4lywuzhKLMpWqTyWgDVQS0m0MeeugaELO5eYZu\nt8b09AFcr0EsVtk7P8Xmyhqb9QoJJUHf8wlkl5ZwkFFAKayQMCcgcujRwVMUckqJlGrRinxCaePL\nIhXZQqfCEgOOYqARkEPQQOdFYgQ5BoQ4qARY4xD5KSyu4ePRJUOEhyBCkELjEkXaY/+SeQqUsHEZ\noDJgD5IrJHCZJEENBQ+dJAkiNBr0mUUhYkgeGKKxQEBm7EWSYyQQvqxIprQ0cVwkVAQVDHQlzcpK\nnXz+LlqtATMzu4jj5+n1Kmxv1ykUVpmf34OqjiZDhw7fjN5tc/tL1nbfcHTEb3782rXroVU/KU7c\nfjvFUonTTzzBarPJ4h13cP/tt5Mf8xQOHjzIwYMHCcMQIQSO4xCGIX1VZf/0NN8wTFb6LZIihSdc\nthSLJZFiJnTRNJWCouFjsRN5ZISBkAYJMpRpsIROiEk8NnefQjBgiIdJjt0UUdFRsDBpoyKpAwYG\nXTIUkGOXlwiTBC3EOJvIZDTXSCCw0Bji0cHHpwgUGIlq2+jEHMKjySiOPEWCEgl0nPGkKz8uVF1i\nYjQsHFxidjDQ8bmKxYAl5rGx2cZniEI7LjMp57jQrTO1fJT+sMLlzU00w6CtabzjAx+4QRo9OzvL\n7Ozsy7wrfzGxvg69Hhx62YvG/2tQLMLyMjzzDNxxx6u9Nz87FEXhHe+4n5tv3s/Zs5eQUnLo0NtZ\nXl6+YZoqhOD4G9/Io5/9LN52izjWAUkr7HHLu9/J9vY25XKZ7e02Bw/ej5QK5897aNoC1arD9773\nEO9////BgQOHOXv2SfL2Lrx+lYS2G5QEblAhlnNADiGGRFEL8BAUETiMIk2XSZAgHhuYgaTFBTyS\n6KhYVIgAjwTQIM+QJVRsDJJATMiAAQKLNAo9BGkSBDjExOiM+G8ClRYDkvgcQ46VdKPn9iSjpYQQ\nqGKTZoKiEuCLRdRoQNLoMmVNEMddTD1F15ekMwXmbYuDiRzrlR30QCWhZEkNh5y7fJmg1yM9Pc3J\nZ54hlc/z/je/GaEotPt9zjWbCFXlv37iEwwqFRTAKpV423vew8LCws/lGnhVVfT/Mzmv53mUy2WE\nEMzNzaH9gOi/XC7z+ONPs76+hVac4tnVy2y12zSlJC4V+cjb34aUks9/+wzzc69j164RI216+iau\nXv02i4tDzp/fYXZ5D+efO80w0ulKDytoIYXHUInxZQktbhAG26ixgiZDbBHR0+ZJqEmC2EdRAmJq\nCAYMpEYeSGEyIGIUrCSZRGOLiBYaLjYWJgY9IhR8NGCCGG08GdGRpIioIaljodNHRSWPwoh4qmKh\nYyMoUeQ0LllsZlGpYTKLIIOPRpl1MlSpEpEaZyv0UElhIICQgFBGdOMYJRbkVWgZHradxnFSNJs+\nqppBUVQsa4LhMCYMu5TL29j2SJuzsFBkaWmJ6toa1VaLyZcQGxvd7k/MFflB7Nmzhz179vzYbS5e\nuMBjX/86XqcDuk5yeprza2uYmgHJOdbD0aMkHcZMKTaKIvGjPkosSAgTI/boBT4+PnUkGikYW+4r\n1JAk0UmiUWXIJCqCNiEaI/M2lzQhbTIMkGMnEmMs2RaECGpoZHCBCMmAIQW6dFGpE9EnR4p5EozE\n3xFpNtjmGm0CcgwQBLQpoLCJR5ssgiwONRQMVFQEw1GeheLSlTUKWMwxi4tLgzpdEhRYoq1WMM0s\nU/NF9h+5k3b7GfInTrC4tMRNBw78EBO+0Wjw6KMnOX9+lWTS5q67buH48Vt+ZqfF1zK+L+n9RRAG\nfZ838stQjMCoIFleXv6x+SZhGFLeqfOFkxfQhh4F0yBZmkQtLvH5v3uES2sQhipPPXWWatWhXg8p\nFk8ghEa5XEbTbuKb3/wa09PP8PzzT9KtuRAlkSJEU87UAcYAACAASURBVFVC2QWmgBRR1ELT0kSR\nATIkJEaiElOkQZsiw/FdbiFIMjV+hkyik6BMBYchA3LYhBSvSwViVPIEdBkiSY6ND0cKnSYRISli\nQOLiss0yEsFI6qCMf7KMloe7CIoUcFBoAIFiEGgJkBpVt0XeFoS2RZTNEml5Mkj8MMRzHAASVhYv\nGBJGEXIw4OL6Om3f566jRzHHI7eJXI6ZXo9P/6f/xAPHjjG1sMB2o8Hlixf503/7b/nDP/qjH7Jm\neDl4DVr6/CP+3b/7c9rtGEWRTE6avO99v3pdw3z58mX+43/8v1lb6wIWvt9nakph/xvfyIzvc9v+\n/QgheOyFc7SdBEdf/4+tjmUlCII8jz56luPHf5UDB7IY1kM88tB/x9QMSlN7mSyUOPnct5h0PPZq\nE3g2VPs1hPDQTA1drdDoRwiKgI8flxEih64WsWKPUOo4OGTHCppRxRvTx8HExiUiwCGFBNpjqzMb\nhXkUUkCdiBQ+GjVWyKAxZNRZBygExGOCk0CgkSRDgixdmvRYQ47Z1irb7GFIB4UhAkjRHl/cJoKY\nBA493KhPUSToqTHZ7Dy2ncY0Nfr9AYlEEkVRSKVydDplJiezJJMW+XzAvn17ieMVdu3aRSaT4Uuf\n/CRdx6GYTtPq99kKw5+YK/LT4sKFC/zDZz/L0akpsgsLeEHA2WvXmDl0iGytw9nHrjH0CmjCYlD5\nJlEQIonwI59IKGT1JKrn0ZY+MRpdbAIcMtj00YnwSCDp4RChE+MzRCUHBAg8BD4WMWsEeORRCKnj\nYeMDBpsk6TIgJjPufDLUsPBZGzPxBUtEY/vnJIz7rBJDmswxYIBOBZMaXdpIYpKEdIk5jE9EAkFA\nBUGFvoS+kWTox1ykQ0wJ2IOuWvjUUWKHgVOl19C5/NT/y71HZtg6fRpdCG45fvyGY9tsNvnzP/8c\nUTRNsXgc33f54hefZnu7yq/+6tt/7ufytYLXsr/ID+JNb4L//J/h3/ybV3tP/tfhq1/9Jl/60mlm\nFv8F6XSOXq9Cy1lHHwbE8SEMY5bFxWkuX67w2GOPMDl5jFJpVDw3Gtv0el0Gg5DV1ccZDieJAhWN\nDrbcIQxb6OQJqAATKIpAShNVjQnDMtBiRHOMUYjok0VgI4mJqVBiQIDHVUIiQkIWMAhRGDIqZdQx\nmyTCBhpk2QFcDAQeGk20sSwXFKYZEuLRG39yf/zpU4yWaXYYZYP3hE4dSZUZVDWFEDlCv4uaijlw\n9CjrtVUW3/B6NhtJVi48T58+ke/TkyGa00MqIZVmj14YUbFM7jpxgltuuumG497sdLC7XUrZLN95\n+mnam5uIToftcpnfefpp3v2hD/Gb73sfxWLxZZ/b13Qx8swzfeLYBiSrqx2azc/y8Y9/lEQiwac+\n9f9w5YpLsXg7ppkmjkN2dl5EVaskD8zxV989STFpc7XaZmLPrczMzhLH8fWubn29ytTUFBsbFdbX\nT7OzU8eyDzCZaHH7oVuxDJtL5x9ml99AY0hKSZC2VWb0JJc1DT8OSGazGKrN0DXo+rM40SyqWsNU\nLIIwoIOCioJBzBCXBgoqbWK2RsobHFz6SHxU5onYwmI/o8wDn5Fja5odLAy6OJTRKOEREuASsYmk\nSYTJAAUDDwONRdK0gBDJBCqzZLCRXMMlIsQde8gahHSJaIoik4qJJGBgl8hmC0xMzOF5bbrdHcIQ\noshGSpdkskMuN4Fp+kxN2UTRGv/8n7+ZOI6xLIvf+lf/ihdOnWJ7c5PS/v2898QJpn6GWOkfh5Pf\n+hYHi0WyySQwUtscW1ri8UuX+N9/+wPo9pM0GoIrV9apB7soN1awIxeNBIoM2PIbdJQYVUb0SdIm\nQxqVDgNsNBwEMQ16QJ8EGhtETNDEoEgKA4UWVXQCcrTokyNBdpxnUSVFSERAkTVyqNikMNFpEbFC\njEqRGA0PFY8MQ0IkQzJY6Eyg4JPHx8FjhVFSUYhAYiLwiLDo00TFIyDLVQKUKImu9tC116PJ6fE1\nrxOSJAzXyYjz3H/0Xt549AClbJY4jnn2qad4cXmZ173E0vPkyacJginm5kaTKcOwSCSO88QT3+PO\nO0/8TA+d1yqkHBUjvyhf7vfcA+9/P3gejPMhf6nR7XZ56qmLJBILBIGJricoFHZTq/msrp5mcfEA\nzWaT8+ev4jg2QZDi6tU1BoOYqakCjcYqYVgkiuoEwQRxnCEhfNQ4g5QmEdukmaPNCjHPoWkF4rhJ\nFK2RTksGPZ14vNAiSWBQRKAS0MDAYoiGTZsWMZI8GvvxqeOxgwlI+kQMyGCwwwAXcEkCfTzKTNDl\nXnQsApr4NEd6Ry4SUABsRuw0D1hHsIpGjIUZa5jaHgwZY6oOsVBx7Bh19x7OBW1uftPt3HL8MJ/4\nk0/hGzq9fh+nOyQlXGw5pKEpTFoZrnoBd7/zLSxNTl5vHoeuixcEbFSrzExOcunaNZzNTUr9Pv1G\ng/koQqvX+fKf/RlrFy/yr//4j182mfU1XYwkEssYxoihPxx2efrpp3n++Rc4ePAAL7ywSjb7Bkxz\nNFoWQqNYvJnvfe975PO7safvo+Y0IHWZjbWzDGpdFF1nYe9e9uzdS6u1gabtZmenQSYzT7FY4OqV\np9h2ewzcPpZhkzQTJIZtErFDRkJdgctDQSP2CI0EpmEipUW+kKdfP4MhQpRwCyGHNGgzgUoDlRCF\nCjo2sEiPIRcISKLgMKDPgKOAjyAmxiAmxEIngYoQLkGcok+PPi18VAxaFBklRnaRVIEETTwS2EAf\nFR+TBD0mAAUVnQgLlR0EkgGQp0lIWxmNGLtKlY7UyJUS6GqFfq2LN9whnU4zNTVLPj86xvv2HWRm\npoBtd3n3uw+xtLTIE088yxe/+B0URce2JQ888Ebuf8c7XrHrIooinnziCb72d3/HvG0zOTnJsYMH\nmcznUYUgqSjMzs5wyy1TrKy43HLL/Tx10mbzVJN9aUG/WsEMY84OA9Z8Ax2TSMwg4pgJklTYRuCj\nE+DTpIuOzZBJ+iRpMcCgTBZIYdFCkmCfInhebtKmhYVHBuW6e+sdaBhYCHR8RomdAR41IKJNliUy\nmHTxUfGJaGOgEJJFRZLHHRv1u/ToMeqPVFQUJJMvyQmOmJ7KMOxcwB/6CDVAxiBFSECXRLrE/bdN\nMl3IUW93SNk2lmGwu1jkhSefvKEYuXBhjWLx4A3HXQgVIXJUKpVfymLk6tWRkdj+/a/2nvxkyOXg\nwAF44okRofWXHe12G0VJMjmZp1qtkEhk2N7e4dyZi7Q7NZq1p1lZgX373sDU1CzgcuXKi5RK0/h+\nlURiAcNIsb19DiGOIeM6SdlBU1Ij4rocZc9oxARKFUXxUBQwzR4zM3u55itoYg995xQRC8RYRAxR\nKJPHJELQpTdmlCVQ6GGyQ48mSTw0bAJs1oEKWXT6pGnSp8wsQ1KoeIwsFzQMEkgaJAkJeQaXHB55\nYlYw2EYnYJIQCx9BGDvMmjaRFHSVHe69d4mPfOR3WFqaJ5fL8a9/7/eI66v0Gw3KjospfSZig56d\nJmOVWMjlyUQDvKHH1M0389Rzz1GpdTi3VqczjBgKhzv3z9BfW2NKVdmpVgn7fWQUsb9QIC0l6ydP\n8vm//mt+72VGSr+mi5HvFyIAiUSGWi3F+fMXOXbsKP3+gJmZzA3bb22VkTLDxMQeZmeX2dra4NRj\njxG7ayRKDjOlfayeepKN9eeYnJR0Osp1h9Bs1mD38h7On32OtZ3LdAcNmsMmeX9IPmnT1U00pUTR\ntLjSXkdVkxhSJbYd3DhEYYDqnmJJSOaNDJVIpUePAV0iMoQkOUSEhiCDgo5KjyHhuK+VKIRATJdR\nVqSJAxCHKDiopIjIk2GbeXQS4whrD7iCR4M+DmVCJjBREDjobDKNgkfEEHdMa9U5T4ZNRkRTW9FA\nvYqeTkIcs5jRmJQeatRjPuPg5gXpeZft7VNks1mWl5c4fnyZd7/7AZLJJJ/+9OdZWYmZn78LIQSu\nO+Bv//ZhUqkke/fufUWui29+9atce/xxbs3lmFVV+u02Dz/2GL9y770U0mmGUlIqlXjwwd/ixRfP\n8OyzZ7GVNT78vz3AYDDgiVOnCLtdXiclrK4jwpgmAwJRwpcBSlSiQ4ygyQFmuUKPA3Q5MDZx9hmy\nic8GXZYpcoGYa7JLBoFKCo9FrmLgI5jhLAEREp+R1dyIhOYT0ecwJmsMaBKRw8VHp0IOhwmyxCPt\nCx4+BioKFhlqdFknZgbIIhmFNaqKRJgKCVvg9IvEIsCX6+i6SQCki1kMq8BGzcbSDcKoQ+L8Br92\n11EMTSPwvBuOcTabotEYYFnJG96PYxfbtl+R8/pq4xeJL/J9PPAAfOUr//8oRjKZDHE8oFDYzXD4\nXbbWTrFRdkioGUytTzFt0Wl5XLywRqk0ja4b7N6dRNddLl6skkxOAVvkcgaum0bqoPa30eSAjKLi\nyhohApUkgUwSxyphOEBReiST02jKJYSvopMjoI5Hg9RYuxghaSIJyKEwgwRSXGUR0JnHoUefBm08\nHA6SRh9PTiUaHofwWUMnwEJFjE0SDUJMfGwCFulTZ40aJjZzFK9z0bbxGcZX6bgemqqQnJrgE5/4\nE6anR5PRv/iLv+T8yTPcVtxNNZugKYZ0emW6qsEubZqskWa702d2eZ5hf8A7fuM3+D+ffI7vPLZJ\nRqRJZyfIptN879IqS1qVXMJm2O9jSElsmhRTKbqOw0w+z1Pf+Aa/8+EP/1Bi+0+C13Qx8oOI4yGp\nVIJkMsm+fXOsra0xPT1qY8IwpFIpUywmyWQKdDp1HvnyX7LLG6LGkly0xrWNNRaWlukEDrO7l/nu\n46sEwT4URVCvXcJrPcPCtEon3ESpXeJY1mJAEqGq9AcxjhITJkOSc3tRZcBO16dfS6JIBS9MM8Uq\nepyg7nroWGRQSQIVInRSDOigoxCjo42Nfkdjv5EKZuRyMVoK8PAJgJAWghwOk4RsMYeHzdzYBDhE\nJWYanT4ZbAIyVGniMI2JTYiCwMfBQjCDwgoOOkkC+hSMNDnbZULNk0uoXBhskG5XEPoEqmayVEqw\ntG+WcNccH/zEHxMEAZZlXVe07OzscPlynaWlfwy5G0V37+ORR556RYqRVqvFhSee4O5du9g2TS4+\n8QQL+TzRcMjz588zMTPD8vHj1wmzt956nJtu2k935SyL+Tz69DRz+Tyf/MIXuLK5hfA9DA28aB1H\nghtr5AhJ0sakwcbIN5b9SArEGGP3jwQR27hs4NMiTZMOJjEeBlAiJocgyZAaWzRJ4hPRI8SgS0Af\nA0GERRJBnz5dfFxKbFDExiVmxKd3cAjpjQW+ginMMbk1IBh7MypI2SOXKKDILPMlHy+TYiJnoWsG\nqhCUey0mZhYoJmbIpwuoYopWr87Dz11k/9Ik+97ylhuO8113Heczn/kW6XQeTRsR2er1MoUCN7hh\n/jLh29+GXzTftne+Ex58EP7Df3i19+SVRy6XY9euPP/9c3/BTZrNlfpFYmdATbgsTs6iG10c18Tt\nD1hZeZaZGcGb3/w+arUyKyvfZn6+wGCQw7Jm2Vp/BiXwGMgGBX0KoYQkgxA/DhhSRXIEoiyaMiAI\nKlw99wgTps+m+zzheDI5pEBIhD5mejmESKYQ+IDKBAMMCkh8LEys8QSzxYAJ8gxwadIgQ0gdhQSS\nMj7zqIy+ExTq+BgUmUIjYjcuQzJExExhY5NSXCzpUzfmmbCvocQRm/U2D37gd7nt4C4m83n+9n88\nxLKeI22l2Ilr5JKzKG4TS6q0RYhBB5UhneolxNQyW1tbXHhujbccv4ekNWo8/DDgzHZAU/c41djG\ncxz2FIvszmaRQEdR2GVZVHWdZrPJ9PT0T31+X9PFSKu1Qjo9N05gXCef9zh2bCQX/YM/+Jd87GP/\nF5VKiK7n8P0usMrRo7eRSuV4+pEvMu267MlN0h263Ll7FxcrFV5Yv8BCMknK0Cn4Fa5d/QK18gZ2\nY4uEEFiaiuLr7D12mF36HlTH4ezWFtc2msRCpTi7n/3FBU6tPMlwWEaEBUy1QCx7ZIkxqBKgEWEx\nMpafZYCDS52QmDlMJIIUA3wkXYqozBByCkEe0Ig5i4KHRUAKlz5TRDhYDMZDuZGtToTERJIjwiJi\nFDjtMYegQUwbnSZ9poEEMTtKREXqLJgKXe8iBCUMNccWEWu+SywTeLHKwaV5FASNap1TjzyDf/Ys\nhCHvfO97mZmZuX5+ut0uqpr6ofM2Mhla+aH3fx6o1+tkFAUhBMWJCZid5Tvnz6MrCjudDr/z7nfz\n5vvvp1wus7Ozg2ma1CoVXnjxRXquy9B1eXZtHUsKDufzVFpdkopgEAxpyvNkRYogdpkVAck4Znuc\nA1FAIGHM65CogEXEKi1cUtgcxKA5NnAHgywRgj4zlNlhHh8bD38s3A4Q6JymSB4diyp9kvQwsKii\nMYGKgqCLRhmXPB4D7FHXhkSjjoKOx0h6aJpL3H33b7J1dQ01MkGepZRZxDA0au0qrdYl/tnb/wBT\nz7J29gwF00RTEzy/UmXy0DK33nbbDcf5wIEDvO1tNR566CRSZpDSp1TSeP/7fzaXxdcqpByZnf37\nf/9q78lPh1tvhXYbrlyBV2gQ+ZqBlBLF6XL3Yop+w+Ga7DGrR8yqFu3IJqmpDIx1/KFOPv867r33\nXdh2Cs8bcu+9t9BuG1y58iSDrassSIEIE3hyQMuvoGgJMoksO/0ukkVM0qhIEtYEA1fF8RpUvT4G\nXZK0CZA4OPjM4WMA14AiGioWLSIa47yxNt9XyqgIDFwcNtkmIKJJmj5JNDqE7CKkSsgFRubzLQRt\nJrFJ0UQyyhuHOWJqVLDII6VHEkkl3Gazb2Ia82jqBJdecCmvvsDdhyfx2j06rkZQCNE1laHbBzxE\n4NAb9MgrE0wKhdhQyFoWn/vMZ0hgXS9EAAxNZzaZxcnM8oa3/wpf+7M/Q2garSCgHMfsmp2lF4YU\ndu162ZPT13Qxks936fdrgEKppHPixAn27Rslmh47dow/+ZOP8vnP/zfq9W0SCZ19+5aZmzvIYNAl\naFcpZvL0hl0yCX0kia3VCIcOW6FGIemh9zo06quYQ4e8uRddzxEEbWRth9PPnOLW33wPvUuXeOuh\nQ3TiNfxoF7GeZavZQLdmKCUc+p02brCKwCciSZKIHjEBJUIMIgJ05pCEtDlLlwiVAhoZAvqETGPh\nEqIjmRqTExexaGJSZYoVZqmwiUcWFwWJwciwS4yH/0NiYoYkcQiZpUuEIKCrSJrSpIdHDp2MzLMb\nHTccohIjjASVQCNh7iZjR3QGfVZ2Oij+M0yGAZGqkUgX0FTJoUSCb3z2s+R+//evV72FQoEo6owe\nEi+ZbXc6dRYWXhnSaiKRoO151DsdHjl5kpTrcmhigu1Oh342yy233cZX//7v2XjuOXKMXFtfvHCB\nWw8f5isPP8mVay16jkVCMbhoCSw1RgybTMqIkgKzeoSwsoSaJJfIUGjucHLoE2sqMojQx0I8BxgC\neUwskvjk0OmQoM2APgFd4vHyW5IEKsGYrJYgh4+Di4tNiavkMCkRsAroTKMxSwOHUTi4QoRDl9FD\nUpBEIAjojK+jSaCGbeq02zViRRKFLmnbR7LNTiNNJlHCNJe5ePEKt912K0fuvpvNtXWGnksxvZff\n+uAHSSZvXI5RFIX77ruH17/++PWibm5u7pdW1nvmDGQy8DIDR181CAHveMdoqeYjH3m19+aVRbvd\nZlCt8ua776LT6XDx3IvEcZZcapJe0CNtzjGfEvhyhampSVx3QKOxSRRtUSiUWF2t4neGHEjtRwQ+\nQdCgYE5gdnfYEB5Nz8YlM1r2pIUpptGlBAZICoQETJBF4uBTJ00WhyodNCAkwyS6EjJtaNQ9h5A6\nEh+FFIJJLGy61FDxydGkiYOHTxadAIMNPFK4dBHUMOmzG4sCSXx0FAJGDW8fhR5VpuiRR6WDRz9W\nCdiPG5iYuIhIpz+4mcdeXEFG81zzN0lWt8Br4rcrJIVNUxmyW1UInCpX8xnuv+Mu7rntNj796KMQ\nukg5yukC8AOPzqBKfkrjQ7//+7hxzLc/8xlmdZ3ZbBZhmqiTkxy5886XZeMAr/Fi5F3vOs6LL15B\n01Ruu+0Qb3rTG2/oyo4ePcLhw4fo9XqYpkmz2eSTn/wi5bKF6wwoJFQa7TJHlnfT7PVYaQ+5PMgw\nq++ivmrisUi/f5VZOUcUpdA0ST6/SBAUKTeeoOd5yFyO7VaL/XmDL595Eax5ZmdmGTQr/x95bx4k\n2XWdd/7u23NfKmvfq7qq9wVodGMHsXGHCIKiYJOixSFjtIwkygprLE9MTMwowjOyJyTLYUsOx4Ql\nDYOSg/s2JAASYIMglm6A6AVodFev1VXVtWZlVe758u13/qhkiwRAUwKaACF/f2a99+rEuy/znXvO\nd74P2dKxZQZTDCKkwjIzuGygIbAo0URhAwOPPhwahKQRDOGLEUIMArmKpIjeKe/pjODjo7FIkjVC\nGtSJ2EFIiTV66GeZCIMyXeRQ0dnEYRmf7QgECiVUXCIQAlVLkUjlyVZX2CFyCARu6GGEFjY2ZekS\ns3Yj1IhqvYwIA4SWYqHh0mtGpCKHs0sX0IwR6rZNr6rynW9/m8mpKeKJBNPbt7N//xgvvXSawcEd\n6LpJrbaBbV/h7rt/+bo/DxcvXuSpRx7h7JkzPD43x425HNMTE0RRhOv7jI2N8Zf/6T/R7fvcOj6O\nlJKrp08z5nl8+dEjODKL74aoUuJEXShBL7VoEz06w7Ci41MjlBFELrYd0AgChnoLyGWfY16b/Z2f\nFZuQCygd3RaDOGXWWKebiH4kl1jGJ4GNikmTOKIjHN+Hi03AOgV8NvFYRTBBmxwac/gE5MmQBFJA\nQJsWEh0HQZI8slNxs8mgsYyu9ZKyeogldK5e/haJXB92fZVYrELdyRCzUrR9wfZd+9G0fk6dOsX7\n3vchug/dRLm8Rjqd+W+WVJPJ5M+N+/OLhCNH4L773u4o3hgeeAD+4i/+8SQjUkrOnDnDsWMv0Wza\n9PVl2bt315bycueYUqnEwPBOrpyfwXZjSCFBkZRbVaa3Z/n4xw9RLJYZGhqm2czxwx9W2Lu3m81L\nLzMY76FebxAEFn19vTiXbEzfp67uZGuAtoeQFoHcwI2SCKGhSLXTakki6aWFh4JKikHaRAjaCHwk\n0A4CYnRTx0bDJkYfESabVCiiENDDCmdJ46GTpNSxvUtiImlygIhnkaj4+NgkMHAIENToRiUOlDHY\nRKMXnwgXjQQRBnm9n6pbwRcJ+tIFPL/IjuFBZmYFl+vnuUHxSMXjLLk2ga5gduU4NDzAaszi9oMH\nsQyDke5u3OYSq5uzdKWHWS1dobx6kXZzjd7efXz329/mN3/nd5jato1TTz1FXEpELEb/zp2878EH\n3/C6/0InIw8//GE++tEIIcRP1alQFOVaJjYwMMDv/d6vcerUy3xp/YeMmwoHb7yPlStXmC8WuVC3\nKPRsR8EiFiuQ04Y4v3AKTTewrDiqGqHrFppm4FZjzCwu8on3vY/lpSVePnIEqW+imAbFWg1Lb1Ai\nhSK7CVA7VZBeKswy0Rm29Tu76KtUcOhCMo5PGuQGAo0kLgEgqaEyQESAYJZhVujHJMRCENHCA0Ic\nAoZQsVBZZhNnS7yYQSR5UngIWmKFPtGFp+hsGt0s1UqMGXFCH5zIxcYmwCPApO41CMMy+ZhORiiE\nuoZlxCm1FV5yVuk3ChSlwa7MAb59bAnpzZFNxrFuvBE3inhW03jPP/kn5HKrHD36Q3w/oq8vy0c+\n8kvXTZXvR5ifn+fRz36W3fk84/fcw/+7uIhTLPK869I9MMDEvn2MT07yH776VQ7ecw+e5/H88yc4\nd2Edt92mXCyRFg36pdlxd7lA2V1FUfbiyyFa8gq2CEiRQLpxND1Bud1ivWLjJXop+UVOSge9w+VI\nIBlHsEaDJm2GOgoCATBAkyVsFNJEWGzZjmfxcZCsM4EKJIjjoeAyAwxgASF12qjUkbQIOo6dggRb\nLJ0WDlvEMJUUqBpCLZJM7CSf1enOZKiHBtZASLvWIG3lURWFlhDs3beL2dkVSqU2s7Mvk0ymMIwK\nDz30K9d1nd6pOHIE/tk/e7ujeGO4//6t2Gs1eIOb0l8ofPe7R3jqqQuYZoHTp8+wsdFA149w6NAY\nQX2TnOdhVxrkcn3sOpDkpTPPUwpVmrUXyQzl+MSnP8a73/135kL/5b/8Lel0L7bdRjdN4ok49XqL\nKPJYWztHGLYJIp1UdpJa9SIyrBGQJIwWCf04vmwRsQZ0IwmICDHowWUNnQwQECNJyAJS6oTRVkU8\nwGYOFYMWkio2ASYuaYoMEwIpDEzSRKwiSJLEI2KNJgZJUjhUWcfB6Cg+S2qEuPiIjvHeLLOoRPRj\n0sCBsIiUJjEtQ6VRJ58O2Dk2zHKpTKWicUW0yRsGo6MT3IBk0fMYGhrEs23arkvMMIin09z+3nu5\n9MJxzs1+F3W9yFjMoH/fBO++807OnzzJ9zWNBx56iNvvvptyuUwymXzTEg6/0MkIcK0sHEURp0+f\n5oUXTuM4Hnv3buPmmw+9pryczWa55553sX37FF/5q78i8jwmDx7kYiBorzTpEjqVioumNVBViUuB\nctQmpZg0vHWatosf+lQ0wdgtt/BCqUQ7injZhr7B23A8Qb1VpdGuU/cXOnLAfZi4SOYokCPAxaJJ\nPyoqKhYaF2hQQ+nskH1gBZOQLIImVTxWkFhkWWYQCwXwaJDBQ0WhTURIiV5UsljkMfA6ImpJxaIY\nOQR49EmBrdTxMIiiAEvRaEeCTdVDRpt0AwqSCh5FXNrUSMRG8F0oxHRUxaThB/ieQdvoYiSp09fV\nj92us3Z5ifs//B6mOmp79VaLJ776VX7rD/+Qe+99F57n/dwmLZ5/6im2pVLk02lsx2FoZIT9sRiX\ny2UO3nknuVwOKSVhEKBpGjMzF6hUBGBytbxM0jJu0wAAIABJREFUX6SyEbYo0GQADUHEChvMy3NU\nyVIFFBnjvG8wKARR2KSpKazTw8DoFFVeJl9eJUvYYbtDEhcFlxV0LEZRSWLTwGeTBBU8JC46G7QZ\nQOLRZgAdHYMKEBHD7FBZ50jgoKCziUedHhS27qTPZdbppUDAOmvUaDCAJIFpOPSlcwRUKNdLSJnE\n823uvmOaSj1Js9m3JYgXBJRWVrjnnls5ebLF+Ljg4MFJ9u7dQzqd/m/c9euLMAw5d+4cp06dI4ok\nBw5sZ/cvgO56EMAzz8Bf//XbHckbQzK5NU3zyCPw8Y+/3dG8OZTLZZ555gzDw4d58slHkHKc0dEe\nisWrPPvsIo6zwRntMlmvgS7j5AfGyB68h/1mhnrdxbbLXLmyyPr6Oj09PQDkcmnW1pr09Y2gpNJs\ntDdxnBKeFxGL5bCjBm19kMitoBtZIncVEfkELOHKFJJuIrZTp0KIR4oGCpCgSZuzeHSh0iSNQ4KA\nSEo0QrSOt7pLHQeDLgJiCPpQ6AOaRGiEpDCRBJRQyJDBpkVIQESWJG2iTtUli0DHp4mCz5ZlXz8h\nNyI4j8MZGuTDHFXpgJS43hpJPaJcrhGP6UwP7mR/VjKkaeTSaSqVCqvnzrFSq1GJItx2m1fKZbbd\neCPv+9CHeGH/MVb/+I/Z1T3B2LZtjE9OYpgmu4aGOHriBPe8+93kcrlrAw1vFm9bMiKEeD/wZ8CG\nlPLOn3X8N7/5KC+8sEhX1wSaZvDkk4ucPn2J3/iNXyUej7/m+IGBAT71+7/PmVdeobK+zoRmcmzu\nNA3PwgvLSDWi7gUII8NyGFCzL6JEMXSZox7YJLpGOHD4Vu67724ef/xJvvitM7ScHA27RbWRx26l\nkHK94/qxhEEBCDGQZGjQjX7NgzeHTYoUAS5NNEICNAq02MBngxg+KqtAiQwBLZKotEnRJEdIhRAd\nFbdTCNwSLzep4WEjaUQBc2hkhEZSS6CKPlKKSTloo4cZ1qIio0bATjWJDANU2UaIiD2q4JxaIp7d\nT+jqZOI67aBGt2ejaClSKZN2KkXcSnF59gQ5K0fqx15e6USC2OYmCwsLTE1N/VxHPotLS4x1/G3i\nlkW2UKBWq5FLJPD9LSPu5Y0NxvbuZa1aZXFxHc8ziUSSCANfuiRwmcJAQyciopc2NbnJJj62opBW\nEsSjURap4ouQtIxTrydonV/BbvvESQAuBRRMQhq0WUWhxTixjuiRZAKXFA4aChEqcYo08aiSRhCx\nJWVn04NglQgTjTZF6qTJEVFkDImJRdhRZtyGzRo+kyTpRXCRVdbJkrEyJGMh9aBIKt7NZrOBIddZ\nWdJoNhrMr8wxlpmkHYasrq/TPzLI+Hia3/iNT73lDrtSSr761W9x8uQq2ewIQgg+//nn2b37wlsa\nx+vh+PEtrsh19Px6y/HRj8JXvvLOT0ZWV1cRIsvm5iq2bdLV1YOUEaVSgyjSGB+/m1yuTjaT5tnv\nf4lRM0MoNfz21ijv4GCAlMP85V9+ic985pOkUikOHz7AyZNfJ5s9yM133Mszj3+NllMkFqRY8xqs\nRm08MU3ogK610PUhFNaQfgxdH8EPs/hhD6oW4vovESGJMUeMNlUSQDcBIUlWyBCnjY+gQZIMeVQ2\nCKgTw7jWrm8SIEkR4BGiIDqTeG0ctrx2NJrUOk3abhR66UXpMA9TnfRng02G2JIKiBGSVTdA1bEi\nlYZfR4oWSW2cudVVWn6D4d4kN92wm7PHjmE4ztYGLpXiOzMzJHM5yk88QXpkhN/+1V/FNE1uOnyY\nk4cPc+erpuc0VcWQEtu2X/fd+0bxdrv27geO/KwDV1dXOX78CmNjt16rlCQSu7h69QynTr3E7bff\n9rrnpVIpbr1t62/Fyt+i6sepejkaGARNG8XKks5tGSIViypCjGIKhb7hLj7+8Qd5+ukZ9u3bzczM\nJRSjC6elUG8pKH6anGgjhUdb5hFs4uKi0sKgQQpBHBCEgIGPIIWGTYk681j0ENuS1qGLOgY6FjVU\nLAIicjQB6EeionYcDHRW0Gni0oPGJio+WRL42NRpqgo+KZp+HEsRDMXj9Kghq6Fkw/EZ8BpUhdbp\nQ/qkUIlpFnNRhXL9GKOj26jUlhhNhfTEuzhV92llMwSJYeabLUQqSX/BulaJCsMQ13WRYUgURW/6\nYfhZ6OrtpVIuX/O9ObRvH08++yyNSoWuZpN1x6GZSPDpz3yG73zlKyxWK7QrPnaQoK6bGPYawwg0\nAgQhKhEqkgIu66KGGzmsRAqaCEmbOQoRVMI2vmzQanuEhFwhTpUkZWxcKngISh3p5hJ1YkwTUgNG\nEcSJqCFoI9lNmRlqpDFpkCTd4cprCFIU8RHE6aMGwAQhLg02gBg6XShIHAQxUorFNAFJa5W+0TiB\nLxnNHKDlO5hqnR4ni3ulSC6qUw0j1tuA0kUulebk0S/wf/7p//qWJyIAc3NznDq1zPj4zddarrlc\nLzMzP3zLY3k1vve9dy5f5Ed48MEtzkizuVUpeafCNE2k9PA8ByG2ntNWy8ZxArLZFLqewPfL7Np9\nC7l8D5cvP8H8vCSb7WJsrMDU1DYsy2Rx0eb06Ve4/fbbGBkZ4aMfvYtvfetpMpkYuw7u4ZincGXN\nxtT7CJwmvr+MIvpBCoSh4bQ30fVB4rExaq0yQimjKAWEVcD15nEjmzKTSMZR1R5CKbgqV0nTRpM1\nhsgR0ouLjUuaOBZtwMeiSYIUATnaCFQsoNH5PVqjjkOOMgoByxj0kqeOgk+EwCVPg0pHfcSmjkaD\ngIppcVfXKLbrcqm+xLlAI50dZnp6HFW41O0K1UqRrsK72H3rrVyemeHM0hIzts2DH/gA+6eniZkm\nfhDwvS9+kd7eXgqFArF8nmqzSfbHHqq26xIaxhsmqv40vJ2uvVXg7+VZsrq6CuRew+TXtBhf+9qj\nbGzUmJwcZvv27eg/xU+7VCqjqjqm6WH0jlCrFXGcSzhOg0RCYWRkG6nUAJommJjopaenh2KxyeXL\nsxiGghXPsLB2iUYjgSV9wjAkKbUODVGgM8tgp4S25b24Zam0jkMNQRql48p6FYV5QjzUTvmtBw0T\ng4gkm8JDlU1MoIWCgUIVSQqLDbKsYrJJSIwcJjplAVKtI6Iq7ah3q4cZxZhtrdFjCWKmhxEI/KBN\noGukhUaXmcYPAooIjFQX8STce2CAneO3c/XqVZ5+6SWcRDf3f/C3KBS2WjKnTiRwLn+fXC7HxYuX\nuHhxEccLuOA2GbvnPqanp38u/jM/ws13382jf/3XxC2LZCxGJplkcvt21vfvJ3fDDRT6+ti9dy+J\nRIL/4Xd/l8vLGxz5wnfRzH7C7B0EjSV8GaAJCGUTXwoCITCkS0Z4DKOxIBu0ZAUlyLEhPUoyThRl\nUGhhkCSiyAYN1sh3zL8XCBlEZxwXSZ0UkhVgGjpPhY6NhoLcGpamhIOFh0kehYg6q1RI0o9CAZ82\nkEXFEAZIlxKSGAqakCT1Fn5YR0Qhg2qcM5dn8YMJVlKXSVhtxoVLT6GfjaUmeUtwRz7NU7UFsqMq\nd980SaR0v8Zs8q3C7Ow8ptn9mmckHv+H6xFcbzzyCPzrf/12R/HmkMvBbbfBo4/Cww+/3dG8cYyO\njpJK+dTrEbA1qef7Hp5Xp1DYQbu9wfDwVvslm+0hlcpy3303MTDwk4aaiUSepaV1ms0mMzPnqFTq\nPPjgu4jFYrzySp7jxy/Sm99JV6qLpfUKdXuNIJwlijYQogtFa9FyUtjeKqAhpQvUMM2QdGYYGKNe\nXwFeAnQUJSQWVMlj4GOSJUMLh01CItK00EiQJINGDQ+DkDgNXCI2aLOGgo2HwwAecbpQaGAjiaPQ\nBJSOqEDQ2Zr41EWKV1SPMd2jz1BZLs3QCGE+EsTNGJacJ6P3ceuBPYz27uALR4/y5OXL5ISgkU5T\nBh7YuZN7brjh2n2LmSZ91SpnXn6Ze+6/nzvf+16e+Nu/ZUcYUshkthx819e5+cMf/qnv2jeKX3jO\nCGxly+D9xGdLS5d55pkfkM93k8u5vPDCUcbGTvJrv/bw6+78PM8lnZ6iu3uSVmsdKQsIcYBTpx5h\ncnIQw9hFOj2AZSXY3FxmYWEB09waWd23byd/+qdfQGjdKGqOwFfwow0ibBSSaBSJUyVFHAeTDXxm\ncbAAHYGGQo0aJfpI02YHOkkSBNg4BNhIYqgIYTFh5VhrR0hcfFR8wESQwceghIWByiguBiExsmaW\nml+gHS1jkKKCwKWNkDHm2xeZJM8UBm0hcKVkDZWiI5CoFIMGOcsk02zylcceIz88zD3vex+/+8lP\nsrFZ47nnZlhaqiKly9CISWL0Th49eozNxQaxRBclEbHtpvfx/e9fIh6Pc8cdr1+huh7Ytm0bU3fc\nwVe+8AUUxyHV3c1N99zDrz7wwGvaQ8lkkqkdO0j2nMP3+7H0JLaRZ8XbIKFIUkKi6yp5z2MxDLk5\nnYG2Q8XXKWJTDZJ42IQUMNFQyeF2Jp5MdGIMENGgThudDA4NdNZJYRGxQYsr+PShoaDQIksdFejC\nJ6CLNer4VBCEHcXFNDFK5NCIiKgQ0Y3syMfHaAhBSwgSnkOGrRHkZssmFqkklIhos0JFrLAr202p\nfRVpmix7AWtlF1/EsBJd9OaztD2PoNPSeqthGDpRFLzm8yh6e+L5EUolOHduy+flnY4ftWreycmI\nrut88pMf4XOf+xq6XmZu7gcYRpqtgmgbw9hkbGxLE6dcXmX37glWVpqvuY5tVxHC5N//+7/GdTPo\negLXnaVYnCGR6KPRcJFuRBQ1kCKiv7CXavMEMoRAFImirYk2RRlHCEEQLBFFPr7fRlFUuru3kUop\nbGysbfmn+VvjtzXawCZOx0NqawR/jgRxfBKYDKAzxlUcymyQxcemTQIFm0FcklTQ6MHDRNCmixIl\nEkSkO78PW6olCoqWQbMCArHJaDbNbFBnyc8RhVkUstQClW88e5G665CNm8yvrdHwfbxmk51DQ2SF\noDgzw1ldJwq2vpu9AwMkTJPa5iYAu3bvRvnkJzl25AgvXrxIrdkkmckwf+EC2VyO7a8y1Hsz+Lkn\nI0KIXuALr/p4TUr5sZ917h/90R8B4Ps+q6t1urrGSCazeJ7DiRMvomkD3HjjTXR3dwPDzM+f5vjx\nE9xxx+2vuVZPTx+JxGXa7RaZzDBRFLGycgldT9FqNVlcPIFlNYjFVPr7B5mbW2J6WmV6+t08//xx\ntm27gStX6gitCn43vtRwUIkj0WmQxiJOhE+Eik8GSQFJHI0WIQ1CQqoMoRJHR1Ahh4KPTgWbJhED\nioqCTlKY2FJSJWQUkwyCJh49OKygkcYkRRwfScXZoEUaSQFVdeiKFBRp4hBioaN4AQk1Rj45wXpj\nCUvLUUgPcLW6iqdmyAuFCEFCSXLpXJFy9AP0/CDvfvcd3HrrIVZXVzFNk9HRURzH4X/+g/8Dxifx\nk1l2Dm8nl+vBdds89dRxbrnl8M9t533ku99l9tlnuXNyEsfz2HRdkLKTqP4ktp6XGg8//Gm+851v\nUK83sbK95F2dUnuDtiUYMAxmKhUUy2Iin+dsqUlOibMvMczpjSIekCJDHoGHoE2ys49ZRyeggc2W\n0mqMJJcZJsBgAkhjM88aNSIGMPAp4NKmzFZ3tUQMjS1JuF7SxCmzgUMdnYheFJYJ2ZARbQzWgTkU\nxqRClgQqkpL0mZMxIhRq0QZ9Yhw7UthstOhNxVnaXCOfTrFtaDd+u4ZlbOfrz1wgnvUwd+/GisXY\nvn376967nxd27tzOE0+cwPNGr9k8+L6H666+ZTG8Hr7zHbj3XngD6tW/cHjwQfgX/wJsG65jK/8t\nR39/P3/wB7/Jgw9e5oUXjjM7u8aFCxt43iwHD96Lpumsrs5iGBt85CO/zOc+93VKpSUKhUGEEFQq\n6yhKiYsXJZa1i97eAgBrazHOnTvH/v0xJidvYuHyJcDCdoqoyioTfZOs19v4ikUkU3heGSkvImUX\nqhoBZ4EQTfPo6+tlz57beOSR79NsmrjuHAExQhRc6ixRJ4XKts70C8RpYrPGLDCKh4JCjinKBCi0\nkASkiFDIoNJEdGZ32tTIUaSESoSPTxUXQYaUX8XoSXDD9ptYuHCRBbVAPrYTvyFoRRYZs5tmK8aR\nZ8+xL9XCTFq4vs8tPT3QbLJt2za+fvw4pbPn6O3qJptJsZq6QDOf58Ef61vu2LGDvr4+/uY//2fG\n43EGcjns1VWe+OxnKb73vdx1993XZd1/7smIlLIIvCFT7h8lIwALCwv8zd98k3I5xuZmiVqtzi23\n/CgR2UJ39xgnTpx73WTkwIEdXLjQwPMSLC5eRVVVhod15ubq9PR8hERijVKpQrMZY2bmOUZGQn7v\n9/4nenp6OHNmlgce+DAzMxc5evQp5mZP4NGFgk5ECUmBKi7T+Ci0WUdhBAVJRBENjxRpkuQpYiAJ\nqVEggYlJEx+BTxWDRKTQbEe45KiwThYXC0ENnw1CepAoImBThihCoElJFkGZFioemjDIGUkanktM\neiTwSRCgxLMkdRvPHOGsY7PkGMTUPgZUiyv2BoNGjpRrkZMFFufh299+gRdfnOGhh+7kgQceuKbt\nEgQB3T1jDA//ZAXENGN4nsC27Z/LdMby8jLnnnmGm0dHUTutOiklPzx1issHDjD9KmczIQSKIujp\nGeLhhz/FlSvnOXvaJlq5QNxXyOYSLK6vY6VSHMzliCUSxO0YC06EFxqECqgRWJgdyXWJikaMAJda\nR1cggyCGgcoAMUwCBCtINDK0CLBZI0Sjhck6/XhYpGlhYyKp4tOkic0KSQIcIspI4vgoisrJKIZN\nBodukA5xFunFYB6DGkPopOnFpI5NSa5hkOSqV8atthGaTstPcGphhqBnlIQneOVSjcmBFsqlS/zw\nzBle6O7m4U996rr3fX8aenp6ePDBO/nmN59BymxHTKnMBz5wmH/zb96SEF4Xjz66JRr2jwGFAhw+\nDI89Br98/WV+3lJomsaOHTvYsWMHsMVRO3HiJM8+e4pKZZF9+ya4666P0dXVxac//St84xvfZWHh\nOQB6e5Pcf/+dfP3rz9PTU+icHzAz8zKaFufixRl27DhE27YImnVUJQDpUKoXafkOfpDHdUFVI1RV\nIkSFMKyhaS1GR3dy0005IMHJk89Rr1cJwxxSbkdHdNoosMJVdhIhgRCPiBox0nRR5yqzFFCAgBYR\nORRUFOZoIcjSj0EZBxsD0TGkEOSYo46PTT9pVBxMvU2YHKZg5vAHhpi/AhtuhBEbwPM8Wu0mKVJE\nUQwHhRtySa7WalgjI9jVKqdOvsRipck2qaGJBG5bZaO2RsXzSKZSP7EePzx6lG7HuTZJmU4kKGQy\nHP3e97jh4EFSrzr+Da35m77CG4QQ4iDwb4E9QojHgV+SW42518Xo6Ch/+Ie/xfz8PJcuXSKRSDM9\nPfWqo+RP/X/79u3lxRdfYWUl4q67biQIfJ5//hEymUGSyTzZbB+p1Bpzc+fZ3LyKYRQ4ffoiY2Mj\nxOMWUkYcPnyQ0dEevvKVL3HhbBUZtojoRiWGj8F5ztOPJEFIGskiKiG9CAYAlyRVwELiUcNGx8FD\nUibJOhBIhQidWofOKikiREgan52miR0ETJuCb9jr2NLCAiwiTKp4hGhBH6oBMV2h6pUZxSMfT5Md\ny3NltoYu+jD1JC2vByVYpUwTGfYhhIOhS/zAo16tc/ZsjFarwJ/92be4cmWNT3/6n5LJZEgkEhiG\nxPOcnzAxdN02UWRz+qWX8D2P4bExJiYmrpta55XLlymo6rVEBLYSjsFkkguvvPKaZETTNPbt28aZ\nM/MMDGxj796b2b37EGdOP0N1+RgT44OUNjZQSiXWL16kXq9zuiVp6NMIZRAj7mC2q7jRGpHMEhca\nSBufZULanTVVgFKnmaJ2PHSTwFxHmizEYwWLDW4koEKsI+KfotXpD3s06SOgG58GgjlDxYlUNhjG\niwaRmokmbHx/kFVsNOpUKJAnQwaDLdF5lTYRPvNUUUE2yCk9hPEsNS1LV7aPpUaDvRPTFPJlxvr6\nGAMur6zwgyee4EMf/eh1WaO/Dw4dOsjU1CRzc3NIKRkbGyPfmZB6OxAE8Pjj8O/+3dsWwnXHRz8K\nX/7yOysZCYIAVVX/m5wzVVU5fPgQhw8fes3furu7+fVf/wS1Wo0oishmsx2e4db16vUyx449xZUr\nNZpNnSgqoutnCEKfui8J0Mjm21Rrm+RSO1hbC1HVNFE0hqIsEYtt22rnyFeQsoVl9TM767G+7tBs\n1lHIoyptwkjHxAAmgGVMDFqoQBqBg8DpmJgmqNLCpMFZFIZRCBC41InRh8Ajj6BMiEKFPqokUdHQ\nSGKiUSLCpRHLEe+dIFJyIIr4WkAYxbCsFAo2piII3AppI0m2UEAqDfpVldVajS5d58LlOW7qG2ej\nWeVM4BI3TMzUADv6CywvLLBr165r93j2zBn2vMqpW1NV0sDKysp1ade8nQTWE8C7/yHnGIbB9PQ0\nIyMjHD/+/9BuN4nF/o7lWyrN88EP7nrdcy3L4tOf/hinTr3ESy9dxDR1Dh4cZ3y8iwsXFomiOAsL\nK3iexcDALvbsOUCz2cdf/dXXuP323Rw5conx8YO02000rQ9FjRA0kVEaT1pIIlZZoYlHHA3DsBBh\nHFVmEdIgkiE21paDKnESqCioRIRUhUNDpmgbU+DrRPgklSaG9EnKFqqWphoKnGCDZOSSkUsoNNGV\nNJH06ZU2ESENKpTbOXQ8fEqsq2naZpJSaZ2urmGWyi0ifYx8shev2sYOlkgYGgmhYWgGl51NVH2E\nyO8nnR5Byjyrqwaf/eznufPOw8RiMW67bS/f+95pBgb2YpoxXLfN6dPfJxbMM/9EE0NVOf/kk+R3\n7OAjH/vYdSE5Kar6umlmGEXoP6Ut9J733M3S0hdZWDiJYeTw/Sb9gzr/2//+5/T19XVE0Z7nL//k\nT7g0N8fipiRrTKCaMYb6DlBbOkbTbuHIKk6kEkVtpFTQGUDTxonCHIG8RMBlQmqoKB0/CoWUMGlK\nmzweJgHrqIRAkxguEU0C6gySp0IOSVI0OTjSzwXN5GwxiRJOkrKSuFEc220huYrHMHXOYZHEYGuc\nr0GIQ0SBGAoxutUhpOlQVHxi3dPsnr6TcvkFskqClAG9+b/7roz39fHs6dMEH/7wW0pqzWaz3PBj\nhLm3E08/DePjMDDwdkdy/fCRj8C/+lfvnKma//gf/5L19RqZTJx7772ZG2+84Q0T4X+8ytfb20sq\nBY1GhePHnyUMhxkZmebs2RkMYxuXLjUZHCzQ39/LysoPGB3NU61ux7ImabfPUSz6gEYQZGg2zyNE\nHcNIAJtcvNgmnT5ELueyulxCJY0f1WhRIqAASEIUGoQkKSBJIgGBR4AkJELSZiCZZqHlUpYG3cTZ\nh84KV/A6NhAWbVQaWIBLyAEVLBHiCZUVxcQ2LGS7wfNr80SlRRQnwveg3lJxVAPdsoinPXqtDJrS\npDef53KlghVFNGwbVUugCpVCMsfu6RtJxNN4nsNa7dJr/KeseBzXdYm/io/pwxty6H09vCMIrK+G\nZVk8/PB7+Pznv4uU3ei6heOUmJxMcdNNB3/qeVsv01u57bZbAXjmmed4/PEr3HffrZw+fZqF+Qpd\n6TQhbXRdI5XKUa/nCUPJoUP9PPfcEZ58/GnmLpVIksCTEZ4wMFQFN2gAdTzSeFoMU4noFjoykATR\nVkG+Tr6zd16kRAOBBghMEZK0uvH9GpoWByVOLBxkI1oiIzKkNBM/dGmR4XzUJEabcWq4UYM2goRl\nMhAJLntVIiXCliZZYweOtJB+kmoQUDSqODGDXGaIffsOcvF8nCvnztOv1ZHoLDlNNoGBzDQVb40o\nCpHSY2OtwcvPHSG2eBlhmjiJBDffvJczZ07ieQJF8YmHV3lo/15SnUb1JHDq3DlOnTzJ4ZtvftPr\nvW1qihcfe4xx38foJDdhFLFs23xw797XPSedTvPbv/1JLl68yMrKOl1dE+zcuePaXLxhGNx1113s\n2rWL//v/+lOaT89Ra1TQrASO3WKzVccgx2jvbjRDZ3b5PEJuYun9+FEbD0EQ9CKVFsWoSU+n/WKj\nsCYrVDucIYHGMgmahAg86miUUQnZQKeCUD2ErnPVdmgZcdJWD1EYQ5UadrsFQkeyRd/eICCBT9iR\nh45Q2aJ2+xgIdNMkk4mR0RWW7BUURduSyg+auEqFA5N/JzDWbLe5urTEsaNHmZicZHBw8E2v0zsN\nX/4y/Mo/MgHa7m64/Xb4xjfgE594u6P52fD9MUZGcrRadb785aM4jsvtt9/6pq+rqiq/8ivv4y/+\n4m9ZXi7S1TWK79dJpWzKZY9Uaphi8RUmJ0Puv//9HD36KK5rMTIyhqoatFrHaTaXcd0KijJPIrGN\nnp5BhKgSht1Uq1cprYcoYqsWLkSWSFr4zKOgIgkpUkdDYAKCLA4tSoTo9KPrEj9TYCQTp7X8EiYu\ncXR20cYjZJ6QDCZBKkU2CLjUbnNGUbAk1GRITejsT3fRmJ9hM/BRvDZ39xf4fmmBpXaFuNlLO/DZ\nOTBGu7LJZF6jL5fjmGEQui5xTSOVTnOhtMb2gUni8a02i+M5NHSFqZ07f+J+HrjtNp7/0pe4KZG4\nVvFer1SQ6fR1c/F+RyYjADt37uT3f7+XmZlzNBo2ExN72LZt2z/IUfTAgX0899xLlEoLrM7PkXLb\n6GGNhLbGc99Z4mh6GF23KBZN/viP/xdOHnuacTmPGquhMkGj6VGP1qgE0KussTfy2bAStKIkS47E\niXvE1ICG6+KSQIZxhFikm4ABEeETsiYjasLAsAoI4ijRAKqUBIqDE/QxRxnbKaMZMVrJEUrtMonw\nKioBDoIqOj16HkfWUVBoWxFxMYCl5fBTKWqKSdtRUdQuRkfTZLMqrdbLFHpaVMugygbLfgFViZMn\nRiQdTD3E89okEiF+aZ2hTJ5tg4NkEgnOBFLHAAAgAElEQVTWKxXmL1/gX/7Lz+C6LouLixz7fPla\nIvIjjHd3c+bFF69LMtLb28vh97+fFx57jB5VRRGCku8zfccdjI+P/9TzDMNgz5497Nnz069dKBS4\n4dBh8j13MPPKKyyeP8NmVGS4/1ZKpWWKm+eJJeJYiSQj49uQMsbK0ipRs4WMFMCnQRfnqTFhJFCE\nTskN0AjYxMUmRZ0ubAoYeOSRNBBYXGVS87glmUZGIUUv4nS7jidtVMVGiSSq7xHHpU2bkA26SCHx\n8UngEdHCICMCNLVKl2pgR00ULU6hO4MsLzE7+ziJRJ1qc529mRwiCJBSMru8zJM/+AH5fJ6rTzzB\nqe98h2233ML7f+mXfq7j2b9ICEP42tfg2LG3O5Lrj098Aj73uXdGMpJKbekGJRJphoZu4MiRH3Lo\n0MHrstuemJjgU596iFLp82iaS7ncwDQthPCJok1SqYi7734X8XiKo0e/j+9vjRJns910dWVpt9cw\n1DK5VIxEDnp7NVqtNKX1JaqlRTR/iqQwcaIaUm6RwU1cYpQYY5Ms8DLVjvmpTpsMNgOEyhK37NtP\nKGuEnkaxOMjF4Cp5WmgEqAhAYU2J+NCePeQsi+UTJ9hUFGLxBJEb8dDgFClNZ6Zept+waCoRXjbL\nYV3nQKPJWqvMmtDYaLjcd3CasLLB4wsLpKemSAwN0dfXx/rpCyQGd1C2G2iVdaIoYra6xC//5sdf\nY+ex/8ABVhYXee6FF8gKgQf4qRQf+bVfu24u3u/YZAS2XGNfj6z690UqleLXf/2f8md/8h+gcQpD\ncdneP0KxnkC4PTSqSRKD0wSBw5//+eeYPXGC8VSKUKyw5J6nJmM4UgFq7NE1xhJd7Ch08dzKEnGZ\nwBUTtMQySnycKMjhtl8hJWukRbBlJ60IMuhUUwXGdu9n4fIsbmUFkzRe2CJUXAytl6ru4yeH6e7e\nRebKN+m1TbLkSBDDJ+RSw+OsyGOlyuwfmaBk9wAxhscnKGsaoRanXG5x001j7NhxI+12i+XlM7zn\nnkH01WXmV+vMzm9wYXGRINBI5AoMDsbxGg5JQ8HUJelOstGTyzG/sECxWGRkZGSr1ytf20QRQhCF\n4Rtem1fjtjvuYHJqiksXLxIGAXdMTTE4OPi6L08pJWtrazSbTSzLot1uo2kaw8PD6LqO7/tcuHCB\nq1dXyGZTjI/3c/Toc3jlJrftuomnTj3LRs3CSk7TlY6T6ClQqZYol1fRtE0IBHEtia1XCPxFsoqH\nqyRZMA1inoeqajRClw1imIyQYYIcCk1CKmySwUbBo0HESafFsG6SNEyEXaVvMEUgVZbX6iSFQSh9\nAkoMUcVQUsS0LjbCFVJWDyVnmTY10rEkTixPw60Qr9cwRZOpWIxS9QVGu0e49caDnDh5kscefZTe\n0VEuLS4ylctx5113EU8kiKKIF48d4/zUFDtftSP6x4qnn4ahIZiYeLsjuf740Ifgt38b1teho4j+\njoBhWPi+Rq1W+4nBhDeD7du3MzaW5/TpKlIW6Orqodks4/vrqKpKPJ5CCIGux+jpMdjcPEs8Pkit\nuozmLtGtNNhmDtOobbDSmmP3oXfx1BPfIAoTGJqFYSSIex4tuVXvlLIMeHhMsQYU2GCROh5dWw7u\n2llMJcRt1THMKmvrTXZ2TXJh3ceR6/QhyBAhCAl1jedPn2Yyn0eoKiO5HB/8wAd46luPUatt0pIQ\nIhF4DGWSXF5b4/0DA+iZDKutFlpfH+vd3ey45x5ymQz5/n4mJiYYGBhAURTW19f5r//1G8zPlymV\nN0HYfPI3foeHHnqt2Z2iKHzwwQcp3XYba2trWJbF2NjYddUaeUcnI9cDXV1dKG6D23f0c27mApvl\nJepeLz2ZAVq1Ep63zr5997Kycp7FtXWyQqKrCTJqD/lYkoYTsBomWBE1RlIJik6NAk2k2eKSF7Bt\n7H5k5DO3eoaE2CBJFkPPEQqHhO4zFkjabhu3vU4yE0epzRP5RWRYJ6e0yes7yfQMM9vUsBuLFLw6\ncdUiiDI4UiCIyGBzSQpiDY/zl86SNMs00Wm4m6R6t5HM53GdRep1yeKijudtMjmZ4+677+MHjz7K\nnswGB7cPslDs48mZVXYc2M/U1BRHHvv/MLUy7z204yde+pqiXJNgHx4epq6qOJ6H9WO7mYVSiZ0f\n+MB1Xave3t6facbUbDb5whe+wdxcleJqhYVLp5jqNdkzPU6USnH/Qw9x5MhRVlZCTDOP5xVRlA1i\nsQqt+hWqpqDlVGm0LUZ6xkklYrR8wcTEdo4efRpdGyZGL8lUkqYWUW3kIdDoVuPosSRRbJWMu0DK\n1qj4Q0CIj8QHHFQsUigU6aJFMpBshgEtzSQRBezoyaAPQdNWicmIxdVFgmCNEVYYUZNUpEuohdw0\nMI0nBM7KMu3YTnp7t6HKiMbGZVrhPLa0uXHfHvyNdepRxMTAANPDw1y6epXHX3qJ6YEB7rnzzmu7\nT0VRGEmnOXP8+H83yciXvvSPr0XzIyQSW06+X/wifOYzb3c0f3+EYYAQ/mv8xn768SGrq6tEUUR/\nfz+bm5scP3aM9aUlugcGOHjrrQwMDLBr1zBHjnyXQiGFYcTwvKsYhkMqNcnq6jz9/WNks5KJiX3E\nYnFOnjyK5p1jKm7RZfWQSZgMWBlSjVWOHX0SPerGjwp4fh9B6KKIq3RbMVy3gqNo9CvbyKBCFKGG\naVwWaSlLNOUI/am91CI4eWWeoVyFZGCz0agTV9r0Rxl02cCljgbUXZea59FwHKxYjJXiJv/2s98i\nR5oF4SOpMp5UGBse4tLqKl2Arii0PQ/dslDjcXb19bG0vEY7MFHjLQzDuNZm6enp4Z//8/+RpaUl\nPM+jr6+P5M8gGnV3d19LFMvlMq+8/DK1zU36R0bYs3fvm7IE+e8+GXn+2LH/n733jpOjvvO83xU6\n5zSpJ2dJoyyNAkhCIJDIYHAGY+MXeG3v7Tq8fM+FvV3f3d6+7tbrvXuevfWuExjbOKwNNskiCZCE\nhLI00sxoNDlPT3dP59xdVc8fIwaERJYRwe9/JLWqun9d3dX1qW/4fJk4fpzVNhtX1frZ032aoWgE\nHWYkSaSzcylWqxW3209O1JNNRUjrKnFoLjRFQZEUzIqJtKqnJzpFp0lmqcfJeDRKVC+QzyWQpDzl\n7kWUZBFnSYegJrEINgqEsBnyVOntJDIpzD4vszMJjIKERS9jdTYxkS0yOTNJQRApJuKUl0JoohlE\nN0kliYwCGFDJUqEp1OZzpEthqh11TBVCVLo7sFt0KPkYN9xwO8lkiq6uLFNTdn72s2eoqnKw4vp1\nZBIJFns8fK6sjN7efiYmZlizzk1VTkfNq26vcoUCaVGk6mzVn8Vi4YpbbmH3b39LpU6HyWBgNpVC\nV1vL6jVr3vPP8+GHn2BiQsZma2Pw2G5W+NcTjo0h50s0evT8v3/399grL6epadXCPqlUjGTyedZ0\n2LCISQJRgaKSwWk1kS8qyGYDweAwmqaiKhqaMkMmWUIrzeKSGsmrCaxkKWWzGCwVhPLTuNUCvrOt\nwQnGMVCGdHbihJ1xFqNRIUhE0IgWiiQMOsrKyrju8qW8cOgYY4FhFutjVBugUNAjqOp8zlmJIcuV\nhBPTCKKbltaNuH1+hrtfwqH3klEFqpfZ8bmdqIUcUqFAIBKhvbaWlW1tBBMJtFzuvDC4LEmXzBDt\nvSaXm68XOXr0Uq/kj8dnPwt/8zfvfzGiKCUkSUZVFSYne1m3rvUtzTuZnJzkV796jHgcQCSTmcaU\nnmVFVRUNNhuRnh7+7fhxbvj857FaHWzcuIFkMkU6HWfTpnoCgSypVJaBgWNIUpA///NP0dU1QDxe\nwm430WSxYlOM+HzVmExmQCM/O0Ypq6Pa14qa0gjlE6iqCb3Oh91VJBCN49H5kVUd5PKoaGiChE70\nIGkFHDorgpzFqGrYqhchG7MooX3UWWL0x2P4NBUTCgIwDrQAaU2jAFgNBnqLJnSlCgoGGx2VlWhS\niWSyi2A0yqyiUAaEUimihQKW6mpqamrYvecQs7ZK2pe5mZwUOHLkl3zxi7cu1HmIoviOaj6Gh4d5\n9Cc/oUwQsBoM9Jw4wZE9e/jMPfe8Y7uAj7QYSafTvLRzJ1uXLyfR34/L4WBDewuF0wEyjiK+6g7q\n6uoAyOcTbNl+FUd/9lMEzYjLbCaTz6MIAgaziVQ+hUmUaSr3EslkCIoiLb5KZKOeuKgCfqZLMYRi\nDCkDHoeBgubGJEfJ5FRcZhMz8TwFyUmd2Y9JpyLLBdzJM6QKcTx2O2opj5rPoalGShTQY8DIfGGj\nEwEfoACKmkQRozTayxgc3UdFhZcWt5GnHn0Mq2cZjY3b0OnmL0aBwAinTg3wpS/dtRD9qK+vByAW\ni/GL73+fvslJyh0O0rkcY6kUl91yyzkKeOWqVVRUVtJ78iSZVIp1Z8P9F9su+M2IRqP0989QU3M5\np46fwKXXo5f1eJ21HB/sZlVrE7PjIZz+c9W/1erEbK7EYIF2sxmf00y+OEwseYJoVsQkmSgUwOWq\nweNahKuQIRYMMJWYRaea0YQsCS2DT6cjkkhjUIyIUhwDBSpUKzEtR5jxhSk29RSwIYCmohdE9IJA\nud7InE6HxWSirbGW3OluWux2MqUiKZORXCoJiorFpsdoTGI3yJhkL56yWhwOD76qRqyZBKm0hM1j\nQRBFNE1DhXNaot12O4P5PCVFQX5VrncyGmXVB31Ay1vkkUdg5cr54XgfVrZtg89/HgYGoOW1Dgjv\nI6an9yMIZlQ1y5o1zezYse1N98lkMvzkJw9jMLRRU+NF0zRefLoXT3QaT3MzdosFu8WCI5nkuUcf\nZdnGyzAaZ2ltfcVmV1EUurtfYvPmKm644TosFgubN1/O6dN9PPlkmANnZFbWdTA2NkM6XaBYVIhn\nw1isjVi8lWhaBIfNRTKfJl0sghjC63dRYa5DzUExGERLZ4gpecJKgTxm6i1GPGUOQskcNYvWIYpF\n+qJdmAtZXLKIXCwiMW9Q4QO8QBao1OuJKQJVmpGiXkGwWIgoCj6dCVFfwbQwhdTQQCQaZUKSsFVW\n4qquZt/ze4hkFBqcdTB0gklJprrjMh57bBdf/eoX3vFnpigKT/7mNyx1Ohdm1viZtwvY+9xz3HDr\nre/oeT/SYiQQCGDTNNpbWzkajTIaCmEQBMxSnsHwKLfd+gVEUSQeDyMIIf7yL/8d3x4dpvdwmICq\norfZaKypISPL9I73I2aDDCgKNr+fqzZt4rGXukmm4sTkErKpROOSKxk59huqxByKZCKey5K16HEZ\nZDJCnmQ6Rlpfx7AWxZaNImlmgkUVTV+HVrTir7ExPLwXf0lFUueQsBKhRBABDwUcgoYNkXEBlpbZ\nkdxmpiYm2OKcL4r99b5jlC2u5qzWAKCiooGxsQNMT08jiiIv7trFWH8/ZquVRatXc+XNNzM+OsrM\n2BjW6mpuWrduQay8msrKSiorK9+zz+5C5HI5BMGAIAjkc1lMZ1tWdZKeQlFFUVVEDYqF8yMAoiix\n42O3cWTPHpLRKLIhh91Yw6olyxifCKEodqamXsDlcRGdSpFKFygWQdU0NFEjLTspFksIhQJlZh0r\nyms5E0qB6sBbchLPpDGSxkwCB2CTJFKqRlhTSKkFXFY3Kzs7mTUaieXzTBcK5NJp7Ho7mt7MBGlm\nhCLkZkjbHNQ0tSMkyiiVokQiOTRZZjYRoMqro7qijAq7lcMDAyQMBvzeeeOnQrFIzmjk8ptu4tDR\no9RYLOhkmel4HFNTEx2v05n0YeO+++AL7/y3+AOBLMOnPw0PPAB/+7eXejWvz3/8j18iFoths9ne\nsnFWf38/2axtwdAsn88iZFM4LeWMj0/S0TFv7+Cy2ciNj1Nd7cdkOkQkEsDtnp+FFI+HqKwUuO66\nHQtpIaPRyMqVK2hvb+Mrhw+TTIRobaklnckQCEwimIrUNLTgr17GSOEw2UgMCQGFHPZyB7d9+iaO\nHpkmHZWZFATGxqfJaTZyShpZdDCuWCmVNGyV1TidPsLhAdxON+FSAi2TYACoAayABATP/ikrKpl8\nHkkU0KQSlWfrP8an+olFp/C6YevmzQz19UEggM9k4vTRo4xPTOFrWUNLRT2iIODMJBkf6kIU/aTT\n6becDnsts7OzkErhfE2Ra315OXuPH+f6W255R4XwH2kxotfrKaoqsk7H2o0bmZubIxaN0llfTzEw\nRyrVSzot4vGYuPvu+dDWn/+n/8S3/5+/RV+qxmFxkQHc1ZUscSXZ4q9jWUMDRoMBQRTxVFXx/Z1H\naK1fTiSioNNJ2GuXEQr0kkpHkeQim8sqkXw+mlpaePZMP31jOuz2dYyMnCGRmEYpmHCKNkyWSiyS\nh46lLg6fehGhkEMijYIRCyZUijg05scoCRITsThSocDisjKMOh2BWAyH3YUxW2RsdJSWVxmFCYKR\n8fFxDj31FPU6HRvLyjh+7BgP/v73mGpqaGpvZ8XmzWzeuvWiGZn9MfB4POh08xM/PeXlBIJBbCYz\nyUyMCrdlvkZdX6L36KPMTRzDV9NGTf1iSqUCJlOJpUuXsmLFCqanp7l8aor9+08QDmcJH++hsrKR\nlSs30H3yJNG8yHQ+C0oRlSGs+lpkyU2eEDohSJNDxKXXc3mFRERJMpAoIpUilJPFUCgRBWZVDdAo\nIuA0WUgV8rSvWMFn7riDZ555hoMHjxMSNAKoJFOzeAWZJYJExmykpqqKdddew+n+EDpdI9lsCUVR\nGB8RiE4doKRWEEinCfl8WCwWJsNhVE0jpKqsu/56NmzcyPCqVfQcP042m6Xz2mtZvHjxex7JuhSM\nj8ORI/Otrx927rlnfhrxX//1+9fu3mw2v+0x9IlECkl6JTIrSTKqICBKetLp3MLjqqqiAC6Xi7vv\nvp2HH36S8fFBACoqbNx228cvKIBMJhPf+ttv84P/9R2Gx3oQFBVdnZ1GfydF5qcKt63YTCQSZGqq\njzpXhu9851usXbuW++77OTt3dhMrahRkJy6PkfLyRpLJPMWij1h6EsmQpLvrOSRdmAqjAY+jjJ5E\nBCfQLwjoNQ0DUAeYBR0pRaYoQKCQRjOVkSgUiAwdx5yYolHLUClaUXt70UcirNiwgXgsRmBmhjpv\nJXKpAGddTpxmG6Nz02SzjnflLSSejbq+Fk3T3tX14SMtRvx+P4LbTSASocLtxufz4fV6OToywp/f\ndRetbW0oioLb7V5QeqvXrOEf//U7/OAHP2dgIEgmkSQ+cJIVK5sJ5LK0FIvE43GG+/oYnp7GX+1g\n/dZGBgbGCAYH0EkGBuYUIskgtbLCUDxOi9/P8fEpXFVtaKMnOHHiFFqxEh01QAOqpjCXDmBN6Oio\naaLfM0o4DeaURq2qUCJ71u9TRUbCafFwJp6DXB4hnmFgcBSLUYfmdDNHP4w7FsSIqipAkpH+fqpF\nkWqfj1MnTqAGAlxTX8/RWIxFViunn3kGo9nM+g3v3gPgj4Ver2f79g38/vcHsdnrGDcZOTM5iFkf\nYcOSRr7/2GOEZ+co5uJExyeYOX2UvrJKlnSu5O67b104Qf1+P36/n9WrVxMMBrHZYP/zk6SjU1hS\nk6RDEzjUInmxgKivxKQvUFSCpLKDWIRRjFkDitlLMBKjQpKo1olYKixIKZGeSJ4MGnZNwwWUiyKK\npKM7HqBz/XoMBgMtLS04GpZiMscZHOqhRXJjk8zMFpKoJSNaYI6uPXv41Fe+wlNP7SOb1ZAkjZVr\nXGz55n9BKZXQ6fV8rKWFZDLJ8OAgoiRxdWsrZWfrfxobG2n8MLaSvAnf+958y+u7qLP7wLB4MbS1\nzQuvD/LwvNdSVVWBonQv/Fun0+OuaWO8Zy+L2l7x0hmcmaGuowOLxYLFYuErX/k8sVgMTdNwuVxv\nePe+ePFi/vaf/4mBgQFy2SzVNTWEQmG+971fcvjwY8Tjytk24Dxr167niSde4umnX2J2dgafz0uh\n0IfN5kEQYC4UJZXMk0gNgFJAn9EjKkGsYoLJUp4yTyUeq4WpQg6jIFAly0RUlaAmYVM1dEqBvGxk\nVpcnlSlSI0exlaaoEUu4jCIVDgfjg4Msb2ggFo2yZe1aZgMByqMJhkMpstkUFvP8mI5UMsqWjoZ3\nNZeqrKwM2eUiHI/jfVV9yNDMDIvXrXvH9gAfaTEiiiK33nEHDz3wAFNjYxgEgbiqUr92LavXrHnd\n/umWlhY+8+mbeeL++2lw1NNQWUkinebF4WEeOXWK2MAALqOR8poaNjY1MT01yp996U6isRj/8I1v\ncGu9nvK2DubCYYbicX7/4gl07hXU1trp6Lie0ZEnUEpxBFlAlKqRZR0GnZ5gdILTwwJzCQveCpm4\nMMZsKkKZUsSDxoRsQm+2kJN1pPV2dOk51ssWKr1uZJ2IQprjwW6mzXZU9UrS6QQ9Pc/jdIo8+cRx\nrmqqIpPNMjs2RpPLhSgI2AWBVDbLkqoqjrzwAp3r1r2voyPr1nXicNjZu/cIxaVGCjkvNtnJYKlE\n/3SGJXXbcFrcRONRpoJjkJ7iis1LaXlVYr1YLLJ794vs33+SQkFhuP8U6dAk2azKUqOJuNVLsBCk\nIBkICBApRXCoQap1CpJOoD+RZiJVQDNbOVkoYJIkbDYzAdFOSBbwllQGAR05jFqJKjXPotpq8vn5\naQiFQoHmpZsYkk8QHzqJX2ciqBTBWI7TIDM3GWd28lnG4nnu+dIXqKnxYzabKS8vP++zsdvtH0lD\nswuRTsOPfgQHD17qlbx3fPnL8C//8uESIw0NDTQ0WBkZOUVFRTOSJGN1lxEptzGllsiMj5PWNMw1\nNdx4ww0L+wmCgGt+/O9bwmq1nuMWbDQacTrtdHRsolQCVY0xOjpBJFJOa+sG9u7dw8GDYzQ3g8Xi\np1CoIT4Xp5jpRhZl7PiQtNM4lQJWj59gUENFYM/sGNdWlbPeYubY1BT9qkoCCY+oJyGLWOxuzBY3\nKw0melOzmE15zIkY1WVOmuo7cDmdTB86hKSqhEMhZEmiyu9HU1V08TiRyDijQRibC5G1GVi8uAVF\nUd6xP4goilz/yU/y8P33MxOPY9XpiOTzyH4/m7a+ozF0wKWdTXMv8HLm9v/TNO2Xl2Id5eXl3PP1\nrzM6Okomk6G8vJyKioo33EdRFF54/HHW19fjOhvm8zmdbGlp4SfPPMPipmYGh8bJDE1TTORoaW1g\n986dxNNplhgMrDyba7PY7RzomiCRFPC5FyMIFfSeegm3FsSpA1UYJSGmUEzLiOdk8vkkkUAaRdCj\naYuwuRvICy+RLRVAUHHbdIi+Wlas3crRo8exhk/Q7KvEIM/HaFNpaHGoBKxzTE3tZnh4GEkqx+db\ny9SoxBMvjdLmD2DXNMSz6janaZgMBixGI4XZWYrF4ns67fWd8OoBWy/zT//0L3hsETyO+ciA1+PD\n6/ExPC3S19XF9muvXdj20Ud3cuRIkOrqtUiSjhMvTSBLM9i1GQTFjE6XwGPMkcGAaJTIZAM0GvwU\nspPorG4CiTyZUhZzKoXVYmdOb6A/LVPCTUa1okpOrDoDJZLo5Bn81iKWigry2SwwX3+j1+dx+Jpw\n1qzFhIQQTSOjoJOzRJIiUUkj1RPl/vtfYOXKSu655zPva5H4fuCBB2DTJmhqutQree+49Vb42teg\npweWLHnz7T8ISJLEHXfczosvHuDgwWOUSgrr17fx7//9PxOPx4nFYjgcDmpray/qObFv3yFstjZa\nWxvRNI1dux6lru5KgsEgu3fvo7d3BklaQl/fSVIpjWQihUV0g2ajWEqjI4tMjmDKRCJvQcWOQctj\nV4ycDs5hMRso8/sps1h4cWKG1Q3raG1ZhMlgXEiNRPY/zKeu3cTU0BBrXjXXyet0Mjo3h+lsVfaq\nxYv5/dQUpQofWR0MzghInhVcdvll/O53xxkZmeQTn7j1HR+f6upq7v761+k7fZpENMqSmhqam5vf\nVar3UkZGntI07QeCIMjAAeCSiBEAnU53zp3xGxEOh3nsN7/h0NNPM+VwUOX3s3rJEqwmE6IgMNR3\nhnTfONVmO4Ikczo6zlQoRvnSJkKJOG1mM6l8nvF4kr1DU4yFbaiKlUSyyODgEKZoP3UYcdsdZHLj\n+CgymD0GUgMmqxdFLOGy+vB42imVcmQNemLxAHqXQO2qZlatuZrx8T4kSaa6to1AIow5n0UG4qUi\nJp+XDWtXcuMdt3Dffc9QX9+JIAgsWraG0/vzjMwGqFRz1CkKgXQayeGgzOkklkph9Xov2hyC95pQ\nKI5Bf34PvaKaeXU569zcHMeODVNff/m8cZuqYjLZQalGjk/gNKhoyRzxXIqMVqSg2hELSWRzGIPD\nhK+yhtzIELWim0Qxi2Isx6G3kS2MEyxZsNs7KKQnMCGiFz1EinmC2iyNDgfesykUr9fLhg3tPPDA\nTkRbFZMzZ3AWS+gNGrFkllhuDskmUJ+PEeh6iZPCWg4dOsLWrVveo6P5waNUgn/8x/ni1Y8Sev18\ne+///J/ws59d6tVcPIxGI9u2XcG2bVec87jT6VzogLzYDA1N4nItolgsMDU1yNDQAF6vj1QqTyqV\nwWSyEghEURQ9mmZD1WbJKkVk4qhqjpQWwCZkkKgikgtQRxi/KKOTXUCaRS47U6LIVdu2MbtrFx6X\nGYvplXqaTDaJy2FF1umweL2MR6PUnk2TyA4HoVKJCquV/okJsqpKbWcnDr+fp5/soXPHvOeKyWRC\n0zS6ug6ydu0ITe9CmVutVtasPX9w4TvlUg7KGzv7VwUoXap1vFU0TePYsWN8/+/+Dk8qRWU+zzKT\nicjMDLvica7bsoXxmRlKkTirW5oxyPMKsVxROBGJEO4fpqGlnq7hYTITaTTNx5mgAUUrJ62EcJsd\niMoIHsECcoJsMU2Zy0G5qwJ9eJLDiV4s1jKc3nry+fkvqE5nIpMRMZkkwvFRFK2JQGCQdHqYxYvr\nUcNT1FTVk07FUZUSNqWEo0ykvkE6/IgAACAASURBVL2d/v5hTKbyhfxeVVUl2RUr6Dq0B0XSExwd\npaa2lm3r1hFLpegOhbjmzjs/sHbhS5a0cXjvJJl8DrNhftiToirECjFWvMoPJRKJIIrzroyJRILR\noSEikQjp4BzFcIhOUxVCSUNvtjOey5PVK+iM5YAep9NAXlMpR8BtdJDXmUiVzMiiFYuaJ58vIOlz\n5BAQFAmraALBwUQxwrKGhnNqOK677hqMRh3/8A8/J5B2ki6O4JFkJrIRdEaBbdVLEVTwWsyEJofY\nt8/5JzHyBvzsZ1BTA5s3v/m2Hza++tX5aNDQ0EcrKvRuKJVKnDlzhp6uLuKzszi8XkqlPKnULL29\np0gkdGQyIhMTIaLRMRobm+Yn+IolFKWA0einVDSiZE9jlgcplvJUawoCeiRVJCfE8GgiIgI2WSKr\nyQSjMWYQGYzEcJSXozOUCM+NIoomNK2AKGXp2NhJ0mymqaGBYeDg7CypdJqC18vn/t2/o6a+nmAg\ngMVmo7W1lWeeeYGO5W7Ky18RaIIgYDSWMzDw7sTIxeb9UDPyZ8D7vrZ97+7dPPKjH1ERjbLI5+NU\nIMCuA4doaWggE48zND3Nvu5u/O5ykvkMetmOgIAsiQipOKcnihjLFrF/OI1T9lBt1SEKRnKaGUG2\nkMtNYBZyGPVmVC2NThcjr6pEUyo6IYHf72Ttxk8wOwvR6BixWDfFokY43IvZXEZLSyeSpEdRJvgP\n/+HL/OpXjzM6aGBo4gy1NheyABPBM9gqFrFx61a6u0+jKOdqwKbmZkSpwNKlm/G47AydPMnRcBiH\n18s1n/vcOSOlPygoisLw8DDFYh6Do8CZSBCPwYKgaYQyEdpW1nLFFVcsbG+321HVDJFIhGMvvohb\nFFlcUcEzQ6fRlUocn53FWgDJaMHl9jGaL9DcvhZLeIpSNonZaEIVCkTzCWR7DWosRyh0hlwuj4ZA\nNKWgk9wUhSw5oYhsMKLonVx+zfZzcriiKLJt21X4/VX87GePcPA5lchcANkssrWuHbNOTzidpsrr\nRU3PkYiFL8HR/WBQLMJ//+/zaZqPIg7HvD383/0d/PjHl3o1fzw0TWNsbIwzZ4aQJJH29haqq6vf\n9vN0d/fw4IOPcWzfCSyUaPbbWNVSQ3J2lpcGXkKnX4HX24KmGRgaGsJodJ+1SPditWZIpeKUSiNI\nchpZF6XGJEA2j1t1MlnMUCKLHgWLqCMrCGSUEslShqRiRvL6OHimgGZz0FxfQb3RiB6BEhpRUWTD\nbbdRW1fH7iefxKAo2CoqWLFsGduvuw732bTNqwWG0ahHUeLnvUdFKWIwvL+65/7oYkQQhHLgV695\neEbTtM8IgrAO2AHccqF9v/3tby/8/YorrjjnovFekkwmOfrss1QZjVjsdqLJJMmSkYLi5fhQmpKU\n42RxD8tWdmBIayiKkYl4CL0gkMxnmStoeGvXsGjxFnq7ExTTJoazs5SMblQlTU3ZZaTTp8gIOYKp\nYSpMJT5358cwmQxEolEGMxmuX72Ovr4S09NJ6upW4XRO0dW1E6+3Gb+/kh07LsflcjI+3sPg4Cg3\n3HAF3/3uD+mJxDgxMYzLLnH9bTfwyTvvoKKiAk3TeP75kxSLNeh08zUgxWIeUZzjyis/Nb/Njh2U\nSqUPbMtnsVjkV7/6Hb29c5hMZVRVtdLTcwSLuwy32861S1Zy5523n9PmVl5eTkuLl9//9kmqdU4c\nFhv5Yg6rJcfVTe0MhcPMFkTsdh8edwVL9EYaLt/BmVMvMXDkSerwEjPqcct6ZEkhkTiDIDgxOZox\nqybEnJdiAUriDK7yalRphiuu2s7ISOCC72HRokX81//azM51f+CX3/se+dwsyUSMQCGHqtMhzIyT\nKEa5svWK9+iofvD48Y+huXm+XuSjyte+Bu3t0N3NGw6O/KCiaRqPP/4k+/cPYjBUACq7dp1k27bl\nXHXVFW/5ecbHx/nFL54lMKmjwbkIg05HMBrmyJkJbty4gmcP/R5neZFodAxBgLo6M8Vijv7+fkql\nUerrV+F2f4aenh7AitPewPTwb2lEQxQ03CaZYDGKUedGIYdR1FOQM4gFF7LBxHheQc5ZqPF3ECjN\n0bFyJYGxMZweD9ds3sySs4U/n777borFIpIkvWHdx5Il7Tz//CmKxdoFo8tCIYeizLJ48ZXv4ohf\nfP7oYkTTtFngvBJbQRD8wD8AN2kXalrmXDHyXlAsFjl9+jQDA2NYrWaWLVtMZWUlMzMz2AHN4SA0\nOUlgKobNXI3NDKF8Hs1tJVtVw9otazk8G8KUNeOtrEcpFUlNjpLKGVm3egM6nQ5/bQ06XSMz06ex\niwVMJidDQyfJZCLY7XZCuhLrOurxVpRRVBQyuRyrN23iqh07+PnPHyIcHmNkZIJcLoZeb6auroZl\ny5pwuZwAlJXVs3v3AWTZQH395TQ1bSWbTVEozLF87cqF4tzKykpuvHE9jz/+EprmQtM0JCnGTTdt\nXNhmfoDU2xMiytnheBdrkuO74eTJU/T2xmho6ASgoqKe9vZORkdf4C//8q7XzS3fcsu17HrkEbL5\nELm8DoO+yKpmC612Gy6Hg6iix+VqQSfrORGdxWZzUdeymNUbahgYCFKwOtDF5zCWUujNMmkcZG21\nuCU3s7MjoPciigqaYZyG+kZUVU8mk3nd96HT6WhobMJW10FgcIpIcByDwY/XUkk8nmGaLDan+7z9\nNE37QIvJi0E0Ct/+NuzcealXcmlxu+Gv/gq+8Q146in4gGZbX5fh4WH27x+irm79wsVZUerZtesA\nixa1LoyveDNeeukoJlMt0+MvUkjkKJQk0DTOTIRZ1hjAY7Owdv1yjEYjsixjs9kolQo8//wvGBsb\nQaezk8+n8Xr1CIIeKLJs/ZVkzuzHoRlZ1byKwdEuZhI5huIxKmTwWm1Y5TJ6EwmMdZtpaNpIPp+j\nu3uQG2908hd33rmwvmQyycmTp5iZCVNZ6WXZsqUX9Ep5+dyvqqrixhvX8cQTBxZ+50Uxxk03Xfam\nc77eay5lmua/AGXAw2frEK7VNC33xrv88cjlcjzwwL8xNpbHYimjWAyzZ8+v+djHNuP1eihoGotq\najh09CilgoTdLFEolcijILh8LOnYRCwWZ+V1Ozj2h50kImEUVeT03CjVK7bT0bEcVVUwGArodCK+\nsmoaGiy88MJxBMFKY6MLl8uLzdZCVJfkSDxOeUUF6668kmXLlyNJEvfeeyfbt48xMDDAmTMDHD4c\nYNmyjdjt9oX3oWkaIyNjtLZeSUVF/cLjpVKR5547wIoVy7BarciyzPr162hra2V0dBSYt4F/O61v\nr2Zubo7dTz/NcE8PoiTRvno1W6666h27/F0Mjh3rxe0+V3AYjRZMphoSicTr7mez2Vi9YhEdZ62O\nbWYzo4EAJw8exKSqrFzRzsnuYWaSRZTKRkKhARobLdx555+RSqXo6eml73Qvk/39HE/mKcgdSJof\nQRUxGOIYjWaSyXHs9g70+kV0d58A9ExMTJw3uhtgbGyMX/ziWRYtup7RgQAKsxg0mYwSxeYvY3Hr\nFrq7J7jhhgxmsxlVVTl08CBHdu8ml0rhrqjg8muuofVVRncfFb79bbjllnn79486X/7yvM/KE0/M\nD9L7MNHT04/ZXHVOlECSZGS5jP7+wbcsRoLBKJrmYzKQoMzsx3HWkG02mmbnoW7KvRYSiQBVVa/M\ntspm06xbt4RvfvOz3H//o4DEypVLSaeD6PVRtm3bTs+heqTJSWIzUer8leiKZzAYTSxe1sGxniFi\nBRHPkluprV0OgNmsw26vYPfuY1xzzTXIsszU1BQ//envyeddmExOurqGeOGFo3zxix9fuIFUFIUD\n+/dzbO9e8pkMPr+fTdu3881v3rXwO9/Q0IDT6bwIR/3icikLWP/sUr32hTh69BhjYwr19a98yQqF\nah55ZDdf/epnmM7nKQwNUdfUxK7gANF0lFg2g7u1gw2X3YSmqeTzOfw+D3KZj0hpmrLaWm66/kqC\nQTexWAyr1caqVZ0cOnSQRCJHqdSMxZKlslJHU9NiKiurKS+vJRIJUN2o8OlPf+ycNYqieLbHvoGt\nW7cSj/8Lrw1ABIMj6HQSXu+53hK5XJozXd38w3/+z7g9HlpWrOCKq6/G5XK9YwHyMslkkl/98IdU\nFApsqa5G1TQGDx/m1xMTfO5LX3pXbn/vhtcJuL1pEa4kSXSsW8fEnj0sO2t93+z30+XxsLevj+79\n+ygAtpoaNm/t4LLL1tHW1oYsyxgMBjZv3sSKFct5/OGHeX7XfjLJWdy+Sly+aqqrnXR1HUOSiuh0\nMsHgfmpry6iv38DDDz/FX/zFF89b3759R7FaG7BYHOjM5eja15COjZPOzFG/cjPt7SuYnDxOMBik\nvr6e3c89R9+zz7KsqgqL281cIsEf7r8fvvCFj5Qg6e6GX/4Sensv9UreH+h08E//BPfeC1u3zk/3\n/bDweuf6m/3fa6mvr+TgwRM4PI1kExmMeg0BAaO+RDxtpGVNPe5KjbGx4xiNHgqFNLI8x+c/fwt1\ndXW0tbXR3d3LSy8dohQfxq4zMXGmj41XX81oXx+lM2cw5HK41i9he3MzZr2e6g2zPPz7fsrK6olE\nhhEEEVk2YrGISJKD3/zyl4RGRzly6BgY61mx4XocDi9QRTg8xaOPPsO9985HT57ZuZPxfftYUVmJ\n2eslFIvx6I9/zK333nuOZ8r7kfdDAev7gmPH+vB6z72L1uuNRKNF/sf/+GcEoZbDPX0omTniShFT\n2XJamtpYtXY9oigwMHAAozJE2ZyNjy1bhtrRQe/oKLv37SYQUjAbGtBb3NS3t9LS4sdmS+Dz6VDV\nelatuhpJeuWjsFgcjI6eYnZ2Fq/Xe8GUh06n45OfvJaf//wPRCIedDoTuVyY+noTTmc7+XwGWZ5v\n+8rl0nTt/R2e2AxbNmzC7nAwfOIEv56c5K4vf/m8MH4oFGJychKdTkdjY+Ob2jWf7OrClkpR//Ik\nSKC9poYjo6MMDQ3R1tb2Tj6Sd82qVYt56KFjZ0/ceQqFHIIQveB8nVezaetWHgmF2N/Xh10UCSST\nTEej3LV9O5Ki0HfyJIGxMY4/9QRaKobT6VwwFysUCvz6vvuwx2J8tnMFOw/0EQgdYSIxgd1bhap2\nUVbmI5vNIst2xsdDaNo+/H47p0+fpqmp6Rwvl3A4itXaDIDJZEYUvbhcdUQiU9jtZWdbkHMYjUbS\n6TQn9uxhY13dwiA8j93OIk1j/7PPfmTESKkEX/wi/Lf/Bl7vm2//UeHqq+drZ/76r+G7373Uq7l4\ndHS0ceDATlS15lVpmhKlUuicAXlvxoYNa7nvvt9htbaQF10EI2GK+RkslhyCvYqNW69i69Yt9PX1\nMTY2g8dTTkfHjQuTaiVJYs8zT9G/axdNZjM6kwnyeQ5MTbHlk59kxy23UCwWcbvdiKKIoiiMjo7y\nb7/9EkePhpFlP5qWQxCmWbduJUf3/QFfpI7V7e3MaQYEVeHU3t+x4oqPY7U68XiqGB8fJplMoqoq\npw8c4LK6uoXhmD6nE0VV2b9rF3V3333xD/xF5E9i5CyiKJynoBWlRHd3D52d22lqaqO9fT1zc3Mc\nPvwUZnOJymovodA4mcwsNmsCd0wjHo/zRG8vsiyTmJvDm8uxvnM1M3Npxmb7OHXgCJ++59N86lN/\nRigUYmbm4XOESDqd4fnnn8VojPB//+9vsVrh5puvOs/EC+adYL/+9bvo7e0jkUhRX7+E5uZmjh8/\nwUMPHaG+fhWiKDE9OYAhOktTlXfBBrm1upqjY2MMDg6yaNEiYP4O4qmndrF3bzfgAhT0+l185jPX\nvaEPS2BsDO8F8pZOnY5gIHDJxMjy5cvo6Rmgr+8QZnM5pVKBUinALbdsftOhXAaDgU/ceSfT09NE\nIhEO7t3LDU4nXpuNQ889R4vNRofbzeFIBGc8zsM/+Qmf/4u/YKC/n50PP8zwwYN0Ll3KosVt2GwW\nDnf10TN9Cld5iXS6jLq62wmFwkSjaVTVwdDQCQRhhEwmR2WliyuuWM3mzfNeJ3V1lZw4EcRkstLc\n3MKJE0N4PB1ADrPZQjA4Rk2NjfLycqampjDDORN5AbwOByfHxlBV9SNhjvbd74LVOh8F+BPn8t3v\nzhexfuYzsHr1pV7NxaGxsZF16+o5ePAgRmMFmqZSKMyydWvH23Ig9nq9fPazO3jggReQZT1GSxGf\nr4nW1uVksxM0Nc1bqS9fvpzly5efs6+mafzmpz9l8sABtvn9hDMZxmZmON7fT8vKlTz76KN8/a/+\nauHmsqenl0ceeY6pqTC5nBlJKqeiovpsxNTGc8/tp9GUJGbzcDLfA6i4rQ5ysTCTY720L9n48isj\niiLhcBirIJwzpRvmBcmZsTHe7/xJjJxl7doOfve7E9hsr6QspqYGURQdNTUNwLzqLSsrY/PmG8jn\ne+jsrCCbzdHaeiVdhw+z98E91IoiLRYL8ViM0cFB9C4XgqJwy+VrCEQi9IyMMj06RDabpaqqipYW\nD8eP76GmZhEWi4tnn32GTCbMli23Y7O5SKcT/PznT/KVr9gvmPd0Op1s3Lj+nMdWr15FMDjH/v37\nEAQHo/37aLKrrO1ccU4KwCHLBAOBBTHS39/PCy/0UV+/EVGcP2HS6QS/+MUTfOtb975uhMRVVkaw\nv5/y16R70qUS9kuYm9TpdNxxx8cZHBykv38Es9lNR8fWt1W4VVVVRVVVFS/u3EmFx8PY0BAOUcR4\n1vzNLopIoog1k+G+f/1XDHNzuCIRlsky8TNn2DkywvKWFi5b3cHq1UtI+f2UlAmCwRmiURWrtZZI\nJIaqNqLXlwiFSixZsoadO7swmUx0dq5h48a1HD/+C8bHCzidPmprg/T0PIHH4yGROENlpZFPfvJW\nBEHAZrORUZTzREcincbqdH4khMjJk/Cd78wPxPsIvN23jc83f3zuuQcOHZqf8PtBRxAEbr75epYv\nH+X06QFEUWTx4vXUno3Wvkwul2NgYIBkMkV5eRkNDQ3nnRPXXLON/v5pCoUyysrqAI1AYIi6OvMb\nznOanJwkPjqKS69nKBwmFgxSq9fjNxgInD7NUDDI9N13U1NTw8TEBA8++DRlZSsoFo/T2Hg1MzOz\nhMMTiKJEsWjHZPJT7Y3i9TQSCk2gqnmSyQgOo5nxufnuu2BwjObmCiwWC1arlWSxSCwWQ6/XL/xe\nx1MpHB+A8OCH4Gv49kin0wwNDZHP56murl4Ye79y5Qr6+obp6zuEXu9BUQpEIj0sXtx6nuuoLOsA\n4znuf7ueegpTKkVTXR2qphFOpajM5TgxNoaroYG8InByJEGxZCMyPEumdD/tbT6io/0Y5oY5cOIP\nZHVGVJ2XHTs+sSCKLBY7yWQNhw4d55Zb3loRliiKXH/9djZu7CQYDNJ11Eixt/e8YtJ0qYTzVZbC\nhw6dxOGoXxAiL79+OGxneHiYjtfpCVy2ciUPvvgivlQK59miz5m5ObI220WLigSDQY4cOEBgbAx3\neTmrN2y4YLHna5Ekiba2toV1hEIhjhw5gizLNDY2nlP8+0Y4PB4SoRDZVArDq9JaL9vlh2IxJsfG\n+OSmTUxIEpOTk3gEgb7uMzwTKFDtqWQyNolng0ZjYyPDw6dR1TLS6QSxWBi9HrzeSrJZmVQqRmVl\nBy+8cIi1a1cjyzIec57je35DKpaiaDBw++3XsG3blTidTqqrqxdEpsPhoH75cnpPnmRxdTWiKFIo\nFumdnWX97be/gyP/wSIeh9tug//zf+BNMnEfae68c94I7n//b/jWty71ai4OgiAs1NRdiJmZGe6/\n/yHSaROiaEJVT9DYaOezn70No9G4sJ3dbueeez7J00/v5vTpF5EkgbVrF3HVVVvesEswlUph0+sZ\nLRbJB4OssVgQBQFFVSmVSpRKJXpOnqSmpoYDB45hMtVhNtsoFovodFba21cxNtZ19trURiikEo5N\nMDTQj6oqSPosFkucyWCAXFULY2NdOJ15brxxfvDQyPAw3WfOMDE3R4XFgquykpYlS+ienqZ+0yYO\nHz5MRUXFOb8X7yc+UmJkcHCQBx98nELBAcho2kt0djZy003XLdxFDw8PMzo6gdlsorp6Mz/84UMU\ni4WFHm2AcHiCLVvOzb1LqorRaCSTyxGemSEdDpMvlbBoGlP9/RztS7Fy6TaiqTRNtbVomoGHv/dj\nvnTzdrZs2YSqKOw+coT9MxpOp++c57ZYHMzOzrzt9/tycarP5+NnZ84wl0jgOXvxnQqHydrt54iF\nXC6PTue4wDNJFIvFCzw+j9fr5ca77uKphx5CGR9H1TRsVVXcfvvt55zk75TJyUke+uEPqRJFGux2\n4v39PHTiBNfcccdbNmLTNI1nn32eF144CXgABUl6nttuu4rly5e96f6rL7+cJ++7D4/dTmR6GrvZ\nzGQigehwUO5ysa+/n2rbvHNrRWUlfcePMz40ToW1nBkEJKOVoruOdMYCTFNTU0Gp5CWZzJDP69Hr\ni/h8NQiChqKUMJmshEIZCoUCv33gAaryeTbedB3K2XbvrtAsTqfzgoLs2ptu4ilBYN/JkxiAvCSx\n+tprWfVhicm/DooCn//8fF3EHXdc6tW8vxEE+P73obMTbr4ZPuylRJqm8etfP44kNVJX90pkdHj4\nFHv37ufqq8/13PD5fHz2s7dTKpUQBOEtWRX4fD4yoojD6yUwOLhQIZzJ50lLEovb25kaGgIgGIxg\ntc7XKFZV+ZmZGcZs9qDTmQERg0FHPDqKOaeSVWM49WbCkVlqanw4W71svqqTxYtbaWtrw2g0cvr0\naQ787nd8dtMmjvf2EpyaYnxwkOcnJ3E0dBA+EuXo0TSadpClSyu5/fabL1ljwevx/lrNH5FsNssv\nfvEEdvtyLJb5C7Kqqhw8eISmph6WLl2KKIo0NzfT3Ny8sN+OHet5/PHDWK11GAwmYrEZ3O4c69ef\n68nvKy/HsGwZw6dOEQ4EcJhMpCwWJEVBEY1IWRNTgQBFq432pkbOHH8Wv8FDMh7H7XQiShLLmpo4\n0LeHeDx8jiBJJMIsWVL5jt+72+3m5i98gaceeoj+iQkUTcNRVcXHXyMWOjpaePzxXux2z8Jjqqqg\nadE3dTJsbGzkS9/8JqFQCFEU8Xq9F019v7BzJ01GI5We+XXZLRYc6TTPP/YYbW1tb+mHYmRkhOee\n66G2dsNCjU4ul+Ghh56jrq72TVvdWltbid16K7sffZSBQoHe8XEqamrYuGIFpycmMPv9WM56rOj1\neqqamxkai1DIJAkKIkgSSy67GUUpoSh9hEIB5uYUvN4KAoHTGAwyLlc72WwvTqePRCJCWZmTyclJ\n1FCI+rOeKJIkYbNYqEunOXbgwAXFiNFo5OaPf5zE9u2k02lcLtdFEYXvZzRt3vY8HodfvdZi8U9c\nkMZG+Ju/gbvvht27Oa8z78NEIBAgHC5QW3tuiraysoUDB46cJ0Ze5u1csL1eL82rVxMPBlHtdkYz\nGdRikbiqsnTjRvz19aTORo7r66s4fDiExeLA729ifHyEcLgXTcuhaWlGRvbi1sOqRTsYmzjJxNw4\nHruRrtlZ7vriF7n66qvPee3De/bQ5vHgtFrZ2tlJKpsllc3yvZ17aa24DL+/HpgXZSdOHKO+/hjr\n13e+jSP4x+cjI0ZGR0fJ561UVLwSlhdFEZergUOHTrF06dIL7nfZZRuoqqrg8OEuEokwGza0sGLF\n8vNSHss7O3ni5ElqW1uxqipeh4N6VWVPMEgwrTJXUkAUuXLzJqxWK9lUFLfehKqqC8/h9niochkZ\nGeli6dItSJJMODyFKM7S2XnNu3r/9fX13PuNbxAOhxFFEY/Hc942K1Ys4+jRHkZHT+J2+ymVisRi\nI2zZsgSfz3eBZz0XURQvupFOPp8nODZG+2suunaLBSIR5ubmKDs7YO6NOH68B4ul5pxiYaPRjKK4\nGBgYZO3aNW+w9zyd69axbPlyBgcH6TpyhJnhYXoyGTouu4zrOjt58J//mWgyictmw2wy46tuY6JU\nYNW666irW4QgCEQiAerqmrjppu38/d//gGQyxJIlTsJhjVism9Wrl5LPZ5mb6+Wuu7aTSqUwXUDU\n2S0WxoPBN1yv3W5/y2moDzKqOp9qOHIEnnsO3udDpd9XfPWr8JvfzLf8fu1rl3o1fzzmIxznqy1J\nkikWL95otB033YTN5eIH4+MUslkqKyrY1NGB1+fj8NgYV916KwDr1q3m8OEHCYdNeDxVrFmzma6u\n5xCEFI2Ntex54RButRxNU3F7q3G64dp1iygpCvIFfg9ioRAtr6rZs5pMxNNpUBxnoy3zCIJAWVkT\nBw+e/JMYuVS83pdRlnXk86+fggDeMA/5Mk1NTay+7joe/tGPUHM5kno9OYOBT9x4I/F0ml8/P0pj\nR8dCC5jVXUl8YAS3+9w6jNrFrfhXNtLXtx9FUWlq8rNjxycW5g68GwRBeENRYTKZ+OIXP83x4110\ndw9gMhm55ZZtl6wbBubvTARZpqQo6F51l6JpGiVNO6cF9o3I54vI8vnbiqL8himo12I0Guno6KCj\nowNN086J/tx81108+uCDGKJRcrksPekA7etvo77+lVRSPD7FddetZ9myZXz/+/+LI0eO0dc3Sig0\nSzKZQZbnkKQin/vcNbS3tzM5OUniAj4JoXicyos4MfODSqEwf2c/MjLvLPoR0F4XFVGct8vfsGHe\nCO1VQeEPFRUVFRgMBbLZFCbTK9O7g8Exli+/eDkqWZbZsnUrLW1tPPzTn6JLpQgWiwxMTbF827aF\nrkiv18u9936cJ598gaGhF9DrZW6/fSNXXrkZo9FIQ81POfz405h02f+fvfeOjuM687Sfqs4RjW4A\nDTRCIxCJAcxJpEiKoiUrWrIkW9LI+hzkMOPxN9m7nvlmxzvnTLDXk2fHXnstW7ISrSzREhVJUcwZ\nmcixATTQ3eicq+r7AxDMv9Cc7AAAIABJREFUqGSSAEk85+AAqND1Vt2uW7+69w0sKLGyqHwdFqOR\n9qEhdAbDOcd1VVQwMTBA8WmOqplslgwiJtOZ1crVai3J5Mfv8y4X14wYKSkpQVHeRpKyZ7wd+/1D\n3Hrrxfkybrj+ekrKyvjJP/4jNXl5VLpcqFUqzAYDRlsbWdlHIDCGJGUR9ApisY2EJGFVFFKZDO0e\nDzVr13LnPfcgTUdEXO5U3gaDgeuuW3dOhM5soVKpWLh6NR0HDrD4NM/4vrExnAsWzIi7j2LRoipa\nWg5jtxfOLJNlGUnyU15+TrWCj8XZ01But5tv/cVfMDAwQCaToajlFMeODTMxMYxKpSYU8lBTY56J\nXrJYLNxww+aZaruKokw7s2lmPru4uJi8mhqaOzupcbnQqtV4fD68osi2dXOjjWaLQADuuw8sFnj7\nbThPHz3Px6C6Gv7yL6fysuzadXVGIGk0Gu6+extPP/0mGk0xBoOFcHgciyXC1q0PXvTjuVwuvvXn\nf87AwADpdBqXy3VOX+VyufjqVx8kk8kgiuIZ081btm1joqODVUVFM5F78WQSH3DrdP9xOus2b+a5\nn/wE9eQkztxcEqkUo6EQdpcJjebMBp2YGGTjxrnnJCR8kux0lxNBEC5UsuZTs2vXe7z5ZhNmc9l0\nQjMPLhd87WsPYLiIPVlLczNvP/cc1mnnp6AgsP6WWzCYTDQ1daLTaVi+fBFarZY9b7yBd3AQrV7P\nso0buW7jxjnnWHS5EIRzc73AVDjei888g7+rC4sgEFcUdEVF3POlL31sMZLJZHj88V/T3R3HZitB\nkrKEw4Ns2FDJHXfccrFPBZgSF93d3Zw40UoqlaGhoYaFCxd+YoGZSqXYu3s3zQcPks1kKKmuZvNN\nN81Egl3pXKjdP4z2drjzzinnyx/84Or2d7gcSNJUMrQHH4Q//MPLc8xP0+6/K6Ojoxw71ojfH6aq\nqphly5ZiNps/esdZ4Ojhw7z/6qvYFAVFUQir1Wy7914WX8ClYGBggPd27mR8aGjmeaLS6Nix4zBG\nYxl6vZFQaIzc3CTf+MaDH5lr6VIw3ebndSacNTEiCMLDwNcAHfBTRVEePWv9RRcjAD09PRw92kQ8\nnmLhwkqWLm24JM59sViM/v5+FEXB7XZ/aMNns1lUKtWcDLe6nHxY56QoCh6Ph0AggMViwe12f+Kc\nGZlMhubmFpqaOtFo1KxcuYja2tor5rrLsowsy1edWP2kD6WdO+Hhh6dEyFe+cgkNu8bo6ICNG6eu\n7+UIvJoNMXKlEYlEGBgYQBAEysvLP1atr0wmMzW9Pd2vDQwMTPs8xqitdZ/X5/FyMVfFiFpRlKwg\nCCJwWFGUVWetvyRiZJ4PZ2JigraWFhLRKO4FC6iurr5sD79rpXOKRCK0Njcz6fPhLC6mfuHCizoy\nd6XxcdtdUeDf/m1KhDz77NSDc56Ly7PPwne/O+UMfB4f94vKtXK/X0wymQynTp3C09eH2WZj4eLF\nF8Wf8HIxJ8XIjAGCYAB2Koqy+azl82LkMtPU2Mi7zz6LUxTRazSMJxKYq6q470tfOifx26XgWuic\nPB4Pzz/6KLnpNBadjslkkqTNxv1f+9rvXLDwSuXjtHs6PRX5cegQvPLKfEKzS8l/+2+wb9+UQ/Cl\nfIG+Fu73i0k8Hmf7L39JdniYPIOBeDrNhCBw60MPXTE1pz5MjMyqq5IgCP8D6AQe/aht57m0xONx\n3n3hBVY5ndSUlFDmdLKqvJxkTw8njh2bbfOuChRF4fXnn6daq2VhaSmlBQU0lJWRF4ux+803Z9u8\nOcvo6FQis/HxqYfkvBC5tPzDP0xF1dx1F8Ris23NPB9waP9+VCMjrCgvp8zppK60lGV2Ozt//etP\nFBE4V7nkYkQQBKcgCLvO+nkaQFGUvwWqgEcEQTjHi+j73//+zM/u3bsvtanXNAMDA1iyWQxnhcqW\n5+XROi9GLgqBQID4+DgFZ42AuJ1Oepubr4oO5WLz2muwYgVs2QIvvjgVOTPPpUUU4f/+XygpgU2b\nYHh4ti2aB6D16FEqzsqpZDEa0SWTDF8FjXTJnQEURfEC58ROCoKgVRQlDWQAGThn6Ob73//+pTZv\nnmmuFCfOea4NjhyBv/s7aG6Gp56CGz5d9PU8nxK1Gh59FH74wykx+Pd/P5Vq/yrznZ5nDjGb0zTf\nEwRhF7APeF5RlMgs2nLNU1ZWRkStJp5MnrG8b2KCRVd5TZPLhd1ux+R04p2cPGN5v9dL5ZIllz2n\nzFzj+HH48z+HJUvgC1+YCjVtbZ0XIrOFIEz5j7z1Fjz+ONTWTjkPd3RMORPPc3lZvGoVvV7vGcvC\nsRgpvf4jy3VcCcy6A+uFmHdgvfy0NDfz9vbtFIgiOrWaiUQCa00N9/7e7807sF4kRkZGeO7nP8eW\nTmPRagmmUiRtNh545JGPrI9ztfJBu7/6Kpw8CTfeCGvXzucOmWvs2wdPPgk7dkzVAKqrg7w8sNmm\nhIssT+UricWmfqLR3/7+4G+PZ2ofuDbu94tJIpHgmV/+kszQEHkGA4lMhgm4ahxY57QYmW0b5pln\nnnnmmWeei8eFxMicngGcq0LpSiYWi/HTH/yANQUFM2mGAU7297Po9ttZd911s2bb1f6mJMsy/+ef\n/okFgoD9tCIq3SMjGJcs4Y577plF62aPq73dLzWJRIL/84MfsNJux3haAsfmgQEW3HwzGzdtmkXr\nLsx8u197fJhv4lVYhWCeD2NwcBCrJJ0hRGAqaqbt+PFZsuraYHx8HDkUOkOIAJQ7nXQ2Np5RwXme\neT4uQ0NDmLPZM4QIQHl+Pm3zkXDzXCHMi5FrjAspU0VREOcjai4pgiBMTa6ff+XlNWaeq4YPvaev\nxqp381yVzH9TrzHcbjcRjYbYWVEz/T4fC1etusBe81wMCgoKUOfm4guFzljeMzpK3fLl8w+OeT4V\nZWVlxLRaoonEGct7Jybm7+l5rhhmszbNIuCngAS0Kory+2etn4+muUS0trTw1vbt5MNU1EwqRW5t\nLfc8+OCshpdeC3PIw8PDvPCLX5CTSmHWagmkUlBQwP1f/eqsVNGcC1wL7X6pOXXqFDufemrqnlap\n8KVSWBYs4L6HHroskXCfhvl2v/aYk9E0HxTKm/77UeA/FEU5cdr6eTHyKUilUqTTacxm84c6CwUC\nAdrb2qYK4lVVUVlZiWqWYymvlc4pEolw7OhR4pEIpRUV1NbWztkHxuXgSm/3dDpNKpX6yHvuUjM5\nOUl7WxvxSISyykqqqqpm/Z7+MK70dp/nk/NhYmTWomk+ECLTGIDgbNlypRKJRDi8fz+djY0ogkAi\nm0WORtEIAuaCArbefjuSJHGqqQlFUahdsoTq6mpEUcRut7NhuuxpMpmkq6uLbDZLSUkJmUyGQCCA\n1WqlqKhols/y0pDJZOjv7yeZTFJYWEh+fv55t5NlGY/HQyKRoKCg4JxcIB6Ph+YTJ0hEo5TX1LBw\n0SJ00yn1M5kMx44cofnQIbKZDLXLl5PndHJ41y6iExPIgoAsSSxYsOCSn+88U0L96JEjtBw+jCLL\n1K1YwZp16zAajZ/q89LpNLveeosj776Lb2SErChy/e23c8fnPnfBz1QUhbGxMcLhMHa7/Zzv3cjI\nCD6fD7PZjNvt/kRiIjc3l+s2bPhU5zLPPLPNrOYZEQThTuDvgKOKonzlrHXzIyMfQjwe51c/+Qnm\nyUlK8/I4tH8/nuFhCmtr2bZ2Lb5QiFePHKGooAAzkMpkUJvNVK1fzx2f/zyRSAS1Ws3Y2BivPfUU\nxnQaJImDp05hM5lYWFFBTJaxV1Vx9/33f+oO+5Nwud6URkdHeeGxx9BEImgFgaAsU7t+PTffdtsZ\nfhuBQIAXnniCjNeLVhCIAIs3bmTrTTchiiLHjhxh74sv4tJq0arVdHq9iIWFfPOP/giTycRzTz5J\nsLWVBQUFqFQqWnt62NfWxhe3bsXlcJCVJDo9HsTKSh78yldm3qo/eGCl02kKCwtnxM2HIUkS4XAY\nvV6PwWC4VJfuknA52l2SJJ557DGS3d1UFRQgCAIDExNki4r4vUce+VjX+Gxe/PWv6XnnHdIDA9hE\nEX8ySWsoxMKbbuKPv/c9rGdFTcViMV565hkme3tRZbMEMxmqV69m5bp1yLLMkb17mejsxCoIJBQF\nVUEB9z788FVbzXl+ZOTaY06OjAAoivIK8IogCP8uCMJnFEV56/T1p9em2bJlC1u2bLm8Bs5hGk+e\nROf3U+d2EwgEECMRri8v5/joKN7JSTSKQqKnhzePHqVApUIHBFUqDp48yammJoyKQjKdpqO7m7tW\nrcJVWMiR1lbc8Ti6RAJXfT35+fmc6u/njR07uPsLX5jtU74oSJLES7/6FVWiSL7bDUyNfhzdt4+m\nkhKWLV8OTAmCF598krxIhNLp7SRZ5uju3TgKCqiprWXPK6+wxuXCGwhw8NgxtOk03hMn+NvhYe76\n0pcYb2tjbXn5jMjQJpOUpFJMhsO4HA7UKhULy8o40NODx+OhpKSEiYkJXnnmGRJjY2hEkaRazcZb\nb2Xl6tUXPKeW5mbee+015GiULFC9YgXbbrkF/VmhntcyPT09RLq7WX1ayd9FZWUc7++nvb2dZcuW\nfaLP8/v99B0/jjw6ihZo8ngwSBKWTIYDL79MRW0tX/ryl8/YZ+fLL5Pq7ibp8zHu8RBNJHjz5Zep\nW7QIm9XK6PAwn7vxRsqcTmCqTMCO557jS1//+u949vPMM/eZNfd9QRBOnyQPA+dMmp9etXdeiJzJ\nYGcnRo2Glt5eTnR0kEwmEQQBiyzTMTjIqa4uOoeGqEsk+KzDwQ0OB1t0OkaOHmXs6FE2lJVRo9Xi\n9Pk40thIMp2mt7eXGrsdh16PZ3AQgBqXi4GmJqLR6Cyf8cVhcHAQIRQi/7TpFlEUWZCXx8kDB2aW\njYyMkBobo/S0YXSVKFJbUMCxvXsZGhrCKssk02kOHTpElSCQAxSo1SRPneKZn/8cUzZLJBIhEAiQ\nzWaJTE5SmpPD2NjYGTaZBYFgMEg2m+X5xx8nPxJhvdvNqtJSVjkc7H3hBfr6+s57Pj09Pbz95JMs\n0um4rrSU61wu/MeO8epzz13cC3eF4xkcxHEevxyn2cxgd/fM/4qi4PV6GRoaIpVKXfDzgsEgxOMk\nYjG6h4dZqFZTZzRSpdWSF43y9H/9F4FAYGb7cDjMYGsrw4ODiKOjLDabkf1+bjQaob0drdfL9XY7\nBw8dIhSLAVP5ZyYHBvD5fBfxSswzz9xkNkdGPisIwp8yVa23D3h9Fm254vCMjtK1axeVZjOpRIJT\n/f2MBoNMhkJY43GGx8cRolGKT/P5kNJpFokigyMjAGQliRKzmYlwmEGvFyQJjUqFRqUiMd0Ri6KI\nWhBIJpOYzeZZOdeLSTqdRnseJ0OdVksyHp/5P5FInHc7o05HfHISURSRBYHuwUGM6TTHRkawZDJo\nZZmEIDAeiTApijTk5aECsmo1skpFNBbDWlp6xmfGFYWcnBz6+voQAwGKp0diAPRaLeVmMycOHqSi\nouIcew6/9x7VNhuW6Wk0tUrFotJS9re3Mz4+TsFZJcevVYxmM0lJOmd5Ip0mPycHmJqWe2X7diLD\nw2hEkZRGw6bbb2f5ihXn7Ge1WokrCmPBIEXT90jX+DhKIkGBSkVqbIwf/s//yZ/+1V9RUFBAMpkk\nGg6TnZyk3G6na2ICuyxTaDIRTSYJTEyw1OkkP5Wid3iY5bW1AGgEgXQ6fWkvzjzzzAFmbWREUZRX\nFEXZoijKZkVRvqwoynz6yY/JxMQE0ZERSnU6yqxWFrlcVBuN9PX0kFWr2VRcjNtgwCbLBDOZmf3S\n6TQmtXpmnjbXZiMGGIFUOo3GaMQfjxNKJHBMi5hoIgEGw1Uzb11UVESIKSF2Oh6fj8qFC2f+dzqd\nRAThnO1G/H7K6+pwu90ktFr8k5MMjo9TpijUmExYRZFVJSWY/H6GJybIMxopt9spMxqJ+f0cDwbJ\ns9tJptNkJYlTw8NYKyooKSkhHo9zPs8Fi9FI6LS37NPxjY5iPyskWBAETIJA6Kx8JtcydfX1+EWR\n8PSoA0A8mWQsm2VRQwOSJPHcY4+R4/dzndvN6tJSVubmsufZZ2dGpRRFIR6PI0kS+fn51Kxdy0gy\niSBJjIZC6FIpjCoVNrOZGpcLWyzGa9MjVHa7nRignf4+JdNpDKJIMpPBYrGg1emYjEYxqtXEpm2M\nJZNkdLoLOlfPc3UxPAx/+ZfwF38B0wPT1xRzujbNPOenu7OTKpsNw8qVdDc3o5NlQpKExWBA0GoZ\nDocxOxwIOTkkEgm84TAGjYaELOMXRcorKwGw5eaSV1bG6wcOYEkmUdJp9g0OUlFSwkMOB2OBAN2h\nEFu++MU5HSL4SbBarazcto3DO3dSabNh1OkYmZwkaDJx62mRCBaLheVbtnDkzTepyc/HpNczGgjg\nURQe3LwZnU7HLQ88wL/87d8yHgiwODcXXyyGPicHUaslXxBQ5+fTnslgiscRgXFRxFRZyWtHjyJH\nowgGA+tuuYWvPvAAgiCQn59PmKmH3ukhot7JSUovUDMov7gY/+goRQ7HzDJFUYgqyjVbBfh85OTk\ncNtDD/H69u3o/X4EIKbRsO2LX8TpdNLT04Ps81F22qiUQaejwmLh2P79JOJx3n/jDRKBAIJWy/JN\nm7j985+nra2NY088gSkSoUSrxWC1YrTbSVmt1LrdDI2M4Pf7cTgcbL7lFrYfPUqJyYRJr6cvmUSl\n1WIuLMTmchEIBvEGg9RVVzM8MUF/NMqWL35xVnP/zHN5eOcduP9+ePhhUKth3To4cABO+zpe9cyL\nkSuQbDaLKAi4Kyqw5+XhGR7GEw5zXVUVaaeTtUuWYNDr+YUkMdbRgdNiwWqxIBsM9Pl8bJkuNy3J\nMpOiiGIwUONwYNRoWFVfT+foKC82NbFhyxZuv+8+qqqqZvmMLy7Xb96Ms6iIkwcO4A2HKb/+em5f\ns4ac6eH6D9i8dSuOggJO7N1LJBTCvWQJD27cOPOmWl1dzbf/+3/nbzo6iKtUuPLzMRqNdHg86A0G\n8u127ty2DX84TDqdJtrSguT18vC99yIoCtFkks6JCTweDzU1NbhcLlxLlnCysZGaoiJ0Gg3DExNM\naLV8du3a857Lui1beOmnP8Wg02Ezm8lks7R7PJQsWTL/Rn0W1dXVuL/7XYaGhlAUhZKSkhkn31gs\nhuE803JWo5F9ra14WlpYnJ+PrayMZDpN6xtvkIjF+Iu/+iv+t1pN20svUeJwoDca8csy+rw8ivPz\nGR4enqk59NlbbqHtxAk6Dh7EolaTdDiIqFQoajUr6+sZ9vkY9PvxarVIVit3PPAAldMvDvNcvezd\nCw88AM89B5s3Ty2zWuE734FXXpld2y4nsxra+2HMh/ZeGI/Hw3P/+Z8Iw8N0HD+OKp0mFI8TU6u5\n8/77WTQtHmKJBP/1m98gZ7PIkkTdihV85q67GGhvxz88TDyVou3UKVY7HNRWVZGXn48oikiyzP7h\nYb763e+eE554Pk6dOsX+/ScIh2PU1rpZt27Vp5rWuRJD/RRF4d///u8RurtJ+HwIokhOfj7dp06R\nV1PDoupqdh1uorW9i7BviFXFhZRVVdGwahUarZZT3d0MGwx860//lJKSErLZLIcPHuTEvn2k4nGq\nFi9mww03nCEsgsEge3ftoquxEbVWS05xMVGvl3Q4jKJSsXDNGrZs2/apwlVng7nQ7qOjozz3n//J\n+rKyM0aluj0ejo2Pc2NlJXmnidV0JsOzR46gLyxjMhRnuKsVSyxImdNJRXk5S6qriSUS9AgC3/iT\nP6Gzs5NDu3bhGRzEHwigV6uxmkz4IhGMej02mw2V2Ux8YgKHRkNWURDtdj734IMUFhbOxiW55MyF\ndp9tenpgwwZ47DG4+ebfLk+loLYWnn0WPiSQ7opjTmZg/SjmxciH81ff/S7Hn3iC1TYbBq2WrnCY\ntnCY9YsW8cD995NIp2kbHaVq0yZuuvVWYCocsbenB4BwJMKb27fTsmcP661WRJOJXLebFWvWoFar\nOT48zM2PPEJZWdmH2vHee+/z+usnyc2tQq83EQiMotf7+eY3H8But3+ic7qSOqexsTEGBwYQVSq0\nWi27nn+eAkXBajQSiMXY19mJVaOlfyhFaDQLikTE38HyQguLKwroDwaxmUzkaDTsDwQoqqzEWFLC\nrXffzZKGhgvmdYlGo/zqxz/GHo3idjrJShJdo6NoFyzg9nvuwWAwXHHZXOdKuz//zDNMNjZS53LN\njEoNSBKhcJjb6+pmtstmMrz37ru8drgZS8l6rGYn3kSEVMbDTUucFBr0eEZHCRoMfOmP/xiNWs2u\np5+mzuHAbrEQiERoGR9n4z33sGbtWgRBoLe3l1d/9jNWFRfPVNTuGBri5OQkdz/4IDW1tThOm4q7\nGpgr7T5bTE7C+vXwR38Ev//7567/4Q+hrQ1++cvLbtolY87mGZnn05FKpRg5dYr1CxeSTqWIAMtK\nS1mUyfD28DAvt7ZitdsRTSaaDhygu6UFTU4O4f5+8kWRVCrF06+8wjKLhSKNBqJRiMcZTyQYdDqp\nqKwkJkkfWSslEonw9ttHKStbj1o9Na9dXFyNxyOwd+9B7rzz1stwNS4viqLwzhtv0LZnDw6Viqws\nMykILLvxRpBlAl4vVW43N3/nO/x/f/mPeOMRskoEhyMHtSqMXmukf8xPIjhB5YIFZGQZKRymPBik\na3iY3ZEIxysquP+RR847utR48iTGYJAs8Jtdu4jH4+Tn5yOFwwRuuOG8ETfzfDzuvOceDhQWTo1K\nJRJULFzIAzfeyMtPPUUwGsU2HU3W39dHf3sPelMhNaV16LUGCtMp2n0G3mptoSFfT3FeHoscDt57\n4QXCySQ3lJVhnk5Gl5eTw0qNhuN79rB6zRoEQeDkoUNUWCwzQuRkZyddbW1kQyEOShKH7HbW3nYb\n68/jOxQMBtm3ezcdJ0+i1mhoWLuWdRs3zueZmcNks3DffXDrrecXIgBf/jJUV8NPfgLXQlPOi5Er\nkMnJSeR4nCqHA8NZzm1l0SifufdemvbsoQQodrkY9/l49bHHqF64kEVr1nDg+HEqslkMqRQOp5PR\nsTFqtVr8oRA9HR1MKgqqoiLGxsbQaDQXDOkdGxtDUawzQuQD8vNLaG09wZ13XqorMHv09vbSvns3\n68rLUU1na02m0xx55x3+nz/7sxkB4fV6cRZVISoC0cFujDoN40oB/lgAJRShWJwKKz02MkKD201R\nbi6o1WRkGXssxp633+Zz9913zvE93d2Mj48TGRpigcWCMScHfzDI0e5umhob58XI74BGo2HTli1s\n2rLlDCfidVu38t7TT7Nco8Gg09HX1cVoMo2tpAq9dkpg6LU65EgcnaLlrs2byZ0W8mOBAE/v28et\nZ6X8txiNREdHOXr0KFarFb/XS+X0E2d8cpLu1laW22xMACV5eTgLCzm8YwflFRVnlGiIxWI8/bOf\nYY9Guc7pRJJlunbtwjMwwANf+cp8Jeg5yl//NQgC/K//deFtCgpg2TJ46y24447LZ9tsMS9GLoAk\nSfT19REKhbDZbJSXl8+ZiBKj0YjRbmc8FsN9WsREKpMhplbj9XhwyjJulwuASCDAstxcBkdGCITD\nTIyOYtJqyRFFNIC7tJSW0VEiqRTe3j4K9YWUilU8+eQBRPEN7rjjelavXnmOHVPTAefmQEink5jN\nV1ZK8o9L64kTlFksM0IEpnKBOIDmpiZy7XYymQxWq5VIJEBH7wj+wRAWQw6yIuJDJJtJMkkKZyaD\nw+GgdjoXiAAosky508nepibke+4552GiMZno6upim8uFenpdgdmMKxiku6UF7rrrcl2Kq5rT/UYa\nli4lmUxy8M03EdNpmiIREjYHCwprz9gnGgxSUKhBo/5tt2q3WECS8Pn9OE/L+dLc28fbBzoYiO0l\nHo/j93WyqkDHrWtWMzg6ilOtnsr3w1QEmFajoUCtpqOt7Qwx0tTYiCkcZsF07hoNsMTt5nBvL319\nfVed8/nVwEsvwVNPwdGj8FGPlM9/fmr7eTFyjRIKhXjssWfxeiWmsnDEKC7W8fDD982JxF9Wq5X1\nN9/M7scfRyUIOM1mouk0+4eGaLjzTsLj49ScNsQvSxJqUcQmSUxGozjMZgYFgYyioJJlqvLycNvt\n7Dx1CkPhItasfwCDYeo80+kkL720l+LiIlzT4uYDSkpKyM0VCATGsNunnOxkWWZ8vIt7772KvK5O\nI5tOoz3P2+aYP8Dux17BWbgIQVAjy35OnDiGKK7GmFsKqRQWfQnDviPUVJRi1Wu4e/Nm2vbvB6br\n0aTTLC0pmZpHF8WZB2I6nSaTyWA0Gil2u5EzmakIjWk7IvE45pwcMqfl0Jjn4rJm7VqWr1hBKBRi\nwdq1vP3EMwTCoxTap0aiUtkMoWyE60sLZqZjAFQqFZb8fE6NjJCfl4coioxPTvLsnnZU1kX09kpA\nLolELY83v4ZWPVW6IZvNMuj3Yy8tnYny0qhUZE/LGwQw3NND/nn6JJsoMjY6Oi9G5hijo/CNb8Cr\nr8LHCXa7+Wb4l3+59HbNBebFyHnYseMtJietuN2/vZE9ng5ef/0d7rvvc5fFhmQySXd3N+FgkHyn\nk8rKypmRmXQ6TX1DA80NDexuaSEyMEA0EqGyqgp1NsvY+DhFFstv56idTlq6ukgw9RbvrqxkoLeX\no14vy/Lz8UajjMXjjOj11NdumBEiAFqtHq22iMbG1nPEiEql4qGH7ubxx19gYGAYQdChKEE2bKhl\nxYrlF/2aBAIBerq7kSSJispKnNM1PD4ukUiE9rY2oqEQxW43VVVVqNWf7BZYsHgxB5qazsjrEU0k\neLdliPWf+TqFhcUAjI8PEo0eIy8vg8ZuJDSZYjg0hDEnl9Kltdx2yyY69+7FL8vI4+NERZGc0lJK\nCwro8nioX72aZDKMaeRFAAAgAElEQVTJu2+8Qefx4wiyjLmggGXXXUfhwoUMjY+jzmaRFQXBZKK6\noYGJjxH5NM+H84GTtyRJlJSWEolEGB0awmKzUVdfT15eHpu2bqW7vR3v2+/TMTiGjJmIFGH5dS7K\niqaEg6IotPf3c6yxkXA8TpdGw+D+/Sx0uzne3Y9kLkeWc8l3VEyLziIUJcW7vS0sry3Gn05z08qV\nuKen3RRFYSyZZGXtmaMxVrudUG8vZ+fZTcgy5o/w+Zrn8qIo8Ad/MCVGLhCpfw61tVORNX19cLXP\nwF5zYkSSJJqamjl2rA1Jkli+vI5ly5bORCBEo1Ha24cpLd14xn5FRQtoatrLnXemLnnIpNfr5blf\n/hJ9OIxRFGmSZd53uVi6bh39XV0cef99irVaGiwWPOk0Xr+fpRUV5NlsFEkSXX4/u/v7uXvDBvRa\nLXl5eWRtNvonJlguy4h6PXJBAeacHLIOB72pFJmiIupsefT2jiBJbbjdpTMOrBqNnmg0fl5bnU4n\nf/InX2dgYIBEIkFhYSF5eXkX/ZocOXyYfa+8gl1RUIkih2SZpTfcwJZt2z7W/v39/bz82GPkZrMY\n1Wq6du/mkNvNfQ8/PFPlNp1Oc+rUKUb6+7HY7SxctOgcJ9L6+npa6+o41tFBcU4OWUniYG8vtqIl\nM0JkcjLI0QMHCPvSqLOjuKvKaGiox+ncjE6nIZls4eZbb6V+8WL27d7N+2+8QbHZTEJS+PeX3kJj\nt/H1W/L46X/8B+NHjlFgtFDsykcXibD3pZcoXrgQi9NJgcmEShQxmc0cHxpi7e23X9yLfo3xm1de\nYccTT2FKpLHmmOjwjlGen8/y2lqGMhkO7NzJ7Q89RH93N8lgkOIyJx6vl5I6N/d+8Q+or6/nuSee\n4OjAAF6Ph5PHjmHVaFi/bh21lZW0DA2hdrsxxhRGWkcxGrOYzXGMRhMAFosDs7mSL3/7EdoaG/Ge\nPIkhGERRFIYiEUpXrqT8tEJ/AA0rVrD9wAGcicTMy8dEMEjMaKRmOp/QPHODN9+cio555pmPv48g\nwA03wLvvwte+dulsmwvMWmivIAhrgX8GZOCIoih/etb6ix7aqygK27e/wMmTEzim30j8/kEWLDDw\n8MNfQKPREAwG+dGPHqesbMM5+w8O7uF73/sGJpPpotr1AS0tLRzevZvdr79OicnExpUrser1jHg8\nvLlnD6q8PPJMJoSxMcwmE+OA1NdHkUpFymik3O2mPRbjug0b2NXTQ7HLhUUQyMgyhqIiSqqrObp3\nL/FIhLoVK9DpDXR19KLXqxkZi5JKWWlr82A0LgDCrF/fQH5+Pv39x7n//rU0NDRckvP+gAuF+vl8\nPp74l39hdVHRTLRBVpI4PDjIHd/85jkd9Nlks1l+8qMfUa/VzkREADQPDFA+LWhisRhP//zn4PXi\n0OuJZzKMShJ169djy8nBWVg4MzqVyWRob2+ns6kJtVYLWi2HD4coL19MIpFg12uvkfSP097fgdVU\nRF15CSq7nY033sjExABr1uRy222/TSoQDAb5px/9Jx0dYbQaAxrS9A93IY10cuPC61CrtSSTISwW\nmbKacqipITAxQdfR42hkGZ0jly2f+xxbP/OZM3wdrhTmQojnO++8y3/8j39ksa0Mo95I+0A7xugI\n5aUuVm7disPhwB8O81p7O/V5eSx1u9Go1cSSSV49coSYIIAM5rx8VBqR9154gWUmEzUlJSQFgbBW\nS0N9Pc8cOEFB9SZ2725Cq10OZCkvL8Jmy8HvP0lFhZFvf/sO6urq6Ojo4FRjIwD1y5ZRW1t7hg+R\nz+ejp7ub3p4e+pqbsavVyICYm8sd999PcXHx7FzMj8lcaPfLhSzDihXwN38Dd9/9yfb96U9h376p\nXCRXOnM1tLcfuEFRlLQgCE8IgrBYUZSWS3rA/n4aG8eoqFg702lbrQ56eo7R0dHB4sWLycnJweHQ\nEw77sVp/OxQ/OemluNh+yYTIgX37OPrqqxTrdNRLErZ0mt+89hpWtRoxGkUZGeHU0BD2nBweWLoU\nbzTKRFsbC7Ra7AYDg/E4akGgRK2mZ3CQ6uJibnvkEQB0Oh3JZJKXf/lLygUB0WLhpcefJyQ7WbHu\nBg4ePkEiEWHbttXE4xlGRsbRavM4dOgIixYV43ZrqTstz8LlprOjg3xRnBEiMFUQzmUw0N7U9JFi\nZGRkBHU0iu2snCkLCgs5efQoW7ZtY/+ePQgjI1S7XBh1OsLhMN27dvH8/v1s27CBFlnmwGkjKQ0N\nDTPirLe3l337nkeWJbq7upjo66NUr6fYKJPMDjDUn0Dtz6Ep30RFhZbrrrv5DDsGBweJRE0YJC+5\nMT8GlYaeviGITpJOJ8mxOjAZLfgDo0T8k/QcPY5ocqNybSIry0hCHK8vjCzLc8bJ+koikUjw1BMv\n4jYXkZc7NfWXTqcoU+cghSOMeTw4HA5Mej0TnZ3cVFFBatp343hrKxNHj5NMKdgLauh4r4Xu2Ci1\nWigzGFCCQRZUVDAejfLm7r3odYVUV6+gt7eX/v5uzOZq+vu7KS7WUViox2bTUFJSgkqloqamBq1W\nSzwex2aznSFEDuzfz6HXXiOPqQ5eD+QuXMjGzZspKiqaj6KZY7zwAuh0n86/fO3aa8NvZNbEiKIo\n3tP+zQDZS33M3t4BtNq8c94eTSYnHR19LF68GEEQuOuuz/Dooy8Ti7kwm+1EIn5gjAcf/PwlsSuZ\nTHLorbdYU1rK5OQkg34/A+k0w6OjqHJyGAtmiCYsRCU1neOT+Ly7WFyQS3YyiFdUkwlGGVeBNDJC\nPJ0mHA6zwGqloKAAg8GAoij89J//mXqzGYfVys7DjdgM9dgEA76xSSTJjtVazcmTh9m06bMMD3fT\n39+H39/Dhg0r2bZt66wm0spmMqjO88avPo9D3/mQZfm8IwaCICBLEuFwmO2PPoo1EqGzqQlZq2V8\nZIQcWUbUaNBqNKwuKqJ1cJCdO3ZQVVuLSqXCZDLx9tv76O+foL+/i/f3HCE4GkMTT9Cvz5KRQ9iN\ndgJxH0PDXRRPCnzzH//tnKmf9vZe/CNDlGYzFNgLCYXD5BvyCcYDePpayctzISBgsdhp7GzFay1l\nw6Zl+H0ewj4PGoOZgwf7Wby49ZKPXl2NjIyMkM1o0KmnvuPJVJxEMgpqHeFQFCk71TUlUimikQhv\n7t2LQaUilsnQ3tlJeUxNWBEZiXgozrWTiI3TH46Qp9OijcfRWSzkOxz4mjsw1lRiNFq59dYH+M1v\nnmR09DiSlKWwsAGn08hNN60gJycHn8/HY489RyAgIAh6FCXE0qVlfP7zt+P3+zm0YwcrCwsZ8fsZ\n83rRa7V0HTjA+o0bzytEJiYmOLR3L33t7ZisVlZs2MDSZcuuyJG0K5F//Vf47nenpl0+KYsWwdAQ\nhEJwVsWKq4pZ9xkRBKEByFcU5dSlPpZer0OWz314ZbNpjMbfhshWVFTwne88yOHDx/F4Rqmvd7Jm\nzY2XxBcCppzm9JKEoigcbGxEjERwATbg1WE/MbmCKrWJpCKRlacKtgUne7GIAg5nOQMRH/3JOAui\nUey2HCSdjvHRUfx+PyUlJUxMTJAJBnGUlpKVJPpGQ+TnVIEAnWOjpNMK2axCNNrPxMQw5eX1lJfX\nMzi4lw0b1s968qSKqioa33yTKlk+o6MdiUa5YdGij9zf5XKR0umInjavnpUkujweajZs4NnHHsPi\n97OmoABfLMaBlhbSgQALXC7iwSB7DhzAftNNJKJRXvynf2JRbR2pbJbGoUkWrvg8NTXrGOkZoVjq\nIxlpxS4IBOMq9MbFVNndLHAIdPp8ZKQ8BgYGz8lMq9WKRMYGyCudck4UBAGTxsCk3kwwFiKRjGPU\nm4gkopyaDKASKnjhFz8iT05R5ixC0OsYS0TZsUOcFyOfArVaTY49D58vACPdxMeH0ScTDMfGUIQs\nS6e/Mx1DQwTGx9lQWEipxULv0BAdXh8ThiJErQmjYGIoECARj+CQs8SCQbJqNe+fOsVNK1YQySQp\ncS9Co9Gi0Wi5++6v0tfXRmvr+2zc6OaGG65jwYIFKIrCU0+9RCrlwu2emmpRFIUTJ05QVHSQnq5O\nOo8fZ4/XS54oUldQgKxSMRwOs+PFF/mDP/qjM87P5/Px9I9/jEtRWOFwkEil2L99Oz6vl22f/exl\nv97XGkePTomJz33K2Ae1GpYvh2PHYOvWi2vbXGJWxYggCHbgP4BzszsB3//+92f+3rJlC1u2bPmd\njldfX8vOnYdIJuPo9VPptjOZNKnUKA0NZ2Y2LCgo4PbbL8+NajAYSCkKPR4P2kgEs83Goc5OUokE\nyYwBgwrGUikKgbQgIGPCr2iQpThvjfQgiwIVgkggEsWXTLBgxQpuqKvjrZdf5ivf/jaCIJBKpxmf\nnER7VpK0QHCUUDJLOGxDEFS8//4+GhoCOBwFlJXZP1WNmY9DIBDg+PFGRkd9uFz5rFix9ILblpSU\nULVuHYcPHKDEbEYligyHQuQtXkx1dfVHHkur1XLTvfey88knsWWz9A0O0jcwgGI0slivRwwEWLF4\nMb7ublpHR1mk1eKRFXrHA+gsJoplmad37qS5pY1QTEM4WoRGb2LEL5HJHCAWm2CwZT+GdJJSETKZ\nCGm5GJMGgskkGiCpUlFYWMeePUdZvnzZGfY1NCwingzQ1t9GLB5EpdIQTqXJN1mZ0JloCvkxxCOM\nhMYw51rwDp6iJBHDrjYSGhihuLKKWqOFrqOHyGQy81VePyHFxcUUFVk5PmhivOMYC612SnLyOBKb\npMhiormpiZCisH3nTspzc+no6qJTrycdS2AV1fTGJzCLVuxSgmDGT34GEiozGSWXWHSSRGyCp44c\nIXdxA6JGpK+3BUmWCPs8DHcchdQoh9+IE/EOsunmmyksKsLrTc4IEZgSqIWFNfzqseexRUbIer0o\nvgBjkoqAP8TWJfUssVho3b+f2COPnDGdfPD99ylSFCqm85PotVpWGo0c2LePVevWzVd3vsT87Gfw\nrW9NiYpPy+rVcPjwvBi5JAiCoAaeAP5cUZTx821zuhi5GNjtdu69dyvPP/8OkpSDIIhAkDvvXH/R\nnb2SySTj4+M0N7dx8mQnkiSxcuVCNm26DovFgizLDAwMcOzYCWKxKDFRpL+5mbTPhzmZZIvLRefA\nAEpWYEwaJyrZSar0+KUEZnzYyOJAJECWsCxQIGoQFcgIAt6uLjobGxGrqwmFQjQ2trC7aYijqVEM\neogngyjKCMmMQiAaJS9/EZHIMKIoolZXcvDgQW64oYp77vnWRb0mHzA0NMTPf/4CslyAyWSjq8vD\nvn1NF9xeEARuueMOuurqaDt5krQksXHJEurr6z+2j0RdXR22b3+bf/27v0OfTnPnpk2UuFwca23l\nRFsbS2+7jf7BQUKBAPqkTDytJSBpKMx10doxyBHfEIasngVGC+nxXgYlNaKpgd7WvSQH9mJNBCnR\nG0iqNAgqPeGkjkA0SlajQa3WENFZ6Onx0d5+EptNy8KF9ZSVleF0OrHZbORYRNJdzZSb80jLUVLp\nSXwCTIpOkjEZozFOjk2kPKui19uO1eBCLaeRJImO1ibKFhRRUe3A4/F8pA/NtUAkEqGrq4tMJktZ\nWekZicLORq1W89BDn6P58H50eU66o2Gy2STLljWwYuUyjnd1cWxkhJV5eVzvdjPh8zEyPMyOsTGU\ndBaTIpJVvHTJWXKkBGnRSEY0I8gq9GoHitrCmJKgUFAYO/kKoZTAkN+HJZ3AYdCwsqKMWCBA46s7\nGGtpwb1+PaJ4btJASVLo7+jkS+sX8devH0SUS9GJVpLZCN0He7mpzk6F283AwAALFy6c2W+gs5Nl\nZ9W1UatUWBQFr9c7L0YuIek0PP88HD/+u33OmjXw619fHJvmKrM5MnIfsAr44fS85fcURTl4KQ+o\nKAp6vY6ysjyGhkaoqCji5pu/+KEd1SdFkiR27drDe++d4NixDiKRNA0NK6irW82hQ4N0dj7N1752\nPy+99Do7XnwLZcJHjiKTUCL0B73YgkE25eaSFARclZWMdQ1TkIKkGCehaFALASoVAZUAhYKaqAxN\nyGhUWvLVIqJaIZTN0tPcTDQa5Z//4Qec6kmzdOU9dLe0IaVSxJIifeP7CWe0ROM2YvEJBEEkN1cm\nLy9CVdVq1qxxX5LCXIqi8PLLb2Ew1JCbO5UdITe3AL9/9EP3EwSBmpqa3ylc0efzoYtEsektjI5M\noNVoKC8qoqelhYGxMVatX09jcztqkxmDLo0BHWq1EX8ghSqVpUbvoMg6lalIHh+hOfw+VSoj2fAk\nhUi4rAb8iTBpiwWtkkSfseNXBMzmIurq1+L1niQWU/GTnxxm2bJJcnP3cf31i5DTce5YvZoxi4W0\n30+eSoUurPDORIjl192FKFrpaz9Be2sLDk2IXCFBJOlF0ecSTMSZTCZRNElSk0Pws5+x7dZbWbps\n2SVztr7cJJNJTpxopLm5E51Oy+rVU0L0Qv4Ora1tbN/+JpJkQxDUKMpB1q+v4bbbbj4jguP0/UtL\nS1m1op4R/zBmtRqL3oGiVZEcH6e4oICUJGFVFBLpNPn5+XSMT1CoiOjUOcSVNKIsIUlJfEoKUc7F\noTcgq9SodFryzXrGUsNEugfIUVlQ5ZhxCWnq8ixMRKP4x7yEAhEyEgyndXQMvoyjbjlO5yK02t9O\nkfb3d1CZb+H1Y+2khCU4BAd6lRqD6MQb83LI5+f3rNZzfeIsFmLx+BkO4ABpRZkJa5/n0vD661M+\nHx9Rb/QjWbkSvve9i2PTXGU2HVifBp6+nMd8++1dvPNOKzk5FVitRXR3ewgGd/D1rz940Tru99/f\nxzvvdKLRVKIoAiUlLnp7O9Bq26ivX8nAQCPbtz/LazsOo4wOU51XgjPHgSRlkTOHCIfDiDk5JKJJ\nZAmsBpFQOo4CRNBSpKTRqBKYFAlZBhCxAKNSEpdWj06lJq4odI+NoddqGRIs6OVKOk80UrdyJbIk\nYY9WY/Ia8XqDhMM15Oe7sNlyEAQIBE7gcn2yZGKfhHA4zNhYhLKyM6dlPsjgerGPNTQ0hCAIuFwu\nfvX4MwS6JtAXLUCWJQ4f7sbtziEnN5eOvj5qSkqQdSaygg5BZaTevYTx8Qlichyr3oqg1yGIIvFY\njHy1AWvKg1ptQaU3ISezBENBbBYNCb2eNU4Hr3UGUOeUUVBSS3//MXy+Vhoa7kOSoKWlB4vFwp49\nT1DtSvHQ2tU4c3Px+f2ko1GGmjpZXr+QPFcpXY2dLHVVEPZ4CGbaWeMsYGR8nHgqi06dj1VvIJNU\nE1LraT/QSV4ySeP+/Tzw9a9f8W+9qVSKRx99muFhBbu9hGw2w+OP72LDhgHuuOOWc7aPRqP8+tdv\n4nAsn0neJ8sSe/cepqzMxdDQCAcPNgMC9fUVbNt2PQUFBSiKwoTPRzabRWcxkxUECs1mgmNjtKlU\nrKutpbioiM4jRyiUZXpGxnHrTQzFExhttYhqPZmIl5GkF4PFhKjVUVbkwqDT0T05xkQkyfKichRB\nh8acgyEYIjDuJZOM4fWHcZpzUUjTOzFCTvECQiEf/f0HsdurMRgsBINetNpxHAU2DjaN4C7eSMTn\nBwSQJeyGcsLZNKOZzMzIWCAQQFEUlm/YwN6nnybHZEI9PZI4PDEBublEo1Gam5unsylfminZa5kn\nn4QHH/zdP6eyEiYmrm4n1ll3YL1cBAIBdu9uwu1ej0o1ddoWSy6Dgy0cP36S668/N6/IJyWTybBn\nz0mKi1fS0zOAWm1CpdJgs9XQ3X2c6uoG1GoTTz/6Y4z+KC4BenwjHFckKkrrsOhyGRcGOT7oJUdr\nQlSryRjtCEkPsXSCeHaCAtLkIBJGYRAFPSCh0CODOZ3ClM3gE0UCBgOfq67muD+FwypiUanpaW3j\n+s98BlEUmPA3UVlZxciIjN3umHmbEoQChoZaqa+//ne+Hqfj9Xo5dvAgfZ2ddLZ1YTYvPMOR82Ln\nGzh86BD7duzApijIikK7z0+/X48zpwCDfkp4GgwWBgZ6KawsBYuFQ14vEYOBvf4Y+WojoeF+4iqR\nSYOZFVYj8XiWQDxOOBojJWVRIyGrAhh0FgJZSKQjONNG/J4Q6nCYXKuGnolGhOQ4OQYVBgrpOdVL\nLC0hijHWrFlJOi1y4NhzBJpbWFheCoJAWhBQNFZktZZoNIZOymLQ5VCY56K3p4XNRbl0BoMEYioK\njQK+RBzRrKdh0RYy2SSKpGCPRtm/Zw+3XuHVChsbmxgeVigv/614tdnyOXjwIKtXL6ew8EwR29PT\nQyaTc0YWYVFUoVLl8Wf/7/cwZATMFgdmp5tMJkpPzzP84R9+iWQySdDjYTwYJDM+jlGtplVRUFmt\niGVlJNRqXMXFSJJE89GjhDJpbIBoyUWnU5DkNDarCRM6wlYj2bREWIozHIvTm5WwW/LRa4wkZAW9\nzkBEo0NKycSiUWS9BRIxJJWaXL0Oz0gfzqpFLFvm5Pjh9/GOjFOzsJKvfe0LPPerXxGKREiLg8TT\ncWKygigasdmKmUymuP6226aiw37xC0IjIwiAIS+PvIYG9re1YRUEUrJMWK1GiUbZ9+STqIG3geVb\nt7J569b5CJuLRDIJO3fCj3/8u3+WSjU1wtLSAht+90fVnOSaESMejwewzQiRD7DbS9i9+yBdXYP0\n9XnQakUqK4toaFhMVVUVRqPxYx8jkUiQzQpotXr0eh2S5AdApdIiyxrS6SRdrfspzqTRav5/9t48\nyLKzPPP8ne3u+5b7nllZqk2q0lJSgTYkIQzIBowHA27sxh3ydHvcETi6e6L7jwm7Y9rj9vTg8LTD\nHtvCAgMyq8CAVJJAa2mrfc2qzMp9v3n35Zx79nPmjyxKSAgMDskIrCfiRuS9ee53MuOc833v977P\n+zwBas0KUdvC6LQ4X1zBkhXauESTXWQlGRfQLRPVdvilZAJfFjlfLlNyXZrABBAAmsCwKLIBNHwf\nNxhkuFBgpVZDLNepF+voSggz3o+q3owsC9i2xshIH8GgzszMZXw/jCAIGMYme/fGGX+Ny+hPina7\nzYXz5ylvbJDt7mbvvn00m02+/jd/Q78ssyseZ5o2zz76MLfc/f6ri8nW1uI/6Xyvh/X1dV7+h3/g\nYF8fwStkzqXVMp2qSz0RotSqongSHVWl0m7RTEv8H3/wB3Q6HU5eWiAaSZEIR9F0Fd2X8aMVgmGd\noAyXl5dQbA8HCV0R6U4EUGWPhuAhui7L7TZtQaBXEAgrCjsiIqFQlkBCoVG3sBsl2qZBOg1nXvo2\nQcmFRh3ZbhCSIBqNcnFtjZcrDUh1M+GFCNkBfD9DMhFkSfZ5rtFAUIIU5QJ6NI0lWBzad4hULEu7\n06DaLHFgYoTHnnmGUk1jYWGDWCzMO9+5n4MHb/q50iK5eHGBVOrVNgSiKAFpVldXfygYcV0XQXj1\n/2dZFidffAlno8qdN24zACv1MmvtOn07b+Do0ZPEYiHMzU0+vGcPxXqdaq3GgO9zsdnEXltjWhBY\nOH+eQr6HGSvEkhtBE1xGYj0M5XsRBOjYBsVQBymbxw3kuFAt4nsaqlFEFPKcW1tk9/guupM5VkQR\nU2vhOg5dpk7b6HDGdchGEniBIMVTz/LN9eP82u23kx++lrPz8/yXf/NvqDebWI1lQCEf6MeRFUzF\nxZSbjAzmaTQa/F//+T9zXSbDofFxRFGk0mwyMzfHr95/P6ZpAvDIQw+xN5+/KgLouC7Hvvtd+gYH\nfyJS+Nv4x/Hcc7B3L7xR1e59++DcubeDkZ97qKrK7OxZLl26TDAYZGxsgoGBHZTLa5w/P8OhQzuo\n13NcurTIo4/OMTp6lh07uvn4x9//Ey/M0WiUUEigWq0QiYQRBA3D0JBlCVl2MU2D9tYlfvmGa/nW\nd18goas09TaDrssQPoZpccKx0fKDVINhHNtkzVDpCYaR9Q6FaIRaOMyipjEMOFdcPdueR9b3iYgi\nwXSaGU0jK0nckMngJxKcnS+jSDLH187x+ON/j6ZpFAoyly4do1DYi21rtNsNfN9Flle4557fudpC\nu7S0xNGjp6nX24yN9XPTTddfNe76Qei6zveeeIIv/+VfEgd2jI5SS6U4+fTTEAgwEYnQfSUT8qFb\nb+Irz5zgxWe+zg2H7sbzVAqFV3ZjrVaLVqtFKpX6JxkTXjhzhpwgXHW1BUhEQsiWiUYP3zz3HCG9\nTjoSQsVlj5ugWq1y6exZ3rt7nLNzmzQ0i0wiQ9RpUkn5PLlWQWwb5D2TuGBSFkRSkTRbgk+6WWEn\n4IWDqLZFJBJBSSbp1TTslE/dKLO+JiLYNrLfS0BoExOiRDoadb/GdekkWTvIzOoqng84Hr2egKYJ\nNI8/xXo4TLk+RrneIdd9kI12ibq1TDyXpLdvL512m0xmm8vSMVtkEyG++L2n+cbzl0gmlsn3DHHD\nwf18+9vnKRarfOhDPz+y8aFQANs2X+c3DuVyma994QvUymV6h4e58dAhBgYGgOdwXefqxmNjfR2z\ntspornB1159PZGjXNjF0jfn5NfoLUbLBIAv1JtMVE92K0a5vEfZURicn+cgdd/B///03eWp6g5AU\nR45ey6bapNPYxNAtUsk4y3qJkQOT3P2hD/Lk499jbnGOgNqkIMO661APp1goLlOy2sxWGqxaEr1S\nEMdzAdgnisxaOmOZbkS9wbAbpFKr0VJVVs+fZ6hYJG1Z7Myneam+iCFLSFKWZFRmqXiCcGyC//qH\nD5GqLqHmk2wsrnPbbTeTSyaptNssLy5y6+23MzU1RdyyXqVGLEsSw4kE544ffzsYeYNw+DD80g9X\nEv/J2LcPrgjy/kLiX0QwUqvVeOKJo5TLIqnUOLYtcOLENM1m/Ur55BbK5RZnz67Q07OTQmEXzeZJ\nQqFJHnroEf7jf7x/20L85RPMzCyTTMY4dGg/k68xrTIMA8tSeeKJvyce34HjmNTrp4AWO3bk6XSm\n2HPNMLFclm8SXyQAACAASURBVJbTotZuMOLaxGSZju8RliV2IbDRrDJ0zc2cXbxAFogZHWKRMNlC\ngYGtLRqmCb5PQxDYmcngCwKb7TaRWIzkzp1MmCYFyyJ9hZx2zaDN2fkVBLVNq1LjXe/5GH193fzD\nP/wtJ09+nX373kcms50VKRT2cOTIeQ4dOsTU1CW+/vUjxGLDhMN9HDmyyYkTX+D++3/9VeRW0zT5\nsz/+Y05/5Sv0WhbhUIilapXK2BhduRzPTk1x6Nd//erxiWiU33rPO/nW2bPcfHOK8fEDjI2N8alP\n3c83vvEdTp6cRRQj+H6HW27Zzb333vUT7+YXFxc5/PDDhObnWY3HSRUK9PT3kwyJzK9P41fDREM3\nEoh51IwSfbkKv3pgP0989asYhsEdQ0PsHBxktVSiqXWotSRm1+NE49fjuW1ajk5HKrM/oCK4cFZX\n6ZEVVNdg1/g4bqOB5Dgc39ggCjQsi9FehaXKFqlogoZbJa6kMAybqGQg28v0hQcQfZtIPM50tcFk\neoCo77Poymg6VLQt5qstdo0dYKR/BFXvRzQncMQNhvaOUVmroXZ0HLeDIlc4OdPghaNTpIMT9ISz\n2OU23/vWd7nvIx/k5Mk5br21jKIozF6+jG1ZDA4P09fX95ZMz99ww17Onj1MJtON63p0Oh08z6ZW\nucjFpy8ymU4zGY1SPn+eL505w4fvv5877tjHk08eIx4fRJYDLMwdZyBnkiaEYXYIBsMICOA4XJ46\nSj6/m3Ykj5Iv8N1j6wTFJFVVpdkOYwsOXR5sVCpkEhPkGxqDhQL5XJYzc4usbIrM+2vIuLz3Nz7A\nv/3df8sXv/hl7KUq+3r3gqri2Boho8KmIrPkQWm+iUQ3ffE8KcmkYy8z4TToCoSwDJ358ho7M1mo\nqXznkUcJiwLjjoNgWZiWxU0jIyTCTc53NhH9Mo4jsiObREpcj+C5RBob1NY2MSqb1Irr/NKvvJ9E\nKESttN20aFkWr9f8HQwEaL7t/PyG4fDhbc7IG4Vrr4W//2dlWf7z4hciGLFtm7W1NWBb4Oq1RnYv\nvHAU3+/hnnuu4eWXz2GaAQQhx4kTR0gkPNbWGszNTWGaSer1Rbq70wSDEXzfwzRjnD59mmeeOYVp\n5shkJtjaUnnwwSd473vL3HbbK4Z6X/vat2k0ohQKKWZnXwJkQiGHj33sLg4dOkihUODTf/iHHHv2\nWQqGQcm12PB8BMsGwWfJE9nwYxhNn+LLT7EvHkYOhHC1JglFRm008DyPlCwj+T5hQSDousRCIZRI\nhK2uLu694w6eXVhA3thgrlSiVi6jtlqopkM2VqAVDrCyskGlUiOZnCSbNQkG14hGE0SjaeLxDOXy\nBlNTU3znO0fo7j5AsbjMuXOncF2XSETm8OEn+Y3f+F+AbXXTxx97jLnHHqPbshiNxUCWObaxweXV\nVa7buZPm/DzffOopfunWW68y+iVRJJ/Lcdtt7yTxA26zJ06UGRh4B6Io4boOR46cJRx+gTvvvO0f\nvQ/W19f5h898hslEgnIohFRr8MKpC2w6Di1foG4nCXmzJGKDgE8kpBMMSMTDYYLtNk3LwrAsoqEQ\nIz09OK7L3z3+Ejg5Ygr0dMfxVRVByKIL84yLBpfbbbryGTKJfiKizHy5RthxCeETk0Rcw6C4topo\nd9gXT9MJmzy/cZGAI9Iblgh5BsVakd6AgIeAhMRRrcNlXUBHJCKFiQojhAIh1rc2adQuIYVjJHv3\nkIh14Xuz2GKbsxsbDKdD5GMyz7x0DsV16aWBXFPRBYGQ0sXR51/i5lt38eLzz7N46hQZ30cWRU46\nDiM33cR7f/mX33Iy4mNjY9x11x4+9+CXKK60UHwHmyqFhMeBu+4iecXMMRYOE6pWeeaxx/jYJz/J\nyMggp05dQNfb3Hr7ILOPnENdmqG9uowST6IpIUpby8QTUWIbcU5cOs+xi0uk8xPMrzbR2nkkv4uO\nr/L4S3MkQjF8vxfPqDC3Os2xWQfXEwgGRW7ZvYuxvZP8/n/9AzY2Njhx5Ay7usaYPXcRyQUIs6la\nLLUvUve7EBkASSVRyNMVybNV92n6bQq+het75HqGcQSBjZZGtVFjNBYiHA4j+D6GaVLXNJKiSFBw\nGMsmqPlxpnWXntQIK5cPE+zUGYukwTcRqnVe+t73CA0Pc+311wPbc+QR38d7rYhgvc6OW275GVzl\nXzwsLkK9vi1W9kZh7144f37b5+Yt9pi+Ifi5D0bm5ub48pcPo+vbi1wwaPLhD9/DNddcc/WY6ell\nMpndhEIR3v3ud1Iul1lcXKbRiDA9fY5otBtJChCNZlGUIBsbFbLZ6pW2OpmjR0+h61kgzIULsyiK\nTE/PCN/97nEOHLiOWCxGuVzm5MkZ5uc1JGmcPXtuxDCarK6+zOGvPEysUeXc5cvItRqS7SC7ElkE\ncqLAmu9R80OU3X7ChPBRMRsGp9UmCirdnkuq0yHhOGi+z5brIgsCKc/nfMsg2rao+xZdO3dS7XQ4\neOed1GZmUKemSEYiTPT1MbNe5sxKh7WlJsGUhKYJzM6uEIl0MTAwyPz8HLOzKr4fwLabSFKNcHiA\n1dWXmZ+vEY0OEI0mKZc3+Pznv8lAf4ELL79Mp93m8COP0LW2htnpsC5JLPs+mVCILlEkJwhc19VF\nZ2WFY+fPc9uVCXGhWKR3cvJVgQhAf/+uK5wAkCSZvr7dPP/8SW677R0/NjuytrbGn/7J/0t9dpGx\n3jzzqsb6zAK+rpL1PTw/QLcQpOKvMDIxTCwUJRUfodZeodxs4vs+8Z4evvn44wzGYuR7ekjk81Sa\nNqF0DqNcxnI86ppOJqigCTLBVJiA7yNlM2yUK0iVJrZlgi9giBJ6x6TsuAgEKSLy8OVNImHokiSC\nrkraELAEkZlmkUo4jKBbnLMUfHcclzyykMDwymj2FPnAIGElQbnhkDUKXC5dpONVUdfyfOCde1mQ\n41TrdVbKNnJHY2cgTkQOE1Ii2J7DrLFOcV1A07o4/fQp7hofJ3JFWdfzPI69/DIzk5Ovem7eChAE\ngUQ8wt6cxz2DBYKBAEFlgscef5yly5evLrAA3ZkM04uLOI7D+Pg44+PjWJbFX/yP/4HpOOwcGcSo\nNpgvrbNSK9Ff6OLWWw7QKhZJ1+s011ZY8CWi8l4CYQnd6BALF9Asi0eefQ5LmWCt3AKhh1BwiHQ8\nQ0Pd4PkL0xRGtnWKFhaWcd0A5xcuoasu2XCKqlFjrqUTpIc4A0ik6bg6FzZLDO2epLdnhHppE93V\nsJQYJVtmpgWqKmGQpGyqiIk462oHV/R4bHGR3mAQU1LY0ktsUkbNTuC6JlnPxYmkqdoGScFD1A3m\nL11iq1TCTSZpViq870MfYuKWWzj+wguMZjIossxatYqZzZJMpTh+/DiJRILR0dG3BfT+iTh8GN7z\nnjc2aEint19LS9vdNb9o+LkORprNJl/4wiMkk3vJ57d5DLqu8tBDj/Pv/32OfH67jp5IRGi1tlVX\nA4EAzWabUslGlpNks9fS6Tjouocsr5JOT+B5TTzPuMLIr7O1pbG8bNNoCIRCSVzXZnHxEvm8SrFY\nZHx8HE3TWFraRFGuJRbbbo3tdDSCegSzZbGnq4u5M2cYCQQ4rRr44Th2s0rbsegAm8TpJYJKiWEg\nLYWp2j5twWVLlqmZJoKuowEaECbIppwlKiu0HAddlOgrNcm02wxubrJYr9Pe3OTW4WFkUWSlPs0a\ncfozw3SaZTIje+jpGWVm5llOnChSLPooyiAgoOtFZmfbWNZzrK15KMpuqtUykrRJNpthbVXjs//P\np/nobbcip1J8Y24O03VJhkLgeZiGQcJx0KNRWp0OQ5OTCKLIS1NTRFMpXFkm2NvLh19HH/m1BONA\nIIRleRiG8SPbr2dnZ/nc5x7h8ozLaGwfq6UOF9ccNNPgYCCE4HugJBgMDTFXX2VxbYW7brwLAN93\nMC2L6WKRfk3Dcl1OXriAcvYsWjxOLTLINfuv49GHH8XxJKKhLGvtEqZd5mjHoJXLca5SIdVoEvJB\n8EXmbYMt1waCBMigksIjju3E0NorJEId4orCSadDnyTxznicimUxbRm0vAHiwX4kgkjE8fw4BjXq\n+hqauRfXS1HV6jhOEheJy4ubfNlscsdggQFRpJxK0ZJFugMK6+1NAulhFFEm4/tsGUU0bZXJRPBq\nIAIgiiLDqRQXTpx4ywUjvu9z7OmnOTg6elXKv6VpJGMxyqur6Lt2XdXKMG0bORB4VdA6NzdHVNP4\n4L33cvTMGVSg2mkzIKbZf/B6NhfmCbZaKIZBj+BR82RqHZ1gIIznezRbFRTJp215NOwF8AaIBMbw\nbCiWi0SDOqrXxZnZVUzT5MEHv8jTL50noPtEBEipJVpWkRh9RBBoYBMRAsQIsuGbHJmbY9dAASkc\n5HS1ypoUx1bTIPSi+iayJLFpn+Vkc51xQSFuO8iCzZrjsyFJNJI5Gr6AqTWp15boEkUSuUlWy7Os\nGusEjDYTI8McGB3lrvFxpqenefSb3+SDH/kIU8PDnD92DEPXGbz9di5OL/HQQ88BcaBDLvcsv/Vb\nv/Z2y+8/AYcPw8c//saP+33eyNvByFsMU1OXcN0M0egrhMpwOIYgdHHu3BR33XUHAO94xwE+//ln\niMVSuK7H3NwaoVAC3/eIRvdgWW02NuZoNC7jecskEklisSzLy8d417v28thjz1IqCfT0vOJc63kp\nFhYeQdd1YFvdtVarMjj4in9No7RCQlQgkqPW2nZVzSeTmLWzYHoMhZOU1BoV30EgShWVPB5BIUzb\ntokiEfRFDM9CBkaBJLAMnKYb2StgSSGGx4ZJRGMsr55iZyzGjmgUzfdZ9zymTJP5zTLThoxGhJVy\nhYBp0zu0i2DQQ1GKXL4cpKvrVxCEAJal0t9/CMNY4eLFKWKxe0ilBvE8l1LxDMXZ48QDJmWpzdPi\n83imSciVKTs+TsejInmYvo/l+6zrOplUin3XX080GqWRTrPrvvvo6+tjaGjodUsCpqkTDL4ixKRp\nTZLJ0I/savJ9n+9852nS6T309ik4tTrpWA7LKSD5y4wn0zQsg1Q8w1a7RW8kxVR7i+VqBcE1sN0i\nC0aCiCwTqtUYA4ShIRq6zkqziZuWOXPmBMn8CJsbS0StDrZdIynYdDJZhrNZOisrNC2bLSlEHQi7\nAFEqxHFJkWQQiQCikAFfZtG4gODpxBSRsOOw0Wggx+Mk8/1kKlmaXgffFwEbXwCEHJa3QUhIYng1\nIl6IOEFCgQIhUaa4usJmuMw1o6NUOh2isSCe7ZJWfDrmGr4foNbcQOjK0OlIHJs6TcR2uWbnDoQr\n10ASxauGcG8lOI6DoarEfqANPBGNkioUqC4vY1nWVTPI6Y0N9t1556vuq2a9TkwUySYSvPe221B1\nnedPniRcLrO1vo69tkZfJEIyEKAQFDldrhCW+0lGYjgimJ0Wut3AdXV2yD6LnollLSIIYUTRxvc9\nHDfCsZePcfPNd1NZF1G8ARwphmmXqNsOKj4DBIgh0qKO5WeRCKMQZsts0147z2iPRyccZb0eIRga\nwBcCiEqMeDhFvV1D9tfIRiKoHZem55EKdRPKZOnefRuKH+apMy9SrpxAMn2CLuQiYTRERvJD+IEA\n8StiaDv7+3lhaopWq8XevXvZu3cvAN/61qNUKlGGhl6Z47a2lnj44Uf57d9+E1bVX2AYBjz7LHzu\nc2/82Hv3brf3fvCDb/zYP2v8XAcj7baGLP+wgmAwGKHZVK++3717N/fcU+aZZ15CVQUajVl6enq4\n/vq7eOmlS/T07CKfH2Nz80n27NnHzMxFEoktRkcnGBsbJhh8Ac9r4fveFQl50PUaoZCMc2UCTyQS\nTEz0sbo6SzY7gqIE6ahNohhksjEioRB9fX1cnpsDy0QSgliuTVKJEHRt2q6JA0SQMH0dBw8LgQgu\nvUAGaAC6IFDxZWJCCFcUCcoK+UwWtVqlEO2irZlEQiGuGxtjcWoKzRJIF25hwtHpWB4L5UW2aiUu\nXVK59tq9yPI1XL5soygu0CSsVEBrUGnUEEUZSeqgqhVqpUtE2uvkRQHb0lHrFfxyiBdnFukKD9Ay\nOyw5DbrFOCWKCHhEs1nuue8+AoEADVUlOzDAoUOHfiwvYW3tNF1du4jFUrRaNSqVi3z843f9SHJl\nu92mWtUZHEwzND7B2SPPEQooJCJxipUgdVNHDEcoJLOYTom1eoNoppfIQBTbbPKrH/gtevv6ePKB\nB7DLZYaumCEOAiO5HH919jS6FSai9CLHJFa2ZohJHUYLOyg1l8lKNQ4WClRaLeRoN8fmzlMlQpYY\nVaKI7KCKi4tD0F8mjQ2kyfgNIpZNt6KgAm4gQE8sxFrDR04E8UM5Wi0bfBe3VUWhheMcRfYEguIw\nrh8ERDxE4kKWlXIZa8glGYsxPD6KurJOp94mFoaGWUPJJ/jgR/93CoV+jm8WefHF85SLm1yzexe5\nfJ7Vep3r77rrp30E33QoikKmu5tqq0X2B0p6N+zdy9+VSlyoVEg0m7R8n949e3jn7be/6vvZfJ7v\nVSqsrK1td5Hl8xQKBc7Pz+NoGn2KQiwUwvN9IskEuXaDprFCx/ARbI18zKfeatLrOwwmCpiqj+c4\nNEQVSe6i4ZQICDW6RZv67DpRZTem2EETBDzBxxOT6J6AhkeeIH0EWWeBDjEsDDIU6Q7IUNEQxBAh\nKY4ixRFDSeq1Ks32FkG/Q1wIIoYztEyBcDhJd7wHVZHQtRZd/X2Mjk6S6LVoF5eQnS3kgIiw6hLT\ndZZrNTYMg7VKhYn+ftqCwMrKCrZtk8vlcF2Xkydn6Ol5rT/XEIuLz1Ov19/OjvwUOHIE9uyB1/hh\nviHYswe+9a03fty3An6W3jQ9wCPANUDU933vtcc4jsPW1haSJNHV1fVDC9LQUB/PPjsHjLzq806n\nzOjoDT94Lu6++05uvPEACwsLuG6Fa655H7KsMDbWYHZ2mUAgSjqdo902cByN0dG7WFkJ8kd/9ACt\n1jrZbA+12gkEIQWYRCIWe/bseVVN9ZOf/DX++q+fQFXXaLcdEpkgIafD2ECCRCSCCzyzsICoaeCo\nzDouTURyuDhUgRA2Pll8bHyKGGi4WMA62wtk2PfpxUHyK7QcH0WDc1NTDKVShOJh4tHt9Hs+lSLV\n3c3Tp1a48+CtCHaNs1NLOF6ORLJAu73O5qZKNOoQVARk2cBvnadfhHggyppWB6NBS5qhqjWJqGVy\nooJvWQhscm1Y58LURcJiBEOErniK9U6ARqgbVVAoS2X27t2L6/usVyosqCrv+cQn/lGC5Ec/ehtP\nPXWU5eUmXV1pfvM33/0jSweqqnL61CnmZi5im930DfRzzcGDXDx9GickU3E0pgyBnak8teoGolaH\nkMvNd1/H//q7H2BsbAxBELhw4QIbxSIHXuNOvNJokDVNBnuGKPRNMHN5mkHdxZXi5GJJHE0gpOts\ntNv4nsdW6QJ5y6KFh4NHBxsPDxGBEC2SeCgImEg0bJMoHhuOg+X7qKZJJmJgm03KJYV0bw+yX4FW\nkbQ3xw5BRcNCJYbrORTpQ7Ez4KmkgiEM22euWmXnjTcix+MkgkHCzSZDO3Zw/NISIxO3MDg4Sa1W\npK42UMubONVlasVN2tEo+++7jz1XdslvNbzjnns4/OCD7AKyiQSqrjNbqfDb/+E/MDI+jqqqZDKZ\n17V1qJbLzE1P02sYDORyWMvLXNA0WskkWr2Oo6qsaxoVzyNbKHBvt8OT67P4io8iilhOhQmliuxB\nMhzFa63Q9DwEL0rNdXB8HcsvE45KNDyPiBiiaTrYfpGUt05KEPAwKVMjSoEECilC1GgRZ4UubLr0\nBDHPp6NY6M4yTb0H1exD8QxCSOC5IJhonSrhaATFc3Ask81WE9ePUim2aYRtdqa7ueX9n+DYsWMY\nZ8+Si0YpaRqS7+OsrLCytEQzHmfN85hbWmL/vn0EMxnufP/7cV3/Klfr+xAEAUF4ZcP1Nn4yvNEt\nvT+IvXvhv/23N2fsnzV+lpmRGvAu4Bs/6oA/+ZP/j05HAlxyuSAf+cj7XzXhjI+PMzx8jOXl83R1\njSIIIltbi/T0iK+7gCWTSfbv388992zy8ssXGRjYze7du4hE5jl16ikGBoK026u8733/Cl1XOXHi\nOI6TZGNDRhQXOXBgP319wwSDYaLRBPX6WUZGXgmEDh26mVqtwbFjs0AGTRNZv7zI7uHdzKysUFtY\nYCIW52zZIBIK4ugGZVekjEmSOhksEohEkVBw6cLhJFBCph8FG1Ax2CtAyDeZo0lCSNDsdFiVRQ6O\nphjufkXKfcfoKC+ueiy22zQ1lQougUw/gUAWXV+gWrqAKG6RU3IszD9DXqvRCCSpuBpGZ40UKqrm\nYDvLiITRHBlZbDCqNIk4Mnlg1bPQFZ+2pTPRdw2JcJaFSoBQIcnE+9/PhU6H3NAQH3znOxkaGvpH\nb4rrrruW6667Ftd1fyxhdWtri69+5jMkdJ3hgMmll7/L8uwQe2+8EUVRSIc93n3DJOrlixyfP0M8\nGCSSiBPsH4Bqlc//5V/SMUy2mh7dvaMcXWqiyzrv3jmOLIq4nsfFYpGJfB4hJFAtFgmbPuFwEsvU\nKLa2COAQtnxmGi3CrksCARAx8HAQ6UNmiRVidGPhEyaKgoBLHYiwisqA7yMBIwh4epukr1H1LlLZ\nKJMJSQTlGhOiRm8gRqvZ5jI+YVKEWcf02gQkhYZj0lZVAmWF0sUGoZCJHIsR7O/H6e8nJOTZf+O2\nEuvMiSe4dWASo3uYhZWTpEZHkQWBkZ07CbzGu+SfE77vUy6XAcjn86/aeExOTiJ88pM8/8QTnFtZ\nIZJIcOMHP8gNN92EIAh4nve6Cr6apvGFP/9zRkQRyzS5cOkSYiRCdmCAkYMHea7VYrZWo0uSKIRC\nbK2uU9R1cuEAw4MuttmhVWowkYix0GhxrjrDgNMhTZMSYVp+AAEXwYmzZvRT8VZo6yZRKULKqzLq\nK0QR6Edgg00W0AmSwMYhSZNePFJIdDsCniSTkgO4gs954wKGHCEZTGJaTRA1Or6DYrcRwi4dCy43\nDJYlGaml4XlrDAdMetoBohsb1OfmOLhrF8FAgKOPPophmtwcjVIyDMr1OoVAgHilQsp16fV9Hnvo\nIfL5PqrVDXK5VwxDVbVBIiG9KR5Vv8g4fBg+//k3Z+zJye1OHdOE1zSN/txDeKNluH/qP0AQngbu\nem1mRBAE/9OffopodDs1W69v4boLfOpTv/0qcyfDMHjppaMcPz6F53lcf/0uDh06+GO9Zmzb5okn\nnuLlly8CARTF4a67bqJY3OSzn30W0/RZXl6gp+cd9PVN0G7X0bQlyuU5CoUs6XSOnh6J++//MHv2\n7P6h8efn53niiadZWSkRDodQUDnz3HMMuy5zi0WaJZ+0JG/rWPgGDnF05jiIhwaIQBQwgMuEqJBg\nDIUwIjoqORoEgWUhxJbSRTqRZ0tocP/73slt111LOBzGcV2en5tjwQgzOfkejhw5Squ1PWmbpo7v\nX6YXk14ZphsrnD63wKAbREYggEmEBgEcFpFQ5R7yrknS7zAQkEiFFGwB1l2TGV8kNXIdttKD1QYB\nkBMG/+d//9+4++67f9p74SeWhf/iAw8Q29qiP5/HsCweO3aO6ZU2Gx2BXMBmZ3+UXV1JXnz6adRi\nkUXXZe+ePYyOjdFeXaXYVtGjk4RC/aiiQnagj2e/80WuzXfY152j4XkUdR2xXKYvm2VheYtW3SEg\nS6hWFSUcwDc1Bn2RVU3H8R10oImHgYhJEB2ZFiDQS4AM3Yg4NBHZII1GCIt1fDJAAWgDJtsZsGkE\nEoEw6XCcmCmgyBKWYbPpqNQIY6KgBUcRPI+Gs0osPkx39yCC66JEFMb3JvmLv/gjAoEAf/zHf0Wh\ncJBGo8TykW8wGk9jWSbxuM5tt91Cu9NhxvP4nd///Z/qer1REASBv/r0pzGrVQQgkM3ySx/+8BUB\ns1fDcRwkSUIQBFRV5Xvfe5bTp2fwPJ+9e8e4557br5YUvvaVr/C9P/sz7hkevhq0zCwtsdHpMG3b\nxF2XUVEkGY2yvLKO68tctC1S+T5unjiAIjcptitcOnMGwbZRVJUxBCQEjuIjEEZCwaOXtgBLvoCF\nQkLw2CGYpDwXjxoZNJAUTrgaJiAjE0JAQGAXEhEEPFx8WQIlzAVdZYY8spjD82oEadNNh4zgYMsi\nmhChToFg7np8R0DR1xjN1Ll9zxB3v//9fOvJJxHrdbr6+1mbmaGyvEzBdVlptwnKMqlQCC0cploo\n8Huf+ARrlQryrl1MTRdx3QKJRA5Na+A4G3ziE+9900TQfprn/ecFS0tw8CBsbr557be7d8NDD23r\njvy84co1f92a+1uaM/L9QAQgne5iebnI7Ows+/btu/p5KBTizjtv5847b3+9IV4XiqLwvvfdy7ve\ndRuappFIJGg0GjzwwNfQtDzhcARJClCpuDjOLKlUBsfxiEYHcZwy6XQUSfKp1xv4vs/6+jrtdptk\nMsn581P86Z/+HbquMDw8zvBwPx2zio3I+vIqVsNFcUUW9ToBQEbAIEgHGRuLPOABKlAnSIgEEhJR\nfGIIBImi0kHDRZCDhKMyjrdFJupzeWWZucsz9A8NkRod5cb77mOipfGZBz7D7Nnz9IW7cR2L+foy\noihSsjq80CkTdAyi+EhIdCOSxSYBuEAJlw2nRF4KIHgWirhdX3ckhVokQG8yT9eeWzh0x4fpdDps\nbS0wPAzvete73pib4HWgqirlpSUmryxWoUCAX3nH9dy8q86ff/3r7MkNYpXqPHlyik6zxTWpLD2K\nSAQ4/eKL3D05yYvzJfpGc3Snc1RbTXBcbrzzA5x86UtEukUmhgZYW1/Hr1QIOQ59ERFaNbyOQdH3\n6I4kkYGnVI0cIh0gjUcXChI5bCRqGLQwcVlEpk0HnSgOI3jkAQUZBRuD7QB0kO0HMoaEhguWieAB\nUhjXHIqDRgAAIABJREFUaKF4MgoCQcGh4Wu45iUUySEnhkl5ZWRd4fprb0UQRObXVvn61x8mIoDd\nWOaly/O0OxL1i+doKHEEocOtt+67qjXh2fabdr2+j06nw3PPvcjx41P4/vbG4bbbtnkKg7ZN/oq1\naaXZ5OG//Vv+9ac+9UOt37K8PWXZts3f/u2XqFQi9PS8A0EQuHRpieXlL/G7v/ubhMNhpk+eJBmN\nXs2yaJpGbbPIesumJIcIpbqZ6ZRJt7fwUfA8l6QkYmZ6OKU1qJVX6XQqNJwovtFkHyJtPDbxcZEY\nJ4yBzCYiSZL0oLGIgeVvofodTFzCbFs32K7FfkBHRieAT4ctAERkwMOj4gikfYkAIiG2kD2bCHHi\n9LMd1qyDYFF2exgZvRbZ92nVavSEorQ7Ooubm9i2zejICGc3Ngg3m0SjUZxMBq/RIBQOMxQM4gPR\nUIhmp8O5y5cZ7u9Hdxx+7/f+FSdPnmF5eZPJyQw33viRH5Lbfxs/HocPw733vrk6IHv2bJNYfx6D\nkR+Ht3Qw8u1v/9XVn3fsuJ5oNEmr1b76mWmaTE9Ps7CwRjIZY9++3eRyudcb6nURDoevZlleeOEY\nudxeisUSnuciigrRaBeNxjKGUSESydLb200kkuaGG+7GcWy+/e1nOXHiPJWKiyhGmZ4+xcrKBsnk\nzfT1DVGpbDA//xhDQ4O8fHKGdydiWKJFy66TwcfDZRkfSCMTYAMLHTAJYxDCQmIdhyQJ6rRx8Whj\nYwKKEMCLFxiMTrLWbhOJw6/dcw+WbXNsfp7YwABPPv08jz12grXFFo7ms2GexHdrBHBI4RO6sisP\nILAMWLiM4qKwvTjWAQXI4pCIpGh0LOpygHXfoybBfe97HwSDmF0ZVlePIIpw003j3Hvvu34kN8R1\nXaampjh16tK2o+j+nezZs+fqIvOT4PXIrIIg0JVOo5suWlNG9mNkwh6CGWS1VcePGAyKItW6xsNT\nWyzWJdRVlUZnmfG+Lk6fu0Aw2Y8fGGGq5LOolunOh5BzOWZWVhgOh6kgsGLbKIJA3bKwPJ8SISqE\n8ZHo0KELBQcVD5MgkEOmSYAUHTJY6Fi0kRDwUfAZAFaAEBADLAB8OkANl6zjojsNBgIyuutTRkD2\nRTooZFAZdAUiYoooAqXaZV54YYmh/p2U2zWe/twCH33ve3jXQB+fe/GrnJ6tEiREOmQSicY4dmyG\n2UsXERJxdn3gA1iW9aaVamzb5rOf/TIbGzLd3TcgCCIvv7zA7OyXgG2O0/eRSybJtFpMXbjALYcO\nve54ly9fZmvLZ2joFRXknp4xVlY6nD9/gQMH9hOUZdxcjk1VpScWY35pibImo4kR4ukxUqEcltLN\nzOrz3JBI0pPrRlSbTC9ewHFMzI5O0w5gB7qwPIdLuJhEcUkQwuUiOnl8fIJ4fpAwHRRsXHR8LApX\nrqvOdsawCQi4SHTw2PaVmsejlwAGIg4+bVdjAw+BNApZfOJUMRExsP0YEbdKDJXV5YvcNnEt1USc\nlOMgWlHKpQWajQY7h4Z46cIFyqKI2OmwoGnETZPRbBZUFZPtzc6u3l6WlpdJpFIMDwyQyWS45543\nbxPxLwGHD8NHP/rmnuP74me/aHirBCOvm7a5777fedX75eXjdHdvE1M1TePBB7/MxoZLNFrANNd5\n+unTfPzj72Hnzp2vN9yPxdLSBv39k3hekDNnpjGMdQQhh64bhMM2uVwfpdIFEgmd5557jEIhx6VL\n87Rau9i16yZKpS2q1RzNpkUgUKdUCjIzfY52e4OpqRl0NcERu8iQbzPiS/jIaHgMEGSVTXbioyEx\nS4gcSTx8NpFo041ECx0JiyZ9iIBP03fZsgzaXotYOs9E/wDTK5tMDnSzsNzg60cepFR1cK0xFDuD\n7izQRZlBXOJsT0YuEAQUfHrY7taZArJXfqexTQ3eEHy6AmHWfCiGgwgC3HX7IZLZLJk9e/jQRz+K\nZVlIkvRjFzPP8/jqV7/JmTNl0ult/siXv3yM8+cv87GP/eo/eo08z2N5eXnbayQQYGF9nbH+flzX\nxTAMFre28OM9mIEwflMnKMsk4gm2yiqbukbKdChZBcb9QWLhCp4dYGGpzcb6OoYSIWrbiIbK/rHr\n8XyPIy99k515gRsnJvjKqQsImkM/IQzXpdVUiUhhdot5HM+hik2DND5lJrDIINBCYQ2LawgioaAQ\nJkcLHyjiM45OHoEKPpuABDjAIh5Btks3OiYNoGZ51FFwKKATw8QlSg2fJppXR9BaDEoBJM8lpbfR\nagsI2SGy8TgL8/PIjQ539AxSDKfYWl8gsrmGYqmsizZeKoruOPiWxb/+d//uTQlIZmdnWVuzGR5+\nJas5MLCT5eUzr3t8PBikUan8yPGmp+corm/QLleJprvo7Z8gGAwTiWRZXS1y8KBC18AAQUliamaG\nUq3G6WIVlRTtcI7J/p3UNzZIiRE8L4GgKNgInFyfZ4fn4lkGNT9LkgiOvo6AS4AAOgHq6PhINBCp\noRFFJUwUnw4KJcax2Qmk2Q5A1oAutgP8DBISLuBjIzBNHJ0gUSQ8dOp4BPEQUXBJEKCbCAJVpulC\nJe+6WBjo7iZnF3V2DN7AZkWlo5cY8Q2+8tnP4oWitLp7uPnD7+Pi0aMIsszW4iLdokjdtlFlmXwq\nxXAmwzOlEvVQiF++7ro37mL/C4Vpbrf0Pvjgm3uePXvggQfe3HP8LPCz7KaRgceAa4HHBUH4L77v\nH/vBYzY35ykUhvA8l83NWYaGIlcJoy++eJRiUXmVtbiu9/G1rz3Bf/pPoz/1hJrLpVhdbTE5uYNC\nocDRow7Ly3NEIhGy2SSzs8/iuk1Cof1cuLBMs3mcen2VVstnaamFqtqsrNRx3QCN2jkUf556u4Hr\nFPC9CIJfoOI0KEgqVQQcZGwUfDp0YeDhUUJhk24abJdvWkSwSNPCI0udnJBClwzCgkBIUBiIZtiQ\nPW7efwCwWS/P8/ixM1xYNCg1BHyvC4FNXC4yQp0DeCSBBNuBxgwQYXvCDAMttgMR8crnw8AsoPs+\ny55Kf083ZNNcd+P1TO6/ll0HDrB7924kSXoVj+dHYXFxkbNni4yMHLya3UinC1y8eIy5ubkf+13D\nMPib//k/KV28SCEcxnccHjh2jKzvE3BdpFCIWjhMJrcHP9vLSuMEwXYNJRhkPRxG910ulDQMMUPJ\ndckGI5TVy0QDI6xVVJSojW2c4vodA/i+ycWl81TaGi+WS8yvl9BbGvs8jxywIHgUfAHVjdPBI4hA\nDhmVGiY55nFZoUMCjwQCIYJsLyEi4JADBGx8fIoIyMDmlVcQ2AWkEGjgUyBAAJ8VRNIUWCOIRZw4\nm2SRCRFGoE3MDyD7CmkEVraWGZeh1Wzy1W88RnV1mVq1RSAYYnWrSiKUp+KV6FLixMMC16TDREyT\nC488wrl3vIMbbrzxp3p2fhKsrGwQCv0wETISef1MZt0wuK6//+r7TqfD/Pw8lmXheR7HnziMML9B\nKjtIa32OE5dPce2tH8AwWuRy2+W7W++9l4f/+q+ZHB/HcRy+e3kDU+8iFM5SKRYptlqsGgYCQcqu\nyom1BhHLpFcUWPMVXCJ4tCkg00HEQ2EAjQQuAj5JRDbxSLJFgzJbQDc243h0s/1chdgOSorA8Dbl\nGRHwMPBwiREhTS9zFEmhMEgWkSYCCZrU2EQGJAoYJFCQcBHxiZGi0+lwZOYoCAFkymiGhNTQMIMO\nflMg//I6+/ffzAd+4zf4sz/8Q1ZrNaK5HL2xGEng2MYG+QMH+PX77/+hctjb+Olx5Ajs2vXGufT+\nKLydGXmD4fv/P3vvGiTZXZ55/s795Ml71v1eXVVdfVXrjiQkBEJIICGQYgBBLBh7jYNl7PnAbDDr\nnYmYtSO8M4aYWBze8G54wONw2NwMY7BZtUCiJYSEpL6p1Wr1vaq7quuelVl5P5nn/t8PJyUuuiAx\ntADB0x+6uiozT3X+85zz/N/3eZ5XBMCrKhz37DE5ceIJVFXhlluu4O1v/1Ek+DPPnGZg4CfZfCKR\nolw2WV1d/QmXy2vBLbdczxe+8G1SqRz5fI4777yPM2cOUq/PUShELC9vsmPH+7hw4VkUZYZsdjfl\n8mEWFyMymXmiKE+zWYL2BsKbpyMZ+FyLShoZFx2JFP1EYYOQNkkkXNp0iCd2ngICNJLkcFHpEKIj\n8KjToEUvLcCkoCi0Ig1PT5K3dJxGkefOHefC+iKRd4FSS0ZmpNtssZBx0OgwSoRJXBqGuCXQB2wR\nk486ce7iIvEuzgROA88DhZ07+f0//mP27t3Ljp07f65JugBzcwuY5kst2pY1yLlzF1/xee12mz//\nkz+h8uST7MnlqHY6rBWLXG1ZHF5fZ9fQEB1d55Y9e3jg0DkMfYK9d36MQz84QFZRuXI6RxDOMbda\nYXb2JiobG4hqichfYqk5TyAEpquSsfKsb0V8/+ADJH2dnghaUZKFss80LgVFw4sCBCEJJBwkQBA3\npAQGKQymUHBpobDFBnk2iQCfAiEVPDwcBH73vXYRVInFypPEJX0FKCKRQiOQdPpEghouOhZ5ynRo\nkiNExkEQogIJPOywhiNZpGWFlusS1XSqzZBqaROn2aHRAF8xsZ0qgxgM60lU3WYklWKj3aZgGBx+\n/PHLQkZyuTSet/aS77tunAc0v7bG5EDsBFssFvHyeXbt3g3EgvAHvvQl0p6HKgQPHz7MVQMDWAUL\nRZaZzA9gNSqcfOYA49sHufLKuwGwWy2qnQ6P/uAJypUaZRdqTYspEbDp28iRTLUDodLk+jtu58KD\nD1KQIhIRRFis0yAe0GDRQUJDJiQki0MZjQgVGY9LCHRCWjjMEicqbxG3ODUgB6yjUAF6iGcQNdCp\n4SJo4uKSISKPgSDER0YnJI1GkVVU0hQIkIgISJMniUNEHxorQkWIJjo72cLGkKrkwxC16XDw0f0o\nyr0UV+axazXK5TJTqRSSEFTzeWbuvpuP/+EfvuwU7mq1Sq1WI5vNUrgcgRlvQuzfD3ffffmPs20b\nbG1BowFvJg75q9KmeVl88IPv5wMfEK+oEXg5JXYcTPb6p49u27aN++9/O/v3P065rBFFHvv29fLB\nD/4Zp0+f5tIlhcXFZ+l0UhiGArQwDB0hCpTLK6SSNrnOWXKBhoKFJ9qUmadCC0tWMYWLh0kLmSEC\nehB4xBWK54AhZFQS1FFRSFPCZYuAtF5ABHU8YREpJqUooCM0NEZZW7MpuxKXtk7Qx0VMPPbQi0mZ\nJiFFNpDpAUwU2rzwbvnEpOSFtkAbKAGzwCaxVgTi26yeK/A3f//3XP8LuEGZpk4Yei/5fhB4JBIv\nvSC+gMcOHGDj2We5dXSUpK6zNT/PsBA0ymX2ZDLsGh7mTKnGNx89TSbXz8Gnvk+95XDz7e9j/txJ\nzl06zA3XjKKOjtHTt48H//EsbvEUEyJkJpKxRZnQl7lQ3MJdvcSUKJAghSJLSGqORW+FCiFRCJYs\nIaIID0GEh4WKR0SESoSCS4SNDySQGKROhRQRGjISFhIhZVrYxOQv2/07JPa6K8TE0EbCRMIUIdBG\nIsKnRQEPFZMMCSp4jNDBIyaUPgEl12ZKTeIhEwpB6dxRsh2bPk3hhN8iJbI4UYQfBdT8KlcPZdBk\nmSgIUIUgfI3uhvX1dZ49fJhKscjQxARXX3/9q960du/exUMPHaTZrJJOx24X224Qf/LAuuIKfnj8\nOJIkMX3FFXzkzjsxTRPHcdj/5S9zRSZDNplktVxGqTc5t1IiOzSEqjbpdFLISDQ2Fvmf/uQvyefz\nsaPtH/4BZWGNG8avx9qV4wfHnuRY/Rx+SWdCTxBIAkn1cUWDwwcPErougSzjRxFNPBR6sFBRkNEI\nUBEIcgQEQJqLqERkCNFRUFBZoMYGg8RVxgxx/9kGWggUDCpEaERY6KSRUYAa6yg4JDCQ8QjwEQQI\nHBL4hAh8AhL4JEmhABlCIjx0MrgUCLEpsMpIaCKLgKZXwfVaHHr4H5k1fD6yexfBxATHVldZDkMm\nslmuuPHGlxAR3/f5zre/zcVjx0jKMnYUMb5vH++9776XDCD9LX4S+/fD1752+Y8jy3EF5uRJeAVJ\n1a8lfqXJCECn00GWZcyfCqS67ro9PProBSYmfhTUZNt1EgmfkZGRn36Z14Srr76KvXv3UC6XMQyD\nQqFAtVrl0KFjlMtLeF6VdHonfX0pstkRbHuTRqMIWNjlZ9muJIm8i2j4qET00SSiRL+UwySkLoUU\nhcMAsZCtTnzjTwNyd1ckWMVnjBQmDQSm5ZNU0ljaNpTQQfLq6J6C74UUvRo+CXpQEcAEoGMgodCD\nTBaXOk18Emx1f+50j+cQx8rbQBkYIy4lF4ERVDQtSdXKMrzvJpaXN/hFbJZ3797JgQPH8LyJ7hBC\n8H0P399gz563vexzgiDg7NGjFJLJF/M/2q0W29JpyuUyimWxUKmx1siS0wtsH9sB2gKdzipPPjnH\nfffdwTv/j//M7OwsFy9e5DOf+Rwby6e4QZWIAoEXlhiQ2oSd2MacIoNJIpYTShIhEj0kWEJmg4Cp\nSKaFIAtYNHBQAJkqClvoFFgiD+jUqOLQokMLhRTlbhUswkZiCsF09/9oAvPEN64y4CORRKB1i/ot\nwEBBpkQbjQygd6szm4S0gfPEu3ArDKk6Do6SZK5RZzDw6EiCtqLgSzIiWkTDohjWGUtbjGRHqXQ6\nZLJZzrou77nllp9egpfg3LlzfOfv/54RXUePIs6eOcOJp57iI5/61MsGj0GcUPx7v3cfX/vafpaW\nJCRJIpEI+J3feS9/9mf/lvd94AOE990H8BP5MgsLC1ieRzaZpO047H/6BOtV2JMco91Uyfcm6e83\n2L17B7lm88WK6KHHHiMfBFR9g3whrpsLx2EnDgnFRRJNMrpJjyK46AZcXFmBKKLoeV13kyBBng2q\n9KGioSARItDZBCIkYBgVBZc2Gh4KA9SpIeOgEp9nAE0kUqjYBPShkkXGxmcdlzQaMi4hAQoBJoIm\n0KCERg4PDQXBJjYTuGjkcIEUEUXipp+PQ5Zl9mKSwERRLPywzoXQw9m4RN/0OKnu9fOdMzOcrVTI\n9/Vx/vhx3nHbbT+xTo8/+iilZ57h5vFxZFlGCMHJEyd4JJHg7ve//2d+Nn5TMTcHrdYvdkrvq+EF\nR81vycgbhP/2377MwsImkiTYvXuSu+++/UUm/9a33sDc3CUWF5/BNHvwvDaKssXv/M49Lztp0nEc\nlpeXkWWZsbGxV9SUaJr24gW10WjwhS98hWo1RTK5Hc/bYmOjhqIsY1k6mYyGqlrUakvoYZmkrJLR\nLVQvwJFtFsMtRogYoDeOdhdtMvgsE9/4LeJWSQrIoFInQMEmYpHN7sVPcvJIWpIOBS6EK0hODeFq\nNHGoI5Apo+OTJyIJtKiRJEkbFwufIi1CTFxi/UeWmHAUiV0cCrGDYxFoYtC//V/hahnswGVkeIh7\n772Lo0ef5aab1l/xRvNa0d/fz733vo1vf/sJoih2T8hyjfe//yaGh4dfds1OnTrF4tISPdksq+Uy\nk/k8UvciKRSFShAQtQRZs5+2GyDLMpnsIHe/5z2srBzkxhv3MTIyEg+Dm5wkK2/RmyjTsjtkQo+J\nbvtqFcgi8AlR8BHI+CKgFTi0u/6IZUIahMjEVYw4sL9ICZU1chgI+lFJoCIRkKWJDUhkSVHHoMM5\nIvrRkBHUAfBpEleq6sQi4jaCGQRbQIs4pbcfWMKnjU+BCI+QMTxEd+2SxOJImZCTXkBHS2MqKdJh\ng0TkMa7KJA2D+aCDqTtshirrqsrRchlXlvEMg9133cUNN9zwqmtYqVT4wv/1eaxyhdNbJbKaimUY\nbLou/6Cq/G//8T++4nMnJib4zGf+F9bX1xFCMDQ09BMuqpcLuQuC4MWL1NmlVfxwkGQ+oNNpoeoJ\nenomKJUuMre6yvC11774GuX1dQpISFK8m4+EoNWp0OM5ZGWDjAiIgoj1loMLXDs8zOT4OIePHmXJ\ncQgI2aRNSJ4OTfoRBETUaeMjoyBIoNEmIkSihoNMCw+DIzhMEWfGlIE2MhIqGXzSSOjICEIG8Kng\nIRMLxWOCCwNIzOOzSJEOJi4VEoSsEmLj4KLiIfDJdVNPSvQjsFAJhUfoRyCH9CNYDnw0Q8dutei4\nLoqiENbrPP344/iFAlYqxa3vfjfj4+P4vs+Jp5/mxu75AnEFetfoKE8dOcJtd9zxmrRhv4l48MG4\nRfNzFOV/LrwZdSO/0mSkWEwzNrYDISLOn1+kWPw6f/RHv4emaSQSCf7gDz7K/Pw8i4srZDIj7Nnz\n/pftf5448Tzf+tYj+H4KSRIYRocPf/guCoUC5XKZVCrF8PDwS9o7R44co9XKMTExQ7vt8sgjh6lW\nt6hUxiiVygwPpxCijedFyF6afCQwZZVA2CTCDhoNksh40SYSHgot+gmxidsjk8SViTpqV1MCG7gM\nkMAkYgtwXR/Jq1BrnyCjGhTdWDuikCRFgz7krqgtJAu4uNQxSZBDISKkRi9VNGI9wipxuJYDZJAI\nSKJYY9hhAi2psm3bPvr6ZgGo1YosLi6RTObY2Nj4HyYjANdffy2zszMsLi4ihGBycpLcj9k6X8Da\n2hrf/Lu/I2HbpMplitUqZc/DjSKUVIrn1tcpJhKM9fSw3AjwohAjlaLcatG/bZKFhVMcPXqcTscn\nm32cqakChiZx7swZVEUQhh2yxOJcNxKkgCwRy4Rda67CKhHQh4egg8kK6+i0MIHB7to5QD8BLg1C\nBBIDOFgolMgRkURmkyp9SKSQsRHE8sUkFSI6xETE72aVjKPQwOUSIRZxCy1CIkKnQ4okbTxCAlI4\nRMg0aREyjkYaiRYyfWQoqRp54dMv6yhCJ/BtZE1lMpGmmEsxODhIcniYhusyNDrKHffcw53vfver\ntjiLxSKf//zfsHhqHam+xWjgYemCmalRtqdS7P/ud1n42MdeVa+lKAqjPyZM/VkYHR3lgBAEYcji\nRpWMNUZ6vIdnnvseecWns3yelY01gmqRa4xR5v/Pv+Bd73or/SMjdCoVosgBwPNc7PoW+UggCY9a\n4OF7cTXKlyQuVipMTE9jGwaO41DrNs9k+igxSI0SEQ4eLUZQaOJRxcXHAspYlJnBwMREpU0Tn3ks\nEvSjk6BJkUEkGiRooQEuJgIbh13EomWPuFLaQXTthYK9qARILGFQx6BMB4k8OhYBPjBPhgAJQYsW\nCgZC+EihIIo8bCVgdWWFXLNJUlWpNZucrdfZsCxoNPji5z7H3/3VX3Hfxz/One97H1IQoP/UZk5V\nFFTizcFvycjLY/9++NSn3rjjvRln1PxKk5EXooklSWFoaJpLl44xNzfH7q6wTVVVdu7c+apW3mKx\nyNe//ij9/ddimvHk12azyn/4D59nfHwcyxogitpMTmb5yEfuI51Ov/jc8+eXUBSTAwe+TbFYpd1O\nkc324DhFBgZS6HqLmRkJWR7hqYfPUYsE42Ya33cIJBCSjiWHdJQqsh8QiQAd6IUXg648VFyyNAjJ\nYiLjskqbEhoeIIlJEqxSiNYwQ5eACMEwCRrsQMMkQQOTGh0ahEiksEiSQKJKSJJeQEanThqFZjfc\nzMVinTzb+qbQE1lMZZBsIcvS0kl6eqaQZRXLyrKxUWZ62vqF9ouz2SxXvkpiTxiG/MtXvsKMqtI3\nMcG2XI6jjz+OWi5Ttixk0+RiGLJndpbBRILnn3qGcrvK9Mgo/Tt3omg+zz67QCazix073s7ayjm+\n9PkvYtWXGKhucaHToRxFbANSioIjBKvEbY4AjyIbBFjIzBIi4+KR6zZHmsyj4qITawIqxHqPEh4+\nLVQSCDxUmuiohARotMgjAxK9wAoBaWwietHI4FDHxWOUYUxk0qwiCHGR2EYOlSRbBJxHp8wgMmUm\nMGl3c0sUwJI0DFlhPexgSXnAJyVrNIRHghBZSGh+m3YoOOG0+d//+I/5xCc/+brWbf/+R9G0SULl\nGP1CMJztw/NdllbWmd0+xYiuc/LYsdctHn815PN5rrn9dg49/DCu71BrNVivlqhq/UTWKKdX1mi3\nFd5z8ztYW3PZ3Kzw0EP/D9dfP0becxBKm43iIrV6GzcMaSkKVuiiASPE2o6UEJi+z9d++BQDvoRP\nEphARyEhbdAS8bnYpkyKEUIMLCq4dGgT0McGu5Ax6RBi00FgoaMyTo5kNyHIBPoJaaLRg4mERxmV\nDRRCEsTtugTxpsEGBjEYJImGwMDnIgpbpIhYJ0BlGzY9uJwljQO08MnioCCIpDR1PUENFQMotlqM\nZLMs1uvM2TapTodp10VSFGqNBs9+5Sv4lQqOLFNrtcj9mEi91ekgJRK/ddy8AppNePpp+Kd/euOO\n+UJlRIg3rhpzufErTUZ+GqqaYXOzTJeLvCacOHEKRRl8kYgArK2VWVszmZoaZmxsLwCrq/N861sP\nct99d3HkyDHm5pZ4/vlTnDy5QW/vLXhem1zuGjQtTal0mr17p7nqqqvZ2DjBxz52G3+VUzj43/+F\ncqOKJSVoCxVbNhhQZXQridwqkvIDRogFpC+EXIXIbBHiYbJChwUi6hhINFBRCXiclAhIdLUBCTJY\nuCTxMNG7YjiJqDvrRCdCw6eDYBONNFkcQKLBIhJNegkTY6SzKQYGbmD79h4GB/t47rlL6HqaSmWB\nRmOdXG6MIPDQdRfLkpienn7pm3uZsL6+DrXai4mc2WyWG9/5Ts7PzXGwWOQDn/gEn7nuOqIoolKp\ncNsnXf75nx9DkobI54d46KF/AgbZt2+WMPSYO/w9RhyVph2xra+P4soKiSjiEpAPQxzialEEJPBx\n8GmRxOwGl+0gvmlVMXCw8HB5nriylSG25HYAQRKTBB4BESYBNh4CBRmBwOvueHMobKCQR0HFokYb\nCYUUEQbJrg7IZbV7mzKQyaDTg0ubHsrkkLGR0IhIMorGpqigiwQtEgQiwA4VlgKXfmHRFG1MQvxw\nykkqAAAgAElEQVRQwtUttg+PsnbiBOfPn2d2dvY1rYnjOFy4sM74+K2cSmUJ1xcA0DWDlt1gpVxm\nYts2GtXqz3il14+3v/OdjIyP8/ADD/Dst56kKXrYc8W7CcOISvU8eXOYp58+zPbt72ZgYA+2XWd5\neZW51jxmu0S7tMWFlXVsfJIEeN212yCuFOYAzfUxkWgSIdFLAZMqgi2hkiVCId5AWJzHJ42KisoG\nSST6kH7MdithAsdJomEhAQYym1iEaCiYqEQ4qPiYgEoViRoBFnTnT8WV02TX/+YREBKRRKOGBIRM\nojEmBTSEwh5sSig4gIIGqGyIDiV1lP5sBj9TZ7PTYaHVYrHd5kpZRgDbdZ20aXLBcVgvFsm029DX\nx8lSiVnfpyebpdpscq5S4db773/VWVG/yXjwQbj5ZvixfexlR9d4RrEIb5aQ3F8rMhIELXp6Xt8o\n63q9hWH8qLTYatU5ePAJwlCmVvtRqNLQ0DTPPfcw8/NfQIgh8vkRXHedixdPYpptfN9BVRP4vkcy\nmcW2BYaRQJZNfN+nZ3gbueFtOEsVHE/BREbGZi6o01uvkok8JOL+/ggggAtI9ACCDi10trCok0Gj\nxAguOm73AhNrAmLbp0AnIkuI2rV1GkCAjEBiHgMZixQGKiYNVGxMamSoKNegaCMk0iWmpvrp6dmG\n71dZWChSLFaJojqOU6dYfIIg2Ee9vsxb3zrCxz/+kTdUSR8EAT+d3+oLgauZCCPF0NgYCxcvcuLg\nQdqNBuOzs3z0o+9lcXGFM2fmMU2Xt7zlevr6+lhePo/WqLPVrOA4Nk1ZJifLlLvvqUcsNOwFnkKi\nRR8qbSwURjC6YVdgEmIiUIlj28Pu77VArPPIo3KWBAYtBlARyKwR0UIwgUSLsDuzRmYcgzUk1kh3\nQ/5dplHx6QBy949ORNS1D8etnB4kSoRdQ6+HikEDlS28mNgYeZSgwarfYCizl2JlHV/yUYVMTkmQ\ntFKcDwN2pnvYlcvxxEMPvWYyoigKsgxRFLL32ndxdOEkGbuODtQ6LfKFKTIDA4xeJtI6MzPDzKc/\nzWbN4cCBTTY3F3HdDlBlYGCMpaUWURSvimlaXDi/SkHVuGHfLkbekuah73yHp+fm6FcUlsJ4ErZK\nfC6WAYGEimAYuZuYalDAwMVhjZAefHoQjOIQ4pAgDqV7FhmFBDIhKhoyPkl0QgQdYoIbIBHQR4kK\nvYTIRCgErBLhdeXuSwT0E+tMikAdmQlMAtRuIy7EYp0EYOAjST6rkkQoQnahYAEryGyikJAK1GSV\nfO9uMlqNG6Z6WFhZoRGG6NUq40DJdXFsG1NV6VMUlsMQ1fcJHYf3feITPP3oo5xZXaV3cJA73/e+\nnytI8jcF3/gGfOhDb+wxJelH1ZHfkpE3ANXqJvl8f3ei5zK5nPeaL54vYGZmnGPHjtDbO8Lx40/z\n2GNPsb4eIURArbaOaabYvftaAJaX1xkbu5IdO+ITb3x8mtHRIouLZ7CskFptHsPIkUxazM9fJJs1\nkaQl1tZGCMNBvPwoCxdqDEgmjiwo+XG6YokiCgZ5PK7sBo/5QAbBHIIVFEBCxiDDFkPEI7sTxBfL\nqPt1D7BMmwZBN79AECLhdjMnIqCBSZY8FjIe4KDj4uHIs2BcQd9gFkVpsW3bJPPz5wCZ/v4pEolL\nrK9v4jh1NjZqTE9n+d3fvZMPfvBfvawg+HJiaGgIR9dpOw6WabKwvs53D1+k2jKwxnby2f/8Vaid\n43dvv4nC4CAbi4s8PDfHhz/1Ke666w4URSYIXvhoCxqdFpFTYVqEKK02sq/TT4bzQBGFfsokgDZJ\n8gxTZp1eAlq4BBi00QCFBhUs2rSJKyJJYpIQEpPMHnwiCpRxkBDU6KNOEwWbJhJpZNLACSS2SJFA\nRkYCJCo0usTHwwU8dBza9BEhoeACDQQSAXlqjCJjIdFEsEhABRstKiELl/60RbN1CilSWUVGk6oI\nLUmvkWREqLj1IhOjozyxsvKaI+A1TePKK2c4ceICo6OzbFx/J81LZ7DCiJGZIUZ37mTTNLnqmmt+\n0R+HFxEEAZubVRRFJ4p8wEVVFaJIQpYtoig+b1aWn6OzegRhynz3/1tlW38Pim0zJQQrYUgoSUhC\nYBKT0YAkLh4RARYqMuAg4eCj45NFoOJgEKAyRApBnQZzdOglwkewQcgAKiqgESLTRqdDlnhgZxON\nIgNssYhBh4CQFhoGJkZXoWIDTRSKxEQnViK5GLgkEXTwmUAgE4EQOAjGEOQRSEgkkTmDQb/RTyj7\npBIG2STYrkuj2WR4fJzq2hqu6+JJEglFwWm3cTUN3bIIhCCTy8XEb2bmsq3jmwm2Dd/7HvzX//qz\nH/uLxguOmjvueOOPfTnwK01GLGuNpaU5IGLbtj7uu+/+171D37VrFyMjx3jmmQN873tHsKzrSCTW\nUBQZSRrgm998kOefX0DTEqyvn+Gaa+558bm5XB+Fgko6Pc30dIqjRw9SqwmqFR81XOWJteNMjgR8\nrTxPeuhGZLUHzxhk2dFw/QpxmHcehR1k8GiwyhkusYgALFxCitgEpNnGICqCkAYGNmni8LEX0jlL\nxNbcLULarFEmjY6PjkSHFhE+y93kkDWqbJFFQxCyHgsglT50aZ1GY4Xh4VGEEEjSeXx/iPPnnyaK\nhlAUn1yunyCwOXduhcnJiTeciAAYhsFt997Lo//4jwwoCt89fI4gmsDsLbB7z7U8+8QTJJVJFjZK\n9OfzjPb1ERaLHHz8ce67/36uuGKKL/zVl0noBfJD29jymoyGDr7fpomFRg4FGMBllQKL3TkvggI1\nVBwK6GywxTIOU2iYRLRQaGOTZ4EN8sAMEBLHuceEUVDBR8MghSBNgE+DfiTqgIvBGgZt+hFksJHR\nWWEEhwQuBgGbrCMhU8IjS0RIHY8cGoIWMr00GKLebd6ohHgUaJIGDCVkJpXGSCbpVKuUvSplNISs\nMaga5DUTL6iSz/Xh+j5aIvG6ZgK9+923USx+g6WlZ+gdmWUlsKlX5umbmUbbs4eP3H77ywrIf1F4\n/PEnabVkLCtNT0+8YTh16hBra0uEYZFEYh8bG+epn/0O41GEaDaYzEKP73PK87i2v59vb2zQKwQX\ngZ2Ah4KQshRFoxsymGCEFCc4j06OXgwadDCoksKgCfhYJNGpUUPQYIQ2pW6FSgdaeJTJoGDT6uaK\nOMThbj4TuChIZDBokqFFjTVaFDGJ0EkgkeMCbZp4jHYN4VuE9BFfExzgJBGDAiLFwI4EhpCQZBiQ\nYMMvU1Y1pgs2b9s3ywOPHEButTDX1ylHEX4QkNE0ykGALknY6TS53l7qqsrbb731sq3fmxEPPgg3\n3nj5U1dfDldcAQcPvvHHvVz4lSYj/+bf/D61Wg1FUV63eOrSpUscPnycWq3J1NQIzzxzGDAxjA4z\nM0NsbdWo1SrYdi/Vap3+fgnL6uGpp45w5523o+s6up4gn09z8OAPse0xSqU2tdoidJaYyWQYTiYJ\nyzal6jOcOvQsgTVF4F6iz6+TJ8JHZxOJLQxUevAosIBLDykyyGwhqNGhwBYJ2ng0CWkwQUiKH0VJ\np4Gn4EXR5DoOSRyKyN2RWyYhQ9j047OCoWxgJFM4jk0U7cUwpjHNOtmsRCIxQKVyhsnJ67jzzo/w\nzW8eZGlJx7bb5HJjpNPDOI4NzPOFL/wTg4MDv1BB4mvFviuvpKe3l4f276d5vMLMrusYHR2jXq9j\nAr2ZIc5cOs0Nu+JK2WChwLH5eQ4fOsTJAwe4bTTJ8uICi08fodbaJBW2CSOBhwVCwiYmEhIgU6CE\nx7TcQ1NWuRjkudR1zZg43ZaIikuOUEqyLjrUaVMiwiIuq0sMMcwEKSI8Ito0yLOChIuKRLur//BJ\n04NNLA9sI+gQISEQ+AjStFlBxsJkkwCbNgputxYmsZsOGtDGp04Lh9iJUU+n2VUoYIUhzUoFQ5IY\nkSRkEVAJPLY6HlM5hchUGZ2d5dTqKte8972vONDwBXiex+HDRzh8+CRhGLFv3yw339yDbXfI569j\nZmYGTdN+rqDB14MoinjqqeNcddUdHDr0GJXKHMnkIBMT05w+/QCWVWdz8yRbl46zzzKxq1UCv0ml\nYVBp12nj4A70099xqdkttEhwRpZoBhGeFNEUBSw2aRDgEjJMi0Fq1FGQ8NGx6EdCIcQloATYqMgY\nLOORJ2AQOIPBKsPojJJGJkChgQ2sUKCHEstETJCmQRYZFZkk0wTodGhQI8Qkh8s4Jc5gYeMguJLY\n/hsCiizTiCLKqEhKgg0CzAg0IXCiOuu6y+w1V5PtiTh4YZ4dExMkczkKsoyVTnNmbY2W77MURdSD\nAFSVq/ft46p77uGaa6+9rOv4ZsMvo0XzAt5sM2p+qWREkqS/AK4FjgkhPv0yPyeff30aEYAjR57h\nW996EssaxzQHWF7e5Pz5VcbHr2N0NA5J0/VFGo15FKWDJNW55ZY7aLV28sQTR1hdXWNiYoKjR4+z\nuhpQKGisrc1RrbYxgy3emu0no+pU3TpZt8lQSqFf+JxafYZRr8Y0EKHRIoeFgsIlKgh8fAJmWcNm\nHRsJCYMkAo+ITfJErHbbOApxKyckdt1kuv/eAjw0mqRx8HEwCRlDJgmYaGYaWGLXrvexsnKJVquK\nJGm47hDN5hYTE70UCgU+9KF7MQyDf/mXw/T2juD7bZLJOOsjDF0KhSEg4sknn/mlkBGAkZERbrvj\nDhaWYWwsLht3Ojo+IBDIP3YDbHU6aIkET+3fz1tGRjA0jZ2Tkzz22EHWKk3qLR9ZJGh7OglFQw0F\nRVw6aF1JoIQcRSjCpo1PQIYOESl6ULBoxc0uNCEjY5LHp9AN1y8h4VEgQEZHwcCjhwxbpLuR/wqb\n5GihMYqgnz4EASYVCihcwsHAw8NCR0HHZRWNDClKtJglZIyQNaCCQo0UAo02bSZpA+DLMv25HGvL\ny6SJdSwoKm7gIysavgRPV+vo+Sz5gQF233orN/6MxKROp8NnP/sXHD++Tl/fGOPjUzzxxBr9/Qt8\n8pMfe0kQ4eVEEAQ4TkB/f5ZbbrmTxcWzrK4uYBgab3/7Xv7oj+7nG1//Zx756iYEESguWTlPSjKo\ndlzqkccDK2vc3DeGkxtmtbFFfxTREBErnTRDuRnOl1UWog00OuSIyCIhEbKGYJKQPhI4xOe2hss6\nAh0FH4NLBETIlDHwmcQnS0AHmQCJFC6D+MhkUAGVXkJ8QCKBhsBHoY8Uq7iksdGRSHat4Vb3miCI\nw9g6UUSSuAXXCVNkjAydsEkj7FA1Va69ci837dvDroEBHn3yScaAmmmSSSbZWSiwZ2KC7ywsQDbL\nO++4g3vuuYdt27aRTqfpdDqcPnWKzbU1evr72b137889/uHNjmoVHn4Y/vqvfznH37sXTp+GKIpT\nWX/d8csclHcNkBRC3CpJ0v8rSdJ1Qoijr/acIAg4efIkx4+fA+Caa3a9OKjtBbTbbb785QcwzV0o\nikU6nSeTKdDTs5Pz508yMrK3GyUPudwkmnaGt73tHQwMTNDbG7C0dJ7z5x+jXp/hzJkz9PfnkOVp\nDEPB6yyh2x4yCltOC88tMZvN4TsNTFUi4TcZJiSFSgufgAYWLUaJcChiYxBQQMOlQNS1fsr4aHhd\nFb+MYBOHNLGIc42YhMjEYskNEmwxjcc4ERniZAIFlG1I0ga6Ds3mRdbXV2g263Q6HYQ4hKYN0GpV\nWF6us29fH+l0mkwmw/R0hgMH5omiUcLQx3FaJJMyhhHS1zdMuVy7PB+A14jR0VEMw8G2GySTGXK5\nLHoux8LqGe64tg8APwg4XyoxfvPNlA4dwtA0gjDk0acO8/jzp/HbDWzPJo2KLZrUhYaDg0+SIWQi\nbEwqLOGzJYYwmQJ8fEo0SCJwUFEwcMngENJmlgCZkAoSAWlAZ4MO/RgYKFTxWUPHAEJ0Eli4QAKD\nNhI2ETIyCi4ZArYIGcfGQqVFSAKBA2iYdOggABuFIuOMkUVCxqVNkWWykk3Q6dBRVTRNww9Dqo5L\nTrdYl1wUzaQTBQxdcweT2yf43f/13/7MrI9Op8PnPvd/873vrTIwcCXFosPa2jNceeVuikWZU6dO\nc+21l08f8tPQdZ2Rkd4XdWQ7dlzNjh1X47odarVj7Nmzh/ndJ2nunebsD49hiT4agY3nd4gQ2GqS\nAJVnayUmp/cRmCmeq2yQj3ws0aZOld27r2Njc4mVzZPYBGQQpFFIEOLRYYuQqBsKb6NgEFDGx0Cn\nSY4sfd349iQWdfq6Wo4mAhcJCR8fgUJAiI6EjkMNmwAPFxkbjRQNNvDJUMClSZIULk1CDEAlpNEl\nL1UkQuGSihqYgUNbeHRCQW1ujkY+T3Z6migM2dbfz8rWFk4yyXylQtN16SBxz/338z///u+/2Pre\n2tri63/7tyTqdXKmyVnX5eCBA3zoE5/4hWQMvdnw1a/Ce94Dv6zRPZlM3B5aWIA30Ox42fDLrIzc\nADzc/foAcBPwimQkDEO++tVvcvp0jVxuDBB85StPcfXV83zoQ/chyzK2bfOXf/nXHDmyRjbbA6yQ\nz+vceOM13Hjj7czPf5bV1YPk8zMEgU2tdoqRkRSzs3G1JAh8crkcu3YNEYaCIJimseWxvHQWS0/h\nNOfxQpmtoERSVVF8j4VakYTqoyeTyJqC7kW08Gggk6SFTKo7DLyMTwKBRpIEDgkCfBJIdHARmBgI\nUiSoElJFJk3swGkRE5EWEk3GQZ8k8mLTYOwDWUKSLqCqIe32FqDQbPr4/gBhaAMlPG+ddNpg+/ab\nWFp6iL/5m/+OqlqoapK+vhrnz7dQVYmBgR5M00PTWmxubqEoDgcPHuaKK/aQTCYv00fhlaFpGh/+\n8F186UsPUq32oqomPYMakiSwVYlnlpexJYnr7r6bgaEhNg4dwvE8HnnqKQ499hi5toMu6XSEylIU\noWKjRDppEgwQssESPVLAHknmhLCpiQCTJglMfBScbqPEQiDhUafEKA5ZVNpELKNjk0CQpUVEgxYJ\nBBERgjY58tSJ2KLZfUwNlwwKw0jkuEQFiWXSRAwQ4uJ3nTOCTWSSaGygskmER4Y0OcpI3WD5BE2S\nNIRHMgg5uLDIqIjIqSqWKdPQU1w7OMlousDBdoMbb7sfzytx9uxZVlZWGRjoZ2Ji4mVbNYcPH2V+\n3qNQ2EcyGU/WDYI+Tpx4luuvv565uUtvKBkBuOuut/PFL/4zQeCRy/Vj23UqlfN84AM3o2kanSDg\n9JmzrDkBs0pAQgjaUUSVEEfuwZBtJLnFJZEjP3wL05MpLi08idw4S1+vQiBqoHWYTGmstTqcI4mM\nSkCDBAEaISHQQMJB70qck7gYZBlCx0TuzpMx0PCooWChENHHFr14bBJRwu9mmXiojKCSRkJjg3UM\nqkzRYZkODSy2k2WVKmkibCICwEGhRYQgj4vEVgQpI4MlZPbIJTZabR45eoLT5YhmQ2J9fZE9fSYD\nhsFCqLJYC3CS0yxekvn857/Ivfe+k3PnLvKNf/gqA60GN161i+GREcYUhY1KhYe+9S1+7w//8A1d\n618H/O3fwn/6T7/c3+EFR81vDBmRJOkWoCKEOC1J0juA64BnhRCP/A8cOwe8MKq1Dux5tQfPz89z\n+vQW27a95UcvkOvnuecOc/31i0xNTfGd7zxCsaiRzQ5QKMQth1qtyPPPn2HPnhne//47kOWI558/\nQirls3NnyI033kGxeInjxw+yuLiEridR1avY2rrIxqrDtuQwQ4kECauHWjLBZmWBFTnADwPsIIcZ\npoicCpm2Ta+qskgcMSaI0ACDJiXAQKcPm3VkdPZikqZKmzKraLiUSdHCBVQckiSRsamiE1BGECBT\nZZyB4XfSbIWEwiYIthCiH2gTBDqKMkEYVkgkbsK2A8LQRZIGCcMkcBDb3mJl5WFcV+fYsVUKhR5S\nKZ1q1cO256hUzrK5mWF2dgdCJKhUzjIychsPPHCG73//CH/wB/fT19eHEIJ6vY6maW8IQdm+fTuf\n/vTHOX36DPV6i8nJdzEz8ykqlQqO49DX10cymcTzPJxEgqeff55wdZWcGzCk5+KUBjXB0UaRYSx0\nXHxa2EhskwWmapAxMySaLQq4yASEeCSw0GnSoYpNEw2JMVxMLGxUTiBosJs0KZq4wCAB6a6apEFE\nggV0BEHXGFqmgkIOAwmfeMKIThUJhXXO0cAgYAgoIyEj2CDOL1mk3q2XAHRQEAjAJ8U5GkzKCvV2\nxKLfJhAhQ2aCiXSSjGHxfKuG3D+GoigcOvRDgkDCMLJE0TPMzGT56Ec/+BJh+PHj5+jrm6BSabz4\nPVU1gCzl8jLZ7BW80ZicnORf/+sP8oMfHGRx8Sj9/Xnuvffd7NixAyEEK/Pz9PiCTqaXpZZMJFxc\nSWZT5OhhiEJvnWIF0uEoQ0NX0m43kBhmqbLKYO8QhVwf5+ZKOG4crp9ApwIY5FmiSQ8tEnikiWjS\nponKEAY1esiQZgtwKNPDKjmSdAip0iYig4mLQx2FYRLksJGRGUTuJs1IJPDYRojNGjoWKj451glJ\nkuQSIWkUBCYddFrItHGYwsKS0mQ1g7q3jh9m8YOQTXsCe8nn6h27KW9s8ej8PImFp/HlKUKlQHYy\nT6EwQ6fT4N/9u89y9dV3QFtnIDfL88+vU602uP76axgsFLiwtES1Wv25WuZvVpw4EWd8vOtV585f\nfrzgqOmOdfq1xs8kI5Ik/TlwG6BIkvR94FZgP/AnkiRdI4T4Lz/nsevEUgiIxzK8pB/wp3/6py9+\nrWkJLOsnbb2SJKHrfczPLzI0NMRzz11gx46bWF19gHZ7C8vqIZvtY2VljmzW5777buPmm99Ko9FA\nURRKpRL//t//OXNzLUqlDqY5hQhanDx4AUOHM6eeYOi62ynk01SqDZIJk0mlyUIYUot2IzNAJGQC\nfFrRIm3vDDZgELKLiAwKNQQ5IkZxuYhKDxoSy/ho6PgEtFGREfTgoSMhAwk2qbOARMgIUEehhp6Y\nIhIhmtYL9BIEK3jeIkHQAkYIgiaaZiJEBt8PEaKCJHkoikwUZUgkMiwsLFMo3Ey7Pcry8hqrq88h\nSf0MDLyDwcEBlpef5+zZw0xN7eMDH/j/2XvTKMnO+szz99499siIjIys3LNKtVdpA0lICAFaMDaY\nAZsGY9y4jX3UH9z2Od3MdI/tM32m50N/mub0abeNZ3w4gz09TTdgMwYZkDCSEKBdVVpqU1VlZeWe\nsa837n7f+XBDAlkCZIFUYszzpSojbka8ed/MuM/9/5//83ycQiG5K67XN/jqV7/JbbfdyFe+cj/t\ntgtEHDu2yPvf/57Xva88MTHB29/+Uo3D9PQ0g8GAwWCApmmYpskvf/zj/MFv/zbz3R6oKv1ghFRT\n7Iz6lJFUFRVD6KQ1jSCGldjF1yW7+PiEKLpGOvBRx061FgYd+pg4FCiRx0OisYZHh2lSzAAaA3YI\nOEPyaxyhomORQ7CQTGywBcwyREdFJYWGRMXGwmEOQYBLlr2ss0UGxtlEIwQhNtNIuoywGSEpkmGS\nmJARfVz6tMMYxDLZXBGpBFz0LrG5s4q2u4av6hidHqfP/c/sO/YO9u//fv7MyspzfOc7D3PnnS8N\nTFNVlUqlxMWLu3ie86JXTxj6hKHNtdcee133+4dhbm6Oj3/8wy97vN1uI2ybmcUDeJs7nOm4WHIa\nkxSWAm6whT/0EMYewthmbfUJejuXGfWHZMxlnjz/HKMwjxlVCZFIdJr0SbGPDDp1LhFikGGIjmSb\nmJh9NGnh0aI+rlLtwWMZHZshaQzKdKnRJcMs22SpUMLCYJMRkgwqAocBISoQkaZExIgOETEuDuVx\n0tQ0babGYmebCgZC7BLKJnbQJpYajiwghIIvFPLWDLFS5NLOJnOVBS7tbhOgcnRpgfmZeSzD4OTD\njzA5v0C/nyWVKiAUBd8LUdU8ly7tsP+qDsWJCV5djvM/LvzZn8Fv/RZcaR+448f//2ML/2oqI/8D\ncDXJMEcNmJNS9oQQ/zvwGPBaycgjwD8HvgjcAfxff/+AHyQj9977LR55pPn3DyGOA0xTJwgCpFTQ\nNJ2bbnoHDz/8IO32LmDS653l4MG7uPHGJHo2n88TBAEXLlygUtlPKiXZ3JQ4/SGm7aOPPBYrU3Qz\ns6ycvY/ZvbcQCJe2fY6S6uEHZTTmAJ0hIRlCdAIyKGNnxySRVxJRRiFG0BtPS0gsyhSwgB4hXQxc\nNrEJCLEQRPh08fCRzJPk6SqErFJKWXQ66wjRJgjssZdGC5CYZpVMxsJ1O4xGgkQCGyNlijguoqp9\n4ljiuja+b9BsRnQ6HVz3KuLYY2PjMtXqNNdf/0ucODEik1lEVb8vUKxU5njmmXt4/vltpqauZWFh\ngjiOOXduhW73S9x99yd+7GTGTxOu63LfPfew8vTTWELgaRo33H47t9x6K9ffcguDp09heBbDrS3i\nyGA9UjGZ4XxsIfQh+zKS/ZpOY9jGLxdJaxqqqoDrkpOSKPQJgR4DirSIsMgyxMJHQ+ESEhuLEIjY\nZIIOBVxifFpkcdlDRAqNLho5TFwmcOhRZQT0KJJ48CZS1IgMJhnWaHEN2jjdRpJB0sMgR4SBwwoa\nPiU8ekjkePYjg84ecuZBwriBaWjk/RSzmkU2DCkIlV27TdsoE6yd5ukT93Pt9bcDsGfPfh555MmX\nkZEbbzzGX//1Sd72tqt54onnsG0tiTkIn+eTn/zUFdMQrK2tcerkSex+n8UDBzh85Aj5fB5VVTEt\ni2Ixx9H0DDu799IOu1ikiKSDIg022iYd4VJNu6jxLpW0QTBUSWkG9khBiZYwOMsMI0wS348umyhc\nheAAa1wmQx6THn0kGg4lNEwcXHYI8cmRZoiPQoY0MVksNEJqxMTswcFGJeYF0/kIOZ6v03CJkGMv\nVp8UGhoGNgU8/HEFDfJMEaJqBmGYYUSNJUWwGQTkAU0IPAlbzTMIfS+5Xpdes0MhU8DXRsfZCXkA\nACAASURBVFx94BCalgive+0WKyvrWFYB13XZadnEnYtMGlm6ToenHn+cg9ddR2Fm5udVkR9As5no\nRc6evdIrSSoj//7fX+lV/HTwasiIL6UMgVAIsSKl7AFIKR0hRPxa31hKeVII4QohHiJp+fxI8eqx\nY4f4+tf/kn4/IR2VSplcLkMc1zl8+E5yuRyVSoZ+v0WhMMmdd36AZnOLTqdGsXiMT37y4y9eLJ9+\n+hnuuefbPPnkCr2eJAi66PoBnNY2OTNFGEIYhGQyBQ4Vp9AnepT3T+L2YloDH5cMJiCIgRiLXRaI\nyCG4ajxuu0ySyBuPw8ZzSC4RkSKHjYpDzACJiyDAQbCDQ0xIlsSsOkOiB7kEdBEix3CYIo5XCENJ\n4nIRAzNAiOc9g6LsxfczKEqXODZI9Pc6Um4TxwOiKPEOdZwmcTxBt9skDA+jKB5x7LO2ViOOYxQl\nRxCEOI7zkorH1tY2hw/fTi6XfDApisLMzH7W1h5jfX2dpaWl1/rr8A/G17/yFfrPPsutc3MoioIX\nBJy45x7SmQzX3Hwzp2o11i/3CLNTrA/6IJbQpIYuBFpcYtXpohl1hkTEmsZ52yaIIlxvl9V4wGhs\nHG7QpURIDp9JppBo5PDZh0aTIQF9JukwyQSCHaBCDpUOdZocI8AgyzYGKXLoBAxJk6eFi88iiQC5\niSDDLm2qxONUYMEIgxImKWBrbAeeRsFhF9iHxCJEAjVCVmkPAyQuuX6HRV0gY4W5dI7qxARVx+ZR\nz2FP6ghnH7uPo8dvRdcNVFUbE3n5kvHc66+/josX1zh1aoWjR/fQ7bZQFIe77/4jjh9/41s0AI89\n+iiPfeUrzJomnVqNhz73Odqaxm3vfS93/PIvM7m8jNJs8eiDz1CMdXKqQTPsYZMnqx4gUq1xtXCW\njc3zaKbLqOfQD7bxZZYUNWaBNAoSnRQWKRx22RxrO9JoZPAYMUJlPzoVJnAZYCEADQ2VgCwCFZUI\nh4gQSYg69oUR+EgKqLhs4TOLhkWRgDYODjYZrkanTQkDkxJDdilhA1u0GeFi4IQe+rjZ1xCCORlj\notJHpTSmSFtxiCdVijkFvB473Sb3PvkkC1NzLO+poAuBO+rRaF7m298cMKmVsNND9NADJeD8pUs0\nymU+9Tu/c0X2+82Kz3wGfuVX3hzOp4cOwaVL4HnwBppkvy54NWTEE0KkpZQj4EXFmhCiSHI1fM14\npXHeH4ZarU6nU2NlZQUhykSRzZ49If/6X3+SqakpAD7wgTv47Gf/htFonlyuhBCCXC7kE5/48ItE\nZG1tjS984UGmp69jaiozfu1nOXPmfqpqlY4uCIIBphmRm1CIA4+nnn6aBUVhsl4nDALS2GNj7xQh\nIRZ9DHxyhGhj++4KyV2PMVbTu4CGpMMIkzxDXEZYKAzRKTCFyjYNQsokXqt5En/IaWAdKQW+v04y\n7DtLsnUvhCEkWZ+Oszs+vkOSvDHDC6kqcVwmCBpYVh7HaaOqIapaQMqAOLbHDpZZut0huu7g+31M\n8/uVkXp9jVTKpFSq4nke29s79Ps2hUKWMDTp9Xr/8F+A14hut8vaM89w68LCixdQU9c5Mj3NYw88\nwMfvvpvVc+fYffBRIi1NS6kw1Ay00COlmWiKgilKXHR2GZgK1xcKXN7ZoRKE+FKlhUaagCoxGgVc\nulSJGDEkS5EeQ3xGqDTwUMmTRZAIhyFAR6EINOkhmCAmRmWIwRQTCDpsopPDB5KoPYkkIqKBjmRA\nYZxbMkAhjU5AhM6ANAEuJss4GDCe1YEZJA6C/ePXeoQ4EPh00SIDRVEwVB3dabFba9KhzokTD3PD\nDbdRr69z/Pj+l/mEaJrGxz72q6ytrbGxsUkqdZiDBw+8JEjyjcRwOOSRr32NG2dnOfvcc2w89RRz\nUmLGMbtPPcXfNhrc8qEP8e2tLerBg0Siy1AVrEUGoZjHESaaopKzCphmm4bTY7XfIq24WHTxmSNF\nD4MMIR4CB0lEliwqQ0bYFAjp08VGI4NKmiIeQ2JiYAoDh5g2gioxLt5Y8rqOjkMFlwYeHhKdAxSo\ns4U39hWJCVDYImYKB4s8KiV8fCCiiE2HKgYhHhopPGwCtcVbp/eyNayjuBE1X0Wo84hIIYeBEdfw\ngwyrjV1SSpm9pWOEeshWq0e90wHFZoCNohjYLYesaWGl5nDyAVEY8bYbjuOVSky/Ga66bxK4LvzJ\nn8C3fhK15E8RpgnLy3DuHPyI7NGfCbwaMvJOKaULIKX8QfKhAb/5uqzq76Hf7/PlLz/IDTf8E669\n1qfV2gbAtmuk09/PnVleXuZ3f/ejPPLIU2xtrXLs2BQ33/yRl5SUH374KTKZJSwrw/z8DGtrz+E4\naRQlYhB1KFp5iHVq3R0mgxornS713oCAmChIxm2ztBmwwoh5JAoSF4UuWSIiBEWSCRgNEEh8EtWH\nSkyPy+g4xOQp42MRMmIGly1ypAgBjy2SlApJ0oaxx1/nSELldZJhX5VEerOHJFprc/w99fG7O0AJ\nWEBRBIoCQdAik9EJgnVUVSMMzyJEEUVZJo5DXLdDtaowMxPS7V5mNMrh+30KBZe77rqFU6fWuXCh\niecZ6HqK1dUtHOckv/Zr171Ou/9yDAYDUorysgtoIZNhsLZGNpvln/3u73J6tcbzp9ZQ1mPKcRbR\n3UWNY6IwRDEMbKEzoUT01tY4EkmCKCaQE5SJaWJjMY0/vlRNE6DRoY9HSJaQEiYODo0xEUkh0DAo\nAhEhiS14wJCYDgVCFNqk8DAwialhUwNyaIBFA5MOIwyGqASoKGTHQ6VDIItBHhdvnIyioRIDOjFt\nYiZRcVC0KlE4TYcNimg0wxEZ18EeujiKIB7V6AvJffc9xJkzT3HXXddyxx2vfOcrhGBpaekNrXj9\nMGxubpKPY4b9Pk8++CB7NY2MaYLv8/SJE/zC1BTnTpzg+tveyQOPr3G6+y1mNRPTDvADlVgq+HGP\nSmGaQsFiNdokRURGCOI4YsQKUECQR5JH0kdQQ+CgomIQ4BGQoU+AgTLOLQrpAwYClTQT1BiQQ8FA\nIaBPH50+y0g0VDRiLDwa9DFI0UNFYxIfFRUHE0EKixgVSW+c3J2E5AkC+oywURiRZUDZEuiWxWSU\npmqlGHRs/NAhqdC4yLiFiBdxA42rqhWyms7U/AJhaHP68gU80eQ3/tm/ZXd3jce++TeEVKm3O1y1\nZPDrH/glitksD21vX9mNf5Phv/wXuP56OPojxy3eWFx3HZw48Y+AjLxARF7h8SZJxtTrjtXVVeJ4\nAsOwMAyLTCbRvXY6E5w4cfYlZeM9e/bwK7/y/h/2UjSbXdLpxMRraqrCzEyOixc3yOePEfvP0nEe\nY6FSIRVH+IOAdqSTNdKURx6ZSCdNyBxDHmMdnyE+2jhafDh+B0mBpKbRB0boSNK4pOgTY2KRwyQY\nd4UnSdF9wTOAkBCNhFCskRCQNIluRIxfsQe0EOIFAlJCyhcs0g6Mj3GAHELkxtM2Q0wzh2FIguD8\nOOgv6T+bpo6ULqPR04ShII6H3HTT2/nAB34RXTcYDEZUq/s4fPgQ3W6XL3zhjwjDI0xOzhLHEb1e\nh3y+yNNPn+P61zGX5AdRKpUYCUEYRWg/oCBr9npMzc0hhMCyLH71w+/j/2z9DT27jtPTWFg+Srvb\nIAwHmDnBZEOh7EvywIxpsSY11EAnxKaI4CIxKiUc8lxkhGCHHCoWZRI7fxcPhwFNpsigM0QjJkan\ny4gIB4N1plgjTY6QmKT9FuDio7FECoHKNgcZchGVNiEWNbLkCTCp0aFPCosFHLYJ8QnpopEhZIRg\nhI5JhINEgdghQqfHkFk04sjmcquOrWh4ah7LLJGemCE3eS2+v0kqZVIsFt+QfftJoKoqoZScefZZ\n8nHMxHiKy5SSMtBYWSEyTS5sNen2CpQXP8T5C9/BC4pIKujaFBg6oWywu9vCUK+iGteZkioKBXQu\n0qSOT4DBNBJnPM5tAypp1sigkELisYtDFo9tDASC/PimY0CfEjUqGNSI6KOwZzz/ItGYoY8c++6e\n4jpi6mgoFOkhGAIqHVwUXHLYFAALny16pBmQAYooDNCx0Z0eF2vr7JkqEdgRFiDxkGqAFBFlZQ8o\n0FcLVCtVunFAF5VMfpqjN8xRb16gWp2nVJqis/osC6qGkFnK+YByPs96rcbyzwPyXkQcw6c/DX/8\nx1d6JS/FjTfC448ngtqfZbyp7eBfQBzHwMvtpoVQxs+9euzdO8vjj9fJZPIIIThwYB8bGz06nRUW\nF29HlSP89gbrq9vU+zaRB/uCAXswCDCJ8Jkm4q0MeAoPlzRFYtZRGRKRI6lXXABMdEwqCFS2kKjM\nEBHTweYwmXFCZ4xghIvDiAIRMQn5uEBCRKpoOKjkCMgT4wCXkLJCEp+XaAaScv0Lfq0pIEJKgCcR\nIosQk3jeBQxjABQxjCG2PcA0lykUMszMBAhRR4gM7XaWe+45Q7EY8pu/+SHm5+cByGQyLC3NUqv1\n2Nx8ACEk+/fv5fjxX+HSpSewbfsNGfXNZDJc/fa3c+KBBzg6M0PGsmj1+5xtt3nfeMZtdXWVe7/0\nRTK158l1W+w0FIRWYmF2nsnpRcLgMspA5flRiB0q2H6AKcBSFJwYOqj4zJKjShoXFx2BhUuDSUJq\n2NhMo2CzPR7DTezsajQxaTOByQ4l2rRIoVAgRYkIhQYxNgdRx+RFJcm2sUkxjURDocOIEUOGTBIz\nSYo+Nml89qDTRSOFjo6LRKGHgo1PiiynyWu7pGK4KPs0owg3shkoaTQlTWFqiePXfABdT9HpZGg0\notes9/n7OpPXE4uLi7ipFDs7O6iGkUx4SMmG57E4P4/vOAwGA0ZKGU3LY7sdQn0RxB58d5so6jJX\nOUQYdun3R1iaR1GYyCgCVceIKhxnhadpUSUkg8IAQUSKOcooOIRY46pE8nnUIkeRYGx/1qWOicsi\nUMJlRIYsEYvjsd2IIQEOWWCKOgWeZIhFkz4+DiV0DCQNAkJgmYAc0EMSEnKYHrvkiNGZwCamTI9+\n5GEOBmw4HrEf4COI1AnqUsEIU+haDxmGXNze4cb33M6NY9fd06dPYjtbAOi6ydI1t7F78gEmhSCM\nY1Z2dtgBfu2OO96Q/f1ZwNe/nrRFbr/9Sq/kpbjpJviLv7jSq/jJ8TNBRpIPygcJAh9d/37CaL1+\nkYWFCvfddz/T05McPHjwxwbpve1tb+Wpp/4fGg2LcnkGyzLY3X0aUHHdKUDS9mJqnoNlSqTfJaMo\nxFFEhBwXZ5PL/hQ+l7DQyJNFp02XHUbjAHG4TIksRRwUXCZJkcIkYESLPlkmMBniUqeFjwUcJhkN\nfYgXtABpXEwmEBiESGwyRMwAJ0jaNhMk2pBpkupIlRfyP2ESVV1ACI04rpNOa5TLBzGMI7iuwsKC\nQq12nlbrGRqNENNc4ODBG6nXdXq9Da655jo+//mv8qlP/fMXXW49L2A0KmCaGUDQaMQMBvZPZ6P/\nAXj3XXeRzuV46sEH8ep1itUqv/Rbv8X+/fs5d+4c/8v/+G+pDiOm0wsU57PMT7c5u7YBqsehhaNU\nc8t85pnH2IjmmYgy+EJlENWQcQcdgxoaGiVUlHEiiUUdFYU0PTQ8coRESNL4eKwzSYcmMT4CCWgE\nOGxRRZKjj4GGR0R5bN+fJeQRfDyK+NSZJINFGROVBlkURoRASJ0RDjEKFXQKiS+FqBFJBUGAYIBg\nCZMNFuIek2pMysxCNGKYmaSHRteYJ1e6g0g1AA0pY4SIMYwitv0P27/z58/zzW9+j+3tJuVygXe/\n+0auvfaa15WYGIbBL3/84/zh/feTVRT6rRaYJtlikblCgb/b2OCqmUV0Zw7fP0kU6ZhWCU1bQBtJ\n4riGoqzhedvk8wVKZgWl0UC1HaQQiChpiQzIYBNjEuOiMIOgMA6k7NPCpY/CJCZLqBjU2WGEhz/O\n9lUYElDDoM40Jg18IqZQUDHGpmkObTJchUaaBjso9FkiJmSAN17FiB1CGsToOMwS4aIzT0TECIOQ\nHpdjk2tRuOi6BMVFPF0hcBzsUCGIBAWrxrSl0HVt/FyeoRsTx5IgcNH1LsvLRRxnSCqVZWn5GFYq\nxzOP/r+k56bIXncdH7/lFiYnJ1+3Pf1Zw3/4D/CpT8EbxL9fNa69NtGMOA6kUj/++DcrfibIyMTE\nBL/4izfxta89gWHsQdcN6vXz7OxcQNdvxLJUfP8SpdLDfPKTH/2RY2jlcpm77/4I9933EOfPfxvb\n7jM7qwJ70LQYXU/jOh6+d4mJ7AJhv8tOlNzVKMQwdgToA/WxA+oOAzRMMlTIM6RDb+yqatBjGo0c\naVIYRAgcdHr4dOmioyOJMcZJGC/oQSSMvSgssiTVDh+FGBOBQw75YluoT2LTIkiErzV48bnLCGGg\nqhZCjLAsl8XF9zA5eYDV1TOoap9sNkUYziDEEpXKIZrNBq3WDuCxtfVXHD26l/X1dZaXlxkOh2xt\n7aAo01QqSbXE80Z861v38+EPH35DHVoVReHmW27hbTffTBAEGEZCUm3b5nOf+zKGb3LV3AIAsaxi\ntC6z9+0z3PfUSR47rdMY9LkUVikZJTqezyiMUON5HDRMbNooGOOsXdAZYQAxKurY1G6SpMGjo7BL\nTEifJVSy+FwgIYNvIYeCZAeDIgEuISqSNjE1BF0MHLJMsYHPNCkgNXZXHZAC9qDg0EMZy1gjPBw0\nCnIClRiFCIUODs9TGAfwKbEkb2iktSq+muIpd4gfCjqdNsXiHJ1OF1V12beviqJ0XhSAvxKazSZR\nFDE5OYmqqpw7d47Pfe4blMuHWVy8muGwy3//79/FdV1uvvltr+OOJ5qwT/yrf8X3vvAF+t0uwWCA\nnkrxnUaD6g03cNPbbuSLX3yaUmmOSiXNE098AcfpoSiCfF5h//4Ss7N7qdVa4Gp0ADvaJXIdarTY\nZgqFJVQmEQgEK+zSxGFAmpgRLnmKxOOap6bq5NV9rPm7xMhxE2cHCDFIY6GTZhcfhReca5Kh/4gs\ns+gIdAYItjBRiOmSR8fBokCFmDQOMTYhMElEk5AASZ6ABSxaFGSKlNth7oZ34gwgaO/Q3jmLFcdE\nYkRWz1EtV5ma17l48WGmplwmJ1P8+q/fQSaT5r/+168ThkWE0InjNh/75K/yoQ+9/yURGz9Hosm4\ncAE++tErvZKXw7LgyBE4eRJ+TNzUmxo/E2QE4NZbb2FpaYHnnjuL43hIKSiX38v09OKLx+zurvK1\nr33rFU2RfhDT09N84hMfIY5j/uqvvsrk5FvQNJ21tYsMh3VmqirpaJ7NnW1Uv8Ucgjl0eoxokVCD\nJEE3Mx7gm6ZLZ2z5PaKCxxzwGF1CfHR0QkJcHCI2mWVANPZlHDGFzwSSLBEHSLwntoAnx7m+OcR4\nkDhigEYdDYMACRwk0ZA0SSoqDRJNwqGxIVyTuTkbw5hjcnKOweBZDCODqurMzCwzGDxNNns1YbiC\n5+WRUmE4hCAImJk5xGDwNGfP1nnwwe+yvLzMyZPPcfTo27l4cZVOx0HTcgTBgDhusH//e37qe/5q\nkPyc36+Wra6u4vtZdO37jylCoKppLp0+R2HyEJVjt7HzyNeZtFL0my0iYVGTIESKSMok4Eyfxwlc\nQpIRbgsdAxuNDiNmCCkBfVT6aBwlYJeY0xhMoLIDzBDRJqaPzgCPDAEpNEKgj0cPjQ4+OXaI0YkY\n0cVFYJFHAiEePTpMo2JRZJchOmlGIqQpDQwmEdRRmCMSQ4qKwYTpMF80mCgW2Nz1cAIHV1XYu/da\nVlaeo15vkM3OcfXVe0mnHd761quoVCovO6+NRoMvfelv2drqI4RKNgsf/OCdfP3rD1GpHHtxvDub\nLWIY1/HNbz7OW95y/Uv24vXA7XfdRa/VonX+PJrr0g8CKtUqH7v7bqSUfP7z9xJFFVx3i3R6Hkgj\npUEqlef8+VPMzc3zznfu48kntnDbaWptSdNtEaCiUUIiERhYZFE5Tsh5QoZAl2Ac5RAgaOGjyiJK\nwDhpqofJGjpNLEq4DKkTMgk4NHHQcdAJqaBzFEUEhNLDosYkAWV6NMf2YgEBPm0UzHEgRIKYLhFT\nGOjo5NGUNIqh0xkotC72qFYXGbYeZ07qTJgWfjhi5LToZasYxiHgJIrS4v3v/zXa7S47O3U++MF3\nEkURvh8wP38bc2PN1c/xUnz60/B7vwe6fqVX8sq46SZ47LGfk5E3DHNzc8zNzdHv9zlx4iLz8wsv\neX5qapGzZ7+D4zikXkW9Kooidnfr2HaWyckqpgqDwS52d5fIl6hynTJDyqrOIPboyvjFy38dgzwZ\nVulTx2TBmGehXKYzPMOct8l530fHIWQFnwE+Gio+WYZIFnFoIQmooxJhkLRaRiQjvUlfGS6N+8ez\nxEhU1knRoM8kCR3aIWnNTJBURK4hmbaWgEEcNxgOPVKpIY1GnV6vRbN5HwcP/gKGoRMEPhMTFcLw\nNOVykVptGyHyaJokilyE0CgWF3n++Rq9Xo9ud0i5PMPCwmG2t1fp93vk87PAnpe0z64koijCsrIo\n+RLd0YBiOhlF3d3dYHV7wKA6Tf2BezFcm4xSQs1UcZwBAZIeeaSIEELBsg7jK88QBzYZUULIAWq8\nwaTicSluA+skY7QzxKTQUDFoopMmQ4oQwZAKAS4qc4S4aBhAlohtVDrEWJQQlCig4RLTwGGXiDIW\nIQEWAT4FighsCkpMX0boMo2KgUMLBRWFQwjZpxt9F0/NEk6keXpzh75tMhIWbb3IcOMs8/PL1Gqn\n2LevwKFDgre//RpuuunGl51D3/f53Oe+hO/PsLCQiMNtu8fnPncPjjPkyJGXxswbhkUQaPR6vVck\nNj9NmKbJRz/xCTY2Nmg0GmQyGfbt24c+vkp89KN38kd/9J9oNCTp9FvQ9Q6WpREEXY4dO87sbIZ/\n8S9+hz/90/+Dzzz+DdpOFzPOEoklbFlEEiOp440nZCQGkKZNnZFSZBAPGDFBhI+ITWJUgvG0m0aB\nCYqASwXJCFhDx0IhpEWfMjHvQFUFbrSNxWX24qOiUSSHxYgBPn06RETY+EiK41dqwliRJhmiM0KX\nEdueR0OaVLUqKXXIzOQevM6I9qiFqkpmZ2/BUFWGwyHdbpb77nue++//X9m79xqOHj1CFF3k2LFJ\nPvaxD/+8GvJDsLUFX/sa/Of/fKVX8sNx443wjW9c6VX8ZPiZIiMvQEoJiJcx+ORrMX7+R+P06TN8\n+ct/x/p6nZMn1zGcJlelDezmEN122eldQMYjlkxBVgZ4AvZESY1ClRIHsIlIIdBVlx1K2K0uui6R\nhQKtrk8lvcRObw04T5Y9TJJGMsEAHYUJHFbGeTQWCQmxSS5yOpAmIqCES8zz6GgYRNRIkQhbMySk\nI0XiKaIBD5MYpU0jxBaGoeM4Gq6rEIbnMYwsntfk29/+c0qlCY4fP0qrtY4QNt3uLt2uh6r66LqP\nbffJ5SJuuOFqpGzQaDS46qp5zpw5Tak0zfLykRfP5dra48zNzfxEe/rTwvz8PEI8wFVX38bzj/4t\n5U4NA8HTl57HLRxgas9xsrvPoesZnry8jikrKCJLBsFQqolLrvQIwzNABkVvE9BEo4khfRrkkb6K\nqoyQZInjFpI0BiME/tgSPk3ALrCLj8THxKKMzgiVLVQkOgKDAVNIJGryXsyRYZsNtrFI0yUmi44q\nMiiKTlXPkY1adII6Ayx85omZIqYBSBoYbHg9jkwu0F/1sEWKHQQuB+g3BzQaj5PJlNjYaPK+9xW4\n+ea3veJd8MrKCt2uxuLi95N9M5kChjHLxsZ3XmIPDxDHEeCRTqdf591NIIRgYWGBhYWX3ozUajVW\nVjapVkusra1imk1mZuaRMqRSsXjXu25hZ+dhLly4wFf/+htkpIpROsigFdDz1LHfbQ6fxPRLxCkQ\nMZ7IEzCPZJk1dtAJMDAYcQaP5DMnRYlJRugkLj8tAlIEGPTIkyaFyiQ2O/wtw+g6OjRYYBMNFx+D\nOgKBho5KFo0JDDo06LOBS56YuXHGc2KXGNDHw+UxX2WERbP5JH7H59pMgT2zM6yu1jBTBXL5aZrN\nVVZWnmd29ij9vodhzDMaTXDPPV/HdUd8/vNt7r33Af7gD36fI0eOvNIp/0eNP/kT+I3fgDfz0NmN\nN8K/+3dXehU/GX4myUihUGBmpkC7vUup9H1DnmZzi337pn/oh2Icx5w6dYqH7r2Xb33zEeYP3MaR\nI3eyduHPEbttLl92MIwCUTBiWityzu7iqJKyZbIVhGQRxHFMW0o6oowuFhjFKkHsowpBU8mTM7uk\n81XmRRFCmyljgrN+RIoy4Vj+aiDGnp4TGPRwaZBUQwKEmEfKPLCFy14alMmwjUvMCB+fm0iIywSJ\nO2uLRDOSIskeXEeICEVRCcPsOKNmFSigKPMIsYiuR9j2JufPP4Nh5JidvY1ebwfbjnCcNq67yvLy\nLO9730dIpXS++93H+cxntpib20MQbLO+rlGtLhHHEbXaRfbvL7C8vPx6bvmrRqlU4s47r+O++55h\n7vg7aDc3efbMIwyq+1hYfBeKUNAFTGYr5MQ52t6IdGqZKO5DvINgLwBhOCAMLCAkEA3y2Rxaegln\nNIMiDVKpZWx7k2QEu4tFk0l8wKSNjsEMgoAhJtDDJcJEI880IRE2DQQWfXxUGqQYopHCRMEgTZsC\nHlVS1JGykIhRoxUUcuRJs/vi3FabpIqmEitltvSY//bEc8iwiiemcMQyvhOhKCaKYlMszjM19S7+\n7M++ydLSPO94xztedg57vT5CvPxvKJudYG6uytbWaRYXr0VVNeI4ZmPjDG996/4rkur8Amzb5rOf\n/SKwyHve89t0u/836+sKZ848M9Y7CU6depZcrs+//Jf/GxdP2QhZxg8VRpFNSqkiZEg4HucO4gGG\naaMbcwSBA8EARVyFHxv41PEpEDNJUpEso+ASo9DAIWYRgY6gjskImzpFCqTxMRmwHl/c4AAAIABJ\nREFUwvdwMIgxcSiNc4kUQMcmYhNnnEFTxKOMRoSkiakO8SIdD0Goqqip2xgOL6KgISODYeSyPTpH\n5G8xM5MjjiNsu0Zt2GZ++QaKRYPBoEIURVy8eJJOJ2R+/mYsC5588jx/+Id/zKc//T+xd+/eK7aP\nbzbYNvz5n8Ojj17plfxoHDwI/X5SxZmdvdKreW24YmRECPGLwKeBppTy5Z+IPwYf/OB7+Oxnv8TG\nRodUqoDjdEil+rzvfR/5od/zjXvu4fLDDxPsNjig5PBXn+OZxgbFlIlZSHGp2UKGCpPZMmlziXoc\n8LyzieHr7J9c4HJnm5Hnc4EMM8oc3VjgY+LJadS4hSrXyKYPstPyuGZ+is0L9zMXDkljkQLyBIRo\nOOhjSqKTYhKJJGCHGA0pOyQmZknmTMAsXZZJ9CA2iftqQFI9WSRpy/RI3DgT51YpV5BSJZNR0PVJ\nhsMeun4NQTCgWLwKEKRSBwjDRzCMCCE2qVangBVarRZzc8e4+uq96Lrgy1/+b2SzVZaW7sTzRvj+\ngKmpNsNhF01Tee97j3PTTTe8obk0Pw7vfvc7WVpa4MSJU9RqU6j6XsJTPisrJ1HVacqBTdkqUVZV\ndNUhnXXQ1R5dr0zohwhZQoZrWKKAySFipYtGD9+NcEcBmpEnihpEkQOUkTxLRA9nHHxnMEWGIi1G\nqKhozKCyRpkCMR36hCjsHxtqQZceIetUkKgUCRkhKZBGMsCizgaSNJIKCkN8PFymUZQ8cTyNrk8D\nXVKpKRRli5Y9wDBuRdOmUYMARRkCKaRcIZ/PkckUSKUO8td/fd8rkpFKZRIpT77s8cGgyV13vQPP\nC3jkkYeBNHE84tpr9/JLv3TX67yrPxqnTp3GtrMsLiYVOtNMo+su2ewBJibK5HIFnnzyW0i5iT1c\nJq9ohCgMbElEGssIMAKPQG4jZYOYEaqWJ5PJ0+uNgIgw7GBxkRQBaaaAmB4RQ5Tx37WPZAadKj49\ndDQEJQIUbHqMMAjQxhNQFfp0KFFFwSFpw0zioGNTQqoGQnSSyR2xScbIMpsLWGs3GUbTqPoUrn2K\nAlnK6hSx72KjUEPB7LeYnc1RKldx1QKGprK0dJxebx3fH2AYGYZDl1TqGoQwUZSAiYlF+v0RX/zi\n3/Jv/s3vXcGdfHPhL/8Sbr0V9u270iv50VAUuO02+Pa34dd//Uqv5rXhSlZGHiERObwmY92ZmRl+\n//d/k+eeO8XOTpPZ2f0cP37sh9pV12o1Lj76KDcvLfHoTotipkgqlUW2d6iPBsR+QKgUQNOTdobf\nw8CjQY6zQcx6c5dBHKKrEIbTOAiisRH0hAjwRZpQmphaBl0r0HQCdCumgpE0X9yQLDohw7E9/IAI\nm5AJYpZRySFwx2r704CLYZiE4WXieImEcCQW4QnpiMY/WZpE8JoBNki0JA5x3CKTqWIYS/R6bXTd\nRUoXx9nBMEqYZgHPMygUFllaKnPw4HFU9Va2t1ucOXOe558/wUMPfZU43kc+X+Hv/u5hrr/+MFdd\ndTP1+mP8wR/cjWVZvFkxNzfHU089y7lza3zvexdpt/OUSgeQssVad0g4eoS85ZBVdPxwiJuZoVKc\nZ3e3jvTbWELFIoumGujaNH1vHY9ZFEUjjof4vkOyBxl0QibJowM+GhJvPK5dJk1nbOwdoSGxGaJQ\nJI9JiI+PQGeKITZVhvRoEmJikTTqBgS0qAKT6KiE2MS4SGrIeBIhPBSliaIopNOzjEZtVNVAiAGF\nwn46nSZCqCQxUqMXp6BMM0+zufWK5255eZnFxTTr62eYmdmPoqg0GpuYZpu3vvX9FAoFbrvtFjqd\nDvl8nkKh8MZs6o9ArdbGspJ1DIdDdL3K4mLA5uZZGg0d05xhdrbCyZO7HNh/lJ32ExT0LI1hnTgs\n4sfbpIx5jIzLREmn2+0yNWXylre8g29963tsbY2AS0zTJERnxCoWM2TJMmIHjymGxJjkiImI8ZDE\n+GgITOoI8hzGeDGRaoIePWqMKBOjkMLFZQuTkEksPQXYRNE2btTGd8GLNAK1wuz81RhGh96Kz5y1\nlyCI8XybeS3FelRmI9oh3L1MxQK1YDG9dJhOZz0hU1adINiDlALXXWE0mkBRBLOzB4iiHBcvrr+h\n/jFvZkiZ5ND8x/94pVfy6vDOd8KDD/6cjPyDIaXsAj/RL32hUODWW9/+qo7d3t6mKASKolCpTNBo\nNkmlspTTec4Nu6w0L4Pr0I3T1NwNrMgHYZERe8GIcVICN44oVktsXGhixGAoKuk4hyosfCWgg4If\nKQRBk9YgJh+q9HWFchSxxSYXySOZGHtUuJiUCTAwxSxSQkTiGxGyF8NYp1o9ys7OCeJ4jcTTNUkK\nTizeAxKCkugFkhHfZAYjMTzLoGkzxHGDOK7heUXCsEwUtYmiTXz/MPm8jqaBpumUy4llvqal2Npa\n4cKFbTStxKFDd2CaJkHg8dhjZ7j99huI4xStVovZN3E98Nvf/i4nTjQZDtPMzt6Bql6k1xtRqVzF\njbe8hZWL91GpDOjuDpDmASbUWba3t5BygBAtFG0ZU1NI6QI/8BAxqGpELpXDc0eosSDGY0AXAx2b\nJRx8HEZoCCQFJB5ZFAR9Qpp0aSCRVEgjhUCXKRqExAg0pcj52MNCISJmhDNOBfZIsYyLhouJJioo\nMkQRbRTNIgz7QBZNKxFFQ9JpDcPIEcc7OM7z40TWXWCApikvkpHhcJN3v/vAK547RVH4p//0n3D/\n/Q/x+OMPE0WSw4cXec97fu1F4pHNZl8SonilsWfPJI8/fgpIBLiKYrJnzwF0HY4cmWL//mt56KGH\nEEKlVK5SL+SIhz5lQ2fT38YPN9GMEUuL17NnzyE6naeZmdG4fPkShrGXfF5n2P0OQ0xUFlEQ2FzC\nYIRFzIgUNuDSQcNDYNPEpISBQw+VCWx2ccbJNxIXyQTbZOgwQKGKi0fENOCjaSVMcxFd36DdXiSM\n6rjCxTRNhEjB6CJlw0QKiAnJGRpZTSPtWeyKKlgTnN/UyXsh1epZFCXF4cNv5eqrF/nCF/6Cfl8j\nlTpGtzvCstoEQYUoCqlUJn5ORMZ44gkYjeBd77rSK3l1eNe74E//9Eqv4rXjZ1Iz8lpgGAbB+P/z\nC3OsXNqg12swiiOcbgNVS9FWMhj+BH7sscMuaXwimuSVJUxjAi0K2K13sdQBWVJkNVjzBgip4cgG\nKSuNH0f48SqDnsFccZrTwxqOm5SCfTJEpIiIkWTHVZEm4CfTLdGQOAZDnwIi+v1phLgWIYZIGY7/\n7ZK0awQJ8dgF9gL7SYjKeWCEEDsIUcayWkxNHaDRSBPHJqlUBcMI6XQe4xd+4Tbq9S0MI9HTtNtt\nHnjgezSb50inj9Nutzh79nkOHTqAZVmoaoG1tQ0KBe9HTis1Go0X75qvRMhWFEU8/PCzzM7ewOnT\nf0Mudz2ZzCS12nm2th4ln59lbj7LRz76fk6fXuWxx9Z4/vnvEQQKqtrFkDNEcYq+r2KoklB2qKQr\n9BQXogbFUEMVGWxpY9Pk/2PvzaPtrM4zz9/+xjPPd56v5gmhWYySTGMM2GATz4HYjhM7jiupVUmq\nq1fVqiyvrlq1vKp7pd1d1b0Sm7KrHMfGxlUQA0kIxgxGQgg0IwnpDrrzeOb5G3f/ca4Vy0AFbEAI\n5/nrnO/c79x99j5n73e/+32eR6dnRR01jUDgkMUgj0cXVUoEmKcbF0MxyPlNXBw8aVJDUCOJRw1V\nmEg6gAI6q2nJgBtIEiuE8n4EAXRiGKaO57chlDk0LY2i1EkmE6RSUYLBELlciHR6LbWaRy43jmHk\n8DyLrq5VSCmZnj5JODzHJz/5+6/bh8FgkDvvvI3bb78V3/fRtHf3VLFp00aeeupFpqdHCIVS+H6T\nUmmacLjJ8PBmVFVDVSWRCHhek4ENO5gbO0VANAg0cjhqmUz3TiKRNIXCCdatM9B1nx//+CC23YOm\nGfiik4ZMEkclRAyFNBYXV/x2HVQELlN4bKJV06VTJIeggo6BTTs2EXw0WlPvCFCggUYrsxlDAJIi\njUYZKes0GjaKkiCZ3IXjvEIsZmLbs3hWnX4dFuuzqL5JKhxAAgVPQ9MhE1qHjkLHwHamp5+is9MG\nZpmfz5PJdOP7GqpqEIl0EAxuYHz8KB0dko985J9fqSF81+Eb34DPf751BHI1YMsWWF6G+Xn4OTu2\nqwZv+wwjhOgAHviFywtSyk+93f/757Fq1SqeDAYpVqskIhFuumk3Z86e55EXjqK6Lh19OwiGfBbn\nS2i+JOKaZJlDRWHUWmQooFGr1Cm4FXQWyVIj62XwUGkwhyZtmo5Go7CAL21cR+HYUh1EO6pox1MW\ncfw2ILyiwyjx0PAwacpRPCeCAGzpgF0BFnCcxIrvTBpFOY2UMRSlihBRPG+RVibEo3U0U6VVNxJH\nCI1IpExPT4J8XmXPno8wMXGBqakxwMY0TcLhOKGQw6c/fROapnHu3E954YVThEIZduzYxvy8STTa\nxcWLU8zORlm1aghdN5mdHWXXrk2kUqlX9bFt2zz00KOcOjWNokTx/Spr17bz8Y/f/Yao1m8VHMfB\ntj103URVNfL5RWKxNnp7t6JpAk2rks/rHDtWx/MMarUZwmGTQGAL+fw8zfIMuD6uGyJbyxMiS8VP\nY8RsFHsWiKAoETx/CkmAOsFLgS64SMI0OQ7kqaPjouMSpB2DGDEmaGIRAlwURUXIJp6XpVXE3ItF\nBoEOOCtCWSo6FXQEnmxQtRq4QiUcKaBpCtVqmXx+Ed+3aWtzSQZd7Oo5EskBursHgV6Wly8QCFTJ\n5R7h+us38ru/+6f09va+Ru9dDkVR3lX1QK+Fubk5pqenwckzevIn5PMWhVqTUDzDLbd8FE3TyeXm\nicfrbNu2hlzuDJWKxnJxhmJpASXSZNf2W5mfn2BhYRTTVDl0KAiEqVY1pJRI6WOYvdjNClUkQaor\nYxTGZQoNi6C6GVetULdHaOU5JS4W/koeVKxkPUBZMRAwaW0mkrQynJ20RPZ8PM+hVluidQw7j+No\nmKZPPJ7BNOvkK0WqjQUyepGcFaVUj1FVJBZNhjPrCIcS5BsVCoWLFIuSWi1EV1cPlUqe1auvQVWX\nOH9+kmx2ClUVhEIVfvd3b2Pnzh2v0cO/fqhU4Ic/hLNnr3RL3jh+vm7kk5+80q1583jbgxEp5SJw\n4Je59ytf+cqlx/v372f/r5AvCwQC3H3fffz1X/4lwXweHbA6Mnzgs7/J8ScOEQ9tZNSZwdCTzExf\nRHGjKHTQrqrYUmWsOIIiLSLU6dd8kl4OT+bIoVMRUJcqBbkaxbsG2zmBqg/QtJqE9fVIfxnbX0bS\nuyJrFKJFDF4GHFzquNKkVRNi0HLdjaxQlKsI0YUQEQKB7biujmE0qNdn8P0BWkFIlFbBawpNEwSD\nMZLJXrq7JeVynXp9mp0713DbbbsolUo4jkuxOMYnPnEzBw4cQFEUJicnsW2X1avfx/LyDLOzx0mn\nN2NZDaanDxOP16hUptm/f4B77rnzVf3reR5PPvk0J08WGRi44VKqd3T0HI899gQf/ehdv/TYvVmY\npkk6HeYnP/kJS0sN5uePYprDxOMhyuVzdHRcw6pVQySTvZw8WWBhwSAe1+jrG0aIdtzYAPn5F8Ad\nQxENJAIUm7DSTgiVkObgyzpRFPIIPFIIOvFRYcVxqJW5SuGtuIm4GEz75wmKCo6MrIwXGP4UGnVU\nBqlSwCNJixWloBHAJYRkApcC5grt1GUJoQgqFUkiFKIt5KEpRYqLk/Q7bdy4bSeT0xNcHDtCavsu\ntm7fzu23f/o1NUWuZti2zQ9+8NecPTvPmRNjOIUZupJw1/uuJR4O85PzF3CcUebmxhke7uXeez/P\nxMQUf/Z//L9MvPI8oapCuxnDDEeYOH+crsEbWVqymJ+fQ8oynjeN55lI2YllGahqHPQqVecEGg7g\n0aCJQoj2cJ6CexREGkERSQ2V1Aoh38IliCCIJAg0UCijIJAkcelFoYnCMi6S1iYjRivbGadVO1Kk\n2aySzQaJBFW6QlFWr9+AurBAZ73CUnkRKQQ97dvoTA6Qr5dRI2GWl5cJBjdjmlVUNU4k0suzz57D\nNLvZuPEDNBo1ms08vj+ClK+WS/h1xfe/3zr2uNoyDO97H/z4x/8UjLwpCCF2AF8FNgsh/h74kJTS\n+vm/+flg5K3AwMAAX/yX/5KJiQkcx6G3t5dcLsdzf/ssEV8ifVA0STCapmy3CJpJXUf3BaYdwVgJ\nHlajEtFNZm2LBAq+hAKCiFfGUidRlF6QElXpo+lr+F51JQVfpCVu1qKAtiacOVrHLh6tiaew8jfB\nldcEUtbxfbDtRXzfxfdnUJQBpMwgJahqCt8PoCgFDCNAPN6BaWZZsybKhz70BY4eLTE9XWRqqogQ\nClCjtxeuv/76S7veWCy2ch4tyGS6yWReJpsdo6NjLbrus3p1jFRqNf/qX/2zyyicc3NzPPHEs5w7\nN8GRIyfYtOl2fN+/JKDU07OWEycOcvvt74yJHrTqkExTMD19jra2XQQCKWZnz3Px4jyhkEZvbwKr\nVuaHf/UQeCHcSpKp/BkajWcJBDYTTwxRK1cImxF6UwU2drQxurzAcjNGNr/IOjVA1SkiBAjp4hNB\n4tGq42l5p7ayHBHEikG8TQ3JMHV5FlgNqMSYR6OOg0KMNlyK1PBoUXZDSCSGSGHJESQzVFFW/ocD\nXhRI4boW8USIQnWJQSWCWmwgbY8NQxsZtAaZrM/xhS98nO7ud4cOzFuJZ555jnPnqoTDa8kvHMa3\nTRZzgvPTL/K+7X3sGuxFX9/HPZ/6hySspmm0KxbxrnU0G2ESiTSNRhVrfoLlpVmWlwWNRgPT3Eiz\n+TLQhu9b+L6NlB7ST+KTosR5VPrxCKEiafg2GzffQLmyzPx8mUoljUcXHnV8NBTqQAEfFxUBlFBR\nCGJQx8cggUUMhRoCC5+tyBWfmtZckF0pno5QLR7nxq0x/uBT93BidJQz58/j5/P45TKerjFZyRHr\nGMIIqoiajufVCIUCqCosLGQplUrE421I6RMOxwgGg+TzLzM2tkC5XCYWi12J4XxX4f774d/+2yvd\nijePO+6Ar361VXx7tcWVV7KA9SjwjvMBTdNk3bp1l57HYjE27NzMU397nOVclXpTo2Y1qYk6UeFh\nyzBNmljoOAh68NB8SdETNEgSJUoQSR0dWwmDO4MjtmG7s/h+BolDK8BI0doNV2kVoras9Fqp2Sgt\nRswWWpPPBK2ApERrUTuFlC5SltF1HzBRlE5UVcNxgijKDJrWi5RZEgkP1z1FR4fE8yzm5pY4cuQJ\nTPNaEokBXLdBvV7Eslo1Ij8rQk0mk/T1JVlenqGtrZe9e9/Hyy8f4dCh76KqLqq6k7vu+shlAcXy\n8jJf//qDGMYQPT03outLjI4WaTZPsmvXdoDWMYTQaTab71gwUq/XmZ+vcuedH2Zk5BxQZdu2VYRC\n6zlz5hUyqRjPHDlKR3QATTXw6x52HRampxHGLL29B3CsLNFgCSEUzswtYYQkMc1n2VTIuVkCJhRs\nHzwVGKM1vm20siJZWhTsJK2jljStbMckrfGOoZInShyXKBYNbIqo6LQC0E4kApcanizT+s600aoR\nMoCW2JeKiutVmMhWUd0mw3oHll3m4HPPEY9mUFQNL1RkYmLiPReM+L7PoUOn6O7ezZEjz1IsqfSm\nNxELC8q1GS7OCzx/lsFfUKqanJiAfJnurkFmZ6soikKjUSepJ5nNz1GtVlCUDWhaB6o6i+vGUdUE\nvj+LImoI1cV1bRQ60EQnChbQTb4xR/X046hqBNuOo6rr8T2BpAwM4/MSPnkUkhiksakC82h0oFOh\nCfgkViTUVFR83EuMuQZCKEi5hGUtY/jLxBI9HDpzFtfX2LB2I3f3dfHS3Bwj2SJP/2QUZ8nE9so4\nXoiu7n40rcnIyCzZrIEQHvX6ONPTLqlUHEUp0t7eh5QatVrt1z4YOX26pdfxgQ9c6Za8eaxeDeEw\nnDzZMtC7mvDurkp7B6AoCvd9/rM88fT/imX0UG+qWAoo2iKqWEDTVYRiYNsNhnEwEei+YAmDMCFU\nBI4ApILityMoIJUcQoaAJv/AdonQynyM0lpw2mhphCRp7YQNWtmSBi2dkQo/o45CH6oaADIEgzWa\nzWl0PYCux/E8cN0sQpzFdYtYlk0mE+aGG36P7u4hxsdHsO0EfX0umrZAMBhkcPBmXNfmpZdOXsaI\n+Y3fuINvfetBJieXcRzBK68cp7t7I/v23QJ4/PCHz1OrNdi3r6VNcfDgEYToJpPpQUpJIhHFdaPM\nzuZYu7ZEPB6n0agSCkHiHZQvbDabgEEm00Mm8w+fz3FsLlw4zbnTJ9CVMJpqsLi4RKlSIGqkUZ0Q\nBWucycmHCBhNNKWHoLqOUEgwUythu0Xibd1MLc/hNDU8GcS7pKDboDVmBq3xG0dhM1LMowlwfWgx\nngSQX7FN81tHQBg45FdsEF0kZ/mZA5JE0vqeWLTsAtKoajueV8IniOM7qLIT189StRoEiBJQU6iy\nQcxI8Up2iscf/zHXX82mFa8B3/dxHA9V1Zmfn0c3e1ayfoBQSEY6OD91jI23XG6aqRsGvoBoJIIQ\nOWzbolqrMVZYJOebeF4vQkRpNM7S+t3m+BmVPh3toVK7iEeupZGqmQTUDjxFxW2AbefRtDBS9iJE\nEFWzcd1WXUnryGUEQQmfLArTBFBXvGciyJX6E48cYKFcyp4KoIaUBTxPRdOGCATbOTrW5IXRBdb3\n9ZKM6Rw+e5yGkuMju3Zy82+28dcHT7GQrzGTc4jH+zHNAKbZSzqtsrg4QTLZg+u6wDLr119HrXaR\nUIjXrAX7dcM3vgGf+xxcrer4d9zRkq//p2DkKsSZMxe49f2fAIKMjl7k6NHT5LNpDF8Sy5hML5ZJ\nqTninmAWnTgKkgASQRXJrPQpE8NEBTRcfwrd2IvCFI4TRMpVtBYTjVYQMk9rYWqJyrcCFXXlsbFy\nfZlW8NIEViPlGUzTwLICuG4D359jeLif4eGtTE5OMTExQjCokEh4tLXtYnq6QF/fGgwjQjK5nUpl\njg98YB+G0dIGKZWyFArFy/ohk8nwh3/424yOjvLYY3/Pxo272Lx596Ujl1gszRNPPM/27dcSjUa5\neHGORKJFDxVCsHnztRw69AKNRoBiMY/nNSiVRvnkJw+8o74X8XiccFhcskf/GZrNGjt3ruHIcyex\nnDilskaxMo+q5OhNriFvW4QDOpFIgMXFV0DJkE6FUE2Xpdkl8hUBahlN60MNr6ZeyQIXEKKElG20\npPhtYAaBis80SIkUErhISzHXADwcahTRVn6ANSyaqGxBYRkPB9iMouhIWUaIbnz/JDCAEAFai5uN\nxEBKE0VEcUSEvCzTQ4iAGcX1qtTsLFo8zUvPv4xlWZim+Y6NwdsNTdMYHu5mdnYGTdMIJdIUyxVC\nAQNddZHSp+yrbPqFGXn16tUEejooZrP09XVw/Pg5srUaeTeIavZjKCZStuN5KXz/RXxfpZXRqlCu\nX8Bzp4AaLlFM6WK7HpbXoEWvjqIoHkJIXLeMlB6qauD7daQMAR1IIigsEcUjgsoiy0i6MXFwxATI\nBh4+rSCoDVhAoYCPgiKixGJJIhGd2ZxLf9tq5vJlUrEQRbedanWJ4a4uAobBNatWUarVePLoCY4t\njlEqdZBIRNC0JTo6PFw3SiLRjW3PUi4vIuUUH/7w599T35FfBs0mfPe78NJLV7olvzzuuAP+/b+H\nf/2vr3RL3hzec8GIlJIzZ87w4jPPUMrn6R0e5rr9+/+nmhiLi3lisU4CgSiNRpNm02V2dpaZiUUa\n6gzRYIG14TjZgo5oapwlR4AGEKKMQY0OfDooUQTKeATBG8EMuEAU36/gupJWRiRNq2bAolWomqO1\ns7ZXnhu00v4/Y8moKIqNlAHC4b34fgPfB98/y9xcEM8rUq3m6eszSaW2YFkL9PdvJ5+fYXZ2llgs\nhqJMImWQarV4ST6/VFrkuuteLStomiabNm3isceeZf36LZcFEZqmAzHm5uZYt24dmUyc6enypQW/\no6Ofm2/WOXToEWxbob19DR/72AdZ9Q7LF6qqyu2338QDDzxFIrGGaDRFuZyjVBrhi1+8l7VD7Txw\n/4PkCwvEDJNweAhF6NT9Zfr71qKqOqrqsmP7zWTnxrkwOodU16LoQRTlIprWoo6GokksqxshyjQa\nZxFiAUUJIKVOILCZZuMMviwhWaQVYPbT+skFkQSoYRBARXIBhQIWuZVMSwIhsvi+gWkG0bQmltWL\nlD5SNvH9wkpQAr60cP0sghCLuEiRw2s2sPwFmgQZ6L2eaqVIvV6/tNDkcjnOnTlDo1ZjYNUqVq1a\ndVWapN1228184xs/RFFc4kmTvGczm5ugrytMXlPZvGvjq7xW4vE49/7hP+M//+//AWtuiqZqs6DV\nUeOr6Ex0UCpVqFaLqKqK66YIBMBxphGihOuG0NUehC9wSNLwymjSxSWGSw3hNfH9GlIaSLkVaNVr\ntI5ga4CDj41gnhQ6DhYQxmaZsGqRMHwadoyaFwVxAUM9hS8dPC9F6xiwBlRwnArxtp2UfQfqDi9O\nz2O70Cy5PHf6NAe2bUNVFBKRCB+6fg+cP8/4UpNMRqW7ez+OcwPPP/9TpqePUC5fZNOm9Xz5y7/D\nzTe/aSHs9xweegh27IDBwSvdkl8e+/bBxz4G+TxcTYmu91wwcui55zj+2GOszWTYkE6zODHBg3/+\n53z0i198XSpjX18Hhw4tMTFxnmzWIxrtJJWKsrAwRlVRsM0gy45N+8B12EtzSCfF+eoELjGiDBAi\njIOPr1goRoq43kXvwA48bx5F6WRx0SKXO7Oyex6m5SfSBPpo7ZZfAlYhhIeULwMmQrSj6xlct4SU\nM2jaOur1OlI2SaXWousBHOci1WoTzzNob1/N4GA7S0sKrtsgGEwwN7fMddeaJGXeAAAgAElEQVT1\n09kZ5sKFs/j+dhzHZnFxglCowJYtm1+3H0MhE8tqXmaIBiClc2lRu+GGnXzjG48QiSQIBFpeJo5j\n8f737+bLX/7cFaWEbt16DeFwiKeffoGFhRG6u9v5xCfuZmhoiFQqxdLUFD/5m0PkbY1yM0+VCqG2\nFN3dm8nlziOERW9fL45bZJU2SDZrIefKFIuLWJaJEGl0XcX324nFutG0boTIoWmDNBrzeN4JdMNF\n0/pxnDlcN4ZgGN+3kCtWAB4nqBMmTDeCBmlFw/FFS7NEJPHwUBSHcDiKokhcdxLXzeC659G0fjQt\njuuWUdUcmjaE1aixLF0WmnOEzXYGonuYWXBoqPOMjo6ya9cuXj59mh9///u0CUFA1/nJT3/K0bVr\n+Y1PfxrDeHc4L79R9PT08Pu//ykefvgxHnzwGfoH1/H+D3wIwxBUKhf5yEduec3PtGfvXlZ9+5v8\n2Z/935w4Ps9Q3SGVuhFdD5PNzvHKK2eoVn0MQyEabVKptGwYfHcQ6fogZhCyiSvj+EzhUkAwT0Am\ncaW5wogZoXVc5wM1VGUZX6aRcomWlmsKIRyk2oWuJkh3Behpb2d0/AhWqUAyrLFpaC/pWJKnT87R\naDZw/Sq6HqVcLlCrzazUj0k8OjGMCPnyJN/78cs0mj637b4WQ9fJlkps3bmTNZ7GzIx56djywx/u\nZ2zsBJs23cZnP/ub79iYvdtx//3wxS9e6Vb8aggEWqyaRx6Bz3zmSrfmjUO8EYfbKwEhhHyzbWs0\nGvzFV7/KnvZ2jBVLcYC5bJZqVxef+tznXvO+XC7Hn/7p/8WZMwo9PVupVMqcPfsi0aikrS1DubzA\n0oVnMPwQvtuF1bQoeItIwvjYqKhIbISaJNLWSU9vG4GATT5fJZutEI+vZnz8xZXz6DSquoyuaziO\njuu2UvdCpJGy5UcDJTStF00L4vslbHsCTbsFIVRCIZXu7gyxmEc4vERnZ5zjx8/wkY98kv7+1YyP\nv8zJk5OYZjft7YJdu7YxMzNCo3GWdLqD2dlpGg2LRKKdWCzEjTdey8033/AqUatjx47zgx+8wODg\njktBRaGwhKJM8Ed/9IVLO+ljx47z2GM/xbZ1fN9maCjNRz/6wV+qRkSIN+a4/FagUCjw4AMP8K2/\n+C65kk5X/266u7fhOA0qlWPAPKrazsWLUyjKahxHwbbrOM4clhVBiEESiTBzc4voeh0pZ+jpuYZK\nZYR6fZ5AoAMpA6TTaS6OHcG2u9G1IHWrREsLRgKzBM0A7Z6GI2fxKVF127CI4NGLaggCgTiOo+B5\nz2EYcYSIIEQd217E8xSGhzeTzxdRlH7y+Wk8p4iphQhoaYLCoCmnWT0YZ2BI4w//t3/OS08+yY50\nmtDPSfkfv3iRaz78YXbv2fOO9P0v4q0Y9+npaZ5++jCTk3NkMkn27dvFhg0b/qf3HDnyIj/60Rkc\nB06fniOVWg/A4uIEk5OnKJXOE1AF1WqZhqUhlC2g+PiewLHzeNIDljEooJPAI7RSkBqhzCweJTQR\nRUqBREGINlRlGUEGxz+HlCXMQJhYbDu7du1jcnIUKRvksufIBHtJxXqYWR6haUUpN07h001b+z4s\na4JSKYvrSqLRMKtX78Z1a+Syh5BND12b4drV7Qx2Jol2JvjcH/8x0WiUb33rB2SzAlWN4PsVurp0\nPvOZj72uhcbbjXfy9/5GMD4Oe/bAzAxc7adVf/VX8L3vwaOPXumWXI6VMX9Nns97KjOSzWYJ+v5l\ngQhAVzrNU2Njr+u5kE6n2bFjPRMTR8jlDjI3t0Am08Xg4E7q9SzJpMbS0jCz8xYhYWJ5ORQcFEI0\naBAUAoSBoQo8v0Rn55qVVG8Kx5llcvIYvu+iaR5CVPB9g2YzgK67aFoAz7NQlCCeBy3mRCdSWkSj\nJprWw9JSE0V5BUVpJ53uIRx26OjoxDRddu++henpaVS1RcsdGtpIuVzk5Mln6O29lsnJ5+nri/Cp\nT/0x2WyW++9/hMHBzUSjSWy7yRNPnKVarXHXXXdc1ifXXruV6ek5jhw5RKt2xSYadbj33nsuS+lv\n376NzZs3kc1mV/Q90m/5uL4dSCaTfOFLX2LD5mv41rf+mpGRItPTz6CqZa69thcpNzA/HyAUsiiX\nfWzbx7ZnSSY34vseudw5fH8IXV8iGGwg5TKuO0EyGcU0e9E0iapCOrWahekLaEoGRdRp2jGkbEOg\n4Mtxms0sFS2JL5qElQCmp+LLLIg6vh+nVssBFXy/gpRJFMWjo2OQUKiP5eXzSAmx2BCuu0QwWKUu\nPVwsqvY4IgiZ5CAbB9fhe+M8/N/+G0OJBKFfOLIcamvj7EsvXbFg5K1AX18f993X96bu2bRpI088\ncRjPS+E485w9O08wmEZVizSbowTVDF3JAcbqr+BLF98JIAUEApJIdAfl8jmQM3QaQSr1RWpSwaKB\nR5ggKmpgPY7v0bBnUWmgqgbJ0FokJg1LRdefZ901Q1QqC8zMPE61XKWtLcHGm7czen6C6ewpFnI5\nJDV8PJLJTmx7EjDw/SkUJYKUHtXqKLXaBUrlOpYFmuoxUw7ix7rplQahUIhkMskf/EGrHqxQKJLJ\npBkeHn7XK+u+k/jmN+Hee6/+QATgQx+C3/99KBbhHeQO/Eq4qr+JlmUxPj6OZVl0dXURDAZp+v6r\ngo5as0koGn1dQZ+JiQkunHgRvTxNPJqkqAvS6R6WJg9RLU7Qv3aAvr5eHKdCITuF0GP4joFNHY0E\nQgaoyRplu4JZy5LNZkgkdgPzuG4MVZX4voqUEwixFUVx8H0Xz9MRooFhrMdxSrTqRgZQ1WE8bwzb\nFrS1DaPrLq47TiJh0NbWSTIZp9EYY/v2nZhmkI0bu6nXz3L27Ay6HiSVgj/5k4+xadN6IpEI3d3d\nCCH4/vd/RCKxjmi0xTAwjAADA1s5cuQg+/ffeBmlT1EU7r77Tq6/fpmFhQUCgQCDg4PovxDotd7H\nuGqpozfddANbt25hdHSUXC5Hf38/ExPTHDq0xObN61hamueJJx5DUQaYmytRKs0SCHTR0dGJYRRI\npUw8L86WLXsYHt7Ck08+SqNR4YYbduO6kuMvXSQRjrK0eAgTgyQ6VUpYMg7EkETw/HYUM4LUPCKe\npI0uKkaDgtWg7o0gUYAUnmfieS653AUMvRsNjaX5n9LWuQchogjR8otRRAK7cZ5IuEBHai25cp32\nhEJCUSgUi6/ZD/67aIf6TiEcDnPDDdfw1a/eT60WQFEqlMsjDA/HKRVMmkWDhdwUifAOGtZZ6m4O\nXetFOKPQOEhSeLhKHemaxI11hGyTktRQEQgxj+NVUIQBVAiow0ilStObxvNBiDqGqnL7rTtJJ5Oc\nePJJFmWr7iR7apRoJIOTCtFhDABhbLuKZamEwxLDUIhEVqFpJqXSOPl8mULBxvO6EcLBcRbJ14r8\nxs0HqNUKvPDCUe688zZ0Xf9Hs0W/rnBd+K//Ff7u7650S94axGJw4AD86EfwW791pVvzxnDVBiMz\nMzN8+9sPU6+3VEt9/1l27x4mNTTE2MwMq1cWR8/3OTc/z467737N95mfn+fh//Jf2JVMYMQWCIUT\n5M4dZHb2DBvb1iJEnXarwvjSLKYZpbN9E80KlIuLRJU0dW+WBsvorMcXgnAYJiaK1GqHcJwA0IHr\neivCZRZSTiKESUu8qoGud+C6TYQYRlWbSOmgaQqaFsL3a3jeFLXaJKaZw7Yl58+fJxZL0dvbx6FD\nz6JpNlu2tKMoKpXKItVqg5tu2s6dd37gVZXx09OL9PSsv+xaSwckcslL5hfR1tZGW1vbWzBi717E\nYjG2b99+6fnBg8eIRjMUi0WefPJ5JierNBrP0mzOoaoe4bBAyiDh8AChUILJyWe5eHGOjg6NLVti\nrFmTZOvWa2g0Ghx98TBW4TyD3gI6CSBBg1Eu4tLgAELkcMQcMSWDqSVo+McJmzF0ox1h51BI4xED\nNqEoLTuAZm2ExdlJksFBNEWhlJ1F0ZO0t+/F8xQK+SaGriFlhkptjqDhs74/RSJicr5QoGFZBH/u\nuzGRzbLpgx985zv+TaBWq3H48IucOHEeXdfYu/catm/f9ivt7JvNJs88c4Lbb/8dfN9DSkkut8Cj\nj/6AQiGF4TvkKyWioSLhYDuON4HmHqdP0Yngoioadddjwa8QMXWEEMQxqEmbhgyiewsoSpmAZhEw\nSqhmHyGzB03VaTqLVKtz/N3jz2MuL2I2LfLlPOuTbayOJzk+fo54cjXBVBe2p+O6PkL0YduL9Pam\nGRgY5Nix41QqBRqNFLq+a8WtOY+i7GB5eYpjxw6xe/fNTExMvoUj8d7E449Dby9sfv0yuqsOH/tY\n66jmaglG3t2mE68Dx3H4zncexjDW0t+/jf7+TfT3X8fhwzMMrluP3dPDoclJTkxPc2hmhqGbbnrd\nFPSLBw/Sp+skIxEGBjLksudJeyWitSUWyiOYyRjJcIJuzaJWydE7uAVVESiKjiJMNIIowsTUJeGA\n2jKrKpRwXZ1wOEMo1AEk8TwVKYv4fhJwEWIMRRFEIoP4fhFVHUFRLDStiBAnUBQNx5HMzp7C88qY\n5nqazQia5lKvw+KiIBDoQdfbeeaZMcLhDezY8RF27vwEFy44PPro46/6rO3tSarVy3fGLd+N2hU7\nN343orMzTaGwzAMP/HdGRxcIhXqJxVYhRA+BwBBSZqnXHebmFmk2x3jf+/axYcON6Hqdj370dkql\nMR588Ad897s/opDP0ql4ZMIRDNVEpUpcDdJJiHA4gGGGwYhSsissVSsUhcucX8fFo+nmkQTQ6UNV\nFUDDcQCC4PZRbxRIGlEUqZIvLJHNHsayRhDKBTy/gJRRsqWzrO1z2blumKphcOdv/iZHFxcZmZ1l\nanGRlyYmCKxaxbYd715Pkkajwf33f5ennppG09bjuoM89NAJHnzw4V+65sC2bZ566inGxuYplbIE\nAmGklBw/foqOjv1oRhvRyADpWA+6JuhvTxMNZehUTXoT/SSTCdau3UMq0k2aADl7AYcGPk0QKi4h\nhNKgqy1NJGxR9ywS4QE0VaNUL5ArLuCLdi6Oz6D5SbKeSaHu88JClb8eW+TlimQpV2Fu4hAbN6bo\n7c1Qrx/HtpcZHAxj2yWEWEZVG0AY1y1j2+MIUSceH0RVBzh//mWq1RLt7VcRpeIK4f77W6Z47yXc\ndRccPAhLS1e6JW8MV2VmZGpqikpFZ2DgH+oTFEUhk1nF6dOjfOlLv8PCwgL1ep1MJnPJ9vy1MDcx\nQe3CRU7kGwhhtoIG4RAM6Cz7CZwllfMLJ1nTFyDRsDEDFr4mEaqD55eRAixMDC2K0EykzKNpJori\n4/s2zWYTz3NpiRfFEELi+1Xi8VWoagEhDgI5dH0t4XAviuJRr1soSoV6fRzP0/C8JNVqB4qiUyzW\naGvz6e7uY9++vRw8eJRkcifj4xdoa+tFUVT6+jZy4sQhbr31cmnnAwf28J3vPIVpbsM0g/i+x8zM\nOTZv7r8kdjQ7O8tPf/oC09NLdHamuOmm3QxezTy3XwI7d17L1772h4yNFQmHN1IoNCiXTxAOdxGN\nDtFonGbjxg1YVp1QyGZ5eZlKReXUqWWeffbbRKNhenrizMycxq2PIbw6aBGkdPAJoAqdmG6QpYmi\n2/i+iWIGcGwVTQ/iiSK5ah6fPBpt6EYQV4RWmFU1BCF8USVkSLKVKsHIGoKmJBTSsO0KkUiNUGaA\nenmW7Wti3LRlNaeXlth2663sO3CAjZs2cfb0aRq1GjevWcOaNWveUIbB8zzGx8eZmZkjGo2wbt3a\ndySIPXXqNEtLGgMD/0DTDYe3cfr0YW64YZr+/v439X65XI5vfesHjI9XGR/3mJ8/im0/iud5FIs+\nnZ0RwmGJosVxypP4boRyrUTDmqNHCWJZdUwzzNLSIq7bxFQiKEaSqr6eemMZHxVFuITTBjfe8UFq\ntRyHnz/LYuE0lgWqouBTo+mGqOd8jlVLVJsQUXbguzV818MkQ9Uu0d6+mtnZLPv3X8/OnVs5ffrv\nSadzHD58lJ07r8OyXObmTDQtQLPpoutJTDOKbQt836XRmGLv3nve6iF5T2FqqmUu9+1vX+mWvLWI\nRuHuu+E734E/+qMr3Zp/HFfSm+YLwM/oLf+PlPJ7b/Rex3EQ4tW1C7pu0GhYCCHoeoMOR3PLeXLT\nJdb0tc5SVTXMmfIIdijFddtuwNRDKEKwUDxHT1eW3btX0ZZWGT1uU6nqTC9XMNQIiXgXNbeIakQo\nFl00bRTLKuM4JkLUV5gyVaTMIoTE8xps3LiP/n6VXO4sZ84IgsEOSqUi7e2baTSmgBlsO4Gu78L3\nfQwjiq4HKBaPU6nMYVkWjYZFPD5MNnt0pZ5gHsexqNVsqtXqZcHI5s2bueeeBo8//jy2rQI227ev\n5o47Wqr8Y2NjfPObjxAKDRGLbWZ6usBf/MXD3Hvv+9m0aeOrO+89Cs/zaDbBMDoRQuD7Dpq2GkXx\nkNLHdRWSySFUVeHkyf/O4OBeMpkeyuWXCAZXoSg+lpUnHu9AkSEC9TniQYOGPQpODcdzqYoqTfsl\notFNVKs27e1dVKtnseoqmrYalCKGHkF6NpY3RiB4E45jI3BQaBAzLUxNw/M7MNUoulqhv3+IoaH1\nzM4eYcuWYWq1Nq7b2U2sv5+911xzadFub2+n/ZZb3lSfWJbFd77zQ8bGKuh6EtdtYBjP8dnPfpiB\ngYG3Yxgu4fz5CWKxzsuuCSFQlCQzM7NvOhh5+OG/o9nsZNOmbiYmnmB2dpp6PYKmSYRIMz4+xbp1\nUTKZDnRziIvjJ7BUk1RbgMbSPGnVQNfbVkTNEkh1DqkZoGbQzQiue55gcIFrrtnD7t23cvbs8+zZ\n6zM/b9JsaiwsnGNpSeC5JhKVitWBItK46CgKaEodRS0TCPZgyTqRyBrOnDnF6tV93HXXPo4dG8f3\nVzE3Z6AoATwvSySykVCog0plhmZzEds+x+rVq/jUp26hr+/NFfb+uuHP/xzuu6+1eL/X8Nu/DV/6\nEvyLf/Hu96q5kpmRx6WUXxdCaMBh4A0HI61iyRKu66wIcbWQzc5w881r3nADHMehYmuUA1FqVp2w\nGaLZrNHUIiy4BruCMQy9JcldtcNcs7MTKacZXt2Da9V45fQxFH0cVUlRsS+S6ejGtlNo2hTBYBeq\n2kYul8f3s7T8Jdagqu3oehIhCkxNvcz27Zv52tfu5xvf+Et+9KMXaDYVPO8UqZSDEFFKpR6ECOB5\n5RWKrYeq9lEszhGNRonHw1QqWXQdnnzyEep1DSkNKpUzPP30EJ/4xOXsl927d7Ft27UUi0VCodAl\nvxgpJX/zN0+TSGwgHs8AYJpBQqEojz76FOvXr7sqhbF+GZw9e57e3k1MTIzh+8soioOi2AjRS72+\nSCxmYtslpATL8ojFumk2awjhEQzGCYejTEycpVyuk0jtZrE+D+UFTHMVimJT9aZphruIa0FqtZNA\nCMsSaJqOZ7RjGklMz8RzFTyh0XQnUNVzqKqGRwUhZoloXbiuTSScRALhqEs47FEozOI4knp9hPvu\nu4sDB/a9JX1y6NBhxsZsBgd3XbpWqRT43vce4U/+5PfeVlZGLBZmYqL+Gq84hELB17j++iiVSly8\nmKWvr+Us3dlpMjKiYJr9VKtjQJZAIMn4+Biq6rNnzzquvbaN667r4dFHf8r5Y02SZjsJM0Gz2aTp\nOSzZLh29KQqFYzQaFTQtgaZ1kcvlWVqaJhj0WbUqwcxMHgDfb0NR+tC0ZTxvDa4XRBVgexqa4mKQ\nRgQqRKJxKm4BKRssL1/gt3/7Jg4fPkdn524uXnyFZHKAcLid5eWvU60eRFF6UBQbeIVbbunj29/+\n/wiFQr9y/7+X0Wy2jmiee+5Kt+TtwU03gWXBiy/C7ne5afeVNMr7WVWVR8sB7A0jFotx6607+du/\nPUIsNoRpBikU5kkkquzZ86E3/D6O4xAOJ0nesJWRU88iCgssVXLUulajOVHGczmihkETiHb3smv3\nBvbvv44jR47T1TXI+i0Kx4/2YwbiVKoVpqcX0HWV/v40S0vTOI6GlC3nXeghEFiDprn4fpFYLERH\nRze33fY+Ojo6+Df/5o+54Yan+dM//RpCRAmF2ikUplEUD89roOstCy1FsfH9Kp2d7RiGwZo1/Tz+\n+EOEQgrx+B5isQTF4hw7d97BqVN5hoZe4rrrLq+X0XX9VUWpjUaDxcUK/f2Zy66HQlFyOZ9isXjV\n0HZ/Vdi2g21X8LwmjtOHaSaBCYrFU8TjBu9//0cZHT1PLlcnkYhQqeTwvCJbtqxlaqqOEAlMM4Ci\n1BAiQEGEyVlZQmSxnCZ1NUYkuBadEsFgiXj8ZpLJNczPHyQY3EqzOYMeiKD4NVYPrCOXUwiFLGZn\nLxAKxYlEEjQLDVQfSvYSvoA779jP9u3Xks1mmZkp8uUv38W2bdvesj45cuQMnZ2XV/dFo0mmpjRm\nZ2ff1uzI9u1bOHLkYWy785KdQbVaxDDKrFnzxjcfAK7rIoRyiVnnOA4bNlxLsynJZoN4nkWlIlHV\nPsrlIC++eI716wV79/4GIyMWicRaTrzwHDPVaTzbZtGvorT1k8mswnWXMM0eyuU8kKVQCPP1r/+f\nfPzj+9mzZysPPfSfmZ938P1+VFXFMOLU601ULYOUy3iKiSdtdN1EqgoyJLj1xvfT1zdMNJqgv7+X\ngwfn6OnpJJVqaY3E4xn27Pk4S0sHKZcnWbUqzWc+8wXuueeeKyo6eLXggQdaiqtr117plrw9EKLl\ns3P//f8UjLwR/B7w8Ju9ad++m+ju7uSFF05SrRbYsWOQnTu3E4lE/vGbVxAMBmlvj2JZEfb+L/fS\naFSpVos888xBeiPtbN91DY7jEIlEyeVeYevW9XR1dfHBD7bz0ktHefLJIuXqWTb2r+G66w9w8OBL\nSNmBZeXo7jY4c2YEVU0jpYqmteP7DTwvgOcV6e9PsHfvNjyvNSnW63VefPEVOjp2UKvFSSQ66O62\nOX36BXQ9gKaFsaxZ0mkFRakzPDzI9PRLKEqDT396J488cgrbLgJVtm4dYGhoiGazyqFDx18VjLwW\ndF1H0+Srsk2+7yOlS+DnRLLe62hvTzE3N8eOHbcxMjJBsVhEVUNomsHwsE48Llm3LkI0GuPUqXGS\nSYtNm3ZhmiaLi8+ztDRCb283uq5x7tzz1OsF9MBeil4IdFCVIqlUN7Y9gWEMkUolaDbrqKqGEBJN\niyPELNdfv4N4PM758yNs376OgYEDzM259PVtY3l5ltOnj3Du3FmuvfYAe/fuXmGEVNmxo5+tW7e+\n6nNZlsULL7zIkSMvI6Vk+/YNXHfd7je0e/Y8D0V5dWZMCAXf99+Sfn899Pf3c/fd1/Hoo88hZRTw\nCQSa3Hffh960E3QqlSKVMimXc8RiaQIBk3LZIxQy6enJ4DhhenujTE4eJR6PcM01B3DdOZrNJopS\nY+vWGxkaWs/ExAiu6xAYmSCTidJoNFDVOKZpY5pLJBI9pNO9KEqG6WmLF198lK6uHThOFVXtJ5ud\nIRSKYlkTSOkgpUUg4BCLpZHSJR5vZ3AoSiaT4ejRxxgc7OR731tiacmnv/8adu26lqNHT5HNjmHb\nDTIZg//4H7/CTTfd8LoSBv+EyyEl/Kf/BP/u313plry9+PznYcMG+OpX393y8G97MCKE6AAe+IXL\n81LKTwsh9gAfAD78Wvd+5StfufR4//797N+//7LX16wU3/0KbePOOw/wzW8+gm0PEYulURSVeLyK\novicO9dSCFSUCnv29LNu3ToA/sf/eISjRxfp6NjM+vUhjh8/x8TEGAMDGzh9+iyJhI+mxenr20Es\nlmZ29gxSVhAigaKUGRoaYt++PUCegYFWbcvp02colYIcOHAbr7xynosXJ8lkBujpOUOz+QpSdqLr\nGp2dKh/72J184hN347oumUyGxcVFlpdNurq2oqoqitKajHTdpFJpvqG+0HWdXbs2cujQKwwMbLl0\nfW7uAlu2DL7pSf/djkKhwMmTp8nlSvT3d7F58yaCwVbKPxAI0NXVR62WZ+3afmzbxrKK9PevYtUq\nl9/6rdtIJpM0m00OHz7M3//9UYrFBRKJNKtWxRkZeZ5Uqh/TNJicnKOtbRfLy0UsK4quQzrdDywj\nZYNm02d5eZyWsJxKpXIW3w/T16dz883XUSot8v+3997hcV3Xoe9vTy8YAIM2g95JohEEexObSIqk\niiVZvVuUYl/LjuPY1765L7kvyed745c4fknudfIiyZIVx+q9kjIlUWITKRIkQYAAQfQ+AGYGmMH0\nct4fA4Gk2EmAA5Ln933zcXgws/c6Z+1zZu29V1mwYD1PPvkQKpWK2tqDfPzxLpTKKPPmFXHvvUtx\nODx0dn6BQiFRVVXEpk23njYrjkQi4z4fQSyWMoQQfPZZB01NbTzxxIPnLZBWUzOL3bs7yM09ERru\n93tQq/3nrPs0WSxatIDKynJ6enpQKpXk5+efMefN+RBCcOedN/H88+/gdmdgNps5enQfZnM+qalm\nRke1qFQKiouTWL9+E2q1lv7+CAMDw6xbt5APPjiA2VxCZeVCnM4BEhP9jI3Z+fzzQ4TDBVgsaWRl\nVZCTE1tFsttjeWG02gry8xPp6/sMIUCpNOBydVNYWIrfP0g4rCE9XaDRBHC7W7FaE7Fa0/jqq62U\nli6lpKSakRE79fVvotfvo7JyEcuXL8LtdtPWVst99z3BDTcsB2Jbrm1tbTQ0NKNQCCoqZk44oQ8O\nDhIMBrFYLFdd+v/JZs8eGB2FDRviLcnUYrXGImueeQZ+/vN4S3N24pYOXgiRTcxIuU2SJOcZ/n7R\n6eAvlZ6eHrZv/5Kurn5yciz4fC5qa7tpbR2gr28YjUaipMTC2rWLWbVqEb///R+xWOZit9sJh8P4\n/X4aGw+RnR1Gq1Xh8+lpa/MyPOzEZhPo9am43UeJRJIxm9MpLbVQWhx3rgEAACAASURBVKpj5kw9\n3/veI2i1Wl599R1aW5WYzRaEEITDEXw+H1u3foBe7yEpyUokEouYqKhI4wc/eHziYezz+fjlL/+d\njIyFqNUnflT6+tqoqtLx7W9f2NZVIBDgtdfepbGxH4XCRDTqobDQzP33335FjJGpSg9ts9no7e1F\nrVZTXFyMzWbjhRfeJRJJQ6834fU6SErysnnzfaSkpNDW1sazz25Fp8uju7sfgJwcK1qtIC1thDvv\n3Mhzz73MJ58cxO2OEo2OkJysYu7cOcydW0FOjhWj0YjRaORXv3oBtzuDnTu34PWmkJFRhUZjoKvr\nE3Q6B0plAZmZ5YTDLpzOXvR6H6GQmxUrbiA1NYGsLC0PPngnycnJSJKE3+9HqVTi9/vR6XRoNBok\nSWJsbAy1Wn3WFazm5mZ+97tPT/H5AOjoqOXeexeecSXlZNxuN7/97csMDioxmTIIBDyEQv3cf/86\nKisrLks/k6H3kZEROjo6ACgoKDhnOYJIJMLg4CD19Y309w8zMjJMd7eLvj4vXV2jpKUZWLRo+URR\nya6uo2zaVMrSpYtpbGxk165anE43ubnpNDa2IkQ+IyNO6utt+HxqRkeHqa5eCsDw8FdEIm6Sk5dg\nMLhJTJTYsmU3Pp+ZUMiNwRDAaBzmttuWUlhYSm6uhdmzK0hPT+cPf3iNjg416enZqNWxDMvNzc3s\n2fMeK1duJCEhmbGxAYqK9Dz88N1otVokSeLttz9g374ODIYsJEnC5+ujujqT4WEX/f0eFAo1anWQ\nW265gblzJ28r72KJdzr422+HdevgqafiJsIVo7Y2dr6trXAJNvykMV3Twf8VkAG8Ob6suFGSpAub\nxk8i4XCY1uZm7O2NaP1+bO0O2gYksrIW0tnZQHX1GoRQ4HQe4vhxH21tL+F2J1NXt4twWIcQCiTJ\nS1ZWHuXlaqqqZvDhh5/gch0lHDai16uRJCNqdQGBQAs9PfV4PBGKi9eyfv0TEzNSpTLK3r3bCQYV\nqNVKCguL0evNeL0hFi5cRn7+iRlpR0ctx44do3I8Q49er2fDhsW8884+kpKK0OsTcDoH0GiGWLXq\ngQu+FlqtloceupuBgYGJJGhfZ2+9GpEkifff38KePccRIhlJCqNW/xG/30tGxmJMJjOBgI/e3i4+\n/3w/H330KTNm5FNZOYtAoJeEhBwWLIg9rCORMB0dX3HzzSt5/vlX+fTTXjSaRRQUpBAK+bDbD1Jb\n28jQkAf/mB9HzzF06jBeRQKLlj9CSsrd7Np1gEDgGG63B4Wil7lzH6et7UsikT5ycytJTDSSmupm\n+fJcVq1aSmJiIrm5uQghaGpqYsuWHQwPj6HVKrjhhhpuuGEZELvBzxdi29HRg1abdtpxozGDlpau\n8xojJpOJ7373IerrG2hr6yE5OY05c1ZhsVguUTuTx5df7uWDD/YQiSQjBAjxGbfeuoxFi07dJA8E\nAnzx6afU791LNBwms7CQGzdsIDs7G7fbTV1dHX/4w0eUlq7BaIylA/B63SiVDsrLY/dfWVnZRBbT\n3bu/pL5+jLy8EhITRzlyZD+DgyGcTj/JyXUkJEBhYSY2mwKfbxSDAfbvb8fnUxAMtuHxtGEwWDEY\nUsjISOexx+6dWK2IRCJs3/4ldruWaFRBQoKOyspqZsyYQSg0j6KiMCZThLKyRZSVlU1MTNra2ti3\nr4OCgsUn+cVk8cwzTzN37jJKS2NGkt/v5fXXd5CamjLl0VDTkaNHYysjL74Yb0muDHPnxioRv/EG\n3HdfvKU5M/F0YP1evPo+mS3vvkv/V18xPzsbnUbD53u+ZLB1lKEhDQZDFkpl7CZXKtPxev309HTS\n3HyIoqJ1pKRYx42RKI2Ne7DZbNjtBrKzV2E2B7DZWlAo7AQCfvx+CUlSkJiYh9Wag0o1k5df/pin\nnkpFpVKxf38zLpcas7kCtVpDc3M7IyN7MBj0ZGYWnCKzTpdCT8/AhDECsGTJYtLSUvnyy4M4nTYW\nLswhKSmHd9/9I+FwmOrqmVRXzz7vcjyA1WrFarWe93PTnYaGBnbtaqOgYPGEv0N/fwe7dr3JggUZ\n2GwDtLU1olYX4PPl0ts7ikKRSldXPXl5Fuz2rYyNlaNQaAEX69fXoNfraWsbAVIxmWIbsGq1nlBI\nRU9PiLDXxgxdiIrMMgL+MZoHm/h8y8ssvfFuLJZMNBor/f21aLXgcHSh0ympqrLg8fRgMnkpLNTx\n3e9+55QtiJaWFn73uy2kp1eSlxerKbR1awNjY15uueXC1phNJiOhUO9px4NBH0lJF2ZQ6PV6FiyY\nz4IF8y/o81eC/v5+3n13D9nZJ1YFg0E/7723h4KC/AljSZIk3nr5ZfzNzSzOykKtUtFvs/H6M8/w\nwFNPkZ6ezrJly0hMTOLNN7dhtycghIQQLmpqitm3rxarNY2ZM2dO3EPt7b0kJKQRDPrZu3c7SmUe\nmZk+nM5aOjp2sHLlcubMWcaxY7Xs3r2XQCCbkRE9Fks1PT3bSU9fg9GYQkpKOm+/3Uhq6ps8/PC9\nCCH44IOt9PZGSUyswGBIwu8fZc+er1i+XEVCgpabb95wxvQFDQ3NGAynTiAcjhGCwTQikRO+PTqd\nAaMxnz17Dky5MWK329m79wCdnQNYLCksWlRzRbb2zsU//AP88IdwPQUb/fzn8N/+G9xzD0xH3+bp\n4MA65TgcDoaGhjAYDOTk5EzcqA6Hg+MHDrA8P39ijz3VlEiu2seBznqyi4sm2hgbG+TQoW7cbjUj\nIy6OH28mLc1JYeEsIIrb3YFen0NeXiwfR1XVHLq7vQwODpKSkkoo1Elq6hySkrJRqbwMDnpJT89m\nz579aDRqtNoC1q6t4eDBelyuKKAmFBqjoKB4IoLga0IhD2bz6bkDvvahkSSJN954lx072jCbC1Aq\nlbz11mHq6o7x6KP3XtJe+9XIV1/VYzYXnuJ4qVar6esb4MCBTnQ6Aw5HMpGIj3BYkJRkwWKZhdOp\nIDk5EyESuO22uSQlJRGJRDAYDAwPDxMKKYFTr+HoqBOFIpngYCvFNfMRQqBJMJMXyMCaEKWp4T0K\nCkpoaNhCKORgeFgiEEghMzOXnh4HZWVFmM1pVFRoTtPPtm27SUmZNVFTyO8PodVa2bbtK1asWHrG\nNP7fpLx8Flu27MHjcWE0Jo634yESGWD27DWXeaXjR0NDE2q19ZTtSY1Gh1JpoaGhccIY6e3tZbi5\nmSUn/fBmpaXh6+9n/5dfsvHW2FZmVVUlpaUldHd343Q6+eMf97B/vwONJkww2E5Kym42b76P5ORk\nUlOT2LPnGDabjYEBifz82ORArU5BiFE6O9vJzNxBUpKCm28u4YMP6ohEtNjth9Hrs8nIqCYc9uF2\nj5GcnEdDwwB9fbGQ/X37jjF//ioOHuxCrzeh0yURjRazb9/n3HRT1VknCwrF6VsfwWAQUBKL6jtB\nLFKu63JVcE76+/t55pnXiEYtJCZmU18/woEDr/Hww/Fz1OjuhnfeiW1ZXE9s2gR//dfw1lvw7W/H\nW5rTuaaNkUgkwtYPPuDY3r0kCYFfktBlZXHngw+SnJyMw+EgUREL87Pb7YyOjCJJETQiiIYIXq+L\nxMQ0wmE/PT2HMZlqGB3tR6Mpxu3uwO3uIBodICPDSGKigdTUEwZCamoK6ekp+P0FpKdr0WrnYDKV\n4vWOkpKSSCAwRmJiGl1d7SgUChITc0lISObGG1cwNjaGENDXl4jH08LY2AgJCbE9cJfLjkrlpLz8\n7AWvuru7qa3tprDwxFJtYmIqbW21NDY2Mnv27Km98NOEQCCISnWqk57TOUQ0mopabSUSGUGny8Tj\n8TM2NkZOjh4hQKOJjQ2r1UJfXz9ffFGLyyUQQonH04fb7UaSTl1N8PlG0WgSydDpCIXDuN0uIhGJ\nSCTK8opyMoXg/j95DIfDwf/5P2/S3h5Fq83BZDITjYY5cmQfZWX9LFjw+Gnn0dMzSG5uOaFQiP17\n9zJms2FQqbCNHud3Tz/NE9///nkjYpKSknjooZt55ZWPsNu1gECt9nLffevIyMi47GsdL/z+wMTq\n5ckolWoCgdDE/+12O6YzbDemJybSPu5r8jU6nY7S0lL+/d//AyggPz9n4m+9va387ncvUV5ewvNP\nP81Xe3vw+Y1EKcY2sJ8ZM4swmWDt2ptpbj7AunWlrF69mmPHjjE6aubIkT4GB/UolWUoFEoUChU+\nn4ecnAyUSjPDw8Pj4ccJFBQUMjLipr39GEplApIURJIGuf/+28+6dVpZOYtdu94jGs2dMML1eh2R\nSC8Wy6k5Z0ZGBlmyJOdMzUwaH330GSpVAWlpsZWQhIRkPJ4U3nnnkynt91z84z/GkoGZzXETIS4I\nAf/jf8Bf/iXcccf0Wx255owRr9eLQqFAp9Px1b59dO/ezfKCgomVj46BAd595RUe+e53MZlMjIXD\n7NtXS1+fCyEMSFKYQUc/QpfE6Ggjbncy0egAkmTA5QqQk1OI261Ar5+Pw1FPONzDTTf9lPff/z1p\naYkMDAwwNDRER0c/Tqcdh6MHozERSfLi94+hUoXQag1YramMjY0wa1Yq0WiU48fdJCQko1AIEhNj\n+/86Hdxyyyb27Wukqys2mJKSBI8/fuc5fQQ6O7tQKlNPe2CZTJkcOdKMWq2mo6MHk8lARUU55mv0\nrqyqKuHDD5snVhQg5qyck5OPRuPC5XLj8wUIBhXjdYJiTrrhsIeEhEQCAR8ff1xLScl6srOTGBjo\nwOv10t1di0rlpr8/RFpaIT6fA7XajU5nJKRU0tjYQjSqGU+K1kGKRUvO6pVkZmZy6FA9FsssSkrS\n2b+/DofDgd8fwu12YbWmn7H6cUZGMu3tLezedZDRvmFMBi0WswGLUYnOZuPj99/n9nvuOe/1KC0t\n5ec/L6C7uxtJksjJybmgbbvpzMyZxeza9UckKX9ivMecNm2Ulp7wg/n6Xu/t7cXhGMVg0JGVlcmo\nx0PKGZJMjI6O0t3tJC/vhHNuKBSirW2Q9vY9qPgjzg43+Ql5dEt9hCJBRoZ7aIr2csutaxkZGcFo\n1JKXl4cQgoSEBLTaANXVlWzdupVweASVyojX68BoDFJRUUIw2EtCQgImkwmPx87evfux2UaRJAmV\nykNRUSZFRavPeb/m5+ezYsUsduzYi1qdMZ4xeJDVq0sZHe3DYDChVmsYHOxGpRpk0aKbJlEbpxIM\nBmlr6yc391QjyGhMxOGYsm7PSW9vLO17fX18+o83t9wSWx15+224c5pVCbhmjJGBgQE+ef99Bjs6\nkID88nI6jh9nfmbmKWGOBVYruzs7sdlsWCwWXAoVHU29lBdUoRQKAuEg4UCAwqIMVq9ZQn19E11d\nEp2dY5SUZGOx5DAwMIjN5kSvz8HtPk5Pz1EKCrTU1e2lqWkGXV2DqFRKkpLM5OUZGB1tIxQKkpmp\nJT09H6XSTXb2LPz+NpYsuYtoNMrhw2/h96eg08V+EAcHu0hLE6xYsYIVK1Zgs9kQQmCxWM6bzCgW\nYXF6Hjmfz8OuXftpaHCg06USDg/y8cdf8dBDm5hxlWf9ic0mxSlZYufOreHgwSY6O+tITs4iFArg\ndvcwa9ZsZs+eR2dnO7t3f4ZCkUN/vwOVSoXf7wIGSU+fQXv7bkymArRaA1988REjIwokSYt7KIrC\n/zlGg4FjvSqsBcU88sg6enudfPZRNyVKPSk6NcGgnaycQva22pjzSGzVLBqNolAoMJlMLFu2gC++\n2IPP50erTeXIERu/+c3zPPro3afUUyorK+DFF1/AM5JBdmopSFFa+45SnGWnumgZu+vq8Nx88wVF\nPKnVaoqKis77uauFoqIiKivTqa8/QHJyHiDhdHZRXZ1xynmmpaWxr7WLepsHS5KVaNRF7eFj6Ioy\n2fzoo6e1G8udcqoxf/x4K8PDUQyGdGxttRRZFyNFoggFjIR9eP0mhhxj1NcPAZ0olU3UVBnZ+sor\niEgEd38/XjHEggXl7NixH5/PQUKCxIYNq1EqQ6SlCQoKCohEIgwNddHVNUp29lwUCiWjo4McOLCd\ne+89t6udEIKNG9dRVVVGY2MzQkBFxSrS0tLYuXM3u3fX4vMFqago4sYb75vSiYhSqUSlUhCJhE/J\nXRQrznlReS4njV/8Ap54As5g818XCBG7Bj/+Mdx6a3wja77JNWGMjI6O8uqzz1KgUDAzNxdJkug4\nfpxPtmzBmZ2Nz+cjOTmZqrIycjMy0CoU+P1+BgcHae5ycNwn0Vq3B2tKGuqkdPIXb0Kh9LN48Xwe\neOAe+vv7ue++n5Camo4QCjIzrSQkGOjqqkOIMCMjx2hq6sfj0TI0dAghrKhU4HAc56abNpKaamH3\n7vfQagfQ6bykpKTidO7n5ptXEAgE2L59L05nD/X1h8nOLiIlJZG8vCTuvvuuiR/XM82Yz8bMmTP4\n4IPd+P2eCeMmHA7R2rofs9lCQcGJCq1ebzavvrqFn/2s4KrMOzA0NMTWrdtpaupCqRTMn1/GjTeu\nxGAwoNfr2bz5fg4dqqOhoRWjUUdNzZ1s396GRqNi5sxZZGSksGfPp4yNddLX14fdbkcZidLZ/AXz\n5hcTNebS3FyHy2UkOTmP3mMfUiyU6HSZaFQB0nMyIC+ZzZsfpq6ujs5OB7aeLpz+bjJSUxlOTCGz\nfCFHj7aSlXUEiyUVn+840Wgex44dZ2xMQ1ZWHnb7QWbPXsPIiJ+33vqQxx67f+Ich4ddlJeXs2/n\nHsZ8Q0CQ4qwkEvRKHC4XKiEIBALXXC6YC0GpVHLvvXdQWdnAwYNNAGzcuJSKiopTjPbdu/eRUbCS\nUUMv7UO9qITAo0ghU5N8xnsrOTkZqzUBp9OG2Rzbkmtv70Wp1KLXq+nwOBnw7QUBksJAVOXFNtJF\nIGyit/cwOTkJJOgT2f7Cizx06wa0Gg2Vyclsq69HnWNi06ZiBgYchEJadu58E7NZxaOP3kEoFKKl\npYWsrCqMxgDd3V8RCglCIRfp6WmMjZ0pLf6pjI2NsX//YWprjxGNRhkaGmH9+pWsXh17SZJ0RSLk\nlEolCxaUs2fPMfLyTjjb22ydFBaeHtk11bS1wWuvwbFjV7zracWGDfDrX8PTT0+vsOZrwhg5cvgw\nKYEA2eMFocT4w1kzNIRKpWJJXh4jPh9f7tyJb8ECvIpY1sh/+7eXGBhIIKfobgIBF7axVuaWVpFf\nUEFX10EikQgQKyxWU1NIQ8N+tNpcVCo9oZCd5GQPY2Na+vvDOJ0qLJZy2tubMZvVExEzdns/s2cv\nY9Giddx+ewX79x/h2DEber2F117bQXv7iyxdejOzZ99Ffv4wPT21rFlTyY03XrpTYXJyMvfcs47X\nX99GKJQICIQYITERSktjeSYikVgeE41GSyCgo6en56qbMY+OjvL0068QjWaTk7OCaDTCvn0t9Pa+\nxpNPPoRSqUSv17NkyaKJLLQx575t7Ny5GzADYWpqsrnnnhp+//wb5CpU5KdaSdbq6Wk5Tt2xV1Cl\nVZGScgMuVy9JARch1xBKnQmFIgmDMouu+m5+/Q//wm13bKKiYiH5mx7D5xsjEPAxMNDDtm3b2LXb\nRX39IGlpCahUPtra9lBf34XRmIvDcYiSEutEXovjx3cyOjqKQqEgFArR0zNIdfUygm4w+MdIMiai\nUWkZcLZhczpRmM3nrEx9raNSqaiurj5nePL+/UfJz5+HpnQ+Ho+LSCREQkIyvb219PX1nVZMTgjB\nHXfcxHPPvUl3twO9PgmHo4OkJD2asIfcqI9MhYRWaaDL1UVn0I/RcANqKUBGRg6RyChiZJAUvZFg\nIIBWoyHZZGJVeTkDCQnc8xc/4l//9Xd0d0eYP38tkiTx0ks7eOutbcycmYckJTN37kI0miM0NLSh\n1c7E6XTy9NOvUlRUQFFREU6nE7VafcqWbSgU4oUXXsVm05KZGcvGevx4N52dL/ODHzxKQkLCFQ3V\nX7NmBQMDb9La+iVCmAAf6ekK7rzzLp544oqJAcSiSX70I7hOqlqcFSHgV7+C9evhoYdgujw6rglj\nZLCnB/NJs8JwJEJ9QwNLc3MZ8vtxut0kGo1k+/28unUrVetu4de/fhqlsoTy8ipaWlyYzTkkJGRw\n7NhBsrOLUCrHJsLPlEoljz9+H88++w5ebwghJLRaE5991jLulJpJIKCmv98BBHG5YMaMLMLhAKFQ\nzIlOkrzU1zfR3S0oK1uHJEm0tLQBFXR0DJKVVUBKihWDYSV799axcuWKyyo+VllZQWFhAW1tbUSj\nUfLy8nj22ZcRQtDV1UV9fSuhEOPF3ez4/csvXQFxorb2EIGAmZycWNVWhUJBbm4ZHR37aG9vp6Sk\n5LTvfL2MPX/+nIlEaPn5+fz93/8rKeiZN2smivGHtVFnxNu4k8auOkymhQQ8w2jG7IiogtTUfHw+\nJxqNjtz0fJqP9Y0brw4ikTA6nZHGxoN89tkhQqF0Cgpm09sbIRj0YbEksWxZLu3tDRiNPiKRZMbG\n1LS0tJKbm0MwGOI///N1+vvdCKGkra0Ji0ViVvUc6nbvRu0PIfRKxrzDdI6l8O0HH7xuihheKpIU\nnfgR/jqaKHb87Em3srOz2bz527z99nscPVpPfn6IcCiRVFeA3PJ5NNcfJhw2o46ESYtq6QuGqKhe\nTG5uLnZ7C56uI4j8bCLhE1sSqYmJHOnupr6+AbtdS3l5NXZ7P7t370KILIaHRwmFfHR3H0GStBw/\nPoTVOhshFIyMhMnISOGf//l5MjMteL0gSRFmzszitttuIikpidbWVnp7wxQUnDDMrNYCurs91NUd\nYenSJVNwdc+OXq/n8ccfoKurC4fDgclkorCw8IqP1y1bYom//uM/rmi305bq6pj/yP/8n/D3fx9v\naWJMM3/aSyM1M5MR74nlyzGfD1U4jFavp2bpUhSZmXT5fLQ5RnEHdai0FdTX22lqshMMhjAagzgc\nvQQCAdzuIK2tn3PLLTecEqEwe3YVP/zhvSxalE1enpLMzDA6nZni4rWkpcW2OPT6UpTKVLzeLlyu\nETyeAaxWK729x8jJ0dPRMURWViylfCgUwOMJkpFRyOCgG++4/DqdEZ8vNuu/XIxGI1VVVVRXV2M2\nm5k3r4yGhq/Yv/84Ol0OZnMRBkMmg4ND7N9fd9n9XWk6OwcwmU5f7lUqkxgaGj7nd9PT05kzZw4V\nFRWEw2GGhkZJUWsnDBEAndZAfmYuJTlGnM5DRPAQUQdJS8smGo2gVkvo9XrcIT8JKfmMjnpYubKS\nrq59tLXVcejQUUKhWEIrq7WUtLQKhoclolENAwMuKitL6OwcZWQkEbfbwNGjw2zduo0jRw4yNJRA\nbu5ycnOXUli4in37dhAOjzF3xQqC5mSODLaRXJjMQz/+MZVVVec4UxmAefPKGRhoP+WYxzOKwRA5\nY74OAJfLxUsvvUdfn468vFXk5i7neMNuPEN9mFMs5M+YgV85SFgZxqzXkpWlJi8vtsKSkGDF4fMD\nfkwnhV473G5SrVba2nowmTKQJInDh/ej1ZaSlJSHwWAlLa2ApKQCPvnkPTQaM0Io8HiGUCrtWCy5\nHDjQg92eTG7uUnJzl9PWJnjhhdeIRCL099tQq0+f6hqNqXR2DkzeBb0IhBDk5+dTU1NDSUnJFTdE\nxsbgBz+I1aHRX1yR52uaX/wCnn8eGhriLUmMa8IYmT1nDnaVikFnLKu8Vq1m2OMhpNNRXFTEnHnz\nKKmsQpOYR2ZeGenpOaSkpGMyWejoGGLOnDKqqjJJSvKTkSGxefO3WLBg3mn9lJaW8uSTD/FXf/Vn\nLF++kMTEXKLRCCqVZnym0oNSmYTBEMLrrcfrrUWvH6WiwsB9932LcDg64cilVKpQKCQikRBCKAmP\nz56i0QgQmqiTMpksWbKI0dFmgkEnPp+dkZF2PJ4GVq/eRHOzDbvdPul9TiUZGWa83tONNknykpR0\n/twbX6PT6VCrFfiikVOOR6IRQlKIm26+idtuK2PBwlkEk5Owjw0QDDrJz89i1OvGodKQkmrBZDKw\nbt0aNm/eiNE4iNFoICsrkaKisokwS602g+FhOy6XB4/Hh1YbALwoFBJKpYTN1kQkYiQrq2RiJp+X\nV8TcuSvo7NyBx9OINSfEk09t5J9+848UFxdf+gW8jlixYinp6T46Ow8yNNRDd3cTTmcd99yz4awr\nkDt27GF01EReXiXJyekUFlay+Ibb8IedGI0eamoK+N5Tj7NoyTwqaspJTjUyOjpEMOgjEPDh1SjR\nW5L5eu3F4/fTNDzMojVrSE42EQh48Ps9uN1+9PqYI2kkEiQhwciNN64lGh1jZGQfTuc+DIZBli1b\nycBAD0plLhpNbKIkhCAzswibLUJ7eztmcxLhsOe0c/F6XaSnnz1F/rXM978PK1fG8mzInCAzE/72\nb+F734MprnV5QVwT2zRms5k7H3+cj99+m+auLiQgsaICo1aLcvxB09MzwHAogrWiGqVSRUlJKYcP\ndyBEKmNjYxQXF2IyKUhL015Q6XW9XkdubjqDg72kpuaRkZGLVqvn+PG9FBSo+NnPbmfmzBJSU1PR\narW0tLRgtw/Q0fE+s2bNIT09h+LiYurrGzAaEzAajUiSRE9PI3PnllxQJdWLRa/XU1paTGlpDsPD\nw+j1ieTkzCEhIZnu7lFcLhepV9GG6rx51ezZ8zJjY6kTeViGhnpISgqdcYvmbOh0OtasWcTvmtqx\nuexYElORkBga7iFqNrBq40YKCgpobGwkN1fBmy++jkGvpTXkRZWURvGsBYTD3ZSXxwrQlZSUsGHD\njbhchzh61HnKHn00Gsbnc5GVVcTYmII1axZy4MDn9PQcICEhgfLyGbS3n76qU1BQil6v54EHbh9f\nhZOneBdDQkIC3/3uwzQ1NdHe3ovZnEdV1UZSzlHG9PDhZjIy5hIM+untbWN4eAilUoFLl0xmrpXi\n8QRqSakG6rpcbNz0IAMDdoaGHCiVffzwz58kx5rK7v37UUsSwdta1QAAFcpJREFU6PUsv+suysvL\nSU1NZceOlwgEkpCkKJIUJRj0o1T6sFqtaDQqamrKSErKp6hozoQj+sjIftRq5WlRMEIYcblclJWV\nkZCwC7u9n9TU2IqP2+1EiEFqatZP0dWdvvzDP8S2Z/bujbck05PvfhdeeAGee44r7sPzTeJmjAgh\nHgE2A1rgaUmSnruc9nJycvjOU09NOP5ptVo+fOcddtbVYRSCw6NOojkzKZoRW/EoLCzH7XZx+HAt\ng4OlCDGI1arh3nvvvCAHr9LSEnJzd2AyJdHZ2QaoiET8VFaa+F//6+cTobLRaHS8ym8PJtNsmpqO\n0NGxjVmz8sjNLSIxsZa0tAh9fXVEox7Ky7PZtGnd5VyKc5KXl4XDkUxu7syJY9FolGjUfc7iYtMR\ni8XCww9v5K23ttHdLSFJEbKzE7nrrrsuOjJo48Z12O0OPnj9PY53dKMihMlq5pEfPjVRrXnOnDnM\nmTOHVauW8eKL7xMKGdDpDEAP999/0yk/bDNmlKLX76KwMI2WlnaMxnQUCgV2ez3LlmWxcOFcGho+\n4siR/bhcetLTlxKJhGhvb8Tt7j5NPpdrmJkzrde1o+rlotVqz+voejIajRqPZ5T9+/fg8RjQaMxE\nIj5sIQN7bDbs488J1YxillWZGB1tIynJiMkkUVY2l7vvvg2tVsvqdevw+XwkJiZOrMJYLBbuv389\nb765DbXaTXf3XtLSLCxdOgedTktv73FWr17IyIgbm62N1NRcIpEwweAgaWnpp40DSXJjNpvHfTTu\n4tVX36erqw0hlCQmKnjssduuqonG5SJJsYiR3/wGdu6E6zDQ7IJQKGJRNWvXxooGxrNMUTyr9qok\nSQoLIRTAPkmS5n/j75NStdfpdOJyuRgeHua11/ZQULBwYsnc7/fS2fk59967HqvVSk5OznlzeJzM\n4cN1vPHGJwQCJvz+IBqNhw0bFnDjjasmPtPS0sJvf7uVgoJFCCFwudy0trbR0rKb226bz223bcRk\nMuF0OklKSiItbWpD3lpbW3n22fdIT68iISGZUChAb+9R5s+3cuedt0x8zufzcfjwEZqa2jGZDMyb\nVzVRhnyquNQqnpFIhOHhYVQq1WU/cIeHh2lra0Oj0TBz5syzrkD4fD66u7sRQpCXl3fG5GGHD9fx\n+uufMDQUpadniGDQxtq11TzxxCN0d3fzs5/9gu5uPcXFy0lMjCWpGxrqwOnczqpVt1JYOBulUoXD\nMYDf38JTTz1Aenr6ZZ3fdCTe1VvPxs6du/jnf34dny8HlSoFh2MIr3cMnW6MVassPP743Wg0GjLH\ncxn19/czOjqK2Wy+4NpOwWCQpqYm3nprK4GACSESAA9ZWWoeeeRulEol+/fXUlfXjFarpbQ0m08/\nPYTBUEpKipVIJEx/fwtZWWH+5E8ennh+SZKE3W4nEomQnp5+Uc+1K8VU6f3YsVgNlrY2eO89yMub\n9C6uOX71q1ia+M8/h8uImzgv56raGzdjZEIAIfTAFkmSVn7j+KQYI18jSRIffvgxu3YdQ6FIASIo\nFE6+/e01VFdfenp0l8t1SsTKN42J997bwsGDHqzWglOO9/a2cMMNGaxdu/qS+75Umpqa+PDDL3A6\nvahUsHTpbFatumGiJorH4+HZZ19kaEhNYqI1tv/t7ea22xayZMniKZNruv4oXQ5fj49IJEJ+fj5m\ns5kXX3yDo0edHDrUQGurknAYzGY9GRmppKfrSUuD3NwITmeYaBRyclK55ZY15ORMberueDFd9R4K\nhbj33v9Cb28Kw8MhlMoE1GrIyDChVrfyL//yY2bNmnX+hi6ASCRCW1sbIyMjmM3mc0ac9PT08P77\nn9LTY0ehgJqaUtavX33V5ZmZbL339p7ILvrTn15/hfAuh2g05lMzZw788pdT18+5jJG4+owIIf4H\n8CTwl1egL26++Sbmzaums7MLlUpJSUnJRS17BwIBWlpacDicpKenUVxcTGJiInPmzDnrd1Qq5Rlv\nOEmKolTGZ7Yya9YsZs6cidfrRavVnubAt2/ffoaGdBNF/wBCISsffriHysqK85arlznBN8dHY2Mj\nR486KCxcQG+vHa02lXBYhd3eTlVVDkVFxXR317Fu3ULMZjPNzcfR6XRXZUK6qx21Wk1FxQyGh0cp\nKipEq9VgMiWiVCrp6Ginru7IBRsjNpuNtrZYNE9paclpkxalUklpaekFtZWTk8P3vvcIXq8XlUp1\n3Y+N0VH4u7+DZ56J+T00N19/dWcuF4UCfv97WLIESkri4z8y5caIEMICvPyNwwOSJN0vSdLfCiF+\nCXwihHhDkqSxkz/013/91xPvV61axapVqy5bHqvVesFLqCdjt9t5/vlXcTrVqFQmQqGjWK07eOyx\ne89ZNbWiYiZffPE2kUguSmXscofDIcLhIWbNWnWpp3HZCCHOOpM6dKiZtLRTH4xqtQZJSqKnp4ey\nsrMX6ZM5N0ePtpCQEHMszM8v5KuvjpGaWgXkEolECQa9qFQu+voGeOmlbSgUsW2ZaPRL1q2by6pV\nK+Io/fVHfn46kUj/RPVfAJ9vBLPZSE+P84La+OST7Xz66WEUirTxicmXbNq06LJzfkyFk/vVxu9/\nDz/7GWzcCIcPwzW6eHhFSE+Hjz6CFStiaeLPUCVhSplyY0SSJBtw2l6EEEIjSVIQCAGnF4LgVGMk\n3rz99hb8fiv5+Sc2IPv6Wtiy5RPuueeOs34vLy+PNWsq+eyzvahUsdlQJDLETTfNPWt+g3ij0ajx\n+0+vHSFJ4ctKxCYDWq2aSCQWepmdXczAQB89PQcZG5Ow240YDDZWr65m27Yj5OQsnggFj0RK+Pjj\nvZSUFF2z2zXTkUWL5vPmm7ux2+tQKpORJD9K5QizZ9eg04XO+/2uri4++eQIubmLJyYjoVCQDz7Y\nS3Fx0SlGjsyF4/fHcofs2RPzC5k///zfkTk/paXw6acx466pCf7mb+BKLbzF06vpL4QQnwG7gDck\nSXLHUZZzMjo6SlvbEOnpp6aMtloLOXKknUAgcM7vr1u3hh/+8G7Wrctj/fp8/vRP72PlyhumUuTL\nYtGiKoaGWk/ZXhobG0Gv95MfT3fra4CqqjL8/j4ikTAKhZL581eyePEc8vLc3HffPP78zx8lGhWo\n1ZZTiosplSo0GgtHjzbHUfrrjxkzZrB4cQVz5hQyY4aB6uocbrxxE5HIGIsWnd/XrL6+Ca02c8IQ\ngdgqo1JpoalJ1uWl0NUFN9wALlcsZFc2RCaXsrLYda2vh5oaeOUVCJ3f7r5s4jbNlSTpb4C/iVf/\nF0M0GkUIxWkhv0IokKSvK3yem8zMzCu+EuLz+SbCnC+Gmpo5tLV1c/jwlwhhRpKCaLVuHn741utq\nfzpWit6HRqOZtBWh/Px81q2bzbZte4BUIIpC4eTP/uzRiUR74XB4IuLrZBQK1UR5AZnJIxKJ4Pf7\nMRgMp93jWq2Whx/+Fr///XsoFCaEENhsh5k7N5c5c84fIhwKnVmXQigIheJTufZq5pNPYvVUfvpT\n+PM/j9VZkZl8LBZ4993Yts3f/V2soN6tt8LNN8dCgKciw4C85n4BJCcnk55uYHR0mKSkE45nw8O9\nFBVZp10CKpvNxocffkpraz9CQFVVIRs23HhO35aTUSqV3H33t1iypIfe3j4MBj3FxcVXnbf+5dDU\n1MRHH32B3e5BrRanRRxdDqtXr6SyspyOjg4UCgVFRUWnJLGaObOYzz//AEkqmPhxjBlGA8yateGy\n+5eJEYlE2LlzN198UUswKJGYqOWmm5Yze/ap6fULCwv5yU8209rais/nJzs7i+zs7AvKR1RWVsKX\nX25DknInPh+NRgmFhigtnbrItGuNaDSWwOyf/glefBFWX/kgxOsOIWIRNps2xVaj3n4bfvtb+M53\nYO7cE3+brGoUcQ/tPRuTHdp7uXR1dfHcc28RjWZgNCbj8ThQq+08+eQ9l+QQO1W4XC7+9/9+Acgj\nLS0bSYoyMNCO2ezi+99/bFJ+TKeK6RLi+XUulrS0SkwmM6FQgJ6eoyxcaOWOO245fwOXiSRJvPXW\n+3z1VTcJCdnj+Wm6mTcvm29/+7ZpmTPicoiX3rdu/YTPPmshJ6cSjUaHx+NicPAIDz20loqK8vM3\ncAFEo1Fee+1tDh2yYTJlI0kSbnc3ixcX8K1v3XxFK+hONy5U73Z7zJnS4YhtGXyjwLLMFcbrhe3b\n4cMPY6snBQXwl38ZWzE533Ce1nlGzsZ0M0YAHA4HtbWHGRiwk5OTQU1N9bTLiPnFFzv54x87yM09\nNeKlo+MADz98w7SOhJkuxsgzz/wnDkcqZnPGxLFoNEp3907+63/9zhXJVBuNRjl+/DiHDzcCMHv2\nLGbMmHHNGSIQH717vV5++cunycxccoo/h9vtRKns4M/+bPJiGyORCM3NzdTVHUOhEFRXl1FaWnpd\nGyJwfr1LUsz4+MlP4L77YvkvpvFc6rokHI7p6Be/gJSU2L/nWrU6lzFyzT3Ztm/fPmVtp6SksHbt\nanJy0li1asWUGyKXci49PYMYjacH2avVSdhsQ5PWz6VwKf1MhmwX20ZPzyBJSadmcj1+vBaFIgGn\n88LCOS9XFoVCwcyZM7nnntu5557bmTVr1oQhEo9rMtXtTEWb52pnZGQE0J9iiACYTGaGh0cmCldO\nhjxKpZKysjIslmTuvvtbzJgx47IMkemmu8keA34/vPoqLFoU81f4i7/Yzj/+4+UbItPpfK8VWVQq\nePDBmLPrqlXbefJJWLMGdu26+LZkY2Sa9nGp/VgsKfh8p1eyDYfHSEk584x+Op9PPG42iyUFt/tU\no+PYsQNIkveC/W4mS5bp3MZktjMVbZ6rHZPJhCT5xqtkn8DjcZGUZDwl++l0u1bXWjvhcCxHyL//\nOzzwQKya7L/+K/z3/w4HD8Lw8PSQczLbudZkUSpBrd5OY2PMOHnwQVi/Pubf477AONlrzhi53qmp\nmY0QQ4yOxiq/SpLE4GA3ycnBiYJvMudm9epF2O3H8Ptj+UCi0Qgu1xAVFTnXVbGxaxmTycT8+aV0\nddUTicRWQYJBPzZbAzfeuPi630K5UkhSrHbMfffFcoasWgWNjTGfhNtvj2UGlbl6UKth8+ZYFtyH\nH4Y//CGWlO5CkKNprjFSUlLYvPlO3nprK11dxwCJgoJUbr/97osO8b1eKSsr4667fHz88W6GhkCI\nEJmZeu644+Z4iyYziWzatB6V6lP27t2NJGnQaCJ861sLqak5e3kHmclFCDh+XK6qe62h0cSMkYcf\njhmcF8K0dmCNtwwyMjIyMjIyk8dVF00jIyMjIyMjc30g78jJyMjIyMjIxBXZGJEBQAixMN4yyFw4\nsr6uX2Tdy3zNtTQWrrltGiGETpIk/xXoRytJ0rkr5F18m/OBJUAyMALskSRp/yT3cSYDVABbJUla\nO4n9VAJhSZKaTjq2WJKkLy/w+wnExuekFFCcjHFxsTqfDH1Olr4uVx/jn68BRiRJahdCrAM0wEeS\nJJ2/ONPZ23xKkqTfjL+fdjofb+ei7/XppPvxti5b/+PfmfQxcJ7+Jm1MXM3jYbqNhSl5FlytxogQ\n4n7gJ0AYeBv4fyRJkoQQn0mSNOWVC4QQH0uStH4S2/snYgrdBowCScCNxAbNjyaxHx9wpkFXLUlS\nyiT18WsgAwgB6cDjkiQNnks3QojHge8DHuA54AkgSqyi879cRN9TNi4uRueTpc/J0Nel6OMMbfwb\noAX0gB9wAy4gR5Kkxy6wjR2AROwhCmABioAx4EdMQ52Pt39R9/p00v14O5et//F2LnsMXEAfl/0c\nuBbHw3QaC1M2DiRJuipfwB5iockC+C/AO4AZ+GyS+9lxlpdzkvv54mKOX0Y/tUDyGY5vm8xrdtL7\n2cDnwIJz6YbYjaYYH+DdxG5eAey+0uNiMnQ+WfqcDH1dij7OJTdw5KT3n19EGz8GfgesPknnH00H\nnU+W3qeb7idL/5M1Bi6gj8t+DlyL42E6jYWpGgdXdZ4RSZK+ztn8b0KIWuBdYlbfZJJGzPoMnnxQ\nCPHHSe7ngBDiaeBjYpZmIjHruXaS+7kZ8J3h+GSWg1UIITSSJAUlSaoTQtwB/CdQcY7vBKTYEp9P\nCPHM19dbCHHRW2GTMC4mQ+eTpc/J0Nel6OObKE96/3+d9P6Cl1YlSfp/hRBaYLMQ4ntAArGl3umg\nc5i8e3066R4mR/8wCWPgApiU58A1OB6m01iYmnFwOZZMPF/AnwD53ziWDfx/k9zPRs5skc6bgnOa\nS8yS/wtiS5U18b7Ol3geiwDLN46pgPvP8Z1HANU3jmmA//tKj4vJ0vl00eel6OMMbVScRT+3XaJM\nauBZYkvocdf5ZOp9Oul+svQ/FWPgLH1c9nNAHg9TOxamahxctT4j30QI8aIkSQ9cgX5ekiTp/qnu\n53pnsq7zZIwLWedXhumk88mUR+bSmQwdyOPh6uBaCu3NvEL9WK9QP9c7k3WdJ2NcyDq/MkwnnYOs\n9+nAZOhAHg9XAdeSMSIjIyMjIyNzFSIbIzIyMjIyMjJxRTZGZGRkZGRkZOLKteTAapEkyXat9HO9\nM1nXeTLakXV+ZZhOOp/MdmQunel0/8rjYWq5ZowRGRkZGRkZmasTeZtGRkZGRkZGJq7IxoiMjIyM\njIxMXJGNERkZGRkZGZm4Ihsj0wghxAYhRJMQ4rgQ4ufxlkdm6hFCPCeEsAkhjsRbFpkrgxAiVwjx\nmRCiQQhRL4T403jLJDP1CCF0Qoi9QohDQoijQoi/i7dM0wnZgXWaIIRQAseAtUAv8BWxegGNcRVM\nZkoRQtwAjAH/IUlSVbzlkZl6hBBWwCpJ0iEhRAJwALhdvtevfYQQBkmSvEIIFbAT+KkkSTvjLdd0\nQF4ZmT4sBFokSeqQJCkEvAx8K84yyUwxkiTtAJzxlkPmyiFJ0oAkSYfG348BjUBWfKWSuRJIkuQd\nf6shVv3WEUdxphWyMTJ9yAa6T/p/z/gxGRmZaxQhRAFQA+yNryQyVwIhhEIIcQiwAZ9JknQ03jJN\nF2RjZPog75fJyFxHjG/RvA78aHyFROYaR5KkqCRJc4AcYIUQYlWcRZo2yMbI9KEXyD3p/7nEVkdk\nZGSuMYQQauAN4D8lSXo73vLIXFkkSRoFPgDmx1uW6YJsjEwf9gOlQogCIYQGuBd4N84yycjITDJC\nCAH8FjgqSdI/xVsemSuDECJNCJE8/l4PrAMOxleq6YNsjEwTJEkKAz8AtgJHgVdk7/prHyHES8Bu\nYIYQolsI8Z14yyQz5SwDHgJWCyEOjr82xFsomSknE/h03GdkL/CeJEmfxFmmaYMc2isjIyMjIyMT\nV+SVERkZGRkZGZm4IhsjMjIyMjIyMnFFNkZkZGRkZGRk4opsjMjIyMjIyMjEFdkYkZGRkZGRkYkr\nsjEiIyMjIyMjE1dkY0RGRkZGRkYmrsjGiIyMjIyMjExc+f8Bo2OouhAS2pgAAAAASUVORK5CYII=\n", "text": [ - "" + "" ] } ], - "prompt_number": 3 + "prompt_number": 2 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Learn and evaluate scikit-learn's logistic regression with stochastic gradient descent (SGD) training. Time and check the classifier's accuracy." + ] }, { "cell_type": "code", @@ -89,7 +103,7 @@ "clf = sklearn.linear_model.SGDClassifier(\n", " loss='log', n_iter=1000, penalty='l2', alpha=1e-3, class_weight='auto')\n", "\n", - "clf.fit(X, y)\n", + "%timeit clf.fit(X, y)\n", "yt_pred = clf.predict(Xt)\n", "print('Accuracy: {:.3f}'.format(sklearn.metrics.accuracy_score(yt, yt_pred)))" ], @@ -100,11 +114,19 @@ "output_type": "stream", "stream": "stdout", "text": [ - "Accuracy: 0.763\n" + "1 loops, best of 3: 499 ms per loop\n", + "Accuracy: 0.756\n" ] } ], - "prompt_number": 4 + "prompt_number": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Save the dataset to HDF5 for loading in Caffe." + ] }, { "cell_type": "code", @@ -139,15 +161,69 @@ "language": "python", "metadata": {}, "outputs": [], + "prompt_number": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Learn and evaluate logistic regression in Caffe." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "def learn_and_test(solver_file):\n", + " caffe.set_mode_cpu()\n", + " solver = caffe.get_solver(solver_file)\n", + " solver.solve()\n", + "\n", + " accuracy = 0\n", + " test_iters = int(len(Xt) / solver.test_nets[0].blobs['data'].num)\n", + " for i in range(test_iters):\n", + " solver.test_nets[0].forward()\n", + " accuracy += solver.test_nets[0].blobs['accuracy'].data\n", + " accuracy /= test_iters\n", + " return accuracy\n", + "\n", + "%timeit learn_and_test('hdf5_classification/solver.prototxt')\n", + "acc = learn_and_test('hdf5_classification/solver.prototxt')\n", + "print(\"Accuracy: {:.3f}\".format(acc))" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "1 loops, best of 3: 240 ms per loop\n", + "Accuracy: 0.752" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "\n" + ] + } + ], "prompt_number": 5 }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Do the same through the command line interface for detailed output on the model and solving." + ] + }, { "cell_type": "code", "collapsed": false, "input": [ - "# Run caffe. Scroll down in the output to see the final\n", - "# test accuracy, which should be about the same as above.\n", - "!cd .. && ./build/tools/caffe train -solver examples/hdf5_classification/solver.prototxt" + "!../build/tools/caffe train -solver hdf5_classification/solver.prototxt" ], "language": "python", "metadata": {}, @@ -156,9 +232,16 @@ "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.099238 2129298192 caffe.cpp:90] Starting Optimization\r\n", - "I0905 01:07:27.100469 2129298192 solver.cpp:32] Initializing solver from parameters: \r\n", - "test_iter: 1000\r\n", + "I0307 01:34:29.141863 2099749632 caffe.cpp:103] Use CPU.\r\n" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "I0307 01:34:29.418283 2099749632 caffe.cpp:107] Starting Optimization\r\n", + "I0307 01:34:29.418323 2099749632 solver.cpp:32] Initializing solver from parameters: \r\n", + "test_iter: 250\r\n", "test_interval: 1000\r\n", "base_lr: 0.01\r\n", "display: 1000\r\n", @@ -169,42 +252,43 @@ "weight_decay: 0.0005\r\n", "stepsize: 5000\r\n", "snapshot: 10000\r\n", - "snapshot_prefix: \"examples/hdf5_classification/data/train\"\r\n", + "snapshot_prefix: \"hdf5_classification/data/train\"\r\n", "solver_mode: CPU\r\n", - "net: \"examples/hdf5_classification/train_val.prototxt\"\r\n", - "I0905 01:07:27.100630 2129298192 solver.cpp:72] Creating training net from net file: examples/hdf5_classification/train_val.prototxt\r\n" - ] - }, - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "I0905 01:07:27.100988 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", - "I0905 01:07:27.101011 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", - "I0905 01:07:27.101022 2129298192 net.cpp:39] Initializing net from parameters: \r\n", + "net: \"hdf5_classification/train_val.prototxt\"\r\n", + "I0307 01:34:29.418416 2099749632 solver.cpp:70] Creating training net from net file: hdf5_classification/train_val.prototxt\r\n", + "I0307 01:34:29.418583 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", + "I0307 01:34:29.418598 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", + "I0307 01:34:29.418608 2099749632 net.cpp:42] Initializing net from parameters: \r\n", "name: \"LogisticRegressionNet\"\r\n", - "layers {\r\n", + "state {\r\n", + " phase: TRAIN\r\n", + "}\r\n", + "layer {\r\n", + " name: \"data\"\r\n", + " type: \"HDF5Data\"\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " name: \"data\"\r\n", - " type: HDF5_DATA\r\n", - " hdf5_data_param {\r\n", - " source: \"examples/hdf5_classification/data/train.txt\"\r\n", - " batch_size: 10\r\n", - " }\r\n", " include {\r\n", " phase: TRAIN\r\n", " }\r\n", + " hdf5_data_param {\r\n", + " source: \"hdf5_classification/data/train.txt\"\r\n", + " batch_size: 10\r\n", + " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"fc1\"\r\n", + " type: \"InnerProduct\"\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " name: \"fc1\"\r\n", - " type: INNER_PRODUCT\r\n", - " blobs_lr: 1\r\n", - " blobs_lr: 2\r\n", - " weight_decay: 1\r\n", - " weight_decay: 0\r\n", + " param {\r\n", + " lr_mult: 1\r\n", + " decay_mult: 1\r\n", + " }\r\n", + " param {\r\n", + " lr_mult: 2\r\n", + " decay_mult: 0\r\n", + " }\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -217,72 +301,77 @@ " }\r\n", " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"loss\"\r\n", + " type: \"SoftmaxWithLoss\"\r\n", " bottom: \"fc1\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", - " name: \"loss\"\r\n", - " type: SOFTMAX_LOSS\r\n", "}\r\n", + "I0307 01:34:29.418692 2099749632 layer_factory.hpp:74] Creating layer data\r\n", + "I0307 01:34:29.418853 2099749632 net.cpp:84] Creating Layer data\r\n", + "I0307 01:34:29.418879 2099749632 net.cpp:338] data -> data\r\n", + "I0307 01:34:29.418905 2099749632 net.cpp:338] data -> label\r\n", + "I0307 01:34:29.418918 2099749632 net.cpp:113] Setting up data\r\n", + "I0307 01:34:29.418926 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/train.txt\r\n", + "I0307 01:34:29.418992 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 2\r\n", + "I0307 01:34:29.420812 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", + "I0307 01:34:29.420841 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:29.420852 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", + "I0307 01:34:29.420866 2099749632 net.cpp:84] Creating Layer fc1\r\n", + "I0307 01:34:29.420872 2099749632 net.cpp:380] fc1 <- data\r\n", + "I0307 01:34:29.420882 2099749632 net.cpp:338] fc1 -> fc1\r\n", + "I0307 01:34:29.420894 2099749632 net.cpp:113] Setting up fc1\r\n", + "I0307 01:34:29.425689 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:29.425709 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:29.425724 2099749632 net.cpp:84] Creating Layer loss\r\n", + "I0307 01:34:29.425731 2099749632 net.cpp:380] loss <- fc1\r\n", + "I0307 01:34:29.425739 2099749632 net.cpp:380] loss <- label\r\n", + "I0307 01:34:29.425747 2099749632 net.cpp:338] loss -> loss\r\n", + "I0307 01:34:29.425756 2099749632 net.cpp:113] Setting up loss\r\n", + "I0307 01:34:29.425767 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:29.425781 2099749632 net.cpp:120] Top shape: (1)\r\n", + "I0307 01:34:29.425789 2099749632 net.cpp:122] with loss weight 1\r\n", + "I0307 01:34:29.425801 2099749632 net.cpp:167] loss needs backward computation.\r\n", + "I0307 01:34:29.425808 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", + "I0307 01:34:29.425815 2099749632 net.cpp:169] data does not need backward computation.\r\n", + "I0307 01:34:29.425822 2099749632 net.cpp:205] This network produces output loss\r\n", + "I0307 01:34:29.425829 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", + "I0307 01:34:29.425837 2099749632 net.cpp:217] Network initialization done.\r\n", + "I0307 01:34:29.425843 2099749632 net.cpp:218] Memory required for data: 284\r\n", + "I0307 01:34:29.425961 2099749632 solver.cpp:154] Creating test net (#0) specified by net file: hdf5_classification/train_val.prototxt\r\n", + "I0307 01:34:29.425984 2099749632 net.cpp:257] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", + "I0307 01:34:29.425997 2099749632 net.cpp:42] Initializing net from parameters: \r\n", + "name: \"LogisticRegressionNet\"\r\n", "state {\r\n", - " phase: TRAIN\r\n", + " phase: TEST\r\n", "}\r\n", - "I0905 01:07:27.105614 2129298192 net.cpp:67] Creating Layer data\r\n", - "I0905 01:07:27.105664 2129298192 net.cpp:356] data -> data\r\n", - "I0905 01:07:27.105698 2129298192 net.cpp:356] data -> label\r\n", - "I0905 01:07:27.105710 2129298192 net.cpp:96] Setting up data\r\n", - "I0905 01:07:27.105717 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/train.txt\r\n", - "I0905 01:07:27.105813 2129298192 hdf5_data_layer.cpp:69] Number of files: 2\r\n", - "I0905 01:07:27.105828 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.109418 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.109501 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", - "I0905 01:07:27.109522 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", - "I0905 01:07:27.109531 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.109560 2129298192 net.cpp:67] Creating Layer fc1\r\n", - "I0905 01:07:27.109570 2129298192 net.cpp:394] fc1 <- data\r\n", - "I0905 01:07:27.109590 2129298192 net.cpp:356] fc1 -> fc1\r\n", - "I0905 01:07:27.109618 2129298192 net.cpp:96] Setting up fc1\r\n", - "I0905 01:07:27.115136 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.115190 2129298192 net.cpp:67] Creating Layer loss\r\n", - "I0905 01:07:27.115198 2129298192 net.cpp:394] loss <- fc1\r\n", - "I0905 01:07:27.115206 2129298192 net.cpp:394] loss <- label\r\n", - "I0905 01:07:27.115214 2129298192 net.cpp:356] loss -> loss\r\n", - "I0905 01:07:27.115224 2129298192 net.cpp:96] Setting up loss\r\n", - "I0905 01:07:27.115237 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", - "I0905 01:07:27.115244 2129298192 net.cpp:109] with loss weight 1\r\n", - "I0905 01:07:27.115260 2129298192 net.cpp:170] loss needs backward computation.\r\n", - "I0905 01:07:27.115267 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", - "I0905 01:07:27.115273 2129298192 net.cpp:172] data does not need backward computation.\r\n", - "I0905 01:07:27.115278 2129298192 net.cpp:208] This network produces output loss\r\n", - "I0905 01:07:27.115288 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", - "I0905 01:07:27.115295 2129298192 net.cpp:219] Network initialization done.\r\n", - "I0905 01:07:27.115301 2129298192 net.cpp:220] Memory required for data: 284\r\n", - "I0905 01:07:27.115622 2129298192 solver.cpp:156] Creating test net (#0) specified by net file: examples/hdf5_classification/train_val.prototxt\r\n", - "I0905 01:07:27.115644 2129298192 net.cpp:275] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", - "I0905 01:07:27.115656 2129298192 net.cpp:39] Initializing net from parameters: \r\n", - "name: \"LogisticRegressionNet\"\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"data\"\r\n", + " type: \"HDF5Data\"\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " name: \"data\"\r\n", - " type: HDF5_DATA\r\n", - " hdf5_data_param {\r\n", - " source: \"examples/hdf5_classification/data/test.txt\"\r\n", - " batch_size: 10\r\n", - " }\r\n", " include {\r\n", " phase: TEST\r\n", " }\r\n", + " hdf5_data_param {\r\n", + " source: \"hdf5_classification/data/test.txt\"\r\n", + " batch_size: 10\r\n", + " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"fc1\"\r\n", + " type: \"InnerProduct\"\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " name: \"fc1\"\r\n", - " type: INNER_PRODUCT\r\n", - " blobs_lr: 1\r\n", - " blobs_lr: 2\r\n", - " weight_decay: 1\r\n", - " weight_decay: 0\r\n", + " param {\r\n", + " lr_mult: 1\r\n", + " decay_mult: 1\r\n", + " }\r\n", + " param {\r\n", + " lr_mult: 2\r\n", + " decay_mult: 0\r\n", + " }\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -295,194 +384,176 @@ " }\r\n", " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"loss\"\r\n", + " type: \"SoftmaxWithLoss\"\r\n", " bottom: \"fc1\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", - " name: \"loss\"\r\n", - " type: SOFTMAX_LOSS\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"accuracy\"\r\n", + " type: \"Accuracy\"\r\n", " bottom: \"fc1\"\r\n", " bottom: \"label\"\r\n", " top: \"accuracy\"\r\n", - " name: \"accuracy\"\r\n", - " type: ACCURACY\r\n", " include {\r\n", " phase: TEST\r\n", " }\r\n", "}\r\n", - "state {\r\n", - " phase: TEST\r\n", - "}\r\n", - "I0905 01:07:27.115854 2129298192 net.cpp:67] Creating Layer data\r\n", - "I0905 01:07:27.115864 2129298192 net.cpp:356] data -> data\r\n", - "I0905 01:07:27.116004 2129298192 net.cpp:356] data -> label\r\n", - "I0905 01:07:27.116024 2129298192 net.cpp:96] Setting up data\r\n", - "I0905 01:07:27.116030 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/test.txt\r\n", - "I0905 01:07:27.116080 2129298192 hdf5_data_layer.cpp:69] Number of files: 1\r\n", - "I0905 01:07:27.116089 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/test.h5\r\n", - "I0905 01:07:27.117313 2129298192 hdf5_data_layer.cpp:49] Successully loaded 2500 rows\r\n", - "I0905 01:07:27.117348 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", - "I0905 01:07:27.117357 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", - "I0905 01:07:27.117364 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.117377 2129298192 net.cpp:67] Creating Layer label_data_1_split\r\n", - "I0905 01:07:27.117384 2129298192 net.cpp:394] label_data_1_split <- label\r\n", - "I0905 01:07:27.117393 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_0\r\n", - "I0905 01:07:27.117409 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_1\r\n", - "I0905 01:07:27.117419 2129298192 net.cpp:96] Setting up label_data_1_split\r\n", - "I0905 01:07:27.117427 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.117434 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.117444 2129298192 net.cpp:67] Creating Layer fc1\r\n", - "I0905 01:07:27.117449 2129298192 net.cpp:394] fc1 <- data\r\n", - "I0905 01:07:27.117470 2129298192 net.cpp:356] fc1 -> fc1\r\n", - "I0905 01:07:27.117478 2129298192 net.cpp:96] Setting up fc1\r\n", - "I0905 01:07:27.117506 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.117519 2129298192 net.cpp:67] Creating Layer fc1_fc1_0_split\r\n", - "I0905 01:07:27.117527 2129298192 net.cpp:394] fc1_fc1_0_split <- fc1\r\n", - "I0905 01:07:27.117534 2129298192 net.cpp:356] fc1_fc1_0_split -> fc1_fc1_0_split_0\r\n", - "I0905 01:07:27.117543 2129298192 net.cpp:356] fc1_fc1_0_split -> fc1_fc1_0_split_1\r\n", - "I0905 01:07:27.117640 2129298192 net.cpp:96] Setting up fc1_fc1_0_split\r\n", - "I0905 01:07:27.117655 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.117662 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.117673 2129298192 net.cpp:67] Creating Layer loss\r\n", - "I0905 01:07:27.117679 2129298192 net.cpp:394] loss <- fc1_fc1_0_split_0\r\n", - "I0905 01:07:27.117687 2129298192 net.cpp:394] loss <- label_data_1_split_0\r\n", - "I0905 01:07:27.117696 2129298192 net.cpp:356] loss -> loss\r\n", - "I0905 01:07:27.117704 2129298192 net.cpp:96] Setting up loss\r\n", - "I0905 01:07:27.117717 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", - "I0905 01:07:27.117723 2129298192 net.cpp:109] with loss weight 1\r\n", - "I0905 01:07:27.117743 2129298192 net.cpp:67] Creating Layer accuracy\r\n", - "I0905 01:07:27.117749 2129298192 net.cpp:394] accuracy <- fc1_fc1_0_split_1\r\n", - "I0905 01:07:27.117756 2129298192 net.cpp:394] accuracy <- label_data_1_split_1\r\n", - "I0905 01:07:27.117764 2129298192 net.cpp:356] accuracy -> accuracy\r\n", - "I0905 01:07:27.117774 2129298192 net.cpp:96] Setting up accuracy\r\n", - "I0905 01:07:27.117781 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", - "I0905 01:07:27.117789 2129298192 net.cpp:172] accuracy does not need backward computation.\r\n", - "I0905 01:07:27.117794 2129298192 net.cpp:170] loss needs backward computation.\r\n", - "I0905 01:07:27.117835 2129298192 net.cpp:170] fc1_fc1_0_split needs backward computation.\r\n", - "I0905 01:07:27.117842 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", - "I0905 01:07:27.117848 2129298192 net.cpp:172] label_data_1_split does not need backward computation.\r\n", - "I0905 01:07:27.117854 2129298192 net.cpp:172] data does not need backward computation.\r\n", - "I0905 01:07:27.117861 2129298192 net.cpp:208] This network produces output accuracy\r\n", - "I0905 01:07:27.117866 2129298192 net.cpp:208] This network produces output loss\r\n", - "I0905 01:07:27.117877 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", - "I0905 01:07:27.117926 2129298192 net.cpp:219] Network initialization done.\r\n", - "I0905 01:07:27.117938 2129298192 net.cpp:220] Memory required for data: 528\r\n", - "I0905 01:07:27.117985 2129298192 solver.cpp:46] Solver scaffolding done.\r\n", - "I0905 01:07:27.117992 2129298192 solver.cpp:165] Solving LogisticRegressionNet\r\n", - "I0905 01:07:27.118026 2129298192 solver.cpp:251] Iteration 0, Testing net (#0)\r\n", - "I0905 01:07:27.123764 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.646801\r\n", - "I0905 01:07:27.123847 2129298192 solver.cpp:302] Test net output #1: loss = 0.690777 (* 1 = 0.690777 loss)\r\n", - "I0905 01:07:27.123888 2129298192 solver.cpp:195] Iteration 0, loss = 0.689469\r\n", - "I0905 01:07:27.123898 2129298192 solver.cpp:210] Train net output #0: loss = 0.689469 (* 1 = 0.689469 loss)\r\n", - "I0905 01:07:27.123915 2129298192 solver.cpp:405] Iteration 0, lr = 0.01\r\n", - "I0905 01:07:27.127096 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.128094 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.129258 2129298192 solver.cpp:251] Iteration 1000, Testing net (#0)\r\n", - "I0905 01:07:27.135226 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.745599\r\n", - "I0905 01:07:27.135296 2129298192 solver.cpp:302] Test net output #1: loss = 0.573658 (* 1 = 0.573658 loss)\r\n", - "I0905 01:07:27.135315 2129298192 solver.cpp:195] Iteration 1000, loss = 0.49682\r\n", - "I0905 01:07:27.135325 2129298192 solver.cpp:210] Train net output #0: loss = 0.49682 (* 1 = 0.49682 loss)\r\n", - "I0905 01:07:27.135334 2129298192 solver.cpp:405] Iteration 1000, lr = 0.01\r\n", - "I0905 01:07:27.137315 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.137358 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.138335 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.140410 2129298192 solver.cpp:251] Iteration 2000, Testing net (#0)\r\n", - "I0905 01:07:27.147435 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.746399\r\n", - "I0905 01:07:27.147514 2129298192 solver.cpp:302] Test net output #1: loss = 0.582127 (* 1 = 0.582127 loss)\r\n", - "I0905 01:07:27.147541 2129298192 solver.cpp:195] Iteration 2000, loss = 0.555272\r\n", - "I0905 01:07:27.147553 2129298192 solver.cpp:210] Train net output #0: loss = 0.555272 (* 1 = 0.555272 loss)\r\n", - "I0905 01:07:27.147565 2129298192 solver.cpp:405] Iteration 2000, lr = 0.01\r\n", - "I0905 01:07:27.148572 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.149441 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.152377 2129298192 solver.cpp:251] Iteration 3000, Testing net (#0)\r\n" + "I0307 01:34:29.426126 2099749632 layer_factory.hpp:74] Creating layer data\r\n", + "I0307 01:34:29.426311 2099749632 net.cpp:84] Creating Layer data\r\n", + "I0307 01:34:29.426331 2099749632 net.cpp:338] data -> data\r\n", + "I0307 01:34:29.426343 2099749632 net.cpp:338] data -> label\r\n", + "I0307 01:34:29.426354 2099749632 net.cpp:113] Setting up data\r\n", + "I0307 01:34:29.426362 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/test.txt\r\n", + "I0307 01:34:29.426484 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 1\r\n", + "I0307 01:34:29.427692 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", + "I0307 01:34:29.427711 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:29.427721 2099749632 layer_factory.hpp:74] Creating layer label_data_1_split\r\n", + "I0307 01:34:29.427731 2099749632 net.cpp:84] Creating Layer label_data_1_split\r\n", + "I0307 01:34:29.427738 2099749632 net.cpp:380] label_data_1_split <- label\r\n", + "I0307 01:34:29.427747 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_0\r\n", + "I0307 01:34:29.427759 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_1\r\n", + "I0307 01:34:29.427768 2099749632 net.cpp:113] Setting up label_data_1_split\r\n", + "I0307 01:34:29.427777 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:29.427784 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:29.427791 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", + "I0307 01:34:29.427804 2099749632 net.cpp:84] Creating Layer fc1\r\n", + "I0307 01:34:29.427813 2099749632 net.cpp:380] fc1 <- data\r\n", + "I0307 01:34:29.427821 2099749632 net.cpp:338] fc1 -> fc1\r\n", + "I0307 01:34:29.427831 2099749632 net.cpp:113] Setting up fc1\r\n", + "I0307 01:34:29.427845 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:29.427857 2099749632 layer_factory.hpp:74] Creating layer fc1_fc1_0_split\r\n", + "I0307 01:34:29.427866 2099749632 net.cpp:84] Creating Layer fc1_fc1_0_split\r\n", + "I0307 01:34:29.427872 2099749632 net.cpp:380] fc1_fc1_0_split <- fc1\r\n", + "I0307 01:34:29.427881 2099749632 net.cpp:338] fc1_fc1_0_split -> fc1_fc1_0_split_0\r\n", + "I0307 01:34:29.427891 2099749632 net.cpp:338] fc1_fc1_0_split -> fc1_fc1_0_split_1\r\n", + "I0307 01:34:29.427942 2099749632 net.cpp:113] Setting up fc1_fc1_0_split\r\n", + "I0307 01:34:29.427955 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:29.427965 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:29.427976 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:29.427991 2099749632 net.cpp:84] Creating Layer loss\r\n", + "I0307 01:34:29.428001 2099749632 net.cpp:380] loss <- fc1_fc1_0_split_0\r\n", + "I0307 01:34:29.428009 2099749632 net.cpp:380] loss <- label_data_1_split_0\r\n", + "I0307 01:34:29.428017 2099749632 net.cpp:338] loss -> loss\r\n", + "I0307 01:34:29.428026 2099749632 net.cpp:113] Setting up loss\r\n", + "I0307 01:34:29.428035 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:29.428048 2099749632 net.cpp:120] Top shape: (1)\r\n", + "I0307 01:34:29.428056 2099749632 net.cpp:122] with loss weight 1\r\n", + "I0307 01:34:29.428064 2099749632 layer_factory.hpp:74] Creating layer accuracy\r\n", + "I0307 01:34:29.428076 2099749632 net.cpp:84] Creating Layer accuracy\r\n", + "I0307 01:34:29.428084 2099749632 net.cpp:380] accuracy <- fc1_fc1_0_split_1\r\n", + "I0307 01:34:29.428092 2099749632 net.cpp:380] accuracy <- label_data_1_split_1\r\n", + "I0307 01:34:29.428102 2099749632 net.cpp:338] accuracy -> accuracy\r\n", + "I0307 01:34:29.428131 2099749632 net.cpp:113] Setting up accuracy\r\n", + "I0307 01:34:29.428140 2099749632 net.cpp:120] Top shape: (1)\r\n", + "I0307 01:34:29.428148 2099749632 net.cpp:169] accuracy does not need backward computation.\r\n", + "I0307 01:34:29.428154 2099749632 net.cpp:167] loss needs backward computation.\r\n", + "I0307 01:34:29.428161 2099749632 net.cpp:167] fc1_fc1_0_split needs backward computation.\r\n", + "I0307 01:34:29.428167 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", + "I0307 01:34:29.428174 2099749632 net.cpp:169] label_data_1_split does not need backward computation.\r\n", + "I0307 01:34:29.428181 2099749632 net.cpp:169] data does not need backward computation.\r\n", + "I0307 01:34:29.428189 2099749632 net.cpp:205] This network produces output accuracy\r\n", + "I0307 01:34:29.428324 2099749632 net.cpp:205] This network produces output loss\r\n", + "I0307 01:34:29.428342 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", + "I0307 01:34:29.428350 2099749632 net.cpp:217] Network initialization done.\r\n", + "I0307 01:34:29.428357 2099749632 net.cpp:218] Memory required for data: 528\r\n", + "I0307 01:34:29.428388 2099749632 solver.cpp:42] Solver scaffolding done.\r\n", + "I0307 01:34:29.428412 2099749632 solver.cpp:222] Solving LogisticRegressionNet\r\n", + "I0307 01:34:29.428421 2099749632 solver.cpp:223] Learning Rate Policy: step\r\n", + "I0307 01:34:29.428431 2099749632 solver.cpp:266] Iteration 0, Testing net (#0)\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.158655 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.696\r\n", - "I0905 01:07:27.158746 2129298192 solver.cpp:302] Test net output #1: loss = 0.580239 (* 1 = 0.580239 loss)\r\n", - "I0905 01:07:27.158761 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.158768 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.159765 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.159843 2129298192 solver.cpp:195] Iteration 3000, loss = 0.476517\r\n", - "I0905 01:07:27.159873 2129298192 solver.cpp:210] Train net output #0: loss = 0.476517 (* 1 = 0.476517 loss)\r\n", - "I0905 01:07:27.159983 2129298192 solver.cpp:405] Iteration 3000, lr = 0.01\r\n", - "I0905 01:07:27.163079 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.163602 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.164567 2129298192 solver.cpp:251] Iteration 4000, Testing net (#0)\r\n", - "I0905 01:07:27.170277 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.745599\r\n", - "I0905 01:07:27.170344 2129298192 solver.cpp:302] Test net output #1: loss = 0.573658 (* 1 = 0.573658 loss)\r\n", - "I0905 01:07:27.170364 2129298192 solver.cpp:195] Iteration 4000, loss = 0.49682\r\n", - "I0905 01:07:27.170375 2129298192 solver.cpp:210] Train net output #0: loss = 0.49682 (* 1 = 0.49682 loss)\r\n", - "I0905 01:07:27.170385 2129298192 solver.cpp:405] Iteration 4000, lr = 0.01\r\n", - "I0905 01:07:27.172350 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.172374 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.173084 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.175192 2129298192 solver.cpp:251] Iteration 5000, Testing net (#0)\r\n", - "I0905 01:07:27.181659 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.746399\r\n", - "I0905 01:07:27.181710 2129298192 solver.cpp:302] Test net output #1: loss = 0.582127 (* 1 = 0.582127 loss)\r\n", - "I0905 01:07:27.181730 2129298192 solver.cpp:195] Iteration 5000, loss = 0.555272\r\n", - "I0905 01:07:27.181740 2129298192 solver.cpp:210] Train net output #0: loss = 0.555272 (* 1 = 0.555272 loss)\r\n", - "I0905 01:07:27.181748 2129298192 solver.cpp:405] Iteration 5000, lr = 0.001\r\n", - "I0905 01:07:27.182734 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.183248 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.186180 2129298192 solver.cpp:251] Iteration 6000, Testing net (#0)\r\n", - "I0905 01:07:27.192646 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7684\r\n", - "I0905 01:07:27.192751 2129298192 solver.cpp:302] Test net output #1: loss = 0.574538 (* 1 = 0.574538 loss)\r\n", - "I0905 01:07:27.192766 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.192773 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.193936 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.194007 2129298192 solver.cpp:195] Iteration 6000, loss = 0.464052\r\n", - "I0905 01:07:27.194036 2129298192 solver.cpp:210] Train net output #0: loss = 0.464052 (* 1 = 0.464052 loss)\r\n", - "I0905 01:07:27.194051 2129298192 solver.cpp:405] Iteration 6000, lr = 0.001\r\n", - "I0905 01:07:27.197053 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.198092 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.199162 2129298192 solver.cpp:251] Iteration 7000, Testing net (#0)\r\n", - "I0905 01:07:27.205195 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7684\r\n", - "I0905 01:07:27.205298 2129298192 solver.cpp:302] Test net output #1: loss = 0.574549 (* 1 = 0.574549 loss)\r\n", - "I0905 01:07:27.205327 2129298192 solver.cpp:195] Iteration 7000, loss = 0.495483\r\n", - "I0905 01:07:27.205338 2129298192 solver.cpp:210] Train net output #0: loss = 0.495483 (* 1 = 0.495483 loss)\r\n", - "I0905 01:07:27.205353 2129298192 solver.cpp:405] Iteration 7000, lr = 0.001\r\n" + "I0307 01:34:29.471674 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.4532\r\n", + "I0307 01:34:29.471724 2099749632 solver.cpp:315] Test net output #1: loss = 0.694067 (* 1 = 0.694067 loss)\r\n", + "I0307 01:34:29.471853 2099749632 solver.cpp:189] Iteration 0, loss = 0.692695\r\n", + "I0307 01:34:29.471878 2099749632 solver.cpp:204] Train net output #0: loss = 0.692695 (* 1 = 0.692695 loss)\r\n", + "I0307 01:34:29.471890 2099749632 solver.cpp:464] Iteration 0, lr = 0.01\r\n", + "I0307 01:34:29.483834 2099749632 solver.cpp:266] Iteration 1000, Testing net (#0)\r\n", + "I0307 01:34:29.486868 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7424\r\n", + "I0307 01:34:29.486896 2099749632 solver.cpp:315] Test net output #1: loss = 0.601764 (* 1 = 0.601764 loss)\r\n", + "I0307 01:34:29.486922 2099749632 solver.cpp:189] Iteration 1000, loss = 0.472665\r\n", + "I0307 01:34:29.486934 2099749632 solver.cpp:204] Train net output #0: loss = 0.472665 (* 1 = 0.472665 loss)\r\n", + "I0307 01:34:29.486944 2099749632 solver.cpp:464] Iteration 1000, lr = 0.01\r\n", + "I0307 01:34:29.498821 2099749632 solver.cpp:266] Iteration 2000, Testing net (#0)\r\n", + "I0307 01:34:29.501900 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7364\r\n", + "I0307 01:34:29.501941 2099749632 solver.cpp:315] Test net output #1: loss = 0.60818 (* 1 = 0.60818 loss)\r\n", + "I0307 01:34:29.501988 2099749632 solver.cpp:189] Iteration 2000, loss = 0.6863\r\n", + "I0307 01:34:29.502003 2099749632 solver.cpp:204] Train net output #0: loss = 0.6863 (* 1 = 0.6863 loss)\r\n", + "I0307 01:34:29.502013 2099749632 solver.cpp:464] Iteration 2000, lr = 0.01\r\n", + "I0307 01:34:29.513921 2099749632 solver.cpp:266] Iteration 3000, Testing net (#0)\r\n", + "I0307 01:34:29.517227 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.6964\r\n", + "I0307 01:34:29.517300 2099749632 solver.cpp:315] Test net output #1: loss = 0.604707 (* 1 = 0.604707 loss)\r\n", + "I0307 01:34:29.518105 2099749632 solver.cpp:189] Iteration 3000, loss = 0.617542\r\n", + "I0307 01:34:29.518154 2099749632 solver.cpp:204] Train net output #0: loss = 0.617542 (* 1 = 0.617542 loss)\r\n", + "I0307 01:34:29.518170 2099749632 solver.cpp:464] Iteration 3000, lr = 0.01\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.207471 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.207489 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.208534 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.210860 2129298192 solver.cpp:251] Iteration 8000, Testing net (#0)\r\n", - "I0905 01:07:27.216624 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.762\r\n", - "I0905 01:07:27.216704 2129298192 solver.cpp:302] Test net output #1: loss = 0.574515 (* 1 = 0.574515 loss)\r\n", - "I0905 01:07:27.216723 2129298192 solver.cpp:195] Iteration 8000, loss = 0.524565\r\n", - "I0905 01:07:27.216733 2129298192 solver.cpp:210] Train net output #0: loss = 0.524565 (* 1 = 0.524565 loss)\r\n", - "I0905 01:07:27.216743 2129298192 solver.cpp:405] Iteration 8000, lr = 0.001\r\n", - "I0905 01:07:27.217738 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.218291 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.221294 2129298192 solver.cpp:251] Iteration 9000, Testing net (#0)\r\n", - "I0905 01:07:27.227104 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7688\r\n", - "I0905 01:07:27.227171 2129298192 solver.cpp:302] Test net output #1: loss = 0.574278 (* 1 = 0.574278 loss)\r\n", - "I0905 01:07:27.227183 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.227190 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.228143 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.228210 2129298192 solver.cpp:195] Iteration 9000, loss = 0.461831\r\n", - "I0905 01:07:27.228240 2129298192 solver.cpp:210] Train net output #0: loss = 0.461831 (* 1 = 0.461831 loss)\r\n", - "I0905 01:07:27.228252 2129298192 solver.cpp:405] Iteration 9000, lr = 0.001\r\n", - "I0905 01:07:27.231314 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.232293 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.233417 2129298192 solver.cpp:319] Snapshotting to examples/hdf5_classification/data/train_iter_10000\r\n", - "I0905 01:07:27.233680 2129298192 solver.cpp:326] Snapshotting solver state to examples/hdf5_classification/data/train_iter_10000.solverstate\r\n", - "I0905 01:07:27.233795 2129298192 solver.cpp:232] Iteration 10000, loss = 0.49554\r\n", - "I0905 01:07:27.233814 2129298192 solver.cpp:251] Iteration 10000, Testing net (#0)\r\n", - "I0905 01:07:27.240015 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.768\r\n", - "I0905 01:07:27.240099 2129298192 solver.cpp:302] Test net output #1: loss = 0.574488 (* 1 = 0.574488 loss)\r\n", - "I0905 01:07:27.240110 2129298192 solver.cpp:237] Optimization Done.\r\n", - "I0905 01:07:27.240118 2129298192 caffe.cpp:114] Optimization Done.\r\n" + "I0307 01:34:29.531672 2099749632 solver.cpp:266] Iteration 4000, Testing net (#0)\r\n", + "I0307 01:34:29.534873 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7424\r\n", + "I0307 01:34:29.534920 2099749632 solver.cpp:315] Test net output #1: loss = 0.601764 (* 1 = 0.601764 loss)\r\n", + "I0307 01:34:29.534950 2099749632 solver.cpp:189] Iteration 4000, loss = 0.472666\r\n", + "I0307 01:34:29.534962 2099749632 solver.cpp:204] Train net output #0: loss = 0.472665 (* 1 = 0.472665 loss)\r\n", + "I0307 01:34:29.534973 2099749632 solver.cpp:464] Iteration 4000, lr = 0.01\r\n", + "I0307 01:34:29.546567 2099749632 solver.cpp:266] Iteration 5000, Testing net (#0)\r\n", + "I0307 01:34:29.549762 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7364\r\n", + "I0307 01:34:29.549789 2099749632 solver.cpp:315] Test net output #1: loss = 0.60818 (* 1 = 0.60818 loss)\r\n", + "I0307 01:34:29.549815 2099749632 solver.cpp:189] Iteration 5000, loss = 0.686301\r\n", + "I0307 01:34:29.549828 2099749632 solver.cpp:204] Train net output #0: loss = 0.6863 (* 1 = 0.6863 loss)\r\n", + "I0307 01:34:29.549837 2099749632 solver.cpp:464] Iteration 5000, lr = 0.001\r\n", + "I0307 01:34:29.562142 2099749632 solver.cpp:266] Iteration 6000, Testing net (#0)\r\n", + "I0307 01:34:29.565335 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7476\r\n", + "I0307 01:34:29.565373 2099749632 solver.cpp:315] Test net output #1: loss = 0.59775 (* 1 = 0.59775 loss)\r\n", + "I0307 01:34:29.566051 2099749632 solver.cpp:189] Iteration 6000, loss = 0.664614\r\n", + "I0307 01:34:29.566086 2099749632 solver.cpp:204] Train net output #0: loss = 0.664614 (* 1 = 0.664614 loss)\r\n", + "I0307 01:34:29.566097 2099749632 solver.cpp:464] Iteration 6000, lr = 0.001\r\n" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "I0307 01:34:29.577900 2099749632 solver.cpp:266] Iteration 7000, Testing net (#0)\r\n", + "I0307 01:34:29.580993 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7524\r\n", + "I0307 01:34:29.581015 2099749632 solver.cpp:315] Test net output #1: loss = 0.597349 (* 1 = 0.597349 loss)\r\n", + "I0307 01:34:29.581038 2099749632 solver.cpp:189] Iteration 7000, loss = 0.456775\r\n", + "I0307 01:34:29.581050 2099749632 solver.cpp:204] Train net output #0: loss = 0.456774 (* 1 = 0.456774 loss)\r\n", + "I0307 01:34:29.581059 2099749632 solver.cpp:464] Iteration 7000, lr = 0.001\r\n", + "I0307 01:34:29.592854 2099749632 solver.cpp:266] Iteration 8000, Testing net (#0)\r\n", + "I0307 01:34:29.595973 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7568\r\n", + "I0307 01:34:29.596002 2099749632 solver.cpp:315] Test net output #1: loss = 0.597265 (* 1 = 0.597265 loss)\r\n", + "I0307 01:34:29.596027 2099749632 solver.cpp:189] Iteration 8000, loss = 0.673885\r\n", + "I0307 01:34:29.596040 2099749632 solver.cpp:204] Train net output #0: loss = 0.673885 (* 1 = 0.673885 loss)\r\n", + "I0307 01:34:29.596048 2099749632 solver.cpp:464] Iteration 8000, lr = 0.001\r\n", + "I0307 01:34:29.607822 2099749632 solver.cpp:266] Iteration 9000, Testing net (#0)\r\n", + "I0307 01:34:29.610930 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7432\r\n", + "I0307 01:34:29.610960 2099749632 solver.cpp:315] Test net output #1: loss = 0.597777 (* 1 = 0.597777 loss)\r\n", + "I0307 01:34:29.611558 2099749632 solver.cpp:189] Iteration 9000, loss = 0.66526\r\n", + "I0307 01:34:29.611583 2099749632 solver.cpp:204] Train net output #0: loss = 0.66526 (* 1 = 0.66526 loss)\r\n", + "I0307 01:34:29.611593 2099749632 solver.cpp:464] Iteration 9000, lr = 0.001\r\n", + "I0307 01:34:29.623009 2099749632 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_10000.caffemodel\r\n", + "I0307 01:34:29.623209 2099749632 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_10000.solverstate\r\n", + "I0307 01:34:29.623319 2099749632 solver.cpp:248] Iteration 10000, loss = 0.457922\r\n", + "I0307 01:34:29.623333 2099749632 solver.cpp:266] Iteration 10000, Testing net (#0)\r\n" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "I0307 01:34:29.626454 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.752\r\n", + "I0307 01:34:29.626484 2099749632 solver.cpp:315] Test net output #1: loss = 0.597362 (* 1 = 0.597362 loss)\r\n", + "I0307 01:34:29.626493 2099749632 solver.cpp:253] Optimization Done.\r\n", + "I0307 01:34:29.626502 2099749632 caffe.cpp:121] Optimization Done.\r\n" ] } ], @@ -492,18 +563,33 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If you look at the `train_val.prototxt`, you'll see that it's simple logistic regression.\n", - "We can make it a little more advanced by introducing a non-linearity between weights that take the input and weights that give the output -- now we have a two-layer neural network.\n", + "If you look at output or the `train_val.prototxt`, you'll see that the model is simple logistic regression.\n", + "We can make it a little more advanced by introducing a non-linearity between weights that take the input and weights that give the output -- now we have a two-layer network.\n", "That network is given in `train_val2.prototxt`, and that's the only change made in `solver2.prototxt` which we will now use.\n", "\n", - "The final accuracy of the network we'll train below should be higher than for the network above!" + "The final accuracy of the new network be higher than logistic regression!" ] }, { "cell_type": "code", "collapsed": false, "input": [ - "!cd .. && ./build/tools/caffe train -solver examples/hdf5_classification/solver2.prototxt" + "def learn_and_test(solver_file):\n", + " caffe.set_mode_cpu()\n", + " solver = caffe.get_solver(solver_file)\n", + " solver.solve()\n", + "\n", + " accuracy = 0\n", + " test_iters = int(len(Xt) / solver.test_nets[0].blobs['data'].num)\n", + " for i in range(test_iters):\n", + " solver.test_nets[0].forward()\n", + " accuracy += solver.test_nets[0].blobs['accuracy'].data\n", + " accuracy /= test_iters\n", + " return accuracy\n", + "\n", + "%timeit learn_and_test('hdf5_classification/solver2.prototxt')\n", + "acc = learn_and_test('hdf5_classification/solver2.prototxt')\n", + "print(\"Accuracy: {:.3f}\".format(acc))" ], "language": "python", "metadata": {}, @@ -512,9 +598,50 @@ "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.466722 2129298192 caffe.cpp:90] Starting Optimization\r\n", - "I0905 01:07:27.468166 2129298192 solver.cpp:32] Initializing solver from parameters: \r\n", - "test_iter: 1000\r\n", + "1 loops, best of 3: 333 ms per loop\n", + "Accuracy: 0.818" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "\n" + ] + } + ], + "prompt_number": 7 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Do the same through the command line interface for detailed output on the model and solving." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "!../build/tools/caffe train -solver hdf5_classification/solver2.prototxt" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "I0307 01:34:31.589234 2099749632 caffe.cpp:103] Use CPU.\r\n" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "I0307 01:34:31.872560 2099749632 caffe.cpp:107] Starting Optimization\r\n", + "I0307 01:34:31.872596 2099749632 solver.cpp:32] Initializing solver from parameters: \r\n", + "test_iter: 250\r\n", "test_interval: 1000\r\n", "base_lr: 0.01\r\n", "display: 1000\r\n", @@ -525,36 +652,43 @@ "weight_decay: 0.0005\r\n", "stepsize: 5000\r\n", "snapshot: 10000\r\n", - "snapshot_prefix: \"examples/hdf5_classification/data/train\"\r\n", + "snapshot_prefix: \"hdf5_classification/data/train\"\r\n", "solver_mode: CPU\r\n", - "net: \"examples/hdf5_classification/train_val2.prototxt\"\r\n", - "I0905 01:07:27.468351 2129298192 solver.cpp:72] Creating training net from net file: examples/hdf5_classification/train_val2.prototxt\r\n", - "I0905 01:07:27.469081 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", - "I0905 01:07:27.469100 2129298192 net.cpp:275] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", - "I0905 01:07:27.469110 2129298192 net.cpp:39] Initializing net from parameters: \r\n", + "net: \"hdf5_classification/train_val2.prototxt\"\r\n", + "I0307 01:34:31.872687 2099749632 solver.cpp:70] Creating training net from net file: hdf5_classification/train_val2.prototxt\r\n", + "I0307 01:34:31.872865 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer data\r\n", + "I0307 01:34:31.872882 2099749632 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy\r\n", + "I0307 01:34:31.872891 2099749632 net.cpp:42] Initializing net from parameters: \r\n", "name: \"LogisticRegressionNet\"\r\n", - "layers {\r\n", + "state {\r\n", + " phase: TRAIN\r\n", + "}\r\n", + "layer {\r\n", + " name: \"data\"\r\n", + " type: \"HDF5Data\"\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " name: \"data\"\r\n", - " type: HDF5_DATA\r\n", - " hdf5_data_param {\r\n", - " source: \"examples/hdf5_classification/data/train.txt\"\r\n", - " batch_size: 10\r\n", - " }\r\n", " include {\r\n", " phase: TRAIN\r\n", " }\r\n", + " hdf5_data_param {\r\n", + " source: \"hdf5_classification/data/train.txt\"\r\n", + " batch_size: 10\r\n", + " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"fc1\"\r\n", + " type: \"InnerProduct\"\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " name: \"fc1\"\r\n", - " type: INNER_PRODUCT\r\n", - " blobs_lr: 1\r\n", - " blobs_lr: 2\r\n", - " weight_decay: 1\r\n", - " weight_decay: 0\r\n", + " param {\r\n", + " lr_mult: 1\r\n", + " decay_mult: 1\r\n", + " }\r\n", + " param {\r\n", + " lr_mult: 2\r\n", + " decay_mult: 0\r\n", + " }\r\n", " inner_product_param {\r\n", " num_output: 40\r\n", " weight_filler {\r\n", @@ -567,21 +701,25 @@ " }\r\n", " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"relu1\"\r\n", + " type: \"ReLU\"\r\n", " bottom: \"fc1\"\r\n", " top: \"fc1\"\r\n", - " name: \"relu1\"\r\n", - " type: RELU\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"fc2\"\r\n", + " type: \"InnerProduct\"\r\n", " bottom: \"fc1\"\r\n", " top: \"fc2\"\r\n", - " name: \"fc2\"\r\n", - " type: INNER_PRODUCT\r\n", - " blobs_lr: 1\r\n", - " blobs_lr: 2\r\n", - " weight_decay: 1\r\n", - " weight_decay: 0\r\n", + " param {\r\n", + " lr_mult: 1\r\n", + " decay_mult: 1\r\n", + " }\r\n", + " param {\r\n", + " lr_mult: 2\r\n", + " decay_mult: 0\r\n", + " }\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -594,84 +732,91 @@ " }\r\n", " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"loss\"\r\n", + " type: \"SoftmaxWithLoss\"\r\n", " bottom: \"fc2\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", - " name: \"loss\"\r\n", - " type: SOFTMAX_LOSS\r\n", "}\r\n", + "I0307 01:34:31.873246 2099749632 layer_factory.hpp:74] Creating layer data\r\n", + "I0307 01:34:31.873276 2099749632 net.cpp:84] Creating Layer data\r\n", + "I0307 01:34:31.873292 2099749632 net.cpp:338] data -> data\r\n", + "I0307 01:34:31.873332 2099749632 net.cpp:338] data -> label\r\n", + "I0307 01:34:31.873352 2099749632 net.cpp:113] Setting up data\r\n", + "I0307 01:34:31.873361 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/train.txt\r\n", + "I0307 01:34:31.873443 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 2\r\n", + "I0307 01:34:31.875783 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", + "I0307 01:34:31.875816 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:31.875829 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", + "I0307 01:34:31.875846 2099749632 net.cpp:84] Creating Layer fc1\r\n", + "I0307 01:34:31.875857 2099749632 net.cpp:380] fc1 <- data\r\n", + "I0307 01:34:31.875875 2099749632 net.cpp:338] fc1 -> fc1\r\n", + "I0307 01:34:31.875892 2099749632 net.cpp:113] Setting up fc1\r\n", + "I0307 01:34:31.882478 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", + "I0307 01:34:31.882505 2099749632 layer_factory.hpp:74] Creating layer relu1\r\n", + "I0307 01:34:31.882524 2099749632 net.cpp:84] Creating Layer relu1\r\n", + "I0307 01:34:31.882532 2099749632 net.cpp:380] relu1 <- fc1\r\n", + "I0307 01:34:31.882544 2099749632 net.cpp:327] relu1 -> fc1 (in-place)\r\n", + "I0307 01:34:31.882555 2099749632 net.cpp:113] Setting up relu1\r\n", + "I0307 01:34:31.882565 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", + "I0307 01:34:31.882583 2099749632 layer_factory.hpp:74] Creating layer fc2\r\n", + "I0307 01:34:31.882609 2099749632 net.cpp:84] Creating Layer fc2\r\n", + "I0307 01:34:31.882619 2099749632 net.cpp:380] fc2 <- fc1\r\n", + "I0307 01:34:31.882632 2099749632 net.cpp:338] fc2 -> fc2\r\n", + "I0307 01:34:31.882644 2099749632 net.cpp:113] Setting up fc2\r\n", + "I0307 01:34:31.882663 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:31.882678 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:31.882694 2099749632 net.cpp:84] Creating Layer loss\r\n", + "I0307 01:34:31.882704 2099749632 net.cpp:380] loss <- fc2\r\n", + "I0307 01:34:31.882712 2099749632 net.cpp:380] loss <- label\r\n", + "I0307 01:34:31.882779 2099749632 net.cpp:338] loss -> loss\r\n", + "I0307 01:34:31.882796 2099749632 net.cpp:113] Setting up loss\r\n", + "I0307 01:34:31.882810 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:31.882833 2099749632 net.cpp:120] Top shape: (1)\r\n", + "I0307 01:34:31.882844 2099749632 net.cpp:122] with loss weight 1\r\n", + "I0307 01:34:31.882860 2099749632 net.cpp:167] loss needs backward computation.\r\n", + "I0307 01:34:31.882869 2099749632 net.cpp:167] fc2 needs backward computation.\r\n", + "I0307 01:34:31.882877 2099749632 net.cpp:167] relu1 needs backward computation.\r\n", + "I0307 01:34:31.882886 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", + "I0307 01:34:31.882894 2099749632 net.cpp:169] data does not need backward computation.\r\n", + "I0307 01:34:31.882904 2099749632 net.cpp:205] This network produces output loss\r\n", + "I0307 01:34:31.882931 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", + "I0307 01:34:31.882942 2099749632 net.cpp:217] Network initialization done.\r\n", + "I0307 01:34:31.882951 2099749632 net.cpp:218] Memory required for data: 3484\r\n", + "I0307 01:34:31.883157 2099749632 solver.cpp:154] Creating test net (#0) specified by net file: hdf5_classification/train_val2.prototxt\r\n", + "I0307 01:34:31.883189 2099749632 net.cpp:257] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", + "I0307 01:34:31.883203 2099749632 net.cpp:42] Initializing net from parameters: \r\n", + "name: \"LogisticRegressionNet\"\r\n", "state {\r\n", - " phase: TRAIN\r\n", + " phase: TEST\r\n", "}\r\n", - "I0905 01:07:27.469447 2129298192 net.cpp:67] Creating Layer data\r\n", - "I0905 01:07:27.469467 2129298192 net.cpp:356] data -> data\r\n", - "I0905 01:07:27.469493 2129298192 net.cpp:356] data -> label\r\n", - "I0905 01:07:27.469503 2129298192 net.cpp:96] Setting up data\r\n", - "I0905 01:07:27.469511 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/train.txt\r\n", - "I0905 01:07:27.469558 2129298192 hdf5_data_layer.cpp:69] Number of files: 2\r\n", - "I0905 01:07:27.469569 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.471978 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.471997 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", - "I0905 01:07:27.472008 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", - "I0905 01:07:27.472015 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.472026 2129298192 net.cpp:67] Creating Layer fc1\r\n", - "I0905 01:07:27.472033 2129298192 net.cpp:394] fc1 <- data\r\n", - "I0905 01:07:27.472045 2129298192 net.cpp:356] fc1 -> fc1\r\n", - "I0905 01:07:27.472060 2129298192 net.cpp:96] Setting up fc1\r\n", - "I0905 01:07:27.476827 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", - "I0905 01:07:27.476857 2129298192 net.cpp:67] Creating Layer relu1\r\n", - "I0905 01:07:27.476865 2129298192 net.cpp:394] relu1 <- fc1\r\n", - "I0905 01:07:27.476872 2129298192 net.cpp:345] relu1 -> fc1 (in-place)\r\n", - "I0905 01:07:27.476881 2129298192 net.cpp:96] Setting up relu1\r\n", - "I0905 01:07:27.476888 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", - "I0905 01:07:27.476896 2129298192 net.cpp:67] Creating Layer fc2\r\n", - "I0905 01:07:27.476902 2129298192 net.cpp:394] fc2 <- fc1\r\n", - "I0905 01:07:27.476909 2129298192 net.cpp:356] fc2 -> fc2\r\n", - "I0905 01:07:27.476918 2129298192 net.cpp:96] Setting up fc2\r\n", - "I0905 01:07:27.476932 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.476955 2129298192 net.cpp:67] Creating Layer loss\r\n", - "I0905 01:07:27.476963 2129298192 net.cpp:394] loss <- fc2\r\n", - "I0905 01:07:27.476969 2129298192 net.cpp:394] loss <- label\r\n", - "I0905 01:07:27.476975 2129298192 net.cpp:356] loss -> loss\r\n", - "I0905 01:07:27.476984 2129298192 net.cpp:96] Setting up loss\r\n", - "I0905 01:07:27.477005 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", - "I0905 01:07:27.477040 2129298192 net.cpp:109] with loss weight 1\r\n", - "I0905 01:07:27.477051 2129298192 net.cpp:170] loss needs backward computation.\r\n", - "I0905 01:07:27.477058 2129298192 net.cpp:170] fc2 needs backward computation.\r\n", - "I0905 01:07:27.477063 2129298192 net.cpp:170] relu1 needs backward computation.\r\n", - "I0905 01:07:27.477069 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", - "I0905 01:07:27.477076 2129298192 net.cpp:172] data does not need backward computation.\r\n", - "I0905 01:07:27.477080 2129298192 net.cpp:208] This network produces output loss\r\n", - "I0905 01:07:27.477099 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", - "I0905 01:07:27.477105 2129298192 net.cpp:219] Network initialization done.\r\n", - "I0905 01:07:27.477112 2129298192 net.cpp:220] Memory required for data: 3484\r\n", - "I0905 01:07:27.477455 2129298192 solver.cpp:156] Creating test net (#0) specified by net file: examples/hdf5_classification/train_val2.prototxt\r\n", - "I0905 01:07:27.477480 2129298192 net.cpp:275] The NetState phase (1) differed from the phase (0) specified by a rule in layer data\r\n", - "I0905 01:07:27.477494 2129298192 net.cpp:39] Initializing net from parameters: \r\n", - "name: \"LogisticRegressionNet\"\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"data\"\r\n", + " type: \"HDF5Data\"\r\n", " top: \"data\"\r\n", " top: \"label\"\r\n", - " name: \"data\"\r\n", - " type: HDF5_DATA\r\n", - " hdf5_data_param {\r\n", - " source: \"examples/hdf5_classification/data/test.txt\"\r\n", - " batch_size: 10\r\n", - " }\r\n", " include {\r\n", " phase: TEST\r\n", " }\r\n", + " hdf5_data_param {\r\n", + " source: \"hdf5_classification/data/test.txt\"\r\n", + " batch_size: 10\r\n", + " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"fc1\"\r\n", + " type: \"InnerProduct\"\r\n", " bottom: \"data\"\r\n", " top: \"fc1\"\r\n", - " name: \"fc1\"\r\n", - " type: INNER_PRODUCT\r\n", - " blobs_lr: 1\r\n", - " blobs_lr: 2\r\n", - " weight_decay: 1\r\n", - " weight_decay: 0\r\n", + " param {\r\n", + " lr_mult: 1\r\n", + " decay_mult: 1\r\n", + " }\r\n", + " param {\r\n", + " lr_mult: 2\r\n", + " decay_mult: 0\r\n", + " }\r\n", " inner_product_param {\r\n", " num_output: 40\r\n", " weight_filler {\r\n", @@ -684,21 +829,25 @@ " }\r\n", " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"relu1\"\r\n", + " type: \"ReLU\"\r\n", " bottom: \"fc1\"\r\n", " top: \"fc1\"\r\n", - " name: \"relu1\"\r\n", - " type: RELU\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"fc2\"\r\n", + " type: \"InnerProduct\"\r\n", " bottom: \"fc1\"\r\n", " top: \"fc2\"\r\n", - " name: \"fc2\"\r\n", - " type: INNER_PRODUCT\r\n", - " blobs_lr: 1\r\n", - " blobs_lr: 2\r\n", - " weight_decay: 1\r\n", - " weight_decay: 0\r\n", + " param {\r\n", + " lr_mult: 1\r\n", + " decay_mult: 1\r\n", + " }\r\n", + " param {\r\n", + " lr_mult: 2\r\n", + " decay_mult: 0\r\n", + " }\r\n", " inner_product_param {\r\n", " num_output: 2\r\n", " weight_filler {\r\n", @@ -711,222 +860,200 @@ " }\r\n", " }\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"loss\"\r\n", + " type: \"SoftmaxWithLoss\"\r\n", " bottom: \"fc2\"\r\n", " bottom: \"label\"\r\n", " top: \"loss\"\r\n", - " name: \"loss\"\r\n", - " type: SOFTMAX_LOSS\r\n", "}\r\n", - "layers {\r\n", + "layer {\r\n", + " name: \"accuracy\"\r\n", + " type: \"Accuracy\"\r\n", " bottom: \"fc2\"\r\n", " bottom: \"label\"\r\n", " top: \"accuracy\"\r\n", - " name: \"accuracy\"\r\n", - " type: ACCURACY\r\n", " include {\r\n", " phase: TEST\r\n", " }\r\n", "}\r\n", - "state {\r\n", - " phase: TEST\r\n", - "}\r\n", - "I0905 01:07:27.477839 2129298192 net.cpp:67] Creating Layer data\r\n", - "I0905 01:07:27.477850 2129298192 net.cpp:356] data -> data\r\n", - "I0905 01:07:27.477861 2129298192 net.cpp:356] data -> label\r\n", - "I0905 01:07:27.477870 2129298192 net.cpp:96] Setting up data\r\n", - "I0905 01:07:27.477876 2129298192 hdf5_data_layer.cpp:57] Loading filename from examples/hdf5_classification/data/test.txt\r\n", - "I0905 01:07:27.477902 2129298192 hdf5_data_layer.cpp:69] Number of files: 1\r\n" + "I0307 01:34:31.883535 2099749632 layer_factory.hpp:74] Creating layer data\r\n", + "I0307 01:34:31.883548 2099749632 net.cpp:84] Creating Layer data\r\n", + "I0307 01:34:31.883556 2099749632 net.cpp:338] data -> data\r\n", + "I0307 01:34:31.883569 2099749632 net.cpp:338] data -> label\r\n", + "I0307 01:34:31.883579 2099749632 net.cpp:113] Setting up data\r\n", + "I0307 01:34:31.883585 2099749632 hdf5_data_layer.cpp:66] Loading list of HDF5 filenames from: hdf5_classification/data/test.txt\r\n", + "I0307 01:34:31.883664 2099749632 hdf5_data_layer.cpp:80] Number of HDF5 files: 1\r\n", + "I0307 01:34:31.884842 2099749632 net.cpp:120] Top shape: 10 4 (40)\r\n", + "I0307 01:34:31.884860 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:31.884870 2099749632 layer_factory.hpp:74] Creating layer label_data_1_split\r\n", + "I0307 01:34:31.884879 2099749632 net.cpp:84] Creating Layer label_data_1_split\r\n", + "I0307 01:34:31.884886 2099749632 net.cpp:380] label_data_1_split <- label\r\n", + "I0307 01:34:31.884896 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_0\r\n", + "I0307 01:34:31.884909 2099749632 net.cpp:338] label_data_1_split -> label_data_1_split_1\r\n", + "I0307 01:34:31.884919 2099749632 net.cpp:113] Setting up label_data_1_split\r\n", + "I0307 01:34:31.884927 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:31.884934 2099749632 net.cpp:120] Top shape: 10 (10)\r\n", + "I0307 01:34:31.884941 2099749632 layer_factory.hpp:74] Creating layer fc1\r\n", + "I0307 01:34:31.884951 2099749632 net.cpp:84] Creating Layer fc1\r\n", + "I0307 01:34:31.884958 2099749632 net.cpp:380] fc1 <- data\r\n", + "I0307 01:34:31.884989 2099749632 net.cpp:338] fc1 -> fc1\r\n", + "I0307 01:34:31.885000 2099749632 net.cpp:113] Setting up fc1\r\n", + "I0307 01:34:31.885017 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", + "I0307 01:34:31.885030 2099749632 layer_factory.hpp:74] Creating layer relu1\r\n", + "I0307 01:34:31.885041 2099749632 net.cpp:84] Creating Layer relu1\r\n", + "I0307 01:34:31.885048 2099749632 net.cpp:380] relu1 <- fc1\r\n", + "I0307 01:34:31.885056 2099749632 net.cpp:327] relu1 -> fc1 (in-place)\r\n", + "I0307 01:34:31.885064 2099749632 net.cpp:113] Setting up relu1\r\n", + "I0307 01:34:31.885071 2099749632 net.cpp:120] Top shape: 10 40 (400)\r\n", + "I0307 01:34:31.885079 2099749632 layer_factory.hpp:74] Creating layer fc2\r\n", + "I0307 01:34:31.885088 2099749632 net.cpp:84] Creating Layer fc2\r\n", + "I0307 01:34:31.885094 2099749632 net.cpp:380] fc2 <- fc1\r\n", + "I0307 01:34:31.885103 2099749632 net.cpp:338] fc2 -> fc2\r\n", + "I0307 01:34:31.885113 2099749632 net.cpp:113] Setting up fc2\r\n", + "I0307 01:34:31.885126 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:31.885138 2099749632 layer_factory.hpp:74] Creating layer fc2_fc2_0_split\r\n", + "I0307 01:34:31.885149 2099749632 net.cpp:84] Creating Layer fc2_fc2_0_split\r\n", + "I0307 01:34:31.885155 2099749632 net.cpp:380] fc2_fc2_0_split <- fc2\r\n", + "I0307 01:34:31.885164 2099749632 net.cpp:338] fc2_fc2_0_split -> fc2_fc2_0_split_0\r\n", + "I0307 01:34:31.885174 2099749632 net.cpp:338] fc2_fc2_0_split -> fc2_fc2_0_split_1\r\n", + "I0307 01:34:31.885182 2099749632 net.cpp:113] Setting up fc2_fc2_0_split\r\n", + "I0307 01:34:31.885190 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:31.885242 2099749632 net.cpp:120] Top shape: 10 2 (20)\r\n", + "I0307 01:34:31.885256 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:31.885267 2099749632 net.cpp:84] Creating Layer loss\r\n", + "I0307 01:34:31.885275 2099749632 net.cpp:380] loss <- fc2_fc2_0_split_0\r\n", + "I0307 01:34:31.885285 2099749632 net.cpp:380] loss <- label_data_1_split_0\r\n", + "I0307 01:34:31.885296 2099749632 net.cpp:338] loss -> loss\r\n", + "I0307 01:34:31.885308 2099749632 net.cpp:113] Setting up loss\r\n", + "I0307 01:34:31.885316 2099749632 layer_factory.hpp:74] Creating layer loss\r\n", + "I0307 01:34:31.885330 2099749632 net.cpp:120] Top shape: (1)\r\n", + "I0307 01:34:31.885337 2099749632 net.cpp:122] with loss weight 1\r\n", + "I0307 01:34:31.885346 2099749632 layer_factory.hpp:74] Creating layer accuracy\r\n", + "I0307 01:34:31.885360 2099749632 net.cpp:84] Creating Layer accuracy\r\n", + "I0307 01:34:31.885368 2099749632 net.cpp:380] accuracy <- fc2_fc2_0_split_1\r\n", + "I0307 01:34:31.885375 2099749632 net.cpp:380] accuracy <- label_data_1_split_1\r\n", + "I0307 01:34:31.885383 2099749632 net.cpp:338] accuracy -> accuracy\r\n", + "I0307 01:34:31.885392 2099749632 net.cpp:113] Setting up accuracy\r\n", + "I0307 01:34:31.885401 2099749632 net.cpp:120] Top shape: (1)\r\n", + "I0307 01:34:31.885407 2099749632 net.cpp:169] accuracy does not need backward computation.\r\n", + "I0307 01:34:31.885413 2099749632 net.cpp:167] loss needs backward computation.\r\n", + "I0307 01:34:31.885419 2099749632 net.cpp:167] fc2_fc2_0_split needs backward computation.\r\n", + "I0307 01:34:31.885426 2099749632 net.cpp:167] fc2 needs backward computation.\r\n", + "I0307 01:34:31.885432 2099749632 net.cpp:167] relu1 needs backward computation.\r\n", + "I0307 01:34:31.885438 2099749632 net.cpp:167] fc1 needs backward computation.\r\n", + "I0307 01:34:31.885444 2099749632 net.cpp:169] label_data_1_split does not need backward computation.\r\n", + "I0307 01:34:31.885452 2099749632 net.cpp:169] data does not need backward computation.\r\n", + "I0307 01:34:31.885457 2099749632 net.cpp:205] This network produces output accuracy\r\n", + "I0307 01:34:31.885613 2099749632 net.cpp:205] This network produces output loss\r\n", + "I0307 01:34:31.885632 2099749632 net.cpp:447] Collecting Learning Rate and Weight Decay.\r\n", + "I0307 01:34:31.885639 2099749632 net.cpp:217] Network initialization done.\r\n", + "I0307 01:34:31.885645 2099749632 net.cpp:218] Memory required for data: 3728\r\n", + "I0307 01:34:31.885685 2099749632 solver.cpp:42] Solver scaffolding done.\r\n", + "I0307 01:34:31.885711 2099749632 solver.cpp:222] Solving LogisticRegressionNet\r\n", + "I0307 01:34:31.885721 2099749632 solver.cpp:223] Learning Rate Policy: step\r\n", + "I0307 01:34:31.885730 2099749632 solver.cpp:266] Iteration 0, Testing net (#0)\r\n", + "I0307 01:34:31.901005 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.5944\r\n", + "I0307 01:34:31.901049 2099749632 solver.cpp:315] Test net output #1: loss = 0.693021 (* 1 = 0.693021 loss)\r\n", + "I0307 01:34:31.901177 2099749632 solver.cpp:189] Iteration 0, loss = 0.693163\r\n", + "I0307 01:34:31.901192 2099749632 solver.cpp:204] Train net output #0: loss = 0.693163 (* 1 = 0.693163 loss)\r\n", + "I0307 01:34:31.901203 2099749632 solver.cpp:464] Iteration 0, lr = 0.01\r\n", + "I0307 01:34:31.920586 2099749632 solver.cpp:266] Iteration 1000, Testing net (#0)\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.477910 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/test.h5\r\n", - "I0905 01:07:27.478999 2129298192 hdf5_data_layer.cpp:49] Successully loaded 2500 rows\r\n", - "I0905 01:07:27.479014 2129298192 hdf5_data_layer.cpp:81] output data size: 10,4,1,1\r\n", - "I0905 01:07:27.479022 2129298192 net.cpp:103] Top shape: 10 4 1 1 (40)\r\n", - "I0905 01:07:27.479028 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.479038 2129298192 net.cpp:67] Creating Layer label_data_1_split\r\n", - "I0905 01:07:27.479044 2129298192 net.cpp:394] label_data_1_split <- label\r\n", - "I0905 01:07:27.479058 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_0\r\n", - "I0905 01:07:27.479069 2129298192 net.cpp:356] label_data_1_split -> label_data_1_split_1\r\n", - "I0905 01:07:27.479079 2129298192 net.cpp:96] Setting up label_data_1_split\r\n", - "I0905 01:07:27.479086 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.479092 2129298192 net.cpp:103] Top shape: 10 1 1 1 (10)\r\n", - "I0905 01:07:27.479100 2129298192 net.cpp:67] Creating Layer fc1\r\n", - "I0905 01:07:27.480850 2129298192 net.cpp:394] fc1 <- data\r\n", - "I0905 01:07:27.480871 2129298192 net.cpp:356] fc1 -> fc1\r\n", - "I0905 01:07:27.480887 2129298192 net.cpp:96] Setting up fc1\r\n", - "I0905 01:07:27.480908 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", - "I0905 01:07:27.480978 2129298192 net.cpp:67] Creating Layer relu1\r\n", - "I0905 01:07:27.480986 2129298192 net.cpp:394] relu1 <- fc1\r\n", - "I0905 01:07:27.480994 2129298192 net.cpp:345] relu1 -> fc1 (in-place)\r\n", - "I0905 01:07:27.481003 2129298192 net.cpp:96] Setting up relu1\r\n", - "I0905 01:07:27.481009 2129298192 net.cpp:103] Top shape: 10 40 1 1 (400)\r\n", - "I0905 01:07:27.481017 2129298192 net.cpp:67] Creating Layer fc2\r\n", - "I0905 01:07:27.481024 2129298192 net.cpp:394] fc2 <- fc1\r\n", - "I0905 01:07:27.481031 2129298192 net.cpp:356] fc2 -> fc2\r\n", - "I0905 01:07:27.481041 2129298192 net.cpp:96] Setting up fc2\r\n", - "I0905 01:07:27.481055 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.481065 2129298192 net.cpp:67] Creating Layer fc2_fc2_0_split\r\n", - "I0905 01:07:27.481343 2129298192 net.cpp:394] fc2_fc2_0_split <- fc2\r\n", - "I0905 01:07:27.481360 2129298192 net.cpp:356] fc2_fc2_0_split -> fc2_fc2_0_split_0\r\n", - "I0905 01:07:27.481371 2129298192 net.cpp:356] fc2_fc2_0_split -> fc2_fc2_0_split_1\r\n", - "I0905 01:07:27.481379 2129298192 net.cpp:96] Setting up fc2_fc2_0_split\r\n", - "I0905 01:07:27.481387 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.481392 2129298192 net.cpp:103] Top shape: 10 2 1 1 (20)\r\n", - "I0905 01:07:27.481401 2129298192 net.cpp:67] Creating Layer loss\r\n", - "I0905 01:07:27.481407 2129298192 net.cpp:394] loss <- fc2_fc2_0_split_0\r\n", - "I0905 01:07:27.481413 2129298192 net.cpp:394] loss <- label_data_1_split_0\r\n", - "I0905 01:07:27.481421 2129298192 net.cpp:356] loss -> loss\r\n", - "I0905 01:07:27.481434 2129298192 net.cpp:96] Setting up loss\r\n", - "I0905 01:07:27.481446 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", - "I0905 01:07:27.481452 2129298192 net.cpp:109] with loss weight 1\r\n", - "I0905 01:07:27.481466 2129298192 net.cpp:67] Creating Layer accuracy\r\n", - "I0905 01:07:27.481472 2129298192 net.cpp:394] accuracy <- fc2_fc2_0_split_1\r\n", - "I0905 01:07:27.481504 2129298192 net.cpp:394] accuracy <- label_data_1_split_1\r\n", - "I0905 01:07:27.481513 2129298192 net.cpp:356] accuracy -> accuracy\r\n", - "I0905 01:07:27.481521 2129298192 net.cpp:96] Setting up accuracy\r\n", - "I0905 01:07:27.481528 2129298192 net.cpp:103] Top shape: 1 1 1 1 (1)\r\n", - "I0905 01:07:27.481534 2129298192 net.cpp:172] accuracy does not need backward computation.\r\n", - "I0905 01:07:27.481540 2129298192 net.cpp:170] loss needs backward computation.\r\n", - "I0905 01:07:27.481545 2129298192 net.cpp:170] fc2_fc2_0_split needs backward computation.\r\n", - "I0905 01:07:27.481551 2129298192 net.cpp:170] fc2 needs backward computation.\r\n", - "I0905 01:07:27.481557 2129298192 net.cpp:170] relu1 needs backward computation.\r\n", - "I0905 01:07:27.481562 2129298192 net.cpp:170] fc1 needs backward computation.\r\n", - "I0905 01:07:27.481569 2129298192 net.cpp:172] label_data_1_split does not need backward computation.\r\n", - "I0905 01:07:27.481575 2129298192 net.cpp:172] data does not need backward computation.\r\n", - "I0905 01:07:27.481730 2129298192 net.cpp:208] This network produces output accuracy\r\n", - "I0905 01:07:27.481742 2129298192 net.cpp:208] This network produces output loss\r\n", - "I0905 01:07:27.481758 2129298192 net.cpp:467] Collecting Learning Rate and Weight Decay.\r\n", - "I0905 01:07:27.481766 2129298192 net.cpp:219] Network initialization done.\r\n", - "I0905 01:07:27.481771 2129298192 net.cpp:220] Memory required for data: 3728\r\n", - "I0905 01:07:27.481814 2129298192 solver.cpp:46] Solver scaffolding done.\r\n", - "I0905 01:07:27.481822 2129298192 solver.cpp:165] Solving LogisticRegressionNet\r\n", - "I0905 01:07:27.481844 2129298192 solver.cpp:251] Iteration 0, Testing net (#0)\r\n", - "I0905 01:07:27.488900 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.4924\r\n", - "I0905 01:07:27.488932 2129298192 solver.cpp:302] Test net output #1: loss = 0.693168 (* 1 = 0.693168 loss)\r\n", - "I0905 01:07:27.488962 2129298192 solver.cpp:195] Iteration 0, loss = 0.692972\r\n", - "I0905 01:07:27.488973 2129298192 solver.cpp:210] Train net output #0: loss = 0.692972 (* 1 = 0.692972 loss)\r\n", - "I0905 01:07:27.488984 2129298192 solver.cpp:405] Iteration 0, lr = 0.01\r\n", - "I0905 01:07:27.495033 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.495604 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.497684 2129298192 solver.cpp:251] Iteration 1000, Testing net (#0)\r\n", - "I0905 01:07:27.504875 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.7744\r\n", - "I0905 01:07:27.504930 2129298192 solver.cpp:302] Test net output #1: loss = 0.486552 (* 1 = 0.486552 loss)\r\n", - "I0905 01:07:27.504955 2129298192 solver.cpp:195] Iteration 1000, loss = 0.660151\r\n", - "I0905 01:07:27.504966 2129298192 solver.cpp:210] Train net output #0: loss = 0.660151 (* 1 = 0.660151 loss)\r\n", - "I0905 01:07:27.504976 2129298192 solver.cpp:405] Iteration 1000, lr = 0.01\r\n", - "I0905 01:07:27.509419 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.509467 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.510288 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.514822 2129298192 solver.cpp:251] Iteration 2000, Testing net (#0)\r\n", - "I0905 01:07:27.522342 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8004\r\n", - "I0905 01:07:27.522444 2129298192 solver.cpp:302] Test net output #1: loss = 0.447153 (* 1 = 0.447153 loss)\r\n", - "I0905 01:07:27.522483 2129298192 solver.cpp:195] Iteration 2000, loss = 0.505697\r\n", - "I0905 01:07:27.522495 2129298192 solver.cpp:210] Train net output #0: loss = 0.505697 (* 1 = 0.505697 loss)\r\n", - "I0905 01:07:27.522507 2129298192 solver.cpp:405] Iteration 2000, lr = 0.01\r\n", - "I0905 01:07:27.524762 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.525921 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n" + "I0307 01:34:31.924612 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7556\r\n", + "I0307 01:34:31.924646 2099749632 solver.cpp:315] Test net output #1: loss = 0.511002 (* 1 = 0.511002 loss)\r\n", + "I0307 01:34:31.924684 2099749632 solver.cpp:189] Iteration 1000, loss = 0.38536\r\n", + "I0307 01:34:31.924696 2099749632 solver.cpp:204] Train net output #0: loss = 0.38536 (* 1 = 0.38536 loss)\r\n", + "I0307 01:34:31.924706 2099749632 solver.cpp:464] Iteration 1000, lr = 0.01\r\n", + "I0307 01:34:31.944727 2099749632 solver.cpp:266] Iteration 2000, Testing net (#0)\r\n", + "I0307 01:34:31.948729 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7824\r\n", + "I0307 01:34:31.948763 2099749632 solver.cpp:315] Test net output #1: loss = 0.489214 (* 1 = 0.489214 loss)\r\n", + "I0307 01:34:31.948799 2099749632 solver.cpp:189] Iteration 2000, loss = 0.532582\r\n", + "I0307 01:34:31.948812 2099749632 solver.cpp:204] Train net output #0: loss = 0.532582 (* 1 = 0.532582 loss)\r\n", + "I0307 01:34:31.948823 2099749632 solver.cpp:464] Iteration 2000, lr = 0.01\r\n", + "I0307 01:34:31.968670 2099749632 solver.cpp:266] Iteration 3000, Testing net (#0)\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.533335 2129298192 solver.cpp:251] Iteration 3000, Testing net (#0)\r\n", - "I0905 01:07:27.541055 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8144\r\n", - "I0905 01:07:27.541146 2129298192 solver.cpp:302] Test net output #1: loss = 0.421441 (* 1 = 0.421441 loss)\r\n", - "I0905 01:07:27.541160 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.541167 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.542178 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.542261 2129298192 solver.cpp:195] Iteration 3000, loss = 0.242177\r\n", - "I0905 01:07:27.542284 2129298192 solver.cpp:210] Train net output #0: loss = 0.242177 (* 1 = 0.242177 loss)\r\n", - "I0905 01:07:27.542310 2129298192 solver.cpp:405] Iteration 3000, lr = 0.01\r\n", - "I0905 01:07:27.549348 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.550144 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.552340 2129298192 solver.cpp:251] Iteration 4000, Testing net (#0)\r\n", - "I0905 01:07:27.560089 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.784001\r\n", - "I0905 01:07:27.560227 2129298192 solver.cpp:302] Test net output #1: loss = 0.4395 (* 1 = 0.4395 loss)\r\n", - "I0905 01:07:27.560286 2129298192 solver.cpp:195] Iteration 4000, loss = 1.01631\r\n", - "I0905 01:07:27.560302 2129298192 solver.cpp:210] Train net output #0: loss = 1.01631 (* 1 = 1.01631 loss)\r\n", - "I0905 01:07:27.560315 2129298192 solver.cpp:405] Iteration 4000, lr = 0.01\r\n", - "I0905 01:07:27.565016 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.565101 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.566145 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.570286 2129298192 solver.cpp:251] Iteration 5000, Testing net (#0)\r\n", - "I0905 01:07:27.577373 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.802\r\n", - "I0905 01:07:27.577426 2129298192 solver.cpp:302] Test net output #1: loss = 0.463582 (* 1 = 0.463582 loss)\r\n", - "I0905 01:07:27.577452 2129298192 solver.cpp:195] Iteration 5000, loss = 0.632809\r\n", - "I0905 01:07:27.577463 2129298192 solver.cpp:210] Train net output #0: loss = 0.632809 (* 1 = 0.632809 loss)\r\n", - "I0905 01:07:27.577564 2129298192 solver.cpp:405] Iteration 5000, lr = 0.001\r\n", - "I0905 01:07:27.579649 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.580368 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n" + "I0307 01:34:31.972393 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.7956\r\n", + "I0307 01:34:31.972411 2099749632 solver.cpp:315] Test net output #1: loss = 0.454184 (* 1 = 0.454184 loss)\r\n", + "I0307 01:34:31.973024 2099749632 solver.cpp:189] Iteration 3000, loss = 0.541374\r\n", + "I0307 01:34:31.973057 2099749632 solver.cpp:204] Train net output #0: loss = 0.541374 (* 1 = 0.541374 loss)\r\n", + "I0307 01:34:31.973067 2099749632 solver.cpp:464] Iteration 3000, lr = 0.01\r\n", + "I0307 01:34:31.994829 2099749632 solver.cpp:266] Iteration 4000, Testing net (#0)\r\n", + "I0307 01:34:31.998638 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.798\r\n", + "I0307 01:34:31.998663 2099749632 solver.cpp:315] Test net output #1: loss = 0.456348 (* 1 = 0.456348 loss)\r\n", + "I0307 01:34:31.998705 2099749632 solver.cpp:189] Iteration 4000, loss = 0.490437\r\n", + "I0307 01:34:31.998718 2099749632 solver.cpp:204] Train net output #0: loss = 0.490437 (* 1 = 0.490437 loss)\r\n", + "I0307 01:34:31.998725 2099749632 solver.cpp:464] Iteration 4000, lr = 0.01\r\n", + "I0307 01:34:32.021085 2099749632 solver.cpp:266] Iteration 5000, Testing net (#0)\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.586956 2129298192 solver.cpp:251] Iteration 6000, Testing net (#0)\r\n", - "I0905 01:07:27.594288 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.822\r\n", - "I0905 01:07:27.594327 2129298192 solver.cpp:302] Test net output #1: loss = 0.407026 (* 1 = 0.407026 loss)\r\n", - "I0905 01:07:27.594338 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.594344 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.594861 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.594897 2129298192 solver.cpp:195] Iteration 6000, loss = 0.214342\r\n", - "I0905 01:07:27.594910 2129298192 solver.cpp:210] Train net output #0: loss = 0.214342 (* 1 = 0.214342 loss)\r\n", - "I0905 01:07:27.594919 2129298192 solver.cpp:405] Iteration 6000, lr = 0.001\r\n", - "I0905 01:07:27.601003 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.601380 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.603358 2129298192 solver.cpp:251] Iteration 7000, Testing net (#0)\r\n", - "I0905 01:07:27.610307 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8264\r\n", - "I0905 01:07:27.610323 2129298192 solver.cpp:302] Test net output #1: loss = 0.403283 (* 1 = 0.403283 loss)\r\n", - "I0905 01:07:27.610342 2129298192 solver.cpp:195] Iteration 7000, loss = 0.894732\r\n", - "I0905 01:07:27.610352 2129298192 solver.cpp:210] Train net output #0: loss = 0.894732 (* 1 = 0.894732 loss)\r\n", - "I0905 01:07:27.610359 2129298192 solver.cpp:405] Iteration 7000, lr = 0.001\r\n", - "I0905 01:07:27.614289 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.614297 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.614701 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.618602 2129298192 solver.cpp:251] Iteration 8000, Testing net (#0)\r\n", - "I0905 01:07:27.625637 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8216\r\n", - "I0905 01:07:27.625661 2129298192 solver.cpp:302] Test net output #1: loss = 0.402446 (* 1 = 0.402446 loss)\r\n", - "I0905 01:07:27.625680 2129298192 solver.cpp:195] Iteration 8000, loss = 0.500503\r\n", - "I0905 01:07:27.625690 2129298192 solver.cpp:210] Train net output #0: loss = 0.500503 (* 1 = 0.500503 loss)\r\n", - "I0905 01:07:27.625707 2129298192 solver.cpp:405] Iteration 8000, lr = 0.001\r\n", - "I0905 01:07:27.627665 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.628075 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n" + "I0307 01:34:32.024950 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.804\r\n", + "I0307 01:34:32.024981 2099749632 solver.cpp:315] Test net output #1: loss = 0.46184 (* 1 = 0.46184 loss)\r\n", + "I0307 01:34:32.025017 2099749632 solver.cpp:189] Iteration 5000, loss = 0.467703\r\n", + "I0307 01:34:32.025028 2099749632 solver.cpp:204] Train net output #0: loss = 0.467704 (* 1 = 0.467704 loss)\r\n", + "I0307 01:34:32.025038 2099749632 solver.cpp:464] Iteration 5000, lr = 0.001\r\n", + "I0307 01:34:32.044390 2099749632 solver.cpp:266] Iteration 6000, Testing net (#0)\r\n", + "I0307 01:34:32.048216 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8208\r\n", + "I0307 01:34:32.048239 2099749632 solver.cpp:315] Test net output #1: loss = 0.423084 (* 1 = 0.423084 loss)\r\n", + "I0307 01:34:32.048790 2099749632 solver.cpp:189] Iteration 6000, loss = 0.480104\r\n", + "I0307 01:34:32.048809 2099749632 solver.cpp:204] Train net output #0: loss = 0.480105 (* 1 = 0.480105 loss)\r\n", + "I0307 01:34:32.048827 2099749632 solver.cpp:464] Iteration 6000, lr = 0.001\r\n", + "I0307 01:34:32.067795 2099749632 solver.cpp:266] Iteration 7000, Testing net (#0)\r\n", + "I0307 01:34:32.071524 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8124\r\n", + "I0307 01:34:32.071542 2099749632 solver.cpp:315] Test net output #1: loss = 0.423947 (* 1 = 0.423947 loss)\r\n", + "I0307 01:34:32.071570 2099749632 solver.cpp:189] Iteration 7000, loss = 0.447471\r\n", + "I0307 01:34:32.071617 2099749632 solver.cpp:204] Train net output #0: loss = 0.447472 (* 1 = 0.447472 loss)\r\n", + "I0307 01:34:32.071626 2099749632 solver.cpp:464] Iteration 7000, lr = 0.001\r\n" ] }, { "output_type": "stream", "stream": "stdout", "text": [ - "I0905 01:07:27.634202 2129298192 solver.cpp:251] Iteration 9000, Testing net (#0)\r\n", - "I0905 01:07:27.641368 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.8252\r\n", - "I0905 01:07:27.641412 2129298192 solver.cpp:302] Test net output #1: loss = 0.404175 (* 1 = 0.404175 loss)\r\n", - "I0905 01:07:27.641422 2129298192 hdf5_data_layer.cpp:99] looping around to first file\r\n", - "I0905 01:07:27.641428 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.641960 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.642004 2129298192 solver.cpp:195] Iteration 9000, loss = 0.201587\r\n", - "I0905 01:07:27.642016 2129298192 solver.cpp:210] Train net output #0: loss = 0.201587 (* 1 = 0.201587 loss)\r\n", - "I0905 01:07:27.642026 2129298192 solver.cpp:405] Iteration 9000, lr = 0.001\r\n", - "I0905 01:07:27.648680 2129298192 hdf5_data_layer.cpp:29] Loading HDF5 file/Users/sergeyk/work/caffe/examples/hdf5_classification/data/train.h5\r\n", - "I0905 01:07:27.649211 2129298192 hdf5_data_layer.cpp:49] Successully loaded 7500 rows\r\n", - "I0905 01:07:27.651327 2129298192 solver.cpp:319] Snapshotting to examples/hdf5_classification/data/train_iter_10000\r\n", - "I0905 01:07:27.651476 2129298192 solver.cpp:326] Snapshotting solver state to examples/hdf5_classification/data/train_iter_10000.solverstate\r\n", - "I0905 01:07:27.651564 2129298192 solver.cpp:232] Iteration 10000, loss = 0.935422\r\n", - "I0905 01:07:27.651582 2129298192 solver.cpp:251] Iteration 10000, Testing net (#0)\r\n", - "I0905 01:07:27.658738 2129298192 solver.cpp:302] Test net output #0: accuracy = 0.826\r\n", - "I0905 01:07:27.658782 2129298192 solver.cpp:302] Test net output #1: loss = 0.400826 (* 1 = 0.400826 loss)\r\n", - "I0905 01:07:27.658790 2129298192 solver.cpp:237] Optimization Done.\r\n", - "I0905 01:07:27.658797 2129298192 caffe.cpp:114] Optimization Done.\r\n" + "I0307 01:34:32.091625 2099749632 solver.cpp:266] Iteration 8000, Testing net (#0)\r\n", + "I0307 01:34:32.095410 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.814\r\n", + "I0307 01:34:32.095432 2099749632 solver.cpp:315] Test net output #1: loss = 0.423586 (* 1 = 0.423586 loss)\r\n", + "I0307 01:34:32.095461 2099749632 solver.cpp:189] Iteration 8000, loss = 0.386258\r\n", + "I0307 01:34:32.095474 2099749632 solver.cpp:204] Train net output #0: loss = 0.386259 (* 1 = 0.386259 loss)\r\n", + "I0307 01:34:32.095481 2099749632 solver.cpp:464] Iteration 8000, lr = 0.001\r\n", + "I0307 01:34:32.117184 2099749632 solver.cpp:266] Iteration 9000, Testing net (#0)\r\n", + "I0307 01:34:32.121587 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8208\r\n", + "I0307 01:34:32.121608 2099749632 solver.cpp:315] Test net output #1: loss = 0.419969 (* 1 = 0.419969 loss)\r\n", + "I0307 01:34:32.122161 2099749632 solver.cpp:189] Iteration 9000, loss = 0.468262\r\n", + "I0307 01:34:32.122181 2099749632 solver.cpp:204] Train net output #0: loss = 0.468262 (* 1 = 0.468262 loss)\r\n", + "I0307 01:34:32.122191 2099749632 solver.cpp:464] Iteration 9000, lr = 0.001\r\n" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "I0307 01:34:32.141635 2099749632 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_10000.caffemodel\r\n", + "I0307 01:34:32.141860 2099749632 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_10000.solverstate\r\n", + "I0307 01:34:32.141978 2099749632 solver.cpp:248] Iteration 10000, loss = 0.441529\r\n", + "I0307 01:34:32.141995 2099749632 solver.cpp:266] Iteration 10000, Testing net (#0)\r\n", + "I0307 01:34:32.145747 2099749632 solver.cpp:315] Test net output #0: accuracy = 0.8148\r\n", + "I0307 01:34:32.145771 2099749632 solver.cpp:315] Test net output #1: loss = 0.4216 (* 1 = 0.4216 loss)\r\n", + "I0307 01:34:32.145779 2099749632 solver.cpp:253] Optimization Done.\r\n", + "I0307 01:34:32.145786 2099749632 caffe.cpp:121] Optimization Done.\r\n" ] } ], - "prompt_number": 7 + "prompt_number": 8 }, { "cell_type": "code", @@ -938,7 +1065,7 @@ "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 8 + "prompt_number": 9 } ], "metadata": {} diff --git a/examples/hdf5_classification/solver.prototxt b/examples/hdf5_classification/solver.prototxt index 040162076b8..65a6eb9e9fb 100644 --- a/examples/hdf5_classification/solver.prototxt +++ b/examples/hdf5_classification/solver.prototxt @@ -1,5 +1,5 @@ -net: "examples/hdf5_classification/train_val.prototxt" -test_iter: 1000 +net: "hdf5_classification/train_val.prototxt" +test_iter: 250 test_interval: 1000 base_lr: 0.01 lr_policy: "step" @@ -10,5 +10,5 @@ max_iter: 10000 momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 -snapshot_prefix: "examples/hdf5_classification/data/train" +snapshot_prefix: "hdf5_classification/data/train" solver_mode: CPU diff --git a/examples/hdf5_classification/solver2.prototxt b/examples/hdf5_classification/solver2.prototxt index 32a3693b4a1..32b9feba346 100644 --- a/examples/hdf5_classification/solver2.prototxt +++ b/examples/hdf5_classification/solver2.prototxt @@ -1,5 +1,5 @@ -net: "examples/hdf5_classification/train_val2.prototxt" -test_iter: 1000 +net: "hdf5_classification/train_val2.prototxt" +test_iter: 250 test_interval: 1000 base_lr: 0.01 lr_policy: "step" @@ -10,5 +10,5 @@ max_iter: 10000 momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 -snapshot_prefix: "examples/hdf5_classification/data/train" +snapshot_prefix: "hdf5_classification/data/train" solver_mode: CPU diff --git a/examples/hdf5_classification/train_val.prototxt b/examples/hdf5_classification/train_val.prototxt index b9ccc1a93ec..d5e8dbfa169 100644 --- a/examples/hdf5_classification/train_val.prototxt +++ b/examples/hdf5_classification/train_val.prototxt @@ -8,7 +8,7 @@ layer { phase: TRAIN } hdf5_data_param { - source: "examples/hdf5_classification/data/train.txt" + source: "hdf5_classification/data/train.txt" batch_size: 10 } } @@ -21,7 +21,7 @@ layer { phase: TEST } hdf5_data_param { - source: "examples/hdf5_classification/data/test.txt" + source: "hdf5_classification/data/test.txt" batch_size: 10 } } diff --git a/examples/hdf5_classification/train_val2.prototxt b/examples/hdf5_classification/train_val2.prototxt index f9ef731fff9..8795e8facb6 100644 --- a/examples/hdf5_classification/train_val2.prototxt +++ b/examples/hdf5_classification/train_val2.prototxt @@ -8,7 +8,7 @@ layer { phase: TRAIN } hdf5_data_param { - source: "examples/hdf5_classification/data/train.txt" + source: "hdf5_classification/data/train.txt" batch_size: 10 } } @@ -21,7 +21,7 @@ layer { phase: TEST } hdf5_data_param { - source: "examples/hdf5_classification/data/test.txt" + source: "hdf5_classification/data/test.txt" batch_size: 10 } } diff --git a/examples/imagenet/make_imagenet_mean.sh b/examples/imagenet/make_imagenet_mean.sh index d3d0c9af5d2..57f43766c4b 100755 --- a/examples/imagenet/make_imagenet_mean.sh +++ b/examples/imagenet/make_imagenet_mean.sh @@ -1,8 +1,12 @@ #!/usr/bin/env sh -# Compute the mean image from the imagenet training leveldb +# Compute the mean image from the imagenet training lmdb # N.B. this is available in data/ilsvrc12 -./build/tools/compute_image_mean examples/imagenet/ilsvrc12_train_leveldb \ - data/ilsvrc12/imagenet_mean.binaryproto +EXAMPLE=examples/imagenet +DATA=data/ilsvrc12 +TOOLS=build/tools + +$TOOLS/compute_image_mean $EXAMPLE/ilsvrc12_train_lmdb \ + $DATA/imagenet_mean.binaryproto echo "Done." diff --git a/examples/net_surgery.ipynb b/examples/net_surgery.ipynb index 2932687da6a..75c9889fb5a 100644 --- a/examples/net_surgery.ipynb +++ b/examples/net_surgery.ipynb @@ -4,7 +4,7 @@ "example_name": "Editing model parameters", "include_in_docs": true, "priority": 5, - "signature": "sha256:811097f2151652d2b630c016a5f1de23bd824df3dfcfc72aa0aeb23b2d9686c0" + "signature": "sha256:f21c804f76329e70847ccb87e28a91e5d8a375f5da0ba6dd85d3b87a05bebd72" }, "nbformat": 3, "nbformat_minor": 0, @@ -15,15 +15,221 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Net Surgery for a Fully-Convolutional Model\n", + "# Net Surgery\n", "\n", - "Caffe models can be transformed to your particular needs by editing the network parameters. In this example, we take the standard Caffe Reference ImageNet model \"CaffeNet\" and transform it into a fully-convolutional model for efficient, dense inference on large inputs. This model generates a classification map that covers a given input size instead of a single classification. In particular a 8 $\\times$ 8 classification map on a 451 $\\times$ 451 input gives 64x the output in only 3x the time. The computation exploits a natural efficiency of convolutional neural network (CNN) structure by dynamic programming in the forward pass from shallow to deep layers.\n", - "\n", - "To do so we translate the inner product classifier layers of CaffeNet into convolutional layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 \\times 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding.\n", + "Caffe networks can be transformed to your particular needs by editing the model parameters. The data, diffs, and parameters of a net are all exposed in pycaffe.\n", "\n", "Roll up your sleeves for net surgery with pycaffe!" ] }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "import Image\n", + "\n", + "# Make sure that caffe is on the python path:\n", + "caffe_root = '../' # this file is expected to be in {caffe_root}/examples\n", + "import sys\n", + "sys.path.insert(0, caffe_root + 'python')\n", + "\n", + "import caffe\n", + "\n", + "# configure plotting\n", + "plt.rcParams['figure.figsize'] = (10, 10)\n", + "plt.rcParams['image.interpolation'] = 'nearest'\n", + "plt.rcParams['image.cmap'] = 'gray'" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 1 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Designer Filters\n", + "\n", + "To show how to load, manipulate, and save parameters we'll design our own filters into a simple network that's only a single convolution layer. This net has two blobs, `data` for the input and `conv` for the convolution output and one parameter `conv` for the convolution filter weights and biases." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "# Load the net, list its data and params, and filter an example image.\n", + "caffe.set_mode_cpu()\n", + "net = caffe.Net('net_surgery/conv.prototxt', caffe.TEST)\n", + "print(\"blobs {}\\nparams {}\".format(net.blobs.keys(), net.params.keys()))\n", + "\n", + "# load image and prepare as a single input batch for Caffe\n", + "im = np.array(Image.open('images/cat_gray.jpg'))\n", + "plt.title(\"original image\")\n", + "plt.imshow(im)\n", + "plt.axis('off')\n", + "\n", + "im_input = im[np.newaxis, np.newaxis, :, :]\n", + "net.blobs['data'].reshape(*im_input.shape)\n", + "net.blobs['data'].data[...] = im_input" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "blobs ['data', 'conv']\n", + "params ['conv']\n" + ] + }, + { + "metadata": {}, + "output_type": "display_data", + "png": "iVBORw0KGgoAAAANSUhEUgAAAlIAAAHNCAYAAADVB5V4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWuMZdl13/c/tx733np393T3PPkYDUccPsQZiaRkCYpE\nCYklOwYhfwjCIAEiJDLswAkQf3AQIEoC64OcIEDiIHESBAiCCAkkJ4GtJHCM+KHQjmGZtmxKJBVC\nwxkOZyac4Uz3dHe97q1bt+7Jh+r/rt/517499ER008xZQKGq7j1nn73XXns9/mvtfZq2bdVTTz31\n1FNPPfXU0z86DR52B3rqqaeeeuqpp57+SaXekeqpp5566qmnnnp6j9Q7Uj311FNPPfXUU0/vkXpH\nqqeeeuqpp5566uk9Uu9I9dRTTz311FNPPb1H6h2pnnrqqaeeeuqpp/dIvSPVU089/b5T0zT/RdM0\n/87v97Xv0s4HmqZZNE1T1WtN03y5aZp/6v/rc3rqqaeeSE1/jlRPPfX0vUBN03xA0suSVtu2XTzc\n3vTUU0//f6Eekeqpp55+X2kZItRTTz319L1IvcLrqaee3pWapnmuaZr/s2maO/dTZH8E3/2399Nz\nf7lpmkNJn7n/2S/hmj/dNM03m6Z5vWmaf/V+Cu5p3P9L9//+yfvX/Kmmab51/55/Ge384aZp/mHT\nNPeapnm1aZp/7x9hDK80TfNT9//+95um+R+bpvmVpmn2m6b5naZpPtQ0zb99/7nfaJrmn8a9P980\nze/ev/alpmn+WLT9oPENm6b5j+63+eZ9Xo3+Ueegp556+u6k3pHqqaeeHkhN06xJ+l8l/RVJ1yX9\n65L++6ZpnsVln5P0S23bbkn6vyS193/UNM3PSPo3Jf20pA9J+sl4RLn2Pt2UtCPpcUn/iqT/vGma\n3fvfHUr6F9u23ZX0hyX9iaZpPvttDiXrGP5ZSf+dpCuS/qGkv3r/88cl/ZKk/wrXfkvSH27bdkfS\nz0v6j5umeeHbHN+flfSMpE/c//2EpH/32+xzTz319F1OvSPVU089vRv9iKTNtm3/bNu287Ztf0PS\n/6Zz58n0l9q2/TuS1LbtSdz/z0n6b9q2/b/btp1IqqFIDf4+lfRn2rY9a9v2f9e58/T999v+fNu2\nX7n/95ck/aqkn3iP4/qbbdv+1bZtzyT9T5KuSfqz9///NUkfaJpm5/6z/nLbtl+///fflPR/SPrx\ndxtf0zSNpF+Q9Kfatr3btu2hpF+W9M+/xz731FNP32W0+rA70FNPPX3X0+OSXovPvnH/c+kc6Xn9\nAfc/JukL+P9B10rS7SgWP5a0JUlN0/ywzhGej0palzSU9Bfepb1l9Bb+nki61V7svpnc/70lab9p\nmp/VuYP0IZ0HoBuSfuf+NQ8a3/X71/7WuU8l6dxp7IPYnnr6HqF+MffUU0/vRt+U9FQDT0DS+yX9\nP9/m/W9Iegr/P1W55tvdPvw/SPpLkp5s23ZP0n+p77Aea5pmKOl/lvQfSrrRtu0VSX9ZFyjag8Z3\nS+dO2Ufatr1y/2fvfoqwp556+h6g3pHqqaee3o1+U+eo0J9ummataZqf1Hl90a/e/76p3NPg878g\n6eebpvlw0zQbkn7xAde+G21JutO27axpmk9L+hf07Tth75XW7//ckrS4j079M/h+6fjuI2v/taT/\npGma65LUNM0TTdPw/p566umfYOodqZ566umB1LbtqaQ/IulnJb0t6T+T9C+1bft7vkSXnZnyWdu2\nf0XSfyrpNyT9nqS/c/+akyX3P8gx+tck/ZmmafZ17rD8WuW53w4t6/Ol/9u2PZD0b+jcYXpH57Vh\nv14uevfx/VuSvibpN5umuafzonYW6vfUU0//BFN/IGdPPfX0j5WapnlO0pckrX8vHpz5vT6+nnrq\nqUs9ItVTTz19x6lpmp+7f57SFUn/gaT/5XvJyfheH19PPfW0nHpHqqeeevrHQX9M52cxfU3nxxv8\niYfbnd93+l4fX0899bSE+tReTz311FNPPfXU03ukh3KO1BNPPNE2TaPFYiHvqB4MBhoMBlosFuV/\nO3n+3TSN2rYVnb/FYqGVlZXqc05PT7VYLDQcDstnvLdpms5z3Jemacqz+FzT2dlZ55n+brFYlHH4\nJx1VjpfP8/0cvz9L8rPdL/PS95DIV//tdufzebl/MBjo7OxMknRycqL5fK7FYqGzs7NOv9xX3ucx\num3zb2VlpVzvfq+urmplZUVra2vl89XV1dI3f+/71tbWNBgMtLKyUr73vb7O/fAzJWk+n+vs7Eyn\np6c6OTnR6empjo+PJUmz2Uyz2UxnZ2c6OzvTdDot43Pbq6urha85d23b6uzsrDM2X+f+sR32lfeb\nb6enp6W/Jycnl9bBYrFQ27YdnrkdEvtLWXAfSOvr6+U6yvLKykpZU+6Tn3N2dtaRi9PT0yIz5oHX\np+cq57Bt2zLffObZ2dklHZAyzb7kGC2P8/m8fMd1nOs++8223EZtTS1bl9RVlo1cp+4jn+371tbW\ntLKyUtaI52gwGGg8Hms4HGo0GpU2LZ8ep/uc7fr5ptXV1fIzGo3KnGU/Z7OZjo6OdHJyXi9/fHys\nk5MTHR8fa39/X7PZrHOv1+dgMNBsNtPp6WlHH3D+qE/5v+eLvLWs+Huubz8v9bWfSX3n+9q2LfJt\nvtV4Zdvk/1dXV8sapL7ydysrK1pfX+/MI+ns7KzMK9e9pKKL5vO5ZrNZGZ919Gw203w+78ilx8Hx\n5/z7/tXV1aodSVk+OzvT2tqatra2tL6+rvX1dY1G528yGo/HGo1GHVvHZ7m/bdtqZWVFk8mkyM3R\n0VFn/LQltD2eY/LIY1wsFh09NhqNihxT1wwGA62trZX7PW+cX//YP+AYTk5OOuvTbQ4GA62vr+tv\n/+2/Xd1d/FAP5OTkp3GpkYW7ptxSWbr9FOhUvlRwXNxcvOwrhY6LM9uuKWE6HxQMGxLyo+YUuR90\nxmqGM9twX7y4OEb/b8UkXRhezwevYz+TJzlGj82fe6HZQNvBPT09VdM0Rei9+Gu8oMHwuOikeBFQ\nyfNzf0cn0f3zdTZSVLocHwOAmhOZPHe/05Fi36yI3A5/WxH6s1zo6USQas6Vx2eniW36ev+4Xx6D\nnU+PwfflWkonM+Uz1yWNDOfE8lbTC/zMz2a77puNPOc428hgin9TFvkcyg+NOR0o3pPt+38avGXO\noQ0Ency1tbXyXDvGkooTs76+Xox/ytNoNCpGkkZrNpsVZ97P9H1ux/JKPnrsdrBozNx2zQiTGBT6\nmXTO2VeOJYljTVmkITWPvG7szDMoonw/SD/bAbY+SpmhLPh5/JyBrXShE8kH/+9167mogQm5HpYF\nIPx7fX298MUOYwZZvp5OiHnlZ9jx87gsjx57ggnU2an/amtQurAD7hPXBfuba926NGWQPPZaok7k\n2GvU10j11FNPPfXUU089vUd6KIhUQrn+jPAeI5OE52sRrz8n2fs0qpJ9yL+XpfLyvloUZGLkW0sL\nEalh5OroIlNUCb8yYuczE4FJpGRZipFtJIrk69KrTwQvowf2MXnrMRLpWBZZsm1GXgnVJkpkfrmv\nhMKTl0wFcYyJspHMH/LF0WimpXiN++7ok+1ndJ9RG9uuESNnPt9tE+1gSnR1dbWkGCR1/jZ/2KZR\nKCJTfC7TTSbKVkah7kvKlL/jmBMFznRAppm9/i1zNWKKJJ+Ra8Tk9ZmIOsdBhNd98N+1SNkoD++b\nzWZlXayurmo2m5Vo2eiU59XolPm2vr5evtvY2CjtNE2jtbU1DYfDS+j+6elp0Uuz2ayDco5Go/L/\ncDjU1tZWQZ88Bqd6c70wzWIko5YZyPVCvZI6jql3y1eiGWyH3zFd5LY4T5mKdB8TJed6MoLj/i7T\nr9PptIN0MIVHOaFey/Xk750GJI/5P/mW65F21qijZcufU8aJVhnlNLHf8/m8yJd0IcMea9oWzgOf\nR31qtNa6iXLpezKN7jWWKHUikkTkh8NhR7+kj/CgbNlDcaRcf/Fuxt1Uc6DSmDJ1lQ4RnYma8ksj\nRZg+IfmaEmV7tTSGKVMstVRDpm9qfaNgGKZcljpIZZFUUyx8tttIaPhBqRbynIqByiH7QMeC/6cS\nrEHUNn40DHRoamkBGi8aWjs7mWo1eRxe3CmH7EfNYGTagvfW/qahogzm9w+613VblhvWszgNQyVF\n3pBvNvRUnBybx03nz2Pl5zXFV1uHOXfpcHK8NbmiwiRRRjIwyrXJOc60L/ubTikdVxuTWuqVfXU9\nUuq7tbU1zedznZ6eajweSzp3bE5PTzUcDotTZePlmir3Y21tTRsbG5LUSfNxLJwnp3S4Dj3no9Go\nOFDr6+udtK9rVuxQUV6X6Xp/TqeZDom/s5OVASmvqzlSKQM1R9/EObKusZzSUTU/7EzQqWAKy8+z\nTuA95pn5armhfiDvycO0cymzXNt0UsgPyy9tyXw+13g8Lk6465DMG9dImS+cQ/d/Pp9rOp2WNl03\n5vor8t68qQEP7Bf7kGRniXWzGYBxjdqZo8NF8lpjepZB3zJ6KI6UF4SFS7pcgFYz1Mu+s2BZ4DhR\nifRQGOmAJWqVTlQ6QdLFxFppsdbFPxTgzDcvMwrJq6RliJT7zfFln0kWuHQaaTxS4dXQoFq7OR5G\neUYkzLc0IHSe2J4VP4lzV3MibYASAXJBPcfN+7wg0/GmE8L+nJ6elgXNaI/oVdb+pANtviRRcTEA\nYXTlfidfbNzIRyKAvocKfjablTXFfvK35SZlkQhBLSp3PyhXlI1a/RINajqWjLhrwQCdu3T0jejw\nGdkn8sY1M0ROEo2lImbUTIch5c28MiKUhtE65fDwsGOwiFKxEN3y4s8530YW3Bfy1M82atS2baf4\nmePZ2Njo1MK4bW+YmEwmHcTEsmLjzfusbygfUleW+LnbY11dGjmui1zH+Xmi0v6faycdcmYUqAey\nXsnriN/T4bMT7Pmn82TKNUZnJvV+Xlfru5+dn5mnGxsbGo/HpehcunAkE6RwW9YVRjo9jpOTk0vB\nittMJ4/rn85eAiJeX9ZDGSBbxiwDXjM5hpqudV8pLw9yoEwPxZGqIUy16CWNeP5tIrPTuFPYsu0H\nOWwPui6NgP/f2trSYrEou1aYJnFfahD0g1I2y/rIsdfuz+hpWXTO67NQl22z6JTOIgU8F1cqr+SX\n+WOlYCcrHRRCvORbRlN8Jp1rOgUe2zJidOO+1CJct51zyfHVEJRlz2SElPPNtOayaKtGq6urGo/H\nBWHgzhYTETj3n3NsR9TP4hzTQNBYZWFmyhwRwORVBjQcZxpSts/ou9Yuv2O0nw5YRtyZcq31xd9R\n4ZM36bRJ3eJ6ts35d8GxkbDFYlGifbc3nU5L0bh/M4jz3Cca6X6lvNJBbZqLtI/lwCksr8ssql5Z\nWSlIhh0pOoCZikkkcVn6jP30d6mj89osneC9lBcaaDpl5Jvn07KUc8ni+uRpkp9np9t8zLS626L9\nYhsu3M6AmTwl+sI2OPe+zw6OHWXu2vPGBfMisyAMZtimHTMHZ3ROzUvPZdM0nfRibWMTx+Q5465E\no6UMHIiaWi5qjiUdd+rEZYEh6aHXSOXkZzTo73xfMraWusnrKUzLnDQKbEaFqYRTCXJS1tbWyk4Q\nRnFMi+T2WaJiOU6OaZmx8PcP4lt64eY9lUrNQawZjexD7m6wMUklQqXExcBnZ/uG2wkNW+F4LrjA\nOXYa0ZzD4XCoweC8ZoEQt6FoO1E1VKo29uTRMgWakL+vpbKko2H5riGYbpPjZd9Go1ExfHQIrTCS\n7/4ud9xQdigbtUiQqY1Ee2ikUmbYb/fTz6OyzfVrfqX8p9NOxeiaFtaskei85Nbx/J9rP50nGyFJ\nHSRpWX1JGj1f73XIMR4dHRVEwwbKCK+PNXD7dL69ZigDdBz8HP6YWAs0HA4vbcl3W0YMmE6cTCaa\nTCalTfaPfeLcEkFNBMW8zpS+7880UNqN1HWeW1IGbzT81CmWd/Yz26/ZN/PCa9N1aJI69WfZT/eF\nNsv99fe0d+lEel2srq52HI3hcFhknulIP399fb2DJtLhp7NP/lt/s+bJ8s5Uam3Ocodd6o4sUSAP\n0kHjZ9mGx0J542/Td50jZcNVixRraT9peY2Pr2UxJwdsRcRoS7q8JdXPdxtZdFZLV9UMLRUwhdTn\nGTnq4aRyLKaag0dllf2mYUo+8HmpjDhu9sNKxHynQlldXdXJyUkR5OSplXsa2nRslzkhdBjIJ6N9\njlqcguAC5pi4aDlOR5D+YbrM33FuyYNaFMb2H4SqWBYtI4mK0DAxuk6lxmtSeZq2trYKMmA55BiT\nT/7OxbBO0XC7Mucx5akW6XJeE33NNmtyaaVt+aUO8PNpGBOd5f+cRxrudKLosKaDXgsoMuBiSsHE\nSDjboQwlslBzEpb1M5HDZWlSo0Scd39HXrN/kgrCbid/ZWXl0hZ9/3bAkw6h+zibzTprkQ5IyhQd\n/pQny0Q6Xrkm2Re2l8FlIra8j+k+857nQBFdYmDGZ5qfnO+1tTWdnJyUtLHJ/5vfDMLolJlqtsPy\nyPVFJ8q8J/9cX+dn2jln4FFzUt2GA14Wn/se2xTKs9vMAIJjMVrHdDBlv7beag6S+0rbn8FpbcPA\nsr6R+uMPeuqpp5566qmnnt4jPbQDOTPKq6FG9KLtmdvDpIf4btub7ZHXoLlEpdgXEqOUjJoyIkp0\nROqecJ31R0zfZX/ordfgz2XpUPcvd5cxQieMnMgSU5isr3G06wiMCBn5xkidffV15GFNFjJiIIJF\napruDrq8z5EzecCxZHGk++7xJ4pX47XbSpSHfOD9teLFZeki3+OxMLpMlCZl0SdjJ1rj6NKpFp7s\n7qjRhcS5LjJNlM9mio98ItReGyPRkUzp5L2mRLRr7Zp/mW5wn31PDTFIMpJIPZTPtpzxwEq2lyla\n98upx5QvRvcei8fgnVTr6+saDoed0+qlCxQoU4mudTLKQTTfJzsbcUl0inUjHH+tEJwImVN90+m0\ns0s06+USTSIim8iK5TtloIbmE6mnPPD51mdMCfm+1Lvc1epTvLmzL/Uex8W++H/zk8+ZTqcFESKS\n46yN9UGuC46fetyUpSmsf2V7zLb42UQbieS5Xq+GEBFNTH3FbEnOI/mWtaFEnCgXtCGLxaKzucKf\nmy+5zolAZSnEu9W6PrRi84RqOQF0qkz8nLAmHZvcsr4sx1z7nukOCkme8JoTkbuiMi1QG3M6WZzA\nWrqBcHS2nUJbG5/vpTDkIqnxhqmFWlrExtbPsQKuOYT+u5aarPWBCns2m5XCz6ZpSsFtykC26e/G\n43FV4dChS55L3WJ46SKnbwcjnWHKbebseS3Tekk5127DhjudePLSZ/ywz3aoTk9PL51kvb6+rslk\n0nE+BoNBqYtKntJJ4v/uS65p04MCnVwL6dSQz7mGmfbw9emcS11D4X5n6j7byDQ6yddwvec81NK+\nNVmgk5tOlmstaRhNbduW18bYmWIK2k6mj15gOtxOgHWC15P153Q6LXJuJ+Hk5KTU7XitppNlXZi7\nYRk4DofDjp7mRgrztBbs8RmcS/Yhi4M5jzTgdog572y/dk8G0Jyr2k7dTDXZWWaKzs6mZYXrx8Gf\n5yIBhlpqjzqXa/Ps7OJsNPPXNi3H5P5m2pf6x33hphynNy3/eTZVHjPDfvs3bUDWidYCL96f7ZFP\ntM8eS8695SH9ELe5rLSo3P/Ab79DlB6xpEtOUCoNDrKm5KloKVC5GKmUiKC4DT4/nTbpcrFerd7h\nQQXINBKpMOgQJoJUUxxE8JaNnUqa9S6JHGRk6MJSP5vndNjL39zcLAiGpPJOO+/oSWfB/a1F6nkd\n58F9pAKSLuoWjLBwYbB+ywo6597jT4TTfXc9h/vrV2cQeWQ/+ZnHSmXj/qURrjkS5A3XCxWDDaT5\nsLW11eGjjdrGxoZ2d3d1dHRU+LZYLLS9va3RaKQ7d+6UNnwQI5V7IhxE/2p1UP6byF+2ybmmovfc\n+TvKbN7H+fVz0iG2A+57cq5oGDgGritS1riQWBz/IMoaEuoBFvOyP5ZJRuj+2zUtvM+8MAKVemrZ\nGUZ8rxvfpejaKq8VoiLJ76ZpOs+0HNZqmIhGZ7Evn+f7OH46Tpwr94sOXOqOZSgI+0RdlYdVeh7d\npvVNDSXjLjW3a57O5+fvdyMSLKmzycOoejp9bIsyTEcg1xydS7bj+wgMJL/TiWWAaVnyRgR/58DO\n6FDKqNtLnuW851jz+mW2tMajmm6l/LHuzv1LPZD00IrNM8KkN5reZio+Ks9EWUheCF4ED0K5GPnx\nOct2SjFVlEJaixj8PxUcBclCT0hRulC0NaPt8SfsmX3xdTXEh5T8JgpkgXMagUW1VAwnJyelSJlG\nwuN3P2qITE1RZBrUPPH1VmY1x6XWf99H5ctFxIgwn+d5SDl1mzxnJ5E5olC1uXI/mD7ytcsUsw/I\ncyR48+bNslPqnXfeKdecnp5qd3e33Pf222+X+bh69aomk4nu3r1b+O31lYqf823+1tApU64nUhrQ\nWuCSCs/r1G3TWJCX/Jv/p7OUn7vdmoLmdwyg3FemhdIxJw8sP4z+a333d9ztRCTNhs2yk/rB629l\nZaUEOOYjdcpkMuk4S0Q/WLCeaF/bdl8+TSTH+pFrKA0j1zADOF9L3lMfk9/We6nbPC88r4j3mV/L\nHGXzO3Wk9Y1l0f30phXOB8fqteTfNYc3X0xs/UEdQseKzgvXpK/jeqJc0V6kbuHc+R7zkAief3t+\nfQ4g1yjn1A4mN5J5LrJ/tP2cm2WOTM0hTB1APcY0N51Gy6EdYT7T6+BBAdJDfWlxzdNLB8O/GdVy\nUVKhZhSZ3icdsHSq0qPP74gyeRGn05OMToXsyWB7fp5pPp93YHorJEPAtYh7mYfetm1nl072iahU\nwpzul3ex+XsrDSNWdChPTk7K2SNEp6QLYeQ81RQKHeokf8++LjPWnEsrvExt1QwrDYd5z+sodySO\nj4suFbhlmONINCFRHo7F50K5baKHbduWk69v3LihtbU17e7u6u2339bR0ZGuX78u6fxcn9lspsPD\nw9Ju1g3RGfQY6CTQkSYPa5Egx5Jkx9HP5trLtc01yr7WiP0xcS5qn9eIc+F55VpchnLVomvKKB1s\n95MpbD+PazvrS7z+bBxtJPy/9VTuvGTazrszJXWcLc8j09uWbbfLQJMy7TXDl78S/aND7HvdL+kC\nxSD/0rmyM1ALWjPQoQxTj6ceomNSs0F2MqwP3ad83Q4dW8tv7nZkP2vItOeBvMv7arYz/2eQTr6Z\n31yvzEBYr9euTQczn8s1zN2H1il5vqLHknqYgAPlxfaT81rjnX8vC/D4TDtQPCDVtEyfdZ639Jvv\nIDG1k8rQSp1MIrOWwX1s10Tna1k/an2g8NGIuE0LU0ZQjAJM2feMVPK5NbTKW1LzoM+MElOBJyKW\n4/CPo41EAMwHKw/pomaMyBTn04Wk9u6Pj48ldbekppPBhcGo3v1gfj0dVyploj5OdzCKSx7UiBGO\nlQcXfipn/5+FuaZMP/jvjJrSQaHctW3bORiPxteo0GKx0ObmZvnuySef1M2bN3VwcKCjoyNNJpPy\n3Qc+8AG9/vrrZW6YSl5fXy8pQLfrvrKPHk8qHMt1Takn4snvOFeJEPmzNJZ0rLh2/L/vf7d70zmj\nM7wMOeWp3RwDU3OMvNk+ibojnVken8IaTqn7qhdTbrDwuiJvjXycnp6W2iiiIGybcmFq24vXgWQG\n4Ozs7FI9mvvuYGEwOD+7rZYez/FQzmvBLddIBpl0ZLwxxp9nDRd1dKIXWRtjJ2NlZaU4fDbAGQCw\nbaLNXstOnRo1pI7w3JlPTrm7n3TkExRg8EEkTFKnRMPz4e8SRWf//Qw7+wwWrRcsczUEkGsqbSDt\nEJ/rdhPl8vW09ymnvt6orufJPE2AgKlyBjKmd6uR6o8/6KmnnnrqqaeeenqP9FAQKUag9AIdtWYx\nMj1XwvzSBYxKDzyjHXvoNXTK9xiG5n2MhOid5q6mHIPbleq1SPa2s76Lz6WHzfTbcDgskWTuwMn0\nFseyjByhJTxuqNPeu/njQnOfAM7IzGmP09NTra+vazqdFt64borRPOeZEQERKc/bsnoHp0L9HWuc\nvHtkGQLFCGcZ8iFdrhEyn/muMcPXhLQpb45wKTNZNJ8pahMRCkZGa2tr2tzcLJGuEUvpPI25u7ur\nnZ2dMi9GoK5evarr16/r9ddfL+/CMgrld7OZL5kCYyEmUVWmUGuolGU607fmEefORP4k3J7P+XbQ\nxkT5/Dd1jqNfR8+JDpovNaTLfU20mXNtqqWOjDrVULbFYtE5+dqoIHcVU9aMmJ+cnHRQJKdWvD6I\nuvl+oyOca88do/WcX6fgBoNBKaD2PBoZMPKQNVIp8x5jTf/6+TU97756TnP9U3e4nUQ6vA5ZYM0U\nK1FZ6WKzi9vwvLh9rzGj26yt8jWuIaJObNu2HNZJniWyYz5ST/n7zJ4wxWbExqldv7DYesx/u6/U\nB4nUGnk6OzvTbDbrvMrI/aghzrkBoWYDPQ9ZT0hEjvcxE8W5kbqZllpfPO+ZPXJby+ihvSImoU8q\np3R6qLBS4aZjlSkTTmItx8kUhZ9nITPTs0DQ189mMw2Hw046qJbek7q7c6y8MiXoSSTM6X7ZmeF3\n8/n5m7a5RXYZ7G7KflGJ0JhQwVhxmKy47US5P3agzs7OdHx83Jk3GlH/JJRL2JUK27tAEr63M2Ml\n7q3e7qPbzfScx5MpGPOBcDl3z7iPi8WinL2USjyNZi5YK3grMc8VDRqdkpRFzqGV9COPPKKrV69q\nY2Oj1D299tprOjs70/PPP6+bN2/qox/9qL72ta9Jkt566y09+uijms/nevPNN3Xjxg298cYbki7O\nrjk8PCxjSXifxj8dF65fKl1+nulAy3SmcNJBSsPhdUl4nqlUznemK2igm6ZbJ5PzWktJe3x0CMwf\nzzEdoizArfWtZjBMLIJ1X3JuPHYWmVueuRuMDhSdf6/PxWJR3q2Wzo0pa+TcB6+tmlNsObBDTt5Q\nb1JPMmXjdQ3XAAAgAElEQVTmsbEPNYeWupz1aVI3jc2Uln8zcGOtls+Isp5kP1nL5jmiDuBOQZ7Z\nZp75eazH5JxmKpLymPV+Hn+m9vJ5Juph6zvrIzp9rN/L1B5tWwaUlEuvGcobbY774+eZbPMpF9SZ\nDLS5GaUmgwYjaIfcN65df0Y+1fyH0vbSb77DxHy+yYOhopa6RbX+30QFZsVSi158H+8lCkIB83OX\n1XI5ak1kidFMkoXmQYhHjtvfsSaFyJmjPBdFE5FIHqTCoaHLKNERlGugMjqwl88IVFJHQM1bCn8q\nnozS6VhwjshbzqHbc/TD9mgUJF3ajcGaq+QLo1YrFKn7mh+jgZxLjikNCuWMBsD9TiPNdsk7Opmj\n0Uj37t3TRz7yET3yyCNaX1/XU089JUm6c+eOptOprl69queee07r6+v65Cc/KUn6W3/rb+nWrVtq\nmka3bt3SzZs39fTTT0uSXn75ZY3H4yIDHveyeck1STmhMeN6ZzDAufF1NTSu5iDlOiNKkcYy17rv\nz8g+dQCfx3lLRMbkuaoVufp7yonXYU0vZD8oz+ms5/qiI85de9xR634QDUjDukyXmXIe+W5H8571\ng2ncs+CZ75hjQbudw1owkWvMn+drW0y0MenUSBfvS01ZYzt0zoiomoeJHvmeDOASkXMfvM4dsLNG\nyqgibSI3FNjpa5qm8xotPpPjJRJJuU0UyfNLdDGpFjjyu7zH9XzmH201bSllhvzKoI38tM1wm0bb\nbdusc8gXBkk1gGQZPTRHih2WLhu+7DQhVSo/MjMdErfFk3zTqcq2pK5xqDlHXCw80JCQYc2RqSn1\nGlHYapGfF8XKykpJ9+T7mqww6bXnWDk2evBGunxaMu+z8FEIs2DWO/c4TjuS7sva2lopuuQ1jpJN\nLIhM40g+5a5GojhWOHTUKHtZtGhDkzsPeSihr8u5TxkiWZ64lZjjoPznXFMeeH7NI488ojfeeENP\nPvmkmqbRk08+KUl6/vnny0nDu7u7Ojs706OPPipJ+rmf+zm98sor+o3f+A198Ytf1O3bt/Xxj39c\n0rkDNplMOjsy6cRkVFyT41RuVpJUrjRSuY0+EZlc69Ll1IKJfaWM0imwnHp9cK54v40FnUQGe+m8\n0FFLBIrPoXLnRo+a/jKvanog+WviWvOPZfjo6Kjzkln/li6/A9B6js+jHqYsrqysdDY7mAf+24Zw\nMplcQpXT4SS/2QfqgKZpHojy2SinbufYcnzmW81hzfVMx8X6xbzMAJJjoZw4ADYRYUxkxw6H26It\nM3JXmyuuAT+j5uCbqAuJkltWuP54rBCRP+qMmu5gH2sInHmaPKwhssvsKG0ZgwofYJs6ymhwzX67\nL7UyHdNDc6QywmJ+WOoqzZo3SONrhUPFY2rbtqSGEnXy5GZEWPuM7fG5XIyMLjMdSGXi6/L0VypE\npqjSM+dClFS2xHOXj3fPuR+LxaJzuGSOh5GdpOJI2JmiwNqBS0fKnzl62d3d7QgqPf5UwuY1Uw/u\nl6Mxzov5xV0knlf/pvLKM214GnGiSjQ+dKT8ORVM8sXPdrsZXZMYlVNZpKKzonQQwV0/TzzxhNbW\n1nT79m29733vK/UOH/jAB0r91PHxsTY2NjrG4YMf/KAef/xxrays6Dd/8zfLePb29rS6uqqdnR3t\n7+9rNpuVs6nsrNaMV40Pte8T5RgOh5d2PDEV4rXkwCWROj6vthY5B9Qn6RBzXXBe+Bz3z7+5Xmv9\nMjF4s8HLNu3ceczsB4POdAbT+PB5PteNuojrKxH6JKLfHpeNptEJ7iYdj8edde51s7a2puPj4xLQ\n+hTs5Fc6hBxL8p/6vuYseb0ZqefaZDvkAflGZM5EpI0vXmZg6d90Ij22WpqNMktngzVHadeIcllf\n0AlgDW/WBNlhIJ8YxJhPltNEssyvwWBQMgHZL64njt/31Wwoeez77Cj7ANuac+rnp27Ndk22UUYc\naWct27ad+RqjWnul30u/+Q4SDSGVBhVeKuGMKGvtSZch17wuryVUz+tqk2XKiUul6EnnIqXR9iRT\nAVjgEy2hAq05fL7GyJRzy/bYLZBte/5ONX+fC90RBWk6nWp1dbVTB5ZGh4rPffC1VrLkjdtN9MZ/\nGzqmQvEb0lmv5HlwZJZpIc6b2+PBcUZc3A+Oz/PDE4fNU0fCXnSUX8uR26SSSgeJfzOiJQyf1/l5\n5vnOzo5u3bqlZ599VisrK5pOp3riiScKH40M3r17V2+99ZZ2d3dLm1tbWxqNRvr5n/95Pffcc/rq\nV78q6Vxp3L59W0dHR0WReR5v3brVUaxpxAi3M4IjUmpZdztZDMr167Vkw+S5JF+WOQK+z+uG/aEu\nqUWZnkPqJ/ZP6m5Q4PhJD0LXM7peJhs0ooni+blek+aj0Wkjy4lsUEdxzTCgyQjcBtCoizc6+DR9\nR/mM7JnaW11dLRsajJa7Xc/PdDq9NHbq8gySqS9zLqgbciwcR81JJg85/zxjiGvb93IsWXdF5zJR\nKo7R9zEtad1Ane0atpoz74Brsbh41xyDb+otrkU6Oa53pa1hip/zS/3IwJ1jJAJMnZi1x/6Ozu+y\nmiWvQY6PNpKOpHThRPlz6h7y07rdtLKycum4k6T++IOeeuqpp5566qmn90gPDZHKVBu99kQVMs1H\nBMFETzej5Myzsx0jNlmMyjQSUYAawkQiGuXn+HpHGPbQCSUTlchtljWkKvvg3+6n05ncESNdbLl1\nVObt0X4O+ekaMEYtUjeK4edMXWb9xXA41MbGhlZXVzWZTDroEusKHNUwtcd0G8fCCM1btjOn7zER\n6WHUnKkm/+9+sC98aSujKs6R5zxTCYz6/X/KSKYS3B/3aTablWMlpPPTy0ejkV5++WX9wA/8gK5f\nv64rV65IOo+w3nzzzVJUvr+/X3izubmp2Wymxx9/XB/5yEf0Iz/yI/rhH/5hSecHeX7+85/Xiy++\nqJ2dHY3HY+3t7UmSjo+PC5roiJnjYoTNNcA14ojYKJf54/Rezhl1BaP3lFVTIsWMXIkmLSuWJd8f\nhFqx3exDjsPXU96IvhMNTj3k56RO5JrPvgwGg1KgnCUG7A8jcLfp9qTLB4kSOTci5ZP0R6NR2cGb\nKRwjwX4Gj+JwOUKmWvlc95fjp84wypBrMtM2kjpF2pkupq1omqaDkFlenBZiX1Ivs99sN1NZRP4S\n4WzbtqS0rAN5Unwt80JdQyTG9WzZN6NVlOFEizIDwCyH26wdrZEy5XE6vevrs9+JjmY2wGSEKNeF\nEUDLh0te/ByvfaNRfAUOUU+vYfL3QfTQTjbP1IB0udKfE2xlk4bP1zP1lQVzmUYxURh9rT9nOo1K\n2G3U+s3vqCjdZjo2CVtL3SLXGl+o6MwLwqYJYbN+RLq8s80GkS8o5Xis9OgAMD3jv/kdU1BeNIb+\n5/O5hsNhqZnwmN0f95njcF1Fjd80FLyGdQPpMPk7Kxumarl4CB1L3ddS1FJ1djBIGSi4nZRTGoja\nGK1YCZtL0s2bNzUej7VYLPThD3+4GKjf/d3f1dnZmd555x1NJhPt7OxoMplIkvb397W2tqZ33nlH\nr7/+un7oh35I165dkyR95jOf0ebmpn7lV36lGGKnb3Z2doojVXO+M/VMnjLd4rQj73EaplaLkE4U\n5yd5ZaoVv7JfXitMybjdZW1ynDXj5fVdKy2gHiCPqEcyrev7MkVZ60c+Lw1zli5Yx9RqTzwPdGra\n9rx2bTweazQald88X25zc7OzY4w8cnueZ6b9uIWdu9MexHePwXqVRpG1oi43MC/sHFnfDQaD4ugl\nr/k89zk3TfA3X+GSThHtE4N6yijtDNPiKSvWo9Szqd/YN9a6JSBBuch6Kq6ZlPn8339z3bnP1KWe\nf/JGurzjnZtO/LLjZQ5NtufnUMdLKvK6srJSdqSzTpf1aAQ0ci3V6KEhUu4wJ4DKiDloT2g6QVJ9\nNwCvs/GjsyXpkkKqCUoKHYmL40FjZD+WOYJEqzx2GlYbgqxjqil/fsa6CTujuZuitquJyv7s7KwT\nmbkIcTqdajweV2savFgyGnCU52ttNGxouehoUHgoHZU7ayD8ORVQOpzpyORWX5MVe62Y10q65kh5\njMztZ4TlvtSCAUZvGV1KF2c8+d7t7W1tbGxoOBzqySef1Gg00sHBgSTplVdekSTt7u6WImC/a+/g\n4EBN02hnZ0f37t3T3/t7f0/PPfecJOn973+/fvRHf1Tj8Vh/7s/9Ob300kultmp7e1u3bt3ScDgs\nO/tqyttjqJ35MxgMyo5Of055Ic8sJ9QDiVYl5b3uT35HNK2mJNMwsX3qo6w19PzXiqRpYHLd1IrW\n+fzUUb6PNX35uREFGiE6OenweCcWDSgde4/JheV2pqRzI+X17ppKHgHQtm05HJY1l8fHx2W9Oyiq\n6dRcw4lisPCdSJTl0s5S256/j3I2m+n4+LjzPKIsdhwZGCXKaOImjET/GPzWAnDqmVwvPKIiA29m\nFtwW73Vfc7NS8nQZyJBBtR3cxWJxaSe3+8YsRKKaNdCA9/qZ387mDY4xgRKuFdsM2yBnIszb4XBY\nzgo7O7s4FNXEXZL2I5bRQ3OkclspvXIbHTpZUtfA0wgnFGeqRZspyLnjgp9LFxNTK3JNRyq9bio+\nomqLxaIUYEoXsKLh7fl8XlUK3u1CA0DnLJ0398N9zyJAeuDkMyMS981C5VTfaDQq51ZZGG2cGD3T\nAWPERYcgF3TOIXlAZWMDYgeKBom7P7yguEuSz6PT5rlftvCtMNJ4eV7oRC07Cdd84nEMLHBm35yK\nGI/H2tzcVNu22t/flyTdu3dP73vf+7SxsaEPfehD2t/f1zvvvCPpfCcnnTOujUcffVS3b98uBaUn\nJyd66aWXyjgef/xxvfDCC/qTf/JP6hd/8Rd17969wksrJZ/JQscgnUTuRrUhcfqHfPKasKJLhTWb\nzS6NoRYQpTHy2HNrOJ0IX2M0M9dAznPqnkQQTHQImRJhBC5dTj+lsU7jVou+ibrxc/Yj0RMaQV7r\n9HHy13z02jfCvLm5KUklbe8f8oaHJTqtzBS85cjpQgcDx8fHnefWguma/na/fR/XFI29EV7LN7MB\nRCvIB88PUS/pPMgxcs5+2tbZEU2AIOeH9pDBLG2X15LfaWr9UUMz6az5XpYscC3awXBK0denvNWC\nBzrBtIvD4bDoMKKPlgdSolWZHTDRuXefM4PhPo3H47Lr2HK7vr7eKTx3W3SYjL6aLy5HWUYPxZEi\n/EbHhJNH4a9F8bVJkOqKlWhA1qjUUCde475QELN/CYXaSBoVYlv0oi2IfAlxRjbT6bQ4VRlh+28v\n/vyc/SS8yv6YlzTs/p588ziMipycnJSddL7PzpIXI5W062LovDJKdt+86PmdFSIjBPKNn1H4rbgy\nH+5FxAiICB2VGCkVcsoAIfTc5s4goIY6sT98tuVyOp1qc3NTu7u7xei//fbbOjk50QsvvKDNzU29\n8sorZWfUeDzuGBJH4B7H5uamJpOJhsOh9vf3y3dvvPGGVldXdf36dX3yk5/UH//jf1y/9mu/VuTE\nDv3m5mbn0M50jIhgmAdWZKzZYK1aIs9U+jT8nAvzOI2sece5JVHXZAqHCj0dfAYuNWJ/OId0OGtR\nNNtMncF2Ui44Hn9nw+LPuK551MQylCJ1BvlkJ2I8Hmt3d7fops3NzeLoJxopnc/XaDQqiBSP8ODu\nNPJhdXW1yCWdbfLUDlG+fsnG0zz02hwOh8Xo2/FhXyzTlsd0TrwGmBJ1/4zapLxZ/6S9oLPu+WLA\nTh2e99GhN69qQEAGJ+Qh+57tM4VpIqpG+bOcZLbD91C+yQeibqlvbT8Tacv2s4+eN6d0LVeSyi7m\njY2NS3o29S9tg9TdRVijh+JI1RAiTj4jRv9vAUiUigVxTC+4TS6IjMw4yWyTBZMW5HTA7Mmnckvj\nSzTIDhKRJPLBjh6L9Tx2e9FS1wh4TFRG0uX6L1/PBZWpARqrmmPqZ9uBmk6n2t/f76CLnqM0Nv7O\n/Ga7jjw5HhoT95Nnq5AsLxsbG6Weh3UX/skCecoc+2o+1QxwLeVmYvRpPibaZR4SefB4E6Hk+H3N\nyclJOb3cZzCNRqNy0CL57eMbtre3y3k60gXCY8XStq1u374tSbp9+7Z2dnbUNI0eeeQRffazny08\n+ut//a/r1q1beuONNwpvc55yzjwezz/XHakWLNkRoIEhb/09EWkikbw+5Zjrj/PKYGYZlF/7PNdL\npuG9tmsGg0Rlzb/tNCQST2cnDbT/rvGUupKIlJGcROL9HCLiDo6kc0RqY2NDbXvxmhG273SeX+NE\ndJiGu2masmFie3tb9+7d0+HhYXl/G9cI9Q7bdDqH43E//Zm/51pz4GGdyPokyhWRZPKPzn3Kac4R\n76s5tERu/Kxsk31IO0S9k1kZykTqMK4l1yaR6PQksGHUqYaqWp+ynwyc/f+yWljy1GvLjhJ1Ltfv\nYHBej+ngmnV9NX8gecYx0xmvUX/8QU899dRTTz311NN7pIee2kuyx8xIK3cCSN3TgumVp3fPIteE\n042QEAnxd0ahEuIk9J1pDf9v+NdtsU1He/S+7aUbceEOOkf2fB+Rn+M6o6zHcB8Y+WaemZA9UTJ+\nxzF5PIvFQkdHRyV1xKiUp50TRWRbTrExFTEajQoqZf4y1cK31fOt8szpr66uanNzszPHTCXx8DVC\ny0lG9/w3EUt/lvNO/vhzX8f0tH9nOol89r3ug1MRx8fHunLlis7OznTr1i1J56+B+b7v+z5NJhPd\nu3dPg8Ggc0DidDot8u+onfPQtq2Oj481HA4LyvXWW2/pm9/8Znlx8fb2tn7mZ35GkvSlL31Jw+Gw\nvAqEKSojYIxgKfs8AJXwvmUk6yNMRKWy4DNlheTvslDXfeVxC7X1kygJ5/DbodRDWbzL/jiqzoM1\neX1NjrJeqsYDj9dEtDhTkP48i65zXOZNFnhbXyZaQV09mUw6qV2+SirRupWVFW1vb6tpGh0dHV1C\ng6yDbTPMI89tns7t+3ywZqagXcvDFBbLFvycGm+o9zIzQvQw58K8y9Qlvzffc8243bSP1HNcg+wz\n0UyiW6xzytTaMp0mdXdMJxLLdvLFzOYP54Xj5701ftTG7DlwnRNrpCgTiWQxm0L/hHV/y+ihvmuP\nTEoHZTAYVIu7crHVlJyZyxxzwtlmIhcajRknY1kaIuF7P8f3OB2V/VoGqXsCCQUzFeYxMK3pZ+WC\nIa+YussCPvahVvxs/iXseXh4WNrxd94FYSNLwTOsT8eVRngwGHS2PvNskhTgNNBbW1udRSGdpxpc\nO8E0gPlCqJltpszw+Qn3kprm4ugDywblmw507mSh88zFa/Kuu/l8rmvXrpVjDO7du1fOeDo6Oiq8\nl85lw68O8jO4Q8XpDab5pPPjFKbTqU5PT/XWW29pPp+XZ3z2s5/VN77xDR0eHhbDz/Xk4lfzjfzK\ngIR8t4JmutkywFREprEywGEqxm3znpRTpyJc72fesG8pe3ROavorZYJj9L1Zp0THufYsqZuySKK8\nkuyEZtG05S0Nhv+nI2XyOVHuv+uceCaQ5YHjM9/p7NCRog5lIOr+uADdgRDfTpA6knzzZ6PRqKNP\nneZhKpmBE4NryoIDEf9PvcAAsTYPHCuJutfjYMCa9oN8YZCf/K6lCik36XgyXewaSPOP4/Uc2PGh\nA+71n7qNfcqi+dS9Kdt0dnk9++T+8Dm2C9vb250dwizzsNwycJIuCtW9Rthmja+mh+ZISd0dUYwA\nzUAKXm3Bs51Uam7T7WZ9TBaP83lUPInImHhfLdedn7EgPCefB3IaqcozlvwcjoU1IYmq0YCmQ0PK\nXU7J67zeSptoh9tfX18vhXzHx8eduqRapFbjl8fPwkUqNO7cMwLjxULH24vFirEWlXuRUhFlzVjN\nOHFeajvz6IRSvnPREgG0o1RTtk3TaG9vr/TNr4F57LHHdHBwoMFgUN6rx7nwWpK679jKiHKxWJSi\n3qY534llBODu3bt6/vnnJUnPPfec/ugf/aN69dVXS7s+zsIKisqPjkPTNJ0dQrXAJ/+3c8KC1JSZ\n5HFuIaczYWeRhdJnZ2c6OjoqO8W8O2eZY0OekdyPWtS6WCwKsuj/ExXN3XIcQw1x8+c0TPmbR4+k\n0V5ZWSlzZ6Jc2rF3NO/59Vry2qJjaX1UQ2Rc32dZ5xk9RCJSLrzDb2VlpdQEShfvvaS9MCoyGAyK\nk0cbIF0UZbPfzAwQ7eH8UJ97HdYQ7XdDDjkHXCtN0xRHlXzhGniQ7ubaqq1B9s9y6uvZPx+BYd7z\n6ADKLeXO/XIfszA712wN/MisCANa8pXPTvnjs8xPZyXMI/Iq+2J+5TpKB7BGD82RIjQpXThDiSxI\ndeYTkqNTQ3TG1xMFIZMYfSSSkyhPKjg+k/e5P+5zOiRMabF9pjDoyBB2Tg/cCsLIVKIudkxTCfiZ\niRqR7x4HF6ev8TgN07qNO3fuaGNjo4yHi5/jcrt0lty33L1SQyXpEBh5srFMBIgRCndH+jOPp+bU\nZeTGOZO6BaHpaPvamvPC1Jafs1ic72piFMR55HsLPSdXr14tzs729nZnZyPnKZFTO/WLxfnuPaMy\nbntjY6MUoR8eHuq1116TdP4i5J/+6Z/WX/trf01//+//fQ2Hw04Eyg0VngM/j1uOfX3yquZE04ki\nQpTBUo33vmZ19fx9kUYjNjc3S6G9nVCflWWnygXOGUHXdBPny5/VrnP/aZBqyrlWwL/sWj47fzP6\nTj1iHhH1oIGvjcMOCueRc0X0PFM0fs+nn8/1QmeBzzeiTF7wXaE0tImAEikjcuZ1b91Hp86o+Orq\nannjQ65394W6hkF8Bpcco+Un9QvRJY+HtjB1VCKQLv3g/FO3Uw+nDSR5rLYni8Wi6AXysxbsWYd7\nHmqbInztsvVkOfB9DyIHZh6b2/RcOxPB3ZzcIZz2mTbY/OffecRC0kNN7ZGRhOvSm+bA+b8pvUUK\nDR2ZTC+QGLGnocv/0ws2WZC8rdaT7edzIaW3bQXhA+1yh5UdDC5aw7CO9mg86LC4rxmheNFTgMk/\n54XTyeR30oWCefvttztGiwgJHUGmmPzbfXA6h8o9HT5GFl40nuucE+6upJIiakR5omJNOcuFRqeB\n0b/b4XzkMRyMaJc5YX6ODztcXT0/w8epNu9COTo60tHRUUHnfJ+Vsf/muU48ysDX+buDg4OyTbhp\nmlKTdeXKFe3s7OgXfuEX9Morr2h/f/8S0mEZ4XpyKs0GLAOTRFY8T0aGlim3TD+0bXvJQJtvw+Gw\n1I8Nh8PO29/NV/N0a2tLd+/eLTvGshaJ67eGkqeO4Vr3d4km2AmmrjEyRCPNdWpngvLiZ7hNH9jK\nwMzGZzqdltf+SOdonE+u39jYuLRLaTQaaXNzs5wK7Tny8y1PDpJshL3jji8e5zqx3nS/6NjYaZlO\npwV99Tim02lHpknWw0agiCwxg8HvzDd+R1lMuaXj8aBAP4NtrvuU4zzA2DxKgKHm0JDm83kHzePu\nceox98NkmTMaRbTJ/cgxEgBg/9g+5SFBCTrFtD1+pueMQWnbth0AgbrGZ5p5/lk/ZSSViGj2iXbJ\n/cua46SH4kjRqSBCYsOUkQAdq5rjlUyoRWi+n/VQVDjShUB5cVsAKPz8P59DuDEF3wLq79hvQ+gW\nKAohoef0sH0tD/JkxG7BdxtUxIwcLJQUOI49Bd9zyDSbpFK4fPXqVW1sbFxygPxcj4dG2FGVx0LF\nmIbZ/XFNEGHxmiPlRZpRo/lFRVKL3hKpq6EqrP/yew5JdKAsWx6H/0+5Yv99Bpd0ntKTpOvXr+v0\n9FRXrlzR0dFROQHePKKD3LZt5/123/rWtwpKd3JyUr6zw/nyyy/rySef1M7OTlHub7zxhnZ2dvR9\n3/d9+tznPqc//+f/fAcBTAeRqfJ0GhOp9bP5WfIwgwB+nvrEz3JtjzdHSBebIlzrw/XN9WXeZH+s\np2ho340YDCZKTMeW7dmYzufz0s9lPGBQ6sDCr3JhytAp35OTE02nU+3s7JS05p07d0p7W1tbHWTB\nNUC7u7vlEM50wGm0vTFEOnfQZrNZqUWjUaqlZ8gDf2b0k2gdt+eTN/P5vByQaUokM3lm3vj5RolS\nf1E3PSjtxGcQnaE+9Zp3m5xP62X3MWXN/as9x/22TjU/PBeUOc6h+0hbTBvFYDSDTEkFzSNvmO7n\nmNxP88/ONFE3IvR0Bq0jqW/pgDrATDAhUSg76f6uht6THoRI9ccf9NRTTz311FNPPb1HeiiIFD1E\neoeGOe3B1iJSe+iMBjINmIVn9koZgRDy8zUmXpcpMXrDGSEmesMiyoycMirNMTIScORs9Iw8My8c\nYfo5rkfw27YdpdRSeO4fa0ocjTgSIILAnWFEegaD8xeA7u/va2dn51KkVsudmxw5JYyaUQqRKR6s\nxjRAzkcN4XR7WdPBOq1EG9ieEYREURxZZUSXc8Z5Z+SUtYOOkFwPtbKyUg4sXCwWOjw81NbWVgcC\n931N05RXEY1Go/L6mBs3bmhnZ0eHh4flOIPs087Ojt5+++2yU1A6j2Zff/11PfXUU/rZn/1Z/Y2/\n8Tf0la98pcgM35uW0bXnJ1MgHgdTwEYyrAcYIed8eM48ZpN56fTW5uZmQd34olLzyZ/x9RLb29s6\nPj6+BPFzTDUZqaXDvd4T4TQxDZztEY1KXeFxUGc4micSR9R9dXVVu7u7Jd3ilOd4PNbx8XE53dtp\nEukcoeLmDacLqcM8bs+hd5c69Xx0dKTpdFrejmB+14q7PT6XBBhx2d7elnS+yaFt25K+IpJPBMTI\nCwuO3V6mjGg3skyAO1yznsb/ExWqyYQzCjn/RJf42+Pw2k67Rt1Lm0iZHI1GnfGPRiNNJpOCXrMe\n1WO27kv0jfq+VlrD+2jj2/aifpXpS/+f2R6P3zymXXKbRPX4zjzXBPI1Rn4e++BnEH0mkkey7XpQ\nKjnPKggAACAASURBVPWhpfZsrCmMUtc5oKEhcRKpaBOay5qJGiN8L40iUywJxWc/c1HQYCQMT8XP\nGqOac8WCXQtL1mpJ6jg1fIfV0dGRDg8PO4oo+89UE5Uii1FNXGyZYjVf19fX1bbnBcqTyaSTjmIO\n3M4ZF5D7njl8G9larjxfvZBwN52brK9ZZpS4iGj8zGv3hw6hf7uP5gnrPehgpFNPma3VdKytreng\n4EAf+MAHdO3aNd24cUPSxXsPV1ZWOu9udH84N3aqJZXi9Dt37mixWJSXyXIcOzs75Vr38+mnn9bd\nu3d1584dXblyRT/+4z9e3tHHPngsTgExnVdL6dlgch5yLtwu55BknpJvliPWaEkXRcxMN7Iuh6nf\njY2NToEz+5zk+2xQsg6FNTWcbzqXqVNYK1MzOqytoRH2uxntUNkh4vqxo82TnyeTiQ4ODnR0dKTV\n1dUyh1tbW9re3u7UJJpHnGMXldORmk6nRR9NJpNLtTQOiLJ+zvWS7jeDG+uS3NDi75qmqZ5tZIPr\n52RQnnNL5zSDHM6770sblAF56pxlujTrsUjWpZRVyluuET7Xc+e6M84HeVlLbXq+M4DJeaL+8tqy\n48Z3rHoNso419T5fNZbz4vlz2lk6l2E/3zaTfeHOSJa6UJ9btpgO5jqs0UMrNk+vj0KQyA6/l7qC\nkUzmwpAu76jI+yjEiaBIlwuBa0xN71zqFpiyfdYBpDKmgfd3VpBWuIz2M1fMgj575o4Ca7n7RPWo\n6ImI1ZAlf84jDoiCnJycaGdn55Iz6cVI48nC8tzVlPxlTZt5TMRjWR6bjkTWYBHJ4Ti4Y8zPM+/8\nPfnC+fNcL6ujSKc+60PoHLqW6c0339TTTz9dovLj4+Ny3IR32WWNnJVh0zQlKt3f3y+v03HBsR0J\n7tZ79NFH1bYXL0l+6aWX9PTTTxcD9ZnPfEZf+MIXJElf+cpXyjvYPA4GClxn6cRarr1TKpWax5OG\niBF7KjnPh2Vxd3e340AQ3Uxk2nyaz+fa3t4u9SWJlNaM6jKqGWyPy455onXkoYO9Gio3GAwu7XAb\nj8elVsSvb5G6Dt3JycmlQMTHRPi9eeaZD2H12p9Op6Wu0+QNL0bHvd6MlFsXEzmnYfX6zfF7nnks\nhXVdrUaNZ4IZmSDiTP2R69AOO424eeMgg3Vgfg71fda/5nzTVvn/DDCI1qROsyNkPcJ15r76O/Oc\ngYp1aC3AJD/zuXS4zs7OOvq7NhbykwFAOoveYJU7IS0L1gt08hiwMiiyLrRdqqF9zCZxzZ2cnJR1\nneOrIVWkh+JI0VHg71RKNApETxLtyfY4aURcEgrNScuitGyb/2fEwbG4vVT6fv76+npnWz8XhK+x\n58x0ngWHHraNgRe/PXMW2h4eHurw8PCSICxD17wAiRrVlJ2VO2Fz99MFpjasHJv7SjTHz0vHmI6b\n+7Ys8mKxbkbvVHyeC8tEzWCbP+kAJVLKSIwy2rbnxd3cKJAF/OnUu082NtJ5im13d7fsrnr99ddL\nKsbGxQaF27w9P8sCDKd0DPtbHr1LzTvX2vY8rShJ3/zmN7W2tqannnpK+/v7un79uj73uc9Jkn75\nl3+5nEVl5yNTJaREXYgOuM/mn2WDc06+8fOcL6e4KYt2HhghU76NJI7HY+3t7XWeeXBwcGmDQlIt\n+Mr5zbm3/sl5IvqQn3uMTA1LKs6QkSgfESKpUyJg9Mgyar5sbGx0Dr6U1HHGzs7OdOXKlY5jaWM3\nn89LetAy7LSeC9B5BMPJyYlms1k584cpXPPMYySKb7mgw8FDfKfTabmWeiERl0zB+rk1PlvP+dkk\n7pDLIIEOaepU/zBtlXLBYNnjM9Vkwp8zKKeuNzmDYKKO5C5rPsdrikS+pXPKlKf5TZTPx5BwZ7J0\nUV4yn8/LC9LZLsdFB2wwOD/OxHYiAyXzn84Y+WV7QBlt2+7ZjjV6aK+ISQdF6uaFU1AZkSbMx4ms\ntWuEhY4EhZnwKNusQbTsT418n710tsn0BCNDO1JUloyevZPIcCUFkc6LJ9zf+TMbhcPDw6IguTuy\nBqsm6sa/ueXfi0G6gGrtbFH4iRxZEWVNQzpG5jOdI6kL4XMea9dauedCsJLKZ0vdLd0ZaVJB+RrP\nF99kL+nS4rdMZFpQuoxImccnJyel9ujatWva2dkpDkvW+vDZnDeiUtK5UZxOp7py5Uo5NdpGggbR\nNVR+3iOPPKJXXnlFkvToo49qa2tLP/iDPyhJ+uQnP6lf//Vf15UrV8q5RUQ6SJl+tazZUHKebATp\njHhcNYNCR9IoyOHhYamTklRSzozMuaa820k6l3GnOf0MGlIiVL4/dZfXGuWfz/M1RksyeMkxkm+1\ndJT1pA/IdUrO/DSfnN5JI0vnn7U1Dtq2trZ0enqq0WjUOX/MaUE7MpYbIlRO41OvOhiYz+cdFMj9\n8o5jzgtlImWDc+Qxcfx01Kj3GOQlgs/gjz+c41ow5mfWgq90KhIQqAX7vta6hE40x0HggI4UUaPB\nYNBJdVHnMOjM7x3YpszbHtVsG2XSMmXnmSk46gYequy+cwxe47lm7FyZL5TvRPjoLDGgTvR7mb0v\nc/zAb79DRGHK/LLULTL1dxnFkdJrJ3NqxiUpUa6muXxCsimNbhLh0dp1hFtTgRlB4EK0YBqqtJBI\nl9+czsiHaT9GyHZ6eIJzIiSpwKXuIloW5Tgf7v5RSTP1NBgMOhGj+bGyslLQFTpVVlKpYP15LmyT\nc/eOotIQpdPlz83/B6EollfLhqO7RNdShvMMLbdrBMcnYNMZ83sIr169qg9+8IOlH8fHx8XZsXHj\ndmH+9tj8vIODA+3t7alpGr322mu6fv26JJX3FR4eHpZzenzfZDLR008/rVu3bpWCd8/JH/pDf0j/\n4B/8gwKDM9pN5JOyZnQza9PY35rjSiNB2UjndD6fazKZaH9/v6REfd6WI9laarAWsPlEdBdMs1+O\n3qmMTYlc1pBOOti1ddi27aW5JD/pKNtwO+3hQEpSOaOOAQ8DI+qf4XBYeObPHnvsscLTw8PDok/8\n2crKSiknsKwfHR110EGm6KizF4uFxuNxZ66ti/nj75zyJrJNnnvbvwuJOUbz12UP/s58qRnvNPQZ\n2NUCh0RoMmD1/+4j59BE5I9tZPCYRLTOlGtpsVh0shj+3g4nA2/OE/tDx9C6r3aauH8c0PCQ3lyH\ntJF2qDLYdXBOu2eZoZ3MdWE7yzSybQk3G5GPtTdYkPrjD3rqqaeeeuqpp57eIz201F7CqvSsCd2Z\nGCHYm5S61faZ1mPqypHgsr64DT+LxXgZRRAurD3PkQCjRLaRkYuvZWozoWEjUtx95H44leI0nr+z\np15DJ/w8w+xE/Lg7LfvOMTGSky5OsDaqwkjJJzS7qDhrpIjesG0fYkqkg2NwxJoIEcl9ycithjqY\nh0RNWL/A1FvC6cy953icsmKtBIua3Uby4vT0VFevXi01KbPZrPqeMm9ZJ2LjPhPql87lb2dnR2+8\n8Yb29vY0Go107949Sec1Ui7Y9An95Nd4PNbNmzf11ltvaWdnp5yy/gM/8AP6sR/7MX3+85/XeDzu\nFPUmypepW6ZmiEwxhZG7fKXL6VDWybi/5u90Oi0v2t7b29Pu7m4H4eb6NkpDxMdk1LUmb0ZULW+W\n7zz0j+05Emb9D9tj1E2+pZxTJ/r7rKeULlAAIgxc5y7Mns1m2tvb69Tj+aDd4+Pjcp3bPTg4KDK6\nv7+v9fX1zsu1XWDutczdfp73GoJOBJ96kbKSfOManc/PDzM9OjqSpLLJwvqA2QevedbPEL1x/410\nmizT7Lfvs462vuUYuLa4a9FjYEkB7yM66bXBZ/vafB6/8/wbneH8N01T3syRqS+uE/aV9Vsso7H9\nmc/n5a0BlEmm39Lm8busS+Q9HANLZJj29HfWI7QdHvsyf6TmWyQ9tGLzXBiZWuNvKjoyzERG5nOY\n6qNSNLxdWxg0hMvqO7i4KVy+Lx0iO2W11JH7wUllLQPz2ePxuJO+Y/7d4yJ5gVqxG25PA0VF8CAj\nzDGyv5LKrq2VlZVSP1WD6W1obPRy9555JV0cjZBC7fGzHfad8uScPw1rKqZ0lphL59j5N6Fo85Dp\nnZp8u26KTgqdQ9eJcDv+wcGBHn30UV27dq3jTHj+bAzdrr8zP1dWVjppVs+Nzxh75JFHdPfu3fI8\npxn39vY0GAyKETo7O9OLL76oGzduaHNzUy+99JK+//u/X9J5uuynfuqn9Fu/9VulH56X7FMqb64z\nOw1+HgOTTM2mE0wZpiI/OzsrDqKkcuK3HdlMdzhtYLlg3SGPUCC5H2tra5cMPOWmlqJin5M3/HxZ\niQJl0uR0SNM0nZPNmSIxf53281EXjzzyiLa3tzUYDEoK98aNG9re3i4bZZzGM28Wi4Umk4leeeUV\n3bt3TwcHB7p9+7akcyfLcp2pZwckDjRynFzDdGz43r408uS79b+duoODA21sbHScZfbFMuR58HfW\nd54vrkPfY1lLvUd5oRzTTtQKuK3rrLsZ6DNgltTZaMLnZZlJreyEOsrjyzSm+UoHlm15HP4+Azq+\nOSFLQmjbsy+skcrNUgxk3Kb7bnua8k7HkDxggbl/aEseVGguPUREKhnN7xKtSuOYHjaNewoIJ5vG\nlJPp6Jl9SVQpc+ZepMwXu22+HysXTTorbIuLlWPm7jwWgLpGytdYEXEMZ2cXB3om+ubizVTQOTYq\nMCstKg86du4Xc+P+zblh8TcNTxoTL0z3g44UUUH3mbJBhy0Vbcpg7hSqXeddSFY2vK9mABlF0SGw\nA0bZ5HXk5Xg8Vtu2evXVV/Xcc89dir7m83mnxoFIrevSjFy5rz7uwKgCjy0YjUba3d0tdUV855qN\n8ze/+U0988wz2t7e1te+9jVJ0vPPP6+Pf/zj+qEf+iH99m//dqeIlTUJKQvpBGQxqNcFgxBTLfhK\nZctgwQ7h22+/XXbkOYJ9UCBGA+W16LlNVCz1jL9bLBadukD3k7UZeZaWg7UMukyscyJ6QCczkWEX\nmA+HQ929e7cU/EoXxuSZZ57RE088UWpZ3KZloWmaUjtjlM9y+MQTT2g2m+nOnTudnadt25ZCdD/L\nY/W484wpz0s6Sh67P89gi8GjdK6P3RejLhsbG5eCcr9qhDLJfro/KRN0qmqoI4GDtDF+tsdBnWRZ\ncv0Ui8G9nrkDk4ec+twmB9ocP20Sdaafz01RdHzpQFK+ySsGDP7cbQ6Hw0u7eRlc0TFK3ri/Jjo8\nDETNY/fDgImJtpU6yXx2XRg3DyzzVUgPDZGykjClIlvmMS9DrR5EVmy1CNbMpLPg5xMGZV+o6BMt\n80QmFGiBIkqQSJZU3/XjiN3CSMPJXXtZrGcD6iiSkYzb8PjZn+wzHQb33f3JQl1+xqgmIyvyzYuT\nxeFUgu4jHU1+RyWVY+Di5gLiSzlrEVRtkwI/q6EDVKKeM0ZMNtpWLLWNAU1zcUaMdH6UgFOljOo8\nfvN8sVh0dgmapywg5X1OeRwcHGhtba1zCvXGxoYGg/PzgO7evVui5a2tLV27dk0rKyt688039eST\nT5Y233jjDd28eVN/4A/8AX31q1/tKH46vJYLosScIzquRGoyIkw5sZzSWSPKxbTA8fGxbt26paa5\neDkvHWmmB1ZWLl6ybWTP17VtW9YRneNEKx1Ysdja/fS4ZrNZcfRIteCAc05jwzEyrcj3zjnNaaTq\n6tWrhYcf//jHNR6PyzEHPAvq4OCg6Jr5fF62mHuuvDPw7OxMx8fHevHFF8tux/X1dR0eHhZ0wIX6\nOZ/Sxa5RjtFGjevbTobT3Ua6PXY635z/yWSiO3fuFF1IXe5AjjqlllHwT6IbbIuBsK+xDqDTb0ff\n88M2vbvT64UbVZxCtt40uiypnGbPF5jXUpEpWyYGB5Qby5h5b0pEiQ6h5d46jw6S+8K0JrMNBDnI\nG8+Fx8xANEtGMuXJFGQ6fj4KxH/nLu8M0kgP7UDOGrIkLXeMUqjzvmXtSl0lx8nI692WjWzNgTNl\nxMG2fD2dojS87CMXLe+XumceJeTq2qlEcKSLt5XTkDONwL+zBoPoWI4/HYFEVjJyTEoF5DYdrVuZ\nUcAp8Ibq2S86ZRkRsS80zF68OU9ECog+8DMq6UTFEh1LRMrXEmXgCe3kq3Quizdv3iw1C1kjxZcl\nS906Pzr9RIGsFLxD0kZSUjnt3G0SXZ1Op+VU883NTX3rW98qp6xbeX7iE5/Q+9//fr366qvFIOdr\nPDzH/i4jdZK/YwrH91EmaJSki/Rlyj35Zscl0WErYM8ZX2lhw0wjTdnys7lm3OfRaHRp/N496zcY\n8Dsew+G+UY/4+XZaGTDQkBtpki7Sd2dnZ9rc3NTe3l6nxlE6d5oWi0XHsTPPPB/7+/saDAYFtRqP\nx+UF10dHR3riiSfK8QdGQO2cE1XmWqTO8Xd2Aiw/lBsHrBwvx++AJJ2lyWSiyWRyKcVEBJq6mHzk\nGqfek7oHRSYxWDIxPefvaAedVq7VgtlR4lEF5A3Tlk3TFMTZgQKdUup56jP3TVIJNvy7VotYmwu3\n5QCEjmTq7CQH+Nmm9cD6+volxNU6hvqYAR11Nx1MO06WNcoNAYBl9NAcKQsNPVAqykSkzBQLAiPv\nGqLk+/w7J89EtIqeMgWthoR5EXNh0KAnJE9KFIewcRp2OlE5PqJmvofnpbgOhwiS28iIlsaM11uI\nOG63beVIheLDD33eVUYRTClSUP3bCtufpdPE+pKcZ7bFv6m0pctQbUa6td/mGduuBQOUIzpPlAf/\n7evMRyKN/vv4+FgHBwfFmRoOhyWt4joVO6J8Xq3+w+RonA6Yi8Z5zMbJycmlV6RYeTs1dufOHUnn\nhvT27du6du2afuInfkK/+qu/Wgyz59XHONDBI4Kb8yd1z+5yCofzkvLhKDxRpVxzs9lMk8nkUkTL\nZ1rO6bAxXcqUjr/3WFibkc4WD8H0GrGD7Ejbc8+1R2rbtjwjr3EbrGUispDnS3l877zzTnm2ZdRy\nwwJ7vrbDmxSm06meeuopvf/979d8Ptfbb7+tN954o/DTZ0ml8+F0EcfFcTDYcr/8nWU8nR4iShnc\nORg5OjrSysrFq5DMGyKSXgeSOnxhvzj37F9+R0TK97qGzc4wgxZf73f82RHx2P08llFk0E7kOWXQ\niNcyJ8a6g+d2ea68rlkfx3mlHfU15hvTcP5sOBwW20A59fqrpbXJf9sd88P9sfymjqasZEBn+8Sz\nzuiYLaP++IOeeuqpp5566qmn90gPNbUndSMMIiLp9WfkQTQhIdiaB0qoT9IluJf3JPJAymsTlXC0\nUEtvJTTPNphXXhaFJp98jZGIjI4JzzJ9yHvIN+4scXTNvDL543otv9eIzzXiwAiC8LSjSSJSTK0x\nKneapZZeNdzKnRokIh+sb2IhZO5ASXTMCAfv804jzkHOT0aJ/t+RXqaGnd5zlOr7Njc39c477+ix\nxx7T1atX1bZtOSSxlupxv1w/xlohzgdTyOT3aDQqBy1aDswDoxyurfJrHaTzlJH/fuGFF/TFL35R\nL774oqSLOh1HdpwX84vpJ/LbP5nyJeTvcXOHD+WcSLH7Y+TTqZFMszLtkOuCaG7qKP4mMmk95XVj\nVNF8JcrOlB5TO3yeI2ZH7Jl2MPJiVNH3ra+va3t7W4899lipvfPuurZttbe3V1JyTJUahfIrXYhK\nSedr8eWXX9bx8bHe97736amnniqI1MHBgWazWVmvbNd1kSkTHj9RfSKXTt2SP5wz6j+md4i0JNrO\nsVhP1bIcqb/8eSJinHuWZng8Ror8mZEZqVua4M1C/s6viyKaSznMbEIiWJYfp8ZYZ0d7zNIQ1jx6\nXFk/lWlBtuP5OD4+vrROfA/TaSyi91xThxqFMgLFNCX5n74Cn2fkyWN3loQHG5syhZj00F8Rs8wh\nkrpwKeE6MlzqnjFFyjRPLnw/w0zNepeawSTcnUVwNYcmFyn7kBNL45Y74Hhf3s8FQGVkQ7psgbN2\njAbZ4/D3rK+xQLnonekk1rG4XS5MQqdURHSePM8J3abD6jYJtafTx8XJwkM+I40seWuZ4Pg4Z0yN\nOQ3AtCjnKCHlnE8rJhbq+5mj0ajU0qRR8P9N03ROjGbtjJ+dTkittm57e3tp6nV1dVWTyaSsCzoZ\nt2/f1mg0Kum/F154QV//+tfL/NJRYB2QeUpnOp3ZDIJ4jechAyz3l0ERee96pMPDw3J2jttlHRxl\nwkePsPYqg6haXyV10hdMCY7H43Latp06OlmuU7Pj5k0BqWtYsNy2F0Xwg8GgPENSOUZjfX29HCng\nWjaniF1/c3x8fCmotMHPtKXH5Xd67uzslB19k8mko69pkOkIcD3Vxsg5YV2VdUeWEdAusL/Wqaen\np2VzBftjouNaW7/U+8sKnF3D6HtdC+f2WR7B4nemFdm2pPJuzIODAx0dHRVZpo1aWVkp7y5Mcn+8\nPjxmy13N0aGeo041z1JH+3fWiKZ8+z47dix3kM7XDc8gdJseo3Wfr3dtMIEajoNpugzgrUPTznq9\nflcWm6eDQQSEkTOpZkxr6E86MqzHYnRDR4TPtHedhsREQ8I+0VDVIhn+zu+8aK0AuNg8Tu768vO4\nm4K1AHRqKOQ0DrUIgv0h8sXtvlZaPKvG13GXBZGZs7OzcijfdDrt7M5hFGCnyn1jcWBGRUTaPH4q\nHY/B8070iY5dGk06TMnTdBAzb+8fKnLfK3Vrl+gwWO7NT19vh7VtWx0dHWlvb68TmXpXGR0/z4Wf\nkw6/eeMt1E1z/soYSSX6tYNBnrImZW1tTUdHR7p586Yk6cknnyxnB924cUOf/OQn9du//duSpC9/\n+csF/fCOMcuT64WIDDEap9Odc0F5TVlm8JU1cVyzdgzzkFMHD+yP1731CZ1DKuZ0pIispHw1TVNe\nnOx6GAZvXvd2pkzus5/l89u4LuxM+6wwSXr88cfLLrq7d+9qNBqVQzfNaxcUz2az4mS48Nl9tYzb\nsTP/XMg9n89L/dTx8XHp7+HhYSeA9jg9L/zcTgiDaK5/FjBnYOI+Uh48F24z9buv9QYT2gXOXxpa\n6wXLS47NP5wP85TBF/Wlx5DHMUgqcuL31uWLoP0Mn5fFPuVL1KnLXPtkZ4LOIj/zM4gccr1kbSj5\nbjnzeGwPiNZ6jK4Rs8xxI4n1JFFok+ePdZJ+LgvJGdB5fEQqU5d8VzpS6Zyw01wIy76rOSTpEPgz\ne7SM9LmryTA1i5GpRBmB2WkhEsAIhM9t2/aSA+J+EzqksvazCX1vbGx0JpMQp3f8WBBYGGzv30WF\nXnAeR+7WoPPCBUBjRIifaTXpQjEsFouyy4xKyhGg0zw8kJNpnFq058VRi3rMl0T4fJZKfm/lRQPJ\nfhLF4UGWiXqRT0w/JprpPtYCAUkdRWLHx7uhbCxt3BeLRdmBY8fU8uPI3H1z6iCDCMv9/v5+MZg2\nSrdu3dKNGzd0eHioyWTSiYwXi4W2trbUNOdpo93d3YI63Lx5U5ubm7p9+7aGw6H29vb0Yz/2Y5Kk\nb3zjG8VZMr/9N/vsYxgSWXCxMonri06Tiam/RF0t+0ylUUlyfdsBdL+Hw2Hnt7+zzNpJyt1J7kdG\n6nTMrE9YMM+XRtP4e/w2wr5WUjGgq6urunbtmp577rmyu9LOj78fj8edFw/7hdbz+Vy7u7udAnmi\n4zwSgzw9OjrS4eGhtre3O+82XCwW5Qwh8tv60WuVAWwi/5yj2WzWQWKo21PvkO/mEY02HRDziHIm\ndXddJ/qSG4H4HV/Ia96lA0J7Y6JOdpE39Y7bs+5nitO8YIrPtFgsinNCGeL35gnfUWg95H7wAFHL\ndg0AoBNpvUo7fXJyUnYPM303GAzK0SQuIWEq0Q5UHutj3tgpTTTK65OZEbeZoE6iqOmzkB7aOVLS\ncsfH1zxIiGt583TAfC8njwbaz/H9Fg5H+L7GELifQeVMg2xhYr64lvYjDOsx1PhT+34ZqkSIW1J5\nsSoj/qwncF9SUVGY05HisyaTSWdHiL/zYmPEbqNjo03H1ULshcr5I6/pbLK/fmYaL85NKhOPPb8n\ngkc0i3Of6VL3hf1ynzLFJ3UjXukCUmfaIaNdG1k7pH62+WVHy0bX83pyclJeK0Q5tRJOtOr27dtq\n2/NambfeequDgNnAbm5uljHaWL766qvl9OvDw0NtbW3pmWeekSQ9++yzevnll3Xr1i1NJpMSAEjq\nIBSWEc6/x0fEmrJW0xecF8su15SVur+jfuDRJ1wHbsuBiwMFyg+DA0mdeUqEl/1msEAnxfpla2ur\ncyq426bjRQdkdXVVe3t7evbZZ/XMM8/okUceKam9w8PD8iJsG1Oi3l6bfIOC2yTysrKy0jl/y47N\nYDDQZDLRlStXCppFxDxrdqgfExmyPjdvanrR37NNrtM03EY5vHbYLp1a/8/vuBYTcSYaQ5mh02O0\nj4Eg7VAafn9u5IWIFPXN6upqOVyX/LN+YzDkPvl5nhepixQl32o1n/7bKUnPF/WXx2S+e90kETXy\nfUZnadfyHuoN3+dn0mHluDK952t4QLSBApORs2X00GqkpMsOAyNnevX8nI6TdPmwvzR8CfsSaaEC\np+J0WzRSCeslzMwxME1QM7hExTgGIm6e4IQt3VeOgYaGxXPz+bykzHy4HpU0n01DX/uM47ZSsLAl\nmuZ7jaR4/P4s0zR0XIhGuc00kukUWYHxPvIuoXH/zWcl+knFSH7bOWQNivmdTjnvraX52K75aaed\n9QCOnv1M3s+am5WVFV27dk3SeYrl4OBAOzs7JW3GQlMXDNtYeJ6uX7+u4+NjXblypRzWuLu7K0nl\nZGIbYjtt0nndxp07d/Tkk08WRW9k5Q/+wT+ov/gX/6Jee+21gvJRTj0PXi+eA6JQRBaTiBBQ/jLt\nRmOQaUTODwu5a3LjfhNtdpDAVLvJCB4NLeeQssu0Hw2T58h8Yw2JHSmeQP+xj31MH/vYxy6dvrc/\nrwAAIABJREFUNeT5c1qIemw+nxe0k4Gi+U8EINO+liOiFXakxuNxObcpEZCcz0wL0R6YJ76OPLSj\n4zYzPWN++YBiox8ZtLRtW+SXNiiRQCJLNUcl0TFf75IIPs/rm/qDZQupS63zvH7tnKTusT1I/jGj\nwbqspmnK6fKUCfeHmRnKomXI7xLl/LqPBhess0wM5ms1Ys7iJALIPrNNpvXSQczAlo6U59sBCkEH\n64ka8FPmfOk3PfXUU0899dRTTz09kB5qsXkNdeJ3JOZhE8LnvYlYMcqtwaZug78Tgq+llfxc5pF5\nn68l8sOomaiTP7PHTlTNEa6RB46PuWAiUJIK+sQj7xlB8pUCmZ92JJq8lrpQvGsi2A75lFEjkTMi\nWb6XqZsH5aN5j3+bb5zLWhoz57kmF+4no06PwXPjiJGvafFziJ4wUuV883OmmbxzhQfhuZDY6R3e\nS2RnsViU2qoPfvCDun37tvb390sKz/M7Ho+1WJzXzB0dHWk0GpWDCV2k6jm9d+/epZSvC9L9XOmi\nANZydevWLT3xxBOSzuunPvrRj+rrX/+6RqORvv71r1/a4u/0Bw9tzHS4PzMP+TvXPefc8kx5oewx\nneb2t7e3L6XgPVde71xPXGt+VY+f5znzHGQayykl7hiSuqiEr+F78c7OzrS1taXNzU2dnZ0V5PBD\nH/qQPvGJT5Q0GwuWXV/iOiqn5KSLHVKeY84va6pMREHOzs5Kql86r4t63/veJ0n60pe+VJBIZhY4\nfq+Z2roncsj5MBLhua6l6WtZisViUY5tIdJDquk+t5kpNPLBlKll2wmmkKQLdMVb+TNb4faJ1BEt\nN/JLefahsUZNyVMiPqx/8nNcx2Z9lkhc01ycks7UN9FMfud2PM5MJRo9slzTlvjHa4d8Z73Zsg0/\nHlOt1mnZ+1f9zLTBy2SzjHfpN99BSgeGn5m42HhN5m5r7TA1ZUbX8ut0bhIepDLnThovBvaP46FA\n53NTodfG7HaoDFicS6GxgNE5ch7XgmJl6bbIKxqKWmqvVpfC/rVtW2otpIttuXR4afgswKenpzo5\nObmUQkwF5b/Tsa7l2HnysMfGugm2S6ctZTFrNlJJsn3D68mbmiPm9A7JbTntdnp6Wk7/9nyPx2Nd\nuXJFW1tbRTG6wLttz0/y5u4l75R68cUX9eyzz+rGjRv6vd/7vc66WVtb03g81vb2duGF03RUoDs7\nO5fqZFZXVzvOmR23vb09vfPOO3r77bf1+OOPa2trS3fv3i3ffepTn9KLL76or371q521Y7h9e3u7\nGP3kJ+dgWSrIay1T1myHae1MvdLp9ny4loY8cPDkMTBtMJvNykn0kkr9mNtyP3k2mbe/MwWeqS2m\nwi3nW1tbJf24urqqra0tPf/885KkH/zBH9T6+rr29/dLoOW5dyrQKRwaVAYJJpYf1D73b+vIzc3N\nkh65fv26pHMZtm7i7i7LFNdJzhsLgvk9U4TWK2kvfG06Jf5tOedYUr9mfQ31BuWCuiJrNX29HQ46\nGQw8Ux8nr/J0dbfB8fhvp9n4mZ/DoJ3OkqQim4PBQEdHRx1nQlJZE7zPMkTeeR0zRWqnjvPCE8S5\nHmtUq/NiDRo/J/8pw94tzlRr8i+DNa/1lE3SQ0OkpMuM4UKgAGW0mYZ12QBr9VNs320lYsXiRgs3\nPXMvIhtTCrgXiwududi9WOxMcXw03ByPc73Og9cKQL04lu1CqDlDHov7yMXEZyS//DcRuf39/fKd\nx8KdGKamacpWaBohOjFZm5BzmMTIJZ9nZyqjFCqCZYuGTnWNd7PZrER+fB5rRei4Ek00OkgZtDGY\nTqflBa+SdO3aNY3H44IicZ7sxI7H44IG8fUNX/7yl/XpT39aP/qjP6ovfOELnV1z0+m07LjiqyLW\n1ta0t7enjY0N7e3taTgclgMbrbRu3rxZNjP4u+l0quvXr+vu3bu6ffu2nnrqqeLwra2taWtrS5/+\n9Kf1xS9+Uffu3SvPu3btmkajkW7fvl0KvRPFowOURa+pI/Je8pjyRmQv5d9zMB6POzUtNEBZQ2LZ\nnc/npX7M49/a2tJ4PO44UkSBiJwx8nX/WQNnx9UbE2ycXnjhBX3qU58q/Day0DRNeRWOx+pNA96B\n6bE70KOxJZ2dnRV01I4TdYwPap1MJjo8PCx9HY1GOjo6KhtTuHvZ+rOWOSCvuZPS47dRtA6nk+Qx\neI3TcfNYjPARkTF5rhhYEY16kD0iscYs2yBSaR3vdliDSTTHcph94jwOBoOy25Q2iJS6h2M2r9fX\n1zsv6SaaSN3pAMvPotzQGfT8kXfc6ETUyXNG5yrXdr4OymNIZ4rO6bIA2faoZpf8Wa0+s4xj6Tff\nQaoNJCk9bE52DZ1IzzvbMtGY0nEjskO0ytcQzqfDlTvWfD/bYB8o8Bnp+p6Eov3MLJz3c3xCLXcv\nWNhtfGyk8l6Po8YbOpnkVRp0Gzqfa+Q+5qIx0uIIu/bcdJgcXaVzSbITkjCuF3A+h/cRQeIz2Qb/\nZwSbznBen/d5oVrpUA7Mb74/TVLZReXv8rBG78qzcaMyXVtb09/9u39Xzz33nD71qU/pi1/8Ypmv\nlZUV3b59W+PxWMPhsKT2RqORRqORZrOZfud3fkfXrl0rZwUdHBx0ItWmaYqxPDk50d27d7W9va13\n3nlHq6urJdV0cnKizc1NffjDH9bVq1cLAiWdozY+qdmK00YkAwCmSskjGpZENVksSwfFha1EtDj/\nTpNTnqhrnI5JBW4ZPzg4KCjfbDbTzs5OcWyI4jolOhqNSh/ZTyKrGxsbHSSvbVvt7OzoIx/5iD72\nsY910Anzw8+kQW/btqQDiSS4/3zpsvlomamhPJYbO2+LxUL37t0rsmEn38XtNb3q5zC9af1Dg5pz\n7rESfaFT5nElb6SLwnMGcgxyOfd0lOmYkYwQUu8zsGqaizPD3E8H3rZx/s5rwO2lLuGp5H4Wgwzb\nDAYB/s1gNXV5on8ep4M195lrzbLMsRJcsENKR8z95f98Hn9nxsKfk9+cA17Hde8+ZGDtcfvHjj2P\nEkpdkPRQHKkak7jYPclUhnRslqFLKfyMcNJYppOTCFfNGSJZ4BhdSZePDiBs7udYIVGAPEZuF+XY\n6FARWXBkZiVDqJJ9snGkoWFemLzzXHiBU2nWFpL7yC3VNe/dnr2h/xpilouNjgkVninnkHUS3ObO\nOaYD5Pmlckv54hySx4TpM7XkvjG6sUz9v+y9aW+kx3X2f3U3t17Z3MlZNKNtZMmRBVmILTtAYiBI\nXuQDJB8zrwMDToAEcmAgtmF5kceWRtss3Ju9sJtLd/9f8PkVr/tM9fhBgD/4vGABBMnue6k6VXXq\nOtc5dQrLDWsPIEWMhMea1Ot1bW1taXV1Vfv7+wXFwHsZgz5WUTK1Wk2/+93vdHZ2pnfffVeSUqLM\n1dXVl8YizBDMWblcTiCr1+up3W6neBhYEUnJlTQ/P6/t7W11Op3EeBGPs7CwoH/4h39Qp9NJbR+N\nRur1emo2m1kWk4WORcMNBQAPMvG+oX9w0+X6lD7z+UYf46qLOY9inKLHelE/r6N0BSQ6nU4BCPE+\nAMjy8nJi43zsA9ZYFPmuWq2qVqvp/fff1w9+8ANNp9euSxaC6XSq4XCYwDHfwW5SvO2MO2ff+D9+\n7vPW9R19yEHYMKpnZ2eFDNTStTGK/nFDJWdw8h3sK+2OAJT7AYbO8JBihDEQXe48K8c6RWDEOGMe\nRXbMZYxud0Oc+jOu6CeMFGQZZYB+djlR3PXs7JfXBUPAgQfXe/yQvxdDjvr4uInrsRtDMW7W6009\nYdBcLzuL6sCH+YlxSR38N22JBhc6mXr5muAy8v89UfSscqOuvag0XTFKxbgFSYUJnANSPMPBGdfl\n6E2vx6xn8L8LnBIBhSNtH8S83wMmo5JCCcXB5lR0nJyOrKMFyXuweBcXF1+iXKH4I5PjFGickF5Y\nOBzMEFCa2y46a8HjXn+/09juZo2Fz5FZbmI4iKUAXN1K4X1en8iMIo+4zRfZO6vo48i307pyoS88\nv4orzuXlZa2urqb7/LeDKK8730lX42Zzc1NPnz5N8njnnXf01VdfaTKZFI6EkZTOZeO+Xq+XtrEv\nLCxob28vndN2cnKSWJeLi4sUH0OyzuPj49QGAPmPfvQjPXv2TJ988omkq7xGc3NzheBn6l6tVpMy\nd/aQ9jmgjf3vGZxJ6kc5OztLDBBHongBgPtGDy/UJbKdng/M5xtjkbbGvgRM8JmDDbKL48KD5bl7\n967eeustPXjwIMkN5pD0BuVyOeWDclckCxcyc2MA3RoNDNfBzElnQ3q9XtpoAOD0MQWIgEnxfowG\npRf62RdtZEwfO+Ph/7Ogx74g4N6ZOJ7JQhvfFxlIXzPcwGf+AyRoswPPCAiYby4X3GWeEoAxzHPo\nAzemab/Ln+d5/6M3YzC6G3s5r4EHj0e9yLU+X3I6MgekfKy5TNGF/r64TkZQ5yW2gXnNOx1El0ql\nZIS4N8VJlVnlNv3Bbbktt+W23Jbbcltuy/+y3Cgj5QVK190Tbr3lEKtU3CKdYxMokcVy68AZBO6L\n/uNc8HGMAXImJiJjrBwsFd9pAOLGao8umsjQuMycAYmWG9aA+9mxPthBA9qOFoYzVU7x5vzVbrU6\nMxNpXSyw/5uSe0/s38gI5O5Hlu4yiNf4Lhsv0QLxMRRZTHfHYPXEuByvp9cFFxrf+3ur1Wpyy47H\n43QauvTyIaMxNgHZnJ+fq9FoaH9/Pz13a2tLz549S3LxhIV3795N57GVStfn8O3s7Gg8Huv58+dq\nNpsqlUqJkapWqzo5OUnM0ubmZnLt9Xo9bW9vp3H093//93r+/Lkk6csvv9Tq6mo6u8/7olKppFQC\njN/IVk8mk8RAeP8wT0lWiXUvqRBgDBvkO7fG43HBwo9sLX3oO8WcxXEWkn7h/1xMEC5x5En8GKwA\nMtnZ2dH3vvc9SVeM1MbGRtrsERlgdGJMKsrRNq4/fRyii2i3x5j5XCqVihtfPEi5Xq8XmJRarZaO\nE8Jl4y4678/ofnW2LvaDMyuuc91FiFxpJ24+5orrBeass518B2PM974OePwP7iZPmOrF2wGrRhud\nOYPdh6113Z1zqcH08T0/rAk+7qhvZGLpGx8Tcb65ro/rqbvKXc/TV8xR6hnjqHLxxu7x8bAZ+hBW\nytvn3h2fE+jmuLZTR4qvWcjA51Ku3DiQiguNC44SP/MF1EFWdN/4OzxAmBIX5lgnv4YSXUG54oPI\nFRGUN/V3xY7fnmf7QuHuzBiTxQTmJ4LB6I7ybdbIEqrT+yK6M2McgU8kjxVwGUZXm/eVKyvq4e/P\n9U2MD/J2eFvj85Bf9NXzd3Tf+ef+XQS4noMHJcKmBffNS8UjNrg/F8jsLimvOwHZUnFzADFAUP2u\n3NklxcK1srIiSWlH2srKivr9vlqtVsG9MR6Ptbu7q/F4rNXV1eRanJub09bWlr799tvCdn7qOTc3\np16vl1IfkGVdUgpsr1arWl9f15tvvilJ+sUvfqGHDx/qyy+/TItwjPNzsIfy5WDcCJ5wb5HegfiZ\nubnrHEgeNO1uLx83nqLAXYj+PhYGisdfeN+SQ8jBsy8mlUpF/X5fk8kkHRZNYbdbs9nUBx98kI7d\nwX2DHHxMMxZzAcnj8TgdJxJjupAxYy5uhnFjNbo26/V6ci+Wy1e7xtjNe3BwkPKD8R1GgMe4sGU/\nhnT4WIj6hL/jtQATXLu0JR6L5Pe5DJEb8xG9xSLuMqVdDsbdMGGuu37mmWSId93FuJCUYhHdwOK9\ngIlofDlgiK47ZOOGneeEc/eyGwVc60ZLNK75PoISrvcNNv69g3BfU5CZAygv/r/r8wj6vf5eZ5+n\n/O1B575+uIGRKzd2REwEL+5fzgXu+gSL7JE/NyJQrpkFfCL4kGbnM/K6xvZwH4Mp904GDR3jPnoU\nTByccau8A4ToF/a2OEBwAMC9npDTmQyuiTvLPGaLQZiLZYsTxRfaaMHFgenxBy7jyCZF68Pb5e1n\ncswaK7PGUQ6MesmNJZ7nIMrPIvPnUDePY5pOp2nnGjFtkhKjguyl4hlu0Tp1ObAIAHq4ttFo6PT0\nVA8fPtR4PFa/3y/s7un3+8maHw6HiXWCFZmfn9fz58+1sbFRCGKGdTg5OVG73U7vr9fr6na7SbFX\nKhW9/vrrkqS/+7u/U6VS0ZdffqlWq5UWFuRE3jFig6gLKSLK5XKy3GM/1mo1bWxspBQNyJRga2Jl\nSqVSAmAe3O2ypj459sPHADrAYyqiFc/BwNL1OZ6DwaAwXviu2+3qnXfe0d/8zd/o3XffLYyZ+A5n\nnRhHcWFj7AFmfHEhsSNxJxE0utHGO/0gWTf+FhYWUh6x4+PjpPt8Wz6FdtOmWQZu/Mxj5+L85V2c\nM+l6yA0AB7jR+HRD0BdzB1XIzUGox+xEo5Rn8B0gwjc0eJlMigeR8xtA44HuDoKk4maXyITybJfp\n3Nx1agoHP15vX99c3m44O0BBVm5I+3v5389qpNBvThRQDwAW4zc3ZsAD/pn3aW58zTLm2XE6q9x4\nQs6ceyUCohwL4YtdziLxvx0g5RZBlEROUHHB9et5Zlx4vV1u4cRdGxQGBZOK66TrbMN+iGJu4nN/\nDqCWSqWXrFY/Zy1aZi7zuIgzIWYpPL6jTUxEFnNn9OI7ojWE3Px3LA5KvC+c2s6BSd8pFJksCs/k\nM6w8V6QR7KOA+S4mWgRk5erqk5iFfTKZqNVqpfxbw+EwXVev19OCSf9HOno6nSYQg6uNpIyHh4dp\nUd/d3X1JpixuruQrlYoePXqkn//859rb29P29nYaA6PRKD3Xd5hJ0vr6ui4vLxNoe+eddyRdWdy/\n/OUvtbm5mWTk4HMwGKher6dF/uDgQJKS27HZbOrw8FD9fl+j0aiQZ6nb7erOnTtaWlrS7u5uem6t\nVlOj0UjB8O7O6PV6BVDqIJ6+BWhEpoffkX1gXNBPjUYjyQYwIykxRfx/eXmphw8f6h//8R/1ne98\nR81ms5B5fDAY6PLyMuUzi6DKx6iDLPqGccf72MXldfe+976JOwwBLuRCajabevHiRRrDKysr6T2e\neNUX0eiqpt7Mm5wB7nWNYGVhYSHNEWek4n0Un4/MfTfg0M38eAoLngcD7CcTVCqVQrJixgrXoQsd\ncHHd5eXLyZgByugR9Jm3x92akfn2dcTDQZyVcX3F2HBQ48+h/hg9zlTGtd6BIbJy8BVZNzcEvH9y\nbkBvk5MhnjLGQ1acKPD54kDV5RZDQ7zcKCPloCIHnFxATPwo1FeBKJ4bBc3n/LiVEe99FXrl82hx\n0inxHhRqXLRzlLJTwyhtrPHImDEAvLNd+fCOaJlGoOGyoQ0RwLgrINdObwvWi8uTxccBSqyffx5B\nl7ebvs/1H7KIoNyLy8bBsFvpPqEcpPg7KN6mONldfs5cSMUcOLjqyAoNK4QbYDKZpOzl1NsXSp5D\n7iYYLZcfTMf5+bna7bY2NzcLB5ASi0U6At9F9utf/1rNZlMff/yx/u3f/i0pxfX19SSDxcVFdTod\nbW1tSVJy8+Eqcbnt7Oyo3++r2WwmV1y0okkCeXJyosFgIElaWVnRYDDQ9vZ2WoTPz8+TG/L8/Fyj\n0Uinp6e6c+dOYRdTq9XSxsZGmleDwaAAhKiDsz/Ilv7zRcP7dhbgZ0HF1ei7utziXV5eTvVcXV3V\nv/zLv+jDDz9MC5AnJ/X4Ltg1xhA6gQXQD8rlWCcHiPyOQCrqYFy/MfbGGQyP95SuAD8u236/r+Fw\nmL5zN5P3Pe+LYMbnDEZQjEcslUopWanfEwvpE6KOJHcb4FG6BiYOopApfed6z2NaeQbAKc5VBy3O\nPvmC70apg3hYJ5e3M/7EYfn8zoW5eH1cN/m1TlqUSqXCWGQcuyuU9zmz5V4Lxr3rLx8XDvId1NFm\nd20i47iL2cGSr/kAVAxW6uceIF8PMVhmlRuLkcoxPd5RPnmiVeXFJ/2rlJgPEn675enfRRDiEzEC\nM+9gt/KYBD4wQPQM5rjQ+mT0giIslUoFZer5g+JAdMDiSsHb6NS/g5VoBfrgd8siV89o6Uag4axc\nlBtK0+XmblIHcF6c5ckpTW+HPzOCea7le4CmTyiPr/B7HdjHxcd/A5oi8HMrtVQqpZQDq6urhbgp\n4mWk6w0DLGrRIkdJLC8vq16vF5L8Sdfb6z1+6ujoSKurq1paWlK329XR0VECbtVqVdvb2/r5z3+u\n73//+3rnnXf06aefSrpSRLjmkLG7KZ4/f6719fUU7IwcG42G3njjDR0dHaW6IO/hcKg7d+6oUrlK\nHor7ULpanEejkba2trS0tKTj42PNzc0lQNftdjUajXRycqK1tTXdu3cvxeyQH6rZbCZXoh/Jg6w9\nBQLyctYslzrALVrfch7Hrh+FA/hksUaGP/nJT/TRRx+lxdQTrp6dnaUxQlySxzH6ppZqtZr6Yjgc\nprEdXUK0m/EYxxQ6hb6NBgDhAnNzcxoMBml8k1eMcx2dhUOfOgMa57dnAOc+mC83TNzQQYa+4Ho7\nAKbOnvhvdLT3E0HkDqi8Da7XeBbzk/a5nnWd5TqM+2BromuL692QdN0OG0ecjzPV1NHXyhhIHvWh\n/x1BepRbpVIpJK8sl8vJm+Jy4b0+h9z4ZL30+CnXkV63uB7nSATGEYYM49XHYfQUOIj+S+U2/cFt\nuS235bbclttyW27L/7LcWIyUU4QULBO37vk8d38skZbmGe7Ci35P7vPfkbmJriU+i/WI9ZaKO344\nYwtrA2Tt8UcxUN0pdtiKGMjHdTnXpLcHi1e6tkz9x60PD6qNLI/3ndcBVO80u9cBSz7uwnDLF3eF\nWxO+ldrlS8wK78n1UbR8uQ8Llrp7H2AZUdec5YNbIBa3GiNzF92XOVerM5q8BwaAA43dJePxJe5q\nWVhYUKPRSBS5W4kkhsS91e/3C1mou91ucsdtbGwkGRMDNT8/r88++0zvv/9+YSegJJ2cnGhpaSkl\n+pSuY96m06lOTk5SrJZ0xUh98MEH+vLLL7W3t5d2WUlXc+bNN9/UdDp96Vy49fV1DQYD9ft9ra+v\n68GDByqVSsmd+OLFi7Rt/Pz8XPfu3UuyYdciMT0cU+N9wbmHPv7ZIYcrh9QM0jVDAuvqTAZ9BOPm\nJwz4Nm7k/d3vfleS9PHHH6cYIuYOLGNkij0mDVYAZpN7pesEpeiZuDvJYxhhc/iO96HHfHzzu16v\nJ9chcwHrH6aoVCoVErnCftEfHrTONZS4yyo3ByNrHOeZs81+P890pi6GX9B217fuqor6hHFBH56e\nnhb0tG8cQeYUr0fc1ODB2Yw7Z85h24jNoh3oNR+r8Z2483Fx0n760IPcaT/rAN9TX1hU34zg7Yiu\nZ5eNB/C7bqMdET/4/XF+0L9SkSGO7FMMu+FZHg+ZKzd61p6XSM9KL+9K43tfFB3Q5J7B/fzEhU56\neUeDu/yoK/e50uF3nIw5UAM1jyKJgMip2/g+BjfvdpcJ7iYW/5xbKbo1+SzStj6YiLtwCtn7gN9x\noPr7HTS5C8Tlx/uoP0DKARLvdjeWv496xnFBO6K7gD5jcriLlXoRMO4FGSHn6EpEMXCNj7EccMr1\nU7lcTjEl0jX9Px6PX4qxQJ7T6TQtjr6dnLPEeBY7xarVqkajkV68eJEWBZ5JTNb8/Lz6/b7G43GK\nO+p2u+p2uylA/eLiIgWNf/LJJ5pMJmm3Xr/fT66nWq2WXA3n5+cp8zb1X1pa0vb2dprb1LdUKmlj\nYyMFkQNeJOn+/ft69uxZajPKn/azK/H09DTFTtG39Xpdr7/+ejoTr1arpfE2GAw0HA7T0SoeG/H8\n+fPC4uKbO+IGg3q9np7pQCIu0Mi2Xq/r0aNH+vGPf6y7d++m9hOcT7wa7/PjOkir4gv7ZDJJ7fL3\n+bhkAXMXlYcC+DP5HEMMUO+yqFarKf6JnWy0g1xo0S0TF6cY3uC6OQKfaFC6geHueR/fDhSje348\nHqfjQHDxutzQNa5XqDNz1PWj1991lwc/O+hxfTGZTArxOw4wmevVarUQfkA/siuVthGHJF27ganj\nxcVFwXCJa6XrWtqIcRL7zOMHXUdFgzKSBOPxWLVarQBWfEzmwmDQ24BPH4fI0o938uKbmbyO1Cm2\n3denWeXGGCkmYQQccSB6ieDJ75GuFxX3z3pQ3iwAx/OibzjH1jh7kmO0fOI6s3B5eanRaFQ4ssGt\n1tjGHGKOSsGBI8jdFV+Mf/IB7YsuStrl6Qye3+fvjBaft3cWqOJ7X4Sk66DTGACbezeFfong2tvA\nfXFLsjM3sX+jn9yfQz/E9rm8aa9PRrfk+DzGFszPz2tpaUmtVkubm5vpPq5xS5H6oJgnk0kh+Z/H\n7GAhelzS3bt3NRqNdHx8rPPz87RrjzHx4sULtVotLS8vp/tarZb6/b5OT081Ho/1hz/8QR9++KGk\nq2Nndnd3NTc3p7W1tQT8GF+M9Wq1WrAIy+VyauvZ2Znm5uYKixiLPYwN+adWVlZ0cHCQ7iVuBfBW\nrVbTjr+VlZUCS3R+fp7ivV68eJEC05Hz6uqq+v2+ut1uYvYYC91uV9VqVXt7e6pWqylZ6eLiYprb\nEZwTy+a79ohzm5ub04MHD7Szs6Mf/OAHevToUQqo51w8jl/xnWIsypPJJC3MjLVqtVrIxzULSMXv\nnGHGGPBnELPiDFHOIAIY+DuRR6lUSjuRee4sHUyhjtHIjgHY/h1g0AGTlzjnkWluF5df4+tBbLs/\nO8rX2S4/z9CDs31xh7lyvcm4gGmGIcbopQBmAb7O6GMUjEajpBMcFCFT+j8GYUd96H3IZ1EvRk9K\nlKXPGV9TfY3wcQoAjOBTejlJNv3KdzC8MUYK4Orrohv/eJFmlRtjpKTZOR+YtDF4PLIOs57rHZUD\nTxQGOJ0dF0x/Z5x0fOef5awsB10XFxeJ9naWZJYC8Xq6fLx9HsD+lwAfVib1woLIMVbNoBA5AAAg\nAElEQVT8H9vktDsT2GXijFGuDX69W6xMiOj287r4s/iMCcrf3mYHz66IYwB5rAvyjPKj0AbfKeUB\n7MjWAZvLE6XqYLnVahUAA0yFM0ywTNEyAizB9khXSmA4HGo8HqvdbhdcLQSRz83NpUOIed/i4mLa\n7dbv9zWdThOQIMAZmQ6Hw7TFfXNzU0dHR9rZ2dHR0VHKui1dAYK1tbUEdnI0/fz8vFqtliaT62SY\nWJXSdWZpXHflclk7OztaWVnR6elpAl8csHz//n19++23qW6VSkVfffVV4Z1ra2taWlrSN998kxa3\nra2txKhUq1UtLCwk0LO8vKxf/vKXkqR79+5JUsrQ3mq11Gw209i5vLxM7tLLy0vt7+8nQLG+vp6S\nlU6nU/3kJz/R66+/rmazWdgphsvW5wxjhnmPLqG+PJOxG4PC3XXF/7NYZZ//LKIwcoxFZ3oAL+TG\nikYbCzPsVHwH/eosDgA3GtgReHAv13JNTq/6LjhnzaOR6Bt7pGICTC/OwvgpEhQHZ36KAQYF48X7\nwTdA+C496QpkLS8vF9IveFvH43Fy9cZksLQRttrXIeonvRw+47o+rhes1bMYIOofx5p7UtCbMYCf\n9zkYRn5sbvA6OVhjjEQwyI5b3K6UXKZ36unzMldubNdeXPidpZKKYMQXRQdb0stb1x2URNbLQUjs\n1AjauJ7nxEnqVkmcWKDi2AYUERYqFrKja97hbJUj9bgIUeLgjjKKCsD/zsU7uEuJ334v1gz38hxn\nBN1aYNLE/vH2OdPjQIqYEj7PTdY48X0x8rHDd0xaxohbH0xaLHIHp66U/D2AC2el/J3l8tVOGg6K\ndgW+urqq5eXlQpZjwATgCCDuCTqh5d3a9fpcXl7q+PhYnU5HS0tLCYQ0Go0U34OVS+l2u9re3tb7\n77+vb775Rufn52nXnscEEbtFXqf79++rUqlob29P9+7d02g0SjvsOJqmXq/r9PQ0ue4YAyh7EoXS\ndsYeliPAhvsAeLguyJvDvYypxcXFgrsUpocUCp7QczKZaHd3V0tLS7p//74uLy8T67S9va2NjQ0N\nh0O9+eabOj4+Tu9bW1tLB+SWSqWUdkG6isk6Pj5OuwRXVlb02muvJRlsb29rc3OzwC7RhsFgkHSF\nL7YsyL64O7uB2+fi4qJw9AhWuRth0fD0xcf1jt+HHH18X1xcaDgcpiNicE3htmEeA7ioD7o4GqSS\nklvT6+7y8RL1irMqnlID4BKNwxzbDpBzYywaSFLxwGOvvzMgl5eXic3lOweiEdDCYJPdPMrHTyRw\nIxnjEqAUZZvbeUeh/nH9Qk86MRHXC5ez61M3VuNa5nL1tcP1aAwHYQ1x74X3oRvXDu54Pu/LxYf5\nnIhEx/9zrj13OVFyHRcBkX9HcRdaDoz5gI/0awRITuNG4BCZnhyTkwMKEYxNJlfHfYDSuQ/wFBdE\n961HEOXMSZSf5yOJbZGKE9cDq/06romWqU9qR/HuPnpVX0Uw7FZyToH7/dTX+8V9+7n3RoDj488X\nXq6dZZnEz/xzmAGnlv0aFJQrGGc61tfXC3JlUaxWq6luvMOpZw/69PPhptOrRHQeIAojs7y8rFar\npZWVldRnyOvk5EQnJyfa2dlJ7IMv7hz1Ua/X1e/3U3+Nx2P94Ac/0L/+67+q2WzqwYMHCfSsrq5q\nb29Px8fHybJ1sD2dXsd4EeTtckeBN5vNxLqwmI1Go8RkEawsXbk3Wq1WcoksLy+ne2GfUY5vv/12\nSs5J8tJyuaytrS0dHBykDN3Ly8vprLvFxUX1er3kaiQf1tramkajkY6OjhLQ29ra0tbWltrtdqrr\no0ePJEkPHjzQysqKut2uGo1GgVX09Ca+oPN+xtTi4mJyTfAd90XWARkAzOOCEQ2VaOGjc3KuL8IX\nYEvctUsdMb58caQ/Yhwlz3QdGHUG88H1jjNu/hljiXrnGHnGGqyF3w/wyQVG+4YAN74uLy9Tugfm\nY9Rx1CGCGt6HG8p1lQNpAICve9zLc/075rvHFzLe3F0GIOE719tuXPv4ikHZORIjpx9zfebXOHBy\nuUWm0kEqfZHzGNFu17cOsNyVR91yBEaq58xvbsttuS235bbclttyW27LK8uNxUhF/3V0jznDEl10\nUtFajbTjrOc6LRn9sfGeWeyY18198Xweg92iq2E6naadE46icduw84jv/IBRp0K9bjwr1ps2I6/I\nBHhf+H0xwNktDiwWrEqnPB25R9nwt/c9snE62etMiS49ZxGhi5G7x3A5W+fWR2SkIruUk633Z2TG\nvJ+x1tzS9oKrpd1uJ9cXiSyx4huNRiFOqFarJRePW8rIE8uScUUbCU4mlob6Xl5eptQBxBhw38rK\nSmLKWq2WPvvss0LsDZZ3v9/XyspKkvdPf/pT/dM//ZP++Z//WZ988ona7XY6YFdSCmzHNcb7iO9g\nDnBen3R1XAsJHM/Pz7WyslJweeO+4jnuNmg2m5qbm9P+/r5qtdpLAc9YpbgUiTdpNBrJyr93715h\nR9/Z2ZmWl5e1tbWlo6MjbW5upjMDcSG+9tprOj09LewgbLfbeu+993R2dpZioWCk7t27p+FwmHZI\nuruHDSndbldzc3Oq1+uFecVZcoxjZ5uIRXJmgvsY/9HCdgYkMuO569wVw5iDKfPn08eUONf4bmFh\nIW1G4H9cY7C1pE2AjYJB8PQPzmTFbfcwsLCfOV1D+3zuexJR3zSCvEulYqZvCmwS7cgFviPLqGvc\nJZZLH0Aak0ajUWBMiN10t7jrLMIxmPvOxqPfyIbuSTd9fXWGyF18zjrSBt9gE9dw5izPi54I17fO\nSLkrPvahrzPU3dtAXVzvwdwiIw/hYI3IzQXKjR0Rk/vbK5obUP5/BFIMpAgK3BXnricfNO4ik4rb\nanOgyoGAd7RPiAi2eC5t5JwsPmeL68LCQmG7KvVmsWTw0TYHQj7RUCbuWnLKmffy/Nh+H3A53zCK\nzb9HmTmt7TS2pylwBezuAeTj9H6ki2MbKQ4yaVsOIHk/RJDubq44IePCk3M7UmIwu4OrWq2mVquV\ngpHZhcOus3K5nOJryMe0sLCgfr+ftrxLV6Cn1+sVYlc86z11pA4eSLq0tJRSIJRKpbTl/vDwUIPB\nQIuLi3rvvfc0NzenP/7xj0k2CwsLuri40Onpqcrlsh4+fCjpKg7mD3/4g/7mb/5GP/nJT/T48eMU\niP3aa6+l3Yaj0SgFuEtX8yDGUQDkOGPy9PRUlUol7aLjuk6nk44CKZVKKdUB3/d6vXS8Tr1eL+RQ\nYwGpVqs6Pz9PQeqNRiMBrJ2dnUIG8efPn6tWq6lWq+n58+d69OhR2jWI+5D3EA8nXc2DRqOh0Wik\nu3fvamVlJbn9mCPlcjmlnWA+cYTF4uJiih9jnNVqtcLxQb4zi52o6BPGvI9Zro9g4lUGqeuZ3Lwh\nTxY5z3zxRp/4pgzGov/tcW5kRwcQcai3z6+cEeSAEBkzvqiLH/HjOsRjV12nOnhE7/rOQ3SX52vj\nuxhG4bqd75Fl1BezAOjFxYX6/X7Sv37kD/d7oHkuhMFBDmNiNBoVdJdv9oi7OP1ZzGFPpUCdfYOJ\n63gHUBFoIWMfM5FA8L99DXH554Ab19br9eRGjwefu8wwSnNrCeX/mYScCNXZHf5GqB4r5H59BxPO\nOknFAemLCgLnebNYp1f9HYFWfMcsZotO8h0pgCmpuGDTJiwMX+gdxMV3uYLMLfgRXcdB7GxNjMuK\nfmvfWeZH2Tiqd7AZg0QdtHmbeaYra1f63j4UucsGQIgF7s/hubTD5ZHbeOAy8wB2Z8d4N0yUB8fy\nPWfKOetUq9XS0RONRqMQBI51CAiTrpNfkqSSg3Ynk+sUCFjs0jVbQyEoGiC0u7ubxuLi4qL29/dT\nrNJHH32UgsVZtDmmhmM/JKVUAp9//rnW1tZ0//79FFv05MmTdN7dYDDQ6uqqjo6OJF2N05WVlcSa\nEf8hXTE5n3/+uQaDQQKdFA5Hnk6n6azAk5OT1P5Op6Pj4+PEhngSTPoNgIRlLynlOgJYwWwxLhqN\nhg4PD7W9va2dnZ00Z2GGlpaWNBgMtLm5mdiTubk5nZycpP53UEdaA7ajOxvtfUVsmlvq5Lwi31Uu\nnQqpHpyN9E0Uvgghe58zbs3zG7DjDKBvkmDXo4N6AG88mBggyzPcMONZ8/Pzhdg3qWhEOosmFRNZ\n+rzkHc6y+K48wEBkQKQiG+vrUSzOZiFv6ueAC7nwHgenPMe9G9EoBJwBPn3XJmsLG1Pi7jT0FzFb\nsY9dH8aYo9zmHTeCWB9z8UxRz+aCyGNx/ZoDUoxVnxcY6nzmYx/miX6MORl9/Yj1jMaylxtz7TlT\nJBWDvyPI4joXWGQiCLp11CxdT87IELGwuuUewZlbynFi+KCIgYyzAJjXyf+HNqYNkTaN9CzFd6BE\nMOagxJknfwaLM5MtHnoqXU92X4jdredACgBBPiR2nEhK1mR0fXqJioa6OECNQLVcvj5XSlKBXmdh\nRt5x4lJmWToAtwjMseLiThsfa5EBcPaOzN8s1ix0c3NXKQvW1taSUjw4OEgMTaPR0PLycnp2v99X\no9FI9YhAEvkhV2dkyNLdbrd19+7dAogmPcKf//xnTafT5KL77LPPUsLNarWqdrudFsZms6nJZKK9\nvT1VKhX1+33dv39fkvTs2TMdHR0l4HJ6epr6C9bJ3XQRDBOAvbi4mNoOqzQcDvX06VNVKpXkAkQ2\nJDCdTqfq9XqFjOrj8VitVksnJyeJ8eKdnjuq2WymNt67dy/lxHrvvfd0enqaZMpuzGaz+RJwnZub\nU6vVSuBxc3OzkJhyNBoll1g0gAighxFAhufn50kG1NkX/hgADMhiDrprzMcKAAmd4Kw548i38FN4\nN8bAZDJJQNqZ8Og2kZSyhQPeXId5UL2zEvSZy8t1pq8hzjr5WpErDgR8HtNXtMcXYWfYo25Bpr5G\nuPHL+yJL78Wv5x3cBzMVE6tSf/SXyzSCUd6L69HXSGf+fL32NSzu1va1MQeE+M4/yxEkcYOQ6+ac\n68770L1TzrgRtM96621Adk4WuNxfVW4s/QHF0agzSpHpcUSbE1xE81JxJ0BkZ3IAx5/ntGPOzeiD\nJT4jx6b4NZHNYEIwyH3iUWcW2lynspDG7cFeF6eQkQ2f+zX+nQMtL+QD8ngDr+dkMnnpoFBJiTbG\nVRn7IMrE2+9/u3wdODu1jPLMKThXsLHvXFnH+3zbMHKPIMzl76wQ7WNR80URGQOeut1uAk8wRMvL\ny2mM40765ptvUl4y3gtbBYhl+ziLvKTEiJ2fnycGCcZnc3MzuajW1tb0+eefp636b731lp49e6at\nra20Q5B8SJPJRBsbG3r8+HGK9Xr27Jmkq4Xy4OBArVZL8/PzOjg4SIrLY17q9bp6vV5ix87Pz3V6\nepp237EDSroCUiS4PTw8TOObsUiM0tHRUdr5GN81HA7V6XTU6/UKOygBbMgQUAXwYoel97/3Wb1e\nT4CUvsAVurGxUdiqDkNydnaW+tF1FSASl68fZYPBkmNH3Z0ymUzSfaQiQDd6pnbmOzl2HJj7tnYM\niWjwsQgvLy+n7Pb+nRtlnljUM7TD0lIf+hvWhkIfue53FsWNUi/oCGf/XScCGHKuNZ6NDnI2y0GR\n60w3DqM+LJfL2Tgtl7e7Fr14HCtydkDM0TC4mf3gc57nTA2/Ac/uPeG+VzEygHlnwakfMqHe0bWZ\nY3xcp8d+BETlXJbUBZzgOjmGdPg7Z3leeF4uts3Ljbn24t+RzYm+TkpcfHM0Y6QcX1UPBzpxAQWd\nguy9XpEx8+flkLkruxy4inFSvuUcpQ1Qoi7u7nNWSnoZlLhcpGvXn3/ukwwQgO/dQZaff+TUqLMu\nTDwGJwoSK9KtgVmgxutOXSN4AUx5P9AGB1q+yLpcZk0s5DwroNzfkyuAXq+bM1goOOk6psVlR+4i\n4p5wRbEpQZLu3r2rg4ODQiyMA0lYDNxOjKm9vT3t7u4m96Iv7E+fPlW5XFaj0VCr1dIHH3yQsp5/\n/PHHKfblzp07hfQHJOF87bXXdHh4qHv37iUgQXwQGdidybm8vNTJyYmq1aqWlpYSuHHZkgRUuorh\nQn5cQxZzADzy73Q6Gg6HWl9fV6lUSvVZWVlJQMzjtnjXYDBITFG3203f+fEh0+lVHqu9vT1JV3Nx\nZWUlMWCueAkmb7fb6XzDmBsJ4O/noqEPms2mxuOxTk5OCpnbfUz6/AEcxAB7iusvNjDwOXLGxReN\nK+obj3rhXvQfAJV7ACjMgQj6kIEv+gSv49Z0dtjdePGZ7tpDNhQHY9Et5HPevRg8IxrlOUOQee9t\n8Lr7WkJxNscZGgBN/C664hiXXj9csLzb+xFdCIDN9bkDKorXjTWH+kc3mt9HuyOQ9M8iGHbihLbG\na/06lyUGNG1xeSF/Z55oUw60eVti/b3cpj+4LbflttyW23Jbbstt+V+WGz9rzz9zZB9jU/w+t/Yc\n4ef8svHZXtyi8Ngbtx6im83r7e5Ify/X5dgTvoufY8mxiyfGOlF8G67/dutAKh6D4vEN8b1uKbql\nhKWCLHiW76BwFoXvoEj53PvJ73Nrz/s6bvF1tixagN6WuLPQ5RDZJi/eTq53H7l/h5xol1tDtAmX\nAnL17+MOKq+DBwq7TGEkCLx1BqFer6e4qtFolCh+6YqZGg6HGgwGKb6IY2Cm02nabt/v93VycpLi\nmVZXV9XtdvXs2bO0sxD249tvv9VHH32k//qv/0pB8wSbj0YjnZycaH5+XicnJ9rc3EwxYGQzx2UU\n4zDm5uZSwPd4PE7PdHfQ0tJS2qVE+/r9fspaXi6XE5MkXbFuJIdkZ6AHeBM4zvNhndi0UCqVdHJy\nosFg8NKhrsyBXq+XXGacL+jZ351V3traKiQp5b1cs7S0lA4C9nHA4cfD4VDVajX1BSVa5BRYKbK4\nx7ntbimKu9LcVcf/uMR87rpudAaj1+uljPgwStQnZm93necsLjqLjSvO9uMuGo1GqS0xLMDnr8sV\nl6WzM7Sf+9zF58+mzh7n5euSewK4lrZEj4W77Vxf8Zn/758xT3x9I1Yq9gn9iEzZnYb7PDJyfiyO\n93OMp/O64bnw58VwGG+Hx4u5nH0NdhnPWr/xqsDoI1PWaHS/uw4jE+XF5ZbDHq8qN3pEjPun3f3E\ndz6IvXN8q2903zld6ILzv7kuuuR8wfVnuBDjzo5IK8b3xLpFwBjrzAGqEZz49fE+qNgczUm7oH5z\nrqp4X44ijyCGAc6ZTXyHK5CJkMsODNjIKSnqk6OUZ/nDXVFEZRgntFQ8UDjGUHkgql/n97vS9B1d\nsS0eixFpaV90AEiMG3fvlMtlbW9vpzQIfiwEu+iov6cVQKkuLS2pVqvp7OwsxSxVq1Wtra0VQDLf\nvXjxQtvb22o0GumsPgDR/v6+VlZW9KMf/Ui//vWv1W63k6xqtVoKIh+Px9rd3dU777wjSfr000/V\naDRUrVbV7/c1GAzSM5eXlxMgKZfLyR1B+0qlkjqdjqrVqkajkdbX1yVdufZIf1CtVpPb0OXmO8U8\nSL/T6Wh9fV2tVkt7e3tqNpupHaenp1pYWFCn09H+/n4hoH4ymajdbmthYUGHh4fqdDppASmXy8nV\niXLnSJpGo5GymhPk78HPABvGFAvZ8vJyWtzYoRmPLJGKqUgYT7jdoz5yoI9LzMeuv8MzYpdKpeQi\nnk6vM9H7/CIHF24fAHG/3087yOL5dcxfAJXPFeYkOy/dYMTtie7y+NAcwPG5y7z1sADeG12CLlPX\nDVEnMt5ybiDqGF2CXnJhA9GYm2WIo2/pNzc6yYPmIRjoYNyH/jmbJnzd8Ho5+ImAzceUgyXuibGx\nLovorvT+i/o3gpoYv+RGu6/VOdn52sp4zgE3xwe5ciNAKsfSxMBct0z43zvKOyAyQ5HpYtBHNA17\nQp1y90cg5ZM9x6rxO8eexMU0AjcUwtnZWWp73GLs9QMYuV/YZexWU87CQTYeb8Dn0ece5T2ZTNIW\nemerqK+DXZe5x1J4wK3X260tnp+bRMjMA1EdBPl1OYYTWXrf+I4mZ5m8n/jbF7PY92z5jkwZz/Zn\nETjLZ8T8UOr1ekoZ4IzHcDhMC2/c2dnv99NuMIKrHfCenZ2lNjabzZRYcjweJxas0Wi8xHL96U9/\n0ocffqjvfe97+t3vfqeNjY3ULoDX0tKSdnd30/EpDx480LfffquFhYWU6yla4+fn5+r1eoWFliSc\nl5eX2tvbSwHXFOK4Tk9PdXp6WkiTQODyZDJJweIebF6tVtXpdFQqlVSv11NcFuOh0+mo2+1qZWUl\nPRNZnJycpHgrz7NTqVQKgfLEsrVarRQXValUdHR0VOh7mCh0FIlap9NpYsSiweBxUNHQ8kLgsxuh\nFMaLL+LEyw0GA43H45eO1XHwHVmG8XicAu4PDw8TI3V6eqrRaFR4htfVAZ0zNj7HAIWA00qlopOT\nk7QLK7e4unziAcBe58hI8W5nYCILhfz8ep7v4DTKPzJMDrCkIoj0Y2Fi2zwONbJYvnMZ2Xl6E2cX\no25HbnE98fWK7xzY8T4/fskL66XLIm6eijsw4666CM6oO2kZ+M7X++hRoJ3udfD6OQDzdnnf5MqN\nACka4gJ3cCS9zPRIxQU957abBWxmuQn93U4ruvspChUUz6CIuxAc5efa4J+76y0OKLaokiDOF163\nrgAsUbFRVxZoH3z+fe47B4IRhfv1Dpx4Hp9HgIEyy2X7pv3xN+/LKUGvD5alKxXkG9NkUBfaFunq\naJFGCj0nZ68b9XHLN/YJ1rTn1OFzTjSHsQFknZ6eJpBBfT0BJYsf/7NhYTQaJYaL+2q1WmKTjo6O\nUp4o6XrXXq1WS2yW57uZn5/Xr3/9a/34xz/WgwcP9PjxY0lXLsFms5mykddqNX322WeSpHfeeUdb\nW1s6PDxUtVpNzIaktOtwPB4nBgr5kvxzOBwmMAjAnE6narVaajQa6vV6iSVhwTg7O9P6+rpGo5HW\n1tb0xRdfJNns7Ozo4uIiMU6VyvXhy2SY7/f7Gg6HSRa0fzgcpms9NxUuQRZ7LHuuI/AfEBcBGODS\n3Y24JH2MUdCdjDVP5AmoRj85WHB9xqYRd6XBXkS9B9gdj8cpU7zrb+YtTDWuXtrIgdy+GErXrj3c\nuJHl8va6cU2KCgwGX+SiPvWxn2PUXb5xIafkwjuiPkF+/n5n5HOAl7nv45428C4M5ly4R2StuAdd\nyLmu7vHwPHde3JXO3677Li8vC260yJC57vc2UrdYT38/oC6u2c42er7C+NychyquM85GRRAY1/FZ\nBMmscmN5pFAALpRIm8YFK+emcQCUE5yzBjlQwPc++HOupPjuyIJxPZMzUoRex1gPJpIzVQwyXAHR\nkpKKh9ZG1IzFEAdZHAw5CzEqsuhT5zeT1d1qruzZSs3fsT7uFstNagqLQW4iu8wcaPO90+LeB67Q\n3c0GMJzlKuDzCOhcbihkbxMWJM9nt5h0tWDW6/U0Xk5PTxPTs76+rkqlkoDU8fFx6mMWZIBE3GWD\nWygqNrKX3717V++++67G43ECPU+ePNH29nayvuk36Sq+olS6cvH84he/0A9/+MPEQJDZu1Qq6fj4\nWGtrawlwbGxsaHl5WZ999pnef/99DYfDxOgQq7S2tqbDw8PUTr7DeibBIDIlj1C73U79vrS0lJ7r\nQLRUutqx50zP4eGhFhcXk8uQHWZkDO90OikDOUAT9ypjsdfrJYYEcFyv19Mi7WARQEr2cgAhu6fO\nz89Vq9USy0jdfVy5geIWd07xs+jEWKiYxwlGjDYwj2CiuN/dRmxzx1UnXemdVquVdjQeHBzoxYsX\nkq4TwgKkHBTAqLreirrdx7XrBY5TYnz4fHNdF9l1xjR18bnhRxbFtcWZGP8OudBHrof8vXEdcjaf\n+nqKCPRWDBuIDBwAweOUGCscVwQ7ik5gjERGLK6l3kZY0EiCsH5hOMb1za9zOVCY53EtdYOduvvz\nXMe6PvZxE+uRc+vFZ7uB4t+9qtx4ZnMq68Aqgo8cEpzFTPh9lEjR+vty11McrUa/uwM9f68v8v5e\nB1252CGvq1sG1NkT03G9M1X8OGp3atgBDP9T3+hqcjlRd7daqR/KyJUcn/E83El+xEClUinEDxEz\nEhk03o2ijdZ1rj0RzDhT5f3p8oiWkPe1T1a3DmO/0W4K73TQ6xnFYZ+k6+SSyJkz1CSlWKbj4+OU\n/RoXU7/fT3ViAfZJ74uW1wVAcXh4qFKppAcPHujjjz+WJH311Vfq9/vp+IRarZb6kKzdFxcXGgwG\n+tOf/qTvf//7kqT/+I//ULfb1erqqiqV4nEun3/+uT788MOk2Gu1WmIr6B+Py8HNhpvT28N1uO2m\n02mKTSLbuqSUDbvVaqWxR7sZY+vr6+r1egl8SUouv/n5eT169CixMPR/r9dLR6D4+Ot2u4nFgFX0\nBKS4ttzNwndnZ2dqtVoFd5RUPIMyxxD5vHUm3DckxKNDYMcWFhYSuPNgesZ/Tic68EC3oTOYU8zR\nr776KqXNaDabBeDi9fF4QPSGMwPOWPFZLO7CkpSAqYMU1wvIl/e68edpW2AZeYfr3siuIXvYwMhy\n07bIcmF0U9/oofH+dmPZjWcMWHfd0z+AJdhv0ktQZ89BFw3I2BfULweGousuelsiGxX7LxdLyu+I\nA1iPXafTPuaCG9gRMM0Ct9Er5d/lvFqF9s/85rbclttyW27Lbbktt+W2vLLcCCOV84NH2s9RfbTK\npJfPBZrFRkl/OUaK/ymvYsmcLXHmxe9zRO/Pd1QeGTF3F/q9ntk310be79aCdL2Tx5/p97q7EuvE\nLbMY60OJKD3Wh3uwUN2K4L0wUl5XLM/4TH93jH2Iso8smltApdJ1+oH4XLfA6V8fa96fbpG5tUMf\nYtESB5Qbd24V81xYClg8/77RaGgwGKQz9Zyx6Pf7Bfrft9Xzbqx+2nN5eXU0DNv/+NEAACAASURB\nVLv5Pvvss+RK/Ou//mu9ePFCv/nNb9Rutws7AwlY55iUP/3pT8m19f777+unP/2p5ufntbq6qr29\nvcQQvHjxQqPRSN/5znf0+PFjvfnmm4VUEH4gM+d/IQuez/xxFwW76y4vL1MyTwLDp9OpOp2O7ty5\no5OTk8KzeD5xUCcnJ8lFSX0Zw6PRqBCsPhqN0u5EUkxISnFtHNexsbFRiO1jbFxeXhZciXNzc1pZ\nWUnsoo83DxFgfrubh0SMuNr8iCfGYHQPuWuOQHafF7QNViuyuDGmkLHIYc/I/euvvy4wb5eXlynj\nvVv73h9eN5cD7fTPYLDRXe6+jjrDC0wGzJjHllFc57vegrF5lYsuriuRifFnoveIRYrhFegw+j4y\n3vQFOoHvnYVEH/FsXzdhgmg/qXXQpT5unPmMupV6OKMY174Y8+WydsaKQqD5rF3m0W0X5eaeIR8D\nUR97G+J64mM/d6+XG01/EMGLL/qu+HONz4GK3MLl18RrEVr0d0dXTw6IuMuM4oPBO5L7ZlGE7Jbw\n2IroovJMuV5PlEKObuV6B6HRHeauPRSauy9zz+Td7pLzukXFIhUP8kVZOiBgkYnxaUxKJpaDRV8A\nfOAjU+ofA1wd1MZ20g+4LqPCwN3CIufyceULre71c1eMj3XiMYiT8bgN6sd5ZJ7ZnN2dBNxyFArP\nRG7UEeVDgHe73U4uPOKAPvnkE/34xz/WeDzW48ePC1n2J5OJWq1WYXz+7Gc/kyT97d/+rT7++GP9\n53/+ZwKKjKfV1VV9+eWXeuONN5KL2uPjut1uOryWQ3wlpYznuNsGg0FyeQK+CDp+/vy5SqVSAoTj\n8Vj1el3T6TTFJVFYmDmrkENfpeucR2trayk3Dy466kVAvR/Ci/tpfn5e1WpVw+Ew1ZU6jMdj9Xo9\nHR8fp2eurq7q4OCgoB8cEDHucDF64D/uLOaTZ8PHJco9vquJcToajTQYDFLby+VyOv7JA+l5JmOb\nnX2ckShduUSp997envb29hLIRK68Z25urpA2wtvJLlPGm8vF9RfzEiBExn2+o63+29vvbkZ39cWY\noXgWKz+5+BqKz21AbXSBUpdXHYTsYQQ8N7bHQWHuulyOLdf77qKkvnwenxU3UeXWtigPX/MiqKfN\nubXU18hIPLixG6/1+nBtdB1j1Hhf+3XMj7hevKrcCJCKbBPFhR3LXwJZOSbqVWDAhR0X4Rwb4u/j\nJ7d7y+vkHcDCnrvfF+b4DAdmOXYJ5ULOHJ/AXEPshreT9/tgoj7O0ETZuH+5VCql+JOc7GKOllm7\nHT12ivt8gDs4iLtpnNny4sCFNrkSnjUuXLEDcCi02Vk2f360RL3Qdx7z4XEVPGNhYUGvvfZaWoS6\n3a6WlpbUaDR0cnKS+ky63ogAMGDXG++LQNG/YxFtt9taXFxM+ZlOTk707//+7/rud7+rN998U0+f\nPi0k1uz1eqltZ2dnaXH85JNP9JOf/ETvvfeeHj9+rLfffjvFQVUqFR0eHmp7e1vtdrvQf8T/efwT\niv3s7EyDwSCBneFwmN5XKl3ll1pdXS0YACwcS0tL6Rw+gCjfVatVHR8fp5xcvnAAwFZXV7M7iZBj\nPJSbpKCTydUROLAX0vXuu/Pzc52cnGhhYaEQp8aGkbiQ53YzurHCoj4YDBLYQg4AeYCGz0tnjTkG\nB7l4igKPTYLRY74BtABEMOCMcQ+oJz6M+DL6h/vo8/F4nOLy+M5jhdyoof6VSiUlLfVzJt34cbnB\nTCMjZ8f8sxyz5HVx/YGs/FoK8qbdUcc7iIk6aRZ7ghxgbCIRwLNyejGuaaVSqXB+I3PBUw/4vdwT\nmUnaHdcSH2+lUqkQExfzlEXZ+H0RSHG950rz+/w53r/kP0PWcY3i+d6W3HiI5cYYqcgeOYiKSJXv\nvTFxh0ZO4H5fpPykIkqPwMgXzdyk4v/oTpo1ESJlGJkcf0cESx7E7c90Cw2ZudWAAnXZxOKDLLJr\n3o4I0FxOEehwTw7J85mDPs9cHCcoLgyXQ2RYfAL44M/JiHpGSjdn6VLf2CYOZEbGUtGV6u2Myozn\nO5CiXXNzVwcZLy8vJ3dSr9fTcDhM6Qg8WauzOsjJZSEp5aSKliesG4kw/Qy3fr+v3/zmN3r48GEh\ne3mlUklsCu0FxNRqNf33f/+3fvjDH+r4+FiHh4cJnJE4czQaqV6va39/P7Xv4uIiBYS7K0u6OhMP\nppSFF7AwnU5TmgG38gFjyARZT6fTAmDl3cfHx2q32wXGl0Dx8/PzQjbxvb099fv9BOaRnXQF6AeD\ngS4vL5PLlPd5biUysR8fH6f6EhhPADjtx40Ia+JuL85J8/HjDDvti/O1Wq0WNg9wuC0yLZVKaVcl\n6Upon+sBZLOysiLpWn+Xy2Xt7+/rD3/4Qxo3tI/rfBfwwsJC4aBiX8Cr1WoBJLixCVij3cvLy6lf\nB4NB2gzgoIPCAuqbiCjMcXeTIZtZBhj/+4acyMg7s+X9BLtHHzpzGg11b4Pry6gzI8ng/eZrIHKh\nbgSfA6Zc3r4zm3t97UA+zvTzXW7N8Hrm5OUGfJS3h7FED5DLgH7MuSf5cfZzFlDKAdJYbhxI5cAS\nf3tnOCDwweyf+//8nXtvfNer6ujv5D63TCMAis/09kj55F6vqrtfF60MR+xOO8f7uNcn8asKiN2f\nH4GGKw1KHNi5+jJR3RqIC0DOEgIkxFQFOTDF/65I+d9lG60l3s3kcmXlcgFE+ER010fsa28/MmH3\nFIX6AZhY7LDkWVA90Sk5lIg9OT4+LgAN2k1eJAABixPt8ASZsGylUknffvttSnopSQcHBwUL2McT\n7Ngf//hH7ezs6IsvvkgxSZIKC/Pl5WViQVjM6ANneZrNpvr9viaTSXL9eU4n3FbT6ZXrDIArXadV\n6Pf7aRcdfQWw63Q6mpub0+rqagGc8myMHZi1w8PDNKbYbk992IXqiw8A6uLiIoFBmEXGiOeBg7Vi\nYQNkYnS4BQ+w4jPfpQYAcZemg3Zip2JsGCwPdcoBCk5emE6nWl5eTiDIgfrjx491cHDwkoXP+Ped\ngs6kkXrAY6Y8/YbPRXdn0U/xIGf6zRdzxgJpMVy3M58B5jD9tG8Wy4PB6u2Nxrwv7BTkgpspMln+\nd3wvYyKn1328R1adfvb1wvUVLmQ+c93H/25oevt4ro8FXI8w57MMa36cdXNZ5tbVSJrEEtdQ3OAQ\nDuVyuTAOkRVj1Q2uWR4Myo259tx95MU7MS5IdFSOvXFGK4KzHGp1cBDBEt9Ht5a/j9/eDlcW/uP3\n+n2UHADJsVk5dsMRtS/s0rVF6otELlbC2+PFB7bL2SeuMz456nmWjN1SoN5Yq94uf2ccDzEFg/e1\nB3DGieHxVrHdLPTOOEUmh2dFxeegKrKHbgAQW+Z1ZSHAFePsCYkuWeR8PHi+GS+0ATZrbW3tpQWL\n3Ee0mffV6/WUUqDT6SSw8OjRI/3+979PR7y4S4ws5AcHB1pdXdX29rZ++9vfSrpKyPnWW2/pm2++\nSS43cgytr68XkpMuLCzo6Ogo/Q0bA2hyNxT9WKlU1O/3tbOzUwCgk8lEe3t7KpfLKQeU98/p6alW\nVlbU6/VSbNXFxUU6VoO0G34sC0BnMpmoVqsVguFZYKrVqlqtVqprt9tVo9FIwDfGazHWvH8kpWN1\nnFGLqTdgKXEn0vbpdFoINneWi884VodYrnK5rKOjIw2Hw5SGg7pwzXA4TGklGo1Ggammbl988UWB\nOY3jH+AQxzCLpqcxoF78jgHV7sql0KbFxcUCm039y+VyYuP8+VEfuJ7Luboo7gWgfa7P0RU5Q5a+\n8jABCuPU3W2xvr5eevF1ahbYQL+78Ukf0Rf0P2CRseRMofclbfF3+LyL6zHPiW3n8wg+vU2RJYtt\np66+5g0Gg5QPD4OD90W2zcNjcu8p1HfmN7flttyW23JbbsttuS235ZXlxs7aiy45qbjl05Eo1CwI\n0+nKiJBz1rn7cyND5MxERNZ+vTM3zkJFdyT1iCyNF0f9/r+70WL7Irviz3Y6lffCXBDP4Qh7Vp9E\ndsotIH8n/QFl61uTx+NxsuTdx4/FBXPjfeIxYlht7pKIQZ65MRNlwv/4+qPF5+xh7BtKjKOItG9k\nP93diwvMZe50+Xh8fcgorrVy+eoYHZI7eh80Go3CziTq6vFafvTIdDpN5+mdnp5qdXU1WZewVH4+\nn7sEkRf98/z581Tvv/qrv9Jvf/vbxLJQTxiX6XSqJ0+e6P3330+uld3dXdVqNT148CC51VzGjAt2\nE/Jetuefn5+r0+mksYUsSYwJ++IyJXM8FrufUUiMTr1eV61WU6/XK7hw/Pw2d7kgU+arxyzh8iNb\nOi5FSYnt2t3dTQcQM86azWZKIIkrC/YHGUyn03TUj+90JZi8VqsVDl5mZyR95pnDYfjm5ubUaDRe\n0jVY+hzl47GCyI34KM803+/300aCzz//vHDWoOvZ6M733WG5mB2uja50nussD8/2eCTYHK7Bvcwu\n11lsDnojegNiLA/1ZE1wN60XxpE/09/HNTzX3cRRb8d0D9ENi070NcjlzHfI2tcRZwvRYd5+7wNv\nB+uWxyH5dzyLMA3azXPdC8C7WfMdL3gfuPzi+3xNoO0cW3R+fp7YXmSLq9xZOu8Xdznnyo3HSPnC\n5YMxN9h84vgOM3+eDzCnKqNwfeLOKgxOFnJKfPcs12HOt0vxyeH1jT5i6pkDOu5C83bxXQRl8b1O\n10a/eK5OFAeCXi9++0Sb9Z23sVK5znROfXzBQBky6aPvmzrlQBZtdFBHvXLyczrZY6GQl4MlL1zH\nZIwuDL8vN24mk6szBQ8ODrS9va21tTVJV2Ot1+up0Wgkl59nRC+VSqrVappMJoXA4VLpamcZrqle\nr5dinXCF3blzJwVJUwAvvgGA2JMnT57o+PhYDx480N7enjqdTsrbtLS0pOl0qrW1NX311Vc6PDzU\n22+/LUn6/e9/r/39/RR8PhqNkrsQMEgQt8fPICMCyl0J12q1pAhZyPgtXcczLS8v6+LiojAPzs/P\n1e12E9B1fQKoJf5pPB4nQNjpdJJ7lTgij4cCfA4Gg7RxgL5/+vSpJpOrc/iI+aKfqPfCwoLq9Xrh\niJzFxcW06CNj6Tr4m+t8LuB+Qx+6C5b7PcM+4BOg7ptU4qJHigXGLMAWF9XPfvYz/eIXvyjMfQfl\nxKjEkAx307iuBQSib3gm+sLnl8vA527UGeQGI5YoGliMQZcr/Z0zkh0k+G+Kt99dl1IxX5LrYAcS\nHrrgxXVWdI85gHRjNxqOHmLgcyjKk/7MuUEjkeCyQQ/6dZ4LDpDHdRE8x3f5cyjebjeAaF+Mj+O5\nfvIHxgt9HGNdGaezyo3FSDnAkfTS5PJdbc5QRLTI4HOrZ5aCiXXIoVj/n/f5/17HaAnwfbw23h9B\npC+yEUhxr6NyZ8vi5z4QmdBY/R6zwOSPu7n83T4ZKLQ5go/c79inKAQGpstnfn5e9Xo9WYIocBZP\nHyteFwdC8XsmDXLwAEhn7mLxseH9y+TKxR9g5ZfL17lwYEpcJihNB7T47OkPtxKr1aomk6st9eQu\n8tgy4og8jsfL6uqqer1eSnYpXSmyUqmUzoaL89APl4bNka62+B8cHKjT6ejh/9nRxzEg7XZbo9FI\nKysr+uijj/SrX/0qtYHz8J49e6aFhYV0jIyktHWfmJxoffti1+l0EgAh2Ju+JUkodWUbPfmLYGZ4\nJgqzXC6nnY2SEgBhwRsMBinHFs+o1WppPLH7ENZoOp2q2WymYzh4X7/f1+uvv54OknZjDPah2WwW\nFmr6pNVqvcRQXlxcJHYIoOnjGz2ZM8zYRQeocJBFDBvv8DkOw8kBxC5TjIb/+Z//0Wg0KuSDijFR\ncS57ey8vLxOQnk6vg+jdoGQsejwl7ZKUGCe/L74PMO9B+sjW3+9AivfHXcC0ywEo8o5y8A0RPCN6\nYCgOUKKxHAO6I5Ci/own5oV/F4sDCNoZGfrIzOWKr7sRADrL5aQJ+jKuHb62u072z+Oa64SLvw95\nU38/gzC3WzIG2Odklto185v/H8sslia6Lf5v7nEw4f/783Lo2d/B9/7OCILid3TmLKZo1jt8Usd3\n8LzYabPAoE+sOJGcYp1OrwPzInjMBWo7xctzc4MxFpdLtIAdFMf6475hMfBJSpbvyETmZBHZTVeC\nUlFhRyvPxwoTOwaMswPF5e7bwwH8OWofWXhQPbvTYHVYAE9PT3V4eChJKdXAwsKCTk5OCophaWkp\nHUrKOIQFKZVKaZFst9sFWhowNhqN0s4uZMN5egCTqIiazaZGo5GePHmihw8f6sGDB5KUDk4eDAZa\nWVnR22+/rW+++UaS9NZbb6W+63a7Oj09TYxbo9HQs2fPtLi4qFqtVkgQiTtrY2ND3W63YD2zm+/O\nnTtqNpvprEHaAWODK40M5rSBnEjIzXNeAebn5uaSK4Ax0mw21W63dXp6qmq1mvrZF3wHh9x37969\n5L4k672kJGvqOBqNCocrw4wRkM9iT9A4bfCAfT5nUwGg2cegu0ApjDV3+zDWAa4LCwvJJeb90Ww2\ndXh4qMePH6cx4uwK4zsyHa6/+d4Xa8aNt5P61Gq1wmLnQIh5mtsUIqmQnsJ3s+JKY065Tndg5oCX\n+vE+13+uRzC2XP+7PGIoRFyrouHphjLGAe2gzrTH74061HeuUWi760fX0dLLa6n/xA06sR8ovuZ5\nyAxj0Osxi5HzZ6MDeJ6z2xEgwyzzGc/if5dnlH8sN8pIRQTti3kEKJ5QMqJzR/VxMeV+f7dUzBvh\nCJdrIsKN9fe/fWDm6sX7/PscUONZkcl5VT1dls66xIGNBeM+aWepooKL7Y1AA/lGpcgPLJJbH7CM\nDMw4cZ3idUXkTEUcN3Ey8z/vyQFwv86vl/QSUPLfcTcKypFrsHLoo5gIkd9RwRPDtLy8nNg42kuu\noqWlpcRmeH14Xr1eTwkfKQCtxcXFwjZ3j+/y3VaSkiXPmIFpkVQAH8PhUM+fP0+AaHt7W71eT51O\nR5PJRFtbW3r06JEkpV1xx8fHWl9fTwf1Ikd2WE2n05RLSbpypeFOK5fL6RgS6frIEel6dxdJO6Vi\nhnrkRjuWl5c1NzdXcDGwQMMI7uzs6PDwsJCvaTKZJFchYxR537t3TxcXFzo4OEhuWMBwq9VKQI++\no54wYIyZxcXFl2SDm1K6XgzcZc11PtaY39F16ZY+xwj5PHT3ui/OGA3Ulfgqxs7S0pJevHihwWCQ\n2Dj6h3Ea89rxXPoqzlfXa/QRdT07O0vPR67OWJCtHpYwB+oODw/VbrfTfaREmMWA+5x3wwRWmLa6\n3nM9lFvzaGduIWc9jADJgUJu115kI70f/bn0ZXRReh187Md1x3VnjMHiWg+HcD3Dc2gPrlpfj3yM\neHtyxriDb9fhs3atOyj2eqObfU3PvTeWGwVS0svAwhfZiJzpeO+UyP64cLhnFqJ06yKHeON1uef5\nPQ4aaENEuLHjve0OinJsGiXe6+62Wf5ip/ula4WCTzwCy1z7JBUUBrLwNmHFovy8Pq5QXKnEtpbL\n1/k9PKgWl5wHHEcLNgJQ6Gh3Fc9iF70OswAY10RXabw39hvuPMaO5yDCxbK+vp6sfZdNuVxO8Svu\nlkRJ+3EkvtUX8BHbPD8/r+Xl5bQVfG7u+pR7z8nD4k+fE6sjKQUj4xJ78eKFHv4fd9/nn3+uWq2m\nra0tSUrs1tramobDYXo27WPx6Xa7arfbhQBnWB+UI+0hfgSgSYA0rkaAFnpkNBolgFIqldK5hZXK\nVdZ1Tz2Aa+ji4iJtoZeuWBfG9NLSUkq5gNx3d3eTpdvpdNJ4gz1ifHKeHfcRXwWrR/txazK2PTga\nwJ0zPAEC5ATyBdENEs73gxEtlUopEJeUGFzP+ML1x5jwTPPkjiII3ecHLEXUP4yBuB7wd2RXuI/4\nQOLGXJ8AfsnHxoYE5Mj1zBt/B6DIA6f9Pmf6vS+8vq7znIVDJ3i6BF/0ud7/dtbvVSW6cX1ceCxf\nBHSxHyJIieuJ3xuD2ykuG/6OOtbf5zJ2Wfi6lGP4Zxm4Odl4/WcZ2G7Ax/v/Uh/cpj+4LbflttyW\n23Jbbstt+V+WG4uRihRg7nv/3691BBwtAK7nN6g3RynPctlFZO4WHc/IMQ/+/FdRuJGFi6xW/H9W\nPR3pe5yB3xfp8ZjUTSpSntJ1nEguWN/jAairU+peR7doYnxEdG1GOtzrQrwKrI7Lm+udiaFNLsP4\nTGfgIqsWrSQKrAL3ujylomXq7Yp18DgX6SpOqFwuJwqeQGLpiiHpdrtpezwuFuk6Lqler2swGKhU\nKqX4GjKnkxLB5w9xVT42fEyen5+nw2vZvYcs2NVG3BZsxvHxsb744gu99dZbunv3rnZ3dwtuz8Fg\noI2NjXRWn8sb90ulUinErszNzenOnTuJ/SFRpnTFkBwcHKRjSk5PT3V8fJzOW1tZWUmuoFLp2o1F\nXxADc3x8rGfPniWmq1qtpizya2trevbsWRrDsBvNZlOl0tUByeyE3N3dVaVSUa1W08HBgZrNZpqL\nsAqkYBgOhymRJzLe399PrkN2xhGAz7Eyk8kkMXkwh6QkINaKvvedZLEMh8Pk9pyfny88s16vp2By\nYsXoQ092yjzgiJjpdKovv/xStVpNm5ubOjw8LMjb52rOZebzJbIgzhI4kxePyYl6kp2TMX4M5oln\nxOOg2ADAM/jt9Y6bVKKXJecujQHV3p7IgsNGodNzCUFzepDfjDnWLpexrwHTaXFnZgyr8DahL5yd\n4ztfE3O6nXti7JS33XUy7BQyiOuxe314D/3L5zw3rmGwgjG9Q9zkEduRwyqUG0t/kBO4lE8JHxdo\n/yx2dBRu7HAvs1x+sZ6z6h1BgdcdheG+aQc2sU1xEsY2zHI3eT1iQWl4YHW81mUQwcirZDDr3T7Z\nAAVeUAg5ypjnRXed74gjZkq6jhXwieiBkF43fPDIxXeLvKrNsW+4z92ptIvJCfij/rwTdwlxRij3\no6OjlFH78PCwENAJGADE+JEZAEtkRDslFVIhAHwBZ6SUYIz6zqVKpZIWUwLPARk8x2NS6Ceu/f3v\nf6979+5pZ2cnZSiv1WoppmZlZaUQW3V+fq5+v1/Y0YSbbXFxUcvLyzo5OVGtVtP8/HzKFs691J8+\nAdiUSiX1+/10BpvH81xeXqbnHh8fpx2R9DeB7wA4z09EsHmv11O73U47+o6OjrS9va2Dg4O0K47+\ndfe5dH3AMeOi3++nLOIARklpiz7uW8Y8fS9dxZEdHx8nN5F0DVYYk65PAP+lUinlEqMsLS1pcXEx\n7QScTK6znrN7FODNzjyeu7+/n0DmwsKCms1mQd4s7tQvtzDGOQyI8LUBGY5Go/RMP42AMcz4xMWb\nc+e7PJApoQmz1iDXGf48gGckAZgjtNHbE3VUDNlAl8Y1g7lLmzyujXul61MafDMJc9/1ZE429LOD\nWIwP1x08YxaYQu/52aouHzd4S6XrHGLueozuthgDhZy5L24q8uczT2IcmceMSUU3I3L0I71iufFd\ne7kYmhzQ8EGSA1ZxMEjF42Z4bw7A5CZNrEeOaZrFhOU+i+xQrDuK3n3Ksc4RgLjcpCJ74pOWCeug\nwLct89utowho/F0OyqJf2weoD+r43BjMx+TOAVvui4faslA5WM69i3fk2pHbXejxBV6wVD0Y0Rkw\nt54cTPM9DAJt9P7qdrvJqq7X64WdRDADsBi8k/iQ4XCYApy9eAoDrHDkxgISGTg/Uw2FBcvD4ghj\n5TE7zWZTrVZLe3t7+vbbb/Xw4cOUNwo5Hx0d6c6dO5pOpwlkNRqNFFNDu/xYknhsCjFJT548SXEy\nx8fHiWEBEMG29Pv9FMDuCnY8Hqvf7yd2DmA3nU61sbGRFO7y8nLqC1IUDAaDtEvtyZMnkqQ33nij\nwJDBMHl/cTizB6n7FmxA6+rqqiSlg3cZF6VSKQGbwWCg4XCY4s88ZQjy9nnlgMkXu+l0moAymzro\nU98VR86u6fR6QwBsmHR1DiPzghgq2k/us5gnSCoaNdTLWXVf1H2BZnx6OoE4/svl8kvj1AOvc7tr\nHfR43zkwoES97LtnfQ2Kuj+uVQ4wYn5B5BO9JP5/9B44cZALnAasxu9harwdvivS5RMBLjokgqUY\nfO/P4p2+frmcATye+oj7y+VyykkWAZEHlOc2RLFueeyYfxeJEq6fRVhINwSkfED5YPTspt5RPtii\nK4LveUZOaLmJwSBx1E+JFsmsxTX3zFyd+Y6BkVv43Q3i9zkzw4DJWRBu8fn7Hb37wIkZdiN17O/1\nz93ag32h0Gc+MWPwJIrFJw3PdUsnBhACpFx2pE2Iz6H+uKV8h5P3xSwFFUEz32PNeQCoL5al0nUG\n7cjuucUWz/9ikrKzzHeYtdttnZ+fq9frpbP43K0wnU7T585yEczurtWYXLFcLqcdgoA0gqt5lp9h\nxnNwk7HFXrrOA4b1xvcUdtTt7+8nNoiyurpaOBDZmRUyfff7fTUajUKeKNyWa2tr2tvb02g0Su8E\ngJEtfW1tLcl0Op2q0+no6OhIlUpF7XY7MVmA00qlklg5ZONZ0BcXF/X48WO9/vrrqR3or6dPnxaY\nw/H46gBlEoE6eDk9PU1gD8DR6XTSdw52XMmPx+Pkfp2fn9fZ2VkK/G82m7q4uEjnM/pi5fmV4nZ2\nUkq02+0EopA3Z+fV63UtLi6q3W6r3+/r008/lSQ9ffpUz549S2kqpGvwS9JXNy4cUPlGDN/pyvcU\nZ914Rr/fV6vVSrnCuMfnsB+ejewAhQ5Ambe+9Z7ncP6gj1Ha4GsV8nLGh7ERQygim+5sHC5v9JED\npZgMljEdjTvXUw4OnFVyhg1DEcDuxp7rwmggUz+u9b+p19LSUpbJiyEYvtHC1wNfg319cIDsz6Rv\nc4QMsvNkrD7O4noRwXGu3NiuvRh/49RaZIn4zF0KFAZhrpGRaswxHfx4TwFGhwAAIABJREFUR8V3\n5wCa05l+X66O/h1/++/YbkmFTowskN/Pfb4TJL4j0pY8n8U0x9ZE+cUyC517O3KTfTKZFNiOXF08\nRoH/fZK7dcRCkdsGTHGFJb28y8Pfx3UoPt8lyL2ePdwXfe5nl5gr92jJuhUFyCHvj7sbut1uco0N\nBoPEiElKO9V8YWLxcmq8VCoVGAkSV6JM/H5yLF1cXOjk5ERnZ2cp7oodZvV6PcU6UZfDw0MtLS2l\npJIcaispHajL4gu44btWq5UWpnq9ntxYzWYzKXRX8NIVqCGX03Q6LWyHR8bsIptMJtrY2Egs2MXF\nhV68eKHz83Otrq4mpklSyhE1Nzenfr9fAFKeFPWbb77Rd77znSRn4qI6nU7qI9/tNxqN0vOk6wXX\nE46i5AFEDnrYoYdsAHMcanx4eJjaT16ti4sLDYfDgpFUqVzn1mGhhHGEjQCcEQ/H+K7X68kQkKS9\nvT39+c9/liQ9e/ZMz58/T/mmfDHF1c0C7s/w+cxY9QU5lriwMzY855VUdEMBUmmjszCeANS/5x2u\nC6NR6+CE/6MBFY1b7wv+9/UuhoJ4mhreDwvpMo4pGZwl8ne6Low6nOc7U+3tYc7yXGfbnY13efI/\nOjwayf5cYta4z+vlhjfPAtC5256x68asy5R+zK0Vvta7fo6pgnLlxoCUlA/qll52wVF8MMRANWee\nfAI6rejvcnYiMkQ+SF4lcO5zVOvM0V8qESjG9nhdIrijDdEycBk6tT8LVTt1THGmyQGdt93bn6Pp\nY3ELgwU8WiOU8/Pzgq88x+xJxVPX/cfllusLlDkTKvYhn3m8hLfP7/dxxP/R2vbvqXe04ACYABz/\nDkajWq0mECopATVXwrS/Xq8ni5Y2AQhZSFhk6vV6Yoj6/b5WVlbUbrc1nU7V7XZTX5DLqtlsanl5\nuZBpfHFxUXt7e0lxHh4eJtfe3bt39cUXX2h+fl6tVist3LSPWKbT09NCP3nQdMwWPp1OdXx8rHff\nfVenp6daWFhIrI90tbAzH8mvhdwGg4F6vZ6azabK5bJqtVqKS5pMJin/0NnZWSGZ6fn5uZrNprrd\nrjY3N1Uul9M5hH5kCcCQPhwMBhoMBqlPPE8YdSiVrpOpspisrKxoeXk5sYPOZLGwDodD7e7u6uTk\nJIE0smkfHx8ng4B+Ylx2u90CK8m4gMXsdruFc/+Wl5eTW5MA7idPnujZs2eSpK+//lrHx8eSrty1\njD36Siq6jaNedcPKmQc3Lp0d9zAAnxO0D9kDUnOMCnJ3vepMdvR8wFTA2ObcgrQ3gkBAkbuI0ANu\nOPmz/HnRoKUNyCgaks6qeH24Ltc+13PIis+cTYt1hX0GmDtrjj5wDwhAy2N3+T0LrPjnrl+5h3oC\n2kql0kuhILTNvSr+3auAlMd45cpt+oPbcltuy225LbflttyW/2W5EUYK1iYyKJE5ir7yyAL4fc7k\n5Fx/0cXiSJT6OOvi90dGJJacHzbXPv6HcvZ7nIVw+pPngaKjdUWJMTsxritHc0pK1rFb0FCZzvL4\nu5z6zvnpvR/cEuJ6WKbY97zLGTK/nrbkfO3RWsj1U2SW3MqLTJ67Jn3sRKvT6X7+90Nn3dqNY84t\nday5paWlAo1PQOVwOEzuMncJnp+fF8a0M4icNUe9vK1xhxdpDIbDoY6OjlQqlXTv3j11u920Uw73\nI4k5t7e3U3vW1ta0ubmpr7/+Ol375Zdfpu8fPnyow8PDFP/jcsC9R0B4ZJ5pC+ySdMXytNttDQYD\nVSoVra6uajqdpucPBgPt7OxoMrlOsuhM3ng8TiwLLkTpipFrNpvpuJrpdJrYHDK3r6+vq1wu69NP\nP02xVbANGxsbmpub097eXiHJKQlQOdAYeRMsfXl5qV6vp4uLi/RMguVxKzSbzUKKA5glSVpfX08u\n2J2dHX3zzTeJKXBXFkyRu0KZTzCc/X5f4/HV0UI+ZkjkCuM1GAzS+2EL6TPfKBB1k3TNMDBXIjtA\niUyLjw1SGLjrkPZ4SISvCc4I5dxMzhbFGBrXMzEkwtch1xMuv+jug5FCfq5r0E/+vwdRo6PcpZlj\nwSLLhHvcw1N812L0rvhuYK6P6yXXS9fZ4WFHeQb3+uYAnulhFc7gezs8VQGyQ4d6HxI3GHWe1zPG\n+VJPLzGeOHqtYrmx9Ac5msxpyjhond6MrjenLv077nFQE104PMMFGRfXXB2je8vb5fWMPu+cO4//\no9uMetLx8TtoTad5o5JAcUSXoVOYuUHigy0CFXff/SWQ6c8DZMQ4ghg46FQ3sSHIMbpgXe5RplER\nOQiKSpD7omsSSt4LSsTdePyNYnAglGtjrg5LS0taW1vTysrKS4dpslvOFwXmideVwnfEL7iSvLy8\nTC6l6ErExYbrp9Fo6M6dO5KUdonNz8+rWq2mLejS1fb3+/fva2NjI7m76OejoyMtLi5qbW0tHdfi\n7QYocpiyu0vn5+dVq9VSfJMbWx988IGm06sjbLrdbiE24969eylA/f79+wXXBGOBYPKtra2Ca8Az\nrzt4q1QqKYfUkydP1Gq1Ul0XFhb09ttv6/DwUE+fPi24KdgNt7CwoO3t7cIiNBgMUgyZL0CSkqsM\nl9vR0VGSHf2Fu3Fubi4BW462ITj+8vIy3YfbHBBFSg3+lpQ2Q3jwPkDq7OwsudIIaOcedE2cL/SX\nAwf/nODpCE5Y6Nyoc/cSICqmHMG1xFxkdx/1lK530cbFGdkChqJbiOvdFUm9GcuzAI0bkjzPQyti\nGINfm3Mj0r4oN+qUixt1d53Xjb+JL5L00noBsESWOYAbQa2fCQjIimttbhMWesANUJeFg+SYQgH5\nx91+Pi6p46xd68jK25aL2aPcCJBy364XX+jcYvfvGfzRj87fcXHyToiD1FmeHLqOFkKuxM53688X\n8pwfexbwiEyctyUu8pIK8UR+ve8yife6bDjV3QdVZG8cAPJ9ZFlif0Vrj+fE2AmXBbFUDGrAA/3n\n8kXeKP7Yxw6iHXSgPHJWcO45XqLypC4XFxfpwF2sLn8Gipd3+nfEBhHg6+OGOBW34KPVyPMiWCLR\nZQShi4uLmkwmqlarqlarKf6G+9bX1zWdTnVycqLDw0NtbGxIumI6zs/P1el0UvyQA8P9/f20oAMo\npKsYqadPn2p1dTXFMnEO3fn5uRYWFtJxNXHss5MNBoTFfnt7OyUcZUz6MTDr6+sajUbqdDqFJKK8\nc319PTFRvjhw7A1xMHt7ey8tYi9evEggjLl3584d7e/v6+uvv9b8/LwajUa6j12OvuuTPvdUErBH\nfDeZTFLKBMCLgwWPkVxeXk5B6hyTQ7vL5XICaHHRp89d5uyymk6nSd5bW1sp0H5ubk7Pnz/Xt99+\nW0gc64Ze9Br4HIxgy3VTLkbGwbPvBHTd5EYqC6TrCWekKBF8OFPO9TFInT6KAIT3UHIMObJ1ubtO\n9vscbM1i62gP7YyB5+icaIi6IRgJBQed/kyuYwz4e6Vr8Ersp7fD11NYRNrq60g0aDFMfSely8lJ\nFwwD2useAl+fXQfnxoIDuly/zio3mpDTBeeMQnR95QZTZJ18MMRreFdOqN7hOSbqL4GF+D11pSMi\nePF2+vu87bENgEe/JrbdFaR0rUxA75EFkl4+P+ovtYvfrkT8ur8kN1dw3hcsckwMduHwnQ/+XB29\nPpGKz4F2d+vNYrH4PE4o6u27jngmu/+cuvadNFyLIoqgrFqtprPMottlFv2NFSldMwnS9YLkIMnr\nNZ1O07l3npiRxXN1dTVlB8d9c3R0pNXVVT18+FDn5+cpaaV0BQYbjYbq9bqOjo4KFvuTJ0+0sbGh\n/f39lG+KOvf7/SSzo6MjVavV5KKinwhO9vY1Gg0dHx+n3Uunp6eFvEeekZ30AsiEnFGNRkOLi4s6\nOTnR5uZmeifzgZ2JjKl2u62jo6NkXVer1fTM/f19dTod7ezspN1HAJpS6Srj/GQySekTkDeygAHi\nsN04xp3RpX9xP7G1HBat1WolNyRnEXqoAAA6MiTMGZiDfr+f+tDPLWRe+Fj3TRi8L+fSY5FzfcOC\njqEUd58xD6LrKRq/OSOTOZkzsHie3xeZKN9oMZ0WdxtG0J9ruzPcABpf8+LGKC/oPndVcp/rxOgO\nQ0dQF1KyIA8HQQ5OMbwBRQ6IeP/i4mJqkwMR5rsf4Iz80PvIyF2Jrr+jjo8g2ccw7fVdlnyGfnOg\n5f1OX/hvD0GJfeN9MKvcaPoDKZ+cK9KZjlgpzjRxbw64IGyu8XdxTWRXfEJQfDJEEBffGRWT9HK8\nTGRWqAcd5laGA6bcJOUnunpoAxMkKk2UjLt2vF3ezthfOTbJnxFp2UgZu/sO8MQzPWmey4G2e2oE\nnxA5+tstsRxwncU00g9eF1eE9JUzOa7sooJ21ysLIDuwAFXr6+t64403tL6+nurqOxMja8Nht6QT\n8LiUSG/7mMKdwnURLJbLV0enNBoNbW1tpQSRz54908HBgdrttpaWljQcDhPImk6nGgwGarVaWllZ\nSRmupavdZ3t7ezo8PNTe3l7haBVPM7C4uFjY8eOU/+XlpQaDQTqS5PT0VPfu3dP8/LyePHmi7e1t\njUajBCbm5q6OV2k0GmmnndeHfEcsHN7HAB2+Y6EiLQI5s0qlUooJq1arunfvns7OzhKIoQ/b7bYu\nLy9TXJK7t9F3MG++MJRK11nbkTHfjUYj1Wq1lJ9rd3c3AdDT01O9ePEipeAYj8eFQ6JhTJlTOWaF\ndzmT5Qu4J22kPrTB3VWMMfQkfZljA3yR5XPACzu//D76iXnncpvl9YgsSGTAoqsoMkNeT2f/3ZCL\nxm7Uy67TXDY59xzxaPzv33G9h3fwnR/6LhWPtHFQ5M/FMPA56O+gnbwvxlnSB143xpqzRN4XDvD8\nN/L1dcRBDr9ZA/jOCQPGm6/B3t4IYn0cxnq+yq0n3XCMlAtHyi/S/O8LofQy2+RsQRzgkYWKdWBg\nxIWYEpE59faO9Pf5AMm5seKE4V5AD+4G6eWtns5kAECov8fm0C6PDfEFwwdTjrqMiiNaVTw3IvUo\nC6flkQvWq7s3PLeUTwwHq5EN4kw0+sLb4vWgbx2gM54iaMTH7+3zc7pcBg7+vF1cc3l5mRYiLDIH\nMCxu6+vrWltb0927d9VqtQpxOTArvM+34RKYPD8/n7b3u9XmBoQrTrJrw2R4WgHaDuDb3d1NeZSI\nwWExJiGqdJ1p++DgQLVaTffv308Le6PRULvd1u9+9zv95je/0d7ent566y1J10wHBg8uG+pNIkT6\nhe9gVQ4ODnT//n21Wi396le/SvInMJtUDp7G4ezsLKVmGI1Gunv3biFLfrl8fUwNzI10HYD8/7H3\nJr2RJdcZ9psDp5yYHIs1s6pbPbpbguV2t2XBhlcybMswDHjjP+Bfoj9hLQwv7YU2hleGBcmyBQuW\nujWrq4fqanYNJItDzskpM78FvyfyvYeX+gADH8oLBkCQzJv33ogTJ87wnhMnOELm+fPnunbtWppf\n5rXX62XQquFwmHgV+nkZg9FolMoODIfDjHyglpTnW9EvQsHUGdve3pZ0nlvFPLJemF8M1tFolAw6\n52EQqfn5eXU6ndRPakjxHa+6Lk2rsEej0NcNfEe9sLzGPNAfZFrMu8LA8qOY8iIH7nj493y9u0Mb\nnW3+J18MY8EdZl9zUe5LU0PSc6/4ftRR3m/KYWD4eA4X33MHmXdjCLGuvOioy2r0jL/bHVZHcV0f\nRPQvT3/QkLNuYLpsHo1GF4w8aZpbSl2yKBd8fhwsoXnZC3cQ3Dj3vjugEJ9Hvl2e3k79vvTKVbtq\nV+2qXbWrdtWu2lX7re2FhfZiTBQvn8/ycm8us3zdMnUPK3oljjzkvdc9s+jhRE8DT8bvc+jb/6YP\nHhbKS8b0xOIIDXtIzhv9AK3KO/QRjzYvHOf/e5/IwYgoHda6JyV6rk+E7/E2yGVxyNmRFeYh5k9F\n7895hmt58+vzEceK5wV8DO0ZNyEmxu4oHp68lx2ABowbL8w9R57n26qZy3a7nRCMiHKenp6mzQBx\np8l4PNbz58/VaDTUbDbTLi/6zI9D7tI034Px+hhHo1FCB4H5QQnIX2LnXa/XS8jN4eGh2u12Qis+\n/vhjbW5upv4sLy/rzTff1PHxsf77v/87FeV89dVXM4VGHVVrtVqpTADhS0827na7Go1Gaadhv99P\n+U+lUimF+hqNRgZd4UzAcrmcDib2eSwWzw8JJgne5Qhz0Gq1tLy8nEEj2IHHcS7saINGVKSuVCpp\nLsjfAl1yFODo6CjDl9IUkQINJFwI7aVzhJPiqCcnJ6lgKWOgsCqoMHRhbguFggaDgVZXV9OOTXjJ\nQ321Wi2Vm2A9sCvw+Pj4QhK7y9cYwsvLbyI5n2seTnMZGWW1py04isM4kGFRXvBc+gPyTp8IZ0EL\nR5cd4Xcki2f5mqbx/Lx0Ft6Zh9ZEZAu5m7eVn3yhvBMPGKOnD7jcjkg98iymvXjOKYiPo/eMg3fF\nyuesOUeP+Bv9R64f7/PfzluuS/NymgqFQioZAdrL+JiPiEySo5m3EYL2Qiub+0RFZZgXxvN7vTk8\n6kS9LByYd837gjL1PBoPsUVF6VClh/5iHz3c5de9pD2Lwhl/ZmYm1X+Ji9H74QvaG4ImCo4ovKLh\nyt8xXMq9LK7LaBgNKRcAvqsGgXd8fJxykrwPeVuVoRswLcnePhfQLxrP0ZCHhtI09MGzyuVpzR/p\nXLmxUH1buYeX83InPNRBKIZ8H86oQxF7Um0Mx7oQw3De3d3V8vKy5ubmUs6Sh0ViCLZQKKRjYhCc\nGOCEBfr9fjKgyGfqdrva2dnRcDhUt9tVu91OFdEHg0FGIEtKdaSq1apu3Liht956S++++6663a4+\n+OCDNBe/+7u/m+hNjRtoTV6UG48+9kajoXa7rW63q+vXrydDo9VqaTI5r8x+7do1NRqNFE4iVLSx\nsaF2u62zs7M0RsJwGCrk4EjTXXDkt1Wr1Qy/nZycaH19XZ1OJ9WEct6gDpjLDK8eDq9ieKEA4VUP\n73h18Gi0HBwcpPAeBwjDQ7VaLSlMjDCMTww5Qm++K3MwGKTSD5PJRPv7+9ra2krhaY6iwSnyDSMo\nKF93zivufPm698R/1pobC8hiD81JyhgnGL+eM+gyNG+9IpuibMOBzHNm+U7si38HI86dF5+bPBmH\n/I+Gp5Q9DsodTO8jz3TZ5/LOaU5/XIchAwgzuoHiuoA+Rb3ndKNFh93H5M+hb8PhMHNMF9+LBhXP\ni8ZfNMCgqYfMXaZ6eJMxuKGa116IIcXCcYZmwl3x5RlNlz0rzwDLQwz8f97B4nHvjwnweiVSdicB\nfY3GGoT3Fo3GaCzEBFC+S2yYvvg1mD3G+Z0u0JUfZz6PCUevEPrljSV6UZ4/xf9R6UNfvuu5EG6w\n8l0/c8nRPV/4Lizz6O3NFwbNhQ3KjIKYfM4p99K0TIHv4PFFSt9Go+mBsp4AC+JCfhHe/a1bt1St\nVtP/PkZ/ni98+gfNdnd3tbi4mIRrr9dLAjFv+zD5McPhMON50zyHBcTm5OREvV5Pg8EgGfa8n2NQ\nQCZmZ2fT+XW9Xk+7u7va3d3VO++8o69//etJAf/sZz/T+vq6ms1myvXy8/QajYb29/eT0QLK02g0\n1Gg0MsebzMzMpOdKSjWbTk9Ptb6+nnK2QBzhH/f6QbLckIwHLC8uLqZEbwzpo6Mjrays6PPPP9f+\n/r7G43EyzvDIQb9w0uClSqWiTqejk5OTlCMHT/nxJ2dnZ5mjXsi7QslCm263q0qloqWlJQ0GA43H\n43RftVpVv99Psuv27dvp/na7rVu3bqUcqclkkkE52RVZKBS0sLCQ8uLo69nZWeZ4p+goueyK69Ud\nzagXaKenp4nelL2gxXIK8H1EtZAD7uS5HGAMHnHgmTH6gHInh4bPHZHy8UQUzJ04v4f74jvdaHeU\nnmuelI7ecBnPNf73HDeeyz2+s4659PpcHiXxvvF3RIgYX5TD5fK0Ph5zLCmdFdnr9XR0dJThq3K5\nnDn4PebpOu1wJPk8LyGfZ/qZgP48jyxd1l54QU5XRLHD0RCKVn7ec3wSY2jPmxtWIAwIG0dA3GCg\nuVJyJc67Y7jJ3+fM5EzMJMbdML5DAi/UPSAWKko2IjKXIXL+WTRE8CJ4v9M30t7H4EnoGIbc77tB\nCoVCStiVsmfGcTCvJ3g7miVlC62x6NmdRPOQLf/TfGHxf9wCTL88JEphRTd0HBWAlnNzc6pWq7k7\ngkCjVlZW0m44BBhKcTgcZubYlYDzcqfTyQjU58+fJ/QM2hBm8uZnUFH6gO9Qe0qahlxdmZCYPplM\ntLW1lc5a29vbyyjVQmFaXHJtbU3r6+v6/PPP1ev19M477+i9995LY/joo4/0+uuva319PbPz7tat\nW4nH2GVHTSsgen6q1WqmmjjoCYVHZ2dnk0G4vLyc6metra2lZ9AODg6S4u/1eokX6/V6GhPolBsN\njx8/VqfTSYg28gQeps6To06gJZPJ5EJtKuiIMejXUD6TySQV9GRdLC8vq1qtprAeuxelqYIaj8eJ\nV6DL4uKiSqVSQis9SZnDqOFJisYiJ1izzj+OADhq4vLGx+jKj4bB4uuB3474ufJz58vXOPd54nKU\nfW7E+qafKOejPCHsGGU7Y4qVy2PDOOE6eiAqf8bgqQk+bu+r6yEHCfh7OBxmdG4EMByRw/Fyg42G\nER3RLZ4BAkoo3YujuhM/Go0yjgmHbiMTfC4uAyy8FAwFhGNqBjrTZT3Og28K8qjG/0lEKioh6eKO\nvTwDIFrs8XtReUYUKvbBITtnKPcY+a4r6Rg6dI8xD53y/x069cbkgWK4IYXgjQuL+1wRRzpGQ86f\n63TLW1AufBwh4zs+bj5DaeWF9tglBs0Zhx9jUSwWNRgMEvrjyB8C67LcABeQ9Mtj9pE2eB+E86Sp\nUef5OI6UORSN8PP5K5VKqbhiXHiEJ/CSab1eLx0Ey04zlLCjt3hhLqyOj48TD5ydnSWlyMGxfM/7\nipD2XWhueJ+cnKSjZUBnaDMzM/rkk0/08ccfq91up/ltNptaWVlJBwR3u13t7u5KUipU+fLLL6tQ\nKOi73/2uvva1r0mS3n77bf34xz/O5M8xBs/LefLkiVZWVpJBACJWqVTS4byLi4uZStuHh4epLldE\nOqgGHnM2MIjn5uZSuNLzi1Bi9Xpdh4eHicbj8VhHR0eqVquJDhiE5AdijOzt7WVyvQhZYcDAN5VK\nJYUoaV7eASQjOhGEZlutljY2NjQ3N5eKo8JHlUolGVv0BZStVColQ5F1we5Q1ujW1lYy4JgrR2Th\nL+aC9XuZIQEPe9X734YoeJjPQ3fcVywW09qgf1L2yBKfU2kq0+C5mP6BDIGP3Mnk+b9Nf+UZPN4c\nwY7ymXEyFpe/7sBznSgL44sABQ6f6ygv4hlRGZ7jMuiyCI8jWh7xkZQxbAgjM1cuF3mO53rmyWHu\nh0as30JhehwN8oSadfQXR5LnnJ6eamFhQcViMcOHkRZ57YUhUlJ2e6kjNlI+9HvZ/x7LdCMEBssz\nplxxwFS80xPOohGH8OVaREriePIQNg+5eYNB/HMWz2WIHEzkXkikny90f5eUzZ9y2lzGOL/Nq4qG\niqTkwWNYuEfkW7lRNggSlLd7K3nGCc+OxpmP0ekkTQtXYvR55XA8E2jgiA/3AS37O1C4hAJdUDnt\n4MlWq5XGuLKyorW1NXW73XRkB7TEyKMf5JHRH/rEewhDgSwQKot1bwhpoeTceAZRYS5JKB6Px3r0\n6JEePXqk1dVVvfPOO+mZGF+DwSAJS0J70vmxJT/60Y90+/Zt3bp1S//5n/8pSfr617+uW7duqdVq\naTQ6TxynL9TNarVaSfG7x8r5dZw/eHh4mMJpHJ3C2LwcATWYSqVpFXFQJzxnjFQ3vqrVqqrVqkql\nkvb29iQpjRGkgxy6crmcQolSNjG20WgkgY6QL5fLKemeNeDH1BDm8/XN9v88VF6S7t69q1KppKdP\nn6Zx0n8PbRLGrdfrajabqWJ8THyHN7rdrj766CNtb28n3vDk7rhJhfe5Yo/IOTSYn5/PGKfQlnn0\n8wvpG0fe+FzxTi806++Dbi7PovyMCdXRAXaHBqM2bsN3Zw59EeV4RG/ynu/6CXkdoy8xlOp08Ocy\nP25EQn+nNXSg8VnUGc7bbigxNz7XbpxLU+c30gT0i/C56yScAfrAdfrGuNA1rMNqtarhcJjOksT5\n4X0e1o3AQp7u9XZV/uCqXbWrdtWu2lW7alftf9leaGgvxsOxTGNYioZlmBcuk3QBXuX6ZSiKow6e\nyItHGa11aZpwnLc7w3dm0A+3onkm34khNA/rQReS/AjNxHi3NPXOYsIl93uOgickYrF7aMPv9/F7\nWMjzFfx7oB6Mzz0sL3LJM+KuGI7RIL4tKW0/d8g5eoqEqEBhoI3nUPl8eg4KeVuOnPnWX0/mhWY+\nB5E/Yq6df+4e29zcXPKUlpaWtLS0lLbF+8G+HrpkDhw98d03Hgahyvh4PE4Jm3yX890Ijzl0DX3J\n83Ke+eEPf6jHjx9rc3NT9+/f13g8TqharVbTvXv3NB6Ptb29rQ8//DAViGw0Grp586YODw/161//\nWp1OR6+//rqk8519GxsbOjw8VLfbVbPZ1N27dyWdo1i9Xi+Ny+F2UEOOueEoHBALwqP9fj/xFfNP\nAje5dRzNAo3L5bIGg4G63a7m5uZ08+ZNSeeIzWg0SiECCpTS13K5nBK8fU15Mj+8xPomz4jQ7tHR\nUSZ3rlKppBBj3GzgoSA/mqNSqWh1dVXHx8c6PDzMhGZBNiaTSSoNQa4UoT7CIn5eoKMGBwcHevbs\nWQZ54JxDD+nAUxQP5Xnxe47Qe0jQkVb+j6gL93jyM5EJ1qnv+IKXyOmKyc+g1NDfdYK3y1D5PJkY\n9VhMGne9wXOZ7zw94zoNGefj9wOkY46rz2NMRaGhZxw5Qg64TPMTeIfZAAAgAElEQVQUiUKhkNn4\nwXPJDeNdcTccz/Z0DX5Dk5mZmUyJEs+Roq8+DsYdU2FYn41GQwcHB5mdxsgEohuMA3qTunFZeyGG\nFMrpMqOD/z38xvUYMosQZzQAYj6T3wdjeE6P95GJcKPHq0DHfvLMvFCfK3ZPTqTPl+UnMR5PRI2L\nzePgzuRudPhhm/78crmcyTuBVozDv8dvTxr3PhMKYes4Y3aa+o8rG3YkUaPJc0HcQPSQKELZ87Hc\ncKFPvguHZ7oBzmL1a7QoZKELMLXTF8FLnonPh4doKYWAUV6pVNJxHhhTHlpzge+hamBtpy3t6OhI\ntVpNMzMz6cw4F3RUDC+Xy5mzuNhtt7KykujKmXGtVksvv/xyygH65JNPUj//+q//Wt/85jc1mUz0\n7W9/Wz/72c/S+7a2trSxsaHr169rdnZWn376aXrf6uqqms2marVaypnAEF1YWEjG7vHxsRYXF1MY\najAYqNPppHDY8+fPUyiAeTs5OVG1WtXCwoKePHmSqpC7oGXnWwwZd7tdNRoN3blzJ/EpBomH3uAp\ncutI0vZdqXNzc+mYGkK3zl9UT2fsbiwQNiL07ZtAMIow0n2LOs5Ds9nMhDAItUwmE62urqaDpKEp\n/YG/WWvwt6Rk9B4dHaXnDgaD5MwQBvPQPXK2UqmoXq9nTifAWYoymudgXGHgMYe+/d/5P4bnvOQB\nOoS16u9zY47m8pK1F0M9Hg5i3vLe50Yj11y+eGM80RBivA4OxGcQBsbZyDNIXZZAN095iLlVPge8\nz/O5XPZQwZ5rrudxeumnb0iIvzGS3NihLxFEcAfa6/ThFDLuYrGYSs8cHR2lXansRqY/MfE/LyTr\n7YWVP5CmsWU+c2MoWuBuIPl33cJ2tOeyez0eDaFhnFj+wBcgzyQJ0S3heK6Rx8TdMCKZj8mPcWhf\ntO4JOnLkAt8RIJ7jaBFjcis7Wu4umJyJIxoXc6rcKHNDB6HIFnNPOK1UKpn6Hb6gyI8gNyXW0eIZ\n9IexkBzoBp40PYeLz/0QW77vgtvH54LGBS3KiyRs956ZGwS078LkXkfEXCnym+e6kVmpVDJCGCEq\nTTdFYJi5cVgul7W9vZ05FobGdmKEjOcPNRoNXb9+PR0KPBwO9cUXX0ialkTo9/s6OjpSp9NJ1+7d\nu6evfe1r+uyzz/Rf//VfevDgQSYBFPRnc3NTZ2dn6Yy6L774QpubmwkB4+gVaXq4cKfTUbVa1eHh\nYernwsKCer1eMmYWFha0vLycFDv8B/38oGTnW+bX+ypJL730ktbX17W7u5sM3mLxPHEVXn769Gmi\n69raWkIbG41GJgF8PB6r1WppZmZGzWYzs2YpU0GJh9FoenSS57dAQ+dv1kChUMjk0LCearWaBoOB\nFhYWUu7c4eFhQhpJuPdSIyA4c3NzCZVi7unHRx99pF//+tfJGJfOdwqWy2W1Wq0L5T0iCu3rwvO/\nkN+OxtMvvutyEdnA+o1OKHI9yjzkjxtqPB/+QH66QeDvdkXrxmNMRI9Ik6N4bpAgV6MT50rckRzf\nwYlOcfni80g0g/FHnRbpTT5qdL64lmdw4pgwFjfc3RjyvkAXdKkbVjEq5QBFdCjcWIJezCEODs8g\nlxEnAzS20+loMBio3W4nECCijpcZvdILMqQQbl4dGqXmFnQ0iPw3zY2qy8J3Pil5VqUbINLUyMqD\nQ52JokdAX9x7YUF5COUyWNhDV+4J8ByMg7goHLXxcTBmxuf9QcAgkECSpIuwebT46SOM70JiPB6n\nitNUUJay9bAYX0wM59BdR8mkqeERz4byMGM81JR+etjTFx8L2o0f5oDfEYJGgLK7LhpZ0FVSQpbc\nS2auKpVKpj6V0xtjMHrJrJXRaJRJoHXUz2FsSaleEPzoyCOKhERdlB51jIDSt7a2MrA6zxiPx2q3\n2/r4448lSf/wD/+g999/X9vb2zo8PLwAxX/yySfa3t7WX/3VX+nOnTsp7Lezs5MMZ3amgqy4MUsi\n/r179yRNE0cRsLVaLe06o62traWk8MXFxTSPviOS/32HIyjZw4cPNRwOExK1vb2dDKnBYJCSsuE/\naludnJxobW3tgsFPmNWVQqVS0XA4zChGxo+xzvNdLrC5wQ0FD+d7EV+MJp7dbreTMePoAcU74Tl3\nYpCjn3/+uZ48eaLZ2Vldv35dd+7ckTQ9KJn3sUNXUtrMwRp1p4KxoUzdWMI4gM99rKyZPAeTPvj9\n0fHOQ3s8zCZdlKWuk1wnuEEQ54JrbuRFHYSh5P30Z0QjC3q5LnFjxdd1XuiL70cnAoTb6Ro3C3na\nRtRjbvC50wK6Sd9iaR2fozz9nocGuc73uYevmV93LpGrDqLQz8XFRZ2dnWlvby85kI6AeXQpr70Q\nQworGQUhZb3DSNQ8WNIVTbRw/buOcknTxQETQtTotdBQwu5d5oWrGENcsL7wfBE5IzIekB9nHGcY\nqj5HONQ9qZjP44apI1NUTHakxoWU09f74yGFuPBR9qAEvssKo8qVZJxff5eHEjGGIqrGIuH7cUGB\nHlLMLQpI6O4GJgsQNAuEjO9HWrkwdto42sd15/E4RvjBjQefY//tUHepVEo09R0yGKSE7bygHcq7\n0WioUqmo2WxmtrljdJ+dnen58+cJIVpfX9f9+/f19OlTPXjwQF988UVCqbrdrr773e9mlCdCqtfr\naX19XX/yJ3+ibrer5eVl3b9/X9J5uHB7ezsTDnNjYX5+XoPBQKenp1pdXU3ywncyIRN6vV4y9Kgb\nhWEKX0rnQhM0HBkEbywuLmp1dVUffvihtra2tLe3l4wxPHz3bL2AYK/X0/b2tubn53Xt2jXdvn1b\nklKxVZDGWq2WUSz0BYONNcOBxI4MOArhvO/PPDs7SzV0vCSH8zwGOAcsQzPCUp6GQD/ho8XFRb31\n1lsqFosp7Ht4eJhqXVFSwQ0RR4fc+I9r3sfqDnUeCkLzQ5kZY8xDik65p1e4PPdr0RH250MTb76m\naY42RV2U56y7Lotz7LqFZxP6d0cRmmBQudz3vkJj+kzJC0nJUXCZS6Qhponk6U9kH+MnxcKjHXnp\nON6vmEfnY/P38CNNkTGMwIjSQ2MPl/uzrl27poWFBbVarVRKBRpFw9rbCwvtIQTpHLA4CEqeZR5D\nTnzGdy6D3pyJnInzlC/XWHwwsDOvCwKfKDxNlJ0vYO7xYmSeK+EG1mUT5go3b4x+rxsXLogiIsU9\nk8kko6QQYpFGLtjwIFGAIFInJycJteFeknqHw2E6Sd5hW0e3HCEhbIWX7bzB3EAT7qFRP+gyHuCd\nk8k0eRyli6HpwtJp6CFO5wueQf/8fkcb3SCiinS/38945v5OTwz1CuhOtyjs+RkOh6rX68kgWF9f\n19LSkubm5rS8vKzV1dX0TN5Nrlqr1UrXqIL9+PFjHR8fq1arZbbv00/6Tu2ir33ta/rnf/5nXb9+\nXd/61rf0wx/+MOUr7e3taXt7W2tra6rVahnDnjPkMIYoh0A/q9Vq5oy9vb29JPxWVlbU7/e1vr6e\nBCcGg8P/rAPmaW1tTVtbW/rkk0+0tbWlk5OTBP8fHx9rf38/vcP5u9lspurr7XY7U3/r7t27mpmZ\nUb1e19zcXKb6er/fTwavo42SEj0KhUIyqHi3r+3Z2fMzAb1ulTRF/t2Bm0wmqlQqiWcoc8A1UM9S\n6bxWlssitq5vbGyo1+vp0aNHevjwoSSliu2lUikpYS8O62vC1wUKDKXNPHMfSiwiNr5+ovHiKLEr\nY5rL8+g054X3aK47ohGV1w8pq1Ni//yZ7tzRMHrduPNnIJ9cDvh7MaJ8vNzrBpwbGsgRZDL85jly\n3i9ohV5wB8XfxbWYfM+8eppFpE0e3fjfIz00dBkbHBxVjYac1wpkLeGY8pvNJXnzm/py6ZWrdtWu\n2lW7alftql21q/Zb2wtBpGIYRTr3EoHwpWyYzi3ePGuY7/O9COu6BexQpSMMjryAfLi34Ie6ekJk\nDHvRp+gh+PvjAYz+DPrnniDjImnav4MlHT2bGELKg7h5LiiEJ56DAGHNx/FyL3A/38HjYOs176PE\nAIm0bFmHbtwbkzNp9IW8J/rioTkPJ0lKOQI8O26rZl79fXg5EeHxuQHBil6mh1N9bqWpxzM3N5fQ\nSMJpq6uraVwk3Tp/gyzxOeE0kCLfbekhI77f6/VUr9fTdn3QJGjFOXHQrNPpJN4i1wKeAtUajUYp\nX8jnIo+XqdYuSd/4xjf0y1/+Ml0DceQ+RxUJNbAb1HOZOJvPER08UNqtW7cyyKgjJLybfuB9DgYD\nPXjwQHt7e+n577//vqTzbf/j8TiFNwqFQipI+ejRI62srKQDmmdnZxOtoNft27fTGLwEByUH5ubm\nMoc2exXpwWCQCZl4jhDoIfME2g/qRC6gNEVx4fFGo5HhH/hyfn7+QkHRfr+v58+f69GjRyn06QUN\nQZ3Zru6IqyO5jojQN5DgvDwZR2Sct8bjcUJdPKzGu5ALLhNiuDFuTuJ95MS4fnG94iiP6zPChY7y\nECL1SEecQ5c7NO5hw4HrLniDNY8c87Expx7qBIl1FNuTzPPSHqTz0DU5qjEq5KkXrGNPGifKwPOj\nXM3Tz9I0r5aiyR7S4zehYPrNOgDh96OaYi6xj4WixZ5o7ykdHvbMay+ssjlE9wUuKR1NEaE6Wh7T\n+zOjoRCNMY/d+gKI7+NZTFaEuJnkmAjnCXdepZi+8jyH9z0/wPN+fFw8mzwG+uGhIs87wvCAKaBp\njEU77X0MMHDMEXADggXt4QgXFJyTJCmFH+iPjwNY340dF1okqca5QKD5dQ8lukHk4QVXpIzf+YT3\nRLjZ89ycd7jP5zEaYhHCdv7D6OHvvIZwZ2cfY4SWsb4ZCobkXg6klc75qdVqpcOSS6VS2gZ869Yt\nzc/Pa29vL13rdDpprJzfxgGyvrXYeX00GiVF/OMf/1i/+tWv9NWvflXvvPOO7t27p0ePHiUekabJ\nnp7PUSyeJ0k/f/48JTcj3I6OjlLdKencAKrVaommCwsLqcYU+W5u2LlQ9TDUb37zGx0cHKhUKunj\njz/WBx98kIysV155ReVyOdHl5OREL7/8sqRz+H9vb0+dTkfb29va/H9rbdHXR48e6fT0VNevX8+c\nN4bh7IYBfMORMfCTH6jteV3UrfJEe2QT8+LhDPKgeJevXwwPwu+04XCojz76SA8ePNDu7m46UxDD\nldINVHf3texhcH77eoOPvNwD19yAiakL7iT6Mz30XigUMgc/j8djdTqdjKK/zMjEmXCa+t9OH2Rm\ndKaQ2R7ii4aIO/yuh/xeNz793Dv64sayPwPZFx1Txh7HwD1OX0kpzNdoNC4Yux5mhA7uQPIdZBH9\n9ntiaNNDdhhwOOzoi4WFhcRzHr7zvlHeRDrP/8TIQl94TnHcUAVtvL7fZe2FIVIQz2PVoEJ+sCbX\nsHLzkCiEUF7CmjOpeyckf/oCdwHuk4G3KCnFXrGCowfucVs/qd29n3K5rHq9njEanNHjDgHGHhOP\n/dnRO0JY461FoQADe66FX3N6oZSd3ixCf6YbC3hnMN/x8bG63e4F71SaJukTQ8+L2/vYnE5x518U\nir67xQWy13rxFvnMkzw5kDVu3aZdlr/mz/YaJ7Tj4+N0JhzfcXTT+cHfwaGjlA4ALZHODQm8Y/7G\nWFpaWkrjQgixqwtEolqt6vj4WM1mMxk9JHNjsMzPz2dydpg/R6do3/nOd5Ln7GjkYDBISeHUfPKD\nh9kZRzI18wLyMTMzo4ODg4zBDW1wJFqtViqTAQ0lpdyc5eXllFD/ySefqFQq6YMPPtDW1pZ+53d+\nJxmprVZLtVpNrVYrGU0Yi3fv3tWtW7dUKpX05MkT/epXv0oG7+///u+nk+wPDg5Uq9USqsjcgrrF\n/BKvSZbnXIGq4ZkzF752XUZ5DguKxJF4EE8SdWnPnj3T+++/r52dHXU6HVUqlQwC6geQ+1rmnf7b\n/3Y0HIPAnUFvLjOglTs70aElP4j3MOeTyUTdblf9fv+Cs+vJ3t4cwYnr1xEndILLLzf2XI77zjZ+\noqHoxlV0zBijb/GnId/i55PJJIPEuoFGf6GpG0yg0/BpnpxDx4HqMA84C1HP+j3+Hh8jMr5cnu4c\n9/NQ0XMur73vp6enSUaNx+OUG+rGMu8tlUqpbInLeZ+zy9oL3bXnDMdEoWQxpqRpnR3PuvcWEwlp\nDpdicSI0CAPBOJcRih0o3IfgcYXoC9+VtBsEpdL57iqu+YGaLEL3XC5D3mIisu/gi54C48vbzgvt\nYWIXWg5rw2ARwcLjdIMXeh0dHWXqKkE3qh+XSiW12+0kfKXpAbwxJIrR5mEBNyiZW0cQI72id+ke\neKR3Xo0veMoVU0yadQWIUnAlRf9coNBAE/CwIq25ZzQ6rzHkByyDUMVdRihqR2I9pLy8vJwEVL1e\nz2y5J4zW7/d1+/Zt/eQnP5EkffbZZ3r77be1sbGhg4MD1ev1lGwOneEdRyRefvll/eu//qt+8Ytf\naG1tTWdnZ9rf309zgeCl7pivmcFgkNBFDFnoMjc3l6rAU16Ae09PT5NxiTfruxuhO8YZu89KpZLe\nf/99/fznP9d7772nhYUF3bhxQ9J50vzMzIz+5V/+RR9//LGePn2q3/zmN5Kkhw8f6t1339X6+rru\n3LmjV155Rf/zP/8jSfr5z3+uP/qjP0qCOu5Q9Lnz9e3hFb7POoXnMX4iz7MuCZU6MsAP6zvyqMtl\n5vfBgwd6/vx5CqXWajUVCoXMRpN+v5/ORCwWi6l2FQY4KCbvYoyOFGOUSNOyCcw5JUcYI84qitWN\nA75TLpfVaDQya351dVX1el39fj9zigLoBXTx8J3Txku50G8+Zx7c2XKjwneKeXg3hq0YXzQQ4QGc\nJJ7tzpbLGZqjfN5iArsjatHoOTk5UbfbTQgNvMVvZJTfE/vhOgq9R+kPd5Sgqzu8DoK4rPPGHDnC\n76Hr09NTNZvNJAuc3ugexu0OVwydxvbCdu05AiVllQLXvRAewsZDPvzO+0y6WGsCL1qahr48NMH3\nHd1BIbjXRtjL86akbKVXR0P4DMFXqVQy9ztyEE/kjjuySqVp7YsINTqS4SEm6BENO98h5iG7PEHi\nhgVGm4cJ6Cuxe+8n9+EdcA0GR7HAyDHEF3Mr3HBzr8EXqUPTLI5ocLv36MYp70XZRGMLgcii5T6n\nl3+fvyOs75464QcMhHgf4/bDWd2QZY0QhuNoFOZydnY2ha8kJQPliy++0K1btxKywhw2m00Nh0O9\n9NJL6b6nT5/q/v37ajQaunHjRiasu7OzkxwM5pJnbmxsaHNzU5ubm2q32/rss89SHSlCZeQgDIfD\ndEQK9OI3/XLazczMaGVlRd1uV8fHxwkFZrfe2dmZlpeXMwUrWRPkUOzu7qb+7O7u6ic/+Ylefvll\nlctlHRwc6G//9m8lSffv39dnn32mTz/9VI8fP86EZD///HNJ0le+8hVtbm6qUCjoq1/9qiTpgw8+\n0MOHD/X2229rfn5eS0tLF5xEZJ3PPeMDBfDDtTGMcfScLwj9ECZF/knnSJ2HbvydHimAr0Hqut2u\nZmdnVa1WE6rIu7gX2Ug4zeuWeb4WcyBNHUBkgpdeoY84HnHdsw5xin1nYAz9uQFKIVHCieyuJJRK\nOYFYVsEjGB5tYP3RJ1fCjizjgPnuaF/bLqOcBp5H5n2JqJLrL4/CuA5ANzny7yFYL9/ihvh4PE5A\nAPPr749OkOtQ5j2GHuN8Rn3J54wlpn3grLtByfy4AQzPtVqtBJwUi8WEqtKQlS7XvX/RCPX2Qgwp\nGI7fkpJlSjjKjQK207pXGhebM08MFzqC4JPpC8Ohcb4vXTw13EMYQH4OHzsjucVM3+i75zu4V8Di\nd0YE8kaZ+PcjeuNWNJ4aRmgst+BGhi9MDJIYuqM/0Jjxe4iO552enqper18IQRwfHydUB0OQd7li\n9sXtc+lz76HSaDT6onQjUFJKXHTj1BUUhlwU3tAGOju9oXOEgZ3fvN8+Ni+sGIXGZDJJStKNVGiK\nAUJoDCRqOBxqMBgkVMYRuWKxqKOjo1Tn65e//KWePXsm6bzEAfkxc3NzajQa+ou/+AtJ0re//W1t\nbW3pS1/6khqNhmq1mjY3NxNNd3Z2dHZ2pnq9rnq9nkoc3L9/X/fu3VOn09HTp0/V6/US3L60tKRr\n164lujv6OxqNVKvVEm+4MmEeQDmWl5cznigKazgcanFx8QKSDTpULBa1tbWV5uxHP/qRlpaWdP/+\nfe3u7urTTz/Vt771rTSODz/8UL/4xS/07NkzFYvTQpeDwUA///nP1Wg0VCyeh+ReeuklSdK9e/f0\n61//Wq+88ko6XsdzOlhPzH9EQz3M7kfkgA6VSqULqRDlcjnV7skLpzgdWIcoUt7p9zWbTRUKBT19\n+lTb29uJ/xk/RgnozmQySX+jzHy9xPXhoRiUImhVXiK6RxSQAdDUNwNEFNuPTWFzB/dVq1X1+/2k\nb7woI/IcerthjhFBGDY6Zr6BypU0PI+jAxrMMx0Zd32CIYAcI5KBzAAphF5Rz3K/yz/nRQ8pusxE\nX7KO4pEthP7iGvZ5j/wd5SjONXLO+d83Q9Fvfpw28EOkG7UM2+126qPraNflUa/6JpC8dlX+4Kpd\ntat21a7aVbtqV+1/2V4IIuWQs1d/9nCbw9g0UI3o9Tv8L13MJ+KZviPIUSrPF5KyOwccAaHF/BgP\nJ+FF4Gl4c+TGUSU8Ct+Z5mPhWt72T/di3RJ3Sz8vEZ93eOl/9xa57igJNPWwnoc3vG+S0sGZ0NIt\n+5jkyfwyx44A4vlERAqP0hNy3fNx+sR58FCuezRxnvLi+/CLe3ru/TmdHBVzT5/vcM0RRK/Czvc8\n1OD8xvyCzDAOjioBWYq5JtJ5AvH6+romk0kKb2xtbaXdcXjvX/7ylyWdH0z8T//0Tzo5OdFrr72m\nV199NSFZjUZD165dS5WvFxYWtL6+Luk8Efv4+FhPnjzRs2fP0lEi0jlac+/evcwuGkcyCEWBfsQd\ni51OJ61tEuThPRLX+/2+6vV6Ji8JBGN7e1snJycp1+lXv/qV/viP/1gzM+fHpxwcHOj73/9+4gvy\nM8gTefz4saTz5P5ms6nHjx/r9u3bmp2dTXlXN27cUKvV0uHhoTY3N3V4eJiQHPg77mBlPfDseGYi\noT5HK10mgu6BbHhI38PxyIDI3/AqIaz5+Xk9evQo5eiVy2UtLy8nmvouPg//QG8PpUfe95BgRMsc\naSKEJGWPlfFoBvcREuJvD4c7CgiaBL3n5+cTUutFVRkLGxg8dQGEKk8OcT3mPfpcez6lh/24j+9E\nBAnUDRkIj4COsf49GsH3Pf0ElJOwPvrA0z3Qoeg3T1iP8+zz63nEHvWIcxzTQTwqQ19i2BCkymU8\nvO5IHrLUUcR+v69isZhJaXA6Oy8yv//nyh9gMBUK091w3nFCETHmy9EjfkafQ5F5hhSMErfHe95N\nvIahRp9cCXk4iZBZzB/yyfSdK/Gdnn9EvN+ZTsqe+8e27XgfeRRugNDvaHjm5R9Eg5V4MCFMh4H5\nHAZ1hR93wnlyM+fLkQOWR1P+R6B5X3x+aZ7LFYVVNDDdaIcmUbj7vd588btAQHAyBr93NBplFq40\nFaCRb8jJKBTOD9eNMXrmAJ7zZGlyF2IomvpP8I7vpIH/hsOhDg8Pde/evZQLMxwO1W63tby8nLa0\nM4b33ntPZ2dn+vd//3d99NFHeuWVVzIlFZaWlpLBs7Kykvr57Nkz7e/v6/Hjx3ry5In6/b7eeuut\n9MylpaUUhpKm8L6HMqPxSSkNkvNRHnwPZUHYC2HoczUajbS7u6ter6f/+I//SOMYDAZaXFxMPMp9\nMVS2v7+vd999V5L093//9+p2u/rLv/zLlEtG3lO/31ej0dDDhw/10ksvZUoLHBwcpBCtK3v4pVQq\npVIHHtZtNBrpvEFCQjEHxY/icp5GWZFAy/ji8U3+vrm5OdXr9ZTPhiJyxYTRxkYhngv9Wa9x3dDH\nqIRZYzzHQ0bu0Hi4SlKmz9CQZ2I4eWI79CZv1WVv3LnlxqafBoCBhbzyNAnvL84BdOEaITMvbRNz\nlLgG/VyfeIoFaSOlUinVLHPni+YOKc8tFqfnevq6Y7zu8PC+4+PjdPg2a5Fxs1EEg8flmstn3uU6\n3zf+uHMJ38VcP7/mNPV15Ruwjo+P01xwYHiU/d7XqCe9vRBDiol2hMgXE9Yh3hCM6YrbGc7jtj4Z\nfO5x2ohIEWuPgojFhzESvQGu8bc0tb6Pjo4y3oSkTN/jgpemQjNa1M4o7G5wA4Ux+f3+uQs70Atv\nLhgQGp6T4P3nN4gagjYib57jwPtWVlYyuzUdiYzootMIpU8phWjUFAqFzELNi2NftghiYdTYZ3jO\nG/wAvZwH3PvyhF/64M/wnTrulcUcQBfMeKmeiAmCM5lMMsUsOR4nb9wuXCaTiXZ2dlIS98HBgQ4P\nD9MhwZVKRTdv3kz9/tM//VPdunVL3/ve9/Sb3/wm9XN+fl6VSiUVnPzss8+SkHry5In29/d1cnKi\n27dv691339VXvvKV1B/QlFarpeXl5QsKHTnAUTfSeY7QYDBIGzcoxeA7Qbvdrmq1WkrWdidqNBrp\n4OBAJycn+sUvfpGOOnn99ddVqVTU7/cTyuJzgUGKwUUe1BtvvKHJZKIvf/nLevz4se7cuZM8fZQi\nR8fcuHEjGYutVivxDgVOuc8RtF6vp2azmRK4WXfkwjgCTm0dFKLzP7vckE2+BqIB4M6ldL7Tk/wh\nCp0yHxsbG4mnQUYZB0izI0H+Hl/HGCTwKfweZaYbQO4w8kwpm4zs73MdQk4P/IbxC784wuyRCuaF\n5lvyvbmBQ1QBWUCRSt8BTd+9vlF0yAqFaeFadCYOlXRuFKD7Ym4PRp/nULmMIwE75g9hJPouUd6H\n7Op0OsmA5l6KDzO/oEvMJfLS88Gk6Q5NRxldB7sjGuntxtSHDXsAACAASURBVJk7EY4oxQ1bbhi7\n0cr4fpuRJb1ARMo9CulixVk3INjaPBwOMwwmZRWYL0qu8TlM7tAh/18W2suz3uME+sJ0ryomoktT\ngcq7HT1yS9qZGOHD7+i18ONGGPR0dMsNQ677ova+02Agfyd9wphwpnODiPGw2EjC9Z0qjkB6crfX\nBXEvJm6P9fsvS6CELg7xEk6IgpdrLijj9mApm+gaF58bsD7H7oF7P/2dIK7+LgTfeDwtSMccelFX\nN4Jpk8kkCaRCYbpV3UNGo9EoY4DcuXNHn376qdrttk5OTrS/v5/CG3fu3ElFJf/8z/9cT58+1dbW\nlqRzo6XX66nf76dyDI1GQ5L05ptvSjpP1n7ttddSbSfpHMlYXV1Nhsv+/n5mmzNnyEFXFF0eAhnX\nBmfNYVR6An+/39fBwYE6nY4ePXp0wRFjzbgh57skUQhUPaf90R/9kf7xH/9RxWIx0W0ymSQkR1LG\nADk5OdHe3l7iQUdquR8E6+xsWguMHYeOarrCdgXrCsnXILX0PHTmzkC73dbTp08Tz/iGB0cnpOkh\n0SBkvqYIwzm6HCvi8/fMzEzmnE2cEeSzjxHHMk9ex52wXhEe2cU73bByB8/TN5hz5Gh0SB0dc6PH\nE8BpjnK54xVlKWOIpWSYI+iKYQzdovHhOpF5B0XyNcOuV+q3uQHukSP6HY0Nkud9HN6Hy5LF+cwN\nd482EMb2ufVdgG6sOhLlfeNadBRopJqA8rtDze//c4gUSiYS0hefMz+xZI/7xu2sLFAnqiv7aGU7\nLMr3WNzsLMObyVPmjqA5oR25cc/fDUWHLaUs7Ivw9jFwD4vAFzdbi6OX5EYZ/fawIH3CSHMm9v7g\n1frzPVxALgn3OXM3m810NAjC1xeI9xXYlXH6YnMB5kYIf/v4ozFHv3wREd93oRg9PubQaYYScuM0\nhkn4wYhyzxEh7krTeQPkxPMBWAduYDqfQlfmw9cQ9IlGB8LTFQgGymQyScgH92P0/PKXv9TGxoaW\nl5dTHtTdu3fT+Hu9no6Pj1PoAeFHXaHxeJzqPnldJL5br9e1uLiYMfjw4ieTiWq1WmbHkPPJaDTS\nwsJCBlGoVCrpfz865vj4WP1+X8fHx+noFeaCMTSbTa2traVipDwTfigWz0OYBwcHkqTvf//7WllZ\n0RdffKHV1dULKAo7KJFhoDWSUj2jer2uyWSS6i+BwJXL5zui9vf3M2jG+vp6urawsJBZe6QBsKsL\nHoanWUuOdji/cw0Dm12RXvdrOBymOlPD4TCFSqJDRugZ5cy7oA18jbPgzi4/8LAr2slkklkbvr5x\nHAhN815Hn0ajkSqVSlpPjs5hqHitp4hO0zzHh3v9OciEqLxdt0QDzA03xu10Jf2hXC6nuXc0n5Aw\n/EZzhNyPQGGMR0dHmbUfHVIvT8Fv5oS5cLDBU0EwpqIsykMZoYcb3YzPneMo23m284kbdYwLmvJu\n6O3z4EZtDCFe4IFLr/z/2BB6UlbxMwFucUtTaxGh64odr8qPJXBF5YiOv5fPHcXy0J6jHHzf+8tk\nupXqaAMLzwUHiIUngvIsxl0qZWuUuBeHAeoGT7lcTudaRYQvGlPOcBhJQOmEOaXp6dmSMh4a4/Dw\nq38H+o9Go5R87BWcCTe4MSZlK7tTediRHN7nc0Y/+U1/XNjkhXt5Jt+PixR6uWHngsLDCf5c984Z\nkyspV3L8RslzJIlvn3ZepD+VSiUjFOfn5zN5BG7U48kiiNyoo7I4Qmxubi6hHw8ePNDrr7+e0KTl\n5eWkLLe2tvT48WO1222tr69ncrNqtZpu376dnAnQCeaB5F3oSCixWCympN5qtaqDg4PE/zdv3kzX\nYvXm/f19zc7OamlpKVVuh57MB+9nbhgHhhly5fr166k/ID7UrFpZWUkJ5V5uAJSJd/zZn/2Z5ufn\ntbm5qddffz3jOC0tLenDDz9Mxmez2UzXZ2dntbGxkYx0H4PnQYE4gWRhmPua97p7jhxEBCbyyGUe\n9/z8fOKDVquls7OzlPPS7/d1dnaW5pRilh6miUoxbrDhnRhtLtelafkDlKajV55aQCK0ywMMtOhc\nR4TaDQP4pFAopDyvuA5dznj5FkdUXC64jKO5XGJu8kqycJ2+er0vngky6AYDxpmffZiH5HtaCDRl\nTuv1esYodjmSF0WCj+AP3zwRUXJaBBOcT+EXN7ScNvBxdISjHcFapbmj4O/jOfQJPe28mJc2kmh6\n6ZWrdtWu2lW7alftql21q/Zb2wtBpNyLd4vXEQXgNGkK/wJnel4SrVAoJOjYw2uO4HhYiP95t4eH\ngMTzPDXPrfFkRN7n/XcvgebhnejteB6No1LQBuSId/h2V+D2mMgHbfCmHHnxcKKPw61vwlTe/6Oj\no3S/h1l91wvHMNDHuG3Z4XYPzzE+35Lt0GqEjfGU3LuAhngdjN+PifD55FnQzcNh7s2BinmxPM/P\ngC/wjguF6Y4ovChyCDz05Vv62YXm+VCMHTrAU3iOc3NzKcE3zj88QEjNx8q1+fn5FPqan5/X7u6u\nbt++rZmZGbVarTTGlZUVraysaG9vL+UreliXM+SYM55ZLpfTLruYe3F0dJSQlGfPnqnRaKTk9u3t\nbZ2dnalaraaChYSaqLxO+KJarWowGGR2RFE2oFarpZQASemok2LxPIcPpEg6r9B+eHio69eva25u\nTrdv304oFaEsX3fuJd++fVs3b95MHi9zcXR0pPn5ed27d0/j8TjtCKQtLS2pVDo/ONqPEDk7O1Ov\n11OpVFKz2cygOu5Be94SDbTcEU34AATHQ/SMwcPo5NCwThiTo1p+BA3PJeXAk8ZBl0B5PORNdfKI\nEoCaOgpCA1FnPbLBh77Ck65H+Ix3kqzveWYRRfbjRRxB4TpzwTuRYS57I2ri9HYky/OgPAISc7I8\n1IesdmSJ++hLDH3RJ89f4hp5WRy7xPh9HplLGvoZ/vTSNi4foJXrFubIdSp8RfFpaO85rdAgpi64\nTvFxQxMPBTpPOQ+A9EJz+vt/rvwBE+KJfvGaNwSBJw56UqkzkCsan5woNFxBwhi+APlcysKRMSE9\nLnCYwnOBeCafMR6vTcJCiBPMQuK6LypPjmRBusESBZkrVsbvRpw/18OeHj6g5o3npHneGefFoZyc\npv7jdIZOMfbNd+ATD8MyL4RQPabOPPBdFlpefgbN4+EeSvV++iJEEUUB52FLF4wuXD3kJE2VEbzr\nix+F5MLGt00TXoDWMVmdkADhb+hGWPD4+FitViuFYFGEz54908bGRjoYWVI6sLhWq6W54kiaSqWS\nhO/KykoKu0nniehPnjxJQoqwGOPDIJqfn9err76awoyE7xYXFy+EIfjbDyeem5vLGFLwJvzhByWf\nnp4mo+wrX/lKSqr+zne+o1arpW63q6Ojo1TlnL4+evRI/X4/5QthgL3xxhtaXFxUpVLR+vq6Zmdn\ndf36dUnSp59+qo2NDX3pS19KBz5D04WFBbXbbR0fH6vRaGg4HCaaHh8fpwOm2Q3nBj+hynK5nMm5\ncgXP77hLNtZr43PGxTwho1jTGHvNZlPz8/NprjCaJ5PznaeEqpgL6B3DzOTM4AS4HB4MBikP8DKd\nwNx6GgX9Zjen0wKZyfrwJGaX3WwO4JnkmmG0ut7x/zE08px3fjM+FDbhMN+xiLyOubw01zGkS8DP\nhOiga5xnjE8fL59jYLLBwx1ND6tiODNGjFNo6/XO3HiL4TF3FF1+uy6DFk5HPotpGy6r81JPPEXE\n+cLpEw1T13mXtRdiSEnTE+89iRtC+g+NwWNoOMFhJI+3SvlZ9s6M/M+EONPExEGP0zrRPTcGYsMY\nMblbyu7mYwwoO09487G7co7GAnHpuPARSDCg7xiENhhIeUmSzvw8j8Y4o7c4mUzSoaW1Wi1T9yUa\nsR5Hj0aWG9gYj84fbsyiMKNQiDHtyE9unHlzRYWAjgY1dIlJvDFvzlFB5x9H2XjeeDxOxgAFJn0e\nHaUgb8XzFuhTXPyMIa4l6Ibhi9Lb399XtVpNO+bq9Xo6zoX6QeTr1Go1vfzyy5KmhlSj0UhePsKU\nHX3wC8YNdGMHGqUE9vb20jPZIo6hj5HBuieZmBwdRzLxrD2Phnv5DgbA3/zN3yT6/tu//ZsWFha0\nubmpxcXF1C9ytXZ2dtTr9TIHGq+srGh5eVnNZjPVW6I213g81nvvvadicVpry8cPL0VnqFarpXUC\naudGRql0fhB6vV5PO7ecTx1Big1j2vOHHEXnbD2nmaMcOIKuvCmNgIx2WUM+EgaTI9WOivsczs/P\nazAYqNvtXuBvrh8dHaU8HHcEPRcGY0OanlHIs8ijYox+LmC5XL5gnFA6wOWwO3OsZX8mfWc9xh2L\njN2RWuYlOoHch1zMyyFinlgzLnujg+rXyJMdjUbJmHZn3J1830HNnDJmR0ahhecMR+fSc9NcxjrI\n4u92PRk3AXj+a3T0Y3PdBZ3yDDmci8s2G0gvsLI5gtYFH2f1sBAcxpamXmhUkggbBCS/+dyNkDyh\n4te5H2MNRvY+RIal0T9HnbwvhHMQntEgQsFFVCImC8YEcASJhxqkbH0VBI2PH0ECPf0au/W8j4zR\noXH6Lk1DU6AjHq7MQ2YcYfECilL2/Cfo6kYTDQGVVyk9Gg/xkGdHoeKigV5Od+YCIeZhRgSJI2De\n3Dsaj8eZooTQi23e7hTwLt9Z5IiUG+Vs9+c9vAtl44Kf7eMYaTxzcXExVfvd39/XaDTS7du3JSmd\nUQUidXx8nIws+kPYLKLGJycn6nQ66dkYGZQCoCDe06dP030U3IQvBoNBJnzK2AqFaXKwIxCgB0dH\nR3r+/HlS3uPxOFOC4Pnz5+lcwL/7u7/T7du39YMf/EDb29uZxPg33nhDd+7c0dOnT9O7QN0ajYbW\n19c1MzOjnZ0dff7554nfvv71ryd6Ly4uJgeA+WQTAQfm0nzchK9cRp2eniZ0rFQqJVSKEIsrSd9I\n4oaGKxOuz87Oql6vZzaUgJYSembuuI7cOjs7S3PjssNLMlA+hDF6+gUIDc/k7263m4yxPP7m/dK5\nAe6IQ55D7hsOeLeHHklRcNnG+3mnPzOG4P0a1z0iQd/c2fZdgr5bMG5k8bnCqCgUCqlPrBl38vKi\nKqx9rrm+YLefyxPmEn5yGkNXdx7j+9xR9ve5QelyE4c9Jrl7n93h9eYy2sfHHEQHw/WTy3He4YZ6\nXnth5Q9KpdKFwpIwqsOWUjaj38M/fq80NVbcqne40eE+t4S5FhET3h3hQ+6PiwbDyo0ynunvjXCi\nozaSLjBFtJz9+xiKjhjFZ7iF7kyOwo4GmAsymhu1eILOzNDNi606OkPf/VkojV6vl4xrN/x4tiMo\nl4VLyf1wIeWGtKOYCB6Emgt9F4bRiHbh78oKWjpyxTuhJYKW/sZ3ouDwsBBcXqSP+fOwiHukEf73\nuffG3LA2+v1+ConBS+RAUWtJOq8jRWHNRqOhcrmc8ocI9ezt7SVPlrDXwsJCKpzJYcrQqt/va2lp\nSc+fP0/0dJ7Z39/X0tKSOp1OhsaEik5OTjJGkh8vUygU1O/3U0gKeUPeFU5NpVLJKJdvfvObun//\nvt5//33t7e1pd3dXknTv3j3duHEjoRR+bMT8/Hw66HZ3d1czMzP6gz/4A0nS9evXtbi4mKrAu9OC\ngURdH4x45oK8OvJ3XCZSvsEROx87Ctx5v1QqJcMdJRTlkZfX8C3yGBnQyx0QLyQbUysmk0lCVmN4\ny5HXGNZ3RHs0Oq935sqUsaI7nDfQJdHBos+uZGO6Bu/Mmwuuz8xky74gbxivz5O/y3kYeeeos6M5\ntLyQkst3csloGCCEDiMigyGNXvS+QlOv8C5NjQl3uhwBdSfd+RvUk888jxdaxCiO08Zln/OT69po\nLDtgEiMKrI0YhXEQhPF6VMSN3Lz2wgpygiy414oCc+UhTZWpW/ZMIorVESC3JF3ASNm8p2h5RqOI\n97qC8smMC889BEfP/Nko4RhmcwvaDRunB8LGvUAWe0TjECJuSFwWxosWPd6nx7lpbt1DD0dbPKbv\nRkg0grxCu9dEifkA9N29pwj5+uLLQxzpP9ecvyKky9iYHxcKPl9ReGKcYUzG8RMWkJQUJ7zf6/VS\niIZnOJ/hYYNIwPt40+5lu2d/fHycqg2DdjFPGDQgOghw7kFZNpvNJECePHmijY0NDQYDbW1tZeYe\nZOr09FTLy8tqtVpJCbNVHgHvypYq4o5GraysSFKqk4TiOz4+TqG44XCoYvH8rCzQHPcyEfjwEyE5\n51PmOVaEPz4+1o0bN7S2tqbd3V09ePBA0nn5h36/r7W1Nc3OzqrT6aRxzMzMaGFhQd1uV+vr63r1\n1VcTQrS8vKw333wzoQbdbjfRhm3mEXFxvkXuueE+Go20tramcrmckEEPX3nivz8LXvRTBmigOhhv\nvAc+9FAT80l/UDY4Ai6joxL2xGHWPHlefo338dzT09NMiNPDVB7BcF6DLzzfhtAzazI6rhhzvr6L\nxWlNvUKhkElKd3QK+e3IymVRjEKhkJDbvBCs6yg3YFyGgzQjx5hHN3SjQeKGtiPHUVf6/9Bzfn4+\nJYI7vSLw4PIbY9aRJx8jY3FgwkOzMUeU5t/3CIbznzsR3i9/xmV/+xiY88vaVfmDq3bVrtpVu2pX\n7apdtf9le2GhPazkeHI0Vr+jFzHG7SEch57xXGOozGPQvsst7uCIUDSWvlv7niiN9+ZWdR7E7u9z\nBMWh0Jij5Pd5oqaHIfC4SU52KHoymaRdLx5j9nc7XT3M6t5iTOaLfXXP06HqiCwxnyAgoBT+zLzQ\nlHvi5DA4H7nnEPuCV4ZH4fPrf8fkZ7xkUCafE88B8Gsk+0JTP9yWcXg+w2QySWEoShMcHh5qZmZG\n1Wo15RLhMTqSyU6piJbG8CJrCVSHa6wZeP/o6CiF70BboLdXfiYE+PbbbyeUhjyjVqulZrOZPH6Q\nHfpZKBRSgja79OgDzwdl4L7RaKRarZaKY5IL5bzJTkHCG3HNg9SMx+PM2Z3j8TjlVfFu6Txct7y8\nrGKxqO3tbd26dUubm5uSzsszfPbZZ+r3+ynna2NjQ5LSurxz546Wlpa0tLSkO3fuSJLu3r2bvGd2\nusFvlJIYDAbJ4wV1mZubS+VeGJMj1YTT/ABneNFzq2LVc3gFnnQaELJCzvJM37zg/YBu8AnoU8wP\njSg7c0zpDT+CxMN9nr5QKBRSmBVEknHFXXSRTq4TWBPIQEddPNRfLpcTv3nKiSMl8CIHSzNOR+49\njyeGoNBbCwsLaecqYwA543sxBIkcgUYeMuO6pynwXD73VBau0U8/cJpxcJ0ctzwE0HWAz72P2xFH\nvhPDlz4unutzzTgdtWPMLs89n9jXEbrb0T34Im7M4t68aAfthe3ak7L5Qh7aizHPmBDnTIVyizkB\n0lRB+24Qh+c89IdB4u+MuVL+PhohFZ7n59e5AeaGBWGFmNvi4UoPGWAEIDDdyCJMgZCOyd2zs7Mp\njyLmn9Avvu8LA0aGSV0A+/z5gmIO3AB25vUdZg5VM59Oa38mi5jdYv58X7D+2+nOHHkOhQsbTwBl\n3t1gdoHDnBIacQMMIes86bSCtz0USp9RhMDYKBoXTswDhhQ8i5FFjg7PBIanVhPPJEcIh4TDhqXp\nUR/VajUpY3ik2Wzq8ePH6vV6un79ura2tjIGEXk8sXYYIeilpaWMESGdVxJ3nlxcXMzMKWHHaDSy\n65e5hAZeOoDf9Xo9HXHCZ4Q+l5eXM7zI92/evKmzszPt7++n+1566SVtbm7q4OAgbfGHT/v9vmZm\nZlSv11Wr1VLld655MrRXjPZSAyRvsyszHtp+cnKSSh6g6DEgOfbG+cplDrxIrlretnaMfHa2ebKx\nO7cuF3xrOUnmbpAx/mKxmHbzEj72eWKN+LrBKKPf165dS7TZ39/X/v5+UoieK0gf4RHfXetJ+O54\n8H5P8vZ58jBYdPDJOxuNpqUIvKo/zcfl72T9u87zkJ479JJS7TRSFKKBwbvQbZ7LRp/43FNFnDZx\nfpHlOKHuJMZDlX2M0RmPG5Ccv/IMFX+fy1N/j+sc9Bx0dX5i7vicMKyk5LDgQLizDV/G0KK3F2pI\nuVESE/fi99wTcIJjQV6WPBm3lXKNnBGIHHNvILQLDe8PfXEmdSvWDQGuMcEYQM587j24cYYQQLl2\nOp2EAsQjEHwnGIoUb8HzaWg+Nvci4uKPiA33+Xj8+/7bcy1Y+NzvBihjcMPM54u5dM/W+YfF5tei\n9+xIHuN3LwSaci+L3sfA1m0UhSNkjlZFh8ARMObMx+bz4kUZOZ3ehYkn6iIASOB2Id9ut9Xtdi+c\nt+aJoAsLC1paWkr39Xq9dJwLXrgfsFsqlfT06VPNzs7qlVde0ccff5yZ52vXrmlubi4lpdO84KIr\nPeiLJ+65Lxh4vmPNjVA8Z3jYUU7qD/G+g4ODC7l0IFWNRiMZIdK54m80Gtrc3NTy8nI6mBmHpFar\naW1tLd0vSbdv3047VkEXXJkOBgN1Oh212+3ER1wjGd/5h7nAAUCper0vDghm3WDAzs7OqlqtJofE\neRTjGuXmCtHRdwxsPxhbyh4/43XicFRBOKNc8N1x7igNBgO1Wi2tra0lxNydIMbKO/mMQqrPnj1T\nr9fLlH/w5HNQHYzavOTjaARwzdeTO708PxpU9N1lOwn7OGFuuLrjhdHiaE2j0dB4PE5lPFwusm4c\n4Y7INHzmaBXvRR67E4NsgQ9cfqEPQSYjmOBghvcFJ9hlvRtn3Iej7cgp/fSiqcxhNLppPk7PCaW5\n4Q1tGTsFSBmrjz3SMLYXYki5weBKLn7mqIF7V27YYBx42Mq9DAwB/57fx3e8MVGXWcHc4wuC+7xf\n3lhkLJi4w8x3gvkY4k4BQhmS0inejnDF/pbL5YRauBCRpohcTA6FST3k5ve5gHGUi7njXR7mhJYg\nCA6VuvCkX96gD7uWomHkcxLhWIRCRMrcwHJe4HO/5vOM8ILWcZ49fJdn2Pnf7vGcnZ1pOBwmRe0V\n0V15ueBHWfMuR+vOzs7r+kwmk5TEToP+JCWfnU3PTPNwtyd2S+eKHaH405/+VN1uV6+99pqk8yrk\npVJJOzs7mp2dVaPRSMYRAovQRblcTsiKJHU6HQ2HQzUajQyi5cUroSXjAP1wD9drvRBuQZlGBLhU\nOt9R2W63Va1Wtbq6Kuk8FMh8YGRRK+vp06cqFM6TZ3d2dlSr1bS8vCxpuhbb7bYODg6SIUtfQYxO\nT0/TepSUhDdhMUrAOJ9AE1c08OHi4mIyiuA1SiJgPPuZeb6BAqXiiKukhKwQrqJ5gj686SEz5ATG\nlKcRuIJ2HgZhbTQaSR5FpN6dZN95yc7R/f39lKzvdEPx+9rDoMpbhy7X/Fm8j3mLUQopmxjOnDF2\nR0L4Lu9wkMD7Ca1Ho1Faj466QwfWo+sqlz8eGpOyTpqPnf9dzvNZ/B9edR3gYcUoazEuYz9jgrh0\nsRI6z4jOPe+IwAV84oag09SdefhUOl8zbsz6+JibSBtvLyxHKi9kJk0VWYQAHQFy1EnKHiAcLXNH\nsBwqvsxAkrIeBlCtowe0PIXo3pR7EL51HkMqz8LFI6fPHC8B3Hh2dpZg8ajIo6EUDVanGcYTNPOc\nBv72ReRM5ULGPTP3SpyxGZfDtB6iJJTJos9Ddng2z5GUqu/SPw9h8C4XPh66dFp7P+l7Xi4UC9MV\nkBuRKDc3RPNCopPJJOUfSVNkqVwuq1arXTDg4Xno7eEHDI3xeJwQCqebdG4cYCBJSmhFq9VSp9PJ\n5MIgpFHmbgwPh0PNz89rbm5OlUpFrVYrveOtt97SgwcPEvo1mUwyyrvT6ajT6aR8plarleaiUqmk\nHXClUint2qvVaolWPu/c52sIw9DlCoYViATzjiKAfhyYLJ0bNr1eLxnQrVYr0ebu3buSzsOR8CRz\nwa5FQm0YI5JSeLDZbGp1dVWTySQZks+fP09ozdLSUmaXJP0ndFQsFtN7OKjcUVU3+KEbx+vQ+J6v\nF6eZhzucD0FffY1G5YJ8hZaO8tEYkxuSyDQUsstaDsYmf4n7yG8rFovpmCTeF3PzYjqAh4MceXDa\nRQfSDXJfi1xzneMKHsV8fHycZFBUyMhUDA6eQ4t6BpmLfIm5t64/HAX2e/29jirSD67H6I+HF/ke\n+ol3gy5JyuUlR/8JU8aUFEekGEdEEpHREcn1eYifMca8cGGv10sGvfclomx57YWF9rCSo+WJcnJm\n8Obb4Pnfn4FhIE0RF9plMVhX6DQEtYd9+NzDSW6ouSCKW1URbCT+ej4WC8WtaDfm/DgHXxTdbjfl\nungOkDRNrEeoRIPQtxnHxc3CdG/V0So8fZ4X0SpHnXyRuoHhuQaOvsXxQxc3WmKIyo0hD236nHiM\nO0LrvuBc8DD3LgAcvYoOgfOeP9/7g+Dz7zlqyHV/hzsVjspEDx0Uge85UgGfwDd4uvSZBGeUAAab\noxI4AKAqfg4dob6dnR3Nzc1pZWUlw3uHh4cJNvcz3ECpNjY2knDDURgOh6pWq8kYALGTsoVR3YBl\nXZGXw2eOFg+Hw7QOMZKgL+u+3++rUqlkio4SEqvVarp165YODw/TOm+1WlpeXk5lEU5OThJaRaHU\no6Mj7e7uZsofNBqNZEyAhMEbjka3Wq3kcNBfR04xJJkLHDme6xW64TNHQKQpkgGPuzHo/Op1mJzf\nxuNxJgEbRBMDx2tNxdxGShsUi9M6adFZcd7n/cgML+9BhfW8/CRPXHfnimc6XVxfFIvFdO4hSfDR\nGeczlxHoAhBCn1+/n7G6I45T4mvR54nP3fhgfqJMdFnvecbIDebMk61xRugrtPPQGPNKyRdfZ5Gu\nPjZpGn5nc4obTvydl5cEnTxE6A5VnFOXl5737IYiNkW/309OixvHEaSI7ar8wVW7alftql21q3bV\nrtr/sr0QRAqL0eGyGOby+Lt0bvV60bKIZrgF7l6WpnlYRwAAIABJREFUw5ERLnWPztExLGiSa90b\n43sxd4O+eJzcty7H+7zPHp6JSB1eF/TwEBxhnHq9fgEB8rHiDbg35JZ+tLTxKkhU9O/i7UAbpy/0\nxGuLCYlA9XhDsS/s7oIm0AjvM4ZuQSfdW/KxxDE73O5zH3O+3BuLKBPfiV6W52F5zpmP0Z8TE9VB\nQvCu47lwjMPzgCibgJcYn0li6Onp6YWigXjwEQEkNEWBUA8lg4IQUjs8PExzvLu7m1CuVquVQW7r\n9bru3buXSbZmXP1+X9vb27pz546Gw6FarVZmLZMDc/36dVWr1QthBzzshYUFDQaDlM/FPHh+hoci\n8DzhJxA5r/rN2YEehvMkX6/83Ww2U8ju7OwsHVgsKSFXoHeFwrS4nyMglCpxXiQVgP44gjYanVf7\ndj6E1+bm5lSv11WpVFJ4zJvzaVxrjm47euu8GkP30JhwuydcU6iU8caQN9/znarMEzssvSyDNK0I\nT+5YPG7MkXN4BNqAUHvaAeNnPkAgXA6Dxnq0ItLUEXDG5+vLS7j4PPO/o+0gc6Br/j4PsXp+sfON\nI+YxGsNYR6NRWjOe1H9ZXpCvJ66NRudlSCgc7HPqiBCywyNGbExgjI4Qud6JuXrQir8jwu95ZP7+\nKIdjdIGxlEqljAxmfV7WXlhoD4HkQhMmdKOD5nF+h0AhMELXBYpDiRFOdQZ3uFPKLghnFr+P5/J9\n7xv9Q5FL2WTz2D/uY3HH2DC5WrzLc5TYEeK7UqTp4sNgcLiT5wDh+tjjeKCrM7JvKfYF7PlMHhbk\nmv9G2NJXDARqYUV4mAUQIVYWALT1vvh8xnExZu5zvvAcCJ8jz3vK29gQDS7PJ0PxewjXhdtoNEo7\nvuI5dQhe4HMvY+BHo2BUeV8IYdF/KXuUj9cHkqb5WhhNvkaHw2HKgeF8PM6hW1lZ0f7+vsrlstbX\n1/XFF1+kkM6NGze0sLCgarWacSQY93A41IMHD5KgJSTEmX67u7va29tTo9HI5F0xTgwFD4EeHR1p\nPB6nhHXPyUMpQ08EpzQ9p63b7aat+t7Xs7Mz1ev1FKaCh4fDoRYXF1Wr1VIY/fPPP5ekZIhxkLcr\ndniNWk+EO+AZjAHyUJzffKOJ8ypOAJXgnQ+LxWIKGeXlfDj/ezmVmGODonGe93URc+tmZmbUbDZT\nyMplKPlHfIbRPxgMLhxjkrfGfG4ZPzIc2eKyIO6o9jQMHDNoFp1rfij1IU03PtAXf4enUuSFhtxR\njGkuHK+CU+Iyx8N4HlL0OWRMl+kcxsF9HuKLYTr4BjkWUxh6vV7iYz+Wx0uEcF+sHelz4Dt9XUe7\nUSspY+xFeRnH6LwL7aKcdJuCcCONOY285+2FIVIoWzdUnACxhpNvHwWJ4FqMgeYRJ8aRaUySE4oE\nPBZUzK+JBoJ7AihLR1G8nxhMbix4P5lkzyFA4dP36A1yOrqfxcXY+C4Lwxk3eofeQHmgqfeP/0GQ\n8oxU/z7XnDbRw6C/3OPCDhpwj+/U8DnyuXHPKjaMU4SbC2Ke44nJvpvJ+x8NYUd3oJ0vWBcwzjd4\n024Euwfr3hJ955ko99PT0xTbp98Yu9Hz9fGT5OuJ75zhJp2jCRQHxVA5OjpSs9nUnTt3UkFK6mCd\nnJxocXExIUnSuWIkgZvSEYyh0WhoOBxqZ2dH7XY7c5YdXjle/P7+vvb29iQp1bmamZlJRo/zEMaT\nF3d0/rp27VpS3l5jqt1uq9lsql6vJ/TBd2CBrrTbbTUajTR+0DAMv4ODgwwiA1oHmkd/oTXor+fm\nkP8yHo/VarUyCsNRY+QT95XL5VTmIDYcFhR3dLBYe6PR6ELNL3f0+D8mdjvq7oU+eaaXZYBuo9Eo\nc2ZgrC3F+nalTPK25zG6sQTtPe+Q53Edg9aNHnd8HBXBIHGHDkfBd/T5sUQ0N8xcJnk/oyPohm5E\n0FkPzEVE1uDXvIRq3umomOuEaHi5nnVD17/HmNgtGo3a+H7+j7miMUrgfB1lu+v1GOVhTIwz6ieu\nxYiJ63mQWX9HXo417YWVP2CSvXPA79HC9glD8XtyGYwNokFjEUVPTsomW8ekZfrg3pMjYlJWEcZJ\ndOQiImQIufg+/s9DSHzscSFijOL1OUyNgvV73HBFAMEkjtxgBDrCxjU+82J89NWFex6NXajSFzcM\nYpK+G5RR+DN2Ry4dUnceiIKNcUNDpzPj8ARgf7Y/x+nqhil9cx5xj9ZDdAhNVz4YLzMzM6lQY/TK\nxuOxBoOBzs7O6zB51ft+v5+ZTzfOR6ORWq2WisWims1mZkcbCgpjxsOCa2trWl9f1+LiopaXl3V2\ndpYqTVerVS0sLCRE5tq1a3rppZckTcN+MzMz2tvb0/HxsZaWltL4OCC53+/ryZMnevjwYaL322+/\nnalBFR2ier2e+LTb7aYQEn0hxOWhn1qtlgxEjF6eS82sYrGYUUiSdOvWrVSramVlRfV6PaNoQIY2\nNjYyPAdqzLMGg0EqjcC4QK1AIZxnkBuTySSF9nBwfF1ggGBEM1aMNmgaEWeXCePxOJVrcLq4IwTv\nerjJEVUKh8LLlG/o9/sJIfME/pOTE1WrVQ2Hw/R+nokxByLjicq0GNpiXB6Kjjt2oaevwxhNQK5y\nDQSOdcizjo6OkqEML+bJfcLv0bGJ6zo2n2saKRJuNESEjM/9+S6XcK5dvrkD6062n5JAnz0dh2cR\nMne56O/IQ4yYQ9cJjqhFMMTnijl0fenrB+NWypao4DseVYImDoDQN/9uXnth5Q+kiyEkfrvnz/dg\nzsiEKJr4PClb5ZrnRw+M+9w74F6ER4TGvS/+Tmd4R2VoPvlu4fv1+Ey8D645EuLGGl6XMx39xkCN\nwobvIDB9YTP+iBa5IuO6LwYY2+FhaOPeZRQehFtAbdyw8bF680UQjWEXir5wfOzej4hG8nekNzQG\nxXKUw9FN9+SlaQ5JNGjpC1vg5+bmVK1W03VQExSDw+2gO9xP8U3o7R4dh+UyDmgzGAwyVbfdY+eI\nEp+LmZkZ3bp1K5VpYO11Oh198cUXWl9fV7Va1bNnz1JtppWVlWRcg5RwX7FY1Guvvaaf/exnSQCT\nk/Xo0SNJ0le/+tWUS+F5IxzKu7e3l8lroq/Ly8t6/vy5ZmZmtLq6mikb4YUu3YuGP8mD8aNuWIvM\n77NnzzJClu+vrq6m3XnM4Xg8Tjk+boCheEHYyFOBZ4rFYqZ6uedtTCbnRy4RSozoZ61Wu6BIL3Me\nnYd5nh8P5OEOz1V0Be1Oj4eicEbcwfS++hz4WsWYg+cdOfZ1hMPosg9ax13XvBtaRjnkSLKjfMgm\n6Bl5kdwa38rP2PNQZPriO089SoGR6norprf4uJze/O8Ii/fHHXGXYYyPH78XOnNPNHqi3Isy03WQ\nP5PfLjv9Gs+KRpQ/33PnHL3nur/P6ev2RHy+j8H122XthSFS0chwQyYaGRGS88/cI3MDgGsolMss\nTUfG/D4IDTP6pPN5XiKyNI0x+4T6IkHQRBr4oo+QL/f5Ncab56VhkLl1DsLm1z1fI2+B8S6Hvwk3\nYThFww6kxJNcHbFBUXHNi/X5Nl+nFfPl0LAbQTHp0FG9aNj4QomwMYYgfYn0duPMBa17xXmIYl6h\nOeh9dnaWcnNAZ3guBgtz5/yNouj3+9rb28uc1VUul5NRsbi4qPX19YRm+JEwEdIej8fpSJlbt25l\nwsGdTkeTySTVpapUKplK23Nzczo4OEjHzGAIraysaDgcamlpSW+88Ybu3r2rnZ0dSefe/PLysh4+\nfKhKpaJarZbZ/j43N5feu7y8nMbeaDTU7Xb1gx/8QMPhUOvr67p582bqz82bN5MCu3btmiqVSqb8\ng5/RWavVEgKIHOE9cYMGZ/7t7e2pVDqv8s41jDlChSi+ZrOZQqU80z1ijm1xj5lroMyj0ShTTBK5\n5Xlu9NPRRWScG06eM+JhHvK+CPGQjA+fOMobHRt39Hxtcy/rGNTJEQcadYV83Z2cnCS6ttvtzLpC\nxvLj8gQnydcktGHduzFPX1xWRLnvTpWvcYwWp4HLG3eqHTmiWjm6ydMIMDw4ncLpDV9joHpfaPQ3\nIkf+OTrMdYRHBdzw4R6X59FY8fvzDEeXt1yj72xggHf9xAJ3eJyn4GU3vrnf590NTje46bPzQDTS\nfFy/zZC6Kn9w1a7aVbtqV+2qXbWr9r9sLzS0Fy1MrMq4s0fKFr+MaBXJeDFkhPXru/ncUsaad4+F\nZ/JO/8019wzyIMdoPXMtjimiEzFBmc/xOt0z9vt5L8+gb3hkjNefGyF/p6l7sTEUhWeKNxP7Sh6A\npMwZSR4y4tkewsBLBZaPuzdIFo5esHvakb5eAC/Gznl/pLc0RSH4gd6efE8ozufE8y/yaMp7QBAc\nlej1eims46FoPCveRSiHRjjppZdeUqVSyXhWJOeyRvCEgcMpXMj3Jen69eu6efNmJkzhOULkwTBe\nEm5B0zj3rFarZSrw1+t1HR8f6/3339fKykqmyvrp6aleeeUVvfrqq/rpT3+aEsrX19dTUjioJTRu\nNBpqt9uan5/Xl7/8ZZ2enmpjY0Nra2uSlBCg1157Le3cgzaLi4vqdDoJ5XPecPQABNDlFeGmcrmc\nkCRpyuuERzz3hjybUqmkfr+vdrudyQGs1+uZs+Kcx2dnZzM5bI4cUhYBOefP9NBODPnH6tlRniwu\nLmZQEmjBmgeduAyRKhQKF8JUhEVBUTxFww9e9yOC2HnJYe2O9Li8uCzMeHZ2lo7f8fF7qkRML3F9\n4PzGeAm5OTrlOUGOULMu6JufFiBNiy27bPD3gVDFHDDkl48/6hzPJ3ZU2fWTy1/e7UfqRP0BH/A+\nL6kCiuwomfNU3i5RR6cI27JuXe7FKBR9dR3k9IsoMo359p3hUX/HcKd//tvaCzGkXAG7wvBtkXlG\nhw8o1p3Ky6WJho3HfTFQYtyWZ+T97c/knR7HxjDx6uvOUHmwob8nT5j7fQiqGD/mcw/deR8RCm4U\nRIXtfXVI1KuUx37mJekjOKLQIAEWmsd4P1vDI4zqELL3w+mdZwzzfU9I90RK+MHDdD4WX4get6cP\nhUIhkxjs8xgTWn0c8L7fh/Kl9IGPE2UymUxS2QFCPAgDkjzZgsx9nU4n5UB1u900F5VKJRk7lLIg\n1Lezs6OtrS3Nzs6mvCT6WqvV1Gw2Ux6UH1o7mZzn0NTrdTWbzZTzIZ0fZFyr1TQajVJYkGTrfr+v\n2dnZVAX83r17qWwA32F+Dw8Pk5Ld2dnRysqKvvGNb2h9fV2dTicl/DL+jY0NLS0tpRAY4yD5vVqt\nplAS97EumDs/gYCE9fX1dZVKJT158iTxzeHhYeLjw8PDzGG/3EdOFzvXpKmCYZ16uQvCugh+D6/B\nU3y33++nd87Pz6ter2tlZeVCjhC8xU4zDzXhsHlaAv2Efq6Y3chmzbrz4XKYH56T91w2NvBMQtWe\nOxlDNxiMLpfcyKHOFErf0zx8PUtT5Zw3BvgZ2rmT6degnctVrnsNKfriOWDQimuj0ShzDJYb+Hlh\nVsbhBpXTl/64DIrGhNPVZZTfE8OlkpLjxVqNG5D8XV7r0MPZlFZhDl0H+Pj9ndSWuywM53TBgKY/\nseyP0yLqmbjDMLYXhkh5zPj/q7myzLO+EUTRg+Q91FpxQ8aVKj/+mS+2SGAEQhxLRKpcCdEvR8jc\nWHJG94WIhc+z4sKPnqN7tQhRvgeNpGxNDZ7lSt8XdqzrhLCNxzz4vRiT8X1sAZemxrBvUV9cXNR4\nPE04ZsyMwenGc6NR7c0NV0c882LifM8VgiuhiAa4oI2Cxb1b7vW5cyOLuk7j8Vi7u7sqlUopURtj\nAMHmieGDwUAHBwfp0N8opClTcOPGjZRvQf89Afrg4CA9c35+Xjs7O+k4lnq9rhs3bqQ+j0Yj7e3t\nXdgNOj8/r+XlZa2srKQE7T/8wz9M4zs+Pk41oA4ODvT8+XNJ50jO4uKiCoWChsOhNjY29Hu/93uS\npO9973tqt9u6du2ahsNh5nwvvOdGo5F2dvX7/eTRbm5upnPtfDerpHSe1sLCQjrj77IdqD7vXuD0\nyZMnOjw8TP3hfMLDw0M1Go1UM4n7MZiQRfQT55E+sjtKOs/lYr2g4P0IFlCw+fl5ra2tZRTt3Nyc\narVaoj1j8YR5jDTPS5mbm9PJyUnGoJEuJuPCC87PvhvXZbQrIMpS+C4ul3kcFcM4MPSYb2/uCHpD\nHrrij3KY8XteqctoFLAraMbFNZf7nj9E3/gNneJubUnp0HGe48f8QE8vVePNUTn+93f7/Ph3XGZF\no8MjG1EuwhvIz7ixBfTQnQ/0rkeUXLdE5Mh36kd0z/vNNXJxvZ/8dqdIyj9jNfIFfO15he7MX9Ze\naGgvfuYejn8nWolu9HiiX9x9Fqtrx/c508SwEf+zeOIi8V0SUci4ceJojRtYeUiYe3A0klD9fp9g\nWhw7cDhhHEeu4vj43xPRoSmL0Q1H0AeMGEdzHN1hfrjGOykG6coNZYPiYzFSI8u/616bG9f0L9Iz\n/h0RxzyliRDzOYyhAPckfeG5F5YHZ/PjZ5AVi0VtbGyoXq+ncJ10zsP9fl+tVkuDweCCl1gsFtNZ\ndGdnZ6l6tiS9+uqrmpubS4U6MV4ODw/VbrdTeNEF040bN/T2229rcXFR9XpdhUIhKbB2u629vT3d\nu3dPMzMzevLkScaQmp+f19LSUkooBgVot9taXFzUycmJKpWKBoNBotvi4mIKG/V6Pc3MzOi1115L\n8/yjH/0onc3nybiFQiGdNUl4cG5uLhmgJO/Do76+KSIqKSFWNEcoozc/Go10eHiYDJ7hcJjmaW1t\nTaenp8mwdXSMit+sj1hKxStzgyZJ00Oi5+fnk2HrmwngVWpRYaguLi6mUC4omSssxsX8uAHmRReL\nxelZjhwuGxPNY/Ix97oc9MR4D3kyj1xHtsGL/js6KnlIiitHd4b9e6BJ/l3vT146B/c5Yu46iHWI\ngo7Iuctrn3uMEeRuRD1w1kAaY9qG089lE2kgOErufGLwwOcxhMV4oJPrC3fc3ZH2sDN0YcygcU7z\n+L6oe6VsTauo/91gxCn0cLUbUW5MsksTfvJNEcgK31R1WUpPXnshhpTHd71zEVZ1uD2iQ/GIBYdz\n4+4NRwcgKu93RoxKGGPJr7mFy8J3YeJVkn18LnjyGAMhlGcscY+jJdALlMLDA5KSp8nnvMNRN++X\n/x29HxcMbtRFFEy6KIAQxHEO3DBCkTvaFz0p/+19dqHCWHlm5C3vI8aw84H3Jc5PHCd8E2H3+Ky4\nRVk6V5CTySQhPZQHoB0fH6dilpJSjg+KMdJkNBql42Lu3LmTnnl0dKQnT56o1Wpl6NHtdtXr9VSv\n1/Xmm29qc3Mz4wnjkXW7XT18+DDVOPp/2DuTH0mzq+w/MWRmzBE51VzuqnKVwe1u2thmYQMWyF6w\nQLDA8sIbNkjwB/AHIGGJHYIVYo+EkNhhscFCCATGlsCiy91tuqvb1V1DjpEZGWMOMXyL+H43nvdU\nlBeWvq+8yCuVsiLeeN/33nPPPcNzzj33F37hF/SlL31Jq6urqYAm/dzY2FC5XNaHH36oK1euZDzF\ner2uvb091Wq1lMuEAULJBsoZuKL5xV/8RQ2HQ73//vuaTCYJ9ZHm/L2xsZEOBC4UClpfX08hQnYO\nzWYzPXv2LB1hIWVDRqVSSY1GI9EfIwNDxY341dXVZPhtbGxof38/7UwsFova2tpSpVLR+fl5JtQm\nLarST6dTHR0dpbngeCcKi+7v72fyxwjB1ev1tBsSfsMoZbedoyteniKirsViMdHfETCcB+jnaJXn\ns7COfe3BX4TvY30m593xeJxBnTxUFHP5YlglIkTugPv7YijUZQHVvCPiAQ2WKU1kho8d2YaBBdoW\nZQ9oJAaGyxdKYVxcXKT8O+dRaOe7mV3Xca8bjDhI6AqXQZ6H6boMvkA+x5AwNHR9Rx9cF2PEuXEO\nksp6ipEP5i8ada4TPUrjBhyOvId83Rn2fvJcvvPwpfMA7/u5N6QkvaBMXwZdSgs0x8NbXFuWFxTD\nNq6o47OlbK0mKZt8iRHjCjMmuNEQbjAyyak8i78RDaI/XgkY5nQhuAy1c0TGFw0CCBrBxMvQomXh\nMfdoPDfBjR1JGW9gGWLj/WEcFG1bZtz4/EkLxYYR6d4V73PjOyJA0Dl6phGJ8rg9itT77v3x+YyL\nyz2xOD62+t+5cyezHf/s7EzPnz9Xr9dTuVxWo9FIyAN5UYRoff6Y48FgoGKxqPv376dnPnnyRLu7\nuwn5Ojw8TPkH9+/f1+uvv65Go6HpdKqTk5NkLD158kQffvihnjx5onv37unLX/6y7t+/L2muEHZ2\ndlI19OPjY925c0fSPBH90aNHunXrVqrUDa0wtnK5nI6Pj7W1taVOpyNpjsbVarVkeFDQU5qfbVev\n1/Xaa6/pv//7v1UsLorZlkol3bp1K/ULw5FxsKan06n6/b46nU4mFA0iihHCfHLGIHPp/IwSGgwG\nqtfr+pVf+RU9e/ZM0hzlY47q9XpGEDuqM51OVS6X0zgwgAjDFgqFlBs2nU5VrVbVbreTse2hcnLq\n6H9Md5hOF8U13Rlgw4I7DTRQlUKhkEFAkQcYLKxDV2oeuvH6Y6Di1FviPVyjeXV/xkCfXQb4HPr2\nf5fR/P5ljuMymYDMjmkOPk8YQq7YY0jd++IoDzLQHWE3XjC24BXQQ2rJeVjbK5s7qkdfHWFx9Gw2\nW+T9scnHmzujGKM+Du+HX3OH1HUbMgqa+LqIqGHUifH+6NDmcrkMiABN6SM868aiz7ejuq4nYuN7\nf05sl+UPLttlu2yX7bJdtst22X7G9kpDe26dT6fT5MViSTu64DAcno+UtYpBlRwhoUWPDQvUkRKP\nj3v/PLbtFq9XwKbl8/lMbpJDiR5KdCg6wot+HIB7HctCTo5ERTTId6q4FwQ9HEHyQ0OlRTJvDF2B\nxkHrZYX3JL2QC+G7uE5PT3VycpLCMHhMjjbGEgJOD0cOPXbv3owjf+7x8AxHtvg9tOdIlkIheyYi\n4Q7CmiQO0xeeC/+ANEjz3KObN2+muT08PEzb/GezmdbX17W1tZWB3aE7OUXc60m3FxcXKaTV6XT0\nv//7v5IW4Ya9vT3dunVLb731lq5cuSJJCTV9++239d5772k4HGaS+3/9139dv/u7v5sKJHJky7vv\nvqu1tbWU/P2lL31Jn/nMZyRJH3zwge7fv6/BYJBy4Ci6Se5Xr9fT1atX1W63E5JTrVa1vr6eksW7\n3W5CZCaTeXXyUqmUPGmOO8nn8zo8PNTW1lYKAcQzEx0NXl9fT7xEQiweP+ERflsqlZKH7+fVgUav\nr6+r0+mo1+slnqZvIAVnZ2cpfBbLlziy6Ghwp9PJhCdB1yqVik5PT9O5gzTWCYnzhC6LxWLKr5rN\nZpkxeOh7OBxmKqmDiudyucy8QLOzs7OE8pGYzpg8ZSGivJ6rw3rx/CfQW0JunpRNLmbc3eboDQWC\nvTmCwO5EaY6Gx9MTXMYgg+AHR6T8N45Yx7AUuYWMD7lPOJmGjGJtR1SeMReL86K73hfkvW+coTnS\nwljon/fVc9Ocn3h3TIXxXe4uh52OoF2MExnMrj7WKrRhXXiIT8rmspHL5PobuRw3Ifih79DHedTR\nSqcFctfDiVyLvLCsvdLQnpQ95sMhQBSrtFAKhUIhwb8x+ZvJdXjOE3sjZOehwhhLdQibWi0uaIHE\nHZZlDBgXCFRPOEXYAxM7zIjh4VtsvU8ueDx8xBhiXSPot2wbqn+m327YOYQew6YYuzCeL9IIIUfD\nxnPRvB6SH67L2B36J0fA4WJpsUXWF2dcGNGY8t/E3/FMfgvP+UKM74jXeMfR0ZGuXr2q119/XdI8\nFDUYDPTkyROdnp5qdXU1JQejCKCNCylCT4RaXGGdnZ1pY2NDa2trev78earhJM1zj+7fv69f/dVf\n1XQ6rzSPkfX06VOdnJykPJpcLqevfvWrkqTPf/7zms1mevr0qd5++22dnZ0l46XZbOrs7EwfffSR\nfu/3fk/NZlP/8A//IEn6whe+oE6nk0JOBwcHqaZTo9HQ0dFRRily1h6J0iTck0/BtW63m8Jh7O6T\npN3d3RROKxaL6eBinjscDjUYDFIJAEnJsNnc3FQ+n1en00nV2TG6qMWFYvN16gqnXq9nQnT0OeZn\n8F25XM7MG8fHjMfjlEt1cXGhg4ODZEg1Gg3NZrPEL+7soEhWVlaSjIqGg4fEWB+Mp1KpJFlCvpKf\n6chZeYyH44I8/OJj9PUXw0HIUd9F7E7WaDRK8oAdrNxHf2NYhbIVvBfjnfuiLkCeUHrCFbfLNpdR\n0Wnh2fl8PmOc4TRLC8cHg4ISIR4SpJGDhkPmNMHo8qRv+IR3oadimskyYzOmNcTf+mefP5dr5IBJ\n2dQFaEZf/HeM4fT0NPGx76Jz+eppLM67MYyMUQXPY6BBE59znBjo4ZXj+T3Nx+o0cgPvZe2VGFJu\nKPhxEK7Io3XqExCRqlxucVaZx+F5xstivW5tR0TDUQxPgKVv0cCSlLGafXcE1zxnyN/HgliWz8PC\n5bu4CLgvjg/r2xnSx+zjcXQOertgdAZ3uvNbN7b4PcIyGoLQ5PT0NOXJ+JlgESX0RYMQdsOV/0cv\n0xEpWkwe5J+PNyKf0ViibkmM58Ob5OJ87nOfyxSlfP/991UsFlWv15Mz4CgnPFAozI/RQNGurq6m\nBNyoTDY3N1UozI8pYVfXa6+9JmmOgF1cXOjjjz/Ww4cPdXp6qs3NTUlzhGhlZUX7+/t688039cYb\nbyQj4/Hjx/rud7+r6XSqO/83lwuaPn36VLdu3dI3v/lN7e/v6x//8R/1jW98Q9LcsGm323rw4IGe\nPXuWMU6fPn2aFMZsNlO3202Gy7Vr1zQajdTpdFSr1bS5uZnuY+MGByxTiwq6oPAprTAej9ORLRsb\nG1pZWdHBwUE6JNllTqPRyORCuWEH7yJzlhlCppsiAAAgAElEQVRHp6enaWesND+SZjgcZoxCFJ8n\nN5NT6CU/JKXiorncoihhq9VKNAPZJM9NWmz9hj94JgdWk7wPcg3dcIBAIDCUY86Rrx/y8OBfrnkC\nNDxMf1nzvtNvMsluGuA4Igzo2Sx7cDnyHX7yYr8gaM4T0DTumHaEyHWL6wuXc1EOkd8GasEuRu7D\nkJhOF6VFuObRhOiUg+6DfjvKRV4VPOtOcsw34tnQG7kG/VyGOV19jMhT14duaKFH/H7oTdFc1z88\nE7nMvPuGERBR74e3QqGQduBGlM/H7GgVQAY8Fw0kDDDXwe4sxSgI8xeNTm+vxJDyRefGEwsOIsRE\nSAScewMOm7KwIQDKtVAoZGpbSItznJZNoDMU/YzJ3whYfidlk7dBFqLydsjVkRVHY3yxwfguDJyJ\nQKVisjj9w1pflnTpXpsLTf+Nf/YxuDHjY0LYMfaYfI1wyOVyGU8c796NMd7HYojoEYLU++EGpRtW\nbvDCY7lc7oWKu8wdxlUut9ghyli8rAY0Oj091WAw0LVr13Tjxg0NBgO9/fbbie4bGxtpmz50cDQW\nbw5aojDhXe8Hc12r1fTJJ5/o/Pxct2/fVrVaTdd2dnb08OFDTSYTXb9+XaVSKSnojz/+WDdu3NDX\nvvY1ra2t6dGjR/rP//zPNI5r166p1WolhUGI7jd+4zf0rW99S3//93+v7373u/qTP/kT7e7uSpL+\n53/+R7/1W7+ld955J6Ed+/v7iRd7vV5ao71eL5UpYIciCE+3281URB+NRnr27JmeP3+eCoLCa5xD\nRkI1nq80RwQ3Nzd1dHSUwtwo2lqtpp2dHTUajYSaOW+AjDC/8CL1jGazWapF5UphMBikAqNukGO0\n+G4q5p4xsJbX1tZSiI534Siys0uaG1nVajUpCUcBMJ4o0YCShm7w/draWjJEJaVaZMhi53NQEyqF\nQyuvTZbL5ZJR5sVqmRsfj69TKuXDb27YQB8pu8MQtAqZ6SHBqAAdxcUQc+XqSKEjPNHpXCYPJWXk\nKCEsGmvOFX5Ev/nnCJCXoaBPzGGlUkl99eZhQeYQw8+NTcaGDHLH0ekWoy3QOqblsBbhAV8zfM89\nvhOU+fRd6ZGu3Of6D7p4H/xengk/eckU6OHz7WOHHu5c857YP2+vLEcKoiwLq0WDgYFAvBgy4nvy\nVuKxF75N0mE/mD8iHR4qwrr1PANX2tGi90XonoiP0WP9/kynhU+ab8V2rwkjjlwW6Me1WD/HFx6M\n6WHRZdtdY3/IEfJxOZ1h3rgwHJGjyKDniTCH0cPA21kG3XrozpWTt2VehKN9IJnRMOL50+ni0M+4\nIJ3X1tbWdOvWLa2ururRo0fa39/PFGVkQeN1RzqzgKMB6AYkixtl9eTJExUKBX3uc59ToVDQ3t6e\nnj59mvrIrrZcLqf9/f2kMH/t135Nt2/f1s7Ojr73ve9pOp2m/CnCUCjMbrerb33rW5Kk3/7t39af\n/dmf6d///d/1V3/1V/r444/1N3/zN5KkP/iDP9CjR480HA61sbGhZ8+epRAV5R7y+bwGg4FarVai\nwf7+fkIk9/f3dXR0lJn7Xq+XQngHBwfpGjTAYMD5cAMUob++vq5+v58MtGazqZOTkwza4sYLeVCg\nEq6EqUXDuue+2Wym27dvq9frqdvtpp190jxnZjAYaDweJ2QBg4/SBuwgdKeH/8OH7kB2u12trKyo\n1WqpXC6nqunch+HjRUzhaVc+VL1n7B5W8xpHKPLodNJXeHOZg0LlfMK4HrpHTniBV+aYtYK+cHnq\naFihUMgYWYwTGrgSZh25AcUzI52Q/VK2sCS0c+cUA8KNQP4SMuU3NJ7hzqwbM/QD3oi7z0A6vV/M\nHX1mbnxsjrAs251MP9zIis4ehjbXYmjS6Yh+ZSy+0xP5zbuiDOc7l+PIRw8lRpmOzPR8Yzcsya1y\nnec6OxpuPifL2isL7TEZLGoEYYxRSwvhxtlfJD1KWebHy3KPyT0c95JcIUe0xicFyxwlxLlKwPoR\nDnYl6KhaDAM6WgVKgzfoC4p+OJ2cjv7u+NcZwuF13unM6McOxMR1F2DRA4ihMxSPw9vMDQYL8+9j\n8WNS/LkonmXeAM9yGNdDdD5OFxhOP/rM4naaYaS6t+ILdDweJ8V9/fp1HRwc6OHDhyoUCtrY2Ejo\nEzTF0Ed5wFN4UCAAzvvkRfFdo9FI5QRarZZu3ryp4+Njvf/++8rlckmx8//pdKrd3V1tbGyksF+x\nWNQ///M/q9fraWNjI+M4tFqtlM8wnU71R3/0R3rw4IEk6Y//+I/1ox/9SH/xF3+hZ8+e6a//+q/1\n+7//+5KkDz/8UO+9956+8pWv6O2339bW1lYydp4/f65Go5EEHLlA0tzIGg6H6YgVVxgkom5sbGhj\nY0P5fD55+L1eT+vr67p7926iLUYxvEgpA451oQDqeDzW3bt3M0fgYNhgTLhj4or24uIi5SvhKdPX\nfr+fkC/QHknp2e5A+FrL5/NJbrkAZ7s9hokbMl6Elf67AYKhByLtRgNhSWlxdBPjm06nKfQYz7v0\ndRBReBQhChX6QEOQiIh+gJaRguGKD345OztTv9/PGEu+5jGaoTdH/0A3KZuT69EEeIXmBkFUnBhC\nUbG7XIFHvHAqSCn9cb3DPw+1SXO5F+WNn20XoyLQmbF6eM7LSsBv9N91QnRSlzn76F9Pcse4ch51\n3c24HLSQFuUu6K+/zw1PnAgaBqvrX+83ujXqY5A5N6Kcdq7vI2DhdFrWLssfXLbLdtku22W7bJft\nsv2M7ZUgUliCbp3iNYFouAWI93F+fp62N2Kd492BuPhZbngwy5IJHdqN+TVYrnjPHtfmXVjYETIF\nPWMc7iFLCyTK0SqPOwOn+xjc+vZ3OgoTz9SCbn4gaYSjl4WzeG5EBX2M3tyL4V63+B2OxrMA/o9J\nooRo4tEx7lG4Z+zXPCwW++noEvSO6JXTlOeBTrjXzfyTwEyS8s7Ojvb29jLFMz1/rFCY747ifuaE\n/uAF4ZVznyNlIBR+9t2jR490cnKiarWaQSXW19dTmOn+/fuq1WqpCvfTp0/VaDQySe940OThNBoN\nfeMb31CpVNKf/umfSpqXOPj2t7+t4XCoP//zP9c3v/nNRLfvfe97+trXvqZ33nlH5+fnunXrVkKA\nHKE8Pz/X48ePE6pGAvx4PE67AkHqKEwLD5OHxNi73a5+8pOfaHt7OyGZ9Ofk5CQV+Mzl5gnc5GV5\nflIul1Oj0UgIzebmpkajUUKcCU1ISrk8vuPHk61Z05RXIJ+L9z958iSFOZFfoJTINZdX0gKVIiwM\nD29tbaVjacitol9eSBNe8iTtQqHwwi5AGmOH7r6OkDOeb0VzWRFRVc9dk5RBeZERyE5kIHRzOeso\nPsiCI3ye+wId8/l8Ji3DUzG8f3x22kfaxFQNv854GQPP9Dyf09PTlNrA3PNe0FRkgqPkUnYjE9f5\nLSFhxgWa4/98bqABOiGmpiwLZblczOfzmXQLDkFnvTqaya5UEGKQbqc7esxRLr4HQZUWu279uiNv\nfg863PnCx4T+8eiVz3HcrPDT8qOkV2RIufJlkMPhME0GDBljtyROrq2tZeKzENNL1fu7mHQ3LNxQ\nwGjysFWcGD6fnp6mEIxPFs1/G5nbf+Phqphs579FUHreSIz3swBdSEh6IW/AjUdpkdPEAqL5+COM\nDa15tzMqxgl0cUOCcfpvvDHv8fwvp+WyPDeO2BgOh5n8khj/j2OAVvCiHx5LGJbvfacUeRMbGxtq\nNBop2XowGKjRaKT+YxQ6HTmjjbmPOXGE+1yIorhIZL97927amfbs2TOVy+VkvG1tbaWSCvzmzTff\n1Onpqd55553Ulxs3bqR+oqD5PBgMtL29rTfffFOdTkff+c539MEHH0iS/vAP/1ArKyv69re/rS98\n4Qu6deuW/u7v/k6S9JWvfEW7u7t6/PixfvM3f1NHR0fp3D/4YDwe691339XBwUHKycKIWFlZUbfb\nTYnqtEqlkpKcY2i+VCqp2+2q1+vp+vXrKdfIebHb7SaDgOT3arWaDEmMNuQC4eVer6dKpaJarZaZ\n07W1NY1GIx0fH2cMm36/r9lsfuTH2dmZnjx5koR0uVzW9va2Pv/5z2tnZ0fD4TDlJRGy4kxJ8p2k\nxWn1s9ksGZB+Dh/H/GCYMIbJZL5V3r9jnITJ4H3Pg2Ke2CEa1y5yK4aHoI3vMvPrxWJRlUolY9Dx\nbIxjHEZPZu/3+8mwi3k+KF3yc8iTYhzxnLooT1z2I08wSJCTHpr3vFf0k+8ixxDwv9yHwR5DRO4I\ne4gLesKXXuaBFhV+TMXAeVwWhowy3Pvq+UfRAPH8KE/LYcee3xv1Hc/w0C0lLxyYcJ2PY4EO5zmk\nLPBstyegacy387HDO17Z3Q3ISNNlhmVsr2zXnpStv+ELJSZ5xTONJGWYOE4awrRer6d8D1pMLoOB\nx+NxJmeFiUBxxnipnx0U4/7RG+A7FDv3eZ4AAsHf7+9zD8SvwdC5XC4lwtKcGZbllNEiquQGD/31\nEhAxjyIKBt7jVj39Jt7vi8YRMBdk9CUaQd5XDKx4FIYbmC4EoEukQ8yLw+tC2UiLwohXrlxRo9HQ\nwcFBJqeBfBMEsOemMEaMJTeynD88l4VxdLtd3b17V9euXdN7772XQXNASTBGHj16JGm+Vl5//XXt\n7u5qd3c3UyQQ5AoBPxqNUiL2nTt39ODBA/X7ff3Hf/yHnj17pt/5nd9J1/7yL/9S169f15tvvqnv\nfOc7+tznPpf6//DhQ73xxhs6OTnRzs5O8iA5Eqjdbms2mydls0ZJRMaAmE6nqSAnhTEvLi7UarW0\ntraWntntdjUej1Wr1dTv99Vut9VutxMK5Nv0e72eNjc3kzGC0ba9va3pdJpKL0jz3X7Xrl1L/cbQ\nZC7I4dra2tLe3l7GUD84OEhn+l25ciXN4fvvv68PPvhADx480L1793R2dpYMnM3NTe3u7qbSDhgT\n9NNRCd8Qsrq6mg5HrlQqGfQon8+ng4pBqd2o4zid4+NjnZ2dZersYEBFNNqT3vk/yJb0Yg0eR5ZY\nRy4naOS2cY8jwOTMYpThREtzxxsjwfMNpey5psj1iLogR1zWggxx3RVxjAa4HnG6k4PFvHFckiPL\nnm9K8j2ffe6X0Z3vkNluUDl9mUvmE1SOunTuTLreoUXD09HSaEiR2+g6ItbfQh56Aj/vH4/HydCO\neg9ZHDdNoH+hgzvc3D+dTjMlStBryN9icVH6g+8w6N0Ai1GpZe2Vlj+I4RWuRUXnqAGLzWvwuAUu\nLWpTsf2XZ7higzFhRkdPllm3fOdIFCG4yMA8fxk0SovhQv/snoB7ThgmnlQak/d8sfEXYzPC1u41\nLoO8uceNHlAh3w0TaUMfPawHfV1A0JgHr3zuwg2FT58iDX1B+wJ2YeOhVH7rnlNMvuSze+ydTidV\n6X769GnGaEeQubfufY0GvNPGw7IYDyAW0+lUn/3sZ1UsFvWjH/1I0+lUzWYzzTe0nkwmevz4cUIz\n3nrrLb333nvq9/spMdlDJqur8wN4B4OBhsNhMoi2t7fVbrf16NEj7e3t6Ytf/KLeeOMNSdLf/u3f\nam1tTV//+tf1gx/8QJubmwlZ+qd/+ifdu3dPzWZTDx8+TMpaUjIcqMFFQry0qMItKRUXZQx4rCAk\nHhIC3cNgpjwA1eLxkCn7wMHP0hyRGg6HaSdlt9tN7yyXyzo+PtbVq1d1eHiok5OTNEaQO0K+t2/f\nTkbteDw/Y293d1ez2UytViuhg/fv31er1dInn3yijz76SJ/61KdSSLjb7WpzczOz08/TFigLUijM\na10x/nK5rFarlRwZ5328fJwLr7JObS2cQd/peHp6mupcRQTEDauIRjlf41h6oV5kJE4U5+5J86Kj\n7BJlDL7rmjA6ss+RWl/7ntYA8oVeYFMB64n+8zvWBfzkCJOPzZvLa5ddpJa4MTgcDtVsNl9A/3Gc\n6JPTOK5VeIwxOMpLf6IRQbiU58R5ZAwxnOYOfkykx9gEZaQ/k8n8ZAJQ0KjPXb5GwGFlZXHWbTSy\n3CDEWGbeWPfuJMf3+Q5RjHtHv1zPoNfdLuEZUWbH9soqm0dL0hX9spikI0Su6JzY0ROKjOKK0xEC\nD7FIi91Cjib5IvVn0C9+xyQsM46WhQ5p7hksM3r4PsKV/r2jeuQp+S4VaOL3+dg9bOIonOcQeUgs\nMq1/Zp6iUcg/wmfQ2+c0jh20TVKGmemLe2a+gOGxlwl+DKzoeUbPmRDVzZs3Va1W9cEHH6R3RQ+K\nRUeffEwYvRFVBfGDn3q9XurPF7/4Rc1mM73zzjva2trKIBSOmh0dHanVaqVK6j/84Q/V7XaT8mW3\nmzQvHYASPT4+1oMHDxKS0+/39fjxY3U6HX3qU5/SV7/6Vf3Lv/yLpHmpgq9+9aupQvqDBw/03e9+\nV9I8J+vq1av66KOPMiUBpDk6BGKCcQA6JM2VKYLSqxSPx2Ntb2+nWkJuEJDDw/b9brer0WiUlDCo\nFoaJO0r9fl+NRkPD4VCtViv1T1oYS4TN2FUoKRlm7NDL5Ra7JPFor1+/rk6nk3LmJOnq1asqFou6\nevWqjo+PtbOzk/qC3Nrc3NT29rZ2dnYydANtpWYUeV4gV9DLHQjqDFG5HVROUqasBQg265ADo5FV\njjCwnjwC4IYM9HHD3u9HniBzuIahkM/nU/6YI8cg3yC4zD8GCv33d6EDYkRAenF3lhtuoGPIYnda\nI8LMe7gPQ593sLbhFxwC1xcun5BD/szZbH6qA31y586d+dhXnsc15sr5zcfE/GPwujykP+gk3xEX\n5eR4PFan01G1Ws2EmT186cgZzwIJch3kBZZdn8Zn4rC6QQSvcJ/vlnd+nkwmyThznQa/RNQxpqN4\ne2XlDyCCw5yxPIDnO0Cs6XSa8iUkJRgVD8ULcEmLMvFskY6Wsof3YBp+70iHM4ujDTG2CqNGQ9DD\nhT7JjNUNymhgRu/DFzPCC7pwzfMFYojN+wSNovfinmVMAoyfPV/Ncx+cBj5u5t29S5KcHZWSlHJS\nMAhdkEe0zY3VZWGz+B1CPdLbjUEQA2leGuD999/P5DxEg9iFAzShRZ6Iwh0Pazwe60tf+pKkeS7M\nkydPtLW1lcJ4sTLwYDBQtVrVgwcP9MMf/lCSUuhqNpsl4UZ+DTRrt9u6d++eSqVS2jp+cnKi4XCo\ncrmsr3/96/rggw/03nvvSZrXn5pOp3r27Jm+/vWv6/vf/34aAxXNz8/Pk3HBPHEGHWMcDocJrSHJ\nmi3VIEXS4igXkuFdkeEtr66uamtrS5VKRfv7+5nt7lTNxmjHWPTjSjDGIk2RB2tra8kIqdfrySBY\nXV1NOVf09fDwUO12W81mU81mM4VLyaeqVqup6CjPLJfLKhQKOjk5Ub1e12c+85lkZBJyrNfrqYK5\nG2Ae/nWnzfO7CP8hW6lnlc/Pa3p5xXzyqtzQcCSBNQOy5A6WK0hksstMR/ZdTrhMZDweappOp+lY\nFyrHS9lcUfghIite/HaZE8tadz3jjpAbRJ5CAY2cNm4cehgOQ5CNJg4SOAoS9QblVTwVJKJVjq7x\nGZryvRsb3lx/xc0DzI8bL9CYvkqLvDsMOXdefW6YR57reh4dwxp1MIUwOmkTPrf8defZxxYNVmlx\nQoqHdmkOHCwDCCL9Yrssf3DZLttlu2yX7bJdtsv2M7ZXVtl8GWrjiINbgVTFBaJ3OJaQAYnP7pnF\nEJtbqR4Lx2r30Foul8skn9PcYvZ3MC7QMjwNt3KxxuO4sY6BlB1B8uQ3fxcNZATvI3p6NJ4f7/dd\nHQ638z7y0TxEGUNmEblb5mW6hxI9JLy22WyWPE6Hos/PzzUYDDIesT8rzoc394aW3edQcPxMlW4K\nWT558iSTs+C8Bv1JwPV58PHQV/f2oif8xS9+MXOcy9bWVvJw3aMDEi+Xy7p586Z+/OMfp/fdvHlT\nhUIhJXrX6/V0rVKp6OTkRJubm6rVahoOh2lMnU5HpVJJv/zLv6xOp6Pvfe97mfypf/3Xf9WXv/xl\nffLJJzo4OEjI2f7+fgoXMW+OVnCeGuE5Txrm+Bj6QLL3ZDLRcDhULpdLOUfOh6xfkJ5isZjQHBLq\nz87O0tE5eN4cIAxd2BXGc0GqSqWS1tbW0lxMJpPMkS6UmJDmSepbW1sqlUpqt9taXV3NHD4MsrCy\nsqJyuZzkFzvTrl+/rna7rcPDQ92+fVvSHAE9OTlJfEGIS1qUqWAHs6+vUqmUDnFutVoajUYJjRuN\nRjo4OEi7pOE/SWnrOghSDPGQq+K5avGsPZAFR4DjX/f2nffz+XwqeAwP+fu9ErWXGYj5jvAFqFQM\n7aND6AutUChk5sZD9/TH5aj3k7BgRHeQ2WxUqFarS1F7T0+QssnmHh3gt9AceevrLSLvUfbGtJoY\nxiPfzHW0R2u43xP8QdCYT57tRTddVvp9Ple+KSqmMUQd5FEBD+F6mDtGB3yXYNSPjjJ6WNT56mXt\nlZU/gDBO3Bh2g6h+ICeJyIR+/MDbmD8VQ3A82/sQ4VZvL1O8LNxlzEainYcguD/C5MsYnmd6EqFD\nzX4tQpH0j3e4Eo+hPR87zO+5O24oOUPD7AguLzHgRumyBUVfCYH63DBXlUolIzDJn+K5KEDnGQwf\nFrP3xQ0pp7/f799Np/NkShKU7927pydPnqSx+1lvMf7uysWhblrcAeSKhVyKN998U+fn53r33Xcl\nzUNG5+fnScE5bYC2b926pQ8//FDn5+cphwbnA+PDw2nklty/f18ffPCB1tfXdXR0lPp55coV1Wo1\n/du//ZsqlYo+/elPS5IePnyou3fvajKZ6Mc//nHaoSdJ7XZb5XJZs9niuAhXeoTa6DMHVhNGG41G\nGgwGunPnTqakwMXFhba3t9OOLYTb4eGhisViynUqFAqZ0gmNRiPRmWNYaLPZ/EBfHB/CB8wda2w0\nGqlarWaMEDe0kT3S/NBmwqmNRkOHh4eJNzwHaDqdqlKppDVVrVZTWPXevXvqdDqJptVqVdevX8/0\nx3NvCoVC4lMMTuaekLi0yH2Cv90YdMeA61F2Od3cGXTjhdCay0xf36w/N5r8uW4MeZ4Kn70MgrQI\n7eFM+Dp0w4O+esiXtYeydHmATGNMbgTCq/CwyxqMqWVhTYwCjC36wkYAdxBdTyH7kc2u5H1Ti4+b\nv8ucTil7ogXzj0GELEWu+fzTb88V4n2UF4I2uVwuc1KAO9yeG+ty2R1JrvkufU89ic6pAwhuC6DX\nYx6w/3Vn1sPB6AGnWdyJmKHrS6/8P2wwckQsmCQG4szIonHjQXpx6340ijze7sYFBoDXIHFjhAXF\nPW61x91+rhCZnKi0Yx5PFCbu/TjK4wvK+0FjXCgwT5zkOh4L+QL+DsYRc4ig9TLjww1UN+IcTfO8\nKf76HPr4PTmQPjoiR/9Qeo4IuZBywcH7lnktywS58wwIzb179/T8+fOk6La2thKdoreKkPBFHnPf\naFEAcqbc66+/rkKhoB/84AdJebvh6YUqec7du3d1eHio8XismzdvpmeSE4ji8x1fBwcHeuONN1Ji\nOFvppbmCvnLlih4+fKher6e33noroS6TyUQ3btzQw4cP9dprr+n09DQZYHiWKysrKadtmSeHseve\nHjzMQcAIPpQgOUTNZjMhWSAXrLVicX4+niMN1J2aTCapxAJ8ShK285GkVDcKw2o0GqX/s7vMnTfy\nwVqtlnq9ng4PD7WysqKNjY00XycnJyoW5/WyUO6u2JvNZupLvV5PtCF513cAxvXN9m2vBcZ2f0/M\ndqXA4dmerM77kBUYf4668Bd55LuzoKPLRZrnvsRcoLgjzWU0vH92dqbBYJCS0eGbXC6X+uD9cBSK\nZ7BmPB+Wa8wvypq/GO8+BpwEd8ygE0ZCPAbGnWaQWcbqDpfzpBs60MONE9/p7JuJGIcbi06faIDF\nXXkuN7z5OpXmfOhFfHluRMdcLtMvT4BnLPCeG98+p8yH85rTBqfBDVmPPjEGR8B8PP77ZXrWc+CW\ntVeGSEVvgAXkW0edGV3hRyKjLCCs38dnRzWkxena7k24QeCWa9z26My3bBeVlD1lm7G4Be5j94Xp\nwp37CFPAPP5MlDfM78wArdzDdCUMYhQRK198LlwYFwvXUQenB3PrNM3n52UrKpVK8phdgHk9Ga+H\nxftZIA7/updTrVYzULPvPooo309DLkejkVZWVnT//n3t7u7q8PAwGTXQHW/YFx+Cwo1Fn2P/nRtY\n0jw5eHt7W+vr6/qv//ovbW5upi3ppVLphSR7DJvbt2/r9PRU+/v7KREaYYvRCXKyurqaDjS+cuVK\nQoCuX7+uZ8+epZDYm2++qbOzMx0fH+v69etqNpv6/ve/n64dHByoXq+rUCgkowEa0uKacUMfGmAA\nsOWeMFi/33/hvEBq7TjcXiqVUlFK+Mnf4d5sqVTKOFigVawrL2EC33O/8wbPZoeRG4icrYhjcXJy\nksbhSeoxZOGOXj6fz5SpINyJYRQNF+hHKBKaEkpl5yFrh2u9Xi8ZZigx+uBJ5DH04fNHX52/HVXH\nOZWyhy+jbL1+IPxBzS760+l00tgpdBpDcvTLjRU3eHh3RDPcEXeZ7YiYh39Ys/TXHUhkgRcVdsMN\nA4vnxffR2OggLeQo7/czYx3hczr47js3FrlO8wR8N5iQrchtvyfSIqLxrgscIEGHOGrkKB86Cx0T\n3+lRKnfM/LcYcjRkL7rRjTPGEcONcWefyzN3NF7WXokh5bUcfCHCrExU9PidgSEwQoJ7MVKkrDED\n8Vwo+kKIMK4rSW/AxChmRzdAYpYJWpjbJ9PREzfc/L0oAIQ0O7ekbL4Mz6E5I/g1/42UPc7BLXdn\nVheqvgjZeh9RN1AFjDXeG70NLHyUi0PKTgMgfebPCwhGQy1CvPSDvkM3N8jds5nNZvr0pz+tw8ND\n7e/vp5pN3BcNYV9cLuigqxfCg095N7h5EYgAACAASURBVAJ1Npvp/v37+vDDDzNFJ6UFbC5lC/FJ\n87DYxx9/nN5BCQBJaafQxsaGisWi2u12osPGxobeeecdffrTn9bR0ZGePn2awnflclnvvvuu6vW6\nbt26lXK0GNdgMFChUNDx8XEmhOEoXS6XyxTQY35Resw1fW61WklJ+nE1w+FQa2trCVFgVx80A1Fp\nNpsql8sZR4bPGOZxe/x0Ok15Y27w0r/JZFHh28Pl5Hyw9v1929vbOjk5SXPLfZubmzo6Okr3E7Zg\nDuEVvHx4Gjq1Wq1kFDgPu7Ls9XqZHLBGo6GjoyO12+2UIyYplT5ANnqxSj+6hHxTmq8XZFUMdUwm\nk4yj5buAMZDimnGEl3XiyIs7Tr57ixAlhqTLU69L5DmW3hwVZ57YNey5tl4MmvfyrmhE0hgn1wjf\nebqCpFTSwvWZI4AvMxzQhS4j/fnRWYPHoY3f4wYvOtL1pNPL3+3hO0/vIBfR14zfE1EgRyrd8Y7o\nk/OTG2mML6YR4GC7bIeOrmNdr7GmPELi9/209koMKZhw2YQxaIQZjQlwr0b66VYiSssZMoYb6EdM\njsQTjedROVEjyuXP82R2vnfDMYYgY/NFkM8vtoL6AsLLZcGzCBi709SVF/1B0CBsXIBH9MuFoguh\naGTF0KnDq87sLgQiIuCookOxCGoXfGw2YDyM2yvfYlBFqNi3NNNviizu7e2lc9pi0iH/j4YEig6e\nc6FRqVQS7fC8UQpvvfWWTk5OdHJyoq2trWQk8B4EXavV0sXFhe7cuZPmFRqBQvgRIlQ7n0zmSdv3\n7t2TNA/tEQra2dlJuTjSPNfn7OxMt2/f1mw20+HhoX7pl35J0hwhwHhAWbrgo5SAI5LSXGHMZrOE\nrk2n0xRKrNVqWltb0+7ubtrm7gUpye0CpeQaBoejDh72nc1m6XiZwWCgs7OzDGKzurqaSkdg4DCP\nlUolHaPi4XI3yN3JcF5cX19Xv99PeUjSHGVbX1/XYDBIxSF97YNMQBuUHc/wyt/OfygFEDaMTLz7\narWqvb09HR0dJb7Y2NhIhUORCY7EYwCyNjyMzrPpI8Yt9zImHB+XffSZe11JTSaTdMTOZLKowo1S\nIxyGUc08YQT6OqOvhHXdEeU+D295or4jI8gLl3/+O6cJfXUEKeonrjs/zWaLcDbPdic56hVH1Tz6\nAO94OG1Z/7zvL4tGuA70cjRuzDnYIS3yWD0czDUHMBiDR1uYf3+u98V1lDd4BX0RK9vzzmhfeFgx\njpfv/cgb6B2d+9guyx9ctst22S7bZbtsl+2y/YztlVU2jxCee0ae8ChlD98FtoyICxatW+7ErTmC\nw98LUuHImG//pzmUzX1StsK2X4vwqo8Ja53+R2iWPvlz/C/hBqzpGJaI4/PcIIctoQ395HvfXovn\nEMNi9NNDavwmhrYcoSKUhyflYUlCL+4BuKfg+R2eqOyJxqB9HtbFc4vevHs/oBnb29vpHQcHB0u3\n8ft5WXHuCWeBmhGmdJrwDEp13Lp1S9Icefjoo4/UarWSt+xhT1AuttQTbvzJT36icrmcEIBGo5HJ\nk9ja2tJgMND+/r7u3r2b+vrJJ5/ozp07yft68OBB4rPHjx9re3tbtVpNH374oT71qU+lZ3Y6nZSz\nE7fcxzCuo4NsCyfk6/Sr1+vq9XrK5/MpCdoriROWAXXx9QBKRYmDZWdNeuVveN5zU0DWfF2Anjlq\nwTvdo/UxwrP5/GILP3PY7XZTkjo08PCG78bz0hkXFxfp3fBWDLGAHvmZYp4bsrW1pclkcXjv0dFR\nJuxdq9WSbGR8oC9OTx+foz2OcEgvljOAN1xOsP59nnwnLs9knhwVhjfOz89TtCDKZvpWqVQyPMl9\ny/oIvaUsEsh1wssuXz3E5DlMzoPwu8u0uGvP86J4vufUkccHDVyuQEvnN8aFXPcQGjohFsWkr85f\nThufb/76fVHWRxntNHfd47zq+sYjHvyW98XcMH+mp0wwdp8n15mOZMYcLQ8zgnzH/ERvr8SQ8h1d\ncRFIC+jSt8y74OE7aWHoOKzq0CawJzF6h04hJIvCDRImF8HihoQrN4dG3QCKCtQhzBgW8LFFmFrK\nVgYmLwQ6OoTpBp/XQpEWoQeHpRmz00GaQ/OEA3kXjXF435ctcIzYmOdGDoX30YUJRkSEyLnmAgD6\nR6bntx66cKPWoenJZJIOuJWkZ8+eZcJGkUfjllzfYcUYWegxGZd+wVMksR8dHWXqKPEbxkjezunp\nqba2ttKRNZyLRm0iwlTSPAx4dHSUduzV63X95Cc/kTRPtkbJrKysqFaraXd3N/HU5uamRqORzs7O\ntLW1lZQXOSrQLcLt0MHHyBwyFxgSJNNj5PgxICh96j35jjP+3+v10vEZGC4YFdJi7RNK8zwpDHKc\nLMoy0Nh55ZtcmFsfpysMd+JwNLiv0Wik0BUHX8d1yvMuLi4y9Z2oiB0ru0fnLobYp9Oper2eisWi\ntre3085TjrdhPUAH5gLF48+D9h5Cmk6nGUOD8UYFSH+QNR6Sgaaz2SwdsoyM452e9xjrA/EvGgcx\nvQF6QmdoGEO2LwurOU0Zq8tL1gP3xfvduWYt8EyqydN884anH/h46KOHTz3XCefAU1RiiobLQ5dR\nUT/Q+J6wrBsd5Hl5LhLXyAEkdcT1NHzkaQIxYR3+WOYIw3PeF9dlnvLB+2JOqxvfDqp4SkVMi1nW\nXllBTppPJpPOAD2BDSJwDWF7cnKScitoy4jlRI8txpZ9YlDIEUFzgUJjDJ5UySJmXPw/9sMXZrSw\nfSzkSkmLAzLph48Pj8obAkpa1KGBrhg+Uva0dvdupGws2WlHczo5muRekRt+cVwkkMaF43zCczyP\niMKEPj6nSUycxfjGsGMbvyfLgti4gRPzASIqw/MRLNzruV6j0Ug3b95M4z8+Pk5b9xmrJyMjgNip\ntrOzk/oKz7I13Hdjce3q1avqdrsZgyiXy6Xjb4bDYRp/o9HQ6uqqRqORms1mJvkXI8gNKDd0oZuj\neIzBd8JGpKNUKiX+j3mTXD87O0tFKOlLsTgvicF9nuCNAHdHweeH30JrLy3hAtw9feiKQeG86Dtk\nURaeqLy2tpbZYUfyMwYt8gyeoy8Y46PRKPGC96VQKGROrpfmMhEniPIJbBhot9tJRkUv3BW2O45c\nQyYPBoNk4CPfQP5YN45u0Ffe58aBK074wx0txo+hRJ6bNF9vw+Ew7fzkWezYov+e78J8npycpHw+\nrvmuO+acvjAud64iKs+8R+TCjVMvRcH7iDQgk2Jz3cD7MKIwUKKRwDhfZtS50euyHP70fDTGH9He\nSBvWPQgaY+Qd0MF1lDs/HulgvplLd2Kcv3wOpMWOc1+Xy5x+tyfoCzTxCBfXvIbdsvZKQ3tu6cXd\nBK6EmAg8TJ/g4XCYMXQgspQtyhhRHkcOHE7m/dy/bCJZ8PxzFIb+LjOy/H73ylwASNmKt7wbpnHF\nxbsi4sL30WB1SN3hVA8n0D83pnxuXDHRNz+VO9IuevHU6IlG1mg0SoUKfRzMk/fXkTNpAcV7UiJC\n3oWW0wIkBGHMOyPKiHJgLugTgtHnyeuXOALDc+k7ioqE6/X19QzShUEiLUI6w+EwheMcquaZ0+k0\nHXIrzROca7WaxuN5HZ79/f2MwphMJnr8+LE++9nPZvh0ZWVFpVJJ+/v7L6CR7D6KiCT8Np1Ok8Bx\nA9SdDmjjYV0PZTnPjsdjVSqVZMBGfmJdIDh9nUIznCw3bHgnOykdJSA04LtI4e+oXFxuuFCPaxEn\nhT4Ui8VUGoECq6urqzo4OFCz2cwkW/NsP/8PmoIsxdpc1WpV+/v7ms1mKfSLAVIqlXRwcJAcRJeN\njrS6rJAWu+TYVYkh62iOe/XR4KbP/jze5aGfaNjRT1BZaMM7MKCiQeTP9rICIJw44258L5PRjuzT\nR+R/lK9uGMT5j6g/9zha5rqEviOjXCYh1/ykj9hcTi4zxDwFwOcdvneeg97IIubI5wnDqlgsvuDU\nevgbow8a8xzGG41vB1aWNfiA++KcuFHtCJ3rWH7rPOm8T799TLG9EkMKgRhDGFL2AE4fjCskFoc0\nF+7Ao0yGGzYQE4+I5kwTIUf3ZD1u6/dy3YVDRE5cKLjRxjtdCHOPf/b3oTSXTaaH0HyxuTCEYXkH\nIRpXdm4EITToq3sN0NIFJ9d8oU0mLx5QyTEi5MPQN1AB3hm9OgS3P8/r9rA4oA87thxxcwMEmuLR\neR4MNMUT8flw/nOkwo0anzdoQ60jDH88aUnp2AjWA8KB/pRKpRSe6/V66RBlpwUoE6hTPp9Xt9vV\n1taWLi4u1Ol0tL6+LmluEHS73bQGOaJFkprNptrtdjoepd1uL0UjoSW0ccdhNptljDDPYcPo9XxE\n+NBzqWgYp74e+X46XRwPE50dr6lVr9cz3j4CFT731AGQL5TCbDbLbJ3HSKTPUUn4Goc28F+5XE5H\nwmDYOILdbDY1Ho/1/Pnz1O9ms5mEPmFIaEwlefoCrw2HQ127dk29Xk/dble5XC6NgWNjjo+P03yx\ntqE1CBlHN0lKBpTvzIryxRVWlFWsaTcOeLcrSoyjOE8R5ZTmiHS/31+6sxraeuiGayhXdpnSQMBc\njsXwliOnNJANL8Pj6wNDD55wOvGswWCQdglDT/gqAgHIK48wuG5DntJ/50WniecK0Z+IskeECF3j\n78aRWVtbU6PRSOgzzyT1wlMdoDe6gntcfsbUgEg315sxBSSO26/x1yM40UFzZ8hPqnhZe6XlD9wC\ndQPDPXIpm38SjY6Li4t0ijmEcwKw6FHEoBggMRgcLohiMqgbGQgaFJ+HYaKFLr2YyBe3k0rZHLDo\nDbrFjoewzDr3cBTPj6iYK2j3KmAoNzSWbTf1FvNHeD7IAO+KaN3p6akODw9TWBIa8Xs8dubZw1fQ\n2o1oQjoYEs5PnmPnAhMDCQ/Lx+rJxHHufeMCvOiekAtdFB/0phwBZQcoBkl/oNtoNMoYhBge165d\n02w2S2evMccIae8j/cEgoCo4vxkOhyoWi7px40ZSmih2aLK5uZnyetyD9NwiP1sMYe/oqKPNGJ7c\nB51d2KOkl9UXQzjG0DpJyswp/MZ6psq6o1nUawLddofHc29AxRkHiBzKxBFY5AKKOOZVephvOp2m\nY2BKpVIqJkvtK96HcsV4435pgVqDTLlRXy6Xtbe3p83NTe3t7cnb6upqOoeRzQMuSz106coryiqQ\nHEdrQY4ieuZORlRS0cF1g4z3ufNGMdp2u61Op6PRaJRxlnlmRCFidIB5jAoSJA0Z5nlH7oiC2sOn\nHtaK6A9/oYPzDHoE4xyk0hGTZeF0nsfv3HiLzq0DFjTPeVtWlw955AYRNOE7z0X2DUNuQCN7+W10\nMNwg8ve5PmOtRUTT5yKGbplfv+bhd3Sj0wlegHfd+I4GZmyX5Q8u22W7bJftsl22y3bZfsb2ShAp\noLqYFOiwZ0ycc2/ALWyPdQNpxriztAjnYFX6MSigU25p8zy8ZYfwHRWhD9LCK/XYrKMnPt6Xhe88\n74P3ee7Fsti8o27QiERVYFCq9Triwnex7IN/F0OGeD+OaMUwnPctzkOlUklQNqEm3zkEZO0hSsYH\nyuR5GdDXUR3eTZ7U2dmZhsNhJpQYQ8Ee3iB0Bz0dPYE+8EnMI8Lrgda+fd3REZAp+u5ek3t0QN6z\n2Uz9fj+TX8Q6ICzmPMmOO0J8jUYjobEgIH4mW0Td+v2+Op1OJpwK3aiu7/0GFQNtdh6KIXPewTVH\nTz3PjblwuoNAkZjPmuD9vr7IJSH/jrmCVnjY+Xw+0cY3bzgCxzyBHHGUkCevwksgadAND7nX66X+\nMffT6TTNE/PMbk4qvoPSeYmDtbW1TO7WaDRKOXf1ej3t2rxy5Yr29/cTXxwdHalaraYE/1jIkDE5\nMsG1QmFxYgHzDG3gQw8X+TVQIa+iDd8gN6EV1wj1n52d6eTkRN1uN4NwIxvIvfT7QHaWIfggHPTR\n5bcjzS6/YvgohsRIFvfIivO3o3sxRER/+v1+Zh0iYx0xlV48XHmZfqS/jvwxF/7OmM/lSI2fsIB8\n9f44Uh378LLQv+809L4yZteLjnw6AuXoZgzR5fP5FGb06IS/j40oZ2dnaR3CB45EMU/I5J+7HCl2\nRTj8D5GA9GLNjphQ5wbSeDxOYQhnWIgJ7OuGDIaVQ83OmD6hcSK4z5lLWoRFPIna4UHizDHPy0N+\nMcxIc1iS5pPrDMvv3eDhfX64I4I/JgE6LWL+QTR4vbmx5cbdsntJBJaUhB6LwJU39HRFGUMm0NaF\nCwvD4V1PsPRF5H2Hbg4Ve/gC3vAwQhy750qg+Mrlctqqj6B2qNpp6sJlOp2ms+16vV4KD/JuD6O6\nQpIWtXbIR/PwFRXDNzc39cknn6QxEuI7OjrK5IpAN54Td8eMx/OSBqxtDxl4cjrzi7MzGAzS2J3v\nGHsUkl4ugDAh68XvHQ6HyWiF990gi+vS83I8h46cM8bPETC5XC45R9KiFIXzCvzlxr7ngklzWcjB\nyBhCXKvVaiqVSur3+5pOpyqXy2l+STR22UHb39/X7du3VSqV1Ol0UvkFv95sNtVoNDLGCXzLnLsc\nJGyKzImbMlxBY/jF/CJXok5/7keW+mYh/7uyspL4s1CYb9bo9/tpJxxzyCYCz6+j8Q4PQ3sC+7L0\nDJ9fl9sxrcHlko+d9Q6Pej4wziCGD/PkMnFZfk50QDx86e9nrG68uEHnSfp+v4c16asfneMJ3m5Q\nxYaco4+e7sDYPOzpciJuGvO5cWOXZzM2aIWD5SkG7vy4HGJM0MrlKDSJ4Ie3V2JIocRcAPjCcqXE\ndwhmGCjG7skV8YXo9VhgYveSPLk5xmqjZezM4p5LNPBQ7iwYjwfDoHGBR2TImbtQKGTQBhecEWWL\ndHRh7n2jP57LED2u6I25UetM5YsnIjf+/5gzMR6Pk5JqNBov0MSfj2CGyb2kQMxXiomUHrN3Lxih\niVKFvi7oUAq+gLkHgcIz/YgdjPZogJPn4Tk4Ths3lnnncDhUpVLJCBrnzdPT04yn6M/0hHgXTKA1\npVJJw+FQx8fHyfu8du1aQntA3OBFDghGeHnuTqPRSOPb2trS6elpymdhNya08fPkoDfjzuVyGeMU\nwYgA5j6SwumHtMhDk+aKn0N7QV9pjImSEX5+5enpaabGVswJmUwmOj4+1vb2djr4mb6en58n1MsN\n8F6vp62trYSOuhJqNBo6ODjQzZs3VSqVtLe3lxAplECtVkuGgj+fPmJQ+zN3dnZ0+/ZttVotPXv2\nLJXFIKe00+mkg8PdwHb5OpksjmthxyLXWAtuSDuP5nK5jOxxxZ/LLXarYZiQ67gM+QGJdh7GMWKz\nkdP0/PxcvV5PtVotzYM7Zi4jPB/RDazYD4wol2M05A7PcWfax+y6gPfxGz67ocx6eFmeGs8GdHAe\nd2cg5ke5g02+IM375jIN2sOT7vQgn13murEGzd1A94bcch5GLjA2jClpsS5wkt2I9dyuuDPPHQD4\nhbEjW9BNTjdHFV/WXokhBdzui1jKLjbpxeJhy7wF32be6/XSdmLu9+fjvUrZM+PwFPmtJ8H5byRl\nmAjjLFqvTKQzjPfXGZlxw6jRK5eUUY6erOh082fxPh+jCwnGiJce4WM3fvy6tEBsXDi4R8f4HYWg\nXxg18ZrvluFdKAyEK7zi97KLCMM1himZI/ruStiRB7xFSal+kgtXD28u+ywtFjfC2Xd9eX8wpNyL\nwnhgTXgjVI1g87nGcMHoiXyI8wE/+UkB9Gd/f1/5fD6dxTadTtXpdFKZinx+Efaijg/PGQwGmZDs\neDxORh9JwJJSwjSGhq81+AgB5vSl0KYbbnjsfOcVwz1MxWYCykCAwsGf8AAK2qt7S0rhLxwZeIH/\n9/v9zDolJE3BXFcm5XI5hfXG47Hq9Xqm/tfFxYX29/d17do1NZvNTKjJlbSjqNT6OT8/13A4VK1W\nS2gNu5ifPn2qzc1NXblyJaFdhEdxAiPywmdkH/10z34Z8gHtHAHhd+74uBNKgy4oTXhqMBhkduau\nrKxk5IKHoXw9TafTtCmDNephb5Q78tbRDHiS8bjyhDej/JayDkFE4pAvrHNf9yh7N2y4z51p5zV3\nfn23myMtbkR55MLrWPk8Mj/u5FGfjHc6os9ccj9GOQ4j13AcXEY50oTxFOfQHWD6HA1JlyFRJmLc\nxbIyjrTGqICnvTgfuC59WXslhhQMMxgMEoTpuyJgDI/PuwXqCgNisBiBIKXF4nfUwKvmRqQlQpxu\npXpD+HPNJ1FaCHnfbcFCwRiKhddYaNE4cW8zekku3OJ16BghXxo0xnJ3A9YZk2fRyFECJXMER1LK\nVWARuOGG8sbQ8mrmzIUjTtJCQYGgOPODqJF75EaGG4DLcgG83EVUJih8Dzf4/PpY3IDl/cViMeXl\n+OJzIeOK1qv+giJh2OBpOr85n1E2IvKb51xFZXJxcZGOXGEOaaPRSMfHx2o2myoUCpmQEDQpFovJ\niPKDgKfTacZo4RphzZhTJCmDTETlPZvNUj5iPp9P6BvPxMEZjUaqVquq1+upsGg+n0+5Qzha7rFX\nKpXMTjjeSSXx1dVV9Xq9NFfQhtw+cqRAwAqFQkJtQEC4Ro5Po9FIxU85HgjlheLf3NxMMsrzNzwc\nCG2Y37W1NR0cHCR61+v1NEftdlvXr1/PhJPgW5QhNEWZsT593cN7rBPWoaMwUS77kTVuPDgaKy1q\nziGrmRfWkhum/g7kIs/3UDJyCEXqSEt0LB2hYh3jlHm/WW8Yd97cKfOCs47cwa/uYMfwFY2ixC5H\no4L3yIwjbdDKIxfRCIhzTR8wrqFhNDIdJPC1DHrtckxaGEE8MxpSGDvIRm/IWHSGOzQ8x1Ej7sFe\nQCZ6mBSZHp0Cj4TRF19vP9eGlFeQZSAxP0palA7Awo6TCMNRh4OQAswCsSaTSYLiyTvBYHOhwXMj\nyhO/xyCKFr1Pki/gyWSSOVfLkR9fNBGxkbQU+YpxbM/9wFBwL93H4guc+L8LFH+/zwXv8JwWZ0bC\nPREG5zufLw+9IjThgYgigYa4VwqcDHJxdnaWKcCJoPctzNLCAMNbKpfLmQr0bly6kYERFI+qoJ94\nVuQSDYfDTMKml6/wZGQMXk8kduOfMYD2uIcFXbnHnQCEKciNC0OQXHdemCcM1uFwmEnu99IUuVxO\n1Wo10QLjk7wcSSmchLFHWNCdCJQ568jzGMfjcTLMHAWF7ryfJHxqdUU+HwwGqlaraS69rg31tZin\nwWCQ7u12u5pOp+kcRtBBDHdXQvAO/6bTxekLhUIhhT7X1tb07Nmz1JdGo5Hqqk2nUx0fHyderNfr\nGgwGGaSVvnmdp1KppHq9roODg0TTZrOpWq2mfr+v/f39NAaUGjSM6IkbTM77HiJGFruTOpvNEtrI\nd/7X0xBcLrjXHzcMQAfqRbEBgrkZjUaZUKSPYzqd16wrlUpJefoYWa+OgtCgj4egoR3z6WuG/jP3\nMV8PZ8XTSOhL/C7qC+gR3+UOEnPuERs3MFyfYOCSY+dGLX3H2IuOOfzip2JwzR1WB0E8jSIa29A0\npsHEecBJiZEap7uXICI1xulEX7Ah3N5wurtB7AjVshCpt8vyB5ftsl22y3bZLttlu2w/Y3sliBRe\ngO+0kBZbX6MV7TlKeNNuffN9hDB9VwLWJ3A5Xjx5Jx6iW5bI7qiWtIBVydHxPvgYPHTI/REWdm/T\nUTfGF70/moevsLCdLuSL4dXE4nt+5EcMYcatqIwxJgo7hOyeDP32Pjv0/DKY1HMWoBdhHWjl7weV\nwrPzHVKgMSRV837fnUSI0mFwD3sug8Qdqvex0Rc8t5jo72E0dng6rUHQ/Nlra2tqtVqZ3V2OrNBf\niksyRiqT5/P5dOQJYyyVSqpUKol3IuLIswmpgOKyY3AyWRSq5BqI8MXFRTo30cP2nn8Sd5uCUJAH\nE+kqLZLAHcGQlBKxybPjPfSPhGkP33ruWK1Wy6QY1Ov1FAqqVCoajUYv8D6J7s6LJycnKhaLarVa\n6na7mTAmsoudkq+99lpCViiWWiqVdHh4qPX19SQTDw4OVKlU0rsIG0pKhx8jc6rVahr706dP05mJ\nXOeZV69e1eHhYUItnPehvW+2iOkOPmcgbMyHh1P8OdzvKEoMeSMvHCUgrM/8np6epkKmg8Egg675\n/DoSS5jJ+x+jGTTPxQK18DF4WDLK4xjCYwzwPv1zPcP1ZZEPR82ibAaBQd5BTx8Lc+n5SdwbkWrX\ney77PFWCcSM3HXXztBX0jaNuUVbGHFPfUOP84ii0I6TINdYDaKjzgYeLvaFjY7qOpGQLsK6ivv25\nC+3FGKa0UK6eMOehplxufvwF8LFDtcCsCGlCCoPBIC0OzwPwaw5B0iIM7TkULlxgSodlXan7ex36\ndYXuY2fBLEtW5Lozt+9UWQan03dnZn7v35Mo7MYbv/N58P4wB77Io0J0iNeVNXT1kGAUSjGHjFwY\njkfhORGG9dIAHlJgx5TPdb1eTweeevgPvvB4O2N3g8WFF+En8gToj2+f59muVBgHApYkbXgYY9F3\nFXpI2JPXi8XFdl5Ch14/zBUkpwF4/See6YKVXCBo67TxHD12XnIYrNdzgx7wjdPUQxf8DpkQwyO9\nXi/ljjGOXC6XzhR05c14PXXAeYlQKPLEE3AJA8Y8P9Z+rVZLfENeEnPELqjRaJSZe+7v9/spnCfN\nDaLz8/NUCoQdgZJ0eHiojY2N5Ei4bMARPDw8VLVaVaVSSeHJ9fV1dbvd9Pn4+Djxz61bt5TL5dTv\n91MI10PM/GWNunyGfzG+oowmpyY6oi7XPOeM5o6X50Exp+R8EY6XlGrDjUajlLvmTjnOEmvbZY3L\nQDd6uMfTLFwuYZygxD30487ksrFhfPn1ZcaXG2D8nv5CNzdeMWzcifbwFDLM5wSZ4RuopGwSdwQz\n4H1fFxHMcMPG86DizmKah9wY7wx+SQAAIABJREFUl++q5xr3MgZSBFZWVpKj4yUpXH/5fe7AR31J\nfx3kcN6PvBDbKzOkpOwuGBYRdYGcmX3LJcmgL1P0nkPiyjx6JeTHIDTiuUu+8GPekxtlnrPjkxhr\nQblBgYG1LHkQL3qZVx4T5bjm8XBHvlBQMLAnrFIXxlENV7RxzJ634h6sN+aM/i+bFxceNM+PirRh\nHnK5nLrdbsqVcrp4bg3KC0VG3hxJq4zPhbcLOEfpEFKMHYTPk03dqPYYPAaK57RgZCGwobcbsvTT\nBRi5IPCUe4l8T/I0yoT6TJ7PQp9Zc8tyLVZXVzM5Z9PpNClk3wQAT4DkrKysJHQqn8+r2WxmjkXB\nAHFBzrW4y8Z3zuXzi91lvta5JikZyZPJJPEGyi2XyyWEzNFhp3m1Wk1jA7mMNIE3ML4qlYqGw2EG\nkWN++I0rfWQF6KDTEsOsXq/ryZMnqVDt5uZmMtbK5XI6MkZa5N00m03t7e1ljGj4T1IyPNvttiTp\nk08+SUgaRnJ0zNit5aUvkAls5kGmeF06X7P+GUcJ5MmVqSu9WEqmUJjvMPRdYr6ZwlE/5Dnvg2/g\ngShvPJ8tlirwxPeYa0S+oiNLEUlytMbf65EKrsX6bm4AeH+9MU+uB6Nzwl+MLXe+3DF3HUxzpMkN\njhixcGQPGTMajTJG2nQ6zaDvEamkkVwfEU7klveF/FpQc0kZxI17ItDBvLpucoAk6qWov6KRnKHZ\nS6/8f2oMxFGZWI5AUkIV+v1+ggJpKESHh6WFx4FH4krIt2iyaNwzQcm4B8YznUncM/EE4ig0HKJ0\nb4FrrvSk7AKMi8sXhe9MdEaAWfw7XwiumDAWfFEiLFgUy4w+3w5Lv/BYfNsrz3SGxmjmmgtCn0Pn\njdFolDz6+Ezm370kdmx5ojH9dAPI6c32dRCX6IWMx+NUksGVLQYS4cJlCZQo6Bi+5Z0kS/sWZVcK\n7F7knWwBPzs7S4nRCBZ2tXm/3OtCaLEVnvuYF++/hwTZxeeGCX2B30iqd14sl8sZoy0meVYqlYRW\n0RcMTujrCac8k3n3Q7AZBwn3IDaEWkulUlLM3O8bHxyRjggGic/senQEwWUGoUinD8qg0+mkWlHS\n3NjZ3d3VbDbT/fv3tbOzI0m6cuWKZrNZojk7CRlDr9dTo9HQxsaG9vb2kgFG+BejbWtrKxlgBwcH\n6axFjAZXvI4yu7zkr4dpMUi5Dt2QKS6n3ADC2OQZvvYcdWONMPcbGxvJAOVA53a7rcPDQ62srCRj\ncTgcZvjMDRR39DCMXA7TX490SFl5DGoa9YDLV99Zy7hdXjlN3QD19/g8uOHizv0yxN/nCVq70+nl\nYnwton+Q226cYbxwn6PYLkPRl5407yUfPNrgG2TcePP+05/pdJoxeqITCA09fYK1HEGQSKtItzg/\n6MMIHGTue+mV/8cNQeXKhIUdyw645ctRCfFIB6oMe26PL14gxmgQuNXuXiJ9YzIc/vZQlgsbXzTk\nYdBceCDk8PRd2Xp/JC1V5jFMiWJ3w81/Ez1FWoSP3bDxMGS01KGVIzreVwQgNJdehEljWAx6uoHp\n42XhttvtpDAwXD3M5M/kO4wTF94uBCP6h2EJH7pQRMCAzMW+QuMonJ2uvBN+IwTFNb6Dbuz0xFjg\nNxj1IBxxjNDTlSD3IRQpPgl6QpgJHq1WqynshLfJM30MhUIhra9Y9gHUFvQMhIY+wS8YN76O2I0H\nEs2acdRZWuzwoiFHQDMckaTvGKWuAFdXVxOS7Uguz8TYoJinOxHkHcYdRqCpoH3Hx8eJ3q1WS7lc\nTvV6PckE+lksFpNRUCqVkpHOGDgCh/wSP3amWq1qNpvp9PRUR0dHKSS+vr6e4a1YooW59TAufOwG\nV2wefnJji/44oo1xxDXWE0rXDX54mPu8LAxGfrlcTv8kpSKdONCu9EHumFt3CN2QQbbE3EkQJEel\n3UiIYSDeH1Ef7nNDwNdMTPFwow5UzGWypAzi7iF71yfMN7IR5JG+uuPsO3ahG2vSnV03mvyvzy8O\nBv1j/CCX1WpVuVwurQv6Dh3cAPIwHjI/6m43imN40sOMEU2MMlrKRlpe1l6JIeXK1xU/lmycfP4P\nE4BOcB9QNZ6gG1BMeNweHpnWlZgXFvNQjLTwLN3zcmbzyXBhykSAvDgi48qEMUbPxQ2b6CWxEH2x\n+W9iAiA0RRHEfCYXoPzWn+dJrE43+gJNvfRAhPBdaHk4D2Xq84QhRyiBRN1Wq5UWBOEi91pIUpWU\nEqGlrDCP6JcbHs5/PjZHIL1sAsYyOTh4kjwLtMYRU+6l/xhF9JuQIOGfZrOZoTVIEIrd+wzNmM8I\n+TMfrrzK5bKq1Wo6wmdlZSUTLoWPp9NppjgnlcwJxWD4SUp/XRn7GkVpw08uFFlH8KnnOyDo4TVJ\nGZTUESi+4zesa/rkfOMGrgtU+gji5l4q6xJj1uVLtVrVxsaG9vf3M4i4ND+updPp6M6dOymx+ubN\nm5Lm4dlcbh6a3NjYkLRQlqyhQmF+VEq9Xk8IGDWUSGAfDAYp72p9fT0ZLO5QwI84gIzHUQdHOOLa\niblobjhgrDhCEGUYdPU1y/PJsfFrg8EgI/fW1taSg4XDRT9clzCvzoeONHAmZwzbuTLGMIgIPk5b\nDHO6U+eOmZTVbV5KBznEO9A78EA+v9iA4+gTz+T9OHvu7COLGBv9iY6Yr0UcEUdefV3wFwPPESJH\nnx3hHo/HyWmDnu7QwROu+7iGTmP+IoAA7ZGPPpfuOMQxLPsd8/bTEKnL8geX7bJdtst22S7bZbts\nP2N7JYgUHjH/5y/oSYzB+v/xTN0KBk4HGsTKBBWQsltipQVUipfkcDQokyNDHm4gqdg9EknJK8GL\nLRQKS3MvxuNxpmK0hyVizNif7+iMtEBWfBu209h3UWB9ex4JNC+VSpk4NgiG5625VQ+aViqVXoj7\nS9ljCNzDcm825o/xnBjXZi58PGyB9u31Drszv4yj0+lkvA88Grxsz3uLSKUfseJ5EA47S1rKW/48\nxgBE7iFozgoDbfDjR+r1etrizxhAYz1s4Eiu040QBXzCuNwb9VALiea+bZrfehI8eUfHx8eSlHbU\nEl7xkCD9ogwFHih9cWTJ+8k4crlcQr/cS8Rj9nCDIyMcqUIyuXudyAue4+iuh3IdqSUUCv/5/FLI\nFP7yKuy1Wk0bGxtpLgjJwcO7u7uq1Wq6fv26Dg4OEuI6nU5VqVS0tram/f19VSqVTF+YPzYjkFhe\nKCx2W167dk2dTkdPnjyRND9LsdFo6OTkJK1xRyuQXR5i4X3QBXkTc+v8rLwoH32+l/1lHhxdZA6J\nVCDrnS848cALHZfL5RReBmGLaSKg376GCUsPh8OUauKyDVlKzpfvEuS3RBw87EW/YxiZMTv/uc5z\nOeKoKSH0KO88ooKcyOVymfWETGP+HGXxlI6IwjjS7XMlLVAnZImvs5iDxe9pKysrqlarGT0F3Xxe\npAXKGVFG3zDhyJznbPE7Px7M+xTnxHmWOf+5C+0ty4ORsnkt0gIu9tAPBPSdNAzQt/T7NReO0UBx\n5ReNumVQnicNejxVyu6mQJF5zJw8EPrpApm6PlG4eZiTd8fdA4QjnEFYZD6u2Fxw+liBaGGmeD/z\nFMMtbEP15H7mwuclhtNeZsTQRw/DTKfTtFOq1+slujHHbmRiBCAY/UxAr4DuNAKyxuhxQepKnJ1L\n9JOcMeD4yWRxBE5s/M5LGpCbE+FvnoWy6HQ6SYB7KIF+A/djPBDmITEb2niYyoUG9Lq4uEh5Ni6I\nvczA3t5eorcf48GuNnc+SH53GktKSeh+/puHMPr9fgrNuyEFHQmzESbwUDJHuXgCqjQPCxWLRTUa\njZRb6flT1KSify7AJWV2SMK7pVJJBwcHKcdpZWUlnW/nmx4Io/phz81mU51ORwcHB7p69ap2d3cl\nzcsW5PN5tVotHR8fq91up35Wq9UU9sW4g1cbjYZms1l6no/96OgoGRrlcjnxB/zE3+h0MR9SNhzH\nOOAZtqa7sewGmPTicRueL+WGDc9uNptpo4krZXLSqLHG8UCc0cda9PG7gYCc9XVdKpXSDsnhcJg5\nbox59pQP+k2/MCqiEUAozvOuWEse2o9pBp77CR29KrnT1+mIYeWywmnMmvcNQe5g0S9fs562Eg0Q\nD126TuR3LivdGWAd4czGtBzG5XSBv7y5rOEdkS5OG/rJc9zgjnMHryzLc6O90mTzuPXWDR4pG2fH\nw4GJ3fOGqRD6nguDIuL5TlQEeoyhwiwgTO4NYNTwTGcoR1RIzPMcAjfmPOmTmjRs54yJqjC0K0LG\n7tvmoyfA/f5bz+lxo89zBYg5+5gjbfB6Z7PsjifPt0HQ8X4fU/SEY86Fo1WMP9Zqgf4sYgwReIf+\nnZ9nz3X0vCHmw/OePPfC0U9frNFbjXF8aObCgO39GE6ef0Bx2uihgiyQByUpoT6+MYO8KlCg2WyW\naqX5Thv6QpLuxcVFRmGMRiOVy2W1Wq0kaKANNC0Wi8mYhaYIIBACzy1y4344HKbDpqE3zgWoI/y0\nurqqbrebHAWKjNIXeAjB6qgTieascYxRaa4M2+12QgZB9Hgn84FCZJdot9tVv99PxoLvoiPHo9Pp\npKNy4Nd+v5+UKIoWJO/s7Eybm5u6du2a9vb21Ol0Uo7UaDTS8+fPVSqVklHrMgSDYX19XblcLuWy\nYXCRP+U1vfDKJ5OJGo1GRsaCmIGauqz0HEyX2e4oMo8Yt3G9LHOgmRt36GJOGrIBtAia9vv91CeX\nO9SOQjY67/OZAqY+BpzRXC6nZrOZWRedTiedh4h8XJbcHQuAujPtiBbrkH6Px+PMXHgNtojgRxQZ\nPeXyGsfTaUB/XGe5IcncO/rt8+VoD7/nGeg+zyHzOfRIi/PFbDZTt9vV5uZmJtcpRgEimuybuXw8\ncWxuKHqL4AHothuKnieHM/iy9soQKSlb/A8C+4J16NQ9nejNO3waCelM5Ra/MyLMHqE792p8dw7/\nPITFc9yIiAmO/hnBIM0X/mAwUL/fT0aBG1N+GCpeOGNA4HOIrHuRjCuGeHim0xvPXFIKPYAwQQPG\nj1GGcojhBmmx2D1k4uiGL2D66ugh9HZUCKPNkwW51409voM+9B+0JgqVKNxZUI6AMk8YtvCLe/GO\nSlGLxwWqoy0ejoXPSbaVshX0p9NFUbtarZbmfzyeV98uFovJwHEDDFSJPkLTZrOZFO/JyYkGg0ES\nEhg5hIpWV1dT4i5n2rHrx7fw5/P5lGgO4uKGryN9zsMgUigD3+aNUYPR53OOondDilAP72TcGHnQ\nFgOy2+2q0Wio2+2mOQBJG4/n5/wdHx9nPFPO4ltdXVW1Ws0Ydvl8XvV6Xe12W51OJ+2UW1lZ0fHx\ncQZ983PkDg4OVK1W1Wq1MsjSrVu3NB6PdXR0lMocEPbb399Pa5jq6Kyt/f39tFmg3W6rUqlk6LK6\nuqrBYKDBYJCKXcJ/Kysr6vV6mfCUtNi2jtzEcHTHwefbP/MdssjloPMEz3Sd4Oi/8/Dq6vxgb0fB\nkb3T6VRHR0dJHziS6YhJDLHj3PCbZrOZ1tPNmzd1fHysw8PDF+QIhtrFxYUqlYoGg0HqC3ICZeyJ\n2NAIGRuTpplPxu9hPJfdIOz+e2SNI/qMkZAfutPlq6Pb9MPXk8vVmMqArHW9znwv07ukTVxcXKjf\n76eNELwXmhJdYI3GKEnUwf4XR5r/u33gupKIhTvJUb9EFMzbKzGkIAQCi8YgY4jPF/HLYpUgDD5g\nz8dhsh3K4zdMnuc4oEBjuXgEOOEAP3oEIenWuS+2aDnzvmq1qmazqV6vp06nk6nDgXKMuxHoy2g0\nSn2M6JiPCUMFj458C6dZDBl6PJ3nubEgvbibhLnyujnMoSMI7iWw2MhVi8YwjOwhHK5hAHqoVVrA\nscu8ESlb3iIKMJ8zX1DOBzH0598zVje0YtiVIy68IcDc4HT0CCic+6iQzeGsoJrc50aXI0tra2s6\nOTlJlahBoJjPfD6fUI5er5cEGErm4uIiGVkuMJkXFI0LftYSv4EvvX9eYI9GYU/oHUOJjh572QWQ\nDectp7OHElx5g+LBF6enp3r+/LmkRRHQ09PTNEavebWzs6P19fV07AxrrdVqZXZFOnK4sbGhp0+f\npsOJq9VqOnz44uJCN27cSEprPB6neep2u2q326meVb/fTzxYr9fV6/U0m83UarXU6/UyW/xbrVZ6\njit2jAsMIb/2snCfI66OPLiB4msoOi08N4ZseTY8RmjPHRMMH/8nLQwi8mEwcrmG4UFZDjfqKVHB\n+mUOqWO1sbGho6OjVM8QfnLkGqSLsftY8/nFzjRH4h1NlxaIK0rd830xuqCdGzI8F752xAuecmTL\n6Y0+QFZ42kbUxT6PPB+eiXPsURynB/IRtN2dZJB4R/lorAU35J1PGTeARPyNj9vnbDKZJEfKkTv0\nyM9djhST7MRxDxxl5IrGjSDPhaEBWfs1FiiCiDCCtEB5IJIr71wulzGm4nZYJhzP3WOxDmnGpDqY\n31Ei7gO+r1QqyZiiRcVMX0lQBhWKi9Yha5jLoXGO1oiJyowD4RAhfBQasCkhJ4e3PQ9HWuRPYfVH\ni58xQlenNwIHujJOX+TRiOY55IBEYc2z3ZD2732R+n3D4VCz2eyFGmCed8AzvK/+1w1Kfyd5VVQq\nZ+6bzaZms1ky2kE6CoVCCiVBJwwUN7xIEmecFxcXac5qtVpGQVUqlVS8sd/vZ5AlQkLr6+tqNBov\n5EJgnNIPFBRhR5CoGOYFvcVodIEH71LTyoU+1cOhJ+UXnKdc0TAOeAVFQZkH3nl+fp5o6qgKYc9i\nsaiTk5O0hqDbZDLfCNFoNLS+vp7Cn/1+X2tra4kX8/m89vb2JEk3btzQzZs39fjxY3U6HV2/fj0l\njTO3IJWj0SgZCxsbGyoUCjo4ONDx8bGazWYqcXB6eqqtrS3l8/mUDwafQjP4z1MOMJqZy8ifHhFw\nNAReJNcF+YCx6bIPnqb5M3yN++eo6Jknz9dzY9kNAXiLZ3iVexwUl5PkjzGv6CCOPiqXy2o2m5nx\nEK7O5/PJIIghZuS3K3b41Mfocvb09DTj5PlGCtaJy2k3pKLT7mE4eBoecN2L7HXHi2vISpcl0iIv\nzKMuzjv0B0fYZQbzNZ1O1W63E91Btd2I4ZkR+VwWVeC96BRo6sgaz6Bh0LEunJ+QIS9rl+UPLttl\nu2yX7bJdtst22X7G9kpzpDwvya1ELFCsXodmsWrd4sfLixWssdJ5llvYeB+gEsRheSY5Jngsy7bz\nAz37zg7PoXFvi/AbeSTAuTwLpIIkX3b0MbaYM+M083HF/B9P9CQZVFpsscd79G3+9A1vyMOXHiKI\noUB+47C2Q67QGfSJRjI11z3h3p8B7UFTqIbrYTV+U6lU0vM8KRUe8pi9o2N8xpMBMfG+4hnzDnjN\nm4cbuc5nQqCMkZ2HhBocQSAMCM03NzczCEK1Wk27lnyMlExwpCgmvfpBtzHZ/vj4OIWqvcTB9va2\ntre3k2fn/AbsX6vVMonMIDyE7xwFYN5YS77DDvQXr9l5n3dyD0iXh2JAoxmHI1148iAT9JXQz3Q6\nzXjxtOFwmPG+KWMCPc/OzlLOErTFm87n86kUB+NHLlSrVXU6HXW7XW1tbaX5JndyY2MjMxcgH1ev\nXtXp6alOTk7S+xqNho6Pj9P6cH4vl8tJZlar1RQCpPnpBhGtZQzuoTtaxPeeH8Q1p0MM+UOPiM6A\n0iLTPbTL+6rVakIuaCDirGtHxkFnhsNh2nrvaF21Wk0hPtalNA9FHx0dKZfLZULi9AXZ7yVCoCdp\nDsh735zjsssjH15IkzXsCA7zgN6bTrNlQ+I8eiPC4JtVoM2yXDI+cy9z7PPE/TGC4yHG2BfWGCiY\nhwx9Fy3NUy5Yo8idGK70MUTZDA966oXThmKv0GIZChfbKw3tRXiQheOTICkxL/DtsoXoytYJhyHl\nORP8zkNInjhOTJbPnpDp7ywUCpm4rm+H9rCgtKg2TG6Vw6D0DRrk84ujMOgD43f6xHABz6ahiFgQ\nhLukeS7IycmJarVaUnBc4x62pJOgKGW3lcbjPpxpPWeCvjjMyuKSFiEcF8o0n1voxHXCEB4GY34R\nhvADCpBn+sLz8BXX4T8Ox2QOuE4OQ6zD4mFSD0V4/pYbbdJcKcLbhJY8QZKq1hcXF5nq2OVyOSUf\nN5vNTBVfNyIQnL6rqdFoqNVqpfXhQhrjoFQqvXCcye3bt1NYxQ0+nI21tTW1Wq0XDGDWQrE4r8UU\n6cZYPckWGlIXxw1Fz1tjXJQEkOY5RPQD3o73YLw6f/sxOIRW3KFjDDyLXCdoQgV6D8PBA27c+4HG\nbIm/evWqjo+P0xomv6ndbms6nWp7ezu9jx17udx8h5lvVMDR/Pjjj3X//n3l84tK39Cy1+upXq+r\nXC7r8PBQ0sLZ8ZIgMVzEPGD4uAxHkcawoCs3NwyYC3eOPZTF2kc2uGL3UguDweCFGmOeUiBlD1jn\nnUdHR6pWq5lD0Ak/k2bBfa1WS/V6XU+fPk3r2nOkMN7gWc/Xgmb0P4a0uG82W9Tvwtl25xQ5h2MM\ncMCzYniLMgluaBDq9PxCGjTk+S7rXSaTvxyNWt917noQR5nP0bBBPjmwwjy5/nCZCP/lcrlM6onr\nyJin5uHFGIrkXujkub/sRP65M6Q8UdStWowCz12SFrFQj4W78I2eavSE3BuAcRCgeFGTySQJ00Kh\nkMkr4WR23he9KE/o9t11rtjJV3Cjx/MLXPi4scA4XAnH+DuoGEYP78N4cg/YE4exvGu1WiauD5OR\nD1KtVpNy7ff7L2yZdgZzIzkuVOjmNa6krLBbhjrgWXp+EeMnd4aFzDg9iXk6nWpnZyfNr3tgvkCZ\nCww76Oa5bCwwhCnjw9vybceu+MkZId/H5xihiVDwhHqMY99Vxv/Z5o4RRa4G7wPlmUwmCdWS5krB\nk4o9H2JlZUWj0Ui1Wk29Xi9tTZbmeTnkEZBb41vAy+Vycjp8Vw9jn0zmW+7dqPHET8bl65fdSPCA\nK0hHEjF6oJsrAebNnaGY/8Y4jo+PkyxgV6rnSoA+uNEhLRwxch056w0+Y/yVSkWNRiOjIAaDQVoz\nd+7cSWj0cDhMOWIrKyva3d3N5MCR/9Rut1Wv19PYz8/P05w9f/5cN27c0PXr1yUtjhyazWY6PDzU\nxsaGrl27Jmm+2w/aMTeO0mNMIWfoG3yDvMMYijtQkZfuUPFsd2gj8uLN6wdypA7y3XkYeRELZIJA\nsc6Pj4+TbGfn1tnZWUJrvbwHBpc7UsxT1Gc0lxGeCwhfoNMwDlzWuG7DwZKU+os8BBTwHGJoiqzx\neXJjyR1VdIUjUzGfDQMK0MObz2vMr6pWq2lzFAaozzXvjAY4SJYb4xhsroPdOYVW0N6NJc/txRnk\n/egnDD8cZ9erL2uvrI4UzVEIFIkbSdKCyBDWEwtRVjBEJJq0UHIxgcyTes/PzxMKRKVgdu240kMJ\nwEg+GUxqnEhpUaOl3++niefd7gVGhM3LC0jKKBPQD4eMPdnPBZdXv+b69P+w92bNcSTJubZXFdba\nCwBBsls8PdMtyWQmk270/3+HTBppemOTxF47tirUuajv8XwyAM4xmxt+FwgzGghUZWZkhIcvr7/h\n8fSUBQ9BXxhT/oaR93Wz2SyVhh0bI4U4o54DPscg2ZGwYbNT6TF8CYp9eHiI+Xwe/X6/FmEY/h6N\nRnF3d5eLESPsiNpRsN/Z0Q6GA6NoObTz6OJ0VkRcV5JcQQ5RzhDO6SvXHR4exv39faZ3cBym02mM\nx+M0pBFVpXFKHAwGg1rKCDmkFADPA+WcTCZxe3tb21aPs2qyqguAUnDSBFjmje97bCzDODuHh4f5\nfRwyI0fICagdRTg3m00SvekPuqR0epvNZiyXy0z3eX0Nh8N4fHyM33//PW5vb6PX69UMJrLKVnfO\nd8MIg1j1er0syEn/jTpjlCOihl7d3d3ljrrLy8v44YcfUg/d39/nPb2eKYtQBlHff/99XF5expcv\nX+KHH37I50HKJ7jj/XAiTIou0/Wk/HkXZw0IPrneiBx6qly/DprKdCHvYqfYz6NsDDQMnD7QIU4M\ncCBhvdtqtWI2m8Xnz58jotrtt7e3F91uN25vb2vfjYg4OTnJnbPc004z7+T3Mhpmp4bfccIc0KB3\nXL6FQPDg4KBGK0DPGHlBx9AX9892pqylZP3n+eE+zE9ZKwskz0GPn4dOsGNDoGJ7Y/DDSFfZ0L+l\nHudap+/op51U981/MxWGz0D6PVZl+2aHFkc8R1e8WDwANtr+PaJeUt/oBZ+Zf+PUF9G60QCUG8gR\nk7RarbLw3tHRUUbzTLDTG/SF7bW0p6enhGJvbm5itaq2MhsdK1EQSgmAPhmyNHqCk+g6TQituQk0\nc8eIfDkQF6HmPoxdxNZAobS43ve141im0Lif89u8I3ONo1qmd90XO9r0A36ZHRvkaG9vLw84jog4\nOzurFb90dOXUAs/wIqOPdpq4jvdx+sOQM04YP82V4D4UT+R3EJmDg4PcUo/DT8kMUhTv3r3L6s6k\nMjebTRwfH6czyVwwPhgdxnmxWMR0Ok1EkmiXMUVRLpfLWK1WGR3DM8KBJQDhM6fl7dAj61bQDgzs\nfHo8nRZBSeMA8kzqBCEj5XrFadjb28soudnc1oI6OjrKHZolmttsNrPK9j/8wz9ExBYl8K7hw8PD\nRIVwghjj2WxW01seX6rwR2yNxcePH2M0GuW7mJPF2DQa212SfIZczGazeP/+fbRarUzfwbVC3zIf\nPI/+WG7pJ/Lp+WKOmTtQCa995r00eNzXKR0jNjjApnxwPWsC1HQymeQc8l7WBbS7u7ta6vjp6SnX\njJEhIx4RVX2xRqMR/X6/Nk93d3fx5cuXdFhKZ6m0S/x0yQOQetM9IiKrz9uh4B3M1/QzkTPeE/5k\nRD3NZXSROUT3Oo1G4znQlcQrAAAgAElEQVTofzsmDlL4x+/MG2AGzwNdh3Pm9zdfinnxuDEX6AE3\nvttsNp+VajByZRCg/NzoGrucX3LoaN/EkeJFHQl6sGn+nIWLcPt7pOe4j68xAhJRrwpNdMXCx8ki\nKid6RhlHVIaN/jFZ3NMpNfcfoW+1tvV0Li8vcxFRCM/kVysy0DgXKPV9nb+1E8n1NDt/GCGihdvb\n27z/yclJonJE00aF6A+L206P58UOSukU21H2WKGk/TzGrYxmiRRIN/V6vVpKz5C30z5v3ryJq6ur\nZ2fWRUSWaLDTZg6Y03aOrpBN38upD/rj8TLS0263U1mTKmA+WdDr9TrJsBGRROS3b9/Gu3fv4vz8\nPB0JoHQqOF9cXKTiI9rmHo1Go3bsDvN9d3cX7XY7HX5k5fHxMZERnO/JZJKcGxxjnucjUuBg8RmE\nXgcKNIwaypA0JWOGPDsQ4v2n02k8PT3lVnUcRsbUKU8McERVvBMk8/r6ura+1+t19Hq9RHJIh5EO\nbbfb2S8/D0QB/mGZRgdh44gS2nK5jMViEf1+P4uv0hfSfefn5/Hu3btaBN3v9+Pu7i6ur6/j+Pg4\nn+f5BQUtaREOFLhnv9/PscZR6ff7tWCANf21lL4DjDJYcQDlOTYK4n4y38vlMkajUQ2RMpqPrXDt\nJgfsDtTOzs5qqS3P03A4zA0cm82mVrmedTYej58R7bkfSKsDQSNCfs+Iuq7B0fS2fwqu4owRUPO5\ny594TB3QYt9sa0iVYte4jrG13JpT62ft7e09qwcH2uWswXq9zlMACLyQC4IyB90voZ/o4ZKHZ3kx\nIl7Oq+WUAIPvORCkT19rr+UPXttre22v7bW9ttf22v7O9k0QqdI7jqgQG8OCL6V1yobXivdo5MJo\nARGhOTy+jpxyROSZTkTWTik8PDzEeDyObreb6JLRLqI9oinD1I7I1ut1QsqHh4f57yUeCv0tkRz6\njAdtUp5TnUQwjnhIc8Jz2Gw2yb2BF0OEbFQEBI9rzCEqI0iQJBqRRZnXNnIHT8gRnSO4rxE67+7u\nYj6fJ+pEpEVK1VBtt9vNcSTVxj2J0LnW/Xczt8fzY5JoCXHzXqSzjDQxxhC4TTYHOdjf369xdhqN\nRhwdHcXp6WlcXFzE1dVVTYYjIneQHR4e1sjIFF/kXDwQKeQHJPb4+LgWQW42m0ylnJ6eZjoJefLu\nWqd5SV/v7e3lwbkRkegOCBa8loiopcPm8/mLu2cdKVufEBGTCt3Z2amRRyOqSNqVv3d2duLm5iba\n7XZu4LCOgB94dHRUK5uAfgKpNSeLNCD9PTo6yr7e3NzkGmPN8RlI4GQyScQKeSP19Pbt27i+vo7p\ndFrjTi6Xy+j1es/Ghf6hmyhaS4Pgz0+nQ50JACn02ofyALroNAprxUiUm9NeXnNePxH1qvmbzbYw\nLlX4fYAy6TnKh5R8HlAxo2etVisPjEZfcB2bCbrdbq5V1hgNFBKUxe9lZMU62iicsxv0z1xP68nH\nx8cYj8cp26ZDmIu0v78f/X4/7Ql2wNw3I4DwbOlvWe7GXDnbY5A6l12IiFoqj3krsy3oB1LpERVf\nj7EzkshGF9Bs22DWGciSx4W/I2NO7TEvzoS4OQX6UvsmjtRyuUxDXC5EG9eXUmMMhuE6Gy4z/0tS\npHcM9Pv9GlmcFFlEJIF3uVzmoJvrQ5Vh0hhMOKRh18yx42K41FwnyuHjeDiNV+aHDeH2er0kHZKf\nN7fJabX1el1LDbpys53EiC3JdTQa1e5Z7myjMnxJFmWsmBf6jZOA8sChiqifS1gSI73Dq7yOOUVx\nw++J2KYn+X7Jw2COn5621XSBpCO2C4Zq4OxktMNTjmnJPSgNAQqM+SdVWpIoSQN2u90aERmHnrG+\nvr6uOQKdTid+//33+PXXX2vwt+uz7O/v1w58Zf1BOLYjwWdHR0d5BI2VIjV4UJDMzeHhYRow1raN\nrrkzTrVA4J5MJuls4dCzAWRvb3s4MNvcI6LGI5rP51nV+6VAAk6JHQ3k7PDwMPr9fo17BHcKZ4/3\nYPcunBX6yPMIAnCoPE84mKT9vIZJk5Q7ER8eHmI4HObzdnZ2stxFRMRoNMo0ignPHGGz2Wzi5OSk\n5hDg7Ji74p1wm80mdwOSiqYvOC5sImFnI7LhnXyUJ2ANWBeU3CqoEHzHzhayYl4b78hYoYPNYfTa\nLDfsoG9MWI6onLUvX75kCqokY9NXc2qPj49z8wb60ilYGlSEkn9qvqb5Spa9kgDeam0PjR+Px5n2\ntS3F+Wg2t+VhTHmgtlWz2cwdnNzX5Hqc6YhI6oedFHNccdzMt/NP8wBpHlvLDWPF707hWX5wPkt5\n4Tmkey1rdqJMLzJlp+wn41H+ze2bOFIoDNcO8ouVAo6j8NKAmfVfogcmOeLRGnWByGfUIyLSiOL5\nenGjhGezWU1pRlRRGwgROWeui6gfl2FkAWPJ9Xyf92bC2+12EnyJNBwJo9iIflA4fM9ePlGto5OI\nalcP48P7cJ139ZhoiLAhwL63uV5G13h/R26MZUTFY/DiodnpZp4wiLu7uzEcDmsOjI1fs9lM3sf1\n9XUNWWCnXKmQnWOH62J0DDmDk2OFihIm4qNeEfLGuEIadt0ucyks49QA+u2336LZ3O7eAnXa399P\n5wbjyDuyBdx8CRzQ/f39OD4+zhIJds5ms1ltl54LFhKx9/v9DF5sSJFvxtDzDc8DJNZOHcHV/v5+\nzOfz2m5GO43j8TiOjo5qSBS6wdwWvg+azM4/b+4wVws0EDnF8TCiGxFZfgInGa5URHUsCQHgZrNJ\nNJqz8iaTSa5H5ALjjNMN18bvzHjjEEZE7uK0E4nOiIh8NjJgcjKGnkDBvBR0NgbTzhFGF46V61qZ\n/8QaKblAyKGdLAdQjLMLNYKQ0Ed0DbqV9VlykVxuxe8ACnt/fx8fP36scX3evXtXQ73MAXr//n2+\nP+gu64TSMgTrDrzpj5ESO2BweYwYMZ7YyMfHx+Qy0tA95lVxLbWxyr9H1DM4ZSbGtpi+GszA1pVZ\nCl/LuDG/3vTCGL10sDyZIda+gYPy+Cv6jj9gW2o7w3f8O/JZcvicTfha+2aIFPCjvVKEvzRQbDfF\nGBmaRKBsVC0cEdXuQBuM8Xicu6Ps+NBAqIBCSyfu4eEhC1rSVqtVnquFI8Y9Oc+M/mIgI56ft+S/\nIbikE9g1yHjR77u7u1gulzXHjR0RXA+iEFGv3cTfrZAgpTpC57MSBbTTw9+NwHke/E52JB0VeT75\nDGVq4beA43iwaKbTaabIdnd3M2r3+D49PSVZmoKFKGbQG4qO8n4Yw3I7vjcmYIzKBYlBcXTHM/f3\n92M0GsVyuYzpdJqfsTuHMd3d3a0V5ru8vKyRkO2A/fHHH7mmdnZ20slqtVqJuu3u7sbHjx9Tpt69\ne1c7a4wUV0RFTG+1WjUZjKjScDYIGG/vnMH4ITOcUWcHwcVo7QiQUqRRKA8iL2e90bwxwrKIgcII\nNRrVGYXj8Tg/Zw4wmsi+yeNGejCS3NO7kwhiQHJo0+k0jo6OkjiNg4a8UawVp4fdw6DhIOPL5bI2\nphFVxH9/f18rqnp8fBzj8Th1kKN4jCznLJakcKPpNm5e8/6c64xUMpYR9a3qvKPXN7q5vCeBMYGe\nnQMCRxxop4xsP1izRhp2dnYy2P348WOO5eHhYQYWd3d3tcOON5tNHi4dEXFxcVFLcXHPfr8fq9Uq\nbRC0AtaFA90yW2CHwOPWam2r36PjGDdnUjyHvINtHboDOSO16+dhswjMnbpmDNHFpp80Go0Yj8c1\nZ8/Imfvg8j00AqKX0r22LfTVwArjVqaN0f12XO10EehZDg0YvNS+mSOFo1HWdiihuoh6yf+I52k/\nrmGBeNEwIHi13OPy8jIhUXbgGHK1cJQcA/rCAkWAfcgkCsZw83Q6TY/bSsXva0SDd2BR9fv9OD4+\nrm3fZUFTKNPoEnD/SzsfykqtpQG6u7uL2WyWKbwSdXLK0gqTMStzzM73G8ou71EufqdBQDoYb+65\n2WwSLaGuz2azyd1hOLSOjHiGDX7E1rCRCigjEaJw7u9xK51hnJ4yMnNEi7PWarUSmcCBpeFEAX0T\nhUVsiy2SZnMJCT4DugfFNNcJZ5ZSB6enpxFRHVrsVIudVJAuZNw7COk3z/WYGFWcTqc1hA1DaLSW\nz5h/UtpO+2LQUIzUzoqokKTSKY/Y6h+UOsbYZVHgwkREreo96xv+htGNRqORTvt8Po/JZFKjCsCf\nnM1mNRSl2dzWcgIhd+FUZHS1WuVRKIw3qaxGo5FpHYIBdBm7BHEoIiL7hVxjFLknjsJLKAifIxfI\nON/DGLK2S0NsQ+TUj1GA0mChU0BhcCRxWp1iZ06hbTiVylpzCYuI58e0WH4Xi0X89ttvOYd7e3tx\nenqaa8K79h4eHuL09DSf6xQsBpy1w5iy/gj0Pe78HzuJc887WKdHbNefHRUH2dZBrEHWRWn3LJsl\ndYMA0s5yRKWj2u126k7kbWdnJ4EQAioH0N6xbp1BORPWmNex5w29ZweUMfD3Pb92Mj0uZeqZvrAG\nPRZl+yaOlB0iT4YJ2eWCInoyehFRRUlfi4RQsiX/YDqd5nli9MOViLneqAF9tyPnqMKNCN31QFjU\n5GPt0ePskRpAsaM8gWQxOtyz3W5nVP709FQ7IgPl4cjTwsACsMJzIxq2ssWgGdp11MQYMU82mBCb\nPcY05omF7c9Br7inESKczL29baV2HAIW0nQ6Ta4PRsjwMcoEZ2p/fz/G43HWBLLxwnh47mlEMRgF\n5rfkSHiMGBsUyGKxyLl8KUrGqPP+pF+pE2OEzBw4jkpB9jl7cLFYxGw2i9FoVKshxjiD2pToJQ5i\nu92ucYvYdm1ieUQVCB0eHiaixD3H43Gcnp6mUTT5GafZ6Xka/BUI88gx/Tk8PIzpdJoy5+tbrVZM\nJpOsL0ZR14jIEgpsL/f823ihr2x8qR3X6/VqnCWcNcb58vKyljrcbDYxHo/j7du36Ux73jE0RmuQ\nd7huBwcHmdpDzlqtVgZvFGNFznBOmTtkEoNXcly4J3OPAeMeln/mztE+uofPuS+o0EsN54v7EtTR\nH28Isq4muFytVskjfIk0XRpT7A/oZavVirOzs5wnMg3dbjcDQsYPxInrnGJmvggoqC9GUNRut+Py\n8rI2DuhcEFlnTEgpI4/YFuQG3UM/SsTfa9z6ySCEx/5rnzm4Hg6HGeigNzzPPinA84S9I0h2cE2t\nMGrWOa2PLCGXzkiVjlIJzDjgLxEwnMASDSxtUtleyx+8ttf22l7ba3ttr+21/Z3tm5U/AE4vSV3A\naEZ9TGorURV+N0TpnQMgJ2XxMaoge7eeIV6iE6NkEdXRDEZo8FS9VdxQLPcEYscrN4wJOkL0Tp96\nvV40m9tihiAXjoJBL+B5gMiwo8MRiMcNCJ/IvkSl6D/ImgmSRAdEyOYmfC3CJIIhumLnSETUECYi\nJafIDPn73iBEIC+bTbX9ttz5BkrCPU2iB02IqA47ns1mWQyR8QaZ8E42o3IgUEQ63tUGaoq8Okok\nVcDYeNs/HB/QSqMum80mi11CNId/0W63o9lsZuFAE2Cvr68zHUTaz++PrLhQHrJIfzudTiJaXOex\nKNEa0m6kf4w6lfJkJOv+/j5LjXjMKMTI2EJidRmDiCrl4fQd6Aay6rMM7+7uEuUrd6yCbEyn0yyh\nYe4gP0tKAPL1+fPnGA6HcXJykmm4+Xye5UY415B3cIqZ1BnIIf25u7vLOXdK+OlpewzN09N2ZyqF\nQweDQepf5MmlVowYGB2EgwVxHqTMZHSQt3INmxeKnka/+YxP7mPUmjXD+5G+YrfmZDLJzRnIovlJ\npJxchRw74vQ13zfiaMTmt99+y/V8enr6jBvbarXi8PAw5QI9dX9/XztyzDqB9wFJHo/H+X4uwYOe\nduoJu0W67O7urpYWfvPmTXS73WeoozM76HiPt7M6RqGMyDgVH1Eh4yCnLt/DGsammuPY6XSi1+vV\n9KH1JRxHMifuJ4hdeV3pT+AbML9896XrvIZLpNJZopfaN6tsDunS3AQgY/NpIuqpL64v4bqIisNS\nOlJc59om6/U6rq6uotVq5WGsXlBwjJyH57py4fM8titzD3avRVRcGlIe5XbL8p3NHXMqE+PIdzA+\nZYXmXq+XaQOnapyidArUTi3vaD6YF2TpkJnEa6i/5AeRf+c9Pb+kAvibn4dhsiPGMyIiFY3LW3Q6\nnRw3uB1eiOaAeBcZ0PxgMEgip40zysaKKWJrsHu9Xo5PuduO56P8Sr4W88OmCjvqfMamAhTmZrOJ\nXq8XvV4vBoNB7kLjeRi66XQas9kseRtPT08ppygQVynm0FacNB9MDBeFQARZhMzPM73RAqNlo23Y\nHGcKSJ7PkF+nLklfff78OcbjcZycnORc8M6MGw4xir80Jjs72xpSe3t7ScTHKCGvTm0if/yfOmue\nX/7v9CwpnIeHhzg7O4v379/H999/HxERv//+e8xms3jz5k08PDzExcVFzg2OASR6+FsRkSl9+Dns\ntLVMR2x1xHfffZcpf9K6cOvsKJqrxDrAeTY1ATk1dcG80nIe0Sde75Z9gsjyc+tjpwkjtnp0NBpF\nu92Oi4uL+Pz5c82p44DccrcfPDYH5earUavLzg73/PnnnzO1f3p6mk4PdIPd3d3cBEDjOBlvNnJA\nt7Ozk84+toZGEFTWaLI98Lj6wPLxeFxzdmgmerP2vE5JzZV2zo0+IqdsdrFN9lyQZsbGIteXl5fJ\n3bTsMBfoG9vtiMo2YJvKoNTy5XXJvHJfBzu2Azs7OzVubglgvNS+iSO1WCyi2+3mqecRlYNQ5q0j\nqtonNjQl8Szi+eGNEVXESzOxDkK1OS18dn9/n9diqCMi0S2T5miz2SwVnL3biEjiJ3lrO2fkilGW\nnnyTxyHUoty63W7uyAFxcATpBRtR5ZZpLJaXOChGDNww7DgFNvrOjZfGi3cxP8zRAO/OvUsuhccJ\nhcFzcCIajcYzZcP7+XBWO3k4KeaeYPAZZ4yQeR+QX41MeYdVSdy0I9hqtWpOr7eJl3WSuNYKHYNJ\n+YtGoxHT6TQuLy9r5F/Q0svLy2fRKmR3uGPeNRdRIQWgSdzz4OAgLi4ukoeCA2K+Ds4X48YYIB/w\nixh/b2N3gOHxZc1hEI+OjuL8/DzG43EiU3Y0eA6cJ7gy3BdHC+ffc0DfkRNkic0LvV4vDRyNaNdr\nzLv9QCx2dnaSmxmx3TpPLbNutxtXV1d5Peub8QJFoi84Beys9KG2GCbkyBsqeM/JZFLjlFoXY1TM\nLWLtEUjQJ88xrTTE5pRhyHgWusC8Ke7JWkLv8xwQUHSpDyVfLBZJ1LaepR/ozJdKGURUMlnyWOfz\nefzlL3+JiO2affv2bW1s0aPwVWnU3mK8zQOy7nXJCAjW9K8s90FggSx7Mw3BAI6Kg6G/NUdc6+DB\ngTdj8/j4mJwxvz/zYV7heDzOABDEivfiAPbhcPgsu4FcMF7mfzIGOMIlclaid+aHlcR7fw8nys+I\nqIpQf82xjPiGdaSA1S04JrI2GlXNDtINwO2QwSLqSBYL1AaaKIsB5TMWGdtj2ZFAs1duh8AGytEY\n11xfX8f79+9zG7wjZCM3VtAoKXYuONWwWq2yBhDXcwAphoNxKdNvCAZCUO6cscJ0BPn09JR9YDdc\niSQ4vUlzasVpjYioGUeMJvcy6ZvvlM1RiZUbCp3UDg4DCBHOgNG4kjRpZw8ZIVJzygoHl0jeuw1J\ng5IyczTt8SaC8n1BLFCMfv9Wq5U7hjabTa6FiLqhubi4iMFgkKTi8XicAQGpQjugJrY7AudcN2Tp\n6uqqtmY+ffqUaSP6HlGhAPzfxGjgfeScjQfMKwiQU7t85rVO8BNR1VMjdYIMowiNuJJWQgZx+pA7\nnGDmcTAYRK/Xi/F4XKslhMHiEGKnIXlf1oB3JqKXkJtms5nPYzcuaES3262hKAQkpSxFRNYiI/VB\nPzn3j2dbTrnX7u5uppyMnJEmJO3reyLDTrcYuXEw6PXGdfyzsbJeZm34fU0k9z3v7+9jOp3GfD7P\nINKpZe6NETbST//L9B3zBBpZkpFJt/33f/93UjwiIt6+fRv9fr+WFkS+CfCo9wXhmnXhcSxRet4b\nh6IM/spyHh5rdgSuVqssMRIROT+murhaPJ+VZHGuZeeuz4QkZY9N8BzTN9YKp0wwT5QLQmegv12n\nr3T0WWPIh6kQ7HI0SODnObVp++xgmubf7ZC91L6JI0VU4BwukCIDGFEtCIwaEwlcT/OC9o4TrjH/\nyErKQmUIFEH1IncUgVEnJWDEgpQHBs3KxIrQPBv6ybMbjUZ6+YvFImvTcN3FxUVEbKPSfr+fEDYL\nw2NHH1AY9s4Zu/Jv5LkjIg+9LT9z+orxRmiJmGwEiSiJTO3U2VB+DVJmPnBYIup8nlIRYQRxXgxh\n42AaheI6Fibj0mw2n/EhnBb03FJ9m3e0kUKWzJHi+fAqQB8sDygGnCIcK8/xZDKJwWAQx8fHKRs3\nNzeJ0LGVGwd8NBrF0dFRzt3Ozk4t7Uf0XMrm5eVlFr6cz+c13kCZHnPKBMSHQ27hBzGGZYrFaxSn\nxQEJ3+FoFYIu6wHu65Qh6xRZeXp6ypIDyPdiscgdhoeHh7VyDCh7jDHpf97ffD7Pz3q9TlTPfWbu\n2u12HB8fx2QyqaVqCBBAI7w2QE1IL1qeneJpNpt5rE1E1OZoMBjUPiPtDBLPESQRUeNi2pGiMf9l\nmi6iChydDbD+dtBXUhkw7mQj0IO3t7exWCzi+vo6ZrNZ3NzcJHeQcSGrQP+4H84aHDCjLU4rmi+G\nc9Dr9WI2m8Uff/xRQ9Dev38fw+GwFoQhhw8PD3lEE44//bRjRzqSz0xRcbPxJw1lXcNzcdycwrLz\nUKI8yLepHXxOAOD15RpbRnaMAjK+pPBMIUGGOJLJKHxEvShniSxGVHxlI/bYGOyrx9H0HeTMAT/2\nhb45GEYvfq19M0eKCWVx4Sx0u91EpgzxItQYHRe1sxCVSM9ms6lxhfge0C19QPlHVAsfQTWSRX/M\na3EF54eHhzQUJQHXxtSCaCSM97EA47mv19s6KiiM8Xgc3333XS1Sd/TsiAalWiIk/M1OCNfh7FAf\nxOOGcff9LNQlQmZIn7pHJScCBe7owPMF3O2/MVe8L9eBpkG4xZGjL0Rt5fgzNyas2tB0Op2asXMk\nxHl/m80mick8A0XO4t5sqvpjT09PiargSHlMzGcZDocp36Cpj4+Pefo8lfsp7gjn6e7uLo6OjiJi\nuykC9PTw8DCPPYnYppoitk46zgTOGTVvxuNxDIfDGAwGNePNlnNkx2PocXZ6g/FAcTFXbqASNgIR\n9XpYjImLJPL/3d3d3DwQseV2OJrHufGY3t3dJRLochnIp+tX8U7oC1LQ5l9gXEkPYyQ4qmi9Xsdo\nNEoHNaKqiI7+wrHj3Y3GEYVz3dPTU4xGo5RDnGgoBryPA0GCANK68/m8ZtzYiAFFwc6UdSuOq3WD\n0W47SzbWpY7iu4xP6ejw/ZKaAMWBcSiReE4S4O+uQeTMAXKEXNCgpfz+++8REbk9v9FoJDezTN95\nffMZRZRZO+YAEkQQJBtZKYNHnlHyIymZYi6fU2W2eTSjcoAb3JN/LiVDfxy42OGHG0bQYuSauSHo\nto4ukTCCYfrCdxgX6xP0D+vM64lrcKIcTGP3Sl4ZY/a3Unuv5Q9e22t7ba/ttb221/ba/s72TRAp\nV0AFlcEDhqjmHVx8F++SLZMRzwnUTmmRljCHwMToMp9ryJHo05wgGh44SJCPdOD75HrL9B3Pw3uP\nqLZj4xW7EfltNpuE4bn3bDbLgoRs68Wbxksnp00qgvubl0BzJEhU5bw/fYWXYb5TRHWUD1G5ycJw\nPZhn+sW4GUJ3lEoapNlsPktfUrCRSKkkxnNWGRGtU4lEsMD/JYGdOTcBkQjKRe6cxmEXC+jh7u5u\nLedPJE+EZs4WUSXjYggalJY0j6s7r1arTNd4LcABgYR7cnKSpOxms5kV3N+9e1eDsQ8PD+P333/P\natvz+TzTfp4DCLXMhdNJ/B10DJnneqMXzBncs7JyOYgRXEDewZwu5o/iqxHV0TZw2Q4ODmpbr9l2\nPpvNaqn0RqORPA54m47m0UGz2axGQ0BuSb964wDrl/WBvHBPdnrB40L+XGyVeyBjy+Uy05mLxSI2\nm00WemQzw/X1dTw+Pubh5lxHdE9fWYf7+/tZeb3T6USn06khnMwTqIJRHvNI+Y7XKTsPnSaNqBAb\n6xyuM++pTAeScoeyQQqYe3JNibbTyrQNc0EzV5JmXWZu0ZcvX/JZ3333XWZNIipOEjtgjcjAXzQn\ny7u7fWSKU8wgn+i2EgE0gluiK4yxETyPK3YDGeE9QI2Rf3S5x8VZBLImnP5hhJdme0img2b+mGk2\nfMY8W5/SB8aynPeSMO57+vlkmoyo/620XsQ3cqTgffj0cNJgy+UyD3p0jhTBgA/w0q4C7waKqCox\nM/mGlBFMLzTXmYFo7PPreI65EE4R2dBCBiy5AsCU7ouJcUyihZz7Qy61crm+vq4ZKj6DtI4DFvF8\ntxqQJcLnviKcJiZyD67HSHFPDKGrIHvXGtA2Qu/UH/fBYfYCZvGXhFM4WIx36YSuVqtadXen13A0\nGE/SGyxMUsl26Hkm8uSFihyh2E1oZu6oQA2J1TvFzLtzmgaFwE4aFFQ5hxgrzz9jz84uuF7spHn/\n/n28f/8+rq+v01Eej8fx8ePH+Omnn2J/fz/++OOP2vPYmec5jYgsvcBzz8/Pc54IfEib9Pv9WorK\n81DW3oIUz+5Z9EWj0ci6PdPpNI0LzhL9wFBRwZw5pB8oTO+KfHh4yBSNS7QQAKF/LFPIgnfY2Tn3\nBoVymzvpPOTK+oS0IEaHtUw9qZubm0yN8O4nJyexv78f19fXcXl5Gbe3t+koHh0dpRzhoOLsI3/W\nTU418R3mxtyUiNIT4uAAACAASURBVPqZiqXjwtqwE807Wpas+1jX5uyYI4ccUuPKaRrGmmtMlShT\neE4X8jsBgVNbJVWCuZ9Op/Hzzz9nKvH777+vkfTR+RDNTSlA19iZ5B2wSegrPrNDznt7V58dw3KO\nnPJysOY55R39TPSMuXJeMyUXzYAF/eE0AZ6Hc4atsCMLtae0V34/NveUTryDe2wbn9nxcvBt2cOB\nNMfT7/RS+6YcKZRjRCX8cFt8ECsDw8JAwURUpG3/4zOQLeeTza/xYnG0ZOfIzkFEfWssiuElcimI\nlksQmExdKiCfG2akg37wHAwmz+G5OAZ2ePb29qLb7cZsNkuF6kjQjlTZrPD8HROsebadPqIj3tXk\nd39mAqQXyUvRFU5NSeQkp47slA4I48czeQbOCTL1+PhY49yZy2KjgsIyqliO2cHBQQwGgxiPx7Vi\nnkRl3W43I2krqf39/ej3+9HpdGqEdrgSlisWPwRuo7m8NzsWaaPRqMaB6/f70Wptj8C4ublJwzyZ\nTJL/BKLhdQgyhsNII2hpNpvJEyy3RbN2HSThJMAH6fV6uWa4/2KxyKAGo9/pdJ5xbZjLiMjjYVCk\nm011Fhv3pvbS09NT7lajdg99tTGFd+UDmr3e+J3Cu6UM4/S3Wq0aemIOFX2IqLhOyATX8xm7C2ez\nWUwmk1qtqNFoFN1uN/UjyNLd3V3WmHOQwjwhl+hGxhc0gb+zc4u58jiXQZ3J0f5/RDwzlNZ9BLTs\nLiQ4Zf585pwNdkTUdIERCyMXfk/aS0ET1/l+pVFdLpfx6dOndIg+fPiQ3+X8TJwozz3oD/LrjTd2\n9Kx3HVRGVOcuGjFjLl3OhHHh/Wl2Qvx+Rsj4O8iSv49Tiw4y/3c4HCY/cblcxng8zoDOa6l03OxQ\nl05WRKWLIPI7iPZ7l/23g4cd4n4RVSBd8mbtAL/UvpkjhXLlxahU3Gg08rBcYHwcIe8Y88ChMCHm\nmZRH9F8So5kEDCOVjiPqOw9emkATX7l/RL1eDs6Zya8+T8zIEue6NRqN3KpaLiKiGveHseB0eSLK\niK0h7Xa7MRwO4/T0NI6OjuLjx4/x6dOn7I/RljKdRtQCoueonEVpyJ7rQK+4v50Q1+1x1MC7WwE4\ndQt6aHSHcSOqhiDrKIt+eZu4ZdCOL8+DZMt3mAM+s3PpqsxEMJCCcbQg+W42m9qZcJyxRXMK17tg\nvB3dMhMR6QxFVHA+le13dnbSAen3+9Hv9+OXX37JeSNt5nGmn8fHxzGbzeL6+roGaTPeRLXz+TzH\n+O3bt2nYKA1g2WLnICiCkYjDw8PatU55+7mDwSBRF8j0rDHWFmPKVn6Mk9NE3M+puDLap7iq09r0\nmX45GLC8+fBymoMKgh7eg6rQGCru3+1203kHzfAGFXQMxTI9T2dnZzEcDlOv0beHh4cYj8e1mnZ2\nbkj5euck7w6yB1JnJxuZ47ukHRkbjzFjh7w5ELXO5X7oKeuAzWZ7KDkZA3Qhn5UImNNCRpbskCDX\nLuRJXzy+nkveD1v26dOnrOsXsUUACSL5ng/MdkbA5GeQX+ocgpggf9gQgqzSSTDqQnDjsbS+s63h\nPr6GBuIMiuvxdnbB5XIIjPb3t2eYejMF88DmI48pDVtQ2m/0frk5oEzzEahEPC/MXM5p+a72Ixi7\nr7Vv5khhoFlsi8Uibm9vYzgcxnq9rcxqZAkhQ2mWRQxBYxyVRtQPYeV3mp0JFwIs8+r+/+3tbUaH\nKGlzDIzAkIZ0X15KQUVE1p4pUS63l9Ajw8wYJcYM2PPg4CD+z//5P9Hr9fL63377rbZw7UwwtmyH\n7nQ6z1J0TpfSMGzsfIIr5ftznatw8/58r0RrSv6T+Q3A4nxuY8U8Mp7mp6CULRs8nz6wuMoImXn0\nbhgca3hM6/U6C81FbFMwLM5ytyBKkaNscIIiKqMABw4Dzzyt1+tEbz98+JDzf3Nzkwqs0WjEx48f\n0ymjvyAWcGoiqiNEPn/+nAiMq8Xj6OM4ukK6+YJ2Eh8eHmpVoMv16EBhNpulowxvkPWOgxoRGWwR\n/WJUmH/PA0ggYzOdTmMymeSOTo7T4TOnZm2EGQvvOrZSJjgEXSnRBYwPCKrHbWdnJ1E20negek6J\nI8MYK4JA70SEW8WRI5SN4XnsYsTBAmFoNps5lre3tzXqhWkMjKPLERA4OLBk3Fhv1oVGQaxv0Q80\n1j8GmbHBdrBTlh1wEfVijS+labATpeFG75jfWaZ+rBuMUDnQub6+rqWTkK+yZATBL7XDVquqMCzP\nYcydJcHZ5HrmwoiSUS9nRuyolnbG70WgyJgSKDgVztg4aOD/zC8OP+l9H9zO2vd4GDWnv6w1j53p\nFS5uzFwQfON00hfbEds/1qydVSPRphO91L6JI4Xxc54VztTu7m4MBoNYraozxVhgEZWxttMDxIz3\nWW67jqgKcFrxmXsFzyCiUho22I5aICgjbDQiFStgC6qjVgs7wsmCtxGKqIwP+WV7zwg/aSVQPB8h\nwdh1u9346aefImLrnV9fX6dht/OCUJX5dfqNUkBp2glicZRl9UuyngnVjAOwagm3c1/Gx6gTi4E5\n9LZcpxfNu+Iz8wz8dwwWkbnROBbb7u5ujXsCemAC5Gq1So4JKetGo5GGjsZY4oDCRYiIJFHD2+G4\nCcZlOBwmInt3d5elCnAqWCs4CIxzp9NJJ+jLly85z4wTqI25KJztxljjvEdUpFKu6/V6NXI3SAtK\n3ak9z2WJ1JpXiEMUUSHDk8kk1yFHOiFfBwcHWazRpPx3797Fr7/+GpPJJA4PDzOdyVzQjzII47mP\nj48xGo1q8oQMErUTLEZUyh3U7fb2Ntc4SF2z2czaetSgm8/nuQ7hkJbIAqgQ1bO5DqRqOp1Gq9WK\nN2/e5FzjRONsgZru7u7GcDhMFNOlH5xGIx0DtyWiSqF4LZUpf3SHxw2HCGTExhWdxHOMvKFbQKWM\n1iMbjJObMwoue8H7o0OYZztE6Ogyw8E8YKdw/iMizs7OYjAYRL/fT7TLxpo1wPyyLthYhBPselfI\nptPPdrIZu1JfMidOJ9q2svYAFYzGAjY4qOXnfD5PDibyaF1DUMPmFY+bwRTzA5EXv4vXE/d239zo\n50upYt+3HBuanVTG+yXELJ/31U9e22t7ba/ttb221/baXtvfbN8EkQKKM6ROtOjdTbSSiNxut2v5\nTbaKgkgRRRF9cE1ElbYi2ii9fK5z3p5nRzzfjeLcrmFEPP5yq2ZE5WE7v813F4tFonM8F/SGNEwJ\nxxIl2isnAiJ6IqKk/2/evMldUOW4ma9E2tRoHTs3HM3w/jzDxUhp5go48qSPRrYctRFxedMB80TU\nYXlhTBkfEB9D3/7n7zsq4xk0okyiYefbneJljLyLCcTUkDz3plimizPSnA6CbI2MjUajJJvTD0oV\nrNfr3P1KqgN0zMTw6+vrTA9GRI1nSPFGiOjr9bYg7GAwSETCZ7k1m82MTg2TszGDXTZeX57XMvJj\nHszT4TPu2Ww2M93OO3neSP9Np9P48uVLREScnp7G6elpnJ+fJ7p2dnYWEdtipaQjymNwWq1WdLvd\nrKZdbnN3BAySxHvs7OwkCd27Dw8ODmpn5a1Wq0Sk2u127nRiPTo1wc5ckEjmt9vtxs3NTdzf38fp\n6WmmeSO2aNzt7W3tXNDPnz/n3LNrmirlfgfSlWzbd2HGErkreThO+fi7rIWX0n6sXZPCzZvyhiMj\n9Tc3N7V0nrmMcM1A9FzklP6AtnqTERXvWYtlio7rSM0bbRqPx/Hw8JAHHTslxvuhA0GA2CQEKlva\nnVKX+TvWMUZWPE8eU+tK72b3GBphQhasv4wE2656w47l2J/RvHnDfK8SvXZDB9CQC/PzLGv+vtG1\nkrLi76Gb/lb7Jo6Uc79eiHt7ezGfz/P8HZcjcOopokoVke6CK+C0gQmPJsnyGUoNY2WyuEmK8J0i\nqoNr7dwxyIbd6S8ChRBxv5eEIqKqKG2SekSVUvMuLjtm7EJid8779+9rBGyUlQn1kHCBcO0EsojI\nh9uhdB7d/TB5Ex6CU6nMO5/RcNY45sHj5gUFhMtckLu2wjCviTktOUkYJBZPSXAu03M0O1+bTVW9\nPCKS00ffmCufUQbEDZTPHDN3BBMR1aHFm80mIfPLy8tot9uZhvIOMI5egVuFMWTH03A4zPcnRbFc\nLuP29jYNMM/lvZl310QjWMDQuEr24+NjOhdwQ3h3O4iMH+/nTQOWJwcH7D7z/G42m1pQxc445hg+\n5d7eXh6lFBFxfn6e1y6Xyzg9Pc00JGOMM1Te//DwMIbDYe5CshzjgMFnImBk3tFNDpQeHh4yLQz5\nHYfv7du3WZkeA877XV1dxXw+z40k8/k8rq+vIyLi+vo6hsNh3Nzc5Jicn59HxNbh7XQ6cXR0FJPJ\nJO7v7zM9jY7iMHdzwJgXBw7mgNqBLIOskjPl/5vrik3gnjaQcIa8gQQd1uv1ckMD482OU9az7QCG\nHhkpUz+kWZ1KJMBwSozmVBMbB8o00e3tbczn86wJhwyjd91XvzunZJiyAtfHQaavQdegH80v4r1Z\n27ZDUDrQiTs7OzUSOY10ozlbLjNSliPwmvBZkgRGTrGV69ubFPiJfBG42z4/PT0lxw1dZeespOv4\nnpZtpxzNT/ta+yaOlCMdIz9MAJFZ6UmbNMxLW+lF1POeOGclxyqi8kbhTxhZstKCe1Q6LRhtGwV4\nHDzXdZTw9B19lVFGxDbyYSs8jf6ASpnESnRAH+DInJ2dxZ///OeIqKNm9u5RROYyRdSj/VJx4Cwx\nBnZ6PEdE5GVES0RjYry5co+Pj1nQknd3P02AtbCzMPx+kCPhgXn3lftqArhliPm3sXx8fEwekzco\nREQeVOv6MCa/866Mh40UxOCTk5M0yMwFCq3X69V2fEG23d/fj5ubm7i5uUmjGLGNzN+9exftdjvL\nMfAOnHPZ7Xaj3W6nEWY8QdSWy2U6bnCROp1OlhdwLR+IuuzOY2wODg7y7D94QOZGWX7Kmk44wiYB\n05gDnE0jmRidbrebO4W4L8URkcXxeJzrzXxEEGLvsqL+krlaljfWi/tqAixrzeRfrsVpYp4uLi7i\nxx9/zHpQ5kCenp7GbDaL8XicBoq5o4gqKCYOQ0TkeZD9fj+Ojo6Ss8V4QuhHLzpYctHViEhuF5+j\nL6xHPVfMqaN9xoyxLPUJP12ehuvgG8HDsU3Z3d2NyWSSx/3QIFC7bz5KCJ2F3rGjx4aP0sFGP1G/\nyw6RETj4jSCONvKsA6NVBwcH0e/38/BhGn1DhyOrHmtsA0i59Tc8Y+aNcSVQcNDrHX3Wl3ZC7Nw+\nPj6mvvCceg4NWHgOsJ/0hXEpuV44SThaRn/R7eiv0nZZPiPqO02NJpZc3BI9K9s3caSIQDlDKqIy\n3nj0EVVUjjFx/Smu8w46tjgbqjWZ0QvRA9hoNGrbZ2kmDzMZbDlfLBZZVdnfZ2Jd04d+skAdRURU\nnjKRhreP4nnTdzsdhndZaOxq+f333zMtQ7TgCIT7EiUTVTBm9JV3MOnSEZ3fEcSNcTOsDDyKovbu\nDZwkl7tghxmIAn00wmTFWUYNbPsGHSzJ5jakTichQyhmO8MRkc4TqI4RE8afcaU8BH3keqe0+Wy9\nXsfNzU0isTa09JsUK+k7vutzKvnuyclJ/PnPf47lchk///xzbhWOqB9EfXx8HJ8/f07jPRqNYjgc\nxq+//hrz+Tz++Z//uVYVm8NFHfkxbswrDqxRB2TBKQ/mmnQRP/kOzyIt7bQA+oJ7r9frRBsiIonU\nEVuDPxwOawocWWq1WrkJICJqKAuyhh7yzjmTxbknShvZZg7ZtMD9ymAKA8V72Rn8/PlzljGhDk9E\n5CHVpAmNLA2Hw3TIkDWnMJrNZr7z6elpzhP6zP1xw4DRP4zf175HY97soNiB8Bi4ryCg/N33BMkB\nmXKwvL+/n7tbcTiQCxfxxa4Y4XbWwrvo+v3+s4DURHtqc5UoHn3l2aChEds0MmsIh8Zzwd/R29wX\nmTdoUAatzAGfG3lhNy/3N9JlPVrW0UJ2cIYcKNDu7u7i/Py8tmHqawAC9zOyxHqCkmD6i9+FTSH8\n3fbRqVv3zde/hJqCFtvu8+wShSzbN3Gk4D3YsUEw4BLYCOGEYORQ2hFVntXIhJ0bL+oSbiUatEce\nUUF5XsQ0Sgrg1ft6eCFlnpdnM4H26vkek8f2YxpCBkRv6JvnoaBd1+Xm5ib+53/+J969e5fCZQVp\nhYZAITgU7iQ6dMqMqAyj7pSox4n7enu0OQ3sooyoc6eOjo5qc2jDymIuI136X0bQ3mJeLgI7qDYO\nGGyQH1cIp+9sqfZOGuYVQ1vubCHqB+Wy88a7LRaLODs7i7dv36byhRO1s7OTqRhQIJxElJDTficn\nJ/Hly5f4y1/+kv3zDsPNpqoX9fnz54ySj4+P4+zsLH799df4t3/7txiNRolWWX4xHjjuIFdOjzA2\npBeRX6J65qaMcC0z/KS4KfNE1EgARbkC5Jt1NB6P4+rqKtN7jPV0Ok0E0WmKiCrVzPsa0eC5rENz\nID0nBFPMuWuh2Tn22mQdeJcoyBAHSCNH7DRkTq+urmrlD0hZLZfLWvqK9XxwcBDz+TwajUYeLbNe\nr/NvZb0vHEXQcfpq9LB0dkp9a2e6XMN2tLxTmLHCqfKa9r1KRAZaAzqSNCsZA3Y6Wi+xhlxjzfOE\nLLDuGXsKozqQtGFnLnd3t0dGkbpdr9e5S5L+WbcbzbFT5NQZ3/OYGpHC4TH9gnHmsxJUMD3D92TO\n+d32koYjaX1HfwEXPN6lnJjr6rXC2mGMCFT4WdpaxuQlCg0yZkCDuUeH2PHj2S8FDbRv4kgRuboy\nLguFgXZ6A8ElNWAym6NQBJ/rgMxfguVs8BAmC1TJb7IzRokGCgnaaDrHbAWMAWXyjDI5V1ymgEyU\nJJowcR4BiKjOboqIVB7z+bzG5fEc8E7l+FDg0+ifieCPj9vCgERKJqLjZLJ4HQUwN1zv8WbR7u3t\nxWAwqD0P561cGDyj2Ww+g7Bfmu8ynWDj4kVq5edUKoYc5NBFXDGIZWqpJNAbCXPj3rPZrKYYSDc4\nrWsiPGvp3bt3cXx8nPP/yy+/xHQ6zaNOOp1OGszJZBLtdjvu7+/jl19+iVarlfys+/v7GI/H8eHD\nh3j37l3M5/PkaQyHw5r8N5vNTCX3er1ajTdqNDH3pOFKYjC/o8C95X61WmW6y2uAe2LQIbguFot8\nj263G7u7u3FxcRGtViuurq5qDjG8osViUdM1oGJOG7h0ByiYgynPhcm13JO0JH8j6kU+HBARFdPM\n2cH4R2x1InXnms1m7QgcUw9AjbzWlstlOsOTyaSGkJC+RO78fnakcX6c+kKOy9QcDgY6z4GN0WUC\nytLJcurdDgrlLUD8cEYw2qw3r2mCddKY1lFlOYSIqK01+KmksZE10Cg7krZJ/GSMQH+vrq7i4eEh\nhsNhGuoyDYUzGBG1NWAebqNRnbhAs7Pg9394eKihcLYDlhs7aJ4fxsuoIt+jkLBlH33pFJznHBvy\nUvoM5L/MKPEM7KVTm3DJsF32Byy7OHhG1bDRRspoZUarbK/lD17ba3ttr+21vbbX9tr+zvZNECm8\nXrZXR1TpHVAHIqaI6rgHvlOm0xzhOq1Cjtv5WrxhQ6cR9ZOg4QuZ4OocMz9NzIyoDtk00dqRJ5FE\nyZECojQnx6mCMlIFIen3+5lLh2zqCGq1WuVWbXviEdUuxvL96A8RD1GRUQLG0DslGEM3eCvM79PT\nU0aKoD00ECLuBWy+Wq3i06dPNZlwpE8EATrAeNNPoxCOMEjLGLrnJ5Esc2GEymkdkBe/O1EQiKej\nKKc2DRW7j6vVKrflR2yRE9AIdn8xx0TU8Jqurq4yKifVsF6vs/gmxwPNZrP48OFD/P7777XvMg/t\ndjtLHPzXf/1XjTT++LitdN9sNuPs7Cxl4c2bN9FsNnPXJegC/VwsFnF/f5/8LG//h+dIpI7M3N7e\nJuLGdaQMXO4EVMtnSTKHoCx3d3fJL6JYJRtVqJJuuQDlckrFKACy7J3FToN5rTkt32w2a0flgBqA\nupacndlsVkPHQEH29rZVvklDNZvNODo6iogt4vH0tC04aY4JcxFRbVlvtVq1Y4g6nU5uvTfa3GxW\nh7qDrlhu6a+Rp5KozHecRrWclOkwo/boBtYr6x2S/nq9TvkZj8epv9kFDlJrkjVjZBSbOYB0bfSf\nMeC9jVSD1L6UAnKKESSNOQDd4h14X3N0sCfWr4w140wWhf4Y8fb/yRYwT6UtMupotMopwXJOjcSW\n5G9sEmvA/GXslncVWg85Q2MEDLQJm1pSe/icfjjzgNz67353xtlySfv/HUeKF/cRA6PRKCeuhH9R\nvHd3dzkQziV7EixQm80myxsgXObeuLq04UgTmq0sI+qnjuMQmE8AlAgPiPuQnvT7lVwonC/qCdFw\nsJzO4O/NZjMNCNWdIyKP2iAVEVFPUdrZLHPWKOYy1857wEkBtvUCN9HeHCAWC6RnUlgRVe0RHFvn\nw9m94maFTr9NhORvhoDtKHuueJbLQhhKN9yObABzo8QtM9SegdvCHOMIO13sBR5RGRAO8KU/du4a\njUY6mcDTjcb2CJj7+/sk7GMIu91u9Hq9+Otf/5ppuH/4h3+I6+vr7AOORcR26/xyuYx/+qd/irOz\ns7i/v4/vv/8++4JcTyaTuL6+jj/96U8RsXX4qEeEUTG8z/U0r1FkCGWJXLCeSdm22+2s6u5UD/do\ntVq18g+TySSJx+bBwTNrtVq5Ziw3yBHv4DIVbJDxzuGIKi1kQ0Nj/drIUKZkMBjkPLCbkLno9/vp\nBN7e3uaOyIitQ9Tr9fI8QY8JKRGcTHZBR9SP68HBcKmV+XyevJ1Wqzo6yBwlAgzrTBuy0ijyuR1C\nZB3jzBq2TrTTyucmXOPYOOhinnCsKDvA+u71etl3TgswSdsUE2p88RmnBUDdsAxzHf0qid+uis67\nE4ShKyIqLmV53JhpC6Qynford6cxFyXlgPuZjmJnhb+x7uzYoYuYF8ubbabnCVACp9XOGUEA82c6\nDQ4R48r6sSwtFosMLO0M02/e28G8OVpl+o7xNAWHd/9/tW/iSCFojlparVZuyTYxNaJaGNQ2Iaec\nL6H8f0RlkDA0zp1bgKzwbbxNfkZImQy+78E1zyuiyqdagP0cFizfJyLHWHAwaERF7ja3onSkMO6t\nVnWqPErJx+V4MZHzNzfFAscYec4iKtStRFx4BouMe3vHE0YIUisN48+7lk4m6BmOFgufd2ShOUpy\nn5h7I44eEzvtzHnE81PVPdc4IEayMDL0uVRgzFXJ40IJ8hlISUTUtqYTQTI/OG0gKJxnxdwdHR1F\no9GI//3f/62VMWi1Wrn79OrqKtrtdjpB0+k0/uM//iPG43FMJpP405/+VON2YNzn83kMBoN0sggU\niPpANRjnZrOZu5ow8owTHI+SbE2Q4I0NyKiP02H++v1+EnnH43EiZPAIcUIcUCF/DixK3pbrW/Ee\nIJKus+N5K51GjAZGkXty5l+3242dnZ2YzWbp8D4+Pkav10sZIApHLkajUR4bRF+RW8uvOSQU5+Tc\nRIyRx3uxWGQRVgez3IuAzOuC57sfNtDmQZbcSX/uYMgoNU42fWVevIMYmaJILXO9XC5rXEWXJzFX\nEp3NWjY66KKTBC9G3HBOGK9y3fNeluFms5m6H9vAO5Q7PyHJc2/GxFkVmjdyvBRE4pzj1JtfxHiU\nG4jgWuGk2H69NJfmKbNxjADGGwbIlpT8KPsBL70DcsguaaOcrVZVWsd/517oW29qMO8aOXVdSNb+\n19o3caSYFEogRFQkQG+pNgmw3W6nYvYL2XiWRE0GgAVgeJAokf7YQNuz32w2uaWbe7qhcLknjk7p\nnLFLDQ/aaBgCjxDv7+8nhE8dFJwMjwtRCv2zQOHBf/nyJRVEGUXbgTBaRZ9N2LOQY/DL+zlKIerk\nXovFIueY75FSWK/Xeb7i7e3ts8gU+N2pFfrvui1W0I6WIuopX+YbZVo6p3ZCgcLpJ3Lj/3NPGjt/\njMp4zB0EMI+kPIlE6f90Ok2UBkTECoxgABTCjjSE5GazGZ1OJ43jYDCI/f39OD8/z5pd3PPf//3f\nYzqdxs3NTbx9+zYJye4rNa9+/PHHnEPqez0+Psb79+9ryDBGwakNFx+ldhrIsaNg3os5NXpyd3cX\nnU4nlaBR0JubmxgMBnF5eRmj0ajmZHqDCGkVxoZ16ajc+sBrFJlg/hlHo5Zch0EhYOH9IUvz7tZf\nk8kkv3t2dlbrC87saDSqUSUYt3a7nboLRwuZmc/niXAdHh7WUD7Gu9y9ZxKxEfcyrU/gUeoZrgcB\nt/EtEYDyOtBEB4bM/3K5zFIPzCk7StF7dly5D3rE5x4yXhGRBh/75E0P9LcM6HDSyjQvc4Ls29nC\nQIPaYF9wPuyg0kB/SC8b4UMWsYu8p8EG+o4edzBgRBF55Zl2gMpd97x/SZnB1nFff8Y9WMcObgjI\nCKJs51kv5WYgnjebzTLA9o5Vnm09YZuHvbPTH1GVdvlb7Zs4Uru7u3kMjA9njaiiPhucdrudfCoW\n+0vfL1EXDCyD4PyseVYINM/jXkykBxXhKhd4RJWuMcJkhW1nwGkfvm+0A+EYDAZxcHBQ40PQ4ExQ\nHM4CBe9qsVjkifXcm/6U/xzJ0Fe+S7OBJCX3krPA4ud5KCHm0NfgLLIbzgubcWOePcceU3+/7AvO\nUOn0WaEy9+zEw/Fy6oPrO51OLnBD/yxCO+WlU8DYGlYG5cAZ8Nig+Oinjfju7m5GyqQxnGrkmBC2\n/r979y7l5suXL3FxcZE7Z/mMQ2zfv3+fUT/je3R0FHd3d3F4eBhv3rxJ7mLEdm1NJpP48OFDNBqN\nmEwmKfukz3AEHh4ect3zDGS/THmyFghCGDMCBYw6yINT0IzV/f19HB0d1Zxq5AinlzH10T4YMqf2\nLQcg3jwPAE5hCwAAIABJREFUJwFuF89DP1EM0ty6vb29dOyYCwd+Z2dneXyQFTpcQZ4F0sX8Ird7\ne3vPnIxGoxGLxSJPkCC1wgHAIC42sow7awkn7KXGOkX2jcgQXDht4u8ZIUJ/oJuNLhCQPj4+5vEt\nTl8a2bYRxrkiI+Lgw4gEuyl5b8pp8CwQScY7YmuL2NHpNJkDVgelOE7oRae2kF9SgiWPEn3vXWpG\nUMq5cMqs1OvuG84EffG6NE/L/2eesKnW0fSV59pRs+PJWqPhYGGLDZKUjqRtADsHoZ+UTqZ3QZfO\nJ0h1GYwzfmUA7PZNHCkWLk5ARIVycO7US3WkUHoR9e3t5aQZAkUR41BZEFjcLxlZ/k4E4QgDZ25v\nb68GG3PfiEp5um9WEBYM5+TL55HTpdQCW8jd106nk3V0SpLf8fHxM3Ih/cHZiqhQg4gq522iu+F/\nuEAsNISYBY/w+/1xOIn0rRSbzS0Bt9vtZmTgaN78CCslX89Y+DPfAyeN3/lXKvanp6c8S5BoyqkK\n5h8EjWgex8oRr+UUeWPeS+XOfZ2q4h3m83kqCwxuRIWacL3R3C9fvsTT0/Z8PY4FMX/u+vo62u12\nvH37NlqtVhLRV6tVjEajNDj9fr+GvoCQUJuJfp6fn8fR0VF0u93aWW4RdUI5TjQ6gNpwpOncWPc4\nJ4bXGQ+cEPhnyCn13g4PD9PZNCeP4An5dkFSlCny5Ll0WsVGH8cGI2QjjGOy2WyyZhByA0cKOWu1\nqlIUyNf5+fkzsj1oCRsRkAEaaE273Y7j4+M8IgbHeDAYxGQyqSHdh4eHacDMSWFdeCxIMzr4fMlx\nKj+zbuY9+P0l9AXUGB2OA4qeaTS23DIQHdZFmWLld/cNWXHNI5OUbaAxvs5yGOVh3eOA81zkxMib\nZZifOF8u8Iu+LtOFpfxRssOZnPKaMsA0guTUHsip0dWI6lxcxqG0lw4+nTovEX87Z5av0pnFcUMf\ne+5xlqbTaXKg/TwQfBxinmPqSGlnDCaUwECZxn6pvZY/eG2v7bW9ttf22l7ba/s72zdBpEajUcLU\neLx44ERtRiyI7rwLjobn7Dw73rijVe8qiKg4UnACSvIcKTryyWWEY8Ia183n8/SE8d5fSo85vRNR\noTygbsDKfBcEp4wCgURB9oCxI7Ze+9PTU0KcLrZGg+DL+72EANK4LykK+ASGPP0dw9f8NNLn1Kqj\nJ1IP/szIAXPnVsLX/I2oDfnwThpSL07v0k++T3Tmwqr8PaIqLBtRFTEl3VtGMKAN5PSRL48XyILR\nKpAx754qd6yaQwWfaXd3e5grxNvpdJr3JAV4cnKSu1dJUfGsvb29ODo6itvb24yS4Uydn5/H+fl5\n/Ou//muSu5fLZXz48CHu7+/zqBvI7WzxdukLk3+R75c4jUTcpLG9RknhuOQDRG3QwX6/H81mM5bL\nZaJg+/v7MZ1OE/32bjgQpfV6neiaURNHvE5v0CdayRMBRWw0trsu6TNn9+3sVEdcse4oQ4E+cLqQ\nMfVmmpLW4PQIpRFms1mmUTgPkesXi0WmF42yMGZ7e3u5/b9E5Iy8Iqfl+nbqzH8rKQ4eR3Qlusop\nHOYNBJjyFug0Ut9GGczrRCa5rtfrJUrDeHqNIqtGuv2u/CvHxTQCnut3QBdYlmxX0G/Wf6Q2ndJl\nDYO2Oo1Jf8wlJV3qFNbj42MN9UQ2yh13nnOPATbTGRzktCz7UqJm9JvP6OvR0VFSgXxPTsG4v79P\n/vR4PM77QXlxdofM0EsoE/rbxw5FVPawTPG7fTOOVEQFaUZUC/D29jYrxZZnv5FOi6igQBoD6p0T\nXGuCop0pjFu5648+4sSYQ1OS9pyiW61WydPwYvLzEG5PMEqfrcdOa5owCJmXMSOdAFfHfBYWCie9\n25BFVAbMBDveA4fMZx2WitJpljJX/lLqAy4AzgsOB9fjPGAw+Iz58Th4btfrdRoGO4p8D+VgJeCx\nZ1F5yz0LB8K2nSzabDbLVAjX4WTZKTJUzZbykhPnvnItjflhF9r+/n7tOIl2u52E29vb20wLQeTE\nEYLPxvwOh8N4enqK2WwWnU4njQl9xYCbl0NZh0+fPsUPP/wQ+/v7cXl5GRHbqthwekgn8e6LxaKW\n0sTBpy+eAzvJpPuQTXMOcaSY12azmbvfIqrDu0nvcbxOxJZSQLqUtca4jcfjXL+c+4mTRfV2gjOn\nr1iLcFbKtPbd3V2NZI5Sht+GnJPG5DrkBFnhmJvd3d24ubmJx8fHGI/HMRqN8jtsIMDxMmmY47fg\nm+3u7qbjtlgsYjab1Wp0OdBAFk2JMK8ThwXdUKZIWL8lcZfxdnDFPfn8/v4+D5+m0Q8cPJxFKtZv\nNps86BvZn81muduPPvPZZrN5xlWyjuP9GAtXkmcOv+ZcW/e7XA625SVnCV3h4Jl72W7B47Njx3g6\nnY2cEpzyPrwrOgSaQFkuh8DFqVOa72XqBoFjs1nVXfTYOnh0OhlnuNvtRr/fj+FwmOvCO3b5PjaR\n6vN7e3vZJwe+pFDNieZ5zBv3sK9gp/ql9s3KH5DnfMmrXS6Xz4htJRHNxpt6VPP5vEYAxji3Wq3o\n9XrPJp9m40YzEmSSekTdq+f3iOqEcHvldgjg6UBwtHOyWq3i5uYm+v1+HB8f15w/cuB2eiIqD346\nnaaxwcj6Pfr9fvJQTJDEszefw9exNR1UgDHFyUIB2EB7F5sdD4wBC6fMj+O8lWRrE05ZqH+LM2Bl\ngnJizFx0kTEw0sX7gS4xXy5GimPC2PjdI6LmyL+0HdqLn35gdOwMlvwpHG3X1aLEguXE26XhdGFI\nmA8iOWoQcdRIxPaMvuPj47i9vU1ekhX/bDaLH374IT58+BAfP37MvnBg8tPTUwyHw+h2u1lSAYNr\nJWpngeOCvCOPuScQYi3ZAcEpJ9o1fwynDIeJDRuMG4fQQkZnHo1QTyaTLD4asTXCw+Ew9YvHNKIK\nviyTyAF1veA2cU8MoI054w2SzK5GNt1EbJ3B9XqdZ+xdXV3lzkOcAwwOHEueNxqNYrlcxmQyyaDB\nc2iU3zoRuWQeza/h3c0DMv8S/VeuU3OCynHzWqH/5Zl5BC82dAThOMDwq5ApHy3jYA++TatVlTJw\nRgF9CZeJvoMOsjvNtovfzZGzjkKvY/i9KQgdav1Iw/EoESCutS4xj5U1ZSeS96CQK+jL3l51HFm3\n242Dg4Pc0cnYRlR6jHVjgMRrgrl0AImjz1zwGU4XZ0xSuJNxA7TAsYNz6B1+Jd8Jnc544eBFVPYQ\nHqht9/+LHxXxDc/aM5kuooLrcDIQLD7jOgsEn2HsIGPbATMZknSXrwdJKCMoP8cKw9FxmbZD8bCg\nPAFMEg6NYUyUy2q1iouLi2fn0Hk3CgsronJIqeHT7XZr6TkWY6vVyoNqEVQrH0dlfueI+kLgmShU\nE/Y9h95JYqfBBFEraIoGEkWXAu77lOPNvf0ufIdxBbXw9w1D27FzGtCy4zFtt9uJQLwUdQMNO51M\nUUWQGRvhMvr1+/r/L6Xh+I7J18yb0zBOC+Fg7OzsxHg8joODg0RyTk5OotGo6rusVlUNtdVqW+l5\nNBpligiEBIfkzZs30W634+bmJpXb4eFhbm9eLpfR7XZr6xfD43pREfVzJnGkyzpeGL71el0bm8Fg\nkMZnNptFt9tNYz0ej/N9n56e4s2bNzUna2dnJ1EdIwiz2Sym02kiH05RsduNMXeg1G63c+fiZDKp\nobm8vw/Z9U6p5XKZQRYOCeMNujSdTms7KNlVulqtsnQK+ouU/f7+fqbGXOsNB5Q+uoYW6TICCqP9\nOFjonDJN85Khj6ijURi5MmhGL3gOJ5NJjMfjdAScgqWvh4eHiT5ZbhycujmtizNhG8R7gcw6eLIu\nsL2wYw2ibH2C7POOTkPZoS3BA+anJK7zLGTE9sDX4jiUtm13d1unsdPpxMHBQe2gc8CPk5OTmjM1\nmUyy6KkRQt7D+tRZkzIVuLOzk/ccDAa5RiHvs+7YMToej2M2m8VyuUwHm/WKHSkDGmcn7Jyie7Dt\n6BX6+VKGye2bIVIMnAWNhU9kWfJWIiKhca5DiBlMFg7Piah2BrFV0v0oHQHuzeQzgDx/PB4nKkB5\nBDsLPt7CSA9pBN4F+J/rIrYTNplM4vLyMgUYp8XRkB0pIF5H1VyH4eEdOMCUZ6LsEXBD0oayvWBx\n2jyPFkbSli+l4UB/XkoJehu455doHYcXeSjlyf3h/dmlQXTl6Bk0g0Jx3lpr9Mi5cuYd5Wf0D6O7\nWq3yfu4nyr6s98W7olDMV/Bc0Mx1enp6yiKQIDJOlYN8PD4+Jl+AvqzXVYHbo6OjWuQ/Ho9TuQ+H\nw1rEyRZ+EElQECK54+PjuLm5iel0mn2hphHpFiJ+xtvOyO3tbXKrdnd34+zsLKNhR5gYHvpFGpax\nwXHzESl2el0qgfIQEVVNK8ogePzZ+Qj9wIYYhxVF7Xd0KQbk3Gv+/v6+dugtzgm7eClT4CrmrAfG\nZjqdZn/u7u7S6eB+vAMIpE9a4J6grfTfc4NBQmdbvpBhr2enjNCRyFRJvbD+ceDgNc/cIVMgijc3\nN6kj4Qcy9kbduQ9oFSgPcxZROTY8z2uUccSJBGHku+h1B+iMhdGl2WxWQ2boQxmQeecfes16AHk2\nCMCzmQsQd+tFEDBnTEqH/+TkJB0ngghKDlFU15mDyWQSFxcX8eXLl0TNjbZbfqj7xrh5N7jt/HQ6\nTd2DrqVw87t373ItgdRbXzqgtn13MVFnDdy8Tkod7GC+bN/EkQI2da4VQwSU7Tx1RP2sMje2MuMY\n4KxE1CvQAsWW6R0iMASSZ+EIlAt/tVrl1u7BYJDRD/dcrVapwHg2DQ8ZI4/iM1LTbDZrXAAbAD63\nh2042FESzgef43Q5p0wEQMqkVBwIpAWohJq9oFCWOAXmifCe5lvRUNAQbv0eVrLlAn5JPvhbyVPw\nIsXweu4cudlY2JHi+8yJiyc6jegjbTx2yAPGAiOPkraDamSNOXcV84jIjQk4py6Sh1LEYcfxYdwu\nLi6i2WzGn/70p0x3RGxRl8lkEu12O3q9XoxGo+wLHC/m3+nynZ2dODo6iouLi5hOp3F4eFhDbs7O\nztIRfXqqOFInJyeJvJQI3/39fTq9yBuK9+7uLobDYRoC82AiIonpjJ1lkfXJfSeTSQYuGFqcKVBA\n3h8Hi3F1oUenFOBXRWyDr/l8HqvVKrlnyJvnDF3E86bTaTorOKomhiPTIIrML6kOc2l43nK5TN4K\nfCGaNwHwTPpGSgr9Rl+dirLDYn5VGQQZ8WaNOJApr2NcrQv39vbi+Pg4ms1mXFxcZLAQsTXsLgJp\nbuhms8l6WWUpFqNRERW/LiJqWQb0rtNb9JE+lzoOFAj9xpihR5BTb2xAp30tyDVqwv24FufO+svf\nKYNYz816vT2fs9Pp5Pu32+3o9/u5PsqyGaenp7G/vx9XV1d51BHzyRhjSxwsUtgVmgLyDeL4p//v\nCKqIqNlLBxi+J6iYuVZG/rHRzJODIQc/zohZfr/WXssfvLbX9tpe22t7ba/ttf2d7ZshUnjbJiSa\n0FdG0EQ/L6FSRKrwHJz2wyMGHizRqpLgF1GPkEgdmo8CeRS40ygI7wTXgqjFRDzQFzxeUBATac11\nMTpjIp/TlbwL1xEhE0X5ufQHJAK0zrwl8yQcCXIfPP8S/jdq5pQoCN9L3r1TcyCTHm9kxTwN3tE7\nkIxaMqaeX1f+Bf3kXQ19g/LwLMshssR7esemPzf3B9kiMkOGyk0McBQcUXEvpzrpN3NNuujw8DCj\ncu7DmB4eHqYsXl1dxXA4jH6/n7sPzeWCHxGxRUW88wfonvsz3p1OJ6bTaczn8zg8PIx+v5/jPp1O\nk7S+t7cX19fX+e69Xi83aJTrwOgb/DTLN2sK9MnzX3KV1ut1DX00Z+b6+jplsNvtJmpwfHycCDl9\n6HQ6Od6gPfSHdJA5fhGR6BQ6ylxN1hbIsPlqFNXkHg8PD5n25FByZMSEcsjUpIE7nU6tn8gKRTlp\nICnmOprnVPJyjIiUKSzTNlgL6HGnAXl/aAfmMpYcxZI7tbe3l7tPXSKk3+/H/f19pkJNGWAeeUdz\nhDjmaHd3NzqdzjPEgjWPHbKu8fp01sDjgAyASNlegLTzzu4jNsgFnEE1bRPMvcIucF/3weP4EoEd\nefTmFdZlu92upWkjtutpMpkkSuq0O/PN2HlDCMgmyHC/36+lruHvnZycJDKHfG82m2i324mce10w\nV8yvqSQvcd4i6kdCoVdsZ5xyfql9E0cqImoGJ6LiCsDELw0uLwMR2/UtgCLLVjoShnip4RFRKQRv\n/0dBt1qtrKsTESmgKFgbWsimCA7OVMQW3rcjZ2WK4baTiJJkxwVQKoQ/PmMcymrZGGxvC3WDw4NC\ncjqJBY2xNMSPQWIhe9EwjzYS7g8OdJmiY+xw/nhfnuf0BlyTiOdbhCOeVxKnHygVPqMBt5fEWDuv\nNDtY5iDw3jYmfj73Ne8CnhJ9Nr+MHYHc105A6fQhN5xHaRnm3hgTZIXSBJPJJDlhvAelRyIiU1t+\n/+FwmAqu1+vlO04mk+TsQNTFAfHxMHBMUHwofQjqb968yXdAcQLhe4dRufOHs+EYJ2TtJUODs3B9\nfR1HR0fR6/WSGM9643NSD8gRTjvvwrhhaNgs463nfEb60QYTmSFVtFgs0iEidcpacOBJ3Ti4TlSj\nZ9zYgUddHe7poBMjxpiaIE8Kz5whO0AOsvjd6VNzJO1A2ajyGY5CaYQdhJS8V/NpGU8+Q3fiTMFl\n5Z6Hh4exXm83J3gnM2e0HRwc5Lt7V6EpBQ6CSse5lDWaAxsajq0Da77rVJt1B7KHLkCmmONut5u7\nTW0T6Lvvb3oEY4jceA5sr3EibfccBDsgd3r56al+EgYyjH5zYLa3tz2Q+NOnT9Hv92upctb509NT\nDAaD3LEfETVZKpvTdeUckj51CtU25W85URHf2JEynwnhoh6SvXwf6UDEXZaUh6lvTo/RAr5b5ovN\nrfEgRlR1RUo+ixcCzhR9iKgQj1arlSRWHEUiCyMlEXUis9+dhU2UZwFGKWA0/G7O/TsqMXEawjWL\nwxwLhBEhsmHG2D89PSWpnnmiMaZGB1G8cNUsnDhrnhP+bifI8+moECVvw8911BVBdnB2MXg20EaV\nkM8SBUXhY8j8fp1OJ4nfcP2YKxt5lE5E5dRybx93gbJk/I3KMkcYate1ohAr48WOlIhIRwgkh7pR\n3B+HiF1m8BTa7XZuK+/3+zVuAuOKscAp8NywPr0pA6I1xoTdpbwDRW4Hg0H88ccfz9AKnETOoWTn\nGvdBH3gNU6YEVHB3t6oVZQ4RjqePbMEhIEhxkUCQnv39/bi5ucmxwYFip5yRYwIjxsbIOhGynWOj\nIHA97u7uktcWUTkSg8EgHVT+RiAKR5Pgj3vyDAIbZNS8KFoZLDDHpUOEzkCWHbTZAfW93JAHjCdj\nMZ/P80xHry8QRcoZ0Ad+8rz5fJ71qSIiZQG55lgUnodjYieQcbCz4t9L0nKppxkTdvq6n9hIvufN\nD5ZDdBtIJqUCuLcdYNBfxqa0ew5KvduTsxlHo1He344lc/4SL8tghscJ+UAOncFoNBqxXC7jr3/9\na3S73dpOdnSFuYXmN/udSn4v4+hjZbgOzuNLOoZx/1r7Zo5URLUdMaLa1dVsbgvrmXnPIjEpr0x1\nAA/7IE2ntPg/SondTjzbUQXXEFnbeKHcOBAxooITXX0VR8GHyK7X6zg7O0tDiSDyLAx/uf3fKQ4v\nZP6P02RBdASJILu4ohUdC9aKj7+h9LgvTiJGxM4LDh3v5oWDwjcK5uKEXMP1jvSdtijlwgRiRw6Q\ne3l/qjZH1HdR8f7e3cU1yJU3ExC1QFalLzx7d3c3d0yBakRUu8QYS6f9UGxOmdlZL1Msjio9vrPZ\nLO9pZWGSPPcYDodZc+3NmzfpSFBD6/z8PO7v7+Onn37Ksb+5uUmnlIgeWez3+wnH23GKiHS6GVOn\nw5fLZTpSw+GwdnYl6X4czfF4nDKD3JGKw7B4JxVjS/BjFAtj6iiV63hHHEOjk8iCHRPmn1IEvV4v\njo+Pc+5vbm6SUEs07RQ7fWa7uY0//xgDy4GbN68wt7PZLPr9fpyenqaz4Cr9EVWtqojKSfduMho6\n0alCI2tln8pxIxgrEVCjxegwf8b13BvnFnSf3bDMtRvoJ+PHmOJksKvTwRCBK880Gsma3d3dTTQF\nmbG+c5BM4OC1YGcY+4Vd89w7Y+G0FBt2ms1mnJycxGg0yvM0I6qAnnfkLELmiB3eOO4+NcF63rLy\n9u3b3DSBzCM3nN3J2idgiKjOvESOLVM+H9YOFM8/ODiIq6urODs7i+Pj47zOB7y7dhdjit5HPplD\n9LptrQNUHFQcQaNjBBlfa9/EkcKgl3lH5599HAwGC2PValXHSKCIyM96m3fpmRoiJqL04LphpHDe\nzK9gCyi1j2g3Nze5Wwlnw5yN0WiUUVS5swBOR5mic6qp9JT5HAiaBc13vKWY7zki4HqUl6MhBBIH\noOSC8E4ggYyNnS732TwJlH6ZejOCUDqZdgZ8JIajWws/ix2F+/j4mHA1KTA7Fu4nsgZqZbSI8QSx\n4h2QCcaj3NV0e3sb0+k0VqtVLkyUDeUErIzdN5QCn7nOEE400WU59kD8ZVoxYpviGwwGcXNzkzJ1\nd3eXu+7+5V/+JXZ2duLjx4+5Joge37x5U+sLjg5Ov2vJGIWF20DwwXgC3eOs0T92wOFgMU/IAcaD\ndelUFGkE1hD9WSwWMRqNMqq2o+5AgMYcYrhZr/P5PI34cDhMWaBa/Nu3byNii2idn5/HbDbLsQE9\nIEA0j8cRsPkz1KKime7Q6XRqOhGe1OXlZS3A4z34HnwYZJh5QoYdtCCPBF1Gap3WQ19aFq17rL+d\n6isLspbUDHQ2Y0S1+sVikXWtPGe9Xq+GSPK8vb29Z0eO8BnrnUDbaJJ1aYmGe94c7PJ3nDfrBBA/\nbCFOX0S169rv7lRxt9uNXq8XJycn8ebNmyxXEFEhw+zeNGcrorKnrCHzDnkutov3R2bX63Vyahm3\n29vbuL6+TlS4PFrM8mAdtbe3l/QAOIK2ewTrFxcX8fnz57yOEgqMnxEwp5BZP0by+D/p3bIOmndV\nO/PDevha+2aIlJViRDXgX4NOidZRelYGl5eXtbowL8GK/LPBiKigVyMPEfEsAjDSBPy3u1s/dXu1\nWsVkMskT3bmWdxiNRmmE5/P5s8VtVMOGvSRK+p5GjEqvmYX48PDwTJmyOBGSElpnQRGlsRCJql/y\n+JvNZjoE9NP3xAlDgJ3ztsNYCjGKgMXFO2IAcAKtLOC2tNvtrHhvmcFZhDPibewvGVOPGYrHUTeG\njrkD2WAe2+12HlVCLRnky1W+TRKNiNqYlP3BkMJzKQMHIPj5fJ6IRMTW6FOj5fPnz8lRYixHo1ES\nrc/OzvI6qmh/+PAhlRXt/Pw8NptNPtM1hkC+4HFA6o2oUvKk3rwOcQR3d3fj/Pw8DVJExZtC1kAP\neP/7+/vke4FAMf/U8+r3+7UjnRhveJcmqtNASymFAFnb9XNAdJCpdrsd3333XVxfX2etLNY0ZSqo\ni2SU3s4I+stIR6/Xi6urq6y5Y94Z66vUNUTzjFer1ao5w+iBMsB0msjGruRHlpwg/5/rLd92jP0M\nPmOdsP7p6+7u9hw9Ni4YHcWAsjGA+3NPOFOWPcYNtJnvligX34uoHCcHob4f98CJQh86o8B3+LtR\navfZDgpHIR0fH8fx8XGMRqMa0uXMBmND/6iDBvhgvYiux6G07ub4Keut0unhPubaYTvshJR8T9LL\nFJ+NiOQEdzqdmM1m8eXLl1oxVmdg3CfI8GRyeCfLk8fJQZK5WvSdn6V/ULbX8gev7bW9ttf22l7b\na3ttf2f7ZkfEEP3Q8LZNirbnC6cGb9yRxtXVVUK4jpCA/ZzWcCMyMyRN/0zms+dKhWwfKeFidxA/\nv//++0SJIioO2GAwyPcGiQDFMCzpaAC40QRC99epMb8bKAZcAUctvh+5eROjndp0lExRRj/H6UtQ\nMg6VdPRFuhCeBOPmSAfI2OfJ0UjXODIwCRRZ4XkgI6RESn4IHBzzrnhXw7vIB7C/eUnMJVwk8wcc\nvdOXbrcbk8kktwtzHxdmNBoGpwjuUpmChptjNDOiqpoMB8SIBf3+9OlTNBqNWmXz4XCY79xobI/D\n+e233yJim7L6x3/8xzg6Okr0lfcHoXGBW6dFQAxAnoxwEpG+tA6bze0RLxcXF7WjZUBwkRe4Hy6S\neHd3F/1+P49Q8Rl5lGTgOiMKpGzhiZQ8zogqfUaKDmQL5IS+8TwQ6VarFbPZLGWROfPGBsYNukK3\n2817mwO2v78fx8fH8fPPP8disYiTk5OIqPM/WRs8zzoXlNq6kY0+ROGgAGQEQBiMftIf5q1ETtHB\nrB3TGpBhj1MpB3zXOyEbjUa8ffu2htL5uZSHaDabcXx8nPNGMU7QQ/NRkdu7u7uYzWZJ0aBvfMfp\nf/5m7pr1kxE90si2Ud5IYKTOckmanNQdx6dAwGYuTP6GPkHaE1oDRzvd398nLcbVxUHOGo3GsywG\nRzAhH6YKlHxNyxm6H26ZU6KQ3km3IlOsa747mUwSHUcX8t4u8kza/eDgIMfAdBcoNKW8mF9b0mSc\nQvxa+2ZHxJBSQVCddmHQnTIjL3x8fPxsgqfTaUK8hhAZjOVymXAlCoVjK1CIdnpQ4F40Jp4BObIr\ny1uL2S01n8/j5OSkRmREATHRJQ/IcKj5BU7vGHrEsJK2wEiV42zyOO/BgsGAe7cG6T4LpwWcbdMm\nYPIe5gdhCCIqDgWOoB1CeDU7OztJjCQVdXp6Wsu505+ISNL/09NTpoCsoHEG7ExFVAbKaRs7mGyJ\nRtF64aMwkJ0yzUjeP6I66JM+c583b97UlC0y4/Sc5x/DYz5aREWQJI1jhxLl3mq18uwsV8zGQcO5\nMmkhkt0rAAAgAElEQVTaHKBPnz6lXPz4448xHA5zfqbTaXz//fcRsd2qP5vNkh9kxx4lv9lsnjk1\n7XY7FotFchst+5BlF4tFEstpOKlwS2j8v9frxePjY+6apSQA98WYMJ/mOpEOGAwGqacsT+gJ6wWC\nOxNTS7I19+cYHuYeRc3zvL6pB0VZCesI6iQdHR3F1dVVHhLd6/VSR2JomcMyYMKR5XfWMPrQgRCO\nJ30z36Xk8TmIYK3hpNqRMuWiTHmxFng2mzV4XrPZjHfv3iWV4uLiIiKqndQ+fxV5Y5ezd0mXwUmz\nud39ae4kzrW5tuZysW7os+/pVJF3FZsP63sgM8gQARpOrY9HIQVHMG254d9yuYyrq6uIiORE+VgV\n7zonqME+OAVrWgNEfd6L5xLsWrez3gBD0InIBGNmXes0LqAL74Df4DReuQOeMbF9KmXRtpRSMuif\nssaf/Y2X2jdxpDBgCEPENlLodrs1h8oC2Ov1otvtxmAweLaLrtfrJQfDEYa5P+v1OksrRFSCQKT1\nUqTPdXZOUBAonPKIBQZ8Pp9Hp9PJd7i7u0uP26eS01j4Jpry004Mz46oHByEAuMfUW0RRTFbYCOq\nMg54/RgtmqNLowXeMs7C9+JnfECu2I5Ozt+L15wGBJnf6ctkMkkkwjn1iMhCkpAnqVWEXPj+cIb4\nHcSsRPkiqoJvLGZzIUz+Rh54P5w/5LBE71C+RJn0r9frpUOE019ypIzwWG4ajUY6w5YNR9soThck\n3dnZiV6vl+/nOmmdTifm83n89ttv0Wq14ocffkiZmM1m+fPDhw/x7t27iIj45Zdf4vHxMR2Yku9h\nvt16vc5dOBDwefbt7W1thw7bwr2jkbVAYNHpdGIymdTOokPeMYiHh4c1gjsOD/LE+9/c3KQjuFgs\nkksVUW0Hhw8C8Z7nEZCg12xgzEXp9/u1s/2oPUVJCq/DTqdTc9iN0j8+Pia68vbt29RTIBGLxeJZ\nfT2cdProulV8hgPW6/We8XRwvJAtI3nIKfrdXCcHUs4CRMSzde8xjah2hxmlZ7yQ3ePj4+Q2zWaz\nnNfHx8faMT+gfqwnb1CgD5vNJnlFXt/oPO7r/ltPWy4YN+yZSzIMBoMM4sssDI4fusubNwgeAAcg\nlLuMBRshOPeSsQFVp78GJQgEmGPPE0619aQRKH56juirHS4K0jK/XttuzmBgGxjz6+vruL+/Ty4n\nepl7An5g+x0o8BP7YE6fkaiXHF6/e9m+iSNlD9GQa0R1fpgJzgjSwcFBIg8M3N3dXRrQciLxSL3t\nGWO6s7OTqTi8ZUf7EXVkyIsNhc5EMOh8B0V7c3OTC4LnOP3C+3nRWfmWf4uoH2ZpR6bVqh8iasFi\n0TjCREiJhktCrYXGSAeLkJo5VnyOaJ1GjKgIgihTFFJEVQQRobcimkwmuRMSpWKUp9frJXkSo8J7\nozDLVEO/34+9vb086BXHppxD+shnpHvYUuw5RA7Yudjv93NsI+rVeBl/pyy4/0v95fuO5HjHMpK1\nY080zrg6LeICkc1mM2Hz09PTWK1WcX5+Hu12O46OjvJdHZV2u9346aef4j//8z8jIuLLly/x/fff\n55g7JYaTiLM7GAzSYHz+/Dl+/PHHRHF3d3ezphUlLCIiKxjbgPEsEN7r6+taIMK7objRMSBuTg16\nXgguMCQYIeoKgUyQjmMOHdHaCFuBs4UcRwrjRSDlYIgxNBLqgqSs7Yioka2Nitl55N3t8BnBR77R\nF6Sc/dn/Ze/NfttKkjzcIClq46bNdpXdNd3ThcG8z8z//zbvszxMowvdXVWu8iJLJMVVEkXyPhBf\nnu+EWXMvGhjoPigBw7YonpNLZCy/+GUkjhB/m2aADHoOeHc+CGT9ApLF851KZ24oEWKn+8uXL8X5\npC4Ua+j0k20Jz2G9nGoz2szY6IsPJ7muodce5y2ntXl2pp6QDuZ2CdsY0DcCIh9Acn9ADq3bHx8f\nyxhx1HPmB8fNwT9OKQVJjWT6uzyP/2MTcGLywSWc16enpxiPx2WvcdIVe+D0MPuBFKpTa+yBxWJR\nDs0gF7bZgCXMKw6R9asP+jhdmIOEiK9Ljrg9iyOFoXReFUTFhp5FBGoEdnRkCvxoFr8dJ35nX979\n6uqqHPE3IsJ3zdfKCEREvRQA/+cZ+fJdPGyEzekqn9CwgPI+KyU7Ofyco98cX2bM3mygWeZlOVrw\nWvAOlIznBUVMmhDEI6KKonB2XI02IuLz58+l767ezfwQnWRFZJTAGwqZQOHhOERUl6yyUR0xNhqN\ngmJOJpOYTqdlI5pX5e/zPaf5zK3i+ZvNJobDYTnlwjvZ8EbNnMJxCi+nxVg/UEYaa4TSdzkGGwXk\nysejKRrKZyBL/X4/3r9/HycnJ9Hr9eLm5qbICHPSbrfjj3/8Y3z48CF+/fXXiNjVmen1etFq7W4C\nIHWFLFrpNZvN8jlXyXz58iVOTk5qx/hns1lJGVLoFIQT5Utqo9FolAKUEXXFSGqHlCvGhdQA5Qci\nKmNiOUWm+I7RFaMQ/L6d5Iio8RNZJ8YBaomsgZSyLk6h2ZiCcNMH73eOoeP0g5AyPtARZDsHH+io\njCDjuOIQ2Olx6svpE9YC3Yc8O/jCCTBixR7z3np8fCxyMxwOY7vd1U776aefagjCZDKp3SQB8kRf\ncDLYI+bd2TEEDeH/lhHkgf5hh3xaknlzZgV7gsxwMTVcVFqer5zuJ2ADmSOIo6/8Xg607JRnxwDZ\nR5/4e6D+rIOzNDirppA4S+Gxr9frGI1G5bsEVft4S0b/cxqZ/X97e/tVCpaWKQHIL/OaA1DQOnwI\n15/ah7bV5u43P/k/bJBNM78moiLp2rnAyOBQmSsQUU3C/f194T5F1AvamfMTUTkXh4eHcX5+XgTT\nffGGt/OCcsLZQ4CdIgT+dRqPiJwog2gPw8uC+eipNzqC5X7a4XC66PT0NO7v72M8Htfy+a7fYvjd\nggJ/i7WwMgAyJUoAQeJ7kAm9KVnzu7u7WtToHLiJmCbwHx7urv+4vb2Nb775poaAkQpmDhyZgDbi\nNHgTMI84TBR+i9ilBQw5k86IiBp0jvOeG4oKkiPOC7VPkDk7x0Sc/DxHdGx4Uo05vcP4HHmjzKik\n3el0as77ZDKJzabilqFsbm9vCyry/v37WCwWBT0hzfv999/Her2OX375paBHkNTZDzlNzd1+y+Wy\n5pxTDmG1WsW3335bS+kfHR0VxJDvei88PT2VUiibza72D5wvUoYYZ6OujoI56OAUhh1G1oc55boo\nUil2pHESMUTWQwQeFN1EvpElxujaVOhJy7D362q1u7KHAMlXJ6FDcL4cXcNHwzgbBffvOqjjWayd\n6QXIBnKDrs0omH9GY96sX7Jjw7sp+hoR8Ze//KWUdZnNZrWq94+Pj3F3dxej0WjvO3E60BE0HE76\nawd0s9mU4MxBDt/DGcyBEJ+xh0FCkYvNZhPT6bR25Q/vA2kFdTL6CJeU+xhtB1gndEJOb+EU0XJq\nzw67i53iQFFY2QgVeg0ZNRptp7LZbJZnsu/gf5liYRlj/DRz0VarVQyHw9qBAyOa+AQenwEPyz4p\n0+12G5eXl+V7Dqp+q72UP3hpL+2lvbSX9tJe2kv7O9uzkc3xsJ0qIXVDysL53Ih67tunB5bLZYlk\nfRWII8RMGMMTBZ4178oeaGb98128cPNlQC84yulrBHxCDvQBrxj0yDA+jdSVI1/Dpk4LZGI06RsX\nWvMxf3OfQBsidkgPiArpEx91hXsGIudCaaBSRHT0td/vx/n5ee24Os90Cq3RaNQKrrK20+m0kCB5\nJgif8/JGwCi8eHFxUTudBBpHtH9+fl7m7fPnz6XyfLPZjF6vVyO3A20z10ReVKbOJFM4KqQMF4tF\nQao8DlJMrC9yg+wR6eVjxUS8ufAgEV2j0SicB/q6WCwK+R5uzZ///Ocip1wcvtls4urqqqBqj4+P\nhZMGcsWpPTgN7LHlchkXFxdlDOfn5+VdFNnzWsCXcCoVOeWOP1Jk9IVn+UJvk/RBnFxgMaJ+d+fR\n0VG5EobvwVdh7xjl4y4+iqBa3s7OzopucqoNlIt97JOd6BDW0ukl5IoLhnOa3frOqaGcnrMOI5W3\nXq9L9G19wv/z/XdGXMxDYQ8zJ6TNXAAVtNlrYHQwo2B8ZhoBKAmI47t37+JPf/pTOY15fHxcTi1C\ntAe9v76+LnMGgs6+MfnbiB/vN3pkjiHzynwbQXYajv0AsmYkGp3tAsG/deraGRquJyMTw/s8x+wr\ndPg+JNDUCGTRtIrtdls7dQ6CZ9pNRHXlljlGNBBPp25pli/G6cwP68WhCMtFRHWbAbo+Imo2Bf1u\nZJz+OCXNZ+axIR8RFaUhnx53e7Y6Uvvys0B2FsKIiq/EJaZ2LIBoUdIWRlfhdh6b9xkeNK8h9y0r\n9oj6qZmc24aUahg3oroPDkVkweCkFA6NlT7v5TSG+wdcifLEkJsH43oa5lhYMbPRI6r7rJz3z2P0\nfBmqHY1GtbsSPX7nnd2YE893zk8/Pj7GfD6vEY4pXQG/zmuHHGGw+/1+MV7Ox6MQ+YzULWkBH7k3\n9M8fO7gYdOqaWE6n02kxyigik+3NPbHTYwXK85z2pQ+sh9NRrCm8JQcmOHWbzSZubm6+4hUeHByU\nK41YZ47ncyEvvDVkBsNwe3tbg+k7nU40GjsC+jfffFOrst7tdmuydXh4WOSJe/jgpzjFTPkG0kL0\nP9emg+/glCjryB42n+fg4KCWQnx8fCx7kHc1m82yp3wCibRw3i+ZrDqZTEq6lEuOfR+aDz64XlZO\nl6Hw5/N54Y4yb+wNc3voA4aNAM6pYhtjX9rLHuQ0qoORiJ0+YT1szPiuqQKu+5MdKq8xP3eqlGd+\n//33MZ1O4+effy57mGdyKTdpqsfHx8LLIViPqNJVdlxt2E3E93jZd7YXDoqgNfh96HtSvH4mOjYf\n+OF7OEbMC79r0rdTf8wVTj22yH1FN1g+sJ/mvxmwADzIlBaew7p7LQj06Yc5nsw/e8Z2wGn57PBZ\nXpgnp/74jHIL5qQR4LpeGu/DGXVql/nM78/tWRwp35HliCaiykNaESGAkOtM8iVfbKKueTJm7OOh\nRlSOAwrWgsiC47ka6fFkmrjod+wzbC4+aVQmokKWUELk/pkHPjNRlL7QB4yOyeYRUa6eoFaRyd8m\nv2dnEUcTwTI3w/NgxcRJOG6iz+Rnk6OtNMwLy84skS3v4Th7RIVs8jveTMzN09NT3N7e1hzefr9f\n5tf9YAyvX7+O9XpdSKusPQ6ueRM+DYPsse7mBzCHh4e7m9p9TYg5UHa23SdkgEtA6bsdYDuuPA/5\nwAgig1YioGX0BfmHAP7dd99FRMT79+9LuQF4OD4J980338Rf//rXgtjRF+48Qz6/fPlSHIl2e3e/\nHg5Uq1XVQluv14UvB5cCzuHj42MMBoNy3Q7Iqfc7z8Dw55NX/r85REavfTzeAaBRK+YebmFGE82x\nYl1w8EGocJ7t9KC8XZ6AqDiTmH2whbnHcTFnpdGoymUg03bUMSDZGcr8VAy8kTzrYOtJ81TR4+ZY\n8V3WwEiWUSC3VqsV3333XaxWq/jhhx9iPB6XS6LzNVWQ0plvo2lPT1UNIjss7DnztbLOsk7EmWMv\nZSfSyBTOPmvBWB0g571sp3W7rV9jw3hMhqeved54nrlcRt2Yf3SZZdkHqH7rpDey5+CBAHKzqd+P\nyfdyPSrPKTIMQsozsZW2t3zGOuZx2dml8UzLApmynC3LqJ7bszlSEVGL9vedmjNZk0ldLpe10xTA\njUYJeA5CbOWQoUR77vaWgQRttCPqpHicDBNHSRHiRLBJ8YJZlExGRZE6VcczgcbZqBYEPiOKR/Fg\nHEgvTiaTWoFMKzungZg3lB19zg6h4W0TGTebTXz8+DHOz8/LJsj9iqg7ITyPZxpiR1njGLkKNesA\napCVDf0jIjVakZ1jK7eIKCkEn1BxypENZ5QDBwj0xOli+kZUjQLlHTmNQN+QWaJ9EAEa91PxnuzU\ns69AN1gDR7NOeyLDkMK/++67eP/+fUTs7tP7l3/5lzg4OIj/+q//ipOTk3j16lVE7NCqu7u7uLm5\niYeHh9oJuuPj40KMHo1G0Ww2y95nP5+cnBQHjMb36QupNtYCB2swGNT2IWNcrVYFBXXFcxwrZNx7\nitTPyclJQW0dYPkAxsPDQ1nH5XJZCosif3a6+Bmpa/pDtG5Umd93LaN+v18zrugDn5BiDCaS814b\nK4KPiCoFyr9B63LAlJ0a9BljRN68j63fIio9472f98i+tHYOkCKq0hDdbjfOzs5iOBx+ha47lWZn\nkb2KfkT27QTiODozwHx7bvns6ekp5vN52btZlxIgWyc6Bcf/mWMHxYzB9AocGZAp20v+DeqaM0Am\n1FuHIYPoQztLnGjDEbXcoa+dQrNjY/TecoGso989r6wZyBF6ivlA9jJKZPuBw8n7MiptB4y95YDP\nyPD/5kRFPCNHCpjQJ6EYCM6DlRtGgdM4hjkRSBbMgunN3GhUx/+d42YBUTa8z6mwfWMAIfCx44OD\ng1KBGmXDMxk3C2OjDG+C/rmuDUUa7TFH1IuWNZu7o9PwUlAk/M7Z2VlMp9PixFq48e4zPIpQMUf8\nnO9ZcTKnmbPGWuAIYJgcuUdUjit9M/TfbDbj4uIiNptNjMfjEl2ykdhUTiuguKxQmdvPnz/H09NT\nXFxclOs3aCim9XpdjtxbBtjw7i/f8+ZEdoz0oLhcUZtnGv1zetvy4XpbXot9R7lx9H1qC3kHwcTR\n6PV6ZS045XJ4eBhXV1dxfX0dP/30U0RE/PM//3NcXV3Fv//7v8dwOIx//dd/LcqGAp6r1SouLi6i\n1+uVyJNIdbVaxd3dXa3QIykh+m+nEM4NhsnGAM4R8+UghHeCxOB822CyD1GuDgY4IccfdJSjV97j\n9R2NRnF0dBSnp6dxe3tb5AM0A+fK6SRzwAiK+B5GBHQEh5Q+MH6CwGzIrL/sKA+Hwzg5OYlut1sL\nPpAT9o0DW+TNDpMdBpBYI6R2bBzY2CkAMaSBavF962bXykIv3t3dFZ7jt99+W+YUBDYHZk45s/7Z\nkWLNfNqXZ7AmDnZZH3QvThXy7XSaUR4KCmP7DAJQOgbd5SDCts+OhlEwvo/TlB0pI0UeI2vCz+0Q\ngsqwJ5yWRdcgG8wxKWaQZdKCyBvlQkDXLcNGvtx/mp0yo77oWcAMAiyP9elpd20aY/B7mTNnHv63\ntF7EMxbk3Gx2VYfhprD5DTd6EUlPAeHnaIXFiqgEwkRRCwKfkU4Cyt/Hs0FBZIcK4adYZESUu8eI\nsM0Hyn3K0WVERazjGfQhC7ohRzsr3CPFfDin3+l0ym33PCvPsfPFkEeNGvFOoiiUt4XThGZvRMPb\nfg7vxtBlB6vZbJZrBUBmMN6Qwp0eyAKPkXGqCbI0Ssx5dKJRNo8RoMFgUIPqGTPNKBKlJ0xUJ9WW\no33PrbkRNL6DwvY7I+r3eTk4oC84YSgU114CjSEtAupGam80GpU73H7/+9/Hf/zHf8Tnz5/j3/7t\n3+Lg4KDcwweacHFxEZ1Op2bYIf2Px+NinO7u7iIiyvwSqXvclhUUW1ZuzClOodcR9IG0t6NKp7iz\nXsDQMEdGfChrAkqUkczr6+v4/e9/H4PBoMwp84qSdnXn9XpH+saozufz8j6QDiJ2owB8F+fEe8by\nwVF/p3GPjo5iOp2WquYucJs5S5YnHCfQIKdoPLfoAOstZIo96vQ0Tk+O/NELjH84HNaoIOjRX3/9\nNT58+FCrMYb+y6lG7ADy6WDI+jjrXPpNf7wORu3zdWNZ39uOIO/+mR1MO6I4h7yPdK5tofeGUT07\nxZZr5NzvJDDNDhGyYOfZhynM87Q8gIYSkFoWjV4yT0admHNssPc26+xglPHRRwcrXif+jUzzfZxB\nyke4EZT9Vnspf/DSXtpLe2kv7aW9tJf2d7ZnQaSIgrfbbYki8AINM2evkOOOERUxjWj31atXBc3I\nRHWnBTKHxnCiv+Ncqb+XoU4QE8ZALhkYmsjT5F9QFEcYQMwHBwdxd3dXoGFQJ8ZgLxoEj0ji6emp\nnE5xocGcQuS7GbWyx79arQqXxH01zwfSrlMRoIdOtzF+5of3Eb3QR9bcXBcXkiSV6iPw4/G4Fn3k\ndAScj30RBkRpl3BABpHRRqNREMfBYFDSMhzH5b2WXZCu2WxW3uvrjUA1kFOjCVl+Sd1RcDFD8Ya8\nc/rLc+so2b97eHgYHz58qJ1aQ/4hf799+zYidoVjf/nll/inf/qnaDab8cMPP5Tn9Xq9glQis6RS\nKbrJGt/d3RUOWrfbraWSHHmDUBKpWp4gLVMtfR/Pj1QDe8R8Ra+XOR2gFIzB8kSVc1CJdrtd9BdF\nNofDYSlOCtrOaT5OnZpG0O12y54h1eQUItwxUFCnzeDjUWrFyAJyStrESHG73S5pPd9DR5kMo0PM\np9FQo+smamfU38greiNzQPm+9YVRDfpwfHwc5+fncXNzExFRkI3VahWz2Syur69L1XPeY6TE6TD3\nLaNBIDXsT/oHdwiujrlNRpXg74L8I3eupo4Mm6vldDJzy+9EVFeMMXbSUqZYmH5BSswcNMZhBDvr\nHsaVv8ucMJ+et+l0WuNist8sF5R9MVrm8bP3nYJ1RsPvM9/Up2/ppw+BOP0H2gaH1adSmXuKg4Ke\nMWdkxH6rPetdezYKuYZFRAWr+pjzdDqtOVksFEezqSHi75uv44Xi+0wQv08/yAXnnK8JqoaiSYdx\nrBxnhGe3Wq2iGP0+w8ZsZHhAzAuGwmOgTz4tyPdGo1EMBoMiuIzfuWAElZQcY0FpzGazr4y335lh\nXOaAvjpd6E1hUj7PzFwEGu8xNI8jxf175tPxPvM1SCXQX5cnmEwm5ToXZM0pouPj41K9G9gew0Xl\n3IhqA6NsmQPWmtN/+5SbTzJlhwBlBx/CcsO7rODNyzGJ3ScoI6rLiVG4jJFgBeVsnsiPP/4Yg8Eg\nDg4O4qeffqrtQ4wavBSud4mo7iEkvdftdsta4Fizp+BM8b31el3qkrmEw2KxKDwQFKpTNexNHGTL\nDYbCqXGXDeH3ebYPKTAnOOLmlcDjIWXGs0hduoQHqU0CHhP/aRj236I74HjhCPG+w8PDmM/n5YJp\nOwStVium02ms1+u4uLionYayc4BMe1+iJ3BqkVn6gx7ACWNPkpY2l4fP0G3sU1Me0AV8p9/vl0Dx\n06dPZf/haHGAgT3tZv1ASQS/x+PHaYUSEVHtNVNJkEWCcfZvpoGQpuMAQ05DZZ1Hc3radtHpKObX\nup13klKz/s7pzkwo93wZZDAvzCnXiCicYM8ffeP/7HM79TzTZHPr74io6T2aaSmtVr1avKkX1PZz\nag/7jVwxBkAG9IjlEAcs98PtWRwpn8JyLhfuBpvfXIHtdhuz2eyr0194wiyE66kQBZjPY+8UD5p3\no8Q48mzF4U1DTp8J9zMbjUatcNc+j98kWhqb1E4FY2DD2TGIqAieRol4H0iNDbEVscm1/pu+mIRn\nATePANQhH1HO/Cs3b+j8MzshfE4EnSPkiChEcdZvPB4XYXd0heLwprGMmFt2dHQUl5eXpf8YGxoG\nFufNXCnehTNhJGy9Xsd4PC6cFK8xRscbnHHC/WMNPW+sqb+Xlfs+hMAcFu5sMz+u1WqV8gzv3r0r\nBQ3n83m8efMmRqPRVzwYnHWcJDun0+m0zMnp6Wmcnp6W+W6323F2dlYrAGlnIvNYTLiF94Wz4b1j\n3eGCuTyDtTPi4sYzzTujzArzZMU6mUyKEV0ulzEYDGpGAQXOOqMjZrNZnJ+f1/a/+2KF7n2BU847\n4Z9FVLWpbm9va/IQsXPczs7OYrlcxmg0in6/X9bJKHuzWV0v47ljbtCN/NyBlZEd3m3UOQd0OIWZ\nf5iDDTiRrOtkMinXax0cVJfvbre7y6HNu2PeTHTGCTMyjqHH4SIQ9ljQHy78zNhAbjwGozmscZY1\nnr1PZxJoZztjMr6dU+wrJ44dmKEPQEF9yIh5wgm5v78vzim614RyNx+gMDqGXKPjzKNFJoyk0bDr\nBKoEhW6np6eFe+q5Qvf7PsKISkcTKOeA3air14gx/P8OkUIAXQzOpxqIThnMdDotkbEJ4jzDxjIj\nKxCNjUDxN9+xoaTRH28svscC5VNUPoaaIU76aCE0ugN0aCFmfETBRi1o9/f3JXLkhEREdRs8pyLy\n0VtHXRZ6GhsOpZg3yXq9rtWgYb7tvPAczztzYEK9YXcrWMZxdHRUijpmcreh2KOjo/jy5UttLdjg\nRIQRUUMvQF981x7RyT7EjY2P7Jocyd+OTP1ODDOy7RQeP8uHG9brdYn4qOPC9wxn4/zbKWUfGRX0\n3KHYSCdH7Gps3d/fx2QyiTdv3hRSckSUC4SROc8p+wQD7wurqUr/9PRULikFOb26uqqltI1yeX5B\nMbMz6HnPSIdTe/tSnqzJPuMVUZH37bw6qs4pMwzcarUqRTE9RqJyggOex7w1Go1YLBa1u8Gc3vIa\nYhScTkOGDw8PYzAYxOXlZQyHw1gsFjVndDAY1Iq+IudGVdin1jXMox1E61MjLXZemENSJH4na4HD\n4aDNcz6fz0s1/YgoqZn1elfaJdM5kIl8QtZrBvpLAy3ySTHS/ybH08+ctnfQk4MX1shzwNo76LRO\nzMG/aRL8n/2Sswa8D0fKFBXmw7YpoiKxozOQS2QKCgpyYEQMPZsDOmRjX4rut5xG5gFHDzn0HiBz\n4EwNDRTKJ8g9pzTsgufWiFruS5ZLt2dxpHL0SGMRnCKIqC6hHAwGZUCuRE3OE0OEMFJp2sgAk+MU\nGY6ZNyDRsbkdfGYUKC8GjhyolRUfAsz3Mupjz9kKmg1jQ0lf/LehdrhGhljzOJhzDHwWfiMlOdok\nYvA62Vm0gucz5p+xu6w/x40zUkZ0gJPVarVqhoZNxdzQz+FwWFIscAqIvPv9fkH5Wq1W9Pv9MhtC\n9csAACAASURBVPfj8ThGo1G5VgauUET9VCVpHRfUw3lpNBo1tIjvUtOIOXdqNxsiO6AogG63WzPQ\nnst9yCLFP70eNIIOInMbttvb2+j3+3F4eBjX19e1S025WcDHxiMqnpkRGXPn6Av9N7IAsgkny4YG\npbxvfHZA8l4gzb/dbr+6mJi153lGSLzHnPrmd/luhvqREwzqarWqnYRkDY6Pj2vHrk9PT2sGLKJK\nsdh4ZFTCOoM+Iac3NzexWCzi7du3cXJyEp8+fapF9uv1rrQHCIn1IIEhhtfp5Jx6+a3+sSbeNy4C\n6jnNgSF7ww2DuFwuy5w6C5FT3/BgnPFwIJ2RL2SGOUUGMxJPYIIsZS4jc2QZYl54b9bfvpjaOtiB\nKE6PP0OOWDOPlcY6ZIQsol5RPqfh0PsEBP6eHUP/DNTNzhXzho1AXrz2/l3LIv1gDbzvSWmDGNpR\nZt32lU1ANul7Djz5PnNtR9N7b197tjpSTIaPUOI547wYNvYGv7u7i8+fP0fEblK5EoAjxI5aEGJD\nuHzPnrkFdTKZlPdtNjtODEaYRUBI891ILJ55DxFVJMMCYWwjohwl5nlGjpgbNkM2mnZ2ECDmjD4w\nVnN2UKKsg9NGKDMbNKNWQNsWdsZv/lHuq//vjcia0R8r18ViUYo4np6elhIEEVWFcqITOGoRu036\n6dOnaDabJXWVSbwoNztnIE1E/Mvlspb2g+fCMXWnNa2QQRDpa6vVKmkv0kom2rKGrLOLweFInp6e\nlut3WAvkFjn0++gXCtxpXRAp1pO1ABWK2Bnkk5OTgjrhBJ2entaQWBpV9I+OjqLX6xWEhLFThXy5\nXJaUAc/AMXdai3Ejj4bcQWDNZ8u8O5AxG3vmBLQHJyobLtI11guOaJEPv49CpHCUkJvJZFLumWQ8\nfMY8YhS979ATBCx2Fk5OTspeIxBhDDhxh4eHcXl5Ga9evarxXxyYGDkkELWsECjY6PAuIw04EZSV\nsaNtZBuHynOKfDr1x2c8p9frxWg0KgjRaDQqe4mrgByIoivZG8gV/cQhcvCFEXbAuY+a4SrvEdVd\nbOxd6wKPFZ1pZ2a9rooME2Qx3xnFsSFHDvdlU0yQx07ZnmRagtfQNZc2m00tLc5tFYzP+x9b43Qz\njWdZfnivnalMk4moHHkDHwYWWG90opHinPFgv/B31hf+/Qw6+B372kv5g5f20l7aS3tpL+2lvbS/\nsz0LIhVRoTf2ToGAiUrwJDudTrmvCxgUb3E4HBZ4m9QADSg9R7K8L0cPJtxyegjvnb4AJeLtG+Ll\nma5ebk8ZZMnpvYiKqGfYme+Z3EckmcfoNF2upA4MTVTrKsJwvZjT3IjIHZU5ktvHsbA372ie+SL9\n5TQkaVqQSJPB4XJMp9Ov0qytVqugUPATQJaIrrgvzpEu/fGamQtA/0CKHBXBZYO/gzwxB/TfRFDG\n0el0ylp6/pxeZT0cQdNAF00AhfuWo0sTYA1le+zA1ZkAyrvOz8+j1WoV9ATCLKmr4+PjghAQTT8+\nPsbl5WVst9vCu3r9+nXc3NzUIkhOCSLrIDzmV4BiGC0yx8Gpa+bM4wB5Y0w+qs+cIY+ZX4L+MeeS\nSDYjiMhup9MpJ7PgNvLZ2dlZ7bQSn0G+59Jic9pIXbLfTD/g+piDg4OyPtxfyOXny+WyoIBG+tB7\nyJPTmv7/er2uFevkfXALM5ePueP53hMglcy/v+f0EjqQNeVz0Fini9FfyKOv5yHVSRrazXxF9ilj\ndDYgp5G9R9nnNL7jK7QYX5brfSn9XC4G+TLdxPNi7hConhGyiPq9kaZ8gEbxPc+p18LjQdfSV1A9\nt30onNN4UEJymtH20FQQ1n+z2dRoHx4zupj3QeNYLBbl1B7rT6ke9DNzQV/Qp/TJ47Fc7mvP4kgh\nXDnHTPO1KxERl5eXNeVmSI4FYELyUUg3Q3MIl6Fl+tBq7a5dIH3jflp49zlg/DyTVHkuStHQ6sPD\nQ0lTkN7zpZQIN6kvK33Dj84V+7QfQmHnBW4RZDwLqlOShtw9DpzgbrdbOy3z+PhYq3HiOcjOgSsj\nt9vtUmHc78KxWa93FapJu7LWlCdwOoy1oPYKc+sN5ZOjHp8rImNg2VxWMN1uNzabTVknDDlyZEct\nojpJZQVkp4d1xRC7BhPrinzSH3hT6/W6EGX5HStpxumUAo6lT7DRjo+Po9/vx3q9jru7u9p6NRqN\nmM/npW6bHeXhcBjdbjdOT0/j48ePNY7BbDaLt2/fxmKxiKurqzJ2k6uRGada2PPZcDHHrBHGhblB\n6ZNmyukkjD77g8b6oRc875Q8wfnabreldhE/e3x8jMlkUkuLcO8ka7BcLmsne5GFfLyalA8Oix2p\nx8fHwp/EqSAgefPmTdzd3ZV9mnki6DAcGr/35OSkpged4uPAj51zp5StQ3LQgs7L3CrG7r9zehdZ\n9R5HL8FF9clqKrfbuTHXyzowE5U9H6wp823ZcH9tQzglZtvAPOMY2DlCdnmO+5Fl1in9ZrNZLt7m\n3XZQPE95/dk7dnwjqjJD6E1/bz6fF71p3ULLoERucETN5aPlAzbMW54P5BK9g33x7y6Xy7i7u6vt\ntxyI4RSi55kXvsPBFq+hSf/72rM4UkRk7pw3dzaKLDboko8i+tilnxNRGSgEMp/ow/HhcwutT1b5\ntJCjyexgRFROIv3KUYSRMCsQrjIBgfE77DQ6V8x3UbDmSHmzmkeSCbdcG2PUjZNsFjK+B4mTwpjm\nQjAfGGcLI3NHP3yShJ/zvHzM36dIjORERHE6KHZpx6Xf78f5+Xnc3t4WdJKx0wecRvME2LTInfP2\n/IHvxPhwXDE2djD8TkfBzK8NFHwOHDQ7EE9PT4V7FVFFyVzr4popILy824VTGZORMPp3enoajUYj\nJpNJQTUonkl/cN6enp4KOsbJvIuLixiNRjGZTMr7XQ6g0WhEt9utnThkDTNX0QGSOS+WH9bIDq9l\nln5STJN5pKFzMv/EPCbPv3lOllN0GSccvYcPDw9LTSe4Scz34+NjDSX2HvHJPMsf/cZpIyBiXBym\nQP8YxfOpMiPVPJ/Cif1+v7Z3vT9wUO2E2gEBrTLXCZm37PE8DCm6LuvX7XZbC/iYbxwk86/y2kdU\nqJf7b32Y+XGWB+tL/nZGgrVgvDhc5pfaeWTd/Cz+YLDdT5rl00gd+tL2xDXzmPMcvNEXo5XsRes8\nvjeZTL7imjmgx1HKBU+t67D56IzM1fL3CP58ctCymJFsnLP7+/tasWTuDaXPHq/BG+ya0S0aeigj\ncG7P4khl5nxEFe0b2kYxLJfLWhTrNM1sNqvBjo44HH2tVquSBomoCi9CfrUj4X6iII0eRFTk2Kz4\nnTbxGOzx0kwIRpggnXoj5pMtdgaBN7OhMRLTau1OplF9mv6hwKjwahQK+B4HkOPqs9msEDyppmxj\nYkXgzYZhMYnfhsYb3uNHQVjJG+2h72wOKz5QKCI3YOqzs7PyTKBek/Adydh44eTiwKA8IuqVh7vd\nbjnubhnmO6B5bMzBYFAzJC7ah2FmnC5Y6Sj17OysKB/mwcqVOeFv5N3KkHGgoNg7duSRN2q4/O1v\nfyvvf/XqVQyHwwL98z3WdDab1W4BoC/ed74zE0eGk4cumZERaeSK+Vqv18V5NCLLd3wowg44cmxn\nne+BAmVEgf4Q3FHj6Pz8vMjpbDar1eui4QDjJHst0FutVis6nU7NWWIMpPEGg0FBxwj+fKDAhzB8\n0tSNgHGz2VXuN/kZmSf9yLr5WP1isahlDLLcgP7YUcnZAxfKZO2Y97u7u9rhJHQ6joWJ8RRB3Ww2\npfAsDUeSPZ77YDI573NAbuSZz2zUnU3hO+i+HODYKfL+xTm0Pst2kfmzvmbeeKbfwXPQi0aimBcj\nkHYyQao43OOTvkaUmWcHqw5OszPCvBFkGHiwDUCGvE7YOFfnZ3+ia72+/r/T1/zNHzt2/1/bszlS\nCJ0XignDAGSjjxK3sCJEKIEcgRkFMofCG4SJZVJt8A2vRlSX4PJsvwshsZKmsUAWanNvaAhb5gyg\nEB2xue+kdeBJ0Afms9vt1hwOUlMooNPT02LkuJYFgzudTktF4Zubm5jNZsWwRdSjJUfQmXvCnLFu\nVkSkTbbbbTkSzvfIkV9dXcVwOKxFszgkjIP32Jgb/aGhuFByKAM7ShiyvK6srRFO5oHSCKQ881F+\nIh87z61WqzitPi3GOKbTaTktd3JyUjuSTP+JmByJ2nnI6CiyRXMwYJTXaCwRW6/Xi8PDw/jxxx9L\nPy8uLuLLly8xnU5LZGp+Dc8EeYRbhdHjRCRONeOz07her2scFAwpKQkj3OawOeqPqAzlvlOpdp69\nDp5DHC2nIXFAQZ9saFD0PBuuHGP0dSIeP+gXgUu73S6O4nw+L6dYr6+vYzAYlNSejQHOkCN9dMLx\n8XEtDcX4SaNz/Qw/d7CZeVfsocViEYvFIrrdbi3wRR4dlPKZjet6va6lS32adj6fl8/u7u5isVjU\nys14j/N//tDYM9ng+3PrWJprGtlJ9Jx6DH5e5tPRsi7M+xE9470ZUe1t1sDoLA0aAXbNto3Peee+\nkjPMH8+ECoHddHCPHWde/D50tJFABx/sTzvEPNOBisfPvgQBNBXFJyfRK+bbunZcDjyxs/Q5gyIZ\nGXN71jpS/DuiXjMmok5IXK12VW1R8laoIEvm2xgVceVjIyYQ0djY+wSZln9uQ5sdAjsvhi1ZNH5m\nHg1j8sJmiNNEbvfNYz09PY0//OEPERHxzTfflFpAKACuhmDeyHmjJHDCLi4uCnrlO94idh4/5SbY\nQEakiKAzIsV8OJ3qqIXfyfW3OE5NBGrCaUTUkJ19iog1cRQHomK5YA3NQ2EtfedSRP26BDcTGTFQ\ndpascB1EGPrfbDa1NBTcMQyUAwX33xErfWFc/HwfAsr/M6q6Xld1jRwZomB++eWXmE6n8Y//+I8R\nUdUuQhlxPD9ilxYAHXI6lebUjfe2jRafmScGoRvlbh3AOrGmTvs5LcdagWbY8cp7kTlpNBqlRpXn\nnOfe3NzEdrstTsh0Oi10hG63G0dHR8V5coCUG7wUHMyzs7Ov6kH1+/3CCWL/sv+Qt4xms098PQ9j\nJyjJFALzM9lXzElEFQifnJyUK1qcsmSuLYP0x/rAKf8ff/yxHFggoKCOFI6rHSnXrcpVqL0PMyLm\nz2jej/QTR9loFg09jd7Z5xDk9zgr41Sbn5mRVPpMJsHBtZE82x2PFydhvV6XPeqK+Ov1rkbjzc1N\nDS02Gpf1DSgi8+IsRZ53+ue/XbvJBzKs53P6nb2HTWF/8x70DfXreAbBtm0/fSGNaB+EZzr7sK+9\nlD94aS/tpb20l/bSXtpL+zvbs15abC8TDzNHAfxtBAteQ0R1RNepQUeiJmI7kgKqzoRL3mEv39B/\njvqJ4mhOqWVOQ/aE88kuIitDoxEVXGluS0SUO6aOjo6i2+3GH/7wh3j79m1ERLx9+zZev34dFxcX\nEbHjNs1ms9oN4qAfoAA+Xpr5NY4GOJmCB+/0EWkF0lB8xpyAZEEWzc8nSqYvPkoOkuESB+aYOIom\n0oQUbzRusViU+QfRyjJHis/jZr5BedxPTovmS2f9b6ORLvEA/8L9cfTj9JGRFbiB5ld5HI5M3R+i\nNaJV5Io5Zf5AB/M+fP/+fSyXy3j79m1J0a1Wq5jP53F4eFiuILHsk3o1GhQRBX0zkdVE9Ha7XfaR\n9y/cIFASyOqG4ZEzUCsib9bWvBcaew+emtFM9i0pN0fQpG05en12dlaTN/YGiBbfm81mZZ1ms1lB\nIPke6ZsvX74Urhj9BE0mmrZeZa4sE8gMc4IuMeeLeTd6zzPRdei2xWJR43WRwuGkKY15A7Fwf5A5\n+DXtdrsga91uN3744Yd4//592WsgebPZrKR12FvIIqgRpUqc9gM1ps/eHyBilllkkrW27XF60NQF\n/vAcf25kJu9xpxSdWja/lPVxWo7DJk67MlbQGo+Dz0A5mW+4T6A4vV4vPn78WOvPPp4Rssfesb3E\nrpj7+luZg8xTBpXaR0WIqC5mNhUB1MlpXacS2fvmyTEfLn3A7zOfWZfm9iyOlCczOxrAooZHgTEh\n8ObTOXmy/UzXDDGnATKdDbEn0NCtnbu8MBmyxanj/2x8uFUYMDZ6xNcXpnperCgMtfI7bLZXr14V\nQnlElIrUHAuGw+ATC9PpNCaTSTEOFn4MkY1DRJQUDMIL/yWiSgv5FBpzikLYx0tBHphvX6HhtWm3\n23F3d1fjlrHxTeZmzTBKrAnv5lQHp4tcD8qp2cxpILXs/iMXOECu++UUg1M4mYOB42QOEZ/jSNJP\njjvTH4/VhiFzwPibvttomODrlCrf8RpmR8GptsViUaqbO53GuwzB40S7wniv16uNgXlErqgLRF98\nuXjm63kP+znMG2tFX/fxL9in2dGiwrv5gA8PD4Wzg9zB58GYEIBQ+Zx+ku4j/YcjtV7vyka8evWq\nlP7wZcd2RglSIqq0rnlpNHN2rDcZOzqSdbV8M9/cvmB9xL7BiO2rsZRTJl4v7zs+f/PmTfzDP/xD\nfPr0KX7++eeaY4Ne4m/vfV94i6OaCdUOALMD6n3itCe6n72fnS70pfUE62GOrNP25jRa1ngWcpaN\nOHKNfeNQkZ/l9JqdZMZ9cHBQ+I4R1Z5Hh5+fn8dkMinyZhsRUbdRtpfWNU9PT6VMB7LH9xhDXgeP\nMac7mRvey/tyqtY0C+s20zasA9G76Gm/E93hwDi3Z3GkuEjRCorOo8gwDhGVo2HByPlgC5qVG5sP\nz53fQ0idh7YC5/8Wdt6dOUCMA4XokwruC0oWAcpKDG+aSJoxwxHJRticqdvb27i7uytH1REIvG82\nJnMIMoQR4lQMfeX0w2KxiPF4HNfX1xER5eJQO0GOchzx4QTwbzgNKLKMAkEOZK4iKqWL8nGNMRwV\njPF2u62dCkMpgo4Z1eT3MWw+1ZSNqBUUyFdWpqwN8mbHO/+NA2huD2uBrBoFYtMjd2xoHAHkBXSR\n/iAH9DnzkuxgGJVgzjGKVryQuClyyjrd3t7WDjA4aGFuTERnPZfLZRwfHxeZAS2MqE4TEUBlDiH7\nmeKUliVH56BPNPPKspKOiHJpOL/Hszi5BFJnLshyuSxOLtwgDmiAAKFz7BCiP1jbjDqydw8ODsr9\njqwNe/zgYHekHI7QfD6vRfmZH4bh8ykvr5M5e/4MZIf1MoLQbDZrwaKdbOSKsWYHzOvi1ul04urq\nKl69ehWfP38uPDzWlz1EaQlzGJnv1WoVr1+/Lg5oNqz7goh9Db2GPgbd8BryPoJGf4YutjEngHIZ\nD88Xjvd2W5WPoOGsWu4zX8/rh2wYgYd/SnPpHPYwzYTx/PsEJoAY3lfojPl8XkMC6WPODNkm8Md6\nxO/2qWjmAd2JPFgX23ly4B4R5UCHA37z3DJCm9uzOFJAslmR2ZGKqBdwRMF6siOqUykIKAY7ImqK\nDoXjUygoLjtQEdWE5z8R1f197rOdOgTVhoxnm6DthtF2VGWvPY+B5k10d3cXv/76a0GkECjSA0S0\nPAsh5FTeeDwuCvf29rY4J5zYc2SCUiEStjJlg/NzNpuLdLrvfI/oiurkHjOONSfTjICB1jAe5pa6\nUtvtNnq9XvR6veK4OAWcU2nZObYDYifWp/toRlptsDxGZMtwtCs+ZyTATjvPsXFjHKCKtIzQOl1s\nxWL0xZ/xb6dFPAand5jvo6OjUmzSyHC32y0nPdfrdUwmk9phERwiyOpGajFOGBTWkEMmoIvcA+YT\nSDZq9Js1zWlBIygEBxgv5n69XhckifG56jvvbTZ3aTp+5nsFswOCPsEJc3QNAmkSLPN2enpanEvI\n8qzTarWK2WxWHGtkzvKMI2CEBCQCPbYvTY0MnJyc1MpR4Dz5cnA31iEj1TaYdrQiKoI5znu73a6l\nkh8eHmqnDhkHc/b4+BgXFxcFlWbd0FM5E0F/9jlWBPaM3zQCUwsIFu3U4AxYv0fU64IR0LD2yKj/\nuGwCiKzXJ6N7/J71EjaIvd9sNuPy8rLIooNYzw1pZWeETGsgzZyzK+gnp81+K6BDhrxODnqzTTBI\nYL2Zi7b6XTwDxyjTL+wLeA35/Lfas6X2iIhyNEREnL3ziMrQ2KAhDBg2BCWiyrFuNpty8ozvkS4k\nWmVj8Uw88OzZs6jeMK4ldH5+XsZhLxbPG0Vh1MFKOI+PPru+UebWsIE/f/5cvvfhw4fo9Xo1Lo5T\nEaTfiL7H43ERlOl0WjbNcrmspREYF+vFc7wWFl7PN0JOn5364XcoxWDOAQaFSB7HAycQZMhoAI7U\nZrMplbqJynhOLnXAM73OHguKxbJqfhiN73kunKrOCKv5LVaYPNcOljc7xh5ZArXkfcwd77Aj2WzW\ni97ug7+RDxu29XodnU4nnp6e4u7urqwFNbZms1ntOif6zJH42WxWLpyNiJJudo03o0qkBbbbqthl\nxM45cd0uZNsOsHkXeX1sWCiSyfdQ1Ow15sPFOBeLRY0DaQUNkgsKQloSFM9OJmm/09PTcprPaBpc\nQp7F9/j9VqsVs9msoDK8jz6x1zIagM4kOrdsGbFzs0PtcgARVYV65txyFFGllDCkpk3we/yM/4/H\n4xgOhzEejwtPkjVHz+/jaq5Wq1gul9Hv9+Pbb7+NXq9XmxsbZ88XDogDnoxy2WDn1BJ2IZfIARWx\n/vX7jCx7LeyoYm+YHzvCOSWGg2bUzGthRNU6CocfJ5QK/bnlWlA8i2yL9RA6HwfMto2+OP1Og4u6\nb76ZD/4YbGCsyKKBDuYNnWlk1HzBzWZT6iVGVMj4PvS69Pc3P/k/bEymNzjohr1Q13rCkWAy+QyF\nyQS4mCHCizFwuhBPlujJi4SBtEIz5MjGQeAsrL1erxRH3CeEjNFRgo2yo7WIeg2WvLkRCMZ5f38f\nP/30U+lLTgu9evWqjJd0l9NYNCJ0ogzDyk7d0V8LGvO8b4z8nZUshgXFYRTEBGFD0vTl6ekplstl\nOca7z7BRbweZcS2nzWZTyMD8vg1QNsI5HWBo3NwQK0Q+I4XH/NPs/Dv9yzgyguE0If1DBjL/wpwq\nr2FO69B8LYn7xJgwRBi2nMZCpiynnz59Kr93fX1djFzEbq/ZIeFqk4hdhW76MZ/Pa1XWIf7jZJPi\n5Tkc03bh3Jz2xOBmNI/5zqjqer07Mk4aLyOOrBXf974g9eGAjM+8DkazQdvRlQ48mX/SKqTzaK1W\nqzhuRrLMi+MdONSgqBhhGz2cWWTOSD7vc9rLzYEl85gDFjv1ds5ms1mRj9lsVvYwP+OP9yEBxOvX\nr6PX69U4kBh97EROw7OP8rpwbN5IjsdHP7J9MocUXZXBA6NA1sME3kavWSeXSOFZdnT9HT/T6Sr0\ngwvfdjqdstaAHTxzMpnUHB4jZNbpdl7QTfxxOhLZ9p+cyeFZDj6wldb1zCllfdB/OKm8L4MmzqZg\nf0A5vU5G7fe1l/IHL+2lvbSX9tJe2kt7aX9nexZECh4PKauI+nUAREQmFhJRkgrAO+Q0gSMiPHVz\nHfZFguZROJIy6gEilGFtPNSMVsBN4DnO+RKRZoSAZs5P9vrzBaaMgWf7RE1ERX5l/PAXvvvuu9JX\nOGOgM/AP8Pg5CeIoGUTCpEXe6agSNIfv5cq7hsmZe9ICvJv3Ad+SAjL5m2gb2aEvpJJAKUDgeCbz\nvF6vy/U3EbuojHcSkTtqcYqk3W7HYDAofWm32yVyBWkx3wUOivk3ERWXjz+MhzV2FOl5A0HwPDvN\nCroFquj8v7lnTjnkfZQROU6DzWaz6Pf7Ze6Wy2V88803ZQ06nU45vLBareLk5CSGw2FBDowac8ko\nqS1QUw6lgEY4KmUeOEGH7JgjR9TtPW6Z4gLsfegwiMt8Pv8KjSZq9VrQWEuXMXAKgetjzC/h2fCS\nHM1nsiyNiu6gwp1Op/AYjWRAZjafJyIKSo/cek2MOBuRMPJJoV5H7ehN5HFf+s78JMZtVHSxWJSD\nLT/++GPc3NyU+SRrQV9ZcxAIp8EuLy/j1atXhVuVdQbr+FsUkswZBJ0BlfIBBqOdNMbkTAPvxj6h\nz9AlOWUGwpznk3XEZoKe5DQga+uMilFs0Ni8xhEV9QN96u9jO7K+dzqT+QC55XtGeninm+0e46a/\nRobJotAPI1KgrKSC6QtpevaH9Rr6Mp8gZ+ym4uxrz+JIuQ5Q6YhOugGl0XELXq/Xi06nU5TUwcHu\n1nmcLhsMjGur1YrJZFKu2fD7fA0Dgsh34AH4NJRz0iZsMx6qOJMW8+kNp2S8kCbPmSzHZwhuhhaZ\nKwQtp30Wi0XtuorxeFyc2IuLizLfOBZWUu43HBTGb8VmZzFzg5zeYf2c1vX7nBO3s8hzXQfFBFcT\nIDl9yLtcSTqiqnzd7XbLFRhOtUbUT6Iwl3bqbFRNqiUVyrgg1zudhlzCAcMJ4fd8EsUwOfNhBUEf\nzPewMnNqIh9ggPjtk4xuOCSso8fQaDSKg+6ThxcXF7Fe747rX15elis8InYpuvV6d/VHfmbEzglD\n8Tvd//j4WNtjNsBHR0cxHA7LaVyUK86ygxKUJ59tt9uSJsIZcaAFiZnx2rGBU2jj6DWMqBxu3ucT\nUyhxE4XZKwR8JrV6b5h7Yl7hPgcIvg7cOT5rt9vleXyGw+u6e8ynU3AYLIy/Tx9SJds63XNjMnUO\nSPiMNNft7W1ERAyHw/jy5Uvc3NyUfjCn7Pmjo6Na6i5it7+4VDyiCjhYi/l8Xpxrp0tJW7EGDlCc\nXkIPZHqCA+4cQNPsrFh3M347vDzP9AzWnL3CuzL/Kn83Uywi6ulk+or8kDbn9+ERIgfIkcdIn1ar\nVbEzzLlTxt4n9I858DOzXaEhR6QJj46Oio63Q+p38n8feMjyjZwyd56ffUGT27MV5KRjTKr5P0ag\nInYL2+l0Sr7bXKfLy8tyuoPfN2vfJ198XJJNDwKQvdNc98mKD6HNRhjF741KM/pGiQN7cuaZAgAA\nIABJREFU+0Zw9kVJzjPn5/K3NzInVY6Pj2MymZRo1YRrol0QJ96FAOLxm9th7sS+/ngtnU9H4aMQ\nLJR2HHL0YWQwol4HhfcTkRodQ2HacLCGg8GgnEZEDs0twunx/XcR1Y3z3mjmchHxgK40m80y38yd\nx+IoHfn0VSl8xkbGONuRYo585RGySO01F+20XIFYeZ38b6M/rCHPxvFzNHt9fR3v3r2Lx8fHuL29\nLcVgW61WQUDY10bH7u7uiqNkZ4kSHEblGN9oNIqnp921RhDfOegREQVtshHhu1wwjYEFYaGvrAEH\nIzK6AKfOhHcrYpBzrzt7DfK8uUrIE9w7O0kYKYI9notT52icv3GGMhpPX+gPjm5GG/hjRMrIBnw/\nrslBVlqtVgkiHXCiC3imZcpZAAj8vvOz0WjEdDqNu7u7mkOCTNsYw7vjeivGsVqtiv4bjUbFwfYf\n+uRg0HPD5+bLZITCqG7mq+F4O1DgWXZK7Chnuc3vyjwkyw0OBXvA/DT+oL/4HgVzWU/khDmNqLiJ\n+4qJmvzOGLmmbLFY1AKiiCoww75YLzCH7Ll9iHJExYXMTibv8il7I1roGfsf9IF3+5nes/vas57a\nc2TiiMUQfkRFGPalnBZwECMMGMIAYuLFyKkFnuf3sThGmvievXwrWP5er9dF0WZ0jN/B83ZxTIwN\nEauFhu9ltAIHaF/EgYM0GAyKU+XSEHd3d+VyYMPWfldEfKUAbSTw8C2Mnh+iTfrKJsMpcIrT76Zk\nA435oB/8LlGjjYn7iUywvswbztfp6Wl5Hv10VVzkyugQlznTB9aQlKChfJNPeY+jX69tji5pJmIy\nD5yuQdmRxvUYGQsKgNRvfq5TmJZvO+VGDj0OR3+j0SgGg0EcHx/Hzc1NzUA9PDzU0micqEGGbm9v\no91ux9XVVQ3lIaXHOhwdHdUKnOIocmLNaTCQYaOG/i6OIM4xDSSTvkXU7wPNhOF82ADlbQXOAQoc\nYgJC5pTvshd5dqfTKbKBYbChcRBH2pi1NaqELuAzBx9eZ+ZovV6X6tbIBQEuCADpdx9rd0rMe4Q1\nxcBaRjFcm80mPn78GB8/fqyhIOx75sDGjGet1+vodrvFPmADWK/5fF4cKcopoDN8uTT6hSAb+WDt\ns/PiMaAnmYeMzGU7wJxiA52moy/ekwYd6BcoEXonIz127GxPCKLYB3bY2C/0w4R21w9DtzLfRhit\nh5BBkN+cRuf9GXVjL6NLeRbvcDbFjib7hXkxoMHPQdWto+zcGeVHzvZlhNyetSBnxG974M1ms3bM\nnQ2XYT7gaRaVCDyiqprcbDYLKmWDzXuJtuwQdDqdGl/Eih+h9QZ0P1Fe3hg+RbRYLGppODsVhhv5\nLKLaDPboOZHE9xCSiKpOBv188+ZNOaXEGszn84K6tNvV1QwHBwelrhSCbmTICs0Kk4gUAbXwZcVj\nZYPQ2oB6LegDa4/xyjLhuVsul19d6WIl2G63yyWwpM0Yw2ZTpVet+PicyMUKkrlmI8/n8xgOhzVj\nDLJgY4hMoTBAshg/6Rbk2gqaueNZv2XIcKic3jGaiNPPOOgnishjZD+gcBhfp9MptY2QVd53c3NT\nngNSxjPH43GsVqt49epVbY6RUQwgR9iNHOH8b7fbePXqVRwcHJR0Kf1yqQmvP0EVDpX5JaxFs9ks\n6HLELq0E5+L09LRWK4o5Zy/ZOQaFAgFEL7EW/swpG+YQx4a54Hs4GMiU0xvZWPoUlNfRkT7yh6xY\nnzCudrtdUnsOWtl77B+nshkLv2ddT2s0GjEej+N//ud/alyv4XBYgg4bffYf6TmuguJd6Ajq5+Ec\n45h5r7oiPDo2IxDoc+su17Nj7tA1diKRUebF68ReyWkjO5vZgDvt7sDSmRh/17bKTibyz/epEcj+\n9HNwgAAYXE/KJ40dWNAX7MA+gMIOIHuDZrvgvhCws17WQ+wVZ7XyiXyekZE1z31GInOQmtuzpfZA\nV+x1I8Sk2kyMJL0HidQGGqV/dHQUJycnNSKnc8URX9dsMgRKY6FwpvZVGiY9kBf58fGxcDYspDzT\nKZYcmTkKsKKxIsybG+VFxIZgoExxJNjs5onglCBUvB+j9eXLl6JsvGmazWZBFWz0czRmw29HcTab\nFePCmHxlT85He/1cBRujZ2XC3/BxVquqSByfHRwcFFTn5OSk5iw4PWPlx/f53AY3okpDrdc7ntB4\nPK5ddWP0Edkz0Zh1hLPB99jscNI838wVCF+73S6KjHQfht/H3B00MAZHiY707IBlHgHoC880tG/+\nFc+hRIERzul0Gufn59FqtWI+n9f4eNQwOzzc3QmWUWoU5uXlZUEyaexpAq3tdlvmO9d/MlLdaDQK\n0mKibESUK0dwQFwviL3Ivttut7XinZ1OJ2azWTG2fMbeZOzHx8fl0Md8Pi+6pN1ux8XFRY0OYAR4\nX4rKQYYdKe99O7zdbrdwi9CjdrIODg7KtSy80/okI2o0B0n79jd9PT8/j81mEz/88ENERFkDHA1n\nDjy/V1dXtVQqKPVsNovJZFI7MICxNippZ5B5zKi5ETfmz0FMDqzcsjNqZ5AACf2HnsXBdSCZOTv0\nOZdMQQcb9bYNw+7hMBnpQS/jlDrAZD/wXdpsNis8O+xNTouZU+h9jKPDM22nT09PS61Hk8at90hj\n0+xcOpNB35AlAg/bWfY032cMDhB+q72UP3hpL+2lvbSX9tJe2kv7O9uzIFJ4fYY5ncckteDoa7lc\nxsePH+N3v/td4SFF1AskEj3zTLxhIganAEnvEB35Ql/4FkRX9Ie+m+Nl5MynWohmTIzm/4bhI+op\nwdVqVSumlnPgPqFANAliMZ/Py7xA+uQZRH1454Y0STkalQAuJ32R89P8ntN+pD2JZI0G4dEzp0Sw\nEfW0QYb+Gbu5YE5tIkv39/c1/oWfDZJjFILn7LsihnUAgTKXy2k4TnDxPdaXIndOERoFog/MI/07\nODgo62ekw1GnUz+kYUCDvL6UKCAd5aPjcAUcsTqSZm9mNJF3wDHw+Ej5cBKOasZ8HxknCvbVMp1O\nJ25ubgqnyVd9kCbNp+Q8P5BZ1+t1QUjMU2TPMR7k5uzsrJYG8vg5AZq5TkTDeZ1Mokb2+d5gMCjr\na54X3wPJIso2Wsh63d/fR7/fL3uf/XpyclLG57W0jBuRcoqMvWuS+uHhYSkbYmTJZT32Veo+ODgo\nl0475U4DzUF+8v1/8/k8Wq1WvHv3Lv76179GRMRf/vKXMldGQJBFOLFOqyOLyE3mwvD7ZD7W66r8\nCYdzzImkWX9xwnkftYODIs5EGJHKtAh+x2lFv9Of79PZzLXRHNaI5zklStkM0LDMn2JtSFVmCgxp\naCO8EVFOiIO6+io20HdTdLyGTm2a9oCu4BAOaX2nZVkrj93FYp2iZbzOhNh2weFifLaHeW1ye7bU\nHik54Nl9REQb7O12d+/br7/++pXy8xUPNg4IHgbAKSIUD5PLwkVUqSSMFD/j7+zE0Xy6ACeC76EM\nUZqZm4DSwdmzEnLe3kqRNBdCDs8gYkcmRzHgVGUuFpsJ5Y+goqAMWTOOXCU455+Bh7mbyRuD57Va\nrej1erWNQVqS8bBOTi/kzeO0RualoEibzV1tL6fSSGUwt05HopQx3CaqIh/MiU+Bkj5CcXv96Ctz\nh7IyRw5lgpNi54W5gOjs+SYNmZ1znL7pdFqD11lDxm5Z5x18hlNppx4Zx4jb2WIdUNQmd/NeHCx+\n9ubNm0KWJ8XnvQ38ztqavI9D6Jo53kco88yjIDXL/uWEntcWJ86pD9/7x/zntUDmHx4eSvqYtI2v\nZWJNcXKdmnFwieyuVtX1OxFRHCjz++zwO3hysMNn5qv4vayRT3wxZ5YB7xnkm1Srj+7z3cxz9BH1\nL1++xPX1dTm9eX5+HhG7E1+sz2QyKfIYEV+l9eywsP7I2eHhYSFR41ijD1wtnc+tA/I1Sk7xmU5g\n59QBNLLDuNnfrCFEbZP2mWfsGjrOOop3npycFF1rh81XOzHmiPpVPXkd0dHor/v7+6/2EH30wYej\no6M4Ozsr9eDMceX3Scs6aMXxJI0KVYZ+5sDJV9hkPeDmOUMnWQ5x6H0LiteH30Un4hhbTnJ7FkeK\n/LvvEuLOK5P17IRERHGmvKFQ6iwWiE7E/gt/zYWBmGYCZkR1CtAnmuyouViZERqe6xolfObTNe6D\nx0Cfs3PG99hAvvA1olp0bxgULwgJCBNzavRsNBoVZU8fbDC8Kc2DwmgYQeDdOGlshG63W5xnjKIj\nXjZqPo7P/DpHzffsYBgl4Xsce2Z9fS0GRGUXFuVZ8OPYvBQInE6nxXnAYNj5NCKFY44iOjk5KQqa\nKMyGLyKKExJRlalA1n1C08TKrHhsFJkDHKqM5vD7R0dHNZlDEeVIjDXDaTNP5OnpqTZG9hbvYY1w\ncJkX+sZccRiDhpJlfRj7fD6Ps7OzYtTyiS7ewZ7YbKoSJr6yw46X55H18T133PdoZI/G+JCzw8PD\nWgFYz5NPipn/0m63Y7lc1vQD1zUR0TM3Rt7Mn2PO7FQ4Kke3Mp8Y84j6lVOg6uarwfkDseZd/M1Y\nWHcHbehR8wojojg50+k0/vznPxf0IWIXuPT7/RgOh2VvWxbfvHkT/X6/hi6zlvP5vNiTfGKVPqGn\nzE3FHvCujNagoxzQ8Szz0RxgmnNjXqFrwjkI9Lp5Tfy9x8fHghzybJ+cs23KNsrBF9kefk5RWOvi\niKqgMDaIE7MRVSYCGwu3EZnieaBk5pbRjJ4xBmqcAUBkJMu8RjugRvzQD36HMw40Aj+cqX08vyxD\ntbX6zU/+D9tgMIinp6cC2UdUG9E1nfZ5/Cb70fgMJZWNMIuybzHwkBGuiChRPBOfC74RJWcSGu/y\nouSIzgrUhhSY1saKMZjg7MjAkRb9otK2U4pcagp5LyLKEeyHh4eYTqc1UjFwP0egQZkidoam0+nE\n4eFhSVOwaVzPC8PoU4Kkl3AADDc7mjXUi3PCGtqpIxXBnBjJsuIl8rFjiDE7OjqKbrdbDBOGm8j8\n9PQ03r59GxG7i6A/fvwY0+m0GFynEp26cLoyokKzDLX71I/RKUefRNXIi+UwHyW2I2Vj5tQn7wCZ\nQMaNMtIn0q92sDEoHLawEeZ7rFFWRkSBEZUjAEG52+1+lfLFqOFEWg5pmVhqMjLvY208V5BUiWjt\nUPF/jGx2vCy7GR0FcTWSgfE4PT0t83Z5eVm+z95zmg9ZZJ2NqCIXXHjcau1qX/n+RAwvz3IaGUOf\n5cnpG1LD3k927iOqUiGsO303OuPPLCfMN87J0dFRPDw8xIcPHwoCTDDm0i085+rqKi4uLgrq4EAY\nfcfzbStYx31IBuvtAxg52GF+fa+na6Txe9bftO22XqqFuTZy58AZvQBC5LX03X9ZNph/9lpGB0Eo\n6Rvrip05ODgodRstH3bkcrYFHbJYLOLk5KScAMdeYEc8DkAH9oRJ9F5/DgyAQiKLOH527AFP0AXY\neM8N+9GOm9OOzhCwFpaFfe3ZECk8SqfJUCZGnPjMnAh+FlEvkmZBjqgfjweWd6TAd4wiRVTHOfnD\naSO/1wowIwTeHHzm3Kx5LhH1iMX5/Ih6Ne2np6dSE4pmRbPZbAr0z+Lf3d0VxTidTmunQhyxcxop\nIkpF8PPz8+h0OtHtdsupjHZ7VyZhMBjEwcFB3N7elv644vV8Po9Op1PWdz6fx/n5+VenLGi8H4Qo\np1RRfrkuiIv/2ZFgXTKfLSIKqoDSd8oXtKrb7ZYNykW5r1+/jm+//TZ++OGH+Omnn2qOMgoB5QhU\nbcfEacucFvIm9rhZWzhARnEjvr740w4hcgDfh+85TZxlzkoj86BwdLiWxcqbz1DKRrlIkbCP+/1+\nkZmnp6fodrsxGAyKMbXDg6PbbrdraZiLi4vYbDYlGON5KFu4Vqy1nR8cVT7DeWbe7HSDgrNOpB55\nhmUZeTDyGxEF1To+Po7RaBTdbrcYmi9fvhTejiuFe16pbeV1Ql8S9IBO0A9+x+P1M5E50yIcyeNw\n8SxQepDNRqNRDCTzZLTBXBh0jNEr1mk8Hsd0Oi2lQqbTaaEnYNhJi67X66Lfrq6uyryxt5GN0WhU\n6uRhMJ3aJHXO3rdtITjBbiDDGFZslDlLPN+oDA0nHtk3hSSvVXbQWbd8GtIBFPbPzqplmsLLDqyw\nP9hEZzEYG+vFvFGyCCcJPcH6bja7WwXOzs6KXeEz9Ln3GfOGDMKvIigHEGk2m4Ub6MCCtSM16ufy\nbJ5vm8+c2yeIqCqiQ4Oxw8sa5L3p9iyOlBWz0zM4Ihg+p/Qi6l5+9rAj6gXaIqLGVzEUyLPYOEDW\n5oegLDFMjjxBxPyHts+o80xvTBtvzwnHPf08+spYzWchEjUxMGJnSF6/fl1+h42BoC6Xy1J1FofD\nqS+M/Wq1uyft+++/L+/E0YjYOcWOhDEkpAIMm/NdhNbKHUXovDzzwsZHRowCOS3CumUZIRLMUDyf\n+2gt88RVRL1erxi9VqsVg8Egvvvuu/jTn/4U//mf/xnD4bCsL0bW8DTrStSKI2VEEtTJt5bbgLHu\nKAejjTiEzIH5T0boLBvmXvBzFBGRPc+m0Cj9BOEBceT5RpNBHu3wub6YDUGn0ynOOd/xFVI4y6vV\nKhaLRRkf/BajypPJpOwd7u1j/3ivYWAc/duxg6SMM4GBtrJGvrLj4CryyBRR+XQ6jc1mE+fn5zU0\ng8Kbs9msoL3MKVE58m/n1U4VBomfo18w7Nad6CbW2PrV6Y8czDp1OZvN4vT0tFbAMafrbaQyOokj\n9de//jVubm7i5uYmvnz5EqPRqHbHqp2gi4uL4khh6OCKUeogoqp677XyGK1PPI/MicuheI6dqiZg\njqicTPa7qRrIBuiJUTxSdqyJMy12kIx2Mh70DX/b3uRg3ugRQQLvpaRBRNTmbD6fF3pKRJRUHraN\nqvo8v9lsxmAwKOUznCpnXo0wM6fISa/Xi6enpxKwj8fjGI/H5Wo3gxlXV1clqwV5nn6iW0Cb7ACZ\ne+xMB82p1Jze+9+cqIiX8gcv7aW9tJf20l7aS3tpf3d7FkSK9J0JmeTA8c5NArTnmKNLokJH8TRg\nPaNbjsyI7vHQDXHzBxItzeRW0kw5dwp8nI8QO5WYYU6e7Zw4P8PzNnxLP5fLZZycnESv16shK0RA\ng8GgHAflZFnELjIZj8clmjHScX9/X+654udEwhwZBynhpA9z6xTtdDqtpW45jt/r9Uqaj3nh+Zy0\nc7TCWuS1N3cORCYjYEbzvE5E0ETRPBeiJLwdrpKJiFIcsd/vl2j8v//7vyNix58immV9OZEVESXd\nQ9RDusMyBVpocmxOJxithCdAhEqqw+MzGZ1m+crROXsPJMipa6LXfr9fSyVFVAVJfQqJMZCaY67N\n2QBhcArHiCtzBtrE73uujci47IW5aKR5eC4IIRGs0UHGn1PJlq+cnvfedVorouJUNhqNODs7K6kM\n5DQiSuTtS7aREVLGRqMiKg6JTy7yPpel8PicyrJei6hO6zIvvjnBd+ixVk6BHh0dlfQWaAoNdGBf\nqnG5XMYvv/wSNzc3MR6PayejWAf2InLE+9DPy+UyptNp3NzcRESUA0kej/Ui68ffToM7HeR+ooc8\nNsuCT95ZT8GnMvLrkiiZo+jsijl/tm3m8NAnZ2accqMotVE+xpdTwNjZ+/v7ImvoPqPi2CKfggUh\nuru7q6FORgKZX5p1HPab752fn8doNIrb29u4vb2tpei2223JhDAe0wGcsTCdJcuBqRnOLpGVYH5A\n0X0gJ7dncaQMX9q4sehZUIEUUXImgtnostA53cBGtlGwYPq294jKkQKKNMTpMaBkrbzNE7Bid5/J\nPTtdyN84BZ4Xb0znyk9PT2O9XpfqzxjciArGxKHKRFE7hAiQlcuXL1/i1atXtes8IqLwWRBscwVI\ntZ2cnBRY1afafK0A9YNYdxOUzZNptapq3/w7H6unL05t9Xq9kkqh9pYNtMefa9Q8Pj7Ghw8fipJi\ncwMh4/Sfn5/HH//4xzJnP//8czHA9M1ke1KakJF9Qo/vLBaLWK1WNeeN9edIvlMSXjPzXZgXnIFM\nnrSxxfGPqO6OI3VLfRi+d3V1VQ5nWGbg6TC/BwcHZQw+4YWh9TVOELwhKzsdyd43p493MC/T6TS6\n3W7hbiFfOFOcfDMH0EECZG3Gj9NOanofcZh58/gxNjg4yDckXMZ/c3PzVdrMxHGnIni2dQfvpuU0\nhJ0Dp4b4zKevTLrm/zjlJlR7jUipuT/oGTsFTjUhY+bLsP7w37jSibWg7+fn57XDCBFVSQl4NfP5\nvAQivnEiOxnI0j5iOGuIo5ADV5PQ85wyfqetmQOCOQIJ82ztrDm4MoeL5+f+8gxsm/mVPjDhsbLe\nXnf+9tyzf7PdIUC0TUQv4dS7Vhpj9z5yStDcJI9ru91Gr9crJzdxrCKinMhsNpvl5C6O4mKxKAEs\nNoUDWOgA9JffBzjAHwfX2JBcb87t2RApIi07NgxwH3HRvBOUcUTl9CCEv3UaA+OVo90cqfAZKA7H\n5BGMXq9Xi/JyVIp3jBfrz/OxUnvYmd+VlTdzY0/ZxMDJZFK7RgOHk/eiyFyqwIiaBX29Xsd4PI5P\nnz6VInteJyKd4XBYnFA+I6ImmspFGVFuNiSNxq4WDgqMUxyM24RFbzhQCis8RxEoAtbdPAHeAVJH\nY21Ho1F5rj/v9/tlXMhQxA6pe3h4iOFwWE4uOorCECHXJp0iWyjxZrNZfoZzimPsvWBFzJz6KDN/\n81k+vWIjQWOfwIcyIjsYDGqkbDtnzCVzYkeRnyP3j4+PBVmyobGjxztwrEHIMm+Sz7iXLyt371Hm\nlNIIrIk5HeYNsXZGJRy5Zh4MsoS88TmBgU/yOlCYTqe1k8Pwh3CiGWNGpPJdmzwTh5r5Mr+H3+Xd\nx8fHNZTT+xK0h59D0sYJ84lhAhk7GEY52eM4P+adcfqY+TWKPRgMCprrZ7NX0NVwelgL+owM2JEw\nNw7Zo4GoIgdGI1lHZDyjSOwH633PtYM5non+Bi3JzyIzs4+jg244OzurodXsNetgB9jMPX8bPYXD\nyqEmH5Biv+D8eE5Bxwjmrb9AW3HQ2fsg271erwYmMG/sFUrRoC/JIFm3ucYfVwKhvwjwWN+M+vFe\nuF6MzXNuXb6vPYsjhaA5ZcciUYsmoo44+Hi5UShHQHzPzsvBwa7iro9UR1RwIv82pOyjtrlmEwoa\nRYkDwHvZtPzbDtH9/X0h7x4dHRWFmcmduQaJj8lamZg0mNEhb2afhLNTsNlsahdX+oRZRMTPP/8c\nh4eH8bvf/a4QOc/OzmKxWJRNul6va0eNqevCOjraYa0cDUdE2Ujr9boUdWPecCqog+Xostvtxng8\nLmlG+mC5oFKzDRupM5OYMVKGrUejUfk8YpcyePfuXTl16jlrt9txfn5eNud4PK45B5vNphRbpZ9O\nb1HzKh8tRqFzIMCKwAaCuWK+UfoYbTvpGE8Uh1E3DAunffr9fq0vjup86pZnsDfsuCJ7jMPGiwAK\n4w9hH7ll7/Ne3ud0wOHhYQ3Cj9idAmWd7UBH7BBAB17IAb/D2tLPjEowDqcw0GcuIEozCsEYjGI3\nGo0SgNhxPzk5KU4suggdh7OGgjdiAZkeo8d88Rnr2263a0VVI6qsAOiokWwcL6ejnBnImQajpfTj\n4eEhrq+v48OHDxGxu9B6u92WuwhJ4zFG3/lnfYIzZtQw2wEMO3/TN+8fp7iQJ88Hn7laNpkNIxY8\nL1M97HDxu5nEDJJnpNbUEz+LuXbakv3mQxrsa+qEeR1Jw6JnrBetz/LJNf4mEEQ2jfjzPAchLhzt\nvUFAy7Ndm8oEe4IyZw6MdkbUbT52EefeqXJsBX2xrCAbIJ9e1wya5PYsjhTXN1i5w6lwusLKDUcL\nA+S6ETgsmYXvWin27mn8m1M7OdpFkFHk7hMbzt68I4yIqI2PvrLRDg4OSkRHCmq73Zajzl5Ep1Ai\n6mgDEfBms4nxeFw7Een0m3PqfM4fjAf95mTJ09NTfPr0qbbByIHj4JjvwfyyoTi6zjiMlqCw3U5O\nTuLi4qLmnD09VSf92ARG8nAOObWFXBi+Pjg4iMFgUFN8zM8+B9Och+FwWIz3x48f429/+1tcXV2V\nvlhm2u12dLvdchLUChwFtNlsas53RJRSCzhChtSdTsBo2CjyO0a8aHbOLft2uLMRQsmcnJwUR8bO\nMO+9v78vR8xZu/F4XL7PdS80X9KbT3AZ3bLRZb5Q0vP5vMgQSDFjmM/ntdQEjj9Ij1M/8PDy3EVU\naVY7oMw3eySnaDwOR7s23kTQcBORfRxQAh87505pwm9kj+IAohvNAWPeQJBwcJFD5pqUGuPgmDnO\nXaPRqBkvn9g0AoBsZP2ZeTKbzSYmk0nc3NzE+/fvI2LHLZxOp+WkpGvj5bSQUQF+H7qAg1bQGfYf\ne5z5zs5OPkXOnDnNCvqDg2FEBj4pfbUNYxzIVj45bgfI82l9QeDptCZOFH0hOOdzbA160KnrvE6W\nWWeEvFcZE/bNgRJOFb/vsfMs+mL7i5whk0axj4+PaydCTZPhO7ZtjAmbTf/tyLKmzFd2vplj5sB2\nNtuq3J7trr2IekSNgC8Wi5qXzO8xmdmjt4OTc8mZN+L3odgwxuYD8LtEnxGVgIPgIDSOYJw7t7Ph\nvlowvbldNfjk5KQYHsO9/G52qoBNMeARUbvviOjJhFRHMhjr/BlHi3/55ZfyPjYKzzMSgBA7heTr\nLugj68mmGo/HpTZJp9MpRUJZJ37O77iiLZEsRofvMcfMj4nf1DhiDb3ZqLdENLfdbosBvrm5ievr\n6zg6Oiq1tLzx2ZxET8iy5xTFYmeKqJT0IGhZRBUJU+IiEx7t9Du9Y8csNyJxp1wcpbMXMWyuobZe\nr0tpCx80GI1G8fj4WI7vcww6Iso1O8fHx3F5eVnQtX3zZmdhu62KWObrGUBnSCuY94PJAAAgAElE\nQVSwnuZ44GA1m82vCvrRb2TJTqmd8OyYImc4Wk5fgSqxd3jm4eFh3N3dlSr7nU6nli5cr9c1nei+\nGBGwzuC59NXBHo3negyZk+Pv8Bz012AwKEGESfggFXYm0InIh9EMdMt8Po/RaBTX19dFp/z8889l\nXeCn7HNCqHhuJwD9wdwYeWCeKbZrpGNfHaKISlc5Tcn6Ukep3+/XSr4gO1nvOzuC3aLmGbIPqtnp\ndIoDa12a9y79w+mgYDDvdWrc+hzbyb+dDcIp4TOoECA5Rn6c9nbQwPtAY/cFagZO6FdOXZLGQxYY\nHylz0zdAnAiGnSqnL6BgyEmmBjgrgL50cGZd6nna117KH7y0l/bSXtpLe2kv7aX9ne1ZECkiOnt9\nIA1E5M5dR1Qn8CIqz5xngdo4qqZlbxwPNJO6IyqIk1QZUYUJzuSusyfL951+dPRHdEmaarvd1nLM\nnKwDIs6pBiJhv9upJZNJI6I2f0QB5k0ZkQIVMCJnyPXx8TE+fvxYPnN/XOiUuTafIeffibR8pQVo\nA+lVUkMRUS5/7ff7cXZ2Fp1Op5zeoHgn7z08PCzRLRyC4+PjGrE4IgqR0vA5c5qRyXa7XQpykl4a\nDoclzcRnRqcajV1BTxNAiYDgpZkL6OrHHDGnEaGDHJGqolGdF/k3QmDo3PuA3yUyN/JKOmO73ZaU\nI+tLZM/cRkRBAJfLZUENT09P4/GxupDcldDhZ3iOQZ2I7B01kk5gHzEGkGHkb1/qYD6fl+dxMCKi\nOilIP4xuEO16n2d9wt/ef4PBoBCjiXzNzeI+y8vLy3KCL6KOGu1LheX9nnmgPt5vHovTki6M+/T0\nFN9++21Zn1evXtWQGF8XQnqQ7202mxqnynJozo0RTr57f38f4/E4bm5uauly+mi03il4dBYIqfUX\nqInTmMgwXKter1e7TxCOGDqKdHxElbr23COnoNfw1rwPzVXi3x4DXC4u5zYRnfkCrTYCxhyB8PgC\nYb6HHjP3yOhK5hIh8yBjtl+M0ffqGZXhO5nWwrtZR8+Nsz408wOZB2TYfDX/rnmc6C37AT4FbOI4\nv88YeB9/3E/rJGea3M/fas/iSJFnt5JCmbFghvJQdk5BeXAokXwKyXlOfteTYSfKBF/exxHqLDQ0\nE8D9PvrkmiEoChwKw+pWCjyLaxLgFiFkwM55TOv1unYCh7niGRkmxmjjKLnuj51baifRbm9vi1IF\n5vcpGxSH6y/Rz4gojs9qtSopM04GQgLmvRE7p2cwGES/34/Ly8saN8ZH8/meDS4OAQqSucGJg2Rs\nXgpkZxxCG0QcC5z66+vrr4jIPKfT6USr1SrOhDkO2XCSQrChpK9Uusc4+AAEyqXf79fSB6w5f5t/\nSF8YXyZdcu2HFbTLP0TsHBR+jqGlZlm32y1zk3lXcEmQG75HatPGjX7ijMORywEGBs9lTlgL0rNw\naVgr+G/MleXTwYeNE59l5ZvvroTgbsVMgESq3Pucgw7oGe9tZA0Z9jrZ6Nop83dx2nEc+Z3RaFT6\n6TQnv8dcIXeWUeQok6gz/cBtu92VU7m+vi4pMo6kOz2ZHUf6i/PB6S3LFO8y+ZuggxS703AEyDgL\nrorN/HvvmANmI22Hn5+ht+ycIMP39/elrh7OKsE0e82cpOVyWTtV6nRZPiCQObQ4LgRMtl84Uug4\n02hciiBzGVn/fbYWGUWGLafIinl6lmGn/MxJcyqRMZijad6hHTUfdMmfmSKUU/c5Jcm8IoNOA+5r\nz4ZIkce0cmdBF4tFiWojqlN0VhgmAdrDjKgjUlmg7LVHVJGtT/9F7ISVSMWePXwM+uuoFGGirxZg\nnCeIeZC8eT4C4FNZjBXBN5LAZwgu9TMcsToK9ZHoiMoJzBFJRB2ZazabBWXguXDZIPtlwvV6vS53\np3kc/L3dbmvcGxRoRBQukB2pXq9XDJUdglzozsfBKRgJyulrOYhy920OSLy0HIW02+1ymedisSik\nZiNvdrB5FkYeZWO+mhUR/ATL9z4eg+fUXCWOFi+XyxiNRjVukeeb92I8/H1OEbo//D7HzHu9XtnD\nrBNF+a6vr2O5XNaOHXOlDPObo2ATRB0A4DxhYLn3sN1uF86REVLGa4SPUg6eL1AyuFRGDmmZC2Id\nwbyBSBIg4UDBzaMvzNf9/X25sDiiQs326Sbkx84s/UNGMBbZiex2u3F1dRXD4bAWUCK37D/zchz4\n8ZmdM0jdyI33Po19tc+AsQ7Hx8fxzTffFDlFpphro0cELaAojJH+cTDH/TBaYx4o/YPUTLCH7Ltk\nQHZcCbaM+toxwJYhwz7NaWTDwQABBCimneNer1fj6nkf0qzHMn/H8uFj/qBQ7p9lw2vm/zM2n6i2\n3s+Ode5P5kcxfvalHXRkzXwzj8GIK33LfC36lfuAc4bM8Dsmydt583j+N47UszhSEdXmslHMpQZQ\nLK5ia2URUZ3QcLqQzzxwNoGFJqJeA8QeL+kfo1/8PpMOouZNyu/Td6d7GDffNbLCO4l0MELr9bqc\nUGETW2g8T2y8ffPM73lunN7LiAVz2e124927dyWCPDg4iMViER8+fIjr6+ty1NRziIL1vXMRO+eG\nQmlee6pio0x4BvPGpsGhBEngokscUSt+iKTeAD7pSZ/pK30BqWKuXHsLInqr1SplLPxZLvppdAXD\nCjKBEaAPGWmFdOm0TE7rMF9GRmgcqaYir42knUWfaOIz1/ixQzCfz2O73Ua/3y9oCXJBSYjb29uY\nTqdf3cGIXK3X69qJHyJ51jDXSkKZ8z2fArUDleePVC/pZxO3nY7OQZRT2tmJzk5UruoPQooDZ4cV\nJ3KxWJQLvZlj5iaiIuCyzqvVqoYYOdVkXZNTEaQuz8/PizPFOlHM0vsmoqpLxR2DnK7mmegtB2me\nN36Wg1bQee5TYywRlW4nNejTWjhCOGEmm/Oex8fH2gER5tQHJFhz+hJR7U8/E6ceGbTu9NxmKgl/\ngxQ5jQ5SttlsirzwTJe5yXaDvWKk3X3nOw7a6SN72nKc0SDmyPbE5RDsDPKunG6zk4XTaxuS598Z\nn4ioIetGqegfwRzP9cELbI33gdfHQYazV54rk9SxMaxXdqL2Oatuz4ZIWVAiqguHSY84usTb57SP\nuT4oUzaVI3anL1CWjiZc3dn/NqQZUVUi59/2xP1/n7TIkZojZBSdDaRz196k8FIYZ04z4gAxpz5e\nmzemlRvPw9jbeUF5PT09xdXVVbx7965E0XDZUEKu0sxcoJydhuSKBxTV01N1QSXKzDwaR0bb7bZE\nrg8PD4UjxWWnpImbzfqlxYeHh8WZWiwWRUHb8BrVi9gpb4w1StqpS1If/K6LanJaCYXK/PJdw82O\nkkk1ohidFj0+Po7JZFIiea8pCginpd1u12qTRVTH/532pNbXPmeXsXAdB+OK2O3Rs7Ozks61zGw2\nmxiNRjGZTMoaY5C63e5XqKidN/YFStFyaDlHtiIqFA45Y12sNFk/5pp3mk/D33xmNMjPiqiuVLHO\nsiPE6cJmsxm9Xq98n9Sd69tkPgaGwwbHCLfTJpZv+uZaWOiPyWQSl5eXcXV1VagCj4+PBRHPJSqc\nZmGvWSfakBA0OkBEpnKgiB7nJgbv06enXcFXir0eHx/H+fl5RFQXWm82m5hOp185vBSX9PUnrA/7\njHUxv4Y+YGP4HmMB/bZ843wakbPMYD9arVZJyzFPToWC6tAX9KxRPq8HqLBr0DF//Az9nU/XGm2y\nk8n/87iNKoFkGbhg3rBHNH7PKBeN72E3vE68y7yyjN4x3znNzvwbyOB7ZEroi/WJ05m23TxzHxrF\nc3Jg5fZsjlTOiRKZkitHOCPqV8Uw4c4zO41mD5QUAgLqlAK/7yObNnoYdoSZ75G3diTo5hQBSpyf\nO9LJUR3OgmHKiIpfAYKR04UR9UiIfqIgMpRKy9F8RNTG32zujqOfnZ3Ft99+W5QbimA+n8fFxUWN\nxEsDnneEQiSGMnRfSYeYJ5TnYLFYxHa7LXVoInaOFFXE90UNfAcj5Tz6ZDKppVFdxgDndTab1coN\nEOkBz9tw46zbwWg0GjXEwiiA1xGD73U0UntwcBDj8bimNFlPIjWiQqclURLMC/LnO9bgLTn6zKgi\nxnYwGBQ5BVXzPpzNZsUxtzPIAQH2qvcTc0Pw5OBqX7rL+gIUDB6ZycGuep1T5uZeuA/MtxEeI0Q2\nFDhyTnMYMTZnx4VzncbhmSDSEXWUwgRukEnWkGCF9+V0NJXCHx4e4uLiogQH5iV2u92vUpcusmrS\nsB2pnO5FxtiH7EXvfdJ6yKJ1NH93u92SAmZuCDJ5nknFjUajxo3LurjVapX37iu4Sr9zyQGccHQB\nv2eZPTio6j/1+/1arb19NQkt40YVPV/W18wRAQjrijyxV4w+edz8HvrP8sZaOrhzX/m3gQEH5Pvs\nBfNjqk1E5YA43UtfkWHsutFRUoiM0/1jDf1er72DVQcfyA2ygpPKs60XbIP+39CoiJfyBy/tpb20\nl/bSXtpLe2l/d3sWRMq8JLxNkB48TUjZERUE6sg5F+jjuY7YHanjpRLR+Fl4tDyTyACY0MRgIEaQ\nhcxJ4r0RUXumI2O8crxhCOxEWY7YSE9Cms2et087MRaeCfzriMARBlE373AqA+Tl/Pw8zs/PC6mW\nMRGR93q9EsHyXubTcCwRkBE9omM4DnBAKAQZUXHnptNpjEajGsEb0uxmszutlhFHIF4QAtIboGPc\nydRut786WkyF5cViUb7nQoMgIiawwzEhskGWmDfzuEA8WWOOR19eXpZIO6Lil9CXiApRAqbmea6I\nTz/4Tq4kDzzPeJk3n2KKiHj9+nV5H3eaudI4awgyBEJGWobPQBtJhznVQLROFLnvdAzrnPkO7G+e\nYZ1BQw7yQQz+D+GXfyMHGeUwVwVUxgi00wTsd55Jior0tm9IAMEgtZWJ7+xVEGv6ws/pp6NrUr4g\nGhcXFxER5aAEXKmMyvl+P+s9kEanzbLOsO7KqASIFQiYUQnmB1n25cOkG41gR1TpXtBSI4DIGIgK\nKcCIqJ1KhGJhPhPIdZZTz5M5Qcw340ZOzQfy+M3PY//RF8s988d8k6nhfTldaKQLnh7fz5kJ7xmn\n+Iw25TUEoeJ5mROHns0ps2xT/DOjwvYJLN/IisfnPmcCPuNzCnYfny8j404L0y+vef5/bs921x4E\nWCuqTIpjkOZBIUSZxxQRX8GcTushjBYU/m8YMCJqBu7u7q4cteYdbHz3KaJevRvIOJ8iQsE1Go3a\nVQi87/HxMbrdbg1KBdL3JoyonBYfo+dZKCHmgLGa6Ggh8nzCZeEklisDNxqNcoy30djVSxoOh6Wv\nvIP19aYhXYTRd6VpGv2lkfZgQ1CLhTGgFEgb2hCycXHcmDvGjoK1U9vtdktKkFQRY3eqmMa84Xii\nnDMXxOuEcvW1DfP5PMbjcTSbza+u+0DhGvpmvlkrO6i8o9lslgMMVN2OiCJ/PjThqu849Dj9cNIe\nHh7i9PQ0Op1OjMfjuLu7K+M/Pz8vzh77zWlmO/sm9LJuKDCcFH7PTpiblSAOgOXIqSV+3/w5GyP3\nh7QkgQtX/dAfHNRWqzotyDNxnigZwglKZI7ncGkycoODzjvMWbEzQwqYMftwgxU9PCzW8ODgoHzv\n4uKiHJRwatDzjZPvNCvzgMNgg8P7bVhJcyM32VmhOe0KfcE6ykaPtCjr6YMS3pc4xvQNKgJzwx/2\niw9TOJhGrhiTU+WZ5wa30Zcns2bMGfrdTkY+KJXT8ayRHRE7eKxBPjDiFJWDk+wg2kFxmo059QEV\n/rZzw/f4m+fZJtqJRp95juHcGgTJB0gcqJgaYeDAfzslSrPNxkG1DfS8ICu0HCjl9mx1pEA1zDGI\nqOpV2NP0orMomQti3pI3d/Y+feO1c8QINf1brVYFjbAy9ak3E9loOC9EdNlRBAnx9xCiw8PDUuoh\nEwfZ1CipiHokZOGIiK+EYh+HiD7tO1p8eHgYvV4v+v1+DV1Amd/c3BTlgcGAr2RkwdwWNia/Y+cM\nRUg/mTeUD3eCUXbBMmPFzmeQna3cnX+3HDIO5AJl02w24/9h7816G9uO8/0iRWrgrKlbPZwcx7Gd\nxBe5yvf/CrmKgQSGY5+xWwPFUdTA4X9BPMVnL6nzAwwE+l9oAwc6LYp777VWrRreeqvWbDbL59B+\ngWdaHhgr9wIhKBFA3tuKkUObiZTX63UFIeNdQSu4Z6vVSie72dydTu/1dlEFMudjOHgnV81w1Mtm\ns0mSr2VqNBrluY6sPf2jQHqMuiyXy9xDyBLybeJteSQNCtPor5W2ESj4LDinRoNZWxsDxsO6lRG7\n+T3WP8yrCcIRkc7m2dlZrhXjwDGv1XbnJfLeGBbzjqwbjKCbs+LWDQ4m+Fuc/hLlQ74YC+NizlwN\nbX1C0QGOD/e23ubffM8VrUaBeD+ew/2QbRBn7yHW3xWl3gu12o6PCPLptXPTUeaM90QGkR/0YRn8\nWFZKPYQzjB4uOWsEEzg3XAR1jJP3dXEEsm6bUNo47oXM2sFwUMk7IgPmFRoFM3rDZ3bYjA67kIR3\neMnBfykY4vcEhdYZ7K9y3tAJyJsRN9aX35nj6MCgfE8j3gRLdpTtcL50vRrZHI/ekSnRHMrbhzAC\nyTJICziTRqqjREFsVDwZbHaTObnKqjYuOx4lyc2wtpUe74KhtyKO2LUUKEt2+QyFz0+MBU6NEQVH\nl5Ab/Xt+lv2NTH5/fHyMXq8XJycncXx8nEhSxK4/0WQyqVRxROwqYlqtViyXy0rkidLiXUtvn7ll\nbR2VktLg/nxmJMrjZlxl+sDpFH9vs9nEzc1Nju/4+LjSfqJMm7pSy6kkV1ExZiOLjppL2Biir5Gw\niEgnCceOXj68mxEltw6o1WpJDjfszjjYLxg4R/MoRObIChO0gmfyPcr6QQ0cwd/d3cV4PI7T09Nn\nqbvlcpkHPS+XyxgMBhUSvpFjOzxOwfJOzWYzU3Q44KS2bXSNABg95WKeQTztZCF/q9Wq4rhtNpv4\n9OlTrvXBwUGmqJbLZeqhsqAEpIX96ojdxODlclmpsOO+rLXXd7OpUhgODg6yTQUo27eaDWMwmW/u\n6SaQdsDsSFnO+X/eEyNV6tOyn5VRBJ7PWNw0GFTbwZQd1/v7+wxQPFcgXiCP3ofYH+TOqLEDPAoZ\nQDgh0e/t7eWJBg6wnJosg10yDUZb+Gl94u/ZObLT46q+Ui4sG8ivG1OWf29nhe+VqCdjtHOP01fu\nU/7fziK/s71zGg5ZYC1eyrzw3g6S7LCXqT+nij0e5tdj9/h4l48fP8ZL16s4UpTfGhpm8vf29ipQ\nb8R28CiAiGojRnvKbGxvbgQ/Ip4ZBd7B1QMRu8V1btbGG+XKhHtRcRJ5vo2eo3/SVDwvIjItYo5K\nt9tNw4TBszBTOo9Rs5E3j+Ilp8/polqtVulrhFFiM/PZcDjMXkERUUE6SDExjs1mUzl+wfCyv0cv\nKBwsp6gwJmx4O1lErHt7ewmps/bL5TIbiZaOratznNLleWx2KtOYb9YF2fHam7tiKNpGx6kWR/PT\n6TQajW2PJH5vp8cw9tHRUUWB8zdE5DyPPWY0xfPmVC97gHsxjyAlvCfOFYgw88CF01dykFgbd/3m\nYrwYE7fMwLjyPjZ8/lnyRSIinfnxeJz713uYOeEzR+VOx5RUAWQVx4TvnZycZAqJZsI2bGVazIEJ\nvCnm1I5brbbtEUagVzopDsqc9np4eEjnez6f5/vSBd9pYsZAdSzrZ0TnJU4NY+OZGCKOTyK15/1m\nQ8k4jOx4vzm7wJ5yqtyVya6EZK5IGRnhRodaT1uWMJqk+fgeDp9TWuV7rtfbNg1lypX0LO1YjDQZ\nTbRuL/meXien60BquFdEpIPpLIT3vmXEOpN7gBg6gC51q4NXZ16MSPOuZAf8rp4bAsgSEeX9S54X\nqBm/cxrPDlxpn0E9zVFzup99yrt5f9h2vnS9iiOFQrUBIx1mpe6XJ8IrkSwGD2xHuWvETuAwevy9\nL4x06Z3ymSN137NUaFx2zCKi4tXyfRSK/+alKI73tYMU8fwIF/+/HSkUgcnbfk+E3pAtYwWtG4/H\nFY98Pp/H1dVVxZHiXTG6jO3+/j6jcvrEcE5V6dgYKnfKxJuSsTvt4jmP2PHbvC4o6JKHQsRiWWy1\nWhkplqkWNqI3ael82+CU5FEcSIyf4WUceZQysmFiaEQ8i/aQXZxfv7Ojfz+vbNtgQ+PfEy2bM4jM\nYKRKbgKO0NHRUfYJe3x8TJJz+RwQAIz7YrGopFlRkvTS4XKKiN9b8ZNKIQVUIqA2gmVEzBqXaXue\nQed9Chz43nA4jHa7nXMKgkTK9eDgINNWfk/4PG53YHl7qfyfMdspKNtbYCjr9XpcXV1FRMT5+Xki\njpZP/t4yab1UBmJ2cjyvj4+PMRqNYjKZ5Fhd8GJDGVFFwTDWDjjtPDkwZQ9j/FmXiK2s0werdP5s\nL0rUxYgTMsI6cS+coYidQ848O2BjfUgxko2wrsFAOwiwA1JmU+ys2EkHWfJakQYt18jvWepKO3Ps\nuZe+R2BnFMiBg+WUlDV70ek/9j3tKdwAln1Arz4HhbbT5gIjgyXowUUgQ4sPZITLDqqfZ+f7W9db\n+4O36+16u96ut+vterverr/zehVEyhBmvog8QBAbIy9EbF+/fs0IIGLXFRVv8yWEiIjOeVe8fxCi\nx8fHZxwSOBg+M8xpP3OlPA7gW+fYSRcQfZSQNvd2RBQRCY9TyfWSBw2079TH/v5+JRLyO0fsCLUg\nCEYlWJ/JZBKXl5cxn89zjIvFIptDgiAYfjdMXKvVErm6vLzMKJ1xOlrwmUu8f0Q1lVrmykEk+N3T\n01MlLdpqtXJOLHNGuZADIihIwuYGOPIyP8ERG6kpGlh6nZFToiy+zztAFPe6OBI2OuV0Ycl/sBzy\nPubRlGhQKUc8w+lAw9pEa6TpXF3Knux2u9Fut2M2myXvDITRcu/IezweJ6/IqMVLKTEjGozRqT1z\n/0C5PL989lLpP3JH9Oln80z0DWgL8jaZTHJsnPPo1NbBwUEi5bVaLT+zzuMZrD0I5WKxyK7gXE6h\ngriA/q7X62y4WSJL7PdyP0Rs036k540UMc4yg+DP2WOkS+7v77NtyGQyybWwfuQ+INRO7XAxp6Cu\nRl1AlCk0MOqE3ivXl3EYqXKatF6vV1LQyBP/b46uW7QwbtA3I9Xs9TIlxmeWeSO7RnzNmyRV6H1b\nVtvxk33guWFe0R2MH/2FTfQ+BRUrW4lE7NKE2ALrT/QF/GYj4/P5PCaTSUyn0yxmYN7oLO+xlf6B\nEUX+1ilmrxHjY6z4AeZou9mt9aALN751vYojZTj8pTQVEJ8NBsb04eEhxuNxLobPGGPA3B8yrY1m\naRRQjK4g5FnNZjPLxl1NYAHld9wfISuNt/+O3zFeFIgFmqtMQbD5eE/4KqVSsGLCGBviRticRuFA\n2Ha7HZ1OJ2F6Q9Wj0SghYypcmF9KxuEucGZXROT5a8y1nQW4TvAySGexvqQ+MG4mx+JAeQ24p1Nd\n3hg2zNwPmeEdgY1xnlgXjDYKGUfZaQH+zr2bHCggW4aODeWbS4Iswk2yDJMKsUL0+pPSBFb3OMwj\nfKnC06kzKykcdkr8kYu9vb04OzuLXq8XNzc38csvv1ScHj/74eEhq/0oJmCt9/b20uFgLcwh8bxQ\n5epAxHID38dGnr/jd8w53yv7PJXpKK9xs9ms9IlbrVYxGAxis9nEly9fct6Yq4eHhwpHLmJXFeie\nYE6R0LUd54Z34Z4ljwvZf3x8zM7vrgi0nvPfc7HvTZC2nPjvS74Uzs5LKWHSKdAPvBcptPF+9HPY\nR66EhFPI+Gy80ROuYmUt0D1OCzp4Q+59aD3ri8OHTjDdgj1tPpTniJS8gxsCATvETtkh4yV/iL93\nxZ/twsHBQQY5nAHq9J15U6QA+Qx7gcNnR4SghL1kpwQnsky/uYCAcbh9Tdn7rpQl3tNBtp2aUn8R\noJdcLJ7Pfbrd7jMfw4Gm0/+M+3/jSb2KI1VykiJ2+WYLpD8jv8rkwL+4v7/PzYliLKMaR2V2qngX\n8rB2spi4b+WfEWRvGjYlQgCfJGKHKJngZm4RwoKwefwRO6cBEjHf5z3xzM1xGAwG8d1338X5+Xm0\nWq2KMWEz2alAcdATqNvtJoeFPiyz2SyVXuloMI8oG5en7+3tZRNAjlOwMR8MBtFoNNJw4yBjeMoq\nTeQCRcUmsSNlZ4J59k9vLDsLlh9zKDC4oH2utkEWarVaOpLmGeAIehy8K+Pgd+UzI3ZOnJ0g/g5l\nUvYcIhJFIdsB9Rwhr6xFKYMlz8pyDA/o+Pg46vV6/Pzzz3FzcxPr9boS5IDSULXGe5qXQ38lG2zP\nQ8lnMukXQ2J+I8a0dArMH3PJOmOFH4czYGVrJQ7iwjg4hxDd5BYH3Ofo6KjS84rAAq4HbWGYb8sJ\n+43vbTabLEbBGeWyLEVEBallv+Bkm8SLE4XzZSPEVRotPmce2ScOFAkkcJTKwg10ETLIOqHfCEyM\nUtiZcGBWOkasKfOG0cTJ9vid4SgzDZ5H26/FYlEJMGxn7LTd398/O07LSJP1jmU1YleswnsSfPKe\ndpY5K3C93h0VZD6Ugy7QPp6Pw4+Dilx5/AcHB5XeinzP728bhc1A1sp+buj9EiXF5iNfjKF0suxH\nMNcGElzNjGw6mOL3PAO9aZl3kPrS9SqOlHtqMBCiDgwKJMyI54eE2mGgDB/hsSHkJHEqWGww7JE7\nRcTzUKQ2WhE7Q4tBsYIGZrYidiRgheJIgPfFsBvCRkD5XlkeDBR9dHRUUVBEVN9//32OqUTIeA+n\nSSN2yABGzaRFHK7RaBSz2awStfD3rsC008K8OvXFGLk/CpjPkAEcUxSgv1c6H8wpBEi+42jWG8My\n46qxUjEzJ6yDDRsG2+OyYfeckzryOkK2LNEFO78gQE6fGrEp03yOqrzOL0r9hz4AACAASURBVP3O\nc4dcl+/C/sFB8f3puXZ7e5tIru9Zr9dzHx4eHqbs39zcpEOGs4Eclmiyf1oOHBRYEZNeQzbKlAqH\nPbfb7WeNF7mXnSyu1Wp7qG8ZYBHlPjw8ZHEF42AvcLahURcQEAyyAzN0kBHCiKhE8uzX0qkhSCwJ\nuI7mPS92bko5tRyUTr5lir/f399PhLvT6cR8Pk+ytaN/5Iy9Y5kxisw82CjyDuwRrxMpOJBpp5Ih\n8GMsjSxxHwwyzye1aj1jNJYABwfArXt4P9a9dLDZ34vFohJkg046dc09jWyWjp9/8re8K/Ns3cgY\nsVugwk4XMk4cP9Au1ubu7i5Tey+thQn8pk3YGbIDip4BZHjJibGe5p6MCafY54qyruwr20KvRakT\n3bvsW9erOFI+zJcJAolAkRkSPDs7q6AB6/U6ERJY/44AHLWsVqvk5XhjIJj8zoYNQXGlnb1vIEnn\nZvk7M/+tMCJ2R2WU71mmpdiQEZHpMZyp5XLXrNNdq2mc6YNinZZxGsP/5t44OqwFCIbTSoxhsVhk\nry8rcO6JoLIGjJdndLvdilNrhIn343coIs+nq/0ceXgNidAeHh7SkNkIs4m5p40skTmGtIwOee58\nPq8cj4OSQMm9xGGAK+IxOnePgfQzUQx3d3cVRxrkzsiUEYQyhWUFgbPQbDYrvA0+93whn8w9zoK5\nR3S7xzlZLnedrUnBHBwcRLvdjvV6nRwjl+m7WSDPc/TJe/szO9F2GIjYQczKI6VIa4OQ2rlE6ePY\n2TmDy0FQxNyw35gDZIh79vv9NHy0u4jYdZnnvc3/xAFgjNYl/J53LXtMkfqLiIrzgWzZmHndmW9+\nX6Z1mTsjwdyX4LAMQJhDH95cogteY6eHQEeQ17LlB/sJ2eK5HLtT6nf0kINgyxTpV+7tzwjcTDPx\nvLFOBAsR1SOHarVapmr5HqgQ+ob5xlF3IO35BG2jythOqNsfcFnv22EwWIAdMOfMz8Qh4p2NcEN1\nKOeUABZns3TIrbOcvgP4YL6tj1g/ZN+ABWvG+hlZ5/vYb1/MM/cymlvazpeuV3GkHL1zTSaTNAoM\nngm4ubmJ9+/fV9JbFxcXEbHdGNfX1xGxg7Md0TldZ8PGxOBNm+vTaDQqTdmswIigMEDm4fhyCsrP\nJ+r0uXkIE0JuRypi1y6CCJNIr9frpQOFE+UNbJjdabOInWFHQbzETSBn73QSUCkG0Uq5zEVHRCUy\nY1M4zcTziMR5lhW0N1KJALo8vHx35p1/28ii/MrvReyOsWDuzCNDHsr1ZQ4pjy55RyhzI6Ceb2Qe\nJWjH1albyzBjQg55Dz8Tg2+H11E66+/0ldEIo4VE1lZcRlXZC/QS42JPksIiamUNO51OOjYR1dJ+\nFDsOjtFbK+ASOWYezYHyOBhfrbZNxRrNQS/wOXvK84ETyTv3+/0cM58xDvf0Yp6McuIk8zfmZ3lf\n2pg4pYN8uImsZc6y5rUmALWuKVMlvr71e9Z/OBxm/yXLAAEXAbIROQw0PEgjsDgfNpZ2CsfjcUwm\nkwzOGBd0BBtExohcYMTt1BHcWhaQbzt0XiP+Dr0MbcGcLL5vhCxi197h6enpWcoXuX3pNIX9/f1n\nfFOey7NIETMHvofTe6ZN2KlhzxlM4KfllTESPGM3S7TSOpw5YT/xX1kcAIpdIuMeh5FF1rdE6nGw\nmW/aLRjYsD5hH3rflaBIeb21P3i73q636+16u96ut+vt+juvV2t/QITq/HBE1fN1ftxw+MHBQZyf\nn+ff1ev1jEyMDhF5ANGbC8G9iFzwZHk/R4dl8zpXiZRRGpGL78E9I3ZwvSvaHL3CsXqJbN7v9+P4\n+DgRqZOTkzg5OYlOp5P3dTlnibgYdSs5Fa5KAP0ADSovUEFzzJhv7odHz2eOyl9Ci0Cx+K75S8wf\n//kzZAli90uctBJBIqp0tUz5Gd9zJOQIjfUllUqqGeTDuX/e1ZwA39scn9VqFbPZLNtGkKJiv7hR\nKBG001tOi/C5Uyfl88rCDCNQ/J3HwJhANElvgP447cVe6/V6icKCGrvM2UhfCfXT0gKUmKtEHplf\noytGJRyVU/RQq9WyZYD5J/V6PQ/HdkrU6XhQLMYxnU7zeBAqW09PT/OePAtOFu9vvpWRGd4flA/k\nkDEQXYNu8V3uCZ/I5f6MgfE6nek58Bw7feN5L/92tVrl8VF0f/dFA0SoCS5CgbdkYjlrityiE01V\nME/V5fLORLD3ywtd5H3Id5jTkmwPr2ixWFT0M4U8JSUiYptpYZ+5KzvPs36zDsZeGTn3+Fh75MDo\nGegtYzDvzkiUES4+Q5eY5sGFHeVz3sfyVtpFo+geA+PAxrJXGT8Vty5MK3WG19JrZ/3mtK5pCeY5\nM3ZzqKwv+L3f/Zk8ffOT/8MLgaP6KaLK27BBjIiKgel0OhUYM2I3CfB2bKCB050LjqgeFAycW1Yd\n+Pe8D++LMDrdgECYTOhKMUOGFgR4PKR29vb2ctPxHoeHh9HtdmMwGORxD/1+P51DK1su4Gt3fmbe\nECIg8zJlxGfmCHEPBA3n16kENpBTcVwWTDs2QNBs/NJxZSy+v58LBF5+BoT7ktInpYtyNMRrgw4X\nis9QeDhSXE6fMu67u7sKb8NKsuQKoERQGK5429vbVizBh3AVHf9h9O1ksZeczvX6mmjrNCX/NmTu\n9eX7ZcGE95KfV3K0GEtEVNIlOAplhSbpIKeRS4fWijtiu9/m83mlw7TfCw4dh1bbyYzYVqdiBHG2\nTk5OknfJmiAbe3t70ev10jiWfXAwpCWBN6KaHi2JunzHqbmIqOgm84UidmkZ5NH7wuvpg3x5vg1Q\neTllw37kfjiIw+Ew01gm+k6n0xgOh0lPYF+wn9BVnh/0noMeOx4UGC2XyyTxezzWXXaGMZLs+9KR\nIlhxyhzj/fT0FMfHx2noeQ56mKIpdON8Pq84dawJ33N6nnEx9pIK4fQVOsp8L2wGwQx2xHw587NK\nHhx6sV6vP9OdOEqr1Sp7RlmfWM+UdADLnNtU4CTh1DlFT4BBmpgUdcS2KpE1MoeW5+E/MH92epGh\nMv3Id5HB0jcpeaTl9SqOFIrB3q4/Y+HttHwrJ3p4eBjHx8c5kdPpNAfMwqNkOJcuokrYI+r3RJUI\nFJNO9YYjayt3Frd0FGkOyec8N6J6GCz/76gMQWi32zEYDJILQW4aB8oePQLKBkUZWWkbkXCE8RKf\nw0aR79vg8T2+WxJOXQ1j5c9nRBUoVBs2bxLPm9eL55pbZQNbVuYYBZvP54kAMa9uGuiqOeaUcbjK\nhujqJQVeGusSBSNyQ24wNETMNipGM3DAN5tNxXChDOBSlO9kx82KDkeWy9ElCt+RnflMrBtryBio\nKmO+zdlxBa8rJrmnnT4jWciyo0iPg+/aebVj4/5xfO4xwpskUkYuCOJoOsr6ttvt/B5z73Mmedf5\nfB7Hx8fPKuTm83kiM8gbetDcTz4DAWMP22Cw9+v1eiLHNvrw1Ox08xlrbqJxKQf+W/+u1WrF2dlZ\npRcV74osLhaLRPsYP/qXefMasiceHx8rhHKjjA7qeFf2DQ4t62tSN/NipGhvby8mk0k6zC60oAlt\nxNapNtmc90YueR/zLZEh5NC/512M6ltfWrej890nq9SLRq/s2LAHy3YZETuHnwxNeSHDAB6WU/it\nOB3sReYR3e49bsTcup95BGBwdTRzaGfHAaR1RhlcMW/fQikJgLH3Rj/xJb51vYojRQrK8CLOAwrD\njhQKpOwSHrEbJH2Ibm9vnx0GzN/b8FHeiUPHYZ4RUTH0LBZC6vQUxslwu2HIshEeqIU3dkQVkSgh\n8dlsFrPZLKNcHDKeV8LEVnCMA+E3cR4hMSQKSsBGxGgYSgXdw6GwsrXX7k0fUT3YMqLaKblU9GXK\nibFx/5J8yf+bSItjiuIsz5djDkBH+Iy/x6Ep78n7oCCtvEBPTKwso31ko3x3fl8qAae5/OyISIXN\nnNgQsZ5EipYtPxuDY6cWRYpi9Pjr9XqldLpMFaH8+/1+9Hq9fJ7nn4g5IrLAxIqb5xHcuJKG98Sp\nBFFiznEmn56e8vDcx8fH6HQ6FZQPJInneYwgdXQrt+O6Wq2y0SXGnfk2quk0Ow0nI7YEaVAmPnPk\n3Ov1KujB4eFh3N3dZbDIe4PSm7hsnYIBoyu6969TSHaEcKxLJNnz8i3ECv30D//wDxlIcb4fa06F\n3Xw+z15bOKyQtUHleCYBEfLswBCd8NJei9hWi56cnDxrqExAVDqLrBE6zvoN42rkhcvBvasumRfL\nrh3ziCqR2VmTcs96rdAfBG2mKHgty6pPfg8aD6rmTIEdJWcA2u12xV6bUM+cmIjv5/k+1us8z/Lo\nLIWzG8iDn1eiTRG7fVGigJYZO9LlM2xj7GOUgUR5vXpDTsOLrug4PDysNJFjkah6s7I1d4NGiRGR\nB+E6D2r4l/41RKgoWibN0W5p2Bzle8IjdgJi5WblWkZfhndLCBZFCZRa5v9LbgljtdOAk1GiYDy3\nFHKcKMZQjt9jdprSOe0SIWBMzEPpPLF5S5jdskIEZ0eyREts9EAh3ZgyYufU1mq15Ok5VeQmjd7k\nvA+yw5xylQqjTIUgVzb8yDAb10hZxK6CzakUfoeMgP6AmEREVnAig1burJODFz4Htkepl/LhVILR\nI5BkIjnzH+2Mkjbgu/v7+zEajWKz2SRKZLlAVh4fHytNc5EjHPqyxBkdQosQzw0VZYzT69TpdOLo\n6Cj6/X7SCcyLQk/ByXPTSebIFYqMA90C4meHAL31+PgY0+k0uVWknknPG80wVQC5cEAHgvISr4O/\ntZFnznAGSK97r5WpXu7FvFv+G41GnJ2dRcSWJ9RutyvHQKH7Hh8fU9czJuQb59oojeWYvktO1fl7\n6/U6kQR+h+OGDjbKGbFLNZPa4jOcZ3qb2YCjB9AXTsWi7325pxPPZJ+xXk5/od9LVA1g4FsIo9e6\nRFwJFo1WoZP4jN6EEbsejugEUn0ROyeTVCJ/53fgOy/xmZC1MlAyqmZ0GVnBttmu4TSWVZsRO32C\n3vcaYHvsK/gq5b68XsWRIlLyyyKEeLZGQVgEoHZ7vD4+BO/TpEujMm7Qxf1BXSzs3NvRhx2oiCr8\nXUblLIpz+ihroit3cDbkSMTA8/b39zOdiDKz0ua+Nj78tIPGRi3hSZQFyjpilxJljCgej5t3i6j2\nmmEM/J0Rx8VikeMoOVne/IbUuS//BoFgDdm0PmIj4nmDTMPDKHLWyLwFKx7Wq0yPls5TxC5d7b8x\nsub0FH9jTgFyhBLge6yd0wEocZQgcmFIHQOBsrSTaeTBqdqISKcTFPju7q6ShmHdUESMH+4i7/ZS\n6gCn3Kli1on5K+fX6S4jLqAbDiJKpMTGxdwyDOJoNMpxU6rf7Xbj48ePue9ms1l+RmoKBMVR+d7e\n7mgbHB4rd4ISCMTlWXtHR0fR7XbzKBXemaNJ3KIhYpfa6/V6z6Jq5JuAwagDsvgS+kTaDcfXaV4b\n1ZcCU/7tgIG5+fjxYzw8PMRkMsk14Ygg0pO8q/d6q9Wq7HenZF7ah9bhZDB8Tqgv97FCZ9B4mD3v\nZxOUkWYk4OIeDhSs25FJt5owOsYzvDciqvu8RAedtsaZdMBjNAfdZkqLnSPf3+vKu5Ryw/1s2/xO\n6M5SPzDPJurzLp4/yyL7y8Vb/I2pIyWAYPv7UnoaPet7lrL0/0Kgyuut/cHb9Xa9XW/X2/V2vV1v\n1995vQoiRdSNhxux8+SJSs2DMgrTbDZjNptV+Ex8D6jWnuRqta1EaLVa0W63s+IN6JBqC6eJeEci\nb0c0RsmMcPCTvy3z4URQjMvvSTQBauBUA+9qr53vAXUbPSrz3eTS7amX3zW8yZwSBdj791hJGTnq\n9/1KzpbhWSMSjMeImc+jMp9psVhUogh+wqugeZvvCdTsgoG9vd0p5D6Lje85Bed0AvczIuo14Tus\no+ebOWc+WRfuQXQGImkkK6J6dh7vY9JnCet7L1jOvPaOQP08p2Udwfoz/h5koV6vx3w+r9zT1atG\nijkuhHE5Ted7m0uETnCE67QUqcQypQ3fqWwCyzqR5kCfXF9fZ1UQ62V0DR6To2DGAZJepsBcNk+z\nSGScDtLr9TrTkOahWMaMTIAar1arRAiM4h4fH2c0D8rPvdiDPId5ubu7i59//jkRI9MInOIvES7W\nGFmzfkJGTk5OYjQaPePDwS1DVnkOa4TuYs5MqEfG0N2me3A00MPDQx58zjhcqWlOFrrFdBDrIjeT\nNW8WJA1kyLYEdNkpTC70DjzUzWZX7UeazEiTdSlotCv4LG/oEfad541CGqNIETuqAP9vHeGWISWH\njndFr1pn8BxQvpJzauTfdt52zVXnrIPPRLScIofoe2d3/G9/h8/M5yrRqP8XOvUqjlREtS9HxA6C\ndG7YJ6tjmBEuSpIhYnJEgxcRRwjFcH5+nlUflNzDJbETVCozbwzSaCxI6SzYcBreR/DNbbCBdkrI\nm6fdbsdqtaocemwYsyQplkosYnc0jeF2ExlRHM4bG6b1JkWZojycu3Zqz+vM85yDt/HGCOFEbTab\nSqFBrVarVNaU/CwUpufe6VycPjs9GGZIw76nFaONIunoEj5mDOU8lXPn9zcvC4XKOvozy5G5BIzD\nKXGKESKe9+3y9xgbStXOcrmWJYfECg7Dz9w4RWlCPX3OaO9BKoN5I03HcSnu7owjxHqRTvERIHDO\nms1mpUs1c8062MnC0eK7yD69kNbrdeWEBMbulLDnhXdjbY+OjlJHRew6apdOtOeKquKSb4U+KI0q\nfCIoC3YIeNfSCPN+dL72PmSfQQR3+op9XXJZuOysvxTwnZ6eZvXedDpNeePZ1ouWNfau0/YRUdnX\nflZE5JmVDib9XVMwXI3FeNFRbm/B2HAYLIvI6XQ6rZTsI084NAcHB9HpdCq8NRwUeLp8bzgcZqq1\nXt+dp8p32NcOKk0xsb50/zVS9qUsW26cAuNijzL2krbAWpqu4HVFfsqiCJ7LfuR7OP+WQV+m7fgd\nDFJgG2yD+Ftk34GfbZd5bsyRdUJ5vZojhQIzh6YctDcbCpfIi0E+PDzEzc1NLJfLODk5qZDUjIBw\nnAqIlPuE+D24rIh9WZnxd3ZsbGQtiCi1knPF91CypXPGpnHU4Dm6v7+vKGkrTiIEHAcLDpwNR3tu\nroizxN+VeXVHJnYWI3aKqlRe3rSr1Sq5IDRDvbu7i4eHh2cl0DgRcHe8WT1PZWTC5xg9rxPjs/PO\nOqGIcGxMDvW8oHg8H1yes/JznAOeSwsA5vIlpW+Oj50eGzhImH4+37eSctNMoxWWS+btWygPChFn\nASfOCBCK/+DgICaTSdzd3cVms6kYE/rsOOhwBErFFc8teRwlWdZVbEZBPQ4bCxSqHQH4NSBwPiII\n2ej3+xVC/d7etiEmfXsspwQyPiLHFWKME91hvcc4eV8739wDbs/x8XGukx1P6yz2EOthOeTdMZKO\n5nE6/T6WF+aOvy2NFAgC+9vBVjleF1Ogf8rDlz2H7AEjciZS39/fVxwL/h4HxeiJuZvmMXpvlkES\npOnj4+OKrPOerDF7mHvSRoN38BjgJ47H41ittsTuk5OTiKg6X+xDEGjemywGQSpzaoQeveJed0bN\nvYYgd0aMvd5Gs+xk2WHj37ZB7Fk7+tyTIK9Ef9n36CGADcue5cpZCtYfXWJdShaGgIL7uGnw/68c\nKSJMR7sIJ8JhyNWKgAm08DuCt9PRarWS+NfpdKLT6VTKw70IpbfJhsHQlsiKoycvsqFKDE/EzpHi\ndx5TGcWVqJqf53dhYRE4k5S5DFeWa0BF2mq1rVKhlxKC/1KqwlEFSoJ3L9MGRqv4LkalTOE5rUn0\nw+UKmnq9ngfeopzYjE5TPj4+xnw+TyKz54D1JrVXEtuJ4hyd+DPkr0zbMX7ey0bR8+X3QE55P9Ai\ny3OZ9nIbENYXcrRJno68mWPWAdlGzsoKUu5txcQYWd/5fJ7Po+qsXq8n8ZzP6GiNzJrETKoMOSNg\n4p57e3sVON5pNubZCKoVMpVWbhkQEYk0gvZ6bozg8b6lYu52u9mfiXsPBoN8frfbzdYLPA/nkf1G\nawhkEATRUT1/64CGq0SgV6tdbyDObavVahXjwBoyBigS3Pf+/j6urq7S0D49PVUQfN+n1CeeO+sL\nf9ZsNmM6nVbWCeeQUnwbVQJg9ken03nWBw8kj88jIh1FZzDsDDNe94XiM9ae/9wShv1QOv7IVqvV\nyoDdhHIj7g4wWCOj1A4MaZtB3y1kjc7x/X4/74cNQKaMRtqRBIHHvrbb7bxvq9VKXWPknbkh4C2z\nDuhzvlfqRuaPueC92HNUNpZZg4idzsGx9zpxeV9gX+wQ2uHl3yU6xj7BL0HfRuyyDS/JfL7DNz/5\nP7zgVdiZIOpwtFimvnBq7GFH7LqN4wjwbxaIA33tGXtTgIaVufLSEYjYRWZMtHO+CKEXiO87j196\n2SgEQ5yGbMt0lh0polkcQvfJcjoNBM7RkJ3RWq0Wo9Eo70seuoyuKRu2YHkcZQqJ8TO/ID12bOwQ\nAZ8breLvqWIqBbzT6VSUKrJgpWfHl02JQ1XKoQ2BS3lttMu14KfHUkatdkbLYMDzao4U70/AQJQe\nsUOW2u12lu3zWYkUGpFB2dEry9GXUT07/YyRMVOF5L28Wq2Sm1NGqSBLyCNzCoLMvnLHaI+XPWpU\njd42/K3XDbSZ5pAOtsyNQnka6cABMeKH7DPPOP7oGoKyXq+XaJajZNZ8uVzGbDZLY9pqtRLB8Poh\nM94vOFx85pSRKyZBWjD2Jd+UdAmHnXstZrNZjMfj+PTpU7x//z4/43slks5lXYBuY97QFRcXF/HL\nL7/EZDKpIGt3d3eJbMJbjYhKaxrGUvIxSePhhPNZxI7n46o1Ak90vA9BtjPDOOxIGsVw8OVgCZ1s\ndJW/Jw2LE03F4ksUg81mE4PBIPb39+P29jYzDxERV1dXcXBwEIvFIo6PjysBpNcKYIKAmPHbDs3n\n85Q5pyvLVjvWOWUGh7kkOAeB5Hu2oQ7M+DcZJv/eGSrskd8FfYk+9t72XHh9uJfpEB57xG7/Gzlz\nsGLH29erOFKUAFv4mVAm2oYPRWIHxz1hmBQWEWEk3WCinC9D/mXulsm0YeInC8IY/FlEtZeNvWWn\nRkoHzZFQqbDKCMAOJhsGo8jfuVGjkb7SeLvkl4teKygaFFrEThGhbOy5R+xIzn4/xr5eryvlryaA\nGmms1+vpSBGlYkBwxnhPyKG8P99DiTKPjoQctSBvzAv/9tzZWbLxtRwyXygAjLHRLOYLhekNz/qx\nyZk3lAVOhlO7oD5HR0e5RjZodl4x/FwQX9l3VjZeh729vZxT5BQ0wKmter0eZ2dnKRtloDMej+P+\n/j76/X4lPWD5Ho1GGVCx9hg2ggK/hwOSRqNR4SSZY+GjLSwjpLRLw2jH0c5bt9uNfr+fjliv16ug\nfZB6F4tFdDqdvA9pmIeHh0QHMIroJkrr2ZO8B3KCIbFu855oNBqp5O1A7O3tPTsiBONeBkSDwSDO\nzs7i6uoqhsNhdLvddCTYY+zjUkdZNlgb65TNZtt5//T0tOJIU3hA/zJ3y0cn4IC4iS/B3cHBQTZh\nZt6sYwm6S/SDe5VG3UHAS4G+G8va0BIA8rfm8rF3Sb+zTpzNGLHricV7gkahb6fTacot5xYOh8O4\nvr6OXq+XGRfGCMG+2+1Gq9XK3mQcm2QbwkVmAtsHCZ6/I5jFoXXwVdI/bDdJ6aOvcLJw8tEXTgki\nI+wVOH3smdJhdxNuZK8MvngmOtGpuvV6XeFm2jllj/xviNRb+4O36+16u96ut+vterverr/zerUj\nYpyrjagiPnzuPHqZV/e9QGWI5k24dXrFED4eJ6iT03gmpJE+chRC7t0oUsTz9ge+uIc9bv8/0VHJ\nxwD98vw4mmg0GpUT440clZV6TicZDiU95tQTYyO68PeIIohATUB39Zyf91LDUxAER+AgPY5mI6pH\nrHCRrpxOp8/I3bwzESbrybu4fNZRGd8zF6DkpvB+Xm8iT1IA7gzu9ycK87uSkqRCx6kII6Dm80VU\nKxOdgoyI5Nu4oWYZXdfr9SxUMFfA6JWP1/A5ZUZBI3aVeYyRueB7e3t7cXx8nKgNa8EcLhaLPOTZ\n8srflpwNokZS/uaXMDfMf5lqdFdzIlOj0SBUrD+f0VkdfpXPY9zb21YzzWazePfuXRZORET0er1M\n7Ww2m0zpMEZQzvl8XiH/Ov2IfjKJud/vZyr4/v6+kjJiHGUqharAyWQS19fXFVnb29tLQvPNzU0c\nHx8ngR0OCUhQicow58yPOTqmFFxcXMT9/X1WBpJROD09TT4biBRpXloHkHVgjKQw4ROZqE3aeTab\nVc7RRKch204HIwesu2XKyCNyZ3tkGgGHi/P3Tota7/vs14gqknN0dJScsPv7+zg9Pc15ub29zcOh\nn562x72QrWGMNHAFmTLCPZvNUtfw91ymOpTZFNJpjNk2x2uPjkM+jAwig9yj0WgkXWOz2VTmDR8B\nmoptt9+7TNuX2RxTQbBZ6Eae5+akoI6uAPbavHS9Wh8pJhdIDkUBdGiYz04GisWKDw4M3YZduYNB\nYUIt/NzX/CU+8zvxDvwkzQDUWRLmMXwWRnLFVtrepOZ6OD/rXiLcwwLFu2AoyiNFSOuRZrLgYODg\njvEOjIs18HyzSXhHuGdcTqnZiXW6hE1Vcmj4W6d1MeQYPVI1EZFcFfd2spK0jJgL4TRR2d6CDe3/\nXiJAlukLp/DI+Tt9h+FHFuxoNBqNyuHDJrlioG1UbYT5m5IXwLOXy2VWBdqpZR2RN74PP83Kn5QB\nBgpi9P7+fhrtw8PDJMTCLbFjfnFxkfLhvd1oNGI8HqdD7nQK6QOfVeegCGiesZE25bt7e3upFHFk\nLANWsrxP6XSTGkPe6DtX6gVSU5yJN5lMKlxNUu3wR5zWZp65h51zZty5IgAAIABJREFU5gPnCqcY\n5wkeE2ensb6sn3khXKSkN5tNFmREbB2+wWAQ5+fn8ec//zlub2/j06dP+TzmjHcxRcF6kfn1vmE+\ne71ezk9E5AHBR0dH6dyXQSMBBE5DxNYoci/SUcw3jhzjs55Bbzt9Z2I478tewz459YMDx1jZa3Z4\nuRfvjd7a29sVTEwmk9QTpOjNLdrf34/BYJD3gxR+enoa8/k8ZrNZchUPDw/TkTZxm0CDi/Sl9b+D\nAeaeQNH/RhZxRlyggp1lvbx/0K/MMwGGKQwR1cKOl7horD29wSKi4oiyTgSVJYVms9mk00YKnjEw\nV4+PjxUqA/OJ7fzuu+/ipetVHCmM4mQyqZC8yH1inGyEMDDk/b2BUaRE515ghMWC5av8bkS1HQGX\nnSs7e5AhuZdJch6D+Tg4Jy+dJehoKGIX0bzkDaO4XqqE87g3m03lfLmI6tlJKBQT3BFilw0zfnPE\nyKszBngnlN8aBWE88EScY4fcXvYCQ9HxO6MgzH2z2axwrzzv5irYOcVxRGF4fm3kjYQaGXELAeYH\nzhBrwv0Zo422KxaJeOzYgNa57Ydl1GtiJM/IktElc8Qwshhjj3k+n+d5YrwbChoEEKVP9VrE7lgK\n95vBYGDovbfZv6PRKLrdbnS73exFxcW6gpx5r3G5cMGE65ITVxp9B0LlfrPjYUIyjToxmsgtzzg+\nPo5arRZfv35NI8hYceTQEcy7jRxOPe95d3eX+wgOVnle4Hg8jnfv3kW3283v3d/fV/af0WfmhKo8\nOwv1+rby7J//+Z/znZFDxm7DbV1aoumuBqTQgmDQxSvIHA64nUzrByOEyM1kMsl97AADOUGnQKqP\niCw8cjDvve9WJF4bileoiOv3+xlEWCaMoETsAivkxXLIGm42m0pbHr6HE8R+LZ1veHfwj3gf9Lz1\nTYnK8H3WhGc6qPa7urChtG0OwAmSbEtKuSt/Vxbi8DwCJYIM6yjujd7z941io7N4Ns9C97EPWTf2\nPwic19BOXXm9miMFzM3EEXljoL3R2EQIvZUNG6hETfie008mlDu6x5Ba+B2VOJ3GpmZRTCzEMcEg\nsJBc3MPoD89zJFymCN0N3R42UYBhdkdXjIE5sxDzXgioFSH3NNnXwmgFbeIiY0dJ2Qnh/VlL3olx\nOO3puXEaDfKjHeWIXeWXWxawVm4LgNFjzKQFXlonZMLpOUPzoHyeb77HvFkmUVCO1IwuUGGGo2mE\nxA6BI33GjOK2Axqxq3h8enrKrtIREefn5xkhllE5zhxOoJE92hogo14nnK+Tk5Not9uVoGU2m8WX\nL1/SKTTZejAYxGAwiNlslvc1wRljWDoeyIxlPiLSsVutVs+iahupTqeTMmw95GianzhSOJRHR0cx\nGAwqyh0jNp/Pk1TLvqDP3Xq9TjSrdPoJdpx6IoKG6mA0qNPp5P1ns1mcnZ1V0sFG4O3Ez+fz7Pfl\nc/UitqkmUmKnp6cxHo8rRh9HxwbFASYXDhjvY0OJw+t7kO6lp5gdAnQ21YRGemi/MRgMKtQE9k+t\nVsuSft7PvbXKjtkQ2tF/TpfjALNn5vN5fs9NJ/lbX/69U6F21Ph/3un+/j7u7u4qjnCJ4PO+Tqnx\nXfQBY/VnzE35GYE8Py0bUGYIwCzDpsmURS04gbxPidJbzxmpZkz1ej11tG2wD1dH5hiDnSD/P/vA\ntBzLLTad5t4GAco1La9XcaTgZZToEIuL0WdybKwxWChMO0OlJ44gvAQxR0Tl78o0nqMrvhuxi7wR\nrDJ3iuEHtvQCIIQ82/dGAEGK7JlzLxtyX077OOK384PRsfFjs7AeRiF4FgqKuUIpMV+eT1BFxmfn\nzP8PemOHyBvBUSLRFuMwQsAY4SUYdcKI4kjRw4bne60pr+Yq00dWNI7mQJ4itkbIfDrQIKInlBCy\nY2eCjU0bh9IZZDwoPe8bFOxqtapEX+T4QUJc0eaO7ay/kRXkqByj0yk4OXyPeV8sFomQsE4Yebo+\nTyaTHCM9fzBw+/v7le7OtC9g7v085hKn1Nwr5IP9Z5mi6SSIolOjGFEiUNYlYme84DzZWQDZcSNE\nIwI2EsvlMt69e5f3vL29TfTKe5T3Qj/ZwSb9y5Ez4/G4QjFg/Pxng4i8vXv3LmazWQyHw3w2nEO6\nj/O84XAY+/v78f79+0rQYVn0c1xJy/xw+agUOoX3+/18H+sCG/DpdJopQRA1HAjLxnw+j06nk+vl\nlO90Oq0EgZZd0ousgYNkDCuUBHMVqR72XHu+cXrpeF6mz0Cbms1m6oTpdBrj8bgSYHDPdrsd3W43\nBoNBIvLsrYhIDtR6vc4UZGn37NzyE3QXygR6jjUD7GDflylDrwVygY1wWs+UBv+t6R4EFryT9V63\n262ker234fxhw6wT5vN5OuPouPIwZ9r9lClP3udb16s4UnjmHiQoTwm/R8SzyMAIkR2aMjoyIlMi\nOl4YOx08A6NneJl7WhE42vE72bGL2B3L4fE7SjM6g+MTUeUm+Ds8z4RSc6CAmf18O4i8z3Q6zZO2\njQ76dPsS1sRDd08jfm8nAqfDa+LowBvRzlnJaWA85cZg/HZczTGwk+s0Fs6YYWzek7/HCbFzimGB\n01Gr1dJRQpEBR7vFQsQ2FYGicT4+IioGAHTNRtfBQnkRWSPbzB0ybc6aI3YMD6iKo0s7Guv1rplh\nvV7tTO20gJEfDLs5Qhj+VquVTkNEJCkbuXl4eIivX7+mTADfk9Y0pxIn0YGGHSLGZaQ3YpeidBrR\nQZtl0k42aSee69QexsmXCdaz2Syenp6y8zWoy97eloR/dnYW8/k8z7iL2BrEwWCQZxiSUo3YcdJs\nOLz2ZdqjROWm02keWcKFjJJaxnBGRHz9+jU6nU6cnJxUUh5lMMyYXiKk8/ePj49JYqeRK4Yd3g/j\niIhMk3748CGfMZ/PKy1fHHARkDqjYCSe37t4gPfmHe/v7/NsRNYSR8sE74hdGgo7g/5kfDj6tHYw\nR4iiBdBH1tD6g2DRuhOOjx1NIzbsXTew5ZmkzJkXy7cdIqNgOJfQR0hxsj7YIWy1u9MvFosciwPh\n6XSa6DV/Z8eG/e9CAr8L7+jAkzYSs9ksgwZ09Hg8TpQPvqMDVZxd0ro8k33vwKC83tofvF1v19v1\ndr1db9fb9Xb9nderIFI0net2u5WUAHAw5FQfd+F8v73okuhWpvJMGHYEDWzp/K0vf6+Exg37ObI1\n98fRCZ+5SsH3NApH+uqltJzJ5L5HmT/mJ6kBR6VuWkeUDKfDc1Ov1xOVKvPz5OXLc8qYj4hdCtQo\nChGLIz+/s+/h6Jq5dKqC9eVepLbKiM5oHnNkNJGUi9HB9XpXAgvMy2dElLwPqQY4DSVqSGS2WCwy\n2gbp4AK+BmEzodMIYhl5mi/C35lfYySiXq9XiJWr1SpLzS3Pnmfvs4hdStAkfaJSGoKC2O3t7cXl\n5WVEbGW41+vlGji6hne1WCxiPB7HZDLJyhgaDzKuUgYh7ZeFIVzNZjPu7u5ynzBfpFhANEq0FfSK\ndyyPweF+Lpgg9cPBvCAu3PPdu3dRq21PDzg8PMzP4YQxR+12O6uGTC7u9/uZxouIlBPWx80qQbxB\neYzG8f/NZjOGw2EeJ8M9QQ+QPeR7NBrFYDDIPVA2OGZOeHf4ZP4MXQE/kfUH4aCKyqg632u32/Hx\n48eck19//TVub2/j8fExptNpHB0dJWGbPWsCOJf/TSf5Ml2KbrLe8xmJzWYzWq1WpZDIfCzzcthn\nk8kkDzW2HLN2EfHsMxAwPydiu99/+umnaLVa8fnz52i1WpUO9XDdSH+RrWAtQKE3m20bD+tT0CGI\n8MzNdDqt8EKhUzCnoMW8g9OMpPJB6vis2WwmcmS9zthNGuf9IiLT5mUxmmXYSBM2k7Y3/DS9hLnh\nvpZR9pFtb3m9iiPV7/efEaVZeKcIUKJAm6WR9uW0idsfRFTLLF9yiMoc8ktkcH9mx8RpL94ThWGO\ngR09vyvfsxPonK/5QEDI3li8e5m69P2YV48DJ2N/fz8VuVMzODMoD0iu5qGhaEziJu8MsdYVOHYG\nXjJezKVz6lwoRhO8cXR5l9LJgqjLmpSlrqS3mNeI3WZ7fHxMhcQ68V4mW9o5wXFeLneHantD2zjb\n8FnR8878HcrkpTlBLsw3tJwzJ/x0t/iDg4NMq6GIucy34DwvrxOOIDwUPiPlzVph2AgAlstdXyvP\nAyX3h4eHcX5+nob98vIyvn79mkqYd2KdPEfmhjHvpSNpIm8p915j9h96xEax5P255Hy1WmUBw4cP\nH/LZm80muYb1ej1OT08zvQkvg7PSHHyw3pzjt1qtMiVIwDkajZ5xNR2gMXe8M2kpAoYff/wxn3Nx\ncRF7e3vJ+cEoRUSutx35cn5Jozw9PcXp6WklBch8Ybw9hxhSjJu5o+w/HF86dLOncdapqkOmcGbK\ntBjrhMNYr+8O3iZdt7+/n0UB5f5GT9mQ8z4Onpm3xWIR0+k0gyh/pySMs2a8C845vEG/J7YR58z0\nFOYNh8N6n98zH6bJIHukrk3mHo/HmX6MiMq5tegTU0NstyJ2nf/dl86yiW607ceeAU7wPeSB/VLa\nNQ6C5vsELcynA3cHnoAG3NfFBHAgv3W9iiMFv8QbnFLE2WyWSrdsjbBcLp8hHSh3lByGkM8c1Vg5\nIvA2qiYBskAIlhcfgw6HyGgQThjfc57fqJcFGI4P72CUy/wunmXBZxw2xtwfJwpCp4UfIiLfh2vD\nWhjpqdfraRS9EZbLZSoK3gfjSgk2Df6IDNgwRlFwEsh7LxaLigNmlMToiT9DNphvVw/S98ib2orL\nhwTjzOF4MBc8zwrGjhuInpVNyZGDn4ACRR7gTiBn7iVEWwSTwK0I7EDaocI4sf7srYht/5p2u51j\n8D7EUUJe4YNFRFZM4jAZPSjXZr1ep5Nxf3+fDQRZXwyBW4CUvb84+mIymeSxK1w446ztZrOpkFVd\nQUQ06R5TyD5G0bJBNGpdwT0ho242mzg+Ps53wvnCKWy1WhX+FEYCUrEDSNaQ9i8YxHK9MfCeAxqY\nGsXEEJgTyQU/ptlsxvn5eczn8/j5558jIrInHLLkgojT09PKWYA2XlzD4TCdhaenpzg/P8/7Gm3o\n9/s5pwQrIKB2KkAwyh5cyAqGcTweV46IabVa0el0cj+iX5lvI9F2bB4fH2MymWThio2py+XLAiRQ\nVs5/5F6MD7lkndFtIGd2Rrkn2Rre1foLvtx6vY4vX77E1dVVdDqddDJBKuEq2Qnhwqk3Lwv5ALFG\njvx71n+1WiVaOR6PMyDAiWK+W61WxbHCnkRE6mz+xs4g+9D6xM43z8D58fE8OFLYcfsK2AuDD7xX\n6QCaU8n+/Nb1Ko4UG8PnDi2Xy1yYiK2nXSo3lGaZSkLh4927Uy+OgjdQxC464vtl2i9ih2SV8CJR\ny0spKf/OhpaF4/dWmCWa4FRdSTT2GHCqcBBLlAujDOpgpI1IkPG5d1Cn00nFUaYMy54pEPj4Hqka\n0CiUBukUjJbTRhhunBejNVyeozKdC9HRf/f09BSz2SydH883/2bdDdOXFWwPDw+pFIhkUTTInNea\nEn/elfsapmbjGz0ySdNKA0Iz8LmhaN6JMVlOnMqlWaMPgnZUaqSHMfV6vTg+Pq6k39mDdqK81/jJ\n+to5pfM5KW5XUD49PeV+9/52ZGiSqsfeaDSyms3IImO0EbOSZq5x7P09HEIqPm34WDMcKt4VJ+T4\n+DgPkWbv48RiOIjgGb8rOtFHjO/x8TH3khFnZMmHRrsTc4mMcpHq5Do7O4tff/01IiKur6+j2WzG\naDTKTvNl2icisk+c5Y13gjjO3zE31qOcWcczifR5X1fzGiVx81/k+x/+4R/iy5cvMZ1Oc/yz2Szl\niOIP3p1AG7mxjttsNnkfnDHvfewQOtepYhwk0CH2DM6ou4+zZgSi6FqjK65QLnWxEfzLy8u4vb2N\nTqcT//iP/xgRW6f37u4uKy+dwbGzQoViGXwR0BN8+8Jhsm6bTqeZ9nexBPLd7XYzVYjjHLE7g9J0\nGKfvmBPmyfckwMMOs/bOWrGmzkyVtAWvPTbQBQl8xnO/db2aI+XNErHjCtgLR4iZ1Iid124DTzXL\n0dFRVjFE7BQf9/NCEa2+hEhF7Jyoer3aOJNFJ3KCE8JlR8e5WyNc/Nscl4hI6NwX0eFLjpc/Y+N5\nDAgNyFmZ52V8KG82OPNDVdfj42Pc3Nzks1Fu9PIwT8ZIhisqUJ7T6TQjfkeXKGbQKTcJRHGVKQxH\nk0YXGDu9glBELuPHEep0Ool2shb+Wz+PeSZlYNTCDpbljMtjMjISEdkHx+lkvk/FFvLksmvfG2eC\n9QX1JQ3A+0VsjSkGmujOfAAcJDeAjNhVQlr5sYZHR0f5rowFBMyBCdWc5nvgyIzH46zi4TOca4xs\nabjLd/Kc4FywtsgL1Wmnp6eZUuTIEpwdI7KmGDC20omwjKxW26o3o54475PJpIIcgsItl9u2A+X+\nZ1zoqjIY4//tfDutz2XklPeBk+Z+Zg8PD4lQ2ZiQlqWS8PHxscLfwSEwWoMscuAs8miHEAe/5LlE\n7OQdvXVwcFBJF8PZIhC7urrKzwgU+dzOGvdG97uFCVVim8321AY3ypzP5xUeJPdEN79UBet0J++P\nPD09PcX19XU2Ae12u4niYuOGw2HyK603bONwtli3m5ub5Dk5A8G7EpxYr0ZEBpS1Wi3Tr7bTzWYz\nUWKvE20aCAqcdvceKoNPbKhtiCsTcfSwX85QsW/YHw5oWXPGYn0CKu6/s1yw1wxKOEgmqCqvV3Gk\n2IQWRoQDEmmpGMo2ByguTih3p2WXXfO3GDffs3RMXOrJ37CgRkEQRj7333sBrMxwEF4qZydyXi6X\nKYR8RoqKVCeRdsSuf5QNq6Nu7gP6YljVJHLIo2yQ0og4ZcIYfLSE58bIAj2AIrZw/sePH19M3WL0\ncEyIbCOqKSPmzQ4U788clGRc1qtsqVCm9vw9b1Dn7RuNRoWYW6ZoWJunp6d06rkvcDNRIo5KRCTS\nan4RRoj3wTA5LYlcUTTg93FLhdlsFrPZLBEp0CBImXAXmJtWqxW9Xi96vV4FAUMuFotFpX8N74G8\nMUfwgHDkut1uclYs+/v7+3k0zXg8TjmEQGtyr1NK7GnLCM4pv38J5YXgihEy34V7gGSyPhGRARtr\nRCqbiz0FksTcgCQul8sYDAa5fyKqPZVAyMq+aaXTFvG8hYoRXkfZyL+dqna7nWmsxWKRzsL9/X1c\nX19X+EBGCGazWfK5cEDQ36PR6FmvMuTs8vIyzs7OMt1p7hUBD7zEktOzXq8zZbi/v5/zxhwMBoNn\naMZoNEo6iA2p18IIr8ePM4hMIP+DwSB6vV7ynXD+uCfPQAcwPsYLFxBHLWKbCr29vY1arRYnJyfR\n6/XyXY6OjnJPuK0Ka02gzx7FyY2IdMj5e/c0sywQiFgPky6mNUAJWIAgO7CHqmCagBFXUp7ePzwP\nO+LsAt/D7uLEsLcJ4nge9obvkUWgeawDKZxI9jfvUrb7wcn0nL2Uzk5Z/OYnb9fb9Xa9XW/X2/V2\nvV1v1/96vQoiFbFDb1zmTxoOUjQRBtE1HiIRc8Suyyu/A+aPqDZfK/Oe5hQAIePV4qkTdZdduM0d\ncCrP0KMrGiKqh6Jyf5+CTarPUTZjBloEPnbFmzkEnk/fn6hof38/yd9EMUDOpF24iHhOT08rCOD9\n/X3c3t7msRdOYQFBN5vNuLi4yFw68wB35P7+Pm5ubrLihkoi1sh8FhAJ5KNM1RC1MWaTjb22/D+y\nx5zzfe65Xq8T+SSSK/lZJkA6SgE52N/fz7QiqBPRD5ENURzf46wx5tpcHJNejbKAtPG5D1IFxr67\nu4vb29uYTqdxe3ub8rDZbOLu7i5arVYlgl6v1/Hhw4fkh7iBIBGlU8tccEQYt2WRKrFut5vRIvdg\nfagUBJmM2O7tyWSSyJHTRUSJruRxeq8krnodifo3m012OvbeBSWjczhrSLoDBNQoJ6gDJH7uxXwj\nw6ytkTMIwRzfwkW1WSlnXOZt+iqRca8VSDx7ZjweJ3K0Xq+zkzbNfE0S5mw7UNyIXWX1ZDLJcdJC\nwBwxUqkUGjgbYESURqDMN6gnc2YE1BwZ5I55I4UDr4r5RieBZJti8O7du9xTf/vb3+Ly8jLRSLiC\nZA8Wi0UlXcj68LlTnEZWPHdwhU0hccYApPrg4KCS4kdekUPub2oAe4GWBdbtvJtToMwbmQlQS5/d\niv1hXaGC9Pv9Co2A9DjyBk8MvWAkz4R4tyKhjQiFCS4icxEUGR5XjjOf6GrTWdBp2HCnGXlfUCn2\nCLLnOSyvV3GkDNFZ8WP8MTpOvwC5NpvNhFkjtpArsCGGpCSN45zYyUKYTGg1L8ZKyqkmFh5j7Od4\nAUkZlL0nSKeZX2MHjs3B2MsqpJIcyLsDR5ojw7sgJFQMcT8UP0oPnsh8Pk/eDrC7HUNg6OFwWHFA\nIXhzoOfp6WnOM1D/4eFhwtAUFxjqhwhqZwkeh40Pn8HD4vsoPhtzeGKG39nQ/I1TQFQXomic+qU9\ngavbWEOgYZTYarU7nNaON8qV78Kp4H7O+TMe5MJwe8TufKgylcU7oPycMiH9sFwuK5VOEbuzJCm/\nNu+F1B6p2Ol0+ozX1Wq10jHFQMORcTDBuLjXbDaL//mf/4lPnz5VeB3mL5YOiA2LeYDMCzIC6Z65\n6Xa7eU9aQJiTR0Uba2UHHNIzhtXrw96m+7kvnGd+z/dIAeOAOD1JdaCDv5JD5fSd+Zhlaq50fn/8\n8cf45Zdf4pdffokffvghIiLXDb3h3kSs93g8juFwmPJrWkOZIqWCDQ7i9fV1DIfDuLq6yvf5/Plz\nrFarXIf7+/uURThDh4eHcXl5WTla6OnpKf/daDRiMBhU9v5oNIp6fXtOG5w1xsH7LJfL6Ha7cXZ2\nlu9Zq9Xi+vo6Wq1W9n6KiPjrX/8am80mixFM4KZikDS6U0a1Wi33m6t2mTMcdrh35nHyfdof8C4E\nSRRbsN7YRJO4S04xuo3fHR4e5vdwyPr9fnQ6nQp3ydXMvLsLoFjDm5ubrNhmHPSXYqx0qOcYKZxS\n+oIhPw5WsFURO1vIO9jusW/MozU/DOcQveiebcgd+5R7WuaojCyvVzsipiSZmaMB2dXcG7xqNjCL\n3+12K1VEVrClsxRRRYbKxbJiRoFjcEy6NEnPRh/BZSGN2KDkMepsAr4HImXDx8W7sCnsIBKtosh4\nFyrWut1uEqrhRUTszvADzfM5XggzKJ+JfnB9Wq1WnJ2dJZoQEelYrdfrPO/KjgbGdTabJaLFZzg6\nVFawEak+eXp6ik6nE/1+Pw0NCh9jbyN0eHiY0bUbF/pdeK433tPTU9ze3maFGvwd5MgOrWUHJ5Uj\nJFD0boAKFw1FZySBfzebzcoZdjaSbsFh2cCg2VlAMUBwNZJ5d3eXCALfN8o1Go3i4uIi59XGm3nm\nM8Ywn8+j1+tVOCYl/xFZtVNLddvt7W1cXl7G999/n2sI4ReD7v1kbhTOsM9pA91lbzjwMRfJpd7c\nl7/B4XXgB9dlMBhUIv29vb0MRHDwuRgz/DFkj+cgZ0Tkrk7ECS0vO03+6eulz9Af3W43fvjhh/j6\n9WsF5YO4DErgyjT2Mghjp9NJPdzv95Pv1ev1KoRy3v/y8jKGw2H8/PPPFeeFvwP9M1KPk7xcLvNM\nxojI/m8cIG10EINIQOq+aIvFIvl833//fXz+/Dnfj+qzZrMZHz9+rHA8r6+vM0C28xKxa0PR7XZj\nOp1W+EpUTeOAuZgCfW3+qxFAG3DOlIuo9h5D/l0JisOGk27Ujf5ZzJ/RQPYUPM9Go1FBSGnLUga0\nkOmR61arVRm/Hd3Dw8NEhLl3q9WKH3/8MatHI7boIIEo47NzhF7CmfJa2HcwKMHvcbDN/eUe6BLr\nKObeRWXl9SqOFB4/qYKIXZrGREuUBZAnyAiebURktZ4hfSYOMi33QjlGVNM73vS8Cx47gmM4FAOO\n0eciSjHCZGXGIpYwPc83cdROHY6Vn8U98chx4Bx5Imh2AlEoJktDHvQGKTe7WwKYqOd3Xa+3J7dz\nOOT+frWBHmOgaswRTbPZzLSIoVrSCybSlmgdisXOCQoTAqMhbJNDnX5jDKB5QOtuD+BGfXYwOPvL\nzrdLdjebTbb7oEqtTF8C+1vxMzZkHIjcc0GUWcob84Fzhwz3er04Pz/PoMCEzL29vWzCiWww9xSI\nEFg0m7tDVnEuSYc2Go1ce6M9/OQ9HQT84Q9/iH/6p39KeUKZsUalM4mzZqTWjhSyj8zxmZ10fucW\nBzwLeXUUjtNN9Rb6CyfPzifzTVoCh7Hb7eac4KyQYrdcszYgp95rrDFz4ZSJr9Lh4vr8+XP88Y9/\njEajEb/88ktEbJ26i4uLdOZB33kG+xAdQJuMiIiTk5NKisnoCo56u92OXq8X33//fTZJRO4JLF1B\ne3l5mfNNmgd5I3im99Z8Ps/xk1qnrUZEpOOGLJycnMR3330XjUYjq91++umnqNfrcXJyksEn3+eM\nSWTVpHfQu/39/TwzEaf6+vo65vN59vsiwEaeQP673W4iwKwXc469scNJ2pM0FX/Dd5F9uq1bt9Me\ngiABpMdpx8vLyyx0QhbRX4yVtUAnIefD4bBS5X1xcRGnp6dxfHxcQeS4SM0eHx9XwA90OM6d28qw\n50tainVMxA6t5nc8m7Qun+Gs4jQZxTMq9q3rVRypn376KQ/AZHImk0luGLxKVwyB5BwcHGSVXsSu\nEWLEzrDYKPgqnRUz+3EmuBDkiGr1HdEIC2whBeXiWf4en6HwXYEEYkZkAlQasSub9/f5HgqNyx2q\n2bR2QBy1wB0gHWeB63a7cXx8nM6uOQDAtFTzMVesBc4xBtV5ZuYYZIbnkbZCeTtFAGepXq9nbxsj\nK0QkODAuVQelLNM9Ebv0FhGiEQSMqnkGXkNXCNmpw5gy7+4UsmdeAAAgAElEQVSpwrrhDODEWN6I\nwGq1Xcdslzrj+JuP4UojpxvgMhilNWxObx93eGaOQOGM6DAGUvAEF9zz7OwsOQ9GYJgjy5dTVMjJ\nb37zmyynt8PrikxHmlRa4kSU6AdoIYaGCJzx48DwXSNk7Dnv/4hICgF71cYNjh9BQKvVyrUg2KEx\npR0+UHlKwc2vKZ0j6zPzRMrP+NyyZYcKR/ff/u3f4vz8PP70pz9FRMTf/va3jMxPTk6iXq/nfgI5\nw7EldW+uar1ej8lkErPZLFNyjIMg+P3793F2dpb8Ghxz5s6ouRFy5h3ZgL+GY2I9jL5Arh2Ut1qt\ndPqoRByNRilTpGVJGzK/2BuqzLx3I3aVsDihXBzBwjtafzebzTg7O4uLi4vodDqV+xoNLZ0DgkYo\nEdy3lAF0nzuhl8BBibjCqYLX5s7upMDJZphislwu4+bmJn788ce4vr5OO/Tx48c4Pj6Oer2eus1Z\nGgc8FxcX+Tz6eZlS47lBxzBGxg4tASTL9JpyT5hSApIIV5U0PfNOFuZ3v/tdvHS9Wh8pBAoBRdkA\nRcIZiKiSxolQmACUK0rdyA0RFBNuBMEGGLTIaRiUHRGqF8opBtIL3BPDY15FRKSBsZPgz8qGlb7Y\nnBgxnodBAHKGm8R3UMxwGxiP35Vn9Xq9inMKFPvw8FCB1P1Ow+GwQthzygtugCMs5oBxGBUiAjFE\nz2c4OiBzbg2AcmWjlRvPyrj8f55rpQj6CYfLaT8rDjapnXjWFwSl5AXZ8TRpnVQehvGlxoOklcqj\nZTilHmXk1IdRjLLHD+sVEc8CFu4LAsU92UMYKUjZzBv/Zu4dtPR6vUSEnUrEwdjb20s5A62A81ii\nunwPeUPpGVUFLSOV7NQAwYAdDL43Go3i6Ogoer1e1Gq1CveGdzGHzbxKnndycpLcj4hdihxODo5I\nxM7xQxat7EEjmLPyYr1BO/kuMmjUzwEXOoriHMZ3fHwcP/zwQyVAtL5g7kGWnOrAOWu32xmYcoHU\nWuczfpOzkVXz22iJcXR0FJ1OJwMzsg04bEaVSXsh86vVqtIb6/Pnz/Hw8BCXl5fRarUSOcXxs5Na\noiAgzZYBvkOvNgfQIDgEJp5TAk/k3I45xhv5cUoNZJo97gIQv6s5nqwdzzcSbfQbcGEwGFQI3rPZ\nLKbTaTYX5V4RkfQP+GHtdju72r979y7pGI+PjzEajZ6BDQ5Y0VEuggENs1OP3seOOZXnFKP5ik7r\n4lw70+LCHSNnZVbkpeut/cHb9Xa9XW/X2/V2vV1v1995vVr7A/LXPiOnTKW57BhSecSu8iBiV2WE\nZ+roixwqHnvptRslMcGZz0AJTAwnbQWSYfiXdykrhhgvkVeZ1omIZxEE98DrJgppNpsJp4Nq4D0b\nkYrYdrgdj8eVqMrnBc1mszg4OIjz8/M4Pj7O+RmNRnF7e5tpv6urqxwHJeBPT09xfHxcicoobWZO\nzEkDigd9Ia3EBSLJd3keZEo4KU4ZQfY36lKmec1ZKc8+g9zrFAroz3K5jPF4nKlkv2ez2cxKHSOP\npC+BsEEakVneGfny2XfwPfgPdIG0K3C+OXbA2jwbtAR5Iu9vLhrvCnRffg+5JkJzIzyiMlCr9Xqd\nc8NZeCcnJ5n2dfqAdyp5hbVaLStHN5tNBf11I8Fyv4Aqcq8yLdZoNLJQAaTTvAkjJlTY8r1Go5G6\nibQE8xaxa1wIf433odksRFmjZ666fHp6ynmjaenBwUGlOMLvZX6m0Tn0RKlTQK2tm5za43c//fRT\n3N7eVhoY+2Bi0IWI3eHV3BfU2e8DQtRobFs5ONUGmowcG6k1b9IUC/ZQt9vNfW/dyl5jTfiMOaYU\nv9lsZuPUyWSSxHGQY6O/7AkoI6wheh1CvOeWfUl7D37H2Lm3ic58ZpsF6o48L5e7o5WwR8goVdDo\nNmcq4KchS65II3VHwYCzAOg2yPHmItdq22bLVEN67Z+enmIwGKRtwlZFbNPF7969yxMEsCnsGeTO\nFYesL3YNuos75XPGK60xnE2JiCyEGI1GqSMajUbKMPdE9imS4HP7CmU7k5euV3GknNYxHO20gSFE\nJh4yMpsuYqekKB+1AuM5CJoNNFCzKwAQejYKn1nxY8yBVc1TYCyukDLXCRjbpFnehffEULp3FmlH\njL85G+ZWmQD59PQU4/E4SdGcgYfgkEOGq4QTELGtvpvP53F+fh79fj8FkrExJ+fn53F6eprjphIP\nhe9UKmNibsxboNKPdJuNScSuvBiombnlfSE/WnlHRCpmO2X+CX/AFTERO/gbUqmNM4YrYpePj9ga\nWXqekNLs9Xr5txhSOA3wMCIibm9vM4WEQvUY4SqYtxMRWQlnA2p+IAaFTuSsPRwtZM18AO5FqiIi\nKsrUMmtyLIaVdOfNzU32rfr06VOlKhM+G3NIOshEb+Zlvd6e3k7lqGXGnEg4HIbq4UAwPubG6wxH\nh3lttVpZcUl6yOnC0pnlYo0wXE4FjEajuL+/j4uLi9QbdhA5YHc4HMbd3V38/ve/zznmvexc8NNp\nX1+l4+cUph2AiIhff/21shakmajEYn3h0tGhfLVapT7ivlQK2wGJiOSp8ZnPW8Owsycs41Qzsn4O\nrr3PXIwQEan3cMwbjUaSzd+9e5fONfLhAMs8QvY7Y8Bg8yz3WGK/8X52os0psj7abDZJQuff5hE2\nm81nnEDWyUUxpMC8F1ln0ocGF/hbFzogJzinFH69e/cuIrb74vz8PHmfOFURW71/fX2dsr2/v5+6\n7eHhIb58+RKfP3+O3/72t5WWGlQ3kvqM2AUWyB56db1ep32bTqdpo2q1WuVoNVLAFHfM5/PcH4Ax\njLOsji/9Bj4jBfpSej2/+81P/g8v8pauFrLQIAAm7bnizYtvZ8ZRe8QuakOoysi7JJv6+Y7WbUB4\nX75vwhoT7aM8jEiZO+LokucQJfmAWQiuEdXjSzwvEZF9TJgXzoUyKdh8H8aHMbGRQqFdXV3luxkh\nwUHBkHFPIiScO3MFUGxU5pTVfigj5huFxLuxdo+Pj1kRgwNNPh9SKhdzyvoSXeMIs0a0V+Az+A/N\nZjMmk0muhY8cQLFBMkU5mC8GGZ41RkGVJG6M983NTUZJdsBQligpomsCA+8HG0nmjEDCzhKRMEEL\nShqSPPJghIggBXmwIqLvDITddrudlTu3t7dZHYRM2JB6PxiNQ1lTseX9hDOCM4Excdm1uUIO3vg+\nssG8sm4YgYuLi3j//n06oKAjROwPDw/Z4BY940OEicprtVpWJF1eXmY0HLFrDsraXV1dxcXFRc4N\na8feL7lOPq7HqCqf2/HyxTEnrJsvdK3fjYOM2+12GtcSAaXJoVFLyyRkZZotRuzadCBXnU6n0rCS\nY4WWy2UliGJPE5QSaPC8h4eHSpDI3v/06VNcXV3Fjz/+mH9vJ4PgaTKZVCpPHUwQNDDfkJRxvspC\nKS72hwPvo6OjdBr29/fzPV19yl5zxsSOEtkdZB99gHMLZ4h3KHsnOTihgq7Z3DY/paLz9PQ0UZ5m\ns5nBTcS2Jxm2ECAE2UHP0yeuXF/QSc4UZD9RHW1nz/J0dnZWCdC40D3YDXNRsYMEciXflmpcOGLI\nBUjd/8aRehVHqtfrZfUVAoeRR6ggUEZslTT9Rdg4DAohgnBtEqArrEqCNwtvhWxSLUqI71uwXWXm\nijZHT3yf55WOG85ExI5UawKuy/95BgbIZGM2L4icET7DxlbsEZGQKIRODBIXzsavv/76DMlbLpeZ\nhjAigrMUEdmjyO0tVqtd1aBJpURyGFEjUihLkLjJZJLPoFM6UW7Z/NTpY1JmXKQFSf+xvm6QiLLE\nsELCxvnwe/L3pBIwLEYzkGGIklyuvnt4eIjb29scI4qBtfeJ9LRJsJxZ3phn5p8xvkRCxjljD+Hc\n4FRzT5dwu5JmPp9n5Vmj0YiLi4uMIC8vLxPZfHh4SKPJO7CnqMJCLjqdTnz58iVubm4SBfHl/cS7\nIgPj8Tg7R5OO4sKhx8AYWSEAGAwG8fHjx6jX62l4Op1OBitfv37NMnq+NxgMot/v53p6bgkMv379\nGk9PT/Hb3/421wXjM5/PkyjNviC1amPhtWPOynSpf/rid3/5y1/i69evSX6PqBLmkTsCBc6WI5r3\ngbPIHc7GbDaL8XiccorRvbm5Sb3utbSDeHJykg7/aDRK4i/Ov59HyfzR0VEiZr5nxO4sv48fP0bE\nroUHAbD1ED33aFECshWx1QsnJyd5Xp4D7H6/nwcFWzZ5D/QsQSQXusAUBC47otgGf45MzufzyskD\nEVEx+OhPNwYm1erKNr7X7XZzb3BuasRW3i4vL7MIxRmMDx8+5Fhubm7i8fEx3r9/HxFbBJB7zWaz\nDCQiotLDirV030Gcrtvb23S6IrbOsPUgKDj/z1yCaPJ3puo4pcvzsPvoBFePImfful7FkcLbNC/J\nPTrgrhius1Iw94R7kB4gguOeEbs2ARZENpOREITf1QFGXriXK8Kc0+edHHWXXi2esPPCKHtSNVQc\nROyiOJQNqE/E7qBcKhfN2Tk+Po5ut5tOG/NtYzqdTmM6nVacFuYXJKJUUEDFdujYUMw/EZs3v6tz\n+En0hQNK0zrGG7FzMuGL0MuEd0Ehong830bpnKagfQYOE8aP77m7up1T+EY0uDMCRv8bokKUjbl1\npJ7NiWCN6cCMgkSWcFxecvpIkRDRW7nxfBSt03esNUqjjLZA20i7oKhRJn4PDJtThKSMkW+qfVwp\nZCcatKHZbMZ0Ok14/+LiImazWXY+dnNUno/zBKrG5yAoyCHOIWPHmWU9uZjHz58/Z4QKKkHAAc/D\nUTl8OuTf1ZU2lvf39xUuI++F03RxcZF73zSFMpXotSJ1478xEv/SdXZ2Fn/+858rUTkBKXwXc0je\nv3+fmYThcBjHx8eVSlgHHvV6vVLVh7NM1oCu8BE7ngxzY4dsMBgk3469ZNSNYA39D1JNzydoIEbW\n/uM//iP+9re/JQJqFAhH6eDgIA9wdpNVgiiCVqNjFxcXGWAh74wd2ggUBKPmpMOQSXQC6+dWMdgg\nAg8jfebVEliwFiUPiipAjgdyS4mrq6u0ieaj/vrrr3F5eZn7xg4KegLbhTOGTLnNA9V/rP1sNsvK\nSYKSiN1egydFo9SIHXcQvW9aDgEujp3Ts656pNLTHEdQbeaQMczn8+SGfet6tdQeL1miR6AWODFc\nNCpE8ThfjIOCw2Pjzabi9ybkAY1yH6MpNr5lZIeQowAdXUfsjpkw9I0D4ny/eQ/mcTkqZ554ptMp\ntdquLHM+n2fkHhHxu9/9Ls7OzlLZ3d3dxWKxSMV4cnKSKUQrOq8Jl9N+KFenVu30mBtg5Q5s71Qk\nyoYGccDMTtE0m9vu68zL6elpKj7Wlw1vrgibCyccQxWxc0AgkptbZIgaJ9KRtZ0OOzVE509PTxkt\nYVAiIlMUoI0YgIgd6RRehOfNJcOgB3aWcbQYn5EJ7k1EbzSWZ5lLxboh1/BIUHDwFnHI4b2wTqQv\nacaKHHW73XRqHfn5e+xheDYRW+T64uIifvnllxiPx9mhPWLXTBNHj+eSTnRKlvVzKh1FaeSZ8eME\njMfjXD/WolarpfL/8OFDBf2Bs8i+9BqQ3jg4OIjvvvuuYoRYb+TGTq05ayXnECQLZMpGmJ9GHfle\nrVaLDx8+xHfffRd/+tOfcl+s1+uM9klPMY5+v5+BAHqMwhLmFK4L+sIBH0R25NdkbNoGNBqNisGC\nF8dVNlBdr9dxdXUV4/E4ut1uXF1d5d9++vSpksr/7//+74jYpqHa7XY6TJ5XN1TudrvRbrcrfZSQ\nU5xijK6dwdlsVkH+4Sbyvm4Zgt5uNBo5d+xD7JMLNRzoghzbufIeNg/R6S36ZsGpfXx8TJtxd3eX\nAUir1UqHGlm6uLhIp84ZjM1mk73a3r17lzy0iIirq6tETWu1WnKbkKkPHz6kvqGfGrJIsQPAirvT\nw88iu8VagIb2er3UucwLndVxgh14LJfL1LHT6TQ2m02ll1/JTy2vt/YHb9fb9Xa9XW/X2/V2vV1/\n5/VqVXv9fv8ZikAUQ8qh5OxERPKjiLJo3mkkxCiBkShHZvw/qIJTGE5dAJmaiM47A3OaswT5GmjU\nsKKhSRNn+W6ZiuAivQWPjPE9PDxEr9eLwWAQs9ms0pQPGJr0C+gNUQrdyyFmkl5hfUjBEXkxX6CJ\nvIcjOqBpyNucvxWxRU+ISIG/mQ84KXt7e9nh3pAr9wbeJzL59ddfk/sEWmLSN9wJkEevuSMyCP4R\nWySj3W5nBF6v17NBJJC40w1EQrwTCBc8EsYBYkWq1VVdoJ6gjfBPIrYVlHAZIHkyb05xR+w4UxE7\nzhPRoFMBEbt0KpEu72IEjlQLc+Mzp0B/TbilupKUmDkIFD+4fQZjIAWM7MJNAP3sdrt5aC3R83w+\nTzlbrVYxHo/j69ev8eXLl4iIbMLpZo+G9tm3oGtGkyO2lUgcqsp+A9kGGQWZYQ1Bl7g33yNVwnuU\nCCgoFrxEX6y7OYq+zDcyqlYWiXA5ffTp06f4z//8z/j69WtEbLtQU5FFusa6lEIH5PPk5CSfyfhd\nEcp8s+YgLFSbITfozX6/n5WvjAPSb71ez/eMqHY2J51DupRnn56eRr/fj9FolCm63/zmN3lP9Dry\nFrFNRXGI88nJSaKxyB060aijK/u8xyMikRGqip0VsN0A6XXRCLaNLt9lk16eYbsVsTuuistI12g0\nSltxfHxcscHwzricMmOcvJf5kegEF22QDm+328kZfHh4yGdHRLbccUW4Dzv+y1/+Eo+Pj/Hu3bsK\npxQ+LHrbPDeeR8aHTA4XSDq2hjHQtoH9ad4pTV+/lV6PeCVHCoIwXKKIHcHaUDSTymIDZXpiSF2g\nIJz6QqHhSNmZgluBUTRUyb9R5O4zQ2oRCNgOj50gv1fEjgQXseNQmcM1HA7j9vY2jo6O4uzsrMIF\ngW+B02LljXJ+9+5dDAaDLDnnMFIcxF6vVykzpSU+aUYrPgyQjygoOzDbGSyrGZrNZh4wbOPNvLqU\nmDXkflSN2JFy6sVVJ9fX16nQmRN3jPYGY+28TuamADePx+NUDDiNjI9UAwaYiilklZw+z3AKezKZ\nxGQyiffv3+ehruaXMHbS2nx2cHAQJycnCW2zlsiwKzLPzs4yJYqRwIA7XYwjAXfKF9/ZbDaZjsAI\n8Ty3DWCPNhqNVHDIuVMGw+Ew96DT2jb+vCvfpwUBzvloNKpUxV1fX2e65/LyMn744YeUDVIbTj07\nnWR+n7ll5h26fxjPpOoKjh1zM51On6XeGCOyzTjtNBLsNBqNbDOCc84zPAYrc5ws7lOm8JiH8t+M\nt9lsxsnJSVZmXVxcRLvdjtvb27i9va0Y2f39/ZjNZsmFwfFjXKTfn56eKkUVvCdkdSgRDkx9TFOp\nL1xp/C//8i8Vh+bo6Cg+fvwY4/E4uWLIHSn/6+vr+K//+q98HgUDi8UieaSlc0p/osVikfrUtgqe\nJrIPb5WO3m7Pg2Pp9J5TsI1Go1K0ZMcV3cT9kQvmCeerPOYJ2UN/m1B+fHwc5+fnqVOto8zDxB6a\n7oKsQlnwHnIHfMtNr9fLoB7Agz36888/Vzh64/E43+XLly9ZWdlut+PDhw/JdQOU4ExDgi3mhrQ0\nh7M7ONpsNnlW4V//+teU/cFgkNV+OPTsLfMUqagtr1dxpMjvll4tXAKicxaDc6vgdNhbRGiILB3R\nIfw8y0LsEmYiDCMroAYvlT26Ms4KjHfDWDi/D1JhQ/GSMn16eqqcgk0+nzy0S5U5uBKSMgowIioV\nSzhiJo1jmMy9MdHO5FAUE99jva6urrKiISIy0gS5MRF/MpmkU8icMd84RxDcjZwxTzQ75NkRu8rC\n+/v7bNpnQ4sCKvlaJlkjP3wPQvPt7W3c3d1lg8GIyL4lJiszBhwX/m3kBJn6/PlzNn0zQsSYuU9E\nVGQEZTkej/MMsIgtx6DValVIoW5iCzLGs8z3idg1nywDGivR5XKZUfnd3V1cX1+nvG42m6zOGY/H\niW5yXxdTLBaL+Otf/5rvyBgIjnDQzGlAXnD44Yt5fYfDYfzyyy9Z/l46y+7NZXlCJpALO5n7+/vR\n6/Vivd6Wj1uhPj09VQoL2A8gIxgiE1kJPlyk4mIZgod+v18x3qvVtscUjkvJkzLS5gAUgjLyXiId\nzHOj0Yjf//73cX19HRFb9BMngfdkPs3fw7Fzs1f+DWJhfXN7e5tFA3zuRoinp6fR7Xbj6Wnb+45x\nwFEimIIvE7Fz3AheTEbGOR4Oh3F9fR2r1So+f/6c+4tiBypljQ7u7+/HH/7wh6jX68mVYX0jdsU/\noOARO87O3t5eHrXFPiQQsWNqp9aBi4P59Xqdfcl6vV5mFXgXZBi75vWnsMOOmREbkPQShUXGCRQN\nWHDQNnbawQH6nMpTAsKIXQaDNg+Q1nkW3+cIM/Rwp9OJ7777Lm2BA2KyGlSOl4EQsjUajWK5XOY+\nRPYbjUbc3t5WirOwQWQyptNpIpVfvnxJbuO//uu/xkvXqzhSGGorN2A1elmQzop4XhHiRfa/rTAi\nqtEYRp1n4kRZsTh9BcRpUjn/Ngpl5wylhqE1ub1EVFarVTognU4nzs7O4unpKS4vL2M2m1VSMwg6\niBJCOhgMYrVaZentcDjMzQYi5qpDN8KL2HWqxng41eQ5Yw2Yb0iSpCzc0I4Ig/n0mWRuzIeBiNgR\nJSFrU5USsUuXsoGdLgWaRolMJpOMMChHxyh44xNV2yEBirYThcPgA0kpjUZZo6hWq1Wl4Ruywbz1\ner04ODjIChDkLGKrpLrdbnbh9Zwyf/f393F6ehrj8TgV0XA4jF6vF/1+P8meyAo/SX3a6WONTMJE\ngbl1B8gYYyQQuLm5STQLgu9kMomjo6NMp9jQNBqNdISbzWZGft5POJcuCliv19nAD3njXZ6enuLq\n6ip+/fXXNL6Qmnnm/v5+7sMyILLzaDSW7xE5+zMj2qPRKNFDLirPQFGcJiEQwhHBwUa2QQBBKCIi\nx2wEzeR+UMCbm5tKew/3PeNykQ0FHfRp+vd///eIiPjzn/8c9/f38f79+6xAdGrdzjl72A1+F4tF\nXFxcxN7eXlxdXVXSJpwIQWDrVjMYRQJXO0uQ6NHRrD+2gvHa8NnB/OMf/5jBW8SuSo6siPfFbDaL\ner0ep6enqedcsdrpdOL6+jr76xmNbbVacXt7G4+PjzEYDDL4QHbH43FsNtsGnD6hgzYZ0FV43nA4\nzDUsq2fZowTAzWYzyfPIGvOIw++ihVqtlojLarXKeSNd2Ov1YrPZVA6md7NN1sL91QhiIXsbBQLp\nwvHD7rFP6MdmSgt945A1n6WIXf348WMS/HmXZrOZ6b6IXU/EiJ3TTqFVt9vNsYOaEVw53U/BgB3O\n8noVR2o0GuVklGkTHBcizojIKoKyd0TEzilztRUbzCgEShBhxOiAjjl9BDLmlKCdOlAl7m+kh0ox\nIjpHCfA6rMS5Hh8f4/r6OkajUQwGg/jw4UOOgQZ2bH6X82L0cTKZT3coJvdMaoL7YvAwinxGrvzw\n8LAyT8w74yKv7Bw9qZiS74JTgbFjk3udWHfn//f392M0GqUid0oQ5OvhYXuwMohAxFb4z8/P8+gN\nK2FgZqMXLlf2cUSkynhexBaVc1TI83C6eE/Sg4yfZqblkSVUQ/EujgQZEzC+2wpQsQTChpFkjPRK\nYZ95/DjROOpOKcBls6xHbB2u9+/fZ7PW4XCYn83n80z9oNjMa6CSjxSIy9hJT/L/Tr+jwIbDYeVI\nKar8kBVX/vBdl6lTNcsaI+dl+tcVbziOlmF+T/NAo6TsQ9AVyxQO6GKxyCNKeA6OPfwbBxFHR0eJ\nhP5/7Z3bT2PZ0cWXaWMa2/h+AWygQd1z0+RlpEjzNA9R/uZIUf6GSEmUzGS6e7qbuzE2Ngcb8AU7\nD9avXMc9Tzx8LX3aS4pmJmBz9j5776pataq2NyaDwcA0Qt1uV/1+386Fp6cn5fN5NRoNc+wZsy+D\nZw/SY4keTugJaTnB2IfDoTG0s9lM5XLZfu6ZQh9YsjdoLMzvw7qhWUGz49cw84PzcXt7a/uRWwzG\n40WDXpxHSbEeQVw+jWNCdSB7yTughUIhlkb1aT/YmGRy0SMtiiIbM+mgYrFoQS3znEqlTCPFmcLY\nfcUx5yOfQz8E00c7EtYte4Bn8sFnIrFoAAsryhnP+/dOIMwkc9Pr9azSHUJDWuojsSFeO8j7A163\nR2YJFsy/X9bEdDqN2SLWMA4NAZQ/izc2Nqwz+tPTU+yKGGwCJAu6Sc4yxlWpVGLNrjc2NhRFkW5u\nbixwl6S9vb3PslKr+GKOFHlOf8UGBoiXhiMlLfqerN7qLcU95dXGljhZXpzOP3E8+JxPbXEg0pTO\npwYw+Gh9VoWqvHwOFBYbEdNkMjFD7e+J6vf7ur+/j/V/YnywB3jXUPGMhwjf66d4HqJrFqSfN3/I\nekeKUmUcH1/q64Wk5N854DFoLHif10+n06pUKhaBemeL78EZ8NojhMsYPH8Y4wASBUlLkSMCxX6/\nH2OUeC6cDy/gZHz5fN7WWSaTMUMaRZEd3slkUvl83lgHjAA9oXzKyn8WJxvjKS30bL4Tt3fcYXJo\nDUG5tLQwfHd3d7q4uLCDnO9kbBy2vmCAKBedQavVsvFXKhVlMpmYTocIkkN/Op2qXC5bBM+eoS8Z\nzgfr4P7+3kTorH3ffyuKIpt33/bk9vbWUl69Xu+znmf05vHOFfOdSqW0u7urXC5ne9TvYd4Nh60v\nRAB8xjsJXkbgtV44egcHB3Z3IoYtiiIlEglz6DudTswpIogcjUax9D3P9vj4GOsIzb749OmTOfTd\nbtfE2AQ5iURCh4eHtsZY+1yjkU6nY+lQuq/joNLzin2B0ebM8OkmdGX8fXpAScs+aQQ8RPjSIm3y\n4sULFYtFY128EwLTSfoHNpbg4Pb21hjVarVq84Yjvds4flYAABpXSURBVCojuLy8VKVSMQbEBwqM\nzeuEGD8sm9fq8jlK6nkXJycn+vjxo+3RSqUSS2Ph1LF/vH3yDBD7jRYFPoD2bDfnLnaBdw0DuLa2\nFkuZkW2AdfdOCM4cbCw6QXp/ITHAxjGm9fV1W+f5fN6ehQantLhJJBK2Lkhdsjc8G4m8Zjpd3DTg\n2ejxeKybmxtdX19bcMI53Gw2VS6XrR/dN998Y/vm9PRUNzc36na7xjwyPjRssMypVCqWpfAp/t9D\naH8QEBAQEBAQEPBMfBFGisgSz15apoyIeL2omiiBiNAzPXzOs0M+XUgEwb8TmZDaQH/gWS5y2TTm\n8+JQIlHYJ88e8D1EKz5iI/r1FKFPtSDApE2+pzHxzCmt9VE3bA7RrNcekGZBFLqaZ26323YPmk8p\nUa7qU5Bem4KQ1bM4kqxBny8/ZbykJom+faqCVImvuOK7vfgZ9o3IgL9HOs6zVfP5XFEUxS7s9XMP\nlcuaIKLhxnjP4nhNQyKRUKlUMrbOp6g8k0ZkzdqA7vaVekTXzGsURapUKrHOz2gWSKnQDkFaUOOU\naHe7XQ0GA0v7oVMj/env/4JV444wIjRpQWP7zxBtsmdYZ8Vi0VJCfr6ZcxgWaZkW4bn81Tlra2vW\nyRwdG2uYVDjVUIVCIaZr8lE7TLAvXoGlbTQaFmWz3tifpCt888LVFCP7Ah2FL/HnPdF4sNVq2dzC\nkm5ubhrbBotyfn4uaVmZxtodDodWfp/JZFQsFjUYDCwdwfhgXa6urmxvsZ9I+VarVV1dXcWqDz2r\nSVqeNcn4SI3V63U7hyaTiQqFgqV90MOwvmHuYWN99TSpTtJw6KukpS04PT3V5eWlVU9Jy6tAjo+P\nbd/5DtbpdFrffvutcrmc7u7uLIVDSh0dW6PRsGqr6XSqVqulXq+n3d1da8DIGGBIYBR9yj2dTqvT\n6RhzzPh6vZ5ub2+VTqfNDrG3T05OYvqaRqNhFyhvbGzEpABek3RxcaHxeKxisWjsmdfs+AKp8Xhs\nejJpUX3Z7/f19u1bTadT1Wo1NRoNSUu92uPjo7GDvvCB4hWfqeBzSAzQXPobQPh39LPsb9KjsLGk\n8JjTfD6vp6cnE3/zOa5SWltbU7/fj2lqT09PrZ0KFYOcw71ez4oLuIMVZml7e1vv3r0zFnK1dQxn\nP4wqa41Kep8hW0XC56P/r/CHP/xh7juKS/HOvb6qQVrePF0qleyaAg5pShlJAUBPSktdFNVgXliI\nocO5QTAnKdbrhJQXC4VUIY6d19r4sUAN+2oPxoTjxBxQiVUoFFQoFGJ6Hi9EXK2k8BUHOAYYCyp3\nvHaMPjg8D/obRKS+vHY+X5TPYpg5UL2zhR7M38pN1RYb1lfYcXijWfFCVRYwug1f0dfpdKwvjK+M\n89oSxsq88Tv0+2Fc/AwHGI0RhxsVHVT7+ENwfX1dg8HAel15fRhOFWlRUr9Q6rxn5hCaXJJpPxD6\nstalxdUc3nm+u7uLafJwmK+urnR+fm4GAz0S2qDVi1RxJEajkY6Pj23d7O3tqV6vm0aCdA7PjbOO\nQN47SzjfpDh8FRWaqcFgELsf7MWLF7aGWKf+3V9cXCiKIkvdMAacUu/o+3TLcDjU1dWVXr58qaOj\no5hg26exMZZ+TpELSPFu2uPx8hoJAhMOV+4lo/LHH9TVatXaXuBA4GSVSiWrTuNqDap2EeJj3Lmv\nkDn1lc/VatXu70NTRnBRrVZjmlKq1Xgf3rn3GstEImHPyTlJawPmwwcuqwVEjB8ZA2lFr6nhwu52\nu22OEHuxVqspkUiYpsj37aLgYWtrS+VyWY1Gw/7e4+Ojzs7O9P79e1UqFX399ddmvElNzedzC1p5\n11EU6e7uzopkstls7Cqf4XCof/zjH2q1WlZJzbyVy2VLaW9ubprGdTgc2rUrs9nMzkVpmYJFD+xT\nR/72hVwuFxObJ5NJczbb7bY2NjaUTqdj/dem06lVVfOM0kLE7SvLCYwl2XdSiOXtF1WO8/ncbNT7\n9+/tc999951+/vlnnZ+f69WrVyYHmEwmVrFYrVZjmkNsGmelT7NeXV2ZNhDNH2NAZ8zZtrW1ZWlP\nbFK9XtfNzY1++eUX2/dc+5RMJk1Ty3qChFkV1EsLG/LixQu122396U9/+vwCS30hRmptbc0Wva/4\nWl9fj/Ur8oyHL/30bAaHBRvcfweOEwI0aekI0HuD7/Flk2xuX0Hm8/ZeaLyqn2KBIub0wlm0PkQX\nfCdOXDqdNgbJN5dDI5FMJnV3d2cGmJyyv27Fl3Hzt7i80Y+D/49oCPEq33tzc6Pb21tzGLy+SFoc\navl83iJDPnd8fKwoisxArGpO+BuUOktLUS0OnXeqiXIxGNLygKbBo2986ZvjTaeLcuZUKmVVSowB\nbRxrw98Ej1OLRsUzefx//X5f8/nyMlA0DGhyJpOJNfXk/Xujg26En2HEiIo51HC40On4y3ARB6dS\nKTUajZgmDQ0U7CG6BklWyQhz5LUwtVrN5gxhOIfpcDhUNptVFEX6+PGjXr58GdOlsI8ox/fsEUYf\nFpDnxCHDEfHaCwpMBoOBGVPYK94xjrkXpjJvu7u71vx1Z2fHnBgcvnw+/5kDRrk9/02jWNYG0Tdl\n8J5Rf3p60qtXr5RKpXR1dRWr6GT+0ZH4829nZ8cYG9hHSeawZTIZbW1txYpzuMQ5n8/bO2Psm5ub\n+uMf/2hOt69ikpZnA/uR8U0mE9NEnp2dmZPC2KlalWSVxexJzksKMWhPwDhms5lVl/p1ivaRJrj+\nXEAjx88RiEuyd3t+fq7T01P9+uuvtk8JKJvNpumT/HnCBbgEYN5phhm+vLy0fSVJ+/v7kmRViVzS\nzXeyBrPZrAqFgmq1mj1Lo9GwSt9+vx/TjsFKYavY59ij+Xyu6+vrGFtDtfZgMFCn0zG2knfDfqvV\nauakMEaCx2KxqFwuF6sQz2Qyarfbury8tHXGvPG7vrCJYI++VNgwX0jVbrf1888/K5FIaH9/X6lU\nKuacszZWK1ZhqllfPhNBQOIzUfysXq+r2+3qw4cP5tijGbu/v9fXX39tLVXIPDHfZJl8wCgt+l35\nc/f38EUcKV6sd5a8ANRHCR7Q8CxWaekskbrxgjWMCYccDSH5GVVLMFK8RDxvnomDQVpS6kRglO1K\nMuMLwzSbzWKfI6UAo8VhQrNMIkSfaqI6hMh0MpnYRsRRQ8BNVAuurq40GAxULpftQPKVPbAgPL+f\nc++g0UZAWkZK9OfxY+RAvr6+Nq8e8JzMi4/6+S5SDr6ij8ibCAm2h7mRllU6ns0gDeK7gnu2CoaG\n6MynYPl9L+Zk3aZSKe3s7KhQKNidVdLSIYBBHA6Hlu6QluJvbj/P5/O2hjlcMHoIM/2cEmV6VgmB\nPv+sVCqWFoJRXF9f/6xhIVE3hg/hvCS7ccALSZlv3gtj9OwDQk0ON290cRx8xZhvnOqDFtLYkqwn\nTb1eV7lc1vHxsfWiqtfrxprgTOHA8s5SqZQxdDQ2lRbCcIKZ1QDOV1nCkHpDy7/Dhnr2m4o9qjc9\nfOd932NqlWH1UTKXa1NE441JsVg0doGzjLU4GAz04cMHOxP93/BVU2trazGGiRYG9GvyDBWGnBYO\n7C//7Gtra1ZtNhqNzEg9PDxY6s2LwKWFker1etrZ2dHu7m5M1sHav7i4ULvd1uHhYayaud/vx6oM\nWVPMCeyXvzOR85ssw/v37038vbW1pUajYa0OfLuYx8dH5XI57e3t2VlzcHAgadE24uLiQgcHB/Zc\nf/3rX+3db25uqlqtqlwuq1ar2edgx7BhVIIy9qenJ/373/9Wt9vVV199Zc7Z4+OjOVc4jY+Pj3rz\n5k1sbp6ennR5eRlLpZKi7Xa76nQ6+uqrr+w5j4+PLcCjnx7I5XJKp9M6Pz+3NeADpVarZa0/fIFP\np9MxZom0N2xVpVIxSQupQtjITCajfr9ve2U8HltWhH2Gffad67e2tpTNZu08ffPmjZ0BOLG8y3w+\nb5/DtsDCUpAgLc6LQqHwWR8+jy/iSElLw7BaTUeE6L0/r0WRZAyUpFhUwMHBYYdhJQLCuPjPScvI\n1lfu+FSA/11K2HnOVCplC2NjY0O9Xk9RFJlh8uwBDgRXl3iWLZVKxQ5aTzn6hURuXIrrQGARfOqO\nAwWD4Z0byvFvbm60v79vz8ff9NV62Ww21tuH3+PfMV48697enu7v79XtdmPMCvOcyWRMTyDFW/6T\n5vOUMiwfhhNnAmPqy/R9+wMOKFJ0bAR6LvE8/A3WAhEO0aq/uBSnpVAoxFI7OBZoJXDQvd5HkqXD\nRqORbWK0X8wpjirrTZKxlb4ykxQpTCjODHOKgUGz45me8Xis09NTiyz5WbfbVaVSsaaD3qmCydjc\n3NSrV69ipeI4it4I+opc5pFUnq9mJXIkKmW+ifRpnVAul+1S4tFopHq9bqkftFnMDbokjGkymYyl\np5PJZKwa1AcgrEVYUNY3jiLVXL7FA3uPYI10DOPHMPr+PqwpHEbmh/3ty+ZJA/n9S4n34+OjisWi\nORn9fl9nZ2exNKgPaljz3lGSZC1SZrOZKpWK5vO5rW+MDAEk/+N7PfOKDsr/rFgs2lUng8HAzsft\n7W2bS3SrrCmY0mw2qw8fPkiSVYNxsflsNrMGioyfliewO9PpVH//+99tHD/88ENMK8Waurq6UrFY\nVLFY1DfffGPMBXNKehVHAaaD88obaJha9FS3t7eW5oLl2t3dNe3PbLbo1M/az+fzxhaxdxnP/f29\nvQ+qoX/99Vf961//kiS9efNGe3t7lm73QeRgMLCbMzqdjtrtdkw2AAt8e3uru7s7c8B++eUXTSaL\nZrTdbteaFUvLimQyF94BOzw81Pb2tq6vr+1KMt/nrNlsmmNN5oWfkeYmJbxaOV6v11UqlfTbb79Z\nWvjdu3c6OjpSOp3WxcWFOp2OzTeMKHKN1bYzrP9EYtGTkO9EcuOD2FV8EY3Umzdv7I96r9azVJ4d\nyWazajabajabdhXKKiNFPtl/bnNzM0bT+V4eHGb09PG0OX8TI8LhIC2vOaGkFO9dikcY5IN9SwGM\nKekkL+QkDUa6yIvicQRJOWH0cbwwpOSbpeXhiWCYnileIEg39GKxqHK5bAc/hobfY76AT9PA4PA5\n5nI2m8Uas2HwPdPCd2PQ6NQNCyjJ8tnj8ViVSiUmusTB4iDzuhxvCH27BsbkmcHZbBZrrocjiYbA\ndzDmbzDnvj8LzifsTyaTsUODd8PcIZDnXfnrUNDZSDJDAgNA2llaOrw4qYPBwETMk8lEzWbTnHDK\nhaVFzn82m+nk5ESFQkHFYtHGkU6ndXR0ZEGIF2JDfTMOr4GDcUokEsZUcngzxxyI6+vrsdvacZIz\nmUyssaJPAxLt8g4vLy+Vz+dVKpVMlI5j7PcbLC3sjbRwGCjEYJ/jMGD4WNe+mSNGiBQsncj9+ycl\n6tPtOMY+bcm6YO1ykPvmqDC1fD6bzZp+6vr6WtVq1fYbjLUkffz4Uf1+X5VKxXRABJ6sJVh6f/6j\nb/RzBDCQPgDxzhntOXq9nvWfYm2Q1vPCZu8s8844N3CW7u/vTRf38PCgdrtt5xeSBa+VI/2Gg8V7\n297etu+czWb67rvvNBgM9N///tfmhGeCASSI5NnQ5uKcE4Qyhul0qlwup3q9rul0ao2BOUvp7l6p\nVIxZgsWhaMQLuHk3tIB4//69sc3FYlHNZtMC0kKhoMfHx1ivLNjLFy9eGLsmLdiV+Xxua+ef//yn\nrUlauLBPYCiZ70KhoGQyqX6/b20kpMX9hTBLPvCQZFf4UDCw2sKhWq3GdHO+8IFiGPYSNmhjY8Ma\nvh4dHalWq9m8+X1Fip3zq1Ao6P7+XgcHB0qlUjo9Pf3MzpF9effunc13rVYzfe6PP/74u/m90P4g\nICAgICAgIOCZ+GKpPR/1A9gGSpuJaGi5f3h4aBGBb8znNRt48ZKMHYCN8lQeeWUvOvNVe6RQoBzx\n2onI0WNR0isty/FzuVysek5adv1GaEyULS3ywVDl5H6JoHzFIZczetbJ642iKIoxPgjCqTxCayUt\nrzuRljQqgl4odbQOiBn5XaI92ir4KMIzUC9fvoyJJ2FbEEaSvoI59NoRvyYon+U9ep0KrRRWq0Bh\nfohMYG2kZSsGnseLi3lHjMVXdMG2sG589QxMBu+e7t6rVY+k9Gq1mr0rWC60G76qqdFoxNpf+Co6\nUlCsL0+pUyk1Go2UzWZjFW++ioW55DlhsHhnPsXu0zXMtW86+fT0ZCJjr+fyjANj5Ge+mSzr2N/R\nxz9hBoloi8WihsOhMUFeD8TckJaGWeTZ0UCORiO7fJWIljWQSCRUqVS0vb1tUXm32zX2qFarxdIU\nrEvSML6CbW1tzS7D9oyetGAc2dPZbNZ0UcwvImXSf+g20JewFhEC8956vZ4mk4mq1aqlzhib36/s\nE9Youin+vmfj8vn8ZzIBXyVJVV+pVDJmivfOheykvP0NE7DniURCJycnxpx+//33ur+/V6vVMlaN\nux0zmYwxmA8PD1ZJJi3L4/f39+09UdGYSqXUarUURZEymYwqlUosnba+vq5Wq2XXbXnRNZeHYzNW\ndZwHBweaTqemS5IWtms8HhvjynnIvMCc3tzcqFQqxdJl7AW0WrQbqdVqenx81MXFhYnVacgrLbRO\n19fXlmbE1krLNgYwvWRBpOXdjsxrJpOJMTU02iSteHZ2JmlRFLC3t6fvv//e9pIvwkBPxt737Ws6\nnY7K5bKKxaJubm7MBlEhy92kPo3smdS3b9/GtIpktWazmYrFog4PD40dbLfbKpVKdu+fP4Npm0Cb\nlL29PdtrnU4n1ij29/BFHCn0PF4czMv1OUsqRprNpnZ3d1WtVm2heR0Uh+Jq7x5+zu9jlKVlB2MO\nAwyOtDhsqPJCk4Oh9v13cHa8PgD4tAv/jfKfNKJPM/qOsqSpJFnPHp+DZuzoiXheUiSMi3nx6VNA\nmoV0kn8en9pjzrxjiFPKeHylpbSk6P1GJP+NaI+KM0km2vYlyjwrjiTl8b7s2Au5MZReNM6cYcAw\n3qQg0bj5d0cVG04oTiZrBiMZRZFGo1HMsHuRsNeU8IwYGsbAoVGpVLS/v6/xeGwpKl9FRtsI//x8\nJ3uI9UOKCoeX52Wt8E7b7bZevnypV69exQwiDgn7yd8diGH1zhxGyAv4STXxOa/V8gJ/aXkZKul8\nr9sh2MBh8p/jviz0UxRvMP7NzU29fv3aCjF8FSHp+XQ6reFwqPl8bmuYzusIWqVlpWCz2bR0DC0r\n/NrAseVZ/ZnB2m00GrHziTmjapYUGWufefC6KRBFUawTOs56oVDQTz/9ZOl8f0m0tLy5IZvNmjMl\nLYwsvXS8/k1apso5f+hkzrPidBDclEolGz9BK+ezD5IfHh40HA5N3Hx9fW3iYAqESH1vb2/bM83n\nc/X7fX369En/+c9/lMlk9Oc//1mS9O233+pvf/ubvdurqyt7lv39fRO8U8HH/F1fX6vX61ngQQWY\ntDijRqORbm5uLDDlO0ulkg4ODpTL5ayS0t8GMJlMtL+/bxd38zPGiXgcHZYk7ezsmGHf2dlRs9mM\nVd1GUWTONSloNGKlUklRFOnt27f68OGDisWiOaC0wuh2u/rLX/6iy8vLWNsM0Ov1VCqVLA25vr6u\nXq9nleOz2Ux7e3uSZPo8HHgf4GF/0Ce/fv1ax8fHkqTffvtN+/v7pv3LZDIW0JBibbVadmOCdzLv\n7u6so/unT5/sZ1QSl8tl9fv92B6tVCrWfoKqeR+UTyYTs0tessJND6tBvscXcaQwAD6CRI+Bx5/P\n5+0FHx0dqVQqWdWbP/gwOl547V+i1wBgcKRlybkXY3v2yH+nd14kmcYDLYuvvmLRUEHkI3iqmWA6\nvBeNsUBfgdHMZrNmFEejkW1saeFhJxIJE/H5y2Dp6USeH20KefT19XVjD+7u7jQcDs1xLRQKiqLI\nhMVedOpF1Gg3mBsqZdDVJJNJE/pxsSq9YbiZW5IZNd+rZJV1KxaLarVaJnjk/cIgrYp42WgwEl4D\nx5hon4DWSlocamhn6FPCoe/XmL8+iDFgfDDCvE/mBp1QOp02po/P9no90xkkk0ljOemngnbL9y3z\nwmzGxvNQHcPfZQ74e+gO9/f3zdBJS90Za9M7WRSH4LzxvKxhHL7VfegLQFj/vsEtBpZ97VkO1jzP\n7PuxPT0t7q/b2tqKNQCVZD2ZqIJj7nlWxjAajWJCUuZ3OBzq+Pg4psekMo3rb3BUWIu0i1htYYEm\nslarWSNLH3TBcMFqegaPv1Mul62VA2ux3W7b3+eqDJ6F99bv91WtVmPsJ3osKX4mSkstkGd7JVnA\ngGPD//gs+2w0GlkLBOaUwJnz2/dE4qwrlUqaz+dqNpuxYJerbjxjydpgDb5+/VrpdNrE3/P53Kq1\nYLN5/lwup9FopNPTU9uLvmGptHBwGo2G5vO5vfv5fG6OPWcgWj5fsIF2lv2Lxomg0velQ7TuGW/W\nfq1Ws7ONd4GTcXJyonq9bvde4qQwN9wZyN+cTqfmvO3s7GgwGKjdbhvb46uSh8Oh6Sa73a6tE/RT\n0rKdBU7W0dGRstms2RyaB/OdvEscSeaNZsJeowiZgQOcTCb1ww8/6OzszMb/8PBgVc8EugQY+Xze\nAn2cTMbHuQNp4jWeBPIEXv4sqVarsV57v4cvIjYPCAgICAgICPj/gCA2DwgICAgICAh4JoIjFRAQ\nEBAQEBDwTARHKiAgICAgICDgmQiOVEBAQEBAQEDAMxEcqYCAgICAgICAZyI4UgEBAQEBAQEBz0Rw\npAICAgICAgICnongSAUEBAQEBAQEPBPBkQoICAgICAgIeCaCIxUQEBAQEBAQ8EwERyogICAgICAg\n4JkIjlRAQEBAQEBAwDMRHKmAgICAgICAgGciOFIBAQEBAQEBAc9EcKQCAgICAgICAp6J4EgFBAQE\nBAQEBDwTwZEKCAgICAgICHgmgiMVEBAQEBAQEPBMBEcqICAgICAgIOCZCI5UQEBAQEBAQMAz8T90\nn59+FodZjgAAAABJRU5ErkJggg==\n", + "text": [ + "" + ] + } + ], + "prompt_number": 2 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The convolution weights are initialized from Gaussian noise while the biases are initialized to zero. These random filters give output somewhat like edge detections." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "# helper show filter outputs\n", + "def show_filters(net):\n", + " net.forward()\n", + " plt.figure()\n", + " filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()\n", + " for i in range(3):\n", + " plt.subplot(1,4,i+2)\n", + " plt.title(\"filter #{} output\".format(i))\n", + " plt.imshow(net.blobs['conv'].data[0, i], vmin=filt_min, vmax=filt_max)\n", + " plt.tight_layout()\n", + " plt.axis('off')\n", + "\n", + "# filter the image with initial \n", + "show_filters(net)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "display_data", + "png": "iVBORw0KGgoAAAANSUhEUgAAAicAAACbCAYAAAC5xzv6AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvVuMbVl2pvWvfb/FjkueW568VN5dXSUbl4sHbBBYbYRK\njRqEJW7qfkD90MItN4gGgQC3QHYJiwdejJFfcNvgRtBuaBAPyA9gt5FBcrnc1bbLVemqPFmZlZdz\nTuaJc+KybxH7sniI8839rxFrx4lMU7mjKveQQhGx97rMNeeYY/zjH2POleV5ro1sZCMb2chGNrKR\nqyKVdTdgIxvZyEY2spGNbMRlA042spGNbGQjG9nIlZINONnIRjaykY1sZCNXSjbgZCMb2chGNrKR\njVwp2YCTjWxkIxvZyEY2cqVkA042spGNbGQjG9nIlZJPDTjJsuyHsiz7x1mWHWVZ9jezLPuVLMt+\n7vF3P5ll2TvrbuNGNvJxZKPbG/lBlY1uf3rlUwNOJP2Hkv6vPM/7eZ7/13me/0ye518uOzDLsrey\nLPuL36uGZFn2lSzLXsmy7KUsy/4wfLeXZdn/mmXZ4HE7/s3vURv+8yzLfuOqXm8jH0m+X3T7Z7Ms\n+2qWZZMsy37te9iGjW7/4MiV1+0syxpZlv3q4/sfZVn2tSzLvvQ9asOnRrc/TeDkM5K+ccljc0nZ\nx7lJ9lgu+L4u6fk8z9+Q9EVJfxgO+W8kTSTdkPRXJP1KlmWf+zht2cinRr5fdPs9Sb8g6e98nPtv\n5FMp3w+6XZP0XUn/bJ7nfUk/J+k3syz7zMdpy0YeS57nP/A/kn5b0kzSWNKRpFcl/bqkX3j8/U9K\neufx378haS5pJOlY0n/w+PN/StL/K+mRpH8s6Z+z6/9DSV+W9P88Pu+lC9ryBUm//fjv/1LSz9h3\nXUknkl6xz/47Sb+44lqZzibCW5LuPz62H5/Jjn9L0k9J+tLj+5w+fsav2XP8oqTfl3Qo6X+TtPtx\nr7f52ej2iuN+QdKvPeG5Nrr9Kf/5ftRtO/6PJP0rG93+c4z/uhvwCSr670j6a/b/r0n6+bIBlPQd\nSX/R/n9G0gNJX3r8/z//+P+nTDnekvQXdMZG1Uru/289niDDxxPhkaTp40n3UGcRwhckDcN5f0vS\n/77imf6apG9LekFnwOZ/kfTfX6CU6bkk/Wcca9//Q0nvSvqcpI6k/1nSb3zc621+NrqNbofjv6wn\ng5ONbm9+vu90+/E5N3UGqF5b8Uwb3b7Ez6cprSOdp/wuSwH+VUn/R57nvyVJeZ7/n5K+KulffPx9\nLunX8zz/Zp7nizzPZ/ECeZ7/ep7nuzqjA39c0j8h6ev5WS51L8/ztyX1dKb0LseStla0669I+q/y\nPH8rz/OhpP9Y0r+RZdllxjXT+efPdaao38jzfCTpb0v61y6iO59wvY18cnLVdbtwyiXatdHtjSDf\nN7r9OP3zPzy+7rdWtGuj25eQTxs4uYxRLJPPSPpXsyx7xI+kf1rSLTtmZdX44yLXgyzLDiT9hM6Q\n7uuSfujx9f7dx4cOJPXD6ds6Ayhl8rQknxzf1Vn+8+blHqtU/Dm+K6ku6dqf43ob+WTkqut24bRL\ntGuj2xtBvi90+zG4+A2d1Qz+7AXt2uj2JaS27gasWVYpffz8uzqjyf76x7iW8jx/KGkny7J/XdJP\n5nn+M1mW/QNJv5zn+W/bod+SVMuy7JX8rPBKeozUV1z6fZ1Rg8jzOsvR3pf0rM4oPklSlmVVSdcv\n0d7nw99TnVGhw495vY2sR66abl/qeiYb3d7IKrlyuv2YpfhVnenNX8rzfH7BPTe6fQn5tDEnWfh7\nVQR3X9LL9v/flfSXsyz7F7Isq2ZZ1nq8xv6ZFddeJf+kpH/0+O8vKKxmeEzx/QNJP59lWSfLsn9G\n0l/WGRovk/9R0r+XZdkLWZb1JP0Xkv6nPM8XOgM6rSzL/tJjqvHnJDXt3HuSXgjUXybpr2ZZ9hey\nLOtI+nlJfz8/S05+nOtt5JOTK63b0plhzLKspbOgqJplWfOxsSyTjW5vBLnyui3pVyR9VtK/lOf5\nyROut9HtS8inDZzk4e/4P/KLkn7uMXX3t/I8f1fSvyzpP5H0gc4Q+b+vomJfBoH+mKR/lGXZU5Jm\neZ4flhzzNyS1H9/n70r6t/M8/+aK6/0dnQGX/1vSmzor2PqbkvT42n9D0n+rs2KpgYrU399//Hs/\ny7Kv2jP8hs4q4u9Kakj6d/4c19vIJyffD7r9t3Wmo/+RzuoBxpL+0xXX2+j2RpArrduPlwz/dZ2x\n3PeyLDt+/LNqj6qNbl9CsscVuxvZiLIs+x2d0aCbfSg28gMlG93eyA+q/KDq9qeNOdnIk+XK0Xsb\n2cj/T7LR7Y38oMoPnG5vwMlGomyotI38oMpGtzfygyo/cLq9SetsZCMb2chGNrKRKyVrWUr85S9/\nOZekCIyyLEuf+Xfz+Vzz+VxZlomi4tlspkqlcu68LMs0ny9XcVWrVVUqFdXrdVWr1XTtxWKRrn1y\ncqLpdKrT01Odnp5qPp+rUqmo0+mo3+8ryzLNZrN0D87ld6VSUa227Eramee5ptNpOrder6efarWq\nPM81m83SPfM8T8/Eb4R7rRKes1KpFPpoNBrp8PBQh4eHGo1GmkwmkqRWq6V+v6+dnR11u13V63W1\nWi3V63VlWabFYpF+8jzXfD5Pz5HneXrGWq2m2WymyWSiyWSSnqNarardbqvVaqlWq6Wf2H+z2UyD\nwUCDwUCnp6eqVCpqNBrq9/tqNBqF56ZPFotF6jPuxzP/0i/90lrpzV/+5V9+Itr39kapVCpPHOt4\njYuud9H5ktKceNI94/3iuWVz+Xsh3g7mELJYLAr9x/9IpVJJduSjSBwTtzVl7XKhjd622K4n3Q/5\n2Z/92bXp9q/+6q/m2LQyvfPvsixTtVpN8x374cKx3pd+jfl8rsViURgvPwZ7Uq/Xk82bz+eaTqea\nTqfJ5ktKfR6FNmHv8jxXs9lUrVZLdm48Hms2m6XjKpWKqtVqwZfQJv/b27xKoj9x27m3t6ednZ30\nHHmeazKZJBt+enoqScln4TvcB/A37XBbmee5Wq1W8hvIZDLRdDot9L/7JMa13++n/qXfR6NRei7v\n78ViUdCLarWa/pakn/7pny7tpLWAE1dIHsIH1X/TQThHHKJUBAeu6NVqNSkeA7JYLJIiRwDkk4iO\n5LjJZKJKpaLZbJaMigMAfrvB4brT6VSTySQpXLVaLYAkFMCvw/O4cG2fqP4MgKdqtZqO5X7dblfT\n6VTHx8eFezl4wtEvFgu1223V6/UCOKH/mcDcs1qtponLM/mz0GfxWfgNMGPSTafTdJ2Tk5PC9eKk\nbzQaaVLw+6rIk8DCRd9F57nqWv4Zf18W2JSdf9G5ZQ6pXq8XxiuOzyq5LJC66LndgWHoIjBxo4qU\nAZPLtCeCCZ8HZeevumbZXF91P5ePMq7fK8EeRr0ps9m1Wq0wZ7FLZcdK520ZgRzAbjqdnutvArvF\nYqFGo5HscrvdVqdztq0HNoa//VkcmEhLH9BoNJL/wLbQ/2778RMuEZSUjRuflQE7+rjZPFvpe3p6\nqmazmYJnAl138LPZTPV6PbWTfvdAzn0e7SSoZKzoW9oT/Ztfl/H0gBj/io9+EhB3gLdK1gZOVhkz\nV1SUCCVzpFn2wN6pDlBwgigyqDse32g0JJ05xvl8npwm1+ZcQI63l78BOKenpwUk2W63VavVkqJE\nR889eD4HMPzmud1ReLQSjW+WZUmpa7Wams1mgb0BOTP5K5WKTk5OzvV9jHrimM1mszQ5nOHiWb1f\nYl8xrovFIhmDarWqk5OTwiSj78oM+3Q6XRkdfdKyKk16Wad8GWCyShys87dfo8xYOuux6l7+OYbp\n9PRUW1tbOjw8TON8ERtQdq3LHFfWB2VO0Z8FvYlOpOy8y/Y38yu2LwYL8RnLQJIf/yRgcxWASZmU\nAVYPPNzxuO2SljrkOkp/wELX63U1Gg01Gg2dnJzo9PRUs9ksARJ3qOieO7xKpaLT09Nzjpd7u45I\nSmxtp9PRcDhMPsCBQ5n9KdMld9jeXxEAcGxZH2APZ7OZxuNxsovYSreLbi+jTjMePkawHwBAAnhn\nx+O8oG3z+TyBwQjEvH1+Lvf38XeGfpWsDZz4gJYZT0fdPKArFY4Qx9hoNAoUn6QU2U8mk5Q6kKRO\np6Otra10DgKFxjVRjizLEtXnk4BJ522ez+caj8c6ODjQu+++qzzPde3aNV2/fl27u7vn0iX+TGXU\ncewvZy3cOPj1XHmhODudTkqz0KeuhDAPtMEpc48cmCTev0wg2A4HElGBXQBP3q/uaGIqD8PlkYBH\nFFdB4qS+rJQ522j8Vzkqp7WlYpRYq9XOgcuyNrtReVLKIcsytVotjcdjtdttNZvNRAf/eaUsOl/V\n5pjCicwg7fUo8qLrMqcjOPC+LRunVePs7Yu2bdVzl9nCqwBQyvTanRbHEM27vfbghPnbarVSSt0D\nT/qZQKrVaqnT6Wgymej4+Lgw7gQ3BCcEYNgp/IPbGA/wuFej0VCn09He3p729va0u7uru3fv6lvf\n+lYBxHiKxO1wTJdIRV2LrLrP5/l8nnwLwIE+nE6nqX9w6O7geVaCT+4V2X8HAbBSLm4jYGHo0zKW\nCyAzHo/TfX3uR9CBXjAGjUaj1B9EuVLb18fJyISI1BPoF6Wez+caDAZqNBqpbgKFAlkS7ZFGWCwW\n6vf7Ojk5SU6PHwbdmQRH85ERoO0MynQ61d27d3V8fKw7d+6o1Wrptdde087OTjrfjZ1UrKvxSYRE\nJ1+WYuI6brABE51OJ01CFB82yettOp1Oaluk3SKIoH8iwPJnojYIVio6UcaI7xnvmNPlbyYq+kBE\ntSpCXaesAiqr2IB4jn+HnkfnBT3rn7luODApc3CrAoVVMpvNUkqtVqtpMBjorbfe0quvvprSPE8C\nQ2XyJPamTFzPpdWgCp2LYCY+r9sc7AzzJ9okjnlSn5UBi4vGn+8Z2wgA1iVRT8qYImcxnBXlWBwZ\ngQ/znDmM7s7n85TSkJapFmdxZ7NZsj1+H2cYCJy4p1RkNTwYqtfr2tra0u7urgaDga5du6Z+v6+v\nfvWrOj09TYGXAxO3ac7oRJ3it/cZ/9NWB0C03dPtMWW/vb2tWq2m6XSqo6Mj1Wq1VHsSWQq/N+1E\nt/BZHijzTA5u6DcPDGkn13Uf7deJ9gUfi/1eJWsBJ2URR9mAOgBAkRytukIvFosEOnB2gJp2u63h\ncJgAyng8LkTmlUpF3W5XnU5H9Xo9gRMv4qxUKmo2m+doMB9Q/j88PNR8Ptd7772XFPj1119XpVLR\nj/7oj6rVap1jI4j+YW98Mrlxc9Dlfed0JROW8+r1umq1WipO5RiM9sHBQUpB7e7upmO8FofnRdEA\ngEyK2Fan1RlTV+qYMuK5pSU4WZXC4nz6bTwep/OugpQ5H5fLOOA47tF5oSuec45swpPuA7BBd570\nLO5QpLMxxaB/61vf0o/8yI/o3r1758ahjBW8yDnH+z/pWdyOxGfxttBni8UyP17WRgciRJVlrN+T\n2hgDDI6Nx7lee/uvCjApk7JncGePrXb7TP8zt3k2D0QAGpPJRNVqVVtbW0nvtra2ku12Z0ywhW5i\nv6QlYI3sh7c7z3P1+33t7u7qzp076vV6Go1G2t3d1WuvvaY/+7M/03Q6PcfoxYJsrue/yySCOMTZ\nFQcMkgoByNbWllqtllqtVlqEcHx8nOYzz4ofAQB6mguB8fB+cWDjwMnH1ot30XPaXMZ+wtjAiNVq\ntQS6VsnarPmqCer/0wH+PYpdrVbVarWSg3MmAjbAacTt7W0dHBxoPB4rz3MNBgPl+VktCJG9MwMY\nfgdHeb4szooUtg/u0dGRPvzwQ12/fr3Qtq997Wvq9/v63Oc+l5AmE9cLQF0hHAVHZfYJ6MoelcfT\nIdKyGKler+v4+Dj9gIxZJZPnZ8VZAMHJZJKcGSxVZLicQncj5Mg6gjoHOzxPjGzoF66d52eV9JPJ\npJBvvQqyiv34KOITvuxaDhgwzBgmJj46EZ0wv8uKBS9qN846y7IEZmu1mkajkV599VX96Z/+qV54\n4QWNx+NzLAXyUZ1tBFyrjnHdiM/qc4h+jaDM+9vBAde5KMXyUcbWxy2yZdgDTx84wL8K4kDK7ZMH\nE94fMZCEiYYNKWOhOe/k5CQtSCAVlOd5Ykyk5Xh3u10tFot0DHaGa/mqFPwCz1Ov13Xjxg29++67\n6vf76ZkePHigl19+WScnJ/r617+eGENJKYD1e5WB8LL+879dD8rAidtM2I56va5er5f+h0Vqt9vn\n0usANoABLHNkEGMaKjInDsRoN230AFM6X/vJMVmWJQbfGaxVsjZwsspI8dBx6arTS16Y2uv1VKlU\nCstxpSV9RwdDhTmDMhqNCoWwDBJ1DL6s2BXaC7OkInXGtZ3+ZnBarZZ+53d+R9vb27px40YadBSG\nNsQIRFqmfWLNhRsCn3z+nSs9AmKdTqcaDoc6PDxUp9PRaDTScDhMFP3Ozk6KMukPnE+r1Up9FyMU\nV2D6gHbHHG1kg5xyjODQQd14PNZwOLwy9SZl8iQW5Unn+AQuG/PIapQxIe6cy6K9J4m3oVar6fj4\nOBW08dnJyYlee+013blzR7du3UqRZtRJb0vZfSKYiueW9eWqzz0d48EEn3n9lKfDykBVbIvfO0Z/\n0dCXtdXHN9a5OO1+VdKVcT6uAiZxoYGk1M8EjNgRZ6W5hzu6xWKhg4ODlN7xAAfWhBWJsGHScixd\n19BFZ2l4DtrMKh/sb7fb1f7+vl555RV98MEHOjo6KqT5ELdJDty2trZUqVRSAOX9dhkh0KDfZrNZ\nWlgR9YVAsd1uK8uylLL3gBOGibmAzXCGJj6b+1//oZbRAX0ZuOY3qRxA6WX6YW1pnbLIAeXE+Xva\nxsEJBbCgQtIxdLIPihubfr+vyWSiBw8eFFAvUTv0GPdm8HyZFuKpE58Ep6enGg6HiUrziKDVamk6\nneoP//AP9VM/9VPn2JgITBBWDh0dHanRaKTalWgovD9daWjrycmJ6vV6YovI7boBBOgx2Q8PD9Ne\nLyg97WWyMOl9nBAin0j3emU99CMKHp2rK7IDwNFolIAg7MtVk4/D5jggpT8w5mV7zUhFJ4lOO+D1\n65RR2xe11Z02UZh/x/UfPXqk3d3d0rlS1h/cNxbVMedie1a1z/UdXYxRnt8vgodY4M6xq+7nNmVV\nSuxJUSHXdwaRz914XwVggkR2QzrvgMrASWREAa5ZliW75Owv18uys4J59AqbBLCsVCppXxIvJPW2\nOcDzFIQX7WOXHWQ6ADo8PNQP//AP6/d+7/cKwTHX9lU93BeGoNlsqtPpaH9//9z2CNzHWTmENDwB\n7uHh2bsGm81mYkAkpS0Y6NsI0PCVnkWgX13oE/ezPgdIoTN2+BS3KVy7bCybzWYqZ/Ag5KLAcq1L\niVehJx7cnbsPIP+jcKDySuVsVUpc0uQFkxQ8eWEQyBy2hOO5NgPEPWO+2qvOAREuXItamLfeekvv\nvPOOXnrppcIyZ681QbIs02Qy0cHBQapYn06nunXr1rlVNyhEBGW+YinPc21tbaW86nA4lHRWdNbt\ndgt0IaCIPCGrMlBO9nBptVrnDKwXvcYo3pUVwxLzm4yBF5pxzOnpqQaDQUoz4bivas3JR2FPykA7\nfTmfz9P+ItFpAdiazWZhXwhnL8qcs9/Hj4/HLBYLNZtNjUajwjgxF/is0WjoO9/5jl599dW04V98\nNn9GB/+NRiPNhdimi8TBEPoSlyo6EPJVYJGd8H5dBS4uSvEgzPkI2Mv6N4I5b3ds/7olOmHXmfgc\nHIPNpJ/ZyoDjDw8Pk+PmGH9e3wQNB+k2we8LK+sRvKTkVNEL199Op5PASWTJmUfb29u6fv267t+/\nX7DRzqxH0Oo1H1tbW6XLoMv6F/vY6/XUarUkSe12W6PRSJ1OJ+ksAdpkMkmZBEAHARvp12azmZ6b\ntK+3x5/BWVZp6fdIZVHbSb8yn/Cl/DgAAqh5H/sqrTJZqzWPhqfMCDnIkJSQNx2G8wbZbW1taW9v\nL6FbV1KQGh2HAjlz4jUidKSnI/icamPoOtqJQfYVC6BZihdbrZa+8pWvpHbGCNcVnPMpaJrNZikF\ns729nZzCRVEW4Obg4CBVd3e73QSMqtWz3Vy3t7fTzn/0qRfH8hyHh4eFfQdA0J7S8bFzOpDnYh8B\nIgxQPUYBgAIThbHnc3ak9Wr8i4qrPilZxWRdFqCUHeOpEXbR9XtFiWm9+Pmqc5/UvhhdMlc85ZPn\nuV566SUdHx8X5k+kvLlfTK3Ezy4q1uX7sr99eaWPCY7Ha9T8ntHJuETgclG7uGYZExjnt9uKSKl7\n361TygJJt3s+z6MNkJbzE6aKbRxwjMfHxynY8Wt3Oh21Wi2dnp6mFIdUXDKLzYTNRTxlBiPMs/h4\n++IDxNkZ2JMvfvGL+t3f/d0CsF0F5ofDoba2tpIPIvijPs4Z0LJgIcuytLqStpElaDabyR/AZE8m\nk7RNBM/M3PQlvzGthb67/+Bzzxigf/gAgnEHNwQYgBDf0M6fId5/lVyNUNMEJXZnE42aU2mLxUKD\nwUCj0Uiz2Swhyd3d3YQ6XZmOj48LkbbXr0Sa2Vf8OICIjIu0rDRnJ9ZoVFBQPj8+Ptaf/Mmf6Id/\n+IcLhYw+mRBSOe+//356DlYE0RaYmbjXC0rHLrGj0UiDwUDtdlvHx8eJTen1enr++ecTc9JutxPQ\n8sIqKEKvDi+bpB6JxsjW0f1wONTp6WlaLeVGmPF0YOaOmijJXwmwbomsR/xcejLl71EM53q06sAc\nAbhBkT8JiFwkcQw5lw2x/BgvHsdYtdttPXr0SM8++2xiQlaxpW74ynS/DCz4/57Omk6nqQZqNBql\n+3r/cB8cV7wHz8hcjP3ubeBzZ17KWBX01schGu04Xv78H3X8vhcSwQbithEpq2vydFWlUkl7Lrl9\n9GthTyuVSmFvJt/XiPGmj5xhdJAEQKC9nuIAADjzTHs4D1A+GAz06quv6s0330w6X8ZM8kz7+/sp\nJQ6r74XAnirx1D7PwHUbjYZ2d3dTkMozTadTHRwcJFs4HA4TewKj7PpJCkwqrsjxOci9OZ82tdvt\n1D+AJHyu22KCXeyTL8xwpsTLA1bJWpcSl004qDaQ7Sr6KyqxVyk3m820vNTpYwbWV/I4OHFjxiAA\nTkCMzgI4WvfNfvy+7mRQqkajoa2tLb3++uu6deuW+v1+ul803NKZcet0OnrqqafSUjqiDBRDWlJn\nbEGPOFW3WCz04MEDnZycpM1wFouF7t27p1deeSXlKD3qJf+JEd3e3k60oufH3WH4+nX2hwE48R4d\nH7/T09NEsUpKBV6+F4tPaqhPwIkX565TLkrp8PdFwEQq7o0AG8H5q4SUT5wzq0DRKsATgaY7UV/O\nyLnOJjIWJycneumll/Tw4cNzqbYyRwujiLgD53tvY+wrImCCBQASbWw0Gjo6OirQ+R51e99Sl+Wp\nhCjOEHrQMpvNEqiJ876MUYvpZ38mB5/rBiYuEUxI51dv8NsBXOxzfzaPvCWd00NAijN1HuC4TvC9\n38tTOlzfA1Lvf+w97cGpSmfg9/nnn9fdu3cLjtl/JBVekwJrwfJZxhVg1Gq1CiUBrh/cd3t7OwVp\ngJLpdKper1cAcWVsHs93cnKSGJdY08RvbDx6zSZwzI12u53OZ9556hI/DChxP8L96DPf22aVXLnt\n66UiQHGH51GEG9o4URgkf3g6BJoVFgCwUMY2oJwMJk7RC2ERFMyLr/xYR7A+Ed9++2392I/9WFJo\nABGTxCu1O52OKpVK2s6Z1UZ8P5lMVK/XNR6P1ev1CuCD9h4cHKRnRrj3/v6+nn766TQGTq3TNz6x\n3QBHQwwy5jsKcaFyffURE86NuqN1trEm1+lRASm2uNvvumQVY/Jxo98IMhyo+GcOImKUGKN6b5tT\nu9L5FwF6VMw9ABOuV5JSpIkOHh8f69atW+eMbhlw8g3c+O3Ax9sICPD2NxqNwl4U7nRI/zEfvG/L\nAIAHKPRFBEURKHoBL4yi93e0d6tW4bi9uqoSmWGeDefjDGZkFaRioSo/Dgax7+48qesjHYy4j6Bt\n/oONQDexydPpNNXSRUDlLI37FnZFvn37tu7cuVNgDZwF85qmyWSSbC2+hPNgfTqdTgqsmSv1el3d\nble3b9/Ww4cP0xJpQMRisUjpbhdf0eR9DuD2eeNz3/2cAyfPKMzn81SM66wR/ppnp46x3W4XFq64\nT8SGx/a7XLm0DsIDOzXkfzttRCdRiOObvDAIsTgny7ICzcY93Vj5oPG3G2RP/9Tr9UJxKYjfn4ff\nLL2sVqv69re/rdu3b+vll19ONTAMphsBT62gVBhjVu7AZjBZAQNHR0caj8e6d+9eyr060oZmRbiH\n52IdEKKYPE80PB7d+KQfj8eF/Uz8xVqknqD+nA2CeiUv7crtIPMqgJMyWQXGL5Pe4XykzJl6hBnv\nW+YAy66HlLUHZ4tRpWao2+1qPB4n44ue0vann35a+/v72traKgCmMom0fAw+vO/cAWCsvQg49h9t\nj8yaj4kDLP6OgRESGQ9vqwcnDq4cTHsA5e2MIOiqAZQYbTswkc4v+faatchaOePktRNeJ+Q6je45\nW4E4YPe+5b5e4O/to/6j0WikGinpzC7B9jrIwOa88MILun//vg4ODs6x/FzfAwVYBHyXB7L4ljxf\n7inVbDa1u7ur559/XoeHh2l5sLOZ9F2z2Ux+h89cnOHwANzrSdwX4GM4lwDR0zP0ny9p9g00fczo\nex8b/MOVBCceOV3EnjiSc5Tn1J2kROujBCA+SYU8FwrCeTi+sloFH7iYX3ZE7jQ6VB/sgkdSRAcY\nyXa7ndIZ3/72t/VDP/RDkooRWKQ8QbTOklDVTVThWzqPRiPN53MdHR3p9ddfT0oDWHNWhWv6+MS/\nHZzEfCbfuUGu1+upDZ5j9T5lzGjbYDAoGHtH+uRcPcp3pukqpHXKpAxQSE92Pm6I4rER2Di1G89d\n1aYyBiMpQdUAAAAgAElEQVRKHAvGuFKpaDQapT6fz+cJqKCzTmdftE21t8FX7EjLZdHoBzaA4IDi\n9gh+ot5GEH1Rf3hQ4Ncs+wyJKdnoqNwplPUvziPeOz7LuqQsRcX/rt8OJImSfb5zDvOYfndWzuc8\nOuWpg9gfDkZivwFwPGrnOrPZLNUlwmjgNwicSMXQvsFgoCzL9OKLL6b3taGzrj/cx5lt6kHm87n2\n9/fV7XZVrVbT3iWz2Uy9Xk97e3t68cUXdffuXXW73QKTT78yt2GkAQ8O+qL/igDK/Zr7VUBPzExE\nNorrkIp33XYw6j7c5wHB9CpZ+4v/yiI7JKZAYuEZD45BJO/u1LMXQ8WqbCJul4gIIyXujhgDjBFm\nmS+5716vp8FgUIg8fQK1221Np1O98847+vDDD7W7u5vaTsWztHQKTFhf3VKtni1T29nZSVvzHx8f\nK89zPXr0SKenp/rGN76RlNHBCG1xqg0nAOJ1YIiB8NwvCsh5fAfz1W63NRgMCikemB6vIXHnw1h5\nvtdRdqTLYc2uCnNykdO/bHrHDXd0WDGSLxO/x0XO5KJniMbeAwKPMom2+v1+Ai/V6tk+P+TLy+4f\nGSUciUdfnqpxB0kRNE4h2hFsA33HRl2rntX7w+/v93XmA/12QB7lMixIZFw8TepA6KqwKBH8+f+u\nn3yHfYm1fW4nqtVqAgkONKXivksXAcxoE/gd9Y3jvJ5jOBwWbJ37iUqlkuqouOZ8Ptf169eTrUfX\nCL4Wi0XaJsHv529Pr1arqT5jOp2mgPrll19Ws9nU3bt3Cyt1XAf5jX8jpRkDFPwshcduR5xJIUMB\n++gMPudFEBHBhjMiMDDebr837QJQrZK1bsLmNGyM5CJ1KC2LVBeLRVpKykP7Rl7QdSBSp8F8NYsb\nD1dkAIpUpOgioHJkyvlON2NonXImpcNkbLVaGgwGeuONN/TZz362sKKHIk/qSxaLRYpUccSOoKG4\nK5WK7t+/r4cPH+rtt99OBhXlYZK6YsGcOE3vRcAcByUZx8z3KaB+gD5sNpsaDAYFI+4Rtiu2KzXg\nj/YRdaAL/HaEfhXE9Tu26bLARFrSomUOq0w8aoq6LRXBwJPa5OeQOiMf7oC+VqsVGJTZbKatrS0d\nHx+r0+no9PRUH374oW7cuJEcE2CWZ+FllDyDt6fMwWdZpt3dXR0cHBScSmy/Py+6VdZ/Hr0zTyI4\ncFra+xsQ7f3vQUCZeFtjPU0ZEI39sA6JtvgiHYr97qwvgQnBCk5SWq6SJEjyKD8yBj4WHvn7/z5u\nnOPpNlZl+UoZT7VUKmepaGwiQPzk5ESj0Uif//zn9eabb6bnunfvXmJ5YPPzfPmqFBy8B7oA7Ha7\nrS9+8YvK81zD4TAxJqukVqulnWdJvWAn6QNP53hKB7DowTttJlB0hhq/6+Prfe/pe0/NegDt/hHm\nijm58hlXfvMJSYzkfEBQSJ8QGKL5fJ5e0+479oEevXN9MxxJBQcdGRE+j4Y8IvM4MSJFyXf8Pjk5\nUafTObcklxc4feMb30jfZ1mmnZ0d9fv9VGtB5XSv11O32y0t4kUxxuOxPvzwQ925c6ewj4qkQkTo\ntB3KNBgMEmJ3JgLwEAvSJKV0kjuvGDWBzmezWWFVldO49JWPlwMo/5GWS5yvyjJil4sAyqrPOM/1\nCIYrpg1W3bNM3LhfxOqUtQ+DHR2In9/tdhMobbfbaVdh6PTnnnsuFW878OZ6MCtPYiCoc9nd3dWD\nBw9KgUlZH1FM6cxLdKb8dqAgFanwstSaf+cAIwIWF783ztCvWcYCXDXxKNiBoAc9/p33PfOdBQpc\nz5lSaWmnfTxiH7s9iAFmBCnxtzPnvm0C1yWAOjk5KWzQSZp+b28v2avT09NUMEvw6YCYcwFlgB3p\njEF/6aWXCqlRB3VefO7MPas+T09PE+jzVZA8j+vofD5Xt9st2EzGIII0fGmWZYXNHdFZZ7kARowL\n13f77cdh3y7S77W/ldjpprJjyj5zcACLQirFQQcTBcV3JOiV1g4wUEhvG2jPAYBPNraAZw8Vvuc8\ntnBn4rVaLY1Go5RzbLVaevDggb7+9a+nDdB2d3fV6/XUbreTIsxmM127dk1bW1tqt9vq9XqSzsDI\n1tZWymV+85vf1P3799Vut5OyQI26ktN/GODBYJB2t6W9zkr4JkYwI0xGJhrHepU2+xSwp8l4PE5M\nCjseSsV9ZRws+q690pItisDEt4e+KrJKj1exKjGl4MeUGWHpyamiaLQv2+YyQx/vhaFZLBapQHZv\nby/VGvV6veSEfI4DumDDKGr1wm8MPgWK3W5XzWZTH374YWqT0+QxWsaI+1b+0Un587pNkc7vRRJp\naY4p6zsvGr9IcHS0fVVNyrqZE6l8pViZDvtxCEtZ3f4QWbte+1g4MPE+8L7y+7ueRlDiwAldhZ3w\nqJ40PIw2tvH09DTt8orNabVa6UWB7EUlKYEF7CXpTna13t7eTun8mzdv6vOf/7yOjo4KBcHurxys\nxr+r1WoqpPUVb85wRh9G4O7jQ0Eyxx4fH6vRaGh7e7uQPpKWQI7+xwZ4MXwZ0wJj5u27qE5wrW8l\nXmUsLzK2lUol0cV0pufb3LBG+imiT0eUfh6f8T8d65/TFpiAR48epTf7gmop2MMAAVLq9br6/X5a\nCsY1j46OUq7ywYMHevDgQWIQyNEeHBykXRO3t7fT81+/fl2SdOfOHd2/f1/dbje10YEJEz72P0rG\nToP0qae3EGeh/D0PvnqJCnOcTa/X08HBgR49epQUlHvQTq7N2Hhxb3TMXggLEIzvi1i3lIGHyGJE\nI48j9vyxg2K/9mVYkHj8ZQEN4gXftMVXnAAQK5WKHj16lPZtIFgYDoeFVCX39iDCgTBAHIDrugtw\nR38jHc08jlE8+ua1BJ4CKBsjaclq+BzlnAhCaKuzQ2UMTexzZ6ViWkhaOqCLVjV8EhIZ7chWxb5z\nACcp7c+E7cLBe996MMkYx927nen2NFxkzOhDHyv0jF3EnSUhTeLMPLUg6OpoNNLOzo46nU56gSw6\nOZvNUl0TOsdqUFI8BwcHGo/Hab+Rev3sbchHR0eF9jq45n/3QV6r4/0FUPEl0PSXrw7ylBrP6nOT\n3ycnJxoMBtrZ2UnAif5kPBwwck3aHcGVA8wry5wgbpjjJEbhPXJgoOr1unZ3d1Wr1dIyWe9wp/Nw\nbjhfN3w4uGh8+Jx2uLLQPo/W7969q8FgkJwpqSYcNxR2o9FIK3qgC3n3Qa/XS89AAZazP7Az77//\nfkL2/X4/PQ/LbA8ODgrpK/rOWROcuhtxd4ZQmBRsuRIxoWNE7wW0gJc4eVDymHsFxERlZgLE8XGB\nsmd8r7qsmowOWkiHRAcaj7/oeh+1HWWO06Mfj+59LwV0r1ar6fr166kYG0cN5Y3RIjXDZmdOfTOv\n/EVwXgzutDPHe+2VR8HofZkRjMEIDqqsb5yBuagfcTa+Gs1ZFq93KTPaZZ/Hvl+nRHbJ+5r/Oc7t\nt5/PuOLcPOXs58OqYBcYW16p4e2QVKh3iixJDFCr1eUW8JVKJTlfzqemDmACi02qGyDSbDZ1fHys\nnZ2dpMsAFrfZbGM/n8+1s7OTNjID9LRarXQe/YD+kGr3vsc2AERgaTxzEAGHVEwvwmDAeESw7+eR\njoUB9Wu5T3Cd8Pussk+XWVl5JWpOLkrpeNrB0THROIM1Ho8L18IwudP1LXMXi0Vh+2R+s7wJRfbv\nYtQonRklgAHtYldA0jYUUR0eHqpWq6U3THJtEDaK4G/JjHUfTPLBYJCQO9ElEUTsz7KoGwNAH52e\nnqrX66VJwcukKpVKWjkU2Rb6wHO3cZyYaDBGe3t7CaBgtGNeP0YKMfqJUZIX+l5FuYgddFDi4kWZ\nZZFz2TVXOTc/J0bA3hb+xlnHdnlRrBt+xvDw8DBFkNKy4FBSMsLs3oxjYjwx1G64HRDx0kFJhcLa\nOObxfwcyZRE+fRuZJSQypav6lmNX1YysoubLzr+qgm0pAx9Ska3wc/gO0Opg1HXJbbczJw5Q2fnb\nARu2OjpX7ulMMedi+6j/o8aCN8pLyxqn8Xic2Hr8xmg0Urvd1nA4TLuosq/U8fFx0mX07+HDh9rb\n20uF4tPpVDdu3NBkMkkpTKkYEABMYD1gL7AJ7veYi7G2kj7gu7Ixc9uLuC6T3nLgF1nQMvvlvjKC\n1jgPy+RK7hDrx0jFqI3vpOUab5TT9/Hw853aoqCTDkP5nSHxIs1IrcXrMYCgUBSY+1AIyq6trDBC\nydnvBDDjRaJOczabzYTqfe07hpsUEbvIes6btnt9DUoMWzOfz/XUU0+lGhcYJU/vuDEo22/AgUlZ\nZC6dpYF2dnbSpHIWqwykeiQR9aLsvldBLtJr5CL9jxP4spT+RY7T7xsdSAQpzlxR5MffjMd8Pk81\nBLCDRJvHx8fJUPMZG0Wx/NLrBciBE7F2u920AohrA0x4TvonRuw8B6ADXY9sifetBygXjd2T+tf3\nybjseCDObn6ce3+vJQJS16OyzxH/HsaEwMf3hHL76nrI+fRrDMY8gCxzkFyHe5AC5Lvt7W09evSo\nsI0COt7tdpPtARhMJpP0Zm4CLJ4DRoLXamAzAeXxeHSaa8d5Tjt4dtrNrq2NRkOHh4eFNCV9hs57\nutT3j/Hr099ufwEWznhGm+TnRabQWZkyPWLMrhw4cbS2yhCUfe6Dx0N3Op0ETBx4+HVitL+KGfBc\nXYww4yT0CB/jCZvB24aZeKenp5pMJglJ93q9lN6BOoQBAnF7kSnUHceDZllr74WEgCFHql5j48vE\nPNed57meeeaZ1EcODKiDAD37mn3GwiPUaLwAN0TDtJH2eZ5eOq/ADtQ8Gohg9aoAlCcBE5eLnOFH\nqTPw9GdkkWI/rWJMytpBv3e73VQsyPGMKztsbm1taTqdam9vTw8ePNDW1lZyJjgj0oaMv+95w28v\nbGZextVKOKYItjjGI7dovMtSxfTLRWP3JNCJrn8UiYCwTJ7E2HwS4vMxMiPuhJ4UcErLvUwcmOBM\no47CcuCkfZmx1+hEhob7wS47WJCUmGI+w45mWZbS8u6U8zxP9YOj0Uj9fj8Fb7B9HvTyDNSxtNvt\nxLAA9LHx+C/u7WlGZ9NY9gzYIPWOfXYg7rZdUtoXCB11QOW/y/rUt7BnjGKhuAf3PLsDTtcjZ7Iu\nmk9r3SGWvy+KHpFIr/rDoTSkTzyq92ugiO6kWbZFDpJzy6Io/xsUy3UlJeWrVCoFxQe1eoFpp9PR\nwcFBUhKuQ3SaZZlarVZSWgAF92S7eYw3IIt2xpwubfZUjkcs9XpdN2/eVKWyLKD1zY+IgJlI9B+5\nT+7rrJKn43hOJrAj6viaAq7hNKRTvlCUPoGvGnsiXc75X/azy4g7Ywd68bplc6Psnm602JHY91Hw\nczudjsbjcVrl8NRTT2kymaTjOA+WLIIHxg8g6rl2dNyXRbrBdMfouhVZIOYiALmsRiGCPBfXxY8C\nFvy6Zd+tEmc2r4I4o7cqOFh1jLS0Sxzrf2MvYhDoq0wcYPi13A54m3CcXvsmLW13v99P774hsHTW\njWCSth4fH2t3dzcxKg5GHJg6U0ib40v08DOkOZ3BJ/h1W0cKp9vtajKZ6ObNm2lxgfs7MgnOtrhP\ni0xiBJ2eiikLCH1MnIkpAyc+Dv75YrHcr+vKMScurryeM3dlipM6Oi83RnyO4vi5rthScdc+BnQV\nq4OCMtherYxzxjgTITIQAAVSM7Sh3++nzdV4LpSRiNKZCNDybDZLG8yRs5RUoBQpFAW00S9Ohzpw\n6Pf7aWmyrzyAxYGy3traKuSMQfoRPfOdgzNH2/QvYxiBifc/Ch4NkhuIyBZcBYlRedn3Uvlqh3jM\nk8Rzyu4sypzEk67rjpSxAhz4d6RtML5ZliWdGwwGBRaPa0GrMzdOT0/TCgNnyDD06KB/54aUom2u\nT/uYG9S/wDgins7x/lgFTGJfP6nffNwvut5F3zmFv27mRDrPUMYoOR4XdTo6ObcV2DBnyNARt+Ow\nLewT4sWZzoRxnAc12FRJqViVZb3UMXkAiW/wPakcsLhjH41GyQ9EIE/bKY7lM3SQ15BQaOsbEqLz\n6DI+ZzKZ6M6dO2m7CPoWf+DbLPCiWOaAsx0+l7zvHdTgT+IGb5H5cAadcY/gB9DHWFw55kQqN45P\nmoD+MJFG9EIg74gIMDiOgZ/NZim/Tb2FVFQMJhMrZvJ8ufeGdAZ62u12KrCFVfD8M9flPQjSkmnx\niQWLsb29nZbfwWYAAlA8FJxzcBL0I5PXKVQMHudyzvb2dgIqcXI7kyIt30nkII22OzCK6R4mBYDJ\no5I4/vSxT4BoAHnusvqXqyBPmnzIk0BKvM5F142OzKPZeL/LCPrebrdTnRDX9b1tpGVQ4Pn3WKcC\nYOUaRFC+2stX8qCnOHEAPufgvAHcvuSRa/imWFFgAr2fvEgWiYAt9vEqoyxdbNcu+i469XXKKtC8\nirGMnzlQZpylYv0QOuLP6oGkM8ySUv0eQvDlRaWIA2VvB2kZbCvMntsdD5YJPvkcxh37SOEsjp5V\nPezc7bpCyh8gDQDHXvL3cDhMby5mKXS1WtV7772Xns/7jDlLX+ELR6NR2qoCJt99Zhwjntn3BJN0\njvWgn3xbB+ZkZGdYVu3nrZK1bsJWZpQR7+zIoETk5wgwrhpxY4rik3/DuVLkhLFzh+vsBfcmP8kx\nAJ5ut5v+n06nqeDJ834MNNvv53me3lBcqVTS2zEpbEVhQdQs6WLlhLTcCt7TIEwWFNEn3WJxtmaf\n5W/VajWxOBH88Vyj0Sg9U6fTSREAtTNuSJl4DkgiW+KTxkEef3uBWpZlhaVnPrb04WWBwDrko4KU\ni/6O/0dAs4p5ig7uojZFJo/8uBcxSstISSpuWEa0yVyLbAS/iRTRHWfYMHCkDQGiq5yPAxOeHdbu\n4OAgLRctEw9mmN+eNvQ2x7+9n1dJBDNl//t13J7F/ZvWLatYkzKWMDo7ByU+Z3H+XuDKddArB46S\nErssFdM53ja+A4QAJObzs51YDw4OUt+SivegFXDBWJAmRyepccQmHx0daXt7O92j0+no3r17un37\ntqTlTso+T/EFgHgCxHgcz+8rO9l+wrePR2ByAFNem8jzMJ+kJfjzNqzyzz5OPqbMN77jGq7vHvyX\nzWOXK8GcXIZFKUPTHq04nYdT90nhk4Gljy5eKOoTMHa8t9UroXd3d7W/v5/W4g+HQw2HwwQMKpVK\nYkJms1kqlIKNYRBxuIAJIrvF4uz11ZPJpJAf5b0KPmkdXBEx+DMTvbIMjt1m2TAr9g1g7uTkJD2T\nMyBMJp4hKqOnG8qYgXg8EzNuj8x5XCfWr1wF6tslPjPycYFU2XkOJi9ygKuu59fwaAn9yrIs0d4U\nCqKPUjHF6uABnfS0ZbPZTICaSNJrqTqdzjnGArDCNXzpOfl2GEvmK20mMqVoHr33fsRIeqrzo4xN\n1OmyICuOC32MREaG/igbq3VJBCDIRcA5SgwkY82Gz3G3Gc4Ee32a109wnrQEK86IwQB4vQV2BhBN\n6poiWAJGQDOpCIJB7xsYPIpUt7e3dXR0pGvXrhXS+ZJS0Ij+NhqNtGzZ00gEtwSasB71el1f+cpX\nCr7OXwXgfVCtVtOKUPqY4NHbDyBxkOMAxD9jTDwQ8DQSx8Sx8jovr31cJWuvObms+ASWzqNjqfge\nAf7ne1dEvvPqZ2m5ix4GxH/8etXq8i2a7hwZTF8JxL29AHc0GiUFJU/INsgc78W6FGxVq1U9fPgw\nnVOr1Qq7DfqKCPoKpZ9MJundPc7gnJyc6KWXXtKNGze0WCxXUrix98nMe3RwLkx2B4kOXGKajGM8\nokIwJvQV+9JgaLyv+Zxr+9heFYnMHvLnASurQMeqlIOfdxG48b99nKTiC+58fjgFj0Nx1gFwzByD\nIcSQslvmfD5PdSc+rr5TbtRFj6K5J22nLVDisKGRoUA8beTR/GWkrP+ik/Bcvh8bGa0I+K4a6C4D\nSB4suK2M33GuB3vSsnCeMfaaDw8U6UPArL9Kg3v5+EXWfDqdpqCPdna73WSjfFsIGAq/frPZTMzJ\n0dGRut1ugUXMskxbW1uJbcnzs4JRtq5Hr7HnnItuVqvVVEDO/XkdCiuBTk9PC6uE6J+oY8wNB1je\n3+6vmMdS0UZ7n8eUEcEqNoF7SCqAHnQ7gkjaGEFSlLWmdZ4k0ZnxmS9L5TOUyhEgiNdzaxxPkarn\np924cLwbLYwyaJQIbTabpSVdZR3uu6JS40I6qVarpRQOk4Tc4mQySe9zkM62t+cNlr46idoWVzzS\nQJXK2S6I/X4/0aCAB87p9/uFCNwnNhMLNobPcABc0wuLnamJURH3ROgHJhy/OY7zPFqJxtrH/ipI\nZCSepO8XgZVVTvMiQOMGK9K1q9icVeLREzVKDgYd9PiSxvl8nhg+38tHUgF0Y8SZp1mWFXLavtTU\nGQV36g6YYEwAUkTDHkFf5vk/Lrvl4NCDk7LUtB/jjCO/rwooQdxRrZprZdF2DBhdB9FTZ8M4x1N4\neZ6nFAVsHPrAvTy9g+33PVHYdZv3NO3t7Wk8Hms2myXWDp1DCLZYgXNycpKCRWqqnMVGz2u1WtrX\nylND/loHBxBcA+Z6OBymYI1AFtDCPiv4ntgXzvhwXR877ycPNPhfUiEdy+cOQtyX0E8OLl2HncXx\nY580v64cc+LG2HONvrTQaSgGzjvfd9tDKdyQSUpvNN7a2kosCPd3IEM7UEoUj/zlaDTSZDLRw4cP\nU1vYbEdabjmM4Cyazaa2t7dTzcbOzk6qQwGhc42jo6N0HRArUaFTZIAVBx9Q8bQ7bnglLTezI/2E\n0A++PDv++CSICk/7vT+YbL6XBeMYqWyPhnmmVVHRVTPkZRNv1YS8iFlZlapxwy0VV6gxHpHGXgWW\nIgDieg5AB4NBum+lsqzLiECM/+fzedoskPomSYWiPwclTnsTSeJUInihnZ7W4xlhYnguj8ovAxbj\n80Qm6knfl6Vo4jh6P3LN2P84m48DkL5X4kyGVEyroytebO/Pgi4CXklre2TPGNI3/uwU4XPt8Xhc\nGNfI3viSWuwX86VWq6UFBzAdXA/QzDVhPL773e/qww8/TGDjpZdeSky7gwdPYfhnMB29Xk+DwSC9\nc8fZM3SAII2XosKMw6awGg4d8XmIX4S98RoQZ+s8PeO2nLZEW+DpVOw540nw7eO4WCw0Ho+TL2Y8\n8SOM50VyJcCJd4Qr+3g8LtQsRNoe6l9aTgBf5hqjO67Npmg46Nu3b6fB9UIg2sObd8fjsdrtdmFn\nTElpTTsG1NMZPpnzPFe/30+vrH7mmWeS4X/77bfPpVycBfJ184AIPtva2kpLKh2hSkuAxhIz6gAA\nB61WS71eL1GcPgYxveVOg+MYu0jX8n1kNHgmLzhD6HtnsRjDVqulra2tc3Qw4814XgVZ5VAu62i8\nT70g8iLWJEayGC4MEQ47nkv/lZ3rKbSYI+Y6vuwR4+VV/b4HD0YaHZpMJmq324Xoy1NH/rlfP+qc\nz3mYRNrBSjwPUlYBvstI7PdV59Ie/i7r5+iEow0k8Lgqeh2BWaTpoftd7zgPR+3Le+N4eGTO9+5g\n2SyzVqvpvffeS9fzY/mbtDN2L8/zlKpZLJZbsnc6neQHsEfs6IrOHB8f6+7du2m7hfv37+uP//iP\n9dprr6ler6etF2ifB7j1ej0x5dvb26mehQ0LY+EpOsGqzqOjI7Xb7UI9yuHhYSpQBwhhDwGFnoql\nb5k3/rcHmnGOeHG6v7xzFUiNjDhz0Oc03w2HwwQIV8nawcmqaJJNa3BMdAAdSt7R99mIOXBfT01n\nYhhx0MPhUL1eLympOwOuNR6Pk5ITAbLja7VaTecDlDzSY2ICiNrttp555pn0Tp7r16/r2rVrmkwm\n+s53vpNQLytnWKNOesXz5+QiDw8PUx945MizMglQVq4Jc0TKJ8/zlPd0gOMvIkRZMUDObnku08Gg\nVCzQ4v84/u6AmQyMG5PQoxPuzTLuq2TEPyoQcXEn7RGNf+8GpUwwNN4nGPoYqbkzdEeBzsQVI+ga\nbCJtxOiwm+y7776rF198UdevX9f+/n7STSJBzvWi2Pl8rna7XajLQhecKfE5DdChpspTIjgqvwfP\n5IbYDbvLZYCLsyM+Vt6fMU3jUWaZHjgI59UA6xR3XogDAkCD70Lq5zmblWVZYkH4H/DiG08ylgBc\nD1xY+cJxpLG9MJ9xrVQqaZ8plu26ffO+JcVMbcWNGzf01ltv6fbt23rjjTf03HPP6eHDh+r3+3r4\n8KF6vV4BRJ2cnCQQ5Kzlzs6OHj16lEAMthpdRtc5j+fZ3d1N/cfxb775pnZ3dzUYDNTtdlMQQNvp\nS57d7UkEc4xfzD4wPtw7BkfYBNh8AmBshjOoi8UilSLgQ1khe5GdvBLgRDpPZUMVoZDecZ5KIOXi\nUT7OHHCQZVlSQOiuo6Oj9NnBwUFqA/uXgFIlJVTtkSPpHKebmSgOFKTlAKE8eZ7r1q1bunv3rp5+\n+mm9++67eu655/T2228n2hBFopaFNtBPtVpNn/nMZ3R6eqqDg4OU+sG4O4iAYaIynPbwHhNyo3me\nazAYaD4/2z3R33SJcXVQUubcpCUDwuf0mdfxOCDxFRmkthh3vvNxB7iyz8H+/r76/X4hPbdOicVf\nUXj+mMZySjTWPMVjPOLw6BqDzrWjM3FnKZ1n9kif8T15dahmZ1PcKANWtra2dHx8rKefflrb29sa\nDodpGSdOixefsVkVhpPrEyT0er1CatB1340gTs2fqdFopP0hKGCMYxMNLp99FHAZAUYZ4PAl2D5n\nvJ8d4DN+6MBwOLwwwvwkxB0tvyOY4ofniCCS60hnzs8jZ9e76Fh5fk/N+EaRnEtRtYMeBzr4Ed+b\nBL0m6CMAbLVa2tvbS6vLHjx4oPv376clvF/60pf0W7/1W3rxxRcLgB8g7Xat2Wzq61//evIXtVpN\nO7tsnBgAACAASURBVDs7unbtWkGXnfEg6OY53WYSrPtKTEmp/ZGlcxbSg3fsjAeWUnFDRw9IPbD0\nNB1tYMsBSeeuSWDMpnHb29sp6F0lawEnh4eH6Y29TmlJZw9OjhADy+C4USbXhXKhfOTDYDF8tUee\n50nx/B6wGjgG1q+DtGkbqN5Xk3hBZ7PZTO1i0pAWIl+5u7uryWSip556Srdu3VKe5/rGN76hV199\nNTEhGEicsG9RvL29reeee05PPfVU2iL84cOHOjg40DvvvJNYGi9clJQAGn06nU7V6XS0s7OT6l7o\nP69TcSqdAkP6pSzKZMJzz0qlkgxGs9lMfQQAZCKgtFyHQjUHOCcnJ2l1Eud6cTFb+q9bYgSN4Mil\n8++J8sjaDQmfx4nu7BjAZLFYLjnEgMT8sUf50SHyN4YLA+ibSmFAY5tY4k4kR8rxxRdf1B//8R/r\n+eefT5sOOqMYgRjOodfr6eTkRKPRSLdu3Upvo+W5HESg4x7FM0/j/j7+3FzPo2g+9/SE9xvnlElZ\nH0tKqV7GyW2e972L2x2PRNclOzs7CWhGUIJN9b6LKSv6nufyMWDuR+BAWkY606+jo6MEWEl90AYi\nfVgWZ3F9/B1E4VjRl8VikcCyAxnY2Z2dnWSb3n777bQNA/bLyxB4Zkn65je/qdPTU/3ET/yE/t7f\n+3u6detW6qNnn322MPeY217/B0hpNBr60z/907QCk5dy0q+k1bgOwV2/30/z2Df29BU39LkXtLo+\ne/qH7/GdtN0XQdAmlj870Gb7C16HskrWAk4Gg4FOTk4K0bm0ZExgC6SlU3KEiZMEnKCY5Jw5l85j\nkOmkavVsKTDvVvCqbpBirPp3qtKdPk7CC0zpcAykU8kffvihXnjhBd27d09PPfWUsizTF7/4xULa\nhAnJczJxms2mrl27phdffFH37t3TM888oxs3bqT2fPDBBwkY0RaPEpm49OnNmzd148aNtINhr9cr\nMBNeGAZoIZ3FVsxx1ZKnxqjNcYbLAYhTmp6qAhy6k6xWq0kvYIFY6QRrRL+tUzzyLUvXSOdXmrhE\nx8k1/douXksiKekyRgrn65FPZAn8mk6FM6e4j7fHDSnXbrfbevrpp3Xnzh3t7e2pWj1b+s7qCF+m\n7sCHe3o+/s0331Sj0UhzBeDuTtpXAEUn79vnOxPhgM/ZO4yug3f6M/a5j0MZAI2sGHOOKNiP9fnp\nTjWCkcsyOd8rabVaqtfrOj4+PrdZI4GMs9e+ygqnzzi7HvEdNsOvQf/AbDiwdSAdGXW+YwxwwlKx\n9kJa6rUDAvTy6OhI/X5fr7zyij744AM999xzOjk50Y0bN3Tnzh299tprGo1GhTe5+7Pmea7Dw0O9\n++67unnzpn7zN39TjUZDOzs7SQ+w8bQNn0NJAv6KawLO8Ce+pBow4i+dBbQA5lh15N87MHFGPLKo\n+AY+A0iyRQD3935mXBgTFkPwve+iHmUt4GQ4HCZFu379emHpFvtoOI3ra8Sl5YubcIiSEhrDsXGc\nKz3Rvnc+CuBK71G9O1sMPwrBdWLxEeyJ5y4p5vzggw/U7/d1eHiowWCQHOvt27fTLrXxWaWzAe71\nenr55Zc1nU71kz/5k6pUKnrnnXdS5TdKAENBPzJhvU30zXA41OnpaaK9URb2MolAkLTLeDxOb+ck\n5+n7VDDJPDVE7Q59DPihXUQibDaH4/IIObIJTAqe6SpJWR2B051OkyORSXSJDiqCFa4VDUMZMFnV\nVj/ewQ3O2hkEDBXOd3t7W2+88Yb29vaSsWcDtP39/TRHfG5xLwBvv9/XvXv31Gg09Nxzz+n+/ftp\njwneiuz9ik4Q1TGXmQ8+F2OqzOt6Yt+UgZgI5Lw/0EdPffnqOMCX0/dRygCPB2XrFBwPAaTXMuFU\nHRB4wEefABKwCW5b6ZcsyxKIc9tdqVTSHh9E5d6HjD22wPXfN5ZE//J8udmls/bOumVZpsPDQ3U6\nHe3u7qZnPzk50ec+97kUMHmNIcEVYOrevXtpB1l05Pnnn9e3v/1tzefztPs2/s7BBH3sATzXp68J\nADkX2x/HzmsVve98paf7Bc6hPV73A6gA/JOF4NrOurhvl4rbgpQFWy5rASdEuFmWqdfrJVQuLfNm\nvkTYKWyUeDKZJOcLymTSewEP/7M9MMp9eHhYeHcLRs4jQo9ypOWyYlJADDaoVVoCKUfznh7Jskxv\nvPGG+v2+jo+PU3+88847kpTe7grFTd1JvX72cqi9vb20E+3W1pb+6I/+SC+++GIaZIpcSZ94eqTZ\nbOro6KhQIU50QF9KS+p8PB4XwAypJgwudTA4Mq8bQNygwWp5HRB7A/AzHo/V6/VS9EKbvAiLZ/XU\n0kepE/ikJLbJUxBScQlvrFPx1BDXKQM6fh83Ur40153mqjQGOhvb68CbucP/njqp1c5eJb+zs5Mi\nOgD/cDgs1ELhgHz+MEcBQq1WS7u7u3r48GEynnEHWq878P7yfo4MqEfmHj171B6Bg+t0HBPEU20I\nKVDf6BHH6mwWoCfeAwdVdu1PWtxGEpljRxz0uZ65TSA1ApChaDSu2PCUEA6+Xq/r2rVr6V4OSLEt\n3D/+po85F7uCA+Y47DTPStuuX7+u8Xic3meDHt69ezc9J2CEZ6fG6tGjR9rd3dU777yja9eupdTP\n7//+7+uFF17QYnFW18hqS1+4gH7GeU4hOS+ZrVQqhb+jfkYGhGt5RoD54+DOQYikQhDCcxIQYMPd\nfpWlr2P9EZ+tkrWAE2coACpQtDj9GEWQi3bDyOez2UyPHj1KqM/BCdfFiI1Go7SMyZXcgQmgyGlt\nlBCA4imjWENB2gRFc6qc9elHR0c6ODhIwGNnZyetNvDVOkQagK8HDx6o3W7r/fff1+3bt/XjP/7j\nev/991NOdjAYKMuyVOXtyyo9lZJlZ8W/g8EgLYEjvQMAo/LcJwoK6n0DuHQAwSTFMPlvV243OIwr\n+wAgtN0NeJk+rTsvLxXTaDEi98/8WSJ7EqlnruHGir50sOZO39sirY5SympfPLWAcfJnwYFznLMD\nfo0sy9JSSNKCFHsDdKncr1arOjo60tNPP63xeKxGo6G7d++eCy78uZi3MYVGP3hhtqcSor5EnXIw\nEMeJzznfr+MADhDiu9cSnfLcTplHBsb70VMX6xJAgDtx1ynG3qNi9NidEscDcHwlJud7bU6tVtPe\n3l4BTDrA9nbxP/3lNgv7AsPR6/VSG7HTjI8X2mIfSdfgY2Dy+v1+quPwFDXB3s7Ojj772c9qMBjo\n+eefV7VaTanoz3/+8ymI4EdSAvn0G5/DzMCOHx8fazabpTovfAdsPMDcwTf+JAbf2GGvtyGAdH/o\nLAzj5sCFQILxYX6UpSk9iC+TtYATBh7QgEL6qg06F4X0CIKHAmGChH0vD2db6CAKX0kr8T3O1pc6\n+YRxx8jkigW7pKN8MnB9KN1a7WzXQFYKsawMNAy74bsK8pykTv7gD/5AL7/8csrns4x4f38/7emw\nWJwVbXU6nbSMMoItwANMyt7eXlIi9nSRzvZQ2dnZKeRDmeBEyETFHhECcHxnV4CcGxlfukzbvDYF\nJ8bfkd5m/KKDX5d4e5yVkFYXyl4kXkgZNz6L1/I0hTs39Bew4QyUzxXO8+8jEHHn4REqlfcYSKem\nqYHxqJe0j9PxN2/e1Lvvvps2BORzVv0Afn0Vnb9i3qM5jwi9H2BmGCdSiHGJpfdtGauBrAI6ztT4\nMQ5guJ+nbiNIiX+vSzwFE/eNwcZKSzbTwYY7X5/XOEoK5r0ImFUcDnqd8fDI3NORzjL6XPE5OJ/P\nUyCGnjrDg83BrhFwYl99bjx48EC7u7vpGjwLYz+fz/WZz3xG+/v7mk6XL4P9whe+oDzPE8AAiNE/\nvAzQnw2bzbt4YJmcjeRZASj+PePEPRxs4acoMuY85pzbfknn5rKPNWPp9i8GWj4/VslawInvnueG\nxYFJGXXqA056wEEOSND3+kCR3aFyHQAAhXh+X2lJZflqmZjPzLIsGVNpSdsSFUhKSNcpN/ZlQMko\nZKI/eDZPZQEa/uRP/kSdTke3bt1KTMP9+/dTbQsRG+DAl4L69UHGrIrgGTDcgDloWJxFrMZnHKEp\nuf7JyUkyAgAsj064B32M+OR3xF3GZhHZew58neJRfFma6UnAxIEt/eJOCpr6IpqfsfHUgVO1fkx0\nxDggDwrQZ8CGM13OdFYqZzsmV6vVtJLAa46cSYyMB/rNNuLsA1SpVBIAB+TTfoCGR2fohzNN6BI1\nAqy6q1arhSJqgIKPW4zI0X2eqQxExH6WlrUOOFokAs1o5CMYX5f4/MLmOiNEu2PahWfF/knLmkEc\nPfVxbu+d0ZBUGNcY+EnLlEEMSr0WBpBB+3G0jCO+AYaClYGVytmGlzdv3iwEWKPRSN1uN6UrmXOw\n4YzjdDrV7u6uJKXNMg8PD5MP8PS2z1X6LabK8jxPzA/PDcPTaDS0vb2d7uX1MzwrYzifzwt9IC33\nBcLfuT1wEoH/GUfGlWs7uPG6k2q1mlhUGJpVsva3ErtSoFxMBI8u3EjCVETamS3pSWc4clxVVBYN\ns1NdKLFHrtwrFvr48lnAENFfrKEhHzkYDJTnZxXdvV5P0nLrfSYTO2iiYL4Py3vvvZeQ9M2bN5Mj\nkpZFuR6NlTl3BxEYhF6vlybmaDTS/fv3tb29XQAobiAc2HlkSyrMDZFTrCg3zoPr+P4X3tdetEVf\nY9x4tqsi0WHFtNSTGJRI9/s5MZ0jFd9p4oYsRiZlxsANl+sKgrF0YOtACbbOHQIggLdYR8AFa+JR\nlxt01wFfAk/huzMS3NeBHM8VozYMa+w/N7re/3EMo5P2Yz1S538PqHxO+nX529Mg9DMOZN3g22l6\nxt37SyruCM24eMqEMaNPsHXoj5/vTpPxhpGDVYhMatRtricVC5xdfxCCOWfZ8SXT6VR7e3uJvWZM\nFotFes8PO2wDZggQfT76LtaAVH+1gz87eoHNi6k9mBJqFKln4d6wW9hfn9c8H3PV3xbOPKxUKikQ\n8HStM1OrBEDjY4feeLqU41bJWsAJnc2EhGrG6WI8fJJiDCWd25QMFO+Rm6cPnC3BAHA/irToLCYV\n9wdBOoKXllXgEZHSdugx2tjr9dK1BoOBjo+PE8KG/bh586Y6nY4ePnxYOBZAEukzwA8Tl36JhhUA\ngALGaJBCVO/b7e1t7e3t6eDgQPP5PBXHQoU6TY0TAdy5UXDAhxFytgxWx8GJR0keaXp7eX6nES9C\n4Z+UuBFELprIUSJbJJ1/s+0qwOKfScs3iK5ybPRbDAa4Btf2JbBu5AAtrEIBqABG/T1P0PEYdliQ\nXq9XqE3ytIu03IAMBsaZEGeDPKqkfRzvDp95QHvd4HqUGMfRDaz3v38fI0dnr6Rlca6PR9QNT/M4\nUFo3g+JpLwcW0vll3P4bJ+6BCc/o6S4Hv4wNfcr40gb00VkWqViXJRXfVs33UnG3agIb9JR24qBP\nT0+1t7eXtlngOo1GQ7dv39brr7+u/f39VPB6dHSkGzduqFo9qy1xANrpdDQYDBJL7bUd9BUCs+CB\nM0wxesk2DrVaLe1Bc3BwUNjw0H0b+o3v9D5xPeWa3u/O6tCvzD/muzM+HhhH9tv16coxJzjLSL/i\nnGIEIhVrD1xhMWieciCSosNw7K6UODnqPpgYbiS5L2jWQZTTrjAbOHDOZ4DzPE9AZDAY6IMPPijs\nepjnuR49eqR79+7ps5/9rCQlULK9va379++nTa24P2iZ/SN8Z77FYlmvg1NwFoq+caBANIRytVqt\ntEmb061MEKdtuT6GiB8fR0CTT1ZJ54wubfA+lnTu7zIHcVVkVdtWRQn+bBGARKcbrxmdm/c5feuG\nJ7anjKGJ90GnAKZHR0ep4I8NnlgOyXujYCJ4Jwg7DwOo0V/moVR8W3GsL0BnKVb06BtjylxyGpr/\neX6Oc3DtDnMVEPC8vDNXXM/ZFe8/Z8oceDqA9PtFVsyPW6fEqJ05T6rWAa7bRbcdnBtBh7O5UjHy\nduZsNjtb/dRqtXR0dFQAks6GeK1RTEtij2EPSN/Qxvl8+cJKnmkwGBRqMQiMRqORXnvtNb3xxhtJ\n927cuKFWq5U2t/SxJRUEsOd+tVotMYX0dSyYxm85AHAbycaLgBQfg8iWOwHA3OZ/dNnvKS2DEfoQ\n8OH2nftFW+L1pD7O3HuVrAWc+EYukRKlvsNzmihZZFK8M1FAf5WzU74cB9BAIdjVj9xejJZQIpCu\nTzofePLsvkS51Wqp1WppNBoltOr7L6BsTJZ3331X9Xo9FVgdHBwoz8+KpmiT/87zs132vEAQEMVq\nIZ4dytInoeeH3TjPZrNEx+OQMA5EgyihOwJfigzoi5PAxwaaDxTN94A6Z4uYuF6z4LR9BD1XRdAh\nf2Y+p71OPUcmQDq/gsMlRj5+fcakbHUF142fex9iqGOhYLVaTcvP2cyvWq3q3r176vf7Ojo6SjrJ\nPjq9Xi+9UG2xWKQ3rjLGtNXtwNbWVnp7baz5oo+IfD2v7oAOI+0sCc/moA1b43Pe+8aPpY9wys6y\nuPGNQMZtEp9H4FlWa8GcW6fQZrd/6IKDQ0kFQMnYOVsCoAF0RLbUI3ie28fJAUbZnOA8Uv9ScYVX\ns9ksFEKz1B0fwsaBW1tbGg6HevbZZzWdTgsgxdP0165d0/7+frKXnU4nvRIFPwbIcp8Vi2djKpvg\n2hkKntltB8ujfUECfYY98eDE2RrYfx8vjvc+4/oRfNBeBzi0i/s7K+ZgO86XKGtjTtyxz+fzQtW2\nVESKjpy9aphruJOD6pJUMEpc2wfG84uei445tYg2uS9g4OHDh2kSOfU4mUzU7XZ169YtvfHGG2mv\nEkn6zGc+k9iRbrebjPC9e/fS1sS1Wi1tXuXbM/tkd+fjL5Ki3UwI0jYYB/o3rohiZcVsNkvUom+l\n7JMD5XblZFkdq3hwUp4C86JZ71s3Sp739PF0AyktAVkcs3VJdPKuz25M/bhoQCNo4btV0bl0nh5l\nbDnGjRbihsIpeNJ3vlcJ+XY3thRwP3z4UJK0t7en09NTdTqdlDqpVCppTpIfJ5XnYBrwyfPgPHyT\nLEmJCseAx1oOB9/SMmdf1keMh3/nDKC3hz6NEagzI+hxGfsbGZCY3vEINdbo+NxYl7hDon/c4fDs\n/I/d9a0MfBx4Lq8n4hxsvbPP9CkLCLyuzZ2wdH7HX28XAIAl7thdtm9gkUKj0dD+/r4k6dGjR6rX\n6wmQE6xRb+JgzVffUKOHTtRqNe3u7iaGnT4YjUYFfwg75G33ue86689JcI5fYe54Gh4g7zrqdjwC\nbrdBTg5gbxkzacnqe8rG/XScKzzXKlkLOHGnifHDwdCB0hJ9oYBS8eVkXiCJkSK941E4g+ab9XgE\nJamwBS/3ccVgUBlY8ovNZjNVXAOQ+Pv09FT7+/vq9Xp6/vnndXh4qHq9rldffVX3798vOKlr166d\nc2i8kA/Q4I45y7JElbfb7aSATFwHH4eHhwVFZsLwjBgC/uYdNlSTO4JfLBZpySh9xfcYLU/heH7S\niyDdALtyM76wWTESiCk9ZxWugpSxHU5rXnRsBB9lkWFkVFaJ1zfECB7x6IbrAVIYY/TGgTuAFkYE\nVnBrayuttpGW0VG1Wk10tjMisB6ka6Ix9JUKDl4crMb+jfrggYszbG4cI5CINTrOfnkEHEEmx0WQ\nHO/BuT4HYnqHc5wRWqe4vZCWO8A6ve/94cDP0+jOlKJTETjH1IuDE2dfsCWeUpCKK85YUFCr1dLL\nWre3twsBEuc1Gg0dHx+rXq8X3gR9+/Ztvf/++3r06FEBCACw0V1W4jA/RqNRWvlCehMg5PtgbW1t\nJRYSVsmDLfeX0tJ/YbO5Hz6xWq0WXtsCWxkzD9gH37CR8cOHon8UIMcgC10ASPKeOXwRUsYyogOr\nZG37nPiD4vCgkF25XBkciNAhoFhp2bFujJwa9E3c3MgNh8M0CHQqiukGg1VC0lmn7uzsFKqc+Z3n\nedrpFGaFHQ7v3r2rra0tLRaLtMxXUlJaUkHsporx95c8eRTi+wGs6ueYSqDGhueNNCAgBYDlG+/4\ne3C8z1E6JqOn7ubzeWJAIgXuQJRaBCYxoIu2eWqOcUFHrgpzQt+5I+SZ6cMYNayKJDwalIqbfEXx\nz9xISyqMb2QSYrqC6JT5546BOUg+3PPTpBiHw6GGw2GqLYFBAWiiP6Q7+S0pGVfvv/F4rH6/n95h\ngpPziG1VSoZ2OTChH/jfdTCCOAdlZaxIHDdnbvyzOM5x/CJb48/gAGyd4u8gw+ETwDAenl5jLB18\n8JuUr1TcFM/HCGE+sacN4JAxdb30FJCDKXSM4+PeSp1OJ62aIbj88MMPtbu7q62tLX3ta1/TrVu3\n0rmw4pJSMMg29Owe3u1203j7bsjUB2IXqtWqhsNh6jMWaNB+T/14qs9tOyDX57mkBOzcVjtjg067\njvoSXx8Tgvsy4M1LCambgc1nVV7Ue59zF8lawInvM+Bpm0ajUaie9sLLsvfZkKP2yCvStC44CC+U\nhZr2NeG+PTMKxKQAuaPcPMvJyUnaVZUUFe8LoYBrPB7rxo0b6e2tktJeEN1uN+Xnh8NhMug7Ozs6\nPj5OeVFpiWpB+L7k0tNhtElaGnAmOIjb2QsUDwTvyl6v19XpdAoOwDcwos/5ATgywbiH9yPiY0VE\nTr97FMwKEHcEPsGugrgRZkLzf5zYZcyJS3SAq6Jov04EM4ALqbgMNBoymDf0JdKwgHbmXHzeVqul\nDz74IL1SHjDqG3b5VvPT6TSlf3ye43iYn/V6PRXPkmZicyrmIYYvpgf4G/EAIj6jU/8OTOh3ZwEc\nlHlfenTv4xL7mZo0HIuDHwf+Mehap9C/9A1stKf+vL0cTx9iE7Ar9JWkBGCRCMqjA/UFAdjCyWRy\nLk1HX1IDRSBHaob9R3hJJMCh1WolJvvOnTt69tlnNZ/PdXh4mBgVxo3/YZjRDYI66Txopm0wLfg8\nNs2MfYg9Rkcig0UfMReYK/Szv0gWYOh22hkVabnSxus8AdBxPJ3d8nHDtwBY6Qc+KwPpUdYCTqDZ\nJKVcHyCFKB2D6KkWAIhU3OwKB+YdhtGMBt1pas7tdrvKsixVOns1P9dhMvlyL2n5ojxPGfkSNopT\nJWl7ezvlOgEH0OC+pT4reaSzHVo93eXGAWPPM/veJExgULojXGdImDhcH6ViddBkMkm5WIq04hj6\nWABkHGUTRftEig6AZ4IxidG8O3oU2yNZANdVkRgtSuep+yhlKZwYua86DvE+8VQEuhxpWfTMUznR\n2fr3jJFveLhYLNKS806nk/LdUnEHVc5lHCeTSdqBk/s5a0nE65/hAHwjQHQJ48lz+1x3wwlL4cDN\nwa6ncbxvPUjiHHeiDubKxgZg7sytAypnspxxuwriAQK1dLQTe1nGbkvFeitsss8NZ6h8IUQMeqRi\nioi0s9sQrjObzdLeH6xo9IAzyzJ1u109ePBAe3t7qlQq6R06bg9feeWVtIs2tXgAHXS92WwmsEKK\npl6vF5hfdM7rIt1e8a4hxNlhDyI9+HQd53t8J+3xNwAzv7wfuY6ngRijWMTq7XW2DIbTx9uZc/df\nzNPoh8tkbe/WibQpoABE7lQW7ABRB+I5LxQZWhjUCPJkwJzKRcG9AIlIgJUmGHTPlznNCqhibxBW\n6zDwi8WiMNC0FWfNJGYwKVyl2Orhw4fpGWBjuHae5+lzvx4/kbFwSjwiWJTIlYX7zefz1E84E4/W\nPYqEzkVR6TeiXQdGPqG4BnsDuPPwqAFGiWdkEse6gXUJho+2EaWUAQOpCABo/yomxK/v/3Ms14hR\nKDpW5ugwth7N+zWYCzgg3/PEi/Yo4KZNkWL3dgJoJKW5yT1dt5zFZM44W+JGjzw+OuL9A1PjdW2S\nClF8ZEK8HoR2O5DzovR4jKfDfIz82Jju4V5loOgqiI8Pv9FlZ00iw+ERt4N0T62hz85QY68BzFyH\nH+wS4IAxZgxarZYGg0EK7tBdauV4v40Hj9h1d8osB+Y3vsB1GBsPoOR1Hw64uJ7Pf+pQXNccIPtq\nJvc9jIEHKwjLo6lpwUc4+0eb+d83yvQNNj3odFvk4A0ggg13sOl2niADPbiMfq91h1iPpjwv5XlE\nR7t0nDtTouroYOkofz03n3M9nL60fK00gMVTFgy4p3VgGRaLRTKwvmLA8+DVajVRbTgBR6m+Ex/f\nocikijAAfn8iOc7BaDD4kcZG+RAHiLSZ+9Bfk8kk9QfGn70saI9PHNrRbDYLbxyFvvT35nik6Xl8\n3/DLz3fnTQTtOdSrIPSDRxXO8pQdy3HovQMN71eu5UbGgbK0HGscLN+5YXO2yYtmnVlwY0X0NZ/P\nNRgMUlQGeJWUisK97RhmB2c+T7y93g/+bMxXH3//m2LY0WhUqC1xI++6Tduk4kZRzsgSLETG1QOT\nuKLHGVVPzfhY8T8GOqaEfJykYsooXueTllj7h02i/7yo3leHEPU7uyGp8L4lAlNsrgNP9MVX7bju\nuG/w8SY9E+0S7aDGER3FefIc6CO6AJvr15OKWxugq7GepowlcJCBfnuQQiDpab+YHo6AnTQ/fQNo\ngBViVZBvN+/Pw/3cV8KEO1sjLQNKxsrnegw0fK56DSHtXKlzH19dP754wV2WZYnmipMRxZSWyL0s\nQqZjSItADeK8iJDcOLkT8S2Hm81mSrG4gfFIwRkRZx74n8EkDeWokgGMlcuR+WEQXRH9f8Rpc56R\n6Nap1LhduAM5Z1K4FudT4EU/kB/mfUKeanOnkGXFF1kxfl5T5BEWz04/k7LCAMUlpYwN11238Ubc\nkESn5SmasvZG50xfer846EXnoqDzzB1pmTLhezc+buydHWCZZdQbfmq1mvr9fgGUuPPl/1V95IDa\nv3M99M/cGdIGdBDw7xGbp3hoB/dCP7FF3g5/YWUE3/7b03T8HR1QZFcisKKdPiel4ovVvN3raEGS\npgAAIABJREFUEuYfOoWjY+7hFLFxMaXIc7vN8fPdPsLUuf1H1+NCBwIplu3CqMR30jhw4l7MHfaK\n4jgKZrvdbqon9OJenyN5nqe0oxfs4icc8MegDNvmPs1ZE0mpXoQ5SDtInTqL5Wwec6GMnfZaEvqF\na/OsnONMErruQNPtugP7arV6Ljhl7Dz4vojtXtsmbBhQImqMDBMZA+FpgIi+fVAADCBmIj6AB6DF\nIylyhp4CYrAoZGXwcdbSkipHwZm4IFSWXcEuOP3pyuD35TiUAQVjsnqE6cyPVKTOPZJ2cFWpVAoT\nFmTNMzu4AszQZnKqCCkj+seNsOeJiaAcvHj6KU5OnsmL2zyCANjE/iPHexUAikfgGBFfpRP7wh2c\ngxY3ZO6YHKwg8RjG1j/HaDImGChPM3r6h4jJ2+eGEPBf1k7GmPb6/PXjI0j36MxXDXCver2eVo24\nrrdarcTuuX57ZB3bGZkVdC8CyAgm/VoYbx8bd8gRUHgwJC1XEjEHGLtVY79OcWYPXUJfcHxeS8PY\n8DzuyLxQlN9uw6NDo3+4L6llUjXMCZhexJkIB03YUk89S8tdrLkvb8L2FSfoFHtKAQDKarK8VlIq\nroZ0fVsF5F2/ou9zJsn/pw95fxvPCHhykIjQj/ge/57r8lzYA/wm53JdHyufzx50xA1IV8naCmLd\nueR5nhz6YrFIqNEpTwwSkmVZWmeP0vmD+rIwruP0oOfGJKVrcV+Mna8SKGMayiIbb6+DBaedI2vi\nLIykhEZjJbz3gVNmOATahIPneXgmz5W6o/eoBkq7Uqmk1RL9fr/w9s08z1OKhvMYPxQYw4XS4yic\nuYpLAgeDQRpfn/yc646ZtkoqGPh1igM1npHCT09XuQNy9sJZCeYGn3sfl9GhTqdKq2sjPJqi3zyF\nKp1/10aZvsc2u+PiHjgPJBpH12HqB4h8eU5nZnxHWHSGueoAnWvHokwXn2+AaS+gj88YgYrn8f0Z\nGSPvQ9oQmRhnsSLLEHVlnRJTVWyoh91y5xMdkqTCSkJ0xAE0feHpIbf5zt5Jy8JQCmIJUNyeuZ9B\nfBWig3P+Z+yxvXxPigib5n4gjqvrBXMqgk/XG19aTTvoczZDlIqr75xB4rquv6RiB4PBOcaDWjHv\na+y2tFzN5Asg0EkCU56XdlDX4sGur4p1RpB+ATCukrUtJfbohY4hx+1gwzsFhfV8l0d50nJDGAwc\ngpIy4DG/V6vVNBgMCq95j4Yalod2OdvAPaRiLUe8B4ZVWg44QMKRsCNyd9DOyHBPJgvHMbH8PlzX\nt12ODs8/Y4L5tsVUmvNMTh/6TpCMQzQuDv5wJEQlzgShA05h8uxMTO9n2uxGaF0SGQSPNBy4SOVv\nUeUa8Vox4nc63A2Ui4N/9IiIDgDh0Zm329uBOEvAvQFA7jSkJSiBAfHAwJ895viZNwASoiyKILk3\nx0Clk250IOXsm7fN54Pbm1g0uEqnIliIgQnX8HP9uh69u464M4y6sm7g7eOEbQag+J5Q7vwdHAOc\neU7pvL3kPogHMP45fzsTjaDPnlrxrd1h5Pk/6hU/vgmhrxQ8OTnR9vZ2wT5LZ2kuavF8rLmH2zi3\nafV6Xd1uV4PBIPlFr3dy4O9MC3rt98MudDqdtGqIOj8YqzhPOY/+5B7uv7DVkgrZDQelPA/j6syS\nB7xcz9N1q2Qt4ITB9j0TcD7tdjtF6OwqGY2Eb5kund811iMPp3opPHUWxB0dvwEwoMR6vZ4oMe5N\nu9iYDeOGOHCJkZgrlztekCfRo2+6xoTjuVFEEDxGnEI1p1g5hr5zytqNdyw8zPOzzeQePnyoTqeT\nHAHH0g6MPffxqJjP3KA46IvgSzqLWlBu0mZs1OW0phv46EjXJZHWjnSrR/YxdSAVNyh0NjBOYhya\nj188zh1bo9FI+yq4XnhaLTpWxo/reg0Q18Wh8wzM6bgB0//X3rsst5UkWbsLAEmRBAHwJqWUWdWV\n1tWTfv8n6KfoQVn34M+bUuIFd1IkAfwDnC/2t0PMLLNj5xQ0QJjJKInA3nHxcF++3MPDFSedP2SA\nZS/aawrz1O/3SxzbwGiz2bRqqyRt4MdaeK5gpkxPO/ZfP8PsksHFH7ErfrcdjtqLtJPh9ah13q6B\nNwYeNhp9C8NVMyteU+bfOjtpwLMBODqM37umTdIYa2TIR2UpZlY7XDyXd3gPAhD4LH848eIj8bDZ\nDlkQ0mB+fHmlHVL67nckKaXsOfbc7XZbzqVrkliOXhunGWhOoM5ms9Lvbre5kbjeJ3X4iDVljMwX\nJIDzfBySS1JspfNvWCPbSDM/r7WdaHQvMoM3q1CfisEzB3ERTsAAsxHIjUAQ2TAUtzGFZMPJBJ2d\nnZX8l36/Xyoh2vMyg2H2wErKeSwgYRKr8PD4ztPTU6mySAljPkfNEo4L2xOmP4yXOeBGTeaUeaDP\nDw8PBRB5bGYzrGxfXl5KP3w6iWqIZk34bg06WHMSz9wvmBfmAaXHJgNUoqyRAZR4zYLtutVMRu35\nWhnz7yRfzXud52HPmTnCGwGovDZ+5Nx1CFCeNCs++vAa4PNaJm2AlDT33hj0o+A8J8iKn2slxak2\nPn9yclL2AUmAlrsk5Qh+rXx5fu1ho18Mcl+rgHl8fNyioL0W9kLt3Lj58wbodlK8tn6WDem3AL7R\nWYBCQjM4R97vSZPDAYiBMcDJS/KV0TXQdTi4Ds8kzUlHDOlisSg6CSDFu10MkD2BXMDq8Zk6x8S2\nijASjjQ2BEMNMOD72AuD76enpzw8PBRZTtp6AXC22Wwyn89LH2zMnVeIDPH7N2/elL1hvXx0dJTR\naFTGAJAycDR7yPqxzn/E0hpw+N9mtW0b2aP/TK53VueEY08gK9e3cF0SAAAG6+npKdPpNOv1ulQs\nZaPUNSUwZISLzs7OSpYzjAqTzh+U1dHRUTmJwIQiyPZoLXQII5vYpdwpve1iQNQyeXl5KQAFRoLv\nwcrwHkJOvIt5dFIXmw+BhP1J0nqH45ZG1QgUz5xOpzk/P28J2ePjY2azWZ6enjIcDnN+fp71et1K\nnvXcYGS73W7rWB7CbwOCEUC54MXYCyGc5Qx5NvUuG2trg1MbLCsuFL2ZBeTc+TU1yIGVIV7Nv2tF\nmDSxdeYfZez+ITNWenVYhL/zDgMiG56kfW8PFZOdkGgDTR/t/dkTwxDQDLapMkuf2Nf2DP13jwnZ\nr6lz5srsJ+NzSJVx+99mof6o1QCIPtWsgvfOrpmTJEVP2dAAdCkmSbVVnEFARb/fLwwo+x02D6BS\ns77r9bqUnTfbAEuctMOIhDCS9rUFNfBFv6NLkS2D6i9fvpRrRpbLZa6uropM8hnkhPpbNfOHTXO9\nKOTW4aSk7dSwp7vdbgHHrgdk1sT7BjmCnTw5Ocl8Pi+OLxXHT05OMplMcnNz05qzpH3akJAnDBU6\nhvA+TiLfc80YrrBIGt3FvDCPh4eHLSepbjsDJ574ZCsYGNnxeJzNZpPLy8si3NQxAG1x3Hc0GhXg\nYqDB0baHh4c8Pj6Wd/GThQbM2Iv58uVLKXnsDYbi4nsg7ZeXpvy9lQiZ4wcHB+XoHUqOI7oGFe6P\n6WcMCieGzs7OWhRqTf8yBhQlmdgYDJ6JoNTMkNfEXiVzBNC7u7vLr7/+mul0mm53W3wIBcM4UCIG\nWoAK1gsFhDJxnxxSY3xsRjaRQeyu2x9R+vzbhgZAggwhn8yhPXkrINbQ+Sz1SYQ6xGAlyl4x2OTz\ngBTLup9Jf/27mtIF9PtZHrdpaOaFOXD+FM9Ejvm+HRFocVdqNjjmfYy1VubIE+927ke32xydd3jC\nxpN1dI6D2SSzZx6rw2ZmpAA2NnQY7F02+mWnxmDu5OSk6EHmnRwHHLGknXsDSLCjuNlsL5CELbPx\ns2fPHwz+yclJzs/P8+nTp9Yx26Rx2JIU55c5RzfiGCFbHCN+fn4upzp7vV4rfGjGwCyecy+QRd/F\ngzx6DyDvPAP9bfmAYa4TcutIgUMosNMAlZOTk1xdXSVJZrNZqVtkB9IOfpLC0iKP3l/IA7YQW8X8\nAF49X0RLPFevtZ1whWx+aHl7cSic8XhcBskC4SENh8MMh8PWEd7VqrlLBmXJxXs8D8UEkkya0uos\nMKeGxuNxJpNJoQWTtJAifcWzB+myeCjPpIkr1hvcFFfSgAH6b5qTRGDTlXjK9rIAPwgtxeWMuu2h\nooTdN97NZ0H7SQozcnx8nLdv36bb7eb29jY3Nzet9XXIis2MgDKPfKauycL6cImc69DYUHhc9jx2\n2ezt160GkihaU7t16IJmNgQ5drGnOixDcwgCI85nbPBQTO67jSR9wuO0MeY7PplVZ+GbvXN+AuNE\nOdrD5DPIH33BmD08PJS9S9+d7O3QsD3vmvVM2iEpzykOk/NsalYEQGSWqw4duBmk1YCtDg/VRmtX\nDX2RNKG6pM2wOVmefY18T6fT3N3dFbYFAzUYDFqsno3ja/KNgfZckWuC00Qfa5as291eVUKpCYdf\ner1eOQxh9psTm85XxBjzbOtwO5zot6QNPM0O8sz6WgSveT0OO5BmL+28UwWXiwTZU58/f87t7W26\n3W2pfYAZc227UTvE2Ft0sKMF7NE3b95kMBgUAGXAn3wdOv0zhnFngcw6KZJNDyqHQkxSyqZbiK+v\nr3N+ft6iwyeTSSsfBG+Iz2AUvdEx+izq8fFxCTsQwwQUWPGv19v7BObzeau0PIvFcSwotjphlf/D\nIDmkwR/6zhxxN06tBJlPPsdcoezt7dGshO1N0hBmfgIUHh4eSp8uLi7y17/+Nf1+P9PpNLPZLN1u\nt1C3vAcgAohbLBaZz+ctRsreODHT1WpVnlsjeoOUbwWYJG1vvQYMhAbxLpxMZm8cpW5g6rwaDJYr\nnFo50uqN79CQmZmaQUNZ0+ekDRAwBniT3W63dd2D9xNj94kL9oUNM0oXAFMfc6bPprMJlRq48HuP\ngfnzvvfnaGYjn56ecnp6WpISTbVbZ/Fun2pjvgzI6+e7ua9189h22XxhqHNBki04oKYUexjG2vkF\nDpWdnp7m6uqqsBKsPaULADfowqRhnWqQjy6mUQeFPplhYAzIO8zYbDYrdznhdD48POTi4qKAT/ar\nQxHdbrfkvbhMPEwQDig2xHuXk0113pMdSjMr7MWkCakh8wYz1jvuN3N/f3+fp6enDAaDXF9fp9PZ\n5nWxh60XsFcHBwctsJM0ABBnEr3f6XQK6ERecLitr+wcvNZ2FtbxZgd0MLmj0agMCEOOUJHwZPT4\n/Pyc33//PUnyl7/8pQgjSt7xTzMHbAh7qfwffSIhyRUQnaTFIq7X67LR7DX5hMFkMslqtc1AZzNZ\nuSKwPMuCwTw4PJOkZexq0IJyt7CZ+UFIeL49EtOJvOf5+bnUSkHw/vrXv+bx8TH39/f56aefihJa\nLBaFnnW+CRubSwl5tt+TbC88PDo6yu3tbbkIsd/vFwWDwtq1R1m31zxrxs66oBBZC5SUq7rWBjZp\n19XguXg4VtoYdCt1+gZFmzQlyfnsa7kkSTs0MZ/Pi3d5cHDQOiHDM90Ps4T0l/+jnw41GiyjHLlo\njZMh9M2gFgVt1oI5qvd8zSYxDu+Hbreb2WyWwWBQQryLxSL9fr8YIxwI1sWAlHf/Eagw2GI9DF7M\nJDhMtMtWM3cGl7PZrMiFQ8BJw1Cx9uSokMxeh/3MRtghxQ54jxAaShoGi/fBwjHXAN+kAcuup4Ms\nwcqNRqOiE2FPfEKF77BnDIScFIr8O1xq+4EsJU3Yh/6anWD+HRZhHSzzjCFpcnHMIn769CmHh4cZ\njUa5vLwsfSbBl0rrOBrL5bLMk2WA9R0MBuWCWIAn+8x2nLlCJjzu19pOwAkCtlqtChVYK1KEyqEJ\nFIizt1erVUHonHCBirWyQchNpTlWzqQjYBYSUDXvBlUT22SSqVdC4hdCMhwOC1PAM8zusMFchZZn\ngtRr78tGi88yHtOKjq0naWWkW9hqmpaGEAKA5vN568TN8fFxfvzxxyTbGOZvv/1WhPPp6SlXV1dl\n7lgLCgsx90bi3FA9GAwKQMMLYS7qPiavF9naRbNhdm6Bc4HwxGmAUtbfe8EsB+vld7mQFYaMdXdy\nLH0hodEx8qShq53Ma08naY5v813H6tmL7FPT2O43cg9I4nkoXCrAoszYh0lal8CZ2fFeZfxmEpN2\nPlXNsKArzMowXupXAJABSxzzr/UHCYZepzpU5PwiJxE7pOPvfwtyzdpyoZ4Ba7fbLbkmBlroHdab\nBEiMf5KiQ2t2CF1qjxsmPGlYSEINOIVJU5CMPpo5MHimn91uN6PRqOQNPjw8lMv7aNPptLAB6Fdk\nDPkzqERODLjq/WkHgn4jxzWg9sknjDzA2HmQvMtrQRgLFufNmzf59ddf8/LykvPz8/T7/cJU4SCZ\nmWL8rjDr8Azrt1gs8ubNm8I+Yb9xYLBjzJEdhdfaTuA4tzGCtGoKuNfbZjo7KxvlhXCs1+tyKR1J\nPu/fv0+SknvCZUcusgMrYsYBAwLNt9lsyoLybgAQzInZGIQNYZzNZkX4QJyM8+joqJzdrz1eK05A\nEoKIYDAn3vw1GjVt6dAAdKLDIg7dAFIQchtH/r1arXJ/f19O6qzX29ye//iP/8j19XW+fPlSlBdM\nF54mG2g0GuXs7CzHx8cl1MNczmazkjeEkYNFcSjQuSrIyLcQ2sHjAVBZGRlwJmltUN/fYUVDjk7y\ntdFiTu1d83dTxQ578T4rfZqVrZWjARaghH2C0UE2AVo29PboanbPgAJgYuVlT5HfMZeAAffdgAej\nRJ/dH36SH2adYG8Vuv7s7KzsJxLsWSMrWIdoaWZJvL/QSzUL6BDft8CYJE3VUMoBMJ66fAMyzFyy\n/y2PvV4v9/f3+eWXX3J7e5vk6zouHP81m5akZS+QQ/QXIWxkG5aGfpMAagCF/uUE6GazKZdYbjbb\n2inoc4fhsQ3ezzgd3tPoXN5lwG9GPPn6vieHzniX9zfyyNzwfOsX7AN2z3P422+/5fPnz8WJAUSc\nnp629kXNShoE+bb5WrckjY5CHmx32PN/1HZ2K3Gvty2ty7XWGHbH1RgwE85mcAiDCbq+vs7T01MW\ni0VeXl5ydnZWkPZ6vS7I3hQpiogFgNZKUsI4fM4erD176i8YEYI+UWCgd55PbNO0lgEAfQDcoJyd\nL2Mq0nkbPMv0m2lNCzBKmz+vGXcAnecBpeDEsOFwmL///e/5+eefi5fJBqX5JAZCbqrdiL1mHmoP\nOGkbU/q362aAmTS5B2xanyZB0bB2zrFgLK5uiaeE3LmxT6wETT/72TXbQvPvDRhNufNsji9D+fp7\nzIPZSwClT9kAYpAlxgFwZ+/WHiJePPLCPrOSRkH6Ejnvj6R9/xF99r6xgl0ulzk7OyuyDQh7zcu0\no4EMADj4WXuRXgMzJX8UFtpFA5TZaQOU1Dk26BvkdrPZFF3A3HAP2nA4LMma7O/Dw8PixLK2lll0\nGBVfsQvIDwYedp71n06nJXcCGYLNRM8AqqbTabEjgAYzNYzV7DaOJb/DtrA3vObsK8t2p7OtVYVM\nERKCkQNsmY1L2owke5F9jk0F1HBSdL1e5+PHj6VvsOlmZer5d4jGa4F981w6fIxMOILxz8D3zi7+\n464RkBMUEAYbwQHJUonVnv/5+Xnm83kR5sPDw3z8+LHcJ3BxcVEW/e7uLsvlssU6bDabkuhFVna/\n3y9on4kjsxtFCJq+vr7O5eVlxuNxQcmENJ6fn7NYLAoLZAqPvBkLq8M7pp0RRv6OssJztZCYQTGI\nMVXufBo2kGPHfJ9/0z/Ago3F4+NjBoNBQc8kLt/d3eX29rZlBJ0M6vABf1B01L6ZzWYFwXvu2Mhc\n9mZU/i0ocp9KcTjNRr6m8W2QDAIMUpJm7NPptMwH62pDyk+vVR3br8MRKG+MDUqfz/sIZNKEWBzf\nBiQgVwaoVm44DYAnn/7BwCFrHovfjw6pCxvCaKJw/R2eZcXOvJOAiJya9qcfPJM1BmjWeVwAsz8K\nbWH88JgNUh2O8nN33WysYH/pL/k4nHY5ONheBYL8JU1CJ04b84gskHBpRpTvO9Thk1rIFM+wswYL\n3uv1slgscn5+3rIjyC3POzw8zN3dXQaDQZKUPCeO3yaNkTUjUQNZO1wG96xvzQwyN94fSQPuGRtH\nfgmHmblzw04AqqbTaZJtjh85exTQJCxGBOPi4uKrkPNr4X7GzN4ghaHuk3WAdRnj/LN8k2RH4MQo\nCiOXNPUCfNwLAANASJp4I4ILc0DZc46hgrwBLlZ4AAiy8k29omAQWhQ2i4nC5cw5qBMljEANBoNy\nXw+KHNSKQPM+/i9p0LJzD2xoWPQ6b8ZGxkLCGHq95g4SmCGAg5u/v9lsslwuy7Hqi4uLcpppPp+X\nSrpJE/46OTkpNCog0JuW9TJK513O3jc74FCDWR7mraZAd9WIN9f5BEnjsZs9cX4Km5c1wUtjnQeD\nQSaTSYslqVkyyzh5LA6JAUCsaGgYGrNryAo5FuRz4SFyooU9wx5gXIAOG3z+Tt9gUSyzjMWKnn4x\nt4Q+GQP9JAm7VviM3z+9r/z/zCmspT1IanA4NEvzfPq9rC3zbwDH/zssVyfJ7rqx/0gypa6JQ1Do\nQyeNouvt3OCAouc+f/5c1skhQcLGzA3OG4ADPeJwh+cRY0y4x3uE3BJYlcVikYuLiwJeuJuGd7KH\nALGMNWnCLWb/AJ8+Zo48GvAawPLv15ws7jECTDEOmllB9g1sBn3APmIDYKeYm8ViUcZEfx2OYy7Y\nuwbiDpPa+Wftea5DYHX4s247ASdQegio2RAmhUVYLBa5u7trHY+lmbbiRMxf/vKXzGazIrg25mdn\nZ0XhsCBfvnzJ/f19rq+vW/kB/LG3RCG1Xq+X4XDYyv6GmnQoaT6f5+LiIvf39y3602yACwQxNwYY\nzIU3OayOF9YCwb95n71e2BqEq45b0oekEXjmC+YKAXcs+fDwMOPxuLzb9QscxkGh1YwPc9/tNsdS\nHS7DiHiDwxpgqGp6fReNsJ6TW+u59Xw79GJwXDfWHY/VANeeNw1gnzRg3sCFviBTDkEkaa3dw8ND\nYbRMzQJGkpTwDvvWILMGy8gVwAS2EoDCs63QfZzTuSkYBZwNF8xij9XhHBpj8RFYx9XZqzWb4vmo\nPWav8WvsVG2IWHuPhfd5Lr6FxtyzF50/Y10MC83eRQ5xPliTfr+fN2/e5O7uLt3u9jRep9Mpzo/3\nNMm0yOBgMMh4PC65SN4/nrNer1ee63oonU6nJPc7ZI9jQCI0uo5+Pz4+luex3sxF0oTlLeNmdy0n\nyBQ63uCJ7zgM62P27HlaDebpB7reoK/f7xcbhO5lvs2UJ+2aNowvafJjzFbxfdbEibrYUsbBuF9j\nf2g7AydsyvPz88xms21n/h+FQx4DKOzp6amEakCA/D+5JLPZLF++fMnFxUW+++67LJfLkg9CApNp\n1IeHh5yenibJV7FzI0F7M0lKYixH4VxA7OzsrNRaIQnr8fExFxcXmc1mRcFyAyWVVZP2aSEzIRjp\n+hSAE14t1LVSR+klTbKmvdTX8lVq5gR0nKQ1h0nKunCjJgq20+m0CuwBEO1tIKSMkXeDtE3D4o0A\nDm1YvBm/hcbcODZPA1glbQBgBozPGbihpNgbSVNDxrQp84BnV+cTmZ3hGZYbhyCs4E1jm85mz6Cw\nGRN0OM4A64aCQjly4Rmy4P7ZmAP4kLV63JYHvFyHaWg1UDGDC/sKMDF97wYoYxyeMzsXnlOPC8Nj\nx4N9w7o4n8PrtatG6BYHwd55khJi8/wyPwBr1pFTWRwl3mw2JTRuBhsmPWkcWnQn76yNtMOi6Abe\nYzYNRw0QAjB9enoqpyt5hpOuT05OCpCuWbhku6dns1mRZxwtwpc03pc0Cf7IIQ4I3wM4sb8sP+6D\ndTfAjn6iP1gDPzdp8gkBZ3Z+vC/tEPlyVpxXO0/YGu8Jvksf/6ztBJxwkgaQYu9ovW5yJJxklbRR\nHII5m80KNTeZTPL4+JgffvihKE0mwFQYhjPZnhzivaakvdjEsvv9flFYRvYoQICTz9vD/mC8fUTO\ntUxQ+owZxYZx5x3O43CyZdIkRdlj9700SQMueJa98JomtJJgY+E1Esqxt2RmA6FHCeC5G/igMLy+\n9NEljzmeSpjEdCNzhwx8C835A/aW+Il8edz2zB3qcZVNA1XnkNgw2itPGian9ubdHLrAwzF4xcOv\nk7JZF8CuT5+5qqaPA9dKCnYiaZJtzRog34At64zXGBEDhKQNAGn8nvmmD+wDlD/j90++X/+s38v/\nA0CSpqAdY6XvHu9rOUe1Ad5FY10BZi8vL2XfY/yRac+Hx4ze22w2Bdz1er1SqZQwO7rRjpbnxbf3\nmvVjLkmQpaJ4XZsHPYXugxkBMKHjFotF3r17V5zXJCVUUoe3GDtOnNkQ+s0e8lzUDsrBwUHJVcTA\nW4/w3VqneA+gg3kG6QWLxaIFprFdyKLnBl0LQeC+Mv8AE+wnzigy0O/3c3p6mtlsVsZe5yH+Geje\nCThxLJ4wAZckkafAImP0+ZwZFehD0DnCfXd3l+FwmG63W042+MiwjQKby4wFz+d5bCaOvjoHBZqT\nCT87OysABAFAufN8WBAnlNWgiGfWeRQWJH7PRnVRMyvLfr+fpJ1RXytihNsCbzBB8hoesalBknYB\nFPQRlgkQgkA6fOexf/nypYCMugLily9f8unTp3KHBgarXstdN9butTAGgAJjY48EEMZPK/I6ORLW\nIGnnRtQGuDZqBgj8G8XjMtt1SMI5JrWxZB1IhERpYzgMovCskZvValVAO8rZ/UShO/GWd3N6ok64\ndKsBcf2TseHIeJ55Xx0qqMGHGaV6TnkO/XutiBjfOz09LTF/+vCacd5VW61WxUAfHm4Lc3HJH+tL\nf2E4yUfiz3q9LqcpMWrIAU6ic/KSRh8RjkAmYLpYD55F+fokBTh0u93y3KSdRA7YIW9fRaVLAAAg\nAElEQVSQ/LzlcpnRaJTxeFyqqCIjTvB2CI69ioPq0EfNQvAcGiGjGihTm8ThSebF85O0i2qyD52Y\nzZxjTxeLRetZ7DM32wLARLfbzbt370rkgfIPdtyxT+S8jMfjVt4g4O3P5HpnR4nPzs5aniBC5QJH\nSVpn1KG8EKLXBsfmnk6nJanWCgVjTJ4KaDlpZ03jSSJkhBUQZJ9QsbFIGkWTpLVQ/JuNxLMcrzV4\nwsCwqGxIxgFAQ/AQQgTE5/WTtMIsDl/VSNyNTUU2NmACkIbBOjs7a3kKrxno2gMCzMzn89zc3GQy\nmWQwGOTt27ctg87zHh4eSoEk5+2wKf8ZTfivaA63Je1L7vhjEMXcWnGYQUIRO+RphgSP1qybjWzN\nxKFAauCAkfV+sjGn8WzABuDUIIKGl83zUVTsQUAoa8j7DX4I57AHTPX7OC4nBRwOShovk797XIzH\nx5qTJhfEAMXryZ6rGZmkCeUBfNgjNEJTPKvX65UrHTBQvpeL/u66kU9wcHBQQALXSiyXy+JUsF6M\ngzXnfjGAHLoCxvrk5KTlqNTsFH9H58HYmWE22EM3oBORdwMgwCb6czgctsrmv7y8lCTZzWZTLv3j\nvRhskkSZH4fqXGTOsuhkaN7P9+i7c7aQFduHmi1BHs24UweGvz89PWU0GrWAjAEeesOOMk65w0AA\nabNXziVinpOUFAjGTB5M8ucJ3zsBJ46bY8Sh1pgE8kkQCo6o4i0xSCdFMll4ecQ5kzbNjQK3EFkx\nW0GS/5A09xmQ6c1zCDEx4ShS2BZT9PaMTHMlTUVKKqryPhuTpO0Vm9qn7yhwV7O0ENoztsKm1cqQ\nTbxerwtaPjw8zHA4zP39fUuRAIxQIqwV/w8bRe4AoKfe+KwL8dvhcJizs7NMp9NMp9PWJWM2LN9C\nAwywLovFohwnN/NQU6pWXjXbhtz5CK5ZIzMSKGP/vVb2KD/+rzaYKGj3mXchmz6NQHM9B4wV/49M\n1Bf2MR7mjv1cy6wT562MGYt/x1j+SJnbg7ODwd6xPDl2bwXtUBzjqZU788T3PM/M22QyycXFRTl+\naxaH8e9avjudTs7Ozop+4WfNusIckDD/5s2bjEajVj0SA0N0tMEE82ZGPEmxB+iNuoYNewT5BDgg\nY4PBoAAn9hZsFiALMMAxdIe9AQ7IHseikWf0nJ0JH2YwE8begnVk7pK07APzw/o7NFSz3F4HLkxl\nTxwfH+fTp09J2kd50VNm4n1K6e3btxkMBlkulxmPxyWPM2lCV+PxuLV29PXu7q7koBjcM78121u3\nnYETJv3+/r5sRAwRwrbZbMrFb6enp3nz5k0mk0nu7u5yfn5ekB4CzHc5qcAkAkT4N6EgNk2v1yuX\nVxEeccEhTiEYyd7c3JQNdH5+XsJI1DCBWeHZ9ma5KM//h2KDxcE7Sxoa38JnWh2aESBkqtGVZtlU\n9kBgqSzkyddeJnkgLlPN/5kNYnPSJ451Pj4+5vb2tgjy27dvy2VTbM6Tk5OyzlwkhZLp9Xr529/+\nVvqLEdxsNuWc/beQc+LcD+hTQhdJ+yivE1aTJg+oHoeVSb1OBqcoUmQV8OB+8dMXU9ahHFPWNuh4\nW8gOBgejAIPhEJEZNN6LwjJrw15ARjE8VuTuZw2meQZgAaXLe0191+Fdym47ZICD4bmjOaeI9aQP\nPqX12ndZAzNQh4dNHQvH4+vQ0i6bnbj379+Xu7Mw2knDGpKXARtoet9z4bW1E2l2CueP8hDsjeVy\nWULJrl6L48g6I5voeeoy3d3dZbFY5Pn5OcPhsPTJIcPz8/PC/CWN00HIBFk/OTkptsvsGzowaVeD\nTtosEM+mhgngjHlHLvj8a7JQs4PsMdh5bNvj42PG43GRW54L2HQOEbZrsVhkOp2WPKHRaFTqoWDv\nCPlTvoO8xF9//bXcVJyk5F9anv6o7QScIKyr1So3Nzfpdrs5Pz8vCh1kbsqbDcDv7L3Y4wE5M+F1\nvJaS9tfX160kURSnwyYk26LEQbnHx8e5uLgoAIWaKp50noERJ/sbpQ5qB4w4Ucgeq4+lMi7mg7Gh\naAFfvMcGkX8zn9CX9ljd/G824cHBQekPioey3gChmnrlrpz1el1qopydnRUwN5/PS80IQCDgjrAA\nd16QYEWpacZX5wLssmHYam/BFK7zY+rEb8BeHa7EINZ5Nc5dQalDqWOUUU42AE5oNFOALFnRE66p\n84ZgBBwqAlgk7evRWdc6Dp40OWhmkQyqHeJ1bROvOc830LYhpKEQ6Xev1yvhT37PT8uX2Zo6P8X7\n0IUSAZP+nJ/N3GO07HHXa/zaWP6VDSPPvjQD4BMayEeSwhQDHgBvBpgwF/P5PMnXYUpygi4vL1ul\nzm3MeRdyi6dPTuL9/X1eXl6KUeb76H/LIbrLOSDr9Trn5+eZTCY5OzvLbDbLcDgsa8iFgDC/fr7X\nzkCf+at1ugEL+525Muv/mmFnHybJp0+fMp/PMx6Pc3p62tpbdujdJ+wbtogaXbbLyCpy7VweGBvW\nF/v7/PycyWRSdAPOLev1R21ntxI7CzhpCrBxXw4NdIey5OdyucxgMCiIFKV+c3OT6+vrEsNk07AR\nENLJZJKrq6skKcKVpCjjTmdbVpubhLldeLPZJmadn58naWJvCDZ9ZtOS2Y7QJimnf0DfPDdplABC\nTaKRKV7ewf85Ju4cEQtIHf5h0zmmbg8zaViTk5OT/Pjjj3n79m0BWgA2U4q8s/YSki0ABMD5lBDv\nPzzcFgTC4LEZh8NhS6H0er2iGJKUo4UHBwdFwe2ysVY0jyVpjKOTHvl9/T0bYf6Nwsdw8516zS0v\ni8Wi1B4w6wTIR+E5QY93+hZeg8A63o2TgOJhPC5G533quXGYludbQfNdxkr/6VM9B47R1wygmUEc\nEHujzhlJ2vfiuLFXTNvzf95nNRPpUBZ70kDRoSj6+Boo/Vc3OyWcisTweK87/OCj/4wZ1gigcnh4\nWMAEpwrRATwf58S5HehNGMKkSYq9uLhohd4xxBwdRkbQRewj9iFsg/MLyauhpD17AVlz3aikufaj\nBhgwOOjL2sHk/8zA8lzn17mZNUEf9/v9UqV7Op2W+Tw6OipgBZl1/hhrRk0r+oCsYutgdxi3c34I\nY9MX5qcuKgkI/KO2E3CCB9Xr9TIajcqxMdcvcUweBWZPI2kqbcJCHB0d5fPnzzk6Osrl5WURIMeq\nnc3NJK9WqyJ8gJ+Dg6ac8nK5bCVrsRimwDabTQEiSXN8kskHiXNMmveafkSgjYpZTN5pDwpa0zQ6\nY+Vn7SlaaQIMkvYJiZrKx+gz1qurq5Zw1rFDMt95LkJNOI21A4SRaMuJD9aYNfDmY62TFIbgNW9z\nl405NPCiOXSDgVqtVq0Mf5RyHYNOXk/w9O+Yz5omJ7wENetEPj4LeHec36EgK0uHUuhTt9s+HQeA\n4CfA254zMlon5nrMZiMwILA3KEf2t/vn/tKs7DebTTlCyvxYlmsQVQMUgIXBC+/lHQ7LmOVinvmO\nAY0NkB2PXTYcxm63W8LUm82m0Po+8p40Ce+Mg3HDMFhnDofD9Pv9Uu8KUJA0BySo5sq6dzqdXF5e\nZj6fF1Z3vW5KqZOT4svreC4sM/NqJoI9RO4fn0U+ABDURzGw4BkOTfkdzAtHm9nvZoutk1erVetE\nGvPL+F4D26wFNsBgnbHDgNA3l9BAXwEeASs+pZo0trMGFzj3SftaBuaYxGjWxvu8bjsBJyjPzWZb\n6Y/YFpQhVI9DMi58wySRF4KBBzCAtEHhpnvX63VGo9FXSUqcxBkMBsUzXa1Wuby8LKd6iGWywCBv\nb0gEpKZq6R/lglerpgAVVBuK0YKO0sZgma5mk+IFmxUxI4JCJ9kURYPRchjBSpgxUAbazUesMS5O\nDgRU8Lmjo6OSfwKFyXhZb7wXQJ3Xj2fbwJFF7hyXXTcbWPeZubQXjGJjHn0S5bWNS3gGZez3Je3q\nrrXn1ev1Sv0Gx7MpxW6PJmnAtRkTP5P3YGQZ7+3tbZG15OuifnXo0r+v6WszL1bGBtR4wyhe7xe/\nL0nrPcx7XajL4MTPq2n4PwI9zImpfINVAzOU92vNc1yzartozq+pveCkAcGeb7Nm1imsF59D/gwa\n0c8OH74G2M7OzsrhidVqVUo5rNfrcvO5nT3XvUoaxp7cEQNlmEROpKDLeQb949kGA8iwwcp6vS5F\nQQHFZop5LzqZOUiaECHvrfdCDY7m83nRyci8WXk+x5idgM34GBd2ot4z2FucSdgXOxyev8fHxwJs\n0S91+NptZzknLPZwOCxIdL1ujgQ6wZFFgFFhoyTNwBH04XDYin+aXvMCARaMcslwJmzDZ8mkH4/H\nX93vw79Z1E6nk9FoVBK4eL69McANFF/SZj2S5iQP40aY7KXCSDB/VhA29DBTSb4CBt48tXGnL8Ph\nsMSGARJQtKPRqCStcmytNjQ+6cDY2AAcC2eu+Q5KCc+FsZMke3R0VPJWxuNxWc9dt3qz4TGY+uYz\nKHKzG/y/4+32tpmbOgznsJH7gPzzbIP81WpVvLg696WmlpN2PgBrTEPuXA7czgXf5Xm1MXKioOfH\n/ajZBhRiv98vSa3Mr2lzGu/h/YASktNfAzTMtefQHmG9Dih6xoF3CWBxWOw1JsbhTjzvb4EVZN2f\nn5/LHrSjyO9ms1krL4X9yzOQc7632WyK08c8OefOyf0YWwzlYrHIcDjM1dVVFotFbm5u0ul0cnp6\nmru7u8KIo9ORJQNjwCZhIPaGk7DtdL3mCBtoGczbOUTu2MPj8bgAK/aiTx9Np9NSS4p38Szm0vuV\n/sG2UHUZ1sesFnaHP8ik5ZXPG+h77wKccLQs28yRc2g4PYrtYy7+zKHc2cV/LN6bN29Khm/STkQj\nDOCbi5O0QizE0lEc0IYog1pYHGbxM8ni/vnnn8sdMTAjxEY5XeLnI9CEpFg8qsk6y7nf75fjgsRv\nTW/buJpm5N9JO3YNdQk9DeJO0jJ0zBkAEBDhvBPmx4aITTUajVoxcjaTGQ4E7zU6ns8CtuxtORRB\ngSYAqr01GxVAIYDQRnHXDRlzSMBMSR0CsPKjsT/MvvB55sThEcB3kq++hwdIkUPytQCZBjVJE9ox\n64NiJAaNguH5Tua2gYUxg2qu995rXh+KzeOm3DlAjX4hi1D+ptiZR4Mt5sXgy9dWeD7quamBxJ+F\nW9A7jMf5RV5L//Tf2We851sAJ4Qe0L3j8bgwqhg79rqPoaOn7dXjYPIdEklxVJLmxIoveSSPjzWm\n+FeSXFxclBwTnNwkBZiQm2ZjDONqJ9WeP418F4d8krRyMpJ2CfgkX4EfO9s4dIASf+f5+bnYRDOQ\nSVqVs5Ft5BkZ73a7xS5RHZb+sS5mwWwja/adubEdOTra3kLtCwRZH+bODjDpDKwF9rpOB6jbTsAJ\nAmyqExCC4TFlhgCAIFFuNYXr0zYsPr/j0j8W3HQehpOjw9PpNOPxuBxXNuJz38mbAM1zmgTlykZM\nGi/w5OSk1HbBsENDsmgIitkNhM+gxSEmFCDjZ1PDLPm6ANN1r8XYaZwmQkDn83lBwvTFng1eS705\nraSZK5QSf7+8vCxIH7kAePBObqd2PhBskzf4LlsdzjGFn6QFXJB9mj0Pb1pkgVBikuKN4inVe8Js\nQ6ezzaKnUJY/WxtYclFsUJFnswcvL+1TSabtMSpWUJ4fsyrev8yTQYGZFL5XG2z64T7YSEC3I6M1\nyHsN/DI/Bkx/tNZ2evh/K3uvR31U3PNs5oY59X7dZXP/0XVmm+inwz0YUoeO0ck2dj/++GMxlA41\n/PTTTyXxlpLxOC/dbnNBKO8HxJJcb0YLucGIUr12PB6X/CiHaJIGePtE283NTS4uLpK0QauZgtp2\nAdjrOZhMJjk/Py/MkcEFwJ659zvMRNfRhfF4nN9//711whPHF0fJDiI6wpEK6w3mmjk8PT0tegTm\ng/3oongAReacsJmZliStfVO3nYETBmGEy6QATpwYSWIqyZZ1YhAK5B//+MdXl/o9Pj7mw4cPef/+\nfasPGEMUoMEMGyVJOZ3iWD7Ik8v9MNy+J8JGGg80aW7KTFIQLuOndbvdcsmUq+kh+PSFXA6O1bHJ\nHPtkU5iWs6JOvmZqkq3S//vf/15OPzFvGBCE0LVZGCPALWkE0HONR09hH7MhSTuUR1iKTeZ7imC1\nmOdvqaGYrCBrqtifsTJN2qEQvE3WGS/NNDdejJkp3luDHcCwlaa9Hj7HOry8vLSO8dJ3/0Qhwzii\nCD02xod8Grj57/5JBdEaCKAjrDw99zyz9kx5BrUf2FMwsZbvOizk/eJ1qoEx64W8m9mxh8q7DHTM\nTmIYd80KAkrJA3OIgpoX9JMxHhxsa5RQRgH9hRO5Xm9DzhRgZE8TipnNZi3g6DvXHMKGOTs7Oyt6\nGO8eveokcbOHw+GwMCE0h1iQF3IDGevBwUE5lcK+TlJkyswZdgU54d0AMj6TNBcs1o5kt9tthcuS\ntPQenzs5OSmXKGKD+v1+kSPWxc4ln7P+sGNbyzeRAW55RmZtp6w77GTyrDoZ/7W2E3BitAQaxrA/\nPDxkPp+X41oGDPP5PI+Pj3n79m0ZGEg62RrmH374IYvFooCBwWCQTqfJA0EBEA7qdrvluPB6vU2W\nXa/Xubu7y9nZWU5PT3N7e9tCkWwO+nV6eloWfzabtSrgEgayUibL2wwLWel4rTYgKEkWlfnjWFjS\nJDBipMjLQDBcPhhWwsJUhwoMvi4vL4uXwp0aCJtPQpGgyhzB1lCQablctvJKSF4jNmwDCUKvlbir\n59JflIiPa++qOW6Ol+PTVIAJnzCycTQgMUPAnJi9SBrji+K3geP3BkJJkx+FrDhJLkmLGbM3Zwqb\nZ7vZi/b72ItmA01z41wYxHk+/e71et1yHPwMK/LaY3YYhd/55EfSJFfWSe4GOjQDTFP3ngvCReQP\nvRYGslfMO9z/Ogl+V409TCVvDh2sVquyJ22k0S8YW4f6ABaU7f+v//qv9Hq9vH//Pr1erzDLPv4O\nUEu28sVRVY7K9nrbAmvD4fArJgJ2mTmFSZ5Op0madWXdyXHju4AR1pe9c3Z2Vu5xe3x8LGwNehEA\nbCeAED9A2KEfxsb+pK/0n9AaMu1wC2P7+eef0+l0ynsHg0GxQWZbfDQc4GcdZGBkub+/v8/Z2Vlx\nsK2DDfJ8mitJOZHJ8znl+WdH5Hd2KzEThGGjABf0fdJ4fklasUcm2YmuKIC//e1vpZLs77//XhgP\n19ZImrAP7wdVQglOJpOStFQfhURw8BDxEBA6jFKS4lkAUAA1pjVrYIDQ2ICgtGAPMEpJY4AYj4ur\n2Ztljgxc2CQYPDb0mzdvcnV1lfPz8yJUT09PmU6nBX2jbOy9stEYvw0fY+X/Dw621XWn02nr4igQ\nPIALQ8lmfHp6+up69Tr5cVeNeSfsQWlrU6woBDMmNdPlcB7KfLFYZLPZFG8PltGJmskf5y7QJydo\nWmFgWFy7woCmNsgoYmSBMZldYE/5HT6uD9BwX2t2ws/lXfz0HjYLQeO7BoJJWvkJyCw6Ablz43Nu\nrwG2+v/sELwmy2ZpkubUi9fyNVCzi4ZM+oQJMu4+moWlKBe/96kPchoosOacBDN2yJBDEg4bdLvd\nwmKQW9fpdArbi/OHc4t8oEMoJIksole95hhS5Isxc5CCPlAtFtZ7s9km8dvZJMfG+XXMpfWfnRUa\njik2yGtze3ubxWJRWJ7T09McHBwU/cqdSMy/dRIy6WRkQCE2zPkqjN25OmaMWGv2LPuL39e5LK+1\nnR1vYNCc8mAxT09PW7dZMmlJWrfr0jBKeIDL5bIYBMI2KD8WwwlWKF0r8Tdv3pRLoJjg1WpVEL2Z\nBaNYgBV1UQ4PDwvFRr//iMLFM4a6dOU9NrfpUnsFVmyAILM7q9WqVXYeJQmar9E3LMh3332X0WhU\ngM90Oi2nZJKtgrcX7zFCx9M/0/HUlXDVUmKksAC+0RiF7qRO5pHnM5+7bsiaDSfr7Lo9fK6m+R32\nqY0860fhMIy7jWzNNBkQ06c6edugo+4nzYra/alDSIyJfwPeUbaUG2eN6xAX7+D7Bi6uaUJugR0U\nU8n2+gz2aDaA6A47DXWrGRqzWu67f+K9Mp9ea/4YjLJe3s8e2y6b8yVms1lWq+3Fosxdt9ttXfDG\n/sUDR48DHLx/X15eyr09m832BBVJnITdAAacgHT46Pl5W6jt5uamgI2kCaPaLsDkuAQFoIFnJe3a\nM96PrI3B8dNTc5ke80G4BFnBJgHOYJWYW8JOyL0rPHe73VKDq9vttu6Xo+G83N/fJ9kCr6urq6Kj\n+/1+2TOuP4N8Ol0A+waAcp6cw2r0FZsDeATIuTI1wM9lOlxA77W2E3AC+Dg8PCxHiaGW8NrJ41iv\nt/FCMsMRLhaNRcSTpJAN5YsREt+DwOZJmnPuKABO3rx7964V/+T39MPC+fLyUrLBWVhOSJCTwsaz\np5Y08c2aumRzgzCNNvmML9FC2aGA7R2b7cFDmE6nRch8OiPZbszT09NcX18XtI1SIDGN00sodYMQ\n+uNcCCdS4snwXl8Uxtx6w9BH1itpjlrbc67DDLtoVmTOF3C+hQ2ywZyNEXNqehlGDuAOa3VyctIC\nCkk7J8JABGNtTxYlPBqNMpvNinzacLsvGCpCfVaivLs23Ky7nQGeC3BlXKbg8a6TNqBAwdpwm4F0\niInwCuMh7Ov+MBeMz3H518Ju/DTA8HgNQgwybYj8O3/PToWdn102mIJut1sMrQtKIluu8oqeRW9Z\nPpOmiCT3p71586YAHwzxwcFBbm5uiuPK2tr7h/0jN5DwMmEHvk+1V9YZHYKuAiyQs8j77ehhqAEz\nvjMIefa6scbOQ0Q3ADRcfI1cF/IpkW3kkXoutVF/enrK3d1dut3tCbR3797l+fk58/m8HB5g39dO\nMe92agDrSmidEgSU9OckEPaSvjNfgEGckJOTk6KvsJlOaH6t7azOCZML9cZEoLgQMiaVBfHGTtpX\nT+NZDYfDVmweqmu5XObs7Czv3r3LfD4vTAiKh4mlL1CEm80mt7e3mc/n+f7770tsdDweF8ADs0Ll\nu9VqVSoeAlBYYECH47NsEDYB3wEEobQstD51YaXMhqUZRAFOjG4BQqzD8fFx3r9/3zp5tF6vCzCz\nd4wn7FAW68Ia1YXaSGp23PXu7i7L5TL39/eF8YI94r4MU7vr9bqEAgE/NdW5iwZbBMuRpKXMknYd\nDRtSGziavWg2N4ZqvV4XeXI5d472JQ1ITZq7adg/yBZrOh6Pyxoa7AAWHfo0M9Dtdlsgy8weDS+K\nz6IY+cm7MHQ0lKi9M8+NqWaHKGFaANBeH3vdGFEfdTaAM71tI2yGw142fbYz4jli3uh/7bHyOdqf\nnWb4Vzb0CuOHZWBvr9fbPD30X9IGacwfYQzGvtlsT4CgS5Ptnri4uChl7ZOtDl8ul+XaEDPi1kNJ\nAzL7/X7+7d/+LUdHR/nll19aFaXn83lxdjH42CLnObKm6BkYAJhxHGISSPmugb3HnzRhS+fQWK7Z\nE3VtE8CRGRl+9/PPP5fcSWp0Mac1W8uJPOfFuW/oGv+byIZPqCKzXNaKHDA3R0dHmUwmhVAgvAR4\nNYPzWtv53TpJAzAwvMk2hENGcJ3tmzRHHK3goP2Ojo4yGo1asa9ff/01z8/POT8/z/Hxcd69e5f7\n+/vWZU6wE/Ymid2t19sblP/7v/876/U6//7v/96qLeIy2NyiybhMVTMWUD4hGJA1G6Tb7ZakYFfk\nIyZqpgCjD6gDgNCsZBEue8EWzMPDw1xeXubDhw+ty99IjCUkxvwAMhiDDTNKiA3OXDFOxgEljPH1\n0eqnp6eMx+Ny/xDvOzk5yWg0SpIWw7XrVlPwKBEbHMBp0lDPNuT+vsMyAAZ74tx9Azvn8AnPNaC0\nMUcOVqvt3VHj8bjE7k1Rw2aaAncYyt4+wMTG3UDHgCppco/s1RkcOB5vb9RHVpFF5M5MlNk8vs/p\nDowLfXBoir7WjIXBsdcIT5zfYVgMZvAw+T3yzv5xmNlMlIHPrprXHXBiIJikhFfq8RPCQH48fozn\n6elp5vN5AffT6bSE1mFAyO04OjrK7e1trq6uCoNjxmS5XObx8TE//fRTye/gRBA5RT4skDS62Sww\nHr9P6cCe83eSVNGTPAfnEjtl5xE7Q+4chp1TUMg/z/HJNqqi26j71KLBl0O1Blnsm263W5z92rFA\n/ngm/aI/dSLrarUq4PDk5CQnJyd5+/ZtyXt5eXnJu3fvis53GsUftZ2AE5BajebcMFj21j15TDYL\nmjR3ukAnUhr/+vo6s9ks0+m0CD6K03TzbDbLaDQqz4AJOTo6yvX1dY6PjzMej/Pysr38yuf4bZh5\nJgbItPrLy0vrfpMkJXHK4SEWmL7gpaKoURCcQGKxXQ+AMYL0if3xXgML+nl0dJSLi4t0Op3iUbA5\n8TApwYzw41WTQ0DfADHMyWq1at1jwsZwAi+XeZkd6vV6mUwmJeRgj4E+7Vp50wwA7H3UoAWAYhYF\nIFJ/1h55kpZ8PTw8FOXtuUZWFotFicED3pAfU72Pj4+lEjIhSYAFMW76YWXm/rOPzNr4plbWDuNk\nJsXxfa8lex85RGc41EQ/7N3jjda5Oy8vLxkMBgVA0U/mhWd5/g1yDJbYdw77OBxjZqXum/OmzKok\nbQaNPrs/u2ibTVOdmfkw8EvSMjoAZYwuupKQu4GBQ3sYwfl8nl9++aXldBGe4B1c8GdWGtaZI/c/\n//xzer1ezs/PW7VPyLuyDYJZRk+ZuQC4cFIJmYYp+vLlS7nFl+fU689ewy6ROEufHBZN2se3yWc0\ns8F8/vbbb5nNZkUPECoCCJCrSWQC547nEqojnMX+5fsOowH+WU/6ip62PcFusK99ei35up5T3XYC\nTqDqoeXxlOv4K4thpYbnDrVFAi3AgEUDZYJQR6NRzs/Pi0HnOCyeJyEFjudi1EiSfa8AACAASURB\nVDHCnKO/vr4ux2lZNIBM0tBgIE5QMoiesSUpRoES2syDj6oRWnmtzgMLPZ/Py2YhMzxJy0ggHIvF\n4quYpY3BcDjMxcVFnp+fc3d3Vyg6AACK5fn5uVScRUEAMnq9Xuv+IjxUxue4a5ISx3S4wmCU9bTB\nR1mgEM3Q7LLVRq/Oi0iaUAVjxMB7szuUSaufiwH48uVLOZaNPNb5ESgW1hKamoZB4H0oe6+J85gY\nh40Q8sizeYZzXKzgWNt6Hth7AHhCePTb4NR5AM4jqBvvYe5qw8gYUZzOQ2OsdijsMAGWeK4VukNA\nrLG9U5prsfjk2bdSw4f5Zw/CZDIvjJN9jyyQP8J8wUJzZUCSMnfkhKD7zs7O8ssvv5TwCyG6+Xye\n4XCYXq9XShCQIE4jwRZWdTab5ezsrADDpMlhYg0JL9nBShqn4fDwMPP5vMWmsD/QQThLXn+zpDBI\nZsmT9u3kyJOPGicNs22W5enpKWdnZ+Xfw+GwsHE4tOxZ1sqX35IzQ8KtIwWbzaY4F94HXnMDMOtg\ngKYLxmGP0GN/BkySHYETFs+5A0mjuFhkKyGEg88lW6GeTqflCBkxZqO82WyWT58+5f/8n/+T//zP\n/8xgMEiSkg9CbQMWltMp0It4p5PJpCw8TASTjWBMp9NCuVGrxUfKfLKChSPEg2GHhWGRYTEQ6IeH\nh9bpn6enp5JgnLTvbUnaxdAAATaEzHfSCNSvv/5avO46zwBFwXq4P7PZrBgLvGLWimdYaQFubOz4\nPJsAT9OeAs9DhpyAtutm78fy62ampO6z859gLZApswBWhm/evMnnz58zHA5byXvkohggeV4NCGBH\nYK1sfGuPB8DPPgB80D9T9zBEZh2QsxrkGKQl7Vwd53y4/6aok6bsNrLGu0xV86euX0FfcDDMCnmO\n6IvL3vPd5Ouj0PZ2a2+a+bIxNMjmd/9Mkf//3egreRW14U22sn93d9eqb9LpdHJ3d9cKpbhx9BZn\nEq/83/7t39Lr9fLbb7+1gB0s7ZcvX/L7778XJxOGhdMp19fXSVJYE8CzD19QhI2TjMlWtn17uoEF\nzAL5LbAesNtcxcI1HEkK++95JFyEPq1BOo4tgIk9hq5w+MdMUK/Xy3Q6LeUwcNwdmfARbPQLDbBj\n9tDF61wegs8zJphMA6mkyQO1LcDu2TF4re0EnKAg8Q4ZGIaTDiM8PpWB8OC9Y3CdK8LGMJvAGfea\nOgedkqOC1+eMafcNj8FKmLL3bA4MAhSfE7WShskgPou3S3P45/HxsSBdh1VYWAwA36efbBrQLz/N\nrPAulDaeMmvCpgKkEM5hrDBfjIHicgYkfgdUqzeikycxnklz4gPa0bkWPNPPr3+/q4airL0uy7Vz\nJJg/fm9Dz3zAeNAMLJyMZ2aBuXXuBobVzbWGkGmHjQA5vIe+Wrm436zVH8mYDTTNjAb/9nN4lgEp\nSX027H424+ffda6OmTr2OR4nc+4QE2EAJ63TT+bN4JG+oEu87vzdfTYw5xlej12HdZhj5/kxX2Y4\nWS/rNeaNeTg7O8tgMGhd4wHgob158ya///57qXhKCIIkWfY8a4jsM5fUHDFgZ47RmQ6zoO8BMk9P\nTyWPizUYDAYF2Jgt9EEKZIm9gt5lDgEe/BtnDp3qvetogNlAy5vvzQH4mZlyWQCHgp1AzN6DyQE8\nmJUkB8YOCn1l7XBasZNms2z7cGjZnwDJuu0sIZYBseBJWh4iAm4lzWKzkCS1LZfLVu0EX5+OYA2H\nwxJrRtFgqK1kOYLMYtugcLSZvtXJdCyUT98gvFRyRUAAGNDb9rq8oGZnmDMfPXbSI2NwXgm/h5JN\n2hUtrdwpuFavFaj86OioVcb+5aW558LrlzT3HBl8mvaHSQHIOUGLGCjKgSQvh3KsAJnbb4E5Sdpl\n59ncZgf8b7NC/p4NMvuD79vTwhvlmB8nmnyCyh4QILQGzEkb8PlEGSEay2gNqF8LW7E3bHQNGmx0\nzaoYrKL42fc8g1AhPz2HNkTur3Ma6j+vGTAD/DqB0CDIFD5j593+Wcvua8+0DJutqEHlv7rBYKFv\n2cMOH6LbACPPz8+tmlXodp9qZF0Ya6+3rVPy6dOnTCaTUjMK7x+QPhqNyj5I2ndKwerCohHyZD19\nGMOna2CLkQHrddbg4GBb1Ozq6qrFHJjdQkbRs4CRJCVVgfnjj8NLDqHQN4dJkLenp6dysMJsnp1s\n9q33Ms7u8fFxsZu9Xq8w+wAa9i9JrXwfe4uuwY4C6gGPdXjSziPMkZm3r2Tu/xPJ/X/RmIDlcpl+\nv9/K+kaIEWRiYb5nxiyKQxCcrWazQ2Nx8Z+LABG/5D2155I0RpiFMRUHyHJcznF8H6mzx4miWa1W\nGQwGhdZ04R17TbyTvmBQDKLot5WhPTOe5TCJQwUYeU4d4fGhaBxjxih1Op1y/wXvNYB0QhS/e3h4\nyGKxKJsHBgYjxHFxnkdCMnRtnW+Dcf4WqO+kWQN757UhB2i+BkoMRGqalFNgBhmwgev1ulV+HaoZ\nGa1Bda0AUUT8zrKbtK9op5ltqWUNA2C2o2YV+J3XzvPEv82esHf4npNpeY/77eewj+p18hg9PkA9\n78MI1ODRTKFPfBhcJE11VN7Dd2vWlHcz1po520VDhg4PD3N+fl6OndvIek7JY0AOanZzvd4eQSWU\nzj4xE8GJD+Ydufzw4UOSdrI04IV5Y3+5sjiMOv3w7cIYSzONDvWgY8bjcVnPxWJR2GKMO/3CFsDu\neU+ORqNi7wyazUIbUPNMg2vmk8hC0gAAHMNOp1McN++bJAUwnpyclDGgS11LBTCHvUUHOyGWvUBY\nzqFkAx3WnLnyd19rOwEnRmyfP38uA2JSXJqcxUgaJoEkK9PAj4+Pub+/z93dXf7nf/6nRUGb6mOR\n6gRNK1Umn36xMIQsMC6msqG0UH7kU5jmRPgxyPaI+DtGwIiSRXYSMMJudsc0HQjdIMfeFwrRyJY8\nAvrg3x0fHxcAmKS1Pg7T+eg34JECPKyvjbWBEkKMR0z/GDfJZKwHsuC8hG+hISvINKcEnMdRb0qU\nkQ21P2MK2Zudz5lC5/c0e/O1ATbI43nIDN6P2UHWnLyOmhkwnc+znNthJsgGwMAaAIz3y3fZW2Zb\nmFOHavkdzyX3hjE5Zm6QTD/tHGAU6JNZQAMgGxjez3gd1/f46U8N8vic5aAGW//qhuwlKXl7GPfD\nw8Ny2zqshfPLjo+Pi3OIbjg6OipHgdEBTjDFsfEJM9bGsgTbwncAHcvlsoB5O4027qwh+nS9XhdH\nmM+ancap4uQiSbe1zLHPqCHV7XZLDiIGnn4Q5vD9WDBUdnRfc5xJGyCEUx+N5hQR8mXG1s49dhEw\nSZkH9CkX/LH/AVnYtOFw2JoH5hUiAAcTmeCzTrh/re3sVuIkBY3++uuvefv2bQuRJs1RVz6XbA3w\nzc1NASgIyu3tbWazWWsToyAw7jAcfMY1P8wu1Emjjlvyf0m7ZPd6vS65MCwg5+2TJiHURsJ9eO2E\nQdJmXQ4ODnJ2dlYUIEdrUcAYBdPTgBL+JM0dGY6R867BYFAMPXPBM3zMzMaI76L0B4NBAXLMOxsD\noOON5DDeYrEo68/vicnCFJCBT3P9lF03xkk7ODgo7B8G3UesARL2tg2yzD7Q6vCBQTC/N4PgdTZd\n+xowtvJOvr7ML2m8OP+ecdMf11PAcJthcz/piz1MKztk38/i72aXaAZtBu7sP7xkgyje4X1P/7g1\nm/U0kKidKOdg4EB5jeo9mDSshMMNfv8/8zD/FQ3n4ODgoLDdm82mhA87nU7Oz89bYAFHp9PpFA98\ntVqVat528Lx/qTyaNGuIHfCRdkLjABzXg8LrX62aqztsGHu9XmHsefZgMCjHcXGIcIAIqWBQzXDX\njoENOwmqgAcDm6R9bJxnWNaYe/YGMtLpdPLhw4dSHHS5XJY8HmSFAwqj0aicXEUXw5ZQ9DJJiSIc\nHR2V5zhXZLPZtHSXw3JmSLDjHOZwHpz3q+3ga21nOScvLy8l+zlJxuNxRqNRUSaLxSLj8fgr42eg\nkWwVKwlNFlyEm9LIbCAEwMmuCJ6peNA/DAt9cJKPY3mbzaZFa1HREM8hSXkmmwZDW7MVTpxz3slw\nOEy32y2Cg8K2N+6Yueun1BsTQ290TF+vrq6SpChYNsXh4WEmk0lZQ/6fRKk3b96U43qOIaN4AGje\nyDSU3GuGOUlr7VH4GNM/ovJ31Uzzkk/g/AvnDzFe+o7HxJpi1A2Ka+NrA8nvMbQGDcylEweRXX/W\n1HLSPk3jMJQNrcfN352QSv/5jD04Pt/pdEqBK37PPgbM0l8Uqve9wbnZOPa5w3/Ik0Mu1McwhY7c\nQ8PX4TbWyawp4Z26VoYdBIdCoMENRgziasC6i8baPT09Fb1G4cSLi4ui28ziWU7MxvlOGXSbHVDq\ncfhwgkEPup938Xz0KBffUXTNBtDOnNkv1o/3WVeaLYZlTJpwEP2z04DMOPdmtVoVloE5RcbsEDAu\nyxiOqI8Bv7y85Pz8vNQyQhe4+dAFz16tVplOp8WZvru7K6zRcDgsZS04rm1WxkeDCTeSc4IcW18B\nYJO0dJLX44/aTsAJx9Gge05OTjKdTjOZTPL8vL1g7suXLyU0Y3TsBpK7vb1tAQrAg9kJKDDTw8lW\nUbtGRK34fILFl0Xx+6RRzgizvUBaTdXzjF6v16Ie6SPAw/+G6kT5+bkWCAw9v6cvbMyTk5NcXFzk\n06dPrRozX758yf39fYvGxkCgRKEh/S5/niNsoPlut5u3b98W+pF5hklwkiuGhDUDxDh8ZvapNqbf\nAjih/ygsFBj0qgFH0sgD6+gxYGQBZK7IiTGzwfNzUHR8x4rB1LhDQABHg2OaZRuQxRq4DLWBDcCY\n33s/0GeHUBhPDdDpk0MLgC2zIwYOyI4ZIBtU1goFy1iQZYM2DI+TC5FLwgKcrMMwAZjsecIgsNeQ\nAYdQzXDV87nL5vwO5KTT6ZTilDhRPkHFOlpeAHusB/Wf0C+Pj4+ZTqetMvgOsxB68y3v1puACRtj\n5r4Ob6Dn0WOwCbWcueFMU6jQjBwAC9lF9jlhyf/zf87hsDwDWJgzfkfVXAMZ5ufx8bFUPsZZ9x1I\nw+GwOMXUBasdysFg0Dr5R2gMJxWwZ/sKI2OWxfoY0I8tYS+9xhLXbSfgBOFhstjk0+m0RemjQJMG\nZTIB9eDW66bcsCvQvnnzJu/fvy+VR/0s3k8SJrRh3RAE0CNelz2zOnwzn89bXpE9fzYZYSjeDXXK\ngidNTgCl7Dudbc7J4eFhPn78WASQd5v65vuuoGoQMRgMSr9RfvP5vFRThN2B1rZiYZ34DgLJuFer\nVdm4HIeG+bm9vW3FmJkPBBow4pwDxk1irpXPrhW3G/Rm0jYwyJqBCc0eZm2M+Ls3P7LvBDzmnO/A\nKFhRYxzNeNhDA5g7pOKcDjyg2nDTLwCMmRrHvW2MDbT4ye+RMd7B/nKOCyymvXIciLu7u3z48KEV\n+ttsNqXgFHJGHspyuSwKmdMlljH0jC+NYz9tNptMp9MkTeEvvEU8f8snesO5KayF6XEML47IrhNi\nmX/6iaOSpDhXLnOetAvLGRADRizrOKo1s9HrbU9Jmv0yo4KMozu73W4xvkla8unQGWwuurHf75dS\nB9gRnLzBYFBSCBgHxhog471pBytpnIx+v9/af3WBM5pzdmCCkGvfrMweuby8zP/+7/8WRodIQa/X\nKzklPAdQslqtWiUqut1uxuNxYU8cwq+jDOxz9AlADOYsSZlfwm1mUrHVq9WqdZKpbju7lZhTB6Ax\nBuBEqjqkwyQmTc4DwubENT5vhsG0k424gcN6vS5389CHpNl8LAxCYDaHeCSbBQYHhZWkJEg+PDxk\nNpu1jnuS68HnWMzaG6QPVEa0wgclJ41woFjtQZIHwVFssxd4JDxzvV7nu+++K7cGI6BO2GVzLpfL\nAmBQYNRGQeE7nGBPHmPrNTVdyvtIXrNnzTiceLirxlrZ+0GR8DszBQYezqEw+HXIBkXHXjFl6zwW\n/pC8lrSP8CdNMSfTywZIhDRN69a1UljjpM0KIpckSdI/My98lvlgTgxOeJaPHpKXgqzwDE51AWLN\nuPEOAAh7HObUoB4ZRTegdA3eAEckzDNX7pdlnXGwLg4TeU39e/d/12Ed+oUcms3wsWLGyHolaSVQ\n+u8OY/Ms7pqB4WIu2Qsu2AlgA6zA6mK8zVpRD2S5XBYHkXcQgmXNALXIkp0BqodTMXw6nRYH0bko\n6/W6nCRlXQ8PD0v+Gf1NUhxPnEY7D4TB6PPnz5/z448/lvfQ5/Pz81Y4HZYFUI0NY2/BXPd6vbJf\nkNebm5skTZ0xWG+zYOgRKtQmyWQyKfk+7K1Op1NO0OKwAozqpN+67Yw5scLivhU2qBcYL4QFc0JO\nTcXWIRdimy6PjsGz0fYEdTqdElNN0gIlzjWp7yZA8fF+cg36/X5B8aB01zhhfNBp3LvgvhntJk21\nQhrje3x8LDcwY/AYL2MzQOAkDV5esjUWVKxlUyDgMCnMNYARJYA3vF5vj7US+3XxovF4nOvr6zL/\n9TFoh5KGw2G53t5AldM/yRaYUGDvWwAnSdtIO7Rg4GHvGAWfpDXW2kM3I8H3AA4opZp1oD+mjU2D\nW76ReYNuvCYzmA6/8NN0Nv2Greh2uwWgAjwZj49U+h29Xq8Y+k6nU+6ccnVk56Qw32dnZ7m7u0uS\nVn0Le7x2KDgOyvcZP8bIFVEd6nJdGeaU7xh08EzXhEhSEiMdRuLd9op5758p8X9FGw6Hmc1mJUy5\n2WxKkqvZMhto1gfd69Cs5dyfJYeCtaQmCrqcvVXX5EC/wDyjV/l/dDcgBEYXgE4fkNXlcllkpd/v\nl/L5sDfkkgyHw1aelKteU0UWUNXr9UrBzrdv35YwIDbC+sEMFHqXMAxgZzqdlitZzs7OSv0Z9Kjl\nE3uArvHlqpxuury8LDbr4OAg9/f3JVTnveB57/W2dwS9efOmBUjJZ+HvvHcwGLwKcl9rOwEnnz9/\nTr/fL2iPP6BaAwyDlKShrI12ASgsJhsCOtDPYlFhSEzvooiTtJQSRWhcCdbxThe/SppLr1DO5+fn\npdwyZ+5ns1kBEaenp2Ujfvz4sVCmLCZAwgmuxLjn83kmk0mZIwStjgEiEMfHxxmNRuW4GPPF3T3c\nO3R2dlYE8Keffiq3FFMcDU8Vj9pJbvydPB+El9COAUntTQN+mCeML5s3acJIq9WqJHMxzl032Cd7\nGVbYSfs4oA1RDVpQlvbqa1Dz9PRUkvKShir2RY3OR0raV7S79oYZMbMF9ioBI6w962+Wx8wO4MPH\n3h2GYXzsQQx90lT9pLAiYT7mzewac8++ZmwYiX6/X/aPy3ATggWgmBkaDodFhwBuksZhcSVNAwh+\n8hzXj2Bf2UGzoq91nR2UXTbWh3Lx3PtVh9/xnM0+ITsvLy9FVuv8FQ4KPD1t70FjvgAcAG+cV3IW\n0Q/sDYq2wWZ0Op3i/QP2AA3kglCUkLmfzWaFJfnhhx+KnGIrGM98Ps90Ok2/3y/63g7acrlslX1w\nQUTfccZ+MTto4Pb4+Jibm5tiG3HYnp6eyjFiElw7nW0+0+XlZcnhZG7Ze+hnbBB26NOnTzk8PMz3\n33+fu7u7HB8fF0cVVopnUUmX577G0HNaDeaSz7FnHeJ6re2MORmPx0lSQMR4PC5F1rxRQXiOjZvG\nht5DWSXNLb945kwu8WQMt5Pb8ITYXH6uQydsQsCL2RnXRGHTQf8dHx/n48ePxXNC0blEMpsd4UZA\nEQjmzoyFQRKgzciYhtBQcwCUfH19XWKNeCv2EjFyHz9+zPn5eZImj4XNxm3Px8fHuby8LEqm3+/n\nl19+KYoVNgvwiFKj0qGPBvJ+EDveDO9PmvhunQS3y4YiQ/EmTQjSYQoa4AEZ9+ZmQ/Nv55JguAlz\nEB6ELQAI1AnEVob2ZpN2QTbei8dXn1hg/9gr9NUIeMfsE+hf9iYyYYbH7JCBFP0E0Fg+aFDNPl2W\nNLebG5TwDkAJ7Bs5KXyGXJSaZsewEqZFfxjAoFdQ6hh0M0WMDwfLYUz6jmLfNSuI8cTYj0ajYswZ\nJ/92bRIcDfQD7LLHjXMEeFssFnn//n055cKNu93uti4WenY+n7cYKBwjJ2EDihxKplihw5jonV6v\nl6urq6KDAVKcvgLIEBq8ubnJxcVFK+zOn9PT0wIeAPuAIhwIZMrH570/Op1OPn/+nPF4XJ738PCQ\n4XCY4XCYzWaTjx8/5t27d+U4NKCF/YYz++HDh9zf37dOz7BPqeHS7/cLwHt5eSml5X1IBObMR7sn\nk0k6nW2CdK/Xy6dPn1q5nIzdJ19J6fijthNwMhgMSt7F09NTbm5uCvrlj+lr0DHAAAEETePZIHAk\ncHKSxLUxKCDExPEs2BEjffrB7/HwQLPkzjjc4mS/zWaT+/v7Et4ALSYp5+kxNMvlMm/fvs3nz5/L\nAkKJs4kAHpQPpnQx2esYqTpxkr4lWyF7//59AWPX19c5PT0tawCFCWLebDZ5+/ZtZrNZJpNJTk9P\nyy3K1DsYDocFyXNjKP9OmsQwMvHxWBw2QkE5qZlj4N1ut8RPURjOjUEJ7pr6TtoFwjCSDnuhMPlc\nHXKjAaIdiknSAiYAcudEGZSaIsZYOmQEPVwrGjMUzDnvR4adz4VhN8vJZ5zUx0kznu2wBrLGH/pn\n8PRa+Mvz1u1uk60vLi6KHDnEQj4CMkSuCUaNPlIKoAa9zAWGxSd96I9ZKEIAyRbkkZPlOeCEBXOF\nIXe4Gcp8l+3Tp0+FPUYfAdDQw4BadJ3HgV4y0HOeHpemrtfrsn7owX6/n/l8Xhi4pDkpCWvtyuDo\nAoA/evng4KB1uzuAK0lhO9mz2AUDVBg/O8Xv378v8orMJimMG+9dLpelRIMdu6TJ2XM+DXOUJD/+\n+GMrD/Ho6KgcTe52u7m+vi6y9+nTp7JGT09PJRyXbCMWRAfMmFIlFnZytVqVUNN0Om2dwOL3OBn3\n9/fFeXz//n0JeY1Go9ze3rZyFZ0bhP3+M4dyJ+Dku+++K7TYr7/+WjqL0kgaj9LCTAO4cConScnb\nICSBkpvP50UR2asajUZJ2ln/UL/2+Jy7AbpF+AEdJH+SBwL9hbInDJI09CheII2wj+OMbHKus2aT\nOyuevroUM58zAqdNJpP84x//KHkfk8mkABs2J8d9uYob2pnEKI4Ls2G92Z6envLx48eSCIuRhgHB\n0Jo+RxmQCAf4OTg4KN5Bp9Mp8UpAJCeG2Ji7ThpMGjk1aE6aM/7IJYbNORxJmxVE9lA0DpMgQ47z\nOhHbMV3nhdigO//EBQSde1IzBKbwUdJJu0gbYyC5EfBppwOljVK2PHNywgAC+XPisOccY0FxMIcc\niZGzHvZQ+Sz5AA6vAeBRog6T4iwBTviMw2f8ZDzkaBBenUwmLZ1E+BZmgXlCXnbdmAtqIpHUiPw6\nFMnet4PnUzaEN9brdcbjcZHjs7OzfP78uYR/Tk9PMx6PWyA1SWGH6UfSsKxm28m9MPuHLHW73cIS\nmLGm/0lzMs2hO4AHSfiLxSKnp6dFpwNIkVvrcoy6k2/Pz89LMbTXjPUvv/xS9udqtSoOdpISImQP\nMKfsOeqHPT8/l9OhLy8vubi4KGF8n8Kibz6B9do9XHzeckG43Swm15sYXGNz7QS91nZWhA1BPDo6\nysXFRW5vb1sdRQmy4fGYLKQYxaSpEooX4qIxxMdM3dm7TBqBtQDxfxyrYvFJuqqpexI/AS0IOooL\ndsBsAcoZANTtdkvlQk4S1OEBjJSFg8UG6fomTNPmg8Ego9GoeIar1aqAKTw1gEeSFoBIthvu8+fP\nubq6KgqVvBLi9ggx3hWKHqEEpGCcaYvFIhcXF0lSlHTSnNYidstJJYcn/hlF+K9qyAwN42dPHw/P\nXrkTWVkXvGg2sY2ewyWsncEPtCxzxHdsKMy0AIJhEgz26pwXJ2w77OmTdvwfYzXLBYhHNvjpEAaG\nhr4sl8uMRqNScdPzTZtMJrm4uCh7ybk4zJvDSfSJtcDR2Gw2OT09zWQyKdU1ofQZD/uZvzNG1pk5\nA4DhicPOcPoBz3U2m5VcBOfcOMS1yzYYDAowpl/MDSCXELqdsSQlodWMUtLkFMGUJMnNzU0Bw0dH\nR5nP562SDxjT2WxWTjlyMov5dWiEhHz2EuNItvoGubYjYUehrqjqPDj2HQw6DDi3KBP2ZI/A5qH/\n2XOE0y2bBilHR0e5u7vLd99918rXSVLsEUCH7wFmcGpJnrXNcZ4abDe6wuweugDGmj7DRB4dHRXd\nP5/PS07jeDzOu3fvWhV6Aeg885tLiP348WNRMHjNzupmEEwkE5Y0J17wPhDMJC0PxJOQpEXRouz4\nO54TeSMu9wuNdnCwPY5GXJTEJP7/+Pi4MAoOQVlJIYwO19ze3rb62utt70R49+5dRqNRPnz4UKh8\nDIKVHQ3l7rABPxk7Xs+HDx9KaAUBwRsEYZOEljTH9mArhsNhbm5uWsi609leioXxQTGAmA3WACn0\nlY2aJPf39y1QyO9B/z7yZgDrmgq7bDXrUBvnpDmNkjTGFeBlWh+ZQZm8dk8GzwNY21t1sjYso3Mq\n6BO5Tj5tZrmh1TkUdUiKPpghq8t3A9wdtqjngmdgAJLm8jV7sRg6gPxgMMh4PM7FxUUxaABjmFiO\nc8IoEXIyo4G+waj1+/3MZrMis/SFNbChMmAz2+Kj2OghJ3QCqiwDzLe91102xojedM4HoMsAbrPZ\nFIBJGBwASSjNdZTI5eBZSaM/ky34xOg6rw7bwXoljZPAmlBuALnjQAAev/P1zDwjH+PxOC8vL6Ua\nK+NBFjnhOR6PW3dpJSmOw3Q6bbFi7DUcXeRms9km5cIU+7Z4HBWiA9ieTqdT7u6pWeRud5t35Rvl\nYYmQL4clfaACZ55QO/ucd+Dk+D48F+D01SWbzSa3t7e5urpq1V75/vvv1OWGhQAAB0pJREFUX5W3\nnYATx43tUUEvYby5cMjUdz35hDTY2CBFBDFJC2UmKQuOADrhFDorSVFahIpQEiBkP5cKfYeHhyVx\n9MuXL61kOLyAwWCQ09PTfPjwoSR64Tk4J+Xy8rIIvFmE1WrVqvg3GAxyc3NTKGIy3AFyKF7m8/ff\nfy+KmYRUo1o2MJRcst1A5+fneX7eVvDF2wQ9A/LwDii0hDJzKGKz2ZT5s5Ew0+R7G0DpTjhcr9et\nBOdut1tYl103gwN7ZfbW+R39xyuELqY6Ixsb4A34cTIfQAjDh4G18nC4wkYWeWLdYFoM+vCE8fr6\n/X7ranlACUod40GoDoWKh2baHcCdtG8ato7g+Un75l+AweXlZYmrQ7UnDciBYTKNj9FjfIC3yWRS\nZI81AHQD2jkh5fViHv1+jDN7xeARGWAPcdoCZy1pEvAnk0nryPQu2nw+L3OO7rSjQWgFXYisEVbB\naTk/Py96EvYoad9Sj1zBUqCTLy4uCvhwAiynYtBhsM/cNO8QJmEQ2DuSe7nUFF1yenqaT58+5d27\ndyU/0CEtFymDaR8MBmWPAAR4H2kMLy/bAyA4pTA/7FHk6/T0tFWXi6qvJAkjQ5wGAnDAFpL3AThg\nj7E35/N5FotFAfTMD3PEQQ7W4e7uriR0w5ACKNnH2DnmfDKZZLPZlr0YjUY5Pz//in0i4fa1thNw\ncnV1VSaM4jhJQ7ViqJ2sljQhHbwLNq+pLZgTFtWeFsbw4OCgCG7SgBc2DcYaQ4+yxgvgvfQJQHJ3\nd1c2MXFBAACb9OHhIYvFosTjfWw4Sen/8/NzPn782Ep2xQNFgfk+BeoDQCPzPd7NnxoYYFxgZCz4\nrrY7n88zn89zcXFRjt8ZvEDrnZ+fp9fbFvIhhHN0dJTb29uyxtfX1616AEmK0XTYjjoUvV6v3POA\nYb25uSnH3fg89/7sutUhPQy+ATbKjo3uvAVySepYuZtzTvA0ydZH2fEee58wDSgj5ARP1EYXdi1J\nAb5JinfI3vLeSFJqLgCOXTDw/v6+gC4YIcsgbGmv1yvAnT3W6WwLOhmooexrFpS97QrK7I3n5+ec\nnZ2VfIHRaFQ8Z/bAly9fSrgBDxanpPbGyX0ajUa5v79vlTj/8uVL2af0t9vtFiaQ5FrCITBT5N5g\n/P+M/v5XNHTjwcH21CNrw3ohT9TaQP5IyiQnDpkGPAPcmDP2RX0a7fT0tIBIdD/rBWipLyZl3gk9\nOS/GuYKwIVS3dn4VsgPwHI1GrRQAZIk54EQN4MRHj7FB3333Xfk8cmpW/fDwsFzpgj7GmYHlZP84\nXw02nWdiW9EvHz9+TK/Xy+XlZbrdbs7Pz8spOoOZ4+Pj/PLLL7m8vCxsliuXA3jQbTAqnz9/bpXb\n4HQXoR1OZDqfaDKZfFvMCYlnKLD7+/s8Pz+3PB6ED48JQUSg7UmxaLVnjreBR8+kOp/CoRDeSzIX\niwyz4eqDgA6EebPZFBTpOKVDTHiBJBk6Ix0vL9l6f6enp1kul63jYHim9JvNnzQ5ISh2F2QzM5E0\nFLNDKhgqnuXwEMfS+Dx06A8//FCOgHNyiPlmDh4fH/Pdd98VT4BTRskWyPB3DC1z6hg13/Emd0Ip\nNQr+rBTyv7Ixz05GTRoQ7JguSof8KR/DZHwOESUN64IHBFMC24hS9f6pQ172dmAnqQ/B+mPweZeP\nhLKPzF45yZCGbAHCkHufdKvDr8zXcrksoA2vEqAH43lwcFBAlStR8j3+wN5gbCaTSdlnzLlP+5HM\nBwgnTOky5svlMrPZrCTX393dZTAYZDqdlpu5fS0GLOJ6vS61fyaTSVkrG3fndlgudtW8fpeXl2X9\nfcqF36PHAQUYNWQQZhemD6AL6+cCk+RNwMSyXrwHMIcXTz/ZYzCEDnOik5ElwvROCH16esrV1VXZ\nG5YT7wdYOGSfvtEIj3D6hfkARMPS+fQN7DQ6HrtBCM3MInIOuwxYBGytVqtSa4s9CghbLpf5/vvv\nW3k6zMn19XU6nU7r9ul+v5+PHz+W00Aw5AbzgCaYxufn59ze3ubgYHsIBSLBl9f+Ues4Fr5v+7Zv\n+7Zv+7Zv+7brtvuqVfu2b/u2b/u2b/u2b2p7cLJv+7Zv+7Zv+7Zv31Tbg5N927d927d927d9+6ba\nHpzs277t277t277t2zfV9uBk3/Zt3/Zt3/Zt376ptgcn+7Zv+7Zv+7Zv+/ZNtT042bd927d927d9\n27dvqu3Byb7t277t277t2759U20PTvZt3/Zt3/Zt3/btm2p7cLJv+7Zv+7Zv+7Zv31Tbg5N927d9\n27d927d9+6baHpzs277t277t277t2zfV9uBk3/Zt3/Zt3/Zt376ptgcn+7Zv+7Zv+7Zv+/ZNtT04\n2bd927d927d927dvqu3Byb7t277t277t2759U20PTvZt3/Zt3/Zt3/btm2p7cLJv+7Zv+7Zv+7Zv\n31Tbg5N927d927d927d9+6baHpzs277t277t277t2zfV/i+IAQDEy/wsagAAAABJRU5ErkJggg==\n", + "text": [ + "" + ] + } + ], + "prompt_number": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Raising the bias of a filter will correspondingly raise its output:" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "# pick first filter output\n", + "conv0 = net.blobs['conv'].data[0, 0]\n", + "print(\"pre-surgery output mean {:.2f}\".format(conv0.mean()))\n", + "# set first filter bias to 10\n", + "net.params['conv'][1].data[0] = 1.\n", + "net.forward()\n", + "print(\"post-surgery output mean {:.2f}\".format(conv0.mean()))" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "pre-surgery output mean -12.93\n", + "post-surgery output mean -11.93\n" + ] + } + ], + "prompt_number": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Altering the filter weights is more exciting since we can assign any kernel like Gaussian blur, the Sobel operator for edges, and so on. The following surgery turns the 0th filter into a Gaussian blur and the 1st and 2nd filters into the horizontal and vertical gradient parts of the Sobel operator.\n", + "\n", + "See how the 0th output is blurred, the 1st picks up horizontal edges, and the 2nd picks up vertical edges." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "ksize = net.params['conv'][0].data.shape[2:]\n", + "# make Gaussian blur\n", + "sigma = 1.\n", + "y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1]\n", + "g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))\n", + "gaussian = (g / g.sum()).astype(np.float32)\n", + "net.params['conv'][0].data[0] = gaussian\n", + "# make Sobel operator for edge detection\n", + "net.params['conv'][0].data[1:] = 0.\n", + "sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3))\n", + "net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel # horizontal\n", + "net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T # vertical\n", + "show_filters(net)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "display_data", + "png": "iVBORw0KGgoAAAANSUhEUgAAAicAAACbCAYAAAC5xzv6AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWuMbNl13/c/9eh6V7/uvT1zHzNDzgw5HNIWNInpMCEi\n2wkCwYElBFASBTLg2DCM2LATSAkSJ5GlWDJi5EMAA0ngL/EjkQPFcuIQgREEcCIbAkJD9JhDgdJ4\nyOFjHnfuq2/fflV1VXc9Tj7U/e3+1+pTfe+MqOkmWQtodHfVOfvsvfbaa/3XY++T5XmuJS1pSUta\n0pKWtKTLQqWL7sCSlrSkJS1pSUtaktMSnCxpSUta0pKWtKRLRUtwsqQlLWlJS1rSki4VLcHJkpa0\npCUtaUlLulS0BCdLWtKSlrSkJS3pUtESnCxpSUta0pKWtKRLRT804CTLsk9nWfa1LMsOsiz7C1mW\n/fUsy37+8Xd/KMuy9y+6j0ta0kehpWwv6QeVlrL9w0s/NOBE0n8q6f/N87yb5/l/l+f5n83z/K8U\nXZhl2TtZlv2R36uOZFn2lSzLXsqy7JNZlv2z8N1GlmX/R5Zlvcf9+Pd+j/rwX2VZ9iuXtb0lfSj6\nfpHtP59l2etZlg2zLPtbv4d9WMr2Dw5detnOsmwly7K/8fj5B1mWvZFl2Y//HvXhh0a2f5jAyfOS\n3nzKa3NJ2Ud5SPaYzvm+Kum5PM+/JelfkPTPwiX/g6ShpGuSfkbSX8+y7NWP0pcl/dDQ94tsfyDp\nlyX9zY/y/CX9UNL3g2xXJL0n6V/N87wr6ecl/VqWZc9/lL4s6THlef4D/yPp1yWNJQ0kHUh6WdLf\nlvTLj7//Q5Lef/z3r0iaSDqSdCjpP3n8+b8k6cuSdiV9TdKPWfv/WNJfkfT/Pb7vk+f05Ucl/frj\nv/8bSX/WvmtJOpb0kn32P0n6qwvayjRbCO9Iuv/42m4ck13/jqR/TdKPP37OyeMxvmHj+KuSflPS\nvqQvSVr/qO0tf5ayveC6X5b0t54wrqVs/5D/fD/Ktl3/W5L+raVs/y7m/6I78DEK+j+S9Kfs/78l\n6ZeKJlDSdyX9Efv/hqSHkn788f//+uP/N0043pH0Gc2iUZWC5//7jxdI//FC2JU0erzoHmnmIfyo\npH647+ck/Z8LxvSnJL0t6QXNgM3/Lul/Pkco07gk/SLX2vf/WNJtSa9Kakr63yT9ykdtb/mzlG1k\nO1z/V/RkcLKU7eXP951sP75nSzNA9akFY1rK9lP8/DCldaSzIb+nDQH+cUn/V57n/7ck5Xn+/0h6\nXdK/+fj7XNLfzvP8n+d5Ps3zfBwbyPP8b+d5vq5ZOPALkn5E0m/ns1zqRp7n70pqayb0ToeSOgv6\n9TOS/ts8z9/J87wv6T+X9NNZlj3NvGY6O/5cM0F9M8/zI0l/SdK/c1648wntLenjo8su23O3PEW/\nlrK9JOj7RrYfp3/+l8ftfnNBv5ay/RT0wwZOnkYpFtHzkv7tLMt2+ZH0r0h6xq5ZWDX+uMh1L8uy\nPUn/smZI9y1Jn37c3n/0+NKepG64fVUzgFJEz0ryxfGeZvnPracbViH5ON6TVJV05XfR3pI+Hrrs\nsj1321P0aynbS4K+L2T7Mbj4Fc1qBv/8Of1ayvZTUOWiO3DBtEjo4+fvaRYm+zMfoS3lef5I0lqW\nZf+upD+U5/mfzbLs70v67/M8/3W79JuSKlmWvZTPCq+kx0h9QdN3NAsNQs9plqO9L+mmZiE+SVKW\nZWVJV5+iv8+Fv0eahUL7H7G9JV0MXTbZfqr2jJayvaRFdOlk+3GU4m9oJjd/NM/zyTnPXMr2U9AP\nW+QkC38v8uDuS3rR/v87kv5YlmX/RpZl5SzL6o/32N9Y0PYi+hclffXx3z+qsJvhcYjv70v6pSzL\nmlmWfVHSH9MMjRfRr0r62SzLXsiyrC3pv5b0v+Z5PtUM6NSzLPujj0ONPy+pZvfek/RCCP1lkv54\nlmWfybKsKemXJP29fJac/CjtLenjo0st29JMMWZZVtfMKSpnWVZ7rCyLaCnbS4IuvWxL+uuSXpH0\nE3meHz+hvaVsPwX9sIGTPPwd/4f+qqSffxy6+7k8z29L+klJ/4WkB5oh8v9Y84L9NAj0NUlfzbJs\nU9I4z/P9gmv+nKTG4+f8HUn/QZ7n/3xBe39TM+DyG5K+o1nB1l+QpMdt/zlJ/6NmxVI9zYf+/t7j\n3ztZlr1uY/gVzSri70pakfQf/i7aW9LHR98Psv2XNJPR/0yzeoCBpP9yQXtL2V4SdKll+/GW4T+j\nWZT7XpZlh49/Fp1RtZTtp6DsccXukpakLMv+kWZh0OU5FEv6gaKlbC/pB5V+UGX7hy1ysqQn06UL\n7y1pSd8jWsr2kn5Q6QdOtpfgZEmRlqG0Jf2g0lK2l/SDSj9wsr1M6yxpSUta0pKWtKRLRReylfiX\nf/mXPxQi+l4UEtvpeGfazbJMpVIp/Yam06kmk4nyPP/QfeBZ3uZ0Oj3TjyzLvifjK3o27VcqFVWr\nVZXL5bm+TCaT9JPnuabTqabT6VxbRf0rlUpnriuVSiqXy+kexjkej1OfYr/4XS6XValU5u6jX4uI\nccT+/cW/+BcvNLz5l//yX84jbyAfX6lUSr+n06mazaam06mGw+Ecb5gX/5+/ua5cLhfyyuVtOp3O\nzRs8529Jab6yLEvXweeiflerVY1Go9Q+c12tVueeNZ1O0zrgb6fJZKJqtTonUz6+eN9oNJIk9Xo9\nVatV1Wq1M/KDXMGDRqOho6OjOf75syLf+Iz7K5XKXNv8TCaTuXmhvVKplHglSZVKRePxeE6XwDe/\nP89znZycpHbK5bLK5bKyLNMv/uIvXphs/8Iv/EJOP73/UV9K8/qUuapUKmd4xby6jDk/aB9eO4/h\niaQk+ycnJ6l/9Xo9fRfn3NumH1wHv1038rxoI9w2xPXha3g6napcLs/pQO9TlJ1qtapGo5H0Af0Y\nj8caj8eaTCaq1+uqVCo6OTnRysrKmfF4H+Ap+n1lZUXj8ViNRiPxjOuOj4/nxsrYfc6yLFOr1dLx\n8bGGw2Fq9+TkRK1Wa279MD7ajDIwnU71S7/0S4Vy/X11zokb1g9LRQLuFBUjxMT4Z24wznse90fl\nU/Sc7yXFcUZgsAgouJJY1EdfAJEHvlD9+vOMNb/9B+H2RSHNz5ErtUV9vSiKRpIx+f+TyWQOHAwG\nA00mE9VqtWTEJpNJUgZx/viNMq3X6xqNRkmZwp88z5OBQElmWaaVlZX0MxwOdXJyMqfEHDi5UpdO\nZWA8HqtcLms0Gmk8Hqtarc6NnWdyL226IZCklZWVubl1OULxOQ8AHZubmyqVShoMBmnso9EoAWVA\neZZlGgwGid+AKJ+LaPjgQbVa1Xg81mg0OgNQHNRFw8146E/kp7cT9Uu9Xk/f05eLlu0iYCKdGr4I\nlv0+QIobTV8PyGrUA7QPOT/8ueVyWePxOM2PdCqb8LEI6LAGef5kMlGj0dBgMJiTi2q1KumsfqQd\nN7r+OeCBeV8EZGgzGm3nn+uOWq02txbH43EhiItrlzbQKycnJwmIwYNoY+Gp95U2mGvmAz4w//45\njo9/F/V7pO8bcBIN0Ye9F8TrCq+IXAjcO+e+85gZnxeVl3u3LsgfZhxPC8yiZ+jjdo86Kka/flHf\nIjCIisXbLvL645giL3yeoKJ7fUFdtPKGFoFc/x/ejkajM3JG5AQlRGSiSKETLeDzer2uyWSi4+Pj\npLSyLEtG2b0YPPTj4+M5Beny796YG55KpaJOp5NADWDHjYwrT1fgrgD5PkZ9sixLAMLXkCtI/qb/\no9FIx8fHGo/HWllZSUo3rv0I8uCR95HvmE/4SkQD2UQRF4ESnkVECM/SjaGvEY8yMT50iBvdi6Ki\n56PTisBIlFf/znkcwQ5tMf7xeKzhcJgAZwR7gADvAzxENlwXE+2jP/C/Wq2q3W7r0aNHajQa2t3d\n1crKyplx+frjcwf19AmjHQGNrwPGALjwSJKP8/h4dmxKjHRCHkGBt75WHMg56GCN8dvnhGf5unA9\nwryz/llj/iyupQ+sFXfYz7NnlwacROF3xc4gFqUTuDYqFtqBca7Mx+NxIUiJIaloYCJDI9JEyEij\nYGCGw+HcuD6KMX1aYOJ88v6hUKEIYFjE5wlNVKLc6wClqC9xfn3huAHi99Mq4+hpXTRFUBdBm3TK\nEwwoAMRD1e5t1ut1TadTHR0dzS1wT8dhaF2pYFS5ZjQazaVfUPjIBe3iKUbPB2OxtbWl27dv6w/8\ngT+g/f19PXjwQL1e70xkwI2+K6ToxbkCc7BZJIfOH9YwkQ1JarVac6mQqDjpD8DQAXEE3bVaTZPJ\nREdHR0k5cx26RFLqh4M5riPszbPwHh3ESPNpCj5nrhnbRdKiSKkbX8YICHMg6voB+RoOh8rzfA4E\nwFt0AtEv0hu0U61WU7rD9UdMcbrM+Xc+nlqtlu7f3NzU7u7uGcAcwWSUG5d3f6ZHKhzIuEwCPhmL\nr1kAMSDV1yL30Sb3+bwAvIbDYeI7AHllZSVFBI+Pj1OUMc414+M5AG70E5FeB94Q8o7Me8rX136h\nzC385mOmIhAQ/3ePP97LZBV50QiagxSurVaraTF4LtGjDPGniOJCLJfLajabqtfr6nQ6arfbSaEt\nmhDa+N1EiWJbruQJfcdxRJ48LUUvtIgWjdX5EOf2w3iJboAuC0CJER9XiDGU6/ld8skoGa7t9/ua\nTCZqt9tqtVqqVqvp++Pj47k5PTk5SXJHRMO9UffQAM1e41BUW8GzWC8ffPCBrl69qq985Sv65je/\nqS9+8Yu6du3amVqTouhJBOcobE9HkVuPawpyw4DiJ5oT8+WNRkOtVkvNZjOtP4xATEm44pekfr8/\nxxf66j/0GyPK355Wqtfrc+Fs5ou2I3h13qCfAJ0XRRF0S2droKIT6Y6MO4fMJTJ6cnIyx5PRaJTk\nNs9z1Wo1VavVVA+Bxw7Ydu/d142DEgyvdLYmg3sajUbS1Z1OR1/72tfORJv9N8bYdVcEJi5j8X4c\nlLgmWP84EZVKRbVabU7HAehYKw5OfL7grxNy6vchn57mYd5YU643fC24fuHZ/PYoD+OmRgadt4gu\nDTiRnhxNcIEr8qgQfPecIPfeWShENxyY8ByUJMaiyKAvMuT0gZAcqHR1dXXOS3CKSvV7Qb74ouDy\nG8ULP6LHUzSuou/8WfwuimB5lCXSkwBKfK6H7L8XgO57QUVK3EGiyyB9RvnU6/Ukk0QDfBH3+30d\nHR1Jmnl7ROaoueAHZTQej3VycpI8G+TAUyYYAQpLiZ5BrjBRYleuXNEbb7yhwWCg8Xisv/bX/ppe\neumlBCji+GL0zuc2epIuY+5U+Lp3xc+1tVot3cvYAVrwxQ09hp9n1mo1NZtNtdttVavVVJToBcIo\nZXQLfXej616wh83RHcwv97txdwPjYAj+XyRF+ZXmo9YOYD0S5pE3vHSIQmj/DJk/PDxUpVJJha0r\nKyspCh2vJfpImzyfuXbADbmHPx6Ptbm5qevXr+u73/2uBoOByuWyfuInfkKvv/66arVaKqZ2AOS0\nqPAWPvG/f+eRFo+sMKaYCeDaRqMxB95wUJBVl1F3ODzSQYGy9xlZc7DhEZMIPH1uvZ9Q/Jy16jJ9\nXjbgcmj0xxQXatGPC0VEred53M4IhMsRYRQoyNFqUYgyeuz8T9iR9ih2JIISDWpEzx+Wbx8m2gAh\ndB4yBKzxA7nXzfUOVNwbiF535A3t+e8i4/Mk8jmMsnFRFCNCEaw5Hx0YYticp+PxWMfHxwlgoAjI\nw2MsG41G8iLxJD0SwU+WzarsW62Wut2ums1m8uxj/5ljB/yMYXd3Vzdu3NDq6qqm06l+3+/7ffrS\nl76kP/yH/3AyvihYV7juVHgaxPkUDbb3Cz56SDiCXZdRrqfO4Pj4OAGabrc7F3mZTCYaDAbp+na7\nnfjAs6h5iLIe++QGhz4xx0Qvfe59/Hjj0qkXGz3QiyDkx42RpxGgqIsjsMNALtKdrg/6/X7y8svl\nshqNhiSlQk6cR7xxUm8RyDkQwIgzDgBSs9nU66+/rmazmcDh7du39VM/9VP6B//gH+jg4GBOn/Mc\nN7RRlqX52g8fK3PqNinqDbcJDvzr9XqSWeTy+PhY9Xo9OS+kU+A1axKnh/56dMbnBZ5Hfcb10e6y\n7rmOsdMmc0Yxb1wjRXTx2tyoyOC7Mo+KXZoPAxcpQSeE2ZkXQYkrPE/DeH+ipxf/RuH4fQhYvV5P\n4UwoCsV5qaOnoZhWcP7ymY+3aBwOQiJf+HxRGm1R+4yhCJjEuYsetvOgSBaiorsoikrFveEIpJx3\nLl947fCeYkwACUoRXmH0SP8MBgMdHR0lEIMiajabKpVKKXrHNkIiK6whgI506ulzP+sNw9lsNjUc\nDnXt2jX93b/7d/XKK69I0lz0kv56CNejKbVaTVtbW8n4eMrDr0X5OeCBx3iSfMe1k8lE+/v7CZzA\ns16vlyJHRJgcPFCYzNiJLnnks16vq9FozIH5CK4whi4XGAieG9cl0QSPeF103Un0+KOedPAa16Lv\nDvE0BPPleoBnYXxHo5FWV1fT+gHgABJLpVKqraIWwo2mry/66s8C5Gxubia559nValWDwUA//dM/\nra985SsJ+EvzoCMWlPo8ukGnHw5MIqCJvPAx8X8cBymmPJ/V7xwfH5/RMURamQ9sITyNO9ikU0Aa\n+xejOi4bbjt8jdLfCNDPc6ovBThZ5Pm78EYD6r+5H6YvMs5efOio168vOhOkqL9Q7J8XcxVFQ6bT\nacqNR8Md23aiXy5kTkWGOSL4WFfiHmbkhQtZfJ5714uiSBHMoNyLUm6+oGO0oYiKnn3e9R8nORhx\nBYnMRa8u5rQdyFAr0W63VavVknL3KAnPbLfbyQhIOmMUXQFhGDHkHoHiGnb9uDKeTqdqNBqpPfo8\nGo3U6/W0ubmpt99+W61Wa86ro09FnjJnlOzv76tSqWhzc/OMTEYD7V5qlmUJ8G9sbKjT6SSgUq/X\nUxTp+Pg4pbyYBzeQHnXy3RE+f+5les4fuVtZWVGz2Uw7l2K0AWNADYWDGp5Tq9XmtpNfBsANFUVF\n3MD4mof4HzkrlUpzdU5EUtx4uZywAwuj1m63UzsrKytz9Ti+o0qa9+5p1+Ud+apUKrp//746nc6c\nHOT5bEfb9va2vvCFL+jRo0eprw4YptPpXB0YbTqY3d/fPwOYuJdxR7vCWSYUZUun9SKDwUDSaYSE\nowQAL6R6vYgbOYvR6Qj0pZksu/MUASF6ivn3eUcH+v3IuQcHiDYuoksBTqLCks4KaYxWuHHy3KIr\nGhdOCIYUGcMYFTmPzgMo0fhyPYKRZZmazebcuPyaSD5ejHxUAEX3OWDwugKvbfB2HNxFYLMoolEU\nOYlgpSjSEZ/NvAAaYxGzp+BcqT9tNOnjpKKIjisCohNey+QG04EMhnd9fV3PPfecms1m2l3gSlmS\nNjc3U/rQFZJ0qjC8NsXlMXqm/C8p9fXo6CidFwK4IT+PPO/v7+vGjRtpvD5Gl2FoOBwmQ0y0J9Zl\nuZGPvPXoHbtE1tfXkwfpEaY8z5Ox43mHh4dJ7tmVw3W+U8Lnj3Sbg8Q8zzUYDHR4eKjj4+ME4rhf\nOq0Bcg8dAMpvQKQb/uhwXSQVOVJRZ3vdR9ThjJu0d5ZlOjw8TPdHvT+ZTNTv97W2tpZ4Q4oIAOAy\n7ODaQQggyHUIstNoNPT222+nKBZpP3azVKtVPffcc/qH//AfJqDkZxHRF8YuSe12O60j0qb9fv9M\n3QWG3deqpARSGeNkMtHBwUEaG2uS59Xr9bnUFnLpEZEYNUXv0CfmM25dz/N8Li0J/yPQKZVKyXl2\nHsHvPM/TDi2cjfMcykuxlTh6youiAItQuv/2XOQiQ+/PiwDFnxc/9+cjFBEknNd3aDweq9VqpZB7\nBGcuvNLZA3lA7+4lnOdhwRdH0R49YTzON/f4fMz+nRve6FGd5/XxvSuzIl65QYpeWgRFi4Ddx03O\nM5SOg0+POERvie9dSeEZkbPtdrsajUbJc3J+oPQxbu4teorAFQrrhXoKvBuPCGRZlkLFHp3Jslk6\nBS8P5fn2229ra2tL29vbSbF5JCE6C71eT61WS5J0eHioer2eUkgOLvhxvhEJIbSPfAPQWCNey9Hv\n95Nn6NsqUZxxB0FMM/nfbgBcVo+Pj1MEBcPoUSd4xzM7nU5h5Pey1JxALs940IsKTn2dMy95ns+d\naAovqC+J5+VwiikGkfM8oqfutRCSEljwg78g11flcjlt0XcQSRQGh246nepP/sk/qTfffFMvvvhi\nenae53Nrjn4dHx+r0+mo3++nPnvE0vvsa8r1qzsB7DZy5wJHoVKpaDQapXoOn6OjoyO12+20zqXT\ng0U9Aun1Iq6fIHdeAEWAJo/++RogyhN1huuW85zLSwFOnIqUuit3yBWFpwq4Lxo9v9YFwZVffM6T\nDF5RiLMokhDvkWYTtrq6qoODg4UAyckP7EII/EROH09E5d4/DJLn6KXZoqYvfj/fedqHau/oFRUB\nPO5xPkVA50K/qN8u3P7b5+EygJMiQMtPTMXE8Cq8ByRg2LmeuokYnYNvk8lkziA6CPVnS6cnO7oH\nCDDBu/J+UXTna4o+4TmenJxobW1NR0dHc+uQ53hdAu3WajUdHx9rZ2cnpTvoCxFCV6wnJydzx5QD\nJg4PD1UqlbS5uamNjQ1tb29re3tb7XZbm5ub6vV6yvM8GRwMheublZUVHR0dpV1LhMVjvz36w7gG\ng4FOTk4SKHLDjHcP8EC5D4fDpLv29vbS9bF439fkRZGvX+SKOS46RIwx+LqF7254vSDZD1uDV6TB\nAL4eWfNn8T/8d17SZ98t5LpwY2NDt2/fTp+z7pBlIl6NRkM7Ozv65Cc/mebR0zKu54jisc7ol9se\n6RR8Ai7oe1zL5XJZ169f187OTroPYNtoNObAFHwk6kPdzu7u7plIU0xrQh7tBGQcHx8nvZ/neYqa\nwudSaVb/w5le6BOADOOg/9JpmqqILkVMPBqiGEkpMj5FBiB62R7Gk86erOftegoogpain6J+F0Ub\nivrL/z7xT2NcY2QDwfCtwPw4cPBxudJnESBYLihxvN6Gh+r9uwhWPITqCtsNl/MvzouneWKKzucp\nzstFUwSnkT8+fucByszlNM9noVDSESghH6sbQNINGHg3ENLZd+owN767LK43PGPkhvbH47GazWba\n9bO5uZmA67vvvqutra1U7yGdPeMDD5pUI9dw/WAwUK/XU6/XS/NLvQcRD9cX6+vrunPnjt58800d\nHh6q2WxqMpnom9/8prrdblKmWZalrcMYBJQuqRUHdRGQ0YdYF3TlypXULsXHgCfWnusowObR0dHc\nWR/uibo3epHkMu2AQJqvDfJ+xj57ZCtuKeb6aAN8h5U7O74d1q93z57/+XHD6ilLP5wQfeXgcTKZ\nqNvt6uTkRD/+4z+uN998c64WC4BGP5FrT+MQgXFdiw6nloo23HGRpCtXrkiS7t27Nwf0X3rpJT18\n+DCBEQextE27h4eH6nQ6CRz6kfxuT5gX17kArXq9Pgfo3dbx7PF4nNqG7x5BcSB5nhMvXZLIiRsc\nF7SIvIvuO2+AcUFFA+gTEJ/H/7GPi/qyyMDGBRqNa7PZfOJOI0fbCHdE4TzbkWz0zN3wubH0diMV\n8YB7/Hf8PipUlFKMiPl8u6cDwRsfcxHw8nm8aCoCovQ31kV5usFlIypkf0cHCtT5RtqFe5l/B5wo\nWv52Im3k/UFp4r36rjiupf4Cj5aQOy8AIySOjNHfLJs/HKpUOi3GzfPTY+lRaO6ds8MGg0PE5N69\ne+m9NJ7b39raSpEZ2vFIBn3Gw4N/bsyguIUUXrTb7eRxHx0dndlN4YoanlDzwjjgp8s5vLloQpch\ny16X4PojAm7IQXnRSx7hd9FnyEmUF+msjXBHwKMmvu7oFwe78T9zyX3UdlSr1XS4IeCfefUUo9ed\nIGMHBwcpAkKUkAgCab1Go5E+J/qAnK2urqYic+l0DY/HYz148EC3b9/W1tZWeh59j4AtHtvPvDnQ\nAGRlWZbW2HA4TGkjb8f1yHg8TnVwOADMkRd7e9TkPLue5vg8gfy4KHbQ/4/hzUURhuhFx+tccBdF\nQnhe/Oxpn+nPiQYnEn1ESD1SE+/zz2IUo8gwI2x4iVRzg+i5xqM90Vv035Gi0vH+RpDmz4g7oRZd\n6/fQDwxJ0RwXzfdFURE/XVlHhRBD/ShPL3aNZwsURbJ8J4qkFF2Q5j3aWESMFwWwoJ/MPZES8vIo\nXyIRXH/t2rUULaCY9N1339X169eTMiLiUBTZ41n0kVA6hxiylfrk5CQZ/0ajoVqtphdffFH3799P\n0RdP22BAut1uMvxRHqfT6VxxHuOMCr5UKqX6BwzldDrb2dHv9zUYDBLYcZ6j1DmfAmo0Gtra2tLa\n2loqfC7ayXNZIoLS4vOMfC7ROXENu2MU1yv6ynnvxsyNrl/DHBU5lTyfmhB/NxLAh2jgyclJ2t2F\nQa3Vamq1WslQA8i/+MUvpq3FXmvh43HekKJxsAT5Rg523NC/Gzdu6N69e+p2u6ltr9fAKfCXdhIx\nZfwOIvjOgRXXMQYH9dimSqUyd64S68Vf2On3eaG46zzn1dPQpQAn0uJiUunsdkvprPFeBEyiwERD\nsGjhn2f4i54bjQi/mUxftDHF4Tt3/PtFPPDaDIQBpemFwBg5ThR0PsfoBc9GeJ4kQDFKVPSZG+Ii\nMBQ/cwXlB4Dxcx74uCzgBIpKyOc78pbxudx4sZyDVk7KRIE5yPW0j3v3TnweUziS5jwi+sj25aOj\nozSGRqOhR48eaW1tLX0/GAzU7/c1HA5TcepwOEx1H3meJ0VH2x7hIzrj/SHqc3h4mIw/r2nHC3zh\nhRf09ttvz+W9pdPIEGum1+vNnc7qz8GAeWjcQ9YeZfLDpsitRxBNG6SnmBs/R8bn1SNO/n4TX4dF\n9XMfJ8UIUgQj/p3rJwfi6Jaow1jrtOv6gdRIUbqAtiNQihEvl2lfSxSUAkyGw6FWVlbU7/cTKK9U\nKur1ehrW5XRPAAAgAElEQVSPx+r1eumVBq+++qq63W46D8fXGvYFYz4ej1MxNPO+v7+f1oNHuxuN\nhqrVqj71qU/pq1/9amE0xNO/nU5Hq6ur6dkxosV683QKbbL9mWsdaHhKmDVA/z3KRxlB1Fk8qyha\n7wXn58n1haV1okAXUdFigGHREDpKQ8DPi0Kc542cZ+hoPz6b53vo3Ptb9Ey/p16vz70h1vt/Hm9c\nqFDOCBJ5QhdCVwJOrhAZf1EfXHl4P4rADuPz633eI5/d03I++5xGHsQIwkWTh43jWKFoRH0OpdN3\ns6AckS88Pdp2hVAqlVJOG2XiYXjuQem6d+XrhjHQNxQb6Uc/7Gk4HKbi0U6noyzL5nYkdDodHRwc\n6ObNm9rb20uFrMgDyhjgjOIjIjOdTtVqtdLaAHwBuJ9//nn903/6T1Wr1eY8U49AuTy5EvXP+fGX\nybmsOf88rVMqlZJBc17FCEwEO8iqp46IclHwzPh9Li+aXM9KxccgeK2TdPY9Ng5IvSjSPfJYc+V1\nDp4yiMDat6BHpwj+kTqhn1evXtU3v/nNuWuQt0ajkXa7IHsrKys6OTnRzZs39frrr+uVV15JkRn6\nOp1O0y4j1szBwUGK6CFDnEjsa3djY0ONRkNf+9rXEqDhmuiolEolra+vJ7mODhw8ZltzUVrXHSPA\nNLt7aMPllXucXz6n2B8+90hUUTDgPBt3YRIfjfzTXO/RjKIohBuxmLeN0ZUPS4v6+jRj8IXi9/iE\n8WIyogaOOPkMpO1eeARgLkQUKjqAKIpQOZgoSrcUXRvBm0c9YjqjKAR83o/zbRH4KeLxZaMir1I6\n+xoEUgzRCPGel0qlks4PcCXgQNRBCafLesG1NF+g5usjAr8YwcJzRYkxL+TrO52O7ty5o4cPHyrP\n85RH530kAOROp6P19fX0MsxGo6F2u53qaYoMMAa81WppdXVVrVZL165d05UrV/Tmm2+mAw09ZeMA\ngXngDAsUfVTyzg8Hbhi8RqORdkWUSqW5tJmnn2I6A6OGJ03qRzrd3ux9JoLiHvNlASZRZqJzIJ11\nOpFXZDjLsgQ0pfnDHpkTj/ph+Fjzrv/8OR4diA4S17mMcF7K1atX1ev1Utv0tdFoaDAYzPWl2+3q\n6OgoFVr3ej3dvn07FYYfHBxoOp0mkMvrHTjLh0gKMsazWCdbW1tqNpv67ne/m6KIyA9zgKyUSqVU\nd4Je8JOdnbd8RlqLqInz0qMZfjpyBCE+V/AaZwQeuwMSoya0g+1a9K456YLTOk9jUIquiUo1eub8\n/jDg57xnezvntRk9iaJ7QKEOLpikWq2WlCCH8OCV4TUTpuP+IuNNuC8KUVFfI2iKAMOvj8oJcqGN\nURVXIg4gFwGM+BNTI0XXuyK7aCrqY5GiLIrOOeDm1FX47qAAxeRnl/hceoE1oVeUoSs6nsmPH2jm\nUSjeNQMoIv+OsalWq+p2u3rhhRdUKpW0sbGhnZ2d9OK8d999V+vr66rValpdXU31FZubm+r3+2q1\nWmkcACrm0iv9pRlg63a7+sY3vpFSqRh1D0W7LACQ4AWASzpNxWRZloADaVIHj4T02WHDlmbfHRdT\nS3k+/wK/6XSainIxen4Sp89P1GexgPkiyKMmEfxFUFK0vvmMuSCKwXceMYopyxiB4bn+WTTi/I1+\ncKeOZ+HAxSjXeDxWu91Wo9HQ/v6+JOnb3/62Op2O8jxXq9XSa6+9ps3NTeV5nuqgqBva3NzU/v5+\nKqZFpgDJABgOC+x0Oup0Ovr2t7+dapMAx0RcfJ3zd6PRSLuKWO8uOzzHSww43A2ibZ9H+EBqxiOG\nzLPz1aOtFHxzjTvQ7lx5Oq+ILsVunSdRFHoY5p8tMr5PIldiRSmMqCTOa+e8PdtS8YL2v3npFH0p\nMr6er0YxuwDW6/VU2R1DsHEcUal4SK5IEUTQwT2LFBY/GBGMnxe+RYMZjbYXv0U+LgJoF00xxege\nu6Qzf0MoOQeB7u2xqJl/VwoOYmiLfvgr5d17gSIYcY8ryzKtrq6mLZ3SbE6oLyHkvbm5qbt376Yc\n/bVr15JnmOe5PvjgA5VKJd25c0dra2upePbGjRuSlA4k9L4hL+wIWl1dVa/X03e+8x01Go1k3ON6\nYPx4iHyPUXT+MX4Al/MhplggT29xwixGJAKkWO/AgVxe8Bwjix758fV9keQA2yMXvvadf1FnezE+\n12JIoxfuhhPiHsjr+5AzqfgEW08B+W6cSqWi3d3duV1S3s7x8bH6/b6uXLmivb09ffrTn9adO3e0\nsbGhjY0Ndbtd3b17N0U5Op2OJKWibV7pgLNA9IgDBt999139/t//+/XZz35W+/v7+u3f/u3EJ/gT\nX5ToRa0e4QOgwBPptP4E2cahwWnwaCvpFwcTlUpFzWZTo9Eo7YLz4tsIVvmbscbyBtfPHpVdRJca\nnBRFTfiMgrgYOSny0p8mQnMePcnoUSAUc6VuIPCKHWzgIbohkzS32GiL7wg9YvC5H3544dV5/Xc+\nOblR8ghNETigr35t/OF6H3NRBKVojtyrWhQ18fFdBnAC8HAA4t9FEBG/q1QqqegOmUIxFM2r16AA\n5LyCH9Dsnkw0HG4kAAO0t7m5qcFgkDw8j+Ksra1pNBrp7t27Oj4+1tramvr9fgIopCIBXOyGODw8\nTFEKP1SLtM10OlW/358z1Ovr63r48KEePXqU1hmnbjIOz31zDbUIKFaIHD98oZAYHjH+uFOK+YC3\n7Fhg3IAi6RRYk+9n94M/w4Fn9OrRK7EG5qLoSXo0ppil02hFr9dLL3b03VpehySdvpLB6xe8wJLr\nmBtPdfqOFAfu6Arko1KpqNvtqt/v6/DwMNV3wX/08mQy0bPPPqt33nlHq6urOjo60gsvvKA8z/Wt\nb31La2trarfbOjg4OFM3+PDhw7TLplKpaGtrS++8845Go5E2Nzd1eHiYouXb29sp8uL6nK3ObvO8\nPge+OPjx9c24PApCfYx0un3fnV/kEh3AuT882wt9mV946uvOHQB3uD0yC6hbRJcanCwihI3iokWe\nqBv9mI7wlIrn1NwzeBpC+Xvoscjbkk4LIYuMludVvU8uOIyB0DRj95Coj4G2FxnvaNz9f8YCuXFz\nQiFEfkWQyPjPi4JA/v2i6FURELkM4CT2MRoW5tprRTxaVS7PzsygfgGj6vJQFBL1iCKGlTbxihyo\nuIfrbfs5G6VSKR0QRr0EtVGHh4c6ODhIAOT555/XN77xDT377LOpaHB/fz+BFHLrtVotHRrFzh5q\nVw4PD9Vut7WysqJGo6Ht7e3k5XEYGzwplU6L7QAWrAd2YLhOGAwGc++U8vCyA0PuZx2hwKX5NzTT\nLnKLkfIcOoaWMHcE4w4KXVY8MoFR8JqAi6AYCZTmXwUSx1aka46OjhL4RI+5nHvxtztzGFl4ESMH\nADiXe/rkfefv1dXVFGV79OiROp1OknPp1Dms1+v64IMP9JnPfEbb29spRdlsNnX16lU9evRI6+vr\naZs9RbBZlqW6r8lkovX1dR0fH6dtyScnJ1pdXU1Ag+gNssXuIYA1AIL0Dueg+FooOgbAX9QHT5Fx\n1+MAZ7dBzO3h4WFyQpgj7oenXjsVo2vu9PiaZE6/byInRUJ9HlCITD7vWhjn3ngRFQGUon55uyhy\nR6zSfL7NF07c7iXNv2mVe/k+9qMI6eJlRPBQ5HHFyAb984Iq+PW0Bv+8KJePB4V8XjrN+1TUtivv\n8+bmIol5iukT6XT+ixQyvMGz9+hBBGkxouVpBBSj70yJIXNASpFCl07fXgzguXr1qo6Pj1WtVlOq\nh/6/8847KYqwu7ubvCvAjG9FrtVqGo1GarVaSW7xVvf29hL4wqvc2tqae9mYe4+sK093+Qv7PDXE\nOSnw19Ok8N4jXyhzNwSACnd2uJYQONd64aJ0NprrwJ4x+Prjt6erLoq8765/FgGROE7677s3Yo2T\nA2v0hPMBHeVOoPPR0zX87YA9ptmm02kCDb1eL8k7RdqVSkWdTkcPHz5M87q6uqp2u63d3V11Op0E\n1G/cuJEOajs4ONDq6mra+n7r1i29++672tzc1IMHD3Tt2rUE1Pf29jSdTlO6Tzp1+Oijgw1qWIim\nAlRcTohGcr3bJS8XiM6wyxipyvF4drpspVKZO5TR5QDe0lfa9+exzqNNOS9defGxQp01wNCTIhiA\nE5Ss52+9zSjggIkIIiLQKeqj9yca/hhxWNQHNyr+zDi5Hg3xNj3MyWdFudaisRQBDj7zQr0oeEUU\n+xUpGk7a8blyBF7khfnnEbhdRvJ8bJEc+DUoEul02y5Kg8XuEQ4PmfpcIRNe2Q8xr34GgRuXIs/F\nPVN2VjSbTe3s7Gh/f1/3799PzxsOh+nY9itXrqjX66XCbtIYk8ns0Db30Djbh1DxcDjU3t6eqtWq\nWq1War/T6Wh7e1sHBwdz50YAmtABnFLpB5nBN2n+3VSSUj4fDxmZo4+ef3c9g7Hzc3jgIUXsXO/f\nAcB8pxRtY5zj8xyEXzZy4xQ/j/pWOtXVXgTru5HifQCMWEuCIXf95GvDvXr0ImCS38PhMJ32Wi6X\ntbOzo+l0muYOmTg5OVGn00kRvt3dXfX7fR0cHGhvb0+S9OjRI1WrVT169EiS5opTKTy9ffu29vb2\nNBgMUj/yfHY6OClPf5N4BL6sb+QV4EFxroMaZBMggby6foGfDjb52yMn9MXfZeUAMkZzXQY8MglF\n/fYk+34pwEmkCFDOIwcmfgCSg4kiAxoXhLe36NmLDDDPY4G4MXBh8Jw4HlNUPrEYzp/D2Lyv8f/Y\nx9h+EViifxyh7AdGuSFzKuLfIkErAkdR4S+KvEQD7wAlem2XIYISCyJ90cNL9ySl+UiQdDqvflKo\ne5duOB2cuGFDMXtEzdeKA1/IQS73EOUYDAZqt9tqtVqpgM6jHVevXtXdu3clSbdv3067UDjOmjoa\nUku9Xi8p2UajoTyfFRSym8W9YbZuNhqNubfTxl1vR0dHc4WBPhf0gR+iKOy8cb5JSkaAOYxpMD+N\nNxpVThb1GgCMalxXDnJIJ/h8XBZ6UhTVDU0EGtEgeRTNgbBfx7XS+duNixzEGHWGpxhVQFKj0dDG\nxoYePnwoSXOGn9cMfPDBB6rVajo5OdGLL76ow8NDHR0d6eWXX04vI5Rm9U+DwWCu8J8i6StXruiz\nn/1s6kuWZSlaA6hGlrFjTkTvqtVqAjiM88qVK3PRR8bqLw2Evw7MPTLFevHaNJ9P6s2k+WitrzNf\ni1Hn4VC4bMQIYRFdCnCyqIMx/LToWlfsi4x+UXvuSS0y4ovaKopU+IIqMqxMiEcOpPmdOd6eLzL/\n7cas6CeOORqyRUrPt7vF02adT867+Hlsu8iLiv1b1O8iKrqWti8DOFkkr274fQHHcKqHoFEcnlbw\nNICfVuoy4VuJoyfk6TWPBKysrKQQsXQaYeFtqZ1OJ9WPdLvd1FdqSO7cuZN2K2xtben9999PefjB\nYJC2xWdZltJBHELlbzt2Rea7awiTk9JpNptz4X/4wuFw7iHjMbfbbdXr9RTVIdKysrKidrs9B/BQ\nxvFlmvAR4wWvYkqGfpOi8iiK9xfZyPN87mVsDv5p4yIp6k3vu4OE6JnH610HFTmCRW2zJqRTwCnN\np4ijTme9RaCEnCFT3/nOd1IhN8+aTCapDqrb7aZC8Pv37+vVV1/V6uqq+v1+OlW2VCqlc3sAJBw6\nmGWZer1eems1ckTqhQgOaRppvobGdQNpo9FopI2NDQ2HQ7VarbndY35elsuZF867I89n/qwYBYlz\nhmx7eifaTuagSFd71OQ8AH4pwMmTyJHYede4sY8gJTIvKm5vI3q1/I6LIAIRb889hggmYhTEF/J5\nnrA/w+sQioyhG+yivjoCXiRI8RkeOnUqmhtXsJFvRfNS5GktIh9TUQTlIqlIzqTzAbhTTKn5fbEu\nATmJ9/s9ePIxhcN8RkXB/XhqpdKsKHZ7e3vOgyLsjbePDOF9Pffcc5pOZ8fGk6bJ8zwdPe/e73Q6\nTVuDvX2MXqUyO2GTQ+m8OPLKlSvKsixtoffCWEDG0dFRqhXAgFQqlXRwGump+J4d38kAEflxeXXg\nyLZn37HjxJxyvobPq7c5nZ4e3oVBuGiKssX/nqIq0p2u2xi/R7ljhCTyIepVCH7HKJRHyxyUAzzg\nJXNAZAIZ5kRVj7CwY21nZyc9KxpsUjakVJDvWq2mwWCQZIzD3dBbRTIiKTmJjJs0zv7+fvru6tWr\niZ9ZNr8zjD4SEUfeiorp0QFuC3CiIe+v82uRQ1bkhOV5nmpYnkSXApwUoadFnvR5SEsqTm3Edvx7\nL9RZ9OxoJKLRdSF1Y74IGPiE8TmTR1/8gK0IvBAcF6oIYOKzPXriIWrGGt9YCbmH5+Mo+ryIV/Fv\nH8+T5ioqq6L+xe8vA7msMseLgGzRPX4tcuLjdOXioVWXhQhaUNQo4ghivXjW2yiVSsl7ROlzD8Ye\ngEI0gpf0cWAaY+BE2GazmeozJKWzF3hXDRS3le7v76edESjcfr+feEf+HrDhp2y61ww/SCG50gWA\n+dpwj9ZTOniQXmTJdUS2mJOi9cIzfY0y15VKRRsbG+mguyfpvd9r8r7HtCXfF+nJeK80X3Pl/9N2\nkV7wKBTy66k9AHXR+gGYeJu8o4niV98R5dvwXS+zc4waqGq1qvX19bkaD3bvcJZJtVrV/v6+Op1O\nAiu+TgHHRFLyPE/gGzmiiJgxX7lyRcPhUF/60pfmgCG8lJTOEcKWeGG7g7bo7ERn0mtOWEcAuuho\n+y43lwGf/yzLks5wGSmiCwcnH8br/bALNHo9LrQ+oT5hRdcXUZHxBzV71XiRV+T38Bl98fwqffX+\nRiUXoxBxbA4EihY+/TlvW5eHAKMR9H5HIOb9L5o7n59oCJy/RR4Vvy8jOIHiwuez8/rrCtXBb0yh\nOT+j8kcJujx67Yo/w71Pwtm0hRG4c+dOUkhEU0gBofQIZ5dKJa2trSnLZgdDDYfD1DYeI6+Op5Cv\n2Wxqd3d37oh6SekMCt5zkmVZMtal0unbjxkz/ZhOp+n9IKVSKaVviiJF0ingx/D5IYa+3Rd+Mh6P\n/PB3rBPyNe6yEL1KijABZOVyOdXFeI3QRZHLFmMpchijA1PkuCxyBF0PO7D3M0gcvKG3nDzS4k6d\nryFOXyYt6O9GIm3EGEktOgDpdrtzZ5jUajX1+33t7++r1WqlNGCn09G9e/e0tbWlLMu0trY2B/4l\npTeC8xlgws8NAUhTd8U9bMOPgAfQk2VZqteaTCYpYuF89jcN+7zl+emuONfDrjd4lqegXbdEZ5k5\nKJfLC51hp0tzfP33wsBEIy2dLRr16yJA4XdRX6Kxj8bBQYNXR7tn5BEXvyYqniJDVCqdHiBEWM2/\nX/S3C1Y0anzuJxEWgTTaK/L0igxljI5EAx2ByiLQ6cCEvoHOPRR7Xr8/bloUOYqRr6ioHTRI86k2\nvH7kIPK/KLzqgAMjGz0d/9+VjR/Pnuezw9ZQiLQ9HA7V7/dTGoU0iTQzYEdHR5Jminw8Hs+ladg2\n6UWN1Wo15fAhL2hlCzOeV6lUSufBcFBUls2KLdkBBLDa29tL70/B0CMrfhgbz4L/yFUsfKWduDuK\nNVpkLLx2yAsQpdOCR+aEtFOlUlG73Van01G9Xv/IMvm9IDfuyFuR/Bat7Sfpdl8bXOu1V65HPOoX\nt5Tz7FjP52ke+iQpvbRRUjqXBH3o6wfwwk+n09Hu7q6Ojo706NGjJNt7e3sJxE4mE927d09XrlzR\nysqK9vf3leenKRb4SD+JtOX57K3fvV5Ph4eHCYB5lGc0Gumf/JN/os985jMJELBWkTnebwVgpzYG\nXnkkyfV41E3Rlsb5cZvnTqTrY/RLPN6gCFw6Xbw2N4pCXIS0z7sv/j4v0lIUaYhebRHQiUY3evpu\nIIuMlAOUGIY8bzwOap401gjSokHz36DY6OW4AvfURAQoi7wm+OS1M5GXsV0foytC/4mRFF8IlwGc\nRPDqHpF70UXki93nF1lxBemK1tslUoCxo8gZ3rvH7/1AOaLAULC+RRelN52ebuekMBTDHD3Ou3fv\nJtBAf/M8T9EJPNJOp5N2s3CCLCFy6kl4IRk596Ojo3QYFm1LSvIszYzOs88+qyzLEniCV74LAsXp\n9QKctBvnkX77tmAP/wOq8jyfe3cL0SWAJrUp/X5fR0dHiedHR0fpjIzhcKher5f4d1HkhsrXLt/F\na6Ri3eBrHfl0EEIRqXR6CizgDrklYse8xfXFfMR0mzt3AGIOOotbzev1+txamUwmCbAfHh5qdXU1\nRUNoYzqd6t69e6lwVZoVhz948EAHBwc6OTlRv9/X5uZm2rGEgUYesmwWcXz22WcTSCJFc3BwoE6n\nk05lJr1TpEuJinhqBx4gqw7GpLM1Iu4cYEdcl8NLnksU1a/jXn/FCvqMeV1EF/pWYmkx4o50nkKP\nvx3QuDEsui+GZYuiL0UUowLRc4+G3o2KG1JfgDHCEMfmExk9F7+Pv/1/9yai0iiKgEReLeJ/EfCI\nzygCIQCcGB2J6aE4nqL5eRr5+bjIPUzp7M6LOLZYpOq8imHVmOpxkMi9fI6njcyhXOmT98HPBYke\nU6VS0bVr1yTNe5ooGIwrO2kwuJz/8Nxzz6WTZAHCKODhcKjNzU2VSqW0DZOICHUuKL3BYKBut5sA\nRKVSSR4m/UbxEvLmnJSjo6O5l5sRHaI4t2gNxiJJjzK5zLJrCIIv7AJizjC8Hl0imuURKQAa65Ww\n/WWQb9c5UX/zO8pW0f3cxz3+Sgs8fOTdjR9RN4xp3HYNOHbdCu+Yfz6v1WrpLcPUiEiaq81gLkul\nUtruTirlgw8+SLUTgM/r16/r8PAw1Zdcu3YtHW0/mUxUr9e1vr6ufr8/dzgiO8okpWfxpmLfYlyr\n1bSxsSHpNGoEeYGuO3ExKoWti2Auzi19dpvG36xlj8byDJ4D6HHZcH1FuvQ8upD9aYuMb1yA0fDG\n6xalH/z+okXtBiJWJBdRNCoeQZFODQDfETKL4+R7vDhfZCixaMiLnk/bHlpDKIv4WMRXAAJ9cvDi\nfY33FfGzaJxQkbLyMfqiWAS0fK74zq8tuv+iKBp3yD0QDBJKx+fNlTz3uyLzKIl7J3yHwqZYD+PN\nZ76LBBnAw/cIGm12u11tb2/r5OQkpUD6/f5cmofnjkYjvf/++2lNkU75g3/wD2p/f3/OwPD2bXbh\nYIQBLRsbG8nT9cgMb3ClhgVetlotHR4eJi9zf39ftVpNh4eHqfDWPUw3bBgkpyIdw9p1WaN4Ns4h\n7aInACruzQO82OETI4vU0fhbmi+SXK4jSHH9BQCl/66vkGu2k2dZNlfPAb94hhtBapXYAeYGlvbp\nCwXWGEkcROaKLbzMzWAwSP2gTiPPZ+mSVquVonW8WfuFF17Qm2++qfX1dR0eHiYwSpqTNN7e3t5c\nSqNer6fzUTxq4zxCrr32qtvtqlQq6eHDh4nHjDcWjkunwIx5ijbOdZHLrvPRwZzrHX+NhUe/IlAl\nAuvyQ1v0+bxdaBe2eb7IM47fu9ItQuvS6YFW8RpHjUXPZiHBLPdwInlUxA2FKwz64OHEeI10CoZ8\nuyKLxT07N+DufcOP6Mm5Eigyju6NuJeNgXSvMfY5RmnO84yehiLQKmrHPVT66HxljNHYXjS5B+9j\n9PmM3mD0KF0GIyCMBtJBjD9rOp2mN6CywyXLsrl3vyB7pVIphYclpZ0Gw+FQb731VjrnhFoTFC9p\nFvrIAX7PPfecbty4oYcPH+q73/2uvvGNb+iFF15Iio0UEUdxYxjYNVGr1dTr9RLA4pnc67spqF3h\nSG/kmJ0VhNg93eK89uPUnefS6XkTTu5VuqKNkUnalWay6UYL75/5Y3uzyzP1BYPBYO61EhdNLqsx\nFUy6RTqVraiTJKVI2erq6lxaQZrXbfDVgTRRil6vlw4g8+gAOp2oR7/fTwDHU4PT6VQHBwfpfBHA\nIYcOEokYj8d69OiR+v1+SuXs7++rXC7r5ZdfVrVa1c7Ojvb29tI7dNABFDNzjgkRkclkkl7dcHR0\nlNJ9nETrB6iNx2Otr69rNBqldtyZZA34sf5Eo1yvu+53W+k2gLVEhDOmZwDTfk8En6wJd3QoYve0\nnUdgF9GFp3UWkRtl/ywqZj73ayA3EB4Oh9kIMALPAiCkG9Mwblhi7p8xxdSJF855uCvLsrSjgDyz\ng4w4hggOPGRZlBbx+1iwRX8jSB7OPs/QL4qCFM3tIkBzXnSrKELk98fP/EVul+EsCF/I/O/E+NwT\niTzykCgy6n8zh4R28UJGo1GqV2BHAsaP9rmPMPF0Ok1pD+YCw7KysqLNzU2trq4mxUXhINcAGlut\nlmq1mj73uc/pzp07ajQaun//froXWWs0GnO7glBSKHUMFtuE9/f3kzLloDTGgVIHxHBOCYdmoeyb\nzWby9FZXV+dkiJRWkW5xAwtoIAXB5zzT15MbS/hD2og30cbIDb9rtZra7bakWb3Myy+/rI2NjXTw\n3UVRlNEIqF238L0Db9cvEWi5gwVI5TN457qY9CGAD6BLtAO96G+hRk9Vq9W5N14jy76uPBp25coV\n7ezs6BOf+IR6vZ4+9alP6Rvf+IbK5bLW1tb06NGj5AgAbjgYDd1KnwAApFw9YkEfkG+Koj0dBZh/\n//339dJLL6larardbieeSfORPPhCNJ+2PZLC/+604Fy5HAOK3BnBVtE2KV23y8g2c0f92t7eXgKB\ni+hCwEmR4StS0i700QieB1SikfY2PVLgxsB/IkCJz/britIWHup0bwEEXyrNdh/s7OzMHbftk180\nFv9uOBymfCmvn48RlsijojMuvEgsArHz5s/nJF4bn+s/MaLDuJ1XPm/xejcEjCeebnqRxCmoHs2L\n88l4ve7AIyA+fjd20nxBG4rB594L96ir8LeB4rW7lxu3MuLhoHT5XzpNgfC8RqOhW7duaTAYpBTS\n1eWWozkAACAASURBVKtX9Zu/+Zvqdrv62Z/9Wb300ktzR33neZ68SJQ4qYuvf/3runPnTjpR8733\n3tPDhw81Ho9THt69MdYBRbHOt263q2eeeSalTkhJURMC7+gDPCiKpLrhdZ3gesRPVfb14S91Y9cR\nxZy+tvkfoEbhY6PR0Orq6vdIQj8a4fG6M+E61XWke/VcBy8dpAIipeID2CSllFej0Zh7942ne4g0\n+Bt8MepZlqV0DGeZlEqlBFDoL/2iAFtSSufcvXtX/X5fu7u7euedd/TZz35Wt27d0pe//GVtbW2p\nUqmo1+uluhWijr5u7969m87pqVar6na76VBBZJNzd+hLlmUpagJYLpfLevDggZ555hkdHBzMOTl+\nngm8jRFo191FTiQ6hO9ZHz43RP5xOtx2YM9wcAFBDv6IYh0eHs6lnSNdCDiho0UEo2LBpBtXX9Qx\nQhCNZszZAk58u5g0D1CKDHUEQtHg0PYiTzmO2Y1OPHHSjb0/n/v6/f6Zo+Y5TTIaOG/LxwDqBdg4\naCkCX9E7iv3yZ3h0Jj7XFVpM08WIUXxOnN8I/CjqvEgql8spZOvbvaXT00UdjBbx3fmEsXOD554l\nbaPA2FYLCBkMBnOyg4fjBaHsfHAlBnDJ8zwpUn+Tb7PZTICQN6s+88wzOj4+1tramq5evapKpaI/\n8Sf+hO7evZuOtncAhVfHmiRK8oUvfEH7+/va2dnR5z//eVUqlXRGCooTuUWWnQe1Wk0HBwf64IMP\n5p7JmFHk8JmxU0+wsbGR7uF737YO3x2Elkqzw+oATfSVehj6j+GUZjtCeHa5XE4Rrkpl9qZb6hWe\npMQ/DiqXyyklEQE36xT947sQ4RFGyg02c+Db0Nm2iwzD24ODg2TAJaVIGH3w57tBBbR4Kh1wQ92f\nR7/pO8/Z29vT0dGRDg4OEgjKskw/+qM/qvfff183btxIYJ/TY0nBoOPff/99lUolbW1taW9vT51O\nR3fv3lWz2UyHuBH9oOAW/cFYJpOJ2u22vvOd7+jWrVspWgg/vS4E3npND/0CQLPuPFKOrongm7Xg\nsoDt8KiuR75cLkgTMT8UEhMhXEQXAk4IycYiNGle2KHzAEI0kE/y2iH3knwhFYGEJ4ET/03bEaQw\n8Xip9Xo9CSDG4LxnQyhl0LkrvachN/Iu2M5zT034b36IVng43EFdEahhHh10+vxyfYygFCnCaLzp\ncwRKF0Eo71arNZdvl+ZPW4zRIOYQUMA9Mc2H8uBZbiT5vbKykiI4XBcVNsDItxP6czwiQ4rIdxmg\nzIiYbG9vq9FoaHd3VxsbG7p586Y+9alP6Sd/8ifTibDuIcInjHie59rd3dWP/MiP6Bd+4Re0tram\na9eu6Xd+53e0ubmZ+pRlWdoZ5Ip7OBym39PpVGtra3rmmWcknb4tGDDgRM0BkSQKcldXV5PhKJdn\nu5+Itvi8MSfw1aOu/q6ca9euJWBC6J25xnnylEi9Xk/bq10uLooA1aQMIQC3e+YYxaLUDrKPbHn0\nz42c1+T5EfJ8X6vV0n3+GgCvUUHOmTeOcPc0CQabSICkVHi7v7+vq1ev6uWXX9YnPvGJFBlYW1vT\nr/3ar+nVV1/V3t5ekhGijAAnANeDBw/02c9+Vmtra6rVatrZ2dHLL7+st99+O609BxJem+G6dTAY\n6NGjR0nHsq05rmXpFGQRpcLeElFFPt258dQ4fISXHs3zdDIv9kTuvQaFfnhJA5FcrjvPobwQcOIo\nKxqU6B1LZw20G8sisFAEKiCPlETA4u0vut/BkxtgJiRGBNxAsKjJ2+N9TqfTM9Xn3g/+RvlKpyAF\n9Oy5VhS+K083lBEYOKjwsXsEJQKMeEZEETDxZ/vc+vkQcQG68Szqh6faIsi6DOQL0M8BkDRn2N3A\n8517FxhMN3Iuq26s/H0ylcrsxXwU4BF6paYEGXRvKqYiMESsQQAwnwOMiT5m2exgtK9+9av63Oc+\np+PjY929e1df/vKXNZlM9MUvfnEOVBNRINxeLpeTR/no0SP96T/9p9XtdnXz5k3duHEjhYoxJuPx\nOIElFDMeLcrw4OBAWZalaxzExh1S8JC/8zxPdThEi1DqRFuZL0+ZwTvfygyf+/1+2kkkzYNqDLSf\ntYGecYV/0cRYom6J6535jbIMePA1TjvutXOd83YwGKjT6cztsPG0EO0iC64TAEHU1rEeAJPIFsaS\nqCPv2PnMZz6j3/qt39KVK1fUarW0u7urvb09vfDCC3rrrbfmrqUAHb22vb2dzjr56le/qkqlop/5\nmZ/R0dGR1tbW9OKLL6b0vOvP6NgAxpvNZqpbgvy0VeQG+eRz5o417rwAPCDXRDWZM2p84CvtSzNA\nSHppUaTbeY+M0J9LV3PiCtoNHxQBhhNGmO+iAV3049e7IS66zhXBouhJBEVF0RLvc8zTNhoNtVot\ntVotdbvduaLB6Cnz2z1ljB6nCMJX0DPXR4ASxwQfPB8Z+VQEFiOoWZQGi6AoAotFcxR5Ge+Jvy9D\n1EQ6relAMXi6iTnxhepzIJ3m2B1g+ntouA5gPx7PTl/Nsiy9XXc4HKZ1Eg1CBK/wlIgeJ1jyLC/i\n4zpOaC2VSmq1WukkyldeeUW3b9/WBx98oJWVFX3iE59Qv9/XG2+8kUC17yqg/5x+ef36dVUqs1NR\nS6WSfvVXfzWNtdPpqFwu6+rVq+kcFC/0ZXcOfKUNxoScEPGjfsGP//bCVebJj5B3J4C6EbxtT7Oy\ny6Zarerw8DDN6XQ6VafTmZs7wJ0bcgdx7oFfJLnuQH6YS4pRMXTwOkbjvBAUEOnvbIoRKOk05b2+\nvp54yxxxj58RQl9dL0SnCXANf7nHU6ij0UiDwUBra2va29vTzZs3UwSGqMIHH3yQdtAwlna7nVJF\nyMB4PNbW1pYODw/VaDT0G7/xG2l3jgMwaVaTxPuopNPzdlg/8HB/f1+lUiltmXYg69FInAoveGW8\nRYdvOliWlNaVz/1wOFS3203rAWDNsxx8+NjclhGpOk+2L3y3ThGwgKKH7/fG6ARGgLbckHG/X7co\n2uIKoogWgZUiY4txhmJOD6+YsXnY08eIssqy03QB+X5vw0ODvhDjGBkn18Y3c7o34REmH4vzMwLM\nRQAu8nERuHAwtAgUxrm4TGkdV6DwzV8CB4+lU3lDyfMOGa9NGI/H6Z0apNR8Kx4RGVITyBkKwz0x\nFJbXXOX56fHXblgIiXe7XW1sbKQtuVyLES6VZrUupH3YuXP//n0dHBzo4OBA6+vraRzULjhwqFQq\neuWVV5Ky/vSnP62f+7mfU6vV0sbGhvI8V7/fT4daAeAwGC4X8IVDqwAxLtP+t9cjsEuIufGws+fd\nJc0ZNue3R2HW1tYkKdU9jMenL3gD7MV172tWUoqAXSRh8OKpuA4IkQkIAAKA8HXLbhTakE6PfyCF\nAH/ZPs7ntOeyGx0cwAuf+9t6qe1gXBBRkzzPU1qUF0hmWZZASLPZ1P7+vo6Pj3X16lXdu3dP7XY7\npQi5ZjAY6ObNm2lXCjuujo+Ptbe3p+eeey69t4o1h/H3LcPIICkl5JJ0lNsfSXO7kTyq59GWmA7H\nvnikgwgLgN7TRx6RxDlhTXqBPVE/LyFgbTxpt86FbG9wYXZwIRWnUvzaaIRcoRaFw3ieRwYcYXM/\n//tvfy59g6JhdIpgxz8n1Otj9+vcmDivWGws6hhOxpsFyLinHBdq5J3zyHlJn/2ApNivyBvnl8+H\n3+/9iLyP/HV+xnqUIn5fNLHoUSrwEgXjvJROFbLz0UPinF7JwWe+u4t7UAK+o4a8MvxFMaNg3Et0\nkIhXiMFgyx+RChQfHv/Kyor29vbSoWoUndJfFBDvFsnzPBXt+vba8Xisr3/967p165aeffZZvfHG\nG8qyTDdv3lSWzcLj/X4/RW6QdwoVY3gfJQ94gue9Xi+dGkuhKnPjRjKerYKihgA4eZ4nrxbD5DUX\nw+Fwbr3DDzcCzE+RAr8sKcto1D0lEl8LIJ3qAebD66QAWhg9drBISsXZpBYoBKUWSNLc26aJaHGQ\nmhcYs67QlR71idFExuf9Hw6HqV+sPa5fXV3VaDTS/fv3dePGjQRue71ekmfk5vOf/7x+53d+J9Vd\n7e3tpcje7du35wrDXRfQDv2GH51OJ70UE74y3nq9rv39fTUajZQ29XXucsZ8+P2ARUC02848nx0M\nB588Pcuac2AjaU7efX55zqUDJ1Jx9CMCk0UARTobSXGv2g1nBAfx+ecZRf8+euuxX0zsovbdCHmF\nOH30RbIIWPE94TtHu5LmlKf3tQhgRT575Xusy0Gp+pgiqOK6+H302ov6tyiysug5Rfy/LOAEQxSV\nAEbSgQFGKPadOcXb46Am0gl4kM5jP6eEfhBiBZAAPh3wkqN3BUnaiXQK52+gcDFGKJe1tbUEiCiQ\nIy+ObJ6cnKQCT/rhnrg0MwD3799PRmZzc1N3797Vzs6OxuOxut2uqtWqDg4OtLq6qnK5nA4rc8Pu\nawaPF7DHCZ0ArY2NjdR3UqQu/36qLqeKOo8ARn7GA1ECgIqnIfzcCfhC5MYLS+kD25AvWr4BWZLm\nTkL1vro8QQ5+Me7oE04lpn2ihsg9ckk7yBtpjlqtluQt1re4MfY5ybIsgWYKszHyDk7L5XIC21k2\nezdUo9FQu93Wo0ePEnBoNBpzu3OyLEt9Aij1ej09++yzc0C41+vpwYMHiTfoBUk6Ojqa2x02GAwk\naW69Az4AJA8fPtTW1lYC3f1+P8klER3uRW6RV0Anc0f0TzotjKUvjJE17E6yrz1kwKOTkfe+1oro\nQs9EdjAS0yKLrpfmd3cUGT43aNFTj8/2vxelIGK73nb0PB1sgOr9b1CpA5GYgor9W8QD90YweB4i\n9nsieIpteqjWx8BzvLgzth+f5UayaK7iffH+2M84x0VANgKfiyIMkEcq3Fh5TQOeH+SREOl094F0\nasTY/st2YPiLt040Y21tLSncKLcobZQnoXSPnHnf8/z0BE1PWUizmifSLSidWq2W8snT6TQpsPfe\ne087OztzhhsvejKZHURYKpV0//59jcdj3blzR6PRSFtbW3OGnXfssDuC8UenBD5xjDnPrFar6vV6\nGo1GOjw8TMWIDqaQdULn/M0c+xrx4kCiAQBGalGYQ+acwkP3XH2N8DlA4KJ360jzp0zHrefwnnlw\nHnGvpCRrfuYIJxLHHYsQOg2Qxv1cB8h3HnkE2OucMLYAE4iCbdp0PU0dyLVr17Szs6Nut5te5Ac/\nOOPEoy/0o9VqqVKpJDDFYYmeKgEcIKPwlIMHIecncwCYYf3BJwChn3nCGof38MKjX0QlkU/nr9eV\nQMyfrweXDcCNg9jj4+O5iGehvD1BHn9PKIKBaJSlxSdr+j1+LwrfDRn/S2fPzeAzD+35c4oiDG4g\n+RvwEfvroCgaHdC/gwIWNcLECYg+0TH1BYp1JRB544s0GqnIV69hoX0vmoogg7E7+CoCCQ58Yj+K\n8u3R83LlF2VkEUi6KPK59ggBxh1eOpjwNKc0v0WY00TdCMRthxHEcr4JCoXIgAMTftNnvHdfMw5O\nfYfZ6uqqarVa+syff+XKleTJ5Xmua9euqdvtqtvtpvfroGwPDg5S+ocQ+GQy0fr6evIYV1dX07t4\nUNzscsPDKzr3xQFiuVyeAyikhg4ODtI5Fr1eb24tuC4BaHndBHNBBAFQ5uvK61WIlrILyIGOA/9o\nEKTT9MdFEoALWfXjzeOaRVb523WXdFoQ6cXARKiQ66J6IIwi/PHICjKFfOX56TkckK/JPM/TTjHa\nxKhHp7LZbKrVaqX05cnJiTY2NlSv1/XOO+/o4OAgGVvmql6vq1arpTOp/KwS1hkAGwBLZJLoKGDG\ngbdH3ugv0bnt7e0E8IlMUFDtUSFPFSF/yLM7h0ShmG90lad8yuVy2okGj7PsNPrFmnSHgbXpEc8i\nulBwEv/2/xdFDRaBGSh62UUGyw2iT0Q02N5+UVTC23bDzGLkB6PvR1cz0b5rAgXNhDko8LSL99FB\nwXnRhUXA5LxrvY9utCIoWwQknzR3RXNWNI5F1/P3eW1/3OSgkb4x33gpKOqihelz77wgCoK8EnGI\nxpkdJsfHxyqXyyml4Uo78s5BET/SqTePEWZXzdramtrttgaDQTr1stlsajgcpkLB1dXV9C6Td955\nR/V6Xa+99po2NjZ0fHysl19+WS+88IJu3ryZABTKinqZtbU1bW9va3t7O0Vt9vf39eDBA/V6PR0c\nHCTDRpQGoOCHTVG854dw1ev19PqIRQ4RXiKesSt2+MghiPCN9eLgUTo9CI4CXgwZax4gidKPBdWx\nGPcyECDBAbbXc3i0wmVKmt8KD5BzB9OBN/z0KAPy7R46upPdI/THHTRpPmKLbuVlf/TF3yjtu7B4\njw7plFarpXv37iUZYS2QbmQsgCtOSiZKd3x8nI679xNi4ZHLlO+KcvvlBh9ZdBnzKK2DRa4nzeOR\nDYrvoxPqgJR1QDveLvPt10P020FsEV2aF//x2XlGtIhcwbqHXWQ0i+5zI78I+LAwPFIRDXtM6/Dj\nnqJ7Q9wbIzlsDUNgYsTEyQGC9yn2M0Y9nD+umP160HgRD4uiFbFvMSJ13j1P+r9IJrzP8fOLpEWg\n2MPYfO+7qvgM7wKljpLw8zWk+ZQPyh+P09MKcdcVis5DrgAn5ozryuVy2iq8s7Oj/f19SUpbeYkA\nrK6u6uWXX9aDBw+U57m+/e1vp+3Mk8lEzz33nL71rW/p5ZdfTuHsb33rW1pbW9Pdu3fTZ+12O8nM\nycnJXFSFAkmeT7/39/fT+3IIbVPfE8PPFG8CArxQrwhoO7/9HkAfR537gWFRDkg9SUovaEPXRD2B\nIWRsGIfLkNJxsI3xOj4+TsXNDtx8Z1qRc+fn5Xh9CfqRa2kHOSMFRH3QxsbGnE6lD/F5UX+4LiyV\nSkn+RqOR1tfX1e/31ev1tLGxIUkJLB8cHGhra0uj0SgVnlJHxcF9zzzzjCaTiXZ3d1PfSKtwsNut\nW7dS3+JWYKLngGG3BfDAZYe1y/hpy9Ml7oSjJwDLtVpt7kRXaRbZjzVyABN3ZCLYi5Et6dQWwV/X\nMzGKHulCwEmRQfTw9CKDyWf+A4OYsCiQRdEE/o/P9O9if4uAUxH684XihY9uqPH2JM15Bb5gYjuQ\ng5W4ldH/9ol/GtBXFEWJ+eQoSEVeuIM0R/j+3NjHJ4GQ2OcIxi4LMJGU5ts9NmkeMHq0y5WyKx3p\nbK1Qls3v9qJd2nDgQl4ejw4P39v1Cn28H9rZ3NzU/v6+7ty5o2vXriVFCgC4c+eOXnvtNb3xxhv6\n+te/rldffVVvvfWWhsOhXnvttXTcerVa1b1795Rlmd566y11Oh2trKzo4cOHqe+3bt2aiwAR/uYs\nBWTp0aNHSZFSdHjt2rW0gwhjh1cbPTzAP8bCdzehsPFeAQWsvTzP0xZPzrHx+gqAJP1gHh1oYBhI\n72AQfAt/rNnwGoSLJE9Jx7SXgwIP5QPo/PA66dQpg//u+aMPPUIlnUZpANfw0wG1A37klD4Actrt\ndkqbS0rR7Dyfpe92d3e1tramlZUV3b9/X594fDLs7u6ubt26pXv37qX+MJ5Op6N3331X165dS7uK\nSIOgo0kbAX7oE7JCGoa/PfqDrJHmQV49mg1fWfukGeEV7yfyKCrPJ2pCX4hKYadcN7FT1O0TfOC5\nDjK53+uE4AfzvIgu9E1pRcAgGqhoEBd5/xFYnBchiIwtMqTenwhuojcQEaR7pY463bMo8tIgFh1K\nkv45avYIhxs7FxYMkIeuiwxgHKPzOqJ0+kL78KDICHt7Hkk5b079Pgd0RQBx0TxdJPli8/mm6DJG\nk1CazK90GhXxPLqf2OgF1Q54pfmXruGBsQXTyb105Mp3CLz33nvqdrtaW1tLioXajWvXrunrX/+6\n7t+/r+eff17ValXf/va30yFqHgafTqfpUKkHDx6kMDYnG2dZpnfffVfT6TS93A3ZR8FLSt83Go20\nqybLsuTV0kciLByyhUJtNptzfKNGgAgPfPSdUF7ULJ2+9dm3ZxKZAgj6mqTfkubOy3Dj6anbmH7L\nsizVtJznYX4chOfrY4R30UsHYAFIWMsAOOkUxGC0OU9DOjVcRAzdiaHoU1KKRhAlc6NLv3zbcaVS\nSeuQ97v4561WS71eT71eT61WK23ZBQwTMcGQE+kj8kI9EXUy1E71ej3V6/W5HU9e8Mzc03fO5aHe\nBLlG1hyYOUCD74ABwEi9Xp97NxWggahJ3NLtYA9g5CDddY/rNPjEGqZ/bjOYF+532Yl0ITUnUTlL\nOmPwIrhwigDGFT5tFRkyro0/tFlkjKMhh+neTwcWcYyek/VQM4uVPngVP9cXhZr5P47XozhusF0o\nfPyxTedV5J/fuwhcxL45f523DnCKIh8YBTfWkeL8F8nIRZEfxOVy4SctSqdnYpASQB7gLwvcZYT2\nuV86lVsvAIV31F5IszmlvgH+OQji2RTOkjNniyx9H41G2t3d1Y0bNxKwWFtbSwCFKAsRm1KppO3t\nbZVKJV2/fl2f/OQn1e12tbOzk57Xbrd15cqVOcDCy+6Ojo4S4Njd3dX29nZS0rw4DIPjWzU50K7Z\nbKZcvUc5ABrk5sfj2VuP3QtEscLnlZUVdbvdM/LN2sV4u3PghpJIAd/7WiUS5A6GR2MumhiLH2YG\nQKGmjrEjL4BpN26+K8yjwPz2KLHrEk8JwBdkkigIsu3nbLhOxMAD8PkNCD46OlK73dbKyooePXqU\n1uCdO3f0uc99LoEGTitmnLVaTc8//7wODg4SeNnY2NBkMntpo+tnJ4C8p6IAXgA/PwfH02XOH0+/\nkM5FJr1wtshuoosB+YA/TwOTYiNqCT/9O58PACVRGfrru6YASF4oHenCwEn0umP0IhrEaIDc8Ppn\nizzo+Bn/u1HwNrxNro+G3z93kBO/p6+gzAhsYuErCzrLTo+ALgIqtB9DrFLx7qTz+BB56IspRoqK\ngF0R7+IcxmfEn8hnb8v5ViQTlwWgsBg9PE9/PUTqcy2djVJxvXvNXgNUJH8AEPeuJpNJUt4oC+4n\nVEze/ejoKMmTK/XpdFYQ2+v10q6b0Wikg4ODVOD67rvv6sd+7Mf04MGDlP44OTlRq9XSzZs3kxJD\nWX7605/WeDxO2yzpKx46L74bDodaWVnRO++8o2q1quvXr6ter8/VA0hKQKrVas05AM5vQs6dTkfN\nZlOdTidt4fQXo8EHxs468nSMFyCyG8o9YCcv3GQLqr8XBqPZ6/Xm0m+SUvsXXXdCesBTZkQ8Iuh2\nz9r1CH+ztRxjS7QKcr0ToyHSKbhjcwHANBpUogkeZUCWHcADxh2McvDhgwcP9PnPf17vv/++Dg4O\n1G63dXh4mPrCSbCs452dHbXb7QR0m81memkhMsbhboPBIB13j8zDH+ehR5yQazfqHu3AnkinQJDo\nG/qDrcIANGpPkNF4pL5H4P3ZtOF1UpKSY4b9Qpd4DZFft4guNK0jnU2FFIGLaASh6HkX3Uu78Xlu\nUBGAWLlf9LzIzCJjEfviUaHYTjTCDiaYXD6PQGPRGN17jtd7CqYIREWQFnOb7g1G3kSe+P9F4LKI\nHLBG4AHfiiJiRc+9CPKwLZ4Zn7HwUai+w0M6BQyM3UFYBL1ed5Rl2ZynBSAiWkPBnhdnAmbyfBby\nbrfbSd4wCP4ulN3d3RRNaTQaWltbS4a8Uqnok5/8pN566y21Wi1JSodWYcw5sGpvby+FzzmYyj0t\nDufK81mNx82bN5XnuW7dupVy6BToMt+8MdhfUIjMwg838AAePoOXeZ6nz1HWzIvXj8E/Qu7OS//e\ngSNeMsWGjUZjzsCR3nD9wmfu/FwUMT4MbbPZ1O7uriaTSTK2AAB3Lly/OQhlfTA/1NV4vYIXyyIf\npCIAu34oIPd7xJaoG9GReGR6r9fT6upqAg8uO91uV9evX9frr7+uzc3NZCdqtVrauTYajdTpdNJn\nvJm72+2m55DWAbCsrq6m+gwHah51lU51MM4qtSDOt2gjkElpJoOkdpFBP5rC01r+Ek2iL/5s/o58\nBXSwpgAqpIt4jgceOFeFsSyiCwEnLrzRYC36P6YHitpzA+jILxpd/9tzZjCRRebXu6JyilEfnr3o\nefS3yLh7H5g0XuoWx+5GKoaJ4zVuvIvGswgocE9MuUVFWRQtiW27kfW+0G78PLbp/Y5g9TKAEgjv\nxFNjeBvR2OCFFoX7fSeOG74YLVpUf4JMssMExUfkhGvK5dk5BZwbwlwTfalUKnr48KFarVbanTEe\nz47glmZzsL+/n5Q7yp/zINjNUiqVUuHq3t5eWmMocA8Ru2L0k1f5nDNcMGhZlun9999Xq9VKBZjO\nD5S5F3PSFp4vY+33+3NKnDHCMzxT+MBz3HMtcq6YY+aD+XMPlvs468T10Hm5+Y+LSqVSihKQ2pBO\ngbinsL1wm7nkb9IhXn+CjDPPyCkRphj1y/M8ySM7XphjjONoNErvInPZZn739vbSCwXRo6TmpFkU\n5OHDhymK0mq1EmDHIGMvABt+/o6nVvwsF9fx6ADWsUeeiAQSyfNiY+e9R2C95kOayR4OhKSUNoLX\nrmtcB/CZp+Qc0DBn8MzBpV+P49Hv97W+vq7hcJjkgfsXytvvQlY/MhUZzUUGx41ykQfu3skibz5G\nQaIxxpC4AfVajUUGPeZNFxn6ImO96O84rvOASex/jLoUUeRFUUSDtoqKaV3oY3olzkWRko7GtQhY\nLAJuPp+Lnn/RRLgfZeMH6rlXh7LNstnx6ngZzmPpFHy6N+TFlkQe/FwMvJ2joyNJp2emoCxJY0gz\nQ7i6ulrYxng8Tgeh8fbswWCQ+thqtVQqlbSxsZE8RT/Hh5B5tVpNxtzXOZEXjLenMzD2nNyJwe73\n+wl84IH3+31dv349bWv1gjyIiIgrc4CTg5XV1dW5VC/f4Z1Sy+Lbhz1a5lG/LMsSUIu7QtwbAAYo\n7AAAIABJREFU9TVDv2q12tycX3RaR1J6VwtGJctmLyItSvGSAgKUObBmXaBjIt8isPHdOdPpNAFl\ngAzRAuaQPhDF43/qjySlU5Rpv9frJf6vra2p2Wzq6tWreumll9KBa0RepNOXMZIW5D1NpJccgLis\nui3waI5Hnfg7y7IEgHxt+DX+tvIsy9K5Jg6GJaWieNqFJ8gx/eB+L4D2aIp0GjX09CU6wUEgn7Oe\nACrww0FWEV0IOIkph1j0xzXS2foEpyIjFkHGk6ICRQaU5/v9RZ5LBCb+u+hZ/rtoTP4MB0VFbSwC\nYou+d+8uRje87diWgzS+cwR/HiApAg1F9SI+bzyzaN6LAGgc40VTlmUp3OzFjMhVuTw7O8TD/75Q\n4a0reRSqe0gocLb3ugI4OTlJkQ08FVIGeHL0BSPgZ09Mp6dvhh0Oh+r1ejo8PEwnq0pKCh3jDYjw\nuhiveSqVZmdU+Iv7JKXj5ZEHV+IoP89Rk/LxWpBms6kHDx4k4w94ATSQHqDmhBx/v9/X3t5eeg7p\nL3jqc0rI3Ne8A3XqCeCfRxFQ8hiTuEUZoObgCR3JQVoX/VZir3/gRFN4gU5gfbpxdeAhnaYekHe8\na9aHnx7Kc5EhUooeGaMfGGn6R7qNPjvIHwwG6dC+0WiktbW1BLqGw6Fu376d0ofb29va29ubS82S\nLpGU2jk8PExABdDmYIQUiMsVET36jPz6+if1g0FHFj2tgxwS0YB/jBee7+/vp3uQPyKktEekEp3A\n+Ogvcx3nm4JYIj6eegKQF4GR80D3pTi+XjoNh3tEYhEokeYjAEWGrCgKUtRGvM4X0aJoS4w+xHHF\nZ/j9iwBVfG6MEjgIgBZFLuL951FRVKfoe55X1GYR8Ip9j58XtXFeXwAvtOd8izy7SMLwSUrhfwAL\nyoI6DK+LQP75mwWPMpA0p2ylU6+Hdvmp1+vprAaPfG1sbMzVEPlbVv14aun0QCe2HKLUPRwb+e+h\natr0KA/RkcFgoP+fuTf5cezKrr0XyWB07IOMNlNKlUqlaqBCGTDgkQHP/Wd74omBMuxB+ZWsLlUZ\nTbJvoyX5BoHf5uLJy5D8fX5iHiCRDDb3nnuavddeuzmj0ShiCGhYmzQPNuU6k8lkLVgcC+/4+Fi9\nXi/AHOsJhfHw8BCsD5/n83m1Wi1Jq+A8gMTh4WFcy8fRgwJhcxgzZ46k1fpnTSC4vaGwuAdji0JF\n8bpS20bDWkZRUumUZ3Z2A4XEbzzmQFqPDWGePBbLlSX3llYuRMAbSg9Xxv7+viaTSawr7geQZ8+5\n+yKXy2k0GgWo5ViGx8dH9Xq9iK9xcIjidlkM40OfYTyYe/rq9WBYX8TNYEjQl0qlEicMO5ijOdPE\nenZw7q5JScGKsBYdLACaiQ1z4AngdtcboMVBu7tz0iMfGD93r/6UbtoKOEmVHILNFVT6vzf/zEHE\nS1a7C9KUufHPGDjeS5VrFhuQ1c8s0OCbzNkC71f6e4/z8Pc2ARPGNwUE0odsVFZ/NzFVLmhdgW0C\nkll9TYGEf+6BdOn4+fyk453O6bYbyg0hvlyujlfH/wvA4NmJd/DxcBfL4+PqoD8EI58DEvL5VaZO\nur4J8ET4Iyyl1YnGgBHmAtqcPZDLrU6mdsDkz+JWFPPrbgsEHsoYgMF16BtWIn0mkyIriJW6I5PJ\nJFgVaT3Wi+d2qhoLnZRR3GCwF54i6+sL4Y4y9rWZpnpS2TRVxsRHOE3O3KEQuBfg7CXf/C/RGEP6\nxenOxFGkDCeK2fc8gMtZPMBZypZIq/N8pBVjwTUA4tLq0EjWGSn0WP6S1jJg/CA87uuyB8DFeUvE\nTjkr4bEwLuNYn647iPl4enoKJZ3P54PBGY1Gajaba8xJsVjUeDyO1HWPXUFm8F1nVFOXzGg0CuAL\nS+L9dgDHXJFiz5r273h8FK5ongXWE3DHMyM/YBg9g+ejc+uk9HyqeFzo+nd+juWdotms+6TC25Wg\no9OfUrbuisoCO+lr74d/tokleokNyAIP6TNvep32IwsAZvWH72aBrJeYlyyAuIld8fd8nPn9x8SS\nbGoACJQ9EfNOiUsrIcx6g6VAWaYK390+KDvuh8UE2wCgwcqfTqcaj8eSVtYLlWNx43At7uunrqKo\nAQkIO6wlL6DlwbfME8Cf/gBQDg8P43mxWnHV8Ky4XnZ2dkKJu3uK2BqEPS4S3DRQ8KwbwNTBwYHO\nzs6iT41GI1KRPXiRceZvdzXQRwCdAzKeEQsZRQf4pM8IdxSBU91eQ2WbDSCWz+ejgFmlUolzZgAc\nAGeAKM9DUTAHGYAamlvorG/XDcw54MAzZSaTSSh3H0t+h8xHqTp75syctIqTYL0UCs8p5Nzz4uJi\nrd+AI1jAQqEQrkpnlqgVwrosFouq1Wo6PDzUzc1NxMiQir+3txdrm/GRVnVEvM/S+qnR9I/ibzA0\nqcx3A9kZGElrzC1j6IfRMlfMJ0CJMeYaboAwD27kblxzP7Em/580t6T4m0WYBVyyXmf97da2X9ut\nRGc/+JxreZ98caZKwH/n101bVt+z/mfTOKOQXncTm5EFsHzBpfdPQUDWOPr1oORcaTl4S3/j/c+6\nV/rsWe87K/QSSEr7+jE0D34jvZU5JqVPWs9QQng4IHFr210HLvA44E9a1VdxYeDWFNk0KGqsI3eB\nACCcDuZ5SB10Xz5pv+wbshrI2pHW9xUgi+dgHOinsya4mebzuer1umazma6urqKuCoqIANbhcKhG\noxGZN57x4YwHio+YGRRrsViMs3xQgF5BF3CUji3P4LUhHLxx/g4AhDGWVhY62VKz2SysUUAM13zJ\nwvwlGqnoxeLz4XWVSiVYN8AHqeq+flm7gBtpvQK2xwYxb6wJ1jzzARCEgcDlxfWp+gsL6LLQgZID\nF4AA/fJ96fE+xCJRGBDwSlYagePISWqdpHFPMBeSwlio1+uxhufzeZTYf3h4UKvV0ng8XmMq0Rce\nlMp93XXDXidwHVDsYM/dvO5q4TNpxZB7are0iqNZLBaxXx2YkFoMKHMXrj/PprY1HtyVT2o9Zylh\n/74DD77vf6ef+e9fUmRusUOp+mLdxHRsUtIvsSe0NGjQX/8UqyJ9ePgfLXX90K8sl1ZW2/ScPgbp\nPPjrLPCQ9uelMdoEUP3vrHWz7caGhgJFGd/e3oYVtFyuAj9Rlu4q8FN1CeREqSFkvWiTAxLKtkvr\ngZy1Wk3SiuEYj8drNSZgbRB60opKRymQXSOtZyW5MkAhE/CKNQ2ocVcG/XFB+vS0OjYeS24wGGh3\nd1etViviVvDR397eajweh5VJ312RueXo7M3BwYGq1apms1kcWsjY7e/v6+TkJCzCNEWZeBPWs48Z\njBWAhf6yPrwyJooO5Z5awwCUbYNvntULiaEMPZ7DlZuPcy6XC9cLay0N9JRWAaCMG4yMMykOaN+/\nfx8W/t3dXTApLldh/dIUWGkVh+EuPNYpAJF/0+lUlUol+spawvVIv7yysq83gsu5N4cEEtcC4HPg\nTnVlaRXcTuN77gp0Q8PZIq6Xy+XCJUW2mjN9ktYMEH7DNbPACf1iLJwB4/eMP3sDY+yjc+tIHyqW\n1DJnUJ3BiE7nV/UbpOwsnU33c6o2tczT32UBJv+XAh4WCIvFFby0bv2Aov0zX1AODPzv9Dn4m2fk\n/7SPaXsJMKXf82fyZ8U1gND3PvJbv5d/7myA9yUFWg4Ys4BIuo623dh40ipgFVbDA12d3i+Xy9rf\n31+by+Pj47DolstluD588+MuYs+gAD3YjT2CBZPP51Wv13V0dBQAAEWLkIEpwY+MgnH/MusVgQ+1\nns/nVa1W4364JbgP15cUp7/mcrkopIaiRvkVCoW1k18RgFDi0PmM1d3dnabTaYAg1uVsNltLZ1ws\nnv3xw+EwzlHBiiRYl3vjxqBc/v7+fgRJMjeHh4cxplDby+Vz0TJn0zxYlvcAZvQZZoBr4QbcZkMJ\n+TMAHt0N6e95kDVriJRd1idMG2MPGzeZTLS7uxvuEq7hLAhrQVK4XtyFwP8AS1gZroU8Zn48Xfz2\n9jbqmsxmM9VqtbXUc+K4iFdhbNx95UybtHKNEfwLG+Muv1wuF8xUuVwOoAWAQ9axjp1tdXkLs+PB\nsdVqVeVyWTs7z4UAqTcEY4gx7nuE78KmetCyH/EAU4hr05kcgEoul4vxQ056OEfatl7nxBWLDzIL\nMDpqljoC3N/Lsuidrvq5bAv394Hz97MYHQcHLH5vfn9pHemyQbJYEb+Hf5YFirKUcwr+NrFR3vz7\n6YL3e6NgiAPgmfy7znLxfroYmesU+Ph3/yfM17YbFp+07uZCoUnPwg8LjEBMhGy/39d4PNZgMAih\n4Osd4Uw2gLTKwimXy3F4Ht/xgl5YpLlcTuVyOeYR0OMBbLy+vb2NsuowBgCAu7u7ACLSSsiTPQE4\ng+nxuV8sFlGFFUbm4eFBo9EoBGqpVFrz1x8eHgaQuLu703A4DEt+Op0GzY9gvr29DZDEmAAosBQZ\nfxQMmR29Xk+dTmctVRsXHQoHyp/7UGSO/Y6bgrEB0BUKhVA8KHYUHn0jZkFaFcfaZgNMMxcAMBg8\nYqpwdeTz+QiMfHp6iqw1AlJTme9xG4VCQdVqVaPRKFxqKHCPNeL+yOpyuaxerxfgA8sedxpGgbMJ\nyCzXLbe3t6pWq5rP56G4vZIr5ev9AEoYQBgz9hAgBODOXALac7nnDCTWPrKCQwTZR1RcZX0yBqT8\n+zixXhkv3F2ML+7PyWQSQJjvpG5pADYAmjFAntDHdE3QH5gy/macAEwvta27dbKsYbfU/bssBGi3\n9OGcypM+BDvcw0FGqvAc0HgGBS0LRKT995YK5JRd2PS79Nop25PF6PhzpOzDTynudFz8dQowuB7B\nUSxA/34WQHGLKes+6ZgwB+n9/VofAyDxhoBwZi/1CzslDoBA8VWr1agrgvDyKH/Gm2BWFDtjlBVw\n6+uGoFHcCE5jexGr/f19nZ6eRoE15oLiVADSyWSi+/v7KMmO5eXCFUYHVxcCjLEBVBCfsbe3p3K5\nrOl0GuOay+U0mUw0nU5jrBqNRghTt9x5dgJsPY7FXWcI45OTE5XLZTUajbV6Jf1+X2/fvl1jiljb\ngJ/FYhHKhJORSf3E2nTA5+MFsGdsCVgEOCGH2DPbbE7T0x83DB00My5ulN3c3MTYeFo9Lh9A9P39\nver1uqSVe8FlJmvIZQzVR4k3ccaGFFnkBkyA6wiYQgANwez0odFoBAjzAGfAF4CKPe2l3TnYEuVM\nqq60yuzzNQyYwkB29xMy0d0n3Je1kuoI12G4VfL5vE5PT1UoFMJlCmjCYFoul5G2D2CiX2msi68F\nmBK+6/FkkgJkuR7f1LbGnDiISAEASsfRrLsBeM8VtrtJUFjug+N6qQLmft7c3eA0bKoMAVH+LP4M\nfi3+d0bIAYa7aJyByWIN/Hc/xX6kbM+m7/2UayR1LfEbp/bSACpvKTPibqAUoKRAzN1fad8/toai\n4ZmcKl0un+MnOKnUhaxbk61WKwq1LRYLnZychC/b/b2cnYPF5cKA77A3EJy5XC6qspJNAljxfUGR\nsuFwqLu7O/V6vQA+ngqLsD4+Pg7XTaVSCeGFtcSaIKCS+UegTqfTYBdwZeAGQBATNwIA7PV6wYrQ\nf7fUAGMp3Y8y4r3BYKDpdBq0+uPjYwQlUlr89PQ0ADiAApk0n8+jUirsC1k/HiiLBQ/zhKsJoJey\nZC7PXqK/f4kG8PSaPMzVdDoN0HF3d7fG2mF9o3QZw2KxGIXKYOVYr71eL76DbGEcWS809s5isYg+\nEvPhwbIuS0i1Zd1QGNCDw2G9Go1GsIQYZO5mkhRsDXuJfYSRwHqAQZJWMVuwLtJKX7VarbWYNEnB\nOPl9AFOpPoNdYtzYr+z9arWqYrGoZrMZaxBQ5wY/BgHHQtAH1qnHmrjLGjYQIJS6NJ0B26SbpC2e\nSpxlAaOUvHgT38lyl9DYyExyymQgQPy+mxgUvx+fu4/Tr526irKek/tLilgE+pyOg/fNr+n9yrqX\nK32u7ddxZiW9Jm0TUPPrZzFaKCyEC9Y4NN+mMWH8stxtbiWlACUFsh9bc8oWAY5lxGYlBsKBKwod\nxYrg4RTg5XJVV8BZK0lrygAlTiwKa8796whXMnjq9XooGoQM4AnLE9cRgnt/f1/ValXdbleSAsRU\nq9UIQPVAX2ciid3w4DhcAZ5pgNKn5gjrDvdMpVJZqyuBMiC+wKlqru3gjXXtpxBDxS+XSx0fH+v6\n+loPDw+6ublRrVYLWVCr1aL+CjQ24wr48PTsnZ0d1et11ev1OC6AgnQeU+JAhDX0kuz7pRoMhwev\nQs9j/RIvAguCPCYeCLAuKbKscrmc+v3+mrzwefLMGg/8hlGRFPE5pMiXSiU9Pj6fms0+Ys0AXgCu\nZJAAgmezmQ4PDyOeazqdajqdxtENhUIhGC5nMwGze3t7qlara/sNgIZLw10szkYAEABCDkxdhzib\n5EYAspPYD5ieQuH51Gba999/H2D7/PxcvV4vZA4l+B0wTSaTNTCVxtFwPhhMZ71eD3aFfzxjuv8+\nupgTZyakbIDiAZ8pcHBgkQIMBCuTl7oUHGBkMSnuz/TvbYoNcWXqin3T+7RNrIYDiixmI4vBSa+Z\nAh3+dyYqfZaXmv+WMfK+k/aGEOZzFHN6H1feqeDl9yhhXwc/t7/bbAAQBxnu6kGhSiultb+/r/F4\nHODAi6b1+32NRiO9e/cu3B77+/sqlUoqlUoR4IblDWjBx00DfKBAPcvm8fFRBwcHoVBQnG7lknp8\ndHQUKY+9Xk/NZnOtqNZgMIh55zmxUB0AeCwI848/3dcO7g+Uy3w+j0BEYnWg4FH6WO8oRQQ/v6dh\nCAGMJEVcC5b0xcWFrq6uNJvNIoBwNBqp3+8HRQ27Ij0rOQA7c+nF67g/YA/WivWAZSyt19vYNlOI\nQnbGCEW1u7urbrcbVXV9/t2t4sYZsSleG0ZaGSkEJbMeYUX8pG1JEbd0c3Ojp6enCGz2M2ZYv9J6\nbInHZtRqtcj6urm5UbPZjBRcaZV6D4MJs8iegpmgSu1wOIx1AehBUcPa4ErCJQgrg2JnvKgoDVAD\nXDGOyBH3JuC+Amzn8/lYazs7O/ruu+90cnIS690BCACOvjN/fqAgWXXs+3w+r+FwGPf3lG3PCPI5\nduMpq22NOZE+jBFJmQQam8KVslveuBec9ubzTQyJsyNpQyj4tdLrORuT9peWukq4JwLKn2mTANp0\nff9+FhDx36eAJgVNDubS8fA++7ylsSBewdSfy+fX5yAFW8wFffHfpi69tM+bwNo2WrlcjgwWBCpW\nITVBqtVqHByGICIAEKvR008nk0mACOIVZrNZWN1kq6AomS/PQOC7TvO6ewNlAfXLScNOJc/nc717\n9y4sP5SR08YpcEW4QUUzTwhnLFisbk/XJWaAbB2EMBYqboT5/PmYgE6no93dXdVqtaDlPWbDrX5p\nVcsBa3Q2m0WmD8BisVjo7OxMNzc3Go/HmkwmUdiKuBVpdUbJ0dFRBLrihnK27P7+XuPxOMbEA27z\n+XyUMncrFRZxm42MFeYLZg3wAHMhrWf49fv9UNrI1FwuFwHH7IvUoPI4oVxudeAewB+5wO9gLwCc\nzrK5CwGZAQsDGJ5MJrHOyMrZ399Xr9cLWcTcwEoUCoVw0wCG+/1+BP8yBnd3dyqVSnHfNHaHGCWC\ntcfjcTzLdDoN8MPYALqlVXqwZ/NIq9g3+u01Sbj3zc2Nzs7OIi5rMBjEYYhkFHHtyWQiaZ39Bxgy\nPzwDwNLdbABG1jLz89K63mpAbKrwXeFI2ZZ9VmwKD5wqxzR2IY0RSZU4wtzjPiR9oBxRygixFORk\nKX36nuWuyuoL13CA48+Ttqy4l5TxyGJ2sgBJVtv0HQcXFAVDaKTMU3qvTa+zxiHrs/TvbVPfkkLp\nk32C0sJyQVjhu8XKf3p6Uq1WC8DgAaWNRkONRkO1Wk2np6e6vb2NeBC+JymUJamtrHtAO1kBh4eH\naxYwwIUMoZOTkwAT0qpSpGcx1Go11Wo1jUYjjUajuObh4eGaJQigwEoCIOVyuQA5WKhkIHlwYC6X\ni3RTrx0DLV6tVmOdHxwchGJ5enqKwEqUQUohU8iOrAsKahGo6GD+/PxcNzc3Go1GETtALAJ7i+wm\nB98wUICop6cnHR8fBw3u6a3Q+ihl4hq2nakjKZ4P0OVp68ViMdwizKOzErj0CBjO5/MR8H14eKjz\n83NNp9M1Iw3glrKmKDbkNXEsFLNzxpU1hMXfbrcjQHW5XAazAQswHo91cHCgRqOh0Wik9+/f6/T0\nNIAKDAAybjwe6/7+PornwVZ4TBSGHM8E2GRMYIAc5NIXd415PB/K3uOZGBPe9zLxHtvmOmg+n+vq\n6kpnZ2cqFArhQvWibXyXeXSASdo06xTAB7hEHqfuOgczL63trZ1K7HQQEwhtlrIkWTEjbBYHBkwO\nEdcpU8G1/T5ueWe5k3jtFC2WsQt4acW40Hjtz8HzZ73vv3+J/UhBRtqcbUktEu73c9kGB5D8nQJL\nHzOC0diAKWhyEJHVjzRWI2W80r/T8dl2Y+0RzMfrnZ3VCcLSqjIqqbIAFT5zQeDFw66urlSpVFSp\nVHRycrIWqMh18PljfeFSqtVqa64RFAXXAFAAQqDIEUSHh4dhKQ6HQw2Hwyi/PRqN4nuwOE7rOjD1\nNYFARAjDmiCApQ8VEkBnPB7HAXQoluVyGSmSlFZ3xsT34HK5VKlU0uvXryO1m6BhSuvTp0KhoN/8\n5jeh6FA2rH0swDStns9gip6enjQej6OOhfvykV8wUlyDGKJtNj+SgPmVFHEYruQlRYAz1jVrFEWJ\ne0ZSBMd2Op3YB4xj6pZfLpeRDcTaxjWX6gyYM2KhDg8PA6C78sYIYMz7/b5KpZJms5n6/b5OT0/X\napB4n2DQlsvlWir009OTTk5OIn6ENe8l7l0/sOb9sE2MGUAY7zljwe+llWz2SqywQ9PpNLKGptNp\nPPt8Ple73Q55UCqVwh3k3gPPusHFiosLtokMO/qF/CMTj/ly3fBS22q2jrQ5hsStCd+8zh64sGJA\n2ESpAuVeaT/82t78b++jpLVJ4/O0H/Q9vc7PaZv68tK1NgEWf52CI373U/1iHF2Q5nKrACfG2AHF\nJgDkc/ISg+ULOAU4WWzPz1nsv0RD8QNkR6NRFBtLgyQXi0UEVgJgGGOYCMD3zs6OLi8vI9sHUOjp\nh55xgiA7OjqKuWE9ovQxCnZ3d3V0dKTXr1+rXC5rPB6v+ej7/b6k573Vbre1s7Oj8/NzdTqdtbLm\npEgi8D0DhfXgQm9n57meSLlcjvRqrC83LhCCWJtYyKVSKbJ6Dg8PI27HC6dJ625MX3P0rd1uB0DC\nVYEl2Gq1dHBwoNPT03DbdDodzWazsG7Z62k8A6CCPQGzAGtCf1Do9I2AXgcD226kkgKAUYB+VAHx\nDAQKo7gYG5iFYrEY7k+qrnrWUi6XWzt8zo0g5n00GkXMCnPpzDigfrlcnTkFqwhIgiEAGBDbQiA1\njNf+/n64YUulUrgfmWdcs/xP3weDQewvZ+QlrYFs1nmlUgmw5YYz7mAK0/Gs3lzHIX8BtGQHMgfz\n+epgy8PDw3AX49bCFUvDYHD3HZ/73vKUZGnlsnE9gaHxc2T11mJO6Jy7AFIkyUQ6s+KfO5jxhydz\nwT/jXm7lS1oTGlluDmcMvB9Ob6V0Gc/nz5HlcshiAbieAx1p/SyWrOukCt5f+3ilcTQ/p3mfUtCH\nEHbFyn3cb5+2rDlM2Zm0r+n3eM5N8TLbaARDEvMgac26xipB2HKo3f7+viqVSlwHoYTi39vb09nZ\nWYw97AjWDu9zz16vF2nGzEs+vwoKdX8+1DQCClcJ/mUvtFapVPT4+KirqytdXFysxRT0er3wOSN0\neVbALWwNbq7j42NNp9NgZaDQU0DjLGW1Wg3l7dT83t6erq6uQgG51c7+cZct4wXIub6+DkubAEZA\nFgDm9vZWp6en4cbE/YJbGSXHa/fdM5/MAVYmJey5N/LKD7KDVdtWI/CTDJh0D6JQUdhkzCwWizWg\nC8Phhsx3330Xqa0EiM/ncw2Hww9kDe6Vg4ODNbeJz7G7OZHbMIIwJMvlUu12W7VaLWKtSqWSbm5u\nIpUXEPX9998rn8/r5uYmMlJg49hL6Jvlchnpx9VqNZQx96SPsO+s6WKxqOFwGN+TFOzrbDbT0dFR\n6B0MH28pI0gNFhpxJRgmXjSPuJB2ux3zgsyg0Sf0H2AsZXl5VuQxe4igYFhJD5DfuOb+P67V/1+N\nBQcac79Z6mNMFVjWQLjCZDBoKevCNfmuKzgHCDRX6lhxaR9o7mNN75VOBH3yZ0/vze83fe6MT/o6\n67cOVLKYpU2NOUndYPx+U7wL92RcHISlr1PQkQXYXgJnP4cB+iUaAimfz8dBYAhjWhpLAH2bz+fX\nSmLDfuBmKZfLkcHC+sP/DWhAaRYKBfV6vUj5Zey4N6CE115NFgHi6xsrVFpR0g8PDzo9PVW321Wh\n8FzVc7FYqNlsqtfrrVVlhTFyhoHYA863IXMHYITVS1+xIEejUQQn4h4AFJJ5ICnqlbgxQmPt3t7e\nxomwjUYjxj6XywVo8tIGrLVqtSpJUUOF+UW4M9Yu41BK7hZmrlAU7r4hm2PbwbDSqvbNcrmMmi68\nv1gsAhACQgHCPD/zj9uAAFbcb+VyOcZ0Z2cnWDAHGpLWzrdhrtwo9MBZwDcgG6AprVwOgC32ArV1\nDg8PAxywrk9PT4M18/oixWIxjI6DgwNdXV1FcGylUlk7HZyS9awFCjE+Pj6GEVAqlTQej6P6MLFR\n3BPdxfi7HkHewHLs7OyEsfD09BR1TmAqXRcDIKRn8OLxMakeBqQw/riT2Y/5fD4AkOtP1yWuf7Pa\nVsCJA4Y0/iKNN/GWZWmnQpff8+AgO+lDhSmtKlT6595SpekDysR68/vR3ywl7Ao76/mDW9nQAAAg\nAElEQVSyhGlWS4HIS6/T936uQvcxcICSRZungOKn7uNzlLJN/h02hLT5UMOPAZxQnAyKGSAhrao8\nkh3CuKEI3bJZLBbhTsnn81GqHTcBcRUwBE6XkvYJPetl6z3+gmqs3P/29jZqTvh5KcvlKsaDfUYt\nCIIBPdiVzB/SKgkKdcODfuMuwLqVVkrZ/dXcj8A9Mpomk0kAAmJ63KID3LDn+Iwx3tnZiQqgjAVB\nxjwzAMGtQU+9Zh4BMhgngBqvTSEpsnlwH+zs7ETNGJ4XCh7Fj3LfVuM5sOR5ZndnIBsZAz89GmXK\n39fX12q1WuGmGA6H4QYjaNtTZgnqdiYG5cfe9zAAmBdq07A+fC1Iz2sAlgewm8vlIq240Wio3W7H\n+nj16lW4aLmuV8SFMcMooC/oOhgNwADsHzVvAOLsXZhBlxWSIijZdYUbPbCr7p4qFAoB1mnuQgJM\nelFGD7xl7QM+PF4OGcG1GS+CpYl9ISiW/eHxMWnbGnPiSsxdIh7TkC46ab0UOJPg3+MafId/afxH\nqjy9OTPAPwaU+6TX8gXi7h4HTy/52TYxKylT8VJLwY2zPlnX3XTvrOv6tZ1id6Di4+LzmMXw+Liw\nCdwiSGOFENbp+POdnxqbX6rhz0WJkqWwWCzCGpJWligxG+VyOVJqpVXAN8IWoYxfnO9LK6HE2GC5\n4hvHwuS+xFfk8/moh1IsFlWv16OYGvcDNCLEPQDQBae0yjRAKHY6HdXr9TUrj/7i2vDgOxQ+Cg6/\nOKyNW+XdbjeEP2OJT97dtIwL93d5gJ+fIlRY1s5kMF5ck9+STutWJcoUNqtYLOr09FTSqlDY9fV1\nKB7fl51ORycnJ3r//n0oJZgwYgO22fzcIU9vJh7D9yTfA5yyXjgc0vc88w/jSNYPFrhnllDozEug\nS+sy9+DgIOrfOPPg4A45AggmPdzlGSxFu91Ws9nU/f19VCTu9/ux/wgy9/kk9orrs5ZQzM7wM8/o\ns8PDw4jLce8CwJk16UYi/7OPdnaej0mgH5xFRWA8wB+d5CQB1+CejAX7kixDXJsYP8TveE0fjIub\nm5voB/MNm/WSQbm1mJM0kAzBmga0+nedqXD6yq1oz/ZwMJA1CJtcEln9ZQLT6wKsfHO6/y9Vxs7S\npACB9/k7BThZQjYFSemz+r1dsfv7P/X8PAfPynUQHg5OfFxShmMTI8K4QkO6PzpVMOn4+3h/DA2B\nRL8PDw8jU4D6ByhBT/ldLp991QhmSkZzPgxCAqsGBY+QcaGAi2M6na5Zk/QL0IQFisChgBrBf5LC\n3eNCzFlHZ0G4TqFQCD85+xbQhKWFYeGl2zn0D+XnhaoQvrA3zWYzrgUgoEAdY+/rM907PAOZM3t7\ne3GgG0CDOapUKlH0rlQqSVK4tegrcQfEAJFxc3Nzo3fv3uny8lI//PCD7u7uIn6kXq9rf38/Yg16\nvV5kP/kYe6G2bTVYscPDw1iPBPUSVwPbRio8MTbEHLi7EtcCTCHl/lHiyHXmHjcRABW2AuCOwscF\nhuJ3Vsf1ibt42Fs8F7KN92azmWazmY6PjzWZTHR8fKzFYqGjo6NwMXqqdKPRiNTxQuH5ZOu0lgtu\nU+Qj4AP3JQwe4yqt13hxQznLbYlrCMDMOiVdn/nwWE7XN8hlxphxhP3hc4+fkrQGwIjl2t/fjwwh\n5hhm8iWDfavl63lA90PzubQKQnWB6ErLgUUWg+LXdbDD36mLZhMYSAEIgtIXd9YC8b44EEgVtH/u\n/dv0efos9DEFKllAZxNweak5s+XWBe+l90xdPz4ffo6Fu+AQfq5Q/FousDb1+WNgT9iUtVotrH+o\nbyq7OvPQbDbDZQENzriSscN3AW8IGhiGXG6Vosj8UrgNgUCUPcFqHniZz+ejLsvt7a329/fVaDTW\n4lVQADAjZAwBFIbDoer1etQBWS6XAX5ggLrd7loRKmf1KEzn4wi9vVwuI3DYs3CwwLCqOYBP0pqf\nX1qtDYQna3WxWGgwGOjh4SFiGRDAFFAj/oXnqtfrcaowMqbVaukPf/hDBC7O53ONRqMoZNfv9/Xj\njz+q1+sF4CN1tlqt6vLyUu12W+12O/bCYvGcccG9ttkIcsXNJK0OoUTRu7zF/UOslLtBmKNcblWA\nDyXs4BDXBNflmAHuhbvC5QigGpnlgdmcrkuwMy7AfH5VCwS20M+HAUjAamL9z2YzHRwcaDQaqVwu\nxzEQMJMAq8lkEvdz45uGrIS94Tlx8VF1OmX9XD8CQFi/BOSyVyhS6CnTjDXzcn9/H6wpoI9qyQTz\nM1/EscC0kGnkzOPXX3+t4+PjMLCYZ+YQlmfjmvvfX8Y/3Vwp0lk6zmAyeU5rO13kFfNSq8iBQKqQ\nnf52sOEgCMrRQYffnwXCwBLkBrJ0xOl9cX9g2md/nQVMsoBP+ltpvUaGC+D0en6dn2pp35mTlJpO\nWSF/HhdeCBL3xSL8fbwZYzYcBbLcr5zO6bYbFs9i8RxAh6Bwf720iiPBZUHxMqx1D8J0NwiBbbwP\nPcrrg4MD9fv9KOXOukSQME4cm+4WH4Dg5uZGR0dHITCxdAjgw3WSy+WimuXBwYG63a4ajUYISkkR\nJHd/f69qtRr3WiwW4RqRtMYMMBbEFJDV4zEguLiq1WqwHygSrzXhbiesa99fy+VSn376aQAUhCvZ\nRygq+ozywJ2B0L2+vtY333wTypI6NFSTHY/HqtfroQhvb2+jGNhy+XyOz2AwiL4j8xaLVdDzNptn\niuzu7ur9+/d6/fp1GG2uZDgPyt1S0spVyZ5HNqXBtMgAXDPScyAse6VWq625/AgyzefzGgwG0Q9n\n27kmv+V7xGF4GvRyuQyQvr+/H5VU2Uf0m7lvNpv65ptvdHZ2FnILlxJgmj3hx0fgDkLf4M6RVgdm\n3t3dqVKprAHy1IPAPfkbdpT1RGaTFxcEgPs1AGH0zfuKTGNM8vnnEgMAGK5BWQCO6vj222+1s7MT\nxewwYvw8q01ta+AEAYHQppOe1SB9GKvBd7MUubsA/DPpw5RghLazACmY4btu+TMJULgOQLyxgNOF\nlLb02j/X+k/7mgVwnDFJFXcW+7SpIeRTMOL0Yso6pffgOg4IpXU3TfoZwjllqPwZfU4+BubEQdx0\nOl1LpcPSL5VKQeX70ewoIKwyBMZyuYyaJQh9ytYzPgjrQqGgZrOpfr8fcSfD4TBiVVB2i8UimA6s\nKazHnZ0dtdvtAERuXcLUULW1Wq2q3+/Hcy6Xyyhc1W63I5ZAUliDCORGoxGBvr5e3Vr19TYYDNZ8\n8oAtTrf13wAgAFyMVSrEp9Op/vM//zPWcqVSCQFcrVbX4gOQLWQKUTQLvzsgA1CFC8c/90qg/X5/\nzbWHmyCXew5O9HohR0dHv+Qy/qBR7wPXijPZ/E1AqbSqiwIwATwiD1xBUmUX5gpggouDNfn27Vt9\n8sknwUgShE2sA4odoMF9YDYWi+e6QtKq0qqn4cNQ5HK5AB7sj4eHhwBkvV4vlD/rtV6vazKZRMaR\npKggm+4h1h5AGcaPezvDgovPDdrlchnB48wB44keQ74gi3AxszfSEAWuSeVjjCxncdGHsKcE8MJ0\ncZ3Dw8NgbnE940ZirwKAmLOsttWAWP4hUFKXgH/XrWRpZQXR3NJ2JZoFZJyN8Ws5SHJ2xBkZX1R+\nH9+wriz9WVIAxLU8L9yfmetmgZYUfHn/0s9dIPgc/Fxlzqby1L0UlHDNtH/e99SNln7XY4vSefeG\nsvONxXxtu7E5sRIkhQBeLp/rD/C+pMhSgfkoFothJUrPzwjVSultDtiisBQsIsqQrB4UI6m9pGfu\n7DwfKEh/cEEQr0EtDqhed2EiWGBwmEcCNu/u7lSr1TSdTsOHz/4ikJEGNcxa4jVgi7VArA0CDkuX\n2AOvnYKlxxxIq5Nccem45f7rX/86XCmwP5Iik8iZPvYBLrFcblVTA0AjKZ6T4l+sZ34DU1AulyO7\nC/DEHACAyOaAcdtWA2zwXF6/hXFgbGHZmFPfwz5/Hk9xc3MT68xrdHjGCsCW/tzc3IQ7j2wXzjbq\n9XrhinIwj5uk1WqFiwLZ74wsCpcCf/P5XCcnJ5pOp+HeIgi03+8HKMNw9aBPZ5Wo0QIAd73D/mbO\nPY6MuA/Gmf66/vH/CfJlHeFOJ5CY37u7ibgxZ2gB1Bg36AHXdQ5YyV4i/RkjHpDnpRKQd5vaVg/+\nk7TGYEgfnh+TKuAUZCBkstwt/hsaQt1ZE0lrwpc+eLS4uywAH1ngwMFV+s+BA8/uzEUW48M903+p\nYua19yvr2ik78XObK/7U5w8CBiHzHbd6HYT6c6Vj5N9HKaSMTdY1/ydg6/9lIxiQvkNNo+xIU/Wx\n4IhxgAbCAeEB1Y8w9dLwbpGjkIlfkVZr59WrVyHIpedTcQeDQWRPYPEhuNxnj0JmfAEY0M4ooMFg\nEEqEg9QI5iRyH7CBhYg17a5arn14eBisCs1ZDBTcYrEIqh6GD6sv3eOpLPjb3/4Wlm+hUNDJyYlO\nT0/XXEMoLa9+ydwR0OsAiz55bASKgecEgKHIYbMAhhR8k/STgYO/VAOM4PKAbfC96jE6HuMEGwFD\n5W5a3FiAXSxyxo+6NScnJ2HgoGx7vd6aq4IUYAf3vq88VsUrGvuhlPShVqupWCyq2+1qMpkEW+Lp\nuE9PTwFW6C9rgvWSMtej0WjNPcPzo/xd7klai0NzPUffkROMw+PjY5zf5HK10WisudbpFwwv1wYg\nOcuTpm17kVMYzFwuF3PF34wV90X2URQuLSbnbSsr3gUzDwztJX1odTMZKUDwICGaT6Jfi/eyfiMp\nQE0qBHyROFXIc2SBkPTa6Wf+/K6gsxiILDdJyrD4okmv5X3w/joI/Cmg4vEhWWAoBRwpCPN7ZzEu\nm1xDKMgUsLq/OmVRtt08IA+h46mg1OgoFArxXTJNACJkCBC8R4Go8Xi8llGCUPB/HPXuAuz29laD\nwUDNZnNN6J+ensZhf9KKISOmBLbHmZLJZBL0d+qnptrmaDQKQc6zko2FUmYuy+VyWN4oddYbIMuf\nk/vCNAGaUAoE5iH8vVaGGxCsZ0+rpBLrYDBQv9/X3/72N3W73agMSjVcwKa0MjA8UwX3F8oB9mRT\nqXeeCcbED3sjSJR1vq3GesbSJbaAZ+RzACdrfjAYxNohoBmXg/Q8fmRD+fWYM8aGeKD0b4JtuRaV\nVtNqvMR0cE3WH3EsXJv5lBQxFa9fv1apVFK73dbR0VEcE8EzEPtCn2DDPBjbAdR8Pg/mhnXD/WFS\n2X++dgEQACze93FCjwH8fC8wRj62MGEOGj1+krmAHQG4AaQxmthr3ieeiecjoBZmiHttaltx69AY\ncG+pUnUl7r/jM/6xoJyF8e9nKX82FQuGzYdA8cUEgEoVa9p3LE/pw7LqHheR9TxZ72cpewc0KRjb\nNIap8v+5TAP3YB7S+XDWIgWQWc/gzJNfw8fvJYDnAiprPLbdoO0BGghcfL7SszAjtY6qkCh4LDgE\nG/+7heZjARvglpOn8y6Xy6g1sb+/H0IRv/KbN280HA7jaPhGoxFuFKhlD1ymmqunOXJPQIUH2tE3\ngn3dtYL/G9DPZ25NEZ/j6bX8lvidbrerX/3qV3GGiI8Zc4Hy5PestcFgoLOzsygshsAmXgda2un3\nfD4fisXpbJhExgwXM2wP90bOwB7QcOF5PBtjtG1W0BUmlq9b4SgaWDK+T0CzK02ChpkrAsYZA9ht\nnhngjkuBAGXYht3d3Si9zlxdX1/rzZs30adGoxF9ocEMMl+4OCWtncHT6XTimXG/cShmt9uNzDyu\nybPhBsMgwB0mPQf4NpvNOCfKjT/pue7NmzdvdHl5Gc8qKfa2tJK9i8Ui9hDrhWBtWE836LzyMKUO\nPFUckAcjybh7yARMaKFQCPY1NRB5loODA43H45hTCklS6mBT2+rBfw4ooJC8ucUOCOG7bHoXWKll\nnrp9UuXHd3jPfdQe7Eo/HQihGFIA4nSZswqbmJWUIXpJyaaAImWMfKz8mt4v/5f2fdM9nZ3wBZgF\nJl5ik9L7cg3/XRZTxOc01oF/n+fednOLjIOwUOL4xVH8XscD4QDVj5Xn1pnXG6FWiVcYJVMGId1q\ntXR2dqYvvvgisngIQAWE397ehvukVCpFJolbw2REAJxIBcflgv99Op0GGCDd2EGBzzkWrbQKlE4F\n7c7OTpQSdzD79PQUQcOz2SyybebzeVSklVaH0mFY+BwBkM7PzzWfz9VoNFStVoMRYe2hXGB1UGQO\nynyPM1ekIrsrjWdjjnyto9Bhb7i/P/c2G2NQLpeDhSBo11kCl6EoYjJxcElStt6tddgHfo8iWy6X\nUXUYxoCxef/+vXZ3d1Wv19VsNjUajdayyjgzygsDSgpWAVCFQQkDgKW/WKwOzzs4ONDt7a0eHh70\n7t27tRg85h+w6mAY+QnTAKPEOVR3d3ehrImpYT/3+/0oYoisA0x5zA9Aw8FAr9dTq9VaY7Jo9I1Y\nkuVyFe/F/f2ZMCrQA/SBE8HZS8h25kDSWlzNcrmM88OoZ/TS2t5aQKy31AJOYwiyPk/f5/8UkPji\ny2IZ/LvO4nhglCvh9DXXkFYAIVXim57TNyefu9spvY//1lt6Dd5z4JAFRNJrp83dKVzPwRx/p/dK\n+5mCEx83roew8Of0+ffxcJCYPvfH0BBO4/E4/NZsRKwSBCBWZblclrQKzgaceKbL3/3d36lQKMTJ\nwFiOg8FA7969Uy6XCzAEa0J/qtVqWPC9Xk+S4vvSOgtJH1EmKFmyD3jP0yA5NJDnXi6Xa4oL1nF3\ndzdAGMGf3N+BLPvAA3BdydH/YrEYVUW9FDfZSxgZKErW1Ww20/v379VoNLS7u6tutxtjx72RAfTD\ngzs9+JN94EIcgQ8j4IwI+yVlx3iNhc71PSB5Ww0lg9vK00xZJ4yBpwVLK8DCOHrlUa7nAJaiXe12\nW2dnZ3Fd9grjTAG7k5MTSYpA2vl8HooXF9xsNotAXGmVPdTr9VSv19eU8N7eXhypQAYOIGmxeC4R\ngDHB86XMhJ+fA+PkMUyPj4969+5duMekldxwhp7vL5fLyIaCuQAguvwnEwc56UGurGncOP4ZrBHP\nwDrH/ULfYLEmk4lyuZzq9bokRYwZMSVkatFv2CbG2NmzTW2rbh0pm/Z3xZYCj/S3qUDDqvNYBfdl\nS+spwalyS+/rVlaqPCWtKckUWGSBCr8vCPalfvj9U3bFFTXP5a/93mlf0mfIYlByudxaRDv9SZmP\nLPbCv+Nj6s+JcuFzR/9ZIMfvkQIlf55tNq/DQq0Q6FwKauF7JVASdwaCQ1oVYOPsmPv7e3399dex\nVohFYbypXAp1DWDwuBO+32g0tFgs4tqSgrKFMmd9jUYj3d3dqdFoqNlsBuhxgMj/uVwu6pnAbGJl\nQzFzP7IoHLR4MDBKnswfPzgQBc/4YdFKq7NbsJIZjzSwtlKpqNls6vr6ei0bqtFoSNIHz+fWqwMd\nVzbuapzPnzOg6C8yCFACO+W+fGdWuB7MGHEV22qpocG6Zd0ABjHyYEMAgrh4pBWDxPyWSiV9++23\na0wV1jfX4iA9Ulr5h/wsFp9PEB4MBjo/P9fd3V24DTzFmPUFI0gcFUGgzLfvHwA3VYmZ0/l8HjFT\nGBOAy1RnuBuL1PDZbBaMI24e2CMv+4+MA3ABInDBIPfo2w8//KA3b95Ef9jT7AN31bpBzjjB9HgG\nEtemfguAnnpKzt4AUGFrYHy5Pr93eZDVPgpwIq3T+iwIpzRd4XsQTcouULnSYxMQPlkK0wWCtAqo\nhbb2Q4/cbeD39UlOmQlXpOn73g8sTPrpz5QqbK6Z+il9nHjtQMw3tTMpm1gHru2shlf4pPk9eN/H\nm/fSDYvicPeZsyNZTIz/7QDJx3SbDYsJ94yzSoABXB8Ezs3n80gfdmsd6hXQ/d///d8aDAZqtVpq\nNptRhyNNnweoLJfLtawEhAyFne7v7yPlEEsWAYSVhr+43W6vgSlcq1j1rMeUGSBFkbgJUkM9JVLS\nGlDy9Nzb29sI8JW0JgMeHh50f38fwg5g6AYDAtD3TqFQiGqZBE0Wi8W4n7vPABWAKBSD19IAnLBP\n6Etawh2WCreeM4jufvJgSErE89ttNTKwYDZgTIrFYsQhsa4A03t7e6rX65HCzrNC6e/t7WkwGGg4\nHEbJ/+FwGPVDzs7O1gKxl8tlKGPiue7v7zUcDlWpVDSfz6Omz+7ubqSvshZZ4wBRMn6YO+YZ5sVT\n9InLcLa7UqloMBhEufpGo6HxeBylAxgH4mJwJ+HOgaFhrVGUkb3schX2ETewA2FJ8WzUYEEuLJfL\nyMxjLyAvCE5ljJ2VonEvntuBXKvVCtACq4ZRgEyBFYKB4bkZ75faVmNOUJ4IFDa/B/c53cz7rlSz\n2AwEv3+WZWUDYFLLm3t7rAnXcEvI3RJQnL6oUrZDygYa3I/fg0AdwKRjkQKllGHKAjZ+nbRPKUAB\nlDhtjb/XY3183NLX6dz4M6VMjbNPqWsuZWf4Ox2Lj4E5QXFj2VD0qVwuB42NJSkpAmWxPBC8jLW0\ncgl88cUX+qd/+ie1Wq1gndK9IX3oBhuNRiHU6AM1RlDOpVIp4iPYF36mC/2Bzsdq9APTEGTua5dW\nh4nhk0epsdddQLJm+A1ABmUNa8Hz7OzsBC3PIWysVUBduo4AB3yOZT4cDvX+/fu1UgPOGhIvwFgB\nanzNwtweHByo0Wjo4OAgik+5C8QZE54ZS5lsCL5Lhtc2G3NJGjGHWKLkAFUen4b8QGEBaN09BoAc\nj8dxBhVxGJIiriOfz6+tH2dEAIK1Wi3S9QkAx/AkuBlQCeCRFO5HQClj77rBi6Q9PT2fA8XexV2H\nOwcmVFpl72GU1Go1LZfLyE5yBtNZG9Y6ip97M+YeR8Jev729jfHl3nt7exqNRhqPx2sg3wEO93AZ\ny+f8o+4R8+s6mO8CUJDN6RlgxKwhI16qcSJtsUJs2lLGBBTH9936z7qWC+RNqYcpc5AiT3/tSJL+\n+b0duW5iJFIXS3rPTfd2Ab0JRDjY4O9NIAHFkY41/cpS6ghJrHi+70rQ3VFZ983yJzq4yOqj59an\nv0+BKb97if35pRtMEBtVenYh5PP5UDKkCyOAKJzm7AfMQD6fjwqiHORVq9Ui8HN3dzdYF8YHC2s+\nn8c9Eeqz2UyTyUTFYlEnJyfhIvI+cw8AELUKoO9hflACnp0EW7JcLsMq9cqQktasWd8THrTnmTaA\nEFxhZCM4g0rQYy73XHmWYEiYDE9zxF/vIJIU6dPT03hO3zdY1MwPgtWtbGQN4BNFSd+k1UGDrFmY\nKtaOK3Dq1my7AJukYCoAbACxQqGg0WgUh9tJK/eJB3HDbsGEuUzmkMTLy0uVy+VgJAj4Rg4/Pj5q\nOp3GuLgrHUCDAmZvoNilZ6DDWVGsPW+AnkqlotFo9IHRw/ol7qPb7QazeHFxoU6nE+AXdoA1zLrz\n1HjcvHwPVxagB3ewsyAOcL0YmqSQCa9evdLDw4Pq9bra7XbEvnm6PnKWv53h5zmdQaVfuVxOo9Fo\n7dRo9qob6R4wXigU1tKikTUYKZvaVsCJU92eNphaOG5ZeCaKNwSbMzAgM2cJuD4I0ZV1lssFwcWE\nuOXvAIrN4ddz5erfow8pU5P1PCnocIDm4+hj569TMObf8f5kuZ0khYJxdM3fWYDP++8AhPt5H5jP\nlLZkE6bsioMwd/850k/B2bYaQhyFlcutykBjZeNOAajwTNCiKF18ygiVSqWidrsd6YcINeJVptOp\nZrNZBKzSB5QFJyNLz2xMr9eLVMJaraa3b9+GO9PvjRCjj3t7e2o0GppOp0FLQ1F7jQ4XuoCKUqmk\n2Wy2JpTc0sSqRYCxdgigRQ5AkXNtBGGx+Hz+DrFSBEH6PgUEHB0dxfoiawmmxNlPAgelFbhBuDN/\nKCHcMMQNADYoh88hfmQg4UbiRGbO7Lm7uwsFSbbXNhvZIex9mCDfo8wxQALlieJCkfEe4zgYDAIM\nE4tD7AZjBAPpLnh3Ly8Wi3DjLJfLMBL6/X4Ec3M0AfEcAHECl5k31loa9+LPiFtHUuwT2BxcOs4i\nciwBOgXQ0ev14m8H0hgZPCfj5rE9DiQkRYD3bDbT6empJpOJptNpAJPHx0c1Go1YkzT2jfede7Af\nceEir5En7KXHx8co1AazA4BkXUsKA5R1/ZJrZ2tunSxLN3XneAqSlJ2OmwIaJpSFxaRDO0orq9QX\ndupy4H4eCOclqlOAwYZyxsSpOleeKRhAADtLQz8dIHh/N7l1vG/+dxZQ8XFM54HFJWltTL0/L82D\nP5/f0/vA/LoQwFqHkuQfY+kuH4998Ptss6X0rJdBZ85gQO7v76OgExYkjAc0KoAGCrjZbK4FVO7s\n7Oj4+DjAy8nJSaSkLpfLOCCPOAbp2SKq1WqhdDudjmazmf7+7/9ep6enkhQH4S2XS02n0wAaktaE\nv1dS3d3djZNoHZTzDDBApVIp1pfT/J76m84lhekQdMSI+FrwmhUEJKNAnZ6/vr4O5URm0/39fZwg\nLSmYKAdAsEjul0cws1cbjUb4/AGDjFen09FkMlG73dYPP/ygm5ubADjL5VIXFxeRtYWSZPzciNtG\nc+UKeOT8J0+zBsCg7JkbD2yVFPIXRUgaMHvk5OQk7tHv9yNuBVBEbAYMBNf04PJGo6GzszPd3NwE\n68aa4eDFYrEYgbOk5zso9Mwfz4LjPY8hgSnkvB5irWAVJUXfWB/cI00GgC3K5/NrtXZYt6QYs/7f\nvn2rcrkc6485c28DzJW0SntOQYfrHrwH6EsAp7vkDg8PI5vPjUx+A4PGXDJ+7KWX2tYDYqV1Bc/f\nWEIo7ZRRSH/rn6EIXGm5j5fBk7QWDOQUs8eSLBaLUCAoTfzCLhxTHzLK260wru39T10h/vtNgbbp\nePnfLBJvzuakLEvaAIaS1hYb4CFlgXxRpyAl7TPvZ8WKOABjvLi/F6XyZ/Q5/RyrqEwAACAASURB\nVBgaQtAtSmha1rIfxrdYLMKKZtNiSSEYeebpdKpKpRJVRaXnYk0eT1KpVPT69euwgNg7AAesQs73\nGI1Gms/n+rd/+ze9fftWrVYrghiXy2UIc5QlWS+j0Sj6xj5YLpdRsRPhyjryIk6e5SEpmA2eQ9Ka\n0PQ4BrKYqEnB7wFIAB9PkXT25/b2Vl9++WXsS2o11Gq1tb0H2EEoSwqZ4srX05v9cEGYK8YDpbBc\nLoN+LxQKEWB4fHwcmUknJydRTp9x8WJt22oem8A8eXXjyWQSrB2yCzAD8GROPU4BGcxRCADL29tb\nXV5eBpswHA51fn6uxeK55DqnHy8Wq9NwYV92dnYic6fT6UTgLWufjDkPfJVWp3sDtnDJYThIKxdi\npVKJWj7D4TDqlPhZP8g6YrSY93w+H8yaH8iH7PD1BIDCPeJndbn8ZU4oXEiWHP1y17yzI8SFuIz3\nfcB3mR/YUUA54JQA8FKpFDVdYFZJX/bzqkgj39S25tZJUWKqJFOF5cI+pft5L8vNgXBDODoq5XO+\nm2Y98D36SzomQt+VK6xJFhhx4f0SMKFPaVxH6pZxgJYq5tTl4d93MJU11twb4IVic6Tsvlq/L8/v\n1/F54vv010FICnjS3/nYpCAn7f+2G9So+1NRjDQsQBgqgC3CCKBBsKHvFwJZOVTv4uJCP/74YwSG\ncvZHLpeLIEPiXUhJxYLB572/v69araa//vWvenh40KtXr0Joe+wHghOhh9Chb25tMf9+ABlMEmX5\n3f/te4rXuJeGw6GKxWKkXQLipBXoeXh4CKscEOWsp6Qo1MYhh7hqqtVqGB300ZkVSWtF87xuBGBP\nUsTsMDfMe7PZDEEMMMNifnx8VLvdDmVJo6YMwv0l+vuXau6WSCl/FA8xNrBqzpowFx4gDMMA4ISJ\nePfunY6Pj1Uul2OvkJ1TLBZ1dXWlzz77TKPRKNyTBwcHsRboxzfffKOjoyPV6/VIG9/Z2QlwjkzL\n5XJrqffSSuawTlHSkgJMeTgBVYNxpXjQKsqfZAqehzgUSeFWxJgAiDBuKHJiQZiTXq+nh4cH/frX\nv5akSDtnj7L+eTaMHi80yrOXSqUIdmYfOiD36/ncSor94fLMA969xIe7tja1rYATj1lwt430YexE\n2nm3/gEkDiiklQJDuXJNj/zmunzuAiOrD1zfB9s3nP9L4zKklaWfXjtlNHjt/lkfFwcojpb5LIsV\nSZmctPlvEBhejMp/x/3cveJ98ziStK9Z/UrBafrddG2kQMTB6ccAUsiAkVYCjHXAmsEiQzg4XYq1\nDgXNepNW51DxvAiDf/iHf9C3334bawX2hriP/f19tdvt6ANpxFg91WpVo9FIX331lSaTSQAXrBv3\npTsd3el01hQx9wYc3d3dqV6vhxXK3iKNFoYFS4zxk1br7Pr6WoeHh6FsfJ7z+dUhnyg7LGqAgLRa\nQ/V6PWIXyAyhIit9H4/Hse889Zc9uVgsdHR0FGCDeg9Y7cgQD5hlvljr9C39HsD/+vr6A1Z229k6\nWM7u7pBWGUbS8/4kGBnAiRuPViwW1+q/YDzO5/NYl1jyFBvM5XKqVCrB2BUKhWCWYMlIb0UGeTB1\nv9/X0dFRlIt3txtgCAMAdpKibbTpdBrA9fHxUbVaLeaQe9J3QHy1Wv1g7SCzWS97e3sRsOqynmdi\nLzqYcR3FPFxcXEQsDwHysFoewwJwwPDwSs0eX8j3MCJcD7ph7gwZsh/WB1bIwwHQ+ezdl0D31tw6\nrpDTDroASj9LlZFPkvvOpJWljtU1m83WAAq/ow/4wX2g+S6L0pUv12BSPYDJla0H1aaKmXu5snfa\nk+s7cHFQlLp2fJy8L6nV4wvRf8OiSZWA3yMrONnnk9+81LcUWPn7/tqBj49ZulY+BmAirTIVcONg\nFXq2B0KWAD7qkMzn8zgojTWA0EIw4mKk5kSlUgnL0dNeiRNZLpex7huNRvyWWAq3YJ6enkKgeuEs\nqmQCljjQz0915R9pls1mM6xhd025yxBAAUNBPQj6PR6Pw4XFfsAy5jfOWCLM3UXg1vpyudRgMNDe\n3p7evn0bzwvVT1o1QngymQQIYYwKhULEijjgJAbC4wPcTZWyOPyPq8MpdbdWGZeX6O9fonlGEuyF\n9GGdKQ/+Rp7ALnn59ul0GinxMFme9XF2dqbLy0t9//33+sMf/hDKejgcRqBzv9+PIE/mjXk+Pj5W\nt9uNOiScVox75u7uTq1WS8ViUYPBIIwFAHValp/4kZ2d5xL+nU4n0oKR+yhk4pd4z90bkuJ+HiQK\nkIIxlZ7XBuDfD8xzoxcWBObPARB9J86DfUUBRMYMdvPu7m6NDeL5keNe/t/dQ4AtXnvJiZSNcp3v\n+iurbTWVOO1klqWPJe9AwMGHsyBck8b3sLj8unw3CwSlLhkUSpYSTJVmyoakuehZLhl3ofj104Da\n9L5Zr1M2w/uVunTSZ0mf09Ex1/ZFL62Qu89VKoD9dTrXzrD42Gf1LWu8/Rk/hgYTQJYMdC4Cgvk8\nODgIIeEguFarhfJlrIlNAIBAmcOQcGw8wrFYLEYMBW4Ip7xxOcGsPD4+hvuHPmDdsAb9YDXYl93d\n3cgIABgREMghfJ5iK2nt3g60F4tFZKwQZEs2gINo3D3ud6cRoOllvlP5AmAisBU3F+ABMElBNgfz\n7rYBlHjsDPE4ABoUghtCKDNiUrBqeTaCLAF6Xjxrm40xdMXD2ABGYMjcmPO4E9YfqfXT6TSClmlk\nrJ2cnOgvf/mLPv/8cw0Gg2BKAKgAQQAl65Vr4WKh4i8MJaAfxqJSqURtksViVT2ZfcFz5HK5qOpK\n1hmZVqQvs748sJc96QatuwF9f8CYeI0jGCtn3Uh9z+VyUXeGmDIYP0AB8ge3J64xlynEaaUl7N2F\nxdi7XGI9s6c9xsxjFmFX+NsNe97Pals9Wyd1M3hHnW2Q1svEuxXGIKZWPs2pJqevsHpSRQfVlDIa\nWb4xBxdZz8f9s1xOL4GiLFfGJjdJlmLeBBCymn+eplFm3YuNml7XmSaumwI2bz6H6Xh43IGDniwX\nltOKH0PL5XKxHrHEsFDoJzQtQsozCRC4WGBUfETIuNI7PDxUu93WZDKJVMydnR31+/2ImyAqHoVB\nlL+7GZ+enuJ8mvl8rm63G25OrCVSKBlnCrsdHh6GYELwLpfPgbSUt2aOUP4If2mVxlgoPJcTh93x\neizOjiG8YUYADSgoQI1n6cCccP4HLMty+Vw9ExCHAmUPnJ2dhZLyNObJZLJWAyiXy8V3OIeF50yz\nNVCeADpJEaPBWHgV3k8++WTNRbWt5q6c8XisVqsVGTweuIucxB2I0nTrmvFwA8hjrSSp2+3Ge5Ji\nTCljj2vNg289jgWrHTnvQAP2Z39/P5g0ZwkADfSL9UWwa7vd1ps3b2J9AiDQHfyeNSCt5BX1der1\n+hojAWjGYGDfkUmEnvOq5YwxDCZyBzlDBV5q5mAsAQAB+IBl1j2y3DPj+B5yDCOFNQ4D+fDwEMxq\nvV5fS8VmDIil8RiezDX3v7qCf2ZzS4iFwyJPLX1HY5uYDoQKloyDFb+uRyg79eXg4yUg4e+7snUA\nkirnVKikDI5b05uAht/H2Q+/p7NLKaBI3TJcK32mFHS4Ukj/9nv6WKRuIxfgjDnuN2ennFlKI8bT\nZ0sb8/sSRfhLNVKDl8ulTk5OIsuF01yXy2X47vnuzs7Omi+ZeYWCXi6fT/OsVCpBhUvPTAHpgq1W\nKwA8qcRck+A5XE4oU0/hhomBPgcIEKToaxZKGmHkYJ/r+lkkzmTQAEWwTAi6fr8fghlqXVrNMeuf\neANeu08fEMffDw8POj09VaHwXDDMLe9+v698Ph/z46npnmGD779YLIZ1fXt7G8IfC90L4j09PcX7\nWL8on1wut+ayIn6BGBgYgX6//1HEnJB2TtExrG4HA5wbhQVfKpWCYcLiBhzAXuCCgSGDWfnxxx8l\nKdJmHx8fY23A8mG4IMMBGswf4+3uBcAPgah+QCBz6udUSSt3hvScHUcc1Wg0itfu/gBosl48nmw+\nn+v4+FjD4TCYjWKxGOX5YafcZeiBtZ5qn8vl1kAzz0Z/ACH8TlIYTV5iA0bFgSLvM/e+L/gtHgnk\nN6wfhR/ZkzA+/AZg6s+V1baWrUNzP5srMGc0UvbBGRQmAgHPJk6DT5kIBitVtJI+uDdUHL9P2QpX\nJA5EssCAN79+Cgb4P1W07qvz370EZvg7BRlZjetmATLeTzNm6AfzkMac8L9fE+WRgke/Xhrz4iDH\n7+Eg7aee75dquGVgDs7OzjQYDGLzkx1DWiJBd8PhMKwNrJz5fB6xEIvFQtfX1yG0GQeEEkAH2tTn\nxil4fPsIUKx1qF5OFiYWAncR/u9cbpWBg58bxcp9AFP0kd+j/H09Eo9AITKAAGm1uVwuMiuYX0/r\nT/ekgxQ+29nZ0XA4XEt/JnaAOg24lKQVEKJ+htd7aLfba0fcf/bZZ+G3x0XEPFCKHaFOfBGuJ/r6\n/v37CMSsVqs6OTmJQE9Sc7ddhI2A0Lu7Ox0dHenq6ipOA6YUe6/XizXi2Uh+2JzLBVwjHhyK24R1\nhysBmc9rP4IgNVRTgwi5AVPO/AAYyQZCFmXJF+YKhvL+/j7AGcwKlXwdWFA75+HhIcA2fzebzTBS\nYD9IPwYMejyKr/Xl8jl+6vj4OM7+kVbsN3FvPC9yGMOEa7ZarQBGHu/FuFLbhfguZ7WRE4AXdDUg\nRVoFTLOnXFf81HlRW8vWcWsY5eSxFY5E0996EBwPDHp2IcX3+U0KBrLYAulDhsHZD+9TChLS5/D4\nDPrpit37t6ltYosAW95PR/DOmKTXyWqwTk5p8n2eAYbKr+mAwp89C8il7EYW0PIYnZ8aDxr9+xjA\niWddoHio0FgoFNYOOcPtsru7GyWn/bTixWKhwWCgwWCwxl543Q6sb66PYPCsKwJEocQXi0UcVIZV\niiIlkI/vEMCIAQGzgtDEIkqLHfIdLESEmbRiwjAqsHrz+byazeaav19a+bxRIIC2LOPB3WduXVar\n1TigjmsOh0PlcquTtzncLZ9/Ls3ebrdjfBHUrVYrFCjZQdyb+AOuT+EwytWnqdPSs1X+5Zdfxu+4\nbrFYjBRn5nybDWANqD4+Po60dWIwCKYkboffeQwKTGIu95ytxfrB8nYA7ewX4ABWgXF0d7K7X1D+\n1CFxtxTzQHo7MWGNRkO1Wi3WA6DdS9ATCE7NFGJbeCae1dk3+uSGA/EruEyIP4PFRJbTd1fqPHOj\n0VCn04lzoVibjB+yvFarBeuEnK9Wq5pMJlFN2TNnYDYkhSsOo/L+/j7cZ8wFz89cOpDyjCBneLje\nS4zg1tw6CA4HEbyPUk8VkTMKDgRSl0oW6+DXTpWk/85dLH7frGfwZ+G1AwN3cfjvHJh4SxmTlPUg\n0MrdIj5G/pzpNfxeKR3K93wD8x5CgeYpgmnfHYCkzFQWWPG+p39vcuP8FJjcdsMK6vf7kQmAlUDt\nABQNUf2eaocAl56FBMoxBbesK1wPUMa8pqIq/l1YBizIRqOhwWAQ1CruDoQUKdEUasOg8NRDt1K9\nSN5yuYx6LIAeZ994Bn4rrRgkzisBDOEGgbImawn2hH75tTillt/c39+r2+2qXq+H6ymfz4cwR5mg\nSFjz5XI5DocrFotqt9sajUaqVqs6Pj7WcrlUv9+P8a3X6yGkPXsCgMYc+pEDXKPT6YSV7ZYpv3uJ\n/v4lGrJhPn8u0e9W/9nZ2RqT4NY+INeroXrqPGvC63Z4wTZnWlGIHjuIvHalDQsAkGJ9AmaQLaz1\n5XJ16jPuEGS41xshoBe2P5fLrWWBejgBwGY6nWo+n+vk5ESTyUT1el2j0Shcgyh7AtUBTAAW2EMv\n4Y98KBaLajaburm50fn5+VosCvoRo8VdNhgrBNYyVgAvWD1YRsbOSx8g173AG1lJqV5hTny8/bON\na+5/ae3+jxodoqOgY6yolyxhV7QsSqeM3JriO6BQvzcD48qXhY6wywIqm9gWWrpZfqo548H10zgO\nn0T6lvbBn+ElZe1WRlb/WMg8twcN+xi7a8ybsyHuF5YUwWObruNrIWV9sp4hZcE+hkZAJ6yAB+7h\nXsDC5JmdYfNCR9DXgA2CMKXneep2uwECcDMwf9DI+MYp3EQdBAI1UQR8lz0oKRgKFAtgqFAoRF8Q\nSlDfMBewQghMF1wONsiawIXi1D1KbrlcrlXrRBhK64HVBEdKq+BAB0Kz2Swsxf39fTWbzRgPWCdc\nKChEAlLv7u50cXERgpx4nEajoXa7HQATBU0ALm4Zd/WgINyNhBLhux7UuO1gWBrujJ2d52J+9Xo9\n1mlqOLH33W2PnPOgaOYPNwQFxFiX7qLzOCNcQLjYiFnxfQRw93omHjuBjGbNHR4eajweS1oFLqOU\nnaFfLp/T3DE4nDWXVqwj7KjLtUKhEDVZWAMYAs4+k3EDK0l/KUro8nMymcSeRxawRpfLZRyWiAwA\nJPJ99iUxQwTtHh0dBTsE4wXoB0jDliA3nG1nHQNy3BD9OXFUW8vWSRed/4/iSRVQqkxZKM6+AAy8\npdZ/VsuiiPl70yCm13Sr3wGT38Of2Tcw10uZFq7LMzhIyWImuM5LLp0UoPhnWKr+fK5I0+ulTEgK\nrFDSCGTeS4sJcd2UMXFryIGLA6ZNjNk2Wq/XW2O2UEgwENStALQBRhCazmBxwBifQxmzdk5OTuJ7\n7AEUHkKLM3Tm87mOjo6iCJWXxnYBjFsHwPT4+BjxGg8PD2q1WmsBh7gzyJaoVqtrdTnu7u7Cp43A\nhb0gEM/ZFFxCs9kslJOnetJP4kQIGkawTyYT/f73v9cPP/wQSow9hZAvFouq1+u6vb1Vu93W7u7u\nWqAxwl+Srq6uVCqVVKlUdHl5KUnhbjs5OdH+/r7evHkTIMfL/hPUiuJDmJN1wZziwoLmLxQKQcEP\nh8Ngd7bZvMgce/Dp6UkHBwehHFlLgErWHd8FOOMKkRSH8rnimkwmobxgIJhfDmj0wxy5HjIFAIjC\nBAzzPfoGWIf1GQwGIWM5+RtgTUwjawPXBsXaWF/EbC0Wi7Vqube3txEXwvOVSqUwWtwgZE17Zl6t\nVtOf//xn/fM///NaNeFcLqfPP/9c7XZb5+fnMd6Hh4fqdrtxWOjnn38ewdWwg5QCIPYHME2to8vL\ny2AYiT2BGfYwCcAdTAqynjk/ODiIU6YxNnZ2diLAelPb6qnEtCzl5/9L6wBhkzJKYxj4TpblneUa\n8H6gbLNYFwdE6XX5rVuR3j+UtYMUaf3QQwdnfo20ZkrWOG1yl6TNGZI0IJVN71aFWxlZY+h/O8DC\nopa0ViMhC0ikNF8KPpzt4l5pHMO2G8Iml8sFQ8IYYEGgJBG+7trweBEsLNaSp5i6tUN6LPPmcS8e\nJDscDlWr1bS7uxspiqw7ikQVCoVw95AGncvlAuSkwITfE3w3GAzU7XbDUj47O9OrV6/07t27YHBQ\nFh7Y6lajjxFKJ5fLheLGmnSFzm+Ojo70/ffffwD6+R/XTrfbjbNvCoXneidYhC5r3rx5o/v7ew0G\nA5VKJZVKJXU6HX366acaj8d6+/ZtxLQQi4DlS6Ay92csOWsHFgHQVq1WQ1HPZjP1ej198sknkYm1\nzYZsfHp6CmXMmKcup8ViETVf/ERaruPGBkGfXAOWke+5oZTLrbJTkEUO5mEGiH9h70haS0vf2dmJ\ntHKKs0mrUgmz2UzHx8fBOEgK5o+znUjxf3x8DPbSi6DBRsIgIJs4gwfXln8OaCI+DEAIW1KpVAJw\nYESUSqUoFog7i2DqV69eqd/vRyl91iVz51lKrpPG47EKhULIClytHk8DaCNry3UlZQf8qAr2Li4j\nYrGQJ1ltK+AkK+7DLe9UmWaxAKkyZBFmgQ7uKX2Ydst7/n8KnJxRcfbCrfn0d/7dtG+u+P2+WNz+\n/RQEpQwFr3k+rIV0fLwx9vh1vQ+MlbNbXrUwZU1S3yKbzClePnMLPQU3jqi9j1lAMp3HFJBtsx0d\nHWk8HmswGITCdqHg6w7Wg/GhUiYC9OnpKYSB09x7e3sRLzEcDiNldzQaqVKpRC0T9+mjCFG0lUpF\nj4+PkQo7Ho8D1FSrVdVqNZ2dnQWFjyXocyophBBWEC4ghGe73dZf//pXXVxc6Msvv9RisYgDz7D+\noMBZgzANWHEECRP/QcMq9do73333nY6OjiQpDv1zS53rN5tN3d3d6eTkRJeXl5GBBLVNBsTNzU24\nXCaTibrdbox3qVQKN950OtW3336r169fx5hTeK9QeM4MIYC02Wzq7OwshPZwOAxlDnja29vT73//\n+yi2BVjbVgOIeDwMY+4ucNxjACxcKq68WNvuKuHvarUa7gP2ByySZ/nhpvHD8nBTEKcBIwIIhj3x\nGh2sO+aYf6RzM8/OuuDS8ZgmSQH6fbxINfbzcCRFsC5MJowa+8zl2tPTcyXj6+trDYfDqNHDs0uK\nDC/W2/v37yOQ3c8kIr4M9olaJ9PpNOKoCJAfjUY6PT2NoorVajX0kdc9caa3VqutBcvyTMwba5tr\neJp22rYCTlKa3xVsVpDoJqXjIMVBhLQOaNx6BxQ4LeX+u9Rd4BZilgvBWQxX8B67wqbyw5Q84Dd1\nJ2UxNamLxccMheHtpdgT/I1c29kIZ48YQ4+udjDibhbQONd3PzrXg4KV1ovscQ2fM5/PFJBuYkg+\nBnDS6/XCioaNcJ8rQph4Dg/ckxSvsQTxgTM+BLrOZjPV63XV63VdXV1FiifpuMRJEMSHAPQKrtIq\nELVSqQSAQshDaxP0JyliTVLgWq/Xw7pE4RwcHKher6vRaGg0Gumvf/1rHCdPaiL7CuAD2wPgefv2\n7VrMCowSgcO+NnBd4c6iSBgKBcVVrVY1nU5DaOOeQaGR8nt/f683b95oNBrp6elJvV5PhUJB4/E4\nqPJOpxNj9Omnn0atDVwzuHdS9x3P0u12Va1WQ8AjE+/v73V9fR2gctvZOoeHh+Gy5EgF9rC0OoDR\nDSVpxQjjXkFJA3B2d3cjBkhSuPsA6ovFcw2fXq8XMUH5/HN1WeSpx+DBWHAGD/dmfPleuVxWr9cL\n1xsxFcQtwQgQFE0GjPcdNwgKHtnJ3BLU2u12Y33xTNSD2dvbU6lUisJl8/l8rToxcrXT6ejo6CjO\nH2ItMZ6j0UjHx8cBPur1ui4vL6O+j8e7nZ+fx2GhBOQ703V+fq52u629vT1dXV2pXq+vMVboacax\n0+lEgC/PhPwH3HmsD2Powc1ZbWs8OANGJ1HYTtdlWcbOBmQpb67t10FIMNn8HmXBAvB/ruydyXBw\nk7o6UpDiAMcngb+dqXF/XKqsX+qLK35Xzi8BEw7mInCLDIPFYhF0o48/izIN1iTYy5+dZ8ByT8EE\n8+3X8YBQd004NZ8FTHh2B3cfQ/OUSihwrHFfC6PRKPzADr6wcDgYz4NLAYrlclmDwUC3t7c6OzvT\nn/70p/ju7e1tlLQHbLhf/+npKep7IHQXi0WcRZLP59XpdMLXTroshZlarZZarZZ+85vf6Kuvvopq\nmaPRKGjx2Wymq6uroJwBDAAyZ/d4Pmjsg4MDNRqNteDfQqEQTIVboG4wTCYT/fjjj8rlnl0nw+Ew\nhPrFxYUkRQbT4eGhzs7O9P79e3377bcBAPr9vqRVTZL/+I//CN95rVYLmfX5558H68VaPjg40Bdf\nfKHf/e53uri40NnZWSjw2Wym9+/f6+bmRt9//30E4ZLRdX19HbVWcHdgFQMMt90I1iX+AorejRNn\nirCK2cueKeUxKVTVxdKeTCY6OjqKa/7444+6uLiI2kFY5s4w4jJjHj1biv56HCCFEXERSgogT5+l\nZ4aD+Ke7u7tIdYcRefv2rY6Pj6OEPXPHeOHKBATxOQBcUmTqeFq165G3b98ql8vFsRKMYaPR0HQ6\n1XQ61W9+8xstFgtVKpUANhcXF+p2uyFDOYOo3++vudGkZ7kLY+W6kdL2zDdsO2wSbEmn09HT01P8\n3gvmET/j7BGuXN/LadsKc8LgwhI4kyF9mJYqfRgMK61b/XyH67pVz2/9tVv6fi2aU/BZwa300/vN\ne/4cXhjOn8mZGpRGCsx4rrRvrqjpVxovs8kd4jEv6XP461Tx81tnmWBIfEN5vrz7MRFU6cZzMEJD\nWfvvU7eS9x8q8mMAJ/1+P9wSrVYrzt8gMBMhvVyul3KHlfJzeTz1bnd3V69fv9a7d+8ivbHRaCiX\ny4XCI5J+MBjo4OBA+/v7kVUBhYy7wdcY1iL+9cfHR7169SrcL4+PjwEKYDRms1lkTsAOMIdQ3vyT\npC+++EK9Xi/YNAQejAZKBGZjMBiEUnMWkNoh9N3X+unpqf7xH/9Rf/nLX0Lh5fN59fv9YCg+/fTT\nOHSRM1twnR0cHKhSqYTVPRqNwn1GX87Pz+OZ8MMfHByo1+uFUH779m3EC2EF39zcxLx9+eWX6nQ6\nYZWyfofDYViv1BSBweGe22wAWlfw7H9iJCStzbu0XpSSNePZHZ6BtVgsQtFjYR8cHOj6+nqtjIHH\nmQCaYFIA2/l8PpSfByJLK6UMI4ALSFply8xmM3U6ndiTXq2ZPsDUcQ8qJj88PB+0ScAswbLj8Vj3\n9/dxDZhIDAFYT3ednZ2dqd1uq9lsBqNUqVTimAqP/bm8vNTR0VGwIrBOvIdby41swC9Gw2g0iirR\nuNVcxzw9PQWI9/pInIPkxlej0Yj78Jywj8zXprYV5gRKlsnG7eJgwpVk6m7xQD4EEA/N4nVWw906\n0oq54PceMCd9eGZOmu3D7/htGoeSgiC/b8rKpIrd36Nxf/8/C3hgtbiPL6s5WwGC5fspG7XJtYSw\nTt1NMCxc1+fTx8/nweeGuAqeFWoQJUSgJtaGuxheWui/VGs0Gnr9+nX4wweDQXzGeCKgsaAAIxRc\ng+YGoBA09+c//zmCSbGAfvjhh0g1Zu5JYSRmBaq8UHiuQAvzUa1Wg+2YsFi22wAADWlJREFUTqca\njUbqdru6v7/X+/fv1ev14gyYq6srdbtd9fv9oLMlBQWOi4U5gCXis36/r6en5zN8EIaTySQoYGqb\n0DzgD5eKp0f63mdtjUYj/eu//qsKhUIAEtYLFvl//dd/RezE8fFxKH+Yi1/96leRXYCV7qmo7XZb\n3377rTqdjhaLhcbjsa6ursKydBcQMT29Xk9fffVVgM93796p0+moUHg+4RiL9/j4WL/97W/1pz/9\nSa1WS+fn58EGVCqVX2T9vtS80qikqGPjChUlCShg78IKAGSJayBWCtnBKdsYraenp+p2u3GWEi49\ngi6x7mHscGfSl2q1GvLm4eEhWGHqhgDUAdcwPtwP0AMrub+/r5ubmxiTu7u7AAnIfACcMzruOiHm\nA7fi3t6e3r9/v1b7xWXew8ODLi4uAtTQb+5RrVZDhpAJyDEKBIkTmFooFIIxRXeRNbS3t6fhcBhh\nCG5ISCsjHYaT6rLz+TyK8h0eHmo2m0XW0mAwiGBz9AxxJx9lhVi3zKVVFTnpw+DUTZS9MyQpFeaM\nQvrbl6zrrDgNp23dFZMKRmdiUus+VfDeP6xjty5YmMTl8Gz8QwGkFfZApi81V+ZZAMb77KDQWR7G\nOWVbUsBIywJLPm9cw/uV3tvfp39Z7227Ufthd3c3UoclBQPBZ71eT5LCYsrlclG6HgFaLpcjFTeX\ny+n169fa39/XZDJRr9fTfD7X6elpVOiEyWKs/QTS09PTAAfEddzf36vZbIbVCM0Ne+FxApSX393d\njb+hwWFTiAOAHeJZvXR9pVKJyqCk0GJBISjJoEBIk/rrtD1z7UC92Wzqm2++Ub1ej4yG6+vrKKB1\neHioi4uLqJJLHIefZfMv//Iv+uMf/6hqtao//vGPury81O3trY6Pj6NIG4qFsZzP5xFrQ0zE7e2t\nptOp+v2+Wq2W3r17F0phMBjoV7/6lRaLhU5PTzWfz9VsNiPgcTabxanVpHW+lNXwSzRfCyhGGDkP\nZoeFICbCrWZiDPxMG2JYUK65XC6YM9Z6tVoNQCEpXIAEzRLbRRCzr5t+v69KpRKsBHFHFEr0gwM9\nxdtdant7e3GIXj7/XFARUFutVgNk4eKCrUQu+fpZLJ6L+1G8j/TltL4Q7Goul9P79+/12WefRVow\nz0BtFFL1cZUtl88l+bne3t6e3r59q4uLC7VaLV1eXkbRNggCmD5pVWPGSQJJwfDiqvHxA7jc3Nyo\nXC6HO5tn8Zon6C3A5Ka2FXDi6ZNMgFvgmwJbXaG628PjFVIAkbZNjEIamOMLRVov+OYUV+qC2hTg\n432C+oMuywJSLAQWT/rsLAgUkt8ndev46zQQlbHPcofR3K3Ftdi86fy5cnTwkZV1lILB1DUF4Evj\nbAAsCIO03P42G1aUpFCKT09PoUQRsKenp8ECuD8WxqFWq+nm5kaVSkWlUilcEWSfIHyI2O92u0Hx\nYtl4ZkSn09Hr168j/gWLsl6vR+VTBLRXpnSL99NPP9XXX38dwj6fX52PQiT/eDyO4nD7+/uhyNk7\nxNFMJhN9+umnEYfgRdY8HsOVEJYj5bax0HnObrerP/3pT+Fa29nZ0fn5eVTwvL+/V6fTUaVSWStY\n12g01O/3Va/X9dVXX8UcYOFyCrFnrXlmBcIXt9rt7a3q9bomk4larVYoDdjixWKhXq8XVvWrV6/i\nmVAsxMWwZrrd7tbWtLQ68wVGgrmRFFa2pKgxgkzCzSIp3BiAbsAL9UBcxrIfxuNxVEmGxaAWEwpU\nUoBC9hQn5Z6cnES2WS73nCZLphouV0AzbjT6y29wb1IBuNls6ttvv430+sfHR71//z4y9WC8GCti\ncAB0gKSHhwcdHx+H0k5dH7Crr1+/1mg00sHBQZSdpz4ILKYXg0MWIxOurq6igCCxIP1+X9VqVf1+\nX+Vyee0cH68l49eFQYRZcQAHI8O65VTp4+Pj6AcpzsTLHR0d6fr6euOa2wo4cWbALWRJHwCTLPbB\nFbLTjJ5RwwZJGQAHAmm8iLtiUjeBMwNpbItfA2Xgv3Ol72NAcJQzJ/6cAJAUfEirA57obxpM68+N\nEseqRXk6IOTaXkCM/noKn7uN/He8lz5nyibx/fR5s+aZ7/m8YKGgHF0QfgzghAP4cK84W4JVKK3O\nrECY4uJxVowsGQIEPf0WAUVWEAF6i8UiWA9Ja/e/ublRqVRaYzzI5EHxelYPcRdv374NVxCR/Chn\nr34qPR98+Ic//EGFQkHff/+97u7u1G63w8J6fHzU8fFxpNyizKi4ijXtacOdTidSJXHpQcGfnJyE\nP75YLOry8jLie7gmbq5erxeWLe6mYrEYfcFaB0y12+1gL/Grt9ttffLJJ/EsuVxOx8fHkp73xr//\n+7/r/Px8jRFjHlAkJycnAXZms5n+z//5P9rZ2VG/31cu91wS/Xe/+52Gw6G+++47lUol/fa3v/1l\nFvCGhiyBoveyALhy2ZMEyuKuIw3XwbKf3IzCZf0SQ/H4+BhxPShygCfxSpzwS00RScGAPDw8BPAh\nrklaxUIAvKlSzN7EpdTv99VsNiN+YzKZRDGxcrkc1xyPx2q1WgHW5/PnQnC3t7dhfIzHYz09PUUK\nOewDWTycrQTr9vDwoM8++0zfffedarVaGBMpkEHmjkajuAbjQNwIGTXHx8fBXj0+PqrdbqvVaqnd\nbqter6vX6wUAy+efT+qm0ByB7GQuSc8kA/Ep6DBcRLjGOp1OABp0C0zv9fW1Wq3WxjW3tYBYaaUU\nWQy8RkE58EibB2O6i4j//Tog9JRtcSYCpYqC5n3/nMWIsqehnL1YlIMgSWufp0yI99vZF5Svu3/8\nu84s0CevnAiwQUg6COS5UpDg93A3C699Q3g/nd1wYOMti0lJmTKfo/Q56QuWhYM67/s2mxcNKxSe\nT/2EhaCeAIGqOzs7QQ/DMmDJSSsQR0YN1gen1+Jnr1QqYT2NRiNJq7gurBssc9wXgChX3jAmrNX7\n+3udnZ3p/Pw8zn85ODhQq9WKYmOklxLV//j4+H/bu5ue1LUoDMDvFTgqWFMkCEIwAXVi/P9Dh/4I\nYqiJaEqjhWoVP1J7BifvctN7z/S6B+8zMTEqUNu9115rf+Dq6srKFWVZYjQabax+mM1m1kkxc1KW\npS2p5cTEVqtlO4hGUWSTFFlX5zVI09TmCARBgDzP7fnmCJAd1tfX10aa/uTkBNPpFLVazZZis2yR\n5zm63S6GwyHiOLZM0/HxMZIkAQDEcYzVaoU8z1Gv13FxcWGBCBtvNso8ZPHm5gZ5nuPw8BDNZhOT\nyQQAMB6PrTFfLpdI0xSDwQDb29v/2uPl/8bMFu9rlm3YTjCY5QRPPgPsiHivMhhlW8XSAveX4fPM\nElG9XsfDwwPOzs4se+Q+829vb9ah8vX5/2WwzeDFfYbSNMXe3h7CMMR8Psf+/j6SJLFdVtmOMRAr\nisLue95HRVHYEngeHMh7lJu1MUhmWxzHMXZ2duz1mVXjnBVmjhgcu2VeTrTn5+Z9zdVILy8vNieH\ng471em2bnTEw6HQ6GweSdrtdCzr4laWp5XKJwWBgc6QYJHFwxLYoTVPMZjOcn5/j+fnZ2nVm/5gt\nzfPcdqvlBnJ/86OnSXHU687cdoMNtyNzsyBu4OGO8quBDP++W4JxV5fw9/l9t9ThcrMQ7ByrnTw7\nSzewYWdZ7XyrWYIqN+gCNiebVssg7nt0rx8/t3sNq+UaVzWoqAZs7s9Ur4v7XqrZE14XBojV12dq\n1v271WyTG6Qx6OJruV99CE6YIfj4+LDDvjhSAb63Pg/D0HYK5ZJDjhIB2Kiv2Wyi0+nYrq08iTQI\nAmRZhjAMbT4IJ1YyK8Dab1EU9j5YvgG+Z9qz7MFOhUE4/2+slXMZJScsukuD2aE3Gg30+31kWWaT\ndFmmKYoCcRyj3W7bKO3u7s5q54vFAo3Gn8PMtra2EEURgO8SJ1dAMbOSZRmSJMFyucTR0RHKskQc\nxzZ/jW0FU+gsD7y/v2OxWNiW5aPRCFEU4devPycEJ0mCwWCAy8tLALBSQL1eR7vdxnQ6xWq1Qq/X\nw+npqa3E6ff7uL6+xnA4tH0mmPpvtVq4vb3FZDJBu90GANuwi/uZcE5LrVazn+H15qFvP4VbnvOz\ncGUR2wTOdeA2/rwX+Bn5DLME4GYBWBbic+IGLNyXZz6f22ohzm1ie8jyEjtkZlYY1PF7LK8wU8Hs\nDOdBuFkaljFZVuPnYhmZmbMgCGxSLU8qZtnIDeA4x6LX61l5lgNR7jDLVUYciLJcyu9z1Q8DRban\nHARwPhcACwhZvmIWpt/v2yq0x8dHHBwc4P7+Hru7u7ZUmXufrNdrdDodO/n46enJBjd85jnp+PPz\nE+Px2PaKeX193TgLrNFoWNvIoNTdb+m//PO3DlJERETkJ/z8YSQiIiIiDgUnIiIi4hUFJyIiIuIV\nBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUnIiIi4hUF\nJyIiIuIVBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUnIiIi4hUFJyIiIuIVBSciIiLiFQUn\nIiIi4pXfPRZNtgyLF3IAAAAASUVORK5CYII=\n", + "text": [ + "" + ] + } + ], + "prompt_number": 5 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With net surgery, parameters can be transplanted across nets, regularized by custom per-parameter operations, and transformed according to your schemes." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Casting a Classifier into a Fully Convolutional Network\n", + "\n", + "Let's take the standard Caffe Reference ImageNet model \"CaffeNet\" and transform it into a fully convolutional net for efficient, dense inference on large inputs. This model generates a classification map that covers a given input size instead of a single classification. In particular a 8 $\\times$ 8 classification map on a 451 $\\times$ 451 input gives 64x the output in only 3x the time. The computation exploits a natural efficiency of convolutional network (convnet) structure by amortizing the computation of overlapping receptive fields.\n", + "\n", + "To do so we translate the `InnerProduct` matrix multiplication layers of CaffeNet into `Convolutional` layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 \\times 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding." + ] + }, { "cell_type": "code", "collapsed": false, @@ -37,101 +243,17 @@ "output_type": "stream", "stream": "stdout", "text": [ - "1,2c1\r\n", - "< # This file is for the net_surgery.ipynb example notebook.\r\n", - "< name: \"CaffeNetConv\"\r\n", - "---\r\n", - "> name: \"CaffeNet\"\r\n", - "4c3\r\n", - "< input_dim: 1\r\n", - "---\r\n", - "> input_dim: 10\r\n", - "6,7c5,6\r\n", - "< input_dim: 451\r\n", - "< input_dim: 451\r\n", - "---\r\n", - "> input_dim: 227\r\n", - "> input_dim: 227\r\n", - "152,153c151,152\r\n", - "< name: \"fc6-conv\"\r\n", - "< type: CONVOLUTION\r\n", - "---\r\n", - "> name: \"fc6\"\r\n", - "> type: INNER_PRODUCT\r\n", - "155,156c154,155\r\n", - "< top: \"fc6-conv\"\r\n", - "< convolution_param {\r\n", - "---\r\n", - "> top: \"fc6\"\r\n", - "> inner_product_param {\r\n", - "158d156\r\n", - "< kernel_size: 6\r\n", - "164,165c162,163\r\n", - "< bottom: \"fc6-conv\"\r\n", - "< top: \"fc6-conv\"\r\n", - "---\r\n", - "> bottom: \"fc6\"\r\n", - "> top: \"fc6\"\r\n", - "170,171c168,169\r\n", - "< bottom: \"fc6-conv\"\r\n", - "< top: \"fc6-conv\"\r\n", - "---\r\n", - "> bottom: \"fc6\"\r\n", - "> top: \"fc6\"\r\n", - "177,181c175,179\r\n", - "< name: \"fc7-conv\"\r\n", - "< type: CONVOLUTION\r\n", - "< bottom: \"fc6-conv\"\r\n", - "< top: \"fc7-conv\"\r\n", - "< convolution_param {\r\n", - "---\r\n", - "> name: \"fc7\"\r\n", - "> type: INNER_PRODUCT\r\n", - "> bottom: \"fc6\"\r\n", - "> top: \"fc7\"\r\n", - "> inner_product_param {\r\n", - "183d180\r\n", - "< kernel_size: 1\r\n", - "189,190c186,187\r\n", - "< bottom: \"fc7-conv\"\r\n", - "< top: \"fc7-conv\"\r\n", - "---\r\n", - "> bottom: \"fc7\"\r\n", - "> top: \"fc7\"\r\n", - "195,196c192,193\r\n", - "< bottom: \"fc7-conv\"\r\n", - "< top: \"fc7-conv\"\r\n", - "---\r\n", - "> bottom: \"fc7\"\r\n", - "> top: \"fc7\"\r\n", - "202,206c199,203\r\n", - "< name: \"fc8-conv\"\r\n", - "< type: CONVOLUTION\r\n", - "< bottom: \"fc7-conv\"\r\n", - "< top: \"fc8-conv\"\r\n", - "< convolution_param {\r\n", - "---\r\n", - "> name: \"fc8\"\r\n", - "> type: INNER_PRODUCT\r\n", - "> bottom: \"fc7\"\r\n", - "> top: \"fc8\"\r\n", - "> inner_product_param {\r\n", - "208d204\r\n", - "< kernel_size: 1\r\n", - "214c210\r\n", - "< bottom: \"fc8-conv\"\r\n", - "---\r\n", - "> bottom: \"fc8\"\r\n" + "diff: imagenet/bvlc_caffenet_full_conv.prototxt: No such file or directory\r\n" ] } ], - "prompt_number": 1 + "prompt_number": 6 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The only differences needed in the architecture are to change the fully-connected classifier inner product layers into convolutional layers with the right filter size -- 6 x 6, since the reference model classifiers take the 36 elements of `pool5` as input -- and stride 1 for dense classification. Note that the layers are renamed so that Caffe does not try to blindly load the old parameters when it maps layer names to the pretrained model." + "The only differences needed in the architecture are to change the fully connected classifier inner product layers into convolutional layers with the right filter size -- 6 x 6, since the reference model classifiers take the 36 elements of `pool5` as input -- and stride 1 for dense classification. Note that the layers are renamed so that Caffe does not try to blindly load the old parameters when it maps layer names to the pretrained model." ] }, { @@ -145,7 +267,7 @@ "\n", "import caffe\n", "\n", - "# Load the original network and extract the fully-connected layers' parameters.\n", + "# Load the original network and extract the fully connected layers' parameters.\n", "net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt', \n", " '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', \n", " caffe.TEST)\n", @@ -163,27 +285,27 @@ "output_type": "stream", "stream": "stdout", "text": [ - "fc6 weights are (1, 1, 4096, 9216) dimensional and biases are (1, 1, 1, 4096) dimensional\n", - "fc7 weights are (1, 1, 4096, 4096) dimensional and biases are (1, 1, 1, 4096) dimensional\n", - "fc8 weights are (1, 1, 1000, 4096) dimensional and biases are (1, 1, 1, 1000) dimensional\n" + "fc6 weights are (4096, 9216) dimensional and biases are (4096,) dimensional\n", + "fc7 weights are (4096, 4096) dimensional and biases are (4096,) dimensional\n", + "fc8 weights are (1000, 4096) dimensional and biases are (1000,) dimensional\n" ] } ], - "prompt_number": 2 + "prompt_number": 7 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Consider the shapes of the inner product parameters. For weights and biases the zeroth and first dimensions are both 1. The second and third weight dimensions are the output and input sizes while the last bias dimension is the output size." + "Consider the shapes of the inner product parameters. The weight dimensions are the output and input sizes while the bias dimension is the output size." ] }, { "cell_type": "code", "collapsed": false, "input": [ - "# Load the fully-convolutional network to transplant the parameters.\n", - "net_full_conv = caffe.Net('imagenet/bvlc_caffenet_full_conv.prototxt', \n", + "# Load the fully convolutional network to transplant the parameters.\n", + "net_full_conv = caffe.Net('net_surgery/bvlc_caffenet_full_conv.prototxt', \n", " '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n", " caffe.TEST)\n", "params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']\n", @@ -200,21 +322,23 @@ "output_type": "stream", "stream": "stdout", "text": [ - "fc6-conv weights are (4096, 256, 6, 6) dimensional and biases are (1, 1, 1, 4096) dimensional\n", - "fc7-conv weights are (4096, 4096, 1, 1) dimensional and biases are (1, 1, 1, 4096) dimensional\n", - "fc8-conv weights are (1000, 4096, 1, 1) dimensional and biases are (1, 1, 1, 1000) dimensional\n" + "fc6-conv weights are (4096, 256, 6, 6) dimensional and biases are (4096,) dimensional\n", + "fc7-conv weights are (4096, 4096, 1, 1) dimensional and biases are (4096,) dimensional\n", + "fc8-conv weights are (1000, 4096, 1, 1) dimensional and biases are (1000,) dimensional\n" ] } ], - "prompt_number": 3 + "prompt_number": 8 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The convolution weights are arranged in output $\\times$ input $\\times$ height $\\times$ width dimensions. To map the inner product weights to convolution filters, we need to roll the flat inner product vectors into channel $\\times$ height $\\times$ width filter matrices.\n", + "The convolution weights are arranged in output $\\times$ input $\\times$ height $\\times$ width dimensions. To map the inner product weights to convolution filters, we could roll the flat inner product vectors into channel $\\times$ height $\\times$ width filter matrices, but actually these are identical in memory (as row major arrays) so we can assign them directly.\n", + "\n", + "The biases are identical to those of the inner product.\n", "\n", - "The biases are identical to those of the inner product -- let's transplant these first since no reshaping is needed." + "Let's transplant!" ] }, { @@ -222,33 +346,13 @@ "collapsed": false, "input": [ "for pr, pr_conv in zip(params, params_full_conv):\n", + " conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays\n", " conv_params[pr_conv][1][...] = fc_params[pr][1]" ], "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 4 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The output channels have the leading dimension of both the inner product and convolution weights, so the parameters are translated by reshaping the flat input dimensional parameter vector from the inner product into the channel $\\times$ height $\\times$ width filter shape." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "for pr, pr_conv in zip(params, params_full_conv):\n", - " out, in_, h, w = conv_params[pr_conv][0].shape\n", - " W = fc_params[pr][0].reshape((out, in_, h, w))\n", - " conv_params[pr_conv][0][...] = W" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 5 + "prompt_number": 9 }, { "cell_type": "markdown", @@ -261,18 +365,18 @@ "cell_type": "code", "collapsed": false, "input": [ - "net_full_conv.save('imagenet/bvlc_caffenet_full_conv.caffemodel')" + "net_full_conv.save('net_surgery/bvlc_caffenet_full_conv.caffemodel')" ], "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 6 + "prompt_number": 10 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To conclude, let's make a classification map from the example cat image and visualize the confidence as a probability heatmap. This gives an 8-by-8 prediction on overlapping regions of the 451 $\\times$ 451 input." + "To conclude, let's make a classification map from the example cat image and visualize the confidence of \"tiger cat\" as a probability heatmap. This gives an 8-by-8 prediction on overlapping regions of the 451 $\\times$ 451 input." ] }, { @@ -297,7 +401,7 @@ "plt.subplot(1, 2, 1)\n", "plt.imshow(transformer.deprocess('data', net_full_conv.blobs['data'].data[0]))\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(out['prob'][0].max(axis=0))" + "plt.imshow(out['prob'][0,281])" ], "language": "python", "metadata": {}, @@ -307,33 +411,33 @@ "stream": "stdout", "text": [ "[[282 282 281 281 281 281 277 282]\n", - " [281 283 281 281 281 281 281 282]\n", - " [283 283 283 283 283 283 281 282]\n", + " [281 283 283 281 281 281 281 282]\n", + " [283 283 283 283 283 283 287 282]\n", " [283 283 283 281 283 283 283 259]\n", " [283 283 283 283 283 283 283 259]\n", " [283 283 283 283 283 283 259 259]\n", " [283 283 283 283 259 259 259 277]\n", - " [335 335 283 283 263 263 263 277]]\n" + " [335 335 283 259 263 263 263 277]]\n" ] }, { "metadata": {}, "output_type": "pyout", - "prompt_number": 7, + "prompt_number": 11, "text": [ - "" + "" ] }, { "metadata": {}, "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAAXMAAAC5CAYAAADavt/0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvVusLVt63/X7vjFGVc3Luu3bubbdbcdOupMYbIidODE+\nIjdbIJnEyImDxHsQL0gIJAhgQBYOT0ggRSjcZKSQvEQRoAQeiEniRJhEYAvbiS9Ju7vdp885e++1\n12VeqmpcPh5GzbXmWmefPrv7nM1OzPqkWjXqsmrWnLPmf/zH/7sMuLM7u7M7u7M7u7M7u7M7u7M7\nu7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7u7M7+8fefgj4B8CvAf/2K76X\nO7uzO7uzO/smzAG/DnwWCMDPA59/lTd0Z3d2Z3f2W930JVzze6lg/htABP4i8CMv4XXu7M7u7M7u\nbLKXAeZvAV/Z2/7Nad+d3dmd3dmdvSR7GWBuL+Gad3Znd3Znd/Z1zL+Ea34V+Mze9meo7PzKtA1W\nhvgSXvrO7gxmJ4Htsyiv4rUf/ODvsCd/4x+8ipe+s/9f2PcDf+e5z/bLeOA98CvAHwTeBf5P4MeB\nv793jr3xe78HRevYQEE80CkWQIKgohPFF0QTbck0IsTtmtxHvIBKxjRjGJghsQCGCTjnUFXMjJwj\n42BsN5mUV0A9tv5a5PDtGa4VlvM5i0VgOT9m3s4J3jOOA0+ePeODx08pxehmQpgprlNMEi541BuW\noe8TORlIRoBcCqUIpWQODjvmR4KNEJ85Ls4i23UhjYaoMjzreevzS3wXEFVSclhRSlHEwMwwyzgR\nSim4JiDOUK9YUygkJBTimEnFKAPkFbApIMLxG8c8+swx3/Fdb/PgrTc4fLDEB89qvebx+1/jS198\nlw++9oQv/sx7LD6zYPWkx9YeSkZFCBRUDR8KTefxDrzUexxjImWlPe7wJ4EHry1o5wV19f6HTeaD\n39wwPDO2zyLDusesoMEhviAevDpOvvUB3/adb/NrP/cP+d1/+LczDJHT957x/pcfV8ahQtHdZ6EY\nmbYT5i7jGk9ocn30TPjrf/5XX9az/SJmP2r//dc94Zd/4i/zHf/unyDFQIqBmPxVO6VAjNfbcdp3\n3fZ1vTv+F38S/yf+DC4kQog4P61DwoeIDwnvp/Vun987trf9j37yL/CF//CPE4h40o3l9r6P2/6r\nP/F/8SM/8bvJOAp6Y/lG9/2tn/jf+b5/748wppZxbKZ1y5gaxtgwxvaF1kNs6f/ST2H/4n8EW6Dn\nev1R7a93fPUTUH6CKkZ80uWngH/rBR6vB/ARz/bLYOYJ+NeB/5Ua2fJfcxPIAXCiYPVtqICJYWaI\nuvqDFTAMp4oATgUvsDyak9rIenWJA0oBFSWWVMHfwEwogKqhouAc3hfaWQ+xxZLhVBA1nPeIGjFG\nxgG2ssWbkL0nxjp6ODiYM8aBg4MZvoPsEklKvUdnlFLwXrGcIQtFFDMFG1ksHEePtpjzFGlYUdhs\nMmlQRAXvlUEAhJINNRDx5JIQHCL1TYko2Yxm0WE6PQCu1GsEwxqjnYPGQm4KqkoMimw8Tx+fEu45\nohfc3OEWDWKCcw4chANotobrjOPPdJgKl1/cVtD24KwQvBKcx0tBDZwTUi54L0hQUixIKVgslKKo\nClbq8ab1RDeirlB7J0BAvBC8Y9HOOFgc0nYzmtBweHjEZrNhO1uzPFyQS6aUjCOTChQrlFwYBqN0\nSpsU84pzjm7xEp7oV2KfZl8kX3fzzn5r2MsAc4C/Ni0faRocVgxk99veMVpDVDCrQJxSYu4dwRmt\nV0rpca7QzQNxGJECKSaEytoQgQIlG4mCOjAc7UI4OjwkZWFzOVCSsX6SCI3UvtGMfozkvEbVMfeK\ntsJRM+dYl6gKhcRoI31ZVQbsYgUlcUCB6LEcAcP5graB2aNEd1+RXriMMKQRy4YqiDNcAypCouBw\ngCGAqiebVHaMAnlixUICzKAUjys94hVcJLuEC4p5AVWkgSSFmQ8cHC/IJRGHkfWzNbEY62enrM5X\nxHhGOy+4zmjvF2Y5YmtHPC8EB86U4B1qI5YV8YIZeIwhQtFMmHk8Qm+RVjtyKhhCiYWgHudHVECl\nUETqZxAF13rcskEcuOBw3tEGT+lausWMdrkmbgtmSilAyqRcEFEMQXJ9n845mlYIIbykR/r/a/s0\nXU/2dTfv7LeGvSww/3gTKCqTB3Z6ukQwASmCqZDNoGRiyhVcKWSglEyKkZIT3jn8rKHf9hWUi1SW\nTyElpRElNIX5QcPBg47lgafvC6dPRtIg+M4REzgDK0YsiVQyiNC2LbN5g3MO5zwpjTzrnzKOBlkx\nEuomuSALzhVMFVNhcTynaYy3HyTciePZ+RnH4zGP87Pa8Qj4oDhnLO41iAiWAVGyA5EAMaGqUAoh\n6NTJRUQbSskIBaKQNwkakJki84wTUA+jA81CuwzIXBli4fxsxfmzFc9O12wuH2O6pfiEC4mHv3NG\nWBSaY7A3DHJGR4cUMDIi9TMSUWLMuOIxM0oG0ZbF8ZzFMuC9YDkTR+i3I+t1JMYRo468KIYI5JzJ\n/UgpM5rO44LwLb/rM2jbEMxogmM+79jkRClCTkJKCQxSSpQCIlpHNG7OfNHSNP/4g/nDdz7FtIvP\nv/OpXer+O7/zU7vWb3/ntU/tWp9553Of2rXk8z/46fVl+g6UT+tiv/8TX+GVgbmKwywjIiQExMgl\ngwV0khxc1TEoeaQXCF2GXEgpkVPGSgYnNE1DNzvi4mxFTAkQigmI0seRsGxol8bhgXJ8f46J0s62\nhBC4eNaTLgdEasdixer1c0+hQ7WrbNdGslYmXooxWE+SDYE5rXicN/AOnNHMAsu5sjz2/LPf+d08\nGz7gXnOff3D6DE0topnGKd5D8I7m2ChIZeMaqvxhICZYNASDSZbCBVQhjj02FGIGawztDC+GNuBn\nghRQlKKFbhkQTfT9ig+eZFZPNqw+OCXrQJiN0K3JM+P4OzxmmfmRYVFJ54qegxYjx4xJS7FSR0AJ\nhmhkc+QWtA10bcPhbIE0mXEciXkgtA5vA32hfsepUIpRhyaGFKNPCalvmTe+8yEpj2Qiow2IRMwi\nWIFcIGfiOBCHQomKZKHtGkJQ2tDg3csI0LphPwT8Z1QJ8b8C/uw3eoGH73ye+LH+/xfUQr7wDlXZ\n/Aau9RGXrmD+cdd6Matg/unA5re88zlS/lQuhX7+Bynjp3Mt9J1P6UIAf4BP+nm9OmZu4NSRS0YQ\nBCqwZyOrIWZgBbOMFWOIAy4JYglLkZwSYgVBES3M5y2z2Qmn52vW656SFCioKNvtyH13RDMLzA5a\nZgcL2sWS7fCYMUa6xRLLVkG6H+mHiO8ha6FpFQsBVSPmyDau2YxbMgOuKzifEGnAC9o6TDOuc8wW\nDd/7ue+glSX/xh/61/i5/+dnWP3G/8IH+pTUFoqA9w5RcN7IJSDe0bYtOKEkQ4uQKQiClYy6AMVI\nKcPoUau6fU6ZPBSkrw7RMGnmOs+ErkM7MMlcXvRs1hc8e3cF20RoBpaPHBIEVxJNOCDMFXEj42C0\nJ0bfJ2TlyaZoSTjnSMkoWUhFSWQODo/p2o6SCzFGgldUFSdCdp6wWND1jrGsqt5thk9C9oY2injo\n08B23OKGgveelCJ5+gWLamXxKWOAD56cI2qCWSKEBW3boqKIvFQwd8B/AfwhatTW3wX+R57jE/rk\ndiez3Nk3Zq8MzAvx6pkSVQzFcnVMWi5QqpNLxDCDjHJ5tkWtZ9yugMxs5mm7iHctokrTdrzWdpxf\nrtluei77kZIzFgubTUKc4jvhaHHC0aKlXwlOntGGlu0wsl4NgHK5PacfM+qF8+GcRWgwZ2y2Pet+\nyxAHtFWEFhGPOEEIWDG8Nngc86Zh2Bo/+nt/jOai8Ed/z7/CuL7H+Zf+O74oA6u4xmkFalwdjYTO\nMVt6ci5ElDRWuYdc/YY5G0WMkg1TMFFKyhAFS44YEs1cwTLeC2qGkvGNZ8hb+lXh9IM1q/d6vDoW\nB0bJGTHj3uJ1Ht17hJ87HvMu64un+M7jVEmSsKyYM+rNKKUUcjJiylyeb1iczMnFk8h0zjMWo6jH\nJNMEYdOBOIdTj1ehOOhmDd2iIbQTAJsRx5Gc68hrTAMmBfUKoyBSRxySwDsHKNlGfHAslnPmjXvZ\nj+1+djNcZze/BDC/szv7xuyVgblYoXoPHWJGRlGBnApIoaSMKwYuo1SmXijEVJ1hsU9YjLShgQNw\nzgOC08J80eKc0rSO7aZnGCOnT1fcO53x4KFHtaNp5rzxxpu4tiPnzHwckdNzNustKUXOhp4+r9CU\nmBfQ1pF6IwEaAiEUXKeoaNWIAZxDxZNL5v78AQfumHd/6Yu8/+V/iFnhe77/D/LGn/4z/Cd/7s/y\nxbN+irhRsqtO3+XhnHbWkMYEZvQ2hVyKkGMmFwBDvMOcVLmC+jmWVLAsWHEUM0BxaqhTVArZlH4Y\n6S+2aBKSJVLnGTaZg3tLHh6/xVuvfQvNvKXxcy7PM6fLc/K8wEqnyCBjdtDhloEUM/lrI2b1vcc0\nchACgpJzwahRPjHGGvViimsc4cCDgFOlWQZcK/hG0WAMcYP1nhACKWVSzqSUKSkBSvANJUdizNjk\nRJ0tlsxmMw4WB3TBkeOnIxN8hD0vu/n7Xs5L3UWz3Nk3Zq9OZkkRcw5VMLRKCjIxUTPUMh7wKqhW\nh5loIIkxSqKkTBlGLi82zBYtbRsJruqwVjJYAgo+NBhKzCNP3tvw9tuOsYfglZN7JyRnrNbnzKVj\ntd6gCmNJ5HHEdcayE8IsIy4j6pGhxVvBNR2hBXV1JFGESROOtE3HdnXB93/hR/jKL/wyLsw4Wh7w\ni3/zr/PP/fif5nv/qe/hg5//WQaLoKBJUC/M5oqGjBNHPybMpDJyq/KCK9VpXB1/gAkinmQjZXKY\n5jHjkpJLnmQcI+aROFbgzluHlYFSlNwr4xBprOHegzd4+OhN1AspJ+4fn5F7QVYrLrcbdOMJTpg/\naFjcOyAjzO4VTk+3SPQUjTUiKK0ZtjLF+bsaoSMFnOEaoRWHOI94oe080gjiQnVqp4INkZwLKUa8\nKqgQzTASOTtKcZWVJyNLoekCR4s5jQt0bUsKLzUZ7YUEil/8D/7KVfvBD36BB+984UPnZPPV34BS\nzNWRKVJjmaT6kFBD1BAtqMu4orXzLMIUuwsYzuUaK+5qnLnzCe8SXjNOM85Na82olGkxRKZXFJtC\nYG+aTHeklBvt/cVRQ0b327tY8901bsePK6W27XqfI19d8frcPMWbK2qlvmeY1ns3LFKj4kQwBbQG\nUJgTrExBEVPIMgZSCnipaQme57Y/tO2oUWJuajvATdsvYt9smk/5WbC/fb39dZ7AVwfmpbKtIoZQ\nKJpRU5IlnBrBG0EyPhScVwRBW8hJ8aPDmbLNyvY8cioXYMr8IOG9RwqQEhYTKVZNWXxmvdrw+P1z\nXnv0JmWEg8WCeFQY4xacxyQSLSFSuPdggZ+NHN4Dv/BkM+LgKVrjwH0bmHUtzjuwxKBxisTxOIu8\nNnud/v2n+KZl1gVSyRwe3+OX/qef5t/80/8xf+/f+WHeHwpRCmkozJqWphFElOQcMa5r+CGVtQs1\nusdEoViVPKoCM3WABYtQtp7YF0Kb67drhmQjp6E6Lr1gfXWojjkSeiH4JceH93n99Tfo45YxjiyX\nB/SHAwffcsTXto85/UdnzJdzZgcNYeY4PDzk4GFifs+xuRgwa0gy4KaQ0JhBrT7pViLmDN86pBGc\nF5wP+MZjFMTV3IKUEyXWSJU4DpQhIimBZkIjxDwiqqgTvNb3Npu33Lt3xMHBHOcUVz618ILn2cdm\nNwP8tn//x29sp+f8AFOpYF5sD7xEr3FqwnRVQ7VgO0DfDcYMxGoXoD7hfa4g7hPOVTC/WmvCuYxq\nqYuU2kFIuQJ0rrqS5y+3QXwH3ru1J31oDVDqHU7gLc+5Sl3MbgJ4mbqQ3f9HK4jVPq7616bO5noD\nVBCdYgVqlO/0We13AlRfUxLIgiWwLJDkCryv11Pb1bU4uQZ0LxOgP+cpkQ81nnMMrpFZPvJ0+IFp\nmWz7nz7vJOBVauYxgdMqEugUG66Gk0KjhbYxfFNoGiUoqE69ag70fUODoCr0l5H1KmKcs101NRa7\ndZRipGSksQK09wEj8d6TU9784Awnc0Qv0VbREFitzhnHLSKR114/wrnC8sTTLBODbWtGaKxJQrg6\n5FfnaNsOr54urInbEdFCk4XjRUd/eUnjW1TAiUGOyPyA3/z5v8qf+qN/ij/3Mz+NDIlcBtq5oprB\nHCll8jiBUg2Up5jRzlvefusNnp4+5dmzZxgOLCNapSopDoqiMWHZ1c4gFazUZCQTY3kciD6wPevJ\nvWEWaNsZ8+WSguFDAIGuaTlYHhGJ3H/zkHKesWFL8Yd0y47usMWFOdo2+MWGtInkAhJ6WudJuVSn\ncjC64wOavjDOCiXnClQIpdT4fAPGVcTEaOYeAVI/MA4DkuooTIXKVJuEoOCMtvHcv3/E8mBJ21XH\nMcNLlVn+HvAd1PLO7wJ/gprdfMNS+fjwyGzuarkCtCtmzgRKEytXwVyeoruovwPsmmVPzNz5jJtA\n3Lm8t86o7i97QD4h5G0cuQbyD7PyHYjvs/H9ZZ+Z2wTIxfZAHbm1Xd/7PlO//X/OSn3GuYWVu45P\n7SqbvKZr1FFtxcv6H2ZT6PMOzNMOsAULgqVp8TvQntj9DWA3bGLk4qjt/fu50bAPA/Rzh0DGRyD5\nN2SvkJkXzFJNHHKOIg7RjPoqsThnzDroWsG7+oA7CTXZxAWclzpUd57V5Ui/qT/ys7MeVGjbXRed\nMRVoR8wKm4tL/v4Xf42jwwO2sbA4XJDHnvVqTc5CaOHo3pwuFPw8U9yWHOsQTp3iFDKZTELoUPUg\nwrzrYFYzHA984NsffSvx17e0xy0lDbhmRhFh6HvKB2t++J0/zk//jZ9mCEIjnmXXVv08wjgkSjKs\ngFiNL3fe8fC1+5zcO+D1Ryd86atf5ctfeRewSbe2CnKjkbaK6xqcS+AL2IiIY37Y8vCN+5Rxxge/\n+ZR3v3aGWyr37t9HUMYUyTlydn4GNrBczNgWR/P6Q4bHhcv3tqRVxr0eaGct4j0PFi3NecPF+bqG\nSxaPiEBRRGHmF9WJ7TtOfAVcJ1VXvzi/5OJiO0liwtCPJKmgFfuIpvoDNiu1bEOofZtIxgscHhyy\nPG5oZp7QtSBG6l/qU/tC2c2xfPzPqhR3zcztGsBqrkWVWWSSWdRNEoNLE6hWiUml4CYwdxMzd1Nb\n97d1f9kB+W7ZSSw7lniTqe/B6h5X/vogvlt2Ektl3VN3sN9Grhj57lVunnvdhTirjvorZo6hcHX/\nItSyIFMnWAHd9mSJqTiI2BUzr4tiWShJMK/XoO6VEgSLE5A7rX4qL1V6cZMMk7mFw5NEtrd5k3nv\nsXHh1nmfzF6dAzRDkfpVGb4+QNPQ2YVMECEEI4SMOsFpPUed4J0Hb4ivGYpFBuaLExofGGLh8ftn\nrE7HaVjpkMZwTQsqpGFgvVmzSWuCeoZxxebynPVmhYoyWzbMF4Guy2gD26SIOXK0ynC9kMbCMES8\ni4ShpW0LzgvdrEVl5K2TexzFh1jzPpSqazfzJePqHOccz/oNh3//7/KHPv8D/KVf+Bk676uzUoQB\nY4gJDaCmlVmbo1k4HpwcMm8DB8sZsTzi/fce16gbSZVFYBQ8OtZIIKjDdBBmBA6PHvLGwzdYhCO+\n5fU3+PUvf5GwNE4ePkJdYdz0XFxe8v7X3ufZ5SkHi2PUCVGE45MDLp+esnq64vLRAf5wTnMIs6Zh\nduiJ2RPPyvTsCk0j+NwgBNS1BOfAwsQEBe8L9+8f07QNz85WpBwpyWq255gYtiOuZGYdLJczZvNm\nCmFMWI6EoNw/useDB/fp5i3ee4xMLi+VmcMLZDe/CDMvtgM2dyUl7KATmBCrSizVYQL7UoiKUqRQ\nJF+B+TWoT21XUE04V6pePgG5akbFbunm1y979fI3AP2j5JY8VVH5MKgLNjHrm+C8Y+HXAH4T3J+3\nv4J5vbMPM3MBrakIO2ZuttOz5eozNSZmjlGSYkmu10GxqBQ/AbkviFOK1ytWXnwldMXZpJtPr7F/\nQzsgvwHizwHwfTZ+BfKfDNFfbZw5kFOq6YoFcNXZV3L9MgTDJNd0eclgkEWwAK34Gv1iynz5kNls\nyXw+p3Ez7t17yq//2hfZnCcsC06NPCbEK9J4ZrOGbXpKe3hMXzaMOZKGEStKCIp2UxieKNuYiEUZ\nk8OZIMUTrDD2kXVa46UhuBZaR84jzaJl9eyCPFwiOHT6AjcXFzTdgmHcMpPM4/ff5cf/4L/K//x3\n/zfCoqtgZIo4qRpoywTOBsXY9iNt1zGbtzRdw8NHhzx644ivfuUDrHgUKFNihZWCxCozhcYjAm3j\nOHlwwFtvvcXxyT0uPrggHCqjRDofKCnx7INTPnj/a5w9fZ9nq8eksTAPc5LCoJlsSu6FD750zuxo\nSTtvQcG5htl8ZLv1DJtEzhmVjGjA2yQTqFZNe4p+yRmMwmIxI0vhybNnNXJlzPSbgdQXmuCYBY93\nc+6fPOTg4IihrMhpjTpl3nXM5h1+5hCvpO2GPm1f2SO9sxcC8zIBl10Dm12x8kkGVpuCAWrHvIPC\nMo3DTJQigrqJnbuM+lLXk0Z+JbFcAXqepJuJmVPZ+ZV+w77E8jwQvym3uKk72gfyfafobfepIVPt\npB1YT/tNboC5XbH2er6zcoV5Yrv7vMZM2WnmxjSKuf6sd0qLTaAv1KS4EpQyrS1N20koXhGvmDfE\nG8UpxYM4rSTS2Q1n6I0ekP2bug3ixs1ec297//+/SXuFYF6TR8SUYkPNgDRHHgrZKaVLtR2qtz6X\njOLYFdMK4mi6huQSzs85PrzH4cEh3rWEriWmwrtferdGqASPZSGPNcri+HjB1gaWsmEYB5giKcZx\nRL2rei+JnDYVWMRhWVFzlS1HITjHdkisLrc0GghNRsWz3RY+49+icYaft+RcEFXGYSThODlastkO\nACzjyO98+G18lTM0GM4pIxltM96MnD0ksKyUGHlycc6br5/QzB3jpvDg0RGDjcQxk3Mk9tXxmLIn\npkjA1QqFojXeG8+9z9zncLEgNA1RI/PG8brr2Jxu2PQb1qfPOHv6mMvhjM22597xG9ig9CVSxJGG\nSP9k5PzJlm7pyH5G0wKhxo4nMlhBaDAxstbqliod/ZBwsutgDDfpnPN5yz2OOT07YztEygAaIZgj\nnMy4d/SIWXuEdw3L5SFNC0O5xKsxO2hoQy1MNsaRPH5a6X3fvMUXAPMaWSHXgGY1qusKobRKCmgN\nPXVXAF5QqdEa5LpWLRXQfb4C9rrea++Y+Q2Zxa6AfJ+/wm1Av5ZXburmOzaePiSzBCI78W8HzDsg\nv17rre2P3p9KRq80c7si5DLpLqLTrdtNb/MNB+lOiqFQgqPkHYC7a2D3Sgl1JF68XUW2VKcnFCfI\nBOhXoH71QvZhIH/e/ucNg/bX36S9MjA3TVAqz6CAiKOkWv9jHGDohdAZ2hScqw7QaPWx3r3r4AXB\n0wbl8HDOw0cP8S7g25Z+NbJZr9BWa3gSuYbw5Y6D+QmHiw6VzHY7sN0kiIXNdkPTOtxWEV2TcmaI\nU+hY9gQa0iZiGVIxVGr336dIiC1taJi7Yx7c/074aiSOQtt1lBLpupblyX3GYUsILZvVmq/83/8H\nf/KH/jh//m/+RZKLFDPUJVyjkDNjX+qPXEDF8+X3vsLnPveQsMlky7SHjvuyRNST4kjKmc0qsd2M\nbMsuRG+KU8+Fpm0I85bFckYYYNkc8IXykNdlTi+F98Yn/A+nv0IcImnIDHlN2J6hKZAEokS2GWzI\nfPVX36M7fpsZBQ5nmEXazjP2Hkh4UWTiZuvNhpx7xrFAKrTzhuUs0DjHmCLOOeadIy+WbM4HbEik\nPqNOSRHEGsgNjkDQBrOIqic0npQKkUQphXGIjP2rB/NkL/CzMqmywOTUvOaaXLNzraBluyG4XB+j\nMAE6qCvIDrx3wH57346N78stN5ygXN3BTjPf181va+d6g5Hf1NB37Lx++9ddAs8FboF9oN87xt45\nbopmuZac9294+mz0+vgVpMv0TqR+XpavwTxPIJ5ToSSleEcODkm1NIfsg7mvoYniy3VI4n40y41h\nwh6Qy/V3d8XEn7vvBR6sj7FXB+aWK5ATEAqWS3X2SdW+8iikVEgj1WkTqlaWLdWiVNQEHRFPUaNb\ntiyPF7TdjKEY3cGMxcEBeaolUgRiGpg1B0iZE/QAykiKT9lcDKwv18TtAKp470gWa11xaRgHI/WR\nbYyQofEBGsBqXRjnA213RNPeYybHdPkBsVziNJNyJLiOTb8mnp6R8ohqYtm2bFPke978AvMusVFh\nHHuSjahkxpLJBrXYQWG2FNqu5ctPvsKj+/fwzjg4XiBd9Sc0YQaWGfrM2dkl55uWVXqG2bqOglSJ\nMbI+PWNWlM9/bUHzeMnZ2Vf5VRFKHvn8d30X3/r+Ie+XwFmpWv359hmlD/SXMGRl2Ga8CasnhfX5\niLZU3Va1RihNeq5ZZeAqjuVsydBnLlc9Y8k8Oz+lj54HDx8QnCcPNSa+dQ3z0HK5voRB2UqPvR84\nmq9oZEYXjOQFS4USjOxg6HtScKQUuXi2Yr35lIp4fAJ7EWYuU3jh1fZuQ66XWpmgXGPE3jHU6jWU\nCtpq07o8f9uV6kyd5JW6nnRz7Pr1AdhnwOVq/TydXD/C+blj5teAzF77JrCzNzp5/n7BTw7QGx/R\nzvmpcKM7nP7IrtObKqmy+7zEyMHhUgV0TWUCdkOSkf0kp3h3E8yn6BaZ5BduO0D3v58bLHwfsJ8D\n6P+kM3OozrmS09TJytUTXkyICcZtInglSy2Q66U6E6X+MyVOIYsiqBhWelSbWsCqdTjnCKHBewXv\n2PSeo+UxeRDSWiiDY3tu5NHYrvta6S8aWwrOe3IspFjZ4bipunvbNKzHEW+C62p8tHOKc57l7Ahx\nDYumwRCEBMymAAAgAElEQVSGPpFcQWZG17XgA0eLY9KwwYYNxeDivX/EF+59Bz93+iv0JRGTkbeF\ncZUZL+tPeT4LNbU+ZC6fneNVmS89Czfn+OSEVBJijrZdcHCkzBYLZpcNzy6UIQmp9Egp5GHALguf\neTdhH/wm6yw4P4OUkJz5lV/4Zb777e/ib7z3i2CBlEdccUhTs16TRFKu3n40szq/pD0+YFDDi1Jy\nwUqNSDBxtIsZh8sDnHpSMg76yJMnT9CxwXkFjSyP7jGsRsZNj3OG98Zs1rIde0yMcXPB4/fexSuM\n8YDFsmd26PCzhBcQMpvNwDhGNqsNm/XLDWd5EXsRzbxGZtiVBnzlhLTJKalWR6w6bU+FykQNKZME\nU+o1rhOL6lqugPz62NU5Mm2L3ZJa4JqNf1hmuXbR7hj5bXb+4WUKPJ6k0T0hZ49111HJteT0oe3p\n/Gz5mmJzi3nLbi01YWh3fBrV2kQQy5RwJWKk4CjJIdEjvqC+kLzBrmDeHhuv0SxSfW6TxMLkBL0S\nCj4O0G+D93PP/2T2SsHcEETrjDw18bMOy1QKZTTSWBh7UOfrMM8XnCreBYwK8MUS8zCvvS0jfb8h\npZ5UBgojwTmcD0ijtCnTzmaQPOMGYhm4fJq4PDsnDYmMYT0QDYJAFobeyGMhDTDGQowjIoWDrkNK\nrW0yxrq40LKcnbD94BzZDFPmWUZoKOLYrFaQjDFHWjUQz+rpJf/SD/4Yv/xXfoqL0RjWA9t1Zrs2\nUqyZplkLxTJFHEMqPD19guUFXmE5O8C5lu1QoAXxynzZEWYPCI3w5HTkfLNl7uc4nfGFd+EkRUbX\nEJoGJw2SI7F3rPo1JWZcycSSUGo0TdFCdjW5R7QOSxvnUKsdBI2ivqkTR6T6gwxtQzPr6BZzVIx5\n8BjK8mHD6bMzSs54q7Vb2oVHSuDy4gIs4zw4LzRtILTCdljzxS/+Ot3C8/C1+xzHOYtlw2LW4hsl\npchmPbBerUnbfzIcoDLFTVcdeM9FuGNvVCBXs3qu2FUHoFqu2mITKOuk434IwG8v1wAue6z8Sntm\n9/r7zs8PSywvEp64c9xeqdy2a3OFstftaz8CcAXsu07gCsyvPp7pZo0Klrr/GhOw78BcZfotyhXK\nq/dkX5BgdUKXiWnfYN1ewOXrWHNXKE4nvZxrQL8eKuyBtt0E6hsM/WMA/pu0VxeaOD209bsoU8sQ\n0av3mbMSU4beEbKQPbRTmVlz0DShJtpgFMtst2tSiJyvzjhfPWazXeHKbIpQUkLjWW8uWSwWrC8S\nxcHqYkOMVZ5QVUoysha8B8ORByNuM+OQyLnKPe3M1ZKwZsRuBFU2zYrzizOO/JIuNmDGMCZKW1ht\nzmnaOccnD8kl4QuMY+TxkyfMGsebaeBhOeZXV08Y19TaLKWAKFaEEgM5CsU1xJKIlz3LZkZZGOM6\nkgqs+h4pC2ZLEBfQLHgXePjgDfr3NxzNHvADdsLbesxFWtO5OaoB7xrMDL/oGNLIzB8g1OSjDEim\nJkdYuYoGsAztvMF1hjipWbcqpBRRcXgf8L4W4yoamR0sCJMcFuyQMAukPBC3CY3Q+Tmn40jRzMFJ\nU19DC93cEbwScyYOhWyRy/UF7UJBIkKmpSGnSBxGUkqszodX9Uhf2YvILEqp8c5W0ElKU8oUOld/\n+BXA63lCjWpRK1f/K1NbxKYY64nR78WoX5UE2Ekz+/v3kobgNlncB/LnAXr+EKBXrfwazN2kQdy4\n+hWg7kB7b9/esZv7qx9ip5lf36fdzMGZQpuR3XXrvl37ah9GajKaPBILGnzVySeZhQgWpBaw84L6\ngnmheEN9oexkmMkp+pHgfQXaPF9yed7yCeyVgbnTMg2fbh+53mEpk0YlW6IkhwbD/DSpQxOIcaRp\nlFQSl9tzaArab9hszun7S1LuKcXQvMDFWphqtb5gebCkjAUJifXQk7eJnGvWZI3Xhr6nVgaMkMaI\nFcNKnSUHanlXJJOGkWEY8Xge8y5+dHxP+21cppGx7xGFdn7CsLlEfMfi6ITXH32Wi2dP2Jy9TyyB\n9XuP+dEf+FH++n/5kyzaBfSGtP3k+KxD6tRnct8zDlssjrSN48HRfYbNQD8kfvO9U9r5Ew5Plhzf\nO6RtA86UUoS3j97i28MJ333vuxg2F8z9Ag0NTj3OKeoaFM+8ixzM7zEUIzhPJIJonaxD6wxD2axO\nOdd6XOdR73GlTs1nU1JH/WwKMQ8M0dPEhmYZ8M5hpdA1gZKFGDIkQWMt4DU/6uh9zyIUwnw+TcUH\ncYwM/YjkBrOaZet9Wye6QKAIaexJsUB+9ZNTpBdIGnIUjHylJ++yz41cv3d2YFUB3e2BqLNrMFUr\newBOBY1JF67gfXsf1wAu7DHya6fnh6WW50Wz3I5o2QF6vgnmto9Qe+09oL4J5LeP1fbOAXqbnMNu\nDDGdekN3ryeWSYcpV9KNodFIwZA9AL+SVgJXyUI6LeYVvWLmTMycW8ycPca91/564H67/Qns1U1O\nASBGcYaliY1L/QJ0AkoMYixTDLUhSciN0JgQSIg25AyQ2A4bynnCa9V6m9bTzj3jZSSmLUimiJHT\nyNOz97FQww/77YbUbxFzpCQwFgzHECMURy42yUAZpCBaa20EBfWJVAb6bSENp8SNcZAWNJ/97SQC\n6hLDZkPfbolWaC1xcXHBdtvTOENcw3pzyeMnkd/2z3wf3374iC+dP6FrmzrZhUvEWMh5ZNikWupX\nCqETNpstJSqDFVLKXJxecPmlU45PlizvLXj7rbdJkplReNDc48de+31sVxeIBtpuhrmGzgfECaQe\ni4XgApYKQRu6VnFlxUiqSVuN4GcJaaB4T3vkCV2o36EBpeClQbyvsz85T+sDcQ2n41MWizldM6dz\nRtKOYbtFSyJaZNxuKRYJMyEWaKYJRyxTJ722ydGVO9qZUcqI9wtECzkPpKIUAttNz3b10pOGPtaS\nvUBoIhlFJva6+73bla6M7BjxHojf0KN3USSp/vMVSLPrCa6BZH9bJ4K4zySnO9rZTSC/1spvJuCX\nvXu4GZ7objBzJr/AbfC+xrx9IJdb5+zS8pPVJKSbzHzvM9tdbLfaI4m7dtndi+xAvMosEg0CdYns\n1WWpIG6+UFyZJiC3qqs7u+kAfS4r32vrrr377D+CnX8Ce3XRLFRnp5venDHNPKSCSZ2JHZm+yDSx\nA6eUXGPC5+JZHhuLZQCFGCMlF5wbUbRmSY6Z87gmxcSQypWEMw5rxn7AtLJvwVOyIpk6IbPVWWzi\nmJFcMG+4UOoUbmSKJbLWDDzLA3mEvBXcsGZ+Xxm2PR4hIog6UhaOj98kAfdP7jOMI6XUmizDZsWD\nowP07Jw/9v1/mL/wt/8az+IGZ4Y6x+V6YOwTnW85ebAgdKsa6RIjfexhrJqxWCb1I6fvnXF2tsLl\nlrBQ3uge8mPf+U9zuT5DzNO0HUUDTjwqHssjIorzgeAdkjOH8xNKWbFJI8nKNG9nxoqiQQjHSrME\nVNBGKWqM25GZzkGM4AOHBwcUhX67QWLDxdOe+eyEEDyzpqb0b8qWNCTUeZyf5v+cCY1vak33bWXl\n/ZixJHShSjpd12GW6qhLHaUk4pApI3VKv1dsLxRnjuBE9n7/OxlD2GkHu/3KlLq/Y7+SrgFU8kcM\n8a8dg7eXj9zPDk927Hyfld/sWG4C+c2QRL/XhmuMei5Ht1vtHdAXboC+u4Kq6Q7lWqa5aiM32895\nJ/UcrkHccwvEmSQVmTJBtZaRnoB8F1/+Yc18D8D11vaNOPfbQH5r+xPYqyu0JeAq556cnzZlfQqm\nBZHrL22qfA7ZKERycXRLT9u1PLh/hJmx2W4oxQjOEZqA84XZQST3mfXlSCxGjMPUO2dMM0KtoS7e\nYSaknJFSZ/BxuabVS7FJigB1dYKLokKWiNMBESW0gbwWNpsVbCIxxim1uGrwxaoEfvLgIZeXl8Rx\ny70HDzh98ozFfMH55YpnTx/z+77n9/Ozv/B3mKeWbCMXvsdLgGHFW68/IDYXHB3P2SCsL9Zs4jPy\npqGYMcYeqHVrxtXI5mtPmR04/uU/8sO0gzGUCqAFx7xbkFOiWERL7UC9c7gQoHHo0Zz5aJQ+0W9H\nyFXbFXGIh6OjJd3SQyPVUWwF75UWx+FswaxbYsGqBj+bk1Oh34xsVlsO2iVSjG7eMQ4joQ2UaDgZ\nEQHvhWKOftySciH2I3mAMho5gPMeH/w0f2otqJZHmypDBlJZvczH9r8B/gXgA+B3f9RJLyKzUEf7\ne45IRVFMCuxG3FPEiVJrsDjZAXjC75aJmdsEGjerxMpetdjrtu2DPddBiVda/dWyz9BvSi3u1mjB\nP8cB6sgfAvIrBX2fnV+t7aoy4u1jVyz/Fpjv3s+1OFT3Fa731eMTmO9i+EPVyK+APHBdcGuXPOQV\n9Q6dol10x86dXWvm+zKL7rV3IL6Tua7OeQ6Q7//fJ7BXmAG6S1avYFBjzKcnfHJWlGLTl1WdgYhM\nvYAQupYmNKhTFvMO55XLyzWqjhACOSe6pmXoRnToKdtILJmYDYfhG0ENGjd5SJzUSZvVanXebGgW\nSkm1BK0qoat6musSzlEzGItRnGBqEIUHBye03Zxnj1dYGhGUdr7EtQ0pRZpZy3bY8Bu/9it085Zx\ne0mat/T9yNvHh/yOkwf0uXBvecDPffkf8tuOGn5p/ArbdMqb91o+8+abfHX9RSw7hn5L2uQ67JvC\nKUsyXPIcmeNP/q4fZAEMcQR1uGZGM5uTpkk9nBqWqoThg2K94WaBb3n0Nl+9fIp64WLYkPOIUCNZ\nmjk0C8HNBBcKxRlpKByGGSftnGU3x/uWIUeiCEMcq1SmPWnTkw/nmBSaxrNYHGCjMUqtYS5WZZU4\nDsRxZFw7hnVh3ERmszlNq2jYEQCllEyORr8ZWF0ObPtSi76/PPtvgf8c+Omvd9KLRLPsfrz7YGnT\nqNSmH/ouDrwm+GScZLxUZh4kToAeJ1YtN4FtCko3ucVYRa7OqfkaE6Awta9u77bMYreA/MOAfjui\nZQfmV0GJxlWxrKvXuAHa07Fy3d6BvDd/HWfPnigk+8Au18dvgH09v8hOa59klp20MgH6DsgrM98B\nep7Yubtm5lfhidTlCoxlj31zi6XzHPlLbp338Y/N17NXKLPAjguI1DosuQiqilEr5dVUf6sVC5Fa\n6EqVnMGpRyXAlPrbNR2DHwnqa5pzFrx6VF0tteugxIxJ1cDTWJBGCU2dpci8oqUmL42xzmakOCwU\ntBV8V6AD1xWaUL3ZJg5LCsmhCXxoOZgf47xDqCX2jg5PmB/cQ31gs63ToC3nS7YXZywXC778la8R\n/AlDGsibC37gs5/lbEjcbxryesvWw1thyalsmR03vP3obeRsQPt36e2QJ3LOZhw5aB3OOrabzKNu\nzh/73j/Ad7/9Odbn55gIXhosBFIpzJu2xu+nWOcgFaUfexrn8Ys5rd3nMEVyP9C6OUMZGcdIjpnQ\nBKSp5XUjhRQ3eHN03uMxcsp1xOKMGHvGcSDGkZRH/GmAzjM7bAgzz+HhIZqFtC5E3yPmSbGQooF5\nckzEPILr8DMlNELja/x0ja5xjGON5slJKQw0By916ri/BXz24056odDEqUaITsz7GjJ3J0ys/Spb\ns9ZVcZLwGvESCRIJGq/A+wp+99u3t6/a14AP1yz9+ujzWPl+RMuHpZZ9EL8Gc9vDrz22P4E33ATu\n67Zds/VSK5XCteNzt74elXAdUy5Q9jq1MnVguzXKtaNzF0se9gG8ViktXskTK5crZr4H5DXZ+RqI\n90H9lp/iQ+D+oX1XX8U3ba8wznwnn8ikn9f3uGtf6YZWlTyzCvrF6jQ7Z6crHj1a1PC3XCeH9upw\nEqCA4nF5qqlcMsUySTI4RYBcCi4Z5l2dVFiENPWWMkk/IuCCwzUFPy9oVzU1uoxz1MJYY0vcVnno\nzdcfcu/gHoYnl8I4Fs7PLwlN4P69h3Tzlvms48u/8WVQ5YPTJ9x/7TMcLmtNcU3Cw5PXmF9eUMaB\n3/Ntn2UUJQ+J0+0Wf9DwcPEaeXXJw8NHyJHjV/JX2G57HsyOeLzuee/0knc+93n++c9/FxeXp0jb\nYmOimS2ZzZdTh5UpJUPJ5BIpJmiAUjIaWo6Ojxg2G9YXG5wGyAK5SmEuKL6x/5e5d4u1LcvPu37j\nOm9rrb323udWVaeq2477krax3YE4MlGDLQLkARGQEC9EiggKSEgkQkIiSIhLAgGigJKIhxDkByRE\nFBIkFAukJJZoy0Gy46STttPtbqq6qrvrds4+Z599Wbc557jxMOZcl33OqTru6vLxkIbmmGPOvfbe\na831zf/8xvf//vQpEn1CRE9MHm8cQfUID0JHXPK0fkEXrmnblnXneXx9xobfwz1uISw0ZUNV1VRl\nx4JLhNSkKAnO0bcOtKaeloggsGW+kSBD5kmjxHc93gfW6zVt6wGP1J/wG/FDaO4FFkBFzECtUl6P\nGKFyvPByZuMoK0womY2yMpiP3WFEfwDW4+sc8sRybyxIYtzftXQDSW7cDp6pNX9ewtCOatkD8y2o\nDwva2/EI7ukZYJ6BPJeVzGUht08c46uKG10eAneS7MBc7ubSaG9rxHacHRIzoCsdCFoN3jZqy5nL\nkTMfF0H3I/NnbsVHHONpUP8E7aWCeSIXXWD4QPOjZb6oZALIqfgZvwU+paFIhWC56Hjw4VW+Sx4L\n6tJSFQUxJFyfbxPe50VLo0uECIjksj+JVIQQiKknBkEIAZEMosiyOu0kXRfyh1bGLbemjCRqjzQC\nlRSdS2gKlIIf+8wr3L/zBqVNKKHQ1iL7DZvrJe8trjh/8B6mbrh15x59DFwvFsjo+PwXfy9VWLBe\nXtJePEQpgZKCZddz//X7hJRQybD2HUaCEpo/dOvHeVQ+5u0Pz3jtjS+Q+o5mMufNB2d0dwL/2pe/\nQlgvM9+dQOuKJOTwJRKUxhB8SwiOSEQlCC7gQmRaVpRJY2x2Z7S2wHQKjxgStjQxh0qkXhCcx2vJ\nlWiRKWCUxS8v2IQly25FSjnhyneezcIh5PvMZjVNM2W92lBXJWVlgVzCbrjH4J1HSYGyBq0C1hq0\nAaHIXHmK+BDou6x6soXCJ0U9+9SLOn9sU3/xP92N/8BXUH/gn3vqHCN7bHQYekzqMdJh98dpGKce\nI4b9uHdsGBvRgzgE6l0C/j5w741Ha1mxg+pxbHAY3FPuh1uqZA/eR5JlhG6H2QK9IOEHMJfp5s2B\nfHMQO2CX2/GgXBqqKI2JTQGNFzudzPYZIA06GqG2+we3lqgJQhGkIsR8XoyKGOXQb+ynIYM5im2i\n0ViqL8VhwTUNdO+4SDta7+5vd4sEH31s3D4vBjn7Kjz66gtddy8C5s9a9DkB/jrwGXKl8n8TuByO\n/SfAHyeLdv4k8Hee98IpJUYxVozkLK7h0QqRH0UT5DdSDo9PQzgRY+LDDxagMy/np5ba2FxdyHuk\n1JlzjyCRGCmRVYW1lqaoiCmxWF/T+w4ZZS4wrA1eeHwA2UlccvkmYHKpsyjjcJfJNQwlJc4lZs2E\nN159g7vzW5SlQTrJrbu3eYynlwalJc20RhnFo4fnvPraK7j1Gmg5//D7vHZSU0+mXJ89pNKJom44\nf+e73H/9dVbLJT4K1strbp2c0BzV9N2GL772WU7NlIfX55gY0FXFxFT4VmCtY7HsECGvSkRPvnmF\nQFHUpJhvkr3rSAg2XUtVlxhT4FXACI2qJOXEcrSeAi2PVuc4kWudbpxD94IQe5KUNFhW/QYVHFIt\nidrj8IToiQGCz+b/wiWun1zz5PySaTPDSJUtb1cbpNC4PuBd7lpphJQokbBKY6xAm6wsgGyeZpTG\nSdAGZB0RpcbYT5Uzf6F2+h/8ycOJtHjqHBMzEBuGbXK5i3445jAyA7kR/ZZS2Y73jmdOWO6i0b1x\nRG6j1nG8BXGxd17Kx8zgqzJC5ihJHInRhNgSLA69B/Rpa2MbkXh0vhGktAf/I4Cze34Qw3zaG5Of\nSGQcfw48ik5YOgo6Ctph21HQpYIuWjpRHHYKOmHphaWPwxZLHyy9tzhv8c7gvMZ7Q/Ca4BR+8GyJ\n2y6HIhZyW8wiDcq3rTRxlAKNoL0/HgH8edTKR9Esxz+X+9i++V8+97p7ETB/1qLPnwb+LvDngf94\n2P/TwJfIpbS+RK5k/kvA54d/56kmhMh0R0xEYRAxgczWZylmmBcygoSQsvrFJ49SiiQEzkUuHq+Z\nFIaYLKKucqQYgCQJwSNSLmBRlBpIlEVBJUu00QQBcXnNuKDmZYAUqGpDcI4QNFGErN+NBlyOSGNQ\nRJONf7QIFLXi9q0JzdGELrVIjmhEzWYy4+F738ZtrrGFZXo85/5nfoTNasW9z7yGiT1WOTyJsw8f\noG1e+IwhUE+PObtcIFzHxeUFfcg3rSQCrXOUseLkeEbdlDx48D7HdcP19SV3X3kF1yVizHdCWeQs\nT6MN9IEQWlRV5PUJYVguFyA8facxheGaHudabCGo64LJpGTTGsqiItUOv8mOG6HPKh1lFcSED5GF\n92AjSjrQAW0lYZ3XQoJLBCeQoed7b3+fqmgIDqRYIkNis1mCiAiV6zgW1iJR+Ys9yEdDSPlzIuVr\nIEJMgYQnSUlZKpSyL3BJf7ptlq4/9hyDQ28BfODA49542/th32PoD46N432aIQpJkiPNsAfw+8A9\n7G+ph73IXBPy08JedD7G/aM6JCaJRyExB/M7Q1xDj0WlsAXnvRIVBwC+nRcfcR6JgBrAuaDD7kAc\nS0dJh6VNBf14fADzfrwBiIJ+2O99gXMmd2/wbugjqDtN2AL6YJG7D+Qe8AOQj2C+D9jj+CZQv8j+\nJ2gvAubPWvT5V4F/fhj/L8BXyWD+R4C/Rhb7fBd4C/gZ4Fef9cIpZUoCEYbFTQgx+wQCyJQrleTM\nr8yXjzJGISQpRtZXPauZQ0iPjIEUhqyzmGVsiJyEpEXmxQtjmFQNUkuCCHifCKHH9X22BhACoRJF\nBX0n2aRc4qxdpxwtqkBUWdEicFhtiKrFTBRmIghXicZWmInBO7D1e/Tdmhih3XQ8OHvIUdNwfDql\nf/SQ1fKa4vSEJAztZsET9wTvAkkIGh9QViGVyU8aSYDQNFXJBx+8T6ELJtMJGsn14pqT+ZTbJ8c8\nevwY5wNFMyEkgUgKiUZLja2nhG6F7x2r9Yq23WALgxACbQrea694dPGItl/jfIs2iaq2KK0Irid0\nCWTEC49GD9yuQjpJsh6VJEJICJ4QsncN3pBiVp8IFJtFx/vffw9CxGhN9JHV8oqQ3GC8JgjR5QxG\nJEYNtFgccwUEvesRSW3jQlMYhByM1z699teG6/4UeBf4z8jBzkGbxqcj8ZttBGg9gjl+C9CaZ80N\nY9xTcyNXHLfbPUCXe6A9AvresRiH/SFqV4QtkOsDmgVIDJG3IhDxe9F62lsSdRgMbhvVj9H7oZXu\nLhJ/3nEpB8BPkZDULhLfRuiWPuUovU8ZwPsB4HsK+gH0e5HH/TgfLM4ZvN8DdK+HrghB5Sg9DIAe\nZAb10WRuG5nzdNLQfmS+D/LPAvrfJQugd4GHw/jhsA/wKofA/R45Qn+6javnMW1T+kMcFjsZA/SA\n8IagQ66oMixgkBIqJnwMhF5w/WRNchK3ajGVRaGySiP4wRQ/5htFCFhrsVZhC0uyiY13XF30BJfw\nMWLqiBQgC5WLB7ca0WZppEMipUeUEYVGmoC1AWU6fOrouyV3wylet1yfX3F1scFtlngXqZo5pimY\nTQtunzQ8+M7/x2tf+Az92xdcPHnMUTXPj7rKslo8JgKN63nt+DYXj6+pSsv51SVR5KeWJ48e8JM/\n9dO8//77We2jNNYK2nZFCjn5pGs7rK2zUZmyJBQxQtHMIEW87+i7Da53yEZhreX9Dx/w8OxDTKHY\n+DU+OpRQTKopm8LjLjuS1uAkxoBOCtxwE1aCpBLBZ+oshYhwueyeiAKjBGL4G1arlvPzcwppkBK6\nfkXft0QB0uTkLp1y9D0CeZQC8BmcYkTECFGTi0InlPzUl4CeKt78rDZ9Bq2S2/g8DjqNOvEBOIXD\ncLg/WsmOevKbxzKw+xy7yAHAh6pOW9A+mB+OsUe7yIFDH7j2MatUDzH2aBkw8ggj1AbU8N+ILcB7\nFD6NLLo9APMDkN5un54TZGfHQ5CPRNQuImcH2F3aAXuXcvS9i9ztIYgPAO+8HSJyvYvMBzAP2+0Q\nlW+BfKhGNCjlDgD9Jjg/D7CfNX+zf4L2w7j6d4Ta848/3YTIShU5/Kc+DWm3eQEkRTEcCyivkCo/\nUuvB2CYr1LPy5OLRirDRFBNJ3aT8uK0VToCQOYJPQ0TfO0eaJKQRNKZkWZWsFh2bq462zyn/1kYC\nufaoTJG4AYhIoelSpIwSpESpSCRRFZJFd0mF5f7RPRZnjzh7/4zF9YrgHe16Td0UTJoJ09mUR4/f\nozKGx9/5DovNGtE5Tn70VWBDSvD48Rnz47uAZO06jLVUZYk1CltMubp8Qj2dsnEdi+WCGAPTozmE\nQLu4Yr1ckWSB1hYhVDaicg4pIiZqXJ8omwmbboVQgsIUkKCZz/n6//t3+O7195nMakLRoa2g8x0+\neoqipNWe1nuUTBihqKyCBC55epdziJTI2asxCYwqQYTsPaI0KUBZWGpb49cOT49SOaHLOZ8lYSn7\nm4sYiR5SJJeUG4yU0iBUjmG4aYz7Mddifdlt9lww3zWNQ6U9D/C9sdoH7hv9Wcf2wTrKofDwuK9u\ngLzKC3pRDtApxK50XRIDkO5MtLKx106umI8mQO+APan8fcFsI/nxJvC0DcCY0fqsUnQjgD89F4Xa\nAnefDsE6g7jd0i6Hx3fbftjuqBU9ROga7/QW1INTezTLjjPPkXmuGZoCmWrJtcg/vu/ryJ9KLuKl\ngvlD4B7wAHiFvDgK8D7w+t5594e5p9rlB1eAyEqE2mIqm3OChuhckhBBEmSmHESQoCQpBJTKypYY\nIv8k8DwAACAASURBVMRI3Hg2IeBTNmKKwVIU5ASSXK4bABGhW65J0yMkAiUUZaEprGEtBG4ZCcGR\nZpqkBk5YCGKfF2b75EloQiEgSrTQaKHwQtJ3lyT9Ov16zbSe8cRecv7gzeFLElgul7xh7/Phg/eo\nC8H55RMenW34/Be/wOrJFQ8ffpeT+ZzQbbKxWAJdWJbXa6Lr6LusfV1ulkilObp1j4DAu0DnNtT1\nhN4kri8fIawmRlCmZNX1KBIhRmKhUMIihWd5taYsSmKUNJMZnW+pqorvv/Mhb37wXcojQ33LUM8N\nulQ4Jyh1wcONIwiQGowM1DqB0Cy7ljYALpCSAqXQFJlRFZLMeGlcCkihkFHgg0erhHOJtvO51J3J\nfDhe42POCwjJk8gePiiF9xGlJDEJBIHLBy3X318dFCl4me35kfmubYE8+Z2vSdoD7LS/DQfnavzw\n8wGVfLZolSIDtRI5ilTDfpS7+VHFMkbYYqxBups/lCEeCBp3nPmARjGNQdVhFH2gd0mjjHEPvEVE\nphv7N0Bf3dgPqAGw7Y4+GYE73QDuYa4fI/NxnApcsvRDVB4GjnxHseS54HecedhG52Jv8VM8zZl/\nXI83xjznvE/QflAw/1vAHwP+u2H7f+7N/2/A/0CmVz4H/P1nvcD81SkIkVP0Y46ydr7mKUfUKZHC\nkPE3VCeXUmTXvkHSGHwAkRUTLCObFIneE3zCFCZTJjJz7M7HHAX6XFItiYQVBi0kUghc5xBB5IzU\n0uGdgRSIId90tJXY0lA0EWMtRieUyrKlTZ8oS8OtZs5ydcHR0ZTpfM6js8dopdHGsF6vCX5F6zQh\n5IIRb37nLe6e3KauJlxcXjGrimHBJbG+XhOTRyaPcz1Iw/n5FXdv30Zqw/n5EzbdmtksV1Ty3jE5\nnnN19ghVa4QIHM3nrK87los1J/qU6wcfoJSmKjTrLqJVgQue46NjZFnyW997j+V1pOs6QloTpGVe\nnlCVBcJX3Du5y9sP3ifKiLEVQnSUVuGTJoQ+fz4qLxRLEdCUKK3xydMFTyEtWmtC71FCEIKk9x19\nF1BCZBdG5FAfFnoXiGHwxRHZckEi8ueKJiXFnTdqbr1S0cxqtIJv/coHP+Bl/cNps/jxC6Aq7QBc\nDdSESuEApFUKWzA/3B/PDbvIfARvlZNeUhTEKHcgnoYofQvPNxZE05YZBwayc5QMs8OZDNMqfz75\neWt7bDyXUW54A9D3s0f3gVylQ1DfnZO2x0OShyAeR9DegfXBeDi+D+wj0Dtns2rFaYLTA7CrISrf\nB/Ucme8ol8yZ58j8GWAOHw3oL3L8E7QXAfNx0ecWu0Wf/xb434F/h500EeCbw/w3yQ8g/z7PCZWy\nxjxzcQkypRJHf3NB9EOVkr1HaAlILwkqgYh50TRCigkfHD7GfNNLiRQ8MYItsn4UAkIoXAfBJ3zw\nSARGGQqpqcqCsrSsN5Fuk61ZhXRoKZE2RzimltRTjbYKpRNFpdBlRBeC9SaySEu64w6REn3boqsS\nqQV9GyB52m6DCA6fVsyP5iyXTzi2BZOjGYUReJcLUSehWG2uef+DNZPZKZMCrp8sOJrPEYAuCvpu\nw/XFw8HPRlFOJkghuLq8YjKfEESBtiV9F6mbUybT2/huidQWkRy9kyyulsyPj1m3C159xXJN4sni\nMUVZoFV+n3rnuV4tmdeCQlUUtsCgaBeevg1UShCtQ2qPitkMjSTQUmCVxUhLxBCNQnSKvvdZ1w/4\nIPAu4kKLEqCrImftDrawQihKawgx4J1HGpWT52R2ABciuzZenF2ilaGZbFPOXmp7sch8B9AqBeQA\n1HLY34L1/lwMT/2cSrviCVGJnPQSx/EA6Dei731FS5SHx0ZIHiH6cDskGu2XgdtuxeDEsTtfpLSX\n+h+eCd4q7YP4QO+kp4E9Ig8i85H/3s4lSx8L+mTo4wDge4DeR4tLBS4OfLlXNyJw/dTcIZDv1Cwj\nX37AmcOLgffHzX2C9iJg/rxFnz/0nPk/N/SPbYksN4wyS+nEsLiX/zFxeBdIZD8OIiSRo2UvUEnT\nxQ6VJMkHYgutD9AUGJGIUmWb1yEm0UqTosC1DqUMMgoKW3J6JLm4WLK67nAORKEpm4CUCmFzAUFT\nSaRNSB0oKqjKAt0EhJG0mzWb2LNar3GrBWVliQTKqmQ6LYl+g0qBtmspTOL64jHz4xNiiFyePeH4\n1oz5/HUuz7/L8a3bGGk4O3vAnbs1m+U1znU8fvSEk9unXJyfUxclH77/AbfvvIouG9o+QN+iUmDd\necpGYaylnh9x9WTFZhEImw1S9ATXsVhtODm+w+Lykr5f47nHd978FnZWUAiJ1IkYNckn2n7NUidq\ndUpZCKwqWW86Fmee6r7Ahm5YDA2EFLFFSREVlbTg8/WO0EgZ0UoRXcJ3Pa5PdJseZKKeWmLKNxBl\nbabQJBRGo2Si144+xHxTFybXGQ0RrTWnt0/QOqBkgRAv3wJ3BPNRejyO2duXcQfGh+O4A+oYDoBc\nHgD67txMrYzgPWQyxjEi36dVdhH5doE07cD+cJlyP5s0y1ETQwLNTVIl3VziHP0f0w6gb4C62gJ5\n2IF3ymZi+9ml43gL5sLu6BMOwXofzPPY0keTx3EP1J3JvPjIjTtFdDtQD1tFy6BkOdCX7xZAtzrz\n8YN9HmDz2zj2CdrLywCV2egqZJxFAklJpMhceH70yxmhWZImCD4iNTmpaGDxfPTkstyKFBOui5go\n8Sabz8syYEWW9Skt0FoRgsN7i+sDxlq0UQgqjmZHLB6d4VJCFgJbWmKM2aRKKuRgoUuSCK2QtURb\nQZI9WhiOqFgul7TXV7g+UBYFvhRIOopJiRAOH9ZIJSl0jXMdt+68RhKB2eQWq/YCpWuqZgrGMOtX\ndD7Sx0BEsLi64PjOXc7e/z6nd17FaI22NfV0zmqzwMfsh6KU4frqgnW7Zk6J1oIuetarFSH1CNfT\n+5xs0qVAM59ibMXZk2vqkqzrluRMz5gXode+ZyOvIWhmhWW5MFyuHfXCoBVYrTEm12nVPvvp+CQR\nkZwF6iIhhvxZxYDrezoXs1xRJoTKi7AxZcDOstWA1gKtZX7vnaJ3ibHqgFa5IpI2BlsUQ7T+Uish\nAjB9AZ35FozjsMgYw3asYj6WQX48FrdgLrfjfCxpsVVZxDAAuX4GRz6OZQb9LBcdovi9pceAIqbR\neWWoOIUipnEBdA+Wk9rbV9sF0Qzm5L/zAJj3wFqELYDvTLv2AT9sAT8lmQGZZwD5XndxAPYwAvxh\nd9Hi3B6F4nLkHfwO0KOXW6CPN1Qt2wXQ7B5xuADKx4w/7vhL4sw/cYvkL6UQaoi2RzVLplm2Frgp\nm2uloUBBjAIE+OhJyJy9idg6sSk0yUHoPcEaYp+140nkC1SpbHfbtWHQPZfEkIHa2oJmpll2Lhdk\nyGbr+N4TZfZxCV4SRUT2CaU0thQgJrTLC76zepvX6jtYFNoE6kowq07wrmO5uiQGqKqKqpzQTKY0\n9YzLq3NOT2/hXK5vutgskWqCtg31ZELoV6QEXd8ii5qrJ5cILZjdOuXy4ohmOufhw3OaCjabJUbX\nbNoORaBsTvBuyWYZuL5a0fc9sc3KFFNXJBWo5lOKpsBMJlymd3nl7l0Wy3O8CyAkIgqCByd7NvqK\nqpwyO1FcrQQRjQuSPmlUr9CVIIqU10FCoPMd0msIEhf9VhseXEAkMEKx8husVUPWblbGeO9zKToS\nRSGQylMUAtqENgUpBYxR+C4hpcEYhZCR3gnM7wI1y4vozDMQZzCWzxirp47dPL4bZxAfAHpQpmx5\n8jFS3kbjA/c7APrNyHwrMSS//35UraQsItiXJvo0ChiHJdq0c2YZpYs7V8Wdj0umV8IW0HNy0Q7U\n920ExnFE4hiyOMUO0F0yh1TKHpC7aHagHvbA3GuiU0QnBypF7pKDRmAPkhjGY4OiJYw3TXZR+bNo\nFp6zfZFjn6C9ZKMtgUh5DV0MqodcqGKQnA3mWlsnxe0NQCKHZJ7cJChIDnyMICMqWlrnUV1Bl0Cb\nTLHopAhekKLPHi5xk3XrgEZQViV9DKQYcS4hokDE/AXwElzMafFJWDabSFGV+akhWS7jkrbdgBBM\ny4qqqOn7BSkYYoq0mw3z6YzVcklTV8jUMZ0dcXpyF1lPaC/PKHRFSoGjacN13wAB59ZcXl7yhS/8\nFG+9+U/4p3//H6S0mtdevc+mbzHS8+T8Els3rNorgosU9RGX19ccm4aiqtGFo5nNePDWO+i6JKWE\nD4FKC6Z1wwfB87UPv46oDRN9ymrZ4vs1SkWQOTV76ToaWzKpBbOZ5nwR6QL5SYZEhSbFQCAQO49M\njtCFLI2MgUREiUQK2egspcFQCoFLDklCKUsUgSAcRheUjaYoBFpbqo0i+Gx7nFJOKGqaGh8CkC16\npXz53iwvkgGao/Ghh4iIKYN1SDfmd+On5oe5pAXRDPz4ll4ZQH1vkXObTCRvcOlpF7UHNHIA8pGc\nTOP3kz1NedoXSRpcGlXywzaZgWbJ0K+3KUV7IM5AGe2D+Hb/EOwjEifMDsSHyNxhDiPyaLaA7sJA\ns4zjkAHeOUN0MvuW73e/15063PdjZC620sRncub722fNfdz2E7SX+kyaq6pknlQLATJAHLI7Q9ou\ndiU8UgzRBmN1IIDBYU0ltIEkE77NGnWlBUJE2o3PNrpRUVbZzVCGgJKaruvw/YqiKEm+x/sx8SQv\n5tAKnA94l1U0MQS8yFGnkJ7VpccWES0kOEttpyijaYoSKxTO9RhtaF2LkpLpZELfe+7duU8Mkfnx\nKdPphEdnD/mpH/08b12eZ6fGYsKkabh6HJGyRqkNVTOnrhtO7rzK0fEJwXVoZSkKxfnyQ2bH97i+\nPCemiNEV6/Wa+UlNiJHgVlS1xRqNtJariwuOTuZ0qxXN8YTju/f4tSff5YolZVmxSQFpNAoLqYMo\n0KKgjAJtHUWlmM4VV30u8tF1CWMNzkWMkgQXCFHgeo9bRdpNIKbst14UxUCjeHyIGCNBZMWRMhJb\nZz8WZQyzpqCoPZNGItC5cIXPpfNSTFhboERe3C5sQVEaQnAv8YrO7UUWQGVMO6COCTEAswhpD6yf\nP7+9AYSYo8WbvHc6TOnfRuWj5lyJwVhqBPP8s55cIB32EoKSQuzPpZ3BlsNkuR8DsA7RsiM7Rz7P\nWXHbxa6wxSGQH56T4gDmYgBy9oA8Wfo0/B0jmA+RuRt9WKLJ22DxTm+lhjuQzvLDnaZ8J0d8Fm+O\nfwZnvr991txHHbs5/gHay/MzT2mIrkBqgd+ueyaiFKihZIqQmX6IMX+gYnDf8gMrJ2RAaY0ygUJl\nbtYKgykiUgX6jdiuVbgYKJOFIDJYCUO3bgmdoO3WdJsVQkREBNcLQvLQ5d8XtcgyxShyav5FYjHx\naBUoCrCioqprmukpssuSQlMo/LrHO09VVdTVlN55qqZkOplyND+hqics255v/OZvUlWKZbdmPjki\nxEBSFUUhuL5wtN2G1WbFa699Bl2UrFdP6HtPGzec3r1Pt74mSk30JQ5HVVbEEOnaNSTByb17nL1z\nznJ9hdGZs/Y6sDw/Z/KHv8B77/w9ZMqValOQSCnQFoQyED2lMAgCRimUXCO1QOiIV4pWCGwK6B68\nEVntEMG3Cdd7iImQIkYqRBBoKcFYlILgPEklJpOaujFUpcAaibUWrSRahyHtuycGRe97lBCkmAuM\nMKyxCJmtZMP22/Xy2jR9fLUjGTMYizAAdUy5JmVIGaT9cNzfOB4T0g8/FxPCp+zFfQOUd9SK3CUQ\n+b2korjHm2/VLoIeO2D5Pu0Shyj9aaMtl8wgGRx13juVCbAD5HRYkUhtbwfPAfYb50SRwdyJvZvG\nHs3i0gjieeuCxQVDH8wwzvpyFyze62x/6yXR7RKBottF3mMUnkFcHCQNPZXOn9+uXfsk40/QXh6Y\nB0EiIqTcVlRJMRMuW9dEBNqInCASBGHI+08CclqRpygMpoo5TVBqunVEKT/YqgralcsfSJIEF1FK\noI0mpsB0OsFIxYcPzoltP5hqRXwX8MvhMZSIIlfmnk40qzayWSh8hNVZQsSOZma4NZ/xE7c+hw49\n63bBUVOjpcIlMEajtcWHXLNSlUdUs2NcSLz7rW9w53TOSim+/s1f5/bpPVRR4SKUdUV0gcura964\n/2MEn9U1PgRIJaoInNhjgrVcXTyBviWknqqcEZNntXiCKRzz01uEbs3F+UPaxYLJ8Zx+s4LJnNOT\nU2RzTPKK6/WSTbvBeYcPHiGhjZ4YPHUsscmiRfYvlzLLApUGgaPz5Ci7MwgM3gWCz74wkP1qtDSk\nFCFCVRhi8CRtUI2iqi1NbdEmZV96JMILYq/woqewJUYqVq3DC0WMASfWWKNJybHpEioafmjfjGe3\n18mGc3fI2PZXgb9886QX0Zlnr+7BRGwAZRH2ur+x/YhxvEmtjAueI5jvpfbHMGrQb/LlOwUKsOPF\nB+56G60PypXMkZttlNxR0qWCljK7GqYS2IG5HuqV6mcA+b7d7n4d0QMwlyKDNDtAd2kPzEcQjwa3\nBfDsu5IjcnOQxp+c2AL6bry3PeijwZbYbgkiL37ejB1uXn6/3f1P0F4amAvIQC5H7lSCyHdgISJB\nCIyGyWywpXQRpTqElMQILibQimoimcwU16uAELnCe/KSogQvIuJaZb47RgQObRxCWrQumE8q/LTi\nerXifLPEWEjOoaIgulzIQolM10xmiuYIpiczLj6MLC/XuOtArzUywq2TiujA6RYtBVJY2uWStu3Q\nuqCsKrwPiKRoF1e8/fiM777zPd7+3rvoJPi9P/F5MFO6WOGMwW06YgpURnF8PCUSOb11h8n8FKUk\nLlmulw957Y07lGXNpX6XTjmOJ3Ni0KzbVY6ijaVvHa6/onVr6iOFNhXNkaQ6OmJ+OoXQ8+DBIy7O\nL1heL3Bdh1GCVA6GV07hfE+pLTIJhLQYK6CQqABJC4IPWU8u8hMXnvx4LgVSDIubQqJVvlGXlUUm\nhSkNfQoQPWEjmBRHSJFfrwsglcJLhUmJbu25WgaWXYvre5RSKJ3NvpSCuimpyk/1knbAfwj8Y2AC\n/EOye+hv7Z/0IjSLiNlEipgQIe8TUi7uERL4EbDZG+f3dTfO5488+ehZHrccudim8Ec/qFaC3C2W\nxt1i6RiZ5xBrR6OotLO5ZbxJ7MHsGCWPQL6hok0lLSU5rWgEZv/RW5G3NysV5RuAz3LivcjcMW53\noO62oD5G53vA7s3Qc8LQFrhdBuURuBmUKgf7g1PiSK/s5Ik8DebbD/gF5z5q/rfZXiLNMsgREdlU\ni5iBYLhohAg0zZRmkkuCrZYbjFdEEkLmxVLX9zQnmqqJLNsOIROm0LgAQgWUDJlzH2hApSTIDoRm\nOp0zm02pqwKjBL8RF/RpgfCJMmWjLpckSQaUkRy9rpmfSMJa0y88wSsKo5iWx0gZSb3mev2YubxF\nrQuuri5w6xVFUeQ1AO+wWtF7wZOrjrPrjr//rTN8qnj/0UPeefQb/Ms//zM8ulrQLE4oK8FpM6fr\nr5AIpk2D0AZbVGAMy/YJHkk5nbI8f4RVcHL7NWISOB+pZSIE6NfXpNBilOXOq7dJ/W0ePnyCrI45\nOp2z9pD6S1rXsbpYsllsiNFjJsVwkUWSSLQBKjqkbhBeonRCa4kqBKIQyCTwwSFRe+LqQYmkJMoI\nwCOEIiWHVJFCC8oS8BHnQUo1uGaWdOtEu/F0G8982hPlmmUruXjSs2pzRqhVhuA7Hl9eEULP0bxk\nMm0+zcv2wdABlmQQf5UfBMxT9tonjkA+gPgWvPe3z5rbAXtMh4qVg4VONWR+Dlr0DORyLzoXu59P\nebF0jJEdfptWL2CbFDTKD7dgPnihtJRsUsWG3HdgflhObgviYh/QPVrswH1razCCuRjAfATxNCy6\nbimWvch8iM79XnQ+ArlzhuAVuFFeKEiOrc/Kdu7GdgR8wgjqu+0LgfEP65yPaC8VzGPMHPlYhUIg\nt2CtpKRsHPW0wgXJupfYlAYFixx8JTxFI1GVQ5Qg+4QpwXc5WrTGUM0U/dINlYQSqIAPGxKRZlIz\nm84o64qz1UM+OL/CSokgMjnJPiAhGoJIzO4KJqWgTWAnihLB6cldmqIk6YSzWQK58QHJhtXikuQ2\n+FDR9QuKwVe9md7h5N6X+aV/8Lf5zE/8M/yzX/kX+Ct/9RdYXD/iF3/pV/kj//q/xP37P8qTh29z\nFHtC75BJUpSWsqooJ1N6r9gEwd1XX+fiekPVHHFSzwluzWaxxF88QqIwlYTUYlSiqDS3ZxO+/eYH\nlJXl5HRKYRNNo0AVvPW9d1g8viZ5wa3bJ5hSsRHXCJHwUhKjI4iGJKAsgD4vMqNzIpCSAuGGBbXO\n4/rsfClSQmsBQuTFUR9wTrBpV9RzTSAghSaFgEgS3WuMtKSo6FjRrQKtaWmqAqlaFB7fJkCC7ihU\nxb2juyw2HZcXT1i369+pS/izwJeBX7t54EVolhHIx+hujLq33d0c7wH7jWMxjhHzwHPvlUmL46Ln\n6M+i5U6TnnaLoGNkngZFi8MNzoeHfuYRQRj8zB2aPpnsJU7BJlWsqXNPNQmBFv4A0LfALvbmxB7Y\nC39j/gaYi7F8Ru5+4MpHQPd7NIvfA/Jsd5sj9ODU7n10u/dxH9Sf6sPnlMbjNznzF22fIgv4EsFc\nIJBZjShEVpwMSUIpJYzVFHWknCaMg8WVQpEIIVMyUWS6RemsST6aNawvHCpFbBEpa4nRgnQKG6lI\nMVAWecFsvVpyJVe88RqUjcWUmrt377GO7xG6nvKOoW4EwlqW64SyFfVkSYweWStEs6IqFfVUc3o0\noxM9Ia6HN1TSdx0SQY9itV7hfc9m09FMSz73uZ/k7XPN8f1X+LVf/xp2dpdvvvUmKq15dX6M6DVC\naSSgdF74ffX+PermiHrSIJtb0Dk+/8Uv8PZvfYMP3/1NhIL7r3+Gu3deY3qkKa3l4vIx3nUYpTHG\nMD0+obYTvvRPzXj4/gOqSkEKNNMTHn3vbfrFiiN9RFSBiW04ns95fxNIcZXVLNFibEKlrDRROmKs\nQBUJREBFSIOdQiQNiVggh6cckTxWZ4vU5ATtpiceZ+gA2Kw6pBeoWjIpSirT4KPnsl/R9S1abwix\nBxKbVcTqI2bNjFJZ6mLKjxyXrGLLW2dv/U5cvhPgbwJ/ihyhH7T/+hd2ipqf+3LuT7X4ET1wAPQH\nY7+3HcYi5AU7ocXAw+dkoFx9BYYKx6QkcwSVJDFxo8gysJUg7lwSx7aftp/ELhN03/twvwfUnqSR\nbfk3QcoB3PhqYu93CZ46J8+n4YlhUJ4P3/+03YqndNxCpCxmEEN26VAUW+VK2tuei0ELkEPuody9\n1nZfjueKzApISOP8zbqd6anB4e5Nc5P0rMn99lXglz/i+K69NDA3Nmf7KSEIxJ2efEgOMqVAmYQ0\nPQiDGkyugnODtE0iqLE2YSw0U81qETEyQJNQQjCpNUhJwiF9Q2EMWkDbOx6+f8Zn3rjL8a0TAKqJ\nwRSKRjcIYWmOCspqSrnYZNN+2yI7uFRrZBkptKKoJKU2YB2qN3RiQ+c3iNWGEDxaKpb9NUTF0fEp\nX/jiT3B05w2+94//AbePjzl78pj/6a/8z8TY84u/+Lf4C//Vn+PBk3OsEpmW6A2FLfCup6hqRFlQ\nNlN82PCr/89XefPbD3l0tUC4DW+/ecbv+/1P+OKXfhLne6QQKCT19IQkPDIKOn+NLmte+8w9+j47\nM1bHUx49+IA37tznfPVdYgqU2lJXmioUzNyEleiQaU2igGSzVpyUeU5hAIUUkAiECEZZpApokasx\npagQIlFUiRQCnfeI5PFeomRWD/UrT7daU9kGWQqUDNSFIYiCPnWkaAbDpp7oA0ZalOiZHp1wVN0i\nhsREzVE68Q/51qd66QL/B/C/sjOYO2j/xR+7MfGs6O2jwDzd2D5rLj09zoWuxV7tSrnbslscjUO8\nnZIk7GnMRxDeqbzlFrBHL8UDJCTfEbY3BbHnkjgUqd430JLEPVCPTwH8Pqhv283xIJYYAVrJMPi0\nDwCr8tOgjLmohUxxq6TJy7mOkNQNTpyDhc20pVT2Fj2HRKGbfHmKW07xBmA/Y39v8xSIPxfUvzL0\nsf2ZZ5yT20sD8zuvNDx+vCZlroWcip2jcq0SZRUxRmFtIplIXStEkqi6xvtcjWa5ECjdYW2RZXNW\nEnpJVWuUCngpKCqFKRSFrNDaAj2xiyzOOs4ePeH0zjUgkQKaaoJ1FiUqZrXF1IKQEm3fY4QkKY0y\nkaJyiEpidAQTsZVgUpZE72ldPxghgQ8h84ZKoYuSajJjPp8jZWS18nSbkJU1UVLVNfPTu5jSoJPE\ndx2xnFJXE7QRNLMTbDPHK03oHf/kW+d8Z91z9/6PkmLLm98/Y/btSyazB8yaAlvWJGuynWyErr9m\nZk+RUtP3PYLAZD7HVBN++Wu/wgcP3uf27Bbn1w/RVlJYxaQsWQeDCp5CNsgYsUpSlzXHDh4tFyjv\nsjxQSELIGbhKS5oyoZWnax3RFTktH4dWCe8TRml88CgvSDGijSD6irbXpCTwRJRS1LrAcIw0l9BH\naqup6kRTCJTdsOkecmt+G+1KRGE5FbNP87IVwC+QjeT+4nPPehG3r48C84/rN4E8DnkRY/Fh2BYi\njux48UimVEKSBwA+gvt+Qv1hpL0fke+9E3tyYrlXgHkEdOAQyDmsKjQCeX65w+3Y9k2/xt819hxp\niyFKFqDEUJlokHemuPW20YQtGx+SJGlJMoMMMYihvucA3mZfijgcC6NkUUCQO848Dn/l+Jnsj8c3\nK+0De3oGkKft+7mbF88ZP7/dfEj4HWt379zhzp0ZWutcI5lESlnPOpkW3DptsNbkauwmUk0SRRUp\nmkg9tUwmEyYzQ1VN0SZzsspAEpp6ZjCFQQmN1YKj4wJdeXSRXQBdm1A6cvlow9nZIy6vHrFewL+w\nOwAAIABJREFUL6nthLIwFHaKFhO0KYAOKSRSGZQyFMLSNAVHVYOSElSP0ZBEYmYaCiWx5IzVdnWF\nxNBMKqZNAb4nug2f++wbiCh5/dVjCA6RPP/9f/NneevNb6NR6EIym8wQOuBjj9Il0ijU0QwRNd/6\nze/yzvWGP/pv/wm+/c4Vf/1v/t/c/ZFXeLer0PYeKalsQiVNXszUJVbXOO+wUmG1YFLXzI+Ouby8\n5r0Hb3Pv1h2a0jCfz2jdOa1fUhuBlYapLpCip2GKNYa6KXjl3gk//tnPMZ/eBhSESG0qGjtjNp0x\naSqmtaUqisyFp4CSkbIsuTOfMbFFjsr1mmxINqGpb+McXG88IUWEcWiTqKuKpsyl/nQVOZpAdSSp\nKlC2552H30DYSIgbjP1U45M/CPxR4OeBfzT0P/zUWZ8EqD8OuJ8XrQ+Red6OoD5G53KnWhmhNe0n\nyx9G5AcuKmIfgvfhdwyaDyPr/Uh8NM3adrEH6PtR+XM5iN1v2e0NS7AiImVEyix0UNKjpUNLj1YO\no3qs7ih0T6E7Ct1S6pZSb6hMS2k2FKalMB2Fyeda02OMw5isetPGo01AGY80AWki0iSEiQiT8jPa\nQU85PNZp19XYc2b6QReRQcrEIacWP2L8/PbSIvOjeU1TVaT0Pc4eLrKcDYGxinoSKZuAtZnzEtJR\nVZrNylGWmr53WGWYqoqy8mht6NoWgUFKR91YtNmgdUKpmsJKtBSk2NF3LTEmcgm1rENPAXq3QhiN\n1QpEgZYlJjgUDiMjjanpYo/AUxQNk2pKaDOPLHWHRCOjovWO1LXE3hOTZFJPUMawXrd88OHbeF3y\ne1495W//ygN+9md+H2X5De6/fp+mOeZrv/517tz6OfzGo5XOEsTFhuru68QIPhms0Pzyr32du6+8\nwh//d/8UXdtSGMPX/tFv8LM/+/N4fRvhL+hWV7SxYz4/BSkI3hH6Hu8btLIIGale/zH81RXTosLY\ngnLZY+OUx/0CIVaURmY6SVmkqCmkZFJaqolB2QlFM2V6NOHs/JhHDx9RKcm0OaKZGoRcQ1BEf0l3\nDSCJdFTFFB1rIhOk3GBMJMVAMzEIX3B8dMLGtSx9R2WhLBNagxl8X3xS3LpboITCGoGkIgg4u3qP\nk6M5IWw+zcv27/EiAdAPIzJPH7H/LIolil1kvvVo2UXnW5pl7Iw1O+UNUy1FEGovej8E8lxwYABY\nsc+7p63z4QjcaZA0fBTVwt6NQAyveRPUx8g8/7IhKpdxqEaWP5Fs1pajcYUazL4UIT/j5bFQRDHc\npA5qe459Z6qVi1LkfRFk1vNvFz5lrnIVEykNT0VxjMiHu2p8znjc34Xyu7vwJ2wvDcznkwm+gHuv\n3GLd9SwvIglHSj1FVVBVAmsVQYKQAVsHuk7gfEsia9LrylKVUFpN8hVP9IKoDbaQKFUgLRgEpalp\nU8cmLIm4rDONLffvvUKZDDE6ohdURfbOFnENaYJULXWpaB2kCEIYBFDpmqOyJsoKW4BUEh82LLtr\nSq+RmyVKGoIEKT0yges6WjwP07cJ5YQ/8W/8i/yN/+vv8uNf+jEWj6959903+Y/+vX8LHT3L60eU\nBoRWNEczhFDYokTZgg9/612++rWv89Nf/mn85pKYNH/5L/2P/KW/8OfpXc/1eoXxSz54+xuoomC1\nWvPqa59ltbzk6OgWq9UTbt95NWv8jSb0G96YnfDOxQPuHN+hW60oKZBKoJWgMImiKCmszokuytNM\nKqrJKdNYUK+PwTRMiprQbTBKMZlCpIBYsFpYOufp1j1z65nNSpyuOZm9RhvnePV9LsM1dTPHbwSz\naU0tLO89fpe4EdjjTGMVlSLJgKSg0jWCKSlpZGxQQiFjS0od6XeBBe4Lg/l+wPWigP4RoJ4iA7iI\nXXQ+2uAegPPOGXEH6oe+hQclIoTc8u5PQY7IAHXAfaex5LPcgvhNbny3P1Iru4XXjxJ8bF9DxFxV\nUrAD85SyDDNFlApP00VCZeEEcueaaAYzLa0IWhGNJASF9IoQYj4nqJ3aaFS0jDLSg89qD6xl2u2L\ncX6PYol7j1PiGdTLD9BeGphnwE3UjaaeaPAdi0VCqoQtE7boKUrBYu0otEWILtuq+gLnOoLznJzO\n0MYyaY6QSG7Nb3H2+AprQWuTZXNUGF0SUmDjPLYKmEIyPznlaD4lhYAXgVJn7w/nIiH2uLCmwOeM\n0eRJvUQKizWSwhhKXZBkTVk2RPGI3q1ZRUelC4qiZLPeYOsJMQgcPc45VqsrPjudcasueOut3+Bf\n+cqXMVLS+YRrr2k3nvXGsbh6TK8FalrRNBVOJGTnKc/POHtyiVI109mMqioJi5b//M/8WV6/c5Qz\nNSvNG3c/R3f5PmePHnC9XFE+eUxZViyXlzTTGSFGJmUFXY9cX9FMGpoWHnSPUFpyVNZ4taKZNJzI\n/IVTWqKJIFo8Fc1RCXqOvlzTB0MhGkKvESlSlIEQl/hNRMuICOB8wFiBVD13734WjST1FZPJbfp+\nRYgXVJOKkJaoQjKdSDZumSFBe6TsaCYKgUCpnhRbpJjnaEnl8n4IR2k/1QzQF2svAubPAvKb4L0P\n2M8C8WFuG9gNUeI+kO+SgvbpltHidqzfubfd0i4DkG+3exC8x++OYhCRdjSL2gPwmzTL4f4+Z/60\nam//39wKVQQ7IM++fLlO7wDiuSi6OPzbt4U4BgVMkngzeJYHPWx3XYb/n7o3ibUtS++8fqvb3Wlu\n99p40WWkM9OZttO98aCwXaY8KFMqBkggJAQCZgxgSNUMJggYMGXCBJUEooRUgFQITCFlUaaqMJCU\n0850pp2R0b8XL957tzndblbzMVj7NPe+F5mRkRUV5JKW1m7OOffce87972//v//3/ywxJFQ0xCDX\nZIkSFSqMbSyfA/PtHMF6C+jqALy3xzk4J4e/6acfnxuYaxXwKlKURdY6R42QS7z1WFSirUGbEm1A\nqUA9Kdgse4Yh0kVNPV0zPzW4qmKmG26fQkhQNwPabVBpjtMTUspWqU1T4/2G+Znw+ksPKMrcQXy9\nGWjqGZEOpXrazRJna4bQARGrhGgErS1lmVuuiRacdTRugjctaRVxqaT3EastSRdMjucYShwe9IZN\nv2GxeEYzm/PFsynKRox1fO1Xf5nv/skfkx69x8On73M6fx1tGqrC0K575lXPELJhmLOaX/u5n2G1\nWfPrv/R1/tdv/CHJL3jplV/EILz68n2a2YYHX3iDdb9GRLFeL3IysamwtmAYetTZKSn2+NWC+fyY\nW/1tdDVwFQeehAFpjpk3d5gdw5OrDxBxDKsFRRmy10c5ZT6bkui4hUOLo9u0DK1HiDgXkUFR2ILK\npXyHrJaIXpPMR8AD6nqKci1NU7FZ9VjXExC0F46nBXptgYGkCvRYdaqmnqEIpEEjKRBJBAkYVaPo\nifK5pYH24yehWW4C9486d40n57mIfMebHyQ793Mfgd/kzm8mQNMu+TnC7kix5CMj9UGmWbYg/iKa\nZU/6PD93L3htbH/eKFdUkpVTKo2MT5Ytap0Qc3jRGbeV2oH5djuhc2s4awk2EK0lurHTkLPZdmPr\nk3NDYy5bGehopfAcgN+cSkZO/ACwd8fHc+oQ1D/9+NzA3MeWrt+gVKRuIPQKiYaYAsYqtGGMgi1a\nRyQpikIzWJV5VmtI4nNvTHLXnzt3XmbanBL1O6AMWk+x+gRPy7prKVyibgyzVyxHsyYXobiCoW1p\nijnGaNrhKX26YggNy4XHlC1xMBhToY2gfHaw0BIpCo1zFVqd4MqCk9M7lCnhl2uaaUBE4eqKxeWC\n2K6ZHx9hixnPrp5RFxWl2jCZNLz9J/8X3eUz2m7BpFZcPP2Q+v5dnn64pplMURfP0MbSPzrH2Dv8\n63/9N/jv/ud/xOyrX+H2vWPOzu5itOaNcsLtr9zHv///Ero1x8enPL1a07U9s2OLtjVKG4w1mGKO\nPPsQm4Q7usHNzpDlE2IRGXTEzu5xNLuNsxXWljz+6E2MLlivLonREZOlDwNNXTO0G05PCi5jAWEJ\nacDoDq3WlFWinhri0qJMQhjw/inW1SR64vCMybSiKU7oWjB2IKUBVxQc1RV9bDGpoS6mlNUZEp+g\nyxavsme9LRUiCWsCJDd6wXzO458WZ/6jQP25uaVYbkTnL+LLJfPi15OfN2mWQ3ni9VQlMGLsTdpk\nr2YR1AvULC+QJe4oFrgWnY7h+HMM+hbfddzTSiiQcf9AfUNkx/Vv9egZzB0hWkywhBiJMaKDIxx4\n32z1/Fvpog4aiXpvp7CLzOWANpNMwWwB+kV82I5eGYH82rlPPz43MN+0V6z7BcoY6lqIg6O0EWdP\nKaoWbTvECEVlITUkQ+6HqB2usGgpKCxjm7msU6/KmombE03BcvgzmvIeUNENkc0wIGKo6khpGorp\nOX1vaDdPUaIodAVW5QIY7dB6dE0MQu97KlXibEFRKJwDTMIZmy0IVM/D4TssVle8ZO5igyf4SFEU\n9F1LiBqlHY8fP+P27cBUnbDpBq5WK67WG0K3gmToNj2XG4+Rlru37mDqCqUMw9AxbHourzbcvr2h\nqO/zr/3VX+LtD1Z8c7VArTrunk35vb/2m/DkHT5687ssVmuMq5jNHYX1XF2dM5lUaDNFG4ccnyAP\nn5FioLJC7Wrq5hirLpmqE1ZywdH0axhtmE3PuHr6GJynHWBolwxE7LBGYsSkK9pBYbWjKmas2g8y\nmFtN2VhOz0qiXBBCwvvIMjyjVZqqqrAO6qYimYizc1IUjDlj8E+oqmOkv8Ka/A9raCitpx08Rits\nNUFiQUwdKQSqckboPwmSfsbjk1QF/rhqlsP/9+cep3Z0y3NAvqVXDlQs++TmDzOnvZkAHYtz5Aao\nb0FZDmJ/tefMn3sl9Rxps+POD1Kd2xc/IGHGI7skqNrx5WzvGGT3NLZ3D7t5kCxNSuNDIESbK0Oj\ny43CQxpdLFO2SxgBe98uTpOSRkchbQFc2AtR9AjwW1oljmAdb3x4IlnNIuO++ikH877vSXQQE85a\nyspgqwKrC6oqURkwqccyJYhCbCDZJbYYiNHhtKFusn+ID2sG33J7/ktYSormFvHqPtY0aFXgk8K6\nSBharANtPKIHWv+Irl9j7Rxtb2NshS0idaNIukOLpRsUvhesFo7mJXoI1KWlD5cM4iBZlF2jlKYs\nStZ9S5EChIDvB4ahY7G6wBUlD+7dY7G4YrF4l/Vqw6uvv46xBc3JLfrNirj0GG04mp9QT+9CCgQx\nbK4uWS7OqadTuuUlpw/ucevLv4Tob/Ly/a9RzY64deeE1eW7/PEf/xGkSDOf0fuAcwY7NVwu1zRN\nAxI5ufcqlDVaFKXVDAmauuGOgpQSlxislPSx5XRym7Ka8stf/R2+9+Y/YGme0Q2P6JdfQB/NcKIJ\noUalDoeFdMRJY1lu3gTd4rRjMhNEjlmuB6IHLx6rNmht0EZDspRlQUgNMSm0iUzUyyjdYao7FC6S\nJDKENW2/JiahcBOIDqVKVFWyWXeI7rGm/ry+0vvxTyMy/2EJ0RdE5YeJz73e/IaqRa5TLbu2b/IC\nIFfbBOkNVbhS1zBnz2Vvteb7nwDcULAcAvmebtlGp3vIvhmLH+Bzls/sVpSg9MF7UdsLA2MEPz5+\n/BsqDQmFcXEs90+5yUdMYzQuN5KdmuT0aFC2fVzaPy4xXiQOgFwxAvgLrsKyvepu6ZWbV+iDIqTd\nb3XtKvWx4/Mr5x8/HasNaKFuDI4SZwyuLNASkFSjtBrNmRxWg7HgisS0tMxKl02g+mfU7oz18Jij\n+lVQgrMFvW8xZSCZFldkT+MkgURLYGA6O0NUj/dXOANJD8yaM5I8RlMyxCE3eFUepXSOtk1O0hWF\nJnJF3x9jU0vDlHl5hNUGmzzL5YeErmdxuWHwA1LDo3fezVRO13K12HB8esIXXn6Jo+MZH30UqOo5\nPZ7TO6/QHJ0R+wFbaKIMyOoSrR1adUycYfP+m5wdlfjOc37+Ft//4DsoSRTNhIuP3sE5TRSHa0qe\nfPQURDh//CGv/cxXUKen0C5JIoSuQ7ynLBxTDEd6Qh8UrQtcLt6jaWrO7Cn65B4P7v8CTxcDbXqP\nYXVOURaIKoipxq8vYKgQAWcq5sWrXHVvk8SjdcWkEUin9KHH+w2u0SQP0SaGocXoI0R1aDVh6AO3\nTqd0myH72ocNooRBhJQCEmuKagZGSEFhU4PSHcvVE+b1K5/XV3o/PgswvxmZvxDQeQ7Idw2YbyRA\no5hd/84bPYC4pmS5IU3cJ0APSvUPKBbFnmbZyRUPXm3rIHP9OXvN+g64dri1P3Pt5ynJCpbDn89+\nHzLIZ3uD7cz7gsaEiLcxa8bHaHzrUqlu8ONb6aKOZgf6e5pFduX+HFxg9jTL9sM5APKURorlANB3\noP7px+cG5qUrsczRJpJSIiqfKQyrUNqSKBAfiCKgHKbsQVcY3SOmxLlsYyt6hbBhiOB7TeUmmKAJ\nKTL0ayAR0kBZzFDk/pNBeqytKVTBydkdPnz65yS1ABqsqXCFIg4dgsEoS9IVWkW8jyircM6htB8b\nQ7fZkncI9DZ7coh42k1Pv16x2fQkbbBKszEWq0DXFlGGYnqLVMx5+/GKl1/+Emse8u1/9E+4fPwm\nj8qaV9/4eVxzRDU9pqhPqCvQTGhX50y+8LOcf/sdEpHYdjit+fDho1Gnf4wyBdK3OCbcOj1iCB6S\n0EznpH6DaZeQoK4rVkswEpg6R/AeUSUqKj6KHU/Pv8XZ5A5ON9TVhFN1m2ebNX7zlK6Y45Qn+EA0\nicXmPSb2Fko7RAJGJkRWWbU0neH7FZtuQKuGwtZolf3NV+tLtJ4icUJqNVoMw6bHOUtMhughhIGk\nI2I3aFXkxt7R4NOafmhR2qP1mmX/F5/XV3o/flIwfxFgvwjQn3vOdb58F4nLi5KfN/nybcH7QTn/\nqPw4BPQ81A5sD1UsN5Od1xKg1y4PslOyPA/E1+PPPc0yyhgPdOYqja+jxwvFrjI0ofTY0EPL2NFJ\ndmX+SSl0KDAx7qLyDNI8n+yMemx8rYkxoqLZWxMfRuZbyudFIH4zItfj/hbUr32In358ft4srqRQ\nBdr2+LBhSBFVDmiT6ydD9CgKjLaI2DEDXOCcQscplSswLnNPulD4PhHCgsXqEcadkFLPkHpCv8Zo\nqHSDLY4Qa2i7Fi0FqIC2imbqWPslBUJM2fdDFwbpDcFYtFJoLYhEwhDQCKlKxCioGGn7lj72RO1o\niiOivqCezEgSmOiSJIoQB7Rr2IQBY2p+8Wtf5eXXvsD0aMbrX76FaNgEw+z0A77197/Db/zCXX7w\np/8nt196lQ7N3Qc/Q1kLx0cNrjCUR3POvvx1Hn33W8xOjulWS+aTKUkUSmu8UtTzUzrfU5YTSGvK\n2lKVNco1yNPHRN/SdS2TyhFCxClNL4FjNM4es5aey/YD3vngz2iKuwQ/EKPG0dCtWkzRM6gVGsdq\ntSamZ1z2l8zNFGdGU6cI2lpMUdDMKwIKUqKuarxPII6YhL5rUUqI3uOouLxYMz+Z4+OSpDxKBcIg\nmLLGlBZFj7EzBr1BENAeV2mE7vP6Su/HTwLmP5Qb54VAv/NkucGXX1O1yPNJ0MNE5/W2yzchOMM1\n1wB9HKNO/Cao73Tm115lLAK8AeLXIvLxNWFPr+R1C+j7aH8L6rtCJdl7tmhJ+2SsTrk9n+R9UQod\nUm7LFwRl8xSndsnONHYZynLFmIF/fM5O6bJNgL4oEr9+u7QH8i2YX4vODz/gTz8+NzC32oCyKKVw\nNqFDT0wD4iKFPUIGj1IQhoBIjzWKwlQELXjtAJujMacw1uB7AXqCXND1NheQCFntYhp0VZCSYE2F\nIjB0A6qKWFUynx1xefEBidOcrTYGZ7PncwhC6QxK9wzDkuB7alXiVCIJWBIxJgJr1v4xE6UpK4c2\nlmp2wumdOXU95+T0Dl2Ao7Pb1LOGymrK2YSnlys+ePQ2tqx47Ytf4Vd/U/P+997i6eUF92/P+Qd/\n9Ba/+pu/RTWb8PLrd1leLZjcfQPKBxQPFPV7H/L++3+BVQLNnGG5gBQ4ufUK2jlq45AUMaaiKBSh\ntth2QRwGNus11mjYdnvSilpbQuxZDlcUOuKs4qL9LhfLJ6ihwZYaTYPElu7qEdoZJLmce8BQlBA5\np7AlroTBa8KgiGZAqZ5JY8cuRvl+NomiKCxdv8bogRBn9FEoK0vbPwHlcEWFUpHWf4QWC6MVsg9r\nYvREGVA4jLEY7T6vr/R+fFY0y8cB/Q471I3ofEutHHizcKgv3ypa9s0mwjVw19mVEL2nbm4oWp6j\nWEjXaJadZJE9+/4C4mZPj3ysPHEPkFv+3WyBXEbrgJQwsgXzuDtvJO4aaJtrYJ52bfmIsjPQkrE9\n3BbM0wjou+fEMcF5SHPrg7f63JVW9iX8cgDgOuYkKT/lYF64zMspawhRmNVT+rjBOUNdTXJ7tPSM\nFB0pJAxTrOkpnEMlhTEOLUJMAadKjIkMkhOOQ7HMbctSRKJmiJ5NWmHtBOUCxiTaNtCGc06LOxxP\nv0q/+T7rtqWwNWa8XXPW0OuItRZlE0MvrLuOqAoqAU0FpiepHpGOoNfYWc1ZdZ9Zc8rb3/sWHz16\nyPHdgWV7RdsGhre+A8rxi1//OpPiZY6P79I8mHFy9x5Pz5/x4I0v8mu/9Vv84A//J64uPL//L/8b\n9HLFF7/0GvVrr6Eu1gR3TNHcRTbPiPMZm3aBv/yQqj6hns8ZVKJbPKPvO5rZDGsMzjjKqia2kSQd\nCkNdTVkvnqBTACX4GFGiMEmYSuS202yweN2zDhfoEFG6RNsJffcUHRaj6ZkhDAMi2T5BlSuiWVNW\nE9reovxYTScdqBrrEhI8fuiwZYGWgihXBG+wdgIJ/CYwmIKi0hRFSZKBafMyUR4ShewAqc4IMVfw\nGrONpKrP8mtbkf1IS6AA/gfgbz73qE8L5j8qGn8hgPMcblxXsxzEyzcjc3meZkm79QY5Mtre5nEz\nSccOyA95ckFdP84hX77tXnQgSdyxNzcvF3LgICDXLgq730BlQDeynWlnspVdE9NuW5QCdyPRuTXb\nGqmVXPmpiTEQo8XEdKB2kd0FICc9uQ7icrCdthH59gIwgrqOB9F5PIjOP/34JGD+cX0PT4H/FngN\neBv4V4DL8Tl/E/i385+Jfw/4g5sv6lxJHzzOOLS22OSJaAo7QytHWRhit0KrQFFMKYs5Rd1hTEun\nc1Pg4AM+OgozwRnNoJcEP3C5OAfASyAGwfsAVrBlZBgS2iiyXaumMCdIamnKMxbL97ObWnQksx67\n3iRQgaKoWK83hDAgEUJIWF0wyCV1XXL/7Kuo4S7DAp6tH8OQcLO7HJ3Bm3/xFxir+Plf/ufxQeMl\ncnLnJb731lvU1TmFayjfe5d7dx/Qr1d85Vf+Od753reRKLhZyd3jW6QIP/iztzFFyZ0H91HKYGyF\n6T3LTU8xOWXZrlhddKgIm7KiKBwn5R0Krbm6esYbX/s10kmNXgwEG+jXLcMwUJsSozuSQFmWRCO0\nfUCLUMsJnVwiskAoiNGCKvPfNC3xfUfwiqQsTmkCwq2mwhYD2m44MXdZPqtQytHLmhCXROmJoWDT\nC40RXKGxriL0uaGFdQUJYRgCtihJ0QKJGC8ICaxJ9EMgmQUiU+riBBHofI+kz7RoqCObbG3I/zt/\nCPylcd2PTyNN/GGA/SLwvnY+K1ieU7PcmHvufJ/8TNd8TG7KEs31+Fkd0CzbXN+NCPvwEnATzA/V\nLDtVizqM95+PyrdUy3bkitPR71HlSHvripjBOhxsjx2Lts2kx/OiVO7SNFIrO2vboEluDArGMv4Q\nIyYGdLToEHeR+Y5m2YH5+N6v0SojeG9X8wJ6ZcfVbNdPPz4JmH9c38N/a1z/M+A/AP7GOL8G/Kvj\n+gD4e8CXb75TZ2swGm08TjVIinSxxyiHcyUxeVRISNBYXVO4GmOE2dQjsgHRrDdCiHYsoTcY3bIe\negZZ41NH4UoQzRA0Xd9RBoMrsrVu73Pv0fPLh9w6fQ0RYVaeEKPHh4RoD5K7q2iVsDTZlc0MRLWh\nVAUhrrl79jM4l7i4esrMNTTzY1TrGHTP6b0HvPfmX/CzP/91Pnr8jG/8wd/ht3//X+Tpkyu+8fe/\nwe/+7u9iVIEfhKou+cH330Sh2VytOH31K0gY2ETPqW64WC6whWUYYHW1pJhcoErL7S/+LF+Wge7i\nKSEGku959vgh6+UVejaj2yww0xn3X34FTMCtOoZnT+n7BW3fMTm+T4gt2hsc2bvd+ZapsazFMNEV\n56FE/JJNu+RocoSKmok7ow8DUVpSjPTBE7DQRapqQj2tcVaB10yaCX1fgCzo+iusy4mpECI+KGrA\nWQtRqIoGHR2DCEqVLBZPSWoY/e83+BApyimOkhRbtDY4WxMGhbGR5D9zb5ZtK6MCMMD5c4/4JP+T\nP64vyw87fxD8cZM3P4jId9tbWeLHJT4/Ljo/JEUOEPY5ID/wZnleV/68znwbnT8flTMeUePP4Tqd\nw9gjVEaSSLZdiuJue398f0y02vda3VErjDpylWmVYHNPgpj16CbGHJ2nA6plx5lv36js3vEezMcP\nJ26pFjlwTDwE9H82YP6ivocPgL8O/PZ4/L8it8T4G8C/BPw35IvA28D3gd8A/vHhixpbEEIAEbTR\nOGfRmw1+CFAlRAKSCiI92lhc4VC6QBeOYpboVwMxKfpNxVBAWeqxl2hJaTUyZC6VpOn7SAiRhCVI\npB3A9wqrK1arBUX5HpaK0s0ZhsC661G2x7iBhCLGguAznVCXU46OT/HyjPnkZZ5cvM1xXTOZ3CVF\nTxtXFO4Ot6Zn9F3LK1/4Ih+8812S7/nN3/49/u9/+H9Qz+/zl//KX2W92YBsiF5xcRnwg0cCpBh4\n83vf56VXX+HRh09o24qmiFhXcOf+EZPZnBQ7bF+gZ3Puv/5VPio+4Ol73yMMA6e3T7lzSW9sAAAg\nAElEQVRMEesMF0/PMSjuvfwaKEv0G4rbp/SP1vj1gs2wpCon2HJGtDVEj06J0IVcjq8ctRzxpL3i\navmQxj0AF5kU97C6Jsb3cfaC5foChaMqT1leJkrXcOuOzxa+usRRMy1ew/cQ5FG20q1qrHMovf0e\nRERtcEWNUVOUFDBErhYfUDYt2kaa6iQrjlyFUGfVjBEkJUwUkvF8xkMD3wS+CPwXZG/z6+Mn5cxf\nxIl/kmj9ZiTOXmN+COTXqkB3FMxNuuVjQPzGeJ773tMsHEbgL5zpGqA/D+Q3NNY7PfrhHcC2GXSe\nbuxMugXu3f7Btii1S3ambYPrbTRux5L+YLHRYmLYA3lI2QL3pprl2vve0ik35jYqv2aBe9P29p8t\nZ/46+76Hd4HH4/HH4z7kBreHwP0+GfyvjZB6SAoPVEXWkzfVXS43j3DdeZYdpkDwiV632K6gmRQo\nCkqXPRWsswwpuykWfgkktG1IUaiLCYPa5FsqPILk8v8g+CGwWnrqMmLVjOXiipPJMc45hk4zdJFU\n9ExsNtcafM6IK+WYVI7Sddw9/gW+//Z3kFgRi55hWFAVR6A2LNYfcTSd8vSDj2jsjLM7r9N3a95/\n6885Ob7NX/oX/hrf+e6fMp+eYQvNyfExMSY+fPSIzXrD5nKBc44PH37A/OSrxKCRWlPNbqFNxdNn\nH3GijqmKgNbZt3x6coRWX+Lqne/Rd0uO5scsl5dMK8XR6SmumCCFkCjprq7YrJZIGHLT55QgDWjr\niBJQzlCEkokkUkyUHqJXDEPBanXF0fwMVxakXqFCiVZgC0UIiRCXhNWE2aRicSX4YU1qNzRuTmlK\nanuLjVwhCE1TESJoE4hR0CaR0hXazpnOjyh0w8VFJLZrxA9oHUmpQ9sTjM3+98MQCNKz3vS063Ns\n+ZmngRLwS8AR8L8Av0MOZHbjPzzoP/Q7X4bf+crHvMoYEfKCKYf/5zeP3ZghQowQQ17TOGVsUJwi\npCA5+tw1LeaFDaNzV7Xr9MeOFrkWFV/3Lt8RMyqNMXlCjTTLdarlOpBfA3u5eWz/nG0fUbcFZrkO\n0NdAW64fOwR4hx8pJzO+q/HCpQxRG5LOazCWaMbVWoK1WGexKYO8TRYrdswn3ATmvApxr2BJCVIO\nOnKULruKVNE7zur5If8PmQz50ePH+eZPye2y/n3gZvvxFxNe189fG3/wd76JcxYfer729Vf5yi+8\nwdGkoh2u6MMlSs0Z/EDvO7S6wCRB2ymlSVm1YCV3vSlsplD8Aq0NlbMMlMTUo5XFy/a226OoMVrR\nxh4/DGgqhjpiPWz8Oc6VuNIwa2acd4ZERNtEDJF+6AlRKOtj5rNbXK3+DKePaMOadevpGTDxhLq4\nxYMHb/DRux9wfHSGXyeUVYjumJ894PZLr/GNb/w9Xn79VZIMPHu6pO9a2rbjw4ePefXl16mMxZmS\nhOKdt97m9u0TvJwituP0tqaoTFaEqIohJhbLC4bNFReX59z58q/y6Hv/mMunb1MUDkxDUU0ozl4i\nXXyA7jfQelaLS8QPmDKrPxIaoxRGG1h7XIzEbkk0isFvMKlHiWa52uSLWjxBUdG2Gm8V1gq5K66l\nLCZYdQbe4/vHDP0TDHNINVppJAlWG4q6pOsDSMLoipSWxDQQpcM5oVCOogQzJPoI0vWI9tRSEdOc\nEFuGruS9P3/Km3/6lOCHgyTdZz6ugL8L/Bo3wfz3bzzyBRy67BJvL1jDwflwsB9efD54CAaCE4IX\ngk1Ek2eyCTEjX2siSiu00WitMGOjZ6sY+13mdcuNi9qbVSWlsdttNEkFzGgnm1Qu209KE4mYnQrm\nRgJU0vX9F62yLy46XLf89x6UD2gVOQTxjzkm++cSFWHwRG8J3o7OiZaQLFHGyTAC/EDSJjfCdiY3\nwE57+2CFIDohI3WyW8dIW2S/ioyAbnJEL5Gxx6jOf/8XfXXVrwO/vt9P/+XHfiE/KZhv+x7+LfZ9\nDx8D98gUzH3go/H4B+Sk6Xa8PB67Nn7lL9+idCVrv+R4IjhToa3BMaMbnqDUavQTTgRZM4jB9gZX\napRJQEAbTV1p+ihEiRRGSCoQgs98vNYQLAqNpJ7k8xVRAdYWpJCrxJaLFSF2OEqm5W16pynjDIkX\n2CKiUvbyDjHSDkueLs6xxoIOKJ1Y9lc0nNK6p9yZvMTTx+9TVVMW51cocegoHB3fIgZhvVrzc1//\nOZ48fgqFomka2naDMZa7d04oS5hNTphPJsQYCbePeefddzi6excBNpsNR/M5zlpC9GjnKJzj8dOH\nqM2a7/+Ttzk6m3Pvy19luHjGK69/mdMHXyL1K8L0Lu35t1meP8MoQxybaIcQsGUaaSpNPW2wXnO3\nNLBZc+IMV8UJa2fwCdpuQQg9kizrzYJUXVHWCqUTKXaUVaIPa8pkEARlO9btQ6w9ow1LlFYkiYhO\nFEWDNhakx9ieGDTaKBbLjziZl4jpUQboDes2YumAt6iqiqQKZLjHq186494bhrbdEH3NP/y7Dz/h\n1/rHHrfIcewlUAO/B/xHzz3qkyRAdzwtO1Deze25GyAuNx83ngsGYshAHo0QbF6TEZLJACMHnW6U\njqisSMVsmxVrcvm7UQcOgwf2sSqM+5qkMnDbcU3KjD0/DYZIVGaXAD2UKj7n3Sg3yZybxw45+JQB\n+RCcJWBGYDeSgdvcOH/z8VYCkhRuCIQh4ELIjVuiJSab7YF3RVNjtG40yejsd57UCOb574KSEcAF\nUSm7OJLIXXK3IJ4jckkyzhysK6PGlne5ybSonywQ+SRgrnhx38P/Efg3gf90XP/7g+P/NfCfk+mV\nLwF/dPNFjVYIPbNmwrodeVJdjKqRRDB99tHGEL1mHVdgLLavsAR879HGUFZC3waGGCnRxBRypD5R\nGG0p9RRTQKOnJFpIfZbS+YTTJUZNKBvHprviXF8wmRxhKw9dJAaLKSqM03RdT+gts+IYwgfEBNYU\nWWKkBGMKogycX55zx73M5uoZt45eyc+ThMJwfDLh4cMPee+dh8znM9abK0IQjuZHbDYbGqsZ/Jo7\np3OMUcxdg3KGsnyNnpb57D7TyQQRwQ8DWEtdFwzDhvsPfobV47eY1rkoqiwnPPjV36KpStTZCSpa\nUtrQnDzg/L0foFJHVUyBUXqpFW2bbQ9SEhBLGAIzLGfO8L6aYIsNCmG9XKDrM9p2jSssgxaUStnn\nRXcEuaQ0Bd47hmFA6wKllmy8p48bFEOuMUCjZIZJEdFZ6WL1hM064NTAk/MfoG0gSZerbINC95rO\nBCQJ2ilUELq4QZQnpHVW2Xx24z45P7Tty/63gP/tuUd9QjDnBQCebgB2Cs8DeArXgT16IRqIFpLJ\nQB61EHUimUTSCTERtBrtMTJ+iMpReU4sgkbAjKCiRzDXB6Cu85p0jryTjtgR6I3WRBXRegR0DKi9\nN8uOhZd4/djh/g7IR9b+ANiNxGsAbsf9w22bbuxLuPa87b5ERfSB6HN0vlWuxGhGoB67EmlN1BnI\nk81VoDIqgrYNpJVOJC2IltGOdw/lIkIaAyaRbM4lY0SejEIMYDQSJUfoL9TYf/LxScB82/fwW+Se\nh5Clh/8J8LeBf4e9NBEy6P/tcQ3Av8sLaBZlNNZAigrFdLzS5yKiEHx2IxSTI9BB0/YDuAVqnWhS\nT/BDjrxtSZKe2EWScSQJxJiVKrU5padAJOAKITHQJ4uNFcZYrJpSmJrCOhQ13SawXFwxOSqpXcFm\niMQAxhpEOm6dvEa/uWA2uYXnHKU6rBaUq0nJc/fO12jPL+lTzaR6icXqCqKmnExwRUXfD9y//xKb\nzZoQAilFlFL0QzsqZwaQQEwDVkeOTo/p+57XvvAybjpjMpkxmUyZzo9yJay2qLDm9htf4eIH32H+\nyuusnzxkfvs1qukElSzVnTeQJERZoxZLbDHn6OQeV8/ew4cue7pbR1GUmKpBa82qXSMqYZxDotCG\ngRgNs8kduuRZXD2m7yeZftE9SAZzEIyBJEvWLWyWClf1WLXElif0V0u6VigbjTaBENcU+ojoI0OI\niB2IcYIES+87rNMUlaB0iQ8erWuM7lHREEQYVp5pHfAExGyI4ZOg6E80/gT4lR/5qE/wNiRen+kA\nqNMNIP/YcxGSh6RHELfbaDxH5DmiHG/9jdol3XJUft1VUOkRzO0WtHUGbZ33M4AHkta7nptGRopF\nm6yB0VkOGFUGdEHlgp1trCuHQH1Qbyr7lOtWRrhVxOyObaWGEjBpC9ARk8JOcmhTHJupxxHY4+7Y\n4WMlKYL3OG+yr3kYm1Qkf810LClN1Cb7pCeVAX1L/IyNpJVO2bRSMcbijAC+zX3KSJnnpKmknL/A\n5LuhpMdG9kp+4o7MnwTMf1jfw7/yMcf/43F+7IhRIboce+kpDA6R3PBBks5FU6oceThwtmEYNmzw\nqKiAAqwBq0kS6dqB0lpC3ICR0QKgpK5m4NcYFxFdIDEwJItzGoLCUnM6P2GxukTCiqH3TIJl2tQs\n2pbkZxgTaJoZz84fclzdprAlKQ0UNqGwOPUqVTHn2YfvUjhHS8vpdML64gOCKlHe0fWBtltQlhVF\nkRtEpATGaPq+p6knENf0nSeEAa0si+UVWsOd+cuc3LmPdo6qKinrCYmA1gpdnOC7NUf3XiMNPdZN\nKJWimJ2hUyJeXuDu3kMNA2Yz8NZ3/3cmVYkPIdsUuFzQE0LClJa279HakGLEKoOyGmscp+WEoFZE\nFE0z5fLynChZg0upiLYgmQFFjR8gDktC8MxEM5kpMEtiTAxeYXwFaLReovUSCZGh6wk24fQCGRSS\nCqybEOJATAZUrkdQFiRGkiSulp6Ylhgb0CoSvEXC51YHtx8/Js2SDubN/S2AX9u+CfBm3Pd7MBcj\nY0SuRppFZSBX+fZeq7gDcq1kdNEVCCp37RkBXEYwj0ZjdY7Gk475QiF5ewd/yux031FlX/md2FGy\nE4zdgfeNYh4Oj73gnERMOgDttD+2BW6TDsD94DE2hYPHByTpkV4ZaZYYSNFnH6CdAdmeZhE9RuaY\nkWbK3YzEKNDbvx0j0N8Qsmyl5VGhxs8Umz//ZFROOGtQWvOTpns+v05DOtG1+RY/SaJdrUEbjHE4\ne0Tv12iTMNpRqBLjLMpY+hAZNhVKR8xEZV45wmLd0kwrrIaUOvquRtUOnRzdEHGpQxcDyiSSBJwr\nAU1KBZaawvRYE9i0ayaD4ExJXRZoOUbkEmMGjCNHqxicPkaLpymPAMOt49dZCNSDwfV3+Ojd99Gi\nqCcl08mEzkeOju+TkpBSJIRAWdbEGHBOY4yhcBP6fsNytaAuK4qywlqFLgxKm1z9qjQigi1cvuVN\nDuMC4kpMXGFu3SKuzvG+o2jOMEe3iI/fJJkCv3zEvXsv8fjdH1AWuTBHK41SCu97tM53e8ZYjFI4\nq3FmYPBLniRBUsTHNfPpHfxmwcMnjzBugwk2GxvZhFaOoTMkCSgsm81AWVXABmuPaNcBJTXVRFEU\niba7xChHUoFST0jS5rsTX2GbAmscXegpXM9mUEgsUAgpRvwQaU1HVW/QydGvzYHW93McPw6Y36BP\ntjP+kP1r5/yYtDTjHNUReY7dd1TcmUHtKBa2Pic7dhdIKJujRTE5OSo6g3tKI8CbTD0Y0RhzAL4q\ng6dWESM5Mgd1rRRpV8zDCLhso+1R6X5Y/DPqwndAfgjQHzNtiuib25J9VbZRuk4jzRItKWWufOuK\nGJMZqZR91WvSI3BvtTUq0yskEAvKJKLKnZgSEMfirLxCTIqUFDFuAV1BUDkyNwo1AvroIvYTfe0+\nNzA/qe/QW0FFzzIMPL14mMu2UwCpKVwkJg8uUusCEZcbJ6cNEl0GgCQo7TDaoJVFa50bEHeKoU30\nved4UnI0P6Ft11jjiNIRw4ASSz90xBi5ulpQTWpgCUlzuVhx5/Q1jmZC6guSNWA8TV2BCsRY4Iop\nxERpTmmKKesnz5i4E4YQuHN2zDqs8Sk7LS4ur7B1RdclJpMp6/WKlFIuanIFSXIiVrmaSV1igLKp\nESXU8zN8gk3XobSCYUAphdIua99Ng1WSPSeqmuDPwU3oPnwHfb/GDgvS2RfQ734TPzvl6tt/iDaK\noR+wVYmPQuMcxihiynI/bR1K54TNMAQqVzIvSoarK5RRODvheGK5uFpyvlpTRsHaAh8HlBqwzmCd\nJQZBc0LsDTF05I5Amk3rEZ0dKbWOKFNhlaEuJ6w3nhA3VHVJ00xQSpBYEMIGUHStUGhLgnx7bxLa\nJMQ7JFZI/P9B27hPSLNsE5vpxowHgB1fdGy77fOKEcRLXnVeMWlMrqUc8ekx2abU3v97LBwFwYyg\nLjbTCWI0yRiSCXsFh4lECViTlV6RAyDfTsnHNdvIfF+hmcF5y2nvQd2yTVzGPc+9A/wbkfYWpGO6\nfuzGvomjV8u4vXteSkhS2dMnGVzypGQyxbJTqYyc+ZZqQu0j8vHilpOYCmWEqBQRRRSNGYFcJ0VM\nGhVVppKtItrcNzRz5Wq861GjouWnGMwdNWU5YbF+TGEt51ePcbVi6s6o3THazCjqyLJ7n6oeCK1D\nlMUaIUkEPUdJQI+ewPWkRiWfO9fUBe1K6HtPrANVowi+QNGRUiSmrGuWELi4fMakfJX1pqUqCnRZ\ns95YjKm4NTvl6UePEAxKQ1lZLA4Z81/OzrF2TlEdsV5fMPQDLkFaKZIkoheUgRADsRuo6gqlFMMI\nyHkoJk0NKlEWNdYayspSlg2z2ZzJ/Ih+iLi+Y9LUiAht15IAFT3JKCDiVI4C+mcbVCNMmgY7dEQD\navEu3aaFOLBKwr1bcz581KJjxDhN8D1JCqq6xDlL6QrabkOIiaIoeba+4qpbYYGrrsUZA0Q02frA\noBm6kqhz0toVEZUSjTsl9I7LzjObliQJFMYy+IgfFK0WyiJSV4qoPSkFvG8JKWCrKU3d0LV9vmgr\nxaSsOb/q8KJQWnC2oigGrJrRTF/NjaOHz7xo6EePTxKZHypU4vWoOx6AdwwQ/Q/Z9+zSscqM+mWT\ncrJTpREk2K0KYPQQ33YJyrq4XKmoHEjQiM1RuFiVgd3mqDUmnXsCyJb9HkF9C+Z6n8jc68O3ssKb\nVZp+X5n5QhVKPudGgN+Csd5VY15ftwCu4xbQR3OtGMdj+ZwkTZRAEr+XGY4+73LgXbOLzMdbGtFq\n51CZN0DFRFS5e1MUTRCFTno/oyZEBdsG0FHnvERQKENOSGuN2t4W78tJx/VG4dQPGZ8bmIcUqW1B\nWc3xq3Mm9TEX6/eYHdcUdk5MEU3AaAX6Kab0bNaOmDza5i9R7xOT0jCtJgzDhlJblOTnKAVtf0WS\no5xYdRqtNFZmlEXEiMUERfIbnB0QFVC2wJk5lTuhcDOaokH09+n9JUWxpqmPiV2Oovu+pbQzrJ0z\ntB06DSgfSUNBUBuMNsSwYRg8rp6gUmK5XFLXuROO1tkf2RhD23bMZlPKssIYQwyRVGoWqzUJRVmU\nOOu4urqiLAsYNK4oCT4RVucE4/AKhr4DowhPn2Jv3Uf7Ner0FnLhKZsJ5997k1u37/LsySPKskQA\nqzVIQity+bJ2ePFUZYUuHMurBSKKk6Mz3nz4Pn3bo3hG2y7RZcfpyfYrZFG6AX2JEo1TE5KvCH2J\n9wNR1hRFpmOKwmKtoSpA8KzaD7DWsV4KXd9RFYqqMqD6fCEMA9pZnDEUG89mIzhTIETCMDCb3EKp\ngqPpMX1/swTicxifgmY55MgPAT3c3A7Pb6PJvt1mu8ooQcwJOqUyaOyTnWMBkDJj9aUeC4I0RJUl\neFETRwVHsoYogZg0dgTx5yLyFEc72r1LehbHbOmUgyrNHWD7nV58rwf3B+cP9OQp7ErpdwA97t/0\nJTdpdDhM189tt1PSOHKy0+HzRUm264FYctTUi9lX1ArsaxlEQRTCDsgNOmlCzKuKetSN6iwFsQox\nghjQJtM1aQvkW/L8JxifY0PnRKGFEARrGpwr8OEWQwj4sKSpjgg+IjERdI+za4wrkeQItAzeoZUj\nRENZZE151yWSeHofEQNRWtpwSaOnKJ2o6xlqKDGmoF0HNhIpcchwCWWNHyz3H7xCU9Ws2iuMarDl\njHb1BFeAKgZINcvVBqMC1Atcv6DQFl2AH3qaVIAIxhoIPaEHMQW1LRhS4uLiAoDJZA/qq9Wasmww\nxmCdI3jQKLwPtG1HXdWsViuIGXQlwUJfUZc1m8tzju++QuzXOCMs28AUz+L8IWeFw617fNcyCBxN\nS7rBU9XF2FDDo3T+ChhrsMailWCdBa1wYqiaGjusWX/4CCWJoe358PIH2UuFDabwGDOn0AWD9CSp\n8kU1ThhaQ9d1xOhpQ2I2N5TO4oqsoLEmEmKi9+dsBkW7sjmirxpEbViunrBeObre0xwVJB2YTTRK\nWYaNRStNig5NTVUcEVAEfjoi88MioXQQnR/SKuEAxEN4wbbP20pncFA+e95ovff93urJ0eyaN2Rb\n2m2VpUaPYK5Fg1OZNx4bMqRkSCmMkec+Eo/qOphvrWZzAjTt/cQP+HIrcQTqDOJuWwC0BXC223nf\nyb5qM9Mke28Uk/bgrQ9av+2PxRccy6uMFySHz0lO/K74aVevOm6nrSzzkDPfgroCYsKIyUAeDSGZ\n0VnRoKLJhl5RwCuwabzTEbQZE6BmTGJo89ML5u3wEXV1hNEO5XJ4cjY543LzmN73DH6DLRNVPSXF\nnqg8ZVmixDMMATEX6HSK1Zl60Bj6rgdlGPrcRKFqhN5v0GLGK6FQVQ02aoibrKQJiqQ6sjArMWlq\nJtWMkAKIUMkt5qXH92+xThsMDcvlgLYdqpzgLz9kVk2pyorSRGbTO0gscmSihWkzIdlMRSilcuQd\nI9YWDMOAc5b5fEZZOqoq0zBlURBjpC4rjo6P0dpQuCJHWgJKKQrjUNEznx9nZ7fUcvX0CcaVbNo1\ny6dvo2+/zHF3gbn/Ck56LoPQLxdYZ1BKMCYnDEMUtPcgBqU0Nil0M0EJOO+plUFbSx+EXhlK3bDp\nLmgKwVhHXVqsrghDh1INwXtsKNEpN4vwoUNrg4RE1ZQEnSgLDQmULbi8yJ+BH6YUpQYVCH1LOyxY\nLrMe/eoqcXJSEfSa2VSz6AuSRKycoMSgJeSqPf3TAea7QqH44uTndvoDEPcjreJHII8BvN8WEcpz\nU/4/6t7lV7Y0PfP6fbd1jdu+nEuezMqsqqyyG7uNobtFSy2LNkKipRYtZiAmDPgDQEIgaCSEGDBh\nwpAxTBATpB4waLCQLVqy225s2qa66KpyZVZmnjzXvXdc1+27MfjWioh9MrMq7XI5O5f0aa1YEXHO\n2XFiP+tZz/u8zyumRrCT2sIRxBOgK05j3mRIUkDwkmBGSSXIU1RudMkFLlSSFkTSlrVMcdN+LDhO\nAswks0xJ6andfgTyI2hbsun4jfPnzyeZZZwu9CaIj5nk54+n8W6f97oYxNFHHkZtfOpyTd76SSdP\nK8lUI3hPpvzpwhhiKvhGhQwaGRTSJ1IiQhwHQ2ui8Wl6kY4EnbBbKnGSWYSE0QH0592+MjDf9y+p\nu8cYfcXd7hllXpHJnELl7NsDnW2Z6RIlCmK4QMQBZSwmCrKgCRFMEGihGIJCUOClpesCzg34IUMW\nJcFJeteASVfhwii0uCCqnmgalNTsekeue8oqZ9/ekZs5Shvu1k/RsUCHjK2bE7wn+AP71lIUgcGv\nyRH0nWGeF8yKArcdUGGgH/pRAlPkmcYLyXK+IBKSxBEDWWawdqAoSpbLJcNgKYqcLMs57PcURUFd\n10ghyYwm14Y8zyiKMkkkBDo3oKUiOEewHftnP+TBW99hdfEEuZzj9mviJz+kHzxaOmYPr7h5+Rpj\ndHI5QOoCtQMmL5GmSuxsGOi7Hm8982rGrz76Lr0zbPbf55XbUGaXaNGSSUcMnhCG5GTxW4xwWH8g\nBIP3KRdnGrTbDS1Sa0QmMKaisxkxFrhBIDDMSkVmwHnPbn9LP1QIkZPrGcJfUBclbXdDPRNsNj2C\nOX4Q9GJLEH0a9PtVb39Gn/m94qf/rJRizxi5PWPk0/kE3gIlQcmIGoE80fKQQFwCRISQSDEBeUCS\ninYKmZh5SLklCcRHm150I79OAH7Sx8d15hyRIzuX+ATmU7Fz3J+Y+AnEsziMYG7JwriPFhOH42t0\nSHUY6eNxPw2XSBODAtLF42PpwheeC0FipB199Pbopw9SJekjJnfK5CU/X4zNQnHKVgkRGTUy6JGR\n6zEnfcy9MRCtSNHaOnn/pToR8aSXC44nf47tKwPzvpe0eUeeebK84DDc4WNBns8phx58JPMaEQ1a\nCaz1WNcitWVWX9C2knZoMW6FUhlalewPB7Ry2D7iBknoDVV5ya57TQwDfejJVYWLgKjw3AKRKHKy\nGHB2z6fPPqDMHtC5Ha/ufkRVXjGEbRolJypCTIA7XygIHVoFSiHJhGZwAxpFbhTO51ilcWLABU9W\nFkQfqOYVznkgUpYl+73n8vKK2WzGfn9gNptRljUxBsq8oKoq8izncNhRVwVZloYmSyXxQ4vAMvQe\nt36JjAPGSD69ecU7D+e0rz9G9AP16gJiR1le0W4/4vFbb7Pd3KAzTTv0aAG5LpAChn4HMcdkOXlV\nEVxGs3tN4zpKCY9WlxyGAbSgymcoHbndvSBTA1FlxKixoU1SWGuRWhJkQOmBpvcEmZiL1AO6VGgp\nWObvcNPcUBioTEmRB5qmx3nJ4D1S9VzXb9G3lrm+xNnnKA1ZoXFWcNhbAo4oPeJfBGui+3KviWfs\n/Nxy+HkSixtZuHWn5eyJmWsRCaPnOYqAStOMQQgkpKaUNIkZgRgn9Eh0DEcwVzF5oX0YmXlUx8Ke\nF5P67Y6M3EuPlw6v1OjjDkcgP3V5TjLLKcXQYMc1kGHJpn2YgD2BeHa2V8EnVh5GFu7jaVCEG4Ha\nnh27zzm26TjEyakzdnce7YenIRzH/HY5dmrKyYWSNG+hxtpEDMho0r/PJzYuXH6BKxgAACAASURB\nVDwNvbCCaGRi5lYSdExLicTMJ5nl68zMZXbgMLymdAuUMvShRQSPtxkiFlSZhtDj9h5RSAgGGwXS\nQzU3GLlECUnbvWI1X1GVNbtdh7O3eK+QCJS4IMacIr/gbv8xOsBW3JHrjPbgGYaBLEu+6hgNMkqM\nBGsbrGvZtB+y6T9ASEFdPMLIVETN8gyjNVpHRN+jpSULRRqGESzdcEAbw2J5QWsjURXkuaYfHENv\nubq+Yrfb0vc9lxdX43DqjKqC+XxBluVstxvmizlZlrFYLKjqEhUidhzMLL2gzDLwA83+wP7VR7im\nZbZYUNaaw/7A0O9Rpma72zFfzNndvEQIQ9M2FGVNxOMHO0otA3KA+ewKmWu0KdJts4BFXiGM4cO7\n56wPW7qwp9JLVst3yLXh0YN/mRfPXrLpn1Llj9k0HVIoHILQC/oe8mvw0dP0AqVbtFOEQyDLluBW\nyDCA7Ilj1MLQRQQGIQJKKZp2x6p8iO0GFNcgbskyRdtD04APHXVtUObr0zTEefv+uQXx84D8zTUC\n++BSc2eQoOU4kUcCIiCESHUFAlIkjSAVOgUyirEDU6AQ6JjkNRHEsXHGMM0IdScwFwo3MXLvk9fc\ne5Q+83WP3ZsnmeXM0TIVNkdmfloDGQN5HEYQPwF5HgdUTGA+yRfTgAg5AvW9ZRMbPx1HpD09n4q6\npzb9qE/6eJgA/B4zB3TylaPicY9OmeWT7IOLoz4OuORYSUA+/n064HXSy4Vi9JiL0cnyNQZzIXqc\nO9D2a5RWCKnZdy1aaJQvyXSN9Aa33+IGTb2cM/Q9ShWEIFnOLnh08Zhts2XXPCMKS1XWvLrZIP2M\nuc7p9hvyQmMyjR1ypOzZtRtcITg0PS6AMR1ZPqPMZyjvyLJIcA2KjIAj+ENy1OgFWuRI7fE0mFii\nJQyhYVAde7tBO89y9oCKJeVyTntoCPsDRTXHC8Xg4yinpPmkjx8/Js+q1NrvJXVdo7VGa01VlQgp\nCSEF9eRZQa4VdnBs16/x9sBu6KHbI/ICEVqMERACi7Ki2azRViOvlrib19zd3lDnBfttS50ptNYg\nJCY3KZaTdDvuhUebOn25ggUCeV5wt224KJesypJP947gHb11vPXgu7z96Lu883DDR8+/z4vbHzKY\nh+zaV+RFjhskzmpc5ykqQ5ZVqCAI7YGDPGBdDcGSaU2eR5wX+AaczSAqtBIMQ8s+vkZGQwg9QkeU\nFChjyfKCtvekm7kt1xfv/qK/ugr4J6Ro57/3ua/488gsk2b+hqPF+c8C+vDGsRqDspKmO/YsTA1C\nYgTyMyusSKk4aUWBjmIsTibj+QTiZ32ZaJGKnl46tEqDG45DG9Sp23KcMHovf+Wom8dzdj7JLcMR\nsNO+v/84pL0OLg2NTp05Z9N+7gM5lhOI23j/2CbGnMKypq7O01TSY8fr5FwRI5CrtKImIaYBxiHQ\nMF5gphF0bmTjThxZeTAKpVPcglLgxwJoIuSTZj4i/BdaEn/2HedXCOaCIezYdq/I8yK1HktY71+x\nMt/g6vIxzW7Lq91rajVDhYKHF3OaYYvtPMw0dV2RmyUu7IlhgzUOozSFKcnxHG6e09YFyhRcL56w\nbj5CqsCueU2UObiCEBxaF0ipgYgPDYEWrWveffAb/Onz3wIv6Ps9SheYvMG7BsESESWdG1AcKOWS\nWlb0rcMYS9ztkVIwm9U4JFIILi8viDFgreXRw8fkWbIizmYzIFLXs5SZARRFiZSpM9RaS/AeQ47S\nitxkbPc3yZa4v8OrjNA26LzCS89+vYbomK0WuCGF8Sug61vqWZ00bjTtbs9gB6QQx47YGGBoW1Se\nqvLTxUQpw8XyMrV0+wHvW17dvOCX3vtXefToEd969z3a4QWvt548n+GCBW/JpAEfECJZTWtZUeYz\nrK1Zu9c4IhmSi9WCKPcMg6UPoMQCFSVGQ9cf8AZa9wrpKwQOYVLAWRAHtMkYOocSPbvmJ7/or+5/\nTModmn/hK/4sBdA3GofOi5+fx8wHm9b5sYYx5Iljl2ci6DExzZjEFRgL6AhEFMjACORgosCMISNh\nlFb0BOZCouUkrSi0crhpH9S91vlJalFjc/ux+HkE8rPCJvYMuHvykMA8jwN5GPfjYxVcGpU5Tvg5\n16UTiEeE5bgXNn7uMTbJLDGq4xi9o6SiRLLbw/hBxhOY68TEhYmQxTRD1HwBmLsTmIdBEYxPS2uC\njsixACqUQB495qOj5esI5gFN73s8r7ChRhuDNAFhBrJMcHnxgKpacNs/TxOD/JJcFwilaYcDHz/9\nkFyvyMwc70CoAal31EVFZXJEu0MKz+ubZ8wX3+Vy9ggtAxv7FKn3SUYQ0A+S3bZFzysyU4KwbHYf\n8uTRX2M5/2Ve3P5/bNqPIXbMLkiWyd0AQuCDI2rBbbdDxudI9YCVzIgCuq6jaTaUZYUqDYvVJV7o\nFOsbInlWYa3j4mKJ1or9YYvRGc55pJEYk5FlBhGha1uKoqBtW2IExcDu1TP84TWuP/DwyTdp+hVB\neHzfk+sKrQzW9aByDrsdhRFoY+icpTTZMdrUO0dnLVUtKco5JktJjTrPsG2Hi6lwmemanTvgjaHp\nB7r2lsVc88nzP+bb3/wu7z1+wnfe/5f40Ud/jI+vMVrhXUQrjZ5Lglc09gaTV8hOUag52jVEn1HM\nS7LCEWVBvwHbHChnc6q8QoocLSva8DEChSNd2GQQKCXp7MCYpo3UGq27X+TX9h3g7wL/LfCffOGr\n/gwyy7Fh6Gzv/X2p5TMSy8jIBwe9g5AsTqPtUKQCp4hIIcbOTsZ2TxKIM/YWxXSRVxOgByCK0ban\n8MKdOVbcOLRBpZA65XHeo73Hjc08KoRjEVRGn2Z0nneAnuWKfx6g56PMkseeIvYjkPcUoUfF5FsX\nIWWBH8e2eY4adRq0MbJzO+2BYTwe96eJS2fWQ5na7mM418w5Fj2P8oqJxAwwAZGlhsV0dwCMjDxY\nSbSSOAG59kle0QE5yiwTmAslTnr519WaGL1G0OFtS2BA6jkiKrR2KN0lJ8dsjnppiH7NEBYcdoJi\nWaCxDD7wzz78PR5cvI0ROW1/h9KRR48eEzvJYTgQZcT2TQqLA+b1ima7TsUMUhEyWEPnIjYPEEFm\ngV33nHezv06RV7y1+hvsmlu6rklRlV4h4gwhKhB7FvMKUcwphiWVWpLJkhggyzKuHn6X9XqLyTL6\nvgcN+/2Bi8tLlBZstju8v0BrxWKxZLNZUxQl1gb6vmc2q2kPB7TWDEOPj4Ki0Gy2G+YXK7bdHXkx\nZ39omF9c0ux3uG7H9uYZeWmoqwXBB5bzir5r8MFxMb9MXbAotDJIqShLSVEkXT8SEGQgFCYvMVme\ndEU7cBEky/ljrmYVr7e3BAb+9OPv8ejxNykKxTy7YLl6wIvt96lUCWWNb8BaS6ZyhKnZ7F4z0xes\niiXX6m1uRotons9oulus64hOYvcNcVagi5pH9SWNL9h1L3DRkueavNA0+5bgJdb1SB3QJqLMz5lW\n9NO3/x74z4DFT33VnyM1Mbo3JJbP8ZbfA3J72iewPskrSoAXoI4jEkZmPpI7wTjoJpJklsBxgbgH\n4GY6VhonPVr6xMi1PrbJqzMny5Q9fl9iOcksZmwamhj6qdA5svMRyIvYJTAPA0Xs0MFzTLHynAb7\nTGsE8SSzpD3DtJ/APJ0LcfKRn4WJjUOdJ0Sc7nKi4KSPGxIbNyDygMiTSws/BpTZUVrJJHGQhCyN\noPMmoExA6YjSEakiUgnkFEk86eZfV828lA9Qekfv7/BOsB8ceZHGoK2bl7TuwDK/ZjZfsjvccrt7\nStctuTYPUvKYFzTDS/qDoUGg45y33v4WuIy7l7dsjUaUgkUueb1+islmKO3ItYaY0dmkMXvvsFay\nb9ZcXF2lPBI/8HL9Kd998hClch5f/hV+8vIfM7QdnYiIYIgxgZ+pAnmcMdwqCmVo+w7tA/XyET/+\n4Ed84+1vY0dfatvtMcZwffmYu81L6rqmaRryPMfonP1uj9KSQzNQ1zWvX78i+sDq4iKxc6W42T6j\nNoa7Q0MgUOgC6x3b3Rqja0LcU9YzXN8SvCPaSOcHqrrmdrtmSSCSYvZcDAyDh9DirMcOfbr7yPP0\nS5PnxDjgDg3BO4xM8o8uFHETGGxHZgr+4A9/hyePv8Oj+opfeuvX+NHHf8h8sSRGwdbdpQRGHTGy\nogsHOhwIwapaoOOMWM4oJNhg6Zs7XKvoVcNtcFxkMwpdczn7FbSZ8+ruewiZgraGAYiBEAVKO6yz\n5Cb/RX1l/23SAJY/An7zp73wv/6j0/FvvgW/+dZnLzCjPM00oit8Zp8YdUg1tnv7KYkv+NP+6IyR\np/Fwkz86SebxZJHmFMiuGFk6Yz0PmYYkiwTcTpwAXEuFVwrtNNo5tFZ4p9Baob3DeIX3bsw8UUh8\nYuDBYsII4mEE8TDaEMfns+O5EdjHc3lIurnyfiwscnSKHI+nx/7s/JtgP61Aauo7ykI+hYaRQsKk\nUCksbLJeqtSlrZRHKo/UHmXS3bM0HiUSUEvjj8xb6IAYQVvo5HoRMl1skWfduHJi5ccnPvtl6n8b\nht/+aV+34/YVZrNcI6WmaQdsGAhe4KwnywV9v+F2/4q+H8hzzd2u49B2gGC7q1jOlvTdn5JlFxAN\nMXqePHqH99/+FbyXfPLp/0Vre8y8RviWKAPb9lX64KVHhRRMpbKcwe8o1AJjAsFvcDKjzC6JYmDb\n7zBGU5oF17Nf5Xb9ESLmSCHQWYGMl5RZGqvmZMQ5i9E5RTHn9cvnICI//uAHPH7rXVRZ03UDv/xL\n38ZkCoFgPp8zDAN1PUMI2O62XF1fHtv89/s983pG33XIGPAhAW7b7lIWvMrT4IzDAN5hrSevF/jD\nmuXyAhcHijLj9vWWqwdXzF1g6C3GpKKn9+nbbYwmxEieF2NXZUgDIQ4blNKYTLFdr+kOlnlR8fDq\nHV69XmOHjugFIfN8/0e/z/Kv/h0eP36PX//uv8Wr1x/y5O13uMme8cmz79F2d9RljUCzO+x5VF+T\nG4PUBuoKP/QpvtYZ+rZhcXlFYzs+evpjTL5iNX9Ivag5bF8yhB37w4Gub1EiR+kMpQAhCf4X9pX+\nW6Qh5n8XKEjs/H8C/oM3X/hf/a37DOvziHqQacWxeMk4B3LqS5FRjDEr8UgMjwCuSGxwfH82LiNS\nbc4kSXeSeafcrWlyXLrF92MBzo0AP6UqqrMCok4hZtKl9nilPMp5tHMYm4A9KEWwY46LEgQn0oVE\ngZT+CMZZGDBh9JOH1J4/rVM8bTg2Bk0rXcE4gbY9W9O582HXcMaox/PnX4mpEz8TxxVMsmImx4kc\nZ3+OS2qc1FhhsNJghWFITniGmNGLtB9iNhouNS4aXBwTaKYxdHG8XxmzX2KUo6TD/Sv6m5v5zbSm\n7fDffM6L0vbV+bh88lLHkNwqfkjBQM3WE1Xgg6f/D29ff4vGbxicZQge0a25qr+B7Qyz4hFRbokU\nzGclVbliMbuiax11OeNgZuA6gjFIEWn6LSEMlApMZhG6xmQZ1+WM9WGDksmzXFUVUgXaww1ddSDP\nM9ZbwdX8XYzI6PufMK80dtii6vcp8wU2fozSaV6pioq+6zjs17z97rfZbnZ4b7l98ZyrB+8c42ar\nqqZtW7JsbBI6HJBSIYSgKAq895gsgW70nv1uw3yWE53HS7DtAedd0kZjl6INhgYfGowsE7PzghgC\nVxdXHPYtZVUjpaRpDgjvid6jlabpB+rS4L3HDz2D2lFUS2RRgUmdmsvVA17sP6bfbMmKJavlit1d\nk9IVfeSf/vH/yVuX7/Pk4Tf41tvfoipnCOl5+8mctr/hbutobYsUBUYPvGpvyWSZmEufdFcfHEor\nLq8ec7X8K1S24YOX3+eTpx9yXV2idaA0V+OUmA5iYkc+pHtrZWJKuPzFbP/luAD+NvCf8jlADuDL\nn/1v8IqUPS5HMJ+anc5Y9EnbThJIUKRsD8+opSRANxIyTsuMS/NTAP0sDkRMfxmMgV3JRy1tRMqA\nUgnItfLps1cKryTGySQtjCCOFTDZ7mR67wTmR1D3NoF6sOhg7wH6MUgrhJNnewLqc9b9BiOPR8Y9\nrjdvPaZzUycngphDyMS4Rl+9UXg9LY1TaVk5LjGCuUjO+IE8AfnklI/TGv06UeOiGv36p6z0BOij\nNn9cfJka50/dvjIwPxx6Vlc1F+qCjz+5QwlD1+/xUSLo2LVrNt0zel4z2D3EnIgjqB7vHZleMoSB\n1eIdEJGmbbi7W7Ned4BnNa8YhgwvAzpT3KzT7b4TAestgojuDfk8p6oyolf4QVJSkGUlm+45frjD\nB5DCYtSMWfmETVijdIazM3wIxKGklG/R5zf4YGltw0JVvPX4G2w2O7TIefHsKfOrR1RVRQiRfujY\nbjasVqvxXBg7QXOapqEsS5yz5EXB5uaOx48eYrTC9Qek8Bz2W4wQOG/RSmGqFX3fpIamoKmuZkQl\nqIqSoekI3lJVFTEEAoKLyytePnvBenugKjOyMRcmryp0USOEBikRMRIGm+4IrE/xuEGQq5wnjx5x\nK7a4xtJIRxYVv/N7/yv/5m/8+1zOL3lw9YCb169wYqCaX3Donyf5svcgA02wfGrXXLgZijRe7jA0\nqCLjav5NVvMLimHGJ7cfkxm42X+A0oDRqFgz7J9TlDUSMf42W3y06C/TsPMXs33hr54vfnYhKxlH\n4kluEZOHIqYiZYzIEXwnWUX7OA5FGHXjEYj1OTvns0CuwwnI5bQmQJ9ci9PnpkgukXvMPKCcR9kk\nO2jn8PbUeJOaYZJYP80lFSplw2QjK8/CgPFnkotPQyb0WUTtFGUrj52e8aiR3wP0s+Lxffnk7AOe\nAF1NNYXTuSh4g5UL/MjMvU6zPhMz10dm7iZWPgE69wHdxlMrlEOPQJ7WcXpRPDHzNwGdrzOYN7st\n33j7ffK84bA6sF4HYtTkOSgjyDKdXAsmYrSiLCRlMeP55sesygGtM+qqRkZFVS/58NN/ygcf/4B5\n8RAvDgRjx/Q+jSCyqGta5Qn0ON8RvaOPFbIzZKZkZ2/R4ZJDfyArNUJEdodPCE4S+x5RVUQrqKtv\nUFYzRIi8fP0UF3rKeomSOb08UOUrVCzonWU+m/PRT35ECBYjJM6nARDe97y+ecbFxYqyrPA+jY+b\n/jOHYSCGQK4MOM9uv6E2hueffsTFrEDFSN/u0TIn+HSfmecFwij80CGCJ8/KNACjLtgfHE3fEDzM\n53P2mx15WVA2OU3XMMtzFqtLqsWDcVi2QWiDPezTnUTb0gKHIXA4rDmEHVU9Qz6C1y874n7LQIbf\n7vhH/+Qf8q//jb9HlgtMoXnx4in1fIaNbzO8+CFR9ojCEnykHwRd1GQ+4pqOQ7OlWNZU8xlFPef1\n5gMymTHLVzTtAHJHXc+J3kOoka5iUVfsw45+sLgoMOovJc/8d8b1uZsvfjYzD2Jk5WOOeNIG4mgb\njMggkOEE6HEEclQcw1iSc0WImOSVCcg/R2qZWPkxHdefybVndkYA1OjNnpi5Ckh7xs6tJyifcs6d\nJE6DFizjhSA5SU5gbsn8kFawR0BPGrtLk4HGeFp1DuQjmItzIA+nFc+OPwPmk8wiT4+PLN2Pz2eC\nmI17MzFzeWLmSp3klpGZHwGd7IydH/tXE6BHfdy7qMfS79kEo3OZZZRajqldX1cwJwT6bqBeXrNc\nrjkcGoKV1EVNVgZc32IWmkwu2Q0OJTUhOPIyI6gNOp9h7YCQFmcHtoctt5ufUBYf8ejyXaIvaPo7\niBqBR0nBvMqIoiJEQT8cyMwB6StCHNC+5NDuObSOLm7IC8V69wlSVBR6xu3mY/xQUcwqtMjIdEZZ\nDPzk0w+4Wr7FvCp5OLsGm9HuNxx2B5b1iovlAqmylOp42PP0wx8jMkNmMkJMGvZms+by8oL5YkGM\nqeOxHyxd35LnOYfDHplrZnnF5u4V0vVUuQSpiVEiREArkLKgqJdE32GtRSgJRvHg8RNefvoxDy+v\nWW+3zKua/WGHEILlYkGuFFIIYhgQZEQZkWVFpjNC3+F9oN+vmeU5GoHxCQFCHlD5QDxIXGfxDn7w\nw+8xr694/53vMNBhYsA2kSq/YLV8xC7e0Q07Wh/QJpBphWhbRPSUucLHAaU8xAGpoGm22IuMUifn\nsgsNzgVkXOHbiNMiTcrxOd1BEvSX8QX+Yjdf/uwLih/BPBBPHvGRnok4AnoEFUSa6H7ORFU8piHK\nFI99H9AZmTln7PwcyOUJ0EepPG3x3CU3WunsSStXVqJHEA82DWlIs0XTXybsyMjHC4AUEePHoqZP\nQJ6AfQJ0mwDd+2Pm+ImZh2NTED7ek1HieaX47PieVDH9YFNNIpJuh/TEzCEYkdbE0CdmrhROq5PM\nogxOTqw8w54BeR9P8soR1Cd2zsTM9ZnM8llWHoM4XZx+ju2rY+aHT3j16oKsWmLMisVC04selSmE\nSF7h3vXkOieEktubLZeXFbkWVIXCe4sNB15tnrKoIiJmDK2n0IqHF9/gvbf+Jr/1u/+A7f4VXd8w\nrzzffOd9irLEx5rGHsgI+OHAfusZYknnWiIdftewpKaqJE13h0XQtT273YbaXeH6htViySpfcSt2\nvHj5guyddwlURG3Z92u0rpBaUZYrpJA8e/GU1eoxq6vHdENPPZsRfGB/2KW7kOjQWqKUYbtdH9l6\nUVc0r3bs+o5FmWJxZ1qw3x+4vLpKVjbrCMqi8wrnPTJCWRR0+z3EiB/g8dvvs1vfsFgu2W032O7A\nfFGxXq9ZXV8jshykBq1RKiO6QCgK1MWKbL0mCwPPPn3KoW+JTtP1e1QuyauM8KqltZGhSemTf/Qn\nv0ug53q1oqhmxBjpO9BujrQdtg807Y68DFhnUVJgo0dlis42tMMdVXlNVcx5dH1F12+QRYnSirZv\nkTrHRE/TDygTMcbg+wO2ldjwc9Kbv4DtSzFz4ji0fGTmo3VlKvxNUog6B3EXj3ZkKQVSRtQosxwb\nE6f9GyB+XOKMmfuTzHIE9LEAKlWSWqRKmSZSeZSSBCvR2iW5x4kkzyhSQXJ678joxQTmI5Abb8nc\ndDwy8tGrrqaBE/6zQH5k53AC6zf29yJ5piLnZN9543VRCqI5rQTk4oyZS7zS9wugZ4XQ4Z7McgLw\nc0Y+FUE/C+RqBHL5Wb3868rMV4sl0W+w3Yq6eAyzDdJ07Owd9fw9Xtx9QNt1qEyitMYGz7Y9sCpy\nhq5GZpHDoWEdN2gxpy4uIJb0Xc/l4jt4a3n/0V/le8MfYHsH0WCHwGpV0Q9bqiIipUMXEPycw11D\nDAq0pW0Ds+ISZzuyLNJ3A9Z7ml3D0Djuqg7CI967+C4LmbHub9g3N+iH76NDhRUHijwDZXj+4iMW\n84dcrJ7QdDv86+fk5QyZmTTENXqEMHRdhzEmOU0ixBipqpJmd0BEhzHw+uaG6B3b/Ya8qHBD6tCs\n6lT2SvG5BucCu+2O1eqSED32sKfbOLJqRrtbs1xd8Kq3fPr8ObOi4G5zx1WWo4ocoTMQMt34r9fE\noUTgmBcV77/zhJfblrbviJSg1sznOcuLjhcvn+JaTfRw9WjJxy9+QF39GnleUFczumZLDFn6MlsD\noUp3W9riBmhcj84EWkba/o7bzU/IRMVqmdE4UFITsYQoETGglERnkRAHkIYYcqAjK35h1sQvvX2h\nZn7WyOdHME8WxDNAH/XyECLKR6KPaD+6TtQpXM/LiJJp7sER0N8A9c8DdCnGAqi4R15PYDJWXMWo\nlScgDygXiNanmaL2FEA1FUuTA2Z8rfIom3J1jE9Dk40fpZUzMDcunVPeJeY/AroY42vFBOTu9Lkd\nhx6fuT3fPBdhnNrzxrnpdRKiIY3IOwPyoBXBJFbuz5n5OYjLk8zSk9OTp77WaLAxw8U3Qf1MN4/n\nurk4snO+7pr5fFnjVMSzx/UzZuWcduiZVddoWZKbCzaHj1CF5WIxp+tLDs2O7ToiigGpB1CS3nZs\ndq+os5qyKNhuO37y8R/z9uNfxfkDVRkJUaeurD7QbFtQAygFwqIyw9tPvsXt7gcIu2foBUZF+m6P\nESArjcl7mn1OaTRlFtkF2Oy23LhPIAqC8MTQcbt5yturbzFbrRheHWh3z3hw/YSm7ZCho55VCJEx\n9APLqyvy3IwDduWYbW6IMZJlOev1gbIo8EODlgG7O1DmOW3bQJanDk87UJQ5EUOIAeeHBHbCMFvM\n2e/3aBWZry4Y7IDqDgzW4dYbLlcrrGvp2uSLt4PDh4AaBihnCJOBGwiHNZQ1+fwS9eol71+vGDYH\nPhkCuCUX13OiV7y8W3P7iWO5uuB6eYUqLC9ePuNi9pihGchVZBNbvIhkeU4dJXnmKVSNWvRIWdG6\nHUpluKFl3X1CUSwwBSz0Q1Se44aB7faGSI/WERUcJk9zQoW05HNDVQxf1Vf6uIUv4WYJhATkMa3J\nRD45OOTIxpUbG2T0CJrqJK8cm4POgHwCcXUO5OduFnHGyjlbI5jcsyaOkomygaB8cq1YQRyLnceu\ny/G16QIQUSOoSyL6COIuWRp92ut7+yTjnLLH4zF7nCmBEN74B/NGxvj545PHM8r774mCNPhaC8Jx\njRKLuV8A9WfM/LwIasUp83EYAdydFT/vySz39HJ1rwB6zs6/1pp50IbLy0vW2zWD33M5v6YXIWWQ\naGialsFKur5nuTB880nN+k7TbCNd35CjwQlmxQPyosJkkbIq6RrBH/3JH/L0xceozFIYycUiMPSB\nu+4wXuR78ipQVUvmy2+Qm4Jf/tZf4//+/u8gMGgNWe6RcokyAUEPoqcsCx4Uirm6xIaMzWFDGzy5\nSjasZ+sPqOpLSir6/jWPr9+i63psvyfPHvDpJ0+ZzZZcPnwHO/R478mKFGnroyMEjxCMw54LvPPk\nUrPreoQfGG5ukMWMbtdRz2ZEBM4l1l6WBdZ2JINvoG0ixhRI4dlut1S5UQ6qxQAAIABJREFUSXKM\n9wyuQ0aPVjmzOkeEHmJEmzK1L8cISkK1QpgS4XsiETW/ZHvzjCo3yAYWi3dZFSvMo8jt/hW12vPo\n6j3KcsEw9HT2BX/64R9wMbtCSI8QnqIwWNehTQB6HA1VUeEHT+8FyqSh3pkC166RpUGpGikLSnNJ\nqHdYtyEqB7KnCQFcYFHMEMpTVvuv6it93L6UmyUmrT/GMN77nyQW4UMC8zEPW4zukKBEyj+T6b9n\nirxVZ0szNgBN+xHEJ/ydHHufy8oj962JbzJzlwB9siDeK3aOEbNKnzR2QbwP3KNH/c1zaloTmJ/F\n1t5j5vL+im8+huMUjs8898Zx1AnQoxYEdQbo94qf+uhosedA/kbx0x0dLOYeGz/KLGMPbHiz+Hnm\nZDnWAX6O7SsD86IsQHpaN7Bbr5lXcywD1jp8hN4HgjN0hz1u1VBncy7qGTWSRjgG6+jajln1kEIq\nMiN4+/oBcx35+ONP+PjjVyyvJFeXhmWVUWUZTSPYtAMyOoYoyfOcl68+4fHVY2DHvFripEPlO4qS\nVOjxKg09UA4bPZGSZf2A3Fyx3zxl/fIjqloDASFatvtPyKv3Ka/nrPc7hAgcWksIOy4uVkgM+80d\ni9US2w8UeUWWGYa9TBkzaGZVjXAOa3uMEbhuT6FhP3RcVDXMlkgfUTpNJRr6njzL0KpASmi7FqUc\ny8WCzeaOMtc0hw5tclRRIgfJertJHmWVUVQ1OtNEoUAbhI8EUyBEiu0LzZ4QHLWSLC8f89HTp7go\n6V0Aryjzax7MZ2TfduD68W4j4MOAkiUvbp8To6Wuc/reMfQdQcQUQewadKcJQlNkj8jLgA89wvU4\nr7jZ3jGrK1aZJsYeZSDLHnDYPUfojNBE6rxCU+Cco3f9V/WVPm5fSjOPSWAJIUlqMUTwYcwdkQnQ\nJ5ufiUgnCDr5zJPfnKNHXU5gzn1AP2fjxz0niUXGxMjFsUBIAvM3mLm0HqVOud5o7rPxiYkrhbKp\nMU8rlyJwnT+CuLq3/+w55fyxQekYWTtFyjL9UG+sKZ4Wjvr4ka1P1sSz10/BWWl828jMx1zzpJVP\nBdCkmVuZCqAnMM8YONPMY34C7mMmZALxyWd+9Jpzzs6TzMJYAJ06U3+e7WeBeUGyYOWkfoR/APx9\n4BL4X4D3gA+BfxdYj+/5+8B/SCrZ/EfA//55f3CJZ5EtOeSSTvwYjUTbFPUahGRmNAcryILBNQUy\nW6Cw1CaNXOuySDeAkiXffOdv8uzlHzKba5azJavqAb//z36f/W5gXknUsiBEiZISYwKDVwxuzs1d\nj1QD3v8zgpUsMkWnDSbTGB2QpmBwLdY5TNly2AnWzvIwOEoTiLpABUUmC5RQoDP2+5csy29weXHB\n7fopCPCxoygv2GxuKYoLrh5cIaVktVqx3mzHZiFJVdUM1lLmhqKeoXqZbtqMYbt9yeLiAU3bs1zO\ncENHJjPyvEaInr7vyYxh3/VoJSnyilevXqK1phk887LE+p7DrmFoDkRn6ds9ZV0QKXn06FcQWuN7\ni1zNiHG8ny8LVJzR3Kx5vn3BcOhY1As+Xu/xvcd7T99FLq7eo7vd0cU7XLBYBsraQ2yQYsmr29e4\nCCE42tajswKhIm4AtGGwjoiiMEtUFml2W3Z9w751aNPQNM+RSpBnj9FiSSYLilmgcy8YBkVWKGL0\ndLvs5/qF+IvYvgwzn+oi0ZOA/Jg5EsEFpJNEFxCW0VsOUo0+c5nS/FLudkxgzhlYcyatxFPjkQpn\nZHUCcsb9xMqPmnli5tGJBOTOo86KnW/q5MmLrnDOj0PJ07BoZf0RvI9e9fFYnx1PQK7syMzHQRJM\n0baMoDxqSTHxp6M7BcSJnZ9LLON77r1XjUCuTkCe1omVp6VTIVSe2PkwNQ2Jc2Y+AvlRG3+z+1O/\noZe/KbPwl6KZd8C/ATTja/8R8Buktub/A/jvgP8c+C/G9SvAvzfu3wZ+C/glPucGYpYJilInW102\no206urZHectqcYVfXtCGAyE4ZCjwtsTFiB8sxsg0FzOPzMuSYAM2RITbslo+pjbXXD2/YrP7lL4V\n3Lz0rGYlBYJOHijIiTGj6yJW7Ng2P6BUJbkoyc0lgpLoI33Ypnb+4oIQl/jVlsPO8mz9Q2zwSeNU\ninl1iRSWXu0QZsbOvSIbrokZdOsdi8U16+0NRpe4YFEqgferl8+pF/PEzHtH3yXbXSYVmZJAoGsb\njCm4WF4SvCPPMvp+QGuJD47e9mRVYrxt31AUGc4KttsddZ2idZvDAREl3kdm8xmbfqD3HlUUDIOj\nrjRDdyB/8ja0DdFG5EWNb/eoTYdvW/LFgkcx8jpumLmMKn9O3zWsu+c414MOSFIU797fQZjhA1xe\nzPFKMrMGZz2h6ym9wMdAVBatJV2/Y73v0OKKB5eeGBx97+lCaqQymcEJi47XXM+/y+XsCf2w5+NP\n/znXJfTmDq0Mfe+5O3w9CqAxCghh7F6MKVPeyRQX7CW4gLBizNAWRxBKTDP5zCcGKsWJbR/BnLMO\n0jMQP57nDWZ+tqZY2cmZElVAOTGC+Pi8jWP+SJJgvAt4OwL52OIvSBeEBODhCOTSnjUhuZH5u6TN\nSzuOejsOk+DEzA33PeVHX/kI5GdgGM9+4KjSe+NYGY56lFomUJcJ1L0cGbo895iffOaf0cvJ6Mnw\nUROiGkFdncD7+HgC8nOpZbIl8pdaAG3GfZY+Gu5IYP63x/P/I/DbJDD/d4D/mfTxfwj8CPjXgN/7\n7B8babY3SV8zsG/2fPrsObO6Qhc5QUSMypEqo+l3GLOkyGasb27xLiNqxeAzPn39AbPqIc3hgNQN\n+35NaB3vPXrIM2Xpmg1t03KRX5LnCuN22NjhrWZWXyPlgkP/HBkHJA4f75DM6A4hNfssS7wPlGXJ\nkwcZa9XQ7RSbdmCwA7406DwjEwFnW2Iuef36E1aX1/iokg7tA4vZBa9vPuXb3/41mrajdA6l0/AJ\nEGRZRt8dqIoSES1uaMi0wEvIM01rA+2hpagyhJC0bQsmTSey1lIVJbYLqYO0mKNUTte1WGtZzOc8\ne/oT5vM529ue1XJO0x7QOkNlmiAFfXeg6IaUlKqBoQUbiFIgs4K267jZ31KZgv3QUGc9+/ApN+uI\n0A5l53TrOXd3A8PQEsKGuloyFJqstJjcMQwDuVI406OMJ6iIjo62gcHmvPPWr3CxXPD09R+xG16R\nVwV5JjHK4JynLBdc1G/xcPUOP/jgT+htg9ANIlikyqjqjNp/ccz4X9b2Zdr5iWFsehFjiycJwCcg\nNyL16ZswTnZnpN5xBN0E6MgzMBf3gXoaUzkBupiOJxAf/4hpXKgY63DTSDQ5ajNTQ5BQEX+UX2QC\nZSkJyo85LRLv5JjVkoaFq1F3VzaBeALyEbSPYH7/NWKSWcYccizpH+9IKHTuOT+TU+6dE3wWyE16\nfwLzNHgiqDSUwks57tUx6veolUuNO89mESeP+RDzcXz1Wds++ljwPBU/p3yWzwH0OF3cf77v3ZcB\ncwn8IfA+8D8A3wMeAS/G51+MjwGecB+4PyEx9M9sfci4ffEMWVSs5ku2W7jbBfbNGit75osSACE1\nh6Zn137Ku0++yeL6CXebO7zL2G0tmo5nr/45TrbsW4v3T2m2mkIXXF885E5GBt/goiV6j4xplNUQ\nHHleUWVLZsWM9fYH9LEnF5rgW4RQ7Pc9pY6sLpb09o6siNSzBfN8wXY3cPBbyAMHt6EfOnrjkOI5\nTtVs+htW5ZKqWhEj3L56waMH73FoOh4+eYc8z1AyY14vcPaA7V1K09QOURRkmeBwt0YLSbO7SZNR\nGJAYqqIkN5rgEkAKmybGOOsxusLagdl8hm8sXdvgneXB9Vvs9ju0gPXrGw6HPVpL6nqGUgVFuUAE\nSxQZQpWEdo+qF9BKmC2oixllu+fZ3TPa0KGyA4ftU9qNRbCiEILbu47bO5eiPZHYvqHMM0LcEMJr\n0DP6TiBFgRM9oXfsfUmQguvlu3znm7/Ot957h2+99et88PQf8/Ll7+JDRh9y5vVjDts1ebni4fIh\nN7Mr/vTDHbraI3WL8w6LIC/+RfCZ/2yZRYxALnw4RbpaEC6AkQhDyswePYdCxRFQk0AuRqQWkzvl\nDMgnfBuzu46gLsKZJTGO7wtnbxCjzOJSQTPFyobRP34qdgYXCU4i3ShPOIFy/hi4FUY/uiAegVra\n8+Mko3zRc9PzYsojn8D8vDnoqIuPdyvTc9MHPBU9J0BPYanpYjAx83FIcxiXlxIvErB7ObFyhRMT\nMz9j52de81TcnABdnh2PQH4WsHXeAXqvnf/NOII/x/ZlwDwA/wqwBP4hSXY5337WDcLnPrfH0amS\nXDjeWr6HwaH0/0uwsL7t08DeHGaUtCy42W5oLw5cLB6gVc2zzUdoWTOLERfWRHIEBTZKtu2GmCvm\n1SOGmUfaLYiMKBQ2aiQDRBi6jkIbjJqjs0vwO7S6AAkhNiiZo6Km30p6JF27pS5TaqLJA7WJWNfT\nWWj6W3Q2UBlPNA27/TOuZpdoNadtbtFG0bQtDx9fMrQtdZGTZYLB9tRVhXSOpusJUVIVK+5u9zS7\nWy4XD1KQlsrIzIzNZo11A2VRgRAE78nyHDuknyk1gSg2mw2zqk6Z6Pstg5XkWcbt7R4ZLcbkeN+j\njQEBQafPh7wAo5H5JX6zRuoIXZta6JVAmoznr76H1hnsKz55+pSL+QVmoUAOKB0wusIojQ+e5jDQ\nDluc8LjYEJVB6ZyrskIVc6yTtH1GZMuL1z/g+vIKo9Odh3SCzMCqfA+yOaKwvN7/hO88/iYPry6Y\nV4KejBgt+8Oeg3MI8wsPZ/kQ2JJu+C3pzvPe9mWYuQgiTaiZrIejlzzZ/ZLEIoxAGoHQZ2vs/ry3\nuA/qIp5AXUDK8xdnr4mnx8dsFn96gxirp0KFNPzmWOwUSBsJKoyac5pqH5UkqHHvRMpqUeNUoxGs\nxTCx7RPrlkM47u8/F5DD2ai3gfQDfVHLvv7i5+4x8wxiPjYLCUEQiZUHIcf9yMpFAnAvNU6cWRLl\nSS/v0ygNevJx2IU67Sfw5rxR6L7MEqIgBvmV+cw3wP8G/HUSG///uXuTGNu2NL/rt7rdni66G3Hv\nffc1mVmZWVWmqrKMoYxBWMiFhBgwMANmSDCzkBCMGCGBEIgJgqEFkpEoQCohYUqWYYDBZVSm7HI1\nWeVMsnn9e7eJG91pd7c6BnufiBPx4r53M1+lH8WSltbaa69z7om4J37nO9/6vu9/ArwAHtLXeQZ4\nCjzZecwbw9pn2u/+9rskaoSzLfzKJcleQlka2rVHiYiKOYlq0T7ycHpEnh4zX33EowfvcDA64cXm\nPZJUMM5nvdJ4EsnzhNXmghAFNkCUmkxmiMTRuBbT5YyyY5abT4g0rJuKxeaKB5MTcIYim6HFCOs9\nRTZiIT9g3SyZpRmVF7SbDa6zpNJiveDxg28QY+DFiw+RoaStGpSOZCXIrCHqwNXFh8hBJuDg4Ajn\nLEeHM85Pn3H81tcJqyXYhjwxGNVXLXzx/GOmZc5GB4SKKJ3SWUeaakIo+vTuEDBpilAS7y1S9vBc\nLtbMygItE5wPlKNxLxPXebQS7E8K1hvL/PI5ZZJQb5aUZYmUQ0UiAaQJgRxVtAgbiImmW17y7PwZ\nFR7rExKl0MxYV59wPB2jScl1xkLVSC1JEkMpJwThqDpNHSNCWQ7KCb/6+C0eHBScb1o+Ottwsb5k\nM3/Kcn2Jc5dE1fDDT/9vDnLFYWKw7oKynKCzlh/88H/n8fgxXbUgKknnWp6/W/Hxuxe9xSN/5un8\nkb6e+eWrNryOZS4Ha1xabkBuYw9x20NcDgCXulekkYMAsLzOAr0VUn1/3z3kFDv775n3L4xr5R7p\nINrB1eL61xZVQG5l0dxu2dtBmGHwQ0fbP+GN/uYW6sP1nVG+Yp2uf47d84ZbtVY011UTtz7zOPwc\ntyxzA9GIHuhm4OcA9H4cLPJBjMMPQL/pt2PMd+PMA4Olzdbq3oH2Z+7Je/3m/yRCEw/pPVVzIAd+\nHfiPgN8C/k3gPx/Gvzns/y3gfwD+C3r3ys8B//C+J/61X39Ero8RIeEXv/7P8Yfv/T6TfUubeJpK\n4WzEdQmkitloTJpnLNqWzeaK2f4jvnn8F/lR9zskymHyEW4oLB98SggdbbdCesj0BNs1uLYhSfsC\nTdPRm1yuzoi0VJuGNgssqhUn2SGj/IDLzUcYccI7j77Dxfw9lpxTN45N0yGiovJnEHPEXkGepXzz\n8YQfvv8POJ97Vp3AKA95xdKeMS6PoPPsTw6o6oo3nrzJcrlACMN6M6eQkOqE6DvaumJU7veFraoO\nHVPqpkYIyXQ6wTlLCJYYIsFZ1s2GPOstg8lkRlaOcCEyXyw42H/ApqmJAkxacrZ4zsH+HqurFttu\nmM326Jp1ry8aAtJ3QCDmY0JQKAkiyYltAzqQjsYc75/w8vwF0/KbTNKG6ahGqYSqWULsCNoyznJM\n0mt3plGTpjPybEyc/wljAj+XTzgZZajgaDYVl8sL1leX4CymENT+Oc2mReC5apbIRPNgClJVCCq0\nsPzxD34XlaaMJm8RVjmP30z52uN9bIjUquKP/t7Fl/ur+OImPu/m64QmxgGW2C00I9LKa6kz2cVr\noF/3bQao7FVqtn7y7YvZAnkLcuLOnCFiZQf+d+fADcy3hbNURA2JQlFxozpvtweyW4gP10M9862e\n5o2gMtBtIU0v4bYz7yXdIqLbubcF+dYyH17fDcjFjRDFnRK423rm1y6WLcgHV0sUW2m4HuSB3kL3\nQvbFscTQ2bpX+mSg2yVw+x6voS2IW2izC+zb8eXb9dsZoOJnbpk/pD/g3P4K/zvg79Crrfwm8G9z\nE5oIvdDtbw6jA/4ar3iJy/mcThvyZJ+3H/055o3l2Yv/k4nK2BhF2zmiHPeHb1VFPpnQxJK23VA3\nHbGFIp3hg0d4R+MDaSYpTM5GtoNYQYWRhthZMhmx9pSs6P3YuVF8dHWGSXJiTKkaS2MjM9mhpEOr\niFIFJyff5OXZ9xn5hroCXU7xcsPqasnHT9/n57/+y6TGMs1ynte9f2x2oEkySbARLTXB9jqeR4fH\nff3wuqLIJ0yKks3yis1yQessRaJxdYUWjmpZ96FowMVyzqE+xncdTV1DCBitKEZjog8kqUJoSWdr\njJKMD4+x1iGAelMRI+RZweWLM3QmadsaQSQd7zHem/VVIMtRfxAnFCrNYLEhiI44mSHmc7pqjc5K\nCpkTXYvKUtLUkKqE86tTlBKkuaHII8QEKTPWQZDne+RdzpP9b1L6p2zWHd9/70MaL3h/PmfeLhgZ\nxYN9Q3EQ6NoldWuJXUKQFZerlsO9iBEeIwJIw8fPf8zDh99iks7ALZFdRDlFK2rq8DNPnYj0UVoe\n+OvAf313w+sU2mLHtRJ3ZM+2IBdGIg1I3ecTSA1K9WGCUvVJQ1IK1BAjfvfTRez+1d13/xXXQvTh\nj9vQQ7bQtgxuC3ETsz3oXt6dX8dzR3qpti3I70i5iXaYtzv32gHgwz3RDfPt5+OWRLshiv5+n/l1\nNIsWvWWeAGk/RkQvOS16UWdPD/JbQndCX2dy3rLIbxXaSvsPhQHk947xVeM/2dosfwL86j3rl8Bf\necVj/tOhf24TYURVrSnkjOlkitEZMSpSlZHmGXJvRusdPioaAWF5RWMbsrLk7PIco0py+YBl9wGd\nEyAkra3J0xmPxxNO64/ZNFfgJ8hEDXWcDa07ZSy+xRtv/AoX9e+zbGuQksw84KOnn5AISVlkLFY/\nIM++TsGUNw/+PJ+e/x8cpTOmkydcdB9h/ZJPP3kPQ+TxoyOidCQyp7Ub3Hqf0DqEmjNvSzJSpsWI\nqmtx8yuatiXPIovFiv3pjKcfv8v+wRGIQNNumBQl0BfVUkpx/OBxH+kyHtFYR7AdKkvwMZKXJTEq\nnHPYuo8xr7uGRCcszs8waITWZEVG1IJqU6GTFKENWZYSupb08CEmK+hC6P8fbEDsHSKa3gUkHjzm\n6o8/5ntP3+P7n37E++cv+cW33mLTLjFZSxSWrgEaj3OCoggIClLTsuo+YWweQfB4qbioW16erxFJ\njkOgpEBryWgcEcazWWxYLgKtN4Rg0C7l2YsfYo4WSNfgaonrPBcXH1OlNev6JcdGoLqOCkciH3zR\nW+/Ltr8EPAeO6MNzfwD8X7sb/vp/cnY9/86/OOJX//JnI2yi6hNxGNLkGeZIMQj9+j4bVzGICu/c\nl/TuFtlnhN4LgnjPZXz19XW7T2ptKwShd+b2xrd+d34d/xi5o8V5z7wDOkHcuY6d6AG+3TvA/NrK\n1tvoFHFdhGzX1bL92fqYc3EdphgHH7pXQ1naXpkUdz3eneub9Pyob1L2g76pXe7NIAIzaIgOY9wZ\nCVtVIXFd8vY68TfEAebxFTD/u3xOteVb7avTAE1KVl2DE4KLxUsu5h9TVQGZCMblDGkCETBIREyp\nu47zy5dM5Zvsl5oskSh9zNX8E3LhsGlk1S2Y5GOOHn+Ni/dOuZh/jNGPKEcRlXmaCmJXc7H+lP3y\nTQ4nh7TrHxBCx2R8QN0942y+pHEW7xrOzv8Bb7/9z5OrPd54+1/ie9/9bY7LnELOaLvn0ETOz89w\n5hKhV4yPBHppECKjqQI+P2PEGG0e0TYdLjQkaU6ejUjLbFBiD9jOkxcFAk+a5bjoSTND10WKsqSR\nDiH6r2VZVhCzkhAsTggWG4vSFh0lWIsoC8ZpgYuOMslZzJ9RlnvEmJFkIxKTgBBY14GQlEUJIeDq\nBn1wDLF/c4noIRsjhILNJcff+EV+/4Mf8sEnP2YZrviTDy7oOocPHlNAWY6IrUZphdEG4RVW1SRx\nztJGRBDUomAeVtROYEzER49JBDqFpRSEdcP5C89i0aK1JCkLOtHQVA1X86fkakpiM6LvaOoFy/WC\nIpN4YbF5xLaWNP2ZH4A+H8Yz4H+mPwC9BfN/499759YDNsvPPonaBOQmoIYu64CqA6rtu+z6BBrl\nbup8q0EsWQm/owAUbkPgVfMvut7Otxbt3X1bn+5dyNudx4g7jwncA+7PWbsrCber58lg/W/B7W6+\nDWwFMaLtO90Ae0v/IWGGbxTDB4GLmjUjNpRsYklFSU1BHQtqcpqY0ZINwhPpDbiHLM8+9HBwrThB\nXENcAxXEKhJriDV9lk4biV3sP6A6rt1UDDrGN4efuw6x3fYXh75t//E9e/r2lcG863pfsHOWl5ef\nMr98jjEZVfT4eEUua+oGRLJHrkpE9LS+wvsFHkETWkTImU7eRtSfIrOGTXSsu3O+dvIXeHLy5/jB\nj3+nT0CpLGLsEWmLpQ9/fPf0HyPECqMNjVuQmYLJZMTV1YKq9qSJo3Pwgw/+Ad/59r9CqmfsPfkF\n2uAwMaOrFXYT2RtnaDullRUi69DCU2bHTFTB0n2fzrbUfkOuDEpqjDGkWYoxGav2iufPz5jNZiQm\nQUjoPGgpcU2Hj5HNpiGGDqkUIUZcjBR5f2AptKZuHa5pcEQ8CaFr8V2fRWol7B8+Qpu8D1aQAtt6\n6mpNlmXYtiWMRv3hp/cIG0AZpJnh5+9jpg9w6QiuLqgufszR/iOiVAivWXYVwc54dPAdsgzKIsFu\nCpI0oV1/wqpacjH3eNeQlc+ZFN+gqy2rzRmb2mO8ZTwtKHJBjDUQaduOzbrXMjWZIBIQJFzOl8RQ\nMhm11IDWAmfBOYuTgkpELH10RT5a/SzftgW93bkCSuBfpj9DutWqVfaFTySrgKriLZDLa5DHa5BL\nF66l1HqYRyQBJcM10F8L5q873w2D2b1/Vwximw16d//2Mbsw/zzr/D5Nz/uEmRlcKX4H6tdj7waK\nw/NEzQ3I9TCqwW+uwEbdQzwWVPRjD/OcJua0ZLQxvdH1vC6mtavlqXp3iRPEDQPItxCP0EBs4843\njOFDx9IDfVds40/BXw5fIczXizOE2cPHyPnFJcTAZHTMYnNBjAvm1RzZHhISiYsN0WScHJ5QhxVK\njbCxI4TIXvmIS1sj4zk6JtTtkpYl40mCzANY2FQerRJIA6nyiCTj4uoZkzwDKXurVzQ8PH6T4z34\n4Y//HwwpdWNJC8l7n/4ej/d+iVxGHh+9xenFOdEqbNdytVgy289wDkLqcMKi8kDrW7CeqANCeKrK\nY5IM7TrOzxqaeoNSgtA1pNMDlqsVaZoyGo3xrkOlJTI4TGogQNM4jNZMxns4BDorECiEq5EGrs5O\nKYqE0ASE8lgFSZINWeIBKTXOOS6vTgm1pVaeSTlCB0ehE4rxBFJDJEB9iRof4us5Ki/gwQkXzz7k\nvFpR+5aoUoiOLBljZI5SBo0hHU1wNrCfn1CtrvBtg809RrRYv6SqDetNzbpqKETKfqZJ8hRaT1XX\neCdItYDSoHOD8JJ2bVmvHND2OqdtgDjBhN6na5ctp6q3xMbHkmz0JUMCPr8d01vj0P/t/PfcU67i\ntWBexx7iVUDWsYd5E29g3g0g93EQbOhFyCUBJXoVHykjSofPulninfFV81dZ5p8H513Qqp29XwTz\nu1B/Vb+j73l9uAm3XSpba/x6FNfWOWYY9Q7Qd9L6XdRUsaSK+c1ID/ImZrRxC/MbSbi79cnjEKUS\nnYDNAPKKHup1JDY9yGPLjQvJbj+A4u2f7U/pLfuVwbxeVaiihLHg/OoTYoSD8SOsW+O4JPU5ra3B\nSnRR0IUVWk8ZSYl3LXle4qWC0HE+n5Pmjqg9gsDTi++Ryn2SFLxviDaj2nhEq0kSgykK8rRBSUFr\na0ReIZVGmZzcJLz11gnPPr3AItnL9khj5Ecf/y7H6SH7s7/EfF3hkhZlBJ1bM18IVBEIDtJyj8p+\nwqoRpFlCYVY4NSH6iOgEzVqQ5mNs11G7jrJM2TQ1eZ4RQhxk5BIgslouiFJjNEySAqShrhsylSCS\nhBgkSjsWiyWXyzWj8pjOtygp0UGilLgOO1sul0ilaKqGXOfkY42KhfllAAAgAElEQVSSCh8D3nuq\nasmk2Qdjib5CCIXIR7hmidxYvEx4cXlGkZQ8mz8nxo6WyFqm1M8ss3GCljnT0QSiQ4qWzIAIjuha\nWrGmsyUAvrV0RtB1ljyVtFVL5RzBRhK1R5IYatvgg2C16FivYTzNaBtPu+4l8aaZRKk1LxaCy2Vg\n77FknKb4pvlZvm0/oM+5+Ny2WeZf+ESyicgmoOp4PZd1RDUR2W4TaYYMSh93YB57EW8ZUSogVfx8\nOL9qfNW9z3OZbLvjJjyQO/t2ob/1mb8K2q8Ddb/zXLfgzQ28r61x8VmAq+HezuiC7q3xmFOHvB+H\n3sScJtwGuo0JNtwAPURFGGLEoxPX7pVrkNeDZT4AnTYOrzMOP1cEHwd1oeH/70sW2YKvEOYyV/ho\nuTi/xOiCxp1iZI5UgizJ8S4SbWBTdUhdoVWDFBJfGVrnQKzxHjZ2wbI+I+88SRbwUbDaXNCqDiEc\nytRIk2AliOChSUCsIWo6YWilYHX1jPEoY/7RikezE9548JDZ7AHf/dGf4F1gPD5iefEhlVyzrlfM\nV8/Z39eIg/5TuZgc07VX2I0lK2eMi5xnq4/YbDKEOEOokpE5xLYbqtWayQy0iUynM5IkoShHCNGL\nbdgAyD7NX5iE/os1tN6TpwnKBtAJSVJQ1xYfJKv1kjQ1nF9dkmeGNJP4GHvZON9HqAgh+eT9H7C3\n/4BEScajETrJMBKCkuikIFhLePEh6uQhoWqR2QlSdzi34YPnH9OGDRVzGntJtAVKtSgCbbPkRd2R\nFZpqbThIS7wO6CSy3nRoo9HRo6Um0wWZqRCyj/AxShG9oFpFmgr2S0lOwtp3hCDRWpGkKQRPXXua\nJiMxgTLvqNuOpgWpLTrTeN/R1l99oa3XsszbAeK7YxtRbR9zfZ0Z6eM1zGWMqDjAfAC61JFblvmX\nHXct821s911Ib4F/93hiu28XwHfB7b5g7S7Ed9wsuyLO91rnFtDi2s1yHe3S3VjmPcxV7x8PWQ/w\nkFPHjDrsWOdhAHkYLPNgeqs8DJmdoY8lx4ke5PXgL6/o50289pcz9DiUJ4gDzHs5vAhbtakv6Wv5\nymCeTkZon7E3OkFJzcv5KVkyIk9ybNeiU01UKy7mn1D5C/ZHe71F1wIxoFcdgYY8kwgV6byDLkEK\nwXrpmU4D0/GUZtWii4Q6BIRwvTi0kaxWNVompNqQmSfEdp95dcX76/d449G3+PnHDzi7eEH0kSQt\nKGd7+NhxcfUUFwJppsgyT7I/YZbOWF45onOwNlidM9Hf5tPLDzBZpCxaihhomwYpDN5XmKSAqPAO\nFstLtDLM9h7QejBC4VzAJCkoSVQJWguEytCpRKgMoRO0kXi/pKpaCK7/e3OOLFFsmjVSlMxGI9br\nFW3TIoDESNJiDIkmm8xIEsP06IS8mBClRhpBPH+KevsbxOYSmY4xLBDKUVUX7BcPqMoXzBeCJMlQ\naOo6p20tSoMcl1RR9oW0kITWYFeGRKXY0CFwpNqQTwS5UQhSZKLIUsl6vqLVkUQIREwIoSPN0t5V\n5ANNHbGdQBnHJrbM144qeiZ7KZmxtC209jXCAn/GbfMaMFddRAzwFt0QVz4AXW6vh7T6rbCzjLG3\nzBlAPlRSfG2Yv86eL3KzbIF+357d+3dhvhMF85n53T33AZ3+ueOOm+f2QSi9e2V7KHodJ39joW9D\nJn1Q1CGnDekA8Iwm3PQ2ZnQhpQsJXRj85UHjgsaHHcs8DD7z4bCzh/rgYmnCDtDDUGMmXFvlfbnj\nHuQ3WUN/RmFeZhOilxgDWifUc0t2BEqVLNYLEtvQdh0mFwTR0YgWosCLyHrlac4uEAh05oiqj4ow\n5Dg6LhctIW44nJYcHpVk5msUacmHL75HmSWExtJuGoIL5EWB6RRZ9iaT9IQPF1e4LsUYw8nhEefr\nBSo1JKUhxEiMniwU4KcItxwK3XdMUkmZnPDp2YKmrijKI/YnJ4SmRmayT96RkrbZMG8rjp98E4+i\nTErWm3Nm+0dkZYltWorxmBgDVV2j84SAQGlF6wM6LXrNSyfwIeBjBKXp2orMpHTdmm5jSJSgWVeE\nYoJ3LfiO45MnKCUos16ZqJ2fYaYTunpFahK8yEnzHOEFYTMn6hT77IdEmdCuKharDbk+Yn/6AGSD\ndxYtM/ZnJVeLJUYbpMlYVBWxbSjzFKNSvDXYShPYILBkqSMvFEpFsnREmWdkJmW9athUgVQbxgnM\nXUWW7DE2CXV7TmsXeO8JEuZtx/MLhxkXZJM+Br2qIHyFGuXbVi1fBfObNEY5xFDLjt4CHwAu7HC9\nHR0IvwtzBqucXnRZ8frgfp21V0Wz3AX5q+4NqkjXha/ug/RP2rcw3wH4rkW+e+h5De/toed1pUmu\nD2x9UDQhpQ0Zje/HNqQ9yEN6fa8NvWXehcHNEgbLPOzCXEI9gLthcLHsAL0NvVXehb5wnevFVKKP\nfdXMELhRc/4zCnPo9S/XbY1dbZjNDvsoDW1JE8N6syRJDMJ0xGhpuzmT4pCRzFB6Q7oa0TWeutvg\nsEQ/ImI5zmf8q3/lr7J/NON8/hSTWPYmh+hkwuZ3Lnn+7H2868hQdC6wqVoO9R5RbOjiirEZk6V7\niDAhFY5qs2acFRzuv4W1l1TtBVHnKFLiaoJPHRt1yYFOySZjriycPXtJkJfkedEfQvoUZRTCpDRV\nPYQGtiyW51xdveTBg4d4L3E2EPB0tiMSsM6zqjqKoiRLEmIA2wW0knRdS9dZvAfvBaenFzw42KNM\nJOt6jQkdwTmUClSLS4iS6WxCkqSDTBmYIiMIgxAKJzKy0QxbzzGjEtG2MH6boCr+1t/+X3m6/iHL\nzRUQsUKRjF8S2j20zBE6p+pGEBWuCzjn6AJIGdFZTmJSXGhooyWfKHKzjxrn+NhLyD1+/E0yfYht\n/h4vns5BCVrX0TUN48KgYkqDoakiyaCRut44kIFJ3jHWgq6NELbCaV9t27pZIp8NCrlOzhkyP6Xt\nk2Ru5jcwF65P+5c+9vVW4mA4DzXMpQKhue0K4Z75fWuvmt8XzbJrmQtu4MrOeqCH5e7h6Bbmu7Hq\nd69fNb/PMt+1yHet8l2g6535AO+4U+w9SghB0oSUzqe0Yeg+oQ0ZXUhoh/UupHR+C3KD94NlHhTB\nqz523Alo+rj42LDjK489yNst0HuYYz240FvmPgwVM7dfOb7cSehX9s5v6jXBS4pxSToKvZpPlFjb\nMipyrFWs3Tlap71rQAaMcTyYvoXgKUbXdE1O6ktWqwVNu8L5AqLhsDzgYLqHp+H09BPa5iUhrjk9\nvWS+7JBtINWKdXNBVDkr3yLd+9Qm0K3WLC7OmZRwOHnCjz75lHWz5tHRN6iarhdDiA3FWKNrg/Ud\ntoWXOnLs1hw9yBDigHZ9jpQSJSb4EEAphBQonYLwLE5POTh5TNt12CDwLnK1WJAXBZuqpihybOvQ\nuaHaNIzKKdY70izF+8hms7kuhbvaLIgEruZXUGjoVoOob8NCgU4Ns+mMGB11XeN9QpnlCJGiM4PS\nBW2zIQrIsxLhPRQl+CuKoyfoQvPjH/4R59UC6xOquuadnzuCkSc2GpUX5M2Iruuw0bNZV7gYEXiy\nQpNoxXpd4UJDOppQ7B0hpaJt5gitGI+fEB2cHD5mud7QWYtWBiUylosLpCzx0RO9QOUSbwP1OqC1\npsg0QgZSo0hUTt39f8DN8hoHoFtQC9dD/dZ4d93TVzzclrEVO7VZlPgsmD8P2l90b/cAdBfou5y5\nz+1yt5C64ra1fjdu/CdZ24W53wH4fRb6XVWhndfU64EKgpe0vre4O98Du4f4dm1YH+Z2C3S/Bboi\neEkIg8+8BVrRw7zdBfkwdr63ygeQR+fB+QHkOxlPtz4lf/L21fnMhaCzgq7t0CEgfMtydYGNgv2D\nMaNizPryChEiqdK4qqUtNoxHI9Ztyen8I2TMIE77qA1S8jzn+ekF/+Nv/SbTowllOcKGNfPVh6zO\nPat2w+HBAdkIfNvSdRHfrgm152t7OVYaLuSU50/fpRwZ3jiZsV/ssa4a1FCHqqk7gk3pXIvTCqEC\nOsuIHcytJU01KgetR3Rti4sXpHKf2jeY4Ai2wlqHSQPzy3MiCuscq8WSl2dnvPO1d7jYrBHs45wj\nhEAI0HUdIYRBVKO9zg5t25a6rnDBU6YJJk1oKst4kmFiiZCRshhzdXaGVKCNBpsivSNGy8HBqK/l\nrsZ9dUQDUeUgQl8/Yn3OxcWC+VXL1ZXDxo6jvcdM4iM6/SfY4gzaESbNCEJQVzWbbkP0kaauKcuc\nUEiq1YY6biiykqrZEOhw4Qy3avnej2seHn6LurugKBNCUIDDWsVq1aHQSBVJVIaIkvXG0jSBLHOY\nRBJMR5rmNHXFZmW/6K33M2/V+ot95sKBCGJwowywdqKvLe5v5tL190QQO4wVA8i5LqL1Sli/DtB3\nr+8LNdzd67kN/jCMuwejcufefRml92WYft79bSi9F/cDfQvxIUN1119+7WLZgbr3ErsDbusNnTf9\n2vV1P9phdMHgvMb7wSr3su9O9BmsQzx5P+74ygerPHZ+sMo9w9fpvhLpFubXXzd++vbV+cxzgUaw\nXL/g4fHXOL94Qds0LJoaITqyTCE6SeUtwglc3Vt3i8MzXLdCqjWeNW1dIeMErQJpOUIhePfjj7n4\nowu+851v8/jkmMYa0rL/cVMRiVqDKRmLpP/apcGmfdREZqFya+brK77OCSezx3z/0z9itfwAgkXb\nCcHDyf7b6HzCYvEJedS0uWC5OKe1l6RlQaoUraT397ua2rfoGIiuw9sWhaANgrSYsFnOEVIRnOP5\n02ckaYK3niRJqZsGrc1QECtyevqSJEnYbDYkScJ6vUYg6NqGfG+KBpI8pcgKRAjE6OjaDVpLzl6+\ny3h6iG9S2loi/JR2PSYpDlltVhSpwtcBiUDuTxEu8P6P3uX5/AW+TVDOUOQ5bx6foAaoJqklsiDL\nR0SgWq3BR2zlsY2HBpSLdE2DSCJVe4nKFwSx7isvxsDV5XNG5iF7BydcrVdM9voC2tFnVKs1Co0i\nJ0kNPtFUTYVODLNZIEhLjILVekPbGpBfvZvldQ5ApRfXkL6G9fba37ne7omir1EuhpBT2QP+C8H9\nqrX71u/CfNcKh9sRLndqod+a78L8Pqj/hGs9zOmBPsD/Mxb6F0A8DnHxQUusMwOs73TXW+Cd3xnd\njlXuda8L7NVQOVL0B663yhHEHuh2cLPYAeTWDda5B+8GkO8eBPwZdbMYJXBKkSpBrqcczN7hRx/8\nHoWSnL88J8sLmiYitKSOFhklvhWcnj4jTXOMDxgVaEOFFGOi7fBth85yjh4ck2YFF2fPeXywh3IG\naRTlpCNUDo/CiISiHKNloFKWdXAoa4l5ToJj1a5YVQuKLEWqGhvP2Ru93V9LSTmegtDYzQXK1zS+\nYV0tqdYr9sKax4dHfHTZYL3l5PgNVJDYEDGm1+J0zmKUQstIs5njPIxGE549e86TJ0+4urqiKAqO\njh9gkpT1pkIpRd00bKqKarOhKAqapsG5DpMYQvC0bUWWFTgUs/GYxcVLkrQlSTMm4z2C69BpjlKS\nLkiEKVDZBG2XBA8qSRFFTjQF4WrB8/MLLs8WBPrQw3fefoKUkXc/+hEhixwcSnR6RZ8/7QjWobuA\n0hqlAonUaKlwRiETgWNNYxV5FijSvK8Xj6AcpcRoOXlYsLFPSfWYJHfsHaZ0VX/wp5VkTYvwgSwH\nlWfY0NCsPUJqUlWQjL7cIdKfRqtex80ywPlWD0MN8FvX2z6AHdHDXIqhPvkr3CyvC/P7/Ov3RarA\n7SiWsLPn7gfAbljjLqS/zMgO8zS9e0NxOzJmF+A77qK4+wEDBC9xTmO9HkCth2uDcxrn9QB1fc8+\n1Vvnbscyt9wA/VodKfTlg3ddLNaDc+Ac0bveQg+Dn2ibzvol2lcGc7cJ2OBYLq443KvYLBdMs5zx\nKOKvPISEk/1HlPsl0iiW83M2qyuacA5+hLcajcc3Ehur4XDQopVGyoSjg0MIY7pGcjJ9m2eXn9I0\nkv3ZCVJcMp2uSaLirHtJUUq8ajBqhIg5q0XLqrnk/dNPSLOEx289QSeeKE+RqiBGwaY5Z1UH1osl\nZSYoxjWjmcTkCdMihaQjMRWrRYI+1ozGE4KySKmQGoIP+OiItBTJPhfzc1w6IstKbAysFwuUUmhl\naOoGYwwhBJqmoWka/OCC8d4hAygpsV1NUQ7iGSbBxsjs4AAhe7/y/sGbg+KShRg4fviEYnpEpN+v\nRECYAkwK1hI8/N0/+Ee8WL2HD57OW9LU8PJ8Tlg4Yj1hqSpG+xVGrgkxQ3lLZgzWeWIWKcuMQima\nNiCkRLkMu9aUPhKQZCPNeDxi03zI3uwEH1KqxvDp81NSkTFKoIsKi6IEEqFYJApjFG1n6fBkhSJL\nJCFUrBdfPcxfxzIXcQAz4mZ+Z21wqNxe38J8B+qfC+mfBurXL/LOfvE5e171uC2M75YD+EnX4NbB\nZw91ceMz34L9urojt8vlbottCYheXEPbux7Qzg1zvzN36hrofrvHa4LrYR6cvKm1ch1mOcSTuwHq\nbhfkg3Xu3WCZOwiWeOsrxk/fvjKYn6+WCKEIoeHdd/+Azm7IswQjChLjCcIwSyWjUYJJp0hn8I1k\ntZ4jpjWYBqEzuuColhtG+ZQ0k0xnMz599iHjyZi98T6N7/BVhWslWmfoZMzV6l1M2pInCbm2eH1G\nrhPSJNB2inxzzmVwfPDyJdPxG8z2RpgkkJqc508/RquWy8WCqoMZJxSqwGnP0dGIs1ON85I2GLJR\nwVh0nNbn5ONDyjRBOEtWltTrNalMSCW4ZolBUK0XmGxEtVzivO11PmNESomUkq7rALDWUtcViU5Y\nrVf9AasQFEWOwNG0G2ajkvXyikQKpuMxaZJispJRoodEqBGzo0cU4wfMVxuUjBSjKSiNGE+JXeC9\nH79LmmZs6sBqVfH1rz1CiMhqsWGaTalCzeXLQJJb0skGqRxkAVcJNm2ftJRISahaTHBoZTBiRCpG\n2Msldt3im5ZNGjE6JbgXzKZjHkwfkHUT5hdn5GlBenQIEdy64WrTkbuM9XqJLqEcBZQU+LqjnivC\nvPiq3tLX7dWhibtN7kC5Nx1798nOfNjzmb3iZq/gJ4T5XaiHnfl9Vvru/O4h6N353bXdQ9Ld/qr1\nz7sPt5KGdq3xW+V5XwFw4PpDJiqBd+oz3dl+DP5m7p3GO7kzH/Y41YtzOMG2hjuDqEc/BqIfIlj8\nAHI35KI4B972fQB67yP6MwrzNmpiW4Nsse0SKSSrtiasEtrGE7FURtMuWoqyResUYopbj1i6BpMW\nWO+RWpJNJLbdkJiCPM159PAxHz57n816wXg0onMdb73xdVKzx6OHb/PJsz/g6ccb3pqdkk0lXXaA\nEBFlJKswJ9eKInYs3Aa3OqWzLQ95yJPH3+GXfu1f47f/0W+wXm5w0WFSRx5TLl0fMnh0cMRitWJV\nBZTKeXAo6RpH1a7JzZQiy6mXDWmSYOuKKCJSdpRJQdSR6V7Bcr1mOjpgvlwyXywwSUJdVQgUVVMR\nY6RtWlpqvG0JviPVGoHAeU8mE6qqghhx9YZWg1EGPdIkWZ9lKwKkRU7TVaxWL9mbHiKkhDQnoHDr\nJS+XF/zo6ffQec5YjhkXhsuzc7wXdFkvQB2cptlE8nJJlk9YJhGhILaWNMnQDmQQZEYhEkNiMpIu\n0jZQNRYhCpbVmmY958k7h/iw5tHBAaOjnG89+WcYHZ2QZIHFcsEnH3+KXBjWp1eYqaHMYL1oWDfQ\nNQqxMhwlo5/1W3cG/DfAL9Jj59/ijmD5a1nmQvZWtZA9oOUAZ7mF9SvWBoDfWoPbMA58Psjv+sh/\nEgv+Veuv6uGe8adZg9uHn0Md8+swyF2o34rIEbf9+AKiFL3wtJN421vZ3qlew9SpQZR6Ow737uwJ\nTvaiHE4MB7PDt4RtZqeLN+GHfohgGQ4+cfbaMo+h47omwZ9VmEexT8wqvJ0TpSHJCmLdsG5anIOO\nyPvPFpSrhIPjgNYGFxzRC0KT09QOkVqKsj9Ob6oamc7RScdUH5DIj7g4v+Tq8oKI5O2Tf4qf/9Zf\nQKcaI0t8t+KP//EZ3357j/JwyoWt2D88IAqB9xXj4jFV9QFaejq7YTVvuCgu+Wd/+df5q4f/Pr/x\nN/8z3v3oI65cxWzfE6uCeag4nB7QZR2Ly4YyZuR6BjISYyBogWscidZ03vHg+IjV4gohPKNcE4Rm\nmkhi4khlzbQ0zK/OkDpFS8VoNOqzSKWkqjYgHCH2mZ9aSowEvKOqVsCY2ThHiD7MwHuLIaKEJEuy\n/m8terwN5OmItm2JxiBGJaFucBF+57t/n/nyDCklb765hxOeH33wCSFMEAeaDsgySbWEtIiUZUVi\nBEmI7E9KVAvSW2IIpFrjo0FagepqJrOMVEuuVjXNqWHhLeXqkqw2nJ53nJTvEISmsktkknB4eMJs\n9pB33/2ITajJJwdUi5rzp55qqdA+ZaQNaTb9Wb91/yvgbwP/Ov3fT3l3w+uk8wvZxxYKNYxSDjXL\n5c09KfvruHPN7gfA8Pi7AN0ePv6kwN29hs/CO9wz/6LxdedfdG8b3z7AO+5CfLffjai5Dknk2mfe\ny9rJAcySYMUwyn7dbe+Je9aG/VYOljn3HOD2QI9hC/QhamXXveK37s5t31Yg++nbVwZz28B4tE/s\nDELUpCYjM1Mq2WE7Rx46VlZx9nKF85G9gzFEjUgCdWMpTaBrJTHxBO0oJoaYrLlav0/inrA/fshq\nteHqbIOICS/PTxFR41qHSSOPH+/zw9MOV9XshwOeriEdaw72H3N19sfkSrKXv83eZMTF+pxN3fHd\n7/8eo3Kfb7z9Td568ktcLi6pFy0hNkzyEy4v32e9WdG0HoEEnTBfWYgCPe1YdWuC82QODmYHSDqy\nLCfLM7TSfe1wv6aUNetlRWNTnAs4nSHps8XqugIE1jX44AdFIYmQgRg22HZNUy053JuRZiWdawnB\nErylWl1hsoSMksl0hhCaqm6IEUaZRghD9B6lEz758EdkoiAnMp0KUmU4vbqiDQrlLGGIMJEGlDY0\ntUNJj4gWlERKQWoSdJBE2Yc5CqvwRpHNpuS5QXQN8QJMInhymLF/lCBCSleXbDhmtRZcvPuc6Szj\nna+VHB4e8HPvfJsHR29DKLC14dtHnmmek6U5RZJijOJv/MbvftHb76dtU+BfoJdKhB4ti7ubXivO\nXPWgRg9AVoN8kBqu9Rbkwzpbq1wOQebq5jngs/CTfLGVvr13N0Jl+3y7PvLPc4t8kcvky3R2xl0X\nyw684y68xZ35PWGWUQ5Sd7d6D+lba04QOnmjb3rfXhchiJ2fOQ493OkD0IMjbl0roev7blnFL9G+\nOjdLd0l3lWHrmrbdgAdlWrrGEtFEF5mYFJe3tLVjs6jJ0hlZmmHtCpnDfqpZX6peoKFoaIh0cYl0\nz0GNMVojSfE+8NHTD/jBB39MXuS4UDNORhwfzBAYnFsyDR4j93h88k0WL37A5fk5STlBTQpGyZiN\n9yAkf+fv/y/8w++mPHljn+OTx5zaj/ExYmTGyd7PocWCTFmsDqzqBqECsbNUwaN8QIgRaZr2LhHn\nCTEymUy5PD9lMioo0hSZH2Hqmvc/fU4iJY1d4/DUm/paXKJt++qA1jkSI5mOJ2Sqo16siD6yXl5S\n5iXZeIp0FussoqnZXF1R5iNE6CNOskyzWi0xegSmfzu05xf87nf/kB89/wP2jwwhOs6fXtBIGI2m\njJKS470Doo/sHR8gVMR6x8nhPqU+IP2VkmDh4uxTnHeMRiWrzYYkzZlM95mOSvKiwHtYLzY422Hy\nlOm+JgaNCIY8LSmyjBgjzluC9IyLMdOjMW1d01lPqkcUZoQuJCHEviSB/3LhXV/Q3qEXpfgbwC8D\nvw/8u0C1u+l1LHO0RGjVQ1tLuJ73owjD9VaVeAA4UfYf+tusIa0+3xq/z43yKojvwvNVGaD3hRve\nNw/3POdPM99es/Nv7EL9jn/882B+7TuX3NRv6baA3lm7jky5c6+7s2frHdlqeF5/K4lDlvUA8bgb\nT+5uIliC+/+HZZ6nkvlyyeXZajj5PSXI0PsRVUpsFBrBaJSi8xThW7RosV6xPz1kNM7IVWRmz1BX\nNeeXgtrUSNMS3SldXGGkZP9oQrNeMRoZfvzR93FxQ2SBNJEiSQjOsmwNozLw1uOv887xN/kgpgTf\nIhqJbTXFaIIIK+rWMS41p88/wNqnPDp+h7IwWLsgSUak4iFQMxaG1fyUw6OE+WVNrT12E/FBcnJ4\nhAmKSMC7jgdHD3n+/FPGmcFIRZGNycuC0cjROcnHp5dcLWpUlrJerlBaUpZjrLPDGz1yeHDI4axg\nfv6M8WSCTwWJimBb0mIPHwXONkTnqOZnVIXh4OgQ6zw20JcFTgswCqlLPnj3D5Ey4Wsnv0AUDusE\n+1mHIOK94mB6RJZmTKf7HB8cIIVEKUWRFZhUM5lMIQZenL4kxMBkNKWqK7RW5HmOsx6hJUbrvvxt\nvWEynjGajAgOnG0IMZAkhjTLsRHquuqTLLxkNnkAApI0xWhN09Sslmu8d2TZ6xw+/tRN08so/jvA\n7wH/JfAfAP/h7qZ6/d/uXP154J/+7DMZNfQByNt5UBCuc9BvIL67trsutyEa3IQMXoNwIOG1mk3k\nOsnnriW+u3ZfwtAuyF838ec+65rXXIvis/fu/nu75QPugzp8NmRyO94nmnG33vrr3HNh5/e7+/rj\nTg/cKve4DUW8lkbafdK77cOhf3H7ymA+GhUE6YndhGpRU7UO5yVaRbKRQKea3GhG+xlEj1072mpF\n6ypkWeAzQ+0lE2l5eOh48YHCiBJPS72JOF9DmlGMFOOy4M23v8Ev/8Kv8b/99t+i3hTURrI/nSGb\njrOLK6YPBVpbzs6vuHq5QuWB/aOSLEnxrmOcj9mbvcHT8/xNrgcAACAASURBVI+ZlhnNesWmfEmR\n58QQ2axPUcke072vUXUfkpqCg70xJmR8/HLJfjkhNCnBif5rnpCUxYzF/Jw8SynKjDwfkeZ9JmuS\nek6Ojlg3HWeXK+pFR5pmeBvJsrKvuhgCWkORpDx78QllllFXLUVa4l2Fjx0ET1GM0VISbceoLFBS\nkig5RLUKtNAIY0AlxKYmG+1zvL8hN4ZHj0+YlHssL6+onaWqa5QKbJYboohcnL7g4PCIMs2Ig2BF\nnuas10tOjo6o6pY0MaSJIcZIVfUC0+N8hBACPS4oy4IkzUjzFO8cQhhM0lvZnbP4EDBGodKEEPo0\nfmMM3ju6tkPQfyChJPFLxup+Qft06L83XP9P9DC/3eRf++JnkoO7hP4g85o6kRs4hAG+PnJdnEWE\nO3Aa6HGfVf4qN8fn7b3l9+W2xf257pT4WUt6d35tHt/zu4h3Pz3uedzuc0Z2fld3foZbLo+d17+T\nPXrtf/+in+/u7+ren+tn3d4e+rb99it3fmUwDx38v+y9WawlSXrf94uMyD3Pfs7d6tbS1dt0z0zP\nxsWkLZKWBFmUZenNy4MhyIZfbFgEBMukDBgQYHgRH0zYD/aDARu0bFOSZWhgQbAgUeIMRYKkOZzu\nIWfptaprv+tZc9/CD3nurXNv3Vq61hZdfyCRkZGZceLkifOPL7/vi+8TQuKaEhmYzNIE2xQ4roll\nNn9OJQ2iecosjDEijZYlUkkW4VXCRYfAcUklGHZMXmnIXQZWD01CVFfEaYkWmqCV8+rrX+DNi2/x\nbv9dPp4cUFkSy7GIkoTZtEBaNYeHN7ixv8OcGUHZJoxC0rqZ+jd6QzZ7Q3Ym1xit+xR5SVlNcL02\nKvBYTEPqaMJwuIVlDqmlQlcSXWaoGnqej2EHeGYLz7TRRUwaRSAk/V6HKoswTRvHsjGMo3yfJoHv\n0wo8dvYOMC2nIcJ2F9cP0GWFNAR1mVLmFaVVE7T7iDLGEDVVWbGYH2BZGziuh9Xu4rk+6+fOYbaH\nFLGmyiLavRaGVM1q2DhHKhPT0Iz6fUxM0jii0+twLgjQGizL4vDwgDCMmE4n5HlB4aYow8R1HZSS\nGIZoVtvVFWlWL0MR6CZIlCHIsgxlmgRBgKWsxvBcVw05G42knxZ5o0EQAtM0MZcrYdM0pizUck2B\ngbQa46eQjQvnM8QOcAN4A/iQJqn5D+65ypD3VN2DpWfKMaEj7r6unyAofXer6lM64OVFj0riq23e\n79w9hjzOILv73H/EcveQ32kiFyfPnyD608R+n+OjSe+YdFcnwFVCF3f7fLR69Cj8wOnvdXS8+iZy\n5vd8VnjQpPZwvLgcoElKTYYhbGpRYlmCWtQoS1BVKVleUtYG01spO+OYbtvFcQ0KXRMlMdEiJHLa\njfQ+chgMExYHFdLysDxFGo0RpSKJSqTSXNx8jcLQSBVT1Bll5aNsGzGQBCIAXXMwzcjLmwi/Iicl\nUIqsOiRMasJ5Qh5HOI6JMgNqb5mjsgoQwscz+8TzEjMxsewtLAZ4lYl0EoZbNp5tkmQlpiHJ8gTy\nhFpW2GYbwzBZZBlbXkBe5JimpMxydJXjOza+61IUBXHcLOE3pMI1LUqpEXVBNE/odfrUlKTRgmEv\nwJQOqizQaLIkJeg0Komg38b0ArSQ6DrDFJqySDHtHjpNyOKSKAxRyqLdbrOzswvUdDpt5vMZnudz\n8eJFoMdoNCBNNxmPx0BNWRZcvfoJnU4Hy2qSREgp0Fo3C6CUiW1bSKnQWjeJJ5YhCQxDIpVCCEFZ\nFGRZimGZKGVRFjllUZDHCRpQjoXWmiSOGkndMgFwLLsxCj5b/Mc06eIs4BPgL99zhXiEPpwg89PK\nXU4S55FkviqVH1/4AGJelZbvS/Cn7j8mtVNEfkTsJwx9q+3qewnvLEI/QeRH5dUvdVonckbdahLk\nYyLXK+S9JPQT0rdovsPRIquzVEWf1Xj7WDhrqezTwYtbzq9t8DRxkTLbX+B5DnGYkoc1wqiwnAJT\nCjzTxjGalZOVrqmrGlFAXTmMsxgpDEaFQ6dvkdopyrZpGz32d/eRssZUDroWUFV8cuUjDmdXMUTJ\nIk4QUrHVv0Amr6HTClSJrA2CbpfZbIrWFVK7tA0LzxniygFdy0GLEtMysaWDUSs8s0XiZRTdCFm6\nuNLEc5ookFvr5zGlySJekIcHGK5FIUqiOKHtOkipqABpOk1C5yzFtRRxNEOaNo4lsRT4nsdsPscw\nWmRpxkF+iO3YOKaJZUuKOsNAI+qcLAkJugMMZWGaCoyaqiqwnC5pVmE6HnWtmYULzKpCYqA7A5hn\nXL/2IQezmIqaJImodQVCEEUxpa6h1nzwwY8wpIHnuaAlSRKRZQVB4KO15vDwsIlo6HkEQYBpmvh+\nq8meJARKmWRZRpqmhGFInMTUVY0ftMiylBqYzqa4rsfW1jlM22F/f58kTen1upg1aCqKMscybQyt\nuX37Fmma4LrPfNHQ94Aff+AVjySZi7tEvkroq5L5ESmJI6n8yMpJc4E27iXRs4j1tPT8IFI/Tear\nx2cSub63DZbleyyPp8j7M9WvEPjRNaukfkTkJyZCcVcir5b71ZgxDyPys0j9sfAopP3kpP7CyNxr\nSXKng2sr9u4sqPMaTyqmYYwhJJ5j4VgW0jcZqi5pVhKXEaOWRyEt4jBDSYusyBlPCxASt2eiqBDK\npEw0tSkwqVhvj7h664/Y340YjxMczyRZhNzZO+Sicw6lXdygy2ZrDVmPUEpRtUBJDxMTpSWtdg/b\ndYnDOZ7tEscxcRrjByaWq3A6FnnhkGcZta6bBR3AdDpFCIEhJY5joZSi1haeP0AZFe1Wi6tX3+fc\n5iZVrVks5thL42AT7ljjuCZlkaGrGmVIXC+gFXhUVUmRJETzQwatNnUZIZeqhqJICPyAuq7xvQBl\nKGrDoOU5SMdFGx79vkVVpTj9HjU2eB0mqeCTT68yHI7IssZ1UkhJYAdgNe6GcRJiOy6GUAhDsL6+\nQVbk6Kqm1WqRJAmW1ei3oyhGCEjSFAQUWUGrFbC+sUW4CJFmCYZmZ2eHJItxfR+E4HBySKssCOYB\neZqwv7/f2BkCD8e2EELgeU7zW1UVfsslaHmU1ZMtvHgqeBQyNwTHCVpPkBV3yUnQkNHRJdVR5fIa\nrU+qMO5L5Ksk+5BrV4m60nfJ+5jwVupW7zndHpozyfi+lsmHWSxX31xOT3wr5dNqloqloXQpqR/N\nm49iD3iYVP7Y5H4aT0c6f3GuiaVBEZcErYq339rkD353j0HPZLgZIAqN0hrf9hHKYDGOiGYJltB4\nKKqgGcThLKOqDKRZkxbQNtv0W2ucH73GT775DpbpUOka13fw24o2GU7l0PJsFDaOqej3+nzFMHFM\no5Esq8btr8hSoiKjzCvQUFcFWZxj2QoUhFnc6IURy3FcM5tOEctl91mRM+j2yIuiEQjKgrysMKRE\nWS5VlpPlU65dv4oyHUxlkqUlRZFR1RopLbQGQYkyJI5lEcmMTreP5zoIoZDSYBbvU5UFeZ7jWCYt\n32niEFUa0zRJs4R4PqPTGyC0RgnQroP2N7D7PhoNlochBHk0ptUKeOXSq+R51vRV2Wg0uU4QicTx\nXVpmF0eZRFGEYRjYjokjmrylaZ7TGwwY9AcsFgvSNGE2nXL7zh2EEAyHA5TT6NYDv/GDPzg8oN3p\nUNcFUgkW4YIsjTAQ7InbgMa2FX6nhWlBtnz2SRxjmiatdgfbspt46vnngMwfSc1ymszhHslcL0l0\nFcceEmJJWmKlnlNkc5qszyL1M645IYmf3p9Rd9zGWaR+mshXiHj1O59J3GfU6dWNk8/heOMucRsr\nJF6yNCSL+4fdfZARlJX9Q/E4kviTkfqjkrkEvkNjyf83gD7wd4CLNH4z/yYwXV7712mWOFfAXwH+\n8VkN7t3Zw23V1KXL1nrAK68HhFGOtmp6noNvWhzOm/gjRprhV2C4JkkWYWHj+XaT7GGegFfiDzu8\ntfHjXL7wBr4bYLkW/W4fpRSGgCLP6VoJ51stiqpGSOi0OphKYihJlqZYlotlKcrSI44jzDQjT3Py\nsqQoc9I0IbBsirwAAwLfx5SKOI5xHIckTbl5+zZBK6Db7VKiEUriWA625RBoTV3XlHlOJjVlXVBW\nOYZuVvSlWYjr+pQ1+IHLZD4HaFRLosZxPaTtsr9/SIXAsS2qssCx3EYvbRjUZY2QJsLS5GWONBS2\nbWEqE9Mx8YIuwmtTRzmiO8AQ9nIM1dy+fpOD8YRwMafWJd1uH8duAnyVFRyMx9i2j+1I0qpGmSZp\nmtKSkslkwu7uLm7gs7a1QV2WCKDdaeO4Dq7vYRgGjmNj2xa3b93EsmyUKVHSYDjogdaUVUWZ5Wys\nrVNXGsexsSxFuFgwmYzZ2cnwXR/TMqGssGybqq6Zz+dorXGchy/YeeZ4JDULSyJf2R9LnNwlEWgI\nqLngpERusEJq+viSM1UeJ4j+AcR/TNL3IXN9xn6VxI87sfJF9SmCPkHyp889gMjvIfVTq1/vJ2Ef\nEzv36swfJpWfpWZZLX8mPH0CX8WjkvkvAD8EWsvjXwL+CfDLwC8uj38JeBv4t5b7c8Cv01j+61Pt\n4SuPjmtitRx0pXjzCxf54cfXqPOSrEo5t9GhqgS6zGGo6PQ8Ku3h+gGjTo9Rfw0tIU5TiiLDkHBu\n6xKWKylESp0XzBYaXde0gi6WZdHr90hik2TRkGRVZkihcJwAw/UwjCaAkZSCXr/PbDqlFbSZzmbc\nvnKbXq9LGEVIKREayroGXeLYNmVVYTsO2+fP4TgOWZazu7/H9vZFep0BdaUpykbyzuqcuoJFGGKJ\nmm6rh2FIXMeiKjKUKSmKHCkVNTmGISlLTeB1kMohLWom4wMMITm/EVBkGUqD1+6TxBFuOyBPYwol\nabXa1GWB1hpTKqzhgEJbqP4FSObgmqAbdclwY5tSa753sMPtWzf56JMrtNselrLpBi0s32f/8BaW\naeN5PsIQ1FVNHIVYpsnbb71FXhaUWc7O3j6mZeIHHnmaY5kmjuNQFjlpGJHmGRpNHGYkUYyhJHme\nYzkO0jA4v3WOvCip6+Yf1e+P0MogXCzAMOh0OrjKhOWEaS/7Yy6NoS8Uj6RmYYWzVv7Qq6oLBMes\ncUSYhribku30QqHV61bLn+XcsSFxZX+i7pQ0fk/dqnT+IKJ+jO2YwI/K+u7xCcncOEXk4tGSZjyq\nquWJcVqNdLr8eHgUMt8G/hzwXwJ/dVn3F4CfXZZ/FfgWDZn/ReDXaDzgPwU+Bn6CU4GIAJRZ4ak1\nbLNm7zBmNHLY6I64fuM2rrLQVYvLGxcR2NhegGE4mKaDZXsYAnzPpdcfUpQlhwcH7I93qOuCNFlG\nmBOCLI7RNH7YbuBimgZmy8exTYpimUJNG024Vg1pWlBWJb7fIlmEzBcLkiSh0+nR7w9wPAsTn/Fy\nMijLgqLWpHmG67o4nks5T8nzhMUiJghadNtDXNdjMZ9hCIjCiDCck4YxluXT8y1cyybPciQGSkBd\nQVrnUIMhKixT0+m1sZ0en1y7TpoUvHr5MuPxAfNZySiwMMgB0bjv1UBVUeQ5UKFMC8u1sSwLYQUY\n3hDSMfNbe5j+Ie7GJdAmrbUN/MEau/vXmUymOFTYtsV8Nm0STO/eBgGeG9DutKnqCs9x2d25hWna\n+L6PlM3CIMc2qfKM2zcOl/ptl3AxQxgwmUxwXRevKjA0VLqmKjTtbh/PO5LgHfI8b3zTowixjE2y\nMVzH83y8VgBo8jzHMExMy0PYakU6fIF4JDXLyraKVcm61mecOyL15b2GPnlutY1VCfxRy8dS9+ny\nCnmvkvsJ8l6V0I8mo7OI3Dij7uj4tHfP0bmj+hUCP2H4XF6zOhEdEfmRzv+I1AVPR2f+SDj9Az87\n6fxRyPxXgL8GtFfq1oHdZXl3eQywxUnivkkjod+DMJnTdbbJqoRkHnE7Kxh01+j5A86PXmW0dpGe\n20Eqjee2KKuCstaYliDLmpVSZRlTVyW+byHNdfI8J4oilFJkeY6oKuqqou+3CGwHMwhwPBdDmEiZ\no5RCCkkYxcRxTNDrHE8EeZ5hGAa9Xo/FYkFd5ZRJTZimlLpCa02RZiglEUISzhfUaJI0pCproImx\n4ftBkwgizzGMZtn50d42TeI0wcSg02pR5imGMijyFMdWIBWWMFCGxDBqTNMijlPCRcLOzg5lVVD4\nDkUlsB2boqyRyqAsCnrtLkpalJVo/PcdH7/dQg/Xqe7cwXDb/ODKB1ze2MLtbiA8q+EIQ/Laq+8Q\nTRbkeY5tGvRaHaaLBevrXWpdswhnuI5LHDdeLL7vM53OmE6nrK2t8cEHH7J/uEuv36XX6bG1eY4f\n/ugHWJai2+0d5y6NogXtoIPrNBEwkyRdujA2qivXdRtylxJpSKoipa5K4nBBGscIIXBdl1xkVEUG\niYHxKFLxs8Yjq1lOqSX0su7o9f7YeUWfcU6fbGOVmOFeKfy+dWeUVwn7iKTvIfMzrjluf1l+KImv\nevEcleu7507cq1eu03fPa5akbtAkRl7ua6Mh+SN3xKOPO03mNfeS+hFhP8ib5ZnIDE9G6g8j8z8P\n7AHvAj93n2seNledec6SHbTQpGFKHieEs5RR+xKvnP8y50YXMaWirHKUsKjqvIlkKQ0s08Z1vcaL\noa5AC3qDDrZpkeUZi3BBGIbkeY40myTKu/t7BK0AVdnoVFDVJUI0/s/TxZQsz5nOpiyiOUEQoMsC\nIQRhGOG4JUkaU1U1cd0QuTIaclSGgWkq6rrR9VqmTZK5WI7N4eGUdruNZd997bcsC9u2CMMZSpoU\nRYxn2Qy6XebzA9peG8e1CGf7tN1t5kmMUja27RPYDrMwatLBIUmzDFMJXNtEKontBcR5hG2UOMqg\n1jZlXeBio7WmKnOktKgLhVo7R7y7QziZUYzW0JTHsSRqQxN0R1RlSRjPSaXB9vYFOv0BcRRSVRXz\n2Yw0zWi1u7SDDqal2NjYpKoq6romCAJms03KomJjc528yEFIbt3ew/fbvHr5AhoYTyZMpnPoK7qd\nDoZhLBNON5OlYRhIJcmLCseW2G6bsiop8gzLsknTlMUixLItlBLkdQ36Ho3e88cjkblebsvy0d9I\nr6hW6uU5vXJtfeq+YzI/i9TvQ+gPuu5Y2l4h7NMkf/rcav3RdvwZq5L2EUE/Qt3xbLZC5kfP5lhX\nfkTmRvO718aKVK6bCe+oLMRyAhT3SuZnxZR5kN78vmz3ICn8fueen878p2lUKn8OcGik879FI41v\n0KyI26QhfIBbwPmV+7eXdffgg/cPuMIcTcFw3WPUG0GuGK2vY1uKNM1QUlJXNUVVUlUVpulgWkdL\nuSsEAtdzgZo4jSmKgn6vT57nuK6DAeRlQZQkfHL1Kt1OB6Vkk1VnKQHOwwW2oZZudM0ilL3dXdRy\nuXgchcDdRS91XYEhcE0FhqSqGuuKIQS2ZTLsnSOMQ7J2m831C9R1yXSpVqjqZiWkMi3yusZz2/Q7\nDkneBM2q6oosyzBEozOvqwplGpRlEzY3WkxZLObEaUaeZyg0r5/7Ar7XIksWWMIgTmL8fpc4ivAd\nlyRcMNrYQCkLGWwgTCjHh1RVSp6nTGYTtqq8EXqqCuoCv9ul2x8QJwvGkwlKKQadPsPhkA/f/wDK\niiov6AUtaiVI05R+v4fWirKssG2bIPBJkpitrW2yLMP329zu7XDl048I05hXLrzC9tYFpt6MPE9Z\nLBYEQYBS5nLSswGYzxf0BwPSNAOtGY/HmKaJlE1MFiEE/+zb/5zf/f33UGbzO75wPJKaRa+Q+ApW\n1UTHJL68Ttzn+DQxH+/OOHc/kj9Wt5wi5fo+9afJ/ZjEV8ur0rhx8viB9fWp/Yo0zt3u3nVHpCFy\nrVekcn2SxI/8zY/mhvsR+INULSuP957yA3E/wn6+OvP/bLlBoyP/T4B/l8bw+ZeAv7ncf3N5zf8N\n/B/Af0ujXnkd+H/Panj9NYNO7WHkOUHQRno9AtdD65wsSzAMA9drouZpQ2Aqm3YroKoKhGi8QizL\noSxLomiBspqFKHVZUOYZlmU2QZ3yAsOtyMtmBaXW+tgH2jRNZrM5lmURBAE1NVIKOp1OQy6+j207\ngMZ1XbSuUEpRFCVHD7+udeNHnWSkWcbB+JA0TbCDNmmWotHMplN83yfLcg4Px3iew6XLl1kfdWm7\nHsqSjG/+kHC8j65LpPKI8hxTKrIiw3YtMCRZESOkQJkCkPQCB8uxKfMMkcVY7YCyAGV7lEWGsNsE\nLZe81AStASLwEAwwWym7N2/yxS9+hRs3PmJ6sMcgWEMYBtn+PvbaBYRSRFFCrzdASsGd/R2MscTz\nHUyzcUfMy4yiLMiXRmgpDba3LxKGIWmaopTiRz/6EZubm/T6fTrdDmujHkWR0261kbJZASqExrZt\nTNPEsk3KqkRJgyKvcF0X0BhokqRxhTw8PMQ0TcqyxDAMfuonvsGX336D8XifPC/57/7HX32sP8NT\nwyOpejSIU+KfXjl3xIVHBClWRESxcs+R5L563ypZn0Xk8IB7lsS3SsqfdTvxlnFav7EavuCsMIfL\n8vFkcPS9jTP6dJY3y5F0vrKtkvgqmT+KAfSZe7M8qP6z4bP6mR99hf8G+LvAv89d10RoPF7+7nJf\nAv8h9/naVSUQniSva6KiYmT5SGWSFzWmqVFKUFVV4zkiBJ12gOd55HmGZVlNSjU41ptWdfNqnmQp\nWmtsx8FzPfYWeyitcW0HbYjl/6OJW1KVjW/5EbkHVuP7rIXG99dptVrkeUGeZ/R6PbSumgUxSjGZ\nLUizlDxr/LGjuMBzLWohUJZJXVXM5zNc12MyHpMmKVmeUtYJtukx7HcRwiSuNZfXthkMh0z2d9j5\n9AqLcI8iSbFNC2k2ahphOZiWy8bAxpCCNA3p+DaBbYOusFyPrDARVhe7dY6W52PLmixPWW8PKBAY\ndkCtJcXigG5/wP7uLkWeES7mDKhAGEQHM+xhTRD0WVsfEUUxQdDlyiffRSrJcLiGY1tL+0SItCxc\nN6DT7lIUJVEUYhiC4XDE/v4O7XZjavmj9/6Aoip4/dVX2d7a4tNrnzIeH6DkctIta6BsJok0o5Tq\n+BnmiYdlW+R5M5Hbjsnu7g5SSnzf586d29S1Jmj1ONftfsYh/QzwSGR+9AaxSlZL5jhWJ5xmjxVC\nP71/GFGfdXwmoZ8hYZ8pedePcO1pMj8K6Xuq7rQErh8gkZ9+HMePRZxB4vrk28wRZ94vNstTM34+\njJyfDal/FjL/NndDdo1pggydhf9quT0QgeyRJ5pagGE5KNUEV3JtD9Ns/uBH0fGOJLckibFtG6UU\nQRBQVY1eVeuKrMhJshRq8H2/Ua9EIZbRrKZsd9oErRZlVRHNp5idDoEXYDpNrJM0z6iylCzLlkNQ\nE4ZzpJTYts10OqMsM4RhEEcR48NDhGFQ1BU7d3Zw3ICpkrTbbaRq4pKE0RRDKKqqZuf2VdKyxHE8\nhDIIw5D1rU3WhptkpSZLQsoy5e13vkKY5Fz54I+gzprkEqVBVYtG6vYllqFwgjUCUxEWOb5lY7QG\nnL/0Jlpr3nj9DfxWm2QR4gc+ZplhVBnakBiipk5LxtMpeR6zP57xSpGhkWSLMX7bBAFtT3Jx+zz7\n4wMc2+TrX/kav/fd72BZFo7rQV2iq4osLWi32sRxQrvdpaoz+r0+k8mMw/E+W5sXEMIgaLcoy4L3\nP/yQn93cxJQW0+kU23FQpkTKitu375BmCRsbmwyHI9IkYzKZkGZJo36xrUZtXFVUVUVeNLaNwXAN\n23Gw3WfuY/4m8LdXji8D/znw35+46lHULEfQK0R+LMmekqyPCa4+VT6yETyIvFcnBB6N6I/aPIvA\n70f4nCL341eLIwn8YdvyOehVEj+1rerjVx5NQyL1vaQujkj9qCvLe1fdFp+7a+LT05GfxgtbAfqF\nr32ZDz/4kMnBIW3PBMNokjD3+nh+oy81DLAsm7qujklVSklRFLiuS1Ek+C2fLMlo2RZhFGKIxpsj\nCyM21kZo32+i+y3G2JYiTptQsutra1i2gzQVjuNxeHgArkNZFNR1Ta31sb44yzKqqmKxmDEPF8wW\nc0whl7r8GkGNKZv8m0mSLF3xPBaLOYZQHI4P+fSTq7S6LdY3bbq9HmvDPptbF/BcD3RNns6I5gvG\nxYzO8BW2Lr+OQBAv5ly7+jFJVhKlMRZQWxa2YeF12nQ6Af3hBq+//jp1LSiyBNv18fwmxGynN4BO\nD7IURAwYlFpTJBk379xmsoyjQrFgsnsbXRVsAaNhn8P9G7TbLZRSDIcD3im+ykcf/5BuZ0i37SMN\ng+l0glIG3V6Pvf0bhGHIwcEOk8mMujKIOzFJktDrdZlMxriuyze/+U0uX36NOIkpqpyyyrBtG9sJ\naHdGuK0WO7uHDAc9Rmsj0jRjNBogpdnYMLRibeMcldZYlo2lFLquKamxlhPpM8IHwNeWZYPGHvT3\n77nqUSTz1VWP+siDQ5zxKn+aPGua2NhLIterjHN0PQ+oWzl3Tx2cIOzjff2Yx6cl8AdsxyQum/uP\nnenPYNITEjlL90MaIhf1XS8WcUTkK4QO9+rMH2QAfapqllWcNoA+e535M4Nt+pzbPkehCyzps742\nYm19k06nhWM55Hm6NHoq+v016rqmWBKtYTRGwVrXjX3DNAijkDwvsE0Tgcb3HSxTEScJW1sblEXJ\nIo7Y3bvFue1LVLpifXODyWRMHM+pioSy1riex3QyI8tSHMchjo+8K+rmzcFx8YOAuqoJ4whL2ty6\nfR3Ltmg7LrNFiBCC+XzOeDxlMY+4dutTrl67RmfWZ/v8ZXrdPkI1CZE7rYB2p4uSFXYdM7n5Qw5v\nfoLV6oA7xO0oLK9FWdyiLAt01hgCbctESBNp+7iuR1VpsjTH9VzCJKEChqMRwm8jDAucFmCgRYXj\n9Tg8/AFXPrpGVdbcurODZX/I9RvXcRyTIg1JkxDXBPIQ5wAAF8pJREFUaZGWGUmSYEh49bVLjSGS\nCt/3m2BarTbj2ZSvv3KJGzc/JctS9vb2mEzmDPoDvvvu7/P2W+/Q6/VI01sYhiAIAq5cvULbDzBi\nQeUFeOsdBv0Otu1g2zbDTh8hYL5Y0OkOieIQ1zEZdtfJqZDLxAyO7RLGC7Iso+V6p1/InyX+NE3U\nxBv3nHkkMuck6Z4OLqXhhCfJsWpjdatOke+Jhk/uH3Ren65bIecT5RXp/JHKq2R+RoLOEwS+ypir\n6pXVc6vdXHFL1Mu3mrpeEnvdGD5XiRy95Et999yDDKD6jP3q59+DxyHjp+vR8sLIvCxqBq0Nqk2b\nrgi4fPF1Oi0PanBsG6UMsrTxO87znKqqcBznWFLOsox2u0We5yRxSJHmWFJRFwVFlnFxe5s0Tdg6\nd469vT2+/MV3sGybazevc3PnDn4rIE0TxpO9Rh0jHfYO9xgOB5RlRVk2HjSu65KmCVobZFmGFII8\nSrADj0F/yOHBIb1eH9/3KcsKy7KoqqrxrOn3MaRk+9w2QjchA9IsJIwWCNFid3cX3/dJswRbmbQH\na/zovd/mlddeoT0coW1JmgQYqkVdFwgkQsD+/j6j4Rq+32Z78zxSKrK0QAO24yzjlbRRpg3KBGwQ\nFaAR2qBQDr/x7X/KLFzgWYr33v0ON29f5fz2RVqtHkkW83vf/T2+8ZWv4/tNaIL3vvcHaF1x6dJ5\nxuNDwvmctbU1zl+4xM3d2/zgB+/jujaO0wIdk2UZOzt3sB136bdfYJk+RZkxGo24fKFNdzgkK4tl\nRqkWnucSJ8nSzzzk5s3rmJbFm+vvoEyH69euMLNsBoMBlc4xlAJpNT7oZXnsEfOc8G/TGPvvxSOp\nWU4TqT5JUMfcqE+5BK4SedUcnyk6Lvf3JfEHlPVZTLZK0qcNt/pU/aru/yy9+Gli16fOPUQUPhG/\nRtyVyI8WCK3qyauVe8+SzB9G6Pfb7ov7SdyPQtjPT2f+VPHJ9U9o+T6e59P3+3R8n7IoQGvyNMG2\nbUpDUpYlZVEgaBIUKCmbSICBh640WZxQZgVpHEOt8VyX4bltqDXSLEnjhLW1NWzXYjqdUGQp22vr\nzCYz6qIgizNMw6KuK1zXYz4P0Vofe2OYlqQsCzwvaF7z4whDKvKsoMhLer0udV0QJyl5VaOEgec7\nREnaREC0bUaDPt12i7Js0pplSYFr10hRo9FYsgmaZXs9zl36El5/C7vVxrZd4nQPw7bIsoowCpGp\nQkmDOF5gWzZCSLTWhFFIr9dBaE0cRwx6feKDAzzLAdsGLZchQARe0OFP/9m/yPfe+11MU6CUYmN9\nm9FojTRNCVo+3/j6T6BMk6JM+ejj9/HcFr7vIaXBcDAEXbO2tsnHVz5qPFNaPvP5vGlrY30ZgqBg\nNFpjfX0dx3EZDke4rtuonwwDrTUqV7RaAaYyuXHzBmkaMxgMiBYhk/GUr37jxxAVWMrADwL2d/dI\ns4wg8DFNkzxvJnYhBLfu3CSKFs9j+Fo0MYp+8cyzs//ibtn5OXB/7t5rjuyfYlVXLk4SVc1dMj8i\n8aMEwcflkkcm6Ucq34fI7yuu3m9/WjI/InN5qnyazM+SyOW9XTyWyle2WjfPU4iTPvhi5eajbt3P\n8Ln6Nc4i9aeCz0LaV4Crj3TlCyPz69c+pi40a4MuG2+O2D/YwXN9BoMBQatJFiGEwFDq+GctiwLK\nnCovmU0nFHkGNY3u25BsbK4TBI3Xy+2dOwTdDq7lIASMDw+ZLxZ0uz2iKFr6lJfH+vCqqih1kw3H\n8zwMIYnikMlkgmlaTCYTyrJoVo1KRVkmlHVFVdcURcZ4MqXfHyDQYEDQ8rBsxWw6J45jDKMJTZtl\nGWEY4vs+htn8gQ8OD9ncXEcIi/VX3kQbFloqsqWEnxU1u/tzwjCirits22F39w6O6yz93yWLcEG1\njMq4tbXF9HDM1sULaMtFaE2dz8EOltK9QVWXbKytk5cFF86fJwhaS/tETTSfY0lFnqYcTA4QwsBU\nNvv7+wz6feqiwFAWH1/7CM+xSbMItKTT6dBkExIMBmso06TX7QAgpcK2beI4OlaXeZ6HMhubQlEU\nSCnJ8oz3P/g+ptGsNZhPp4TTCXWh8QIfqoqrVz/h+vVrrK2tMRqN8P0Wtm3jum1Gw/MPGnZPCz9P\nk8x5/8yzvb/xPPrwEv+/wOXldoTfuO+VL05nLg2SNMeoTeJ8wY2dW7z1+psoy0BoTbZc/m4azSt6\nmqYkSUxVNZJImmb4vo/X9pmMp03YBUNQlgV37tyhrirqoqQQjX95lqbkecZsPqEsGv17lIQopRrp\n2HWQJcxmc3zPxHEs0iyj3Q5QymJn5zb9/pDbd26TpCllXmDbNlVRUtU1gR9wuL9PVlaYpkm73SbP\n86X0mB+rilzXJc2bmOdxlBIu5rTbHeIoxnEshDKxbZubN2/T6/Uo8pzxbEZeZBiGwDBUs6jGMTFN\nk8lsvDQa+wghuXr1Kptb55iECzapGsm9KhCmQCwlgjhekOULTKXwA58kSdjePk+eZyRJhmmaHEwO\n8b2ATqdLlmaUZUWn06EoS7SGr3/16/z27/8OulYcjKe0Wi22traZTCbEcURZ6sZYu1RVlmWJEBz7\nlsdxzGw2I45jpBTYjkO4CFGGRScYkJUlo/URiyhlMTvAd7tUBjiuQ6/XY/vcBaSSaF03WYqkRJk2\n0nwuWvN/hyYG0Uu8xOcGL4zMt1/bII1rhHZIkoQ8ralerYnjmCxNME2bsiw5OIio6hqBoK4rDKNx\nPex0eiipcDwXaUiSJCbLUrIsZ2trizzPKLKcGzdvYttm82KnFIbR2KeUVli2PNaP7+/vIaXF1uYm\nWZ6TJClVWXHn5l5D9MJujGytFt1en/l0QlFWbKyvYygDgSCMFtzZ3W+W9js2QcsnihqV0WK+wLZt\niqJg984Ovu8hiow4ThgMhhzu7GLZClOZhMtl85PxhIPDw0b9kKS4rodpqsYovFzpWlQ57XYXx7Ea\ntQtgSoXvOgjTB1JAoKvmjcEQjU3iygcfcO78BfqDAUpKZsvMPlIauI7L4LW3ODzYI0lqEIqyKsgW\nIVVVc/HSK7z33Xc5d+4cnuXzxhtvUlca07QQKNKkxPNdHN9bkrhYEnqx9EYqKcsmhMBsNieKIoqy\npNftIqXCsh2KZMHa1ggpJDXQCUZsrg2J0wlRGJGlJYtoiu8H+L7XGE4dnzRNn/XQ9WmMn//Bs/6g\nl/gseGo6kOeAZ9PXF5hpaIAwSrzCbIyejiIOQ2zTxLIU4/EhUZRiWk0uSgGYpkLKpe5cGRha8Mn1\nKwSWR6vlYxhNHJEP3v+AIo+xfQdhaPK8QNlWoxbJ8kbF4dho3azkbLfbBEFAlhTs7x8wmc8Y9fso\nU+K2fCgqdqZjJJqLFy9QlgVBO8C2mljbQeChpMI2Faa0ODw8pNtqoWuNrZrQA912izAKAUFR1/zo\nR9+nriqCbpciT9nb30erRqpMkozaEHzw/Q94/+P3+f733qOoSmzXwpEmRVVRa0mWp3Q6HSazCW0d\n4Dg2tdYkaUR3MKCqElSSo5WFjnNE26NGossFg+EIdM34YBfPayPQKClptQIODvcoq5Jut7fM5SlY\nG40wLZMojJgcThmuryEQFHnKzZtjbMc9tmeM1vpIaSBlk9MzjmO0bsIllGUTrbIsSzzPY21tDc/z\nsKxmIVKapvR6PabTOY5ykKbBzRsLbu/c5uOPBePxmFdfe41oNmc42uDwYMLhYaOm2T/c48KFi896\n6EbA8Fl/yEt8Vjw7/+2nj2fT1xdG5sXhAlFIihKMbqOKiJeZYwzRuBQ6tofnO9iWTbnMpjMY9Miy\njIODMZsba+Rpid/1cRwH23a4evUqpmOTVinhfEGr3SWOEhQCQ5ksspQ0TYmimKrUtFoeSZw2+tko\nYTgcceHSgN2dHS5dusRsOqXOEl7Z3iZOE2azGZZtYhgGwTKcbhhGBEGLoNXGcX183+f6zRu0Wj62\nbdLtDppwsTMbx7LJ64JB1+eDjz7hn//Ot9la2yZeRBwczBoVEDlKWXz68Yfc3rlDu9vCMU3sdoCh\nNZbtYFBx/cY1WpMOrW6HMJxjK5PD8SE7O7sM+j1uXm3CCJiWxcdXPqXf63LpSz9JHh7y41//MZI8\np6obVcxwsE5Z5ORlxqULr2KYZuOxMlxHCEEUR5TzBdPFnLWNDfI4xrIsut0RUknm8zHpkqSLsqTf\n71NrmE2n2LbNeHxIkiRIKWm1WriuRxD4FEVJXddMp1M8z1+mvCtoBT4HB2PQFa4lGa1dwnNsirJk\nMQ8xTZOiaFRXs/mEt956i/5gxHh8+KKG9Eu8xAvFCyPzax/dwfAVnvLoul0KQ1AvrdKub2O7JnVV\n0263qcoaXdcIYRBFEdPpnKqqGE/mnFtfb/TKadIkS6gr6hqi6QJt1BTlAUIIWu0hs9kMpRRJEqNr\nuL23z5f6b6BUY+KO04x+r8t4NqPd7RBGEe1eCyW7eI7L3sGERThFKYFnOXie2/i9WybXbt5gMYt4\n443LnL+4heUobty4jmlKtIZW4LN7e4+tc+vUVYE2+rz9xhuMp1N+63e+w4WLFxiPD7CUotAVs9kO\ng/U1Wv0uZVUgDYP14YgwjAiTkDzTjKcH3Lx+q4lhY0g+eP8j1ocb/MD5Eb1Oh/39XVzPRxoKjeDq\nlQ8ZDHtEsxAEtFwLLQyyoqIoE77z7h/y0z/5Y8TxgnQxJ8tyFrMZRqUpdMVobQPf9Ti4s8Pm1ibT\n6RTHdZGFxrFtrn36KeXSPvC9996l3+8RtNqURcHu7i6vvvoqnueilMWtW7e4evVT3n77LaIoalbt\nZgXf+e4f8ebrl1DKwvYcwmhBELQQVc1k74C9g12iKOFgf8rmuQ3efvtttrfPc+P6dYSCbudzsJz/\nYUi+BfafeEqNvQt89Sm19YfAl59SWx8AX3hKbX2Xh+XQfmTVxfxb0PmTT9ifJYpvgfyZx7jxjL7q\nT4ALT9SdF0bme2FGq85RgYVtWWxdOE+306Hb6aFFRZ436pCiKKiqEqkkVFBVNRsba9y5s0uaJo0O\n2XVwpcP4YJ+6biIctjoetu1S182qzN3dXVqtFr7vU1Ulnuvw/d/4Hd55+y3qumIwGJAVJVmW4nse\n3W6Puq6pdI6tmtRpna7P+kafnZ0dwkWI47o4jsssXLC2NsRUkuFwxHQ6pdNt0e2+w+3btyjLkvls\nTp5lOI6NlB5ZlnF75xaDwYCd/Sn/+r/2M+R5ibJM0rRkMY+I45hWt818Pmc+mXHp0nmiKKEWNMZj\n8QazeUgUxbRaAZPJnLLKORgfMJvP0AKCTpet9Q02traxPUWZZLi+2wTxSmKEIRn1+lDX/LNv/xbf\n+OpbZHlOXWu01tS1pqhKzl+8wCdXPmkSXZgm80VIGIfc/sNb5HnJK5dfAaDQgqDTw3Jctra2KIqC\nyeSQjY0NJpMJURSxWCy4cP4yVWlw5/Yug36H3d0dsjzjn/7mb/Haq+fJo5jZdMz3v/8D1je3cDwb\n07SwAh+v3WXr/CVGgz7j8ZgoilhbW8dQjaH1c4/0Wy/J/DPhUcj8YVi6fs6//Tkg87NwhX9hyVwa\nOXEm6Lo169tbbI22cD0LqSDPK1zXaaLrtduEYUEcxYwGQ8IkQmvBaDQiTVOqqqSKIsZRiG3b6Dqn\nKkss26UoUqbTkG6viyFNlFRUeYZtO8RxRp5lFFnEPE6PM+R4noeQCk2BpsTzfYosxVQmnvKZz6eN\nS51SLMKIPCtR2sA0FIEfcHCwz2hthBA1VXn0XVzm85BWt0NZ1ziui2EYvPH660RJE++lLBv1SV4k\nFHmBpG5UTGbTZ8uSjRtlmSOVwjYFdV3zxisXcb0AqSRxOMEwbOIkwXd9zp3bptvtcnB4SJUXvPf9\n93jr7S+xdeFVqvkYJRXG0leb5dL4Mi+Pjatog8V8gu35fPTxRxjCZbh2jsVsTJyWmLaDHQjWu12q\nqmT7/DZ39nY5f+EiOzdvs7O3gzIEo9GQJEn5zne+w/b2NufPX2C+OGRjc8B4POHdd99jMOwRRiF1\nVXP10xt87atfQ9cVphtg2x6b6xsUecqNG1cp0hQv8MmKFNsx0cLFdBTT+Zyqqh8++F7ijyEepod+\nqo7iT4g/Zjpzv2uRxhVffO1LXD53CW1UTKdNTmglJcRNMocwDFHKJMmmXLl+DddpvFyUqRFGY3Cb\nz+cMBgPCMKQ2JIIm1kVRlIxGG2R5htZQ1JpSSKQwCHwbkGyfv8AsaZJPBGbzedK0msQIUmKaJUo1\n4VbrumY0WmM6a1zxqCFLM6qyJM8Tzm9vEycJlmlS1SWGVeG6NoaUVHXOG2++jlKSoi7RQpOk6TK+\nuslwtEaRl8hM4jpw/dPrDDc3uHPzFmmacvny5cZdU0oMw2Ct32djY53dvTvE8ZyyqvFsn0uvXMZ1\nXWzLwrIsoihiNBiyu7/Dm1/4IoP1LeLxAabtUBQpRqGxLauJKCkNOsMei+mCMFwwn8/QZY4MPTY3\nt7hx4waGWZJUOf1WAFoShjGG0ORa8Mn1m2yMNpHS5cLFixwe7tMfrHHr2nUGwzV+7mf/FFmWYkiB\n4zi8//4HfPmdd/gH/88/5Ke6P81orZmYOq0unfaA+WLO1nqbWTjD8x32oymm6TOdx0S7h5y/cIFO\nt8PIa/TvprOPIf5FMoS9xNPD54WoHwXPpq8vauS/B3zlBX32S/zxx7e5f2asZ41vcTc/7ku8xNPG\nixzbL/ESL/ESL/ESL/ESL/ESL/ESL/ESL/ESL/Fs8GeB94GPuF/UuaeD/5km8fQfrdT1gX8CfAj8\nY2DVKfmvL/v0PvBnnmI/ztNEx/kB8H3gr7ygvjjA79HYK34I/NcvqB/QhMF7F/gHL7APLwpPa/yf\nNb4fF/cbo4+D+42zJ8Hp8fK4+JTG//Jd7pOb+DOgC/w94Ec03/Nfesx23lz252ib8WTP/7lBAh8D\nlwCT5gd/6xl91p+gyQqzOth/GfhPl+VfpMllCvD2si/msm8fc0/iwcfGBncdgQMaB9y3XlBfvOVe\nAb8L/CsvqB9/FfjfaRKA84L68CLwNMf/WeP7cXG/Mfq4OGucPQlOj5fHxVUaweFp4FeBf29ZVkDn\nKbRpAHdoJtfPPX4K+Ecrx7+03J4VLnFysL8PrC/LG8tjaKS/VSnpH/H4M+3D8E2aQE0vsi8e8PvA\nF19AP7aBXwf+Ve5KWp+H3+V54GmP/0s8HTI/jW8Cf+optHM0zt5+gjbOGi+Pi6vA4AnbgIa4rzyF\ndk7jzwC/9bg3P28p5xwn02zdXNY9L6zTvJqy3B8RyNayL8+6X5dopKnfe0F9MWikwV3uvlY/7378\nCvDXuJuegRfQhxeFFz3+HwWXuDtGHxenx9kPn6Cts8bL40LTTAzf4cmiXr5CE8v+f6FZnvo/cfdt\n5Elw/+xVj4DnTeafJ8/+hy0Je9p9DYD/C/gF4HQ6nOfVl5rmdXob+Bkaaed59uPPA3s0usH7rXF4\n3r/L88Tnve8BjR74F4DwCdo5Pc5+7jHbeZTx8lnwL9NMVD8P/Ec0qqrHgQK+DvwPy33Ek2sYjrJX\n/Z+P28DzJvNbnNQHneek5PWssUvzGg+wSTNQzurX9rLuacGkIfK/RfMK+yL7Ao2R5R8C33jO/fhp\n4C/QvO7+GvAnaZ7Ji3wWzxMvevw/CEdj9H/j7hh9UhyNsx97zPvPGi//6xP0585yvw/8feAnHrOd\nm8vt95fHf4+G1J8ED85e9TmEoslofolmJnqWBlC4V6f4y9zVwf4S9xraLJpXqE94eqtjBc0A/JVT\n9c+7L0Pueom4wG/S6EVfxDOBZpXkkQ70RfXheeNpj/9LPB2d+f3G6OPgfuPsSbE6Xh4HHtBaln3g\nt3ky76jfBN5Ylv8G8DefoC2Avw38pSds47nj52ms5R/TGLieFX4NuA3kNHrKv0xjyf51/r927d4G\nQSiMAujtncIBjAvY60I2DuUs2DmEC9BZfBAspOGRvOackuqGXH5z/0/gHlOmd5Lrjjkuqc/OIcv8\n6NYhyyn1f29IzbPu0/Ee5ySpi3NeJ/TK0MNe/Z/7PWbp91ZrHd1irWetfvuyxTGVaUjNL1vvPefU\nm/kryTNta5ZDkk+Whw0AAAAAAAAAAAAAAABA8gUCSawI/1yNXAAAAABJRU5ErkJggg==\n", + "png": "iVBORw0KGgoAAAANSUhEUgAAAXMAAAC5CAYAAADavt/0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWnMbWl23/Vbz7T3PtM73aHGrqrurrbdbbttuulYVhqT\ngcgycQwIGVlYFiCQhSCRAUu2IyHExxAhWSIKcsRoRSJBfLAi5AgMIU4CiWxIYqc73XZPRVXXcOu+\n953OtPczLT48b7dN1O6qxHVz7e7z+/See/Z99nnPfe561l7Df4mqcuDAgQMHfn9jnvQHOHDgwIED\nv3sOxvzAgQMHvgE4GPMDBw4c+AbgYMwPHDhw4BuAgzE/cODAgW8ADsb8wIEDB74BeCzGXES+X0Q+\nKyKfE5Gfehz3OHDgwIEDv4W813XmImKB3wD+KPA68KvAj6jqZ97TGx04cODAga/yODzzTwCfV9VX\nVDUBfwn4ocdwnwMHDhw4cMvjMObPAq/9ttdfvv2zAwcOHDjwmHgcxvygD3DgwIED/5Rxj2HN14Hn\nf9vr52ne+VcRkYPBP/BYUVV5Evc97O0Dj5vfaW8/DmP+fwMvi8iLwBvAvwb8yD960dPf889gMO3Z\nwCjGG7QX1IM4gxGhqiJiEFMINdMZIe935H3ECRgpqCkoiqpiUgUUjGCtRUSotVJrJidlt1NSugEs\nxhi2byaWzw7YXlgOM+YLz2J2zKyb4Z1jHEfOLy94eH4BCMMM7GBx3qA2Y5zFONCijGOmZAUpCFBq\npVah1szqeGC2MtRJmR5Zbq4i41bJSTHGMF2NPPOtc3zvEWvJ2aLVUKtBFFQV1YK9/X1s8IgD64Qa\nKkpGfaWkSiqFGoWyVXStCLC6f8S954/50Hc/z73nnmV5Z4Hzls1mw9tvvcErX3yDh2894pW//oDh\n2TnbhxNmctSaMQiOijWK85XQO5wFJ+0zxpTJxdAd9/izwN17c/ysYK0jJxi3mcu3R3YXyngZmTax\nrest4iriwBnLyQt3+MC3PM9v/p3P8x3/wrcwTYnz1895+PoFXgQ1ghqotQKGUguhh5mtuOAIXQUs\nIPy1v/Cbj2Fbv3t++Id/+Ou+/6lPfYpv//Zvf8d17t69+47X/Mqv/Aqf+MQn3vG6H/qhd05b/fzP\n/zw/9mM/9o7XffCDH3zHa372Z3+Wn/iJn3jH6y4vL9/xmp/7uZ/jx3/8x9/xOoBf+IVf+Lrv//Iv\n/zLf933f967W+qVf+qWv+/5rr73G888//3Wv+Qpf+tKXvu77m82GxWLxjus8ePDgd3zvPTfmqppF\n5N8H/hfa/67/+mtVsli5jfCIIKIoFVUB45rxEgGpGGPbBzWCFWGxGtA+cLO+xqpSKxgxxJpbfEcV\nLQKqON+MOlSMqfTDiNoeTQVnLGIi3nswlZgifoI9O5wK2VpSSogIq6M5OSeWqx58QZwSKVQBtUqt\nGRcsWjIUoYpB1YKOrFaeo/tbqnSIeqaq7DaVkgzWGayR26/BtN8FEHGUmhEsIgDtUCsoYdGjoiCK\nmgLGYL1CqLi5IkmpYyEboVgLO8fFo0v8mSNZwQwGO/dIFYyxqAW/Uvyo2F45fb6naGH3+oQ3gnMG\nq5ngPc4YnBSMCtYKuRScM4g35FQwpVBToVbBSDtgfTB0vSW6hHUVqF/dA2IF7yzzbmC1OKLrB4IP\nHB0ds9tu2Q0di9WcqoVaK6qZUoVCRXMmRcu2E7pswLcDepi/1zv6wIHfHzwOzxxV/avAX/161xhv\n0aqogGIRCoJQVBERVBUrjpQzM2dxttI7Q60RkcIw88QxIhVyyph2XxADBapCpmCMQcXRzZSjowDG\nsL0ZyVHZPkq4AM1/hf2UyGWHsY7ZfMD2hpNhgbVHiBiyTkSNxLpDCqhJZAtiLMQC1qElgSjOVUzX\n0d2PDKce3SnXo7BPE0LFOoOxig2CiJC0YtVg0fawYhxFBWpBMAiF0DmcETKgCrV4XNnBzFLMhErF\nDoI6wRmD6SCZyiCeo9MFpWbiOLE53xBLZXP5iPXVmpyu6GYV1yvhbmERlY7A9jziLVh1WGswGtFi\nECeots0zJaWaQph5LIZJMr3pySkDQh4rVjzGZ5SKmAzVoFWRJJjOYhfhq09n1js67yhDR7+cEbY7\n8tgO+lIMIhWyImLbvimCCWCtoeulHc4HDnwT8liM+btCoBq5zcDehhlFUASp7bE6N4tFTIXBCkJB\ngVoLaYrUknDWEmYdu92eqgpqUKNUMpotnRe6vtLNPMuzGasjTy7Ko4eRMgmut8SoWBVUKzllilZE\nDH0fmM07jDFYa5ninov9I0wVSAYlItZhXKVWgzUV7S0qMD+Z03fK++4McASP5JLVdsXDPAGCCPhg\nsFaZn4XmgVdBjKVYEOMgtsOIWvHBIEZQzYhx1KqIFDRZyiYjnUE8MGS8GIorlB3Y5OkXHgZhSpWr\nyw16fsPF+Zr99hz8SLEJFzJ3PtIR5tDdUbwVclbMFqQCVERamEPEkWLBqqfWQqmC2J7l2YzF3GOs\nIghxqoxjZLPJpCkiGIxxVNHbEFih7JWyKoTeYZ3wvo88hwSPLwXvDLNZz75mam0HbvPQlZQStRR8\naJ9TzMBsPsf7J7el3y337t17z9Z69tn3rlDsox/96Hu21vd8z/e8Z2t97GMfe8/WeuGFF96ztVar\n1Xu2Vgjhd73GE9v5VhyqGWMMSRWVSikZqR1GDDVXrAqCRWtkjyEMilQl50wpFS0VrMEFx+n8lKuL\nG1KKgFDVgShjivQnM/yicrSA49MeGxxh2OGs5fpiIt/sMcYg6qilkFLz0AsDIh3GCVUnqikYa6gx\nMdWJSW4IdU5vA84pyVvUKt3Ms5jB0dnAP/ttH+F8+xZ3ujt86vwKTR3iMkEc1uptvFdQgWocWVyL\n96sgXtCoCAqYduY5jxGYxh0yQSqKDmC6FmZxwaALcNViUYoU+sUMYxLjuOHBw8T64Y7t248oLtLP\nCgwb8gxOP9SRdc+wEqKFcGmQWDFVKFOkytDCYbVCgTFWijp0Jkjn6JxnOczBF/b7PdhE11kmImNp\nYZWcKrXqba6k7YV9zhgrIMLT33KXlCcymbHuESKqGbRAVsiZFEdyrNTJQhb6IeC9IziPd/ax7lsR\n+X7gZ2khxP9KVf/MP+4aB2P+j8fHP/7x92ytF1988T1b6+jo6D1b6/e1MVdVrLGU2sIrBsGIJedC\nNi3UQKmoFKRC2k5siyA1oTmSU8JQ29+UwtB3DM+c8uh6y2azQycBqxh1bDd7Fkcr7GDpF5750ZL5\n6oT9/g3GGJmvjsmpUEplGiP7fcLulSxK6IQQAiKVsYys95fs40RmxPeCtS1ujRpMZ6imYoJhNu/5\nnhe/lSN/n3/3j/8kf/vv/R+sv/Q/8WgO67qjWnDOIaI4X8g1YJylH3rEQsnajCgVQdBSMM5DqaRc\nsDmAVNQKOSayGjCKhoLrCmIVMxN832F6pdrKzc3I9uaKR6+vsVHp+ogLFqrBaWXoF5g+sWZL0Up3\nBuMuY64CBYuUiHeOXKTZ1ipkKRytThhCTymFKUaCNRhjWvzfecLSMZsiV+sbKpWqisuGTMXOPNYL\nu7RnO22RfcZ7Ty4JzbcJcBFyqeRcQAw+BFQTtQpKwrkZw9BjTUuAPi5uu5v/HL+tu1lE/sqhu/nA\n7wWemDGvNA8aFDGGioGiWNviqaRMAUQrVSoinu31FsqeNG6ASt8ZhsHhXQ9iCH3HU13P9dAxjiNX\nm4laM2lSri5HnnrfDNcJR7NTzHJgfN5g5SHee6YpsdmM1EfCuLlknBzGCo92kYX1FAO7ccc+ZvZp\nfxvr7rHWY51BxFGL0puAx7GczdnulT/+iR/k+guv873f8oeQ7tvYvvqn+Zz1rKcrTAsqgfUYFXxv\nmS0ctbZvJ08FRKAAtwa+ilIqqFEwhhozUgyahWwyMhdAcN5iqYgWnAuMaQe7yvlbG/YPY4uBG6WU\nDFV5+vh93Dk9wc48b9RXGMcrfDAkDFEyWg1ilUrCucA0ZWoRUipcXWyYnwzU6igUjPGoEYrpqBpx\nRtFQMcHhbQCrFAOzIdAvAqG3LRGuEKfYnrxyJpYJlYL1jhQzIhXNFckgYvFBSDninGWxmDPvbfP6\nHx9f7W4GEJGvdDcfjPmBJ84TM+ZSFRVt//G1NrMmUGMBUUouOBXEpmaUbg1YTQI44n6iTpk+FOZz\nxTlLk5nJdL1HELwTdrvEOI5sb/acvzVy5+4xetoRwqw97npHjLHFYM8rm+sNKSW2+4ldvsbkxEIE\ncY4coRiwPuCD4vr2TGFsRZTbckhPqpljf8SJnvDrf+tvc3P+AHHCR77z4/zMT/7n/Cd/9mf43AWI\nte3pxLYY8mI1ox8CKWa0Vvaqt0ldoaZMLrQKH+dQa6i1VQGBoeSC5hZeqVVRlRbr9xYjmYphTJG4\nGTHFUHImRc+0zZzcPeXe0XM8+/Tz2M4iainxdd68fpNynJG1QWzFiLA8mmOXjgG4fG3PtANjDSlN\nLIMHDCllKhWt5TYkBhaDteCXLfzVGUNYuHYoBsF6ZUwbyujw3lOrUmrLYeQYAUvXOSKZnEdUlJKF\n2bBkPp8xny+YBUeO+XFu26/V3fwHHucNDxx4tzy5bFFJqLW0gjvBqFC0Ai0RaSg4cVhjMOa2TDE7\nsq9EIi5ZJCZ2u5FlmjXj7xwI7XDQRNWKdY7QdaSUub6cGHeGFJUuwHy5YJ4n2BZC77m63IC0KpiS\nJorCMBN8V2+NmSPngNaKOI/3ihiBWlvBnSglR0LXk/Ke7376O3n0a/8Q38+ZdwOf/X9+lR/86T9H\nSn+SMBvAteIbKaAWul7AZQLCGAUwGK1ULRQFi6BiKa2UvoV2xJPqRDUeam6lgcW0Wm7rEIFcEik7\n0l7R6Kk6UqtBkyEnxVXD7OiI+WKF8cJyfsrRfMPN8ZZ6ueZmscPtPFaEcGSZny1IVTFhRn17i6On\nkFEVxrwjiuCdx1iLxbVSSpcwTvChPYmIE1xooSkR15KhGTCFWpVaC8E5sJmorX+gZEOtFmPAZ08m\nYbww6wJWDcEHRB5rr9C7cvs/9alPffXne/fuvacx8gPfXMQYiTG+q2ufnDGvGcRSpMW9KxFTDUkz\nzkJwCjoSPFgPBovthJwNLhrGKoxF2F5FLswNUi39ImOtxVRFSqHGQsotPCEus9vtePRwzbPPVGqs\nLBdHpGMl5RHjA+Jyq2HWyt17K6Tbc3wi2LlSipDUNi/TWWzvGfoOY1vZ4LTPVBTjPFITi7JCrze4\nrmPoO0BZnt7jH/7FP8Nf+rm/xvf/5B+iGKGKEqeKN4YuCMYaCo6cb24972a5BUvRSlVBChTbnmZU\nAGORotSk6OjJU6F0CefbwSYVSorN221dSIgYUs7ECfpwxJ2zezz33HNsxjX78YybmxuWyy1H7zvi\njf3bXHxhTT/v6JceNxhWqyNWdyKLM8fmagTjSbrHpEoGYpxweJRCqe1gNcHgnGAcOO9wwbdyRdeS\nP1VbYrVVK02UMaGlYD04K+ziCDjsVyp7KvSD5+zsmNVqhnO21dw/Pt6xuxl4Vw1BBw68G0II/7/k\n6Ha7/R2vfXIx85RBLRXbaocVqihOlOAqnVeMhy4IzjZPVFSheqaxIyAYI0zrxPpmonLFfBNah2Jn\nyLmSs1JiJmvGWoeYwlsPL3nmrUvs0wus3eI6h+06ttsbprhDidy/v8LayvJsRlgWJt3jgydmENdq\n2YMPGGvpuo7gAr3fMu0mjKm4rBzNe24entP5GUYqRiymZG7qwPWX/y6f/MC38n+dfwESJN3R9wHj\nKgZHLIUSa+tsNRZRqKqE3vPMM0/x1oMH7HZbEItoRqQlP41aarG4CSgeVaXeVv20sFZlfuTJ1jHe\nRNKYGWqHdZ75YoUag/vq7zVwtFhRpHLn2US9UnS/RzvHsOjpVoG5n2GGgJt35F2iACZknLXkDLUU\n6BRvelwQfMiUVMHS6v+rRcSgKNMmoqKEmUOAuB/JcUJSpuTW/Wo9FNOcADGVzlnOzo5YrVYMQw/W\ntPUfH++qu/nAgSfBk6tmqRVyppaKWIsaizEF68BowQUIoTIbHM4ZQLHib2PT7tbDM0QXWd8kNtcT\nNSvx0R7rhdC12rdaMniLDQklsr2GT3/uM5ycnPAwXbE4XqApsltvKRl8B8enCzqv+Hkmm20LWyCI\nMRipVIFMoqPHuYCIYdZ3zIZmgJfO8cE7z5FfiQzHAzWNmOBQEeI0svn0b/Azf/K/5F/+T/8wMXhc\nMayGHmOEmoUYEzlWqCDavFBjhHtP3eXO3WOeun/GF175Eg/evrh97hf0tktUkpJGMLsO6xJqCyIR\nwdLPA3fvnRL3HW9/+Zy3395gZnB2dgcqpNIe6a5vLsh5x2oxZ6uR+0/fI75duX5zT14X7P3AbD6A\ns9xddFxd7lhfbyklotVhRNBSsUDv5y1x6QLHqwDO4kQopXJ9ecN6MyKikJRxP5Gl4BDiLmOKorWg\nWimSwRuMozVdUZkNMxbHgTBz+L4DI5S4f3x79l12Nx848CR4YsbclKZfYu1XGoUUNYAWnC94I/gA\n3hWsLxjjMCimGuzCs3WAqzgbqEwM8xOCc+zGxMO3r4lrwFSMEexQEPFY40ET2/2eXVrjcOzHDbv1\nFZvdGiOW2SIwzD3DUDBB2UZBEGpsIQ/jhJoqacpElwhTjwkZ2xmGWYcw8ezpKXfK81T/BqKCsZ5h\neUTcXGEqbEXJn/s1nrfHfCFu6X2rU7dGSEUZp4zxYKppnrVahlXgzumKWedZLgbG+BTnDy8p6jCS\n2ncnSlWPTBUtLQlqb8uu++BYLu/w1J2nWPhjNk8/w+de/QKzU8/Z/adACtNuz8XFFW+9+YCb3Tmr\n+QnOCRPK6Z0j1o8u2F5s2G6OCGPCL2HoAovjQNHIzdUepFXZ9J0juIDSId4RjG+JAeG2+UkJ907p\nhh1XVxtKzZRcyfuIxsK0jzjN9J2wWPR0/RysMKbUmsW8cPfoDvefuks/C1jXEqulPtYE6Lvqbj5w\n4Enw5GLmClag5Nza4GtFrFC0oMpt7bmCqcitfgt6G/P10ElL7hU1zBZ3GYYFs9kMb3pOTx/yud/8\nInlXUTFgKsWBmQxVhPmsZz2+zZ3lHaa6Z8qRNE5oNfjOYAdzW/pnGXOhqCVmi6ug2u4bY2GT13jp\n8LYDMZSS6OcdN48uKekGEY9oa/nZ3WywoWec9sxqYfvoDX7qR/40f+rP/yRuPuCsAwzGtQoe1zVv\nu0aFquzHiWE2Y5gFQh94+pkTXntjzvmDK1CPaV8PSivnlNw6I0PXPm/nLad3Vzz37HOcnJxx/eCa\ncGzJLuONJcXIozf3vPXm69xcPuR6d0GeCst+RTKVyWQyhjoaHrxyRb+a0816MGBdYJh59ntH3GVS\nrhjJGBMwtWBtQKV10dr2r0qtlVIqi8VANZWHFxeUXKmxMO4m0r7SB0vvLN4tuH/nHvPFjKnsidM1\nYg1DF+iHHj/zGG/J45YxPz7P/MCB38s8QWNeqUURNdQ6UkWwxpEnpXihdIlqleybYFapE025BFDw\nYglDILuMdXOOV6esliusCbjOk3LhzVffYooRcRatQk2GMLccHc8ZmchmR0wT1ELOlRgjxllKVSCz\n3+8Yd4liLLUaHJ5SKjWCE89uiqxvdnjx+FCxYtnvM8/YpwkV/Lwj54oYYbfboGPg6adO2U8Tj87f\n5qOf+D4++f5P8A+2X8B4xTlDHismFBxKqQ4yaDGUmLjYrHnq3nOEmWXcVJ5+7g7VavNoS6JGSBVy\nghRbW79qxYhBpeDVcud9d1nOZ7gQiDaxmgWeCTOuLrdsdxt2l1dcnp+zK2vWmw359FlInl2ZULGk\nKbE/j1yf7+kXjuJ6fAC8UC2kW2EcY7rWH+CESsGZgSlljAjeW0CxpnWCzmaBUz3m4uqKNCXqBDaD\nN47QzTk7vk8XFlgJHM1XzO4+w1jXWCksjga64FAtxBQp7zLzf+DANxpPLmb+FcEl6q0nbimxIAbi\npEyjwTrFdJXswRghaWsW4VbO1ztBcHReODqac+fuXazxWB/YryO73YbdbgQFpVI0kZJhOTvheNkS\nk/v9yH5XIFd2+x2hs+x3GbHbZuCzRdVRi0XwxF1EFYrRVnanypgjPnYEP7Cwx9y/8zK8HolR6Pqe\nWpX5fM7q7B67zQ29D1xfXvDF//2v8B/9O/8h/8Ff+GmSm1rFeJ5wvQHNpH37blTAiONLr7/Ciy+e\ntQNGCt3ScsesMMaS4khVZbct7LeRXblpf/dWuKyUStd3+HnHYjkjRGHVH/Hhco9n4oydFP7f6S3+\nuwe/RoqJEjPRRG52F/g6UERIktgXRafCl3/jTfpjxyCV+aqn1fc70uQg5qZKiZBrZZx2bPJESorm\nQugDi1mgc5YpRay1zAdHSUt21xM6ZfKYmZKlFkE0INrhTEfvPEpL+voQyLliyagqcUrE8WDMD3xz\n8gTb+cttw4vHUFsiVE0ro1OhDJmcwEYlmYoNIDiK5qaqiMWIQ8ShBoZFz/JkQQg9+5QZlgOL5RIx\nTQCkCsQ0MQxzpMzw5gjRTE6P2F5PbG+2pP0ExuCcJcdIzg4VzzRB2kVEM3Uq9F0HHlRbq791nq4/\nInSn9HpEX+6R6hprCqVmnBnY7jfkiytyGrnRkTurI/YpEt7e0psN2nWkPJJ0RKRJvpavaklWhoXQ\ndYFXH77G2ckx3sHR2QI7twTnCf6p25K+wtXlhqttzzpdAptWd38r6bu9uGKohg+/scC9Hbm6fJXP\nWIvUxLd910d55tWBq+rYlALGcb2/gTQy3Sj7apj2rZlre17ZXkdMB5WmsVNzi1dbY1GthG7AiGBn\nc3KEm/WOqWQuby7YJcu9u3fxvaOMBecsvQvMfMdms4Fo2ZuR8zd3HM12eAkMDpIprXzRgTphuxnJ\n3lJK4uZyw3b7eGPmBw78XuWJSswZo9SSbzW75VaeTykqxKSYMWG9xxulSquCEGmlaxhDTc1jx0ir\nmNARYwPOg+scznlC6NpjvbOsd4aT1Sk1CXnblA/31y0uvd+OaCmkpOypWOfJsVKyJcVK3NIagpxn\ns53wnSC9aRozt1oky+EI63oWXYeyYRoz2UaGobbSOWcY5ieQR9I0Erzh9S/9Xf7Ih/8o//MX/yZj\nSa2ccl+Jm8J4AxVhNnhc77C+cPXoEiMwW3gWbsbJ6Qm5ZAyW3g/YI8swX9DfePobyz5BqiNSK2Wa\n0HXlfa9nysNXicXgwpycEpoyn/u1z/Kddz/Mr169QlVHKqklnL0g3lJMJlfQJKgUNtdruuMlk1Gc\nGEquaLVNz9w6+sXAarG81dypLE9mnD+6wOwzxkGVieXpXcbNRNyOTXjMwTB07NNIRZm217z95pcx\nFGJcEDphftzh+ogTxUhlt5vaQbXZsd2OT3JLA/DJT37yPVnnC1/4wnuyDsDV1dV7ttbp6el7ttZL\nL730nq0FcH5+/p6t9fnPf/49W+vdNv68E/9Uh1P846AIYpp8bEvdtUYYK4U6VZKBFEpr0lFuq1cM\nznr01mutmpn5OSJKrhPjuCWXiawTSsZbh/MerDDvZ3R9jxRP3CrjLnFznllfXZGnTEHREUgKXqA0\nTzRHJU+QciXFiJiKDwOmKlorU2wyvcYFFrMT9g+ukN1ErULQgkigimNzc8Pp8phdjMyCBevxYc6/\n9L1/jL/xpf+Tqwj7zch+W9lvlZIqNvQU07pAq1imXDi/OOcsz/FWWHQrrHXspoxzinGGfnDc6+8S\nOsPDR5Gr7Z7OzrBm4NvfEI5zItkeCZ5geiiRuN9wtdtQUsGXQtKCLRZNlegjxRowrkn05kqYWYy2\nA4JgMC6QKmhuE6I63xFmPf1sBhR6ZxDjWNzruLi8IseEw1NJdDOHFM/65gY0Y13Lk4TO4zrDZnfD\n5z9/w7AKHB+vOEsL5suOOHX4rnW77raR7XpL3h8SoAe+OXly2iy3E3QUEClN8U8VY2yrXKlCqUJO\nhXE0hKJkJ+AsWEUdhOAxpiCqVC3s91uSj1yvL7i6ecB+v8aZBWib5OO84/rmguViyWadMN6xudmS\nUmEay22oQClScf52wMWopLGQpkzVinOG0Jk2vmwspD6iIuxD4PLqkiO3pE89qBKnDH1ls73Cd3PO\n7jxNzCN+CGzHDfEiUqaRk8//fV7uPsBn3vpV4tZQYkRLAflK4tZTklBtRyqJdLNn1c+pkzJuI1PK\nbGNElzNk2aY1GRQjlvt3n2Ffthz3Z/zznPGMOeImb+ntDGsD1jhqyfSzjpBG7vcrKg5RS9aKKRYJ\n0kbT0UYhaYFuFrC9Ilaa+qMRco4YCTjvsbaNuCsmMswHOuPIonhZ4DpHLhNpn5AEg5vzKEWqKazO\nujb8winDzOKcIeVCmipKYbO7Ydg5xGaETC8dpSTimMil9RscOPDNyJPTMze302P+ke5r1TYEAVEk\nV1JyZDI1O4xX1CneWyyeJJEQDFkTN/sr1C9gB/v9DTFuSbWVG1o3x0hrF5/yyG7a3Hq9le00UvaZ\nXDK2WLBtis5+bI1NOSk55ta4UgpGWtGk1Saxm2NkihMOx0PewEzCxxbfyk2aSFOb/rM6OWPcrVmb\nwNHZPY5PT7i5eJtxfc6wOCVvEz/6J/4N/vJ//LdYzRZsRpB+B7G1A4lCHgt5vyNNe2qKzBc9Z8tT\npu3Idp/4wmtv0s8sJ2dLju8cs1rMsGpIJfH86lleckd81+l3sN9eM3NzbOiwxmGswbm+jbQbE8/e\nfz+7X29PNLHGplVTmvSsOJqSThVs57C9w3qPrQasRbUpH1oxIJUp75mSxUyGsPIEH4il0AffFBe9\nhyxIqsyWA7OjnnE3MnOFMGvt+VpaYnPaT1AtIqU9Gflwqz1jMDhKuiGnCuV3rwt94MDvR55c0xC0\nJhenaDK/FTen1Zhzq9mSUmkj4UqGDCUEiiqehNiOUkA1M047ylXCGU+piWEW6OaWvImkbDDVotag\nOfHw4k0kKCqFcb8lj2PzRLPAVFBxjFMEdZTaDh2RipiKSGnDmo3BuExR2G0Labwgb5V5GuiOPUUC\nxhbyuGOVply1AAAgAElEQVS9vqEAc2d48OABcZrwpuK6gQcP3mIcL/iu+8/y7afP8xuPXicEh3U9\nmERMlZInxm3Gux5jlG5u2Kw3aLKMWtGqXL19wRS3bE6PePvNt3jppZeoBjotnIVjfvSpP8jN+gpj\nO/phTjWewXvEAnmkjBlnHPMwYxEWFANjviFKoro23i0sCvtQKc7RHVtC72+rhG6nIZmAOIezFuc8\nvQ3ETWWzPmc265nPFswGSzQDcdxjaiJqZNqO5DrhByFVpTcWOkvNLbzlS8XMeozO8KFQa8QYxVgl\npZFaLIXAfjexPyRAD3yT8gT1zNsUewu3nYvNm5PbSexVviIwJeTSNEaktvhozoWZOFansFh61NBa\n4FPF2YgxjtVyzjQW1mlHzolCATVtRGiaiGmkilJVm2pfNkgBiqFqRZMlpYqUgjrFhtqGH0imUCjW\nolKhNjGoaW9ZT1u6E9jvtjiEJC0RaNzA2ck9osILz99lt9+z3W+Qkig5M4Qz4sUF/94P/9v82f/x\nz/PWuCbRBL2u1iNxLMy7gdO7S1x/TayVPCX2aU/d0xKgUiFXLh5ew5VjZpa4heUD/ogf/Y6Pc755\nhNWmIFmNbx21Yql5QtQQuo4+TUzjjsWwIuuIStd0bWhPPyVZjPf4U0NYCNUIEtpAjrhLLOwCNUrw\nnuVyRZHCfrvHmMDVw5HF3OG6wLIb2BmDlh1pyjgfqL7gvKOfGdRbSi5MO5jGxH4smGro+5bwns1a\njqSURAiekjN5LNRJmabHqs1y4MDvWZ5caaK06UJNgISW/9Q23LhSEdMqRbjtGCyqTRmQTCmWYSmE\nLnB2eowIrDcbgNY09JVJ7ctAHTO7bSZVJaepraMVNS2xqgC21a6nUpDqMFqxpTKlBKXivcfYirG5\niTlZQ9GCcc0Q+t5TimEct9T1RC4VrYJiqKrEGCkF5ssVDx68zTSueekDH+ThW68yLBc8urrkbH3E\nyx/7A7w4W9E7T5HCjd/R24EH0yXPP3uH0V5wcrpgo2t2NxO7eEnaBIpmck2I0MZBj4V0tSbfRP7N\nH/pB/DZRaNrpVS2LoVWwlJKQmls5pjGIs0QnLO6ckKaRsq/sNyOaU5MyMIJxyvFywbB0mK4Zc2ol\nBEeH42g2Z9YfkV1iTBPDbCCnSpoS66sNp/0puWT6oSOOE6Hz1FQRbZvAWEGrZbsfyUlJ+4mahBIr\nvoc+dDjf1Beda7NQ06RNhrh0VN09tj0rIv8N8C8Cb6vqdzy2Gx048E/AE+wAbYMVWvu5Ymit9vW2\nJ11ra5YRade12nIDVdsw5L4nhA4xhvmsw1jL9fUNIq32O+fMEAamPkGc0KmQNREzGCq+s1Aqwd0e\nGlZw3hNLpaR2f8nSkn42I0bwg6UGcCHjvLk9ZhQjhiIViuXZe0/T9zMu315T04Q1luXxGWE+xwfP\n6f07PHgj8ulf+/soGWeVxczz6PqGu3HLR+7e530pc391wt/44mf40FHHpybDVXzEC0/Nee6Z5/jS\nzWehGHbbPXmXMaFiDISuY7ed6HTGEOFf/94/gouJbRwRcYT5jH6+JE0T3a06oqb23brOotuKdIan\nTp/mYrdGnLIeR7bpEoMFDG5Q3EwwvUFsIZsmNXzs55x2A7N+hnOtbFSppDhRSm3H9jRRpja4ousC\ni8UKTbeKibf/7nlS4rQnx0TceqZtJe4y89Wcvhe8N2htyeFaKzXDtB9Z30zs9hV1jzXM8t8C/wXw\n84/zJgcO/JPw5Dxz4Ldc8qbDUm6n4yja9K9v29Fb9NygtbaKkzYxFME1I6/mdpivp7MdNUPNipE2\nJFiMQV2hxISKoVZIU8YHwXnB945aDLkUrIFYaxv8gEU7xYUm1sUg2FDb0OCQqVgY25AHSRC6wKo/\nwTiDoBQMq6Mj+vkRPnRcXV2RcmS5WLG+Ouel97+fz3zmHxD6BVUr4+U5n/zgyzy43nDsLOPd55i8\n5fn+iHPd0R05Xrj3ItXu6cY3GesxD9aP2I4TT89nJCwPs/JMWPCvfvx7+ehzL7G5ugYRrLGI75hi\nZN71eGcoKSICwXt2+zWd73CrJbN4hiLE3Y5gBrxsyVOk5IL3Htu3XEISQeIOj6VzrVSxVbSAcYW8\nn4hxpNTMbr9BUYo1LM4G3MyyWh3h1FF2kNcRSyCnG0oGEd/Et8jYbqCbO5wXrCsYZ4BKViHHQi5Q\ns6HoSJibx7dnVf/mrfztgQO/53iCHaBCkWbGkeahG2kmsCVAv3JhS4i26WlC1owgPHp4zb27M1Ly\nBN/i2Z117VgoisNjS25ecynkkkm3E+G9WFJO2CxUZ5FesKJMouBashNpioPVCSYoYQnGQ7WK9K1Z\nRUQoMVCjRbXwwnNPcbI8olRHzi28cnn+iGm/4cWXXiYMHc/de47f/PRnMc7x6pff4N5z72fpEq7v\nCBjmyzPuZ4jjjo+9+AJZhJqU6xTx88CdxV3yzZYXTp+i3O/4crdg3I3MuxnGDfydz32eT9x5jj/8\nke/ien2Bn80Y1zuG4yXDMGs5gJSompGi5JrZx4xxQswJMBwv5xAz/dUaa8OtTjrtew0W60urKU9A\nTiiF6DKp20MS1BamHLmZLoj5mjxW9mPk0eacXR15n3sW41vdv/eWzndtdqcYqIY8RfZjRFxgtupx\nKjifcN424TUytdo2yDop+3FLjC05bb19Ulv6wIEnyhNsGmrW+ithleajl1uFRGmva21zP2tLfOWq\nGNOmtU9j4fJyRzdzBO+wnSd4j1Ylp+bLN4ldQ+c7YlGoCe8szt5qbmtCq1BKgeox4XaEnTMw1jbw\nISi4NtjBWI/6iliwOGLKOOnIBp5/5g7P3L2HMxVvHNY7TBTG7Z79+pppv2V5csZ+v+N6uyaNW9DC\nh7/zO8lXb5J2G/bba6xp5ZkpV+7fPSPXgpPASUkYKg7lu4+e59IPfO7BQ77t5CnsMkEYuN5HPvrc\nff6Vj/8x6rhjv5+oueB9U3XUWkGVoesoeaLU29r52++61NrCQeqxzuKCx7n2XVkxiCj2dphEG3zR\nGpvUwtZFgilkK+T9xCau2cYdWjMlCzVX9nHk3D3i/v071FiZ9olh6OmHpgmfU6bWdnBrbUJc1nms\nLfjgMU4x5nawhdYmX5BaPM55A0no5/7JbelbfvEXf/GrP7/88su8/PLLT/DTHPj9zHq9Zr1ev6tr\n39GYf62kj4icAn8ZeAF4BfhhVb26fe9ngH+LNlP+T6nq//q1120iUKqKoUmiqmlGvNbaHtURklS0\ntjn2apRSBaltcOZrr15TTaaqcrQIDNaTcxsibIwFVWoBsHhjW3ghBOb9gGplO67bMOdscNbQ+0CU\nTMrAREtymtvhFs5SXMYYRcggDqk9McPgPR944f08e3KPo9WAicLTzz/LG69liusInWd5NGe73fDZ\nT3+GP/jPfZLPfvrXoez4/Kf+Ht/1bS8x9D3T1Rpbt3TzGV/60is8/cwzxJjY18r65oJ7p6fMjzru\n18SLd7+FYwZeu3yL02GJdp7dSeBZt2S1KFy9fYHmdghaaTNQVZWhm0PJ1FoYpw3iPON+YjZvQmHJ\nRkJncZ2lm3uOFwsqRzzcRrwIVWDMCRsL+WZCRZiHjvW0xVeH2jVFCpFCLZmSWyVSKRWNwuZqy/n5\nFcvlMfubLVKVaRyxJjT5g1RbD4Cx7bO7Shc8zoH3BmtNm06kELwjThljPP1QICzx3ZMX2vqBH/iB\nJ/0RDnyDsFwuWS6XX3391ltv/Y7XvhvP/GslfX4a+CVV/c9E5KduX/+0iHyYNkrrw7RJ5v+biHxI\nVb9mvZhIG+LcRpoF5LZ+W1QpVVFuJVwNZCquGrJmjG3VMDEWHj3csOh886xnM+IUqdU2ZcNSsFrp\nHFQxGO/ou8DMdnQhUEUo5abVkIshm4JxhfncUVKmFIeIYkwF7ZCYKLYgxVN9RUWwtdANc56+c8Ti\nZEVyCc2KEVgsl3zxy79BHNccn54yLOZ86EMv85lPf5rTu3dZBDiee8acePD515ktPcuuxztHP1/x\naLOj7HdcXl2Sa221+VbZTRP9ouel9z3P2ekxr375FZ45u8ubF2v6O3fQ4nBujh33iHdt5Jz36JSI\ncUO/mFOqwZqezc0GMZk4OVzX8+buglIStoP5vGOxmrMbO2bDEuYTeW8pgKYmk2aDQ6uScuEygYaM\nCwlsxXolRaEUocQ2o9SUxKtffJXlbIVm4eZyhymV/X7TSlMtiBX6rsdgEWrLlUhoM1FvK55aDkVA\nK2IyxQp9b7B29i629IED33i8ozH/HZI+fwL4vtuf/3vgr9MM+g8B/4Pq/8fem/7alt53Xp9nXtMe\nznDHGm6Vy44dD22bzJ3QoiGgRrSEFAGiBShITV40gqCWQLxA4g2oheg3/AMRYpAQYQpE3YLutJIm\nUcc2bsdxbJeryi5XuaruPXc40x7W8Iy8WKcqgcTB7VT1DfH5Svecfc7ZWmvfc579W7/1e75DCcAb\nQohvAj8OfO67HBslFFy5BM4jhjS/YfM8ly5FIoogZzH7Y4t5NCOFhJLpLz3bpaeIiCyF5P08Yomz\nShA5b/6pPHd1lTF0dYs2mgOuNs/SRPABKRMg5mLSFIJXjMWTsmDYZYzVIBRBClQ1Qgk4bchqQC8k\ndinx20DnGqp1hRSW+4sHeN8z9QMpZU6ePGZhHT/0mY9y8eornG4vqfcFbRsuLy+IdpwVn3k2kFJW\nopQhiEzKUIpivTzkO995g8bU1G3LUbfi8vIMKzTPfuhFHj16RAgR17aEUlDFotBoFLZZEoc9KUR2\n+x3j2GPcvJGsjeM7FydcpHP2456UJ4wtNJ3l4kJSroqykJkoI/IqqENKhYyKZDy6KJAK0iyoKiVR\nooIiyTkggHHneevbb1JCwjpLDondbkMucY75A3IOV/NxiVGKECNFZKyes0198MiiiGU2IzPOIq5G\nMB8UhBD/3dW6PxJCvAX8x6WU//IDO+E1rvGPgO93Zn6rlPKufddD4NbV47v8Pwv328wd+h/GleJT\n5PKepH8OhZg7dinnxBySJpOuPL1n+qIoBZULMSfiJNid9xAkqR9QzqKFolCIeaCU2TNdKcHkA845\nnDM4ZyiuZcyBizNPCoVUEqbJCJFRToMZEd5BP2+IhkEjlUeYgCgSbQvWZkyViER82HIwLaEKnJ5u\n2G08U7+hZEm9PGbvt6yXjtZZXvvC51neWFIPYWa9LBpi2iJMxfbyjJAi9XLFc0c3uTzdUleOxxdn\n+OQRRXBy/zv8xE/+ed566ztUVYXEUjvNNO4QpczjJj9QVR1SGrSpZmEWAtsuyDnhrCP4kTAF6OYx\n0+uvv84TcYbSkiH2hOSRKBbNkt5GvJ9IWlNQGF0wWYEX5BzRUoKGGGa+UQ4RJoHMEpJg3puUlATb\n3cDp+RlOzOOUadox+YEsQFkFZDRz159SIqUMRiLSVXZszsiSEcVQciKLjJHmXZrUB4JSynV48zX+\n1OJPvAFaSilCiD/uLfRH/+yqKBcpoEhI5SqkYuYn5wRFFkRJiKAwenZINBrmSN/CVTonpw/3hF4z\ndJK6yVSVRmlFFABpns0LgRSCcZpYdBlhJK2s2FU1Wk8MlxNjyFSiYGwmkZBaIMdCHt7lk8NYEq6S\nCKnQKhBK5sBZtuMZrTa8sH6W7ZMnPHr7EbvdQImRYb9nHC557oW7xOjZ+ku0hKU54rXtJTZlbty4\nRxs35AyPHz9ktboBRTKkgLGWpq6onKGu15ydPeb41i36aeTy8pLdfsdifYCTkml7yeb8AmNaZDEI\nocg54v0c6pytJhZFu1wxTHsQZbbnRbC4ccT/9etfQnaKuqlJbkI7yRRHYvbUdYM3mT4ElCxYYaiu\n2CMxR3IEIwTSFYScBVlON/gU0RSKkuQExhpa1xJ2E4GCUhDjSIgRpRS5JIxWiJzIcV5BIUZEkRSV\nSSW9F7ghESDnBiDnjNIfXGd+jWv8acb3W8wfCiFul1JOhBB3gEdX338HeO4PPO/Zq+/9IVzcvwQE\nFLCNw9QzCyFfRetoCSJpkogUIRBJgpSUlFBKEMu8sUbO5CEwpEjMjlICOYFzBVSmMBfeUmaVod/3\n5NUaiUAJRV1pnLP0QhC2mZQCzVJT1EyflBSKnzfcpuxBaIIq2CSQosIiCVIQ/AUh3GHq9yzqJafm\ngsfvvHo1909sNxu0vMu23yHCHi3hc5/7LT716U/Tn2/46tc+x4v3XiKHAWSiCIFtHNvLPTlM+Elg\nrGHTb9DG0hysyWLutMdpovaBoC2XTx6jnGGKA9ZV7EaPoqBzJluJVhWCic3ZjrZpOL84o23XjKHH\nVjXvvPaYndxjGkV3w1EtJbYzhCljZUPfjyQhkAqMTLSmgDBsp4EpztRSgwYlMaUiEmeVgM6YYphK\nQAmFKpLoZ9FUjIVhjIQYMKaglEJETchpDmm+8rzPSaKkIqWrIPAiyEQuTkYu39wjr0Ktr3GNH0R8\nv23M/wb8/NXjnwd+5Q98/18VQlghxIvAR4Av/FEHWN9dsH5mwfpOR7UwvCcjEgCzOAhRKFlCyoiU\nETmjEIgiUDNzkBLT7OwXBGGX6DeJ/Tax30f8qEiToHggCMKYSQlCeFf6XjBCY4RECkGYAn4r6C8z\n434ihgIkcsxXFD9Ju9B0rcLZCqtmlktJgr3PmElze3GMVJH1esHy4AAfPEoqtDHsdjvG7TlKGHbb\nCyiZ3/nyl7jYbzk4us3p2flMuxxmp8D95cBmvyflSLiS3z98+BCpJEVoHpycsO+3tG1DLpkYI916\nxbjZIUshxJH1wRIpLdvNgEgVl/ffZvfoCTpH+n2Psy2jH7l98zaqa3j7ySXbx3OB3Dy8ZLOZ4+eq\nyuJMzZ2jO0xjZAoeY0GIidoWKmswolyFhcwxfCpnKlFhdY0zDikztXVz3JuPiChIQTGOCT8mZBLI\nolEoUinIAt5HYsxwdUEOKc0boRkEBorm5r0jXvrJYz75s8/zmX/u3ve5pK9xjf9/43uhJr676XP8\n7qYP8J8BvyyE+KtcURMBSilfF0L8MvB1IAL/dpkNVv4QCrMl7cyJEAgpKHFmiIAkhXkzslx1Wu/R\nFaMkqTILjXJB5pmTHFMg5ky+OnhJkGuwlfx9LruQRC9Jscz0RS2wyuKkpq4cVWXph8zUJwgCIQNa\nSaQVlKJwnaRZGKSWaFWwtcPWGVVJ+rGwLz3TzRGRC34c0XWFULPXTE6eYRzQJbPdnXLz+Dbn5/c5\nbhas1musBp8tIRZQmmHa89bbr7Jc36BzsDnbsj44QElF1bZMY8/l6YOZIonCth1KSk4fP2F5sGCI\niqpp8WOkaY/oFpo4bZHaQolMXrC93HFweMBmd0Fda07DiJAJZy3aitnAKhQudzuWbYtVULkKKzXD\nlQFYrQTZeqSKqJwQelbnGglO1ShpyGiy0UgfmaZIjHHOek2C4DMhjSgBunZoqZFFXPncS2pniGmm\nmxqnr9bBfA4pDELC5eNLKIK2m1fTNa7xg4jvhc3y3TZ9fva7PP9vAH/jezl5ARSKrGZGgpDlXXX/\nux+ujjl37EJIMrNIJCcBUaCKZsoTqkhKTKQBhpgRncUIyFIj1FWwMQnl5sCHMAVk0Egk1jiOVmvO\nz3fsNxMhgKg0VTPz1YXLQMbUEmkKSmeMTTS1w7QZYWenxCF7tvs9cb+lqmcDrKquWK5qku8xorDb\n7ehqxaOTt3nm7rOM2y3FR7CKrr3D5cWbHN24hVGWB/ff5vadhmG3IYSJRw+fcHzrBg9PHrBuF5w8\nuM+tOy+CqRhGD2HCykLvM1XbzZz6gwMuTneMu0zse6TwpDCx3Q8cHd5ic3pGTCMhZ37rt/4+9cJh\nxWwvW4qiRME4DQgdWeqbOCuxqmLfj2wfRepnBTZNGAu+JHLJWFvhsqYSmpwKgjmwW4iMUgqZIfae\nGApj70EWmoUlF0ixoKydXTKlwFlFLQo+RKY4Z40qaWc2U8pIpTg4OkCpgJIOIZ6+Be4fFA39SfC9\nikW+F1xeXr5vx/rqV7/6vh3rwYMH79uxAL5L7/h94f18be/n7/+74Sm7JkISQC5IBEXNRlo5ZdJ7\nz7xyVkSQYkZq5uqOuGKsxHkDFTXznaeMKQo/gbMFVQo6z/PVuaNWpBSJsZBTwlUGbRWCmtVyxfbx\nI0IpSCdwtZtZFHJ2HBRzagYlC4SyyFqhHKAiVlgOaNnvdoybS/wUqauKVAmk8FSHDVJGCh6fJc40\n7Pc7lssDtv2eo+M79OM5xi6onES4itW4ZQwJnyMZwebynOM7dzl9+3XknReoq4aq6ajbA/rpglQg\nhjmQYz88wrgLDqnQRjDtAv1uRyIggsfHCFIwklgerFC24u3tlrqarYYhk0RBFIEQiTFJ9lwikmFV\nWbYbw0UfaLYGrcBqjbVi5t7HecM5CIlM891XDPPdUCnzPkkIHu/nODxBQSgHZb5byzlfpU5FtOa9\njFW4unOhUMocISjQKKVxlZ03xMVTTUK8xjWeGp5iMS9XDoiaLBPEmc1SeFeuPV9h5zf1/LUQkPPs\n2xJzpCDnwGHmhBtKQaPJvpBtJASDniI4QUETk0ApBcx2AClFpKjJUSGFxFpHu9TspoBUAqkEOUMO\niSwLJUpSgpwEky9IEa6YLR3TbsOrm9e4197BCY11iTZLVs0RMXi2u3NSkjRtg9UVi+WK1eKQNO1Y\ndQd4HzHasrl8AtQYXbNarYlhD2XujlXV8ej+CbZtWR4fcn62plus6fsBowTb3ZbKtuz8iCqBdnmD\n4LeMfWG76ZliIA8TMUds25BEoD06wFYas1ziG8ntW7fZbp8QQ8Yz56CmWMgq0HNOW61ZHiq6nSAV\nTUgKj0Z5ha6vWEZ5LtyUERE1oghCzkzTNDNq/Py3N0Ix+h7X6j+g2oUY4xxFBzgnkCohKkGRYLMj\n54hzljAmhFBYqxCyMHkwWnz3RXeNa/wZxlP1ZskIREmzmo9ZhSmuAiNkEVcFXF45IxaKuJL/C4ks\ncu6aAZCg5tR4nxJCFUxSTFNAazlHx6mEdVeba1HgS8RPCVKPEHNIhkZQ1RU+J0hzN0mcuc0pCeIo\nmcIsU2+ipe8zdVchlSAVy4aefugRSrGoairb4P0WazUxRfw0cbi6xfZyw2qxQOIJpdA1a5qjG0yX\nD+mqjpBGbh8s2TKho2K/Gbi4uOCHf/izvPrK7/Ezf+FnoWRu377Ldrshjj0hekxdsx3OiT4gmwNO\nLy44vtHiqgZpPMfr27zzyjcxbU3O8+9Zi0xXN/zek0e8fPYKqlZ06nDumvc7tI5EaVAls/UTnfW0\njWK10jy+zEypEHJGIqnRlJKIJNIUScUTxkBOc/h2ygmjBKRy1X3PF26BIJSApKC0nT3rZcBqR73Q\nOJtRyuGsgOyQai7YZQFd113F2imEkFcX62tc4wcPT5WUKxG/zxcWAqETvDtguRIQSSlARKTK75lw\n5fxuEZ/zMaUq2KqgXJkDh7NA29n9cBgCk0/4KSPkHGYw+2vPkXSbzY5pyuz3PSFklNTMIxwog8Dv\nM1MPaYRpFxg3iXGT2Z4H9heR3TYx7DJ5MHRuha0cdV1jlCbngFGGlNKcvtN1DH3kzu3niKHQtSs+\n/vFP8eTshLt37xCLxocJU6/oug5KRsoOKQvt4oC2bbn7/Ieo2262GRAGbRwYg65W+HH+fzmzYL/b\nYYwgxMg4bGk6hzECYQ1nTx5TSqLfXGKcY337Fq+cvYXXAeMa0IoiBcrZWYGbwShHi8LoiKsC3Vph\nqkKRkXGMc6ZnyPPehZ+To6YxMewGNpc7dhcbxt1E9DMjpZQ0K0zNfKelVEFZiaslbatoF5b1UYWr\nA+1CUNWSZiGpF4a2M7hKsFrVWCNQVaZZGA5u1DSr6878Gj+YeIoWuOVqUxKUnh0RBSBkIReBLAJK\nQcjy3px77tTnW/HEzBsXMqKMRpmMk5HoM04bjJvVJv02Y6Qia5hSpMoWskAKjRKGcexJk2CaBqZx\nP2/CpitPkRwRk0AISBoUmZIEOQp8KGyfJLROVHWhVh2VrGi7Q+Swo5SErQyxj8QQqauaplkyek/T\nNRwcHrNarUEann/pY3zx81/kxq0V/aMRy+FsAFYstZ3VnMM4sNlteeaZe5iq4uLsnXlerCP16hid\nRt7Z71BCIaXHOYdEEvxIKYKj2wc8euOMzfacupL4aaKylu2jE7p/9l/g7d/7W4isZnfFNPvKK52R\nxuKTx6ERImGUQsV+7o5VISrJJMGRMb4QzEwdLUkSpjw7WJZCTAmnLSKBkYpiJabMNrZFZbquoesM\nzgmsmQNGtJJoLRFkwJOCIqSJLOdgCqkL6Nm7HpGJJZJK+uMX3jWu8WcUT60zL2kenSCYrWZV+f2o\nilJAFIQUaAPGKKw1V9FizOpRId9TL7YLiTHgGo2SBu0SVS1wTkEupJBm462Yrtz3NClFlssF6/WK\ni+0F+8stIYQ54m3MpK2g7AQxFHKQaAHrlcE5NY94JkH/uHD+zsT2NBIn+PSNjyLDQD9uKCKj5TwK\nss4ilcGHgBQgqiW2XXC53fHqN77C2aMTnnnuBX7tN36dJB26bohZUDWOnA1n5+fce+6HyHnmko/j\nSCkNWcJ6cYPDG7e4uLikkokY92g1R6vttqfst2e4WpH9wMXZI6bdFpTDDzuQkmefvYdeHCGiYjvs\n6YceHzzDNCKAPvmrsZPAFoMWiVTilUAnozQIImPwM5tllOAN3gtSkEihAEXjOhrTzN70uVBbQ20y\nba1YrWua2tBUDmcNRjlkkRQPaZJEn9BYnJ59XKYpMQxhfr1jTwgj/bBl3++ZfPjA1qwQ4jkhxK8L\nIb4mhPiqEOIXP7CTXeMa/4h4ap25YKYaCpmvnPDkrNgsCkS+UhkWFitDjorgM1rPAqGSr5SiQlF1\nsFg5zi8DSkVMLUEIrIUwe8BSCoScqEpE6oCQEWNa1l1FWdVs+55HDx6iXCFPfvbqiwmUREmJFJn1\nDUx/IjEAACAASURBVEO1iNSLBbtTwfZih99E6soSleT20YIcJcFPWG3Q0nF5ekoIsx9MVdd471HC\nMm4u+NK3v807b53wnXdOiD5x6+Zv89P/1M/y8PEFK6XZb3eM48Sqrjg8XKGM4M7t27SrI6QRhGI4\nvzjhY596kTRN5BjRKnO8PCJFxeVuQ+ssGMO0n4hTZIw7Fsc1Qhpu3V3SHqzRFUDkzTff5uzynP3l\nlhQ8VgtEbcllB0nT+5HaVYiS0cphrUBUEl3EbE8cExlDZv4sUiKjUUIgmSmFRRSUnC/IdWuRWWMr\nw1QiOQXiIOiqg9m6OCZ8mC+8gYAKnl0vudwmzvc7gvdoZRBKIaREykzbVjT1B+pnHoC/Xkr5shCi\nA/6hEOLvllJe/iBPeo1rfC94imMW5u4OgZBAmcMRZgikTNR1R7cQ5GTYbnfYpIk5gypYofCTpzt0\n1F3mcueRahYJJS8QOmJEmSPjckEWiTEWZSYymkV3wHK1oGtramsZ04YxnqMryCiUEQTkHPzsBKtn\nBetVy+5UkoZC9AqtLKvqiCwD/S7y+OItDo6eQ0vH2fkTQr+nbVtyKWTfUxvLlCUPnox8+2Tg62+c\n07bHfOvh63ztrXOE+QKrO7dZbT3LznD74CaTv8QohTOWUEAbS1KKMSZM3WLqms2TR9y+fcyYC1ZX\n7PuBQzWbku03l5RqwJmKo5tHqGy4f/8xybQsDhYEDDlcMEw9+7Md425EmZliWMjMHxM+Q4gjUneI\nNMvztVZIK1CVnlOLUpij9kqAcrUnIgVSSYydPWKEsOQ8kUukriTWZsiFKQikVMQY0KJm2sPQB/wY\nWC8DsfT03rDdeDabhLpSwQoKj8/OmKae5YGj69oPcM2WE+Dk6vFOCPEys7ncdTG/xlPH0xuzFK5S\nZWbDLBBz5idXc3KjaLqBeqFp1gphFaYGYwXWKqrKoI3ANhLpIqqefVRMJZCygNW42rI8NkiTkGSE\nzQhd8HGLTxNNW7FYrXjmuTs88/xNbFeoDwT1KnPzw4I7H5HcfNFx9JxjeVNiXcLUBbfUVIeCG8/d\n4PD2khs3jqGDumkZQmIIeza7S3q/53Jzzsmj13nw5re4/+bLlHHLzXuf4rTP3L73Mf76f/SfcPve\nj9Pdfo5f/fUv0rUrXvrQR9mkQiqBNHlyhLqtaJoWt1igzIptSNx65gUePtmiukO6Wx/m4MZzuGaN\nvOLhay1pGk1lFU1bce+5Y4a0o6oNt28foESgbiVEwcmjx+xPd4gouLm6zc31XfSVyCeL2SelFEOh\nUFVzrqpUAqFn62JjJULKmbXi0+ycqCNCBJTKQMFaTSkTMQr6foeQI4lxvttKIIpEBYUOmrpUKCnw\nfWLyA0VItNpjZCD0e/w+ESePypo7Bze5d/vD7Dbw5GL8x7J+r2yhPwt8/h/LCa9xjf8PPFVqokAi\nZHmPepjzTE8suWCMxtiAqebuUCuF0LNLohCSLMXMzpAzP7lpKnyMSFVQpuCqubjXqwJhzug0misB\nS2C/mQAwTqOsZH1wyPngSN6j15p2UcjK0o8JbWpMNZB8wDQdevLoArqG5bomqUKYdsSYUEaSUsAo\nzRA80zQRfWSzH1kdLnj2pU/xe2/3VOsFv/mbv8NfevCQ3335y+B33Di8RRozw+QRORNjxgfPrVvH\ndE1HVTuKWZJT4BOf+AQP33yDy7MzXKtZr45xpsG0EVlusdme46eBAmilqLsFRle89JEPcf/Nd1Bq\n9hpfLI44+84blNHTqArrLE4ZFlXDflqwLwOlgBEOLQuyFJQxs6DHaLQps5d8nv9uhVnipfWsDZBa\nzX47JWCUReVCibOMH5XnvZGi8FPA4BmyQtkaZxtsHJl8IgSPkhMpRXLhalzlOOw6rFC01YpaV3Su\n4c3zNz/4lTuPWP5H4N8rpez+3z9/7bXX3nt8eHjI0dHRB/6arvFnE8MwMAzD9/TcpzczlwXkPEN9\nd7yilJo7dQWmEigrkMbPGZRWYFxFCuGKmjirNp0DY+fZ+uP9Hi38nFWJZLmQIDUpjchc4axGC5hC\nZH/6kHvP3+Ho5hFSypm6V0kaUyM6S7t0uGrJdjdQRMFWI2TLZAeyKzircI2gsZbJjmhqkvLENJL3\nI+M04oxh22+RxbE4avnYJ3+U+uhZfvf/+NscH3c8PH3Ev/VX/xrKSv7mf/43+V//+/+Btx4+4p90\nFu80phhk44l+RNuK7Cz1Yk1Ie377N/4eb72x4fHZEyoyTRv5+J97iU9+8rPk0sD2DJEK6+UxiYBV\nljFusFXL8y89wzQFchFUqwVvvvk6n7r3MT73yjeoXUulDV1jaahJacl53qHyQMFBsbP2VoCWCSUN\noObov5JIRWC0QaqEzHP82xxMUagbGHeJEAOJkRAFlZ2pj8NlYBCJZ247pBVokVnUjogm5YGSHaJE\npIiUFJAYrIwcrI5Z1YfkAqvlIXVt+F1e++OW3p9s3QphgP8J+G9LKb/yRz3nOvPzGu8X6rqmruv3\nvj4/P/+uz31qxfzmnYazs2FmtMgrKiIzZdHognURYzSuBiESbTu7E+q2wXuPlIrtpqC0x1qHJGOc\nJHmJNRIhPNlUVG1GW02lW7S0KBmJvnD+YOLRozNu3tlDkWgtaOoOFyxK1KwXFaqai9boRwyWojMq\nFZSdMJXCmoJwYCporzY493FCJY9VmnGaCDGhhESaBbZZsugafBg4eRgIQcwOgjny5/6Jz/LFz30R\nGZ9giqTf7UnrI6qqRtYVi/Ux1eoIDygheO31ntf7yHMf+Qz7y0te/drLNO6SxfId1ouaqu7mqDgE\nhsK+P2PZHSG1xo8TSmba5QpVN/yD3/ttvv3Wm9w9vMOTzQnGLbBO0RXD1BtU1FSqhZSwUlK7hoVN\nKLnDpJn/n8WcBqWFQSlJUye0TIyDR1ChBagyYFRi8hFtLSEktMzkNLNicnL46ABBYh7xLGyDLy1C\n9ahSqJSirgO1AKH37Mf73Dq6hZwcojbcEOsPbM2K2c/hl4Cvl1L+iw/sRNe4xveBpzYzv3XrJkdH\n7Wy8VOBdvw1BYbGsuHFjQWUdUhaUKtRdwVYZ12TapWOx6FisLFW1QBsBQiB1IaPp1hbjLLJI6lqy\nPnTYOqIriLFQpowymfOHPScnJ1xcPpqtZN2C2lmcW6JlgzKGwogU6mojz6BRNK1m2TUYbSjSY/Q8\n6z+oOiolcUhyyYRhS1N1rA8WHB0uaaxCpolPfuyjPHv3eQ5aSQqeGCb+w1/8a3zrtW9AKkgjWC8X\nZBUIKWCqFm0NarVAS8eXP/cNXrnY8wv/zi/ya7/1Ff6Xv/V3+PjP/Bjf2GlyXlMwswpUaiBhbYPV\nDTFGnFA4K+mahvXqgPMnZ7xx/1VuH92iqizdomb0Txj9ntrOkvulckgm6txhjKZpHXfvHPPx5z9C\n69azJW3M1LqiUi3domHR1CxqTW1riKBFQcmAtRU3Dw5YOIcUiSL3ZCJV3dDWx+z3ge04h4MoEzE6\ns+hWtHWLkAXlMstO0q4VdS2wVeJbD76CdJmcB6z7QNksPw3868BfFEL8ztW/v/RBnvAa1/he8dQ6\n84ODlraqeb28wZPHe+awIomrLE0XqVuFM7M/SikDTbNgFwNVbfE+YlVmIRuqOqCNZRz6WRGpE21r\nQQ5oA0p1WFdISgKBfjfMxQdNTIVxFyBFJr9DGD139bJGC4vOBsUJqMLCLRmypzBhbcWqPiCNFm0K\nWk9IZiHSOASKH8k+EjNUtkYZy+VmxyuvfImXdM1PfeJ5/tNf+mX+mb/407z88jdZHSz5zGc+y3/z\nX//P/OU//y9DAKMN1gj82ROqm88QU0ZEjVOO3/jCV7hz6xb/0l/5BYb9jt3mlNe+9TovfviTiO55\nLPcZ/Ejoe1arAzKZAvjQE0KLUgYpE+29j3H68AGNMjS3V5xf7NHNim1+CLKnMhlnDUJHEBVOKBZV\nRdUaOtfhuiXLgwUPn5zx6OFjFtbQNWvapaaULSJJor8kjbPLYSoB5yQq16TSIOQeazNTibSdRcSG\n9eqIfurZRU/jwNYCZQq2lrM2WCuObuhZ6eoKmoqkCg8u3uR4fUjK/Qe2Zkspv8VTVk1f4xrfDU+t\nmK+7jugEQzhm8JFhIyhMFEZcbWma2TQpCD17ZauEGgQhDnAVAt3UlqYGZxTJO870lhw1tpKU7JAW\nrBA0rmVkYgg7Yu4Jk6Xkkefv3sVlS0mJFKB2hpQTMvdQWpQaaCvJ6CFHkNIgSqZSFeuqJasKY0Aq\nSchb9sOGKmnksEcrS9Jqnh2XQJ4mii5882tfoD6+w7//b/wc/9Wv/Cqf+sxHGS96fv1//z/5d//N\nn6O1mu3mMbURKGvoDtYoaXBVja4b7n/1Tf7OF77IT/74j7C7eDgHQZ8/4qd+5Ee5fedDvP7OW7RH\nO95+9XfQtePi4oIXP/RRLraPWS2P6Ydzbty4OxP9nUGWyIdv3uWN0xNuHt1i2hum6QwpQQqN1QGn\nKiqnERmECiyWNVV7TJcs7XAIpmNRdaSpp7KOdpFIpaWkicsLR4iZaTdyeKuwWtVMynK8foExHRHV\nG1zEDU1zQBxgvezohOPNh2+AtNh1oqo1rlbEHNCqxlIjxYqcJZpu9upJI6VMFK4VoNf4wcRTK+au\nUmhVaDtD22mII/v9TEusW4F1nqoS9JeeuqooaqKIQko13o9EFTk8XKCNZdGtEUi2yx2ncY8xcwqN\ntBlNhVIOa6EPp+gqoUzi8HjB+mBJSZkkRiqtMVoSQiamCR/3ODFH1GmTISiEsDiTsVpRm4osa5xd\nkuVjfBJs44RTFmsdQz9g644cC75MTOPIZnvGxz9+g0ZmvvP2N/jX/vmfmROBkiD+01vGfWIYMxdn\nD5iMQq0aVnWFFwXhE+LJCSdPzlCq5vDoGGsUOXh+7l/5Kzz/4rOk4lm4hhd/+Bn6J69z/8Hb+NJz\ncvKAtm3Zbs9plwtSyXRVA+NE3j6hqRx1lXkUnqCMoC0dRe9o2jXrKy947QS6gJQDPlccryqKXmMu\nBnwyVHJB9hZBxlWRlHp8XzAqIbMkhow2hcyeu3c+hUZRpprl4hYh9KR8TtPVZHYoA8uFYkobsgCh\nPUp52qXEDxIpJ8gDUqxnewUpUEiK8NTNtTfLNX4w8dSKuRSRIDOucrSdRBQ9Gy4JiSgjyEyREm2a\nOShBBZrO0G8nvI+MKVK3kuWhwlQVK9Fy6yiR8n3qNlFEjyxH1HpBSCMFxWKxIIYti8PCvZvPY51F\nicK+97TNksSIEBNDv8HompB6IGKkJMmMVYbKgTWSLDLaKGrTUuxE3gdMcYyTRwtJRHNwsEThsCIi\nlGLzZMfDh29xQyheOlgSxOzF/vHPfIr733mLh2+8yjuP3+LG4Q8hSqGuDP1+ZFVPhJSIMVE3lh9+\n4S4Rz2c+8XE+98Uv8/jkm/zYj/4Fmrbjo/fuUh8sef6lD7MdtiipGIctWkvatkZri59G5PEhOQyk\nYaRbrDj2NzE5cx5GRLBEt2LR3ODgqOLh+XfIGKbtFmMjRQpKtWC5WJAZuYFFlsDU7/FjAenRMlKU\npTKOysKoa0qZZm65vI8WL9A0HdJMNE3Ffjui3ewiqRIcLisutiOIRBEOsLRtwuiEsZnsJTkFsk8o\nE1CiRjIRy/UU5Bo/mHhqxTykgXHqESJSN4I4FdrWkMuEMRKlZq5yZRVCFUoWWKfwg8SYRNGKnAMh\neEBQV45bt55n0R6R9RuUolCyQ8oWXyTjtEUZT91K6grW3RKpBrRr8MNAY5coJRn8E8Z0QZ1aNpsJ\n1EAOFdY2IBJSFIwqyJKwTqKNQ6oDhBMcHd3B5UzY7GkXgVIEpq64ODshTz0Hh0fU7QH7aUsXaiqx\n5+j4Ng9e/Qq77SUxDtSu8PD+m9y5c5NHb++puwXy/BSpDb4/w1R3+Q9+4S/zS7/89/ipn/4xXnjp\nFs8982E22wvW+5FnP/sc0/YNchg4PDjidLNnHD3tyiB1g5AapRXSLCmP38EAd82CannM65sTQiOJ\nMiKaIw6WN7Gmw1UNb73zMlY7dttTUjKkpJmip20awjBwdGg5ixbSOSVOGD0QZY+rC3UrGQaL0gUh\nEyGeYu2KVDzeP6btKhp7wDjM7KScPMZYVk1NCBfovKRxHdYcQThHuj2eQg4RISW5RLSKkA2iXFvg\nXuMHE0+tmPfDJfvpEqE0TQM5OGoTMe4Q7UaUHRBa4mpNChasIDMhpcJYjSwWa+b8T8QcH1a5ioVb\n44VhH75B626TkfTTSMieKQqqRqOpcN1jvNdsLs8hZ6ysQM92u0papBLEFJGAjxNKWaxpcZWgspoi\nIkbPdxOowAP/Dba7S55Rd9AxEEPCWss0DmQMRSgenDxGKmi7Q4QynF5ecrHbk/2AwLDf7Nj5wsX2\nlJtHNyhtjRQK70f8xSVn5zvuPjNi3B1+/l/8LG/eH/jK+Rq76fns7UN+/Cc+TT1e8tZrX+Xi4hKh\nLYuVxWjP5eUZXVcjVYdUhrI+oGwfkaOnNYreVFTtAUZc0MojdmLLsruJUZqD9W3OH53Qp1NClvhh\niyeh/Z6SEipfMHiFVY5iWrb9OVZNKG1oOiDVTNMZMUpCiOzSOePwNs5VaFNom4qsEkYvyamg5BE+\nPqatD+n9JUoJpFBo0WBUJqWAUgktWko2mDSQgqeuFsTx6aeAvl+xaovF4n05DvC++rzfu/f+hWb/\ncbzp7wef//z7J8g9OTl534612/0hbdn7jqdWzKdpJDNCKlhjcJXE1g1GW5yJWAwiBipaQlGgIOsJ\n4wIxaoyU1E0FAkLa44dvc2P1I1gcXX2TdH6BkjVGKQafUDoi0ohUEik86MTYP2Cc9kjRIXVE6Qpt\nE00ryGJEFMPoBWlMGAWrZYUYJ+rKMYUzfJk3Wg09UgqcrdhPAzZHiHMYRZhGLnfnuKrm+eef48nj\nJ5ydnnJ+esYLH3qJySeOD28y7C9IRChwcHiTdnkXciAWRX95wXZzRt119OenrG7d4N6P/gziH/x9\nXnrhkywOj1gfdGzOT/mHf/fXSP1Ad7AkxoQWgsWy5fRiR9M0UBIHd+6BnS1ynZLEXFg0HbeZndy3\nYmQhF2zHM+4e3aPuOj790Z/hm298nvPdY0b/DtP2Q8j1ElMkIdaI7NFoKAfYzrDtXwMZMcrQLBLH\nYcVuSJSUGUNAiz0gUVpBVjhnibkhZYHUiVY+h5Q9wt3AGojJMwWPDwMxBYzuEMkgRIWsA33uKWJE\nq+ZpLelrXOOp4inGxgFItARkoW01hgqjJaZyUMIcMQYIYygloqVEqoy2ic41LJyhiMI4PqE2R/TT\nQ3T9LEaC1oYx7FBKgInY4q6KhSfR4/PEYnkHRGCcLjEKsvQsmmNyeYiiYkwjMRqymhPhU8wY2ZHi\nnqrSpHJJCTsQE3VpWVVrtFToEtluHxCGkc1FT4iBEuCtb72OVIp+v2PXB1586UPce+4OzimeZE/T\nRoYycXjzGZrlISkEtJEkAmV3gZQaKT2rpmb/xivcPuqYpsDDb3+Zt78xYbWiajqenD9A95CpsG3N\no0ePoRTOTk6495GPIg4PYL8hl0KYBnLwVM6xkJqhtIxJMurCfveI/aLjWByij+7SD5/A1hO7+E38\n7hTrHEVacmnwuzOEvzH7uKuGTj/L5fQWuXhKtrSdQrJiCAHvN5hWzZF0ITFNPUquKGJEihY/RW4c\ntvT7fnZljJcgFVPOxBTIscK6A6RJJF9ANCB6NttHrJrnn9aSvsY1niqeHpvFVBgkQkVyziThca5G\na4GQhpQsOZer8GaNcgKtK5QYMarGGIvWFUVuKezxCcKkZl53gJgz07RFKYgx4eyCXKCEQsgj1rRo\nIVgf3eTB42+QxSXQopTDWEHyPQWNFookKpRKTGPAmRqEAenJKRHjAEiCz0wmgBCU7On3I9N+zzBM\nFG2onGNPprYGkSW6XaHrQ/ZJc/K45/atFwjuki/92ud48s7L3P+a4/kXP4lt1rQHN3HNEW2d0WrJ\nfvOY6vmP8PhLLyOtpnhPUzlO3nkH6wTt8piqrtntdmhabhyt8DFSYqLplqRhjx52kKFtWvbbCxyZ\nThtyCEhZYaeJLGqenH6Zo8VtKnFIVTWsOSD3Nwn9Ewa7wgpPCJGkMpv+DTpzE6EUOSW0aAiMsyLU\ndSTfs91PKNlhdY0UhlIKu/0pUnaQO1IvUUXiB4+1ljwZkoc+epKYQHugINQaUQy59Axjf0Vf7dlM\nrzytJX2NazxVPLVibmyFwKK0x8c9U8ngPFJpyIWSAwKHFHr2OC+FgsVYgUgdlbEok0FmpH031WbL\ndn+C0GsoE1OaEOM8AhFR0Jo1wlj2+y2qWAoRZQztQrPzG5wtc06ltCinKaMkaovKc2pRyp7oIyoV\nhErEVCAn9vs9Y+6JSXNoVySVaBer2X9GOVIR9P2Wul1wvu+xpuPjn/40BzduoZ3m2Rc/RtVUxHce\n8OwLH+I3f/Xr/MSnbvLmK7/Dzbsv8ODBt7j97A9ha8HBukFbSXV8zDOf/kne+fqXWa4XxGlg3bWE\nDALJPkS6w5tM00RVLSn7LbazsyLTtpQnJ6QwMI4Di3pOvV9rwzhOLIqmq44I5ZSTOPLm219jUT1D\n8IkYJbrUDLsdyk54uUNh2W335HLKxXTBWi9QFsomQp55+MpZmmUilBU5RtqmYZriXJCBaRwQosxe\n6qLm7PRtlgcLfLwgy4hMs++LSA7jFIKANjUxjnPEHx5TSRAfXDjFNa7xpxlPrZhroUA6hACjIyp6\nYh5x1vB/s/dmMbel+XnX7x3XtMdvPGOdquqq6m7b7cR2xwkRgcTECkJOyBVESDgi3KBcwBUi5o6b\nCHGDhJBAkBvCRUQgggARIQkOKDZOHMvtod3uqbq6qs45dYZv2NOa3pGLddzpeOiuNn36ROnvJy3p\n095rr7Wl793vfvf/ff7PU5g1/TggpcCPHhjJIWNNSRBTlmfOCiE90giUVrghgRjw6YphUMDkihhi\nQIaGsrCkGChMg5SOruupqoDImdXijIvLRySOyEkhlUJLQTKCEDKlVSAc47CnS46qrCjGTIgJIzI5\nQ0gte/chjZCU5bThOT++xe3X1pTVjKZesR899157E10oVk2BNYphGHn0fEc3POfu65/gR/9wxVc/\n/2UuNh9x+2TOL3zuIX/oj/4Ey9Ml9x+csd8dmN1/B/QdzK0z9Be/yuX1BUpkQjUjHHZ0uy1nD94i\nKs1sdkSKgUoVVJXBzSx22BOcZ+j7yRdcKXwIJJGZaUvwA5u+wyhHaSyb4Uvs2itwDcaClDU5dfTb\nR0itydkyuAMGS1EKIpcUpsCWEt8KggdyR8oDdW3wftpoRgRillirGcYDSo6EtGCMmbKyDO4CKWcY\nu0DIwK57is4KAaQ0fQF470k4hLBoaZHypmnohu9PXtlkPmm1BVJpfMws6xl92GNtQWVmCBlJYkNO\nEH3EyBVGT5O9zAKtLTJDTAEjSpQKuHFgHA2FPeDHSM6J6CUhjbTRUxQrfDwgVWDsJH14xok9Z12/\nxVAp9oc91lQIkREiYbRhkB6lzbQST5mu3eFDTyMEkmIKuxAD5JGkPXbVcFbdp7JP+Mqv/xJPP3zE\n/bc/wdXmORcX1/za53+J+XzFj/7Yj3Hn5ITF/JSybnj79h3arsMWlp/8s3+WX/yf/grbq5Gf/NM/\nzZAuePDgLuUnPoHe9iS1QjXn+Kv3ORjN9eUjaC+p56fY2QKODWk8sN/vCfMFhbUUtsBYTe4DY+zJ\nWWKLhsPmCToHEJObocgSGWApJKmQHLLCFbBvd8goUdEiRcPQP0X5Eec9QgiCj+ScUaohFz1RHSiK\nBYdeIoLB6IJ+vCZnhbYJYsCNLaYsEKkgiQ0xOIyeQ1K4w4iRFaYSVLYgREdt75Dlc0LydO6C2t4m\nJkuKEaUTwUPCvrQxK4Qogf8HmITv8Ddzzj/z0m54ww3fAd92MhdC3Af+KnDGFNH53+Sc/wshxBHw\nPwAPgK8D/0bOefPiNT8D/AUgAv9+zvnv/PbrWltOqgZtUMoSwogXCq3mCBSFbRidAzFQ2obSLrCV\nRWtDLzNKgHcO5zVWzrBa4NWO4D2X189AQsiZ6DPeRZLShHg5BSkYCURyVhi1IKUDVbFks9uTUaRo\nQB6IcTL+knIyq9qxJyUPWRJiREvD6LdUleWTb/4oh6uK4TpxefiI5BLLW29hxMiv/8qvcue1u/zw\nZ/8YX/3yuxzfvs3y+Jwvfu2rzOdbiArz7ld47f6bDIcD61v3mX/iRxiurymPSm7P7xB85gv/+PPo\nouL2/RXzlLDVnHVd8bUgMM0JV0OPdD0iJPqyIRM5u3MPKwTbzSVv/MBnCYsCuekIpsENI94HrJrk\ni0SQZUkpE30YISVquaILiZSvyFkT4hItK0JI+LTHjx3BKZLUWCGJLZw0DbbaIfXIub1FtzV4p5Fi\nwRCuyDhiMHQuUylBYwXG1FOotwRtDFloRudRtiJFQ06JkJ4TfUTZRPCZgStSWlIWS8gQ8jCV5F4S\nOedBCPEncs6dEEIDPyeE+BdfeLbccMMr5eOszH/X3EPg3wH+bs75PxNC/EfAXwL+khDiB4B/E/gB\n4C7w94QQ7+Sc/ykBsNE1WfYoHZCiptAVQz+ihcZai4tTCSQ5gZQzlLYoJZgXjsyenASHgySEgmjt\nFLQsDhwGz0g3lWxMSc6C0Qu6MWGDnNLfEbiQSVHw/PIRt8/eJsbMqjjDhRaXA1mG35LcIHJC5gIt\nR2yRSHKPkhbouXPrExy2l1xuHlLK1zArhdsLApFyvuLyo8f8sZ/4U/z9n/27fOU3/wr/1r/3F/k/\n/vb/ydc/eMgf+fHPcrQ+Yb8bWCzmfPj+B3gfCYcPmJ/dw9Qrnm9a5rMlF7sN83nNGMGNI6nbkIzg\n7O3P8Cml8NtrfPLIGHj88AOunz/h+PiYdncB9YJbr91HCI/dedxuj3d7unZHMT8nFxJx2E6ek7zd\nvQAAIABJREFU5TLTaIlCsY+SBsOzXKFGza694nhxgvCZZXGLznkQHp8Dw9DhMYg+UZY1s8UcoTK4\nTFlUSGp8nxjdBdaC0ooYPCFMKVNaSdCZupgjo6XvAloUXF0/JlC/sAIYSTli5YxKL3HhCqUkRtdE\np9Amkbz7bn02fldyzr/l5GUBBVy91BvecMPH5NtO5r9H7uFd4M8A//KL0/474P9mmtD/deCv5Zw9\n8HUhxFeBHwf+4TdfVyg9deslkFqirYbdZtqws5GUPCIWxNwjpcDayfcj6wIzOzDuHTFlhs4y2kxR\nCJQUSFnRKEXvRrKMpMA0QYYwrbpFYvAZN2YUFVLC1e6rlHKB1gtSXLDvR6Tp0XYkIYnREoJHCk1T\nzmlmx2R1xXL2Cb7y3q9wa7WmLE9JOeByz6I+Z6WPMcqQgufDhx/wyU99ioTi7/3vfxNtTvjxP/Qj\nFHWFCyO9a9k93E6hGwHGYeRLv/yr3H3wGtfbLV9jpFAe5I5bd17n3JbE0KOSQSzX3HrwKZ49fkj3\n0XsMuy3HpytkcCAz1xdbxDHce/0tRNkQ+x3F6Zr2/SuG/YZs95SuQpsSYabSlROCIA6IMCK9YJZW\nXIw79ocPWJQRjKQ0pwgsMT7Emkv2bUsUidLO2G08hW44OQuE4JBIZDJU6pyZibj8HsZoZnWF1Bql\n4otQkkBIG2pdMG9WkC1ZRvb7D7F1h9JQ2hlalwgpKNQRkBDSIZREOE9WL3cDVAghgV8GPgH8Vznn\nL7zUG95ww8fkOzKy+G25h+c556cvnnoKnL/4+w7w8Jte9pBp8v9tF4uQBSGBQCOlxdo1resZxj05\nZ2LypAAuOsZxQAqDRL/Qm0eUFvgUCWFyU8xkkBayxuoCrQRSTvFoOWcgkHJmGDyH/cgwDgQHfddB\nrtBak5PGj5ExOCBATnifiBGktpTaorXj/OjTfPDwS2hZklPA+xalLZEdu/4JIY+89967yGxAzVDF\njM3zh1TljH/7p/8CX/rSl/jw/Uf86q9+nsNhh7WG4D2PP3rEe1/9ClppLp8+pS5LYhCUzYx6vkap\nkovLZwTnCb4nuBEJNCfHLI5us5wtkEmzWCwJIVHYTFkVU85qIcjVnHa7wQ09ViuS84SxI7o9pIAP\nPdJqpFAU0lKLgjLXpChw3nI47CFP4csxRkQyCNLkKS8zIbQMXaTvPH0/acj7bkRIhdElhZ6hVUPK\nibK0iBeB3inlKbuVDmOgrmbMZ3MKOaOQK2QyyJwJsUcpi1KgtCclT06Brj/QtdekOH4nQ/o7Juec\ncs5/ELgH/EtCiD/+28/ZbrffOIbhe5NJesM/n3jv6bruG8e34mNvgL4osfwNptzD/RS6MpFzzmIy\nJP+9+B3P/e2/8Y/RWhOi44f+wAPe+qEHHM3O+Wizx+VrVJoRosOFESk3k2OfnlPqhJEGp93UJGMk\n/TBi/RaEpLKa0Wmk0JOcMY/kDELGqVsQQQwjQz+QgqEqAtZrBr/B6hJTCGZVw8ZJYk5oAzEkBjcy\n+kQzW1JYy777MnVxzKE/sO87BjGwSmvq4pST0/tcPnrCanWEP0QWR8e0+2vWZ69xdOtN/tv/+r/k\nzU9/kqHfU1iFIHFx+YRf+9zneeetTzObNVTHZwwu8oXPf4HXX7/LmAqOTwrKeUE9L2n7jkU949CN\nHK4vif2eduy4/enP8vXP/X0unz2hLA0xGVar21T33sY/fRcdIuP2wGG3JfueujAIIZG6RAoQUuMO\nW2wKpPFAlJneO2RskSgO+xYhNPPZKVLUdK0gGoXSieQCCINWJ8yK27huzzg+wQ9PmRcrpLAooYgh\noa3EVgW596TcoZQlpUnRNPgdi/Ub6FTRl9DtHdFP/0stAylfgTgmxsw4JD744nO+9htXeD/A98ho\nK+e8FUL8LeCzTL9Kv8FyufyevIcb/vnHGIMx/yRw5VstDj7WZP5NuYf//TflHj4VQtzKOT8RQtwG\nnr14/BFw/5tefu/FY/8UP/LHTyhtwW645uxIoVWJKWtkqhnHPVqBD4kYIj4dGA3oUWIKhbKT2kTJ\nRF1ZxjgSc6DUkiwzIQaE1ggRyEmSsybFgegTxihgKtskn0FkNts9IQ0YUVDpYwprsGkGaYc2AaJC\nkklhJGSHSxklMyiHlHBwGypW9OaSpV6xufqI+WLN5vkeIypySCzXpwQX2e+2/Kmf+ld5/Ojp1FFq\nDNfXVyilefONexQ2cXz/NqWeGmrGYc0HH77P2z/0Q8SYaduW9WpFURQILSnLil5KHn30NXSCz//8\nFzm6dcKDz/wBDo8/4M7rn2R97x3i/orU3Gb30Zfo2y1aakKevmO990jrKbUlC6gXDd4bblcWediy\np+DarNhpGBMM454YHTlpDu0W6i2mEAiZII1UtWTXXrPUkpQzQnfs+w+p7F2G0CKkIOcEMlKWNVIp\ncnIo6wheYK1ms33E0eIeKI9UAu8Eh25AhzQ1iZXPiAjwd3njU2fcecvS9x3Rl/y/f+vxxxnW3zFC\niBMg5Jw3QogK+EngP3kpN7vhhu+Qb7uM+Ra5h/8r8Odf/P3ngf/lmx7/c0IIK4R4A3gb+MXfcWOR\nSXlkXs/ZtdeUxQJbNlhT4X0gREdKAYMhB0Hbt/Rji3Oe4ANudCidKMoMMuBSAikJYcT5EYRESkWl\nT1iVpyyLezR6jnDT495lBBYtZ8zqOW07cLW/QFqPKkZyTqSoUaLCWk2IPaEV9NuA77e4cUSryV4g\n5ohSls49x42ZUpa0mwvu37pPYdQUjSc0RydHhJj4+X/wCwz9gPM9T589QWtN3/domYlppCklSmdW\ns5Lz20veeft1RrdhtZpTVxXDMBBDoNvvMU1JSgMnJ69ztFxy79YxYuyI3vH2v/AnObn3AHl2glo/\nYNQK25zRXT0l9heUtpgyV41Ba0Xf98SU8D4CGtcPNFlxbCpKsUBZQ1ll+mFLjJG222ELMzVfSSiN\nQqiWEK9QNpGZJIsgMXqkHR8x5gtC7IkxAwKR5qh0hJKzyTRLLdjvRoah42LzPiHvyThAEqMi9BI3\nQnvwxCCJIdJ1B2II+HBgdPvf72fh43Ab+FkhxK8wlRr/t5zz//Uyb3jDDR+Xj7My/63cw18TQnzu\nxWM/A/ynwF8XQvy7vJAmAuScvyCE+OvAF4AA/MWc8+8osyit0CqRsiTFGVkKRNYoKfCDJxOIWaG1\nwDvFOIwIu0UcMnUaCM4hpUQYS+oGfB+I0pCJU61YSCp9wjBoNJGiGEk4xqRQuZic+NIMLSyFUlBU\n9F1kv99SzgvK1uIjpJCRClLy3L/zGZx7ipVrfLpE5B4lA9YYUg6889of5vqjxwxSUKpznj57jMyW\nYtZQ1w3ee954/RPcuXOH3W7HxeVHCCHphwM5C1IeSNEhRKAwmqOzBePYs17fxczmzJolTdMwX66w\nViK1QY4Hjt94i+7h18EesX38LovidcqmRCRN9dqnyD4RUo9NmaI5Zn1yj93VQ3wYsEWBVBpjLGVR\nAoJ2mBqhlLGoJBiGkXFMLGbnDCmw2z5lHGeM7oAuHQKmPRAySieyPND2gr4FoQ9IpnKVG7a0+0zZ\nSBCRGDusytOvr5hJckSlGTkYnHeMzmELEKJkGB1CVpP9cQikBLtuZF5lQh5Ad6QUgJeXAZpz/nXg\nR1/aDW644f8HH0fN8q1yD//k7/Gavwz85W91XR8FRtWkmJFJoKUlpTi170dJ1BmtSmJMhCwwtmEc\nWwQjIgnAMpmeSzKJofcUyuBCi1BAVohU0DQzunhAG0HEIgA/JKwVhAGMqDlaHLFvN+zDgXEYqSrJ\nvKl5cjlS6QVKeRaLFc8uPmBZLzCqAhGQKVJlQZPfQuSKh1//ImVR0uUDJ0efYnj/Q7rkyFpyOAy0\n3TVam2mjNSeEkBhjGEdHVTbILAk+EFNgcJHt7hoh4OTsLuvzO+jCUhaWopwTskMqCcUcFSJls0QI\nkHc/jY6BZn0LkRLh+TPM3fvIrUfuWt794j9iVlrG0SGlQFlNTJkYE0pp+mEAIckhYLWh8x6k4qie\nE1VHDImqathuL0gkQufJhSRZTRIj5IZhDMRhQ0wj67VB6QR6T0wZHzPKl6QskHKHVDsIkbEfCTpi\n5JY8CsgWpStCmhRFQhqULEB6cpRkYLPtiXGPUhGpPMErkn9lfXA33PBKeWUjX8lE30WUUuQM3a4l\n5kmjrNoZznV4HFYarKhRWiO1YQyB4VAgVaRsJDFGfMhsDx11UyBlJsaBoYdlWUDQHNqRohjQZURo\nPzkwaoPQipQsVjZY5TA60nYtzRysLCgLi0grcr4mxB60ROsKhMSqY8a4Y2HvgoDbpz/A5bMvUY0C\nPZzz5GsfoBDMmob5cokLidt3jhFC4pzj8vISRCanhLISKacvtz7uud5cYZSmrCqkzOjCIKXCaoOU\nGiEFRhVkkUlRI2OPPX2APrTohSW1W0bXUTSnqNUZ8cmXSboits+4fes2Tz74GmVZkMgIIRBC4Jyb\n4uGUREpJlBIdBad1QYh7LtOeGDw+HljOz/D9no+ef4QyHTpopMxIHRFY3KBI2SGEZr93LEQF4oDW\nK/o2kGNFNVMIIn1/jRaGJAJWNuQ8kJMneMOsLhAoYnRY62jHjIoGqQpyigTv6YeBsupQUTG0Nbx6\nO/MbbnglvLLJfF2dMaiETJHdMHC5efxC7uYRuaGwEKMDDbW05GRRRhNSC9EgxOSqKKRBK41AobTC\nqojpIA6JcQwsmorlcsU4XGK1Ycg9wY+IrAhxJKXEZrOlampgD1Gw3e45PXqdxTwhXElQk1qjLmvQ\nkeAFpalQSDRLFtWazaPHzOwRLgXOTtd0Tzt8ijjnuXp+QTmfsd8HmmbGbrcDwOjJmyalhNISoS2L\nukDlTFNVxBxoFie4lOnHESEF4zgihMCUhjA60A1KJnSM0MzI4zNkveDw/ldAlVi3J519EvneP6Kf\nrdj+xs8hRMI5hy4LfICZMQgFmclvxiqDVImcJV0/+bMsy5q4eQgiYeya9cyw2e65OrQUMaOUxccR\nIcYpPEQpUgKRZkSnyXkkxoTVmr4PZJlRUiNFRKgSLRR10dB2F8TUUdfltDkqMzkOxNyTc2YYph6F\nLJi+2ERAyEAaC3IoyPEmNu6G709e2cg3VCzLM1IWFMbw9OJ9Nt1DQkiUZsWqvMfZ8nWshnLmsFag\npEHJRJYJIRtELlFCgEjUsxqix5hIXRX4EBhdACL1TFLYAiGmlXxInpgg+8Dl1XNikBy6jtJamvkx\n47BA65Kz4zcABzljjMKWBmMkQllyBmuOqMoTiuqYpDNu7DFxhFaR8tRyLjKEGOm7gRAmvXtKCaUU\nQghyhqqqMMZQlhVVuaRZHVMuT1ge32W2OmN08YWroCDnTNd3DJ0jDgPD0JNGBykRxp7+omd78ZjZ\nfE6VA5mEvPoqYz/CsGcXIkenayLTrwIkU5KRiyhtKZvZpJSRGh8CxhS4FNj2e2RODENPzhpyRCLJ\n0aCocGNNiIaYFAiPzJ5aL4ljxeWzRGxL4pAolCClSPCJvsuEEDFGIk0gpQHve0LaYapIU88gKVKa\nzLWasiZ4zaHPdEPG6oqyNCjRsFq8QVWeYMxNOMUN35+8ssk8pIiSlqJcIJKgqY/YH64RoqMplgQ/\nOSMqIcniElVcMboDKQfQnkRk9AkpFLNyhpaKUhlEFtPmphQ4v32hYQZVSqQQGLmgtDWFVRSFJocB\nox1aOIy2lHbN8fIB1hxTmxOivCbEHT612EqSUQglp6SklJB6xtj1yOwQfiCOGe9alFSkMND3B2Ca\nwK+urhiG4Ru18pTSJJFMmcJWlGWFtVOTUMyS7aFlu93SdR3jOLLZbBiGnvZwIAMuQh4O9LsDXTuw\nv7okSYlqe8TiHN91sGhI0mKrms3D9zk9u8XF80uKogAh0FIiyCgpCCGSfCaGgDGG+foIIRUxw3p1\nRIoZPzoOhwt2/SWyGDhaawor0Ri0rJEyIZPCyhnJWdxQk/yC611mGBVJRAqrUUpSFACBffchPl1z\nvb+iHQekiRSFBDESYph+rWlFWUiq0hOiJ0dL9Bo/ZqrilKwMy9kZ8+ZG433D9yevMNA5YSXEkFGi\npC5KhmGJj5FDd01dNoQph4AQHda0KBswo8XTE7xGCUuIisJOdd6+T5gYGF0CBS62dH5DZRoQgbKa\nkYaRM2NpD442RSyGPF5DWeOd5s6916iKkn13jRINpmzYXF1ibUIXkWE/MjqHEoFcW/r+mtLUKCvw\nbqTKFnKe4tDCSBghScO8qBik5Orqapoo57OpaSoENpsN8/lyKp9YCw6kkDjnUWqkruopQzAm5Kwm\nJdhuN9P7fPYRq7O7RNdhNDgMhYRn736e88UctetxfUuSitW8pB8dZWVJCYIPSKlJOVMYjdEKJTPa\nGJIAkTNl02DjyOHxR+QccJ3j8dW7WGOADmU9Si2x0jBmBbmikJbsSlwvGcf+hV7esVppCmvRxiKV\nQctIyokhXNN76A8GkRJN9U+Sg9qDYnSBWVUQRKQuJVJa2p3CSE30AyJXVHZFAAIv15vl4/Ddyo68\nvLz8rlwHmCIDv0tUVfVdu9Z3Ky/1t3j27Nm3P+ljcnX13bPd8f7l++y/ssm8d8+oytUUnlwmcg6c\nr25xfXhK7wLO75AWmrokp4IQHGVZ4bJjHCNZbVDpCC2rydcjS4bekVG4YSQEgy0io+9QWaKUIBIo\nyxqEJ4VEjBnhBVEMCBSZxKyuqYoGFxwCQRFPWViBH97jEPeQNIfWI8UAtsB1T1iUK8qypFCO+eyc\nHC2KCDIzq2dkW0DMSClRSpFSImeB95Hlcon3nqIwVFU9+dCYqWGoLiuWq9W0+WnsFLKBQAootEGk\nwHy+YPADxvccNldk7wlaMjx5l8fjGefDJebeG/jYs/OZcb9DGzkpX6Sc3AjDNNhihLIsMVkhqwKQ\n2LihEBJrS8YAgxRUakY7XFHbjNaGqtAoWeJdh5AVfkzoUCKyIOWOEBxKKFKIFHVJlJGisJA0WRVc\n7yRJ9Hg3ZcEiAn5oGYYdh93UF7DZRNbrmihH6jqRhgofHIYjZNbI7InCk+RNOMUN35+8sjLLYXzG\nOOzRsmG3vyCFgJGKUlpiHGiHDSEMSCzkFZIlQjmUDS9WkRKjBVooclIIKqIQ9H0meEdyCZUbok+0\n7YFu3NENLSkFRFqDtiTtUAXsx4RLI7aS7LpLQgBlDNebZ6hUoZLh4Bb0PXTDQNt1jC7g0xYpB4a+\nw2JYlifEMEL0jH2HmvrjsVYjlGa1WFKW9sUE7qiqAudGZrM56/UapRTWWubzBdYWlGVJ0zTUdUVV\nF9RVRVkWrFZLpABNmgI3hIQYsELQP/sS3gea0/vUp8cENzJ+7TdwlxcUJnF8dkyKAiE0+UUNXghB\nCB5lLdLUZKOIo2PY7RjGQFPWfOrsDT5z+oOs5BrnI5U9xsgGKzUpRVLwSCwhOqQY8akj5oEYw+SZ\nIxI5Z3rXEn1Pzh2FVZAMUBKGGikMs6rCao2LnkN7jQsdPjpUnqPyOU31AJEMzSy/UL5IgpOM447E\nFiHCqxrSN9zwSnllk/k4Kno3TE03RcXBP2fXXWLLBbWdUYiSIhlkNhjd4J1g6AJJHJjNFIWp6HxL\n58ULP5CK3gl8zoxjnjTHTjPT53RuoO1Htt2OlAMxJlKqSAR8bolqai8Pfs+jx+/RjR3DuOfJ5W/S\n5z2BPYUq0OqUnAuqas5idTJtAspIbSWFMIwxgBBYI9HCIIUE5fDBo01BcJGmaV6EOEx182EYODo6\npmkajLEsFguWyxVVVVLXNU3TsFqtSSlR1yVVWU1yRiUJfiSnkfawYf/8Qw6bJ1SzI1qXWaws/cMv\ncbh4DBL8cI01K9yw4/btuxijKasaLyGQMbokJ8847nDDgMiZommo50sgc/A9lcicL1fMixmFKWjq\nM+ryDm03MroRckFOCp9HhtjTdR1aSyKJbAZ6d8AnR+96nN8R04CSsLT3EWFFJStqs6As5wxDIsSA\ni5Ex9tRVRX8AFVeEkMliT1Fpcta0B08/DlMqVbpZmd/w/ckrm8xVsad1zxlDj1GWMQ54RgbvULmh\neWHMFNpIHAUql/ggGHuB0Zp5vWa9vEM/PEVLS9PMUGJO8BCTQaaIlmekJCjNMX0/pQ5tdlf4MDJ0\nU0BzlOGFkgJk1hRa4n2P8z3b/j2e7n6ZNj2bYtB0whaR5dpitGZWNQgGlBix2SKSIoSRvmvRlWS1\nPqYsaqqyoa4MKU97BKenpy/KLZnTk3O0NpRFTVlWzOfLb9Q3F8vFi/r6nPPzWxhb4GPADQO+7ygK\ni1aSHHo2F4/oN89xQ0cWOy6fXyK1RC5WbLdbZvMVfXdFTop9e6CsGqqqokAiU8LHEWKitnN0WaFM\nTc4SKTIzW/HayTGiUGy7A4dwjdSS9foep+u3+OF3fopV/YPkqKjNbYYw9QREIRl6wXDIGK1JMnHo\nPe3YMfiWbfsEFx05VKg0J0tDQuHGxNinySVTJJQSDGNHqZe4PkI4meIGbUYg6Hs4bEeyAyVeXgfo\nDTf8s8wrq5lnRkJoaccrpJrc+vbdASUENs/RcobMEXe4IAwF8/UMkRy2qPFOcLw84nx9i97d52r3\nLhlHXdZcXO9Qac6isIzdNaZYYJSG1BDdji73RPGYQzfifUKbnqqYMatWCD9iTCDHHiMrUk6I3EMS\nJDWnEGuETrh0TcUSIzJd6PB2wd7v0C5wtLpNERua9ZJu3yL0gC0X9D5QlCXHx6fs9xuEUJyfn2N0\nMXnRBKjrCq0nL5emaUCISUoZJsuA0hqCj+zGHjcc2F/uifsrTFNTmcyYEjkllmXF5uIZsh9Yv30L\nu0k8fPgh50crDruIUXpS02SPKaYUH2KcJIOEacMYIERyCpRVxdX1BQs7Z1lVqEMmBkfXD9y5/w4P\n7n6aW0fP+fDZEU+vv8zobtP555R1QRg0KVn6Q089t1RVjaXA9zt6OWnPiQ5rNGUx6d5DFORcQ9Yo\nlXGuY8tTvEsIPMiI1gKpAqaIBDI+wHa/4fTo9Zc6boUQCvgl4GHO+U+/1JvdcMN3wCtbmUspGeKO\nXf+cIbRkCVFkrg9PEVlydnSfVXPOzgMUiFBx+/gtrD4mh4YQE01dsqxOqMolhTKUBVhtmBU1RsL2\n4n367oBQkvOjO4hsEUrRDVcIlVGyIqcarStyBpQmpoGYO7QSvHn+rxBCJkbNOB6IyZHZItgi2ZHT\ngIuO1reMwWNkxW57YPQju+sNUkJVlSQSVVVxdLSmbVv6vufBa69T2MlD/ezsDCkzy+W02SmEoGlm\nCCG/0fo/jiPBeYQAlTPdboPbPsMdrtg9fUS/n3TgspC0uz3zWc2tB/cZdwd6N7CY1XRdO72fHEgC\n2sOBoe8JIaCUQSoLCIauxfmBnNJkpxAipa04PjrBKkN0jhh7nl9+hFaak9NjfvwP/hgnxyuEcNiy\nQss5xhiqQjKvS2o9ZXWWuWaml6zMOTkavEsoqTlaLybjLy/oekhpgUgFRhXEOJLoyHJDCIHRO1JO\nZKboOm0SznuUiuy7r77sofsfMPkOfSvL5xtu+J7zCnXmCp8cXXjKvn9CSiO6iEjrqErN2eltzm+/\nRlPPGGKLyBUiW+bNKdrWfPjofS62Fy9kiglkj9QH5tWc+bxCK4cQkcvr50gKZmbN3aN3kHGJUTV1\nXVAXlugs2+sW7xJQkUXmcvtl6rLi3q03acwD3BgZxhGpYNmcU4gFOb8IZ7CSy27H9fgR+9hO8WdC\nkFLi+fOP2G0vUVpRlPZFt+ckE8tZ4tzUEWqtJeWIkhol7YsgDUFV1YgM+90U1tH3Pe2hRYpAe/mM\nw9UTri+fUBWGan6KqRt812NFRMXM5vo5dVHQ7w+MXUs/uBe5q5oQM9Mic1KyuOBRymJsQ1E12KpB\nKEsQGo9EyZKtcwxa0HvPZnNJzh3vfvgP2e4umDUNb7/5gygaYtpTlwadNVYbqsrQlAu0kFhqGBQ2\nzyjDAuEsZVmhSzBFQYqG0AUqPWNZ3mJdvc7p8g209qTUkeWWKA7EKMhoQkx470hEhFFYG1/amBVC\n3AP+NeCvAOLbnH7DDd9TXt3KPBVIMgSP8y0xDUBCG48yEWs1R+sjbNNQNoE+bthteiQFOiuSMHzh\nvZ/j0bNfRxAZ3BZjBbdunzOrTvAIhJLEsaWQhpQis3pJqWsK2ZCBnAM5KMYh411i9IkUM/vhCdpK\nKltze/1Zcqjou5HoMwRLyhVSzEEI5vOau6enrOx95nLNfL5CSoOxllt3H9AsTol5UtQAOOe4fese\nMXqGoSfGSEqJ+WzJ84tnDEOPcw7vPdZO1rjTRN4xDCPGZLa7LfP1GmMNq6NbdKOnWi0RerLHvXr+\nhN3+CbOyZL+5Yr1c4NxkKVyXc4SySD2FUsQQURKqwk5NVDmSsoAsUVpTVQvK1THVcsbx8hbnJ29y\ntpiTGMgy8OjZe3zhvV/kqx/+Ko2cM1sek8JApQxVXaFziUwag6Exc642T+h9h46GW8V9TC7xIVGX\nS1IUeN+Tg6bb7PBuwKiKs6O3OFt8CmUUQQS0hnpWMgyB4GEcPcpmlEoI81KH9H8O/IfcOMDc8M8g\nr2wyt2JBoWqkfuEB0gZi8IgXpZbej8SUaeolUsOuvWR/uMYNnhQhB/BxRxi3HPpniNhwfvo282aF\n1gKsRZeCwiouth/h04gLPUZrNAoCGKMxKiJyouv3CD19Rl10XGyeopSFLDlZPSDmERlhdAdkhhw1\nRi8orGRW1+ScsErT9h1d1yKN5b33v0ZZ1hRlgbGGrt9jjcaamrbbU1UVbdsSQkBKRYqJmDxt22KM\nYbPZ0HUdUkq8c+TkuXj2mPKFCsb5ESkU0Xu6riUliZCK2XJJDtB1HSpB13fMFgs6NyC0JCTIMeBj\nImVB8CP7/Z7t9SXRj0iREClBUZCtJPQdYezQEpRUoDKZjPcOhOALv/k5ujFCCtw/fgMmjwlCAAAg\nAElEQVQfBM18gS4MQQ7E7BAqIynRlWbMgSwypSk5qtfMyjVGlBhq/JBJXhHdyL67JsjJXXI5f5PK\nHqPEFFEXQyAGOUXO5Txp08MUmPEyEEL8FPAs5/w5vs2qPMb4jSOlm3n/ht8/KSVCCN84vhWvbDIv\nOKNWJ/ihwLtEcILdxpG84tn+Me144PHFh1S1IeWBftgypAPX2w1aLhiHKwxryNOm4P3br/Pm3c9w\n5/wBBz/ikkcvC5p1pPVb9uMFl/sniBwxKWNSRCpB1j3WTi6MOW0IeaQ0xyQ5MMaEFJJVfU6t7vH0\n4hH73QV99xxyhDinLs5IemAQB3LyxOgp64qL5x+hlODLX/48w34PUnJoO9548w3KqkApxXK5JMaI\ntZayLNhsNpTl9Jx80S2qlJrCqGPEDyN9u8fvr7BWU9gGqSWg2D7/CNduqNcn+P2W1eoIVKZqLO04\n0MznrFfHpBgxRpNzRiqJEBkpBCm+sBbICbIgEXHbDbnr0RK67RXD1RWLouD89C5alTjfMw4j7fCU\nrz/+In1IvPPgR/jkG3+C0tzmnTf/CMdHdymakjZcYEtJlrAZ9qCgsIb1fMV6vkC8kJLmYOj2HiVr\nDl3P+4/eZUyaWXmbu+sfZWHuIbLm0Pb0w4YYHdqAVAIhFDnalzVk/yjwZ4QQ7wF/DfgJIcRf/d1O\nVEp945Dyxvjrht8/Uk77Zr91fMtzv0fv6Xcg8oKU5qRYEcaSsZO4Fq6fJ/re8+HTX2fTfkjXbXHR\nM6bI9vAMGQ1hsDTFLSpdEpJmuTilqdYcr844Xt5GSIWWDTJbslkgdeIwXLFpL2nbPZ6A0DVlOePs\n+Daq9BgrCB6kKClNxdht2R0uqGcNOWvurD+DMceT85/UjONzNEfUxetkVaN1phdTLmmKmbFrOT+/\nzdHREUJknj95zOn6lJyh7w809ZzNZoMQAmtL2nZ44XVuqKqpq1VrTYoRP47sN9dT4k7MjDkx7K5B\nCEJyiHSgmS3xoedw/ZQoLd4HYsjknDg5OqE9DJRljU+w71qCd4RxAATtGDBGE7wnuoG+3xN9RGsL\npQapWR/dxY8d7fU1plyzWCwRXpBTIPrIP/j5/5nnmyeENPLpN36Yo9WbNPUxb73+Y5yf3GYxP6IN\nHdrMaMqCp/0VwxjwOJI/4NNIZjIgWx8dc7L+AY7nn2Z/2PPkySNc1yJINMUxMpZEp8hJInUg4xDC\nI03mJS3MyTn/xznn+znnN4A/B/xszvmnX87dbrjhO+eVTeZ9H6jKGcvZEa4zyGRxo2bop9b+Rxfv\ncnn4kI37Iv24JYZEJiLNVI4pzRpBxdHqwbRSdyN953j67BKjJcfLOcezY1b1kpP1Gq0zMTo8PV3o\n6XNHGjNGNTQzizYFMUhmsmFVntEeLgjjNSLvQUSsaThdf5IQSpANQp5OP+99Q6PvUpQ1STl2fosQ\nnrOzW1xf7TBqxocffB1b2Bf68UzXdTx/foG1lqIoKcuSEDxVXXE4HF4oWDJVU7PZbtEvfFP80KJk\nZOhbYkr0Y0tOClOv6MeBLAKlEizPTqCyLFZr3BAZhumLIqeEVpr18Qn9GLne9+hy+lLLGWxdImyF\nEAqpFFIr0uiJYSTGQGUthbBUpuTurXOOV0csdQVKYIj87C/8j2z2GxbVkgf3HuDbiCoM9fwUW2qs\nVfg0lVj+P/berMeWLD3Pe9YQc+whd04nzzlV1dVd1d2kSYomRdm0RIm0LAK+sOwbD/CNLnznP2D5\nD8iA/4Bh+IoQDAGCIdoCfGGRht2WmzIpm02y1VPNdaY8Oe0p5jX6Yh8SbbPJ7ha7dEh0PkAiA7Ej\nYgGZa3+x4ovve98xOJ75Lb0bGdxIP3b0pkfmCWen77I4WnF68phEL7B2y7r7mG1/hVeQZUdYZ0my\nHCWzP1r9BgzR/Sub0vfVLPf8ueK1BfNmu2FRHHG8qFnU6SFdEqCsSiQpeZETfECIQKIUZaFZLo55\nevcd9vYGGydmVY0Mkln9gCeXT/nN3/p1Pn7yB4TY4BOHSgSJTtBCUZcZVTlHqBwTPMZ2DM4zToZM\nVBjXIEJKP/UgAlJFxvGavrsk9Fu0DAQjWS6+SFF8gfniEVd3z7m6eULfOZAFg5goypooEjyCo6MV\nL68+wLsGHQUhHnLMiMB68xKlFMfHJwA4ZwivcqzTNGGNIREK4QNt16ATzeXVC8zYI7zH2wktC+Bw\nc6vnC/Jiho+CYA11XjBNE3mZE5RgMB37tsM4R79vKaqSqihYbw/CX6vTc+rlQ4oqoSgrRJIyDSMx\ngG17unFi01t2zQ3Nbk9dzTm7WKLKGViBo+Du5RW/9bX/jX3T4yaHLiRXm0uKWcby9A2klCRxROcD\nMh3pxw19GDDOYLqBfbsjyTXVckFRzdn3a+q0Zlas2LV79v01LvTEYHBGo/yco/IhuZzjncQNKWH6\n7Kd0jPErMca//ZkPdM89PwSvrWlIRHGoFS9OWR3vGKeRFEmVZaR5RhgNOp2RqTmNNWilcM5S1hVW\n3DHLF1g7IZXHO8vd7pZd+4KiUDw4epu+nUB6YtCHR3AhqHMN6mBZNowtWboHmxPjhHY5/djSj46J\nPTqL3G4+QMkZaVKy3n1CsDWZyiiygkLVLGrPpy8+YDm/YFbknM6OwWbYqWe92XNUH7NaLhAyBQH7\n7Ybt7R1BK/IsJ4RA1/as12tWqyVpluK9PbT5W0dnerIso2l2yDyh0AX77R3CTZSZRMoEaw+iVVpG\nlExJT1YE29L3PUIryDQn84dcPf+Ui7MH3G02LOoZu2YHwPHxMUk83NWd61Ayw8mArkoylRKmgegD\nU7PlqK5IhCJxEZDEPKAyQ+wlbrR4H/l/fv+rLGcnPFid0fsOaQzjDqpsxdHynIYNk9kz+IBKAplW\niGlARUeZKYJwKOkBQ4iW7f6W+VFCnip8dPg4YK1DxCVhkBgdQUB0GWMniPqzK028554/z7w+oa3m\nUy5fbPBeoXTN0XJBXZRk2QwhDlZhxhlCzICK29sBYxwET51qrBuZYsfN7gVtt4OQMHYGXOT8+A3+\nxi/8p1zfTdysd3z64pbru0uyNOF4seRoccZ8CfMqkBc90zBhfMpgJ3rbcbNf0w8TIRE05pbWtHRN\nw93mBfv9jvXtHW2/Zp7UpPqI6+srjA/EWBKUpxnuyNICIaGcrajqOev1M+zQsVguyZWmns0I4bDq\nVkqgtCBJDp2f2+2aptvjnKOY1fjJsds1ZLnE+YAWka7rSbMEKROm0eC9R+clxlmi0CRFztiNTP1A\ns2u5ePwuzTCxWC5p+gbcxGxeMnQdR0dHBKUOphtKo5MCbMRlCfLRQ9TqhHRW8/T2im7siCZhnCaU\nFGRlgrOOwUXadkSh+T9++3/ho8v3sOOeenlMXq4QISf1RyiT46YZfR8YjMd4S5ACEwMqVRjTMdoN\nXkbm5YrT1RHjtGNwLT4qhmkCJSlUwjhZumFCB42fAlMvaLf32Y97fjx5bcF8OZ8jYosdJYv6TY6q\nYx6dnFLIyKp6RHAZkxmx1iCUwobAvm+xYcCMB02Otu242nxEPzbU5QnEAjM5ThZfwkwjP/H458jz\nGdFJoq+ZjKd41R1al4KssszmnrKscdYc1BeVpe9HgsvxzpFmMDqDC45m13D38pLnN+9zc/chOY6F\nTOiHgX17Q5KXJNkSKzPQEnTGy6sn7JuW46PHtO2Wq5fPXpUbHQSh0lSTpinDMBBC+CNPTji093dt\ne6jA0XB9c8M09uw2a2JUmMljraWsC6TMAUGR5wQi+13H6vSMtKhxpmfYXpPVM4a+Y7FcYaPixcuX\naKG4Xd8gpEblKaosIJFEJRH7hrjdo7DUWc67bz1kVZ6S5Ss0pyg1YzZbsDjK2O1u6DvDsBuo0pSP\nnn+D0U0EZ6nzHEIgBkUkw1tNDOWhQUpbRjfRu5EgPFpG+nHNvn+OkAPHq5w8tyRKAZYQxEFoTEpk\n4okYopQQC4iWNFOva0rfc89r5bUF8/myRhYWT8c4OOpqgQQWs3MyPQeRs2tb2vGORa05PSkJwbDf\nDuxaw3bXgZCMdmC3vyHBkec5u13k8urbNP0W53rKDOpFBlIiDDS7Dm9HcIoYPTLRPHr0eZROEHLC\njAdl83FscYODqEjTCRcldaKZZyAj7NuOm6tPCK4n4CFabtefIqSmPlpiR8ftzXNOTx7ivCFGQ1kV\nVNWhq3NWzymrHOct1jqG4WALF0IgSTLGccQ7jzc9Wnpc21JkOYnQJGWNkgprJ4oyw3mJcQYfHZMZ\nETJlvlxyd3fHNHbMV8e4NMWPDcY6dtsdq+UR9WLJECLeetxk8SEQzQSyIEqNzBJie4e3jnR+jJgC\n75wseTCboUWOcnPOTt/m4uKCxWmFUIqTk3OO6hPmecnLqytkSJkGhw4eFwa8CKRZTpVUlJkmkxWz\nuaaelcToDl6ipuPm7mM6d4MuBIvlA+bLJVW+QCEJwaC1RWtHmhdIIUAY0pmmWLx+c4p77nkdvLac\nedAJJ8fHbHZrJqeQ81NGEUiDJwP6bsILxTgZlsuRNy9KtoWk38EwdeQk4AR1fnpozEkjRVEwtpKv\n/s5XODk9Ic0DuYbjxcFrcjf2iFbj3EheOYryiNn8DfI058uf/3n+72/+7wgSkgSS1CPkHJ1EYhhB\nGOqy4qhSLDhiDBnNtGN0gUwFlPS83H1KNTuljBXTdMvF6UP6fiC4ESHmXD5/gk7uOLt4m8Ia1us1\nx6sTpJQYHwjhUCIYfCDPS5x1ZDKhGbdgR2i3xCTHdD3l6SkhRIwxZLmmyA5130pqlFa0TU9RzhB4\nttstVZHhjSN4j3EjKnqkSJjVOfiR6AM6nRFxxOgRWkMxA5WjgiVYS7o8ZXf7gjJPEX1kMX+LZb5E\nn0fW7Q0b3fPw/HNkWYUxI/10ySfPfp8qnyGFQ0pPXiQY16OTQGTE0VHlFd54Ji9QSYYPllzD1K0R\nZUKqZkghKbMVRIvxHVFZZtLShR2jCcyyCqSnrH507jz33PMXidcWzMuyJApHMw1MnWVRLbAY3OTp\nGYgiJbrI1DmsbcnzOUdlTRUVPY7JWcZ+pC7PyKQkTQWPT89ZZoKPPvqQp09uWZ1Jjo9T5lVK9ClG\nS+6aFi0EBkWWZ1xdP+PR2SNibJiVC5x0qKwhLwTBHRzmbQgIBU6MCErm1YqT5Iypu+Hj5x9Qleog\ngKUmds0T8uqLFCczNs0e8OybAeg5OzshSyom05Nnj5nP5kgpKYoCszN4J5B5Ql2VCH9YZacJuLEl\n15Ht0LEqSkQ1R7iA1uJQi+48MRUkukQpQdM05EVKkS/YbjcUuaZvenSao/ICaSSb3RaUIEZNVc+R\nWhCFAJUjgiBIDTGCMvhmT4yeAsHRyUOePn+ODYLJBfCKMj/ltK7I3okIPyJiBVgQnslMdH2HcwN1\nnWOMZxpHgoBgHKnrSMaEIDR5ek5eBpyfwI5EobnZr5n5llX2JsYPqNRRxhMMd4zGE6ykTAs0Oc55\nJvf6V+be/2hewn6/jr8fht/93d/9kV3ra1/72o/sWj9q/jBF+aPgoJH0F4c/Nc0ihMiFEL8thPg9\nIcQ3hRD/1av9KyHEbwgh3hNC/BMhxPK7zvkvhRDvCyG+LYT41T/p2jmWRbpkkZ+TqYgWEm0CVZaT\nak2dJCREtBeYJkPFJYKEUsMs08wqDQqULHn7jX+LYBXzRc0bjx7xM1/+BdApfSuxRqFVitYpgoSi\n1AQtMK7m5m5kvd/z5PLrrDfPmKeKsi6Y1Uck+qArbk1gGix53dGFls14h/WGKgsUKkEFRaZyJBKp\ncrr2hilOzFYrjO2w3iC1R0hD2+y5ur6hKmd47xBC8OzZM/q+J8ZIlhWM03RoHKpnByu6PCdJEvbN\nlsXqjGGwzGc1xluUTCiLGQDjOEKMtN1AmmdkacXV1cGLsp8ceVURMHRty3a7xRpLu75jGvZ03ZrZ\n8QVCa/w4wbIikiJ0CkWOms+YvORyf8XYtCyqORGFnw4t62MfWZ2+TZYKvFzjxXO8vKaoAlnZkRcZ\n/eTZtZZ2GhkGh3cJUOCMABKM9QcvUr1gPjtF6Tn9CF0fMK6n6y4Zp5dIkZPqc2R4g5PFuyRJwTQp\npFTEKBibz6wD9J57/lzzp67MY4yjEOJXYoy9EEID/6cQ4q8Bfxv4jRjjfy2E+C+Avwv8XSHETwL/\nMfCTwCPgN4UQX4wx/jGBijqVFIVGiYBQOV3bM/aGqAbqcs58ucRKA2FAiwJrcqIQdFNDVitEVKRZ\nZF7mrx7RPcq2LBYXFMmKo5cz+u4O0xbc+sis1GQhoRM7MpkTY4qZBM527PsPKFRBLgqy5BgRC6IP\njHZHkiSU5emh/nu+oe894/rbGB+QUYFSzKpjlHCMcodMZ+ztNekEZJJp11LVx7T9HiUyiqoAIt55\nrm9eUlYliIiSMA4N1gUytSDTiiDiwZIuLciWxwTnyPOMvh9IEoUPHust+axiGAzt0FHkGmsCu3F/\n0EQH+rZFRIn3UM9qtuOEEa+qX4xlVmeYsUU/fAgx4AeHPDkh2B758o5oRnRRcBHPuY0bapdT5S8Y\nx47NYPHeEJVHxhTXdUzBQKzwk+Xs+BhnBXOTY6yFwVAFiQsBLw1JkjKODZtmIJEnnK48IVjG0TFF\nQ1lm6DTBMpHJh5T6Td48/yLjquPZ5QecloEp3aBUgjGBTXcfzO/58eT7vgCNMfavNlNAARsOwfzX\nXu3/NeA/eLX97wP/IMZoY4yfAB8Af+V7XjcEmt0NUvlXQlp7nry84ur6jt61IC2pztBFTj/uGMcJ\npUo6M3G9tew7MD7j6dUHbJtLurbD+Y522tJ0az7/4ILTozOmaWLoe4TNyHRCCrgwYIynLo85rj5P\noY/QgIwOH9cEJtpmpGsNSuY4K8jTisenJywWc9LsmHby3PQ9vkhQWUImFMoNxNhxe/sMT8RHhUoK\nRIzM6iXDuOX05BTrD23rf/g4XpU1RVng7EQqIwSDGRuyRJJISLTEBU/X9/hwWNG3bc8wjoQQGMeR\nqsjJ05ymaVFKveoqdWw2G2azGS9fPMGMDS+evyArUqy1SJ2SFBlBCcahRQ4GLUDpSNzdEXY9ZAnI\nBJTm5e6aXEmib6nSCZt8yN3uG+yGb9OuG/rbGZvbhLtrz83VjmiXTINAKk9WGnzoSKVCaU+SjCQq\noINj6EecL3l0+pc4Pf5ZrJc09payFhwtJEWSEEbBsjpnUZzy+Oxd7BBpui0yHQhMCOkpq5S6mv9L\nfRHuuecvOt83Zy6EkMDvAl8A/psY4zeEEOcxxqtXh1wB56+2HwL/13ed/ozDCv2PMcaUu6tLdFFx\nNF+x2XrWTaQb13htqKoMhEIqGAbBuvmEL7z1JZanb7De3OBjxn67JpUjlzffIShD0zuMeUK/V5RJ\nwenROXfS46PFhAkVC2SAVEW6yZKmOXWeU+c1m923maIhkynB90ilaXaOOvEsVkt6e02RK+aLU2RV\n0OwtfWwQRaB1G8ZxwqQeySU+KdmZW46KI+bzE6x1vHz+hEcP3+H2bsPnv/RlpBCslqfMF3M2myu8\nDaQqIa0O/5I0FbS3axKpuGtuiVHjw0SiMsqypMhSnDE45w6ljq9MLfLskMIpq4y2bfHOcX19xYPz\nx2x3WzKt2G+29F2LmSR1PUeKjKJcQrREWQAp+BF9dIp40RIXCxLjWa3Oubn5lM53qLSj3TynHywi\nrsil5G4zsNl6lBJIJLc3WxI1J7F7rLuBtMBMEUGOk4YwOZpQEaXgeP6YL3zup3nn7bd4fPIlPl3+\nc26uv4r1OaPNOT1+gxeXT/jcX/5FqiTnwdEF33yvJdAgdY91FhsFaX7fNHTPjyc/yMo8xBh/FngM\n/HUhxK/8/z6P/Ok6Fd/zsxaHSSqCdDxYvs3p4jFSOaJXbO76g8Sra0icJlcnmKGgabYURcHDs3cI\nqidN58zUnMAeokCKFC80+2HPaCNFekpVn5AWc6QusEHgRQ4cNEzHYcA6i5IFSXaMTkq0PCLVR0gZ\nkUKTkDJsBdNWsl13ODEhdECmltlMkmYGY3uuh1u6YYsPlijvaJqXr6zZcsZxIC8ymrbj9PSU3d0d\neEeSwDB2zOo5hZaMQ0PEMKsy2v2WrlmjhEDGESUMaTrj9u6Ou/X1IWUhJfaV2cQ4jK90XQ5qfbtt\nQ5ZkzGYVWgS6fkeWJOy6icEY0qwAIEmTQ0lkkkGUkBeQp8hiSdi0+EwQxwHcRDe1SJ1wefcBSufI\nbs7TT3v6vTpIz8qJJDlU4pTlDJ0kdO3EZtvQdgHrDU71uNJyMa946/SCVVWQMSPENU9e/B5X1zco\nLdg3W4SNFDLyePkupaqoy5yvf/gVggocH81YzTMUCVoUDF3Hvtkz2e0PNPH/ZRFCfCKE+AMhxNeE\nEL/zmQ52zz0/BD9wNUuMcSeE+J+BnweuhBAPYowvhRAXwPWrw54Db3zXaY9f7ftj/PZX3idP5php\nQvzcHcw8i0WKaUELgQw5WeqR1vLg+JS6fIPN+D5FMaNUFzzfv0eaWGblCRJP1P5QZ97cEuIr9wCZ\nkZIgk4rBjGQqp8ofsG+f4OkZzMTm+Q0XR4+JQVPmR2g5Y3KOurxgt/uAbX/HMj+nsxLb7/HG4pKR\nyQrOT96mLAs++eQbZHJO144oFSlqQVJNOGHo7j4i0wUIwWKxZLtd8+47b/P82VPKxRLfN2zHnrpM\nqQrN2DV80m45P14xJhGhInkxYxgnyjJBMgcChIDOMqQ61MtHAbPFgk+fPeHB8RExKpyPlNXs0B0a\nQArBos6ZppTb62dUqaZrt9SzGUIKQhQIAlRzEHMkLxFjTkg1od9xtbtj6wekWiBjIFFLBvMpVbJA\no6nSgk57hBbkaUYljnBiYHI13bSHxHFaPODfeOttHpxVXO17Pr5uudze0N7dsW3uiGywoeE7z36L\ni/mMk0wz2mvK/CFZ2fHs8kM+fPpXCGMDWmGd4cX7Lc8+2OKjJIofXQXIn0AEfjnGuP6sB7rnnh+G\n71fNcvKHlSpCiAL4W8DXgH8M/J1Xh/0d4H98tf2Pgf9ECJEKId4G3gW+5+rlF//WY/7Nv/kFfvnf\n/df5t3/lb6ISwdFpYH7sAEfwAtNIlCwp05TT5ZLz5RtcXz9BxsiXL/4aVSnQcjxUfOgCKVKiP6ww\n+2mLCI5cLRFBIIMkmg68ZLV4kzSpQAxYE3FecrvbIMmp8xWRhlTUvPvWL5CXJbt4wzB07HeGrnXc\n3t6wvr0Gd/Am/Zkv/CKzVBMmz+464EZPVD17d0OxWKGznOXynIjnc5/7HJeXTzk+eczN9TPssKPM\nJG4aaZsdZZEhvKfvtiQU9H2HsZblckGWJehEIqUkOMN+e8vY7djt9ugkZ7SBs7MLbm5uSZKEbuwZ\npgmd5Fze3WJlYDSert1wcnKKzjO8DwTvkd4gMMTZMX6I+H6H0AUxSkQcUHnBO4/eAVmS6zc5W7zF\noj5Gq5zN/pb1bsPgLLM8ZV4IssSTSMGquuBk9pPM1BkPZMW7eck8AdN17LZ7rtZXdJs7Uhuodc7g\nXrJv70hUznpcczvtUaVEyZbgJk7mS/7gW1/lvauPKOrHLOt3uHh8wi//jS/x13/p8/z8X/+eWb0f\nNfeWcff8ueP7rcwvgF97lTeXwN+PMf6vQoivAf9QCPGfAZ8A/xFAjPGbQoh/yMHw1gH/efwTijXX\nt3fkqWBZPuTN8y/z1sU1m83vUAWNSTN6Y5Fpgczm7NuWxWqFQCNlwmgMZpzI9QIXLXjHZAJJGqnS\nilHtDsYTsUfJcKjGIBDZovQxeV7xbvk237j8lCTLCTHBOkkzWhbVQJYIEBNSzDl/8A4vn3+LNDQE\nk5HrE7we6doN337/D/jpn/h5vJqoU40fU0yIRJsd2sqdBAvWWGQiqOdHh5eTRUXT3PH4rTe5ub4k\n2W/ph4HlrMANPVUmGdsWhEfJhE3ToJKSYEemcTwYiSaa2XwFMaJFJIgAeIa24eHjN+m6w3vrbt8g\nlWJWz9he3SGSgLUT1mqScs7i+ORgHl3NEUISEMiqRHYW129QyxNoGsbNJVfNlixqiBB8JEkFqVJs\nmitud56izJnVEF0ORU6DpCpXpFPGw+N3Kc0HNI3l9977mNFHnjYdbdgzLxIezBXFyjGanmlyCFfg\n/cimmXh44kiTQIqlC7C7e87R6jHFbIkdJdpLGB1WGEad/wi+Fn8qkUOVlgf+2xjjf/dZD3jPPT8I\n36808evAz32P/Wvg3/kTzvl7wN/7fgNLKozpkUWkqmqk0AiRkEiN1inL1SPa7mCF5pRk6vfYyZIW\nOevdBkVOKhb0/hlh0kQELljybM5RlrENl0y2x7sUEonCIULG5O6oxZLj4zeo11v2ZkRIiZYLrm6v\nWGQFeZGx3X/CYlahUTw8+gle3H2F4/KYqjyn8ze4cE2/XvPkybdI3/wCCIGSCcINmK4k2ADsae2C\nVAryMqefRqarK4Zhx9HinKfPn3NxdsLL50/IspwYM0bTk1U11jpCPFSunByf4Z2hmtVM1mHGgSxJ\nsMFTFYfyQmcswXu0VDTdIV++3x7yzirTpHmGlxE3TAipQEryPMObEXV8jkoSXAQlNVhBKOdIYcE6\nxPKU9sn7XK1v+ejqBe9d3fFTb7/JZAfSzBOiw40BZx1tHyhyULoi1Ybe3lFlp7jeEoWgdY6rbYNT\nKV5qFBIhICsiUgXGYWLoIt5rXJBECm5uPyJdnSEJeBPx1tA0t2hK2mHDXEISAqPwJHH5/aben5W/\nGmO8FEKcAr8hhPh2jPGffvcB320V991aO/fc88MSY/yBm5demzZLlc/xNsULxba54Xb3IU1jCUlC\nPT9HyJFymaA0SFHRj5a72yuaQRGRZIliWb+J7RVydDg30po78kzyuTc/hzeOl3cfsBu36DSSZJ4p\nSvqp5aZ5RphyHh6/RSo6QvAcLc7x3nO9bdi2O7wf+PjJV4gIyuyUB4/+Kt2+Y08GwLgAACAASURB\nVD6fU89OcF7gRsvtzR3vvfxd1lwyO4a60khyhm6k9dd4NmR5ydAb9u2OfduQZjXlvGQxW+CdYL/f\ncXZ+jlSaNM2IMlLVBVmWMZvPqWdH5EWFj5osL6nmK6JOMCGyaSaskPT9oSM2Ks28WiJ1QqlSxv4l\n0XRIIcmKGXW9ZL46IRz+sBSpRkaPHSZkPkeIFD/uQVhEWUFaEqcNpz/1l9l2e9778Ftcb/4Fv/f+\nP+PFzXOsG9CFZX5UkeucQs9RaoYfJZ4G55+wMx/RuJ6trtlEReMlXipcsKQa0lLQKcmmH7m53HPz\n4pZ+PeKninEU7Pd37JqnuCmQG42zA8ZseXb9TfrxhihaTDXilEGkn23XXozx8tXvG+DX+R6lt1LK\nP/q5D+T3/FkQQvx/5tOfxmsL5tZOKCUxZuR685z17SVKZ+z9xF7dEvIt7fSSIAxCeYie3jQ4uyXK\nHVY0RDyz+k0UjrJyWD/S2VsWy1MeHL8LQYD3jP2E9QFZjpikZxgaPrn9Dt34jDxN6c0tMfYs5iU3\nm5dcXu3Y7HtGY/n2x7+DU5GsekR5+nl8IkhJGbtIN0RwGdqc4myGLAPJkaeoL5iJt7G+wbhD52cI\nBiUiZVlytDqirudMbiTEkfl8jhSCKBSTjQyTp2l7usnSND3GdiitCTEyOUte1NTVMbPlGSqf0XYG\ni8KQ0E8T682WYAxGRo5OHzJfniCkRGmF856xH5EonPGgcqTQxNEgvSAIgZq/gdjfICmI6Qwax/Th\nN/j8xZfpgiV6uO7uWHdwfvzTvHn+s7z71pf53MOf5q2HP8VJUpGGwH4baJqGfnqGLhOsqNl2HX1v\n2TcGJSuKvCZKQQSGfmS/G5gmRxAOISB4yWY/crW27NxEGw15polOYaYJfGBynt4LjBAUdfOZzVkh\nRCmEmL3aroBfBb7+mQ14zz0/BK9Nm6Xb34Je4oLn8uqaRAiq8phdt8a5Pa25IQ0PcDHiwgiq5MHZ\nQzq/R8qa0R8c5o/rx+yixcdbEnLaYU3QE2dnp7xYf5NoI007sQgZoSrQWNKs4NnVhyyKHIRCSEWU\nhovztzhdRD78+D10NqMfNqQ0fPDsn/Nw8TPM8oTz1UPWdzv8lBCMYb1tOD5Z4EIk5hYnJrJa4UKE\nyeGkQyaRpuvIyxrnDE8+fcrzZ08oypy9t5yfXjCMIwCLRY2zIzKryPDoVEOIjKMjTRIW82O8kKis\nRAiF8iO1THCmZ7IG4SQ2GkwSSZMUyHAcKlmcc2y214TBIpUnzObo4KjTlOrhW8QsQYSDJjyzC3x7\nhZzNEQ8uuHz2MXfjHo9HJhUIR1GckicleaopkuSgFmksy6Xi08vvYPqRKTOkcSLGlmmsDgG7H8gL\nzypPyKscMYVXDVCCVEMsJWmVESxEC7e7HTF2HHmJt54YKqSLpC5idh3XvcRJT3WmKOafaZ35OfDr\nr1bbGvjvY4z/5LMc8J57flBeXzDfNaiyQK00m90LkqTgLH/MMN6B2pH5Gd1uR6GWZIs547hHhznz\nVODdSFHUOK+RMXCz3pFXHi89sjJcrr8JPiNNI84YwjijjR7tIUmX6LKmzHu0kljTIvMBKRVpXlBl\nmkePTnnx/A4TFcfVCWHa853nX+VULVlUF3iTMKkRmSis7Vmvn5EvxKEVv1zRTh/T2ZQkS6jKHi8m\nnLVMg0IjmS1WEEbGYUBpuNtuOFbH5HlBURS4JEEIaPY7hM7QQpJlCmTCMAxkOifqhBhAKsdmu6dr\ntpweLdjv7yi1QJqUXMdDA48U7Pd7lNaM/UhCQlHnxBgwbsJaR9OsWVRLYi3x+0tUqRH1Ejvu0VNA\nFSXP3r+lSGruts8Bg0HQq5zhpWe1yEhUzbysEa5Hy4lEeRTxIBrmdoxG4iO4wWCVwDpL9JJxmOid\nJTrI9ZIsTZjchHeC7WakaSPLVc44eMxgSFTOMo0sVc9tC9d3nsUDxTzXxGn6zOZsjPFj4Gc/swHu\nuefPwGsL5jJX+Gi5fHFF8ILR3ZCqkixPUFlOkBKxiEyTZNe0SDGQaInpU4y3SNXhPQxmz6Z7SW0h\nyTweyXr/AkVFxKCzAWdnOCkI3hC6FOIeERVWJExC0myfMqtz2mcdF/MHvHVxwWy+4l98+E2IkmV1\nzt1H38SeV4ymYz9sODlOYCUJXaBaPsC5PXY/kBVz5nXF85efILsKIS45lSVlesQ07dmv18z7iTSN\nr/w/A6uTE4QQbLc7Ts4fEmQgEpFphkcio8J6T1FVKOsRSUqaFnTdhLGR9eaWTMGTp09ZLmeoPDm8\n1M0SovMgFEppnn70bWbzFUV6UGZM0gIlPEFCkhY4M+A//BbpW2/jmg26qtHaErodHz79FCN6erFl\nNGuwBUJ1qGgYxh1958hLzW6dcJyXeBlIUkWzH5mJDBlGlJSUuqZPW6KI9F1HqhOCF/RNZOzhqJIU\nIqW1EzEcGqCyLIPgGYYR02fE3DE7Elg7MtxGgrCoIiEEQzskr2tK33PPa+W1BfNsUaFsxvHyIVIn\nfHp3SZ2uyLM51gV0luBsz8v1R+zGipPlkt44zHhoOE3agx9kWUhUIjDeEE2KlIK+ddR14PjoiG5r\nUXnChCYKg1CRIlW0zQAiI00Ux/oxcVyy6TZ8uH+fx4++xF96tOL67iWZSkjSkvnxKU5Zrm4+oekH\nklSQF55kVbAqj2m3kjAZZF/g9hWL5Cd5evcROpNUxUgJmHEgSQuII1pXWONJkgTnLG3bsjo6x3iQ\nUeOdI0kzohAInaITgVQ5OpMImYFOSVOJcxv6fmTAM00TWV6gZMAwQqxYzWc0TcM4jATnKfKErJgh\nM025PCZJNLOTBxTVgqg1Ion46yckjz9HHG4Q+QnS70gzxWbzlGV+Qls9Y7OFKi2QUTEMBePUIBXM\nZjUDAouBKPGjZkSRLmucCMRXkgn5IlKkmhg0UuekiWA/NhgNqZBEn+CDpSgLQgw4GzEe3ASzWWQU\nhnbwtN4yP8opUnuQ17X3Lxzv+fHktQXzKp8TtSJPJciUaeeYnQuUmnO7u2NWBLqhJykkIU5M8pBA\ndQLavWe4ukEKSVJ4gnSkmSChxDHy8rbhNAiWVc7ZaU2avkOZ5jy9+RZVrsBEprbHu8NqV0+QF59j\nkT3ik91XsVNKmmoenp7TOYPSmrzO0IlARIEYBal6CO4SkQhQE3USKY4e8Oxmz9h3FNUZJ4sL/NCj\ncoHHk0rFOOzxdqSoj5mcZ3V8xtXVC07OLqgXC6ZhZFHXhBDph55ZmeF9RGnF6AM6Kw83AAsueIRS\nqCTDjj1VuWQaGzJKkjRhbHvi7IgYLXjLxaO3kBKqIiHEwLC+Qh8tMWNDnuVMw0R1dAz9lrBfE6oF\n/tnvo+ojpnZk1zQU+ozj5QWIHu8tWuasRM1m15AmKSLNWbctQhjKPCGRBcEJ3JiBaonRkCYjZZUi\nZSDPZgeRsCSjbSb6PpKqhHmSsZ9G8mTFbHnMMF3j7IidFFHCzky8uLaQ5eRzSFVg6MC/vil9zz2v\nldc48xURz25omcYNq6NzsrQAOZIlKW23J0lShDZEPOO4YV6cUFc5SvdkyQwzeoapxWGJviYKy0V9\nwr/3q/8hZZ2zbl+g9cSj88+hkop/+tsTH3z4dQiODI3z0HYdp+kxQgyY2DBL5uTZChFmFNJy3TZU\nRxlnJ1/AuGt6uyEmCRKN6JZ45WiTW1Yqo1ys2FjB9fNrvLyjKEqyrCC4HJVoRJIx9AMxBHwwyKD5\n8OP3OTo6YZo83kW8dEzWgAg4H9h3E2VZkacpMYA1gUQrpqnHGMs0OVyQPH32jHfe/gLWWEY14sYd\n3hiUCvS7O4iK5dHiUPoYI0RIyxxPgpQKR0a1OsJ3O+R8jjAD6ugtxMLzP/2jf8Rl9xH7bkNEYKUm\nnV8TpiO0LBC6pDcVUmTYyePs4aYrBSRFQaITfLQMdiKfSVZHJ6hZgReQJhmPHr5LkZ5ihq9wfbkH\nBdZbpmFgXmVoUtpO0HeRRKbEGGg7h8dzVDrmicCOEYLmIOx5zz0/fry2YD4OLcFL6kVNUhsYHCIK\nQrDMqoKt39G6G5TKyFJNKj1ajZwefQklX9LpDjMWZL5i32wZpw7nc7TMOZudki8SHD3XN8+55IoQ\nM54/v2W3NzAGTpOUbX+LX+bspxHlPmRIAqZp2d3dMq8CZ8u3+eDFP6M3AxcnX2S93zCGK2woyMqI\npj6o9U2CKwUP7J6z8xwhjpnaW6RUSGa4GBBKE0VE6xyRRG6fP+Ps4Vu0Tc/iWCOFZrPbkeY5xEhd\n15jRUpYlfTdS1wusd2R5hnOBvu+RUjFNhs32hqIseHH5glkaGLc31HlONDs2MpKWGaujFSFahmHA\n+5QqLxAyR+cKpWcM3R60JNf6IGxTzohmg5qfcHS24je/8g/YTDsm+5KuG3j7i6dQe+KYoIqCYpjh\nnMMER9f1eCEhSqpZQpqkdMPEMPUs6opidYZSKWbaIrVkvniT6AQXp49p+/eZjCXRKUrk7Pe3CAp8\njEQvkamCCH3r0CqlLCRSCVIpSVXOYO6D+T0/nry+nLkQGCMZxxGtHbiRTbslyoTjkxllVjE1LfhA\nqhV+mBjTjnldM7k5l+uPkDGHuEQpgSClLEs+efKCX/sf/j71yYzFbEE3XtMOVwzbwE2z4ez4hLLW\nBDMxTRE/tYTB84VViZUJd3LBi6fvUc/+NR6cHjFPa6beIlUgxMDYTwgKjB1xWoHy6LxAGMHWWXKV\noArQusZMIy4acgWDH1HB402HGTxFOWOzvkZKjfee9d2afhz44pe+yNX1FUodFBG994QAZjKEEIgx\nZZoM1jqE8IzjwGRH7GSYFSVFpRk3e8rZDDHNURrqcsH6+gqpBDrRYDOkP2jgHB8/RvoBnVbQNoSj\nGQRJdI5Ypahxy4sXVzRbw93aYqNhtThnHt7EpH+ALa9hqkmynCgNU9fTTi0yKqZB4k2FzSRD19K5\nhizLGcaBIPa4cIN1PV9/r+PR6U8ymDvKKiUUCqJlMop2P6DlIWBnSUkiFV3n6ftAkjjSNMdrQ54X\njENP13zmQlvflx+V3diP0rbsL5oF2p8HfpQNX/8qmsdeWzCvS8GIYLe/5M1HX+Tl9TOsteyGPUoZ\nlIpgJIMxSD8RRo0NA5vTK8y0P1Sz0DINPSos0CqSVTNknPGdjz9h/bsbfumXfpbFbEFvJGkpObI5\npRQEKaGqmMeUqMCnYDNFmqXkFnrXsGnWfP7sjAfLR3xw/R369inSQR5WOOd4cPQWWb3ibvOEIghs\nqdhtb7kxd+RVTaYUk4QgDKMdGFNDGR3BDgQXCEJhjadeLJn65qA5M/Y8f/oMnWiuhETrlGEc0DrF\nGEuMkcvLlxRFQd/3JEnCbrcjenB2YnF+Riot1CW5zvERYvRMU0OSaK5fvke9OMHrnGmQyLBkamdk\n1RlN21AXCbqJiHqOWh4RXeTl+x/zye1ToisQtmdeFLx18RCFpGktWW6J7CjKithD8HtEEEy9xw4W\nMUV8ZfHOgIRxWtO5lihbUqVIY2C7uWKePub49CHb7jssVgrweFcwNB1KWNT/y96bxdq2pfddv9HN\nfq52d2effbpbdetW7zhuExuch7wQkfCCAi/IIrwhIBICxUSCNyLIAxCekECKIkTAAQnEQ4QAS7YV\nB6fsclW5XL63bn/6vc/uVjP7ORoe1vZVxS7KVaaury3v38tZa+251tj7aKxvjPmN//f9SYijBBdr\nmn6N1Jr5zIOxeATbqqbvDcjbnPktfzb5xGa+lrsy1TzSGFGymD7kyfNvkkk4e/mKOM3pWo+KNF3n\nkMHjreH09AVFMUGPnijy9L5Fygl+6HH9gIoTjo7uECcZr148Y/GZEgZFiCLiaYWvR0ahiFxElpdo\nZamlZetG5DDg05RYONbtmm2zIc8SkGu8vGJvfh/nI6SSxElMFJUMzRWh3zCEhqbbUG02SF9xcnDI\nB5ct1jmO70yQTmKDJE5KXHdN13dkxa7l7eAsUsVkacbjx0947bXXuLy8IssyDosjjImomwYpJV3f\n07QtXduSpinee7y3xGmC946quWBSTmitZzlZcH3+kjgZieOY6XRvd2iZZGit6J1ExgUynSDbS7yX\nyChCZCnepMhR8M6Tx1TXHf1YkeaaR4/uo5XgnQ/fwieBvX2Jjq8gaAIjfrCY3qONZhgFkTIYpZEi\ngBKMotlVscaBNDIYEdMGR1HGeN9z5zhn2z8m1nPi1LLYjxnbgAgQa8U29OAscQqmzLGu2xlkaE2i\nMqLydgd6y59NPrFgbmvP6CyXlxfMJp9ie33NLEtI04inlx4dYo4XE7JFRpSmrNcXbFcX9OECaQe8\n0yjrsK1kCDUhCMbBIsWASmIO9vfRwWIbyb3l67y4fMFmK7hzcB/COeVsRRokr4ZX5Dk43WNUiQwZ\n61XLph157/QJKpbcf/SQKHZY/wR0gfeSqh3oKsX66pLMSPJFQ9ELVBIxSxOIRuKoZXtu0MJQTEq8\nGnbqk0jincX5kTiJyIs5V+tLQlIymczxUnDx6pLDQ4WSO+OJKIoIITAMA03TYMfxJpA79K4F1a43\neZwggkArRTMMu26Tcmd+vVje3x0W2wGC5+69R6TTA0ZriZMEhUckBSGKEaPDVyPfeP9D3j37GgHY\nti2R0ZxfXRM2Dtopa1VTLmuMrAg+xriROIoYrSXKoCwycq24ajrQkthH9JuIPPV4FLKAg4MF2/YD\nFrM7OB9TtxlPHr8kVxmFgSEonDTEdiQ1hotYoXRE3w708UBaahIj8aFhe/0H7GZvueXPBJ9YMD/f\nrBEoQhh45+3fYHA1ZZ6jQoxRKVEUUajAJFfEWYGwirGGzeYSqQeIOlAxY7DU65pJPifJJPP5kg8+\nfIfZYs6yXFDZDlNDcAqjInRUsFq9jTAVeZKRKo83ZyQ6Io6gHzSZueDCWd59ecZsep/5coKUjjKf\n8d673yTLFBfX19S9Yy4OmZgpvfQcHEw4fWmwTtA6TVrmzPXAy/qUJJ+TxzHYgThJaOuaWEcYAb7f\noAOsry+I05LttSRgqesaQvioyU7f94ibsvy6rhmHkc1mg3WegKcoJsRipG7XLCZ32W6usMIzKyfE\nMsZkKUU0wVrPdDrZ6cuzBRerFbFRZJMZQkeIfIIdPZcvHmPtiPMxl9eXfPq1Y7QSbFY1s3RG5Rqu\nXjniLBBPK6Sx+MTRd9D0u+rTOAiGqkF5iyoEkSiJ9BR7vWa97bGzgSZao5TBDS9YzqfcmR+SDAWr\niwsmkwVxOUcEGNY1V3VFZiNWmy06DxSTgCJgm45upQnX2Sc1pW+55RPlEwvmPREMDUIN2LHBaMW6\nbRiDoh8cYazpgqK96sjHDi0zBJrQlmzOO6K0YAwjyiiyuWZot0QqJdYpJycnfPjsfertFUmS4rzj\n/smnKItHfPrR5zj7ypt86xvnbKbPmCxi+nSfQnikkaz9NbnU5GJgZVuuqnNG13N8cMLB8if40b/8\n1/nNt/4xz1/8En3vIHakpDSjR+jA0f4hV+sVm8qhdcL+QtF3gWaoSbUiS3KaTYdWCmEHxOgQzlHE\nGV5KZsuCdVWxnOyC7OXVFVEcU223GB2zrTcIIej7jr5v8bZDYCmzFO9GrHDEImK73SIFjM2WIdJo\nbdBKE8UpSS4IdqeM6YaGavuK8vABQimCiQlBo4eWD86f8a0n34DIUM4Kyizi4uwVzgq61NA7Dy6i\nq3vSvCJNYBOBkQHf7n4nOQRkUESRxEcabVKi0TEO0PU9UhWs64p267j3yGP9hpO9fe4dlnzptc+R\n7e8RxZar6zXvv/8By3xGf14RpS2TzLBZrdk0I2OvEBvFUZp/rPP2xqzlvwO+wK63+d8IIfz6937X\nLbd8/Hxyp0ViiUgzgt0pWKIsxdcdVTOCU2wZuXxRka8MyzuBLOsZnN1VFQ4JXe8IkSDNPEIKetOh\nshUmHpmqPRTvc3F+TeASrRLuH6Z84bN/kUkxQQZDbFK++a1rPv9wyvxwwYXdsrd/gpQCN1ZM8wc0\n7bsY5RiGns11x+XsFX/hx36Oo8Ofp21qvvLbv8ZlX7F/0CC6CZfDisP5kjSOWdc9UR8T6RgXHALw\nkaTfdMgAcZpSJBOq7ZoQLJPcMDrFxEiILLFqWZQRm9UFQkUYbShLydD3SClpmhqlBd5bhFAEtzOE\n1q5jvXqF9zMODpaEsDs4Dd5igkcKQRZnuCjgvcWHiCyd0rYNyXyBKEtsvSFSMf/st3+L9foCgefB\n/SWjdLz1wVPcWHJwoBgRJBk0a0WcefK8IzJgHBwtJ6jBE+yACIE4kXgMclSIrmEyj4n1jMtNy3gW\nsw0V225FMcScXZxzkD6gHh3t5hVFHrNYHDGd3OXyckUnvsZR+jp91bA6FTRXklikpEaRlXPgzY9z\n5v494B+HEP5VIYQGPt7V45Zbvk8+OZ15PTKbTBkGgZEtkckwxRQte5zzFHZkS8z1ao31DXuHChki\nMIG+t+TaMjqFwzMqT1EaBllxsX4LM5xwZ+8BVfUm1+cOKTrefOd3+OyjHyGOYlAjD04WbF90uK5n\nL2ie1JqklBzuP+D85W9QyJL97BHz6YRX2zOquufXf+tXMTrjU/de42D/AfeO3+X0+RUuDEzzY17V\nG+p2S9M6pBSgDZebFolCxTWrBibeU5qESVEw9BvSLEcpSRxp1OgRviHXLZvrDXUfYW1g0DEyOLx1\n1E0NgHUdwxjoRosioLVHigrnR4auYn5yF6MTetNCcNhxYLu5QCeGyOXM50uUTri8WpMmKVmkQCq8\nd0Qy5v233iKWEbkWFKUhFoqXlxVWGJS0CBmjXEBqgTaWph2Rgp31nDZICUbHRE7hGfFBQW/wsSLf\nP6BMY5pxZHN9RTbTHO4tKA80OsSEvsSbh5xf9Kzfu6ZcOh6+5jk8OOBYFZTZIZICN8R8+e7ILM3J\nspw0itBa8d/+/V/5WOasEGIK/AshhJ8HCCFYYP2xDHbLLT8gn1gwH901Zxc1tmkZx5bFvkcoRd9Y\nUDFhcJRE+CJhGAeqVUuexqRJiXcVOlfMEsPmvKUbBvq0J9jAQI1y53iVE5t4p5oRnqbZ8o33vsKd\nzT26cU2u5xztLRBOUg3X7AXIo30e3P08q+ff5NXpKdl8n9kkpogKtr3FaM3/8av/M/iBz75+l8Xy\niGrbYN2IEXBv/0tIzinMyNg7rtsGoTxhbGmtRasAskQJtfPdlJKqqjg5ucfLF0/Zn0/JE4Uu75Cm\nHW8/eYKvFd0AVgSqTU0cxyil6Lpd/jyEgIkVy9mURFouN69I4pz19QVGatJyinKWoW+Rg2Jzcckk\nn6BEwAdPlme0TcMkWyCiCDlarp6+5Otvvs1X3/k1pguwoef0wwaXxszKJWVccLx/B+Ele8cHIGCw\nI3cPDsijJXlc0DUj5+dPQAryLOd6dY0xMZPpnPlkSpxEhCCpNzXj0BGlGbOlwY0KERRxnFAkOSEE\nxjDigiOLc/JZip0PdH1Pmkwo4xKRCMZhJDIR3n+sapZHwLkQ4u8DPwJ8FfibIYTm4xz0llu+Hz65\noiEtuK4rVpcbNAItL/FSIKTAiQhawRgkKpG7Ev4wYrRldCNFNqEoDYXRJMUrxkvBeuUZ+oEodozd\nOaPo0FpRzlKGZkuWG56ePuZyfYr3DbUzGCkI3tLZiCjuOdy7y/HyhDeDJkci2kBbO9Iyw40tbW1Z\nTnJOX17y/OV7nNx5gyw1hKFFRQOSOVKsSLzEjReUU9isKryUdDWEsOXOcon0Chc8bVMxnc45PX2J\nCG4nvzMZWZmhVcz+4pCXlxu22w6VJHRtR6UkZTHBOcvOmhX2lksmWUpdXZIVGcQBjQU3oqXGO4cg\n4MeRwa1ot1fIgyP60TFaTxRplIkQWoFQfPD+Bww+8PDgdbqwZRwh3u8wcYydG5blHpN8wmQyZ3+5\nRAqFlIFJPiXKNLNyhnOWD58WGCNJopSu65FaMp9NcYMHLTBKM84GmqamLOYUkxQ3SpzvcM6SJDFp\nWtB76NoaNw7Y0ZEkBUlaEMUxSRwxDgNd1zCOI0nysXqAanY2iv9OCOE3hBD/FfALwH/ynRf9/gKd\nW7ehW/6o/CC2cZ9YMC8nBUEHGGc0m4aqtowOIh0whUYJSVAwX+YoExi3nrHZ0rmWPH+ADBO6MTBR\nnvv7kt9+GuMHTWUbxlYy+i1FkjFbGMQcismcn/nJf4lf/covUW1iosgzyaeYwXJ+vWF6pMgzyfq6\nobpqIfYs5lOmxZyRa2aFoSgSnp9/wKwoqFdXVPkpeRIxdoHN9hkqKskmJ4z+BVEUcbgouRIFT16t\nmaYFyuYIbxB6Zzgxn+6hlKDyjv3lkiwpiFJz09MlxgtJO1heXW4Jg0UbAwSGOEHpBILDGEWsY86v\nTtFK0/cjRZrvrN+waO8x+ZQOCONAHCfgBmSwpFHGiIGhR2U5XhqE9aTTJYvBw8mXWOxN2V8ec3V2\nTh8c221FHAnaukUZRb/dsNjbI0sKEIHM5MQmoR23PLp7l36wxFGEdRYpFV3X4qylzAqkUhitiZOY\nJM1I0hTvLH3ryPKc4D29HRjGEaUEWid474mimDiO8N4x9CMhOJQSoMDzsVaAPgOehRB+4+b5/8Iu\nmP9z3AbvW35Y/H4P2e/0l/39fGLBXDiJkoY80Wiv2fQdRkGaRUQxJJMUKQRD31Ove9za4hlRWlNV\nH9A2SzITYbUg6A11K8hCwiJfcmU7Rueo/QDKURYj9x494M995kd5/OQxX391ho8MeZ7R25r19YiM\nLJdXT3l6fsplf8VUzmjqDdYFAi33D+8xPzjk7Opd9g5Sxr6g70/JsteI51OabUPfr9nbO8EPNVoK\nlDB42yOD5WBaIIaCWKfkWc7YbqmrLc717O3tI/yA1hFZkiOlJBBIkphJWVIWW07PL5iYBd57JuUc\nk6YI71FS4IaOtukxsSMv5kgG0DHDMLLZXrIXG7K8JDYxaZqyd3hENDugNLqqXgAAIABJREFUbsB2\nNYv5dOe4JAJh8BhtyCKNnM+Yl0sUgcOjA8pygrUWrTWXlxdsNhuur6/ZrLdIGYiimNlsijGGrtvl\n9kPwdH1304ogYO2IlIJhGJBKkec56STFAzY4xn7XpTKKIrZNjRACYwxRFCGlxDlH37eMY78z/zYG\nGSeUN56mf5hP4v8fQginQoinQojPhBDeZmdq/q2PbcBbbvkB+OSkiU2LDz0iRKADkZBIBVGqsENN\nLyASiusXLU/O1izKgjSR2MSx3WzZrteU6YwsUmR7CcuDlvY6oOMJOgHVb3GjZLvq0Uby2r3PkZYl\n1q1x9Aw+RUUGO/PEY4LzcH5V040XqIlnYCBIyXZ4Tj0E/AB1fcmkzNBKYNMYhMZ7hSdG+Ix2NWKO\nNWV0jA4LosGwiJbMDzOKJGLQYISiqSuU79GpIY9nxEawWm85vjPF2hGlA2PvwI9kSUyWxIzjQF1X\nxHFClCRMspzReYTvuT67ZH86RceKsamYz0pErlHDgJDQNQ3FdIbSknw+JZ8tCAiC7dE4PBavHaFt\nqFYdfd/jXGBvecDl5RXjODCbTWjbljTNePDgAXFsuHv3mKbpuLq6QsrAOI689947lGWJMTuTCO8t\nSu2aXymlKYoCpRQhBLTWH7UmCAi01gghGIeBYewRWiOUpu9avLW40YIQ6DhCCBiHjrapUPEu0GdJ\nuqs0/Xj5d4H/QQgRAe8B/+bHPeAtt3w/fHLl/C4mZI6h7aiua5I0YbPu6CsPwhElFY0KJCIn8RGE\ngEXhRosYwdmEy65iJRSHY8pkHtPpmihJKG3OZrVGKtAyR0jPJCl4+vQxZ6u3UNKzrTqkiTjZf8ho\nHhO6EadGlBNk8ynr1TULjpDE5D5BhCnG7XE3SxnDiDGaJEqQ1hCrkiF1DPM1qkvIKOmTHonm5O6n\nMFKzqTecnp4j8hgrBzbrLYvpguAFKs6wHqQSDENPbCRNvUFHMWm06/meZzmbzRopoWkauqYlTmKK\nJGYymyBkYOx7cANVdcn+7ACpY4zRu3a6dmBSThgGi4pTvId1XRH7gLA9YXlEOLvm+fMP2VZ2Zyk3\n9Fg3ghRUVY31jknf81ZbIaQgyxIImqapGUdLnme0bXvTPE1TliVZliGEYDabfHRgG0UxbdvuPEmv\nr6mbmuADeVHu0jAhsNluyLKc4zvHRFm2W1TsSJqmmGAQQDt0aGVQIXD6/Bl1XZOm6cc6b0MI3wB+\n4mMd5JZb/gh8cuYUE0UfT0nihhdPV8TakAjJer1FKcW0zBACdKI4OlnQD47KbtkrU6yGetuhdUQ/\n9FxeDXghySe7isokiqhWHSLSlJFnkd7hg+ffQvmSzaonySXrq2sev3jJp+MHBKeI0wX72QE6naOV\nwmaQmCkaRVRqZosDpNY01YbSRPjRs73eUBQp6TSiTDXDmNB3HYGAkBJCYL1a7W79hSDPU4wxeB+R\nFXsE70mimA8+eJ+9xYzBWqrNNVrMMEpircf7QJJq3NjjrUNLSZZNSBNDCJ6m2rJdnXJnb5+h2yKD\nRxDR91umkxnee4pyikQQlCbLM1ScEkTCcq6xtiWez/EhIRRHPL/8Oh9+8DYHBwc0TUuSpjtFSlRC\nBFmcUDcViYkRGKSSHB0d0Q093nkmkwlVVX20M6/rXbql6zp0ZGibljzLODi8Q7WtkDqQpDFnZ2c0\nXU2S5SAEry7Omc8dZVnQVBWnp6coE7G3t0ee7XLraZpgjN4tEIkhTmZ4/7EaOt9yy59YPjlpoteM\nnWVSBr7wxWO++ZvXLBeG6MQgBgitZbpYomJBc9VSVS3SejJv8EWAkFCtepxXjMbRdYG95YRJNucL\n91/jL7zxIxgdMQbHdJaTFYpVHfiR+3+JMjdEIiXRmuViyZce/DSpUZRlgbcwDD1j37EdOrDgvWMc\nWqQVZHmMiWPOXp3jhdhJ4YQgBM/qJnCP40g39CymM7q+x0uBDND1A0JKTJTj+xHvKy6uLohis9tR\nBsMwdPgAUkUoBKJ3aKmJtEFqxWy+JEtjlNoFscv6GUIE6rqmyDPKNCaMA8Eroiii7Rqq1RWzxR4E\njxKBkCWE4ph4mRHhIS6QQjCOF+zvL5B8mkAgBFAqQimFkz3aGuTUMInmJNpQ1zVSQZ6kaG3w3tP2\nPQdHR8ymM9arFW3X0jYNp69e4b1nuVygIkVR5sxmM4ax5+LynEk7wboBqQKb7Zq22WCU4oV3hODJ\nspjJfEaSaLquRmtF17YIIZjOFuRZyjCMNM34SU3pW275RPm+grkQQgG/ye4k/68KIRbALwIPgA+B\nvx5CWN1c+x8BfwNwwL8XQvg/v9tnnj4/JSkduJyjoynn97YEJDayHGYZSmuurjckaYrsepLeIdOI\nbqwxIiLNot0OeNshMku6N+WLd36Kh/deJ89ykjRmudhDCokgYN3IPGm4V2SMzoMKTIspxmiUVvRd\nRxSlRJFmtI662mL6nZWa9W4XoLuGOI4Z2p0DfJ5nRNrQNA1JktD3PY+fPaUoCmazGU6A0IokTolM\nTD7ZyYzGvscJy9jXuL7GOY8Umm11RZ4XWA/TIuN6swHAO49SgSTNkFHC+fkFgwuUeYq1I5O4wDmL\nsyNDL8ijhKA9bd+ilcEkKUoqotiQ5zNENsFve8RiD0kEAsDxwTvvcb2uqKoNAcd8vkeRpzjnGK3k\n5dUlJyohzfTOws4Yuq6jLEvOzs85OztjsphxePcOIgSMMaR5iptNidOEEAJpmpJmMS+fP8eYCG0U\nSgr29hYIYLSWse85PrqDd4E03aWK1qsVl5fnDMNAmqSYKCKMFhNFAKw3G5zzH7c08ZZb/sTy/e7M\n/ybwu0B58/wXgP8rhPB3hRB/6+b5LwghPg/8a8DngbvA/31z8v8H9DS5yphmESYzSBfx5T/3ab75\n7feJrWLQA/eXE6Q0uLaBhaCYFFifkuQF+5M5B3uHoKBuO0bbY7Ti+PgBUaaw9DS9RW12u+qymCOF\nYL5Y0LUR9XqFQODdgBeOLCmRaYaUOxmQVoLl3h7r1TWTyZTLyyuePnvGwcE+683mow6GAXZ53CTF\neY+JIk5O7pKmKX0/cPrqjHv3HjCbLHHWY51jGDp63+MdrDdrYhnIsgJjIpIIvB3QRjGOA1prQj8g\npWIcPUU2R+mEdvC8OnvJJi24e5gSXI2Rjmk+Z311wfwoo2rWWKPIpwV22C0+WhuivSVjMOjlQ2jX\nkEYQJEIKDu/eJyjJi+cfcH15yTvvvs98PiEyMdO8IC4Krtan6MqQ58WuJsA62qYhS1O+9MUvMjpL\n33Y8Oz0jyzKyPGXoB7RW5FmOHUf6qqHtO5x3hHqkrRukVvR9T5QkaKm4d3yX0Tq88yACy71DUJK6\nrhidYzKbk98EcmkUOkkQUmAi80f+Mtxyy59m/tBgLoQ4Af4K8J8C//7Ny38N+Lmbx/8A+GV2Af1f\nAf7HEMIIfCiEeBf4SeAPNCLSkSM3C4wZeXXZsr835Xh+yNOnL5AyRcklD/dTgotJ8gKpUoxJiOMc\nKSBLExbLfUZruTg/53J1hvcjXXOjyxSCvqkRUjJJMtI8J4kURmUksWEYRrquwwXJYB0hQNeNWGfJ\n85J2W7FabxiGgclkxny+ICtyiixjW1d457B2ZPSBbhh2O848w9qOYWjZbhuKYsJssk+apmzWK5QU\nVNuKartmbAfSbMo8T4mUpm073OiJpMTZQB9GggsI4YhMYDqfkmYL3n7/A4be8elPvcb11SWr65r9\nYmet5+xInuV0o0OJgBtHvB9R2hClMVFkEHGJzA6gu2Tz7Ixock1y+ACCZnHygNnRMY+ffZtxcGhT\nk6QZq+srvHc0Zy8QUhBHKbP5HOctWZJydvqcKErIsgylFHleEBvJ2Lc8OT9FCEFRFDTVFqUkl1eX\nu/8vm6KFxHoHg2cyW5BlGVLKmwWx36VumgYhFVJJ7uwfEicp5XQCAvq+R0pDFGWIxMCto84tf0b5\nfnbm/yXwHwKT73jtMIRwdvP4DDi8eXzMPx+4n7Hbof8BqnbLrJa0Q0+9rgg2MM3nzLKeNx58mcXy\nmMLkaBPI0gLnHYP1RLFgGEYEAmsbvLMURYyODhmGgaqqiKKIpm2RwROs42i6wEhFLBVJkaFkhJQ9\nWmuUUFR1Q13XlIvZRyL9YehRSjGfz7m6ukbJQLfZUHct4UbDP3YDWkukUNTbCk+g7Sqc9YBEaklR\nlECg67qbQ0GBVBJrHWWeUbc1RAn7ywlNtUFqjx17MhXhlSQSEi0VUu7SGnXd0tQ9r16d0/ctyzKj\n7TqyuMA6MErSdy2zyQSjY6wTxEaSpgV5URD2DnEvniOzKb/97pt85t5DksVdRGQI7HL1r9/7HMbv\ncuJZYpjlE642aw4P74KA1fqKNElompq+H8nznNVqzfX1isPDA373d9/k4uoVh4f7LOZLDvYPePud\nt/Desre3B+za+VbVhmk5JU1yRudo2w6lFEop6rreadDTFGUMWu4WB7yjbxuGrkMIQZZl9KLHjz10\nCik+Pp3598sPS+v+w7R6+2EWMv1JtqD7Yf6dvyep/WHwvYp9flif8z2DuRDiXwZehRC+JoT4S9/t\nmhBCEOJ7inu/689iPSUIT191jG3LZTWw98ZD3vjUfU72HwCewe4KSJwfgYDSisjEpGmGMQbrLATB\nYm9GbCK6vmVbVVRVRdu2JElC0zScXrziUAlUbKDTWDd+JJNbbVcM48j1+optvaEsS4LddRqsqhrr\nLH3f4pxnCBbLrn/2pChQQhBFGu/BOkccJbR9RpTEXFysmE53OXkfPEIIoigiigybjcWYmL7fkErD\nwWLJdnNNHhdkuWazOmea3mfd1hgdE8c5ZZKw3taUeYmShrZtUSpQZDFFGpEWBU21QsWCSO8mtA8W\nSbgp1hnQOsaPCn14QnN2yvbqGndyD8JICAl4h1eC6cFdhm9/k65v6HrHvXsPme3t0dTVTk54BV3X\nM5nOmRRTtFEcH9/FWov3ji996UtsNteIINk72MO6EaVjnn74gjQt+OxnPgNSsF6vubpaM19o5rPZ\nTTfIBufcR8VCUkmG0aPSlCTftQno2oY0TRmGge22IoojlBLYMPJdMnq33PJngj9sZ/4Xgb8mhPgr\nQAJMhBD/PXAmhDi6qYi7A7y6uf45cO873n9y89of4K03zzFhTRAje4cZh4tDxGg4PDxA6UDT9Bht\ndoHIO+xo0VGMiTTGmN0XHkGapeAtVTMwjiPL+YK+78myBKM01juuVyvatmU+n6O1IopipJREUcRq\nsyaWO2VI0zSEEDg7PUUbQ2DXP0VKuXuf1MTBgYDUaIQyNzt4g8ChleTk8JiqqSgmJUcH93DecnV5\nQVEU9MOAtRYTJfRdQ55NWOQxbd/grcUav6uwlJp+6AnOIyO5e4/WbDdXrNdr2n6g71tSLfnSawvi\n2NA3G1w/sh1G9mYl69WK+XROW23ZPzpCCo2e3AUjsFdX2LGhbRs22zVHbkAI8M4ihWDv5B6Hxyf0\nT97j4mLNs2dP2JstOTw85Fu/8y1UgLHrmR3dwclA27bk+e7MwTlJluXkeYJzjuPjE/q+Z1LOmS2W\nfPvtN6n7lk89/BTHR/fIsyl931JVFXmef1TpGUUR3ns2my3L/X3qbU0Sx6zXG7x3GBPTtjsrvV/6\nlV/l//nK14kis8ux33LLn0G+ZzAPIfxt4G8DCCF+DvgPQgj/hhDi7wI/D/znN//+bzdv+d+BfyiE\n+C/YpVdeB77y3T774NOCqU9Rg6QsZ+h8RplkDLZD9AJjFElyU8JNwNyUtjs3ArvS8DhOsdZSVRtM\nHO1y4OOAHQfiOKYsS6LKoPLAMI40TY21ljzPGceRKIpYr3cHmmVZ4vG7cedzttstZbE7mJRS3BSj\n+F2pfQj0/YjWu0VFKUldt4zjwPtPPqRtGtLpjKZtcN7dHKTO6LuWy8tLZtMJx5/6NCd3lmRJSpLG\nvPzwd6hfvSA4h1QprbVExtD2PUkWI6RiGBu00URYlEw4mOeYKKJvamy9Zrm/oN62yChDS0XQGXmZ\nMdjAfHlESA2SfUxZc/r0CT/+4z/N8+fvcv3qJcsHS6RWdOfPSA4eUs73WH/ztyjLKVkWc359wenV\nOWkWk8S7BXWwu6KirmkZXvRoLXnw4DVWqxV932FMxDe+8Q329/eZLxe89vAhi1lJ2zYs5nOk3O2+\nQ3AkSYLWmjiJGMZh12vFyhvJpkdLqKoNUsLV1Zo4jhnHESklP/vTP8mPfunznJ29ZBwtf++/+Qc/\nhK/GLbf86eIH1Zn/XsrkPwP+kRDi3+JGmggQQvhdIcQ/Yqd8scC/Hf4/EmzeKlRuGHxHPXoOohyh\n5U0Xv51Vmve74CmFYDopyPN8VyEZxx/toqWUZFm2U0aEQD8OEAJxkhBpw3ldk0cxeZrupIKw20Ha\nAXxgMimJ4xjnPGWckqcpnkBZ3qEsJzvHHimYz+eMY0/f9ygh6PuR69UVfdviBfQ9JIkiSEmcpwTv\n2Gw2pGnK1eUV42Cpmi1CeyIjOVjO6UcYfMf92T6f+/M/x+rijGfvvk19/ZyhrklMhI4MwQswMVGc\ncxhHCAl9X7M3yzFCoGRMMpvTjxE626OcP0SlCbEC27e7Hbp1lMkE7wXj9orl/gEvnz+n7zrGvgM8\nCEH94pL44AF5Pufw8ICqasiykg/e/xDrR47v3CMy6saLtEYaQ5oWLOZLhsGy3W4wRrFcnvDkyfvs\n7+8OgN9+802qZsOnHj7ijdc/w7vvvcu5e4XREWmaMo4O7wNRrLH9iJUO7xxVtWVSTNCR2Uk4pSBJ\nNRcX54QQKMuSx48/wLqdlHI2m/0Rvga33PKnn+87mIcQfgX4lZvHV+yaDH236/4O8Hf+sM+bRkv6\n2uEAFadoJdDakKU5xmjyPP/odjsEdxPAa4wxCCEoyxJrHVIqIKEbetqhBx92QX8c2VYbFpMpzo6k\necZiucR7z9XFKybFPrPJDJNESKVp+w7b7UrRFQJnR1ary5vbfsX5+cWuWCUyNFXNarXaybOV5MWL\nF2TZhNUmMJ/PEUIhgG11hZL7OOd5/vQdeu8xUcqdgwNW6zX3Hz2kLGbEScLV9RnXFy/4whe+iDc/\nxre+/lVcv2G0Oxmj84Khd+TzGC0U5WxCpiS1dUyLknxxwMHRPbTSPHjwgOlsTr3ZkhU5GQ7bbAlK\nI6Vn3DY8u7hkbLe8OD3ns1+wBCTV2WPmJzMEAq1GPv3oU1xcX6CV4qd+/Mf55X/ya4AnzSaI4HDj\nSNtb7t69Q1VVlOUUHwbmiz2ePXvO9eqag4NjlNKkWUpWJHzrrTe5c3KXOEq4eHVOkgV0pPDB8vTZ\nc6pqy/2Hj9ibL+m6Yafy8Q6ld3cDhN87A3BY70hGy/0HjzDaEN8sxB8XQog3gP/pO156DfiPQwj/\n9cc26C23fJ98YhWgn/2xL/PWm29y8eqcxSQCJZnNZ8wmC9IswVqLUgJjdk2VtNZovft1x3GnoGi7\njtm8oGs6ijxns92gI03fddi2ZX9vgQ/Qti2r7RVaBiwCpQ13Dg9JspwgoCinXJyfESJDnuc473He\nc319zTiOhGBxbrfTbvqOqqmJpaYd+p1+fOjxUY80MVVVfXS3sK22SKE5v3jF4/c/ZLaccfd+wWK5\n5Gh/ycHRCVEUoYBuLFFBcvrsW8wPHnF4chchH9FWWz547y2azlJ3DeNV2Jldi5x8f4/ZfMFy74DP\nfvZz9P1AtVmhjMHEMXE+Mp0vCPMluh8gbABF0Boxej58+oTV9TUheMS44eLsBc1kwuHCsjef8Pid\nbxJFu51zmsT8zM/8LF/72lcYBsukSImMYbNZEUWa/YN9zi+es16vefHiKRcXl6TJhNPTF4QQmM2n\nrFZX7O0t+cVf/EU+97nPUzcNg+0Zx5Y0y5mUe9w5ekQ2KTi/uGB/b07wAR8Ce3tLhJC7Xj7dwP2D\nuwitUEpjlMK5XXDPbrTnHwchhG8DPwogds3knwP/68c24C23/AB8YsHciIT7Jw8YnCWJCu6dnLB/\ncIciT0jilL5vCSEghGBvbw/nHN77jwwIhmG4yV/vjJg3mw3jMCIMREoh0l2Ofb3dcnx8BwKM3vH2\n229xeOcuNjgOjg6oqi2Xl6fYocOGQBwnrC6vGYaeKIro+xHnRoZhIEkShJLMZjO89WyaijzJef48\nUJQTjImomhaA7XbL9fWaatvw/pP3efzhYxbbAz73+S+TJCnWw2Z1wWKxx3J/D+8HkgcPOX33q1w8\n+4B0NkdEBclkQpxNcPYJzllC2MkcizwjSnIcAqUUTdPS1B15UbKta6wP3Dm5C8UEGRSYKYQFQXii\nZMrZ6Vd58t5zQPDhh08ZrefZ0ydMZiXj+hWbas1kMmfbbLHWsq0H7hwfUNdfpG0rlvNdOmN5cMjT\nly949PAR773f0zQN5+cXNE1HZFK+9vWv8tqj13n06FNcXp4Tgufk5GR3N5PmDG2HHQJluWQyKdHa\nkJiIh/cf3LS7dUxn0xupqWMxWVLO2QV5AXmaU7c7BdM0L1B/fNLEvwy8F0J4+sc14C23fC8+sWA+\n9JZ5sc/DezH7asLDk9fIEoUfPaYwBL8rygnwkVytKIqbsvmBEHrKsmAcBpqm2h16mgg3DvRdx2df\nf51Xr15x994Jp6dnfOFzXyBNYg4Pj3j/6ZNd06e25dnzJwxDR6QSnp8+486dOyD4qMdKmiaMo8SY\nXdm+8IFuUyGjiOVswcuXp+R5SZblWLuT1HnnGUfLbDbDxBFf/tKX2V8cMPQ9680FVbXFaM1ZXe36\ncd9IY/PZknXdc3j8iOXxXZyy9K1EqIIQHAJFuLljODm+R5rkfOb1N+i6gb4bQQiSJMVozdGdI7TQ\nBASCGEQA4RFBErIpv/xrv8xgBwzw9d/+TZ6+eJc3PvNFZtMpV5sNv/rr/4Sf+6mfZT5fsFwu+Wdf\n+adY+yH3773GaqW5OD/n3r37HN45Js4zvva1bzCZlMyme3St5+zsbcaxJ00zlNKcn58jiIgiRZaV\nPLz3iP3DO4zBgXUYs7sDGMaeJElYr654/ORD9vYPWB68wfj8JderC9brNQd7B3jn0LFBxQGjDZGJ\nqKqaovxes+6Hyr8O/MM/ttFuueUP4RML5i8uXjJJU3SSUCQT8jhitCPeOfpm53XprWX0DmstIgSU\nUmitsdbuJIkBxmHEjY6x6/HjTgGyf7zEB0GaZ1SbDYvFHBNr1tWG7XrF/mRKU7d06Rbbj2hpEAKK\nYkLTdDjnPiryURrqeiCKIiaTKVfWYaKYvh9o6pr5fI6UgaZtGQPIsJNLbuqGut5SypIiS3n48IS2\nbZlOSuwYsDbgrEVpjQi7tgNRnjGZ3yEuFwgdU6Rz6vYKEWms9dRNzTCOxHFE19dE0e4g2HvP6EZM\npLF2pGsbgoNqdU4WxQjtAI24yScnScZP/tS/yNOn7xLC7m+7c3SfPM8Yx5FyUvDnv/zjqDjG+YH3\n3n+bSTmnLAuc80zKCRLPwcEd3nzrTQ4PDpjPp1RVhdaag4M9NpsNIXju33/A4eEhxkTM5wvm8xlX\nV1e7gi0l6Zue6XSCkprHTz5EysDh4RH1tmGzqfjMG18gjJ4sT7hcQd20mPX1rpgoaJq+pm07vPOc\nnT3nnXevP/a5e9PL/K8Cf+u7/fw7Czt+v1PMLbf8IPyeqcv3wycWzN/+9jfBC+7dOeTktUPW22uk\nUBwcHpCk8UfXlXEMN6oW5xwaj/SedrNh6DvGwZKkGU0IHB8fs1gsSJKED588Js0zlosl4zjStu2N\niYLeBeu25eLykmEYEEJ8lMbp+548zxFil7rZbDZkac44Oq6uzkiShDRN6Loe6yyR2vUUaeqGOM0p\nJxlSG/bzhLLLuLy4wrmd0iZJEtp2p6ku8pw4NozDQB12Pp5KGu698WUcBhnHeLFbuJwXvDxd07Yd\nm82WNE05OzslyzKMiYDAaEeUhDgyPHjwgGa9ZrG/T4hTBAHXrpBpgUAihOT47glDv0EIwfHxMWma\nUZYTNpsNruk4WOxhreXs4gznHJHJePnyJcvlEuk922bg2++/xXSWE8SuU2GapkRRxDAM/MRP/OSN\n5LMAwP2/7Z3Zj2T3Vcc/v7tW3dqrq7urt9k9nvEksRN7Ria2WSIICUJxhJBYJAiLeEICCSlA8g+A\neIEnXoAgCCgPBIjCIhRHSUQestnj8T6x25merbuqu6tru/v24+HWOBNrxp7p7ukyk/uRSn37Vtf5\nfavu6VO/+1vOSVJmZmYYj8dvXVtNU6nVqgxHQ5BQLlf4waU3uHptjWatjqpm1Y/eeO1FKqUaVaPI\nwN1ird9jdXWVSqXM0tJSVii6WsU0ixyuz3MAfBx4Tkq5dasn72W1o5wfL97uS3F8+7KIUxwzB88P\nCd2EkT/g8rrggaMnUDUFTc0KHgBYho7j+QS+j23bk8nIFM/zME2TUqXMcDBCUxRM08DzXDqdTtY7\nktmbX19fR1UUkjgmSiN0zcQPfLzAzcbFw2xduhIq+J5LpWxQr2XZ91qtJkEQ0els0GrN0el22BkM\nGQ2G1Go1fM8nmJRA6212iaVAVRWazSaOk62+yXZGplnKgEqFRE6WAY7HjC2L2WPH6ff7zLdnUY0i\nBbPA2uUrtGZmiMKQ3nBAEPqkabamXVVVFDXLStjr92g2GhiaQZpAd3ObRrPF0HFozNZBMZBpgmJO\nkiMCnmczHG9SNAuoujZ5ny1c18V1fUAyGF6jXq9nm52CkCROKBaL+L4PEp564km+9dy38dyISz+4\nysLCIsvLy3S7XaRM8XyfIImpNxpEYdb7H49Hk9wtJYbDIY7jYDsOxaKJqqoMB0Pac4u49piR41Aq\nN1ldvUTgj9jRbWqNKqVKGdsb8/DDH8Qwsk1lqqoRxwlJKigUD2Sr+a8BXziIhnJy7pSpBfOlY3P4\nToqmlnBdhzBIOXnsBJ7r4LnZEIIEOusbuFEAKZP6kWBZFuXyHIaIn/ryAAAQTUlEQVSuU65U0DWd\n8XiE49j4fsCZM+9nbI+JPJ9Ll9YoFEyEqpJEEbquE4URqqKSpNFkvbRLr9ejUCyxsrKM7/v4foTr\n+Kz21qg1G5QLNaIoQjcMqrUGpaKF7TrMzc1hlUvEcYzr2lzvbJJKiRTQmmvhuh5RFDEaDLEsC9/3\nuXr5KvVqBW80oF6vI1DZ2e5hlbI85cNxnygMGQ6HDIYDNrodwiDCskpomoppGszPzeH5HiN7QKFQ\nQDcMSlaJjY1rFM0iw3gHoVVIhY8iVNIYUEARUCgUePPiRY4cPU5rdhaEpLvZpV6ro2lZAYuZ1jy+\nZ7O1vUWaSsIoJApC0hSWVlY4/+xzNGdmqJebPProWfo7A8rlCorQuXZtHV3XsWoV7PH4ra33QmS3\njWEYvnX76Hs+l9cuE8Uxs60WjuNjmCZRLJip12lU62z3NpiprFCrFVD1mPZCm9HAwfFHFMwixWKB\nUqlMoVjGtu176rdCiBLZ5Ofv3dOGcnLukqkF80pjHtWIsCIdmUKxoGGPxygCTNNgY+Mavp/lSGk0\nmyiKwLJKQLZb0DB0FKnx/R+8TqVQpl6vIaXk5MlTvPzSy5CGGKUiiBTP9yhYRWIhEanEKhWQEnTd\nIgxjZmdn8TyXwIvpbW3TG/RpNRsULRPDNImDgEsb67TqdQ4fXpnUyAxoV+aoVMqoqqBYK2PrGkIq\n9Ho95mdmiKMEvZQNDzWrVcaOjUAhSGLOv3A+u5uolFHVV+h0NihUSoDEsX0Mq8irL13kjUurPP/d\n7+GHPsVSgYKqE0QRcaIQRgGL7WV6wz6WVcAPFSTgBQ7zK8skSoDqxUjNAD8CvYSUkIYjCkWLMPTY\n3lzHKlZQBMQli1qtzE6vR63epFKpTm7zBK1mE7NYxB6NcG0PpZXt3kzigAsXzlOtNfCDrBTc4tIc\npmkgZTZm7DjO5M4ky+1yo2RcqZQVqMiGSvS3vlhnZmYYDseUrTpxGmPbPp3NZxFxyvr6Omfe9wHc\n0YjWbJvR0GZrawtVVVnvXOfw4SP31G+llA7QuqeN5OTsAjGNDGhCCPnrv3WOOFAwU4tapY4hVI4c\nOUKtVidNJGHkoSoG1Vo5mwxNU6IopNmsE4Yho5HNwvwsb169zqHFRUql4mQseQuhqoydIWkQYJUq\nBH6QFaFQNQajHQwj2/ofBgnVWok0kdl4tOMxM9OiWquyfv06J06coN/rkQYuaAVG9gjT1FEUBd8P\naLWaWEUTz/MxTRPTsIiiGMdxuHR5jUYjS7RVq2WFmp2xS9Es4EYeg8GQi6+/ThBFPHD0JL7jc2hl\nhSQRhNLHMAq88eqrXLl+BUjRFIVCvYIqwSwUUUVKpdygUW9hWAWKps7h5RUura1x9rGzLLTnGY6z\nsflCscjqm5eYaTZYOn2WqL9Gb2MTNwhIZIKhF2m3l3DtEY5nUy5ZmJbFdrdLo9ZAKApB6ANwfX2d\n2XYbkSRZeb96C103GI16uI6DlJI0TWk0GkjIvqAVhY2NDRzHfmvuoFyuUKtVcV3vrQRbpVKJIPCp\nVLJ6od3ONjKN8UOfcq2GORmyGjsuIo7RdJPxeEynu87Zs2fZ6Q/Z2enxsV/+XaSUU5l1FELI/cq2\nl2dNvHvu96yJcRzf1renFsyf+PAySlmnpJY4MnMEqQgeOnmaWrVKYTKGmuVfMTANC9e1ieIQyyqw\nszMgjmMajSalkkUSRkRpjKpmvWBF0ehtbRGmAZVyGU3TaDab9HZ2svqecYTvRVzd6PK+UycRQuC6\nLv3hiAceOMlmr4euawgE1VoJXdMomgU2utvs9Dcpl0tUihalcpkkjvGikG63y3BnxPHjR1hcXKTT\n6XL58hqVSiXTaZm8/NJFHjv7CK5jkwpBzaowdl2e+cY3WVhYYLjTx9R00BWGg/FkJ2uY5Q23ijQq\ndWzbxvbGRJHCznaH2E9RNEGpVkcVKnPNeY6dPEx7rsX1a1ewSmVURScII0bjHk9/8pewhzaKIjBV\nhUSCF0SYpskLL1/ksUfeTxC6jEYjFEXFdWxKeoEwjpiZbzMej5Fpwvxim+2tbdoLi+hatkN17dIl\nPNvBdmy2elscOnSEGxkgVlff5PTpU5P0CUU2NjbY2Njg0Ucfpd/vZ/lywoTvnn+eB08cQdN0zFIx\ne860SOIE1x4TBB6u5/HG62u05mY4d+4cxaLF6xe/j2oqVCs1zn7kk+/pYH5j/8S7cSf/m3dq607b\nO2hdd8Ld2Hq3v7sbW+92HW+kG7kT3i2Y36mtdwrmUxtm6Yx8KkmAXjEpFgosrqxQq9ao12okMpsw\nNAwDVdVw3DGappKkWR7whYV5rl/fwHWzWpB6sUBZs+j3tomiBEVRaM5WUYQOStY7XFtbo9lsUq1W\ns4k4xeDl197kkfedIUli5ubmiJIU13WoVirU6w3SNCGIPApGdmfQmq2zuDRLr9djq9OlaFkYZoGR\n69Cen0dTFBYWFhkOhzSaNZrND9LpdEiSBNt2SCe92UZzBtu22e5vUSqV6G4OePrjH8H3fQqWheeF\njIZOVtezXsUej+lv73DixFFc1yNMYjwvRIgH6A9GjMdjqtUqg+EYL7K5fOUS29tbWeKwYonZ2SZL\ny4ewqgViL6A4ySgZeC6abrAwM4OMY5752jf40MMPEsdxVj/U89FUHS8MWFxZ5vKVKyDANExs28EL\nPF568QLD4ZiHzpzGtm38MKHWbFGp1mkvLOB6DvZ4xJkzD7G1leVT2dxc5djRk/hewuW1q7Rm6nQ6\nHYLQ55mv/y8njq5gj12UQZ9XXn2NuYUFDFNH07J0woZV4tyHH6fVbNDv91ldXWV5eQWhSpLkvV/Q\neVqBLre1v7buJpi/G/vRqZ5aMFeVENcXzFgpC4eWWWgtULQMUCRKIhCqQhgGFIsF0jRhMBiyON9m\nZ9gnTWFubi7LkpjEpK5Lb5xlP1RFQprGCFEiCD2iKKVolTHMYpYbfVJ0IvBDojAk9G0GdpZKtVAo\nZDlhNB1JSCpjqrUqoZ+tnCloBltbXUzTZLY9T384pGgWUVMFTQiqlSqbm13m5udQFEkSZ9WLymUN\nxwmYW2ijmwYCQalUojXTwHF9VF0nihKsUgXbGRL4KSoppXKRoqHT9z0KBR3f9wgDD1XTKBRUoiDk\nA6dOopsFVE3FHu9g6EVs16WgFzh06DCNRoPu5iZJGHH+Wy9w6vQZlg6fIRluo2s6iq7juR6qIjB0\ngziIaLVmJ2lrJVHgougmq2++iSJMlg4fpt/bZGfgoBsmiply4vQh4jjh2PFjrHc7HDp8mI3L6wxG\nfaLAp92eYzzOvpw0TePkyQfZ2dlmcWmWzc0tLjx/geZMnbE9RqYpa1ev8cgHHkamKYmiU6nUmGvN\nE4cenc51Qt+FJMAPPYqWSV3WKFgGWzs7iPdAcYqcnGkwtWBeqht4TsLDDz7MkYUVUpEwGAwAMDQN\nFEEcx9mSQUUllZLVtUsUzCz3hqZLhKJmOa8HfVqtVvZ6TUchJU2zhEzlSo0gCJBSEoYhCTqqVCmV\nKkipsLJyiLLrkKYSzTBxHAdVNybFETRMM0HTtEl5MoV2u43vB1QqFUghDLJVHp4/5tDKCmPbpmCa\nWUEGM8WyClne9GGfU6dOQypJNUGqSBzXBQG6pjE33ybwszXv1YrgytoVZtrzrF+9RhAEHD9+nCDM\ninVomkarXmdpaZFr16/gOEMSoFQos7K08lZecMuyGI1GLC0s0tnc4MFTZ5hdWMbtbaGbBaIoQAlT\nLKuIYugohk51ps5oOMLzXLa3t4l9h3KjxcJCm972Dooe4acRzXoVRSTYtouqgBemdNcuc3j5KDLV\nWD60TLezwcrKUa5fvcLs3Dw/8XgTz/NIZczy8iGee/Y5Hjt3jv/8n//micefZH7hGEXrOaqlGrVq\ni/F4xJFDx9jobtBolOl0xmi6he0FjIYOplWlVqtRa7Wp1aoI3UDbx3HOnJz/T0xtzPzAG835sWKa\nY+bTaDfnx4f31ARoTk5OTs7+kg8w5uTk5NwH5ME8Jycn5z7gwIO5EOJjQoiLQog3hBC3zDq3T+18\nTgjRFUK8dNO5phDiGSHE60KIrwgh6jc995mJpotCiI/uo44VIcTXhRCvCCFeFkL8wTS0CCEKQojv\nCCEuCCFeFUL82TR0TOyqQojnhRD/MS0N02K//P9W/r0HW7f00V3auqWf7VHfj/jLHuysCSFenNi6\nZW3iu7BVF0J8UQjx2uR9Pr5LOw9O9Nx4DHf9+ctJGa6DeAAqsAocgSyVNnD6HrX1FFlVmJduOvcX\nwB9Pjv8E+PPJ8UMTLfpE2yqg7JOONvDI5LgMfB84PSUt1uSnBnwbeHJKOv4I+Gfgy9O6LtN47Kf/\n38q/99tH99PP9tNf9mDnEtDcp2v5D8Dv3PQ+a/tgUwE2gJXdvP6ge+bngFUp5ZqUMiKrp/j0vWhI\nSvlN4O3JrT9BdhGY/Pzk5Php4AtSykhKuUb2D3dun3R0pJQXJsc28BqwNCUt7uTQIAss/YPWIYRY\nBn4B+Ft+mMjxwD+LKbFv/n8b/94Vt/HRxT3Ye7uf7ezW1m38ZS/s2YYQogY8JaX8HICUMpZSDves\nbI/Vqw46mC8BNwu9Njl3UMxLKbuT4y5wI/n14kTLPdUlhDhC1pv6zjS0CCEUIcSFSXtfl1K+MgUd\nfwl8Grh5f/NUr8sBMm3/f1fe5qO7tfF2P3t1D5Ju5S+7RQJfFUI8K4TYS9bLo8CWEOLvhRDnhRB/\nI4Sw9kHfnqpXHXQwf8+sg5TZfc076dlXrUKIMvCvwB9KKcc3P3dQWqSUqZTyEWAZ+EkhxM8cpA4h\nxC8Cm1LK57lND+mgr8sB857WPvHRL5L56K5zCd/Cz356l3re1V/ukieklB8kKy7y+0KIp3ZpRwM+\nBPy1lPJDgAP86V6EiR9Wr/qX3do46GB+HVi56fcVfrTnda/pCiHaAEKIBWDzNrqWJ+f2BSGEThbI\nPy+l/NI0tQBMbgn/C3j0gHV8GPiEEOISWXGHjwghPn/AGqbJtP3/ttzko/90k4/uiZv87LFdmriV\nv/zjHvRsTH5uAf/O7ofsrgHXpJTfm/z+RbLgvhfesXrVnXDQwfxZ4AEhxJHJN9GvAF8+wPa/DHxq\ncvwp4Es3nf9VIYQhhDgKPADsabb7BkIIAfwd8KqU8q+mpUUI0bqxSkQIUQR+Dnj+IHVIKT8rpVyR\nUh4lu6X8mpTyNw5Sw5SZtv/fknfw0d3Yup2f3TW38Zff3KUuSwhRmRyXgI8Cu1oJJKXsAFeFECcn\np34WeGU3tm5i79Wr9mNm9y5nbD9ONlu+CnzmHrbzBWAdCMnGKX8baAJfBV4HvgLUb/r7z040XQR+\nfh91PEk23neBzKmfBz520FqA9wPnJzpeBD49OX/gn8nE9k/xw9UsU9Ewjcd++f9N/h3c8O/99tH9\n9LP99Jddvv7oRNMF4OW9xh7gYeB7wAvAv7GH1SxACdgGKnvRlG/nz8nJybkPyHeA5uTk5NwH5ME8\nJycn5z4gD+Y5OTk59wF5MM/Jycm5D8iDeU5OTs59QB7Mc3Jycu4D8mCek5OTcx+QB/OcnJyc+4D/\nA43ph1xlbAoPAAAAAElFTkSuQmCC\n", "text": [ - "" + "" ] } ], - "prompt_number": 7 + "prompt_number": 11 }, { "cell_type": "markdown", @@ -341,7 +445,7 @@ "source": [ "The classifications include various cats -- 282 = tiger cat, 281 = tabby, 283 = persian -- and foxes and other mammals.\n", "\n", - "In this way the fully-connected layers can be extracted as dense features across an image (see `net_full_conv.blobs['fc6'].data` for instance), which is perhaps more useful than the classification map itself.\n", + "In this way the fully connected layers can be extracted as dense features across an image (see `net_full_conv.blobs['fc6'].data` for instance), which is perhaps more useful than the classification map itself.\n", "\n", "Note that this model isn't totally appropriate for sliding-window detection since it was trained for whole-image classification. Nevertheless it can work just fine. Sliding-window training and finetuning can be done by defining a sliding-window ground truth and loss such that a loss map is made for every location and solving as usual. (This is an exercise for the reader.)" ] diff --git a/examples/siamese/mnist_siamese.ipynb b/examples/siamese/mnist_siamese.ipynb index 5abd0469ba6..8e076663ca6 100644 --- a/examples/siamese/mnist_siamese.ipynb +++ b/examples/siamese/mnist_siamese.ipynb @@ -3,7 +3,8 @@ "description": "Extracting features and plotting the Siamese network embedding.", "example_name": "Siamese network embedding", "include_in_docs": true, - "priority": 6 + "priority": 6, + "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" }, "nbformat": 3, "nbformat_minor": 0, @@ -55,10 +56,8 @@ "MODEL_FILE = 'mnist_siamese.prototxt'\n", "# decrease if you want to preview during training\n", "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", - "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE)\n", - "net.set_phase_test()\n", - "net.set_mode_cpu()\n", - "net.set_input_scale('data', 0.00390625)" + "caffe.set_mode_cpu()\n", + "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" ], "language": "python", "metadata": {}, @@ -105,10 +104,7 @@ "collapsed": false, "input": [ "# reshape and preprocess\n", - "caffe_in = raw_data.reshape(n, 28, 28).transpose((1,2,0))\n", - "caffe_in = net.preprocess('data', caffe_in) \n", - "caffe_in = caffe_in.reshape((n,1,28,28))\n", - "# pass data through network\n", + "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", "out = net.forward_all(data=caffe_in)" ], "language": "python", @@ -143,9 +139,9 @@ { "metadata": {}, "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAA6MAAAIXCAYAAABpSojLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXt0W9Wd9/3V3RdZlm05OI5jxSkkGAjYwQlpkxTTkNKa\ngEWLO4PplDClelbpdOiaNcmzuqZM553CmllP2unM2y7omzKTUAYBThhCQhNCnMRO4oDzALmVpJgm\nxMU4iuO7ndiybOv9Y2ufi3R0l+Uj+fdZy8uSztn77HN+un31uwEEQRAEQRAEQRAEQRAEQRAEQRAE\nQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRBE\nkskC0A7gFIBzAP5ldpdDEARBEARBEARBzBVy/P/1AN4DsGYW10IQBEEQBEEQBEGkAdokzHHd/98I\nQAegPwlzEgRBEARBEARBEBlMMsSoFixM9wqAw2DhugRBEARBEARBEASREvLBwnRrZ3kdBEEQBEEQ\nBEEQhMrRJ3GuIQC/B1ADoIU/WFpa6uvu7k7iYQiCIAiCIAiCIAgVcQHAjbEO0iR4UBuASQCDALIB\n7Afw/wA4KNnH5/P5EjwMkQw2btyI7du3z/YyCJAt1AbZQz2QLdQD2UI9kC3UA9lCXZA91INGowHi\n0JaJekbnA3gRLG9UC+AlyIUoQRAEQRAEQRAEQQSRqBg9C2B5MhZCzDyLFi2a7SUQfsgW6oLsoR7I\nFuqBbKEeyBbqgWyhLsge6U8yqukSaUJtbe1sL4HwQ7ZQF2QP9UC2UA9kC/VAtlAPZAt1QfZIf0iM\nEgRBEARBEARBECknmdV0CYIgCIIgCIIgiAAKCwsxMDAw28tImIKCAvT39ydtvkSr6UYDVdMlCIIg\nCIIgCGLOotFokAmaKNR5xFtNl8J0CYIgCIIgCIIgiJRDYnQO0dLSMttLIPyQLdQF2UM9kC3UA9lC\nPZAt1APZQl2QPdIfEqMEQRAEQRAEQRBEyqGcUYIgCIIgCIIgiBmEckaVIc8oQRAEQRAEQRDEHKa/\nvx8PPfQQzGYzFi1ahFdeeSUlxyUxOoeguHr1QLZQF2QP9UC2UA9kC/VAtlAPZAt1QfZIHj/4wQ+Q\nlZWFnp4evPzyy/j+97+Pc+fOzfhxSYwSBEEQBEEQBEHMUa5du4b/+Z//wc9+9jPk5ORg9erVqK+v\nx0svvTTjx6acUYIgCIIgCIIgiBkkYs6o0wl0dAA5OYDLBVitsR0ggfEnT57EmjVrcO3aNeGxf/u3\nf0NLSwt2794d1XlQzihBEARBEARBEEQ60tEBtLYC+/YxYZnC8aOjo7BYLLLH8vLyMDIyEvs6YoTE\n6ByC4urVA9lCXZA91APZQj2QLdQD2UI9kC3URUbZIyeH/a+pAbZuTel4s9mM4eFh2WNDQ0PIy8uL\nfR0xQmKUIAiCIAiCIAhiNnG5gIYG4MCB2EN0Exy/ZMkSTE5O4k9/+pPw2OnTp3HbbbfFvo4YoZxR\ngiAIgiAIgiCIGUTtfUYfeeQRaDQavPDCC/jwww+xYcMGvPvuu6isrJTtRzmjBEEQBEEQBEEQRNJ4\n7rnnMDY2hnnz5uHb3/42fvOb3wQJ0ZmAxOgcIqPi6tMcsoW6IHuoB7KFeiBbqAeyhXogW6gLskfy\nKCgowBtvvIHR0VFcunQJf/mXf5mS45IYJQiCIAiCIAiCIFIO5YwSBEEQBEEQBEHMIGrPGY0Wyhkl\nCIIgCIIgCIIg0h4So3MIiqtXD2QLdUH2UA9kC/VAtlAPZAv1QLZQF2SP9IfEKEEQBEEQBEEQBJFy\nKGeUIAiCIAiCIAhiBqGcUWXIM0oQBEEQBEEQBEGkHBKjcwiKq1cPZAt1QfZQD2QL9UC2UA9kC/VA\ntlAXZI/0h8QoQRAEQRAEQRAEkXIoZ5QgCIIgCIIgCGIGUXPO6K9//Wts374df/jDH/DII49g27Zt\nIfdNds6oPtYBBEEQBEEQBEEQRGawYMECPP3009i/fz/GxsZSemwK051DUFy9eiBbqAuyh3ogW6gH\nsoV6IFuoB7KFuiB7JIeHHnoI9fX1KCoqSvmxSYwSBEEQBEEQBEHMKk4AtQDqAAzOwnjMShgx5YwS\nBEEQBEEQBEHMIJFzRmsBtPpvNwBoivEIiY4Hnn76aXR1daU0Z5Q8owRBEARBEARBELNKjv9/DYCt\nszB+djyjJEbnEBRXrx7IFuqC7KEeyBbqgWyhHsgW6oFsoS4yyx4uMI/mAQDWWRgveDdTClXTJQiC\nIAiCIAiCmFWsiCe0Nhnjp6am4PV6MTk5iampKXg8Huj1euh0ugTWEx2UM0oQBEEQBEEQBDGDqLnP\n6D/90z/hn//5n4Me+8d//MegfZOdM0pilCAIgiAIgiAIYgZRsxiNBSpgRMRNZsXVpzdkC3VB9lAP\nZAv1QLZQD2QL9UC2UBdkj/SHxChBEARBEARBEASRcihMlyAIgiAIgiAIYgahMF1lyDNKEARBEARB\nEARBpBwSo3MIiqtXD2QLdUH2UA9kC/VAtlAPaWMLpxOorQXq6oDBwdlezYyQNraYI5A90h8SowRB\nEARBEETidHQAra3Avn1MmBIEQUSAckYJgiAIgiCIxKmrY0K0pgY4cACwWmd7RQShGihnNMR8SVhT\nJEiMEgRBEARBZDqDg8wjunUrCVGCCIDEqDIUpjuHoLh69UC2UBdkD/VAtlAPZAv1kDa2sFqBpqaM\nFqJpY4s5Atkj/SExShAEQRAEMZuksvDPHCgyRBBEbExMTOC73/0uFi1aBIvFgurqarz99tspOTaF\n6RIEQRAEQcwmtbWs8A8AFBcDJhNgtwMWC+ByJdfTKD1WRQVQXg7k5CR+HKeTFTBKxlwEkYGoOUz3\n+vXr2LJlCx5//HGUl5fj97//PR555BGcPXsWdrtdtm+yw3T18S6aIAiCIAiCSAI5Oey/2Qxcvcpu\nd3Wx/5WVwPnzyRN3/Fg1NUz0cmHqdLIQ23jhlXSTMRcJW4JIKTk5OfjpT38q3L///vtRUVGBDz/8\nMEiMJhsK051DUFy9eiBbqAuyh3ogW6gHsgVSF9LqcgENDcCqVey+xSJuc7vR4nAk/1gHDojHqalh\nRYcSQSpyE51LxS1i6HWhLjLJHk44UYta1KEOg4j9/SbR8VKuXLmCjo4O3HrrrQnNEw0kRgmCIAiC\nIJRIpihyOoH584HCQmD9erm45YV/duxgQvHMGaCkhG2rqQH+/u/l84QSyNGIZ2mRIakwTdT7mMy5\nkils44Vya4kU04EOtKIV+7APTsT+fpPoeI7X68Wjjz6KjRs3YsmSJXHPEy2UM0oQBEEQBKEE75tp\nswFLlyaWwynN1QSYcGtqCh2SGqpNinQePkc021Id+prI8dTQIibctSSIOIiUM1qHOuzDPtSgBgdw\nAFbE9txPdDwATE9Po7GxEaOjo3jzzTeh0+miPg9q7UIQBEEQBJFMuLdv6VKgrS0xDyn39gFAdbXo\n8QvlfZV6MKVeOoOBbVfyGobzKKY69DWR46mhRYwavLPEnMIFFxrQELeQTHS8z+fDd7/7XVy9ehWv\nv/66ohCdCUiMziEyKa4+3SFbqAuyh3ogW6gHsgVEUZSM3EqXC6ivBxwO4NAhUWhFIXpaTpwQhZ3Z\nHDoctriYeXGVRNyFC+x/fj6wZUt85xAL6S7mQoQd0+tCXWSSPaywoglNcQnJZIz//ve/jz/+8Y/Y\nvXs3TCZTXHPEA4lRgiAIgiCIcCQjH9JqBXbtAt54Qz5HNHPzL4Y1NcC2baG9hp2dQG8v0Nwc7I3k\nFTGHhoAVK+S5kDORH5nMHNLZQA3eWYJIEZ2dndi6dStOnz6NkpIS5OXlIS8vD6+88sqMH5tyRgmC\nIAiCIJLBTOVlRptDyXNca2qCRSDfZjYDo6PssYYGUXQNDbHHiovZ+FDrp7YrBBEXau4zGguUM0oQ\nBEEQBKFGpHmSlZWxeRnDeSetVvbncISvouv1slBgLkSlcz7/vLx9DA+f7egQhahez/qchsvzVHHb\nFYIg0g8So3OITIqrT3fIFuqC7KEeyBbqgWwRI04na8nCcbtDizUl4blnjyjyNm6U7d7S0iIXgcuX\ny8fzbc3NgNEoeiulY+65B+jpYY9LBSvP7SwoAFavZrfD5Xmmey5ogtDrQl2QPdIfEqMEQRAEQRCJ\n5k12dAADA+L9cGJNybvo8YjbNQqRblIRWFoqHx8oEPm5fPRR8JhAwcpzOy9eZDmtkfI8lXJBQ/VQ\npV6dBEFEgHJGCYIgCIKYG4TLd0y0ryTPyayuBsrLge3b5WJNetzGxuDczvXrmVCsrpZX2+VI80YD\nxwPMW1payir/Dg+zVjQAUFYGnD2rfMxkEaqHKvXqJAgByhlVRp+ENREEQRAEka7MpYI03CMJsPOW\niqNEw09drtBFhgKPq7RvaSlry1JUpDw/LzTkdDKxWVIC3HgjyyPNyWHjuQDV+gPfdDrg979nY8Ot\nLxrCPU+kPVSrqsTrN8dDegmCiAyF6c4hKK5ePZAt1AXZQz2QLWaBEAVpZtIWR5xO7K6txd66OnhS\nGb4ZThzF24qEh6IuWwZ0dzMPZGCYqjRclovBwIJEYdqyyGzR0cFEp9sNHDwo2o73ETWbgelpdntq\nCvj619ntwFYlfG0LFwJr1kQOpQ2Xs+pysXOprwcOHw4OAU7X9i4K0HuUuiB7pD/kGSUIgiCIucws\neK8GOzrg9nsKjzqduDdV4ZvhvINcrMWK1OvZ1cX+c6+rdFtZGRNlmzezx8+cEXNMQ+V9ck/kk0+K\nx5Pu193NbufnA/v3A/fey6rhSqmqin7dlZXA+fPKwlF6XJMp2MP8xhvBY+K9pgRBzBkoZ5QgCIKY\nVY44nRjs6IA+JwfrXC6YMsSDkjZE28Myieytq0PXvn2w1dTg/gMH0tvmPFc0P5+1SKmpAW65hXk6\nP/qIeTuleZqB+ZVVVcybCITO+5TmWw4OivudOMHauQDMM+nxsLVw8vOBS5eU7crXrdMxDyonVG5n\nuJzVdLYfQaQIyhkNMV8S1hQJEqMEQRBESHbX1gpessUNDanzkhGzhmdwEEedTqzdujW9hSggirQt\nW4BNm5hYczjkHtGzZ0WPKBeonKws4M47gwVodjYwNgYYDEx8FhaKuZqBghZguaYmE2vfMjnJ9jt1\nCrDblfM9BwdZ3qm0iq/NBixdytYSLn94Fn7AIIh0h8SoMpQzOoeguHr1QLZQF2SP2UXvD/+z1dRg\n+jvfmeXVRM+s5T2miJl8XZisVtzb1JT+QhQQQ1HtdjEPVJojevYse5yHxfb2MtHIGR9nAnTfPuD4\ncfZYVRXL/QQArxct7e3ynF4eMpuXx/7n5gJ9fSxsd3KSPabTAd//vrwP6b59wE03Ma8oAEi/UJaU\nMCHK1xKqR6r0nDPBfjFCnxfqguyRPL797W9j/vz5sFgsWLx4MZ599tmUHJfEKEEQBDGrrHO5sLih\nAfcfOAAj/wKeBvC8x659+3A03Bd3IjOIpmemVHDyHNFVq5ho40Kzpgb4+GMm/gDm+eRwcbhokSgq\nOdKcXpcLqKhg4cAlJWwbwMJyAVZNt6+PicrKSvEYZjNbGxeb2dni/KtWMY9o4LHiuQ4EQaQdP/7x\nj/Hpp59ieHgY+/btw69+9Su8/fbbM35cCtMlCIIgVEM65Y9mVN7jXCOedjbz57MKtgCrGrtrV/A+\nPA9TmktptbJcUgDQaIBPPwWefRY4dw744ANArwdGR8U5eA5pQwOrrFtQwMYtXw7s2CGuVbqeujrm\nHd2yhR1bGgbM12s0soJJzc3i+vgxeG9Tfm2Uwm/5NZMWXqLeoQQRNekUpvvxxx9j3bp12L17N5Yv\nXy7bRn1GCYIgiIxl1qqsxsE6lytz8h7nGtJKsjfdBKxYgSPFxRjs7Az9Q4g0t1Kj8H1L2v9z505R\nzAV6Pu+5B+jvFwWqdE6rleVtAkx4Op1snT09TDQWFjLvZ00NyyflGI1s7GOPiY/p9cy7ajYD//f/\nMk9qdjYLI962je1fWgoUF4u9TaXVbwMFu/SaAWwdW7aEv84EQUSNE0AHgBwALgCxfqokOh4Annzy\nSbz44ovweDz49a9/HSREZwIK051DUFy9eiBbqAuyR/wkO2/ynP8Ltq2mBmtT1GYkXjIq71GBjH5d\n8JxLSdjq4N694cOu77yT/a+uZmIuEGn/z02bxHDWW28Vc0RrapgADBSiABOt3HP5+OOiMPR60cL3\n8flYaGxzM3DtGnvs9ttZeG1TkxgirNUCd9zB5hwdZbmkbW1snM8nCuXOTtYKRqG3aVD/WX7NcnPZ\n/6Ehdp5zjIx+XaQhmWSPDgCtAPaBCctUjweA5557DqOjo2hubsZPfvITnDhxIs6ZoofEKEEQBBE3\nyc6bXP7000L+aKaKPEIFuFwsxHTVKna/pgZ6fz/OkD+E7NjBxhw6FLkP59atophrbQW+9jXmmfzz\nn4H29uCxer28vcrx42LF2unp4P3NZjGntKODhQxLBe70NAsB5m1fpHCvrtPJQm4BFhoceM6B58Pz\nVLmnV2lMJJxOFl5cWAisX085pwQhwf+KQw2AeH6KTXQ8R6PRoLa2Fg0NDXjllVcSmCnK4834EShn\nlCAIImOZC3mT6ZTHSsSIpEWJBwgdds1DVi9cYFVzeesT3q7lwgVgwQLmaXzvPbZPYP6otN0LwMRY\nayswMaG8toYGFp4b2MKltJQVLmpuDn1ePES3qop5RXt62OO3387m27wZ2L5dFKvFxew8pOet1L5F\n2lLG4QDeeCPMxVUgsCUN5ZwSc4hIOaODYB7NrYgvxDbR8YE88cQTKCkpwTPPPCN7nPqMEgRBEFET\nj5CKZUxG9YsMQTr0QT3idGJwzx7oPR6su/NOmKSFboj4USraw2loAN56S567CYgiLVDMcXEKsDDa\ntWuZl7G5md2XekAtFnbMNWuAri5RXHJhCwDz5il7PgHghhvYfHfcAbzwAvDUU6zn6NgYG8PDfaVU\nVADl5cFFnaS5o16vvABSrM8x6TXgRZPoeUrMEdRcwOjq1as4ePAgHnjgAWRlZaG5uRnf+ta30Nzc\njBUrVsj2JTFKxE1LSwtqa2tnexkEyBZqg9tjpoXbbCAVUnkVFcgtL4+41tkUX2p8baSD91dmMwD3\nJsHjpEZbJJWbb2b5nQYD8P77zJsZSKAnLz+fhcOazSzE9/BheXgtwATXggWigCsuZh5Tg4GFzkpF\nrdnM+oxOTTGBKBWlxcVM/A0OogVAbVkZ61nKvbHvvsu8qmYzq7j72WfK52mzAVlZrLjS8LDyPtXV\nbK1tbeKYFSvYGt58UwwBdjjYeShV242GwUFg40YWKsyLKKUZGf+6SDPSyR5qFqO9vb14+OGHcfr0\nafh8PixZsgQ/+clP8OCDDwbtS9V0CYIgZoh4Krmqvfqr3p/3Zaupgc5kimqt0jFqLyKUCtKhaq5g\nMwBr48nlm4u43aLIWrqU3Zfa1+lkgg9ggu/LXwaee05sndLczIRZoBhtb2eCkotOqcDUS752VVUB\nFy/K+4lKvaNXr4q3s7LYtptvZseWHnN0VN4vVIpWG9zmBWBFiHgBpNJS5qFsbBTPlfciNRhE72tB\nQeIC0mpVbolDEMSsYrPZZq0YVKKe0YUAfgdgHgAfWJjy/xuwD3lGCYJIC+LxgKXSaxaPF1YaRnuw\nsTGqtc6F0NtMwzM4iKOPP461Ph9M27enpccp5RQXy4VaYA5kqPzGhQtZ6KxOx4TpiRPMq6nEsmXA\nH/4QvL20FPjoI9ZWRroGqfiLFpuNiWo+TqMRj1dcLBe1AGs9c9ttTEzbbEyIWyzM68ur7fb1ycfk\n5wOnTyt7j4H4+rYSxBxDzZ7RWFBbmG6J/+8UADOADwA4AJyX7ENilCCItCAeERavcItHWCYaPksi\nk5jzSEXTv/4r807y7yj19XKvnTS/MS+Phcja7SyPk4ezAnLxp9PJvZalpax4kNT7yUN8L10CrlwB\nRkbY41p/g4PA6rkWCwuv5f+jRacDWlqAb3xDFKRWK/Dgg6zgEi/GxKv72myiMDaZ5H1VIxUrkgp3\nKkpEEIqQGFUm0dYubjAhCgCjYCK0NME5iRkik3oxpTtkC3XB7RFP38hYxkh7cvafOxdzS5REw2ej\nXWuye4fGAr021ENG2kLaO/O++1joLcBE6fbt8n1dLqCoiN0eGRF7alos8v34l7KqKiZUOVotE528\nL+fNN7OWJqOjzAN56ZIoRAEmQqVCdO9eJuzOnEHL3Xezgkbl5dGf69QUsG6duL6CAlbEqLNT7Ifa\n2cm21dSw9fPbH3/MPKj8/rZtYt/Uurrg4keBbWAymIx8XaQxZI/0J5l9RhcBqAag0ECLIAhC/Rxx\nOvHS/PnYXliI369fH5cQCyfkpD05hy9cABCbsFzncqWkB2eye4cShGrwv+4AMDH2hz+wfEuTiYXU\nrlkjii2rVczFzM8Htmxht10u5imcN4/dLyxkorWnhxUj4nmh09PA7t3yvNS77mK3a2pET2gofv1r\ntobHHmNVcPPzQ7eBCcXEBPN2lpWx/FS7XbwGFguwfz8TvAcOiH1UDxxg+50/L963WoE9e0Qh//jj\n8uPwvq3xVNglCGJOk6wCRmYAOwE8BeYhlbFx40YsWrQIAGC1WlFVVSVUvuK/aND9mb9fW1urqvXQ\nfbo/2/f/Y8MGjHZ1obq0FJ6qKhw9cQL9bjeWAvi8uRm/djhw5z/9U0zzt504geLTpwEgaPy5sTFc\nBbC6pgbrd+7E1scew6K//3tBWEaa/91Tp6B/8km0b96MwY4OnBsbw/Knn8ZXN2xI6vXhHtiLFgt6\nzp/HRF0d1rlcePfUqaTMH+k+Z7afH3P9Pn9MLeuJ+77LBXR0oGVsDMjLAz+7Fq0W6Otj99vb0QIA\nXV3svtOJliefBEZH2f2hIbQ8+ijwzDNs/uJitExMADodaqem2HYA6O6Wzz85ye5rNGhpbgZuvBG1\ndjtgMrHxgLi//79wf+9e4X4tgJZFi8T5lPYPdd9iAY4dQ8upU8DPf45af6hvy/Aw8Ld/i1r/dWpp\naQGefBK1/P3o1Cn5/dFRcX6fT369N29GS0cHcN99qN2/H7Ba1WN/uk/3VXQ/U2hpacGpU6cw6P/R\n/dKlS3HPlYzWLgYAbwHYB+DfFbZTzihBEKokMAdzYnQUXf4csaLqamw4dChmD2S4gkbhcjZjySGd\n6dYrfJ3XurtxxZ8bp9b+mgQRkdpaMZ+xpIR5RI1Gsd1JXh4Ll+U5mdIemoWFYlVcad6kdE4Ob/si\nrVQLBOeRJkJWFvO+xsr69cA77wSv++67WZ5sqPcbaY7t9etsbFUVcPvtLMSXFyxyOChnlCAiQDmj\nymgTXQ+A/wRwDspClFARmfarTDpDtlAH3APoXrIEa7duxTqXC/b6etgdjriEKCAPpW3fvFkWshsu\nZzOW0Nh4c0ejzQXl6zT4c+NS2eKFXhvqIR1tofgc5/mMNhsThQaDKEQBYO1aoKICqKxkYnXnTtbL\ns7ZWDKWtrmZ5kxw+J8Aq0zocrNpsQwPwxS+yx4uK2F8SwlZbADGcOBq0AV/vjh5l/6XrBpiADAy5\nBcT80J07xdDcggJ2focPMyHKH3c6KWeUmDXIHulPomG6qwF8G8AZACf9j/0YwNsJzksQBDHj8P6R\ni77zHUEg3pdADzypd/P4U0/h0ptvwuvPF4vUgzQWgRlv38tYe6KmQ3/NOclstNFIk9Ydis9xl4ut\nv7tbrILL26DU1AAvvyz37G3axPI/+f2yMpZP6nCI5+9yMRH34YfMI+rxsP9NTSzf1OlkOZbReDG5\nZzYSY2PsLxoKCpiXdnKSVfutq2Pi8pNPgvdV8tTs2cM8yBxexIjbXUl8Op3stkqfGwRBqJNkhOlG\ngsJ0CYJIOxJtvWIqLobH307BWFCARy5eDDtHKtquqL0nKhEl0lDLVIVEJuuYMyxqwz7Hpa1ali0D\nFi9mFXStVnEbD9FtbJTfl4rVkhJW3MdqDX1dnE7gt7+VL46HugYibQ0Ty7ZIFBWxcOH2duBv/zY4\nrJivyWplnmGLRbSJNDzZaGRVh3fsEO3FBTeJT4KIGgrTVSZZBYwIgiAyikAPi9FqjSiupN5No9WK\n7uZmGAsK8M2TJyOKMR4aGw1c6I1cuIBcux1GiyUqwZdKT2esXlgiBmYjJDJZx+StVQAmZqTPi0hC\nNQohK3uOb94s3597M30+UYRyuPeUi6viYhbWG+gJBJjHcPly5qXkfTm1WuDQIeCRR4DLl4F335Uv\nbNky1lNUSYyG+3IazRfXrCzgK19hwlMqOPv6mEC+/XZx/Tyv1WhkIcvXr7O/7m62feNGlkN6552s\n/QzPf21ultvLaqW8UILIUD755BMsW7YMDQ0NeOmll2b8eInmjBJpBMXVqweyxcyilDcWLl9SyR6B\nYbPR5HRK80XX79iBxQ0NeOTiReTZ7Uk9P76Wa11d6Glri7oFS7ic1WT2Fj3idKL/zBkArBBUtPmm\nR5xOPFtVNSv9TdOKFLXRkL0uknXMcKJW2gNU6fkcaruk/6UJEJ/jgftbrawAkVLBHi6u+OOdnUxo\nNjcDS5awsN6sLHH/Tz9lonRykt2fnmbir6mJHTOwBUt3d3yFh/y0hNs4OclE64svspYsvLVMbi4L\nS16zhq3fbGbrNBqZOFYqqnTqFLuWAFBfL+a/1tSwnFX/dQ7qMzqHoM9vdUH2SD4/+MEPsHLlSu7p\nnHFIjBIEkXEoCcdYe2cGFiLi4qqwqiqkuJKKvXDC77Wbb8Y2qxUvFhdjhDedR3R9TqVCz5CXByD6\nAkPR9kBNtLfoYEcHJvwhfuby8oheWL6uizt3ov/0aepvGonNm1lOY2Nj6kRBoFiLl3CiNpL3NdT2\nUCI1EW8uH2s2A1evil7CSITyZPb1iQWTwuF/TcfE5CQ793vuYaKXC+Rr18Q82eZmJoZHRphQ/sMf\n2D4FBcDq1ex2dTXLj21tZfsbjfLeo4FFiwiCyDheffVVFBQUYN26dSkLKSYxOofgvY6I2YdsMbMo\nFQMKVyBvj4r2AAAgAElEQVRIyR5SMSkVVxMDA9jvcCTkvbvudsM7NARPby92r1kjPD7Y0YExtxsT\nAwP4vLlZUZBJ11Kydq0gmEMJPqkAHTh3LqTgjLdCrxLSuWq3b4+4PxfCEwMDWJqkNWQ0kTyISWJG\n3qfCidrA0NhAQgnZUKIzGm/uzTeLYbmSH4aEtfDqtbwSbziWLBG9kvESopBRbeADvFouF69mM9DV\nJRZmUoJ7OXJygBMn2LW5eBF46y12+9AhljcKiNfSamV/Dgfw0UfybXMU+vxWF5lkD0mQR1y/MyY6\nfnh4GD/96U/xy1/+MqW5rZQzShBExqGUG5lIvqRUXOlMpoRyIY84nZj09yDU5eTgwWPHgo4DhA5v\nla7lKy+/LJxLqDxSae5mdkmJMDZw7mTmk8Y6Fz+noupqmMvLUbt9OxU8CkemttGQhsYG5pMCofMU\nA/M9I+0vxe1mOZQAC2f97DP5WgAm4iYmQns2NRomQj/9NLwYTAbf+AZw5Ahw663s/LKzWfuV0dHI\nY2trWeGlY8dYOK/02jQ1sWs4PCy2t+HXUprnW1YWWtynScVlglAr4VLqUzH+6aefxhNPPIHS0tKU\nhegCJEbnFC0tLRn1C1I6Q7YIT6KVWJWKAYUrENTS0gKtX7gpFQWSiquDjY0AYvPeSc/HOzwMnz+M\nrrS2VpZPus7lQsvGjYBGg9pt22TnzefQGQyw19cHCTap6LzW1QWAiWWpeF2/cyfaN21SFImxFFCK\nRKi5QtlVen3fPXWKhGgkQomvJJPy96l4RXYixXQMBvHYkh+GZAWLfD5lIarXsxzM6WllEWowJE2c\ntgCozc9nPT4HBtg3zooKoL9fDMsNh07HqvuGy1/v6BBb39xzD1Bezq4Dv0a8sjB/zgWKz0S/CacJ\n9PmtLjLJHon+zpjI+FOnTuHgwYM4eZJ16iTPKEEQc5rZqMQaSszxUF2+Bi6cdNnZ2O9wRCWYQ3kn\ns2w27K6tlc0Rqs+pdI7FDQ1Bxxu+cEF2X5rbKvVShruWR5xOdO7ZgymPB8V33ol7d+xIqjAMZddk\nCuE50VImUyuZpkhky3j/feYR5d5C6VoqK8Vem0ptWSKJwCSKUQCiBxdg3zZNJuaNjYapKdY/lRd2\nyskRQ5MvXGDn/vHH8rm5sKyvZ8LXZGJ5ytzrGSg+M9VjTxApItG3wETGt7a24tKlSygvLwcAjI6O\nYmpqCufPn8f7778f+2JigPqMEgShOlLZDzPwmIb8fHiHhiIeW9pTdHFDQ1gxJT0fqXdyv8MR1xxK\n63pzzRpc4V4NAHaHA/e98QaAYIHWvnmzomCTnlM0a4qV/164ENe7umCwWPDwmTNJrzIMxGYXIkOJ\nJlw0mn0GB8VWMLy9SSzodPI806IiVsgoUYqKWLGhc+dYgSKNhrVvOX069Jhly1h4r7RfanExK84k\npawMOHtW7LNqswFLl7Jj8b6jvJ9qYG9WgHqPEkQY1NxndGxsDCP+nHWfz4ef//znuHTpEn7zm9+g\nqKhIti/1GSUIIuNJZT/MbVYrvKOj0Gi1WLB+Pb7829+GDGWVEk3BHy4CtQYD7A6HEHrLBVIsRYPW\nuVz4n+XLoTOZcLCxMcjrZ+CFRwDo8/LwpX//d+F+oEfyek+PcP+1ykos/OpXMdLZiQFeoASxtWSJ\nljy7Hde7uuAdHkb7pk0zIhSTWYiJSFOiCReNtE+gWH3qKRaWG01ILIcL0awsJky5mEuE/HwmaKXC\n2OcLL0RLSpgQtVrl3kurlc3DPbj5+cxDbLWKLhZejZcj9XoquWHoxx+CSEuys7ORnZ0t3DebzcjO\nzg4SojMBVdOdQ1AvJvVAtghPuLYoSiTSI9M7OoqPp6bg83rhPnoUeXZ7VMeWtn4JtS8Xgd3NzdAZ\nDEH7ZRcXw2SzyYoQBbZ24ed2sLER2aWluOLvK/rqkiWy813nckHrr/w5OTKC4089JczZ8+67AFgr\nmLu2bJEVShp3u/HnvXvhbm2Fp7cXuuxsmIqKYJqBDyAumMMJxURfG9HYhYiOtH2fiiZcNNI+gRWL\nOztjE6JSdDrmWZ2ejm88gBb8HYDDwJALQL58o7SvqMnERKXRyB6rqWFFizZvZgWMTp9mnlWrFXjh\nBeblXL6c7Ts0xEJ5ATEUnP/IVVXFvKrSnNFktftJM9L2dZGhkD1mhp/+9Kf43e9+l5JjkRglCCLt\nSaRHpoa3SNBo4Ghvj3pcNIKZ53EaLBbctWVL0PaRzk54enuFNi6BrV1eX74cF5qahHPj82n0eniu\nXkXXvn2s4JF/PVKRyds4DHZ0YHpiAgDgHRlB+6ZNWOdyIUuSu6rzf3E1WCwovP12ePr60B2itYyU\nWH8ESIVQjPWHDCJNCNWzQOnxSC1dlKrGOp3A/PlAYSGwYIHoaayqYmJV+triGI1MaHKU2rosWybf\nBwA2bIjlzP0sBGvwUgfg/xMfzs8HHnyQHfvaNcDjYaJyYkJe+XbPHiauu7tFz+qmTUxMFhayucxm\n5r2VXl9+LQ8fBt54Y84JT4IgZh4So3OITKk2lgmQLZJLIqGZ5Q88gKUaDW740pdg9ifuBxKv55Xn\nRPKw1EjrDmztklNaCq+/aImxoAAPvfceFjc0iP0CAfR++KGwtqLbbxfG1m7bJjsGwIoa9Z46Bdei\nRfB5vVhYV4f7DxxAXkWFsM4Rf6/FwGupdA1i/RGgffNmXO/pwcHGxpDXMR1fG4l45tXMjNkinmZ4\noXqrKj0u7Y2pdAxeNdbtFj2B//3f7P7AABNsfMxnn7H8yeefZ6Ls7rvFeSYm5DmhgZ7TggImArmX\nEmDey7ffju6cOXo9ag23+e+cAPC/xG21tUxc8mPz9wZpyC3ARKqUwHDb4mLWHoa31eEk4v1MtOmh\nSknH96hMhuyR/pAYJQgi7UnE4+bp6wN8PlxpawspqMKJrnBChIelmmw2XOvuDtoncN3rXC7klpcL\nYbIGf/6GsaAA3zx5UgghNuTmCnNcv3JFWFv/Rx/B7nBgw6FDsrYpdocD9vp6PHD4MMZ6euAdGoKn\nrw/9Z87AZLXKwmcfeu89mCsqoPXnpvJQYamHll+DWH8ESMSDrWYy9bxmjFDCMhROJ3DmDLvNPZWc\nUOG24Y6hNMYfPSDDbGZCb98+YNUqtm+IateKDAywirx2O8AjMK5diz3cd3IS8D4MYAeArwKQVNVt\naQHeey94jDTk1ukUQ4QrK8VwWx6629gI3HEH2x4qbDmZPyAQBEFIoAJGc4hM6sWU7swFW6SyxUYi\nrUH0OTn4GMDqMIJKSXTxNihjV68K3pHXly+HubxcOGdeiOlad7dQ6TZcSxOT1Yq8igohzzRr3jyh\npyivgDt84QKm+RdLgwE+yZdoT2+vMI90Tl5VFwC0/p6BGp0O2QsWYG9dHdY+/7ysaJO5vFwocPQ/\ny5djvL9f5qFdu3Urjjid8A4PI7ukhFUIllTozS4uxkhnZ1D1Xl4gKVLOaLq9NoTnh60Ga7u3skhK\nF4A0j2hsaWlBLe8fGa7qbKzE2gKko0Ms/rNokXwNoXoZ8GPYbMzTWVcnrl9pjNUqr3RrMgF33ikW\nOXK7xUJHJlOwpxEA8vIAfzVKgbEx4IMPIp+jFK02KL+0BUOoxbeC95W2ewFYMSNOTw/w2GPAK6+I\nLWYqKli4LSAv4uRwMM9vqEq40n2XLGHXwG5nOaWhnhcZ2uolHd+jMhmyR/pDYpQgiBkh2b1CZ0rc\nrnO5cN7hwP27doWcU6m6L8/v5GiNRoz39WHU3/ePn/O9TU3YW1cHQBRh4VqtcLEIAOM9PdAZjTBZ\nrbLrKaDQw7Bzzx68WFyMb7z/vmLrlG+8/z52r1mDyfFx9PpzZI//6EcywSoV3zqTCSP+c9IaDPjm\nyZMwWa1MiPvP//iPfgTPwICwPlNxMTz+lhGB1Xtzy8oyrriQ8Pzo3gpTm/+8nAAyobBoNJVpYyXW\nZnhSUeMPPxcI1XOVH6O1VawG+/jjYt5j4JgPPmAtTLjI9HhYD1Ip+/cD69cDt90mCkyTiYXjvvce\n8MQTsbd/AXAEwCDYF7J1N98MU34+EEP+uqIIBti5B1YAlry/BF3XcLbg+5rNYjsYfz/mkM+L2egb\nSxBE2kF9RgmCmBGS3St0tvpHhhLB/PwAwGi1wrJ0qSDuACC7pASWL3wBo52dyF6wAKOdnXjovfeQ\nZ7fLziWrpAT5X/iC4Dk1FRXBOzKC6YkJ2bUT+qBaLPAOD8NWU4PekyflOWsajeAZyS0rw8Kvfx2d\ne/ZgyuOB7c47sX7HDmH92wsLMeH3Ntnr63Hfrl3CuQ5/8gkmx8dRvHw5fAC6m5tlocJK46cmJgR7\nG61WdDc3C+s/2NgY8bkgvc6BnlWT1ZpST3vc1AHYB6AGwAGkvWcUQHAvydm47oOD8YuawkLRq1pe\nzjyDoby8g4MsjNXtZufb0cEKHQVSUsL2MZnY629qihUpys0FxseZ+JO+LpWQ9CDdDYD/rLW4rg73\n+nzsmnNMJuDECeC++9hxufjMzwdWrwaeew740Y+YQL58WRSfZjPLA+XwXqP8vJWua6j+q3zfgQEm\nuC0Wdm1m83lBEGmGmvuMxkKy+4xSzihBEDNCsiunzlb/yFD5gOtcLtjr62F3OPDIp58ii1ek9DPm\nduNKWxuudXWht70d4243dq1ahcOPPYZ+nv8G1lqFC1FdTg48fX2YnpgI8iDy6/nwmTPCdZ2/Zg0A\noODWW2Gvr4fJvwZdTg4ePHZMVp2321+dd3dtLV5euBDT/i/C2pwcTF67Bs/goHCu17u7MdHfj8+b\nm6EzGrG4oQGPXLwo87Ta7rwTACuKVLt9u8ze63fskOWdrn3++ajb4HTt24c/79sXdM1D2UFVxYNc\nABqQOUIUiFyZNhXEUkQnMLfR/zxFVRWwcKGYw7h8eXAOpNXK2qDw81UKxTWbmQfV4WAicXKS/QA0\nOclCZj0eUYgaDMxrGojBwNbj91LyEDUbgLUGA7vmUnw+4Be/YP8NBiA7mx0bAE6dAh59lB23vFwU\noqWlLM+Vn1ddnVyIhrquofI8N29mYb8AUF/Pcnj5deK5pxlWqIggiNRAntE5BMXVqweyRex4Bgdl\nobLJ9JSFs0e0Hl7P4CCaKitlobuKSDwiAGT5YVqTCdMeD0w2G/KXLsVoZyemJyYw7fXKPJs8X3Vy\nfBw6oxGlX/kKLre0YHJsDJNjY7Bv2IDxvj4MfPSRkEdaWFUFfW4ueqQN7CXkVVTAMzjIvJ2SNZmK\nilC8cqUQTsw9rYW33w5TQQFqt21TvCaxerL5dXYvWYLl5eUyz6rUMxxoh9nymM8Fkv4+Fcrrlsz5\nm5rEPEqeA8m9f42NopfXZBLDdxsalMNMV60Sw2VLSlhYPM8r5e1O/K8vAWm+Z0GB6JXlmExMiPJ5\ny8rgmZzEUbcbay0WmOrqmHezrU0WXttisaBWyUsrZd48JhjNZrZ2mw04eJAVJ9qxI7rrHcoTXlsr\nhmsHXq/585nHFmAiXRLyn4nQ57e6SCd7kGdUGfKMEgSRFgT2j4y1gmm8HjTu8ZsYGoJr0SK86A8h\nVVrft86fR9a8eeEn9AtRo9XKvJl+z4kuJwcPnTiBxQ0NyF+6FD1tbbje1YXxnh7Bs9lUWYnDjz2G\nC01NGHO74R0cxHhPDz59/XV2f2gIvokJXD5yBO7WViZEtVoYCwow3tMjFBAKWntREcb7+oSwW6EI\nilYLT18fuvbtw6s33YSLO3cKnlZ3ayt0BkNIcR6rJ5tf51W/+AXW79gR5EkN5WmfLY85EQczXV21\no0MUogUFYvgpb/Pi9TKv3oEDLMwUCF9c5/PP2f/8fJYTunKlfExgTqnBAHzxi+y2ViuGyErzND0e\ngL9/2GyA3Q7T2BjuBWAaHmbisbVVnuep08kLGuXlibe1kq9xPh9QVCS2aDl4kOV3BrZrCUcoT3i4\nYkRSD3IGfNEmCCK1kGeUIIi0JNac1Fg8aELu5IULyLPbYbBY4G5rw6TfM5FbVoZHP/tMti/30AJA\ny8aN6P3wQ3ivX4d3eBg+XmjI7zXh+Zcnn30WfWfOoO/UKTx04oTQJ/S/Fy7EdV4cJABTURFrRxMC\nQ14ebMuX43JgsaNAFCp2hkJvNmNSmnsG5ml94PDhsJ7iwKJPM0GqjqNW0iKXlhNL/mk8XlQ+f0EB\ncPIkq/YKKHv1eA5kdjYThzk5rNcmv+1yARs2iN7TkhImSDdtkudYGo3yQmL19cDvfy+KSS4k+fcg\ni4WFuG7axKr8SiMVqqqYZ7O7m+13003ySrylpUwQ//u/sxxRn4+dh/S1npXF8lYNBnYeQ0Ns3sOH\nE/NEP/YYu7ZKXtb165ngTcZxCCKDUbtntLa2Fu3t7dDrWfJAWVkZzp8/H7Rfsj2jJEYJgkgqqfpy\nHKsIiVa88p6a3sCWCX502dn41vnzyLPbg/Y1V1QIrV0mhocVQ2Jzy8rw8Nmz2LVqFYY+/lh43O5w\n4L433sARpxN/eu01QfgGojEY4PN6YcjPD7lGY2EhvKOj8E1MCAWP+H9O1rx5GOc5YCHQ6PUo37AB\nk6Oj+Ly5GUXV1ciZPx8DH32E3LIyGCyWIBtHKkQ055jh0FTVhClHc56xFCIKFxYailDzhxPB0uPY\nbGLYbUkJq5orrY5bUcHyMqXnaLWK3tj8fODSJSYie3vZfjk58lDekhIWhut0Ajt3ysN4HQ7myeTv\nG9nZrDUMIC8+JL3Wzz/PQnJ50SWdLrgSbzJCZ8PZI5ECUwQxh1C7GL3nnnvwV3/1V/jrv/7rsPtR\nmC4RNy0tLbO9BMJPJttCGj7LC+bMRHGZwLBdIHwobriCSlJ7DHZ0hBR5AOCbmsKhRx/F3ro69J87\nJ+u/OXntmnDuw598wgZIw+g0GmQvWICDjY0YvngxYGKfIG5DCVEA8Hm90BgMmLdiRch9Jvr74ZuY\nQHZJCR4+cwZ5FRXQGo3C9qLqajx04gSyS0pCzgEApsJCjF+9Ch+YWC5ctgy9H36I0c5OXGlrQ9e+\nfWjZuFE2JlIhokhk3GujowNHWluxe98+7K2sTPrrYCbDlGOyRTQhuLEUIoqnR2Wo+V0uJiRNJpY3\nKrWB9DhVVeLjbjfLveSvkZoa5pnk53jTTUzk8jH5+cDXv86En17PPKYrVsjDbTUa5l0F5L1TATbP\ntm3y8GF/pATAckaFQkEvviiu46mnWNiuXg98+inzjALyeQLb4cRDOHvEYtcMIOPeo9IcskdymQ2x\nTGKUIIikIv1ynFNaGrMYSYRweaRK4lUJvn5jQQFuWL2aPSgRlNMTE4IQu3riBADmrbQsWYJx3n8P\nQNEdd8Bks8lDYX0+9La3o2vfPjF0F4A+Lw+127ejc88eRSGs0euh0YttoX1eLz6Pop+hRqtFnt2O\n3PJyoZARAOTMn488ux1lX/0qE6kajVw0+xnv6cGVtjZ0+4/V9c47QQWa3MeOYW9dHV696SZss1px\n5fhxAMz+RXfcIdyORigdcTrR9qMfpbQy7oxX483JwSBY644utzvpr4NkV62Om3jEYziireIrrZ77\n2GPKVV2tVuZhbGtjAm7jRnGcNI90xw65+Ny2TV5dlws8s5l5O3lIcEUF86Lu389EotsNTEyw25If\ngVBYCFRXs7BWaR4pwI6zeDFw/ToTtAcOAGfPituHhoA332RzTkyIj2s0LLR3cpIVV/rkE7ZeabXb\nZDwv1FBVmSAyHSeAWrA2YfF8HCU6HsCPf/xjFBcXY82aNWiNlO6TJChMlyCIpCINn42mv2QySUZv\nU+n6AeCo04mxnh4hB9OQlwfvyIg8jzKwsTyYQDXk5WGivz/oGLaaGuhMJlxpa4MhPx8Pnz6Nk88+\niz/+9rfBC9JoYH/oIXQfOgRvjGLphtWr8bW33sLLCxZg8vp12ZwanQ763NywXuDAdYQrTqLR6+Hz\nXwNdVha+ffkyAMRUAXk2Qk5n/JiDg9hbWYkutztlr4NZIdmhmtGGN4cLsz1/Xhwn7TfqcLDbgWGn\nTidw7hxw4QLzYEpaGcm2LV3KxtbUALfcwkSi0utIo2EC1mRiglUaPltfD7zzjhiGK4WvJz9f3ufU\nYJDnp1ZXA4cOycOCz50Dnn12ZqsWEwQRFxHDdGsBcP3XACDWj6MEx584cQK33norjEYjXnnlFfzN\n3/wNTp06hcWLF8v2ozBdgiBUjdQDmWqvTTKOJ10/v/3VXbtgdzhgr6/Hw2fPYnFDA+bxHn5abZAQ\nBZj3cqK/H7llZTD6K+ZqsrKwsK4O9x84AMsXvgCtv1dg6xNP4NKbbyovyOeD++jRsEI0q7hYvKPT\nCTevtLWhqbJS9hif0zc5GVGIGrgnyD8mFBq9HnqzmR0+Jwff+uMf0b55M/Y7HJiQFD6KVAE5npDT\nRD2boY6ZNI+p1Yp1588n/XWgqv6qQPJDNSOF/XLPJq8QrRRmKx3H+41WVzOPp5Int6ODeU/dblZg\nKHA9fFtBgeglfOcdZSGq07HXzNAQ81z6oygAMC/q9u3yqrjScVu2sNtSr+rttwNf+pI43uFgQtRq\nZVV9y8qYELXbZ75qMUEQM4P/bQk1AOIJMElw/MqVK5GbmwuDwYDvfOc7WL16Nfbu3RvHQmKDxOgc\nguLq1cNcsUW0obGpOl6oL/Ch7MH3P9jYiNpt23Dfrl3Is9txb1MT6/kZEIary82VjS9YtgwPnz2L\nb548idyyMlQ4HPBeu4aDjY0YunAB0x4PvEND6G5uhkeSP6bLyxNaxOjNZkwofNnV+b/ImoqKkLd4\nMbRZWTAWFgbtN+Z2Y3JkRPH8NNnZyFmwgHl2FfAODyuG7/J1cXyTkzDm5UFrNGLeihUw5ucrCk8u\n/PRmM8YHBgQb8Ovs83oxuHp1TKIt1hY/gYT6ASPReaXMxOsgmesLRUrfp6ThtoODcrGYnR0cfssF\nV28vE2JKYbbSHzR27GACkgs4pbDTcKHG0m3btonCW9rWRIpG4hwwm+U/5tx4Ixsr9XJypqZEIfz+\n+yxPta4OLc8+C+zaxdZ89CgrSMTXbbcDn30menKTHTIdiUDbZThz5fM7Xcgoe7jAPJoHAMTzcZHo\n+FmCxChBEHOGaL/AH3E68dL8+fjjf/1X2BzU4oAiQr6AL5eWxYthslqRZ7fj0c8+w/XLl4X5rko8\nJYVVVSjhXg+dDvOWL8dDJ07AVFyMSX9V3CAmJ6E1meAZHMTV9nZMj4+zkGB/H1PuDZWKxkB8Y2MY\nu3JF0bMLAPrc3JDj53/5y0IBJFtNDczl5ZiemMDl1lYcdToVPY7rXC5oTSZMjo6iu7kZr954o1AI\nyt3ais+bm6HT62MSbYkW8AklFNXev1Tt64sZqTdv+XIWnlpSwirOdnYGe/qkguvsWbGyrTTHU2rT\nQM+tkic3XF5kqG3Z2crnw19T69ezarcc7pl1OkWBys8FYN7Q7m4m7PLzgfvvB65dA372M7ZduuZQ\nIjCa/M5kCkjyxBJEcrCChdbGKyQTGD80NIT9+/djfHwck5OTePnll3H06FF87Wtfi3Mx0UM5owRB\nzBkCc0rbN29WzGGU5hECrJjRIxcvBgkWz+AgXqusxLg/H/Du//xPvLFyJaY9HthqalBwyy2y1iY8\nh1aab5pTWooGf6jhqzfdJBQayiopQeFtt6G7uTmoLQs0Gujz8sJW3c0qLsb09DQ0QNi+pNGiN5uh\nz87G+NWrwvV7Y+VKjHz6KYz5+Si87TZcbm0VtgFQbL2zvbAQE9IqohDb1Uh7l0bbImim+oyqvX+p\n2tcXM9L2KyaT2N6koQEYHQ1uzZKqdiKRclfXrBHXqpRXXVbGxPLGjWz7tm1sDmmu67x5LJS3oIBF\nKfBCaPX18j6igXmw8bS/4SQyNpBY+scSxBxGza1dent7UVdXhz/+8Y/Q6XSorKzEz372M6xbty5o\nX8oZJQiCiJPAkMxQnlK9xFNhtFrxzZMnFb/wm6xW/IUkH7Do9tvxV263cH+ks1M2Pz8+zzfVm83w\nAXh7wwYcbGyUFRkad7vRf+YM7A4HHj5zRsgvBQD4fGGFKACMX72Kib6+6ISoVhsUYqwJCN2dHB2F\nT6OB3eEQhPzwn/4E3+QkPH19uHz8OMx2O7QmE3YsW4a3N2yQ5YvyUFx+jhqJ55Z7lPMWLUL75s3Y\nXVuLizt3RuXFnqlQcJPVCqPViv0Oh3ryMiWkOgR+xpF686RtSbZuVfb0paqdSCSvn3Stn37KhBkP\n0dXpgAULWDuZ7dvlobVSz+6JE+z8Ll4MbgUj9ZoG5sEmEo6bzFBeqrRLEGmPzWbDiRMnMDw8jIGB\nARw/flxRiM4E5BmdQ7S0tKC2tna2l0GAbKEWuKfUvWQJ/nd7u/DF3jM4iJaNG9F36hRyyspgtFhk\n3rlovXb/vXAhrnd1wWCxsH6f/pwuz+AgXiopwXSofDMJerMZUx5PUAjwjKLT4YZVqzD4ySfw9PTI\nNvGKs4HeYwAwFRfDI2lvA7Cc1uKVK+EdHsYV7kHyk1tWhvybb0Z3c7PgUf15bS2KT58W9gm8dvES\nrc2kzEZ1XzWR8vcp7oU0GFieJfciziaRvH7cQ5udzYoZeTyswu6HHzIvJ8/XllbsDXeO69cDzc1A\nbi7wxS8CL7wArFqFFrcbtYFrkHqHN2+OrYJuqjzLGQh9fquLdLKHmj2jsUCeUZUzx/L4CSKt4Z7K\nVb/4hUycmKxW3LdrF8yLFqHH31NU6p2TelSbKivhGRxULI7EBZR3eBh77rkHL82fj+2FhWhuaICO\nN6cPg95igUarTa0QBYCpKVxpa8O8mhqYioqEh7VGI0a7u7G3rg66gD6JxsJCTPN1Sooeefr60LVv\nH4YvXAAAGPLzAbBcx9KvfAX9Z86wNjh+z6zO7wHmuare4WG0B1Y29aN0zXm+7/bCQvx+/Xrh8XgK\n/mRcXmYipOLDjXshm5uZWItGJM30uiJ5/biHtrOTeS4HBljYrtksCtGCAnnF3nDnuGMHC1O+do3t\n831Wm+AAACAASURBVKMfsdDcu+8OnwcbTd6m9FoBqfEsEwRBRIA8o0kmmWkYBBFItK33iMTgXrSB\njz6Cp7c3qD8k96hyjEVFwPS0kAvJvWgvL1yIa11dMOTnw3rzzbgq6TOYNW8exnt6xPxRrVZWmRcA\nsktKMD05KeSRAmChf7xIUbzo9TDE0mPUj/WOOzDo91ra6+sBANNeL7QGA8Z7ewXPp8lmk63ZWFCA\nb548ifZNm3DXli1o37QJa7duxX6HQ+ZdXdzQgLVbt+Ko04nxgQGZx1Qpj1Q6PpTHlj8eTw/ajMvL\nTIRUfLgpeSEjvemp5UN34UKgq4vdvv12lgfa3MyE6MmTYqXbaPIrpT1RCwuBu+6K/IYfzbxquVYE\nMUchz6gyyvX8ibhJdUV1Ym7Bf/wG2Hc0+i6RGKFCN7kXDWChpFLxcsTpZMWEJMVKJqR5mTodxnp6\n4BkcRK7djmtdXfAODaH35ElhF73ZDI1WC1NREQpvuw3GggJMDAzgcmsrNDodfFNT0OXkYH5tLT5v\nbhbH5eZi8to15ZPR6aDNysJ0qO0StHo9NNx7qSCCQzF0/jwA5ims3b5dJtBeXrhQWEf+0qUY1mox\n3tMjCFHeEgdgebhNlZUYlwjWwqoqQfTd29SkKASldglVsVea72sqKhI8uWuff14QwdEKS74WAqn5\ncHO5gkNHI73pRbOucII23m2B2O2iGK2oYDmiPHz3sceACxfYPtnZrDDR9u2h57vzTjFUt7+ficyb\nbgJWrAi9RoMh8rz0BYUgCBVCYboBJBrxo+Y8/ozqxZTmxGsL+i6RXLi4ORiigJGtpgYPnz0b1H/y\nSlubWDWTizpetGRqSmhvYuTFTQD4JiaQU1oqtDYZc7tZ4Z/WVvSdPo2pyUlos7NRcNtt0JpMcLz7\nLq5fvizzMOoC2kjo8/Kg4eGyU1NRCVFoNNDn5IgVbUMJUU3wj5vGvDzY6+uDxPnu2lpM8JDEqSn0\ntLXB098PbVYWLEuW4Oj3vy8rADTY0YExtxs+f7GWnNJSoYouf20oFegJFJ88zLrglluEQkNrn38e\n5vJymIqKMD05KYRZt2/aJMwXqt8sIUf2PjWTH278g7exMTiHMfBNL/BDOnBdSh/i4UJYlbY5ncD8\n+cB//Vf0LUukhYy4IOThu62tTKi2tTGRaTSGv4a8J+oXv8jua7Vo6e0V295Iz08a9htpXjV/QUkj\n6LuUuiB7pD8kRgNItF1Wqgr8EXOTTPouMdOCINz8fNuAv6WKwWIRPGiewcGgqrtSuCAqqq4WwnMB\nyFo6FFVXC2JJ6xeQBosF9cePyzx3fN+c0lL0tLVhemwM/adPY9rjwclnngnad97KlaL4BDB/7Vpk\n33BDbBfG52P9SCXosrJgu+uuoP0C8fT14Yok1BgQBX1gyK9vchLT4+PobW8PW624sKoKDR99FJW3\ncp3LhbyKCuhMJhxsbAQA3NvUJKta3L5pE8wVFfD09QlrCsz5jCd/dM4zkx9u4T54A9/0AvuROhys\n9Uu4ucL9iqe0raOD5X/ycHhpzmcoQr058/n9udKoqWHe0XC/evNrvWMHYLOJ7zEFBUBpqfz8YvmF\nkr6gEAShQkiMBpDJnqd0qTY2F4jXFvF+l1CjJ2imBUG4+fk2T28vtCYT7qqsFDxory9fjv0Oh9CW\nJPDacaG64dAhzFu5EgATmgALP11YV4cNhw7BZLXCZLWiePlyAKwQz1v33BOUZ2EuLxc8qHwevdkM\nt9+7yD2Uhrw8rHnuOZTefbewz9TEBHIXLJDNZ7RaURwoLMOh1cLR3o7rn38ue1ifm4sbVq9GdkkJ\nsoqLhcfH3e6QwjIUBosFd23ZItxf53LB7nDAXl8veEQ54V4bJqsVueXluBJQVIqvQW82Y3xgAMOf\nfMKOm5eH3IULofWLV/7cp8JE0ZGyz4xwH7yBb3r+QliwWFheZjTCM9yveErbpM9pnY7lgEZi82bW\nK7SxUS4w+fynT4vH4d7SSL96W62Av9BZrV4PtLRE1/aGmFHou5S6IHukP1TAKACqdk5kImpsURFP\nQZlkzR9YgCi7pARjbjdsNTXQmkzo8RfiWdzQgOs9PSGvHc9rlBblCTwP6Tp0JpOsvQlfG8ByIKHT\n4dOdO4Xw1UCMhYXQZ2VhrKdH2EdrNGJ6YkLYZ2FdHb7y8sv43bx5UVfhNdvtuPb554rH5UWFXqus\nxLj/Gkmvp2dwEL8rLpaNNRUVwdPXB11ODqb8fUWT9bwLtGv75s0YOHcOPSdOCOfLjw/I283wNURb\nmCiedjBEHAwOMi9naSkTW+HyM9esYeGuAMuT9HqB6mrg0CE2hn+InzwJXL3K9nn/fbGAULTr2bgR\nOHYM4PngkQr+SIsDVVQA5eWh81B37mQFiqqqgMOHw3/ZkJ5vQ4MYqkxfUggi7aACRsqQZzSATI5i\nobh69ZBqW6jRExQuFHam51/nciGrpAQAuyZFv/ylsC/3UvJrFe7a8bxGXpxH6Tyk6+Cez6LqapjL\ny4PCTS+3tIQUogAw0d+P693dsn2kQtRgtWJiaAg7ly2L+joBgHd0VPm4Wi3+vH8/XiopQe6CBcgu\nKYGnvx+/mzcPW7VabDUY8EpFhWzsA0eP4i//9CcsbmhAyZe+BED52oXy1re0tIT15AfalefwciFq\nq6mBrbpauF10xx1Ba1DKR1UiGu/9azffjG1WK14sLsZIZ2fY+RIhVBubmYx4SNn7lNXKxFtbW2Rv\nIfcMms1MiAJsLLcl/xC/ehUYGgJ6e5mgi3U9u3YB/siHqEKlpB7ZwFBaKR0dYqXcRYsif9nwn2/L\nkiUsvDcwLDkaEi2GQT3rZNB3KXVB9kh/SIwSxBxgpoVfPEQrCJI5P//yfrCxEQ+9955wTXJKSoR9\nA69VtNculDAwWa0wWq3Y73Bg2uuF3eHAhkOHYK6oCAo3nfJ4Qs6v0fuLn+t0svu8HycAaHU6XGlr\nw7Wurqi9ogaLRejtKXs8P59V7x0exrTHg74PPsCY242RixfZ3D4fMDmJiYAvpwcaGnD8qadwvacH\nPjCvq9Zkwo5ly/DmmjXC9encs0cQei2PPy6bI5wIDLSrNIeXF1e6d8cOwWbrJbdjfa5F8yPOdbcb\n3qEheHp7sTtW0RMDStcko3Jfo82R4WGpq1aJ+2/fHrwfz63OyWEeznCEEluxhMBK9w0MpZXCw4zz\n81kIbiSRx+f9xS+iD+8NJNFiGImOJwgibXj11VdRWVkJs9mMG2+8EccivX8mAQrTJQhizjCT4crh\n5lbaphRG/Nb69eiWtHLhaPR6bDh8GO889JBQXXdhXR0uHznCepQC0OXlwZibizG3W2gPAwAag4GF\n04TwuOaUlmJidBSTw8Oyx+0OB9zHjrHjxdD+BZCHxkpb4HAWNzSgq7lZqOhrLCrCvJUrhVBYfm30\nZjPmrVqF9Tt2hBSSoUJupSG22X6vZWC4baQw3GjCeV8sLoantxe6nBx869w55MUSDhoDSs+XmQ51\nTymx5shE2r+zk3lEjx2LHKKb7P6b4dYmDbu12ZjnNtrjRtNLNJnjkjWeIAgA6g/TPXDgAL73ve+h\nqakJK1euxOXLl+Hz+VBaWirbL9lhuiRGCYKYM8Ty5T2UUOGPj1y4gFy7HUaLBetcLhxsbIyYo6o3\nm3HDqlW4d8cOAAgSOp7BQbx6441CvqOUxQ0NmBgdlR3DtWiRrIqtsbBQqJSbU1qK4hUrMN7bK8tT\nDUJBLAIAdDroc3IwPTGB+5ub8c43viEKzDDYampgtFoVRTXAPK4Pnz6N1ieeQHdzM3S5uZjyt6SR\n5nS+umRJUK4nJ5pcTukPACabTRDxeRUVyC0vhz4nB97hYeHa8GPEmic60tmJ3WvW4MFjx2ZMiALK\nwjja3FciAgsXstYrFgtw5kxs+aWxIhV2VitryRKtyIu3qEWixTComAZBJAW1i9EvfelL+N73vofH\nA6KVAqGcUSJuKK5ePajdFpmaIhQq5FbJHqFCIPnj17q6hAq8R51OZBcXw2SzhcwbNdlsmBwdxefN\nzXitshIAhPBdHrravnkzfBIPpDTHlLeKMVdUCNVhtTx0FyxcV2c0snH5+ag/fhz37dolzKGE3mxW\nFqIAMDWFyZERTHs82PvVr4q5qTodNP7j8JBhANDodDAWFMBktSLLZhNa0Bj8YcS63FwAgHdoCLtW\nrcLdL7yAxQ0NuMHfS1FvNsMzMIB33noLJqtVCB0OrMQrtUG48FRpiK2tqkq4nVNaKowd9odMSsNw\nYw19zbPb8ehnn82oEAXE8OT2zZuFcHAAMxrqrvb3KYFIb1iRtnPbDQ8DmzbN5Erl4by8n2gUQrSl\npSX+ohaJFsPI5GIacaCK14UTQC2AOgAZ9BkdD6qwR5JItA5AIuOnpqbwwQcfoKenBzfddBMWLlyI\nH/7whxgfH495HbFCYpQgMph4RWW6pghF6i0qbdkSiVD5gvxxg79vIN8+0tkJT28vPm9uRlNlZVDe\naPGKFcJ93h4lUPgMdnQIoaumoiJ4/aGzI3/+M3YsW4bXli7Ftc8+E0TwvLvugtYvDCdHRzHtzxP1\nDg2hfdMmHHE64R0ehtbfHkLKwro6zON5dwAKbr1VaCMTyNTYmOCBtT/wgHguU1PQZWcDGg18ACYG\nBvB5czOrCOxfS8mXvywTnQAw5najfdMm3NvUhPU7dkBrMglC/dS//isACOLOOzyM9gCBILWNLjtb\n0ebSHx54DmnBLbdg4Nw5AKy/qUOSNxyYg6qmYl9SMipPNFlEesOKtD1cjmco4n1zlQo7EnlEvHQA\naAWwD0yYEhlBou/viYy/cuUKvF4vXn/9dRw7dgynTp3CyZMn8cwzz8S8jlghMTqHoF5M6iFVtohX\nVKZrv91oeosqbVOyRygvKn/84dOnZdul/TbHAnpx8nHSCr5KlXql94v8FWH1ZjMm+vpwvasL45KW\nLgCw8l/+BaXr1gWN4SLt4+3bcaWtDdMKv2wacnORW1oKY2EhsubNQ8GyZbJiSFxkAszrCbBw17Gr\nVzH08cfCcWzLlzPvKs9R1euFNWr0eqx57jlBdGYHnD+AoGt3h83G1hdQ1RgQf2zweb1CsaKRzk7B\nrq9JfgSQFjrit0c6OwWxn7dokWIVZDUW+5KSSrGcNp8Zkd6wIm2Pp1dnsn+xiyBu08YWc4CYbTET\nXkz+llkDII0+o2eCTHptJPr+nsj47OxsAMAPf/hD3HDDDSgqKsLf/d3fYe/evTGvI1ZIjKqYTA2V\nJFJHvKIyXfuoh3ojPuJ0ov/MGQDMIxbNm3Soar+h2rmsc7kUxZZ03F+cPx+2Uq/0fm5pKUzFxZjy\nh8dqJCGxnDdWrsTa559XrBw70tkZsqKu1Js70d+P8Z4efPr665gcGQEAGAsKWIsbfwivb2qK9Qyd\nmEBPWxs8vb3ILSuTeRoB5i2+YfVq4b5vclLw0DZVVmK8rw8agwHXurrw9oYNgjdT7/8Q1Fss+NJ/\n/IdwPQNFIf9B4fPmZuiMxiAhO+5249WbbgrZ/kTnDx221dSgdtu2mOweiplurxKIKsSy2j6cIr1h\nRdoej4cy2b/YpWs4ChGZmfBiugA0ADgAII0+o4nwJPr+nsj4goIClJWVxXzMZEBiVMUk+7Mpk+Lq\n051U2SJeUZmu0WOh3oil4a95ixYFvUknwx4mqxXfChCbSvtIhU5gHuDBxkahGM1IZyc8V6/C5xej\nvqkpISSXM+3x4K177sH1nh5Zv9JAkQaw4kb2b3wDpuJiGP3HH+FtJgBBuBoLClB2330Y41U+wTyc\nBcuWCRV3tUYjpiYm8MnLLwvXNae0FIvq6+GbnBTWyUVv5549GHO74fN64fN6MeZ2C21tXqusRM7C\nhQCAyeFh/J9Vq4KuBUfpxwapx1lvNsPT2xuy/Yk+NzfpQi7VYbMz0hIphLgM+bpQm3CK9IYl3Z4s\nIZ3sX+wiiFv6/FYPIW0RygM6E15MK4AmkBBFZr02En1/T3T8448/jl/96le4evUqBgYG8Mtf/hIP\nPPBAXHPFgj7yLsRska6hkoR64N/B5gr8jTgQqYgJ5RGLFaWqq6GOHwkuaABWYffepiYxN9VigXd4\nWF6l1l8B11ZTA53JFDQWYCLtRZtNCJ+d6O/Hn/fsgc/rRXdzM446nci123GtqwsAawFjtFhQVF2N\na599JowDmIdz9OJFAEys5i9Zgqvt7cJ2Y0EBGj76CPsdDqE6bW5ZmSD6AvunGvLy4PV7YcfdbuFx\nW00NesbHZedjtFqF67z2+efRvmmTTKRyj/NRpxMef84qF6tSj3hRdTVqt29PujdR7TmmUcHFJcDE\nWqTncDp/OMV6rqFI9pury0UVa9Md7gEFmDDlTw+X//5WkHgkVM3TTz+N3t5eLFmyBFlZWfiLv/gL\n/MM//MOMH5dau6gYqqZOEMkhkRYYoVp9JLNnqVLLGb7mu7ZsEQTY8aeewp/37UPBLbfAVFCA2m3b\nFFvK8DVfOX5cMVTXZLMhf+lSDH38MTy9vTAWFMCyZAl6/QIzu6QEYxKRCADQ65Fls8GQk4MRvzAF\nmNcUOh18k5PQaLXweb3Qm80wWizIq6iAwWLB1PXruNzaCkN+PkpWr8aa555D0803Y2p8HAaLBQ8e\nPYqTzzyDtVu3Bp3Pfocj6uscaGepjez19bhv1664bRTtMdOSWPtIpvOHUyzn6nQy8ZqTw8Riup0r\nkVrqwEJxaxA6fNYJJlpzwEQqPaXmFGpv7RIt1GeUIAgihXBB89K3geHbbFiwbAVc61w4/lDovqKx\nEknQcHHZf+aMEBYr7ckZOFYqwnRZWZiSFDDKLilB3he+gB6JB7P0K19B5549mBgYQFF1NfKXLsWl\nN97AdIBHEwCg1wOSIkoao1EIJWYPBPcttTsc0BkMsjW+uWaNrMcn94DqDAYMdXQgd+FCGCwWTPs9\nufFc52T0lY0XPt/whQvIs9th8PejVaVgTWdxGSuxnOv/z97bR8dR3mfDl6SV1pLW0q60MooqLGzC\nZ21HwgsmMXQ3D3KIBanUNMoHpQL6VHtOeHLaPjmx3z7tyfvmNP0mz5O273lL7Saxoa3S2lDbOCCo\nFUuyITGEFAwFEiUQOxgjjM0KIdv6sP17/7j33rnn3ntmZ3ZndmeluXzmeDVzz/05uzPXXL+PREJT\nUfv7l5aZiQ/7mEJuBTQBTT3th6ae+lgS8MmoGr7P6BLCYrKrtwuvxdtYymvhRZitBzfDfO/KEF6K\nnMbwiWEkDycdDSSTy8/j+P79mBwfzxBRORqtfK5oOnr322+jWji2YsMGFpwoffwzL7+sizKbeu01\nvPnEE3oiKuYTldO/CHlRAZaSRkRNWsGV+/jB8eMAtDyi3FT54MgIFs6fz/iUVodChvNsFDxIFXU3\n1xo57fvJ6zt34kRmLJ5NxWLgc7kof6fsOMR7yBx5Ua5FmcJwLaz4cfpRcB2H/90of/hk1MeSgNfi\nbSxWeI30OwFOOkPLGIG7KtWI/3fdA+4EkjGA6HNZVVuLqmAQ37/rLsPorbzPkeuvx1N9fahMk8ma\npibMnj6dRdLe/dGPMudemp3N5DcFmH/nZel8pMHmZjStW5dJ+dLc1YVWIXpudTiMhiuvzAQw4sGQ\nnurryxDGQ8kkvl1Xh3NpX9WF6WnsvfnmTKTbxquvRvNHPgJA8/EV51kkoKlXX1USSFXU3Vxw2vdT\n9Pl1st4lj2L+yDgdpMhq350YoxvpRHwUDj8Krg8fWfDNdH0sCdh1ifKRHxazVdvQplvwf1Y9g7v/\nGVhzZ2E+okYmoUb7v7dpE06OjKCpsxPV9fU681beD9W5orluZTCI5s7OTOAh8dwdjY0aAU2b2VZU\nVYHSQYxCHR1YOHsWdOEC5tMPx/Xt7fjMyy8DAMbuuw8gwjs//CFmT50CwKLr8qBGos/nuVOnMn+L\n6OjtRVVNTYawcdPjZ7du1Y1LrI/7tspmuCrz3FxmuE77fqp8fotpouu02bFnUOofmUL8SK323Ykx\nJuCbg/rw4TH4Zrpq+MqojyWBcs2bWW7wkFWb4whXNyD5j8DKXy1c4TIyCTXaz/OHfmp0VKm0HUom\n8fquXVnnTgupWy7NzWEmbRobjEYxc/JkRq0UU8aE161D1bJlCNTXA2DqZ117O+ZOn84QUW7eyyMI\n375nD27fuzdzHAAWzp/H9++6C5VpxROVlXjr4EHlXaq6sREf+9u/zaS5eaqvD/MzMwA0E+UTw8MY\nu+8+nYLZd+SI0oS3tqUFwWg0K72PmRmuVaXbal5Ro3y0xUKxU84UDU7+yOSjQBZiZmO1706M0TcH\n9eHDR5nAJ6NLCEvZrt5reTMX61qUI+k/lEzizzo7c5ILJ31EjUxCjfaLREnVj6mJCSy8/z4AZhrL\nz13e0ZGpoyYSyZC3xmuuwSnBj/HTzz+P+vZ2RDdswNTRo7gomOqGVq7M+Jg2d3UZ+mAeSiZxSQhs\ntJBKZXw+KwIB4NIlzJ05g1M/+hFqIhFUBoMIp81xF95/H89u2QIAOPzcczoSpUsLQ6QbvxHR++D4\nccydPo230ilszObWLsqF5JmO1yIJ8+TvlJM/MvkQy0KIotW+K8rZXgsjc9DF6EtRZHjye7GE4a9H\n+WPJk1H/d9k6/LnykQteI/1WMDUxgfeOHs1JLpz0ETUitlYIr1nAoppIBL/5wguZY1xF5fs5eauR\n1NXlHR34rTffxLKmJt15gVAIC2fP4tYHH8Tq/n7cefAgbt+7V9m3qYmJTDCjikAgU39ixw7UpMtX\n1dWhae1azKdSuDQ3h9l33mFjikZxNq3Ucv/WikAAJw4cyJgCNXV2IrFzJ57duhXnTp3S+czKaiWf\nj0AohNlUCnNTUwW/TOBtpF55RTd3XoXpeMvZid7gR8aqYq1DPsTSDhmWb5py341uqk78kBoF0ynn\ntffhw8eixJL3GS21+0k5wZ8rH4sRdtJ/lAr5+jta3c/rr6quxvs/+xkWzp3D3LvvZsovX7UKF86f\nx8W5OUTXr0d9Wxs+OH5c1x8+jzWRCD41NpbJGxoMh/HB8eN47JZb8OtPP43DX/wiTgwPIxAKoWX9\netSEw5g9fTrjB9vR14fJp5/G3OnTmfa5f6rsB8v9XuV9t27fjn+9+urMGArNAwvo0+WI/bG7Vk6h\noHYWoRN9Xnl/3U5pk+umWYqb6iJce8/Dzy3qIw3fZ1SNJa+MLmYfN6fhz5WPxQgnzW/dgplp6KFk\nUudjKcJIzZX3i9FnF86e1RHRmkgEdW1tOD85iflUCidHRvCzhx/W/DjvvReANo9feOMNNK9bl6n/\nUDKJ0XvuQWTtWtQ0NjLi1NKCCzMzeHt8HFU1NTo/2MSOHWi58cZM+02dnTripzI/lfcFw2G0xGJZ\n5QqB2IYRERXnMi8zXhvmJwW1U4729DmQlxm226YcuW6a3Ke7oQF44AF3+iBjEa695zEBFkxqGIyY\n+vDhQ4clT0YL/V0uJ9PVQu3q/XuYc/B9HLyDYDiMwP33e5aIAuYP2kakxI7ZIq8/GI3q/D5RWYnm\ndesQqK01Pjmd5sWI+MoBiFREUXwh8MMXX8RtQ0Po6OtDR28vPjU6mtOU2eq+QmC1voJ8U22YUBbU\njkUSVk6/U558qZTrpsl9uqengRtvNH2QcGwtytGXwi5cTmtjey1KFUxqiaT3KaffKS8jFAph+fLl\nmS0QCOD3fu/3itJ2oCiteBj8dzlf8GcHgD07LGbT1ULnyocPH/nhtqEhw7QjRqSEk1SApUkxM1vk\n9c+cPIlTaXPZiupq0MIC3h4fR01zc+ZvEc1dXahpaMBjiQSmX38dyzs6UN3QgNuGhjIpWWYFc1uk\nzXpU4xH7xyP0qsBJbz77CoHV+szWKidsmJ8U1M4ihNPrDUCfxqWlBTh+3F5Kl1w3zbRFAEIh4N13\ntZcQ/o22MHAlEmCErNTTOZTux3YU10TXa/Pgw9OYEayrzp49i9bWVnz2s58tSttL3me0UDjtflFI\nCjMfPnwsbqj8BI38QrkPZzAaRUVlJS4tLCC6fj027d5tSF5E/9lgOIy3RkYQCIVwQWECXLVsGe5+\n+21d3k8OVT7Rps7OLJXTaExLFoX6MDp4A0keSmJiagJ1gToM3TaEcHAJrovo09nSwggj4Jx/J1/v\nVAoYGfGGH2cx/RvdeuDpATOJjSE7mrDb8JJ/qJV5yKe/XhpjmaFcfEYfeughfP3rX8fPf/5z5XHf\nZ9RjcNp01Q9058PH4oHTZvwqk1wj81hutth4zTWYPXUq4+9p5l8omjp2796N0KpVqEhHt23u6kKw\nuRkAi4r72Z/8hJk4p9U8Of8p38/TwXAiKpsPl0uqlKKgUBNKB28gE1MTGJ8cx/CJYSQPL9F1EZXq\ndBoiR4Mm8PXevds7PjBm/o1O/6C59cBjlNamGPCSf6iVecinv14a4yLDoUNJPPZYAk880YO5Ofvf\nsULP53jooYcwMDCQ9/l24ZPRAuG0+4WbQYK4Xb1X/Vy92i834Ps4eAturYfTz1q5/ARFogdAl8YF\nYOqkmX+hSGyD4TBCK1dm8peGVq7Ep3/8Y9S3t+Ozr76ayWHKCexnXnpJ57PH98vpYGTyKY/JaC3y\nSt2x1ODgDaQuUAf8FIhFY9h+6xKNWCe+bXaTMFp4kCjaPcPMv9HpHzS3HniM0to4BNO1KJV/qApW\n5iGf/nppjFhcz1NTUxOYnBzHiRPDOJzHS8BCzweA48eP49ChQ7jnnnvyOt+rIB/WkUoR9fez/53G\n6OgoERHF40TMeYu15RV4tV9ugK+FD2/ArfXYvJldz7GYM9/p2VSKDvT306xBZQ+3ttI2gLYB9GRf\nX+acJ/v66MneXsPzVBgfHKQdkQhtA2h3Z6etc43q2xeP085olLYB9GgsRrOplG5M44OD9Kcf+Qg9\nvnlzVnv74vHM2A4s9h+IfOHgDSQ1m6L4N+KUmnXhZlRuGCSiOBFtJqISTUfmN2pwkN0sN29234Yt\nRAAAIABJREFU50EhRUT9pB6n0z9obj7wOAGDdTe9X5jNnxeRT389NsZyep7KxYkef3wzbdsGevTR\nGM3m8dtb6PlERF//+tcpkUiYljEaB4C8bJB9n1EBySSwfz8wNwesX89ehJbaYsYNcD/XUAi4+Wb1\nOEvhu+qnP/Ox2OB2GkMZO5uaMJ9KAQA6entx+969edcl5m3s6OszDCiUT32qPJ2Hkkm8vmtXRomV\nc0WWQz5YL8D3wXUBCWiBYPpR2kAwpUz4XewfNDMUw28xAeN1L6R93+dyySKXz+jc3BQOH07i1lu3\nI5iHn36h5wPA1VdfjT/6oz/Cvem0bSr4PqMC3HBfmJzUYgksVveloSEWi2Fmho3zqquy57AUvqt+\n6hgfiw3FzqJQlU7BEli+HB/727+1dI6R+atoPpvYsaPgvuXK0zk1MZEhopU1NZg5eVLXJ0+m7jBB\nqcyKfR9cF1Bss0SzlBylTPjtxg9avulHiuG3aGqyXED7vs+lDwMEg2F0d+/Km0gWev4PfvADnDx5\nEv39/Xmdny/Kmoy65b4AAJ2dxf+ddxvcrj4cZvcxgKmjp09nz6GV+53TLwOWQvozjsXk47AYsGjW\n4+JFAMCFDz7AD37/93MW52qkirwUO08nJ6tvhEJo7urCqWeesRSoyasoFSksKP+ohEXzvSgUxQ6I\noyArmbVw4q2tlwI05EvMivGCwGDdx8bGCmvfib7nS+IXYe5R/3fKOTz88MP4zd/8TdTX1xe13bLO\nM+r0C8KhIeC++5jX4s6di5sUDQ2x+9EzzzCFtKEBeOCB7ONm1jhLKceqDx/lgIvz89ofFbktZUQ1\nsiYS0ZGXYuTpFE1Kb33wQTy7ZQuuGBjAhb//ewD2CJXXzFNVpLAY6VJ0+Ue3bgUmJnDo9dcx1dGB\nQDoHbKnnpuzAA8EUC2ZkxYmE3166eedLzIqRu9Ns3Y3at2KC60Tf880h6uce9WGCf/iHfyhJu2Xt\nM+ol94VygugP+t57wLPPsv123U98H08fPtxDPuTq8U2b8NbICJq7unDnwYOm5xxKJvHGI49gPpVC\ndWMjPnP0aCZCbrEg+pEGo1G03HgjbhsaAgBl7lSrdcn+pqWAKv9r4rEExidZH/tX92NXt8t9TPsX\nPgZgMr0rn7lJIokJTKAOdRjCEMK+k5u7mIK7RMtLN2+3x1psJFAc/2Ixh+j1AI7Dmg9qKXOw+iib\nPKO54PuMClhKZp1OQjRvPn6c7ctHXfZ9PH34cA/5mHl2796dSaeSi8RNTUxkgh21ffzjRSeigKYe\nBkIhzJ0+nRlrPia5TpqnWoWZX6hqDHUB1seipUvh8yvlgLWLCUxgHOMYxjCSvpOb+1Cl5HDSvDLf\nm7cbJp4up2EpOtwyH5bnXjQhPg7rps6lzMFqhEVoOuzDHsqajHodbrhlFFInt6sXzZuPHMmfUPKX\nAVu3esf9pFzg+zh4C15cj3zIlR0S53SAonzA/Ugvu/nmTF8u5ZlouxQBjuy+MBi6bQj9q/tx4I4D\nrpjoykj+z/+JxF/+Jf52xw6s+MIXbM9N5p6RfsKOIYbtxUwsmAQOfSiJx5oSeGLT0s4vO/bcmHNB\nb/J9k2/Fv7PciIXV/grlxr43ZlzOLbInz71I4u0QYDPyX6q1KzCgkxfv3z7swSejLsKNiLSF1plM\nAtPTQGsr8MgjQEdH4eqy2Kerr/ZJqQ8fMvKJrOo2ufJCdFpOnrmie8eBA6gJhQqqq5hj4YQ+GI3i\nrBT9V4VwMIxd3buKQkQBYOL8eYxHIviP06cx9Du/k/fcDGEI/ejHARworonuBDA1OYHJ1DhOjCzx\n6MDB9P/FiuirghXS49VIsUZES+zvdTAmYWK5b5i0w8neVoP28oXZ3JsRYDsEs1RrV+xo1T48h7L2\nGfU63HDLKLRON1KUiXlLZ2acrdstlCKPqo+lC6/5M/pwBtwv9OzJk3jnmWcAFLi+Dv8w9TzxBIZP\nnEAsGsWBO+5AOBjMfZKX0AM8MdyDExhGtCuGOw4W/8WJK0Gn8llnL/hWWumDV30SE1D7cvL+QnFM\nhF0fTaP28kW+62+nH6VaOy9c20WC7zOqhq+Mugg3fCoLrZOb6IZCLJ+qFZEml2kw71Pa0q4k6c/s\nohR5VMsBpcqNuNhRCn/GpYJSXrNcja0u0CczA4d/mIZuuw39q1d7m4ia3WCGgNt6h7C6r986EXXY\n1HBiagLjk+MYPjGM5OESmTglAfQBmHGm+bxhxb/Tiz6JgLH6NgSgVThWC/X1Y9dH02m1L1/fWrEf\nRmPjKNXaLTa/YR+eBPmwj8FBonicaPNmolTKmTpHR0cplSJqaSFiCWyI+vtznxePWyufSrHjTvXX\nTWzezMYTi5Wmv6Ojo8Vv1AL2xeO0DaBtAB2wcnEsEri9HrOpFB3o76dZFy628cFB2heP0+ObN7tS\nf7Fhdy28cM06tr6l/mGSUJTfKas3GMv1ERHSmwPVbX58M2EbKPZojFKzDq2J3XWOE41i1LExlRyD\nxNZpMxEV6zIfIKIWIupWtJkiNq8psnT9jN40yo7HFHWp6ix0vIWcb3NsBbdXAnj1eUqFxcKJjMYB\nIC/Zd9Epo17K5VwI3PLDDIeZcglYVzCt5nMtp+jGfiRgNXwFzx246c+YT9RdJ1FqNd0L16zZ+tqa\nn6X4w+REwnBRDa1O73NCkUomMfTNafS/1YoDtzzinK+v3XUud586Wa0uhW/icQDvAhhRtKkKBhQF\ncBJqFfGryK0ginUWOt5Czs8n0FGh/bVrnVBuQa98eBLfAfAOgJcNjheVrTv9krVU4C9OQyHnxyMq\nmLICq1Jk3VA8nVB+3VCPlzrcVPB85EY+KufjmzfTNoAejcWKum68rzsikZIqk16/Zr2g3HoaTtxg\n4qQpPr2kKUFGsHrz8MoDhahulSPipFfkNlNuZdFp5GqTq4HdxK6hjeScwl7oeJ2aL6vXUaHtxcne\n3InlV1FZqbJ2UWxO5BaMxoE8lVEncCuALniEjDpp5VRKssPvz93d2ngGBpzvj3yvzffea3eurLST\nq043nhNKseY+qfbBkQ9xKRUZE/taCjJcLijVy4IlBbsPz1ZvHh4zm7YNr5hbyutjh1w7NQajNnn9\nESqMMJv1s9CXCcV+GVFoe3bnTizv5EsAD6LYnMgu3nzzTbrzzjupqamJWltb6Utf+hJduHAhq5zR\nOFBCMgoAV8AjZNRJFU+8X61aVRrCII6nUPKlsquX77X53nvt9s1KO7nqdOM5oVgvwsW18MrL96UM\nr/iclBNx4X19pKuLnuztday/XlkLp+B15ZaIDB+k7/ibOyi+L06bH9/snL+kG7D78Gz15uGhQAh5\nfS/iVNwHeyNCVgi5iZO7YxDrt0iYlWvhdj9FWCXopXoZYddfViyfhypbTveMYnMiu/iN3/gNuvfe\ne2lubo4mJydp7dq19Hd/93dZ5YzGAd9nlCGX36Idn1LRlaWtrTTRV8XxOOFaI4O7rlx/PdDXByws\nsP/tuizZ7ZsVl5lcdQ4NAatWAcEgcNddzvjUujHHxW5zsfhNL0V4IfenVfC+3nnwIG7fu9fz/S0V\n8vUXlr/HyUNJJB5LoOeJHkzNOfzFNvAROzFzwvlIsm7AbjROqz6bbgZCKMYPdbF9TY18DQuJlurk\nGFS+ibz+TrCIxdwP1KjP1wK4E0ALmB9qof1U9SmXD6VVn05VuWL4Z9r1lxXLezUa8xLBK6+8gs99\n7nOoqanBZZddhk9+8pN45ZVXXG834HoLAO69915cccUVAIBwOIzOzk4kEgkAwNjYGAAU7e/nnhvD\n0aMAkEAyCdx//xi+8Q1gZiaBujr2dyjEyg8NAX19Y/jKV4C//3t2/tVXj2FgQDv/uefGEAwCTz2V\nQDjsbv/F/oTD+ZyfwB/8gb6/d989hhMngF/+MoFUCgDGEI+z+pNJ4JFHxrCwANx8cwK7d2vl29pY\nf158kdU/NMTKDwyM4cUXrfVn167CxhsOAw0NY2Dp/bT1LGS+779/DOfOAXv3JrB1qzPra3R98eP3\n3w+EQgls367NZyHXy3PPAUePsr/7+sbwta8V7/vl/13Y3z988UUE7r8/Q1yK0f7Rb3wDq2ZmEKir\nQ+D++1ETCuU8v3JoCFMTE3j1/HlcevppfOLOOx3rzzeOfgNfm/4a6gJ1uD9wP0I1ufvj5b+/cfQb\nmFk1Y3s8LIgd+zuZTODU3RMYT+eqTdYksat7l3P9rUv/ffUYMAAkwP4OVgWBnwKxjTFsv3W7J+bT\nsb93Gc/fUOUQJqYmcP7V8/jqDV/FnZ9QXN/JJMaeew4IBpF46ikgHLbX/sQExtLrmUgmTfuTSCSQ\nSCTsj/f+MeAckNibAFx+PgGAsfPpv2MJYHue9X0DSMwkgLp0/+8HEqF0ffL98c4x4ASQaEsAQ4rj\ncv3PjQFH09f3dcDYPyrqN3t+uXMM+BmQuJQAzgJjsTFgd/r4EDDWNwZ8BUjw55WVY8BJIFGRADYC\nY18ZA6T7P54DEun79dh1Y8AckJhKAASMYQy4E0g8LfXH4PtquB6hBJACxr4ntdc3BnzN5e/b+XT/\nYsDYwBgwZuH8XS72xwN/m6HQvMaFnn/77bdjaGgI8Xgc7733HoaHh/Gnf/qnyrJjY2N48cUXMZV+\nmXbs2DFbbbmBK+ARM91cUFnnWDGTVFnriOe1tLhnwmvVp9COj2U0yspt3Kjtk+eltVV/rBCf0lzj\na20likSYj6ydOXTTpcepsRZSTz7+pOXu5uSjuMjHT9XNoDzxfXHCNhC2gfoPlL/dujielp0tlk1e\ns1wo3EgxwmFgkpiaTVH/gX5vm+i6AEvXYKE3iMX0Qy0G/+mjwkxH42Td3FVV1qz+zUL5fMxpxfaq\niOhYjvKNFtoz8pfkWwdlj8eq2XOKWDqbUgaPKvfgWw4jFycq9P5X6Plnzpyhrq4uCgQCVFFRQffd\nd5+ynNE44PuMWoOKVFq5J6hIgd2It6o6rJAN1T1PdZ7Kx7W9nRHOzZuJbrhhNKu/1dXs/3XriHp7\n9X2IRLRyjY3sWHs7+7uhgeiY4oc4H/Ik9tvufd1Nlx6nnhVU9Vj1ccjnecdDbk5lg3LyOXEa+fip\nuunbetNf3OQe6SoBOIkMfTtk6yFB/h6Xghgu1e+FJeJf6A3C5g+1p9ciToURSBF2yJKqrFn9KSJq\ntVG/QXujVaNERy2Ujwp9WWPQnspfkm+VpCe0Vgi3jDrSyPNRMiaHdv1LvRIcizz+3ZCQixMV+tKx\nkPMvXbpEsViM/vzP/5zm5+fpzJkz1NvbS1u3bs0qazQOlJCMfhcsG9McgDcB3Ccdtz2ZxYaVe4KK\nFKgi3lqtg5NFkfC1tqrPt6rmiuVkxRMg2rhxVNdfkZT29mYTSV6uuppowwa2f8MGc3JkRJ7MSCrv\nN0DU1ZU/iXI6Km2+pE7uh6oeqz+ebr485/38X+2D9OhGe+lEFhusrsdijHycT4AdN4Py7H9qv+Ok\nK5+UOU6Bk8ju/d1lR7LL6SHPSVgi/kV+6+eJtTAiIIUSSBF2lLQUsVQgG4U+mdU/mC7bSrlVTaP2\n+olG949mH1PNzTEiaiOiHqkvg+k+RIipyTzQz0YiWkFEYcpWSMXxtAr7VyjGKaJBKNtuUi4ulLOb\njqXEBiye+G5YRC5OVOhLx0LOP3XqFFVUVND09HRm3549e2jNmjVZZY3GgRIro2awPSFehBkpsHpP\nykUWjQieVTVXLMePNzYal5NJtEwkeTmxrzU15uTIaJ7MFL5UipHhvj51nWYEQDwm9rOUUWmdNGV2\n83mH9/PL8PMgWoUf+bg84YVcn0vV5NUu7L44GKRBilOcNtNmSrkl0XhIBZJR1BctcVITEE4KV5Ce\nYKmQj9mm2fzLfTKrXyzrdD5LuR9Wy/Ly4j6RbNYRm1eRPEcU5xshSJrKaqbmFpKOxWPfCS/Dy5zo\n0qVL1NbWRn/1V39FFy5coFQqRX19ffRbv/VbWWWNxgGfjLoLJ0iBiix2dREtW2Zu+ppPf/jxY8eM\ny8l1GBFJUbkEmKlurnaN1E9etx2FyYwAiMe4j2upXXDKxRWI9/OPG8onnUipUS5r60MPbla8Mxql\nPRs3LmkrAK9j01da6aovg371S6A9X+jNWT5OcUL6X79bEk2cPKMCySjqixYzAiISKLvzJJLNAcom\niXGTeq2QIl5/VCgr+me2mpxrFXI/ZPVzQPi7WWi7K12+Pf13AzHS2CuVE8fdLexfTuYq7waDOkQM\nEiO81cTmyYoJbz4vFczqWyLwOic6cuQI3XLLLRQOhykajdLnPvc5OnXqVFY5o3HAJ6OlQb5meyJp\nu+wyjVA1N7tnAmhkysDH0N2tVidTKY3oRaOaD6qVPop19/YSDQxkmyfnun8aEYDBQa2eri5z4l1M\nWHlx4QWzEt7Pd46VQR5El2F1Pf5jYJD+Mhqnfd0+mXELbnw3uFnxno0bS66QlhNK8Tu15s8iGd/a\nTz/el7P8ZtpMIFCMYu4pox5QgYzWoqi5ic0IiKjWNRqUMUJcOFf0s+RfUaP5t2p2K9bfTmr/TBs/\nB6Ojo9mESp4bsU0QUUD6O0h6893LFOX5nMrjTlE2UeX9aSeiZenzm4R+xEhN9FV9lecibnKMKPfL\nBBlG9eVJUr3wPGUVi4UTGY0DPhl1F0akM5fZnhWyKhIzvvWZ3IPzjT5r9IVVRdk1MkUWTWGDQWa2\nGw4b90P2k+Vmw3yzogYPDLBoxXIbYrTfnh5rc+AVuPXjuRh9GosBq+vhBXPPxQ43HyyK+uC+CFCK\nh7xP7GO+tet3dVkyaU5Rivqp3z0iyhopeURQo7WYHUjRgZZ+mu1OlVZl4mpdI2UTw1wkQySb3cLn\nXCpcnKyRSV5/gJj6F06fu0LRlgq8//VE1EA02jCqVxxbFGMTyW698FkmfbxumayKpFX1jCTO2QBl\nR+4VVV8+d6J63UHamohKa6diLnK9jIkL56teJsjrb1RfXHGuBfhktPgwGgd8MpoNJx/MjUhnLrM9\nkTAZEUzuu1lZqZXtNbFOkqPP1taq1Uqr41dFBc4VTEksa0bGxflZsSL7HLN54f03UlHF/WbkvRDw\nPohRifOZ42LB92l0F5zM/J9ojLo3pjyz7j7MMTg+SPF9cfrEvm7a84XekhJR3herKV48CRfN7Hzf\nWpuIU14P8I5DRRj5dSKqpiqTWPFc/tmKwsZJTSUxpdDoxXaK9CSJb72KPssYJEZgVSSRty3uixAL\nWtRCjGC2kLZGjaSlWokSI8NGJFTcRP9WPi9t6fbCpA94JNfXJIwvIu0X56Ev/b9q3nO9jMn1MoGP\nn1+jA+l5kH2LPWCB4DZKyYmchNE44JPRbDj5YK4inYODjKC0thqreyJhMiKYqRRTDRsaWLlIRCM+\n3KxVlVJGtYlmvlaIMG9fDGgkz5lIuLgprFyWp34xqjuVUivAZqbJMumWCT/vQz4ReK2SSLkP8rXk\nNfLn+zS6C27u2b0x5al192EOL+Uudbsvg+OD1PpwK0V2RKh7f7c7pC5O3iBAPuw9wBfbVy9OanKV\n2xXYminnMdKTr3ZFGT5O2Sy3i6zNl5Hi+ATpc3hWGZQDMaInk21VTlFxLDWkratYtsWknRoiWq/Y\nz+ePE8VOUpNGovy+26qXCWYk06iNQi0QysAXtZScyEkYjQM+Gc1GoQ/mKhJm1USXn9vczI53dhr3\nYXBQb77Kz+Fms3IbAwNETU1aZNuqKjVhkomwaMqgUvyOHVP7hqqi1KZSmtLZ2JhNxlVkTyawAFF9\nvfEc8pymy5czM9x8oxirIK+dETnl1xB/USC/jOBznGt9xboHB4k+8pFRV1S1ImcbWDSwa+bjk357\nsBPx0w2Tq0Jzt5VTX0SyWyjhNVwLJxQMr5mVeByGa2HnAT5OxX2JwK+TTtITrQ6yrnrK15hoatpH\nmuJZR5oyGqfscQ4QU0+jlJ1qRYZK0RW2UYwy4sv7WEN60iqqpVWkji4sk2Nxi6THwlVETkyXC2MT\nU7asTc+FiuCKZrcD6fF3C/XbNcm1C5Vfr8NtZL4bcSru9Z0HSsmJnITROOCT0WwU+mCeS/UyeyAV\nzzWLPiuX5X6gvN62NvZZzPUpksP2do1AcjPfWIwRVk5w16xh7Y+Ojmbu/5zIygRWVmm5ya5qnAMD\njLS2tWWbsIqq7IoVlMm3GQho+9vazHO0iuPkeVmdem6R185orc2iEovnmCnPct3s79Elp6p5+dnz\njjtGbfXNJ/32YOZrKxNVN8iol0w/3e4LJ7vYBurc3alrx66JsCMEyAheMyvxMgaJRj8yakzerCpC\nuXJwWqnDDsTrhCtxXaQnTUYBcTYSC8TTQvp0MTXCuT3ECE4dMR9O3ncembaKtOiw8RxtipAjAzcQ\n0eVEVEEaGT0qjE8cTy1pxFRUS2W/0hQxArlCaEMkohxiv/mY+9NluHmtrP52pcv1kn4txbpUfq5E\nesIqHhMDJIl5XXNdN+JcckXcid8PAZnfqTIw8y0lJ3ISRuOAT0adRy71w+yBVDxXNLU1M7utqWGE\nU4w8K5JCvlVX69U4mZzK5EokSiqzU3mMMjk2ilKrqosHQAqHs4kukUY+ed85+VWZJYtEVRxjNGrs\nw2kV8tpxFdZOeh2r6phcziuqWrHJoZefPZ3qm5cJdylhFjjIDwrlENIXX+rXu6lvfw/1PtmbRTi9\nZK7smR/CckCczIlUruMcZiTAah1GyEVKxLbtBMQRtxbSE7xeRXmVCWx/jjblvsuKaA1lm+GKvq+8\n7ijpFUuuBofIeG75vBwT5kfsjxhcSByzCrwuMY1MG2nksY2ySbKcb1WeSw5xv3jcqDyHOJcuxfbI\nQL6+PWi2W86cSITROOCTUedRiPohnitHqxUVx1SK6KqrmGIoqoaagsY2fkwMHMRJpuqenitnKCe5\n4TAzC25rYwpmJKKppoEA0VEhSbL8oM3r4gqs2DcxWJFowsrnhZPO9nY94Q4Gtc+XX87mq7tbI6ZW\nAyfZhdhfq8GQrF4fcjk3VbVC87e6Saa8/OzpVN+8TLhLCe5rqzLRXSwRbksemMjCxeeEibBj4zT5\nISz5XHoNucibE4qQ1TqMFLI4WSezuZQxrmyKpr0ioQMx81NOuni5rvQ+kQAFiCmVcdKriCLEvq8i\nfUAgs41Hw+U5OnMFI+Ims7lIktifZtICKDWRfs5FiHWqzHV5XeLfsj+qGWnn+xul47muG1ERt3pt\nOkUi42T9miwSypkTiTAaB3wyWnrk8juMxdRBgrjSybdQSE/AamqI1q9n5JU/b4gPzap7uqg4chXx\npptGMyon/19UHFVbb686qi1PtdLXlx3UKBBgBDUaVft6EqlV1UjE2He2o4PVx8kq95N1itiIY3Mz\n1yuHW6HI7ZAhFQFzk0x52bR1//5RR/pWCKnVBZ75nkuBZzwImaiWU5h+EaVQHXWk7ddNfB7SsGsi\nrFqLYozTVhseVD8cR4poND5qPD4nzB6t1hEnNSlz0kRSJEjVxMgeN/dcTsxkdiNlK5i9Uv9qKLuf\nKoh9l4nc8uzxjmJUHYwn18ZfdIvnVBNTLFek+1shHKsTPgdJn05GjrDbKBwT6+BblDSSvY40E155\n3YyuA5WCSybl+feym4xfAhhBnB8LPzGu+rY7jMXCiYzGAZ+MFg92c46KD+FilFv+zFBRoZ0nqqN9\nfdmEkZMyVV5PuV/ZhG8047/Jwc1TxU1MMdPXp/f/lNVJUVG77LLsumTFjRNjrhBz8snNgcUIuaIa\nKpv9cpJuJc+qFbWPt2UWUMkqVAGL5PbdeuC2Q4ZU5NDL6qWbcGo9Cgqq5WDgmXJGuZLRUgRJ0pG2\nx/vyvviMlEjVWhRjnLbaiJOtB9dyhWe+F/wBn5uj5iIxhbQhBgISVT1VqhWuGvKARmKaEfF4u/Q/\nj8ormxDXEyOIR4kRN0H1HK0ezTbRVW2iUioqg+0m54ibTKb530bmwCB1VN9lpCfZoqlvihix5XNh\nJaWOFcSF9ux+L22SyNE7RtV9dvKadAiLhRMZjQM+GS0ejEin1Qd5+YFVNFNdtkwjadx3kdcbjerL\nymRJDhpUV5dN4OTzRPPUQIApmXx8PGWKHOyIn2OkqPGtqkobg+p4W1u2P+rAgKa6HjumN2sWFVFx\nUwU3EgmgKhqw0ZqYBVSyCnXAosIIrlUUqj56Wb1c7BADz3Q90rVklNHFglIESXKKGNpRIosxTltt\neFD9WNQwUsicbkPMC1pF2SamIGb2ejkxoima1gbT/UuRlj9zheJ8sTwPknRMars6vV8khpeRRmij\nxMhglDTCV0eMADaTlmv0mDA+IzNaedtE2cGUasjcjLhB+ruSmKIqknR5zeJC+RbhcyHPK4V8L+2S\nyDgZ99ljlhNe50SvvvoqffzjH6fGxkb68Ic/THv27FGWMxoHfDJaPHByyM1p8/UF5ISpqYnV19nJ\nAhjJxIXXK5KqSCS7HTmPp6iy8o1H1uXti2V6etj+D3+Y7W9qIvr857PrOHxYGycfg+jrqSK+fM5k\n1VWcBxVx5Od1dTGTYVml5TlZ+d8tLdn1GEUDVsEJMiZfH04Q3HJGLmXaD/rDkJpNUe+TvdT3ZN+i\nI6Ki8jZwcMBVf8Cl5G/ISVuhc+q22unqmnhQ/TCExx6MPQ1OZkSlL0gaCeWmn3HhuLhFSa9+NkrH\nqxX1czKjUjubDNoRN9G/U944YV1Fxr6llVL5DcSIMCfSst+s2baCmPnvBql+HnVYVEC5ghwV5qme\njCPqWrl+i/m9NCO+cXKGXDsEL3OihYUFuuqqq+ib3/wmXbp0iQ4ePEj19fU0MTGRVdZoHPDJKIPR\ng62TD7ypFCM9hapdomJWV8cIFCd1qqiunORwk1YZoj9qfb3mi6oRwFGqrtYItEjsGhq0oEIiQVWl\ngGlvJ7rmGqbeqgivijBzksfbXL5cG4PYD26yzI/L0XbFea+uZmVE8if3OxYzjgZsFXZU/aC6AAAg\nAElEQVSvnYEBvel1T092+1ZMrhYLSculDHsh6E8pTeDk1CaLkUyJylt0ZzTzOfEXq7Lyjxa6Fm75\nNBa6Lm6ua6FjNlIinfpeFNQ/rxK4fPoVp7wfjD1jplsscDLDiVKI9EGI+Pxzxa+TzEkbP7YmXccG\nRZnKdJ3HSE8qeYqY9N+jtaOkVCFVZBek9uG0s/F0LjwabqVBOTF6LyeSRmbEYv96SR2FGKT3Tc2V\nkqdEGN0/akx8VUS1hL8pxeZEdvDyyy9TKBTS7fvEJz5BX/3qV7PKGo0DeZLRgKO00wOYmADGx9nn\nZBLYtct8v4xkkpWtqwOGhoBwOLtMOAzEYsDwMPt/+3bzPhnVWVfH/o/FgGAQeOYZ7ZzpaWDLFn0/\nh4ZYXdu36/vF6//Zz4BIBJiZAc6eZccqK4GPflSre2EBGBlh58zNaXXccgtw/Lg2RwBQVQXMz+vH\nUlsLPP008JGPAO+/bz7udevY/9deC0xOAtXVjHIAwAcfADfdBPz0p/p+iMevugqIx4G2Nv3ccGza\nBPzZn7G5am0FrrlG6//8PDvvwAE2V0brLUO1VlavHY7jx7VxAEBNjfX2Rdht101Y+V4YQbzOVd+V\nXMcXO6YmJjCZXujDySQm7j6F8Un2d/JwEru6S7jwJkgeSmJiagJ1gToM3TaEcND4oqgLsEX+8JkQ\nQhcJp1cAsWgMg7uDurF3O3CR87Zi0Ri23+rcBTUxNZFZlxv+/QasrF9pOnZ5fsTznV7XQsccDoax\nq3tXVp890b8JAPy+lATgla9DPv1K/9YhBiDfSzOZbrsOwBAAG7/FRa2zUITB5nQKwNUA3gWb79b0\ncXH+2wGMpj8nAaQAjACoAnARQBeAPQC2gM17GEBPunwIwPl0uUvpOrcAiKfrQPrYfwFYAbZ2vw3g\n39P1PwLggtDvRgAfB/A2gGelMVWm2+CogLVH9yfS/y9P/8/rUNUHADPpvl8H4Ej6//PQ5gMAFtL/\nxwDsBLAV2niXA/ggfSwIbZ5bhXPsXL9uX18hGH//htLt83UHrH93S/C9SB46hImpKdQFAhi67TaE\ng8Gini/j0qVL+K//+q+C6vAKXGHwRsiV0iSXqaRVlcaOOadYJ0/nItchp1yxYtKpinKr2mT1srOT\nqXc8KNC6dawtHswoFGL9FE2G+bZpE2tb9uUUN9GvUxUtWNw6OrR+qPxBRTNbUfkMh5k5sXhOX5++\nrd5es9nLxjXXZPedyH5QH16ez/2GDfmpm14KJlSIepnru7LU/VTl1CalCISTD+z6GW78oxb6Zi3o\nm7WgX/uTdkrNppRpXawoiGZlCvFpNDMnFtdl496NOccuz4+b66oacz7zmGtN81V3C/Iz9apPqNwv\nK2qLYLqYt1IeJ+fVKbFOMX9mvjCbC/GY1WA5srrXT1oQoAbS+2NSui7R91H1LCCX4VuEmIoaJk09\nFJVWI6UQxExceV/kPi+jbP9PebOjoIaIBVfqI7U/Ld+ipFZplxNTXPm8x6U5aCWiz5MWsbiL8vcT\nFuv2gqJq9TclTo73Oxcniu/bR9i2jbBtG/UfOGC7/kLOn5+fp9WrV9Nf//Vf0/z8PD311FNUU1ND\nn/zkJ7PKGo0Dvpkug9GDrdkDr2gO6YZ/n0hOOGky6ncuk1Ij/0qRzK1bp5nmqgheOKyPTsvTt4jB\nkVpbs9PQACz3ZzzOtra2bKJZXa31i/tMmhFl2axX3tfZmZ06pqqKtS/2t6KCtcv9b+vrWf+WLdP8\nX7lZsJH5q5hWpro6f1/gVEqflsYtEucWVPPjJWK82CCnNilFIJx8YJdcqYinKv+oFZLrlimuWG9g\ne0DXxsDBAWrZ2ULd+7up+3vdOccuz08x1lUkOBv3OE+YTefdLdM3r/qEyv2Kk60H17yvYTfIuUyc\nLObaNkScjOdCPBY1KSciRRqR4+MWiaBIEPmcyPN0DTGyGCVjwgjK9udsI81UWE79woMahUlP7AbT\n5WqE8wYo25+U/92QPl/2a8218Si5QWk/J5wqc17R91Wcc07uRR9a2ZQ3X/C6q0nzWS3l99nqb4oL\n37VcnGjz448Ttm2j2KOPUmp21nb9hZ7/0ksvUTwep+bmZvrkJz9Jd999N/3u7/5uVjmjccAno/lD\nVH16e50nAKmUnrQ1NeXvByirrAAL7iPn5BQDBWnbaNY+HjFXFe125UqN3IlEViRXKvW0ulo/XlGZ\nFfslE2VZbRVVZD6PYhnuF1pVxfKwisRURXbb2rLnUEw9w/tZWUl09Gj23Nvx4eTkjRNcmcSV0v8n\n1zhU87PY1Usn12Ox+Prmgl1ypSKeKtz0FzfZJnpOgdcb+nYoQxQiOyJZqmHvk705x54P+ZT9h1Uw\nU9TEPrY+1FowYZa/F6bz3voTgczMWR7zooHNB1e713BmLdwg5ylyjngQmc+FeKzbpJz8ckMeN68n\nSHrSFaXsdC1EeqLXnt6XIqJaYb9q65Xa3pzOMyqWaSa9OhuX2kpJ+1RbE2WTSk4sK8g6UZWDFoG0\nXKldijnn82wWobdQMqbyRXVQIXXtecqF71ouTpSanaX+AwfyIpJOnC/jox/9KG3fvj1rv9E44JPR\n/FEM1ccsj2U+JEcOzCPulxVTTrDWrMkmo9yUVVZv6+vVpI6ndYlG9flCRSInE1OuYDY0aEQnEskm\nwC0teuKqMrPlpsRVVYwI8/Qwcv9VG48WLM4VD9okqrK8nAyZpJmtWy6lu5RkNJfJ7VJUQZ1cDy8E\nZCpn7H9qvytETwWZ2PF6u/d3Z4josWn2lMnJQ+W2Smre2UyfP/B5UzPLfMwwN32lla76MuhXvwTa\n8wU1IzBT1ESCc2z6WMHzKH8vTMtHfph+0HyWqPces2EuTth8cLV7Dbt+z+AkpZPMx2BFAR8gZgIr\nR2Ml0uZpgBhJaaVsM1siPXlTKZ+8HqPItOJXQyRyFem6OBFTBSHidfLcpFK7o5FRylIe64Q+ikGX\njAIJGQUhMtpUJr7Lpbo60/PKTWt5sKZjpF2b8nVqZjrcRRoZLwTy2CMO1CmgnIJ7eZ0TvfTSS3T+\n/Hk6e/YsPfDAA7R69Wqan5/PKmc0DvhkNH9YUX0KVTvM8ljaeXi1YoYsE7OqKi1vp0iGxTQvcoTg\nXJuscIZCjLxypVT07+RbX5++n6mUPs8pJ6+A5tcqz7lcnqunAwPauXV1rJxsdtvQwMjz0aNaH+RU\nMXK6HhEySTMjp6q+O418U6bkIpuLXQV1G0uRzLsFtyMLy8SOt9e9v1uXYmdwfFDnJ4ptoJp/rDE1\nsxTrbtnZYmkMa/4skjnn049rtpKiYvqJfWoTYd7H1odbMwS6qOj+DBH+lagz4V/4boKTQTFtiRPT\nbZVMi+RlhUH5uFDGKO9jxKQMkV5BNYviKpLRZuEcsV9y+hZeT1yxH8SIdK/ULvel5Sa4rcIY6kjv\nf9oj/b0qfQ5Xnyul8efaYsQIZS/p/UN7SG/q3CeNqZrURF+Eqh9OkVCOFGnXTcRCnxYxvM6JtmzZ\nQpFIhEKhEPX09NDrr7+uLGc0Dvhk1F04pXaoHvSdfnhVkSzeb5GgVVToiZccRMnKFovpy1dXs7b5\nfBkFZOJEqbs7m2C2tTEyJ/aVk06VX2l/v9rMuKmJpcrZtEnfx/Z2rR9ifaJpsUr5lNdOzid62WXa\n+aLC65Y6ZhQYS3Vc7INPNu3B7ouokvn6Fpp6JMc4OSG6/X+1062PbnQ99czg+Dg1fudPCNu+RNhW\nmyF8ThJU2VTSSHUU92MbqO5bdZl9RmaWKpPfXL6BnGiu39Wlq3NfPE7bANoGppiu+pdVtHGPfg3c\n8qO1jEIvfK+mcLGKYvU/TmpiVSzI5CUXkZTnIi6db2QCKpnGEogRrxXpPnQTUwG56smVSZFE8TWR\nAwNFiZFDTuQilG1Kqso3KhPYFmJmsZ8nPSnuJX2AJTnXp0gWjfKS8m2ZNB4xoFCKsv0xo9L5lemx\nGb286CZ9Wz3kzvXrgslrOWKxcCKjccAno+7CacIoPvh9/vNMsTNS5MTy9fWawqfKNcohK52BANF3\nvzuayWMqbqtWacSwt5fVW1trTkKXLWPmrKmU2j+1vZ21H48zRXRggBGmSIS1I5oS9/ZmR8FVkcvq\nao08crLH10Mm0mKfWlr0qumxY9p8cl/b+nrtMzffFcmwilDKPqyqqMHRPxinjY/uo/a/epw2ds9m\nHvRFs5J8VXdZAZf76Ct01mFm5lMuZreFkpFc4+SE6Kovw1XSw9dCjAoY2bHVFdIlm0oa+fHx/Wv+\nbQ21/VMbHZs+ltPMUjb5zeUbqFI2OfGOfjNIq78C6vrjBpp855hyDtzwo3Xa/M30RUKcSkOunEKc\n7Pffxo9/Zi04QeLmpcWOMiySFyOTXpl4iERdNAfuI2sRdXl9MmFURZEV515UcSuI6FPEyFpY2h8n\notXCvnrSfC95ECJuctst+YzWkF55rSBGDMV9YiAj0WRYPi5vVek+cCIpvgjoSPc7F5lVbeIcpYgR\nUE6q46StRT6Rj0sA30y3+DAaB3wymj+s3A+cVjvEBz+R0Bg97KrIWTBo3h/uX8m3qqpRWr5cv6+r\nS69M9vVlR9blROvwYUYoW1v1RFiuE9AHVOrvz1Zqxb8DAY3INTSwuuW+i1tNjUZy+fivuorVE4lk\nB10St/Xr1ZGIRRItm+ByM+dcprviOWvWsP5tfFR7oMbggcx8iD+e+ZKdVEp7acDnTT7uK6DWYHYz\nKxdSXygZyTVOHg23648bXE09w9eCRwWM7PhrOjb9jtbPHOMcHB+n+L59tPnxx20HcUjNpijxF6vo\nX7o36oIIycRzcHyQWh9upciOCHV/r5tSsynDtDBWfDeJ1CRbVmTNot66Ea3X6Yc80xcJJUzhUrDa\nLipWuXwuRdj48c8KYHSM3FeaVGpvihiJtGPGGSeNCPWS5jMap9zmuiJU6VJAmirJo9byPotErYfM\n/SONfE9lsrhSEcDIylZDjMzGFftF0ltHzFe1lbJVTr7JpsvilssXVZ4jcQ3FvqlUYNU6ecCiwSej\nxYfROOCT0fxRCuVDfPCzkk7GKECPqr+cXIuRb+WtsZEpmwMD+kBFPT3ZxLe6mvmCtrczU9SaGrY1\nNrK+c/LH1ciuruwxyea1ZrlRW1uN/VdFn9fqatbOwIC+DFd/jfKW8nMBvW9rJJJtgivWoTKFlX1g\n+/qYspupJ/1AjT98lFA7q2tDdS3YJTsiqfayalfOKBdSXygZyTVOHg138p1scuWGb6dRVMBc4yw0\nT5toEnvA4Eslk8T+A/1ZPqIqX1Sz+VERzPZ/bidsA1Vtryp6mhg3YPoioYRmfAWr7XHSHtTtpEUp\n1ZsuqwRCJG+FpHtRvWiIC3WDNNPZXAqpSDBF9VEVtVYs20TGhLOestVJUXk2Su0iblWUbcLMiaGY\npzQgfV4v/C2aE/cp+iTmBm2XjlUTU4p5/bWkj84bFM4V50i83OV1shL52KguH0qUAyeyAqNxwCej\n+YPfD3iE2GKkZJBJTK6HXV5GJB+dnWz/4KCxCSxXHvnndev0ZEkmnn19+rQkPT3GKqJIYNvbiT79\nac3cmEeR5YF8VHk3xU0mjqLJ66ZNGslT5S2V6960SR8symzjqnAkolcWUylmviya6lohfFl+prOz\n1Pp/H8gQUbEN0W9WVHntoFxUOx+LGyX3VxRQaJ41VT7U7DY2Z8bb9Qjz7xSJlmiaO3BwgBq/05hz\nflQEU8wV2v7P7fn5Alsgwqoy7rxgyB6j20GqrKBgE+d8VV1L0RPJeeUpTtYIhEiumiz0waiv/EWD\nqMhxcsOD5ZgFKRLRJpTjmzjvfC1CQv/rpfKdlE1MxbG2kzoCrRipVhUUiZNC/vlyYiSbmxOrVEvR\nDFj2O1WlW+Fzs0LaL0YFXpbuvzinEdLMbWUVn69bN+kVb/EF0UB6LHKEZCcsGjygrhYL5cCJrMBo\nHPDJaP5QET2vqEyqIDqy+qZSMgGitWs1H9DeXqJly0az/E1FxXXdOspEpuWkcmBAUzFVvqFVVSzg\n0MaNerUzGGR/i2Suvd2YlG7cqNXP07bwegIBtsXjrN+yya9oZtzYqL1QUJWtqtLWmafHWbVKO0eM\ngiuTcKMgTOLLC3EtZF9cOc+oE4q8VdVuqeS+NEKu8ZeTmY8X4aS/YqFrkSvPWi7yYyUfamo2Rb1P\n9uoi7opES/wsEvWqbVUZs95cGBwfpMgOFl23c3dn3vNq5UWBoYnwl91/wVDIiwyniGzBSrMbqq4U\ndXYUo84pT1YJBCeMIpkz60M8RznxODfXlUmk3KdB0iLXdlN2Ds02qXyK9Oat7aQR2EpiCqGcz7OB\njJU/Rf9HMZpNBjnh4/VESU8Q28mYlMoBljhJ5HXxFC5iqhijaLyiwiymmBGP889c7Y4L+4zW16iM\nE9e+lfZNUE7373LgRFZgNA74ZLRweFFlMiIs/OG6vV1P+ESz0xUrNOU0HieqrBzVEcPBQUa4VqzQ\nghHJbYr+rOJnlZqp6gPfKipYeVWQH5WCW1vLTHVlAtzczMgd70tXFwsA1dzMxiGa965YoZHN9esZ\nsT16NJvAiYRVJMvLlrH/ly/Xz49qbaJRdv1wNTYa1fvdymTzjjtGM+SdK9yFwoxwlUsQHreQa/zl\ndDPzIpw0G3V7LXKRHyeVOpFQVm2ryrS7amiVYRu8fTF1TM8TBsmPLcDKiwJVmc2PbyZ82T3fYDv9\nM4KXFHnHEScdyRi9ejT3A79VhckqgeDlRIJlZkqbi+TKx8U0NRuImZZukOqPk55siabD64RyqiBJ\nvB0j01q+HZXmRCTAbZRF7kavHmXKo9iXMLF9vB6RrDam9x9Lj/Uo6RVUURGtEdrsJi1PqJwqRg5c\nVJluUyTrfaRfPyNzWysvJ9xUQFV121BLy+n+XU6cyAxG44BPRgtHwZHpLapPdlQqI4KsCmjU1pZt\nmtramq3w1dUxAidHi+X94kQvGNQI57p1jNjJZI8TTZHMiX1bu9Y8oFC+W1+ftlZie2JfeLlcRERU\ndDkBbWxkBNbovMFB7TzRj5X3S5xzlZ+o2Ke+QnxxDOq0GlnXzrVYzuqqF180lRu8YE7pBHKRH5Hg\ntD7UamrayoMYtT3cRhv3Zqe7Eeuq2FZB2AZa/p3ltOHfNxiSKFXQoo5/6ch77q28KFCVsXKeE9dE\nIS8y3Igg7BnwB3QeddbK8OKkEREnuTknWLlMaXORXPl4XKhP3lpITyz5XBwjpqrKcyLWVZHeNpLe\n9xGkVytFcieqr0bklZM7bm7cTcwPU7U+onLJ3yWJBIv3t1P4LCqY4jyLAbLqSW2+yzdOkEViJ5rY\nHqPsNbLycsJNBVRVt1HZXPC4yW8kEuGEray3SCSiHB98Mlp6WFWf7KhURgRZ9OsUH7JVOUb532vX\nMsJ67Ji+DzU1ajNbMUpuXZ2eqEajzJS2tTVbCe3tJero0FRMkSCrTH35JvtnypuczoVITwo7O7OV\nV25ubEZE+PGuLs08WJw31Xni/HFSLvoc8zplP1EOHi24sdE8RY8dmBEuo+vIzrVYzupquQQh8jLc\nUqGKTXIHDg5Qy84W6t6vNpcV/UGtmrYalVflGsU20LJvLSNsAzV+pzGTxkU+Z/l3lmd8UkXfUTfy\nrdoFb5urvvK4jSILO91PTwRycuvhN5+Hf7cjEhspm1bHLqcK4QSLm6FyoiiSspXEzFtXECNSRulG\nOGmVTWD5HPal6+K5OMV0KzL5FBXP5ZQ9p3GprNgP/pmb5HYZnLeCNBKbIkaIGw3aNIsC3CjUGyON\nbIp9suqPaxd219/O9ZnvtRwnd8bqwxLgk9HSw6r6wsuFQua5Rc3AH65ln0eZkHK/yP5+ov37R7P6\nEInoCZi41dSw/6uq9MS0vV2v/IlqZCiU7avZ08O2tjZmJsvrlTfR95NvXImtqGB9rahg5q983uR2\nVIS2t9eciPC5FP1ju7q0eVOdx8lkdTVTUFtb9fOYq801a0YdJ3b5EC7xmhX9ZXOlsclV1k24odCW\nk5lPqZCPCmUlMI5Mco3WQjzv2+s+TN9pbKT4fw/Sxl0bsoiOGVHTKZ8PZyufqdkUtT7UajjWTO7P\nndFMPcu/vVxZXs41yrfAtoBhH/g5YkoYkdRyEt36cGumjpX/stIVwme0FjIRl8ctHg9sDygJ66JB\nnIry8GvpN6pQ9SoXuRggplhyv8U46ceeKy+lWF40O+0hfboao+i1croR0VSVkzsx92iI9AS2RtHm\ngLR/bfocnr6G90kwHx0NjeoJoeiHKY6rXZiHdspWNLn6K88l7xtvU+UfWkNadNxcyqJKLTWDVZIp\ntmHl2rdzfdooq/tulDBFlA+fjHoCVslAKmUttyiRtQdvle+iikjdccdoJuqumKeTkwwx52g0qvlZ\niuaq3ORUTjUjEtKWFr3/Z0eHNoaBAbWvaUMDMx0WzxMJsWqTTXIDgex0Nl1dxsRJnNuBAT2R7e3V\nRymuqyNd8CdVhGEzJVVe01CIkdGmpuJFcFZBvGZzKZ9GZXmgpmKNwQ2F1iejuZGPCpUrd6aYMzP0\n7RB1f6+b9j+1P2ddsfsDtA2gq76sViSNVFzRh9NM0eve360LTmTUj9pv1VLzjmaK74tnyqtykKZm\nU7TioRVKJdUKSUvNprLSxYjjUBE+K+okL9P+T+20ce9Gav/ndtq4RzM3Nvpe8DXr3N2pnCeVIhzZ\nESkvU1o5cI5R1wt9+LX44J9ZCzfNEOOkJzBy/eLxfsoeu3g8KpUlqbwYXEeeX5GIGKUbUW39RPR5\nxb64oix3j5FVR9ltRr4ONkp5RmU/TB4sqSrdrpHJr6j+quZSBK9fjgAcSM/zMcoGT/3SQMxH1c5L\nirjUNyPwPpv5EhfBbFb3O+WEObGPvAGfjJYXrKqoVh68RaVVVZaTLjm3J/dV5GlMNm7U0oxw30lO\nTEXSyFO3iCaxvLxMNLu69MRNFcBo+fJsH9ZcRNRsq6hgJsKXX55tfizOi6iqim01NmYTLrlffE54\nkKJQiJWXo+aKUK2DGDCp2KROhh2/SrFssaNQ+/6f5QPDwDjb9DkzRZXRiJiJ5/1/7U20DaA1v1+p\nVObEsiIRE81dcyl6qvygcoTbjXs3ZpUX07iI+y976DIdMaveXp0xw7VC0uR56/6eXm0VlVM5nYxI\nZLkfrKqv4hbdGc1WWtMPlqlfT1H/48YvJjKKcLqPkR2RLHNkzyNO2aRGhUIffsV2rPx+2i1vhwzI\nRE+uXyZM8thz5aUUy6dIryJanV/RbLWTtEBBKkIcIT2B5eavoumsqDqGKXuOxPpALMgSiJkDcyIo\n9lEmn9VCeRALutRL2WT8GGVfR3LKFTmSMN/aFPNWiGmu1Rcs3BdVVHzltuIW++FxX08f1oA8yWil\nw8TTh0UMDQH9/cCBA0A4bFyuro79H4sB27ezz8kkkEgAPT3A1JRW1803Z5cFgIkJYHwcSKX0dVP6\nkgmHgZUrgWeeAUZGgOpqYGFBKxcIAC0t7PP0NCsTjwP19WzfmjXARz7CPl+8yP4PhYAVK4A9e4CG\nBq0uXm9Vlbbv/HngySeB999nf1dUsD7xuuyCCDhzBnj7bTYmPu5QiH2emmJ/z81p58zPa583bmTt\n87kXUVnJyp4+DbS3Ay+9xOZmZobNcU2N8XrK6xCLAV1d7HM0yvo8Pg4MD7M1Lhb49XT0KNDcnN1/\n+XoD2DW3ahUQDAI//SnbJ193bsHqd8cOVGP0UTiGbhtC/+p+HLjjAMLBsHJfOBjGjS03AgBi0Ri2\n36q+iMTzfvvp/0R9ezue+NILWfXzsqtCqxCsDGL/8f0YnxzH8IlhPPfucwCANeE16Ah1IFgZxNrd\na3HLvlvQ80QPqquqAQChQAip2RT2HduXOfe+sfswMTWB1Dz7Al+x/Ao0VLMft2gwipMzJ/HQxEN4\nf+H9TD8qUYlT507hsWOP4Z3ZdzL7P5j/AAvEfgxXhlbq+m6EltoWRIPRTNnd3bvRuqwVANDZ1Inm\nYDNmLsxg5OQIvvvz7+r6cYEuZD5Pzk7i8n+5HP/4k3/UlQGA5dXLM+M/PXcawyeGkTws/BhNABgH\ntjZsxeHXD2P1d1dj0/c2YWpO/6UJB8PY1b0Lu7t3o391P974whvoWN6Rc4yegvj73wXA6LctDGBX\n+v9C2omZtFFI+fSaYRhArvvKEIBWk/qHAPQDOAA2Xnns4vHdUllI5cPpNnKNZSuAUwDuAjCVPu+1\ndN2jAH4qtcPnpwbA1enzHkyXeTn9/8F02SS0R+cGAC+m20sA6AFwD4CXhL7UA6hOf74E4DSALdK4\nGqTyC0J5AFgFYG96fqIAZgCMAPi4NE5AW7uR9HiaDOaoMz0W3u8poR/y3MrlVJDX2QjHweaA16Na\nR9X1quqDnevUx6JDRRHaSJNlH/lgaoo9KG/frj14f+hDwOQk+9zbC+zda1wWYA/Yw8PAsmVjmJ1N\nAGCE84YbgKYm9nB/112sTCzGHvI//GFGjurqgFdfBb74RXaco6kJeO899rmjg5HUVIqVn58HLqSf\nffr7WX+uu471ORRiZa67jhEvGZEIcPXVwLPPGs9JTQ07/yc/AX7t14Dnn88m2vX1wNmz7HNVFSO4\nYp927QI2bWLEOhRiZBJg8/aLX7D/p6aAa64BTp1i7b3/PnDVVazffJ7CYW1+xX0q8HKdnUB9/Riu\nvDKB118H/vM/GeH/4ANtDt54wxmilUwyElxXx9ZZVWcikb0WfI7k40b729uBl192jhwWG52dYzh6\nNAFAP0Yf7iN5KIlX33sVr0+/jiO/cQS/+PEvkEgkCq438VgC45PahR0KhDBzgX3R+zr6kJpL6Y4D\nQG9HL55880nMXWJvqqorqjOksbejF/MX5zF8YhihQAg3X3YzvvVr38LH938cZ2bPYPrCdF793NCy\nAU3BJgzdNmRKSsXxrAqtwsrQSvxs+mc4M3sGl+gSCKQjnTIqUAECobOpE2988P58GxwAACAASURB\nVAamF7L7u6xyGeoCdaioqMCZuTO4evJqPPt/Pav1qwfAMJD4fxIYb9Pmrn91P3Z1e/dLkzyUxMTU\nBOoCdTnnOYMpAPeCPSXtgOlDeV71i+0kwR7UTU4bGxtj34tc5ZNgD/Z1YKTiLrAH/Bhykwsb/XEE\nVtpKgJEUAGgBEATQAUa2hhTn8TpPAngmva8fjCya1d0HYI+0LwCAf6WqkSGWYxhDAglGAkelPojX\nDSeajQDeB1uD68FIXF26vpH0/qDU3zCARwCkoLXD50D8mq8FcCjd/3Hh/O1Qz604vlYwYp/vOqd/\nD9AFYCWAnYq6VGss9oGvDa/L6nUqIPPd8FFyVFRUAHlwS18Z9TjCYfZgLD7ki4peRYV5WYCRkJYW\nYHZW27ewwAgfV+FktenHP2YEZu1aRkQffJApnQAjUzfcwD5Ho4wIcjJ47pxG+rhSFg4Dr72mKYin\nTrF9ra1af+rqWP0vvMCILsBImYy6OmDZMqbizc0Bhw4Bly4xgsrVWUBTbSsqmMIq9+naa4GxMbaP\nz0sgwMhVOMzmpK8PWLeOEf4bb2REtLqake9gkBF4UZnmc2eksvFy69axedi/nym3589rRBRg/cqH\n1Kna5WqsmdrKFeDGRv0cycfN9rtBRIupVgaD7P9iqbs+NExMTeCZU89gcnYSW57doiyTPJRE4rEE\nep7oyVLhZFz7b9civCOMH7zzAwBMNezr6MPNK5jpSCgQwszCDKormcTB1c1YNIadiZ2oC2iSWGMN\n+1LUVNRg3/F9GD7B3sjNXJjByFsj2PLsFqwMrcwQ0UBFQNkno/0A8Oy7z2L4xDCu+7frMDU3ZThW\n3q9QIIT35t7D+OQ4Tp47iblLc1igBVMiWh+oB6UloNR8CucunNOOVdVnPs9emsV78+/hzNwZtNe3\n43/f/L/1xCqtmNTdoM1RZ1OnoZrtFUxMTWQUbp3Sa4YwmIK1BzkfjPOqX2zHjrKaq7ysMFlVufLt\nD4dK7cqlwllpi19qIQDvAjgBRtpUCloSjJTNAKgVzksZtC+qdjsU7YlfqZvS/3cC2ACgF3oiysd6\nFxgp2wNNHT4KbQ2OQ1ufemk/wEjvL6ERUQC4ApqazNurBZvXQ9ArwlyBNJpbUfGfhHUVUrWW/No6\nCPZdUa2jqh8qtdTudepjUcFXRssQXNHr6gIOHrRGArgq19DAVMzGRqb0hULMvHf37ux6ROWrpYWR\nvVAI2JH+0b7hBqaeTqdfsAcCjPR1dTHVtL2dtccVOVlBBIB77wWefprVAwC1tYysHT/OSJ+okFZX\nAzfdxAicCq2tmvq6fj1rc3xcIzJtbUB3N6v76afVZsDBIDPzvXhRO97SwsbFCbdoxsxVNFGBnJ7W\n+qhS2WQlkq8JR77KnKxghsPAI4+wfnd2AqOj6muFK+oPPABs2ZKtrIuK+9at2jgffFBd3ikYKbJu\nwMiqwIf7uPyfL8eJcyfQUN2Alz7zktKcU1QFW5e14rXPvWaoPoV3hDPmp8uqluHtu99GOBjG1NwU\nrv7Xq/Hu3LsAmDpaXVWNBzY8gC3PbsH2W7cjHAxj0+ObMPLWCLqau7DnE3vw8f0fxy9mfpHVTlNN\nBNeFr8dP3/8pTs+dRk1lDdaG1+I/3/vPDPEDmLpaW1WL6QvTqKusw7lL57Lq4ujr6MORU0cweZ6Z\nvgQQwIraFbhAFzB/cR4zCzO4IDwhN85X4f0a7YcsUhPBB/Mf6MpUV1SjoaYBZ+bOIBqMYv7ivE7F\nXd+8Hi+nXsb8pXk0Vjfi/YX3EYvGskyfRUzNTeG+sftAIOxM7MwqV5BS6AJ6nujB8InhnOPyav32\nOoO8FaaCkEC22qXaZwRZ0eX95spaCkxFbAAwDfX4xPZWgJHJ90zaV6l2cnsctWDksROMZG6V+isr\nk0ZjvRyMUDeAmQDzn7tboCmjIuRxHk+XfVo4l/f7BgBtMFaNebnrwIionWskAetrmQvFVN99FBW+\nMrqEsHs3ezi3SkQBTZV76SX2/+bNjDzOzDBiK6pmXJF65RX2dygEvPuu5k8aDmt+ppxEVVczNZX3\n64orGCETFTlZQQyHmYnxTTdpbZ8/zwjo5CQjjRyVlcwcV/Q/FdHQABw5wpRa0X8zlvZJ6epi4zl+\nnB1TEdGqKqa2zs9rxwMBNnbRDJgT0ViMkedEAnjoIU2B/NnPtOMqlY0ril1dTHV96SVNJTY6x4pK\nKCuYExNav6+4wvha4Yp6R4daWRcVd1Fp3bJFXd4pGCmybsDIqsCH++Dkc3phOqOMyuqgqFZOzk4i\neThpqCByxbMSldjQsiGzPxwMI9bCfhBi0Rh2JHYgXBPGPaP3YGZ+JtPu+Qvn0Vrbimsar8E9o/dg\nal79hVuYmcEzp57JENHaqlr8+L0f64goAKxrWpchf/M0r6oqgwMnDmSIKABcwAWcPH8Sp2ZPYWph\nKkMym2YDaP4A6HzjInrebUV9oB6BigA+WNAT0VAghAVayCidlRWVWebEPz7zY8xfmkd7fTuOfuao\n0gdXRjgYxp7b92Dv7Xux9dmtWetQkFLoAkR/Y1V/C0ISGPrmEPrf6seBW0pMRAF3FCYrfoYqtcuO\nf6uRzyBX1rjK+BKMxyeqfqfA/CzN2pdVO1FZ/Rb0j9RBMP/IEWjEWeyv1bFyAjkN5m/KwZ9thHga\nqEH2GDsAvAk9EeVjWQm9aqxatzA0n1s714hdX2Uz5Ku++1i08MloGSKfB+ebbwaeeGIMsRhTwN5+\nWzNdjUTUAY94kB6jwEicLEQijICtW6f1ix8TgwaZmRFzMiaaih45wshXXR0zk/3DP2RqnGjey3HL\nLYxM3Xijvq8ycef9WrOG1cMDM0UirA4RFRVa+bVrNTNlTiIPHNDIrRgAKRYzD7AzNATE42M4eJCR\n8Y4OZsZsdo5sbmsUVEisQyRzXM0uFMUkiG4EKjLCGLfZ9lF0iGay22/djrGxsSwyM3TbUCZYDy9n\nRHie//TzqKmswSVcwvjkuO6YHDxJrmNiagLPvPMMJs9PYuStEYxPjmcCFgHIBDu58lQdbmz/GABG\n+OYvzWfU2ErptvrWubcAMJ9NbkobrAxmzcOVoStRVVGVtb9KeDoNBULoubwH150N4cxyYPxa4Ee/\nsoDZC7O4QBcy9cumybFoDC9/5uUsYt1Y3ag73rG8A7u6d2UIldn3InkoiQ/904fwrZ98SxfkCdBM\nis0CUhUTPKCSas0LxgQQHglj15/sQvhL7v1QGa6FTDjceNC3ElxGRYLNiLHc71xkh4+rA8bjkwMw\nHTFpXwVxnFsA/Fp6/xoA/B5fBYzdPpbd31wvAfh4fyqdx9ECFtCIk9L6dJsjYEpmrvcmSWjBljrT\ndeci+HauEQ+b0fr37/KHT0aXCCYnWUCf06cZ6RKJ5Asv6H0duSIaiwH/7b8xtbK1lZl8iqSAk4U3\n3mCESsTQkKZSjoywAEBTU8xXs7KSEb2mJuALX2C+mWvWsP+PHtUISEcH8Cu/wlRX7t+6ZQsjbn19\nrH6A+Ye++CIb18KC3qcT0BNg3ufDhxkhn5jQxsCJIY/0S6Qpv1deyaLGcmK7d6+e8C1nQSjR3MzM\nn3lAJBXCYeBrX9PPJSfqW7eyAFVNTcwcmxNNmdy/+mq2L6hM9q2SuWuvZcdbWvRqtArFJIhLUa10\n2k/Wjq9lqaCKuMvJTDQYxcmzJ3HX9+/Ckd84oitnRHg6lnfgtrbblMdEUpI8lMQPT/0QADNl/eX0\nL/FK6pXMeZ3RTgBAV3MXei7vQUeoA801Tai6WIEPXd+FFaEPAUAmKBLHpUzYTFbPkb4jWBVapVNM\neYAkEb88+0ucv3g+a/9FXER1RXWmrfrqery4Qnv79e78GVwEM+Woq6pDz+U9WNe8Dqk5RqL7Ovpw\nfeR6XLfrOsxf0s4LVgZx6NcPWVJCVZiYmsDk+UnduF448wISjyWwQAvo7eg1rDff6/JQMonHEgk8\n0dODOTtfEIH41MFhoiz6GBr5JrqJYkQhzUUURUVRhBHpSab3F+LbqoKs+onElV8Dl4OZuapUXnmc\ne9N1HQYLZgQAFwH8oaK/ZgRPHO9pAO2KcfLItKn08Y8KxyaRm5BOQPMxTYGtxyvSeFT9SsBc8ebw\n1UwfLsL3GV0iaGlhRJRHx21szPaRU0VH5VFwAUYA9+yx3ib3EeXo7wf+4z+0FC4AM6XlqmIu/0o5\nyuzUFIu8++67+nOiUTZWozrNoIosaxYlV/a3PHnS3F/UCNzn9KWX9CbBvI6pKRZAiY+L+62a+YJa\nRTisrUl7O/Dmm/nX5aMwOO0nK/paej3iqYipuSkkDydx8uxJPPMO+0L1r+5HuCac8UV88NYHdb6e\nqvNVxzjkaLsc7fXtePkzLwOArg6j8ipEaiKoRCW6ol3YvWk3+p7qs3wuj3qrwrqmdRj/1DhaHmrR\nmeNe33g9js0cw9qmtWgKNuHHp3+MU7OnADDSeZEuKgMcideE6OfZUtuC4x8cN/X55H6SHGvCa9AY\nbMysV2ttK177rNq31851Kfbr7m9OY2aE1b+6vx/d/AuSK2x4Ahl/t6kvTCH5O+bXhi1MgaUQ4feh\nQv3p7KIYPqJTMPfxS8CeP6FYPgLgDYN6AWNfUrsQ2+SQo8lOwdjvsgWMLNYBeBXZZrJm/RTbNhqv\nvI6A5tvJoZrbJID96b5dAFNF66H5nwbB1FhVZiWxX8W+bn0sSuTrM2oc4s9H2cHsfvz880w5fPpp\nTcWUH3RFE0xOvsTIveI7BSspQ1pa9KRp+3ZGqDiWLQM2bNBSpchmn8kkI2cAq/+FF/QBdIaG2HnD\nw1pApliMlR0Zya7z2msZsa6uZvPB54GP5fXXsxXN9nY9EVWNmwcwOnXKfs5N3qezZzWzaY6uLq2O\ncJiZIA8P61PRmPmCWkV1Om9aXR27PnyUDk6bQXvNXNIquILZ80QPAK3/IqmL/XsMN7bciN//we9n\nESd+PqCRmR+d/hECCKCmqgbPf/p5nR9qQ6AB0xemEQqEcIku4c7hO9FQ06AjYmL5XJien8ZFXMTI\nyRFEHsoOC14fqMfZC2eV51ahSkc0Rbz63qu46/t3ZZRQjnfn3kVXtCtDBLmSCqhVWICZ6FahConH\nEqgL1GF6fhrPnNLO5yltkoeTSrI4dNsQ7hu7D/OX5lFdWY2diZ246/t3ZY5Pnp80PNfOdcnNagHg\ng4+14rdHgGgshltVviUA+zHOurml/48B4b8PY1fYwafuMKs3QySK/TUbgpooOkXiAE0VM4Jdf0Je\nPgLgBZj3myu//JjdpeP1cZWQp1kBtGiyYaHNNmhETmyvG8C/A7gxXYcMs36ajZdDtY6vQR9sqBaM\nQIprOgE9YW0DdO+y5sBMjlXzVqgfqJPXmI8lDd9MtwxhZMpnlsajowP4p38ayzKnFaEywVy/nv3P\n83aapQyR+3X8uBbsh5Om559nJr+trSxP6N69WpvcRDUYZCrorl2aShiPszHI7fI+i+a93E9UVjMn\nJxlh5abK8rydOKGfT1XKkv37tfbvuy+7Du5nm8uMlfs48D5xIrpmDZu/vr7sAFV8rNyHNxplqrBV\nk06j6+b551mfX30129y63JCvmatXfE6cNoNWmcB6CSpzTXEteP+vj1yPvqf6Mma0oUAIp+dOY/jE\nMJ745ROmPoCczJy7cA7TF6Zxeu40PrbvYxi6bQh9HX3oCHXgush1qEAFZi7M4OS5k3jm1DMYPjGM\nFQ+vwPEPjmf6smLZCkvjkskiR2ttK1aFVunIooi6yjodEZX9Ty/gAoZPDGcpp+/OvovXp18HwEyb\nxTQwRnh/4f2MX+zwieHM+QAyRHTZz5chNZtSmtLyIEaPb34ce2/fi3AwjJbalszYupq7DImmnetS\nJK6P/I8jWN3fjzsOHEBQ/ILkeovjtr+bE/XnMJk0/I0yMp8shvkuh5Xxi+N7MF3+DWQrdvkGBhLb\n+RCAJgCbwFRM0Tz2KPR+pbJv5evSMY63wXw4x4GxjrHsdTLrJ5+fTwG4B+o1Vq2jbHYspoW5AWw+\nX4EeNcj2nTWat0KvW7euMTvmw/DO/dtH/vCV0TKE0UtgK6qKmaLJVT4Ru3drprDc9/O119Rtyf16\nPf2j3tgI/M3fsM8dHcxXUwRvc/9+zSRYDgi0c6d6jFu3MkXyi1/Uj0dl3sgVwKoq5ova08PO4XVW\nVWlRdNva1IRApRSLCq6Z2aw49/ffr+8Tx4c/bGwKzdeHmwaLJsEqMUCG0XXT0bF4THNzCSReh+o7\nWFB9gkLoJJxK3SGqXrKKJrZxcv5kRrVrr2/HteFrMfLWCGLRGMLBcOazSH74+UdOHclq9/yF87jr\n+3ehpbYFU3NTOD6jdpZeoAVc+a9XohKVIBA2tGzImL+aoRKViAQjODN3Rrd/am4Kk5cm9YUJqLwI\nLF/WiEBVAOfmtNQvov+pGbqau3BN4zXYc2wPUnOpDBmOBCOYvTirNNGN1ETQGe3MzN0jmx7BzXtv\nxuT5SXQ2deLNs2/izMUzGDk5Yro24vof/+B4hsiuDK00Tr9j47ocum0IycNJ3PNoLZ79u3sQ4D/Y\nukJD5rmZcil7hcKJ+gtVAGUUqnrZgZXxi+O7EaxfQLa6puq3mYmwqh3+FRuBRspCAK4FUzVfk+oU\n23wETEmU2xN9g6ehETA+bisK9UnoVVdRkTVSFsW55US5ASx1DZ/PSgCXACwH8DfQSGyueRPrNlM5\njY65dY05/V0oInyxOD8sWp9RK2ak5Qo5X6foQ5krV6IqF2WueVL5fm7fnt2W3K8777TnP9nUpPeX\nXLsWWL2akdnjx9W5LcXxtLYyoiyPgV8LAIv6e/nlWv5ScSynTrG6zAglz/EqlhH7YORXy4kR983k\n83H8OHDNNYzkRqPss5ib1QxG14FT5csRS2GMXoBTvqhm+Rnl3KKTs5OZcoDmz8k/11bV6sx1rfhp\nNgebswijiApUoBKVhkqnVxBAAJfS/zgiNRHUVNbgndl3lOV//oWfo7GmUecXK/rarn1kLU6cPYHG\n6kYc/cxRXf5Xo/W3km8z3xcZjyUSmEz/0Or8RRcLnPb9zOXnWWzw8YWgBTrqB0vBIvotbof1fque\n/Hk7ANAFYA8Y+TXz6TWbK95GtdD3EVhfp4Qwvlbo83v2AbZ8NsVcpGJdVQB4TnZej11WJPZT7ovR\nMbeuMae/C0VEAkvbDdfPM/r/s/f20VHcZ77nR+o31HprSS2MZaANmRgbx47A8svYJnQChCDbQYmt\nZEOycXLPuO8NO5k7c+fCnpnZ7OTcvdnZe5yzd7LnnsnCzF7jOFHMi21MYhgGYYRkjOWxxxhf40EJ\n2NhCFiBQSwhJrZZU+0f1r1RdXdVd1V3dakF9z+Gg7q76/Z7fS1fXt57v8zwapJOsznUYSfnMZB8V\nD5WDQdmztmdP5nlSl14RtTVbWuD4cZlwCkmk1i5RE1T0lUk6KSTBHo8s073pJtkjKsqn6NW2VD8k\n7+/Xr5cqxnj0KExPz2S+Fd5VMW9CMpzOs/nee7J91aqYET0PsBY9PTNEVF1KJxSS7V6yRPYGi9qs\nK1emyk21ElSrkk4rx9ud1TWXdq2cox3jXMgmOxdhVyxqOrmmkI1Weao4+MjBpOOEZ21r91ZaDrYw\nMjHCmaEzSXLdM1fl80W5lEp3ZUr/4rMSg99OCck0EXXhosabGh9aCEwymeJFjU5EdYkowJcXfZlQ\nZSgpuzAkZxsOlcvkcyg+pNR/FTBafzPy22xLqwiPaEq86PWCbCST6eSMRvLd2YIYXyLMRPGmab1r\nVuzWk4m2ARuRid6ryDJg4YU18uBtRSbFm0idR9FHOzIhFfVOza6TenzaUjNWPYui7Iu2rVqddqxK\naNPZYvRZvvaY3d+FAqKQgoTrCdetZ9TxkKSio6ODxsZwisQT9OdJ7V1WeyRbWlIzzup5PvXkpEuW\nwOLFcpuilIjwyoo+tcdHo7LHdMWK1DjKaHQm4696DFpPpBobN8pZfMvKkvtPl6QIUjPtijE//HBm\nD7DYj6KUzocfdhAOh5XPtVmDly9PbdPuTKvpkK++rLZr5FE23Z9JD15HR/J6XG+IdHbSE43id7tp\nW7OGgC+1zmX685O9WUDGbLXZ2Rnhza43aVjRwJXYFbovyY/7jdYunff00QOPJkl7X/vqa9z74r1c\niskukoA3wInHT7Clewv/9Mk/KXVCQZbZBucFuTh+UTe7bQklVHuqicaT73qaFzXTfr49qYRKscGF\ni0pvJX63nyUVS5ISNanXOT4dp/1IO00PJXs5b995O2eHzxKX4jx000P89iu/tbQHzHhP9RCLRumK\nRFi1fXtyvKhJ2CUtzxsyeLJ0r1Fh5p4bRutNy8W7ZtaDlqmPm5mR9rYge1PT9GHp9yJd31bHbnS8\n3vvp5kZvr9lpZ4HR0dFB+MfhovguFPlU5R1ONl0NMoWQ3KgQXsDmZvl1Y6OcXOiZZ1LnSR1/JzyS\nMOONVGew1cuEKwhdWZn8XlOTnJxItKkuwSJIh9o2cfyHH8qvFy9OtTEQkKW5Yq1Ftt2TJ2cITHW1\n3M7Fi3Im2mvXZI+rmlSr4wszxeRCcqZb4QFOF6tbXy//+/znk72qAtq6rz/4QWqbag/s00/r92MX\n7M7qmm27Rh5lLYweIMzVbLJ2oyca5WgiIDvS1cWutWtNnxvp7GTXmUUMxX3A/6fED+YjFrUn2sO7\nV97l3d53WTBPlmOkW7uk5Dbr9rClewtlrjJaDrZweui08tmhRw6xtXurEjtZ7anmxOMnZO+gNyB+\nQBVMM82UJHtEBRF14VK8pBKSbjbcC2MXqPJUMRAbyDhW0Z67xM2kNEkJJdw3/z66L3ZnPDcXTDFF\ndCJKdCJK32gfAAueW0BTfROnBk8xODFTn3R1w2r2PrI3ibj1j/YrcaHHLhwzzJirh0hnhOH4MAvK\nFrBn3Z4UApyOJPoCgZykuenik4sC2cTJpXPD5Dt4Ldv2hTfN6LUVGMVpZupTiyuqv7XPkeqBYIb2\n00Gvb7vmLt376eZGb6+lm6Nc1qhQKBKX5FyYqmLEdSvTNSNZvdGgfpIn5IxHjsgxjnrzZEQc9DLY\nGhFZUYpEHKcmbo2N6dvXHi+SGGmhXmvRr4g9DQRkO0+flsnvyIgc8xmJ6I9PnYxITTiFXRs3pma6\nTZGGRuSswLW1coypyCwskkDJWYDDivT0ySdheFiWQr/zjizd1ZPUiky3Q0PywwGBdDLWbOW2dmd1\nzbZdLUlP8tx3dhLet4/m/fs59WFMV25uNmvn9ewVBfC75eeOTcEg21et0j3GSNLcE40yFL8JuIsa\n7w/zSur9bj8skwnkG197I+Paqdc3VBli19pdnLt6jqP9RxmIDeB3+/G5fGw6vCmJaH2x4YtKDGRP\ntIfohDxed4k8T3W+OianZeJ6V81dbAxt5My3zrCgTCbIFe6KFAIL8PbA26aIKMADNz3AxtBG7gnK\n8QkSEgvKFhD0BU2d787iWbKeVBnk8i/HLhxT5qfCXcFIfIS9f7w3Ze7Hp8aVv++sudPSfuiJ9nDs\nwjH6x/oV6W+2sl2rKPoHUxlupnWvUenkjPnKdFqo9s3ALplomepvr+azc8iZeNuBzwLNEG4M59Zf\nprkzIzlVH/OkzvHp5ka910S5mFmWt+aCcDic/4zZDvKK61am6yB3mEmIZAQzSZYgc/tWbdAmW9q4\nUY4FBTlxUW+v7Fl89135f23bahlpeTm43XKM6Nq1chZgMwmx9OS8IyPJ86H2ytbXy0RVHGv08N9o\nTtNJX83IYos52Ve69Q/v26d4+xacX0r/f1rryPINEI3FiHR1sX3VKkOJrnFSmv0c6O2lxjvEO48/\nSajSXHmT7OyMZiX/TZGX9skZYn0un1J7c0HZAvrHZBnv8prlSrKjo58eZXRqNKm9Om8dlyfkxEae\nEg/1ZfUsqVhCmbuMrv4uw9qdVlHhrmBscowppvhc4HN0bezi37/+7/nV735lKlZVXQ/UDGpjbtxV\nNUwjJZHmSnclVyevUu4qZ2J6QmlTTx79wEsPKPLpjaGN7F2/13T/QqIb9AVZVr2MKm+Vsl4V7goe\nmP8Au9fttkVCWyhpuW2wW9+X7yQwdrYfAX6DXBPzHuS4zEIu0TpksrkCOc5U3bcYpzbxUi7ur0Sb\nHzTB/34I/j6gGW6YzJJT9TFBZMJs1jb1Xmsx0ZcDBybhJDBykBFWazEZeZfNeNuELFVPVivaNOO9\n1h6TqW9tsiW1N1XtWbz3Xti0KZXkqL2lLtdMXdIXXzSfEEst521slPsQHsHly2Ui+vrrHYBMiJcv\nn+lTT4YqxhyPy+eKNsQcpJPvZpLFChmyGFtDgxwDa3fiomyRbo+ovX1v/C+rkuZ3rtYZzRcCPh+7\n1q5NGytqnJRmDa1Ll3L2W3+SVyIq2xlgs3uzZbKg9q5VeCoUb2mVR5ZWNAWbeKNlxtMqvKcHeg8Q\nm0omlivqVrAiuEJ5HZfiSu3R9r5224goyF5GQTp7hnvYdHgT//jJP6YlosJ768JFtVdH828A1xRc\n8U1yMXYJCYn5vvmsu2UdG0Mbea/1PYK+INemrilEtCnYxLHOYwSeCVD/bL1Sb7XWV6t8viO8w9J4\nhSfbVeJSarl6XV7qffWMTI4oZWTsgNbjqk3WVHTI4OWzfI3Kt6fIzvZFOZZBZFJYaE+rSEqkJaKg\nm3ip47sd2fUjvJlx6NoIf3gI9gR0hmtGcqo+ptHE8Wqo95peX4VOBpRjf9f77/eNAIeMOgCsyTnN\nZCpWy1IXLEiWrGbbr5m+t26Fz3xG7nPPnmQSIyS/FRWybQcOyMmP1P2qZaTehFzH74c//EP5bzOx\njm1tMiHauHEmM68gVSIzcDzhzBgago8+kiXERqRcjLm9XfbSqrMLRyIzNVmHhuSMxOq5zCSLVcdk\nAoyNzWT0tTsLda4ZetWy3GgsppCkQ488Qmi+L2l+r8cs2vmGkaTZDJGdSCvNSgAAIABJREFUbaiJ\n9DPhZxTSoSfjDfgCScdrycnN/pvZvW63Isk1g3mueay+ebUlmxtrG5VsvgAT0xMc6D3ApfFLac5C\niXudYiqjJNhTMlPIuCSRbLfCVcHl2GUuxi5y9NOj/OzBnxGqDHFv/b2ATMY3hjayvGY5/df6GYoP\nMRAb4Ladt9G8v5mfr/p50j6JdEa4+bmbqd1Ryy2/vIWHX37YMHu1IITqBE/eUi9N9XK605wktJob\nWjOyXCNpeoQIYcI000x0rmoX7ZKwFqJ91QNcVmAt5s8O4iTGsjW5rQgQDkDzLhhSZ9GtyLIfVWZe\nyQtDAQP+aIboq4+xmuE3U1+FlmAXg+TbwazCkek6AOQYx4TiMUnaqod0mYqF5PONN+TamS4XTKke\n8mcjIzXbt7a9JUtkchWLyUmUFi2aqeupltHq1ScVsaMnTsCbb8rJk7KRLGslsJs2yfZXVclxoiJJ\nU7psvNoxizbE66VLk+uzGrWjB9G22w2Tk8lJqeyWu+aaoVcty21dulQ3CY+TRfvGgJEEU1tr1MgL\nppYDD00MseTXS5RERS2hFl5a/xLRWJTvd3yf3577LZNMGtpS5a6ieXEzn1z7hH++9M+mMukuLF/I\nlxq+xEtnX+Lq1FXlfbVs1wpEEiQ1Kj2V3F1zt5JRGOQswFWeqqTMwd5SL2sa1nB66DQfj3xMtbea\ndbes48AnB5KOExDzI6CWd6shJL56CYrWvbKO9vPtrKhbwauPvgrYIKENkyQ3jD6XWfJtJE0PE+Zo\norFWWtnlaBfziyjwPeQ70mewRqjC2Ccz1bQV3mWx6UyJiVTS5qFD8FSgSLOu5kPinW5u8i0pd1Aw\nODJdBzkhplKe6eTlSEI6b5vw4on21ERUSFbVMCMjNVtbU5t8qKFBJtiDg3K5mO5u+XVNzYyUF+T3\n/uAPkj12PT3y8bEY/Of/rC8XNePp03pyhf0nT+onadKbA+2Y6+uTPamiPqu2fqoZiLZ///vMSamM\noPVYGkGsdUWFvCYpXvIM7ZhJwpOvxEvFDrNrkOs5xQIjCaZafnvHzjt0vXORzggtB1s43n+cR//x\nUX7Q9QO+cPMXANkr+Ez4GeWY2FSMap8shS01+Lkcnhxm59mdHLtwjInpCcOapWpcGrvEL373C4WI\nllJKCSWMTI5YIqLeUi8uXFR4KlISGl2NX6W+rD7pPQkpiWCWUKJ4ZD8e+ZhJaZLLscu8+NGLynEl\nlFDpqkxqA2a8iu8Pvq98VumRj6twVzA4Pkg0FtVNULR77W5al7by6qOvJtWNzUlCq5EbmmnTyHvq\nTzTWRBPbb4RqgTbLMi03FwD2IpdUsboF7MyiqmnLctNqD98dpA5e5YWsDqRxLGsnsNCy2XxIvNN5\nP53kQzc8HDJ6AyGdrl4QmhUr5DIv6ZA2ji9x9RblS1askMmaWrKqhhkZqZrMpeu7p2fGQ7h48QzJ\ngxmiFgzKEt3PfU72Bgpcvpws7dSSZD3iaUaurG1H2P/hhx3KODLNgXbM//RPchxrezt873uwe7d8\n/nvvWSdiou1QKPl/K0ROlA050NtLpKvL8Li2ttSMxlbaUctyjSSj2WbRNhNzkqvMOJ8wuwa5nlMI\nmFkLPRIR6Yxw/OJx5Zj+8X5WvrgyRYYpyFHvaC/HLshxizXeGoUcbe3eyq4zuxQCdTl2GW+pl+ZF\nzdT56nTtUdchlZCSCKm31JtynjbudJrplFqmWohYUYEFZQuYVzpPKdUyzXTScUKyrO67wi3rC++u\nvZsGf4NCwpuCTUp8rd/l5w/ny3EJFWcr+PBbH3L/TfcDUOutZWB8gOb9zZy6ckrJWtzgb5BjT594\nLyX+U2+t8hK/mcUNrZE0vY02WmnlEIcIFMndsa1xcVpyY7NM0rbmzJAwO4mMpi2jpg3XQi037id1\n8GalzdoJ/I3q9fczjiJ35EPinY7Z59ifEzM69+GQ0RsU2htrQWjUJUuygbbsy6uvwiuvyLJfM0RL\nCyt1KdXH7tiRHLv5yCOyRzEelyWx7e1yHCvoeya1BFGPeJqxzYynziqB0nqxtYSy0B5BMx5LkO26\nVw5L052zTO3MduyimYcPswWza5DrOcUCPRLRE+1JksjWeGto8DekeOUEOVInN1LHmfZEe1LkqRPT\nExy/cJzLscsZbavz1Slti3MrPBVK/GaVu8ro1LSYkqZwIceYLq9ezgff+ACvSw5s97v8rKiTky5N\nSpMsLF+ozM3bX3+bBn8DzYua+R+t/4MlFUuodFfy+drP82z4WWUe/+Xxf2Fh+UJOfeMUe9fvpXVp\nK7/+0q8JVYbYvU72ZHpcHoXA/27od8r8vd/6PnvX7yVUGUqJ/zQifEbxmlkjixtaI1IcIMAudhUN\nEc0WhlxOS3bs9C7a2ZwZVmsncdK0Zdj0T4GbgVrkTLxictsAobrKJSGQdgLVz660z6wK7TXNFo73\n00EaODGjNyhyjd8rFLSlPdKVIUlbBiScHCfa1CQnONqyRc5Au2VL+nhQbVmYUMh62ZlIZyc90Sh+\nt5u2NWuyJlXr1slkesWK3B8e2AEzZUOUY9PMmZV2ZgOFjke1UnJHPXdbu3+YEqNndM7KF16gwe+n\nyuvNaU9mHItO3KDdEGVDAALeACceP8EPun7Agd4DNAWbFDIk4kWfvv9ptnRvSYkpVLejhiijUuGu\nYGRyRDdO01fqo7GuUSl9ArIEt8JTwXB8GID6efU01jVy6PyhrMcq4jY/8+vPcPbqWWq8NdxddzdH\nPz2qWyJFPf/DE8NKHKle6ZZ0qN1Rq9Qi9ZZ6kSSJB296kL3r97K1eys90R48Lg/l7nJ2hHekr+1r\nEK/pIA0yxSRqEMYg5lEboyfatimAUShLc26uWGMJw8xMLCRPrt7g1cdbLb0SYKb0TCNwhOR5sNq2\nAwd5RLYxow4ZvUExVxO9WCHRt98ux4N6PHDnnfJ5K1bIEt4dO5LHrD72rbdmysAIPPywfpIhK4TB\nTAIeM8il/utsoJjrmFpBoec92wdGVm7y7dqTdtqULaKxKN/r+B4llPBM+Jkk4mklMU40FuW252/j\nUiw1q+3C8oW89tXX2NK9hafvf5rNr23m8PnDSbJbb4mXCSl9EqPWpa0c6z9G32if7ueC+LpL3JRQ\nQlyKU0op00wnEevAM4Gk2E4XLiXZUlJCHtX8L5i3gP7x/hTSauaBgUg8JAi5ejwXRy8qfQR9Qe6t\nvzftg4ekmqOBZVR5qkw9qFDs/Fc/bYfaCHgCpojZdYEwloiHIZczyRYtcl/7YRurtRliYkG/NqnR\n8ZlItdGEp5uHYiXsDm5IOAmMHGSEWlc/VxO9pKupqUV//0yd0N/9bkY2rCcZVh/78MOpbRklGTIr\n3YxE4OTbsixyRU2Q705PZxipcTstLXLc5VxBMctbBczEnGQbj5otrEjUk84zUdJi5tjCSHWt2GQ2\n/kcr8wz4Auxdv5eX1r+kEJpsYhO3dm9VPJ4BT0CJwWysbeS9J95TSsSEKkO8suEV+v/nfhbMm8mI\nNiFN4CtN9jD7XX68pbKkNugL0jfSx7X4tZS+Xbion1evJAKalCaJS3FKKOG3X/mtInfd2r2V8L4w\no5OjyrkSkkJESynl4thFRf6qnv83vvaGbl1PvURD6rWIdEYYi4+xYN4C7gneo7zfWNvI9lXblT7c\nJW4GYgMc6D3A9zuMA9yEfHdZYJki/TVTY1SxM3CAyJLIDVMOoqOjw7L+1VAZaVLaOqsVNyJAC1AM\nv3UaKWzH5g7YiGxfJiIK5iWqRhOebr0c+asTM3odwCGjNyjsuLGOROSSMEY1RK20YzYxjPBYDg3J\n0tp08CRK7Pn98Prr6cerPva111I/NyLvgjAEg3LGXqMx9PTA4NNr4K2lLH7pESpEEVOLmAvETots\nSdWNjmwfGBnF6OkfmzkplB2wYpNZ6JEnu9oVUlRXqUshprdW3qpre8AX4INvfpCUtOgLN3+BCneF\nEuM5OjXKxPQEC8sXsqx6GccuHkuKSy2lFG+JlymmuDR+SZH0CkhItPxTC8cvHOfm527m7//17zna\nf5S4FNcdwzTTHP30KEt/vTSpNujymuU8sPcBZXyCSELmBwY90R6OXTxG/3g/H418RJ2vjvnz5rN3\n/d6kmq4VnpkijOmSMokHBerYXTM1RhU7B5vY/svttsU5zglYJB65hlPaHEpqDcVUe1Jty0rgfwMm\nMF+GRm8h9GI9s5nwbBdZ9L8IeJjijzl1cF3Dkek6yBraOMzW1plkP1YkmVbkiFbkxefOyV7O115L\nld1mOtastFRIN/v6UmW86jbicTnOM1dZ9FyUV881WbGDuQEh81TLVu3Aol8uone0FxcuqrxVDE4M\npu1DyEaPXTimENeNoY109HUkEU7RxqbDm9LGpILsPR2IDdgyHpCTKt1Xf19SvKjAgrIFfKbyM3w4\n8jCXx12UuV00Bd9l97pnk+S77w++z0BsgKZgE75Sn2Hc6brfrqO9r53G2kaOPHYk47pYlVIrx9+9\nncAfB4pPwnkdYVZVssUkP1Xb4gPEVyiXGM0wqZLrQk64un8BJ+bUQY5wYkYdFByCGMFMMp2WFutx\nblYIVqGIjZYgZyLZ6jEsXy6T25MnZ0rNbNwIXm/udjvEzoEDGXokJptESdpzHv3HRzl2YYawLSxf\nyHtPvGfYljoeE1BI2Gef/ywDsQFKKCHoC/LPX/9nfvLOTzg1eIrXL7ye5DWs9lQzzz2PC2MX8JR4\nuLv2bj659gnjU+NJXtJqT7VCcF24eGD+Axy7eIwabw0dj3Vwz4v3pCRVUkPEi+rjPwDLEn+/RdD3\nEvfW38twfFiZD5G19/TQaYWYakm60br85txviE3FuCd4T1JyJQcOdJGOmBU6mFVtyybsIcmzTbZF\n/9XA0Cza4eC6gkNGHWRER0cH4XDYtvaiUbnOZUmJXJs0EMjOc5cPgpVr0hztODKRbPUY1MeC/lzY\nvRYOcoOzHsWDXNYim0RJNz93M/1jMkHbGNrIxNSE4rk08u7dvvN2+kf78ZR6uLPmTo72H6WxtpFb\nK2+lylvFuavnoARe739d8XZqk/zMc83jzsCdvH35bUD2Xs6UjvkOMB9vaQkT039HY+0ybq28lb99\n8G/Z/NpmTlw+wesbX6faW51E/ESCIXeJm3vq7qF7oJtKTyVX41dpCjaxZ90e7nvpPi6OX1TGIhIj\nVXr+iqvxxcCHlLv+gWtTA3AaPLfLHtsVdSvwu/yKR9Tv9rOidgVV3vSJhyKdEXad2ZXkJc41iVUh\nsjPPBtKNy7lGqRBm9jLIRqGjpYPw3nBuxG22kzOJ/p8GtsyiHTbA+W4UD7Ilo+7MhzhwoI9AQE4G\npEZb2wwp27rVHCEU8at2QsRWAtxxB3zwgcW4u7Zkgpwp7lE9BnFsYyPceusMUXeQDLtK3djd1lzE\n9XpzbhVWEiUJxKZmMuGWUELbmja+3/F9JCSlPIl2fvtH+xVy9bvh39G6tFUhhHqZa4U9mw5vAmQZ\n7rLqZZweOq3YG/AFaD/fnrBkPrCMiWnwlf4Re9f/OaFKOX7glZ5r4P88PFoNibhLYZ+n1EOoIsRC\n/0LKPGVsDG3kZw/+LKmEzelvnubbr36bfxn4FyamJrgycQWAVQv+FW/pg0h0cG2ykfbz7cxzzWNc\nGgdgccViJqYmFHt9Lp/iMY10RQzJpbZuqzpONVuIeOFMfZvCbKaM1fRt67iuZ8xmMGsA+DG57xMR\n6zlbUPevZ8esp1J2cCPB8Yw6yBtms5apWkJsR//RKKxcCQ0NcmZdM3GksymlnQvlVOwsK1KoEiXF\nCqdmo4wnjzzJ/o/30xhsZPfazFLQSGeE3Wd3E52Icnft3Rx97KjuOdr5PXL+CAOxAfwuP6e+cUom\nigmoY1n3rNuTRASFhLVvpE/xMAoZMMAdO++gf7yfEv4EiTuBD4Gf4Sudxu/2c89H0zR8NMS5OvDX\nLaDtzz9IIcDqeNNMe8Eo7lbYOTg+SHtfu/I5oHhiRexrpphd0UfAG+DBmx7kV1/6Vc4PS2yNFw4z\ne142Td/N30uM61oTh/YfurHK1ljBbHsVC4XZJIRhnPqlDizDKe3ioOgwm1lU29pgwQL7+g8E5Pqk\nx45lzmRb6BIgetmI50LWXTvLihSqREmx4sywXPOoylPF0/dnqHl0HePc1XMMxAZoP99uukxIdEL+\n0iypXGJIaoTHNegL0netjztr76TB36AQUVFmZtEvF3EldoUF8xawZ90epQyMKMVy15676BvpS/KI\ninjUgC/Alxd9maAvSKVnF/AW8DNcTBCbjjE4MUh7wxAHPgdHl8GBYL8yRrVH2OuSM3Wb2QtGWY5F\nptvd63Ynfa4ulWM2Q7I47sNvfcgrG16xxWtva3Zmu7xsetlRLfatjGv/IQLtgeLIJFuMyDVNsBlk\ns552tzGbGYVnNZWygxsNDhm9gVDoWkyzWcs0EJCluXb2b7aMixnYuRZ6xFPYWlEhJ1F68knz5XMK\nBTvLiuTa1lyvUyY8c8PxYbZ0Z6h5VOTIZS30ZLqRzk7C+/bRvH8/0VgsqT6pp9SjHL8jvMOwXW1N\nzKOfHuWhBQ8p8y7klb2jvXRf6qZ/vJ8t3VuUvvac3SN/fq2XYxePMRAboMHfkEKmBJkejvcDf0+N\ndx4P3zxT+Lix+i4+H5+fNMZIZ4Th+DBlrjJcJS6l9qiZvaAml9q6rQAnjp8wrNNqtoar3nERIoQJ\n00wz0Szu0rOpH2sIu+o09kBkUYTwHWGa/25mDs32HTkZoeVgCyMTI7pEYK5fo2YN2RLCNERQWYtM\nbedKJmfzQckcql/qfDfmPpyYUQd5Qz5iQfPdfzp5q4gjVZdxiURmd4yg74Fua4PbboNLl+SSMsEg\nDCQqRRSDzQABn88WOa06XtTo/es9jtRqrcZigzomc7N7c9bttK1pS8nk2hONKhLuSFcXF0dn4vJa\nQi1J8Z5aW0T8rSA/zfubgdR5FiS4ylPFcHxY+bzlYEtSpl0XLqaYAuC++vvY2r11Jsts/T0KOS53\nlVPuKefNr71Jtbc6KYZVHsfMGEVZGYDuS91KX1obM8UVFzJesYcejiY0gBEi7CqEBtBI8mhX7J4f\neub3cHSZhTlU9Z00//82wi7frjkvQy2KsENBCIVBZtfaDBFM13YEOJn4uzFNG+nQhj1y5GzmwMz3\noigW2MH1AMczegPByTaWGenkrYLcVsn3/TnJf+1cCz0PdCAg2wfy/42NM3/nIlnWkwTPNgTZONDb\nS6SrK+P7epjr3w3huVte83/ScrBT8QLOFYgb8QO9B/hF6S+ybkfPY6aVcKu9p8+En0k5Xm2LVupr\nJA8V75984iRLKpfgc/nYdHiTQi5X1K1gY2ij4uVcUbeCZ8LP0BPtoX+sX5bgnm+nwlNBva+ea1PX\nuDh+kS3dWwj4Ary0/iX2rt+bIpWVxzdDhEFOENQSakmxMd241O2oSWy+vhf+xJ1+E01st1EDqOfd\nVZBvyWMb+OusJ9ASSJr/NdtTZKhz8Ro1mypTBdl6F9N4BpW1SNd2D5Ao7catqW0YIcmJaZccOV+S\n26JY4Ln53XCQDIeMOnCggpk419mUH+vBKEZVbefu3fbYnG0saj5JrFG86I0URyoIyrmrY6YJeDEh\nmyy4WhgREbWEe2t3N8PxP2JB2U/Ys04/flFri7pdIIkIis82Hd7E9lXbCVWGWFy+mGMXjnGg9wAV\nngpal7by6qOvsnf9XvZ+ea/yemv3Vk5eOan0KwhqU32T4VzojVEQ4eZFzfhKfXx49UNG4iNpx1Xm\nKjNsx5Y4zAxoo41WWjnEIQI2ulPSEu7EtT3ypxHC/5MBYc0FAWj78yzmMME+2ra10bqwMPNfKBQk\n7DCTBDVbuakZIpiubfXgnzHfbV74Xb4kt05cqYM5BMlBceDIkSOzbULRY3BQklpb5f/zibm6Fhs2\nSBJIUlOTtTlavVo+D+T51eKpp+RjNmywPveD4+NS66FD0uD4uKn39TBX10OLDa+8IrFtm9T0wgum\nxm0WTx19Slr98mppwysbpMFx+78cg+ODUuuhVmlwfDCrtXjq6FGp+r//J4ltfyyxrUxqPaSzySRJ\nWv3yyxLbtkls2ya1HjqU0Rb5nNUS25DYRkq76s8W/GKBNDg+KG14ZYPENqSmF5qS5krM4cJfLpQe\neukhqeaZGuVcz3aPtPY3a6XB8cGU/o36S2cL25CCO4JJ66VuN107aphdi3zvD7MwmntJkiRpUJKk\nVkla/YK5sRcMqyVJIvEvjTlz8RqVmHIprztitWRq/uyEqbXIcvAbJHkoQUmSHkq8nr1vVAYUZIEz\nYy5+N65XAFmVT3E8ow4cqKD1MmqTnxjB7HFzAem8mNl6hTN5nHPJ/itiT7UxoQGfj4DXS8vBg7O6\nLrkma7ECO5NCqZFJ4pkOZsafa0KanmiUofhNwF3UeH9o6F0V3vKg7xp9I/9F1ztmJIPV81SKzwD6\nx+TstkYeRiXJUSKJ0eCE0PB9h7j0Q9r7bufzL/zhTBIbXfv1bYl0dnLyypeBPwbKKHeVMxAbSFov\n9bjs8ESrkWl/pJXP2oi03t2Ep8s/z96x54zr2LtUiKS3RTt/WQ5eODGXAceYdQVsehRkgR3cCHDq\njDpwkAZm61darXNZzHVA81EfNlPtVVEXtqnJXvlzMdQfDRNWkrW00lqYZC02I5e6joUYf/P+/Rzo\n7aXGO8Q7jz9JqHK+7nHRWIxIVxd9I/+FYxfbZZsy1OKMxqKsfHElDf4GqjxVScl/orEod+y6g/6x\nftP1Nqs91QzFh2isbeSTa59wOfYk8q0n1Pk+4nLsbwztEvU/1QmXIHmfN/j7WF5zjPbz7YY2GbWj\nRaakR9qxGfU3m3VwtWOA5ARQZseYN0S5MWpm5gvX6fw1IxPRJuZEQlsHDgCnzqgDB3mB2bhDq/GJ\nxVwHNB/1YTPVXs1XHK6VdTHybufq9c5XspZCIpd4wkKMX3iEz37rTwyJKMx40au8MyVdtHGhep5S\ndRyo2vMX8AX44BsfWKq3+e4T79K6tJUjjx3hvvr7gAkAyl2XkKTnkuxKtV/fg6ze5++3/gW71ybX\nB9WOL5MnWniz90T3KB7PO3bdYejVzLQ/7PbEWoHWa5tczqaTXWcWcbT/Lg70dlj2+tuCOeRdKqTK\nwzTm0PxZwRyqrOLAQc5wyOgNBKcWk3UYyR61Ular8sixsQ7AXsJnF2YjQVMmspotzK5LR0eHLVl5\ndW3IU7KWdLBbFrm1+yQXR7/DpsOvWybkVsefzXXKSKptaJOGPGWTaXamb2v1NkOVIeX4+rI/w1sS\nAIa5NvV/c2Wil4XlCy2TfrHPl9ccouXgeiWh0kx5m9TxpSMWovTK4KlB5T0hQ9bD1u6tXBy9yKbD\nm3T3WyGTI2mRbu3MyruLAcXw+y32xQEOECle8WjeUYi1uE45dl5QDN8NB7nBDjL6FeBfgd8B/6sN\n7TlwUBCY8XgZ3eRqPZtWb4Z/9CN7CF8+stRmGzdbjLCyLvnKyhsgwC52FYyIQm4xnvrtZSbkRgR4\nNsafyTYrcaGQPzJ17uoYE9IioAp4HK/XS+iJEJt8myx5nsQ+P3f1A9111xtfOmJxpvMM7IMyqYz6\nefUp52qRab/lGhOcDnrXJ/V6/3zVzw3XTny3ZXn3n183WWwtIVM2WhWuB5WHAwcOig+5xoy6gNPA\nWuA88M/At4APVMc4MaMOihK5xBPaFeOYa+yoEt/5nU58oSh+l5t7Tqxh97O+6yruMtLZSU80it/t\npm3NGluT8wiIeMLtq1YltW/0vlUUYgwCucR46rcnx2Q2BYOGXmYzcYGzFZ+nZ1uECD304MfPz2M/\nZ0vXlowxlEbIdlxiXuFDqj3PcvsTi+mu7JbtzCK+Vl73Oircn+GBm1aye+16Aj5fUozo1u6t9ER7\neN/9PgNrBmjyNaV4rR/e9zDH+o8BsDG0Ea/LmzbGMt1+iyCXq/AjSw/tXnG965PZGFW7vttzGmFI\nhHTLutA0Wy5KlAgRtrN9Vh8u2YJ8b0wHDm5AzFbM6H3A74GPgDjwPLAxxzYdOCgIsvF4CU9kPA4t\nLbNXt1NAxHdWfCZKbHE/g7f00n5Tl61xqHbEXeYKtWfutuefz4uXNl1WXitebyPkKve1Ars9eWbk\nzmbiAtN50PKZcTWTZ3CLb0uO2Xyz80S3rVlDS2ghG0Pn+GjTKWora2U7s/Q8ta1pI+j7HCOTDbSf\n71f2mdozKWwd6B1gYddCXfl0lbtKtiPYxI7wjqS50Rtruv2Wl7qJKuhdn8zGqNr13Z7TsJCNdrZV\nDhacuJmPz/fGdODAgWnkSkZvAT5Rve5NvOegCOHo6pORTRkMQR7b28HjyZ6IirXINVmQiO98YKV8\nQ8aHQRrfX6XbVrZE0TBuVqe9fBEuccNZClyKxWxvvxDfjVzlvlZgtyzSzE27GQKcPn4vQXIO209U\n9WyzU3J4ZvhB4D9Q5fkrnr7/v5k+L+Dz8dL6Zvaulz2M2cYXi/nZdHgTjcE7AON9pl6D91a9l9SP\n+E7HpX/HxtC3+OuKv072cnZGOHnlJAAr6lZQ5nqK8L59bDr8OttXPaefvCjxf7aVNzKtvd71aTZj\nVLOBmcRAuV6jDOdxDmXKEfyxJQLSzUAtsA5DZpqWb+awMefqvZRVMj9XMFfXw8EMciWjjv7WgW3I\nR/xjOmTzVNyOTLORCPzpn8rj/PnPc4sdFfGdu9evoWXhUjaee4Qjr+hLdLMlioZxszrt2UW4tES3\nbc0a6n0+phOf13i9thO6fMfG5qsGaLHADAFORxIESbqt+jZjopplDKyebXYmlgpVNgHLGI4vZkv3\nyazbydbzpJ6fcvfzafdZWi9m4jvdfr4fr+vfUOGtSCIxp66cUuqjLq5YzLmrYxmvKblynczxqKnX\np3zGqOYDdiUG0pJaNfk4ZTSPhcqUYwMTEvzxnh6o6QcGgXYMPZusTotNAAAgAElEQVRp+eYcIuF2\nwXEGOyhWuHM8/zywSPV6EbJ3NAnf+973uPXWWwEIBAI0NjYSDoeBmScazuv8vw6Hw0Vlj/a17HWU\nX0ciYXbt0j/+pz+FkZEwfj9s3txBRUXh7N28uYPRUdi7N0wgkF17b74J774b5t134cknO/jxj2Hr\nVnn8Y2Md/OhH8Oij1u17qXktHf4OTpzQ/9zvdsPp09xWXc32J5/MeT702tvsdjN69Sp7n3ySgM+X\ndfs9w8NyHNjp07R88AEdf/7nNNXXc+DwYSo8Ht75q79S2tfbD22lpfREo4ydOsWPVq7k0S9/OWP/\nP963j6OJAqsRr5dda9faun8CPh+b3W5OHD9eFN+32Xh94vgJNrs3KyRB/XnbmjZa/lsL//GB/6jE\nNY6dGuNHK380Q1T7b+O7t34XgZzWgwCbOzZzghPJ15d3f8rIkhH8bj+b3Zup8FZkbK/KMw+Aeb//\nZ85cfpboqiYCvkDB5lc9P3906yYeTcR1Gx0vYii1n4+dOgWXLtH00ENsX7WKE8eP82bXm7xb/y4A\nNR/WwAQ0PSTLd9f/1/836XjD/nIY39ipMaiVPbnfnf4uHR0dRbOfbVu/cGL9Om7ju3xXJm2a49tK\nS/nTn/wEn8vFwT/7M93r65sdb/Iu70JYJqY9HZt5Vz6ABW4/nE487Hlye+HH2wMdid/3cCQMBr/v\n6V5v7uhgFLjbn/icDvgMhLenP35vOExA+3kAOjZ3gMHv5fX4eizxuikcZnsR2OO8nvuvT5w4QTTh\nPfroo4+YLbiBM8CtgBc4AdyhOUZy4MAMNmyQJJCkpiZJGhw0Pm71avk4kKTW1oKZZxv0xlmIMQ2O\nj0uthw5Jg+PjkiRJ0lNPyf1u2JB+vtVQn/PRheT2ssVTR49Kq19+WdrwyitKWxteeUVi2zap6YUX\nlPcGx8elJW1t0kN79yYdq8zdd45Kwf9Lbuehl16S2LZNYts2qfXQIVN26PU528hmjSRJf07nEla/\nvFpiGxLbkFoPtUqD44PK/4Xu2wwGx8el+h1/KbGtzNJ5dsGu+dFeIyRJkja8skFiG1LTC03SR8Mf\nJfWjd7zdsDK2p44+Ja1+ebW04ZUNGY+3cqxZPCVJ0mpJkjZIkmSlxUFpUGqVWqXBNGetfvnljNe0\nDdIGCQmpSWqSBqVBaf9TknRktSS9tkGSPrpQuO+QgXGShCRJTZK1ydHDoCRJLZIkbcy9rWzXbC5i\nUJKkVun6H6eD2QOzqJjdgJxR9/fAX+h8Pttz4yCBI0eOzLYJaTE4KBOxTDfdZklrIWHl5n9wUJJW\nrz6SZPtsjCkbApwtaU43P3o3WUY3uXrHirmr+OuZzxY8+6wlYnnkyJGMN9azQfCyne9MN67FTFaP\nHDmSRIDM3jzbRS6s9v2U9JS0WlotBV8JWra52CF/L2aZxFiAlQcJ2Tx0yNi/JPMtJPmm307c9zd/\nk/GapiW18XwaZBVFyoRWSzNTtEQyR0yL/V7qRoOzHsUDsiSjucp0QZafH7ChHQc3OET8Yya0tclx\nl9u355bJ1k6IWCuASFeXbvkTdWmP//iX7iTb8zUmbTmRrd3dymtP9RrAZyn+NduYWfX83LFzJx98\n85tKjJdenKmIA0vpX+dYMXeDK92098uf7Vm3ji3d3ZZKNhj1qTcGozW2G34/8J1OKj4TZXClm2jM\nXEmYTLG7hRpLtiVP2ta0KaVIzJ4jYgsBIl0Rw3Iedvct4v1Yg5yddtXcSJpjFiL+ci7AbBZdq8ea\n7j/xf7bJmtLhRytX8ovS0rTXNBFzLODOp0FWIWJTZxERUqu5qKfIx0yVmwizbq4DBzcMcq0zagYJ\nsuzAwfULc3UYC1+vU9vnxdFR5XXLwqV4dqy1RICj0exI80w9RRRbxPijsRgrX3iBBr+fKq83pQan\nmlD/fNUqQ5JptmZgtvU+zaxxLn3onRONwme37WOgxtq+yTQXVsaSC8zWe8wFYt7eH3yXgdjf0BS8\nM29ZVHVrbNLMAQ7QRGq9zmLBbNV3LbQ9Sj3VF7YT+CCQtoakuvaqbf0jk5jt+l0WHkVn0OwiTGpJ\nVfUUbUL2rDRxQ+U1cuDANmRbZ9Qhow4ckD1BETBDhApFANL1uenw4YLbAPL83LFzJ/3j47p9pyPq\nVkl8prXM9qGAdo3T9ZNNH0bn5GPfmCXuuaJ5fzMHeg/QFGyyjSBqiUzLwU5l3haWX+C9J36YN7Kl\nR67lrKURtrPdNBFVj6G+rJ5zV8/llSgW4qFAUdkTJpV1FBDqa0N92W84d/UDy+ub629SwaHndiwy\nNJOebDrcPRlzYEkdFBmyJaOl9pvioFghMmE5SEWu9THN1WGcKe1x4vhxoPDlROwqL2LV7oDPxwff\n/KZh3+lkpVbLxWRaS732zHw3tGucrp9sStwYnWPHmmlrDGZT1igbZFPvMdNaaEt9qOftvSe25tXr\npyftzKYci3oM+z/en1PZmmztBjKW28jXb0Y+JLLJHST+TyNPzee1V31tOPBxbVbrq72+FP3vd4Hq\nhmTYsmnRRvpqLmar3Dza0ZG1DbMFo3lLN59GS5rLGuQDRf/dcJARdsSMOnAw52FXfcx00ItJzHfs\nnrbPdHGRVp7EZ2N3ur7b1qwx9NSl+0wPmdYyU3tm5yFdP6KPMpeLloMHTc2pkV2ZYlnN2G1XPKVV\n5CPeMJXIlOXk5bUiGa0v+zOCvtUEfHcAZVmOIHkMAV+A9vPt+SNmpImDFXebkPcgudt3/oT+0Wk8\npXD40f+HCu9fWpbIml6rNjK6uPJ57VVfGwLeV2jvs068tdcX8QCzaFGg+NRctmymsFWznsBekEvm\nZGGDGeTikTQ612je0s2n0ZIW8LLh4AaBI9N14IDCyRa1mA3pLugTFyvSUqt2F1Jylutamp0HM/3k\nM05YO6ctBw8a9pUPuexswe5YPyuSUbvWUz0GwPbYRdPIpFtMA6txn4Fn/g+G4jcBspz6k2//yLK5\nucp71TbHpX9H+/l+e669Ggbw5DtHOPDxx3y+ro5/WH0fW7r/2PL6ztZvUtYokMY1hy2bEWHMqbvz\nZkNiH73hh6+0wVDAuso8jP4YjGxONxajJc3nGjiY28hWput4Rh04wLz3yS4IIuFxudgYCrEjHLbt\nhsMM8dPzCljxDut58dJ75rL3Qoh2zwwPE6qspMrjSUtoc11Ls/Ngpp98ety1c5reUzvjGdvafXJu\nxaJpYLe31VoGVnvWUzuGgDdAy8GWwicYMuFBNIJVb7snERTkd13kta/+W8umQu7yXrXNG0MBWpf+\nG3vInsZVdO47V7kUi9He18eW7pNZ7ddC/ybljAJly81hy2aEWedu3mxI7KMHgG0R+Oku605mozGo\nbd7KzLOTnwNb0B+L0ZLmcw0c3JhwYkZvIBS7rj4SgXAYmpvlrK3XM97s6uJofz/t58/jdblSssfm\nEstkJv5Vt0SKhdjErd3ddH36KUt//WvW/fa3RGMxUzGUQZ+PvpERS2MT7faOjnLswoWs43rTQf3d\nEPOwvKaGloMHc4opsytGVw/aNUzXlyA/AV8g5/jofKPQ1ykzca3iOxmXJDaGQraspzqO99SVUznF\njUaIECZMM81ErURxZQiSS7cWZomhGOedtcdp8Pdx6hsRQpXzzduoQjYxyEY27wj/nX1x0xoGkI+H\nUMX++20r0gQlmo3rzNSOHjLFlAqc6Ogwb4MVJPbRZBMc2J6d19FoDOp5U8eCbsHCfOq0VQy4ob4b\n1ykcz6iDokFPDxxNPF2ORMzVHJ2r8LlcgP7NipEXMdtYRr3z9DybVuJJe6JR+sfGAGjv60vyzAV9\nPo729VG7Ywf3BIPsXrdO6a9vZIRjFy+mjC0dRLtVHg/D8Xhe43phZh7UksxsY8ry6d3QW0Mr85nv\necw37CoPYsbTqv5Oti5daguBUXvpFsxbAOTg8RO1TpGJ6S6zLqocgtPM1GONdEbYdWYXQ/EhAFqX\nVmRNRCF3r3ham3MJ1NO4iqzGuWeLCBF66MGPnzbairKsUFZIE5SYaZnUn+/rAbeF4EatJ9DsltA7\nLqvtlNhH7u2wQ3WCmbas9Kd+dlKGzNfPACGgyoq9DhzYBCdm1EHRoLkZDhyApiY4dMhaHcu5hnTx\nQEbxmNnGMtoR56ZtY2RiQqkbWunx8N4TT1Dt9cqE89o1jl24oJzbEgrx0vr1acemhpb4gkwGn77/\nfsMao7lCj7ALW4PXgizb/whVHh9tbcW5L63E5M65WDQDFLJcST5iu9VxvHvW7WFL95asE/q8736f\ngTUDNPks1joNk9cSKOo1qvHWcPZbZ2c1XjntA4wws1oOJhuECSsPIVppNf8QotiRJigxTPplUn/+\ndjOszCG4MVNf6Y4ze65ddljpTx0L2qI6T8CMvU7ZFwd6cEq7OJjzaGuD1tbrn4iCLHO9ODrKpsOH\nUySgRnJLq7GM4tx055mVBOtJQusS7V+Nx9nS3a30e25kJOncielppa/heJwFZWXsWbfO8IZeLSO9\nY+dOQPb4/eSddwznTItIZyc3P/cctTt2sO6VVzIeryddFeuwbP8jHGv3ceCA7LG3ikxzbEeJCSvS\n20KVdck38l4eRIV8yK3VktNQZUiRUVuB8K4O9A6wsGuhNSIKec+AKtaoxlvDO4+/M+uJs7RlgZKQ\n57nIRzkMf8LoJprYns8UtoVGGr1sumWKACcTf68APmNWd6tpI4y8Th6dvvTWUc+mXLdTJju0sNKf\nWmYrzquycD4UrJKPgxsEDhm9gVDsuvpAQJbmXu9EFGZiRvXIQ8DnI+D1psQram+IzZKYdDfSeiRG\nr11tGwGfj/vq64FUkhsqL0/qw1NaqvR17MIF+sfG2NLdrXyu7U8QX4D+8XHFLiuES8iIBycmaD9/\nns8+/3xaMvj2668DsKKuThmLIG1VHnnOmppgexZ3FWq7V77wQsrc2hHDeb1Ib8H8dSqb+EFtvVWz\nyAeBV8fxZgs1IX9v1XvWZZoZbtZz/c0Qa3T2W2cJVYZyassOpH2AkQVxsYJcb9711qKNNlpptf4Q\notiRJigx3TL1AIOJvxcD1VkEN6rXqUKnrx7gaEdH0joKm5YjexqbkRMDmdlORg8pMtmhRbbbV5x3\n0uL5BarkYwrFfm/rIDMcMurAwSxAxIwaJfRRE5T6X/yC+mefZWhigl1r17K1u5vwvn3sOXvWFIlJ\ndyOtJjFlLpdhu3ptGJHcKq9X+ftzgQA7wuGUvtSESUvG2tasYcG8eSnHWiFcakJb7nYzEIsZzlNP\nNMpIPA7A4oqK1DqnOh57K8m21HY3+P0pc2sHkcxnoiQrsMPLaxbZkLkZz1gdn31+W0HszCdyTeiT\n70wkuRLurBMzGaBtTRtLli7B94iPTb5NyW3meS7ycfMeIMAudl1fRDQD0i2Teo53ZNm+uo1ndPrS\nW0dh0zmsJwZSk87PMkNK1f1UAReBTRh71bPdvuK8kMXz8/zsJi3yoTJwMLtwYkYdzAoKWXeyGCHi\n9tQJfdTxnCJGrRSYTpyzsLycT7797aT4TSCnODZhR5nLxcsffcRQgpSBTJSXVVdT5fVaWqNoLMb3\nOzp45/JlxiYniU9Pc08wyD+sXs0Xf/MbGvz+pDb14vH04hqtxDpGYzG+19FBCTAyOUn7+fOG85RN\nPGA4PJNsq7UVAn88s5/ry8o4d/VqSszr9lWr2HT4sKmxzlXks66qFnrxf5mSGok4zQr3XzMy2VAQ\nOx2kwmzyqXzERM5WnGWBynDe0LBjjjO1ke7zbOpvinMqABHg0ppoXy+uc46EM+cVYZz5KFZkGzPq\nkFEHs4JC3rQWM4yIkCAoh3t7uTIxgd/l4tQ3vkGoslI5Z0VdHYsrKkzVKM1E/tXrUe3x8MWGBi6N\njekSZbPQkuZarxcJGJyYSGpTS8bselCh1HItLaXC4+EZg3nKhgxqk221dM6Mtd7n41LC26adt+uJ\neOohH4l+jKCXwChTUqNoLEqkK8Jg7Du0n+8viJ3FgGJ7+Gc2+VQzzRzgAE1YTMyUBvlos1C4UZPG\nzJVxq4mqupZnOpvFOYNAO/pENhuSm2/YvSZW2ivG+XAgw0lg5CAjiklXfz3FuGUDsRZG8kohi/2X\nxx9nYXm5QkTV57z66KPsXb9eOc9IIhnp7GTH6dOKPHTZzp0p0kSxHjVeL+8+8QQvrV+vyG21a2Q1\n6ZHAlYkJhYiq29Qmc0qKsfyvXablsMKuRb/8JQ+//LIiN27v6+O1/n7DxEcBn4/NbnfaOFzt+1rp\nrno/f76uTnfeRF/XQ/IgI9ghFzZ7ndKL/8uU1EjIRnevXV8UsuZskE3ca7ZxyUZrkat81mzyqXzE\nRM7VOMuOjo4bNmlMtuPOl5zT6HthVMsznc3inN2kyl6F/XFgI8VFvOzei1ba00qEi+ne1kF2cMio\ng1lBrjethYxNyycykZNQZSWffPvbChFNd47RDWdPNEpcpU64qEoKJNC2Zg1LKitZXlPDD7q6ePLI\nEYYnJlgwb56S+VbMudlY1bY1a5ifiP2s9Mj5AMtdLuaVluIqKdEnny++yMkrVwA5mVDD4VUcPUrG\nTLaRzk52nTnD0f5+ekdHOXbhgkJ83SUlSszo99L8aKnb0JtDJcPvrl1QFktKtqXez7vXrZuzRCcd\nzJCgQpJtvXhJszGUxfxQINO1LW1GWAPY/fBP1DU9wAEiWdyKml6nPMREzuU4y2JKGlNIZDtuq4TJ\nTvJq1Wa9mE9hfzvgZaZ+qdZGM3bfnji/Hjm2NVfYvRezzQbs4PqAI9N1MCdxo8h8b9+5k/7RUTyl\npbz19a8nkVItjCSS4n2BcrebP7zpJnYnkiEJ+d5wPK7UB/WVlhJLlGTZGAqxd/36rGJVhSz16fvv\n594XX1TkqwLqmqVNwSA+l0uxYWMoxMTP1hvWnlVLD98eGODi+DggX9Qk5CdttT4f8elpJRZWXfNU\n287JK1cUAlvj9XL2W99Sxrbol7+kd3Q0yW7tnis2KaTdKGRdTy0KObf56MtsjCRkvrap65OaTVxk\ntzzcrNQ1QoQeevDjp422WSeAc0XuaYQbNe7UaNyZ1tOsnFO0c5KZbLyZYhGN+hbve4By5ERK2a6V\n1v6tCZuGNDaGSY6hFJ5ZtW0B1XkLgU+ytEnA7r14o+7t6w2OTNfBDYVilPmm82hk68ntHx1lKB5n\nIBbj4X370h5r5G1uW7OGjaEQzYsWUefzcS2R0CfS1ZXk8TszPAzIczqteoDU+emnNO/fjyeRAXhF\nXR0bQ6GMRDTS2UnLwYOMTExQ7fXSlCgFU5Xwkqprli6prMTncnE6ocVtCgbZEQ7T1gZLftSJ7y/2\nsel144zDg6r3RfvTwEAsxnCCiJYCg7FYyvyLdtSe1I7HHksam/ohQI3Xq1uv1cirer2gkHU9tbCj\n/M1s9mXFm5np2pZNBl27PcFmpa65elDthh3SwtnM5HmjeoSMxp1pPc1mfBXtCCJqxjtn1LeeN1MN\nK/tHa38PM4SyhpkkR8cT71UCTxvYJmqV+oHXsrRHDbv34o26tx3IcGc+xMH1go6ODsKJMhvFjEgE\nenrA75fLaujVHW1bs6bgiWAyeUzETSzI2VMDXq+u1zHS1cVmt9vUWoganX6Xi9e++tW0x4obTr33\n9ya8gWrvqcjuCvKN755169jS3c32VatY8utfE02Qs8GE53JxeTn1Ph91Pp9h0iT1HA1PTCgJkO7Y\nuZM3vvY1tnR38/T99yv9iDYWl5crc7ewvHyG6PpgcdPMvDb88pdUeb0Mjo8rWYbLXS48paVE43Ea\na2sJzptHe1+fYpOg1dPA0f5+Fv7qV7hLSvCUlrL2lltkWfDp07BsGQCTksRfv/VWkgdVEFyAq9em\nufVvDnHvO+vY/ayPQEBee+F9rfF6lTI515OXtG1NG5GuCNtXbc+pNmYm6F2nCvXwKdIZ4eSVm4Fb\nWFFXo/vQIRuvqRUin+naJuJe7YTwYJ7hDCFCVFFFG22c6Dihe50SUtdM8CfEd000sb0IhKV2SAvF\njT7IN/KF0gfMxu93sXuSM62nIDhm22kEbkUu6ZJurB0dHfgTaxEE+oBFyOVRTmts0s6hlf2jtV/Y\nWQOsR86yexKYSLx/FbmkjN68vAU8jExE1RV/rdiTzX7I5EG2Y2/NlXtbB8ZwyKiDokNPz0zZjEgE\ndulcHY2IV17t0pBNbf/aG+aWgweV4xeUlSV9duL4cczgra9/nYf37eO1r341yTtndFOc7mY50tnJ\ncDzOgrIyJQ5Ue+MrxtRUX0/7+fNKaZkVdXX4XS6OXbxIe18fka4uTl6+nCIhVs+RqBUK0D8+zpbu\nbqV9o7nzlZYyHIux4LnnqPJ6mZyeprRkRvExNjXF2NhY0rnXpqZgaoqF5eUceewxAOqefVYhq1rE\npqa4lvD8vvjRR0xMpx6pfk/Mm6ekhLgkMemZZKihj3b/Tr63+ZvsbfMlJYB65/HHefLIkbR7JVfM\nhiTYLhKUje2FevjUE+1hcOKXwHdYXDFCwNeq+Tz9NcAIVoj8rFzbEh5MgF5kSX+ECJvZnFO7bbQR\nIcJ2tudVomtWDtxG7lLAGyluU0tU9OSfswk71jPbduoT/+LAscR7IhhmIcneTPUc5rJ/1Haqy70I\nrFC1qR1PCH1prhV71GO5A/iAzPNlRHZn66GOg+KEQ0ZvIMyVJ0f+xNWxqQm2F9GvfWb5XPINs/p4\ntdcx4POZXguRwEh7A290U2z0vpCRCu+dIIZGN767167ltuefV2I8379yRSF3jbW1bF+1ilvb2pT2\nlu3cyaKKCj66ehWAu2trWR4I8MKHHxKXJOUcYUtPNMqZ4WEmpqeJT0/jc7nwJuJURazqpUQMqBm4\nS0q4vboakG/mK9xuhicnAZjnclHp8XBpfJwVdXV8PDLC5ViMEuT42YmJCao+9zlFzqtFTzSqeLWT\nEBin5DtdwNq0a68dt1kSlk4hkC0pKgZksl3XE1cggiZ7MMdoCr7DjvAhnc+z89Dmw5tpJ4QHs5pq\nhhhSPJmBcG50I6MH1Sb3iJpMR4gY9mnWUyba0SO4dhEgq5iN328tUVEToHx40axCxEK25NiPlX0B\n8lr8GLikeq8aWUKrjU/VI3tG+yfTnKntVHtzG5Alwe8je3Y9yJ5QM3NhZT/7VX/3Y45EGpFdOx/q\nzJV7WwfGcBIYOSg6RKPyjfj27foS3dmC1SQgdiYN0SY1USf9OfTII2z9oY+eHnh/zX4GbkpNYqQ+\nX5ucR02S6svKOHf1Kn63m/j0NO19fVS43YwkiB1A/bx5bFi0iLbf/55J1XfbXVKivG7w+/lsVZXS\n5/x58/jKokWcu3o1KVFQLvCWlDAhSbiAqcR7vtJS/G43I/E4cUlS6rNWe73KWgxNTHDbzp2K93Nh\neTkP33QTz589q7Qdqqjg1ooKeR4kifbz51lRV8fNfj+He3uJSRKVbg/vtT6hm1RKb+2tJt0Kh2cU\nAq2tyQoBq/U8iymRTLa1SDs7I0SjPbjdftasacOXB6mwqENq5MG8XuvERokSIcLTPM0WtuTdk6kg\njC3V6/NROzRMWCG4rbSakiVrUUzfu2ygTSpjpb5jmMxLawdhTdePtn2ztT/NQMyFkPb+LbJEVsyV\nXiKjTP3fjEzyQC7lsjdN/1FgJTIRrUq0dyv2JinS6/OOhI1ma3waJSZyEhZdn8g2gZFDRm8gOLr6\n2YcgfmOnTnH7/fcrxC+Tp0zcwAd9PpZVV1PmdlPh8fBMInZTIS5lMRb+RRfvbU2+WRbn13i9BHw+\nroyPMz41RWNdHT1DQwo5rPf5FG9oqKKCa/E4EnBZk/Sn1uvliopQ1vl8RGMxpoCy0lI++OY3+UFX\nV1IWXzVZ1UJNKEuAu2tq+HR0lMuJNrVoCgb5g6oqDp8/z5QkcWViIoU0u5DlxrUJObLefAhCFP7p\nT3k3kWDJDdxTX0/3Jfm598ZQCK/LRZnLxbmrV3n3yhUlntZsJudIZyd7zp5lcGKCFXV1vProoxnJ\nzKK/7KQ3FqWqzM3JP1lDaP7M8VZJUS431lYywZqB2vat3T9MadvoOrVvX5j+RDbfpUtbWTtLnsa5\nnjXZiv15/82wqXq9INN2kmg7CK4dhFagGH6/rRAIM0sbZoZI1ieOtUoSFyHLY6uQ4yfV8ZBachfF\nlmcfdHR00BgOp52LsE5feu+pUctMEqU64D7Sz4e2vSPAAInfvkR74ny7PNV6e2C2Y4vV343ZtuVG\nR7Zk1JHpOnBQQCgSxUuX+PDjjxXil0lqKWSgfSMjSlKg1qVLlRtJIW0O/ttuQneOctfu3YQqK6ny\neGhbsyZJRqqW1wrCBVDt8bC8tpajn35KhdtN37VrSn1SUS5FYEQjaZ2WJIU0fvGWWwhVVlJfVqbE\nWZa73VxLEMWA18vqm2/mSF+fIo390i234He7KQEujY0pY9Re0cpdLsamppAkiYO9vQqJXlhezu2B\nAO3nzwNQ5XZzR02NMr6VL77I4vJyxfsraqj+QVUVLQcP8uHVq5Ago5PAu5cvK3PyswcfJFRZmVLa\nJujz0TcyQvP+/Rlv6nuiUcXWxRUVhsdGOjv5zblzxKammP6sBPE4w8CWk8n7w0i2auQ9zCWRjMgE\nCxDpiuQsN1XbbqVtdyIJUDDYxKoCZ/NVQy0z/uzzz3Nvff2cIqW5SLyz8/SluT20SfNqNqGSFdgR\n71rIBE6F8MJakbOaWVoh1axAlryK7K9WVjKETEaHkT2Tu5jZcQOq40qwTxr6U2CEZNmqFnp9pes/\nwsxvbClwmczzoW1vCDlJ0S1Ad+IzEdspYjS3ReB8DwQysDWjb63YA+rP3wYuJj7/PvCSgb1m+7CK\nnwI/TrQzzEwMrxOLOnfgeEYdOMgBVr0kao9cwOulva/PUK6oJib3BIPsXreOTYcPJ8tzE3VCPbgp\nf34NA08c5NhAf1I7rpISyl0uvC4Xb3396zS9+CIDKk+nSGVsYNwAACAASURBVFIEUOf1MpyQuKqh\n9oSWAu88/jhNL76oHDd/3jwujo8rntsqrzcpm674XGSx9ZSWMjY5SVySkjyFag+igJYIa1HhdlPl\n9bLI7+fty5cV7+uCsjL6x8bwlZYSn55Wxqj20AZ9PmUu1P3UeL2KDcL7KdausbaWWysrk0hz0Ofj\n3vr6JJmz2A9WvKI3P/cc/ZoETVbkrEbew1w8R9nUtcxH27FYlK6uCKtWbeeHvq2zJn8U+0DtiV9S\nUcHihKy72IlptjJpyNbTF8aKP2quS1vVyIfH1gh2emHNIlcyIbxsg8ilULJxkOt5YMMkJ/dZAbyq\nsjnbZx+Z6pGq5+PnwBdJltEa9S9I0xDJqAHOprHVyFMt5kSgFZk8HwDeCsM9Jr6OYdJ/a9Wfe5AT\nOUFmebGVPrQw2m/qdhZgTUbswF44dUYdOJgFWK1JqK4FunvdOt26oDCTcKh/bIzBiQklg622lqjo\nv72/l5OPvcip4SspfU5JEsOTk0qt0re+/nUWlpdzTzAIzBDRCrebyxMTCsGsTJQzqXDPCCg8JSW8\n8/jj3F1Xx+qGBkDOtPvm175G69KlLKuu5tjFixzo7eXNhFeysbaWN7/2Nep9PiYlibGpKYXw+kpL\nk8iZ2oMo4Epk0xX/i4tWnc+HCxiZnKRvdJTugQGFZDYFg7zR0kLr0qX4XK6kzLrimBqvl8bEHDQF\ng3zh5puV8dyjel8kqhFzf+Sxx3hp/XqqvF5lfgZiMQ709nLg449T9oN6TD3RKLe2tVH/7LOcSyR7\nUiM2NSNKrvZ4TNVzVcPIeyg8R9ncEGdT1zIfbft8Adau3YXPF5jV+pViHzxw002AvEca/P6014EI\nEcKEaaaZaMGrUybDqB6xGWTn6bPmjyq22qS5IJfvnVXY4YW1WnPSat1WbfvCy7Yb43qgRjaJ9+PI\nCYz0kgZVAPORPXUi2VEutSwz1SNVz8cWYDGyl07Mj1H/6vqhJar/lwKb0F+LCPK4RzTvhZHnZL7G\nxjbkOb7b5Ncxkxf3ZOLvRuBB1d87MF6z25ETLXkSn4uCaeoyOHrnCRjtN7Wtb2CutqyD4oJDRm8g\ndHR0zLYJRYNIZyfhffto3r+fqCYe0gqsZtcUEsUTx4+nLUSvrlsJMxlsteeo+2/w+xXSo6e/d5WU\nEJucZMULL3B7dTU1iTYq3G7m+3zck5CpNtbW0hIK8ciiRbhLShiZnOTKxAQLy8u5+N3vcnddHSBn\n3G1dupRXH32UUGUlu9au5dyI/NNYCgqpvbWyklBlJT6XK8WmGp8vafx+d7LlLmbI45caGlhYXk5L\nKISvtDQpntSdIKor6urYGAqxvKaGJ48cYWRiQqnVKtoTWF5Twz984Qu0Ll1K7blzfDA4iKekhCqv\nl39YvTrlhl0793qk5POJuVFLeEX/TcEgrtJShuJx5cGAGpHOTsUzW+Xx8O4TT7B3/XpLhGHNmjaW\nLm3lkUcO2ZbgR2SCzUdt0YAvQGBtgBZfi0LUzFynZrN+pdgHYv8feuQR5cGE0XWgmAhWuuuOFtq1\naKONVlotxlCK2+DU20M9km732hbTg4BckOl7oV6brQQskUoBq+TSquzVqP10JNHoHPF+OzKpUZ/b\nhhyDOoIsH91iwjYzOJP4v6yjg2ZSd7R2PrSvjciWOK4GOc4TZJXO28jj/raOLdp5Ed5VMScPAksA\nHzKhJfG5x/jrmIR0h/UwQ8hvRfaEipjVgI5tAv3IRHky8blX00em/We03zZ3dCjthMjtgYOD2YET\nM+rghoRdpTFyrX9oJPMVpCzg9fLgTTfxqy99STf77c9XrVJKxmw6fBiQb4jPDg9zZWICF/Cbr3yF\nSFcXt/j9Sgxle18ftV4vngTZHJmcZOrKFRaUlXF7IMCno6OcvHIlyYtY5nIx/xe/QAKC8+bxmaoq\n3rl8mdCvfkVseprGujouJsqxqCWxR/r6WPfKK0leP4B5JSW80dKSMp7PPv+8QmRrvF4GJiZoCgaV\nG+jwvn1K+ReBLzY08PvhYfxuNxNTU5wZGlIktDVeL6WAv7SUEdV5xy5cUErcNHZ3c7G8HICjn36a\nVBM13VrtWrs2KSEPkBLb2xIK0bp0KdsTYwPwu1y89tWvJrXfE40qiZG+1NCgm6U33Z6BGe/hXIK2\nLIeZ2paFql+ZDurY10zXgdkkz3YgN+mscaShXkkWu9fWbNmXuQ513Gy2NRytkkurIb/ZxGxmUxok\nkHj/gM7nVqTF2mNFfOoYcobcrcBvgBhwD/APJGfUFfNThhy7OYBMxETbYl3U83iTjh0dyMmYRD+7\nmSHGVcDTwJPMeFdrgGdILsWzEtlT6w9A267MmX3TxQer5/6ZxDxcRCa9bRivjYdkiIcILYlztJ5S\nLbT7TazPGHBQZwwO5g6cmFEHNyRyiZuyE0blPtJlSzVzztDEBA/v28drX/2qQmrEmEFOBHRNRQ7V\nr32Jep8C3tJSVtTV8dalS7qZbY3gLS1VyqcASjIjgYfmz+e3GzbQcvBg0ngGx8dp7+ujsbaWvevX\nK2R7a3c3vzl3joHx8aSsvC6g0utFkiTFm6wkTtKMU406n4/bE/GtoowNyN7Vu2pr+afe3qR4Xa2d\n6R5gGO2vc1evpqxLpnO0sFoiptiRj7IchUam2PFCxQ7mK94yX/GIhVj762F/WUW2SYrzXW4jm/bV\nJUzOIRPCKuSYTDXxM9tXGPNxiupjFwCfIzm2VU32wDgjcFhznBv4PbAe2Vso6oKGkL2F6vSAfuAu\nZhISCbuPMpM0SJ33QZ1VWOyDCmQiK9ptQZaz9qtem006BKlzqx5fKzNeYO3cnwNuAyaQJb1HSJ7D\njcjjN7s/tP0K76qWYDsZdgsHp7SLAwcWkEu9QDtLO2hLtpwbGUnKgqtuWyQ0EmSssbaWI489ptu/\nXu1QT2kpXpeL/3HlCsMTE0RVMmCRYKjC7WZsclIhnd7SUiQgrvFEGiUVqvR4uBqPs6KujjqfTyF4\nFW43ntLSlHhQvZqpIHsXy1wuhRCWud1cHB1lUnXugrIyLo6NkWxZMua5XIxPTSn2uktKeHjBAgJe\nb1ICopZQiHcuX2YkHmdFMMhYPK58BrC4vJyr8bhuEiLtftja3c2+jz7iSizGgwsWsPfLXzZdl3bl\niy/S4Pfrrr9AsTxIsQu5EjW7S89kg2J5QJCv2pj5InR2kfR0N5uFTCJULMg3qbQbmchCmGQyB9mX\ngklH1LV23IXsCRVoQSaO2rqrkEwItSRXm1AIZAntx8yUNBN1QWuYkfD6gNPAD1TnCxJXp+pPjQbg\nfOLvKDL5u6Q5phk4zozUVi/pkBUCZ/bhRwQ4hezVfYNkwtwELEcmrKLPTN5bbb9qYqteg7DB+w7s\nh5PAyEFGODGjM7ASN6WF1aRFevGpYi1E3KFI/NM7OsqxCxd02+6JRukfG1O8gg3l5Yb2q238xe9+\nJyc56uuj3ONhSWVlEhFtCgaVBEMjKiJaCkxMTycR0RLkeMgHb0oWE1W43bSEQrz3xBO0Ll3KXbW1\njE1NMX/ePGq8XkYmJ1OIaLnLxdP33099WZkcYzk6yqP/+I9sOnyY7atWsfvsWSWBU5+GiAa8Xr68\ncGFaIgrgS8SSCuI8KUnUl5VRX1bGqaj8k99YW8voqVNEYzEux2K0nz+vJF8S+OTaNcX+4Lx5yrzf\nvnMn//1f/zVpP/REo1wYHycuSRz99FNTewTkPbm4vNxw/QWySUBjV4x0PqBN8mJ0nYp0RgjvC9O8\nv5lobCbiSpSHOdB7gEjX7MRjWo0dN4NsYh3tkAOr41tbOlqAbGNFM8OuBD/pYs30+7Caqmf2YeX3\nO9dEPYWG2VjB6sT/2lIwkLqiRitsFAsZQU6+I+z4Psk1S2uQvY4XgfUdHUQTbXkTn4vfoiDQp7Fj\nGDmhUL3K/ivMEFEX8Fri76bE/43InstQ4ry6RBt7MV7XMuB11WshV9Yijiz3Ff1UkzpXZuKHI8jy\n4dcTtu0hdU5Fu08i78ljiXGJON565DkLIJNUdZ/pbBDzOg+41tHBJoxlvmbl4XPvqnD9wIkZdeDA\nIqzeeGrjUwNeL28eO0bD6Cj1ZWVcHB3l9JAc7VHl8TAcj+u2fWZ4OOm1OjGPkY1qiDZFbKkoUVLl\n9fLA3r0pZFFbCkU8+R2IxRi6eJH/n713j46ruvM9P1K9pNKrJJWMLGwLOwlgAo5lBCExvi6QHdom\niQVBHUJ6AT3rUneS6Tvpnhu7+951+/asNcnMrKHvTPesmcvF6Y7NdKOADTGY2L60BdYDE0xwsE3H\nNEpwUCIbWZatkvxSSbL3/LHPPrXPqVNPlV72+Wp5uerUOXvvs/epx/d8f7/fN1BURFwIvEVFfGXR\nIs6OjfEdo+Jv6+uvc/D0aUCqq6r/j2Ixzht2GBevXGHzoUP0fPqpaa9y6tIlAGqeey6tnctrDzxA\nW0eHZZsHksKIRyYnLc+bw2FKPR62f/SRGTLcUFbGz8+cYcTIGfVAkrWN/uxXw8Pm44FLl8w+fcXF\nlvkFKPN6GY7HicXjab1FlbLqM4o8qXFGdu9OUuBTeYymQ6FypGcTqTxJg0YF4eZwM1tnwH/USTls\nb2lhVc//RWDNTh4L/F1BQmTzyXUstDfm9/k+kJ2P52xasuSei5hvVqWL6UCm9VO5gk8jSYxuBaP2\nt6/oIM4rrKrqqjxFpbj1Yg2P7Sah1lQD7yMJlT0HUz+mHvkdqXwu/9gYq3q+CUnc9FudPuBrRttB\nEnmnKs9UkVb1PaO8VKuRXqSlwBeN8f8rrR11Xmrudmp9vonMefUDz5EIFwZ4EkksVbXcJpyr6dot\nbjD6X6r1r69JHYmcVo9xXAyphA4h17PeeF2t62O25zp6Sczrh8a/TSTChO0FrbKJFHA/FWYPbpiu\nCxc5ItcQX3tYpZ57WBcIcMYgYovKynjr6183cyTtbd/76qsmwavy+fjkscdMH0t72HAsHqf6uefM\nYyMLF7LLCBe1j18PMXSC7iGWCrpfZ6C4mCtCWPI6g14vFT4fpzUPzaDHw5dvuIE3Tp1KSzyzgR4i\n5YRKn4/7GhrYHomwfMcOi5dnXUkJE1evmsWD1JqoNksNX9TRyUlKi4v58Jvf5Ifvv09vLMbbp08z\nIQQe4JeG5U0sHufJzk7eGhjgrDEn6UI39fnf1NiI3+Nh65o1OeWoZsJ0hfbOJAFJ5Ukai8eI9kTZ\numZryhDdQo4zVShsofMqF7OYfvqpooqjHKXRotNMH/INa50Nn0twDv3LjHyzKl1MB3INK3ba376i\nj5F6hReSIF8LkKGwan+QZOqS9rgc+R14CZl7WYUkcvbw101IEqsImheppp5Dqn+3GH0NIX98+4Bf\nYCWDjcgKtT9H5lbqKAIqjL6/hCza8yWkWroFq1ep1zhnlYua6sf+IiRxV30tJlH1FuS87rEdEyE5\nbFr1qW7/6t6mYaO9Eaw3jZci13IYSXp3Yc0D1tfZHrKr1qvKaLccuAdJuvN9NxfyU+F6zVN1w3Rd\nuJgh5Briaw+r1JVVZQXSHA7zwSOPmBYpTr6jxw1FLuT3c/SRRyzenPaw4VAgQM/XvkYx0PO1r1ly\nS+3jtyuuOprDYYI+ew08K7xFRSaRA4hfvWohouFAAC9YiCjApStX6CgAEYVkIloEfNGwqvEXF7M8\nFGLcKGRkr+p7ZmzMVJmbamv5xcMPs7SigqZwmEBxMe889BDH2tpYVFbGh9/8Jo0VFfyjEfqsFNR7\nFy5kSXk5IOf3lQce4G6j/0wKun49bI9EzLUpZOjnVLwl02EmLUtSeZJmYz1TyHGmCoUtdMVcRT5H\nGGFzFuYUhbIwyTd0NpfzV2PdF93HZGRySnFxSiHRQ/8yI0t/i2sMczUMMdewYqf97StaZ/xzalNP\nVBhEzouqlusnQcyajH+DSMKkjhshef5UuKvF0xpJRAPI76SDSCIKMtpGFfLRbwWPIImenYiqY0aN\nNn5m7NOFVGB1r1LV9xBwr/FczUMJUlUFSZTeIqFMgyTc+s1n9e2vrp0yEiHFVVgr5E5q259Gzn8A\nOQdqbLXG/83I/FZF3P/Z+F9fV32d7SG7ar2PIsnuBaS6OpVP90J+KuRqk3S9wyWj1xHcnNHZgZM/\n5drz59n/4IPsXL8+K5LQG4uZYbRrFy60VGJVpKVcCwkFSZCuRKPcu3AhkJw3qJ7bSSLA+htvZGl5\nOYHiYjxFqW9y+Y0Q3kmH6IdKn4/G8nLGr1xh1BYumw1Ki4tzur3mKyoy8w4EcPjMGfxFRdwRCnHo\nzBn29fdz8wsvUOxwPvHjx2ltbDQ9U5eUlXF4aIj41av84P33aayo4Pff/rY57+M2QuuUG5otAXTa\nL9rdzejEBPWlpby0fv2UCWSuN1BS5WfaUXBPyO5uVv7wh465rVPxOy3kOFPlTxY6r7KSSiD7MedK\nuLMhr7l8Z+Ry/mqspb2leLu8sA+i/2d215wd+diFzL+syuzWIhPZvJZ/IOvhtxuRSvkZnAnKndpj\nFYYaQiqS4yRI1RIw3oUS6lv35s5Ovmw8LiORy9mHlRAqxElN/u3fnOrbWPWV6jtQ2B6r94Hdzfui\nMa4jSBX0X5BKrFJm/zXwBWPflcj5UPAhiyXVIUNyu5BkVX37jWAlrmXa9nuQaxAnQdD9SDW0Hvgs\nMqJBYYIEcXaCOr9yEgR2B/IGwjLjvZHbZ0AyCvmpkN/n0vULl4y6cDHDCAUC/M933kkoEMiaJNjV\nMx3tLS2EjeJDHSdPpix8oyuoq15+mR0ff2xR9xQ8wAfnznF2bIyDg4OcGx8n4JCfGiguZjxNCH6g\nuJiTFy8miKht10xE8/LVq0lf1KUp8mTLvF4mhLAUOZoExoXg8LlzgDyvM0aRonqjaJLC6MQEApJ8\nXu0EH2TRIntuqqeoiP/Y1GTZlu3aOu3XG4tx8PRpBi5fZvOhQ2mOnh5kWxSo0ASsNxbj6LlzWRcH\nyxaFHGcq5dC+fapKZa5jVoS7nHKGGc7YZ7bkVT+PJ3gi5TnloqiqsfqChq7SDL0r8ytEdX1qnM7I\nthDQtfoDWT9/5cPpdK47kSG1rciKuYrA6ipfBfC3JBTTWmSeaCvwn5Hksw5J9gaNft7Rjl2PJKlq\nDEoRzITbjT4+QBK38iyOeQepPtaTTOiGkeHBVchqvY3Ap8iv4xEkWQ8Zff0Lknz6jPHrSuyRNP1X\nIefzS9q2AeDXtn2akBY1A8hiR3q+qVJpFew3VtpJrYAqJTyf93+20QK5RhW4n0u5wc0ZdeFiGqBs\nWOJXrnBnXR0786zcq2DP87z1xRcZuHQJX3Ex7z38MN/p6WFffz/lXi/3LFjATkNN0/NJlZdmudeL\np6jI9ORUSGXX4i0q4vDDD7N+zx4Gx8a4o7qaz1RW8ubJk46KZ5HxL10Op2fSwxVvLq6lzlh/4438\nZmSE/osXk0i1HaoQE0iP0abaWjo//dTc1trYyK4HHgDkfH/uhRfMPFg9Z9P/ox859rWorIzff/vb\nlm352gDNtn1LqvzM6e/32rGtmWoOZXd3lFisF683SEtLO4EMaxAjxs3czBkjky1Tn7pdy23cRh99\njjm1C1nIgBFIWEMN5ziX9znpY40S5UexH1EVrYKtsPHt2bnmriVkynnLNTczG8yl3Dj9/F8ivQ+p\ngp4/uhFJls4az8PAXUhCporlLEWql3GkmnMWSRgv2Nq1+21+VWtDh1PhvaVIVdZeICgbLDDaO2vb\nrud+6lYzK0n2+wRruDJIcvo+iTBjfdyq7RhQQ+J3hBe4D3ltbCdhlaP/1qhEzt/bJPK9b0Xm1iqo\nPNpfGf3br+8I+Vu32I9VIcH263kqfVxPcHNGXbiYQ1A2LMPj42nVymwQ7e5m+Y4ddPT307Z/P7F4\nnIFLlxiZmGAoHufe3bupKy3FW1Qk1dFTp8z+dDW03Ocz7VvsRBSsRLRcq8Y7KQQ/eP99PvrmN2lb\ntozur3+dXQ88gN9jDQgKejyUFBUlEdEiZMguAL+rxjdaRnnp1D96PMAHZ89yNh63kMNUn4Ihv988\nt7PxOB2nTlFjkJ2m2lq2GYpztLub1tdfN49TOZsqrFnvy2uE/BYBn6msTAotzdUGSCFV6G6+9izZ\nht0m+nfOz5xuTFdu62xgqqHBsVgvAwNd9PfvoycLpTBEiGbDyCGbPnXltY++lCppXMuw8xqB8FMN\ndw4Zf5tCm9i4YyOxUGzWrrlrCZnUmOkITi5E6G+hcln18/8hUrF8LEOb+iepH7jbeFyOJD77SNil\neJAkbwBJElU1W/VtqUJrlRqrhw7r5EqHnYj6jHa7SBDRKqAHmX/phDLt8SAYt4us6MCqMrYi1cwD\nxhiDtv3tOaujJGxsqpBhuKrvcaPdLVhJxSSSQL5i9KHIpv5b416kL6peeOyEre+TyPkYMsZgv37V\n2J1sdSKkv67s0QKprudrPapgtuGS0esIbs7ozEG3VmmqrU0qQJPLWliI7alT/HFnp1lwJ+jx8NbX\nv07f+fOmwlft95u2IMqGpDkcZlskQrNRVKfcZv3iteVRfumGG6gvKTGPLfV4aH39dfb87nc0Pv88\ndc89x66vfMW0bQFZkGhMiCRFVCDDYAECN1xm1dJyRzKcLRQFvgIMjI2ZbYO0V3HSR6t8Pt57+GHa\nli3jngULzPN696GHWFpezm/fe49lP/kJ63/2M44PD9M1MMBQPM6isjKTGOn2KAAramo4/PDD+I0+\nuz79lOUvvmghinp4tVqTbMhkqtBdO7HNlqDm6sWZS35mIT1MQ4EA3/V6p5WIFqrQTyZMNTTYa1jW\nhMPNrNEsa9KNP5c+9bDaVMS5s7OTO40MuyaaeJd3WcpSAgR4jMfM/vOZU3uYsH7NFXqNokRZyEJq\nqGE966d13fU+C3kO2XxnzEYm7FR/pCsLjWwJbTqCka7gTarjVf5oE7CNBKG9RztGfadcQZIytLVQ\nRAxgDdabAfq5DZGZTBYh59NeUjAO/AUy1NfJj7HE+Gcfr0KRMc59yKJBXzXa3K6NM3UZQ4lDSKIH\nMrxXFSe6iCS6dcj5s5Prlcb/quo1JEKPm4DnSV4T+/gntePGjf5WGccsBg52duIzXjtonOdyo79M\n15X9Bk6q69kNu51euGTUhYtpQHtLC5saG82iOFP5cW33DBXAew8/zKKyMo7/4R/SWFFh7lPt9/P+\nN75B3/nzjoSqrrSUukDAQiKLgRIbGQ16vXxoKKH7H3zQbO/S5CSjk5MMxeM8sG8fuSIeGOO3F2WJ\nh5U1NZZxZINSjydt9V2n1qqN6sOqUrFeNKqxooIl5eXExsdNsv/u4CAgbyJ8oFUtVnO8sqaG1sZG\nur72NVbU1tLS0GD2NTA2ZlFAdaVPzWG2Kqmd5DlV181WeZ1OL8581d9c1dpCYaYqAOdblVahpaWd\nZcvaePDB/ZYQ3XTjz7fPdCR2Jztpo403eZNGGlnCEg5ykH3sYznLiRHLa07TKcev8ZrZ3pM8mdO5\nOKGXXgYYYJhhOugwxz2dmMlK04VCPgrlVH+k61Vgq5FKY7oxZKvEpiIV9uN3GuN/k4SSucPYrnwv\n9Rgg+3eMIl/VSGK1A6kQRpChwurc/EhiZo/caUD6jHpJ5HGClXSOIUnWKcCpFOBZkpVNHfp35mUS\nhO2zyHl+FecQ4lSoQobX6rceJx3GpmxXFiOJqlJ6v0TCj/Qxkknjals75cY41Q0CVYm3Cxn2O4os\ngKQT6gHS5w0r2G/gpLqe51/Js/kFN2fUhYs5gHS5hbF4nFtefJHBsTFW1tRYbFoUnjhwgH2/+x1f\nqK2loayM1/r6GB4ft+wf7e5mx8cfm6pk0OOh1OslFo9b7mY69VH24x9zyZYfWuH1ct62rRjwFhcz\nfjV1xmjI7+f8+DhBj4fzV7LPG202rFYOGmRRQeWfVPp8IISZx1oEbFi8mHBJCX3nzyfNrZrzdwYH\niTuMd1NjI68YOaSQ2l82Fo+z/MUXGRgbozkc5rbq6qT+ot3dvHTiRNKapIPuP9q2bJkMFbb1n22O\nZTZenAq55rnmm+cZ2R2ha8DIqVzWxo51M5OFo+dKFqrw0kxitsev+ldoo40LXMhpTFGiHOc4H/Mx\n7/COxUM1SpQf82OuGJ9KrbSyi11TGrPybNUx3V6os7lO+eZxRpj5vDiVv1iNzEl8IsMYsvWCTJUf\nm+l4fe6eQeY8nkUSnWIkwbLncaqxN5JQQ0dsr9+MVBd1hIDfIlU8FXNTDPwB8F+Q5GsASfxGcc4v\nVQiTyOcsFMqQ1XPtv+A3IhVRu9doCTJFR4X42vNOFTYh10cdr7zMm5A3BUDeCNDJbimwAlkZOAL8\n1Ghbzz9djlwrVVAq27xhF4WDmzPqwsU8Rjp1KRQImPmaqUhM3/nznDHyIPf+7nemDcxNFRUWP1JF\nRKv9fprCYc5qRLQIqCsp4ZUHHrD0Ee3uZsyhUJGdiILxRZSGiALExse5AimJaMjvZ61hRwNwR3U1\nmxobua26muMxea+8yufj4cZGwoGAmY86OjFBiaEe+oqKOPKNb7BnwwaLIrnqpz811UYVjutERL1F\nRfztl79s2ZaqOm4oEHBUkfW11K159DVJB7sS6tR/tjmWmcJudRX2+LlzOSmd+eZ5Tqdamw65hLIW\nMgS5UFDjv43baKU1YxhoocNF22mn3tCMlKqZa0hyL70c5CADDCR5qPbSaxLRECG2sW3KY260ZKQV\nzoooHQpdaToX5JvHWci8uGxVVqVEnUCSOacx6G09Q7Jy5dRXKiUrk5Krz91mZCEhpbhdRRJR9SnX\nhCRWaux2IupHFvS5k0Q+qarY6zX6CWHN8bxqHNcIfGiM9ZjRZ7pbt3eTuWKvP8PrkCAFHuQa/BZr\nCHAx8L/hrMSGsOavOv0SuB1J6I8Zz8tJ2MOcQxLKG7ASUZCKrqrE+wYJkqsT5VuQublqfRtx1cz5\nApeMXkdwc0bnDuxr4RSCqSOTTYg63ltURMwgPXpRouIxMwAAIABJREFUHn0fFcqrSFy1UdhHAGfG\nxrhn1y7zh7dSU52+VD4fCrFx8eKU51ju86V8Ld1ts/sXLmTyyhUWlJSwcfFiur/+dV554AH6zp83\nCd19DQ28PTjIUDxuEuxwIMDS8nLqS0r49aOPsqK2lltffJG3DHXxjupqGoJBk2h9PCp/YlT6fPDR\nR1Rp450Ugtt/so3m5/4XTp8fJBP09XFaS32bvibpkA3Jy9U/NBX0myFqXlJdi4UaQ6qCNdP9OZVL\nKGu+IcjTCTX+dEWHIEFCX+KlvMNFndYiRIgP+dBCtHIND04Xoqteq6aaIxwpCJFTnq1NNLGJTTNC\nEKcapm1HLu+LfEllIfPisiXE2YRJ2gminWDkQr4zhVva5049r9S2b+/sNEN7VXEeNQ5FRH1IsnoO\nmeN4DFk0aJXx+iTwA1ufCj1IYo1x3OdIJmf6j/fbkSHCYynOSUFXKYuxWtno271I4tuFnG/da/Uq\n8rzesx3nQVrMlBrPK4EvauPbiCTuPUh1U6nL+nwvQpJNJzVVn/8vaNsrADo7aUaGAutFo6ZaEAsK\nV1zLRXq4ZNSFizmAOsP3Ml9SobxGJ4UwCxktKS93VNFOfOtbNFZU0N7SwtLycq7YlEE97/G1vr6k\nYkMlHg8bFy/mrU2b2LNhg4XEgSS3tYEA5V6vI+lsCAap8ae+R/vTvj4ODg4yODbGe2fO8L2333Ys\nxhTXlNWGYJBbqqo4NDTEwNiY6c05cOmSeTf5bDxuEvDmcJjIwoWEAwGawmFW33ADRx95xDLeC5Rw\nOH4Df7j72ZRjdYITidS3bTl0KCu1rVBEMxvoZPmdhx6akYq2uRRJmi1kukk03UinaqYjdFGi7GAH\nXXQxbPzsy6QG5qKg5kK0nNpNpxqq105wwlQ0C+XZ+iZv8gqvzLvQ7FyRL6nUiZr9R3iuP8rTEeJs\nixBl05b99Uw5p5mg5u42JKGZQJKoYyTmtN5hjPo4qpEemzXaa4NI8nfSeF4JPG07TuEskljXIJU+\n/RvYgySL6lu7AUnwtiDVQx36t2wRCXUW4/gJ41z0T3k997MWmad6xtbuVdu2IuCXyArGyuJmFKlk\nLgB+BtyIXI/HSJDgZuBdZP5ogOSKwx5k/qg+/zHjGD/SbuYDYC2pb17UGf/6bG1nez0Xolq0i8xw\nc0ZdXFfI1/dxuqHnBy49t4zLZ/3Eq2PcebuXnQ9kN87yH/+Yi0bo7PLKSt5+6KGUx6l5OHbunKk2\nKui5fzXbt1teV69tOXSI3liMj0dHOT8xwcjEBJVeL1WBACPxuKP/qMLGxYvp6O9nPMvPhdpAgLMG\naVtUVmYWFVr/s5/RceoUZR4PX6qvByHoOHXKMv66555jKB4n6PFw/A//kCq/38y7bH39dUtO5o51\n6wht22Yh30HivP+Nh7i5dlFWY80G9lzQHevWTem6LMQ1nSof9nrHbM9LOq9S5dW5la1J5Eo/rooq\n7uM+trEtLQlL11eUKL30OvqQpoI65hjHTEKcb57mVD1brzXMhLdnBGvu5iC55ZOmytl0ansqbdlf\nb82xbSfYw22V72em+baP8wngJ0jStxJpo6L7jarx3UtuBYQUwkjCNo6MbNJJldcY70US4b0lSPW0\nGEkow8jQ1lKgk+QCRF6HbU5YjSSc+tzrKEUSUBXuXESCaL5iO85vvH5F61uf/4NaO4uA3xuP9ffE\nBFKNVuep+vwtCQuZCNldJ9nmKLuQyDdn1KlKtAsX1yx0e45oTw871q0raPv5EgNdgQnsXMPBFa9D\neICOgeRxpupDr/g3MjlpEkansdhtSnzFxayorubkpUu8tH69uW+p18vw+DjeoiIqfD5L/ql+PMgv\nj1MXL6bNa1F9BX0+xsedgnGSoYjo7aEQPZs2mWPYuX49n3vhBYbicTpOniRQXEwRcPTsWW5qb+eu\nBQt446tf5cH/9t946+tfp7FC3hdWc+mket1VV0fHqVMEi4u4dFVwiQD/8f1/Yce6wpFRe7/2wlK5\nXpeFuKaVCuvCitmel4+NepBVVPG0qaNIKHUy3XFevHTTzQpWZOwrndKqKsMCfI7PcRd3ZSSl+jGp\n2s2EW7mVAQa4xKW827gWodQaSBCnQkNVIlUK3neM59mG/iqF04k45xpGrNoiRXt6XyoXcWWWbSvo\n7Y5irWq7gPTznermQB8JVXMYmQ+pigzp49PDYFOhBvkdq8bVgCRpTiRWFQTSq8s2I38jHEIStADw\nGe34BcgbDjoUGdRJndPzg0j1MdV3/2Wsqq0w2u5Czp26HspJKKsK1SQq50JC7Q0Cb2n7vUaiCNRG\nEhWSz2p93kuCvGZ7DbaT/kaIi8LADdO9juDmjE5/2F22OWb2tdDDOCt9ARiX42yqTh5nqj78HklH\ngx4Pb2/aZNlPL9yj24QolHu9HD57loHLl80Q12h3NxcMwjgphLQ+OXmSVS+/zLFzsuRChRb2Gp+c\nzEhEPx8KsT0SoTkcthyfDc6Nj5shrmU//jGNzz/PiDG+cq+X+NWrCGBCCEYmJug4eZK/eu89fv/t\nb5tEVC9I88yaNeac/9F//a9Edu+GoiI2NTby5YUN5nkV+jqxh/HaC0vl2t9sh5JOFfYiQe7nVAIq\nTHWEEe7jvpShqvYwVnXcJJP8wMxMSw+n0Fm1FoqollPOEEOW/FOnENooUY4ZtGAFK/LO0xxggBFG\nmGCCYop5iZcKEmI7U16zhYT+vkj9Qzq3YNp0eysFaRSZN5hv6K9TmONUclP19pTXpBp/L4lcxJum\nMM6Pte2TJEI81Xzra2H3SV1OYi5VO1XIcNgBEgTvOLAMGWr6DFIdXKD1e5txjCJf50iEt9YiiZ+9\nOm8qNCDn+qS2LQ4cNR5XGq8vxapQfd4Yl/pWUdedUw2JSazFhDxIgpoOIeR82r1d9SJPnSTIehi4\nA0mkf07iGu3s7LTk1PqRa7Je21YM7DEeK1/VemTF3XTXiWvpMjNwlVEX1xXaW1qmNewuX2KgKzDt\n7fDkd1soWtTDtpbkcep9lHo8RHbvJuj1JqmAFrXV4zHVs/p/+Aeu2kJkfYbfp97msXPnksJtq/1+\nLk5OmqG7xUB9SQmfrazkvaFEYfnVN9zA+2fPsryqin8eHmby6lVqSkq4o6aG1tdfx+fxsKmxkb/9\n8pf57ltvsff3v8cOf1GRGcqrCPYTBw4kKbIlHg+Vfj8XHEKDez79lMX/+I80VlRQ6fMxOjHBwdOn\nAdh86JA55/0XLnBUC53dHomkvE7synQ6BdoJdrXNXlgq90JA03tNTzfsyu53vdf215JTyGuqMFhV\neKeZZgIETKVR5YQq6CpklKjluGyVxHRKazvtRImaPp16u3rfy1nOh3zIa7xmhuYuYhGv8Era808F\nn1Zi5SpX2czmgoTo2udrvoX9plZrctNM0+2tF4xR/eQzS07EOd+29PbKkcTzt8ZzXWFTxWzsSBfe\nrB/7EglblUwWIXrRIoxjViHDSs8b20aQ1i86xo1/HcBmDrCLp4nxE56kiiISxXhqSBT0uRtZrfYU\nVkXUrlTqOablwNtGW41gGhxVIwnjGJKY3W+0oX+L9iIJXBVy7g4irV6KjeOHcSamGP39Ajl/6pdB\nJZJInjFeP0IiP1nl1LYCnyKJtirypK53/bwfQFYbVutxJ3Ium0is/afaeK4abe0wzku1owpiuZhd\nuDmjLlwUEIXIMcsU6qv34ZT3qO+36qc/pSEY5KNYjKF4nHKvN4m0Vfl8HH3kETYfOpTUph1rFy7k\nyNBQUlGjukCAM0Y4baXXS9+3v83yHTsYuGwtqVDj93POILJLy8u5fOUK8StXkvJWARaXlXFhfJxL\nV67w7kMPsaK21vS09BYVMSmEmQt6965dDI7JWoIqz6XM4+GizT6mvrSUgcuXLXmlugdoU20tb371\nq2kJ6Oj4uOl12rZsGYOXLiXyfSsqWFJWllOYdi7XzFzNeZ4K8vUpnU+IdkfpjfUS9AYZbRnlYED+\nFFL5j6lyIp/gCfaxjy8Y9SMVEbSrjHZPSyBlPulU4JSn6uQ72kGHSUbtPqG55H/20ccylnGVq1RS\nyTGOJdm05INsPUDzyZWdKqbWZ24Zbun2zpSnmS0K1Y7e3uewemqq8WPry04+9dzEehJkJopUKj9G\nVoRtzGHcag7V904YSR5HHfYNIBXJChJEtYle7uBt+riJIJW0s8rs71bgN0gV9DYkgdqCJMfD5vFS\nCUynkrYZ5/CcMTYvcBipKts9U+1oIKGo2nNbU/mI6v3+nAQBVjYuQSR5bECqzse0cejHVCHVW/WO\nV3OtsAnM21xO66XvrzxMQ7h5oNMJ12fUhYs5gGwroKbzL8wU6pvJRkTfb0lZGQdPn2YoHmdRWRn3\n3HBDUnufrari3t27OXXhAo+98QY+I9y3qbaWBbZ81K5PP+WyQfBUiG2518sV44ZTtd/PsbY2thw6\nxJnL9tp+4NEU2IZgkIHLlx2JaK1RCGl4YoL41av84H15X7m9pYWlFRV8obaWQHExP29tpbGiwjKH\n9914I23LlvEl41z1CrrvtLYmVYrVPUDtFYgV0lmf6GugW8fY1y7VmudSNXcuWo1MFfn6lM4n9MZ6\n6RroYl//Pj7ukcF7urqYKl+zjz7OcIYOOiinPGMFWierFRWSupjF3Mu9UwpNdaqiq/uOevGaZBik\nlco2tlnCYpXa2UwzpZSmHVsjjXyJLwEwyih3cVdWYcqZkK0HqFJQ87HFyRdT6zO3ANh0excqPDGb\ndnIJLg4BdxmPlc+nGr+9L3uIsF61doBE2LBSygbAdL3N9vzVHP7G+P8WEkQ0RCL0NowkoT5jHNXG\na7v4IX3cRBcR9rHKHFMUWV1W3U79NTJE9UUSxK0BSbBU1V4/iXBH9eNeKdK9JIjjJPBXJGxYFG7H\nWgEYZG6rGs9xbXutcT5FJMJq9Wq9ADtJEFGQ+aBxY/wdyHXp0s6nzHh8o/F8hMR6qNBaHUXG9oXI\nkOeDyAJR6jpqRyrUtVg9WAtpYeSiMHDJ6HUENxdr7uDdnp6UpCKXUN9MP+T1tj545BF2rlvHpsZG\ni7XK4aEh+i9e5ODgIPv6++kdGaEuEKA2EODdhx+mbdky7rvxRnP/8atXzaq2geJiLkxOcm58nIZg\nkK81NvLEgQO8dOKEY/7oXXV1idxYbQz6bTQPMDI+bgkRHrx8mVg8bhLsw0NDFpIa1HJPe0dGGLx0\nycz/PPbII2afjRUVhPx+Wl9/ncX/+I/c++qr0jLmo48IFBdz4ORJ6p57jr7z59GRzvrEmu+bIOjD\n8biFdOpE8uYXXsho7eKE+Z4f6gQ7Gb8WP6eCXoNshpt5Z807SUTIiRzpeZeK1KWyU0lntaLITT/9\nHORgTiQnm7VQvqNhwkwySYwYwwyziEW8yZuECFkIVhll5rn+E/+UcWwq5Liccs5wxnEfO4HLRE6z\ntaZJV9RpupCqz+zeF7lRyLmSD5erfYYiE3afTx16MaMmErmJ9cY23QbmV9q2rdrxEZIJchRY2dlp\nbt+CDDH9jnGsCm+uRoahfkSCpA4hlcHTSNI1CGzmRwTNa1xuV/mvOiaQxE4RMi/wr5Bq71Ek2QqS\nCLNtwUq47PYxPyO5YNESEt6gIHNGnycRwq1IY7Hx+CwyT3QCWdn2A6ONbFBNss/pRSRJtefogjW0\nFhLhuK8BA52dDJMI41XXUQiZB3vWaFddW3PluneRgEtGXbiYBQQM9dGJVOSiFKkf8qm8K+1thQIB\nXnngAb64QN6v9cmQCrzG/83hMIvLyjgTj9Nx6pSZV7lz3TrqS0rMfT545BF++P77TGoepXfX1fFP\n/f10DQw4qp0ra2p4/v77TeLR3tJCY3k5NX4/X77hBhqCQSq8XlnS3Rba3/XppyZpdyJkqiBSjd/P\nyYsX6RoYoOPkSYqAxooKC9lRpLD/0iVTNQ6XlBAoLmZ0cpKheJx7d+9OuSb29nQyVVdaireoiAuT\nk3ScPMnnNNKpxl3u9XImHs9L3bweVMRrEe0t7bQta2P/g/tpDDQmEaEtbGGQQR7jMZM89dJrhrou\nYUla4pSNH2kVVUB6YpWNwuiktH6P73FFu/3URBMf8IE5Zp1gbWe7ef5xrDdjnMamiPo9RnkTp33s\nBK5Qiqbq+63oW4QiofzNK/PoM5+CT/MVufqEZkMm9GJGS4x9tyAryKrCNX1IEjyEDKFVxWzsRYl0\ntXIHkvypYkXHsRZUUoVx3keGl6qx6hVzlYIoyZafdlZRh1QOO5BhyL8iPSaBN4y+TyEJl5qvcuP4\nLuAGJPF7H2uRGN1PVGEvksyFkfP/ljF+e17sVay5oncgiWgj6cN2FTzACmCxtk2NrRkZKq17vW4k\nQVxXGttUyG3coQ2d1OdSuTlXL10XhYObM+rCxSygkP6F0e5utn/0ERPG+2xBSQkfffObaT1Gjw8P\n8/HoKDcaKiMkPDwfe+MNxxw++5h1v8yQ389vv/Utlv3kJ45EtCEY5FdtbZYx2S1NllZU8PsLF5gU\ngpLiYsp9PoYMYq3ncsbicVa9/DINwSCVfj/tLS2yvZ4eTl28aBYoAtjU2MiC0lJLnqU6P4Vqv58T\n3/qWaRNTBPyrhQt55StfyXlt9DnR0bZsmbRx6elheGwsyQ81G1yL+aLXK+x5ga20JuVRZpvXCNn5\nkT7N02xmsyXf0z6O5SxnwDBI2MQmS+Ehp74U6qjjDGcAaKCBX/Ery3hTeaKuZz0ddLCCFSxlKdvZ\nzha2OOZMxoixilVc4hLjjHMnd7KTnYQI8QRP8CIvUkIJdxlBnKnya/NChJzNK2cj33S+Qs/30/M6\n8/UJBee8wIit7QtYcxBVf/p+fmRYcCWSaNqtVFSV3GYkoVWv271JAZ5E/uj+G5ILIi1GKp8eEqG5\nDUjCOEiy52c1iaI9CpXGfucc5iNXFBltlSFVq3O2selYD/yT8biGxE2AWuScqQoTVUabym5F5ZxW\nI6vmtiLPuRI5Z8ux2rWUIedsC5Igf4zMvR0x2u5GKsIqn7jN2H8VUnWdQM7ZThLzbrf0sXvAusgN\nbs6oCxfzCLnkCWZCbyxmElGAwbGxtIrba319HDx9moHLlzl2Vn4tKLXTHnaqj88+Zr0K7JFvfIMt\nhw6hbjyFfD6qjJDVFTU1SURUjVu3NGkIBk1F9P4bb+TXjz7KpsZGWhsbLUWFQoEAS8rLzbDiaE+P\nObZKLVz3dsNGxp5n2d7SQn1pqdmvqmD73sMP4y8uRmBVYlPBKQfUbpmj5lYR+JDfz+XJSepLSkw/\n12h3lMjuCBv3biQWT30/di7li6bLeXaRGaZy113L53Y/y6/2tkC81KL65aKQpQsn3cIWeuihiSaG\nGeZ7fM9UP49z3KIg6krlHvZQRx19ZtCctS+9Yq8qsFROObdxm7mvUlHv4A5OcYpFLMJn/EWI8Hf8\nHW200UUXr/BKUkivrmqGCLGEJZzmtFnVdxWriBDhNV4jTpwRRuigwxIKXBASqMkr39v6vYzqMcxO\nvmm2mGvWNrrSmasHqR1K3ZrAmk+KQ9v2sN2ttv2qkeGgKvRTWbXYVbylSCL6nrG9koQ36T7j8eeR\nJCmOJE66sqvnQyqyFzbaXYEktdXaOVYhq8kew4oAkniB1Xc8FUpJTQJU+G0MSUQXAfel2LcLeAI5\n76q9lcg8WlVSsRrYQKJwE0gi6kcSxP9s9KPm2l6kStm1KKW2C0nelWIbQc6VyicOIxXjx5AEd5BE\nrqr+bnSy9Mn32nORP1wyeh3hWszFmq8o5FrYCVBTbW1S6K9OHi5ruZgTQrCorMxCPLMlyoq0nvjW\nt2isqKA3FiNmkEtPcbFF8UyXz6oIoV5o6Pn77zdDinc98IB5vDqPXw0Pm/vq59re0kJrYyObGhvp\n2bTJschTKBDgK4sWEQ4EuLOujiq/n87OThorKmhpyOwvqsbw0okTSeSwvaXFDGduqq1lU2NjUrGk\ng4ODDIyNmX6uenGbaE/qH62FzhedCqGcTmI83z+nsvmhb/p2xj7D0EA1Q/03sKjn31vIU7Z5jZCe\nuPbSywADJoHbxz6TJP2SXwIyhPdpnuZO7gSgmGImmWSoc4h7udexr41spI46QoT4O/6OYoq5wAVT\n6YwQ4SVeoovH6OcfOMh/4CJeJo2/LrpMqxZ9zOmIdVDLfCummHOco4suM5wZZIiwHgpcEGgVT46G\njmZFMgudb1rI98VcJspTLS6jyEUHkqypME9V0EZvO4SsqKu2bSFBZFuBEyQK+ujho//Q2WmGkT5h\ntH2QRMjoKBjvLIkJkvMZ7WPWix5tQuaYHjTO41Mw4g4k7ja22XM+zxjnrHxIU2EFkqB9SILkFiOJ\nuZOx1kpkGO5OEgWBdIyTKEZ0Fkkcw8Zr7UhSfRvwOslhvMreZg8JYulBElH1S2UFVqse9SmgQp9v\n7uxku9afytG130RQ56K/G/UbFGp93cJGM49r29DNhYvrAO0tLfxxZyfjV6/iLy5mWyTiqEKq8NEF\nJSVgEMWVNTUc+NrXclZo9ZDR7739Nn3nz1sIYigQoOPkSZrDYbZHIinHrYf9pvLLVH19fP48o/G4\nWdjITqIBM2+zNxbjsTfeoL2lxbHdvvPnGYrH6Th50uJtmY1npz6X6nwVOQwFAnz4zW+mbMOJUOrF\nbbauSf2jtdB+onZ/T90WKBOuxUJKhUI2HpZ13XWEY2HEcBEXkPO4f80ThMh9XfVwUKfXjmn6iSKZ\nIEmSBw+HOMQII2xmMzvZySpW8VvDvbGIIvaYVvESiiQvJMgZLtNBB9/juxRpkVmnOc3vUd7BNyN/\n4gM8CzwKwApWmCRNP4dneCYpnNicN+rw4OEKV7jKVUaMn69NNLGQhfjxs41tluPs4bKpwoDTQkl3\nZE8ylTdroe11CoGZLsyUzt/TDm2q84JOLgIk+6ja29b7031Xw0hV7RmsIbU7kCGl9nBekIrlCDJn\n8UKK8TmpbroSq3JNN2qvT9j2fwsoSdG+8utU7cWQKifGeOuM/u8BvgBcNvb/MpL82ZNMGoADJNZs\nKdZzBhmTqchyEOlFqhTIHUgCaz8mhMw7VSRc/V+NJJ/6/ouwWvX4kIT9b5Fr87g2PrVGav6UT+yf\nGvOwHev1Z/fsdUNzZwduzqgLF9cBlJdjOBDgM5WV/Pb8eZrr6kwFMhVS5SnquZHhQMDM7VR5p997\n+232/u53+D0elpaXm7md2ZAoe59Ovqcqz3PLoUNJ49PHZvdetc+HPW8z1fnq2yeuXqXj1CnKPB7K\nfD7efeghGivsRe2d4ZQrHIvHiPZE2bpmK6HAzP1onYq/ZyFznjNhvuXeZZPrGdkdoWugCyhlUdm/\n54NHtuQ9j/Z8URXqGiTIKKMcTMpyg0UEuJ+HeIGfMs44FVTwAR/wQ37IDnaYJE+16USoa/AybOgv\nrTTQQ5yznKWUUu7mbrroYiUr+RVPM8E64F2K+AMEw9RSy2EOm56h6c5Brbki9vrYbud2Pstn0xJQ\nfQ7aaGOQwax9Tp2QKv91PmGmzyFCYfJAs4Gef/oYqf0knQiyyjPVyWS68ar9y4x/+4EfkAgHLSZR\n6KfK+LeYRE6kGo8acymyqFIQSYLvQZLDJqQyOWnsU4Y1hFVHGTI/EuQPfP3XdyuYjr8hrEWJMNq+\nTCI3NIxUGPXxOs2RExSh3ELC37TcmIMhJJlWVXkVVEVekKHNioSrcUewXkfHkPPjQ4ZI6w7E+nUw\nP9+l8xNuzqgLF/MY052Dp0JqbwmFOHTmDINjY5T5fEkKpH0MqcIxlddmpc/H52tkIJOed6qUx1OX\nLllyO7OBvU+lwqkcVD3P02l82ah2qfJiU52vvr3c56MuEODilSsMauG2OnLxFA0FQuxYt2NGiShM\nrTJvIXOeM2EuhxQ6IZtcz4Qa/nk+eOTf5j2PdvsXeyXZj40AtUqtlqf8YRvnn9jFuBE0d57z3Md9\nSWRP+YbeyI0WH9AoUYTxe2MFJWzjbdazHj9+7uZunuM5yinnBP9MOY9Swkt42Igwfnqe5SybDQdB\n/RzKKGOYYUsu683cbOa3qrEVUUSIEPXUJxFRwHEOlAo4VVUwl/DpuYqZPoep5oHmAj3/NF3Ir5Od\njNr/nizH245UGi8iFckfGP0otVGvONuNVBWdQnWVPcxr2pg2A18x2q8FDiPJ2odYQ3Dt9ihqrsux\nEtFKrKGu6jjdj7TMeKza13M4U81RFc74NZJEvkQiNPcCibzZESQRVZ98zUgiqsKn1xrblYWLfm5q\nXQaMdobAlkyQmNPHSK6Mm6lqbrrX3Yq70wOXjF5HmO+5WNcS7Gsx3cVpthw6xOClS3wUkx+fTkTN\nPoZodzfHzsm6fPY8VKUEjk5MEPL7k0iNnUBmCufUyZvPZnujSNNRwy9U5ajq/ejtZ0OyUnlbpiKy\n+vZtkQjNdXXm81KPJ2sSP5cwk4QyF9jfG7Ph9TgVZPNDX7d6mcpNCN3+pZdebuIm3uZtQM5XhAh1\n1NFEExvZSCsLDEuEZuKa5X2IEA00mGTPi5dqqpnslL6hpzhl+oAuZznHOU7MCPcdoILv8Z/Yxz7G\nGTdzQT14GGWSYc4Sp40rZg1NWejoaZ42xv2aeQ4XuUgHHfyaX5v7KW/Rd3kXkPmtd3M3MWJ00JF0\ng8JO0N/B6uuaj3VKvgV/ClkoaD5/f081DzRfpLOAcSLIav+dpB+vWouQcbzejvLDtIfW/iBFn5Ag\nxkolbEKqlK8iw187jON/j1T/7jT2W4lUBHUZ6i6sZFHhXhKhrhFkQaUGZFiwOld7nqki0+nm6CiS\nmAVJFE0qJaGM6srnSuM1hSoSPqz2uVbtv0lyLq/aVxHqQGcnb9nGns67NpOv7VSOdZEfXDLqwsUc\nwHTn4ClyNBSPO+Za6mMIBwKcunCBl06cMG1alpSXW/bXiw1tj0SSSI2dQGZS33TyVub10rZsGbdV\nV9P6+us89sYbbF2zJsnfU+/HqQBTKu/VdEhVVv/iAAAgAElEQVRFZO3b9ed958/npc5OF7q7o+ze\nHWHv3o3E01TnnS+Yi76L+ZAM/RgCFEQN14m6Bw8jjDDBhKkcvsmbnOEMXXThw8cuPiJk/PS70/gZ\nXUUVRzhiqqfVVPMbfmP6egJUkAhDH2DAVBsBBjlDO+0mkfXj5xSnGDWzwKwKDcAFLpjKaNAo+6JX\n/2ym2eItWk45E8bP+/u4jxqjrIzTDQq7P2sjVl/XfFTBfNX5+abqTxfSkcLZgiLIupeleifnMl47\n0VaEcyUy1BSs1XudiJc923sJMlxXxShUYyWvO5EqaxnwF0gvUZD+pf+FBFlU/a8EnjceKzLVhSSb\nyoO1lWRCUIKcF1XcaSGyoNN64/UdSHK8B0mCFZktJVE0qAypfvqAT5D5pAoB4NtIxfR7RvsB43yV\nLYtePbfDaEfN3XtItXg71hBdSCb+uqLps71mRzolX51XFRi301zMFwgXLlykx/DYmGjbv18Mj41N\nS/sb9uwRPPusaH755ZR9qDGs3rVL8Oyz5r/KH/9YfDI6Kp7q6hJrX31VbNizR3wyOpo0Xv31XM/D\naXxrX33VHEP9c8/l3KZ+fNv+/Tkdmwucxj7d65kOr766Vjz7LOLZZxH797fNeP/XA9aKtQLjr01k\nN8eZjnlKPCXWirVig9gghsVwVm0Oi2HRJtrEsBgWYREWCESxKDb78Qmf+bhVtJr91It6ERIhsUAs\nEJ+IT4QQQjwuHhd1ok6sE+vEsPG3SWwSraJVfCI+EfWiXiAQzaJZfCI+EUWiyGxb/VULj/iiqEra\nHhJVYoFYYD6vETVitVgtNogN4hOxVrQJxFpRLhCIJtFknr86v3VinUAg/qgL8eevesQLe1aLW8eW\nmG3o87VBbBAIRLkoN89lqlBtNovmnNrL97jpxFNCiLVCiA1CzJERpcZMjHWtEALjXyE+LYeNdoZt\njzMdU2+Modl4vsF47hdCfFFY5+ApIUSVNu7aFOdg7/8pIUS1tq/af6323C+ECAghfMb2x43/nY5T\nuEUI4TW2F2v7FNmOSfevzmFb2Djvdba5yQb2c9fPcZNIvy7p1u0GWzsurCD53mNWcAsYuXBxHSCX\ngjOqsE2Z18tFo3Jt27JlDF66lLYwUDaFg3IZ3+Lnn6f/4kVzn1zb1Is23RIKUenzmUWJUhUqygcz\nWcwnG+zdu5H+/n2Ew808+OB+AjOci3o9IJsiRbkeYy/ik2thnT76uJd7+QyfoYsu/rQ7TE1sgkHv\nCLtaqrkpcBuVVPJLfslpTpvHqb70/uuoo5lm02dUVbm9j/tooIFKKjnIQVP99OChAQ9LGOc9wB6H\nsJa1PMdz/Cl/ikAwxJBWVKiVHfh4Ag//Hy+Yx1RTzUUuUmr8DTDA/7QbbjFqmR1eBlvXWc8hSpTj\nHOdd3jWVVKdzybUQlr3gT7rqsE6VgUspzbvvQiPCzBUTmioiFGas6dZLFeRxKnI0U4gCx5Gq2ztI\nle8JY1zjJBRSNQcREvNSjQzb7cC54JAO/TiQeaUq/sFecEmhDqutDMAdyBxYvYKtvRiSH6mUprOY\nUVDhzh3aNr0QUytSzZxKMSKndc6lyrNCDYnQY70gVDbIp7/5hnwLGM0EZpuouzBw4MCB2R6CCwNz\neS2UqrfuZz+zKH6Z1NVs1NdcoCu0nq1bxbrXXsupXVPpfeWVJIXUrprO5fXIFWNjw2L//jYxNjbX\ndQ9nzIe10BXJQh1TKBVN9fPyq6tNhfy7+72mKqkrmlWiyuxLVxTV65UHKs3HYREWfuE3n9eJOov6\nmayTWv9qRa14XDwu1oq1poqrn6uuHKf6+5M98nz+4mVE6RhJSqq9Df1cVJ9qe5WoEmERNpXhXLBW\npFbTnBTwfJR0Owr1vlCKWy4q03QjlQJaqLGuFanXK1v1Uh/ja1NYi1uEVDUDIqF4rhZWNXCREKJS\nWFVCNQe6uhkSQnyinYPeTptIntdFxmsVwqp0NoqEson2uFkkVMky7fVW27zYVdFq49zU/pUiWVnV\n/200xrfJeLxJ5KaGZvPecFrntSL5usikxqtxNWUxLjuc+rvWQJ7KqJsz6sKFCwtUzuXOdetS5kk6\nKYBTqc7qhEq/33x8RQg6Tp3icy+8kHUOqDqPSociSteyT2YgEGLduh3ToogWsiBLUttGEas/f+ed\naakonbLfPM4pn7zDTMf8WXcd//vuMH+5N0RpitN3Gqt9m+qnxCtzQH8bhm1rZIRDJZWW/M/VrDbH\no3Jz9TxNhXLKGWLIrL4LcBd3WbxF7b9Aimw3x89ylj3soYsuhhgiSJAAAR7jMWLELHmoqfD3LfDe\nMvjbB+Gy8RGzhCXmOagc2pWspJVW81yaaWYlK83HxRQzwghDDHFvUh1OK5zmPFVOmVN1Y31czTRT\nSum0vYeywWwVE0qHVEVhCjXWdDmA2eaH6mP86xz6tldfVRVg48Aho71fG/uWIyvD9pPw3azCWrTn\nNRLK3JeRKqo6B1UzWy+mpM+ryqs8j7WK7SIwypFJBfIwiXlXhYS+pO2vV+Xt1Y5tMfY9AUZWt8wF\nbyJRdAlkDuta7fHzxjm8gsw/fYXMRaRyhdM6O10XmQoU6YWVch3XTFaVdpGM2SbqLly4mIcYHhsT\n4e3bE+qolseaSw6oU/7m42++KcLbt4t1P/vZrOR1The6up4Sr766VuzZs2FalNFCKDwp256hHN+k\nfqfxnFLBKb9az/X9T/vrTbXvFnGLqeJ9UXwxrepWLxLH/fdjj4v/Yb9PlI4hPMKTpGiWiTJLTqXK\nJdXzTFXeaUAEBCLRTq2oFavF6oxKZjqV1CsSam2raLXklKp+60SdRdG0/60UKy0qsl191p/rObG1\nolYgEEERzKiMqlxZBGKTkSWWSk3T12KTllGmj2M2rre5jafEBvFzQwWbmBa1Nlv1MxV0NTJXRWyt\nsKphYeGsDLaJhOrmMf6vElL51NW6kHbcAttY7OdpV5b1558IIZYKqaaqMVUb252Qag5TqdfDtnPd\nJKSiuknklk87nXAagzqfciHXo5DjmwvnPN3AVUZduHAxF5Gvh2ooEOAuw0Kl2u/n3oULAWc1M10f\nThYmyge14+TJOWu9kg9isV4GBrro799HT0/hq3dOp83KbKnVs2Edk2SjRJTDXqmo/TYMT68ZMKuv\nDjBgqnhHOGIZq67EqX2Xs5wYMT4M9PH/rpvgcgCuaJlbQwzhwWPaqKh+eullgAEz11JhggniRhbo\nFa7gw8cVrpg5n/lghBEmTT0FBMKx3zOcQSCSVFaAeupZwQqWs5waalhv1PhMVTm3jz7OcIYOOggS\nxI+fu7iLKs0p0UkFjWsZsGocSmXZYttfv5a2s908Th/HfLMqmn700s4f0MaL7OdfT4taq7wr7VVz\ns0UvCTVSVZ/NFnY1TFWAXW1sb0Iqg6oCbphEnuV9SDVTV+sS8ULSR1P/lLerf3XGP/VcryD8BHIe\nDiLV2EVIRdNelVZhC9ADLENW01VzqKvXyoJlo/HaXdq5b0fmV75CYj2ms8JyNn6gTmNQ3rEXkDms\nTt+i+XqNzsWq0tcTZpuouzAwH3KxrhfMhbWYSvXbXJCv4vVUV5dYvWuXqH/uOfHJ6GiSwqmP3ykv\nNB3s+a1zYT0KgT17Nohnn0W8/HLztCij+eRKZt22sb6vvf56wdtO2+80nlMq2K+/elEvSscQT+2X\nuZB6LqVSBoMiKI6Ko5axpsqzXCKWiGpRbSqgqZRFr/CayuAisSh5jwP5ap/Z/1WJKlEiSnI6JiRC\nYrVYbZ5jNkqjnpOrq7r6MU6qparkq+empto/m2sp3+vtWvmMSsbMZLGuFfnn69lHmGktdCXzE+Gs\nhuWiNOpq3VohFdFsZmytcD5nfXu2M28/xmkO7f0VUglMl8t54MCBpNftY3Fq53Hh3GamKzJV2y5c\nZdSFCxc5wq7OTBfyVbx6YzEODg4yMDbG5kOHkhROffwfj47m1Eeh81vnClpa2lm2rG3aqujmkyuZ\nddvG+pb7/Zl3LmS/2jlNZ06sDvv1FyfO5QD8aB1UBxos1Xbf4z0WsYjjHGcFKyxjVaroClbgxWu2\nf4pTptdmGWUpxzHJJJvZzK3cyilOOe5TTTXF0/hT4QIXGGMs7T6VZjachAcPBzloniNIL9Snbc5/\n+no+wzOmX61qz65OOqmWO9lJG228yZtJ1719/y1sYZBBMw/WCdP5HpqfmJks1qnk66UboZNKpiuZ\nm3FWw+wqmWpnAthk66sdmeN5wWi3Oc14dKgs7Eqsnpi6F6qej5oOuhdqE9n5cxZSCcyUy2l/PdV6\n6/vtTdFmpivSzf2cn5htou7ChQsH5FL9dioqar6em7lU73XyPZ1LmCkV2sXUMFv5fEp9U7mY2XiN\n2vMTG0SDQFh9RqtFtXhUPJqUB6r+lAKb6nWf8ImgCFpyTmf6r1gUi6PiqJm7WS7KzZxP+1+dqLPM\nnZ7vuUAssOSSLhXlYrWoFBtEWAwb6rBSLVXV30ViUdr1sKuchbl+5pMT6PzBdOXrrRXJKlk+Wq9T\nOzr0arStIrurxF5dVyGfuVDVbhuNdp36nc6cyExzan89G+U5H/9SkaZtF/krozOB2Z4bFy5cOCAX\nkjgbxWUyjS9fkjsbmK3iPC5yQ6HsVXKFIjWpwkedYB+rvaCQR3jEWrHWUhhI2boUi2KLrUmRgzGL\nTmpn+2+pWJpU4EgVVaoU0n5Gt3EpFaVitVhtKZJkn9O1osrcXi8ClvV2Cn/OhlwW5vpZK9wgwPkD\nJ5KUD1nJVDjHbimyVqS/Sm4RCcuVFTmORYjUZDdTv9OFTHOa7Zzr+7mksvDADdN1kQmdnZ2zPQQX\nBubCWjgV9kmF2Sguk2l8+uv5FklSmO71uJatZAqNQq1FPteEsjfRw2RnAip0M1X4qBPsY1XHrmQl\ntdRyhSt00WUJZRXG74SrXGWIIT7H51jPeovdi8JVrkJn5rHfwA1ZnmV+aKKJBhoYZNDcVkQRt3M7\n9dSzjnUECHCZy+brl7nMQQ5aiiTVUMMpTmnFhnzmawPEzUJOkAi/VcWNwoQtx6ZCYa4f5yDAufCd\n4UJCXwunkM5cwlOjwELg54CX1IVzdEuRLWCWLVuJc6joAAnLlaEsx6IjVVjsbIWo6nNqD43u7OzM\nes71/dyCQnMHLhl14cJFRsz1HMuZyn/NF3N9/q5F5HNN5JrPV+gc0zrjL5v+7WNVROgAB7ibuwFJ\namupTdnGBBN00GHxD80F5ZRbqs3mgjrqLM99GjnU8Tt+x0d8ZNkmEBzmMAMMsJvdxIlbKgarHNfb\nuZ2NbGQTm1jOcg5ykH3sI0qUdt6j3nBbtJN/NZdHOUobbdzCLZZjU6Ew+aBz0Ql05pFvxdKZxlQJ\nTS+SOMZIkMdMfqh6dd+bUvSt3k1B4O08xpWKdDpdnTO9Vk5Eeb5cLy6ckVwvvfAwlFsXLlzkgmh3\nN72xGEGvl/aWlnlBYmZrzBv37mVffz/N4bBL+FwAM3NNRIjQRRcAbbSxgx2z1l6UKL30EiTIMzzD\nfdzHJS4xxJBJ1IoowoPHohjezu2UUcYhDiW16cOXZLlSKBRRZCq1AF68lnHZ0UADk0wyyCBVVDHC\nSMZjWmllF7sA2MhG9rGPZppN5TJGjChRtrLVJJD6PLbTToiQ47EuphcRMN4JkvxM7Z01d7ERSaoA\n7kBap2wnPblVxzST+pZFH3Av8Bap7VrSIYYkeKoQkROiSGJ4jAQ5nom1cjr/CNfH9TLXUVRUBHlw\nS5eMunAxRxHZvZuugQEA2pYtY8e6dbM8osyYrTHH4nGiPT1sXbNmxolovgS8uztKLNaL1xukpaV9\nWqrfXs+YiWtCkZQwYW7hFiqpNAnMVNqzkx4ngmSHTmSXspRznGOEEfP1IoooptiiIIIkbHHi7DN/\nEmeGB4+lHeW/KfJLF6KYYhkWnAaP8ihv8ibDDHM3d1NHHXvYk5IsV1PNCU6Yc6WIZyml9NGXci71\neaynng/5ECCJtM51ZHPNzGVkQ7iuBcSAP0Ym2m0nu/PMhijOBCIkCCDM3Fo5nf/1cr3MdeRLRt0w\n3esIbs7J3EE2azEf8wxna8xTzR+dynsj3xDhWKyXgYEu+vv30dOTOvTvekOhPqecco6j3VEiuyNs\n3LuRWDy7YK50obgqnDPbMM5MSJVz2EsvXXSxj30sZ7ljSLBuM9JAg4WIgiSKdiLaTDPb2MZBDprb\nLDYqnc7jtLczVazB+llR5PBb5mVeZpBBJpjgIAc5ytGURNSPn5u52WKxokJo++gz59JprYKaicUA\nA0SJzgk7llzfF/o1M5VrcrYwl4OVC/lbKgTsAl4h+/PMJzR4OsJY1TuliWQ7mumE/fw7Ozvn9PXi\nIjOmQkbbgF8BV4BVhRmOCxcuFOZjnuFcGPNM54/mS8C9XvlVHg43s2aN61amSN+f8+fT5vHZG+ul\na6CLff37iGZ5AyDdj/p8Cg+lQ8j4a6XVQn7tBGkVq4gQYTGLuYEbCBDgIAdZwAJe4iVzPKnIUyWV\nbGKTSXovctF8bZRR87EfP6tZnXHcquRsNtD9UEGGAr/CKyxggfm6U1s68byDO2igIWmflayklVbu\n4i4OcchxzT7mz4EDVPIWT/OjpDbaaaeeemDq65kKM5Hf5uSZOp/gFpcpLDL5dOYDRQDfJDcyPR1w\nr5frF7cCNwMHSE9GZ7fOsAsXLq4r5OKfWgjkazEzNjYs9u9vE2NjbmF5IWbG43PDng2CZxHNLzeL\n4SznPRu7DrvfpBBCPCWeEmvF2qw8Q3U4zcOwGDY9M5tFc5KNi/7XKlrF4+JxUSfqxDqxTlSLaoGw\nWrV4hVc8LB42x5fKZ7RIFAmf8KXtL9c/ZcWi91ElqsRasVY0ikaLp2mxKDbtWfTtNaJGhEXY0o5P\n+MRRcVQ8JZ4yz3mlWJk096vFhGlNsVT8wnGNnNazkFgrpt8eY7rP4drDtePv6nQm+XifunCRK5hF\nn1GXjLpw4WLOYD75j+aCp8RT4vtd9eKHr1aLV/esu+ZI7Ex4fA6PDYu2/W2ORDQVecz3R32+5DrV\nPOjjKBNlKcneRrHR0vdGsVEERVDcKe5MeUxERNISyIAIWMigIop7xV7hF/6Uxy0Xy3MiqnWiLuVr\nTl6oDaLBMq6ACFiIc62oNdfzFnGLqBJVwif2mz/KV4sH81qj/JCgCBvEeE7EIN8bGy5ywVpxrfi7\nrhXJZzKfPDWvndsC1x9wfUZdZIKbMzp34K7F9CEX/1SF+bAevfRyNTZAeGCYgf6Oay7PVOVL/lXn\nX01bbl4oEGLHuh2EHIpFpQrHzTdfUA+TLKU0awuYVHmjegjvGGPm9hJKLKGvC7vf4J7dB/mTvfDl\n+Eqe53nu4i4Oc9jcp4gii7focY7zMA+buZpmWHCn/C9MOCmP8ypX+RbfMttxyvP8mI/TnquOGmpS\nzo0PH8L2GydMmCvGn0KcOL/gF4C0nTnLWXM9T3CCEUaY4BGKeYmXGKHSKJpUiFDWzDY/iUDJdp7K\nKb+tl166Oudv/uf8QPYOmnP9+8LpTOZTGGuuIcVzfT1cZIY3w+v7wUiesOI/AK9l28mTTz7JTTfd\nBEAoFGLlypVEIhEgcRG5z93n19Nzhbkynuv9ucJcGY/T8yBBThwHzsA9q5tYs2brnBrfVJ+HCPHd\nzu9y5MgRmVA3w/0HCUIn3MzNbI1sTXq9uztKT8+7eDwB/uzPXicQCKVtr512Wjtb+T7f568jfy0r\ntHbK6rWdkc6049kR2ZH0epQo7Z3tMr/TmJ9AZ4BtbOPvI39PBx2UdJYwenCMuw0Lzxv/nzKO3HmE\nYET+PF3SuYRP+ZT3Iu+xgQ2c7zwPwGBkkN3sRnRKwncpcgl4Fo4co5ibGI3839JCRQ7H7H+kc8R8\nLhBJr493jkt7mIiR72l7XX8+yiiTnZOOrzsdP844o52jSfuPM86iyCKucpULnRcIEuTpyNPS4qUT\nYISrkTbuYyllnWVUU81LkZcIkX49Mz1XhBEgGomygx22/YPIpzcTifwNO9K01x5pp5deLnde5i/5\nS3P9bu68mcd5POX746udnfQDDZEI7cCROfT+nvvP2+nsbAW+TyQSSru/wtwav379RIgCj3d2cmQO\njCfX50Hj+c2dnTwud0i7v8JcGf/19PzIkSPEYvLm2yeffEK+KIS1ywHg3wG/TPG6ody6cHF9wrXw\nSI356KU6W4gR47vxJ/mjniJa1mxzr6MCw8lzUsfu3REGBiTZWLasjXXrrE526d7nhfCpjJCwHKmi\nijLKeJu3aaTRHPsww9y6t4M7+qE63MTXH3yTQCDZT/NWbuXX/Nq0U7FbtchtPVzhXuPZi8CjOY/Z\ng4dOOrmf+/PyK1WWL3ZPUieofcKEKaaYs5w1z2kpSznLWbM4UxNNBAmaVYQL4RGbeY2TDSlS2a/o\na91GG1vZmpW1TATXa3G+IopUBIPIwkDX86f7XLGucZE7ZtNn9ADwfdBigKxwyaiL6xqZfsRez5iP\nXqourk/s3buR/v59hMPNPPjg/qSbAene55mIbjZQZKeaat7nfRodrOxT3bCIEuU1XuMc5yillAtc\nSGvPEiLEFzhJF0HgXeArYLOKyRZBgvjxT1uFZIX1rCdEiFOc0qxqngVuwcs4k7QBIwQJUkEFZznL\nJJOsZCUHODDl0PBc1liR0GMcY5hhwEqI87154Xotzl9EcG8kuJj/mA2f0YeA3wP3AHsgB8dsF7MC\ne0iDi5mBk4WHuxYSc8VL9XpYj+7uKLt3R9i7dyPxLD02ZwNzaS30PMDmlmdYtqzNkYhCaqueKFFa\naeUCF6Y0FpVLeoITjkQUJIksDyzg/1g3zGcDd3Av97KRjRznOAMMMM44I4xk9An14MHHE4Q6/wb4\nCn4uW/xHiygiRIh66imnPG1bl7lsElG7rctU4MVr2s4ECSIQbGUrffRpe90MrGWS9cCzVFLJHdzB\naU7LsGOggYaC5Cjnklus8pMVEbXnrDrlDWfzvmjH9VqcCUzHZ1T2Gasu7Ojs7JwRuyQX04epfDPs\nMv65cOEiDVpa2unpibJmzdZ5HVo5HSG17S0tRHt62LpmjRuiO82IxXpN5a6nJ+oq9FlAkQaAPwls\nZkeaOUv1PtfbiBLNOxxUkZ1cxtxPP4Dp4alwG7fxG37DOONJxxdRxFnO0sFLrOZTGviKTW2U+aEx\nYjTQQAklaYm2Hl6rCKAHDxvYwM/5OWc563hcGWXEiZvH6PDg4TCHWcIS6qnnEpfooIMneZJGGs3z\nhkvG/+8C/4ZRRnmf9y1t+fClHLtCdzRKrLcXbzDIa+11fBjqSwqtzQWqQNRKVnITN7GNbUnFqvK5\nTlSRGhfZY66Ex7bjhqZOBaroEch5dN8H8wuFCNPNBDdM14WLawBuSO38RqYwUxfJyBQumU0+eCHy\nRVPBqX/Vn471rKeXXkYZxYePd3mX7/Ad9rGPSiopoYQv8AUOc5hznAOk8vgbfkMjjWab5ZRbiOd6\n1vMjfsQ93MM5zjmSW5X3aYcXryPR1FFCiaVycIAA1VTzDu+Y6nANNabC2EorceLsYx9VVBmBxc8C\n/wY9zDhAgDhxM0R3C1scczcVdkciDHTJn7p9bXX8rzvOAPnnmhYibNtFYRDBDY+9FuCGqM8NzEaY\nrgsXLq4jzJWQWhf5oaWlPW2Y6XzEdIcep7JZUVBqc3//vpRWO5nayBdRonTFdiT1r/rTw2rLKOMm\nbmKYYQYZZDObzf366OM0p7mJm5JUzM1sBqCOOoopTlJAP+IjnuAJmmiil15aaSVMGJDqXi21VFHl\nOH5FRJtocgw7rqXWolp68LCSlTTRZGnzTu4029nGNvO8jnKUekqBR1nJUlMdbqaZj/iINtrMXFEn\nWx89RJugHEe4uZl/3voFs5187WDytQtykR75hGrOx/BYNyQ1GW6I+vyGq4xeR+js7DRLMruYXczH\ntYjF49dsSO18XI+5gHwqRWc6Jpe1cCoaNJPVq2dCbbZXXFUq3jGO8e29w9zRDyPhav7HB09Y+l/P\nejrooIkm3uRN7uAO+umnkkqOccwkgE7FdED6eNZ31rM4sphRRi1hugC3cztVVJnb66nnQz4027SH\n9tpRSSX3cz/b2EYrrWZoMUjVtIgiswKvDx+llJrVcHVFMkaMVayigQYqqbQom7oCqcZVSil9WMNs\nndRrvaLtt2Kb+O+iftZs3crlUOp2Cgk9NLilvZ2fHznifkZlQITcVc58KrfO9vdFhOtbzbWHVh9x\nv7/nDPJVRgtXTcCFCxfXNEKBgBuaez0jGoXeXggGob0dQqGMeahOxLCQuatORYNmMjc213zwVEQ5\nlcUHJOecDjJoPv/7Fvi3PdX8uzXvJ/W/k51EifJUd+n/z97bB8dV3vmen37Xu1pvtmyMhZUAcSaA\nDWLwEnxpkIwvhsQKoCTDbA1ka6arbnZ2Zrd2TN2X2qm5W8mtqUvuztzaqUmNZ7J4QtCAbYLDm6+D\nHMtyDCgD4WUCjEVsMEhyW5attmVbarWk3j+ePqdPd59+71afln4finL3Oc95znOep0+rv+f3xo+C\nG3nEeY6/64ZLnkvcwz2sZz011CQJzTrqqKGGDWxgmGF+w29oNyk3/kW+yFu8pb8PEOBxHucAB9jL\nXmVRTEArv+LFy7u8Swcd+PHzPu/HtUt0362jThfKDhxMMUWQIF68PMETXOACn/AJABvZSD31TDCB\nCxdv8ZY+n3vZGycytRjefvqT3Ga1uM4uuvhb7x68e9V2T4p+NAtrscRpcGREdw0+5vfj/O53C+pv\nJZCPlbMS42wr0ZpbTBLjQ+XOqHzETXcFIU+OrIOshbXw+XwVk222bIyMwNGjcPCgEqakziCrYebG\nmumYXO4NM9fjTP3ngtFV06w0icfjpadnb9YW0VRuvWZuohpGUbSb3fr7zWzmPs9O/kPPKVZ5kt1c\nvdH/3gz+jPpAgI2jC/zPx6CJJtayVrcF5ukAACAASURBVD/fSU4CKplOCy1c5jITTPBrfg0+dZ43\neZMd7MCNWx9LAw26pVLjl/ySa7mW1azmDd5ISpyksvS6uIVb+HP+HB8+9rM/ziKrUUutPi7NFVer\nhzrAABvZSJAgI4xw0RAPGiDAJ3zCRS4yySR36bVSzedTm6tEt9lM7tWJ/aRbw3xw1kQ/x11dbN29\nW/5mZMFSuWqWey1Wuktqohgv93oIhSNiVBAEgezi/7JlWQrb6I9jurpgt/oRnykO1UwYFjN21UwM\nFrP/oguMFELZTCBptNFGK626IOqnnw1soIYa04RBGprVb8KphNonrfDy1kbe4R09nrSLLnz4aKWV\nCSb0ki+11OousutZTwcdXMM13M7ttNPOfvZzmtNxYtSOnfOcZ5RRJpggSJAJJmihRb/GeeYJE+Yo\nR3mFV+LKm2xiU1zpl3rq9bjOfeyjjz5Ws1rfHyCAH78+d9qxXXTpMaU11PBLfhk3L4kiM9UDh0xx\nnYn9pFvDfHipv43TfW3842teZlai4sgDzcq53KdrpVxnKla6GF+OiBhdQVipft9KR9bCWgwODhbV\nolZMYVtMChLJ/f3Q1wevvQbeqMtiGsvg0JCfublLVFW1s23bfr1NJmtiofdGrtbKdOQiMDJZUSG1\nUE5nhTvNaSaZZIAB3R10Pes5zvGUCXeMFsMfdcOvO208/UATv+O5nUYa4853hjNMMsk44/q4Navk\nDYM3sIc9gBLmxzlOgABb2KInF7JFw4O0jLkOHPrY7dj5Cl9hJzu5kzvjrkvL2ltPPTvYoScT0vro\noENPmKRZeRMTKJ3iFGHC7GQnv+W3+jW9zdusYx1f5+s8xmNxa5IoMo0PHDRrazYk9lPsRFUfeU/z\nX/ae40WvWnf5m2EdZC3KS6IYl/WofESMCoIgUFyLWjGFbTEpSCR7vbB3ry5EsznXxMRxZmcDDA/v\nymO05ScXgZGNFTWVUDazwmni8gM+AOIFsZlITjy/1qbK08S7Pb/LmGeKAQa4nut5lEf1+EitndFa\nWked3mcnnXqiHo0AAeqoo4++uOtw4WILW4BYSZejHGWYYf6Bf4izfGqZe6eZVi7BwFu8xTrW0UUX\nwwzHzaVWmsbI27zNAAMMM0wjjfocdtDB53zOGc6YrolRuBsz9mrW1swk5zMtdobcYltarYXkgxUE\nIYaI0RWE+NVbB1kLa+Hz+YpqUbNqGZWlFMnauTyeVq5cGc/aGmuleyMXgVFs8aCJy0kmWcc6vsyX\n6aWXHeygkcY4112z82tC+hSnaKYZUK6sk0xykIN8h+8AMcH9Pu+zgQ148HCa06pTH0wxxZ3cST/9\neiKjLrp4iqfw4o0rBxMmTCON9NHHvdyrbw8QYBe78OEzvdYAATaykUYauZ/7GWEEUBZaLVmRUQyb\nHW8mIlOtiVG411EXd13ZrZ2WQuUgFMF924zEByFWui8Kp/TzV0qW11pUPrIelY+UdhEEoaJYytId\ny41QKJhT9tdinOvKlXHOnlXZWrXyK9YlsWiAmqNsPnPGMiLFsI4llhsxlj9po41znANiZU7SnV/b\n9xqv6W6oO9nJAQ7EtTNmiE0cyyu8EneOJ3iCPezRY0s1tH6DBNnIRgIE9GsAuJEbmWCCm7mZAAEm\nmNCP7aOPYxwjQCCuzw1sYC1rOclJ9rGPHnoIEcKJk3nmqaOOLWxhLWt1K24bbfwP/ocupg9wQJ+X\nxLkFcly7HSgh1YVEruWDzJ8gLEfyLe0iltEVhPjVWwdZi/wpdjzm0JCf739/07JKNpQqNrSY1t9M\naOdyuZT7Z7bW2PLeG+YWm2w+c8V200yXIOcWbtFfp8sImzi2LroAlSxIiwU1op3jJm5S7quDqp7o\nMzyTdI4RRpKEqLHfLWzhKldx42Y96+mll0d5lF/xK/ro4yhHOcEJ3SqplW+ZZTauzy66WMtaPV61\njz5OcII++niER3Di5DKXGWCAgxzULZ7P8RwTTOgJk4yW08S5zX3tlj6FyvL6m1HZKWiW11pUPrIe\nlY/UGRUEoaIotqvp90438uGFG/COruZ7g9+ld3t/Tsdb0VK7lLU2zTDWzdzT/UPcx3YtiTW2cMwr\n+JXCvdk4R//HUBszwdNxnyFNIGkYa2Fqx6ey5KWqW6rVHjU7zo+fS1yinXZe4iUaaaSX3jiLohGt\nJAxAI404cdJKq74tQECP8XyZl/XMv/dwDzPM0Eknt3Ebb/Imt3M75zjHAAN6OZibuIkv8AWe4ike\n5dG4fu/hHgIEmGFG3+7Fyy3cwgADdNHFb/ktIUL6+Izut4lzmzuVWJ3SSsj8CYIQQ9x0BUGoKIrt\nanrTnv/Cb+bUj+iHOtbx/PYdOR3/4os+Xfh1dvbhdnvLLk5ffXUHo6MHaW3tKkvcqtHdU3MjTSSV\nYCovQZRFdDdGi00p3JuNc/SXL7bSFJgEiuPKvIY1uqtrL728wAv6vlTznuuaXeACwwwD0EIL5zkP\ngAcPNdRwmcuECVNDDbdzO0c5ShddePBwnON6n3308QZvMMooDhxsYQujjLKOdTTQQD/q4ZDR5Tex\nD1Cut8/wjC62++hjgAEaaeQ93qOD5Fqsyw9zN3NBEISlQNx0BUFYERTb1XRt21cAuK2liR/5unM+\nPtFqZoWyLuVOoJRNMp9i1/AsDuYV/Erh3myco43OTUBhlldjhlijxdCYYAhSz7s2nlZaGWfctESN\n8VgtyVEXXWxmM6BcbUOEmGKKMGGqqOJDPuQAB3S3WC1rL8BmNrOb3bpQXGCB4xznKlfjStd48fIR\nH5n2AfA7/A6ttNJLr17+RatN+imf5iREsynRY13KmxhoyO/nRZ+PV3fsIBSstLkTBGE5ExGswZEj\nR8o9BCGKrIV1mJqdjdz9gx9EpmZn8zp+dnYq8tprfZHZ2anI0aN/FHnqqabI3/0dkR/9qC7y0ks9\nkdnZqSKPODVHj/5R5Gc/uzvyyiv3L+l5E5mKTEX6In2RqUjqMdwfuT9ChEhXpCuuXSH3RrbXb4V5\nMs6R8TOUL3dH7o4Q/W9VZFWECJFNkU1Ja5Bq3rXxfDXyVb2fu4/cHfmjyB9F7o7cHbk/cn+kJ9Kj\nH/tp5FN9/Nqx2n4iROoidZGeSE/S+aciU5GdkZ2R3khv5A8ifxC5O3J3pDXSqh/nirgiDZGGCBEi\ntZHalH3siOyItEfaIzsiOyJTkam46++L9BVlHgvpp9hkd1/cH4lEiEQiXZFImnuvVPzs7rsjfweR\nv4PIa33WmbtiI3+/rYWsh3UA8nKFlZhRQRDKglViLb0eD39x2214PZ68jtesZqBiNefmpgCYn7/M\n+PgAg4OPs337gXRdFI3EWNFyuQxniskbGvLzh8FL3OVs5w+79+Mt0riyjZUtd0wtJMyRh4LHYLS0\n7mc/u9hlGhtqjD017tPGs4Mdej9/xp/xA36gu+/uZCd99OnHGtdYy+j7OI/zS37Jec4zwAB+/Oxl\nr6l7sNE12IWLsOE/gCtcievDONZXeCXl9RdSWqey63v2Y+ZmvlQ4a6LW9a4utu6utLkTBKFcSMyo\nIAhLiiZCL1x4Xxdu1i/5kR1arKaKgFgEoKOjl+3bYzF7Q0N+Tp9+iYWFEG1tt9HTs69oIjExVvTQ\nod64eFarzHFinG2xxpVtrGw27Sot+i7b0jJf4ksECODCxVu8leTCGiTI/zN0K5uCa/mts4G/7v6P\nnPFcYhNPcoQXsortTSydkig8tZhUYzsvXj3G8yIXaaCBS1yK66MY15+JQvuxZix0thT2qQ8Fgxzz\n+9m6ezcebyVdtyAIxSDfmFERo4KwjLCKtTEdRiEClC3JTinQEt3MzExw5sxRWlo28+CDv4i7tsTr\nL6YYS0y0U+5ERqko1biyTTRk1i7x3tnu8eoVN/tIzv2Z7b3mHxpiJBikxumkv7s7bwt8sfDi5SIX\nAVjHOj7n86Q2xs/oW519/H3PXnqZ4wXcSW3NxJeZoDMTqMZ2Wl9P8iS72KX/W6y6rUtFNomgrIsP\n0n7qBUEQUiMJjISMSC0m61CqtVjK5DmpallmQkv409y8iY6OXkuIpGKth+aye999B+js7EsSohC7\nfoCWls1FKxViPL92zkyJjPJdw0JJN65C1iLbRENm7RLvHfMiL6Rsn4qRYJCjgQAHR0fxHzuW+0UV\nGRcuQLmj/pJfmrbRPqNvBW7gJ1t30wU8lUKI7mVvUkIks7qdibU9E9tprzvoiPu3koQolM7Nd2n+\nfmf61Asgv6WshqxH5SNiVBCWEaWoh5iKfIWvJkS+9rUjbN/+QtmFaClIJ4q6u/vp6NhJR0evqVhd\nqnHA0j68yGVc5SDx3ulH2YZew9xZMdt7rcapUjN0tbaye+vW4g46R/z4+QJfwIOHN3jDNMusHz9/\n1X2Jsc527t7yn3nQ4005ByOM6FbWJprSii8zgbocMRPdxeEHKMvlDihZlt9Mn3pBEITiI266grCM\nKEU9xFRY1QVUyJ5c17AS3MDzJdd7J9v2wVAI/7Fj7N66tewuukYX0g1sYD3r86o3qqG53jbRxDu8\ns0JqeZYLH+JCW3wqLTZcEKyMxIwKgrCkLKXwNSMXYVRqEVXM/os9VrP+nnvuS1y9GsBmc+By1VJb\nux63uyHj+UqVeKjYLGfRXAjGuE0PHo5zHIgXnWaxnanIJ9nP8v3xX+or24GqH9qFWC6Lhw+R+IJQ\nLCRmVMiI+NVbh+WwFuV2tczFxTSxbWKspHE98omjNPa/d+/GpONy6bPYrrNm/V29GiAcvsjc3AVm\nZs4yMXGc0dGDPPdc8tiNLIUbeKFrAeVzP84WP358+NjBDoI5ulzmcmxiW6MLaQMNQHJso7HNu4Pv\npu0/H9fbEdSP/4OA9VamEEp7ZYOD30VcaItPPlGyy+Hv93JC1qPyETEqCEJFkoswSmybTqyY7Usl\nirTtU1Mf6NtmZgJZ9VmM68oGs/7sdpXExmZzYLdX6W1nZ5PHbrz2rVt/mDYhUqrj8k2OlGreMgmy\npYydzocRRpKS/pTi2MS2RvGYKrax1LGd5UyRU8hDgMyU+srqUHY7EaLFRKJkBaH8iBhdQfh8vnIP\nQYgia1E4mTLFpmubKFaM62EmZFKJIm17KDSJ3e6JHl/H7OxUnADLRRzlcl3ZYNbfQw+9RW3tOlpa\nupifv6S3NRtfvNX3S1y+PM7hw4+WzMKbaS0gsyAr9hwWW8QUknE1l2PTtc1GdJbie6qcP/4LeQiQ\nmWJcmZ9USYpSr0XqY4TMeMld4svfb2sh61H5SMyoIAgrjnTxrmb7UiX6MW7ftm0/P/3p7YRC54D4\nmMpyx9emQht/c/Mm6uuvw+d7Kml8WptEzGJGjbGai4thxscHCkpwlWreEuMan+CJpFqXxcSY1Ked\ndj7io4LOkU+sZT7HFnKe5chSf25yx0fuEYz5HCMIglB8JGbUYpSrfl86xK/eOshalJfEeFfjepjF\nwqaytBm319d30NbWBcRb8oaG/Bw61Mvc3OWsxlbq7w4zt9t0ZXa6u/upqmoHwOVqBFJbeI3WUJer\nLi/rZKa1gOTyGaW1eMUsjAABAgWfI19XWD9+eunlMtl9lgp1uS3291S5/y4u9ecmd1K7+qZeC6kN\nutTI329rIetR+TjLPYDlivajDODYMb8lsk6+994PuHTpLyTD5AqikrKKWnmsmijKtL27uz/Jkpfr\nd0GpvzuM/f/0p7frAjoVHo+Xb33rI44d83PHHU8yPLwrpYXX6FZrZmUtFprI0rhz6CS3BaHa2cCf\ndD8J0QoqxfpM9dPPRjYSIJCXa22x0MQTKGGaruxKOvz4kyyC/qEhRoJBapxO+ru7S1KGJtfPttk4\nCyHxc1OIu3Rp6Ee53e4me8fRfI4RBEGwDmIZLRFWTKCxYcNlS2eYXEksVYyD1bOKGkk31lJbVIq1\nHmaWPO27wCyW1IzE745Crt3sWON4QqFzWX02tOuqr+9Im0G5GLGa+axFV7CDGwOwfvQS7x/bpW8v\n1uffi5eP+Mg04c9SYiaetDX+r69ey7bQXWniWmOxhSN8mGQRHAkGORoIcHB0FP+xY0Dxv6dy/btY\nastlqiRO5SN1BGPqtcgn6lEoBIlRtBayHpWPiNESUewEGsXAigJZKC2VtObpxmoUFc8/f6vlXODT\noSxybczPX2Z8fCCjKEr87ihEUJkdq/W/atUWILfPhpm4NW4DylLup8qpypQkXksxP//5urwW80GK\nmXjS1tg7OsqGY8fTCLdY6ZEaTgLxorbGqRylulpb2b11a0HjTEWufxdLbbksdeZgYSmRRE6CUKlI\nAqMVxM9//jJ2+48tl0RlJTI4OLgkT/OsmjjHjHRj1ZLoeDytLCzM6RlgzZLo5EM265HO5dNsn3Fb\nJBJmbCy/ZD6pkidlckEdGvJz6tR+5uamaG7eREvLzUxPn9bbAzl/Nl580ae7WWpzb7atEPK5N4yf\nneHhJ/R52br1h2ndipeCbOcnH5dU4xoHmxv4i69d4nc8XbpYje8zjJcBoIsg+/GzKy6xUTAUwn/s\nGLu3btVddJfqeyoVkoApRrnXwvr4WKpETrIW1kLWwzpIAiMhI253XVmsFkL5SJUAZqnJxjqUbqya\nRaWx8UZdiLrdTUtq7c21Nqlxm9NZm7enRHV1Gx5Pa9JxmSymweAIc3NTANTXX8f09Om49vl8Nsws\njdo2j6eVy5fHy2KxNl6LcV6Gh3eV/fOfrXU2H5dU4xp/uf4uHvTEW03j+6xDKz3ipSPJIuj1eNjb\n01OSWNF8EculkD2SyEkQKhURoysIeXJkHVbaWhQau6eJDbdbuWO63U08/PA7RRMZ2axHOlGRTqSp\nZD576OnZy/DwEzm7bI6O/pxQaJKxsQEGB7+T1XgS97vdDVy48D4Azc2b8hbxZm6WxgcFExPHC47P\nLPTeSDUv5crk2t3dz4XODfzwAQ/f8Dyask5pPi6pxmvd5nsmSbjF9/kUucYWrrTvKStT/LVYbm6t\nS1fBVu4LayHrUfmIm64gCCUnlatprpTK7TibrKu51iZNdB09ffolZmbOAQuActl0u70Zz7tnT7Nu\n/ero2Mn27Qeymgvj/kOHenVX0Y6OXqqr24qeubhYa1wIfvx8EvqQO4+d5H/d+iarPB36vmK7E2vn\ny8a11lintI8+0yy4rw09xq+Dr7LRuYnt3fuymr9MnwFxcxVS40PqkwqCUEzETVfIiNRisg4rbS2K\nldCrVG7Hx479KmOCpHTnNtuX6Do6MxNAE6I2m5M77ngyK4txa+ttgLJo+nx7shpP4v5EK+nJk3uL\nnmW5WGtcyL0xwggDnuP83z0B/tizK25fKZJ5Zetam43VcyZ4mqbAJIHRzEmuNDJ9BqxWZzQ/lpsF\nLz/UWhRzLsStNV+scV8IGrIelY/UGRUEoeSkqtNpFRwOFSfX2tqF3e6Jq4WYznqZbR1LTQhpRCLz\nDA/vykogbdu2L876lU/tTGP900OHegmHLwL5xd2mOr8V1jid6DOrAVvK8xnppz+jhbKSMl8vLVoW\nYFBizLrfI6Wn0LnwR/uoAX4I7ELqkwqCUG7ETVcQhGVFPmLN6O54+PCjce6mRhfXRPfOp59eE7V4\nKvfX7dtfSNn/4ODjnD37BrOzE3rfgGkW2FTZcc1cfbXxZHPdQ0N+Rkb+kcXFOWw2Jw899DYtLTdn\nnB8jpXB3LRbFdEvNxgW3mOerpMzXS8sO4CDKglf6eMBcMf+cGEVfP8Ubc6Fz4UNccwVBKBX5uumK\nZVQQhGWF5voK8PTTa2lruxWXq8G05Iq2bXj4Ca5eneDw4UeTyoGks1gtLIQM71I/dPN4vFRXrwLA\nZnPhctXq2zUxZxy3mUVWCdGA3meiVTPxeDORGAyOsLg4p0Ybmeedd76Xs5i0sgVPc0stBpoLLijB\nYdavFy9/PORlKNhbcPytFSzL1qQfJe6sacEz/5yUyppb6FyIa64gCNZDYkZXEOJXbx1kLUqH0SV2\ncXGGs2ePpyy5om0zxowmlgNJFwvpdFYD4HLVc+ed/z3tuILBEWZnJ4hEwpw5czQuLnBoyK9nu/V4\nWrh8eZxTp/bHjdMofG02Z1I24UuXTkbH0sAddzyZcW5aWjbnJSaLFRuaCqvcG9m64J4+/ZK+ToOD\njy/R6JYGa6yFl1yzAJvhx48PHzvYkTKjcT6Yf06KL/rUWhQ6F0uXcXY5Y437QtCQ9ah8xDIqCMKy\noru7n+ee28jsbACXq4Fw+FLKkivaNmPMaGI5EM06aUZ9/QauXh0nHJ7WRazxmOrqNkZHfx4VkjHP\nlcTyKsZ6kZEITEwc1/e1tnbhcFSjWV5drkYeeeQ96us74sYYDl8GIBy+pI/FbG6UYLLh8z2Vl5hc\nKRa8bOI8IdE6vhSRL0KuaBbLi1yMe18MzD8n2VgwS+XKmw5NzAqCIFgHiRkVBGFJySemM1e0+Ls7\n7niSl1++h5qatbqrLpC2DItxPMb4yOrqdr75zY/i9mvlTJzOOlat2sK2bfviYkw9nlZCoUnDyJxU\nVTWzdu29XL16Rp+D/ftv4sqVUVyuRpzOamZmArhc9bS3b+Xee59JKM0SK++SOEYgbiwSe1h6Xnll\nG2NjA7S0bObBB38hc25BjKV1mmjiFKcsUOrGh8RvCoKwnMg3ZlTEqCAIS8pSJ8DJ53yaYJ6a+iBO\nTNbVbaCubr0uIgGeffYGQqFzev9zc5cZHT0I2LHZHEQi4aT+PZ62uGPGx4eYnT0LqFhQzUqqjTdd\nDU9tX3PzJq5c+ZxQ6DygrLa1tetLIvqX4oFCpSCJh6zPDnZwkIM00cQ7vEMHHZkPKhmaRfQDYJKl\nS8xUDkusIAgrCakzKmRE/Oqtw0pei6VOgJPN+QYHBxka8uv1RaemPiQQOEooNIndHnPhralZq8cH\nPvvs9Rw+/CgtLbfE9V9d3RbtdTEqRNXXrMtVHx1PHbCovw4EjjM7GxO8drsrabxmcZpDQ35+9KMa\nRkd/js3mor6+k0hE9dvcvClurLnUEjXOw5Ejj5nWXM2mPmq+VNq9Uarat1bAumuRW73Nfvrpo49T\nnCqzEIVYcqNJYB3ZCtHC10I770FIUxNXyIx174uViaxH5SMxo4IgLCmlqPdoRCuBEgpdwOGopqVl\nEx0dvRljJI3ZaKur2wElCLdt269n1z18+FFAichQaJLR0YN0dPTS2dmHw1HNoUO9eiIihY3W1s1c\nuTLGjh2HePnlbkKhSebnLwN25ucvR18r3G4va9fey9jYAB6Pl9df/1Omp08zPX2S2toODh9+VLdE\nBoMjLCzMABCJLPD55y/rmXLr669jcvItQMWY3nHHk1lbM43zYLTg/vjHq1iz5m62bdtn6Yy6S4XV\nrMP+oSFGgkFqnE76u7vxejxlHU9pyS1bbTGzLBeOMblRqSyiZlZQyaQrCII1ETddQRBKzlL+cE+M\noYTs3HONrrANDV9kbOwwLS23xMVeai6ZodAUY2MDenxmbe1afvvbf4pzybXbXTQ338zk5Nv6GDQX\n3tbWLi5e/Jhw+KLe3mZzsn791xkfP6xvt9lcSW6+dXUd1NVdF+dCbLM5cDrrCIcv4nY34/VuZGrq\nN3o/Dkc1NpuL+flLgBLb69bdFyd03W4VU6vVWXU667Db3czNXUiay61bd5fsgYL2WflX50le6+7A\n5WlIWeeznFit3qrvxRc5GlClf/o6O9nb01PW8WSisO8Ea9ceTU+Q0peq8ZEcj7oU5xUEYSUjMaOC\nIFiWpfzhrolKDZerkUcf/TTux65mPV1YCNHaehvbtu0DVGIjh6OaTz/9mS7k6us3MD8/k9TWGCtq\nt3tYXIxlVa2pWcs11/Rw+vRLzM1N6clttHNs3bqbgYE+xsYGUG68yr3WaImMx9imRY8LBaiqWkV9\n/QbOnRs2HUsqkpMrxYRmqmtrbt7E1752JEk4FPNhg/Gz8lYn/H0P9NGX1rJVDitlujjecrDj1Vc5\nODpKV2srrz3wgOUto4V9J1hJWBUai5nr8dm0r2SxLghCpSIxo0JGxK/eOqy0tVhKt87u7n6qqlYB\nyu31kUfeSxIKweAIMzMB5uamGB8f4G/+pleP/ZuePq0LUbe7iZqatXFtBwcfx+Px0tbWpV+T5h6r\nrrWOvr4PmJ4+rScimp7+THfx1eILa2rWYrM50USm292kx58mfpd7PM2Aqg3a2ro5bt/cXFCvMarK\nwFQZ9pr/TVDt3HHbtHIzidf2rW+doKOjl46OnaZCVJtPYwypMe7UGGuaDR9+qFyPp1ob+cnWzHU+\nzc6/FJS63mqu9Hd309fZWVQhWsrvqcK+E4pTe7Q4FBqLmd3xsbXIpr3UEy0lK+3vt9WR9ah8RIwK\nglBylvKHu8fj5VvfOkFnZx+/93uf6PU4jRjrhjY3b+Kmm/5MF1BTUx8AYLe7sdmcTEwMxx179uwb\nhEJBqqvb8HjacLu9GL9K5+dnOXz4URwOlYjIZnMyN3ee0dGD0RqfilOn9hGJzGsj4uGH36G2di0e\nT2tUpKIf/+CDh+ns7OPBB39BT88+bLaYkFxcnCMUmsTprMFu92C3a8fa0WqTGrHbPWzbtp+6ug1x\n2+vrr9PXxrhe9fUdbN/+Atu3H0i5donCIhdxmChcb731/6Kzs4/vPPAeD3r6eI3XMrroliOG1WqJ\ni7weD3t7eixvEdWwmpjPn0JjMXM9XmvfCoxjnsTJSmJdEAQhPeKmKwjCiiMUCjI4+B0ggs+3B4/H\nG+c2WFu7jrq6Ds6ePW56fGdnH1evTujtY7GdNjQB6HY3Mz8/E7WaLgDQ0dFLdXVbXJIg7fhrrulh\nbu4SExPJ57TZnLjdXh566C3q6zuYnj7Nc8/doFtk3e4mHA43MzNnE8ZjTnwJmtTut9mSWN4kFxfW\nYrhwF1pexY+fEUaoocaS8alCIlYqU1Koy3Cux2vtxwHtu0LqlAqCUH4kZlQQBKEANAGlJSWy2YjG\ndCo0gacJLGOin5aWW5ie/oRItHoX6QAAIABJREFUZJGZmYBp/263l9/7vU84dKg3KcGSRlXVKmZn\nJ3A6G/RkQ0Zqa9fx+7//edx4bTYHVVVtzMycQxO92ljdbi+rV9/F6OghACKRMB5PK42NN+J0VuNw\nuLHbXbogLxa5iEMrxF768HE0mvAlU3yqYAV8aAl6/GxghPUr8EFCrnGhZgLeSqJeEIRKR2JGhYyI\nX711kLUojHxiElMdMzTk5/vf3xQVby3Mz19mfHwAp7M2zl02EglTW7uOpqYvc+hQL5FIGJvNzfz8\nZc6ePY7d7oqrFxqPg+ZmFQ9qdBFWxL637XYXHk8bbW2bcbma4lvZHMzPh9izp5lXXtnG1q0/xONp\nIxJZiArgBf1cmlV0zZq7CYeniUTC+vgbG29kYuI44+MDuFy1ad1v88XowppprRLdNctxb9REXR+z\niU9dSVjje8qspmjMtXWEtRzlKAc5iN+S9TNzq4maiuS1yDUu1CzWtFi1R4tzjZWCNe4LQUPWo/IR\nMSoIRaSQxC1C9mQTk2hci2efvYF//df/z/SYYHCECxfeY2xsALtdxXm2tnbh8+2JxoMqnM5aGhu/\nxMWLJwkEjkatpjGvj8uXPzPEgCayQCBwlH37bmJu7hLxDw5jfVy9epZQ6Bxnzhxlfv5KXA+RyAKh\n0Dnm5lRZmRde2GJIeKSw2VysWXNX3DUY4ykfeeRfcLsbotdTx+zsVNzn1I8fHz52sINgkX5UZlqr\nbGIvS31f9dNPH9nFpwpLjZlgigmxGtTn2boPEool+BLJNS7ULDbVbFsuwlJru5/SXKMgCCsBcdMV\nhCJitdqDy5VUrp3GEh+p4i/ByWOPndOtdqdO7dfLr9x33wu8/PI91NSsxeVq4I47/pIDB7awsBAG\nlNDUXGBbWjZz9eqZlG65xcBmcxOJzKXZ78Lj8TI7ew63u4mHH36Ht976cz777KBeIxWIc5kNhYJx\npVu0z+nQkJ+jwb1MOC/yo2540JPaXTWXGMtiuOHKfbWSSe+OGiSIHz+72W3RBwlWKbOSGJv6JeAM\nMAv8M3BztJ2P5BqlqTC2hfJfoyAI5SRfN11n5iaCIGRLObJ6rkS6u/tNYxKNiYGqqtpNj73mmnv0\nY4LBEb38Sl3deurrO6itXa/34XbX0db2u3ExnpoLrLKELlAcYnVEjaxb18Pk5K8NgteGzebUx6Bc\nhR16fdDh4Sf0Gqnj4wM8++z1LCzMMD8f4pNPXmD16q+yffsB2tq6dIGofU6DwRFWBy6yGvjfjjXx\nH3pSf35HGNFjLP3408ZYplqrXJD7aiXTT7oEP168Fo/x7QduBTzAo5QvNlOzpGqcArQkZ/cDY9HX\nuWT31dpuAq4DnkKEqCAIuSJuuisI8asvPdmWK5C1iJGPC2Yq106jaPnGN96kuloJUperDoDm5pvj\nrGpa+1OnGjh79g327GlmYuJNQFkdp6c/00u9JBIKnWdurnCX0erqNbrrbCJnzvwSr/dGamuvBRxA\nJClLbnPzTbjdXg4d6mVk5B/1GqlqjJNRd995IpF5AoGj/PSntzI5+TY2mwuXq1Zvq83FxdYm/s+t\n78RZmRLXKJcYy1xLoJjdG8unDEhlYY3vqUovU+IF1qMy3+bvxpp6LfKN1zR6rG0yvM4lFlVrewR4\nIYv22WLtGFRr3BeChqxH5SNiVBCKiNVqD1YCudSkzERifcxvfvMjOjv7eOSR39DZ2cfXvnY0bm1U\nrdBWIpEFZmcnmJubYnExBCir47lzw4RCk5Tyq3JxcT6lqJ2fvxSNH71KLEFRPE5nrT6HWqmXVDid\nDdTUrGV2doJIJMyZM0f1Odfm7k8eOMUqT3xt1sQ1WuoYS7mvhMqm0Fqk6TDGpG4ke/H21ei/XwGe\nMWzPRfyX6kFBqeJsBUGwIhIzKghCWSlVaQ9j/Gh3d79pv8ZYxHS4XF7C4fgfecpdVktYZKeqqoVQ\n6GLaGE9znMB8Qn/xVFW1MTt7Lmm7x9MSTap0Iiqa01Nd3c78/IxuPdXqiw4PP5F2rqxQfkUQKpdc\naonmWm5Fi0nVyLbmaKH1UTNRSNkYq8TZCoKQC1JnVBCEiiSXmpTZCEyNxKQ3brc36VhNZKWK2QR0\nd1bNeulw1LN27VYWFuYYHx9AfY1m/o5LJSijZ8HlaohzsTXicjXG7WtouJ6mpt9hZuacnqTJbncn\nWEad2Gy2JLfe2HW52bDhIa5ePcOFC+/rsbNmCYJyWSNBEArBR/YJhECJyo1AAGuJNx+5XYeRUgtl\nQRBKgdQZFTIifvXWodLXIt9SG2bH5eKCaXQX/elPb005hqEhPxcuvB/tv4UrV8Y5dWp/kjuwctNt\n48QJ8/PZbC4cjqo4N9qFhWnOnn2Ds2dfR4vjzIbUQhQgorsHK+wYv88TRer09KecO/cWwaCKZ21p\n2UxVVatx5Dz88Nt8+9sfU1u7jjVr7la9RkvXAEQic3zyyfMEAkd1IZoqQdBSuslW+r2RK1YuB7XS\n1sIamLv0pl4LL/ARsTjPJ8gt3jLX+Mxs22vXUQdMZdm3hrXjhOW+sBayHpWPZNMVBCFnjFlrjx3z\nZ11qw3jcs89eT1vb7RktnEaMCYquXPlc7+snP1mH3e7Ebnfx0ENvcfr0S7rAikQWOXs2VuLFKLim\np0/rJU4cjioWFkIkisv5+emkcWh9FwunsxabzcHCwmx0i7mVVmEnEglz9eqovmV6+nTcMddeez8f\nfPA3BIMjNDXdxNatP2R4eBeXLn3G5OSw3k6zmra0bKaubj0+356MLru5kos1uxjHVRr53kvCciV9\n9mBzjJlytXhLov3sNbw2c5tN1T4V2bbvB64HJoEBVEbh9SbnFwRhpSNuuoIg5Ey+MYTaccb4yI6O\nXrZvfyGr4zV3UYejmo8/foZYUh8Vdwlgt3twOKp0a2JV1SpmZydMBdfExJssLoZwuRq59tr7OXVq\nL0ZRt3r1V+OEbPFx4nbX5yVuHY46FhYu43TWMT9/Wd+urcmhQ726yKmr20Bd3Xqmpj4gFJrU57+5\neRP19dfh8z2lr2Gxa3rm299KqS0q8bj5UkhMYiWQ7/Wlirf0Ye42m2t8pta+DtgC7EtzjLGtk5h1\nNFe3XUEQKgFx0xUEYclIVWojk8uhdpzTWWfYmv3DKs1dVFkClRB1u71xpVEWF0M4HG5AWfy+8Y1f\nUVe3AYejhoWFOV5//U85eXJvNPusco0Nhy8yNjZAokVyauqjrMcGYLdX59Qe5rHb83NQWVwMUVXV\nTlvbbYBKRtTR0auvidGKXFOzlkDgKKHQJLW16/j2t38bzS58hO3bX4hbw3xreqZa+3z7Wym1RaVs\nTb4s94yr+V5fG6qm6cco0afFX74f3b8ZqEaJ02uBC0A7sB9zUZnoltsfPcdllMUz1dj8wCXAFW2r\nfSeYZRS2dikXQRBKi4jRFYT41VuHSl+LVDGEmcq0aMe1tXUBSkD5fHvi2mQTQ6cJFbe7iYcffpeH\nH/513P7FxQU6Onp58MFfUF/fQV3deiYmjjM6epDPPjuYFIMZCNxAa+smEpmbu5B+IhKIj/vMjlDo\nIjZbOkHqwOmsT9imXHVnZwNMTX1ER8fOJGFpFDmaWG9t7eKRR/6F+vqOlDGg+YqjVGufS39DQ36+\n//1NvPrqDrZu/eGKEGlWLltj7e+pUpZLyZVSiKn46zNfC7PzngZCwEViYnEEFbep7X8JJXRHgWFi\nyY92AI8Z+nwMZcHURPGtQC+xB4ja3H8JJWTbov0TPedxQEugtil6rJn1tbIeLFj7vlh5yHpUPhIz\nKghCXpjF9GVrzdq2bV/K7KyJMXRmWXC7u/uTjjdmnJ2bu4DD4dLdcaemVKIfj6c1qRan3e6hq+s/\n0939b3n66dXR/Q6qqlqZnT2b46zELKs2mwu3u4FQ6HzaI1KVgnE663C7G9i583V+9rM7mZ+fxums\nY3FxHrvdzfz8JQBmZydwONxJ86iJnKEhP+HwJaqr29m2bT8ejzdu7aqr25iePh03v/m4xKZa+1z6\nCwZHuHDhPUZH32N4eFfWx62U+FLBSD6xlaUi17jLbEi8vh8Af0G8267ZeWsMfWyOHv9o9H0dyhKq\n0YCyXtahYjsPoiyZmoBsQ4lagCZgreF8HmLW1ICh3V3A54ZxbAKuA54i9TpZ6cGCIAhLjcSMCoKQ\nM0NDfk6e3KuLv+rqdr75TeXSWmgJkMQYOmPsY6oSLQCvvLIt6mobq59pPFaJJacu4ozU1XWwsBBi\ndvY8kcg8VVWrCIevsrCQnLwoG6qrV+t9JZNdKZhrrtnGAw/8HIADB+7SS7h4PG160iUgY6yhMfbS\n42nD6fQQCl3S58HYn8fTGpdUKheRd+TIY3z22UFaWm5h27Z9ea1/vvGTKyW+VLAqS1EX00dyzKfZ\neYPA46jvmacM2/wo6+gAMYH418Auw3YjLag4/IvRPt4F/h3xNU2bgTuAX6CssRjaNpL9wwIp5SII\nywGJGRUEYckIBkfiXF1nZgIcO+bP2uVwaMjP00+vYc+eZl55ZVucO26iW2eixS2VO2hPzz46Onp1\nl1WPx8ulSycBcLka8Hpv0gWYy9UY5xp75coYMzOBaHbZCLOzZ7MSojabi5qaa5K2z8ycNRWidnsN\nbW2/m7FfiFkaAd3NVsXaKutrYoxopn6czjpCoXNcuTKqz4Pb7aWl5RbD/sm4ec3kdq25VD/zzLV8\n8skBQqFzjI8PmLbNhnxdhFdKfKlgVfqJlVYplZgysx6andcLHABeSNi2F5VsaAMwjhK2fxjtax8q\nbtRIkJi184rhfMbfmRdQ4rQm4bg7SV2excy1WGuba1kaQRCWAyJGVxDiV28dKn0ttB//mqDLVQQE\ngyPMzASYm5tibCxevHg8XtxuL4cO9ZrGDqZzB92+/QW2bz+gC5n6+g4AwuFLnD+v4krdbi+PPPIe\na9f6otdSx7/+q5kFUyP1Q75IJBwtB5Mdi4tXOXduOGm7zebm2mt30N6u6oEaY2mHhvzMzV3CZnMx\nP3+ZUOg8tbXrTJMPmaEJvFWrtkS3OPR9q1ffybZt++js7GP1arXfOK+ZRJ4mVuMFblPeglCt73dz\ntqpKEqDSUOnfU0uHmfAqbhzp4OB3UULSg3K7DaY4rxlaTOf1wCpggpg11E+sVqnxu27B8DqMsqQ2\nkezV0QXclrAtOf5eobkSa/Gh1xM/P5UROyr3hbWQ9ah8JGZUEMpIpca6aTGbd9zxJMPDu5LccjNd\nl9Hq19KyOUm8GONGX3hhC9/61kdxiXnMXIET4yBHR3/O7Oxk9Hyx8idudwNHjjzGxYsf4/G0oIUR\nuFyN2O1OkxjP+B9fDQ3XMzd3kdnZiailsvAwhPXr72f79gNxpWt+8pO10bqj8f23tnbR1PRlDh3q\n1ec3XW1QzVqt9T0zM8GZM0dpadnMvfc+k7TfOK+p5lpDW0ctXlcllHpnyT/H+ca5CkLpMMZzFqPG\nZl20j1xqiGoYYzqNHh/GzLo1QD0qhjQbGoB7gD3R9y3EYuaPAdtQMaY/R1lQI9H9RpGrxakmxrtK\n7KggrCQkZlQQyshyjXXLdF2hUJDBwccBW1yNSw0tdlDDrI9EwWuMD02Mq7TZPEQioai77sYk66QW\n8/r663/Kxx//OOP1qTqdEeJ/WKVrq1leHUnH2GxOVq26A5erQReWxnhcDZerkbVr78HneyopjvbM\nmWPMzAQA87qtxrnauvWHpg8Q8kETsKkeSgjCysUYz+lBZZaFwmpsJsaIPoESoe8Ty5Zr1n8bSvjV\nAG8Af476+dcA/BOxhEVaveZqYCbDWJqBk8SEbyPJQtZDLJY0FYnxrhI7KgiVSr4xo2IZFYQyslxj\n3TJd1/DwE4RCwTgLqZHu7n6ee24js7OBaCzjFKFQME7oJGbdNcZGJgu5GubmQoTDl7hw4b2k883N\nXYpaIjP9AFOYJyZK11YlLXK56giHL6MJUperAbvdzdmz6ofqs89+kbm5adMMuzabjTvv/Os4V2Wn\ns47Z2Snm52eNZ0w61jhXuWSpzYTRIrlcHqQIQoxMFsd0GLPhatlsC7X4JWbYNVpfU/XvB76Asoi+\nAdyMiikFWENMiIISogDmGb7juUDMxXeEZCF6M/AZqcVoS/Rf45xqbseCIKwkJGZ0BSF+9dZBW4vl\nGuuW6bqyqUeqXHNbmZ+/zNjYAM8+e31c7dFEwavcU9uYn78cV77FZnOhCbTm5k1R11ojdsbHO0yF\nqNvdTHEcSNT5w+GLOBxubDYXVVVtPPLI+0QiMUtpKHQ+ZamXubkgzz13I6FQMO5ax8cHdDHqdNZz\n553/PenYSnroId9T1kHWopAYRmM8Z74JjmJxp4ODL5McI2osn5KuhucwShR+L2GfmVB0k43Hh/pe\n3Af8PfGCWMMY7lBPTHx+BehAieDzqLjVjVRSwiK5L6yFrEflI2JUEMqIlQveF0Km68okjoaG/Ozd\nuzEuflPL9Do4+B0gWfB6PF7a2roAZXFU2IlEFpibUz90pqZ+Qyg0FXcup7OOK1fGTMZYx9ycFuuU\njDEbb/bYcThqiETCzM6e44UXfpfm5ptNWzY3b2Lt2p64bYuLIY4d8zM8/IRunW1u3kRrq8qIOz8/\nzfDwrqS+cnnooWXINQp/QViZFCuGMdtEQ4kYxfAPTPZrIvcI8dlzjWjX0IrKonstqhboDpT1MpFF\nk21mZAq/GiMmMKuBt6NjPYZKhmS0pAaALyJZdAVhZSIxo4IglBSzZEZmyXKMGGNOE+no2Mn27QdM\n9yXGMH722SHTuqKZsNs9rF69hTNnjGPIrj6ohtPZYHpum80VLSGjuPbaHbhctXz++SHCYdXe4ajB\n7a5nYSGEzeYgFFKi2OVq5JFH3uPIkcf0+eno6GVhIZRXfU4zlmscsyDkTrljGFPFiJ5EWRcbyOw+\nrF3DUVQWXSMuVKKhI2SOES2UVqAKNe63SO2+q8W8fgklUl3R9h0lHp8gCIUidUYFQbAkZi652VpO\nY++Va62x5IkZWr/19R309OzF4XBnNcZE193FxRCTk+8R/xWZvRD1er/C6tXJ9UQ9ntaka7Pb3fT0\n7NXrjzoctTgcbmZmzjI3FyQUOo/drq4jHL7I66//73H1U++886+prm7D42nD7Y6fT62e6z/8g4en\nnmpKqulqRiW59ApCacnXopkL6UrAJLr3apbSUVRCJM19OF0f2jWESSaMSn6UbQx8pp+MtSm2O1EJ\nlLRxpxKiXpTw9wO/RWUAngRuQKymgrB8ETG6ghC/euuw3NfC6OrpcLiA3MRNd3c/HR29XHvtDjo6\neunr+w2dnX187WtH0lr9El1MH3roLWpr1/Hww+9RV9dBfM42B2vW+Ojo6OXs2RuS+gqHg2TvshZP\nY+MXqalZm7R9bu5iXHKllpbNuN0NvPiiD5tNxaguLFzR3Yq1Ng5Hlf7+/Pl3dAtqOHyJ4eFdTE+f\nJhQ6x/h4fM1WrZ7r4uIc4XCQsbEBnn9+U1o33HLHMS/3e6OSkLVYCtLFpcbEsFoL7UFWY/RfzX04\nm9jWxFqgRPv7Jdk/aPOSPu/llRTbsxW7QZS77vvEx63OkVvcbnFrvCYi94W1kPWofAoRo0+iqiS/\nB/yU2LejIAgrnNOnX9KtoXa7O6W4SRWf6PF4qa5uIxy+wsJCCLe7MavYWqMV9umn23n++c00Nn6J\n99//b9H+jT+KFjh79nUWFkLccssuOjv7WLPmbgBcrvpoG0fSOWw2ta25+WaqqlaZjMLOmTNDTE2d\nSNpjdM+trm4nHL7Mxx8/QyBwlLGxAd0CqlFTs5YHH/wFbW23R8+5idradboYdbub2Lp1d0prplm2\n4rm5yxmTR7ndXg4d6i163KjEo+bPypm70goJ63Ey+m8D6mdVOjRL6XvEW0zTxbZq8wnxP9OqgK8D\nj6ESDGXDBbIXlvlyHng3+toFfDX6Ope43UISTwmCsNQUEjO6DTiMMh38ZXTbvzdpJzGjgrDC2LOn\nmbk5lSjIrO6lxtNPr0lZH9MYu6jVATUTs1o8anV1G6dPv8Tc3BQORy0LC7Gn9Ha7h8XF1PXubDYn\nbreXBx88zM9/3ktV1SomJ3+ti0e73c3i4hx2u4vm5pu5cmWM3t43qa/v4JVXtjE2NpBhRuwkWlk7\nOnYyPj6oW0ptNhff/vbHHDiwhZmZQFz8pzHG9vDhRxkdPYjb3cTDD79DfX1HUgyuNi8Ohwu73c3E\nxK8IhSZpbt5EVVUr4+MDaeNLSxU3KvGo+bNy5s5HLDtrITU5K4W7yK4GaboyM2axrVp7Yw1SF8o1\ndxMqTnQjKi4TVBbdefL1BikutwBnUZ+Fz1GC3QecIXOZHT+wH3XNm4FfpGkrCEIxKUed0dcMr4eB\nhwvoSxCEZURb222MjQ3Q0rIZn++plO0WFowCMf6hldGqNzMT4Ngxv/4DfGjIz+nTLzEzcw7Nncso\nOF2uGl2MNjdvYnr60zRi1E4kMk8oNMnPfvY/YbO5mJ7+xDCOOtrabsPtbmJ29pxeE/Sll+6hrm49\nDocLj2cVoVBichANG4k/8Lzer+B2NzI/HxPMzc03c+zYv6O3902Gh3fFJXcy1vPs7u5PSv5k3A/x\ndUU7O/v49rc/1o8B0iaPUtdcmrhRiUfNn5Uzd8XKYFspaJm/M12vsaaon3jR6o3+vxbl0upFubsO\nG9o4iMWNXodKhnTOsD9dbVEnqiTLu2naFBOtFvTzxMZ8mNh4E6/fyAgx8X0eVfImUcAWUj9WEIRi\nU6yY0f8FeLVIfQklQvzqrcNyX4uenn10dvbx4IO/iBM8ia6Gra0qjsmYmEhrE4mEdTfYxB/gWiyk\nJkTd7iaczmp9/8KCOra2dh0uVy02m/lXncfTisvVwIkTKoNtU9NNuqXS7fZis7mZn7/MmTNHCQSO\nEQye0MdTU7NWd69tb7+Tzs4+Vq/+alz/bneT7tYLYLdX4fG0UFvbzqVLJ/XyLG53E+fPv83o6EGG\nh3eldUk2S/6UOK+JwsV4TDblhEoVN5pNv8v93siXcsTylmct8q3JaQXycTHO5nr9DA6+HX29GXPR\nOoLKiLtAvKurlpxNi8HsQgngvWRXTxTgHmBNlm2LiSZEu1DWUoiVqNHmOHHOtYcZdajyMWbuuoW5\n8cp3lLWQ9ah8MllGXwPaTbb/R+Cl6Ov/hHqk1p+qk8cff5zrrrsOAK/Xy6ZNm/D5fEDsQyTv5f1K\neq9hlfGU4n1Pz96k/ceO/YoLF97jxhuVdc7t/lOmp8M89tgBPB6VpOP48V/R1qaejAeDX8Xh2Bi3\nH2JWohMnwOWq5T/9p3c4evQPOXJkAJvNzg03BAmHYWTESSQyyo03Atg5cUJZKNV7eP/9SVatugOH\n412am29iePgjwmG46aYm1q3bzsGDe/X2odB5TkTDQF2uj7HbnZw4AY2NN/DYY09x4MAWfvWrj/X2\nbreXVav+lnff/UtaW9+jqekmTpyIMDX1G268cYDq6nb9+NtuW8/Y2ACBwA1cd90foJHtfF+6pCyh\nJ07ARx/18sd/fIBjx/wsLv4Bb7zxbtHWr9D3b7zxLk7nd3UxZdb+3XfzG+9yf+/xeHE6v5v3eubz\n/t133y3T9e5d4vMV+r4fGEEJxsuo3X4GB79bpOsdAS6j3tbg85ndPzVof158vhrgDQYH/wSYxuf7\nNbCZwcEa4N/j8/0AuGhoT7S/VO9fA5w5tC/kvR2fzw7MMzi4AViLz/dydP9G4BI+3/Ho+14giM/3\nnuH9n+Hz/TMwaehfWZ3VfP0An+/9aPsvAn9gOP9gtL0v7XsN63z+VvZ7DauMZyW9f/fddwkG1YO3\nTz/9lHwptM7o48AfAd3AbIo2EjMqCBbFrAZoKft49dUdpvUwjX1EImHGxtLHNIZCQQYHHwds+HxP\nxcVVjo6+pmejtdmcuvVRuanFWwJaWjYTDk9z6dIn+r7a2nU88si/cOhQb8papxpaW4/Hy1NPeeMy\n5Wr1UI3xnPv23cTVq6O4XA18/evHeOed72XtOpuOVPMqCMJS4CPmQguxuqDFug8T642a9RsEfh9l\nEf03wCCqhMrNwKfAOmJ1SR+N9qeRWw3lpaMNZcXUrtdHbJ6bgFPErsU4N9p8bQbWA3tS9LETMK9Z\nLQhC7pSjzui/BXah7uZUQlQQBAtjVgO0lH2kcjU09uF01mZ0R/R4vGzffoDt219Iiptsa+sClNBs\nb9fcZu3EhKgNcODxtHHffS8kxJ26dHEZi1lNzqir9a+11Y7VcLubmZ2d5NVXd0Tfq+y08/OXAVWS\n5Z13vpeT62w6yl2ORRBWNtp3xWbUT6Jiuxj3k9mV1wu8AoyhEv0EULGTR1ElV4x1SfuJd3qLoLLr\n3oK1OIcSnWtQ38OaiHSgxPYTKFfcdlTSIm1utPn6BUpsGufMGJO8p2QjFwQhewoRo/8vyin/NeAd\n4G+LMiKhZCS6NAjlwyprUYykKLn0kUp0Gfvw+fbkLcyGhvyEwzNUVbVz330vcN99B/B42ohPIBQB\nFgiFzjE8vAu73aW737a1/a7eqrq6DY+nDbvdGM0Qe51YNuWaa3qw2VzRREphzp49zujoQZ599npO\nndpPIHBUt9hqc1Wsch2FilkrYZV7o9RUQqmWlbIWhZNO/BQDL8rlN9t+jd9Nm6L/QyxJkhdVma/d\nsP0MKgGSFQkQ/x2+APSgBOjx6P5dhv2x+qzJZCPs0yP3hbWQ9ah8ChGj1wMdqEeBm4HvFmVEgiAs\nGcWwqBWrj/r6DTgcHg4ffjTvH+fB4AgTE8eZnQ0wPLwLj8erW0rd7iba2+/W2zoctYRCUzz44GFd\ncJ49e1y37k5PnyYUOpeQhTdWY8/YFuDq1TNEImEWF0PMz09rZyEUmtTL3LS0bKajY6c+V0aL8N69\nGy0rSoTiUwyvBMEqpBM/S4Ef5X6qJfHpR2WR7QBqgQ+BloTxaYLUKMz6KazIwlJRg7Kaallzc8m8\nXO61EgQhkUJjRrNBYkbzISLlAAAWz0lEQVQFQciIsY6ix9NKW9vtcTGo2cSmarGTHk8rXu+NuFwN\nbN36Q71UCsDzz2/i8uVRNNfczs4+5uYuJ8Vcan2ZxZoC2GyOaNmX29m2bZ9e/9PYvqqqjdnZc4AD\nm83G6tVfZfv2A/rYY+dAH0ti/chixPUK1kPifIXi4cO8Nqtxu0YbSrz1Yy7ItgHGuslNxERfOalD\n5cpsAyZQmXbrUPGxz1AacelHSsAIQvaUI2ZUEAQhJ1K5Jg4N+blwQWU4VBbLySSLUTaWJM1K6/Xe\nqLvJGkuleDxe6uquw1gSZuvW3UnW3aEhP3Nzl7DZXJgLUReRyALh8EXGxwc4dsyv99Hefle0jZOF\nhTm9j0hknkDgaNzYu7v7qa5WrnKp3JzFgrY8kThfoXikqs2aGPdeh7IompU0+RJKbP3CsK0elRLE\nCtbSWZTwvI5YyZfLwG9QVuBcyulkS2ElYARByA4RoysI8au3Dit1LVIJq2BwRHdlXVxUxddbWjbH\nibNsYlO12EmXqyFlW60ft1uVbzl0qJe/+qvtcZlsNXffSET96Glu3qTXPHU663C7G/T+mps3xdXy\nrK/v0LP4hsMX9T6MbY3j/eY3P0orSooR11tJrJR7oxLifFfKWlQC6dciVRxkP8qSqD1QM2YWPwbc\nRUzEBYCLxMdmTqNql85TfuZRFtt/Tth+hZhgvJV4d+VCMRf5cl9YC1mPyscKj7sEQVghpBJW2nan\ns07POFtXtz7uh3p3d3/W5U/M2mrurg6Hi46Onfh8e/TyLefOwY9/vIo1a+5m27Z9+niamzdRX38d\nPt9TADz77PWEQpPMz1+mpmYtbW234/PtYXj4Cd2VNhy+ZCgnA83NN1Nbuw673YXPtydp7JooMWJ0\nzTW6GVtZuAiCUC60OEiz7V3ESp84gGGUOA1E/wdl9XOZHG9F5gB39F87SkCDuj4PMbdkP+Zzkgv9\n0X60pE+CIJQCiRkVBGHJMNbcNAorbfvs7BTj4+lrjObD0JCfkyf36nVAtdjMxJhNbd/WrbtNx5kq\nzs8Y71pd3c7MTACXq5H29q9y773PpL0Os5hQY39mcaRC7kjsrbAyCRITVDcBo9HtdSg3V60+50WU\npfSfgLtRFlInqoz8oRR9l6s+qQs1Ps3iW4XKBqzVHK0DtgD7iBeREgMqCKUk35hRsYwKglASzH78\na1ZAs309PXtTitV0fWZDMDiiC1EtThSUBXXv3o3MzCgLgZZhFzC1VobDl6iubmfbtv1x5zZafLdt\n25+TJVNzXQY4dsxPT8/eFeeauxSYzbMgLH+MVtMOYmL036Ay7WpWPy/weXRfKypJ0DzxMaSJlMvQ\nEDa8rkFlC9ayAV8PTKJcehOto1oMKCb7BEEoFxIzuoIQv3rrsBLWIl3iHeO+n/70Vj2pEZA2ji6x\nz2xrNRrjRB9++B29fy1m87PPVgNOFhauMDY2oI93aMjP00+vYc+eZk6e3MvZs8eZmVFlY7T9L77o\nY3ExTEdHLw888Br19R05xQKaCc9iJLephDqWZpTq3hCBnzsr4XuqUsh/LYxlX6qj27pQGWhTlTgJ\np3htJVqAdcDXgcdQ1wdwe/RfJ/BzVHZg7fsvVaKn3JD7wlrIelQ+IkYFQSgJxh//Dkd1nDAy7qup\nWZu1wEwUFNlmmq2ubouWi7kNt7sxbp/H441mtFVxnkbLaTA4wsxMgLm5Kd2y6vG0cvnyOK++uoML\nFz4kEDjK+PgADocrL+FoVmO1GMltJAtvPJK9VliZGDPC1mGe6CiR2wyvbwZ2Yr2fizcB9wPPE7u+\nFpQABfV9fhFlIf0iSqz+EHX9F1FZeduA00s5aEEQTJCYUUEQSoLR5VZLFATJMZlafU4tDjOxrdGd\nMhQK8vzzt1JTsxa3u4FIJMzYWOYY00wxmMaaomvW3MV99x1IqDUKTU030dDQyczMJBMTxwGoqmpn\ndjaQd4yr5nZ84cL7ejbhYsWISh1LQRCUCNMSGBlFqB/4CSoRkBd4G+XG60e5vX5MzILqRSUNspqV\nNNeYVa0Gq5dY4qN1xNyTBUEoBKkzKgiCpTBa9xItmsZ9iRardO6Uqk7oeiYmVA1Rp7M2K2tXJhdN\nFX/aCixw5kysFmh3dz91dR243S1UV6/G59ujl3Vpbe3iG994syBr2+nTLxEIHNWFaDFdSMUSKAjF\npTJd31OVfRlBlW1ZAM6jkhdp24+jYkZro8f4URZSq5FOiLoT3jcRc83VMgfXAL/M4jxGV+dKWXdB\nqBxEjK4gxK/eOqy0tUgnjBJdUjOJKKOw9Pn2ZOXOmqnPN954l7a22/V+NUGoxO91zM2dZ3xcxZIa\n+8o1PjSRhYWQ/rqmZm1RhWMl1LE0Y6XdG1ZG1iKecrq+578WWgKjxO+BmoTXd6EE1wfRbca4yhGU\n5VTjJpbGsS5fGoDNxAvSBZQoDwJvoSyiH6KswWYYBeiHxFyB/XJfWAxZj8pHxKggCEBpn/qnEkZm\n5xwefoKrVyf0+MlE8rH4ZSPMVFxpG253fJt0Vt1CaW1VsVnNzZvo6/sgZZ+VaZERhOXF8kqC1Y8S\nWmtRYusMSnBNooSa0ZKqCdfNqPjRIWCNoa96YFXph5ySRGF8CVVPVatJqm3TMux2oFxzUwlRiI+1\nPRndVljiI0EQzJGYUUEQgMxxlUt1znLV2Ex13kzlZgoh276l7qgglJ9SfheUn1SxpRCrVVqNSvhT\ng7IunlviMeZDEzAVfb0JOEJ29UWN87Ef2EWsDI4gCGZIzKggCAVRjqf+Zucsl/Uh1XlL6e6abd/L\nyyIjCJVJpbq+m+NHWTebUeVPtEyzmhA1uqmCcvU9TcxaWAoPjWx/kq6Kjqsq+r6e5BhRjSnAE22f\nrRCF+FjbDlKXwREEoVBEjK4gxK/eOlhxLcpR29LsnKVMvJNqfIODg2VL+JPNnKUa23J037XivbFS\nkbWwDmotCkmkk3jsCBBAibUBlOVvL/BEtN1+jHGSCmOdzjujrxtQNT3N+LLJtibgloRt9UA7sRqh\nmdiIyvKrlaCZJj6ZUaJhJgT8Oot+zQR48t8CuS+shaxH5SNiVBAEoDhP/XNN8GF2zlJaH9KNr1xW\nj2zmLNXYpJaoIKwkjHGMud7viccaExhtIj5Z0VFirq3GOEmjtfBA9PVplIBLZFX0WCPtwClUnCrE\nYlA/Q8WsNpv0Y+bxdxS4AThh2GYsOxNBZcx1GbYFUCI2nYgvZH4FQciXVI+zhGWIz+cr9xCEKMt1\nLfJ1J9XqbTqdNdEyK6URhKnGV871KMQFdzm67y7Xe6MSkbWwDmotjJbJXO93s2O/gxJue4hZALVk\nPS6gjnjLoJaZF5RYmwAeBf4B2IISfBoLwEuG967ouVaj3GurgFHgU5So3YcSuzcQH4tqlnOk1tBG\nqzV6c/T8E9FxX47u96Aso0T3+w3XkEim+fUDI/h8NSgBL267VkC+pyofSWAkCELRyDfBx1Il6AmF\ngjz//K3U1KzF7W4oqfDNZUz5JkVZ3glVBEGIR0sklE8inWyPvQtVZ9RIH8kCzoeyImr7d6NiUGdR\nTneLhrZOlGuusTxMIk5iFtYBYiLT2Fc7cAdwJdrGKDp3okS1n5jrcRfwReB5lOV0E0q0akmY+jFP\n1JRqjhKvWRLJCYIRSWAkZET86q3Dcl2LfF1dl8rCNzz8BKHQBSYmjse5tpZzPQpxD15eCVUUy/Xe\nqERkLayDWotUNUOzIdtjG6L/ugzvnzRpl2hF9BKL4dTEY23033lgLMN554kJzA3Rf419dQEfodyD\n96HE4BbDvj3ErlHb/xrK/Vdz4b2O+CRMia64meZIXfPg4A1IiRfrIN9TlY+IUUEQys5SJQ86ffol\nwuGLALjd3mXj2ioIglActLjQW6PvL6GSG6VqZywDczr6rx1oJRYz2gW8ibJeahlw64CWhD6dwF8D\n61FJiYj2vTPhPEbRuQEVc9qJygocJF5UGkXzUxTm6qxd839DXHQFoXiIm64gCCuGPXuamZtTiTmu\nvXYH99//SplHJAiCYEXS1R1NRaKLby/Kwmp0ezW26QVeR8V5auwE5qLnbgLeQZVWUfGaye61PmKu\nsxDvPusHPkTFwb4Z7acQV2dBENKRr5uuJDASBGHZkpgYqa3tNsbGBmhp2cy99z5T7uEJgrDiSCWq\nrEY/uYu2BsPrWlQ8576E47U2mqUSVOZdzZXWluLcWqZbovu80W1vGvr+CvHWzhFiwlcrXWNMwiQI\nghUQN90VhPjVWwdZi6UhsfRJT88+Ojv7ePDBX8S5A8t6WAdZC+sga1EK8isfsvRrkWt8qh/l0rsK\nZdHUkgwlXmOie+8TxGJLb0IJVLNzJ7rXvoSax5ChzRczHFMc5L6wFrIelY+IUUEQli2JiZGWY8If\nQRAqidIIpPKjWSEnUEIUzK8xUWiOEKv9+QVSi99EEXshYb+WWTeY5hhBEKyIxIwKglB2SlVnVEqf\nCIJgLZZrzKIWY6qxDvgXMl9jqtjUTO7MTcSEp4uYm6+UXBGEcpFvzKiIUUEQys5S1RkVBEEQciWb\nONcgsBEIkH3SI7MEQ9r2vcDF6PsNqAy7xvNvQ7kBb0Jl7tXqiooVVBDKhdQZFTIifvXWQdYinqWq\nM5qKpVyPoSE/L77o49VXdxAKBTMfsMKQe8M6yFpYh/KuRTZxrl5UHdBc3GI1194A8eVjRogJ0SZg\nrcn5tVqiR4ivK1osIepHZerdQbzrb7nXQkhE1qPyETEqCELZWao6o1YgMamSIAiCtck2zjXXpEep\n+tW2a6VdGkzaGc+V63mzIb9EU4Ig5I646QqCICwhr766g9HRg7S2dq0I8S0IQqVTqjjXVP0mbk93\n/lKVysmnzqogrGwkZlQQBKECkKRKgiAIuZBOcPqI1R8tZvKi5ZpoShBKh8SMChkRv3rrIGthLZZy\nPaS8THrk3rAOshbWYWWvRTqX2VKVyknt+ruy18J6yHpUPiJGBUEQBEEQBIuSTnBKLVFBqHTETVcQ\nBEEQBEGwKOIyKwiVgMSMCoIgCIIgCIIgCEuOxIwKGRG/eusga2EtZD2sg6yFdZC1sA6VvRapa3ZW\n4rkqey2WH7IelY+IUUEQBEEQBKFELGXNTqkPKgiVhrjpCoIgCIIgCCViKWt2Sn1QQSgXEjMqCIIg\nCIIgWIylTEBUzHOlq28qCEIiEjMqZET86q2DrIW1kPWwDrIW1kHWwjpU9lqkrtlp7XOZu/xW9los\nP2Q9Kh9nuQcgCIKw3Bga8hMMjuB01tDd3Y/HI0/UBUEQKot09U0FQSgW4qYrCIJQZF580UcgcBSA\nzs4+enr2lnlEgiBUBuIaah2kvqkg5EK+brpiGRUEQSgyTqd6ot7a2sXWrfJEXRCEbNFcQ0EJIXmQ\nVT40l19BEEqJxIyuIMSv3jrIWliLYq9Hd3c/nZ19PPDAa+KimyNyb1iH/7+9ewuR5CrjAP4XEwO6\nYgjqasyGxRsqglEhBi+wYCKJ4O0hj4II4oOgb0ZdwQcRZH0IiORRjEgU8RIUE3CVPIjiiphZL7gx\nBkeiJlHRlUgERdeHU2GGZrq7+lLVX9O/HwxT01Uzc5h/fdN9us45JYtNOHpoqCzqkEUt8th+OqMA\na3bFFVfmxhu/qiMKLOiuJLfGbUmAXWHOKAAAAEtzaxcAAAC2hs7oDjGuvg5Z1CKPOmRRhyzqkEUd\nsqhFHttPZxQAAIDRmTMKAADA0swZBQAAYGvojO4Q4+rrkEUt8qhDFnXIog5Z1CGLWuSx/XRGAQAA\nGJ05owAAACzNnFEAAAC2hs7oDjGuvg5Z1CKPOmRRhyzqkEUdsqhFHttPZxQAAIDRmTMKAADA0swZ\nBQAAYGvojO4Q4+rrkEUt8qhDFnXIog5Z1CGLWuSx/XRGAQAAGJ05owAAACzNnFEAAAC2hs7oDjGu\nvg5Z1CKPOmRRhyzqkEUdsqhFHttPZxQAAIDRmTMKAADA0swZBQAAYGvojO4Q4+rrkEUt8qhDFnXI\nog5Z1CGLWuSx/XRGAQAAGJ05owAAACzNnFEAAAC2hs7oDjGuvg5Z1CKPOmRRhyzqkEUdsqhFHttP\nZxQAAIDRmTMKAADA0swZBQAAYGus0hn9ZJLzSfaSfD/JibW0iMEYV1+HLGqRRx2yqEMWdciiDlnU\nIo/tt0pn9EySVyW5LsndST6xlhYxmL29vU03gY4sapFHHbKoQxZ1yKIOWdQij+23Smf08UPbx5L8\ndcW2MLCLFy9uugl0ZFGLPOqQRR2yqEMWdciiFnlsv8tW/P5PJXl3kieS3LB6cwAAANgF866Mnk3y\niyM+3tbtP53k2iRfSHL7ME1kXfb39zfdBDqyqEUedciiDlnUIYs6ZFGLPLbfum7tcm2Se5K88oh9\nv03yojX9HgAAAGp5KMmLF/2mVYbpviTJg932O5LcP+W4hRsFAAAA03wtbcjuXpKvJ3nuZpsDAAAA\nAAAAMKLPJPl1kvNJvpHkWVOO20/y87ThvT8ZpWW7p28WNye5kDbs+rZxmrZzbk3yqyT/TfKaGcft\nR10MrW8W6mIcV6UtlvebJN9NcuWU4/ajNobS51z/bLf/fJJXj9SuXTQvi1NJ/pFWB/cn+fhoLdst\nn0/yWNoIwGnUxHjm5XEq6mIsJ5Lcl/Y66pdJPjjluI3Wx005WKX3093HUX6X9iKE4fTJ4qlpi0yd\nTHJ52rDrl4/RuB3zsiQvTSvgWR0gdTG8Plmoi/GcSfLhbvu2eM4YW59z/a1pixQmyeuS/Hisxu2Y\nPlmcSvKtUVu1m96U9gJ6WudHTYxrXh6noi7G8rwk13Xbx5I8kBWfM+bd2mUZZ5P8r9s+l+SaGceu\nazVfjtYni+vTnvz2k/wnyVfSFqRivS6kXfnpQ10Mq08W6mI8b09yZ7d9Z5J3zjhWbaxfn3P9cEbn\n0q5eHx+pfbuk7/8ddTC8HyT5+4z9amJc8/JI1MVYHk17oyxJ/pk2AvPqiWMWqo8hOqOHvTcHPeNJ\nl5J8L8lPk7xv4HYwPYsXJHn40Nd/6B5jM9RFDepiPMfThl+l+zztCUttDKPPuX7UMbPeaGY5fbK4\nlOT1aUPf7knyinGaxgQ1UYu62IyTaVesz008vlB9LHtrl7Npl2knfSzJt7vt00n+neSuKT/jDUke\nSfKc7uddSHvng8WsmsWlgdq1i/pkMY+6WI9Vs1AX6zUtj9MTX1/K9L+92hhG33N98qqDGlm/Pn/T\nn6XN2XoiyS1J7k6bdsD41EQd6mJ8x9LurPKhtCukk3rXx7Kd0Zvm7H9P2njhN8845pHu81+SfDNt\neIoXFotbNYs/phXwk06kvYPB4uZl0Ye6WI9Vs1AX6zUrj8fSOqqPJnl+kj9POU5tDKPPuT55zDXd\nY6xXnyweP7R9b5I70uZS/23YpjFBTdSiLsZ1edptPb+U1vGftPH6uDlthaVnzzjm6Ume2W0/I8kP\nk7xl4Hbtoj5ZXJbkobRL7U+LhVqGdl+S107Zpy7GNSsLdTGeMzlYNfQjOXoBI7UxnD7n+uHFKG6I\nxVqG0ieL4zm44nB92vxShnEy/RYwUhPjOJnpeaiL8TwlyReT3D7jmI3Xx4NJfp+D5ZXv6B6/Osl3\nuu0Xpv2T3UtbFvijI7dxV/TJImlDGh5IWzhBFsN4V9r4+X+lXQG6t3tcXYyvTxaJuhjLVWlzQSdv\n7aI2xnPUuf7+7uNJn+v2n8/sFcFZzbwsPpBWA3tJfpT2Qo/1+3KSP6VNcXo4bd0NNbE58/JQF+N5\nY9riqHs56F/cEvUBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAi/g9WwoSRDa/NUgAA\nAABJRU5ErkJggg==\n", + "png": "iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGyWaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9prMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkpD/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEdd05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenqOT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRgnDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMPn8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShNd/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAGu38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dccQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBLpcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNRp6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2vpw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2BFWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\nivQHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxpxBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufGBQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXhu\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdOtWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+JDVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/dM3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhxq1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJdB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9KwDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBedlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQTB7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEblyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DWE5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm27XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1NEBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKwZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4PrfBuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQGCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrlT5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\nUmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nGr2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+CbbaUo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t64aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBXvIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PFv2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGsmkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VNZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZPCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTMjJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2Mrn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbWx74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PIRpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYaW9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq25dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/ABjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8wRJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmzL19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tgCW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GWE+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp75xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcXfh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssGJDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxIeBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgYofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIBaTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdLFVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHkPmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwTrm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0mPK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBhHFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncbkNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HOr/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/QgP1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYlywm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzcz\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF78Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZVGsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erqhDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eLnLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YNxTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4bGmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fayklln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHLlnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBRf+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4HZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27EDo+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRcbHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ldu7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKqTT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1zeXM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n41u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7DYn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rvXvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNOIRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4DfmFPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAAACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", "text": [ - "" + "" ] } ], @@ -155,4 +151,4 @@ "metadata": {} } ] -} +} \ No newline at end of file diff --git a/examples/web_demo/app.py b/examples/web_demo/app.py index e456526fa55..c667ea94c11 100644 --- a/examples/web_demo/app.py +++ b/examples/web_demo/app.py @@ -10,12 +10,13 @@ import tornado.httpserver import numpy as np import pandas as pd -from PIL import Image as PILImage +from PIL import Image import cStringIO as StringIO import urllib -import caffe import exifutil +import caffe + REPO_DIRNAME = os.path.abspath(os.path.dirname(__file__) + '/../..') UPLOAD_FOLDER = '/tmp/caffe_demos_uploads' ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif']) @@ -80,7 +81,7 @@ def classify_upload(): def embed_image_html(image): """Creates an image embedded in HTML base64 format.""" - image_pil = PILImage.fromarray((255 * image).astype('uint8')) + image_pil = Image.fromarray((255 * image).astype('uint8')) image_pil = image_pil.resize((256, 256)) string_buf = StringIO.StringIO() image_pil.save(string_buf, format='png') @@ -114,15 +115,18 @@ class ImagenetClassifier(object): "File for {} is missing. Should be at: {}".format(key, val)) default_args['image_dim'] = 256 default_args['raw_scale'] = 255. - default_args['gpu_mode'] = False def __init__(self, model_def_file, pretrained_model_file, mean_file, raw_scale, class_labels_file, bet_file, image_dim, gpu_mode): logging.info('Loading net and associated files...') + if gpu_mode: + caffe.set_mode_gpu() + else: + caffe.set_mode_cpu() self.net = caffe.Classifier( model_def_file, pretrained_model_file, image_dims=(image_dim, image_dim), raw_scale=raw_scale, - mean=np.load(mean_file), channel_swap=(2, 1, 0), gpu=gpu_mode + mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0) ) with open(class_labels_file) as f: @@ -206,8 +210,9 @@ def start_from_terminal(app): opts, args = parser.parse_args() ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu}) - # Initialize classifier + # Initialize classifier + warm start by forward for allocation app.clf = ImagenetClassifier(**ImagenetClassifier.default_args) + app.clf.net.forward() if opts.debug: app.run(debug=True, host='0.0.0.0', port=opts.port) diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 739583f036e..f3e2c397528 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -30,9 +30,6 @@ namespace caffe { #ifdef USE_GREENTEA -/*template -cl_mem Subregion(cl_mem in, size_t off, size_t size);*/ - viennacl::ocl::handle WrapHandle(cl_mem in, viennacl::ocl::context &ctx); #endif diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 8e4b7b101fa..4e81763c076 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -8,15 +8,6 @@ #ifndef GREENTEA_MATH_FUNCTIONS_HPP_ #define GREENTEA_MATH_FUNCTIONS_HPP_ -// Define ViennaCL flags -#ifndef NDEBUG -#define NDEBUG -#endif - -#ifndef VIENNACL_WITH_OPENCL -#define VIENNACL_WITH_OPENCL -#endif - #include "caffe/greentea/greentea.hpp" #include "caffe/util/math_functions.hpp" #include "viennacl/ocl/context.hpp" @@ -36,17 +27,24 @@ template void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); template -void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, +void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const Dtype alpha, - const cl_mem A, int offA, const cl_mem B, int offB, const Dtype beta, - cl_mem C, int offC); + const cl_mem A, const int offA, const cl_mem B, const int offB, const Dtype beta, + cl_mem C, const int offC); -/*template - void greentea_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, - const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, - Dtype* y); +template +void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, const int M, const int N, + const Dtype alpha, const cl_mem A, const int offA, + const cl_mem x, const int offx, const Dtype beta, + cl_mem y, const int offy); +template +void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const cl_mem X, + const int offX, cl_mem Y, const int offY); + + +/* template void greentea_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, Dtype* Y); diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 3899e6a7d92..abaa6d9073c 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -218,6 +218,9 @@ class ConvolutionSKLayer : public Layer { Blob bias_multiplier_; bool bias_term_; int M_, K_, N_; + + // (FTschopp) Additional parameter for block splitting of the GEMM call + int blocks_; }; /** diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index a2f82089cac..df0401daa1c 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -4,7 +4,7 @@ if(NOT HAVE_PYTHON) endif() include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) -file(GLOB_RECURSE python_srcs ${CMAKE_SOURCE_DIR}/python/*.cpp) +file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp) add_library(pycaffe SHARED ${python_srcs}) target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 03967a21029..dff7f627016 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -163,9 +164,10 @@ struct NdarrayCallPolicies : public bp::default_call_policies { // the shape information from the blob. void* data = PyArray_DATA(reinterpret_cast(result)); Py_DECREF(result); - npy_intp dims[] = {blob->num(), blob->channels(), - blob->height(), blob->width()}; - PyObject* arr_obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32, data); + const int num_axes = blob->num_axes(); + vector dims(blob->shape().begin(), blob->shape().end()); + PyObject *arr_obj = PyArray_SimpleNewFromData(num_axes, dims.data(), + NPY_FLOAT32, data); // SetBaseObject steals a ref, so we need to INCREF. Py_INCREF(pyblob.ptr()); PyArray_SetBaseObject(reinterpret_cast(arr_obj), @@ -174,6 +176,20 @@ struct NdarrayCallPolicies : public bp::default_call_policies { } }; +bp::object Blob_Reshape(bp::tuple args, bp::dict kwargs) { + if (bp::len(kwargs) > 0) { + throw std::runtime_error("Blob.reshape takes no kwargs"); + } + Blob* self = bp::extract*>(args[0]); + vector shape(bp::len(args) - 1); + for (int i = 1; i < bp::len(args); ++i) { + shape[i - 1] = bp::extract(args[i]); + } + self->Reshape(shape); + // We need to explicitly return None to use bp::raw_function. + return bp::object(); +} + BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(SolveOverloads, Solve, 0, 1); BOOST_PYTHON_MODULE(_caffe) { @@ -218,8 +234,9 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("channels", &Blob::channels) .add_property("height", &Blob::height) .add_property("width", &Blob::width) - .add_property("count", &Blob::count) - .def("reshape", &Blob::Reshape) + .add_property("count", static_cast::*)() const>( + &Blob::count)) + .def("reshape", bp::raw_function(&Blob_Reshape)) .add_property("data", bp::make_function(&Blob::mutable_cpu_data, NdarrayCallPolicies())) .add_property("diff", bp::make_function(&Blob::mutable_cpu_diff, @@ -244,7 +261,8 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("iter", &Solver::iter) .def("solve", static_cast::*)(const char*)>( &Solver::Solve), SolveOverloads()) - .def("step", &Solver::Step); + .def("step", &Solver::Step) + .def("restore", &Solver::Restore); bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( diff --git a/python/caffe/classifier.py b/python/caffe/classifier.py index 94dd063a2c7..49f8003ce9d 100644 --- a/python/caffe/classifier.py +++ b/python/caffe/classifier.py @@ -28,7 +28,7 @@ def __init__(self, model_file, pretrained_file, image_dims=None, # configure pre-processing in_ = self.inputs[0] self.transformer = caffe.io.Transformer( - {in_: self.blobs[in_].data.shape for in_ in self.inputs}) + {in_: self.blobs[in_].data.shape}) self.transformer.set_transpose(in_, (2,0,1)) if mean is not None: self.transformer.set_mean(in_, mean) @@ -83,7 +83,7 @@ def predict(self, inputs, oversample=True): for ix, in_ in enumerate(input_): caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_) out = self.forward_all(**{self.inputs[0]: caffe_in}) - predictions = out[self.outputs[0]].squeeze(axis=(2,3)) + predictions = out[self.outputs[0]] # For oversampling, average predictions across crops. if oversample: diff --git a/python/caffe/detector.py b/python/caffe/detector.py index 4ea07fb7b36..a67b818b93f 100644 --- a/python/caffe/detector.py +++ b/python/caffe/detector.py @@ -24,7 +24,7 @@ class Detector(caffe.Net): Detector extends Net for windowed detection by a list of crops or selective search proposals. """ - def __init__(self, model_file, pretrained_file, gpu=False, mean=None, + def __init__(self, model_file, pretrained_file, mean=None, input_scale=None, raw_scale=None, channel_swap=None, context_pad=None): """ @@ -40,7 +40,7 @@ def __init__(self, model_file, pretrained_file, gpu=False, mean=None, # configure pre-processing in_ = self.inputs[0] self.transformer = caffe.io.Transformer( - {in_: self.blobs[in_].data.shape for in_ in self.inputs}) + {in_: self.blobs[in_].data.shape}) self.transformer.set_transpose(in_, (2,0,1)) if mean is not None: self.transformer.set_mean(in_, mean) diff --git a/python/caffe/io.py b/python/caffe/io.py index f51e3a64d36..6ae2cf13cc0 100644 --- a/python/caffe/io.py +++ b/python/caffe/io.py @@ -7,6 +7,7 @@ # Python3 will most likely not be able to load protobuf from caffe.proto import caffe_pb2 except: + import sys if sys.version_info >= (3,0): print("Failed to include caffe_pb2, things might go wrong!") else: @@ -237,12 +238,20 @@ def set_mean(self, in_, mean): mean: mean ndarray (input dimensional or broadcastable) """ self.__check_input(in_) + ms = mean.shape if mean.ndim == 1: + # broadcast channels + if ms[0] != self.inputs[in_][1]: + raise ValueError('Mean channels incompatible with input.') mean = mean[:, np.newaxis, np.newaxis] - mk, mh, mw = mean.shape - in_k, in_h, in_w = self.inputs[in_][1:] - #if mk != in_k or (mh, mw) != (in_h, in_w) and (mh, mw) != (1, 1): - # raise Exception('Mean shape incompatible with input shape.') + else: + # elementwise mean + if len(ms) == 2: + ms = (1,) + ms + if len(ms) != 3: + raise ValueError('Mean shape invalid') + if ms != self.inputs[in_][1:]: + raise ValueError('Mean shape incompatible with input shape.') self.mean[in_] = mean diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index d662d6cc282..3c19261f690 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -41,12 +41,12 @@ def _Net_params(self): @property def _Net_inputs(self): - return [self.blobs.keys()[i] for i in self._inputs] + return [list(self.blobs.keys())[i] for i in self._inputs] @property def _Net_outputs(self): - return [self.blobs.keys()[i] for i in self._outputs] + return [list(self.blobs.keys())[i] for i in self._outputs] def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): @@ -85,8 +85,6 @@ def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): # Set input according to defined shapes and make arrays single and # C-contiguous as Caffe expects. for in_, blob in kwargs.iteritems(): - if blob.ndim != 4: - raise Exception('{} blob is not 4-d'.format(in_)) if blob.shape[0] != self.blobs[in_].num: raise Exception('Input is not batch sized') self.blobs[in_].data[...] = blob diff --git a/python/caffe/test/test_python_layer.py b/python/caffe/test/test_python_layer.py index 383c283959d..dd99f6f15b9 100644 --- a/python/caffe/test/test_python_layer.py +++ b/python/caffe/test/test_python_layer.py @@ -11,8 +11,7 @@ def setup(self, bottom, top): pass def reshape(self, bottom, top): - top[0].reshape(bottom[0].num, bottom[0].channels, bottom[0].height, - bottom[0].width) + top[0].reshape(*bottom[0].data.shape) def forward(self, bottom, top): top[0].data[...] = 10 * bottom[0].data @@ -21,17 +20,16 @@ def backward(self, top, propagate_down, bottom): bottom[0].diff[...] = 10 * top[0].diff def python_net_file(): - f = tempfile.NamedTemporaryFile(delete=False) - f.write("""name: 'pythonnet' force_backward: true - input: 'data' input_dim: 10 input_dim: 9 input_dim: 8 input_dim: 7 - layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } - layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } - layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") - f.close() - return f.name + with tempfile.NamedTemporaryFile(delete=False) as f: + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } + layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") + return f.name class TestPythonLayer(unittest.TestCase): def setUp(self): diff --git a/python/classify.py b/python/classify.py index 81d06369341..4544c51b4c2 100755 --- a/python/classify.py +++ b/python/classify.py @@ -96,23 +96,30 @@ def main(argv): if args.channel_swap: channel_swap = [int(s) for s in args.channel_swap.split(',')] + if args.gpu: + caffe.set_mode_gpu() + print("GPU mode") + else: + caffe.set_mode_cpu() + print("CPU mode") + # Make classifier. classifier = caffe.Classifier(args.model_def, args.pretrained_model, - image_dims=image_dims, gpu=args.gpu, mean=mean, + image_dims=image_dims, mean=mean, input_scale=args.input_scale, raw_scale=args.raw_scale, channel_swap=channel_swap) - if args.gpu: - print('GPU mode') - # Load numpy array (.npy), directory glob (*.jpg), or image file. args.input_file = os.path.expanduser(args.input_file) if args.input_file.endswith('npy'): + print("Loading file: %s" % args.input_file) inputs = np.load(args.input_file) elif os.path.isdir(args.input_file): + print("Loading folder: %s" % args.input_file) inputs =[caffe.io.load_image(im_f) for im_f in glob.glob(args.input_file + '/*.' + args.ext)] else: + print("Loading file: %s" % args.input_file) inputs = [caffe.io.load_image(args.input_file)] print("Classifying %d inputs." % len(inputs)) @@ -123,6 +130,7 @@ def main(argv): print("Done in %.2f s." % (time.time() - start)) # Save + print("Saving results into %s" % args.output_file) np.save(args.output_file, predictions) diff --git a/python/detect.py b/python/detect.py index d395bd97abf..691098f5c53 100755 --- a/python/detect.py +++ b/python/detect.py @@ -107,19 +107,22 @@ def main(argv): if args.channel_swap: channel_swap = [int(s) for s in args.channel_swap.split(',')] + if args.gpu: + caffe.set_mode_gpu() + print("GPU mode") + else: + caffe.set_mode_cpu() + print("CPU mode") + # Make detector. - detector = caffe.Detector(args.model_def, args.pretrained_model, - gpu=args.gpu, mean=mean, + detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean, input_scale=args.input_scale, raw_scale=args.raw_scale, channel_swap=channel_swap, context_pad=args.context_pad) - if args.gpu: - print('GPU mode') - # Load input. t = time.time() - print('Loading input...') + print("Loading input...") if args.input_file.lower().endswith('txt'): with open(args.input_file) as f: inputs = [_.strip() for _ in f.readlines()] diff --git a/python/requirements.txt b/python/requirements.txt index 908373bf452..7bc164a42b5 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -2,7 +2,6 @@ Cython>=0.19.2 numpy>=1.7.1 scipy>=0.13.2 scikit-image>=0.9.3 -scikit-learn>=0.14.1 matplotlib>=1.3.1 ipython>=1.1.0 h5py>=2.2.0 @@ -14,3 +13,4 @@ python-dateutil>=1.4,<2 protobuf>=2.5.0 python-gflags>=2.0 pyyaml>=3.10 +Pillow>=2.3.0 diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index 82f386cf029..0e8c37861b0 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -67,4 +67,3 @@ export PATH=/home/travis/miniconda/bin:$PATH conda update --yes conda conda install --yes numpy scipy matplotlib scikit-image pip pip install protobuf -rm /home/travis/miniconda/lib/libm.* diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 41f96f56f14..3f1a59e1106 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -26,7 +26,6 @@ #include "viennacl/ocl/platform.hpp" #include "viennacl/ocl/backend.hpp" - #ifdef USE_CLBLAS #include #endif @@ -79,12 +78,12 @@ template void greentea_copy(const int N, const cl_mem X, cl_mem Y, template void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); -template -void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, +template +void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const Dtype alpha, const cl_mem A, int offA, - const cl_mem B, int offB, const Dtype beta, cl_mem C, - int offC) { + const int K, const Dtype alpha, const cl_mem A, + const int offA, const cl_mem B, const int offB, + const Dtype beta, cl_mem C, const int offC) { int offArow = offA; int offAcol = 0; @@ -141,91 +140,113 @@ void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, (TransB == CblasNoTrans) ? clblasNoTrans : clblasTrans; viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK(clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, - offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); + GREENTEA_CL_BLAS_CHECK( + clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); } else { - GREENTEA_CL_BLAS_CHECK(clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, - offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); + GREENTEA_CL_BLAS_CHECK( + clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); } #endif } -template void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, +template void greentea_gpu_gemm(const int ctx_id, + const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const cl_mem A, - int offA, const cl_mem B, int offB, - const float beta, cl_mem C, int offC); -template void greentea_gpu_gemm(int ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, - const int M, const int N, const int K, - const double alpha, const cl_mem A, - int offA, const cl_mem B, int offB, - const double beta, cl_mem C, int offC); + const int offA, const cl_mem B, + const int offB, const float beta, + cl_mem C, const int offC); +template void greentea_gpu_gemm(const int ctx_id, + const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, + const int M, const int N, const int K, + const double alpha, const cl_mem A, + const int offA, const cl_mem B, + const int offB, const double beta, + cl_mem C, const int offC); -/* template<> - void greentea_gpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const double alpha, - const double* A, const double* B, - const double beta, double* C) { - // Note that cublas follows fortran order. - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; - cublasOperation_t cuTransA = - (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - cublasOperation_t cuTransB = - (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - CUBLAS_CHECK( - cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, - B, ldb, A, lda, &beta, C, N)); - } +template +void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, + const int M, const int N, const Dtype alpha, + const cl_mem A, const int offA, const cl_mem x, + const int offx, const Dtype beta, cl_mem y, + const int offy) { - template<> - void greentea_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const float alpha, const float* A, - const float* x, const float beta, float* y) { - cublasOperation_t cuTransA = - (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; - CUBLAS_CHECK( - cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, - &beta, y, 1)); - } + int lda = (TransA == CblasNoTrans) ? N : M; - template<> - void greentea_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const double alpha, const double* A, - const double* x, const double beta, double* y) { - cublasOperation_t cuTransA = - (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; - CUBLAS_CHECK( - cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, - &beta, y, 1)); - } +#ifdef USE_VIENNACLBLAS + // TODO +#endif - template<> - void greentea_gpu_axpy(const int N, const float alpha, const float* X, - float* Y) { - CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); - } +#ifdef USE_CLBLAS - template<> - void greentea_gpu_axpy(const int N, const double alpha, const double* X, - double* Y) { - CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); - } + clblasOrder clOrder = clblasRowMajor; + clblasTranspose clTransA = + (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; - void greentea_gpu_memcpy(const size_t N, const void* X, void* Y) { - if (X != Y) { - CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) - } - } + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); + + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + } +#endif +} + +template void greentea_gpu_gemv(const int ctx_id, + const CBLAS_TRANSPOSE TransA, + const int M, const int N, + const float alpha, const cl_mem A, + const int offA, const cl_mem x, + const int offx, const float beta, + cl_mem y, const int offy); +template void greentea_gpu_gemv(const int ctx_id, + const CBLAS_TRANSPOSE TransA, + const int M, const int N, + const double alpha, const cl_mem A, + const int offA, const cl_mem x, + const int offx, const double beta, + cl_mem y, const int offy); + +template +void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const cl_mem X, + const int offX, cl_mem Y, const int offY) { + +#ifdef USE_VIENNACLBLAS + // TODO +#endif + +#ifdef USE_CLBLAS + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + + } +#endif +} + +template void greentea_gpu_axpy(const int ctx_id, const int N, const float alpha, const cl_mem X, + const int offX, cl_mem Y, const int offY); +template void greentea_gpu_axpy(const int ctx_id, const int N, const double alpha, const cl_mem X, + const int offX, cl_mem Y, const int offY); + + +/* template<> void greentea_gpu_scal(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index 3f486ad656d..dc1050fe653 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -12,6 +12,9 @@ template void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { + // TODO: (FTschopp) Dynamically change this, or layer param + blocks_ = 8; + ConvolutionParameter conv_param = this->layer_param_.convolution_param(); CHECK( !conv_param.has_kernel_size() @@ -83,7 +86,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, - width_out, this->device_context_); + width_out / blocks_, this->device_context_); // Set the parameters CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; bias_term_ = this->layer_param_.convolution_param().bias_term(); @@ -132,7 +135,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, template void ConvolutionSKLayer::Reshape(const vector*>& bottom, const vector*>& top) { - LayerSetUp(bottom, top); + //LayerSetUp(bottom, top); } template diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 33e396beaf7..51655e508d4 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -53,7 +53,8 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, } else { // GreenTea backend code #ifdef USE_GREENTEA - std::cout << "CONV GREENTEA BEGIN" << std::endl; + std::cout << "CONV GREENTEA BEGIN: " << this->layer_param().name() + << std::endl; viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( @@ -70,39 +71,45 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, int col_offset = K_ * N_; int top_offset = M_ * N_; + std::cout << "M:" << M_ << std::endl; + std::cout << "N:" << N_ << std::endl; + std::cout << "K:" << K_ << std::endl; + for (int n = 0; n < num_; ++n) { - // First, im2col - greentea_im2col_sk_gpu(program, ctx, bottom_data, - bottom[i]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, - pad_w_, stride_h_, stride_w_, kstride_h_, - kstride_w_, col_data); - ctx.get_queue().finish(); + for (int k = 0; k < blocks_; ++k) { + // First, im2col + greentea_im2col_sk_gpu(program, ctx, bottom_data, + bottom[i]->offset(n), channels_, + height_, width_, kernel_h_, kernel_w_, + pad_h_, pad_w_, stride_h_, stride_w_, + kstride_h_, kstride_w_, col_data); + ctx.get_queue().finish(); - std::cout << "After im2col" << std::endl; + std::cout << "After im2col" << std::endl; - // Second, innerproduct with groups - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, - weight_offset * g, col_data, col_offset * g, - (Dtype) 0., top_data, - top[i]->offset(n) + top_offset * g); - } - ctx.get_queue().finish(); + // Second, innerproduct with groups + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, M_, N_, K_, (Dtype) 1., + weight, weight_offset * g, col_data, + col_offset * g, (Dtype) 0., top_data, + top[i]->offset(n) + top_offset * g); + ctx.get_queue().finish(); + } - std::cout << "After gpu gemm" << std::endl; + std::cout << "After gpu gemm" << std::endl; // Third, add bias - if (bias_term_) { - greentea_gpu_gemm( - this->device_context_.id(), CblasNoTrans, CblasNoTrans, - num_output_, N_, 1, (Dtype) 1., - (cl_mem) (this->blobs_[1]->gpu_data()), 0, - (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., - top_data, top[i]->offset(n)); - ctx.get_queue().finish(); + if (bias_term_) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num_output_, N_, 1, + (Dtype) 1., + (cl_mem) (this->blobs_[1]->gpu_data()), 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., top_data, top[i]->offset(n)); + ctx.get_queue().finish(); + } } } } From 578b6df1423155ecf733e893457b60940bdef918 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 20:39:12 +0200 Subject: [PATCH 009/254] More kernels added. --- CMakeLists.txt | 1 + cmake/Dependencies.cmake | 9 +- .../greentea/greentea_math_functions.hpp | 36 ++++---- include/caffe/vision_layers.hpp | 3 - src/caffe/greentea/cl_kernels.cpp | 24 +++--- .../{activation_kernels.cl => activation.cl} | 0 .../cl_kernels/{aux_kernels.cl => aux.cl} | 0 .../{channel_kernels.cl => channel.cl} | 0 .../greentea/cl_kernels/convolution_sk.cl | 26 ++++++ ...col_sk_gpu_kernels.cl => im2col_sk_gpu.cl} | 0 src/caffe/greentea/cl_kernels/math.cl | 25 ++++++ .../{pooling_sk_kernels.cl => pooling_sk.cl} | 0 .../greentea/greentea_math_functions.cpp | 72 +++++++++------- src/caffe/layers/conv_sk_layer.cpp | 18 ++-- src/caffe/layers/conv_sk_layer.cu | 56 +++++++++++-- src/caffe/layers/softmax_layer.cu | 84 ++++++++++++++----- 16 files changed, 261 insertions(+), 93 deletions(-) rename src/caffe/greentea/cl_kernels/{activation_kernels.cl => activation.cl} (100%) rename src/caffe/greentea/cl_kernels/{aux_kernels.cl => aux.cl} (100%) rename src/caffe/greentea/cl_kernels/{channel_kernels.cl => channel.cl} (100%) create mode 100644 src/caffe/greentea/cl_kernels/convolution_sk.cl rename src/caffe/greentea/cl_kernels/{im2col_sk_gpu_kernels.cl => im2col_sk_gpu.cl} (100%) create mode 100644 src/caffe/greentea/cl_kernels/math.cl rename src/caffe/greentea/cl_kernels/{pooling_sk_kernels.cl => pooling_sk.cl} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 54b044d347b..74fa70c9d20 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,6 +20,7 @@ caffe_option(BUILD_python "Build Python wrapper" ON) set(python_version "2" CACHE STRING "Specify which python version to use") caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE) caffe_option(BUILD_docs "Build documentation" ON IF UNIX OR APPLE) +caffe_option(BUILD_python_layer "Build the caffe python layer" ON) # ---[ Dependencies include(cmake/Dependencies.cmake) diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index b1ac96c6777..f328e8246ab 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -25,7 +25,7 @@ include(cmake/ProtoBuf.cmake) # ---[ HDF5 find_package(HDF5 COMPONENTS HL REQUIRED) -include_directories(SYSTEM ${HDF5_INCLUDE_DIRS}) +include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR}) list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES}) # ---[ LMDB @@ -35,7 +35,7 @@ list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES}) # ---[ LevelDB find_package(LevelDB REQUIRED) -include_directories(SYSTEM ${LEVELDB_INCLUDE}) +include_directories(SYSTEM ${LevelDB_INCLUDE}) list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES}) # ---[ Snappy @@ -127,6 +127,11 @@ if(BUILD_python) endif() if(PYTHONLIBS_FOUND AND NUMPY_FOUND AND Boost_PYTHON_FOUND) set(HAVE_PYTHON TRUE) + if(BUILD_python_layer) + add_definitions(-DWITH_PYTHON_LAYER) + include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) + list(APPEND Caffe_LINKER_LIBS ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) + endif() endif() endif() diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 4e81763c076..757ca63c09f 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -18,31 +18,39 @@ namespace caffe { -void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, viennacl::ocl::context &ctx); - -void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, viennacl::ocl::context &ctx); +void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, + viennacl::ocl::context &ctx); +void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, + viennacl::ocl::context &ctx); template -void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); +void greentea_copy(const int N, const cl_mem X, cl_mem Y, + viennacl::ocl::context &ctx); template void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const Dtype alpha, - const cl_mem A, const int offA, const cl_mem B, const int offB, const Dtype beta, - cl_mem C, const int offC); + const CBLAS_TRANSPOSE TransB, const int M, const int N, + const int K, const Dtype alpha, const cl_mem A, + const int offA, const cl_mem B, const int offB, + const Dtype beta, cl_mem C, const int offC); template -void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, const int M, const int N, - const Dtype alpha, const cl_mem A, const int offA, - const cl_mem x, const int offx, const Dtype beta, - cl_mem y, const int offy); +void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, + const int M, const int N, const Dtype alpha, + const cl_mem A, const int offA, const cl_mem x, + const int offx, const Dtype beta, cl_mem y, + const int offy); template -void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const cl_mem X, - const int offX, cl_mem Y, const int offY); +void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, + const cl_mem X, const int offX, cl_mem Y, + const int offY); +template +void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy); /* template diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index abaa6d9073c..3899e6a7d92 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -218,9 +218,6 @@ class ConvolutionSKLayer : public Layer { Blob bias_multiplier_; bool bias_term_; int M_, K_, N_; - - // (FTschopp) Additional parameter for block splitting of the GEMM call - int blocks_; }; /** diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 5389cb1e0d3..1447fbf8700 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -3,19 +3,23 @@ #include #include namespace caffe { -std::string activation_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void relu_forward_s(const int n, __global const float* in,\n __global float* out, float negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void relu_forward_d(const int n, __global const double* in,\n __global double* out, double negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; -std::string aux_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void gpu_set_s(const int n, const float alpha, __global float* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}\n\n__kernel void gpu_set_d(const int n, const double alpha, __global double* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; -std::string channel_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_channel_max_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_max_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double maxval = (double) -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_subtract_s(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const float* channel_max,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_subtract_d(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const double* channel_max,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_exp_s(const int count, __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_exp_d(const int count, __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_channel_sum_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_sum_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_div_s(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const float* channel_sum,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_div_d(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const double* channel_sum,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_dot_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data_1,\n __global const float* data_2,\n __global float* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}\n\n__kernel void kernel_channel_dot_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data_1,\n __global const double* data_2,\n __global double* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string im2col_sk_gpu_kernel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string pooling_sk_kernels = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n const int use_mask,\n __global int* mask,\n __global float* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string activation = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void relu_forward_s(const int n, __global const float* in,\n __global float* out, float negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void relu_forward_d(const int n, __global const double* in,\n __global double* out, double negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; +std::string aux = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void gpu_set_s(const int n, const float alpha, __global float* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}\n\n__kernel void gpu_set_d(const int n, const double alpha, __global double* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string channel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_channel_max_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_max_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double maxval = (double) -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_subtract_s(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const float* channel_max,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_subtract_d(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const double* channel_max,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_exp_s(const int count, __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_exp_d(const int count, __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_channel_sum_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_sum_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_div_s(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const float* channel_sum,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_div_d(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const double* channel_sum,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_dot_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data_1,\n __global const float* data_2,\n __global float* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}\n\n__kernel void kernel_channel_dot_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data_1,\n __global const double* data_2,\n __global double* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; +std::string convolution_sk = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void convolution_sk_s(__global const float *w, __global const float *in,\n const int in_off, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, __global float *out, const int out_off) {\n\n\n\n for (int index = get_global_id(0); index < 0; index += get_global_size(0)) {\n\n//(*(out+))\n\n }\n}"; +std::string im2col_sk_gpu = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string math = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_mul_s(const int n, __global const float* a, const int offa,\n __global float* b, const int offb, __global float* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}\n\n__kernel void kernel_mul_d(const int n, __global const double* a, const int offa,\n __global double* b, const int offb, __global double* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; +std::string pooling_sk = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n const int use_mask,\n __global int* mask,\n __global float* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_gpu = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MIN 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data,\n __global const float* label,\n __global float* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global float* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((float) (prob_data[n * dim + label_value * spatial_dim + s]),\n (float) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}\n\n__kernel void softmax_loss_forward_gpu_d(int n,\n __global const double* prob_data,\n __global const double* label,\n __global double* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global double* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((double) (prob_data[n * dim + label_value * spatial_dim + s]),\n (double) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; - ss << activation_kernels << "\n\n"; - ss << aux_kernels << "\n\n"; - ss << channel_kernels << "\n\n"; - ss << im2col_sk_gpu_kernel << "\n\n"; - ss << pooling_sk_kernels << "\n\n"; + ss << activation << "\n\n"; + ss << aux << "\n\n"; + ss << channel << "\n\n"; + ss << convolution_sk << "\n\n"; + ss << im2col_sk_gpu << "\n\n"; + ss << math << "\n\n"; + ss << pooling_sk << "\n\n"; ss << softmax_loss_gpu << "\n\n"; std::string kernel_string = ss.str(); const char* kernel_program = kernel_string.c_str(); diff --git a/src/caffe/greentea/cl_kernels/activation_kernels.cl b/src/caffe/greentea/cl_kernels/activation.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/activation_kernels.cl rename to src/caffe/greentea/cl_kernels/activation.cl diff --git a/src/caffe/greentea/cl_kernels/aux_kernels.cl b/src/caffe/greentea/cl_kernels/aux.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/aux_kernels.cl rename to src/caffe/greentea/cl_kernels/aux.cl diff --git a/src/caffe/greentea/cl_kernels/channel_kernels.cl b/src/caffe/greentea/cl_kernels/channel.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/channel_kernels.cl rename to src/caffe/greentea/cl_kernels/channel.cl diff --git a/src/caffe/greentea/cl_kernels/convolution_sk.cl b/src/caffe/greentea/cl_kernels/convolution_sk.cl new file mode 100644 index 00000000000..0f67dbe046c --- /dev/null +++ b/src/caffe/greentea/cl_kernels/convolution_sk.cl @@ -0,0 +1,26 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void convolution_sk_s(__global const float *w, __global const float *in, + const int in_off, const int height, + const int width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, + const int ext_kernel_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, __global float *out, const int out_off) { + + + + for (int index = get_global_id(0); index < 0; index += get_global_size(0)) { + +//(*(out+)) + + } +} diff --git a/src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernels.cl b/src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/im2col_sk_gpu_kernels.cl rename to src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl new file mode 100644 index 00000000000..64374a5e79c --- /dev/null +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -0,0 +1,25 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#pragma OPENCL EXTENSION cl_amd_fp64 : enable + +__kernel void kernel_mul_s(const int n, __global const float* a, const int offa, + __global float* b, const int offb, __global float* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] + b[index + offb]; + } +} + +__kernel void kernel_mul_d(const int n, __global const double* a, const int offa, + __global double* b, const int offb, __global double* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] + b[index + offb]; + } +} diff --git a/src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/pooling_sk_kernels.cl rename to src/caffe/greentea/cl_kernels/pooling_sk.cl diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 3f1a59e1106..cef3536db61 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -86,23 +86,25 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const Dtype beta, cl_mem C, const int offC) { int offArow = offA; + int offBrow = offB; + int offCrow = offC; + + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + int ldc = N; + +#ifdef USE_VIENNACLBLAS + int offAcol = 0; int incArow = 1; int incAcol = 1; - int offBrow = offB; int offBcol = 0; int incBrow = 1; int incBcol = 1; - int offCrow = offC; int offCcol = 0; int incCrow = 1; int incCcol = 1; - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; - int ldc = N; - -#ifdef USE_VIENNACLBLAS ViennaCLBackend backend; ViennaCLBackendCreate(&backend); ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); @@ -218,8 +220,9 @@ template void greentea_gpu_gemv(const int ctx_id, cl_mem y, const int offy); template -void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const cl_mem X, - const int offX, cl_mem Y, const int offY) { +void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, + const cl_mem X, const int offX, cl_mem Y, + const int offY) { #ifdef USE_VIENNACLBLAS // TODO @@ -240,11 +243,38 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const c #endif } -template void greentea_gpu_axpy(const int ctx_id, const int N, const float alpha, const cl_mem X, - const int offX, cl_mem Y, const int offY); -template void greentea_gpu_axpy(const int ctx_id, const int N, const double alpha, const cl_mem X, - const int offX, cl_mem Y, const int offY); +template void greentea_gpu_axpy(const int ctx_id, const int N, + const float alpha, const cl_mem X, + const int offX, cl_mem Y, + const int offY); +template void greentea_gpu_axpy(const int ctx_id, const int N, + const double alpha, const cl_mem X, + const int offX, cl_mem Y, + const int offY); +template +void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_mul = program.get_kernel( + CL_KERNEL_SELECT("kernel_mul")); + viennacl::ocl::enqueue( + oclk_mul(N, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, + WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_mul(const int ctx_id, const int N, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); +template void greentea_gpu_mul(const int ctx_id, const int N, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); /* template<> @@ -408,22 +438,6 @@ template void greentea_gpu_axpy(const int ctx_id, const int N, const dou } } - template<> - void greentea_gpu_mul(const int N, const float* a, const float* b, - float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - mul_kernel<<>>( - N, a, b, y); - } - - template<> - void greentea_gpu_mul(const int N, const double* a, const double* b, - double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - mul_kernel<<>>( - N, a, b, y); - } - template __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index dc1050fe653..49861440e59 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -11,10 +11,6 @@ namespace caffe { template void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - - // TODO: (FTschopp) Dynamically change this, or layer param - blocks_ = 8; - ConvolutionParameter conv_param = this->layer_param_.convolution_param(); CHECK( !conv_param.has_kernel_size() @@ -85,8 +81,9 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; + col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, - width_out / blocks_, this->device_context_); + width_out, this->device_context_); // Set the parameters CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; bias_term_ = this->layer_param_.convolution_param().bias_term(); @@ -95,7 +92,8 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, K_ = channels_ * kernel_h_ * kernel_w_ / group_; N_ = height_out * width_out; for (int top_id = 0; top_id < top.size(); ++top_id) { - top[top_id]->Reshape(num_, num_output_, height_out, width_out, this->device_context_); + top[top_id]->Reshape(num_, num_output_, height_out, width_out, + this->device_context_); } // Check if we need to set up the weights if (this->blobs_.size() > 0) { @@ -109,7 +107,8 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, } // Intialize the weight this->blobs_[0].reset( - new Blob(num_output_, channels_ / group_, kernel_h_, kernel_w_, this->device_context_)); + new Blob(num_output_, channels_ / group_, kernel_h_, kernel_w_, + this->device_context_)); // fill the weights shared_ptr > weight_filler( GetFiller( @@ -117,7 +116,8 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, weight_filler->Fill(this->blobs_[0].get(), this->device_context_); // If necessary, initialize and fill the bias term if (bias_term_) { - this->blobs_[1].reset(new Blob(1, 1, 1, num_output_, this->device_context_)); + this->blobs_[1].reset( + new Blob(1, 1, 1, num_output_, this->device_context_)); shared_ptr > bias_filler( GetFiller( this->layer_param_.convolution_param().bias_filler())); @@ -135,7 +135,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, template void ConvolutionSKLayer::Reshape(const vector*>& bottom, const vector*>& top) { - //LayerSetUp(bottom, top); + LayerSetUp(bottom, top); } template diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 51655e508d4..e6a3cfed89b 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -77,30 +77,77 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int n = 0; n < num_; ++n) { - for (int k = 0; k < blocks_; ++k) { + if (1 == 1) { + // First, im2col greentea_im2col_sk_gpu(program, ctx, bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); + + // Second, innerproduct with groups + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, M_, N_, K_, (Dtype) 1., + weight, weight_offset * g, col_data, + col_offset * g, (Dtype) 0., top_data, + top[i]->offset(n) + top_offset * g); + ctx.get_queue().finish(); + } + + } + else + { + + /*viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( + CL_KERNEL_SELECT("relu_forward")); + viennacl::ocl::enqueue( + oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) top_data, ctx), negative_slope), + ctx.get_queue()); + ctx.get_queue().finish();*/ + + /*for (int k = 0; k < blocks_; ++k) { + + + int blocked_width = (k == blocks_-1)?(width_-k*(width_/blocks_)):width_/blocks_; + + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; + int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; + + int blocked_width_out = (k == blocks_-1)?(width_out - (blocks_ - 1) * (width_out / blocks_)):width_out/blocks_; + + greentea_im2col_sk_gpu(program, ctx, bottom_data, + bottom[i]->offset(n) + (channels_ * height_ * (width_/blocks_)*k), channels_, + height_, blocked_width, kernel_h_, kernel_w_, + pad_h_, pad_w_, stride_h_, stride_w_, + kstride_h_, kstride_w_, col_data); ctx.get_queue().finish(); std::cout << "After im2col" << std::endl; + std::cout << "Num output: " << num_output_ << std::endl; + std::cout << "Height: " << height_ << std::endl; + std::cout << "Width: " << width_ << std::endl; + // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, M_, N_, K_, (Dtype) 1., + CblasNoTrans, M_, height_out*blocked_width_out, K_, (Dtype) 1., weight, weight_offset * g, col_data, col_offset * g, (Dtype) 0., top_data, - top[i]->offset(n) + top_offset * g); + top[i]->offset(n) + top_offset * g + num_output_*height_out*(width_out/blocks_)*k); ctx.get_queue().finish(); } std::cout << "After gpu gemm" << std::endl; -// Third, add bias + }*/ + } + // Third, add bias if (bias_term_) { greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasNoTrans, num_output_, N_, 1, @@ -109,7 +156,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., top_data, top[i]->offset(n)); ctx.get_queue().finish(); - } } } } diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index a7c61e530ea..6dc734f6a01 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -2,7 +2,9 @@ #include #include +#ifdef USE_CUDA #include "thrust/device_vector.h" +#endif #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" @@ -15,6 +17,7 @@ namespace caffe { +#ifdef USE_CUDA template __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, @@ -96,12 +99,14 @@ __global__ void kernel_channel_dot(const int num, const int channels, channel_dot[index] = dot; } } +#endif template void SoftmaxLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA // CUDA backend code const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); @@ -138,7 +143,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, kernel_channel_div<<>>(count, num, channels, spatial_dim, scale_data, top_data); - +#endif } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -205,26 +210,63 @@ template void SoftmaxLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { -const Dtype* top_diff = top[0]->gpu_diff(); -const Dtype* top_data = top[0]->gpu_data(); -Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); -Dtype* scale_data = scale_.mutable_gpu_data(); -int count = top[0]->count(); -int num = top[0]->num(); -int channels = top[0]->channels(); -int spatial_dim = top[0]->height() * top[0]->width(); -caffe_copy(top[0]->count(), top_diff, bottom_diff); -// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. -// NOLINT_NEXT_LINE(whitespace/operators) -kernel_channel_dot<<>>(num, channels, spatial_dim, top_diff, top_data, - scale_data); -// NOLINT_NEXT_LINE(whitespace/operators) -kernel_channel_subtract<<>>(count, num, channels, spatial_dim, - scale_data, bottom_diff); -// elementwise multiplication -caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); + int count = top[0]->count(); + int num = top[0]->num(); + int channels = top[0]->channels(); + int spatial_dim = top[0]->height() * top[0]->width(); + + if(this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + Dtype* scale_data = scale_.mutable_gpu_data(); + caffe_copy(top[0]->count(), top_diff, bottom_diff); + // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_dot<<>>(num, channels, spatial_dim, top_diff, top_data, + scale_data); + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_subtract<<>>(count, num, channels, spatial_dim, + scale_data, bottom_diff); + // elementwise multiplication + caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); +#endif + } else { +#ifdef USE_GREENTEA + const cl_mem top_diff = (cl_mem)(top[0]->gpu_diff()); + const cl_mem top_data = (cl_mem)(top[0]->gpu_data()); + cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); + cl_mem scale_data = (cl_mem)(scale_.mutable_gpu_data()); + + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + greentea_copy(top[0]->count(), top_diff, bottom_diff, ctx); + ctx.get_queue().finish(); + + viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel(CL_KERNEL_SELECT("kernel_channel_dot")); + viennacl::ocl::enqueue( + oclk_channel_dot(count, num, channels, spatial_dim, + WrapHandle(top_diff, ctx), WrapHandle(top_data, ctx), WrapHandle(scale_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + + viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel(CL_KERNEL_SELECT("kernel_channel_subtract")); + viennacl::ocl::enqueue( + oclk_channel_subtract(count, num, channels, spatial_dim, + WrapHandle(scale_data, ctx), WrapHandle(bottom_diff, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + + greentea_gpu_mul(this->device_context_.id(), top[0]->count(),bottom_diff, 0, top_data, 0, bottom_diff, 0); + ctx.get_queue().finish(); +#endif + } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); From 5a0612e8d5efaea5e3a6bbf8748f880c7b724479 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 20:58:19 +0200 Subject: [PATCH 010/254] fixed accidental change mismatch. --- .../imagenet/bvlc_caffenet_full_conv.prototxt | 216 ------------------ 1 file changed, 216 deletions(-) delete mode 100644 examples/imagenet/bvlc_caffenet_full_conv.prototxt diff --git a/examples/imagenet/bvlc_caffenet_full_conv.prototxt b/examples/imagenet/bvlc_caffenet_full_conv.prototxt deleted file mode 100644 index 7b22bfa1404..00000000000 --- a/examples/imagenet/bvlc_caffenet_full_conv.prototxt +++ /dev/null @@ -1,216 +0,0 @@ -# This file is for the net_surgery.ipynb example notebook. -name: "CaffeNetConv" -input: "data" -input_dim: 1 -input_dim: 3 -input_dim: 451 -input_dim: 451 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - convolution_param { - num_output: 96 - kernel_size: 11 - stride: 4 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "conv1" - top: "conv1" -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 3 - stride: 2 - } -} -layer { - name: "norm1" - type: "LRN" - bottom: "pool1" - top: "norm1" - lrn_param { - local_size: 5 - alpha: 0.0001 - beta: 0.75 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "norm1" - top: "conv2" - convolution_param { - num_output: 256 - pad: 2 - kernel_size: 5 - group: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "conv2" - top: "conv2" -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 3 - stride: 2 - } -} -layer { - name: "norm2" - type: "LRN" - bottom: "pool2" - top: "norm2" - lrn_param { - local_size: 5 - alpha: 0.0001 - beta: 0.75 - } -} -layer { - name: "conv3" - type: "Convolution" - bottom: "norm2" - top: "conv3" - convolution_param { - num_output: 384 - pad: 1 - kernel_size: 3 - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "conv3" - top: "conv3" -} -layer { - name: "conv4" - type: "Convolution" - bottom: "conv3" - top: "conv4" - convolution_param { - num_output: 384 - pad: 1 - kernel_size: 3 - group: 2 - } -} -layer { - name: "relu4" - type: "ReLU" - bottom: "conv4" - top: "conv4" -} -layer { - name: "conv5" - type: "Convolution" - bottom: "conv4" - top: "conv5" - convolution_param { - num_output: 256 - pad: 1 - kernel_size: 3 - group: 2 - } -} -layer { - name: "relu5" - type: "ReLU" - bottom: "conv5" - top: "conv5" -} -layer { - name: "pool5" - type: "Pooling" - bottom: "conv5" - top: "pool5" - pooling_param { - pool: MAX - kernel_size: 3 - stride: 2 - } -} -layer { - name: "fc6-conv" - type: "Convolution" - bottom: "pool5" - top: "fc6-conv" - convolution_param { - num_output: 4096 - kernel_size: 6 - } -} -layer { - name: "relu6" - type: "ReLU" - bottom: "fc6-conv" - top: "fc6-conv" -} -layer { - name: "drop6" - type: "Dropout" - bottom: "fc6-conv" - top: "fc6-conv" - dropout_param { - dropout_ratio: 0.5 - } -} -layer { - name: "fc7-conv" - type: "Convolution" - bottom: "fc6-conv" - top: "fc7-conv" - convolution_param { - num_output: 4096 - kernel_size: 1 - } -} -layer { - name: "relu7" - type: "ReLU" - bottom: "fc7-conv" - top: "fc7-conv" -} -layer { - name: "drop7" - type: "Dropout" - bottom: "fc7-conv" - top: "fc7-conv" - dropout_param { - dropout_ratio: 0.5 - } -} -layer { - name: "fc8-conv" - type: "Convolution" - bottom: "fc7-conv" - top: "fc8-conv" - convolution_param { - num_output: 1000 - kernel_size: 1 - } -} -layer { - name: "prob" - type: "Softmax" - bottom: "fc8-conv" - top: "prob" -} From 1e8262e15c5416ed89b310ad7a9080328b69948c Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 21:28:31 +0200 Subject: [PATCH 011/254] Fixed merging errors. --- src/caffe/proto/caffe.proto | 54 ++++-- src/caffe/test/test_accuracy_layer.cpp | 108 +++++++++++- src/caffe/test/test_blob.cpp | 59 ++++++- src/caffe/test/test_concat_layer.cpp | 128 ++++++++++---- src/caffe/test/test_hdf5_output_layer.cpp | 9 +- src/caffe/test/test_hdf5data_layer.cpp | 16 +- src/caffe/test/test_lrn_layer.cpp | 38 +++++ src/caffe/test/test_net.cpp | 17 +- src/caffe/test/test_neuron_layer.cpp | 196 ++++++++++++++++++++++ src/caffe/test/test_pooling_layer.cpp | 8 - src/caffe/test/test_slice_layer.cpp | 6 +- src/caffe/test/test_solver.cpp | 17 +- 12 files changed, 557 insertions(+), 99 deletions(-) diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 9e02939b1ca..489f9aceeda 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -58,8 +58,8 @@ message NetParameter { // The shape of the input blobs. repeated BlobShape input_shape = 8; - - // The dim of the input blobs. For each input blob there should be four + // 4D input dimensions -- deprecated. Use "shape" instead + // If specified, for each input blob there should be four // values specifying the num, channels, height and width of the input blob. // Thus, there should be a total of (4 * #input) numbers. repeated int32 input_dim = 4; @@ -394,6 +394,16 @@ message AccuracyParameter { // the top k scoring classes. By default, only compare to the top scoring // class (i.e. argmax). optional uint32 top_k = 1 [default = 1]; + + // The "label" axis of the prediction blob, whose argmax corresponds to the + // predicted label -- may be negative to index from the end (e.g., -1 for the + // last axis). For example, if axis == 1 and the predictions are + // (N x C x H x W), the label blob is expected to contain N*H*W ground truth + // labels with integer values in {0, 1, ..., C-1}. + optional int32 axis = 2 [default = 1]; + + // If specified, ignore instances with the given label. + optional int32 ignore_label = 3; } // Message that stores parameters used by ArgMaxLayer @@ -405,9 +415,13 @@ message ArgMaxParameter { // Message that stores parameters used by ConcatLayer message ConcatParameter { - // Concat Layer needs to specify the dimension along the concat will happen, - // the other dimensions must be the same for all the bottom blobs - // By default it will concatenate blobs along channels dimension + // The axis along which to concatenate -- may be negative to index from the + // end (e.g., -1 for the last axis). Other axes must have the + // same dimension for all the bottom blobs. + // By default, ConcatLayer concatenates blobs along the "channels" axis ( + optional int32 axis = 2 [default = 1]; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. optional uint32 concat_dim = 1 [default = 1]; } @@ -496,6 +510,7 @@ message DummyDataParameter { repeated FillerParameter data_filler = 1; repeated BlobShape shape = 6; + // 4D dimensions -- deprecated. Use "shape" instead. repeated uint32 num = 2; repeated uint32 channels = 3; repeated uint32 height = 4; @@ -533,6 +548,13 @@ message HDF5DataParameter { optional string source = 1; // Specify the batch size. optional uint32 batch_size = 2; + + // Specify whether to shuffle the data. + // If shuffle == true, the ordering of the HDF5 files is shuffled, + // and the ordering of data within any given HDF5 file is shuffl + // but data between different files are not interleaved; all of a file' + // data are output (in a random order) before moving onto another file. + optional bool shuffle = 3 [default = false]; } // Message that stores parameters used by HDF5OutputLayer @@ -593,6 +615,11 @@ message InnerProductParameter { optional bool bias_term = 2 [default = true]; // whether to have bias terms optional FillerParameter weight_filler = 3; // The filler for the weight optional FillerParameter bias_filler = 4; // The filler for the bias + + // The first axis to be lumped into a single inner product computation; + // all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 5 [default = 1]; } // Message that stores parameters used by LRNLayer @@ -700,12 +727,14 @@ message SigmoidParameter { // Message that stores parameters used by SliceLayer message SliceParameter { - // SliceLayer needs to know which dimension to slice across. - // Currently, SliceLayer only supports slicing across num (dim 0) - // and channels (dim 1). - // By default, SliceLayer slices across channels. - optional uint32 slice_dim = 1 [default = 1]; + // The axis along which to slice -- may be negative to index from the end + // (e.g., -1 for the last axis). + // By default, SliceLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 3 [default = 1] repeated uint32 slice_point = 2; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 slice_dim = 1 [default = 1]; } // Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer @@ -716,6 +745,11 @@ message SoftmaxParameter { CUDNN = 2; } optional Engine engine = 1 [default = DEFAULT]; + + // The axis along which to perform the softmax -- may be negative to i + // from the end (e.g., -1 for the last axis). + // Any other axes will be evaluated as independent softmaxes. + optional int32 axis = 2 [default = 1]; } // Message that stores parameters used by TanHLayer diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index fa59fab1e8a..6cbf51df45e 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -19,10 +19,24 @@ template class AccuracyLayerTest : public ::testing::Test { protected: AccuracyLayerTest() - : blob_bottom_data_(new Blob(100, 10, 1, 1)), - blob_bottom_label_(new Blob(100, 1, 1, 1)), + : blob_bottom_data_(new Blob()), + blob_bottom_label_(new Blob()), blob_top_(new Blob()), top_k_(3) { + vector shape(2); + shape[0] = 100; + shape[1] = 10; + blob_bottom_data_->Reshape(shape); + shape.resize(1); + blob_bottom_label_->Reshape(shape); + FillBottoms(); + + blob_bottom_vec_.push_back(blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_); + } + + virtual void FillBottoms() { // fill the probability values FillerParameter filler_param; GaussianFiller filler(filler_param); @@ -33,14 +47,11 @@ class AccuracyLayerTest : public ::testing::Test { caffe::rng_t* prefetch_rng = static_cast(rng->generator()); Dtype* label_data = blob_bottom_label_->mutable_cpu_data(); - for (int i = 0; i < 100; ++i) { + for (int i = 0; i < blob_bottom_label_->count(); ++i) { label_data[i] = (*prefetch_rng)() % 10; } - - blob_bottom_vec_.push_back(blob_bottom_data_); - blob_bottom_vec_.push_back(blob_bottom_label_); - blob_top_vec_.push_back(blob_top_); } + virtual ~AccuracyLayerTest() { delete blob_bottom_data_; delete blob_bottom_label_; @@ -106,6 +117,89 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { num_correct_labels / 100.0, 1e-4); } +TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { + Caffe::set_mode(Caffe::CPU); + this->blob_bottom_data_->Reshape(2, 10, 4, 5); + vector label_shape(3); + label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; + this->blob_bottom_label_->Reshape(label_shape); + this->FillBottoms(); + LayerParameter layer_param; + layer_param.mutable_accuracy_param()->set_axis(1); + AccuracyLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam max_value; + const int num_labels = this->blob_bottom_label_->count(); + int max_id; + int num_correct_labels = 0; + vector label_offset(3); + for (int n = 0; n < this->blob_bottom_data_->num(); ++n) { + for (int h = 0; h < this->blob_bottom_data_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_data_->width(); ++w) { + max_value = -FLT_MAX; + max_id = 0; + for (int c = 0; c < this->blob_bottom_data_->channels(); ++c) { + const TypeParam pred_value = + this->blob_bottom_data_->data_at(n, c, h, w); + if (pred_value > max_value) { + max_value = pred_value; + max_id = c; + } + } + label_offset[0] = n; label_offset[1] = h; label_offset[2] = w; + const int correct_label = + static_cast(this->blob_bottom_label_->data_at(label_offset)); + if (max_id == correct_label) { + ++num_correct_labels; + } + } + } + } + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / TypeParam(num_labels), 1e-4); +} + +TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + const TypeParam kIgnoreLabelValue = -1; + layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); + AccuracyLayer layer(layer_param); + // Manually set some labels to the ignore label value (-1). + this->blob_bottom_label_->mutable_cpu_data()[2] = kIgnoreLabelValue; + this->blob_bottom_label_->mutable_cpu_data()[5] = kIgnoreLabelValue; + this->blob_bottom_label_->mutable_cpu_data()[32] = kIgnoreLabelValue; + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam max_value; + int max_id; + int num_correct_labels = 0; + int count = 0; + for (int i = 0; i < 100; ++i) { + if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + continue; + } + ++count; + max_value = -FLT_MAX; + max_id = 0; + for (int j = 0; j < 10; ++j) { + if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { + max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + max_id = j; + } + } + if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; + } + } + EXPECT_EQ(count, 97); // We set 3 out of 100 labels to kIgnoreLabelValue. + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / TypeParam(count), 1e-4); +} + TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { LayerParameter layer_param; AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param(); diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index e0678061173..7da6423b67c 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -1,4 +1,5 @@ #include +#include #include "gtest/gtest.h" @@ -31,10 +32,7 @@ TYPED_TEST(BlobSimpleTest, TestInitialization) { EXPECT_EQ(this->blob_preshaped_->height(), 4); EXPECT_EQ(this->blob_preshaped_->width(), 5); EXPECT_EQ(this->blob_preshaped_->count(), 120); - EXPECT_EQ(this->blob_->num(), 0); - EXPECT_EQ(this->blob_->channels(), 0); - EXPECT_EQ(this->blob_->height(), 0); - EXPECT_EQ(this->blob_->width(), 0); + EXPECT_EQ(this->blob_->num_axes(), 0); EXPECT_EQ(this->blob_->count(), 0); } @@ -54,6 +52,59 @@ TYPED_TEST(BlobSimpleTest, TestReshape) { EXPECT_EQ(this->blob_->count(), 120); } +TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { + BlobProto blob_proto; + + // Reshape to (3 x 2). + vector shape(2); + shape[0] = 3; + shape[1] = 2; + this->blob_->Reshape(shape); + + // (3 x 2) blob == (1 x 1 x 3 x 2) legacy blob + blob_proto.set_num(1); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_TRUE(this->blob_->ShapeEquals(blob_proto)); + + // (3 x 2) blob != (0 x 1 x 3 x 2) legacy blob + blob_proto.set_num(0); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); + + // (3 x 2) blob != (3 x 1 x 3 x 2) legacy blob + blob_proto.set_num(3); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); + + // Reshape to (1 x 3 x 2). + shape.insert(shape.begin(), 1); + this->blob_->Reshape(shape); + + // (1 x 3 x 2) blob == (1 x 1 x 3 x 2) legacy blob + blob_proto.set_num(1); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_TRUE(this->blob_->ShapeEquals(blob_proto)); + + // Reshape to (2 x 3 x 2). + shape[0] = 2; + this->blob_->Reshape(shape); + + // (2 x 3 x 2) blob != (1 x 1 x 3 x 2) legacy blob + blob_proto.set_num(1); + blob_proto.set_channels(1); + blob_proto.set_height(3); + blob_proto.set_width(2); + EXPECT_FALSE(this->blob_->ShapeEquals(blob_proto)); +} + template class BlobMathTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index f14f1d2fa4f..662a50fa23b 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -19,9 +19,9 @@ class ConcatLayerTest : public MultiDeviceTest { protected: ConcatLayerTest() - : blob_bottom_0(new Blob(2, 3, 6, 5)), - blob_bottom_1(new Blob(2, 5, 6, 5)), - blob_bottom_2(new Blob(5, 3, 6, 5)), + : blob_bottom_0_(new Blob(2, 3, 6, 5)), + blob_bottom_1_(new Blob(2, 5, 6, 5)), + blob_bottom_2_(new Blob(5, 3, 6, 5)), blob_top_(new Blob()) {} virtual void SetUp() { // fill the values @@ -29,30 +29,30 @@ class ConcatLayerTest : public MultiDeviceTest { FillerParameter filler_param; filler_param.set_value(1.); filler.reset(new ConstantFiller(filler_param)); - filler->Fill(this->blob_bottom_0); + filler->Fill(this->blob_bottom_0_); filler_param.set_value(2.); filler.reset(new ConstantFiller(filler_param)); - filler->Fill(this->blob_bottom_1); + filler->Fill(this->blob_bottom_1_); filler_param.set_value(3.); filler.reset(new ConstantFiller(filler_param)); - filler->Fill(this->blob_bottom_2); - blob_bottom_vec_0.push_back(blob_bottom_0); - blob_bottom_vec_0.push_back(blob_bottom_1); - blob_bottom_vec_1.push_back(blob_bottom_0); - blob_bottom_vec_1.push_back(blob_bottom_2); + filler->Fill(this->blob_bottom_2_); + blob_bottom_vec_0_.push_back(blob_bottom_0_); + blob_bottom_vec_0_.push_back(blob_bottom_1_); + blob_bottom_vec_1_.push_back(blob_bottom_0_); + blob_bottom_vec_1_.push_back(blob_bottom_2_); blob_top_vec_.push_back(blob_top_); } virtual ~ConcatLayerTest() { - delete blob_bottom_0; delete blob_bottom_1; - delete blob_bottom_2; delete blob_top_; + delete blob_bottom_0_; delete blob_bottom_1_; + delete blob_bottom_2_; delete blob_top_; } - Blob* const blob_bottom_0; - Blob* const blob_bottom_1; - Blob* const blob_bottom_2; + Blob* const blob_bottom_0_; + Blob* const blob_bottom_1_; + Blob* const blob_bottom_2_; Blob* const blob_top_; - vector*> blob_bottom_vec_0, blob_bottom_vec_1; + vector*> blob_bottom_vec_0_, blob_bottom_vec_1_; vector*> blob_top_vec_; }; @@ -61,61 +61,115 @@ TYPED_TEST_CASE(ConcatLayerTest, TestDtypesAndDevices); TYPED_TEST(ConcatLayerTest, TestSetupNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - layer_param.mutable_concat_param()->set_concat_dim(0); + layer_param.mutable_concat_param()->set_axis(0); ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_1, this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_1_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), - this->blob_bottom_0->num() + this->blob_bottom_2->num()); - EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels()); - EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height()); - EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width()); + this->blob_bottom_0_->num() + this->blob_bottom_2_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); } TYPED_TEST(ConcatLayerTest, TestSetupChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num()); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); EXPECT_EQ(this->blob_top_->channels(), - this->blob_bottom_0->channels()+this->blob_bottom_1->channels()); - EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height()); - EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width()); + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); } +TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConcatLayer layer(layer_param); + // "channels" index is the third one from the end -- test negative indexing + // by setting axis to -3 and checking that we get the same results as above in + // TestSetupChannels. + layer_param.mutable_concat_param()->set_axis(-3); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(ConcatLayerTest, TestForwardNum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_concat_param()->set_axis(0); + ConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_1_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_1_, this->blob_top_vec_); + for (int n = 0; n < this->blob_bottom_vec_1_[0]->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), + this->blob_bottom_vec_1_[0]->data_at(n, c, h, w)); + } + } + } + } + for (int n = 0; n < this->blob_bottom_vec_1_[1]->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n + 2, c, h, w), + this->blob_bottom_vec_1_[1]->data_at(n, c, h, w)); + } + } + } + } +} -TYPED_TEST(ConcatLayerTest, TestNum) { +TYPED_TEST(ConcatLayerTest, TestForwardChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_0, this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_0->channels(); ++c) { + for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) { for (int h = 0; h < this->blob_top_->height(); ++h) { for (int w = 0; w < this->blob_top_->width(); ++w) { EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), - this->blob_bottom_vec_0[0]->data_at(n, c, h, w)); + this->blob_bottom_vec_0_[0]->data_at(n, c, h, w)); } } } - for (int c = 0; c < this->blob_bottom_1->channels(); ++c) { + for (int c = 0; c < this->blob_bottom_1_->channels(); ++c) { for (int h = 0; h < this->blob_top_->height(); ++h) { for (int w = 0; w < this->blob_top_->width(); ++w) { - EXPECT_EQ(this->blob_top_->data_at(n, c+3, h, w), - this->blob_bottom_vec_0[1]->data_at(n, c, h, w)); + EXPECT_EQ(this->blob_top_->data_at(n, c + 3, h, w), + this->blob_bottom_vec_0_[1]->data_at(n, c, h, w)); } } } } } -TYPED_TEST(ConcatLayerTest, TestGradient) { +TYPED_TEST(ConcatLayerTest, TestGradientNum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_concat_param()->set_axis(0); + ConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, this->blob_bottom_vec_1_, + this->blob_top_vec_); +} + +TYPED_TEST(ConcatLayerTest, TestGradientChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradient(&layer, this->blob_bottom_vec_0, + checker.CheckGradient(&layer, this->blob_bottom_vec_0_, this->blob_top_vec_); } diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp index 441d2313c48..a23034f284a 100644 --- a/src/caffe/test/test_hdf5_output_layer.cpp +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -88,9 +88,6 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { LayerParameter param; param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_); - param.add_bottom("data"); - param.add_bottom("label"); - // This code block ensures that the layer is deconstructed and // the output hdf5 file is closed. { @@ -106,11 +103,13 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { this->input_file_name_; Blob* blob_data = new Blob(); - hdf5_load_nd_dataset(file_id, "data_0", 0, 4, blob_data); + hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4, + blob_data); this->CheckBlobEqual(*(this->blob_data_), *blob_data); Blob* blob_label = new Blob(); - hdf5_load_nd_dataset(file_id, "label_0", 0, 4, blob_label); + hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4, + blob_label); this->CheckBlobEqual(*(this->blob_label_), *blob_label); status = H5Fclose(file_id); diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index 8d3b3d1e987..c9b027f88cf 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -77,15 +77,13 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { EXPECT_EQ(this->blob_top_data_->height(), height); EXPECT_EQ(this->blob_top_data_->width(), width); - EXPECT_EQ(this->blob_top_label_->num(), batch_size); - EXPECT_EQ(this->blob_top_label_->channels(), 1); - EXPECT_EQ(this->blob_top_label_->height(), 1); - EXPECT_EQ(this->blob_top_label_->width(), 1); - - EXPECT_EQ(this->blob_top_label2_->num(), batch_size); - EXPECT_EQ(this->blob_top_label2_->channels(), 1); - EXPECT_EQ(this->blob_top_label2_->height(), 1); - EXPECT_EQ(this->blob_top_label2_->width(), 1); + EXPECT_EQ(this->blob_top_label_->num_axes(), 2); + EXPECT_EQ(this->blob_top_label_->shape(0), batch_size); + EXPECT_EQ(this->blob_top_label_->shape(1), 1); + + EXPECT_EQ(this->blob_top_label2_->num_axes(), 2); + EXPECT_EQ(this->blob_top_label2_->shape(0), batch_size); + EXPECT_EQ(this->blob_top_label2_->shape(1), 1); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index 07425df9b3a..c4e2f8ea7f2 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -138,6 +138,22 @@ TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) { } } +TYPED_TEST(LRNLayerTest, TestForwardAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + LRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -159,6 +175,28 @@ TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) { this->blob_top_vec_); } +TYPED_TEST(LRNLayerTest, TestGradientAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 1680a3f28d5..08106e79274 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -63,18 +63,19 @@ class NetTest : public MultiDeviceTest { " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " num: 5 " - " channels: 1 " - " height: 1 " - " width: 1 " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " " data_filler { " " type: 'gaussian' " " std: 0.01 " " } " + " shape { " + " dim: 5 " + " } " " data_filler { " " type: 'constant' " " value: 0 " diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index ad10720116d..c9d52f247a6 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -99,6 +100,23 @@ class NeuronLayerTest : public MultiDeviceTest { GradientChecker checker(1e-2, 1e-3); checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); } + + void TestPReLU(PReLULayer *layer) { + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype* slope_data = layer->blobs()[0]->cpu_data(); + int hw = this->blob_bottom_->height() * this->blob_bottom_->width(); + int channels = this->blob_bottom_->channels(); + bool channel_shared = layer->layer_param().prelu_param().channel_shared(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + int c = channel_shared ? 0 : (i / hw) % channels; + EXPECT_EQ(top_data[i], + std::max(bottom_data[i], (Dtype)(0)) + + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); + } + } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -392,6 +410,184 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradient) { this->blob_top_vec_); } +TYPED_TEST(NeuronLayerTest, TestPReLUParam) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* slopes = layer.blobs()[0]->cpu_data(); + int count = layer.blobs()[0]->count(); + for (int i = 0; i < count; ++i, ++slopes) { + EXPECT_EQ(*slopes, 0.25); + } +} + +TYPED_TEST(NeuronLayerTest, TestPReLUForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(layer.blobs()[0].get()); + this->TestPReLU(&layer); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUForwardChannelShared) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_prelu_param()->set_channel_shared(true); + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + this->TestPReLU(&layer); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(layer.blobs()[0].get()); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUGradientChannelShared) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_prelu_param()->set_channel_shared(true); + PReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter prelu_layer_param; + LayerParameter relu_layer_param; + relu_layer_param.mutable_relu_param()->set_negative_slope(0.25); + PReLULayer prelu(prelu_layer_param); + ReLULayer relu(relu_layer_param); + // Set up blobs + vector*> blob_bottom_vec_2; + vector*> blob_top_vec_2; + shared_ptr > blob_bottom_2(new Blob()); + shared_ptr > blob_top_2(new Blob()); + blob_bottom_vec_2.push_back(blob_bottom_2.get()); + blob_top_vec_2.push_back(blob_top_2.get()); + blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); + // SetUp layers + prelu.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + relu.SetUp(blob_bottom_vec_2, blob_top_vec_2); + // Check forward + prelu.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + relu.Forward(this->blob_bottom_vec_, blob_top_vec_2); + for (int s = 0; s < blob_top_2->count(); ++s) { + EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); + } + // Check backward + shared_ptr > tmp_blob(new Blob()); + tmp_blob->ReshapeLike(*blob_top_2.get()); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(tmp_blob.get()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + this->blob_top_->mutable_cpu_diff()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + blob_top_2->mutable_cpu_diff()); + vector propagate_down; + propagate_down.push_back(true); + prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); + relu.Backward(blob_top_vec_2, propagate_down, blob_bottom_vec_2); + for (int s = 0; s < blob_bottom_2->count(); ++s) { + EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); + } +} + +TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { + typedef typename TypeParam::Dtype Dtype; + // Set layer parameters + LayerParameter ip_layer_param; + LayerParameter prelu_layer_param; + InnerProductParameter *ip_param = + ip_layer_param.mutable_inner_product_param(); + ip_param->mutable_weight_filler()->set_type("gaussian"); + ip_param->set_num_output(3); + InnerProductLayer ip(ip_layer_param); + PReLULayer prelu(prelu_layer_param); + InnerProductLayer ip2(ip_layer_param); + PReLULayer prelu2(prelu_layer_param); + // Set up blobs + vector*> blob_bottom_vec_2; + vector*> blob_middle_vec_2; + vector*> blob_top_vec_2; + shared_ptr > blob_bottom_2(new Blob()); + shared_ptr > blob_middle_2(new Blob()); + shared_ptr > blob_top_2(new Blob()); + blob_bottom_vec_2.push_back(blob_bottom_2.get()); + blob_middle_vec_2.push_back(blob_middle_2.get()); + blob_top_vec_2.push_back(blob_top_2.get()); + blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); + // SetUp layers + ip.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + prelu.SetUp(this->blob_top_vec_, this->blob_top_vec_); + ip2.SetUp(blob_bottom_vec_2, blob_middle_vec_2); + prelu2.SetUp(blob_middle_vec_2, blob_top_vec_2); + caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), + ip2.blobs()[0]->mutable_cpu_data()); + // Forward in-place + ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); + ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); + prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); + // Forward non-in-place + ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); + ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); + prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); + prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); + // Check numbers + for (int s = 0; s < blob_top_2->count(); ++s) { + EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); + } + // Fill top diff with random numbers + shared_ptr > tmp_blob(new Blob()); + tmp_blob->ReshapeLike(*blob_top_2.get()); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(tmp_blob.get()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + this->blob_top_->mutable_cpu_diff()); + caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + blob_top_2->mutable_cpu_diff()); + // Backward in-place + vector propagate_down; + propagate_down.push_back(true); + prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_top_vec_); + ip.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); + // Backward non-in-place + prelu2.Backward(blob_top_vec_2, propagate_down, blob_middle_vec_2); + ip2.Backward(blob_middle_vec_2, propagate_down, blob_bottom_vec_2); + // Check numbers + for (int s = 0; s < blob_bottom_2->count(); ++s) { + EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); + } + for (int s = 0; s < ip.blobs()[0]->count(); ++s) { + EXPECT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]); + } + for (int s = 0; s < ip.blobs()[1]->count(); ++s) { + EXPECT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]); + } + for (int s = 0; s < prelu.blobs()[0]->count(); ++s) { + EXPECT_EQ(prelu.blobs()[0]->cpu_diff()[s], + prelu2.blobs()[0]->cpu_diff()[s]); + } +} + #ifdef USE_CUDNN template class CuDNNNeuronLayerTest : public ::testing::Test { diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 435caa8381e..e9964e7f0b7 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -976,9 +976,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { EXPECT_EQ(this->blob_top_->width(), 2); } -// This test and all following cuDNN pooling tests with padding are commented -// for now, since cuDNN pooling does not currently support padding. -/* TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; @@ -994,7 +991,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { EXPECT_EQ(this->blob_top_->height(), 4); EXPECT_EQ(this->blob_top_->width(), 3); } -*/ /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { @@ -1062,7 +1058,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } } -/* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; @@ -1107,7 +1102,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); } -*/ /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { @@ -1175,7 +1169,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } } -/* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { @@ -1194,7 +1187,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { } } } -*/ #endif diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index 395be280089..ccd03646d19 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -62,7 +62,7 @@ TYPED_TEST_CASE(SliceLayerTest, TestDtypesAndDevices); TYPED_TEST(SliceLayerTest, TestSetupNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - layer_param.mutable_slice_param()->set_slice_dim(0); + layer_param.mutable_slice_param()->set_axis(0); SliceLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_); EXPECT_EQ(this->blob_bottom_->num(), 3 * this->blob_top_0_->num()); @@ -91,7 +91,7 @@ TYPED_TEST(SliceLayerTest, TestSetupChannels) { TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - layer_param.mutable_slice_param()->set_slice_dim(0); + layer_param.mutable_slice_param()->set_axis(0); SliceLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); const int top_num = this->blob_bottom_->num() / 2; @@ -166,7 +166,7 @@ TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) { // Gradient checks are slow; reduce blob size. this->ReduceBottomBlobSize(); LayerParameter layer_param; - layer_param.mutable_slice_param()->set_slice_dim(0); + layer_param.mutable_slice_param()->set_axis(0); SliceLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, diff --git a/src/caffe/test/test_solver.cpp b/src/caffe/test/test_solver.cpp index 1c2c9bbb740..ceabc9cdd2c 100644 --- a/src/caffe/test/test_solver.cpp +++ b/src/caffe/test/test_solver.cpp @@ -55,14 +55,15 @@ TYPED_TEST(SolverTest, TestInitTrainTestNets) { " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " num: 5 " - " channels: 3 " - " height: 10 " - " width: 10 " - " num: 5 " - " channels: 1 " - " height: 1 " - " width: 1 " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " shape { " + " dim: 5 " + " } " " } " " top: 'data' " " top: 'label' " From c1cde253d6d89a0199818774641224b21a0db946 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 21:30:59 +0200 Subject: [PATCH 012/254] Fixed merging errors. --- tools/caffe.cpp | 17 +++++++++++++++-- tools/extract_features.cpp | 6 +++--- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index f04e28a3674..eb9e97f5e27 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -5,6 +5,7 @@ #include #include +#include "boost/algorithm/string.hpp" #include "caffe/caffe.hpp" using caffe::Blob; @@ -76,6 +77,19 @@ int device_query() { } RegisterBrewFunction(device_query); +// Load the weights from the specified caffemodel(s) into the train and +// test nets. +void CopyLayers(caffe::Solver* solver, const std::string& model_list) { + std::vector model_names; + boost::split(model_names, model_list, boost::is_any_of(",") ); + for (int i = 0; i < model_names.size(); ++i) { + LOG(INFO) << "Finetuning from " << model_names[i]; + solver->net()->CopyTrainedLayersFrom(model_names[i]); + for (int j = 0; j < solver->test_nets().size(); ++j) { + solver->test_nets()[j]->CopyTrainedLayersFrom(model_names[i]); + } + } +} // Train / Finetune a model. int train() { @@ -112,8 +126,7 @@ int train() { LOG(INFO) << "Resuming from " << FLAGS_snapshot; solver->Solve(FLAGS_snapshot); } else if (FLAGS_weights.size()) { - LOG(INFO) << "Finetuning from " << FLAGS_weights; - solver->net()->CopyTrainedLayersFrom(FLAGS_weights); + CopyLayers(&*solver, FLAGS_weights); solver->Solve(); } else { solver->Solve(); diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index f86ff96ca82..364c436dfd8 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -147,9 +147,9 @@ int feature_extraction_pipeline(int argc, char** argv) { int dim_features = feature_blob->count() / batch_size; const Dtype* feature_blob_data; for (int n = 0; n < batch_size; ++n) { - datum.set_height(dim_features); - datum.set_width(1); - datum.set_channels(1); + datum.set_height(feature_blob->height()); + datum.set_width(feature_blob->width()); + datum.set_channels(feature_blob->channels()); datum.clear_data(); datum.clear_float_data(); feature_blob_data = feature_blob->cpu_data() + From 7c12a63eb388317c6823ab6b901dc66ed3dc4498 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 21:44:52 +0200 Subject: [PATCH 013/254] Fixed merge errors. --- src/caffe/solver.cpp | 14 ++++---------- src/caffe/util/io.cpp | 11 ++++++----- test.txt | 1 - 3 files changed, 10 insertions(+), 16 deletions(-) delete mode 100644 test.txt diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index da0783ae3d3..b71c1ca942e 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -485,16 +485,10 @@ void SGDSolver::PreSolve() { update_.clear(); temp_.clear(); for (int i = 0; i < net_params.size(); ++i) { - const Blob* net_param = net_params[i].get(); - history_.push_back(shared_ptr >(new Blob( - net_param->num(), net_param->channels(), net_param->height(), - net_param->width(),Caffe::GetDefaultDeviceContext()))); - update_.push_back(shared_ptr >(new Blob( - net_param->num(), net_param->channels(), net_param->height(), - net_param->width(),Caffe::GetDefaultDeviceContext()))); - temp_.push_back(shared_ptr >(new Blob( - net_param->num(), net_param->channels(), net_param->height(), - net_param->width(),Caffe::GetDefaultDeviceContext()))); + const vector& shape = net_params[i]->shape(); + history_.push_back(shared_ptr>(new Blob(shape,Caffe::GetDefaultDeviceContext()))); + update_.push_back(shared_ptr>(new Blob(shape,Caffe::GetDefaultDeviceContext()))); + temp_.push_back(shared_ptr>(new Blob(shape,Caffe::GetDefaultDeviceContext()))); } } diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index 1532b320901..d7b7aadf3df 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -252,11 +252,12 @@ void hdf5_load_nd_dataset_helper( CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name_; CHECK_EQ(class_, H5T_FLOAT) << "Expected float or double data"; - blob->Reshape( - dims[0], - (dims.size() > 1) ? dims[1] : 1, - (dims.size() > 2) ? dims[2] : 1, - (dims.size() > 3) ? dims[3] : 1, blob->device_context()); + + vector blob_dims(dims.size()); + for (int i = 0; i < dims.size(); ++i) { + blob_dims[i] = dims[i]; + } + blob->Reshape(blob_dims,blob->device_context()); } template <> diff --git a/test.txt b/test.txt deleted file mode 100644 index ec78c958209..00000000000 --- a/test.txt +++ /dev/null @@ -1 +0,0 @@ -PROJECT := caffe\n\nCONFIG_FILE := Makefile.config\ninclude $(CONFIG_FILE)\n\nCXXFLAGS += -std=c++11 -Wno-deprecated-declarations\nLINKFLAGS += -std=c++11 -Wno-deprecated-declarations\nNVCCFLAGS += -Xcompiler "-Wno-deprecated-declarations" -Xlinker "-Wno-deprecated-declarations" -Xarchive "-Wno-deprecated-declarations" -Xnvlink "-Wno-deprecated-declarations"\n\nBUILD_DIR_LINK := $(BUILD_DIR)\nRELEASE_BUILD_DIR ?= .$(BUILD_DIR)_release\nDEBUG_BUILD_DIR ?= .$(BUILD_DIR)_debug\n\nDEBUG ?= 0\nifeq ($(DEBUG), 1)\n BUILD_DIR := $(DEBUG_BUILD_DIR)\n OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR)\nelse\n BUILD_DIR := $(RELEASE_BUILD_DIR)\n OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR)\nendif\n\n\n# All of the directories containing code.\nSRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \\n \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print)\n\n# The target shared library name\nLIB_BUILD_DIR := $(BUILD_DIR)/lib\nSTATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a\nDYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so\n\n##############################\n# Get all source files\n##############################\n# CXX_SRCS are the source files excluding the test ones.\nCXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")\n# CU_SRCS are the cuda source files\nCU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")\n# TEST_SRCS are the test source files\nTEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp\nTEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp")\nTEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS))\nTEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")\nGTEST_SRC := src/gtest/gtest-all.cpp\n# TOOL_SRCS are the source files for the tool binaries\nTOOL_SRCS := $(shell find tools -name "*.cpp")\n# EXAMPLE_SRCS are the source files for the example binaries\nEXAMPLE_SRCS := $(shell find examples -name "*.cpp")\n# BUILD_INCLUDE_DIR contains any generated header files we want to include.\nBUILD_INCLUDE_DIR := $(BUILD_DIR)/src\n# PROTO_SRCS are the protocol buffer definitions\nPROTO_SRC_DIR := src/$(PROJECT)/proto\nPROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto)\n# PROTO_BUILD_DIR will contain the .cc and obj files generated from\n# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files\nPROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR)\nPROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto\n# NONGEN_CXX_SRCS includes all source/header files except those generated\n# automatically (e.g., by proto).\nNONGEN_CXX_SRCS := $(shell find \\n src/$(PROJECT) \\n include/$(PROJECT) \\n python/$(PROJECT) \\n matlab/$(PROJECT) \\n examples \\n tools \\n -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh")\nLINT_SCRIPT := scripts/cpp_lint.py\nLINT_OUTPUT_DIR := $(BUILD_DIR)/.lint\nLINT_EXT := lint.txt\nLINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS)))\nEMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT)\nNONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT)\n# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT)\nPY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp\nPY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so\nPY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp\n# MAT$(PROJECT)_SRC is the matlab wrapper for $(PROJECT)\nMAT$(PROJECT)_SRC := matlab/$(PROJECT)/mat$(PROJECT).cpp\nifneq ($(MATLAB_DIR),)\n MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext)\nendif\nMAT$(PROJECT)_SO := matlab/$(PROJECT)/$(PROJECT).$(MAT_SO_EXT)\n\n##############################\n# Derive generated files\n##############################\n# The generated files for protocol buffers\nPROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \\n $(notdir ${PROTO_SRCS:.proto=.pb.h}))\nPROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \\n $(notdir ${PROTO_SRCS:.proto=.pb.h}))\nPROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc})\nPY_PROTO_BUILD_DIR := python/$(PROJECT)/proto\nPY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py\nPROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \\n $(PY_PROTO_BUILD_DIR)/$(notdir $(file)))\n# The objects corresponding to the source files\n# These objects will be linked into the final shared library, so we\n# exclude the tool, example, and test objects.\nCXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o})\nCU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o})\nPROTO_OBJS := ${PROTO_GEN_CC:.cc=.o}\nOBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)\n# tool, example, and test objects\nTOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o})\nTOOL_BUILD_DIR := $(BUILD_DIR)/tools\nTEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test\nTEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test\nTEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o})\nTEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o})\nTEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS)\nGTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o})\nEXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o})\n# Output files for automatic dependency generation\nDEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \\n ${TEST_CU_OBJS:.o=.d}\n# tool, example, and test bins\nTOOL_BINS := ${TOOL_OBJS:.o=.bin}\nEXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin}\n# symlinks to tool bins without the ".bin" extension\nTOOL_BIN_LINKS := ${TOOL_BINS:.bin=}\n# Put the test binaries in build/test for convenience.\nTEST_BIN_DIR := $(BUILD_DIR)/test\nTEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \\n $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj))))))\nTEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \\n $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj))))))\nTEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS)\n# TEST_ALL_BIN is the test binary that links caffe dynamically.\nTEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin\n\n##############################\n# Derive compiler warning dump locations\n##############################\nWARNS_EXT := warnings.txt\nCXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)})\nCU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)})\nTOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)})\nEXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)})\nTEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)})\nTEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)})\nALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS)\nALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS)\nALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS)\n\nEMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT)\nNONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT)\n\n##############################\n# GreenTea backend related include and lib\n##############################\n\nifeq ($(USE_GREENTEA),1)\n # Find a valid OpenCL library\n ifdef OPENCL_INC\n CLLINC = '$(OPENCL_INC)'\n endif\n \n ifdef OPENCL_LIB\n CLLIBS = '$(OPENCL_LIB)'\n endif\n \n ifdef OPENCLROOT\n CLLIBS = '$(OPENCLROOT)'\n endif\n \n ifdef CUDA_PATH\n CLLIBS = '$(CUDA_PATH)/lib/x64'\n endif\n \n ifdef INTELOCLSDKROOT\n CLLIBS = '$(INTELOCLSDKROOT)/lib/x64'\n endif\n \n ifdef AMDAPPSDKROOT\n CLLIBS = '$(AMDAPPSDKROOT)/lib/x86_64'\n CLLINC = '$(AMDAPPSDKROOT)/include'\n endif\n # Requires valid OpenCL library\n LIBRARY_DIRS += $(CLLIBS)\n # Requires valid OpenCL headers and valid ViennaCL\n INCLUDE_DIRS += $(CLLINC) $(VIENNACL_DIR)\n # Requires OpenCL compile library flag and librt\n LIBRARIES += OpenCL rt\n # Additional flags\n COMMON_FLAGS += -DUSE_GREENTEA\nendif\n\n##############################\n# Derive include and lib directories\n##############################\nCUDA_INCLUDE_DIR := $(CUDA_DIR)/include\n\nCUDA_LIB_DIR :=\n# add /lib64 only if it exists\nifneq ("$(wildcard $(CUDA_DIR)/lib64)","")\n CUDA_LIB_DIR += $(CUDA_DIR)/lib64\nendif\nCUDA_LIB_DIR += $(CUDA_DIR)/lib\n\nINCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include\nifneq ($(CPU_ONLY), 1)\n INCLUDE_DIRS += $(CUDA_INCLUDE_DIR)\n LIBRARY_DIRS += $(CUDA_LIB_DIR)\n LIBRARIES += cudart cublas curand\nendif\nLIBRARIES += glog gflags protobuf leveldb snappy \\n lmdb boost_system hdf5_hl hdf5 m \\n opencv_core opencv_highgui opencv_imgproc\nPYTHON_LIBRARIES := boost_python python2.7\nWARNINGS := -Wall -Wno-sign-compare\n\n##############################\n# Set build directories\n##############################\n\nDISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib\nDIST_ALIASES := dist\nifneq ($(strip $(DISTRIBUTE_DIR)),distribute)\n DIST_ALIASES += distribute\nendif\n\nALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \\n $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \\n $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \\n $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR))\n\n##############################\n# Set directory for Doxygen-generated documentation\n##############################\nDOXYGEN_CONFIG_FILE ?= ./.Doxyfile\n# should be the same as OUTPUT_DIRECTORY in the .Doxyfile\nDOXYGEN_OUTPUT_DIR ?= ./doxygen\nDOXYGEN_COMMAND ?= doxygen\n# All the files that might have Doxygen documentation.\nDOXYGEN_SOURCES := $(shell find \\n src/$(PROJECT) \\n include/$(PROJECT) \\n python/ \\n matlab/ \\n examples \\n tools \\n -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \\n -name "*.py" -or -name "*.m")\nDOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE)\n\n\n##############################\n# Configure build\n##############################\n\n# Determine platform\nUNAME := $(shell uname -s)\nifeq ($(UNAME), Linux)\n LINUX := 1\nelse ifeq ($(UNAME), Darwin)\n OSX := 1\nendif\n\n# Linux\nifeq ($(LINUX), 1)\n CXX ?= /usr/bin/g++\n GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)\n # older versions of gcc are too dumb to build boost with -Wuninitalized\n ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1)\n WARNINGS += -Wno-uninitialized\n endif\n # boost::thread is reasonably called boost_thread (compare OS X)\n # We will also explicitly add stdc++ to the link target.\n LIBRARIES += boost_thread stdc++\nendif\n\n# OS X:\n# clang++ instead of g++\n# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0\nifeq ($(OSX), 1)\n CXX := /usr/bin/clang++\n CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d')\n ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1)\n CXXFLAGS += -stdlib=libstdc++\n LINKFLAGS += -stdlib=libstdc++\n endif\n # clang throws this warning for cuda headers\n WARNINGS += -Wno-unneeded-internal-declaration\n # gtest needs to use its own tuple to not conflict with clang\n COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1\n # boost::thread is called boost_thread-mt to mark multithreading on OS X\n LIBRARIES += boost_thread-mt\n # we need to explicitly ask for the rpath to be obeyed\n DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so\n ORIGIN := @loader_path\nelse\n ORIGIN := \$$ORIGIN\nendif\n\n# Custom compiler\nifdef CUSTOM_CXX\n CXX := $(CUSTOM_CXX)\nendif\n\n# Static linking\nifneq (,$(findstring clang++,$(CXX)))\n STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME)\nelse ifneq (,$(findstring g++,$(CXX)))\n STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive\nelse\n # The following line must not be indented with a tab, since we are not inside a target\n $(error Cannot static link with the $(CXX) compiler)\nendif\n\n# Debugging\nifeq ($(DEBUG), 1)\n COMMON_FLAGS += -DDEBUG -g -O0\n NVCCFLAGS += -G\nelse\n COMMON_FLAGS += -DNDEBUG -O2\nendif\n\n# cuDNN acceleration configuration.\nifeq ($(USE_CUDNN), 1)\n LIBRARIES += cudnn\n COMMON_FLAGS += -DUSE_CUDNN\nendif\n\n# CPU-only configuration\nifeq ($(CPU_ONLY), 1)\n OBJS := $(PROTO_OBJS) $(CXX_OBJS)\n TEST_OBJS := $(TEST_CXX_OBJS)\n TEST_BINS := $(TEST_CXX_BINS)\n ALL_WARNS := $(ALL_CXX_WARNS)\n TEST_FILTER := --gtest_filter="-*GPU*"\n COMMON_FLAGS += -DCPU_ONLY\nendif\n\n# Python layer support\nifeq ($(WITH_PYTHON_LAYER), 1)\n COMMON_FLAGS += -DWITH_PYTHON_LAYER\n LIBRARIES += $(PYTHON_LIBRARIES)\nendif\n\n# BLAS configuration (default = ATLAS)\nBLAS ?= atlas\nifeq ($(BLAS), mkl)\n # MKL\n LIBRARIES += mkl_rt\n COMMON_FLAGS += -DUSE_MKL\n MKL_DIR ?= /opt/intel/mkl\n BLAS_INCLUDE ?= $(MKL_DIR)/include\n BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64\nelse ifeq ($(BLAS), open)\n # OpenBLAS\n LIBRARIES += openblas\nelse\n # ATLAS\n ifeq ($(LINUX), 1)\n ifeq ($(BLAS), atlas)\n # Linux simply has cblas and atlas\n LIBRARIES += cblas atlas\n endif\n else ifeq ($(OSX), 1)\n # OS X packages atlas as the vecLib framework\n LIBRARIES += cblas\n # 10.10 has accelerate while 10.9 has veclib\n XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6')\n ifneq (,$(findstring version: 6,$(XCODE_CLT_VER)))\n BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/\n LDFLAGS += -framework Accelerate\n else\n BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/\n LDFLAGS += -framework vecLib\n endif\n endif\nendif\nINCLUDE_DIRS += $(BLAS_INCLUDE)\nLIBRARY_DIRS += $(BLAS_LIB)\n\nLIBRARY_DIRS += $(LIB_BUILD_DIR)\n\n# Automatic dependency generation (nvcc is handled separately)\nCXXFLAGS += -MMD -MP\n\n# Complete build flags.\nCOMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))\nCXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)\nNVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)\n# mex may invoke an older gcc that is too liberal with -Wuninitalized\nMATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized\nLINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)\n\nUSE_PKG_CONFIG ?= 0\nifeq ($(USE_PKG_CONFIG), 1)\n PKG_CONFIG := $(shell pkg-config opencv --libs)\nelse\n PKG_CONFIG :=\nendif\nLDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \\n $(foreach library,$(LIBRARIES),-l$(library))\nPYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library))\n\n# 'superclean' target recursively* deletes all files ending with an extension\n# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older\n# versions of Caffe that do not place all generated files in a location known\n# to the 'clean' target.\n#\n# 'supercleanlist' will list the files to be deleted by make superclean.\n#\n# * Recursive with the exception that symbolic links are never followed, per the\n# default behavior of 'find'.\nSUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo\n\n# Set the sub-targets of the 'everything' target.\nEVERYTHING_TARGETS := all py$(PROJECT) test warn lint\n# Only build matcaffe as part of "everything" if MATLAB_DIR is specified.\nifneq ($(MATLAB_DIR),)\n EVERYTHING_TARGETS += mat$(PROJECT)\nendif\n\n##############################\n# Define build targets\n##############################\n.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \\n py mat py$(PROJECT) mat$(PROJECT) proto runtest \\n superclean supercleanlist supercleanfiles warn everything\n\nall: clkernels $(STATIC_NAME) $(DYNAMIC_NAME) tools examples\n\neverything: $(EVERYTHING_TARGETS)\n\nlinecount:\n cloc --read-lang-def=$(PROJECT).cloc \\n src/$(PROJECT) include/$(PROJECT) tools examples \\n python matlab\n\nlint: $(EMPTY_LINT_REPORT)\n\nlintclean:\n @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT)\n\ndocs: $(DOXYGEN_OUTPUT_DIR)\n @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen\n\n$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES)\n $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE)\n\n$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR)\n @ cat $(LINT_OUTPUTS) > $@\n @ if [ -s "$@" ]; then \\n cat $@; \\n mv $@ $(NONEMPTY_LINT_REPORT); \\n echo "Found one or more lint errors."; \\n exit 1; \\n fi; \\n $(RM) $(NONEMPTY_LINT_REPORT); \\n echo "No lint errors!";\n\n$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR)\n @ mkdir -p $(dir $@)\n @ python $(LINT_SCRIPT) $< 2>&1 \\n | grep -v "^Done processing " \\n | grep -v "^Total errors found: 0" \\n > $@ \\n || true\n\ntest: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS)\n\ntools: $(TOOL_BINS) $(TOOL_BIN_LINKS)\n\nexamples: $(EXAMPLE_BINS)\n\npy$(PROJECT): py\n\npy: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)\n\n$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME)\n @ echo CXX/LD -o $@ $<\n $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \\n -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \\n -Wl,-rpath,$(ORIGIN)/../../build/lib\n\nmat$(PROJECT): mat\n\nmat: $(MAT$(PROJECT)_SO)\n\n$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)\n @ if [ -z "$(MATLAB_DIR)" ]; then \\n echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \\n "to build mat$(PROJECT)."; \\n exit 1; \\n fi\n @ echo MEX $<\n $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \\n CXX="$(CXX)" \\n CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \\n CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@\n\nruntest: $(TEST_ALL_BIN)\n $(TOOL_BUILD_DIR)/caffe\n $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER)\n\npytest: py\n cd python; python -m unittest discover -s caffe/test\n\nwarn: $(EMPTY_WARN_REPORT)\n\n$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)\n @ cat $(ALL_WARNS) > $@\n @ if [ -s "$@" ]; then \\n cat $@; \\n mv $@ $(NONEMPTY_WARN_REPORT); \\n echo "Compiler produced one or more warnings."; \\n exit 1; \\n fi; \\n $(RM) $(NONEMPTY_WARN_REPORT); \\n echo "No compiler warnings!";\n\n$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o\n\n$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked\n\n# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link\n# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it\n# exists and $(DEBUG) is toggled later.\n$(BUILD_DIR)/.linked:\n @ mkdir -p $(BUILD_DIR)\n @ $(RM) $(OTHER_BUILD_DIR)/.linked\n @ $(RM) -r $(BUILD_DIR_LINK)\n @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK)\n @ touch $@\n\n$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)\n @ mkdir -p $@\n\n$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)\n @ echo LD -o $@\n $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS)\n\n$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)\n @ echo AR -o $@\n $(Q)ar rcs $@ $(OBJS)\n\n$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS)\n @ echo CXX $<\n $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \\n || (cat $@.$(WARNS_EXT); exit 1)\n @ cat $@.$(WARNS_EXT)\n\n$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \\n | $(PROTO_BUILD_DIR)\n @ echo CXX $<\n $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \\n || (cat $@.$(WARNS_EXT); exit 1)\n @ cat $@.$(WARNS_EXT)\n\n$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS)\n @ echo NVCC $<\n $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \\n -odir $(@D)\n $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \\n || (cat $@.$(WARNS_EXT); exit 1)\n @ cat $@.$(WARNS_EXT)\n\n$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \\n | $(DYNAMIC_NAME) $(TEST_BIN_DIR)\n @ echo CXX/LD -o $@ $<\n $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \\n -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib\n\n$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \\n $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)\n @ echo LD $<\n $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \\n -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib\n\n$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \\n $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)\n @ echo LD $<\n $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \\n -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib\n\n# Target for extension-less symlinks to tool binaries with extension '*.bin'.\n$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR)\n @ $(RM) $@\n @ ln -s $(abspath $<) $@\n\n$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME)\n @ echo CXX/LD -o $@\n $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \\n -Wl,-rpath,$(ORIGIN)/../lib\n\n$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME)\n @ echo CXX/LD -o $@\n $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \\n -Wl,-rpath,$(ORIGIN)/../../lib\n \nclkernels: src/caffe/greentea/cl_kernels/*.cl\n src/caffe/greentea/cl_kernels.sh\n\nproto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER)\n\n$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \\n $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR)\n @ echo PROTOC $<\n $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $<\n\n$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \\n $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR)\n @ echo PROTOC \(python\) $<\n $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $<\n\n$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)\n touch $(PY_PROTO_INIT)\n\nclean:\n @- $(RM) -rf $(ALL_BUILD_DIRS)\n @- $(RM) -rf $(OTHER_BUILD_DIR)\n @- $(RM) -rf $(BUILD_DIR_LINK)\n @- $(RM) -rf $(DISTRIBUTE_DIR)\n @- $(RM) $(PY$(PROJECT)_SO)\n @- $(RM) $(MAT$(PROJECT)_SO)\n\nsupercleanfiles:\n $(eval SUPERCLEAN_FILES := $(strip \\n $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . -name '*$(ext)' \\n -not -path './data/*'))))\n\nsupercleanlist: supercleanfiles\n @ \\n if [ -z "$(SUPERCLEAN_FILES)" ]; then \\n echo "No generated files found."; \\n else \\n echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \\n fi\n\nsuperclean: clean supercleanfiles\n @ \\n if [ -z "$(SUPERCLEAN_FILES)" ]; then \\n echo "No generated files found."; \\n else \\n echo "Deleting the following generated files:"; \\n echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \\n $(RM) $(SUPERCLEAN_FILES); \\n fi\n\n$(DIST_ALIASES): $(DISTRIBUTE_DIR)\n\n$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS)\n # add include\n cp -r include $(DISTRIBUTE_DIR)/\n mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto\n cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto\n # add tool and example binaries\n cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin\n cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin\n # add libraries\n cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib\n cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib\n # add python - it's not the standard way, indeed...\n cp -r python $(DISTRIBUTE_DIR)/python\n\n-include $(DEPS) From 74f5f9398304535e869f1042f85624b92c221d8c Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 22:12:49 +0200 Subject: [PATCH 014/254] Fixed more merge errors. --- include/caffe/data_layers.hpp | 8 +++++--- include/caffe/filler.hpp | 19 +++++++++---------- src/caffe/proto/caffe.proto | 2 +- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 5cd48d3a3f8..2bb9d948169 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -171,6 +171,8 @@ class HDF5DataLayer : public Layer { unsigned int current_file_; hsize_t current_row_; std::vector > > hdf_blobs_; + std::vector data_permutation_; + std::vector file_permutation_; }; /** @@ -191,7 +193,8 @@ class HDF5OutputLayer : public Layer { const vector*>& top) {} virtual inline const char* type() const { return "HDF5Output"; } - virtual inline int MinBottomBlobs() const { return 1; } + // TODO: no limit on the number of blobs + virtual inline int ExactNumBottomBlobs() const { return 2; } virtual inline int ExactNumTopBlobs() const { return 0; } inline std::string file_name() const { return file_name_; } @@ -205,14 +208,13 @@ class HDF5OutputLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - //virtual void SaveBlobs(); + virtual void SaveBlobs(); bool file_opened_; std::string file_name_; hid_t file_id_; Blob data_blob_; Blob label_blob_; - int current_batch_; }; /** diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index e483fe13c7e..bb18e8e1e28 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -21,7 +21,7 @@ class Filler { public: explicit Filler(const FillerParameter& param) : filler_param_(param) {} virtual ~Filler() {} - virtual void Fill(Blob* blob, DeviceContext &device_context) = 0; + virtual void Fill(Blob* blob) = 0; protected: FillerParameter filler_param_; }; // class Filler @@ -33,7 +33,7 @@ class ConstantFiller : public Filler { public: explicit ConstantFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob, DeviceContext &device_context) { + virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); const int count = blob->count(); const Dtype value = this->filler_param_.value(); @@ -52,7 +52,7 @@ class UniformFiller : public Filler { public: explicit UniformFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob, DeviceContext &device_context) { + virtual void Fill(Blob* blob) { CHECK(blob->count()); caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); @@ -67,7 +67,7 @@ class GaussianFiller : public Filler { public: explicit GaussianFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob, DeviceContext &device_context) { + virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); CHECK(blob->count()); caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), @@ -79,11 +79,10 @@ class GaussianFiller : public Filler { // These have num == channels == 1; width is number of inputs; height is // number of outputs. The 'sparse' variable specifies the mean number // of non-zero input weights for a given output. - CHECK_EQ(blob->num(), 1); - CHECK_EQ(blob->channels(), 1); - int num_outputs = blob->height(); + CHECK_GE(blob->num_axes(), 1); + const int num_outputs = blob->shape(0); Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); - rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int), device_context)); + rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); for (int i = 0; i < blob->count(); ++i) { @@ -104,7 +103,7 @@ class PositiveUnitballFiller : public Filler { public: explicit PositiveUnitballFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob, DeviceContext &device_context) { + virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); DCHECK(blob->count()); caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); @@ -146,7 +145,7 @@ class XavierFiller : public Filler { public: explicit XavierFiller(const FillerParameter& param) : Filler(param) {} - virtual void Fill(Blob* blob, DeviceContext &device_context) { + virtual void Fill(Blob* blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); Dtype scale = sqrt(Dtype(3) / fan_in); diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 489f9aceeda..e1c51f72591 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -730,7 +730,7 @@ message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1] + optional int32 axis = 3 [default = 1]; repeated uint32 slice_point = 2; // DEPRECATED: alias for "axis" -- does not support negative indexing. From dc3a37d64695e535d94a8f8fefea05819bee26a5 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 22:17:42 +0200 Subject: [PATCH 015/254] ... --- include/caffe/common_layers.hpp | 342 +++++++++++--------------------- 1 file changed, 121 insertions(+), 221 deletions(-) diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index 551d2706755..cae1c3e4ee6 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -25,7 +25,7 @@ namespace caffe { * * NOTE: does not implement Backwards operation. */ -template +template class ArgMaxLayer : public Layer { public: /** @@ -37,22 +37,15 @@ class ArgMaxLayer : public Layer { * if set, output a vector of pairs (max_ind, max_val) for each image. */ explicit ArgMaxLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "ArgMax"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "ArgMax"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: /** @@ -67,11 +60,10 @@ class ArgMaxLayer : public Layer { * @f$ (for @f$ K = 1 @f$). */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /// @brief Not implemented (non-differentiable function) virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { NOT_IMPLEMENTED; } bool out_max_val_; @@ -82,26 +74,19 @@ class ArgMaxLayer : public Layer { * @brief Takes at least two Blob%s and concatenates them along either the num * or channel dimension, outputting the result. */ -template +template class ConcatLayer : public Layer { public: explicit ConcatLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Concat"; - } - virtual inline int MinBottomBlobs() const { - return 2; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "Concat"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: /** @@ -114,24 +99,24 @@ class ConcatLayer : public Layer { * - K @f$ (N \times C \times H \times W) @f$ * the inputs @f$ x_K @f$ * @param top output Blob vector (length 1) - * -# @f$ (KN \times C \times H \times W) @f$ if concat_dim == 0, or - * @f$ (N \times KC \times H \times W) @f$ if concat_dim == 1: + * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or + * @f$ (N \times KC \times H \times W) @f$ if axis == 1: * the concatenated output @f$ * y = [\begin{array}{cccc} x_1 & x_2 & ... & x_K \end{array}] * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the concatenate inputs. * * @param top output Blob vector (length 1), providing the error gradient with * respect to the outputs - * -# @f$ (KN \times C \times H \times W) @f$ if concat_dim == 0, or - * @f$ (N \times KC \times H \times W) @f$ if concat_dim == 1: + * -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or + * @f$ (N \times KC \times H \times W) @f$ if axis == 1: * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ * with respect to concatenated outputs @f$ y @f$ * @param propagate_down see Layer::Backward. @@ -148,19 +133,14 @@ class ConcatLayer : public Layer { * @f$ */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); - Blob col_bob_; int count_; - int num_; - int channels_; - int height_; - int width_; - int concat_dim_; + int num_concats_; + int concat_input_size_; + int concat_axis_; }; /** @@ -169,38 +149,29 @@ class ConcatLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class EltwiseLayer : public Layer { public: explicit EltwiseLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Eltwise"; - } - virtual inline int MinBottomBlobs() const { - return 2; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "Eltwise"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); EltwiseParameter_EltwiseOp op_; vector coeffs_; @@ -219,24 +190,17 @@ class EltwiseLayer : public Layer { * and in Backward, the diff pointer of the bottom Blob to that of the top Blob * (see Blob::ShareDiff). */ -template +template class FlattenLayer : public Layer { public: explicit FlattenLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Flatten"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "Flatten"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: /** @@ -248,9 +212,7 @@ class FlattenLayer : public Layer { * the outputs -- i.e., the (virtually) copied, flattened inputs */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the concatenate inputs. @@ -262,13 +224,7 @@ class FlattenLayer : public Layer { * gradient is (virtually) copied */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - - int count_; + const vector& propagate_down, const vector*>& bottom); }; /** @@ -277,38 +233,29 @@ class FlattenLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class InnerProductLayer : public Layer { public: explicit InnerProductLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "InnerProduct"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "InnerProduct"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); int M_; int K_; @@ -322,36 +269,27 @@ class InnerProductLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class MVNLayer : public Layer { public: explicit MVNLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "MVN"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "MVN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); Blob mean_, variance_, temp_; @@ -363,40 +301,29 @@ class MVNLayer : public Layer { * @brief Ignores bottom blobs while producing no top blobs. (This is useful * to suppress outputs during testing.) */ -template +template class SilenceLayer : public Layer { public: explicit SilenceLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void Reshape(const vector*>& bottom, - const vector*>& top) { - } + const vector*>& top) {} - virtual inline const char* type() const { - return "Silence"; - } - virtual inline int MinBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 0; - } + virtual inline const char* type() const { return "Silence"; } + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 0; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top) { - } + const vector*>& top) {} // We can't define Forward_gpu here, since STUB_GPU will provide // its own definition for CPU_ONLY mode. virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -404,37 +331,31 @@ class SilenceLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class SoftmaxLayer : public Layer { public: explicit SoftmaxLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Softmax"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "Softmax"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); + int outer_num_; + int inner_num_; + int softmax_axis_; /// sum_multiplier is used to carry out sum using BLAS Blob sum_multiplier_; /// scale is an intermediate Blob to hold temporary results. @@ -448,25 +369,25 @@ class SoftmaxLayer : public Layer { */ template class CuDNNSoftmaxLayer : public SoftmaxLayer { -public: + public: explicit CuDNNSoftmaxLayer(const LayerParameter& param) - : SoftmaxLayer(param), handles_setup_(false) {} + : SoftmaxLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNSoftmaxLayer(); -protected: + protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensor4dDescriptor_t bottom_desc_; - cudnnTensor4dDescriptor_t top_desc_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; }; #endif @@ -476,36 +397,27 @@ class CuDNNSoftmaxLayer : public SoftmaxLayer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class SplitLayer : public Layer { public: explicit SplitLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Split"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int MinTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "Split"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); int count_; }; @@ -516,46 +428,34 @@ class SplitLayer : public Layer { * * TODO(dox): thorough documentation for Forward, Backward, and proto params. */ -template +template class SliceLayer : public Layer { public: explicit SliceLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Slice"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int MinTopBlobs() const { - return 2; - } + virtual inline const char* type() const { return "Slice"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 2; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); - Blob col_bob_; int count_; - int num_; - int channels_; - int height_; - int width_; - int slice_dim_; + int num_slices_; + int slice_size_; + int slice_axis_; vector slice_point_; }; From eb9904b1f53d1d685a7bda1e178a2a199141ece2 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 23:37:38 +0200 Subject: [PATCH 016/254] Conflicts on compability to caffe. --- HEADER | 6 - SOURCE | 2 - include/caffe/blob.hpp | 12 +- include/caffe/filler.hpp | 2 +- include/caffe/loss_layers.hpp | 253 ++++++--------- include/caffe/neuron_layers.hpp | 262 +++++++--------- include/caffe/util/cudnn.hpp | 34 ++- include/caffe/vision_layers.hpp | 2 +- src/caffe/blob.cpp | 4 + src/caffe/layers/accuracy_layer.cpp | 72 +++-- src/caffe/layers/argmax_layer.cpp | 4 +- src/caffe/layers/base_conv_layer.cpp | 171 +++++------ src/caffe/layers/base_data_layer.cpp | 4 +- src/caffe/layers/base_data_layer.cu | 2 +- src/caffe/layers/concat_layer.cpp | 135 ++++---- src/caffe/layers/concat_layer.cu | 71 ++--- src/caffe/layers/contrastive_loss_layer.cpp | 8 +- src/caffe/layers/conv_layer.cpp | 19 +- src/caffe/layers/conv_sk_layer.cpp | 4 +- src/caffe/layers/cudnn_conv_layer.cpp | 12 +- src/caffe/layers/cudnn_conv_layer.cu | 94 ++++-- src/caffe/layers/cudnn_pooling_layer.cpp | 10 +- src/caffe/layers/cudnn_pooling_layer.cu | 12 +- src/caffe/layers/cudnn_relu_layer.cpp | 4 +- src/caffe/layers/cudnn_relu_layer.cu | 16 +- src/caffe/layers/cudnn_sigmoid_layer.cpp | 4 +- src/caffe/layers/cudnn_sigmoid_layer.cu | 16 +- src/caffe/layers/cudnn_softmax_layer.cpp | 12 +- src/caffe/layers/cudnn_softmax_layer.cu | 15 +- src/caffe/layers/cudnn_tanh_layer.cpp | 4 +- src/caffe/layers/cudnn_tanh_layer.cu | 17 +- src/caffe/layers/data_layer.cpp | 67 ++-- src/caffe/layers/dropout_layer.cpp | 2 +- src/caffe/layers/dummy_data_layer.cpp | 62 ++-- src/caffe/layers/eltwise_layer.cpp | 13 +- src/caffe/layers/euclidean_loss_layer.cpp | 8 +- src/caffe/layers/flatten_layer.cpp | 15 +- src/caffe/layers/hdf5_data_layer.cpp | 64 +++- src/caffe/layers/hdf5_data_layer.cu | 19 +- src/caffe/layers/hdf5_output_layer.cpp | 54 ++-- src/caffe/layers/hdf5_output_layer.cu | 17 +- src/caffe/layers/im2col_layer.cpp | 2 + src/caffe/layers/image_data_layer.cpp | 21 +- src/caffe/layers/infogain_loss_layer.cpp | 2 +- src/caffe/layers/inner_product_layer.cpp | 95 +++--- src/caffe/layers/loss_layer.cpp | 3 +- src/caffe/layers/lrn_layer.cpp | 12 +- src/caffe/layers/lrn_layer.cu | 39 +-- src/caffe/layers/memory_data_layer.cpp | 5 +- src/caffe/layers/mvn_layer.cpp | 101 +++--- src/caffe/layers/pooling_layer.cpp | 321 ++++++++++---------- src/caffe/layers/pooling_layer.cu | 283 ++++++++--------- src/caffe/layers/prelu_layer.cpp | 2 +- src/caffe/layers/slice_layer.cpp | 142 ++++----- src/caffe/layers/slice_layer.cu | 70 ++--- src/caffe/layers/softmax_layer.cpp | 62 ++-- src/caffe/layers/softmax_loss_layer.cpp | 43 +-- src/caffe/layers/split_layer.cpp | 3 +- src/caffe/layers/window_data_layer.cpp | 186 ++++++------ 59 files changed, 1492 insertions(+), 1504 deletions(-) delete mode 100644 HEADER delete mode 100644 SOURCE diff --git a/HEADER b/HEADER deleted file mode 100644 index 3a0cb78f1f0..00000000000 --- a/HEADER +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef GREENTEA_CL_KERNELS_HPP_ -#define GREENTEA_CL_KERNELS_HPP_ -#endif -#ifndef GREENTEA_CL_KERNELS_HPP_ -#define GREENTEA_CL_KERNELS_HPP_ -#endif diff --git a/SOURCE b/SOURCE deleted file mode 100644 index d3039a2bb7b..00000000000 --- a/SOURCE +++ /dev/null @@ -1,2 +0,0 @@ -#include "+include/caffe/greentea/cl_kernels.hpp+" -#include "include/caffe/greentea/cl_kernels.hpp" diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 3c274a93b9e..cc116b72f28 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -196,12 +196,20 @@ class Blob { inline Dtype data_at(const int n, const int c, const int h, const int w) const { - return *(cpu_data() + offset(n, c, h, w)); + return cpu_data()[offset(n, c, h, w)]; } inline Dtype diff_at(const int n, const int c, const int h, const int w) const { - return *(cpu_diff() + offset(n, c, h, w)); + return cpu_diff()[offset(n, c, h, w)]; + } + + inline Dtype data_at(const vector& index) const { + return cpu_data()[offset(index)]; + } + + inline Dtype diff_at(const vector& index) const { + return cpu_diff()[offset(index)]; } inline const shared_ptr& data() const { diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index bb18e8e1e28..8471c5be95c 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -82,7 +82,7 @@ class GaussianFiller : public Filler { CHECK_GE(blob->num_axes(), 1); const int num_outputs = blob->shape(0); Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); - rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); + rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int), blob->device_context())); int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); for (int i = 0; i < blob->count(); ++i) { diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 1a80c7f6f05..d3eecd2e510 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -19,7 +19,7 @@ const float kLOG_THRESHOLD = 1e-20; * @brief Computes the classification accuracy for a one-of-many * classification task. */ -template +template class AccuracyLayer : public Layer { public: /** @@ -31,22 +31,15 @@ class AccuracyLayer : public Layer { * correct if the correct label is among the top 5 predicted labels. */ explicit AccuracyLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Accuracy"; - } - virtual inline int ExactNumBottomBlobs() const { - return 2; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "Accuracy"; } + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: /** @@ -74,20 +67,25 @@ class AccuracyLayer : public Layer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); + /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { for (int i = 0; i < propagate_down.size(); ++i) { - if (propagate_down[i]) { - NOT_IMPLEMENTED; - } + if (propagate_down[i]) { NOT_IMPLEMENTED; } } } + int label_axis_, outer_num_, inner_num_; + int top_k_; + + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; }; /** @@ -98,20 +96,17 @@ class AccuracyLayer : public Layer { * LossLayers are typically only capable of backpropagating to their first input * -- the predictions. */ -template +template class LossLayer : public Layer { public: explicit LossLayer(const LayerParameter& param) - : Layer(param) { - } - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); + : Layer(param) {} + virtual void LayerSetUp( + const vector*>& bottom, const vector*>& top); + virtual void Reshape( + const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { - return 2; - } + virtual inline int ExactNumBottomBlobs() const { return 2; } /** * @brief For convenience and backwards compatibility, instruct the Net to @@ -119,12 +114,8 @@ class LossLayer : public Layer { * they output their singleton loss, (even if the user didn't specify * one in the prototxt, etc.). */ - virtual inline bool AutoTopBlobs() const { - return true; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline bool AutoTopBlobs() const { return true; } + virtual inline int ExactNumTopBlobs() const { return 1; } /** * We usually cannot backpropagate to the labels; ignore force_backward for * these inputs. @@ -158,22 +149,16 @@ class LossLayer : public Layer { * d = \left| \left| a_n - b_n \right| \right|_2^2 @f$. * This can be used to train siamese networks. */ -template +template class ContrastiveLossLayer : public LossLayer { public: explicit ContrastiveLossLayer(const LayerParameter& param) - : LossLayer(param), - diff_() { - } + : LossLayer(param), diff_() {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { - return 3; - } - virtual inline const char* type() const { - return "ContrastiveLoss"; - } + virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline const char* type() const { return "ContrastiveLoss"; } /** * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate * to the first two inputs. @@ -185,9 +170,9 @@ class ContrastiveLossLayer : public LossLayer { protected: /// @copydoc ContrastiveLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the Contrastive error gradient w.r.t. the inputs. @@ -215,11 +200,9 @@ class ContrastiveLossLayer : public LossLayer { * propagate_down[1] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); Blob diff_; // cached for backward pass Blob dist_sq_; // cached for backward pass @@ -253,19 +236,15 @@ class ContrastiveLossLayer : public LossLayer { * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve * linear least squares problems! We use it only as an instructive example.) */ -template +template class EuclideanLossLayer : public LossLayer { public: explicit EuclideanLossLayer(const LayerParameter& param) - : LossLayer(param), - diff_() { - } + : LossLayer(param), diff_() {} virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "EuclideanLoss"; - } + virtual inline const char* type() const { return "EuclideanLoss"; } /** * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate * to both inputs -- override to return true and always allow force_backward. @@ -277,9 +256,9 @@ class EuclideanLossLayer : public LossLayer { protected: /// @copydoc EuclideanLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the Euclidean error gradient w.r.t. the inputs. @@ -315,11 +294,9 @@ class EuclideanLossLayer : public LossLayer { * @f$ if propagate_down[1] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); Blob diff_; }; @@ -367,21 +344,18 @@ class EuclideanLossLayer : public LossLayer { * outside the InnerProductLayer and no other losses outside the * HingeLossLayer). */ -template +template class HingeLossLayer : public LossLayer { public: explicit HingeLossLayer(const LayerParameter& param) - : LossLayer(param) { - } + : LossLayer(param) {} - virtual inline const char* type() const { - return "HingeLoss"; - } + virtual inline const char* type() const { return "HingeLoss"; } protected: /// @copydoc HingeLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the hinge loss error gradient w.r.t. the predictions. @@ -411,8 +385,7 @@ class HingeLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -447,39 +420,29 @@ class HingeLossLayer : public LossLayer { * \log(\hat{p}_{n,k}) * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. */ -template +template class InfogainLossLayer : public LossLayer { public: explicit InfogainLossLayer(const LayerParameter& param) - : LossLayer(param), - infogain_() { - } + : LossLayer(param), infogain_() {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should // be the infogain matrix. (Otherwise the infogain matrix is loaded from a // file specified by LayerParameter.) - virtual inline int ExactNumBottomBlobs() const { - return -1; - } - virtual inline int MinBottomBlobs() const { - return 2; - } - virtual inline int MaxBottomBlobs() const { - return 3; - } + virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MaxBottomBlobs() const { return 3; } - virtual inline const char* type() const { - return "InfogainLoss"; - } + virtual inline const char* type() const { return "InfogainLoss"; } protected: /// @copydoc InfogainLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the infogain loss error gradient w.r.t. the predictions. @@ -514,8 +477,7 @@ class InfogainLossLayer : public LossLayer { * gradient computation is not implemented. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); Blob infogain_; }; @@ -549,23 +511,20 @@ class InfogainLossLayer : public LossLayer { * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) * @f$ */ -template +template class MultinomialLogisticLossLayer : public LossLayer { public: explicit MultinomialLogisticLossLayer(const LayerParameter& param) - : LossLayer(param) { - } + : LossLayer(param) {} virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "MultinomialLogisticLoss"; - } + virtual inline const char* type() const { return "MultinomialLogisticLoss"; } protected: /// @copydoc MultinomialLogisticLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the multinomial logistic loss error gradient w.r.t. the @@ -596,8 +555,7 @@ class MultinomialLogisticLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -629,29 +587,26 @@ class MultinomialLogisticLossLayer : public LossLayer { * \right] * @f$ */ -template +template class SigmoidCrossEntropyLossLayer : public LossLayer { public: explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) : LossLayer(param), - sigmoid_layer_(new SigmoidLayer(param)), - sigmoid_output_(new Blob()) { - } + sigmoid_layer_(new SigmoidLayer(param)), + sigmoid_output_(new Blob()) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "SigmoidCrossEntropyLoss"; - } + virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } protected: /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the @@ -684,11 +639,9 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); /// The internal SigmoidLayer used to map predictions to probabilities. shared_ptr > sigmoid_layer_; @@ -701,7 +654,7 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { }; // Forward declare SoftmaxLayer for use in SoftmaxWithLossLayer. -template class SoftmaxLayer; +template class SoftmaxLayer; /** * @brief Computes the multinomial logistic loss for a one-of-many @@ -731,44 +684,35 @@ template class SoftmaxLayer; * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) * @f$, for softmax output class probabilites @f$ \hat{p} @f$ */ -template +template class SoftmaxWithLossLayer : public LossLayer { public: - /** - * @param param provides LossParameter loss_param, with options: - * - ignore_label (optional) - * Specify a label value that should be ignored when computing the loss. - * - normalize (optional, default true) - * If true, the loss is normalized by the number of (nonignored) labels - * present; otherwise the loss is simply summed over spatial locations. - */ + /** + * @param param provides LossParameter loss_param, with options: + * - ignore_label (optional) + * Specify a label value that should be ignored when computing the loss. + * - normalize (optional, default true) + * If true, the loss is normalized by the number of (nonignored) labels + * present; otherwise the loss is simply summed over spatial locations. + */ explicit SoftmaxWithLossLayer(const LayerParameter& param) - : LossLayer(param) { - } + : LossLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "SoftmaxWithLoss"; - } - virtual inline int ExactNumTopBlobs() const { - return -1; - } - virtual inline int MinTopBlobs() const { - return 1; - } - virtual inline int MaxTopBlobs() const { - return 2; - } + virtual inline const char* type() const { return "SoftmaxWithLoss"; } + virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } protected: /// @copydoc SoftmaxWithLossLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the softmax loss error gradient w.r.t. the predictions. * @@ -797,11 +741,10 @@ class SoftmaxWithLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); + /// The internal SoftmaxLayer used to map predictions to a distribution. shared_ptr > softmax_layer_; @@ -818,6 +761,8 @@ class SoftmaxWithLossLayer : public LossLayer { /// Whether to normalize the loss by the total number of values present /// (otherwise just by the batch size). bool normalize_; + + int softmax_axis_, outer_num_, inner_num_; }; } // namespace caffe diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 1d3e191aeb8..323215134c7 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -22,21 +22,16 @@ namespace caffe { * each element of the output depends only on the corresponding input * element. */ -template +template class NeuronLayer : public Layer { public: explicit NeuronLayer(const LayerParameter& param) - : Layer(param) { - } + : Layer(param) {} virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } }; /** @@ -49,31 +44,24 @@ class NeuronLayer : public Layer { * -# @f$ (N \times C \times H \times W) @f$ * the computed outputs @f$ y = |x| @f$ */ -template +template class AbsValLayer : public NeuronLayer { public: explicit AbsValLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "AbsVal"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int ExactNumTopBlobs() const { - return 1; - } + virtual inline const char* type() const { return "AbsVal"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: /// @copydoc AbsValLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the absolute value inputs. @@ -93,11 +81,9 @@ class AbsValLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -117,23 +103,20 @@ class AbsValLayer : public NeuronLayer { * \end{array} \right. * @f$ */ -template +template class BNLLLayer : public NeuronLayer { public: explicit BNLLLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} - virtual inline const char* type() const { - return "BNLL"; - } + virtual inline const char* type() const { return "BNLL"; } protected: /// @copydoc BNLLLayer virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the BNLL inputs. @@ -152,11 +135,9 @@ class BNLLLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -170,7 +151,7 @@ class BNLLLayer : public NeuronLayer { * -# @f$ (N \times C \times H \times W) @f$ * the computed outputs @f$ y = |x| @f$ */ -template +template class DropoutLayer : public NeuronLayer { public: /** @@ -180,16 +161,13 @@ class DropoutLayer : public NeuronLayer { * Sets the probability @f$ p @f$ that any given unit is dropped. */ explicit DropoutLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Dropout"; - } + virtual inline const char* type() const { return "Dropout"; } protected: /** @@ -209,15 +187,13 @@ class DropoutLayer : public NeuronLayer { * @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$. */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ Blob rand_vec_; @@ -233,7 +209,7 @@ class DropoutLayer : public NeuronLayer { * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, * and base @f$ \gamma @f$. */ -template +template class ExpLayer : public NeuronLayer { public: /** @@ -245,14 +221,11 @@ class ExpLayer : public NeuronLayer { * the base @f$ \gamma @f$ */ explicit ExpLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Exp"; - } + virtual inline const char* type() const { return "Exp"; } protected: /** @@ -266,9 +239,9 @@ class ExpLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the exp inputs. @@ -288,11 +261,9 @@ class ExpLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); Dtype inner_scale_, outer_scale_; }; @@ -302,7 +273,7 @@ class ExpLayer : public NeuronLayer { * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, * and power @f$ \gamma @f$. */ -template +template class PowerLayer : public NeuronLayer { public: /** @@ -313,14 +284,11 @@ class PowerLayer : public NeuronLayer { * - power (\b optional, default 1) the power @f$ \gamma @f$ */ explicit PowerLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Power"; - } + virtual inline const char* type() const { return "Power"; } protected: /** @@ -334,9 +302,9 @@ class PowerLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the power inputs. @@ -359,11 +327,9 @@ class PowerLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); /// @brief @f$ \gamma @f$ from layer_param_.power_param() Dtype power_; @@ -379,7 +345,7 @@ class PowerLayer : public NeuronLayer { * @brief Rectified Linear Unit non-linearity @f$ y = \max(0, x) @f$. * The simple max is fast to compute, and the function does not saturate. */ -template +template class ReLULayer : public NeuronLayer { public: /** @@ -389,12 +355,9 @@ class ReLULayer : public NeuronLayer { * the value @f$ \nu @f$ by which negative values are multiplied. */ explicit ReLULayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} - virtual inline const char* type() const { - return "ReLU"; - } + virtual inline const char* type() const { return "ReLU"; } protected: /** @@ -409,9 +372,9 @@ class ReLULayer : public NeuronLayer { * the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$. */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the ReLU inputs. @@ -442,11 +405,9 @@ class ReLULayer : public NeuronLayer { * @f$. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; #ifdef USE_CUDNN @@ -455,25 +416,25 @@ class ReLULayer : public NeuronLayer { */ template class CuDNNReLULayer : public ReLULayer { -public: + public: explicit CuDNNReLULayer(const LayerParameter& param) - : ReLULayer(param), handles_setup_(false) {} + : ReLULayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNReLULayer(); -protected: + protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensor4dDescriptor_t bottom_desc_; - cudnnTensor4dDescriptor_t top_desc_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; }; #endif @@ -485,16 +446,13 @@ class CuDNNReLULayer : public ReLULayer { * Note that the gradient vanishes as the values move away from 0. * The ReLULayer is often a better choice for this reason. */ -template +template class SigmoidLayer : public NeuronLayer { public: explicit SigmoidLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} - virtual inline const char* type() const { - return "Sigmoid"; - } + virtual inline const char* type() const { return "Sigmoid"; } protected: /** @@ -508,9 +466,9 @@ class SigmoidLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the sigmoid inputs. @@ -530,11 +488,9 @@ class SigmoidLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; #ifdef USE_CUDNN @@ -543,25 +499,25 @@ class SigmoidLayer : public NeuronLayer { */ template class CuDNNSigmoidLayer : public SigmoidLayer { -public: + public: explicit CuDNNSigmoidLayer(const LayerParameter& param) - : SigmoidLayer(param), handles_setup_(false) {} + : SigmoidLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNSigmoidLayer(); -protected: + protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensor4dDescriptor_t bottom_desc_; - cudnnTensor4dDescriptor_t top_desc_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; }; #endif @@ -573,16 +529,13 @@ class CuDNNSigmoidLayer : public SigmoidLayer { * Note that the gradient vanishes as the values move away from 0. * The ReLULayer is often a better choice for this reason. */ -template +template class TanHLayer : public NeuronLayer { public: explicit TanHLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} - virtual inline const char* type() const { - return "TanH"; - } + virtual inline const char* type() const { return "TanH"; } protected: /** @@ -596,9 +549,9 @@ class TanHLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the sigmoid inputs. @@ -620,11 +573,9 @@ class TanHLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); }; #ifdef USE_CUDNN @@ -633,25 +584,25 @@ class TanHLayer : public NeuronLayer { */ template class CuDNNTanHLayer : public TanHLayer { -public: + public: explicit CuDNNTanHLayer(const LayerParameter& param) - : TanHLayer(param), handles_setup_(false) {} + : TanHLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, const vector*>& top); virtual ~CuDNNTanHLayer(); -protected: + protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensor4dDescriptor_t bottom_desc_; - cudnnTensor4dDescriptor_t top_desc_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_; + cudnnTensorDescriptor_t top_desc_; }; #endif @@ -659,7 +610,7 @@ class CuDNNTanHLayer : public TanHLayer { * @brief Tests whether the input exceeds a threshold: outputs 1 for inputs * above threshold; 0 otherwise. */ -template +template class ThresholdLayer : public NeuronLayer { public: /** @@ -669,14 +620,11 @@ class ThresholdLayer : public NeuronLayer { * the threshold value @f$ t @f$ to which the input values are compared. */ explicit ThresholdLayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "Threshold"; - } + virtual inline const char* type() const { return "Threshold"; } protected: /** @@ -694,13 +642,12 @@ class ThresholdLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /// @brief Not implemented (non-differentiable function) virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { NOT_IMPLEMENTED; } @@ -715,7 +662,7 @@ class ThresholdLayer : public NeuronLayer { * channels. The number of axes of input blob should be greater than or * equal to 2. The 1st axis (0-based) is seen as channels. */ -template +template class PReLULayer : public NeuronLayer { public: /** @@ -727,18 +674,15 @@ class PReLULayer : public NeuronLayer { * negative slopes are shared across channels. */ explicit PReLULayer(const LayerParameter& param) - : NeuronLayer(param) { - } + : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { - return "PReLU"; - } + virtual inline const char* type() const { return "PReLU"; } protected: /** @@ -752,9 +696,9 @@ class PReLULayer : public NeuronLayer { * @f$. */ virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the PReLU inputs. @@ -785,11 +729,9 @@ class PReLULayer : public NeuronLayer { * @f$. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); + const vector& propagate_down, const vector*>& bottom); bool channel_shared_; Blob multiplier_; // dot multipler for backward computation of params diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index eaed7333df8..b531dd5fa7a 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -50,41 +50,45 @@ template class dataType; template<> class dataType { public: static const cudnnDataType_t type = CUDNN_DATA_FLOAT; + static float oneval, zeroval; + static const void *one, *zero; }; template<> class dataType { public: static const cudnnDataType_t type = CUDNN_DATA_DOUBLE; + static double oneval, zeroval; + static const void *one, *zero; }; template -inline void createTensor4dDesc(cudnnTensor4dDescriptor_t* desc) { - CUDNN_CHECK(cudnnCreateTensor4dDescriptor(desc)); +inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) { + CUDNN_CHECK(cudnnCreateTensorDescriptor(desc)); } template -inline void setTensor4dDesc(cudnnTensor4dDescriptor_t* desc, +inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, int n, int c, int h, int w, int stride_n, int stride_c, int stride_h, int stride_w) { CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(*desc, dataType::type, - n, c, h, w, stride_n, stride_c, stride_h, stride_w)); + n, c, h, w, stride_n, stride_c, stride_h, stride_w)); } template -inline void setTensor4dDesc(cudnnTensor4dDescriptor_t* desc, +inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, int n, int c, int h, int w) { const int stride_w = 1; const int stride_h = w * stride_w; const int stride_c = h * stride_h; const int stride_n = c * stride_c; setTensor4dDesc(desc, n, c, h, w, - stride_n, stride_c, stride_h, stride_w); + stride_n, stride_c, stride_h, stride_w); } template inline void createFilterDesc(cudnnFilterDescriptor_t* desc, int n, int c, int h, int w) { CUDNN_CHECK(cudnnCreateFilterDescriptor(desc)); - CUDNN_CHECK(cudnnSetFilterDescriptor(*desc, dataType::type, + CUDNN_CHECK(cudnnSetFilter4dDescriptor(*desc, dataType::type, n, c, h, w)); } @@ -95,29 +99,29 @@ inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) { template inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, - cudnnTensor4dDescriptor_t bottom, cudnnFilterDescriptor_t filter, + cudnnTensorDescriptor_t bottom, cudnnFilterDescriptor_t filter, int pad_h, int pad_w, int stride_h, int stride_w) { - CUDNN_CHECK(cudnnSetConvolutionDescriptor(*conv, bottom, filter, + CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv, pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION)); } template -inline void createPoolingDesc(cudnnPoolingDescriptor_t* conv, +inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, PoolingParameter_PoolMethod poolmethod, cudnnPoolingMode_t* mode, - int h, int w, int stride_h, int stride_w) { + int h, int w, int pad_h, int pad_w, int stride_h, int stride_w) { switch (poolmethod) { case PoolingParameter_PoolMethod_MAX: *mode = CUDNN_POOLING_MAX; break; case PoolingParameter_PoolMethod_AVE: - *mode = CUDNN_POOLING_AVERAGE; + *mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; break; default: LOG(FATAL) << "Unknown pooling method."; } - CUDNN_CHECK(cudnnCreatePoolingDescriptor(conv)); - CUDNN_CHECK(cudnnSetPoolingDescriptor(*conv, *mode, h, w, - stride_h, stride_w)); + CUDNN_CHECK(cudnnCreatePoolingDescriptor(pool_desc)); + CUDNN_CHECK(cudnnSetPooling2dDescriptor(*pool_desc, *mode, h, w, + pad_h, pad_w, stride_h, stride_w)); } } // namespace cudnn diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 3899e6a7d92..aed1d6fdd16 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -655,7 +655,7 @@ class CuDNNPoolingLayer : public PoolingLayer { bool handles_setup_; cudnnHandle_t handle_; - cudnnTensor4dDescriptor_t bottom_desc_, top_desc_; + cudnnTensorDescriptor_t bottom_desc_, top_desc_; cudnnPoolingDescriptor_t pooling_desc_; cudnnPoolingMode_t mode_; }; diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 316f3c3a55f..d033f80ba85 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -1,3 +1,7 @@ +#include +#include + + #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/syncedmem.hpp" diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index db65383af81..90aad675ed3 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -14,19 +14,30 @@ template void AccuracyLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { top_k_ = this->layer_param_.accuracy_param().top_k(); + + has_ignore_label_ = + this->layer_param_.accuracy_param().has_ignore_label(); + if (has_ignore_label_) { + ignore_label_ = this->layer_param_.accuracy_param().ignore_label(); + } } template void AccuracyLayer::Reshape( const vector*>& bottom, const vector*>& top) { - CHECK_EQ(bottom[0]->num(), bottom[1]->num()) - << "The data and label should have the same number."; - CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num()) + CHECK_LE(top_k_, bottom[0]->count() / bottom[1]->count()) << "top_k must be less than or equal to the number of classes."; - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - top[0]->Reshape(1, 1, 1, 1, this->device_context_); + label_axis_ = + bottom[0]->CanonicalAxisIndex(this->layer_param_.accuracy_param().axis()); + outer_num_ = bottom[0]->count(0, label_axis_); + inner_num_ = bottom[0]->count(label_axis_ + 1); + CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) + << "Number of labels must match number of predictions; " + << "e.g., if label axis == 1 and prediction shape is (N, C, H, W), " + << "label count (number of labels) must be N*H*W, " + << "with integer values in {0, 1, ..., C-1}."; + vector top_shape(0); // Accuracy is a scalar; 0 axes. + top[0]->Reshape(top_shape); } template @@ -35,31 +46,42 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, Dtype accuracy = 0; const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); - int num = bottom[0]->num(); - int dim = bottom[0]->count() / bottom[0]->num(); + const int dim = bottom[0]->count() / outer_num_; + const int num_labels = bottom[0]->shape(label_axis_); vector maxval(top_k_+1); vector max_id(top_k_+1); - for (int i = 0; i < num; ++i) { - // Top-k accuracy - std::vector > bottom_data_vector; - for (int j = 0; j < dim; ++j) { - bottom_data_vector.push_back( - std::make_pair(bottom_data[i * dim + j], j)); - } - std::partial_sort( - bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, - bottom_data_vector.end(), std::greater >()); - // check if true label is in top k predictions - for (int k = 0; k < top_k_; k++) { - if (bottom_data_vector[k].second == static_cast(bottom_label[i])) { - ++accuracy; - break; + int count = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = + static_cast(bottom_label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + continue; + } + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, num_labels); + // Top-k accuracy + std::vector > bottom_data_vector; + for (int k = 0; k < num_labels; ++k) { + bottom_data_vector.push_back(std::make_pair( + bottom_data[i * dim + k * inner_num_ + j], k)); + } + std::partial_sort( + bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, + bottom_data_vector.end(), std::greater >()); + // check if true label is in top k predictions + for (int k = 0; k < top_k_; k++) { + if (bottom_data_vector[k].second == label_value) { + ++accuracy; + break; + } } + ++count; } } // LOG(INFO) << "Accuracy: " << accuracy; - top[0]->mutable_cpu_data()[0] = accuracy / num; + top[0]->mutable_cpu_data()[0] = accuracy / count; // Accuracy layer should not be used as a loss function. } diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index 79cec2e09d6..c4040cdcaaa 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -23,10 +23,10 @@ void ArgMaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { if (out_max_val_) { // Produces max_ind and max_val - top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1, this->device_context_); + top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); } else { // Produces only max_ind - top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1, this->device_context_); + top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); } } diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 421949cddc8..40f2477c69a 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -8,10 +8,11 @@ namespace caffe { -template +template void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); CHECK(!conv_param.has_kernel_size() != @@ -21,11 +22,11 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, (conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; CHECK((!conv_param.has_pad() && conv_param.has_pad_h() - && conv_param.has_pad_w()) + && conv_param.has_pad_w()) || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; CHECK((!conv_param.has_stride() && conv_param.has_stride_h() - && conv_param.has_stride_w()) + && conv_param.has_stride_w()) || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; if (conv_param.has_kernel_size()) { @@ -34,8 +35,8 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, kernel_h_ = conv_param.kernel_h(); kernel_w_ = conv_param.kernel_w(); } - CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; if (!conv_param.has_pad_h()) { pad_h_ = pad_w_ = conv_param.pad(); } else { @@ -50,15 +51,16 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, } // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. - is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1 && stride_h_ == 1 && stride_w_ == 1 - && pad_h_ == 0 && pad_w_ == 0; + is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1 + && stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0; // Configure output channels and groups. channels_ = bottom[0]->channels(); num_output_ = this->layer_param_.convolution_param().num_output(); CHECK_GT(num_output_, 0); group_ = this->layer_param_.convolution_param().group(); CHECK_EQ(channels_ % group_, 0); - CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; + CHECK_EQ(num_output_ % group_, 0) + << "Number of output should be multiples of group."; if (reverse_dimensions()) { conv_out_channels_ = channels_; conv_in_channels_ = num_output_; @@ -71,7 +73,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // - blobs_[1] holds the biases (optional) bias_term_ = this->layer_param_.convolution_param().bias_term(); if (this->blobs_.size() > 0) { - LOG(INFO)<< "Skipping parameter initialization"; + LOG(INFO) << "Skipping parameter initialization"; } else { if (bias_term_) { this->blobs_.resize(2); @@ -81,40 +83,42 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Initialize and fill the weights: // output channels x input channels per-group x kernel height x kernel width this->blobs_[0].reset(new Blob( - conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_, this->device_context_)); + conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_,this->device_context_)); shared_ptr > weight_filler(GetFiller( - this->layer_param_.convolution_param().weight_filler())); - weight_filler->Fill(this->blobs_[0].get(), this->device_context_); - // If necessary, initialize and fill the biases: - // 1 x 1 x 1 x output channels + this->layer_param_.convolution_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); + // If necessary, initialize and fill the biases. if (bias_term_) { - this->blobs_[1].reset(new Blob(1, 1, 1, num_output_, this->device_context_)); + vector bias_shape(1, num_output_); + this->blobs_[1].reset(new Blob(bias_shape,this->device_context_)); shared_ptr > bias_filler(GetFiller( - this->layer_param_.convolution_param().bias_filler())); - bias_filler->Fill(this->blobs_[1].get(), this->device_context_); + this->layer_param_.convolution_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); } } // Propagate gradients to the parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); } -template +template void BaseConvolutionLayer::Reshape(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; num_ = bottom[0]->num(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); - CHECK_EQ(bottom[0]->channels(), channels_)<< "Input size incompatible with" - " convolution kernel."; + CHECK_EQ(bottom[0]->channels(), channels_) << "Input size incompatible with" + " convolution kernel."; // TODO: generalize to handle inputs of different shapes. for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { - CHECK_EQ(num_, bottom[bottom_id]->num())<< "Inputs must have same num."; + CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num."; CHECK_EQ(channels_, bottom[bottom_id]->channels()) - << "Inputs must have same channels."; + << "Inputs must have same channels."; CHECK_EQ(height_, bottom[bottom_id]->height()) - << "Inputs must have same height."; + << "Inputs must have same height."; CHECK_EQ(width_, bottom[bottom_id]->width()) - << "Inputs must have same width."; + << "Inputs must have same width."; } // Shape the tops. compute_output_shape(); @@ -144,17 +148,16 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, } // Set up the all ones "bias multiplier" for adding biases by BLAS if (bias_term_) { - bias_multiplier_.Reshape(1, 1, 1, height_out_ * width_out_, this->device_context_); + vector bias_multiplier_shape(1, height_out_ * width_out_); + bias_multiplier_.Reshape(bias_multiplier_shape, this->device_context_); caffe_set(bias_multiplier_.count(), Dtype(1), - bias_multiplier_.mutable_cpu_data()); + bias_multiplier_.mutable_cpu_data()); } } -template +template void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, - const Dtype* weights, - Dtype* output, - bool skip_im2col) { + const Dtype* weights, Dtype* output, bool skip_im2col) { const Dtype* col_buff = input; if (!is_1x1_) { if (!skip_im2col) { @@ -163,47 +166,42 @@ void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, col_buff = col_buffer_.cpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, - conv_out_channels_ / group_, conv_out_spatial_dim_, - kernel_dim_ / group_, (Dtype) 1., - weights + weight_offset_ * g, - col_buff + col_offset_ * g, (Dtype) 0., - output + output_offset_ * g); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / + group_, conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, + (Dtype)0., output + output_offset_ * g); } } -template +template void BaseConvolutionLayer::forward_cpu_bias(Dtype* output, - const Dtype* bias) { + const Dtype* bias) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - height_out_ * width_out_, 1, (Dtype) 1., bias, - bias_multiplier_.cpu_data(), (Dtype) 1., output); + height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(), + (Dtype)1., output); } -template +template void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, - const Dtype* weights, - Dtype* input) { + const Dtype* weights, Dtype* input) { Dtype* col_buff = col_buffer_.mutable_cpu_data(); if (is_1x1_) { col_buff = input; } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype) 1., weights + weight_offset_ * g, - output + output_offset_ * g, (Dtype) 0., - col_buff + col_offset_ * g); + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, + (Dtype)0., col_buff + col_offset_ * g); } if (!is_1x1_) { conv_col2im_cpu(col_buff, input); } } -template +template void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, - const Dtype* output, - Dtype* weights) { + const Dtype* output, Dtype* weights) { const Dtype* col_buff = input; if (!is_1x1_) { conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); @@ -211,27 +209,24 @@ void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype) 1., output + output_offset_ * g, - col_buff + col_offset_ * g, (Dtype) 1., - weights + weight_offset_ * g); + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, + (Dtype)1., weights + weight_offset_ * g); } } -template +template void BaseConvolutionLayer::backward_cpu_bias(Dtype* bias, - const Dtype* input) { + const Dtype* input) { caffe_cpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., - input, bias_multiplier_.cpu_data(), 1., bias); + input, bias_multiplier_.cpu_data(), 1., bias); } #ifndef CPU_ONLY -template +template void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, - const Dtype* weights, - Dtype* output, - bool skip_im2col) { + const Dtype* weights, Dtype* output, bool skip_im2col) { const Dtype* col_buff = input; if (!is_1x1_) { if (!skip_im2col) { @@ -240,47 +235,42 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, col_buff = col_buffer_.gpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, - conv_out_channels_ / group_, conv_out_spatial_dim_, - kernel_dim_ / group_, (Dtype) 1., - weights + weight_offset_ * g, - col_buff + col_offset_ * g, (Dtype) 0., - output + output_offset_ * g); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / + group_, conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, + (Dtype)0., output + output_offset_ * g); } } -template +template void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, - const Dtype* bias) { + const Dtype* bias) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - height_out_ * width_out_, 1, (Dtype) 1., bias, - bias_multiplier_.gpu_data(), (Dtype) 1., output); + height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), + (Dtype)1., output); } -template +template void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, - const Dtype* weights, - Dtype* input) { + const Dtype* weights, Dtype* input) { Dtype* col_buff = col_buffer_.mutable_gpu_data(); if (is_1x1_) { col_buff = input; } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype) 1., weights + weight_offset_ * g, - output + output_offset_ * g, (Dtype) 0., - col_buff + col_offset_ * g); + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, + (Dtype)0., col_buff + col_offset_ * g); } if (!is_1x1_) { conv_col2im_gpu(col_buff, input); } } -template +template void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, - const Dtype* output, - Dtype* weights) { + const Dtype* output, Dtype* weights) { const Dtype* col_buff = input; if (!is_1x1_) { conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); @@ -288,18 +278,17 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype) 1., output + output_offset_ * g, - col_buff + col_offset_ * g, (Dtype) 1., - weights + weight_offset_ * g); + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, + (Dtype)1., weights + weight_offset_ * g); } } -template +template void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, - const Dtype* input) { + const Dtype* input) { caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., - input, bias_multiplier_.gpu_data(), 1., bias); + input, bias_multiplier_.gpu_data(), 1., bias); } #endif // !CPU_ONLY diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index e9f18097923..352200915d7 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -24,7 +24,7 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, // The subclasses should setup the size of bottom and top DataLayerSetUp(bottom, top); data_transformer_.reset( - new DataTransformer(transform_param_, this->phase_, this->device_context_)); + new DataTransformer(transform_param_, this->phase_)); data_transformer_->InitRand(); } @@ -64,7 +64,7 @@ void BasePrefetchingDataLayer::Forward_cpu( DLOG(INFO) << "Thread joined"; // Reshape to loaded data. top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width(), this->device_context_); + this->prefetch_data_.height(), this->prefetch_data_.width()); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 5cc956c147d..775f6c47f7e 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -11,7 +11,7 @@ void BasePrefetchingDataLayer::Forward_gpu( JoinPrefetchThread(); // Reshape to loaded data. top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width(), this->device_context_); + this->prefetch_data_.height(), this->prefetch_data_.width()); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 90630f3b9d3..15a67016413 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -6,99 +6,86 @@ namespace caffe { -template +template void ConcatLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - concat_dim_ = this->layer_param_.concat_param().concat_dim(); - CHECK_GE(concat_dim_, 0)<< - "concat_dim should be >= 0"; - CHECK_LE(concat_dim_, 1)<< - "For now concat_dim <=1, it can only concat num and channels"; + const vector*>& top) { + const ConcatParameter& concat_param = this->layer_param_.concat_param(); + CHECK(!(concat_param.has_axis() && concat_param.has_concat_dim())) + << "Either axis or concat_dim should be specified; not both."; } -template +template void ConcatLayer::Reshape(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { + const int num_axes = bottom[0]->num_axes(); + const ConcatParameter& concat_param = this->layer_param_.concat_param(); + if (concat_param.has_concat_dim()) { + concat_axis_ = static_cast(concat_param.concat_dim()); + // Don't allow negative indexing for concat_dim, a uint32 -- almost + // certainly unintended. + CHECK_GE(concat_axis_, 0) << "casting concat_dim from uint32 to int32 " + << "produced negative result; concat_dim must satisfy " + << "0 <= concat_dim < " << kMaxBlobAxes; + CHECK_LT(concat_axis_, num_axes) << "concat_dim out of range."; + } else { + concat_axis_ = bottom[0]->CanonicalAxisIndex(concat_param.axis()); + } // Initialize with the first blob. - count_ = bottom[0]->count(); - num_ = bottom[0]->num(); - channels_ = bottom[0]->channels(); - height_ = bottom[0]->height(); - width_ = bottom[0]->width(); + vector top_shape = bottom[0]->shape(); + num_concats_ = bottom[0]->count(0, concat_axis_); + concat_input_size_ = bottom[0]->count(concat_axis_ + 1); + int bottom_count_sum = bottom[0]->count(); for (int i = 1; i < bottom.size(); ++i) { - count_ += bottom[i]->count(); - if (concat_dim_ == 0) { - num_ += bottom[i]->num(); - } else if (concat_dim_ == 1) { - channels_ += bottom[i]->channels(); - } else if (concat_dim_ == 2) { - height_ += bottom[i]->height(); - } else if (concat_dim_ == 3) { - width_ += bottom[i]->width(); + CHECK_EQ(num_axes, bottom[i]->num_axes()) + << "All inputs must have the same #axes."; + for (int j = 0; j < num_axes; ++j) { + if (j == concat_axis_) { continue; } + CHECK_EQ(top_shape[j], bottom[i]->shape(j)) + << "All inputs must have the same shape, except at concat_axis."; } + bottom_count_sum += bottom[i]->count(); + top_shape[concat_axis_] += bottom[i]->shape(concat_axis_); } - top[0]->Reshape(num_, channels_, height_, width_, this->device_context_); - CHECK_EQ(count_, top[0]->count()); + top[0]->Reshape(top_shape,this->device_context_); + CHECK_EQ(bottom_count_sum, top[0]->count()); } -template +template void ConcatLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { Dtype* top_data = top[0]->mutable_cpu_data(); - if (concat_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->cpu_data(); - int num_elem = bottom[i]->count(); - caffe_copy(num_elem, bottom_data, top_data + top[0]->offset(offset_num)); - offset_num += bottom[i]->num(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, + bottom_data + n * bottom_concat_axis * concat_input_size_, + top_data + (n * top_concat_axis + offset_concat_axis) + * concat_input_size_); } - } else if (concat_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->cpu_data(); - int num_elem = bottom[i]->channels() * bottom[i]->height() - * bottom[i]->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, bottom_data + bottom[i]->offset(n), - top_data + top[0]->offset(n, offset_channel)); - } - offset_channel += bottom[i]->channels(); - } // concat_dim_ is guaranteed to be 0 or 1 by LayerSetUp. + offset_concat_axis += bottom_concat_axis; } } -template +template void ConcatLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); - if (concat_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < bottom.size(); ++i) { - Blob* blob = bottom[i]; - if (propagate_down[i]) { - Dtype* bottom_diff = blob->mutable_cpu_diff(); - caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num), - bottom_diff); - } - offset_num += blob->num(); - } - } else if (concat_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < bottom.size(); ++i) { - Blob* blob = bottom[i]; - if (propagate_down[i]) { - Dtype* bottom_diff = blob->mutable_cpu_diff(); - int num_elem = blob->channels() * blob->height() * blob->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel), - bottom_diff + blob->offset(n)); - } - } - offset_channel += blob->channels(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + for (int i = 0; i < bottom.size(); ++i) { + if (!propagate_down[i]) { continue; } + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + bottom_diff + n * bottom_concat_axis * concat_input_size_); } - } // concat_dim_ is guaranteed to be 0 or 1 by LayerSetUp. + offset_concat_axis += bottom_concat_axis; + } } #ifdef CPU_ONLY diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 88fc090025f..dbadb5aeb30 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -10,29 +10,18 @@ template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); - if (concat_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - caffe_copy(bottom[i]->count(), bottom_data, - top_data + top[0]->offset(offset_num)); - offset_num += bottom[i]->num(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, + bottom_data + n * bottom_concat_axis * concat_input_size_, + top_data + (n * top_concat_axis + offset_concat_axis) + * concat_input_size_); } - } else if (concat_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - int num_elem = - bottom[i]->channels() * bottom[i]->height() * bottom[i]->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, bottom_data+bottom[i]->offset(n), - top_data + top[0]->offset(n, offset_channel)); - } - offset_channel += bottom[i]->channels(); - } - } else { - LOG(FATAL) << "concat_dim along dim" << concat_dim_ << - " not implemented yet"; + offset_concat_axis += bottom_concat_axis; } } @@ -40,34 +29,18 @@ template void ConcatLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); - if (concat_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < bottom.size(); ++i) { - Blob* blob = bottom[i]; - if (propagate_down[i]) { - Dtype* bottom_diff = blob->mutable_gpu_diff(); - caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num), - bottom_diff); - } - offset_num += blob->num(); - } - } else if (concat_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < bottom.size(); ++i) { - Blob* blob = bottom[i]; - if (propagate_down[i]) { - Dtype* bottom_diff = blob->mutable_gpu_diff(); - int num_elem = blob->channels()*blob->height()*blob->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel), - bottom_diff + blob->offset(n)); - } - } - offset_channel += blob->channels(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + for (int i = 0; i < bottom.size(); ++i) { + if (!propagate_down[i]) { continue; } + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + bottom_diff + n * bottom_concat_axis * concat_input_size_); } - } else { - LOG(FATAL) << "concat_dim along dim" << concat_dim_ << - " not implemented yet"; + offset_concat_axis += bottom_concat_axis; } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 9bb02688d01..0692c11c257 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -20,11 +20,11 @@ void ContrastiveLossLayer::LayerSetUp( CHECK_EQ(bottom[2]->channels(), 1); CHECK_EQ(bottom[2]->height(), 1); CHECK_EQ(bottom[2]->width(), 1); - diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, this->device_context_); - diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, this->device_context_); - dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1, this->device_context_); + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1, this->device_context_); + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); for (int i = 0; i < bottom[0]->channels(); ++i) summer_vec_.mutable_cpu_data()[i] = Dtype(1); } diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 0d41c47424b..c0c9f6f3371 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -8,7 +8,7 @@ namespace caffe { -template +template void ConvolutionLayer::compute_output_shape() { this->height_out_ = (this->height_ + 2 * this->pad_h_ - this->kernel_h_) / this->stride_h_ + 1; @@ -16,16 +16,16 @@ void ConvolutionLayer::compute_output_shape() { / this->stride_w_ + 1; } -template +template void ConvolutionLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* weight = this->blobs_[0]->cpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); for (int n = 0; n < this->num_; ++n) { this->forward_cpu_gemm(bottom_data + bottom[i]->offset(n), weight, - top_data + top[i]->offset(n)); + top_data + top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->cpu_data(); this->forward_cpu_bias(top_data + top[i]->offset(n), bias); @@ -34,10 +34,9 @@ void ConvolutionLayer::Forward_cpu(const vector*>& bottom, } } -template +template void ConvolutionLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); if (this->param_propagate_down_[0]) { @@ -45,7 +44,7 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, } if (this->bias_term_ && this->param_propagate_down_[1]) { caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); + this->blobs_[1]->mutable_cpu_diff()); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); @@ -63,12 +62,12 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_cpu_gemm(bottom_data + bottom[i]->offset(n), - top_diff + top[i]->offset(n), weight_diff); + top_diff + top[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->backward_cpu_gemm(top_diff + top[i]->offset(n), weight, - bottom_diff + bottom[i]->offset(n)); + bottom_diff + bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index 49861440e59..b6b402a6dc7 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -113,7 +113,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, shared_ptr > weight_filler( GetFiller( this->layer_param_.convolution_param().weight_filler())); - weight_filler->Fill(this->blobs_[0].get(), this->device_context_); + weight_filler->Fill(this->blobs_[0].get()); // If necessary, initialize and fill the bias term if (bias_term_) { this->blobs_[1].reset( @@ -121,7 +121,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, shared_ptr > bias_filler( GetFiller( this->layer_param_.convolution_param().bias_filler())); - bias_filler->Fill(this->blobs_[1].get(), this->device_context_); + bias_filler->Fill(this->blobs_[1].get()); } } // Set up the all ones "bias multiplier" for adding bias using blas diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 4a69ca20d0a..104d2b9d669 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -24,6 +24,8 @@ void CuDNNConvolutionLayer::LayerSetUp( // Initialize CUDA streams and cuDNN. stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; handle_ = new cudnnHandle_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; + workspaceSizeInBytes = 0; + workspace = NULL; for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { CUDA_CHECK(cudaStreamCreate(&stream_[g])); @@ -43,10 +45,10 @@ void CuDNNConvolutionLayer::LayerSetUp( // Create tensor descriptor(s) for data and corresponding convolution(s). for (int i = 0; i < bottom.size(); i++) { - cudnnTensor4dDescriptor_t bottom_desc; + cudnnTensorDescriptor_t bottom_desc; cudnn::createTensor4dDesc(&bottom_desc); bottom_descs_.push_back(bottom_desc); - cudnnTensor4dDescriptor_t top_desc; + cudnnTensorDescriptor_t top_desc; cudnn::createTensor4dDesc(&top_desc); top_descs_.push_back(top_desc); cudnnConvolutionDescriptor_t conv_desc; @@ -104,12 +106,12 @@ CuDNNConvolutionLayer::~CuDNNConvolutionLayer() { if (!handles_setup_) { return; } for (int i = 0; i < bottom_descs_.size(); i++) { - cudnnDestroyTensor4dDescriptor(bottom_descs_[i]); - cudnnDestroyTensor4dDescriptor(top_descs_[i]); + cudnnDestroyTensorDescriptor(bottom_descs_[i]); + cudnnDestroyTensorDescriptor(top_descs_[i]); cudnnDestroyConvolutionDescriptor(conv_descs_[i]); } if (this->bias_term_) { - cudnnDestroyTensor4dDescriptor(bias_desc_); + cudnnDestroyTensorDescriptor(bias_desc_); } cudnnDestroyFilterDescriptor(filter_desc_); diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 071014e1b48..4a1a4c4f4f2 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -19,23 +19,70 @@ void CuDNNConvolutionLayer::Forward_gpu( Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); + size_t workspace_limit_bytes = this->kernel_h_ * + this->kernel_w_ * + this->channels_ * + sizeof(int) + 1; + // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { + cudnnConvolutionFwdAlgo_t algo; + + // pick the convolution algorithm + // TODO(shelhamer) this should be done during reshape + // TODO(shelhamer) the choice of automatic or manual algorithm picking + // should be exposed in proto + CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], + bottom_descs_[i], + filter_desc_, + conv_descs_[i], + top_descs_[i], + CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, + workspace_limit_bytes, // memoryLimitInBytes, + &algo)); + + // get minimum size of the workspace needed for the desired algorithm + size_t workspaceSizeInBytes_temp = 0; + + CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], + bottom_descs_[i], + filter_desc_, + conv_descs_[i], + top_descs_[i], + algo, + &workspaceSizeInBytes_temp)); + + if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { + workspaceSizeInBytes = workspaceSizeInBytes_temp; + // free the existing workspace and allocate a new (larger) one + cudaFree(this->workspace); + cudaError_t err = cudaMalloc(&(this->workspace), workspaceSizeInBytes); + if (err != cudaSuccess) { + // force zero memory path + algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; + workspace = NULL; + workspaceSizeInBytes = 0; + } + } + // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], - bottom_descs_[i], bottom_data + bottom_offset_ * g, - filter_desc_, weight + weight_offset_ * g, - conv_descs_[i], - top_descs_[i], top_data + top_offset_ * g, - CUDNN_RESULT_NO_ACCUMULATE)); + cudnn::dataType::one, + bottom_descs_[i], bottom_data + bottom_offset_ * g, + filter_desc_, weight + weight_offset_ * g, + conv_descs_[i], + algo, workspace, workspaceSizeInBytes, + cudnn::dataType::zero, + top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); - Dtype alpha = 1.; - CUDNN_CHECK(cudnnAddTensor4d(handle_[g], CUDNN_ADD_SAME_C, &alpha, - bias_desc_, bias_data + bias_offset_ * g, - top_descs_[i], top_data + top_offset_ * g)); + CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, + cudnn::dataType::one, + bias_desc_, bias_data + bias_offset_ * g, + cudnn::dataType::one, + top_descs_[i], top_data + top_offset_ * g)); } } @@ -68,20 +115,22 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], - top_descs_[i], top_diff + top_offset_ * g, - bias_desc_, bias_diff + bias_offset_ * g, - CUDNN_RESULT_ACCUMULATE)); + cudnn::dataType::one, + top_descs_[i], top_diff + top_offset_ * g, + cudnn::dataType::one, + bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], - bottom_descs_[i], bottom_data + bottom_offset_ * g, - top_descs_[i], top_diff + top_offset_ * g, - conv_descs_[i], - filter_desc_, weight_diff + weight_offset_ * g, - CUDNN_RESULT_ACCUMULATE)); + cudnn::dataType::one, + bottom_descs_[i], bottom_data + bottom_offset_ * g, + top_descs_[i], top_diff + top_offset_ * g, + conv_descs_[i], + cudnn::dataType::one, + filter_desc_, weight_diff + weight_offset_ * g)); } // Gradient w.r.t. bottom data. @@ -91,11 +140,12 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], - filter_desc_, weight + weight_offset_ * g, - top_descs_[i], top_diff + top_offset_ * g, - conv_descs_[i], - bottom_descs_[i], bottom_diff + bottom_offset_ * g, - CUDNN_RESULT_NO_ACCUMULATE)); + cudnn::dataType::one, + filter_desc_, weight + weight_offset_ * g, + top_descs_[i], top_diff + top_offset_ * g, + conv_descs_[i], + cudnn::dataType::zero, + bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp index dd90195637b..c92c4e477b5 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cpp +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -13,15 +13,13 @@ template void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { PoolingLayer::LayerSetUp(bottom, top); - // Sanity check: CUDNN currently only supports pad == 0. - CHECK_EQ(this->pad_h_, 0); - CHECK_EQ(this->pad_w_, 0); CUDNN_CHECK(cudnnCreate(&handle_)); cudnn::createTensor4dDesc(&bottom_desc_); cudnn::createTensor4dDesc(&top_desc_); cudnn::createPoolingDesc(&pooling_desc_, this->layer_param_.pooling_param().pool(), &mode_, - this->kernel_h_, this->kernel_w_, this->stride_h_, this->stride_w_); + this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_, + this->stride_h_, this->stride_w_); handles_setup_ = true; } @@ -40,8 +38,8 @@ CuDNNPoolingLayer::~CuDNNPoolingLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensor4dDescriptor(bottom_desc_); - cudnnDestroyTensor4dDescriptor(top_desc_); + cudnnDestroyTensorDescriptor(bottom_desc_); + cudnnDestroyTensorDescriptor(top_desc_); cudnnDestroyPoolingDescriptor(pooling_desc_); cudnnDestroy(handle_); } diff --git a/src/caffe/layers/cudnn_pooling_layer.cu b/src/caffe/layers/cudnn_pooling_layer.cu index 1c113aad75f..a952b855a48 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cu +++ b/src/caffe/layers/cudnn_pooling_layer.cu @@ -15,7 +15,10 @@ void CuDNNPoolingLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, - bottom_desc_, bottom_data, top_desc_, top_data)); + cudnn::dataType::one, + bottom_desc_, bottom_data, + cudnn::dataType::zero, + top_desc_, top_data)); } template @@ -29,8 +32,11 @@ void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, - top_desc_, top_data, top_desc_, top_diff, - bottom_desc_, bottom_data, bottom_desc_, bottom_diff)); + cudnn::dataType::one, + top_desc_, top_data, top_desc_, top_diff, + bottom_desc_, bottom_data, + cudnn::dataType::zero, + bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer); diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index 0b8a6bc3248..759d83984ef 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -35,8 +35,8 @@ CuDNNReLULayer::~CuDNNReLULayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensor4dDescriptor(this->bottom_desc_); - cudnnDestroyTensor4dDescriptor(this->top_desc_); + cudnnDestroyTensorDescriptor(this->bottom_desc_); + cudnnDestroyTensorDescriptor(this->top_desc_); cudnnDestroy(this->handle_); } diff --git a/src/caffe/layers/cudnn_relu_layer.cu b/src/caffe/layers/cudnn_relu_layer.cu index 862508707a0..21d14857dd2 100644 --- a/src/caffe/layers/cudnn_relu_layer.cu +++ b/src/caffe/layers/cudnn_relu_layer.cu @@ -18,8 +18,11 @@ void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_RELU, - this->bottom_desc_, bottom_data, this->top_desc_, top_data)); + CUDNN_ACTIVATION_RELU, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); } template @@ -40,9 +43,12 @@ void CuDNNReLULayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_RELU, - this->top_desc_, top_data, this->top_desc_, top_diff, - this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); + CUDNN_ACTIVATION_RELU, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNReLULayer); diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp index 67bd9c373b0..32637873d46 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cpp +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -35,8 +35,8 @@ CuDNNSigmoidLayer::~CuDNNSigmoidLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensor4dDescriptor(this->bottom_desc_); - cudnnDestroyTensor4dDescriptor(this->top_desc_); + cudnnDestroyTensorDescriptor(this->bottom_desc_); + cudnnDestroyTensorDescriptor(this->top_desc_); cudnnDestroy(this->handle_); } diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cu b/src/caffe/layers/cudnn_sigmoid_layer.cu index 31b094e25d4..7a06cf721da 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cu +++ b/src/caffe/layers/cudnn_sigmoid_layer.cu @@ -13,8 +13,11 @@ void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_SIGMOID, - this->bottom_desc_, bottom_data, this->top_desc_, top_data)); + CUDNN_ACTIVATION_SIGMOID, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); } template @@ -30,9 +33,12 @@ void CuDNNSigmoidLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_SIGMOID, - this->top_desc_, top_data, this->top_desc_, top_diff, - this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); + CUDNN_ACTIVATION_SIGMOID, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSigmoidLayer); diff --git a/src/caffe/layers/cudnn_softmax_layer.cpp b/src/caffe/layers/cudnn_softmax_layer.cpp index 83a5b69a626..77a3225adcd 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cpp +++ b/src/caffe/layers/cudnn_softmax_layer.cpp @@ -26,10 +26,10 @@ template void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { SoftmaxLayer::Reshape(bottom, top); - int N = bottom[0]->num(); - int K = bottom[0]->channels(); - int H = bottom[0]->height(); - int W = bottom[0]->width(); + int N = this->outer_num_; + int K = bottom[0]->shape(this->softmax_axis_); + int H = this->inner_num_; + int W = 1; cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); } @@ -39,8 +39,8 @@ CuDNNSoftmaxLayer::~CuDNNSoftmaxLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensor4dDescriptor(bottom_desc_); - cudnnDestroyTensor4dDescriptor(top_desc_); + cudnnDestroyTensorDescriptor(bottom_desc_); + cudnnDestroyTensorDescriptor(top_desc_); cudnnDestroy(handle_); } diff --git a/src/caffe/layers/cudnn_softmax_layer.cu b/src/caffe/layers/cudnn_softmax_layer.cu index f328afdd831..a9e2fcefaf7 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cu +++ b/src/caffe/layers/cudnn_softmax_layer.cu @@ -17,8 +17,11 @@ void CuDNNSoftmaxLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE, - CUDNN_SOFTMAX_MODE_CHANNEL, - bottom_desc_, bottom_data, top_desc_, top_data)); + CUDNN_SOFTMAX_MODE_CHANNEL, + cudnn::dataType::one, + bottom_desc_, bottom_data, + cudnn::dataType::zero, + top_desc_, top_data)); } template @@ -29,9 +32,13 @@ void CuDNNSoftmaxLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE, - CUDNN_SOFTMAX_MODE_CHANNEL, - top_desc_, top_data, top_desc_, top_diff, bottom_desc_, bottom_diff)); + CUDNN_SOFTMAX_MODE_CHANNEL, + cudnn::dataType::one, + top_desc_, top_data, top_desc_, top_diff, + cudnn::dataType::zero, + bottom_desc_, bottom_diff)); } } diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp index b1d2b86384e..376faad324d 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cpp +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -35,8 +35,8 @@ CuDNNTanHLayer::~CuDNNTanHLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - cudnnDestroyTensor4dDescriptor(this->bottom_desc_); - cudnnDestroyTensor4dDescriptor(this->top_desc_); + cudnnDestroyTensorDescriptor(this->bottom_desc_); + cudnnDestroyTensorDescriptor(this->top_desc_); cudnnDestroy(this->handle_); } diff --git a/src/caffe/layers/cudnn_tanh_layer.cu b/src/caffe/layers/cudnn_tanh_layer.cu index bf9ec7cfac4..d287f6fee85 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cu +++ b/src/caffe/layers/cudnn_tanh_layer.cu @@ -13,8 +13,11 @@ void CuDNNTanHLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, - CUDNN_ACTIVATION_TANH, - this->bottom_desc_, bottom_data, this->top_desc_, top_data)); + CUDNN_ACTIVATION_TANH, + cudnn::dataType::one, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->top_desc_, top_data)); } template @@ -29,10 +32,14 @@ void CuDNNTanHLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + CUDNN_CHECK(cudnnActivationBackward(this->handle_, - CUDNN_ACTIVATION_TANH, - this->top_desc_, top_data, this->top_desc_, top_diff, - this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); + CUDNN_ACTIVATION_TANH, + cudnn::dataType::one, + this->top_desc_, top_data, this->top_desc_, top_diff, + this->bottom_desc_, bottom_data, + cudnn::dataType::zero, + this->bottom_desc_, bottom_diff)); } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNTanHLayer); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index f74a775fa24..6feab024e87 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -16,14 +16,14 @@ namespace caffe { -template +template DataLayer::~DataLayer() { this->JoinPrefetchThread(); } -template +template void DataLayer::DataLayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // Initialize DB db_.reset(db::GetDB(this->layer_param_.data_param().backend())); db_->Open(this->layer_param_.data_param().source(), db::READ); @@ -31,9 +31,9 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, // Check if we should randomly skip a few data points if (this->layer_param_.data_param().rand_skip()) { - unsigned int skip = caffe_rng_rand() - % this->layer_param_.data_param().rand_skip(); - LOG(INFO)<< "Skipping first " << skip << " data points."; + unsigned int skip = caffe_rng_rand() % + this->layer_param_.data_param().rand_skip(); + LOG(INFO) << "Skipping first " << skip << " data points."; while (skip-- > 0) { cursor_->Next(); } @@ -43,39 +43,40 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, datum.ParseFromString(cursor_->value()); bool force_color = this->layer_param_.data_param().force_encoded_color(); - if ((force_color && DecodeDatum(&datum, true)) || DecodeDatumNative(&datum)) { - LOG(INFO)<< "Decoding Datum"; + if ((force_color && DecodeDatum(&datum, true)) || + DecodeDatumNative(&datum)) { + LOG(INFO) << "Decoding Datum"; } // image int crop_size = this->layer_param_.transform_param().crop_size(); if (crop_size > 0) { top[0]->Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size, this->device_context_); + datum.channels(), crop_size, crop_size,this->device_context_); this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size, this->device_context_); - this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size, this->device_context_); + datum.channels(), crop_size, crop_size,this->device_context_); + this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size,this->device_context_); } else { - top[0]->Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), datum.height(), datum.width(), this->device_context_); + top[0]->Reshape( + this->layer_param_.data_param().batch_size(), datum.channels(), + datum.height(), datum.width(),this->device_context_); this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), datum.height(), - datum.width(), this->device_context_); - this->transformed_data_.Reshape(1, datum.channels(), datum.height(), - datum.width(), this->device_context_); + datum.channels(), datum.height(), datum.width(),this->device_context_); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width(),this->device_context_); } - LOG(INFO)<< "output data size: " << top[0]->num() << "," - << top[0]->channels() << "," << top[0]->height() << "," - << top[0]->width(); + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); // label if (this->output_labels_) { - top[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1, this->device_context_); - this->prefetch_label_.Reshape(this->layer_param_.data_param().batch_size(), - 1, 1, 1, this->device_context_); + vector label_shape(1, this->layer_param_.data_param().batch_size()); + top[1]->Reshape(label_shape,this->device_context_); + this->prefetch_label_.Reshape(label_shape,this->device_context_); } } // This function is used to create a thread that prefetches the data. -template +template void DataLayer::InternalThreadEntry() { CPUTimer batch_timer; batch_timer.Start(); @@ -99,10 +100,10 @@ void DataLayer::InternalThreadEntry() { DecodeDatumNative(&datum); } } - this->prefetch_data_.Reshape(1, datum.channels(), datum.height(), - datum.width(), this->device_context_); - this->transformed_data_.Reshape(1, datum.channels(), datum.height(), - datum.width(), this->device_context_); + this->prefetch_data_.Reshape(1, datum.channels(), + datum.height(), datum.width(),this->device_context_); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width(),this->device_context_); } Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); @@ -125,7 +126,7 @@ void DataLayer::InternalThreadEntry() { cv_img = DecodeDatumToCVMatNative(datum); } if (cv_img.channels() != this->transformed_data_.channels()) { - LOG(WARNING)<< "Your dataset contains encoded images with mixed " + LOG(WARNING) << "Your dataset contains encoded images with mixed " << "channel sizes. Consider adding a 'force_color' flag to the " << "model definition, or rebuild your dataset using " << "convert_imageset."; @@ -149,14 +150,14 @@ void DataLayer::InternalThreadEntry() { // go to the next iter cursor_->Next(); if (!cursor_->valid()) { - DLOG(INFO)<< "Restarting data prefetching from start."; + DLOG(INFO) << "Restarting data prefetching from start."; cursor_->SeekToFirst(); } } batch_timer.Stop(); - DLOG(INFO)<< "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; - DLOG(INFO)<< " Read time: " << read_time / 1000 << " ms."; - DLOG(INFO)<< "Transform time: " << trans_time / 1000 << " ms."; + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; } INSTANTIATE_CLASS(DataLayer); diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index 63ae058e25e..ec1256fd2fa 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -27,7 +27,7 @@ void DropoutLayer::Reshape(const vector*>& bottom, NeuronLayer::Reshape(bottom, top); // Set up the cache for random number generation rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(),this->device_context_); + bottom[0]->height(), bottom[0]->width()); } template diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp index cbb7aa737b7..6b0d617464c 100644 --- a/src/caffe/layers/dummy_data_layer.cpp +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -16,18 +16,30 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, num_data_filler == num_top) << "Number of data fillers must be 0, 1 or equal to the number of tops: " << num_top << "; you specified " << num_data_filler << " data fillers."; - CHECK(param.num_size() == 1 || param.num_size() == num_top) - << "Must specify either a single (1) 'num' or one for each top blob " - << "(" << num_top << "); you specified " << param.num_size() << "."; - CHECK(param.channels_size() == 1 || param.channels_size() == num_top) - << "Must specify either a single (1) 'channels' or one for each top blob " - << "(" << num_top << "); you specified " << param.channels_size() << "."; - CHECK(param.height_size() == 1 || param.height_size() == num_top) - << "Must specify either a single (1) 'height' or one for each top blob " - << "(" << num_top << "); you specified " << param.height_size() << "."; - CHECK(param.width_size() == 1 || param.width_size() == num_top) - << "Must specify either a single (1) 'width' or one for each top blob " - << "(" << num_top << "); you specified " << param.width_size() << "."; + + const bool legacy_dims = param.num_size() || param.channels_size() || + param.height_size() || param.width_size(); + if (legacy_dims) { + CHECK_EQ(0, param.shape_size()) + << "Both shape and legacy fields were specified"; + // Using deprecated 4D output dim specifiers. + CHECK(param.num_size() == 1 || param.num_size() == num_top) + << "Must specify 'num' once, or once per top blob " + << "(" << num_top << "); specified " << param.num_size() << "."; + CHECK(param.channels_size() == 1 || param.channels_size() == num_top) + << "Must specify 'channels' once, or once per top blob " + << "(" << num_top << "); specified " << param.channels_size() << "."; + CHECK(param.height_size() == 1 || param.height_size() == num_top) + << "Must specify 'height' once, or once per top blob " + << "(" << num_top << "); specified " << param.height_size() << "."; + CHECK(param.width_size() == 1 || param.width_size() == num_top) + << "Must specify 'width' once, or once per top blob " + << "(" << num_top << "); specified " << param.width_size() << "."; + } else { + CHECK(param.shape_size() == 1 || param.shape_size() == num_top) + << "Must specify 'shape' once, or once per top blob " + << "(" << num_top << "); specified " << param.shape_size() << "."; + } // refill_[i] tells Forward i whether or not to actually refill top Blob i. // If refill_[i] is false, Forward does nothing for Blob i. We use this to // avoid wastefully refilling "constant" Blobs in every forward pass. @@ -63,14 +75,19 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, } } for (int i = 0; i < num_top; ++i) { - const int num = (param.num_size() == 1) ? param.num(0) : param.num(i); - const int channels = - (param.channels_size() == 1) ? param.channels(0) : param.channels(i); - const int height = - (param.height_size() == 1) ? param.height(0) : param.height(i); - const int width = - (param.width_size() == 1) ? param.width(0) : param.width(i); - top[i]->Reshape(num, channels, height, width, this->device_context_); + if (legacy_dims) { + const int num = (param.num_size() == 1) ? param.num(0) : param.num(i); + const int channels = + (param.channels_size() == 1) ? param.channels(0) : param.channels(i); + const int height = + (param.height_size() == 1) ? param.height(0) : param.height(i); + const int width = + (param.width_size() == 1) ? param.width(0) : param.width(i); + top[i]->Reshape(num, channels, height, width); + } else { + const int shape_index = (param.shape_size() == 1) ? 0 : i; + top[i]->Reshape(param.shape(shape_index)); + } } // Run Forward once, with refill_ inverted, to fill the constant Blobs. this->Forward(bottom, top); @@ -84,13 +101,10 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, template void DummyDataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - - DeviceContext &device_context = Caffe::GetDeviceContext(this->layer_param_.device()); - for (int i = 0; i < top.size(); ++i) { const int filler_id = (fillers_.size() > 1) ? i : 0; if (refill_[filler_id]) { - fillers_[filler_id]->Fill(top[i], device_context); + fillers_[filler_id]->Fill(top[i]); } } } diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index b9f9792c494..a80700736bd 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -31,21 +31,14 @@ void EltwiseLayer::LayerSetUp(const vector*>& bottom, template void EltwiseLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int num = bottom[0]->num(); - const int channels = bottom[0]->channels(); - const int height = bottom[0]->height(); - const int width = bottom[0]->width(); for (int i = 1; i < bottom.size(); ++i) { - CHECK_EQ(num, bottom[i]->num()); - CHECK_EQ(channels, bottom[i]->channels()); - CHECK_EQ(height, bottom[i]->height()); - CHECK_EQ(width, bottom[i]->width()); + CHECK(bottom[i]->shape() == bottom[0]->shape()); } - top[0]->Reshape(num, channels, height, width, this->device_context_); + top[0]->ReshapeLike(*bottom[0]); // If max operation, we will initialize the vector index part. if (this->layer_param_.eltwise_param().operation() == EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { - max_idx_.Reshape(bottom[0]->num(), channels, height, width, this->device_context_); + max_idx_.Reshape(bottom[0]->shape()); } } diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 9d2fa229c08..80efa31b22c 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -11,11 +11,9 @@ template void EuclideanLossLayer::Reshape( const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); - CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); - CHECK_EQ(bottom[0]->height(), bottom[1]->height()); - CHECK_EQ(bottom[0]->width(), bottom[1]->width()); - diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(), this->device_context_); + CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1)) + << "Inputs must have the same dimension."; + diff_.ReshapeLike(*bottom[0]); } template diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index 1d6c5f2535c..e76272656be 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,12 +9,11 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - int channels_out = bottom[0]->channels() * bottom[0]->height() - * bottom[0]->width(); - top[0]->Reshape(bottom[0]->num(), channels_out, 1, 1, this->device_context_); - count_ = bottom[0]->num() * channels_out; - CHECK_EQ(count_, bottom[0]->count()); - CHECK_EQ(count_, top[0]->count()); + vector top_shape(2); + top_shape[0] = bottom[0]->num(); + top_shape[1] = bottom[0]->count() / bottom[0]->num(); + top[0]->Reshape(top_shape,this->device_context_); + CHECK_EQ(top[0]->count(), bottom[0]->count()); } template @@ -29,10 +28,6 @@ void FlattenLayer::Backward_cpu(const vector*>& top, bottom[0]->ShareDiff(*top[0]); } -#ifdef CPU_ONLY -STUB_GPU(FlattenLayer); -#endif - INSTANTIATE_CLASS(FlattenLayer); REGISTER_LAYER_CLASS(Flatten); diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index c32a6446113..8a782f7e524 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -14,9 +14,9 @@ #include "hdf5_hl.h" #include "stdint.h" +#include "caffe/data_layers.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" -#include "caffe/vision_layers.hpp" namespace caffe { @@ -36,7 +36,7 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { hdf_blobs_.resize(top_size); const int MIN_DATA_DIM = 1; - const int MAX_DATA_DIM = 4; + const int MAX_DATA_DIM = INT_MAX; for (int i = 0; i < top_size; ++i) { hdf_blobs_[i] = shared_ptr >(new Blob()); @@ -48,11 +48,25 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { CHECK_GE(status, 0) << "Failed to close HDF5 file: " << filename; // MinTopBlobs==1 guarantees at least one top blob - int num = hdf_blobs_[0]->num(); + CHECK_GE(hdf_blobs_[0]->num_axes(), 1) << "Input must have at least 1 axis."; + const int num = hdf_blobs_[0]->shape(0); for (int i = 1; i < top_size; ++i) { - CHECK_EQ(hdf_blobs_[i]->num(), num); + CHECK_EQ(hdf_blobs_[i]->shape(0), num); + } + // Default to identity permutation. + data_permutation_.clear(); + data_permutation_.resize(hdf_blobs_[0]->shape(0)); + for (int i = 0; i < hdf_blobs_[0]->shape(0); i++) + data_permutation_[i] = i; + + // Shuffle if needed. + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); + DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) + << " rows (shuffled)"; + } else { + DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) << " rows"; } - DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->num() << " rows"; } template @@ -81,16 +95,33 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, CHECK_GE(num_files_, 1) << "Must have at least 1 HDF5 filename listed in " << source; + file_permutation_.clear(); + file_permutation_.resize(num_files_); + // Default to identity permutation. + for (int i = 0; i < num_files_; i++) { + file_permutation_[i] = i; + } + + // Shuffle if needed. + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), file_permutation_.end()); + } + // Load the first HDF5 file and initialize the line counter. - LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); + LoadHDF5FileData(hdf_filenames_[file_permutation_[current_file_]].c_str()); current_row_ = 0; // Reshape blobs. const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); const int top_size = this->layer_param_.top_size(); + vector top_shape; for (int i = 0; i < top_size; ++i) { - top[i]->Reshape(batch_size, hdf_blobs_[i]->channels(), - hdf_blobs_[i]->height(), hdf_blobs_[i]->width(), this->device_context_); + top_shape.resize(hdf_blobs_[i]->num_axes()); + top_shape[0] = batch_size; + for (int j = 1; j < top_shape.size(); ++j) { + top_shape[j] = hdf_blobs_[i]->shape(j); + } + top[i]->Reshape(top_shape); } } @@ -99,22 +130,29 @@ void HDF5DataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); for (int i = 0; i < batch_size; ++i, ++current_row_) { - if (current_row_ == hdf_blobs_[0]->num()) { + if (current_row_ == hdf_blobs_[0]->shape(0)) { if (num_files_ > 1) { ++current_file_; if (current_file_ == num_files_) { current_file_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), + file_permutation_.end()); + } DLOG(INFO) << "Looping around to first file."; } - LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); + LoadHDF5FileData( + hdf_filenames_[file_permutation_[current_file_]].c_str()); } current_row_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) + std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); } for (int j = 0; j < this->layer_param_.top_size(); ++j) { - int data_dim = top[j]->count() / top[j]->num(); + int data_dim = top[j]->count() / top[j]->shape(0); caffe_copy(data_dim, - &hdf_blobs_[j]->cpu_data()[current_row_ * data_dim], - &top[j]->mutable_cpu_data()[i * data_dim]); + &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] + * data_dim], &top[j]->mutable_cpu_data()[i * data_dim]); } } } diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 02e3821d104..5e3e4ced141 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -10,9 +10,9 @@ TODO: #include "hdf5.h" #include "hdf5_hl.h" +#include "caffe/data_layers.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" -#include "caffe/vision_layers.hpp" namespace caffe { @@ -21,22 +21,29 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); for (int i = 0; i < batch_size; ++i, ++current_row_) { - if (current_row_ == hdf_blobs_[0]->num()) { + if (current_row_ == hdf_blobs_[0]->shape(0)) { if (num_files_ > 1) { current_file_ += 1; if (current_file_ == num_files_) { current_file_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), + file_permutation_.end()); + } DLOG(INFO) << "Looping around to first file."; } - LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); + LoadHDF5FileData( + hdf_filenames_[file_permutation_[current_file_]].c_str()); } current_row_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) + std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); } for (int j = 0; j < this->layer_param_.top_size(); ++j) { - int data_dim = top[j]->count() / top[j]->num(); + int data_dim = top[j]->count() / top[j]->shape(0); caffe_copy(data_dim, - &hdf_blobs_[j]->cpu_data()[current_row_ * data_dim], - &top[j]->mutable_gpu_data()[i * data_dim]); + &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] + * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]); } } } diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index 5de5de6d65d..9f797cce357 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -11,18 +11,17 @@ namespace caffe { -template +template void HDF5OutputLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { file_name_ = this->layer_param_.hdf5_output_param().file_name(); file_id_ = H5Fcreate(file_name_.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(file_id_, 0)<< "Failed to open HDF5 file" << file_name_; + H5P_DEFAULT); + CHECK_GE(file_id_, 0) << "Failed to open HDF5 file" << file_name_; file_opened_ = true; - current_batch_ = 0; } -template +template HDF5OutputLayer::~HDF5OutputLayer() { if (file_opened_) { herr_t status = H5Fclose(file_id_); @@ -30,24 +29,41 @@ HDF5OutputLayer::~HDF5OutputLayer() { } } -template +template +void HDF5OutputLayer::SaveBlobs() { + // TODO: no limit on the number of blobs + LOG(INFO) << "Saving HDF5 file " << file_name_; + CHECK_EQ(data_blob_.num(), label_blob_.num()) << + "data blob and label blob must have the same batch size"; + hdf5_save_nd_dataset(file_id_, HDF5_DATA_DATASET_NAME, data_blob_); + hdf5_save_nd_dataset(file_id_, HDF5_DATA_LABEL_NAME, label_blob_); + LOG(INFO) << "Successfully saved " << data_blob_.num() << " rows"; +} + +template void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { - CHECK_EQ(this->layer_param_.bottom_size(), bottom.size()); - for (int i = 0; i < bottom.size(); ++i) { - stringstream batch_id; - batch_id << this->layer_param_.bottom(i) << "_" << current_batch_; - LOG_FIRST_N(INFO, bottom.size()) << "Saving batch " << batch_id.str() - << " to HDF5 file " << file_name_; - hdf5_save_nd_dataset(file_id_, batch_id.str(), *bottom[i]); + const vector*>& top) { + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width(),this->device_context_); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width(),this->device_context_); + const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + caffe_copy(data_datum_dim, &bottom[0]->cpu_data()[i * data_datum_dim], + &data_blob_.mutable_cpu_data()[i * data_datum_dim]); + caffe_copy(label_datum_dim, &bottom[1]->cpu_data()[i * label_datum_dim], + &label_blob_.mutable_cpu_data()[i * label_datum_dim]); } - current_batch_++; + SaveBlobs(); } -template +template void HDF5OutputLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { return; } diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index e8b797824d8..ae497c34fc2 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -14,7 +14,22 @@ namespace caffe { template void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - Forward_cpu(bottom, top); + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width()); + const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], + &data_blob_.mutable_cpu_data()[i * data_datum_dim]); + caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], + &label_blob_.mutable_cpu_data()[i * label_datum_dim]); + } + SaveBlobs(); } template diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 6cddf0008cd..90acdd46cfc 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -50,6 +50,8 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, template void Im2colLayer::Reshape(const vector*>& bottom, const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 0ad1c70fefa..38ebbd5ec14 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -69,20 +69,21 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const int crop_size = this->layer_param_.transform_param().crop_size(); const int batch_size = this->layer_param_.image_data_param().batch_size(); if (crop_size > 0) { - top[0]->Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); - this->transformed_data_.Reshape(1, channels, crop_size, crop_size, this->device_context_); + top[0]->Reshape(batch_size, channels, crop_size, crop_size); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); + this->transformed_data_.Reshape(1, channels, crop_size, crop_size); } else { - top[0]->Reshape(batch_size, channels, height, width, this->device_context_); - this->prefetch_data_.Reshape(batch_size, channels, height, width, this->device_context_); - this->transformed_data_.Reshape(1, channels, height, width, this->device_context_); + top[0]->Reshape(batch_size, channels, height, width); + this->prefetch_data_.Reshape(batch_size, channels, height, width); + this->transformed_data_.Reshape(1, channels, height, width); } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label - top[1]->Reshape(batch_size, 1, 1, 1, this->device_context_); - this->prefetch_label_.Reshape(batch_size, 1, 1, 1, this->device_context_); + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape); + this->prefetch_label_.Reshape(label_shape); } template @@ -115,9 +116,9 @@ void ImageDataLayer::InternalThreadEntry() { cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, 0, 0, is_color); this->prefetch_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols, this->device_context_); + cv_img.rows, cv_img.cols); this->transformed_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols, this->device_context_); + cv_img.rows, cv_img.cols); } Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index 2540443d05b..a1e0b40de0e 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -20,7 +20,7 @@ void InfogainLossLayer::LayerSetUp( BlobProto blob_proto; ReadProtoFromBinaryFile( this->layer_param_.infogain_loss_param().source(), &blob_proto); - infogain_.FromProto(blob_proto, this->device_context_); + infogain_.FromProto(blob_proto); } } diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 323850ab324..8c876680021 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -9,20 +9,21 @@ namespace caffe { -template +template void InnerProductLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - - DeviceContext &device_context = Caffe::GetDeviceContext( - this->layer_param_.device()); - + const vector*>& top) { const int num_output = this->layer_param_.inner_product_param().num_output(); bias_term_ = this->layer_param_.inner_product_param().bias_term(); N_ = num_output; - K_ = bottom[0]->count() / bottom[0]->num(); + const int axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.inner_product_param().axis()); + // Dimensions starting from "axis" are "flattened" into a single + // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W), + // and axis == 1, N inner products with dimension CHW are performed. + K_ = bottom[0]->count(axis); // Check if we need to set up the weights if (this->blobs_.size() > 0) { - LOG(INFO)<< "Skipping parameter initialization"; + LOG(INFO) << "Skipping parameter initialization"; } else { if (bias_term_) { this->blobs_.resize(2); @@ -30,77 +31,91 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, this->blobs_.resize(1); } // Intialize the weight - this->blobs_[0].reset(new Blob(1, 1, N_, K_, this->device_context_)); + vector weight_shape(2); + weight_shape[0] = N_; + weight_shape[1] = K_; + this->blobs_[0].reset(new Blob(weight_shape,this->device_context_)); // fill the weights shared_ptr > weight_filler(GetFiller( - this->layer_param_.inner_product_param().weight_filler())); - weight_filler->Fill(this->blobs_[0].get(),device_context); + this->layer_param_.inner_product_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); // If necessary, intiialize and fill the bias term if (bias_term_) { - this->blobs_[1].reset(new Blob(1, 1, 1, N_, this->device_context_)); + vector bias_shape(1, N_); + this->blobs_[1].reset(new Blob(bias_shape,this->device_context_)); shared_ptr > bias_filler(GetFiller( - this->layer_param_.inner_product_param().bias_filler())); - bias_filler->Fill(this->blobs_[1].get(),device_context); + this->layer_param_.inner_product_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); } } // parameter initialization this->param_propagate_down_.resize(this->blobs_.size(), true); } -template +template void InnerProductLayer::Reshape(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // Figure out the dimensions - M_ = bottom[0]->num(); - CHECK_EQ(bottom[0]->count() / bottom[0]->num(), K_)<< "Input size " - "incompatible with inner product parameters."; - top[0]->Reshape(bottom[0]->num(), N_, 1, 1, this->device_context_); + const int axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.inner_product_param().axis()); + const int new_K = bottom[0]->count(axis); + CHECK_EQ(K_, new_K) + << "Input size incompatible with inner product parameters."; + // The first "axis" dimensions are independent inner products; the total + // number of these is M_, the product over these dimensions. + M_ = bottom[0]->count(0, axis); + // The top shape will be the bottom shape with the flattened axes dropped, + // and replaced by a single axis with dimension num_output (N_). + vector top_shape = bottom[0]->shape(); + top_shape.resize(axis + 1); + top_shape[axis] = N_; + top[0]->Reshape(top_shape,this->device_context_); // Set up the bias multiplier if (bias_term_) { - bias_multiplier_.Reshape(1, 1, 1, M_, this->device_context_); + vector bias_shape(1, M_); + bias_multiplier_.Reshape(bias_shape,this->device_context_); caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data()); } } -template +template void InnerProductLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); const Dtype* weight = this->blobs_[0]->cpu_data(); - caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., - bottom_data, weight, (Dtype) 0., top_data); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., + bottom_data, weight, (Dtype)0., top_data); if (bias_term_) { - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., - bias_multiplier_.cpu_data(), - this->blobs_[1]->cpu_data(), (Dtype) 1., top_data); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., + bias_multiplier_.cpu_data(), + this->blobs_[1]->cpu_data(), (Dtype)1., top_data); } } -template -void InnerProductLayer::Backward_cpu( - const vector*>& top, const vector& propagate_down, +template +void InnerProductLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight - caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., - top_diff, bottom_data, (Dtype) 0., - this->blobs_[0]->mutable_cpu_diff()); + caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias - caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype) 1., top_diff, - bias_multiplier_.cpu_data(), (Dtype) 0., - this->blobs_[1]->mutable_cpu_diff()); + caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + bias_multiplier_.cpu_data(), (Dtype)0., + this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bottom data - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., - top_diff, this->blobs_[0]->cpu_data(), (Dtype) 0., - bottom[0]->mutable_cpu_diff()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., + top_diff, this->blobs_[0]->cpu_data(), (Dtype)0., + bottom[0]->mutable_cpu_diff()); } } diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index 471adc5c56c..3496a5c2a8a 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -24,7 +24,8 @@ void LossLayer::Reshape( const vector*>& bottom, const vector*>& top) { CHECK_EQ(bottom[0]->num(), bottom[1]->num()) << "The data and label should have the same number."; - top[0]->Reshape(1, 1, 1, 1, this->device_context_); + vector loss_shape(0); // Loss layers output a scalar; 0 axes. + top[0]->Reshape(loss_shape); } INSTANTIATE_CLASS(LossLayer); diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index deaca6b2db9..36c1ace4c99 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -69,14 +69,16 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, template void LRNLayer::Reshape(const vector*>& bottom, const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; num_ = bottom[0]->num(); channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: - top[0]->Reshape(num_, channels_, height_, width_, this->device_context_); - scale_.Reshape(num_, channels_, height_, width_, this->device_context_); + top[0]->Reshape(num_, channels_, height_, width_); + scale_.Reshape(num_, channels_, height_, width_); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: split_layer_->Reshape(bottom, split_top_vec_); @@ -113,7 +115,7 @@ void LRNLayer::CrossChannelForward_cpu( for (int i = 0; i < scale_.count(); ++i) { scale_data[i] = k_; } - Blob padded_square(1, channels_ + size_ - 1, height_, width_, this->device_context_); + Blob padded_square(1, channels_ + size_ - 1, height_, width_); Dtype* padded_square_data = padded_square.mutable_cpu_data(); caffe_set(padded_square.count(), Dtype(0), padded_square_data); Dtype alpha_over_size = alpha_ / size_; @@ -184,8 +186,8 @@ void LRNLayer::CrossChannelBackward_cpu( const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* scale_data = scale_.cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - Blob padded_ratio(1, channels_ + size_ - 1, height_, width_, this->device_context_); - Blob accum_ratio(1, 1, height_, width_, this->device_context_); + Blob padded_ratio(1, channels_ + size_ - 1, height_, width_); + Blob accum_ratio(1, 1, height_, width_); Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data(); // We hack a little bit by using the diff() to store an additional result diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 58c39926c72..24aa6a30130 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -26,26 +26,24 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* in, Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values - while (head < post_pad) { + while (head < post_pad && head < channels) { accum_scale += in[head * step] * in[head * step]; ++head; } - // until we reach size, nothing needs to be subtracted - while (head < size) { - accum_scale += in[head * step] * in[head * step]; - scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; - ++head; - } // both add and subtract while (head < channels) { accum_scale += in[head * step] * in[head * step]; - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + if (head - size >= 0) { + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + } scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + if (head - size >= 0) { + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + } scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } @@ -143,26 +141,19 @@ __global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, int post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values - while (head < post_pad) { + while (head < post_pad && head < channels) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; ++head; } - // until we reach size, nothing needs to be subtracted - while (head < size) { - accum_ratio += top_diff[head * step] * top_data[head * step] / - scale[head * step]; - bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] - * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * - bottom_data[(head - post_pad) * step] * accum_ratio; - ++head; - } // both add and subtract while (head < channels) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + if (head - size >= 0) { + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; + } bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; @@ -170,8 +161,10 @@ __global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, } // subtract only while (head < channels + post_pad) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + if (head - size >= 0) { + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; + } bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index d5e90174994..97d4e4e6345 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -19,10 +19,11 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, CHECK_GT(batch_size_ * size_, 0) << "batch_size, channels, height, and width must be specified and" " positive in memory_data_param"; + vector label_shape(1, batch_size_); top[0]->Reshape(batch_size_, channels_, height_, width_, this->device_context_); - top[1]->Reshape(batch_size_, 1, 1, 1, this->device_context_); + top[1]->Reshape(label_shape, this->device_context_); added_data_.Reshape(batch_size_, channels_, height_, width_, this->device_context_); - added_label_.Reshape(batch_size_, 1, 1, 1, this->device_context_); + added_label_.Reshape(label_shape, this->device_context_); data_ = NULL; labels_ = NULL; added_data_.cpu_data(); diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 4650cf9033e..b74d7b4f300 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -7,26 +7,26 @@ namespace caffe { -template +template void MVNLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), - bottom[0]->width(), this->device_context_); - mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, - this->device_context_); - variance_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1, - this->device_context_); - temp_.Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), - bottom[0]->width(), this->device_context_); - sum_multiplier_.Reshape(1, 1, bottom[0]->height(), bottom[0]->width(), - this->device_context_); + const vector*>& top) { + top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), + 1, 1); + variance_.Reshape(bottom[0]->num(), bottom[0]->channels(), + 1, 1); + temp_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + sum_multiplier_.Reshape(1, 1, + bottom[0]->height(), bottom[0]->width()); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); } -template +template void MVNLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); int num; @@ -41,57 +41,55 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ caffe_powx(bottom[0]->count(), bottom_data, Dtype(2), - temp_.mutable_cpu_data()); + temp_.mutable_cpu_data()); // computes variance using var(X) = E(X^2) - (EX)^2 caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., - mean_.mutable_cpu_data()); // EX + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.cpu_data(), - sum_multiplier_.cpu_data(), 0., - variance_.mutable_cpu_data()); // E(X^2) + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), - temp_.mutable_cpu_data()); // (EX)^2 + temp_.mutable_cpu_data()); // (EX)^2 caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), - variance_.mutable_cpu_data()); // variance + variance_.mutable_cpu_data()); // variance // do mean and variance normalization // subtract mean caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_add(temp_.count(), bottom_data, temp_.cpu_data(), top_data); // normalize variance caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), - variance_.mutable_cpu_data()); + variance_.mutable_cpu_data()); caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_div(temp_.count(), top_data, temp_.cpu_data(), top_data); } else { caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., - mean_.mutable_cpu_data()); // EX + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX // subtract mean caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_add(temp_.count(), bottom_data, temp_.cpu_data(), top_data); } } -template +template void MVNLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* top_data = top[0]->cpu_data(); const Dtype* bottom_data = bottom[0]->cpu_data(); @@ -109,47 +107,45 @@ void MVNLayer::Backward_cpu(const vector*>& top, if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_cpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, - sum_multiplier_.cpu_data(), 0., - mean_.mutable_cpu_data()); + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., - bottom_diff); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 0., + bottom_diff); caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff); caffe_cpu_gemv(CblasNoTrans, num, dim, 1., top_diff, - sum_multiplier_.cpu_data(), 0., - mean_.mutable_cpu_data()); + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - mean_.cpu_data(), sum_multiplier_.cpu_data(), 1., - bottom_diff); + mean_.cpu_data(), sum_multiplier_.cpu_data(), 1., + bottom_diff); caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. / dim), - bottom_diff); + bottom_diff); // put the squares of bottom into temp_ - caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); + caffe_powx(temp_.count(), bottom_data, Dtype(2), + temp_.mutable_cpu_data()); // computes variance using var(X) = E(X^2) - (EX)^2 caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., - mean_.mutable_cpu_data()); // EX + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.cpu_data(), - sum_multiplier_.cpu_data(), 0., - variance_.mutable_cpu_data()); // E(X^2) + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), - temp_.mutable_cpu_data()); // (EX)^2 + temp_.mutable_cpu_data()); // (EX)^2 caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), - variance_.mutable_cpu_data()); // variance + variance_.mutable_cpu_data()); // variance // normalize variance caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), - variance_.mutable_cpu_data()); + variance_.mutable_cpu_data()); caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., - temp_.mutable_cpu_data()); + variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., + temp_.mutable_cpu_data()); caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff); } else { @@ -157,6 +153,7 @@ void MVNLayer::Backward_cpu(const vector*>& top, } } + #ifdef CPU_ONLY STUB_GPU(MVNLayer); #endif diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index a20e7afee8b..fbc823245c7 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -13,28 +13,28 @@ namespace caffe { using std::min; using std::max; -template +template void PoolingLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { PoolingParameter pool_param = this->layer_param_.pooling_param(); if (pool_param.global_pooling()) { CHECK(!(pool_param.has_kernel_size() || - pool_param.has_kernel_h() || pool_param.has_kernel_w())) - << "With Global_pooling: true Filter size cannot specified"; + pool_param.has_kernel_h() || pool_param.has_kernel_w())) + << "With Global_pooling: true Filter size cannot specified"; } else { CHECK(!pool_param.has_kernel_size() != - !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; CHECK(pool_param.has_kernel_size() || - (pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "For non-square filters both kernel_h and kernel_w are required."; + (pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; } CHECK((!pool_param.has_pad() && pool_param.has_pad_h() - && pool_param.has_pad_w()) + && pool_param.has_pad_w()) || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; CHECK((!pool_param.has_stride() && pool_param.has_stride_h() - && pool_param.has_stride_w()) + && pool_param.has_stride_w()) || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; global_pooling_ = pool_param.global_pooling(); @@ -49,8 +49,8 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, kernel_w_ = pool_param.kernel_w(); } } - CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; if (!pool_param.has_pad_h()) { pad_h_ = pad_w_ = pool_param.pad(); } else { @@ -65,7 +65,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } if (global_pooling_) { CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1) - << "With Global_pooling: true; only pad = 0 and stride = 1"; + << "With Global_pooling: true; only pad = 0 and stride = 1"; } if (pad_h_ != 0 || pad_w_ != 0) { CHECK(this->layer_param_.pooling_param().pool() @@ -78,9 +78,11 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } } -template +template void PoolingLayer::Reshape(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); @@ -88,10 +90,10 @@ void PoolingLayer::Reshape(const vector*>& bottom, kernel_h_ = bottom[0]->height(); kernel_w_ = bottom[0]->width(); } - pooled_height_ = static_cast(ceil( - static_cast(height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; - pooled_width_ = static_cast(ceil( - static_cast(width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; + pooled_height_ = static_cast(ceil(static_cast( + height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; + pooled_width_ = static_cast(ceil(static_cast( + width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; if (pad_h_ || pad_w_) { // If we have padding, ensure that the last pooling starts strictly // inside the image (instead of at the padding); otherwise clip the last. @@ -104,29 +106,30 @@ void PoolingLayer::Reshape(const vector*>& bottom, CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_); CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_); } - top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_,this->device_context_); + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_,this->device_context_); if (top.size() > 1) { top[1]->ReshapeLike(*top[0],this->device_context_); } // If max pooling, we will initialize the vector index part. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX && top.size() == 1) { + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX && top.size() == 1) { max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_,this->device_context_); + pooled_width_,this->device_context_); } // If stochastic pooling, we will initialize the random index part. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_STOCHASTIC) { + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_STOCHASTIC) { rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_,this->device_context_); + pooled_width_,this->device_context_); } } // TODO(Yangqing): Is there a faster way to do pooling in the channel-first // case? -template +template void PoolingLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); const int top_count = top[0]->count(); @@ -137,99 +140,98 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, // Different pooling methods. We explicitly do the switch outside the for // loop to save time, although this results in more code. switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - // Initialize - if (use_top_mask) { - top_mask = top[1]->mutable_cpu_data(); - caffe_set(top_count, Dtype(-1), top_mask); - } else { - mask = max_idx_.mutable_cpu_data(); - caffe_set(top_count, -1, mask); - } - caffe_set(top_count, Dtype(-FLT_MAX), top_data); - // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_); - int wend = min(wstart + kernel_w_, width_); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - const int pool_index = ph * pooled_width_ + pw; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - const int index = h * width_ + w; - if (bottom_data[index] > top_data[pool_index]) { - top_data[pool_index] = bottom_data[index]; - if (use_top_mask) { - top_mask[pool_index] = static_cast(index); - } else { - mask[pool_index] = index; - } + case PoolingParameter_PoolMethod_MAX: + // Initialize + if (use_top_mask) { + top_mask = top[1]->mutable_cpu_data(); + caffe_set(top_count, Dtype(-1), top_mask); + } else { + mask = max_idx_.mutable_cpu_data(); + caffe_set(top_count, -1, mask); + } + caffe_set(top_count, Dtype(-FLT_MAX), top_data); + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_); + int wend = min(wstart + kernel_w_, width_); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + const int pool_index = ph * pooled_width_ + pw; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + const int index = h * width_ + w; + if (bottom_data[index] > top_data[pool_index]) { + top_data[pool_index] = bottom_data[index]; + if (use_top_mask) { + top_mask[pool_index] = static_cast(index); + } else { + mask[pool_index] = index; } } } } } - // compute offset - bottom_data += bottom[0]->offset(0, 1); - top_data += top[0]->offset(0, 1); - if (use_top_mask) { - top_mask += top[0]->offset(0, 1); - } else { - mask += top[0]->offset(0, 1); - } + } + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); + if (use_top_mask) { + top_mask += top[0]->offset(0, 1); + } else { + mask += top[0]->offset(0, 1); } } - break; - case PoolingParameter_PoolMethod_AVE: - for (int i = 0; i < top_count; ++i) { - top_data[i] = 0; - } - // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height_); - wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - top_data[ph * pooled_width_ + pw] += bottom_data[h * width_ - + w]; - } + } + break; + case PoolingParameter_PoolMethod_AVE: + for (int i = 0; i < top_count; ++i) { + top_data[i] = 0; + } + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_ + pad_h_); + int wend = min(wstart + kernel_w_, width_ + pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + top_data[ph * pooled_width_ + pw] += + bottom_data[h * width_ + w]; } - top_data[ph * pooled_width_ + pw] /= pool_size; } + top_data[ph * pooled_width_ + pw] /= pool_size; } - // compute offset - bottom_data += bottom[0]->offset(0, 1); - top_data += top[0]->offset(0, 1); } + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - NOT_IMPLEMENTED; - break; - default: - LOG(FATAL)<< "Unknown pooling method."; } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; } +} -template +template void PoolingLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } @@ -243,72 +245,73 @@ void PoolingLayer::Backward_cpu(const vector*>& top, const int* mask = NULL; // suppress warnings about uninitialized variables const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - // The main loop - if (use_top_mask) { - top_mask = top[1]->cpu_data(); - } else { - mask = max_idx_.cpu_data(); - } - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - const int index = ph * pooled_width_ + pw; - const int bottom_index = - use_top_mask ? top_mask[index] : mask[index]; - bottom_diff[bottom_index] += top_diff[index]; - } - } - bottom_diff += bottom[0]->offset(0, 1); - top_diff += top[0]->offset(0, 1); - if (use_top_mask) { - top_mask += top[0]->offset(0, 1); - } else { - mask += top[0]->offset(0, 1); + case PoolingParameter_PoolMethod_MAX: + // The main loop + if (use_top_mask) { + top_mask = top[1]->cpu_data(); + } else { + mask = max_idx_.cpu_data(); + } + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + const int index = ph * pooled_width_ + pw; + const int bottom_index = + use_top_mask ? top_mask[index] : mask[index]; + bottom_diff[bottom_index] += top_diff[index]; } } + bottom_diff += bottom[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); + if (use_top_mask) { + top_mask += top[0]->offset(0, 1); + } else { + mask += top[0]->offset(0, 1); + } } - break; - case PoolingParameter_PoolMethod_AVE: - // The main loop - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height_); - wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - bottom_diff[h * width_ + w] += top_diff[ph * pooled_width_ - + pw] / pool_size; - } + } + break; + case PoolingParameter_PoolMethod_AVE: + // The main loop + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_h_ - pad_h_; + int wstart = pw * stride_w_ - pad_w_; + int hend = min(hstart + kernel_h_, height_ + pad_h_); + int wend = min(wstart + kernel_w_, width_ + pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + bottom_diff[h * width_ + w] += + top_diff[ph * pooled_width_ + pw] / pool_size; } } } - // offset - bottom_diff += bottom[0]->offset(0, 1); - top_diff += top[0]->offset(0, 1); } + // offset + bottom_diff += bottom[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - NOT_IMPLEMENTED; - break; - default: - LOG(FATAL)<< "Unknown pooling method."; } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; } +} + #ifdef CPU_ONLY - STUB_GPU(PoolingLayer); +STUB_GPU(PoolingLayer); #endif INSTANTIATE_CLASS(PoolingLayer); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 4ee7d119245..d1d48501af3 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -8,17 +8,14 @@ namespace caffe { -template +template __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, - const int pad_h, const int pad_w, - Dtype* top_data, int* mask, Dtype* top_mask) { - CUDA_KERNEL_LOOP(index, nthreads) - { + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, + int* mask, Dtype* top_mask) { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -49,17 +46,13 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, } } -template +template __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, - const int pad_h, const int pad_w, - Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -84,17 +77,14 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, } } -template +template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* bottom_data, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, Dtype* rand_idx, - Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, Dtype* rand_idx, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -127,16 +117,15 @@ __global__ void StoPoolForwardTrain(const int nthreads, } } -template -__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + +template +__global__ void StoPoolForwardTest(const int nthreads, + const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -160,77 +149,74 @@ __global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, } } -template + +template void PoolingLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); -// We'll output the mask to top[1] if it's of size >1. + // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, - mask, top_mask); - break; - case PoolingParameter_PoolMethod_AVE: + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, + mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == TRAIN) { + // We need to create the random index as well. + caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - if (this->phase_ == TRAIN) { - // We need to create the random index as well. - caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), - rand_idx_.mutable_gpu_data()); - // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<<>>( + CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTest<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, top_data); + StoPoolForwardTest<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, top_data); + } + break; + default: + LOG(FATAL) << "Unknown pooling method."; } - break; -default: - LOG(FATAL)<< "Unknown pooling method."; -} - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; } -template + +template __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, - const int pad_h, const int pad_w, - Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + const int* mask, const Dtype* top_mask, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; @@ -269,17 +255,14 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } } -template +template __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, - const int pad_h, const int pad_w, - Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width + pad_w; @@ -307,18 +290,17 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, } } -template -__global__ void StoPoolBackward(const int nthreads, const Dtype* rand_idx, - const Dtype* top_diff, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { -// find out the local index -// find out the local offset + +template +__global__ void StoPoolBackward(const int nthreads, + const Dtype* rand_idx, const Dtype* top_diff, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; @@ -332,18 +314,18 @@ __global__ void StoPoolBackward(const int nthreads, const Dtype* rand_idx, top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff[ph * pooled_width + pw] - * (index == static_cast(rand_idx[ph * pooled_width + pw])); + gradient += top_diff[ph * pooled_width + pw] * + (index == static_cast(rand_idx[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } -template + +template void PoolingLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } @@ -356,41 +338,42 @@ void PoolingLayer::Backward_gpu(const vector*>& top, const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward<<>>( - count, top_diff, mask, top_mask, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - bottom_diff); - break; - case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolBackward<<>>( - count, top_diff, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolBackward<<>>( - count, rand_idx_.gpu_data(), top_diff, - top[0]->num(), channels_, height_, width_, pooled_height_, - pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, - bottom_diff); - break; - default: - LOG(FATAL)<< "Unknown pooling method."; + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); } - CUDA_POST_KERNEL_CHECK - ; + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward<<>>( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + bottom_diff); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolBackward<<>>( + count, top_diff, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolBackward<<>>( + count, rand_idx_.gpu_data(), top_diff, + top[0]->num(), channels_, height_, width_, pooled_height_, + pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, + bottom_diff); + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + CUDA_POST_KERNEL_CHECK; } + INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); + } // namespace caffe diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 8d482e1ce4a..0dceadd9718 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -33,7 +33,7 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, filler_param.set_value(0.25); filler.reset(GetFiller(filler_param)); } - filler->Fill(this->blobs_[0].get(),this->device_context_); + filler->Fill(this->blobs_[0].get()); } if (channel_shared_) { CHECK_EQ(this->blobs_[0]->count(), 1) diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index affed529586..711f4eb676c 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -11,9 +11,8 @@ template void SliceLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { const SliceParameter& slice_param = this->layer_param_.slice_param(); - slice_dim_ = slice_param.slice_dim(); - CHECK_GE(slice_dim_, 0); - CHECK_LE(slice_dim_, 1) << "Can only slice num and channels"; + CHECK(!(slice_param.has_axis() && slice_param.has_slice_dim())) + << "Either axis or slice_dim should be specified; not both."; slice_point_.clear(); std::copy(slice_param.slice_point().begin(), slice_param.slice_point().end(), @@ -23,18 +22,27 @@ void SliceLayer::LayerSetUp(const vector*>& bottom, template void SliceLayer::Reshape(const vector*>& bottom, const vector*>& top) { - count_ = 0; - num_ = bottom[0]->num(); - channels_ = bottom[0]->channels(); - height_ = bottom[0]->height(); - width_ = bottom[0]->width(); + const int num_axes = bottom[0]->num_axes(); + const SliceParameter& slice_param = this->layer_param_.slice_param(); + if (slice_param.has_slice_dim()) { + slice_axis_ = static_cast(slice_param.slice_dim()); + // Don't allow negative indexing for slice_dim, a uint32 -- almost + // certainly unintended. + CHECK_GE(slice_axis_, 0) << "casting slice_dim from uint32 to int32 " + << "produced negative result; slice_dim must satisfy " + << "0 <= slice_dim < " << kMaxBlobAxes; + CHECK_LT(slice_axis_, num_axes) << "slice_dim out of range."; + } else { + slice_axis_ = bottom[0]->CanonicalAxisIndex(slice_param.axis()); + } + vector top_shape = bottom[0]->shape(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + num_slices_ = bottom[0]->count(0, slice_axis_); + slice_size_ = bottom[0]->count(slice_axis_ + 1); + int count = 0; if (slice_point_.size() != 0) { CHECK_EQ(slice_point_.size(), top.size() - 1); - if (slice_dim_ == 0) { - CHECK_LE(top.size(), num_); - } else { - CHECK_LE(top.size(), channels_); - } + CHECK_LE(top.size(), bottom_slice_axis); int prev = 0; vector slices; for (int i = 0; i < slice_point_.size(); ++i) { @@ -42,94 +50,64 @@ void SliceLayer::Reshape(const vector*>& bottom, slices.push_back(slice_point_[i] - prev); prev = slice_point_[i]; } - if (slice_dim_ == 0) { - slices.push_back(num_ - prev); - for (int i = 0; i < top.size(); ++i) { - top[i]->Reshape(slices[i], channels_, height_, width_,this->device_context_); - count_ += top[i]->count(); - } - } else { - slices.push_back(channels_ - prev); - for (int i = 0; i < top.size(); ++i) { - top[i]->Reshape(num_, slices[i], height_, width_,this->device_context_); - count_ += top[i]->count(); - } + slices.push_back(bottom_slice_axis - prev); + for (int i = 0; i < top.size(); ++i) { + top_shape[slice_axis_] = slices[i]; + top[i]->Reshape(top_shape,this->device_context_); + count += top[i]->count(); } } else { - if (slice_dim_ == 0) { - CHECK_EQ(num_ % top.size(), 0) - << "Number of top blobs (" << top.size() << ") " - << "should evenly divide input num ( " << num_ << ")"; - num_ = num_ / top.size(); - } else { - CHECK_EQ(channels_ % top.size(), 0) - << "Number of top blobs (" << top.size() << ") " - << "should evenly divide input channels ( " << channels_ << ")"; - channels_ = channels_ / top.size(); - } + CHECK_EQ(bottom_slice_axis % top.size(), 0) + << "Number of top blobs (" << top.size() << ") should evenly " + << "divide input slice axis (" << bottom_slice_axis << ")"; + top_shape[slice_axis_] = bottom_slice_axis / top.size(); for (int i = 0; i < top.size(); ++i) { - top[i]->Reshape(num_, channels_, height_, width_,this->device_context_); - count_ += top[i]->count(); + top[i]->Reshape(top_shape,this->device_context_); + count += top[i]->count(); } } - CHECK_EQ(count_, bottom[0]->count()); + CHECK_EQ(count, bottom[0]->count()); } template void SliceLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - const Dtype* bottom_data = bottom[0]->mutable_cpu_data(); - if (slice_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - Dtype* top_data = blob->mutable_cpu_data(); - caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num), - top_data); - offset_num += blob->num(); + int offset_slice_axis = 0; + const Dtype* bottom_data = bottom[0]->cpu_data(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int i = 0; i < top.size(); ++i) { + Dtype* top_data = top[i]->mutable_cpu_data(); + const int top_slice_axis = top[i]->shape(slice_axis_); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + bottom_data + bottom_offset, top_data + top_offset); } - } else if (slice_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - Dtype* top_data = blob->mutable_cpu_data(); - const int num_elem = blob->channels() * blob->height() * blob->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel), - top_data + blob->offset(n)); - } - offset_channel += blob->channels(); - } - } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. + offset_slice_axis += top_slice_axis; + } } template void SliceLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } + int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - if (slice_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - const Dtype* top_diff = blob->cpu_diff(); - caffe_copy(blob->count(), top_diff, - bottom_diff + bottom[0]->offset(offset_num)); - offset_num += blob->num(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->cpu_diff(); + const int top_slice_axis = top[i]->shape(slice_axis_); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + top_diff + top_offset, bottom_diff + bottom_offset); } - } else if (slice_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - const Dtype* top_diff = blob->cpu_diff(); - const int num_elem = blob->channels() * blob->height() * blob->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, top_diff + blob->offset(n), - bottom_diff + bottom[0]->offset(n, offset_channel)); - } - offset_channel += blob->channels(); - } - } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. + offset_slice_axis += top_slice_axis; + } } #ifdef CPU_ONLY diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index b5c5e61533f..e6e65677bd8 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -9,58 +9,42 @@ namespace caffe { template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); - if (slice_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - Dtype* top_data = blob->mutable_gpu_data(); - caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num), - top_data); - offset_num += blob->num(); + int offset_slice_axis = 0; + const Dtype* bottom_data = bottom[0]->gpu_data(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int i = 0; i < top.size(); ++i) { + Dtype* top_data = top[i]->mutable_gpu_data(); + const int top_slice_axis = top[i]->shape(slice_axis_); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + bottom_data + bottom_offset, top_data + top_offset); } - } else if (slice_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - Dtype* top_data = blob->mutable_gpu_data(); - const int num_elem = blob->channels() * blob->height() * blob->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel), - top_data + blob->offset(n)); - } - offset_channel += blob->channels(); - } - } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. + offset_slice_axis += top_slice_axis; + } } template void SliceLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } + int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (slice_dim_ == 0) { - int offset_num = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - const Dtype* top_diff = blob->gpu_diff(); - caffe_copy(blob->count(), top_diff, - bottom_diff + bottom[0]->offset(offset_num)); - offset_num += blob->num(); - } - } else if (slice_dim_ == 1) { - int offset_channel = 0; - for (int i = 0; i < top.size(); ++i) { - Blob* blob = top[i]; - const Dtype* top_diff = blob->gpu_diff(); - const int num_elem = blob->channels() * blob->height() * blob->width(); - for (int n = 0; n < num_; ++n) { - caffe_copy(num_elem, top_diff + blob->offset(n), - bottom_diff + bottom[0]->offset(n, offset_channel)); - } - offset_channel += blob->channels(); + const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + const int top_slice_axis = top[i]->shape(slice_axis_); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + top_diff + top_offset, bottom_diff + bottom_offset); } - } // slice_dim_ is guaranteed to be 0 or 1 by SetUp. + offset_slice_axis += top_slice_axis; + } } INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer); diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index 4e01c76bc4e..21f6391ed67 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -10,14 +10,18 @@ namespace caffe { template void SoftmaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { - top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(),this->device_context_); - sum_multiplier_.Reshape(1, bottom[0]->channels(), 1, 1,this->device_context_); + softmax_axis_ = + bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); + top[0]->ReshapeLike(*bottom[0],this->device_context_); + vector mult_dims(1, bottom[0]->shape(softmax_axis_)); + sum_multiplier_.Reshape(mult_dims,this->device_context_); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); - for (int i = 0; i < sum_multiplier_.count(); ++i) { - multiplier_data[i] = 1.; - } - scale_.Reshape(bottom[0]->num(), 1, bottom[0]->height(), bottom[0]->width(),this->device_context_); + caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); + outer_num_ = bottom[0]->count(0, softmax_axis_); + inner_num_ = bottom[0]->count(softmax_axis_ + 1); + vector scale_dims = bottom[0]->shape(); + scale_dims[softmax_axis_] = 1; + scale_.Reshape(scale_dims,this->device_context_); } template @@ -26,34 +30,32 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); Dtype* scale_data = scale_.mutable_cpu_data(); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - int dim = bottom[0]->count() / bottom[0]->num(); - int spatial_dim = bottom[0]->height() * bottom[0]->width(); + int channels = bottom[0]->shape(softmax_axis_); + int dim = bottom[0]->count() / outer_num_; caffe_copy(bottom[0]->count(), bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. - for (int i = 0; i < num; ++i) { + for (int i = 0; i < outer_num_; ++i) { // initialize scale_data to the first plane - caffe_copy(spatial_dim, bottom_data + i * dim, scale_data); + caffe_copy(inner_num_, bottom_data + i * dim, scale_data); for (int j = 0; j < channels; j++) { - for (int k = 0; k < spatial_dim; k++) { + for (int k = 0; k < inner_num_; k++) { scale_data[k] = std::max(scale_data[k], - bottom_data[i * dim + j * spatial_dim + k]); + bottom_data[i * dim + j * inner_num_ + k]); } } // subtraction - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, - 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data + i * dim); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, + 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data); // exponentiation - caffe_exp(dim, top_data + i * dim, top_data + i * dim); + caffe_exp(dim, top_data, top_data); // sum after exp - caffe_cpu_gemv(CblasTrans, channels, spatial_dim, 1., - top_data + i * dim, sum_multiplier_.cpu_data(), 0., scale_data); + caffe_cpu_gemv(CblasTrans, channels, inner_num_, 1., + top_data, sum_multiplier_.cpu_data(), 0., scale_data); // division for (int j = 0; j < channels; j++) { - caffe_div(spatial_dim, top_data + top[0]->offset(i, j), scale_data, - top_data + top[0]->offset(i, j)); + caffe_div(inner_num_, top_data, scale_data, top_data); + top_data += inner_num_; } } } @@ -66,20 +68,18 @@ void SoftmaxLayer::Backward_cpu(const vector*>& top, const Dtype* top_data = top[0]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); Dtype* scale_data = scale_.mutable_cpu_data(); - int num = top[0]->num(); - int channels = top[0]->channels(); - int dim = top[0]->count() / top[0]->num(); - int spatial_dim = top[0]->height() * top[0]->width(); + int channels = top[0]->shape(softmax_axis_); + int dim = top[0]->count() / outer_num_; caffe_copy(top[0]->count(), top_diff, bottom_diff); - for (int i = 0; i < num; ++i) { + for (int i = 0; i < outer_num_; ++i) { // compute dot(top_diff, top_data) and subtract them from the bottom diff - for (int k = 0; k < spatial_dim; ++k) { + for (int k = 0; k < inner_num_; ++k) { scale_data[k] = caffe_cpu_strided_dot(channels, - bottom_diff + i * dim + k, spatial_dim, - top_data + i * dim + k, spatial_dim); + bottom_diff + i * dim + k, inner_num_, + top_data + i * dim + k, inner_num_); } // subtraction - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, 1, + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim); } // elementwise multiplication diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index 0a8d2db071a..e52a98dccb7 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -35,6 +35,15 @@ void SoftmaxWithLossLayer::Reshape( const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); + softmax_axis_ = + bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); + outer_num_ = bottom[0]->count(0, softmax_axis_); + inner_num_ = bottom[0]->count(softmax_axis_ + 1); + CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) + << "Number of labels must match number of predictions; " + << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), " + << "label count (number of labels) must be N*H*W, " + << "with integer values in {0, 1, ..., C-1}."; if (top.size() >= 2) { // softmax output top[1]->ReshapeLike(*bottom[0],this->device_context_); @@ -48,20 +57,18 @@ void SoftmaxWithLossLayer::Forward_cpu( softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.cpu_data(); const Dtype* label = bottom[1]->cpu_data(); - int num = prob_.num(); - int dim = prob_.count() / num; - int spatial_dim = prob_.height() * prob_.width(); + int dim = prob_.count() / outer_num_; int count = 0; Dtype loss = 0; - for (int i = 0; i < num; ++i) { - for (int j = 0; j < spatial_dim; j++) { - const int label_value = static_cast(label[i * spatial_dim + j]); + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; j++) { + const int label_value = static_cast(label[i * inner_num_ + j]); if (has_ignore_label_ && label_value == ignore_label_) { continue; } DCHECK_GE(label_value, 0); - DCHECK_LT(label_value, prob_.channels()); - loss -= log(std::max(prob_data[i * dim + label_value * spatial_dim + j], + DCHECK_LT(label_value, prob_.shape(softmax_axis_)); + loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j], Dtype(FLT_MIN))); ++count; } @@ -69,7 +76,7 @@ void SoftmaxWithLossLayer::Forward_cpu( if (normalize_) { top[0]->mutable_cpu_data()[0] = loss / count; } else { - top[0]->mutable_cpu_data()[0] = loss / num; + top[0]->mutable_cpu_data()[0] = loss / outer_num_; } if (top.size() == 2) { top[1]->ShareData(prob_); @@ -88,19 +95,17 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, const Dtype* prob_data = prob_.cpu_data(); caffe_copy(prob_.count(), prob_data, bottom_diff); const Dtype* label = bottom[1]->cpu_data(); - int num = prob_.num(); - int dim = prob_.count() / num; - int spatial_dim = prob_.height() * prob_.width(); + int dim = prob_.count() / outer_num_; int count = 0; - for (int i = 0; i < num; ++i) { - for (int j = 0; j < spatial_dim; ++j) { - const int label_value = static_cast(label[i * spatial_dim + j]); + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = static_cast(label[i * inner_num_ + j]); if (has_ignore_label_ && label_value == ignore_label_) { - for (int c = 0; c < bottom[0]->channels(); ++c) { - bottom_diff[i * dim + c * spatial_dim + j] = 0; + for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { + bottom_diff[i * dim + c * inner_num_ + j] = 0; } } else { - bottom_diff[i * dim + label_value * spatial_dim + j] -= 1; + bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; ++count; } } @@ -110,7 +115,7 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, if (normalize_) { caffe_scal(prob_.count(), loss_weight / count, bottom_diff); } else { - caffe_scal(prob_.count(), loss_weight / num, bottom_diff); + caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } } } diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index e58fafb78af..7deda02107d 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -18,8 +18,7 @@ void SplitLayer::Reshape(const vector*>& bottom, // some strange effects in practice...) CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not " "allow in-place computation."; - top[i]->Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(),this->device_context_); + top[i]->ReshapeLike(*bottom[0],this->device_context_); CHECK_EQ(count_, top[i]->count()); } } diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index 62192792401..f5106d30a46 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -25,14 +25,14 @@ namespace caffe { -template +template WindowDataLayer::~WindowDataLayer() { this->JoinPrefetchThread(); } -template +template void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // LayerSetUp runs through the window_file and creates two structures // that hold windows: one for foreground (object) windows and one // for background (non-object) windows. We use an overlap threshold @@ -48,24 +48,24 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, // num_windows // class_index overlap x1 y1 x2 y2 - LOG(INFO)<< "Window data layer:" << std::endl - << " foreground (object) overlap threshold: " - << this->layer_param_.window_data_param().fg_threshold() << std::endl - << " background (non-object) overlap threshold: " - << this->layer_param_.window_data_param().bg_threshold() << std::endl - << " foreground sampling fraction: " - << this->layer_param_.window_data_param().fg_fraction() << std::endl - << " cache_images: " - << this->layer_param_.window_data_param().cache_images() << std::endl - << " root_folder: " - << this->layer_param_.window_data_param().root_folder(); + LOG(INFO) << "Window data layer:" << std::endl + << " foreground (object) overlap threshold: " + << this->layer_param_.window_data_param().fg_threshold() << std::endl + << " background (non-object) overlap threshold: " + << this->layer_param_.window_data_param().bg_threshold() << std::endl + << " foreground sampling fraction: " + << this->layer_param_.window_data_param().fg_fraction() << std::endl + << " cache_images: " + << this->layer_param_.window_data_param().cache_images() << std::endl + << " root_folder: " + << this->layer_param_.window_data_param().root_folder(); cache_images_ = this->layer_param_.window_data_param().cache_images(); string root_folder = this->layer_param_.window_data_param().root_folder(); const bool prefetch_needs_rand = - this->transform_param_.mirror() || - this->transform_param_.crop_size(); + this->transform_param_.mirror() || + this->transform_param_.crop_size(); if (prefetch_needs_rand) { const unsigned int prefetch_rng_seed = caffe_rng_rand(); prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); @@ -75,7 +75,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, std::ifstream infile(this->layer_param_.window_data_param().source().c_str()); CHECK(infile.good()) << "Failed to open window file " - << this->layer_param_.window_data_param().source() << std::endl; + << this->layer_param_.window_data_param().source() << std::endl; map label_hist; label_hist.insert(std::make_pair(0, 0)); @@ -109,9 +109,9 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, int num_windows; infile >> num_windows; const float fg_threshold = - this->layer_param_.window_data_param().fg_threshold(); + this->layer_param_.window_data_param().fg_threshold(); const float bg_threshold = - this->layer_param_.window_data_param().bg_threshold(); + this->layer_param_.window_data_param().bg_threshold(); for (int i = 0; i < num_windows; ++i) { int label, x1, y1, x2, y2; float overlap; @@ -144,61 +144,62 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, if (image_index % 100 == 0) { LOG(INFO) << "num: " << image_index << " " - << image_path << " " - << image_size[0] << " " - << image_size[1] << " " - << image_size[2] << " " - << "windows to process: " << num_windows; + << image_path << " " + << image_size[0] << " " + << image_size[1] << " " + << image_size[2] << " " + << "windows to process: " << num_windows; } - }while (infile >> hashtag >> image_index); + } while (infile >> hashtag >> image_index); LOG(INFO) << "Number of images: " << image_index+1; for (map::iterator it = label_hist.begin(); it != label_hist.end(); ++it) { LOG(INFO) << "class " << it->first << " has " << label_hist[it->first] - << " samples"; + << " samples"; } LOG(INFO) << "Amount of context padding: " - << this->layer_param_.window_data_param().context_pad(); + << this->layer_param_.window_data_param().context_pad(); LOG(INFO) << "Crop mode: " - << this->layer_param_.window_data_param().crop_mode(); + << this->layer_param_.window_data_param().crop_mode(); // image const int crop_size = this->transform_param_.crop_size(); CHECK_GT(crop_size, 0); const int batch_size = this->layer_param_.window_data_param().batch_size(); - top[0]->Reshape(batch_size, channels, crop_size, crop_size,this->device_context_); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size,this->device_context_); + top[0]->Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); LOG(INFO) << "output data size: " << top[0]->num() << "," - << top[0]->channels() << "," << top[0]->height() << "," - << top[0]->width(); + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); // label - top[1]->Reshape(batch_size, 1, 1, 1,this->device_context_); - this->prefetch_label_.Reshape(batch_size, 1, 1, 1,this->device_context_); + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape, this->device_context_); + this->prefetch_label_.Reshape(label_shape, this->device_context_); // data mean has_mean_file_ = this->transform_param_.has_mean_file(); has_mean_values_ = this->transform_param_.mean_value_size() > 0; if (has_mean_file_) { const string& mean_file = - this->transform_param_.mean_file(); + this->transform_param_.mean_file(); LOG(INFO) << "Loading mean file from: " << mean_file; BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); - data_mean_.FromProto(blob_proto,this->device_context_); + data_mean_.FromProto(blob_proto, this->device_context_); } if (has_mean_values_) { CHECK(has_mean_file_ == false) << - "Cannot specify mean_file and mean_value at the same time"; + "Cannot specify mean_file and mean_value at the same time"; for (int c = 0; c < this->transform_param_.mean_value_size(); ++c) { mean_values_.push_back(this->transform_param_.mean_value(c)); } CHECK(mean_values_.size() == 1 || mean_values_.size() == channels) << - "Specify either 1 mean_value or as many as channels: " << channels; + "Specify either 1 mean_value or as many as channels: " << channels; if (channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < channels; ++c) { @@ -208,16 +209,16 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, } } -template +template unsigned int WindowDataLayer::PrefetchRand() { CHECK(prefetch_rng_); - caffe::rng_t* prefetch_rng = static_cast(prefetch_rng_ - ->generator()); + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); return (*prefetch_rng)(); } // Thread fetching the data -template +template void WindowDataLayer::InternalThreadEntry() { // At each iteration, sample N windows where N*p are foreground (object) // windows and N*(1-p) are background (non-object) windows @@ -264,10 +265,9 @@ void WindowDataLayer::InternalThreadEntry() { // sample a window timer.Start(); const unsigned int rand_index = PrefetchRand(); - vector window = - (is_fg) ? - fg_windows_[rand_index % fg_windows_.size()] : - bg_windows_[rand_index % bg_windows_.size()]; + vector window = (is_fg) ? + fg_windows_[rand_index % fg_windows_.size()] : + bg_windows_[rand_index % bg_windows_.size()]; bool do_mirror = mirror && PrefetchRand() % 2; @@ -278,12 +278,12 @@ void WindowDataLayer::InternalThreadEntry() { cv::Mat cv_img; if (this->cache_images_) { pair image_cached = - image_database_cache_[window[WindowDataLayer::IMAGE_INDEX]]; + image_database_cache_[window[WindowDataLayer::IMAGE_INDEX]]; cv_img = DecodeDatumToCVMat(image_cached.second, true); } else { cv_img = cv::imread(image.first, CV_LOAD_IMAGE_COLOR); if (!cv_img.data) { - LOG(ERROR)<< "Could not open or find file " << image.first; + LOG(ERROR) << "Could not open or find file " << image.first; return; } } @@ -303,12 +303,12 @@ void WindowDataLayer::InternalThreadEntry() { // scale factor by which to expand the original region // such that after warping the expanded region to crop_size x crop_size // there's exactly context_pad amount of padding on each side - Dtype context_scale = static_cast(crop_size) - / static_cast(crop_size - 2 * context_pad); + Dtype context_scale = static_cast(crop_size) / + static_cast(crop_size - 2*context_pad); // compute the expanded region - Dtype half_height = static_cast(y2 - y1 + 1) / 2.0; - Dtype half_width = static_cast(x2 - x1 + 1) / 2.0; + Dtype half_height = static_cast(y2-y1+1)/2.0; + Dtype half_width = static_cast(x2-x1+1)/2.0; Dtype center_x = static_cast(x1) + half_width; Dtype center_y = static_cast(y1) + half_height; if (use_square) { @@ -318,16 +318,16 @@ void WindowDataLayer::InternalThreadEntry() { half_height = half_width; } } - x1 = static_cast(round(center_x - half_width * context_scale)); - x2 = static_cast(round(center_x + half_width * context_scale)); - y1 = static_cast(round(center_y - half_height * context_scale)); - y2 = static_cast(round(center_y + half_height * context_scale)); + x1 = static_cast(round(center_x - half_width*context_scale)); + x2 = static_cast(round(center_x + half_width*context_scale)); + y1 = static_cast(round(center_y - half_height*context_scale)); + y2 = static_cast(round(center_y + half_height*context_scale)); // the expanded region may go outside of the image // so we compute the clipped (expanded) region and keep track of // the extent beyond the image - int unclipped_height = y2 - y1 + 1; - int unclipped_width = x2 - x1 + 1; + int unclipped_height = y2-y1+1; + int unclipped_width = x2-x1+1; int pad_x1 = std::max(0, -x1); int pad_y1 = std::max(0, -y1); int pad_x2 = std::max(0, x2 - cv_img.cols + 1); @@ -342,25 +342,25 @@ void WindowDataLayer::InternalThreadEntry() { CHECK_LT(x2, cv_img.cols); CHECK_LT(y2, cv_img.rows); - int clipped_height = y2 - y1 + 1; - int clipped_width = x2 - x1 + 1; + int clipped_height = y2-y1+1; + int clipped_width = x2-x1+1; // scale factors that would be used to warp the unclipped // expanded region - Dtype scale_x = static_cast(crop_size) - / static_cast(unclipped_width); - Dtype scale_y = static_cast(crop_size) - / static_cast(unclipped_height); + Dtype scale_x = + static_cast(crop_size)/static_cast(unclipped_width); + Dtype scale_y = + static_cast(crop_size)/static_cast(unclipped_height); // size to warp the clipped expanded region to - cv_crop_size.width = static_cast(round( - static_cast(clipped_width) * scale_x)); - cv_crop_size.height = static_cast(round( - static_cast(clipped_height) * scale_y)); - pad_x1 = static_cast(round(static_cast(pad_x1) * scale_x)); - pad_x2 = static_cast(round(static_cast(pad_x2) * scale_x)); - pad_y1 = static_cast(round(static_cast(pad_y1) * scale_y)); - pad_y2 = static_cast(round(static_cast(pad_y2) * scale_y)); + cv_crop_size.width = + static_cast(round(static_cast(clipped_width)*scale_x)); + cv_crop_size.height = + static_cast(round(static_cast(clipped_height)*scale_y)); + pad_x1 = static_cast(round(static_cast(pad_x1)*scale_x)); + pad_x2 = static_cast(round(static_cast(pad_x2)*scale_x)); + pad_y1 = static_cast(round(static_cast(pad_y1)*scale_y)); + pad_y2 = static_cast(round(static_cast(pad_y2)*scale_y)); pad_h = pad_y1; // if we're mirroring, we mirror the padding too (to be pedantic) @@ -380,10 +380,10 @@ void WindowDataLayer::InternalThreadEntry() { } } - cv::Rect roi(x1, y1, x2 - x1 + 1, y2 - y1 + 1); + cv::Rect roi(x1, y1, x2-x1+1, y2-y1+1); cv::Mat cv_cropped_img = cv_img(roi); - cv::resize(cv_cropped_img, cv_cropped_img, cv_crop_size, 0, 0, - cv::INTER_LINEAR); + cv::resize(cv_cropped_img, cv_cropped_img, + cv_crop_size, 0, 0, cv::INTER_LINEAR); // horizontal flip at random if (do_mirror) { @@ -397,12 +397,12 @@ void WindowDataLayer::InternalThreadEntry() { for (int w = 0; w < cv_cropped_img.cols; ++w) { for (int c = 0; c < channels; ++c) { int top_index = ((item_id * channels + c) * crop_size + h + pad_h) - * crop_size + w + pad_w; + * crop_size + w + pad_w; // int top_index = (c * height + h) * width + w; Dtype pixel = static_cast(ptr[img_index++]); if (this->has_mean_file_) { int mean_index = (c * mean_height + h + mean_off + pad_h) - * mean_width + w + mean_off + pad_w; + * mean_width + w + mean_off + pad_w; top_data[top_index] = (pixel - mean[mean_index]) * scale; } else { if (this->has_mean_values_) { @@ -418,46 +418,46 @@ void WindowDataLayer::InternalThreadEntry() { // get window label top_label[item_id] = window[WindowDataLayer::LABEL]; -#if 0 + #if 0 // useful debugging code for dumping transformed windows to disk string file_id; std::stringstream ss; ss << PrefetchRand(); ss >> file_id; std::ofstream inf((string("dump/") + file_id + - string("_info.txt")).c_str(), std::ofstream::out); + string("_info.txt")).c_str(), std::ofstream::out); inf << image.first << std::endl - << window[WindowDataLayer::X1]+1 << std::endl - << window[WindowDataLayer::Y1]+1 << std::endl - << window[WindowDataLayer::X2]+1 << std::endl - << window[WindowDataLayer::Y2]+1 << std::endl - << do_mirror << std::endl - << top_label[item_id] << std::endl - << is_fg << std::endl; + << window[WindowDataLayer::X1]+1 << std::endl + << window[WindowDataLayer::Y1]+1 << std::endl + << window[WindowDataLayer::X2]+1 << std::endl + << window[WindowDataLayer::Y2]+1 << std::endl + << do_mirror << std::endl + << top_label[item_id] << std::endl + << is_fg << std::endl; inf.close(); std::ofstream top_data_file((string("dump/") + file_id + - string("_data.txt")).c_str(), + string("_data.txt")).c_str(), std::ofstream::out | std::ofstream::binary); for (int c = 0; c < channels; ++c) { for (int h = 0; h < crop_size; ++h) { for (int w = 0; w < crop_size; ++w) { top_data_file.write(reinterpret_cast( - &top_data[((item_id * channels + c) * crop_size + h) - * crop_size + w]), + &top_data[((item_id * channels + c) * crop_size + h) + * crop_size + w]), sizeof(Dtype)); } } } top_data_file.close(); -#endif + #endif item_id++; } } batch_timer.Stop(); - DLOG(INFO)<< "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; - DLOG(INFO)<< " Read time: " << read_time / 1000 << " ms."; - DLOG(INFO)<< "Transform time: " << trans_time / 1000 << " ms."; + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; } INSTANTIATE_CLASS(WindowDataLayer); From 13a6b7c33df15e489eff2bc0c1452437b07cf9ae Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 26 Apr 2015 23:58:08 +0200 Subject: [PATCH 017/254] Almost working again. --- include/caffe/common.hpp | 1 + include/caffe/vision_layers.hpp | 58 +---- src/caffe/layers/accuracy_layer.cpp | 2 +- src/caffe/layers/argmax_layer.cpp | 4 +- src/caffe/layers/contrastive_loss_layer.cpp | 8 +- src/caffe/layers/datarandtransform_layer.cpp | 235 ------------------- src/caffe/layers/euclidean_loss_layer.cpp | 2 +- src/caffe/proto/caffe.proto | 45 ---- 8 files changed, 13 insertions(+), 342 deletions(-) delete mode 100644 src/caffe/layers/datarandtransform_layer.cpp diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index af29060c054..f7d8529ea32 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -5,6 +5,7 @@ #include #include +#include #include #include // NOLINT(readability/streams) #include // NOLINT(readability/streams) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index aed1d6fdd16..caeda209509 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -16,58 +16,6 @@ namespace caffe { -template -class DataRandTransformLayer : public Layer { - public: - explicit DataRandTransformLayer(const LayerParameter& param) - : Layer(param) { - } - - virtual inline const char* type() const { - return "DataRandTransform"; - } - - virtual ~DataRandTransformLayer(); - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - - int NUM_; - int CHANNELS_; - int WIDTH_; - int HEIGHT_; - - bool apply_normalization_; - - bool apply_mirroring_; - float prob_mirroring_; - - bool apply_rot_; - float rot_min_; - float rot_max_; - - bool apply_blur_; - int blur_size_; - float blur_max_var_; - - bool apply_contrast_brightness_; - float alpha_; - float beta_; -}; - /** * @brief Abstract base class that factors out the BLAS code common to * ConvolutionLayer and DeconvolutionLayer. @@ -369,11 +317,13 @@ class CuDNNConvolutionLayer : public ConvolutionLayer { bool handles_setup_; cudnnHandle_t* handle_; cudaStream_t* stream_; - vector bottom_descs_, top_descs_; - cudnnTensor4dDescriptor_t bias_desc_; + vector bottom_descs_, top_descs_; + cudnnTensorDescriptor_t bias_desc_; cudnnFilterDescriptor_t filter_desc_; vector conv_descs_; int bottom_offset_, top_offset_, weight_offset_, bias_offset_; + size_t workspaceSizeInBytes; + void *workspace; }; #endif diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index 90aad675ed3..49093830739 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -37,7 +37,7 @@ void AccuracyLayer::Reshape( << "label count (number of labels) must be N*H*W, " << "with integer values in {0, 1, ..., C-1}."; vector top_shape(0); // Accuracy is a scalar; 0 axes. - top[0]->Reshape(top_shape); + top[0]->Reshape(top_shape,this->device_context_); } template diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index c4040cdcaaa..8a5ace322e6 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -23,10 +23,10 @@ void ArgMaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { if (out_max_val_) { // Produces max_ind and max_val - top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); + top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1,this->device_context_); } else { // Produces only max_ind - top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); + top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1,this->device_context_); } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 0692c11c257..815a33799a8 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -20,11 +20,11 @@ void ContrastiveLossLayer::LayerSetUp( CHECK_EQ(bottom[2]->channels(), 1); CHECK_EQ(bottom[2]->height(), 1); CHECK_EQ(bottom[2]->width(), 1); - diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1,this->device_context_); + diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1,this->device_context_); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1,this->device_context_); // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1,this->device_context_); for (int i = 0; i < bottom[0]->channels(); ++i) summer_vec_.mutable_cpu_data()[i] = Dtype(1); } diff --git a/src/caffe/layers/datarandtransform_layer.cpp b/src/caffe/layers/datarandtransform_layer.cpp deleted file mode 100644 index 03bb543dc79..00000000000 --- a/src/caffe/layers/datarandtransform_layer.cpp +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2014 Julien Martel - -#include "caffe/layer.hpp" -#include "caffe/util/io.hpp" -#include "caffe/vision_layers.hpp" - -#include -#include - -namespace caffe { - -template -DataRandTransformLayer::~DataRandTransformLayer() { -} - -template -void DataRandTransformLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - CHECK_EQ(bottom.size(), 1)<< "Data Rand Transform Layer takes a single blob as input."; - CHECK_EQ(top.size(), 1) << "Data Rand Transform Layer takes a single blob as output."; - - cv::namedWindow("Test",0); - - // Bottom[0] cause there is only one input blob - NUM_ = bottom[0]->num(); - CHANNELS_ = bottom[0]->channels(); - HEIGHT_ = bottom[0]->height(); - WIDTH_ = bottom[0]->width(); - - // Announce for the top blob layer - top[0]->Reshape(NUM_, - CHANNELS_, - HEIGHT_, - WIDTH_, this->device_context_ - ); - - // Read the layer parameters - apply_normalization_ = this->layer_param_.apply_normalization(); - - apply_mirroring_ = this->layer_param_.apply_mirroring(); - prob_mirroring_ = this->layer_param_.prob_mirroring(); - - apply_rot_ = this->layer_param_.apply_rot(); - rot_min_ = this->layer_param_.rot_min(); - rot_max_ = this->layer_param_.rot_max(); - - apply_blur_ = this->layer_param_.apply_blur(); - blur_size_ = this->layer_param_.blur_size(); - blur_max_var_ = this->layer_param_.blur_max_var(); - - apply_contrast_brightness_ = this->layer_param_.apply_contrast_brightness(); - alpha_ = this->layer_param_.alpha_c(); - beta_ = this->layer_param_.beta_c(); - - /* - LOG(ERROR) << "\nRotation: " << apply_rot_ << ", min: " << rot_min_ << ", max: " << rot_max_ - << "\nBlur: " << apply_blur_ << ", size: " << blur_size_ << ", var: " << blur_max_var_ - << "\nContrast/Brightness: " << apply_contrast_brightness_ << ", alpha: " << alpha_ << ", beta: " << beta_; - */ - return; -} - -template -void DataRandTransformLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - //TODO?? -} - -template -void DataRandTransformLayer::Forward_cpu( - const vector*>& bottom, const vector*>& top) { - // Transform the blob data in an opencv image - cv::Mat img(bottom[0]->height(), bottom[0]->width(), CV_32FC3); - cv::Mat imgTransformed(img.rows, img.cols, CV_32FC3); - - // Normalization - float val; - std::vector mean(CHANNELS_); - std::vector std(CHANNELS_); - - // Center of rotation - cv::Point2f center = cv::Point2f(img.cols / 2, img.rows / 2); - - // Mirroring - cv::Mat map_x(HEIGHT_, WIDTH_, CV_32FC1); - cv::Mat map_y(HEIGHT_, WIDTH_, CV_32FC1); - for (int j = 0; j < HEIGHT_; j++) { - for (int i = 0; i < WIDTH_; i++) { - map_x.at(j, i) = WIDTH_ - i; - map_y.at(j, i) = HEIGHT_ - j; - } - } - - for (int n = 0; n < NUM_; n++) { - // Reinit for normalization - for (int c = 0; c < CHANNELS_; c++) { - mean[c] = 0; - std[c] = 0; - } - - // Transform the data into an opencv structure to apply transformations - for (int c = 0; c < CHANNELS_; c++) { - for (int h = 0; h < HEIGHT_; h++) { - for (int w = 0; w < WIDTH_; w++) { - val = (bottom[0]->data_at(n, c, h, w)); - - img.at(h, w)[c] = val; - mean[c] += val; - } - } - } - for (int c = 0; c < CHANNELS_; c++) { - mean[c] = mean[c] / (img.rows * img.cols); - //LOG(ERROR) << "Mean" << c << "="<< mean[c]; - } - - // Normalize patch-wise - if (apply_normalization_) { - for (int h = 0; h < HEIGHT_; h++) { - for (int w = 0; w < WIDTH_; w++) { - for (int c = 0; c < CHANNELS_; c++) { - val = img.at(h, w)[c]; - - std[c] += (mean[c] - val) * (mean[c] - val); - } - } - } - for (int c = 0; c < CHANNELS_; c++) { - std[c] = sqrtf(std[c] / (img.rows * img.cols)); - //LOG(ERROR) << "Std" << c << "="<< std[c]; - } - - for (int h = 0; h < HEIGHT_; h++) { - for (int w = 0; w < WIDTH_; w++) { - for (int c = 0; c < CHANNELS_; c++) { - img.at(h, w)[c] = (img.at(h, w)[c] - mean[c]) - / std[c]; - } - } - } - } - - // Double mirroring - if (apply_mirroring_) { - cv::Scalar color; - if (apply_normalization_) - color = cv::Scalar(0, 0, 0); - else - color = cv::Scalar(0.5, 0.5, 0.5); - - if (float(rand()) / RAND_MAX < prob_mirroring_) { - cv::remap(img, imgTransformed, map_x, map_y, CV_INTER_LINEAR, - cv::BORDER_CONSTANT, color); - imgTransformed.copyTo(img); - } - } - - // Rotate image - if (apply_rot_) { - float angle = rot_min_ + (rot_max_ - rot_min_) * float(rand()) / RAND_MAX; // [-rot_min ; rot_max] - cv::Mat rot_mat = cv::getRotationMatrix2D(center, angle, 1.0); - - cv::Scalar color; - if (apply_normalization_) - color = cv::Scalar(0, 0, 0); - else - color = cv::Scalar(0.5, 0.5, 0.5); - - cv::warpAffine(img, imgTransformed, rot_mat, img.size(), - cv::WARP_INVERSE_MAP, cv::BORDER_CONSTANT, color); - imgTransformed.copyTo(img); - } - - // Blur image - if (apply_blur_) { - float s = blur_max_var_ * float(rand()) / RAND_MAX; // [0.0 ; max_var] - cv::GaussianBlur(img, img, cv::Size(blur_size_, blur_size_), s); - } - - // Contrast enhancement - if (apply_contrast_brightness_) { - float alpha = (1.0 - alpha_) + (2 * alpha_) * float(rand()) / RAND_MAX; //[1.0-alpha ; 1.0+alpha] - float beta = 2 * beta_ * float(rand()) / RAND_MAX - beta_; //[-beta ; +beta] - for (int y = 0; y < HEIGHT_; y++) { - for (int x = 0; x < WIDTH_; x++) { - for (int c = 0; c < CHANNELS_; c++) { - img.at(y, x)[c] = alpha * (img.at(y, x)[c]) - + beta; - } - } - } - } - - // === DEBUG - //cv::imshow("Test",0.5+0.5*img); - //cv::waitKey(1); - - //Fill back to the blob - Dtype* data = top[0]->mutable_cpu_data(); - for (int c = 0; c < CHANNELS_; c++) { - for (int h = 0; h < HEIGHT_; h++) { - for (int w = 0; w < WIDTH_; w++) { - *(data + top[0]->offset(n, c, h, w)) = img.at(h, w)[c]; - } - } - } - } -} - -template -void DataRandTransformLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { - // No GPU implementation for now, just apply the CPU transformations - Forward_cpu(bottom, top); -} - -// The backward operations are dummy - they do not carry any computation. -template -void DataRandTransformLayer::Backward_cpu( - const vector*>& top, const vector& propagate_down, - const vector*>& bottom) { - NOT_IMPLEMENTED; -} - -template -void DataRandTransformLayer::Backward_gpu( - const vector*>& top, const vector& propagate_down, - const vector*>& bottom) { - NOT_IMPLEMENTED; -} - -INSTANTIATE_CLASS(DataRandTransformLayer); -REGISTER_LAYER_CLASS(DataRandTransform); - -} // namespace caffe diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 80efa31b22c..9fb445386a2 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -13,7 +13,7 @@ void EuclideanLossLayer::Reshape( LossLayer::Reshape(bottom, top); CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1)) << "Inputs must have the same dimension."; - diff_.ReshapeLike(*bottom[0]); + diff_.ReshapeLike(*bottom[0],this->device_context_,this->device_context_); } template diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index e1c51f72591..1b3b3207307 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -289,32 +289,10 @@ message LayerParameter { repeated NetStateRule include = 8; repeated NetStateRule exclude = 9; - // The optional random transformation variables for the data transformation layer - // Note: parameters 70 to 90 inserted by Fabian Tschopp for datarandtransform_layer.cpp - optional bool apply_normalization = 70 [ default = false ]; - - optional bool apply_mirroring = 80 [ default = false ]; - optional float prob_mirroring = 81 [ default = 0.5 ]; - - optional bool apply_rot = 71 [ default = false ]; - optional float rot_min = 72 [ default = -10.0 ]; - optional float rot_max = 73 [ default = 10.0 ]; - - optional bool apply_blur = 74 [ default = false ]; - optional uint32 blur_size = 75 [ default = 7 ]; - optional float blur_max_var = 76 [ default = 1.5 ]; - - optional bool apply_contrast_brightness = 77 [ default = false ]; - optional float alpha_c = 78 [ default = 1.3 ]; - optional float beta_c = 79 [ default = 0.1 ]; - - optional float temp = 90 [default = 1]; - // Parameters for Greentea optional int32 device = 95 [default = -1]; // Parameters for Splitnet optional int32 buffer = 96 [default = -1]; - // Parameters for data pre-processing. optional TransformationParameter transform_param = 100; @@ -849,7 +827,6 @@ message V1LayerParameter { TANH = 23; WINDOW_DATA = 24; THRESHOLD = 31; - DATA_RAND_TRANSFORM = 70; CONVOLUTION_SK = 71; POOLING_SK = 72; } @@ -987,28 +964,6 @@ message V0LayerParameter { // the other dimensions must be the same for all the bottom blobs. // By default it will concatenate blobs along the channels dimension. optional uint32 concat_dim = 65 [default = 1]; - - // The optional random transformation variables for the data transformation layer - // Note: parameters 70 to 90 inserted by Fabian Tschopp for datarandtransform_layer.cpp - optional bool apply_normalization = 70 [ default = false ]; - - optional bool apply_mirroring = 80 [ default = false ]; - optional float prob_mirroring = 81 [ default = 0.5 ]; - - optional bool apply_rot = 71 [ default = false ]; - optional float rot_min = 72 [ default = -10.0 ]; - optional float rot_max = 73 [ default = 10.0 ]; - - optional bool apply_blur = 74 [ default = false ]; - optional uint32 blur_size = 75 [ default = 7 ]; - optional float blur_max_var = 76 [ default = 1.5 ]; - - optional bool apply_contrast_brightness = 77 [ default = false ]; - optional float alpha_c = 78 [ default = 1.3 ]; - optional float beta_c = 79 [ default = 0.1 ]; - - optional float temp = 90 [default = 1]; - optional HDF5OutputParameter hdf5_output_param = 1001; } From e94917b313358092ff55dfae2dda8e886cac3877 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 27 Apr 2015 00:32:58 +0200 Subject: [PATCH 018/254] Compiles again. --- src/caffe/blob.cpp | 4 +--- src/caffe/layer_factory.cpp | 9 ++------- src/caffe/layers/base_data_layer.cpp | 4 ++-- src/caffe/layers/base_data_layer.cu | 2 +- src/caffe/layers/dropout_layer.cpp | 2 +- src/caffe/layers/dummy_data_layer.cpp | 4 ++-- src/caffe/layers/eltwise_layer.cpp | 4 ++-- src/caffe/layers/euclidean_loss_layer.cpp | 2 +- src/caffe/layers/flatten_layer.cu | 23 ----------------------- src/caffe/layers/hdf5_data_layer.cpp | 2 +- src/caffe/layers/hdf5_output_layer.cu | 4 ++-- src/caffe/layers/image_data_layer.cpp | 20 ++++++++++---------- src/caffe/layers/infogain_loss_layer.cpp | 2 +- src/caffe/layers/loss_layer.cpp | 2 +- src/caffe/layers/lrn_layer.cpp | 10 +++++----- src/caffe/layers/mvn_layer.cpp | 10 +++++----- src/caffe/util/upgrade_proto.cpp | 4 ---- 17 files changed, 37 insertions(+), 71 deletions(-) delete mode 100644 src/caffe/layers/flatten_layer.cu diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index d033f80ba85..27ac2627a4e 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -1,7 +1,6 @@ #include #include - #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/syncedmem.hpp" @@ -60,8 +59,7 @@ void Blob::Reshape(const BlobShape& shape, template void Blob::ReshapeLike(const Blob& other, DeviceContext device_context) { - Reshape(other.num(), other.channels(), other.height(), other.width(), - device_context); + Reshape(other.shape(), device_context); } template diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index eccbfb2caa4..afe9774f0c2 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -11,19 +11,14 @@ namespace caffe { -template -shared_ptr > GetDataRandTransformLayer(const LayerParameter& param) { - return shared_ptr >(new DataRandTransformLayer(param)); -} - template shared_ptr > GetConvolutionSKLayer(const LayerParameter& param) { - return shared_ptr >(new ConvolutionSKLayer(param)); + return shared_ptr>(new ConvolutionSKLayer(param)); } template shared_ptr > GetPoolingSKLayer(const LayerParameter& param) { - return shared_ptr >(new PoolingSKLayer(param)); + return shared_ptr>(new PoolingSKLayer(param)); } // Get convolution layer according to engine. diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 352200915d7..e9f18097923 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -24,7 +24,7 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, // The subclasses should setup the size of bottom and top DataLayerSetUp(bottom, top); data_transformer_.reset( - new DataTransformer(transform_param_, this->phase_)); + new DataTransformer(transform_param_, this->phase_, this->device_context_)); data_transformer_->InitRand(); } @@ -64,7 +64,7 @@ void BasePrefetchingDataLayer::Forward_cpu( DLOG(INFO) << "Thread joined"; // Reshape to loaded data. top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + this->prefetch_data_.height(), this->prefetch_data_.width(), this->device_context_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 775f6c47f7e..5cc956c147d 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -11,7 +11,7 @@ void BasePrefetchingDataLayer::Forward_gpu( JoinPrefetchThread(); // Reshape to loaded data. top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + this->prefetch_data_.height(), this->prefetch_data_.width(), this->device_context_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index ec1256fd2fa..02157b6cd74 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -27,7 +27,7 @@ void DropoutLayer::Reshape(const vector*>& bottom, NeuronLayer::Reshape(bottom, top); // Set up the cache for random number generation rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); + bottom[0]->height(), bottom[0]->width(), this->device_context_); } template diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp index 6b0d617464c..8358a31808d 100644 --- a/src/caffe/layers/dummy_data_layer.cpp +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -83,10 +83,10 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, (param.height_size() == 1) ? param.height(0) : param.height(i); const int width = (param.width_size() == 1) ? param.width(0) : param.width(i); - top[i]->Reshape(num, channels, height, width); + top[i]->Reshape(num, channels, height, width, this->device_context_); } else { const int shape_index = (param.shape_size() == 1) ? 0 : i; - top[i]->Reshape(param.shape(shape_index)); + top[i]->Reshape(param.shape(shape_index), this->device_context_); } } // Run Forward once, with refill_ inverted, to fill the constant Blobs. diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index a80700736bd..9d035e9b6e3 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -34,11 +34,11 @@ void EltwiseLayer::Reshape(const vector*>& bottom, for (int i = 1; i < bottom.size(); ++i) { CHECK(bottom[i]->shape() == bottom[0]->shape()); } - top[0]->ReshapeLike(*bottom[0]); + top[0]->ReshapeLike(*bottom[0], this->device_context_); // If max operation, we will initialize the vector index part. if (this->layer_param_.eltwise_param().operation() == EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { - max_idx_.Reshape(bottom[0]->shape()); + max_idx_.Reshape(bottom[0]->shape(), this->device_context_); } } diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 9fb445386a2..37aa2b45e9a 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -13,7 +13,7 @@ void EuclideanLossLayer::Reshape( LossLayer::Reshape(bottom, top); CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1)) << "Inputs must have the same dimension."; - diff_.ReshapeLike(*bottom[0],this->device_context_,this->device_context_); + diff_.ReshapeLike(*bottom[0], this->device_context_); } template diff --git a/src/caffe/layers/flatten_layer.cu b/src/caffe/layers/flatten_layer.cu deleted file mode 100644 index 42abdad4499..00000000000 --- a/src/caffe/layers/flatten_layer.cu +++ /dev/null @@ -1,23 +0,0 @@ -#include - -#include "caffe/layer.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -void FlattenLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - top[0]->ShareData(*bottom[0]); -} - -template -void FlattenLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - bottom[0]->ShareDiff(*top[0]); -} - -INSTANTIATE_LAYER_GPU_FUNCS(FlattenLayer); - -} // namespace caffe diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 8a782f7e524..e75a051bd43 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -121,7 +121,7 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, for (int j = 1; j < top_shape.size(); ++j) { top_shape[j] = hdf_blobs_[i]->shape(j); } - top[i]->Reshape(top_shape); + top[i]->Reshape(top_shape, this->device_context_); } } diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index ae497c34fc2..327387a9eb6 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -17,9 +17,9 @@ void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, CHECK_GE(bottom.size(), 2); CHECK_EQ(bottom[0]->num(), bottom[1]->num()); data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); + bottom[0]->height(), bottom[0]->width(), this->device_context_); label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), - bottom[1]->height(), bottom[1]->width()); + bottom[1]->height(), bottom[1]->width(), this->device_context_); const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 38ebbd5ec14..95829a49e55 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -69,21 +69,21 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const int crop_size = this->layer_param_.transform_param().crop_size(); const int batch_size = this->layer_param_.image_data_param().batch_size(); if (crop_size > 0) { - top[0]->Reshape(batch_size, channels, crop_size, crop_size); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); - this->transformed_data_.Reshape(1, channels, crop_size, crop_size); + top[0]->Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); + this->transformed_data_.Reshape(1, channels, crop_size, crop_size, this->device_context_); } else { - top[0]->Reshape(batch_size, channels, height, width); - this->prefetch_data_.Reshape(batch_size, channels, height, width); - this->transformed_data_.Reshape(1, channels, height, width); + top[0]->Reshape(batch_size, channels, height, width, this->device_context_); + this->prefetch_data_.Reshape(batch_size, channels, height, width, this->device_context_); + this->transformed_data_.Reshape(1, channels, height, width, this->device_context_); } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label vector label_shape(1, batch_size); - top[1]->Reshape(label_shape); - this->prefetch_label_.Reshape(label_shape); + top[1]->Reshape(label_shape, this->device_context_); + this->prefetch_label_.Reshape(label_shape, this->device_context_); } template @@ -116,9 +116,9 @@ void ImageDataLayer::InternalThreadEntry() { cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, 0, 0, is_color); this->prefetch_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); + cv_img.rows, cv_img.cols, this->device_context_); this->transformed_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); + cv_img.rows, cv_img.cols, this->device_context_); } Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index a1e0b40de0e..2540443d05b 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -20,7 +20,7 @@ void InfogainLossLayer::LayerSetUp( BlobProto blob_proto; ReadProtoFromBinaryFile( this->layer_param_.infogain_loss_param().source(), &blob_proto); - infogain_.FromProto(blob_proto); + infogain_.FromProto(blob_proto, this->device_context_); } } diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index 3496a5c2a8a..5086dbaca09 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -25,7 +25,7 @@ void LossLayer::Reshape( CHECK_EQ(bottom[0]->num(), bottom[1]->num()) << "The data and label should have the same number."; vector loss_shape(0); // Loss layers output a scalar; 0 axes. - top[0]->Reshape(loss_shape); + top[0]->Reshape(loss_shape, this->device_context_); } INSTANTIATE_CLASS(LossLayer); diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 36c1ace4c99..4bb2fb08c01 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -77,8 +77,8 @@ void LRNLayer::Reshape(const vector*>& bottom, width_ = bottom[0]->width(); switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: - top[0]->Reshape(num_, channels_, height_, width_); - scale_.Reshape(num_, channels_, height_, width_); + top[0]->Reshape(num_, channels_, height_, width_, this->device_context_); + scale_.Reshape(num_, channels_, height_, width_, this->device_context_); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: split_layer_->Reshape(bottom, split_top_vec_); @@ -115,7 +115,7 @@ void LRNLayer::CrossChannelForward_cpu( for (int i = 0; i < scale_.count(); ++i) { scale_data[i] = k_; } - Blob padded_square(1, channels_ + size_ - 1, height_, width_); + Blob padded_square(1, channels_ + size_ - 1, height_, width_, this->device_context_); Dtype* padded_square_data = padded_square.mutable_cpu_data(); caffe_set(padded_square.count(), Dtype(0), padded_square_data); Dtype alpha_over_size = alpha_ / size_; @@ -186,8 +186,8 @@ void LRNLayer::CrossChannelBackward_cpu( const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* scale_data = scale_.cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - Blob padded_ratio(1, channels_ + size_ - 1, height_, width_); - Blob accum_ratio(1, 1, height_, width_); + Blob padded_ratio(1, channels_ + size_ - 1, height_, width_, this->device_context_); + Blob accum_ratio(1, 1, height_, width_, this->device_context_); Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data(); // We hack a little bit by using the diff() to store an additional result diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index b74d7b4f300..b64f2b77a02 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -11,15 +11,15 @@ template void MVNLayer::Reshape(const vector*>& bottom, const vector*>& top) { top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); + bottom[0]->height(), bottom[0]->width(), this->device_context_); mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), - 1, 1); + 1, 1, this->device_context_); variance_.Reshape(bottom[0]->num(), bottom[0]->channels(), - 1, 1); + 1, 1, this->device_context_); temp_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); + bottom[0]->height(), bottom[0]->width(), this->device_context_); sum_multiplier_.Reshape(1, 1, - bottom[0]->height(), bottom[0]->width()); + bottom[0]->height(), bottom[0]->width(), this->device_context_); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); } diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 9f1aa250b27..f0f9854ebfa 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -512,8 +512,6 @@ V1LayerParameter_LayerType UpgradeV0LayerType(const string& type) { return V1LayerParameter_LayerType_TANH; } else if (type == "window_data") { return V1LayerParameter_LayerType_WINDOW_DATA; - } else if (type == "data_rand_transform") { - return V1LayerParameter_LayerType_DATA_RAND_TRANSFORM; } else { LOG(FATAL)<< "Unknown layer name: " << type; return V1LayerParameter_LayerType_NONE; @@ -938,8 +936,6 @@ const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { return "WindowData"; case V1LayerParameter_LayerType_THRESHOLD: return "Threshold"; - case V1LayerParameter_LayerType_DATA_RAND_TRANSFORM: - return "DataRandTransform"; case V1LayerParameter_LayerType_CONVOLUTION_SK: return "ConvolutionSK"; case V1LayerParameter_LayerType_POOLING_SK: From b9bc9e026af61e99b3bf4344e6faeec28d3d8e7c Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 27 Apr 2015 01:15:14 +0200 Subject: [PATCH 019/254] Corrected net.cpp to incorporate the newest Caffe changes and GreenTea. --- src/caffe/net.cpp | 54 ++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index cca66b21534..e0e1204c694 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -48,7 +48,15 @@ void Net::Init(const NetParameter& in_param) { name_ = param.name(); map blob_name_to_idx; set available_blobs; - CHECK_EQ(param.input_size() * 4, param.input_dim_size())<< "Incorrect input blob dimension specifications."; + CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0) + << "Must specify either input_shape OR deprecated input_dim, not both."; + if (param.input_dim_size() > 0) { + // Deprecated 4D dimensions. + CHECK_EQ(param.input_size() * 4, param.input_dim_size())<< "Incorrect input blob dimension specifications."; + } else { + CHECK_EQ(param.input_size(), param.input_shape_size()) + << "Exactly one input_shape must be specified per input."; + } memory_used_ = 0; // set the input blobs for (int input_id = 0; input_id < param.input_size(); ++input_id) { @@ -56,7 +64,7 @@ void Net::Init(const NetParameter& in_param) { AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); } DLOG(INFO)<< "Memory required for data: " << memory_used_ * sizeof(Dtype); - // For each layer, set up their input and output + // For each layer, set up its input and output bottom_vecs_.resize(param.layer_size()); top_vecs_.resize(param.layer_size()); bottom_id_vecs_.resize(param.layer_size()); @@ -108,11 +116,7 @@ void Net::Init(const NetParameter& in_param) { blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); } blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id); - LOG(INFO)<< "Top shape: " << top_vecs_[layer_id][top_id]->num() << " " - << top_vecs_[layer_id][top_id]->channels() << " " - << top_vecs_[layer_id][top_id]->height() << " " - << top_vecs_[layer_id][top_id]->width() << " (" - << top_vecs_[layer_id][top_id]->count() << ")"; + LOG(INFO)<< "Top shape: " << top_vecs_[layer_id][top_id]->shape_string(); if (layer->loss(top_id)) { LOG(INFO)<< " with loss weight " << layer->loss(top_id); } @@ -347,10 +351,16 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, if (blob_name_to_idx) {(*blob_name_to_idx)[blob_name] = blob_id;} if (layer_id == -1) { // Set the (explicitly specified) dimensions of the input blob. - blob_pointer->Reshape(param.input_dim(top_id * 4), - param.input_dim(top_id * 4 + 1), - param.input_dim(top_id * 4 + 2), - param.input_dim(top_id * 4 + 3), Caffe::GetDeviceContext(layer_param->device())); + if (param.input_dim_size() > 0) { + blob_pointer->Reshape(param.input_dim(top_id * 4), + param.input_dim(top_id * 4 + 1), + param.input_dim(top_id * 4 + 2), + param.input_dim(top_id * 4 + 3), Caffe::GetDeviceContext(layer_param->device())); + } + else + { + blob_pointer->Reshape(param.input_shape(top_id),Caffe::GetDeviceContext(layer_param->device())); + } net_input_blob_indices_.push_back(blob_id); net_input_blobs_.push_back(blob_pointer.get()); } else { @@ -433,14 +443,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, CHECK_EQ(this_blob->count(), owner_blob->count())<< "Shared parameter blobs must have the same count."; } else { // Strict dimension checking -- all dims must be the same. - CHECK_EQ(this_blob->num(), owner_blob->num()) - << "Shared parameter blobs must have the same num."; - CHECK_EQ(this_blob->channels(), owner_blob->channels()) - << "Shared parameter blobs must have the same channels."; - CHECK_EQ(this_blob->height(), owner_blob->height()) - << "Shared parameter blobs must have the same height."; - CHECK_EQ(this_blob->width(), owner_blob->width()) - << "Shared parameter blobs must have the same width."; + CHECK(this_blob->shape() == owner_blob->shape()); } layers_[layer_id]->blobs()[param_id]->ShareData( *layers_[owner_layer_id]->blobs()[owner_param_id]); @@ -653,10 +656,7 @@ void Net::ShareTrainedLayersWith(const Net* other) { CHECK_EQ(target_blobs.size(), source_layer->blobs().size())<< "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { Blob* source_blob = source_layer->blobs()[j].get(); - CHECK_EQ(target_blobs[j]->num(), source_blob->num()); - CHECK_EQ(target_blobs[j]->channels(), source_blob->channels()); - CHECK_EQ(target_blobs[j]->height(), source_blob->height()); - CHECK_EQ(target_blobs[j]->width(), source_blob->width()); + CHECK(target_blobs[j]->shape() == source_blob->shape()); target_blobs[j]->ShareData(*source_blob); } } @@ -721,12 +721,8 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { ->blobs(); CHECK_EQ(target_blobs.size(), source_layer.blobs_size())<< "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { - CHECK_EQ(target_blobs[j]->num(), source_layer.blobs(j).num()); - CHECK_EQ(target_blobs[j]->channels(), source_layer.blobs(j).channels()); - CHECK_EQ(target_blobs[j]->height(), source_layer.blobs(j).height()); - CHECK_EQ(target_blobs[j]->width(), source_layer.blobs(j).width()); - target_blobs[j]->FromProto(source_layer.blobs(j), - layers_[target_layer_id]->device_context()); + const bool kReshape = false; + target_blobs[j]->FromProto(source_layer.blobs(j), layers_[target_layer_id]->device_context(), kReshape); } } } From 786d23eb6dd7f3cd9b77abf0790f395999721487 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 6 May 2015 01:09:57 +0200 Subject: [PATCH 020/254] Conv-SK kernel experiments (naive implementation). --- Makefile | 3 +- include/caffe/greentea/autotuner.hpp | 15 + include/caffe/greentea/cl_kernels.hpp | 2 + include/caffe/greentea/greentea.hpp | 2 +- include/caffe/greentea/greentea_im2col.hpp | 2 + .../greentea/greentea_math_functions.hpp | 4 +- src/caffe/greentea/autotuner.cpp | 12 + src/caffe/greentea/cl_headers/header.cl | 30 ++ src/caffe/greentea/cl_kernels.cpp | 58 +++- src/caffe/greentea/cl_kernels.sh | 65 +++- src/caffe/greentea/cl_kernels/AHY9U3~9.CL | 10 + src/caffe/greentea/cl_kernels/activation.cl | 20 +- src/caffe/greentea/cl_kernels/aux.cl | 22 -- src/caffe/greentea/cl_kernels/channel.cl | 134 ++------ .../greentea/cl_kernels/convolution_sk.cl | 291 +++++++++++++++++- .../greentea/cl_kernels/im2col_sk_gpu.cl | 54 +--- src/caffe/greentea/cl_kernels/math.cl | 19 +- src/caffe/greentea/cl_kernels/pooling_sk.cl | 72 +---- .../greentea/cl_kernels/softmax_loss_gpu.cl | 48 +-- src/caffe/greentea/greentea_im2col.cpp | 3 +- .../greentea/greentea_math_functions.cpp | 9 +- src/caffe/layers/conv_sk_layer.cpp | 5 +- src/caffe/layers/conv_sk_layer.cu | 104 +++---- src/caffe/layers/pooling_sk_layer.cu | 16 +- 24 files changed, 577 insertions(+), 423 deletions(-) create mode 100644 include/caffe/greentea/autotuner.hpp create mode 100644 src/caffe/greentea/autotuner.cpp create mode 100644 src/caffe/greentea/cl_headers/header.cl create mode 100644 src/caffe/greentea/cl_kernels/AHY9U3~9.CL delete mode 100644 src/caffe/greentea/cl_kernels/aux.cl diff --git a/Makefile b/Makefile index 9dfbe1a9ac7..803cb00165f 100644 --- a/Makefile +++ b/Makefile @@ -212,6 +212,7 @@ ifeq ($(USE_GREENTEA),1) CL_KERNELS_CPP = src/caffe/greentea/cl_kernels.cpp CL_KERNELS = src/caffe/greentea/cl_kernels/*.cl + CL_HEADERS = src/caffe/greentea/cl_headers/*.cl CL_KERNELS_SH = src/caffe/greentea/cl_kernels.sh endif @@ -633,7 +634,7 @@ $(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) -Wl,-rpath,$(ORIGIN)/../../lib # Copy the OpenCL kernels into C++ char strings -$(CL_KERNELS_CPP) : $(CL_KERNELS) +$(CL_KERNELS_CPP) : $(CL_HEADERS) $(CL_KERNELS) $(CL_KERNELS_SH) proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) diff --git a/include/caffe/greentea/autotuner.hpp b/include/caffe/greentea/autotuner.hpp new file mode 100644 index 00000000000..1e1c3263ef8 --- /dev/null +++ b/include/caffe/greentea/autotuner.hpp @@ -0,0 +1,15 @@ +/* + * autotuner.hpp + * + * Created on: Apr 28, 2015 + * Author: fabian + */ + +#ifndef GREENTEA_AUTOTUNER_HPP_ +#define GREENTEA_AUTOTUNER_HPP_ + + + + + +#endif /* GREENTEA_AUTOTUNER_HPP_ */ diff --git a/include/caffe/greentea/cl_kernels.hpp b/include/caffe/greentea/cl_kernels.hpp index c07c31db47c..02073744dc4 100644 --- a/include/caffe/greentea/cl_kernels.hpp +++ b/include/caffe/greentea/cl_kernels.hpp @@ -1,4 +1,5 @@ // AUTOMATICALLY GENERATED FILE, DO NOT EDIT +#ifdef USE_GREENTEA #ifndef GREENTEA_CL_KERNELS_HPP_ #define GREENTEA_CL_KERNELS_HPP_ #include "caffe/greentea/greentea.hpp" @@ -11,3 +12,4 @@ namespace caffe { viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx); } #endif +#endif diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index f3e2c397528..f08d78ed0fa 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -74,7 +74,7 @@ struct is_same { #endif // Macro to select the single (_s) or double (_d) precision kernel -#define CL_KERNEL_SELECT(kernel) is_same::value ? kernel "_s" : kernel "_d" +#define CL_KERNEL_SELECT(kernel) is_same::value ? kernel "_float" : kernel "_double" #endif diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index 57e1c0712f3..a314e4c04cc 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -7,6 +7,7 @@ #ifndef GREENTEA_IM2COL_HPP_ #define GREENTEA_IM2COL_HPP_ +#ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" @@ -50,4 +51,5 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, } +#endif #endif /* GREENTEA_IM2COL_HPP_ */ diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 757ca63c09f..d01735ef2af 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -7,7 +7,7 @@ #ifndef GREENTEA_MATH_FUNCTIONS_HPP_ #define GREENTEA_MATH_FUNCTIONS_HPP_ - +#ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/util/math_functions.hpp" #include "viennacl/ocl/context.hpp" @@ -137,5 +137,5 @@ void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, void greentea_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y);*/ } - +#endif #endif /* GREENTEA_MATH_FUNCTIONS_HPP_ */ diff --git a/src/caffe/greentea/autotuner.cpp b/src/caffe/greentea/autotuner.cpp new file mode 100644 index 00000000000..9683f10980a --- /dev/null +++ b/src/caffe/greentea/autotuner.cpp @@ -0,0 +1,12 @@ +/* + * autotuner.cpp + * + * Created on: Apr 28, 2015 + * Author: Fabian Tschopp + */ + + + + + + diff --git a/src/caffe/greentea/cl_headers/header.cl b/src/caffe/greentea/cl_headers/header.cl new file mode 100644 index 00000000000..19c4325eaf6 --- /dev/null +++ b/src/caffe/greentea/cl_headers/header.cl @@ -0,0 +1,30 @@ +#ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define __constant +#define __local +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define get_local_id(x) 0 +#define get_local_size(x) 0 +#define FLT_MAX 0 +#define FLT_MIN 0 +#define cl_khr_fp64 +#define cl_amd_fp64 +#define DOUBLE_SUPPORT_AVAILABLE +#define CLK_LOCAL_MEM_FENCE +#define Dtype float +#define barrier(x) +#endif + +#define CONCAT(A,B) A##_##B +#define TEMPLATE(name,type) CONCAT(name,type) + + +#if defined(cl_khr_fp64) +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#elif defined(cl_amd_fp64) +#pragma OPENCL EXTENSION cl_amd_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#endif diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 1447fbf8700..6346905e33e 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -1,29 +1,55 @@ // AUTOMATICALLY GENERATED FILE, DO NOT EDIT +#ifdef USE_GREENTEA #include "caffe/greentea/cl_kernels.hpp" #include #include namespace caffe { -std::string activation = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void relu_forward_s(const int n, __global const float* in,\n __global float* out, float negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void relu_forward_d(const int n, __global const double* in,\n __global double* out, double negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; -std::string aux = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void gpu_set_s(const int n, const float alpha, __global float* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}\n\n__kernel void gpu_set_d(const int n, const double alpha, __global double* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; -std::string channel = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_channel_max_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_max_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double maxval = (double) -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void kernel_channel_subtract_s(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const float* channel_max,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_subtract_d(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const double* channel_max,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_exp_s(const int count, __global const float* data,\n __global float* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_exp_d(const int count, __global const double* data,\n __global double* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void kernel_channel_sum_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data,\n __global float* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_sum_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data,\n __global double* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void kernel_channel_div_s(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const float* channel_sum,\n __global float* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_div_d(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const double* channel_sum,\n __global double* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void kernel_channel_dot_s(const int num, const int channels,\n const int spatial_dim,\n __global const float* data_1,\n __global const float* data_2,\n __global float* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}\n\n__kernel void kernel_channel_dot_d(const int num, const int channels,\n const int spatial_dim,\n __global const double* data_1,\n __global const double* data_2,\n __global double* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n double dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string convolution_sk = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void convolution_sk_s(__global const float *w, __global const float *in,\n const int in_off, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, __global float *out, const int out_off) {\n\n\n\n for (int index = get_global_id(0); index < 0; index += get_global_size(0)) {\n\n//(*(out+))\n\n }\n}"; -std::string im2col_sk_gpu = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global float* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global float* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const float* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void im2col_sk_gpu_kernel_d(const int n,\n __global const double* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global double* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global double* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const double* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string math = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void kernel_mul_s(const int n, __global const float* a, const int offa,\n __global float* b, const int offb, __global float* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}\n\n__kernel void kernel_mul_d(const int n, __global const double* a, const int offa,\n __global double* b, const int offb, __global double* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; -std::string pooling_sk = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MAX 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void max_pool_forward_sk_s(const int nthreads,\n __global float* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global float* top_data,\n const int use_mask,\n __global int* mask,\n __global float* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n float maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void max_pool_forward_sk_d(const int nthreads,\n __global const double* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global double* top_data,\n __global int* mask,\n __global double* top_mask) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n double maxval = -FLT_MAX;\n int maxidx = -1;\n __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; -std::string softmax_loss_gpu = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define FLT_MIN 0\n#endif\n\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n\n__kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data,\n __global const float* label,\n __global float* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global float* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((float) (prob_data[n * dim + label_value * spatial_dim + s]),\n (float) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}\n\n__kernel void softmax_loss_forward_gpu_d(int n,\n __global const double* prob_data,\n __global const double* label,\n __global double* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global double* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((double) (prob_data[n * dim + label_value * spatial_dim + s]),\n (double) FLT_MIN));\n counts[index] = 1;\n }\n }\n\n}"; +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; +std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; +std::string aux_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; +std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*\n// Very naive implementation\n__kernel void TEMPLATE(convolution_ip4v0,Dtype)(__global const float *w,\n __global const float *in,\n const int height,\n const int width,\n __global float *out) {\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int ext_kernel_h = 73;\n const int ext_kernel_w = 73;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Across y-dimension\n for (int yoff = get_global_id(2); yoff < height - ext_kernel_h + 1; yoff +=\n get_global_size(2)) {\n\n // Across x-dimension\n for (int xoff = get_global_id(1); xoff < width - ext_kernel_w + 1; xoff +=\n get_global_size(1)) {\n\n // Across output features\n for (int fout = get_global_id(0); fout < fout_count; fout +=\n get_global_size(0)) {\n\n int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n float outval = 0.0;\n for (int fin = 0; fin < fin_count; ++fin) {\n // Across the kernel itself\n int fin_ptr = fin * width * height;\n int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n\n outval += w[j + i * kernel_w + finout_w_ptr]\n * in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin_ptr];\n }\n }\n }\n out[xoff + yoff * out_w + fout * out_w * out_h] = outval;\n }\n }\n }\n}\n\n// More optimized\n__kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n\n // Kernel uses Z-workers across batches and output features\n // Y-workers across Y-input\n // X-workers across X-input\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n const int fin_fraction = 16;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[16 * 100];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n const int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; fin += fin_fraction) {\n const int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n\n // Load local weights\n // TODO: Correction for non-fitting fraction divisors\n#pragma unroll 1\n for (int k = 0; k < fin_fraction; ++k) {\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w + k * kernel_w * kernel_h] = w[j + i * kernel_w\n + k * kernel_w * kernel_h + finout_w_ptr];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across a fraction of input features\n#pragma unroll 1\n for (int finoff = 0; finoff < fin_fraction; ++finoff) {\n const int finoff_ptr = (finoff + fin) * width * height;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 2\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 2\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w + finoff * kernel_w * kernel_h],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + finoff_ptr + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n const int width = 584;\n const int height = 584;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10*10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[i+j*kernel_w] = w[j + i * kernel_w + fout * fin_count * kernel_h * kernel_w + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 5\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 5\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[i+j*kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin*width*height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n } barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; +std::string im2col_sk_gpu_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string softmax_loss_gpu_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; +std::string aux_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; +std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*\n// Very naive implementation\n__kernel void TEMPLATE(convolution_ip4v0,Dtype)(__global const float *w,\n __global const float *in,\n const int height,\n const int width,\n __global float *out) {\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int ext_kernel_h = 73;\n const int ext_kernel_w = 73;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Across y-dimension\n for (int yoff = get_global_id(2); yoff < height - ext_kernel_h + 1; yoff +=\n get_global_size(2)) {\n\n // Across x-dimension\n for (int xoff = get_global_id(1); xoff < width - ext_kernel_w + 1; xoff +=\n get_global_size(1)) {\n\n // Across output features\n for (int fout = get_global_id(0); fout < fout_count; fout +=\n get_global_size(0)) {\n\n int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n float outval = 0.0;\n for (int fin = 0; fin < fin_count; ++fin) {\n // Across the kernel itself\n int fin_ptr = fin * width * height;\n int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n\n outval += w[j + i * kernel_w + finout_w_ptr]\n * in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin_ptr];\n }\n }\n }\n out[xoff + yoff * out_w + fout * out_w * out_h] = outval;\n }\n }\n }\n}\n\n// More optimized\n__kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n\n // Kernel uses Z-workers across batches and output features\n // Y-workers across Y-input\n // X-workers across X-input\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n const int fin_fraction = 16;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[16 * 100];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n const int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; fin += fin_fraction) {\n const int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n\n // Load local weights\n // TODO: Correction for non-fitting fraction divisors\n#pragma unroll 1\n for (int k = 0; k < fin_fraction; ++k) {\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w + k * kernel_w * kernel_h] = w[j + i * kernel_w\n + k * kernel_w * kernel_h + finout_w_ptr];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across a fraction of input features\n#pragma unroll 1\n for (int finoff = 0; finoff < fin_fraction; ++finoff) {\n const int finoff_ptr = (finoff + fin) * width * height;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 2\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 2\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w + finoff * kernel_w * kernel_h],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + finoff_ptr + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n const int width = 584;\n const int height = 584;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10*10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[i+j*kernel_w] = w[j + i * kernel_w + fout * fin_count * kernel_h * kernel_w + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 5\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 5\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[i+j*kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin*width*height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n } barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; +std::string im2col_sk_gpu_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string softmax_loss_gpu_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; - ss << activation << "\n\n"; - ss << aux << "\n\n"; - ss << channel << "\n\n"; - ss << convolution_sk << "\n\n"; - ss << im2col_sk_gpu << "\n\n"; - ss << math << "\n\n"; - ss << pooling_sk << "\n\n"; - ss << softmax_loss_gpu << "\n\n"; + ss << header << "\n\n"; + ss << "#define Dtype float" << "\n\n"; + ss << activation_float << "\n\n"; + ss << aux_float << "\n\n"; + ss << channel_float << "\n\n"; + ss << convolution_sk_float << "\n\n"; + ss << im2col_sk_gpu_float << "\n\n"; + ss << math_float << "\n\n"; + ss << pooling_sk_float << "\n\n"; + ss << softmax_loss_gpu_float << "\n\n"; + ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; + ss << "#undef Dtype" << "\n\n"; + ss << "#define Dtype double" << "\n\n"; + ss << activation_double << "\n\n"; + ss << aux_double << "\n\n"; + ss << channel_double << "\n\n"; + ss << convolution_sk_double << "\n\n"; + ss << im2col_sk_gpu_double << "\n\n"; + ss << math_double << "\n\n"; + ss << pooling_sk_double << "\n\n"; + ss << softmax_loss_gpu_double << "\n\n"; + ss << "#endif" << "\n\n"; std::string kernel_string = ss.str(); const char* kernel_program = kernel_string.c_str(); viennacl::ocl::program &program = ctx.add_program(kernel_program,"kernel_program"); return program; } } +#endif diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index 8829ac408db..1af7639d4a9 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -3,6 +3,7 @@ # load the kernels to ViennaCL/OpenCL contexts. # Outputs (overwrites): cl_kernels.hpp and cl_kernels.cpp +CL_HEADERDIR="src/caffe/greentea/cl_headers/*.cl" CL_KERNELDIR="src/caffe/greentea/cl_kernels/*.cl" HEADER='include/caffe/greentea/cl_kernels.hpp' INCHEADER='caffe/greentea/cl_kernels.hpp' @@ -10,6 +11,8 @@ SOURCE='src/caffe/greentea/cl_kernels.cpp' echo "// AUTOMATICALLY GENERATED FILE, DO NOT EDIT" > $HEADER echo "// AUTOMATICALLY GENERATED FILE, DO NOT EDIT" > $SOURCE +echo "#ifdef USE_GREENTEA" >> $HEADER +echo "#ifdef USE_GREENTEA" >> $SOURCE echo "#ifndef GREENTEA_CL_KERNELS_HPP_" >> $HEADER echo "#define GREENTEA_CL_KERNELS_HPP_" >> $HEADER @@ -29,6 +32,30 @@ echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx);" >> echo "}" >> $HEADER echo "#endif" >> $HEADER +shopt -s nullglob +for CL_KERNEL in $CL_HEADERDIR +do + CL_KERNEL_STR=`cat $CL_KERNEL` + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo -n "std::string $CL_KERNEL_NAME = \"" >> $SOURCE + echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE + echo "\";" >> $SOURCE +done + +shopt -s nullglob +for CL_KERNEL in $CL_HEADERDIR $CL_KERNELDIR +do + CL_KERNEL_STR=`cat $CL_KERNEL` + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo -n "std::string ${CL_KERNEL_NAME}_float = \"" >> $SOURCE + echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE + echo "\";" >> $SOURCE +done + shopt -s nullglob for CL_KERNEL in $CL_KERNELDIR do @@ -36,22 +63,54 @@ do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo "std::string $CL_KERNEL_NAME = \"$CL_KERNEL_STR\";" | sed -e ':a;N;$!ba;s/\n/\\n/g' >> $SOURCE -done + echo -n "std::string ${CL_KERNEL_NAME}_double = \"" >> $SOURCE + echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE + echo "\";" >> $SOURCE +done + + echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) {" >> $SOURCE echo " std::stringstream ss;" >> $SOURCE + shopt -s nullglob -for CL_KERNEL in $CL_KERNELDIR +for CL_KERNEL in $CL_HEADERDIR do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" echo " ss << $CL_KERNEL_NAME << \"\\n\\n\";" >> $SOURCE done + +shopt -s nullglob +echo " ss << \"#define Dtype float\" << \"\\n\\n\";" >> $SOURCE +for CL_KERNEL in $CL_KERNELDIR +do + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo " ss << ${CL_KERNEL_NAME}_float << \"\\n\\n\";" >> $SOURCE +done + +shopt -s nullglob +echo " ss << \"#ifdef DOUBLE_SUPPORT_AVAILABLE\" << \"\\n\\n\";" >> $SOURCE +echo " ss << \"#undef Dtype\" << \"\\n\\n\";" >> $SOURCE +echo " ss << \"#define Dtype double\" << \"\\n\\n\";" >> $SOURCE +for CL_KERNEL in $CL_KERNELDIR +do + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo " ss << ${CL_KERNEL_NAME}_double << \"\\n\\n\";" >> $SOURCE +done +echo " ss << \"#endif\" << \"\\n\\n\";" >> $SOURCE + echo " std::string kernel_string = ss.str();" >> $SOURCE echo " const char* kernel_program = kernel_string.c_str();" >> $SOURCE echo " viennacl::ocl::program &program = ctx.add_program(kernel_program,\"kernel_program\");" >> $SOURCE echo " return program;" >> $SOURCE echo "}" >> $SOURCE echo "}" >> $SOURCE + +echo "#endif" >> $HEADER +echo "#endif" >> $SOURCE diff --git a/src/caffe/greentea/cl_kernels/AHY9U3~9.CL b/src/caffe/greentea/cl_kernels/AHY9U3~9.CL new file mode 100644 index 00000000000..2c3babe1fa6 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/AHY9U3~9.CL @@ -0,0 +1,10 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index db44980e154..fe07f8850d3 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -1,23 +1,9 @@ #ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 -#define FLT_MAX 0 +#include "header.cl" #endif -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable - -__kernel void relu_forward_s(const int n, __global const float* in, - __global float* out, float negative_slope) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; - } -} - -__kernel void relu_forward_d(const int n, __global const double* in, - __global double* out, double negative_slope) { +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in, + __global Dtype* out, Dtype negative_slope) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } diff --git a/src/caffe/greentea/cl_kernels/aux.cl b/src/caffe/greentea/cl_kernels/aux.cl deleted file mode 100644 index 68f4dfd9e3f..00000000000 --- a/src/caffe/greentea/cl_kernels/aux.cl +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 -#define FLT_MAX 0 -#endif - -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable - -__kernel void gpu_set_s(const int n, const float alpha, __global float* y) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - y[index] = alpha; - } -} - -__kernel void gpu_set_d(const int n, const double alpha, __global double* y) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - y[index] = alpha; - } -} diff --git a/src/caffe/greentea/cl_kernels/channel.cl b/src/caffe/greentea/cl_kernels/channel.cl index 028b488bdc2..e1a5b0c3160 100644 --- a/src/caffe/greentea/cl_kernels/channel.cl +++ b/src/caffe/greentea/cl_kernels/channel.cl @@ -1,51 +1,28 @@ #ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 -#define FLT_MAX 0 +#include "header.cl" #endif -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable - -__kernel void kernel_channel_max_s(const int num, const int channels, +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, const int spatial_dim, - __global const float* data, - __global float* out) { + __global const Dtype* data, + __global Dtype* out) { for (int index = get_global_id(0); index < num * spatial_dim; index += get_global_size(0)) { int n = index / spatial_dim; int s = index % spatial_dim; float maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { - maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); - } - out[index] = maxval; - } -} - -__kernel void kernel_channel_max_d(const int num, const int channels, - const int spatial_dim, - __global const double* data, - __global double* out) { - for (int index = get_global_id(0); index < num * spatial_dim; index += - get_global_size(0)) { - int n = index / spatial_dim; - int s = index % spatial_dim; - double maxval = (double) -FLT_MAX; - for (int c = 0; c < channels; ++c) { - maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); + maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); } out[index] = maxval; } } -__kernel void kernel_channel_subtract_s(const int count, const int num, +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, const int channels, const int spatial_dim, - __global const float* channel_max, - __global float* data) { + __global const Dtype* channel_max, + __global Dtype* data) { for (int index = get_global_id(0); index < count; index += get_global_size(0)) { int n = index / channels / spatial_dim; @@ -54,60 +31,23 @@ __kernel void kernel_channel_subtract_s(const int count, const int num, } } -__kernel void kernel_channel_subtract_d(const int count, const int num, - const int channels, - const int spatial_dim, - __global const double* channel_max, - __global double* data) { - for (int index = get_global_id(0); index < count; - index += get_global_size(0)) { - int n = index / channels / spatial_dim; - int s = index % spatial_dim; - data[index] -= channel_max[n * spatial_dim + s]; - } -} - -__kernel void kernel_exp_s(const int count, __global const float* data, - __global float* out) { +__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, + __global Dtype* out) { for (int index = get_global_id(0); index < count; index += get_global_size(0)) { out[index] = exp(data[index]); } } -__kernel void kernel_exp_d(const int count, __global const double* data, - __global double* out) { - for (int index = get_global_id(0); index < count; - index += get_global_size(0)) { - out[index] = exp(data[index]); - } -} - -__kernel void kernel_channel_sum_s(const int num, const int channels, - const int spatial_dim, - __global const float* data, - __global float* channel_sum) { - for (int index = get_global_id(0); index < num * spatial_dim; index += - get_global_size(0)) { - int n = index / spatial_dim; - int s = index % spatial_dim; - float sum = 0; - for (int c = 0; c < channels; ++c) { - sum += data[(n * channels + c) * spatial_dim + s]; - } - channel_sum[index] = sum; - } -} - -__kernel void kernel_channel_sum_d(const int num, const int channels, +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, const int spatial_dim, - __global const double* data, - __global double* channel_sum) { + __global const Dtype* data, + __global Dtype* channel_sum) { for (int index = get_global_id(0); index < num * spatial_dim; index += get_global_size(0)) { int n = index / spatial_dim; int s = index % spatial_dim; - double sum = 0; + Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } @@ -115,10 +55,10 @@ __kernel void kernel_channel_sum_d(const int num, const int channels, } } -__kernel void kernel_channel_div_s(const int count, const int num, +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, const int channels, const int spatial_dim, - __global const float* channel_sum, - __global float* data) { + __global const Dtype* channel_sum, + __global Dtype* data) { for (int index = get_global_id(0); index < count; index += get_global_size(0)) { int n = index / channels / spatial_dim; @@ -127,46 +67,16 @@ __kernel void kernel_channel_div_s(const int count, const int num, } } -__kernel void kernel_channel_div_d(const int count, const int num, - const int channels, const int spatial_dim, - __global const double* channel_sum, - __global double* data) { - for (int index = get_global_id(0); index < count; - index += get_global_size(0)) { - int n = index / channels / spatial_dim; - int s = index % spatial_dim; - data[index] /= channel_sum[n * spatial_dim + s]; - } -} - -__kernel void kernel_channel_dot_s(const int num, const int channels, - const int spatial_dim, - __global const float* data_1, - __global const float* data_2, - __global float* channel_dot) { - for (int index = get_global_id(0); index < num * spatial_dim; index += - get_global_size(0)) { - int n = index / spatial_dim; - int s = index % spatial_dim; - float dot = 0; - for (int c = 0; c < channels; ++c) { - dot += (data_1[(n * channels + c) * spatial_dim + s] - * data_2[(n * channels + c) * spatial_dim + s]); - } - channel_dot[index] = dot; - } -} - -__kernel void kernel_channel_dot_d(const int num, const int channels, +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, const int spatial_dim, - __global const double* data_1, - __global const double* data_2, - __global double* channel_dot) { + __global const Dtype* data_1, + __global const Dtype* data_2, + __global Dtype* channel_dot) { for (int index = get_global_id(0); index < num * spatial_dim; index += get_global_size(0)) { int n = index / spatial_dim; int s = index % spatial_dim; - double dot = 0; + Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); diff --git a/src/caffe/greentea/cl_kernels/convolution_sk.cl b/src/caffe/greentea/cl_kernels/convolution_sk.cl index 0f67dbe046c..bb9f8c0ec79 100644 --- a/src/caffe/greentea/cl_kernels/convolution_sk.cl +++ b/src/caffe/greentea/cl_kernels/convolution_sk.cl @@ -1,26 +1,287 @@ #ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 +#include "header.cl" #endif -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable +/* +// Very naive implementation +__kernel void TEMPLATE(convolution_ip4v0,Dtype)(__global const float *w, + __global const float *in, + const int height, + const int width, + __global float *out) { -__kernel void convolution_sk_s(__global const float *w, __global const float *in, - const int in_off, const int height, - const int width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, __global float *out, const int out_off) { + // TESTING + // Current tests for IP4. + // 10x10 kernel, + const int kernel_h = 10; + const int kernel_w = 10; + const int ext_kernel_h = 73; + const int ext_kernel_w = 73; + const int fout_count = 1024; + const int fin_count = 192; + const int kstride_h = 8; + const int kstride_w = 8; + const int stride_h = 1; + const int stride_w = 1; + const int out_h = (height - ext_kernel_h) / stride_h + 1; + const int out_w = (width - ext_kernel_w) / stride_w + 1; - for (int index = get_global_id(0); index < 0; index += get_global_size(0)) { + // Across y-dimension + for (int yoff = get_global_id(2); yoff < height - ext_kernel_h + 1; yoff += + get_global_size(2)) { -//(*(out+)) + // Across x-dimension + for (int xoff = get_global_id(1); xoff < width - ext_kernel_w + 1; xoff += + get_global_size(1)) { + // Across output features + for (int fout = get_global_id(0); fout < fout_count; fout += + get_global_size(0)) { + + int fout_w_ptr = fout * fin_count * kernel_h * kernel_w; + + // Across input features + float outval = 0.0; + for (int fin = 0; fin < fin_count; ++fin) { + // Across the kernel itself + int fin_ptr = fin * width * height; + int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + + outval += w[j + i * kernel_w + finout_w_ptr] + * in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width + + fin_ptr]; + } + } + } + out[xoff + yoff * out_w + fout * out_w * out_h] = outval; + } + } + } +} + +// More optimized +__kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w, + __global const Dtype *in, + __global float *out) { + + // Kernel uses Z-workers across batches and output features + // Y-workers across Y-input + // X-workers across X-input + + // TESTING + // Current tests for IP4. + // 10x10 kernel, + + const int width = 200; + const int height = 200; + const int kernel_h = 10; + const int kernel_w = 10; + const int fout_count = 1024; + const int fin_count = 192; + const int kstride_h = 8; + const int kstride_w = 8; + const int stride_h = 1; + const int stride_w = 1; + const int batch_size = 1; + + const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; + const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; + + const int out_h = (height - ext_kernel_h) / stride_h + 1; + const int out_w = (width - ext_kernel_w) / stride_w + 1; + + const int fin_fraction = 16; + + // Clear the output + { +#pragma unroll 1 + for (int i = + get_global_id( + 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1); + i < fout_count * out_h * out_w; + i += get_global_size(0) * get_global_size(1) * get_global_size(2)) { + out[i] = 0.0; + } + } + + // Local weight buffer + __local Dtype wl[16 * 100]; + + // Across output features +#pragma unroll 1 + for (int fout = get_global_id(2); fout < fout_count; + fout += get_global_size(2)) { + + const int fout_w_ptr = fout * fin_count * kernel_h * kernel_w; + + // Across input features +#pragma unroll 1 + for (int fin = 0; fin < fin_count; fin += fin_fraction) { + const int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr; + + // Load local weights + // TODO: Correction for non-fitting fraction divisors +#pragma unroll 1 + for (int k = 0; k < fin_fraction; ++k) { +#pragma unroll 1 + for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { +#pragma unroll 1 + for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { + wl[j + i * kernel_w + k * kernel_w * kernel_h] = w[j + i * kernel_w + + k * kernel_w * kernel_h + finout_w_ptr]; + } + } + } + + barrier(CLK_LOCAL_MEM_FENCE); + + // Across batches (keeps weights in local memory) +#pragma unroll 1 + for (int batch = 0; batch < batch_size; ++batch) { + + const int batch_in_off = batch * width * height * fin_count; + const int batch_out_off = batch * out_w * out_h * fout_count; + + // Across a fraction of input features +#pragma unroll 1 + for (int finoff = 0; finoff < fin_fraction; ++finoff) { + const int finoff_ptr = (finoff + fin) * width * height; + + // Across y-dimension +#pragma unroll 1 + for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1; + yoff += get_global_size(1)) { + + // Across x-dimension +#pragma unroll 1 + for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1; + xoff += get_global_size(0)) { + + Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h + + batch_out_off]; + + // Across the kernel itself +#pragma unroll 2 + for (int i = 0; i < kernel_h; ++i) { +#pragma unroll 2 + for (int j = 0; j < kernel_w; ++j) { + outval = fma( + wl[j + i * kernel_w + finoff * kernel_w * kernel_h], + in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width + + finoff_ptr + batch_in_off], + outval); + } + } + + out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] = + outval; + } + } + } + }barrier(CLK_LOCAL_MEM_FENCE); + } + } +}*/ + +// Fits into 32 KB +__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, + __global const Dtype *in, + __global float *out) { + const int width = 584; + const int height = 584; + const int kernel_h = 10; + const int kernel_w = 10; + const int fout_count = 1024; + const int fin_count = 192; + const int kstride_h = 8; + const int kstride_w = 8; + const int stride_h = 1; + const int stride_w = 1; + const int batch_size = 1; + + const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; + const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; + + const int out_h = (height - ext_kernel_h) / stride_h + 1; + const int out_w = (width - ext_kernel_w) / stride_w + 1; + + // Clear the output + { +#pragma unroll 1 + for (int i = + get_global_id( + 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1); + i < fout_count * out_h * out_w; + i += get_global_size(0) * get_global_size(1) * get_global_size(2)) { + out[i] = 0.0; + } + } + + // Local weight buffer + __local Dtype wl[10*10]; + + // Across output features +#pragma unroll 1 + for (int fout = get_global_id(2); fout < fout_count; + fout += get_global_size(2)) { + + // Across input features +#pragma unroll 1 + for (int fin = 0; fin < fin_count; ++fin) { + + // Load local weights +#pragma unroll 1 + for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { +#pragma unroll 1 + for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { + wl[i+j*kernel_w] = w[j + i * kernel_w + fout * fin_count * kernel_h * kernel_w + fin * kernel_h * kernel_w]; + } + } + + barrier(CLK_LOCAL_MEM_FENCE); + + // Across batches (keeps weights in local memory) +#pragma unroll 1 + for (int batch = 0; batch < batch_size; ++batch) { + + const int batch_in_off = batch * width * height * fin_count; + const int batch_out_off = batch * out_w * out_h * fout_count; + + // Across y-dimension +#pragma unroll 1 + for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1; + yoff += get_global_size(1)) { + + // Across x-dimension +#pragma unroll 1 + for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1; + xoff += get_global_size(0)) { + + Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h + + batch_out_off]; + + // Across the kernel itself +#pragma unroll 5 + for (int i = 0; i < kernel_h; ++i) { +#pragma unroll 5 + for (int j = 0; j < kernel_w; ++j) { + outval = fma( + wl[i+j*kernel_w], + in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width + + fin*width*height + batch_in_off], + outval); + } + } + + out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] = + outval; + } + } + } barrier(CLK_LOCAL_MEM_FENCE); + } } } diff --git a/src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl b/src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl index c9ccf80da27..776223c1953 100644 --- a/src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl +++ b/src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl @@ -1,52 +1,8 @@ #ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 +#include "header.cl" #endif -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable - -__kernel void im2col_sk_gpu_kernel_s(const int n, __global const float* data_im, - const int data_offset, const int height, - const int width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, const int height_col, - const int width_col, - __global float* data_col) { - - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - int w_out = index % width_col; - int h_index = index / width_col; - int h_out = h_index % height_col; - int channel_in = h_index / height_col; - int channel_out = channel_in * kernel_h * kernel_w; - int h_in = h_out * stride_h - pad_h; - int w_in = w_out * stride_w - pad_w; - __global float* data_col_ptr = data_col; - data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; - __global const float* data_im_ptr = data_im + data_offset; - data_im_ptr += (channel_in * height + h_in) * width + w_in; - for (int i = 0; i < ext_kernel_h; i += kstride_h) { - for (int j = 0; j < ext_kernel_w; j += kstride_w) { - int h = h_in + i; - int w = w_in + j; - (*data_col_ptr) = - (h >= 0 && w >= 0 && h < height && w < width) ? - data_im_ptr[i * width + j] : 0; - data_col_ptr += height_col * width_col; - } - } - } - -} - -__kernel void im2col_sk_gpu_kernel_d(const int n, - __global const double* data_im, +__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im, const int data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int ext_kernel_h, @@ -55,7 +11,7 @@ __kernel void im2col_sk_gpu_kernel_d(const int n, const int stride_w, const int kstride_h, const int kstride_w, const int height_col, const int width_col, - __global double* data_col) { + __global Dtype* data_col) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { int w_out = index % width_col; @@ -65,9 +21,9 @@ __kernel void im2col_sk_gpu_kernel_d(const int n, int channel_out = channel_in * kernel_h * kernel_w; int h_in = h_out * stride_h - pad_h; int w_in = w_out * stride_w - pad_w; - __global double* data_col_ptr = data_col; + __global Dtype* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; - __global const double* data_im_ptr = data_im + data_offset; + __global const Dtype* data_im_ptr = data_im + data_offset; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ext_kernel_h; i += kstride_h) { for (int j = 0; j < ext_kernel_w; j += kstride_w) { diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index 64374a5e79c..cbc0a457dc5 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -1,25 +1,12 @@ #ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 +#include "header.cl" #endif -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable - -__kernel void kernel_mul_s(const int n, __global const float* a, const int offa, - __global float* b, const int offb, __global float* y, +__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa, + __global Dtype* b, const int offb, __global Dtype* y, const int offy) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { y[index + offy] = a[index + offa] + b[index + offb]; } } -__kernel void kernel_mul_d(const int n, __global const double* a, const int offa, - __global double* b, const int offb, __global double* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - y[index + offy] = a[index + offa] + b[index + offb]; - } -} diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index a66c7ee3bbb..352801b8e79 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -1,16 +1,9 @@ #ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 -#define FLT_MAX 0 +#include "header.cl" #endif -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable - -__kernel void max_pool_forward_sk_s(const int nthreads, - __global float* bottom_data, +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, + __global Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, @@ -19,10 +12,10 @@ __kernel void max_pool_forward_sk_s(const int nthreads, const int ext_kernel_w, const int stride_h, const int stride_w, const int kstride_h, const int kstride_w, const int pad_h, - const int pad_w, __global float* top_data, + const int pad_w, __global Dtype* top_data, const int use_mask, __global int* mask, - __global float* top_mask) { + __global Dtype* top_mask) { for (int index = get_global_id(0); index < nthreads; index += get_global_size(0)) { int pw = index % pooled_width; @@ -33,11 +26,11 @@ __kernel void max_pool_forward_sk_s(const int nthreads, int wstart = pw * stride_w - pad_w; int hend = min(hstart + ext_kernel_h, height); int wend = min(wstart + ext_kernel_w, width); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - float maxval = -FLT_MAX; + hstart = max(hstart, (int)0); + wstart = max(wstart, (int)0); + Dtype maxval = -FLT_MAX; int maxidx = -1; - __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; + __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += kstride_h) { for (int w = wstart; w < wend; w += kstride_w) { if (bottom_data_ptr[h * width + w] > maxval) { @@ -54,50 +47,3 @@ __kernel void max_pool_forward_sk_s(const int nthreads, } } } - - -__kernel void max_pool_forward_sk_d(const int nthreads, - __global const double* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, const int pad_h, - const int pad_w, __global double* top_data, - __global int* mask, - __global double* top_mask) { - - for (int index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height); - int wend = min(wstart + ext_kernel_w, width); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - double maxval = -FLT_MAX; - int maxidx = -1; - __global float* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { - if (bottom_data_ptr[h * width + w] > maxval) { - maxidx = h * width + w; - maxval = bottom_data_ptr[maxidx]; - } - } - } - top_data[index] = maxval; - if (mask) { - mask[index] = maxidx; - } else { - top_mask[index] = maxidx; - } - } -} diff --git a/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl b/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl index aee2c312dc4..930ac9ab4e2 100644 --- a/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl +++ b/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl @@ -1,21 +1,14 @@ #ifndef __OPENCL_VERSION__ -#define __kernel -#define __global -#define get_global_id(x) 0 -#define get_global_size(x) 0 -#define FLT_MIN 0 +#include "header.cl" #endif -#pragma OPENCL EXTENSION cl_khr_fp64 : enable -#pragma OPENCL EXTENSION cl_amd_fp64 : enable - -__kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data, - __global const float* label, - __global float* loss, const int num, +__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data, + __global const Dtype* label, + __global Dtype* loss, const int num, const int dim, const int spatial_dim, const int has_ignore_label_, const int ignore_label_, - __global float* counts) { + __global Dtype* counts) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { const int n = index / spatial_dim; @@ -26,36 +19,9 @@ __kernel void softmax_loss_forward_gpu_s(int n, __global const float* prob_data, counts[index] = 0; } else { loss[index] = -log( - max((float) (prob_data[n * dim + label_value * spatial_dim + s]), - (float) FLT_MIN)); + max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype)FLT_MIN)); counts[index] = 1; } } - -} - -__kernel void softmax_loss_forward_gpu_d(int n, - __global const double* prob_data, - __global const double* label, - __global double* loss, const int num, - const int dim, const int spatial_dim, - const int has_ignore_label_, - const int ignore_label_, - __global double* counts) { - - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - const int n = index / spatial_dim; - const int s = index % spatial_dim; - const int label_value = (int) (label[n * spatial_dim + s]); - if (has_ignore_label_ == 1 && label_value == ignore_label_) { - loss[index] = 0; - counts[index] = 0; - } else { - loss[index] = -log( - max((double) (prob_data[n * dim + label_value * spatial_dim + s]), - (double) FLT_MIN)); - counts[index] = 1; - } - } - } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 784772c50fa..2db85578cac 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -4,7 +4,7 @@ * Created on: Apr 8, 2015 * Author: Fabian Tschopp */ - +#ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" namespace caffe { @@ -73,3 +73,4 @@ template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, cl_mem data_col); } +#endif diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index cef3536db61..d1c5cae7038 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -4,7 +4,7 @@ * Created on: Apr 6, 2015 * Author: Fabian Tschopp */ - +#ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" @@ -272,9 +272,9 @@ template void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem b, const int offb, cl_mem y, const int offy); template void greentea_gpu_mul(const int ctx_id, const int N, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); /* template<> @@ -624,3 +624,4 @@ template void greentea_gpu_mul(const int ctx_id, const int N, */ } +#endif diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index b6b402a6dc7..587ae4b6167 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -82,8 +82,11 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; - col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, + // TODO: Change this + if(kstride_h_ != 8) { + col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, width_out, this->device_context_); + } // Set the parameters CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; bias_term_ = this->layer_param_.convolution_param().bias_term(); diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index e6a3cfed89b..9c07edf85f0 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -62,22 +62,23 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); - cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); - cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); - const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); + // Cheating, for now + if (kstride_h_ != 8) { - int weight_offset = M_ * K_; - int col_offset = K_ * N_; - int top_offset = M_ * N_; + const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); + cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); + cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); + const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); - std::cout << "M:" << M_ << std::endl; - std::cout << "N:" << N_ << std::endl; - std::cout << "K:" << K_ << std::endl; + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; - for (int n = 0; n < num_; ++n) { + std::cout << "M:" << M_ << std::endl; + std::cout << "N:" << N_ << std::endl; + std::cout << "K:" << K_ << std::endl; - if (1 == 1) { + for (int n = 0; n < num_; ++n) { // First, im2col greentea_im2col_sk_gpu(program, ctx, bottom_data, @@ -96,69 +97,62 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, ctx.get_queue().finish(); } + // Third, add bias + if (bias_term_) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num_output_, N_, 1, + (Dtype) 1., + (cl_mem) (this->blobs_[1]->gpu_data()), 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., top_data, top[i]->offset(n)); + ctx.get_queue().finish(); + } } - else - { - - /*viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( - CL_KERNEL_SELECT("relu_forward")); - viennacl::ocl::enqueue( - oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) top_data, ctx), negative_slope), - ctx.get_queue()); - ctx.get_queue().finish();*/ + } else { - /*for (int k = 0; k < blocks_; ++k) { + const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); + cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); + const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); + viennacl::ocl::kernel &oclk_ip4 = program.get_kernel( + CL_KERNEL_SELECT("convolution_ip4v2")); - int blocked_width = (k == blocks_-1)?(width_-k*(width_/blocks_)):width_/blocks_; + LOG(INFO)<< ctx.devices()[0].max_work_group_size(); - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; - int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; + LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[0]; + LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[1]; + LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[2]; + LOG(INFO)<< ctx.devices()[0].preferred_vector_width_float(); - int blocked_width_out = (k == blocks_-1)?(width_out - (blocks_ - 1) * (width_out / blocks_)):width_out/blocks_; + oclk_ip4.global_work_size(0, 128); + oclk_ip4.global_work_size(1, 128); + oclk_ip4.global_work_size(2, 128); + oclk_ip4.local_work_size(0, 128); + oclk_ip4.local_work_size(1, 2); + oclk_ip4.local_work_size(2, 1); - greentea_im2col_sk_gpu(program, ctx, bottom_data, - bottom[i]->offset(n) + (channels_ * height_ * (width_/blocks_)*k), channels_, - height_, blocked_width, kernel_h_, kernel_w_, - pad_h_, pad_w_, stride_h_, stride_w_, - kstride_h_, kstride_w_, col_data); - ctx.get_queue().finish(); - - std::cout << "After im2col" << std::endl; + viennacl::ocl::enqueue( + oclk_ip4(WrapHandle(weight, ctx), WrapHandle(bottom_data, ctx), + WrapHandle(top_data, ctx)), + ctx.get_queue()); - std::cout << "Num output: " << num_output_ << std::endl; - std::cout << "Height: " << height_ << std::endl; - std::cout << "Width: " << width_ << std::endl; + ctx.get_queue().finish(); - // Second, innerproduct with groups - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, M_, height_out*blocked_width_out, K_, (Dtype) 1., - weight, weight_offset * g, col_data, - col_offset * g, (Dtype) 0., top_data, - top[i]->offset(n) + top_offset * g + num_output_*height_out*(width_out/blocks_)*k); - ctx.get_queue().finish(); - } - - std::cout << "After gpu gemm" << std::endl; - - }*/ - } + for (int n = 0; n < num_; ++n) { // Third, add bias if (bias_term_) { greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., - (cl_mem) (this->blobs_[1]->gpu_data()), 0, + (cl_mem) (this->blobs_[i]->gpu_data()), 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., top_data, top[i]->offset(n)); ctx.get_queue().finish(); + } } } } + std::cout << "CONV GREENTEA END" << std::endl; #endif } diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index 337e792ca75..f4094a952d8 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -269,8 +269,12 @@ break; default: LOG(FATAL)<< "Unknown pooling method."; } - CUDA_POST_KERNEL_CHECK - ; + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + CUDA_POST_KERNEL_CHECK + ; +#endif + } } template @@ -366,8 +370,12 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, default: LOG(FATAL)<< "Unknown or unsupported pooling method in Backward_gpu()."; } - CUDA_POST_KERNEL_CHECK - ; + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + CUDA_POST_KERNEL_CHECK + ; +#endif + } } INSTANTIATE_LAYER_GPU_FUNCS(PoolingSKLayer); From a36aa3cdb41f323d735448481e8b483e2b8427ce Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 8 May 2015 03:41:35 +0200 Subject: [PATCH 021/254] Back to clBLAS. --- Makefile | 6 +- src/caffe/greentea/cl_kernels.cpp | 15 +- src/caffe/greentea/cl_kernels.sh | 3 + .../cl_kernels/{aux.cl => auxiliary.cl} | 0 .../greentea/cl_kernels/convolution_sk.cl | 254 ++++++++---------- src/caffe/layers/conv_sk_layer.cpp | 2 +- src/caffe/layers/conv_sk_layer.cu | 12 +- src/caffe/layers/prelu_layer.cpp | 2 +- 8 files changed, 130 insertions(+), 164 deletions(-) rename src/caffe/greentea/cl_kernels/{aux.cl => auxiliary.cl} (100%) diff --git a/Makefile b/Makefile index 79a9d90d9ef..832a13f0bbc 100644 --- a/Makefile +++ b/Makefile @@ -214,13 +214,17 @@ ifeq ($(USE_GREENTEA),1) COMMON_FLAGS += -DVIENNACL_DEBUG_ALL endif + ifeq ($(GREENTEA_DOUBLE_SUPPORT), 1) + COMMON_FLAGS += -DGREENTEA_DOUBLE_SUPPORT + endif + CL_KERNELS_CPP = src/caffe/greentea/cl_kernels.cpp CL_KERNELS = src/caffe/greentea/cl_kernels/*.cl CL_HEADERS = src/caffe/greentea/cl_headers/*.cl CL_KERNELS_SH = src/caffe/greentea/cl_kernels.sh endif -ifeq ($(USE_CUDA),1) +ifeq ($(USE_CUDA), 1) COMMON_FLAGS += -DUSE_CUDA endif diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 6346905e33e..85985e13968 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -7,17 +7,17 @@ namespace caffe { std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; -std::string aux_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*\n// Very naive implementation\n__kernel void TEMPLATE(convolution_ip4v0,Dtype)(__global const float *w,\n __global const float *in,\n const int height,\n const int width,\n __global float *out) {\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int ext_kernel_h = 73;\n const int ext_kernel_w = 73;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Across y-dimension\n for (int yoff = get_global_id(2); yoff < height - ext_kernel_h + 1; yoff +=\n get_global_size(2)) {\n\n // Across x-dimension\n for (int xoff = get_global_id(1); xoff < width - ext_kernel_w + 1; xoff +=\n get_global_size(1)) {\n\n // Across output features\n for (int fout = get_global_id(0); fout < fout_count; fout +=\n get_global_size(0)) {\n\n int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n float outval = 0.0;\n for (int fin = 0; fin < fin_count; ++fin) {\n // Across the kernel itself\n int fin_ptr = fin * width * height;\n int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n\n outval += w[j + i * kernel_w + finout_w_ptr]\n * in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin_ptr];\n }\n }\n }\n out[xoff + yoff * out_w + fout * out_w * out_h] = outval;\n }\n }\n }\n}\n\n// More optimized\n__kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n\n // Kernel uses Z-workers across batches and output features\n // Y-workers across Y-input\n // X-workers across X-input\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n const int fin_fraction = 16;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[16 * 100];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n const int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; fin += fin_fraction) {\n const int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n\n // Load local weights\n // TODO: Correction for non-fitting fraction divisors\n#pragma unroll 1\n for (int k = 0; k < fin_fraction; ++k) {\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w + k * kernel_w * kernel_h] = w[j + i * kernel_w\n + k * kernel_w * kernel_h + finout_w_ptr];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across a fraction of input features\n#pragma unroll 1\n for (int finoff = 0; finoff < fin_fraction; ++finoff) {\n const int finoff_ptr = (finoff + fin) * width * height;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 2\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 2\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w + finoff * kernel_w * kernel_h],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + finoff_ptr + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n const int width = 584;\n const int height = 584;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10*10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[i+j*kernel_w] = w[j + i * kernel_w + fout * fin_count * kernel_h * kernel_w + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 5\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 5\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[i+j*kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin*width*height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n } barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; +std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string im2col_sk_gpu_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_gpu_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; -std::string aux_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*\n// Very naive implementation\n__kernel void TEMPLATE(convolution_ip4v0,Dtype)(__global const float *w,\n __global const float *in,\n const int height,\n const int width,\n __global float *out) {\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int ext_kernel_h = 73;\n const int ext_kernel_w = 73;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Across y-dimension\n for (int yoff = get_global_id(2); yoff < height - ext_kernel_h + 1; yoff +=\n get_global_size(2)) {\n\n // Across x-dimension\n for (int xoff = get_global_id(1); xoff < width - ext_kernel_w + 1; xoff +=\n get_global_size(1)) {\n\n // Across output features\n for (int fout = get_global_id(0); fout < fout_count; fout +=\n get_global_size(0)) {\n\n int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n float outval = 0.0;\n for (int fin = 0; fin < fin_count; ++fin) {\n // Across the kernel itself\n int fin_ptr = fin * width * height;\n int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n\n outval += w[j + i * kernel_w + finout_w_ptr]\n * in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin_ptr];\n }\n }\n }\n out[xoff + yoff * out_w + fout * out_w * out_h] = outval;\n }\n }\n }\n}\n\n// More optimized\n__kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n\n // Kernel uses Z-workers across batches and output features\n // Y-workers across Y-input\n // X-workers across X-input\n\n // TESTING\n // Current tests for IP4.\n // 10x10 kernel,\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n const int fin_fraction = 16;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[16 * 100];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n const int fout_w_ptr = fout * fin_count * kernel_h * kernel_w;\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; fin += fin_fraction) {\n const int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr;\n\n // Load local weights\n // TODO: Correction for non-fitting fraction divisors\n#pragma unroll 1\n for (int k = 0; k < fin_fraction; ++k) {\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w + k * kernel_w * kernel_h] = w[j + i * kernel_w\n + k * kernel_w * kernel_h + finout_w_ptr];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across a fraction of input features\n#pragma unroll 1\n for (int finoff = 0; finoff < fin_fraction; ++finoff) {\n const int finoff_ptr = (finoff + fin) * width * height;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 2\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 2\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w + finoff * kernel_w * kernel_h],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + finoff_ptr + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global float *out) {\n const int width = 584;\n const int height = 584;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10*10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[i+j*kernel_w] = w[j + i * kernel_w + fout * fin_count * kernel_h * kernel_w + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 5\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 5\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[i+j*kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin*width*height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n } barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; +std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string im2col_sk_gpu_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; @@ -27,18 +27,19 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << header << "\n\n"; ss << "#define Dtype float" << "\n\n"; ss << activation_float << "\n\n"; - ss << aux_float << "\n\n"; + ss << auxiliary_float << "\n\n"; ss << channel_float << "\n\n"; ss << convolution_sk_float << "\n\n"; ss << im2col_sk_gpu_float << "\n\n"; ss << math_float << "\n\n"; ss << pooling_sk_float << "\n\n"; ss << softmax_loss_gpu_float << "\n\n"; +#ifdef GREENTEA_DOUBLE_SUPPORT ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; ss << "#undef Dtype" << "\n\n"; ss << "#define Dtype double" << "\n\n"; ss << activation_double << "\n\n"; - ss << aux_double << "\n\n"; + ss << auxiliary_double << "\n\n"; ss << channel_double << "\n\n"; ss << convolution_sk_double << "\n\n"; ss << im2col_sk_gpu_double << "\n\n"; @@ -46,8 +47,10 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << pooling_sk_double << "\n\n"; ss << softmax_loss_gpu_double << "\n\n"; ss << "#endif" << "\n\n"; +#endif // GREENTEA_DOUBLE_SUUPORT std::string kernel_string = ss.str(); const char* kernel_program = kernel_string.c_str(); + ctx.build_options("-cl-fast-relaxed-math -cl-mad-enable"); viennacl::ocl::program &program = ctx.add_program(kernel_program,"kernel_program"); return program; } diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index 1af7639d4a9..afc31475de6 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -93,6 +93,7 @@ do done shopt -s nullglob +echo "#ifdef GREENTEA_DOUBLE_SUPPORT" >> $SOURCE echo " ss << \"#ifdef DOUBLE_SUPPORT_AVAILABLE\" << \"\\n\\n\";" >> $SOURCE echo " ss << \"#undef Dtype\" << \"\\n\\n\";" >> $SOURCE echo " ss << \"#define Dtype double\" << \"\\n\\n\";" >> $SOURCE @@ -104,9 +105,11 @@ do echo " ss << ${CL_KERNEL_NAME}_double << \"\\n\\n\";" >> $SOURCE done echo " ss << \"#endif\" << \"\\n\\n\";" >> $SOURCE +echo "#endif // GREENTEA_DOUBLE_SUUPORT" >> $SOURCE echo " std::string kernel_string = ss.str();" >> $SOURCE echo " const char* kernel_program = kernel_string.c_str();" >> $SOURCE +echo " ctx.build_options(\"-cl-fast-relaxed-math -cl-mad-enable\");" >> $SOURCE echo " viennacl::ocl::program &program = ctx.add_program(kernel_program,\"kernel_program\");" >> $SOURCE echo " return program;" >> $SOURCE echo "}" >> $SOURCE diff --git a/src/caffe/greentea/cl_kernels/aux.cl b/src/caffe/greentea/cl_kernels/auxiliary.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/aux.cl rename to src/caffe/greentea/cl_kernels/auxiliary.cl diff --git a/src/caffe/greentea/cl_kernels/convolution_sk.cl b/src/caffe/greentea/cl_kernels/convolution_sk.cl index bb9f8c0ec79..3e002fec7c2 100644 --- a/src/caffe/greentea/cl_kernels/convolution_sk.cl +++ b/src/caffe/greentea/cl_kernels/convolution_sk.cl @@ -2,79 +2,9 @@ #include "header.cl" #endif -/* -// Very naive implementation -__kernel void TEMPLATE(convolution_ip4v0,Dtype)(__global const float *w, - __global const float *in, - const int height, - const int width, - __global float *out) { - - // TESTING - // Current tests for IP4. - // 10x10 kernel, - - const int kernel_h = 10; - const int kernel_w = 10; - const int ext_kernel_h = 73; - const int ext_kernel_w = 73; - const int fout_count = 1024; - const int fin_count = 192; - const int kstride_h = 8; - const int kstride_w = 8; - const int stride_h = 1; - const int stride_w = 1; - - const int out_h = (height - ext_kernel_h) / stride_h + 1; - const int out_w = (width - ext_kernel_w) / stride_w + 1; - - // Across y-dimension - for (int yoff = get_global_id(2); yoff < height - ext_kernel_h + 1; yoff += - get_global_size(2)) { - - // Across x-dimension - for (int xoff = get_global_id(1); xoff < width - ext_kernel_w + 1; xoff += - get_global_size(1)) { - - // Across output features - for (int fout = get_global_id(0); fout < fout_count; fout += - get_global_size(0)) { - - int fout_w_ptr = fout * fin_count * kernel_h * kernel_w; - - // Across input features - float outval = 0.0; - for (int fin = 0; fin < fin_count; ++fin) { - // Across the kernel itself - int fin_ptr = fin * width * height; - int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr; - for (int i = 0; i < kernel_h; ++i) { - for (int j = 0; j < kernel_w; ++j) { - - outval += w[j + i * kernel_w + finout_w_ptr] - * in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width - + fin_ptr]; - } - } - } - out[xoff + yoff * out_w + fout * out_w * out_h] = outval; - } - } - } -} - -// More optimized -__kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w, +__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w, __global const Dtype *in, - __global float *out) { - - // Kernel uses Z-workers across batches and output features - // Y-workers across Y-input - // X-workers across X-input - - // TESTING - // Current tests for IP4. - // 10x10 kernel, + __global Dtype *out) { const int width = 200; const int height = 200; @@ -87,6 +17,8 @@ __kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w, const int stride_h = 1; const int stride_w = 1; const int batch_size = 1; + const int buff_w = 73; + const int buff_h = 73; const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; @@ -94,8 +26,6 @@ __kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w, const int out_h = (height - ext_kernel_h) / stride_h + 1; const int out_w = (width - ext_kernel_w) / stride_w + 1; - const int fin_fraction = 16; - // Clear the output { #pragma unroll 1 @@ -108,37 +38,33 @@ __kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w, } } - // Local weight buffer - __local Dtype wl[16 * 100]; + // Local weight buffer (in local memory) + __local Dtype wl[10 * 10]; + // Local input buffer (in local memory) + __local Dtype il[73 * 73]; + // Local accumulators (in registers) + Dtype al[2 * 2]; // Across output features #pragma unroll 1 for (int fout = get_global_id(2); fout < fout_count; fout += get_global_size(2)) { - const int fout_w_ptr = fout * fin_count * kernel_h * kernel_w; - // Across input features #pragma unroll 1 - for (int fin = 0; fin < fin_count; fin += fin_fraction) { - const int finout_w_ptr = fin * kernel_h * kernel_w + fout_w_ptr; + for (int fin = 0; fin < fin_count; ++fin) { // Load local weights - // TODO: Correction for non-fitting fraction divisors -#pragma unroll 1 - for (int k = 0; k < fin_fraction; ++k) { #pragma unroll 1 - for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { + for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { #pragma unroll 1 - for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { - wl[j + i * kernel_w + k * kernel_w * kernel_h] = w[j + i * kernel_w - + k * kernel_w * kernel_h + finout_w_ptr]; - } + for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { + wl[j + i * kernel_w] = w[j + i * kernel_w + + fout * fin_count * kernel_h * kernel_w + + fin * kernel_h * kernel_w]; } } - barrier(CLK_LOCAL_MEM_FENCE); - // Across batches (keeps weights in local memory) #pragma unroll 1 for (int batch = 0; batch < batch_size; ++batch) { @@ -146,53 +72,81 @@ __kernel void TEMPLATE(convolution_ip4v1,Dtype)(__global const Dtype *w, const int batch_in_off = batch * width * height * fin_count; const int batch_out_off = batch * out_w * out_h * fout_count; - // Across a fraction of input features -#pragma unroll 1 - for (int finoff = 0; finoff < fin_fraction; ++finoff) { - const int finoff_ptr = (finoff + fin) * width * height; + // Shift the patch window across width and height + for (int yoff = 0; yoff < height; yoff += buff_h) { + for (int xoff = 0; xoff < width; xoff += buff_w) { - // Across y-dimension + // Load image patch +#pragma unroll 1 + for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) { #pragma unroll 1 - for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1; - yoff += get_global_size(1)) { + for (int j = get_local_id(0); j < buff_w; + j += get_local_size(0)) { + int xidx = (j + xoff); + int yidx = (i + yoff); + if (xidx < width && yidx < height) { + il[j + i * buff_w] = in[xidx + yidx * width + + fin * width * height + batch_in_off]; + } + } + } + + barrier(CLK_LOCAL_MEM_FENCE); - // Across x-dimension + // Kernel inner loop #pragma unroll 1 - for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1; - xoff += get_global_size(0)) { + for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) { +#pragma unroll 1 + for (int j = get_local_id(0); j < buff_w; + j += get_local_size(0)) { - Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h - + batch_out_off]; + // Load accumulators +#pragma unroll 1 + for (int k = 0; k < 4; k++) { + int xidx = (j + xoff - k % 2 * buff_w); + int yidx = (i + yoff - k / 2 * buff_h); + if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) { + al[k] = out[xidx + yidx * out_w + fout * out_w * out_h + + batch_out_off]; + } + } - // Across the kernel itself #pragma unroll 2 - for (int i = 0; i < kernel_h; ++i) { + for (int ki = 0; ki < kernel_h; ++ki) { #pragma unroll 2 - for (int j = 0; j < kernel_w; ++j) { - outval = fma( - wl[j + i * kernel_w + finoff * kernel_w * kernel_h], - in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width - + finoff_ptr + batch_in_off], - outval); + for (int kj = 0; kj < kernel_w; ++kj) { + al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] += + wl[kj + ki * kernel_w] + * il[(j + kj * kstride_w) % buff_w + + ((i + ki * kstride_h) % buff_h) * buff_w]; + } } - } - out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] = - outval; - } + // Store accumulators +#pragma unroll 1 + for (int k = 0; k < 4; k++) { + int xidx = (j + xoff - k % 2 * buff_w); + int yidx = (i + yoff - k / 2 * buff_h); + if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) { + out[xidx + yidx * out_w + fout * out_w * out_h + + batch_out_off] = al[k]; + } + } + } + }barrier(CLK_LOCAL_MEM_FENCE); } } - }barrier(CLK_LOCAL_MEM_FENCE); + } } } -}*/ +} // Fits into 32 KB __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, __global const Dtype *in, - __global float *out) { - const int width = 584; - const int height = 584; + __global Dtype *out) { + const int width = 200; + const int height = 200; const int kernel_h = 10; const int kernel_w = 10; const int fout_count = 1024; @@ -222,7 +176,7 @@ __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, } // Local weight buffer - __local Dtype wl[10*10]; + __local Dtype wl[10 * 10]; // Across output features #pragma unroll 1 @@ -235,12 +189,14 @@ __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, // Load local weights #pragma unroll 1 - for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { + for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { #pragma unroll 1 - for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { - wl[i+j*kernel_w] = w[j + i * kernel_w + fout * fin_count * kernel_h * kernel_w + fin * kernel_h * kernel_w]; - } + for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { + wl[j + i * kernel_w] = w[j + i * kernel_w + + fout * fin_count * kernel_h * kernel_w + + fin * kernel_h * kernel_w]; } + } barrier(CLK_LOCAL_MEM_FENCE); @@ -251,37 +207,37 @@ __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, const int batch_in_off = batch * width * height * fin_count; const int batch_out_off = batch * out_w * out_h * fout_count; - // Across y-dimension + // Across y-dimension #pragma unroll 1 - for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1; - yoff += get_global_size(1)) { + for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1; + yoff += get_global_size(1)) { - // Across x-dimension + // Across x-dimension #pragma unroll 1 - for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1; - xoff += get_global_size(0)) { - - Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h - + batch_out_off]; - - // Across the kernel itself -#pragma unroll 5 - for (int i = 0; i < kernel_h; ++i) { -#pragma unroll 5 - for (int j = 0; j < kernel_w; ++j) { - outval = fma( - wl[i+j*kernel_w], - in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width - + fin*width*height + batch_in_off], - outval); - } - } + for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1; + xoff += get_global_size(0)) { + + Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h + + batch_out_off]; - out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] = - outval; + // Across the kernel itself +#pragma unroll 1 + for (int i = 0; i < kernel_h; ++i) { +#pragma unroll 1 + for (int j = 0; j < kernel_w; ++j) { + outval = fma( + wl[j + i * kernel_w], + in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width + + fin * width * height + batch_in_off], + outval); + } } + + out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] = + outval; + } } - } barrier(CLK_LOCAL_MEM_FENCE); + }barrier(CLK_LOCAL_MEM_FENCE); } } } diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index 587ae4b6167..00fffd70319 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -83,7 +83,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; // TODO: Change this - if(kstride_h_ != 8) { + if(kstride_h_ != 23 || this->device_context_.backend() == BACKEND_CUDA) { col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, width_out, this->device_context_); } diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 9c07edf85f0..5d7f1386020 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -63,7 +63,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { // Cheating, for now - if (kstride_h_ != 8) { + if (kstride_h_ != 23) { const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); @@ -115,7 +115,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); viennacl::ocl::kernel &oclk_ip4 = program.get_kernel( - CL_KERNEL_SELECT("convolution_ip4v2")); + CL_KERNEL_SELECT("convolution_ip4v3")); LOG(INFO)<< ctx.devices()[0].max_work_group_size(); @@ -124,11 +124,11 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[2]; LOG(INFO)<< ctx.devices()[0].preferred_vector_width_float(); - oclk_ip4.global_work_size(0, 128); - oclk_ip4.global_work_size(1, 128); + oclk_ip4.global_work_size(0, 16); + oclk_ip4.global_work_size(1, 16); oclk_ip4.global_work_size(2, 128); - oclk_ip4.local_work_size(0, 128); - oclk_ip4.local_work_size(1, 2); + oclk_ip4.local_work_size(0, 16); + oclk_ip4.local_work_size(1, 16); oclk_ip4.local_work_size(2, 1); viennacl::ocl::enqueue( diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index afd0bc58059..fe200dcb6db 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -45,7 +45,7 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); multiplier_.Reshape(vector(1, bottom[0]->count(1)),this->device_context_); - backward_buff_.Reshape(vector(1, bottom[0]->count(1))); + backward_buff_.Reshape(vector(1, bottom[0]->count(1)),this->device_context_); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } From 46aee8b2b0c016e23275df8993e0edf53d6b0afa Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 10 May 2015 15:31:51 +0200 Subject: [PATCH 022/254] Syncedmem HOST_PTR for CPU, and done implementing the Convolution-SK cBLAS-openCL hybrid approach: Uses OpenCL for all kernels and highly optimized openBLAS or MKL for GEMM. --- include/caffe/util/device_alternate.hpp | 6 + src/caffe/layers/conv_sk_layer.cu | 143 +++++++++------ src/caffe/layers/pooling_sk_layer.cu | 233 ++++++++++++++---------- src/caffe/syncedmem.cpp | 48 ++++- 4 files changed, 266 insertions(+), 164 deletions(-) diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 6ea595dba2d..1fc82fa1c9b 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -90,6 +90,12 @@ const char* curandGetErrorString(curandStatus_t error); const int CAFFE_CUDA_NUM_THREADS = 512; #endif +#ifdef __CDT_PARSER__ +#define CUDA_KERNEL(...) +#else +#define CUDA_KERNEL(...) <<< __VA_ARGS__ >>> +#endif + // CUDA: number of blocks for threads. inline int CAFFE_GET_BLOCKS(const int N) { return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 5d7f1386020..42972ebdef2 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -63,94 +63,119 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { // Cheating, for now - if (kstride_h_ != 23) { + //if (kstride_h_ != 23) { - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); - cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); - cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); - const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); + const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); + cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); + cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); + const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); - int weight_offset = M_ * K_; - int col_offset = K_ * N_; - int top_offset = M_ * N_; + Dtype* top_data_cpu; + Dtype* col_data_cpu; + const Dtype* weight_cpu; + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + top_data_cpu = top[i]->mutable_cpu_data(); + col_data_cpu = col_buffer_.mutable_cpu_data(); + weight_cpu = this->blobs_[0]->cpu_data(); + } - std::cout << "M:" << M_ << std::endl; - std::cout << "N:" << N_ << std::endl; - std::cout << "K:" << K_ << std::endl; + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; - for (int n = 0; n < num_; ++n) { + std::cout << "M:" << M_ << std::endl; + std::cout << "N:" << N_ << std::endl; + std::cout << "K:" << K_ << std::endl; - // First, im2col - greentea_im2col_sk_gpu(program, ctx, bottom_data, - bottom[i]->offset(n), channels_, - height_, width_, kernel_h_, kernel_w_, - pad_h_, pad_w_, stride_h_, stride_w_, - kstride_h_, kstride_w_, col_data); + for (int n = 0; n < num_; ++n) { - // Second, innerproduct with groups - for (int g = 0; g < group_; ++g) { + // First, im2col + greentea_im2col_sk_gpu(program, ctx, bottom_data, + bottom[i]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, + pad_w_, stride_h_, stride_w_, kstride_h_, + kstride_w_, col_data); + ctx.get_queue().finish(); + + // Second, innerproduct with groups + for (int g = 0; g < group_; ++g) { + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + std::cout << "CPU GEMM" << std::endl; + caffe_cpu_gemm( + CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., + weight_cpu + weight_offset * g, col_data_cpu + col_offset * g, + (Dtype) 0., top_data_cpu + top[i]->offset(n) + top_offset * g); + } else { greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, weight_offset * g, col_data, col_offset * g, (Dtype) 0., top_data, top[i]->offset(n) + top_offset * g); - ctx.get_queue().finish(); } + ctx.get_queue().finish(); + } - // Third, add bias - if (bias_term_) { + // Third, add bias + if (bias_term_) { + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, N_, + 1, (Dtype) 1., this->blobs_[1]->cpu_data(), + bias_multiplier_.cpu_data(), (Dtype) 1., + top_data_cpu + top[i]->offset(n)); + } else { greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., (cl_mem) (this->blobs_[1]->gpu_data()), 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., top_data, top[i]->offset(n)); - ctx.get_queue().finish(); } + ctx.get_queue().finish(); } - } else { + } + /*} else { - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); - cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); - const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); + const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); + cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); + const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); - viennacl::ocl::kernel &oclk_ip4 = program.get_kernel( - CL_KERNEL_SELECT("convolution_ip4v3")); + viennacl::ocl::kernel &oclk_ip4 = program.get_kernel( + CL_KERNEL_SELECT("convolution_ip4v3")); - LOG(INFO)<< ctx.devices()[0].max_work_group_size(); + LOG(INFO)<< ctx.devices()[0].max_work_group_size(); - LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[0]; - LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[1]; - LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[2]; - LOG(INFO)<< ctx.devices()[0].preferred_vector_width_float(); + LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[0]; + LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[1]; + LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[2]; + LOG(INFO)<< ctx.devices()[0].preferred_vector_width_float(); - oclk_ip4.global_work_size(0, 16); - oclk_ip4.global_work_size(1, 16); - oclk_ip4.global_work_size(2, 128); - oclk_ip4.local_work_size(0, 16); - oclk_ip4.local_work_size(1, 16); - oclk_ip4.local_work_size(2, 1); + oclk_ip4.global_work_size(0, 16); + oclk_ip4.global_work_size(1, 16); + oclk_ip4.global_work_size(2, 128); + oclk_ip4.local_work_size(0, 16); + oclk_ip4.local_work_size(1, 16); + oclk_ip4.local_work_size(2, 1); - viennacl::ocl::enqueue( - oclk_ip4(WrapHandle(weight, ctx), WrapHandle(bottom_data, ctx), - WrapHandle(top_data, ctx)), - ctx.get_queue()); + viennacl::ocl::enqueue( + oclk_ip4(WrapHandle(weight, ctx), WrapHandle(bottom_data, ctx), + WrapHandle(top_data, ctx)), + ctx.get_queue()); - ctx.get_queue().finish(); + ctx.get_queue().finish(); - for (int n = 0; n < num_; ++n) { - // Third, add bias - if (bias_term_) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, num_output_, N_, 1, - (Dtype) 1., - (cl_mem) (this->blobs_[i]->gpu_data()), 0, - (cl_mem) (bias_multiplier_.gpu_data()), 0, - (Dtype) 1., top_data, top[i]->offset(n)); - ctx.get_queue().finish(); - } - } - } + for (int n = 0; n < num_; ++n) { + // Third, add bias + if (bias_term_) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num_output_, N_, 1, + (Dtype) 1., + (cl_mem) (this->blobs_[i]->gpu_data()), 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., top_data, top[i]->offset(n)); + ctx.get_queue().finish(); + } + } + }*/ } std::cout << "CONV GREENTEA END" << std::endl; diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index f4094a952d8..ee81e9a6abb 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -9,10 +9,11 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" -#endif +#endif // USE_GREENTEA namespace caffe { +#ifdef USE_CUDA template __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, @@ -173,6 +174,7 @@ __global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, top_data[index] = cumvalues / cumsum; } } +#endif // USE_CUDA template void PoolingSKLayer::Forward_gpu(const vector*>& bottom, @@ -196,87 +198,103 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, top_data, + mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, top_data, - mask, top_mask); + AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == caffe::TRAIN) { + // We need to create the random index as well. + caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + rand_idx_.mutable_gpu_data(), top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, top_data); + } + break; + default: + LOG(FATAL)<< "Unknown pooling method."; + } + CUDA_POST_KERNEL_CHECK; + +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - std::cout << "POOLING GREENTEA BEGIN" << std::endl; - viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_forward_sk")); - viennacl::ocl::enqueue( - oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, ctx), - bottom[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - WrapHandle((cl_mem) top_data, ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, ctx), - WrapHandle((cl_mem) top_mask, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); - std::cout << "POOLING GREENTEA END" << std::endl; -#endif + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + { + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + std::cout << "POOLING GREENTEA BEGIN" << std::endl; + viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_forward_sk")); + viennacl::ocl::enqueue( + oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, ctx), + bottom[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapHandle((cl_mem) top_data, ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, ctx), + WrapHandle((cl_mem) top_mask, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + std::cout << "POOLING GREENTEA END" << std::endl; + } + break; + case PoolingParameter_PoolMethod_AVE: + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + break; + default: + LOG(FATAL)<< "Unknown pooling method."; + } +#endif // USE_GREENTEA } - break; - case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, top_data); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - if (this->phase_ == caffe::TRAIN) { - // We need to create the random index as well. - caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), - rand_idx_.mutable_gpu_data()); - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTrain<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - rand_idx_.mutable_gpu_data(), top_data); - } else { - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTest<<>>( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, top_data); -} -break; -default: -LOG(FATAL)<< "Unknown pooling method."; -} - if (this->device_context_.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - CUDA_POST_KERNEL_CHECK - ; -#endif } -} +#ifdef USE_CUDA template __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const int* mask, const Dtype* top_mask, @@ -334,6 +352,7 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, bottom_diff[index] = gradient; } } +#endif // USE_CUDA template void PoolingSKLayer::Backward_gpu(const vector*>& top, @@ -351,32 +370,48 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward<<>>( - count, top_diff, mask, top_mask, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - bottom_diff); - break; - default: - LOG(FATAL)<< "Unknown or unsupported pooling method in Backward_gpu()."; - } if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - CUDA_POST_KERNEL_CHECK - ; -#endif + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + bottom_diff); + break; + default: + LOG(FATAL)<<"Unknown or unsupported pooling method in Backward_gpu()."; + } + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA + } + else + { +#ifdef USE_GREENTEA + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // TODO + break; + default: + LOG(FATAL)<<"Unknown or unsupported pooling method in Backward_gpu()."; + } +#endif // USE_GREENTEA + } } -} INSTANTIATE_LAYER_GPU_FUNCS(PoolingSKLayer); diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 4dfbeb3c2e7..8b928dd76f3 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -52,7 +52,10 @@ inline void SyncedMemory::to_cpu() { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_.id()); - greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, cpu_ptr_, ctx); + // On the CPU, memory is shared (and no copy needed) + if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { + greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, cpu_ptr_, ctx); + } #endif } head_ = SYNCED; @@ -79,8 +82,18 @@ inline void SyncedMemory::to_gpu() { viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_.id()); cl_int err; - cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - size_, NULL, &err); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // CPU memory is shared + if (cpu_ptr_ == NULL) { + CaffeMallocHost(&cpu_ptr_, size_); + } + cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), + CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, + size_, cpu_ptr_, &err); + } else { + cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + size_, NULL, &err); + } gpu_ptr_ = (void*) cl_gpu_mem_; ctx.get_queue().finish(); #endif @@ -100,12 +113,25 @@ inline void SyncedMemory::to_gpu() { device_context_.id()); if (gpu_ptr_ == NULL) { cl_int err; - cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - size_, NULL, &err); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // CPU memory is shared + if (cpu_ptr_ == NULL) { + CaffeMallocHost(&cpu_ptr_, size_); + } + cl_gpu_mem_ = clCreateBuffer( + ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, + size_, cpu_ptr_, &err); + } else { + cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + size_, NULL, &err); + } gpu_ptr_ = (void*) cl_gpu_mem_; ctx.get_queue().finish(); } - greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, ctx); + // On the CPU, memory is shared (and no copy needed) + if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { + greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, ctx); + } #endif } head_ = SYNCED; @@ -131,6 +157,16 @@ void SyncedMemory::set_cpu_data(void* data) { CaffeFreeHost(cpu_ptr_); } cpu_ptr_ = data; + if (device_context_.backend() == Backend::BACKEND_OpenCL) { +#ifdef USE_GREENTEA + viennacl::ocl::context ctx = viennacl::ocl::get_context( + device_context_.id()); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // If host memory is released and shared + gpu_ptr_ = NULL; + } +#endif + } head_ = HEAD_AT_CPU; own_cpu_data_ = false; } From 3a21223b48dcb3843b899807849d918a2e555640 Mon Sep 17 00:00:00 2001 From: Jan Funke Date: Tue, 12 May 2015 12:24:18 -0400 Subject: [PATCH 023/254] caffe requires -std=c++11 compiler flag --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 74fa70c9d20..5e28655ddba 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,11 +27,11 @@ include(cmake/Dependencies.cmake) # ---[ Flags if(UNIX OR APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11") endif() if(USE_libstdcpp) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++ -std=c++11") message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)") endif() From a482706c8c8dee5931d475eee89ce866f2417089 Mon Sep 17 00:00:00 2001 From: Jan Funke Date: Tue, 12 May 2015 12:24:38 -0400 Subject: [PATCH 024/254] bugfix: switch-block in blob.cpp was broken by pre-compiler directives --- src/caffe/blob.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 2795ab8e082..0a1435659f8 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -457,10 +457,10 @@ void Blob::scale_diff(Dtype scale_factor) { #endif } return; - } #else NO_GPU; #endif + } case SyncedMemory::UNINITIALIZED: return; default: From 2c1fb1185fb7a2738ba43b8382bd3872fddf3426 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 12 May 2015 23:14:25 +0200 Subject: [PATCH 025/254] Updated makefile example configuration with OpenCL and OpenBLAS. --- Makefile.config.example | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/Makefile.config.example b/Makefile.config.example index 7a8aafd7c9f..07e8bea5f76 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -1,6 +1,14 @@ ## Refer to http://caffe.berkeleyvision.org/installation.html # Contributions simplifying and improving our build system are welcome! +# GreenTea (ViennaCL/OpenCL) backend switch +USE_CUDA := 1 +USE_GREENTEA := 1 +VIENNACL_DIR = ../ViennaCL +USE_CLBLAS := 1 +USE_VIENNACLBLAS := 0 +GREENTEA_DOUBLE_SUPPORT := 1 + # cuDNN acceleration switch (uncomment to build with cuDNN). # USE_CUDNN := 1 @@ -30,7 +38,7 @@ CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ # atlas for ATLAS (default) # mkl for MKL # open for OpenBlas -BLAS := atlas +BLAS := open # Custom (MKL/ATLAS/OpenBLAS) include and lib directories. # Leave commented to accept the defaults for your choice of BLAS # (which should work)! From d97b0525bceb23d97ef93b35d04eee1207d24052 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 13 May 2015 00:15:21 +0200 Subject: [PATCH 026/254] Math backend advancements. --- CMakeLists.txt | 6 +- cmake/Dependencies.cmake | 2 +- include/caffe/greentea/greentea.hpp | 8 +- .../greentea/greentea_math_functions.hpp | 107 ++---- src/caffe/greentea/cl_kernels.cpp | 20 +- .../greentea/cl_kernels/convolution_sk.cl | 4 +- .../{im2col_sk_gpu.cl => im2col_sk.cl} | 0 .../{softmax_loss_gpu.cl => softmax_loss.cl} | 0 .../greentea/greentea_math_functions.cpp | 322 +++++++++++++----- src/caffe/syncedmem.cpp | 3 + 10 files changed, 289 insertions(+), 183 deletions(-) rename src/caffe/greentea/cl_kernels/{im2col_sk_gpu.cl => im2col_sk.cl} (100%) rename src/caffe/greentea/cl_kernels/{softmax_loss_gpu.cl => softmax_loss.cl} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5e28655ddba..f82a1ca033c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,8 +13,10 @@ include(cmake/Summary.cmake) include(cmake/ConfigGen.cmake) # ---[ Options -caffe_option(CPU_ONLY "Build Caffe wihtout CUDA support" OFF) # TODO: rename to USE_CUDA -caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" ON IF NOT CPU_ONLY) +caffe_option(CPU_ONLY "Build Caffe wihtout CUDA and OpenCL support" OFF) # TODO: rename to USE_CUDA +caffe_option(USE_CUDA "Build Caffe with CUDA support" ON) +caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" ON) +caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" ON IF USE_CUDA AND NOT CPU_ONLY) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) caffe_option(BUILD_python "Build Python wrapper" ON) set(python_version "2" CACHE STRING "Specify which python version to use") diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index f328e8246ab..0342d949db4 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -46,7 +46,7 @@ list(APPEND Caffe_LINKER_LIBS ${Snappy_LIBRARIES}) # ---[ CUDA include(cmake/Cuda.cmake) if(NOT HAVE_CUDA) - if(CPU_ONLY) + if(CPU_ONLY OR NOT USE_CUDA) message("-- CUDA is disabled. Building without it...") else() message("-- CUDA is not detected by cmake. Building without it...") diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index f08d78ed0fa..c9c545e5aba 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -63,14 +63,14 @@ struct is_same { #ifdef USE_VIENNACLBLAS #define GREENTEA_VCL_BLAS_CHECK(condition) \ - ViennaCLStatus status = condition; \ - CHECK_EQ(status, ViennaCLSuccess) << "GreenTea ViennaCL BLAS ERROR"; + {ViennaCLStatus status = condition; \ + CHECK_EQ(status, ViennaCLSuccess) << "GreenTea ViennaCL BLAS ERROR";} #endif #ifdef USE_CLBLAS #define GREENTEA_CL_BLAS_CHECK(condition) \ - clblasStatus status = condition; \ - CHECK_EQ(status, clblasSuccess) << "GreenTea CL BLAS ERROR"; + {clblasStatus status = condition; \ + CHECK_EQ(status, clblasSuccess) << "GreenTea CL BLAS ERROR";} #endif // Macro to select the single (_s) or double (_d) precision kernel diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index d01735ef2af..677a0795f36 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -18,6 +18,12 @@ namespace caffe { +inline void greentea_memset(const size_t N, const int alpha, cl_mem X, + const int offX, viennacl::ocl::context &ctx) { + clEnqueueFillBuffer(ctx.get_queue().handle().get(), X, &alpha, sizeof(int), + offX, N, 0, NULL, NULL); +} + void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, viennacl::ocl::context &ctx); @@ -44,97 +50,36 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, template void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, - const cl_mem X, const int offX, cl_mem Y, - const int offY); + const cl_mem x, const int offx, cl_mem y, + const int offy); template void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, const int offa, const cl_mem b, const int offb, cl_mem y, const int offy); -/* - template - void greentea_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, - Dtype* Y); - - template - void greentea_gpu_axpby(const int N, const Dtype alpha, const Dtype* X, - const Dtype beta, Dtype* Y); - - - - template - void greentea_gpu_set(const int N, const Dtype alpha, Dtype *X); - - inline void greentea_gpu_memset(const size_t N, const int alpha, void* X) { - /* viennacl::m - #ifndef CPU_ONLY - CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT(caffe/alt_fn) - #else - NO_GPU; - #endif*/ -/*} - - template - void greentea_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X); - - template - void greentea_gpu_scal(const int N, const Dtype alpha, Dtype *X); - - template - void greentea_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); - - template - void greentea_gpu_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); - - template - void greentea_gpu_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); - - template - void greentea_gpu_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); - - template - void greentea_gpu_abs(const int n, const Dtype* a, Dtype* y); - - template - void greentea_gpu_exp(const int n, const Dtype* a, Dtype* y); - - template - void greentea_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); - - void greentea_gpu_rng_uniform(const int n, unsigned int* r); - - template - void greentea_gpu_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); - - template - void greentea_gpu_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, - Dtype* r); - - template - void greentea_gpu_rng_bernoulli(const int n, const Dtype p, int* r); - - template - void greentea_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out); - - template - uint32_t greentea_gpu_hamming_distance(const int n, const Dtype* x, - const Dtype* y); - - template - void greentea_gpu_asum(const int n, const Dtype* x, Dtype* y); +template +void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, + cl_mem x, int offx); - template - void greentea_gpu_sign(const int n, const Dtype* x, Dtype* y); +template +void greentea_gpu_axpby(const int ctx_id, const int N, const Dtype alpha, + const cl_mem X, const int offX, const Dtype beta, + cl_mem Y, const int offY); - template - void greentea_gpu_sgnbit(const int n, const Dtype* x, Dtype* y); +template +void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, + const int offX, const cl_mem Y, const int offY, + Dtype* out); - template - void greentea_gpu_fabs(const int n, const Dtype* x, Dtype* y); +template +void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, + const int offX, Dtype* Y); - template - void greentea_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y);*/ +template +void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, + const cl_mem X, const int offX, cl_mem Y, + const int offY); } #endif diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 85985e13968..20322e4a397 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -9,19 +9,19 @@ std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#defin std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; -std::string im2col_sk_gpu_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; +std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; -std::string softmax_loss_gpu_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; -std::string im2col_sk_gpu_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; +std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; -std::string softmax_loss_gpu_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; ss << header << "\n\n"; @@ -30,10 +30,10 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << auxiliary_float << "\n\n"; ss << channel_float << "\n\n"; ss << convolution_sk_float << "\n\n"; - ss << im2col_sk_gpu_float << "\n\n"; + ss << im2col_sk_float << "\n\n"; ss << math_float << "\n\n"; ss << pooling_sk_float << "\n\n"; - ss << softmax_loss_gpu_float << "\n\n"; + ss << softmax_loss_float << "\n\n"; #ifdef GREENTEA_DOUBLE_SUPPORT ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; ss << "#undef Dtype" << "\n\n"; @@ -42,10 +42,10 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << auxiliary_double << "\n\n"; ss << channel_double << "\n\n"; ss << convolution_sk_double << "\n\n"; - ss << im2col_sk_gpu_double << "\n\n"; + ss << im2col_sk_double << "\n\n"; ss << math_double << "\n\n"; ss << pooling_sk_double << "\n\n"; - ss << softmax_loss_gpu_double << "\n\n"; + ss << softmax_loss_double << "\n\n"; ss << "#endif" << "\n\n"; #endif // GREENTEA_DOUBLE_SUUPORT std::string kernel_string = ss.str(); diff --git a/src/caffe/greentea/cl_kernels/convolution_sk.cl b/src/caffe/greentea/cl_kernels/convolution_sk.cl index 3e002fec7c2..f3e47eb0088 100644 --- a/src/caffe/greentea/cl_kernels/convolution_sk.cl +++ b/src/caffe/greentea/cl_kernels/convolution_sk.cl @@ -2,7 +2,7 @@ #include "header.cl" #endif -__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w, +/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w, __global const Dtype *in, __global Dtype *out) { @@ -240,4 +240,4 @@ __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, }barrier(CLK_LOCAL_MEM_FENCE); } } -} +}*/ diff --git a/src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl b/src/caffe/greentea/cl_kernels/im2col_sk.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/im2col_sk_gpu.cl rename to src/caffe/greentea/cl_kernels/im2col_sk.cl diff --git a/src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl b/src/caffe/greentea/cl_kernels/softmax_loss.cl similarity index 100% rename from src/caffe/greentea/cl_kernels/softmax_loss_gpu.cl rename to src/caffe/greentea/cl_kernels/softmax_loss.cl diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index d1c5cae7038..69fa633db78 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -4,6 +4,7 @@ * Created on: Apr 6, 2015 * Author: Fabian Tschopp */ + #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" @@ -40,8 +41,9 @@ namespace caffe { void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, viennacl::ocl::context &ctx) { if (Y != NULL) { - cl_int err = clEnqueueReadBuffer(ctx.get_queue().handle().get(), X, CL_TRUE, - 0, N, Y, 0, NULL, NULL); + clEnqueueReadBuffer(ctx.get_queue().handle().get(), X, CL_TRUE, 0, N, Y, 0, + NULL, + NULL); } ctx.get_queue().finish(); } @@ -50,9 +52,9 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, viennacl::ocl::context &ctx) { if (X != NULL) { - cl_int err = clEnqueueWriteBuffer(ctx.get_queue().handle().get(), Y, + clEnqueueWriteBuffer(ctx.get_queue().handle().get(), Y, CL_TRUE, - 0, N, X, 0, NULL, NULL); + 0, N, X, 0, NULL, NULL); } ctx.get_queue().finish(); } @@ -62,8 +64,8 @@ template void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx) { if (X != Y) { - cl_int err = clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, 0, 0, - sizeof(Dtype) * N, 0, NULL, NULL); + clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, 0, 0, + sizeof(Dtype) * N, 0, NULL, NULL); } ctx.get_queue().finish(); } @@ -85,26 +87,12 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const int offA, const cl_mem B, const int offB, const Dtype beta, cl_mem C, const int offC) { - int offArow = offA; - int offBrow = offB; - int offCrow = offC; - int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; #ifdef USE_VIENNACLBLAS - int offAcol = 0; - int incArow = 1; - int incAcol = 1; - int offBcol = 0; - int incBrow = 1; - int incBcol = 1; - int offCcol = 0; - int incCrow = 1; - int incCcol = 1; - ViennaCLBackend backend; ViennaCLBackendCreate(&backend); ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); @@ -121,17 +109,13 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_VCL_BLAS_CHECK( ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, - vclOrderC, M, N, K, alpha, A, offArow, offAcol, - incArow, incAcol, lda, B, offBrow, offBcol, incBrow, - incBcol, ldb, beta, C, offCrow, offCcol, incCrow, - incCcol, ldc)); + vclOrderC, M, N, K, alpha, A, 0, offA, 1, 1, lda, B, + 0, offB, 1, 1, ldb, beta, C, 0, offC, 1, 1, ldc)); } else { GREENTEA_VCL_BLAS_CHECK( ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, - vclOrderC, M, N, K, alpha, A, offArow, offAcol, - incArow, incAcol, lda, B, offBrow, offBcol, incBrow, - incBcol, ldb, beta, C, offCrow, offCcol, incCrow, - incCcol, ldc)); + vclOrderC, M, N, K, alpha, A, 0, offA, 1, 1, lda, B, + 0, offB, 1, 1, ldb, beta, C, 0, offC, 1, 1, ldc)); } #endif #ifdef USE_CLBLAS @@ -146,10 +130,10 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); + clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offArow, lda, B, offBrow, ldb, beta, C, offCrow, ldc, 1, &queue, 0, NULL, NULL)); + clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); } #endif @@ -182,7 +166,23 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, int lda = (TransA == CblasNoTrans) ? N : M; #ifdef USE_VIENNACLBLAS - // TODO + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + + ViennaCLOrder vclOrder = ViennaCLRowMajor; + ViennaCLTranspose vclTransA = + (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSgemv(backend, vclOrder, vclTransA, M, N, alpha, A, offA, + 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDgemv(backend, vclOrder, vclTransA, M, N, alpha, A, offA, + 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); + } #endif #ifdef USE_CLBLAS @@ -225,7 +225,17 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const int offY) { #ifdef USE_VIENNACLBLAS - // TODO + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); + } #endif #ifdef USE_CLBLAS @@ -276,66 +286,212 @@ template void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem b, const int offb, cl_mem y, const int offy); -/* - template<> - void greentea_gpu_scal(const int N, const float alpha, float *X) { - CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); - } +template +void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, + cl_mem x, int offx) { - template<> - void greentea_gpu_scal(const int N, const double alpha, double *X) { - CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); - } +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); - template<> - void greentea_gpu_axpby(const int N, const float alpha, const float* X, - const float beta, float* Y) { - greentea_gpu_scal(N, beta, Y); - greentea_gpu_axpy(N, alpha, X, Y); - } + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSscal(backend, N, alpha, x, offx, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDscal(backend, N, alpha, x, offx, 1)); + } +#endif - template<> - void greentea_gpu_axpby(const int N, const double alpha, - const double* X, const double beta, double* Y) { - greentea_gpu_scal(N, beta, Y); - greentea_gpu_axpy(N, alpha, X, Y); - } +#ifdef USE_CLBLAS + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); - template<> - void greentea_gpu_dot(const int n, const float* x, const float* y, - float* out) { - CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); - } + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK(clblasSscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK(clblasDscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); + } +#endif +} - template<> - void greentea_gpu_dot(const int n, const double* x, const double* y, - double * out) { - CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); - } +template void greentea_gpu_scal(const int ctx_id, const int N, + const float alpha, cl_mem x, + const int offx); +template void greentea_gpu_scal(const int ctx_id, const int N, + const double alpha, cl_mem x, + const int offx); - template<> - void greentea_gpu_asum(const int n, const float* x, float* y) { - CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); - } +template +void greentea_gpu_axpby(const int ctx_id, const int N, const Dtype alpha, + const cl_mem X, const int offX, const Dtype beta, + cl_mem Y, const int offY) { + greentea_gpu_scal(ctx_id, N, beta, Y, offY); + greentea_gpu_axpy(ctx_id, N, alpha, X, offX, Y, offY); +} - template<> - void greentea_gpu_asum(const int n, const double* x, double* y) { - CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); - } +template void greentea_gpu_axpby(const int ctx_id, const int N, + const float alpha, const cl_mem X, + const int offX, const float beta, + cl_mem Y, const int offY); - template<> - void greentea_gpu_scale(const int n, const float alpha, const float *x, - float* y) { - CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); - CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); - } +template void greentea_gpu_axpby(const int ctx_id, const int N, + const double alpha, const cl_mem X, + const int offX, const double beta, + cl_mem Y, const int offY); - template<> - void greentea_gpu_scale(const int n, const double alpha, - const double *x, double* y) { - CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); - CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); - } +template +void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, + const int offX, const cl_mem Y, const int offY, + Dtype* out) { + +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSdot(backend, n, out, X, offX, 1, Y, offY, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDdot(backend, n, out, X, offX, 1, Y, offY, 1)); + } +#endif + +#ifdef USE_CLBLAS + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); + + cl_int err; + cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + sizeof(Dtype), NULL, &err); + cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + n * sizeof(Dtype), NULL, &err); + + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); + } + + greentea_gpu_memcpy(sizeof(Dtype), gpuout, &out, ctx); + + ctx.get_queue().finish(); + clReleaseMemObject(gpuout); + clReleaseMemObject(scratch); + +#endif +} + +template void greentea_gpu_dot(const int ctx_id, const int n, + const cl_mem X, const int offX, + const cl_mem Y, const int offY, + float* out); +template void greentea_gpu_dot(const int ctx_id, const int n, + const cl_mem X, const int offX, + const cl_mem Y, const int offY, + double* out); + +template +void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, + const int offX, Dtype* Y) { + +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSasum(backend, n, Y, X, offX, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDasum(backend, n, Y, X, offX, 1)); + } +#endif + +#ifdef USE_CLBLAS + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); + + cl_int err; + cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + sizeof(Dtype), NULL, &err); + cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + n * sizeof(Dtype), NULL, &err); + + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); + } + + greentea_gpu_memcpy(sizeof(Dtype), gpuout, &Y, ctx); + + ctx.get_queue().finish(); + clReleaseMemObject(gpuout); + clReleaseMemObject(scratch); +#endif +} + +template void greentea_gpu_asum(const int ctx_id, const int n, + const cl_mem X, const int offX, + float* Y); +template void greentea_gpu_asum(const int ctx_id, const int n, + const cl_mem X, const int offX, + double* Y); + +template +void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, + const cl_mem X, const int offX, cl_mem Y, + const int offY) { + +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLScopy(backend, n, X, offX, 1, Y, offY, 1)); + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSscal(backend, n, alpha, X, offX, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDcopy(backend, n, X, offX, 1, Y, offY, 1)); + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDscal(backend, n, alpha, X, offX, 1)); + } +#endif + +#ifdef USE_CLBLAS + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); + + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasScopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasSscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDcopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasDscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); + } +#endif + +} + +template void greentea_gpu_scale(const int ctx_id, const int n, + const float alpha, const cl_mem X, + const int offX, cl_mem Y, + const int offY); + +template void greentea_gpu_scale(const int ctx_id, const int n, + const double alpha, const cl_mem X, + const int offX, cl_mem Y, + const int offY); + +/* template __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 8b928dd76f3..5fbdc621c00 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -86,6 +86,7 @@ inline void SyncedMemory::to_gpu() { // CPU memory is shared if (cpu_ptr_ == NULL) { CaffeMallocHost(&cpu_ptr_, size_); + caffe_memset(size_, 0, cpu_ptr_); } cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, @@ -93,6 +94,8 @@ inline void SyncedMemory::to_gpu() { } else { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); + int alpha = 0; + greentea_memset(size_, alpha, cl_gpu_mem_, 0, ctx); } gpu_ptr_ = (void*) cl_gpu_mem_; ctx.get_queue().finish(); From a92ec0c0e870d837554c31223d629b0ff81eda28 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 13 May 2015 13:17:11 +0200 Subject: [PATCH 027/254] Finished math functions for OpenCL. --- .../greentea/greentea_math_functions.hpp | 61 +- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/math.cl | 87 ++- .../greentea/greentea_math_functions.cpp | 590 +++++++++--------- src/caffe/layers/pooling_sk_layer.cu | 14 +- src/caffe/syncedmem.cpp | 4 +- 6 files changed, 445 insertions(+), 315 deletions(-) diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 677a0795f36..7ec9c081283 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -24,11 +24,11 @@ inline void greentea_memset(const size_t N, const int alpha, cl_mem X, offX, N, 0, NULL, NULL); } -void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, - viennacl::ocl::context &ctx); +void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, + void *Y, viennacl::ocl::context &ctx); void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, - viennacl::ocl::context &ctx); + const int offY, viennacl::ocl::context &ctx); template void greentea_copy(const int N, const cl_mem X, cl_mem Y, @@ -81,6 +81,59 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, const cl_mem X, const int offX, cl_mem Y, const int offY); +template +void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, + cl_mem Y, const int offY); + +template +void greentea_gpu_add_scalar(const int ctx_id, const int N, const Dtype alpha, + cl_mem Y, const int offY); + +template +void greentea_gpu_add(const int ctx_id, const int n, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy); + +template +void greentea_gpu_sub(const int ctx_id, const int n, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy); + +template +void greentea_gpu_div(const int ctx_id, const int N, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy); + +template +void greentea_gpu_abs(const int ctx_id, const int N, const cl_mem a, + const int offa, cl_mem y, const int offy); + +template +void greentea_gpu_exp(const int ctx_id, const int N, const cl_mem a, + const int offa, cl_mem y, const int offy); + +template +void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, + const int offa, const Dtype alpha, cl_mem y, + const int offy); + +template +void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy); + +template +void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy); + +template +void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, + const Dtype b, cl_mem r, const int offr); + +template +void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, + const Dtype sigma, cl_mem r, const int offr); + } -#endif + +#endif // USE GREENTEA #endif /* GREENTEA_MATH_FUNCTIONS_HPP_ */ diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 20322e4a397..b0cc1bf60d4 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -11,7 +11,7 @@ std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\ std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = abs(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (Dtype(0) < x[index + offx])\n - (x[index + offx] < Dtype(0));\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; @@ -19,7 +19,7 @@ std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa,\n __global Dtype* b, const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] + b[index + offb];\n }\n}"; +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = abs(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (Dtype(0) < x[index + offx])\n - (x[index + offx] < Dtype(0));\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index cbc0a457dc5..85c7ca5eb65 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -2,11 +2,90 @@ #include "header.cl" #endif -__kernel void TEMPLATE(kernel_mul,Dtype)(const int n, __global const Dtype* a, const int offa, - __global Dtype* b, const int offb, __global Dtype* y, - const int offy) { +__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - y[index + offy] = a[index + offa] + b[index + offb]; + y[index + offy] = a[index + offa] * b[index + offb]; } } +__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] / b[index + offb]; + } +} + +__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__global Dtype* Y, + const int offY) { + for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + Y[offY + index] += alpha; + } +} + +__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] + b[offb + index]; + } +} + +__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] - b[offb + index]; + } +} + +__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = abs(a[offa + index]); + } +} + +__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = exp(a[offa + index]); + } +} + +__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, + const int offa, Dtype alpha, + __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = pow(a[offa + index], alpha); + } +} + +__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = (Dtype(0) < x[index + offx]) + - (x[index + offx] < Dtype(0)); + } +} + +__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = signbit(x[index + offx]); + } +} diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 69fa633db78..0fa26b6a973 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -27,6 +27,10 @@ #include "viennacl/ocl/platform.hpp" #include "viennacl/ocl/backend.hpp" +#include +#include +#include + #ifdef USE_CLBLAS #include #endif @@ -38,11 +42,12 @@ namespace caffe { // Copy from OpenCL buffer to main memory -void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, - viennacl::ocl::context &ctx) { +void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, + void *Y, viennacl::ocl::context &ctx) { if (Y != NULL) { - clEnqueueReadBuffer(ctx.get_queue().handle().get(), X, CL_TRUE, 0, N, Y, 0, - NULL, + clEnqueueReadBuffer(ctx.get_queue().handle().get(), X, CL_TRUE, offX, N, Y, + 0, + NULL, NULL); } ctx.get_queue().finish(); @@ -50,11 +55,11 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, void *Y, // Copy from main memory to OpenCL buffer void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, - viennacl::ocl::context &ctx) { + const int offY, viennacl::ocl::context &ctx) { if (X != NULL) { clEnqueueWriteBuffer(ctx.get_queue().handle().get(), Y, CL_TRUE, - 0, N, X, 0, NULL, NULL); + offY, N, X, 0, NULL, NULL); } ctx.get_queue().finish(); } @@ -269,8 +274,7 @@ void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); - viennacl::ocl::kernel &oclk_mul = program.get_kernel( - CL_KERNEL_SELECT("kernel_mul")); + viennacl::ocl::kernel &oclk_mul = program.get_kernel(CL_KERNEL_SELECT("mul")); viennacl::ocl::enqueue( oclk_mul(N, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, WrapHandle(y, ctx), offy), @@ -286,6 +290,29 @@ template void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem b, const int offb, cl_mem y, const int offy); +template +void greentea_gpu_div(const int ctx_id, const int N, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_div = program.get_kernel(CL_KERNEL_SELECT("div")); + viennacl::ocl::enqueue( + oclk_div(N, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, + WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_div(const int ctx_id, const int N, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); +template void greentea_gpu_div(const int ctx_id, const int N, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); + template void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, cl_mem x, int offx) { @@ -376,7 +403,7 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, clblasDdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); } - greentea_gpu_memcpy(sizeof(Dtype), gpuout, &out, ctx); + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, &out, ctx); ctx.get_queue().finish(); clReleaseMemObject(gpuout); @@ -428,7 +455,7 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, clblasDasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); } - greentea_gpu_memcpy(sizeof(Dtype), gpuout, &Y, ctx); + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, &Y, ctx); ctx.get_queue().finish(); clReleaseMemObject(gpuout); @@ -491,293 +518,264 @@ template void greentea_gpu_scale(const int ctx_id, const int n, const int offX, cl_mem Y, const int offY); -/* +template +void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, + cl_mem Y, const int offY) { + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); - template - __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = alpha; - } - } - - template - void greentea_gpu_set(const int N, const Dtype alpha, Dtype* Y) { - if (alpha == 0) { - CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) - return; - } - // NOLINT_NEXT_LINE(whitespace/operators) - set_kernel<<>>( - N, alpha, Y); - } - - template void greentea_gpu_set(const int N, const int alpha, int* Y); - template void greentea_gpu_set(const int N, const float alpha, - float* Y); - template void greentea_gpu_set(const int N, const double alpha, - double* Y); - - template - __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] += alpha; - } - } - - template<> - void greentea_gpu_add_scalar(const int N, const float alpha, float* Y) { - // NOLINT_NEXT_LINE(whitespace/operators) - add_scalar_kernel<<>>( - N, alpha, Y); - } - - template<> - void greentea_gpu_add_scalar(const int N, const double alpha, double* Y) { - // NOLINT_NEXT_LINE(whitespace/operators) - add_scalar_kernel<<>>( - N, alpha, Y); - } - - template - __global__ void add_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = a[index] + b[index]; - } - } - - template<> - void greentea_gpu_add(const int N, const float* a, const float* b, - float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - add_kernel<<>>( - N, a, b, y); - } - - template<> - void greentea_gpu_add(const int N, const double* a, const double* b, - double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - add_kernel<<>>( - N, a, b, y); - } - - template - __global__ void sub_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = a[index] - b[index]; - } - } - - template<> - void greentea_gpu_sub(const int N, const float* a, const float* b, - float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - sub_kernel<<>>( - N, a, b, y); - } - - template<> - void greentea_gpu_sub(const int N, const double* a, const double* b, - double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - sub_kernel<<>>( - N, a, b, y); - } - - template - __global__ void mul_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = a[index] * b[index]; - } - } - - template - __global__ void div_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = a[index] / b[index]; - } - } - - template<> - void greentea_gpu_div(const int N, const float* a, const float* b, - float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - div_kernel<<>>( - N, a, b, y); - } - - template<> - void greentea_gpu_div(const int N, const double* a, const double* b, - double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - div_kernel<<>>( - N, a, b, y); - } - - template - __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = abs(a[index]); - } - } - - template<> - void greentea_gpu_abs(const int N, const float* a, float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - abs_kernel<<>>( - N, a, y); - } - - template<> - void greentea_gpu_abs(const int N, const double* a, double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - abs_kernel<<>>( - N, a, y); - } - - template - __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = exp(a[index]); - } - } - - template<> - void greentea_gpu_exp(const int N, const float* a, float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - exp_kernel<<>>( - N, a, y); - } - - template<> - void greentea_gpu_exp(const int N, const double* a, double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - exp_kernel<<>>( - N, a, y); - } - - template - __global__ void powx_kernel(const int n, const Dtype* a, - const Dtype alpha, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = pow(a[index], alpha); - } - } - - template<> - void greentea_gpu_powx(const int N, const float* a, const float alpha, - float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - powx_kernel<<>>( - N, a, alpha, y); - } - - template<> - void greentea_gpu_powx(const int N, const double* a, const double alpha, - double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - powx_kernel<<>>( - N, a, alpha, y); - } - - DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - - (x[index] < Dtype(0))); - DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); - - __global__ void popc_kernel(const int n, const float* a, const float* b, - uint8_t* y) { - CUDA_KERNEL_LOOP(index, n) - { - y[index] = __popc( - static_cast(a[index]) ^ static_cast(b[index])); - } - } - - __global__ void popcll_kernel(const int n, const double* a, const double* b, - uint8_t* y) { - CUDA_KERNEL_LOOP(index, n) - { - y[index] = __popcll( - static_cast(a[index]) ^ static_cast(b[index])); - } - } - - template<> - uint32_t greentea_gpu_hamming_distance(const int n, const float* x, - const float* y) { - // TODO: Fix caffe_gpu_hamming_distance (see failing unit test - // TestHammingDistanceGPU in test_math_functions.cpp). - NOT_IMPLEMENTED; - thrust::device_vector popcounts(n); - // NOLINT_NEXT_LINE(whitespace/operators) - popc_kernel<<>>( - n, x, y, thrust::raw_pointer_cast(popcounts.data())); - return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, - thrust::plus()); - } - - template<> - uint32_t greentea_gpu_hamming_distance(const int n, const double* x, - const double* y) { - // TODO: Fix caffe_gpu_hamming_distance (see failing unit test - // TestHammingDistanceGPU in test_math_functions.cpp). - NOT_IMPLEMENTED; - thrust::device_vector popcounts(n); - // NOLINT_NEXT_LINE(whitespace/operators) - popcll_kernel<<>>( - n, x, y, thrust::raw_pointer_cast(popcounts.data())); - return thrust::reduce(popcounts.begin(), popcounts.end(), - (uint32_t) 0, - thrust::plus()); - } - - void greentea_gpu_rng_uniform(const int n, unsigned int* r) { - CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); - } - - template<> - void greentea_gpu_rng_uniform(const int n, const float a, const float b, - float* r) { - CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); - const float range = b - a; - if (range != static_cast(1)) { - greentea_gpu_scal(n, range, r); - } - if (a != static_cast(0)) { - greentea_gpu_add_scalar(n, a, r); - } - } - - template<> - void greentea_gpu_rng_uniform(const int n, const double a, - const double b, double* r) { - CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); - const double range = b - a; - if (range != static_cast(1)) { - greentea_gpu_scal(n, range, r); - } - if (a != static_cast(0)) { - greentea_gpu_add_scalar(n, a, r); - } - } - - template<> - void greentea_gpu_rng_gaussian(const int n, const float mu, const float sigma, - float* r) { - CURAND_CHECK(curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); - } - - template<> - void greentea_gpu_rng_gaussian(const int n, const double mu, const double sigma, - double* r) { - CURAND_CHECK( - curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); - } - */ + clEnqueueFillBuffer(queue, Y, &alpha, sizeof(Dtype), offY, N, 0, NULL, NULL); +} + +template void greentea_gpu_set(const int ctx_id, const int N, + const int alpha, cl_mem Y, const int offY); +template void greentea_gpu_set(const int ctx_id, const int N, + const float alpha, cl_mem Y, + const int offY); +template void greentea_gpu_set(const int ctx_id, const int N, + const double alpha, cl_mem Y, + const int offY); + +template +void greentea_gpu_add_scalar(const int ctx_id, const int N, const Dtype alpha, + cl_mem Y, const int offY) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_add_scalar = program.get_kernel( + CL_KERNEL_SELECT("add_scalar")); + viennacl::ocl::enqueue(oclk_add_scalar(N, alpha, WrapHandle(Y, ctx), offY), + ctx.get_queue()); +} + +template void greentea_gpu_add_scalar(const int ctx_id, const int N, + const float alpha, cl_mem Y, + const int offY); +template void greentea_gpu_add_scalar(const int ctx_id, const int N, + const double alpha, cl_mem Y, + const int offY); + +template +void greentea_gpu_add(const int ctx_id, const int n, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_add = program.get_kernel(CL_KERNEL_SELECT("add")); + viennacl::ocl::enqueue( + oclk_add(n, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, + WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_add(const int ctx_id, const int n, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); +template void greentea_gpu_add(const int ctx_id, const int n, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); + +template +void greentea_gpu_sub(const int ctx_id, const int n, const cl_mem a, + const int offa, const cl_mem b, const int offb, cl_mem y, + const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_sub = program.get_kernel(CL_KERNEL_SELECT("sub")); + viennacl::ocl::enqueue( + oclk_sub(n, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, + WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_sub(const int ctx_id, const int n, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); +template void greentea_gpu_sub(const int ctx_id, const int n, + const cl_mem a, const int offa, + const cl_mem b, const int offb, cl_mem y, + const int offy); + +template +void greentea_gpu_abs(const int ctx_id, const int N, const cl_mem a, + const int offa, cl_mem y, const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_abs = program.get_kernel(CL_KERNEL_SELECT("abs")); + viennacl::ocl::enqueue( + oclk_abs(N, WrapHandle(a, ctx), offa, WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_abs(const int ctx_id, const int N, + const cl_mem a, const int offa, cl_mem y, + const int offy); +template void greentea_gpu_abs(const int ctx_id, const int N, + const cl_mem a, const int offa, cl_mem y, + const int offy); + +template +void greentea_gpu_exp(const int ctx_id, const int N, const cl_mem a, + const int offa, cl_mem y, const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_exp = program.get_kernel(CL_KERNEL_SELECT("exp")); + viennacl::ocl::enqueue( + oclk_exp(N, WrapHandle(a, ctx), offa, WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_exp(const int ctx_id, const int N, + const cl_mem a, const int offa, cl_mem y, + const int offy); +template void greentea_gpu_exp(const int ctx_id, const int N, + const cl_mem a, const int offa, cl_mem y, + const int offy); + +template +void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, + const int offa, const Dtype alpha, cl_mem y, + const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_powx = program.get_kernel( + CL_KERNEL_SELECT("powx")); + viennacl::ocl::enqueue( + oclk_powx(N, WrapHandle(a, ctx), offa, alpha, WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_powx(const int ctx_id, const int N, + const cl_mem a, const int offa, + const float alpha, cl_mem y, + const int offy); +template void greentea_gpu_powx(const int ctx_id, const int N, + const cl_mem a, const int offa, + const double alpha, cl_mem y, + const int offy); + +template +void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_sign = program.get_kernel(CL_KERNEL_SELECT("sign")); + viennacl::ocl::enqueue( + oclk_sign(n, WrapHandle(x, ctx), offx, WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy); +template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy); + +template +void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_sgnbit = program.get_kernel(CL_KERNEL_SELECT("sgnbit")); + viennacl::ocl::enqueue( + oclk_sgnbit(n, WrapHandle(x, ctx), offx, WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy); +template void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, + cl_mem y, const int offy); + +void greentea_gpu_rng_uniform(const int n, unsigned int* r) { + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); +} + +template +void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, + const Dtype b, cl_mem r, const int offr) { + + struct timeval start_time; + gettimeofday(&start_time, NULL); +#ifdef __APPLE__ + std::seed_seq seq {(int)(start_time.tv_sec), (int)(start_time.tv_usec)}; +#else + std::seed_seq seq { start_time.tv_sec, start_time.tv_usec }; +#endif + std::mt19937_64 generator(seq); + std::uniform_real_distribution distribution(a, b); + std::function rndfunc = std::bind(distribution, generator); + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + + std::vector random(n); + + for (int i = 0; i < n; ++i) { + random[i] = rndfunc(); + } + + greentea_gpu_memcpy(n, &random[0], r, offr, ctx); +} + +template void greentea_gpu_rng_uniform(const int ctx_id, const int n, + const float a, const float b, + cl_mem r, const int offr); +template void greentea_gpu_rng_uniform(const int ctx_id, const int n, + const double a, const double b, + cl_mem r, const int offr); + +template +void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, + const Dtype sigma, cl_mem r, const int offr) { + + struct timeval start_time; + gettimeofday(&start_time, NULL); +#ifdef __APPLE__ + std::seed_seq seq {(int)(start_time.tv_sec), (int)(start_time.tv_usec)}; +#else + std::seed_seq seq { start_time.tv_sec, start_time.tv_usec }; +#endif + std::mt19937_64 generator(seq); + std::normal_distribution distribution(mu, sigma); + std::function rndfunc = std::bind(distribution, generator); + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + + std::vector random(n); + + for (int i = 0; i < n; ++i) { + random[i] = rndfunc(); + } + + greentea_gpu_memcpy(n, &random[0], r, offr, ctx); +} + +template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, + const float mu, + const float sigma, cl_mem r, + const int offr); + +template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, + const double mu, + const double sigma, cl_mem r, + const int offr); } #endif diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index ee81e9a6abb..35d6548fbc1 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -180,13 +180,6 @@ template void PoolingSKLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); -#endif - const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); @@ -256,6 +249,11 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, #endif // USE_CUDA } else { #ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { @@ -284,8 +282,10 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, } break; case PoolingParameter_PoolMethod_AVE: + // TODO break; case PoolingParameter_PoolMethod_STOCHASTIC: + // TODO break; default: LOG(FATAL)<< "Unknown pooling method."; diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 5fbdc621c00..c7d63d7954a 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -54,7 +54,7 @@ inline void SyncedMemory::to_cpu() { device_context_.id()); // On the CPU, memory is shared (and no copy needed) if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { - greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, cpu_ptr_, ctx); + greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, 0, cpu_ptr_, ctx); } #endif } @@ -133,7 +133,7 @@ inline void SyncedMemory::to_gpu() { } // On the CPU, memory is shared (and no copy needed) if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { - greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, ctx); + greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, ctx); } #endif } From a56e883ac8e3ee573cb4709bc46a6939266081a9 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 15 May 2015 13:45:39 +0200 Subject: [PATCH 028/254] Small corrections to match bvlc::master. --- include/caffe/common.hpp | 1 - include/caffe/syncedmem.hpp | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 3184080db71..3a0e114b195 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -68,7 +68,6 @@ private:\ #define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet" // See PR #1236 -namespace cv {class Mat;} namespace caffe { diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 68fda2ca626..160391d9cfb 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -12,12 +12,12 @@ namespace caffe { // Theoretically, CaffeMallocHost and CaffeFreeHost should simply call the // cudaMallocHost and cudaFree functions in order to create pinned memory. -// However, those codes rely on the existence of a cuda GPU (I don't know +// However, those codes rely on the existence of a CUDA GPU (I don't know // why that is a must since allocating memory should not be accessing the -// GPU resorce, but it just creates an error as of Cuda 5.0) and will cause +// GPU resource, but it just creates an error as of CUDA 5.0) and will cause // problem when running on a machine without GPU. Thus, we simply define // these two functions for safety and possible future change if the problem -// of calling cuda functions disappears in a future version. +// of calling CUDA functions disappears in a future version. // // In practice, although we are creating unpinned memory here, as long as we // are constantly accessing them the memory pages almost always stays in From 6527e5856d2cac93a8837c983336e68346dfba18 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 16 May 2015 23:55:43 +0200 Subject: [PATCH 029/254] Fixed broken kernels, fixed solver numbers. --- src/caffe/greentea/cl_kernels.cpp | 4 ++-- src/caffe/greentea/cl_kernels/math.cl | 6 +++--- src/caffe/solver.cpp | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index b0cc1bf60d4..51706ac4500 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -11,7 +11,7 @@ std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\ std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = abs(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (Dtype(0) < x[index + offx])\n - (x[index + offx] < Dtype(0));\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; @@ -19,7 +19,7 @@ std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = abs(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (Dtype(0) < x[index + offx])\n - (x[index + offx] < Dtype(0));\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index 85c7ca5eb65..83cd873b8f7 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -52,7 +52,7 @@ __kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, const int offa, __global Dtype* y, const int offy) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - y[offy + index] = abs(a[offa + index]); + y[offy + index] = fabs((Dtype)(a[offa + index])); } } @@ -77,8 +77,8 @@ __kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, const int offx, __global Dtype* y, const int offy) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - y[index + offy] = (Dtype(0) < x[index + offx]) - - (x[index + offx] < Dtype(0)); + y[index + offy] = (0.0 < x[index + offx]) + - (x[index + offx] < 0.0); } } diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 4157daee80c..56bd6e39c2c 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -216,7 +216,7 @@ void Solver::StepPrefilled() { net_->Update(); // Save a snapshot if needed. - if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { Snapshot(); } } From a1bb0d132054fb8cc8e7839065d3001e50ffd48a Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 17 May 2015 02:33:42 +0200 Subject: [PATCH 030/254] Finished Blob for OpenCL. --- src/caffe/blob.cpp | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 0a1435659f8..40b9ab31177 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -34,7 +34,7 @@ void Blob::Reshape(const vector& shape, device_context_ = device_context; for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); - CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; + CHECK_LE(shape[i], INT_MAX / count_)<< "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; } @@ -178,8 +178,9 @@ void Blob::Update() { static_cast(data_->mutable_gpu_data())); } else { #ifdef USE_GREENTEA - // TODO - //greentea_gpu_axpy(count_, Dtype(-1), (cl_mem)(diff_->gpu_data()), (cl_mem)(data_->mutable_gpu_data())); + greentea_gpu_axpy(device_context_.id(), count_, Dtype(-1), + (cl_mem) (diff_->gpu_data()), 0, + (cl_mem) (data_->mutable_gpu_data()), 0); #endif } #else @@ -224,9 +225,10 @@ Dtype Blob::asum_data() const { return asum; } else { #ifdef USE_GREENTEA - // TODO - //greentea_gpu_asum(count_, (cl_mem) gpu_data(), &asum); - return 0; + Dtype asum; + greentea_gpu_asum(device_context_.id(), count_, (cl_mem) gpu_data(), 0, + &asum); + return asum; #endif } #else @@ -268,9 +270,10 @@ Dtype Blob::asum_diff() const { return asum; } else { #ifdef USE_GREENTEA - // TODO - //greentea_gpu_asum(count_, gpu_diff(), &asum); - return 0; + Dtype asum; + greentea_gpu_asum(device_context_.id(), count_, (cl_mem) gpu_diff(), 0, + &asum); + return asum; #endif } #else @@ -315,8 +318,8 @@ Dtype Blob::sumsq_data() const { caffe_gpu_dot(count_, data, data, &sumsq); } else { #ifdef USE_GREENTEA - // TODO - //greentea_gpu_dot(count_, data, data, &sumsq); + greentea_gpu_dot(device_context_.id(), count_, data, 0, data, 0, + &sumsq); #endif } #else @@ -363,8 +366,8 @@ Dtype Blob::sumsq_diff() const { caffe_gpu_dot(count_, diff, diff, &sumsq); } else { #ifdef USE_GREENTEA - // TODO - // greentea_gpu_dot(count_, diff, diff, &sumsq); + greentea_gpu_dot(device_context_.id(), count_, diff, 0, diff, 0, + &sumsq); #endif } break; @@ -408,8 +411,7 @@ void Blob::scale_data(Dtype scale_factor) { caffe_gpu_scal(count_, scale_factor, data); } else { #ifdef USE_GREENTEA - // TODO - //greentea_gpu_scal(count_, scale_factor, data); + greentea_gpu_scal(device_context_.id(), count_, scale_factor, data, 0); #endif } return; @@ -452,8 +454,7 @@ void Blob::scale_diff(Dtype scale_factor) { caffe_gpu_scal(count_, scale_factor, diff); } else { #ifdef USE_GREENTEA - // TODO - //greentea_gpu_scal(count_, scale_factor, diff); + greentea_gpu_scal(device_context_.id(), count_, scale_factor, diff, 0); #endif } return; From 917ff6b0aececb70fb57bbe14cf2fa073e184571 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 17 May 2015 14:10:12 +0200 Subject: [PATCH 031/254] Backward computation for Conv-SK in OpenCL done. --- include/caffe/greentea/autotuner.hpp | 15 - include/caffe/greentea/greentea_im2col.hpp | 10 + src/caffe/blob.cpp | 8 +- src/caffe/greentea/autotuner.cpp | 12 - src/caffe/greentea/cl_kernels.cpp | 8 +- src/caffe/greentea/cl_kernels/im2col.cl | 4 + src/caffe/greentea/cl_kernels/im2col_sk.cl | 74 ++++- src/caffe/greentea/greentea_im2col.cpp | 66 +++- .../greentea/greentea_math_functions.cpp | 32 +- src/caffe/layers/conv_sk_layer.cu | 289 +++++++++++------- 10 files changed, 357 insertions(+), 161 deletions(-) delete mode 100644 include/caffe/greentea/autotuner.hpp delete mode 100644 src/caffe/greentea/autotuner.cpp create mode 100644 src/caffe/greentea/cl_kernels/im2col.cl diff --git a/include/caffe/greentea/autotuner.hpp b/include/caffe/greentea/autotuner.hpp deleted file mode 100644 index 1e1c3263ef8..00000000000 --- a/include/caffe/greentea/autotuner.hpp +++ /dev/null @@ -1,15 +0,0 @@ -/* - * autotuner.hpp - * - * Created on: Apr 28, 2015 - * Author: fabian - */ - -#ifndef GREENTEA_AUTOTUNER_HPP_ -#define GREENTEA_AUTOTUNER_HPP_ - - - - - -#endif /* GREENTEA_AUTOTUNER_HPP_ */ diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index a314e4c04cc..d4403bf7242 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -30,6 +30,16 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, const int kstride_h, const int kstride_w, cl_mem data_col); +template +void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, const cl_mem data_col, + const int channels, const int height, + const int width, const int patch_h, + const int patch_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + cl_mem data_im, const int data_offset); + /*template void im2col_gpu(const Dtype* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 40b9ab31177..b985e3c4bd4 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -318,7 +318,7 @@ Dtype Blob::sumsq_data() const { caffe_gpu_dot(count_, data, data, &sumsq); } else { #ifdef USE_GREENTEA - greentea_gpu_dot(device_context_.id(), count_, data, 0, data, 0, + greentea_gpu_dot(device_context_.id(), count_, (cl_mem)data, 0, (cl_mem)data, 0, &sumsq); #endif } @@ -366,7 +366,7 @@ Dtype Blob::sumsq_diff() const { caffe_gpu_dot(count_, diff, diff, &sumsq); } else { #ifdef USE_GREENTEA - greentea_gpu_dot(device_context_.id(), count_, diff, 0, diff, 0, + greentea_gpu_dot(device_context_.id(), count_, (cl_mem)diff, 0, (cl_mem)diff, 0, &sumsq); #endif } @@ -411,7 +411,7 @@ void Blob::scale_data(Dtype scale_factor) { caffe_gpu_scal(count_, scale_factor, data); } else { #ifdef USE_GREENTEA - greentea_gpu_scal(device_context_.id(), count_, scale_factor, data, 0); + greentea_gpu_scal(device_context_.id(), count_, scale_factor, (cl_mem)data, 0); #endif } return; @@ -454,7 +454,7 @@ void Blob::scale_diff(Dtype scale_factor) { caffe_gpu_scal(count_, scale_factor, diff); } else { #ifdef USE_GREENTEA - greentea_gpu_scal(device_context_.id(), count_, scale_factor, diff, 0); + greentea_gpu_scal(device_context_.id(), count_, scale_factor, (cl_mem)diff, 0); #endif } return; diff --git a/src/caffe/greentea/autotuner.cpp b/src/caffe/greentea/autotuner.cpp deleted file mode 100644 index 9683f10980a..00000000000 --- a/src/caffe/greentea/autotuner.cpp +++ /dev/null @@ -1,12 +0,0 @@ -/* - * autotuner.cpp - * - * Created on: Apr 28, 2015 - * Author: Fabian Tschopp - */ - - - - - - diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 51706ac4500..cb9a09ce23c 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -10,7 +10,8 @@ std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; -std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; +std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; @@ -18,7 +19,8 @@ std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.c std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; -std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}"; +std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; +std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; @@ -30,6 +32,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << auxiliary_float << "\n\n"; ss << channel_float << "\n\n"; ss << convolution_sk_float << "\n\n"; + ss << im2col_float << "\n\n"; ss << im2col_sk_float << "\n\n"; ss << math_float << "\n\n"; ss << pooling_sk_float << "\n\n"; @@ -42,6 +45,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << auxiliary_double << "\n\n"; ss << channel_double << "\n\n"; ss << convolution_sk_double << "\n\n"; + ss << im2col_double << "\n\n"; ss << im2col_sk_double << "\n\n"; ss << math_double << "\n\n"; ss << pooling_sk_double << "\n\n"; diff --git a/src/caffe/greentea/cl_kernels/im2col.cl b/src/caffe/greentea/cl_kernels/im2col.cl new file mode 100644 index 00000000000..0ed82aa79be --- /dev/null +++ b/src/caffe/greentea/cl_kernels/im2col.cl @@ -0,0 +1,4 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + diff --git a/src/caffe/greentea/cl_kernels/im2col_sk.cl b/src/caffe/greentea/cl_kernels/im2col_sk.cl index 776223c1953..40bfa568fbf 100644 --- a/src/caffe/greentea/cl_kernels/im2col_sk.cl +++ b/src/caffe/greentea/cl_kernels/im2col_sk.cl @@ -2,16 +2,19 @@ #include "header.cl" #endif -__kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const Dtype* data_im, - const int data_offset, const int height, - const int width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, const int height_col, - const int width_col, - __global Dtype* data_col) { +__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, + __global const Dtype* data_im, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_col) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { int w_out = index % width_col; @@ -38,3 +41,54 @@ __kernel void TEMPLATE(im2col_sk_gpu_kernel,Dtype)(const int n, __global const D } } + +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col, + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, + const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_im, const int data_offset) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; + } + } + + data_im[data_offset + index] = val; + } +} diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 2db85578cac..5c9143d3796 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -33,9 +33,9 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, CL_KERNEL_SELECT("im2col_sk_gpu_kernel")); viennacl::ocl::enqueue( - kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, kernel_h, - kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, - stride_w, kstride_h, kstride_w, height_col, width_col, + kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, + kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, + stride_h, stride_w, kstride_h, kstride_w, height_col, width_col, WrapHandle(data_col, ctx)), ctx.get_queue()); @@ -72,5 +72,65 @@ template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, const int kstride_w, cl_mem data_col); +template +void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, const cl_mem data_col, + const int channels, const int height, + const int width, const int patch_h, + const int patch_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + cl_mem data_im, const int data_offset) { + + if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) { + LOG(FATAL)<<"stride greater than 1 or pad greater than 0 not tested in col2im_sk_gpu()."; + } + + int ext_patch_h = (patch_h - 1) * kstride_h + 1; + int ext_patch_w = (patch_w - 1) * kstride_w + 1; + int height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; + int num_kernels = channels * height * width; + + viennacl::ocl::kernel &kernel = prog.get_kernel( + CL_KERNEL_SELECT("col2im_sk")); + + viennacl::ocl::enqueue( + kernel(num_kernels, num_kernels, WrapHandle(data_col,ctx), height, width, channels, + patch_h, patch_w, ext_patch_h, ext_patch_w, + pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, + height_col, width_col, WrapHandle(data_im,ctx), data_offset), + ctx.get_queue()); + +} + +template void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_col, + const int channels, + const int height, const int width, + const int patch_h, + const int patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, cl_mem data_im, + const int data_offset); + +template void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_col, + const int channels, + const int height, const int width, + const int patch_h, + const int patch_w, const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + cl_mem data_im, + const int data_offset); + } #endif diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 0fa26b6a973..d84e0d8eb3b 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -670,39 +670,45 @@ template void greentea_gpu_powx(const int ctx_id, const int N, template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy) { + cl_mem y, const int offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); - viennacl::ocl::kernel &oclk_sign = program.get_kernel(CL_KERNEL_SELECT("sign")); + viennacl::ocl::kernel &oclk_sign = program.get_kernel( + CL_KERNEL_SELECT("sign")); viennacl::ocl::enqueue( oclk_sign(n, WrapHandle(x, ctx), offx, WrapHandle(y, ctx), offy), ctx.get_queue()); } -template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy); -template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy); +template void greentea_gpu_sign(const int ctx_id, const int n, + const cl_mem x, int offx, cl_mem y, + const int offy); +template void greentea_gpu_sign(const int ctx_id, const int n, + const cl_mem x, int offx, cl_mem y, + const int offy); template -void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy) { +void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, + int offx, cl_mem y, const int offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); - viennacl::ocl::kernel &oclk_sgnbit = program.get_kernel(CL_KERNEL_SELECT("sgnbit")); + viennacl::ocl::kernel &oclk_sgnbit = program.get_kernel( + CL_KERNEL_SELECT("sgnbit")); viennacl::ocl::enqueue( oclk_sgnbit(n, WrapHandle(x, ctx), offx, WrapHandle(y, ctx), offy), ctx.get_queue()); } -template void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy); -template void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy); +template void greentea_gpu_sgnbit(const int ctx_id, const int n, + const cl_mem x, int offx, cl_mem y, + const int offy); +template void greentea_gpu_sgnbit(const int ctx_id, const int n, + const cl_mem x, int offx, cl_mem y, + const int offy); void greentea_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 42972ebdef2..cc0e1f14aa2 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -19,6 +19,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA // CUDA backend code for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); @@ -50,6 +51,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, } } } +#endif } else { // GreenTea backend code #ifdef USE_GREENTEA @@ -62,9 +64,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { - // Cheating, for now - //if (kstride_h_ != 23) { - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); @@ -133,49 +132,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, ctx.get_queue().finish(); } } - /*} else { - - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); - cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); - const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); - - viennacl::ocl::kernel &oclk_ip4 = program.get_kernel( - CL_KERNEL_SELECT("convolution_ip4v3")); - - LOG(INFO)<< ctx.devices()[0].max_work_group_size(); - - LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[0]; - LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[1]; - LOG(INFO)<< ctx.devices()[0].max_work_item_sizes()[2]; - LOG(INFO)<< ctx.devices()[0].preferred_vector_width_float(); - - oclk_ip4.global_work_size(0, 16); - oclk_ip4.global_work_size(1, 16); - oclk_ip4.global_work_size(2, 128); - oclk_ip4.local_work_size(0, 16); - oclk_ip4.local_work_size(1, 16); - oclk_ip4.local_work_size(2, 1); - - viennacl::ocl::enqueue( - oclk_ip4(WrapHandle(weight, ctx), WrapHandle(bottom_data, ctx), - WrapHandle(top_data, ctx)), - ctx.get_queue()); - - ctx.get_queue().finish(); - - for (int n = 0; n < num_; ++n) { - // Third, add bias - if (bias_term_) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, num_output_, N_, 1, - (Dtype) 1., - (cl_mem) (this->blobs_[i]->gpu_data()), 0, - (cl_mem) (bias_multiplier_.gpu_data()), 0, - (Dtype) 1., top_data, top[i]->offset(n)); - ctx.get_queue().finish(); - } - } - }*/ } std::cout << "CONV GREENTEA END" << std::endl; @@ -187,72 +143,201 @@ template void ConvolutionSKLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - const Dtype* weight = NULL; - Dtype* weight_diff = NULL; - if (this->param_propagate_down_[0]) { - weight = this->blobs_[0]->gpu_data(); - weight_diff = this->blobs_[0]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - Dtype* bias_diff = NULL; - if (bias_term_ && this->param_propagate_down_[1]) { - bias_diff = this->blobs_[1]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); - } - const int weight_offset = M_ * K_; - const int col_offset = K_ * N_; - const int top_offset = M_ * N_; - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = NULL; - // Bias gradient, if necessary. + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + const Dtype* weight = NULL; + Dtype* weight_diff = NULL; + if (this->param_propagate_down_[0]) { + weight = this->blobs_[0]->gpu_data(); + weight_diff = this->blobs_[0]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + Dtype* bias_diff = NULL; if (bias_term_ && this->param_propagate_down_[1]) { - top_diff = top[i]->gpu_diff(); - for (int n = 0; n < num_; ++n) { - caffe_gpu_gemv(CblasNoTrans, num_output_, N_, 1., - top_diff + top[0]->offset(n), - bias_multiplier_.gpu_data(), 1., bias_diff); - } + bias_diff = this->blobs_[1]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } - if (this->param_propagate_down_[0] || propagate_down[i]) { - if (!top_diff) { + const int weight_offset = M_ * K_; + const int col_offset = K_ * N_; + const int top_offset = M_ * N_; + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = NULL; + // Bias gradient, if necessary. + if (bias_term_ && this->param_propagate_down_[1]) { top_diff = top[i]->gpu_diff(); + for (int n = 0; n < num_; ++n) { + caffe_gpu_gemv(CblasNoTrans, num_output_, N_, 1., + top_diff + top[0]->offset(n), + bias_multiplier_.gpu_data(), 1., bias_diff); + } } - Dtype* col_data = col_buffer_.mutable_gpu_data(); - Dtype* col_diff = col_buffer_.mutable_gpu_diff(); - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - for (int n = 0; n < num_; ++n) { - // Since we saved memory in the forward pass by not storing all col - // data, we will need to recompute them. - im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, - stride_w_, kstride_h_, kstride_w_, col_data); - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, K_, N_, - (Dtype) 1., - top_diff + top[i]->offset(n) + top_offset * g, - col_data + col_offset * g, (Dtype) 1., - weight_diff + weight_offset * g); + if (this->param_propagate_down_[0] || propagate_down[i]) { + if (!top_diff) { + top_diff = top[i]->gpu_diff(); + } + Dtype* col_data = col_buffer_.mutable_gpu_data(); + Dtype* col_diff = col_buffer_.mutable_gpu_diff(); + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + for (int n = 0; n < num_; ++n) { + // Since we saved memory in the forward pass by not storing all col + // data, we will need to recompute them. + im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, + stride_w_, kstride_h_, kstride_w_, col_data); + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm( + CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., + top_diff + top[i]->offset(n) + top_offset * g, + col_data + col_offset * g, (Dtype) 1., + weight_diff + weight_offset * g); + } + } + // gradient w.r.t. bottom data, if necessary + if (propagate_down[i]) { + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm( + CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., + weight + weight_offset * g, + top_diff + top[i]->offset(n) + top_offset * g, (Dtype) 0., + col_diff + col_offset * g); + } + // col2im back to the data + col2im_sk_gpu(col_diff, channels_, height_, width_, kernel_h_, + kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + kstride_h_, kstride_w_, + bottom_diff + bottom[i]->offset(n)); + } + } + } + } +#endif + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + const Dtype* weight_cpu = NULL; + Dtype* weight_diff_cpu = NULL; + cl_mem weight = NULL; + cl_mem weight_diff = NULL; + + if (this->param_propagate_down_[0]) { + weight = (cl_mem) (this->blobs_[0]->gpu_data()); + weight_diff = (cl_mem) (this->blobs_[0]->mutable_gpu_diff()); + greentea_gpu_set(this->device_context_.id(), this->blobs_[0]->count(), + Dtype(0), weight_diff, 0); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + weight_cpu = this->blobs_[0]->cpu_data(); + weight_diff_cpu = this->blobs_[0]->mutable_cpu_diff(); + } + } + + cl_mem bias_diff = NULL; + + if (bias_term_ && this->param_propagate_down_[1]) { + bias_diff = (cl_mem) (this->blobs_[1]->mutable_gpu_diff()); + greentea_gpu_set(this->device_context_.id(), this->blobs_[1]->count(), + Dtype(0), bias_diff, 0); + } + const int weight_offset = M_ * K_; + const int col_offset = K_ * N_; + const int top_offset = M_ * N_; + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff_cpu = NULL; + cl_mem top_diff = NULL; + // Bias gradient, if necessary. + if (bias_term_ && this->param_propagate_down_[1]) { + top_diff = (cl_mem) (top[i]->gpu_diff()); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + top_diff_cpu = top[i]->cpu_diff(); + } + for (int n = 0; n < num_; ++n) { + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + num_output_, N_, (Dtype) 1., top_diff, + top[0]->offset(n), + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., bias_diff, 0); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + if (!top_diff) { + top_diff = (cl_mem) (top[i]->gpu_diff()); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + top_diff_cpu = top[i]->cpu_diff(); } } - // gradient w.r.t. bottom data, if necessary - if (propagate_down[i]) { - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasTrans, CblasNoTrans, K_, N_, M_, - (Dtype) 1., weight + weight_offset * g, - top_diff + top[i]->offset(n) + top_offset * g, - (Dtype) 0., col_diff + col_offset * g); + cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); + cl_mem col_diff = (cl_mem) (col_buffer_.mutable_gpu_diff()); + const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); + cl_mem bottom_diff = (cl_mem) (bottom[i]->mutable_gpu_diff()); + + Dtype* col_data_cpu = NULL; + Dtype* col_diff_cpu = NULL; + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + col_data_cpu = col_buffer_.mutable_cpu_data(); + col_diff_cpu = col_buffer_.mutable_cpu_diff(); + } + + for (int n = 0; n < num_; ++n) { + // Since we saved memory in the forward pass by not storing all col + // data, we will need to recompute them. + greentea_im2col_sk_gpu(program, ctx, bottom_data, + bottom[i]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, kstride_h_, kstride_w_, + col_data); + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + for (int g = 0; g < group_; ++g) { + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + caffe_cpu_gemm( + CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., + top_diff_cpu + top[i]->offset(n) + top_offset * g, + col_data_cpu + col_offset * g, (Dtype) 1., + weight_diff_cpu + weight_offset * g); + } else { + greentea_gpu_gemm(this->device_context_.id(), + CblasNoTrans, CblasTrans, M_, K_, N_, + (Dtype) 1., top_diff, + top[i]->offset(n) + top_offset * g, + col_data, col_offset * g, (Dtype) 1., + weight_diff, weight_offset * g); + } + } + } + // gradient w.r.t. bottom data, if necessary + if (propagate_down[i]) { + for (int g = 0; g < group_; ++g) { + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + caffe_cpu_gemm( + CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., + weight_cpu + weight_offset * g, + top_diff_cpu + top[i]->offset(n) + top_offset * g, + (Dtype) 0., col_diff_cpu + col_offset * g); + } else { + greentea_gpu_gemm(this->device_context_.id(), CblasTrans, + CblasNoTrans, K_, N_, M_, (Dtype) 1., + weight, weight_offset * g, top_diff, + top[i]->offset(n) + top_offset * g, + (Dtype) 0., col_diff, col_offset * g); + } + } + // col2im back to the data + greentea_col2im_sk_gpu(program, ctx, col_diff, channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, kstride_h_, kstride_w_, + bottom_diff, bottom[i]->offset(n)); } - // col2im back to the data - col2im_sk_gpu(col_diff, channels_, height_, width_, kernel_h_, - kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, - kstride_h_, kstride_w_, - bottom_diff + bottom[i]->offset(n)); } } } +#endif } } From afca55859d13458334d24dafb033976db1aa0161 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 18 May 2015 00:59:03 +0200 Subject: [PATCH 032/254] Training fixed for SGD-SK networks. --- include/caffe/blob.hpp | 7 + include/caffe/common.hpp | 2 +- .../greentea/greentea_math_functions.hpp | 4 + include/caffe/layer.hpp | 38 +- include/caffe/net.hpp | 54 +- include/caffe/solver.hpp | 1 + src/caffe/blob.cpp | 33 +- src/caffe/common.cpp | 57 +- src/caffe/greentea/cl_kernels.cpp | 12 +- src/caffe/greentea/cl_kernels.sh | 2 +- src/caffe/greentea/cl_kernels/activation.cl | 17 +- src/caffe/greentea/cl_kernels/pooling_sk.cl | 103 ++- src/caffe/greentea/cl_kernels/softmax_loss.cl | 50 +- src/caffe/greentea/greentea_im2col.cpp | 9 +- .../greentea/greentea_math_functions.cpp | 13 +- src/caffe/layers/conv_sk_layer.cu | 9 - src/caffe/layers/pooling_sk_layer.cu | 42 +- src/caffe/layers/relu_layer.cu | 39 +- src/caffe/layers/softmax_layer.cu | 218 ++--- src/caffe/layers/softmax_loss_layer.cu | 131 ++- src/caffe/solver.cpp | 768 +++++++++--------- 21 files changed, 983 insertions(+), 626 deletions(-) diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index cc116b72f28..50d4e73f097 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -8,7 +8,14 @@ #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/syncedmem.hpp" + +#ifdef USE_CUDA #include "caffe/util/math_functions.hpp" +#endif + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea_math_functions.hpp" +#endif const int kMaxBlobAxes = INT_MAX; diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 3184080db71..3bdf10bc1e7 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -185,7 +185,7 @@ class Caffe { #ifdef USE_GREENTEA vector ocl_programs_; viennacl::ocl::program default_ocl_program_; -#endif +#endif // USE_GREENTEA private: // The private constructor to avoid duplicate instantiation. diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 7ec9c081283..f0b96c907cd 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -30,6 +30,10 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, const int offY, viennacl::ocl::context &ctx); +void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, + cl_mem Y, const int offY, + viennacl::ocl::context &ctx); + template void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 02a09c632be..c6041264af5 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -458,17 +458,35 @@ inline Dtype Layer::Forward(const vector*>& bottom, case Caffe::GPU: Forward_gpu(bottom, top); #ifndef CPU_ONLY - for (int top_id = 0; top_id < top.size(); ++top_id) { - if (!this->loss(top_id)) { - continue; + if (device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int top_id = 0; top_id < top.size(); ++top_id) { + if (!this->loss(top_id)) { + continue; + } + const int count = top[top_id]->count(); + const Dtype* data = top[top_id]->gpu_data(); + const Dtype* loss_weights = top[top_id]->gpu_diff(); + Dtype blob_loss = 0; + caffe_gpu_dot(count, data, loss_weights, &blob_loss); + loss += blob_loss; } - const int count = top[top_id]->count(); - const Dtype* data = top[top_id]->gpu_data(); - const Dtype* loss_weights = top[top_id]->gpu_diff(); - Dtype blob_loss = 0; - // TODO: Greentea backend here - caffe_gpu_dot(count, data, loss_weights, &blob_loss); - loss += blob_loss; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + for (int top_id = 0; top_id < top.size(); ++top_id) { + if (!this->loss(top_id)) { + continue; + } + const int count = top[top_id]->count(); + cl_mem data = (cl_mem)(top[top_id]->gpu_data()); + cl_mem loss_weights = (cl_mem)(top[top_id]->gpu_diff()); + Dtype blob_loss = 0; + greentea_gpu_dot(this->device_context_.id(), count, data, 0, + loss_weights, 0, &blob_loss); + loss += blob_loss; + } +#endif // USE_GREENTEA } #endif break; diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 5665df1edf2..daa60fd519b 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -20,12 +20,13 @@ namespace caffe { * * TODO(dox): more thorough description. */ -template +template class Net { public: explicit Net(const NetParameter& param); explicit Net(const string& param_file, Phase phase); - virtual ~Net() {} + virtual ~Net() { + } /// @brief Initialize a network with a NetParameter. void Init(const NetParameter& param); @@ -49,8 +50,8 @@ class Net { Dtype ForwardFrom(int start); Dtype ForwardTo(int end); /// @brief Run forward using a set of bottom blobs, and return the result. - const vector*>& Forward(const vector* > & bottom, - Dtype* loss = NULL); + const vector*>& Forward(const vector*> & bottom, + Dtype* loss = NULL); /** * @brief Run forward using a serialized BlobProtoVector and return the * result as a serialized BlobProtoVector @@ -75,7 +76,7 @@ class Net { */ void Reshape(); - Dtype ForwardBackward(const vector* > & bottom) { + Dtype ForwardBackward(const vector*> & bottom) { Dtype loss; Forward(bottom, &loss); Backward(); @@ -102,11 +103,17 @@ class Net { void ToProto(NetParameter* param, bool write_diff = false) const; /// @brief returns the network name. - inline const string& name() const { return name_; } + inline const string& name() const { + return name_; + } /// @brief returns the layer names - inline const vector& layer_names() const { return layer_names_; } + inline const vector& layer_names() const { + return layer_names_; + } /// @brief returns the blob names - inline const vector& blob_names() const { return blob_names_; } + inline const vector& blob_names() const { + return blob_names_; + } /// @brief returns the blobs inline const vector > >& blobs() const { return blobs_; @@ -116,7 +123,9 @@ class Net { return layers_; } /// @brief returns the phase: TRAIN or TEST - inline Phase phase() const { return phase_; } + inline Phase phase() const { + return phase_; + } /** * @brief returns the bottom vecs for each layer -- usually you won't * need this unless you do per-layer checks such as gradients. @@ -145,17 +154,25 @@ class Net { return params_; } /// @brief returns the parameter learning rate multipliers - inline const vector& params_lr() const { return params_lr_; } + inline const vector& params_lr() const { + return params_lr_; + } inline const vector& params_weight_decay() const { return params_weight_decay_; } const map& param_names_index() const { return param_names_index_; } - inline const vector& param_owners() const { return param_owners_; } + inline const vector& param_owners() const { + return param_owners_; + } /// @brief Input and output blob numbers - inline int num_inputs() const { return net_input_blobs_.size(); } - inline int num_outputs() const { return net_output_blobs_.size(); } + inline int num_inputs() const { + return net_input_blobs_.size(); + } + inline int num_outputs() const { + return net_output_blobs_.size(); + } inline const vector*>& input_blobs() const { return net_input_blobs_; } @@ -173,7 +190,9 @@ class Net { bool has_layer(const string& layer_name) const; const shared_ptr > layer_by_name(const string& layer_name) const; - void set_debug_info(const bool value) { debug_info_ = value; } + void set_debug_info(const bool value) { + debug_info_ = value; + } // Helpers for Init. /** @@ -181,10 +200,10 @@ class Net { * phase, level, and stage. */ static void FilterNet(const NetParameter& param, - NetParameter* param_filtered); + NetParameter* param_filtered); /// @brief return whether NetState state meets NetStateRule rule static bool StateMeetsRule(const NetState& state, const NetStateRule& rule, - const string& layer_name); + const string& layer_name); protected: // Helpers for Init. @@ -259,10 +278,9 @@ class Net { /// Whether to compute and display debug info for the net. bool debug_info_; - DISABLE_COPY_AND_ASSIGN(Net); +DISABLE_COPY_AND_ASSIGN(Net); }; - } // namespace caffe #endif // CAFFE_NET_HPP_ diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 7805bcd2a40..dc427372184 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -66,6 +66,7 @@ class Solver { int current_step_; shared_ptr > net_; vector > > test_nets_; + DeviceContext device_context_; DISABLE_COPY_AND_ASSIGN(Solver); }; diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index b985e3c4bd4..2dfce802648 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -173,9 +173,11 @@ void Blob::Update() { #ifndef CPU_ONLY // perform computation on GPU if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_axpy(count_, Dtype(-1), static_cast(diff_->gpu_data()), static_cast(data_->mutable_gpu_data())); +#endif } else { #ifdef USE_GREENTEA greentea_gpu_axpy(device_context_.id(), count_, Dtype(-1), @@ -220,9 +222,11 @@ Dtype Blob::asum_data() const { case SyncedMemory::SYNCED: { #ifndef CPU_ONLY if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA Dtype asum; caffe_gpu_asum(count_, gpu_data(), &asum); return asum; +#endif } else { #ifdef USE_GREENTEA Dtype asum; @@ -265,9 +269,11 @@ Dtype Blob::asum_diff() const { case SyncedMemory::SYNCED: { #ifndef CPU_ONLY if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA Dtype asum; caffe_gpu_asum(count_, gpu_diff(), &asum); return asum; +#endif } else { #ifdef USE_GREENTEA Dtype asum; @@ -306,20 +312,23 @@ Dtype Blob::sumsq_data() const { return 0; } switch (data_->head()) { - case SyncedMemory::HEAD_AT_CPU: + case SyncedMemory::HEAD_AT_CPU: { data = cpu_data(); sumsq = caffe_cpu_dot(count_, data, data); break; + } case SyncedMemory::HEAD_AT_GPU: case SyncedMemory::SYNCED: { #ifndef CPU_ONLY data = gpu_data(); if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_dot(count_, data, data, &sumsq); +#endif } else { #ifdef USE_GREENTEA - greentea_gpu_dot(device_context_.id(), count_, (cl_mem)data, 0, (cl_mem)data, 0, - &sumsq); + greentea_gpu_dot(device_context_.id(), count_, (cl_mem) data, 0, + (cl_mem) data, 0, &sumsq); #endif } #else @@ -363,17 +372,19 @@ Dtype Blob::sumsq_diff() const { #ifndef CPU_ONLY diff = gpu_diff(); if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_dot(count_, diff, diff, &sumsq); +#endif } else { #ifdef USE_GREENTEA - greentea_gpu_dot(device_context_.id(), count_, (cl_mem)diff, 0, (cl_mem)diff, 0, - &sumsq); + greentea_gpu_dot(device_context_.id(), count_, (cl_mem) diff, 0, + (cl_mem) diff, 0, &sumsq); #endif } - break; #else NO_GPU; #endif + break; } case SyncedMemory::UNINITIALIZED: return 0; @@ -408,10 +419,13 @@ void Blob::scale_data(Dtype scale_factor) { #ifndef CPU_ONLY data = mutable_gpu_data(); if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_scal(count_, scale_factor, data); +#endif } else { #ifdef USE_GREENTEA - greentea_gpu_scal(device_context_.id(), count_, scale_factor, (cl_mem)data, 0); + greentea_gpu_scal(device_context_.id(), count_, scale_factor, + (cl_mem) data, 0); #endif } return; @@ -451,10 +465,13 @@ void Blob::scale_diff(Dtype scale_factor) { #ifndef CPU_ONLY diff = mutable_gpu_diff(); if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_scal(count_, scale_factor, diff); +#endif } else { #ifdef USE_GREENTEA - greentea_gpu_scal(device_context_.id(), count_, scale_factor, (cl_mem)diff, 0); + greentea_gpu_scal(device_context_.id(), count_, scale_factor, + (cl_mem) diff, 0); #endif } return; diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index a86f29ca30d..26b5e7529bd 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -118,27 +118,38 @@ Caffe::~Caffe() { } void Caffe::set_random_seed(const unsigned int seed) { - // Curand seed - static bool g_curand_availability_logged = false; - if (Get().curand_generator_) { - CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(curand_generator(), seed)); - CURAND_CHECK(curandSetGeneratorOffset(curand_generator(), 0)); - } else { - if (!g_curand_availability_logged) { - LOG(ERROR)<< - "Curand not available. Skipping setting the curand seed."; - g_curand_availability_logged = true; + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // Curand seed + static bool g_curand_availability_logged = false; + if (Get().curand_generator_) { + CURAND_CHECK( + curandSetPseudoRandomGeneratorSeed(curand_generator(), seed)); + CURAND_CHECK(curandSetGeneratorOffset(curand_generator(), 0)); + } else { + if (!g_curand_availability_logged) { + LOG(ERROR)<< + "Curand not available. Skipping setting the curand seed."; + g_curand_availability_logged = true; + } } + // RNG seed + Get().random_generator_.reset(new RNG(seed)); +#endif + } else { +#ifdef USE_GREENTEA +// TODO: Proper RNG and Seed for OpenCL +#endif } - // RNG seed - Get().random_generator_.reset(new RNG(seed)); } void Caffe::EnumerateDevices() { int cuda_device_count = 0; int greentea_device_count = 0; +#ifdef USE_CUDA cudaGetDeviceCount(&cuda_device_count); +#endif #ifdef USE_GREENTEA typedef std::vector platforms_type; @@ -166,6 +177,7 @@ void Caffe::EnumerateDevices() { #endif // Display info for all devices +#ifdef USE_CUDA for (int i = 0; i < cuda_device_count; ++i) { cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, i)); @@ -176,6 +188,7 @@ void Caffe::EnumerateDevices() { LOG(INFO)<< "Name: " << prop.name; LOG(INFO)<< "Total global memory: " << prop.totalGlobalMem; } +#endif #ifdef USE_GREENTEA for (int i = 0; i < greentea_device_count; ++i) { @@ -201,7 +214,9 @@ void Caffe::SetDevices(std::vector device_ids) { int cuda_device_count = 0; int greentea_device_count = 0; +#ifdef USE_CUDA cudaGetDeviceCount(&cuda_device_count); +#endif for (int i = 0; i < cuda_device_count; ++i) { Get().device_contexts_.push_back(DeviceContext(i, Backend::BACKEND_CUDA)); @@ -256,7 +271,7 @@ void Caffe::SetDevices(std::vector device_ids) { } } -#endif +#endif // USE_GREENTEA } @@ -268,7 +283,7 @@ DeviceContext& Caffe::GetDeviceContext(int id) { viennacl::ocl::program & Caffe::GetDeviceProgram(int id) { return id == -1 ? Get().default_ocl_program_ : Get().ocl_programs_[id]; } -#endif +#endif // USE_GREENTEA DeviceContext& Caffe::GetDefaultDeviceContext() { return Get().default_device_context_; @@ -304,19 +319,20 @@ void Caffe::SetDevice(const int device_id) { CURAND_CHECK( curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, cluster_seedgen())); -#endif +#endif // USE_CUDA } else { #ifdef USE_GREENTEA #ifdef USE_CLBLAS clblasSetup(); -#endif -#endif +#endif // USE_CLBLAS +#endif // USE_GREENTEA } } // TODO: (FTschopp) fix this for the new backend void Caffe::DeviceQuery() { if (Get().default_device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA cudaDeviceProp prop; int device; if (cudaSuccess != cudaGetDevice(&device)) { @@ -349,11 +365,12 @@ void Caffe::DeviceQuery() { << (prop.kernelExecTimeoutEnabled ? "Yes" : "No"); } } +#endif // USE_CUDA else { #ifdef USE_GREENTEA - // TODO -#endif + // TODO: Complete OpenCL device information of current device +#endif // USE_GREENTEA } return; @@ -391,6 +408,7 @@ void* Caffe::RNG::generator() { return static_cast(generator_->rng()); } +#ifdef USE_CUDA const char* cublasGetErrorString(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: @@ -452,6 +470,7 @@ const char* curandGetErrorString(curandStatus_t error) { } return "Unknown curand status"; } +#endif // USE_CUDA #endif // CPU_ONLY diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index cb9a09ce23c..02aa2558864 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,24 +6,24 @@ namespace caffe { std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in,\n __global Dtype* out, Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}"; +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, __global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n __global Dtype* bottom_data,\n const int num, const int channels,\n const int height, const int width,\n const int pooled_height,\n const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h,\n const int ext_kernel_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int)0);\n wstart = max(wstart, (int)0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data,\n __global const Dtype* label,\n __global Dtype* loss, const int num,\n const int dim, const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype)FLT_MIN));\n counts[index] = 1;\n }\n }\n}"; +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, __global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; ss << header << "\n\n"; diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index afc31475de6..412c41d1e43 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -105,7 +105,7 @@ do echo " ss << ${CL_KERNEL_NAME}_double << \"\\n\\n\";" >> $SOURCE done echo " ss << \"#endif\" << \"\\n\\n\";" >> $SOURCE -echo "#endif // GREENTEA_DOUBLE_SUUPORT" >> $SOURCE +echo "#endif // GREENTEA_DOUBLE_SUPPORT" >> $SOURCE echo " std::string kernel_string = ss.str();" >> $SOURCE echo " const char* kernel_program = kernel_string.c_str();" >> $SOURCE diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index fe07f8850d3..d7d693a7a57 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -2,9 +2,22 @@ #include "header.cl" #endif -__kernel void TEMPLATE(relu_forward,Dtype)(const int n, __global const Dtype* in, - __global Dtype* out, Dtype negative_slope) { +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out, + Dtype negative_slope) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } + +__kernel void TEMPLATE(relu_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + } +} diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index 352801b8e79..080a770cd16 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -2,20 +2,27 @@ #include "header.cl" #endif -__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, - __global Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, const int pad_h, - const int pad_w, __global Dtype* top_data, - const int use_mask, - __global int* mask, - __global Dtype* top_mask) { +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, __global Dtype* bottom_data, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, + __global int* mask, + __global Dtype* top_mask) { for (int index = get_global_id(0); index < nthreads; index += get_global_size(0)) { int pw = index % pooled_width; @@ -26,11 +33,12 @@ __kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, int wstart = pw * stride_w - pad_w; int hend = min(hstart + ext_kernel_h, height); int wend = min(wstart + ext_kernel_w, width); - hstart = max(hstart, (int)0); - wstart = max(wstart, (int)0); + hstart = max(hstart, (int) 0); + wstart = max(wstart, (int) 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; + __global Dtype* bottom_data_ptr = bottom_data + + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += kstride_h) { for (int w = wstart; w < wend; w += kstride_w) { if (bottom_data_ptr[h * width + w] > maxval) { @@ -47,3 +55,64 @@ __kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, } } } + +__kernel void TEMPLATE(max_pool_backward_sk,Dtype)( + const int nthreads, __global const Dtype* top_diff, const int use_mask, + __global const int* mask, __global const Dtype* top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* bottom_diff) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + __global const int* mask_ptr = mask; + __global const Dtype* top_diff_ptr = top_diff; + +// find out the local index +// find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff_ptr += offset; + if (use_mask == 1) { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask_ptr[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } else { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + diff --git a/src/caffe/greentea/cl_kernels/softmax_loss.cl b/src/caffe/greentea/cl_kernels/softmax_loss.cl index 930ac9ab4e2..fd027d3a2f6 100644 --- a/src/caffe/greentea/cl_kernels/softmax_loss.cl +++ b/src/caffe/greentea/cl_kernels/softmax_loss.cl @@ -2,13 +2,12 @@ #include "header.cl" #endif -__kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dtype* prob_data, - __global const Dtype* label, - __global Dtype* loss, const int num, - const int dim, const int spatial_dim, - const int has_ignore_label_, - const int ignore_label_, - __global Dtype* counts) { +__kernel void TEMPLATE(softmax_loss_forward,Dtype)( + int n, __global const Dtype* prob_data, __global const Dtype* label, + __global Dtype* loss, + const int num, const int dim, const int spatial_dim, + const int has_ignore_label_, const int ignore_label_, + __global Dtype* counts) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { const int n = index / spatial_dim; @@ -19,9 +18,42 @@ __kernel void TEMPLATE(softmax_loss_forward_gpu,Dtype)(int n, __global const Dty counts[index] = 0; } else { loss[index] = -log( - max((Dtype)(prob_data[n * dim + label_value * spatial_dim + s]), - (Dtype)FLT_MIN)); + max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype) FLT_MIN)); counts[index] = 1; } } } + +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, + __global const Dtype* top, + __global const Dtype* label, + __global Dtype* bottom_diff, + const int num, + const int dim, + const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global Dtype* counts) { + + const int channels = dim / spatial_dim; + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } + } +} diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 5c9143d3796..0b30f0224c3 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -20,9 +20,6 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, const int kstride_h, const int kstride_w, cl_mem data_col) { - std::cout << "DATA_IM: " << data_im << std::endl; - std::cout << "DATA_COL: " << data_col << std::endl; - int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; @@ -30,7 +27,7 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, int num_kernels = channels * height_col * width_col; viennacl::ocl::kernel &kernel = prog.get_kernel( - CL_KERNEL_SELECT("im2col_sk_gpu_kernel")); + CL_KERNEL_SELECT("im2col_sk")); viennacl::ocl::enqueue( kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, @@ -38,8 +35,6 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, stride_h, stride_w, kstride_h, kstride_w, height_col, width_col, WrapHandle(data_col, ctx)), ctx.get_queue()); - - std::cout << "END OF IM2COL" << std::endl; } // Explicit instantiation @@ -96,7 +91,7 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, CL_KERNEL_SELECT("col2im_sk")); viennacl::ocl::enqueue( - kernel(num_kernels, num_kernels, WrapHandle(data_col,ctx), height, width, channels, + kernel(num_kernels, WrapHandle(data_col,ctx), height, width, channels, patch_h, patch_w, ext_patch_h, ext_patch_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, height_col, width_col, WrapHandle(data_im,ctx), data_offset), diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index d84e0d8eb3b..9c6dc99a39a 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -64,6 +64,15 @@ void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, ctx.get_queue().finish(); } +// Copy from OpenCL to OpenCL buffer +void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, + cl_mem Y, const int offY, + viennacl::ocl::context &ctx) { + clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, offX, offY, N, 0, NULL, + NULL); + ctx.get_queue().finish(); +} + // Copy from OpenCL buffer to OpenCL buffer template void greentea_copy(const int N, const cl_mem X, cl_mem Y, @@ -403,7 +412,7 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, clblasDdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); } - greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, &out, ctx); + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, out, ctx); ctx.get_queue().finish(); clReleaseMemObject(gpuout); @@ -455,7 +464,7 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, clblasDasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); } - greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, &Y, ctx); + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, Y, ctx); ctx.get_queue().finish(); clReleaseMemObject(gpuout); diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index cc0e1f14aa2..ccdb6d85a29 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -55,8 +55,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, } else { // GreenTea backend code #ifdef USE_GREENTEA - std::cout << "CONV GREENTEA BEGIN: " << this->layer_param().name() - << std::endl; viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( @@ -82,10 +80,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, int col_offset = K_ * N_; int top_offset = M_ * N_; - std::cout << "M:" << M_ << std::endl; - std::cout << "N:" << N_ << std::endl; - std::cout << "K:" << K_ << std::endl; - for (int n = 0; n < num_; ++n) { // First, im2col @@ -99,7 +93,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - std::cout << "CPU GEMM" << std::endl; caffe_cpu_gemm( CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., weight_cpu + weight_offset * g, col_data_cpu + col_offset * g, @@ -133,8 +126,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, } } } - - std::cout << "CONV GREENTEA END" << std::endl; #endif } } diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index 35d6548fbc1..d9750a06f0e 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -262,7 +262,6 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, } else { mask = max_idx_.mutable_gpu_data(); } - std::cout << "POOLING GREENTEA BEGIN" << std::endl; viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( CL_KERNEL_SELECT("max_pool_forward_sk")); viennacl::ocl::enqueue( @@ -278,7 +277,6 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, WrapHandle((cl_mem) top_mask, ctx)), ctx.get_queue()); ctx.get_queue().finish(); - std::cout << "POOLING GREENTEA END" << std::endl; } break; case PoolingParameter_PoolMethod_AVE: @@ -361,7 +359,6 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); - caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; @@ -372,6 +369,7 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA + caffe_gpu_set(count, Dtype(0.), bottom_diff); switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { @@ -391,22 +389,44 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, default: LOG(FATAL)<<"Unknown or unsupported pooling method in Backward_gpu()."; } - CUDA_POST_KERNEL_CHECK; + CUDA_POST_KERNEL_CHECK + ; #endif // USE_CUDA - } - else - { + } else { #ifdef USE_GREENTEA - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + greentea_gpu_set(this->device_context_.id(), count, Dtype(0.), + (cl_mem) bottom_diff, 0); + + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } - // TODO + viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_backward_sk")); + viennacl::ocl::enqueue( + oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, ctx), + WrapHandle((cl_mem) top_mask, ctx), + top[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapHandle((cl_mem) bottom_diff, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + } break; - default: + default: LOG(FATAL)<<"Unknown or unsupported pooling method in Backward_gpu()."; } #endif // USE_GREENTEA diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index 498138a2eac..c69506bce54 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -28,14 +28,15 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - ReLUForward<<>>( + ReLUForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK ; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - std::cout << "RELU GREENTEA BEGIN" << std::endl; viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( @@ -47,9 +48,7 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, WrapHandle((cl_mem) top_data, ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); - std::cout << "RELU GREENTEA END" << std::endl; - -#endif +#endif // USE_GREENTEA } // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data @@ -79,11 +78,31 @@ void ReLULayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); - // NOLINT_NEXT_LINE(whitespace/operators) - ReLUBackward<<>>( - count, top_diff, bottom_data, bottom_diff, negative_slope); - CUDA_POST_KERNEL_CHECK - ; + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + ReLUBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_diff, bottom_data, bottom_diff, negative_slope); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( + CL_KERNEL_SELECT("relu_backward")); + viennacl::ocl::enqueue( + oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, ctx), + WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) bottom_diff, ctx), + negative_slope), + ctx.get_queue()); + ctx.get_queue().finish(); +#endif // USE_GREENTEA + } } } diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 6dc734f6a01..718b72941f6 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -120,90 +120,91 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, // and then normalize. // compute max // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_max<<>>(num, channels, spatial_dim, top_data, - scale_data); + kernel_channel_max CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), + CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, + scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_subtract<<>>(count, num, channels, spatial_dim, - scale_data, top_data); + kernel_channel_subtract CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, + scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) - kernel_exp<<>>(num * channels * spatial_dim, top_data, - top_data); + kernel_exp CUDA_KERNEL(CAFFE_GET_BLOCKS(num * channels * spatial_dim), + CAFFE_CUDA_NUM_THREADS)(num * channels * spatial_dim, top_data, + top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_sum<<>>(num, channels, spatial_dim, top_data, - scale_data); + kernel_channel_sum CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), + CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, + scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_div<<>>(count, num, channels, spatial_dim, - scale_data, top_data); + kernel_channel_div CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, + scale_data, top_data); #endif -} else { + } else { #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); - const cl_mem bottom_data = (cl_mem) (bottom[0]->gpu_data()); - cl_mem top_data = (cl_mem) (top[0]->mutable_gpu_data()); - cl_mem scale_data = (cl_mem) (scale_.mutable_gpu_data()); - int count = bottom[0]->count(); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - int spatial_dim = bottom[0]->height() * bottom[0]->width(); + const cl_mem bottom_data = (cl_mem) (bottom[0]->gpu_data()); + cl_mem top_data = (cl_mem) (top[0]->mutable_gpu_data()); + cl_mem scale_data = (cl_mem) (scale_.mutable_gpu_data()); + int count = bottom[0]->count(); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + int spatial_dim = bottom[0]->height() * bottom[0]->width(); - greentea_copy(count, bottom_data, top_data, ctx); + greentea_copy(count, bottom_data, top_data, ctx); - viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( - CL_KERNEL_SELECT("kernel_channel_max")); - viennacl::ocl::enqueue( - oclk_channel_max(num, channels, spatial_dim, WrapHandle(top_data, ctx), - WrapHandle(scale_data, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_max")); + viennacl::ocl::enqueue( + oclk_channel_max(num, channels, spatial_dim, WrapHandle(top_data, ctx), + WrapHandle(scale_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); - viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( - CL_KERNEL_SELECT("kernel_channel_subtract")); - viennacl::ocl::enqueue( - oclk_channel_subtract(count, num, channels, spatial_dim, - WrapHandle(scale_data, ctx), - WrapHandle(top_data, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_subtract")); + viennacl::ocl::enqueue( + oclk_channel_subtract(count, num, channels, spatial_dim, + WrapHandle(scale_data, ctx), + WrapHandle(top_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); - viennacl::ocl::kernel &oclk_exp = program.get_kernel( - CL_KERNEL_SELECT("kernel_exp")); - viennacl::ocl::enqueue( - oclk_exp(num * channels * spatial_dim, WrapHandle(top_data, ctx), - WrapHandle(top_data, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_exp = program.get_kernel( + CL_KERNEL_SELECT("kernel_exp")); + viennacl::ocl::enqueue( + oclk_exp(num * channels * spatial_dim, WrapHandle(top_data, ctx), + WrapHandle(top_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); - viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( - CL_KERNEL_SELECT("kernel_channel_sum")); - viennacl::ocl::enqueue( - oclk_channel_sum(num, channels, spatial_dim, WrapHandle(top_data, ctx), - WrapHandle(scale_data, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_sum")); + viennacl::ocl::enqueue( + oclk_channel_sum(num, channels, spatial_dim, WrapHandle(top_data, ctx), + WrapHandle(scale_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); - viennacl::ocl::kernel &oclk_channel_div = program.get_kernel( - CL_KERNEL_SELECT("kernel_channel_div")); - viennacl::ocl::enqueue( - oclk_channel_div(count, num, channels, spatial_dim, - WrapHandle(scale_data, ctx), WrapHandle(top_data, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_div = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_div")); + viennacl::ocl::enqueue( + oclk_channel_div(count, num, channels, spatial_dim, + WrapHandle(scale_data, ctx), + WrapHandle(top_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); #endif -} + } } template @@ -215,56 +216,61 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, int channels = top[0]->channels(); int spatial_dim = top[0]->height() * top[0]->width(); - if(this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* top_data = top[0]->gpu_data(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - Dtype* scale_data = scale_.mutable_gpu_data(); - caffe_copy(top[0]->count(), top_diff, bottom_diff); - // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. - // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_dot<<>>(num, channels, spatial_dim, top_diff, top_data, - scale_data); - // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_subtract<<>>(count, num, channels, spatial_dim, - scale_data, bottom_diff); - // elementwise multiplication - caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + Dtype* scale_data = scale_.mutable_gpu_data(); + caffe_copy(top[0]->count(), top_diff, bottom_diff); + // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_dot CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), + CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_diff, top_data, + scale_data); + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_subtract CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, + scale_data, bottom_diff); + // elementwise multiplication + caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); #endif } else { #ifdef USE_GREENTEA - const cl_mem top_diff = (cl_mem)(top[0]->gpu_diff()); - const cl_mem top_data = (cl_mem)(top[0]->gpu_data()); - cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); - cl_mem scale_data = (cl_mem)(scale_.mutable_gpu_data()); + const cl_mem top_diff = (cl_mem) (top[0]->gpu_diff()); + const cl_mem top_data = (cl_mem) (top[0]->gpu_data()); + cl_mem bottom_diff = (cl_mem) (bottom[0]->mutable_gpu_diff()); + cl_mem scale_data = (cl_mem) (scale_.mutable_gpu_data()); - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); - greentea_copy(top[0]->count(), top_diff, bottom_diff, ctx); - ctx.get_queue().finish(); + greentea_copy(top[0]->count(), top_diff, bottom_diff, ctx); + ctx.get_queue().finish(); - viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel(CL_KERNEL_SELECT("kernel_channel_dot")); - viennacl::ocl::enqueue( - oclk_channel_dot(count, num, channels, spatial_dim, - WrapHandle(top_diff, ctx), WrapHandle(top_data, ctx), WrapHandle(scale_data, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_dot")); + viennacl::ocl::enqueue( + oclk_channel_dot(count, num, channels, spatial_dim, + WrapHandle(top_diff, ctx), WrapHandle(top_data, ctx), + WrapHandle(scale_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); - viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel(CL_KERNEL_SELECT("kernel_channel_subtract")); - viennacl::ocl::enqueue( - oclk_channel_subtract(count, num, channels, spatial_dim, - WrapHandle(scale_data, ctx), WrapHandle(bottom_diff, ctx)), - ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( + CL_KERNEL_SELECT("kernel_channel_subtract")); + viennacl::ocl::enqueue( + oclk_channel_subtract(count, num, channels, spatial_dim, + WrapHandle(scale_data, ctx), + WrapHandle(bottom_diff, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); - greentea_gpu_mul(this->device_context_.id(), top[0]->count(),bottom_diff, 0, top_data, 0, bottom_diff, 0); - ctx.get_queue().finish(); + greentea_gpu_mul(this->device_context_.id(), top[0]->count(), + bottom_diff, 0, top_data, 0, bottom_diff, 0); + ctx.get_queue().finish(); #endif } } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index dc13a0b7961..335aabdc492 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -44,7 +44,7 @@ void SoftmaxWithLossLayer::Forward_gpu( softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); if (this->device_context_.backend() == BACKEND_CUDA) { - // CUDA backend code +#ifdef USE_CUDA const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int num = prob_.num(); @@ -59,8 +59,8 @@ void SoftmaxWithLossLayer::Forward_gpu( // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) - SoftmaxLossForwardGPU<<>>(nthreads, prob_data, label, loss_data, + SoftmaxLossForwardGPU CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), + CAFFE_CUDA_NUM_THREADS)(nthreads, prob_data, label, loss_data, num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); @@ -75,8 +75,51 @@ void SoftmaxWithLossLayer::Forward_gpu( if (top.size() == 2) { top[1]->ShareData(prob_); } +#endif // USE_CUDA } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + cl_mem prob_data = (cl_mem) (prob_.gpu_data()); + cl_mem label = (cl_mem) (bottom[1]->gpu_data()); + const int num = prob_.num(); + const int dim = prob_.count() / num; + const int spatial_dim = prob_.height() * prob_.width(); + const int nthreads = num * spatial_dim; + cl_mem loss_data = (cl_mem) (bottom[0]->mutable_gpu_diff()); + cl_mem counts = (cl_mem) (prob_.mutable_gpu_diff()); + + viennacl::ocl::kernel &oclk_softmax_loss_forward = program.get_kernel( + CL_KERNEL_SELECT("softmax_loss_forward")); + viennacl::ocl::enqueue( + oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, ctx), + WrapHandle(label, ctx), + WrapHandle(loss_data, ctx), num, dim, + spatial_dim, has_ignore_label_ ? 1 : 0, + ignore_label_, WrapHandle(counts, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + Dtype loss; + + greentea_gpu_asum(this->device_context_.id(), nthreads, loss_data, 0, + &loss); + if (normalize_) { + Dtype count; + greentea_gpu_asum(this->device_context_.id(), nthreads, counts, 0, + &count); + loss /= count; + } else { + loss /= num; + } + top[0]->mutable_cpu_data()[0] = loss; + if (top.size() == 2) { + top[1]->ShareData(prob_); + } +#endif // USE_GREENTEA } } @@ -116,29 +159,67 @@ void SoftmaxWithLossLayer::Backward_gpu( << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const Dtype* prob_data = prob_.gpu_data(); - const Dtype* top_data = top[0]->gpu_data(); - caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); - const Dtype* label = bottom[1]->gpu_data(); - const int num = prob_.num(); - const int dim = prob_.count() / num; - const int spatial_dim = prob_.height() * prob_.width(); - const int nthreads = num * spatial_dim; - // Since this memory is never used for anything else, - // we use to to avoid allocating new GPU memory. - Dtype* counts = prob_.mutable_gpu_diff(); - // NOLINT_NEXT_LINE(whitespace/operators) - SoftmaxLossBackwardGPU<<>>(nthreads, top_data, label, bottom_diff, - num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts); - const Dtype loss_weight = top[0]->cpu_diff()[0]; - if (normalize_) { - Dtype count; - caffe_gpu_asum(nthreads, counts, &count); - caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const Dtype* prob_data = prob_.gpu_data(); + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); + const Dtype* label = bottom[1]->gpu_data(); + const int num = prob_.num(); + const int dim = prob_.count() / num; + const int spatial_dim = prob_.height() * prob_.width(); + const int nthreads = num * spatial_dim; + // Since this memory is never used for anything else, + // we use to to avoid allocating new GPU memory. + Dtype* counts = prob_.mutable_gpu_diff(); + // NOLINT_NEXT_LINE(whitespace/operators) + SoftmaxLossBackwardGPUCUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), + CAFFE_CUDA_NUM_THREADS)(nthreads, top_data, label, bottom_diff, + num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts); + const Dtype loss_weight = top[0]->cpu_diff()[0]; + if (normalize_) { + Dtype count; + caffe_gpu_asum(nthreads, counts, &count); + caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); + } else { + caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); + } +#endif // USE_CUDA } else { - caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); + cl_mem prob_data = (cl_mem)(prob_.gpu_data()); + cl_mem top_data = (cl_mem)(top[0]->gpu_data()); + greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data,0, bottom_diff,0,ctx); + cl_mem label = (cl_mem)(bottom[1]->gpu_data()); + const int num = prob_.num(); + const int dim = prob_.count() / num; + const int spatial_dim = prob_.height() * prob_.width(); + const int nthreads = num * spatial_dim; + cl_mem counts = (cl_mem)(prob_.mutable_gpu_diff()); + + viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( + CL_KERNEL_SELECT("softmax_loss_backward")); + viennacl::ocl::enqueue( + oclk_softmax_loss_backward(nthreads, WrapHandle(top_data,ctx), WrapHandle(label,ctx), WrapHandle(bottom_diff,ctx), num, dim, spatial_dim, has_ignore_label_?1:0, ignore_label_, WrapHandle(counts,ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + + const Dtype loss_weight = top[0]->cpu_diff()[0]; + if (normalize_) { + Dtype count; + greentea_gpu_asum(this->device_context_.id(), nthreads, counts, 0, &count); + greentea_gpu_scal(this->device_context_.id(), prob_.count(), loss_weight / count, bottom_diff, 0); + } else { + greentea_gpu_scal(this->device_context_.id(), prob_.count(), loss_weight / num, bottom_diff, 0); + } +#endif // USE_GREENTEA } } } diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 56bd6e39c2c..34892c74fb5 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -13,13 +13,13 @@ namespace caffe { -template +template Solver::Solver(const SolverParameter& param) : net_() { Init(param); } -template +template Solver::Solver(const string& param_file) : net_() { SolverParameter param; @@ -27,47 +27,48 @@ Solver::Solver(const string& param_file) Init(param); } -template +template void Solver::Init(const SolverParameter& param) { - LOG(INFO) << "Initializing solver from parameters: " << std::endl - << param.DebugString(); + device_context_ = Caffe::GetDefaultDeviceContext(); + LOG(INFO)<< "Initializing solver from parameters: " << std::endl + << param.DebugString(); param_ = param; - CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; + CHECK_GE(param_.average_loss(), 1)<< "average_loss should be non-negative."; if (param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } // Scaffolding code InitTrainNet(); InitTestNets(); - LOG(INFO) << "Solver scaffolding done."; + LOG(INFO)<< "Solver scaffolding done."; iter_ = 0; current_step_ = 0; } -template +template void Solver::InitTrainNet() { - const int num_train_nets = param_.has_net() + param_.has_net_param() + - param_.has_train_net() + param_.has_train_net_param(); + const int num_train_nets = param_.has_net() + param_.has_net_param() + + param_.has_train_net() + param_.has_train_net_param(); const string& field_names = "net, net_param, train_net, train_net_param"; - CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net " - << "using one of these fields: " << field_names; - CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " - << "one of these fields specifying a train_net: " << field_names; + CHECK_GE(num_train_nets, 1)<< "SolverParameter must specify a train net " + << "using one of these fields: " << field_names; + CHECK_LE(num_train_nets, 1)<< "SolverParameter must not contain more than " + << "one of these fields specifying a train_net: " << field_names; NetParameter net_param; if (param_.has_train_net_param()) { - LOG(INFO) << "Creating training net specified in train_net_param."; + LOG(INFO)<< "Creating training net specified in train_net_param."; net_param.CopyFrom(param_.train_net_param()); } else if (param_.has_train_net()) { LOG(INFO) << "Creating training net from train_net file: " - << param_.train_net(); + << param_.train_net(); ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); } if (param_.has_net_param()) { - LOG(INFO) << "Creating training net specified in net_param."; + LOG(INFO)<< "Creating training net specified in net_param."; net_param.CopyFrom(param_.net_param()); } if (param_.has_net()) { - LOG(INFO) << "Creating training net from net file: " << param_.net(); + LOG(INFO)<< "Creating training net from net file: " << param_.net(); ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); } // Set the correct NetState. We start with the solver defaults (lowest @@ -82,22 +83,20 @@ void Solver::InitTrainNet() { net_.reset(new Net(net_param)); } -template +template void Solver::InitTestNets() { const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); const int num_generic_nets = has_net_param + has_net_file; - CHECK_LE(num_generic_nets, 1) - << "Both net_param and net_file may not be specified."; + CHECK_LE(num_generic_nets, 1)<< "Both net_param and net_file may not be specified."; const int num_test_net_params = param_.test_net_param_size(); const int num_test_net_files = param_.test_net_size(); const int num_test_nets = num_test_net_params + num_test_net_files; if (num_generic_nets) { - CHECK_GE(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; + CHECK_GE(param_.test_iter_size(), num_test_nets)<< "test_iter must be specified for each test network."; } else { - CHECK_EQ(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; + CHECK_EQ(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; } // If we have a generic net (specified by net or net_param, rather than // test_net or test_net_param), we may have an unlimited number of actual @@ -107,8 +106,7 @@ void Solver::InitTestNets() { const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; const int num_test_net_instances = num_test_nets + num_generic_net_instances; if (param_.test_state_size()) { - CHECK_EQ(param_.test_state_size(), num_test_net_instances) - << "test_state must be unspecified or specified once per test net."; + CHECK_EQ(param_.test_state_size(), num_test_net_instances)<< "test_state must be unspecified or specified once per test net."; } if (num_test_net_instances) { CHECK_GT(param_.test_interval(), 0); @@ -117,13 +115,13 @@ void Solver::InitTestNets() { vector sources(num_test_net_instances); vector net_params(num_test_net_instances); for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { - sources[test_net_id] = "test_net_param"; - net_params[test_net_id].CopyFrom(param_.test_net_param(i)); + sources[test_net_id] = "test_net_param"; + net_params[test_net_id].CopyFrom(param_.test_net_param(i)); } for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { - sources[test_net_id] = "test_net file: " + param_.test_net(i); - ReadNetParamsFromTextFileOrDie(param_.test_net(i), - &net_params[test_net_id]); + sources[test_net_id] = "test_net file: " + param_.test_net(i); + ReadNetParamsFromTextFileOrDie(param_.test_net(i), + &net_params[test_net_id]); } const int remaining_test_nets = param_.test_iter_size() - test_net_id; if (has_net_param) { @@ -151,14 +149,13 @@ void Solver::InitTestNets() { net_state.MergeFrom(param_.test_state(i)); } net_params[i].mutable_state()->CopyFrom(net_state); - LOG(INFO) - << "Creating test net (#" << i << ") specified by " << sources[i]; + LOG(INFO)<< "Creating test net (#" << i << ") specified by " << sources[i]; test_nets_[i].reset(new Net(net_params[i])); test_nets_[i]->set_debug_info(param_.debug_info()); } } -template +template void Solver::StepPrefilled() { // Prefilled stepping can only do one at a time because the memory layer has to be refilled int iters = 1; @@ -191,24 +188,24 @@ void Solver::StepPrefilled() { losses[idx] = loss; } if (display) { - LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + LOG(INFO)<< "Iteration " << iter_ << ", loss = " << smoothed_loss; const vector*>& result = net_->output_blobs(); int score_index = 0; for (int j = 0; j < result.size(); ++j) { const Dtype* result_vec = result[j]->cpu_data(); const string& output_name = - net_->blob_names()[net_->output_blob_indices()[j]]; + net_->blob_names()[net_->output_blob_indices()[j]]; const Dtype loss_weight = - net_->blob_loss_weights()[net_->output_blob_indices()[j]]; + net_->blob_loss_weights()[net_->output_blob_indices()[j]]; for (int k = 0; k < result[j]->count(); ++k) { ostringstream loss_msg_stream; if (loss_weight) { loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * result_vec[k] << " loss)"; + << " = " << loss_weight * result_vec[k] << " loss)"; } LOG(INFO) << " Train net output #" - << score_index++ << ": " << output_name << " = " - << result_vec[k] << loss_msg_stream.str(); + << score_index++ << ": " << output_name << " = " + << result_vec[k] << loss_msg_stream.str(); } } } @@ -216,14 +213,13 @@ void Solver::StepPrefilled() { net_->Update(); // Save a snapshot if needed. - if (param_.snapshot() && iter_ % param_.snapshot() == 0) { + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { Snapshot(); } } } - -template +template void Solver::Step(int iters) { vector*> bottom_vec; const int start_iter = iter_; @@ -251,24 +247,24 @@ void Solver::Step(int iters) { losses[idx] = loss; } if (display) { - LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + LOG(INFO)<< "Iteration " << iter_ << ", loss = " << smoothed_loss; const vector*>& result = net_->output_blobs(); int score_index = 0; for (int j = 0; j < result.size(); ++j) { const Dtype* result_vec = result[j]->cpu_data(); const string& output_name = - net_->blob_names()[net_->output_blob_indices()[j]]; + net_->blob_names()[net_->output_blob_indices()[j]]; const Dtype loss_weight = - net_->blob_loss_weights()[net_->output_blob_indices()[j]]; + net_->blob_loss_weights()[net_->output_blob_indices()[j]]; for (int k = 0; k < result[j]->count(); ++k) { ostringstream loss_msg_stream; if (loss_weight) { loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * result_vec[k] << " loss)"; + << " = " << loss_weight * result_vec[k] << " loss)"; } LOG(INFO) << " Train net output #" - << score_index++ << ": " << output_name << " = " - << result_vec[k] << loss_msg_stream.str(); + << score_index++ << ": " << output_name << " = " + << result_vec[k] << loss_msg_stream.str(); } } } @@ -280,15 +276,15 @@ void Solver::Step(int iters) { ++iter_; // Save a snapshot if needed. - if (param_.snapshot() && iter_ % param_.snapshot() == 0) { + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { Snapshot(); } } } -template +template void Solver::Solve(const char* resume_file) { - LOG(INFO) << "Solving " << net_->name(); + LOG(INFO)<< "Solving " << net_->name(); LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); if (resume_file) { @@ -322,20 +318,19 @@ void Solver::Solve(const char* resume_file) { LOG(INFO) << "Optimization Done."; } - -template +template void Solver::TestAll() { for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { Test(test_net_id); } } -template +template void Solver::Test(const int test_net_id) { - LOG(INFO) << "Iteration " << iter_ - << ", Testing net (#" << test_net_id << ")"; + LOG(INFO)<< "Iteration " << iter_ + << ", Testing net (#" << test_net_id << ")"; CHECK_NOTNULL(test_nets_[test_net_id].get())-> - ShareTrainedLayersWith(net_.get()); + ShareTrainedLayersWith(net_.get()); vector test_score; vector test_score_output_id; vector*> bottom_vec; @@ -344,7 +339,7 @@ void Solver::Test(const int test_net_id) { for (int i = 0; i < param_.test_iter(test_net_id); ++i) { Dtype iter_loss; const vector*>& result = - test_net->Forward(bottom_vec, &iter_loss); + test_net->Forward(bottom_vec, &iter_loss); if (param_.test_compute_loss()) { loss += iter_loss; } @@ -372,22 +367,21 @@ void Solver::Test(const int test_net_id) { } for (int i = 0; i < test_score.size(); ++i) { const int output_blob_index = - test_net->output_blob_indices()[test_score_output_id[i]]; + test_net->output_blob_indices()[test_score_output_id[i]]; const string& output_name = test_net->blob_names()[output_blob_index]; const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; ostringstream loss_msg_stream; const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); if (loss_weight) { loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * mean_score << " loss)"; + << " = " << loss_weight * mean_score << " loss)"; } LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " - << mean_score << loss_msg_stream.str(); + << mean_score << loss_msg_stream.str(); } } - -template +template void Solver::Snapshot() { NetParameter net_param; // For intermediate results, we will also dump the gradient values. @@ -399,7 +393,7 @@ void Solver::Snapshot() { snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); filename += iter_str_buffer; model_filename = filename + ".caffemodel"; - LOG(INFO) << "Snapshotting to " << model_filename; + LOG(INFO)<< "Snapshotting to " << model_filename; WriteProtoToBinaryFile(net_param, model_filename.c_str()); SolverState state; SnapshotSolverState(&state); @@ -407,11 +401,11 @@ void Solver::Snapshot() { state.set_learned_net(model_filename); state.set_current_step(current_step_); snapshot_filename = filename + ".solverstate"; - LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; + LOG(INFO)<< "Snapshotting solver state to " << snapshot_filename; WriteProtoToBinaryFile(state, snapshot_filename.c_str()); } -template +template void Solver::Restore(const char* state_file) { SolverState state; NetParameter net_param; @@ -425,7 +419,6 @@ void Solver::Restore(const char* state_file) { RestoreSolverState(state); } - // Return the current learning rate. The currently implemented learning rate // policies are as follows: // - fixed: always return base_lr. @@ -441,7 +434,7 @@ void Solver::Restore(const char* state_file) { // // where base_lr, max_iter, gamma, step, stepvalue and power are defined // in the solver parameter protocol buffer, and iter is the current iteration. -template +template Dtype SGDSolver::GetLearningRate() { Dtype rate; const string& lr_policy = this->param_.lr_policy(); @@ -449,23 +442,23 @@ Dtype SGDSolver::GetLearningRate() { rate = this->param_.base_lr(); } else if (lr_policy == "step") { this->current_step_ = this->iter_ / this->param_.stepsize(); - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); + rate = this->param_.base_lr() + * pow(this->param_.gamma(), this->current_step_); } else if (lr_policy == "exp") { rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); } else if (lr_policy == "inv") { - rate = this->param_.base_lr() * - pow(Dtype(1) + this->param_.gamma() * this->iter_, - - this->param_.power()); + rate = this->param_.base_lr() + * pow(Dtype(1) + this->param_.gamma() * this->iter_, + -this->param_.power()); } else if (lr_policy == "multistep") { - if (this->current_step_ < this->param_.stepvalue_size() && - this->iter_ >= this->param_.stepvalue(this->current_step_)) { + if (this->current_step_ < this->param_.stepvalue_size() + && this->iter_ >= this->param_.stepvalue(this->current_step_)) { this->current_step_++; - LOG(INFO) << "MultiStep Status: Iteration " << + LOG(INFO)<< "MultiStep Status: Iteration " << this->iter_ << ", step = " << this->current_step_; } rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); + pow(this->param_.gamma(), this->current_step_); } else if (lr_policy == "poly") { rate = this->param_.base_lr() * pow(Dtype(1.) - (Dtype(this->iter_) / Dtype(this->param_.max_iter())), @@ -473,14 +466,14 @@ Dtype SGDSolver::GetLearningRate() { } else if (lr_policy == "sigmoid") { rate = this->param_.base_lr() * (Dtype(1.) / (Dtype(1.) + exp(-this->param_.gamma() * (Dtype(this->iter_) - - Dtype(this->param_.stepsize()))))); + Dtype(this->param_.stepsize()))))); } else { LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; } return rate; } -template +template void SGDSolver::PreSolve() { // Initialize the history const vector > >& net_params = this->net_->params(); @@ -489,16 +482,24 @@ void SGDSolver::PreSolve() { temp_.clear(); for (int i = 0; i < net_params.size(); ++i) { const vector& shape = net_params[i]->shape(); - history_.push_back(shared_ptr>(new Blob(shape,Caffe::GetDefaultDeviceContext()))); - update_.push_back(shared_ptr>(new Blob(shape,Caffe::GetDefaultDeviceContext()))); - temp_.push_back(shared_ptr>(new Blob(shape,Caffe::GetDefaultDeviceContext()))); + history_.push_back( + shared_ptr>( + new Blob(shape, Caffe::GetDefaultDeviceContext()))); + update_.push_back( + shared_ptr>( + new Blob(shape, Caffe::GetDefaultDeviceContext()))); + temp_.push_back( + shared_ptr>( + new Blob(shape, Caffe::GetDefaultDeviceContext()))); } } -template +template void SGDSolver::ClipGradients() { const Dtype clip_gradients = this->param_.clip_gradients(); - if (clip_gradients < 0) { return; } + if (clip_gradients < 0) { + return; + } const vector > >& net_params = this->net_->params(); Dtype sumsq_diff = 0; for (int i = 0; i < net_params.size(); ++i) { @@ -509,9 +510,9 @@ void SGDSolver::ClipGradients() { const Dtype l2norm_diff = std::sqrt(sumsq_diff); if (l2norm_diff > clip_gradients) { Dtype scale_factor = clip_gradients / l2norm_diff; - LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " - << l2norm_diff << " > " << clip_gradients << ") " - << "by scale factor " << scale_factor; + LOG(INFO)<< "Gradient clipping: scaling down gradients (L2 norm " + << l2norm_diff << " > " << clip_gradients << ") " + << "by scale factor " << scale_factor; for (int i = 0; i < net_params.size(); ++i) { if (this->net_->param_owners()[i] < 0) { net_params[i]->scale_diff(scale_factor); @@ -520,7 +521,7 @@ void SGDSolver::ClipGradients() { } } -template +template void SGDSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); @@ -529,93 +530,138 @@ void SGDSolver::ComputeUpdateValue() { // get the learning rate Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + LOG(INFO)<< "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); Dtype momentum = this->param_.momentum(); Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } } - } - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA + } + } else if (regularization_type == "L1") { + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_sign( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (temp_[param_id]->mutable_gpu_data()), 0); + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (temp_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA + } + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } + } + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + greentea_gpu_axpby(this->device_context_.id(), + net_params[param_id]->count(), local_rate, + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + momentum, + (cl_mem) (history_[param_id]->mutable_gpu_data()), + 0); + // copy + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (history_[param_id]->gpu_data()), + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); +#endif // USE_GREENTEA } } - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } #else - NO_GPU; + NO_GPU; #endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } + break; + default: + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } } -} -template +template void SGDSolver::SnapshotSolverState(SolverState* state) { state->clear_history(); for (int i = 0; i < history_.size(); ++i) { @@ -625,17 +671,16 @@ void SGDSolver::SnapshotSolverState(SolverState* state) { } } -template +template void SGDSolver::RestoreSolverState(const SolverState& state) { - CHECK_EQ(state.history_size(), history_.size()) - << "Incorrect length of history blobs."; + CHECK_EQ(state.history_size(), history_.size())<< "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; for (int i = 0; i < history_.size(); ++i) { history_[i]->FromProto(state.history(i),history_[i]->device_context()); } } -template +template void NesterovSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); @@ -644,115 +689,112 @@ void NesterovSolver::ComputeUpdateValue() { // get the learning rate Dtype rate = this->GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + LOG(INFO)<< "Iteration " << this->iter_ << ", lr = " << rate; } SGDSolver::ClipGradients(); Dtype momentum = this->param_.momentum(); Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } } - } - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); - // compute udpate: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); + // compute udpate: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } } - } - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); - // compute udpate: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); + // compute udpate: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } #else - NO_GPU; + NO_GPU; #endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + break; + default: { + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } } } -template +template void AdaGradSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); @@ -762,130 +804,126 @@ void AdaGradSolver::ComputeUpdateValue() { Dtype rate = this->GetLearningRate(); Dtype delta = this->param_.delta(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + LOG(INFO)<< "Iteration " << this->iter_ << ", lr = " << rate; } SGDSolver::ClipGradients(); Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } } - } - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), delta, + this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } } - } - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); - } + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), delta, + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); + } #else - NO_GPU; + NO_GPU; #endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + break; + default: + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } } -} INSTANTIATE_CLASS(Solver); INSTANTIATE_CLASS(SGDSolver); From 2e46381cd793b42da18f73e6990231b862394b21 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 18 May 2015 02:48:59 +0200 Subject: [PATCH 033/254] Fixed OpenCL 1.1 compability. --- .../greentea/greentea_math_functions.hpp | 16 ++++----- src/caffe/greentea/cl_kernels.cpp | 6 +++- src/caffe/greentea/cl_kernels/fillbuffer.cl | 10 ++++++ .../greentea/greentea_math_functions.cpp | 36 +++++++++++++++---- src/caffe/syncedmem.cpp | 2 +- 5 files changed, 52 insertions(+), 18 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/fillbuffer.cl diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index f0b96c907cd..970799fd5cc 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -18,11 +18,8 @@ namespace caffe { -inline void greentea_memset(const size_t N, const int alpha, cl_mem X, - const int offX, viennacl::ocl::context &ctx) { - clEnqueueFillBuffer(ctx.get_queue().handle().get(), X, &alpha, sizeof(int), - offX, N, 0, NULL, NULL); -} +void greentea_memset(const int ctx_id, const size_t N, const int alpha, + cl_mem X, const int offX); void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, void *Y, viennacl::ocl::context &ctx); @@ -31,8 +28,7 @@ void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, const int offY, viennacl::ocl::context &ctx); void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, - cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + cl_mem Y, const int offY, viennacl::ocl::context &ctx); template void greentea_copy(const int N, const cl_mem X, cl_mem Y, @@ -123,11 +119,11 @@ void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy); + cl_mem y, const int offy); template -void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy); +void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, + int offx, cl_mem y, const int offy); template void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 02aa2558864..0861d79cf6f 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -10,6 +10,7 @@ std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; +std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; @@ -19,6 +20,7 @@ std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.c std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; +std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; @@ -32,6 +34,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << auxiliary_float << "\n\n"; ss << channel_float << "\n\n"; ss << convolution_sk_float << "\n\n"; + ss << fillbuffer_float << "\n\n"; ss << im2col_float << "\n\n"; ss << im2col_sk_float << "\n\n"; ss << math_float << "\n\n"; @@ -45,13 +48,14 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << auxiliary_double << "\n\n"; ss << channel_double << "\n\n"; ss << convolution_sk_double << "\n\n"; + ss << fillbuffer_double << "\n\n"; ss << im2col_double << "\n\n"; ss << im2col_sk_double << "\n\n"; ss << math_double << "\n\n"; ss << pooling_sk_double << "\n\n"; ss << softmax_loss_double << "\n\n"; ss << "#endif" << "\n\n"; -#endif // GREENTEA_DOUBLE_SUUPORT +#endif // GREENTEA_DOUBLE_SUPPORT std::string kernel_string = ss.str(); const char* kernel_program = kernel_string.c_str(); ctx.build_options("-cl-fast-relaxed-math -cl-mad-enable"); diff --git a/src/caffe/greentea/cl_kernels/fillbuffer.cl b/src/caffe/greentea/cl_kernels/fillbuffer.cl new file mode 100644 index 00000000000..31423e32507 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/fillbuffer.cl @@ -0,0 +1,10 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 9c6dc99a39a..59597dbe07d 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -18,9 +18,7 @@ #include #include "caffe/common.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" #include "viennacl/backend/opencl.hpp" #include "viennacl/ocl/context.hpp" #include "viennacl/ocl/device.hpp" @@ -41,6 +39,22 @@ namespace caffe { +void greentea_memset(const int ctx_id, const size_t N, const int alpha, + cl_mem X, const int offX) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + // OpenCL Version >= 1.2 approach + // clEnqueueFillBuffer(ctx.get_queue().handle().get(), X, &alpha, sizeof(int), + // offX, N, 0, NULL, NULL); + // OpenCL Version < 1.2 fallback + typedef float Dtype; + viennacl::ocl::kernel &oclk_fill = program.get_kernel( + CL_KERNEL_SELECT("fill")); + viennacl::ocl::enqueue(oclk_fill(int(N/sizeof(Dtype)), Dtype(alpha), WrapHandle(X, ctx), offX), + ctx.get_queue()); + ctx.get_queue().finish(); +} + // Copy from OpenCL buffer to main memory void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, void *Y, viennacl::ocl::context &ctx) { @@ -68,7 +82,8 @@ void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, cl_mem Y, const int offY, viennacl::ocl::context &ctx) { - clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, offX, offY, N, 0, NULL, + clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, offX, offY, N, 0, + NULL, NULL); ctx.get_queue().finish(); } @@ -530,10 +545,19 @@ template void greentea_gpu_scale(const int ctx_id, const int n, template void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, cl_mem Y, const int offY) { - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + // OpenCL Version >= 1.2 approach + //clEnqueueFillBuffer(ctx.get_queue().handle().get(), Y, &alpha, sizeof(Dtype), + // offY, N, 0, NULL, NULL); + + // OpenCL Version < 1.2 fallback + viennacl::ocl::kernel &oclk_fill = program.get_kernel( + CL_KERNEL_SELECT("fill")); + viennacl::ocl::enqueue(oclk_fill(N, alpha, WrapHandle(Y, ctx), offY), + ctx.get_queue()); - clEnqueueFillBuffer(queue, Y, &alpha, sizeof(Dtype), offY, N, 0, NULL, NULL); + ctx.get_queue().finish(); } template void greentea_gpu_set(const int ctx_id, const int N, diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index c7d63d7954a..b4a6533f365 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -95,7 +95,7 @@ inline void SyncedMemory::to_gpu() { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); int alpha = 0; - greentea_memset(size_, alpha, cl_gpu_mem_, 0, ctx); + greentea_memset(device_context_.id(), size_, alpha, cl_gpu_mem_, 0); } gpu_ptr_ = (void*) cl_gpu_mem_; ctx.get_queue().finish(); From 8c952537823461537cf6c16b00309f1d74e63454 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 18 May 2015 16:35:04 +0200 Subject: [PATCH 034/254] Finished solver infrastructure for OpenCL. --- src/caffe/solver.cpp | 380 ++++++++++++++++++++++++++++++------------- 1 file changed, 271 insertions(+), 109 deletions(-) diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 34892c74fb5..9d11dd32c13 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -573,40 +573,32 @@ void SGDSolver::ComputeUpdateValue() { break; case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay caffe_gpu_axpy(net_params[param_id]->count(), local_decay, net_params[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA + greentea_gpu_axpy( this->device_context_.id(), net_params[param_id]->count(), local_decay, (cl_mem) (net_params[param_id]->gpu_data()), 0, (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA - } - } else if (regularization_type == "L1") { - if (this->device_context_.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA + } else if (regularization_type == "L1") { caffe_gpu_sign(net_params[param_id]->count(), net_params[param_id]->gpu_data(), temp_[param_id]->mutable_gpu_data()); caffe_gpu_axpy(net_params[param_id]->count(), local_decay, temp_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA + greentea_gpu_sign( this->device_context_.id(), net_params[param_id]->count(), (cl_mem) (net_params[param_id]->gpu_data()), 0, @@ -615,15 +607,11 @@ void SGDSolver::ComputeUpdateValue() { this->device_context_.id(), net_params[param_id]->count(), local_decay, (cl_mem) (temp_[param_id]->gpu_data()), 0, (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA } } else { LOG(FATAL)<< "Unknown regularization type: " << regularization_type; } - } - if (this->device_context_.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA caffe_gpu_axpby(net_params[param_id]->count(), local_rate, net_params[param_id]->gpu_diff(), momentum, history_[param_id]->mutable_gpu_data()); @@ -631,9 +619,7 @@ void SGDSolver::ComputeUpdateValue() { caffe_copy(net_params[param_id]->count(), history_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); @@ -648,18 +634,63 @@ void SGDSolver::ComputeUpdateValue() { net_params[param_id]->count(), (cl_mem) (history_[param_id]->gpu_data()), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); -#endif // USE_GREENTEA } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + } else if (regularization_type == "L1") { + greentea_gpu_sign( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (temp_[param_id]->mutable_gpu_data()), 0); + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (temp_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + } + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } + + greentea_gpu_axpby(this->device_context_.id(), + net_params[param_id]->count(), local_rate, + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + momentum, + (cl_mem) (history_[param_id]->mutable_gpu_data()), + 0); + // copy + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (history_[param_id]->gpu_data()), + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); + } +#endif // USE_GREENTEA } #else NO_GPU; #endif } break; - default: - LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + default: { + LOG(FATAL)<<"Unknown caffe mode: " << Caffe::mode(); } } +} template void SGDSolver::SnapshotSolverState(SolverState* state) { @@ -740,53 +771,115 @@ void NesterovSolver::ComputeUpdateValue() { net_params[param_id]->mutable_cpu_diff()); } break; - case Caffe::GPU: + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } } + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute udpate: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + greentea_copy(net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), + ctx); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + } else if (regularization_type == "L1") { + greentea_gpu_sign( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (this->temp_[param_id]->mutable_gpu_data()), 0); + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (this->temp_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } + } - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); + // update history + greentea_gpu_axpby( + this->device_context_.id(), net_params[param_id]->count(), + local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, + momentum, (cl_mem) (this->history_[param_id]->mutable_gpu_data()), + 0); - // compute udpate: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); + // compute udpate: step back then over step + greentea_gpu_axpby( + this->device_context_.id(), net_params[param_id]->count(), + Dtype(1) + momentum, + (cl_mem) (this->history_[param_id]->gpu_data()), 0, -momentum, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); + // copy + greentea_copy(net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), + ctx); + } +#endif // USE_GREENTEA } #else NO_GPU; #endif + } break; default: { LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); @@ -863,67 +956,136 @@ void AdaGradSolver::ComputeUpdateValue() { net_params[param_id]->mutable_cpu_diff()); } break; - case Caffe::GPU: + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } } - } - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); - caffe_gpu_add_scalar(net_params[param_id]->count(), delta, - this->update_[param_id]->mutable_gpu_data()); + caffe_gpu_add_scalar(net_params[param_id]->count(), delta, + this->update_[param_id]->mutable_gpu_data()); - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + } else if (regularization_type == "L1") { + greentea_gpu_sign( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_data()), 0, + (cl_mem) (this->temp_[param_id]->mutable_gpu_data()), 0); + greentea_gpu_axpy( + this->device_context_.id(), net_params[param_id]->count(), + local_decay, (cl_mem) (this->temp_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + greentea_gpu_powx( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // update history + greentea_gpu_add( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->history_[param_id]->gpu_data()), 0, + (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); + + // prepare update + greentea_gpu_powx( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), 0, Dtype(0.5), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_add_scalar( + this->device_context_.id(), net_params[param_id]->count(), delta, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_div( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // scale and copy + greentea_gpu_axpby( + this->device_context_.id(), net_params[param_id]->count(), + local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, + Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + } +#endif // USE_GREENTEA } #else NO_GPU; #endif + } break; - default: + default: { LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); } } +} INSTANTIATE_CLASS(Solver); INSTANTIATE_CLASS(SGDSolver); From b4c42e69a9d4e1dd1df6f97705cdeafcbdba117e Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 18 May 2015 16:53:25 +0200 Subject: [PATCH 035/254] Fixed opencl/cuda aliasing bug. --- src/caffe/solver.cpp | 44 ++++++++------------------------------------ 1 file changed, 8 insertions(+), 36 deletions(-) diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 9d11dd32c13..815598f1040 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -587,10 +587,6 @@ void SGDSolver::ComputeUpdateValue() { net_params[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); - greentea_gpu_axpy( - this->device_context_.id(), net_params[param_id]->count(), - local_decay, (cl_mem) (net_params[param_id]->gpu_data()), 0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); } else if (regularization_type == "L1") { caffe_gpu_sign(net_params[param_id]->count(), net_params[param_id]->gpu_data(), @@ -598,15 +594,6 @@ void SGDSolver::ComputeUpdateValue() { caffe_gpu_axpy(net_params[param_id]->count(), local_decay, temp_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); - - greentea_gpu_sign( - this->device_context_.id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_data()), 0, - (cl_mem) (temp_[param_id]->mutable_gpu_data()), 0); - greentea_gpu_axpy( - this->device_context_.id(), net_params[param_id]->count(), - local_decay, (cl_mem) (temp_[param_id]->gpu_data()), 0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); } } else { LOG(FATAL)<< "Unknown regularization type: " << regularization_type; @@ -619,21 +606,6 @@ void SGDSolver::ComputeUpdateValue() { caffe_copy(net_params[param_id]->count(), history_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); - - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); - - greentea_gpu_axpby(this->device_context_.id(), - net_params[param_id]->count(), local_rate, - (cl_mem) (net_params[param_id]->gpu_diff()), 0, - momentum, - (cl_mem) (history_[param_id]->mutable_gpu_data()), - 0); - // copy - greentea_copy( - net_params[param_id]->count(), - (cl_mem) (history_[param_id]->gpu_data()), - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); } #endif // USE_CUDA } else { @@ -825,10 +797,10 @@ void NesterovSolver::ComputeUpdateValue() { for (int param_id = 0; param_id < net_params.size(); ++param_id) { // save history momentum for stepping back - greentea_copy(net_params[param_id]->count(), - (cl_mem) (this->history_[param_id]->gpu_data()), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), - ctx); + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), ctx); Dtype local_rate = rate * net_params_lr[param_id]; Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; @@ -869,10 +841,10 @@ void NesterovSolver::ComputeUpdateValue() { (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); // copy - greentea_copy(net_params[param_id]->count(), - (cl_mem) (this->update_[param_id]->gpu_data()), - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), - ctx); + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); } #endif // USE_GREENTEA } From 7043eea5e8d4baaea4ea41ec999b27bd6592dc75 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 18 May 2015 21:05:33 +0200 Subject: [PATCH 036/254] Conv-SK fix. --- src/caffe/greentea/cl_kernels/pooling_sk.cl | 121 +++++++++++++++++++- src/caffe/layers/conv_sk_layer.cu | 5 + src/caffe/layers/pooling_sk_layer.cu | 105 +++++++++++------ 3 files changed, 196 insertions(+), 35 deletions(-) diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index 080a770cd16..48e3c1b1879 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -2,7 +2,8 @@ #include "header.cl" #endif -__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, __global Dtype* bottom_data, +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__global Dtype* bottom_data, const int num, const int channels, const int height, @@ -116,3 +117,121 @@ __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( } } +__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data_ptr[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, __global Dtype* rand_idx, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data_ptr[h * width + w]; + return; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + cumvalues += bottom_data_ptr[h * width + w] + * bottom_data_ptr[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } + +} diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index ccdb6d85a29..b765481ff16 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -283,6 +283,9 @@ void ConvolutionSKLayer::Backward_gpu( width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); + + ctx.get_queue().finish(); + // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { @@ -324,6 +327,8 @@ void ConvolutionSKLayer::Backward_gpu( width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, bottom_diff, bottom[i]->offset(n)); + + ctx.get_queue().finish(); } } } diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index d9750a06f0e..ea8abadb056 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -241,56 +241,93 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, stride_h_, stride_w_, kstride_h_, kstride_w_, top_data); } break; - default: + default: { LOG(FATAL)<< "Unknown pooling method."; } - CUDA_POST_KERNEL_CHECK; + } + CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - { - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_forward_sk")); + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_forward_sk")); + viennacl::ocl::enqueue( + oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, ctx), + bottom[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapHandle((cl_mem) top_data, ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, ctx), + WrapHandle((cl_mem) top_mask, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + } + break; + case PoolingParameter_PoolMethod_AVE: { + viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("ave_pool_forward_sk")); + viennacl::ocl::enqueue( + oclk_ave_pool_forward(count, WrapHandle((cl_mem) bottom_data,ctx), bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, WrapHandle((cl_mem)top_data,ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: { + if (this->phase_ == caffe::TRAIN) { + // We need to create the random index as well. + greentea_gpu_rng_uniform(this->device_context_.id(),count, Dtype(0), Dtype(1), + (cl_mem)(rand_idx_.mutable_gpu_data()),0); + + viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("sto_pool_forward_train_sk")); viennacl::ocl::enqueue( - oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, ctx), - bottom[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, + oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data,ctx), bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - WrapHandle((cl_mem) top_data, ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, ctx), - WrapHandle((cl_mem) top_mask, ctx)), + WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()),ctx), WrapHandle((cl_mem)(top_data),ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + } else { + viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("sto_pool_forward_test_sk")); + viennacl::ocl::enqueue( + oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data,ctx), bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, WrapHandle((cl_mem)top_data,ctx)), ctx.get_queue()); ctx.get_queue().finish(); } - break; - case PoolingParameter_PoolMethod_AVE: - // TODO - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - // TODO - break; - default: + } + break; + default: { LOG(FATAL)<< "Unknown pooling method."; } -#endif // USE_GREENTEA } +#endif // USE_GREENTEA } +} #ifdef USE_CUDA template From adb616bf23e9ca3301f516694492122590da0453 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 23 May 2015 23:32:09 +0200 Subject: [PATCH 037/254] CSK kernels. --- src/caffe/greentea/cl_kernels.cpp | 12 +++++----- .../greentea/cl_kernels/convolution_sk.cl | 23 ++++++++++--------- src/caffe/greentea/cl_kernels/im2col_sk.cl | 6 +++-- src/caffe/greentea/greentea_im2col.cpp | 17 ++++++++++++++ 4 files changed, 39 insertions(+), 19 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 0861d79cf6f..98f3a42665c 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -9,22 +9,22 @@ std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#defin std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; +std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; -std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; +std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, __global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n int yidx = (i + yoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 2\n for (int ki = 0; ki < kernel_h; ++ki) {\n#pragma unroll 2\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] +=\n wl[kj + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w\n + ((i + ki * kstride_h) % buff_h) * buff_w];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 1\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 1\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}*/"; +std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; -std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im, const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; +std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, __global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; diff --git a/src/caffe/greentea/cl_kernels/convolution_sk.cl b/src/caffe/greentea/cl_kernels/convolution_sk.cl index f3e47eb0088..434af92647d 100644 --- a/src/caffe/greentea/cl_kernels/convolution_sk.cl +++ b/src/caffe/greentea/cl_kernels/convolution_sk.cl @@ -79,11 +79,11 @@ // Load image patch #pragma unroll 1 for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) { + int yidx = (i + yoff); #pragma unroll 1 for (int j = get_local_id(0); j < buff_w; j += get_local_size(0)) { int xidx = (j + xoff); - int yidx = (i + yoff); if (xidx < width && yidx < height) { il[j + i * buff_w] = in[xidx + yidx * width + fin * width * height + batch_in_off]; @@ -111,14 +111,15 @@ } } -#pragma unroll 2 +#pragma unroll 1 for (int ki = 0; ki < kernel_h; ++ki) { -#pragma unroll 2 + int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w; + int alpos_i = (i + ki * kstride_h) / buff_h * 2; +#pragma unroll 10 for (int kj = 0; kj < kernel_w; ++kj) { - al[(j + kj * kstride_w) / buff_w + (i + ki * kstride_h) / buff_h * 2] += - wl[kj + ki * kernel_w] - * il[(j + kj * kstride_w) % buff_w - + ((i + ki * kstride_h) % buff_h) * buff_w]; + al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj + + ki * kernel_w] + * il[(j + kj * kstride_w) % buff_w + ilpos_i]; } } @@ -139,7 +140,7 @@ } } } -} +}*/ // Fits into 32 KB __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, @@ -221,9 +222,9 @@ __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, + batch_out_off]; // Across the kernel itself -#pragma unroll 1 +#pragma unroll 10 for (int i = 0; i < kernel_h; ++i) { -#pragma unroll 1 +#pragma unroll 10 for (int j = 0; j < kernel_w; ++j) { outval = fma( wl[j + i * kernel_w], @@ -240,4 +241,4 @@ __kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, }barrier(CLK_LOCAL_MEM_FENCE); } } -}*/ +} diff --git a/src/caffe/greentea/cl_kernels/im2col_sk.cl b/src/caffe/greentea/cl_kernels/im2col_sk.cl index 40bfa568fbf..fb76612d93d 100644 --- a/src/caffe/greentea/cl_kernels/im2col_sk.cl +++ b/src/caffe/greentea/cl_kernels/im2col_sk.cl @@ -42,7 +42,8 @@ __kernel void TEMPLATE(im2col_sk,Dtype)(const int n, } -__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_col, +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, + __global const Dtype* data_col, const int height, const int width, const int channels, const int patch_h, const int patch_w, @@ -53,7 +54,8 @@ __kernel void TEMPLATE(col2im_sk,Dtype)(const int n, __global const Dtype* data_ const int kstride_w, const int height_col, const int width_col, - __global Dtype* data_im, const int data_offset) { + __global Dtype* data_im, + const int data_offset) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { Dtype val = 0; diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 0b30f0224c3..ed0b15b0c14 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -29,6 +29,23 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, viennacl::ocl::kernel &kernel = prog.get_kernel( CL_KERNEL_SELECT("im2col_sk")); + /*std::cout << "num_kernels: " << num_kernels << std::endl; + std::cout << "data_offset: " << data_offset << std::endl; + std::cout << "height: " << height << std::endl; + std::cout << "width: " << width << std::endl; + std::cout << "kernel_h: " << kernel_h << std::endl; + std::cout << "kernel_w: " << kernel_w << std::endl; + std::cout << "ext_kernel_h: " << ext_kernel_h << std::endl; + std::cout << "ext_kernel_w: " << ext_kernel_w << std::endl; + std::cout << "pad_h: " << pad_h << std::endl; + std::cout << "pad_w: " << pad_w << std::endl; + std::cout << "stride_h: " << stride_h << std::endl; + std::cout << "stride_w: " << stride_w << std::endl; + std::cout << "kstride_h: " << kstride_h << std::endl; + std::cout << "kstride_w: " << kstride_w << std::endl; + std::cout << "height_col: " << height_col << std::endl; + std::cout << "width_col: " << width_col << std::endl;*/ + viennacl::ocl::enqueue( kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, From 21e4a7248c700e0ec7b9d3857774dc712b55adbb Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 27 May 2015 23:43:57 +0200 Subject: [PATCH 038/254] U-Net additional MergeCrop Layer. --- include/caffe/vision_layers.hpp | 74 ++++++++++++--- src/caffe/layers/mergecrop_layer.cpp | 54 +++++++++++ src/caffe/layers/mergecrop_layer.cu | 137 +++++++++++++++++++++++++++ 3 files changed, 252 insertions(+), 13 deletions(-) create mode 100644 src/caffe/layers/mergecrop_layer.cpp create mode 100644 src/caffe/layers/mergecrop_layer.cu diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 119611f7aa3..deb406beae0 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -16,6 +16,45 @@ namespace caffe { +template +class MergeCropLayer : public Layer { + public: + explicit MergeCropLayer(const LayerParameter& param) + : Layer(param) { + } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { + return 2; + } + + virtual inline int ExactNumTopBlobs() const { + return 1; + } + + virtual inline const char* type() const { + return "MergeCrop"; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + +}; + /** * @brief Abstract base class that factors out the BLAS code common to * ConvolutionLayer and DeconvolutionLayer. @@ -617,35 +656,45 @@ class CuDNNPoolingLayer : public PoolingLayer { * so that the result vector of different sized * images are of the same size. */ -template +template class SPPLayer : public Layer { public: explicit SPPLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param) { + } virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - const vector*>& top); + const vector*>& top); - virtual inline const char* type() const { return "SPP"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "SPP"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int MinTopBlobs() const { + return 1; + } // MAX POOL layers can output an extra top blob for the mask; // others can only output the pooled inputs. virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 2 : 1; + return + (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) ? 2 : 1; } protected: virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); + const vector& propagate_down, + const vector*>& bottom); // calculates the kernel and stride dimensions for the pooling layer, // returns a correctly configured LayerParameter for a PoolingLayer virtual LayerParameter GetPoolingParam(const int pyramid_level, - const int bottom_h, const int bottom_w, const SPPParameter spp_param); + const int bottom_h, const int bottom_w, + const SPPParameter spp_param); int pyramid_height_; int bottom_h_, bottom_w_; @@ -679,5 +728,4 @@ class SPPLayer : public Layer { } // namespace caffe - #endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp new file mode 100644 index 00000000000..ceddb4b894e --- /dev/null +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -0,0 +1,54 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void MergeCropLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + // Nothing to do here, other than the reshaping + Reshape(bottom, top); +} + +template +void MergeCropLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + + // Same number of batches requires + CHECK_EQ(bottom[0]->num(),bottom[1]->num()); + int num = bottom[0]->num(); + + // All channels of both inputs are copied + int channels = bottom[0]->channels() + bottom[1]->channels(); + + // Width and height of the smaller input, which should be input 0 + int height = bottom[0]->height(); + int width = bottom[0]->width(); + + top[0]->Reshape(num, channels, height, width, this->device_context_); +} + +template +void MergeCropLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + LOG(FATAL)<< "Foward_cpu() not implemented for MergeCropLayer."; +} + +template +void MergeCropLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + LOG(FATAL)<< "Backward_cpu() not implemented for MergeCropLayer."; +} + +#ifdef CPU_ONLY +STUB_GPU(MergeCropLayer); +#endif + +INSTANTIATE_CLASS(MergeCropLayer); +REGISTER_LAYER_CLASS(MergeCrop); + +} // namespace caffe diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu new file mode 100644 index 00000000000..2cd20863348 --- /dev/null +++ b/src/caffe/layers/mergecrop_layer.cu @@ -0,0 +1,137 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +#ifdef USE_CUDA +template +__global__ void CopyForward(const int nthreads, const Dtype* bottom_a, + const Dtype* bottom_b, Dtype* top, int num, + int channels_a, int channels_b, int height_a, + int width_a, int height_b, int width_b) { + + CUDA_KERNEL_LOOP(index, nthreads) + { + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / (channels_a * channels_b * height_a * width_a); + + int bottom_id = ((index + - batch_id * channels_a * channels_b * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = bottom_a[aidx]; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = + ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h) + * width_a + w + (h * 2 + 1) * pad_w); + top[index] = bottom_b[bidx]; + } + } + +} + +template +__global__ void CopyBackward(const int nthreads, Dtype* bottom_a, + const Dtype* top, int num, int channels_a, + int channels_b, int height_a, int width_a, + int height_b, int width_b) { + + CUDA_KERNEL_LOOP(index, nthreads) + { + + int batch_id = index / (channels_a * channels_b * height_a * width_a); + + int bottom_id = ((index + - batch_id * channels_a * channels_b * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = top[index]; + } + } + +} +#endif // USE_CUDA + +template +void MergeCropLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + + int count = top[0]->count() * 2; + + const Dtype* bottom_data_a = bottom[0]->gpu_data(); + const Dtype* bottom_data_b = bottom[1]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + + int num = bottom[0]->num(); + + // All channels of both inputs are copied + int channels_a = bottom[0]->channels(); + int channels_b = bottom[1]->channels(); + + // Width and height of the smaller input, which should be input 0 + int height_a = bottom[0]->height(); + int width_a = bottom[0]->width(); + + int height_b = bottom[1]->height(); + int width_b = bottom[1]->width(); + + CopyForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( + count, bottom_data_a, bottom_data_b, top_data, num, channels_a, + channels_b, height_a, width_a, height_b, width_b); + +} + +template +void MergeCropLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + int count = top[0]->count() * 2; + + Dtype* bottom_diff_a = bottom[0]->mutable_gpu_diff(); + const Dtype* top_diff = top[0]->gpu_diff(); + + int num = bottom[0]->num(); + + // All channels of both inputs are copied + int channels_a = bottom[0]->channels(); + int channels_b = bottom[1]->channels(); + + // Width and height of the smaller input, which should be input 0 + int height_a = bottom[0]->height(); + int width_a = bottom[0]->width(); + + int height_b = bottom[1]->height(); + int width_b = bottom[1]->width(); + + CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( + count, bottom_diff_a, top_diff, num, channels_a, channels_b, height_a, + width_a, height_b, width_b); +} + +INSTANTIATE_LAYER_GPU_FUNCS(MergeCropLayer); + +} // namespace caffe From 4da03d0f9edfdf486d5a39823aacccf3591184a8 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 28 May 2015 00:48:29 +0200 Subject: [PATCH 039/254] Restored Solver class for OpenCL after merging with BVLC::Master. --- src/caffe/solver.cpp | 107 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 99 insertions(+), 8 deletions(-) diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 710e2ecb98c..b967bf6ee71 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -592,7 +592,25 @@ void SGDSolver::Regularize(int param_id) { #endif // USE_CUDA } else { #ifdef USE_GREENTEA - // TODO: REDO OPENCL SOLVER + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + greentea_gpu_axpy(this->device_context_.id(), net_params[param_id]->count(), + local_decay, + (cl_mem)(net_params[param_id]->gpu_data()),0, + (cl_mem)(net_params[param_id]->mutable_gpu_diff()),0); + } else if (regularization_type == "L1") { + greentea_gpu_sign(this->device_context_.id(), net_params[param_id]->count(), + (cl_mem)(net_params[param_id]->gpu_data()),0, + (cl_mem)(temp_[param_id]->mutable_gpu_data()),0); + greentea_gpu_axpy(this->device_context_.id(),net_params[param_id]->count(), + local_decay, + (cl_mem)(temp_[param_id]->gpu_data()),0, + (cl_mem)(net_params[param_id]->mutable_gpu_diff()),0); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } #endif // USE_GREENTEA } #else @@ -600,8 +618,9 @@ void SGDSolver::Regularize(int param_id) { #endif break; } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + default: { + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } } } @@ -634,7 +653,17 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { #endif // USE_CUDA } else { #ifdef USE_GREENTEA - // TODO + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + greentea_gpu_axpby( + this->device_context_.id(), net_params[param_id]->count(), + local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, + momentum, (cl_mem) (history_[param_id]->mutable_gpu_data()), 0); + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (history_[param_id]->gpu_data()), + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); #endif // USE_GREENTEA } #else @@ -642,10 +671,11 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { #endif break; } - default: + default: { LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); } } +} template void SGDSolver::SnapshotSolverState(SolverState* state) { @@ -721,7 +751,34 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { #endif // USE_CUDA } else { #ifdef USE_GREENTEA - // TODO + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + // save history momentum for stepping back + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), ctx); + + // update history + greentea_gpu_axpby( + this->device_context_.id(), net_params[param_id]->count(), + local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, + momentum, (cl_mem) (this->history_[param_id]->mutable_gpu_data()), + 0); + + // compute update: step back then over step + greentea_gpu_axpby( + this->device_context_.id(), net_params[param_id]->count(), + Dtype(1) + momentum, + (cl_mem) (this->history_[param_id]->gpu_data()), 0, -momentum, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // copy + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); #endif // USE_GREENTEA } #else @@ -729,10 +786,11 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { #endif break; } - default: + default: { LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); } } +} template void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { @@ -806,7 +864,40 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { #endif // USE_CUDA } else { #ifdef USE_GREENTEA - // TODO + // compute square of gradient in update + greentea_gpu_powx( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // update history + greentea_gpu_add( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->history_[param_id]->gpu_data()), 0, + (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); + + // prepare update + greentea_gpu_powx( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), 0, Dtype(0.5), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_add_scalar( + this->device_context_.id(), net_params[param_id]->count(), delta, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_div( + this->device_context_.id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // scale and copy + greentea_gpu_axpby( + this->device_context_.id(), net_params[param_id]->count(), + local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, + Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); #endif // USE_GREENTEA } #else From 9a14aef8d67b6b939ae3967ab070d4c8980d554c Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 1 Jun 2015 13:22:25 +0200 Subject: [PATCH 040/254] More OpenCL adaptions --- include/caffe/greentea/greentea_im2col.hpp | 33 ++- include/caffe/solver.hpp | 4 - include/caffe/vision_layers.hpp | 55 +++- src/caffe/greentea/cl_kernels.cpp | 8 +- src/caffe/greentea/cl_kernels/im2col.cl | 60 +++++ src/caffe/greentea/cl_kernels/mergecrop.cl | 65 +++++ src/caffe/greentea/greentea_im2col.cpp | 87 ++++++ src/caffe/layers/base_conv_layer.cpp | 296 +++++++++++++++------ src/caffe/layers/conv_layer.cu | 22 +- src/caffe/layers/deconv_layer.cpp | 6 + src/caffe/layers/deconv_layer.cu | 22 +- src/caffe/layers/im2col_layer.cu | 6 + src/caffe/layers/mergecrop_layer.cu | 63 ++++- src/caffe/test/test_im2col_kernel.cu | 6 + src/caffe/util/im2col.cu | 2 + 15 files changed, 592 insertions(+), 143 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/mergecrop.cl diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index d4403bf7242..c7188f0cab1 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -40,26 +40,25 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, const int kstride_h, const int kstride_w, cl_mem data_im, const int data_offset); -/*template - void im2col_gpu(const Dtype* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_col); +template +void greentea_im2col_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, const cl_mem data_im, const int data_im_off, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, cl_mem data_col, const int data_col_off); + +template +void greentea_col2im_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, const cl_mem data_col, const int data_col_off, + const int channels, const int height, const int width, + const int patch_h, const int patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, cl_mem data_im, const int data_im_off); - template - void col2im_sk_gpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, - Dtype* data_im); - template - void col2im_gpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im);*/ } -#endif +#endif // USE_GREENTEA #endif /* GREENTEA_IM2COL_HPP_ */ diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index a89b1218b87..336bd6637a5 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -44,8 +44,6 @@ class Solver { } void Snapshot(); void Restore(const char* resume_file); - // Get the update value for the current iteration. - virtual void ComputeUpdateValue() = 0; protected: // Make and apply the update value for the current iteration. @@ -94,8 +92,6 @@ class SGDSolver : public Solver { return history_; } - virtual void ComputeUpdateValue(); - protected: void PreSolve(); Dtype GetLearningRate(); diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index deb406beae0..d31debef472 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -14,6 +14,11 @@ #include "caffe/neuron_layers.hpp" #include "caffe/proto/caffe.pb.h" + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { template @@ -93,14 +98,17 @@ class BaseConvolutionLayer : public Layer { void backward_cpu_bias(Dtype* bias, const Dtype* input); #ifndef CPU_ONLY - void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); - void forward_gpu_bias(Dtype* output, const Dtype* bias); - void backward_gpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* col_output); - void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, + void forward_gpu_gemm(const Dtype* col_input, const int col_input_off, + const Dtype* weights, Dtype* output, + const int output_off, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const int output_off, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const int input_off, + const Dtype* weights, Dtype* col_output, + const int col_output_off); + void weight_gpu_gemm(const Dtype* col_input, const int col_input_off, + const Dtype* output, const int output_off, Dtype* weights); - void backward_gpu_bias(Dtype* bias, const Dtype* input); + void backward_gpu_bias(Dtype* bias, const Dtype* input, const int input_off); #endif // reverse_dimensions should return true iff we are implementing deconv, so @@ -133,7 +141,9 @@ class BaseConvolutionLayer : public Layer { kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); } + #ifndef CPU_ONLY +#ifdef USE_CUDA inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, @@ -144,7 +154,36 @@ class BaseConvolutionLayer : public Layer { kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); } -#endif +#endif // USE_CUDA +#ifdef USE_GREENTEA + inline void greentea_conv_im2col_gpu(const Dtype* data, const int data_off, + Dtype* col_buff, + const int col_buff_off) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + greentea_im2col_gpu(program, ctx, (cl_mem) data, data_off, + conv_in_channels_, conv_in_height_, + conv_in_width_, kernel_h_, kernel_w_, pad_h_, + pad_w_, stride_h_, stride_w_, (cl_mem) col_buff, + col_buff_off); + } + inline void greentea_conv_col2im_gpu(const Dtype* col_buff, + const int col_buff_off, Dtype* data, + const int data_off) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + greentea_col2im_gpu(program, ctx, (cl_mem) col_buff, col_buff_off, + conv_in_channels_, conv_in_height_, + conv_in_width_, kernel_h_, kernel_w_, pad_h_, + pad_w_, stride_h_, stride_w_, (cl_mem) data, + data_off); + } +#endif // USE_GREENTEA +#endif // !CPU_ONLY int conv_out_channels_; int conv_in_channels_; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 98f3a42665c..b4dbbf7943c 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -11,9 +11,10 @@ std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\ std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; -std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; +std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset =\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index] = val;\n }\n}"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; @@ -21,9 +22,10 @@ std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; -std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif"; +std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset =\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index] = val;\n }\n}"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { @@ -38,6 +40,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << im2col_float << "\n\n"; ss << im2col_sk_float << "\n\n"; ss << math_float << "\n\n"; + ss << mergecrop_float << "\n\n"; ss << pooling_sk_float << "\n\n"; ss << softmax_loss_float << "\n\n"; #ifdef GREENTEA_DOUBLE_SUPPORT @@ -52,6 +55,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << im2col_double << "\n\n"; ss << im2col_sk_double << "\n\n"; ss << math_double << "\n\n"; + ss << mergecrop_double << "\n\n"; ss << pooling_sk_double << "\n\n"; ss << softmax_loss_double << "\n\n"; ss << "#endif" << "\n\n"; diff --git a/src/caffe/greentea/cl_kernels/im2col.cl b/src/caffe/greentea/cl_kernels/im2col.cl index 0ed82aa79be..6d047fc2d9d 100644 --- a/src/caffe/greentea/cl_kernels/im2col.cl +++ b/src/caffe/greentea/cl_kernels/im2col.cl @@ -2,3 +2,63 @@ #include "header.cl" #endif +__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_im) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + int offset = + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index] = val; + } +} diff --git a/src/caffe/greentea/cl_kernels/mergecrop.cl b/src/caffe/greentea/cl_kernels/mergecrop.cl new file mode 100644 index 00000000000..572266e7b9a --- /dev/null +++ b/src/caffe/greentea/cl_kernels/mergecrop.cl @@ -0,0 +1,65 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(merge_copy_forward, Dtype)( + const int nthreads, __global const Dtype* bottom_a, + __global const Dtype* bottom_b, + __global Dtype* top, + int num, int channels_a, int channels_b, int height_a, int width_a, + int height_b, int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / (channels_a * channels_b * height_a * width_a); + + int bottom_id = ((index + - batch_id * channels_a * channels_b * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = bottom_a[aidx]; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = + ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h) + * width_a + w + (h * 2 + 1) * pad_w); + top[index] = bottom_b[bidx]; + } + } +} + +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, +__global Dtype* bottom_a, + __global const Dtype* top, + int num, int channels_a, + int channels_b, int height_a, + int width_a, int height_b, + int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int batch_id = index / (channels_a * channels_b * height_a * width_a); + + int bottom_id = ((index + - batch_id * channels_a * channels_b * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = top[index]; + } + } +} diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index ed0b15b0c14..e5d41f3ad8a 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -144,5 +144,92 @@ template void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, cl_mem data_im, const int data_offset); +template +void greentea_im2col_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, const cl_mem data_im, + const int data_im_off, const int channels, + const int height, const int width, const int kernel_h, + const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + cl_mem data_col, const int data_col_off) { + // We are going to launch channels * height_col * width_col kernels, each + // kernel responsible for copying a single-channel grid. + int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; + int num_kernels = channels * height_col * width_col; + + viennacl::ocl::kernel &kernel = prog.get_kernel(CL_KERNEL_SELECT("im2col")); + + viennacl::ocl::enqueue( + kernel(num_kernels, WrapHandle(data_im, ctx), data_im_off, height, width, + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, + width_col, WrapHandle(data_col, ctx), data_col_off), + ctx.get_queue()); +} + +template void greentea_im2col_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_im, + const int data_im_off, + const int channels, const int height, + const int width, const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, cl_mem data_col, + const int data_col_off); + +template void greentea_im2col_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_im, + const int data_im_off, + const int channels, const int height, + const int width, const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, cl_mem data_col, + const int data_col_off); + +template +void greentea_col2im_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, const cl_mem data_col, + const int data_col_off, const int channels, + const int height, const int width, const int patch_h, + const int patch_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, cl_mem data_im, + const int data_im_off) { + int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; + int num_kernels = channels * height * width; + + viennacl::ocl::kernel &kernel = prog.get_kernel(CL_KERNEL_SELECT("col2im")); + + viennacl::ocl::enqueue( + kernel(num_kernels, WrapHandle(data_col, ctx), data_col_off, height, width, channels, + patch_h, patch_w, pad_h, pad_w, stride_h, stride_w, height_col, + width_col, WrapHandle(data_im, ctx), data_im_off), + ctx.get_queue()); +} + +template void greentea_col2im_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_col, + const int data_col_off, + const int channels, const int height, + const int width, const int patch_h, + const int patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, cl_mem data_im, + const int data_im_off); +template void greentea_col2im_gpu(viennacl::ocl::program &prog, + viennacl::ocl::context &ctx, + const cl_mem data_col, + const int data_col_off, + const int channels, const int height, + const int width, const int patch_h, + const int patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, cl_mem data_im, + const int data_im_off); + } #endif diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 40f2477c69a..c910301b9b7 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -6,29 +6,35 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { -template +template void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes())<< "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); CHECK(!conv_param.has_kernel_size() != !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) - << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; CHECK(conv_param.has_kernel_size() || (conv_param.has_kernel_h() && conv_param.has_kernel_w())) - << "For non-square filters both kernel_h and kernel_w are required."; + << "For non-square filters both kernel_h and kernel_w are required."; CHECK((!conv_param.has_pad() && conv_param.has_pad_h() - && conv_param.has_pad_w()) + && conv_param.has_pad_w()) || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) - << "pad is pad OR pad_h and pad_w are required."; + << "pad is pad OR pad_h and pad_w are required."; CHECK((!conv_param.has_stride() && conv_param.has_stride_h() - && conv_param.has_stride_w()) + && conv_param.has_stride_w()) || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) - << "Stride is stride OR stride_h and stride_w are required."; + << "Stride is stride OR stride_h and stride_w are required."; if (conv_param.has_kernel_size()) { kernel_h_ = kernel_w_ = conv_param.kernel_size(); } else { @@ -52,7 +58,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1 - && stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0; + && stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0; // Configure output channels and groups. channels_ = bottom[0]->channels(); num_output_ = this->layer_param_.convolution_param().num_output(); @@ -60,7 +66,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, group_ = this->layer_param_.convolution_param().group(); CHECK_EQ(channels_ % group_, 0); CHECK_EQ(num_output_ % group_, 0) - << "Number of output should be multiples of group."; + << "Number of output should be multiples of group."; if (reverse_dimensions()) { conv_out_channels_ = channels_; conv_in_channels_ = num_output_; @@ -83,16 +89,16 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Initialize and fill the weights: // output channels x input channels per-group x kernel height x kernel width this->blobs_[0].reset(new Blob( - conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_,this->device_context_)); + conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_,this->device_context_)); shared_ptr > weight_filler(GetFiller( - this->layer_param_.convolution_param().weight_filler())); + this->layer_param_.convolution_param().weight_filler())); weight_filler->Fill(this->blobs_[0].get()); // If necessary, initialize and fill the biases. if (bias_term_) { vector bias_shape(1, num_output_); this->blobs_[1].reset(new Blob(bias_shape,this->device_context_)); shared_ptr > bias_filler(GetFiller( - this->layer_param_.convolution_param().bias_filler())); + this->layer_param_.convolution_param().bias_filler())); bias_filler->Fill(this->blobs_[1].get()); } } @@ -100,25 +106,25 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, this->param_propagate_down_.resize(this->blobs_.size(), true); } -template +template void BaseConvolutionLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes())<< "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; num_ = bottom[0]->num(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); CHECK_EQ(bottom[0]->channels(), channels_) << "Input size incompatible with" - " convolution kernel."; + " convolution kernel."; // TODO: generalize to handle inputs of different shapes. for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num."; CHECK_EQ(channels_, bottom[bottom_id]->channels()) - << "Inputs must have same channels."; + << "Inputs must have same channels."; CHECK_EQ(height_, bottom[bottom_id]->height()) - << "Inputs must have same height."; + << "Inputs must have same height."; CHECK_EQ(width_, bottom[bottom_id]->width()) - << "Inputs must have same width."; + << "Inputs must have same width."; } // Shape the tops. compute_output_shape(); @@ -155,9 +161,11 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, } } -template +template void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, - const Dtype* weights, Dtype* output, bool skip_im2col) { + const Dtype* weights, + Dtype* output, + bool skip_im2col) { const Dtype* col_buff = input; if (!is_1x1_) { if (!skip_im2col) { @@ -166,42 +174,47 @@ void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, col_buff = col_buffer_.cpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, - (Dtype)0., output + output_offset_ * g); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, + conv_out_channels_ / group_, conv_out_spatial_dim_, + kernel_dim_ / group_, (Dtype) 1., + weights + weight_offset_ * g, + col_buff + col_offset_ * g, (Dtype) 0., + output + output_offset_ * g); } } -template +template void BaseConvolutionLayer::forward_cpu_bias(Dtype* output, - const Dtype* bias) { + const Dtype* bias) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(), - (Dtype)1., output); + height_out_ * width_out_, 1, (Dtype) 1., bias, + bias_multiplier_.cpu_data(), (Dtype) 1., output); } -template +template void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, - const Dtype* weights, Dtype* input) { + const Dtype* weights, + Dtype* input) { Dtype* col_buff = col_buffer_.mutable_cpu_data(); if (is_1x1_) { col_buff = input; } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, - (Dtype)0., col_buff + col_offset_ * g); + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype) 1., weights + weight_offset_ * g, + output + output_offset_ * g, (Dtype) 0., + col_buff + col_offset_ * g); } if (!is_1x1_) { conv_col2im_cpu(col_buff, input); } } -template +template void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, - const Dtype* output, Dtype* weights) { + const Dtype* output, + Dtype* weights) { const Dtype* col_buff = input; if (!is_1x1_) { conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); @@ -209,86 +222,193 @@ void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, - (Dtype)1., weights + weight_offset_ * g); + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype) 1., output + output_offset_ * g, + col_buff + col_offset_ * g, (Dtype) 1., + weights + weight_offset_ * g); } } -template +template void BaseConvolutionLayer::backward_cpu_bias(Dtype* bias, - const Dtype* input) { + const Dtype* input) { caffe_cpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., - input, bias_multiplier_.cpu_data(), 1., bias); + input, bias_multiplier_.cpu_data(), 1., bias); } #ifndef CPU_ONLY -template +template void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, - const Dtype* weights, Dtype* output, bool skip_im2col) { + const int input_off, + const Dtype* weights, + Dtype* output, + const int output_off, + bool skip_im2col) { const Dtype* col_buff = input; - if (!is_1x1_) { - if (!skip_im2col) { - conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (!is_1x1_) { + if (!skip_im2col) { + conv_im2col_gpu(input + input_off, col_buffer_.mutable_gpu_data()); + } + col_buff = col_buffer_.gpu_data(); } - col_buff = col_buffer_.gpu_data(); - } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, - (Dtype)0., output + output_offset_ * g); + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm( + CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, + conv_out_spatial_dim_, kernel_dim_ / group_, (Dtype) 1., + weights + weight_offset_ * g, + col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 0., + output + output_off + output_offset_ * g); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + if (!is_1x1_) { + if (!skip_im2col) { + greentea_conv_im2col_gpu(input, input_off, col_buffer_.mutable_gpu_data(),0); + } + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, conv_out_channels_ / group_, + conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype) 1., (cl_mem)weights, weight_offset_ * g, + (cl_mem)col_buff, + (is_1x1_ ? input_off : 0) + col_offset_ * g, + (Dtype) 0., (cl_mem)output, + output_off + output_offset_ * g); + } +#endif // USE_GREENTEA } } -template +template void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, - const Dtype* bias) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), - (Dtype)1., output); + const int output_off, + const Dtype* bias) { + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, + height_out_ * width_out_, 1, (Dtype) 1., bias, + bias_multiplier_.gpu_data(), (Dtype) 1., + output + output_off); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num_output_, + height_out_ * width_out_, 1, (Dtype) 1., + (cl_mem) bias, 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., (cl_mem) output, output_off); +#endif // USE_GREENTEA + } } -template +template void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, - const Dtype* weights, Dtype* input) { + const int output_off, + const Dtype* weights, + Dtype* input, + const int input_off) { Dtype* col_buff = col_buffer_.mutable_gpu_data(); if (is_1x1_) { col_buff = input; } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, - (Dtype)0., col_buff + col_offset_ * g); - } - if (!is_1x1_) { - conv_col2im_gpu(col_buff, input); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm( + CblasTrans, CblasNoTrans, kernel_dim_ / group_, conv_out_spatial_dim_, + conv_out_channels_ / group_, (Dtype) 1., weights + weight_offset_ * g, + output + output_off + output_offset_ * g, (Dtype) 0., + col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g); + } + if (!is_1x1_) { + conv_col2im_gpu(col_buff, input + input_off); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_.id(), CblasTrans, + CblasNoTrans, kernel_dim_ / group_, + conv_out_spatial_dim_, + conv_out_channels_ / group_, (Dtype) 1., + (cl_mem) weights, weight_offset_ * g, + (cl_mem) output, output_off + output_offset_ * g, + (Dtype) 0., (cl_mem) col_buff, + is_1x1_ ? input_off : 0 + col_offset_ * g); + } + if (!is_1x1_) { + greentea_conv_col2im_gpu(col_buff, 0, input, input_off); + } +#endif // USE_GREENTEA } } -template +template void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, - const Dtype* output, Dtype* weights) { + const int input_off, + const Dtype* output, + const int output_off, + Dtype* weights) { const Dtype* col_buff = input; - if (!is_1x1_) { - conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); - col_buff = col_buffer_.gpu_data(); - } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, - (Dtype)1., weights + weight_offset_ * g); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (!is_1x1_) { + conv_im2col_gpu(input + input_off, col_buffer_.mutable_gpu_data()); + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, + conv_out_channels_ / group_, kernel_dim_ / group_, + conv_out_spatial_dim_, (Dtype) 1., + output + output_offset_ * g, + col_buff + col_offset_ * g, (Dtype) 1., + weights + weight_offset_ * g); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + if (!is_1x1_) { + greentea_conv_im2col_gpu(input, input_off, col_buffer_.mutable_gpu_data(),0); + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype) 1., (cl_mem) output, output_offset_ * g, + (cl_mem) col_buff, col_offset_ * g, (Dtype) 1., + (cl_mem) weights, weight_offset_ * g); + } +#endif // USE_GREENTEA } } -template +template void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, - const Dtype* input) { - caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., - input, bias_multiplier_.gpu_data(), 1., bias); + const Dtype* input, + const int input_off) { + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, + 1., input + input_off, bias_multiplier_.gpu_data(), + 1., bias); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + num_output_, height_out_ * width_out_, 1., + (cl_mem) input, input_off, + (cl_mem) (bias_multiplier_.gpu_data()), 0, 1., + (cl_mem) bias, 0); +#endif // USE_GREENTEA + } } #endif // !CPU_ONLY diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 3902fdf3930..7513fc75d64 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -6,6 +6,12 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { template @@ -16,11 +22,11 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { - this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight, - top_data + top[i]->offset(n)); + this->forward_gpu_gemm(bottom_data, bottom[i]->offset(n), weight, + top_data, top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data + top[i]->offset(n), bias); + this->forward_gpu_bias(top_data, top[i]->offset(n), bias); } } } @@ -44,7 +50,7 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); + this->backward_gpu_bias(bias_diff, top_diff, top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { @@ -53,13 +59,13 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n), - top_diff + top[i]->offset(n), weight_diff); + this->weight_gpu_gemm(bottom_data, bottom[i]->offset(n), + top_diff, top[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { - this->backward_gpu_gemm(top_diff + top[i]->offset(n), weight, - bottom_diff + bottom[i]->offset(n)); + this->backward_gpu_gemm(top_diff, top[i]->offset(n), weight, + bottom_diff, bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index e6d65ab526b..dece516a587 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -6,6 +6,12 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { template diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu index 9198dd64c72..3512006cfe8 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -6,6 +6,12 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { template @@ -16,11 +22,11 @@ void DeconvolutionLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight, - top_data + top[i]->offset(n)); + this->backward_gpu_gemm(bottom_data, bottom[i]->offset(n), weight, + top_data, top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data + top[i]->offset(n), bias); + this->forward_gpu_bias(top_data, top[i]->offset(n), bias); } } } @@ -46,20 +52,20 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); + this->backward_gpu_bias(bias_diff, top_diff, top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(top_diff + top[i]->offset(n), - bottom_data + bottom[i]->offset(n), weight_diff); + this->weight_gpu_gemm(top_diff, top[i]->offset(n), + bottom_data, bottom[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { - this->forward_gpu_gemm(top_diff + top[i]->offset(n), weight, - bottom_diff + bottom[i]->offset(n)); + this->forward_gpu_gemm(top_diff, top[i]->offset(n), weight, + bottom_diff, bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 9c338b14cb7..6bc119d38fa 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -5,6 +5,12 @@ #include "caffe/util/im2col.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { template diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index 2cd20863348..c36ef21952c 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -4,6 +4,11 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { #ifdef USE_CUDA @@ -77,7 +82,7 @@ template void MergeCropLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - int count = top[0]->count() * 2; + int count = top[0]->count(); const Dtype* bottom_data_a = bottom[0]->gpu_data(); const Dtype* bottom_data_b = bottom[1]->gpu_data(); @@ -96,9 +101,30 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, int height_b = bottom[1]->height(); int width_b = bottom[1]->width(); - CopyForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( - count, bottom_data_a, bottom_data_b, top_data, num, channels_a, - channels_b, height_a, width_a, height_b, width_b); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + CopyForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( + count, bottom_data_a, bottom_data_b, top_data, num, channels_a, + channels_b, height_a, width_a, height_b, width_b); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_copy_forward = program.get_kernel( + CL_KERNEL_SELECT("merge_copy_forward")); + viennacl::ocl::enqueue( + oclk_copy_forward(count, WrapHandle((cl_mem) bottom_data_a, ctx), + WrapHandle((cl_mem) bottom_data_b, ctx), + WrapHandle((cl_mem) top_data, ctx), num, channels_a, + channels_b, height_a, width_a, height_b, width_b), + ctx.get_queue()); + ctx.get_queue().finish(); +#endif // USE_GREENTEA + } } @@ -109,7 +135,7 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, if (!propagate_down[0]) { return; } - int count = top[0]->count() * 2; + int count = top[0]->count(); Dtype* bottom_diff_a = bottom[0]->mutable_gpu_diff(); const Dtype* top_diff = top[0]->gpu_diff(); @@ -127,9 +153,30 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, int height_b = bottom[1]->height(); int width_b = bottom[1]->width(); - CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( - count, bottom_diff_a, top_diff, num, channels_a, channels_b, height_a, - width_a, height_b, width_b); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( + count, bottom_diff_a, top_diff, num, channels_a, channels_b, height_a, + width_a, height_b, width_b); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_copy_backward = program.get_kernel( + CL_KERNEL_SELECT("merge_copy_backward")); + viennacl::ocl::enqueue( + oclk_copy_backward(count, WrapHandle((cl_mem) bottom_diff_a, ctx), + WrapHandle((cl_mem) top_diff, ctx), num, channels_a, + channels_b, height_a, width_a, height_b, width_b), + ctx.get_queue()); + ctx.get_queue().finish(); + +#endif // USE_GREENTEA + } } INSTANTIATE_LAYER_GPU_FUNCS(MergeCropLayer); diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index ee684c00255..4a2026e2f78 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -11,6 +11,12 @@ #include "caffe/test/test_caffe_main.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif + namespace caffe { // Forward declare kernel functions diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 5acfadd9a18..09a0582df8b 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -8,6 +8,7 @@ namespace caffe { +#ifdef USE_CUDA template __global__ void im2col_sk_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, @@ -293,4 +294,5 @@ template void col2im_gpu(const double* data_col, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, double* data_im); +#endif // USE_CUDA } // namespace caffe From d1fdac97e6b8b7406d89ea19bd4b2d3fa7649108 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 5 Jun 2015 00:53:08 +0200 Subject: [PATCH 041/254] Added maxpool forwarding and inner product layer for OpenCL. --- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/pooling.cl | 33 +++---- src/caffe/layers/absval_layer.cu | 1 + src/caffe/layers/filter_layer.cpp | 2 +- src/caffe/layers/inner_product_layer.cu | 121 +++++++++++++++++------ src/caffe/layers/reduction_layer.cpp | 4 +- 6 files changed, 112 insertions(+), 53 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index a7bbf67a04d..8f8af4349f3 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -15,7 +15,7 @@ std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data,\n __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + bottom_data += (n * channels\n + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; @@ -27,7 +27,7 @@ std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data,\n __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data + bottom_data += (n * channels\n + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (mask) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index 51094cbf765..b36f31c45d8 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -6,37 +6,36 @@ __kernel void TEMPLATE(max_pool_forward,Dtype)( const int nthreads, __global const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, const int kstride_h, - const int kstride_w, const int pad_h, const int pad_w, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data, - __global int* mask, __global Dtype* top_mask) { + const int use_mask, __global int* mask, __global Dtype* top_mask) { for (int index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height); - int wend = min(wstart + ext_kernel_w, width); + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - __global Dtype* bottom_data_ptr = bottom_data + bottom_data += (n * channels - + c) * height * width; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { - if (bottom_data_ptr[h * width + w] > maxval) { + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_data_ptr[maxidx]; + maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; - if (mask) { + if (use_mask == 1) { mask[index] = maxidx; } else { top_mask[index] = maxidx; diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu index 91f3c77fe9a..1bd8a1010c0 100644 --- a/src/caffe/layers/absval_layer.cu +++ b/src/caffe/layers/absval_layer.cu @@ -14,6 +14,7 @@ void AbsValLayer::Forward_gpu( caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); } + template void AbsValLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index be1db32dbaa..d69e19e5c80 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -55,7 +55,7 @@ void FilterLayer::Reshape(const vector*>& bottom, shape_top[0] = new_tops_num; for (int ts = 1; ts < num_axes; ++ts) shape_top[ts] = bottom[t]->shape(ts); - top[t]->Reshape(shape_top); + top[t]->Reshape(shape_top,this->device_context_); } } diff --git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index dd90cac12a8..0a0ddd0ae57 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -9,45 +9,104 @@ namespace caffe { -template +template void InnerProductLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); - caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., - bottom_data, weight, (Dtype)0., top_data); - if (bias_term_) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., - bias_multiplier_.gpu_data(), - this->blobs_[1]->gpu_data(), (Dtype)1., top_data); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., + bottom_data, weight, (Dtype) 0., top_data); + if (bias_term_) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., + bias_multiplier_.gpu_data(), + this->blobs_[1]->gpu_data(), (Dtype) 1., top_data); + } +#endif // USE CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasTrans, M_, N_, K_, (Dtype) 1., + (cl_mem) bottom_data, 0, (cl_mem) weight, 0, + (Dtype) 0., (cl_mem) top_data, 0); + if (bias_term_) { + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, M_, N_, 1, (Dtype) 1., + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (cl_mem) (this->blobs_[1]->gpu_data()), 0, + (Dtype) 1., (cl_mem) top_data, 0); + } +#endif // USE_GREENTEA } } -template -void InnerProductLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, +template +void InnerProductLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (this->param_propagate_down_[0]) { - const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* bottom_data = bottom[0]->gpu_data(); - // Gradient with respect to weight - caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); - } - if (bias_term_ && this->param_propagate_down_[1]) { - const Dtype* top_diff = top[0]->gpu_diff(); - // Gradient with respect to bias - caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.gpu_data(), (Dtype)1., - this->blobs_[1]->mutable_gpu_diff()); - } - if (propagate_down[0]) { - const Dtype* top_diff = top[0]->gpu_diff(); - // Gradient with respect to bottom data - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., - top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., - bottom[0]->mutable_gpu_diff()); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (this->param_propagate_down_[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + // Gradient with respect to weight + caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., + top_diff, bottom_data, (Dtype) 1., + this->blobs_[0]->mutable_gpu_diff()); + } + if (bias_term_ && this->param_propagate_down_[1]) { + const Dtype* top_diff = top[0]->gpu_diff(); + // Gradient with respect to bias + caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype) 1., top_diff, + bias_multiplier_.gpu_data(), (Dtype) 1., + this->blobs_[1]->mutable_gpu_diff()); + } + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + // Gradient with respect to bottom data + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., + top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0., + bottom[0]->mutable_gpu_diff()); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + if (this->param_propagate_down_[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + // Gradient with respect to weight + greentea_gpu_gemm(this->device_context_.id(), CblasTrans, + CblasNoTrans, N_, K_, M_, (Dtype) 1., + (cl_mem) top_diff, 0, (cl_mem) bottom_data, 0, + (Dtype) 1., + (cl_mem) (this->blobs_[0]->mutable_gpu_diff()), + 0); + } + if (bias_term_ && this->param_propagate_down_[1]) { + const Dtype* top_diff = top[0]->gpu_diff(); + // Gradient with respect to bias + greentea_gpu_gemv(this->device_context_.id(), CblasTrans, M_, N_, + (Dtype) 1., (cl_mem) top_diff, 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., + (cl_mem) (this->blobs_[1]->mutable_gpu_diff()), + 0); + } + if (propagate_down[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + // Gradient with respect to bottom data + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, M_, K_, N_, (Dtype) 1., + (cl_mem) top_diff, 0, + (cl_mem) (this->blobs_[0]->gpu_data()), 0, + (Dtype) 0., + (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); + } +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/reduction_layer.cpp b/src/caffe/layers/reduction_layer.cpp index 8ae6329ebe4..bd404eba4e3 100644 --- a/src/caffe/layers/reduction_layer.cpp +++ b/src/caffe/layers/reduction_layer.cpp @@ -25,14 +25,14 @@ void ReductionLayer::Reshape(const vector*>& bottom, // we'd need to also copy any axes following an "end_axis". vector top_shape(bottom[0]->shape().begin(), bottom[0]->shape().begin() + axis_); - top[0]->Reshape(top_shape); + top[0]->Reshape(top_shape, this->device_context_); num_ = bottom[0]->count(0, axis_); dim_ = bottom[0]->count(axis_); CHECK_EQ(num_, top[0]->count()); if (op_ == ReductionParameter_ReductionOp_SUM || op_ == ReductionParameter_ReductionOp_MEAN) { vector sum_mult_shape(1, dim_); - sum_multiplier_.Reshape(sum_mult_shape); + sum_multiplier_.Reshape(sum_mult_shape, this->device_context_); caffe_set(dim_, Dtype(1), sum_multiplier_.mutable_cpu_data()); } coeff_ = this->layer_param().reduction_param().coeff(); From 33705e734e0aafe2c80f8f3ae710321acc6749cd Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 9 Jun 2015 02:12:17 +0200 Subject: [PATCH 042/254] More OpenCL layers implemented. WIP complete backend at 35%. --- .goutputstream-L9KQZX | 21 ++ Greentea_Building_Blocks.txt | 27 ++ include/caffe/solver.hpp | 1 - src/caffe/greentea/cl_kernels.cpp | 20 +- src/caffe/greentea/cl_kernels/activation.cl | 8 + src/caffe/greentea/cl_kernels/bnll.cl | 24 ++ src/caffe/greentea/cl_kernels/concat.cl | 26 ++ src/caffe/greentea/cl_kernels/pooling.cl | 246 ++++++++++++++++++ src/caffe/greentea/cl_kernels/pooling_sk.cl | 1 - src/caffe/layers/absval_layer.cu | 47 +++- src/caffe/layers/base_data_layer.cu | 48 +++- src/caffe/layers/bnll_layer.cu | 97 +++++-- src/caffe/layers/concat_layer.cu | 100 +++++-- src/caffe/layers/contrastive_loss_layer.cu | 2 +- src/caffe/layers/cudnn_conv_layer.cu | 4 +- src/caffe/layers/dropout_layer.cu | 6 +- src/caffe/layers/eltwise_layer.cu | 6 +- src/caffe/layers/lrn_layer.cu | 6 +- src/caffe/layers/prelu_layer.cu | 10 +- .../sigmoid_cross_entropy_loss_layer.cu | 41 ++- src/caffe/layers/sigmoid_layer.cu | 4 +- src/caffe/layers/slice_layer.cu | 4 +- src/caffe/layers/tanh_layer.cu | 4 +- src/caffe/layers/threshold_layer.cu | 50 +++- src/caffe/solver.cpp | 106 +++----- 25 files changed, 722 insertions(+), 187 deletions(-) create mode 100644 .goutputstream-L9KQZX create mode 100644 Greentea_Building_Blocks.txt create mode 100644 src/caffe/greentea/cl_kernels/bnll.cl create mode 100644 src/caffe/greentea/cl_kernels/concat.cl diff --git a/.goutputstream-L9KQZX b/.goutputstream-L9KQZX new file mode 100644 index 00000000000..2ee200be18a --- /dev/null +++ b/.goutputstream-L9KQZX @@ -0,0 +1,21 @@ +GREENTEA BUILDING BLOCKS: + +viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); +viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + +viennacl::ocl::kernel &oclk_kernel = program.get_kernel( + CL_KERNEL_SELECT("kernel")); +viennacl::ocl::enqueue( + oclk_max_pool_forward(WrapHandle((cl_mem) data, ctx)), + ctx.get_queue()); +ctx.get_queue().finish(); + +if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA +#endif // USE_GREENTEA +} diff --git a/Greentea_Building_Blocks.txt b/Greentea_Building_Blocks.txt new file mode 100644 index 00000000000..24d0c498d64 --- /dev/null +++ b/Greentea_Building_Blocks.txt @@ -0,0 +1,27 @@ +GREENTEA BUILDING BLOCKS: + +viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); +viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + +viennacl::ocl::kernel &oclk_kernel = program.get_kernel( + CL_KERNEL_SELECT("kernel")); +viennacl::ocl::enqueue( + oclk_kernel(WrapHandle((cl_mem) data, ctx)), + ctx.get_queue()); +ctx.get_queue().finish(); + +if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA +#endif // USE_GREENTEA +} + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#endif diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 5b61965c138..4537870002a 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -29,7 +29,6 @@ class Solver { Solve(resume_file.c_str()); } void Step(int iters); - void StepPrefilled(); virtual ~Solver() { } inline shared_ptr > net() { diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 8f8af4349f3..295093735c3 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,29 +6,33 @@ namespace caffe { std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n float kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; +std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}"; +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; +std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n float kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; +std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}"; -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; @@ -36,7 +40,9 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << "#define Dtype float" << "\n\n"; ss << activation_float << "\n\n"; ss << auxiliary_float << "\n\n"; + ss << bnll_float << "\n\n"; ss << channel_float << "\n\n"; + ss << concat_float << "\n\n"; ss << convolution_sk_float << "\n\n"; ss << fillbuffer_float << "\n\n"; ss << im2col_float << "\n\n"; @@ -52,7 +58,9 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << "#define Dtype double" << "\n\n"; ss << activation_double << "\n\n"; ss << auxiliary_double << "\n\n"; + ss << bnll_double << "\n\n"; ss << channel_double << "\n\n"; + ss << concat_double << "\n\n"; ss << convolution_sk_double << "\n\n"; ss << fillbuffer_double << "\n\n"; ss << im2col_double << "\n\n"; diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index d7d693a7a57..d56be34807f 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -21,3 +21,11 @@ __kernel void TEMPLATE(relu_backward,Dtype)(const int n, * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } + +__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > threshold ? 1 : 0; + } +} diff --git a/src/caffe/greentea/cl_kernels/bnll.cl b/src/caffe/greentea/cl_kernels/bnll.cl new file mode 100644 index 00000000000..a7f95b28b9d --- /dev/null +++ b/src/caffe/greentea/cl_kernels/bnll.cl @@ -0,0 +1,24 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + } +} + +__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff) { + float kBNLL_THRESHOLD = 50.; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} diff --git a/src/caffe/greentea/cl_kernels/concat.cl b/src/caffe/greentea/cl_kernels/concat.cl new file mode 100644 index 00000000000..d08db5df6be --- /dev/null +++ b/src/caffe/greentea/cl_kernels/concat.cl @@ -0,0 +1,26 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, + const int forward, const int num_concats, + const int concat_size, + const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, + __global Dtype* out_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward == 1) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index b36f31c45d8..9be07fc3838 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -42,3 +42,249 @@ __kernel void TEMPLATE(max_pool_forward,Dtype)( } } } + +__kernel void TEMPLATE(ave_pool_forward,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_train,DType)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* rand_idx, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + return; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test,DType)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + offset; + if (use_mask == 1) { + __global const int* mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + __global const Dtype* top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* const top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(sto_pool_backward,Dtype)( + const int nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* rand_idx_slice = rand_idx + + (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] + * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index 48e3c1b1879..3b2108b870e 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -104,7 +104,6 @@ __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( } } } else { - mask_ptr += offset; for (int ph = phstart; ph <= phend; ph += kstride_h) { for (int pw = pwstart; pw <= pwend; pw += kstride_w) { if (top_mask[ph * pooled_width + pw] == h * width + w) { diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu index 1bd8a1010c0..5cecf7249a6 100644 --- a/src/caffe/layers/absval_layer.cu +++ b/src/caffe/layers/absval_layer.cu @@ -4,32 +4,59 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template -void AbsValLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { +template +void AbsValLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); - caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_abs(this->device_context_.id(), count, + (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (top_data), + 0); +#endif // USE_GREENTEA + } } - -template +template void AbsValLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - caffe_gpu_sign(count, bottom_data, bottom_diff); - caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_sign(count, bottom_data, bottom_diff); + caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_sign(this->device_context_.id(), count, (cl_mem) bottom_data, + 0, (cl_mem) bottom_diff, 0); + greentea_gpu_mul(this->device_context_.id(), count, (cl_mem) bottom_diff, + 0, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); +#endif // USE_GREENTEA + } + } } INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); - } // namespace caffe diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 218c01fa5ef..83ef49c49e8 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -4,23 +4,47 @@ namespace caffe { -template +template void BasePrefetchingDataLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { // First, join the thread JoinPrefetchThread(); - // Reshape to loaded data. - top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); - // Copy the data - caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), - top[0]->mutable_gpu_data()); - if (this->output_labels_) { - // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_, this->device_context_); - // Copy the labels. - caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), - top[1]->mutable_gpu_data()); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // Reshape to loaded data. + top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); + // Copy the data + caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), + top[0]->mutable_gpu_data()); + if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(prefetch_label_, this->device_context_); + // Copy the labels. + caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), + top[1]->mutable_gpu_data()); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + // Reshape to loaded data. + top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); + // Copy the data + greentea_copy(prefetch_data_.count(), (cl_mem)(prefetch_data_.cpu_data()), + (cl_mem)(top[0]->mutable_gpu_data()), ctx); + if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(prefetch_label_, this->device_context_); + // Copy the labels. + greentea_copy(prefetch_label_.count(), (cl_mem)(prefetch_label_.cpu_data()), + (cl_mem)(top[1]->mutable_gpu_data()), ctx); + } +#endif // USE_GREENTEA } + // Start a new prefetch thread CreatePrefetchThread(); } diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index d963d0687d2..b9cc50dd976 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -4,57 +4,110 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { const float kBNLL_THRESHOLD = 50.; -template +#ifdef USE_CUDA +template __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) { - out[index] = in[index] > 0 ? - in[index] + log(1. + exp(-in[index])) : - log(1. + exp(in[index])); + CUDA_KERNEL_LOOP(index, n) + { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } +#endif // USE_CUDA -template +template void BNLLLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - BNLLForward<<>>( - count, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_bnll = program.get_kernel( + CL_KERNEL_SELECT("bnll_forward")); + viennacl::ocl::enqueue( + oclk_bnll(count, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); +#endif // USE_GREENTEA + } } -template +#ifdef USE_CUDA +template __global__ void BNLLBackward(const int n, const Dtype* in_diff, - const Dtype* in_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) { + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) + { Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } +#endif // USE_CUDA -template +template void BNLLLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - BNLLBackward<<>>( - count, top_diff, bottom_data, bottom_diff); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_diff, bottom_data, bottom_diff); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_bnll = program.get_kernel( + CL_KERNEL_SELECT("bnll_backward")); + viennacl::ocl::enqueue( + oclk_bnll(count, WrapHandle((cl_mem) top_diff, ctx), + WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) bottom_diff, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); +#endif // USE_GREENTEA + } } } INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); - } // namespace caffe diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 8f2e85d8f52..44900e66654 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -4,19 +4,26 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template __global__ void Concat(const int nthreads, const Dtype* in_data, - const bool forward, const int num_concats, const int concat_size, - const int top_concat_axis, const int bottom_concat_axis, - const int offset_concat_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { + const bool forward, const int num_concats, + const int concat_size, const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) + { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; - const int top_index = concat_index + - (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { @@ -25,9 +32,9 @@ __global__ void Concat(const int nthreads, const Dtype* in_data, } } -template +template void ConcatLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); @@ -37,31 +44,84 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + nthreads, bottom_data, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_concat = program.get_kernel( + CL_KERNEL_SELECT("concat")); + viennacl::ocl::enqueue( + oclk_concat(nthreads, WrapHandle((cl_mem) bottom_data, ctx), + kForward ? 1 : 0, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + +#endif // USE_GREENTEA + } offset_concat_axis += bottom_concat_axis; + } } -template +template void ConcatLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { - if (!propagate_down[i]) { continue; } + if (!propagate_down[i]) { + continue; + } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_concat = program.get_kernel( + CL_KERNEL_SELECT("concat")); + viennacl::ocl::enqueue( + oclk_concat(nthreads, WrapHandle((cl_mem) top_diff, ctx), + kForward ? 1 : 0, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, + WrapHandle((cl_mem) bottom_diff, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); + +#endif // USE_GREENTEA + } + offset_concat_axis += bottom_concat_axis; } } diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 931239316ac..63f014338a9 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -95,7 +95,7 @@ void ContrastiveLossLayer::Backward_gpu(const vector*>& top, const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) - CLLBackward<<>>( + CLLBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index b4e802e13d1..f583063be70 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -89,7 +89,7 @@ void CuDNNConvolutionLayer::Forward_gpu( // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) - sync_conv_groups<<<1, 1>>>(); + sync_conv_groupsCUDA_KERNEL(1, 1)(); } } @@ -150,7 +150,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) - sync_conv_groups<<<1, 1>>>(); + sync_conv_groupsCUDA_KERNEL(1, 1)(); } } diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index f9ea04f4acf..bca3fd46f07 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -32,7 +32,7 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) - DropoutForward<<>>( + DropoutForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, mask, uint_thres_, scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { @@ -61,8 +61,8 @@ void DropoutLayer::Backward_gpu(const vector*>& top, static_cast(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) - DropoutBackward<<>>( + DropoutBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; } else { diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 2247870d97f..799db672c70 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -55,11 +55,11 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) - MaxForward <<>>( + MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) - MaxForward<<>>( + MaxForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; @@ -120,7 +120,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( + CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, i, mask, bottom_diff); break; default: diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 001b3c34ac1..983072afd61 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -88,13 +88,13 @@ void LRNLayer::CrossChannelForward_gpu( // go through all the channels. int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) - LRNFillScale<<>>( + LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) - LRNComputeOutput<<>>( + LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; } @@ -183,7 +183,7 @@ void LRNLayer::CrossChannelBackward_gpu( const vector*>& bottom) { int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) - LRNComputeDiff<<>>( + LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index e1f20048f60..e4ea647967e 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -55,7 +55,7 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, } // NOLINT_NEXT_LINE(whitespace/operators) - PReLUForward<<>>( + PReLUForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, channels, dim, bottom_data, top_data, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } @@ -86,8 +86,8 @@ void PReLULayer::Backward_gpu(const vector*>& top, for (int n = 0; n < bottom[0]->num(); ++n) { // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackward<<>>( + PReLUParamBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(cdim), + CAFFE_CUDA_NUM_THREADS)( cdim, top_diff + top[0]->offset(n), bottom_data + bottom[0]->offset(n), backward_buff_.mutable_gpu_diff()); @@ -113,8 +113,8 @@ void PReLULayer::Backward_gpu(const vector*>& top, const Dtype* slope_data = this->blobs_[0]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) - PReLUBackward<<>>( + PReLUBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 547fa80c72f..603ce4770ce 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -6,32 +6,53 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (propagate_down[1]) { - LOG(FATAL) << this->type() - << " Layer cannot backpropagate to label inputs."; + LOG(FATAL)<< this->type() + << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - // First, compute the diff const int count = bottom[0]->count(); const int num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - caffe_copy(count, sigmoid_output_data, bottom_diff); - caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); - // Scale down gradient - const Dtype loss_weight = top[0]->cpu_diff()[0]; - caffe_gpu_scal(count, loss_weight / num, bottom_diff); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // First, compute the diff + caffe_copy(count, sigmoid_output_data, bottom_diff); + caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); + // Scale down gradient + const Dtype loss_weight = top[0]->cpu_diff()[0]; + caffe_gpu_scal(count, loss_weight / num, bottom_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + // First, compute the diff + greentea_copy(count, (cl_mem)sigmoid_output_data, (cl_mem)bottom_diff, ctx); + greentea_gpu_axpy(this->device_context_.id(), count, Dtype(-1), (cl_mem)target,0, (cl_mem)bottom_diff,0); + // Scale down gradient + const Dtype loss_weight = top[0]->cpu_diff()[0]; + greentea_gpu_scal(this->device_context_.id(), count, loss_weight / num, (cl_mem)bottom_diff,0); +#endif // USE_GREENTEA + } } } INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); - } // namespace caffe diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index e1af0657ec1..a5c3878cddc 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -21,7 +21,7 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) - SigmoidForward<<>>( + SigmoidForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " @@ -50,7 +50,7 @@ void SigmoidLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) - SigmoidBackward<<>>( + SigmoidBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 796841d3f52..1e1bd5f93da 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -38,7 +38,7 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( + CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( nthreads, bottom_data, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); offset_slice_axis += top_slice_axis; @@ -59,7 +59,7 @@ void SliceLayer::Backward_gpu(const vector*>& top, const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( + CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( nthreads, top_diff, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); offset_slice_axis += top_slice_axis; diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index ccd6e63ee7c..7c16127759c 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -23,7 +23,7 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) - TanHForward<<>>( + TanHForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } @@ -47,7 +47,7 @@ void TanHLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) - TanHBackward<<>>( + TanHBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } diff --git a/src/caffe/layers/threshold_layer.cu b/src/caffe/layers/threshold_layer.cu index bfa7f159460..05ca1a60ff3 100644 --- a/src/caffe/layers/threshold_layer.cu +++ b/src/caffe/layers/threshold_layer.cu @@ -4,30 +4,58 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +#ifdef USE_CUDA +template __global__ void ThresholdForward(const int n, const Dtype threshold, - const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) { + const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) + { out[index] = in[index] > threshold ? 1 : 0; } } +#endif -template +template void ThresholdLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - ThresholdForward<<>>( - count, threshold_, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK; -} + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + ThresholdForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, threshold_, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_threshold = program.get_kernel( + CL_KERNEL_SELECT("threshold")); + viennacl::ocl::enqueue( + oclk_threshold(count, threshold_, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); + ctx.get_queue().finish(); +#endif // USE_GREENTEA + } + +} INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer); - } // namespace caffe diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index a45c8896927..26c745f3e6c 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -156,9 +156,7 @@ void Solver::InitTestNets() { } template -void Solver::StepPrefilled() { - // Prefilled stepping can only do one at a time because the memory layer has to be refilled - int iters = 1; +void Solver::Step(int iters) { vector*> bottom_vec; const int start_iter = iter_; const int stop_iter = iter_ + iters; @@ -166,83 +164,49 @@ void Solver::StepPrefilled() { vector losses; Dtype smoothed_loss = 0; - for (; iter_ < stop_iter; ++iter_) { - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization())) { - // Currently can't do testing with this solver method - //TestAll(); - } - - const bool display = param_.display() && iter_ % param_.display() == 0; - net_->set_debug_info(display && param_.debug_info()); - Dtype loss; - net_->ForwardPrefilled(&loss); - net_->Backward(); - if (losses.size() < average_loss) { - losses.push_back(loss); - int size = losses.size(); - smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; - } else { - int idx = (iter_ - start_iter) % average_loss; - smoothed_loss += (loss - losses[idx]) / average_loss; - losses[idx] = loss; - } - if (display) { - LOG(INFO)<< "Iteration " << iter_ << ", loss = " << smoothed_loss; - const vector*>& result = net_->output_blobs(); - int score_index = 0; - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - const string& output_name = - net_->blob_names()[net_->output_blob_indices()[j]]; - const Dtype loss_weight = - net_->blob_loss_weights()[net_->output_blob_indices()[j]]; - for (int k = 0; k < result[j]->count(); ++k) { - ostringstream loss_msg_stream; - if (loss_weight) { - loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * result_vec[k] << " loss)"; + while (iter_ < stop_iter) { + // zero-init the params + for (int i = 0; i < net_->params().size(); ++i) { + shared_ptr > blob = net_->params()[i]; + switch (Caffe::mode()) { + case Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + if (blob->device_context().backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_set(blob->device_context().id(),blob->count(), static_cast(0), + (cl_mem)(blob->mutable_gpu_diff()),0); +#endif // USE_GREENTEA } - LOG(INFO) << " Train net output #" - << score_index++ << ": " << output_name << " = " - << result_vec[k] << loss_msg_stream.str(); - } +#else + NO_GPU; +#endif + break; } } - ApplyUpdate(); - - // Increment the internal iter_ counter -- its value should always indicate - // the number of times the weights have been updated. - ++iter_; - - // Save a snapshot if needed. - if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { - Snapshot(); - } - } -} -template -void Solver::Step(int iters) { - vector*> bottom_vec; - const int start_iter = iter_; - const int stop_iter = iter_ + iters; - int average_loss = this->param_.average_loss(); - vector losses; - Dtype smoothed_loss = 0; - - for (; iter_ < stop_iter; ++iter_) { if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization())) { - // Currently can't do testing with this solver method - //TestAll(); + TestAll(); } const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - Dtype loss; - net_->ForwardPrefilled(&loss); - net_->Backward(); + // accumulate the loss and gradient + Dtype loss = 0; + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(bottom_vec); + } + loss /= param_.iter_size(); + // average the loss across iterations for smoothed reporting if (losses.size() < average_loss) { losses.push_back(loss); int size = losses.size(); @@ -281,7 +245,7 @@ void Solver::Step(int iters) { ++iter_; // Save a snapshot if needed. - if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { Snapshot(); } } From 83a92ff3fffc54de3afd2282809badd2560a0185 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 9 Jun 2015 20:10:09 +0200 Subject: [PATCH 043/254] Finished hybrid OpenCL/CBLAS backend, fixed bnll layer for OpenCL. --- Greentea_Building_Blocks.txt | 1 - src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/bnll.cl | 4 +- .../greentea/greentea_math_functions.cpp | 482 +++++++++++------- src/caffe/layers/conv_sk_layer.cu | 126 ++--- src/caffe/layers/pooling_sk_layer.cu | 5 - 6 files changed, 322 insertions(+), 300 deletions(-) diff --git a/Greentea_Building_Blocks.txt b/Greentea_Building_Blocks.txt index 24d0c498d64..9b42db9674f 100644 --- a/Greentea_Building_Blocks.txt +++ b/Greentea_Building_Blocks.txt @@ -23,5 +23,4 @@ if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" -#include "caffe/greentea/greentea_im2col.hpp" #endif diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 295093735c3..a3a41af396d 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -8,7 +8,7 @@ std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __gl std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; -std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n float kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; +std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; @@ -22,7 +22,7 @@ std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; -std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n float kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; +std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; diff --git a/src/caffe/greentea/cl_kernels/bnll.cl b/src/caffe/greentea/cl_kernels/bnll.cl index a7f95b28b9d..99e0094d41e 100644 --- a/src/caffe/greentea/cl_kernels/bnll.cl +++ b/src/caffe/greentea/cl_kernels/bnll.cl @@ -16,9 +16,9 @@ __kernel void TEMPLATE(bnll_backward,Dtype)(const int n, __global const Dtype* in_diff, __global const Dtype* in_data, __global Dtype* out_diff) { - float kBNLL_THRESHOLD = 50.; + Dtype kBNLL_THRESHOLD = 50.; for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); + Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 59597dbe07d..2906f388ac9 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -25,6 +25,8 @@ #include "viennacl/ocl/platform.hpp" #include "viennacl/ocl/backend.hpp" +#include "caffe/util/math_functions.hpp" + #include #include #include @@ -50,8 +52,9 @@ void greentea_memset(const int ctx_id, const size_t N, const int alpha, typedef float Dtype; viennacl::ocl::kernel &oclk_fill = program.get_kernel( CL_KERNEL_SELECT("fill")); - viennacl::ocl::enqueue(oclk_fill(int(N/sizeof(Dtype)), Dtype(alpha), WrapHandle(X, ctx), offX), - ctx.get_queue()); + viennacl::ocl::enqueue( + oclk_fill(int(N / sizeof(Dtype)), Dtype(alpha), WrapHandle(X, ctx), offX), + ctx.get_queue()); ctx.get_queue().finish(); } @@ -116,56 +119,72 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const int offA, const cl_mem B, const int offB, const Dtype beta, cl_mem C, const int offC) { - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; - int ldc = N; + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); -#ifdef USE_VIENNACLBLAS + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // Make sure the OpenCL queue is empty before using CBLAS + ctx.get_queue().finish(); + Dtype *Aptr, *Bptr, *Cptr; + clGetMemObjectInfo(A, CL_MEM_HOST_PTR, sizeof(Dtype*), &Aptr, NULL); + clGetMemObjectInfo(B, CL_MEM_HOST_PTR, sizeof(Dtype*), &Bptr, NULL); + clGetMemObjectInfo(C, CL_MEM_HOST_PTR, sizeof(Dtype*), &Cptr, NULL); - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + caffe_cpu_gemm(TransA, TransB, M, N, K, alpha, Aptr + offA, + Bptr + offB, beta, Cptr + offC); + } else { - ViennaCLTranspose vclTransA = - (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; - ViennaCLTranspose vclTransB = - (TransB == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + int ldc = N; - ViennaCLOrder vclOrderA = ViennaCLRowMajor; - ViennaCLOrder vclOrderB = ViennaCLRowMajor; - ViennaCLOrder vclOrderC = ViennaCLRowMajor; +#ifdef USE_VIENNACLBLAS - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, - vclOrderC, M, N, K, alpha, A, 0, offA, 1, 1, lda, B, - 0, offB, 1, 1, ldb, beta, C, 0, offC, 1, 1, ldc)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, vclTransB, - vclOrderC, M, N, K, alpha, A, 0, offA, 1, 1, lda, B, - 0, offB, 1, 1, ldb, beta, C, 0, offC, 1, 1, ldc)); - } + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, + static_cast(ctx_id)); + + ViennaCLTranspose vclTransA = + (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + ViennaCLTranspose vclTransB = + (TransB == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + + ViennaCLOrder vclOrderA = ViennaCLRowMajor; + ViennaCLOrder vclOrderB = ViennaCLRowMajor; + ViennaCLOrder vclOrderC = ViennaCLRowMajor; + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, + vclTransB, vclOrderC, M, N, K, alpha, A, 0, offA, + 1, 1, lda, B, 0, offB, 1, 1, ldb, beta, C, 0, + offC, 1, 1, ldc)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, + vclTransB, vclOrderC, M, N, K, alpha, A, 0, offA, + 1, 1, lda, B, 0, offB, 1, 1, ldb, beta, C, 0, + offC, 1, 1, ldc)); + } #endif #ifdef USE_CLBLAS - clblasOrder clOrder = clblasRowMajor; - clblasTranspose clTransA = - (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; - clblasTranspose clTransB = - (TransB == CblasNoTrans) ? clblasNoTrans : clblasTrans; - - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); - - if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK( - clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); - } else { - GREENTEA_CL_BLAS_CHECK( - clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); - } + clblasOrder clOrder = clblasRowMajor; + clblasTranspose clTransA = + (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; + clblasTranspose clTransB = + (TransB == CblasNoTrans) ? clblasNoTrans : clblasTrans; + + cl_command_queue queue = ctx.get_queue().handle().get(); + + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); + } #endif - + } } template void greentea_gpu_gemm(const int ctx_id, @@ -192,45 +211,60 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, const int offx, const Dtype beta, cl_mem y, const int offy) { - int lda = (TransA == CblasNoTrans) ? N : M; + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); -#ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); - - ViennaCLOrder vclOrder = ViennaCLRowMajor; - ViennaCLTranspose vclTransA = - (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; - - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSgemv(backend, vclOrder, vclTransA, M, N, alpha, A, offA, - 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // Make sure the OpenCL queue is empty before using CBLAS + ctx.get_queue().finish(); + Dtype *Aptr, *xptr, *yptr; + clGetMemObjectInfo(A, CL_MEM_HOST_PTR, sizeof(Dtype*), &Aptr, NULL); + clGetMemObjectInfo(x, CL_MEM_HOST_PTR, sizeof(Dtype*), &xptr, NULL); + clGetMemObjectInfo(y, CL_MEM_HOST_PTR, sizeof(Dtype*), &yptr, NULL); + + caffe_cpu_gemv(TransA, M, N, alpha, Aptr + offA, xptr + offx, beta, + yptr + offy); } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDgemv(backend, vclOrder, vclTransA, M, N, alpha, A, offA, - 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); - } + + int lda = (TransA == CblasNoTrans) ? N : M; + +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, + static_cast(ctx_id)); + + ViennaCLOrder vclOrder = ViennaCLRowMajor; + ViennaCLTranspose vclTransA = + (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSgemv(backend, vclOrder, vclTransA, M, N, alpha, A, + offA, 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDgemv(backend, vclOrder, vclTransA, M, N, alpha, A, + offA, 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); + } #endif #ifdef USE_CLBLAS - clblasOrder clOrder = clblasRowMajor; - clblasTranspose clTransA = - (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; + clblasOrder clOrder = clblasRowMajor; + clblasTranspose clTransA = + (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); + cl_command_queue queue = ctx.get_queue().handle().get(); - if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK( - clblasSgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); - } else { - GREENTEA_CL_BLAS_CHECK( - clblasDgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); - } + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + } #endif + } } template void greentea_gpu_gemv(const int ctx_id, @@ -253,33 +287,46 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const cl_mem X, const int offX, cl_mem Y, const int offY) { -#ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // Make sure the OpenCL queue is empty before using CBLAS + ctx.get_queue().finish(); + Dtype *Xptr, *Yptr; + clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); + clGetMemObjectInfo(Y, CL_MEM_HOST_PTR, sizeof(Dtype*), &Yptr, NULL); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); + caffe_axpy(N, alpha, Xptr + offX, Yptr + offY); } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); - } + +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, + static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); + } #endif #ifdef USE_CLBLAS - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); + cl_command_queue queue = ctx.get_queue().handle().get(); - if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK( - clblasSaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - } else { - GREENTEA_CL_BLAS_CHECK( - clblasDaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - } + } #endif + } } template void greentea_gpu_axpy(const int ctx_id, const int N, @@ -340,29 +387,41 @@ template void greentea_gpu_div(const int ctx_id, const int N, template void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, cl_mem x, int offx) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); -#ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // Make sure the OpenCL queue is empty before using CBLAS + ctx.get_queue().finish(); + Dtype *xptr; + clGetMemObjectInfo(x, CL_MEM_HOST_PTR, sizeof(Dtype*), &xptr, NULL); + caffe_scal(N, alpha, xptr + offx); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSscal(backend, N, alpha, x, offx, 1)); } else { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDscal(backend, N, alpha, x, offx, 1)); - } +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, + static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSscal(backend, N, alpha, x, offx, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDscal(backend, N, alpha, x, offx, 1)); + } #endif #ifdef USE_CLBLAS - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); + cl_command_queue queue = ctx.get_queue().handle().get(); - if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK(clblasSscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); - } else { - GREENTEA_CL_BLAS_CHECK(clblasDscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); - } + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK(clblasSscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK(clblasDscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); + } #endif + } } template void greentea_gpu_scal(const int ctx_id, const int N, @@ -394,46 +453,58 @@ template void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, const int offX, const cl_mem Y, const int offY, Dtype* out) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); -#ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // Make sure the OpenCL queue is empty before using CBLAS + ctx.get_queue().finish(); + Dtype *Xptr, *Yptr; + clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); + clGetMemObjectInfo(Y, CL_MEM_HOST_PTR, sizeof(Dtype*), &Yptr, NULL); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSdot(backend, n, out, X, offX, 1, Y, offY, 1)); + *out = caffe_cpu_dot(n, Xptr + offX, Yptr + offY); } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDdot(backend, n, out, X, offX, 1, Y, offY, 1)); - } + +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, + static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSdot(backend, n, out, X, offX, 1, Y, offY, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDdot(backend, n, out, X, offX, 1, Y, offY, 1)); + } #endif #ifdef USE_CLBLAS - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); - - cl_int err; - cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - sizeof(Dtype), NULL, &err); - cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - n * sizeof(Dtype), NULL, &err); - - if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK( - clblasSdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); - } else { - GREENTEA_CL_BLAS_CHECK( - clblasDdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); - } + cl_command_queue queue = ctx.get_queue().handle().get(); - greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, out, ctx); + cl_int err; + cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + sizeof(Dtype), NULL, &err); + cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + n * sizeof(Dtype), NULL, &err); - ctx.get_queue().finish(); - clReleaseMemObject(gpuout); - clReleaseMemObject(scratch); + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); + } + + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, out, ctx); + + ctx.get_queue().finish(); + clReleaseMemObject(gpuout); + clReleaseMemObject(scratch); #endif + } } template void greentea_gpu_dot(const int ctx_id, const int n, @@ -448,43 +519,53 @@ template void greentea_gpu_dot(const int ctx_id, const int n, template void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, const int offX, Dtype* Y) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); -#ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // Make sure the OpenCL queue is empty before using CBLAS + ctx.get_queue().finish(); + Dtype* Xptr; + clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); + *Y = caffe_cpu_asum(n, Xptr + offX); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSasum(backend, n, Y, X, offX, 1)); } else { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDasum(backend, n, Y, X, offX, 1)); - } +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, + static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSasum(backend, n, Y, X, offX, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDasum(backend, n, Y, X, offX, 1)); + } #endif #ifdef USE_CLBLAS - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); - - cl_int err; - cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - sizeof(Dtype), NULL, &err); - cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - n * sizeof(Dtype), NULL, &err); - - if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK( - clblasSasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); - } else { - GREENTEA_CL_BLAS_CHECK( - clblasDasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); - } - - greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, Y, ctx); - - ctx.get_queue().finish(); - clReleaseMemObject(gpuout); - clReleaseMemObject(scratch); + cl_command_queue queue = ctx.get_queue().handle().get(); + + cl_int err; + cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + sizeof(Dtype), NULL, &err); + cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, + n * sizeof(Dtype), NULL, &err); + + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasSasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); + } + + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, Y, ctx); + + ctx.get_queue().finish(); + clReleaseMemObject(gpuout); + clReleaseMemObject(scratch); #endif + } } template void greentea_gpu_asum(const int ctx_id, const int n, @@ -498,38 +579,51 @@ template void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, const cl_mem X, const int offX, cl_mem Y, const int offY) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // Make sure the OpenCL queue is empty before using CBLAS + ctx.get_queue().finish(); + Dtype *Xptr, *Yptr; + clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); + clGetMemObjectInfo(Y, CL_MEM_HOST_PTR, sizeof(Dtype*), &Yptr, NULL); + caffe_cpu_scale(n, alpha, Xptr + offX, Yptr + offY); -#ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); - - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLScopy(backend, n, X, offX, 1, Y, offY, 1)); - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSscal(backend, n, alpha, X, offX, 1)); } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDcopy(backend, n, X, offX, 1, Y, offY, 1)); - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDscal(backend, n, alpha, X, offX, 1)); - } +#ifdef USE_VIENNACLBLAS + ViennaCLBackend backend; + ViennaCLBackendCreate(&backend); + ViennaCLBackendSetOpenCLContextID(backend, + static_cast(ctx_id)); + + if (std::is_same::value) { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLScopy(backend, n, X, offX, 1, Y, offY, 1)); + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSscal(backend, n, alpha, X, offX, 1)); + } else { + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDcopy(backend, n, X, offX, 1, Y, offY, 1)); + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDscal(backend, n, alpha, X, offX, 1)); + } #endif #ifdef USE_CLBLAS - viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); - cl_command_queue queue = ctx.get_queue().handle().get(); - - if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK( - clblasScopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - GREENTEA_CL_BLAS_CHECK(clblasSscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); - } else { - GREENTEA_CL_BLAS_CHECK( - clblasDcopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - GREENTEA_CL_BLAS_CHECK(clblasDscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); - } + viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); + cl_command_queue queue = ctx.get_queue().handle().get(); + + if (std::is_same::value) { + GREENTEA_CL_BLAS_CHECK( + clblasScopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasSscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); + } else { + GREENTEA_CL_BLAS_CHECK( + clblasDcopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasDscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); + } #endif - + } } template void greentea_gpu_scale(const int ctx_id, const int n, diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index b765481ff16..a94a2776d8a 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -67,15 +67,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); - Dtype* top_data_cpu; - Dtype* col_data_cpu; - const Dtype* weight_cpu; - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - top_data_cpu = top[i]->mutable_cpu_data(); - col_data_cpu = col_buffer_.mutable_cpu_data(); - weight_cpu = this->blobs_[0]->cpu_data(); - } - int weight_offset = M_ * K_; int col_offset = K_ * N_; int top_offset = M_ * N_; @@ -88,41 +79,23 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); - ctx.get_queue().finish(); // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - caffe_cpu_gemm( - CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., - weight_cpu + weight_offset * g, col_data_cpu + col_offset * g, - (Dtype) 0., top_data_cpu + top[i]->offset(n) + top_offset * g); - } else { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, M_, N_, K_, (Dtype) 1., - weight, weight_offset * g, col_data, - col_offset * g, (Dtype) 0., top_data, - top[i]->offset(n) + top_offset * g); - } - ctx.get_queue().finish(); + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, + weight_offset * g, col_data, col_offset * g, + (Dtype) 0., top_data, + top[i]->offset(n) + top_offset * g); } // Third, add bias if (bias_term_) { - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, N_, - 1, (Dtype) 1., this->blobs_[1]->cpu_data(), - bias_multiplier_.cpu_data(), (Dtype) 1., - top_data_cpu + top[i]->offset(n)); - } else { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, num_output_, N_, 1, - (Dtype) 1., - (cl_mem) (this->blobs_[1]->gpu_data()), 0, - (cl_mem) (bias_multiplier_.gpu_data()), 0, - (Dtype) 1., top_data, top[i]->offset(n)); - } - ctx.get_queue().finish(); + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num_output_, N_, 1, (Dtype) 1., + (cl_mem) (this->blobs_[1]->gpu_data()), 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., top_data, top[i]->offset(n)); } } } @@ -213,8 +186,6 @@ void ConvolutionSKLayer::Backward_gpu( viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_.id()); - const Dtype* weight_cpu = NULL; - Dtype* weight_diff_cpu = NULL; cl_mem weight = NULL; cl_mem weight_diff = NULL; @@ -223,10 +194,6 @@ void ConvolutionSKLayer::Backward_gpu( weight_diff = (cl_mem) (this->blobs_[0]->mutable_gpu_diff()); greentea_gpu_set(this->device_context_.id(), this->blobs_[0]->count(), Dtype(0), weight_diff, 0); - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - weight_cpu = this->blobs_[0]->cpu_data(); - weight_diff_cpu = this->blobs_[0]->mutable_cpu_diff(); - } } cl_mem bias_diff = NULL; @@ -240,14 +207,10 @@ void ConvolutionSKLayer::Backward_gpu( const int col_offset = K_ * N_; const int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff_cpu = NULL; cl_mem top_diff = NULL; // Bias gradient, if necessary. if (bias_term_ && this->param_propagate_down_[1]) { top_diff = (cl_mem) (top[i]->gpu_diff()); - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - top_diff_cpu = top[i]->cpu_diff(); - } for (int n = 0; n < num_; ++n) { greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num_output_, N_, (Dtype) 1., top_diff, @@ -259,76 +222,47 @@ void ConvolutionSKLayer::Backward_gpu( if (this->param_propagate_down_[0] || propagate_down[i]) { if (!top_diff) { top_diff = (cl_mem) (top[i]->gpu_diff()); - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - top_diff_cpu = top[i]->cpu_diff(); - } } cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); cl_mem col_diff = (cl_mem) (col_buffer_.mutable_gpu_diff()); const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem bottom_diff = (cl_mem) (bottom[i]->mutable_gpu_diff()); - Dtype* col_data_cpu = NULL; - Dtype* col_diff_cpu = NULL; - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - col_data_cpu = col_buffer_.mutable_cpu_data(); - col_diff_cpu = col_buffer_.mutable_cpu_diff(); - } - for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. greentea_im2col_sk_gpu(program, ctx, bottom_data, - bottom[i]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, kstride_h_, kstride_w_, - col_data); - - ctx.get_queue().finish(); + bottom[i]->offset(n), channels_, + height_, width_, kernel_h_, kernel_w_, + pad_h_, pad_w_, stride_h_, stride_w_, + kstride_h_, kstride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - caffe_cpu_gemm( - CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., - top_diff_cpu + top[i]->offset(n) + top_offset * g, - col_data_cpu + col_offset * g, (Dtype) 1., - weight_diff_cpu + weight_offset * g); - } else { - greentea_gpu_gemm(this->device_context_.id(), - CblasNoTrans, CblasTrans, M_, K_, N_, - (Dtype) 1., top_diff, - top[i]->offset(n) + top_offset * g, - col_data, col_offset * g, (Dtype) 1., - weight_diff, weight_offset * g); - } + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasTrans, M_, K_, N_, (Dtype) 1., + top_diff, + top[i]->offset(n) + top_offset * g, + col_data, col_offset * g, (Dtype) 1., + weight_diff, weight_offset * g); } } // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - caffe_cpu_gemm( - CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., - weight_cpu + weight_offset * g, - top_diff_cpu + top[i]->offset(n) + top_offset * g, - (Dtype) 0., col_diff_cpu + col_offset * g); - } else { - greentea_gpu_gemm(this->device_context_.id(), CblasTrans, - CblasNoTrans, K_, N_, M_, (Dtype) 1., - weight, weight_offset * g, top_diff, - top[i]->offset(n) + top_offset * g, - (Dtype) 0., col_diff, col_offset * g); - } + greentea_gpu_gemm(this->device_context_.id(), CblasTrans, + CblasNoTrans, K_, N_, M_, (Dtype) 1., + weight, weight_offset * g, top_diff, + top[i]->offset(n) + top_offset * g, + (Dtype) 0., col_diff, col_offset * g); } // col2im back to the data - greentea_col2im_sk_gpu(program, ctx, col_diff, channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, kstride_h_, kstride_w_, - bottom_diff, bottom[i]->offset(n)); - - ctx.get_queue().finish(); + greentea_col2im_sk_gpu(program, ctx, col_diff, channels_, + height_, width_, kernel_h_, kernel_w_, + pad_h_, pad_w_, stride_h_, stride_w_, + kstride_h_, kstride_w_, bottom_diff, + bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index ea8abadb056..12666a8c596 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -276,7 +276,6 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, WrapHandle((cl_mem) mask, ctx), WrapHandle((cl_mem) top_mask, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; case PoolingParameter_PoolMethod_AVE: { @@ -289,7 +288,6 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, stride_h_, stride_w_, kstride_h_, kstride_w_, pad_h_, pad_w_, WrapHandle((cl_mem)top_data,ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; case PoolingParameter_PoolMethod_STOCHASTIC: { @@ -307,7 +305,6 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, stride_h_, stride_w_, kstride_h_, kstride_w_, WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()),ctx), WrapHandle((cl_mem)(top_data),ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } else { viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( CL_KERNEL_SELECT("sto_pool_forward_test_sk")); @@ -317,7 +314,6 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, WrapHandle((cl_mem)top_data,ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } } break; @@ -460,7 +456,6 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, pad_h_, pad_w_, WrapHandle((cl_mem) bottom_diff, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; default: From 1e26f2653daac1eb8f1b8f301bf520ce80eb3582 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 10 Jun 2015 16:38:48 +0200 Subject: [PATCH 044/254] Fixed bugs with DType/Dtype. --- Greentea_Building_Blocks.txt | 1 - .../greentea/greentea_math_functions.hpp | 3 + src/caffe/greentea/cl_kernels.cpp | 12 +- .../greentea/cl_kernels/contrastive_loss.cl | 33 +++++ src/caffe/greentea/cl_kernels/dropout.cl | 24 ++++ src/caffe/greentea/cl_kernels/pooling.cl | 4 +- .../greentea/greentea_math_functions.cpp | 30 +++- src/caffe/layers/bnll_layer.cu | 2 - src/caffe/layers/concat_layer.cu | 6 +- src/caffe/layers/contrastive_loss_layer.cu | 134 ++++++++++++------ src/caffe/layers/dropout_layer.cu | 131 ++++++++++++----- src/caffe/layers/softmax_layer.cu | 18 +-- src/caffe/layers/softmax_loss_layer.cu | 2 - src/caffe/layers/split_layer.cu | 61 ++++++-- src/caffe/util/math_functions.cu | 42 +++--- 15 files changed, 363 insertions(+), 140 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/contrastive_loss.cl create mode 100644 src/caffe/greentea/cl_kernels/dropout.cl diff --git a/Greentea_Building_Blocks.txt b/Greentea_Building_Blocks.txt index 9b42db9674f..6d632273211 100644 --- a/Greentea_Building_Blocks.txt +++ b/Greentea_Building_Blocks.txt @@ -10,7 +10,6 @@ viennacl::ocl::kernel &oclk_kernel = program.get_kernel( viennacl::ocl::enqueue( oclk_kernel(WrapHandle((cl_mem) data, ctx)), ctx.get_queue()); -ctx.get_queue().finish(); if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 970799fd5cc..a5c954b4d18 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -129,6 +129,9 @@ template void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, const Dtype b, cl_mem r, const int offr); +void greentea_gpu_rng_uniform(const int ctx_id, const int n, cl_mem r, + int offr); + template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, const Dtype sigma, cl_mem r, const int offr); diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index a3a41af396d..e7129d66aa2 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -11,13 +11,15 @@ std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\ std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; +std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; +std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const float scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const float scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; @@ -25,13 +27,15 @@ std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; +std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; +std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const float scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const float scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,DType)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { @@ -43,7 +47,9 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << bnll_float << "\n\n"; ss << channel_float << "\n\n"; ss << concat_float << "\n\n"; + ss << contrastive_loss_float << "\n\n"; ss << convolution_sk_float << "\n\n"; + ss << dropout_float << "\n\n"; ss << fillbuffer_float << "\n\n"; ss << im2col_float << "\n\n"; ss << im2col_sk_float << "\n\n"; @@ -61,7 +67,9 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << bnll_double << "\n\n"; ss << channel_double << "\n\n"; ss << concat_double << "\n\n"; + ss << contrastive_loss_double << "\n\n"; ss << convolution_sk_double << "\n\n"; + ss << dropout_double << "\n\n"; ss << fillbuffer_double << "\n\n"; ss << im2col_double << "\n\n"; ss << im2col_sk_double << "\n\n"; diff --git a/src/caffe/greentea/cl_kernels/contrastive_loss.cl b/src/caffe/greentea/cl_kernels/contrastive_loss.cl new file mode 100644 index 00000000000..fd47c607020 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/contrastive_loss.cl @@ -0,0 +1,33 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, + const Dtype margin, const int legacy_version, + const Dtype alpha, __global const Dtype* y, + __global const Dtype* diff, __global const Dtype* dist_sq, + __global Dtype *bottom_diff) { + for (int i = get_global_id(0); i < count; + i += get_global_size(0)) { + int n = i / channels; // the num index, to access y and dist_sq + if ((int)(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist = 0.0; + Dtype beta = 0.0; + if (legacy_version == 1) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + 1e-4) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} diff --git a/src/caffe/greentea/cl_kernels/dropout.cl b/src/caffe/greentea/cl_kernels/dropout.cl new file mode 100644 index 00000000000..9295892cadc --- /dev/null +++ b/src/caffe/greentea/cl_kernels/dropout.cl @@ -0,0 +1,24 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, + __global const Dtype* in, + __global const unsigned int* mask, + const unsigned int threshold, + const float scale, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] * (mask[index] > threshold) * scale; + } +} + +__kernel void TEMPLATE(dropout_backward,Dtype)( + const int n, __global const Dtype* in_diff, + __global const unsigned int* mask, const unsigned int threshold, + const float scale, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index 9be07fc3838..041d8ded4a3 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -78,7 +78,7 @@ __kernel void TEMPLATE(ave_pool_forward,Dtype)( } } -__kernel void TEMPLATE(sto_pool_forward_train,DType)( +__kernel void TEMPLATE(sto_pool_forward_train,Dtype)( const int nthreads, __global const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, @@ -120,7 +120,7 @@ __kernel void TEMPLATE(sto_pool_forward_train,DType)( } } -__kernel void TEMPLATE(sto_pool_forward_test,DType)( +__kernel void TEMPLATE(sto_pool_forward_test,Dtype)( const int nthreads, __global const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 2906f388ac9..adda60820ca 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include "caffe/common.hpp" @@ -837,8 +838,29 @@ template void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, cl_mem y, const int offy); -void greentea_gpu_rng_uniform(const int n, unsigned int* r) { - CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); +void greentea_gpu_rng_uniform(const int ctx_id, const int n, cl_mem r, + int offr) { + + struct timeval start_time; + gettimeofday(&start_time, NULL); +#ifdef __APPLE__ + std::seed_seq seq {(int)(start_time.tv_sec), (int)(start_time.tv_usec)}; +#else + std::seed_seq seq { start_time.tv_sec, start_time.tv_usec }; +#endif + std::mt19937_64 generator(seq); + std::uniform_int_distribution distribution(0, UINT32_MAX); + std::function rndfunc = std::bind(distribution, generator); + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + + std::vector random(n); + + for (int i = 0; i < n; ++i) { + random[i] = rndfunc(); + } + + greentea_gpu_memcpy(sizeof(unsigned int) * n, &random[0], r, offr, ctx); } template @@ -864,7 +886,7 @@ void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, random[i] = rndfunc(); } - greentea_gpu_memcpy(n, &random[0], r, offr, ctx); + greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, ctx); } template void greentea_gpu_rng_uniform(const int ctx_id, const int n, @@ -897,7 +919,7 @@ void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, random[i] = rndfunc(); } - greentea_gpu_memcpy(n, &random[0], r, offr, ctx); + greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, ctx); } template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index b9cc50dd976..07954b63f98 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -53,7 +53,6 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, oclk_bnll(count, WrapHandle((cl_mem) bottom_data, ctx), WrapHandle((cl_mem) top_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); #endif // USE_GREENTEA } } @@ -102,7 +101,6 @@ void BNLLLayer::Backward_gpu(const vector*>& top, WrapHandle((cl_mem) bottom_data, ctx), WrapHandle((cl_mem) bottom_diff, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); #endif // USE_GREENTEA } } diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 44900e66654..06b58a408c2 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -11,6 +11,7 @@ namespace caffe { +#ifdef USE_CUDA template __global__ void Concat(const int nthreads, const Dtype* in_data, const bool forward, const int num_concats, @@ -31,6 +32,7 @@ __global__ void Concat(const int nthreads, const Dtype* in_data, } } } +#endif // USE_CUDA template void ConcatLayer::Forward_gpu(const vector*>& bottom, @@ -68,8 +70,6 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, top_concat_axis, bottom_concat_axis, offset_concat_axis, WrapHandle((cl_mem) top_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); - #endif // USE_GREENTEA } offset_concat_axis += bottom_concat_axis; @@ -117,8 +117,6 @@ void ConcatLayer::Backward_gpu(const vector*>& top, top_concat_axis, bottom_concat_axis, offset_concat_axis, WrapHandle((cl_mem) bottom_diff, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); - #endif // USE_GREENTEA } diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 63f014338a9..674541333db 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -6,34 +6,52 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template void ContrastiveLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { const int count = bottom[0]->count(); - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[1]->gpu_data(), // b - diff_.mutable_gpu_data()); // a_i-b_i - caffe_gpu_powx( - count, - diff_.mutable_gpu_data(), // a_i-b_i - Dtype(2), - diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2 - caffe_gpu_gemv( - CblasNoTrans, - bottom[0]->num(), - bottom[0]->channels(), - Dtype(1.0), - diff_sq_.gpu_data(), // (a_i-b_i)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_sub(count, bottom[0]->gpu_data(), // a + bottom[1]->gpu_data(), // b + diff_.mutable_gpu_data()); // a_i-b_i + caffe_gpu_powx(count, diff_.mutable_gpu_data(), // a_i-b_i + Dtype(2), diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2 + caffe_gpu_gemv(CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), + Dtype(1.0), + diff_sq_.gpu_data(), // (a_i-b_i)^2 + summer_vec_.gpu_data(), Dtype(0.0), + dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_sub(this->device_context_.id(), count, + (cl_mem) (bottom[0]->gpu_data()), 0, // a + (cl_mem) (bottom[1]->gpu_data()), 0, // b + (cl_mem) (diff_.mutable_gpu_data()), 0); // a_i-b_i + greentea_gpu_powx(this->device_context_.id(), count, + (cl_mem) (diff_.mutable_gpu_data()), 0, // a_i-b_i + Dtype(2), (cl_mem) (diff_sq_.mutable_gpu_data()), 0); // (a_i-b_i)^2 + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), + (cl_mem) (diff_sq_.gpu_data()), + 0, // (a_i-b_i)^2 + (cl_mem) (summer_vec_.gpu_data()), 0, Dtype(0.0), + (cl_mem) (dist_sq_.mutable_gpu_data()), 0); // \Sum (a_i-b_i)^2 +#endif // USE_GREENTEA + } + Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); + bool legacy_version = this->layer_param_.contrastive_loss_param() + .legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs @@ -44,7 +62,7 @@ void ContrastiveLossLayer::Forward_gpu( } else { Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0)); - loss += dist*dist; + loss += dist * dist; } } } @@ -52,12 +70,15 @@ void ContrastiveLossLayer::Forward_gpu( top[0]->mutable_cpu_data()[0] = loss; } -template +#ifdef USE_CUDA +template __global__ void CLLBackward(const int count, const int channels, - const Dtype margin, const bool legacy_version, const Dtype alpha, - const Dtype* y, const Dtype* diff, const Dtype* dist_sq, - Dtype *bottom_diff) { - CUDA_KERNEL_LOOP(i, count) { + const Dtype margin, const bool legacy_version, + const Dtype alpha, const Dtype* y, + const Dtype* diff, const Dtype* dist_sq, + Dtype *bottom_diff) { + CUDA_KERNEL_LOOP(i, count) + { int n = i / channels; // the num index, to access y and dist_sq if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; @@ -80,28 +101,55 @@ __global__ void CLLBackward(const int count, const int channels, } } } +#endif // USE_CUDA -template -void ContrastiveLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { +template +void ContrastiveLossLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - const bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); + const bool legacy_version = this->layer_param_.contrastive_loss_param() + .legacy_version(); const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[0]->num()); - // NOLINT_NEXT_LINE(whitespace/operators) - CLLBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, channels, margin, legacy_version, alpha, - bottom[2]->gpu_data(), // pair similarity 0 or 1 - diff_.gpu_data(), // the cached eltwise difference between a and b - dist_sq_.gpu_data(), // the cached square distance between a and b - bottom[i]->mutable_gpu_diff()); - CUDA_POST_KERNEL_CHECK; + const Dtype alpha = sign * top[0]->cpu_diff()[0] + / static_cast(bottom[0]->num()); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + CLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, channels, margin, legacy_version, alpha, + bottom[2]->gpu_data(), // pair similarity 0 or 1 + diff_.gpu_data(),// the cached eltwise difference between a and b + dist_sq_.gpu_data(),// the cached square distance between a and b + bottom[i]->mutable_gpu_diff()); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_cll = program.get_kernel( + CL_KERNEL_SELECT("cll_backward")); + viennacl::ocl::enqueue( + oclk_cll(count, channels, margin, legacy_version ? 1 : 0, alpha, + WrapHandle((cl_mem) (bottom[2]->gpu_data()), ctx), + WrapHandle((cl_mem) (diff_.gpu_data()), ctx), + WrapHandle((cl_mem) (dist_sq_.gpu_data()), ctx), + WrapHandle((cl_mem) (bottom[i]->mutable_gpu_diff()), ctx)), + ctx.get_queue()); + +#endif // USE_GREENTEA + } + } } } diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index bca3fd46f07..34cfcc33fe8 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -10,68 +10,129 @@ namespace caffe { - -template +#ifdef USE_CUDA +template __global__ void DropoutForward(const int n, const Dtype* in, - const unsigned int* mask, const unsigned int threshold, const float scale, - Dtype* out) { - CUDA_KERNEL_LOOP(index, n) { + const unsigned int* mask, + const unsigned int threshold, const float scale, + Dtype* out) { + CUDA_KERNEL_LOOP(index, n) + { out[index] = in[index] * (mask[index] > threshold) * scale; } } +#endif // USE_CUDA -template +template void DropoutLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - if (this->phase_ == TRAIN) { - unsigned int* mask = - static_cast(rand_vec_.mutable_gpu_data()); - caffe_gpu_rng_uniform(count, mask); - // set thresholds - // NOLINT_NEXT_LINE(whitespace/operators) - DropoutForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, mask, uint_thres_, scale_, top_data); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (this->phase_ == TRAIN) { + unsigned int* mask = + static_cast(rand_vec_.mutable_gpu_data()); + caffe_gpu_rng_uniform(count, mask); + // set thresholds + // NOLINT_NEXT_LINE(whitespace/operators) + DropoutForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, mask, uint_thres_, scale_, top_data); + CUDA_POST_KERNEL_CHECK + ; + } else { + caffe_copy(count, bottom_data, top_data); + } +#endif // USE_CUDA } else { - caffe_copy(count, bottom_data, top_data); +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + if (this->phase_ == TRAIN) { + cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data()); + greentea_gpu_rng_uniform(this->device_context_.id(), count, mask, 0); + // set thresholds + viennacl::ocl::kernel &oclk_dropout = program.get_kernel( + CL_KERNEL_SELECT("dropout_forward")); + viennacl::ocl::enqueue( + oclk_dropout(count, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle(mask, ctx), uint_thres_, scale_, + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); + } else { + greentea_copy(count, (cl_mem) bottom_data, (cl_mem) top_data, ctx); + } +#endif // USE_GREENTEA } + } -template +#ifdef USE_CUDA +template __global__ void DropoutBackward(const int n, const Dtype* in_diff, - const unsigned int* mask, const unsigned int threshold, const float scale, - Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) { + const unsigned int* mask, + const unsigned int threshold, const float scale, + Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) + { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } +#endif // USE_CUDA -template +template void DropoutLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (this->phase_ == TRAIN) { - const unsigned int* mask = - static_cast(rand_vec_.gpu_data()); - const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - DropoutBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, top_diff, mask, uint_thres_, scale_, bottom_diff); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (this->phase_ == TRAIN) { + const unsigned int* mask = static_cast(rand_vec_ + .gpu_data()); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + DropoutBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, top_diff, mask, uint_thres_, scale_, bottom_diff); + CUDA_POST_KERNEL_CHECK + ; + } else { + caffe_copy(top[0]->count(), top_diff, bottom_diff); + } +#endif // USE_CUDA } else { - caffe_copy(top[0]->count(), top_diff, bottom_diff); +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + if (this->phase_ == TRAIN) { + cl_mem mask = (cl_mem) (rand_vec_.gpu_data()); + const int count = bottom[0]->count(); + viennacl::ocl::kernel &oclk_dropout = program.get_kernel( + CL_KERNEL_SELECT("dropout_backward")); + viennacl::ocl::enqueue( + oclk_dropout(count, WrapHandle((cl_mem) top_diff, ctx), + WrapHandle(mask, ctx), uint_thres_, scale_, + WrapHandle((cl_mem) bottom_diff, ctx)), + ctx.get_queue()); + } else { + greentea_copy(top[0]->count(), (cl_mem) top_diff, (cl_mem) bottom_diff, ctx); + } +#endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); - } // namespace caffe diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 718b72941f6..52621689ee8 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -167,7 +167,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, oclk_channel_max(num, channels, spatial_dim, WrapHandle(top_data, ctx), WrapHandle(scale_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); @@ -176,7 +176,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, WrapHandle(scale_data, ctx), WrapHandle(top_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_exp = program.get_kernel( CL_KERNEL_SELECT("kernel_exp")); @@ -184,7 +184,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, oclk_exp(num * channels * spatial_dim, WrapHandle(top_data, ctx), WrapHandle(top_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_sum")); @@ -192,7 +192,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, oclk_channel_sum(num, channels, spatial_dim, WrapHandle(top_data, ctx), WrapHandle(scale_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_div = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_div")); @@ -201,7 +201,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, WrapHandle(scale_data, ctx), WrapHandle(top_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); + #endif } @@ -248,7 +248,7 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, this->device_context_.id()); greentea_copy(top[0]->count(), top_diff, bottom_diff, ctx); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_dot")); @@ -257,7 +257,7 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, WrapHandle(top_diff, ctx), WrapHandle(top_data, ctx), WrapHandle(scale_data, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); + viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); @@ -266,11 +266,11 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, WrapHandle(scale_data, ctx), WrapHandle(bottom_diff, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); + greentea_gpu_mul(this->device_context_.id(), top[0]->count(), bottom_diff, 0, top_data, 0, bottom_diff, 0); - ctx.get_queue().finish(); + #endif } } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 335aabdc492..09e8f955929 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -101,7 +101,6 @@ void SoftmaxWithLossLayer::Forward_gpu( spatial_dim, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); Dtype loss; @@ -209,7 +208,6 @@ void SoftmaxWithLossLayer::Backward_gpu( viennacl::ocl::enqueue( oclk_softmax_loss_backward(nthreads, WrapHandle(top_data,ctx), WrapHandle(label,ctx), WrapHandle(bottom_diff,ctx), num, dim, spatial_dim, has_ignore_label_?1:0, ignore_label_, WrapHandle(counts,ctx)), ctx.get_queue()); - ctx.get_queue().finish(); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu index a4f5df26452..0e6833b8e28 100644 --- a/src/caffe/layers/split_layer.cu +++ b/src/caffe/layers/split_layer.cu @@ -6,33 +6,64 @@ namespace caffe { -template +template void SplitLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { for (int i = 0; i < top.size(); ++i) { top[i]->ShareData(*bottom[0]); } } -template +template void SplitLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } - if (top.size() == 1) { - caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { return; } - caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), - bottom[0]->mutable_gpu_diff()); - // Add remaining top blob diffs. - for (int i = 2; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->gpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (top.size() == 1) { + caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); + return; + } + caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), + bottom[0]->mutable_gpu_diff()); + // Add remaining top blob diffs. + for (int i = 2; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + if (top.size() == 1) { + greentea_copy(count_, (cl_mem) (top[0]->gpu_diff()), + (cl_mem) (bottom[0]->mutable_gpu_diff()), ctx); + return; + } + greentea_gpu_add(this->device_context_.id(), count_, + (cl_mem) (top[0]->gpu_diff()), 0, + (cl_mem) (top[1]->gpu_diff()), 0, + (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); + // Add remaining top blob diffs. + for (int i = 2; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + greentea_gpu_axpy(this->device_context_.id(), count_, Dtype(1.), + (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); + } +#endif // USE_GREENTEA } } - INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer); } // namespace caffe diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 2631a0740d6..5d55f0ddc2a 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -156,7 +156,7 @@ void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { return; } // NOLINT_NEXT_LINE(whitespace/operators) - set_kernel<<>>( + set_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, alpha, Y); } @@ -174,14 +174,14 @@ __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_scalar_kernel<<>>( + add_scalar_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_scalar_kernel<<>>( + add_scalar_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, alpha, Y); } @@ -197,7 +197,7 @@ template <> void caffe_gpu_add(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_kernel<<>>( + add_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -205,7 +205,7 @@ template <> void caffe_gpu_add(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_kernel<<>>( + add_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -221,7 +221,7 @@ template <> void caffe_gpu_sub(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - sub_kernel<<>>( + sub_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -229,7 +229,7 @@ template <> void caffe_gpu_sub(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - sub_kernel<<>>( + sub_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -245,7 +245,7 @@ template <> void caffe_gpu_mul(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - mul_kernel<<>>( + mul_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -253,7 +253,7 @@ template <> void caffe_gpu_mul(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - mul_kernel<<>>( + mul_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -269,7 +269,7 @@ template <> void caffe_gpu_div(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - div_kernel<<>>( + div_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -277,7 +277,7 @@ template <> void caffe_gpu_div(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - div_kernel<<>>( + div_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } @@ -291,14 +291,14 @@ __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { template <> void caffe_gpu_abs(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - abs_kernel<<>>( + abs_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template <> void caffe_gpu_abs(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - abs_kernel<<>>( + abs_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } @@ -313,14 +313,14 @@ __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { template <> void caffe_gpu_exp(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - exp_kernel<<>>( + exp_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template <> void caffe_gpu_exp(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - exp_kernel<<>>( + exp_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } @@ -334,14 +334,14 @@ __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { template <> void caffe_gpu_log(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( + log_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template <> void caffe_gpu_log(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( + log_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } @@ -357,7 +357,7 @@ template <> void caffe_gpu_powx(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - powx_kernel<<>>( + powx_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, alpha, y); } @@ -365,7 +365,7 @@ template <> void caffe_gpu_powx(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - powx_kernel<<>>( + powx_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, alpha, y); } @@ -397,7 +397,7 @@ uint32_t caffe_gpu_hamming_distance(const int n, const float* x, NOT_IMPLEMENTED; thrust::device_vector popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) - popc_kernel<<>>( + popc_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS)( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus()); @@ -411,7 +411,7 @@ uint32_t caffe_gpu_hamming_distance(const int n, const double* x, NOT_IMPLEMENTED; thrust::device_vector popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) - popcll_kernel<<>>( + popcll_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS)( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ From 47d2d3c49ec57acaa6dca65dc28a143d2a9359f0 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 12 Jun 2015 16:00:38 +0200 Subject: [PATCH 045/254] Fixed Tests for CUDA, OpenCL tests WIP. --- Makefile | 2 +- Makefile.config.example | 12 ++ include/caffe/common.hpp | 9 +- .../caffe/test/test_gradient_check_util.hpp | 2 +- include/caffe/util/benchmark.hpp | 4 +- include/caffe/util/device_alternate.hpp | 5 +- include/caffe/util/math_functions.hpp | 2 + src/caffe/common.cpp | 15 ++- src/caffe/layers/base_conv_layer.cpp | 8 +- src/caffe/layers/contrastive_loss_layer.cu | 2 +- src/caffe/net.cpp | 4 +- src/caffe/syncedmem.cpp | 10 +- src/caffe/test/test_accuracy_layer.cpp | 8 +- src/caffe/test/test_argmax_layer.cpp | 2 +- src/caffe/test/test_blob.cpp | 12 +- src/caffe/test/test_caffe_main.cpp | 17 +-- src/caffe/test/test_common.cpp | 8 +- src/caffe/test/test_concat_layer.cpp | 6 +- .../test/test_contrastive_loss_layer.cpp | 6 +- src/caffe/test/test_convolution_layer.cpp | 16 +-- src/caffe/test/test_data_transformer.cpp | 30 ++--- src/caffe/test/test_deconvolution_layer.cpp | 4 +- src/caffe/test/test_eltwise_layer.cpp | 6 +- src/caffe/test/test_euclidean_loss_layer.cpp | 4 +- src/caffe/test/test_filler.cpp | 12 +- src/caffe/test/test_filter_layer.cpp | 6 +- src/caffe/test/test_flatten_layer.cpp | 2 +- src/caffe/test/test_gradient_based_solver.cpp | 6 +- src/caffe/test/test_hinge_loss_layer.cpp | 4 +- src/caffe/test/test_im2col_kernel.cu | 8 +- src/caffe/test/test_im2col_layer.cpp | 2 +- src/caffe/test/test_infogain_loss_layer.cpp | 6 +- src/caffe/test/test_inner_product_layer.cpp | 2 +- src/caffe/test/test_lrn_layer.cpp | 4 +- src/caffe/test/test_math_functions.cpp | 4 +- .../test/test_maxpool_dropout_layers.cpp | 2 +- src/caffe/test/test_memory_data_layer.cpp | 4 +- src/caffe/test/test_mergecrop_layer.cpp | 123 ++++++++++++++++++ .../test_multinomial_logistic_loss_layer.cpp | 4 +- src/caffe/test/test_mvn_layer.cpp | 2 +- src/caffe/test/test_net.cpp | 40 +++--- src/caffe/test/test_neuron_layer.cpp | 10 +- src/caffe/test/test_pooling_layer.cpp | 12 +- src/caffe/test/test_power_layer.cpp | 2 +- .../test/test_random_number_generator.cpp | 8 +- src/caffe/test/test_reduction_layer.cpp | 2 +- src/caffe/test/test_reshape_layer.cpp | 4 +- .../test_sigmoid_cross_entropy_loss_layer.cpp | 4 +- src/caffe/test/test_slice_layer.cpp | 4 +- src/caffe/test/test_softmax_layer.cpp | 2 +- .../test/test_softmax_with_loss_layer.cpp | 4 +- src/caffe/test/test_split_layer.cpp | 2 +- src/caffe/test/test_spp_layer.cpp | 6 +- src/caffe/test/test_stochastic_pooling.cpp | 2 +- src/caffe/test/test_syncedmem.cpp | 16 +-- src/caffe/test/test_tanh_layer.cpp | 2 +- src/caffe/test/test_threshold_layer.cpp | 2 +- src/caffe/test/test_util_blas.cpp | 18 +-- src/caffe/util/benchmark.cpp | 12 ++ src/caffe/util/math_functions.cpp | 2 + 60 files changed, 355 insertions(+), 184 deletions(-) create mode 100644 src/caffe/test/test_mergecrop_layer.cpp diff --git a/Makefile b/Makefile index d93546d40bc..c2727288d58 100644 --- a/Makefile +++ b/Makefile @@ -609,7 +609,7 @@ ifeq ($(USE_CUDA), 1) @ cat $@.$(WARNS_EXT) else @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + $(Q)$(CXX) $< $(CXXFLAGS) -x c++ -c $< -o $@ 2> $@.$(WARNS_EXT) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) endif diff --git a/Makefile.config.example b/Makefile.config.example index c621822d1cb..dbcba5c99bf 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -2,11 +2,23 @@ # Contributions simplifying and improving our build system are welcome! # GreenTea (ViennaCL/OpenCL) backend switch + +# Enable the CUDA backend USE_CUDA := 1 + +# Enable the OpenCL/Greentea backend USE_GREENTEA := 1 + +# Folder of the ViennaCL header-only library VIENNACL_DIR = ../ViennaCL + +# Either set CLBLAS or VIENNACLBLAS to 1, not both. +# If you want to use OpenCL/Greentea on the CPU only, you can also disable both. +# When both are disabled, GPUs won't work. CPUs always use CBLAS (Atlas, MKL or OpenBLAS). USE_CLBLAS := 1 USE_VIENNACLBLAS := 0 + +# Enable double precision support for OpenCL/Greentea GREENTEA_DOUBLE_SUPPORT := 1 # cuDNN acceleration switch (uncomment to build with cuDNN). diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 3bdf10bc1e7..de247ab4968 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -15,6 +15,7 @@ #include #include // pair #include +#include #include "caffe/util/device_alternate.hpp" @@ -130,11 +131,13 @@ class Caffe { return *(Get().random_generator_); } #ifndef CPU_ONLY +#ifdef USE_CUDA inline static cublasHandle_t cublas_handle() { return Get().cublas_handle_; } inline static curandGenerator_t curand_generator() { return Get().curand_generator_; } -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY // Returns the mode: running on CPU or GPU. inline static Brew mode() { return Get().mode_; } @@ -170,9 +173,11 @@ class Caffe { protected: #ifndef CPU_ONLY +#ifdef USE_CUDA cublasHandle_t cublas_handle_; curandGenerator_t curand_generator_; -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY shared_ptr random_generator_; Brew mode_; diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index cc5dcbad0ee..63d9bd3e370 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -112,7 +112,7 @@ void GradientChecker::CheckGradientSingle(Layer* layer, for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { Blob* current_blob = blobs_to_check[blob_id]; computed_gradient_blobs[blob_id].reset(new Blob()); - computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob); + computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob, Caffe::GetDefaultDeviceContext()); const int count = blobs_to_check[blob_id]->count(); const Dtype* diff = blobs_to_check[blob_id]->cpu_diff(); Dtype* computed_gradients = diff --git a/include/caffe/util/benchmark.hpp b/include/caffe/util/benchmark.hpp index d63582776ee..55dc27d5583 100644 --- a/include/caffe/util/benchmark.hpp +++ b/include/caffe/util/benchmark.hpp @@ -28,9 +28,11 @@ class Timer { bool running_; bool has_run_at_least_once_; #ifndef CPU_ONLY +#ifdef USE_CUDA cudaEvent_t start_gpu_; cudaEvent_t stop_gpu_; -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY boost::posix_time::ptime start_cpu_; boost::posix_time::ptime stop_cpu_; float elapsed_milliseconds_; diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 1fc82fa1c9b..323a14b1055 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -30,6 +30,7 @@ void classname::funcname##_##gpu(const vector*>& top, \ const vector*>& bottom) { NO_GPU; } \ #else // Normal GPU + CPU Caffe. +#ifdef USE_CUDA // Include CUDA macros and headers only if the backend is enabled #include #include @@ -103,6 +104,8 @@ inline int CAFFE_GET_BLOCKS(const int N) { } // namespace caffe -#endif // CPU_ONLY + +#endif // USE_CUDA +#endif // CPU_ONLY #endif // CAFFE_UTIL_DEVICE_ALTERNATE_H_ diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 2cacd8e72cd..2c79f89f2b3 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -176,7 +176,9 @@ void caffe_gpu_set(const int N, const Dtype alpha, Dtype *X); inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT(caffe/alt_fn) +#endif // USE_CUDA #else NO_GPU; #endif diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 26b5e7529bd..9c6ee45a0c6 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -91,12 +91,16 @@ void* Caffe::RNG::generator() { #else // Normal GPU + CPU Caffe. Caffe::Caffe() - : cublas_handle_(NULL), + : +#ifdef USE_CUDA + cublas_handle_(NULL), curand_generator_(NULL), +#endif // USE_CUDA random_generator_(), mode_(Caffe::CPU) { // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). +#ifdef USE_CUDA if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { LOG(ERROR)<< "Cannot create Cublas handle. Cublas won't be available."; } @@ -107,14 +111,17 @@ Caffe::Caffe() != CURAND_STATUS_SUCCESS) { LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; } +#endif // USE_CUDA } Caffe::~Caffe() { +#ifdef USE_CUDA if (cublas_handle_) CUBLAS_CHECK(cublasDestroy(cublas_handle_)); if (curand_generator_) { CURAND_CHECK(curandDestroyGenerator(curand_generator_)); } +#endif // USE_CUDA } void Caffe::set_random_seed(const unsigned int seed) { @@ -135,11 +142,11 @@ void Caffe::set_random_seed(const unsigned int seed) { } // RNG seed Get().random_generator_.reset(new RNG(seed)); -#endif +#endif // USE_CUDA } else { #ifdef USE_GREENTEA // TODO: Proper RNG and Seed for OpenCL -#endif +#endif // USE_GREENTEA } } @@ -364,8 +371,8 @@ void Caffe::DeviceQuery() { LOG(INFO)<< "Kernel execution timeout: " << (prop.kernelExecTimeoutEnabled ? "Yes" : "No"); } - } #endif // USE_CUDA + } else { #ifdef USE_GREENTEA diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index c910301b9b7..a1bb6e946c0 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -367,8 +367,8 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype) 1., - output + output_offset_ * g, - col_buff + col_offset_ * g, (Dtype) 1., + output + output_off + output_offset_ * g, + col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., weights + weight_offset_ * g); } #endif // USE_CUDA @@ -382,8 +382,8 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype) 1., (cl_mem) output, output_offset_ * g, - (cl_mem) col_buff, col_offset_ * g, (Dtype) 1., + (Dtype) 1., (cl_mem) output, output_off + output_offset_ * g, + (cl_mem) col_buff, (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., (cl_mem) weights, weight_offset_ * g); } #endif // USE_GREENTEA diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 674541333db..4047c064b76 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -60,7 +60,7 @@ void ContrastiveLossLayer::Forward_gpu( if (legacy_version) { loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); } else { - Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), + Dtype dist = std::max(margin - (Dtype)sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0)); loss += dist * dist; } diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 75930b63e63..6a20a5a51b7 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -385,11 +385,11 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, blob_pointer->Reshape(param.input_dim(top_id * 4), param.input_dim(top_id * 4 + 1), param.input_dim(top_id * 4 + 2), - param.input_dim(top_id * 4 + 3), Caffe::GetDeviceContext(layer_param->device())); + param.input_dim(top_id * 4 + 3), Caffe::GetDefaultDeviceContext()); } else { - blob_pointer->Reshape(param.input_shape(top_id),Caffe::GetDeviceContext(layer_param->device())); + blob_pointer->Reshape(param.input_shape(top_id),Caffe::GetDefaultDeviceContext()); } net_input_blob_indices_.push_back(blob_id); net_input_blobs_.push_back(blob_pointer.get()); diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index b4a6533f365..db04214f6a6 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -21,11 +21,13 @@ SyncedMemory::~SyncedMemory() { #ifndef CPU_ONLY if (gpu_ptr_) { if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA CUDA_CHECK(cudaFree(gpu_ptr_)); +#endif // USE_CUDA } else { #ifdef USE_GREENTEA clReleaseMemObject(cl_gpu_mem_); -#endif +#endif // USE_GREENTEA } } #endif // CPU_ONLY @@ -75,8 +77,10 @@ inline void SyncedMemory::to_gpu() { switch (head_) { case UNINITIALIZED: { if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); caffe_gpu_memset(size_, 0, gpu_ptr_); +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( @@ -106,10 +110,12 @@ inline void SyncedMemory::to_gpu() { } case HEAD_AT_CPU: { if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA if (gpu_ptr_ == NULL) { CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); } caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_); +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( @@ -135,7 +141,7 @@ inline void SyncedMemory::to_gpu() { if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, ctx); } -#endif +#endif // USE_GREENTEA } head_ = SYNCED; break; diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index c14b67cc0e9..3cb69c66ddb 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -26,9 +26,9 @@ class AccuracyLayerTest : public CPUDeviceTest { vector shape(2); shape[0] = 100; shape[1] = 10; - blob_bottom_data_->Reshape(shape); + blob_bottom_data_->Reshape(shape, Caffe::GetDefaultDeviceContext()); shape.resize(1); - blob_bottom_label_->Reshape(shape); + blob_bottom_label_->Reshape(shape, Caffe::GetDefaultDeviceContext()); FillBottoms(); blob_bottom_vec_.push_back(blob_bottom_data_); @@ -117,10 +117,10 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { - this->blob_bottom_data_->Reshape(2, 10, 4, 5); + this->blob_bottom_data_->Reshape(2, 10, 4, 5, Caffe::GetDefaultDeviceContext()); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; - this->blob_bottom_label_->Reshape(label_shape); + this->blob_bottom_label_->Reshape(label_shape, Caffe::GetDefaultDeviceContext()); this->FillBottoms(); LayerParameter layer_param; layer_param.mutable_accuracy_param()->set_axis(1); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 895c3d372ff..a06713a3147 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -16,7 +16,7 @@ template class ArgMaxLayerTest : public CPUDeviceTest { protected: ArgMaxLayerTest() - : blob_bottom_(new Blob(10, 20, 1, 1)), + : blob_bottom_(new Blob(10, 20, 1, 1, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()), top_k_(5) { Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index 7da6423b67c..e6cbd280abf 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -16,7 +16,7 @@ class BlobSimpleTest : public ::testing::Test { protected: BlobSimpleTest() : blob_(new Blob()), - blob_preshaped_(new Blob(2, 3, 4, 5)) {} + blob_preshaped_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())) {} virtual ~BlobSimpleTest() { delete blob_; delete blob_preshaped_; } Blob* const blob_; Blob* const blob_preshaped_; @@ -44,7 +44,7 @@ TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) { } TYPED_TEST(BlobSimpleTest, TestReshape) { - this->blob_->Reshape(2, 3, 4, 5); + this->blob_->Reshape(2, 3, 4, 5, Caffe::GetDefaultDeviceContext()); EXPECT_EQ(this->blob_->num(), 2); EXPECT_EQ(this->blob_->channels(), 3); EXPECT_EQ(this->blob_->height(), 4); @@ -59,7 +59,7 @@ TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { vector shape(2); shape[0] = 3; shape[1] = 2; - this->blob_->Reshape(shape); + this->blob_->Reshape(shape, Caffe::GetDefaultDeviceContext()); // (3 x 2) blob == (1 x 1 x 3 x 2) legacy blob blob_proto.set_num(1); @@ -84,7 +84,7 @@ TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { // Reshape to (1 x 3 x 2). shape.insert(shape.begin(), 1); - this->blob_->Reshape(shape); + this->blob_->Reshape(shape, Caffe::GetDefaultDeviceContext()); // (1 x 3 x 2) blob == (1 x 1 x 3 x 2) legacy blob blob_proto.set_num(1); @@ -95,7 +95,7 @@ TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { // Reshape to (2 x 3 x 2). shape[0] = 2; - this->blob_->Reshape(shape); + this->blob_->Reshape(shape, Caffe::GetDefaultDeviceContext()); // (2 x 3 x 2) blob != (1 x 1 x 3 x 2) legacy blob blob_proto.set_num(1); @@ -110,7 +110,7 @@ class BlobMathTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: BlobMathTest() - : blob_(new Blob(2, 3, 4, 5)), + : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), epsilon_(1e-6) {} virtual ~BlobMathTest() { delete blob_; } diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index c8caf5ac58e..32a483aca18 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -6,7 +6,7 @@ namespace caffe { #ifndef CPU_ONLY - cudaDeviceProp CAFFE_TEST_CUDA_PROP; +cudaDeviceProp CAFFE_TEST_CUDA_PROP; #endif } @@ -14,26 +14,23 @@ namespace caffe { using caffe::CAFFE_TEST_CUDA_PROP; #endif +using caffe::Caffe; + int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); caffe::GlobalInit(&argc, &argv); #ifndef CPU_ONLY - // Before starting testing, let's first print out a few cuda defice info. - int device; - cudaGetDeviceCount(&device); - cout << "Cuda number of devices: " << device << endl; + int device = 0; if (argc > 1) { // Use the given device device = atoi(argv[1]); - cudaSetDevice(device); - cout << "Setting to use device " << device << endl; } else if (CUDA_TEST_DEVICE >= 0) { // Use the device assigned in build configuration; but with a lower priority device = CUDA_TEST_DEVICE; } - cudaGetDevice(&device); - cout << "Current device id: " << device << endl; - cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device); + cout << "Setting to use device " << device << endl; + Caffe::SetDevice(device); + //cudaSetDevice(device); #endif // invoke the test. return RUN_ALL_TESTS(); diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index b3a61b0fd25..aa911c03f6d 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -30,8 +30,8 @@ TEST_F(CommonTest, TestBrewMode) { } TEST_F(CommonTest, TestRandSeedCPU) { - SyncedMemory data_a(10 * sizeof(int)); - SyncedMemory data_b(10 * sizeof(int)); + SyncedMemory data_a(10 * sizeof(int), Caffe::GetDefaultDeviceContext()); + SyncedMemory data_b(10 * sizeof(int), Caffe::GetDefaultDeviceContext()); Caffe::set_random_seed(1701); caffe_rng_bernoulli(10, 0.5, static_cast(data_a.mutable_cpu_data())); @@ -47,8 +47,8 @@ TEST_F(CommonTest, TestRandSeedCPU) { #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestRandSeedGPU) { - SyncedMemory data_a(10 * sizeof(unsigned int)); - SyncedMemory data_b(10 * sizeof(unsigned int)); + SyncedMemory data_a(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); + SyncedMemory data_b(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); Caffe::set_random_seed(1701); CURAND_CHECK(curandGenerate(Caffe::curand_generator(), static_cast(data_a.mutable_gpu_data()), 10)); diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index 662a50fa23b..1c0d30c80cd 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -19,9 +19,9 @@ class ConcatLayerTest : public MultiDeviceTest { protected: ConcatLayerTest() - : blob_bottom_0_(new Blob(2, 3, 6, 5)), - blob_bottom_1_(new Blob(2, 5, 6, 5)), - blob_bottom_2_(new Blob(5, 3, 6, 5)), + : blob_bottom_0_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), + blob_bottom_1_(new Blob(2, 5, 6, 5, Caffe::GetDefaultDeviceContext())), + blob_bottom_2_(new Blob(5, 3, 6, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) {} virtual void SetUp() { // fill the values diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 1e9447cbc51..61040768383 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,9 +22,9 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), - blob_bottom_data_j_(new Blob(512, 2, 1, 1)), - blob_bottom_y_(new Blob(512, 1, 1, 1)), + : blob_bottom_data_i_(new Blob(512, 2, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_data_j_(new Blob(512, 2, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_y_(new Blob(512, 1, 1, 1, Caffe::GetDefaultDeviceContext())), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 67d41fff844..0fd974e5f5f 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -106,8 +106,8 @@ class ConvolutionLayerTest : public MultiDeviceTest { protected: ConvolutionLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 4)), - blob_bottom_2_(new Blob(2, 3, 6, 4)), + : blob_bottom_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), + blob_bottom_2_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()), blob_top_2_(new Blob()) {} virtual void SetUp() { @@ -130,7 +130,7 @@ class ConvolutionLayerTest : public MultiDeviceTest { virtual Blob* MakeReferenceTop(Blob* top) { this->ref_blob_top_.reset(new Blob()); - this->ref_blob_top_->ReshapeLike(*top); + this->ref_blob_top_->ReshapeLike(*top, Caffe::GetDefaultDeviceContext()); return this->ref_blob_top_.get(); } @@ -283,7 +283,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { filler_param.set_value(1.); filler.reset(new GaussianFiller(filler_param)); filler->Fill(this->blob_bottom_); - this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); + this->blob_bottom_2_->CopyFrom(*this->blob_bottom_, Caffe::GetDefaultDeviceContext()); // Compute Sobel G_x operator as 3 x 3 convolution. LayerParameter layer_param; ConvolutionParameter* convolution_param = @@ -295,7 +295,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { shared_ptr > layer( new ConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); + layer->blobs()[0].reset(new Blob(1, 3, 3, 3, Caffe::GetDefaultDeviceContext())); Dtype* weights = layer->blobs()[0]->mutable_cpu_data(); for (int c = 0; c < 3; ++c) { int i = c * 9; // 3 x 3 filter @@ -328,7 +328,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { convolution_param->set_bias_term(false); layer.reset(new ConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); + layer->blobs()[0].reset(new Blob(1, 3, 3, 1, Caffe::GetDefaultDeviceContext())); Dtype* weights_1 = layer->blobs()[0]->mutable_cpu_data(); for (int c = 0; c < 3; ++c) { int i = c * 3; // 3 x 1 filter @@ -339,7 +339,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // (2) the [-1 0 1] row filter - blob_sep->CopyFrom(*this->blob_top_2_, false, true); + blob_sep->CopyFrom(*this->blob_top_2_, Caffe::GetDefaultDeviceContext(), false, true); sep_blob_bottom_vec.clear(); sep_blob_bottom_vec.push_back(blob_sep.get()); convolution_param->set_kernel_h(1); @@ -350,7 +350,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { convolution_param->set_bias_term(false); layer.reset(new ConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 1, 3)); + layer->blobs()[0].reset(new Blob(1, 3, 1, 3, Caffe::GetDefaultDeviceContext())); Dtype* weights_2 = layer->blobs()[0]->mutable_cpu_data(); for (int c = 0; c < 3; ++c) { int i = c * 3; // 1 x 3 filter diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index 16570e20356..10adbf04f9b 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -40,14 +40,14 @@ class DataTransformTest : public ::testing::Test { const Datum& datum, Phase phase) { // Get crop sequence with Caffe seed 1701. DataTransformer* transformer = - new DataTransformer(transform_param, phase); + new DataTransformer(transform_param, phase, Caffe::GetDefaultDeviceContext()); const int crop_size = transform_param.crop_size(); Caffe::set_random_seed(seed_); transformer->InitRand(); Blob* blob = - new Blob(1, datum.channels(), datum.height(), datum.width()); + new Blob(1, datum.channels(), datum.height(), datum.width(), Caffe::GetDefaultDeviceContext()); if (transform_param.crop_size() > 0) { - blob->Reshape(1, datum.channels(), crop_size, crop_size); + blob->Reshape(1, datum.channels(), crop_size, crop_size, Caffe::GetDefaultDeviceContext()); } vector > crop_sequence; @@ -90,9 +90,9 @@ TYPED_TEST(DataTransformTest, TestEmptyTransform) { Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width); + Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); DataTransformer* transformer = - new DataTransformer(transform_param, TEST); + new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -114,9 +114,9 @@ TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, 3, 4, 5); + Blob* blob = new Blob(1, 3, 4, 5, Caffe::GetDefaultDeviceContext()); DataTransformer* transformer = - new DataTransformer(transform_param, TEST); + new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -141,10 +141,10 @@ TYPED_TEST(DataTransformTest, TestCropSize) { Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); DataTransformer* transformer = - new DataTransformer(transform_param, TEST); + new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); Blob* blob = - new Blob(1, channels, crop_size, crop_size); + new Blob(1, channels, crop_size, crop_size, Caffe::GetDefaultDeviceContext()); for (int iter = 0; iter < this->num_iter_; ++iter) { transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -279,9 +279,9 @@ TYPED_TEST(DataTransformTest, TestMeanValue) { transform_param.add_mean_value(mean_value); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width); + Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); DataTransformer* transformer = - new DataTransformer(transform_param, TEST); + new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); for (int j = 0; j < blob->count(); ++j) { @@ -302,9 +302,9 @@ TYPED_TEST(DataTransformTest, TestMeanValues) { transform_param.add_mean_value(2); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width); + Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); DataTransformer* transformer = - new DataTransformer(transform_param, TEST); + new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); for (int c = 0; c < channels; ++c) { @@ -342,9 +342,9 @@ TYPED_TEST(DataTransformTest, TestMeanFile) { transform_param.set_mean_file(*mean_file); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width); + Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); DataTransformer* transformer = - new DataTransformer(transform_param, TEST); + new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); for (int j = 0; j < blob->count(); ++j) { diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp index fc63d5efbe3..625ad6803e8 100644 --- a/src/caffe/test/test_deconvolution_layer.cpp +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -21,8 +21,8 @@ class DeconvolutionLayerTest : public MultiDeviceTest { protected: DeconvolutionLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 4)), - blob_bottom_2_(new Blob(2, 3, 6, 4)), + : blob_bottom_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), + blob_bottom_2_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()), blob_top_2_(new Blob()) {} virtual void SetUp() { diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index be0c1347709..8410b7d70d5 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -19,9 +19,9 @@ class EltwiseLayerTest : public MultiDeviceTest { protected: EltwiseLayerTest() - : blob_bottom_a_(new Blob(2, 3, 4, 5)), - blob_bottom_b_(new Blob(2, 3, 4, 5)), - blob_bottom_c_(new Blob(2, 3, 4, 5)), + : blob_bottom_a_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + blob_bottom_b_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + blob_bottom_c_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { // fill the values Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp index 1949742bbcb..939e9f08b8e 100644 --- a/src/caffe/test/test_euclidean_loss_layer.cpp +++ b/src/caffe/test/test_euclidean_loss_layer.cpp @@ -21,8 +21,8 @@ class EuclideanLossLayerTest : public MultiDeviceTest { protected: EuclideanLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1)), - blob_bottom_label_(new Blob(10, 5, 1, 1)), + : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_label_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 728b8dc5f0d..91c6b39f916 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -12,7 +12,7 @@ template class ConstantFillerTest : public ::testing::Test { protected: ConstantFillerTest() - : blob_(new Blob(2, 3, 4, 5)), + : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), filler_param_() { filler_param_.set_value(10.); filler_.reset(new ConstantFiller(filler_param_)); @@ -40,7 +40,7 @@ template class UniformFillerTest : public ::testing::Test { protected: UniformFillerTest() - : blob_(new Blob(2, 3, 4, 5)), + : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), filler_param_() { filler_param_.set_min(1.); filler_param_.set_max(2.); @@ -69,7 +69,7 @@ template class PositiveUnitballFillerTest : public ::testing::Test { protected: PositiveUnitballFillerTest() - : blob_(new Blob(2, 3, 4, 5)), + : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), filler_param_() { filler_.reset(new PositiveUnitballFiller(filler_param_)); filler_->Fill(blob_); @@ -106,7 +106,7 @@ template class GaussianFillerTest : public ::testing::Test { protected: GaussianFillerTest() - : blob_(new Blob(2, 3, 4, 5)), + : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), filler_param_() { filler_param_.set_mean(10.); filler_param_.set_std(0.1); @@ -146,7 +146,7 @@ template class XavierFillerTest : public ::testing::Test { protected: XavierFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), + : blob_(new Blob(1000, 2, 4, 5, Caffe::GetDefaultDeviceContext())), filler_param_() { } virtual void test_params(FillerParameter_VarianceNorm variance_norm, @@ -195,7 +195,7 @@ template class MSRAFillerTest : public ::testing::Test { protected: MSRAFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), + : blob_(new Blob(1000, 2, 4, 5, Caffe::GetDefaultDeviceContext())), filler_param_() { } virtual void test_params(FillerParameter_VarianceNorm variance_norm, diff --git a/src/caffe/test/test_filter_layer.cpp b/src/caffe/test/test_filter_layer.cpp index c641b6ef6e8..a9628796c87 100644 --- a/src/caffe/test/test_filter_layer.cpp +++ b/src/caffe/test/test_filter_layer.cpp @@ -20,9 +20,9 @@ class FilterLayerTest : public MultiDeviceTest { protected: FilterLayerTest() - : blob_bottom_data_(new Blob(4, 3, 6, 4)), - blob_bottom_labels_(new Blob(4, 1, 1, 1)), - blob_bottom_selector_(new Blob(4, 1, 1, 1)), + : blob_bottom_data_(new Blob(4, 3, 6, 4, Caffe::GetDefaultDeviceContext())), + blob_bottom_labels_(new Blob(4, 1, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_selector_(new Blob(4, 1, 1, 1, Caffe::GetDefaultDeviceContext())), blob_top_data_(new Blob()), blob_top_labels_(new Blob()) {} virtual void SetUp() { diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 7b6757cba32..7b2dffcd40e 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -18,7 +18,7 @@ class FlattenLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: FlattenLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5)), + : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index c9135d64e70..01143ba8f52 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -161,9 +161,9 @@ class GradientBasedSolverTest : public MultiDeviceTest { (*updated_params)[i].reset(new Blob()); } Blob& updated_weights = *(*updated_params)[0]; - updated_weights.ReshapeLike(weights); + updated_weights.ReshapeLike(weights, Caffe::GetDefaultDeviceContext()); Blob& updated_bias = *(*updated_params)[1]; - updated_bias.ReshapeLike(bias); + updated_bias.ReshapeLike(bias, Caffe::GetDefaultDeviceContext()); for (int i = 0; i <= D; ++i) { // Compute the derivative with respect to the ith weight (i.e., the ith @@ -290,7 +290,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { vector > > noaccum_params(param_blobs.size()); for (int i = 0; i < param_blobs.size(); ++i) { noaccum_params[i].reset(new Blob()); - noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); + noaccum_params[i]->CopyFrom(*param_blobs[i], Caffe::GetDefaultDeviceContext(), false, true); } // Solve by equivalent accumulation of gradients over divided batches. this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, diff --git a/src/caffe/test/test_hinge_loss_layer.cpp b/src/caffe/test/test_hinge_loss_layer.cpp index b6a99022905..ce8b1f0faca 100644 --- a/src/caffe/test/test_hinge_loss_layer.cpp +++ b/src/caffe/test/test_hinge_loss_layer.cpp @@ -21,8 +21,8 @@ class HingeLossLayerTest : public MultiDeviceTest { protected: HingeLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1)), - blob_bottom_label_(new Blob(10, 1, 1, 1)), + : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_label_(new Blob(10, 1, 1, 1, Caffe::GetDefaultDeviceContext())), blob_top_loss_(new Blob()) { // fill the values Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index aee0d0681de..fa249f2f8e7 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -35,7 +35,7 @@ class Im2colKernelTest : public GPUDeviceTest { protected: Im2colKernelTest() // big so launches > 1024 threads - : blob_bottom_(new Blob(5, 500, 10, 10)), + : blob_bottom_(new Blob(5, 500, 10, 10, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()), blob_top_cpu_(new Blob()) { FillerParameter filler_param; @@ -78,12 +78,12 @@ TYPED_TEST(Im2colKernelTest, TestGPU) { this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, - this->width_col_); + this->width_col_, Caffe::GetDefaultDeviceContext()); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, - this->width_col_); + this->width_col_, Caffe::GetDefaultDeviceContext()); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); @@ -107,7 +107,7 @@ TYPED_TEST(Im2colKernelTest, TestGPU) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel<<>>( + im2col_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index f50abe103f8..5a3986f83b8 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -18,7 +18,7 @@ class Im2colLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: Im2colLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5)), + : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp index 7ec2f8073c1..117a2dc166d 100644 --- a/src/caffe/test/test_infogain_loss_layer.cpp +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -21,9 +21,9 @@ class InfogainLossLayerTest : public MultiDeviceTest { protected: InfogainLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1)), - blob_bottom_label_(new Blob(10, 1, 1, 1)), - blob_bottom_infogain_(new Blob(1, 1, 5, 5)), + : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_label_(new Blob(10, 1, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_infogain_(new Blob(1, 1, 5, 5, Caffe::GetDefaultDeviceContext())), blob_top_loss_(new Blob()) { Caffe::set_random_seed(1701); FillerParameter filler_param; diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp index c03df17383a..1924fda602b 100644 --- a/src/caffe/test/test_inner_product_layer.cpp +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -22,7 +22,7 @@ class InnerProductLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: InnerProductLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5)), + : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index c4e2f8ea7f2..dedfe1d1604 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -28,7 +28,7 @@ class LRNLayerTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 7, 3, 3); + blob_bottom_->Reshape(2, 7, 3, 3, Caffe::GetDefaultDeviceContext()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); @@ -53,7 +53,7 @@ void LRNLayerTest::ReferenceLRNForward( Blob* blob_top) { typedef typename TypeParam::Dtype Dtype; blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(), - blob_bottom.height(), blob_bottom.width()); + blob_bottom.height(), blob_bottom.width(), Caffe::GetDefaultDeviceContext()); Dtype* top_data = blob_top->mutable_cpu_data(); LRNParameter lrn_param = layer_param.lrn_param(); Dtype alpha = lrn_param.alpha(); diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index a095b544e17..0af0dd31edb 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -27,8 +27,8 @@ class MathFunctionsTest : public MultiDeviceTest { virtual void SetUp() { Caffe::set_random_seed(1701); - this->blob_bottom_->Reshape(11, 17, 19, 23); - this->blob_top_->Reshape(11, 17, 19, 23); + this->blob_bottom_->Reshape(11, 17, 19, 23, Caffe::GetDefaultDeviceContext()); + this->blob_top_->Reshape(11, 17, 19, 23, Caffe::GetDefaultDeviceContext()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index 611d9790863..fbaf2a86084 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -22,7 +22,7 @@ class MaxPoolingDropoutTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1703); - blob_bottom_->Reshape(2, 3, 6, 5); + blob_bottom_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); // fill the values FillerParameter filler_param; filler_param.set_value(1.); diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp index a79033f59f1..ad0d3ccebbd 100644 --- a/src/caffe/test/test_memory_data_layer.cpp +++ b/src/caffe/test/test_memory_data_layer.cpp @@ -31,8 +31,8 @@ class MemoryDataLayerTest : public MultiDeviceTest { // pick random input data FillerParameter filler_param; GaussianFiller filler(filler_param); - data_->Reshape(batches_ * batch_size_, channels_, height_, width_); - labels_->Reshape(batches_ * batch_size_, 1, 1, 1); + data_->Reshape(batches_ * batch_size_, channels_, height_, width_, Caffe::GetDefaultDeviceContext()); + labels_->Reshape(batches_ * batch_size_, 1, 1, 1, Caffe::GetDefaultDeviceContext()); filler.Fill(this->data_); filler.Fill(this->labels_); } diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp new file mode 100644 index 00000000000..09bc4ae383c --- /dev/null +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -0,0 +1,123 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MergeCropLayerTest : public GPUDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MergeCropLayerTest() + : blob_bottom_a_(new Blob()), + blob_bottom_b_(new Blob()), + blob_top_(new Blob()) { + } + + virtual void SetUp() { + blob_bottom_a_->Reshape(2, 3, 4, 2, Caffe::GetDefaultDeviceContext()); + blob_bottom_b_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); + // fill the values + blob_bottom_vec_.push_back(blob_bottom_a_); + blob_bottom_vec_.push_back(blob_bottom_b_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~MergeCropLayerTest() { + delete blob_bottom_a_; + delete blob_bottom_b_; + delete blob_top_; + } + + void TestForward() { + + int a_h = blob_bottom_a_->height(); + int a_w = blob_bottom_a_->width(); + int a_c = blob_bottom_a_->channels(); + + for (int n = 0; n < blob_bottom_a_->num(); ++n) { + for (int c = 0; c < a_c; ++c) { + for (int i = 0; i < a_h * a_w; ++i) { + blob_bottom_a_->mutable_cpu_data()[i + c * a_h * a_w + + n * a_h * a_w * a_c] = i + 100 * a_c; + } + } + } + + int b_h = blob_bottom_b_->height(); + int b_w = blob_bottom_b_->width(); + int b_c = blob_bottom_b_->channels(); + + for (int n = 0; n < blob_bottom_b_->num(); ++n) { + for (int c = 0; c < b_c; ++c) { + for (int i = 0; i < b_h * b_w; ++i) { + blob_bottom_b_->mutable_cpu_data()[i + c * b_h * b_w + + n * b_h * b_w * b_c] = -(i + 100 * b_c); + } + } + } + + LayerParameter layer_param; + MergeCropLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); + EXPECT_EQ( + this->blob_top_->channels(), + this->blob_bottom_a_->channels() + this->blob_bottom_b_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_a_->height()); + EXPECT_EQ(this->blob_top_->width(), 2); + + layer.Forward(blob_bottom_vec_, blob_top_vec_); + + for (int i = 0; i < 5; i += 8) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); + } + } + + void TestBackward() { + + } + + Blob* const blob_bottom_a_; + Blob* const blob_bottom_b_; + Blob* const blob_top_; + + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(MergeCropLayerTest, TestDtypesAndDevices); + +TYPED_TEST(MergeCropLayerTest, TestSetup){ +typedef typename TypeParam::Dtype Dtype; +LayerParameter layer_param; +MergeCropLayer layer(layer_param); +layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + +EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); +EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_a_->channels() + this->blob_bottom_b_->channels()); +EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_a_->height()); +EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(MergeCropLayerTest, TestForward){ +this->TestForward(); +} + +TYPED_TEST(MergeCropLayerTest, TestBackward){ +this->TestBackward(); +} + +} + // namespace caffe diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index b2db984feb1..c318e127350 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -19,8 +19,8 @@ template class MultinomialLogisticLossLayerTest : public CPUDeviceTest { protected: MultinomialLogisticLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1)), - blob_bottom_label_(new Blob(10, 1, 1, 1)), + : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_label_(new Blob(10, 1, 1, 1, Caffe::GetDefaultDeviceContext())), blob_top_loss_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_mvn_layer.cpp b/src/caffe/test/test_mvn_layer.cpp index 933b4326417..5682f87764c 100644 --- a/src/caffe/test/test_mvn_layer.cpp +++ b/src/caffe/test/test_mvn_layer.cpp @@ -18,7 +18,7 @@ class MVNLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: MVNLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5)), + : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 782a96bc9b6..39d57ce2a2b 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -38,7 +38,7 @@ class NetTest : public MultiDeviceTest { const bool kReshape = true; for (int i = 0; i < net_blobs.size(); ++i) { (*blobs_copy)[i].reset(new Blob()); - (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); + (*blobs_copy)[i]->CopyFrom(*net_blobs[i], Caffe::GetDefaultDeviceContext(), copy_diff, kReshape); } } @@ -51,7 +51,7 @@ class NetTest : public MultiDeviceTest { const bool kReshape = true; for (int i = 0; i < net_params.size(); ++i) { (*params_copy)[i].reset(new Blob()); - (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); + (*params_copy)[i]->CopyFrom(*net_params[i], Caffe::GetDefaultDeviceContext(), copy_diff, kReshape); } } @@ -873,7 +873,7 @@ TYPED_TEST(NetTest, TestLossWeightMidNet) { const bool kCopyDiff = true; const bool kReshape = true; Blob data_grad; - data_grad.CopyFrom(*this->net_->blob_by_name("data"), kCopyDiff, kReshape); + data_grad.CopyFrom(*this->net_->blob_by_name("data"), Caffe::GetDefaultDeviceContext(), kCopyDiff, kReshape); // Check that the loss is non-trivial, otherwise the test doesn't prove much. const Dtype kMinLossAbsValue = 1e-2; ASSERT_GE(fabs(loss), kMinLossAbsValue); @@ -1115,8 +1115,8 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { Blob shared_params; const bool reshape = true; const bool copy_diff = false; - shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); - shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); + shared_params.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), copy_diff, reshape); + shared_params.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), !copy_diff, reshape); const int count = ip1_weights->count(); // Make sure the diffs are non-trivial. for (int i = 0; i < count; ++i) { @@ -1152,11 +1152,11 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { this->net_->Backward(); // Compute the expected update. Blob unshared_params1; - unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape); - unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape); + unshared_params1.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), copy_diff, reshape); + unshared_params1.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), !copy_diff, reshape); Blob unshared_params2; - unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); - unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); + unshared_params2.CopyFrom(*ip2_weights, Caffe::GetDefaultDeviceContext(), copy_diff, reshape); + unshared_params2.CopyFrom(*ip2_weights, Caffe::GetDefaultDeviceContext(), !copy_diff, reshape); // Make sure the diffs are non-trivial and sum to the diff in the shared net. for (int i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); @@ -1203,7 +1203,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { Blob shared_params; const bool kReshape = true; const bool kCopyDiff = false; - shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); + shared_params.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), kCopyDiff, kReshape); const int count = ip1_weights->count(); // Write the net to a NetParameter, as in Solver::Snapshot. @@ -1318,10 +1318,10 @@ TYPED_TEST(NetTest, TestFromTo) { // Run Forward and Backward, recording the data diff and loss. Blob data; - data.ReshapeLike(*this->net_->blob_by_name("data")); + data.ReshapeLike(*this->net_->blob_by_name("data"), Caffe::GetDefaultDeviceContext()); this->net_->ForwardPrefilled(); this->net_->Backward(); - data.CopyFrom(*this->net_->blob_by_name("data"), true, true); + data.CopyFrom(*this->net_->blob_by_name("data"), Caffe::GetDefaultDeviceContext(), true, true); const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data(); Dtype loss = *loss_ptr; @@ -2273,8 +2273,8 @@ TYPED_TEST(NetTest, TestReshape) { FillerParameter filler_param; filler_param.set_std(1); GaussianFiller filler(filler_param); - Blob blob1(4, 3, 9, 11); - Blob blob2(2, 3, 12, 10); + Blob blob1(4, 3, 9, 11, Caffe::GetDefaultDeviceContext()); + Blob blob2(2, 3, 12, 10, Caffe::GetDefaultDeviceContext()); filler.Fill(&blob1); filler.Fill(&blob2); @@ -2282,28 +2282,28 @@ TYPED_TEST(NetTest, TestReshape) { Blob* input_blob = this->net_->input_blobs()[0]; Blob* output_blob = this->net_->output_blobs()[0]; input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width()); + blob1.width(), Caffe::GetDefaultDeviceContext()); caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); // call backward just to make sure it runs this->net_->Backward(); Blob output1(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width()); + output_blob->height(), output_blob->width(), Caffe::GetDefaultDeviceContext()); caffe_copy(output1.count(), output_blob->cpu_data(), output1.mutable_cpu_data()); input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width()); + blob2.width(), Caffe::GetDefaultDeviceContext()); caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); Blob output2(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width()); + output_blob->height(), output_blob->width(), Caffe::GetDefaultDeviceContext()); caffe_copy(output2.count(), output_blob->cpu_data(), output2.mutable_cpu_data()); input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width()); + blob1.width(), Caffe::GetDefaultDeviceContext()); caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); @@ -2312,7 +2312,7 @@ TYPED_TEST(NetTest, TestReshape) { } input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width()); + blob2.width(), Caffe::GetDefaultDeviceContext()); caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c6e4d27b903..4f7adc4477a 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -21,7 +21,7 @@ class NeuronLayerTest : public MultiDeviceTest { protected: NeuronLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5)), + : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values @@ -605,7 +605,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { shared_ptr > blob_top_2(new Blob()); blob_bottom_vec_2.push_back(blob_bottom_2.get()); blob_top_vec_2.push_back(blob_top_2.get()); - blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); + blob_bottom_2->CopyFrom(*this->blob_bottom_, Caffe::GetDefaultDeviceContext(), false, true); // SetUp layers prelu.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); relu.SetUp(blob_bottom_vec_2, blob_top_vec_2); @@ -617,7 +617,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { } // Check backward shared_ptr > tmp_blob(new Blob()); - tmp_blob->ReshapeLike(*blob_top_2.get()); + tmp_blob->ReshapeLike(*blob_top_2.get(), Caffe::GetDefaultDeviceContext()); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(tmp_blob.get()); @@ -657,7 +657,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { blob_bottom_vec_2.push_back(blob_bottom_2.get()); blob_middle_vec_2.push_back(blob_middle_2.get()); blob_top_vec_2.push_back(blob_top_2.get()); - blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); + blob_bottom_2->CopyFrom(*this->blob_bottom_, Caffe::GetDefaultDeviceContext(), false, true); // SetUp layers ip.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); prelu.SetUp(this->blob_top_vec_, this->blob_top_vec_); @@ -677,7 +677,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { } // Fill top diff with random numbers shared_ptr > tmp_blob(new Blob()); - tmp_blob->ReshapeLike(*blob_top_2.get()); + tmp_blob->ReshapeLike(*blob_top_2.get(), Caffe::GetDefaultDeviceContext()); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(tmp_blob.get()); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 69f2d5c1135..725e8f27d73 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -24,7 +24,7 @@ class PoolingLayerTest : public MultiDeviceTest { blob_top_mask_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 3, 6, 5); + blob_bottom_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); @@ -50,7 +50,7 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; - blob_bottom_->Reshape(num, channels, 3, 5); + blob_bottom_->Reshape(num, channels, 3, 5, Caffe::GetDefaultDeviceContext()); // Input: 2x 2 channels of: // [1 2 5 2 3] // [9 4 1 4 8] @@ -123,7 +123,7 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; - blob_bottom_->Reshape(num, channels, 6, 6); + blob_bottom_->Reshape(num, channels, 6, 6, Caffe::GetDefaultDeviceContext()); // Input: 2x 2 channels of: // [35 1 6 26 19 24] // [ 3 32 7 21 23 25] @@ -248,7 +248,7 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; - blob_bottom_->Reshape(num, channels, 6, 6); + blob_bottom_->Reshape(num, channels, 6, 6, Caffe::GetDefaultDeviceContext()); // Input: 2x 2 channels of: // [35 1 6 26 19 24] // [ 3 32 7 21 23 25] @@ -480,7 +480,7 @@ TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) { pooling_param->set_stride(2); pooling_param->set_pad(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - this->blob_bottom_->Reshape(1, 1, 3, 3); + this->blob_bottom_->Reshape(1, 1, 3, 3, Caffe::GetDefaultDeviceContext()); // Input: // [ 1 2 4 ] // [ 2 3 2 ] @@ -545,7 +545,7 @@ TYPED_TEST(PoolingLayerTest, TestForwardAve) { pooling_param->set_stride(1); pooling_param->set_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); - this->blob_bottom_->Reshape(1, 1, 3, 3); + this->blob_bottom_->Reshape(1, 1, 3, 3, Caffe::GetDefaultDeviceContext()); FillerParameter filler_param; filler_param.set_value(Dtype(2)); ConstantFiller filler(filler_param); diff --git a/src/caffe/test/test_power_layer.cpp b/src/caffe/test/test_power_layer.cpp index 76c9e857f36..68c2d76c30e 100644 --- a/src/caffe/test/test_power_layer.cpp +++ b/src/caffe/test/test_power_layer.cpp @@ -19,7 +19,7 @@ class PowerLayerTest : public MultiDeviceTest { protected: PowerLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5)), + : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 98424c06bfc..49c6d06e565 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -18,10 +18,10 @@ class RandomNumberGeneratorTest : public ::testing::Test { : mean_bound_multiplier_(3.8), // ~99.99% confidence for test failure. sample_size_(10000), seed_(1701), - data_(new SyncedMemory(sample_size_ * sizeof(Dtype))), - data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype))), - int_data_(new SyncedMemory(sample_size_ * sizeof(int))), - int_data_2_(new SyncedMemory(sample_size_ * sizeof(int))) {} + data_(new SyncedMemory(sample_size_ * sizeof(Dtype), Caffe::GetDefaultDeviceContext())), + data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype), Caffe::GetDefaultDeviceContext())), + int_data_(new SyncedMemory(sample_size_ * sizeof(int), Caffe::GetDefaultDeviceContext())), + int_data_2_(new SyncedMemory(sample_size_ * sizeof(int), Caffe::GetDefaultDeviceContext())) {} virtual void SetUp() { Caffe::set_random_seed(this->seed_); diff --git a/src/caffe/test/test_reduction_layer.cpp b/src/caffe/test/test_reduction_layer.cpp index f568a18089a..378b27960c4 100644 --- a/src/caffe/test/test_reduction_layer.cpp +++ b/src/caffe/test/test_reduction_layer.cpp @@ -19,7 +19,7 @@ class ReductionLayerTest : public MultiDeviceTest { protected: ReductionLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5)), + : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { // fill the values Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_reshape_layer.cpp b/src/caffe/test/test_reshape_layer.cpp index 9d08ec60d4e..d1d608a14be 100644 --- a/src/caffe/test/test_reshape_layer.cpp +++ b/src/caffe/test/test_reshape_layer.cpp @@ -18,7 +18,7 @@ class ReshapeLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: ReshapeLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5)), + : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; @@ -251,7 +251,7 @@ TYPED_TEST(ReshapeLayerTest, TestForwardAfterReshape) { // We know the above produced the correct result from TestForward. // Reshape the bottom and call layer.Reshape, then try again. vector new_bottom_shape(1, 2 * 3 * 6 * 5); - this->blob_bottom_->Reshape(new_bottom_shape); + this->blob_bottom_->Reshape(new_bottom_shape, Caffe::GetDefaultDeviceContext()); layer.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp index e5737e43f6e..a4c286210a6 100644 --- a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -21,8 +21,8 @@ class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest { protected: SigmoidCrossEntropyLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1)), - blob_bottom_targets_(new Blob(10, 5, 1, 1)), + : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), + blob_bottom_targets_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), blob_top_loss_(new Blob()) { // Fill the data vector FillerParameter data_filler_param; diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index ccd03646d19..a1b7ac9beb8 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -19,7 +19,7 @@ class SliceLayerTest : public MultiDeviceTest { protected: SliceLayerTest() - : blob_bottom_(new Blob(6, 12, 2, 3)), + : blob_bottom_(new Blob(6, 12, 2, 3, Caffe::GetDefaultDeviceContext())), blob_top_0_(new Blob()), blob_top_1_(new Blob()), blob_top_2_(new Blob()) {} @@ -38,7 +38,7 @@ class SliceLayerTest : public MultiDeviceTest { } virtual void ReduceBottomBlobSize() { - blob_bottom_->Reshape(4, 5, 2, 2); + blob_bottom_->Reshape(4, 5, 2, 2, Caffe::GetDefaultDeviceContext()); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 996da4b8f7c..435a46007de 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -19,7 +19,7 @@ class SoftmaxLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: SoftmaxLayerTest() - : blob_bottom_(new Blob(2, 10, 2, 3)), + : blob_bottom_(new Blob(2, 10, 2, 3, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp index 1498d5c5ce1..318c1ddcbda 100644 --- a/src/caffe/test/test_softmax_with_loss_layer.cpp +++ b/src/caffe/test/test_softmax_with_loss_layer.cpp @@ -24,8 +24,8 @@ class SoftmaxWithLossLayerTest : public MultiDeviceTest { protected: SoftmaxWithLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 2, 3)), - blob_bottom_label_(new Blob(10, 1, 2, 3)), + : blob_bottom_data_(new Blob(10, 5, 2, 3, Caffe::GetDefaultDeviceContext())), + blob_bottom_label_(new Blob(10, 1, 2, 3, Caffe::GetDefaultDeviceContext())), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp index be5204bfc3e..5cbe6e6ba80 100644 --- a/src/caffe/test/test_split_layer.cpp +++ b/src/caffe/test/test_split_layer.cpp @@ -23,7 +23,7 @@ class SplitLayerTest : public MultiDeviceTest { protected: SplitLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5)), + : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), blob_top_a_(new Blob()), blob_top_b_(new Blob()) { // fill the values diff --git a/src/caffe/test/test_spp_layer.cpp b/src/caffe/test/test_spp_layer.cpp index b2585f1a5fa..95949510635 100644 --- a/src/caffe/test/test_spp_layer.cpp +++ b/src/caffe/test/test_spp_layer.cpp @@ -26,9 +26,9 @@ class SPPLayerTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 3, 9, 8); - blob_bottom_2_->Reshape(4, 3, 1024, 765); - blob_bottom_3_->Reshape(10, 3, 7, 7); + blob_bottom_->Reshape(2, 3, 9, 8, Caffe::GetDefaultDeviceContext()); + blob_bottom_2_->Reshape(4, 3, 1024, 765, Caffe::GetDefaultDeviceContext()); + blob_bottom_3_->Reshape(10, 3, 7, 7, Caffe::GetDefaultDeviceContext()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index f84464c322c..d7ff1236af1 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -26,7 +26,7 @@ class StochasticPoolingLayerTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 3, 6, 5); + blob_bottom_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); // fill the values FillerParameter filler_param; filler_param.set_min(0.1); diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index b946233d07c..6b07cc1b12d 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -15,10 +15,10 @@ namespace caffe { class SyncedMemoryTest : public ::testing::Test {}; TEST_F(SyncedMemoryTest, TestInitialization) { - SyncedMemory mem(10); + SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); EXPECT_EQ(mem.head(), SyncedMemory::UNINITIALIZED); EXPECT_EQ(mem.size(), 10); - SyncedMemory* p_mem = new SyncedMemory(10 * sizeof(float)); + SyncedMemory* p_mem = new SyncedMemory(10 * sizeof(float), Caffe::GetDefaultDeviceContext()); EXPECT_EQ(p_mem->size(), 10 * sizeof(float)); delete p_mem; } @@ -26,7 +26,7 @@ TEST_F(SyncedMemoryTest, TestInitialization) { #ifndef CPU_ONLY // GPU test TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) { - SyncedMemory mem(10); + SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); EXPECT_TRUE(mem.cpu_data()); EXPECT_TRUE(mem.gpu_data()); EXPECT_TRUE(mem.mutable_cpu_data()); @@ -36,7 +36,7 @@ TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) { #endif TEST_F(SyncedMemoryTest, TestAllocationCPU) { - SyncedMemory mem(10); + SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); EXPECT_TRUE(mem.cpu_data()); EXPECT_TRUE(mem.mutable_cpu_data()); } @@ -44,7 +44,7 @@ TEST_F(SyncedMemoryTest, TestAllocationCPU) { #ifndef CPU_ONLY // GPU test TEST_F(SyncedMemoryTest, TestAllocationGPU) { - SyncedMemory mem(10); + SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); EXPECT_TRUE(mem.gpu_data()); EXPECT_TRUE(mem.mutable_gpu_data()); } @@ -52,7 +52,7 @@ TEST_F(SyncedMemoryTest, TestAllocationGPU) { #endif TEST_F(SyncedMemoryTest, TestCPUWrite) { - SyncedMemory mem(10); + SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); void* cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); caffe_memset(mem.size(), 1, cpu_data); @@ -71,7 +71,7 @@ TEST_F(SyncedMemoryTest, TestCPUWrite) { #ifndef CPU_ONLY // GPU test TEST_F(SyncedMemoryTest, TestGPURead) { - SyncedMemory mem(10); + SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); void* cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); caffe_memset(mem.size(), 1, cpu_data); @@ -101,7 +101,7 @@ TEST_F(SyncedMemoryTest, TestGPURead) { } TEST_F(SyncedMemoryTest, TestGPUWrite) { - SyncedMemory mem(10); + SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); void* gpu_data = mem.mutable_gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); caffe_gpu_memset(mem.size(), 1, gpu_data); diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp index 5dc92832fc8..5e87c620c12 100644 --- a/src/caffe/test/test_tanh_layer.cpp +++ b/src/caffe/test/test_tanh_layer.cpp @@ -33,7 +33,7 @@ class TanHLayerTest : public MultiDeviceTest { protected: TanHLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5)), + : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { Caffe::set_random_seed(1701); FillerParameter filler_param; diff --git a/src/caffe/test/test_threshold_layer.cpp b/src/caffe/test/test_threshold_layer.cpp index 05ce82120e6..b4c21c95463 100644 --- a/src/caffe/test/test_threshold_layer.cpp +++ b/src/caffe/test/test_threshold_layer.cpp @@ -16,7 +16,7 @@ class ThresholdLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: ThresholdLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5)), + : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 8770f309951..38a343ed00e 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -20,9 +20,9 @@ class GemmTest : public ::testing::Test {}; TYPED_TEST_CASE(GemmTest, TestDtypes); TYPED_TEST(GemmTest, TestGemmCPUGPU) { - Blob A(1, 1, 2, 3); - Blob B(1, 1, 3, 4); - Blob C(1, 1, 2, 4); + Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); + Blob B(1, 1, 3, 4, Caffe::GetDefaultDeviceContext()); + Blob C(1, 1, 2, 4, Caffe::GetDefaultDeviceContext()); TypeParam data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6}; TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12}; @@ -44,7 +44,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { } // Test when we have a transposed A - A.Reshape(1, 1, 3, 2); + A.Reshape(1, 1, 3, 2, Caffe::GetDefaultDeviceContext()); caffe_copy(6, A_reshape_data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -58,7 +58,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { } // Test when we have a transposed A and a transposed B too - B.Reshape(1, 1, 4, 3); + B.Reshape(1, 1, 4, 3, Caffe::GetDefaultDeviceContext()); caffe_copy(12, B_reshape_data, B.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -72,7 +72,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { } // Test when we have a transposed B - A.Reshape(1, 1, 2, 3); + A.Reshape(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); caffe_copy(6, data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -91,9 +91,9 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { TYPED_TEST(GemmTest, TestGemvCPUGPU) { - Blob A(1, 1, 2, 3); - Blob x(1, 1, 1, 3); - Blob y(1, 1, 1, 2); + Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); + Blob x(1, 1, 1, 3, Caffe::GetDefaultDeviceContext()); + Blob y(1, 1, 1, 2, Caffe::GetDefaultDeviceContext()); TypeParam data[6] = {1, 2, 3, 4, 5, 6}; TypeParam result_2[2] = {14, 32}; TypeParam result_3[3] = {9, 12, 15}; diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index 1d269c351c1..7222f60b884 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -15,8 +15,10 @@ Timer::Timer() Timer::~Timer() { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaEventDestroy(start_gpu_)); CUDA_CHECK(cudaEventDestroy(stop_gpu_)); +#endif // USE_CUDA #else NO_GPU; #endif @@ -27,7 +29,9 @@ void Timer::Start() { if (!running()) { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(start_gpu_, 0)); +#endif // USE_CUDA #else NO_GPU; #endif @@ -43,8 +47,10 @@ void Timer::Stop() { if (running()) { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); CUDA_CHECK(cudaEventSynchronize(stop_gpu_)); +#endif // USE_CUDA #else NO_GPU; #endif @@ -66,10 +72,12 @@ float Timer::MicroSeconds() { } if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, stop_gpu_)); // Cuda only measure milliseconds elapsed_microseconds_ = elapsed_milliseconds_ * 1000; +#endif // USE_CUDA #else NO_GPU; #endif @@ -89,8 +97,10 @@ float Timer::MilliSeconds() { } if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, stop_gpu_)); +#endif // USE_CUDA #else NO_GPU; #endif @@ -108,8 +118,10 @@ void Timer::Init() { if (!initted()) { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaEventCreate(&start_gpu_)); CUDA_CHECK(cudaEventCreate(&stop_gpu_)); +#endif // USE_CUDA #else NO_GPU; #endif diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 0aab6b17b85..a337027ad3c 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -87,8 +87,10 @@ void caffe_copy(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY +#ifdef USE_CUDA // NOLINT_NEXT_LINE(caffe/alt_fn) CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault)); +#endif // USE_CUDA #else NO_GPU; #endif From dd8b5bf65f0e7d55bc42edb31c3c40a27d245b08 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 13 Jun 2015 01:55:57 +0200 Subject: [PATCH 046/254] 70% layer support on OpenCL. --- .../greentea/greentea_math_functions.hpp | 5 + src/caffe/greentea/cl_kernels.cpp | 16 +- src/caffe/greentea/cl_kernels/activation.cl | 36 +++ src/caffe/greentea/cl_kernels/eltwise.cl | 45 +++ src/caffe/greentea/cl_kernels/math.cl | 8 + src/caffe/greentea/cl_kernels/slice.cl | 26 ++ .../greentea/greentea_math_functions.cpp | 24 ++ src/caffe/layers/base_conv_layer.cpp | 2 +- src/caffe/layers/eltwise_layer.cu | 271 +++++++++++++----- src/caffe/layers/euclidean_loss_layer.cu | 70 +++-- src/caffe/layers/exp_layer.cu | 70 +++-- src/caffe/layers/log_layer.cu | 107 +++++-- src/caffe/layers/sigmoid_layer.cu | 86 ++++-- src/caffe/layers/slice_layer.cu | 90 ++++-- src/caffe/layers/tanh_layer.cu | 85 ++++-- src/caffe/proto/caffe.proto | 4 +- 16 files changed, 754 insertions(+), 191 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/eltwise.cl create mode 100644 src/caffe/greentea/cl_kernels/slice.cl diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index a5c954b4d18..c3b9b9ca92d 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -117,6 +117,11 @@ void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, const int offa, const Dtype alpha, cl_mem y, const int offy); +template +void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, + const int offa, cl_mem y, + const int offy); + template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, cl_mem y, const int offy); diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index e7129d66aa2..37387d7223c 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,7 +6,7 @@ namespace caffe { std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; @@ -14,15 +14,17 @@ std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const float scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const float scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; +std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; +std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; @@ -30,13 +32,15 @@ std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const float scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const float scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; +std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; +std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { std::stringstream ss; @@ -50,6 +54,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << contrastive_loss_float << "\n\n"; ss << convolution_sk_float << "\n\n"; ss << dropout_float << "\n\n"; + ss << eltwise_float << "\n\n"; ss << fillbuffer_float << "\n\n"; ss << im2col_float << "\n\n"; ss << im2col_sk_float << "\n\n"; @@ -57,6 +62,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << mergecrop_float << "\n\n"; ss << pooling_float << "\n\n"; ss << pooling_sk_float << "\n\n"; + ss << slice_float << "\n\n"; ss << softmax_loss_float << "\n\n"; #ifdef GREENTEA_DOUBLE_SUPPORT ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; @@ -70,6 +76,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << contrastive_loss_double << "\n\n"; ss << convolution_sk_double << "\n\n"; ss << dropout_double << "\n\n"; + ss << eltwise_double << "\n\n"; ss << fillbuffer_double << "\n\n"; ss << im2col_double << "\n\n"; ss << im2col_sk_double << "\n\n"; @@ -77,6 +84,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << mergecrop_double << "\n\n"; ss << pooling_double << "\n\n"; ss << pooling_sk_double << "\n\n"; + ss << slice_double << "\n\n"; ss << softmax_loss_double << "\n\n"; ss << "#endif" << "\n\n"; #endif // GREENTEA_DOUBLE_SUPPORT diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index d56be34807f..0bfade9c9f8 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -22,6 +22,42 @@ __kernel void TEMPLATE(relu_backward,Dtype)(const int n, } } +__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = tanh(in[index]); + } +} + +__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + __kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, __global const Dtype* in, __global Dtype* out) { diff --git a/src/caffe/greentea/cl_kernels/eltwise.cl b/src/caffe/greentea/cl_kernels/eltwise.cl new file mode 100644 index 00000000000..fe99a4e649b --- /dev/null +++ b/src/caffe/greentea/cl_kernels/eltwise.cl @@ -0,0 +1,45 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(eltwise_max_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int blob_idx, + __global Dtype* top_data, + __global int* mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int blob_idx, + __global const int* mask, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index 83cd873b8f7..7ac9ea4424a 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -64,6 +64,14 @@ __kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, } } +__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = log(a[offa + index]); + } +} + __kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, const int offa, Dtype alpha, __global Dtype* y, diff --git a/src/caffe/greentea/cl_kernels/slice.cl b/src/caffe/greentea/cl_kernels/slice.cl new file mode 100644 index 00000000000..e9fa61e3fe0 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/slice.cl @@ -0,0 +1,26 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(slice,Dtype)(const int nthreads, + __global const Dtype* in_data, + const int forward, const int num_slices, + const int slice_size, + const int bottom_slice_axis, + const int top_slice_axis, + const int offset_slice_axis, + __global Dtype* out_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward == 1) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index adda60820ca..7eef6590b4d 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -796,6 +796,30 @@ template void greentea_gpu_powx(const int ctx_id, const int N, const double alpha, cl_mem y, const int offy); +template +void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, + const int offa, cl_mem y, + const int offy) { + + viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + + viennacl::ocl::kernel &oclk_log = program.get_kernel( + CL_KERNEL_SELECT("log")); + viennacl::ocl::enqueue( + oclk_log(N, WrapHandle(a, ctx), offa, WrapHandle(y, ctx), offy), + ctx.get_queue()); +} + +template void greentea_gpu_log(const int ctx_id, const int N, + const cl_mem a, const int offa, + cl_mem y, + const int offy); +template void greentea_gpu_log(const int ctx_id, const int N, + const cl_mem a, const int offa, + cl_mem y, + const int offy); + template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, cl_mem y, const int offy) { diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index a1bb6e946c0..13042eab3c8 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -341,7 +341,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, (cl_mem) weights, weight_offset_ * g, (cl_mem) output, output_off + output_offset_ * g, (Dtype) 0., (cl_mem) col_buff, - is_1x1_ ? input_off : 0 + col_offset_ * g); + (is_1x1_ ? input_off : 0) + col_offset_ * g); } if (!is_1x1_) { greentea_conv_col2im_gpu(col_buff, 0, input, input_off); diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 799db672c70..62693823e17 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -5,13 +5,20 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +#ifdef USE_CUDA +template __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, - const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, - int* mask) { - CUDA_KERNEL_LOOP(index, nthreads) { + const Dtype* bottom_data_b, const int blob_idx, + Dtype* top_data, int* mask) { + CUDA_KERNEL_LOOP(index, nthreads) + { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { @@ -30,48 +37,103 @@ __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, } } } +#endif // USE_CUDA -template +template void EltwiseLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); - switch (op_) { - case EltwiseParameter_EltwiseOp_PROD: - caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), - top_data); - for (int i = 2; i < bottom.size(); ++i) { - caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); - } - break; - case EltwiseParameter_EltwiseOp_SUM: - caffe_gpu_set(count, Dtype(0.), top_data); - // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? - for (int i = 0; i < bottom.size(); ++i) { - caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), + top_data); + for (int i = 2; i < bottom.size(); ++i) { + caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_SUM: + caffe_gpu_set(count, Dtype(0.), top_data); + // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? + for (int i = 0; i < bottom.size(); ++i) { + caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_MAX: + mask = max_idx_.mutable_gpu_data(); + // NOLINT_NEXT_LINE(whitespace/operators) + MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); + for (int i = 2; i < bottom.size(); ++i) { + // NOLINT_NEXT_LINE(whitespace/operators) + MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); + } + break; + default: { + LOG(FATAL)<< "Unknown elementwise operation."; + } } - break; - case EltwiseParameter_EltwiseOp_MAX: - mask = max_idx_.mutable_gpu_data(); - // NOLINT_NEXT_LINE(whitespace/operators) - MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); - for (int i = 2; i < bottom.size(); ++i) { - // NOLINT_NEXT_LINE(whitespace/operators) - MaxForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: { + greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)(bottom[0]->gpu_data()),0, (cl_mem)(bottom[1]->gpu_data()),0, + (cl_mem)top_data,0); + for (int i = 2; i < bottom.size(); ++i) { + greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)top_data,0, (cl_mem)(bottom[i]->gpu_data()),0, (cl_mem)top_data,0); + } + } + break; + case EltwiseParameter_EltwiseOp_SUM: { + greentea_gpu_set(this->device_context_.id(), count, 0, (cl_mem)top_data, 0); + for (int i = 0; i < bottom.size(); ++i) { + greentea_gpu_axpy(this->device_context_.id(), count, coeffs_[i], (cl_mem)(bottom[i]->gpu_data()),0, (cl_mem)top_data, 0); + } + } + break; + case EltwiseParameter_EltwiseOp_MAX: { + mask = max_idx_.mutable_gpu_data(); + + viennacl::ocl::kernel &oclk_max_forward = program.get_kernel( + CL_KERNEL_SELECT("eltwise_max_forward")); + + viennacl::ocl::enqueue( + oclk_max_forward(count, WrapHandle((cl_mem)(bottom[0]->gpu_data()),ctx), WrapHandle((cl_mem)(bottom[1]->gpu_data()),ctx), 0, WrapHandle((cl_mem)top_data,ctx), WrapHandle((cl_mem)mask,ctx)), + ctx.get_queue()); + + for (int i = 2; i < bottom.size(); ++i) { + viennacl::ocl::enqueue( + oclk_max_forward(count, WrapHandle((cl_mem)(top_data),ctx), WrapHandle((cl_mem)(bottom[i]->gpu_data()),ctx), i-1, WrapHandle((cl_mem)top_data,ctx), WrapHandle((cl_mem)mask,ctx)), + ctx.get_queue()); + } + } + break; + default: { + LOG(FATAL)<< "Unknown elementwise operation."; + } } - break; - default: - LOG(FATAL) << "Unknown elementwise operation."; +#endif // USE_GREENTEA } } -template +#ifdef USE_CUDA +template __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, - const int blob_idx, const int* mask, Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { + const int blob_idx, const int* mask, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) + { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; @@ -79,54 +141,123 @@ __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, bottom_diff[index] = gradient; } } +#endif // USE_CUDA -template +template void EltwiseLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - for (int i = 0; i < bottom.size(); ++i) { - if (propagate_down[i]) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - switch (op_) { - case EltwiseParameter_EltwiseOp_PROD: - if (stable_prod_grad_) { - bool initialized = false; - for (int j = 0; j < bottom.size(); ++j) { - if (i == j) { continue; } - if (!initialized) { - caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); - initialized = true; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int i = 0; i < bottom.size(); ++i) { + if (propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + if (stable_prod_grad_) { + bool initialized = false; + for (int j = 0; j < bottom.size(); ++j) { + if (i == j) { + continue; + } + if (!initialized) { + caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); + initialized = true; + } else { + caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, + bottom_diff); + } + } + } else { + caffe_gpu_div(count, top_data, bottom_data, bottom_diff); + } + caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); + break; + case EltwiseParameter_EltwiseOp_SUM: + if (coeffs_[i] == Dtype(1.)) { + caffe_copy(count, top_diff, bottom_diff); } else { - caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, - bottom_diff); + caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } + break; + case EltwiseParameter_EltwiseOp_MAX: + mask = max_idx_.gpu_data(); + MaxBackward // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_diff, i, mask, bottom_diff); + break; + default: { + LOG(FATAL)<< "Unknown elementwise operation."; } - } else { - caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } - caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); - break; - case EltwiseParameter_EltwiseOp_SUM: - if (coeffs_[i] == Dtype(1.)) { - caffe_copy(count, top_diff, bottom_diff); - } else { - caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); + } + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + for (int i = 0; i < bottom.size(); ++i) { + if (propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: { + if (stable_prod_grad_) { + bool initialized = false; + for (int j = 0; j < bottom.size(); ++j) { + if (i == j) { + continue; + } + if (!initialized) { + greentea_copy(count, (cl_mem)(bottom[j]->gpu_data()), (cl_mem)(bottom_diff), ctx); + initialized = true; + } else { + greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)bottom[j]->gpu_data(),0, (cl_mem)bottom_diff,0, + (cl_mem)bottom_diff,0); + } + } + } else { + greentea_gpu_div(this->device_context_.id(), count, (cl_mem)top_data,0, (cl_mem)bottom_data,0, (cl_mem)bottom_diff,0); + } + greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)bottom_diff,0, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0); + } + break; + case EltwiseParameter_EltwiseOp_SUM: { + if (coeffs_[i] == Dtype(1.)) { + greentea_copy(count, (cl_mem)top_diff, (cl_mem)bottom_diff,ctx); + } else { + greentea_gpu_scale(count, coeffs_[i],0, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0); + } + } + break; + case EltwiseParameter_EltwiseOp_MAX: { + mask = max_idx_.gpu_data(); + + viennacl::ocl::kernel &oclk_max_forward = program.get_kernel( + CL_KERNEL_SELECT("eltwise_max_backward")); + + viennacl::ocl::enqueue( + oclk_max_forward(count, WrapHandle((cl_mem)top_diff,ctx),i, WrapHandle((cl_mem)mask,ctx), 0, WrapHandle((cl_mem)bottom_diff,ctx)), + ctx.get_queue()); + } + break; + default: { + LOG(FATAL)<< "Unknown elementwise operation."; + } } - break; - case EltwiseParameter_EltwiseOp_MAX: - mask = max_idx_.gpu_data(); - MaxBackward // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, top_diff, i, mask, bottom_diff); - break; - default: - LOG(FATAL) << "Unknown elementwise operation."; } } +#endif } } diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index 5b1de3ad2d9..c7129e9041b 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -5,36 +5,66 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { int count = bottom[0]->count(); - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), - bottom[1]->gpu_data(), - diff_.mutable_gpu_data()); - Dtype dot; - caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); - Dtype loss = dot / bottom[0]->num() / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), + diff_.mutable_gpu_data()); + Dtype dot; + caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); + Dtype loss = dot / bottom[0]->num() / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_sub(this->device_context_.id(), count, + (cl_mem) (bottom[0]->gpu_data()), 0, + (cl_mem) (bottom[1]->gpu_data()), 0, + (cl_mem) (diff_.mutable_gpu_data()), 0); + Dtype dot; + greentea_gpu_dot(this->device_context_.id(), count, + (cl_mem) (diff_.gpu_data()), 0, + (cl_mem) (diff_.gpu_data()), 0, &dot); + Dtype loss = dot / bottom[0]->num() / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +#endif // USE_GREENTEA + } } -template -void EuclideanLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { +template +void EuclideanLossLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); - caffe_gpu_axpby( - bottom[i]->count(), // count - alpha, // alpha - diff_.gpu_data(), // a - Dtype(0), // beta - bottom[i]->mutable_gpu_diff()); // b + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_axpby(bottom[i]->count(), // count + alpha, // alpha + diff_.gpu_data(), // a + Dtype(0), // beta + bottom[i]->mutable_gpu_diff()); // b +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_axpby(this->device_context_.id(), bottom[i]->count(), + alpha, (cl_mem) (diff_.gpu_data()), 0, Dtype(0), + (cl_mem) (bottom[i]->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA + } } } } diff --git a/src/caffe/layers/exp_layer.cu b/src/caffe/layers/exp_layer.cu index 2d75d8dd6c7..4c4974b67cb 100644 --- a/src/caffe/layers/exp_layer.cu +++ b/src/caffe/layers/exp_layer.cu @@ -7,38 +7,76 @@ namespace caffe { -template +template void ExpLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const int count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - if (inner_scale_ == Dtype(1)) { - caffe_gpu_exp(count, bottom_data, top_data); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (inner_scale_ == Dtype(1)) { + caffe_gpu_exp(count, bottom_data, top_data); + } else { + caffe_gpu_scale(count, inner_scale_, bottom_data, top_data); + caffe_gpu_exp(count, top_data, top_data); + } + if (outer_scale_ != Dtype(1)) { + caffe_gpu_scal(count, outer_scale_, top_data); + } +#endif // USE_CUDA } else { - caffe_gpu_scale(count, inner_scale_, bottom_data, top_data); - caffe_gpu_exp(count, top_data, top_data); - } - if (outer_scale_ != Dtype(1)) { - caffe_gpu_scal(count, outer_scale_, top_data); +#ifdef USE_GREENTEA + if (inner_scale_ == Dtype(1)) { + greentea_gpu_exp(this->device_context_.id(), count, + (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); + } else { + greentea_gpu_scale(this->device_context_.id(), count, inner_scale_, + (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); + greentea_gpu_exp(this->device_context_.id(), count, + (cl_mem) top_data, 0, (cl_mem) top_data, 0); + } + if (outer_scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, outer_scale_, + (cl_mem) top_data, 0); + } +#endif // USE_GREENTEA } } -template +template void ExpLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } const int count = bottom[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - caffe_gpu_mul(count, top_data, top_diff, bottom_diff); - if (inner_scale_ != Dtype(1)) { - caffe_gpu_scal(count, inner_scale_, bottom_diff); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_mul(count, top_data, top_diff, bottom_diff); + if (inner_scale_ != Dtype(1)) { + caffe_gpu_scal(count, inner_scale_, bottom_diff); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_mul(this->device_context_.id(), count, + (cl_mem) top_data, 0, (cl_mem) top_diff, 0, + (cl_mem) bottom_diff, 0); + if (inner_scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, inner_scale_, + (cl_mem) bottom_diff, 0); + } +#endif // USE_GREENTEA } } INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer); - } // namespace caffe diff --git a/src/caffe/layers/log_layer.cu b/src/caffe/layers/log_layer.cu index 847c86cd10c..924ef7ee6b5 100644 --- a/src/caffe/layers/log_layer.cu +++ b/src/caffe/layers/log_layer.cu @@ -7,37 +7,75 @@ namespace caffe { -template +template void LogLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const int count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { - caffe_gpu_log(count, bottom_data, top_data); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { + caffe_gpu_log(count, bottom_data, top_data); + } else { + caffe_copy(count, bottom_data, top_data); + if (input_scale_ != Dtype(1)) { + caffe_gpu_scal(count, input_scale_, top_data); + } + if (input_shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, input_shift_, top_data); + } + caffe_gpu_log(count, top_data, top_data); + } + if (base_scale_ != Dtype(1)) { + caffe_gpu_scal(count, base_scale_, top_data); + } +#endif // USE_CUDA } else { - caffe_copy(count, bottom_data, top_data); - if (input_scale_ != Dtype(1)) { - caffe_gpu_scal(count, input_scale_, top_data); +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { + greentea_gpu_log(this->device_context_.id(), count, + (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); + } else { + greentea_copy(count, (cl_mem) bottom_data, (cl_mem) top_data, ctx); + if (input_scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, + input_scale_, (cl_mem) top_data, 0); + } + if (input_shift_ != Dtype(0)) { + greentea_gpu_add_scalar(this->device_context_.id(), count, + input_shift_, (cl_mem) top_data, 0); + } + greentea_gpu_log(this->device_context_.id(), count, + (cl_mem) top_data, 0, (cl_mem) top_data, 0); } - if (input_shift_ != Dtype(0)) { - caffe_gpu_add_scalar(count, input_shift_, top_data); + if (base_scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, base_scale_, + (cl_mem) top_data, 0); } - caffe_gpu_log(count, top_data, top_data); - } - if (base_scale_ != Dtype(1)) { - caffe_gpu_scal(count, base_scale_, top_data); +#endif // USE_GREENTEA } + } -template +template void LogLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } - const int count = bottom[0]->count(); - const Dtype* bottom_data = bottom[0]->gpu_data(); - const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + const int count = bottom[0]->count(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA caffe_copy(count, bottom_data, bottom_diff); if (input_scale_ != Dtype(1)) { caffe_gpu_scal(count, input_scale_, bottom_diff); @@ -50,6 +88,35 @@ void LogLayer::Backward_gpu(const vector*>& top, caffe_gpu_scal(count, backward_num_scale_, bottom_diff); } caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + greentea_copy(count, (cl_mem) bottom_data, (cl_mem) bottom_diff, + ctx); + if (input_scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, input_scale_, + (cl_mem) bottom_diff, 0); + } + if (input_shift_ != Dtype(0)) { + greentea_gpu_add_scalar(this->device_context_.id(), count, + input_shift_, (cl_mem) bottom_diff, 0); + } + greentea_gpu_powx(this->device_context_.id(), count, + (cl_mem) bottom_diff, 0, Dtype(-1), + (cl_mem) bottom_diff, 0); + if (backward_num_scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, + backward_num_scale_, (cl_mem) bottom_diff, 0); + } + greentea_gpu_mul(this->device_context_.id(), count, + (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, + (cl_mem) bottom_diff, 0); +#endif // USE_GREENTEA + } + } INSTANTIATE_LAYER_GPU_FUNCS(LogLayer); diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index a5c3878cddc..f476cc4ad5e 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -7,23 +7,47 @@ namespace caffe { -template +#ifdef USE_CUDA +template __global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) { + CUDA_KERNEL_LOOP(index, n) + { out[index] = 1. / (1. + exp(-in[index])); } } +#endif // USE_CUDA -template +template void SigmoidLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - SigmoidForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( + CL_KERNEL_SELECT("sigmoid_forward")); + viennacl::ocl::enqueue( + oclk_sigmoid(count, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data @@ -31,32 +55,56 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, // << " threads: " << CAFFE_CUDA_NUM_THREADS; } -template +#ifdef USE_CUDA +template __global__ void SigmoidBackward(const int n, const Dtype* in_diff, - const Dtype* out_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) { + const Dtype* out_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) + { const Dtype sigmoid_x = out_data[index]; out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); } } +#endif // USE_CUDA -template +template void SigmoidLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - SigmoidBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, top_diff, top_data, bottom_diff); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_diff, top_data, bottom_diff); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( + CL_KERNEL_SELECT("sigmoid_backward")); + viennacl::ocl::enqueue( + oclk_sigmoid(count, WrapHandle((cl_mem) top_diff, ctx), + WrapHandle((cl_mem) top_data, ctx), + WrapHandle((cl_mem) bottom_diff, ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + } } INSTANTIATE_LAYER_GPU_FUNCS(SigmoidLayer); - } // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 1e1bd5f93da..1770b240f3b 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -6,17 +6,20 @@ namespace caffe { -template +#ifdef USE_CUDA +template __global__ void Slice(const int nthreads, const Dtype* in_data, - const bool forward, const int num_slices, const int slice_size, - const int bottom_slice_axis, const int top_slice_axis, - const int offset_slice_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { + const bool forward, const int num_slices, + const int slice_size, const int bottom_slice_axis, + const int top_slice_axis, const int offset_slice_axis, + Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) + { const int total_slice_size = slice_size * top_slice_axis; const int slice_num = index / total_slice_size; const int slice_index = index % total_slice_size; - const int bottom_index = slice_index + - (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; if (forward) { out_data[index] = in_data[bottom_index]; } else { @@ -24,10 +27,11 @@ __global__ void Slice(const int nthreads, const Dtype* in_data, } } } +#endif // USE_CUDA -template +template void SliceLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); @@ -37,18 +41,43 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, const int top_slice_axis = top[i]->shape(slice_axis_); const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( - nthreads, bottom_data, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + Slice // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + nthreads, bottom_data, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_slice = program.get_kernel( + CL_KERNEL_SELECT("slice")); + viennacl::ocl::enqueue( + oclk_slice(nthreads, WrapHandle((cl_mem) bottom_data, ctx), + kForward ? 1 : 0, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + offset_slice_axis += top_slice_axis; } } -template +template void SliceLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); @@ -58,10 +87,31 @@ void SliceLayer::Backward_gpu(const vector*>& top, const int top_slice_axis = top[i]->shape(slice_axis_); const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( - nthreads, top_diff, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + Slice // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + nthreads, top_diff, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_slice = program.get_kernel( + CL_KERNEL_SELECT("slice")); + viennacl::ocl::enqueue( + oclk_slice(nthreads, WrapHandle((cl_mem) top_diff, ctx), + kForward ? 1 : 0, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, + WrapHandle((cl_mem) bottom_diff, ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } offset_slice_axis += top_slice_axis; } } diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index 7c16127759c..b028a8f2030 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -9,51 +9,98 @@ namespace caffe { -template +#ifdef USE_CUDA +template __global__ void TanHForward(const int n, const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) { + CUDA_KERNEL_LOOP(index, n) + { out[index] = tanh(in[index]); } } +#endif // USE_CUDA -template +template void TanHLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - TanHForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + TanHForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_tanh = program.get_kernel( + CL_KERNEL_SELECT("tanh_forward")); + viennacl::ocl::enqueue( + oclk_tanh(count, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } } -template +#ifdef USE_CUDA +template __global__ void TanHBackward(const int n, const Dtype* in_diff, - const Dtype* out_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) { + const Dtype* out_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) + { Dtype tanhx = out_data[index]; out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); } } +#endif // USE_CUDA -template +template void TanHLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - TanHBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, top_diff, top_data, bottom_diff); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + TanHBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_diff, top_data, bottom_diff); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_tanh = program.get_kernel( + CL_KERNEL_SELECT("tanh_backward")); + viennacl::ocl::enqueue( + oclk_tanh(count, WrapHandle((cl_mem) top_diff, ctx), + WrapHandle((cl_mem) top_data, ctx), + WrapHandle((cl_mem) bottom_diff, ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + } } INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer); - } // namespace caffe diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index c5861d4a6a6..a26a74107d7 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -970,8 +970,8 @@ message V1LayerParameter { TANH = 23; WINDOW_DATA = 24; THRESHOLD = 31; - CONVOLUTION_SK = 71; - POOLING_SK = 72; + CONVOLUTION_SK = 40; + POOLING_SK = 41; } optional LayerType type = 5; repeated BlobProto blobs = 6; From 1b2bdd83a27ff1a88069aeea5d247e30829e134d Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 14 Jun 2015 02:22:25 +0200 Subject: [PATCH 047/254] 85% OpenCL support. --- .../greentea/greentea_math_functions.hpp | 4 + src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/activation.cl | 45 +++- .../greentea/greentea_math_functions.cpp | 22 ++ src/caffe/layers/filter_layer.cu | 96 ++++++-- src/caffe/layers/hdf5_data_layer.cu | 66 ++--- src/caffe/layers/hdf5_output_layer.cu | 48 ++-- src/caffe/layers/im2col_layer.cu | 65 ++++- src/caffe/layers/lrn_layer.cu | 6 + src/caffe/layers/power_layer.cu | 214 +++++++++++----- src/caffe/layers/prelu_layer.cu | 228 +++++++++++++----- src/caffe/splitnet/splitnet.cpp | 18 -- 12 files changed, 588 insertions(+), 228 deletions(-) delete mode 100644 src/caffe/splitnet/splitnet.cpp diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index c3b9b9ca92d..83ecde838a8 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -34,6 +34,10 @@ template void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); +template +void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, + viennacl::ocl::context &ctx); + template void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 37387d7223c..55e09d25078 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,7 +6,7 @@ namespace caffe { std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; @@ -24,7 +24,7 @@ std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}"; +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index 0bfade9c9f8..c56cf3c1a79 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -41,17 +41,17 @@ __kernel void TEMPLATE(tanh_backward,Dtype)(const int n, } __kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, - __global const Dtype* in, - __global Dtype* out) { + __global const Dtype* in, + __global Dtype* out) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = 1. / (1. + exp(-in[index])); } } __kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, - __global const Dtype* in_diff, - __global const Dtype* out_data, - __global Dtype* out_diff) { + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { const Dtype sigmoid_x = out_data[index]; out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); @@ -65,3 +65,38 @@ __kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, out[index] = in[index] > threshold ? 1 : 0; } } + +__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in, + __global Dtype* out, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + } +} + +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, + __global const Dtype* in_diff, const int in_diff_off, + __global const Dtype* in_data, const int in_data_off, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); + } +} diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 7eef6590b4d..9f977ae6bc3 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -113,6 +113,28 @@ template void greentea_copy(const int N, const cl_mem X, cl_mem Y, template void greentea_copy(const int N, const cl_mem X, cl_mem Y, viennacl::ocl::context &ctx); +// Copy from OpenCL buffer to OpenCL buffer +template +void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, + viennacl::ocl::context &ctx) { + if (X != Y) { + clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, offX, offY, + sizeof(Dtype) * N, 0, NULL, NULL); + } + ctx.get_queue().finish(); +} + +// Explicit instantiations +template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, + viennacl::ocl::context &ctx); + + template void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, diff --git a/src/caffe/layers/filter_layer.cu b/src/caffe/layers/filter_layer.cu index cf929eeeadf..e87f8d288db 100644 --- a/src/caffe/layers/filter_layer.cu +++ b/src/caffe/layers/filter_layer.cu @@ -6,9 +6,9 @@ namespace caffe { -template +template void FilterLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { int new_tops_num = indices_to_forward_.size(); // forward all filtered items for all bottoms but the Selector (bottom[last]) for (int t = 0; t < top.size(); ++t) { @@ -18,18 +18,33 @@ void FilterLayer::Forward_gpu(const vector*>& bottom, for (int n = 0; n < new_tops_num; ++n) { int data_offset_top = n * dim; int data_offset_bottom = indices_to_forward_[n] * dim; - caffe_copy(dim, bottom_data + data_offset_bottom, - top_data + data_offset_top); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_copy(dim, bottom_data + data_offset_bottom, + top_data + data_offset_top); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + greentea_copy(dim, (cl_mem) bottom_data, + data_offset_bottom, (cl_mem) top_data, data_offset_top, ctx); +#endif // USE_GREENTEA + } + } } } -template +template void FilterLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[bottom.size() - 1]) { - LOG(FATAL) << this->type() - << "Layer cannot backpropagate to filter index inputs"; + LOG(FATAL)<< this->type() + << "Layer cannot backpropagate to filter index inputs"; } for (int i = 0; i < top.size(); ++i) { // bottom[last] is the selector and never needs backpropagation @@ -40,27 +55,60 @@ void FilterLayer::Backward_gpu(const vector*>& top, int batch_offset = 0; int data_offset_bottom = 0; int data_offset_top = 0; - for (int n = 0; n < bottom[i]->shape(0); ++n) { - if (next_to_backward_offset >= indices_to_forward_.size()) { - // we already visited all items that were been forwarded, so - // just set to zero remaining ones - data_offset_bottom = n * dim; - caffe_gpu_set(dim, Dtype(0), - bottom[i]->mutable_gpu_diff() + data_offset_bottom); - } else { - batch_offset = indices_to_forward_[next_to_backward_offset]; - data_offset_bottom = n * dim; - if (n != batch_offset) { // this data was not been forwarded + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int n = 0; n < bottom[i]->shape(0); ++n) { + if (next_to_backward_offset >= indices_to_forward_.size()) { + // we already visited all items that were been forwarded, so + // just set to zero remaining ones + data_offset_bottom = n * dim; caffe_gpu_set(dim, Dtype(0), bottom[i]->mutable_gpu_diff() + data_offset_bottom); - } else { // this data was been forwarded - data_offset_top = next_to_backward_offset * dim; - ++next_to_backward_offset; // point to next forwarded item index - caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top, - bottom[i]->mutable_gpu_diff() + data_offset_bottom); + } else { + batch_offset = indices_to_forward_[next_to_backward_offset]; + data_offset_bottom = n * dim; + if (n != batch_offset) { // this data was not been forwarded + caffe_gpu_set(dim, Dtype(0), + bottom[i]->mutable_gpu_diff() + data_offset_bottom); + } else { // this data was been forwarded + data_offset_top = next_to_backward_offset * dim; + ++next_to_backward_offset;// point to next forwarded item index + caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top, + bottom[i]->mutable_gpu_diff() + data_offset_bottom); + } } } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + for (int n = 0; n < bottom[i]->shape(0); ++n) { + if (next_to_backward_offset >= indices_to_forward_.size()) { + // we already visited all items that were been forwarded, so + // just set to zero remaining ones + data_offset_bottom = n * dim; + greentea_gpu_set(this->device_context_.id(), dim, Dtype(0), + (cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom); + } else { + batch_offset = indices_to_forward_[next_to_backward_offset]; + data_offset_bottom = n * dim; + if (n != batch_offset) { // this data was not been forwarded + greentea_gpu_set(this->device_context_.id(), dim, Dtype(0), + (cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom); + } else { // this data was been forwarded + data_offset_top = next_to_backward_offset * dim; + ++next_to_backward_offset;// point to next forwarded item index + greentea_copy(dim, (cl_mem)(top[i]->mutable_gpu_diff()), data_offset_top, + (cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom, ctx); + } + } + } +#endif // USE_GREENTEA } + } } } diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 5e3e4ced141..3f7843ecade 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -1,7 +1,7 @@ /* -TODO: -- only load parts of the file, in accordance with a prototxt param "max_mem" -*/ + TODO: + - only load parts of the file, in accordance with a prototxt param "max_mem" + */ #include #include @@ -16,35 +16,47 @@ TODO: namespace caffe { -template +template void HDF5DataLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - for (int i = 0; i < batch_size; ++i, ++current_row_) { - if (current_row_ == hdf_blobs_[0]->shape(0)) { - if (num_files_ > 1) { - current_file_ += 1; - if (current_file_ == num_files_) { - current_file_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) { - std::random_shuffle(file_permutation_.begin(), - file_permutation_.end()); + const vector*>& top) { + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); + for (int i = 0; i < batch_size; ++i, ++current_row_) { + if (current_row_ == hdf_blobs_[0]->shape(0)) { + if (num_files_ > 1) { + current_file_ += 1; + if (current_file_ == num_files_) { + current_file_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), + file_permutation_.end()); + } + DLOG(INFO)<< "Looping around to first file."; } - DLOG(INFO) << "Looping around to first file."; + LoadHDF5FileData( + hdf_filenames_[file_permutation_[current_file_]].c_str()); } - LoadHDF5FileData( - hdf_filenames_[file_permutation_[current_file_]].c_str()); + current_row_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) + std::random_shuffle(data_permutation_.begin(), + data_permutation_.end()); + } + for (int j = 0; j < this->layer_param_.top_size(); ++j) { + int data_dim = top[j]->count() / top[j]->shape(0); + caffe_copy( + data_dim, + &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] + * data_dim], + &top[j]->mutable_gpu_data()[i * data_dim]); } - current_row_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) - std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); - } - for (int j = 0; j < this->layer_param_.top_size(); ++j) { - int data_dim = top[j]->count() / top[j]->shape(0); - caffe_copy(data_dim, - &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] - * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]); } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + Forward_cpu(bottom, top); +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index 327387a9eb6..c68bd677ecc 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -11,30 +11,42 @@ namespace caffe { -template +template void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - CHECK_GE(bottom.size(), 2); - CHECK_EQ(bottom[0]->num(), bottom[1]->num()); - data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(), this->device_context_); - label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), - bottom[1]->height(), bottom[1]->width(), this->device_context_); - const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); - const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); - - for (int i = 0; i < bottom[0]->num(); ++i) { - caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], - &data_blob_.mutable_cpu_data()[i * data_datum_dim]); - caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], - &label_blob_.mutable_cpu_data()[i * label_datum_dim]); + const vector*>& top) { + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width(), + this->device_context_); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width(), + this->device_context_); + const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], + &data_blob_.mutable_cpu_data()[i * data_datum_dim]); + caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], + &label_blob_.mutable_cpu_data()[i * label_datum_dim]); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + Forward_cpu(bottom, top); +#endif // USE_GREENTEA } + SaveBlobs(); } -template +template void HDF5OutputLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { return; } diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 6bc119d38fa..c842855401a 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -13,30 +13,69 @@ namespace caffe { -template +template void Im2colLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - for (int n = 0; n < bottom[0]->num(); ++n) { - im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, top_data + top[0]->offset(n)); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int n = 0; n < bottom[0]->num(); ++n) { + im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + top_data + top[0]->offset(n)); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + for (int n = 0; n < bottom[0]->num(); ++n) { + greentea_im2col_gpu(program, ctx, (cl_mem)bottom_data, bottom[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + (cl_mem)top_data, top[0]->offset(n)); + } +#endif // USE_GREENTEA } + } -template +template void Im2colLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - for (int n = 0; n < top[0]->num(); ++n) { - col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int n = 0; n < top[0]->num(); ++n) { + col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, + bottom_diff + bottom[0]->offset(n)); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + for (int n = 0; n < top[0]->num(); ++n) { + greentea_col2im_gpu(program, ctx, (cl_mem) top_diff, top[0]->offset(n), + channels_, height_, width_, kernel_h_, kernel_w_, + pad_h_, pad_w_, stride_h_, stride_w_, + (cl_mem) bottom_diff, bottom[0]->offset(n)); + } +#endif // USE_GREENTEA } -} +} INSTANTIATE_LAYER_GPU_FUNCS(Im2colLayer); diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 983072afd61..d62b5bb8dff 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -6,6 +6,7 @@ namespace caffe { +#ifdef USE_CUDA template __global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, @@ -51,6 +52,7 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* const in, } } } +#endif // USE_CUDA template @@ -69,6 +71,7 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, } // TODO: check if it would be faster to just put it into the previous kernel. +#ifdef USE_CUDA template __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { @@ -76,6 +79,7 @@ __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, out[index] = in[index] * pow(scale[index], negative_beta); } } +#endif // USE_CUDA template void LRNLayer::CrossChannelForward_gpu( @@ -119,6 +123,7 @@ void LRNLayer::Backward_gpu(const vector*>& top, } } +#ifdef USE_CUDA template __global__ void LRNComputeDiff(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_data, @@ -176,6 +181,7 @@ __global__ void LRNComputeDiff(const int nthreads, } } } +#endif // USE_CUDA template void LRNLayer::CrossChannelBackward_gpu( diff --git a/src/caffe/layers/power_layer.cu b/src/caffe/layers/power_layer.cu index 90d944059b6..05376d38cb2 100644 --- a/src/caffe/layers/power_layer.cu +++ b/src/caffe/layers/power_layer.cu @@ -5,83 +5,185 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { -template +template void PowerLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - // Special case where we can ignore the input: scale or power is 0. - if (diff_scale_ == Dtype(0)) { - Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_); - caffe_gpu_set(count, value, top_data); - return; - } - const Dtype* bottom_data = bottom[0]->gpu_data(); - caffe_copy(count, bottom_data, top_data); - if (scale_ != Dtype(1)) { - caffe_gpu_scal(count, scale_, top_data); - } - if (shift_ != Dtype(0)) { - caffe_gpu_add_scalar(count, shift_, top_data); - } - if (power_ != Dtype(1)) { - caffe_gpu_powx(count, top_data, power_, top_data); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // Special case where we can ignore the input: scale or power is 0. + if (diff_scale_ == Dtype(0)) { + Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_); + caffe_gpu_set(count, value, top_data); + return; + } + const Dtype* bottom_data = bottom[0]->gpu_data(); + caffe_copy(count, bottom_data, top_data); + if (scale_ != Dtype(1)) { + caffe_gpu_scal(count, scale_, top_data); + } + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, shift_, top_data); + } + if (power_ != Dtype(1)) { + caffe_gpu_powx(count, top_data, power_, top_data); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + if (diff_scale_ == Dtype(0)) { + Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_); + greentea_gpu_set(this->device_context_.id(), count, value, + (cl_mem) top_data, 0); + return; + } + const Dtype* bottom_data = bottom[0]->gpu_data(); + caffe_copy(count, bottom_data, top_data); + if (scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, scale_, + (cl_mem) top_data, 0); + } + if (shift_ != Dtype(0)) { + greentea_gpu_add_scalar(this->device_context_.id(), count, shift_, + (cl_mem) top_data, 0); + } + if (power_ != Dtype(1)) { + greentea_gpu_powx(this->device_context_.id(), count, + (cl_mem) top_data, 0, power_, (cl_mem) top_data, + 0); + } +#endif // USE_GREENTEA } } -template +template void PowerLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); - if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { - caffe_gpu_set(count, diff_scale_, bottom_diff); - } else { - const Dtype* bottom_data = bottom[0]->gpu_data(); - // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) - // = diff_scale * y / (shift + scale * x) - if (power_ == Dtype(2)) { - // Special case for y = (shift + scale * x)^2 - // -> dy/dx = 2 * scale * (shift + scale * x) - // = diff_scale * shift + diff_scale * scale * x - caffe_gpu_axpby(count, diff_scale_ * scale_, bottom_data, - Dtype(0), bottom_diff); - if (shift_ != Dtype(0)) { - caffe_gpu_add_scalar(count, diff_scale_ * shift_, bottom_diff); - } - } else if (shift_ == Dtype(0)) { - // Special case for y = (scale * x)^power - // -> dy/dx = scale * power * (scale * x)^(power - 1) - // = scale * power * (scale * x)^power * (scale * x)^(-1) - // = power * y / x - const Dtype* top_data = top[0]->gpu_data(); - caffe_gpu_div(count, top_data, bottom_data, bottom_diff); - caffe_gpu_scal(count, power_, bottom_diff); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { + caffe_gpu_set(count, diff_scale_, bottom_diff); } else { - caffe_copy(count, bottom_data, bottom_diff); - if (scale_ != Dtype(1)) { - caffe_gpu_scal(count, scale_, bottom_diff); - } - if (shift_ != Dtype(0)) { - caffe_gpu_add_scalar(count, shift_, bottom_diff); + const Dtype* bottom_data = bottom[0]->gpu_data(); + // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) + // = diff_scale * y / (shift + scale * x) + if (power_ == Dtype(2)) { + // Special case for y = (shift + scale * x)^2 + // -> dy/dx = 2 * scale * (shift + scale * x) + // = diff_scale * shift + diff_scale * scale * x + caffe_gpu_axpby(count, diff_scale_ * scale_, bottom_data, Dtype(0), + bottom_diff); + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, diff_scale_ * shift_, bottom_diff); + } + } else if (shift_ == Dtype(0)) { + // Special case for y = (scale * x)^power + // -> dy/dx = scale * power * (scale * x)^(power - 1) + // = scale * power * (scale * x)^power * (scale * x)^(-1) + // = power * y / x + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_div(count, top_data, bottom_data, bottom_diff); + caffe_gpu_scal(count, power_, bottom_diff); + } else { + caffe_copy(count, bottom_data, bottom_diff); + if (scale_ != Dtype(1)) { + caffe_gpu_scal(count, scale_, bottom_diff); + } + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, shift_, bottom_diff); + } + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_div(count, top_data, bottom_diff, bottom_diff); + if (diff_scale_ != Dtype(1)) { + caffe_gpu_scal(count, diff_scale_, bottom_diff); + } } - const Dtype* top_data = top[0]->gpu_data(); - caffe_gpu_div(count, top_data, bottom_diff, bottom_diff); - if (diff_scale_ != Dtype(1)) { - caffe_gpu_scal(count, diff_scale_, bottom_diff); + } + caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { + greentea_gpu_set(this->device_context_.id(), count, diff_scale_, + (cl_mem) bottom_diff, 0); + } else { + const Dtype* bottom_data = bottom[0]->gpu_data(); + // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) + // = diff_scale * y / (shift + scale * x) + if (power_ == Dtype(2)) { + // Special case for y = (shift + scale * x)^2 + // -> dy/dx = 2 * scale * (shift + scale * x) + // = diff_scale * shift + diff_scale * scale * x + greentea_gpu_axpby(this->device_context_.id(), count, + diff_scale_ * scale_, (cl_mem) bottom_data, 0, + Dtype(0), (cl_mem) bottom_diff, 0); + if (shift_ != Dtype(0)) { + greentea_gpu_add_scalar(this->device_context_.id(), count, + diff_scale_ * shift_, (cl_mem) bottom_diff, + 0); + } + } else if (shift_ == Dtype(0)) { + // Special case for y = (scale * x)^power + // -> dy/dx = scale * power * (scale * x)^(power - 1) + // = scale * power * (scale * x)^power * (scale * x)^(-1) + // = power * y / x + const Dtype* top_data = top[0]->gpu_data(); + greentea_gpu_div(this->device_context_.id(), count, + (cl_mem) top_data, 0, (cl_mem) bottom_data, 0, + (cl_mem) bottom_diff, 0); + greentea_gpu_scal(this->device_context_.id(), count, power_, + (cl_mem) bottom_diff, 0); + } else { + greentea_copy(count, (cl_mem) bottom_data, + (cl_mem) bottom_diff, ctx); + if (scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, scale_, + (cl_mem) bottom_diff, 0); + } + if (shift_ != Dtype(0)) { + greentea_gpu_add_scalar(this->device_context_.id(), count, shift_, + (cl_mem) bottom_diff, 0); + } + const Dtype* top_data = top[0]->gpu_data(); + greentea_gpu_div(this->device_context_.id(), count, + (cl_mem) top_data, 0, (cl_mem) bottom_diff, 0, + (cl_mem) bottom_diff, 0); + if (diff_scale_ != Dtype(1)) { + greentea_gpu_scal(this->device_context_.id(), count, diff_scale_, + (cl_mem) bottom_diff, 0); + } } } + greentea_gpu_mul(this->device_context_.id(), count, + (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, + (cl_mem) bottom_diff, 0); +#endif // USE_GREENTEA } - caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); + } } INSTANTIATE_LAYER_GPU_FUNCS(PowerLayer); - } // namespace caffe diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index e4ea647967e..b087ec2c278 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -4,43 +4,54 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { +#ifdef USE_CUDA // CUDA kernele for forward -template +template __global__ void PReLUForward(const int n, const int channels, const int dim, - const Dtype* in, Dtype* out, const Dtype* slope_data, - const int div_factor) { - CUDA_KERNEL_LOOP(index, n) { + const Dtype* in, Dtype* out, + const Dtype* slope_data, const int div_factor) { + CUDA_KERNEL_LOOP(index, n) + { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward -template +template __global__ void PReLUBackward(const int n, const int channels, const int dim, - const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, - const Dtype* slope_data, const int div_factor) { - CUDA_KERNEL_LOOP(index, n) { + const Dtype* in_diff, const Dtype* in_data, + Dtype* out_diff, const Dtype* slope_data, + const int div_factor) { + CUDA_KERNEL_LOOP(index, n) + { int c = (index / dim) % channels / div_factor; - out_diff[index] = in_diff[index] * ((in_data[index] > 0) - + (in_data[index] <= 0) * slope_data[c]); + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } // CUDA kernel for element-wise parameter backward -template +template __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, - const Dtype* in_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) { + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) + { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } +#endif // USE_CUDA -template +template void PReLULayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); @@ -49,21 +60,46 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; - // For in-place computation - if (top[0] == bottom[0]) { - caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); - } + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // For in-place computation + if (top[0] == bottom[0]) { + caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); + } + + // NOLINT_NEXT_LINE(whitespace/operators) + PReLUForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, channels, dim, bottom_data, top_data, slope_data, div_factor); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); - // NOLINT_NEXT_LINE(whitespace/operators) - PReLUForwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, channels, dim, bottom_data, top_data, slope_data, div_factor); - CUDA_POST_KERNEL_CHECK; + if (top[0] == bottom[0]) { + greentea_copy(count, (cl_mem)bottom_data, (cl_mem)(bottom_memory_.mutable_gpu_data()), ctx); + } + + viennacl::ocl::kernel &oclk_prelu = program.get_kernel( + CL_KERNEL_SELECT("prelu_forward")); + viennacl::ocl::enqueue( + oclk_prelu(count, channels, dim, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) top_data, ctx), + WrapHandle((cl_mem) slope_data, ctx), div_factor), + ctx.get_queue()); + +#endif // USE_GREENTEA + } } -template +template void PReLULayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); @@ -75,54 +111,116 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - // Propagate to param - // Since to write bottom diff will affect top diff if top and bottom blobs - // are identical (in-place computaion), we first compute param backward to - // keep top_diff unchanged. - if (this->param_propagate_down_[0]) { - Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); - int cdim = channels * dim; - Dtype dsum = 0.; - for (int n = 0; n < bottom[0]->num(); ++n) { - // compute element-wise diff + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // Propagate to param + // Since to write bottom diff will affect top diff if top and bottom blobs + // are identical (in-place computaion), we first compute param backward to + // keep top_diff unchanged. + if (this->param_propagate_down_[0]) { + Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); + int cdim = channels * dim; + Dtype dsum = 0.; + for (int n = 0; n < bottom[0]->num(); ++n) { + // compute element-wise diff + // NOLINT_NEXT_LINE(whitespace/operators) + PReLUParamBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(cdim), + CAFFE_CUDA_NUM_THREADS)( + cdim, top_diff + top[0]->offset(n), + bottom_data + bottom[0]->offset(n), + backward_buff_.mutable_gpu_diff()); + CUDA_POST_KERNEL_CHECK + ; + if (channel_shared_) { + Dtype d; + caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), + multiplier_.gpu_data(), &d); + dsum += d; + } else { + caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., + backward_buff_.gpu_diff(), + multiplier_.gpu_data(), 1., slope_diff); + } + } + if (channel_shared_) { + caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + } + } + // Propagate to bottom + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const Dtype* slope_data = this->blobs_[0]->gpu_data(); + int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(cdim), + PReLUBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - cdim, top_diff + top[0]->offset(n), - bottom_data + bottom[0]->offset(n), - backward_buff_.mutable_gpu_diff()); - CUDA_POST_KERNEL_CHECK; + count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, + div_factor); + CUDA_POST_KERNEL_CHECK + ; + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + if (this->param_propagate_down_[0]) { + Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); + int cdim = channels * dim; + Dtype dsum = 0.; + for (int n = 0; n < bottom[0]->num(); ++n) { + + viennacl::ocl::kernel &oclk_prelu_param = program.get_kernel( + CL_KERNEL_SELECT("prelu_param_backward")); + viennacl::ocl::enqueue( + oclk_prelu_param( + cdim, WrapHandle((cl_mem) top_diff, ctx), top[0]->offset(n), + WrapHandle((cl_mem) bottom_data, ctx), bottom[0]->offset(n), + WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), ctx)), + ctx.get_queue()); + + if (channel_shared_) { + Dtype d; + greentea_gpu_dot(this->device_context_.id(), channels * dim, + (cl_mem) (backward_buff_.gpu_diff()), 0, + (cl_mem) (multiplier_.gpu_data()), 0, &d); + dsum += d; + } else { + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + channels, dim, 1., + (cl_mem) (backward_buff_.gpu_diff()), 0, + (cl_mem) (multiplier_.gpu_data()), 0, 1., + (cl_mem) slope_diff, 0); + } + } if (channel_shared_) { - Dtype d; - caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), - multiplier_.gpu_data(), &d); - dsum += d; - } else { - caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., - slope_diff); + greentea_gpu_add_scalar(this->device_context_.id(), + this->blobs_[0]->count(), Dtype(dsum), + (cl_mem) slope_diff, 0); } } - if (channel_shared_) { - caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + // Propagate to bottom + if (propagate_down[0]) { + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const Dtype* slope_data = this->blobs_[0]->gpu_data(); + int div_factor = channel_shared_ ? channels : 1; + + viennacl::ocl::kernel &oclk_prelu = program.get_kernel( + CL_KERNEL_SELECT("prelu_backward")); + viennacl::ocl::enqueue( + oclk_prelu(count, channels, dim, WrapHandle((cl_mem) top_diff, ctx), + WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) bottom_diff, ctx), + WrapHandle((cl_mem) slope_data, ctx), div_factor), + ctx.get_queue()); } - } - // Propagate to bottom - if (propagate_down[0]) { - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const Dtype* slope_data = this->blobs_[0]->gpu_data(); - int div_factor = channel_shared_ ? channels : 1; - // NOLINT_NEXT_LINE(whitespace/operators) - PReLUBackwardCUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, - div_factor); - CUDA_POST_KERNEL_CHECK; +#endif // USE_GREENTEA } } - INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); - } // namespace caffe diff --git a/src/caffe/splitnet/splitnet.cpp b/src/caffe/splitnet/splitnet.cpp deleted file mode 100644 index 3197b97a6e6..00000000000 --- a/src/caffe/splitnet/splitnet.cpp +++ /dev/null @@ -1,18 +0,0 @@ -/* - * splitnet.cpp - * - * Created on: Apr 5, 2015 - * Author: Fabian Tschopp - */ - -#include "caffe/splitnet/splitnet.hpp" - -namespace caffe { - -// TODO -template -Splitnet::Splitnet() { -} - -INSTANTIATE_CLASS(Splitnet); -} From 5f778acf2d23192c984653198a7860185e97d60d Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 14 Jun 2015 20:16:21 +0200 Subject: [PATCH 048/254] 100% OpenCL support. WIP: Debugging. --- src/caffe/greentea/cl_kernels.cpp | 4 + src/caffe/greentea/cl_kernels/lrn.cl | 121 +++++++++++ src/caffe/layers/lrn_layer.cu | 219 ++++++++++++-------- src/caffe/layers/mvn_layer.cu | 289 +++++++++++++++++++-------- src/caffe/layers/reduction_layer.cu | 209 +++++++++++++------ 5 files changed, 627 insertions(+), 215 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/lrn.cl diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 55e09d25078..6f84d0b89f3 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -18,6 +18,7 @@ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; +std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; @@ -36,6 +37,7 @@ std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; +std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; @@ -58,6 +60,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << fillbuffer_float << "\n\n"; ss << im2col_float << "\n\n"; ss << im2col_sk_float << "\n\n"; + ss << lrn_float << "\n\n"; ss << math_float << "\n\n"; ss << mergecrop_float << "\n\n"; ss << pooling_float << "\n\n"; @@ -80,6 +83,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { ss << fillbuffer_double << "\n\n"; ss << im2col_double << "\n\n"; ss << im2col_sk_double << "\n\n"; + ss << lrn_double << "\n\n"; ss << math_double << "\n\n"; ss << mergecrop_double << "\n\n"; ss << pooling_double << "\n\n"; diff --git a/src/caffe/greentea/cl_kernels/lrn.cl b/src/caffe/greentea/cl_kernels/lrn.cl new file mode 100644 index 00000000000..6c898acd073 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/lrn.cl @@ -0,0 +1,121 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, + __global const Dtype* in, + __global const Dtype* scale, + const Dtype negative_beta, + __global Dtype* out) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + __global Dtype* const scale) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* in_off = in + offset; + __global Dtype* scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* top_data, + __global const Dtype* scale, + __global const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* bottom_off = bottom_data + offset; + __global const Dtype* top_off = top_data + offset; + __global const Dtype* scale_off = scale + offset; + __global Dtype* top_diff_off = top_diff + offset; + __global Dtype* bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index d62b5bb8dff..71ed04a6bbc 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -7,12 +7,14 @@ namespace caffe { #ifdef USE_CUDA -template +template __global__ void LRNFillScale(const int nthreads, const Dtype* const in, - const int num, const int channels, const int height, - const int width, const int size, const Dtype alpha_over_size, - const Dtype k, Dtype* const scale) { - CUDA_KERNEL_LOOP(index, nthreads) { + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + Dtype* const scale) { + CUDA_KERNEL_LOOP(index, nthreads) + { // find out the local offset const int w = index % width; const int h = (index / width) % height; @@ -36,7 +38,7 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* const in, accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; @@ -45,7 +47,7 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* const in, while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; @@ -54,84 +56,124 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* const in, } #endif // USE_CUDA - -template +template void LRNLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { - case LRNParameter_NormRegion_ACROSS_CHANNELS: - CrossChannelForward_gpu(bottom, top); - break; - case LRNParameter_NormRegion_WITHIN_CHANNEL: - WithinChannelForward(bottom, top); - break; - default: - LOG(FATAL) << "Unknown normalization region."; + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelForward_gpu(bottom, top); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelForward(bottom, top); + break; + default: + LOG(FATAL)<< "Unknown normalization region."; + } } -} // TODO: check if it would be faster to just put it into the previous kernel. #ifdef USE_CUDA -template +template __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, - const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { - CUDA_KERNEL_LOOP(index, nthreads) { + const Dtype* const scale, + const Dtype negative_beta, Dtype* const out) { + CUDA_KERNEL_LOOP(index, nthreads) + { out[index] = in[index] * pow(scale[index], negative_beta); } } #endif // USE_CUDA -template +template void LRNLayer::CrossChannelForward_gpu( const vector*>& bottom, const vector*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); - // We will launch one kernel for each pixel location, and have the kernel - // go through all the channels. - int n_threads = num_ * height_ * width_; - // NOLINT_NEXT_LINE(whitespace/operators) - LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( - n_threads, bottom_data, num_, channels_, height_, width_, size_, - alpha_ / size_, k_, scale_data); - CUDA_POST_KERNEL_CHECK; - n_threads = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( - n_threads, bottom_data, scale_data, -beta_, top_data); - CUDA_POST_KERNEL_CHECK; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // We will launch one kernel for each pixel location, and have the kernel + // go through all the channels. + int n_threads = num_ * height_ * width_; + // NOLINT_NEXT_LINE(whitespace/operators) + LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( + n_threads, bottom_data, num_, channels_, height_, width_, size_, + alpha_ / size_, k_, scale_data); + CUDA_POST_KERNEL_CHECK + ; + n_threads = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( + n_threads, bottom_data, scale_data, -beta_, top_data); + CUDA_POST_KERNEL_CHECK + ; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + int n_threads = num_ * height_ * width_; + viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( + CL_KERNEL_SELECT("lrn_fill_scale")); + viennacl::ocl::enqueue( + oclk_lrn_fill(n_threads, WrapHandle((cl_mem) bottom_data, ctx), num_, + channels_, height_, width_, size_, alpha_ / size_, k_, + WrapHandle((cl_mem) scale_data, ctx)), + ctx.get_queue()); + + n_threads = bottom[0]->count(); + viennacl::ocl::kernel &oclk_lrn_compute = program.get_kernel( + CL_KERNEL_SELECT("lrn_compute_output")); + viennacl::ocl::enqueue( + oclk_lrn_compute(n_threads, WrapHandle((cl_mem) bottom_data, ctx), + WrapHandle((cl_mem) scale_data, ctx), -beta_, + WrapHandle((cl_mem) top_data, ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + } template void LRNLayer::CrossChannelForward_gpu( const vector*>& bottom, const vector*>& top); template void LRNLayer::CrossChannelForward_gpu( const vector*>& bottom, const vector*>& top); - -template +template void LRNLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { - case LRNParameter_NormRegion_ACROSS_CHANNELS: - CrossChannelBackward_gpu(top, propagate_down, bottom); - break; - case LRNParameter_NormRegion_WITHIN_CHANNEL: - WithinChannelBackward(top, propagate_down, bottom); - break; - default: - LOG(FATAL) << "Unknown normalization region."; + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelBackward_gpu(top, propagate_down, bottom); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelBackward(top, propagate_down, bottom); + break; + default: + LOG(FATAL)<< "Unknown normalization region."; + } } -} #ifdef USE_CUDA -template +template __global__ void LRNComputeDiff(const int nthreads, - const Dtype* const bottom_data, const Dtype* const top_data, - const Dtype* const scale, const Dtype* const top_diff, - const int num, const int channels, const int height, - const int width, const int size, const Dtype negative_beta, - const Dtype cache_ratio, Dtype* const bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { + const Dtype* const bottom_data, + const Dtype* const top_data, + const Dtype* const scale, + const Dtype* const top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + Dtype* const bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) + { // find out the local offset const int w = index % width; const int h = (index / width) % height; @@ -149,33 +191,31 @@ __global__ void LRNComputeDiff(const int nthreads, Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } @@ -183,17 +223,42 @@ __global__ void LRNComputeDiff(const int nthreads, } #endif // USE_CUDA -template +template void LRNLayer::CrossChannelBackward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { int n_threads = num_ * height_ * width_; - // NOLINT_NEXT_LINE(whitespace/operators) - LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( - n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), - scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, - size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), - bottom[0]->mutable_gpu_diff()); + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( + n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), + scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, + size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), + bottom[0]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_.id()); + + viennacl::ocl::kernel &oclk_lrn = program.get_kernel( + CL_KERNEL_SELECT("lrn_compute_diff")); + viennacl::ocl::enqueue( + oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), ctx), + WrapHandle((cl_mem) (top[0]->gpu_data()), ctx), + WrapHandle((cl_mem) (scale_.gpu_data()), ctx), + WrapHandle((cl_mem) (top[0]->gpu_diff()), ctx), num_, + channels_, height_, width_, size_, -beta_, + Dtype(2. * alpha_ * beta_ / size_), + WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + } template void LRNLayer::CrossChannelBackward_gpu( const vector*>& top, const vector& propagate_down, @@ -202,8 +267,6 @@ template void LRNLayer::CrossChannelBackward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom); - - INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); } // namespace caffe diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 3888a0c7106..74a1e6cc188 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -7,9 +7,9 @@ namespace caffe { -template +template void MVNLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int num; @@ -20,58 +20,141 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, int dim = bottom[0]->count() / num; - if (this->layer_param_.mvn_param().normalize_variance()) { - // put the squares of bottom into temp_ - caffe_gpu_powx(bottom[0]->count(), bottom_data, Dtype(2), - temp_.mutable_gpu_data()); - - // computes variance using var(X) = E(X^2) - (EX)^2 - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), - sum_multiplier_.gpu_data(), 0., - variance_.mutable_gpu_data()); // E(X^2) - caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), - temp_.mutable_gpu_data()); // (EX)^2 - caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), - variance_.mutable_gpu_data()); // variance - - // do mean and variance normalization - // subtract mean - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., - mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., - temp_.mutable_gpu_data()); - - caffe_gpu_add(temp_.count(), bottom_data, temp_.gpu_data(), top_data); - - // normalize variance - caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), - variance_.mutable_gpu_data()); - - caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); - - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., - temp_.mutable_gpu_data()); - - caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (this->layer_param_.mvn_param().normalize_variance()) { + // put the squares of bottom into temp_ + caffe_gpu_powx(bottom[0]->count(), bottom_data, Dtype(2), + temp_.mutable_gpu_data()); + + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); // EX + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), + sum_multiplier_.gpu_data(), 0., + variance_.mutable_gpu_data()); // E(X^2) + caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), + temp_.mutable_gpu_data()); // (EX)^2 + caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), + variance_.mutable_gpu_data()); // variance + + // do mean and variance normalization + // subtract mean + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., + temp_.mutable_gpu_data()); + + caffe_gpu_add(temp_.count(), bottom_data, temp_.gpu_data(), top_data); + + // normalize variance + caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), + variance_.mutable_gpu_data()); + + caffe_gpu_add_scalar(variance_.count(), eps_, + variance_.mutable_gpu_data()); + + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + variance_.gpu_data(), sum_multiplier_.gpu_data(), + 0., temp_.mutable_gpu_data()); + + caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data); + } else { + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); // EX + + // subtract mean + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., + temp_.mutable_gpu_data()); + + caffe_gpu_add(temp_.count(), bottom_data, temp_.gpu_data(), top_data); + } +#endif // USE_CUDA } else { - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX - - // subtract mean - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., - mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., - temp_.mutable_gpu_data()); - - caffe_gpu_add(temp_.count(), bottom_data, temp_.gpu_data(), top_data); +#ifdef USE_GREENTEA + if (this->layer_param_.mvn_param().normalize_variance()) { + // put the squares of bottom into temp_ + greentea_gpu_powx(this->device_context_.id(), bottom[0]->count(), + (cl_mem) bottom_data, 0, Dtype(2), + (cl_mem) (temp_.mutable_gpu_data()), 0); + + // computes variance using var(X) = E(X^2) - (EX)^2 + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + dim, 1. / dim, (cl_mem) (bottom_data), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (mean_.mutable_gpu_data()), 0); // EX + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + dim, 1. / dim, (cl_mem) (temp_.gpu_data()), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (variance_.mutable_gpu_data()), 0); // E(X^2) + greentea_gpu_powx(this->device_context_.id(), mean_.count(), + (cl_mem) mean_.gpu_data(), 0, Dtype(2), + (cl_mem) (temp_.mutable_gpu_data()), 0); // (EX)^2 + greentea_gpu_sub(this->device_context_.id(), mean_.count(), + (cl_mem) (variance_.gpu_data()), 0, + (cl_mem) (temp_.gpu_data()), 0, + (cl_mem) (variance_.mutable_gpu_data()), 0); // variance + + // do mean and variance normalization + // subtract mean + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num, dim, 1, -1., + (cl_mem) (mean_.gpu_data()), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (temp_.mutable_gpu_data()), 0); + + greentea_gpu_add(this->device_context_.id(), temp_.count(), + (cl_mem) bottom_data, 0, + (cl_mem) (temp_.gpu_data()), 0, (cl_mem) top_data, + 0); + + // normalize variance + greentea_gpu_powx(this->device_context_.id(), variance_.count(), + (cl_mem) (variance_.gpu_data()), 0, Dtype(0.5), + (cl_mem) (variance_.mutable_gpu_data()), 0); + + greentea_gpu_add_scalar(this->device_context_.id(), + variance_.count(), eps_, + (cl_mem) (variance_.mutable_gpu_data()), + 0); + + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num, dim, 1, 1., + (cl_mem) (variance_.gpu_data()), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (temp_.mutable_gpu_data()), 0); + + greentea_gpu_div(this->device_context_.id(), temp_.count(), + (cl_mem) top_data, 0, (cl_mem) (temp_.gpu_data()), + 0, (cl_mem) top_data, 0); + } else { + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + dim, 1. / dim, (cl_mem) bottom_data, 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (mean_.mutable_gpu_data()), 0); // EX + + // subtract mean + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num, dim, 1, -1., + (cl_mem) (mean_.gpu_data()), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (temp_.mutable_gpu_data()), 0); + + greentea_gpu_add(this->device_context_.id(), temp_.count(), + (cl_mem) bottom_data, 0, + (cl_mem) (temp_.gpu_data()), 0, (cl_mem) top_data, + 0); + } +#endif // USE_GREENTEA } } -template +template void MVNLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); @@ -85,40 +168,88 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; - if (this->layer_param_.mvn_param().normalize_variance()) { - caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); - caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., - bottom_diff); - caffe_gpu_mul(temp_.count(), top_data, bottom_diff, bottom_diff); - - caffe_gpu_gemv(CblasNoTrans, num, dim, 1., top_diff, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - mean_.gpu_data(), sum_multiplier_.gpu_data(), 1., - bottom_diff); - - caffe_gpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. / dim), - bottom_diff); - - // put the squares of bottom into temp_ - caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), - temp_.mutable_gpu_data()); - - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., - temp_.mutable_gpu_data()); - - caffe_gpu_div(temp_.count(), bottom_diff, temp_.gpu_data(), bottom_diff); + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (this->layer_param_.mvn_param().normalize_variance()) { + caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); + caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, + sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., + bottom_diff); + caffe_gpu_mul(temp_.count(), top_data, bottom_diff, bottom_diff); + + caffe_gpu_gemv(CblasNoTrans, num, dim, 1., top_diff, + sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + mean_.gpu_data(), sum_multiplier_.gpu_data(), 1., + bottom_diff); + + caffe_gpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. / dim), + bottom_diff); + + // put the squares of bottom into temp_ + caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), + temp_.mutable_gpu_data()); + + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., + variance_.gpu_data(), sum_multiplier_.gpu_data(), + 0., temp_.mutable_gpu_data()); + + caffe_gpu_div(temp_.count(), bottom_diff, temp_.gpu_data(), bottom_diff); + } else { + caffe_copy(temp_.count(), top_diff, bottom_diff); + } +#endif // USE_CUDA } else { - caffe_copy(temp_.count(), top_diff, bottom_diff); +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_.id()); + + if (this->layer_param_.mvn_param().normalize_variance()) { + greentea_gpu_mul(this->device_context_.id(), temp_.count(), + (cl_mem)top_data,0, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0); + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + dim, 1., (cl_mem)bottom_diff,0, (cl_mem)(sum_multiplier_.gpu_data()),0, + 0., (cl_mem)(mean_.mutable_gpu_data()),0); + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num, dim, 1, 1., (cl_mem)(mean_.gpu_data()),0, + (cl_mem)(sum_multiplier_.gpu_data()),0, 0., (cl_mem)bottom_diff,0); + greentea_gpu_mul(this->device_context_.id(), temp_.count(), + (cl_mem)top_data,0, (cl_mem)bottom_diff,0, (cl_mem)bottom_diff,0); + + greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + dim, 1., (cl_mem)top_diff,0, (cl_mem)(sum_multiplier_.gpu_data()),0, + 0., (cl_mem)(mean_.mutable_gpu_data()),0); + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num, dim, 1, 1., (cl_mem)(mean_.gpu_data()),0, + (cl_mem)(sum_multiplier_.gpu_data()),0, 1., (cl_mem)bottom_diff,0); + + greentea_gpu_axpby(this->device_context_.id(), temp_.count(), + Dtype(1), (cl_mem)top_diff,0, Dtype(-1. / dim), + (cl_mem)bottom_diff,0); + + // put the squares of bottom into temp_ + greentea_gpu_powx(this->device_context_.id(), temp_.count(), + (cl_mem)bottom_data,0, Dtype(2), (cl_mem)(temp_.mutable_gpu_data()),0); + + greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + CblasNoTrans, num, dim, 1, 1., + (cl_mem)(variance_.gpu_data()),0, (cl_mem)(sum_multiplier_.gpu_data()),0, + 0., (cl_mem)(temp_.mutable_gpu_data()),0); + + greentea_gpu_div(this->device_context_.id(), temp_.count(), + (cl_mem)bottom_diff,0, (cl_mem)(temp_.gpu_data()),0, (cl_mem)bottom_diff,0); + } else { + greentea_copy(temp_.count(), (cl_mem)top_diff, + (cl_mem)bottom_diff, ctx); + } +#endif // USE_GREENTEA } } - INSTANTIATE_LAYER_GPU_FUNCS(MVNLayer); - } // namespace caffe diff --git a/src/caffe/layers/reduction_layer.cu b/src/caffe/layers/reduction_layer.cu index 2dbd3bc9f94..301635d511d 100644 --- a/src/caffe/layers/reduction_layer.cu +++ b/src/caffe/layers/reduction_layer.cu @@ -7,84 +7,177 @@ namespace caffe { -template -void ReductionLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { +template +void ReductionLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* mult_data = NULL; - if (sum_multiplier_.count() > 0) { - mult_data = sum_multiplier_.gpu_data(); - } - Dtype* top_data = top[0]->mutable_cpu_data(); - for (int i = 0; i < num_; ++i) { - switch (op_) { - case ReductionParameter_ReductionOp_SUM: - case ReductionParameter_ReductionOp_MEAN: - caffe_gpu_dot(dim_, mult_data, bottom_data, top_data); - break; - case ReductionParameter_ReductionOp_ASUM: - caffe_gpu_asum(dim_, bottom_data, top_data); - break; - case ReductionParameter_ReductionOp_SUMSQ: - caffe_gpu_dot(dim_, bottom_data, bottom_data, top_data); - break; - default: - LOG(FATAL) << "Unknown reduction op: " + + int bottom_data_off = 0; + int top_data_off = 0; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (sum_multiplier_.count() > 0) { + mult_data = sum_multiplier_.gpu_data(); + } + Dtype* top_data = top[0]->mutable_cpu_data(); + for (int i = 0; i < num_; ++i) { + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + caffe_gpu_dot(dim_, mult_data, bottom_data + bottom_data_off, + top_data + top_data_off); + break; + case ReductionParameter_ReductionOp_ASUM: + caffe_gpu_asum(dim_, bottom_data + bottom_data_off, + top_data + top_data_off); + break; + case ReductionParameter_ReductionOp_SUMSQ: + caffe_gpu_dot(dim_, bottom_data + bottom_data_off, + bottom_data + bottom_data_off, top_data + top_data_off); + break; + default: + LOG(FATAL)<< "Unknown reduction op: " << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data_off += dim_; + ++top_data_off; + } + if (coeff_ != Dtype(1)) { + // Reset the top_data pointer. + top_data = top[0]->mutable_gpu_data(); + caffe_gpu_scal(num_, coeff_, top_data); } - bottom_data += dim_; - ++top_data; - } - if (coeff_ != Dtype(1)) { - // Reset the top_data pointer. - top_data = top[0]->mutable_gpu_data(); - caffe_gpu_scal(num_, coeff_, top_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + if (sum_multiplier_.count() > 0) { + mult_data = sum_multiplier_.gpu_data(); + } + Dtype* top_data = top[0]->mutable_cpu_data(); + for (int i = 0; i < num_; ++i) { + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + greentea_gpu_dot(this->device_context_.id(), dim_, + (cl_mem) mult_data, 0, (cl_mem) bottom_data, + bottom_data_off, top_data + top_data_off); + break; + case ReductionParameter_ReductionOp_ASUM: + greentea_gpu_asum(this->device_context_.id(), dim_, + (cl_mem) bottom_data, bottom_data_off, + top_data + top_data_off); + break; + case ReductionParameter_ReductionOp_SUMSQ: + greentea_gpu_dot(this->device_context_.id(), dim_, + (cl_mem) bottom_data, bottom_data_off, + (cl_mem) bottom_data, bottom_data_off, + top_data + top_data_off); + break; + default: + LOG(FATAL)<< "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data_off += dim_; + ++top_data_off; + } + if (coeff_ != Dtype(1)) { + // Reset the top_data pointer. + top_data = top[0]->mutable_gpu_data(); + greentea_gpu_scal(this->device_context_.id(), num_, coeff_, + (cl_mem) top_data, 0); + } +#endif // USE_GREENTEA } } -template +template void ReductionLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } + const vector& propagate_down, + const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } // Get bottom_data, if needed. const Dtype* bottom_data = NULL; switch (op_) { - // Operations that don't need bottom_data - case ReductionParameter_ReductionOp_SUM: - case ReductionParameter_ReductionOp_MEAN: - break; - // Operations that need bottom_data - case ReductionParameter_ReductionOp_ASUM: - case ReductionParameter_ReductionOp_SUMSQ: - bottom_data = bottom[0]->gpu_data(); - break; - default: - LOG(FATAL) << "Unknown reduction op: " - << ReductionParameter_ReductionOp_Name(op_); - } - const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - for (int i = 0; i < num_; ++i) { - const Dtype bottom_coeff = (*top_diff) * coeff_; - switch (op_) { + // Operations that don't need bottom_data case ReductionParameter_ReductionOp_SUM: case ReductionParameter_ReductionOp_MEAN: - caffe_gpu_set(dim_, bottom_coeff, bottom_diff); break; + // Operations that need bottom_data case ReductionParameter_ReductionOp_ASUM: - caffe_gpu_sign(dim_, bottom_data, bottom_diff); - caffe_gpu_scal(dim_, bottom_coeff, bottom_diff); - break; case ReductionParameter_ReductionOp_SUMSQ: - caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data, bottom_diff); + bottom_data = bottom[0]->gpu_data(); break; default: - LOG(FATAL) << "Unknown reduction op: " + LOG(FATAL)<< "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + + int bottom_data_off = 0; + int bottom_diff_off = 0; + int top_diff_off = 0; + + if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int i = 0; i < num_; ++i) { + const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_; + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + caffe_gpu_set(dim_, bottom_coeff, bottom_diff + bottom_diff_off); + break; + case ReductionParameter_ReductionOp_ASUM: + caffe_gpu_sign(dim_, bottom_data + bottom_data_off, bottom_diff + bottom_diff_off); + caffe_gpu_scal(dim_, bottom_coeff, bottom_diff + bottom_diff_off); + break; + case ReductionParameter_ReductionOp_SUMSQ: + caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data + bottom_data_off, bottom_diff + bottom_diff_off); + break; + default: + LOG(FATAL)<< "Unknown reduction op: " + << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data_off += dim_; + bottom_diff_off += dim_; + ++top_diff_off; + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + for (int i = 0; i < num_; ++i) { + const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_; + switch (op_) { + case ReductionParameter_ReductionOp_SUM: + case ReductionParameter_ReductionOp_MEAN: + greentea_gpu_set(this->device_context_.id(), dim_, + bottom_coeff, (cl_mem) bottom_diff, bottom_diff_off); + break; + case ReductionParameter_ReductionOp_ASUM: + greentea_gpu_sign(this->device_context_.id(), dim_, + (cl_mem) bottom_data, bottom_data_off, + (cl_mem) bottom_diff, bottom_diff_off); + greentea_gpu_scal(this->device_context_.id(), dim_, + bottom_coeff, (cl_mem) bottom_diff, bottom_diff_off); + break; + case ReductionParameter_ReductionOp_SUMSQ: + greentea_gpu_scale(this->device_context_.id(), dim_, + 2 * bottom_coeff, (cl_mem) bottom_data, bottom_data_off, + (cl_mem) bottom_diff, bottom_diff_off); + break; + default: + LOG(FATAL)<< "Unknown reduction op: " << ReductionParameter_ReductionOp_Name(op_); + } + bottom_data_off += dim_; + bottom_diff_off += dim_; + ++top_diff_off; } - bottom_data += dim_; - bottom_diff += dim_; - ++top_diff; +#endif // USE_GREENTEA } } From 835d218ee01027b4a2b7afc0d490c452b6cda5d7 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 15 Jun 2015 04:11:09 +0200 Subject: [PATCH 049/254] Partial bug fixes, interface changes, OpenCL testing WIP. --- include/caffe/blob.hpp | 3 +- .../greentea/greentea_math_functions.hpp | 8 +- include/caffe/syncedmem.hpp | 6 + .../caffe/test/test_gradient_check_util.hpp | 114 +++++++------ include/caffe/util/math_functions.hpp | 3 + src/caffe/blob.cpp | 8 +- src/caffe/common.cpp | 5 +- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/dropout.cl | 4 +- .../greentea/greentea_math_functions.cpp | 154 +++++++----------- src/caffe/layers/base_data_layer.cu | 8 +- src/caffe/layers/dropout_layer.cu | 4 +- src/caffe/layers/eltwise_layer.cu | 8 +- src/caffe/layers/hdf5_data_layer.cpp | 2 +- src/caffe/layers/hdf5_output_layer.cpp | 4 +- src/caffe/layers/hdf5_output_layer.cu | 3 +- src/caffe/layers/hinge_loss_layer.cpp | 2 +- src/caffe/layers/log_layer.cu | 4 +- src/caffe/layers/mvn_layer.cu | 4 +- src/caffe/layers/power_layer.cu | 6 +- src/caffe/layers/prelu_layer.cu | 2 +- .../sigmoid_cross_entropy_loss_layer.cu | 2 +- src/caffe/layers/softmax_layer.cu | 75 ++++----- src/caffe/layers/split_layer.cu | 4 +- src/caffe/solver.cpp | 12 +- src/caffe/test/test_im2col_kernel.cu | 56 ++++--- src/caffe/test/test_math_functions.cpp | 92 ++++++++++- src/caffe/test/test_neuron_layer.cpp | 10 +- .../test/test_random_number_generator.cpp | 46 +++++- src/caffe/test/test_syncedmem.cpp | 60 ++++++- src/caffe/test/test_util_blas.cpp | 102 ++++++++++-- src/caffe/util/benchmark.cpp | 12 +- src/caffe/util/math_functions.cpp | 14 ++ 33 files changed, 544 insertions(+), 297 deletions(-) diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 50d4e73f097..1033a36d8ec 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -35,7 +35,8 @@ class Blob { : data_(), diff_(), count_(0), - capacity_(0) { + capacity_(0), + device_context_(Caffe::GetDefaultDeviceContext()) { } explicit Blob(const int num, const int channels, const int height, const int width, DeviceContext device_context); diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 83ecde838a8..e052fc9c1cb 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -31,11 +31,15 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, cl_mem Y, const int offY, viennacl::ocl::context &ctx); template -void greentea_copy(const int N, const cl_mem X, cl_mem Y, +void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, viennacl::ocl::context &ctx); template -void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, +void greentea_copy(const int N, const cl_mem X, const int offX, Dtype* Y, + viennacl::ocl::context &ctx); + +template +void greentea_copy(const int N, const Dtype* X, cl_mem Y, const int offY, viennacl::ocl::context &ctx); template diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 160391d9cfb..59f1097c649 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -43,6 +43,9 @@ inline void CaffeFreeHost(void* ptr) { class SyncedMemory { public: #ifdef USE_GREENTEA + SyncedMemory() + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false), device_context_(Caffe::GetDefaultDeviceContext()), cl_gpu_mem_(NULL) {} SyncedMemory(DeviceContext device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), own_cpu_data_(false), device_context_(device_context), cl_gpu_mem_(NULL) {} @@ -50,6 +53,9 @@ class SyncedMemory { : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), own_cpu_data_(false), device_context_(device_context), cl_gpu_mem_(NULL) {} #else + SyncedMemory() + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false), device_context_(Caffe::GetDefaultDeviceContext()) {} SyncedMemory(DeviceContext device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), own_cpu_data_(false), device_context_(device_context) {} diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 63d9bd3e370..2e56c69a0d3 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -15,50 +15,59 @@ namespace caffe { // The gradient checker adds a L2 normalization loss function on top of the // top blobs, and checks the gradient. -template +template class GradientChecker { public: // kink and kink_range specify an ignored nonsmooth region of the form // kink - kink_range <= |feature value| <= kink + kink_range, // which accounts for all nonsmoothness in use by caffe GradientChecker(const Dtype stepsize, const Dtype threshold, - const unsigned int seed = 1701, const Dtype kink = 0., - const Dtype kink_range = -1) - : stepsize_(stepsize), threshold_(threshold), seed_(seed), - kink_(kink), kink_range_(kink_range) {} + const unsigned int seed = 1701, const Dtype kink = 0., + const Dtype kink_range = -1) + : stepsize_(stepsize), + threshold_(threshold), + seed_(seed), + kink_(kink), + kink_range_(kink_range) { + } // Checks the gradient of a layer, with provided bottom layers and top // layers. // Note that after the gradient check, we do not guarantee that the data // stored in the layer parameters and the blobs are unchanged. void CheckGradient(Layer* layer, const vector*>& bottom, - const vector*>& top, int check_bottom = -1) { - layer->SetUp(bottom, top); - CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1); + const vector*>& top, int check_bottom = -1) { + layer->SetUp(bottom, top); + CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1); } void CheckGradientExhaustive(Layer* layer, - const vector*>& bottom, const vector*>& top, - int check_bottom = -1); + const vector*>& bottom, + const vector*>& top, + int check_bottom = -1); // CheckGradientEltwise can be used to test layers that perform element-wise // computation only (e.g., neuron layers) -- where (d y_i) / (d x_j) = 0 when // i != j. void CheckGradientEltwise(Layer* layer, - const vector*>& bottom, const vector*>& top); + const vector*>& bottom, + const vector*>& top); void CheckGradientSingle(Layer* layer, - const vector*>& bottom, const vector*>& top, - int check_bottom, int top_id, int top_data_id, bool element_wise = false); + const vector*>& bottom, + const vector*>& top, int check_bottom, + int top_id, int top_data_id, bool element_wise = + false); // Checks the gradient of a network. This network should not have any data // layers or loss layers, since the function does not explicitly deal with // such cases yet. All input blobs and parameter blobs are going to be // checked, layer-by-layer to avoid numerical problems to accumulate. void CheckGradientNet(const Net& net, - const vector*>& input); + const vector*>& input); protected: Dtype GetObjAndGradient(const Layer& layer, - const vector*>& top, int top_id = -1, int top_data_id = -1); + const vector*>& top, int top_id = -1, + int top_data_id = -1); Dtype stepsize_; Dtype threshold_; unsigned int seed_; @@ -66,11 +75,11 @@ class GradientChecker { Dtype kink_range_; }; - -template -void GradientChecker::CheckGradientSingle(Layer* layer, - const vector*>& bottom, const vector*>& top, - int check_bottom, int top_id, int top_data_id, bool element_wise) { +template +void GradientChecker::CheckGradientSingle( + Layer* layer, const vector*>& bottom, + const vector*>& top, int check_bottom, int top_id, + int top_data_id, bool element_wise) { if (element_wise) { CHECK_EQ(0, layer->blobs().size()); CHECK_LE(0, top_id); @@ -107,17 +116,19 @@ void GradientChecker::CheckGradientSingle(Layer* layer, GetObjAndGradient(*layer, top, top_id, top_data_id); layer->Backward(top, propagate_down, bottom); // Store computed gradients for all checked blobs - vector > > - computed_gradient_blobs(blobs_to_check.size()); + vector > > computed_gradient_blobs( + blobs_to_check.size()); for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { Blob* current_blob = blobs_to_check[blob_id]; computed_gradient_blobs[blob_id].reset(new Blob()); - computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob, Caffe::GetDefaultDeviceContext()); + computed_gradient_blobs[blob_id]->ReshapeLike( + *current_blob, Caffe::GetDefaultDeviceContext()); const int count = blobs_to_check[blob_id]->count(); const Dtype* diff = blobs_to_check[blob_id]->cpu_diff(); - Dtype* computed_gradients = - computed_gradient_blobs[blob_id]->mutable_cpu_data(); - caffe_copy(count, diff, computed_gradients); + Dtype* computed_gradients = computed_gradient_blobs[blob_id] + ->mutable_cpu_data(); + + caffe_cpu_copy(count, diff, computed_gradients); } // Compute derivative of top w.r.t. each bottom and parameter input using // finite differencing. @@ -143,18 +154,18 @@ void GradientChecker::CheckGradientSingle(Layer* layer, current_blob->mutable_cpu_data()[feat_id] += stepsize_; Caffe::set_random_seed(seed_); layer->Forward(bottom, top); - positive_objective = - GetObjAndGradient(*layer, top, top_id, top_data_id); + positive_objective = GetObjAndGradient(*layer, top, top_id, + top_data_id); // Compute loss with stepsize_ subtracted from input. current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2; Caffe::set_random_seed(seed_); layer->Forward(bottom, top); - negative_objective = - GetObjAndGradient(*layer, top, top_id, top_data_id); + negative_objective = GetObjAndGradient(*layer, top, top_id, + top_data_id); // Recover original input value. current_blob->mutable_cpu_data()[feat_id] += stepsize_; - estimated_gradient = (positive_objective - negative_objective) / - stepsize_ / 2.; + estimated_gradient = (positive_objective - negative_objective) + / stepsize_ / 2.; } Dtype computed_gradient = computed_gradients[feat_id]; Dtype feature = current_blob->cpu_data()[feat_id]; @@ -167,11 +178,10 @@ void GradientChecker::CheckGradientSingle(Layer* layer, Dtype scale = std::max( std::max(fabs(computed_gradient), fabs(estimated_gradient)), 1.); EXPECT_NEAR(computed_gradient, estimated_gradient, threshold_ * scale) - << "debug: (top_id, top_data_id, blob_id, feat_id)=" - << top_id << "," << top_data_id << "," << blob_id << "," << feat_id - << "; feat = " << feature - << "; objective+ = " << positive_objective - << "; objective- = " << negative_objective; + << "debug: (top_id, top_data_id, blob_id, feat_id)=" << top_id + << "," << top_data_id << "," << blob_id << "," << feat_id + << "; feat = " << feature << "; objective+ = " << positive_objective + << "; objective- = " << negative_objective; } // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id]; // LOG(ERROR) << "computed gradient: " << computed_gradient @@ -180,12 +190,12 @@ void GradientChecker::CheckGradientSingle(Layer* layer, } } -template -void GradientChecker::CheckGradientExhaustive(Layer* layer, - const vector*>& bottom, const vector*>& top, - int check_bottom) { +template +void GradientChecker::CheckGradientExhaustive( + Layer* layer, const vector*>& bottom, + const vector*>& top, int check_bottom) { layer->SetUp(bottom, top); - CHECK_GT(top.size(), 0) << "Exhaustive mode requires at least one top blob."; + CHECK_GT(top.size(), 0)<< "Exhaustive mode requires at least one top blob."; // LOG(ERROR) << "Exhaustive Mode."; for (int i = 0; i < top.size(); ++i) { // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count(); @@ -196,11 +206,12 @@ void GradientChecker::CheckGradientExhaustive(Layer* layer, } } -template -void GradientChecker::CheckGradientEltwise(Layer* layer, - const vector*>& bottom, const vector*>& top) { +template +void GradientChecker::CheckGradientEltwise( + Layer* layer, const vector*>& bottom, + const vector*>& top) { layer->SetUp(bottom, top); - CHECK_GT(top.size(), 0) << "Eltwise mode requires at least one top blob."; + CHECK_GT(top.size(), 0)<< "Eltwise mode requires at least one top blob."; const int check_bottom = -1; const bool element_wise = true; for (int i = 0; i < top.size(); ++i) { @@ -210,7 +221,7 @@ void GradientChecker::CheckGradientEltwise(Layer* layer, } } -template +template void GradientChecker::CheckGradientNet( const Net& net, const vector*>& input) { const vector > >& layers = net.layers(); @@ -218,14 +229,15 @@ void GradientChecker::CheckGradientNet( vector*> >& top_vecs = net.top_vecs(); for (int i = 0; i < layers.size(); ++i) { net.Forward(input); - LOG(ERROR) << "Checking gradient for " << layers[i]->layer_param().name(); + LOG(ERROR)<< "Checking gradient for " << layers[i]->layer_param().name(); CheckGradientExhaustive(*(layers[i].get()), bottom_vecs[i], top_vecs[i]); } } -template +template Dtype GradientChecker::GetObjAndGradient(const Layer& layer, - const vector*>& top, int top_id, int top_data_id) { + const vector*>& top, + int top_id, int top_data_id) { Dtype loss = 0; if (top_id < 0) { // the loss will be half of the sum of squares of all outputs @@ -238,7 +250,7 @@ Dtype GradientChecker::GetObjAndGradient(const Layer& layer, loss += top_blob_data[j] * top_blob_data[j]; } // set the diff: simply the data. - caffe_copy(top_blob->count(), top_blob_data, top_blob_diff); + caffe_cpu_copy(top_blob->count(), top_blob_data, top_blob_diff); } loss /= 2.; } else { diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 2c79f89f2b3..9d1e99fc306 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -33,6 +33,9 @@ template void caffe_cpu_axpby(const int N, const Dtype alpha, const Dtype* X, const Dtype beta, Dtype* Y); +template +void caffe_cpu_copy(const int N, const Dtype* X, Dtype* Y); + template void caffe_copy(const int N, const Dtype *X, Dtype *Y); diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 2dfce802648..da3723ff0c5 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -534,13 +534,13 @@ void Blob::CopyFrom(const Blob& source, DeviceContext device_context, #ifdef USE_GREENTEA if (copy_diff) { greentea_copy( - count_, (cl_mem) (source.gpu_diff()), - (cl_mem) (diff_->mutable_gpu_data()), + count_, (cl_mem) (source.gpu_diff()),0, + (cl_mem) (diff_->mutable_gpu_data()),0, viennacl::ocl::get_context(device_context_.id())); } else { greentea_copy( - count_, (cl_mem) (source.gpu_data()), - (cl_mem) (data_->mutable_gpu_data()), + count_, (cl_mem) (source.gpu_data()),0, + (cl_mem) (data_->mutable_gpu_data()),0, viennacl::ocl::get_context(device_context_.id())); } #endif diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 9c6ee45a0c6..c9fd79dbde8 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -140,14 +140,15 @@ void Caffe::set_random_seed(const unsigned int seed) { g_curand_availability_logged = true; } } - // RNG seed - Get().random_generator_.reset(new RNG(seed)); + #endif // USE_CUDA } else { #ifdef USE_GREENTEA // TODO: Proper RNG and Seed for OpenCL #endif // USE_GREENTEA } + // RNG seed + Get().random_generator_.reset(new RNG(seed)); } void Caffe::EnumerateDevices() { diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 6f84d0b89f3..f37a212902b 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -13,7 +13,7 @@ std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; -std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const float scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const float scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; +std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; @@ -32,7 +32,7 @@ std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; -std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const float scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const float scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; +std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; diff --git a/src/caffe/greentea/cl_kernels/dropout.cl b/src/caffe/greentea/cl_kernels/dropout.cl index 9295892cadc..acb79214b41 100644 --- a/src/caffe/greentea/cl_kernels/dropout.cl +++ b/src/caffe/greentea/cl_kernels/dropout.cl @@ -6,7 +6,7 @@ __kernel void TEMPLATE(dropout_forward,Dtype)(const int n, __global const Dtype* in, __global const unsigned int* mask, const unsigned int threshold, - const float scale, + const Dtype scale, __global Dtype* out) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = in[index] * (mask[index] > threshold) * scale; @@ -16,7 +16,7 @@ __kernel void TEMPLATE(dropout_forward,Dtype)(const int n, __kernel void TEMPLATE(dropout_backward,Dtype)( const int n, __global const Dtype* in_diff, __global const unsigned int* mask, const unsigned int threshold, - const float scale, + const Dtype scale, __global Dtype* out_diff) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 9f977ae6bc3..8774b7cf776 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -92,48 +92,63 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, ctx.get_queue().finish(); } -// Copy from OpenCL buffer to OpenCL buffer template -void greentea_copy(const int N, const cl_mem X, cl_mem Y, +void greentea_copy(const int N, const cl_mem X, const int offX, Dtype* Y, viennacl::ocl::context &ctx) { - if (X != Y) { - clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, 0, 0, - sizeof(Dtype) * N, 0, NULL, NULL); - } + greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX, Y, ctx); ctx.get_queue().finish(); } -// Explicit instantiations -template void greentea_copy(const int N, const cl_mem X, cl_mem Y, - viennacl::ocl::context &ctx); -template void greentea_copy(const int N, const cl_mem X, cl_mem Y, - viennacl::ocl::context &ctx); -template void greentea_copy(const int N, const cl_mem X, cl_mem Y, - viennacl::ocl::context &ctx); -template void greentea_copy(const int N, const cl_mem X, cl_mem Y, - viennacl::ocl::context &ctx); +template +void greentea_copy(const int N, const Dtype* X, cl_mem Y, const int offY, + viennacl::ocl::context &ctx) { + greentea_gpu_memcpy(sizeof(Dtype) * N, X, Y, offY, ctx); + ctx.get_queue().finish(); +} // Copy from OpenCL buffer to OpenCL buffer template -void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx) { +void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, + const int offY, viennacl::ocl::context &ctx) { if (X != Y) { - clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, offX, offY, - sizeof(Dtype) * N, 0, NULL, NULL); + greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX, Y, offY, ctx); } ctx.get_queue().finish(); } // Explicit instantiations -template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, +template void greentea_copy(const int N, const cl_mem X, const int offX, + int* Y, viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, + const int offX, unsigned int* Y, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, + float* Y, viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, + double* Y, viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const int* X, cl_mem Y, + const int offY, viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const unsigned int* X, + cl_mem Y, const int offY, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const float* X, cl_mem Y, + const int offY, viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const double* X, cl_mem Y, + const int offY, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, + cl_mem Y, const int offY, viennacl::ocl::context &ctx); - +template void greentea_copy(const int N, const cl_mem X, + const int offX, cl_mem Y, + const int offY, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, + cl_mem Y, const int offY, + viennacl::ocl::context &ctx); +template void greentea_copy(const int N, const cl_mem X, const int offX, + cl_mem Y, const int offY, + viennacl::ocl::context &ctx); template void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, @@ -281,10 +296,10 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasSgemv(clOrder,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDgemv(clOrder,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasDgemv(clOrder,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); } #endif } @@ -623,12 +638,12 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, GREENTEA_VCL_BLAS_CHECK( ViennaCLOpenCLScopy(backend, n, X, offX, 1, Y, offY, 1)); GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSscal(backend, n, alpha, X, offX, 1)); + ViennaCLOpenCLSscal(backend, n, alpha, Y, offY, 1)); } else { GREENTEA_VCL_BLAS_CHECK( ViennaCLOpenCLDcopy(backend, n, X, offX, 1, Y, offY, 1)); GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDscal(backend, n, alpha, X, offX, 1)); + ViennaCLOpenCLDscal(backend, n, alpha, Y, offY, 1)); } #endif @@ -639,11 +654,11 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( clblasScopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - GREENTEA_CL_BLAS_CHECK(clblasSscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasSscal(n,alpha,Y,offY,1,1,&queue,0,NULL,NULL)); } else { GREENTEA_CL_BLAS_CHECK( clblasDcopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - GREENTEA_CL_BLAS_CHECK(clblasDscal(n,alpha,X,offX,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasDscal(n,alpha,Y,offY,1,1,&queue,0,NULL,NULL)); } #endif } @@ -820,27 +835,23 @@ template void greentea_gpu_powx(const int ctx_id, const int N, template void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, - const int offy) { + const int offa, cl_mem y, const int offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); - viennacl::ocl::kernel &oclk_log = program.get_kernel( - CL_KERNEL_SELECT("log")); + viennacl::ocl::kernel &oclk_log = program.get_kernel(CL_KERNEL_SELECT("log")); viennacl::ocl::enqueue( oclk_log(N, WrapHandle(a, ctx), offa, WrapHandle(y, ctx), offy), ctx.get_queue()); } template void greentea_gpu_log(const int ctx_id, const int N, - const cl_mem a, const int offa, - cl_mem y, - const int offy); + const cl_mem a, const int offa, cl_mem y, + const int offy); template void greentea_gpu_log(const int ctx_id, const int N, - const cl_mem a, const int offa, - cl_mem y, - const int offy); + const cl_mem a, const int offa, cl_mem y, + const int offy); template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, @@ -886,52 +897,18 @@ template void greentea_gpu_sgnbit(const int ctx_id, const int n, void greentea_gpu_rng_uniform(const int ctx_id, const int n, cl_mem r, int offr) { - - struct timeval start_time; - gettimeofday(&start_time, NULL); -#ifdef __APPLE__ - std::seed_seq seq {(int)(start_time.tv_sec), (int)(start_time.tv_usec)}; -#else - std::seed_seq seq { start_time.tv_sec, start_time.tv_usec }; -#endif - std::mt19937_64 generator(seq); - std::uniform_int_distribution distribution(0, UINT32_MAX); - std::function rndfunc = std::bind(distribution, generator); - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - std::vector random(n); - - for (int i = 0; i < n; ++i) { - random[i] = rndfunc(); - } - + caffe_gpu_rng_uniform(n, &random[0]); greentea_gpu_memcpy(sizeof(unsigned int) * n, &random[0], r, offr, ctx); } template void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, const Dtype b, cl_mem r, const int offr) { - - struct timeval start_time; - gettimeofday(&start_time, NULL); -#ifdef __APPLE__ - std::seed_seq seq {(int)(start_time.tv_sec), (int)(start_time.tv_usec)}; -#else - std::seed_seq seq { start_time.tv_sec, start_time.tv_usec }; -#endif - std::mt19937_64 generator(seq); - std::uniform_real_distribution distribution(a, b); - std::function rndfunc = std::bind(distribution, generator); - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - std::vector random(n); - - for (int i = 0; i < n; ++i) { - random[i] = rndfunc(); - } - + caffe_rng_uniform(n, a, b, &random[0]); greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, ctx); } @@ -945,26 +922,9 @@ template void greentea_gpu_rng_uniform(const int ctx_id, const int n, template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, const Dtype sigma, cl_mem r, const int offr) { - - struct timeval start_time; - gettimeofday(&start_time, NULL); -#ifdef __APPLE__ - std::seed_seq seq {(int)(start_time.tv_sec), (int)(start_time.tv_usec)}; -#else - std::seed_seq seq { start_time.tv_sec, start_time.tv_usec }; -#endif - std::mt19937_64 generator(seq); - std::normal_distribution distribution(mu, sigma); - std::function rndfunc = std::bind(distribution, generator); - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - std::vector random(n); - - for (int i = 0; i < n; ++i) { - random[i] = rndfunc(); - } - + caffe_rng_gaussian(n, mu, sigma, &random[0]); greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, ctx); } diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 83ef49c49e8..9ff417f2fc8 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -33,14 +33,14 @@ void BasePrefetchingDataLayer::Forward_gpu( // Reshape to loaded data. top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); // Copy the data - greentea_copy(prefetch_data_.count(), (cl_mem)(prefetch_data_.cpu_data()), - (cl_mem)(top[0]->mutable_gpu_data()), ctx); + greentea_copy(prefetch_data_.count(), (cl_mem)(prefetch_data_.gpu_data()),0, + (cl_mem)(top[0]->mutable_gpu_data()),0, ctx); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(prefetch_label_, this->device_context_); // Copy the labels. - greentea_copy(prefetch_label_.count(), (cl_mem)(prefetch_label_.cpu_data()), - (cl_mem)(top[1]->mutable_gpu_data()), ctx); + greentea_copy(prefetch_label_.count(), (cl_mem)(prefetch_label_.gpu_data()),0, + (cl_mem)(top[1]->mutable_gpu_data()),0, ctx); } #endif // USE_GREENTEA } diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index 34cfcc33fe8..a66870fddf6 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -64,7 +64,7 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, WrapHandle((cl_mem) top_data, ctx)), ctx.get_queue()); } else { - greentea_copy(count, (cl_mem) bottom_data, (cl_mem) top_data, ctx); + greentea_copy(count, (cl_mem) bottom_data,0, (cl_mem) top_data,0, ctx); } #endif // USE_GREENTEA } @@ -126,7 +126,7 @@ void DropoutLayer::Backward_gpu(const vector*>& top, WrapHandle((cl_mem) bottom_diff, ctx)), ctx.get_queue()); } else { - greentea_copy(top[0]->count(), (cl_mem) top_diff, (cl_mem) bottom_diff, ctx); + greentea_copy(top[0]->count(), (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, ctx); } #endif // USE_GREENTEA } diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 62693823e17..8e64e2a39f0 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -219,7 +219,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, continue; } if (!initialized) { - greentea_copy(count, (cl_mem)(bottom[j]->gpu_data()), (cl_mem)(bottom_diff), ctx); + greentea_copy(count, (cl_mem)(bottom[j]->gpu_data()),0, (cl_mem)(bottom_diff), 0, ctx); initialized = true; } else { greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)bottom[j]->gpu_data(),0, (cl_mem)bottom_diff,0, @@ -234,7 +234,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, break; case EltwiseParameter_EltwiseOp_SUM: { if (coeffs_[i] == Dtype(1.)) { - greentea_copy(count, (cl_mem)top_diff, (cl_mem)bottom_diff,ctx); + greentea_copy(count, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0,ctx); } else { greentea_gpu_scale(count, coeffs_[i],0, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0); } @@ -243,11 +243,11 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, case EltwiseParameter_EltwiseOp_MAX: { mask = max_idx_.gpu_data(); - viennacl::ocl::kernel &oclk_max_forward = program.get_kernel( + viennacl::ocl::kernel &oclk_max_backward = program.get_kernel( CL_KERNEL_SELECT("eltwise_max_backward")); viennacl::ocl::enqueue( - oclk_max_forward(count, WrapHandle((cl_mem)top_diff,ctx),i, WrapHandle((cl_mem)mask,ctx), 0, WrapHandle((cl_mem)bottom_diff,ctx)), + oclk_max_backward(count, WrapHandle((cl_mem)top_diff,ctx),i, WrapHandle((cl_mem)mask,ctx), WrapHandle((cl_mem)bottom_diff,ctx)), ctx.get_queue()); } break; diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index e75a051bd43..331af7de779 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -150,7 +150,7 @@ void HDF5DataLayer::Forward_cpu(const vector*>& bottom, } for (int j = 0; j < this->layer_param_.top_size(); ++j) { int data_dim = top[j]->count() / top[j]->shape(0); - caffe_copy(data_dim, + caffe_cpu_copy(data_dim, &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] * data_dim], &top[j]->mutable_cpu_data()[i * data_dim]); } diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index 9f797cce357..644e74ec8ef 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -53,9 +53,9 @@ void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); for (int i = 0; i < bottom[0]->num(); ++i) { - caffe_copy(data_datum_dim, &bottom[0]->cpu_data()[i * data_datum_dim], + caffe_cpu_copy(data_datum_dim, &bottom[0]->cpu_data()[i * data_datum_dim], &data_blob_.mutable_cpu_data()[i * data_datum_dim]); - caffe_copy(label_datum_dim, &bottom[1]->cpu_data()[i * label_datum_dim], + caffe_cpu_copy(label_datum_dim, &bottom[1]->cpu_data()[i * label_datum_dim], &label_blob_.mutable_cpu_data()[i * label_datum_dim]); } SaveBlobs(); diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index c68bd677ecc..410849e0985 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -33,14 +33,13 @@ void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], &label_blob_.mutable_cpu_data()[i * label_datum_dim]); } + SaveBlobs(); #endif // USE_CUDA } else { #ifdef USE_GREENTEA Forward_cpu(bottom, top); #endif // USE_GREENTEA } - - SaveBlobs(); } template diff --git a/src/caffe/layers/hinge_loss_layer.cpp b/src/caffe/layers/hinge_loss_layer.cpp index a2fb2a18309..ab94b84ccc4 100644 --- a/src/caffe/layers/hinge_loss_layer.cpp +++ b/src/caffe/layers/hinge_loss_layer.cpp @@ -20,7 +20,7 @@ void HingeLossLayer::Forward_cpu(const vector*>& bottom, int count = bottom[0]->count(); int dim = count / num; - caffe_copy(count, bottom_data, bottom_diff); + caffe_cpu_copy(count, bottom_data, bottom_diff); for (int i = 0; i < num; ++i) { bottom_diff[i * dim + static_cast(label[i])] *= -1; } diff --git a/src/caffe/layers/log_layer.cu b/src/caffe/layers/log_layer.cu index 924ef7ee6b5..3c998781af1 100644 --- a/src/caffe/layers/log_layer.cu +++ b/src/caffe/layers/log_layer.cu @@ -41,7 +41,7 @@ void LogLayer::Forward_gpu(const vector*>& bottom, greentea_gpu_log(this->device_context_.id(), count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); } else { - greentea_copy(count, (cl_mem) bottom_data, (cl_mem) top_data, ctx); + greentea_copy(count, (cl_mem) bottom_data,0, (cl_mem) top_data,0, ctx); if (input_scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, input_scale_, (cl_mem) top_data, 0); @@ -94,7 +94,7 @@ void LogLayer::Backward_gpu(const vector*>& top, viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); - greentea_copy(count, (cl_mem) bottom_data, (cl_mem) bottom_diff, + greentea_copy(count, (cl_mem) bottom_data,0, (cl_mem) bottom_diff,0, ctx); if (input_scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, input_scale_, diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 74a1e6cc188..f32b149eff9 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -243,8 +243,8 @@ void MVNLayer::Backward_gpu(const vector*>& top, greentea_gpu_div(this->device_context_.id(), temp_.count(), (cl_mem)bottom_diff,0, (cl_mem)(temp_.gpu_data()),0, (cl_mem)bottom_diff,0); } else { - greentea_copy(temp_.count(), (cl_mem)top_diff, - (cl_mem)bottom_diff, ctx); + greentea_copy(temp_.count(), (cl_mem)top_diff, 0, + (cl_mem)bottom_diff, 0, ctx); } #endif // USE_GREENTEA } diff --git a/src/caffe/layers/power_layer.cu b/src/caffe/layers/power_layer.cu index 05376d38cb2..1b85f1eeba5 100644 --- a/src/caffe/layers/power_layer.cu +++ b/src/caffe/layers/power_layer.cu @@ -50,7 +50,7 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, return; } const Dtype* bottom_data = bottom[0]->gpu_data(); - caffe_copy(count, bottom_data, top_data); + greentea_copy(count, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, ctx); if (scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, scale_, (cl_mem) top_data, 0); @@ -155,8 +155,8 @@ void PowerLayer::Backward_gpu(const vector*>& top, greentea_gpu_scal(this->device_context_.id(), count, power_, (cl_mem) bottom_diff, 0); } else { - greentea_copy(count, (cl_mem) bottom_data, - (cl_mem) bottom_diff, ctx); + greentea_copy(count, (cl_mem) bottom_data,0, + (cl_mem) bottom_diff,0, ctx); if (scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, scale_, (cl_mem) bottom_diff, 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index b087ec2c278..13701a6df95 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -81,7 +81,7 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, this->device_context_.id()); if (top[0] == bottom[0]) { - greentea_copy(count, (cl_mem)bottom_data, (cl_mem)(bottom_memory_.mutable_gpu_data()), ctx); + greentea_copy(count, (cl_mem)bottom_data,0, (cl_mem)(bottom_memory_.mutable_gpu_data()),0, ctx); } viennacl::ocl::kernel &oclk_prelu = program.get_kernel( diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 603ce4770ce..3113f7518e8 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -43,7 +43,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( this->device_context_.id()); // First, compute the diff - greentea_copy(count, (cl_mem)sigmoid_output_data, (cl_mem)bottom_diff, ctx); + greentea_copy(count, (cl_mem)sigmoid_output_data, 0, (cl_mem)bottom_diff, 0, ctx); greentea_gpu_axpy(this->device_context_.id(), count, Dtype(-1), (cl_mem)target,0, (cl_mem)bottom_diff,0); // Scale down gradient const Dtype loss_weight = top[0]->cpu_diff()[0]; diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 52621689ee8..284af3220f1 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -105,16 +105,18 @@ template void SoftmaxLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + Dtype* scale_data = scale_.mutable_gpu_data(); + int count = bottom[0]->count(); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + int spatial_dim = bottom[0]->height() * bottom[0]->width(); + + if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // CUDA backend code - const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - Dtype* scale_data = scale_.mutable_gpu_data(); - int count = bottom[0]->count(); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - int spatial_dim = bottom[0]->height() * bottom[0]->width(); caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. @@ -151,21 +153,13 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_.id()); - const cl_mem bottom_data = (cl_mem) (bottom[0]->gpu_data()); - cl_mem top_data = (cl_mem) (top[0]->mutable_gpu_data()); - cl_mem scale_data = (cl_mem) (scale_.mutable_gpu_data()); - int count = bottom[0]->count(); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - int spatial_dim = bottom[0]->height() * bottom[0]->width(); - - greentea_copy(count, bottom_data, top_data, ctx); + greentea_copy(count, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, ctx); viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_max")); viennacl::ocl::enqueue( - oclk_channel_max(num, channels, spatial_dim, WrapHandle(top_data, ctx), - WrapHandle(scale_data, ctx)), + oclk_channel_max(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, ctx), + WrapHandle((cl_mem)scale_data, ctx)), ctx.get_queue()); @@ -173,24 +167,24 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, - WrapHandle(scale_data, ctx), - WrapHandle(top_data, ctx)), + WrapHandle((cl_mem)scale_data, ctx), + WrapHandle((cl_mem)top_data, ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_exp = program.get_kernel( CL_KERNEL_SELECT("kernel_exp")); viennacl::ocl::enqueue( - oclk_exp(num * channels * spatial_dim, WrapHandle(top_data, ctx), - WrapHandle(top_data, ctx)), + oclk_exp(num * channels * spatial_dim, WrapHandle((cl_mem)top_data, ctx), + WrapHandle((cl_mem)top_data, ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_sum")); viennacl::ocl::enqueue( - oclk_channel_sum(num, channels, spatial_dim, WrapHandle(top_data, ctx), - WrapHandle(scale_data, ctx)), + oclk_channel_sum(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, ctx), + WrapHandle((cl_mem)scale_data, ctx)), ctx.get_queue()); @@ -198,8 +192,8 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("kernel_channel_div")); viennacl::ocl::enqueue( oclk_channel_div(count, num, channels, spatial_dim, - WrapHandle(scale_data, ctx), - WrapHandle(top_data, ctx)), + WrapHandle((cl_mem)scale_data, ctx), + WrapHandle((cl_mem)top_data, ctx)), ctx.get_queue()); @@ -215,13 +209,13 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, int num = top[0]->num(); int channels = top[0]->channels(); int spatial_dim = top[0]->height() * top[0]->width(); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + Dtype* scale_data = scale_.mutable_gpu_data(); if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* top_data = top[0]->gpu_data(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - Dtype* scale_data = scale_.mutable_gpu_data(); caffe_copy(top[0]->count(), top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) @@ -237,39 +231,32 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, #endif } else { #ifdef USE_GREENTEA - const cl_mem top_diff = (cl_mem) (top[0]->gpu_diff()); - const cl_mem top_data = (cl_mem) (top[0]->gpu_data()); - cl_mem bottom_diff = (cl_mem) (bottom[0]->mutable_gpu_diff()); - cl_mem scale_data = (cl_mem) (scale_.mutable_gpu_data()); viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_.id()); - greentea_copy(top[0]->count(), top_diff, bottom_diff, ctx); - + greentea_copy(top[0]->count(), (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, ctx); viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_dot")); viennacl::ocl::enqueue( - oclk_channel_dot(count, num, channels, spatial_dim, - WrapHandle(top_diff, ctx), WrapHandle(top_data, ctx), - WrapHandle(scale_data, ctx)), + oclk_channel_dot(num, channels, spatial_dim, + WrapHandle((cl_mem)top_diff, ctx), WrapHandle((cl_mem)top_data, ctx), + WrapHandle((cl_mem)scale_data, ctx)), ctx.get_queue()); - viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, - WrapHandle(scale_data, ctx), - WrapHandle(bottom_diff, ctx)), + WrapHandle((cl_mem)scale_data, ctx), + WrapHandle((cl_mem)bottom_diff, ctx)), ctx.get_queue()); - greentea_gpu_mul(this->device_context_.id(), top[0]->count(), - bottom_diff, 0, top_data, 0, bottom_diff, 0); + (cl_mem)bottom_diff, 0, (cl_mem)top_data, 0, (cl_mem)bottom_diff, 0); #endif } diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu index 0e6833b8e28..882390eff95 100644 --- a/src/caffe/layers/split_layer.cu +++ b/src/caffe/layers/split_layer.cu @@ -45,8 +45,8 @@ void SplitLayer::Backward_gpu(const vector*>& top, this->device_context_.id()); if (top.size() == 1) { - greentea_copy(count_, (cl_mem) (top[0]->gpu_diff()), - (cl_mem) (bottom[0]->mutable_gpu_diff()), ctx); + greentea_copy(count_, (cl_mem) (top[0]->gpu_diff()), 0, + (cl_mem) (bottom[0]->mutable_gpu_diff()), 0, ctx); return; } greentea_gpu_add(this->device_context_.id(), count_, diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 26c745f3e6c..2be26a94e0b 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -669,8 +669,8 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { momentum, (cl_mem) (history_[param_id]->mutable_gpu_data()), 0); greentea_copy( net_params[param_id]->count(), - (cl_mem) (history_[param_id]->gpu_data()), - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); + (cl_mem) (history_[param_id]->gpu_data()),0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()),0, ctx); #endif // USE_GREENTEA } #else @@ -764,8 +764,8 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { // save history momentum for stepping back greentea_copy( net_params[param_id]->count(), - (cl_mem) (this->history_[param_id]->gpu_data()), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), ctx); + (cl_mem) (this->history_[param_id]->gpu_data()),0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()),0, ctx); // update history greentea_gpu_axpby( @@ -784,8 +784,8 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { // copy greentea_copy( net_params[param_id]->count(), - (cl_mem) (this->update_[param_id]->gpu_data()), - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), ctx); + (cl_mem) (this->update_[param_id]->gpu_data()),0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()),0, ctx); #endif // USE_GREENTEA } #else diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index fa249f2f8e7..0e375fca446 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -20,6 +20,7 @@ namespace caffe { // Forward declare kernel functions +#ifdef USE_CUDA template __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, @@ -29,6 +30,7 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; +#endif // USE_CUDA template class Im2colKernelTest : public GPUDeviceTest { @@ -98,33 +100,39 @@ TYPED_TEST(Im2colKernelTest, TestGPU) { cpu_data + this->blob_top_cpu_->offset(n)); } - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - int grid_dim = default_grid_dim/grid_div; - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( - num_kernels, bottom_data + this->blob_bottom_->offset(n), - this->height_, this->width_, this->kernel_size_, this->kernel_size_, - this->pad_, this->pad_, this->stride_, this->stride_, - this->height_col_, this->width_col_, - top_data + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; - } + DeviceContext cid = Caffe::GetDefaultDeviceContext(); + + if(cid.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // GPU version + int num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + + // Launch with different grid sizes + for (int grid_div = 2; grid_div <= 8; grid_div++) { + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + int grid_dim = default_grid_dim/grid_div; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( + num_kernels, bottom_data + this->blob_bottom_->offset(n), + this->height_, this->width_, this->kernel_size_, this->kernel_size_, + this->pad_, this->pad_, this->stride_, this->stride_, + this->height_col_, this->width_col_, + top_data + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; + } - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = cpu_data[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; + // Compare results against CPU version + for (int i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = cpu_data[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; + } } } +#endif // USE_CUDA } } diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 0af0dd31edb..26dc0891ff5 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -13,6 +13,11 @@ #include "caffe/test/test_caffe_main.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { template @@ -178,14 +183,38 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { std_asum += std::fabs(x[i]); } TypeParam gpu_asum; - caffe_gpu_asum(n, this->blob_bottom_->gpu_data(), &gpu_asum); + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_asum(n, this->blob_bottom_->gpu_data(), &gpu_asum); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_asum(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, &gpu_asum); +#endif // USE_GREENTEA + } EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } TYPED_TEST(GPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); - caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_sign(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, + (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); +#endif // USE_GREENTEA + } + const TypeParam* signs = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); for (int i = 0; i < n; ++i) { @@ -195,8 +224,20 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { int n = this->blob_bottom_->count(); - caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_sgnbit(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); +#endif // USE_GREENTEA + } + const TypeParam* signbits = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); for (int i = 0; i < n; ++i) { @@ -206,8 +247,21 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { TYPED_TEST(GPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); - caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_abs(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, + (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); +#endif // USE_GREENTEA + } + const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); for (int i = 0; i < n; ++i) { @@ -219,8 +273,20 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; - caffe_gpu_scale(n, alpha, this->blob_bottom_->gpu_data(), + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_scale(n, alpha, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_scale(dc.id(), n, alpha, (cl_mem)(this->blob_bottom_->gpu_data()),0, + (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); +#endif // USE_GREENTEA + } + const TypeParam* scaled = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); for (int i = 0; i < n; ++i) { @@ -232,7 +298,21 @@ TYPED_TEST(GPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - caffe_copy(n, bottom_data, top_data); + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + if (dc.backend() == BACKEND_CUDA) { + #ifdef USE_CUDA + caffe_copy(n, bottom_data, top_data); + #endif // USE_CUDA + } else { + #ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + dc.id()); + + greentea_copy(n, (cl_mem)bottom_data,0, (cl_mem)top_data,0, ctx); + #endif // USE_GREENTEA + } + bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); for (int i = 0; i < n; ++i) { diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 4f7adc4477a..c1651d8dc3b 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -621,9 +621,9 @@ TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(tmp_blob.get()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + caffe_cpu_copy(blob_top_2->count(), tmp_blob->cpu_data(), this->blob_top_->mutable_cpu_diff()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + caffe_cpu_copy(blob_top_2->count(), tmp_blob->cpu_data(), blob_top_2->mutable_cpu_diff()); vector propagate_down; propagate_down.push_back(true); @@ -663,7 +663,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { prelu.SetUp(this->blob_top_vec_, this->blob_top_vec_); ip2.SetUp(blob_bottom_vec_2, blob_middle_vec_2); prelu2.SetUp(blob_middle_vec_2, blob_top_vec_2); - caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), + caffe_cpu_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), ip2.blobs()[0]->mutable_cpu_data()); // Forward in-place ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -681,9 +681,9 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(tmp_blob.get()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + caffe_cpu_copy(blob_top_2->count(), tmp_blob->cpu_data(), this->blob_top_->mutable_cpu_diff()); - caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(), + caffe_cpu_copy(blob_top_2->count(), tmp_blob->cpu_data(), blob_top_2->mutable_cpu_diff()); // Backward in-place vector propagate_down; diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 49c6d06e565..c7dafea439e 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -9,6 +9,11 @@ #include "caffe/test/test_caffe_main.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { template @@ -173,21 +178,54 @@ class RandomNumberGeneratorTest : public ::testing::Test { void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu_data) { Dtype* rng_data = static_cast(gpu_data); - caffe_gpu_rng_gaussian(sample_size_, mu, sigma, rng_data); + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_rng_gaussian(sample_size_, mu, sigma, rng_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_rng_gaussian(dc.id(), sample_size_, mu, sigma, (cl_mem)rng_data, 0); +#endif // USE_GREENTEA + } } void RngUniformFillGPU(const Dtype lower, const Dtype upper, void* gpu_data) { CHECK_GE(upper, lower); Dtype* rng_data = static_cast(gpu_data); - caffe_gpu_rng_uniform(sample_size_, lower, upper, rng_data); + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_rng_uniform(sample_size_, lower, upper, rng_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_rng_uniform(dc.id(), sample_size_, lower, upper, (cl_mem)rng_data, 0); +#endif // USE_GREENTEA + } + } // Fills with uniform integers in [0, UINT_MAX] using 2 argument form of // caffe_gpu_rng_uniform. void RngUniformIntFillGPU(void* gpu_data) { unsigned int* rng_data = static_cast(gpu_data); - caffe_gpu_rng_uniform(sample_size_, rng_data); - } + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_rng_uniform(sample_size_, rng_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_rng_uniform(dc.id(), sample_size_, (cl_mem)rng_data, 0); +#endif // USE_GREENTEA + } +} #endif diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index 6b07cc1b12d..2994e6d234c 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -10,6 +10,11 @@ #include "caffe/test/test_caffe_main.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { class SyncedMemoryTest : public ::testing::Test {}; @@ -79,7 +84,21 @@ TEST_F(SyncedMemoryTest, TestGPURead) { EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); // check if values are the same char* recovered_value = new char[10]; - caffe_gpu_memcpy(10, gpu_data, recovered_value); + + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_memcpy(10, gpu_data, recovered_value); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); + greentea_gpu_memcpy(10, (cl_mem)gpu_data,0, recovered_value, ctx); +#endif // USE_GREENTEA + } + for (int i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(recovered_value))[i], 1); } @@ -93,7 +112,18 @@ TEST_F(SyncedMemoryTest, TestGPURead) { gpu_data = mem.gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); // check if values are the same - caffe_gpu_memcpy(10, gpu_data, recovered_value); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_memcpy(10, gpu_data, recovered_value); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); + greentea_gpu_memcpy(10, (cl_mem)gpu_data,0, recovered_value, ctx); +#endif // USE_GREENTEA + } + for (int i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(recovered_value))[i], 2); } @@ -104,7 +134,19 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); void* gpu_data = mem.mutable_gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); - caffe_gpu_memset(mem.size(), 1, gpu_data); + + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_memset(mem.size(), 1, gpu_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_memset(dc.id(), mem.size(), 1, (cl_mem)gpu_data, 0); +#endif // USE_GREENTEA + } + const void* cpu_data = mem.cpu_data(); for (int i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(cpu_data))[i], 1); @@ -113,7 +155,17 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { gpu_data = mem.mutable_gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); - caffe_gpu_memset(mem.size(), 2, gpu_data); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_memset(mem.size(), 2, gpu_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_memset(dc.id(), mem.size(), 2, (cl_mem)gpu_data, 0); +#endif // USE_GREENTEA + } + cpu_data = mem.cpu_data(); for (int i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(cpu_data))[i], 2); diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 38a343ed00e..6cf2cf175b4 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -20,6 +20,8 @@ class GemmTest : public ::testing::Test {}; TYPED_TEST_CASE(GemmTest, TestDtypes); TYPED_TEST(GemmTest, TestGemmCPUGPU) { + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); Blob B(1, 1, 3, 4, Caffe::GetDefaultDeviceContext()); Blob C(1, 1, 2, 4, Caffe::GetDefaultDeviceContext()); @@ -27,18 +29,41 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6}; TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12}; TypeParam result[8] = {38, 44, 50, 56, 83, 98, 113, 128}; - caffe_copy(6, data, A.mutable_cpu_data()); - caffe_copy(12, data, B.mutable_cpu_data()); - if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_copy(6, data, A.mutable_cpu_data()); + caffe_copy(12, data, B.mutable_cpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); + greentea_copy(6, data, (cl_mem)(A.mutable_cpu_data()),0, ctx); + greentea_copy(12, data, (cl_mem)(B.mutable_cpu_data()),0, ctx); +#endif // USE_GREENTEA + } + + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2 || dc.backend() == BACKEND_OpenCL) { // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); +#endif // USE_GREENTEA + } + for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -51,8 +76,19 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasTrans, CblasNoTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); +#endif // USE_GREENTEA + } + for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -79,8 +115,19 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } - caffe_gpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); +#endif // USE_GREENTEA + } + for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -91,23 +138,47 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { TYPED_TEST(GemmTest, TestGemvCPUGPU) { + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); Blob x(1, 1, 1, 3, Caffe::GetDefaultDeviceContext()); Blob y(1, 1, 1, 2, Caffe::GetDefaultDeviceContext()); TypeParam data[6] = {1, 2, 3, 4, 5, 6}; TypeParam result_2[2] = {14, 32}; TypeParam result_3[3] = {9, 12, 15}; - caffe_copy(6, data, A.mutable_cpu_data()); - caffe_copy(3, data, x.mutable_cpu_data()); - if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_copy(6, data, A.mutable_cpu_data()); + caffe_copy(3, data, x.mutable_cpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); + greentea_copy(6, data, (cl_mem)(A.mutable_cpu_data()),0, ctx); + greentea_copy(3, data, (cl_mem)(x.mutable_cpu_data()),0, ctx); +#endif // USE_GREENTEA + } + + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2 || dc.backend() == BACKEND_OpenCL) { caffe_cpu_gemv(CblasNoTrans, 2, 3, 1., A.cpu_data(), x.cpu_data(), 0., y.mutable_cpu_data()); for (int i = 0; i < 2; ++i) { EXPECT_EQ(y.cpu_data()[i], result_2[i]); } - caffe_gpu_gemv(CblasNoTrans, 2, 3, 1., A.gpu_data(), + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemv(CblasNoTrans, 2, 3, 1., A.gpu_data(), x.gpu_data(), 0., y.mutable_gpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemv(dc.id(), CblasNoTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, + (cl_mem)(y.gpu_data()),0, 0., (cl_mem)(x.mutable_gpu_data()),0); +#endif // USE_GREENTEA + } + for (int i = 0; i < 2; ++i) { EXPECT_EQ(y.cpu_data()[i], result_2[i]); } @@ -119,8 +190,19 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { for (int i = 0; i < 3; ++i) { EXPECT_EQ(x.cpu_data()[i], result_3[i]); } - caffe_gpu_gemv(CblasTrans, 2, 3, 1., A.gpu_data(), + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemv(CblasTrans, 2, 3, 1., A.gpu_data(), y.gpu_data(), 0., x.mutable_gpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemv(dc.id(), CblasTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, + (cl_mem)(y.gpu_data()),0, 0., (cl_mem)(x.mutable_gpu_data()),0); +#endif // USE_GREENTEA + } + for (int i = 0; i < 3; ++i) { EXPECT_EQ(x.cpu_data()[i], result_3[i]); } diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index 7222f60b884..69ef23afec8 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -13,7 +13,7 @@ Timer::Timer() } Timer::~Timer() { - if (Caffe::mode() == Caffe::GPU) { + if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventDestroy(start_gpu_)); @@ -27,7 +27,7 @@ Timer::~Timer() { void Timer::Start() { if (!running()) { - if (Caffe::mode() == Caffe::GPU) { + if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(start_gpu_, 0)); @@ -45,7 +45,7 @@ void Timer::Start() { void Timer::Stop() { if (running()) { - if (Caffe::mode() == Caffe::GPU) { + if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); @@ -70,7 +70,7 @@ float Timer::MicroSeconds() { if (running()) { Stop(); } - if (Caffe::mode() == Caffe::GPU) { + if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, @@ -95,7 +95,7 @@ float Timer::MilliSeconds() { if (running()) { Stop(); } - if (Caffe::mode() == Caffe::GPU) { + if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, @@ -116,7 +116,7 @@ float Timer::Seconds() { void Timer::Init() { if (!initted()) { - if (Caffe::mode() == Caffe::GPU) { + if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventCreate(&start_gpu_)); diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index a337027ad3c..b6a151df3b6 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -82,6 +82,20 @@ void caffe_add_scalar(const int N, const double alpha, double* Y) { } } + +template +void caffe_cpu_copy(const int N, const Dtype* X, Dtype* Y) { + if (X != Y) { + memcpy(Y, X, sizeof(Dtype) * N); + } +} + +template void caffe_cpu_copy(const int N, const int* X, int* Y); +template void caffe_cpu_copy(const int N, const unsigned int* X, + unsigned int* Y); +template void caffe_cpu_copy(const int N, const float* X, float* Y); +template void caffe_cpu_copy(const int N, const double* X, double* Y); + template void caffe_copy(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { From 1bbb95474014cbe74e0b94c5284e1ffbf4d0e2fc Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 15 Jun 2015 14:51:50 +0200 Subject: [PATCH 050/254] Further bugfixes. --- include/caffe/util/math_functions.hpp | 2 + .../greentea/greentea_math_functions.cpp | 22 ++++----- src/caffe/syncedmem.cpp | 5 ++ src/caffe/test/test_common.cpp | 28 ++++++----- src/caffe/test/test_util_blas.cpp | 49 ++++++++----------- src/caffe/util/math_functions.cpp | 11 +++++ 6 files changed, 64 insertions(+), 53 deletions(-) diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 9d1e99fc306..e8d5d4ba92d 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -75,6 +75,8 @@ unsigned int caffe_rng_rand(); template Dtype caffe_nextafter(const Dtype b); +void caffe_rng_uniform(const int n, unsigned int* r); + template void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 8774b7cf776..4aafa1a21c6 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -16,7 +16,6 @@ #include #include #include -#include #include "caffe/common.hpp" @@ -271,24 +270,21 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, ViennaCLBackendSetOpenCLContextID(backend, static_cast(ctx_id)); - ViennaCLOrder vclOrder = ViennaCLRowMajor; ViennaCLTranspose vclTransA = (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; if (std::is_same::value) { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSgemv(backend, vclOrder, vclTransA, M, N, alpha, A, + ViennaCLOpenCLSgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, A, offA, 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); } else { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDgemv(backend, vclOrder, vclTransA, M, N, alpha, A, + ViennaCLOpenCLDgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, A, offA, 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); } #endif #ifdef USE_CLBLAS - - clblasOrder clOrder = clblasRowMajor; clblasTranspose clTransA = (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; @@ -296,10 +292,10 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSgemv(clOrder,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasSgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDgemv(clOrder,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasDgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); } #endif } @@ -511,10 +507,10 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, if (std::is_same::value) { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSdot(backend, n, out, X, offX, 1, Y, offY, 1)); + ViennaCLOpenCLSdot(backend, n, (float*)out, X, offX, 1, Y, offY, 1)); } else { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDdot(backend, n, out, X, offX, 1, Y, offY, 1)); + ViennaCLOpenCLDdot(backend, n, (double*)out, X, offX, 1, Y, offY, 1)); } #endif @@ -574,9 +570,9 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, static_cast(ctx_id)); if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSasum(backend, n, Y, X, offX, 1)); + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSasum(backend, n, (float*)Y, X, offX, 1)); } else { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDasum(backend, n, Y, X, offX, 1)); + GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDasum(backend, n, (double*)Y, X, offX, 1)); } #endif @@ -899,7 +895,7 @@ void greentea_gpu_rng_uniform(const int ctx_id, const int n, cl_mem r, int offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); std::vector random(n); - caffe_gpu_rng_uniform(n, &random[0]); + caffe_rng_uniform(n, &random[0]); greentea_gpu_memcpy(sizeof(unsigned int) * n, &random[0], r, offr, ctx); } diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index db04214f6a6..5c8a8112774 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -54,10 +54,12 @@ inline void SyncedMemory::to_cpu() { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_.id()); + ctx.get_queue().finish(); // On the CPU, memory is shared (and no copy needed) if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, 0, cpu_ptr_, ctx); } + ctx.get_queue().finish(); #endif } head_ = SYNCED; @@ -85,6 +87,7 @@ inline void SyncedMemory::to_gpu() { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_.id()); + ctx.get_queue().finish(); cl_int err; if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { // CPU memory is shared @@ -120,6 +123,7 @@ inline void SyncedMemory::to_gpu() { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_.id()); + ctx.get_queue().finish(); if (gpu_ptr_ == NULL) { cl_int err; if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -141,6 +145,7 @@ inline void SyncedMemory::to_gpu() { if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, ctx); } + ctx.get_queue().finish(); #endif // USE_GREENTEA } head_ = SYNCED; diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index aa911c03f6d..a77dcef1b85 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -47,17 +47,23 @@ TEST_F(CommonTest, TestRandSeedCPU) { #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestRandSeedGPU) { - SyncedMemory data_a(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); - SyncedMemory data_b(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); - Caffe::set_random_seed(1701); - CURAND_CHECK(curandGenerate(Caffe::curand_generator(), - static_cast(data_a.mutable_gpu_data()), 10)); - Caffe::set_random_seed(1701); - CURAND_CHECK(curandGenerate(Caffe::curand_generator(), - static_cast(data_b.mutable_gpu_data()), 10)); - for (int i = 0; i < 10; ++i) { - EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i], - ((const unsigned int*)(data_b.cpu_data()))[i]); + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + + if(dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + SyncedMemory data_a(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); + SyncedMemory data_b(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); + Caffe::set_random_seed(1701); + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), + static_cast(data_a.mutable_gpu_data()), 10)); + Caffe::set_random_seed(1701); + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), + static_cast(data_b.mutable_gpu_data()), 10)); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i], + ((const unsigned int*)(data_b.cpu_data()))[i]); + } +#endif // USE_CUDA } } diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 6cf2cf175b4..3d497f7bb71 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -30,18 +30,8 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12}; TypeParam result[8] = {38, 44, 50, 56, 83, 98, 113, 128}; - if (dc.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_copy(6, data, A.mutable_cpu_data()); - caffe_copy(12, data, B.mutable_cpu_data()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); - greentea_copy(6, data, (cl_mem)(A.mutable_cpu_data()),0, ctx); - greentea_copy(12, data, (cl_mem)(B.mutable_cpu_data()),0, ctx); -#endif // USE_GREENTEA - } + caffe_cpu_copy(6, data, A.mutable_cpu_data()); + caffe_cpu_copy(12, data, B.mutable_cpu_data()); if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2 || dc.backend() == BACKEND_OpenCL) { // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; @@ -70,7 +60,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { // Test when we have a transposed A A.Reshape(1, 1, 3, 2, Caffe::GetDefaultDeviceContext()); - caffe_copy(6, A_reshape_data, A.mutable_cpu_data()); + caffe_cpu_copy(6, A_reshape_data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); for (int i = 0; i < 8; ++i) { @@ -95,21 +85,32 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { // Test when we have a transposed A and a transposed B too B.Reshape(1, 1, 4, 3, Caffe::GetDefaultDeviceContext()); - caffe_copy(12, B_reshape_data, B.mutable_cpu_data()); + caffe_cpu_copy(12, B_reshape_data, B.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } - caffe_gpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., + + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasTrans, CblasTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); +#endif // USE_GREENTEA + } + for (int i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } // Test when we have a transposed B A.Reshape(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); - caffe_copy(6, data, A.mutable_cpu_data()); + caffe_cpu_copy(6, data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); for (int i = 0; i < 8; ++i) { @@ -147,18 +148,8 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { TypeParam result_2[2] = {14, 32}; TypeParam result_3[3] = {9, 12, 15}; - if (dc.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_copy(6, data, A.mutable_cpu_data()); - caffe_copy(3, data, x.mutable_cpu_data()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); - greentea_copy(6, data, (cl_mem)(A.mutable_cpu_data()),0, ctx); - greentea_copy(3, data, (cl_mem)(x.mutable_cpu_data()),0, ctx); -#endif // USE_GREENTEA - } + caffe_cpu_copy(6, data, A.mutable_cpu_data()); + caffe_cpu_copy(3, data, x.mutable_cpu_data()); if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2 || dc.backend() == BACKEND_OpenCL) { caffe_cpu_gemv(CblasNoTrans, 2, 3, 1., A.cpu_data(), @@ -184,7 +175,7 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { } // Test transpose case - caffe_copy(2, data, y.mutable_cpu_data()); + caffe_cpu_copy(2, data, y.mutable_cpu_data()); caffe_cpu_gemv(CblasTrans, 2, 3, 1., A.cpu_data(), y.cpu_data(), 0., x.mutable_cpu_data()); for (int i = 0; i < 3; ++i) { diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index b6a151df3b6..c5c26b34758 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -258,6 +258,17 @@ float caffe_nextafter(const float b); template double caffe_nextafter(const double b); +void caffe_rng_uniform(const int n, unsigned int* r) { + CHECK_GE(n, 0); + CHECK(r); + boost::uniform_int random_distribution(0, UINT32_MAX); + boost::variate_generator> + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = variate_generator(); + } +} + template void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r) { CHECK_GE(n, 0); From e83dd2cf0677fa370651372864e5b34754877d50 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 16 Jun 2015 04:41:52 +0200 Subject: [PATCH 051/254] All but one tests passed by OpenCL backend. Simplified DeviceContext parameter passing. --- include/caffe/blob.hpp | 23 ++-- include/caffe/layer.hpp | 2 +- .../caffe/test/test_gradient_check_util.hpp | 2 +- src/caffe/blob.cpp | 41 +++--- src/caffe/data_transformer.cpp | 6 +- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/fillbuffer.cl | 7 + .../greentea/greentea_math_functions.cpp | 50 +++---- src/caffe/layers/accuracy_layer.cpp | 2 +- src/caffe/layers/argmax_layer.cpp | 4 +- src/caffe/layers/base_conv_layer.cpp | 8 +- src/caffe/layers/base_data_layer.cpp | 4 +- src/caffe/layers/base_data_layer.cu | 8 +- src/caffe/layers/concat_layer.cpp | 2 +- src/caffe/layers/contrastive_loss_layer.cpp | 8 +- src/caffe/layers/conv_sk_layer.cpp | 7 +- src/caffe/layers/data_layer.cpp | 14 +- src/caffe/layers/dropout_layer.cpp | 2 +- src/caffe/layers/dummy_data_layer.cpp | 4 +- src/caffe/layers/eltwise_layer.cpp | 4 +- src/caffe/layers/eltwise_layer.cu | 4 +- src/caffe/layers/euclidean_loss_layer.cpp | 2 +- src/caffe/layers/filter_layer.cpp | 2 +- src/caffe/layers/flatten_layer.cpp | 2 +- src/caffe/layers/hdf5_data_layer.cpp | 2 +- src/caffe/layers/hdf5_output_layer.cpp | 4 +- src/caffe/layers/hdf5_output_layer.cu | 6 +- src/caffe/layers/im2col_layer.cpp | 2 +- src/caffe/layers/image_data_layer.cpp | 14 +- src/caffe/layers/infogain_loss_layer.cpp | 2 +- src/caffe/layers/inner_product_layer.cpp | 4 +- src/caffe/layers/loss_layer.cpp | 2 +- src/caffe/layers/lrn_layer.cpp | 4 +- src/caffe/layers/memory_data_layer.cpp | 24 ++-- src/caffe/layers/mergecrop_layer.cpp | 2 +- src/caffe/layers/mvn_layer.cpp | 10 +- src/caffe/layers/neuron_layer.cpp | 2 +- src/caffe/layers/pooling_layer.cpp | 8 +- src/caffe/layers/pooling_sk_layer.cpp | 8 +- src/caffe/layers/prelu_layer.cpp | 8 +- src/caffe/layers/reduction_layer.cpp | 4 +- src/caffe/layers/reshape_layer.cpp | 2 +- src/caffe/layers/slice_layer.cpp | 4 +- src/caffe/layers/softmax_layer.cpp | 6 +- src/caffe/layers/softmax_loss_layer.cpp | 2 +- src/caffe/layers/split_layer.cpp | 2 +- src/caffe/layers/window_data_layer.cpp | 10 +- src/caffe/net.cpp | 38 +++--- src/caffe/solver.cpp | 2 +- src/caffe/test/test_accuracy_layer.cpp | 8 +- src/caffe/test/test_argmax_layer.cpp | 2 +- src/caffe/test/test_blob.cpp | 12 +- src/caffe/test/test_common.cpp | 10 +- src/caffe/test/test_concat_layer.cpp | 6 +- .../test/test_contrastive_loss_layer.cpp | 6 +- src/caffe/test/test_convolution_layer.cpp | 16 +-- src/caffe/test/test_data_transformer.cpp | 16 +-- src/caffe/test/test_deconvolution_layer.cpp | 4 +- src/caffe/test/test_eltwise_layer.cpp | 6 +- src/caffe/test/test_euclidean_loss_layer.cpp | 4 +- src/caffe/test/test_filler.cpp | 12 +- src/caffe/test/test_filter_layer.cpp | 6 +- src/caffe/test/test_flatten_layer.cpp | 2 +- src/caffe/test/test_gradient_based_solver.cpp | 6 +- src/caffe/test/test_hinge_loss_layer.cpp | 4 +- src/caffe/test/test_im2col_kernel.cu | 6 +- src/caffe/test/test_im2col_layer.cpp | 2 +- src/caffe/test/test_infogain_loss_layer.cpp | 6 +- src/caffe/test/test_inner_product_layer.cpp | 14 +- src/caffe/test/test_lrn_layer.cpp | 4 +- src/caffe/test/test_math_functions.cpp | 4 +- .../test/test_maxpool_dropout_layers.cpp | 4 +- src/caffe/test/test_memory_data_layer.cpp | 4 +- src/caffe/test/test_mergecrop_layer.cpp | 123 ------------------ .../test_multinomial_logistic_loss_layer.cpp | 4 +- src/caffe/test/test_mvn_layer.cpp | 2 +- src/caffe/test/test_net.cpp | 40 +++--- src/caffe/test/test_neuron_layer.cpp | 10 +- src/caffe/test/test_pooling_layer.cpp | 12 +- src/caffe/test/test_power_layer.cpp | 2 +- src/caffe/test/test_reduction_layer.cpp | 2 +- src/caffe/test/test_reshape_layer.cpp | 4 +- .../test_sigmoid_cross_entropy_loss_layer.cpp | 4 +- src/caffe/test/test_slice_layer.cpp | 4 +- src/caffe/test/test_softmax_layer.cpp | 2 +- .../test/test_softmax_with_loss_layer.cpp | 4 +- src/caffe/test/test_split_layer.cpp | 2 +- src/caffe/test/test_spp_layer.cpp | 6 +- src/caffe/test/test_stochastic_pooling.cpp | 2 +- src/caffe/test/test_tanh_layer.cpp | 2 +- src/caffe/test/test_threshold_layer.cpp | 2 +- src/caffe/test/test_util_blas.cpp | 8 +- src/caffe/util/io.cpp | 2 +- src/caffe/util/math_functions.cpp | 2 +- 94 files changed, 334 insertions(+), 453 deletions(-) delete mode 100644 src/caffe/test/test_mergecrop_layer.cpp diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 1033a36d8ec..ebae1086e7a 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -38,9 +38,16 @@ class Blob { capacity_(0), device_context_(Caffe::GetDefaultDeviceContext()) { } + explicit Blob(DeviceContext device_context) + : data_(), + diff_(), + count_(0), + capacity_(0), + device_context_(device_context) { + } explicit Blob(const int num, const int channels, const int height, - const int width, DeviceContext device_context); - explicit Blob(const vector& shape, DeviceContext device_context); + const int width, DeviceContext device_context = Caffe::GetDefaultDeviceContext()); + explicit Blob(const vector& shape, DeviceContext device_context = Caffe::GetDefaultDeviceContext()); /** * @brief Change the dimensions of the blob, allocating new memory if @@ -56,11 +63,11 @@ class Blob { * an error; either Net::Forward or Net::Reshape need to be called to * propagate the new input shape to higher layers. */ - void Reshape(const vector& shape, DeviceContext device_context); - void Reshape(const BlobShape& shape, DeviceContext device_context); + void Reshape(const vector& shape); + void Reshape(const BlobShape& shape); void Reshape(const int num, const int channels, const int height, - const int width, DeviceContext device_context); - void ReshapeLike(const Blob& other, DeviceContext device_context); + const int width); + void ReshapeLike(const Blob& other); inline string shape_string() const { ostringstream stream; for (int i = 0; i < shape_.size(); ++i) { @@ -199,7 +206,7 @@ class Blob { * of other (and die otherwise); if true, Reshape this Blob to other's * shape if necessary */ - void CopyFrom(const Blob& source, DeviceContext device_context, bool copy_diff = false, + void CopyFrom(const Blob& source, bool copy_diff = false, bool reshape = false); inline Dtype data_at(const int n, const int c, const int h, @@ -240,7 +247,7 @@ class Blob { Dtype* mutable_cpu_diff(); Dtype* mutable_gpu_diff(); void Update(); - void FromProto(const BlobProto& proto, DeviceContext device_context, bool reshape = true); + void FromProto(const BlobProto& proto, bool reshape = true); void ToProto(BlobProto* proto, bool write_diff = false) const; /// @brief Compute the sum of absolute values (L1 norm) of the data. diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index c6041264af5..6bed9aa9e7d 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -43,7 +43,7 @@ class Layer { blobs_.resize(layer_param_.blobs_size()); for (int i = 0; i < layer_param_.blobs_size(); ++i) { blobs_[i].reset(new Blob()); - blobs_[i]->FromProto(layer_param_.blobs(i), device_context_); + blobs_[i]->FromProto(layer_param_.blobs(i)); } } } diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 2e56c69a0d3..c94bb2e1479 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -122,7 +122,7 @@ void GradientChecker::CheckGradientSingle( Blob* current_blob = blobs_to_check[blob_id]; computed_gradient_blobs[blob_id].reset(new Blob()); computed_gradient_blobs[blob_id]->ReshapeLike( - *current_blob, Caffe::GetDefaultDeviceContext()); + *current_blob); const int count = blobs_to_check[blob_id]->count(); const Dtype* diff = blobs_to_check[blob_id]->cpu_diff(); Dtype* computed_gradients = computed_gradient_blobs[blob_id] diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index da3723ff0c5..2b2fe1cb20e 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -15,23 +15,20 @@ namespace caffe { template void Blob::Reshape(const int num, const int channels, const int height, - const int width, DeviceContext device_context) { + const int width) { vector shape(4); shape[0] = num; shape[1] = channels; shape[2] = height; shape[3] = width; - device_context_ = device_context; - Reshape(shape, device_context); + Reshape(shape); } template -void Blob::Reshape(const vector& shape, - DeviceContext device_context) { +void Blob::Reshape(const vector& shape) { CHECK_LE(shape.size(), kMaxBlobAxes); count_ = 1; shape_.resize(shape.size()); - device_context_ = device_context; for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); CHECK_LE(shape[i], INT_MAX / count_)<< "blob size exceeds INT_MAX"; @@ -46,21 +43,18 @@ void Blob::Reshape(const vector& shape, } template -void Blob::Reshape(const BlobShape& shape, - DeviceContext device_context) { +void Blob::Reshape(const BlobShape& shape) { CHECK_LE(shape.dim_size(), kMaxBlobAxes); vector shape_vec(shape.dim_size()); - device_context_ = device_context; for (int i = 0; i < shape.dim_size(); ++i) { shape_vec[i] = shape.dim(i); } - Reshape(shape_vec, device_context_); + Reshape(shape_vec); } template -void Blob::ReshapeLike(const Blob& other, - DeviceContext device_context) { - Reshape(other.shape(), device_context); +void Blob::ReshapeLike(const Blob& other) { + Reshape(other.shape()); } template @@ -69,7 +63,7 @@ Blob::Blob(const int num, const int channels, const int height, // capacity_ must be initialized before calling Reshape : capacity_(0), device_context_(device_context) { - Reshape(num, channels, height, width, device_context); + Reshape(num, channels, height, width); } template @@ -77,7 +71,7 @@ Blob::Blob(const vector& shape, DeviceContext device_context) // capacity_ must be initialized before calling Reshape : capacity_(0), device_context_(device_context) { - Reshape(shape, device_context_); + Reshape(shape); } template @@ -508,14 +502,11 @@ bool Blob::ShapeEquals(const BlobProto& other) { } template -void Blob::CopyFrom(const Blob& source, DeviceContext device_context, - bool copy_diff, bool reshape) { - - device_context_ = device_context; +void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { if (source.count() != count_ || source.shape() != shape_) { if (reshape) { - ReshapeLike(source, device_context_); + ReshapeLike(source); } else { LOG(FATAL)<< "Trying to copy blobs of different sizes."; } @@ -549,10 +540,10 @@ void Blob::CopyFrom(const Blob& source, DeviceContext device_context, } case Caffe::CPU: { if (copy_diff) { - caffe_copy(count_, source.cpu_diff(), + caffe_cpu_copy(count_, source.cpu_diff(), static_cast(diff_->mutable_cpu_data())); } else { - caffe_copy(count_, source.cpu_data(), + caffe_cpu_copy(count_, source.cpu_data(), static_cast(data_->mutable_cpu_data())); } break; @@ -563,9 +554,7 @@ void Blob::CopyFrom(const Blob& source, DeviceContext device_context, } template -void Blob::FromProto(const BlobProto& proto, - DeviceContext device_context, bool reshape) { - device_context_ = device_context; +void Blob::FromProto(const BlobProto& proto, bool reshape) { if (reshape) { vector shape; if (proto.has_num() || proto.has_channels() || proto.has_height() @@ -583,7 +572,7 @@ void Blob::FromProto(const BlobProto& proto, shape[i] = proto.shape().dim(i); } } - Reshape(shape, device_context_); + Reshape(shape); } else { CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)"; } diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index d1922dbe047..838a4075458 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -23,7 +23,7 @@ DataTransformer::DataTransformer(const TransformationParameter& param, LOG(INFO) << "Loading mean file from: " << mean_file; BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); - data_mean_.FromProto(blob_proto, device_context_); + data_mean_.FromProto(blob_proto); } // check if we want to use mean_value if (param_.mean_value_size() > 0) { @@ -335,10 +335,10 @@ void DataTransformer::Transform(Blob* input_blob, // Initialize transformed_blob with the right shape. if (crop_size) { transformed_blob->Reshape(input_num, input_channels, - crop_size, crop_size, this->device_context_); + crop_size, crop_size); } else { transformed_blob->Reshape(input_num, input_channels, - input_height, input_width, this->device_context_); + input_height, input_width); } } diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index f37a212902b..7a2b3b14837 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -15,7 +15,7 @@ std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"hea std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; -std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; +std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; @@ -34,7 +34,7 @@ std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"he std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; -std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; +std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; diff --git a/src/caffe/greentea/cl_kernels/fillbuffer.cl b/src/caffe/greentea/cl_kernels/fillbuffer.cl index 31423e32507..be95994518f 100644 --- a/src/caffe/greentea/cl_kernels/fillbuffer.cl +++ b/src/caffe/greentea/cl_kernels/fillbuffer.cl @@ -2,6 +2,13 @@ #include "header.cl" #endif +__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + __kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, const int offx) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 4aafa1a21c6..7fa9aa2b259 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -51,11 +51,10 @@ void greentea_memset(const int ctx_id, const size_t N, const int alpha, // OpenCL Version < 1.2 fallback typedef float Dtype; viennacl::ocl::kernel &oclk_fill = program.get_kernel( - CL_KERNEL_SELECT("fill")); + CL_KERNEL_SELECT("fillbuffer")); viennacl::ocl::enqueue( - oclk_fill(int(N / sizeof(Dtype)), Dtype(alpha), WrapHandle(X, ctx), offX), + oclk_fill((int) N, (unsigned char) (alpha), WrapHandle(X, ctx), offX), ctx.get_queue()); - ctx.get_queue().finish(); } // Copy from OpenCL buffer to main memory @@ -67,7 +66,6 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, NULL, NULL); } - ctx.get_queue().finish(); } // Copy from main memory to OpenCL buffer @@ -78,7 +76,6 @@ void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, CL_TRUE, offY, N, X, 0, NULL, NULL); } - ctx.get_queue().finish(); } // Copy from OpenCL to OpenCL buffer @@ -88,31 +85,26 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, offX, offY, N, 0, NULL, NULL); - ctx.get_queue().finish(); } template void greentea_copy(const int N, const cl_mem X, const int offX, Dtype* Y, viennacl::ocl::context &ctx) { - greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX, Y, ctx); - ctx.get_queue().finish(); + greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX * sizeof(Dtype), Y, ctx); } template void greentea_copy(const int N, const Dtype* X, cl_mem Y, const int offY, viennacl::ocl::context &ctx) { - greentea_gpu_memcpy(sizeof(Dtype) * N, X, Y, offY, ctx); - ctx.get_queue().finish(); + greentea_gpu_memcpy(sizeof(Dtype) * N, X, Y, offY * sizeof(Dtype), ctx); } // Copy from OpenCL buffer to OpenCL buffer template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, viennacl::ocl::context &ctx) { - if (X != Y) { - greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX, Y, offY, ctx); - } - ctx.get_queue().finish(); + greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX * sizeof(Dtype), Y, + offY * sizeof(Dtype), ctx); } // Explicit instantiations @@ -262,8 +254,6 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, yptr + offy); } else { - int lda = (TransA == CblasNoTrans) ? N : M; - #ifdef USE_VIENNACLBLAS ViennaCLBackend backend; ViennaCLBackendCreate(&backend); @@ -275,12 +265,14 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, A, - offA, 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); + ViennaCLOpenCLSgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, + A, offA, 0, 1, 1, N, x, offx, 1, beta, y, offy, + 1)); } else { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, A, - offA, 0, 1, 1, lda, x, offx, 1, beta, y, offy, 1)); + ViennaCLOpenCLDgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, + A, offA, 0, 1, 1, N, x, offx, 1, beta, y, offy, + 1)); } #endif @@ -292,10 +284,10 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasSgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,lda,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasDgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); } #endif } @@ -507,10 +499,10 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, if (std::is_same::value) { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSdot(backend, n, (float*)out, X, offX, 1, Y, offY, 1)); + ViennaCLOpenCLSdot(backend, n, (float* )out, X, offX, 1, Y, offY, 1)); } else { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDdot(backend, n, (double*)out, X, offX, 1, Y, offY, 1)); + ViennaCLOpenCLDdot(backend, n, (double* )out, X, offX, 1, Y, offY, 1)); } #endif @@ -533,7 +525,6 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, out, ctx); - ctx.get_queue().finish(); clReleaseMemObject(gpuout); clReleaseMemObject(scratch); @@ -570,9 +561,11 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, static_cast(ctx_id)); if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLSasum(backend, n, (float*)Y, X, offX, 1)); + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLSasum(backend, n, (float* )Y, X, offX, 1)); } else { - GREENTEA_VCL_BLAS_CHECK(ViennaCLOpenCLDasum(backend, n, (double*)Y, X, offX, 1)); + GREENTEA_VCL_BLAS_CHECK( + ViennaCLOpenCLDasum(backend, n, (double* )Y, X, offX, 1)); } #endif @@ -595,7 +588,6 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, Y, ctx); - ctx.get_queue().finish(); clReleaseMemObject(gpuout); clReleaseMemObject(scratch); #endif @@ -684,8 +676,6 @@ void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, CL_KERNEL_SELECT("fill")); viennacl::ocl::enqueue(oclk_fill(N, alpha, WrapHandle(Y, ctx), offY), ctx.get_queue()); - - ctx.get_queue().finish(); } template void greentea_gpu_set(const int ctx_id, const int N, diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index 49093830739..90aad675ed3 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -37,7 +37,7 @@ void AccuracyLayer::Reshape( << "label count (number of labels) must be N*H*W, " << "with integer values in {0, 1, ..., C-1}."; vector top_shape(0); // Accuracy is a scalar; 0 axes. - top[0]->Reshape(top_shape,this->device_context_); + top[0]->Reshape(top_shape); } template diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index 8a5ace322e6..c4040cdcaaa 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -23,10 +23,10 @@ void ArgMaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { if (out_max_val_) { // Produces max_ind and max_val - top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1,this->device_context_); + top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); } else { // Produces only max_ind - top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1,this->device_context_); + top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); } } diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 13042eab3c8..affa8d2579c 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -129,7 +129,7 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, // Shape the tops. compute_output_shape(); for (int top_id = 0; top_id < top.size(); ++top_id) { - top[top_id]->Reshape(num_, num_output_, height_out_, width_out_, this->device_context_); + top[top_id]->Reshape(num_, num_output_, height_out_, width_out_); } if (reverse_dimensions()) { conv_in_height_ = height_out_; @@ -148,14 +148,14 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, // overly large memory usage. In the special case of 1x1 convolution // it goes lazily unused to save memory. if (reverse_dimensions()) { - col_buffer_.Reshape(1, kernel_dim_, height_, width_, this->device_context_); + col_buffer_.Reshape(1, kernel_dim_, height_, width_); } else { - col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_, this->device_context_); + col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_); } // Set up the all ones "bias multiplier" for adding biases by BLAS if (bias_term_) { vector bias_multiplier_shape(1, height_out_ * width_out_); - bias_multiplier_.Reshape(bias_multiplier_shape, this->device_context_); + bias_multiplier_.Reshape(bias_multiplier_shape); caffe_set(bias_multiplier_.count(), Dtype(1), bias_multiplier_.mutable_cpu_data()); } diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 8f6a56a3fce..859cd75d52e 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -62,14 +62,14 @@ void BasePrefetchingDataLayer::Forward_cpu( JoinPrefetchThread(); DLOG(INFO) << "Thread joined"; // Reshape to loaded data. - top[0]->ReshapeLike(prefetch_data_, this->device_context_); + top[0]->ReshapeLike(prefetch_data_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_, this->device_context_); + top[1]->ReshapeLike(prefetch_label_); // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_cpu_data()); diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 9ff417f2fc8..801e38ce92e 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -13,13 +13,13 @@ void BasePrefetchingDataLayer::Forward_gpu( if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Reshape to loaded data. - top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); + top[0]->ReshapeLike(this->prefetch_data_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_, this->device_context_); + top[1]->ReshapeLike(prefetch_label_); // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_gpu_data()); @@ -31,13 +31,13 @@ void BasePrefetchingDataLayer::Forward_gpu( this->device_context_.id()); // Reshape to loaded data. - top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); + top[0]->ReshapeLike(this->prefetch_data_); // Copy the data greentea_copy(prefetch_data_.count(), (cl_mem)(prefetch_data_.gpu_data()),0, (cl_mem)(top[0]->mutable_gpu_data()),0, ctx); if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_, this->device_context_); + top[1]->ReshapeLike(prefetch_label_); // Copy the labels. greentea_copy(prefetch_label_.count(), (cl_mem)(prefetch_label_.gpu_data()),0, (cl_mem)(top[1]->mutable_gpu_data()),0, ctx); diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 15a67016413..1cac8fc3387 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -46,7 +46,7 @@ void ConcatLayer::Reshape(const vector*>& bottom, bottom_count_sum += bottom[i]->count(); top_shape[concat_axis_] += bottom[i]->shape(concat_axis_); } - top[0]->Reshape(top_shape,this->device_context_); + top[0]->Reshape(top_shape); CHECK_EQ(bottom_count_sum, top[0]->count()); } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index ffadf06abad..25e167819d3 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -20,11 +20,11 @@ void ContrastiveLossLayer::LayerSetUp( CHECK_EQ(bottom[2]->channels(), 1); CHECK_EQ(bottom[2]->height(), 1); CHECK_EQ(bottom[2]->width(), 1); - diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1,this->device_context_); - diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1,this->device_context_); - dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1,this->device_context_); + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1,this->device_context_); + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); for (int i = 0; i < bottom[0]->channels(); ++i) summer_vec_.mutable_cpu_data()[i] = Dtype(1); } diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index 00fffd70319..242fc520850 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -85,7 +85,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, // TODO: Change this if(kstride_h_ != 23 || this->device_context_.backend() == BACKEND_CUDA) { col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, - width_out, this->device_context_); + width_out); } // Set the parameters CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; @@ -95,8 +95,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, K_ = channels_ * kernel_h_ * kernel_w_ / group_; N_ = height_out * width_out; for (int top_id = 0; top_id < top.size(); ++top_id) { - top[top_id]->Reshape(num_, num_output_, height_out, width_out, - this->device_context_); + top[top_id]->Reshape(num_, num_output_, height_out, width_out); } // Check if we need to set up the weights if (this->blobs_.size() > 0) { @@ -129,7 +128,7 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, } // Set up the all ones "bias multiplier" for adding bias using blas if (bias_term_) { - bias_multiplier_.Reshape(1, 1, 1, N_, this->device_context_); + bias_multiplier_.Reshape(1, 1, 1, N_); caffe_set(N_, Dtype(1), bias_multiplier_.mutable_cpu_data()); } this->param_propagate_down_.resize(this->blobs_.size(), true); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 22c5129fa3d..cc2a3e51e9d 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -43,11 +43,11 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, datum.ParseFromString(cursor_->value()); // Use data_transformer to infer the expected blob shape from datum. vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape, this->device_context_); + this->transformed_data_.Reshape(top_shape); // Reshape top[0] and prefetch_data according to the batch_size. top_shape[0] = this->layer_param_.data_param().batch_size(); - this->prefetch_data_.Reshape(top_shape, this->device_context_); - top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); LOG(INFO)<< "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," @@ -55,8 +55,8 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, // label if (this->output_labels_) { vector label_shape(1, this->layer_param_.data_param().batch_size()); - top[1]->Reshape(label_shape, this->device_context_); - this->prefetch_label_.Reshape(label_shape, this->device_context_); + top[1]->Reshape(label_shape); + this->prefetch_label_.Reshape(label_shape); } } @@ -78,10 +78,10 @@ void DataLayer::InternalThreadEntry() { datum.ParseFromString(cursor_->value()); // Use data_transformer to infer the expected blob shape from datum. vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape, this->device_context_); + this->transformed_data_.Reshape(top_shape); // Reshape prefetch_data according to the batch_size. top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape, this->device_context_); + this->prefetch_data_.Reshape(top_shape); Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index 02157b6cd74..ec1256fd2fa 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -27,7 +27,7 @@ void DropoutLayer::Reshape(const vector*>& bottom, NeuronLayer::Reshape(bottom, top); // Set up the cache for random number generation rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(), this->device_context_); + bottom[0]->height(), bottom[0]->width()); } template diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp index 8358a31808d..6b0d617464c 100644 --- a/src/caffe/layers/dummy_data_layer.cpp +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -83,10 +83,10 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, (param.height_size() == 1) ? param.height(0) : param.height(i); const int width = (param.width_size() == 1) ? param.width(0) : param.width(i); - top[i]->Reshape(num, channels, height, width, this->device_context_); + top[i]->Reshape(num, channels, height, width); } else { const int shape_index = (param.shape_size() == 1) ? 0 : i; - top[i]->Reshape(param.shape(shape_index), this->device_context_); + top[i]->Reshape(param.shape(shape_index)); } } // Run Forward once, with refill_ inverted, to fill the constant Blobs. diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index 9d035e9b6e3..a80700736bd 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -34,11 +34,11 @@ void EltwiseLayer::Reshape(const vector*>& bottom, for (int i = 1; i < bottom.size(); ++i) { CHECK(bottom[i]->shape() == bottom[0]->shape()); } - top[0]->ReshapeLike(*bottom[0], this->device_context_); + top[0]->ReshapeLike(*bottom[0]); // If max operation, we will initialize the vector index part. if (this->layer_param_.eltwise_param().operation() == EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { - max_idx_.Reshape(bottom[0]->shape(), this->device_context_); + max_idx_.Reshape(bottom[0]->shape()); } } diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 8e64e2a39f0..6ef4d292bd1 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -96,7 +96,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, } break; case EltwiseParameter_EltwiseOp_SUM: { - greentea_gpu_set(this->device_context_.id(), count, 0, (cl_mem)top_data, 0); + greentea_gpu_set(this->device_context_.id(), count, 0, (cl_mem)top_data, 0); for (int i = 0; i < bottom.size(); ++i) { greentea_gpu_axpy(this->device_context_.id(), count, coeffs_[i], (cl_mem)(bottom[i]->gpu_data()),0, (cl_mem)top_data, 0); } @@ -236,7 +236,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, if (coeffs_[i] == Dtype(1.)) { greentea_copy(count, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0,ctx); } else { - greentea_gpu_scale(count, coeffs_[i],0, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0); + greentea_gpu_scale(this->device_context_.id(), count, coeffs_[i],(cl_mem)top_diff,0, (cl_mem)bottom_diff,0); } } break; diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 37aa2b45e9a..80efa31b22c 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -13,7 +13,7 @@ void EuclideanLossLayer::Reshape( LossLayer::Reshape(bottom, top); CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1)) << "Inputs must have the same dimension."; - diff_.ReshapeLike(*bottom[0], this->device_context_); + diff_.ReshapeLike(*bottom[0]); } template diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index d69e19e5c80..be1db32dbaa 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -55,7 +55,7 @@ void FilterLayer::Reshape(const vector*>& bottom, shape_top[0] = new_tops_num; for (int ts = 1; ts < num_axes; ++ts) shape_top[ts] = bottom[t]->shape(ts); - top[t]->Reshape(shape_top,this->device_context_); + top[t]->Reshape(shape_top); } } diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index adb41d96b10..f7e5c9c2172 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -22,7 +22,7 @@ void FlattenLayer::Reshape(const vector*>& bottom, for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { top_shape.push_back(bottom[0]->shape(i)); } - top[0]->Reshape(top_shape, this->device_context_); + top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()); } diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 331af7de779..fadd2179e49 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -121,7 +121,7 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, for (int j = 1; j < top_shape.size(); ++j) { top_shape[j] = hdf_blobs_[i]->shape(j); } - top[i]->Reshape(top_shape, this->device_context_); + top[i]->Reshape(top_shape); } } diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index 644e74ec8ef..cb5f0e0c7ee 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -46,9 +46,9 @@ void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, CHECK_GE(bottom.size(), 2); CHECK_EQ(bottom[0]->num(), bottom[1]->num()); data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(),this->device_context_); + bottom[0]->height(), bottom[0]->width()); label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), - bottom[1]->height(), bottom[1]->width(),this->device_context_); + bottom[1]->height(), bottom[1]->width()); const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index 410849e0985..de7cb14ca56 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -19,11 +19,9 @@ void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, CHECK_GE(bottom.size(), 2); CHECK_EQ(bottom[0]->num(), bottom[1]->num()); data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(), - this->device_context_); + bottom[0]->height(), bottom[0]->width()); label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), - bottom[1]->height(), bottom[1]->width(), - this->device_context_); + bottom[1]->height(), bottom[1]->width()); const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 90acdd46cfc..1c802714e33 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -58,7 +58,7 @@ void Im2colLayer::Reshape(const vector*>& bottom, top[0]->Reshape( bottom[0]->num(), channels_ * kernel_h_ * kernel_w_, (height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1, - (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1, this->device_context_); + (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1); } template diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 7225af0afd2..18c035cba9d 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -64,20 +64,20 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, new_height, new_width, is_color); // Use data_transformer to infer the expected blob shape from a cv_image. vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape, this->device_context_); + this->transformed_data_.Reshape(top_shape); // Reshape prefetch_data and top[0] according to the batch_size. const int batch_size = this->layer_param_.image_data_param().batch_size(); top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape, this->device_context_); - top[0]->ReshapeLike(this->prefetch_data_, this->device_context_); + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label vector label_shape(1, batch_size); - top[1]->Reshape(label_shape, this->device_context_); - this->prefetch_label_.Reshape(label_shape, this->device_context_); + top[1]->Reshape(label_shape); + this->prefetch_label_.Reshape(label_shape); } template @@ -110,10 +110,10 @@ void ImageDataLayer::InternalThreadEntry() { new_height, new_width, is_color); // Use data_transformer to infer the expected blob shape from a cv_img. vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape, this->device_context_); + this->transformed_data_.Reshape(top_shape); // Reshape prefetch_data according to the batch_size. top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape, this->device_context_); + this->prefetch_data_.Reshape(top_shape); Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index 2540443d05b..a1e0b40de0e 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -20,7 +20,7 @@ void InfogainLossLayer::LayerSetUp( BlobProto blob_proto; ReadProtoFromBinaryFile( this->layer_param_.infogain_loss_param().source(), &blob_proto); - infogain_.FromProto(blob_proto, this->device_context_); + infogain_.FromProto(blob_proto); } } diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 10971b7c3d2..0155516bf07 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -68,11 +68,11 @@ void InnerProductLayer::Reshape(const vector*>& bottom, vector top_shape = bottom[0]->shape(); top_shape.resize(axis + 1); top_shape[axis] = N_; - top[0]->Reshape(top_shape,this->device_context_); + top[0]->Reshape(top_shape); // Set up the bias multiplier if (bias_term_) { vector bias_shape(1, M_); - bias_multiplier_.Reshape(bias_shape,this->device_context_); + bias_multiplier_.Reshape(bias_shape); caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data()); } } diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index 5086dbaca09..3496a5c2a8a 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -25,7 +25,7 @@ void LossLayer::Reshape( CHECK_EQ(bottom[0]->num(), bottom[1]->num()) << "The data and label should have the same number."; vector loss_shape(0); // Loss layers output a scalar; 0 axes. - top[0]->Reshape(loss_shape, this->device_context_); + top[0]->Reshape(loss_shape); } INSTANTIATE_CLASS(LossLayer); diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 4bb2fb08c01..2c1b5f07fa1 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -77,8 +77,8 @@ void LRNLayer::Reshape(const vector*>& bottom, width_ = bottom[0]->width(); switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: - top[0]->Reshape(num_, channels_, height_, width_, this->device_context_); - scale_.Reshape(num_, channels_, height_, width_, this->device_context_); + top[0]->Reshape(num_, channels_, height_, width_); + scale_.Reshape(num_, channels_, height_, width_); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: split_layer_->Reshape(bottom, split_top_vec_); diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index 97d4e4e6345..42de4198bc4 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -20,10 +20,10 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, "batch_size, channels, height, and width must be specified and" " positive in memory_data_param"; vector label_shape(1, batch_size_); - top[0]->Reshape(batch_size_, channels_, height_, width_, this->device_context_); - top[1]->Reshape(label_shape, this->device_context_); - added_data_.Reshape(batch_size_, channels_, height_, width_, this->device_context_); - added_label_.Reshape(label_shape, this->device_context_); + top[0]->Reshape(batch_size_, channels_, height_, width_); + top[1]->Reshape(label_shape); + added_data_.Reshape(batch_size_, channels_, height_, width_); + added_label_.Reshape(label_shape); data_ = NULL; labels_ = NULL; added_data_.cpu_data(); @@ -38,8 +38,8 @@ void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { CHECK_GT(num, 0) << "There is no datum to add."; CHECK_EQ(num % batch_size_, 0) << "The added data must be a multiple of the batch size."; - added_data_.Reshape(num, channels_, height_, width_, this->device_context_); - added_label_.Reshape(num, 1, 1, 1, this->device_context_); + added_data_.Reshape(num, channels_, height_, width_); + added_label_.Reshape(num, 1, 1, 1); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(datum_vector, &added_data_); // Copy Labels @@ -62,8 +62,8 @@ void MemoryDataLayer::AddMatVector(const vector& mat_vector, CHECK_GT(num, 0) << "There is no mat to add"; CHECK_EQ(num % batch_size_, 0) << "The added data must be a multiple of the batch size."; - added_data_.Reshape(num, channels_, height_, width_,this->device_context_); - added_label_.Reshape(num, 1, 1, 1,this->device_context_); + added_data_.Reshape(num, channels_, height_, width_); + added_label_.Reshape(num, 1, 1, 1); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(mat_vector, &added_data_); // Copy Labels @@ -98,16 +98,16 @@ void MemoryDataLayer::set_batch_size(int new_size) { CHECK(!has_new_data_) << "Can't change batch_size until current data has been consumed."; batch_size_ = new_size; - added_data_.Reshape(batch_size_, channels_, height_, width_,this->device_context_); - added_label_.Reshape(batch_size_, 1, 1, 1,this->device_context_); + added_data_.Reshape(batch_size_, channels_, height_, width_); + added_label_.Reshape(batch_size_, 1, 1, 1); } template void MemoryDataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset"; - top[0]->Reshape(batch_size_, channels_, height_, width_,this->device_context_); - top[1]->Reshape(batch_size_, 1, 1, 1,this->device_context_); + top[0]->Reshape(batch_size_, channels_, height_, width_); + top[1]->Reshape(batch_size_, 1, 1, 1); top[0]->set_cpu_data(data_ + pos_ * size_); top[1]->set_cpu_data(labels_ + pos_); pos_ = (pos_ + batch_size_) % n_; diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index ceddb4b894e..3a577b2e269 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -28,7 +28,7 @@ void MergeCropLayer::Reshape(const vector*>& bottom, int height = bottom[0]->height(); int width = bottom[0]->width(); - top[0]->Reshape(num, channels, height, width, this->device_context_); + top[0]->Reshape(num, channels, height, width); } template diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index cff5260ebc9..3e79bddcdde 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -11,15 +11,15 @@ template void MVNLayer::Reshape(const vector*>& bottom, const vector*>& top) { top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(), this->device_context_); + bottom[0]->height(), bottom[0]->width()); mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), - 1, 1, this->device_context_); + 1, 1); variance_.Reshape(bottom[0]->num(), bottom[0]->channels(), - 1, 1, this->device_context_); + 1, 1); temp_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width(), this->device_context_); + bottom[0]->height(), bottom[0]->width()); sum_multiplier_.Reshape(1, 1, - bottom[0]->height(), bottom[0]->width(), this->device_context_); + bottom[0]->height(), bottom[0]->width()); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); eps_ = this->layer_param_.mvn_param().eps(); diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp index 1a55a1c84bb..ba67b43878e 100644 --- a/src/caffe/layers/neuron_layer.cpp +++ b/src/caffe/layers/neuron_layer.cpp @@ -8,7 +8,7 @@ namespace caffe { template void NeuronLayer::Reshape(const vector*>& bottom, const vector*>& top) { - top[0]->ReshapeLike(*bottom[0],this->device_context_); + top[0]->ReshapeLike(*bottom[0]); } INSTANTIATE_CLASS(NeuronLayer); diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index fbc823245c7..c8d41499455 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -107,21 +107,21 @@ void PoolingLayer::Reshape(const vector*>& bottom, CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_); } top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_,this->device_context_); + pooled_width_); if (top.size() > 1) { - top[1]->ReshapeLike(*top[0],this->device_context_); + top[1]->ReshapeLike(*top[0]); } // If max pooling, we will initialize the vector index part. if (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_MAX && top.size() == 1) { max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_,this->device_context_); + pooled_width_); } // If stochastic pooling, we will initialize the random index part. if (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_STOCHASTIC) { rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_,this->device_context_); + pooled_width_); } } diff --git a/src/caffe/layers/pooling_sk_layer.cpp b/src/caffe/layers/pooling_sk_layer.cpp index 3fb90285d05..8527eec4eec 100644 --- a/src/caffe/layers/pooling_sk_layer.cpp +++ b/src/caffe/layers/pooling_sk_layer.cpp @@ -89,21 +89,21 @@ void PoolingSKLayer::LayerSetUp(const vector*>& bottom, pooled_width_ = static_cast(ceil( static_cast(width_ + 2 * pad_w_ - ext_kernel_w) / stride_w_)) + 1; - top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_,this->device_context_); + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); if (top.size() > 1) { - top[1]->ReshapeLike(*top[0],this->device_context_); + top[1]->ReshapeLike(*top[0]); } // If max pooling, we will initialize the vector index part. if (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_MAX && top.size() == 1) { max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_,this->device_context_); + pooled_width_); } // If stochastic pooling, we will initialize the random index part. if (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_STOCHASTIC) { rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_,this->device_context_); + pooled_width_); } } diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index e78ca94fa85..b49c2debd74 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -44,8 +44,8 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count(1)),this->device_context_); - backward_buff_.Reshape(vector(1, bottom[0]->count(1)),this->device_context_); + multiplier_.Reshape(vector(1, bottom[0]->count(1))); + backward_buff_.Reshape(vector(1, bottom[0]->count(1))); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -53,10 +53,10 @@ template void PReLULayer::Reshape(const vector*>& bottom, const vector*>& top) { CHECK_GE(bottom[0]->num_axes(), 2)<< "Number of axes of bottom blob must be >=2."; - top[0]->ReshapeLike(*bottom[0],this->device_context_); + top[0]->ReshapeLike(*bottom[0]); if (bottom[0] == top[0]) { // For in-place computation - bottom_memory_.ReshapeLike(*bottom[0],this->device_context_); + bottom_memory_.ReshapeLike(*bottom[0]); } } diff --git a/src/caffe/layers/reduction_layer.cpp b/src/caffe/layers/reduction_layer.cpp index bd404eba4e3..8ae6329ebe4 100644 --- a/src/caffe/layers/reduction_layer.cpp +++ b/src/caffe/layers/reduction_layer.cpp @@ -25,14 +25,14 @@ void ReductionLayer::Reshape(const vector*>& bottom, // we'd need to also copy any axes following an "end_axis". vector top_shape(bottom[0]->shape().begin(), bottom[0]->shape().begin() + axis_); - top[0]->Reshape(top_shape, this->device_context_); + top[0]->Reshape(top_shape); num_ = bottom[0]->count(0, axis_); dim_ = bottom[0]->count(axis_); CHECK_EQ(num_, top[0]->count()); if (op_ == ReductionParameter_ReductionOp_SUM || op_ == ReductionParameter_ReductionOp_MEAN) { vector sum_mult_shape(1, dim_); - sum_multiplier_.Reshape(sum_mult_shape, this->device_context_); + sum_multiplier_.Reshape(sum_mult_shape); caffe_set(dim_, Dtype(1), sum_multiplier_.mutable_cpu_data()); } coeff_ = this->layer_param().reduction_param().coeff(); diff --git a/src/caffe/layers/reshape_layer.cpp b/src/caffe/layers/reshape_layer.cpp index e694f2a9753..ffe970f2689 100644 --- a/src/caffe/layers/reshape_layer.cpp +++ b/src/caffe/layers/reshape_layer.cpp @@ -82,7 +82,7 @@ void ReshapeLayer::Reshape(const vector*>& bottom, const int inferred_dim = bottom[0]->count() / explicit_count; top_shape[start_axis + inferred_axis_] = inferred_dim; } - top[0]->Reshape(top_shape, this->device_context_); + top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()) << "output count must match input count"; top[0]->ShareData(*bottom[0]); diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 711f4eb676c..e4418c9cf9c 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -53,7 +53,7 @@ void SliceLayer::Reshape(const vector*>& bottom, slices.push_back(bottom_slice_axis - prev); for (int i = 0; i < top.size(); ++i) { top_shape[slice_axis_] = slices[i]; - top[i]->Reshape(top_shape,this->device_context_); + top[i]->Reshape(top_shape); count += top[i]->count(); } } else { @@ -62,7 +62,7 @@ void SliceLayer::Reshape(const vector*>& bottom, << "divide input slice axis (" << bottom_slice_axis << ")"; top_shape[slice_axis_] = bottom_slice_axis / top.size(); for (int i = 0; i < top.size(); ++i) { - top[i]->Reshape(top_shape,this->device_context_); + top[i]->Reshape(top_shape); count += top[i]->count(); } } diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index 21f6391ed67..04712c9e653 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -12,16 +12,16 @@ void SoftmaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { softmax_axis_ = bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); - top[0]->ReshapeLike(*bottom[0],this->device_context_); + top[0]->ReshapeLike(*bottom[0]); vector mult_dims(1, bottom[0]->shape(softmax_axis_)); - sum_multiplier_.Reshape(mult_dims,this->device_context_); + sum_multiplier_.Reshape(mult_dims); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); outer_num_ = bottom[0]->count(0, softmax_axis_); inner_num_ = bottom[0]->count(softmax_axis_ + 1); vector scale_dims = bottom[0]->shape(); scale_dims[softmax_axis_] = 1; - scale_.Reshape(scale_dims,this->device_context_); + scale_.Reshape(scale_dims); } template diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index e52a98dccb7..ba312f67fbc 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -46,7 +46,7 @@ void SoftmaxWithLossLayer::Reshape( << "with integer values in {0, 1, ..., C-1}."; if (top.size() >= 2) { // softmax output - top[1]->ReshapeLike(*bottom[0],this->device_context_); + top[1]->ReshapeLike(*bottom[0]); } } diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 7deda02107d..272cb59cd37 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -18,7 +18,7 @@ void SplitLayer::Reshape(const vector*>& bottom, // some strange effects in practice...) CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not " "allow in-place computation."; - top[i]->ReshapeLike(*bottom[0],this->device_context_); + top[i]->ReshapeLike(*bottom[0]); CHECK_EQ(count_, top[i]->count()); } } diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index f5106d30a46..c127d56bc46 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -170,16 +170,16 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, const int crop_size = this->transform_param_.crop_size(); CHECK_GT(crop_size, 0); const int batch_size = this->layer_param_.window_data_param().batch_size(); - top[0]->Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size, this->device_context_); + top[0]->Reshape(batch_size, channels, crop_size, crop_size); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label vector label_shape(1, batch_size); - top[1]->Reshape(label_shape, this->device_context_); - this->prefetch_label_.Reshape(label_shape, this->device_context_); + top[1]->Reshape(label_shape); + this->prefetch_label_.Reshape(label_shape); // data mean has_mean_file_ = this->transform_param_.has_mean_file(); @@ -190,7 +190,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, LOG(INFO) << "Loading mean file from: " << mean_file; BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); - data_mean_.FromProto(blob_proto, this->device_context_); + data_mean_.FromProto(blob_proto); } if (has_mean_values_) { CHECK(has_mean_file_ == false) << diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 6a20a5a51b7..9972c76a8bb 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -80,9 +80,8 @@ void Net::Init(const NetParameter& in_param) { const LayerParameter& layer_param = param.layer(layer_id); if (layer_param.propagate_down_size() > 0) { CHECK_EQ(layer_param.propagate_down_size(), - layer_param.bottom_size()) - << "propagate_down param must be specified " - << "either 0 or bottom_size times "; + layer_param.bottom_size())<< "propagate_down param must be specified " + << "either 0 or bottom_size times "; } layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); @@ -183,7 +182,7 @@ void Net::Init(const NetParameter& in_param) { if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { layer_need_backward_[layer_id] = false; for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); - ++bottom_id) { + ++bottom_id) { bottom_need_backward_[layer_id][bottom_id] = false; } } @@ -207,7 +206,7 @@ void Net::Init(const NetParameter& in_param) { } if (!bottom_need_backward_[layer_id][bottom_id]) { const string& blob_name = - blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; blobs_skip_backp.insert(blob_name); } } @@ -385,11 +384,11 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, blob_pointer->Reshape(param.input_dim(top_id * 4), param.input_dim(top_id * 4 + 1), param.input_dim(top_id * 4 + 2), - param.input_dim(top_id * 4 + 3), Caffe::GetDefaultDeviceContext()); + param.input_dim(top_id * 4 + 3)); } else { - blob_pointer->Reshape(param.input_shape(top_id),Caffe::GetDefaultDeviceContext()); + blob_pointer->Reshape(param.input_shape(top_id)); } net_input_blob_indices_.push_back(blob_id); net_input_blobs_.push_back(blob_pointer.get()); @@ -425,8 +424,7 @@ int Net::AppendBottom(const NetParameter& param, const int layer_id, // Check if the backpropagation on bottom_id should be skipped if (layer_param.propagate_down_size() > 0) propagate_down = layer_param.propagate_down(bottom_id); - const bool need_backward = blob_need_backward_[blob_id] && - propagate_down; + const bool need_backward = blob_need_backward_[blob_id] && propagate_down; bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } @@ -549,7 +547,7 @@ const vector*>& Net::Forward( const vector*> & bottom, Dtype* loss) { // Copy bottom to internal bottom for (int i = 0; i < bottom.size(); ++i) { - net_input_blobs_[i]->CopyFrom(*bottom[i], bottom[i]->device_context()); + net_input_blobs_[i]->CopyFrom(*bottom[i]); } return ForwardPrefilled(loss); } @@ -561,8 +559,7 @@ string Net::Forward(const string& input_blob_protos, Dtype* loss) { blob_proto_vec.ParseFromString(input_blob_protos); CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size())<< "Incorrect input size."; for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { - net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i), - net_input_blobs_[i]->device_context()); + net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); } } ForwardPrefilled(loss); @@ -758,7 +755,7 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { CHECK_EQ(target_blobs.size(), source_layer.blobs_size())<< "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { const bool kReshape = false; - target_blobs[j]->FromProto(source_layer.blobs(j), layers_[target_layer_id]->device_context(), kReshape); + target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); } } } @@ -813,10 +810,21 @@ void Net::Update() { caffe_add(count, this_diff, owner_diff, owner_diff); break; #ifndef CPU_ONLY - case Caffe::GPU: + case Caffe::GPU: { this_diff = params_[i]->gpu_diff(); owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); - caffe_gpu_add(count, this_diff, owner_diff, owner_diff); + DeviceContext dc = Caffe::GetDefaultDeviceContext(); + if (dc.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_add(count, this_diff, owner_diff, owner_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_add(dc.id(), count, (cl_mem) this_diff, 0, + (cl_mem) owner_diff, 0, (cl_mem) owner_diff, 0); +#endif // USE_GREENTEA + } + } break; #else NO_GPU; diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 2be26a94e0b..a968ea2cf34 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -699,7 +699,7 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { CHECK_EQ(state.history_size(), history_.size())<< "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; for (int i = 0; i < history_.size(); ++i) { - history_[i]->FromProto(state.history(i),this->device_context_); + history_[i]->FromProto(state.history(i)); } } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 3cb69c66ddb..c14b67cc0e9 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -26,9 +26,9 @@ class AccuracyLayerTest : public CPUDeviceTest { vector shape(2); shape[0] = 100; shape[1] = 10; - blob_bottom_data_->Reshape(shape, Caffe::GetDefaultDeviceContext()); + blob_bottom_data_->Reshape(shape); shape.resize(1); - blob_bottom_label_->Reshape(shape, Caffe::GetDefaultDeviceContext()); + blob_bottom_label_->Reshape(shape); FillBottoms(); blob_bottom_vec_.push_back(blob_bottom_data_); @@ -117,10 +117,10 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { - this->blob_bottom_data_->Reshape(2, 10, 4, 5, Caffe::GetDefaultDeviceContext()); + this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; - this->blob_bottom_label_->Reshape(label_shape, Caffe::GetDefaultDeviceContext()); + this->blob_bottom_label_->Reshape(label_shape); this->FillBottoms(); LayerParameter layer_param; layer_param.mutable_accuracy_param()->set_axis(1); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index a06713a3147..895c3d372ff 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -16,7 +16,7 @@ template class ArgMaxLayerTest : public CPUDeviceTest { protected: ArgMaxLayerTest() - : blob_bottom_(new Blob(10, 20, 1, 1, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(10, 20, 1, 1)), blob_top_(new Blob()), top_k_(5) { Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index e6cbd280abf..7da6423b67c 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -16,7 +16,7 @@ class BlobSimpleTest : public ::testing::Test { protected: BlobSimpleTest() : blob_(new Blob()), - blob_preshaped_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())) {} + blob_preshaped_(new Blob(2, 3, 4, 5)) {} virtual ~BlobSimpleTest() { delete blob_; delete blob_preshaped_; } Blob* const blob_; Blob* const blob_preshaped_; @@ -44,7 +44,7 @@ TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) { } TYPED_TEST(BlobSimpleTest, TestReshape) { - this->blob_->Reshape(2, 3, 4, 5, Caffe::GetDefaultDeviceContext()); + this->blob_->Reshape(2, 3, 4, 5); EXPECT_EQ(this->blob_->num(), 2); EXPECT_EQ(this->blob_->channels(), 3); EXPECT_EQ(this->blob_->height(), 4); @@ -59,7 +59,7 @@ TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { vector shape(2); shape[0] = 3; shape[1] = 2; - this->blob_->Reshape(shape, Caffe::GetDefaultDeviceContext()); + this->blob_->Reshape(shape); // (3 x 2) blob == (1 x 1 x 3 x 2) legacy blob blob_proto.set_num(1); @@ -84,7 +84,7 @@ TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { // Reshape to (1 x 3 x 2). shape.insert(shape.begin(), 1); - this->blob_->Reshape(shape, Caffe::GetDefaultDeviceContext()); + this->blob_->Reshape(shape); // (1 x 3 x 2) blob == (1 x 1 x 3 x 2) legacy blob blob_proto.set_num(1); @@ -95,7 +95,7 @@ TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { // Reshape to (2 x 3 x 2). shape[0] = 2; - this->blob_->Reshape(shape, Caffe::GetDefaultDeviceContext()); + this->blob_->Reshape(shape); // (2 x 3 x 2) blob != (1 x 1 x 3 x 2) legacy blob blob_proto.set_num(1); @@ -110,7 +110,7 @@ class BlobMathTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: BlobMathTest() - : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_(new Blob(2, 3, 4, 5)), epsilon_(1e-6) {} virtual ~BlobMathTest() { delete blob_; } diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index a77dcef1b85..8c9fad16a9c 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -15,9 +15,13 @@ class CommonTest : public ::testing::Test {}; #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestCublasHandlerGPU) { - int cuda_device_id; - CUDA_CHECK(cudaGetDevice(&cuda_device_id)); - EXPECT_TRUE(Caffe::cublas_handle()); + if(Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + int cuda_device_id; + CUDA_CHECK(cudaGetDevice(&cuda_device_id)); + EXPECT_TRUE(Caffe::cublas_handle()); +#endif // USE_CUDA + } } #endif diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index 1c0d30c80cd..662a50fa23b 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -19,9 +19,9 @@ class ConcatLayerTest : public MultiDeviceTest { protected: ConcatLayerTest() - : blob_bottom_0_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), - blob_bottom_1_(new Blob(2, 5, 6, 5, Caffe::GetDefaultDeviceContext())), - blob_bottom_2_(new Blob(5, 3, 6, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_0_(new Blob(2, 3, 6, 5)), + blob_bottom_1_(new Blob(2, 5, 6, 5)), + blob_bottom_2_(new Blob(5, 3, 6, 5)), blob_top_(new Blob()) {} virtual void SetUp() { // fill the values diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 61040768383..1e9447cbc51 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,9 +22,9 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(512, 2, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_data_j_(new Blob(512, 2, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_y_(new Blob(512, 1, 1, 1, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 0fd974e5f5f..67d41fff844 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -106,8 +106,8 @@ class ConvolutionLayerTest : public MultiDeviceTest { protected: ConvolutionLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), - blob_bottom_2_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), blob_top_(new Blob()), blob_top_2_(new Blob()) {} virtual void SetUp() { @@ -130,7 +130,7 @@ class ConvolutionLayerTest : public MultiDeviceTest { virtual Blob* MakeReferenceTop(Blob* top) { this->ref_blob_top_.reset(new Blob()); - this->ref_blob_top_->ReshapeLike(*top, Caffe::GetDefaultDeviceContext()); + this->ref_blob_top_->ReshapeLike(*top); return this->ref_blob_top_.get(); } @@ -283,7 +283,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { filler_param.set_value(1.); filler.reset(new GaussianFiller(filler_param)); filler->Fill(this->blob_bottom_); - this->blob_bottom_2_->CopyFrom(*this->blob_bottom_, Caffe::GetDefaultDeviceContext()); + this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); // Compute Sobel G_x operator as 3 x 3 convolution. LayerParameter layer_param; ConvolutionParameter* convolution_param = @@ -295,7 +295,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { shared_ptr > layer( new ConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 3, Caffe::GetDefaultDeviceContext())); + layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); Dtype* weights = layer->blobs()[0]->mutable_cpu_data(); for (int c = 0; c < 3; ++c) { int i = c * 9; // 3 x 3 filter @@ -328,7 +328,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { convolution_param->set_bias_term(false); layer.reset(new ConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 1, Caffe::GetDefaultDeviceContext())); + layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); Dtype* weights_1 = layer->blobs()[0]->mutable_cpu_data(); for (int c = 0; c < 3; ++c) { int i = c * 3; // 3 x 1 filter @@ -339,7 +339,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // (2) the [-1 0 1] row filter - blob_sep->CopyFrom(*this->blob_top_2_, Caffe::GetDefaultDeviceContext(), false, true); + blob_sep->CopyFrom(*this->blob_top_2_, false, true); sep_blob_bottom_vec.clear(); sep_blob_bottom_vec.push_back(blob_sep.get()); convolution_param->set_kernel_h(1); @@ -350,7 +350,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { convolution_param->set_bias_term(false); layer.reset(new ConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 1, 3, Caffe::GetDefaultDeviceContext())); + layer->blobs()[0].reset(new Blob(1, 3, 1, 3)); Dtype* weights_2 = layer->blobs()[0]->mutable_cpu_data(); for (int c = 0; c < 3; ++c) { int i = c * 3; // 1 x 3 filter diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index 10adbf04f9b..18c7d94c4fc 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -45,9 +45,9 @@ class DataTransformTest : public ::testing::Test { Caffe::set_random_seed(seed_); transformer->InitRand(); Blob* blob = - new Blob(1, datum.channels(), datum.height(), datum.width(), Caffe::GetDefaultDeviceContext()); + new Blob(1, datum.channels(), datum.height(), datum.width()); if (transform_param.crop_size() > 0) { - blob->Reshape(1, datum.channels(), crop_size, crop_size, Caffe::GetDefaultDeviceContext()); + blob->Reshape(1, datum.channels(), crop_size, crop_size); } vector > crop_sequence; @@ -90,7 +90,7 @@ TYPED_TEST(DataTransformTest, TestEmptyTransform) { Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); + Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); @@ -114,7 +114,7 @@ TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, 3, 4, 5, Caffe::GetDefaultDeviceContext()); + Blob* blob = new Blob(1, 3, 4, 5); DataTransformer* transformer = new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); @@ -144,7 +144,7 @@ TYPED_TEST(DataTransformTest, TestCropSize) { new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); Blob* blob = - new Blob(1, channels, crop_size, crop_size, Caffe::GetDefaultDeviceContext()); + new Blob(1, channels, crop_size, crop_size); for (int iter = 0; iter < this->num_iter_; ++iter) { transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -279,7 +279,7 @@ TYPED_TEST(DataTransformTest, TestMeanValue) { transform_param.add_mean_value(mean_value); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); + Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); @@ -302,7 +302,7 @@ TYPED_TEST(DataTransformTest, TestMeanValues) { transform_param.add_mean_value(2); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); + Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); @@ -342,7 +342,7 @@ TYPED_TEST(DataTransformTest, TestMeanFile) { transform_param.set_mean_file(*mean_file); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - Blob* blob = new Blob(1, channels, height, width, Caffe::GetDefaultDeviceContext()); + Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); transformer->InitRand(); diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp index 625ad6803e8..fc63d5efbe3 100644 --- a/src/caffe/test/test_deconvolution_layer.cpp +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -21,8 +21,8 @@ class DeconvolutionLayerTest : public MultiDeviceTest { protected: DeconvolutionLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), - blob_bottom_2_(new Blob(2, 3, 6, 4, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), blob_top_(new Blob()), blob_top_2_(new Blob()) {} virtual void SetUp() { diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index 8410b7d70d5..be0c1347709 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -19,9 +19,9 @@ class EltwiseLayerTest : public MultiDeviceTest { protected: EltwiseLayerTest() - : blob_bottom_a_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), - blob_bottom_b_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), - blob_bottom_c_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_a_(new Blob(2, 3, 4, 5)), + blob_bottom_b_(new Blob(2, 3, 4, 5)), + blob_bottom_c_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { // fill the values Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp index 939e9f08b8e..1949742bbcb 100644 --- a/src/caffe/test/test_euclidean_loss_layer.cpp +++ b/src/caffe/test/test_euclidean_loss_layer.cpp @@ -21,8 +21,8 @@ class EuclideanLossLayerTest : public MultiDeviceTest { protected: EuclideanLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_label_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 5, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 91c6b39f916..728b8dc5f0d 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -12,7 +12,7 @@ template class ConstantFillerTest : public ::testing::Test { protected: ConstantFillerTest() - : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_(new Blob(2, 3, 4, 5)), filler_param_() { filler_param_.set_value(10.); filler_.reset(new ConstantFiller(filler_param_)); @@ -40,7 +40,7 @@ template class UniformFillerTest : public ::testing::Test { protected: UniformFillerTest() - : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_(new Blob(2, 3, 4, 5)), filler_param_() { filler_param_.set_min(1.); filler_param_.set_max(2.); @@ -69,7 +69,7 @@ template class PositiveUnitballFillerTest : public ::testing::Test { protected: PositiveUnitballFillerTest() - : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_(new Blob(2, 3, 4, 5)), filler_param_() { filler_.reset(new PositiveUnitballFiller(filler_param_)); filler_->Fill(blob_); @@ -106,7 +106,7 @@ template class GaussianFillerTest : public ::testing::Test { protected: GaussianFillerTest() - : blob_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_(new Blob(2, 3, 4, 5)), filler_param_() { filler_param_.set_mean(10.); filler_param_.set_std(0.1); @@ -146,7 +146,7 @@ template class XavierFillerTest : public ::testing::Test { protected: XavierFillerTest() - : blob_(new Blob(1000, 2, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_(new Blob(1000, 2, 4, 5)), filler_param_() { } virtual void test_params(FillerParameter_VarianceNorm variance_norm, @@ -195,7 +195,7 @@ template class MSRAFillerTest : public ::testing::Test { protected: MSRAFillerTest() - : blob_(new Blob(1000, 2, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_(new Blob(1000, 2, 4, 5)), filler_param_() { } virtual void test_params(FillerParameter_VarianceNorm variance_norm, diff --git a/src/caffe/test/test_filter_layer.cpp b/src/caffe/test/test_filter_layer.cpp index a9628796c87..c641b6ef6e8 100644 --- a/src/caffe/test/test_filter_layer.cpp +++ b/src/caffe/test/test_filter_layer.cpp @@ -20,9 +20,9 @@ class FilterLayerTest : public MultiDeviceTest { protected: FilterLayerTest() - : blob_bottom_data_(new Blob(4, 3, 6, 4, Caffe::GetDefaultDeviceContext())), - blob_bottom_labels_(new Blob(4, 1, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_selector_(new Blob(4, 1, 1, 1, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_(new Blob(4, 3, 6, 4)), + blob_bottom_labels_(new Blob(4, 1, 1, 1)), + blob_bottom_selector_(new Blob(4, 1, 1, 1)), blob_top_data_(new Blob()), blob_top_labels_(new Blob()) {} virtual void SetUp() { diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 7b2dffcd40e..7b6757cba32 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -18,7 +18,7 @@ class FlattenLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: FlattenLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index 01143ba8f52..c9135d64e70 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -161,9 +161,9 @@ class GradientBasedSolverTest : public MultiDeviceTest { (*updated_params)[i].reset(new Blob()); } Blob& updated_weights = *(*updated_params)[0]; - updated_weights.ReshapeLike(weights, Caffe::GetDefaultDeviceContext()); + updated_weights.ReshapeLike(weights); Blob& updated_bias = *(*updated_params)[1]; - updated_bias.ReshapeLike(bias, Caffe::GetDefaultDeviceContext()); + updated_bias.ReshapeLike(bias); for (int i = 0; i <= D; ++i) { // Compute the derivative with respect to the ith weight (i.e., the ith @@ -290,7 +290,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { vector > > noaccum_params(param_blobs.size()); for (int i = 0; i < param_blobs.size(); ++i) { noaccum_params[i].reset(new Blob()); - noaccum_params[i]->CopyFrom(*param_blobs[i], Caffe::GetDefaultDeviceContext(), false, true); + noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); } // Solve by equivalent accumulation of gradients over divided batches. this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, diff --git a/src/caffe/test/test_hinge_loss_layer.cpp b/src/caffe/test/test_hinge_loss_layer.cpp index ce8b1f0faca..b6a99022905 100644 --- a/src/caffe/test/test_hinge_loss_layer.cpp +++ b/src/caffe/test/test_hinge_loss_layer.cpp @@ -21,8 +21,8 @@ class HingeLossLayerTest : public MultiDeviceTest { protected: HingeLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_label_(new Blob(10, 1, 1, 1, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 0e375fca446..a27b780859e 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -37,7 +37,7 @@ class Im2colKernelTest : public GPUDeviceTest { protected: Im2colKernelTest() // big so launches > 1024 threads - : blob_bottom_(new Blob(5, 500, 10, 10, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(5, 500, 10, 10)), blob_top_(new Blob()), blob_top_cpu_(new Blob()) { FillerParameter filler_param; @@ -80,12 +80,12 @@ TYPED_TEST(Im2colKernelTest, TestGPU) { this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, - this->width_col_, Caffe::GetDefaultDeviceContext()); + this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, - this->width_col_, Caffe::GetDefaultDeviceContext()); + this->width_col_); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index 5a3986f83b8..f50abe103f8 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -18,7 +18,7 @@ class Im2colLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: Im2colLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp index 117a2dc166d..7ec2f8073c1 100644 --- a/src/caffe/test/test_infogain_loss_layer.cpp +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -21,9 +21,9 @@ class InfogainLossLayerTest : public MultiDeviceTest { protected: InfogainLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_label_(new Blob(10, 1, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_infogain_(new Blob(1, 1, 5, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)), + blob_bottom_infogain_(new Blob(1, 1, 5, 5)), blob_top_loss_(new Blob()) { Caffe::set_random_seed(1701); FillerParameter filler_param; diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp index 1924fda602b..96092529834 100644 --- a/src/caffe/test/test_inner_product_layer.cpp +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -22,7 +22,7 @@ class InnerProductLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: InnerProductLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; @@ -57,12 +57,12 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) { TYPED_TEST(InnerProductLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; - bool IS_VALID_CUDA = false; + bool IS_VALID_DEVICE = false; #ifndef CPU_ONLY - IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2; + IS_VALID_DEVICE = CAFFE_TEST_CUDA_PROP.major >= 2 || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL; #endif if (Caffe::mode() == Caffe::CPU || - sizeof(Dtype) == 4 || IS_VALID_CUDA) { + sizeof(Dtype) == 4 || IS_VALID_DEVICE) { LayerParameter layer_param; InnerProductParameter* inner_product_param = layer_param.mutable_inner_product_param(); @@ -87,12 +87,12 @@ TYPED_TEST(InnerProductLayerTest, TestForward) { TYPED_TEST(InnerProductLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; - bool IS_VALID_CUDA = false; + bool IS_VALID_DEVICE = false; #ifndef CPU_ONLY - IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2; + IS_VALID_DEVICE = CAFFE_TEST_CUDA_PROP.major >= 2 || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL; #endif if (Caffe::mode() == Caffe::CPU || - sizeof(Dtype) == 4 || IS_VALID_CUDA) { + sizeof(Dtype) == 4 || IS_VALID_DEVICE) { LayerParameter layer_param; InnerProductParameter* inner_product_param = layer_param.mutable_inner_product_param(); diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index dedfe1d1604..c4e2f8ea7f2 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -28,7 +28,7 @@ class LRNLayerTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 7, 3, 3, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(2, 7, 3, 3); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); @@ -53,7 +53,7 @@ void LRNLayerTest::ReferenceLRNForward( Blob* blob_top) { typedef typename TypeParam::Dtype Dtype; blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(), - blob_bottom.height(), blob_bottom.width(), Caffe::GetDefaultDeviceContext()); + blob_bottom.height(), blob_bottom.width()); Dtype* top_data = blob_top->mutable_cpu_data(); LRNParameter lrn_param = layer_param.lrn_param(); Dtype alpha = lrn_param.alpha(); diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 26dc0891ff5..aeb6e2ec45a 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -32,8 +32,8 @@ class MathFunctionsTest : public MultiDeviceTest { virtual void SetUp() { Caffe::set_random_seed(1701); - this->blob_bottom_->Reshape(11, 17, 19, 23, Caffe::GetDefaultDeviceContext()); - this->blob_top_->Reshape(11, 17, 19, 23, Caffe::GetDefaultDeviceContext()); + this->blob_bottom_->Reshape(11, 17, 19, 23); + this->blob_top_->Reshape(11, 17, 19, 23); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index fbaf2a86084..45218c9eea7 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -22,7 +22,7 @@ class MaxPoolingDropoutTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1703); - blob_bottom_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(2, 3, 6, 5); // fill the values FillerParameter filler_param; filler_param.set_value(1.); @@ -121,6 +121,8 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { for (int i = 0; i < this->blob_bottom_->count(); ++i) { sum_with_dropout += bottom_diff[i]; } + // REMOVE: + std::cout << "SUM: " << sum_with_dropout << std::endl; EXPECT_GE(sum_with_dropout, sum); } diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp index ad0d3ccebbd..a79033f59f1 100644 --- a/src/caffe/test/test_memory_data_layer.cpp +++ b/src/caffe/test/test_memory_data_layer.cpp @@ -31,8 +31,8 @@ class MemoryDataLayerTest : public MultiDeviceTest { // pick random input data FillerParameter filler_param; GaussianFiller filler(filler_param); - data_->Reshape(batches_ * batch_size_, channels_, height_, width_, Caffe::GetDefaultDeviceContext()); - labels_->Reshape(batches_ * batch_size_, 1, 1, 1, Caffe::GetDefaultDeviceContext()); + data_->Reshape(batches_ * batch_size_, channels_, height_, width_); + labels_->Reshape(batches_ * batch_size_, 1, 1, 1); filler.Fill(this->data_); filler.Fill(this->labels_); } diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp deleted file mode 100644 index 09bc4ae383c..00000000000 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ /dev/null @@ -1,123 +0,0 @@ -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class MergeCropLayerTest : public GPUDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - MergeCropLayerTest() - : blob_bottom_a_(new Blob()), - blob_bottom_b_(new Blob()), - blob_top_(new Blob()) { - } - - virtual void SetUp() { - blob_bottom_a_->Reshape(2, 3, 4, 2, Caffe::GetDefaultDeviceContext()); - blob_bottom_b_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); - // fill the values - blob_bottom_vec_.push_back(blob_bottom_a_); - blob_bottom_vec_.push_back(blob_bottom_b_); - blob_top_vec_.push_back(blob_top_); - } - - virtual ~MergeCropLayerTest() { - delete blob_bottom_a_; - delete blob_bottom_b_; - delete blob_top_; - } - - void TestForward() { - - int a_h = blob_bottom_a_->height(); - int a_w = blob_bottom_a_->width(); - int a_c = blob_bottom_a_->channels(); - - for (int n = 0; n < blob_bottom_a_->num(); ++n) { - for (int c = 0; c < a_c; ++c) { - for (int i = 0; i < a_h * a_w; ++i) { - blob_bottom_a_->mutable_cpu_data()[i + c * a_h * a_w - + n * a_h * a_w * a_c] = i + 100 * a_c; - } - } - } - - int b_h = blob_bottom_b_->height(); - int b_w = blob_bottom_b_->width(); - int b_c = blob_bottom_b_->channels(); - - for (int n = 0; n < blob_bottom_b_->num(); ++n) { - for (int c = 0; c < b_c; ++c) { - for (int i = 0; i < b_h * b_w; ++i) { - blob_bottom_b_->mutable_cpu_data()[i + c * b_h * b_w - + n * b_h * b_w * b_c] = -(i + 100 * b_c); - } - } - } - - LayerParameter layer_param; - MergeCropLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, blob_top_vec_); - - EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); - EXPECT_EQ( - this->blob_top_->channels(), - this->blob_bottom_a_->channels() + this->blob_bottom_b_->channels()); - EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_a_->height()); - EXPECT_EQ(this->blob_top_->width(), 2); - - layer.Forward(blob_bottom_vec_, blob_top_vec_); - - for (int i = 0; i < 5; i += 8) { - EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); - } - } - - void TestBackward() { - - } - - Blob* const blob_bottom_a_; - Blob* const blob_bottom_b_; - Blob* const blob_top_; - - vector*> blob_bottom_vec_; - vector*> blob_top_vec_; -}; - -TYPED_TEST_CASE(MergeCropLayerTest, TestDtypesAndDevices); - -TYPED_TEST(MergeCropLayerTest, TestSetup){ -typedef typename TypeParam::Dtype Dtype; -LayerParameter layer_param; -MergeCropLayer layer(layer_param); -layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - -EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); -EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_a_->channels() + this->blob_bottom_b_->channels()); -EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_a_->height()); -EXPECT_EQ(this->blob_top_->width(), 2); -} - -TYPED_TEST(MergeCropLayerTest, TestForward){ -this->TestForward(); -} - -TYPED_TEST(MergeCropLayerTest, TestBackward){ -this->TestBackward(); -} - -} - // namespace caffe diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index c318e127350..b2db984feb1 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -19,8 +19,8 @@ template class MultinomialLogisticLossLayerTest : public CPUDeviceTest { protected: MultinomialLogisticLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_label_(new Blob(10, 1, 1, 1, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)), blob_top_loss_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_mvn_layer.cpp b/src/caffe/test/test_mvn_layer.cpp index 5682f87764c..933b4326417 100644 --- a/src/caffe/test/test_mvn_layer.cpp +++ b/src/caffe/test/test_mvn_layer.cpp @@ -18,7 +18,7 @@ class MVNLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: MVNLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 39d57ce2a2b..50b46954ad7 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -38,7 +38,7 @@ class NetTest : public MultiDeviceTest { const bool kReshape = true; for (int i = 0; i < net_blobs.size(); ++i) { (*blobs_copy)[i].reset(new Blob()); - (*blobs_copy)[i]->CopyFrom(*net_blobs[i], Caffe::GetDefaultDeviceContext(), copy_diff, kReshape); + (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); } } @@ -51,7 +51,7 @@ class NetTest : public MultiDeviceTest { const bool kReshape = true; for (int i = 0; i < net_params.size(); ++i) { (*params_copy)[i].reset(new Blob()); - (*params_copy)[i]->CopyFrom(*net_params[i], Caffe::GetDefaultDeviceContext(), copy_diff, kReshape); + (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); } } @@ -873,7 +873,7 @@ TYPED_TEST(NetTest, TestLossWeightMidNet) { const bool kCopyDiff = true; const bool kReshape = true; Blob data_grad; - data_grad.CopyFrom(*this->net_->blob_by_name("data"), Caffe::GetDefaultDeviceContext(), kCopyDiff, kReshape); + data_grad.CopyFrom(*this->net_->blob_by_name("data"), kCopyDiff, kReshape); // Check that the loss is non-trivial, otherwise the test doesn't prove much. const Dtype kMinLossAbsValue = 1e-2; ASSERT_GE(fabs(loss), kMinLossAbsValue); @@ -1115,8 +1115,8 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { Blob shared_params; const bool reshape = true; const bool copy_diff = false; - shared_params.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), copy_diff, reshape); - shared_params.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), !copy_diff, reshape); + shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); + shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); const int count = ip1_weights->count(); // Make sure the diffs are non-trivial. for (int i = 0; i < count; ++i) { @@ -1152,11 +1152,11 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { this->net_->Backward(); // Compute the expected update. Blob unshared_params1; - unshared_params1.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), copy_diff, reshape); - unshared_params1.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), !copy_diff, reshape); + unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape); + unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape); Blob unshared_params2; - unshared_params2.CopyFrom(*ip2_weights, Caffe::GetDefaultDeviceContext(), copy_diff, reshape); - unshared_params2.CopyFrom(*ip2_weights, Caffe::GetDefaultDeviceContext(), !copy_diff, reshape); + unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); + unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); // Make sure the diffs are non-trivial and sum to the diff in the shared net. for (int i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); @@ -1203,7 +1203,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { Blob shared_params; const bool kReshape = true; const bool kCopyDiff = false; - shared_params.CopyFrom(*ip1_weights, Caffe::GetDefaultDeviceContext(), kCopyDiff, kReshape); + shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); const int count = ip1_weights->count(); // Write the net to a NetParameter, as in Solver::Snapshot. @@ -1318,10 +1318,10 @@ TYPED_TEST(NetTest, TestFromTo) { // Run Forward and Backward, recording the data diff and loss. Blob data; - data.ReshapeLike(*this->net_->blob_by_name("data"), Caffe::GetDefaultDeviceContext()); + data.ReshapeLike(*this->net_->blob_by_name("data")); this->net_->ForwardPrefilled(); this->net_->Backward(); - data.CopyFrom(*this->net_->blob_by_name("data"), Caffe::GetDefaultDeviceContext(), true, true); + data.CopyFrom(*this->net_->blob_by_name("data"), true, true); const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data(); Dtype loss = *loss_ptr; @@ -2273,8 +2273,8 @@ TYPED_TEST(NetTest, TestReshape) { FillerParameter filler_param; filler_param.set_std(1); GaussianFiller filler(filler_param); - Blob blob1(4, 3, 9, 11, Caffe::GetDefaultDeviceContext()); - Blob blob2(2, 3, 12, 10, Caffe::GetDefaultDeviceContext()); + Blob blob1(4, 3, 9, 11); + Blob blob2(2, 3, 12, 10); filler.Fill(&blob1); filler.Fill(&blob2); @@ -2282,28 +2282,28 @@ TYPED_TEST(NetTest, TestReshape) { Blob* input_blob = this->net_->input_blobs()[0]; Blob* output_blob = this->net_->output_blobs()[0]; input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width(), Caffe::GetDefaultDeviceContext()); + blob1.width()); caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); // call backward just to make sure it runs this->net_->Backward(); Blob output1(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width(), Caffe::GetDefaultDeviceContext()); + output_blob->height(), output_blob->width()); caffe_copy(output1.count(), output_blob->cpu_data(), output1.mutable_cpu_data()); input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width(), Caffe::GetDefaultDeviceContext()); + blob2.width()); caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); Blob output2(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width(), Caffe::GetDefaultDeviceContext()); + output_blob->height(), output_blob->width()); caffe_copy(output2.count(), output_blob->cpu_data(), output2.mutable_cpu_data()); input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width(), Caffe::GetDefaultDeviceContext()); + blob1.width()); caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); @@ -2312,7 +2312,7 @@ TYPED_TEST(NetTest, TestReshape) { } input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width(), Caffe::GetDefaultDeviceContext()); + blob2.width()); caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c1651d8dc3b..50ae7b81c15 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -21,7 +21,7 @@ class NeuronLayerTest : public MultiDeviceTest { protected: NeuronLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values @@ -605,7 +605,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { shared_ptr > blob_top_2(new Blob()); blob_bottom_vec_2.push_back(blob_bottom_2.get()); blob_top_vec_2.push_back(blob_top_2.get()); - blob_bottom_2->CopyFrom(*this->blob_bottom_, Caffe::GetDefaultDeviceContext(), false, true); + blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); // SetUp layers prelu.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); relu.SetUp(blob_bottom_vec_2, blob_top_vec_2); @@ -617,7 +617,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { } // Check backward shared_ptr > tmp_blob(new Blob()); - tmp_blob->ReshapeLike(*blob_top_2.get(), Caffe::GetDefaultDeviceContext()); + tmp_blob->ReshapeLike(*blob_top_2.get()); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(tmp_blob.get()); @@ -657,7 +657,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { blob_bottom_vec_2.push_back(blob_bottom_2.get()); blob_middle_vec_2.push_back(blob_middle_2.get()); blob_top_vec_2.push_back(blob_top_2.get()); - blob_bottom_2->CopyFrom(*this->blob_bottom_, Caffe::GetDefaultDeviceContext(), false, true); + blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true); // SetUp layers ip.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); prelu.SetUp(this->blob_top_vec_, this->blob_top_vec_); @@ -677,7 +677,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { } // Fill top diff with random numbers shared_ptr > tmp_blob(new Blob()); - tmp_blob->ReshapeLike(*blob_top_2.get(), Caffe::GetDefaultDeviceContext()); + tmp_blob->ReshapeLike(*blob_top_2.get()); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(tmp_blob.get()); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 725e8f27d73..69f2d5c1135 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -24,7 +24,7 @@ class PoolingLayerTest : public MultiDeviceTest { blob_top_mask_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(2, 3, 6, 5); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); @@ -50,7 +50,7 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; - blob_bottom_->Reshape(num, channels, 3, 5, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(num, channels, 3, 5); // Input: 2x 2 channels of: // [1 2 5 2 3] // [9 4 1 4 8] @@ -123,7 +123,7 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; - blob_bottom_->Reshape(num, channels, 6, 6, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(num, channels, 6, 6); // Input: 2x 2 channels of: // [35 1 6 26 19 24] // [ 3 32 7 21 23 25] @@ -248,7 +248,7 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; - blob_bottom_->Reshape(num, channels, 6, 6, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(num, channels, 6, 6); // Input: 2x 2 channels of: // [35 1 6 26 19 24] // [ 3 32 7 21 23 25] @@ -480,7 +480,7 @@ TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) { pooling_param->set_stride(2); pooling_param->set_pad(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - this->blob_bottom_->Reshape(1, 1, 3, 3, Caffe::GetDefaultDeviceContext()); + this->blob_bottom_->Reshape(1, 1, 3, 3); // Input: // [ 1 2 4 ] // [ 2 3 2 ] @@ -545,7 +545,7 @@ TYPED_TEST(PoolingLayerTest, TestForwardAve) { pooling_param->set_stride(1); pooling_param->set_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); - this->blob_bottom_->Reshape(1, 1, 3, 3, Caffe::GetDefaultDeviceContext()); + this->blob_bottom_->Reshape(1, 1, 3, 3); FillerParameter filler_param; filler_param.set_value(Dtype(2)); ConstantFiller filler(filler_param); diff --git a/src/caffe/test/test_power_layer.cpp b/src/caffe/test/test_power_layer.cpp index 68c2d76c30e..76c9e857f36 100644 --- a/src/caffe/test/test_power_layer.cpp +++ b/src/caffe/test/test_power_layer.cpp @@ -19,7 +19,7 @@ class PowerLayerTest : public MultiDeviceTest { protected: PowerLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_reduction_layer.cpp b/src/caffe/test/test_reduction_layer.cpp index 378b27960c4..f568a18089a 100644 --- a/src/caffe/test/test_reduction_layer.cpp +++ b/src/caffe/test/test_reduction_layer.cpp @@ -19,7 +19,7 @@ class ReductionLayerTest : public MultiDeviceTest { protected: ReductionLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { // fill the values Caffe::set_random_seed(1701); diff --git a/src/caffe/test/test_reshape_layer.cpp b/src/caffe/test/test_reshape_layer.cpp index d1d608a14be..9d08ec60d4e 100644 --- a/src/caffe/test/test_reshape_layer.cpp +++ b/src/caffe/test/test_reshape_layer.cpp @@ -18,7 +18,7 @@ class ReshapeLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: ReshapeLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; @@ -251,7 +251,7 @@ TYPED_TEST(ReshapeLayerTest, TestForwardAfterReshape) { // We know the above produced the correct result from TestForward. // Reshape the bottom and call layer.Reshape, then try again. vector new_bottom_shape(1, 2 * 3 * 6 * 5); - this->blob_bottom_->Reshape(new_bottom_shape, Caffe::GetDefaultDeviceContext()); + this->blob_bottom_->Reshape(new_bottom_shape); layer.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp index a4c286210a6..e5737e43f6e 100644 --- a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -21,8 +21,8 @@ class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest { protected: SigmoidCrossEntropyLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), - blob_bottom_targets_(new Blob(10, 5, 1, 1, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_targets_(new Blob(10, 5, 1, 1)), blob_top_loss_(new Blob()) { // Fill the data vector FillerParameter data_filler_param; diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index a1b7ac9beb8..ccd03646d19 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -19,7 +19,7 @@ class SliceLayerTest : public MultiDeviceTest { protected: SliceLayerTest() - : blob_bottom_(new Blob(6, 12, 2, 3, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(6, 12, 2, 3)), blob_top_0_(new Blob()), blob_top_1_(new Blob()), blob_top_2_(new Blob()) {} @@ -38,7 +38,7 @@ class SliceLayerTest : public MultiDeviceTest { } virtual void ReduceBottomBlobSize() { - blob_bottom_->Reshape(4, 5, 2, 2, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(4, 5, 2, 2); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 435a46007de..996da4b8f7c 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -19,7 +19,7 @@ class SoftmaxLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: SoftmaxLayerTest() - : blob_bottom_(new Blob(2, 10, 2, 3, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 10, 2, 3)), blob_top_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp index 318c1ddcbda..1498d5c5ce1 100644 --- a/src/caffe/test/test_softmax_with_loss_layer.cpp +++ b/src/caffe/test/test_softmax_with_loss_layer.cpp @@ -24,8 +24,8 @@ class SoftmaxWithLossLayerTest : public MultiDeviceTest { protected: SoftmaxWithLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 2, 3, Caffe::GetDefaultDeviceContext())), - blob_bottom_label_(new Blob(10, 1, 2, 3, Caffe::GetDefaultDeviceContext())), + : blob_bottom_data_(new Blob(10, 5, 2, 3)), + blob_bottom_label_(new Blob(10, 1, 2, 3)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp index 5cbe6e6ba80..be5204bfc3e 100644 --- a/src/caffe/test/test_split_layer.cpp +++ b/src/caffe/test/test_split_layer.cpp @@ -23,7 +23,7 @@ class SplitLayerTest : public MultiDeviceTest { protected: SplitLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_a_(new Blob()), blob_top_b_(new Blob()) { // fill the values diff --git a/src/caffe/test/test_spp_layer.cpp b/src/caffe/test/test_spp_layer.cpp index 95949510635..b2585f1a5fa 100644 --- a/src/caffe/test/test_spp_layer.cpp +++ b/src/caffe/test/test_spp_layer.cpp @@ -26,9 +26,9 @@ class SPPLayerTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 3, 9, 8, Caffe::GetDefaultDeviceContext()); - blob_bottom_2_->Reshape(4, 3, 1024, 765, Caffe::GetDefaultDeviceContext()); - blob_bottom_3_->Reshape(10, 3, 7, 7, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(2, 3, 9, 8); + blob_bottom_2_->Reshape(4, 3, 1024, 765); + blob_bottom_3_->Reshape(10, 3, 7, 7); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index d7ff1236af1..f84464c322c 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -26,7 +26,7 @@ class StochasticPoolingLayerTest : public MultiDeviceTest { blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); - blob_bottom_->Reshape(2, 3, 6, 5, Caffe::GetDefaultDeviceContext()); + blob_bottom_->Reshape(2, 3, 6, 5); // fill the values FillerParameter filler_param; filler_param.set_min(0.1); diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp index 5e87c620c12..5dc92832fc8 100644 --- a/src/caffe/test/test_tanh_layer.cpp +++ b/src/caffe/test/test_tanh_layer.cpp @@ -33,7 +33,7 @@ class TanHLayerTest : public MultiDeviceTest { protected: TanHLayerTest() - : blob_bottom_(new Blob(2, 3, 4, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { Caffe::set_random_seed(1701); FillerParameter filler_param; diff --git a/src/caffe/test/test_threshold_layer.cpp b/src/caffe/test/test_threshold_layer.cpp index b4c21c95463..05ce82120e6 100644 --- a/src/caffe/test/test_threshold_layer.cpp +++ b/src/caffe/test/test_threshold_layer.cpp @@ -16,7 +16,7 @@ class ThresholdLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: ThresholdLayerTest() - : blob_bottom_(new Blob(2, 3, 6, 5, Caffe::GetDefaultDeviceContext())), + : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { Caffe::set_random_seed(1701); // fill the values diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 3d497f7bb71..55de67772f0 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -59,7 +59,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { } // Test when we have a transposed A - A.Reshape(1, 1, 3, 2, Caffe::GetDefaultDeviceContext()); + A.Reshape(1, 1, 3, 2); caffe_cpu_copy(6, A_reshape_data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -84,7 +84,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { } // Test when we have a transposed A and a transposed B too - B.Reshape(1, 1, 4, 3, Caffe::GetDefaultDeviceContext()); + B.Reshape(1, 1, 4, 3); caffe_cpu_copy(12, B_reshape_data, B.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -109,7 +109,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { } // Test when we have a transposed B - A.Reshape(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); + A.Reshape(1, 1, 2, 3); caffe_cpu_copy(6, data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -166,7 +166,7 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { } else { #ifdef USE_GREENTEA greentea_gpu_gemv(dc.id(), CblasNoTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, - (cl_mem)(y.gpu_data()),0, 0., (cl_mem)(x.mutable_gpu_data()),0); + (cl_mem)(x.gpu_data()),0, 0., (cl_mem)(y.mutable_gpu_data()),0); #endif // USE_GREENTEA } diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index d7b7aadf3df..fcece954f54 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -257,7 +257,7 @@ void hdf5_load_nd_dataset_helper( for (int i = 0; i < dims.size(); ++i) { blob_dims[i] = dims[i]; } - blob->Reshape(blob_dims,blob->device_context()); + blob->Reshape(blob_dims); } template <> diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index c5c26b34758..8c92738c1a5 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -261,7 +261,7 @@ double caffe_nextafter(const double b); void caffe_rng_uniform(const int n, unsigned int* r) { CHECK_GE(n, 0); CHECK(r); - boost::uniform_int random_distribution(0, UINT32_MAX); + boost::uniform_int random_distribution(INT32_MIN, INT32_MAX); boost::variate_generator> variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { From 9812e5926cf4e17a2377db6c87e5500cc1eb1c57 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 16 Jun 2015 04:44:23 +0200 Subject: [PATCH 052/254] Cleanup --- .goutputstream-L9KQZX | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 .goutputstream-L9KQZX diff --git a/.goutputstream-L9KQZX b/.goutputstream-L9KQZX deleted file mode 100644 index 2ee200be18a..00000000000 --- a/.goutputstream-L9KQZX +++ /dev/null @@ -1,21 +0,0 @@ -GREENTEA BUILDING BLOCKS: - -viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); -viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); - -viennacl::ocl::kernel &oclk_kernel = program.get_kernel( - CL_KERNEL_SELECT("kernel")); -viennacl::ocl::enqueue( - oclk_max_pool_forward(WrapHandle((cl_mem) data, ctx)), - ctx.get_queue()); -ctx.get_queue().finish(); - -if (this->device_context_.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA -#endif // USE_GREENTEA -} From 22bb9632d6c7057e9ecc73449a21e15203179848 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 16 Jun 2015 15:09:02 +0200 Subject: [PATCH 053/254] OpenCL passes all Caffe tests. --- examples/cpp_classification/classification.cpp | 4 ++-- src/caffe/greentea/cl_kernels.cpp | 4 ++-- src/caffe/greentea/cl_kernels/math.cl | 6 +++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/examples/cpp_classification/classification.cpp b/examples/cpp_classification/classification.cpp index f46dc491fa2..1c6371e382b 100644 --- a/examples/cpp_classification/classification.cpp +++ b/examples/cpp_classification/classification.cpp @@ -118,7 +118,7 @@ void Classifier::SetMean(const string& mean_file) { /* Convert from BlobProto to Blob */ Blob mean_blob; - mean_blob.FromProto(blob_proto, Caffe::GetDefaultDeviceContext()); + mean_blob.FromProto(blob_proto); CHECK_EQ(mean_blob.channels(), num_channels_) << "Number of channels of mean file doesn't match input layer."; @@ -145,7 +145,7 @@ void Classifier::SetMean(const string& mean_file) { std::vector Classifier::Predict(const cv::Mat& img) { Blob* input_layer = net_->input_blobs()[0]; input_layer->Reshape(1, num_channels_, - input_geometry_.height, input_geometry_.width, Caffe::GetDefaultDeviceContext()); + input_geometry_.height, input_geometry_.width); /* Forward dimension change to all layers. */ net_->Reshape(); diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 7a2b3b14837..b4a5801f8e2 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -19,7 +19,7 @@ std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; @@ -38,7 +38,7 @@ std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.c std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index 7ac9ea4424a..63ceba5fe41 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -77,7 +77,11 @@ __kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, __global Dtype* y, const int offy) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - y[offy + index] = pow(a[offa + index], alpha); + if(a[offa + index] < 0 && alpha < 1 && alpha > -1) { + y[offy + index] = NAN; + } else { + y[offy + index] = pow(a[offa + index], alpha); + } } } From 89a1750eca7cf8ed5deb66d06bb3e2123d3ec3a4 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 16 Jun 2015 19:45:30 +0200 Subject: [PATCH 054/254] Fixed all CPU-OpenCL-hybrid errors. All OpenCL kernels are now thread count independent. --- src/caffe/greentea/cl_kernels.cpp | 4 +-- src/caffe/greentea/cl_kernels/pooling.cl | 5 +-- src/caffe/layers/pooling_layer.cu | 9 +----- .../test/test_contrastive_loss_layer.cpp | 4 +-- src/caffe/test/test_lrn_layer.cpp | 2 +- .../test/test_maxpool_dropout_layers.cpp | 2 -- src/caffe/test/test_neuron_layer.cpp | 31 ++++++++++--------- 7 files changed, 25 insertions(+), 32 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index b4a5801f8e2..1293784e31b 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -21,7 +21,7 @@ std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\ std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; @@ -40,7 +40,7 @@ std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index 041d8ded4a3..4e0f2a0ca10 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -79,7 +79,7 @@ __kernel void TEMPLATE(ave_pool_forward,Dtype)( } __kernel void TEMPLATE(sto_pool_forward_train,Dtype)( - const int nthreads, __global const Dtype* const bottom_data, const int num, + const int nthreads, __global const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, @@ -113,7 +113,8 @@ __kernel void TEMPLATE(sto_pool_forward_train,Dtype)( if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; - return; + h = hend; + w = wend; } } } diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index e19865d2936..f9f4cd7b814 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -257,7 +257,6 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, WrapHandle((cl_mem) mask, ctx), WrapHandle((cl_mem) top_mask, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; case PoolingParameter_PoolMethod_AVE: { @@ -268,7 +267,6 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, WrapHandle((cl_mem)top_data,ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; case PoolingParameter_PoolMethod_STOCHASTIC: { @@ -282,9 +280,8 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data,ctx), bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, - stride_h_, stride_w_, WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()),ctx), WrapHandle((cl_mem)(top_data),ctx)), + stride_h_, stride_w_, WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()),ctx), WrapHandle((cl_mem)top_data,ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } else { viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( CL_KERNEL_SELECT("sto_pool_forward_test")); @@ -293,7 +290,6 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, WrapHandle((cl_mem)top_data,ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } } break; @@ -516,7 +512,6 @@ void PoolingLayer::Backward_gpu(const vector*>& top, pad_w_, WrapHandle((cl_mem) bottom_diff, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; case PoolingParameter_PoolMethod_AVE: { @@ -530,7 +525,6 @@ void PoolingLayer::Backward_gpu(const vector*>& top, pad_w_, WrapHandle((cl_mem) bottom_diff, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; case PoolingParameter_PoolMethod_STOCHASTIC: { @@ -544,7 +538,6 @@ void PoolingLayer::Backward_gpu(const vector*>& top, kernel_w_, stride_h_, stride_w_, WrapHandle((cl_mem) bottom_diff, ctx)), ctx.get_queue()); - ctx.get_queue().finish(); } break; default: { diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 1e9447cbc51..359e49b0419 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -84,7 +84,7 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { } } loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-5); } TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { @@ -126,7 +126,7 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { } } loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-5); } TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index c4e2f8ea7f2..13bb9b6b287 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -23,7 +23,7 @@ class LRNLayerTest : public MultiDeviceTest { protected: LRNLayerTest() - : epsilon_(Dtype(1e-5)), + : epsilon_(Dtype(1e-3)), blob_bottom_(new Blob()), blob_top_(new Blob()) {} virtual void SetUp() { diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index 45218c9eea7..611d9790863 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -121,8 +121,6 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { for (int i = 0; i < this->blob_bottom_->count(); ++i) { sum_with_dropout += bottom_diff[i]; } - // REMOVE: - std::cout << "SUM: " << sum_with_dropout << std::endl; EXPECT_GE(sum_with_dropout, sum); } diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 50ae7b81c15..c7259662d8f 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -77,7 +77,7 @@ class NeuronLayerTest : public MultiDeviceTest { ExpLayer layer(layer_param); layer.SetUp(blob_bottom_vec_, blob_top_vec_); layer.Forward(blob_bottom_vec_, blob_top_vec_); - const Dtype kDelta = 2e-4; + const Dtype kDelta = 2e-2; const Dtype* bottom_data = blob_bottom_->cpu_data(); const Dtype* top_data = blob_top_->cpu_data(); for (int i = 0; i < blob_bottom_->count(); ++i) { @@ -97,7 +97,7 @@ class NeuronLayerTest : public MultiDeviceTest { layer_param.mutable_exp_param()->set_scale(scale); layer_param.mutable_exp_param()->set_shift(shift); ExpLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); + GradientChecker checker(1e-2, 1e-2); checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); } @@ -135,7 +135,7 @@ class NeuronLayerTest : public MultiDeviceTest { LogLayer layer(layer_param); layer.SetUp(blob_bottom_vec_, blob_top_vec_); layer.Forward(blob_bottom_vec_, blob_top_vec_); - const Dtype kDelta = 2e-4; + const Dtype kDelta = 2e-3; const Dtype* bottom_data = blob_bottom_->cpu_data(); const Dtype* top_data = blob_top_->cpu_data(); for (int i = 0; i < blob_bottom_->count(); ++i) { @@ -157,7 +157,7 @@ class NeuronLayerTest : public MultiDeviceTest { layer_param.mutable_log_param()->set_scale(scale); layer_param.mutable_log_param()->set_shift(shift); LogLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); + GradientChecker checker(1e-2, 1e-1); checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); } }; @@ -182,7 +182,7 @@ TYPED_TEST(NeuronLayerTest, TestAbsGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; AbsValLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -206,7 +206,7 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ReLULayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -237,7 +237,7 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientWithNegativeSlope) { CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); ReLULayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -251,8 +251,9 @@ TYPED_TEST(NeuronLayerTest, TestSigmoid) { // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype kDelta = 2e-3; for (int i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); + EXPECT_NEAR(top_data[i], 1. / (1 + exp(-bottom_data[i])),kDelta); // check that we squashed the value between 0 and 1 EXPECT_GE(top_data[i], 0.); EXPECT_LE(top_data[i], 1.); @@ -263,7 +264,7 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; SigmoidLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -295,7 +296,7 @@ TYPED_TEST(NeuronLayerTest, TestTanHGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; TanHLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); + GradientChecker checker(1e-2, 1e-2); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -496,7 +497,7 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradient) { LayerParameter layer_param; layer_param.set_phase(TRAIN); DropoutLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); + GradientChecker checker(1e-2, 1e-2); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -506,7 +507,7 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) { LayerParameter layer_param; layer_param.set_phase(TEST); DropoutLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); + GradientChecker checker(1e-2, 1e-2); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -530,7 +531,7 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; BNLLLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); + GradientChecker checker(1e-2, 1e-2); checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -575,7 +576,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUGradient) { FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(layer.blobs()[0].get()); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } @@ -586,7 +587,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUGradientChannelShared) { layer_param.mutable_prelu_param()->set_channel_shared(true); PReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } From c6d2f2f84f29d65bfdab1a81ccd15c0abcd02a4b Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 17 Jun 2015 00:49:03 +0200 Subject: [PATCH 055/254] Fixed OpenCL-only build. --- Makefile | 7 +- Makefile.config.example | 4 +- absval_layer.d | 339 +++++++++++++++++++ base_data_layer.d | 326 +++++++++++++++++++ bnll_layer.d | 339 +++++++++++++++++++ concat_layer.d | 339 +++++++++++++++++++ contrastive_loss_layer.d | 342 ++++++++++++++++++++ conv_layer.d | 342 ++++++++++++++++++++ include/caffe/greentea/greentea.hpp | 2 +- include/caffe/test/test_caffe_main.hpp | 2 +- include/caffe/util/math_functions.hpp | 4 +- src/caffe/common.cpp | 24 +- src/caffe/layers/absval_layer.cu | 2 +- src/caffe/layers/relu_layer.cu | 4 + src/caffe/layers/silence_layer.cu | 2 + src/caffe/layers/softmax_loss_layer.cu | 4 + src/caffe/syncedmem.cpp | 2 + src/caffe/test/test_caffe_main.cpp | 14 +- src/caffe/test/test_im2col_kernel.cu | 61 ++-- src/caffe/test/test_inner_product_layer.cpp | 78 ++--- src/caffe/test/test_math_functions.cpp | 2 + src/caffe/test/test_platform.cpp | 4 +- src/caffe/test/test_softmax_layer.cpp | 2 +- src/caffe/test/test_util_blas.cpp | 229 +++++++------ src/caffe/util/math_functions.cu | 3 + 25 files changed, 2240 insertions(+), 237 deletions(-) create mode 100644 absval_layer.d create mode 100644 base_data_layer.d create mode 100644 bnll_layer.d create mode 100644 concat_layer.d create mode 100644 contrastive_loss_layer.d create mode 100644 conv_layer.d diff --git a/Makefile b/Makefile index c2727288d58..cff06c890fd 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,11 @@ $(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) endif include $(CONFIG_FILE) +ifeq ($(CPU_ONLY),1) + USE_CUDA := 0 + USE_GREENTEA := 0 +endif + CXXFLAGS += -std=c++11 -Wno-deprecated-declarations LINKFLAGS += -std=c++11 -Wno-deprecated-declarations NVCCFLAGS += -Xcompiler "-Wno-deprecated-declarations" -Xlinker "-Wno-deprecated-declarations" -Xarchive "-Wno-deprecated-declarations" -Xnvlink "-Wno-deprecated-declarations" @@ -609,7 +614,7 @@ ifeq ($(USE_CUDA), 1) @ cat $@.$(WARNS_EXT) else @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -x c++ -c $< -o $@ 2> $@.$(WARNS_EXT) \ + $(Q)$(CXX) $(CXXFLAGS) -c -x c++ $< -o $@ 2> $@.$(WARNS_EXT) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) endif diff --git a/Makefile.config.example b/Makefile.config.example index dbcba5c99bf..ce2df4fceb7 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -15,10 +15,12 @@ VIENNACL_DIR = ../ViennaCL # Either set CLBLAS or VIENNACLBLAS to 1, not both. # If you want to use OpenCL/Greentea on the CPU only, you can also disable both. # When both are disabled, GPUs won't work. CPUs always use CBLAS (Atlas, MKL or OpenBLAS). +# The chosen BLAS library needs to be compiled and installed from source. +# CLBLAS should be faster, especially on AMD cards. USE_CLBLAS := 1 USE_VIENNACLBLAS := 0 -# Enable double precision support for OpenCL/Greentea +# Enable or disable double precision support for OpenCL/Greentea GREENTEA_DOUBLE_SUPPORT := 1 # cuDNN acceleration switch (uncomment to build with cuDNN). diff --git a/absval_layer.d b/absval_layer.d new file mode 100644 index 00000000000..aac7a03e07a --- /dev/null +++ b/absval_layer.d @@ -0,0 +1,339 @@ +absval_layer.o: src/caffe/layers/absval_layer.cu include/caffe/layer.hpp \ + include/caffe/blob.hpp include/caffe/common.hpp \ + include/caffe/util/device_alternate.hpp \ + include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ + /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ + ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ + ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ + ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ + ../ViennaCL/viennacl/tools/shared_ptr.hpp \ + ../ViennaCL/viennacl/ocl/device.hpp \ + ../ViennaCL/viennacl/ocl/device_utils.hpp \ + ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ + ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ + ../ViennaCL/viennacl/ocl/platform.hpp \ + ../ViennaCL/viennacl/ocl/command_queue.hpp \ + ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ + ../ViennaCL/viennacl/ocl/enqueue.hpp \ + ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ + ../ViennaCL/viennacl/detail/vector_def.hpp \ + ../ViennaCL/viennacl/tools/entry_proxy.hpp \ + ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ + ../ViennaCL/viennacl/backend/mem_handle.hpp \ + ../ViennaCL/viennacl/backend/cpu_ram.hpp \ + ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ + ../ViennaCL/viennacl/traits/context.hpp \ + ../ViennaCL/viennacl/backend/util.hpp \ + ../ViennaCL/viennacl/meta/result_of.hpp \ + ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ + ../ViennaCL/viennacl/tools/tools.hpp \ + ../ViennaCL/viennacl/tools/adapter.hpp \ + ../ViennaCL/viennacl/meta/predicate.hpp \ + ../ViennaCL/viennacl/traits/size.hpp \ + ../ViennaCL/viennacl/traits/start.hpp \ + ../ViennaCL/viennacl/traits/stride.hpp \ + ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ + ../ViennaCL/viennacl/ocl/utils.hpp \ + ../ViennaCL/viennacl/linalg/opencl/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ + ../ViennaCL/viennacl/scheduler/preset.hpp \ + ../ViennaCL/viennacl/device_specific/forwards.h \ + ../ViennaCL/viennacl/scheduler/io.hpp \ + ../ViennaCL/viennacl/scheduler/forwards.h \ + ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ + ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ + ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ + ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ + ../ViennaCL/viennacl/device_specific/utils.hpp \ + ../ViennaCL/viennacl/detail/matrix_def.hpp \ + ../ViennaCL/viennacl/traits/row_major.hpp \ + ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ + ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ + ../ViennaCL/viennacl/slice.hpp \ + ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ + ../ViennaCL/viennacl/linalg/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ + ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ + ../ViennaCL/viennacl/vector_proxy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ + .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ + include/caffe/util/math_functions.hpp \ + include/caffe/util/mkl_alternate.hpp \ + include/caffe/greentea/greentea_math_functions.hpp \ + include/caffe/layer_factory.hpp include/caffe/vision_layers.hpp \ + include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ + include/caffe/data_transformer.hpp include/caffe/filler.hpp \ + include/caffe/internal_thread.hpp include/caffe/util/db.hpp \ + include/caffe/loss_layers.hpp include/caffe/neuron_layers.hpp \ + include/caffe/greentea/greentea_im2col.hpp + +include/caffe/layer.hpp: + +include/caffe/blob.hpp: + +include/caffe/common.hpp: + +include/caffe/util/device_alternate.hpp: + +include/caffe/greentea/greentea.hpp: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: + +../ViennaCL/viennacl/ocl/context.hpp: + +../ViennaCL/viennacl/ocl/forwards.h: + +../ViennaCL/viennacl/ocl/handle.hpp: + +../ViennaCL/viennacl/ocl/error.hpp: + +../ViennaCL/viennacl/ocl/kernel.hpp: + +../ViennaCL/viennacl/ocl/program.hpp: + +../ViennaCL/viennacl/tools/shared_ptr.hpp: + +../ViennaCL/viennacl/ocl/device.hpp: + +../ViennaCL/viennacl/ocl/device_utils.hpp: + +../ViennaCL/viennacl/forwards.h: + +../ViennaCL/viennacl/meta/enable_if.hpp: + +../ViennaCL/viennacl/version.hpp: + +../ViennaCL/viennacl/ocl/local_mem.hpp: + +../ViennaCL/viennacl/ocl/platform.hpp: + +../ViennaCL/viennacl/ocl/command_queue.hpp: + +../ViennaCL/viennacl/tools/sha1.hpp: + +../ViennaCL/viennacl/ocl/backend.hpp: + +../ViennaCL/viennacl/ocl/enqueue.hpp: + +../ViennaCL/viennacl/backend/opencl.hpp: + +../ViennaCL/viennacl/vector.hpp: + +../ViennaCL/viennacl/detail/vector_def.hpp: + +../ViennaCL/viennacl/tools/entry_proxy.hpp: + +../ViennaCL/viennacl/scalar.hpp: + +../ViennaCL/viennacl/backend/memory.hpp: + +../ViennaCL/viennacl/backend/mem_handle.hpp: + +../ViennaCL/viennacl/backend/cpu_ram.hpp: + +../ViennaCL/viennacl/context.hpp: + +../ViennaCL/viennacl/traits/handle.hpp: + +../ViennaCL/viennacl/traits/context.hpp: + +../ViennaCL/viennacl/backend/util.hpp: + +../ViennaCL/viennacl/meta/result_of.hpp: + +../ViennaCL/viennacl/linalg/scalar_operations.hpp: + +../ViennaCL/viennacl/tools/tools.hpp: + +../ViennaCL/viennacl/tools/adapter.hpp: + +../ViennaCL/viennacl/meta/predicate.hpp: + +../ViennaCL/viennacl/traits/size.hpp: + +../ViennaCL/viennacl/traits/start.hpp: + +../ViennaCL/viennacl/traits/stride.hpp: + +../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: + +../ViennaCL/viennacl/ocl/utils.hpp: + +../ViennaCL/viennacl/linalg/opencl/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: + +../ViennaCL/viennacl/scheduler/preset.hpp: + +../ViennaCL/viennacl/device_specific/forwards.h: + +../ViennaCL/viennacl/scheduler/io.hpp: + +../ViennaCL/viennacl/scheduler/forwards.h: + +../ViennaCL/viennacl/device_specific/execution_handler.hpp: + +../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: + +../ViennaCL/viennacl/device_specific/templates/template_base.hpp: + +../ViennaCL/viennacl/device_specific/mapped_objects.hpp: + +../ViennaCL/viennacl/device_specific/utils.hpp: + +../ViennaCL/viennacl/detail/matrix_def.hpp: + +../ViennaCL/viennacl/traits/row_major.hpp: + +../ViennaCL/viennacl/device_specific/tree_parsing.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: + +../ViennaCL/viennacl/matrix_proxy.hpp: + +../ViennaCL/viennacl/range.hpp: + +../ViennaCL/viennacl/slice.hpp: + +../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/utils.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: + +../ViennaCL/viennacl/linalg/detail/op_executor.hpp: + +../ViennaCL/viennacl/linalg/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/detail/op_applier.hpp: + +../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: + +../ViennaCL/viennacl/vector_proxy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: + +.build_release/src/caffe/proto/caffe.pb.h: + +include/caffe/syncedmem.hpp: + +include/caffe/util/math_functions.hpp: + +include/caffe/util/mkl_alternate.hpp: + +include/caffe/greentea/greentea_math_functions.hpp: + +include/caffe/layer_factory.hpp: + +include/caffe/vision_layers.hpp: + +include/caffe/common_layers.hpp: + +include/caffe/data_layers.hpp: + +include/caffe/data_transformer.hpp: + +include/caffe/filler.hpp: + +include/caffe/internal_thread.hpp: + +include/caffe/util/db.hpp: + +include/caffe/loss_layers.hpp: + +include/caffe/neuron_layers.hpp: + +include/caffe/greentea/greentea_im2col.hpp: diff --git a/base_data_layer.d b/base_data_layer.d new file mode 100644 index 00000000000..d859800a89f --- /dev/null +++ b/base_data_layer.d @@ -0,0 +1,326 @@ +base_data_layer.o: src/caffe/layers/base_data_layer.cu \ + include/caffe/data_layers.hpp include/caffe/blob.hpp \ + include/caffe/common.hpp include/caffe/util/device_alternate.hpp \ + include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ + /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ + ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ + ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ + ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ + ../ViennaCL/viennacl/tools/shared_ptr.hpp \ + ../ViennaCL/viennacl/ocl/device.hpp \ + ../ViennaCL/viennacl/ocl/device_utils.hpp \ + ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ + ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ + ../ViennaCL/viennacl/ocl/platform.hpp \ + ../ViennaCL/viennacl/ocl/command_queue.hpp \ + ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ + ../ViennaCL/viennacl/ocl/enqueue.hpp \ + ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ + ../ViennaCL/viennacl/detail/vector_def.hpp \ + ../ViennaCL/viennacl/tools/entry_proxy.hpp \ + ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ + ../ViennaCL/viennacl/backend/mem_handle.hpp \ + ../ViennaCL/viennacl/backend/cpu_ram.hpp \ + ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ + ../ViennaCL/viennacl/traits/context.hpp \ + ../ViennaCL/viennacl/backend/util.hpp \ + ../ViennaCL/viennacl/meta/result_of.hpp \ + ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ + ../ViennaCL/viennacl/tools/tools.hpp \ + ../ViennaCL/viennacl/tools/adapter.hpp \ + ../ViennaCL/viennacl/meta/predicate.hpp \ + ../ViennaCL/viennacl/traits/size.hpp \ + ../ViennaCL/viennacl/traits/start.hpp \ + ../ViennaCL/viennacl/traits/stride.hpp \ + ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ + ../ViennaCL/viennacl/ocl/utils.hpp \ + ../ViennaCL/viennacl/linalg/opencl/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ + ../ViennaCL/viennacl/scheduler/preset.hpp \ + ../ViennaCL/viennacl/device_specific/forwards.h \ + ../ViennaCL/viennacl/scheduler/io.hpp \ + ../ViennaCL/viennacl/scheduler/forwards.h \ + ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ + ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ + ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ + ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ + ../ViennaCL/viennacl/device_specific/utils.hpp \ + ../ViennaCL/viennacl/detail/matrix_def.hpp \ + ../ViennaCL/viennacl/traits/row_major.hpp \ + ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ + ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ + ../ViennaCL/viennacl/slice.hpp \ + ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ + ../ViennaCL/viennacl/linalg/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ + ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ + ../ViennaCL/viennacl/vector_proxy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ + .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ + include/caffe/util/math_functions.hpp \ + include/caffe/util/mkl_alternate.hpp \ + include/caffe/greentea/greentea_math_functions.hpp \ + include/caffe/data_transformer.hpp include/caffe/filler.hpp \ + include/caffe/internal_thread.hpp include/caffe/layer.hpp \ + include/caffe/layer_factory.hpp include/caffe/util/db.hpp + +include/caffe/data_layers.hpp: + +include/caffe/blob.hpp: + +include/caffe/common.hpp: + +include/caffe/util/device_alternate.hpp: + +include/caffe/greentea/greentea.hpp: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: + +../ViennaCL/viennacl/ocl/context.hpp: + +../ViennaCL/viennacl/ocl/forwards.h: + +../ViennaCL/viennacl/ocl/handle.hpp: + +../ViennaCL/viennacl/ocl/error.hpp: + +../ViennaCL/viennacl/ocl/kernel.hpp: + +../ViennaCL/viennacl/ocl/program.hpp: + +../ViennaCL/viennacl/tools/shared_ptr.hpp: + +../ViennaCL/viennacl/ocl/device.hpp: + +../ViennaCL/viennacl/ocl/device_utils.hpp: + +../ViennaCL/viennacl/forwards.h: + +../ViennaCL/viennacl/meta/enable_if.hpp: + +../ViennaCL/viennacl/version.hpp: + +../ViennaCL/viennacl/ocl/local_mem.hpp: + +../ViennaCL/viennacl/ocl/platform.hpp: + +../ViennaCL/viennacl/ocl/command_queue.hpp: + +../ViennaCL/viennacl/tools/sha1.hpp: + +../ViennaCL/viennacl/ocl/backend.hpp: + +../ViennaCL/viennacl/ocl/enqueue.hpp: + +../ViennaCL/viennacl/backend/opencl.hpp: + +../ViennaCL/viennacl/vector.hpp: + +../ViennaCL/viennacl/detail/vector_def.hpp: + +../ViennaCL/viennacl/tools/entry_proxy.hpp: + +../ViennaCL/viennacl/scalar.hpp: + +../ViennaCL/viennacl/backend/memory.hpp: + +../ViennaCL/viennacl/backend/mem_handle.hpp: + +../ViennaCL/viennacl/backend/cpu_ram.hpp: + +../ViennaCL/viennacl/context.hpp: + +../ViennaCL/viennacl/traits/handle.hpp: + +../ViennaCL/viennacl/traits/context.hpp: + +../ViennaCL/viennacl/backend/util.hpp: + +../ViennaCL/viennacl/meta/result_of.hpp: + +../ViennaCL/viennacl/linalg/scalar_operations.hpp: + +../ViennaCL/viennacl/tools/tools.hpp: + +../ViennaCL/viennacl/tools/adapter.hpp: + +../ViennaCL/viennacl/meta/predicate.hpp: + +../ViennaCL/viennacl/traits/size.hpp: + +../ViennaCL/viennacl/traits/start.hpp: + +../ViennaCL/viennacl/traits/stride.hpp: + +../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: + +../ViennaCL/viennacl/ocl/utils.hpp: + +../ViennaCL/viennacl/linalg/opencl/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: + +../ViennaCL/viennacl/scheduler/preset.hpp: + +../ViennaCL/viennacl/device_specific/forwards.h: + +../ViennaCL/viennacl/scheduler/io.hpp: + +../ViennaCL/viennacl/scheduler/forwards.h: + +../ViennaCL/viennacl/device_specific/execution_handler.hpp: + +../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: + +../ViennaCL/viennacl/device_specific/templates/template_base.hpp: + +../ViennaCL/viennacl/device_specific/mapped_objects.hpp: + +../ViennaCL/viennacl/device_specific/utils.hpp: + +../ViennaCL/viennacl/detail/matrix_def.hpp: + +../ViennaCL/viennacl/traits/row_major.hpp: + +../ViennaCL/viennacl/device_specific/tree_parsing.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: + +../ViennaCL/viennacl/matrix_proxy.hpp: + +../ViennaCL/viennacl/range.hpp: + +../ViennaCL/viennacl/slice.hpp: + +../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/utils.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: + +../ViennaCL/viennacl/linalg/detail/op_executor.hpp: + +../ViennaCL/viennacl/linalg/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/detail/op_applier.hpp: + +../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: + +../ViennaCL/viennacl/vector_proxy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: + +.build_release/src/caffe/proto/caffe.pb.h: + +include/caffe/syncedmem.hpp: + +include/caffe/util/math_functions.hpp: + +include/caffe/util/mkl_alternate.hpp: + +include/caffe/greentea/greentea_math_functions.hpp: + +include/caffe/data_transformer.hpp: + +include/caffe/filler.hpp: + +include/caffe/internal_thread.hpp: + +include/caffe/layer.hpp: + +include/caffe/layer_factory.hpp: + +include/caffe/util/db.hpp: diff --git a/bnll_layer.d b/bnll_layer.d new file mode 100644 index 00000000000..ba7ff34e30e --- /dev/null +++ b/bnll_layer.d @@ -0,0 +1,339 @@ +bnll_layer.o: src/caffe/layers/bnll_layer.cu include/caffe/layer.hpp \ + include/caffe/blob.hpp include/caffe/common.hpp \ + include/caffe/util/device_alternate.hpp \ + include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ + /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ + ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ + ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ + ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ + ../ViennaCL/viennacl/tools/shared_ptr.hpp \ + ../ViennaCL/viennacl/ocl/device.hpp \ + ../ViennaCL/viennacl/ocl/device_utils.hpp \ + ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ + ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ + ../ViennaCL/viennacl/ocl/platform.hpp \ + ../ViennaCL/viennacl/ocl/command_queue.hpp \ + ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ + ../ViennaCL/viennacl/ocl/enqueue.hpp \ + ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ + ../ViennaCL/viennacl/detail/vector_def.hpp \ + ../ViennaCL/viennacl/tools/entry_proxy.hpp \ + ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ + ../ViennaCL/viennacl/backend/mem_handle.hpp \ + ../ViennaCL/viennacl/backend/cpu_ram.hpp \ + ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ + ../ViennaCL/viennacl/traits/context.hpp \ + ../ViennaCL/viennacl/backend/util.hpp \ + ../ViennaCL/viennacl/meta/result_of.hpp \ + ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ + ../ViennaCL/viennacl/tools/tools.hpp \ + ../ViennaCL/viennacl/tools/adapter.hpp \ + ../ViennaCL/viennacl/meta/predicate.hpp \ + ../ViennaCL/viennacl/traits/size.hpp \ + ../ViennaCL/viennacl/traits/start.hpp \ + ../ViennaCL/viennacl/traits/stride.hpp \ + ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ + ../ViennaCL/viennacl/ocl/utils.hpp \ + ../ViennaCL/viennacl/linalg/opencl/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ + ../ViennaCL/viennacl/scheduler/preset.hpp \ + ../ViennaCL/viennacl/device_specific/forwards.h \ + ../ViennaCL/viennacl/scheduler/io.hpp \ + ../ViennaCL/viennacl/scheduler/forwards.h \ + ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ + ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ + ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ + ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ + ../ViennaCL/viennacl/device_specific/utils.hpp \ + ../ViennaCL/viennacl/detail/matrix_def.hpp \ + ../ViennaCL/viennacl/traits/row_major.hpp \ + ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ + ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ + ../ViennaCL/viennacl/slice.hpp \ + ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ + ../ViennaCL/viennacl/linalg/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ + ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ + ../ViennaCL/viennacl/vector_proxy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ + .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ + include/caffe/util/math_functions.hpp \ + include/caffe/util/mkl_alternate.hpp \ + include/caffe/greentea/greentea_math_functions.hpp \ + include/caffe/layer_factory.hpp include/caffe/vision_layers.hpp \ + include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ + include/caffe/data_transformer.hpp include/caffe/filler.hpp \ + include/caffe/internal_thread.hpp include/caffe/util/db.hpp \ + include/caffe/loss_layers.hpp include/caffe/neuron_layers.hpp \ + include/caffe/greentea/greentea_im2col.hpp + +include/caffe/layer.hpp: + +include/caffe/blob.hpp: + +include/caffe/common.hpp: + +include/caffe/util/device_alternate.hpp: + +include/caffe/greentea/greentea.hpp: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: + +../ViennaCL/viennacl/ocl/context.hpp: + +../ViennaCL/viennacl/ocl/forwards.h: + +../ViennaCL/viennacl/ocl/handle.hpp: + +../ViennaCL/viennacl/ocl/error.hpp: + +../ViennaCL/viennacl/ocl/kernel.hpp: + +../ViennaCL/viennacl/ocl/program.hpp: + +../ViennaCL/viennacl/tools/shared_ptr.hpp: + +../ViennaCL/viennacl/ocl/device.hpp: + +../ViennaCL/viennacl/ocl/device_utils.hpp: + +../ViennaCL/viennacl/forwards.h: + +../ViennaCL/viennacl/meta/enable_if.hpp: + +../ViennaCL/viennacl/version.hpp: + +../ViennaCL/viennacl/ocl/local_mem.hpp: + +../ViennaCL/viennacl/ocl/platform.hpp: + +../ViennaCL/viennacl/ocl/command_queue.hpp: + +../ViennaCL/viennacl/tools/sha1.hpp: + +../ViennaCL/viennacl/ocl/backend.hpp: + +../ViennaCL/viennacl/ocl/enqueue.hpp: + +../ViennaCL/viennacl/backend/opencl.hpp: + +../ViennaCL/viennacl/vector.hpp: + +../ViennaCL/viennacl/detail/vector_def.hpp: + +../ViennaCL/viennacl/tools/entry_proxy.hpp: + +../ViennaCL/viennacl/scalar.hpp: + +../ViennaCL/viennacl/backend/memory.hpp: + +../ViennaCL/viennacl/backend/mem_handle.hpp: + +../ViennaCL/viennacl/backend/cpu_ram.hpp: + +../ViennaCL/viennacl/context.hpp: + +../ViennaCL/viennacl/traits/handle.hpp: + +../ViennaCL/viennacl/traits/context.hpp: + +../ViennaCL/viennacl/backend/util.hpp: + +../ViennaCL/viennacl/meta/result_of.hpp: + +../ViennaCL/viennacl/linalg/scalar_operations.hpp: + +../ViennaCL/viennacl/tools/tools.hpp: + +../ViennaCL/viennacl/tools/adapter.hpp: + +../ViennaCL/viennacl/meta/predicate.hpp: + +../ViennaCL/viennacl/traits/size.hpp: + +../ViennaCL/viennacl/traits/start.hpp: + +../ViennaCL/viennacl/traits/stride.hpp: + +../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: + +../ViennaCL/viennacl/ocl/utils.hpp: + +../ViennaCL/viennacl/linalg/opencl/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: + +../ViennaCL/viennacl/scheduler/preset.hpp: + +../ViennaCL/viennacl/device_specific/forwards.h: + +../ViennaCL/viennacl/scheduler/io.hpp: + +../ViennaCL/viennacl/scheduler/forwards.h: + +../ViennaCL/viennacl/device_specific/execution_handler.hpp: + +../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: + +../ViennaCL/viennacl/device_specific/templates/template_base.hpp: + +../ViennaCL/viennacl/device_specific/mapped_objects.hpp: + +../ViennaCL/viennacl/device_specific/utils.hpp: + +../ViennaCL/viennacl/detail/matrix_def.hpp: + +../ViennaCL/viennacl/traits/row_major.hpp: + +../ViennaCL/viennacl/device_specific/tree_parsing.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: + +../ViennaCL/viennacl/matrix_proxy.hpp: + +../ViennaCL/viennacl/range.hpp: + +../ViennaCL/viennacl/slice.hpp: + +../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/utils.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: + +../ViennaCL/viennacl/linalg/detail/op_executor.hpp: + +../ViennaCL/viennacl/linalg/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/detail/op_applier.hpp: + +../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: + +../ViennaCL/viennacl/vector_proxy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: + +.build_release/src/caffe/proto/caffe.pb.h: + +include/caffe/syncedmem.hpp: + +include/caffe/util/math_functions.hpp: + +include/caffe/util/mkl_alternate.hpp: + +include/caffe/greentea/greentea_math_functions.hpp: + +include/caffe/layer_factory.hpp: + +include/caffe/vision_layers.hpp: + +include/caffe/common_layers.hpp: + +include/caffe/data_layers.hpp: + +include/caffe/data_transformer.hpp: + +include/caffe/filler.hpp: + +include/caffe/internal_thread.hpp: + +include/caffe/util/db.hpp: + +include/caffe/loss_layers.hpp: + +include/caffe/neuron_layers.hpp: + +include/caffe/greentea/greentea_im2col.hpp: diff --git a/concat_layer.d b/concat_layer.d new file mode 100644 index 00000000000..7ff4d21c0aa --- /dev/null +++ b/concat_layer.d @@ -0,0 +1,339 @@ +concat_layer.o: src/caffe/layers/concat_layer.cu include/caffe/layer.hpp \ + include/caffe/blob.hpp include/caffe/common.hpp \ + include/caffe/util/device_alternate.hpp \ + include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ + /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ + ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ + ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ + ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ + ../ViennaCL/viennacl/tools/shared_ptr.hpp \ + ../ViennaCL/viennacl/ocl/device.hpp \ + ../ViennaCL/viennacl/ocl/device_utils.hpp \ + ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ + ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ + ../ViennaCL/viennacl/ocl/platform.hpp \ + ../ViennaCL/viennacl/ocl/command_queue.hpp \ + ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ + ../ViennaCL/viennacl/ocl/enqueue.hpp \ + ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ + ../ViennaCL/viennacl/detail/vector_def.hpp \ + ../ViennaCL/viennacl/tools/entry_proxy.hpp \ + ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ + ../ViennaCL/viennacl/backend/mem_handle.hpp \ + ../ViennaCL/viennacl/backend/cpu_ram.hpp \ + ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ + ../ViennaCL/viennacl/traits/context.hpp \ + ../ViennaCL/viennacl/backend/util.hpp \ + ../ViennaCL/viennacl/meta/result_of.hpp \ + ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ + ../ViennaCL/viennacl/tools/tools.hpp \ + ../ViennaCL/viennacl/tools/adapter.hpp \ + ../ViennaCL/viennacl/meta/predicate.hpp \ + ../ViennaCL/viennacl/traits/size.hpp \ + ../ViennaCL/viennacl/traits/start.hpp \ + ../ViennaCL/viennacl/traits/stride.hpp \ + ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ + ../ViennaCL/viennacl/ocl/utils.hpp \ + ../ViennaCL/viennacl/linalg/opencl/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ + ../ViennaCL/viennacl/scheduler/preset.hpp \ + ../ViennaCL/viennacl/device_specific/forwards.h \ + ../ViennaCL/viennacl/scheduler/io.hpp \ + ../ViennaCL/viennacl/scheduler/forwards.h \ + ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ + ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ + ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ + ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ + ../ViennaCL/viennacl/device_specific/utils.hpp \ + ../ViennaCL/viennacl/detail/matrix_def.hpp \ + ../ViennaCL/viennacl/traits/row_major.hpp \ + ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ + ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ + ../ViennaCL/viennacl/slice.hpp \ + ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ + ../ViennaCL/viennacl/linalg/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ + ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ + ../ViennaCL/viennacl/vector_proxy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ + .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ + include/caffe/util/math_functions.hpp \ + include/caffe/util/mkl_alternate.hpp \ + include/caffe/greentea/greentea_math_functions.hpp \ + include/caffe/layer_factory.hpp include/caffe/vision_layers.hpp \ + include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ + include/caffe/data_transformer.hpp include/caffe/filler.hpp \ + include/caffe/internal_thread.hpp include/caffe/util/db.hpp \ + include/caffe/loss_layers.hpp include/caffe/neuron_layers.hpp \ + include/caffe/greentea/greentea_im2col.hpp + +include/caffe/layer.hpp: + +include/caffe/blob.hpp: + +include/caffe/common.hpp: + +include/caffe/util/device_alternate.hpp: + +include/caffe/greentea/greentea.hpp: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: + +../ViennaCL/viennacl/ocl/context.hpp: + +../ViennaCL/viennacl/ocl/forwards.h: + +../ViennaCL/viennacl/ocl/handle.hpp: + +../ViennaCL/viennacl/ocl/error.hpp: + +../ViennaCL/viennacl/ocl/kernel.hpp: + +../ViennaCL/viennacl/ocl/program.hpp: + +../ViennaCL/viennacl/tools/shared_ptr.hpp: + +../ViennaCL/viennacl/ocl/device.hpp: + +../ViennaCL/viennacl/ocl/device_utils.hpp: + +../ViennaCL/viennacl/forwards.h: + +../ViennaCL/viennacl/meta/enable_if.hpp: + +../ViennaCL/viennacl/version.hpp: + +../ViennaCL/viennacl/ocl/local_mem.hpp: + +../ViennaCL/viennacl/ocl/platform.hpp: + +../ViennaCL/viennacl/ocl/command_queue.hpp: + +../ViennaCL/viennacl/tools/sha1.hpp: + +../ViennaCL/viennacl/ocl/backend.hpp: + +../ViennaCL/viennacl/ocl/enqueue.hpp: + +../ViennaCL/viennacl/backend/opencl.hpp: + +../ViennaCL/viennacl/vector.hpp: + +../ViennaCL/viennacl/detail/vector_def.hpp: + +../ViennaCL/viennacl/tools/entry_proxy.hpp: + +../ViennaCL/viennacl/scalar.hpp: + +../ViennaCL/viennacl/backend/memory.hpp: + +../ViennaCL/viennacl/backend/mem_handle.hpp: + +../ViennaCL/viennacl/backend/cpu_ram.hpp: + +../ViennaCL/viennacl/context.hpp: + +../ViennaCL/viennacl/traits/handle.hpp: + +../ViennaCL/viennacl/traits/context.hpp: + +../ViennaCL/viennacl/backend/util.hpp: + +../ViennaCL/viennacl/meta/result_of.hpp: + +../ViennaCL/viennacl/linalg/scalar_operations.hpp: + +../ViennaCL/viennacl/tools/tools.hpp: + +../ViennaCL/viennacl/tools/adapter.hpp: + +../ViennaCL/viennacl/meta/predicate.hpp: + +../ViennaCL/viennacl/traits/size.hpp: + +../ViennaCL/viennacl/traits/start.hpp: + +../ViennaCL/viennacl/traits/stride.hpp: + +../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: + +../ViennaCL/viennacl/ocl/utils.hpp: + +../ViennaCL/viennacl/linalg/opencl/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: + +../ViennaCL/viennacl/scheduler/preset.hpp: + +../ViennaCL/viennacl/device_specific/forwards.h: + +../ViennaCL/viennacl/scheduler/io.hpp: + +../ViennaCL/viennacl/scheduler/forwards.h: + +../ViennaCL/viennacl/device_specific/execution_handler.hpp: + +../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: + +../ViennaCL/viennacl/device_specific/templates/template_base.hpp: + +../ViennaCL/viennacl/device_specific/mapped_objects.hpp: + +../ViennaCL/viennacl/device_specific/utils.hpp: + +../ViennaCL/viennacl/detail/matrix_def.hpp: + +../ViennaCL/viennacl/traits/row_major.hpp: + +../ViennaCL/viennacl/device_specific/tree_parsing.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: + +../ViennaCL/viennacl/matrix_proxy.hpp: + +../ViennaCL/viennacl/range.hpp: + +../ViennaCL/viennacl/slice.hpp: + +../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/utils.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: + +../ViennaCL/viennacl/linalg/detail/op_executor.hpp: + +../ViennaCL/viennacl/linalg/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/detail/op_applier.hpp: + +../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: + +../ViennaCL/viennacl/vector_proxy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: + +.build_release/src/caffe/proto/caffe.pb.h: + +include/caffe/syncedmem.hpp: + +include/caffe/util/math_functions.hpp: + +include/caffe/util/mkl_alternate.hpp: + +include/caffe/greentea/greentea_math_functions.hpp: + +include/caffe/layer_factory.hpp: + +include/caffe/vision_layers.hpp: + +include/caffe/common_layers.hpp: + +include/caffe/data_layers.hpp: + +include/caffe/data_transformer.hpp: + +include/caffe/filler.hpp: + +include/caffe/internal_thread.hpp: + +include/caffe/util/db.hpp: + +include/caffe/loss_layers.hpp: + +include/caffe/neuron_layers.hpp: + +include/caffe/greentea/greentea_im2col.hpp: diff --git a/contrastive_loss_layer.d b/contrastive_loss_layer.d new file mode 100644 index 00000000000..1ba2b5d6a41 --- /dev/null +++ b/contrastive_loss_layer.d @@ -0,0 +1,342 @@ +contrastive_loss_layer.o: src/caffe/layers/contrastive_loss_layer.cu \ + include/caffe/layer.hpp include/caffe/blob.hpp include/caffe/common.hpp \ + include/caffe/util/device_alternate.hpp \ + include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ + /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ + ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ + ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ + ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ + ../ViennaCL/viennacl/tools/shared_ptr.hpp \ + ../ViennaCL/viennacl/ocl/device.hpp \ + ../ViennaCL/viennacl/ocl/device_utils.hpp \ + ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ + ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ + ../ViennaCL/viennacl/ocl/platform.hpp \ + ../ViennaCL/viennacl/ocl/command_queue.hpp \ + ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ + ../ViennaCL/viennacl/ocl/enqueue.hpp \ + ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ + ../ViennaCL/viennacl/detail/vector_def.hpp \ + ../ViennaCL/viennacl/tools/entry_proxy.hpp \ + ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ + ../ViennaCL/viennacl/backend/mem_handle.hpp \ + ../ViennaCL/viennacl/backend/cpu_ram.hpp \ + ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ + ../ViennaCL/viennacl/traits/context.hpp \ + ../ViennaCL/viennacl/backend/util.hpp \ + ../ViennaCL/viennacl/meta/result_of.hpp \ + ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ + ../ViennaCL/viennacl/tools/tools.hpp \ + ../ViennaCL/viennacl/tools/adapter.hpp \ + ../ViennaCL/viennacl/meta/predicate.hpp \ + ../ViennaCL/viennacl/traits/size.hpp \ + ../ViennaCL/viennacl/traits/start.hpp \ + ../ViennaCL/viennacl/traits/stride.hpp \ + ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ + ../ViennaCL/viennacl/ocl/utils.hpp \ + ../ViennaCL/viennacl/linalg/opencl/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ + ../ViennaCL/viennacl/scheduler/preset.hpp \ + ../ViennaCL/viennacl/device_specific/forwards.h \ + ../ViennaCL/viennacl/scheduler/io.hpp \ + ../ViennaCL/viennacl/scheduler/forwards.h \ + ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ + ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ + ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ + ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ + ../ViennaCL/viennacl/device_specific/utils.hpp \ + ../ViennaCL/viennacl/detail/matrix_def.hpp \ + ../ViennaCL/viennacl/traits/row_major.hpp \ + ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ + ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ + ../ViennaCL/viennacl/slice.hpp \ + ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ + ../ViennaCL/viennacl/linalg/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ + ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ + ../ViennaCL/viennacl/vector_proxy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ + .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ + include/caffe/util/math_functions.hpp \ + include/caffe/util/mkl_alternate.hpp \ + include/caffe/greentea/greentea_math_functions.hpp \ + include/caffe/layer_factory.hpp include/caffe/util/io.hpp \ + include/caffe/vision_layers.hpp include/caffe/common_layers.hpp \ + include/caffe/data_layers.hpp include/caffe/data_transformer.hpp \ + include/caffe/filler.hpp include/caffe/internal_thread.hpp \ + include/caffe/util/db.hpp include/caffe/loss_layers.hpp \ + include/caffe/neuron_layers.hpp \ + include/caffe/greentea/greentea_im2col.hpp + +include/caffe/layer.hpp: + +include/caffe/blob.hpp: + +include/caffe/common.hpp: + +include/caffe/util/device_alternate.hpp: + +include/caffe/greentea/greentea.hpp: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: + +../ViennaCL/viennacl/ocl/context.hpp: + +../ViennaCL/viennacl/ocl/forwards.h: + +../ViennaCL/viennacl/ocl/handle.hpp: + +../ViennaCL/viennacl/ocl/error.hpp: + +../ViennaCL/viennacl/ocl/kernel.hpp: + +../ViennaCL/viennacl/ocl/program.hpp: + +../ViennaCL/viennacl/tools/shared_ptr.hpp: + +../ViennaCL/viennacl/ocl/device.hpp: + +../ViennaCL/viennacl/ocl/device_utils.hpp: + +../ViennaCL/viennacl/forwards.h: + +../ViennaCL/viennacl/meta/enable_if.hpp: + +../ViennaCL/viennacl/version.hpp: + +../ViennaCL/viennacl/ocl/local_mem.hpp: + +../ViennaCL/viennacl/ocl/platform.hpp: + +../ViennaCL/viennacl/ocl/command_queue.hpp: + +../ViennaCL/viennacl/tools/sha1.hpp: + +../ViennaCL/viennacl/ocl/backend.hpp: + +../ViennaCL/viennacl/ocl/enqueue.hpp: + +../ViennaCL/viennacl/backend/opencl.hpp: + +../ViennaCL/viennacl/vector.hpp: + +../ViennaCL/viennacl/detail/vector_def.hpp: + +../ViennaCL/viennacl/tools/entry_proxy.hpp: + +../ViennaCL/viennacl/scalar.hpp: + +../ViennaCL/viennacl/backend/memory.hpp: + +../ViennaCL/viennacl/backend/mem_handle.hpp: + +../ViennaCL/viennacl/backend/cpu_ram.hpp: + +../ViennaCL/viennacl/context.hpp: + +../ViennaCL/viennacl/traits/handle.hpp: + +../ViennaCL/viennacl/traits/context.hpp: + +../ViennaCL/viennacl/backend/util.hpp: + +../ViennaCL/viennacl/meta/result_of.hpp: + +../ViennaCL/viennacl/linalg/scalar_operations.hpp: + +../ViennaCL/viennacl/tools/tools.hpp: + +../ViennaCL/viennacl/tools/adapter.hpp: + +../ViennaCL/viennacl/meta/predicate.hpp: + +../ViennaCL/viennacl/traits/size.hpp: + +../ViennaCL/viennacl/traits/start.hpp: + +../ViennaCL/viennacl/traits/stride.hpp: + +../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: + +../ViennaCL/viennacl/ocl/utils.hpp: + +../ViennaCL/viennacl/linalg/opencl/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: + +../ViennaCL/viennacl/scheduler/preset.hpp: + +../ViennaCL/viennacl/device_specific/forwards.h: + +../ViennaCL/viennacl/scheduler/io.hpp: + +../ViennaCL/viennacl/scheduler/forwards.h: + +../ViennaCL/viennacl/device_specific/execution_handler.hpp: + +../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: + +../ViennaCL/viennacl/device_specific/templates/template_base.hpp: + +../ViennaCL/viennacl/device_specific/mapped_objects.hpp: + +../ViennaCL/viennacl/device_specific/utils.hpp: + +../ViennaCL/viennacl/detail/matrix_def.hpp: + +../ViennaCL/viennacl/traits/row_major.hpp: + +../ViennaCL/viennacl/device_specific/tree_parsing.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: + +../ViennaCL/viennacl/matrix_proxy.hpp: + +../ViennaCL/viennacl/range.hpp: + +../ViennaCL/viennacl/slice.hpp: + +../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/utils.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: + +../ViennaCL/viennacl/linalg/detail/op_executor.hpp: + +../ViennaCL/viennacl/linalg/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/detail/op_applier.hpp: + +../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: + +../ViennaCL/viennacl/vector_proxy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: + +.build_release/src/caffe/proto/caffe.pb.h: + +include/caffe/syncedmem.hpp: + +include/caffe/util/math_functions.hpp: + +include/caffe/util/mkl_alternate.hpp: + +include/caffe/greentea/greentea_math_functions.hpp: + +include/caffe/layer_factory.hpp: + +include/caffe/util/io.hpp: + +include/caffe/vision_layers.hpp: + +include/caffe/common_layers.hpp: + +include/caffe/data_layers.hpp: + +include/caffe/data_transformer.hpp: + +include/caffe/filler.hpp: + +include/caffe/internal_thread.hpp: + +include/caffe/util/db.hpp: + +include/caffe/loss_layers.hpp: + +include/caffe/neuron_layers.hpp: + +include/caffe/greentea/greentea_im2col.hpp: diff --git a/conv_layer.d b/conv_layer.d new file mode 100644 index 00000000000..58b346180c1 --- /dev/null +++ b/conv_layer.d @@ -0,0 +1,342 @@ +conv_layer.o: src/caffe/layers/conv_layer.cu include/caffe/filler.hpp \ + include/caffe/blob.hpp include/caffe/common.hpp \ + include/caffe/util/device_alternate.hpp \ + include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ + /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ + ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ + ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ + ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ + ../ViennaCL/viennacl/tools/shared_ptr.hpp \ + ../ViennaCL/viennacl/ocl/device.hpp \ + ../ViennaCL/viennacl/ocl/device_utils.hpp \ + ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ + ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ + ../ViennaCL/viennacl/ocl/platform.hpp \ + ../ViennaCL/viennacl/ocl/command_queue.hpp \ + ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ + ../ViennaCL/viennacl/ocl/enqueue.hpp \ + ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ + ../ViennaCL/viennacl/detail/vector_def.hpp \ + ../ViennaCL/viennacl/tools/entry_proxy.hpp \ + ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ + ../ViennaCL/viennacl/backend/mem_handle.hpp \ + ../ViennaCL/viennacl/backend/cpu_ram.hpp \ + ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ + ../ViennaCL/viennacl/traits/context.hpp \ + ../ViennaCL/viennacl/backend/util.hpp \ + ../ViennaCL/viennacl/meta/result_of.hpp \ + ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ + ../ViennaCL/viennacl/tools/tools.hpp \ + ../ViennaCL/viennacl/tools/adapter.hpp \ + ../ViennaCL/viennacl/meta/predicate.hpp \ + ../ViennaCL/viennacl/traits/size.hpp \ + ../ViennaCL/viennacl/traits/start.hpp \ + ../ViennaCL/viennacl/traits/stride.hpp \ + ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ + ../ViennaCL/viennacl/ocl/utils.hpp \ + ../ViennaCL/viennacl/linalg/opencl/common.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ + ../ViennaCL/viennacl/scheduler/preset.hpp \ + ../ViennaCL/viennacl/device_specific/forwards.h \ + ../ViennaCL/viennacl/scheduler/io.hpp \ + ../ViennaCL/viennacl/scheduler/forwards.h \ + ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ + ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ + ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ + ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ + ../ViennaCL/viennacl/device_specific/utils.hpp \ + ../ViennaCL/viennacl/detail/matrix_def.hpp \ + ../ViennaCL/viennacl/traits/row_major.hpp \ + ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ + ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ + ../ViennaCL/viennacl/slice.hpp \ + ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ + ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ + ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ + ../ViennaCL/viennacl/linalg/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ + ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ + ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ + ../ViennaCL/viennacl/vector_proxy.hpp \ + ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ + .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ + include/caffe/util/math_functions.hpp \ + include/caffe/util/mkl_alternate.hpp \ + include/caffe/greentea/greentea_math_functions.hpp \ + include/caffe/layer.hpp include/caffe/layer_factory.hpp \ + include/caffe/util/im2col.hpp include/caffe/vision_layers.hpp \ + include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ + include/caffe/data_transformer.hpp include/caffe/internal_thread.hpp \ + include/caffe/util/db.hpp include/caffe/loss_layers.hpp \ + include/caffe/neuron_layers.hpp \ + include/caffe/greentea/greentea_im2col.hpp + +include/caffe/filler.hpp: + +include/caffe/blob.hpp: + +include/caffe/common.hpp: + +include/caffe/util/device_alternate.hpp: + +include/caffe/greentea/greentea.hpp: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: + +/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: + +../ViennaCL/viennacl/ocl/context.hpp: + +../ViennaCL/viennacl/ocl/forwards.h: + +../ViennaCL/viennacl/ocl/handle.hpp: + +../ViennaCL/viennacl/ocl/error.hpp: + +../ViennaCL/viennacl/ocl/kernel.hpp: + +../ViennaCL/viennacl/ocl/program.hpp: + +../ViennaCL/viennacl/tools/shared_ptr.hpp: + +../ViennaCL/viennacl/ocl/device.hpp: + +../ViennaCL/viennacl/ocl/device_utils.hpp: + +../ViennaCL/viennacl/forwards.h: + +../ViennaCL/viennacl/meta/enable_if.hpp: + +../ViennaCL/viennacl/version.hpp: + +../ViennaCL/viennacl/ocl/local_mem.hpp: + +../ViennaCL/viennacl/ocl/platform.hpp: + +../ViennaCL/viennacl/ocl/command_queue.hpp: + +../ViennaCL/viennacl/tools/sha1.hpp: + +../ViennaCL/viennacl/ocl/backend.hpp: + +../ViennaCL/viennacl/ocl/enqueue.hpp: + +../ViennaCL/viennacl/backend/opencl.hpp: + +../ViennaCL/viennacl/vector.hpp: + +../ViennaCL/viennacl/detail/vector_def.hpp: + +../ViennaCL/viennacl/tools/entry_proxy.hpp: + +../ViennaCL/viennacl/scalar.hpp: + +../ViennaCL/viennacl/backend/memory.hpp: + +../ViennaCL/viennacl/backend/mem_handle.hpp: + +../ViennaCL/viennacl/backend/cpu_ram.hpp: + +../ViennaCL/viennacl/context.hpp: + +../ViennaCL/viennacl/traits/handle.hpp: + +../ViennaCL/viennacl/traits/context.hpp: + +../ViennaCL/viennacl/backend/util.hpp: + +../ViennaCL/viennacl/meta/result_of.hpp: + +../ViennaCL/viennacl/linalg/scalar_operations.hpp: + +../ViennaCL/viennacl/tools/tools.hpp: + +../ViennaCL/viennacl/tools/adapter.hpp: + +../ViennaCL/viennacl/meta/predicate.hpp: + +../ViennaCL/viennacl/traits/size.hpp: + +../ViennaCL/viennacl/traits/start.hpp: + +../ViennaCL/viennacl/traits/stride.hpp: + +../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: + +../ViennaCL/viennacl/ocl/utils.hpp: + +../ViennaCL/viennacl/linalg/opencl/common.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: + +../ViennaCL/viennacl/scheduler/preset.hpp: + +../ViennaCL/viennacl/device_specific/forwards.h: + +../ViennaCL/viennacl/scheduler/io.hpp: + +../ViennaCL/viennacl/scheduler/forwards.h: + +../ViennaCL/viennacl/device_specific/execution_handler.hpp: + +../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: + +../ViennaCL/viennacl/device_specific/templates/template_base.hpp: + +../ViennaCL/viennacl/device_specific/mapped_objects.hpp: + +../ViennaCL/viennacl/device_specific/utils.hpp: + +../ViennaCL/viennacl/detail/matrix_def.hpp: + +../ViennaCL/viennacl/traits/row_major.hpp: + +../ViennaCL/viennacl/device_specific/tree_parsing.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: + +../ViennaCL/viennacl/matrix_proxy.hpp: + +../ViennaCL/viennacl/range.hpp: + +../ViennaCL/viennacl/slice.hpp: + +../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/utils.hpp: + +../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: + +../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: + +../ViennaCL/viennacl/linalg/detail/op_executor.hpp: + +../ViennaCL/viennacl/linalg/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/detail/op_applier.hpp: + +../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: + +../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: + +../ViennaCL/viennacl/vector_proxy.hpp: + +../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: + +.build_release/src/caffe/proto/caffe.pb.h: + +include/caffe/syncedmem.hpp: + +include/caffe/util/math_functions.hpp: + +include/caffe/util/mkl_alternate.hpp: + +include/caffe/greentea/greentea_math_functions.hpp: + +include/caffe/layer.hpp: + +include/caffe/layer_factory.hpp: + +include/caffe/util/im2col.hpp: + +include/caffe/vision_layers.hpp: + +include/caffe/common_layers.hpp: + +include/caffe/data_layers.hpp: + +include/caffe/data_transformer.hpp: + +include/caffe/internal_thread.hpp: + +include/caffe/util/db.hpp: + +include/caffe/loss_layers.hpp: + +include/caffe/neuron_layers.hpp: + +include/caffe/greentea/greentea_im2col.hpp: diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index c9c545e5aba..cc44a481409 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -73,7 +73,7 @@ struct is_same { CHECK_EQ(status, clblasSuccess) << "GreenTea CL BLAS ERROR";} #endif -// Macro to select the single (_s) or double (_d) precision kernel +// Macro to select the single (_float) or double (_double) precision kernel #define CL_KERNEL_SELECT(kernel) is_same::value ? kernel "_float" : kernel "_double" #endif diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index fc156091476..e20d7e1a6bc 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -17,7 +17,7 @@ using std::endl; #ifdef CMAKE_BUILD #include "caffe_config.h" #else - #define CUDA_TEST_DEVICE -1 + #define TEST_DEVICE -1 #define CMAKE_SOURCE_DIR "src/" #define EXAMPLES_SOURCE_DIR "examples/" #define CMAKE_EXT "" diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index e8d5d4ba92d..b8b7b0a42df 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -151,6 +151,7 @@ template void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); #ifndef CPU_ONLY // GPU +#ifdef USE_CUDA // Decaf gpu gemm provides an interface that is almost the same as the cpu // gemm function - following the c convention and calling the fortran-order @@ -280,7 +281,8 @@ void caffe_gpu_##name(const int n, const double* x, double* y) { \ n, x, y); \ } -#endif // !CPU_ONLY +#endif // USE_CUDA +#endif // !CPU_ONLY } // namespace caffe diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index c9fd79dbde8..7baf96c6ecb 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -10,7 +10,7 @@ #include "caffe/greentea/cl_kernels.hpp" #ifdef USE_CLBLAS #include -#endif +#endif // USE_CLBLAS #endif namespace caffe { @@ -93,7 +93,7 @@ void* Caffe::RNG::generator() { Caffe::Caffe() : #ifdef USE_CUDA - cublas_handle_(NULL), + cublas_handle_(NULL), curand_generator_(NULL), #endif // USE_CUDA random_generator_(), @@ -140,7 +140,6 @@ void Caffe::set_random_seed(const unsigned int seed) { g_curand_availability_logged = true; } } - #endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -179,10 +178,12 @@ void Caffe::EnumerateDevices() { #endif LOG(INFO)<< "Total devices: " << cuda_device_count + greentea_device_count; +#ifdef USE_CUDA LOG(INFO)<< "CUDA devices: " << cuda_device_count; +#endif // USE_CUDA #ifdef USE_GREENTEA LOG(INFO)<< "OpenCL devices: " << greentea_device_count; -#endif +#endif // USE_GREENTEA // Display info for all devices #ifdef USE_CUDA @@ -196,7 +197,7 @@ void Caffe::EnumerateDevices() { LOG(INFO)<< "Name: " << prop.name; LOG(INFO)<< "Total global memory: " << prop.totalGlobalMem; } -#endif +#endif /// USE_CUDA #ifdef USE_GREENTEA for (int i = 0; i < greentea_device_count; ++i) { @@ -207,7 +208,7 @@ void Caffe::EnumerateDevices() { LOG(INFO)<< "Name: " << std::get<1>(platform_devices[i]).name(); LOG(INFO)<< "Total global memory: " << std::get<1>(platform_devices[i]).global_mem_size(); } -#endif +#endif // USE_GREENTEA } @@ -224,7 +225,7 @@ void Caffe::SetDevices(std::vector device_ids) { #ifdef USE_CUDA cudaGetDeviceCount(&cuda_device_count); -#endif +#endif // USE_CUDA for (int i = 0; i < cuda_device_count; ++i) { Get().device_contexts_.push_back(DeviceContext(i, Backend::BACKEND_CUDA)); @@ -232,7 +233,7 @@ void Caffe::SetDevices(std::vector device_ids) { // Dummy to have same vector size as device contexts viennacl::ocl::program program; Get().ocl_programs_.push_back(program); -#endif +#endif // USE_GREENTEA } // Initialize GreenTea devices @@ -298,7 +299,6 @@ DeviceContext& Caffe::GetDefaultDeviceContext() { } void Caffe::SetDevice(const int device_id) { - std::vector devices; devices.push_back(device_id); Caffe::SetDevices(devices); @@ -337,7 +337,7 @@ void Caffe::SetDevice(const int device_id) { } } -// TODO: (FTschopp) fix this for the new backend +// TODO: Fix this for the new backend void Caffe::DeviceQuery() { if (Get().default_device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -373,9 +373,7 @@ void Caffe::DeviceQuery() { << (prop.kernelExecTimeoutEnabled ? "Yes" : "No"); } #endif // USE_CUDA - } - else - { + } else { #ifdef USE_GREENTEA // TODO: Complete OpenCL device information of current device #endif // USE_GREENTEA diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu index 5cecf7249a6..253760139f3 100644 --- a/src/caffe/layers/absval_layer.cu +++ b/src/caffe/layers/absval_layer.cu @@ -34,7 +34,7 @@ void AbsValLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const int count = top[0]->count(); - const Dtype* top_data = top[0]->gpu_data(); + //const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index c69506bce54..8e9e5734b91 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -11,6 +11,7 @@ namespace caffe { +#ifdef USE_CUDA template __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { @@ -19,6 +20,7 @@ __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } +#endif // USE_CUDA template void ReLULayer::Forward_gpu(const vector*>& bottom, @@ -57,6 +59,7 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, // << " threads: " << CAFFE_CUDA_NUM_THREADS; } +#ifdef USE_CUDA template __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, @@ -67,6 +70,7 @@ __global__ void ReLUBackward(const int n, const Dtype* in_diff, * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } +#endif // USE_CUDA template void ReLULayer::Backward_gpu(const vector*>& top, diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index 1536ca15cea..d35b942df80 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -24,8 +24,10 @@ void SilenceLayer::Backward_gpu(const vector*>& top, for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { if (this->device_context_.backend() == BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_set(bottom[i]->count(), Dtype(0), bottom[i]->mutable_gpu_data()); +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 09e8f955929..7f4c9ec4b5d 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -13,6 +13,7 @@ namespace caffe { +#ifdef USE_CUDA template __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, @@ -37,6 +38,7 @@ __global__ void SoftmaxLossForwardGPU(const int nthreads, } } } +#endif // USE_CUDA template void SoftmaxWithLossLayer::Forward_gpu( @@ -122,6 +124,7 @@ void SoftmaxWithLossLayer::Forward_gpu( } } +#ifdef USE_CUDA template __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, @@ -148,6 +151,7 @@ __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, } } } +#endif // USE_CUDA template void SoftmaxWithLossLayer::Backward_gpu( diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 5c8a8112774..245a0bb3af3 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -49,7 +49,9 @@ inline void SyncedMemory::to_cpu() { own_cpu_data_ = true; } if (device_context_.backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_); +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index 32a483aca18..1de107ffaa2 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -4,16 +4,6 @@ #include "caffe/caffe.hpp" #include "caffe/test/test_caffe_main.hpp" -namespace caffe { -#ifndef CPU_ONLY -cudaDeviceProp CAFFE_TEST_CUDA_PROP; -#endif -} - -#ifndef CPU_ONLY -using caffe::CAFFE_TEST_CUDA_PROP; -#endif - using caffe::Caffe; int main(int argc, char** argv) { @@ -24,9 +14,9 @@ int main(int argc, char** argv) { if (argc > 1) { // Use the given device device = atoi(argv[1]); - } else if (CUDA_TEST_DEVICE >= 0) { + } else if (TEST_DEVICE >= 0) { // Use the device assigned in build configuration; but with a lower priority - device = CUDA_TEST_DEVICE; + device = TEST_DEVICE; } cout << "Setting to use device " << device << endl; Caffe::SetDevice(device); diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index a27b780859e..106fdd4e075 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -1,3 +1,4 @@ +#ifdef USE_CUDA #include #include @@ -11,12 +12,6 @@ #include "caffe/test/test_caffe_main.hpp" -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#include "caffe/greentea/greentea_im2col.hpp" -#endif - namespace caffe { // Forward declare kernel functions @@ -100,40 +95,36 @@ TYPED_TEST(Im2colKernelTest, TestGPU) { cpu_data + this->blob_top_cpu_->offset(n)); } - DeviceContext cid = Caffe::GetDefaultDeviceContext(); - if(cid.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - int grid_dim = default_grid_dim/grid_div; - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( - num_kernels, bottom_data + this->blob_bottom_->offset(n), - this->height_, this->width_, this->kernel_size_, this->kernel_size_, - this->pad_, this->pad_, this->stride_, this->stride_, - this->height_col_, this->width_col_, - top_data + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; - } + // GPU version + int num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + + // Launch with different grid sizes + for (int grid_div = 2; grid_div <= 8; grid_div++) { + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + int grid_dim = default_grid_dim/grid_div; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( + num_kernels, bottom_data + this->blob_bottom_->offset(n), + this->height_, this->width_, this->kernel_size_, this->kernel_size_, + this->pad_, this->pad_, this->stride_, this->stride_, + this->height_col_, this->width_col_, + top_data + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; + } - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = cpu_data[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; - } + // Compare results against CPU version + for (int i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = cpu_data[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; } } -#endif // USE_CUDA } } } // namespace caffe +#endif // USE_CUDA diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp index 96092529834..50c62a8bce7 100644 --- a/src/caffe/test/test_inner_product_layer.cpp +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -13,10 +13,6 @@ namespace caffe { -#ifndef CPU_ONLY -extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; -#endif - template class InnerProductLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; @@ -57,57 +53,39 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) { TYPED_TEST(InnerProductLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; - bool IS_VALID_DEVICE = false; -#ifndef CPU_ONLY - IS_VALID_DEVICE = CAFFE_TEST_CUDA_PROP.major >= 2 || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL; -#endif - if (Caffe::mode() == Caffe::CPU || - sizeof(Dtype) == 4 || IS_VALID_DEVICE) { - LayerParameter layer_param; - InnerProductParameter* inner_product_param = - layer_param.mutable_inner_product_param(); - inner_product_param->set_num_output(10); - inner_product_param->mutable_weight_filler()->set_type("uniform"); - inner_product_param->mutable_bias_filler()->set_type("uniform"); - inner_product_param->mutable_bias_filler()->set_min(1); - inner_product_param->mutable_bias_filler()->set_max(2); - shared_ptr > layer( - new InnerProductLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); - for (int i = 0; i < count; ++i) { - EXPECT_GE(data[i], 1.); - } - } else { - LOG(ERROR) << "Skipping test due to old architecture."; + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); } } TYPED_TEST(InnerProductLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; - bool IS_VALID_DEVICE = false; -#ifndef CPU_ONLY - IS_VALID_DEVICE = CAFFE_TEST_CUDA_PROP.major >= 2 || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL; -#endif - if (Caffe::mode() == Caffe::CPU || - sizeof(Dtype) == 4 || IS_VALID_DEVICE) { - LayerParameter layer_param; - InnerProductParameter* inner_product_param = - layer_param.mutable_inner_product_param(); - inner_product_param->set_num_output(10); - inner_product_param->mutable_weight_filler()->set_type("gaussian"); - inner_product_param->mutable_bias_filler()->set_type("gaussian"); - inner_product_param->mutable_bias_filler()->set_min(1); - inner_product_param->mutable_bias_filler()->set_max(2); - InnerProductLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); - } else { - LOG(ERROR) << "Skipping test due to old architecture."; - } + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + InnerProductLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } // namespace caffe diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index aeb6e2ec45a..4e84b4dc9dd 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -165,6 +165,7 @@ TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { +#ifdef USE_CUDA int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -173,6 +174,7 @@ TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { y = this->blob_top_->gpu_data(); int computed_distance = caffe_gpu_hamming_distance(n, x, y); EXPECT_EQ(reference_distance, computed_distance); +#endif // USE_CUDA } TYPED_TEST(GPUMathFunctionsTest, TestAsum) { diff --git a/src/caffe/test/test_platform.cpp b/src/caffe/test/test_platform.cpp index f3513e08814..75e5cb9dda3 100644 --- a/src/caffe/test/test_platform.cpp +++ b/src/caffe/test/test_platform.cpp @@ -1,4 +1,5 @@ #ifndef CPU_ONLY +#ifdef USE_CUDA #include #include @@ -54,4 +55,5 @@ TEST_F(PlatformTest, TestInitialization) { } // namespace caffe -#endif // CPU_ONLY +#endif // USE_CUDA +#endif // CPU_ONLY diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 996da4b8f7c..90019a6d170 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -75,7 +75,7 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; SoftmaxLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); + GradientChecker checker(1e-2, 1e-2); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 55de67772f0..32dff12b2c0 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -12,8 +12,6 @@ namespace caffe { -extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; - template class GemmTest : public ::testing::Test {}; @@ -33,107 +31,103 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { caffe_cpu_copy(6, data, A.mutable_cpu_data()); caffe_cpu_copy(12, data, B.mutable_cpu_data()); - if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2 || dc.backend() == BACKEND_OpenCL) { - // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., - A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } + // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } - if (dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., - A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); + greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); #endif // USE_GREENTEA - } - - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } - - // Test when we have a transposed A - A.Reshape(1, 1, 3, 2); - caffe_cpu_copy(6, A_reshape_data, A.mutable_cpu_data()); - caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., - A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } - - if (dc.backend() == BACKEND_CUDA) { + } + + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed A + A.Reshape(1, 1, 3, 2); + caffe_cpu_copy(6, A_reshape_data, A.mutable_cpu_data()); + caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., - A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + caffe_gpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasTrans, CblasNoTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); + greentea_gpu_gemm(dc.id(), CblasTrans, CblasNoTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); #endif // USE_GREENTEA - } - - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } - - // Test when we have a transposed A and a transposed B too - B.Reshape(1, 1, 4, 3); - caffe_cpu_copy(12, B_reshape_data, B.mutable_cpu_data()); - caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., - A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } - - if (dc.backend() == BACKEND_CUDA) { + } + + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed A and a transposed B too + B.Reshape(1, 1, 4, 3); + caffe_cpu_copy(12, B_reshape_data, B.mutable_cpu_data()); + caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., - A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + caffe_gpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasTrans, CblasTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); + greentea_gpu_gemm(dc.id(), CblasTrans, CblasTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); #endif // USE_GREENTEA - } - - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } - - // Test when we have a transposed B - A.Reshape(1, 1, 2, 3); - caffe_cpu_copy(6, data, A.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., - A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } - - if (dc.backend() == BACKEND_CUDA) { + } + + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed B + A.Reshape(1, 1, 2, 3); + caffe_cpu_copy(6, data, A.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., - A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); + greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasTrans, 2, 4, 3, 1., + (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); #endif // USE_GREENTEA - } + } - for (int i = 0; i < 8; ++i) { - EXPECT_EQ(C.cpu_data()[i], result[i]); - } - } else { - LOG(ERROR) << "Skipping test due to old architecture."; + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); } } @@ -151,54 +145,51 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { caffe_cpu_copy(6, data, A.mutable_cpu_data()); caffe_cpu_copy(3, data, x.mutable_cpu_data()); - if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2 || dc.backend() == BACKEND_OpenCL) { - caffe_cpu_gemv(CblasNoTrans, 2, 3, 1., A.cpu_data(), - x.cpu_data(), 0., y.mutable_cpu_data()); - for (int i = 0; i < 2; ++i) { - EXPECT_EQ(y.cpu_data()[i], result_2[i]); - } - if (dc.backend() == BACKEND_CUDA) { + caffe_cpu_gemv(CblasNoTrans, 2, 3, 1., A.cpu_data(), + x.cpu_data(), 0., y.mutable_cpu_data()); + for (int i = 0; i < 2; ++i) { + EXPECT_EQ(y.cpu_data()[i], result_2[i]); + } + + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemv(CblasNoTrans, 2, 3, 1., A.gpu_data(), - x.gpu_data(), 0., y.mutable_gpu_data()); + caffe_gpu_gemv(CblasNoTrans, 2, 3, 1., A.gpu_data(), + x.gpu_data(), 0., y.mutable_gpu_data()); #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(dc.id(), CblasNoTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, - (cl_mem)(x.gpu_data()),0, 0., (cl_mem)(y.mutable_gpu_data()),0); + greentea_gpu_gemv(dc.id(), CblasNoTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, + (cl_mem)(x.gpu_data()),0, 0., (cl_mem)(y.mutable_gpu_data()),0); #endif // USE_GREENTEA - } + } - for (int i = 0; i < 2; ++i) { - EXPECT_EQ(y.cpu_data()[i], result_2[i]); - } + for (int i = 0; i < 2; ++i) { + EXPECT_EQ(y.cpu_data()[i], result_2[i]); + } - // Test transpose case - caffe_cpu_copy(2, data, y.mutable_cpu_data()); - caffe_cpu_gemv(CblasTrans, 2, 3, 1., A.cpu_data(), - y.cpu_data(), 0., x.mutable_cpu_data()); - for (int i = 0; i < 3; ++i) { - EXPECT_EQ(x.cpu_data()[i], result_3[i]); - } + // Test transpose case + caffe_cpu_copy(2, data, y.mutable_cpu_data()); + caffe_cpu_gemv(CblasTrans, 2, 3, 1., A.cpu_data(), + y.cpu_data(), 0., x.mutable_cpu_data()); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(x.cpu_data()[i], result_3[i]); + } - if (dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemv(CblasTrans, 2, 3, 1., A.gpu_data(), - y.gpu_data(), 0., x.mutable_gpu_data()); + caffe_gpu_gemv(CblasTrans, 2, 3, 1., A.gpu_data(), + y.gpu_data(), 0., x.mutable_gpu_data()); #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(dc.id(), CblasTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, - (cl_mem)(y.gpu_data()),0, 0., (cl_mem)(x.mutable_gpu_data()),0); + greentea_gpu_gemv(dc.id(), CblasTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, + (cl_mem)(y.gpu_data()),0, 0., (cl_mem)(x.mutable_gpu_data()),0); #endif // USE_GREENTEA - } + } - for (int i = 0; i < 3; ++i) { - EXPECT_EQ(x.cpu_data()[i], result_3[i]); - } - } else { - LOG(ERROR) << "Skipping test due to old architecture."; + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(x.cpu_data()[i], result_3[i]); } } diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 5d55f0ddc2a..39b2550aa2b 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -1,3 +1,4 @@ +#ifdef USE_CUDA #include // CUDA's, not caffe's, for fabs, signbit #include #include // thrust::plus @@ -12,6 +13,7 @@ namespace caffe { + template <> void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, @@ -463,3 +465,4 @@ void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, } } // namespace caffe +#endif // USE_CUDA From f86718e9ee46977786de09846a44474bb500b309 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 17 Jun 2015 00:57:34 +0200 Subject: [PATCH 056/254] Cleanup. --- absval_layer.d | 339 -------------------------------------- base_data_layer.d | 326 ------------------------------------- bnll_layer.d | 339 -------------------------------------- concat_layer.d | 339 -------------------------------------- contrastive_loss_layer.d | 342 --------------------------------------- conv_layer.d | 342 --------------------------------------- 6 files changed, 2027 deletions(-) delete mode 100644 absval_layer.d delete mode 100644 base_data_layer.d delete mode 100644 bnll_layer.d delete mode 100644 concat_layer.d delete mode 100644 contrastive_loss_layer.d delete mode 100644 conv_layer.d diff --git a/absval_layer.d b/absval_layer.d deleted file mode 100644 index aac7a03e07a..00000000000 --- a/absval_layer.d +++ /dev/null @@ -1,339 +0,0 @@ -absval_layer.o: src/caffe/layers/absval_layer.cu include/caffe/layer.hpp \ - include/caffe/blob.hpp include/caffe/common.hpp \ - include/caffe/util/device_alternate.hpp \ - include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ - /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ - ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ - ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ - ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ - ../ViennaCL/viennacl/tools/shared_ptr.hpp \ - ../ViennaCL/viennacl/ocl/device.hpp \ - ../ViennaCL/viennacl/ocl/device_utils.hpp \ - ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ - ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ - ../ViennaCL/viennacl/ocl/platform.hpp \ - ../ViennaCL/viennacl/ocl/command_queue.hpp \ - ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ - ../ViennaCL/viennacl/ocl/enqueue.hpp \ - ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ - ../ViennaCL/viennacl/detail/vector_def.hpp \ - ../ViennaCL/viennacl/tools/entry_proxy.hpp \ - ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ - ../ViennaCL/viennacl/backend/mem_handle.hpp \ - ../ViennaCL/viennacl/backend/cpu_ram.hpp \ - ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ - ../ViennaCL/viennacl/traits/context.hpp \ - ../ViennaCL/viennacl/backend/util.hpp \ - ../ViennaCL/viennacl/meta/result_of.hpp \ - ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ - ../ViennaCL/viennacl/tools/tools.hpp \ - ../ViennaCL/viennacl/tools/adapter.hpp \ - ../ViennaCL/viennacl/meta/predicate.hpp \ - ../ViennaCL/viennacl/traits/size.hpp \ - ../ViennaCL/viennacl/traits/start.hpp \ - ../ViennaCL/viennacl/traits/stride.hpp \ - ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ - ../ViennaCL/viennacl/ocl/utils.hpp \ - ../ViennaCL/viennacl/linalg/opencl/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ - ../ViennaCL/viennacl/scheduler/preset.hpp \ - ../ViennaCL/viennacl/device_specific/forwards.h \ - ../ViennaCL/viennacl/scheduler/io.hpp \ - ../ViennaCL/viennacl/scheduler/forwards.h \ - ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ - ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ - ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ - ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ - ../ViennaCL/viennacl/device_specific/utils.hpp \ - ../ViennaCL/viennacl/detail/matrix_def.hpp \ - ../ViennaCL/viennacl/traits/row_major.hpp \ - ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ - ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ - ../ViennaCL/viennacl/slice.hpp \ - ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ - ../ViennaCL/viennacl/linalg/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ - ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ - ../ViennaCL/viennacl/vector_proxy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ - .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ - include/caffe/util/math_functions.hpp \ - include/caffe/util/mkl_alternate.hpp \ - include/caffe/greentea/greentea_math_functions.hpp \ - include/caffe/layer_factory.hpp include/caffe/vision_layers.hpp \ - include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ - include/caffe/data_transformer.hpp include/caffe/filler.hpp \ - include/caffe/internal_thread.hpp include/caffe/util/db.hpp \ - include/caffe/loss_layers.hpp include/caffe/neuron_layers.hpp \ - include/caffe/greentea/greentea_im2col.hpp - -include/caffe/layer.hpp: - -include/caffe/blob.hpp: - -include/caffe/common.hpp: - -include/caffe/util/device_alternate.hpp: - -include/caffe/greentea/greentea.hpp: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: - -../ViennaCL/viennacl/ocl/context.hpp: - -../ViennaCL/viennacl/ocl/forwards.h: - -../ViennaCL/viennacl/ocl/handle.hpp: - -../ViennaCL/viennacl/ocl/error.hpp: - -../ViennaCL/viennacl/ocl/kernel.hpp: - -../ViennaCL/viennacl/ocl/program.hpp: - -../ViennaCL/viennacl/tools/shared_ptr.hpp: - -../ViennaCL/viennacl/ocl/device.hpp: - -../ViennaCL/viennacl/ocl/device_utils.hpp: - -../ViennaCL/viennacl/forwards.h: - -../ViennaCL/viennacl/meta/enable_if.hpp: - -../ViennaCL/viennacl/version.hpp: - -../ViennaCL/viennacl/ocl/local_mem.hpp: - -../ViennaCL/viennacl/ocl/platform.hpp: - -../ViennaCL/viennacl/ocl/command_queue.hpp: - -../ViennaCL/viennacl/tools/sha1.hpp: - -../ViennaCL/viennacl/ocl/backend.hpp: - -../ViennaCL/viennacl/ocl/enqueue.hpp: - -../ViennaCL/viennacl/backend/opencl.hpp: - -../ViennaCL/viennacl/vector.hpp: - -../ViennaCL/viennacl/detail/vector_def.hpp: - -../ViennaCL/viennacl/tools/entry_proxy.hpp: - -../ViennaCL/viennacl/scalar.hpp: - -../ViennaCL/viennacl/backend/memory.hpp: - -../ViennaCL/viennacl/backend/mem_handle.hpp: - -../ViennaCL/viennacl/backend/cpu_ram.hpp: - -../ViennaCL/viennacl/context.hpp: - -../ViennaCL/viennacl/traits/handle.hpp: - -../ViennaCL/viennacl/traits/context.hpp: - -../ViennaCL/viennacl/backend/util.hpp: - -../ViennaCL/viennacl/meta/result_of.hpp: - -../ViennaCL/viennacl/linalg/scalar_operations.hpp: - -../ViennaCL/viennacl/tools/tools.hpp: - -../ViennaCL/viennacl/tools/adapter.hpp: - -../ViennaCL/viennacl/meta/predicate.hpp: - -../ViennaCL/viennacl/traits/size.hpp: - -../ViennaCL/viennacl/traits/start.hpp: - -../ViennaCL/viennacl/traits/stride.hpp: - -../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: - -../ViennaCL/viennacl/ocl/utils.hpp: - -../ViennaCL/viennacl/linalg/opencl/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: - -../ViennaCL/viennacl/scheduler/preset.hpp: - -../ViennaCL/viennacl/device_specific/forwards.h: - -../ViennaCL/viennacl/scheduler/io.hpp: - -../ViennaCL/viennacl/scheduler/forwards.h: - -../ViennaCL/viennacl/device_specific/execution_handler.hpp: - -../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: - -../ViennaCL/viennacl/device_specific/templates/template_base.hpp: - -../ViennaCL/viennacl/device_specific/mapped_objects.hpp: - -../ViennaCL/viennacl/device_specific/utils.hpp: - -../ViennaCL/viennacl/detail/matrix_def.hpp: - -../ViennaCL/viennacl/traits/row_major.hpp: - -../ViennaCL/viennacl/device_specific/tree_parsing.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: - -../ViennaCL/viennacl/matrix_proxy.hpp: - -../ViennaCL/viennacl/range.hpp: - -../ViennaCL/viennacl/slice.hpp: - -../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/utils.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: - -../ViennaCL/viennacl/linalg/detail/op_executor.hpp: - -../ViennaCL/viennacl/linalg/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/detail/op_applier.hpp: - -../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: - -../ViennaCL/viennacl/vector_proxy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: - -.build_release/src/caffe/proto/caffe.pb.h: - -include/caffe/syncedmem.hpp: - -include/caffe/util/math_functions.hpp: - -include/caffe/util/mkl_alternate.hpp: - -include/caffe/greentea/greentea_math_functions.hpp: - -include/caffe/layer_factory.hpp: - -include/caffe/vision_layers.hpp: - -include/caffe/common_layers.hpp: - -include/caffe/data_layers.hpp: - -include/caffe/data_transformer.hpp: - -include/caffe/filler.hpp: - -include/caffe/internal_thread.hpp: - -include/caffe/util/db.hpp: - -include/caffe/loss_layers.hpp: - -include/caffe/neuron_layers.hpp: - -include/caffe/greentea/greentea_im2col.hpp: diff --git a/base_data_layer.d b/base_data_layer.d deleted file mode 100644 index d859800a89f..00000000000 --- a/base_data_layer.d +++ /dev/null @@ -1,326 +0,0 @@ -base_data_layer.o: src/caffe/layers/base_data_layer.cu \ - include/caffe/data_layers.hpp include/caffe/blob.hpp \ - include/caffe/common.hpp include/caffe/util/device_alternate.hpp \ - include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ - /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ - ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ - ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ - ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ - ../ViennaCL/viennacl/tools/shared_ptr.hpp \ - ../ViennaCL/viennacl/ocl/device.hpp \ - ../ViennaCL/viennacl/ocl/device_utils.hpp \ - ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ - ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ - ../ViennaCL/viennacl/ocl/platform.hpp \ - ../ViennaCL/viennacl/ocl/command_queue.hpp \ - ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ - ../ViennaCL/viennacl/ocl/enqueue.hpp \ - ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ - ../ViennaCL/viennacl/detail/vector_def.hpp \ - ../ViennaCL/viennacl/tools/entry_proxy.hpp \ - ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ - ../ViennaCL/viennacl/backend/mem_handle.hpp \ - ../ViennaCL/viennacl/backend/cpu_ram.hpp \ - ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ - ../ViennaCL/viennacl/traits/context.hpp \ - ../ViennaCL/viennacl/backend/util.hpp \ - ../ViennaCL/viennacl/meta/result_of.hpp \ - ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ - ../ViennaCL/viennacl/tools/tools.hpp \ - ../ViennaCL/viennacl/tools/adapter.hpp \ - ../ViennaCL/viennacl/meta/predicate.hpp \ - ../ViennaCL/viennacl/traits/size.hpp \ - ../ViennaCL/viennacl/traits/start.hpp \ - ../ViennaCL/viennacl/traits/stride.hpp \ - ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ - ../ViennaCL/viennacl/ocl/utils.hpp \ - ../ViennaCL/viennacl/linalg/opencl/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ - ../ViennaCL/viennacl/scheduler/preset.hpp \ - ../ViennaCL/viennacl/device_specific/forwards.h \ - ../ViennaCL/viennacl/scheduler/io.hpp \ - ../ViennaCL/viennacl/scheduler/forwards.h \ - ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ - ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ - ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ - ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ - ../ViennaCL/viennacl/device_specific/utils.hpp \ - ../ViennaCL/viennacl/detail/matrix_def.hpp \ - ../ViennaCL/viennacl/traits/row_major.hpp \ - ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ - ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ - ../ViennaCL/viennacl/slice.hpp \ - ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ - ../ViennaCL/viennacl/linalg/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ - ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ - ../ViennaCL/viennacl/vector_proxy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ - .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ - include/caffe/util/math_functions.hpp \ - include/caffe/util/mkl_alternate.hpp \ - include/caffe/greentea/greentea_math_functions.hpp \ - include/caffe/data_transformer.hpp include/caffe/filler.hpp \ - include/caffe/internal_thread.hpp include/caffe/layer.hpp \ - include/caffe/layer_factory.hpp include/caffe/util/db.hpp - -include/caffe/data_layers.hpp: - -include/caffe/blob.hpp: - -include/caffe/common.hpp: - -include/caffe/util/device_alternate.hpp: - -include/caffe/greentea/greentea.hpp: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: - -../ViennaCL/viennacl/ocl/context.hpp: - -../ViennaCL/viennacl/ocl/forwards.h: - -../ViennaCL/viennacl/ocl/handle.hpp: - -../ViennaCL/viennacl/ocl/error.hpp: - -../ViennaCL/viennacl/ocl/kernel.hpp: - -../ViennaCL/viennacl/ocl/program.hpp: - -../ViennaCL/viennacl/tools/shared_ptr.hpp: - -../ViennaCL/viennacl/ocl/device.hpp: - -../ViennaCL/viennacl/ocl/device_utils.hpp: - -../ViennaCL/viennacl/forwards.h: - -../ViennaCL/viennacl/meta/enable_if.hpp: - -../ViennaCL/viennacl/version.hpp: - -../ViennaCL/viennacl/ocl/local_mem.hpp: - -../ViennaCL/viennacl/ocl/platform.hpp: - -../ViennaCL/viennacl/ocl/command_queue.hpp: - -../ViennaCL/viennacl/tools/sha1.hpp: - -../ViennaCL/viennacl/ocl/backend.hpp: - -../ViennaCL/viennacl/ocl/enqueue.hpp: - -../ViennaCL/viennacl/backend/opencl.hpp: - -../ViennaCL/viennacl/vector.hpp: - -../ViennaCL/viennacl/detail/vector_def.hpp: - -../ViennaCL/viennacl/tools/entry_proxy.hpp: - -../ViennaCL/viennacl/scalar.hpp: - -../ViennaCL/viennacl/backend/memory.hpp: - -../ViennaCL/viennacl/backend/mem_handle.hpp: - -../ViennaCL/viennacl/backend/cpu_ram.hpp: - -../ViennaCL/viennacl/context.hpp: - -../ViennaCL/viennacl/traits/handle.hpp: - -../ViennaCL/viennacl/traits/context.hpp: - -../ViennaCL/viennacl/backend/util.hpp: - -../ViennaCL/viennacl/meta/result_of.hpp: - -../ViennaCL/viennacl/linalg/scalar_operations.hpp: - -../ViennaCL/viennacl/tools/tools.hpp: - -../ViennaCL/viennacl/tools/adapter.hpp: - -../ViennaCL/viennacl/meta/predicate.hpp: - -../ViennaCL/viennacl/traits/size.hpp: - -../ViennaCL/viennacl/traits/start.hpp: - -../ViennaCL/viennacl/traits/stride.hpp: - -../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: - -../ViennaCL/viennacl/ocl/utils.hpp: - -../ViennaCL/viennacl/linalg/opencl/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: - -../ViennaCL/viennacl/scheduler/preset.hpp: - -../ViennaCL/viennacl/device_specific/forwards.h: - -../ViennaCL/viennacl/scheduler/io.hpp: - -../ViennaCL/viennacl/scheduler/forwards.h: - -../ViennaCL/viennacl/device_specific/execution_handler.hpp: - -../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: - -../ViennaCL/viennacl/device_specific/templates/template_base.hpp: - -../ViennaCL/viennacl/device_specific/mapped_objects.hpp: - -../ViennaCL/viennacl/device_specific/utils.hpp: - -../ViennaCL/viennacl/detail/matrix_def.hpp: - -../ViennaCL/viennacl/traits/row_major.hpp: - -../ViennaCL/viennacl/device_specific/tree_parsing.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: - -../ViennaCL/viennacl/matrix_proxy.hpp: - -../ViennaCL/viennacl/range.hpp: - -../ViennaCL/viennacl/slice.hpp: - -../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/utils.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: - -../ViennaCL/viennacl/linalg/detail/op_executor.hpp: - -../ViennaCL/viennacl/linalg/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/detail/op_applier.hpp: - -../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: - -../ViennaCL/viennacl/vector_proxy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: - -.build_release/src/caffe/proto/caffe.pb.h: - -include/caffe/syncedmem.hpp: - -include/caffe/util/math_functions.hpp: - -include/caffe/util/mkl_alternate.hpp: - -include/caffe/greentea/greentea_math_functions.hpp: - -include/caffe/data_transformer.hpp: - -include/caffe/filler.hpp: - -include/caffe/internal_thread.hpp: - -include/caffe/layer.hpp: - -include/caffe/layer_factory.hpp: - -include/caffe/util/db.hpp: diff --git a/bnll_layer.d b/bnll_layer.d deleted file mode 100644 index ba7ff34e30e..00000000000 --- a/bnll_layer.d +++ /dev/null @@ -1,339 +0,0 @@ -bnll_layer.o: src/caffe/layers/bnll_layer.cu include/caffe/layer.hpp \ - include/caffe/blob.hpp include/caffe/common.hpp \ - include/caffe/util/device_alternate.hpp \ - include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ - /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ - ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ - ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ - ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ - ../ViennaCL/viennacl/tools/shared_ptr.hpp \ - ../ViennaCL/viennacl/ocl/device.hpp \ - ../ViennaCL/viennacl/ocl/device_utils.hpp \ - ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ - ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ - ../ViennaCL/viennacl/ocl/platform.hpp \ - ../ViennaCL/viennacl/ocl/command_queue.hpp \ - ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ - ../ViennaCL/viennacl/ocl/enqueue.hpp \ - ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ - ../ViennaCL/viennacl/detail/vector_def.hpp \ - ../ViennaCL/viennacl/tools/entry_proxy.hpp \ - ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ - ../ViennaCL/viennacl/backend/mem_handle.hpp \ - ../ViennaCL/viennacl/backend/cpu_ram.hpp \ - ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ - ../ViennaCL/viennacl/traits/context.hpp \ - ../ViennaCL/viennacl/backend/util.hpp \ - ../ViennaCL/viennacl/meta/result_of.hpp \ - ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ - ../ViennaCL/viennacl/tools/tools.hpp \ - ../ViennaCL/viennacl/tools/adapter.hpp \ - ../ViennaCL/viennacl/meta/predicate.hpp \ - ../ViennaCL/viennacl/traits/size.hpp \ - ../ViennaCL/viennacl/traits/start.hpp \ - ../ViennaCL/viennacl/traits/stride.hpp \ - ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ - ../ViennaCL/viennacl/ocl/utils.hpp \ - ../ViennaCL/viennacl/linalg/opencl/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ - ../ViennaCL/viennacl/scheduler/preset.hpp \ - ../ViennaCL/viennacl/device_specific/forwards.h \ - ../ViennaCL/viennacl/scheduler/io.hpp \ - ../ViennaCL/viennacl/scheduler/forwards.h \ - ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ - ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ - ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ - ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ - ../ViennaCL/viennacl/device_specific/utils.hpp \ - ../ViennaCL/viennacl/detail/matrix_def.hpp \ - ../ViennaCL/viennacl/traits/row_major.hpp \ - ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ - ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ - ../ViennaCL/viennacl/slice.hpp \ - ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ - ../ViennaCL/viennacl/linalg/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ - ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ - ../ViennaCL/viennacl/vector_proxy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ - .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ - include/caffe/util/math_functions.hpp \ - include/caffe/util/mkl_alternate.hpp \ - include/caffe/greentea/greentea_math_functions.hpp \ - include/caffe/layer_factory.hpp include/caffe/vision_layers.hpp \ - include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ - include/caffe/data_transformer.hpp include/caffe/filler.hpp \ - include/caffe/internal_thread.hpp include/caffe/util/db.hpp \ - include/caffe/loss_layers.hpp include/caffe/neuron_layers.hpp \ - include/caffe/greentea/greentea_im2col.hpp - -include/caffe/layer.hpp: - -include/caffe/blob.hpp: - -include/caffe/common.hpp: - -include/caffe/util/device_alternate.hpp: - -include/caffe/greentea/greentea.hpp: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: - -../ViennaCL/viennacl/ocl/context.hpp: - -../ViennaCL/viennacl/ocl/forwards.h: - -../ViennaCL/viennacl/ocl/handle.hpp: - -../ViennaCL/viennacl/ocl/error.hpp: - -../ViennaCL/viennacl/ocl/kernel.hpp: - -../ViennaCL/viennacl/ocl/program.hpp: - -../ViennaCL/viennacl/tools/shared_ptr.hpp: - -../ViennaCL/viennacl/ocl/device.hpp: - -../ViennaCL/viennacl/ocl/device_utils.hpp: - -../ViennaCL/viennacl/forwards.h: - -../ViennaCL/viennacl/meta/enable_if.hpp: - -../ViennaCL/viennacl/version.hpp: - -../ViennaCL/viennacl/ocl/local_mem.hpp: - -../ViennaCL/viennacl/ocl/platform.hpp: - -../ViennaCL/viennacl/ocl/command_queue.hpp: - -../ViennaCL/viennacl/tools/sha1.hpp: - -../ViennaCL/viennacl/ocl/backend.hpp: - -../ViennaCL/viennacl/ocl/enqueue.hpp: - -../ViennaCL/viennacl/backend/opencl.hpp: - -../ViennaCL/viennacl/vector.hpp: - -../ViennaCL/viennacl/detail/vector_def.hpp: - -../ViennaCL/viennacl/tools/entry_proxy.hpp: - -../ViennaCL/viennacl/scalar.hpp: - -../ViennaCL/viennacl/backend/memory.hpp: - -../ViennaCL/viennacl/backend/mem_handle.hpp: - -../ViennaCL/viennacl/backend/cpu_ram.hpp: - -../ViennaCL/viennacl/context.hpp: - -../ViennaCL/viennacl/traits/handle.hpp: - -../ViennaCL/viennacl/traits/context.hpp: - -../ViennaCL/viennacl/backend/util.hpp: - -../ViennaCL/viennacl/meta/result_of.hpp: - -../ViennaCL/viennacl/linalg/scalar_operations.hpp: - -../ViennaCL/viennacl/tools/tools.hpp: - -../ViennaCL/viennacl/tools/adapter.hpp: - -../ViennaCL/viennacl/meta/predicate.hpp: - -../ViennaCL/viennacl/traits/size.hpp: - -../ViennaCL/viennacl/traits/start.hpp: - -../ViennaCL/viennacl/traits/stride.hpp: - -../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: - -../ViennaCL/viennacl/ocl/utils.hpp: - -../ViennaCL/viennacl/linalg/opencl/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: - -../ViennaCL/viennacl/scheduler/preset.hpp: - -../ViennaCL/viennacl/device_specific/forwards.h: - -../ViennaCL/viennacl/scheduler/io.hpp: - -../ViennaCL/viennacl/scheduler/forwards.h: - -../ViennaCL/viennacl/device_specific/execution_handler.hpp: - -../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: - -../ViennaCL/viennacl/device_specific/templates/template_base.hpp: - -../ViennaCL/viennacl/device_specific/mapped_objects.hpp: - -../ViennaCL/viennacl/device_specific/utils.hpp: - -../ViennaCL/viennacl/detail/matrix_def.hpp: - -../ViennaCL/viennacl/traits/row_major.hpp: - -../ViennaCL/viennacl/device_specific/tree_parsing.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: - -../ViennaCL/viennacl/matrix_proxy.hpp: - -../ViennaCL/viennacl/range.hpp: - -../ViennaCL/viennacl/slice.hpp: - -../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/utils.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: - -../ViennaCL/viennacl/linalg/detail/op_executor.hpp: - -../ViennaCL/viennacl/linalg/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/detail/op_applier.hpp: - -../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: - -../ViennaCL/viennacl/vector_proxy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: - -.build_release/src/caffe/proto/caffe.pb.h: - -include/caffe/syncedmem.hpp: - -include/caffe/util/math_functions.hpp: - -include/caffe/util/mkl_alternate.hpp: - -include/caffe/greentea/greentea_math_functions.hpp: - -include/caffe/layer_factory.hpp: - -include/caffe/vision_layers.hpp: - -include/caffe/common_layers.hpp: - -include/caffe/data_layers.hpp: - -include/caffe/data_transformer.hpp: - -include/caffe/filler.hpp: - -include/caffe/internal_thread.hpp: - -include/caffe/util/db.hpp: - -include/caffe/loss_layers.hpp: - -include/caffe/neuron_layers.hpp: - -include/caffe/greentea/greentea_im2col.hpp: diff --git a/concat_layer.d b/concat_layer.d deleted file mode 100644 index 7ff4d21c0aa..00000000000 --- a/concat_layer.d +++ /dev/null @@ -1,339 +0,0 @@ -concat_layer.o: src/caffe/layers/concat_layer.cu include/caffe/layer.hpp \ - include/caffe/blob.hpp include/caffe/common.hpp \ - include/caffe/util/device_alternate.hpp \ - include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ - /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ - ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ - ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ - ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ - ../ViennaCL/viennacl/tools/shared_ptr.hpp \ - ../ViennaCL/viennacl/ocl/device.hpp \ - ../ViennaCL/viennacl/ocl/device_utils.hpp \ - ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ - ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ - ../ViennaCL/viennacl/ocl/platform.hpp \ - ../ViennaCL/viennacl/ocl/command_queue.hpp \ - ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ - ../ViennaCL/viennacl/ocl/enqueue.hpp \ - ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ - ../ViennaCL/viennacl/detail/vector_def.hpp \ - ../ViennaCL/viennacl/tools/entry_proxy.hpp \ - ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ - ../ViennaCL/viennacl/backend/mem_handle.hpp \ - ../ViennaCL/viennacl/backend/cpu_ram.hpp \ - ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ - ../ViennaCL/viennacl/traits/context.hpp \ - ../ViennaCL/viennacl/backend/util.hpp \ - ../ViennaCL/viennacl/meta/result_of.hpp \ - ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ - ../ViennaCL/viennacl/tools/tools.hpp \ - ../ViennaCL/viennacl/tools/adapter.hpp \ - ../ViennaCL/viennacl/meta/predicate.hpp \ - ../ViennaCL/viennacl/traits/size.hpp \ - ../ViennaCL/viennacl/traits/start.hpp \ - ../ViennaCL/viennacl/traits/stride.hpp \ - ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ - ../ViennaCL/viennacl/ocl/utils.hpp \ - ../ViennaCL/viennacl/linalg/opencl/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ - ../ViennaCL/viennacl/scheduler/preset.hpp \ - ../ViennaCL/viennacl/device_specific/forwards.h \ - ../ViennaCL/viennacl/scheduler/io.hpp \ - ../ViennaCL/viennacl/scheduler/forwards.h \ - ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ - ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ - ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ - ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ - ../ViennaCL/viennacl/device_specific/utils.hpp \ - ../ViennaCL/viennacl/detail/matrix_def.hpp \ - ../ViennaCL/viennacl/traits/row_major.hpp \ - ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ - ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ - ../ViennaCL/viennacl/slice.hpp \ - ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ - ../ViennaCL/viennacl/linalg/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ - ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ - ../ViennaCL/viennacl/vector_proxy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ - .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ - include/caffe/util/math_functions.hpp \ - include/caffe/util/mkl_alternate.hpp \ - include/caffe/greentea/greentea_math_functions.hpp \ - include/caffe/layer_factory.hpp include/caffe/vision_layers.hpp \ - include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ - include/caffe/data_transformer.hpp include/caffe/filler.hpp \ - include/caffe/internal_thread.hpp include/caffe/util/db.hpp \ - include/caffe/loss_layers.hpp include/caffe/neuron_layers.hpp \ - include/caffe/greentea/greentea_im2col.hpp - -include/caffe/layer.hpp: - -include/caffe/blob.hpp: - -include/caffe/common.hpp: - -include/caffe/util/device_alternate.hpp: - -include/caffe/greentea/greentea.hpp: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: - -../ViennaCL/viennacl/ocl/context.hpp: - -../ViennaCL/viennacl/ocl/forwards.h: - -../ViennaCL/viennacl/ocl/handle.hpp: - -../ViennaCL/viennacl/ocl/error.hpp: - -../ViennaCL/viennacl/ocl/kernel.hpp: - -../ViennaCL/viennacl/ocl/program.hpp: - -../ViennaCL/viennacl/tools/shared_ptr.hpp: - -../ViennaCL/viennacl/ocl/device.hpp: - -../ViennaCL/viennacl/ocl/device_utils.hpp: - -../ViennaCL/viennacl/forwards.h: - -../ViennaCL/viennacl/meta/enable_if.hpp: - -../ViennaCL/viennacl/version.hpp: - -../ViennaCL/viennacl/ocl/local_mem.hpp: - -../ViennaCL/viennacl/ocl/platform.hpp: - -../ViennaCL/viennacl/ocl/command_queue.hpp: - -../ViennaCL/viennacl/tools/sha1.hpp: - -../ViennaCL/viennacl/ocl/backend.hpp: - -../ViennaCL/viennacl/ocl/enqueue.hpp: - -../ViennaCL/viennacl/backend/opencl.hpp: - -../ViennaCL/viennacl/vector.hpp: - -../ViennaCL/viennacl/detail/vector_def.hpp: - -../ViennaCL/viennacl/tools/entry_proxy.hpp: - -../ViennaCL/viennacl/scalar.hpp: - -../ViennaCL/viennacl/backend/memory.hpp: - -../ViennaCL/viennacl/backend/mem_handle.hpp: - -../ViennaCL/viennacl/backend/cpu_ram.hpp: - -../ViennaCL/viennacl/context.hpp: - -../ViennaCL/viennacl/traits/handle.hpp: - -../ViennaCL/viennacl/traits/context.hpp: - -../ViennaCL/viennacl/backend/util.hpp: - -../ViennaCL/viennacl/meta/result_of.hpp: - -../ViennaCL/viennacl/linalg/scalar_operations.hpp: - -../ViennaCL/viennacl/tools/tools.hpp: - -../ViennaCL/viennacl/tools/adapter.hpp: - -../ViennaCL/viennacl/meta/predicate.hpp: - -../ViennaCL/viennacl/traits/size.hpp: - -../ViennaCL/viennacl/traits/start.hpp: - -../ViennaCL/viennacl/traits/stride.hpp: - -../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: - -../ViennaCL/viennacl/ocl/utils.hpp: - -../ViennaCL/viennacl/linalg/opencl/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: - -../ViennaCL/viennacl/scheduler/preset.hpp: - -../ViennaCL/viennacl/device_specific/forwards.h: - -../ViennaCL/viennacl/scheduler/io.hpp: - -../ViennaCL/viennacl/scheduler/forwards.h: - -../ViennaCL/viennacl/device_specific/execution_handler.hpp: - -../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: - -../ViennaCL/viennacl/device_specific/templates/template_base.hpp: - -../ViennaCL/viennacl/device_specific/mapped_objects.hpp: - -../ViennaCL/viennacl/device_specific/utils.hpp: - -../ViennaCL/viennacl/detail/matrix_def.hpp: - -../ViennaCL/viennacl/traits/row_major.hpp: - -../ViennaCL/viennacl/device_specific/tree_parsing.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: - -../ViennaCL/viennacl/matrix_proxy.hpp: - -../ViennaCL/viennacl/range.hpp: - -../ViennaCL/viennacl/slice.hpp: - -../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/utils.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: - -../ViennaCL/viennacl/linalg/detail/op_executor.hpp: - -../ViennaCL/viennacl/linalg/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/detail/op_applier.hpp: - -../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: - -../ViennaCL/viennacl/vector_proxy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: - -.build_release/src/caffe/proto/caffe.pb.h: - -include/caffe/syncedmem.hpp: - -include/caffe/util/math_functions.hpp: - -include/caffe/util/mkl_alternate.hpp: - -include/caffe/greentea/greentea_math_functions.hpp: - -include/caffe/layer_factory.hpp: - -include/caffe/vision_layers.hpp: - -include/caffe/common_layers.hpp: - -include/caffe/data_layers.hpp: - -include/caffe/data_transformer.hpp: - -include/caffe/filler.hpp: - -include/caffe/internal_thread.hpp: - -include/caffe/util/db.hpp: - -include/caffe/loss_layers.hpp: - -include/caffe/neuron_layers.hpp: - -include/caffe/greentea/greentea_im2col.hpp: diff --git a/contrastive_loss_layer.d b/contrastive_loss_layer.d deleted file mode 100644 index 1ba2b5d6a41..00000000000 --- a/contrastive_loss_layer.d +++ /dev/null @@ -1,342 +0,0 @@ -contrastive_loss_layer.o: src/caffe/layers/contrastive_loss_layer.cu \ - include/caffe/layer.hpp include/caffe/blob.hpp include/caffe/common.hpp \ - include/caffe/util/device_alternate.hpp \ - include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ - /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ - ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ - ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ - ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ - ../ViennaCL/viennacl/tools/shared_ptr.hpp \ - ../ViennaCL/viennacl/ocl/device.hpp \ - ../ViennaCL/viennacl/ocl/device_utils.hpp \ - ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ - ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ - ../ViennaCL/viennacl/ocl/platform.hpp \ - ../ViennaCL/viennacl/ocl/command_queue.hpp \ - ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ - ../ViennaCL/viennacl/ocl/enqueue.hpp \ - ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ - ../ViennaCL/viennacl/detail/vector_def.hpp \ - ../ViennaCL/viennacl/tools/entry_proxy.hpp \ - ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ - ../ViennaCL/viennacl/backend/mem_handle.hpp \ - ../ViennaCL/viennacl/backend/cpu_ram.hpp \ - ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ - ../ViennaCL/viennacl/traits/context.hpp \ - ../ViennaCL/viennacl/backend/util.hpp \ - ../ViennaCL/viennacl/meta/result_of.hpp \ - ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ - ../ViennaCL/viennacl/tools/tools.hpp \ - ../ViennaCL/viennacl/tools/adapter.hpp \ - ../ViennaCL/viennacl/meta/predicate.hpp \ - ../ViennaCL/viennacl/traits/size.hpp \ - ../ViennaCL/viennacl/traits/start.hpp \ - ../ViennaCL/viennacl/traits/stride.hpp \ - ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ - ../ViennaCL/viennacl/ocl/utils.hpp \ - ../ViennaCL/viennacl/linalg/opencl/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ - ../ViennaCL/viennacl/scheduler/preset.hpp \ - ../ViennaCL/viennacl/device_specific/forwards.h \ - ../ViennaCL/viennacl/scheduler/io.hpp \ - ../ViennaCL/viennacl/scheduler/forwards.h \ - ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ - ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ - ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ - ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ - ../ViennaCL/viennacl/device_specific/utils.hpp \ - ../ViennaCL/viennacl/detail/matrix_def.hpp \ - ../ViennaCL/viennacl/traits/row_major.hpp \ - ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ - ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ - ../ViennaCL/viennacl/slice.hpp \ - ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ - ../ViennaCL/viennacl/linalg/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ - ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ - ../ViennaCL/viennacl/vector_proxy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ - .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ - include/caffe/util/math_functions.hpp \ - include/caffe/util/mkl_alternate.hpp \ - include/caffe/greentea/greentea_math_functions.hpp \ - include/caffe/layer_factory.hpp include/caffe/util/io.hpp \ - include/caffe/vision_layers.hpp include/caffe/common_layers.hpp \ - include/caffe/data_layers.hpp include/caffe/data_transformer.hpp \ - include/caffe/filler.hpp include/caffe/internal_thread.hpp \ - include/caffe/util/db.hpp include/caffe/loss_layers.hpp \ - include/caffe/neuron_layers.hpp \ - include/caffe/greentea/greentea_im2col.hpp - -include/caffe/layer.hpp: - -include/caffe/blob.hpp: - -include/caffe/common.hpp: - -include/caffe/util/device_alternate.hpp: - -include/caffe/greentea/greentea.hpp: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: - -../ViennaCL/viennacl/ocl/context.hpp: - -../ViennaCL/viennacl/ocl/forwards.h: - -../ViennaCL/viennacl/ocl/handle.hpp: - -../ViennaCL/viennacl/ocl/error.hpp: - -../ViennaCL/viennacl/ocl/kernel.hpp: - -../ViennaCL/viennacl/ocl/program.hpp: - -../ViennaCL/viennacl/tools/shared_ptr.hpp: - -../ViennaCL/viennacl/ocl/device.hpp: - -../ViennaCL/viennacl/ocl/device_utils.hpp: - -../ViennaCL/viennacl/forwards.h: - -../ViennaCL/viennacl/meta/enable_if.hpp: - -../ViennaCL/viennacl/version.hpp: - -../ViennaCL/viennacl/ocl/local_mem.hpp: - -../ViennaCL/viennacl/ocl/platform.hpp: - -../ViennaCL/viennacl/ocl/command_queue.hpp: - -../ViennaCL/viennacl/tools/sha1.hpp: - -../ViennaCL/viennacl/ocl/backend.hpp: - -../ViennaCL/viennacl/ocl/enqueue.hpp: - -../ViennaCL/viennacl/backend/opencl.hpp: - -../ViennaCL/viennacl/vector.hpp: - -../ViennaCL/viennacl/detail/vector_def.hpp: - -../ViennaCL/viennacl/tools/entry_proxy.hpp: - -../ViennaCL/viennacl/scalar.hpp: - -../ViennaCL/viennacl/backend/memory.hpp: - -../ViennaCL/viennacl/backend/mem_handle.hpp: - -../ViennaCL/viennacl/backend/cpu_ram.hpp: - -../ViennaCL/viennacl/context.hpp: - -../ViennaCL/viennacl/traits/handle.hpp: - -../ViennaCL/viennacl/traits/context.hpp: - -../ViennaCL/viennacl/backend/util.hpp: - -../ViennaCL/viennacl/meta/result_of.hpp: - -../ViennaCL/viennacl/linalg/scalar_operations.hpp: - -../ViennaCL/viennacl/tools/tools.hpp: - -../ViennaCL/viennacl/tools/adapter.hpp: - -../ViennaCL/viennacl/meta/predicate.hpp: - -../ViennaCL/viennacl/traits/size.hpp: - -../ViennaCL/viennacl/traits/start.hpp: - -../ViennaCL/viennacl/traits/stride.hpp: - -../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: - -../ViennaCL/viennacl/ocl/utils.hpp: - -../ViennaCL/viennacl/linalg/opencl/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: - -../ViennaCL/viennacl/scheduler/preset.hpp: - -../ViennaCL/viennacl/device_specific/forwards.h: - -../ViennaCL/viennacl/scheduler/io.hpp: - -../ViennaCL/viennacl/scheduler/forwards.h: - -../ViennaCL/viennacl/device_specific/execution_handler.hpp: - -../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: - -../ViennaCL/viennacl/device_specific/templates/template_base.hpp: - -../ViennaCL/viennacl/device_specific/mapped_objects.hpp: - -../ViennaCL/viennacl/device_specific/utils.hpp: - -../ViennaCL/viennacl/detail/matrix_def.hpp: - -../ViennaCL/viennacl/traits/row_major.hpp: - -../ViennaCL/viennacl/device_specific/tree_parsing.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: - -../ViennaCL/viennacl/matrix_proxy.hpp: - -../ViennaCL/viennacl/range.hpp: - -../ViennaCL/viennacl/slice.hpp: - -../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/utils.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: - -../ViennaCL/viennacl/linalg/detail/op_executor.hpp: - -../ViennaCL/viennacl/linalg/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/detail/op_applier.hpp: - -../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: - -../ViennaCL/viennacl/vector_proxy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: - -.build_release/src/caffe/proto/caffe.pb.h: - -include/caffe/syncedmem.hpp: - -include/caffe/util/math_functions.hpp: - -include/caffe/util/mkl_alternate.hpp: - -include/caffe/greentea/greentea_math_functions.hpp: - -include/caffe/layer_factory.hpp: - -include/caffe/util/io.hpp: - -include/caffe/vision_layers.hpp: - -include/caffe/common_layers.hpp: - -include/caffe/data_layers.hpp: - -include/caffe/data_transformer.hpp: - -include/caffe/filler.hpp: - -include/caffe/internal_thread.hpp: - -include/caffe/util/db.hpp: - -include/caffe/loss_layers.hpp: - -include/caffe/neuron_layers.hpp: - -include/caffe/greentea/greentea_im2col.hpp: diff --git a/conv_layer.d b/conv_layer.d deleted file mode 100644 index 58b346180c1..00000000000 --- a/conv_layer.d +++ /dev/null @@ -1,342 +0,0 @@ -conv_layer.o: src/caffe/layers/conv_layer.cu include/caffe/filler.hpp \ - include/caffe/blob.hpp include/caffe/common.hpp \ - include/caffe/util/device_alternate.hpp \ - include/caffe/greentea/greentea.hpp /opt/AMDAPPSDK-2.9-1/include/CL/cl.h \ - /opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h \ - ../ViennaCL/viennacl/ocl/context.hpp ../ViennaCL/viennacl/ocl/forwards.h \ - ../ViennaCL/viennacl/ocl/handle.hpp ../ViennaCL/viennacl/ocl/error.hpp \ - ../ViennaCL/viennacl/ocl/kernel.hpp ../ViennaCL/viennacl/ocl/program.hpp \ - ../ViennaCL/viennacl/tools/shared_ptr.hpp \ - ../ViennaCL/viennacl/ocl/device.hpp \ - ../ViennaCL/viennacl/ocl/device_utils.hpp \ - ../ViennaCL/viennacl/forwards.h ../ViennaCL/viennacl/meta/enable_if.hpp \ - ../ViennaCL/viennacl/version.hpp ../ViennaCL/viennacl/ocl/local_mem.hpp \ - ../ViennaCL/viennacl/ocl/platform.hpp \ - ../ViennaCL/viennacl/ocl/command_queue.hpp \ - ../ViennaCL/viennacl/tools/sha1.hpp ../ViennaCL/viennacl/ocl/backend.hpp \ - ../ViennaCL/viennacl/ocl/enqueue.hpp \ - ../ViennaCL/viennacl/backend/opencl.hpp ../ViennaCL/viennacl/vector.hpp \ - ../ViennaCL/viennacl/detail/vector_def.hpp \ - ../ViennaCL/viennacl/tools/entry_proxy.hpp \ - ../ViennaCL/viennacl/scalar.hpp ../ViennaCL/viennacl/backend/memory.hpp \ - ../ViennaCL/viennacl/backend/mem_handle.hpp \ - ../ViennaCL/viennacl/backend/cpu_ram.hpp \ - ../ViennaCL/viennacl/context.hpp ../ViennaCL/viennacl/traits/handle.hpp \ - ../ViennaCL/viennacl/traits/context.hpp \ - ../ViennaCL/viennacl/backend/util.hpp \ - ../ViennaCL/viennacl/meta/result_of.hpp \ - ../ViennaCL/viennacl/linalg/scalar_operations.hpp \ - ../ViennaCL/viennacl/tools/tools.hpp \ - ../ViennaCL/viennacl/tools/adapter.hpp \ - ../ViennaCL/viennacl/meta/predicate.hpp \ - ../ViennaCL/viennacl/traits/size.hpp \ - ../ViennaCL/viennacl/traits/start.hpp \ - ../ViennaCL/viennacl/traits/stride.hpp \ - ../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp \ - ../ViennaCL/viennacl/ocl/utils.hpp \ - ../ViennaCL/viennacl/linalg/opencl/common.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp \ - ../ViennaCL/viennacl/scheduler/preset.hpp \ - ../ViennaCL/viennacl/device_specific/forwards.h \ - ../ViennaCL/viennacl/scheduler/io.hpp \ - ../ViennaCL/viennacl/scheduler/forwards.h \ - ../ViennaCL/viennacl/device_specific/execution_handler.hpp \ - ../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp \ - ../ViennaCL/viennacl/device_specific/templates/template_base.hpp \ - ../ViennaCL/viennacl/device_specific/mapped_objects.hpp \ - ../ViennaCL/viennacl/device_specific/utils.hpp \ - ../ViennaCL/viennacl/detail/matrix_def.hpp \ - ../ViennaCL/viennacl/traits/row_major.hpp \ - ../ViennaCL/viennacl/device_specific/tree_parsing.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp \ - ../ViennaCL/viennacl/matrix_proxy.hpp ../ViennaCL/viennacl/range.hpp \ - ../ViennaCL/viennacl/slice.hpp \ - ../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/utils.hpp \ - ../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp \ - ../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/common.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_executor.hpp \ - ../ViennaCL/viennacl/linalg/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/detail/op_applier.hpp \ - ../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp \ - ../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp \ - ../ViennaCL/viennacl/vector_proxy.hpp \ - ../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp \ - .build_release/src/caffe/proto/caffe.pb.h include/caffe/syncedmem.hpp \ - include/caffe/util/math_functions.hpp \ - include/caffe/util/mkl_alternate.hpp \ - include/caffe/greentea/greentea_math_functions.hpp \ - include/caffe/layer.hpp include/caffe/layer_factory.hpp \ - include/caffe/util/im2col.hpp include/caffe/vision_layers.hpp \ - include/caffe/common_layers.hpp include/caffe/data_layers.hpp \ - include/caffe/data_transformer.hpp include/caffe/internal_thread.hpp \ - include/caffe/util/db.hpp include/caffe/loss_layers.hpp \ - include/caffe/neuron_layers.hpp \ - include/caffe/greentea/greentea_im2col.hpp - -include/caffe/filler.hpp: - -include/caffe/blob.hpp: - -include/caffe/common.hpp: - -include/caffe/util/device_alternate.hpp: - -include/caffe/greentea/greentea.hpp: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl.h: - -/opt/AMDAPPSDK-2.9-1/include/CL/cl_platform.h: - -../ViennaCL/viennacl/ocl/context.hpp: - -../ViennaCL/viennacl/ocl/forwards.h: - -../ViennaCL/viennacl/ocl/handle.hpp: - -../ViennaCL/viennacl/ocl/error.hpp: - -../ViennaCL/viennacl/ocl/kernel.hpp: - -../ViennaCL/viennacl/ocl/program.hpp: - -../ViennaCL/viennacl/tools/shared_ptr.hpp: - -../ViennaCL/viennacl/ocl/device.hpp: - -../ViennaCL/viennacl/ocl/device_utils.hpp: - -../ViennaCL/viennacl/forwards.h: - -../ViennaCL/viennacl/meta/enable_if.hpp: - -../ViennaCL/viennacl/version.hpp: - -../ViennaCL/viennacl/ocl/local_mem.hpp: - -../ViennaCL/viennacl/ocl/platform.hpp: - -../ViennaCL/viennacl/ocl/command_queue.hpp: - -../ViennaCL/viennacl/tools/sha1.hpp: - -../ViennaCL/viennacl/ocl/backend.hpp: - -../ViennaCL/viennacl/ocl/enqueue.hpp: - -../ViennaCL/viennacl/backend/opencl.hpp: - -../ViennaCL/viennacl/vector.hpp: - -../ViennaCL/viennacl/detail/vector_def.hpp: - -../ViennaCL/viennacl/tools/entry_proxy.hpp: - -../ViennaCL/viennacl/scalar.hpp: - -../ViennaCL/viennacl/backend/memory.hpp: - -../ViennaCL/viennacl/backend/mem_handle.hpp: - -../ViennaCL/viennacl/backend/cpu_ram.hpp: - -../ViennaCL/viennacl/context.hpp: - -../ViennaCL/viennacl/traits/handle.hpp: - -../ViennaCL/viennacl/traits/context.hpp: - -../ViennaCL/viennacl/backend/util.hpp: - -../ViennaCL/viennacl/meta/result_of.hpp: - -../ViennaCL/viennacl/linalg/scalar_operations.hpp: - -../ViennaCL/viennacl/tools/tools.hpp: - -../ViennaCL/viennacl/tools/adapter.hpp: - -../ViennaCL/viennacl/meta/predicate.hpp: - -../ViennaCL/viennacl/traits/size.hpp: - -../ViennaCL/viennacl/traits/start.hpp: - -../ViennaCL/viennacl/traits/stride.hpp: - -../ViennaCL/viennacl/linalg/host_based/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/scalar_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/scalar.hpp: - -../ViennaCL/viennacl/ocl/utils.hpp: - -../ViennaCL/viennacl/linalg/opencl/common.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/matrix.hpp: - -../ViennaCL/viennacl/scheduler/preset.hpp: - -../ViennaCL/viennacl/device_specific/forwards.h: - -../ViennaCL/viennacl/scheduler/io.hpp: - -../ViennaCL/viennacl/scheduler/forwards.h: - -../ViennaCL/viennacl/device_specific/execution_handler.hpp: - -../ViennaCL/viennacl/device_specific/lazy_program_compiler.hpp: - -../ViennaCL/viennacl/device_specific/templates/template_base.hpp: - -../ViennaCL/viennacl/device_specific/mapped_objects.hpp: - -../ViennaCL/viennacl/device_specific/utils.hpp: - -../ViennaCL/viennacl/detail/matrix_def.hpp: - -../ViennaCL/viennacl/traits/row_major.hpp: - -../ViennaCL/viennacl/device_specific/tree_parsing.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/vector_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/barts.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_product_template.hpp: - -../ViennaCL/viennacl/matrix_proxy.hpp: - -../ViennaCL/viennacl/range.hpp: - -../ViennaCL/viennacl/slice.hpp: - -../ViennaCL/viennacl/device_specific/templates/row_wise_reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/utils.hpp: - -../ViennaCL/viennacl/device_specific/templates/matrix_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/reduction_template.hpp: - -../ViennaCL/viennacl/device_specific/templates/vector_axpy_template.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/common.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/tesla_c2050.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_470.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/maxwell/geforce_gtx_750_ti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/scrapper.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/tesla/geforce_gtx_260.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/southern_islands/tahiti.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/northern_islands/devastator.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/kepler/tesla_k20m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gtx_580.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/volcanic_islands/hawaii.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cypress.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/amd/evergreen/cedar.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/nvidia/fermi/geforce_gt_540m.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/accelerator/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/cpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/devices/gpu/fallback.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_axpy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/row_wise_reduction.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/matrix_product.hpp: - -../ViennaCL/viennacl/linalg/detail/op_executor.hpp: - -../ViennaCL/viennacl/linalg/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/host_based/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/detail/op_applier.hpp: - -../ViennaCL/viennacl/linalg/opencl/vector_operations.hpp: - -../ViennaCL/viennacl/linalg/opencl/kernels/vector.hpp: - -../ViennaCL/viennacl/vector_proxy.hpp: - -../ViennaCL/viennacl/device_specific/builtin_database/reduction.hpp: - -.build_release/src/caffe/proto/caffe.pb.h: - -include/caffe/syncedmem.hpp: - -include/caffe/util/math_functions.hpp: - -include/caffe/util/mkl_alternate.hpp: - -include/caffe/greentea/greentea_math_functions.hpp: - -include/caffe/layer.hpp: - -include/caffe/layer_factory.hpp: - -include/caffe/util/im2col.hpp: - -include/caffe/vision_layers.hpp: - -include/caffe/common_layers.hpp: - -include/caffe/data_layers.hpp: - -include/caffe/data_transformer.hpp: - -include/caffe/internal_thread.hpp: - -include/caffe/util/db.hpp: - -include/caffe/loss_layers.hpp: - -include/caffe/neuron_layers.hpp: - -include/caffe/greentea/greentea_im2col.hpp: From 513fade875795eadfbf5260a18da237d655ca6ed Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 17 Jun 2015 01:00:36 +0200 Subject: [PATCH 057/254] Fixed CUDA-only build. Now both backends can be properly enabled and disabled at run- and compile-time. All test cases get passed by AMD OpenCL, Intel OpenCL, nVidia OpenCL and CUDA. --- src/caffe/test/test_caffe_main.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index 1de107ffaa2..84b0ca6603c 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -4,6 +4,20 @@ #include "caffe/caffe.hpp" #include "caffe/test/test_caffe_main.hpp" +namespace caffe { +#ifndef CPU_ONLY +#ifdef USE_CUDA +cudaDeviceProp CAFFE_TEST_CUDA_PROP; +#endif // USE_CUDA +#endif +} + +#ifndef CPU_ONLY +#ifdef USE_CUDA +using caffe::CAFFE_TEST_CUDA_PROP; +#endif // USE_CUDA +#endif + using caffe::Caffe; int main(int argc, char** argv) { From 6328b53cdfedd5ccba868b255fdbccc657ba2c56 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 17 Jun 2015 17:20:47 +0200 Subject: [PATCH 058/254] Makefile, Cmake and Travis script update. --- CMakeLists.txt | 2 +- Makefile.config.example | 4 ++-- scripts/travis/travis_install.sh | 18 +++++++++++++----- src/caffe/layers/cudnn_conv_layer.cu | 2 +- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8852ee606e3..ab1188f581f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,7 +15,7 @@ include(cmake/Summary.cmake) include(cmake/ConfigGen.cmake) # ---[ Options -caffe_option(CPU_ONLY "Build Caffe wihtout CUDA and OpenCL support" OFF) # TODO: rename to USE_CUDA +caffe_option(CPU_ONLY "Build Caffe wihtout CUDA and OpenCL support" OFF) caffe_option(USE_CUDA "Build Caffe with CUDA support" ON) caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" ON) caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" ON IF USE_CUDA AND NOT CPU_ONLY) diff --git a/Makefile.config.example b/Makefile.config.example index ce2df4fceb7..c61de273681 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -7,7 +7,7 @@ USE_CUDA := 1 # Enable the OpenCL/Greentea backend -USE_GREENTEA := 1 +USE_GREENTEA := 0 # Folder of the ViennaCL header-only library VIENNACL_DIR = ../ViennaCL @@ -17,7 +17,7 @@ VIENNACL_DIR = ../ViennaCL # When both are disabled, GPUs won't work. CPUs always use CBLAS (Atlas, MKL or OpenBLAS). # The chosen BLAS library needs to be compiled and installed from source. # CLBLAS should be faster, especially on AMD cards. -USE_CLBLAS := 1 +USE_CLBLAS := 0 USE_VIENNACLBLAS := 0 # Enable or disable double precision support for OpenCL/Greentea diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index b6e6f6ce821..352929a7384 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -9,17 +9,25 @@ MAKE="make --jobs=$NUM_THREADS" # This ppa is for gflags and glog add-apt-repository -y ppa:tuleu/precise-backports +# This ppa is for boost 1.54 +add-apt-repository -y ppa:boost-latest/ppa +# This ppa is for g++ 4.8 +add-apt repository -y ppa:ubuntu-toolchain-r/test + apt-get -y update apt-get install \ - wget git curl \ + g++-4.8 wget git curl \ python-dev python-numpy \ libleveldb-dev libsnappy-dev libopencv-dev \ - libboost-dev libboost-system-dev libboost-python-dev libboost-thread-dev \ + libboost-dev libboost-system-dev libboost-python-dev libboost-thread-dev libboost-python1.54-dev \ libprotobuf-dev protobuf-compiler \ libatlas-dev libatlas-base-dev \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ bc +update-alternatives +--install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 + # Add a special apt-repository to install CMake 2.8.9 for CMake Caffe build, # if needed. By default, Aptitude in Ubuntu 12.04 installs CMake 2.8.7, but # Caffe requires a minimum CMake version of 2.8.8. @@ -31,7 +39,7 @@ fi # Install CUDA, if needed if $WITH_CUDA; then - CUDA_URL=http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1204/x86_64/cuda-repo-ubuntu1204_6.5-14_amd64.deb + CUDA_URL=http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1204/x86_64/cuda-repo-ubuntu1204_7.0-28_amd64.deb CUDA_FILE=/tmp/cuda_install.deb curl $CUDA_URL -o $CUDA_FILE dpkg -i $CUDA_FILE @@ -39,11 +47,11 @@ if $WITH_CUDA; then apt-get -y update # Install the minimal CUDA subpackages required to test Caffe build. # For a full CUDA installation, add 'cuda' to the list of packages. - apt-get -y install cuda-core-6-5 cuda-cublas-6-5 cuda-cublas-dev-6-5 cuda-cudart-6-5 cuda-cudart-dev-6-5 cuda-curand-6-5 cuda-curand-dev-6-5 + apt-get -y install cuda-core-7-0 cuda-cublas-7-0 cuda-cublas-dev-7-0 cuda-cudart-7-0 cuda-cudart-dev-7-0 cuda-curand-7-0 cuda-curand-dev-7-0 # Create CUDA symlink at /usr/local/cuda # (This would normally be created by the CUDA installer, but we create it # manually since we did a partial installation.) - ln -s /usr/local/cuda-6.5 /usr/local/cuda + ln -s /usr/local/cuda-7.0 /usr/local/cuda fi # Install LMDB diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index f583063be70..dc9affaed28 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -150,7 +150,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) - sync_conv_groupsCUDA_KERNEL(1, 1)(); + sync_conv_groups CUDA_KERNEL(1, 1)(); } } From c70039be86a8fac478e01503957fc468b3e2a986 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 17 Jun 2015 18:41:01 +0200 Subject: [PATCH 059/254] Travis fix. --- scripts/travis/travis_install.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index 352929a7384..b01a2531304 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -12,7 +12,7 @@ add-apt-repository -y ppa:tuleu/precise-backports # This ppa is for boost 1.54 add-apt-repository -y ppa:boost-latest/ppa # This ppa is for g++ 4.8 -add-apt repository -y ppa:ubuntu-toolchain-r/test +add-apt-repository -y ppa:ubuntu-toolchain-r/test apt-get -y update apt-get install \ @@ -25,8 +25,7 @@ apt-get install \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ bc -update-alternatives ---install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 +update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 # Add a special apt-repository to install CMake 2.8.9 for CMake Caffe build, # if needed. By default, Aptitude in Ubuntu 12.04 installs CMake 2.8.7, but From 1f1801bbe3f1702a5d387489014e80694a47202f Mon Sep 17 00:00:00 2001 From: Jim Date: Thu, 18 Jun 2015 07:23:46 -0500 Subject: [PATCH 060/254] fix caffe time command. --- include/caffe/common.hpp | 2 ++ src/caffe/common.cpp | 13 +++++++++++++ tools/caffe.cpp | 2 ++ 3 files changed, 17 insertions(+) diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index de247ab4968..9ff3d211e3c 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -162,6 +162,8 @@ class Caffe { static void EnumerateDevices(); // Prepares contexts for devices to use static void SetDevices(std::vector device_ids); + // Finish executing gpu kernels on the specified-device. + static void Synchronize(int device_id); // Get a device context static DeviceContext& GetDeviceContext(int id); diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 7baf96c6ecb..1feb67edf2c 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -150,6 +150,19 @@ void Caffe::set_random_seed(const unsigned int seed) { Get().random_generator_.reset(new RNG(seed)); } +void Caffe::Synchronize(int device_id) { +#ifdef USE_GREENTEA + DeviceContext& device_context = Caffe::GetDeviceContext(device_id); + if ( device_context.backend() == BACKEND_OpenCL ) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + GetDeviceContext(device_id).id()); + ctx.get_queue().finish(); + } +#else + (void) device_id; +#endif +} + void Caffe::EnumerateDevices() { int cuda_device_count = 0; int greentea_device_count = 0; diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 0b7523fccf9..4a58d79e242 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -253,6 +253,7 @@ int time() { for (int i = 0; i < layers.size(); ++i) { timer.Start(); layers[i]->Forward(bottom_vecs[i], top_vecs[i]); + Caffe::Synchronize(FLAGS_gpu); forward_time_per_layer[i] += timer.MicroSeconds(); } forward_time += forward_timer.MicroSeconds(); @@ -261,6 +262,7 @@ int time() { timer.Start(); layers[i]->Backward(top_vecs[i], bottom_need_backward[i], bottom_vecs[i]); + Caffe::Synchronize(FLAGS_gpu); backward_time_per_layer[i] += timer.MicroSeconds(); } backward_time += backward_timer.MicroSeconds(); From 0d06b4e60ea234f6552f2f9086cf91ae491110f7 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 18 Jun 2015 16:38:24 +0200 Subject: [PATCH 061/254] Travis fix, should finally be correct. --- scripts/travis/travis_install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index b01a2531304..76ef89c850d 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -19,7 +19,7 @@ apt-get install \ g++-4.8 wget git curl \ python-dev python-numpy \ libleveldb-dev libsnappy-dev libopencv-dev \ - libboost-dev libboost-system-dev libboost-python-dev libboost-thread-dev libboost-python1.54-dev \ + libboost-dev libboost-system-dev libboost-python-dev libboost-thread-dev \ libprotobuf-dev protobuf-compiler \ libatlas-dev libatlas-base-dev \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ From 7208da643b7facfc8ceaa1387a437fb44d5ebec2 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 18 Jun 2015 17:15:30 +0200 Subject: [PATCH 062/254] Fixed travis boost version, for C++11 incompability fix. --- scripts/travis/travis_install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index 76ef89c850d..fd0295e386d 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -19,7 +19,7 @@ apt-get install \ g++-4.8 wget git curl \ python-dev python-numpy \ libleveldb-dev libsnappy-dev libopencv-dev \ - libboost-dev libboost-system-dev libboost-python-dev libboost-thread-dev \ + libboost1.54-dev libboost-system1.54-dev libboost-python1.54-dev libboost-thread1.54-dev \ libprotobuf-dev protobuf-compiler \ libatlas-dev libatlas-base-dev \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ From 97030c3fc9faa0ef32ebf628146541f6ca54a26f Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 18 Jun 2015 17:30:22 +0200 Subject: [PATCH 063/254] Adapted makefile to work with Travis CI. --- Makefile.config.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.config.example b/Makefile.config.example index c61de273681..e439eb042a2 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -52,7 +52,7 @@ CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ # atlas for ATLAS (default) # mkl for MKL # open for OpenBlas -BLAS := open +BLAS := atlas # Custom (MKL/ATLAS/OpenBLAS) include and lib directories. # Leave commented to accept the defaults for your choice of BLAS # (which should work)! From f505a3a82d93de1aa133ecfe851f21409b56a82d Mon Sep 17 00:00:00 2001 From: Jim Date: Thu, 18 Jun 2015 10:49:27 -0500 Subject: [PATCH 064/254] caffe device_query enumerates devices. --- tools/caffe.cpp | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 4a58d79e242..3c20517c11c 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -67,12 +67,17 @@ static BrewFunction GetBrewFunction(const caffe::string& name) { // To add a command, define a function "int command()" and register it with // RegisterBrewFunction(action); -// Device Query: show diagnostic information for a GPU device. +// Device Query: show diagnostic information for a GPU device, or +// enumerate all devices if none is specified. int device_query() { - CHECK_GT(FLAGS_gpu, -1) << "Need a device ID to query."; - LOG(INFO) << "Querying device ID = " << FLAGS_gpu; - caffe::Caffe::SetDevice(FLAGS_gpu); - caffe::Caffe::DeviceQuery(); + if ( FLAGS_gpu < 0 ) { + //CHECK_GT(FLAGS_gpu, -1) << "Need a device ID to query."; + Caffe::EnumerateDevices(); + } else { + LOG(INFO) << "Querying device ID = " << FLAGS_gpu; + caffe::Caffe::SetDevice(FLAGS_gpu); + caffe::Caffe::DeviceQuery(); + } return 0; } RegisterBrewFunction(device_query); From a30ab907419108285a8dc096bdbf34f03d436a69 Mon Sep 17 00:00:00 2001 From: Jim Date: Thu, 18 Jun 2015 10:52:39 -0500 Subject: [PATCH 065/254] Fix comment. --- tools/caffe.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 3c20517c11c..aef7b5bbd74 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -71,7 +71,7 @@ static BrewFunction GetBrewFunction(const caffe::string& name) { // enumerate all devices if none is specified. int device_query() { if ( FLAGS_gpu < 0 ) { - //CHECK_GT(FLAGS_gpu, -1) << "Need a device ID to query."; + // If no gpu is specified, enumerate all the devices. Caffe::EnumerateDevices(); } else { LOG(INFO) << "Querying device ID = " << FLAGS_gpu; From 48061322afa18cbf1cb48034a3c22175e21ab0ef Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 18 Jun 2015 23:13:27 +0200 Subject: [PATCH 066/254] Fixed MergeCrop layer for U-Net, fixed compile with CUDNN. --- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/mergecrop.cl | 15 +- src/caffe/greentea/greentea.cpp | 17 -- src/caffe/layers/cudnn_conv_layer.cu | 2 +- src/caffe/layers/mergecrop_layer.cu | 12 +- src/caffe/test/test_mergecrop_layer.cpp | 213 +++++++++++++++++++++ 6 files changed, 231 insertions(+), 32 deletions(-) create mode 100644 src/caffe/test/test_mergecrop_layer.cpp diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 1293784e31b..a90ca2cdf0c 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -20,7 +20,7 @@ std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; +std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; @@ -39,7 +39,7 @@ std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h)\n * width_a + w + (h * 2 + 1) * pad_w);\n top[index] = bottom_b[bidx];\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / (channels_a * channels_b * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * channels_a * channels_b * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; +std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; diff --git a/src/caffe/greentea/cl_kernels/mergecrop.cl b/src/caffe/greentea/cl_kernels/mergecrop.cl index 572266e7b9a..cbd48c624ed 100644 --- a/src/caffe/greentea/cl_kernels/mergecrop.cl +++ b/src/caffe/greentea/cl_kernels/mergecrop.cl @@ -8,15 +8,17 @@ __kernel void TEMPLATE(merge_copy_forward, Dtype)( __global Dtype* top, int num, int channels_a, int channels_b, int height_a, int width_a, int height_b, int width_b) { + for (int index = get_global_id(0); index < nthreads; index += get_global_size(0)) { + int pad_h = (height_b - height_a) / 2; int pad_w = (width_b - width_a) / 2; - int batch_id = index / (channels_a * channels_b * height_a * width_a); + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); int bottom_id = ((index - - batch_id * channels_a * channels_b * height_a * width_a) + - batch_id * (channels_a + channels_b) * height_a * width_a) / (channels_a * height_a * width_a)) % 2; int h = ((index / width_a) % height_a); @@ -30,11 +32,12 @@ __kernel void TEMPLATE(merge_copy_forward, Dtype)( } else { int channel_id = (index / ((width_a * height_a)) % channels_b); int bidx = - ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h) - * width_a + w + (h * 2 + 1) * pad_w); + (((batch_id) * channels_b + channel_id) * height_b + * width_b) + width_b * (h + pad_h) + pad_w + w; top[index] = bottom_b[bidx]; } } + } __kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, @@ -46,10 +49,10 @@ __global Dtype* bottom_a, int width_b) { for (int index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int batch_id = index / (channels_a * channels_b * height_a * width_a); + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); int bottom_id = ((index - - batch_id * channels_a * channels_b * height_a * width_a) + - batch_id * (channels_a + channels_b) * height_a * width_a) / (channels_a * height_a * width_a)) % 2; int h = ((index / width_a) % height_a); diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index 08baa0f962f..3a9112db71c 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -10,23 +10,6 @@ namespace caffe { #ifdef USE_GREENTEA -/*template -cl_mem Subregion(cl_mem in, size_t off, size_t size) { - cl_buffer_region* region = new cl_buffer_region(); - region->origin = sizeof(Dtype) * off; - region->size = sizeof(Dtype) * size; - cl_int status; - const cl_mem out = clCreateSubBuffer(in, CL_MEM_READ_WRITE, - CL_BUFFER_CREATE_TYPE_REGION, - region, &status); - std::cout << "SUBREGION: " << status << std::endl; - return out; -} - -template cl_mem Subregion(cl_mem in, size_t off, size_t size); -template cl_mem Subregion(cl_mem in, size_t off, size_t size); -template cl_mem Subregion(cl_mem in, size_t off, size_t size); -template cl_mem Subregion(cl_mem in, size_t off, size_t size);*/ viennacl::ocl::handle WrapHandle(cl_mem in, viennacl::ocl::context &ctx) { diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index dc9affaed28..581febc4546 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -89,7 +89,7 @@ void CuDNNConvolutionLayer::Forward_gpu( // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) - sync_conv_groupsCUDA_KERNEL(1, 1)(); + sync_conv_groups CUDA_KERNEL(1, 1)(); } } diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index c36ef21952c..6710be9e978 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -24,10 +24,10 @@ __global__ void CopyForward(const int nthreads, const Dtype* bottom_a, int pad_h = (height_b - height_a) / 2; int pad_w = (width_b - width_a) / 2; - int batch_id = index / (channels_a * channels_b * height_a * width_a); + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); int bottom_id = ((index - - batch_id * channels_a * channels_b * height_a * width_a) + - batch_id * (channels_a + channels_b) * height_a * width_a) / (channels_a * height_a * width_a)) % 2; int h = ((index / width_a) % height_a); @@ -41,8 +41,8 @@ __global__ void CopyForward(const int nthreads, const Dtype* bottom_a, } else { int channel_id = (index / ((width_a * height_a)) % channels_b); int bidx = - ((((batch_id) * channels_b + channel_id) * height_a + h + pad_h) - * width_a + w + (h * 2 + 1) * pad_w); + (((batch_id) * channels_b + channel_id) * height_b + * width_b) + width_b * (h + pad_h) + pad_w + w; top[index] = bottom_b[bidx]; } } @@ -58,10 +58,10 @@ __global__ void CopyBackward(const int nthreads, Dtype* bottom_a, CUDA_KERNEL_LOOP(index, nthreads) { - int batch_id = index / (channels_a * channels_b * height_a * width_a); + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); int bottom_id = ((index - - batch_id * channels_a * channels_b * height_a * width_a) + - batch_id * (channels_a + channels_b) * height_a * width_a) / (channels_a * height_a * width_a)) % 2; int h = ((index / width_a) % height_a); diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp new file mode 100644 index 00000000000..fcb7d385e1a --- /dev/null +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -0,0 +1,213 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class MergeCropLayerTest : public GPUDeviceTest { + typedef TypeParam Dtype; + + protected: + MergeCropLayerTest() + : blob_bottom_a_(new Blob()), + blob_bottom_b_(new Blob()), + blob_top_(new Blob()) { + } + + virtual void SetUp() { + blob_bottom_a_->Reshape(2, 3, 4, 2); + blob_bottom_b_->Reshape(2, 3, 6, 4); + // fill the values + blob_bottom_vec_.push_back(blob_bottom_a_); + blob_bottom_vec_.push_back(blob_bottom_b_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~MergeCropLayerTest() { + delete blob_bottom_a_; + delete blob_bottom_b_; + delete blob_top_; + } + + void TestForward() { + int a_h = blob_bottom_a_->height(); + int a_w = blob_bottom_a_->width(); + int a_c = blob_bottom_a_->channels(); + + for (int n = 0; n < blob_bottom_a_->num(); ++n) { + for (int c = 0; c < a_c; ++c) { + for (int h = 0; h < a_h; ++h) { + for (int w = 0; w < a_w; ++w) { + blob_bottom_a_->mutable_cpu_data()[w + h * a_w + c * a_h * a_w + + n * a_h * a_w * a_c] = (w + h * 10 + c * 100 + n * 1000 + + 10000); + } + } + } + } + + int b_h = blob_bottom_b_->height(); + int b_w = blob_bottom_b_->width(); + int b_c = blob_bottom_b_->channels(); + + for (int n = 0; n < blob_bottom_b_->num(); ++n) { + for (int c = 0; c < b_c; ++c) { + for (int h = 0; h < b_h; ++h) { + for (int w = 0; w < b_w; ++w) { + blob_bottom_b_->mutable_cpu_data()[w + h * b_w + c * b_h * b_w + + n * b_h * b_w * b_c] = -(w + h * 10 + c * 100 + n * 1000 + + 10000); + } + } + } + } + + LayerParameter layer_param; + MergeCropLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); + EXPECT_EQ( + this->blob_top_->channels(), + this->blob_bottom_a_->channels() + this->blob_bottom_b_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_a_->height()); + EXPECT_EQ(this->blob_top_->width(), 2); + + layer.Forward(blob_bottom_vec_, blob_top_vec_); + + // Test copy from A + int offset = 0; + for (int n = 0; n < blob_bottom_a_->num(); ++n) { + for (int c = 0; c < a_c; ++c) { + for (int h = 0; h < a_h; ++h) { + for (int w = 0; w < a_w; ++w) { + EXPECT_EQ( + (w + h * 10 + c * 100 + n * 1000 + 10000), + blob_top_->cpu_data()[offset + w + h * a_w + c * a_h * a_w]); + } + } + } + offset += a_h * a_w * (a_c + b_c); + } + + // Test copy from B + offset = a_h * a_w * a_c; + for (int n = 0; n < blob_bottom_a_->num(); ++n) { + for (int c = 0; c < b_c; ++c) { + for (int h = 0; h < b_h; ++h) { + for (int w = 0; w < b_w; ++w) { + if (h >= (b_h - a_h) / 2 && h < a_h && w >= (b_w - a_w) / 2 + && w < a_w) { + EXPECT_EQ( + -(w + h * 10 + c * 100 + n * 1000 + 10000), + blob_top_->mutable_cpu_data()[offset + (w - (b_h - a_h) / 2) + + (h - (b_h - a_h) / 2) * a_w + c * a_h * a_w]); + } + } + } + } + offset += a_h * a_w * (a_c + b_c); + } + } + + void TestBackward() { + int a_h = blob_bottom_a_->height(); + int a_w = blob_bottom_a_->width(); + int a_c = blob_bottom_a_->channels(); + + for (int n = 0; n < blob_bottom_a_->num(); ++n) { + for (int c = 0; c < a_c; ++c) { + for (int h = 0; h < a_h; ++h) { + for (int w = 0; w < a_w; ++w) { + blob_bottom_a_->mutable_cpu_data()[w + h * a_w + c * a_h * a_w + + n * a_h * a_w * a_c] = (w + h * 10 + c * 100 + n * 1000 + + 10000); + } + } + } + } + + int b_h = blob_bottom_b_->height(); + int b_w = blob_bottom_b_->width(); + int b_c = blob_bottom_b_->channels(); + + for (int n = 0; n < blob_bottom_b_->num(); ++n) { + for (int c = 0; c < b_c; ++c) { + for (int h = 0; h < b_h; ++h) { + for (int w = 0; w < b_w; ++w) { + blob_bottom_b_->mutable_cpu_data()[w + h * b_w + c * b_h * b_w + + n * b_h * b_w * b_c] = -(w + h * 10 + c * 100 + n * 1000 + + 10000); + } + } + } + } + + LayerParameter layer_param; + MergeCropLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + + layer.Forward(blob_bottom_vec_, blob_top_vec_); + caffe_cpu_copy(blob_top_->count(), blob_top_->cpu_data(), + blob_top_->mutable_cpu_diff()); + + vector propagate_down(blob_bottom_vec_.size(), true); + layer.Backward(blob_top_vec_, propagate_down, blob_bottom_vec_); + + + // Test copy to A + for (int n = 0; n < blob_bottom_a_->num(); ++n) { + for (int c = 0; c < a_c; ++c) { + for (int h = 0; h < a_h; ++h) { + for (int w = 0; w < a_w; ++w) { + EXPECT_EQ((w + h * 10 + c * 100 + n * 1000 + 10000), + blob_bottom_a_->cpu_diff()[w + h * a_w + c * a_h * a_w + + n * a_h * a_w * a_c]); + } + } + } + } + } + + Blob* const blob_bottom_a_; + Blob* const blob_bottom_b_; + Blob* const blob_top_; + + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(MergeCropLayerTest, TestDtypes); + +TYPED_TEST(MergeCropLayerTest, TestSetup){ + typedef TypeParam Dtype; + LayerParameter layer_param; + MergeCropLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_a_->channels() + this->blob_bottom_b_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_a_->height()); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(MergeCropLayerTest, TestForward){ + this->TestForward(); +} + +TYPED_TEST(MergeCropLayerTest, TestBackward){ + this->TestBackward(); +} + +} + // namespace caffe From e2903ebe1ee43880d476e5e8cc408205d8bb2b14 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 20 Jun 2015 02:37:51 +0200 Subject: [PATCH 067/254] Cleanup and OpenCL CUDNN compability fix. --- .../greentea/cl_kernels/convolution_sk.cl | 244 ------------------ src/caffe/layer_factory.cpp | 12 +- 2 files changed, 6 insertions(+), 250 deletions(-) delete mode 100644 src/caffe/greentea/cl_kernels/convolution_sk.cl diff --git a/src/caffe/greentea/cl_kernels/convolution_sk.cl b/src/caffe/greentea/cl_kernels/convolution_sk.cl deleted file mode 100644 index 434af92647d..00000000000 --- a/src/caffe/greentea/cl_kernels/convolution_sk.cl +++ /dev/null @@ -1,244 +0,0 @@ -#ifndef __OPENCL_VERSION__ -#include "header.cl" -#endif - -/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w, - __global const Dtype *in, - __global Dtype *out) { - - const int width = 200; - const int height = 200; - const int kernel_h = 10; - const int kernel_w = 10; - const int fout_count = 1024; - const int fin_count = 192; - const int kstride_h = 8; - const int kstride_w = 8; - const int stride_h = 1; - const int stride_w = 1; - const int batch_size = 1; - const int buff_w = 73; - const int buff_h = 73; - - const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; - const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; - - const int out_h = (height - ext_kernel_h) / stride_h + 1; - const int out_w = (width - ext_kernel_w) / stride_w + 1; - - // Clear the output - { -#pragma unroll 1 - for (int i = - get_global_id( - 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1); - i < fout_count * out_h * out_w; - i += get_global_size(0) * get_global_size(1) * get_global_size(2)) { - out[i] = 0.0; - } - } - - // Local weight buffer (in local memory) - __local Dtype wl[10 * 10]; - // Local input buffer (in local memory) - __local Dtype il[73 * 73]; - // Local accumulators (in registers) - Dtype al[2 * 2]; - - // Across output features -#pragma unroll 1 - for (int fout = get_global_id(2); fout < fout_count; - fout += get_global_size(2)) { - - // Across input features -#pragma unroll 1 - for (int fin = 0; fin < fin_count; ++fin) { - - // Load local weights -#pragma unroll 1 - for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { -#pragma unroll 1 - for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { - wl[j + i * kernel_w] = w[j + i * kernel_w - + fout * fin_count * kernel_h * kernel_w - + fin * kernel_h * kernel_w]; - } - } - - // Across batches (keeps weights in local memory) -#pragma unroll 1 - for (int batch = 0; batch < batch_size; ++batch) { - - const int batch_in_off = batch * width * height * fin_count; - const int batch_out_off = batch * out_w * out_h * fout_count; - - // Shift the patch window across width and height - for (int yoff = 0; yoff < height; yoff += buff_h) { - for (int xoff = 0; xoff < width; xoff += buff_w) { - - // Load image patch -#pragma unroll 1 - for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) { - int yidx = (i + yoff); -#pragma unroll 1 - for (int j = get_local_id(0); j < buff_w; - j += get_local_size(0)) { - int xidx = (j + xoff); - if (xidx < width && yidx < height) { - il[j + i * buff_w] = in[xidx + yidx * width - + fin * width * height + batch_in_off]; - } - } - } - - barrier(CLK_LOCAL_MEM_FENCE); - - // Kernel inner loop -#pragma unroll 1 - for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) { -#pragma unroll 1 - for (int j = get_local_id(0); j < buff_w; - j += get_local_size(0)) { - - // Load accumulators -#pragma unroll 1 - for (int k = 0; k < 4; k++) { - int xidx = (j + xoff - k % 2 * buff_w); - int yidx = (i + yoff - k / 2 * buff_h); - if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) { - al[k] = out[xidx + yidx * out_w + fout * out_w * out_h - + batch_out_off]; - } - } - -#pragma unroll 1 - for (int ki = 0; ki < kernel_h; ++ki) { - int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w; - int alpos_i = (i + ki * kstride_h) / buff_h * 2; -#pragma unroll 10 - for (int kj = 0; kj < kernel_w; ++kj) { - al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj - + ki * kernel_w] - * il[(j + kj * kstride_w) % buff_w + ilpos_i]; - } - } - - // Store accumulators -#pragma unroll 1 - for (int k = 0; k < 4; k++) { - int xidx = (j + xoff - k % 2 * buff_w); - int yidx = (i + yoff - k / 2 * buff_h); - if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) { - out[xidx + yidx * out_w + fout * out_w * out_h - + batch_out_off] = al[k]; - } - } - } - }barrier(CLK_LOCAL_MEM_FENCE); - } - } - } - } - } -}*/ - -// Fits into 32 KB -__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w, - __global const Dtype *in, - __global Dtype *out) { - const int width = 200; - const int height = 200; - const int kernel_h = 10; - const int kernel_w = 10; - const int fout_count = 1024; - const int fin_count = 192; - const int kstride_h = 8; - const int kstride_w = 8; - const int stride_h = 1; - const int stride_w = 1; - const int batch_size = 1; - - const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; - const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; - - const int out_h = (height - ext_kernel_h) / stride_h + 1; - const int out_w = (width - ext_kernel_w) / stride_w + 1; - - // Clear the output - { -#pragma unroll 1 - for (int i = - get_global_id( - 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1); - i < fout_count * out_h * out_w; - i += get_global_size(0) * get_global_size(1) * get_global_size(2)) { - out[i] = 0.0; - } - } - - // Local weight buffer - __local Dtype wl[10 * 10]; - - // Across output features -#pragma unroll 1 - for (int fout = get_global_id(2); fout < fout_count; - fout += get_global_size(2)) { - - // Across input features -#pragma unroll 1 - for (int fin = 0; fin < fin_count; ++fin) { - - // Load local weights -#pragma unroll 1 - for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) { -#pragma unroll 1 - for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) { - wl[j + i * kernel_w] = w[j + i * kernel_w - + fout * fin_count * kernel_h * kernel_w - + fin * kernel_h * kernel_w]; - } - } - - barrier(CLK_LOCAL_MEM_FENCE); - - // Across batches (keeps weights in local memory) -#pragma unroll 1 - for (int batch = 0; batch < batch_size; ++batch) { - - const int batch_in_off = batch * width * height * fin_count; - const int batch_out_off = batch * out_w * out_h * fout_count; - - // Across y-dimension -#pragma unroll 1 - for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1; - yoff += get_global_size(1)) { - - // Across x-dimension -#pragma unroll 1 - for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1; - xoff += get_global_size(0)) { - - Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h - + batch_out_off]; - - // Across the kernel itself -#pragma unroll 10 - for (int i = 0; i < kernel_h; ++i) { -#pragma unroll 10 - for (int j = 0; j < kernel_w; ++j) { - outval = fma( - wl[j + i * kernel_w], - in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width - + fin * width * height + batch_in_off], - outval); - } - } - - out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] = - outval; - } - } - }barrier(CLK_LOCAL_MEM_FENCE); - } - } -} diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index afe9774f0c2..c8999653df4 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -31,7 +31,7 @@ shared_ptr > GetConvolutionLayer(const LayerParameter& param) { engine = ConvolutionParameter_Engine_CUDNN; #endif } - if (engine == ConvolutionParameter_Engine_CAFFE) { + if (engine == ConvolutionParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new ConvolutionLayer(param)); #ifdef USE_CUDNN } else if (engine == ConvolutionParameter_Engine_CUDNN) { @@ -54,7 +54,7 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { engine = PoolingParameter_Engine_CUDNN; #endif } - if (engine == PoolingParameter_Engine_CAFFE) { + if (engine == PoolingParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new PoolingLayer(param)); #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { @@ -84,7 +84,7 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { engine = ReLUParameter_Engine_CUDNN; #endif } - if (engine == ReLUParameter_Engine_CAFFE) { + if (engine == ReLUParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new ReLULayer(param)); #ifdef USE_CUDNN } else if (engine == ReLUParameter_Engine_CUDNN) { @@ -107,7 +107,7 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { engine = SigmoidParameter_Engine_CUDNN; #endif } - if (engine == SigmoidParameter_Engine_CAFFE) { + if (engine == SigmoidParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new SigmoidLayer(param)); #ifdef USE_CUDNN } else if (engine == SigmoidParameter_Engine_CUDNN) { @@ -130,7 +130,7 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { engine = SoftmaxParameter_Engine_CUDNN; #endif } - if (engine == SoftmaxParameter_Engine_CAFFE) { + if (engine == SoftmaxParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new SoftmaxLayer(param)); #ifdef USE_CUDNN } else if (engine == SoftmaxParameter_Engine_CUDNN) { @@ -153,7 +153,7 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { engine = TanHParameter_Engine_CUDNN; #endif } - if (engine == TanHParameter_Engine_CAFFE) { + if (engine == TanHParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new TanHLayer(param)); #ifdef USE_CUDNN } else if (engine == TanHParameter_Engine_CUDNN) { From 114c4b1b993123eba1cb72bc9544bb80b99bcf2f Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 21 Jun 2015 01:56:05 +0200 Subject: [PATCH 068/254] LINT fix. --- Greentea_Building_Blocks.txt | 4 +- include/caffe/blob.hpp | 15 +- include/caffe/caffe.hpp | 3 +- include/caffe/common.hpp | 13 +- include/caffe/data_transformer.hpp | 15 +- include/caffe/filler.hpp | 87 +++--- include/caffe/greentea/cl_kernels.hpp | 4 +- include/caffe/greentea/greentea.hpp | 16 +- include/caffe/greentea/greentea_im2col.hpp | 55 ++-- .../greentea/greentea_math_functions.hpp | 26 +- include/caffe/layer.hpp | 11 +- include/caffe/solver.hpp | 5 +- include/caffe/splitnet/splitnet.hpp | 29 -- include/caffe/syncedmem.hpp | 68 +++-- include/caffe/util/benchmark.hpp | 4 +- include/caffe/util/device_alternate.hpp | 8 +- include/caffe/util/im2col.hpp | 73 ++--- include/caffe/util/math_functions.hpp | 146 +++++---- include/caffe/vision_layers.hpp | 19 +- src/caffe/blob.cpp | 38 ++- src/caffe/common.cpp | 108 ++++--- src/caffe/data_transformer.cpp | 14 +- src/caffe/greentea/cl_kernels.cpp | 173 ++++++----- src/caffe/greentea/cl_kernels.sh | 35 +-- src/caffe/greentea/greentea.cpp | 21 +- src/caffe/greentea/greentea_im2col.cpp | 84 ++--- .../greentea/greentea_math_functions.cpp | 193 ++++++------ src/caffe/layer_factory.cpp | 18 +- src/caffe/layers/absval_layer.cu | 25 +- src/caffe/layers/base_conv_layer.cpp | 60 ++-- src/caffe/layers/base_data_layer.cpp | 3 +- src/caffe/layers/base_data_layer.cu | 15 +- src/caffe/layers/bnll_layer.cu | 40 ++- src/caffe/layers/concat_layer.cu | 28 +- src/caffe/layers/contrastive_loss_layer.cu | 59 ++-- src/caffe/layers/conv_layer.cu | 2 +- src/caffe/layers/conv_sk_layer.cpp | 12 +- src/caffe/layers/conv_sk_layer.cu | 11 +- src/caffe/layers/deconv_layer.cpp | 6 - src/caffe/layers/deconv_layer.cu | 21 +- src/caffe/layers/dropout_layer.cu | 46 ++- src/caffe/layers/eltwise_layer.cu | 84 +++-- src/caffe/layers/euclidean_loss_layer.cu | 8 +- src/caffe/layers/exp_layer.cu | 8 +- src/caffe/layers/filter_layer.cu | 28 +- src/caffe/layers/hdf5_data_layer.cu | 5 +- src/caffe/layers/hdf5_output_layer.cu | 4 +- src/caffe/layers/im2col_layer.cu | 29 +- src/caffe/layers/inner_product_layer.cpp | 56 ++-- src/caffe/layers/inner_product_layer.cu | 8 +- src/caffe/layers/log_layer.cu | 17 +- src/caffe/layers/lrn_layer.cpp | 131 ++++---- src/caffe/layers/lrn_layer.cu | 66 ++-- src/caffe/layers/mergecrop_layer.cpp | 3 +- src/caffe/layers/mergecrop_layer.cu | 47 ++- src/caffe/layers/mvn_layer.cu | 66 ++-- src/caffe/layers/pooling_layer.cu | 112 +++---- src/caffe/layers/pooling_sk_layer.cu | 90 +++--- src/caffe/layers/power_layer.cu | 16 +- src/caffe/layers/prelu_layer.cpp | 12 +- src/caffe/layers/prelu_layer.cu | 56 ++-- src/caffe/layers/reduction_layer.cu | 25 +- src/caffe/layers/relu_layer.cu | 40 ++- .../sigmoid_cross_entropy_loss_layer.cu | 14 +- src/caffe/layers/sigmoid_layer.cu | 41 ++- src/caffe/layers/silence_layer.cu | 4 +- src/caffe/layers/slice_layer.cu | 21 +- src/caffe/layers/softmax_layer.cu | 69 ++--- src/caffe/layers/softmax_loss_layer.cu | 45 +-- src/caffe/layers/split_layer.cu | 6 +- src/caffe/layers/tanh_layer.cu | 41 ++- src/caffe/layers/threshold_layer.cu | 19 +- src/caffe/net.cpp | 33 +- src/caffe/solver.cpp | 82 ++--- src/caffe/syncedmem.cpp | 22 +- src/caffe/test/test_caffe_main.cpp | 6 +- src/caffe/test/test_common.cpp | 14 +- src/caffe/test/test_data_transformer.cpp | 24 +- src/caffe/test/test_im2col_kernel.cu | 7 +- src/caffe/test/test_math_functions.cpp | 51 ++-- src/caffe/test/test_mergecrop_layer.cpp | 24 +- src/caffe/test/test_neuron_layer.cpp | 2 +- src/caffe/test/test_platform.cpp | 4 +- .../test/test_random_number_generator.cpp | 37 ++- src/caffe/test/test_syncedmem.cpp | 39 +-- src/caffe/test/test_util_blas.cpp | 66 ++-- src/caffe/util/benchmark.cpp | 53 ++-- src/caffe/util/im2col.cu | 288 ++++++++++-------- src/caffe/util/math_functions.cpp | 214 ++++++------- src/caffe/util/math_functions.cu | 278 ++++++++--------- src/caffe/util/upgrade_proto.cpp | 9 +- 91 files changed, 2109 insertions(+), 1928 deletions(-) delete mode 100644 include/caffe/splitnet/splitnet.hpp diff --git a/Greentea_Building_Blocks.txt b/Greentea_Building_Blocks.txt index 6d632273211..22aed3de73e 100644 --- a/Greentea_Building_Blocks.txt +++ b/Greentea_Building_Blocks.txt @@ -13,10 +13,10 @@ viennacl::ocl::enqueue( if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA -#endif // USE_GREENTEA +#endif // USE_GREENTEA } #ifdef USE_GREENTEA diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index ebae1086e7a..54d8bc9de34 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -46,8 +46,10 @@ class Blob { device_context_(device_context) { } explicit Blob(const int num, const int channels, const int height, - const int width, DeviceContext device_context = Caffe::GetDefaultDeviceContext()); - explicit Blob(const vector& shape, DeviceContext device_context = Caffe::GetDefaultDeviceContext()); + const int width, DeviceContext device_context = + Caffe::GetDefaultDeviceContext()); + explicit Blob(const vector& shape, DeviceContext device_context = + Caffe::GetDefaultDeviceContext()); /** * @brief Change the dimensions of the blob, allocating new memory if @@ -139,7 +141,9 @@ class Blob { * Dies on out of range index. */ inline int CanonicalAxisIndex(int axis_index) const { - CHECK_GE(axis_index, -num_axes())<<"axis " << axis_index << " out of range for " << num_axes() + CHECK_GE(axis_index, -num_axes()) + <<"axis " << axis_index + << " out of range for " << num_axes() << "-D Blob with shape " << shape_string(); CHECK_LT(axis_index, num_axes()) << "axis " << axis_index << " out of range for " << num_axes() @@ -290,7 +294,7 @@ class Blob { */ DeviceContext device_context(); -protected: + protected: shared_ptr data_; shared_ptr diff_; vector shape_; @@ -302,7 +306,6 @@ class Blob { }; // class Blob -} - // namespace caffe +} // namespace caffe #endif // CAFFE_BLOB_HPP_ diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp index d7c7984697b..b0c9e98d33b 100644 --- a/include/caffe/caffe.hpp +++ b/include/caffe/caffe.hpp @@ -7,6 +7,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" +#include "caffe/greentea/greentea.hpp" #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/net.hpp" @@ -15,6 +16,6 @@ #include "caffe/util/benchmark.hpp" #include "caffe/util/io.hpp" #include "caffe/vision_layers.hpp" -#include "caffe/greentea/greentea.hpp" + #endif // CAFFE_CAFFE_HPP_ diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 9ff3d211e3c..75bc4b53c09 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include // NOLINT(readability/streams) @@ -15,7 +16,7 @@ #include #include // pair #include -#include + #include "caffe/util/device_alternate.hpp" @@ -136,8 +137,8 @@ class Caffe { inline static curandGenerator_t curand_generator() { return Get().curand_generator_; } -#endif // USE_CUDA -#endif // !CPU_ONLY +#endif // USE_CUDA +#endif // !CPU_ONLY // Returns the mode: running on CPU or GPU. inline static Brew mode() { return Get().mode_; } @@ -178,8 +179,8 @@ class Caffe { #ifdef USE_CUDA cublasHandle_t cublas_handle_; curandGenerator_t curand_generator_; -#endif // USE_CUDA -#endif // !CPU_ONLY +#endif // USE_CUDA +#endif // !CPU_ONLY shared_ptr random_generator_; Brew mode_; @@ -192,7 +193,7 @@ class Caffe { #ifdef USE_GREENTEA vector ocl_programs_; viennacl::ocl::program default_ocl_program_; -#endif // USE_GREENTEA +#endif // USE_GREENTEA private: // The private constructor to avoid duplicate instantiation. diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index bbb5a06aa16..ffaeb56a4d9 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -13,11 +13,13 @@ namespace caffe { * @brief Applies common transformations to the input data, such as * scaling, mirroring, substracting the image mean... */ -template +template class DataTransformer { public: - explicit DataTransformer(const TransformationParameter& param, Phase phase, DeviceContext device_context); - virtual ~DataTransformer() {} + explicit DataTransformer(const TransformationParameter& param, Phase phase, + DeviceContext device_context); + virtual ~DataTransformer() { + } /** * @brief Initialize the Random number generations if needed by the @@ -48,7 +50,7 @@ class DataTransformer { * set_cpu_data() is used. See memory_layer.cpp for an example. */ void Transform(const vector & datum_vector, - Blob* transformed_blob); + Blob* transformed_blob); /** * @brief Applies the transformation defined in the data layer's @@ -61,7 +63,7 @@ class DataTransformer { * set_cpu_data() is used. See memory_layer.cpp for an example. */ void Transform(const vector & mat_vector, - Blob* transformed_blob); + Blob* transformed_blob); /** * @brief Applies the transformation defined in the data layer's @@ -124,7 +126,7 @@ class DataTransformer { vector InferBlobShape(const cv::Mat& cv_img); protected: - /** + /** * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). * * @param n @@ -138,7 +140,6 @@ class DataTransformer { // Tranformation parameters TransformationParameter param_; - shared_ptr rng_; Phase phase_; Blob data_mean_; diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index 21c44c017c4..1968ace61c9 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -16,23 +16,27 @@ namespace caffe { /// @brief Fills a Blob with constant or randomly-generated data. -template +template class Filler { public: - explicit Filler(const FillerParameter& param) : filler_param_(param) {} - virtual ~Filler() {} + explicit Filler(const FillerParameter& param) + : filler_param_(param) { + } + virtual ~Filler() { + } virtual void Fill(Blob* blob) = 0; protected: FillerParameter filler_param_; -}; // class Filler - +}; +// class Filler /// @brief Fills a Blob with constant values @f$ x = 0 @f$. -template +template class ConstantFiller : public Filler { public: explicit ConstantFiller(const FillerParameter& param) - : Filler(param) {} + : Filler(param) { + } virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); const int count = blob->count(); @@ -42,36 +46,40 @@ class ConstantFiller : public Filler { data[i] = value; } CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; /// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$. -template +template class UniformFiller : public Filler { public: explicit UniformFiller(const FillerParameter& param) - : Filler(param) {} + : Filler(param) { + } virtual void Fill(Blob* blob) { CHECK(blob->count()); caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), - Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); + Dtype(this->filler_param_.max()), + blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; /// @brief Fills a Blob with Gaussian-distributed values @f$ x = a @f$. -template +template class GaussianFiller : public Filler { public: explicit GaussianFiller(const FillerParameter& param) - : Filler(param) {} + : Filler(param) { + } virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); CHECK(blob->count()); caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), - Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); + Dtype(this->filler_param_.std()), + blob->mutable_cpu_data()); int sparse = this->filler_param_.sparse(); CHECK_GE(sparse, -1); if (sparse >= 0) { @@ -82,7 +90,9 @@ class GaussianFiller : public Filler { CHECK_GE(blob->num_axes(), 1); const int num_outputs = blob->shape(0); Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); - rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int), blob->device_context())); + rand_vec_.reset( + new SyncedMemory(blob->count() * sizeof(int), + blob->device_context())); int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); for (int i = 0; i < blob->count(); ++i) { @@ -98,11 +108,12 @@ class GaussianFiller : public Filler { /** @brief Fills a Blob with values @f$ x \in [0, 1] @f$ * such that @f$ \forall i \sum_j x_{ij} = 1 @f$. */ -template +template class PositiveUnitballFiller : public Filler { public: explicit PositiveUnitballFiller(const FillerParameter& param) - : Filler(param) {} + : Filler(param) { + } virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); DCHECK(blob->count()); @@ -121,7 +132,7 @@ class PositiveUnitballFiller : public Filler { } } CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; @@ -141,28 +152,29 @@ class PositiveUnitballFiller : public Filler { * * TODO(dox): make notation in above comment consistent with rest & use LaTeX. */ -template +template class XavierFiller : public Filler { public: explicit XavierFiller(const FillerParameter& param) - : Filler(param) {} + : Filler(param) { + } virtual void Fill(Blob* blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); int fan_out = blob->count() / blob->channels(); Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { + if (this->filler_param_.variance_norm() + == FillerParameter_VarianceNorm_AVERAGE) { n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { + } else if (this->filler_param_.variance_norm() + == FillerParameter_VarianceNorm_FAN_OUT) { n = fan_out; } Dtype scale = sqrt(Dtype(3) / n); caffe_rng_uniform(blob->count(), -scale, scale, - blob->mutable_cpu_data()); + blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; @@ -183,28 +195,29 @@ class XavierFiller : public Filler { * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this * is currently not the case for inner product layers. */ -template +template class MSRAFiller : public Filler { public: explicit MSRAFiller(const FillerParameter& param) - : Filler(param) {} + : Filler(param) { + } virtual void Fill(Blob* blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); int fan_out = blob->count() / blob->channels(); Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { + if (this->filler_param_.variance_norm() + == FillerParameter_VarianceNorm_AVERAGE) { n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { + } else if (this->filler_param_.variance_norm() + == FillerParameter_VarianceNorm_FAN_OUT) { n = fan_out; } Dtype std = sqrt(Dtype(2) / n); caffe_rng_gaussian(blob->count(), Dtype(0), std, - blob->mutable_cpu_data()); + blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; @@ -214,7 +227,7 @@ class MSRAFiller : public Filler { * Ideally this would be replaced by a factory pattern, but we will leave it * this way for now. */ -template +template Filler* GetFiller(const FillerParameter& param) { const std::string& type = param.type(); if (type == "constant") { @@ -232,7 +245,7 @@ Filler* GetFiller(const FillerParameter& param) { } else { CHECK(false) << "Unknown filler name: " << param.type(); } - return (Filler*)(NULL); + return (Filler*) (NULL); } } // namespace caffe diff --git a/include/caffe/greentea/cl_kernels.hpp b/include/caffe/greentea/cl_kernels.hpp index 02073744dc4..31d42c40d0d 100644 --- a/include/caffe/greentea/cl_kernels.hpp +++ b/include/caffe/greentea/cl_kernels.hpp @@ -4,12 +4,12 @@ #define GREENTEA_CL_KERNELS_HPP_ #include "caffe/greentea/greentea.hpp" #include "viennacl/backend/opencl.hpp" +#include "viennacl/ocl/backend.hpp" #include "viennacl/ocl/context.hpp" #include "viennacl/ocl/device.hpp" #include "viennacl/ocl/platform.hpp" -#include "viennacl/ocl/backend.hpp" namespace caffe { -viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx); +viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx); } #endif #endif diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index cc44a481409..4a00f9bae93 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -19,18 +19,19 @@ #endif #include "CL/cl.h" +#include "viennacl/backend/opencl.hpp" +#include "viennacl/ocl/backend.hpp" #include "viennacl/ocl/context.hpp" #include "viennacl/ocl/device.hpp" #include "viennacl/ocl/platform.hpp" -#include "viennacl/ocl/backend.hpp" -#include "viennacl/backend/opencl.hpp" #include "viennacl/vector.hpp" #endif namespace caffe { #ifdef USE_GREENTEA -viennacl::ocl::handle WrapHandle(cl_mem in, viennacl::ocl::context &ctx); +viennacl::ocl::handle WrapHandle(cl_mem in, + viennacl::ocl::context *ctx); #endif enum Backend { @@ -74,10 +75,13 @@ struct is_same { #endif // Macro to select the single (_float) or double (_double) precision kernel -#define CL_KERNEL_SELECT(kernel) is_same::value ? kernel "_float" : kernel "_double" +#define CL_KERNEL_SELECT(kernel) \ + is_same::value ? \ + kernel "_float" : \ + kernel "_double" #endif -} +} // namespace caffe -#endif /* CAFFE_GREENTEA_HPP_ */ +#endif /* CAFFE_GREENTEA_HPP_ */ diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index c7188f0cab1..b5fad1335b6 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -11,54 +11,57 @@ #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" +#include "viennacl/ocl/backend.hpp" #include "viennacl/ocl/context.hpp" #include "viennacl/ocl/device.hpp" #include "viennacl/ocl/platform.hpp" -#include "viennacl/ocl/backend.hpp" #include "viennacl/vector.hpp" namespace caffe { template -void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_im, - const int data_offset, const int channels, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - cl_mem data_col); +void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + const cl_mem data_im, const int data_offset, + const int channels, const int height, + const int width, const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, cl_mem data_col); template -void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_col, - const int channels, const int height, - const int width, const int patch_h, - const int patch_w, const int pad_h, const int pad_w, +void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + const cl_mem data_col, const int channels, + const int height, const int width, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int kstride_h, const int kstride_w, cl_mem data_im, const int data_offset); template -void greentea_im2col_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_im, const int data_im_off, +void greentea_im2col_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + const cl_mem data_im, const int data_im_off, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, - const int stride_w, cl_mem data_col, const int data_col_off); + const int stride_w, cl_mem data_col, + const int data_col_off); template -void greentea_col2im_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_col, const int data_col_off, +void greentea_col2im_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + const cl_mem data_col, const int data_col_off, const int channels, const int height, const int width, const int patch_h, const int patch_w, const int pad_h, const int pad_w, const int stride_h, - const int stride_w, cl_mem data_im, const int data_im_off); - - + const int stride_w, cl_mem data_im, + const int data_im_off); -} +} // namespace caffe -#endif // USE_GREENTEA -#endif /* GREENTEA_IM2COL_HPP_ */ +#endif // USE_GREENTEA +#endif /* GREENTEA_IM2COL_HPP_ */ diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index e052fc9c1cb..29d02a71e63 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -10,10 +10,10 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/util/math_functions.hpp" +#include "viennacl/ocl/backend.hpp" #include "viennacl/ocl/context.hpp" #include "viennacl/ocl/device.hpp" #include "viennacl/ocl/platform.hpp" -#include "viennacl/ocl/backend.hpp" #include "viennacl/vector.hpp" namespace caffe { @@ -22,25 +22,26 @@ void greentea_memset(const int ctx_id, const size_t N, const int alpha, cl_mem X, const int offX); void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, - void *Y, viennacl::ocl::context &ctx); + void *Y, viennacl::ocl::context *ctx); void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, - const int offY, viennacl::ocl::context &ctx); + const int offY, viennacl::ocl::context *ctx); void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, - cl_mem Y, const int offY, viennacl::ocl::context &ctx); + cl_mem Y, const int offY, + viennacl::ocl::context *ctx); template -void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); +void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, + const int offY, viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, Dtype* Y, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_copy(const int N, const Dtype* X, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, @@ -127,8 +128,7 @@ void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, template void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, - const int offy); + const int offa, cl_mem y, const int offy); template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, @@ -149,7 +149,7 @@ template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, const Dtype sigma, cl_mem r, const int offr); -} +} // namespace caffe -#endif // USE GREENTEA -#endif /* GREENTEA_MATH_FUNCTIONS_HPP_ */ +#endif // USE GREENTEA +#endif /* GREENTEA_MATH_FUNCTIONS_HPP_ */ diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 6bed9aa9e7d..892d8affefa 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -376,7 +376,8 @@ class Layer { virtual void CheckBlobCounts(const vector*>& bottom, const vector*>& top) { if (ExactNumBottomBlobs() >= 0) { - CHECK_EQ(ExactNumBottomBlobs(), bottom.size())<< type() << " Layer takes " << ExactNumBottomBlobs() + CHECK_EQ(ExactNumBottomBlobs(), bottom.size())<< type() + << " Layer takes " << ExactNumBottomBlobs() << " bottom blob(s) as input."; } if (MinBottomBlobs() >= 0) { @@ -471,7 +472,7 @@ inline Dtype Layer::Forward(const vector*>& bottom, caffe_gpu_dot(count, data, loss_weights, &blob_loss); loss += blob_loss; } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA for (int top_id = 0; top_id < top.size(); ++top_id) { @@ -479,14 +480,14 @@ inline Dtype Layer::Forward(const vector*>& bottom, continue; } const int count = top[top_id]->count(); - cl_mem data = (cl_mem)(top[top_id]->gpu_data()); - cl_mem loss_weights = (cl_mem)(top[top_id]->gpu_diff()); + cl_mem data = (cl_mem) (top[top_id]->gpu_data()); + cl_mem loss_weights = (cl_mem) (top[top_id]->gpu_diff()); Dtype blob_loss = 0; greentea_gpu_dot(this->device_context_.id(), count, data, 0, loss_weights, 0, &blob_loss); loss += blob_loss; } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } #endif break; diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 4537870002a..f3bafae1bca 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -9,7 +9,7 @@ namespace caffe { /** - * @brief An interface for classes that perform optimization on Net%s. + * @brief An interface for classes that perform optimization on Nets. * * Requires implementation of ApplyUpdate to compute a parameter update * given the current state of the Net parameters. @@ -141,7 +141,8 @@ class AdaGradSolver : public SGDSolver { protected: virtual void ComputeUpdateValue(int param_id, Dtype rate); void constructor_sanity_check() { - CHECK_EQ(0, this->param_.momentum())<< "Momentum cannot be used with AdaGrad."; + CHECK_EQ(0, this->param_.momentum()) + << "Momentum cannot be used with AdaGrad."; } DISABLE_COPY_AND_ASSIGN(AdaGradSolver); diff --git a/include/caffe/splitnet/splitnet.hpp b/include/caffe/splitnet/splitnet.hpp deleted file mode 100644 index ca5349f458a..00000000000 --- a/include/caffe/splitnet/splitnet.hpp +++ /dev/null @@ -1,29 +0,0 @@ -/* - * splitnet.hpp - * - * Created on: Apr 5, 2015 - * Author: fabian - */ - -#ifndef CAFFE_SPLITNET_HPP_ -#define CAFFE_SPLITNET_HPP_ - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/layer.hpp" -#include "caffe/proto/caffe.pb.h" - -namespace caffe { - -template -class Splitnet { - public: - Splitnet(); - private: - -}; - - -} - -#endif /* CAFFE_SPLITNET_HPP_ */ diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 59f1097c649..d817e64fb62 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -33,7 +33,6 @@ inline void CaffeFreeHost(void* ptr) { free(ptr); } - /** * @brief Manages memory allocation and synchronization between the host (CPU) * and device (GPU). @@ -44,36 +43,62 @@ class SyncedMemory { public: #ifdef USE_GREENTEA SyncedMemory() - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), - own_cpu_data_(false), device_context_(Caffe::GetDefaultDeviceContext()), cl_gpu_mem_(NULL) {} - SyncedMemory(DeviceContext device_context) - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), - own_cpu_data_(false), device_context_(device_context), cl_gpu_mem_(NULL) {} + : cpu_ptr_(NULL), + gpu_ptr_(NULL), + size_(0), + head_(UNINITIALIZED), + own_cpu_data_(false), + device_context_(Caffe::GetDefaultDeviceContext()), + cl_gpu_mem_(NULL) { + } + explicit SyncedMemory(DeviceContext device_context) + : cpu_ptr_(NULL), + gpu_ptr_(NULL), + size_(0), + head_(UNINITIALIZED), + own_cpu_data_(false), + device_context_(device_context), + cl_gpu_mem_(NULL) { + } explicit SyncedMemory(size_t size, DeviceContext device_context) - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), - own_cpu_data_(false), device_context_(device_context), cl_gpu_mem_(NULL) {} + : cpu_ptr_(NULL), + gpu_ptr_(NULL), + size_(size), + head_(UNINITIALIZED), + own_cpu_data_(false), + device_context_(device_context), + cl_gpu_mem_(NULL) { + } #else SyncedMemory() - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), - own_cpu_data_(false), device_context_(Caffe::GetDefaultDeviceContext()) {} - SyncedMemory(DeviceContext device_context) - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), - own_cpu_data_(false), device_context_(device_context) {} + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false), device_context_(Caffe::GetDefaultDeviceContext()) {} + explicit SyncedMemory(DeviceContext device_context) + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false), device_context_(device_context) {} explicit SyncedMemory(size_t size, DeviceContext device_context) - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), - own_cpu_data_(false), device_context_(device_context) {} + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), + own_cpu_data_(false), device_context_(device_context) {} #endif - ~SyncedMemory(); const void* cpu_data(); void set_cpu_data(void* data); const void* gpu_data(); void* mutable_cpu_data(); void* mutable_gpu_data(); - enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }; - SyncedHead head() { return head_; } - size_t size() { return size_; } + enum SyncedHead { + UNINITIALIZED, + HEAD_AT_CPU, + HEAD_AT_GPU, + SYNCED + }; + SyncedHead head() { + return head_; + } + size_t size() { + return size_; + } private: void to_cpu(); @@ -89,8 +114,9 @@ class SyncedMemory { cl_mem cl_gpu_mem_; #endif - DISABLE_COPY_AND_ASSIGN(SyncedMemory); -}; // class SyncedMemory +DISABLE_COPY_AND_ASSIGN(SyncedMemory); +}; +// class SyncedMemory } // namespace caffe diff --git a/include/caffe/util/benchmark.hpp b/include/caffe/util/benchmark.hpp index 55dc27d5583..ba2a34156e3 100644 --- a/include/caffe/util/benchmark.hpp +++ b/include/caffe/util/benchmark.hpp @@ -31,8 +31,8 @@ class Timer { #ifdef USE_CUDA cudaEvent_t start_gpu_; cudaEvent_t stop_gpu_; -#endif // USE_CUDA -#endif // !CPU_ONLY +#endif // USE_CUDA +#endif // !CPU_ONLY boost::posix_time::ptime start_cpu_; boost::posix_time::ptime stop_cpu_; float elapsed_milliseconds_; diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 323a14b1055..f7ea5c7f9b9 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -30,7 +30,7 @@ void classname::funcname##_##gpu(const vector*>& top, \ const vector*>& bottom) { NO_GPU; } \ #else // Normal GPU + CPU Caffe. -#ifdef USE_CUDA // Include CUDA macros and headers only if the backend is enabled +#ifdef USE_CUDA // Include CUDA macros and headers only if enabled #include #include @@ -91,7 +91,9 @@ const char* curandGetErrorString(curandStatus_t error); const int CAFFE_CUDA_NUM_THREADS = 512; #endif +// CDT hacks: allow proper code formatting and remove errors in CDT #ifdef __CDT_PARSER__ +#include "device_launch_parameters.h" #define CUDA_KERNEL(...) #else #define CUDA_KERNEL(...) <<< __VA_ARGS__ >>> @@ -105,7 +107,7 @@ inline int CAFFE_GET_BLOCKS(const int N) { } // namespace caffe -#endif // USE_CUDA -#endif // CPU_ONLY +#endif // USE_CUDA +#endif // !CPU_ONLY #endif // CAFFE_UTIL_DEVICE_ALTERNATE_H_ diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index cbd4f8686d2..7cf05a84ade 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -3,42 +3,43 @@ namespace caffe { -template -void im2col_cpu(const Dtype* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_col); - -template -void col2im_cpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im); - -template -void im2col_sk_gpu(const Dtype* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, Dtype* data_col); - -template -void im2col_gpu(const Dtype* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_col); - -template -void col2im_sk_gpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, - Dtype* data_im); - -template -void col2im_gpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im); +template +void im2col_cpu(const Dtype* data_im, const int channels, const int height, + const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_col); + +template +void col2im_cpu(const Dtype* data_col, const int channels, const int height, + const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im); + +template +void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, + const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_col); + +template +void im2col_gpu(const Dtype* data_im, const int channels, const int height, + const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_col); + +template +void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, + const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_im); + +template +void col2im_gpu(const Dtype* data_col, const int channels, const int height, + const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im); } // namespace caffe diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index b8b7b0a42df..c1cca3a9f1b 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -14,103 +14,101 @@ namespace caffe { // Caffe gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. -template -void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, - const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, - Dtype* C); +template +void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, + const int M, const int N, const int K, const Dtype alpha, + const Dtype* A, const Dtype* B, const Dtype beta, Dtype* C); -template +template void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, - const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, - Dtype* y); + const Dtype alpha, const Dtype* A, const Dtype* x, + const Dtype beta, Dtype* y); -template -void caffe_axpy(const int N, const Dtype alpha, const Dtype* X, - Dtype* Y); +template +void caffe_axpy(const int N, const Dtype alpha, const Dtype* X, Dtype* Y); -template +template void caffe_cpu_axpby(const int N, const Dtype alpha, const Dtype* X, - const Dtype beta, Dtype* Y); + const Dtype beta, Dtype* Y); -template +template void caffe_cpu_copy(const int N, const Dtype* X, Dtype* Y); -template +template void caffe_copy(const int N, const Dtype *X, Dtype *Y); -template +template void caffe_set(const int N, const Dtype alpha, Dtype *X); inline void caffe_memset(const size_t N, const int alpha, void* X) { memset(X, alpha, N); // NOLINT(caffe/alt_fn) } -template +template void caffe_add_scalar(const int N, const Dtype alpha, Dtype *X); -template +template void caffe_scal(const int N, const Dtype alpha, Dtype *X); -template +template void caffe_sqr(const int N, const Dtype* a, Dtype* y); -template +template void caffe_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); unsigned int caffe_rng_rand(); -template +template Dtype caffe_nextafter(const Dtype b); void caffe_rng_uniform(const int n, unsigned int* r); -template +template void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); -template +template void caffe_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, Dtype* r); -template +template void caffe_rng_bernoulli(const int n, const Dtype p, int* r); -template +template void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); -template +template void caffe_exp(const int n, const Dtype* a, Dtype* y); -template +template void caffe_log(const int n, const Dtype* a, Dtype* y); -template +template void caffe_abs(const int n, const Dtype* a, Dtype* y); -template +template Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y); -template +template Dtype caffe_cpu_strided_dot(const int n, const Dtype* x, const int incx, - const Dtype* y, const int incy); + const Dtype* y, const int incy); -template +template int caffe_cpu_hamming_distance(const int n, const Dtype* x, const Dtype* y); // Returns the sum of the absolute values of the elements of vector x -template +template Dtype caffe_cpu_asum(const int n, const Dtype* x); // the branchless, type-safe version from @@ -142,12 +140,12 @@ DEFINE_CAFFE_CPU_UNARY_FUNC(sign, y[i] = caffe_sign(x[i])); // The name sngbit is meant to avoid conflicts with std::signbit in the macro. // The extra parens are needed because CUDA < 6.5 defines signbit as a macro, // and we don't want that to expand here when CUDA headers are also included. -DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, \ - y[i] = static_cast((std::signbit)(x[i]))); +DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, + y[i] = static_cast((std::signbit)(x[i]))); DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i])); -template +template void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); #ifndef CPU_ONLY // GPU @@ -156,68 +154,66 @@ void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); // Decaf gpu gemm provides an interface that is almost the same as the cpu // gemm function - following the c convention and calling the fortran-order // gpu code under the hood. -template -void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, - const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, - Dtype* C); +template +void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, + const int M, const int N, const int K, const Dtype alpha, + const Dtype* A, const Dtype* B, const Dtype beta, Dtype* C); -template +template void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, - const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, - Dtype* y); + const Dtype alpha, const Dtype* A, const Dtype* x, + const Dtype beta, Dtype* y); -template -void caffe_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, - Dtype* Y); +template +void caffe_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, Dtype* Y); -template +template void caffe_gpu_axpby(const int N, const Dtype alpha, const Dtype* X, - const Dtype beta, Dtype* Y); + const Dtype beta, Dtype* Y); void caffe_gpu_memcpy(const size_t N, const void *X, void *Y); -template +template void caffe_gpu_set(const int N, const Dtype alpha, Dtype *X); inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT(caffe/alt_fn) -#endif // USE_CUDA +#endif // USE_CUDA #else NO_GPU; #endif } -template +template void caffe_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X); -template +template void caffe_gpu_scal(const int N, const Dtype alpha, Dtype *X); -template +template void caffe_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_gpu_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_gpu_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_gpu_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); -template +template void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); -template +template void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); -template +template void caffe_gpu_log(const int n, const Dtype* a, Dtype* y); -template +template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); // caffe_gpu_rng_uniform with two arguments generates integers in the range @@ -229,24 +225,24 @@ void caffe_gpu_rng_uniform(const int n, unsigned int* r); // specification of curandGenerateUniform. With a = 0, b = 1, just calls // curandGenerateUniform; with other limits will shift and scale the outputs // appropriately after calling curandGenerateUniform. -template +template void caffe_gpu_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); -template +template void caffe_gpu_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, Dtype* r); -template +template void caffe_gpu_rng_bernoulli(const int n, const Dtype p, int* r); -template +template void caffe_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out); -template +template uint32_t caffe_gpu_hamming_distance(const int n, const Dtype* x, const Dtype* y); -template +template void caffe_gpu_asum(const int n, const Dtype* x, Dtype* y); template @@ -255,10 +251,10 @@ void caffe_gpu_sign(const int n, const Dtype* x, Dtype* y); template void caffe_gpu_sgnbit(const int n, const Dtype* x, Dtype* y); -template +template void caffe_gpu_fabs(const int n, const Dtype* x, Dtype* y); -template +template void caffe_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); #define DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(name, operation) \ @@ -281,8 +277,8 @@ void caffe_gpu_##name(const int n, const double* x, double* y) { \ n, x, y); \ } -#endif // USE_CUDA -#endif // !CPU_ONLY +#endif // USE_CUDA +#endif // !CPU_ONLY } // namespace caffe diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index c325b2dec89..67e1e6afb6e 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -56,7 +56,6 @@ class MergeCropLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - }; /** @@ -153,7 +152,7 @@ class BaseConvolutionLayer : public Layer { kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); } -#endif // USE_CUDA +#endif // USE_CUDA #ifdef USE_GREENTEA inline void greentea_conv_im2col_gpu(const Dtype* data, const int data_off, Dtype* col_buff, @@ -162,7 +161,7 @@ class BaseConvolutionLayer : public Layer { this->device_context_.id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_.id()); - greentea_im2col_gpu(program, ctx, (cl_mem) data, data_off, + greentea_im2col_gpu(&program, &ctx, (cl_mem) data, data_off, conv_in_channels_, conv_in_height_, conv_in_width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, (cl_mem) col_buff, @@ -175,14 +174,14 @@ class BaseConvolutionLayer : public Layer { this->device_context_.id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_.id()); - greentea_col2im_gpu(program, ctx, (cl_mem) col_buff, col_buff_off, + greentea_col2im_gpu(&program, &ctx, (cl_mem) col_buff, col_buff_off, conv_in_channels_, conv_in_height_, conv_in_width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, (cl_mem) data, data_off); } -#endif // USE_GREENTEA -#endif // !CPU_ONLY +#endif // USE_GREENTEA +#endif // !CPU_ONLY int conv_out_channels_; int conv_in_channels_; @@ -376,7 +375,7 @@ class DeconvolutionLayer : public BaseConvolutionLayer { */ template class CuDNNConvolutionLayer : public ConvolutionLayer { -public: + public: explicit CuDNNConvolutionLayer(const LayerParameter& param) : ConvolutionLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, @@ -385,7 +384,7 @@ class CuDNNConvolutionLayer : public ConvolutionLayer { const vector*>& top); virtual ~CuDNNConvolutionLayer(); -protected: + protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, @@ -662,7 +661,7 @@ class PoolingLayer : public Layer { */ template class CuDNNPoolingLayer : public PoolingLayer { -public: + public: explicit CuDNNPoolingLayer(const LayerParameter& param) : PoolingLayer(param), handles_setup_(false) {} virtual void LayerSetUp(const vector*>& bottom, @@ -674,7 +673,7 @@ class CuDNNPoolingLayer : public PoolingLayer { virtual inline int MinTopBlobs() const {return -1;} virtual inline int ExactNumTopBlobs() const {return 1;} -protected: + protected: virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); virtual void Backward_gpu(const vector*>& top, diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 2b2fe1cb20e..92f84ae2652 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -61,16 +61,14 @@ template Blob::Blob(const int num, const int channels, const int height, const int width, DeviceContext device_context) // capacity_ must be initialized before calling Reshape - : capacity_(0), - device_context_(device_context) { + : capacity_(0), device_context_(device_context) { Reshape(num, channels, height, width); } template Blob::Blob(const vector& shape, DeviceContext device_context) // capacity_ must be initialized before calling Reshape - : capacity_(0), - device_context_(device_context) { + : capacity_(0), device_context_(device_context) { Reshape(shape); } @@ -492,7 +490,8 @@ bool Blob::ShapeEquals(const BlobProto& other) { // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)). return shape_.size() <= 4 && LegacyShape(-4) == other.num() && LegacyShape(-3) == other.channels() - && LegacyShape(-2) == other.height() && LegacyShape(-1) == other.width(); + && LegacyShape(-2) == other.height() + && LegacyShape(-1) == other.width(); } vector other_shape(other.shape().dim_size()); for (int i = 0; i < other.shape().dim_size(); ++i) { @@ -503,7 +502,6 @@ bool Blob::ShapeEquals(const BlobProto& other) { template void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { - if (source.count() != count_ || source.shape() != shape_) { if (reshape) { ReshapeLike(source); @@ -516,23 +514,23 @@ void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { if (device_context_.backend() == BACKEND_CUDA) { if (copy_diff) { caffe_copy(count_, source.gpu_diff(), - static_cast(diff_->mutable_gpu_data())); + static_cast(diff_->mutable_gpu_data())); } else { caffe_copy(count_, source.gpu_data(), - static_cast(data_->mutable_gpu_data())); + static_cast(data_->mutable_gpu_data())); } } else { #ifdef USE_GREENTEA if (copy_diff) { greentea_copy( - count_, (cl_mem) (source.gpu_diff()),0, - (cl_mem) (diff_->mutable_gpu_data()),0, - viennacl::ocl::get_context(device_context_.id())); + count_, (cl_mem) (source.gpu_diff()), 0, + (cl_mem) (diff_->mutable_gpu_data()), 0, + &viennacl::ocl::get_context(device_context_.id())); } else { greentea_copy( - count_, (cl_mem) (source.gpu_data()),0, - (cl_mem) (data_->mutable_gpu_data()),0, - viennacl::ocl::get_context(device_context_.id())); + count_, (cl_mem) (source.gpu_data()), 0, + (cl_mem) (data_->mutable_gpu_data()), 0, + &viennacl::ocl::get_context(device_context_.id())); } #endif } @@ -541,17 +539,17 @@ void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { case Caffe::CPU: { if (copy_diff) { caffe_cpu_copy(count_, source.cpu_diff(), - static_cast(diff_->mutable_cpu_data())); + static_cast(diff_->mutable_cpu_data())); } else { caffe_cpu_copy(count_, source.cpu_data(), - static_cast(data_->mutable_cpu_data())); + static_cast(data_->mutable_cpu_data())); } break; } default: - LOG(FATAL)<< "Unknown caffe mode."; - } + LOG(FATAL)<< "Unknown caffe mode."; } +} template void Blob::FromProto(const BlobProto& proto, bool reshape) { @@ -610,8 +608,8 @@ void Blob::ToProto(BlobProto* proto, bool write_diff) const { } INSTANTIATE_CLASS(Blob); -template class Blob ; -template class Blob ; +template class Blob; +template class Blob; } // namespace caffe diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 1feb67edf2c..646feaa823c 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include "caffe/common.hpp" #include "caffe/util/rng.hpp" @@ -10,7 +11,7 @@ #include "caffe/greentea/cl_kernels.hpp" #ifdef USE_CLBLAS #include -#endif // USE_CLBLAS +#endif // USE_CLBLAS #endif namespace caffe { @@ -67,11 +68,11 @@ void Caffe::DeviceQuery() { } class Caffe::RNG::Generator { -public: + public: Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {} caffe::rng_t* rng() {return rng_.get();} -private: + private: shared_ptr rng_; }; @@ -95,7 +96,7 @@ Caffe::Caffe() #ifdef USE_CUDA cublas_handle_(NULL), curand_generator_(NULL), -#endif // USE_CUDA +#endif // USE_CUDA random_generator_(), mode_(Caffe::CPU) { // Try to create a cublas handler, and report an error if failed (but we will @@ -111,7 +112,7 @@ Caffe::Caffe() != CURAND_STATUS_SUCCESS) { LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; } -#endif // USE_CUDA +#endif // USE_CUDA } Caffe::~Caffe() { @@ -121,7 +122,7 @@ Caffe::~Caffe() { if (curand_generator_) { CURAND_CHECK(curandDestroyGenerator(curand_generator_)); } -#endif // USE_CUDA +#endif // USE_CUDA } void Caffe::set_random_seed(const unsigned int seed) { @@ -140,11 +141,11 @@ void Caffe::set_random_seed(const unsigned int seed) { g_curand_availability_logged = true; } } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA // TODO: Proper RNG and Seed for OpenCL -#endif // USE_GREENTEA +#endif // USE_GREENTEA } // RNG seed Get().random_generator_.reset(new RNG(seed)); @@ -153,9 +154,9 @@ void Caffe::set_random_seed(const unsigned int seed) { void Caffe::Synchronize(int device_id) { #ifdef USE_GREENTEA DeviceContext& device_context = Caffe::GetDeviceContext(device_id); - if ( device_context.backend() == BACKEND_OpenCL ) { + if (device_context.backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( - GetDeviceContext(device_id).id()); + GetDeviceContext(device_id).id()); ctx.get_queue().finish(); } #else @@ -175,7 +176,8 @@ void Caffe::EnumerateDevices() { typedef std::vector platforms_type; platforms_type platforms = viennacl::ocl::get_platforms(); - std::vector> platform_devices; + std::vector> platform_devices; // Loop through devices for (std::size_t platform_id = 0; platform_id < platforms.size(); @@ -193,60 +195,66 @@ void Caffe::EnumerateDevices() { LOG(INFO)<< "Total devices: " << cuda_device_count + greentea_device_count; #ifdef USE_CUDA LOG(INFO)<< "CUDA devices: " << cuda_device_count; -#endif // USE_CUDA +#endif // USE_CUDA #ifdef USE_GREENTEA LOG(INFO)<< "OpenCL devices: " << greentea_device_count; -#endif // USE_GREENTEA +#endif // USE_GREENTEA // Display info for all devices #ifdef USE_CUDA for (int i = 0; i < cuda_device_count; ++i) { cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, i)); - LOG(INFO)<< "Device id: " << i; - LOG(INFO)<< "Device backend: " << "CUDA"; - LOG(INFO)<< "Backend details: " << "CUDA"; - LOG(INFO)<< "Device vendor: " << "NVIDIA Corporation"; - LOG(INFO)<< "Name: " << prop.name; - LOG(INFO)<< "Total global memory: " << prop.totalGlobalMem; + LOG(INFO)<< "Device id: " + << i; + LOG(INFO)<< "Device backend: " + << "CUDA"; + LOG(INFO)<< "Backend details: " + << "CUDA"; + LOG(INFO)<< "Device vendor: " + << "NVIDIA Corporation"; + LOG(INFO)<< "Name: " + << prop.name; + LOG(INFO)<< "Total global memory: " + << prop.totalGlobalMem; } -#endif /// USE_CUDA +#endif // USE_CUDA #ifdef USE_GREENTEA for (int i = 0; i < greentea_device_count; ++i) { - LOG(INFO)<< "Device id: " << cuda_device_count + i; - LOG(INFO)<< "Device backend: " << "OpenCL"; - LOG(INFO)<< "Backend details: " << std::get<0>(platform_devices[i]).info(); - LOG(INFO)<< "Device vendor: " << std::get<1>(platform_devices[i]).vendor(); - LOG(INFO)<< "Name: " << std::get<1>(platform_devices[i]).name(); - LOG(INFO)<< "Total global memory: " << std::get<1>(platform_devices[i]).global_mem_size(); + LOG(INFO)<< "Device id: " + << cuda_device_count + i; + LOG(INFO)<< "Device backend: " + << "OpenCL"; + LOG(INFO)<< "Backend details: " + << std::get<0>(platform_devices[i]).info(); + LOG(INFO)<< "Device vendor: " + << std::get<1>(platform_devices[i]).vendor(); + LOG(INFO)<< "Name: " + << std::get<1>(platform_devices[i]).name(); + LOG(INFO)<< "Total global memory: " + << std::get<1>(platform_devices[i]).global_mem_size(); } -#endif // USE_GREENTEA - +#endif // USE_GREENTEA } void Caffe::SetDevices(std::vector device_ids) { - Get().device_contexts_.clear(); - #ifdef USE_GREENTEA Get().ocl_programs_.clear(); #endif - int cuda_device_count = 0; int greentea_device_count = 0; - #ifdef USE_CUDA cudaGetDeviceCount(&cuda_device_count); -#endif // USE_CUDA - +#endif // USE_CUDA for (int i = 0; i < cuda_device_count; ++i) { Get().device_contexts_.push_back(DeviceContext(i, Backend::BACKEND_CUDA)); #ifdef USE_GREENTEA // Dummy to have same vector size as device contexts viennacl::ocl::program program; Get().ocl_programs_.push_back(program); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } // Initialize GreenTea devices @@ -254,7 +262,8 @@ void Caffe::SetDevices(std::vector device_ids) { typedef std::vector platforms_type; platforms_type platforms = viennacl::ocl::get_platforms(); - std::vector> platform_devices; + std::vector> platform_devices; // Loop through devices for (std::size_t platform_id = 0; platform_id < platforms.size(); @@ -276,11 +285,12 @@ void Caffe::SetDevices(std::vector device_ids) { viennacl::ocl::setup_context( device_id, std::get<1>(platform_devices[greentea_device_count])); viennacl::ocl::context ctx = viennacl::ocl::get_context( - static_cast(device_id)); - viennacl::ocl::program & program = RegisterKernels(ctx); + static_cast(device_id)); + viennacl::ocl::program & program = RegisterKernels(&ctx); Get().ocl_programs_.push_back(program); - //viennacl::ocl::switch_context(device_id); - //viennacl::ocl::switch_device(std::get<1>(platform_devices[device_id - cuda_device_count])); + // viennacl::ocl::switch_context(device_id); + // viennacl::ocl::switch_device(std::get<1> + // (platform_devices[device_id - cuda_device_count])); is_used = true; } } @@ -292,9 +302,7 @@ void Caffe::SetDevices(std::vector device_ids) { greentea_device_count++; } } - -#endif // USE_GREENTEA - +#endif // USE_GREENTEA } DeviceContext& Caffe::GetDeviceContext(int id) { @@ -305,7 +313,7 @@ DeviceContext& Caffe::GetDeviceContext(int id) { viennacl::ocl::program & Caffe::GetDeviceProgram(int id) { return id == -1 ? Get().default_ocl_program_ : Get().ocl_programs_[id]; } -#endif // USE_GREENTEA +#endif // USE_GREENTEA DeviceContext& Caffe::GetDefaultDeviceContext() { return Get().default_device_context_; @@ -340,13 +348,13 @@ void Caffe::SetDevice(const int device_id) { CURAND_CHECK( curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, cluster_seedgen())); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA #ifdef USE_CLBLAS clblasSetup(); -#endif // USE_CLBLAS -#endif // USE_GREENTEA +#endif // USE_CLBLAS +#endif // USE_GREENTEA } } @@ -385,11 +393,11 @@ void Caffe::DeviceQuery() { LOG(INFO)<< "Kernel execution timeout: " << (prop.kernelExecTimeoutEnabled ? "Yes" : "No"); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA // TODO: Complete OpenCL device information of current device -#endif // USE_GREENTEA +#endif // USE_GREENTEA } return; @@ -489,7 +497,7 @@ const char* curandGetErrorString(curandStatus_t error) { } return "Unknown curand status"; } -#endif // USE_CUDA +#endif // USE_CUDA #endif // CPU_ONLY diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 838a4075458..ba389e69746 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -12,7 +12,8 @@ namespace caffe { template DataTransformer::DataTransformer(const TransformationParameter& param, - Phase phase, DeviceContext device_context) + Phase phase, + DeviceContext device_context) : param_(param), phase_(phase), device_context_(device_context) { // check if we want to use mean_file @@ -234,7 +235,8 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK_GE(num, 1); // (FTschopp) Fixed for float data - CHECK(cv_img.depth() == CV_8U || cv_img.depth() == CV_32F) << "Image data type must be unsigned byte or 4 byte float"; + CHECK(cv_img.depth() == CV_8U || cv_img.depth() == CV_32F) + << "Image data type must be unsigned byte or 4 byte float"; const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); @@ -301,11 +303,11 @@ void DataTransformer::Transform(const cv::Mat& cv_img, } // int top_index = (c * height + h) * width + w; Dtype pixel; - if(cv_img.depth() == CV_8U) { + if (cv_img.depth() == CV_8U) { pixel = static_cast(ptr[img_index++]); - } - else { - pixel = static_cast(((float*)ptr)[img_index++]); + } else { + pixel = static_cast((reinterpret_cast(ptr)) + [img_index++]); } if (has_mean_file) { int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index a90ca2cdf0c..5af538a716b 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -4,99 +4,96 @@ #include #include namespace caffe { -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; -std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; -std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; -std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; -std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; -std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; -std::string convolution_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; -std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; -std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; -std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; -std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; -std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; -std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; -std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; -std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; -std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; -std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; -std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; -std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; -std::string convolution_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n/*__kernel void TEMPLATE(convolution_ip4v3,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n const int buff_w = 73;\n const int buff_h = 73;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer (in local memory)\n __local Dtype wl[10 * 10];\n // Local input buffer (in local memory)\n __local Dtype il[73 * 73];\n // Local accumulators (in registers)\n Dtype al[2 * 2];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Shift the patch window across width and height\n for (int yoff = 0; yoff < height; yoff += buff_h) {\n for (int xoff = 0; xoff < width; xoff += buff_w) {\n\n // Load image patch\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n int yidx = (i + yoff);\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n int xidx = (j + xoff);\n if (xidx < width && yidx < height) {\n il[j + i * buff_w] = in[xidx + yidx * width\n + fin * width * height + batch_in_off];\n }\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Kernel inner loop\n#pragma unroll 1\n for (int i = get_local_id(1); i < buff_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < buff_w;\n j += get_local_size(0)) {\n\n // Load accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n al[k] = out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off];\n }\n }\n\n#pragma unroll 1\n for (int ki = 0; ki < kernel_h; ++ki) {\n int ilpos_i = ((i + ki * kstride_h) % buff_h) * buff_w;\n int alpos_i = (i + ki * kstride_h) / buff_h * 2;\n#pragma unroll 10\n for (int kj = 0; kj < kernel_w; ++kj) {\n al[(j + kj * kstride_w) / buff_w + alpos_i] += wl[kj\n + ki * kernel_w]\n * il[(j + kj * kstride_w) % buff_w + ilpos_i];\n }\n }\n\n // Store accumulators\n#pragma unroll 1\n for (int k = 0; k < 4; k++) {\n int xidx = (j + xoff - k % 2 * buff_w);\n int yidx = (i + yoff - k / 2 * buff_h);\n if (xidx >= 0 && xidx < out_w && yidx >= 0 && yidx < out_h) {\n out[xidx + yidx * out_w + fout * out_w * out_h\n + batch_out_off] = al[k];\n }\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n }\n }\n }\n}*/\n\n// Fits into 32 KB\n__kernel void TEMPLATE(convolution_ip4v2,Dtype)(__global const Dtype *w,\n __global const Dtype *in,\n __global Dtype *out) {\n const int width = 200;\n const int height = 200;\n const int kernel_h = 10;\n const int kernel_w = 10;\n const int fout_count = 1024;\n const int fin_count = 192;\n const int kstride_h = 8;\n const int kstride_w = 8;\n const int stride_h = 1;\n const int stride_w = 1;\n const int batch_size = 1;\n\n const int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;\n const int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;\n\n const int out_h = (height - ext_kernel_h) / stride_h + 1;\n const int out_w = (width - ext_kernel_w) / stride_w + 1;\n\n // Clear the output\n {\n#pragma unroll 1\n for (int i =\n get_global_id(\n 0)+get_global_id(1)*get_global_size(0)+get_global_id(2)*get_global_size(0)*get_global_size(1);\n i < fout_count * out_h * out_w;\n i += get_global_size(0) * get_global_size(1) * get_global_size(2)) {\n out[i] = 0.0;\n }\n }\n\n // Local weight buffer\n __local Dtype wl[10 * 10];\n\n // Across output features\n#pragma unroll 1\n for (int fout = get_global_id(2); fout < fout_count;\n fout += get_global_size(2)) {\n\n // Across input features\n#pragma unroll 1\n for (int fin = 0; fin < fin_count; ++fin) {\n\n // Load local weights\n#pragma unroll 1\n for (int i = get_local_id(1); i < kernel_h; i += get_local_size(1)) {\n#pragma unroll 1\n for (int j = get_local_id(0); j < kernel_w; j += get_local_size(0)) {\n wl[j + i * kernel_w] = w[j + i * kernel_w\n + fout * fin_count * kernel_h * kernel_w\n + fin * kernel_h * kernel_w];\n }\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n // Across batches (keeps weights in local memory)\n#pragma unroll 1\n for (int batch = 0; batch < batch_size; ++batch) {\n\n const int batch_in_off = batch * width * height * fin_count;\n const int batch_out_off = batch * out_w * out_h * fout_count;\n\n // Across y-dimension\n#pragma unroll 1\n for (int yoff = get_global_id(1); yoff < height - ext_kernel_h + 1;\n yoff += get_global_size(1)) {\n\n // Across x-dimension\n#pragma unroll 1\n for (int xoff = get_global_id(0); xoff < width - ext_kernel_w + 1;\n xoff += get_global_size(0)) {\n\n Dtype outval = out[xoff + yoff * out_w + fout * out_w * out_h\n + batch_out_off];\n\n // Across the kernel itself\n#pragma unroll 10\n for (int i = 0; i < kernel_h; ++i) {\n#pragma unroll 10\n for (int j = 0; j < kernel_w; ++j) {\n outval = fma(\n wl[j + i * kernel_w],\n in[(xoff + j * kstride_w) + (yoff + i * kstride_h) * width\n + fin * width * height + batch_in_off],\n outval);\n }\n }\n\n out[xoff + yoff * out_w + fout * out_w * out_h + batch_out_off] =\n outval;\n }\n }\n }barrier(CLK_LOCAL_MEM_FENCE);\n }\n }\n}"; -std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; -std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; -std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; -std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; -std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; -std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; -std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; -std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; -viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) { +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; // NOLINT +std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; // NOLINT +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT +std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT +std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT +std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT +std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT +std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT +std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT +std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT +std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; - ss << header << "\n\n"; - ss << "#define Dtype float" << "\n\n"; - ss << activation_float << "\n\n"; - ss << auxiliary_float << "\n\n"; - ss << bnll_float << "\n\n"; - ss << channel_float << "\n\n"; - ss << concat_float << "\n\n"; - ss << contrastive_loss_float << "\n\n"; - ss << convolution_sk_float << "\n\n"; - ss << dropout_float << "\n\n"; - ss << eltwise_float << "\n\n"; - ss << fillbuffer_float << "\n\n"; - ss << im2col_float << "\n\n"; - ss << im2col_sk_float << "\n\n"; - ss << lrn_float << "\n\n"; - ss << math_float << "\n\n"; - ss << mergecrop_float << "\n\n"; - ss << pooling_float << "\n\n"; - ss << pooling_sk_float << "\n\n"; - ss << slice_float << "\n\n"; - ss << softmax_loss_float << "\n\n"; + ss << header << "\n\n"; // NOLINT + ss << "#define Dtype float" << "\n\n"; // NOLINT + ss << activation_float << "\n\n"; // NOLINT + ss << auxiliary_float << "\n\n"; // NOLINT + ss << bnll_float << "\n\n"; // NOLINT + ss << channel_float << "\n\n"; // NOLINT + ss << concat_float << "\n\n"; // NOLINT + ss << contrastive_loss_float << "\n\n"; // NOLINT + ss << dropout_float << "\n\n"; // NOLINT + ss << eltwise_float << "\n\n"; // NOLINT + ss << fillbuffer_float << "\n\n"; // NOLINT + ss << im2col_float << "\n\n"; // NOLINT + ss << im2col_sk_float << "\n\n"; // NOLINT + ss << lrn_float << "\n\n"; // NOLINT + ss << math_float << "\n\n"; // NOLINT + ss << mergecrop_float << "\n\n"; // NOLINT + ss << pooling_float << "\n\n"; // NOLINT + ss << pooling_sk_float << "\n\n"; // NOLINT + ss << slice_float << "\n\n"; // NOLINT + ss << softmax_loss_float << "\n\n"; // NOLINT #ifdef GREENTEA_DOUBLE_SUPPORT - ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; - ss << "#undef Dtype" << "\n\n"; - ss << "#define Dtype double" << "\n\n"; - ss << activation_double << "\n\n"; - ss << auxiliary_double << "\n\n"; - ss << bnll_double << "\n\n"; - ss << channel_double << "\n\n"; - ss << concat_double << "\n\n"; - ss << contrastive_loss_double << "\n\n"; - ss << convolution_sk_double << "\n\n"; - ss << dropout_double << "\n\n"; - ss << eltwise_double << "\n\n"; - ss << fillbuffer_double << "\n\n"; - ss << im2col_double << "\n\n"; - ss << im2col_sk_double << "\n\n"; - ss << lrn_double << "\n\n"; - ss << math_double << "\n\n"; - ss << mergecrop_double << "\n\n"; - ss << pooling_double << "\n\n"; - ss << pooling_sk_double << "\n\n"; - ss << slice_double << "\n\n"; - ss << softmax_loss_double << "\n\n"; + ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; // NOLINT + ss << "#undef Dtype" << "\n\n"; // NOLINT + ss << "#define Dtype double" << "\n\n"; // NOLINT + ss << activation_double << "\n\n"; // NOLINT + ss << auxiliary_double << "\n\n"; // NOLINT + ss << bnll_double << "\n\n"; // NOLINT + ss << channel_double << "\n\n"; // NOLINT + ss << concat_double << "\n\n"; // NOLINT + ss << contrastive_loss_double << "\n\n"; // NOLINT + ss << dropout_double << "\n\n"; // NOLINT + ss << eltwise_double << "\n\n"; // NOLINT + ss << fillbuffer_double << "\n\n"; // NOLINT + ss << im2col_double << "\n\n"; // NOLINT + ss << im2col_sk_double << "\n\n"; // NOLINT + ss << lrn_double << "\n\n"; // NOLINT + ss << math_double << "\n\n"; // NOLINT + ss << mergecrop_double << "\n\n"; // NOLINT + ss << pooling_double << "\n\n"; // NOLINT + ss << pooling_sk_double << "\n\n"; // NOLINT + ss << slice_double << "\n\n"; // NOLINT + ss << softmax_loss_double << "\n\n"; // NOLINT ss << "#endif" << "\n\n"; -#endif // GREENTEA_DOUBLE_SUPPORT +#endif // GREENTEA_DOUBLE_SUPPORT std::string kernel_string = ss.str(); const char* kernel_program = kernel_string.c_str(); - ctx.build_options("-cl-fast-relaxed-math -cl-mad-enable"); - viennacl::ocl::program &program = ctx.add_program(kernel_program,"kernel_program"); + ctx->build_options("-cl-fast-relaxed-math -cl-mad-enable"); + viennacl::ocl::program &program = ctx->add_program(kernel_program, + "kernel_program"); return program; } -} +} // namespace caffe #endif diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index 412c41d1e43..68add4ec26c 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -18,17 +18,17 @@ echo "#ifndef GREENTEA_CL_KERNELS_HPP_" >> $HEADER echo "#define GREENTEA_CL_KERNELS_HPP_" >> $HEADER echo "#include \"caffe/greentea/greentea.hpp\"" >> $HEADER echo "#include \"viennacl/backend/opencl.hpp\"" >> $HEADER +echo "#include \"viennacl/ocl/backend.hpp\"" >> $HEADER echo "#include \"viennacl/ocl/context.hpp\"" >> $HEADER echo "#include \"viennacl/ocl/device.hpp\"" >> $HEADER echo "#include \"viennacl/ocl/platform.hpp\"" >> $HEADER -echo "#include \"viennacl/ocl/backend.hpp\"" >> $HEADER echo "namespace caffe {" >> $HEADER echo "#include \"$INCHEADER\"" >> $SOURCE echo "#include " >> $SOURCE echo "#include " >> $SOURCE echo "namespace caffe {" >> $SOURCE -echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx);" >> $HEADER +echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx);" >> $HEADER echo "}" >> $HEADER echo "#endif" >> $HEADER @@ -41,7 +41,7 @@ do CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" echo -n "std::string $CL_KERNEL_NAME = \"" >> $SOURCE echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE - echo "\";" >> $SOURCE + echo "\"; // NOLINT" >> $SOURCE done shopt -s nullglob @@ -53,7 +53,7 @@ do CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" echo -n "std::string ${CL_KERNEL_NAME}_float = \"" >> $SOURCE echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE - echo "\";" >> $SOURCE + echo "\"; // NOLINT" >> $SOURCE done shopt -s nullglob @@ -65,12 +65,12 @@ do CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" echo -n "std::string ${CL_KERNEL_NAME}_double = \"" >> $SOURCE echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE - echo "\";" >> $SOURCE + echo "\"; // NOLINT" >> $SOURCE done -echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context &ctx) {" >> $SOURCE +echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) {" >> $SOURCE echo " std::stringstream ss;" >> $SOURCE shopt -s nullglob @@ -79,41 +79,42 @@ do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo " ss << $CL_KERNEL_NAME << \"\\n\\n\";" >> $SOURCE + echo " ss << $CL_KERNEL_NAME << \"\\n\\n\"; // NOLINT" >> $SOURCE done shopt -s nullglob -echo " ss << \"#define Dtype float\" << \"\\n\\n\";" >> $SOURCE +echo " ss << \"#define Dtype float\" << \"\\n\\n\"; // NOLINT" >> $SOURCE for CL_KERNEL in $CL_KERNELDIR do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo " ss << ${CL_KERNEL_NAME}_float << \"\\n\\n\";" >> $SOURCE + echo " ss << ${CL_KERNEL_NAME}_float << \"\\n\\n\"; // NOLINT" >> $SOURCE done shopt -s nullglob echo "#ifdef GREENTEA_DOUBLE_SUPPORT" >> $SOURCE -echo " ss << \"#ifdef DOUBLE_SUPPORT_AVAILABLE\" << \"\\n\\n\";" >> $SOURCE -echo " ss << \"#undef Dtype\" << \"\\n\\n\";" >> $SOURCE -echo " ss << \"#define Dtype double\" << \"\\n\\n\";" >> $SOURCE +echo " ss << \"#ifdef DOUBLE_SUPPORT_AVAILABLE\" << \"\\n\\n\"; // NOLINT" >> $SOURCE +echo " ss << \"#undef Dtype\" << \"\\n\\n\"; // NOLINT" >> $SOURCE +echo " ss << \"#define Dtype double\" << \"\\n\\n\"; // NOLINT" >> $SOURCE for CL_KERNEL in $CL_KERNELDIR do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo " ss << ${CL_KERNEL_NAME}_double << \"\\n\\n\";" >> $SOURCE + echo " ss << ${CL_KERNEL_NAME}_double << \"\\n\\n\"; // NOLINT" >> $SOURCE done echo " ss << \"#endif\" << \"\\n\\n\";" >> $SOURCE -echo "#endif // GREENTEA_DOUBLE_SUPPORT" >> $SOURCE +echo "#endif // GREENTEA_DOUBLE_SUPPORT" >> $SOURCE echo " std::string kernel_string = ss.str();" >> $SOURCE echo " const char* kernel_program = kernel_string.c_str();" >> $SOURCE -echo " ctx.build_options(\"-cl-fast-relaxed-math -cl-mad-enable\");" >> $SOURCE -echo " viennacl::ocl::program &program = ctx.add_program(kernel_program,\"kernel_program\");" >> $SOURCE +echo " ctx->build_options(\"-cl-fast-relaxed-math -cl-mad-enable\");" >> $SOURCE +echo " viennacl::ocl::program &program = ctx->add_program(kernel_program," >> $SOURCE +echo " \"kernel_program\");" >> $SOURCE echo " return program;" >> $SOURCE echo "}" >> $SOURCE -echo "}" >> $SOURCE +echo "} // namespace caffe" >> $SOURCE echo "#endif" >> $HEADER echo "#endif" >> $SOURCE diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index 3a9112db71c..2c5157aec75 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -12,16 +12,17 @@ namespace caffe { #ifdef USE_GREENTEA viennacl::ocl::handle WrapHandle(cl_mem in, - viennacl::ocl::context &ctx) { + viennacl::ocl::context *ctx) { if (in != NULL) { - viennacl::ocl::handle memhandle(in, ctx); + viennacl::ocl::handle memhandle(in, *ctx); memhandle.inc(); return memhandle; } else { cl_int err; - cl_mem dummy = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, 0, - NULL, &err); - viennacl::ocl::handle memhandle(dummy, ctx); + cl_mem dummy = clCreateBuffer(ctx->handle().get(), CL_MEM_READ_WRITE, 0, + NULL, + &err); + viennacl::ocl::handle memhandle(dummy, *ctx); return memhandle; } } @@ -29,15 +30,11 @@ viennacl::ocl::handle WrapHandle(cl_mem in, #endif DeviceContext::DeviceContext() - : id_(0), - backend_(Backend::BACKEND_CUDA) { - + : id_(0), backend_(Backend::BACKEND_CUDA) { } DeviceContext::DeviceContext(int id, Backend backend) - : id_(id), - backend_(backend) { - + : id_(id), backend_(backend) { } Backend DeviceContext::backend() const { @@ -48,4 +45,4 @@ int DeviceContext::id() const { return id_; } -} +} // namespace caffe diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 7542316d5ce..cfdf686b411 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -10,8 +10,8 @@ namespace caffe { template -void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_im, +void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_im, const int data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, @@ -19,14 +19,13 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, const int stride_h, const int stride_w, const int kstride_h, const int kstride_w, cl_mem data_col) { - int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; - viennacl::ocl::kernel &kernel = prog.get_kernel( + viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("im2col_sk")); /*std::cout << "num_kernels: " << num_kernels << std::endl; @@ -51,12 +50,12 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, height_col, width_col, WrapHandle(data_col, ctx)), - ctx.get_queue()); + ctx->get_queue()); } // Explicit instantiation -template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_im, const int data_offset, const int channels, @@ -69,8 +68,8 @@ template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, const int kstride_w, cl_mem data_col); -template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_im, const int data_offset, const int channels, @@ -85,17 +84,18 @@ template void greentea_im2col_sk_gpu(viennacl::ocl::program &prog, cl_mem data_col); template -void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_col, +void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int kstride_h, const int kstride_w, cl_mem data_im, const int data_offset) { - if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) { - LOG(FATAL)<<"stride greater than 1 or pad greater than 0 not tested in col2im_sk_gpu()."; + LOG(FATAL) + << "stride greater than 1 or pad greater than 0" + << " not tested in col2im_sk_gpu()."; } int ext_patch_h = (patch_h - 1) * kstride_h + 1; @@ -104,20 +104,19 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, int width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; int num_kernels = channels * height * width; - viennacl::ocl::kernel &kernel = prog.get_kernel( + viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("col2im_sk")); viennacl::ocl::enqueue( - kernel(num_kernels, WrapHandle(data_col,ctx), height, width, channels, + kernel(num_kernels, WrapHandle(data_col, ctx), height, width, channels, patch_h, patch_w, ext_patch_h, ext_patch_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, - height_col, width_col, WrapHandle(data_im,ctx), data_offset), - ctx.get_queue()); - + height_col, width_col, WrapHandle(data_im, ctx), data_offset), + ctx->get_queue()); } -template void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_col, const int channels, const int height, const int width, @@ -129,8 +128,8 @@ template void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, const int kstride_w, cl_mem data_im, const int data_offset); -template void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_col, const int channels, const int height, const int width, @@ -145,8 +144,8 @@ template void greentea_col2im_sk_gpu(viennacl::ocl::program &prog, const int data_offset); template -void greentea_im2col_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_im, +void greentea_im2col_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_im, const int data_im_off, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, @@ -158,16 +157,16 @@ void greentea_im2col_gpu(viennacl::ocl::program &prog, int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; - viennacl::ocl::kernel &kernel = prog.get_kernel(CL_KERNEL_SELECT("im2col")); + viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("im2col")); viennacl::ocl::enqueue( kernel(num_kernels, WrapHandle(data_im, ctx), data_im_off, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, WrapHandle(data_col, ctx), data_col_off), - ctx.get_queue()); + ctx->get_queue()); } -template void greentea_im2col_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_im2col_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_im, const int data_im_off, const int channels, const int height, @@ -177,8 +176,8 @@ template void greentea_im2col_gpu(viennacl::ocl::program &prog, const int stride_w, cl_mem data_col, const int data_col_off); -template void greentea_im2col_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_im2col_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_im, const int data_im_off, const int channels, const int height, @@ -189,8 +188,8 @@ template void greentea_im2col_gpu(viennacl::ocl::program &prog, const int data_col_off); template -void greentea_col2im_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, const cl_mem data_col, +void greentea_col2im_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_col, const int data_col_off, const int channels, const int height, const int width, const int patch_h, const int patch_w, const int pad_h, const int pad_w, @@ -200,17 +199,18 @@ void greentea_col2im_gpu(viennacl::ocl::program &prog, int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int num_kernels = channels * height * width; - viennacl::ocl::kernel &kernel = prog.get_kernel(CL_KERNEL_SELECT("col2im")); + viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("col2im")); viennacl::ocl::enqueue( - kernel(num_kernels, WrapHandle(data_col, ctx), data_col_off, height, width, channels, - patch_h, patch_w, pad_h, pad_w, stride_h, stride_w, height_col, - width_col, WrapHandle(data_im, ctx), data_im_off), - ctx.get_queue()); + kernel(num_kernels, WrapHandle(data_col, ctx), data_col_off, height, + width, channels, patch_h, patch_w, pad_h, pad_w, stride_h, + stride_w, height_col, width_col, WrapHandle(data_im, ctx), + data_im_off), + ctx->get_queue()); } -template void greentea_col2im_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_col2im_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_col, const int data_col_off, const int channels, const int height, @@ -219,8 +219,8 @@ template void greentea_col2im_gpu(viennacl::ocl::program &prog, const int pad_w, const int stride_h, const int stride_w, cl_mem data_im, const int data_im_off); -template void greentea_col2im_gpu(viennacl::ocl::program &prog, - viennacl::ocl::context &ctx, +template void greentea_col2im_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, const cl_mem data_col, const int data_col_off, const int channels, const int height, @@ -230,5 +230,5 @@ template void greentea_col2im_gpu(viennacl::ocl::program &prog, const int stride_w, cl_mem data_im, const int data_im_off); -} +} // namespace caffe #endif diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 7fa9aa2b259..a057442ce5a 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -12,25 +12,25 @@ #include #include -#include +#include #include #include #include +#include +#include +#include +#include #include "caffe/common.hpp" #include "viennacl/backend/opencl.hpp" +#include "viennacl/ocl/backend.hpp" #include "viennacl/ocl/context.hpp" #include "viennacl/ocl/device.hpp" #include "viennacl/ocl/platform.hpp" -#include "viennacl/ocl/backend.hpp" #include "caffe/util/math_functions.hpp" -#include -#include -#include - #ifdef USE_CLBLAS #include #endif @@ -53,15 +53,16 @@ void greentea_memset(const int ctx_id, const size_t N, const int alpha, viennacl::ocl::kernel &oclk_fill = program.get_kernel( CL_KERNEL_SELECT("fillbuffer")); viennacl::ocl::enqueue( - oclk_fill((int) N, (unsigned char) (alpha), WrapHandle(X, ctx), offX), + oclk_fill(static_cast(N), static_cast(alpha), + WrapHandle(X, &ctx), offX), ctx.get_queue()); } // Copy from OpenCL buffer to main memory void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, - void *Y, viennacl::ocl::context &ctx) { + void *Y, viennacl::ocl::context *ctx) { if (Y != NULL) { - clEnqueueReadBuffer(ctx.get_queue().handle().get(), X, CL_TRUE, offX, N, Y, + clEnqueueReadBuffer(ctx->get_queue().handle().get(), X, CL_TRUE, offX, N, Y, 0, NULL, NULL); @@ -70,9 +71,9 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, // Copy from main memory to OpenCL buffer void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, - const int offY, viennacl::ocl::context &ctx) { + const int offY, viennacl::ocl::context *ctx) { if (X != NULL) { - clEnqueueWriteBuffer(ctx.get_queue().handle().get(), Y, + clEnqueueWriteBuffer(ctx->get_queue().handle().get(), Y, CL_TRUE, offY, N, X, 0, NULL, NULL); } @@ -81,65 +82,65 @@ void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, // Copy from OpenCL to OpenCL buffer void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx) { - clEnqueueCopyBuffer(ctx.get_queue().handle().get(), X, Y, offX, offY, N, 0, + viennacl::ocl::context *ctx) { + clEnqueueCopyBuffer(ctx->get_queue().handle().get(), X, Y, offX, offY, N, 0, NULL, NULL); } template void greentea_copy(const int N, const cl_mem X, const int offX, Dtype* Y, - viennacl::ocl::context &ctx) { + viennacl::ocl::context *ctx) { greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX * sizeof(Dtype), Y, ctx); } template void greentea_copy(const int N, const Dtype* X, cl_mem Y, const int offY, - viennacl::ocl::context &ctx) { + viennacl::ocl::context *ctx) { greentea_gpu_memcpy(sizeof(Dtype) * N, X, Y, offY * sizeof(Dtype), ctx); } // Copy from OpenCL buffer to OpenCL buffer template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, - const int offY, viennacl::ocl::context &ctx) { - greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX * sizeof(Dtype), Y, - offY * sizeof(Dtype), ctx); + const int offY, viennacl::ocl::context *ctx) { + greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX * sizeof(Dtype), Y, + offY * sizeof(Dtype), ctx); } // Explicit instantiations template void greentea_copy(const int N, const cl_mem X, const int offX, - int* Y, viennacl::ocl::context &ctx); + int* Y, viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, unsigned int* Y, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, - float* Y, viennacl::ocl::context &ctx); + float* Y, viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, - double* Y, viennacl::ocl::context &ctx); + double* Y, viennacl::ocl::context *ctx); template void greentea_copy(const int N, const int* X, cl_mem Y, - const int offY, viennacl::ocl::context &ctx); + const int offY, viennacl::ocl::context *ctx); template void greentea_copy(const int N, const unsigned int* X, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_copy(const int N, const float* X, cl_mem Y, - const int offY, viennacl::ocl::context &ctx); + const int offY, viennacl::ocl::context *ctx); template void greentea_copy(const int N, const double* X, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, const int offY, - viennacl::ocl::context &ctx); + viennacl::ocl::context *ctx); template void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, @@ -147,7 +148,6 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, const int K, const Dtype alpha, const cl_mem A, const int offA, const cl_mem B, const int offB, const Dtype beta, cl_mem C, const int offC) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -161,7 +161,6 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, caffe_cpu_gemm(TransA, TransB, M, N, K, alpha, Aptr + offA, Bptr + offB, beta, Cptr + offC); } else { - int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; @@ -207,10 +206,14 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); + clblasSgemm(clOrder, clTransA, clTransB, + M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, + C, offC, ldc, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDgemm(clOrder, clTransA, clTransB, M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, C, offC, ldc, 1, &queue, 0, NULL, NULL)); + clblasDgemm(clOrder, clTransA, clTransB, + M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, + C, offC, ldc, 1, &queue, 0, NULL, NULL)); } #endif } @@ -239,7 +242,6 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, const cl_mem A, const int offA, const cl_mem x, const int offx, const Dtype beta, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -253,7 +255,6 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, caffe_cpu_gemv(TransA, M, N, alpha, Aptr + offA, xptr + offx, beta, yptr + offy); } else { - #ifdef USE_VIENNACLBLAS ViennaCLBackend backend; ViennaCLBackendCreate(&backend); @@ -284,10 +285,12 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasSgemv(clblasRowMajor, clTransA, M, N, alpha, A, offA, N, x, offx, 1, + beta, y, offy, 1, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDgemv(clblasRowMajor,clTransA,M,N,alpha,A,offA,N,x,offx,1,beta,y,offy,1,1,&queue,0,NULL,NULL)); + clblasDgemv(clblasRowMajor, clTransA, M, N, alpha, A, offA, N, x, offx, 1, + beta, y, offy, 1, 1, &queue, 0, NULL, NULL)); } #endif } @@ -312,7 +315,6 @@ template void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, const cl_mem X, const int offX, cl_mem Y, const int offY) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -324,7 +326,6 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, caffe_axpy(N, alpha, Xptr + offX, Yptr + offY); } else { - #ifdef USE_VIENNACLBLAS ViennaCLBackend backend; ViennaCLBackendCreate(&backend); @@ -345,11 +346,12 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); + clblasSaxpy(N, alpha, X, offX, + 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDaxpy(N,alpha,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - + clblasDaxpy(N, alpha, X, offX, + 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); } #endif } @@ -373,8 +375,8 @@ void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, viennacl::ocl::kernel &oclk_mul = program.get_kernel(CL_KERNEL_SELECT("mul")); viennacl::ocl::enqueue( - oclk_mul(N, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, - WrapHandle(y, ctx), offy), + oclk_mul(N, WrapHandle(a, &ctx), offa, WrapHandle(b, &ctx), offb, + WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -396,8 +398,8 @@ void greentea_gpu_div(const int ctx_id, const int N, const cl_mem a, viennacl::ocl::kernel &oclk_div = program.get_kernel(CL_KERNEL_SELECT("div")); viennacl::ocl::enqueue( - oclk_div(N, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, - WrapHandle(y, ctx), offy), + oclk_div(N, WrapHandle(a, &ctx), offa, WrapHandle(b, &ctx), offb, + WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -442,9 +444,11 @@ void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, cl_command_queue queue = ctx.get_queue().handle().get(); if (std::is_same::value) { - GREENTEA_CL_BLAS_CHECK(clblasSscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasSscal(N, alpha, x, offx, + 1, 1, &queue, 0, NULL, NULL)); } else { - GREENTEA_CL_BLAS_CHECK(clblasDscal(N,alpha,x,offx,1,1,&queue,0,NULL,NULL)); + GREENTEA_CL_BLAS_CHECK(clblasDscal(N, alpha, x, offx, + 1, 1, &queue, 0, NULL, NULL)); } #endif } @@ -490,7 +494,6 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, *out = caffe_cpu_dot(n, Xptr + offX, Yptr + offY); } else { - #ifdef USE_VIENNACLBLAS ViennaCLBackend backend; ViennaCLBackendCreate(&backend); @@ -499,10 +502,12 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, if (std::is_same::value) { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSdot(backend, n, (float* )out, X, offX, 1, Y, offY, 1)); + ViennaCLOpenCLSdot(backend, n, reinterpret_cast(out), X, offX, + 1, Y, offY, 1)); } else { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDdot(backend, n, (double* )out, X, offX, 1, Y, offY, 1)); + ViennaCLOpenCLDdot(backend, n, reinterpret_cast(out), X, + offX, 1, Y, offY, 1)); } #endif @@ -517,13 +522,15 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); + clblasSdot(n, gpuout, 0, X, offX, 1, Y, + offY, 1, scratch, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDdot(n,gpuout,0,X,offX,1,Y,offY,1,scratch,1,&queue,0,NULL,NULL)); + clblasDdot(n, gpuout, 0, X, offX, 1, Y, + offY, 1, scratch, 1, &queue, 0, NULL, NULL)); } - greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, out, ctx); + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, out, &ctx); clReleaseMemObject(gpuout); clReleaseMemObject(scratch); @@ -562,10 +569,12 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, if (std::is_same::value) { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSasum(backend, n, (float* )Y, X, offX, 1)); + ViennaCLOpenCLSasum(backend, n, reinterpret_cast(Y), X, offX, + 1)); } else { GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDasum(backend, n, (double* )Y, X, offX, 1)); + ViennaCLOpenCLDasum(backend, n, reinterpret_cast(Y), X, offX, + 1)); } #endif @@ -580,13 +589,15 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); + clblasSasum(n, gpuout, 0, X, offX, 1, + scratch, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDasum(n,gpuout,0,X,offX,1,scratch,1,&queue,0,NULL,NULL)); + clblasDasum(n, gpuout, 0, X, offX, 1, + scratch, 1, &queue, 0, NULL, NULL)); } - greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, Y, ctx); + greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, Y, &ctx); clReleaseMemObject(gpuout); clReleaseMemObject(scratch); @@ -641,12 +652,14 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasScopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - GREENTEA_CL_BLAS_CHECK(clblasSscal(n,alpha,Y,offY,1,1,&queue,0,NULL,NULL)); + clblasScopy(n, X, offX, 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); + GREENTEA_CL_BLAS_CHECK( + clblasSscal(n, alpha, Y, offY, 1, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDcopy(n,X,offX,1,Y,offY,1,1,&queue,0,NULL,NULL)); - GREENTEA_CL_BLAS_CHECK(clblasDscal(n,alpha,Y,offY,1,1,&queue,0,NULL,NULL)); + clblasDcopy(n, X, offX, 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); + GREENTEA_CL_BLAS_CHECK( + clblasDscal(n, alpha, Y, offY, 1, 1, &queue, 0, NULL, NULL)); } #endif } @@ -668,13 +681,14 @@ void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); // OpenCL Version >= 1.2 approach - //clEnqueueFillBuffer(ctx.get_queue().handle().get(), Y, &alpha, sizeof(Dtype), - // offY, N, 0, NULL, NULL); + // clEnqueueFillBuffer(ctx.get_queue().handle().get(), + // Y, &alpha, sizeof(Dtype), + // offY, N, 0, NULL, NULL); // OpenCL Version < 1.2 fallback viennacl::ocl::kernel &oclk_fill = program.get_kernel( CL_KERNEL_SELECT("fill")); - viennacl::ocl::enqueue(oclk_fill(N, alpha, WrapHandle(Y, ctx), offY), + viennacl::ocl::enqueue(oclk_fill(N, alpha, WrapHandle(Y, &ctx), offY), ctx.get_queue()); } @@ -690,13 +704,12 @@ template void greentea_gpu_set(const int ctx_id, const int N, template void greentea_gpu_add_scalar(const int ctx_id, const int N, const Dtype alpha, cl_mem Y, const int offY) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_add_scalar = program.get_kernel( CL_KERNEL_SELECT("add_scalar")); - viennacl::ocl::enqueue(oclk_add_scalar(N, alpha, WrapHandle(Y, ctx), offY), + viennacl::ocl::enqueue(oclk_add_scalar(N, alpha, WrapHandle(Y, &ctx), offY), ctx.get_queue()); } @@ -711,14 +724,13 @@ template void greentea_gpu_add(const int ctx_id, const int n, const cl_mem a, const int offa, const cl_mem b, const int offb, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_add = program.get_kernel(CL_KERNEL_SELECT("add")); viennacl::ocl::enqueue( - oclk_add(n, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, - WrapHandle(y, ctx), offy), + oclk_add(n, WrapHandle(a, &ctx), offa, WrapHandle(b, &ctx), offb, + WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -735,14 +747,13 @@ template void greentea_gpu_sub(const int ctx_id, const int n, const cl_mem a, const int offa, const cl_mem b, const int offb, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_sub = program.get_kernel(CL_KERNEL_SELECT("sub")); viennacl::ocl::enqueue( - oclk_sub(n, WrapHandle(a, ctx), offa, WrapHandle(b, ctx), offb, - WrapHandle(y, ctx), offy), + oclk_sub(n, WrapHandle(a, &ctx), offa, WrapHandle(b, &ctx), offb, + WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -758,13 +769,12 @@ template void greentea_gpu_sub(const int ctx_id, const int n, template void greentea_gpu_abs(const int ctx_id, const int N, const cl_mem a, const int offa, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_abs = program.get_kernel(CL_KERNEL_SELECT("abs")); viennacl::ocl::enqueue( - oclk_abs(N, WrapHandle(a, ctx), offa, WrapHandle(y, ctx), offy), + oclk_abs(N, WrapHandle(a, &ctx), offa, WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -778,13 +788,12 @@ template void greentea_gpu_abs(const int ctx_id, const int N, template void greentea_gpu_exp(const int ctx_id, const int N, const cl_mem a, const int offa, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_exp = program.get_kernel(CL_KERNEL_SELECT("exp")); viennacl::ocl::enqueue( - oclk_exp(N, WrapHandle(a, ctx), offa, WrapHandle(y, ctx), offy), + oclk_exp(N, WrapHandle(a, &ctx), offa, WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -799,14 +808,13 @@ template void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, const int offa, const Dtype alpha, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_powx = program.get_kernel( CL_KERNEL_SELECT("powx")); viennacl::ocl::enqueue( - oclk_powx(N, WrapHandle(a, ctx), offa, alpha, WrapHandle(y, ctx), offy), + oclk_powx(N, WrapHandle(a, &ctx), offa, alpha, WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -822,13 +830,12 @@ template void greentea_gpu_powx(const int ctx_id, const int N, template void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, const int offa, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_log = program.get_kernel(CL_KERNEL_SELECT("log")); viennacl::ocl::enqueue( - oclk_log(N, WrapHandle(a, ctx), offa, WrapHandle(y, ctx), offy), + oclk_log(N, WrapHandle(a, &ctx), offa, WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -842,14 +849,13 @@ template void greentea_gpu_log(const int ctx_id, const int N, template void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_sign = program.get_kernel( CL_KERNEL_SELECT("sign")); viennacl::ocl::enqueue( - oclk_sign(n, WrapHandle(x, ctx), offx, WrapHandle(y, ctx), offy), + oclk_sign(n, WrapHandle(x, &ctx), offx, WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -863,14 +869,13 @@ template void greentea_gpu_sign(const int ctx_id, const int n, template void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, int offx, cl_mem y, const int offy) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); viennacl::ocl::kernel &oclk_sgnbit = program.get_kernel( CL_KERNEL_SELECT("sgnbit")); viennacl::ocl::enqueue( - oclk_sgnbit(n, WrapHandle(x, ctx), offx, WrapHandle(y, ctx), offy), + oclk_sgnbit(n, WrapHandle(x, &ctx), offx, WrapHandle(y, &ctx), offy), ctx.get_queue()); } @@ -884,18 +889,18 @@ template void greentea_gpu_sgnbit(const int ctx_id, const int n, void greentea_gpu_rng_uniform(const int ctx_id, const int n, cl_mem r, int offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - std::vector random(n); + std::vector random(n); //NOLINT caffe_rng_uniform(n, &random[0]); - greentea_gpu_memcpy(sizeof(unsigned int) * n, &random[0], r, offr, ctx); + greentea_gpu_memcpy(sizeof(unsigned int) * n, &random[0], r, offr, &ctx); } template void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, const Dtype b, cl_mem r, const int offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - std::vector random(n); + std::vector random(n); // NOLINT caffe_rng_uniform(n, a, b, &random[0]); - greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, ctx); + greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, &ctx); } template void greentea_gpu_rng_uniform(const int ctx_id, const int n, @@ -909,9 +914,9 @@ template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, const Dtype sigma, cl_mem r, const int offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - std::vector random(n); + std::vector random(n); // NOLINT caffe_rng_gaussian(n, mu, sigma, &random[0]); - greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, ctx); + greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, &ctx); } template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, @@ -924,5 +929,5 @@ template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const double sigma, cl_mem r, const int offr); -} +} // namespace caffe #endif diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index c8999653df4..84dd698e479 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -31,7 +31,8 @@ shared_ptr > GetConvolutionLayer(const LayerParameter& param) { engine = ConvolutionParameter_Engine_CUDNN; #endif } - if (engine == ConvolutionParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + if (engine == ConvolutionParameter_Engine_CAFFE + || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new ConvolutionLayer(param)); #ifdef USE_CUDNN } else if (engine == ConvolutionParameter_Engine_CUDNN) { @@ -54,7 +55,8 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { engine = PoolingParameter_Engine_CUDNN; #endif } - if (engine == PoolingParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + if (engine == PoolingParameter_Engine_CAFFE + || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new PoolingLayer(param)); #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { @@ -84,7 +86,8 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { engine = ReLUParameter_Engine_CUDNN; #endif } - if (engine == ReLUParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + if (engine == ReLUParameter_Engine_CAFFE + || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new ReLULayer(param)); #ifdef USE_CUDNN } else if (engine == ReLUParameter_Engine_CUDNN) { @@ -107,7 +110,8 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { engine = SigmoidParameter_Engine_CUDNN; #endif } - if (engine == SigmoidParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + if (engine == SigmoidParameter_Engine_CAFFE + || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new SigmoidLayer(param)); #ifdef USE_CUDNN } else if (engine == SigmoidParameter_Engine_CUDNN) { @@ -130,7 +134,8 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { engine = SoftmaxParameter_Engine_CUDNN; #endif } - if (engine == SoftmaxParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + if (engine == SoftmaxParameter_Engine_CAFFE + || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new SoftmaxLayer(param)); #ifdef USE_CUDNN } else if (engine == SoftmaxParameter_Engine_CUDNN) { @@ -153,7 +158,8 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { engine = TanHParameter_Engine_CUDNN; #endif } - if (engine == TanHParameter_Engine_CAFFE || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + if (engine == TanHParameter_Engine_CAFFE + || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { return shared_ptr >(new TanHLayer(param)); #ifdef USE_CUDNN } else if (engine == TanHParameter_Engine_CUDNN) { diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu index 253760139f3..87de89b1009 100644 --- a/src/caffe/layers/absval_layer.cu +++ b/src/caffe/layers/absval_layer.cu @@ -19,13 +19,13 @@ void AbsValLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_abs(this->device_context_.id(), count, - (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (top_data), - 0); -#endif // USE_GREENTEA + (cl_mem) (bottom[0]->gpu_data()), 0, + (cl_mem) (top_data), 0); +#endif // USE_GREENTEA } } @@ -34,7 +34,7 @@ void AbsValLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const int count = top[0]->count(); - //const Dtype* top_data = top[0]->gpu_data(); + // const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); @@ -44,16 +44,17 @@ void AbsValLayer::Backward_gpu(const vector*>& top, #ifdef USE_CUDA caffe_gpu_sign(count, bottom_data, bottom_diff); caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sign(this->device_context_.id(), count, (cl_mem) bottom_data, - 0, (cl_mem) bottom_diff, 0); - greentea_gpu_mul(this->device_context_.id(), count, (cl_mem) bottom_diff, - 0, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); -#endif // USE_GREENTEA + greentea_gpu_sign(this->device_context_.id(), count, + (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, + 0); + greentea_gpu_mul(this->device_context_.id(), count, + (cl_mem) bottom_diff, 0, (cl_mem) top_diff, 0, + (cl_mem) bottom_diff, 0); +#endif // USE_GREENTEA } - } } diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index affa8d2579c..fb7d1895af3 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -8,8 +8,8 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { @@ -89,14 +89,15 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Initialize and fill the weights: // output channels x input channels per-group x kernel height x kernel width this->blobs_[0].reset(new Blob( - conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_,this->device_context_)); + conv_out_channels_, conv_in_channels_ / group_, + kernel_h_, kernel_w_, this->device_context_)); shared_ptr > weight_filler(GetFiller( this->layer_param_.convolution_param().weight_filler())); weight_filler->Fill(this->blobs_[0].get()); // If necessary, initialize and fill the biases. if (bias_term_) { vector bias_shape(1, num_output_); - this->blobs_[1].reset(new Blob(bias_shape,this->device_context_)); + this->blobs_[1].reset(new Blob(bias_shape, this->device_context_)); shared_ptr > bias_filler(GetFiller( this->layer_param_.convolution_param().bias_filler())); bias_filler->Fill(this->blobs_[1].get()); @@ -262,12 +263,13 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 0., output + output_off + output_offset_ * g); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA if (!is_1x1_) { if (!skip_im2col) { - greentea_conv_im2col_gpu(input, input_off, col_buffer_.mutable_gpu_data(),0); + greentea_conv_im2col_gpu(input, input_off, + col_buffer_.mutable_gpu_data(), 0); } col_buff = col_buffer_.gpu_data(); } @@ -275,13 +277,13 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype) 1., (cl_mem)weights, weight_offset_ * g, - (cl_mem)col_buff, + (Dtype) 1., (cl_mem) weights, weight_offset_ * g, + (cl_mem) col_buff, (is_1x1_ ? input_off : 0) + col_offset_ * g, - (Dtype) 0., (cl_mem)output, + (Dtype) 0., (cl_mem) output, output_off + output_offset_ * g); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -295,7 +297,7 @@ void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, height_out_ * width_out_, 1, (Dtype) 1., bias, bias_multiplier_.gpu_data(), (Dtype) 1., output + output_off); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, @@ -304,7 +306,7 @@ void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, (cl_mem) bias, 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., (cl_mem) output, output_off); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -330,7 +332,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, if (!is_1x1_) { conv_col2im_gpu(col_buff, input + input_off); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA for (int g = 0; g < group_; ++g) { @@ -346,7 +348,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, if (!is_1x1_) { greentea_conv_col2im_gpu(col_buff, 0, input, input_off); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -364,29 +366,33 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, col_buff = col_buffer_.gpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasTrans, - conv_out_channels_ / group_, kernel_dim_ / group_, - conv_out_spatial_dim_, (Dtype) 1., - output + output_off + output_offset_ * g, - col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., - weights + weight_offset_ * g); + caffe_gpu_gemm( + CblasNoTrans, CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype) 1., + output + output_off + output_offset_ * g, + col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., + weights + weight_offset_ * g); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA if (!is_1x1_) { - greentea_conv_im2col_gpu(input, input_off, col_buffer_.mutable_gpu_data(),0); + greentea_conv_im2col_gpu(input, input_off, col_buffer_.mutable_gpu_data(), + 0); col_buff = col_buffer_.gpu_data(); } for (int g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype) 1., (cl_mem) output, output_off + output_offset_ * g, - (cl_mem) col_buff, (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., - (cl_mem) weights, weight_offset_ * g); + (Dtype) 1., (cl_mem) output, + output_off + output_offset_ * g, + (cl_mem) col_buff, + (is_1x1_ ? input_off : 0) + col_offset_ * g, + (Dtype) 1., (cl_mem) weights, + weight_offset_ * g); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -399,7 +405,7 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., input + input_off, bias_multiplier_.gpu_data(), 1., bias); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, @@ -407,7 +413,7 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, (cl_mem) input, input_off, (cl_mem) (bias_multiplier_.gpu_data()), 0, 1., (cl_mem) bias, 0); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 859cd75d52e..441dbf10ecb 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -21,7 +21,8 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, output_labels_ = true; } data_transformer_.reset( - new DataTransformer(transform_param_, this->phase_, this->device_context_)); + new DataTransformer(transform_param_, + this->phase_, this->device_context_)); data_transformer_->InitRand(); // The subclasses should setup the size of bottom and top DataLayerSetUp(bottom, top); diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 801e38ce92e..c93567cfe4f 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -24,7 +24,7 @@ void BasePrefetchingDataLayer::Forward_gpu( caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_gpu_data()); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -33,18 +33,19 @@ void BasePrefetchingDataLayer::Forward_gpu( // Reshape to loaded data. top[0]->ReshapeLike(this->prefetch_data_); // Copy the data - greentea_copy(prefetch_data_.count(), (cl_mem)(prefetch_data_.gpu_data()),0, - (cl_mem)(top[0]->mutable_gpu_data()),0, ctx); + greentea_copy(prefetch_data_.count(), + (cl_mem) (prefetch_data_.gpu_data()), 0, + (cl_mem) (top[0]->mutable_gpu_data()), 0, &ctx); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(prefetch_label_); // Copy the labels. - greentea_copy(prefetch_label_.count(), (cl_mem)(prefetch_label_.gpu_data()),0, - (cl_mem)(top[1]->mutable_gpu_data()),0, ctx); + greentea_copy(prefetch_label_.count(), + (cl_mem) (prefetch_label_.gpu_data()), 0, + (cl_mem) (top[1]->mutable_gpu_data()), 0, &ctx); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - // Start a new prefetch thread CreatePrefetchThread(); } diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index 07954b63f98..30384811ed5 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -16,14 +16,13 @@ const float kBNLL_THRESHOLD = 50.; #ifdef USE_CUDA template __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } -#endif // USE_CUDA +#endif // USE_CUDA template void BNLLLayer::Forward_gpu(const vector*>& bottom, @@ -35,11 +34,11 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - BNLLForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + BNLLForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -50,10 +49,10 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_bnll = program.get_kernel( CL_KERNEL_SELECT("bnll_forward")); viennacl::ocl::enqueue( - oclk_bnll(count, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) top_data, ctx)), + oclk_bnll(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -61,13 +60,12 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, template __global__ void BNLLBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } -#endif // USE_CUDA +#endif // USE_CUDA template void BNLLLayer::Backward_gpu(const vector*>& top, @@ -82,11 +80,11 @@ void BNLLLayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - BNLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + BNLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, bottom_data, bottom_diff); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -97,11 +95,11 @@ void BNLLLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_bnll = program.get_kernel( CL_KERNEL_SELECT("bnll_backward")); viennacl::ocl::enqueue( - oclk_bnll(count, WrapHandle((cl_mem) top_diff, ctx), - WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) bottom_diff, ctx)), + oclk_bnll(count, WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 06b58a408c2..5d97c854db6 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -18,8 +18,7 @@ __global__ void Concat(const int nthreads, const Dtype* in_data, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; @@ -32,7 +31,7 @@ __global__ void Concat(const int nthreads, const Dtype* in_data, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void ConcatLayer::Forward_gpu(const vector*>& bottom, @@ -50,10 +49,11 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), + CAFFE_CUDA_NUM_THREADS)( nthreads, bottom_data, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -65,15 +65,14 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_concat = program.get_kernel( CL_KERNEL_SELECT("concat")); viennacl::ocl::enqueue( - oclk_concat(nthreads, WrapHandle((cl_mem) bottom_data, ctx), + oclk_concat(nthreads, WrapHandle((cl_mem) bottom_data, &ctx), kForward ? 1 : 0, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, - WrapHandle((cl_mem) top_data, ctx)), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } offset_concat_axis += bottom_concat_axis; - } } @@ -97,10 +96,11 @@ void ConcatLayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), + CAFFE_CUDA_NUM_THREADS)( nthreads, top_diff, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -112,12 +112,12 @@ void ConcatLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_concat = program.get_kernel( CL_KERNEL_SELECT("concat")); viennacl::ocl::enqueue( - oclk_concat(nthreads, WrapHandle((cl_mem) top_diff, ctx), + oclk_concat(nthreads, WrapHandle((cl_mem) top_diff, &ctx), kForward ? 1 : 0, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, - WrapHandle((cl_mem) bottom_diff, ctx)), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } offset_concat_axis += bottom_concat_axis; diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 4047c064b76..6367aa3cb5d 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -30,23 +30,25 @@ void ContrastiveLossLayer::Forward_gpu( diff_sq_.gpu_data(), // (a_i-b_i)^2 summer_vec_.gpu_data(), Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_sub(this->device_context_.id(), count, - (cl_mem) (bottom[0]->gpu_data()), 0, // a - (cl_mem) (bottom[1]->gpu_data()), 0, // b - (cl_mem) (diff_.mutable_gpu_data()), 0); // a_i-b_i + (cl_mem) (bottom[0]->gpu_data()), 0, + (cl_mem) (bottom[1]->gpu_data()), 0, + (cl_mem) (diff_.mutable_gpu_data()), 0); greentea_gpu_powx(this->device_context_.id(), count, - (cl_mem) (diff_.mutable_gpu_data()), 0, // a_i-b_i - Dtype(2), (cl_mem) (diff_sq_.mutable_gpu_data()), 0); // (a_i-b_i)^2 + (cl_mem) (diff_.mutable_gpu_data()), + 0, // a_i-b_i + Dtype(2), (cl_mem) (diff_sq_.mutable_gpu_data()), + 0); // (a_i-b_i)^2 greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, - bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), - (cl_mem) (diff_sq_.gpu_data()), - 0, // (a_i-b_i)^2 - (cl_mem) (summer_vec_.gpu_data()), 0, Dtype(0.0), - (cl_mem) (dist_sq_.mutable_gpu_data()), 0); // \Sum (a_i-b_i)^2 -#endif // USE_GREENTEA + bottom[0]->num(), bottom[0]->channels(), + Dtype(1.0), (cl_mem) (diff_sq_.gpu_data()), + 0, // (a_i-b_i)^2 + (cl_mem) (summer_vec_.gpu_data()), 0, Dtype(0.0), + (cl_mem) (dist_sq_.mutable_gpu_data()), 0); +#endif // USE_GREENTEA } Dtype margin = this->layer_param_.contrastive_loss_param().margin(); @@ -60,7 +62,7 @@ void ContrastiveLossLayer::Forward_gpu( if (legacy_version) { loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); } else { - Dtype dist = std::max(margin - (Dtype)sqrt(dist_sq_.cpu_data()[i]), + Dtype dist = std::max(margin - (Dtype) sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0)); loss += dist * dist; } @@ -77,8 +79,7 @@ __global__ void CLLBackward(const int count, const int channels, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, Dtype *bottom_diff) { - CUDA_KERNEL_LOOP(i, count) - { + CUDA_KERNEL_LOOP(i, count) { int n = i / channels; // the num index, to access y and dist_sq if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; @@ -101,7 +102,7 @@ __global__ void CLLBackward(const int count, const int channels, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void ContrastiveLossLayer::Backward_gpu( @@ -121,15 +122,15 @@ void ContrastiveLossLayer::Backward_gpu( if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - CLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + CLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 - diff_.gpu_data(),// the cached eltwise difference between a and b - dist_sq_.gpu_data(),// the cached square distance between a and b + diff_.gpu_data(), // the cached eltwise difference between a and b + dist_sq_.gpu_data(), // the cached square distance between a and b bottom[i]->mutable_gpu_diff()); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -140,16 +141,16 @@ void ContrastiveLossLayer::Backward_gpu( viennacl::ocl::kernel &oclk_cll = program.get_kernel( CL_KERNEL_SELECT("cll_backward")); viennacl::ocl::enqueue( - oclk_cll(count, channels, margin, legacy_version ? 1 : 0, alpha, - WrapHandle((cl_mem) (bottom[2]->gpu_data()), ctx), - WrapHandle((cl_mem) (diff_.gpu_data()), ctx), - WrapHandle((cl_mem) (dist_sq_.gpu_data()), ctx), - WrapHandle((cl_mem) (bottom[i]->mutable_gpu_diff()), ctx)), + oclk_cll( + count, channels, margin, legacy_version ? 1 : 0, alpha, + WrapHandle((cl_mem) (bottom[2]->gpu_data()), &ctx), + WrapHandle((cl_mem) (diff_.gpu_data()), &ctx), + WrapHandle((cl_mem) (dist_sq_.gpu_data()), &ctx), + WrapHandle((cl_mem) (bottom[i]->mutable_gpu_diff()), &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } } } diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 40f2fa2ec4f..156cdb43ca0 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -8,8 +8,8 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index 242fc520850..da6d0614efa 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -21,7 +21,8 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, || (conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; CHECK( - (!conv_param.has_pad() && conv_param.has_pad_h() && conv_param.has_pad_w()) + (!conv_param.has_pad() && conv_param.has_pad_h() + && conv_param.has_pad_w()) || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; CHECK( @@ -83,12 +84,13 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; // TODO: Change this - if(kstride_h_ != 23 || this->device_context_.backend() == BACKEND_CUDA) { + if (kstride_h_ != 23 || this->device_context_.backend() == BACKEND_CUDA) { col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, - width_out); + width_out); } // Set the parameters - CHECK_EQ(num_output_ % group_, 0)<< "Number of output should be multiples of group."; + CHECK_EQ(num_output_ % group_, 0) + << "Number of output should be multiples of group."; bias_term_ = this->layer_param_.convolution_param().bias_term(); // Figure out the dimensions for individual gemms. M_ = num_output_ / group_; @@ -99,8 +101,6 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, } // Check if we need to set up the weights if (this->blobs_.size() > 0) { - // (FTschopp) Silence this output: - //LOG(INFO)<< "Skipping parameter initialization"; } else { if (bias_term_) { this->blobs_.resize(2); diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index a94a2776d8a..2834703bb62 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -8,8 +8,8 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { @@ -17,7 +17,6 @@ namespace caffe { template void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // CUDA backend code @@ -61,7 +60,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, this->device_context_.id()); for (int i = 0; i < bottom.size(); ++i) { - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); @@ -72,9 +70,8 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { - // First, im2col - greentea_im2col_sk_gpu(program, ctx, bottom_data, + greentea_im2col_sk_gpu(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, @@ -231,7 +228,7 @@ void ConvolutionSKLayer::Backward_gpu( for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. - greentea_im2col_sk_gpu(program, ctx, bottom_data, + greentea_im2col_sk_gpu(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, @@ -258,7 +255,7 @@ void ConvolutionSKLayer::Backward_gpu( (Dtype) 0., col_diff, col_offset * g); } // col2im back to the data - greentea_col2im_sk_gpu(program, ctx, col_diff, channels_, + greentea_col2im_sk_gpu(&program, &ctx, col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, bottom_diff, diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 47b7e8982e6..a4612963b6b 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -6,12 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#include "caffe/greentea/greentea_im2col.hpp" -#endif - namespace caffe { template diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu index 14cf4e2f33b..f3eb0ec34f8 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -8,22 +8,22 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { -template +template void DeconvolutionLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_gemm(bottom_data, bottom[i]->offset(n), weight, - top_data, top[i]->offset(n)); + top_data, top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); this->forward_gpu_bias(top_data, top[i]->offset(n), bias); @@ -32,9 +32,10 @@ void DeconvolutionLayer::Forward_gpu(const vector*>& bottom, } } -template -void DeconvolutionLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { +template +void DeconvolutionLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { @@ -52,13 +53,13 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(top_diff, top[i]->offset(n), - bottom_data, bottom[i]->offset(n), weight_diff); + this->weight_gpu_gemm(top_diff, top[i]->offset(n), bottom_data, + bottom[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->forward_gpu_gemm(top_diff, top[i]->offset(n), weight, - bottom_diff, bottom[i]->offset(n)); + bottom_diff, bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index a66870fddf6..8b130c7c4e9 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -16,12 +16,11 @@ __global__ void DropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; } } -#endif // USE_CUDA +#endif // USE_CUDA template void DropoutLayer::Forward_gpu(const vector*>& bottom, @@ -38,14 +37,14 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) - DropoutForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + DropoutForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, mask, uint_thres_, scale_, top_data); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -59,16 +58,16 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_dropout = program.get_kernel( CL_KERNEL_SELECT("dropout_forward")); viennacl::ocl::enqueue( - oclk_dropout(count, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle(mask, ctx), uint_thres_, scale_, - WrapHandle((cl_mem) top_data, ctx)), + oclk_dropout(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle(mask, &ctx), uint_thres_, scale_, + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); } else { - greentea_copy(count, (cl_mem) bottom_data,0, (cl_mem) top_data,0, ctx); + greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0, + &ctx); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } #ifdef USE_CUDA @@ -77,12 +76,11 @@ __global__ void DropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } -#endif // USE_CUDA +#endif // USE_CUDA template void DropoutLayer::Backward_gpu(const vector*>& top, @@ -102,12 +100,11 @@ void DropoutLayer::Backward_gpu(const vector*>& top, DropoutBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, mask, uint_thres_, scale_, bottom_diff); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -121,14 +118,15 @@ void DropoutLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_dropout = program.get_kernel( CL_KERNEL_SELECT("dropout_backward")); viennacl::ocl::enqueue( - oclk_dropout(count, WrapHandle((cl_mem) top_diff, ctx), - WrapHandle(mask, ctx), uint_thres_, scale_, - WrapHandle((cl_mem) bottom_diff, ctx)), + oclk_dropout(count, WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle(mask, &ctx), uint_thres_, scale_, + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); } else { - greentea_copy(top[0]->count(), (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, ctx); + greentea_copy(top[0]->count(), (cl_mem) top_diff, 0, + (cl_mem) bottom_diff, 0, &ctx); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 6ef4d292bd1..3e8d46c24ad 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -17,8 +17,7 @@ template __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { @@ -37,7 +36,7 @@ __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void EltwiseLayer::Forward_gpu(const vector*>& bottom, @@ -66,11 +65,14 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) - MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); + MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), + 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) - MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; @@ -78,7 +80,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, LOG(FATAL)<< "Unknown elementwise operation."; } } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -88,17 +90,26 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { - greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)(bottom[0]->gpu_data()),0, (cl_mem)(bottom[1]->gpu_data()),0, - (cl_mem)top_data,0); + greentea_gpu_mul(this->device_context_.id(), + count, (cl_mem)(bottom[0]->gpu_data()), 0, + (cl_mem)(bottom[1]->gpu_data()), 0, + (cl_mem)top_data, 0); for (int i = 2; i < bottom.size(); ++i) { - greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)top_data,0, (cl_mem)(bottom[i]->gpu_data()),0, (cl_mem)top_data,0); + greentea_gpu_mul(this->device_context_.id(), + count, (cl_mem)top_data, 0, + (cl_mem)(bottom[i]->gpu_data()), 0, + (cl_mem)top_data, 0); } } break; case EltwiseParameter_EltwiseOp_SUM: { - greentea_gpu_set(this->device_context_.id(), count, 0, (cl_mem)top_data, 0); + greentea_gpu_set(this->device_context_.id(), count, 0, + (cl_mem)top_data, 0); for (int i = 0; i < bottom.size(); ++i) { - greentea_gpu_axpy(this->device_context_.id(), count, coeffs_[i], (cl_mem)(bottom[i]->gpu_data()),0, (cl_mem)top_data, 0); + greentea_gpu_axpy(this->device_context_.id(), + count, coeffs_[i], + (cl_mem)(bottom[i]->gpu_data()), + 0, (cl_mem)top_data, 0); } } break; @@ -109,12 +120,19 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("eltwise_max_forward")); viennacl::ocl::enqueue( - oclk_max_forward(count, WrapHandle((cl_mem)(bottom[0]->gpu_data()),ctx), WrapHandle((cl_mem)(bottom[1]->gpu_data()),ctx), 0, WrapHandle((cl_mem)top_data,ctx), WrapHandle((cl_mem)mask,ctx)), + oclk_max_forward(count, + WrapHandle((cl_mem)(bottom[0]->gpu_data()), &ctx), + WrapHandle((cl_mem)(bottom[1]->gpu_data()), &ctx), 0, + WrapHandle((cl_mem)top_data, &ctx), + WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); for (int i = 2; i < bottom.size(); ++i) { viennacl::ocl::enqueue( - oclk_max_forward(count, WrapHandle((cl_mem)(top_data),ctx), WrapHandle((cl_mem)(bottom[i]->gpu_data()),ctx), i-1, WrapHandle((cl_mem)top_data,ctx), WrapHandle((cl_mem)mask,ctx)), + oclk_max_forward(count, WrapHandle((cl_mem)(top_data), &ctx), + WrapHandle((cl_mem)(bottom[i]->gpu_data()), &ctx), i-1, + WrapHandle((cl_mem)top_data, &ctx), + WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); } } @@ -123,7 +141,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, LOG(FATAL)<< "Unknown elementwise operation."; } } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -132,8 +150,7 @@ template __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; @@ -141,7 +158,7 @@ __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, bottom_diff[index] = gradient; } } -#endif // USE_CUDA +#endif // USE_CUDA template void EltwiseLayer::Backward_gpu(const vector*>& top, @@ -198,7 +215,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, } } } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -219,24 +236,35 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, continue; } if (!initialized) { - greentea_copy(count, (cl_mem)(bottom[j]->gpu_data()),0, (cl_mem)(bottom_diff), 0, ctx); + greentea_copy(count, + (cl_mem)(bottom[j]->gpu_data()), 0, + (cl_mem)(bottom_diff), 0, &ctx); initialized = true; } else { - greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)bottom[j]->gpu_data(),0, (cl_mem)bottom_diff,0, - (cl_mem)bottom_diff,0); + greentea_gpu_mul(this->device_context_.id(), count, + (cl_mem)bottom[j]->gpu_data(), 0, + (cl_mem)bottom_diff, 0, + (cl_mem)bottom_diff, 0); } } } else { - greentea_gpu_div(this->device_context_.id(), count, (cl_mem)top_data,0, (cl_mem)bottom_data,0, (cl_mem)bottom_diff,0); + greentea_gpu_div(this->device_context_.id(), + count, (cl_mem)top_data, 0, + (cl_mem)bottom_data, 0, (cl_mem)bottom_diff, 0); } - greentea_gpu_mul(this->device_context_.id(), count, (cl_mem)bottom_diff,0, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0); + greentea_gpu_mul(this->device_context_.id(), + count, (cl_mem)bottom_diff, 0, + (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0); } break; case EltwiseParameter_EltwiseOp_SUM: { if (coeffs_[i] == Dtype(1.)) { - greentea_copy(count, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0,ctx); + greentea_copy(count, (cl_mem)top_diff, + 0, (cl_mem)bottom_diff, 0, &ctx); } else { - greentea_gpu_scale(this->device_context_.id(), count, coeffs_[i],(cl_mem)top_diff,0, (cl_mem)bottom_diff,0); + greentea_gpu_scale(this->device_context_.id(), + count, coeffs_[i], (cl_mem)top_diff, + 0, (cl_mem)bottom_diff, 0); } } break; @@ -247,7 +275,9 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, CL_KERNEL_SELECT("eltwise_max_backward")); viennacl::ocl::enqueue( - oclk_max_backward(count, WrapHandle((cl_mem)top_diff,ctx),i, WrapHandle((cl_mem)mask,ctx), WrapHandle((cl_mem)bottom_diff,ctx)), + oclk_max_backward(count, WrapHandle((cl_mem)top_diff, &ctx), i, + WrapHandle((cl_mem)mask, &ctx), + WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); } break; diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index c7129e9041b..d6ebf24d3dd 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -25,7 +25,7 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); Dtype loss = dot / bottom[0]->num() / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_sub(this->device_context_.id(), count, @@ -38,7 +38,7 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (diff_.gpu_data()), 0, &dot); Dtype loss = dot / bottom[0]->num() / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -57,13 +57,13 @@ void EuclideanLossLayer::Backward_gpu( diff_.gpu_data(), // a Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // b -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_axpby(this->device_context_.id(), bottom[i]->count(), alpha, (cl_mem) (diff_.gpu_data()), 0, Dtype(0), (cl_mem) (bottom[i]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/exp_layer.cu b/src/caffe/layers/exp_layer.cu index 4c4974b67cb..992a0edccc8 100644 --- a/src/caffe/layers/exp_layer.cu +++ b/src/caffe/layers/exp_layer.cu @@ -25,7 +25,7 @@ void ExpLayer::Forward_gpu(const vector*>& bottom, if (outer_scale_ != Dtype(1)) { caffe_gpu_scal(count, outer_scale_, top_data); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA if (inner_scale_ == Dtype(1)) { @@ -41,7 +41,7 @@ void ExpLayer::Forward_gpu(const vector*>& bottom, greentea_gpu_scal(this->device_context_.id(), count, outer_scale_, (cl_mem) top_data, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -63,7 +63,7 @@ void ExpLayer::Backward_gpu(const vector*>& top, if (inner_scale_ != Dtype(1)) { caffe_gpu_scal(count, inner_scale_, bottom_diff); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_mul(this->device_context_.id(), count, @@ -73,7 +73,7 @@ void ExpLayer::Backward_gpu(const vector*>& top, greentea_gpu_scal(this->device_context_.id(), count, inner_scale_, (cl_mem) bottom_diff, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/filter_layer.cu b/src/caffe/layers/filter_layer.cu index e87f8d288db..b6ec642cfa9 100644 --- a/src/caffe/layers/filter_layer.cu +++ b/src/caffe/layers/filter_layer.cu @@ -23,17 +23,16 @@ void FilterLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA caffe_copy(dim, bottom_data + data_offset_bottom, top_data + data_offset_top); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_.id()); - greentea_copy(dim, (cl_mem) bottom_data, - data_offset_bottom, (cl_mem) top_data, data_offset_top, ctx); -#endif // USE_GREENTEA + greentea_copy(dim, (cl_mem) bottom_data, data_offset_bottom, + (cl_mem) top_data, data_offset_top, &ctx); +#endif // USE_GREENTEA } - } } } @@ -73,17 +72,17 @@ void FilterLayer::Backward_gpu(const vector*>& top, bottom[i]->mutable_gpu_diff() + data_offset_bottom); } else { // this data was been forwarded data_offset_top = next_to_backward_offset * dim; - ++next_to_backward_offset;// point to next forwarded item index + ++next_to_backward_offset; // point to next forwarded item index caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top, bottom[i]->mutable_gpu_diff() + data_offset_bottom); } } } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_.id()); for (int n = 0; n < bottom[i]->shape(0); ++n) { if (next_to_backward_offset >= indices_to_forward_.size()) { @@ -100,15 +99,16 @@ void FilterLayer::Backward_gpu(const vector*>& top, (cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom); } else { // this data was been forwarded data_offset_top = next_to_backward_offset * dim; - ++next_to_backward_offset;// point to next forwarded item index - greentea_copy(dim, (cl_mem)(top[i]->mutable_gpu_diff()), data_offset_top, - (cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom, ctx); + ++next_to_backward_offset; // point to next forwarded item index + greentea_copy(dim, (cl_mem)(top[i]->mutable_gpu_diff()), + data_offset_top, + (cl_mem)(bottom[i]->mutable_gpu_diff()), + data_offset_bottom, &ctx); } } } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } } } diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 3f7843ecade..376a5b1700f 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -19,7 +19,6 @@ namespace caffe { template void HDF5DataLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); @@ -52,11 +51,11 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, &top[j]->mutable_gpu_data()[i * data_dim]); } } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA Forward_cpu(bottom, top); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index de7cb14ca56..f772a023e0e 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -32,11 +32,11 @@ void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, &label_blob_.mutable_cpu_data()[i * label_datum_dim]); } SaveBlobs(); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA Forward_cpu(bottom, top); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index c842855401a..25cf17508fa 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -7,8 +7,8 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { @@ -26,7 +26,7 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, top_data + top[0]->offset(n)); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -35,13 +35,14 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, this->device_context_.id()); for (int n = 0; n < bottom[0]->num(); ++n) { - greentea_im2col_gpu(program, ctx, (cl_mem)bottom_data, bottom[0]->offset(n), channels_, height_, width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, - (cl_mem)top_data, top[0]->offset(n)); + greentea_im2col_gpu(&program, &ctx, (cl_mem) bottom_data, + bottom[0]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, (cl_mem) top_data, + top[0]->offset(n)); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } template @@ -58,7 +59,7 @@ void Im2colLayer::Backward_gpu(const vector*>& top, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -67,14 +68,14 @@ void Im2colLayer::Backward_gpu(const vector*>& top, this->device_context_.id()); for (int n = 0; n < top[0]->num(); ++n) { - greentea_col2im_gpu(program, ctx, (cl_mem) top_diff, top[0]->offset(n), - channels_, height_, width_, kernel_h_, kernel_w_, - pad_h_, pad_w_, stride_h_, stride_w_, - (cl_mem) bottom_diff, bottom[0]->offset(n)); + greentea_col2im_gpu(&program, &ctx, (cl_mem) top_diff, + top[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, (cl_mem) bottom_diff, + bottom[0]->offset(n)); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } INSTANTIATE_LAYER_GPU_FUNCS(Im2colLayer); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 0155516bf07..f9bf6c32997 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -9,9 +9,9 @@ namespace caffe { -template +template void InnerProductLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const int num_output = this->layer_param_.inner_product_param().num_output(); bias_term_ = this->layer_param_.inner_product_param().bias_term(); N_ = num_output; @@ -23,7 +23,7 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, K_ = bottom[0]->count(axis); // Check if we need to set up the weights if (this->blobs_.size() > 0) { - LOG(INFO) << "Skipping parameter initialization"; + LOG(INFO)<< "Skipping parameter initialization"; } else { if (bias_term_) { this->blobs_.resize(2); @@ -34,26 +34,27 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, vector weight_shape(2); weight_shape[0] = N_; weight_shape[1] = K_; - this->blobs_[0].reset(new Blob(weight_shape,this->device_context_)); + this->blobs_[0].reset(new Blob(weight_shape, + this->device_context_)); // fill the weights shared_ptr > weight_filler(GetFiller( - this->layer_param_.inner_product_param().weight_filler())); + this->layer_param_.inner_product_param().weight_filler())); weight_filler->Fill(this->blobs_[0].get()); // If necessary, intiialize and fill the bias term if (bias_term_) { vector bias_shape(1, N_); - this->blobs_[1].reset(new Blob(bias_shape,this->device_context_)); + this->blobs_[1].reset(new Blob(bias_shape, this->device_context_)); shared_ptr > bias_filler(GetFiller( - this->layer_param_.inner_product_param().bias_filler())); + this->layer_param_.inner_product_param().bias_filler())); bias_filler->Fill(this->blobs_[1].get()); } } // parameter initialization this->param_propagate_down_.resize(this->blobs_.size(), true); } -template +template void InnerProductLayer::Reshape(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // Figure out the dimensions const int axis = bottom[0]->CanonicalAxisIndex( this->layer_param_.inner_product_param().axis()); @@ -77,45 +78,46 @@ void InnerProductLayer::Reshape(const vector*>& bottom, } } -template +template void InnerProductLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); const Dtype* weight = this->blobs_[0]->cpu_data(); - caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., - bottom_data, weight, (Dtype)0., top_data); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., + bottom_data, weight, (Dtype) 0., top_data); if (bias_term_) { - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., - bias_multiplier_.cpu_data(), - this->blobs_[1]->cpu_data(), (Dtype)1., top_data); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., + bias_multiplier_.cpu_data(), + this->blobs_[1]->cpu_data(), (Dtype) 1., top_data); } } -template -void InnerProductLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, +template +void InnerProductLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight - caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); + caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., + top_diff, bottom_data, (Dtype) 1., + this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias - caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)1., - this->blobs_[1]->mutable_cpu_diff()); + caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype) 1., top_diff, + bias_multiplier_.cpu_data(), (Dtype) 1., + this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bottom data - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., - top_diff, this->blobs_[0]->cpu_data(), (Dtype)0., - bottom[0]->mutable_cpu_diff()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., + top_diff, this->blobs_[0]->cpu_data(), (Dtype) 0., + bottom[0]->mutable_cpu_diff()); } } diff --git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index 0a0ddd0ae57..39df634702b 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -25,7 +25,7 @@ void InnerProductLayer::Forward_gpu(const vector*>& bottom, bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype) 1., top_data); } -#endif // USE CUDA +#endif // USE CUDA } else { #ifdef USE_GREENTEA greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, @@ -39,7 +39,7 @@ void InnerProductLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (this->blobs_[1]->gpu_data()), 0, (Dtype) 1., (cl_mem) top_data, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -72,7 +72,7 @@ void InnerProductLayer::Backward_gpu( top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0., bottom[0]->mutable_gpu_diff()); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA if (this->param_propagate_down_[0]) { @@ -106,7 +106,7 @@ void InnerProductLayer::Backward_gpu( (Dtype) 0., (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/log_layer.cu b/src/caffe/layers/log_layer.cu index 3c998781af1..cd194847e4c 100644 --- a/src/caffe/layers/log_layer.cu +++ b/src/caffe/layers/log_layer.cu @@ -31,7 +31,7 @@ void LogLayer::Forward_gpu(const vector*>& bottom, if (base_scale_ != Dtype(1)) { caffe_gpu_scal(count, base_scale_, top_data); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -41,7 +41,8 @@ void LogLayer::Forward_gpu(const vector*>& bottom, greentea_gpu_log(this->device_context_.id(), count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); } else { - greentea_copy(count, (cl_mem) bottom_data,0, (cl_mem) top_data,0, ctx); + greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0, + &ctx); if (input_scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, input_scale_, (cl_mem) top_data, 0); @@ -57,9 +58,8 @@ void LogLayer::Forward_gpu(const vector*>& bottom, greentea_gpu_scal(this->device_context_.id(), count, base_scale_, (cl_mem) top_data, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } template @@ -88,14 +88,14 @@ void LogLayer::Backward_gpu(const vector*>& top, caffe_gpu_scal(count, backward_num_scale_, bottom_diff); } caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); - greentea_copy(count, (cl_mem) bottom_data,0, (cl_mem) bottom_diff,0, - ctx); + greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, + 0, &ctx); if (input_scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, input_scale_, (cl_mem) bottom_diff, 0); @@ -114,9 +114,8 @@ void LogLayer::Backward_gpu(const vector*>& top, greentea_gpu_mul(this->device_context_.id(), count, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, (cl_mem) bottom_diff, 0); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } INSTANTIATE_LAYER_GPU_FUNCS(LogLayer); diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 2c1b5f07fa1..cb6961cebac 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -6,17 +6,17 @@ namespace caffe { -template +template void LRNLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { size_ = this->layer_param_.lrn_param().local_size(); - CHECK_EQ(size_ % 2, 1) << "LRN only supports odd values for local_size"; + CHECK_EQ(size_ % 2, 1)<< "LRN only supports odd values for local_size"; pre_pad_ = (size_ - 1) / 2; alpha_ = this->layer_param_.lrn_param().alpha(); beta_ = this->layer_param_.lrn_param().beta(); k_ = this->layer_param_.lrn_param().k(); - if (this->layer_param_.lrn_param().norm_region() == - LRNParameter_NormRegion_WITHIN_CHANNEL) { + if (this->layer_param_.lrn_param().norm_region() + == LRNParameter_NormRegion_WITHIN_CHANNEL) { // Set up split_layer_ to use inputs in the numerator and denominator. split_top_vec_.clear(); split_top_vec_.push_back(&product_input_); @@ -66,21 +66,21 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, } } -template +template void LRNLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes())<< "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; num_ = bottom[0]->num(); channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); switch (this->layer_param_.lrn_param().norm_region()) { - case LRNParameter_NormRegion_ACROSS_CHANNELS: + case LRNParameter_NormRegion_ACROSS_CHANNELS: top[0]->Reshape(num_, channels_, height_, width_); scale_.Reshape(num_, channels_, height_, width_); break; - case LRNParameter_NormRegion_WITHIN_CHANNEL: + case LRNParameter_NormRegion_WITHIN_CHANNEL: split_layer_->Reshape(bottom, split_top_vec_); square_layer_->Reshape(square_bottom_vec_, square_top_vec_); pool_layer_->Reshape(square_top_vec_, pool_top_vec_); @@ -90,22 +90,22 @@ void LRNLayer::Reshape(const vector*>& bottom, } } -template +template void LRNLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { - case LRNParameter_NormRegion_ACROSS_CHANNELS: - CrossChannelForward_cpu(bottom, top); - break; - case LRNParameter_NormRegion_WITHIN_CHANNEL: - WithinChannelForward(bottom, top); - break; - default: - LOG(FATAL) << "Unknown normalization region."; + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelForward_cpu(bottom, top); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelForward(bottom, top); + break; + default: + LOG(FATAL)<< "Unknown normalization region."; + } } -} -template +template void LRNLayer::CrossChannelForward_cpu( const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); @@ -115,35 +115,35 @@ void LRNLayer::CrossChannelForward_cpu( for (int i = 0; i < scale_.count(); ++i) { scale_data[i] = k_; } - Blob padded_square(1, channels_ + size_ - 1, height_, width_, this->device_context_); + Blob padded_square(1, channels_ + size_ - 1, height_, width_, + this->device_context_); Dtype* padded_square_data = padded_square.mutable_cpu_data(); caffe_set(padded_square.count(), Dtype(0), padded_square_data); Dtype alpha_over_size = alpha_ / size_; // go through the images for (int n = 0; n < num_; ++n) { // compute the padded square - caffe_sqr(channels_ * height_ * width_, - bottom_data + bottom[0]->offset(n), - padded_square_data + padded_square.offset(0, pre_pad_)); + caffe_sqr(channels_ * height_ * width_, bottom_data + bottom[0]->offset(n), + padded_square_data + padded_square.offset(0, pre_pad_)); // Create the first channel scale for (int c = 0; c < size_; ++c) { caffe_axpy(height_ * width_, alpha_over_size, - padded_square_data + padded_square.offset(0, c), - scale_data + scale_.offset(n, 0)); + padded_square_data + padded_square.offset(0, c), + scale_data + scale_.offset(n, 0)); } for (int c = 1; c < channels_; ++c) { // copy previous scale - caffe_copy(height_ * width_, - scale_data + scale_.offset(n, c - 1), - scale_data + scale_.offset(n, c)); + caffe_copy(height_ * width_, scale_data + scale_.offset(n, c - 1), + scale_data + scale_.offset(n, c)); // add head - caffe_axpy(height_ * width_, alpha_over_size, + caffe_axpy( + height_ * width_, alpha_over_size, padded_square_data + padded_square.offset(0, c + size_ - 1), scale_data + scale_.offset(n, c)); // subtract tail caffe_axpy(height_ * width_, -alpha_over_size, - padded_square_data + padded_square.offset(0, c - 1), - scale_data + scale_.offset(n, c)); + padded_square_data + padded_square.offset(0, c - 1), + scale_data + scale_.offset(n, c)); } } @@ -152,9 +152,9 @@ void LRNLayer::CrossChannelForward_cpu( caffe_mul(scale_.count(), top_data, bottom_data, top_data); } -template -void LRNLayer::WithinChannelForward( - const vector*>& bottom, const vector*>& top) { +template +void LRNLayer::WithinChannelForward(const vector*>& bottom, + const vector*>& top) { split_layer_->Forward(bottom, split_top_vec_); square_layer_->Forward(square_bottom_vec_, square_top_vec_); pool_layer_->Forward(square_top_vec_, pool_top_vec_); @@ -162,22 +162,23 @@ void LRNLayer::WithinChannelForward( product_layer_->Forward(product_bottom_vec_, top); } -template +template void LRNLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { - case LRNParameter_NormRegion_ACROSS_CHANNELS: - CrossChannelBackward_cpu(top, propagate_down, bottom); - break; - case LRNParameter_NormRegion_WITHIN_CHANNEL: - WithinChannelBackward(top, propagate_down, bottom); - break; - default: - LOG(FATAL) << "Unknown normalization region."; + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelBackward_cpu(top, propagate_down, bottom); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelBackward(top, propagate_down, bottom); + break; + default: + LOG(FATAL)<< "Unknown normalization region."; + } } -} -template +template void LRNLayer::CrossChannelBackward_cpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { @@ -186,7 +187,8 @@ void LRNLayer::CrossChannelBackward_cpu( const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* scale_data = scale_.cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - Blob padded_ratio(1, channels_ + size_ - 1, height_, width_, this->device_context_); + Blob padded_ratio(1, channels_ + size_ - 1, height_, width_, + this->device_context_); Blob accum_ratio(1, 1, height_, width_, this->device_context_); Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data(); @@ -203,10 +205,12 @@ void LRNLayer::CrossChannelBackward_cpu( for (int n = 0; n < num_; ++n) { int block_offset = scale_.offset(n); // first, compute diff_i * y_i / s_i - caffe_mul(channels_ * height_ * width_, - top_diff + block_offset, top_data + block_offset, + caffe_mul( + channels_ * height_ * width_, top_diff + block_offset, + top_data + block_offset, padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); - caffe_div(channels_ * height_ * width_, + caffe_div( + channels_ * height_ * width_, padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad), scale_data + block_offset, padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); @@ -214,25 +218,28 @@ void LRNLayer::CrossChannelBackward_cpu( caffe_set(accum_ratio.count(), Dtype(0), accum_ratio_data); for (int c = 0; c < size_ - 1; ++c) { caffe_axpy(height_ * width_, 1., - padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); + padded_ratio_data + padded_ratio.offset(0, c), + accum_ratio_data); } for (int c = 0; c < channels_; ++c) { - caffe_axpy(height_ * width_, 1., + caffe_axpy( + height_ * width_, 1., padded_ratio_data + padded_ratio.offset(0, c + size_ - 1), accum_ratio_data); // compute bottom diff - caffe_mul(height_ * width_, - bottom_data + top[0]->offset(n, c), - accum_ratio_data, accum_ratio_times_bottom); + caffe_mul(height_ * width_, bottom_data + top[0]->offset(n, c), + accum_ratio_data, accum_ratio_times_bottom); caffe_axpy(height_ * width_, -cache_ratio_value, - accum_ratio_times_bottom, bottom_diff + top[0]->offset(n, c)); + accum_ratio_times_bottom, + bottom_diff + top[0]->offset(n, c)); caffe_axpy(height_ * width_, -1., - padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); + padded_ratio_data + padded_ratio.offset(0, c), + accum_ratio_data); } } } -template +template void LRNLayer::WithinChannelBackward( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 71ed04a6bbc..d314c2b92a7 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -13,8 +13,7 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int height, const int width, const int size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; @@ -54,7 +53,7 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* const in, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void LRNLayer::Forward_gpu(const vector*>& bottom, @@ -77,12 +76,11 @@ template __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } -#endif // USE_CUDA +#endif // USE_CUDA template void LRNLayer::CrossChannelForward_gpu( @@ -98,18 +96,19 @@ void LRNLayer::CrossChannelForward_gpu( // go through all the channels. int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) - LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( - n_threads, bottom_data, num_, channels_, height_, width_, size_, + LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), + CAFFE_CUDA_NUM_THREADS)( + n_threads, bottom_data, num_, channels_, height_, + width_, size_, alpha_ / size_, k_, scale_data); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) - LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( + LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), + CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, scale_data, -beta_, top_data); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -122,22 +121,21 @@ void LRNLayer::CrossChannelForward_gpu( viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_fill_scale")); viennacl::ocl::enqueue( - oclk_lrn_fill(n_threads, WrapHandle((cl_mem) bottom_data, ctx), num_, + oclk_lrn_fill(n_threads, WrapHandle((cl_mem) bottom_data, &ctx), num_, channels_, height_, width_, size_, alpha_ / size_, k_, - WrapHandle((cl_mem) scale_data, ctx)), + WrapHandle((cl_mem) scale_data, &ctx)), ctx.get_queue()); n_threads = bottom[0]->count(); viennacl::ocl::kernel &oclk_lrn_compute = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_output")); viennacl::ocl::enqueue( - oclk_lrn_compute(n_threads, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) scale_data, ctx), -beta_, - WrapHandle((cl_mem) top_data, ctx)), + oclk_lrn_compute(n_threads, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) scale_data, &ctx), -beta_, + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } template void LRNLayer::CrossChannelForward_gpu( const vector*>& bottom, const vector*>& top); @@ -172,8 +170,7 @@ __global__ void LRNComputeDiff(const int nthreads, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; @@ -221,7 +218,7 @@ __global__ void LRNComputeDiff(const int nthreads, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void LRNLayer::CrossChannelBackward_gpu( @@ -232,12 +229,14 @@ void LRNLayer::CrossChannelBackward_gpu( if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( + LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), + CAFFE_CUDA_NUM_THREADS)( n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), - scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, + scale_.gpu_data(), top[0]->gpu_diff(), num_, + channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -248,17 +247,16 @@ void LRNLayer::CrossChannelBackward_gpu( viennacl::ocl::kernel &oclk_lrn = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_diff")); viennacl::ocl::enqueue( - oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), ctx), - WrapHandle((cl_mem) (top[0]->gpu_data()), ctx), - WrapHandle((cl_mem) (scale_.gpu_data()), ctx), - WrapHandle((cl_mem) (top[0]->gpu_diff()), ctx), num_, + oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), &ctx), + WrapHandle((cl_mem) (top[0]->gpu_data()), &ctx), + WrapHandle((cl_mem) (scale_.gpu_data()), &ctx), + WrapHandle((cl_mem) (top[0]->gpu_diff()), &ctx), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), - WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), ctx)), + WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } template void LRNLayer::CrossChannelBackward_gpu( const vector*>& top, const vector& propagate_down, diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index 3a577b2e269..9af459dd5bd 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -16,9 +16,8 @@ void MergeCropLayer::LayerSetUp(const vector*>& bottom, template void MergeCropLayer::Reshape(const vector*>& bottom, const vector*>& top) { - // Same number of batches requires - CHECK_EQ(bottom[0]->num(),bottom[1]->num()); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); int num = bottom[0]->num(); // All channels of both inputs are copied diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index 6710be9e978..951c4cac164 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -17,10 +17,7 @@ __global__ void CopyForward(const int nthreads, const Dtype* bottom_a, const Dtype* bottom_b, Dtype* top, int num, int channels_a, int channels_b, int height_a, int width_a, int height_b, int width_b) { - - CUDA_KERNEL_LOOP(index, nthreads) - { - + CUDA_KERNEL_LOOP(index, nthreads) { int pad_h = (height_b - height_a) / 2; int pad_w = (width_b - width_a) / 2; @@ -40,13 +37,11 @@ __global__ void CopyForward(const int nthreads, const Dtype* bottom_a, top[index] = bottom_a[aidx]; } else { int channel_id = (index / ((width_a * height_a)) % channels_b); - int bidx = - (((batch_id) * channels_b + channel_id) * height_b - * width_b) + width_b * (h + pad_h) + pad_w + w; + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; top[index] = bottom_b[bidx]; } } - } template @@ -54,10 +49,7 @@ __global__ void CopyBackward(const int nthreads, Dtype* bottom_a, const Dtype* top, int num, int channels_a, int channels_b, int height_a, int width_a, int height_b, int width_b) { - - CUDA_KERNEL_LOOP(index, nthreads) - { - + CUDA_KERNEL_LOOP(index, nthreads) { int batch_id = index / ((channels_a + channels_b) * height_a * width_a); int bottom_id = ((index @@ -74,14 +66,12 @@ __global__ void CopyBackward(const int nthreads, Dtype* bottom_a, bottom_a[aidx] = top[index]; } } - } -#endif // USE_CUDA +#endif // USE_CUDA template void MergeCropLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - int count = top[0]->count(); const Dtype* bottom_data_a = bottom[0]->gpu_data(); @@ -103,10 +93,11 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - CopyForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( + CopyForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS) ( count, bottom_data_a, bottom_data_b, top_data, num, channels_a, channels_b, height_a, width_a, height_b, width_b); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -117,15 +108,14 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_copy_forward = program.get_kernel( CL_KERNEL_SELECT("merge_copy_forward")); viennacl::ocl::enqueue( - oclk_copy_forward(count, WrapHandle((cl_mem) bottom_data_a, ctx), - WrapHandle((cl_mem) bottom_data_b, ctx), - WrapHandle((cl_mem) top_data, ctx), num, channels_a, - channels_b, height_a, width_a, height_b, width_b), + oclk_copy_forward(count, WrapHandle((cl_mem) bottom_data_a, &ctx), + WrapHandle((cl_mem) bottom_data_b, &ctx), + WrapHandle((cl_mem) top_data, &ctx), num, channels_a, + channels_b, height_a, width_a, height_b, width_b), ctx.get_queue()); ctx.get_queue().finish(); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } template @@ -155,10 +145,11 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( + CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS) ( count, bottom_diff_a, top_diff, num, channels_a, channels_b, height_a, width_a, height_b, width_b); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -169,13 +160,13 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_copy_backward = program.get_kernel( CL_KERNEL_SELECT("merge_copy_backward")); viennacl::ocl::enqueue( - oclk_copy_backward(count, WrapHandle((cl_mem) bottom_diff_a, ctx), - WrapHandle((cl_mem) top_diff, ctx), num, channels_a, + oclk_copy_backward(count, WrapHandle((cl_mem) bottom_diff_a, &ctx), + WrapHandle((cl_mem) top_diff, &ctx), num, channels_a, channels_b, height_a, width_a, height_b, width_b), ctx.get_queue()); ctx.get_queue().finish(); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index f32b149eff9..1862c807e61 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -71,7 +71,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_add(temp_.count(), bottom_data, temp_.gpu_data(), top_data); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA if (this->layer_param_.mvn_param().normalize_variance()) { @@ -84,18 +84,18 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, dim, 1. / dim, (cl_mem) (bottom_data), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., - (cl_mem) (mean_.mutable_gpu_data()), 0); // EX + (cl_mem) (mean_.mutable_gpu_data()), 0); greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, dim, 1. / dim, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., - (cl_mem) (variance_.mutable_gpu_data()), 0); // E(X^2) + (cl_mem) (variance_.mutable_gpu_data()), 0); greentea_gpu_powx(this->device_context_.id(), mean_.count(), (cl_mem) mean_.gpu_data(), 0, Dtype(2), - (cl_mem) (temp_.mutable_gpu_data()), 0); // (EX)^2 + (cl_mem) (temp_.mutable_gpu_data()), 0); greentea_gpu_sub(this->device_context_.id(), mean_.count(), (cl_mem) (variance_.gpu_data()), 0, (cl_mem) (temp_.gpu_data()), 0, - (cl_mem) (variance_.mutable_gpu_data()), 0); // variance + (cl_mem) (variance_.mutable_gpu_data()), 0); // do mean and variance normalization // subtract mean @@ -147,7 +147,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) top_data, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -202,51 +202,63 @@ void MVNLayer::Backward_gpu(const vector*>& top, } else { caffe_copy(temp_.count(), top_diff, bottom_diff); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_.id()); if (this->layer_param_.mvn_param().normalize_variance()) { greentea_gpu_mul(this->device_context_.id(), temp_.count(), - (cl_mem)top_data,0, (cl_mem)top_diff,0, (cl_mem)bottom_diff,0); + (cl_mem) top_data, 0, (cl_mem) top_diff, 0, + (cl_mem) bottom_diff, 0); greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, - dim, 1., (cl_mem)bottom_diff,0, (cl_mem)(sum_multiplier_.gpu_data()),0, - 0., (cl_mem)(mean_.mutable_gpu_data()),0); + dim, 1., (cl_mem) bottom_diff, 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (mean_.mutable_gpu_data()), 0); greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, num, dim, 1, 1., (cl_mem)(mean_.gpu_data()),0, - (cl_mem)(sum_multiplier_.gpu_data()),0, 0., (cl_mem)bottom_diff,0); + CblasNoTrans, num, dim, 1, 1., + (cl_mem) (mean_.gpu_data()), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) bottom_diff, 0); greentea_gpu_mul(this->device_context_.id(), temp_.count(), - (cl_mem)top_data,0, (cl_mem)bottom_diff,0, (cl_mem)bottom_diff,0); + (cl_mem) top_data, 0, (cl_mem) bottom_diff, 0, + (cl_mem) bottom_diff, 0); greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, - dim, 1., (cl_mem)top_diff,0, (cl_mem)(sum_multiplier_.gpu_data()),0, - 0., (cl_mem)(mean_.mutable_gpu_data()),0); + dim, 1., (cl_mem) top_diff, 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (mean_.mutable_gpu_data()), 0); greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, - CblasNoTrans, num, dim, 1, 1., (cl_mem)(mean_.gpu_data()),0, - (cl_mem)(sum_multiplier_.gpu_data()),0, 1., (cl_mem)bottom_diff,0); + CblasNoTrans, num, dim, 1, 1., + (cl_mem) (mean_.gpu_data()), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 1., + (cl_mem) bottom_diff, 0); greentea_gpu_axpby(this->device_context_.id(), temp_.count(), - Dtype(1), (cl_mem)top_diff,0, Dtype(-1. / dim), - (cl_mem)bottom_diff,0); + Dtype(1), (cl_mem) top_diff, 0, + Dtype(-1. / dim), (cl_mem) bottom_diff, 0); // put the squares of bottom into temp_ greentea_gpu_powx(this->device_context_.id(), temp_.count(), - (cl_mem)bottom_data,0, Dtype(2), (cl_mem)(temp_.mutable_gpu_data()),0); + (cl_mem) bottom_data, 0, Dtype(2), + (cl_mem) (temp_.mutable_gpu_data()), 0); greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, CblasNoTrans, num, dim, 1, 1., - (cl_mem)(variance_.gpu_data()),0, (cl_mem)(sum_multiplier_.gpu_data()),0, - 0., (cl_mem)(temp_.mutable_gpu_data()),0); + (cl_mem) (variance_.gpu_data()), 0, + (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., + (cl_mem) (temp_.mutable_gpu_data()), 0); greentea_gpu_div(this->device_context_.id(), temp_.count(), - (cl_mem)bottom_diff,0, (cl_mem)(temp_.gpu_data()),0, (cl_mem)bottom_diff,0); + (cl_mem) bottom_diff, 0, + (cl_mem) (temp_.gpu_data()), 0, + (cl_mem) bottom_diff, 0); } else { - greentea_copy(temp_.count(), (cl_mem)top_diff, 0, - (cl_mem)bottom_diff, 0, ctx); + greentea_copy(temp_.count(), (cl_mem) top_diff, 0, + (cl_mem) bottom_diff, 0, &ctx); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index f9f4cd7b814..5cdfbf92d0e 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -19,8 +19,7 @@ __global__ void MaxPoolForward(const int nthreads, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; @@ -61,8 +60,7 @@ __global__ void AvePoolForward(const int nthreads, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; @@ -98,8 +96,7 @@ __global__ void StoPoolForwardTrain(const int nthreads, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; @@ -142,8 +139,7 @@ __global__ void StoPoolForwardTest(const int nthreads, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; @@ -167,7 +163,7 @@ __global__ void StoPoolForwardTest(const int nthreads, top_data[index] = cumvalues / cumsum; } } -#endif // USE_CUDA +#endif // USE_CUDA template void PoolingLayer::Forward_gpu(const vector*>& bottom, @@ -190,7 +186,8 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, @@ -198,7 +195,8 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); @@ -228,9 +226,8 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, LOG(FATAL)<< "Unknown pooling method."; } } - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -248,14 +245,14 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( CL_KERNEL_SELECT("max_pool_forward")); viennacl::ocl::enqueue( - oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, ctx), + oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - WrapHandle((cl_mem) top_data, ctx), + WrapHandle((cl_mem) top_data, &ctx), mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, ctx), - WrapHandle((cl_mem) top_mask, ctx)), + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx)), ctx.get_queue()); } break; @@ -263,32 +260,44 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel( CL_KERNEL_SELECT("ave_pool_forward")); viennacl::ocl::enqueue( - oclk_ave_pool_forward(count, WrapHandle((cl_mem) bottom_data,ctx), bottom[0]->num(), channels_, + oclk_ave_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), + bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, WrapHandle((cl_mem)top_data,ctx)), + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); } break; case PoolingParameter_PoolMethod_STOCHASTIC: { if (this->phase_ == caffe::TRAIN) { // We need to create the random index as well. - greentea_gpu_rng_uniform(this->device_context_.id(),count, Dtype(0), Dtype(1), - (cl_mem)(rand_idx_.mutable_gpu_data()),0); + greentea_gpu_rng_uniform(this->device_context_.id(), count, + Dtype(0), Dtype(1), + (cl_mem)(rand_idx_.mutable_gpu_data()), 0); viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( CL_KERNEL_SELECT("sto_pool_forward_train")); viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data,ctx), bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, - stride_h_, stride_w_, WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()),ctx), WrapHandle((cl_mem)top_data,ctx)), + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, + stride_h_, stride_w_, + WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx), + WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); } else { viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( CL_KERNEL_SELECT("sto_pool_forward_test")); viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data,ctx), bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, - stride_h_, stride_w_, WrapHandle((cl_mem)top_data,ctx)), + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, + pooled_width_, kernel_h_, kernel_w_, + stride_h_, stride_w_, WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); } } @@ -297,7 +306,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, LOG(FATAL)<< "Unknown pooling method."; } } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -312,8 +321,7 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; @@ -361,8 +369,7 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; @@ -399,8 +406,7 @@ __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; @@ -421,13 +427,13 @@ __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, gradient += top_diff_slice[ph * pooled_width + pw] * (index - == static_cast(rand_idx_slice[ph * pooled_width + pw])); + == static_cast(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } -#endif // USE_CUDA +#endif // USE_CUDA template void PoolingLayer::Backward_gpu(const vector*>& top, @@ -455,7 +461,8 @@ void PoolingLayer::Backward_gpu(const vector*>& top, mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, @@ -463,14 +470,16 @@ void PoolingLayer::Backward_gpu(const vector*>& top, break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + AvePoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + StoPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, @@ -480,9 +489,8 @@ void PoolingLayer::Backward_gpu(const vector*>& top, LOG(FATAL)<< "Unknown pooling method."; } } - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -502,15 +510,15 @@ void PoolingLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( CL_KERNEL_SELECT("max_pool_backward")); viennacl::ocl::enqueue( - oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, ctx), + oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, ctx), - WrapHandle((cl_mem) top_mask, ctx), + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx), top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - WrapHandle((cl_mem) bottom_diff, ctx)), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); } break; @@ -518,12 +526,12 @@ void PoolingLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_ave_pool_backward = program.get_kernel( CL_KERNEL_SELECT("ave_pool_backward")); viennacl::ocl::enqueue( - oclk_ave_pool_backward(count, WrapHandle((cl_mem) top_diff, ctx), + oclk_ave_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - WrapHandle((cl_mem) bottom_diff, ctx)), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); } break; @@ -532,11 +540,11 @@ void PoolingLayer::Backward_gpu(const vector*>& top, CL_KERNEL_SELECT("sto_pool_backward")); viennacl::ocl::enqueue( oclk_sto_pool_backward( - count, WrapHandle((cl_mem) (rand_idx_.gpu_data()), ctx), - WrapHandle((cl_mem) top_diff, ctx), top[0]->num(), channels_, + count, WrapHandle((cl_mem) (rand_idx_.gpu_data()), &ctx), + WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, - WrapHandle((cl_mem) bottom_diff, ctx)), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); } break; diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index 12666a8c596..1a6f3fa957b 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -9,7 +9,7 @@ #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" -#endif // USE_GREENTEA +#endif // USE_GREENTEA namespace caffe { @@ -25,8 +25,7 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int kstride_h, const int kstride_w, const int pad_h, const int pad_w, Dtype* top_data, int* mask, Dtype* top_mask) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -68,8 +67,7 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, const int kstride_h, const int kstride_w, const int pad_h, const int pad_w, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -106,8 +104,7 @@ __global__ void StoPoolForwardTrain(const int nthreads, const int stride_w, const int kstride_h, const int kstride_w, Dtype* rand_idx, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -150,8 +147,7 @@ __global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, const int ext_kernel_w, const int stride_h, const int stride_w, const int kstride_h, const int kstride_w, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; @@ -174,12 +170,11 @@ __global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, top_data[index] = cumvalues / cumsum; } } -#endif // USE_CUDA +#endif // USE_CUDA template void PoolingSKLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); @@ -201,7 +196,8 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, @@ -211,7 +207,8 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, @@ -247,7 +244,7 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, } CUDA_POST_KERNEL_CHECK; -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -265,16 +262,17 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( CL_KERNEL_SELECT("max_pool_forward_sk")); viennacl::ocl::enqueue( - oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, ctx), + oclk_max_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, pad_h_, pad_w_, - WrapHandle((cl_mem) top_data, ctx), + WrapHandle((cl_mem) top_data, &ctx), mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, ctx), - WrapHandle((cl_mem) top_mask, ctx)), + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx)), ctx.get_queue()); } break; @@ -282,37 +280,46 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel( CL_KERNEL_SELECT("ave_pool_forward_sk")); viennacl::ocl::enqueue( - oclk_ave_pool_forward(count, WrapHandle((cl_mem) bottom_data,ctx), bottom[0]->num(), channels_, + oclk_ave_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), + bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, WrapHandle((cl_mem)top_data,ctx)), + pad_h_, pad_w_, WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); } break; case PoolingParameter_PoolMethod_STOCHASTIC: { if (this->phase_ == caffe::TRAIN) { // We need to create the random index as well. - greentea_gpu_rng_uniform(this->device_context_.id(),count, Dtype(0), Dtype(1), - (cl_mem)(rand_idx_.mutable_gpu_data()),0); + greentea_gpu_rng_uniform(this->device_context_.id(), count, + Dtype(0), Dtype(1), + (cl_mem)(rand_idx_.mutable_gpu_data()), 0); viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( CL_KERNEL_SELECT("sto_pool_forward_train_sk")); viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data,ctx), bottom[0]->num(), channels_, + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, - WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()),ctx), WrapHandle((cl_mem)(top_data),ctx)), + WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx), + WrapHandle((cl_mem)(top_data), &ctx)), ctx.get_queue()); } else { viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( CL_KERNEL_SELECT("sto_pool_forward_test_sk")); viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data,ctx), bottom[0]->num(), channels_, + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, WrapHandle((cl_mem)top_data,ctx)), + stride_h_, stride_w_, kstride_h_, kstride_w_, + WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); } } @@ -321,7 +328,7 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, LOG(FATAL)<< "Unknown pooling method."; } } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -338,8 +345,7 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const int kstride_h, const int kstride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; @@ -383,7 +389,7 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, bottom_diff[index] = gradient; } } -#endif // USE_CUDA +#endif // USE_CUDA template void PoolingSKLayer::Backward_gpu(const vector*>& top, @@ -411,7 +417,8 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, @@ -420,11 +427,11 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, bottom_diff); break; default: - LOG(FATAL)<<"Unknown or unsupported pooling method in Backward_gpu()."; + LOG(FATAL)<< + "Unknown or unsupported pooling method in Backward_gpu()."; } - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -445,23 +452,24 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( CL_KERNEL_SELECT("max_pool_backward_sk")); viennacl::ocl::enqueue( - oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, ctx), + oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, ctx), - WrapHandle((cl_mem) top_mask, ctx), + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx), top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, pad_h_, pad_w_, - WrapHandle((cl_mem) bottom_diff, ctx)), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); } break; default: - LOG(FATAL)<<"Unknown or unsupported pooling method in Backward_gpu()."; + LOG(FATAL)<< + "Unknown or unsupported pooling method in Backward_gpu()."; } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/power_layer.cu b/src/caffe/layers/power_layer.cu index 1b85f1eeba5..839036db20b 100644 --- a/src/caffe/layers/power_layer.cu +++ b/src/caffe/layers/power_layer.cu @@ -37,7 +37,7 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, if (power_ != Dtype(1)) { caffe_gpu_powx(count, top_data, power_, top_data); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -50,7 +50,8 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, return; } const Dtype* bottom_data = bottom[0]->gpu_data(); - greentea_copy(count, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, ctx); + greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0, + &ctx); if (scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, scale_, (cl_mem) top_data, 0); @@ -64,7 +65,7 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, (cl_mem) top_data, 0, power_, (cl_mem) top_data, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -118,7 +119,7 @@ void PowerLayer::Backward_gpu(const vector*>& top, } } caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -155,8 +156,8 @@ void PowerLayer::Backward_gpu(const vector*>& top, greentea_gpu_scal(this->device_context_.id(), count, power_, (cl_mem) bottom_diff, 0); } else { - greentea_copy(count, (cl_mem) bottom_data,0, - (cl_mem) bottom_diff,0, ctx); + greentea_copy(count, (cl_mem) bottom_data, 0, + (cl_mem) bottom_diff, 0, &ctx); if (scale_ != Dtype(1)) { greentea_gpu_scal(this->device_context_.id(), count, scale_, (cl_mem) bottom_diff, 0); @@ -178,9 +179,8 @@ void PowerLayer::Backward_gpu(const vector*>& top, greentea_gpu_mul(this->device_context_.id(), count, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, (cl_mem) bottom_diff, 0); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } } diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index b49c2debd74..ac59792c996 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -10,7 +10,8 @@ namespace caffe { template void PReLULayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - CHECK_GE(bottom[0]->num_axes(), 2)<< "Number of axes of bottom blob must be >=2."; + CHECK_GE(bottom[0]->num_axes(), 2) + << "Number of axes of bottom blob must be >=2."; PReLUParameter prelu_param = this->layer_param().prelu_param(); int channels = bottom[0]->channels(); channel_shared_ = prelu_param.channel_shared(); @@ -19,9 +20,11 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, } else { this->blobs_.resize(1); if (channel_shared_) { - this->blobs_[0].reset(new Blob(vector(0),this->device_context_)); + this->blobs_[0].reset(new Blob(vector(0), + this->device_context_)); } else { - this->blobs_[0].reset(new Blob(vector(1, channels),this->device_context_)); + this->blobs_[0].reset(new Blob(vector(1, channels), + this->device_context_)); } shared_ptr > filler; if (prelu_param.has_filler()) { @@ -52,7 +55,8 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, template void PReLULayer::Reshape(const vector*>& bottom, const vector*>& top) { - CHECK_GE(bottom[0]->num_axes(), 2)<< "Number of axes of bottom blob must be >=2."; + CHECK_GE(bottom[0]->num_axes(), 2) + << "Number of axes of bottom blob must be >=2."; top[0]->ReshapeLike(*bottom[0]); if (bottom[0] == top[0]) { // For in-place computation diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index 13701a6df95..b716fb1039d 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -17,8 +17,7 @@ template __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int div_factor) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } @@ -30,8 +29,7 @@ __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const int div_factor) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); @@ -42,12 +40,11 @@ __global__ void PReLUBackward(const int n, const int channels, const int dim, template __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } -#endif // USE_CUDA +#endif // USE_CUDA template void PReLULayer::Forward_gpu(const vector*>& bottom, @@ -68,11 +65,11 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, } // NOLINT_NEXT_LINE(whitespace/operators) - PReLUForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + PReLUForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, channels, dim, bottom_data, top_data, slope_data, div_factor); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -81,18 +78,20 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, this->device_context_.id()); if (top[0] == bottom[0]) { - greentea_copy(count, (cl_mem)bottom_data,0, (cl_mem)(bottom_memory_.mutable_gpu_data()),0, ctx); + greentea_copy(count, (cl_mem) bottom_data, 0, + (cl_mem) (bottom_memory_.mutable_gpu_data()), 0, + &ctx); } viennacl::ocl::kernel &oclk_prelu = program.get_kernel( CL_KERNEL_SELECT("prelu_forward")); viennacl::ocl::enqueue( - oclk_prelu(count, channels, dim, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) top_data, ctx), - WrapHandle((cl_mem) slope_data, ctx), div_factor), + oclk_prelu(count, channels, dim, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx), + WrapHandle((cl_mem) slope_data, &ctx), div_factor), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -129,8 +128,7 @@ void PReLULayer::Backward_gpu(const vector*>& top, cdim, top_diff + top[0]->offset(n), bottom_data + bottom[0]->offset(n), backward_buff_.mutable_gpu_diff()); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), @@ -156,10 +154,9 @@ void PReLULayer::Backward_gpu(const vector*>& top, CAFFE_CUDA_NUM_THREADS)( count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -172,14 +169,13 @@ void PReLULayer::Backward_gpu(const vector*>& top, int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { - viennacl::ocl::kernel &oclk_prelu_param = program.get_kernel( CL_KERNEL_SELECT("prelu_param_backward")); viennacl::ocl::enqueue( oclk_prelu_param( - cdim, WrapHandle((cl_mem) top_diff, ctx), top[0]->offset(n), - WrapHandle((cl_mem) bottom_data, ctx), bottom[0]->offset(n), - WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), ctx)), + cdim, WrapHandle((cl_mem) top_diff, &ctx), top[0]->offset(n), + WrapHandle((cl_mem) bottom_data, &ctx), bottom[0]->offset(n), + WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), &ctx)), ctx.get_queue()); if (channel_shared_) { @@ -211,13 +207,13 @@ void PReLULayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_prelu = program.get_kernel( CL_KERNEL_SELECT("prelu_backward")); viennacl::ocl::enqueue( - oclk_prelu(count, channels, dim, WrapHandle((cl_mem) top_diff, ctx), - WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) bottom_diff, ctx), - WrapHandle((cl_mem) slope_data, ctx), div_factor), + oclk_prelu(count, channels, dim, WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) bottom_diff, &ctx), + WrapHandle((cl_mem) slope_data, &ctx), div_factor), ctx.get_queue()); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/reduction_layer.cu b/src/caffe/layers/reduction_layer.cu index 301635d511d..b0972253a2e 100644 --- a/src/caffe/layers/reduction_layer.cu +++ b/src/caffe/layers/reduction_layer.cu @@ -49,7 +49,7 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, top_data = top[0]->mutable_gpu_data(); caffe_gpu_scal(num_, coeff_, top_data); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA if (sum_multiplier_.count() > 0) { @@ -88,7 +88,7 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, greentea_gpu_scal(this->device_context_.id(), num_, coeff_, (cl_mem) top_data, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -132,11 +132,13 @@ void ReductionLayer::Backward_gpu(const vector*>& top, caffe_gpu_set(dim_, bottom_coeff, bottom_diff + bottom_diff_off); break; case ReductionParameter_ReductionOp_ASUM: - caffe_gpu_sign(dim_, bottom_data + bottom_data_off, bottom_diff + bottom_diff_off); + caffe_gpu_sign(dim_, bottom_data + bottom_data_off, + bottom_diff + bottom_diff_off); caffe_gpu_scal(dim_, bottom_coeff, bottom_diff + bottom_diff_off); break; case ReductionParameter_ReductionOp_SUMSQ: - caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data + bottom_data_off, bottom_diff + bottom_diff_off); + caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data + bottom_data_off, + bottom_diff + bottom_diff_off); break; default: LOG(FATAL)<< "Unknown reduction op: " @@ -146,7 +148,7 @@ void ReductionLayer::Backward_gpu(const vector*>& top, bottom_diff_off += dim_; ++top_diff_off; } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA for (int i = 0; i < num_; ++i) { @@ -155,19 +157,22 @@ void ReductionLayer::Backward_gpu(const vector*>& top, case ReductionParameter_ReductionOp_SUM: case ReductionParameter_ReductionOp_MEAN: greentea_gpu_set(this->device_context_.id(), dim_, - bottom_coeff, (cl_mem) bottom_diff, bottom_diff_off); + bottom_coeff, (cl_mem) bottom_diff, + bottom_diff_off); break; case ReductionParameter_ReductionOp_ASUM: greentea_gpu_sign(this->device_context_.id(), dim_, (cl_mem) bottom_data, bottom_data_off, (cl_mem) bottom_diff, bottom_diff_off); greentea_gpu_scal(this->device_context_.id(), dim_, - bottom_coeff, (cl_mem) bottom_diff, bottom_diff_off); + bottom_coeff, (cl_mem) bottom_diff, + bottom_diff_off); break; case ReductionParameter_ReductionOp_SUMSQ: greentea_gpu_scale(this->device_context_.id(), dim_, - 2 * bottom_coeff, (cl_mem) bottom_data, bottom_data_off, - (cl_mem) bottom_diff, bottom_diff_off); + 2 * bottom_coeff, (cl_mem) bottom_data, + bottom_data_off, (cl_mem) bottom_diff, + bottom_diff_off); break; default: LOG(FATAL)<< "Unknown reduction op: " @@ -177,7 +182,7 @@ void ReductionLayer::Backward_gpu(const vector*>& top, bottom_diff_off += dim_; ++top_diff_off; } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index 8e9e5734b91..6ac882334f9 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -15,12 +15,11 @@ namespace caffe { template __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } -#endif // USE_CUDA +#endif // USE_CUDA template void ReLULayer::Forward_gpu(const vector*>& bottom, @@ -32,11 +31,11 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - ReLUForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + ReLUForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data, negative_slope); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -46,11 +45,11 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( - oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) top_data, ctx), negative_slope), + oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data @@ -64,13 +63,12 @@ template __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } -#endif // USE_CUDA +#endif // USE_CUDA template void ReLULayer::Backward_gpu(const vector*>& top, @@ -85,11 +83,11 @@ void ReLULayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - ReLUBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + ReLUBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, bottom_data, bottom_diff, negative_slope); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -99,13 +97,13 @@ void ReLULayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( CL_KERNEL_SELECT("relu_backward")); viennacl::ocl::enqueue( - oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, ctx), - WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) bottom_diff, ctx), + oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) bottom_diff, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 3113f7518e8..13e0581d151 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -36,19 +36,23 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( // Scale down gradient const Dtype loss_weight = top[0]->cpu_diff()[0]; caffe_gpu_scal(count, loss_weight / num, bottom_diff); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_.id()); // First, compute the diff - greentea_copy(count, (cl_mem)sigmoid_output_data, 0, (cl_mem)bottom_diff, 0, ctx); - greentea_gpu_axpy(this->device_context_.id(), count, Dtype(-1), (cl_mem)target,0, (cl_mem)bottom_diff,0); + greentea_copy(count, (cl_mem)sigmoid_output_data, 0, + (cl_mem)bottom_diff, 0, &ctx); + greentea_gpu_axpy(this->device_context_.id(), count, + Dtype(-1), (cl_mem)target, 0, + (cl_mem)bottom_diff, 0); // Scale down gradient const Dtype loss_weight = top[0]->cpu_diff()[0]; - greentea_gpu_scal(this->device_context_.id(), count, loss_weight / num, (cl_mem)bottom_diff,0); -#endif // USE_GREENTEA + greentea_gpu_scal(this->device_context_.id(), count, loss_weight / num, + (cl_mem)bottom_diff, 0); +#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index f476cc4ad5e..b0576c41bcb 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -10,12 +10,11 @@ namespace caffe { #ifdef USE_CUDA template __global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out[index] = 1. / (1. + exp(-in[index])); } } -#endif // USE_CUDA +#endif // USE_CUDA template void SigmoidLayer::Forward_gpu(const vector*>& bottom, @@ -27,11 +26,11 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - SigmoidForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + SigmoidForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -42,10 +41,10 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( CL_KERNEL_SELECT("sigmoid_forward")); viennacl::ocl::enqueue( - oclk_sigmoid(count, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) top_data, ctx)), + oclk_sigmoid(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } // << " count: " << count << " bottom_data: " @@ -59,13 +58,12 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, template __global__ void SigmoidBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { const Dtype sigmoid_x = out_data[index]; out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); } } -#endif // USE_CUDA +#endif // USE_CUDA template void SigmoidLayer::Backward_gpu(const vector*>& top, @@ -80,11 +78,11 @@ void SigmoidLayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - SigmoidBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + SigmoidBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, top_data, bottom_diff); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -95,13 +93,12 @@ void SigmoidLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( CL_KERNEL_SELECT("sigmoid_backward")); viennacl::ocl::enqueue( - oclk_sigmoid(count, WrapHandle((cl_mem) top_diff, ctx), - WrapHandle((cl_mem) top_data, ctx), - WrapHandle((cl_mem) bottom_diff, ctx)), + oclk_sigmoid(count, WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle((cl_mem) top_data, &ctx), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } } diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index d35b942df80..af1313fc0fd 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -27,7 +27,7 @@ void SilenceLayer::Backward_gpu(const vector*>& top, #ifdef USE_CUDA caffe_gpu_set(bottom[i]->count(), Dtype(0), bottom[i]->mutable_gpu_data()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -39,7 +39,7 @@ void SilenceLayer::Backward_gpu(const vector*>& top, viennacl::ocl::enqueue( oclk_gpu_set( bottom[i]->count(), Dtype(0), - WrapHandle((cl_mem) bottom[i]->mutable_gpu_data(),ctx)), + WrapHandle((cl_mem) bottom[i]->mutable_gpu_data(), &ctx)), ctx.get_queue()); ctx.get_queue().finish(); #endif diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 1770b240f3b..1ca1b414d3b 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -13,8 +13,7 @@ __global__ void Slice(const int nthreads, const Dtype* in_data, const int slice_size, const int bottom_slice_axis, const int top_slice_axis, const int offset_slice_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int total_slice_size = slice_size * top_slice_axis; const int slice_num = index / total_slice_size; const int slice_index = index % total_slice_size; @@ -27,7 +26,7 @@ __global__ void Slice(const int nthreads, const Dtype* in_data, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void SliceLayer::Forward_gpu(const vector*>& bottom, @@ -48,7 +47,7 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( nthreads, bottom_data, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -59,12 +58,12 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_slice = program.get_kernel( CL_KERNEL_SELECT("slice")); viennacl::ocl::enqueue( - oclk_slice(nthreads, WrapHandle((cl_mem) bottom_data, ctx), + oclk_slice(nthreads, WrapHandle((cl_mem) bottom_data, &ctx), kForward ? 1 : 0, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, - WrapHandle((cl_mem) top_data, ctx)), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } offset_slice_axis += top_slice_axis; @@ -94,7 +93,7 @@ void SliceLayer::Backward_gpu(const vector*>& top, CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( nthreads, top_diff, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -105,12 +104,12 @@ void SliceLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_slice = program.get_kernel( CL_KERNEL_SELECT("slice")); viennacl::ocl::enqueue( - oclk_slice(nthreads, WrapHandle((cl_mem) top_diff, ctx), + oclk_slice(nthreads, WrapHandle((cl_mem) top_diff, &ctx), kForward ? 1 : 0, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, - WrapHandle((cl_mem) bottom_diff, ctx)), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } offset_slice_axis += top_slice_axis; } diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 284af3220f1..92cda7edf55 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -11,8 +11,8 @@ #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { @@ -22,8 +22,7 @@ template __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { - CUDA_KERNEL_LOOP(index, num * spatial_dim) - { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; @@ -39,8 +38,7 @@ __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { - CUDA_KERNEL_LOOP(index, count) - { + CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; @@ -49,8 +47,7 @@ __global__ void kernel_channel_subtract(const int count, const int num, template __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { - CUDA_KERNEL_LOOP(index, count) - { + CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } @@ -59,8 +56,7 @@ template __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { - CUDA_KERNEL_LOOP(index, num * spatial_dim) - { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; @@ -75,8 +71,7 @@ template __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { - CUDA_KERNEL_LOOP(index, count) - { + CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; @@ -87,8 +82,7 @@ template __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { - CUDA_KERNEL_LOOP(index, num * spatial_dim) - { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; @@ -104,7 +98,6 @@ __global__ void kernel_channel_dot(const int num, const int channels, template void SoftmaxLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); @@ -113,7 +106,6 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, int channels = bottom[0]->channels(); int spatial_dim = bottom[0]->height() * bottom[0]->width(); - if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // CUDA backend code @@ -132,7 +124,8 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) - kernel_exp CUDA_KERNEL(CAFFE_GET_BLOCKS(num * channels * spatial_dim), + kernel_exp CUDA_KERNEL( + CAFFE_GET_BLOCKS(num * channels * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num * channels * spatial_dim, top_data, top_data); // sum after exp @@ -153,13 +146,15 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_.id()); - greentea_copy(count, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, ctx); + greentea_copy(count, (cl_mem)bottom_data, + 0, (cl_mem)top_data, 0, &ctx); viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_max")); viennacl::ocl::enqueue( - oclk_channel_max(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, ctx), - WrapHandle((cl_mem)scale_data, ctx)), + oclk_channel_max(num, channels, spatial_dim, + WrapHandle((cl_mem)top_data, &ctx), + WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); @@ -167,24 +162,26 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, - WrapHandle((cl_mem)scale_data, ctx), - WrapHandle((cl_mem)top_data, ctx)), + WrapHandle((cl_mem)scale_data, &ctx), + WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_exp = program.get_kernel( CL_KERNEL_SELECT("kernel_exp")); viennacl::ocl::enqueue( - oclk_exp(num * channels * spatial_dim, WrapHandle((cl_mem)top_data, ctx), - WrapHandle((cl_mem)top_data, ctx)), + oclk_exp(num * channels * spatial_dim, + WrapHandle((cl_mem)top_data, &ctx), + WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_sum")); viennacl::ocl::enqueue( - oclk_channel_sum(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, ctx), - WrapHandle((cl_mem)scale_data, ctx)), + oclk_channel_sum(num, channels, spatial_dim, + WrapHandle((cl_mem)top_data, &ctx), + WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); @@ -192,8 +189,8 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("kernel_channel_div")); viennacl::ocl::enqueue( oclk_channel_div(count, num, channels, spatial_dim, - WrapHandle((cl_mem)scale_data, ctx), - WrapHandle((cl_mem)top_data, ctx)), + WrapHandle((cl_mem)scale_data, &ctx), + WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); @@ -217,7 +214,8 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(top[0]->count(), top_diff, bottom_diff); - // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. + // Compute inner1d(top_diff, top_data) and + // subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_dot CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_diff, top_data, @@ -237,26 +235,29 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_.id()); - greentea_copy(top[0]->count(), (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, ctx); + greentea_copy(top[0]->count(), (cl_mem)top_diff, + 0, (cl_mem)bottom_diff, 0, &ctx); viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_dot")); viennacl::ocl::enqueue( oclk_channel_dot(num, channels, spatial_dim, - WrapHandle((cl_mem)top_diff, ctx), WrapHandle((cl_mem)top_data, ctx), - WrapHandle((cl_mem)scale_data, ctx)), + WrapHandle((cl_mem)top_diff, &ctx), + WrapHandle((cl_mem)top_data, &ctx), + WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, - WrapHandle((cl_mem)scale_data, ctx), - WrapHandle((cl_mem)bottom_diff, ctx)), + WrapHandle((cl_mem)scale_data, &ctx), + WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); greentea_gpu_mul(this->device_context_.id(), top[0]->count(), - (cl_mem)bottom_diff, 0, (cl_mem)top_data, 0, (cl_mem)bottom_diff, 0); + (cl_mem)bottom_diff, 0, + (cl_mem)top_data, 0, (cl_mem)bottom_diff, 0); #endif } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 7f4c9ec4b5d..c6ce3326524 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -7,8 +7,8 @@ #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { @@ -22,8 +22,7 @@ __global__ void SoftmaxLossForwardGPU(const int nthreads, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast(label[n * spatial_dim + s]); @@ -38,7 +37,7 @@ __global__ void SoftmaxLossForwardGPU(const int nthreads, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void SoftmaxWithLossLayer::Forward_gpu( @@ -77,7 +76,7 @@ void SoftmaxWithLossLayer::Forward_gpu( if (top.size() == 2) { top[1]->ShareData(prob_); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -97,11 +96,11 @@ void SoftmaxWithLossLayer::Forward_gpu( viennacl::ocl::kernel &oclk_softmax_loss_forward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_forward")); viennacl::ocl::enqueue( - oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, ctx), - WrapHandle(label, ctx), - WrapHandle(loss_data, ctx), num, dim, + oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, &ctx), + WrapHandle(label, &ctx), + WrapHandle(loss_data, &ctx), num, dim, spatial_dim, has_ignore_label_ ? 1 : 0, - ignore_label_, WrapHandle(counts, ctx)), + ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype loss; @@ -120,7 +119,7 @@ void SoftmaxWithLossLayer::Forward_gpu( if (top.size() == 2) { top[1]->ShareData(prob_); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -134,8 +133,7 @@ __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; - CUDA_KERNEL_LOOP(index, nthreads) - { + CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast(label[n * spatial_dim + s]); @@ -151,7 +149,7 @@ __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, } } } -#endif // USE_CUDA +#endif // USE_CUDA template void SoftmaxWithLossLayer::Backward_gpu( @@ -188,7 +186,7 @@ void SoftmaxWithLossLayer::Backward_gpu( } else { caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -199,7 +197,8 @@ void SoftmaxWithLossLayer::Backward_gpu( cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); cl_mem prob_data = (cl_mem)(prob_.gpu_data()); cl_mem top_data = (cl_mem)(top[0]->gpu_data()); - greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data,0, bottom_diff,0,ctx); + greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), + prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); const int num = prob_.num(); const int dim = prob_.count() / num; @@ -210,18 +209,24 @@ void SoftmaxWithLossLayer::Backward_gpu( viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_backward")); viennacl::ocl::enqueue( - oclk_softmax_loss_backward(nthreads, WrapHandle(top_data,ctx), WrapHandle(label,ctx), WrapHandle(bottom_diff,ctx), num, dim, spatial_dim, has_ignore_label_?1:0, ignore_label_, WrapHandle(counts,ctx)), + oclk_softmax_loss_backward(nthreads, WrapHandle(top_data, &ctx), + WrapHandle(label, &ctx), WrapHandle(bottom_diff, &ctx), + num, dim, spatial_dim, has_ignore_label_ ? 1 : 0, + ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; - greentea_gpu_asum(this->device_context_.id(), nthreads, counts, 0, &count); - greentea_gpu_scal(this->device_context_.id(), prob_.count(), loss_weight / count, bottom_diff, 0); + greentea_gpu_asum(this->device_context_.id(), + nthreads, counts, 0, &count); + greentea_gpu_scal(this->device_context_.id(), + prob_.count(), loss_weight / count, bottom_diff, 0); } else { - greentea_gpu_scal(this->device_context_.id(), prob_.count(), loss_weight / num, bottom_diff, 0); + greentea_gpu_scal(this->device_context_.id(), + prob_.count(), loss_weight / num, bottom_diff, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu index 882390eff95..1b3f0fd1eb5 100644 --- a/src/caffe/layers/split_layer.cu +++ b/src/caffe/layers/split_layer.cu @@ -36,7 +36,7 @@ void SplitLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -46,7 +46,7 @@ void SplitLayer::Backward_gpu(const vector*>& top, if (top.size() == 1) { greentea_copy(count_, (cl_mem) (top[0]->gpu_diff()), 0, - (cl_mem) (bottom[0]->mutable_gpu_diff()), 0, ctx); + (cl_mem) (bottom[0]->mutable_gpu_diff()), 0, &ctx); return; } greentea_gpu_add(this->device_context_.id(), count_, @@ -60,7 +60,7 @@ void SplitLayer::Backward_gpu(const vector*>& top, greentea_gpu_axpy(this->device_context_.id(), count_, Dtype(1.), (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index b028a8f2030..0302a2f0f96 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -12,12 +12,11 @@ namespace caffe { #ifdef USE_CUDA template __global__ void TanHForward(const int n, const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out[index] = tanh(in[index]); } } -#endif // USE_CUDA +#endif // USE_CUDA template void TanHLayer::Forward_gpu(const vector*>& bottom, @@ -29,11 +28,11 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - TanHForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + TanHForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -44,10 +43,10 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_tanh = program.get_kernel( CL_KERNEL_SELECT("tanh_forward")); viennacl::ocl::enqueue( - oclk_tanh(count, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) top_data, ctx)), + oclk_tanh(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -55,13 +54,12 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, template __global__ void TanHBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { Dtype tanhx = out_data[index]; out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); } } -#endif // USE_CUDA +#endif // USE_CUDA template void TanHLayer::Backward_gpu(const vector*>& top, @@ -76,11 +74,11 @@ void TanHLayer::Backward_gpu(const vector*>& top, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - TanHBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + TanHBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, top_diff, top_data, bottom_diff); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -91,13 +89,12 @@ void TanHLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_tanh = program.get_kernel( CL_KERNEL_SELECT("tanh_backward")); viennacl::ocl::enqueue( - oclk_tanh(count, WrapHandle((cl_mem) top_diff, ctx), - WrapHandle((cl_mem) top_data, ctx), - WrapHandle((cl_mem) bottom_diff, ctx)), + oclk_tanh(count, WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle((cl_mem) top_data, &ctx), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } } diff --git a/src/caffe/layers/threshold_layer.cu b/src/caffe/layers/threshold_layer.cu index 05ca1a60ff3..676759d3d8a 100644 --- a/src/caffe/layers/threshold_layer.cu +++ b/src/caffe/layers/threshold_layer.cu @@ -15,8 +15,7 @@ namespace caffe { template __global__ void ThresholdForward(const int n, const Dtype threshold, const Dtype* in, Dtype* out) { - CUDA_KERNEL_LOOP(index, n) - { + CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > threshold ? 1 : 0; } } @@ -32,11 +31,11 @@ void ThresholdLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - ThresholdForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + ThresholdForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, threshold_, bottom_data, top_data); - CUDA_POST_KERNEL_CHECK - ; -#endif // USE_CUDA + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -47,13 +46,13 @@ void ThresholdLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_threshold = program.get_kernel( CL_KERNEL_SELECT("threshold")); viennacl::ocl::enqueue( - oclk_threshold(count, threshold_, WrapHandle((cl_mem) bottom_data, ctx), - WrapHandle((cl_mem) top_data, ctx)), + oclk_threshold(count, threshold_, + WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); ctx.get_queue().finish(); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } - } INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer); diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 9972c76a8bb..27a413e57f4 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -52,7 +52,8 @@ void Net::Init(const NetParameter& in_param) { << "Must specify either input_shape OR deprecated input_dim, not both."; if (param.input_dim_size() > 0) { // Deprecated 4D dimensions. - CHECK_EQ(param.input_size() * 4, param.input_dim_size())<< "Incorrect input blob dimension specifications."; + CHECK_EQ(param.input_size() * 4, param.input_dim_size()) + << "Incorrect input blob dimension specifications."; } else { CHECK_EQ(param.input_size(), param.input_shape_size()) << "Exactly one input_shape must be specified per input."; @@ -131,7 +132,8 @@ void Net::Init(const NetParameter& in_param) { DLOG(INFO)<< "Memory required for data: " << memory_used_ * sizeof(Dtype); const int param_size = layer_param.param_size(); const int num_param_blobs = layers_[layer_id]->blobs().size(); - CHECK_LE(param_size, num_param_blobs)<< "Too many params specified for layer " << layer_param.name(); + CHECK_LE(param_size, num_param_blobs) + << "Too many params specified for layer " << layer_param.name(); ParamSpec default_param_spec; for (int param_id = 0; param_id < num_param_blobs; ++param_id) { const ParamSpec* param_spec = @@ -385,9 +387,7 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, param.input_dim(top_id * 4 + 1), param.input_dim(top_id * 4 + 2), param.input_dim(top_id * 4 + 3)); - } - else - { + } else { blob_pointer->Reshape(param.input_shape(top_id)); } net_input_blob_indices_.push_back(blob_id); @@ -408,7 +408,6 @@ template int Net::AppendBottom(const NetParameter& param, const int layer_id, const int bottom_id, set* available_blobs, map* blob_name_to_idx) { - const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { @@ -461,7 +460,8 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, // Named param blob with name we've seen before: share params const int owner_net_param_id = param_names_index_[param_name]; param_owners_.push_back(owner_net_param_id); - const pair& owner_index = param_layer_indices_[owner_net_param_id]; + const pair& owner_index = + param_layer_indices_[owner_net_param_id]; const int owner_layer_id = owner_index.first; const int owner_param_id = owner_index.second; LOG(INFO)<< "Sharing parameters '" << param_name << "' owned by " @@ -475,7 +475,8 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, && (layer_param.param(param_id).share_mode() == ParamSpec_DimCheckMode_PERMISSIVE)) { // Permissive dimension checking -- only check counts are the same. - CHECK_EQ(this_blob->count(), owner_blob->count())<< "Shared parameter blobs must have the same count."; + CHECK_EQ(this_blob->count(), owner_blob->count()) + << "Shared parameter blobs must have the same count."; } else { // Strict dimension checking -- all dims must be the same. CHECK(this_blob->shape() == owner_blob->shape()); @@ -557,7 +558,8 @@ string Net::Forward(const string& input_blob_protos, Dtype* loss) { BlobProtoVector blob_proto_vec; if (net_input_blobs_.size()) { blob_proto_vec.ParseFromString(input_blob_protos); - CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size())<< "Incorrect input size."; + CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size()) + << "Incorrect input size."; for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); } @@ -686,7 +688,8 @@ void Net::ShareTrainedLayersWith(const Net* other) { DLOG(INFO)<< "Copying source layer " << source_layer_name; vector > >& target_blobs = layers_[target_layer_id] ->blobs(); - CHECK_EQ(target_blobs.size(), source_layer->blobs().size())<< "Incompatible number of blobs for layer " << source_layer_name; + CHECK_EQ(target_blobs.size(), source_layer->blobs().size()) + << "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { Blob* source_blob = source_layer->blobs()[j].get(); CHECK(target_blobs[j]->shape() == source_blob->shape()); @@ -752,7 +755,8 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { DLOG(INFO)<< "Copying source layer " << source_layer_name; vector > >& target_blobs = layers_[target_layer_id] ->blobs(); - CHECK_EQ(target_blobs.size(), source_layer.blobs_size())<< "Incompatible number of blobs for layer " << source_layer_name; + CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) + << "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { const bool kReshape = false; target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); @@ -817,12 +821,13 @@ void Net::Update() { if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_add(count, this_diff, owner_diff, owner_diff); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_add(dc.id(), count, (cl_mem) this_diff, 0, - (cl_mem) owner_diff, 0, (cl_mem) owner_diff, 0); -#endif // USE_GREENTEA + (cl_mem) owner_diff, 0, (cl_mem) owner_diff, + 0); +#endif // USE_GREENTEA } } break; diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index a968ea2cf34..112c267b412 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -88,12 +88,14 @@ void Solver::InitTestNets() { const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); const int num_generic_nets = has_net_param + has_net_file; - CHECK_LE(num_generic_nets, 1)<< "Both net_param and net_file may not be specified."; + CHECK_LE(num_generic_nets, 1) + << "Both net_param and net_file may not be specified."; const int num_test_net_params = param_.test_net_param_size(); const int num_test_net_files = param_.test_net_size(); const int num_test_nets = num_test_net_params + num_test_net_files; if (num_generic_nets) { - CHECK_GE(param_.test_iter_size(), num_test_nets)<< "test_iter must be specified for each test network."; + CHECK_GE(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; } else { CHECK_EQ(param_.test_iter_size(), num_test_nets) << "test_iter must be specified for each test network."; @@ -106,7 +108,8 @@ void Solver::InitTestNets() { const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; const int num_test_net_instances = num_test_nets + num_generic_net_instances; if (param_.test_state_size()) { - CHECK_EQ(param_.test_state_size(), num_test_net_instances)<< "test_state must be unspecified or specified once per test net."; + CHECK_EQ(param_.test_state_size(), num_test_net_instances) + << "test_state must be unspecified or specified once per test net."; } if (num_test_net_instances) { CHECK_GT(param_.test_interval(), 0); @@ -179,12 +182,13 @@ void Solver::Step(int iters) { #ifdef USE_CUDA caffe_gpu_set(blob->count(), static_cast(0), blob->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_set(blob->device_context().id(),blob->count(), static_cast(0), - (cl_mem)(blob->mutable_gpu_diff()),0); -#endif // USE_GREENTEA + greentea_gpu_set(blob->device_context().id(), + blob->count(), static_cast(0), + (cl_mem)(blob->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA } #else NO_GPU; @@ -525,14 +529,14 @@ void SGDSolver::Normalize(int param_id) { #ifdef USE_CUDA caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_scal(this->device_context_.id(), net_params[param_id]->count(), accum_normalization, (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } #else NO_GPU; @@ -575,7 +579,7 @@ void SGDSolver::Regularize(int param_id) { } case Caffe::GPU: { #ifndef CPU_ONLY - if(this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_.backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (local_decay) { if (regularization_type == "L2") { @@ -593,32 +597,37 @@ void SGDSolver::Regularize(int param_id) { temp_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); } else { - LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + LOG(FATAL)<< "Unknown regularization type: " + << regularization_type; } } -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA if (local_decay) { if (regularization_type == "L2") { // add weight decay - greentea_gpu_axpy(this->device_context_.id(), net_params[param_id]->count(), + greentea_gpu_axpy(this->device_context_.id(), + net_params[param_id]->count(), local_decay, - (cl_mem)(net_params[param_id]->gpu_data()),0, - (cl_mem)(net_params[param_id]->mutable_gpu_diff()),0); + (cl_mem)(net_params[param_id]->gpu_data()), 0, + (cl_mem)(net_params[param_id]->mutable_gpu_diff()), 0); } else if (regularization_type == "L1") { - greentea_gpu_sign(this->device_context_.id(), net_params[param_id]->count(), - (cl_mem)(net_params[param_id]->gpu_data()),0, - (cl_mem)(temp_[param_id]->mutable_gpu_data()),0); - greentea_gpu_axpy(this->device_context_.id(),net_params[param_id]->count(), + greentea_gpu_sign(this->device_context_.id(), + net_params[param_id]->count(), + (cl_mem)(net_params[param_id]->gpu_data()), 0, + (cl_mem)(temp_[param_id]->mutable_gpu_data()), 0); + greentea_gpu_axpy(this->device_context_.id(), + net_params[param_id]->count(), local_decay, - (cl_mem)(temp_[param_id]->gpu_data()),0, - (cl_mem)(net_params[param_id]->mutable_gpu_diff()),0); + (cl_mem)(temp_[param_id]->gpu_data()), 0, + (cl_mem)(net_params[param_id]->mutable_gpu_diff()), 0); } else { - LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + LOG(FATAL)<< "Unknown regularization type: " + << regularization_type; } } -#endif // USE_GREENTEA +#endif // USE_GREENTEA } #else NO_GPU; @@ -657,7 +666,7 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { caffe_copy(net_params[param_id]->count(), history_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -669,9 +678,9 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { momentum, (cl_mem) (history_[param_id]->mutable_gpu_data()), 0); greentea_copy( net_params[param_id]->count(), - (cl_mem) (history_[param_id]->gpu_data()),0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()),0, ctx); -#endif // USE_GREENTEA + (cl_mem) (history_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0, &ctx); +#endif // USE_GREENTEA } #else NO_GPU; @@ -696,7 +705,8 @@ void SGDSolver::SnapshotSolverState(SolverState* state) { template void SGDSolver::RestoreSolverState(const SolverState& state) { - CHECK_EQ(state.history_size(), history_.size())<< "Incorrect length of history blobs."; + CHECK_EQ(state.history_size(), history_.size()) + << "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; for (int i = 0; i < history_.size(); ++i) { history_[i]->FromProto(state.history(i)); @@ -755,7 +765,7 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { caffe_copy(net_params[param_id]->count(), this->update_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -764,8 +774,8 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { // save history momentum for stepping back greentea_copy( net_params[param_id]->count(), - (cl_mem) (this->history_[param_id]->gpu_data()),0, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()),0, ctx); + (cl_mem) (this->history_[param_id]->gpu_data()), 0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0, &ctx); // update history greentea_gpu_axpby( @@ -784,9 +794,9 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { // copy greentea_copy( net_params[param_id]->count(), - (cl_mem) (this->update_[param_id]->gpu_data()),0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()),0, ctx); -#endif // USE_GREENTEA + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0, &ctx); +#endif // USE_GREENTEA } #else NO_GPU; @@ -868,7 +878,7 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { caffe_gpu_axpby(net_params[param_id]->count(), local_rate, this->update_[param_id]->gpu_data(), Dtype(0), net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA // compute square of gradient in update @@ -905,7 +915,7 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { this->device_context_.id(), net_params[param_id]->count(), local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } #else NO_GPU; diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 245a0bb3af3..01aafe674c3 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -7,8 +7,8 @@ #include "caffe/greentea/greentea.hpp" #ifdef USE_GREENTEA -#include "caffe/greentea/greentea_math_functions.hpp" #include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { @@ -23,11 +23,11 @@ SyncedMemory::~SyncedMemory() { if (device_context_.backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaFree(gpu_ptr_)); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA clReleaseMemObject(cl_gpu_mem_); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } #endif // CPU_ONLY @@ -51,7 +51,7 @@ inline void SyncedMemory::to_cpu() { if (device_context_.backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( @@ -59,7 +59,7 @@ inline void SyncedMemory::to_cpu() { ctx.get_queue().finish(); // On the CPU, memory is shared (and no copy needed) if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { - greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, 0, cpu_ptr_, ctx); + greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, 0, cpu_ptr_, &ctx); } ctx.get_queue().finish(); #endif @@ -84,7 +84,7 @@ inline void SyncedMemory::to_gpu() { #ifdef USE_CUDA CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); caffe_gpu_memset(size_, 0, gpu_ptr_); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( @@ -106,7 +106,7 @@ inline void SyncedMemory::to_gpu() { int alpha = 0; greentea_memset(device_context_.id(), size_, alpha, cl_gpu_mem_, 0); } - gpu_ptr_ = (void*) cl_gpu_mem_; + gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); #endif } @@ -120,7 +120,7 @@ inline void SyncedMemory::to_gpu() { CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); } caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( @@ -140,15 +140,15 @@ inline void SyncedMemory::to_gpu() { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); } - gpu_ptr_ = (void*) cl_gpu_mem_; + gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); } // On the CPU, memory is shared (and no copy needed) if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { - greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, ctx); + greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, &ctx); } ctx.get_queue().finish(); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } head_ = SYNCED; break; diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index 84b0ca6603c..ff169ad10c6 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -8,14 +8,14 @@ namespace caffe { #ifndef CPU_ONLY #ifdef USE_CUDA cudaDeviceProp CAFFE_TEST_CUDA_PROP; -#endif // USE_CUDA +#endif // USE_CUDA #endif } #ifndef CPU_ONLY #ifdef USE_CUDA using caffe::CAFFE_TEST_CUDA_PROP; -#endif // USE_CUDA +#endif // USE_CUDA #endif using caffe::Caffe; @@ -34,7 +34,7 @@ int main(int argc, char** argv) { } cout << "Setting to use device " << device << endl; Caffe::SetDevice(device); - //cudaSetDevice(device); + // cudaSetDevice(device); #endif // invoke the test. return RUN_ALL_TESTS(); diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index 8c9fad16a9c..5647c602c21 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -15,12 +15,12 @@ class CommonTest : public ::testing::Test {}; #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestCublasHandlerGPU) { - if(Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifdef USE_CUDA int cuda_device_id; CUDA_CHECK(cudaGetDevice(&cuda_device_id)); EXPECT_TRUE(Caffe::cublas_handle()); -#endif // USE_CUDA +#endif // USE_CUDA } } @@ -53,10 +53,12 @@ TEST_F(CommonTest, TestRandSeedCPU) { TEST_F(CommonTest, TestRandSeedGPU) { DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA - SyncedMemory data_a(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); - SyncedMemory data_b(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); + SyncedMemory data_a(10 * sizeof(unsigned int), + Caffe::GetDefaultDeviceContext()); + SyncedMemory data_b(10 * sizeof(unsigned int), + Caffe::GetDefaultDeviceContext()); Caffe::set_random_seed(1701); CURAND_CHECK(curandGenerate(Caffe::curand_generator(), static_cast(data_a.mutable_gpu_data()), 10)); @@ -67,7 +69,7 @@ TEST_F(CommonTest, TestRandSeedGPU) { EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i], ((const unsigned int*)(data_b.cpu_data()))[i]); } -#endif // USE_CUDA +#endif // USE_CUDA } } diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index 18c7d94c4fc..4b42e7de097 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -40,7 +40,8 @@ class DataTransformTest : public ::testing::Test { const Datum& datum, Phase phase) { // Get crop sequence with Caffe seed 1701. DataTransformer* transformer = - new DataTransformer(transform_param, phase, Caffe::GetDefaultDeviceContext()); + new DataTransformer(transform_param, phase, + Caffe::GetDefaultDeviceContext()); const int crop_size = transform_param.crop_size(); Caffe::set_random_seed(seed_); transformer->InitRand(); @@ -92,7 +93,8 @@ TYPED_TEST(DataTransformTest, TestEmptyTransform) { FillDatum(label, channels, height, width, unique_pixels, &datum); Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = - new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); + new DataTransformer(transform_param, TEST, + Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -116,7 +118,8 @@ TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { FillDatum(label, channels, height, width, unique_pixels, &datum); Blob* blob = new Blob(1, 3, 4, 5); DataTransformer* transformer = - new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); + new DataTransformer(transform_param, TEST, + Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -141,7 +144,8 @@ TYPED_TEST(DataTransformTest, TestCropSize) { Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); DataTransformer* transformer = - new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); + new DataTransformer(transform_param, TEST, + Caffe::GetDefaultDeviceContext()); transformer->InitRand(); Blob* blob = new Blob(1, channels, crop_size, crop_size); @@ -257,7 +261,8 @@ TYPED_TEST(DataTransformTest, TestCropMirrorTest) { Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); transform_param.set_crop_size(crop_size); - int num_matches_crop = this->NumSequenceMatches(transform_param, datum, TEST); + int num_matches_crop = this->NumSequenceMatches(transform_param, + datum, TEST); transform_param.set_mirror(true); int num_matches_crop_mirror = @@ -281,7 +286,8 @@ TYPED_TEST(DataTransformTest, TestMeanValue) { FillDatum(label, channels, height, width, unique_pixels, &datum); Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = - new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); + new DataTransformer(transform_param, TEST, + Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); for (int j = 0; j < blob->count(); ++j) { @@ -304,7 +310,8 @@ TYPED_TEST(DataTransformTest, TestMeanValues) { FillDatum(label, channels, height, width, unique_pixels, &datum); Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = - new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); + new DataTransformer(transform_param, TEST, + Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); for (int c = 0; c < channels; ++c) { @@ -344,7 +351,8 @@ TYPED_TEST(DataTransformTest, TestMeanFile) { FillDatum(label, channels, height, width, unique_pixels, &datum); Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = - new DataTransformer(transform_param, TEST, Caffe::GetDefaultDeviceContext()); + new DataTransformer(transform_param, TEST, + Caffe::GetDefaultDeviceContext()); transformer->InitRand(); transformer->Transform(datum, blob); for (int j = 0; j < blob->count(); ++j) { diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 106fdd4e075..5d6d69b61a4 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -25,7 +25,7 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; -#endif // USE_CUDA +#endif // USE_CUDA template class Im2colKernelTest : public GPUDeviceTest { @@ -105,7 +105,8 @@ TYPED_TEST(Im2colKernelTest, TestGPU) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( + im2col_gpu_kernel + CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, @@ -127,4 +128,4 @@ TYPED_TEST(Im2colKernelTest, TestGPU) { } } // namespace caffe -#endif // USE_CUDA +#endif // USE_CUDA diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 4e84b4dc9dd..6e3cea87823 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -174,7 +174,7 @@ TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { y = this->blob_top_->gpu_data(); int computed_distance = caffe_gpu_hamming_distance(n, x, y); EXPECT_EQ(reference_distance, computed_distance); -#endif // USE_CUDA +#endif // USE_CUDA } TYPED_TEST(GPUMathFunctionsTest, TestAsum) { @@ -191,11 +191,12 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_asum(n, this->blob_bottom_->gpu_data(), &gpu_asum); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_asum(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, &gpu_asum); -#endif // USE_GREENTEA + greentea_gpu_asum(dc.id(), n, + (cl_mem)(this->blob_bottom_->gpu_data()), 0, &gpu_asum); +#endif // USE_GREENTEA } EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } @@ -209,12 +210,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { #ifdef USE_CUDA caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sign(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, - (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); -#endif // USE_GREENTEA + greentea_gpu_sign(dc.id(), n, + (cl_mem)(this->blob_bottom_->gpu_data()), 0, + (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA } const TypeParam* signs = this->blob_bottom_->cpu_diff(); @@ -233,11 +235,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { #ifdef USE_CUDA caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sgnbit(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); -#endif // USE_GREENTEA + greentea_gpu_sgnbit(dc.id(), n, + (cl_mem)(this->blob_bottom_->gpu_data()), 0, + (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA } const TypeParam* signbits = this->blob_bottom_->cpu_diff(); @@ -256,12 +260,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestFabs) { #ifdef USE_CUDA caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_abs(dc.id(), n, (cl_mem)(this->blob_bottom_->gpu_data()),0, - (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); -#endif // USE_GREENTEA + greentea_gpu_abs(dc.id(), n, + (cl_mem)(this->blob_bottom_->gpu_data()), 0, + (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA } const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); @@ -281,12 +286,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { #ifdef USE_CUDA caffe_gpu_scale(n, alpha, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_scale(dc.id(), n, alpha, (cl_mem)(this->blob_bottom_->gpu_data()),0, - (cl_mem)(this->blob_bottom_->mutable_gpu_diff()),0); -#endif // USE_GREENTEA + greentea_gpu_scale(dc.id(), n, alpha, + (cl_mem)(this->blob_bottom_->gpu_data()), 0, + (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA } const TypeParam* scaled = this->blob_bottom_->cpu_diff(); @@ -305,14 +311,15 @@ TYPED_TEST(GPUMathFunctionsTest, TestCopy) { if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(n, bottom_data, top_data); - #endif // USE_CUDA + #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( dc.id()); - greentea_copy(n, (cl_mem)bottom_data,0, (cl_mem)top_data,0, ctx); - #endif // USE_GREENTEA + greentea_copy(n, (cl_mem)bottom_data, 0, + (cl_mem)top_data, 0, &ctx); + #endif // USE_GREENTEA } bottom_data = this->blob_bottom_->cpu_data(); diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp index fcb7d385e1a..d19b763d403 100644 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -6,10 +6,9 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" -#include "caffe/util/math_functions.hpp" - #include "caffe/test/test_caffe_main.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" namespace caffe { @@ -19,8 +18,7 @@ class MergeCropLayerTest : public GPUDeviceTest { protected: MergeCropLayerTest() - : blob_bottom_a_(new Blob()), - blob_bottom_b_(new Blob()), + : blob_bottom_a_(new Blob()), blob_bottom_b_(new Blob()), blob_top_(new Blob()) { } @@ -164,13 +162,13 @@ class MergeCropLayerTest : public GPUDeviceTest { vector propagate_down(blob_bottom_vec_.size(), true); layer.Backward(blob_top_vec_, propagate_down, blob_bottom_vec_); - // Test copy to A for (int n = 0; n < blob_bottom_a_->num(); ++n) { for (int c = 0; c < a_c; ++c) { for (int h = 0; h < a_h; ++h) { for (int w = 0; w < a_w; ++w) { - EXPECT_EQ((w + h * 10 + c * 100 + n * 1000 + 10000), + EXPECT_EQ( + (w + h * 10 + c * 100 + n * 1000 + 10000), blob_bottom_a_->cpu_diff()[w + h * a_w + c * a_h * a_w + n * a_h * a_w * a_c]); } @@ -189,25 +187,25 @@ class MergeCropLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(MergeCropLayerTest, TestDtypes); -TYPED_TEST(MergeCropLayerTest, TestSetup){ +TYPED_TEST(MergeCropLayerTest, TestSetup) { typedef TypeParam Dtype; LayerParameter layer_param; MergeCropLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); - EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_a_->channels() + this->blob_bottom_b_->channels()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_a_->channels() + + this->blob_bottom_b_->channels()); EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_a_->height()); EXPECT_EQ(this->blob_top_->width(), 2); } -TYPED_TEST(MergeCropLayerTest, TestForward){ +TYPED_TEST(MergeCropLayerTest, TestForward) { this->TestForward(); } -TYPED_TEST(MergeCropLayerTest, TestBackward){ +TYPED_TEST(MergeCropLayerTest, TestBackward) { this->TestBackward(); } -} - // namespace caffe +} // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c7259662d8f..e332a1c7506 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -253,7 +253,7 @@ TYPED_TEST(NeuronLayerTest, TestSigmoid) { const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype kDelta = 2e-3; for (int i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_NEAR(top_data[i], 1. / (1 + exp(-bottom_data[i])),kDelta); + EXPECT_NEAR(top_data[i], 1. / (1 + exp(-bottom_data[i])), kDelta); // check that we squashed the value between 0 and 1 EXPECT_GE(top_data[i], 0.); EXPECT_LE(top_data[i], 1.); diff --git a/src/caffe/test/test_platform.cpp b/src/caffe/test/test_platform.cpp index 75e5cb9dda3..ff9b751a4e2 100644 --- a/src/caffe/test/test_platform.cpp +++ b/src/caffe/test/test_platform.cpp @@ -55,5 +55,5 @@ TEST_F(PlatformTest, TestInitialization) { } // namespace caffe -#endif // USE_CUDA -#endif // CPU_ONLY +#endif // USE_CUDA +#endif // CPU_ONLY diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index c7dafea439e..48fd0bcb910 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -23,10 +23,14 @@ class RandomNumberGeneratorTest : public ::testing::Test { : mean_bound_multiplier_(3.8), // ~99.99% confidence for test failure. sample_size_(10000), seed_(1701), - data_(new SyncedMemory(sample_size_ * sizeof(Dtype), Caffe::GetDefaultDeviceContext())), - data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype), Caffe::GetDefaultDeviceContext())), - int_data_(new SyncedMemory(sample_size_ * sizeof(int), Caffe::GetDefaultDeviceContext())), - int_data_2_(new SyncedMemory(sample_size_ * sizeof(int), Caffe::GetDefaultDeviceContext())) {} + data_(new SyncedMemory(sample_size_ * sizeof(Dtype), + Caffe::GetDefaultDeviceContext())), + data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype), + Caffe::GetDefaultDeviceContext())), + int_data_(new SyncedMemory(sample_size_ * sizeof(int), + Caffe::GetDefaultDeviceContext())), + int_data_2_(new SyncedMemory(sample_size_ * sizeof(int), + Caffe::GetDefaultDeviceContext())) {} virtual void SetUp() { Caffe::set_random_seed(this->seed_); @@ -181,14 +185,15 @@ class RandomNumberGeneratorTest : public ::testing::Test { DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_rng_gaussian(sample_size_, mu, sigma, rng_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_rng_gaussian(dc.id(), sample_size_, mu, sigma, (cl_mem)rng_data, 0); -#endif // USE_GREENTEA + greentea_gpu_rng_gaussian(dc.id(), sample_size_, + mu, sigma, (cl_mem)rng_data, 0); +#endif // USE_GREENTEA } } @@ -198,16 +203,16 @@ class RandomNumberGeneratorTest : public ::testing::Test { DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_rng_uniform(sample_size_, lower, upper, rng_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_rng_uniform(dc.id(), sample_size_, lower, upper, (cl_mem)rng_data, 0); -#endif // USE_GREENTEA + greentea_gpu_rng_uniform(dc.id(), sample_size_, + lower, upper, (cl_mem)rng_data, 0); +#endif // USE_GREENTEA } - } // Fills with uniform integers in [0, UINT_MAX] using 2 argument form of @@ -216,14 +221,14 @@ class RandomNumberGeneratorTest : public ::testing::Test { unsigned int* rng_data = static_cast(gpu_data); DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_rng_uniform(sample_size_, rng_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_rng_uniform(dc.id(), sample_size_, (cl_mem)rng_data, 0); -#endif // USE_GREENTEA +#endif // USE_GREENTEA } } diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index 2994e6d234c..3e84dea7adb 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -17,13 +17,15 @@ namespace caffe { -class SyncedMemoryTest : public ::testing::Test {}; +class SyncedMemoryTest : public ::testing::Test { +}; TEST_F(SyncedMemoryTest, TestInitialization) { SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); EXPECT_EQ(mem.head(), SyncedMemory::UNINITIALIZED); EXPECT_EQ(mem.size(), 10); - SyncedMemory* p_mem = new SyncedMemory(10 * sizeof(float), Caffe::GetDefaultDeviceContext()); + SyncedMemory* p_mem = new SyncedMemory(10 * sizeof(float), + Caffe::GetDefaultDeviceContext()); EXPECT_EQ(p_mem->size(), 10 * sizeof(float)); delete p_mem; } @@ -85,18 +87,17 @@ TEST_F(SyncedMemoryTest, TestGPURead) { // check if values are the same char* recovered_value = new char[10]; - DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memcpy(10, gpu_data, recovered_value); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); - greentea_gpu_memcpy(10, (cl_mem)gpu_data,0, recovered_value, ctx); -#endif // USE_GREENTEA + greentea_gpu_memcpy(10, (cl_mem) gpu_data, 0, recovered_value, &ctx); +#endif // USE_GREENTEA } for (int i = 0; i < mem.size(); ++i) { @@ -113,15 +114,15 @@ TEST_F(SyncedMemoryTest, TestGPURead) { EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); // check if values are the same - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memcpy(10, gpu_data, recovered_value); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); - greentea_gpu_memcpy(10, (cl_mem)gpu_data,0, recovered_value, ctx); -#endif // USE_GREENTEA + greentea_gpu_memcpy(10, (cl_mem) gpu_data, 0, recovered_value, &ctx); +#endif // USE_GREENTEA } for (int i = 0; i < mem.size(); ++i) { @@ -137,14 +138,14 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memset(mem.size(), 1, gpu_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_memset(dc.id(), mem.size(), 1, (cl_mem)gpu_data, 0); -#endif // USE_GREENTEA + greentea_memset(dc.id(), mem.size(), 1, (cl_mem) gpu_data, 0); +#endif // USE_GREENTEA } const void* cpu_data = mem.cpu_data(); @@ -156,14 +157,14 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { gpu_data = mem.mutable_gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); - if(dc.backend() == BACKEND_CUDA) { + if (dc.backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memset(mem.size(), 2, gpu_data); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_memset(dc.id(), mem.size(), 2, (cl_mem)gpu_data, 0); -#endif // USE_GREENTEA + greentea_memset(dc.id(), mem.size(), 2, (cl_mem) gpu_data, 0); +#endif // USE_GREENTEA } cpu_data = mem.cpu_data(); diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 32dff12b2c0..f32a79a0ab8 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -43,12 +43,15 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #ifdef USE_CUDA caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); -#endif // USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasNoTrans, + 2, 4, 3, 1., + (cl_mem)(A.gpu_data()), 0, + (cl_mem)(B.gpu_data()), 0, 0., + (cl_mem)(C.mutable_gpu_data()), 0); +#endif // USE_GREENTEA } for (int i = 0; i < 8; ++i) { @@ -68,12 +71,15 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #ifdef USE_CUDA caffe_gpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasTrans, CblasNoTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); -#endif // USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasTrans, CblasNoTrans, + 2, 4, 3, 1., + (cl_mem)(A.gpu_data()), 0, + (cl_mem)(B.gpu_data()), 0, + 0., (cl_mem)(C.mutable_gpu_data()), 0); +#endif // USE_GREENTEA } for (int i = 0; i < 8; ++i) { @@ -93,12 +99,15 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #ifdef USE_CUDA caffe_gpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasTrans, CblasTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); -#endif // USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasTrans, CblasTrans, + 2, 4, 3, 1., + (cl_mem)(A.gpu_data()), 0, + (cl_mem)(B.gpu_data()), 0, 0., + (cl_mem)(C.mutable_gpu_data()), 0); +#endif // USE_GREENTEA } for (int i = 0; i < 8; ++i) { @@ -118,12 +127,15 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #ifdef USE_CUDA caffe_gpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasTrans, 2, 4, 3, 1., - (cl_mem)(A.gpu_data()),0, (cl_mem)(B.gpu_data()),0, 0., (cl_mem)(C.mutable_gpu_data()),0); -#endif // USE_GREENTEA + greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasTrans, + 2, 4, 3, 1., + (cl_mem)(A.gpu_data()), 0, + (cl_mem)(B.gpu_data()), 0, 0., + (cl_mem)(C.mutable_gpu_data()), 0); +#endif // USE_GREENTEA } for (int i = 0; i < 8; ++i) { @@ -156,12 +168,15 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { #ifdef USE_CUDA caffe_gpu_gemv(CblasNoTrans, 2, 3, 1., A.gpu_data(), x.gpu_data(), 0., y.mutable_gpu_data()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(dc.id(), CblasNoTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, - (cl_mem)(x.gpu_data()),0, 0., (cl_mem)(y.mutable_gpu_data()),0); -#endif // USE_GREENTEA + greentea_gpu_gemv(dc.id(), CblasNoTrans, + 2, 3, 1., + (cl_mem)(A.gpu_data()), 0, + (cl_mem)(x.gpu_data()), 0, 0., + (cl_mem)(y.mutable_gpu_data()), 0); +#endif // USE_GREENTEA } for (int i = 0; i < 2; ++i) { @@ -180,12 +195,15 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { #ifdef USE_CUDA caffe_gpu_gemv(CblasTrans, 2, 3, 1., A.gpu_data(), y.gpu_data(), 0., x.mutable_gpu_data()); -#endif // USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(dc.id(), CblasTrans, 2, 3, 1., (cl_mem)(A.gpu_data()),0, - (cl_mem)(y.gpu_data()),0, 0., (cl_mem)(x.mutable_gpu_data()),0); -#endif // USE_GREENTEA + greentea_gpu_gemv(dc.id(), CblasTrans, + 2, 3, 1., + (cl_mem)(A.gpu_data()), 0, + (cl_mem)(y.gpu_data()), 0, 0., + (cl_mem)(x.mutable_gpu_data()), 0); +#endif // USE_GREENTEA } for (int i = 0; i < 3; ++i) { diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index 69ef23afec8..cad58ad994b 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -6,19 +6,18 @@ namespace caffe { Timer::Timer() - : initted_(false), - running_(false), - has_run_at_least_once_(false) { + : initted_(false), running_(false), has_run_at_least_once_(false) { Init(); } Timer::~Timer() { - if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventDestroy(start_gpu_)); CUDA_CHECK(cudaEventDestroy(stop_gpu_)); -#endif // USE_CUDA +#endif // USE_CUDA #else NO_GPU; #endif @@ -27,11 +26,12 @@ Timer::~Timer() { void Timer::Start() { if (!running()) { - if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(start_gpu_, 0)); -#endif // USE_CUDA +#endif // USE_CUDA #else NO_GPU; #endif @@ -45,12 +45,13 @@ void Timer::Start() { void Timer::Stop() { if (running()) { - if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); CUDA_CHECK(cudaEventSynchronize(stop_gpu_)); -#endif // USE_CUDA +#endif // USE_CUDA #else NO_GPU; #endif @@ -61,25 +62,25 @@ void Timer::Stop() { } } - float Timer::MicroSeconds() { if (!has_run_at_least_once()) { - LOG(WARNING) << "Timer has never been run before reading time."; + LOG(WARNING)<< "Timer has never been run before reading time."; return 0; } if (running()) { Stop(); } - if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, - stop_gpu_)); + stop_gpu_)); // Cuda only measure milliseconds elapsed_microseconds_ = elapsed_milliseconds_ * 1000; -#endif // USE_CUDA +#endif // USE_CUDA #else - NO_GPU; + NO_GPU; #endif } else { elapsed_microseconds_ = (stop_cpu_ - start_cpu_).total_microseconds(); @@ -89,20 +90,21 @@ float Timer::MicroSeconds() { float Timer::MilliSeconds() { if (!has_run_at_least_once()) { - LOG(WARNING) << "Timer has never been run before reading time."; + LOG(WARNING)<< "Timer has never been run before reading time."; return 0; } if (running()) { Stop(); } - if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, - stop_gpu_)); -#endif // USE_CUDA + stop_gpu_)); +#endif // USE_CUDA #else - NO_GPU; + NO_GPU; #endif } else { elapsed_milliseconds_ = (stop_cpu_ - start_cpu_).total_milliseconds(); @@ -116,12 +118,13 @@ float Timer::Seconds() { void Timer::Init() { if (!initted()) { - if (Caffe::mode() == Caffe::GPU && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventCreate(&start_gpu_)); CUDA_CHECK(cudaEventCreate(&stop_gpu_)); -#endif // USE_CUDA +#endif // USE_CUDA #else NO_GPU; #endif @@ -153,14 +156,14 @@ void CPUTimer::Stop() { float CPUTimer::MilliSeconds() { if (!has_run_at_least_once()) { - LOG(WARNING) << "Timer has never been run before reading time."; + LOG(WARNING)<< "Timer has never been run before reading time."; return 0; } if (running()) { Stop(); } this->elapsed_milliseconds_ = (this->stop_cpu_ - - this->start_cpu_).total_milliseconds(); + this->start_cpu_).total_milliseconds(); return this->elapsed_milliseconds_; } @@ -173,7 +176,7 @@ float CPUTimer::MicroSeconds() { Stop(); } this->elapsed_microseconds_ = (this->stop_cpu_ - - this->start_cpu_).total_microseconds(); + this->start_cpu_).total_microseconds(); return this->elapsed_microseconds_; } diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 09a0582df8b..501e2fe4e01 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -9,15 +9,16 @@ namespace caffe { #ifdef USE_CUDA -template +template __global__ void im2col_sk_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int height_col, const int width_col, - Dtype* data_col) { + const int height, const int width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, const int height_col, + const int width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { int w_out = index % width_col; int h_index = index / width_col; @@ -34,21 +35,21 @@ __global__ void im2col_sk_gpu_kernel(const int n, const Dtype* data_im, for (int j = 0; j < ext_kernel_w; j += kstride_w) { int h = h_in + i; int w = w_in + j; - *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? - data_im_ptr[i * width + j] : 0; + *data_col_ptr = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } -template -void im2col_sk_gpu(const Dtype* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - Dtype* data_col) { +template +void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, + const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; @@ -57,40 +58,44 @@ void im2col_sk_gpu(const Dtype* data_im, const int channels, int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; - //LOG(INFO) << "ext_height = " << ext_kernel_h; - //LOG(INFO) << "ext_width = " << ext_kernel_w; + // LOG(INFO) << "ext_height = " << ext_kernel_h; + // LOG(INFO) << "ext_width = " << ext_kernel_w; // NOLINT_NEXT_LINE(whitespace/operators) - im2col_sk_gpu_kernel<<>>( + im2col_sk_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), + CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, height, width, kernel_h, kernel_w, - ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, + ext_kernel_h, ext_kernel_w, pad_h, pad_w, + stride_h, stride_w, kstride_h, kstride_w, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } - // Explicit instantiation template void im2col_sk_gpu(const float* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - float* data_col); + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + float* data_col); template void im2col_sk_gpu(const double* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - double* data_col); - + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + double* data_col); -template +template __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, - Dtype* data_col) { + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { int w_out = index % width_col; int h_index = index / width_col; @@ -107,28 +112,28 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, for (int j = 0; j < kernel_w; ++j) { int h = h_in + i; int w = w_in + j; - *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? - data_im_ptr[i * width + j] : 0; + *data_col_ptr = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } -template -void im2col_gpu(const Dtype* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - Dtype* data_col) { +template +void im2col_gpu(const Dtype* data_im, const int channels, const int height, + const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel<<>>( + im2col_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), + CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); @@ -137,25 +142,29 @@ void im2col_gpu(const Dtype* data_im, const int channels, // Explicit instantiation template void im2col_gpu(const float* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - float* data_col); + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + float* data_col); template void im2col_gpu(const double* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - double* data_col); + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + double* data_col); // Support of stride_h and stride_w greater than 1 is not implemented -template +template __global__ void col2im_sk_gpu_kernel(const int n, const Dtype* data_col, - const int height, const int width, const int channels, - const int patch_h, const int patch_w, - const int ext_patch_h, const int ext_patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int height_col, const int width_col, - Dtype* data_im) { + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, const int height_col, + const int width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width + pad_w; @@ -164,22 +173,26 @@ __global__ void col2im_sk_gpu_kernel(const int n, const Dtype* data_col, // compute the start and end of the output int width_col_1 = width_col - 1; int height_col_1 = height_col - 1; - int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w)+ 1; - int w_col_end = (w >= width_col) ? width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; - int h_col_end = (h >= height_col) ? height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; int w_num = (w - w_col_start) / kstride_w; int h_num = (h - h_col_start) / kstride_h; int coeff_w_idx = height_col * width_col; int coeff_h_idx = patch_w * coeff_w_idx; int offset = c * patch_h * coeff_h_idx; - for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += kstride_h, --h_idx) { - for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += kstride_w, --w_idx) { - //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; - //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; - //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; - val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + h_col * width_col + w_col]; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; } } @@ -187,48 +200,55 @@ __global__ void col2im_sk_gpu_kernel(const int n, const Dtype* data_col, } } -template -void col2im_sk_gpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, - Dtype* data_im) { +template +void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, + const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_im) { if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) - LOG(FATAL) << "stride greater than 1 or pad greater than 0 not tested in col2im_sk_gpu()."; - int ext_patch_h = (patch_h - 1) * kstride_h + 1; - int ext_patch_w = (patch_w - 1) * kstride_w + 1; - int height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; - int num_kernels = channels * height * width; + LOG(FATAL) + << "stride greater than 1 or pad greater" + << " than 0 not tested in col2im_sk_gpu()."; + int ext_patch_h = (patch_h - 1) * kstride_h + 1; + int ext_patch_w = (patch_w - 1) * kstride_w + 1; + int height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; + int num_kernels = channels * height * width; - col2im_sk_gpu_kernel<<>>( - num_kernels, data_col, height, width, channels, - patch_h, patch_w, ext_patch_h, ext_patch_w, - pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, - height_col, width_col, data_im); - CUDA_POST_KERNEL_CHECK; -} + col2im_sk_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), + CAFFE_CUDA_NUM_THREADS)( + num_kernels, data_col, height, width, channels, + patch_h, patch_w, ext_patch_h, ext_patch_w, + pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, + height_col, width_col, data_im); + CUDA_POST_KERNEL_CHECK; + } // Explicit instantiation template void col2im_sk_gpu(const float* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, float* data_im); + const int height, const int width, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + float* data_im); template void col2im_sk_gpu(const double* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, double* data_im); + const int height, const int width, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + double* data_im); - -template +template __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, - const int height, const int width, const int channels, - const int patch_h, const int patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, - Dtype* data_im) { + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int height_col, + const int width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width + pad_w; @@ -240,18 +260,18 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); /* - for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { - for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { - // the col location: [c * width * height + h_out, w_out] - int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize - + (w - w_col * stride_w); - val += data_col[(c_col * height_col + h_col) * width_col + w_col]; - } - } - */ + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + // the col location: [c * width * height + h_out, w_out] + int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize + + (w - w_col * stride_w); + val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + } + } + */ // equivalent implementation - int offset = - (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int offset = (c * patch_h * patch_w + h * patch_w + w) * height_col + * width_col; int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; int coeff_w_col = (1 - stride_w * height_col * width_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { @@ -263,21 +283,19 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, } } - - -template -void col2im_gpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im) { +template +void col2im_gpu(const Dtype* data_col, const int channels, const int height, + const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im) { int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) - col2im_gpu_kernel<<>>( + col2im_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), + CAFFE_CUDA_NUM_THREADS)( num_kernels, data_col, height, width, channels, patch_h, patch_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im); @@ -286,13 +304,17 @@ void col2im_gpu(const Dtype* data_col, const int channels, // Explicit instantiation template void col2im_gpu(const float* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, float* data_im); + const int height, const int width, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + float* data_im); template void col2im_gpu(const double* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, double* data_im); + const int height, const int width, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + double* data_im); -#endif // USE_CUDA +#endif // USE_CUDA } // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 8c92738c1a5..27f7b50302d 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -11,49 +11,55 @@ namespace caffe { template<> void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, - const float alpha, const float* A, const float* B, const float beta, - float* C) { + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const float alpha, + const float* A, const float* B, const float beta, + float* C) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; - cblas_sgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, - ldb, beta, C, N); + cblas_sgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, + beta, C, N); } template<> void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, - const double alpha, const double* A, const double* B, const double beta, - double* C) { + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const double alpha, + const double* A, const double* B, const double beta, + double* C) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; - cblas_dgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, - ldb, beta, C, N); + cblas_dgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, + beta, C, N); } -template <> +template<> void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const float alpha, const float* A, const float* x, - const float beta, float* y) { + const int N, const float alpha, const float* A, + const float* x, const float beta, float* y) { cblas_sgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); } -template <> +template<> void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const double alpha, const double* A, const double* x, - const double beta, double* y) { + const int N, const double alpha, const double* A, + const double* x, const double beta, double* y) { cblas_dgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); } -template <> +template<> void caffe_axpy(const int N, const float alpha, const float* X, - float* Y) { cblas_saxpy(N, alpha, X, 1, Y, 1); } + float* Y) { + cblas_saxpy(N, alpha, X, 1, Y, 1); +} -template <> +template<> void caffe_axpy(const int N, const double alpha, const double* X, - double* Y) { cblas_daxpy(N, alpha, X, 1, Y, 1); } + double* Y) { + cblas_daxpy(N, alpha, X, 1, Y, 1); +} -template +template void caffe_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { memset(Y, 0, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) @@ -68,35 +74,34 @@ template void caffe_set(const int N, const int alpha, int* Y); template void caffe_set(const int N, const float alpha, float* Y); template void caffe_set(const int N, const double alpha, double* Y); -template <> +template<> void caffe_add_scalar(const int N, const float alpha, float* Y) { for (int i = 0; i < N; ++i) { Y[i] += alpha; } } -template <> +template<> void caffe_add_scalar(const int N, const double alpha, double* Y) { for (int i = 0; i < N; ++i) { Y[i] += alpha; } } - -template +template void caffe_cpu_copy(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { - memcpy(Y, X, sizeof(Dtype) * N); - } + memcpy(Y, X, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) + } } template void caffe_cpu_copy(const int N, const int* X, int* Y); template void caffe_cpu_copy(const int N, const unsigned int* X, - unsigned int* Y); + unsigned int* Y); template void caffe_cpu_copy(const int N, const float* X, float* Y); template void caffe_cpu_copy(const int N, const double* X, double* Y); -template +template void caffe_copy(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { if (Caffe::mode() == Caffe::GPU) { @@ -104,7 +109,7 @@ void caffe_copy(const int N, const Dtype* X, Dtype* Y) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(caffe/alt_fn) CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault)); -#endif // USE_CUDA +#endif // USE_CUDA #else NO_GPU; #endif @@ -116,140 +121,134 @@ void caffe_copy(const int N, const Dtype* X, Dtype* Y) { template void caffe_copy(const int N, const int* X, int* Y); template void caffe_copy(const int N, const unsigned int* X, - unsigned int* Y); + unsigned int* Y); template void caffe_copy(const int N, const float* X, float* Y); template void caffe_copy(const int N, const double* X, double* Y); -template <> +template<> void caffe_scal(const int N, const float alpha, float *X) { cblas_sscal(N, alpha, X, 1); } -template <> +template<> void caffe_scal(const int N, const double alpha, double *X) { cblas_dscal(N, alpha, X, 1); } -template <> +template<> void caffe_cpu_axpby(const int N, const float alpha, const float* X, const float beta, float* Y) { cblas_saxpby(N, alpha, X, 1, beta, Y, 1); } -template <> +template<> void caffe_cpu_axpby(const int N, const double alpha, const double* X, const double beta, double* Y) { cblas_daxpby(N, alpha, X, 1, beta, Y, 1); } -template <> -void caffe_add(const int n, const float* a, const float* b, - float* y) { +template<> +void caffe_add(const int n, const float* a, const float* b, float* y) { vsAdd(n, a, b, y); } -template <> +template<> void caffe_add(const int n, const double* a, const double* b, - double* y) { + double* y) { vdAdd(n, a, b, y); } -template <> -void caffe_sub(const int n, const float* a, const float* b, - float* y) { +template<> +void caffe_sub(const int n, const float* a, const float* b, float* y) { vsSub(n, a, b, y); } -template <> +template<> void caffe_sub(const int n, const double* a, const double* b, - double* y) { + double* y) { vdSub(n, a, b, y); } -template <> -void caffe_mul(const int n, const float* a, const float* b, - float* y) { +template<> +void caffe_mul(const int n, const float* a, const float* b, float* y) { vsMul(n, a, b, y); } -template <> +template<> void caffe_mul(const int n, const double* a, const double* b, - double* y) { + double* y) { vdMul(n, a, b, y); } -template <> -void caffe_div(const int n, const float* a, const float* b, - float* y) { +template<> +void caffe_div(const int n, const float* a, const float* b, float* y) { vsDiv(n, a, b, y); } -template <> +template<> void caffe_div(const int n, const double* a, const double* b, - double* y) { + double* y) { vdDiv(n, a, b, y); } -template <> -void caffe_powx(const int n, const float* a, const float b, - float* y) { +template<> +void caffe_powx(const int n, const float* a, const float b, float* y) { vsPowx(n, a, b, y); } -template <> +template<> void caffe_powx(const int n, const double* a, const double b, - double* y) { + double* y) { vdPowx(n, a, b, y); } -template <> +template<> void caffe_sqr(const int n, const float* a, float* y) { vsSqr(n, a, y); } -template <> +template<> void caffe_sqr(const int n, const double* a, double* y) { vdSqr(n, a, y); } -template <> +template<> void caffe_exp(const int n, const float* a, float* y) { vsExp(n, a, y); } -template <> +template<> void caffe_exp(const int n, const double* a, double* y) { vdExp(n, a, y); } -template <> +template<> void caffe_log(const int n, const float* a, float* y) { vsLn(n, a, y); } -template <> +template<> void caffe_log(const int n, const double* a, double* y) { vdLn(n, a, y); } -template <> +template<> void caffe_abs(const int n, const float* a, float* y) { - vsAbs(n, a, y); + vsAbs(n, a, y); } -template <> +template<> void caffe_abs(const int n, const double* a, double* y) { - vdAbs(n, a, y); + vdAbs(n, a, y); } unsigned int caffe_rng_rand() { return (*caffe_rng())(); } -template +template Dtype caffe_nextafter(const Dtype b) { - return boost::math::nextafter( - b, std::numeric_limits::max()); + return boost::math::nextafter(b, std::numeric_limits::max()); } template @@ -263,20 +262,20 @@ void caffe_rng_uniform(const int n, unsigned int* r) { CHECK(r); boost::uniform_int random_distribution(INT32_MIN, INT32_MAX); boost::variate_generator> - variate_generator(caffe_rng(), random_distribution); + variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { r[i] = variate_generator(); } } -template +template void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r) { CHECK_GE(n, 0); CHECK(r); CHECK_LE(a, b); boost::uniform_real random_distribution(a, caffe_nextafter(b)); - boost::variate_generator > - variate_generator(caffe_rng(), random_distribution); + boost::variate_generator> + variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { r[i] = variate_generator(); } @@ -290,37 +289,37 @@ template void caffe_rng_uniform(const int n, const double a, const double b, double* r); -template -void caffe_rng_gaussian(const int n, const Dtype a, - const Dtype sigma, Dtype* r) { +template +void caffe_rng_gaussian(const int n, const Dtype a, const Dtype sigma, + Dtype* r) { CHECK_GE(n, 0); CHECK(r); CHECK_GT(sigma, 0); boost::normal_distribution random_distribution(a, sigma); - boost::variate_generator > - variate_generator(caffe_rng(), random_distribution); + boost::variate_generator> + variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { r[i] = variate_generator(); } } template -void caffe_rng_gaussian(const int n, const float mu, - const float sigma, float* r); +void caffe_rng_gaussian(const int n, const float mu, const float sigma, + float* r); template void caffe_rng_gaussian(const int n, const double mu, const double sigma, double* r); -template +template void caffe_rng_bernoulli(const int n, const Dtype p, int* r) { CHECK_GE(n, 0); CHECK(r); CHECK_GE(p, 0); CHECK_LE(p, 1); boost::bernoulli_distribution random_distribution(p); - boost::variate_generator > - variate_generator(caffe_rng(), random_distribution); + boost::variate_generator> + variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { r[i] = variate_generator(); } @@ -332,15 +331,15 @@ void caffe_rng_bernoulli(const int n, const double p, int* r); template void caffe_rng_bernoulli(const int n, const float p, int* r); -template +template void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r) { CHECK_GE(n, 0); CHECK(r); CHECK_GE(p, 0); CHECK_LE(p, 1); boost::bernoulli_distribution random_distribution(p); - boost::variate_generator > - variate_generator(caffe_rng(), random_distribution); + boost::variate_generator> + variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { r[i] = static_cast(variate_generator()); } @@ -352,19 +351,20 @@ void caffe_rng_bernoulli(const int n, const double p, unsigned int* r); template void caffe_rng_bernoulli(const int n, const float p, unsigned int* r); -template <> +template<> float caffe_cpu_strided_dot(const int n, const float* x, const int incx, - const float* y, const int incy) { + const float* y, const int incy) { return cblas_sdot(n, x, incx, y, incy); } -template <> +template<> double caffe_cpu_strided_dot(const int n, const double* x, - const int incx, const double* y, const int incy) { + const int incx, const double* y, + const int incy) { return cblas_ddot(n, x, incx, y, incy); } -template +template Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y) { return caffe_cpu_strided_dot(n, x, 1, y, 1); } @@ -375,46 +375,46 @@ float caffe_cpu_dot(const int n, const float* x, const float* y); template double caffe_cpu_dot(const int n, const double* x, const double* y); -template <> +template<> int caffe_cpu_hamming_distance(const int n, const float* x, - const float* y) { + const float* y) { int dist = 0; for (int i = 0; i < n; ++i) { - dist += __builtin_popcount(static_cast(x[i]) ^ - static_cast(y[i])); + dist += __builtin_popcount( + static_cast(x[i]) ^ static_cast(y[i])); } return dist; } -template <> +template<> int caffe_cpu_hamming_distance(const int n, const double* x, - const double* y) { + const double* y) { int dist = 0; for (int i = 0; i < n; ++i) { - dist += __builtin_popcountl(static_cast(x[i]) ^ - static_cast(y[i])); + dist += __builtin_popcountl( + static_cast(x[i]) ^ static_cast(y[i])); } return dist; } -template <> +template<> float caffe_cpu_asum(const int n, const float* x) { return cblas_sasum(n, x, 1); } -template <> +template<> double caffe_cpu_asum(const int n, const double* x) { return cblas_dasum(n, x, 1); } -template <> +template<> void caffe_cpu_scale(const int n, const float alpha, const float *x, float* y) { cblas_scopy(n, x, 1, y, 1); cblas_sscal(n, alpha, y, 1); } -template <> +template<> void caffe_cpu_scale(const int n, const double alpha, const double *x, double* y) { cblas_dcopy(n, x, 1, y, 1); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 39b2550aa2b..eac7c3f3f80 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -7,18 +7,19 @@ #include #include #include +#include #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { - -template <> +template<> void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, - const float alpha, const float* A, const float* B, const float beta, - float* C) { + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const float alpha, + const float* A, const float* B, const float beta, + float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; @@ -27,14 +28,15 @@ void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, - N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); + N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } -template <> +template<> void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, - const double alpha, const double* A, const double* B, const double beta, - double* C) { + const CBLAS_TRANSPOSE TransB, const int M, + const int N, const int K, const double alpha, + const double* A, const double* B, const double beta, + double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; @@ -43,38 +45,38 @@ void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, - N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); + N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } -template <> +template<> void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const float alpha, const float* A, const float* x, - const float beta, float* y) { + const int N, const float alpha, const float* A, + const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; - CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, - A, N, x, 1, &beta, y, 1)); + CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, + N, M, &alpha, A, N, x, 1, &beta, y, 1)); } -template <> +template<> void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const double alpha, const double* A, const double* x, - const double beta, double* y) { + const int N, const double alpha, const double* A, + const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; - CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, - A, N, x, 1, &beta, y, 1)); + CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, + &alpha, A, N, x, 1, &beta, y, 1)); } -template <> +template<> void caffe_gpu_axpy(const int N, const float alpha, const float* X, - float* Y) { + float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } -template <> +template<> void caffe_gpu_axpy(const int N, const double alpha, const double* X, - double* Y) { + double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } @@ -84,81 +86,81 @@ void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { } } -template <> +template<> void caffe_gpu_scal(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } -template <> +template<> void caffe_gpu_scal(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } -template <> +template<> void caffe_gpu_axpby(const int N, const float alpha, const float* X, - const float beta, float* Y) { + const float beta, float* Y) { caffe_gpu_scal(N, beta, Y); caffe_gpu_axpy(N, alpha, X, Y); } -template <> +template<> void caffe_gpu_axpby(const int N, const double alpha, const double* X, - const double beta, double* Y) { + const double beta, double* Y) { caffe_gpu_scal(N, beta, Y); caffe_gpu_axpy(N, alpha, X, Y); } -template <> +template<> void caffe_gpu_dot(const int n, const float* x, const float* y, - float* out) { + float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } -template <> +template<> void caffe_gpu_dot(const int n, const double* x, const double* y, - double * out) { + double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } -template <> +template<> void caffe_gpu_asum(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } -template <> +template<> void caffe_gpu_asum(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } -template <> +template<> void caffe_gpu_scale(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } -template <> +template<> void caffe_gpu_scale(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } -template +template __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } -template +template void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) - set_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + set_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, alpha, Y); } @@ -166,234 +168,235 @@ template void caffe_gpu_set(const int N, const int alpha, int* Y); template void caffe_gpu_set(const int N, const float alpha, float* Y); template void caffe_gpu_set(const int N, const double alpha, double* Y); -template +template __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } -template <> +template<> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_scalar_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + add_scalar_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), + CAFFE_CUDA_NUM_THREADS)( N, alpha, Y); } -template <> +template<> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_scalar_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + add_scalar_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), + CAFFE_CUDA_NUM_THREADS)( N, alpha, Y); } -template -__global__ void add_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { +template +__global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, + Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } -template <> +template<> void caffe_gpu_add(const int N, const float* a, const float* b, - float* y) { + float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + add_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template <> +template<> void caffe_gpu_add(const int N, const double* a, const double* b, - double* y) { + double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - add_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + add_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template -__global__ void sub_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { +template +__global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, + Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } -template <> +template<> void caffe_gpu_sub(const int N, const float* a, const float* b, - float* y) { + float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - sub_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + sub_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template <> +template<> void caffe_gpu_sub(const int N, const double* a, const double* b, - double* y) { + double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - sub_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + sub_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template -__global__ void mul_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { +template +__global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, + Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } -template <> -void caffe_gpu_mul(const int N, const float* a, - const float* b, float* y) { +template<> +void caffe_gpu_mul(const int N, const float* a, const float* b, + float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - mul_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + mul_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template <> -void caffe_gpu_mul(const int N, const double* a, - const double* b, double* y) { +template<> +void caffe_gpu_mul(const int N, const double* a, const double* b, + double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - mul_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + mul_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template -__global__ void div_kernel(const int n, const Dtype* a, - const Dtype* b, Dtype* y) { +template +__global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, + Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } -template <> -void caffe_gpu_div(const int N, const float* a, - const float* b, float* y) { +template<> +void caffe_gpu_div(const int N, const float* a, const float* b, + float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - div_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + div_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template <> -void caffe_gpu_div(const int N, const double* a, - const double* b, double* y) { +template<> +void caffe_gpu_div(const int N, const double* a, const double* b, + double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - div_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + div_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, b, y); } -template +template __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } -template <> +template<> void caffe_gpu_abs(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - abs_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + abs_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } -template <> +template<> void caffe_gpu_abs(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - abs_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + abs_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } - -template +template __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } -template <> +template<> void caffe_gpu_exp(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - exp_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + exp_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } -template <> +template<> void caffe_gpu_exp(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - exp_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + exp_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } -template +template __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } -template <> +template<> void caffe_gpu_log(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - log_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + log_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } -template <> +template<> void caffe_gpu_log(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - log_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + log_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } -template -__global__ void powx_kernel(const int n, const Dtype* a, - const Dtype alpha, Dtype* y) { +template +__global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, + Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } -template <> -void caffe_gpu_powx(const int N, const float* a, - const float alpha, float* y) { +template<> +void caffe_gpu_powx(const int N, const float* a, const float alpha, + float* y) { // NOLINT_NEXT_LINE(whitespace/operators) - powx_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + powx_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, alpha, y); } -template <> -void caffe_gpu_powx(const int N, const double* a, - const double alpha, double* y) { +template<> +void caffe_gpu_powx(const int N, const double* a, const double alpha, + double* y) { // NOLINT_NEXT_LINE(whitespace/operators) - powx_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( + powx_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, alpha, y); } -DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - - (x[index] < Dtype(0))); +DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC( + sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); -__global__ void popc_kernel(const int n, const float* a, - const float* b, uint8_t* y) { +__global__ void popc_kernel(const int n, const float* a, const float* b, + uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { - y[index] = __popc(static_cast(a[index]) ^ - static_cast(b[index])); + y[index] = __popc( + static_cast(a[index]) ^ static_cast(b[index])); } } -__global__ void popcll_kernel(const int n, const double* a, - const double* b, uint8_t* y) { +__global__ void popcll_kernel(const int n, const double* a, const double* b, + uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { - y[index] = __popcll(static_cast(a[index]) ^ - static_cast(b[index])); + y[index] = __popcll( + static_cast(a[index]) ^ static_cast(b[index])); } } -template <> +template<> uint32_t caffe_gpu_hamming_distance(const int n, const float* x, - const float* y) { + const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; @@ -401,13 +404,13 @@ uint32_t caffe_gpu_hamming_distance(const int n, const float* x, // NOLINT_NEXT_LINE(whitespace/operators) popc_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS)( n, x, y, thrust::raw_pointer_cast(popcounts.data())); - return thrust::reduce(popcounts.begin(), popcounts.end(), - (uint32_t) 0, thrust::plus()); + return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, + thrust::plus()); } -template <> +template<> uint32_t caffe_gpu_hamming_distance(const int n, const double* x, - const double* y) { + const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; @@ -416,15 +419,16 @@ uint32_t caffe_gpu_hamming_distance(const int n, const double* x, popcll_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS)( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), - /* NOLINT_NEXT_LINE(build/include_what_you_use) */ - (uint32_t) 0, thrust::plus()); + /* NOLINT_NEXT_LINE(build/include_what_you_use) */ + (uint32_t) 0, + thrust::plus()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } -template <> +template<> void caffe_gpu_rng_uniform(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); @@ -437,7 +441,7 @@ void caffe_gpu_rng_uniform(const int n, const float a, const float b, } } -template <> +template<> void caffe_gpu_rng_uniform(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); @@ -450,14 +454,14 @@ void caffe_gpu_rng_uniform(const int n, const double a, const double b, } } -template <> +template<> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } -template <> +template<> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( @@ -465,4 +469,4 @@ void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, } } // namespace caffe -#endif // USE_CUDA +#endif // USE_CUDA diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index f0f9854ebfa..4714158ca09 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -95,9 +95,12 @@ void UpgradeV0PaddingLayers(const NetParameter& param, << "Padding layer input to " "non-convolutional / non-pooling layer type " << layer_param.type(); - CHECK_EQ(layer_connection.bottom_size(), 1)<< "Conv Layer takes a single blob as input."; - CHECK_EQ(source_layer.bottom_size(), 1)<< "Padding Layer takes a single blob as input."; - CHECK_EQ(source_layer.top_size(), 1)<< "Padding Layer produces a single blob as output."; + CHECK_EQ(layer_connection.bottom_size(), 1) + << "Conv Layer takes a single blob as input."; + CHECK_EQ(source_layer.bottom_size(), 1) + << "Padding Layer takes a single blob as input."; + CHECK_EQ(source_layer.top_size(), 1) + << "Padding Layer produces a single blob as output."; int layer_index = param_upgraded_pad->layers_size() - 1; param_upgraded_pad->mutable_layers(layer_index)->mutable_layer() ->set_pad(source_layer.layer().pad()); From e1e4f89704a9c6aff36a7b1a9436b01fb28c0db9 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 21 Jun 2015 02:33:38 +0200 Subject: [PATCH 069/254] Updated travis script. --- scripts/travis/travis_install.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index fd0295e386d..a3097cc2edc 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -16,7 +16,7 @@ add-apt-repository -y ppa:ubuntu-toolchain-r/test apt-get -y update apt-get install \ - g++-4.8 wget git curl \ + gcc-4.8 g++-4.8 wget git curl \ python-dev python-numpy \ libleveldb-dev libsnappy-dev libopencv-dev \ libboost1.54-dev libboost-system1.54-dev libboost-python1.54-dev libboost-thread1.54-dev \ @@ -25,6 +25,10 @@ apt-get install \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ bc + +update-alternatives --remove-all gcc +update-alternatives --remove-all g++ +update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90 update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 # Add a special apt-repository to install CMake 2.8.9 for CMake Caffe build, From 2a3d94b3120ccac75d6809330b0a826aee81e02b Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 21 Jun 2015 02:40:49 +0200 Subject: [PATCH 070/254] Another attempt on travis script fixing. --- scripts/travis/travis_install.sh | 3 --- src/caffe/test/test_mergecrop_layer.cpp | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index a3097cc2edc..e82fa7dc0ce 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -25,9 +25,6 @@ apt-get install \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ bc - -update-alternatives --remove-all gcc -update-alternatives --remove-all g++ update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90 update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp index d19b763d403..6a42c61112f 100644 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -1,3 +1,4 @@ +#ifndef CPU_ONLY // CPU-GPU test #include #include @@ -209,3 +210,4 @@ TYPED_TEST(MergeCropLayerTest, TestBackward) { } } // namespace caffe +#endif // !CPU_ONLY From 83bc548509181bcaa75859c5ae748b4457f82704 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 21 Jun 2015 04:06:17 +0200 Subject: [PATCH 071/254] Trying g++ 4.9 with Travis. --- include/caffe/common.hpp | 2 +- scripts/travis/travis_install.sh | 8 +-- src/caffe/common.cpp | 2 +- src/caffe/test/test_im2col_kernel.cu | 99 ++++++++++++++-------------- tools/caffe.cpp | 2 +- 5 files changed, 58 insertions(+), 55 deletions(-) diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 75bc4b53c09..f10dc0da37e 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -167,7 +167,7 @@ class Caffe { static void Synchronize(int device_id); // Get a device context - static DeviceContext& GetDeviceContext(int id); + static DeviceContext & GetDeviceContext(int id); // Get a device OpenCL program #ifdef USE_GREENTEA diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index e82fa7dc0ce..e8adc101eca 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -11,12 +11,12 @@ MAKE="make --jobs=$NUM_THREADS" add-apt-repository -y ppa:tuleu/precise-backports # This ppa is for boost 1.54 add-apt-repository -y ppa:boost-latest/ppa -# This ppa is for g++ 4.8 +# This ppa is for g++ 4.9 add-apt-repository -y ppa:ubuntu-toolchain-r/test apt-get -y update apt-get install \ - gcc-4.8 g++-4.8 wget git curl \ + gcc-4.9 g++-4.9 wget git curl \ python-dev python-numpy \ libleveldb-dev libsnappy-dev libopencv-dev \ libboost1.54-dev libboost-system1.54-dev libboost-python1.54-dev libboost-thread1.54-dev \ @@ -25,8 +25,8 @@ apt-get install \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ bc -update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90 -update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 +update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.9 90 +update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.9 90 # Add a special apt-repository to install CMake 2.8.9 for CMake Caffe build, # if needed. By default, Aptitude in Ubuntu 12.04 installs CMake 2.8.7, but diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 646feaa823c..c324cd976a3 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -305,7 +305,7 @@ void Caffe::SetDevices(std::vector device_ids) { #endif // USE_GREENTEA } -DeviceContext& Caffe::GetDeviceContext(int id) { +DeviceContext & Caffe::GetDeviceContext(int id) { return id == -1 ? Get().default_device_context_ : Get().device_contexts_[id]; } diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 5d6d69b61a4..62276b274b9 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -71,57 +71,60 @@ class Im2colKernelTest : public GPUDeviceTest { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); - TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); - - // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), - this->channels_, this->height_, this->width_, - this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, - this->stride_, this->stride_, - cpu_data + this->blob_top_cpu_->offset(n)); - } - - - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + // Reshape the blobs to correct size for im2col output + this->blob_top_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); + + this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); + + const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); + TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); + + // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { - int grid_dim = default_grid_dim/grid_div; - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel - CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( - num_kernels, bottom_data + this->blob_bottom_->offset(n), - this->height_, this->width_, this->kernel_size_, this->kernel_size_, - this->pad_, this->pad_, this->stride_, this->stride_, - this->height_col_, this->width_col_, - top_data + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; + im2col_cpu(this->blob_bottom_->cpu_data() + + this->blob_bottom_->offset(n), + this->channels_, this->height_, this->width_, + this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, + this->stride_, this->stride_, + cpu_data + this->blob_top_cpu_->offset(n)); } - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = cpu_data[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; + + // GPU version + int num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + + // Launch with different grid sizes + for (int grid_div = 2; grid_div <= 8; grid_div++) { + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + int grid_dim = default_grid_dim/grid_div; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel + CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( + num_kernels, bottom_data + this->blob_bottom_->offset(n), + this->height_, this->width_, this->kernel_size_, this->kernel_size_, + this->pad_, this->pad_, this->stride_, this->stride_, + this->height_col_, this->width_col_, + top_data + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; + } + + // Compare results against CPU version + for (int i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = cpu_data[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; + } } } } diff --git a/tools/caffe.cpp b/tools/caffe.cpp index aef7b5bbd74..7a65077d350 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -72,7 +72,7 @@ static BrewFunction GetBrewFunction(const caffe::string& name) { int device_query() { if ( FLAGS_gpu < 0 ) { // If no gpu is specified, enumerate all the devices. - Caffe::EnumerateDevices(); + caffe::Caffe::EnumerateDevices(); } else { LOG(INFO) << "Querying device ID = " << FLAGS_gpu; caffe::Caffe::SetDevice(FLAGS_gpu); From f80da91314efab8fa548b703ded7fd7aa4505b60 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 21 Jun 2015 04:30:52 +0200 Subject: [PATCH 072/254] Common.cpp fix for CUDA and CPU_ONLY build. --- src/caffe/common.cpp | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index c324cd976a3..b9bfb971cb7 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -47,6 +47,14 @@ void GlobalInit(int* pargc, char*** pargv) { ::google::InstallFailureSignalHandler(); } +DeviceContext & Caffe::GetDeviceContext(int id) { + return id == -1 ? Get().default_device_context_ : Get().device_contexts_[id]; +} + +DeviceContext& Caffe::GetDefaultDeviceContext() { + return Get().default_device_context_; +} + #ifdef CPU_ONLY // CPU-only Caffe. Caffe::Caffe() @@ -67,6 +75,12 @@ void Caffe::DeviceQuery() { NO_GPU; } +void Caffe::Synchronize(int device_id) { +} + +void Caffe::EnumerateDevices() { +} + class Caffe::RNG::Generator { public: Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} @@ -193,12 +207,8 @@ void Caffe::EnumerateDevices() { #endif LOG(INFO)<< "Total devices: " << cuda_device_count + greentea_device_count; -#ifdef USE_CUDA LOG(INFO)<< "CUDA devices: " << cuda_device_count; -#endif // USE_CUDA -#ifdef USE_GREENTEA LOG(INFO)<< "OpenCL devices: " << greentea_device_count; -#endif // USE_GREENTEA // Display info for all devices #ifdef USE_CUDA @@ -305,20 +315,12 @@ void Caffe::SetDevices(std::vector device_ids) { #endif // USE_GREENTEA } -DeviceContext & Caffe::GetDeviceContext(int id) { - return id == -1 ? Get().default_device_context_ : Get().device_contexts_[id]; -} - #ifdef USE_GREENTEA viennacl::ocl::program & Caffe::GetDeviceProgram(int id) { return id == -1 ? Get().default_ocl_program_ : Get().ocl_programs_[id]; } #endif // USE_GREENTEA -DeviceContext& Caffe::GetDefaultDeviceContext() { - return Get().default_device_context_; -} - void Caffe::SetDevice(const int device_id) { std::vector devices; devices.push_back(device_id); From 3ba0ef45b658760e5b4af96b79f9f82599fbbe62 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 21 Jun 2015 04:45:24 +0200 Subject: [PATCH 073/254] Now Travis CI works. --- src/caffe/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index b9bfb971cb7..c7a2c6b94e6 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -254,7 +254,7 @@ void Caffe::SetDevices(std::vector device_ids) { Get().ocl_programs_.clear(); #endif int cuda_device_count = 0; - int greentea_device_count = 0; + // int greentea_device_count = 0; #ifdef USE_CUDA cudaGetDeviceCount(&cuda_device_count); #endif // USE_CUDA From 737886bacbb406150229fac14455d889e21d474a Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 21 Jun 2015 12:44:33 +0200 Subject: [PATCH 074/254] Fix for bricked OpenCL device counter. --- src/caffe/common.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index c7a2c6b94e6..23c1028782c 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -254,7 +254,6 @@ void Caffe::SetDevices(std::vector device_ids) { Get().ocl_programs_.clear(); #endif int cuda_device_count = 0; - // int greentea_device_count = 0; #ifdef USE_CUDA cudaGetDeviceCount(&cuda_device_count); #endif // USE_CUDA @@ -269,6 +268,8 @@ void Caffe::SetDevices(std::vector device_ids) { // Initialize GreenTea devices #ifdef USE_GREENTEA + int greentea_device_count = 0; + typedef std::vector platforms_type; platforms_type platforms = viennacl::ocl::get_platforms(); From 1e574c5abab888709cc7476f56ccf45ec8fa96f0 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 22 Jun 2015 20:34:06 +0200 Subject: [PATCH 075/254] PoolingSK kernel fix, added #2213, synchronized with master branch. --- include/caffe/filler.hpp | 58 +++++++++++++++++++++ src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/pooling_sk.cl | 3 +- 3 files changed, 62 insertions(+), 3 deletions(-) diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index 1968ace61c9..64e9ba6d396 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -178,6 +178,62 @@ class XavierFiller : public Filler { } }; + + +/*! +@brief Fills a Blob with coefficients of bilinear interpolation for upsampling. +This is intended to be used in DeconvolutionLayer acting as UpsamplingLayer. +You can upsample a feature map with shape of (B, C, H, W) by any integer factor +using the following proto. +\code +layer { + name: "upsample", type: "Deconvolution" + bottom: "{{bottom_name}}" top: "{{top_name}}" + convolution_param { + kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} + num_output: {{C}} group: {{C}} + pad: {{ceil((factor - 1) / 2.)}} + weight_filler: { type: "bilinear_upsampling" } bias_term: false + } + param { lr_mult: 0 decay_mult: 0 } +} +\endcode +Please use this by replacing `{{}}` with your values. By specifying +`num_output: {{C}} group: {{C}}`, it behaves as +channel-wise convolution. The filter shape of this deconvolution layer will be +(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) +interpolation kernel for every channel of the filter identically. The resulting +shape of the top feature map will be (B, C, factor * H, factor * W). +Note that the learning rate and the +weight decay are set to 0 in order to keep coefficient values of bilinear +interpolation unchanged during training. If you apply this to an image, this +operation is equivalent to the following call in Python with Scikit.Image. +\code{.py} +out = skimage.transform.rescale(img, factor, mode='constant', cval=0) +\endcode + */ +template +class BilinearFiller : public Filler { + public: + explicit BilinearFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; + CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; + Dtype* data = blob->mutable_cpu_data(); + int f = ceil(blob->width() / 2.); + float c = (2 * f - 1 - f % 2) / (2. * f); + for (int i = 0; i < blob->count(); ++i) { + float x = i % blob->width(); + float y = (i / blob->width()) % blob->height(); + data[i] = ((1.0 - fabs(x / f - c)) * (1.0 - fabs(y / f - c))); + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + + /** * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming @@ -240,6 +296,8 @@ Filler* GetFiller(const FillerParameter& param) { return new UniformFiller(param); } else if (type == "xavier") { return new XavierFiller(param); + } else if (type == "bilinear") { + return new BilinearFiller(param); } else if (type == "msra") { return new MSRAFiller(param); } else { diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 5af538a716b..1d1f46d03a9 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -21,7 +21,7 @@ std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#en std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT @@ -39,7 +39,7 @@ std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#e std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n return;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index 3b2108b870e..da98af58206 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -191,7 +191,8 @@ __kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_data_ptr[h * width + w]; - return; + h = hend; + w = wend; } } } From 702bab44c0ccaf75c9d0c3cfd777da0a3623f2f8 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 24 Jun 2015 00:21:10 +0200 Subject: [PATCH 076/254] Performance improvement for Convolution Layers, fix for CuDNN tests with OpenCL. --- include/caffe/blob.hpp | 10 +- src/caffe/blob.cpp | 16 +- src/caffe/layers/base_conv_layer.cpp | 11 +- src/caffe/layers/conv_sk_layer.cpp | 15 +- src/caffe/test/test_convolution_layer.cpp | 424 +++++++++++----------- src/caffe/test/test_neuron_layer.cpp | 162 +++++---- src/caffe/test/test_pooling_layer.cpp | 270 +++++++------- src/caffe/test/test_softmax_layer.cpp | 66 ++-- 8 files changed, 517 insertions(+), 457 deletions(-) diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 54d8bc9de34..02da73fc4bc 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -64,12 +64,14 @@ class Blob { * Note that reshaping an input blob and immediately calling Net::Backward is * an error; either Net::Forward or Net::Reshape need to be called to * propagate the new input shape to higher layers. + * + * Reshape returns true if new memory was allocated. */ - void Reshape(const vector& shape); - void Reshape(const BlobShape& shape); - void Reshape(const int num, const int channels, const int height, + bool Reshape(const vector& shape); + bool Reshape(const BlobShape& shape); + bool Reshape(const int num, const int channels, const int height, const int width); - void ReshapeLike(const Blob& other); + bool ReshapeLike(const Blob& other); inline string shape_string() const { ostringstream stream; for (int i = 0; i < shape_.size(); ++i) { diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 92f84ae2652..6e7ecfd96a9 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -14,18 +14,18 @@ namespace caffe { template -void Blob::Reshape(const int num, const int channels, const int height, +bool Blob::Reshape(const int num, const int channels, const int height, const int width) { vector shape(4); shape[0] = num; shape[1] = channels; shape[2] = height; shape[3] = width; - Reshape(shape); + return Reshape(shape); } template -void Blob::Reshape(const vector& shape) { +bool Blob::Reshape(const vector& shape) { CHECK_LE(shape.size(), kMaxBlobAxes); count_ = 1; shape_.resize(shape.size()); @@ -39,22 +39,24 @@ void Blob::Reshape(const vector& shape) { capacity_ = count_; data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype), device_context_)); diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype), device_context_)); + return true; } + return false; } template -void Blob::Reshape(const BlobShape& shape) { +bool Blob::Reshape(const BlobShape& shape) { CHECK_LE(shape.dim_size(), kMaxBlobAxes); vector shape_vec(shape.dim_size()); for (int i = 0; i < shape.dim_size(); ++i) { shape_vec[i] = shape.dim(i); } - Reshape(shape_vec); + return Reshape(shape_vec); } template -void Blob::ReshapeLike(const Blob& other) { - Reshape(other.shape()); +bool Blob::ReshapeLike(const Blob& other) { + return Reshape(other.shape()); } template diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index fb7d1895af3..28e9b0300ac 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -156,9 +156,14 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, // Set up the all ones "bias multiplier" for adding biases by BLAS if (bias_term_) { vector bias_multiplier_shape(1, height_out_ * width_out_); - bias_multiplier_.Reshape(bias_multiplier_shape); - caffe_set(bias_multiplier_.count(), Dtype(1), - bias_multiplier_.mutable_cpu_data()); + bool reshaped = bias_multiplier_.Reshape(bias_multiplier_shape); + // This will trigger a memory copy if in GPU mode, + // which may not be necessary. + // Thus omit to set the values if not necessary. + if (reshaped) { + caffe_set(bias_multiplier_.count(), Dtype(1), + bias_multiplier_.mutable_cpu_data()); + } } } diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index da6d0614efa..20a61e7ca78 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -83,11 +83,9 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; - // TODO: Change this - if (kstride_h_ != 23 || this->device_context_.backend() == BACKEND_CUDA) { - col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, + col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, width_out); - } + // Set the parameters CHECK_EQ(num_output_ % group_, 0) << "Number of output should be multiples of group."; @@ -128,8 +126,13 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, } // Set up the all ones "bias multiplier" for adding bias using blas if (bias_term_) { - bias_multiplier_.Reshape(1, 1, 1, N_); - caffe_set(N_, Dtype(1), bias_multiplier_.mutable_cpu_data()); + bool reshaped = bias_multiplier_.Reshape(1, 1, 1, N_); + // This will trigger a memory copy if in GPU mode, + // which may not be necessary. + // Thus omit to set the values if not necessary. + if (reshaped) { + caffe_set(N_, Dtype(1), bias_multiplier_.mutable_cpu_data()); + } } this->param_propagate_down_.resize(this->blobs_.size(), true); } diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 67d41fff844..37884f391d2 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -467,231 +467,243 @@ class CuDNNConvolutionLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); - convolution_param->set_num_output(4); - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 2); - EXPECT_EQ(this->blob_top_->channels(), 4); - EXPECT_EQ(this->blob_top_->height(), 2); - EXPECT_EQ(this->blob_top_->width(), 1); - EXPECT_EQ(this->blob_top_2_->num(), 2); - EXPECT_EQ(this->blob_top_2_->channels(), 4); - EXPECT_EQ(this->blob_top_2_->height(), 2); - EXPECT_EQ(this->blob_top_2_->width(), 1); - // setting group should not change the shape - convolution_param->set_num_output(3); - convolution_param->set_group(3); - layer.reset(new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 2); - EXPECT_EQ(this->blob_top_->channels(), 3); - EXPECT_EQ(this->blob_top_->height(), 2); - EXPECT_EQ(this->blob_top_->width(), 1); - EXPECT_EQ(this->blob_top_2_->num(), 2); - EXPECT_EQ(this->blob_top_2_->channels(), 3); - EXPECT_EQ(this->blob_top_2_->height(), 2); - EXPECT_EQ(this->blob_top_2_->width(), 1); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 4); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 3); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); + } } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); - convolution_param->set_num_output(4); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("constant"); - convolution_param->mutable_bias_filler()->set_value(0.1); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Check against reference convolution. - const TypeParam* top_data; - const TypeParam* ref_top_data; - caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_)); - top_data = this->blob_top_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); - } - caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_2_)); - top_data = this->blob_top_2_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const TypeParam* top_data; + const TypeParam* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } } } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); - convolution_param->set_num_output(3); - convolution_param->set_group(3); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("constant"); - convolution_param->mutable_bias_filler()->set_value(0.1); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Check against reference convolution. - const TypeParam* top_data; - const TypeParam* ref_top_data; - caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_)); - top_data = this->blob_top_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const TypeParam* top_data; + const TypeParam* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } } } TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { - // Test separable convolution by computing the Sobel operator - // as a single filter then comparing the result - // as the convolution of two rectangular filters. + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + // Test separable convolution by computing the Sobel operator + // as a single filter then comparing the result + // as the convolution of two rectangular filters. - // Fill bottoms with identical Gaussian noise. - shared_ptr > filler; - FillerParameter filler_param; - filler_param.set_value(1.); - filler.reset(new GaussianFiller(filler_param)); - filler->Fill(this->blob_bottom_); - this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); - // Compute Sobel G_x operator as 3 x 3 convolution. - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); - convolution_param->set_num_output(1); - convolution_param->set_bias_term(false); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); - TypeParam* weights = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 9; // 3 x 3 filter - weights[i + 0] = -1; - weights[i + 1] = 0; - weights[i + 2] = 1; - weights[i + 3] = -2; - weights[i + 4] = 0; - weights[i + 5] = 2; - weights[i + 6] = -1; - weights[i + 7] = 0; - weights[i + 8] = 1; - } - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. - // (1) the [1 2 1] column filter - vector*> sep_blob_bottom_vec; - vector*> sep_blob_top_vec; - shared_ptr > blob_sep(new Blob()); - sep_blob_bottom_vec.push_back(this->blob_bottom_2_); - sep_blob_top_vec.push_back(this->blob_top_2_); - convolution_param->clear_kernel_size(); - convolution_param->clear_stride(); - convolution_param->set_kernel_h(3); - convolution_param->set_kernel_w(1); - convolution_param->set_stride_h(2); - convolution_param->set_stride_w(1); - convolution_param->set_num_output(1); - convolution_param->set_bias_term(false); - layer.reset(new CuDNNConvolutionLayer(layer_param)); - layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); - TypeParam* weights_1 = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 3; // 3 x 1 filter - weights_1[i + 0] = 1; - weights_1[i + 1] = 2; - weights_1[i + 2] = 1; - } - layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); - layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); - // (2) the [-1 0 1] row filter - blob_sep->CopyFrom(*this->blob_top_2_, false, true); - sep_blob_bottom_vec.clear(); - sep_blob_bottom_vec.push_back(blob_sep.get()); - convolution_param->set_kernel_h(1); - convolution_param->set_kernel_w(3); - convolution_param->set_stride_h(1); - convolution_param->set_stride_w(2); - convolution_param->set_num_output(1); - convolution_param->set_bias_term(false); - layer.reset(new CuDNNConvolutionLayer(layer_param)); - layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 1, 3)); - TypeParam* weights_2 = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 3; // 1 x 3 filter - weights_2[i + 0] = -1; - weights_2[i + 1] = 0; - weights_2[i + 2] = 1; - } - layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); - layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); - // Test equivalence of full and separable filters. - const TypeParam* top_data = this->blob_top_->cpu_data(); - const TypeParam* sep_top_data = this->blob_top_2_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); + // Fill bottoms with identical Gaussian noise. + shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new GaussianFiller(filler_param)); + filler->Fill(this->blob_bottom_); + this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); + // Compute Sobel G_x operator as 3 x 3 convolution. + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); + TypeParam* weights = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 9; // 3 x 3 filter + weights[i + 0] = -1; + weights[i + 1] = 0; + weights[i + 2] = 1; + weights[i + 3] = -2; + weights[i + 4] = 0; + weights[i + 5] = 2; + weights[i + 6] = -1; + weights[i + 7] = 0; + weights[i + 8] = 1; + } + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. + // (1) the [1 2 1] column filter + vector*> sep_blob_bottom_vec; + vector*> sep_blob_top_vec; + shared_ptr > blob_sep(new Blob()); + sep_blob_bottom_vec.push_back(this->blob_bottom_2_); + sep_blob_top_vec.push_back(this->blob_top_2_); + convolution_param->clear_kernel_size(); + convolution_param->clear_stride(); + convolution_param->set_kernel_h(3); + convolution_param->set_kernel_w(1); + convolution_param->set_stride_h(2); + convolution_param->set_stride_w(1); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new CuDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); + TypeParam* weights_1 = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 3 x 1 filter + weights_1[i + 0] = 1; + weights_1[i + 1] = 2; + weights_1[i + 2] = 1; + } + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // (2) the [-1 0 1] row filter + blob_sep->CopyFrom(*this->blob_top_2_, false, true); + sep_blob_bottom_vec.clear(); + sep_blob_bottom_vec.push_back(blob_sep.get()); + convolution_param->set_kernel_h(1); + convolution_param->set_kernel_w(3); + convolution_param->set_stride_h(1); + convolution_param->set_stride_w(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new CuDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 1, 3)); + TypeParam* weights_2 = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 1 x 3 filter + weights_2[i + 0] = -1; + weights_2[i + 1] = 0; + weights_2[i + 2] = 1; + } + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // Test equivalence of full and separable filters. + const TypeParam* top_data = this->blob_top_->cpu_data(); + const TypeParam* sep_top_data = this->blob_top_2_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); + } } } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); - convolution_param->set_num_output(2); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - CuDNNConvolutionLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + CuDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); - convolution_param->set_num_output(3); - convolution_param->set_group(3); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - CuDNNConvolutionLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + CuDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } #endif diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index e332a1c7506..8e5f60ee222 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -735,96 +735,110 @@ class CuDNNNeuronLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { - LayerParameter layer_param; - CuDNNReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Now, check values - const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); - const TypeParam* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_GE(top_data[i], 0.); - EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + } } } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { - LayerParameter layer_param; - CuDNNReLULayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { - LayerParameter layer_param; - CHECK(google::protobuf::TextFormat::ParseFromString( - "relu_param { negative_slope: 0.01 }", &layer_param)); - CuDNNReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Now, check values - const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); - const TypeParam* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - if (top_data[i] >= 0) { - EXPECT_FLOAT_EQ(top_data[i], bottom_data[i]); - } else { - EXPECT_FLOAT_EQ(top_data[i], bottom_data[i] * 0.01); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + "relu_param { negative_slope: 0.01 }", &layer_param)); + CuDNNReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] >= 0) { + EXPECT_FLOAT_EQ(top_data[i], bottom_data[i]); + } else { + EXPECT_FLOAT_EQ(top_data[i], bottom_data[i] * 0.01); + } } } } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { - LayerParameter layer_param; - CHECK(google::protobuf::TextFormat::ParseFromString( - "relu_param { negative_slope: 0.01 }", &layer_param)); - CuDNNReLULayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + "relu_param { negative_slope: 0.01 }", &layer_param)); + CuDNNReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { - LayerParameter layer_param; - CuDNNSigmoidLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Now, check values - const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); - const TypeParam* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); - // check that we squashed the value between 0 and 1 - EXPECT_GE(top_data[i], 0.); - EXPECT_LE(top_data[i], 1.); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNSigmoidLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); + // check that we squashed the value between 0 and 1 + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + } } } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { - LayerParameter layer_param; - CuDNNSigmoidLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNSigmoidLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { - LayerParameter layer_param; - CuDNNTanHLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Test exact values - for (int i = 0; i < this->blob_bottom_->num(); ++i) { - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { - for (int k = 0; k < this->blob_bottom_->height(); ++k) { - for (int l = 0; l < this->blob_bottom_->width(); ++l) { - EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, - (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / - (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); - EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, - (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / - (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNTanHLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test exact values + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + } } } } @@ -832,11 +846,13 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { - LayerParameter layer_param; - CuDNNTanHLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNTanHLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } #endif diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 69f2d5c1135..5a474035276 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -963,31 +963,35 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { - LayerParameter layer_param; - PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); - CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); - EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); - EXPECT_EQ(this->blob_top_->height(), 3); - EXPECT_EQ(this->blob_top_->width(), 2); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); + } } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { - LayerParameter layer_param; - PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); - pooling_param->set_pad(1); - pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); - CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); - EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); - EXPECT_EQ(this->blob_top_->height(), 4); - EXPECT_EQ(this->blob_top_->width(), 3); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 3); + } } /* @@ -1017,9 +1021,11 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { - this->TestForwardSquare(); - this->TestForwardRectHigh(); - this->TestForwardRectWide(); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); + } } // Currently, cuDNN does not support a top mask, so we comment this and @@ -1034,66 +1040,70 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { - LayerParameter layer_param; - PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_h(kernel_h); - pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); - // currenty, cuDNN pooling does not support padding - pooling_param->set_pad(0); - pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - CuDNNPoolingLayer layer(layer_param); - GradientChecker checker(1e-4, 1e-2); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + // currenty, cuDNN pooling does not support padding + pooling_param->set_pad(0); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + CuDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } } } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { - LayerParameter layer_param; - PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); - pooling_param->set_pad(2); - pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - this->blob_bottom_->Reshape(1, 1, 3, 3); - // Input: - // [ 1 2 4 ] - // [ 2 3 2 ] - // [ 4 2 1 ] - this->blob_bottom_->mutable_cpu_data()[0] = 1; - this->blob_bottom_->mutable_cpu_data()[1] = 2; - this->blob_bottom_->mutable_cpu_data()[2] = 4; - this->blob_bottom_->mutable_cpu_data()[3] = 2; - this->blob_bottom_->mutable_cpu_data()[4] = 3; - this->blob_bottom_->mutable_cpu_data()[5] = 2; - this->blob_bottom_->mutable_cpu_data()[6] = 4; - this->blob_bottom_->mutable_cpu_data()[7] = 2; - this->blob_bottom_->mutable_cpu_data()[8] = 1; - CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 1); - EXPECT_EQ(this->blob_top_->channels(), 1); - EXPECT_EQ(this->blob_top_->height(), 3); - EXPECT_EQ(this->blob_top_->width(), 3); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - TypeParam epsilon = 1e-8; - // Output: - // [ 1 4 4 ] - // [ 4 4 4 ] - // [ 4 4 1 ] - EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); - EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_bottom_->Reshape(1, 1, 3, 3); + // Input: + // [ 1 2 4 ] + // [ 2 3 2 ] + // [ 4 2 1 ] + this->blob_bottom_->mutable_cpu_data()[0] = 1; + this->blob_bottom_->mutable_cpu_data()[1] = 2; + this->blob_bottom_->mutable_cpu_data()[2] = 4; + this->blob_bottom_->mutable_cpu_data()[3] = 2; + this->blob_bottom_->mutable_cpu_data()[4] = 3; + this->blob_bottom_->mutable_cpu_data()[5] = 2; + this->blob_bottom_->mutable_cpu_data()[6] = 4; + this->blob_bottom_->mutable_cpu_data()[7] = 2; + this->blob_bottom_->mutable_cpu_data()[8] = 1; + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + TypeParam epsilon = 1e-8; + // Output: + // [ 1 4 4 ] + // [ 4 4 4 ] + // [ 4 4 1 ] + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); + } } /* @@ -1118,61 +1128,67 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { - LayerParameter layer_param; - PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(1); - // Currently, cuDNN pooling does not support padding, so we use - // a simplified version of this test. - pooling_param->set_pad(0); - pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); - this->blob_bottom_->Reshape(1, 1, 3, 3); - FillerParameter filler_param; - filler_param.set_value(TypeParam(2)); - ConstantFiller filler(filler_param); - filler.Fill(this->blob_bottom_); - CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 1); - EXPECT_EQ(this->blob_top_->channels(), 1); - EXPECT_EQ(this->blob_top_->height(), 1); - EXPECT_EQ(this->blob_top_->width(), 1); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - TypeParam epsilon = 1e-5; - EXPECT_NEAR(this->blob_top_->cpu_data()[0], 2.0, epsilon); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(1); + // Currently, cuDNN pooling does not support padding, so we use + // a simplified version of this test. + pooling_param->set_pad(0); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + this->blob_bottom_->Reshape(1, 1, 3, 3); + FillerParameter filler_param; + filler_param.set_value(TypeParam(2)); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + CuDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + TypeParam epsilon = 1e-5; + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 2.0, epsilon); + } } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { - LayerParameter layer_param; - PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_h(kernel_h); - pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); - pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); - CuDNNPoolingLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + CuDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } } } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { - LayerParameter layer_param; - PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_h(kernel_h); - pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); - pooling_param->set_pad(2); - pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); - CuDNNPoolingLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + CuDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } } } diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 90019a6d170..f433b16eda4 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -104,32 +104,34 @@ class CuDNNSoftmaxLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { - LayerParameter layer_param; - CuDNNSoftmaxLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Test sum - for (int i = 0; i < this->blob_bottom_->num(); ++i) { - for (int k = 0; k < this->blob_bottom_->height(); ++k) { - for (int l = 0; l < this->blob_bottom_->width(); ++l) { - TypeParam sum = 0; - for (int j = 0; j < this->blob_top_->channels(); ++j) { - sum += this->blob_top_->data_at(i, j, k, l); - } - EXPECT_GE(sum, 0.999); - EXPECT_LE(sum, 1.001); - // Test exact values - TypeParam scale = 0; - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { - scale += exp(this->blob_bottom_->data_at(i, j, k, l)); - } - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { - EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, - exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) - << "debug: " << i << " " << j; - EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, - exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) - << "debug: " << i << " " << j; + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNSoftmaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test sum + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + TypeParam sum = 0; + for (int j = 0; j < this->blob_top_->channels(); ++j) { + sum += this->blob_top_->data_at(i, j, k, l); + } + EXPECT_GE(sum, 0.999); + EXPECT_LE(sum, 1.001); + // Test exact values + TypeParam scale = 0; + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + scale += exp(this->blob_bottom_->data_at(i, j, k, l)); + } + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) + << "debug: " << i << " " << j; + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) + << "debug: " << i << " " << j; + } } } } @@ -137,11 +139,13 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { - LayerParameter layer_param; - CuDNNSoftmaxLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + LayerParameter layer_param; + CuDNNSoftmaxLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } #endif From 6b1bc3a281b708bcb8170ab60fe1296f43a8b585 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 25 Jun 2015 04:08:49 +0200 Subject: [PATCH 077/254] . --- include/caffe/net.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index daa60fd519b..e2cd7b8c13a 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -32,7 +32,7 @@ class Net { void Init(const NetParameter& param); /** - * @brief Run Forward with the input Blob%s already fed separately. + * @brief Run Forward with the input Blobs already fed separately. * * You can get the input blobs using input_blobs(). */ From e98cb1719efbb874063d091b756b629f8b49ffa3 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 26 Jun 2015 04:49:14 +0200 Subject: [PATCH 078/254] Added preparations for multi-queue support, enabled ViennaCL compile without dynamic library. --- Makefile | 1 - include/caffe/greentea/greentea.hpp | 7 + src/caffe/common.cpp | 7 +- src/caffe/greentea/greentea.cpp | 8 + .../greentea/greentea_math_functions.cpp | 216 +++++++++--------- src/caffe/layers/conv_layer.cu | 47 ++++ src/caffe/layers/conv_sk_layer.cu | 2 + 7 files changed, 179 insertions(+), 109 deletions(-) diff --git a/Makefile b/Makefile index cff06c890fd..b82ca592ee7 100644 --- a/Makefile +++ b/Makefile @@ -201,7 +201,6 @@ ifeq ($(USE_GREENTEA),1) # Use ViennaCL BLAS ifeq ($(USE_VIENNACLBLAS), 1) - LIBRARIES += viennacl COMMON_FLAGS += -DUSE_VIENNACLBLAS endif diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 4a00f9bae93..916e1063b3e 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -27,6 +27,11 @@ #include "viennacl/vector.hpp" #endif +#ifndef GREENTEA_QUEUE_COUNT +#define GREENTEA_QUEUE_COUNT 1 + +#endif + namespace caffe { #ifdef USE_GREENTEA @@ -50,6 +55,8 @@ class DeviceContext { Backend backend_; }; +void FinishQueues(viennacl::ocl::context *ctx); + template struct is_same { static const bool value = false; diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 23c1028782c..aae50c0d826 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -295,13 +295,18 @@ void Caffe::SetDevices(std::vector device_ids) { // Setup actual context and compile kernels for this device viennacl::ocl::setup_context( device_id, std::get<1>(platform_devices[greentea_device_count])); - viennacl::ocl::context ctx = viennacl::ocl::get_context( + viennacl::ocl::context &ctx = viennacl::ocl::get_context( static_cast(device_id)); viennacl::ocl::program & program = RegisterKernels(&ctx); Get().ocl_programs_.push_back(program); // viennacl::ocl::switch_context(device_id); // viennacl::ocl::switch_device(std::get<1> // (platform_devices[device_id - cuda_device_count])); + + // Add defined number of queues + for (int q = 0; q < GREENTEA_QUEUE_COUNT - 1; ++q) { + ctx.add_queue(ctx.current_device()); + } is_used = true; } } diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index 2c5157aec75..630006a1cc0 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -45,4 +45,12 @@ int DeviceContext::id() const { return id_; } +void FinishQueues(viennacl::ocl::context *ctx) { + for (int i = 0; i < GREENTEA_QUEUE_COUNT; ++i) { + ctx->switch_queue(i); + ctx->get_queue().finish(); + } + ctx->switch_queue(0); +} + } // namespace caffe diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index a057442ce5a..d8584718687 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -36,7 +36,16 @@ #endif #ifdef USE_VIENNACLBLAS -#include "libviennacl/include/viennacl.hpp" +#include "viennacl/detail/matrix_def.hpp" +#include "viennacl/detail/vector_def.hpp" +#include "viennacl/linalg/inner_prod.hpp" +#include "viennacl/linalg/norm_1.hpp" +#include "viennacl/linalg/norm_2.hpp" +#include "viennacl/linalg/norm_inf.hpp" +#include "viennacl/linalg/prod.hpp" +#include "viennacl/matrix.hpp" +#include "viennacl/scalar.hpp" +#include "viennacl/vector.hpp" #endif namespace caffe { @@ -167,33 +176,41 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, #ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, - static_cast(ctx_id)); + typedef typename viennacl::matrix_base::size_type size_type; + typedef typename viennacl::matrix_base::size_type difference_type; + + size_type A_size1 = static_cast((TransA == CblasTrans) ? K : M); + size_type A_size2 = static_cast((TransA == CblasTrans) ? M : K); + + size_type B_size1 = static_cast((TransB == CblasTrans) ? N : K); + size_type B_size2 = static_cast((TransB == CblasTrans) ? K : N); + + viennacl::matrix_base matA(A, ctx, + A_size1, size_type(0), difference_type(1), size_type(M), + A_size2, size_type(offA), difference_type(1), size_type(lda), true); + + viennacl::matrix_base matB(B, ctx, + B_size1, size_type(0), difference_type(1), size_type(K), + B_size2, size_type(offB), difference_type(1), size_type(ldb), true); + + viennacl::matrix_base matC(C, ctx, + size_type(M), size_type(0), difference_type(1), size_type(M), + size_type(N), size_type(offC), difference_type(1), + size_type(ldc), true); + + if (TransA == CblasTrans && TransB == CblasTrans) + viennacl::linalg::prod_impl(viennacl::trans(matA), viennacl::trans(matB), + matC, alpha, beta); + else if (TransA == CblasTrans && TransB == CblasNoTrans) + viennacl::linalg::prod_impl(viennacl::trans(matA), matB, + matC, alpha, beta); + else if (TransA == CblasNoTrans && TransB == CblasTrans) + viennacl::linalg::prod_impl(matA, viennacl::trans(matB), + matC, alpha, beta); + else if (TransA == CblasNoTrans && TransB == CblasNoTrans) + viennacl::linalg::prod_impl(matA, matB, + matC, alpha, beta); - ViennaCLTranspose vclTransA = - (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; - ViennaCLTranspose vclTransB = - (TransB == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; - - ViennaCLOrder vclOrderA = ViennaCLRowMajor; - ViennaCLOrder vclOrderB = ViennaCLRowMajor; - ViennaCLOrder vclOrderC = ViennaCLRowMajor; - - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSgemm(backend, vclOrderA, vclTransA, vclOrderB, - vclTransB, vclOrderC, M, N, K, alpha, A, 0, offA, - 1, 1, lda, B, 0, offB, 1, 1, ldb, beta, C, 0, - offC, 1, 1, ldc)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDgemm(backend, vclOrderA, vclTransA, vclOrderB, - vclTransB, vclOrderC, M, N, K, alpha, A, 0, offA, - 1, 1, lda, B, 0, offB, 1, 1, ldb, beta, C, 0, - offC, 1, 1, ldc)); - } #endif #ifdef USE_CLBLAS clblasOrder clOrder = clblasRowMajor; @@ -256,25 +273,27 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, yptr + offy); } else { #ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, - static_cast(ctx_id)); - ViennaCLTranspose vclTransA = - (TransA == CblasNoTrans) ? ViennaCLNoTrans : ViennaCLTrans; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; + + viennacl::vector_base v1(x, + size_type((TransA == CblasTrans) ? M : N), + size_type(offx), difference_type(1), ctx); + viennacl::vector_base v2(y, + size_type((TransA == CblasTrans) ? N : M), + size_type(offy), difference_type(1), ctx); + viennacl::matrix_base mat(A, ctx, + size_type(M), size_type(0), + difference_type(1), size_type(M), + size_type(N), size_type(offA), + difference_type(1), size_type(N), true); + v2 *= beta; + if (TransA == CblasTrans) + v2 += alpha * viennacl::linalg::prod(viennacl::trans(mat), v1); + else + v2 += alpha * viennacl::linalg::prod(mat, v1); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, - A, offA, 0, 1, 1, N, x, offx, 1, beta, y, offy, - 1)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDgemv(backend, ViennaCLRowMajor, vclTransA, M, N, alpha, - A, offA, 0, 1, 1, N, x, offx, 1, beta, y, offy, - 1)); - } #endif #ifdef USE_CLBLAS @@ -327,18 +346,17 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, caffe_axpy(N, alpha, Xptr + offX, Yptr + offY); } else { #ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, - static_cast(ctx_id)); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDaxpy(backend, N, alpha, X, offX, 1, Y, offY, 1)); - } + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; + + viennacl::vector_base v1(X, size_type(N), size_type(offX), + difference_type(1), ctx); + viennacl::vector_base v2(Y, size_type(N), size_type(offY), + difference_type(1), ctx); + + v2 += alpha * v1; + #endif #ifdef USE_CLBLAS @@ -426,18 +444,15 @@ void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, } else { #ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, - static_cast(ctx_id)); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSscal(backend, N, alpha, x, offx, 1)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDscal(backend, N, alpha, x, offx, 1)); - } + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; + + viennacl::vector_base v1(x, size_type(N), + size_type(offx), difference_type(1), ctx); + + v1 *= alpha; + #endif #ifdef USE_CLBLAS @@ -495,20 +510,17 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, *out = caffe_cpu_dot(n, Xptr + offX, Yptr + offY); } else { #ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, - static_cast(ctx_id)); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSdot(backend, n, reinterpret_cast(out), X, offX, - 1, Y, offY, 1)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDdot(backend, n, reinterpret_cast(out), X, - offX, 1, Y, offY, 1)); - } + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; + + viennacl::vector_base v1(X, size_type(n), + size_type(offX), difference_type(1), ctx); + viennacl::vector_base v2(Y, size_type(n), + size_type(offY), difference_type(1), ctx); + + *out = viennacl::linalg::inner_prod(v1, v2); + #endif #ifdef USE_CLBLAS @@ -562,20 +574,15 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, } else { #ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, - static_cast(ctx_id)); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSasum(backend, n, reinterpret_cast(Y), X, offX, - 1)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDasum(backend, n, reinterpret_cast(Y), X, offX, - 1)); - } + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; + + viennacl::vector_base v1(X, size_type(n), + size_type(offX), difference_type(1), ctx); + + *Y = viennacl::linalg::norm_1(v1); + #endif #ifdef USE_CLBLAS @@ -628,22 +635,17 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, } else { #ifdef USE_VIENNACLBLAS - ViennaCLBackend backend; - ViennaCLBackendCreate(&backend); - ViennaCLBackendSetOpenCLContextID(backend, - static_cast(ctx_id)); - if (std::is_same::value) { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLScopy(backend, n, X, offX, 1, Y, offY, 1)); - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLSscal(backend, n, alpha, Y, offY, 1)); - } else { - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDcopy(backend, n, X, offX, 1, Y, offY, 1)); - GREENTEA_VCL_BLAS_CHECK( - ViennaCLOpenCLDscal(backend, n, alpha, Y, offY, 1)); - } + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; + + viennacl::vector_base v1(X, size_type(n), + size_type(offX), difference_type(1), ctx); + viennacl::vector_base v2(Y, size_type(n), + size_type(offY), difference_type(1), ctx); + + v2 = v1 * alpha; + #endif #ifdef USE_CLBLAS diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 156cdb43ca0..627a7481a67 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -22,6 +22,14 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { +#ifdef USE_GREENTEA + if (this->device_context_.backend() == BACKEND_OpenCL) { + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(this->device_context_.id()); + // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); + } +#endif // USE_GREENTEA + this->forward_gpu_gemm(bottom_data, bottom[i]->offset(n), weight, top_data, top[i]->offset(n)); if (this->bias_term_) { @@ -29,6 +37,13 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, this->forward_gpu_bias(top_data, top[i]->offset(n), bias); } } +#ifdef USE_GREENTEA + if (this->device_context_.backend() == BACKEND_OpenCL) { + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(this->device_context_.id()); + FinishQueues(&ctx); + } +#endif // USE_GREENTEA } } @@ -43,13 +58,37 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { +#ifdef USE_GREENTEA + if (this->device_context_.backend() == BACKEND_OpenCL) { + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(this->device_context_.id()); + ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); + } +#endif // USE_GREENTEA + this->backward_gpu_bias(bias_diff, top_diff, top[i]->offset(n)); + +#ifdef USE_GREENTEA + if (this->device_context_.backend() == BACKEND_OpenCL) { + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(this->device_context_.id()); + FinishQueues(&ctx); + } +#endif // USE_GREENTEA } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { +#ifdef USE_GREENTEA + if (this->device_context_.backend() == BACKEND_OpenCL) { + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(this->device_context_.id()); + // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); + } +#endif // USE_GREENTEA + // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data, bottom[i]->offset(n), @@ -60,6 +99,14 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, this->backward_gpu_gemm(top_diff, top[i]->offset(n), weight, bottom_diff, bottom[i]->offset(n)); } + +#ifdef USE_GREENTEA + if (this->device_context_.backend() == BACKEND_OpenCL) { + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(this->device_context_.id()); + FinishQueues(&ctx); + } +#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 2834703bb62..b39926245ad 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -70,6 +70,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { + // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); // First, im2col greentea_im2col_sk_gpu(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, @@ -95,6 +96,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, (Dtype) 1., top_data, top[i]->offset(n)); } } + // FinishQueues(&ctx); } #endif } From 926d66ae890ed7ac0245adf5dc17848384630a99 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 26 Jun 2015 05:05:49 +0200 Subject: [PATCH 079/254] Declaration fix. --- include/caffe/greentea/greentea.hpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 916e1063b3e..d4e951cbf06 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -28,7 +28,7 @@ #endif #ifndef GREENTEA_QUEUE_COUNT -#define GREENTEA_QUEUE_COUNT 1 +#define GREENTEA_QUEUE_COUNT 4 #endif @@ -55,7 +55,6 @@ class DeviceContext { Backend backend_; }; -void FinishQueues(viennacl::ocl::context *ctx); template struct is_same { @@ -69,11 +68,7 @@ struct is_same { #ifdef USE_GREENTEA -#ifdef USE_VIENNACLBLAS -#define GREENTEA_VCL_BLAS_CHECK(condition) \ - {ViennaCLStatus status = condition; \ - CHECK_EQ(status, ViennaCLSuccess) << "GreenTea ViennaCL BLAS ERROR";} -#endif +void FinishQueues(viennacl::ocl::context *ctx); #ifdef USE_CLBLAS #define GREENTEA_CL_BLAS_CHECK(condition) \ From 75841bccb8cd1a8118ce3e46f67d5a8f0091773e Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 26 Jun 2015 20:45:11 +0200 Subject: [PATCH 080/254] Changed device context to pointer type. Further preparation for memory saving and multi queue. --- include/caffe/blob.hpp | 12 ++-- include/caffe/caffe.hpp | 2 + include/caffe/common.hpp | 11 +-- include/caffe/data_transformer.hpp | 4 +- include/caffe/device_context.hpp | 43 +++++++++++ include/caffe/greentea/greentea.hpp | 16 +---- include/caffe/layer.hpp | 9 +-- include/caffe/solver.hpp | 2 +- include/caffe/syncedmem.hpp | 6 +- include/caffe/test/test_caffe_main.hpp | 1 + include/caffe/vision_layers.hpp | 8 +-- src/caffe/blob.cpp | 41 +++++------ src/caffe/common.cpp | 20 +++--- src/caffe/data_transformer.cpp | 2 +- src/caffe/device_context.cpp | 72 +++++++++++++++++++ src/caffe/greentea/greentea.cpp | 22 ++---- src/caffe/greentea/greentea_im2col.cpp | 1 + src/caffe/layer_factory.cpp | 12 ++-- src/caffe/layers/absval_layer.cu | 10 +-- src/caffe/layers/base_conv_layer.cpp | 20 +++--- src/caffe/layers/base_data_layer.cu | 4 +- src/caffe/layers/bnll_layer.cu | 12 ++-- src/caffe/layers/concat_layer.cu | 12 ++-- src/caffe/layers/contrastive_loss_layer.cu | 14 ++-- src/caffe/layers/conv_layer.cu | 30 ++++---- src/caffe/layers/conv_sk_layer.cu | 31 ++++---- src/caffe/layers/dropout_layer.cu | 14 ++-- src/caffe/layers/eltwise_layer.cu | 28 ++++---- src/caffe/layers/euclidean_loss_layer.cu | 10 +-- src/caffe/layers/exp_layer.cu | 18 ++--- src/caffe/layers/filter_layer.cu | 12 ++-- src/caffe/layers/hdf5_data_layer.cu | 2 +- src/caffe/layers/hdf5_output_layer.cu | 2 +- src/caffe/layers/im2col_layer.cu | 12 ++-- src/caffe/layers/inner_product_layer.cu | 14 ++-- src/caffe/layers/log_layer.cu | 28 ++++---- src/caffe/layers/lrn_layer.cu | 12 ++-- src/caffe/layers/mergecrop_layer.cu | 12 ++-- src/caffe/layers/mvn_layer.cu | 54 +++++++------- src/caffe/layers/pooling_layer.cu | 16 ++--- src/caffe/layers/pooling_sk_layer.cu | 16 ++--- src/caffe/layers/power_layer.cu | 36 +++++----- src/caffe/layers/prelu_layer.cu | 18 ++--- src/caffe/layers/reduction_layer.cu | 20 +++--- src/caffe/layers/relu_layer.cu | 12 ++-- .../sigmoid_cross_entropy_loss_layer.cu | 8 +-- src/caffe/layers/sigmoid_layer.cu | 12 ++-- src/caffe/layers/silence_layer.cu | 6 +- src/caffe/layers/slice_layer.cu | 12 ++-- src/caffe/layers/softmax_layer.cu | 14 ++-- src/caffe/layers/softmax_loss_layer.cu | 22 +++--- src/caffe/layers/split_layer.cu | 10 +-- src/caffe/layers/tanh_layer.cu | 12 ++-- src/caffe/layers/threshold_layer.cu | 6 +- src/caffe/net.cpp | 6 +- src/caffe/solver.cpp | 44 ++++++------ src/caffe/syncedmem.cpp | 21 +++--- src/caffe/test/test_common.cpp | 6 +- src/caffe/test/test_convolution_layer.cpp | 12 ++-- src/caffe/test/test_im2col_kernel.cu | 2 +- src/caffe/test/test_math_functions.cpp | 36 +++++----- src/caffe/test/test_neuron_layer.cpp | 16 ++--- src/caffe/test/test_pooling_layer.cpp | 16 ++--- .../test/test_random_number_generator.cpp | 18 ++--- src/caffe/test/test_softmax_layer.cpp | 4 +- src/caffe/test/test_syncedmem.cpp | 20 +++--- src/caffe/test/test_util_blas.cpp | 28 ++++---- src/caffe/util/benchmark.cpp | 13 ++-- 68 files changed, 603 insertions(+), 494 deletions(-) create mode 100644 include/caffe/device_context.hpp create mode 100644 src/caffe/device_context.cpp diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 02da73fc4bc..94028e8b7e6 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -21,6 +21,8 @@ const int kMaxBlobAxes = INT_MAX; namespace caffe { +class DeviceContext; + /** * @brief A wrapper around SyncedMemory holders serving as the basic * computational unit through which Layer%s, Net%s, and Solver%s @@ -38,7 +40,7 @@ class Blob { capacity_(0), device_context_(Caffe::GetDefaultDeviceContext()) { } - explicit Blob(DeviceContext device_context) + explicit Blob(DeviceContext *device_context) : data_(), diff_(), count_(0), @@ -46,9 +48,9 @@ class Blob { device_context_(device_context) { } explicit Blob(const int num, const int channels, const int height, - const int width, DeviceContext device_context = + const int width, DeviceContext *device_context = Caffe::GetDefaultDeviceContext()); - explicit Blob(const vector& shape, DeviceContext device_context = + explicit Blob(const vector& shape, DeviceContext *device_context = Caffe::GetDefaultDeviceContext()); /** @@ -294,7 +296,7 @@ class Blob { /** * @brief Return the device context to which this blob and shared memory belongs */ - DeviceContext device_context(); + DeviceContext *device_context(); protected: shared_ptr data_; @@ -302,7 +304,7 @@ class Blob { vector shape_; int count_; int capacity_; - DeviceContext device_context_; + DeviceContext *device_context_; DISABLE_COPY_AND_ASSIGN(Blob); }; diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp index b0c9e98d33b..b8278048bbf 100644 --- a/include/caffe/caffe.hpp +++ b/include/caffe/caffe.hpp @@ -6,6 +6,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" +#include "caffe/device_context.hpp" #include "caffe/filler.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/layer.hpp" @@ -18,4 +19,5 @@ #include "caffe/vision_layers.hpp" + #endif // CAFFE_CAFFE_HPP_ diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index f10dc0da37e..eb9dba1ec4f 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -17,10 +17,9 @@ #include // pair #include - +#include "caffe/greentea/greentea.hpp" #include "caffe/util/device_alternate.hpp" -#include "caffe/greentea/greentea.hpp" // gflags 2.1 issue: namespace google was changed to gflags without warning. // Luckily we will be able to use GFLAGS_GFLAGS_H_ to detect if it is version @@ -74,6 +73,8 @@ namespace cv {class Mat;} namespace caffe { +class DeviceContext; + // We will use the boost shared_ptr instead of the new C++11 one mainly // because cuda does not work (at least now) well with C++11 features. using boost::shared_ptr; @@ -157,7 +158,7 @@ class Caffe { static void DeviceQuery(); // Get the default device - static DeviceContext& GetDefaultDeviceContext(); + static DeviceContext *GetDefaultDeviceContext(); // Prints info about all devices static void EnumerateDevices(); @@ -167,7 +168,7 @@ class Caffe { static void Synchronize(int device_id); // Get a device context - static DeviceContext & GetDeviceContext(int id); + static DeviceContext *GetDeviceContext(int id); // Get a device OpenCL program #ifdef USE_GREENTEA @@ -188,7 +189,7 @@ class Caffe { vector device_contexts_; - DeviceContext default_device_context_; + DeviceContext *default_device_context_; #ifdef USE_GREENTEA vector ocl_programs_; diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index ffaeb56a4d9..1ccb1398393 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -17,7 +17,7 @@ template class DataTransformer { public: explicit DataTransformer(const TransformationParameter& param, Phase phase, - DeviceContext device_context); + DeviceContext *device_context); virtual ~DataTransformer() { } @@ -144,7 +144,7 @@ class DataTransformer { Phase phase_; Blob data_mean_; vector mean_values_; - DeviceContext device_context_; + DeviceContext *device_context_; }; } // namespace caffe diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp new file mode 100644 index 00000000000..a28bbd05d34 --- /dev/null +++ b/include/caffe/device_context.hpp @@ -0,0 +1,43 @@ +/* + * device_context.hpp + * + * Created on: Jun 26, 2015 + * Author: Fabian Tschopp + */ + +#ifndef CAFFE_DEVICE_CONTEXT_HPP_ +#define CAFFE_DEVICE_CONTEXT_HPP_ + +#include +#include "caffe/blob.hpp" +#include "caffe/greentea/greentea.hpp" + + +using std::vector; + +namespace caffe { + +class DeviceContext { + public: + explicit DeviceContext(); + explicit DeviceContext(int id, Backend backend); + Backend backend() const; + int id() const; + int WorkgroupSize(int id); + + + template + Blob * Buffer(int id); + + + private: + void Init(); + std::vector workgroup_sizes_; + int id_; + Backend backend_; + std::vector< Blob > buff_f_; + std::vector< Blob > buff_d_; +}; +} // namespace caffe + +#endif /* CAFFE_DEVICE_CONTEXT_HPP_ */ diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index d4e951cbf06..14e16f79fd7 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -8,6 +8,8 @@ #ifndef CAFFE_GREENTEA_HPP_ #define CAFFE_GREENTEA_HPP_ +#include + // Define ViennaCL/GreenTea flags #ifdef USE_GREENTEA #ifndef NDEBUG @@ -28,8 +30,7 @@ #endif #ifndef GREENTEA_QUEUE_COUNT -#define GREENTEA_QUEUE_COUNT 4 - +#define GREENTEA_QUEUE_COUNT 8 #endif namespace caffe { @@ -44,17 +45,6 @@ enum Backend { BACKEND_OpenCL }; -class DeviceContext { - public: - DeviceContext(); - DeviceContext(int id, Backend backend); - Backend backend() const; - int id() const; - private: - int id_; - Backend backend_; -}; - template struct is_same { diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 892d8affefa..c1ae0f829f7 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -7,6 +7,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" +#include "caffe/device_context.hpp" #include "caffe/layer_factory.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/device_alternate.hpp" @@ -315,7 +316,7 @@ class Layer { /** * @brief Returns the device context this layer runs on */ - inline DeviceContext device_context() { + inline DeviceContext *device_context() { return device_context_; } @@ -334,7 +335,7 @@ class Layer { vector loss_; /** Device context */ - DeviceContext device_context_; + DeviceContext *device_context_; /** @brief Using the CPU device, compute the layer output. */ virtual void Forward_cpu(const vector*>& bottom, @@ -459,7 +460,7 @@ inline Dtype Layer::Forward(const vector*>& bottom, case Caffe::GPU: Forward_gpu(bottom, top); #ifndef CPU_ONLY - if (device_context_.backend() == BACKEND_CUDA) { + if (device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int top_id = 0; top_id < top.size(); ++top_id) { if (!this->loss(top_id)) { @@ -483,7 +484,7 @@ inline Dtype Layer::Forward(const vector*>& bottom, cl_mem data = (cl_mem) (top[top_id]->gpu_data()); cl_mem loss_weights = (cl_mem) (top[top_id]->gpu_diff()); Dtype blob_loss = 0; - greentea_gpu_dot(this->device_context_.id(), count, data, 0, + greentea_gpu_dot(this->device_context_->id(), count, data, 0, loss_weights, 0, &blob_loss); loss += blob_loss; } diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index f3bafae1bca..5968f56c64b 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -66,7 +66,7 @@ class Solver { int current_step_; shared_ptr > net_; vector > > test_nets_; - DeviceContext device_context_; + DeviceContext *device_context_; DISABLE_COPY_AND_ASSIGN(Solver); }; diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index d817e64fb62..a4b0082897e 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -51,7 +51,7 @@ class SyncedMemory { device_context_(Caffe::GetDefaultDeviceContext()), cl_gpu_mem_(NULL) { } - explicit SyncedMemory(DeviceContext device_context) + explicit SyncedMemory(DeviceContext *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), @@ -60,7 +60,7 @@ class SyncedMemory { device_context_(device_context), cl_gpu_mem_(NULL) { } - explicit SyncedMemory(size_t size, DeviceContext device_context) + explicit SyncedMemory(size_t size, DeviceContext *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), @@ -109,7 +109,7 @@ class SyncedMemory { size_t size_; SyncedHead head_; bool own_cpu_data_; - DeviceContext device_context_; + DeviceContext *device_context_; #ifdef USE_GREENTEA cl_mem cl_gpu_mem_; #endif diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index e20d7e1a6bc..1a0af805538 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -10,6 +10,7 @@ #include #include "caffe/common.hpp" +#include "caffe/device_context.hpp" using std::cout; using std::endl; diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 67e1e6afb6e..630667c8de4 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -158,9 +158,9 @@ class BaseConvolutionLayer : public Layer { Dtype* col_buff, const int col_buff_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); greentea_im2col_gpu(&program, &ctx, (cl_mem) data, data_off, conv_in_channels_, conv_in_height_, conv_in_width_, kernel_h_, kernel_w_, pad_h_, @@ -171,9 +171,9 @@ class BaseConvolutionLayer : public Layer { const int col_buff_off, Dtype* data, const int data_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); greentea_col2im_gpu(&program, &ctx, (cl_mem) col_buff, col_buff_off, conv_in_channels_, conv_in_height_, conv_in_width_, kernel_h_, kernel_w_, pad_h_, diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 6e7ecfd96a9..3692bf8be74 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -3,6 +3,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" +#include "caffe/device_context.hpp" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" @@ -61,14 +62,14 @@ bool Blob::ReshapeLike(const Blob& other) { template Blob::Blob(const int num, const int channels, const int height, - const int width, DeviceContext device_context) + const int width, DeviceContext *device_context) // capacity_ must be initialized before calling Reshape : capacity_(0), device_context_(device_context) { Reshape(num, channels, height, width); } template -Blob::Blob(const vector& shape, DeviceContext device_context) +Blob::Blob(const vector& shape, DeviceContext *device_context) // capacity_ must be initialized before calling Reshape : capacity_(0), device_context_(device_context) { Reshape(shape); @@ -166,7 +167,7 @@ void Blob::Update() { case SyncedMemory::SYNCED: { #ifndef CPU_ONLY // perform computation on GPU - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_axpy(count_, Dtype(-1), static_cast(diff_->gpu_data()), @@ -174,7 +175,7 @@ void Blob::Update() { #endif } else { #ifdef USE_GREENTEA - greentea_gpu_axpy(device_context_.id(), count_, Dtype(-1), + greentea_gpu_axpy(device_context_->id(), count_, Dtype(-1), (cl_mem) (diff_->gpu_data()), 0, (cl_mem) (data_->mutable_gpu_data()), 0); #endif @@ -195,7 +196,7 @@ template<> unsigned int Blob::asum_data() const { } template -DeviceContext Blob::device_context() { +DeviceContext *Blob::device_context() { return device_context_; } @@ -215,7 +216,7 @@ Dtype Blob::asum_data() const { case SyncedMemory::HEAD_AT_GPU: case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA Dtype asum; caffe_gpu_asum(count_, gpu_data(), &asum); @@ -224,7 +225,7 @@ Dtype Blob::asum_data() const { } else { #ifdef USE_GREENTEA Dtype asum; - greentea_gpu_asum(device_context_.id(), count_, (cl_mem) gpu_data(), 0, + greentea_gpu_asum(device_context_->id(), count_, (cl_mem) gpu_data(), 0, &asum); return asum; #endif @@ -262,7 +263,7 @@ Dtype Blob::asum_diff() const { case SyncedMemory::HEAD_AT_GPU: case SyncedMemory::SYNCED: { #ifndef CPU_ONLY - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA Dtype asum; caffe_gpu_asum(count_, gpu_diff(), &asum); @@ -271,7 +272,7 @@ Dtype Blob::asum_diff() const { } else { #ifdef USE_GREENTEA Dtype asum; - greentea_gpu_asum(device_context_.id(), count_, (cl_mem) gpu_diff(), 0, + greentea_gpu_asum(device_context_->id(), count_, (cl_mem) gpu_diff(), 0, &asum); return asum; #endif @@ -315,13 +316,13 @@ Dtype Blob::sumsq_data() const { case SyncedMemory::SYNCED: { #ifndef CPU_ONLY data = gpu_data(); - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_dot(count_, data, data, &sumsq); #endif } else { #ifdef USE_GREENTEA - greentea_gpu_dot(device_context_.id(), count_, (cl_mem) data, 0, + greentea_gpu_dot(device_context_->id(), count_, (cl_mem) data, 0, (cl_mem) data, 0, &sumsq); #endif } @@ -365,13 +366,13 @@ Dtype Blob::sumsq_diff() const { case SyncedMemory::SYNCED: { #ifndef CPU_ONLY diff = gpu_diff(); - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_dot(count_, diff, diff, &sumsq); #endif } else { #ifdef USE_GREENTEA - greentea_gpu_dot(device_context_.id(), count_, (cl_mem) diff, 0, + greentea_gpu_dot(device_context_->id(), count_, (cl_mem) diff, 0, (cl_mem) diff, 0, &sumsq); #endif } @@ -412,13 +413,13 @@ void Blob::scale_data(Dtype scale_factor) { case SyncedMemory::SYNCED: { #ifndef CPU_ONLY data = mutable_gpu_data(); - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_scal(count_, scale_factor, data); #endif } else { #ifdef USE_GREENTEA - greentea_gpu_scal(device_context_.id(), count_, scale_factor, + greentea_gpu_scal(device_context_->id(), count_, scale_factor, (cl_mem) data, 0); #endif } @@ -458,13 +459,13 @@ void Blob::scale_diff(Dtype scale_factor) { case SyncedMemory::SYNCED: { #ifndef CPU_ONLY diff = mutable_gpu_diff(); - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_scal(count_, scale_factor, diff); #endif } else { #ifdef USE_GREENTEA - greentea_gpu_scal(device_context_.id(), count_, scale_factor, + greentea_gpu_scal(device_context_->id(), count_, scale_factor, (cl_mem) diff, 0); #endif } @@ -513,7 +514,7 @@ void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { } switch (Caffe::mode()) { case Caffe::GPU: { - if (device_context_.backend() == BACKEND_CUDA) { + if (device_context_->backend() == BACKEND_CUDA) { if (copy_diff) { caffe_copy(count_, source.gpu_diff(), static_cast(diff_->mutable_gpu_data())); @@ -527,12 +528,12 @@ void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { greentea_copy( count_, (cl_mem) (source.gpu_diff()), 0, (cl_mem) (diff_->mutable_gpu_data()), 0, - &viennacl::ocl::get_context(device_context_.id())); + &viennacl::ocl::get_context(device_context_->id())); } else { greentea_copy( count_, (cl_mem) (source.gpu_data()), 0, (cl_mem) (data_->mutable_gpu_data()), 0, - &viennacl::ocl::get_context(device_context_.id())); + &viennacl::ocl::get_context(device_context_->id())); } #endif } diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index aae50c0d826..b5444ce4e6f 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -5,6 +5,7 @@ #include #include "caffe/common.hpp" +#include "caffe/device_context.hpp" #include "caffe/util/rng.hpp" #ifdef USE_GREENTEA @@ -47,11 +48,12 @@ void GlobalInit(int* pargc, char*** pargv) { ::google::InstallFailureSignalHandler(); } -DeviceContext & Caffe::GetDeviceContext(int id) { - return id == -1 ? Get().default_device_context_ : Get().device_contexts_[id]; +DeviceContext *Caffe::GetDeviceContext(int id) { + return id == -1 ? Get().default_device_context_ : + &(Get().device_contexts_[id]); } -DeviceContext& Caffe::GetDefaultDeviceContext() { +DeviceContext *Caffe::GetDefaultDeviceContext() { return Get().default_device_context_; } @@ -140,7 +142,7 @@ Caffe::~Caffe() { } void Caffe::set_random_seed(const unsigned int seed) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Curand seed static bool g_curand_availability_logged = false; @@ -167,10 +169,10 @@ void Caffe::set_random_seed(const unsigned int seed) { void Caffe::Synchronize(int device_id) { #ifdef USE_GREENTEA - DeviceContext& device_context = Caffe::GetDeviceContext(device_id); - if (device_context.backend() == BACKEND_OpenCL) { + DeviceContext * device_context = Caffe::GetDeviceContext(device_id); + if (device_context->backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( - GetDeviceContext(device_id).id()); + GetDeviceContext(device_id)->id()); ctx.get_queue().finish(); } #else @@ -334,7 +336,7 @@ void Caffe::SetDevice(const int device_id) { Get().default_device_context_ = GetDeviceContext(device_id); - if (Get().default_device_context_.backend() == Backend::BACKEND_CUDA) { + if (Get().default_device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA int current_device; CUDA_CHECK(cudaGetDevice(¤t_device)); @@ -368,7 +370,7 @@ void Caffe::SetDevice(const int device_id) { // TODO: Fix this for the new backend void Caffe::DeviceQuery() { - if (Get().default_device_context_.backend() == BACKEND_CUDA) { + if (Get().default_device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA cudaDeviceProp prop; int device; diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index f22edfeb3c3..e7409e99be3 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -13,7 +13,7 @@ namespace caffe { template DataTransformer::DataTransformer(const TransformationParameter& param, Phase phase, - DeviceContext device_context) + DeviceContext *device_context) : param_(param), phase_(phase), device_context_(device_context) { // check if we want to use mean_file diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp new file mode 100644 index 00000000000..3e06d7362e6 --- /dev/null +++ b/src/caffe/device_context.cpp @@ -0,0 +1,72 @@ +/* + * device_context.cpp + * + * Created on: Jun 26, 2015 + * Author: Fabian Tschopp + */ + +#include +#include "caffe/device_context.hpp" +#include "caffe/greentea/greentea.hpp" +#include "caffe/util/device_alternate.hpp" + + +namespace caffe { + +DeviceContext::DeviceContext() + : workgroup_sizes_(3, 0), id_(0), backend_(Backend::BACKEND_CUDA) { + this->Init(); +} + +DeviceContext::DeviceContext(int id, Backend backend) + : workgroup_sizes_(3, 0), id_(id), backend_(backend) { + this->Init(); +} + +void DeviceContext::Init() { +if(backend_ == BACKEND_CUDA) { +#ifdef USE_CUDA + workgroup_sizes_[0] = CAFFE_CUDA_NUM_THREADS; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + std::vector temp(3); + clGetDeviceInfo(viennacl::ocl::get_context(id_).devices()[0].id(), + CL_DEVICE_MAX_WORK_ITEM_SIZES, sizeof(size_t), &temp[0], NULL); + workgroup_sizes_[0] = temp[0]; + workgroup_sizes_[1] = temp[1]; + workgroup_sizes_[2] = temp[2]; +#endif // USE_GREENTEA + } +} + +Backend DeviceContext::backend() const { + return backend_; +} + +int DeviceContext::id() const { + return id_; +} + +int DeviceContext::WorkgroupSize(int id) { + return workgroup_sizes_[id]; + return 0; +} + +template<> +Blob *DeviceContext::Buffer(int id) { + if (buff_f_.size() <= id) { + buff_f_.push_back(Blob(this)); + } + return &(buff_f_[id]); +} + +template<> +Blob *DeviceContext::Buffer(int id) { + if (buff_d_.size() <= id) { + buff_d_.push_back(Blob(this)); + } + return &(buff_d_[id]); +} + +} // namespace caffe diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index 630006a1cc0..a8547c81a5a 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -6,6 +6,7 @@ */ #include "caffe/greentea/greentea.hpp" +#include "caffe/util/device_alternate.hpp" namespace caffe { @@ -27,24 +28,6 @@ viennacl::ocl::handle WrapHandle(cl_mem in, } } -#endif - -DeviceContext::DeviceContext() - : id_(0), backend_(Backend::BACKEND_CUDA) { -} - -DeviceContext::DeviceContext(int id, Backend backend) - : id_(id), backend_(backend) { -} - -Backend DeviceContext::backend() const { - return backend_; -} - -int DeviceContext::id() const { - return id_; -} - void FinishQueues(viennacl::ocl::context *ctx) { for (int i = 0; i < GREENTEA_QUEUE_COUNT; ++i) { ctx->switch_queue(i); @@ -53,4 +36,7 @@ void FinishQueues(viennacl::ocl::context *ctx) { ctx->switch_queue(0); } +#endif + + } // namespace caffe diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index cfdf686b411..e49766e185f 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -158,6 +158,7 @@ void greentea_im2col_gpu(viennacl::ocl::program *prog, int num_kernels = channels * height_col * width_col; viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("im2col")); + viennacl::ocl::enqueue( kernel(num_kernels, WrapHandle(data_im, ctx), data_im_off, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 84dd698e479..ed5b744f95b 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -32,7 +32,7 @@ shared_ptr > GetConvolutionLayer(const LayerParameter& param) { #endif } if (engine == ConvolutionParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { return shared_ptr >(new ConvolutionLayer(param)); #ifdef USE_CUDNN } else if (engine == ConvolutionParameter_Engine_CUDNN) { @@ -56,7 +56,7 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { #endif } if (engine == PoolingParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { return shared_ptr >(new PoolingLayer(param)); #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { @@ -87,7 +87,7 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { #endif } if (engine == ReLUParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { return shared_ptr >(new ReLULayer(param)); #ifdef USE_CUDNN } else if (engine == ReLUParameter_Engine_CUDNN) { @@ -111,7 +111,7 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { #endif } if (engine == SigmoidParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { return shared_ptr >(new SigmoidLayer(param)); #ifdef USE_CUDNN } else if (engine == SigmoidParameter_Engine_CUDNN) { @@ -135,7 +135,7 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { #endif } if (engine == SoftmaxParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { return shared_ptr >(new SoftmaxLayer(param)); #ifdef USE_CUDNN } else if (engine == SoftmaxParameter_Engine_CUDNN) { @@ -159,7 +159,7 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { #endif } if (engine == TanHParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext().backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { return shared_ptr >(new TanHLayer(param)); #ifdef USE_CUDNN } else if (engine == TanHParameter_Engine_CUDNN) { diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu index 87de89b1009..35c912d3eb1 100644 --- a/src/caffe/layers/absval_layer.cu +++ b/src/caffe/layers/absval_layer.cu @@ -16,13 +16,13 @@ void AbsValLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_abs(this->device_context_.id(), count, + greentea_gpu_abs(this->device_context_->id(), count, (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (top_data), 0); #endif // USE_GREENTEA @@ -40,17 +40,17 @@ void AbsValLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_sign(count, bottom_data, bottom_diff); caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sign(this->device_context_.id(), count, + greentea_gpu_sign(this->device_context_->id(), count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, 0); - greentea_gpu_mul(this->device_context_.id(), count, + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem) bottom_diff, 0, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); #endif // USE_GREENTEA diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 28e9b0300ac..6622a38834b 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -252,7 +252,7 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, const int output_off, bool skip_im2col) { const Dtype* col_buff = input; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (!is_1x1_) { if (!skip_im2col) { @@ -279,7 +279,7 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, col_buff = col_buffer_.gpu_data(); } for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, conv_out_spatial_dim_, kernel_dim_ / group_, (Dtype) 1., (cl_mem) weights, weight_offset_ * g, @@ -296,7 +296,7 @@ template void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, const int output_off, const Dtype* bias) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, height_out_ * width_out_, 1, (Dtype) 1., bias, @@ -305,7 +305,7 @@ void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num_output_, height_out_ * width_out_, 1, (Dtype) 1., (cl_mem) bias, 0, @@ -325,7 +325,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, if (is_1x1_) { col_buff = input; } - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int g = 0; g < group_; ++g) { caffe_gpu_gemm( @@ -341,7 +341,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, } else { #ifdef USE_GREENTEA for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasTrans, CblasNoTrans, kernel_dim_ / group_, conv_out_spatial_dim_, conv_out_channels_ / group_, (Dtype) 1., @@ -364,7 +364,7 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, const int output_off, Dtype* weights) { const Dtype* col_buff = input; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (!is_1x1_) { conv_im2col_gpu(input + input_off, col_buffer_.mutable_gpu_data()); @@ -387,7 +387,7 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, col_buff = col_buffer_.gpu_data(); } for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype) 1., (cl_mem) output, @@ -405,7 +405,7 @@ template void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, const Dtype* input, const int input_off) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., input + input_off, bias_multiplier_.gpu_data(), @@ -413,7 +413,7 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num_output_, height_out_ * width_out_, 1., (cl_mem) input, input_off, (cl_mem) (bias_multiplier_.gpu_data()), 0, 1., diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index c93567cfe4f..0fa571e8f3d 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -10,7 +10,7 @@ void BasePrefetchingDataLayer::Forward_gpu( // First, join the thread JoinPrefetchThread(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Reshape to loaded data. top[0]->ReshapeLike(this->prefetch_data_); @@ -28,7 +28,7 @@ void BasePrefetchingDataLayer::Forward_gpu( } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); // Reshape to loaded data. top[0]->ReshapeLike(this->prefetch_data_); diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index 30384811ed5..69558505ddd 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -31,7 +31,7 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) BNLLForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -42,9 +42,9 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_bnll = program.get_kernel( CL_KERNEL_SELECT("bnll_forward")); @@ -77,7 +77,7 @@ void BNLLLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) BNLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -88,9 +88,9 @@ void BNLLLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_bnll = program.get_kernel( CL_KERNEL_SELECT("bnll_backward")); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 5d97c854db6..c51b5d556d1 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -46,7 +46,7 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), @@ -58,9 +58,9 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_concat = program.get_kernel( CL_KERNEL_SELECT("concat")); @@ -93,7 +93,7 @@ void ConcatLayer::Backward_gpu(const vector*>& top, const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) Concat CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), @@ -105,9 +105,9 @@ void ConcatLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_concat = program.get_kernel( CL_KERNEL_SELECT("concat")); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 6367aa3cb5d..737e1c7c575 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -18,7 +18,7 @@ void ContrastiveLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_sub(count, bottom[0]->gpu_data(), // a bottom[1]->gpu_data(), // b @@ -33,16 +33,16 @@ void ContrastiveLossLayer::Forward_gpu( #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sub(this->device_context_.id(), count, + greentea_gpu_sub(this->device_context_->id(), count, (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (bottom[1]->gpu_data()), 0, (cl_mem) (diff_.mutable_gpu_data()), 0); - greentea_gpu_powx(this->device_context_.id(), count, + greentea_gpu_powx(this->device_context_->id(), count, (cl_mem) (diff_.mutable_gpu_data()), 0, // a_i-b_i Dtype(2), (cl_mem) (diff_sq_.mutable_gpu_data()), 0); // (a_i-b_i)^2 - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), (cl_mem) (diff_sq_.gpu_data()), 0, // (a_i-b_i)^2 @@ -119,7 +119,7 @@ void ContrastiveLossLayer::Backward_gpu( const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -134,9 +134,9 @@ void ContrastiveLossLayer::Backward_gpu( } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_cll = program.get_kernel( CL_KERNEL_SELECT("cll_backward")); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 627a7481a67..9367e3438ad 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -23,10 +23,10 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { #ifdef USE_GREENTEA - if (this->device_context_.backend() == BACKEND_OpenCL) { + if (this->device_context_->backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_.id()); - // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); + viennacl::ocl::get_context(this->device_context_->id()); + // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); } #endif // USE_GREENTEA @@ -38,9 +38,9 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, } } #ifdef USE_GREENTEA - if (this->device_context_.backend() == BACKEND_OpenCL) { + if (this->device_context_->backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_.id()); + viennacl::ocl::get_context(this->device_context_->id()); FinishQueues(&ctx); } #endif // USE_GREENTEA @@ -59,19 +59,19 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { #ifdef USE_GREENTEA - if (this->device_context_.backend() == BACKEND_OpenCL) { + if (this->device_context_->backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_.id()); - ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); + viennacl::ocl::get_context(this->device_context_->id()); + // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); } #endif // USE_GREENTEA this->backward_gpu_bias(bias_diff, top_diff, top[i]->offset(n)); #ifdef USE_GREENTEA - if (this->device_context_.backend() == BACKEND_OpenCL) { + if (this->device_context_->backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_.id()); + viennacl::ocl::get_context(this->device_context_->id()); FinishQueues(&ctx); } #endif // USE_GREENTEA @@ -82,10 +82,10 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { #ifdef USE_GREENTEA - if (this->device_context_.backend() == BACKEND_OpenCL) { + if (this->device_context_->backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_.id()); - // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); + viennacl::ocl::get_context(this->device_context_->id()); + // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); } #endif // USE_GREENTEA @@ -101,9 +101,9 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, } #ifdef USE_GREENTEA - if (this->device_context_.backend() == BACKEND_OpenCL) { + if (this->device_context_->backend() == BACKEND_OpenCL) { viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_.id()); + viennacl::ocl::get_context(this->device_context_->id()); FinishQueues(&ctx); } #endif // USE_GREENTEA diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index b39926245ad..9b18b9060ac 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -17,7 +17,7 @@ namespace caffe { template void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // CUDA backend code for (int i = 0; i < bottom.size(); ++i) { @@ -55,9 +55,9 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, // GreenTea backend code #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); for (int i = 0; i < bottom.size(); ++i) { const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); @@ -80,7 +80,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, weight_offset * g, col_data, col_offset * g, (Dtype) 0., top_data, @@ -89,7 +89,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, // Third, add bias if (bias_term_) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., (cl_mem) (this->blobs_[1]->gpu_data()), 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, @@ -107,7 +107,7 @@ void ConvolutionSKLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA const Dtype* weight = NULL; Dtype* weight_diff = NULL; @@ -181,9 +181,9 @@ void ConvolutionSKLayer::Backward_gpu( } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); cl_mem weight = NULL; cl_mem weight_diff = NULL; @@ -191,7 +191,7 @@ void ConvolutionSKLayer::Backward_gpu( if (this->param_propagate_down_[0]) { weight = (cl_mem) (this->blobs_[0]->gpu_data()); weight_diff = (cl_mem) (this->blobs_[0]->mutable_gpu_diff()); - greentea_gpu_set(this->device_context_.id(), this->blobs_[0]->count(), + greentea_gpu_set(this->device_context_->id(), this->blobs_[0]->count(), Dtype(0), weight_diff, 0); } @@ -199,7 +199,7 @@ void ConvolutionSKLayer::Backward_gpu( if (bias_term_ && this->param_propagate_down_[1]) { bias_diff = (cl_mem) (this->blobs_[1]->mutable_gpu_diff()); - greentea_gpu_set(this->device_context_.id(), this->blobs_[1]->count(), + greentea_gpu_set(this->device_context_->id(), this->blobs_[1]->count(), Dtype(0), bias_diff, 0); } const int weight_offset = M_ * K_; @@ -211,7 +211,7 @@ void ConvolutionSKLayer::Backward_gpu( if (bias_term_ && this->param_propagate_down_[1]) { top_diff = (cl_mem) (top[i]->gpu_diff()); for (int n = 0; n < num_; ++n) { - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num_output_, N_, (Dtype) 1., top_diff, top[0]->offset(n), (cl_mem) (bias_multiplier_.gpu_data()), 0, @@ -239,7 +239,8 @@ void ConvolutionSKLayer::Backward_gpu( // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), + CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., top_diff, top[i]->offset(n) + top_offset * g, @@ -250,7 +251,8 @@ void ConvolutionSKLayer::Backward_gpu( // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_.id(), CblasTrans, + greentea_gpu_gemm(this->device_context_->id(), + CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., weight, weight_offset * g, top_diff, top[i]->offset(n) + top_offset * g, @@ -258,7 +260,8 @@ void ConvolutionSKLayer::Backward_gpu( } // col2im back to the data greentea_col2im_sk_gpu(&program, &ctx, col_diff, channels_, - height_, width_, kernel_h_, kernel_w_, + height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, bottom_diff, bottom[i]->offset(n)); diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index 8b130c7c4e9..bed083dd5a9 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -29,7 +29,7 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->phase_ == TRAIN) { unsigned int* mask = @@ -48,12 +48,12 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); if (this->phase_ == TRAIN) { cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data()); - greentea_gpu_rng_uniform(this->device_context_.id(), count, mask, 0); + greentea_gpu_rng_uniform(this->device_context_->id(), count, mask, 0); // set thresholds viennacl::ocl::kernel &oclk_dropout = program.get_kernel( CL_KERNEL_SELECT("dropout_forward")); @@ -90,7 +90,7 @@ void DropoutLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast(rand_vec_ @@ -108,9 +108,9 @@ void DropoutLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); if (this->phase_ == TRAIN) { cl_mem mask = (cl_mem) (rand_vec_.gpu_data()); diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 3e8d46c24ad..f776dd69c26 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -45,7 +45,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA switch (op_) { case EltwiseParameter_EltwiseOp_PROD: @@ -84,18 +84,18 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { - greentea_gpu_mul(this->device_context_.id(), + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem)(bottom[0]->gpu_data()), 0, (cl_mem)(bottom[1]->gpu_data()), 0, (cl_mem)top_data, 0); for (int i = 2; i < bottom.size(); ++i) { - greentea_gpu_mul(this->device_context_.id(), + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem)top_data, 0, (cl_mem)(bottom[i]->gpu_data()), 0, (cl_mem)top_data, 0); @@ -103,10 +103,10 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, } break; case EltwiseParameter_EltwiseOp_SUM: { - greentea_gpu_set(this->device_context_.id(), count, 0, + greentea_gpu_set(this->device_context_->id(), count, 0, (cl_mem)top_data, 0); for (int i = 0; i < bottom.size(); ++i) { - greentea_gpu_axpy(this->device_context_.id(), + greentea_gpu_axpy(this->device_context_->id(), count, coeffs_[i], (cl_mem)(bottom[i]->gpu_data()), 0, (cl_mem)top_data, 0); @@ -169,7 +169,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { @@ -219,9 +219,9 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { @@ -241,18 +241,18 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, (cl_mem)(bottom_diff), 0, &ctx); initialized = true; } else { - greentea_gpu_mul(this->device_context_.id(), count, + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem)bottom[j]->gpu_data(), 0, (cl_mem)bottom_diff, 0, (cl_mem)bottom_diff, 0); } } } else { - greentea_gpu_div(this->device_context_.id(), + greentea_gpu_div(this->device_context_->id(), count, (cl_mem)top_data, 0, (cl_mem)bottom_data, 0, (cl_mem)bottom_diff, 0); } - greentea_gpu_mul(this->device_context_.id(), + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem)bottom_diff, 0, (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0); } @@ -262,7 +262,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, greentea_copy(count, (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, &ctx); } else { - greentea_gpu_scale(this->device_context_.id(), + greentea_gpu_scale(this->device_context_->id(), count, coeffs_[i], (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0); } diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index d6ebf24d3dd..adfc0a90142 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -17,7 +17,7 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); @@ -28,12 +28,12 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sub(this->device_context_.id(), count, + greentea_gpu_sub(this->device_context_->id(), count, (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (bottom[1]->gpu_data()), 0, (cl_mem) (diff_.mutable_gpu_data()), 0); Dtype dot; - greentea_gpu_dot(this->device_context_.id(), count, + greentea_gpu_dot(this->device_context_->id(), count, (cl_mem) (diff_.gpu_data()), 0, (cl_mem) (diff_.gpu_data()), 0, &dot); Dtype loss = dot / bottom[0]->num() / Dtype(2); @@ -50,7 +50,7 @@ void EuclideanLossLayer::Backward_gpu( if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_axpby(bottom[i]->count(), // count alpha, // alpha @@ -60,7 +60,7 @@ void EuclideanLossLayer::Backward_gpu( #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_axpby(this->device_context_.id(), bottom[i]->count(), + greentea_gpu_axpby(this->device_context_->id(), bottom[i]->count(), alpha, (cl_mem) (diff_.gpu_data()), 0, Dtype(0), (cl_mem) (bottom[i]->mutable_gpu_diff()), 0); #endif // USE_GREENTEA diff --git a/src/caffe/layers/exp_layer.cu b/src/caffe/layers/exp_layer.cu index 992a0edccc8..9d33e5ef4f9 100644 --- a/src/caffe/layers/exp_layer.cu +++ b/src/caffe/layers/exp_layer.cu @@ -14,7 +14,7 @@ void ExpLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (inner_scale_ == Dtype(1)) { caffe_gpu_exp(count, bottom_data, top_data); @@ -29,16 +29,18 @@ void ExpLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA if (inner_scale_ == Dtype(1)) { - greentea_gpu_exp(this->device_context_.id(), count, + greentea_gpu_exp(this->device_context_->id(), count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); } else { - greentea_gpu_scale(this->device_context_.id(), count, inner_scale_, + greentea_gpu_scale(this->device_context_->id(), + count, inner_scale_, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); - greentea_gpu_exp(this->device_context_.id(), count, + greentea_gpu_exp(this->device_context_->id(), count, (cl_mem) top_data, 0, (cl_mem) top_data, 0); } if (outer_scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, outer_scale_, + greentea_gpu_scal(this->device_context_->id(), + count, outer_scale_, (cl_mem) top_data, 0); } #endif // USE_GREENTEA @@ -57,7 +59,7 @@ void ExpLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_mul(count, top_data, top_diff, bottom_diff); if (inner_scale_ != Dtype(1)) { @@ -66,11 +68,11 @@ void ExpLayer::Backward_gpu(const vector*>& top, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_mul(this->device_context_.id(), count, + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem) top_data, 0, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); if (inner_scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, inner_scale_, + greentea_gpu_scal(this->device_context_->id(), count, inner_scale_, (cl_mem) bottom_diff, 0); } #endif // USE_GREENTEA diff --git a/src/caffe/layers/filter_layer.cu b/src/caffe/layers/filter_layer.cu index b6ec642cfa9..94cdad0a332 100644 --- a/src/caffe/layers/filter_layer.cu +++ b/src/caffe/layers/filter_layer.cu @@ -19,7 +19,7 @@ void FilterLayer::Forward_gpu(const vector*>& bottom, int data_offset_top = n * dim; int data_offset_bottom = indices_to_forward_[n] * dim; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(dim, bottom_data + data_offset_bottom, top_data + data_offset_top); @@ -27,7 +27,7 @@ void FilterLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); greentea_copy(dim, (cl_mem) bottom_data, data_offset_bottom, (cl_mem) top_data, data_offset_top, &ctx); @@ -55,7 +55,7 @@ void FilterLayer::Backward_gpu(const vector*>& top, int data_offset_bottom = 0; int data_offset_top = 0; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int n = 0; n < bottom[i]->shape(0); ++n) { if (next_to_backward_offset >= indices_to_forward_.size()) { @@ -82,20 +82,20 @@ void FilterLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); for (int n = 0; n < bottom[i]->shape(0); ++n) { if (next_to_backward_offset >= indices_to_forward_.size()) { // we already visited all items that were been forwarded, so // just set to zero remaining ones data_offset_bottom = n * dim; - greentea_gpu_set(this->device_context_.id(), dim, Dtype(0), + greentea_gpu_set(this->device_context_->id(), dim, Dtype(0), (cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom); } else { batch_offset = indices_to_forward_[next_to_backward_offset]; data_offset_bottom = n * dim; if (n != batch_offset) { // this data was not been forwarded - greentea_gpu_set(this->device_context_.id(), dim, Dtype(0), + greentea_gpu_set(this->device_context_->id(), dim, Dtype(0), (cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom); } else { // this data was been forwarded data_offset_top = next_to_backward_offset * dim; diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 376a5b1700f..deb141c8344 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -19,7 +19,7 @@ namespace caffe { template void HDF5DataLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); for (int i = 0; i < batch_size; ++i, ++current_row_) { diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index f772a023e0e..4add4169fa3 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -14,7 +14,7 @@ namespace caffe { template void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CHECK_GE(bottom.size(), 2); CHECK_EQ(bottom[0]->num(), bottom[1]->num()); diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 25cf17508fa..ead80e86603 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -19,7 +19,7 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int n = 0; n < bottom[0]->num(); ++n) { im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, width_, @@ -30,9 +30,9 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); for (int n = 0; n < bottom[0]->num(); ++n) { greentea_im2col_gpu(&program, &ctx, (cl_mem) bottom_data, @@ -52,7 +52,7 @@ void Im2colLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int n = 0; n < top[0]->num(); ++n) { col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, @@ -63,9 +63,9 @@ void Im2colLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); for (int n = 0; n < top[0]->num(); ++n) { greentea_col2im_gpu(&program, &ctx, (cl_mem) top_diff, diff --git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index 39df634702b..4875f4caafc 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -16,7 +16,7 @@ void InnerProductLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., bottom_data, weight, (Dtype) 0., top_data); @@ -28,12 +28,12 @@ void InnerProductLayer::Forward_gpu(const vector*>& bottom, #endif // USE CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., (cl_mem) bottom_data, 0, (cl_mem) weight, 0, (Dtype) 0., (cl_mem) top_data, 0); if (bias_term_) { - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., (cl_mem) (bias_multiplier_.gpu_data()), 0, (cl_mem) (this->blobs_[1]->gpu_data()), 0, @@ -48,7 +48,7 @@ void InnerProductLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); @@ -79,7 +79,7 @@ void InnerProductLayer::Backward_gpu( const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight - greentea_gpu_gemm(this->device_context_.id(), CblasTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) bottom_data, 0, (Dtype) 1., @@ -89,7 +89,7 @@ void InnerProductLayer::Backward_gpu( if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias - greentea_gpu_gemv(this->device_context_.id(), CblasTrans, M_, N_, + greentea_gpu_gemv(this->device_context_->id(), CblasTrans, M_, N_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., @@ -99,7 +99,7 @@ void InnerProductLayer::Backward_gpu( if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) (this->blobs_[0]->gpu_data()), 0, diff --git a/src/caffe/layers/log_layer.cu b/src/caffe/layers/log_layer.cu index cd194847e4c..c0b9f5ba571 100644 --- a/src/caffe/layers/log_layer.cu +++ b/src/caffe/layers/log_layer.cu @@ -14,7 +14,7 @@ void LogLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { caffe_gpu_log(count, bottom_data, top_data); @@ -35,27 +35,27 @@ void LogLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { - greentea_gpu_log(this->device_context_.id(), count, + greentea_gpu_log(this->device_context_->id(), count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0); } else { greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0, &ctx); if (input_scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, + greentea_gpu_scal(this->device_context_->id(), count, input_scale_, (cl_mem) top_data, 0); } if (input_shift_ != Dtype(0)) { - greentea_gpu_add_scalar(this->device_context_.id(), count, + greentea_gpu_add_scalar(this->device_context_->id(), count, input_shift_, (cl_mem) top_data, 0); } - greentea_gpu_log(this->device_context_.id(), count, + greentea_gpu_log(this->device_context_->id(), count, (cl_mem) top_data, 0, (cl_mem) top_data, 0); } if (base_scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, base_scale_, + greentea_gpu_scal(this->device_context_->id(), count, base_scale_, (cl_mem) top_data, 0); } #endif // USE_GREENTEA @@ -74,7 +74,7 @@ void LogLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(count, bottom_data, bottom_diff); if (input_scale_ != Dtype(1)) { @@ -92,26 +92,26 @@ void LogLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, 0, &ctx); if (input_scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, input_scale_, + greentea_gpu_scal(this->device_context_->id(), count, input_scale_, (cl_mem) bottom_diff, 0); } if (input_shift_ != Dtype(0)) { - greentea_gpu_add_scalar(this->device_context_.id(), count, + greentea_gpu_add_scalar(this->device_context_->id(), count, input_shift_, (cl_mem) bottom_diff, 0); } - greentea_gpu_powx(this->device_context_.id(), count, + greentea_gpu_powx(this->device_context_->id(), count, (cl_mem) bottom_diff, 0, Dtype(-1), (cl_mem) bottom_diff, 0); if (backward_num_scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, + greentea_gpu_scal(this->device_context_->id(), count, backward_num_scale_, (cl_mem) bottom_diff, 0); } - greentea_gpu_mul(this->device_context_.id(), count, + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, (cl_mem) bottom_diff, 0); #endif // USE_GREENTEA diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index d314c2b92a7..1d36e85da94 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -90,7 +90,7 @@ void LRNLayer::CrossChannelForward_gpu( Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. @@ -113,9 +113,9 @@ void LRNLayer::CrossChannelForward_gpu( #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); int n_threads = num_ * height_ * width_; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( @@ -226,7 +226,7 @@ void LRNLayer::CrossChannelBackward_gpu( const vector*>& bottom) { int n_threads = num_ * height_ * width_; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), @@ -240,9 +240,9 @@ void LRNLayer::CrossChannelBackward_gpu( } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_lrn = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_diff")); diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index 951c4cac164..f55680d8de7 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -91,7 +91,7 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, int height_b = bottom[1]->height(); int width_b = bottom[1]->width(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CopyForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( @@ -101,9 +101,9 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_copy_forward = program.get_kernel( CL_KERNEL_SELECT("merge_copy_forward")); @@ -143,7 +143,7 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, int height_b = bottom[1]->height(); int width_b = bottom[1]->width(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS) ( @@ -153,9 +153,9 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_copy_backward = program.get_kernel( CL_KERNEL_SELECT("merge_copy_backward")); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 1862c807e61..9c904377b8b 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -20,7 +20,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, int dim = bottom[0]->count() / num; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ @@ -76,73 +76,73 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ - greentea_gpu_powx(this->device_context_.id(), bottom[0]->count(), + greentea_gpu_powx(this->device_context_->id(), bottom[0]->count(), (cl_mem) bottom_data, 0, Dtype(2), (cl_mem) (temp_.mutable_gpu_data()), 0); // computes variance using var(X) = E(X^2) - (EX)^2 - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num, dim, 1. / dim, (cl_mem) (bottom_data), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (mean_.mutable_gpu_data()), 0); - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num, dim, 1. / dim, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (variance_.mutable_gpu_data()), 0); - greentea_gpu_powx(this->device_context_.id(), mean_.count(), + greentea_gpu_powx(this->device_context_->id(), mean_.count(), (cl_mem) mean_.gpu_data(), 0, Dtype(2), (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_sub(this->device_context_.id(), mean_.count(), + greentea_gpu_sub(this->device_context_->id(), mean_.count(), (cl_mem) (variance_.gpu_data()), 0, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) (variance_.mutable_gpu_data()), 0); // do mean and variance normalization // subtract mean - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num, dim, 1, -1., (cl_mem) (mean_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_add(this->device_context_.id(), temp_.count(), + greentea_gpu_add(this->device_context_->id(), temp_.count(), (cl_mem) bottom_data, 0, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) top_data, 0); // normalize variance - greentea_gpu_powx(this->device_context_.id(), variance_.count(), + greentea_gpu_powx(this->device_context_->id(), variance_.count(), (cl_mem) (variance_.gpu_data()), 0, Dtype(0.5), (cl_mem) (variance_.mutable_gpu_data()), 0); - greentea_gpu_add_scalar(this->device_context_.id(), + greentea_gpu_add_scalar(this->device_context_->id(), variance_.count(), eps_, (cl_mem) (variance_.mutable_gpu_data()), 0); - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num, dim, 1, 1., (cl_mem) (variance_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_div(this->device_context_.id(), temp_.count(), + greentea_gpu_div(this->device_context_->id(), temp_.count(), (cl_mem) top_data, 0, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) top_data, 0); } else { - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num, dim, 1. / dim, (cl_mem) bottom_data, 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (mean_.mutable_gpu_data()), 0); // EX // subtract mean - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num, dim, 1, -1., (cl_mem) (mean_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_add(this->device_context_.id(), temp_.count(), + greentea_gpu_add(this->device_context_->id(), temp_.count(), (cl_mem) bottom_data, 0, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) top_data, 0); @@ -168,7 +168,7 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->layer_param_.mvn_param().normalize_variance()) { caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); @@ -206,51 +206,51 @@ void MVNLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); if (this->layer_param_.mvn_param().normalize_variance()) { - greentea_gpu_mul(this->device_context_.id(), temp_.count(), + greentea_gpu_mul(this->device_context_->id(), temp_.count(), (cl_mem) top_data, 0, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num, dim, 1., (cl_mem) bottom_diff, 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (mean_.mutable_gpu_data()), 0); - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num, dim, 1, 1., (cl_mem) (mean_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) bottom_diff, 0); - greentea_gpu_mul(this->device_context_.id(), temp_.count(), + greentea_gpu_mul(this->device_context_->id(), temp_.count(), (cl_mem) top_data, 0, (cl_mem) bottom_diff, 0, (cl_mem) bottom_diff, 0); - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, num, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num, dim, 1., (cl_mem) top_diff, 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (mean_.mutable_gpu_data()), 0); - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num, dim, 1, 1., (cl_mem) (mean_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 1., (cl_mem) bottom_diff, 0); - greentea_gpu_axpby(this->device_context_.id(), temp_.count(), + greentea_gpu_axpby(this->device_context_->id(), temp_.count(), Dtype(1), (cl_mem) top_diff, 0, Dtype(-1. / dim), (cl_mem) bottom_diff, 0); // put the squares of bottom into temp_ - greentea_gpu_powx(this->device_context_.id(), temp_.count(), + greentea_gpu_powx(this->device_context_->id(), temp_.count(), (cl_mem) bottom_data, 0, Dtype(2), (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_gemm(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num, dim, 1, 1., (cl_mem) (variance_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_div(this->device_context_.id(), temp_.count(), + greentea_gpu_div(this->device_context_->id(), temp_.count(), (cl_mem) bottom_diff, 0, (cl_mem) (temp_.gpu_data()), 0, (cl_mem) bottom_diff, 0); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 5cdfbf92d0e..ddce6a65eb7 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -176,7 +176,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, int* mask = NULL; Dtype* top_mask = NULL; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: @@ -231,9 +231,9 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { @@ -272,7 +272,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, case PoolingParameter_PoolMethod_STOCHASTIC: { if (this->phase_ == caffe::TRAIN) { // We need to create the random index as well. - greentea_gpu_rng_uniform(this->device_context_.id(), count, + greentea_gpu_rng_uniform(this->device_context_->id(), count, Dtype(0), Dtype(1), (cl_mem)(rand_idx_.mutable_gpu_data()), 0); @@ -450,7 +450,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, const int* mask = NULL; const Dtype* top_mask = NULL; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(count, Dtype(0.), bottom_diff); switch (this->layer_param_.pooling_param().pool()) { @@ -494,11 +494,11 @@ void PoolingLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); - greentea_gpu_set(this->device_context_.id(), count, Dtype(0.), + greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), (cl_mem) bottom_diff, 0); switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index 1a6f3fa957b..0762536272f 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -186,7 +186,7 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: @@ -248,9 +248,9 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { @@ -293,7 +293,7 @@ void PoolingSKLayer::Forward_gpu(const vector*>& bottom, case PoolingParameter_PoolMethod_STOCHASTIC: { if (this->phase_ == caffe::TRAIN) { // We need to create the random index as well. - greentea_gpu_rng_uniform(this->device_context_.id(), count, + greentea_gpu_rng_uniform(this->device_context_->id(), count, Dtype(0), Dtype(1), (cl_mem)(rand_idx_.mutable_gpu_data()), 0); @@ -406,7 +406,7 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(count, Dtype(0.), bottom_diff); switch (this->layer_param_.pooling_param().pool()) { @@ -435,11 +435,11 @@ void PoolingSKLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); - greentea_gpu_set(this->device_context_.id(), count, Dtype(0.), + greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), (cl_mem) bottom_diff, 0); switch (this->layer_param_.pooling_param().pool()) { diff --git a/src/caffe/layers/power_layer.cu b/src/caffe/layers/power_layer.cu index 839036db20b..521cce5a308 100644 --- a/src/caffe/layers/power_layer.cu +++ b/src/caffe/layers/power_layer.cu @@ -18,7 +18,7 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Special case where we can ignore the input: scale or power is 0. if (diff_scale_ == Dtype(0)) { @@ -41,11 +41,11 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); if (diff_scale_ == Dtype(0)) { Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_); - greentea_gpu_set(this->device_context_.id(), count, value, + greentea_gpu_set(this->device_context_->id(), count, value, (cl_mem) top_data, 0); return; } @@ -53,15 +53,15 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0, &ctx); if (scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, scale_, + greentea_gpu_scal(this->device_context_->id(), count, scale_, (cl_mem) top_data, 0); } if (shift_ != Dtype(0)) { - greentea_gpu_add_scalar(this->device_context_.id(), count, shift_, + greentea_gpu_add_scalar(this->device_context_->id(), count, shift_, (cl_mem) top_data, 0); } if (power_ != Dtype(1)) { - greentea_gpu_powx(this->device_context_.id(), count, + greentea_gpu_powx(this->device_context_->id(), count, (cl_mem) top_data, 0, power_, (cl_mem) top_data, 0); } @@ -78,7 +78,7 @@ void PowerLayer::Backward_gpu(const vector*>& top, const int count = bottom[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { caffe_gpu_set(count, diff_scale_, bottom_diff); @@ -123,10 +123,10 @@ void PowerLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { - greentea_gpu_set(this->device_context_.id(), count, diff_scale_, + greentea_gpu_set(this->device_context_->id(), count, diff_scale_, (cl_mem) bottom_diff, 0); } else { const Dtype* bottom_data = bottom[0]->gpu_data(); @@ -136,11 +136,11 @@ void PowerLayer::Backward_gpu(const vector*>& top, // Special case for y = (shift + scale * x)^2 // -> dy/dx = 2 * scale * (shift + scale * x) // = diff_scale * shift + diff_scale * scale * x - greentea_gpu_axpby(this->device_context_.id(), count, + greentea_gpu_axpby(this->device_context_->id(), count, diff_scale_ * scale_, (cl_mem) bottom_data, 0, Dtype(0), (cl_mem) bottom_diff, 0); if (shift_ != Dtype(0)) { - greentea_gpu_add_scalar(this->device_context_.id(), count, + greentea_gpu_add_scalar(this->device_context_->id(), count, diff_scale_ * shift_, (cl_mem) bottom_diff, 0); } @@ -150,33 +150,33 @@ void PowerLayer::Backward_gpu(const vector*>& top, // = scale * power * (scale * x)^power * (scale * x)^(-1) // = power * y / x const Dtype* top_data = top[0]->gpu_data(); - greentea_gpu_div(this->device_context_.id(), count, + greentea_gpu_div(this->device_context_->id(), count, (cl_mem) top_data, 0, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, 0); - greentea_gpu_scal(this->device_context_.id(), count, power_, + greentea_gpu_scal(this->device_context_->id(), count, power_, (cl_mem) bottom_diff, 0); } else { greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, 0, &ctx); if (scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, scale_, + greentea_gpu_scal(this->device_context_->id(), count, scale_, (cl_mem) bottom_diff, 0); } if (shift_ != Dtype(0)) { - greentea_gpu_add_scalar(this->device_context_.id(), count, shift_, + greentea_gpu_add_scalar(this->device_context_->id(), count, shift_, (cl_mem) bottom_diff, 0); } const Dtype* top_data = top[0]->gpu_data(); - greentea_gpu_div(this->device_context_.id(), count, + greentea_gpu_div(this->device_context_->id(), count, (cl_mem) top_data, 0, (cl_mem) bottom_diff, 0, (cl_mem) bottom_diff, 0); if (diff_scale_ != Dtype(1)) { - greentea_gpu_scal(this->device_context_.id(), count, diff_scale_, + greentea_gpu_scal(this->device_context_->id(), count, diff_scale_, (cl_mem) bottom_diff, 0); } } } - greentea_gpu_mul(this->device_context_.id(), count, + greentea_gpu_mul(this->device_context_->id(), count, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0, (cl_mem) bottom_diff, 0); #endif // USE_GREENTEA diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index b716fb1039d..8c810bf2ddf 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -57,7 +57,7 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // For in-place computation if (top[0] == bottom[0]) { @@ -73,9 +73,9 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); if (top[0] == bottom[0]) { greentea_copy(count, (cl_mem) bottom_data, 0, @@ -110,7 +110,7 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs @@ -160,9 +160,9 @@ void PReLULayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); @@ -180,12 +180,12 @@ void PReLULayer::Backward_gpu(const vector*>& top, if (channel_shared_) { Dtype d; - greentea_gpu_dot(this->device_context_.id(), channels * dim, + greentea_gpu_dot(this->device_context_->id(), channels * dim, (cl_mem) (backward_buff_.gpu_diff()), 0, (cl_mem) (multiplier_.gpu_data()), 0, &d); dsum += d; } else { - greentea_gpu_gemv(this->device_context_.id(), CblasNoTrans, + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, channels, dim, 1., (cl_mem) (backward_buff_.gpu_diff()), 0, (cl_mem) (multiplier_.gpu_data()), 0, 1., @@ -193,7 +193,7 @@ void PReLULayer::Backward_gpu(const vector*>& top, } } if (channel_shared_) { - greentea_gpu_add_scalar(this->device_context_.id(), + greentea_gpu_add_scalar(this->device_context_->id(), this->blobs_[0]->count(), Dtype(dsum), (cl_mem) slope_diff, 0); } diff --git a/src/caffe/layers/reduction_layer.cu b/src/caffe/layers/reduction_layer.cu index b0972253a2e..f8557436b1f 100644 --- a/src/caffe/layers/reduction_layer.cu +++ b/src/caffe/layers/reduction_layer.cu @@ -16,7 +16,7 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, int bottom_data_off = 0; int top_data_off = 0; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (sum_multiplier_.count() > 0) { mult_data = sum_multiplier_.gpu_data(); @@ -60,17 +60,17 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, switch (op_) { case ReductionParameter_ReductionOp_SUM: case ReductionParameter_ReductionOp_MEAN: - greentea_gpu_dot(this->device_context_.id(), dim_, + greentea_gpu_dot(this->device_context_->id(), dim_, (cl_mem) mult_data, 0, (cl_mem) bottom_data, bottom_data_off, top_data + top_data_off); break; case ReductionParameter_ReductionOp_ASUM: - greentea_gpu_asum(this->device_context_.id(), dim_, + greentea_gpu_asum(this->device_context_->id(), dim_, (cl_mem) bottom_data, bottom_data_off, top_data + top_data_off); break; case ReductionParameter_ReductionOp_SUMSQ: - greentea_gpu_dot(this->device_context_.id(), dim_, + greentea_gpu_dot(this->device_context_->id(), dim_, (cl_mem) bottom_data, bottom_data_off, (cl_mem) bottom_data, bottom_data_off, top_data + top_data_off); @@ -85,7 +85,7 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, if (coeff_ != Dtype(1)) { // Reset the top_data pointer. top_data = top[0]->mutable_gpu_data(); - greentea_gpu_scal(this->device_context_.id(), num_, coeff_, + greentea_gpu_scal(this->device_context_->id(), num_, coeff_, (cl_mem) top_data, 0); } #endif // USE_GREENTEA @@ -122,7 +122,7 @@ void ReductionLayer::Backward_gpu(const vector*>& top, int bottom_diff_off = 0; int top_diff_off = 0; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int i = 0; i < num_; ++i) { const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_; @@ -156,20 +156,20 @@ void ReductionLayer::Backward_gpu(const vector*>& top, switch (op_) { case ReductionParameter_ReductionOp_SUM: case ReductionParameter_ReductionOp_MEAN: - greentea_gpu_set(this->device_context_.id(), dim_, + greentea_gpu_set(this->device_context_->id(), dim_, bottom_coeff, (cl_mem) bottom_diff, bottom_diff_off); break; case ReductionParameter_ReductionOp_ASUM: - greentea_gpu_sign(this->device_context_.id(), dim_, + greentea_gpu_sign(this->device_context_->id(), dim_, (cl_mem) bottom_data, bottom_data_off, (cl_mem) bottom_diff, bottom_diff_off); - greentea_gpu_scal(this->device_context_.id(), dim_, + greentea_gpu_scal(this->device_context_->id(), dim_, bottom_coeff, (cl_mem) bottom_diff, bottom_diff_off); break; case ReductionParameter_ReductionOp_SUMSQ: - greentea_gpu_scale(this->device_context_.id(), dim_, + greentea_gpu_scale(this->device_context_->id(), dim_, 2 * bottom_coeff, (cl_mem) bottom_data, bottom_data_off, (cl_mem) bottom_diff, bottom_diff_off); diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index 6ac882334f9..be32327f803 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -28,7 +28,7 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) ReLUForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -39,9 +39,9 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( @@ -80,7 +80,7 @@ void ReLULayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -91,9 +91,9 @@ void ReLULayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( CL_KERNEL_SELECT("relu_backward")); viennacl::ocl::enqueue( diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 13e0581d151..5f9e3f702fb 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -28,7 +28,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // First, compute the diff caffe_copy(count, sigmoid_output_data, bottom_diff); @@ -40,17 +40,17 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); // First, compute the diff greentea_copy(count, (cl_mem)sigmoid_output_data, 0, (cl_mem)bottom_diff, 0, &ctx); - greentea_gpu_axpy(this->device_context_.id(), count, + greentea_gpu_axpy(this->device_context_->id(), count, Dtype(-1), (cl_mem)target, 0, (cl_mem)bottom_diff, 0); // Scale down gradient const Dtype loss_weight = top[0]->cpu_diff()[0]; - greentea_gpu_scal(this->device_context_.id(), count, loss_weight / num, + greentea_gpu_scal(this->device_context_->id(), count, loss_weight / num, (cl_mem)bottom_diff, 0); #endif // USE_GREENTEA } diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index b0576c41bcb..3f98c625769 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -23,7 +23,7 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) SigmoidForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -34,9 +34,9 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( CL_KERNEL_SELECT("sigmoid_forward")); @@ -75,7 +75,7 @@ void SigmoidLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) SigmoidBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -86,9 +86,9 @@ void SigmoidLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( CL_KERNEL_SELECT("sigmoid_backward")); diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index af1313fc0fd..82414517945 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -23,7 +23,7 @@ void SilenceLayer::Backward_gpu(const vector*>& top, const vector*>& bottom) { for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(bottom[i]->count(), Dtype(0), bottom[i]->mutable_gpu_data()); @@ -31,9 +31,9 @@ void SilenceLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel( CL_KERNEL_SELECT("gpu_set")); viennacl::ocl::enqueue( diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 1ca1b414d3b..54044b13ef2 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -41,7 +41,7 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA Slice // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( @@ -51,9 +51,9 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_slice = program.get_kernel( CL_KERNEL_SELECT("slice")); @@ -87,7 +87,7 @@ void SliceLayer::Backward_gpu(const vector*>& top, const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA Slice // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( @@ -97,9 +97,9 @@ void SliceLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_slice = program.get_kernel( CL_KERNEL_SELECT("slice")); diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 92cda7edf55..41a3a2bb248 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -106,7 +106,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, int channels = bottom[0]->channels(); int spatial_dim = bottom[0]->height() * bottom[0]->width(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // CUDA backend code caffe_copy(count, bottom_data, top_data); @@ -142,9 +142,9 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); greentea_copy(count, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, &ctx); @@ -211,7 +211,7 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(top[0]->count(), top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and @@ -231,9 +231,9 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); greentea_copy(top[0]->count(), (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, &ctx); @@ -255,7 +255,7 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); - greentea_gpu_mul(this->device_context_.id(), top[0]->count(), + greentea_gpu_mul(this->device_context_->id(), top[0]->count(), (cl_mem)bottom_diff, 0, (cl_mem)top_data, 0, (cl_mem)bottom_diff, 0); diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index c6ce3326524..6fa00eaa9ec 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -44,7 +44,7 @@ void SoftmaxWithLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); @@ -80,9 +80,9 @@ void SoftmaxWithLossLayer::Forward_gpu( } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); @@ -105,11 +105,11 @@ void SoftmaxWithLossLayer::Forward_gpu( Dtype loss; - greentea_gpu_asum(this->device_context_.id(), nthreads, loss_data, 0, + greentea_gpu_asum(this->device_context_->id(), nthreads, loss_data, 0, &loss); if (normalize_) { Dtype count; - greentea_gpu_asum(this->device_context_.id(), nthreads, counts, 0, + greentea_gpu_asum(this->device_context_->id(), nthreads, counts, 0, &count); loss /= count; } else { @@ -160,7 +160,7 @@ void SoftmaxWithLossLayer::Backward_gpu( << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); @@ -190,9 +190,9 @@ void SoftmaxWithLossLayer::Backward_gpu( } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); cl_mem prob_data = (cl_mem)(prob_.gpu_data()); @@ -218,12 +218,12 @@ void SoftmaxWithLossLayer::Backward_gpu( const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; - greentea_gpu_asum(this->device_context_.id(), + greentea_gpu_asum(this->device_context_->id(), nthreads, counts, 0, &count); - greentea_gpu_scal(this->device_context_.id(), + greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / count, bottom_diff, 0); } else { - greentea_gpu_scal(this->device_context_.id(), + greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / num, bottom_diff, 0); } #endif // USE_GREENTEA diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu index 1b3f0fd1eb5..7d1676466cf 100644 --- a/src/caffe/layers/split_layer.cu +++ b/src/caffe/layers/split_layer.cu @@ -22,7 +22,7 @@ void SplitLayer::Backward_gpu(const vector*>& top, return; } - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (top.size() == 1) { caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); @@ -40,16 +40,16 @@ void SplitLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); if (top.size() == 1) { greentea_copy(count_, (cl_mem) (top[0]->gpu_diff()), 0, (cl_mem) (bottom[0]->mutable_gpu_diff()), 0, &ctx); return; } - greentea_gpu_add(this->device_context_.id(), count_, + greentea_gpu_add(this->device_context_->id(), count_, (cl_mem) (top[0]->gpu_diff()), 0, (cl_mem) (top[1]->gpu_diff()), 0, (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); @@ -57,7 +57,7 @@ void SplitLayer::Backward_gpu(const vector*>& top, for (int i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - greentea_gpu_axpy(this->device_context_.id(), count_, Dtype(1.), + greentea_gpu_axpy(this->device_context_->id(), count_, Dtype(1.), (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); } #endif // USE_GREENTEA diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index 0302a2f0f96..97ee34be287 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -25,7 +25,7 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) TanHForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -36,9 +36,9 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_tanh = program.get_kernel( CL_KERNEL_SELECT("tanh_forward")); @@ -71,7 +71,7 @@ void TanHLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) TanHBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -82,9 +82,9 @@ void TanHLayer::Backward_gpu(const vector*>& top, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_tanh = program.get_kernel( CL_KERNEL_SELECT("tanh_backward")); diff --git a/src/caffe/layers/threshold_layer.cu b/src/caffe/layers/threshold_layer.cu index 676759d3d8a..d080bb6daad 100644 --- a/src/caffe/layers/threshold_layer.cu +++ b/src/caffe/layers/threshold_layer.cu @@ -28,7 +28,7 @@ void ThresholdLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) ThresholdForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -39,9 +39,9 @@ void ThresholdLayer::Forward_gpu(const vector*>& bottom, } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); + this->device_context_->id()); viennacl::ocl::kernel &oclk_threshold = program.get_kernel( CL_KERNEL_SELECT("threshold")); diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 27a413e57f4..b60efb0c0dd 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -817,14 +817,14 @@ void Net::Update() { case Caffe::GPU: { this_diff = params_[i]->gpu_diff(); owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_add(count, this_diff, owner_diff, owner_diff); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_add(dc.id(), count, (cl_mem) this_diff, 0, + greentea_gpu_add(dc->id(), count, (cl_mem) this_diff, 0, (cl_mem) owner_diff, 0, (cl_mem) owner_diff, 0); #endif // USE_GREENTEA diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 112c267b412..d8e53f46e33 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -178,14 +178,14 @@ void Solver::Step(int iters) { break; case Caffe::GPU: #ifndef CPU_ONLY - if (blob->device_context().backend() == BACKEND_CUDA) { + if (blob->device_context()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(blob->count(), static_cast(0), blob->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_set(blob->device_context().id(), + greentea_gpu_set(blob->device_context()->id(), blob->count(), static_cast(0), (cl_mem)(blob->mutable_gpu_diff()), 0); #endif // USE_GREENTEA @@ -525,14 +525,14 @@ void SGDSolver::Normalize(int param_id) { } case Caffe::GPU: { #ifndef CPU_ONLY - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, net_params[param_id]->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_scal(this->device_context_.id(), + greentea_gpu_scal(this->device_context_->id(), net_params[param_id]->count(), accum_normalization, (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); @@ -579,7 +579,7 @@ void SGDSolver::Regularize(int param_id) { } case Caffe::GPU: { #ifndef CPU_ONLY - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (local_decay) { if (regularization_type == "L2") { @@ -607,17 +607,17 @@ void SGDSolver::Regularize(int param_id) { if (local_decay) { if (regularization_type == "L2") { // add weight decay - greentea_gpu_axpy(this->device_context_.id(), + greentea_gpu_axpy(this->device_context_->id(), net_params[param_id]->count(), local_decay, (cl_mem)(net_params[param_id]->gpu_data()), 0, (cl_mem)(net_params[param_id]->mutable_gpu_diff()), 0); } else if (regularization_type == "L1") { - greentea_gpu_sign(this->device_context_.id(), + greentea_gpu_sign(this->device_context_->id(), net_params[param_id]->count(), (cl_mem)(net_params[param_id]->gpu_data()), 0, (cl_mem)(temp_[param_id]->mutable_gpu_data()), 0); - greentea_gpu_axpy(this->device_context_.id(), + greentea_gpu_axpy(this->device_context_->id(), net_params[param_id]->count(), local_decay, (cl_mem)(temp_[param_id]->gpu_data()), 0, @@ -658,7 +658,7 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { } case Caffe::GPU: { #ifndef CPU_ONLY - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_axpby(net_params[param_id]->count(), local_rate, net_params[param_id]->gpu_diff(), momentum, @@ -670,10 +670,10 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); greentea_gpu_axpby( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, momentum, (cl_mem) (history_[param_id]->mutable_gpu_data()), 0); greentea_copy( @@ -744,7 +744,7 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { } case Caffe::GPU: { #ifndef CPU_ONLY - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // save history momentum for stepping back caffe_copy(net_params[param_id]->count(), @@ -769,7 +769,7 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); + this->device_context_->id()); // save history momentum for stepping back greentea_copy( @@ -779,14 +779,14 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { // update history greentea_gpu_axpby( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, momentum, (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); // compute update: step back then over step greentea_gpu_axpby( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), Dtype(1) + momentum, (cl_mem) (this->history_[param_id]->gpu_data()), 0, -momentum, (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); @@ -848,7 +848,7 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { } case Caffe::GPU: { #ifndef CPU_ONLY - if (this->device_context_.backend() == BACKEND_CUDA) { + if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // compute square of gradient in update caffe_gpu_powx(net_params[param_id]->count(), @@ -883,36 +883,36 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { #ifdef USE_GREENTEA // compute square of gradient in update greentea_gpu_powx( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); // update history greentea_gpu_add( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), (cl_mem) (this->update_[param_id]->gpu_data()), 0, (cl_mem) (this->history_[param_id]->gpu_data()), 0, (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); // prepare update greentea_gpu_powx( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), (cl_mem) (this->history_[param_id]->gpu_data()), 0, Dtype(0.5), (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); greentea_gpu_add_scalar( - this->device_context_.id(), net_params[param_id]->count(), delta, + this->device_context_->id(), net_params[param_id]->count(), delta, (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); greentea_gpu_div( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), (cl_mem) (net_params[param_id]->gpu_diff()), 0, (cl_mem) (this->update_[param_id]->gpu_data()), 0, (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); // scale and copy greentea_gpu_axpby( - this->device_context_.id(), net_params[param_id]->count(), + this->device_context_->id(), net_params[param_id]->count(), local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); #endif // USE_GREENTEA diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 01aafe674c3..0e4b7332078 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -1,6 +1,7 @@ #include #include "caffe/common.hpp" +#include "caffe/device_context.hpp" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" @@ -20,7 +21,7 @@ SyncedMemory::~SyncedMemory() { #ifndef CPU_ONLY if (gpu_ptr_) { - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaFree(gpu_ptr_)); #endif // USE_CUDA @@ -48,14 +49,14 @@ inline void SyncedMemory::to_cpu() { CaffeMallocHost(&cpu_ptr_, size_); own_cpu_data_ = true; } - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_); #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( - device_context_.id()); + device_context_->id()); ctx.get_queue().finish(); // On the CPU, memory is shared (and no copy needed) if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { @@ -80,7 +81,7 @@ inline void SyncedMemory::to_gpu() { #ifndef CPU_ONLY switch (head_) { case UNINITIALIZED: { - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); caffe_gpu_memset(size_, 0, gpu_ptr_); @@ -88,7 +89,7 @@ inline void SyncedMemory::to_gpu() { } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( - device_context_.id()); + device_context_->id()); ctx.get_queue().finish(); cl_int err; if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -104,7 +105,7 @@ inline void SyncedMemory::to_gpu() { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); int alpha = 0; - greentea_memset(device_context_.id(), size_, alpha, cl_gpu_mem_, 0); + greentea_memset(device_context_->id(), size_, alpha, cl_gpu_mem_, 0); } gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); @@ -114,7 +115,7 @@ inline void SyncedMemory::to_gpu() { break; } case HEAD_AT_CPU: { - if (device_context_.backend() == Backend::BACKEND_CUDA) { + if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA if (gpu_ptr_ == NULL) { CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); @@ -124,7 +125,7 @@ inline void SyncedMemory::to_gpu() { } else { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( - device_context_.id()); + device_context_->id()); ctx.get_queue().finish(); if (gpu_ptr_ == NULL) { cl_int err; @@ -173,10 +174,10 @@ void SyncedMemory::set_cpu_data(void* data) { CaffeFreeHost(cpu_ptr_); } cpu_ptr_ = data; - if (device_context_.backend() == Backend::BACKEND_OpenCL) { + if (device_context_->backend() == Backend::BACKEND_OpenCL) { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( - device_context_.id()); + device_context_->id()); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { // If host memory is released and shared gpu_ptr_ = NULL; diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index 5647c602c21..9481adcf8ed 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -15,7 +15,7 @@ class CommonTest : public ::testing::Test {}; #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestCublasHandlerGPU) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA int cuda_device_id; CUDA_CHECK(cudaGetDevice(&cuda_device_id)); @@ -51,9 +51,9 @@ TEST_F(CommonTest, TestRandSeedCPU) { #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestRandSeedGPU) { - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA SyncedMemory data_a(10 * sizeof(unsigned int), Caffe::GetDefaultDeviceContext()); diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 37884f391d2..5eee3754b17 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -467,7 +467,7 @@ class CuDNNConvolutionLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -506,7 +506,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -543,7 +543,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -572,7 +572,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. @@ -670,7 +670,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -689,7 +689,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 62276b274b9..321f29de516 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -71,7 +71,7 @@ class Im2colKernelTest : public GPUDeviceTest { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 6e3cea87823..536cb200fd2 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -186,15 +186,15 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { } TypeParam gpu_asum; - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_asum(n, this->blob_bottom_->gpu_data(), &gpu_asum); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_asum(dc.id(), n, + greentea_gpu_asum(dc->id(), n, (cl_mem)(this->blob_bottom_->gpu_data()), 0, &gpu_asum); #endif // USE_GREENTEA } @@ -204,16 +204,16 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { TYPED_TEST(GPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sign(dc.id(), n, + greentea_gpu_sign(dc->id(), n, (cl_mem)(this->blob_bottom_->gpu_data()), 0, (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); #endif // USE_GREENTEA @@ -229,16 +229,16 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { int n = this->blob_bottom_->count(); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_sgnbit(dc.id(), n, + greentea_gpu_sgnbit(dc->id(), n, (cl_mem)(this->blob_bottom_->gpu_data()), 0, (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); #endif // USE_GREENTEA @@ -254,16 +254,16 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { TYPED_TEST(GPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_abs(dc.id(), n, + greentea_gpu_abs(dc->id(), n, (cl_mem)(this->blob_bottom_->gpu_data()), 0, (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); #endif // USE_GREENTEA @@ -281,15 +281,15 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; - DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_scale(n, alpha, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_scale(dc.id(), n, alpha, + greentea_gpu_scale(dc->id(), n, alpha, (cl_mem)(this->blob_bottom_->gpu_data()), 0, (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0); #endif // USE_GREENTEA @@ -307,15 +307,15 @@ TYPED_TEST(GPUMathFunctionsTest, TestCopy) { const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(n, bottom_data, top_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - dc.id()); + dc->id()); greentea_copy(n, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, &ctx); diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 8e5f60ee222..70ab0bad359 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -735,7 +735,7 @@ class CuDNNNeuronLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -751,7 +751,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -761,7 +761,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -782,7 +782,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -794,7 +794,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -812,7 +812,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -822,7 +822,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -846,7 +846,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 5a474035276..ef861bc8e62 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -963,7 +963,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -978,7 +978,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1021,7 +1021,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { this->TestForwardSquare(); this->TestForwardRectHigh(); this->TestForwardRectWide(); @@ -1040,7 +1040,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1061,7 +1061,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1128,7 +1128,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1155,7 +1155,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1174,7 +1174,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 48fd0bcb910..7460990f523 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -183,15 +183,15 @@ class RandomNumberGeneratorTest : public ::testing::Test { void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu_data) { Dtype* rng_data = static_cast(gpu_data); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_rng_gaussian(sample_size_, mu, sigma, rng_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_rng_gaussian(dc.id(), sample_size_, + greentea_gpu_rng_gaussian(dc->id(), sample_size_, mu, sigma, (cl_mem)rng_data, 0); #endif // USE_GREENTEA } @@ -201,15 +201,15 @@ class RandomNumberGeneratorTest : public ::testing::Test { CHECK_GE(upper, lower); Dtype* rng_data = static_cast(gpu_data); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_rng_uniform(sample_size_, lower, upper, rng_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_rng_uniform(dc.id(), sample_size_, + greentea_gpu_rng_uniform(dc->id(), sample_size_, lower, upper, (cl_mem)rng_data, 0); #endif // USE_GREENTEA } @@ -219,15 +219,15 @@ class RandomNumberGeneratorTest : public ::testing::Test { // caffe_gpu_rng_uniform. void RngUniformIntFillGPU(void* gpu_data) { unsigned int* rng_data = static_cast(gpu_data); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_rng_uniform(sample_size_, rng_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_rng_uniform(dc.id(), sample_size_, (cl_mem)rng_data, 0); + greentea_gpu_rng_uniform(dc->id(), sample_size_, (cl_mem)rng_data, 0); #endif // USE_GREENTEA } } diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index f433b16eda4..ea27ba45eaf 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -104,7 +104,7 @@ class CuDNNSoftmaxLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -139,7 +139,7 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index 3e84dea7adb..0800410853c 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -87,15 +87,15 @@ TEST_F(SyncedMemoryTest, TestGPURead) { // check if values are the same char* recovered_value = new char[10]; - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memcpy(10, gpu_data, recovered_value); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); + viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc->id()); greentea_gpu_memcpy(10, (cl_mem) gpu_data, 0, recovered_value, &ctx); #endif // USE_GREENTEA } @@ -114,13 +114,13 @@ TEST_F(SyncedMemoryTest, TestGPURead) { EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); // check if values are the same - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memcpy(10, gpu_data, recovered_value); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc.id()); + viennacl::ocl::context &ctx = viennacl::ocl::get_context(dc->id()); greentea_gpu_memcpy(10, (cl_mem) gpu_data, 0, recovered_value, &ctx); #endif // USE_GREENTEA } @@ -136,15 +136,15 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { void* gpu_data = mem.mutable_gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memset(mem.size(), 1, gpu_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_memset(dc.id(), mem.size(), 1, (cl_mem) gpu_data, 0); + greentea_memset(dc->id(), mem.size(), 1, (cl_mem) gpu_data, 0); #endif // USE_GREENTEA } @@ -157,13 +157,13 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { gpu_data = mem.mutable_gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_memset(mem.size(), 2, gpu_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_memset(dc.id(), mem.size(), 2, (cl_mem) gpu_data, 0); + greentea_memset(dc->id(), mem.size(), 2, (cl_mem) gpu_data, 0); #endif // USE_GREENTEA } diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index f32a79a0ab8..fbf434b1e33 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -18,7 +18,7 @@ class GemmTest : public ::testing::Test {}; TYPED_TEST_CASE(GemmTest, TestDtypes); TYPED_TEST(GemmTest, TestGemmCPUGPU) { - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); Blob B(1, 1, 3, 4, Caffe::GetDefaultDeviceContext()); @@ -39,14 +39,14 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { } - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasNoTrans, + greentea_gpu_gemm(dc->id(), CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., (cl_mem)(A.gpu_data()), 0, (cl_mem)(B.gpu_data()), 0, 0., @@ -67,14 +67,14 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { EXPECT_EQ(C.cpu_data()[i], result[i]); } - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasTrans, CblasNoTrans, + greentea_gpu_gemm(dc->id(), CblasTrans, CblasNoTrans, 2, 4, 3, 1., (cl_mem)(A.gpu_data()), 0, (cl_mem)(B.gpu_data()), 0, @@ -95,14 +95,14 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { EXPECT_EQ(C.cpu_data()[i], result[i]); } - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasTrans, CblasTrans, + greentea_gpu_gemm(dc->id(), CblasTrans, CblasTrans, 2, 4, 3, 1., (cl_mem)(A.gpu_data()), 0, (cl_mem)(B.gpu_data()), 0, 0., @@ -123,14 +123,14 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { EXPECT_EQ(C.cpu_data()[i], result[i]); } - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemm(dc.id(), CblasNoTrans, CblasTrans, + greentea_gpu_gemm(dc->id(), CblasNoTrans, CblasTrans, 2, 4, 3, 1., (cl_mem)(A.gpu_data()), 0, (cl_mem)(B.gpu_data()), 0, 0., @@ -145,7 +145,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { TYPED_TEST(GemmTest, TestGemvCPUGPU) { - DeviceContext dc = Caffe::GetDefaultDeviceContext(); + DeviceContext *dc = Caffe::GetDefaultDeviceContext(); Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); Blob x(1, 1, 1, 3, Caffe::GetDefaultDeviceContext()); @@ -164,14 +164,14 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { EXPECT_EQ(y.cpu_data()[i], result_2[i]); } - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemv(CblasNoTrans, 2, 3, 1., A.gpu_data(), x.gpu_data(), 0., y.mutable_gpu_data()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(dc.id(), CblasNoTrans, + greentea_gpu_gemv(dc->id(), CblasNoTrans, 2, 3, 1., (cl_mem)(A.gpu_data()), 0, (cl_mem)(x.gpu_data()), 0, 0., @@ -191,14 +191,14 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { EXPECT_EQ(x.cpu_data()[i], result_3[i]); } - if (dc.backend() == BACKEND_CUDA) { + if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemv(CblasTrans, 2, 3, 1., A.gpu_data(), y.gpu_data(), 0., x.mutable_gpu_data()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(dc.id(), CblasTrans, + greentea_gpu_gemv(dc->id(), CblasTrans, 2, 3, 1., (cl_mem)(A.gpu_data()), 0, (cl_mem)(y.gpu_data()), 0, 0., diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index cad58ad994b..6c72b720d93 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -1,6 +1,7 @@ #include #include "caffe/common.hpp" +#include "caffe/device_context.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { @@ -12,7 +13,7 @@ Timer::Timer() Timer::~Timer() { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventDestroy(start_gpu_)); @@ -27,7 +28,7 @@ Timer::~Timer() { void Timer::Start() { if (!running()) { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(start_gpu_, 0)); @@ -46,7 +47,7 @@ void Timer::Start() { void Timer::Stop() { if (running()) { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); @@ -71,7 +72,7 @@ float Timer::MicroSeconds() { Stop(); } if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, @@ -97,7 +98,7 @@ float Timer::MilliSeconds() { Stop(); } if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, @@ -119,7 +120,7 @@ float Timer::Seconds() { void Timer::Init() { if (!initted()) { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext().backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventCreate(&start_gpu_)); From 1345b39b1e42a72931b6ef6d99e23149a084b555 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 26 Jun 2015 22:25:18 +0200 Subject: [PATCH 081/254] Device local memory. --- include/caffe/device_context.hpp | 9 ++++----- include/caffe/net.hpp | 2 ++ include/caffe/solver.hpp | 1 + include/caffe/vision_layers.hpp | 1 + src/caffe/device_context.cpp | 14 ++++++++------ 5 files changed, 16 insertions(+), 11 deletions(-) diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index a28bbd05d34..74c45c0ac25 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -8,6 +8,7 @@ #ifndef CAFFE_DEVICE_CONTEXT_HPP_ #define CAFFE_DEVICE_CONTEXT_HPP_ +#include #include #include "caffe/blob.hpp" #include "caffe/greentea/greentea.hpp" @@ -25,18 +26,16 @@ class DeviceContext { int id() const; int WorkgroupSize(int id); - template - Blob * Buffer(int id); - + shared_ptr< Blob > Buffer(int id); private: void Init(); std::vector workgroup_sizes_; int id_; Backend backend_; - std::vector< Blob > buff_f_; - std::vector< Blob > buff_d_; + std::vector< shared_ptr< Blob > > buff_f_; + std::vector< shared_ptr< Blob > > buff_d_; }; } // namespace caffe diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index e2cd7b8c13a..bef5e10ef9c 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -9,9 +9,11 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" +#include "caffe/device_context.hpp" #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" + namespace caffe { /** diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 5968f56c64b..e010eb45ccb 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -5,6 +5,7 @@ #include #include "caffe/net.hpp" +#include "caffe/device_context.hpp" namespace caffe { diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 630667c8de4..aa16e37804f 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -9,6 +9,7 @@ #include "caffe/common.hpp" #include "caffe/common_layers.hpp" #include "caffe/data_layers.hpp" +#include "caffe/device_context.hpp" #include "caffe/layer.hpp" #include "caffe/loss_layers.hpp" #include "caffe/neuron_layers.hpp" diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index 3e06d7362e6..00ddd31c68a 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -54,19 +54,21 @@ int DeviceContext::WorkgroupSize(int id) { } template<> -Blob *DeviceContext::Buffer(int id) { +shared_ptr< Blob > DeviceContext::Buffer(int id) { if (buff_f_.size() <= id) { - buff_f_.push_back(Blob(this)); + shared_ptr > blob_pointer(new Blob(this)); + buff_f_.push_back(blob_pointer); } - return &(buff_f_[id]); + return buff_f_[id]; } template<> -Blob *DeviceContext::Buffer(int id) { +shared_ptr< Blob > DeviceContext::Buffer(int id) { if (buff_d_.size() <= id) { - buff_d_.push_back(Blob(this)); + shared_ptr > blob_pointer(new Blob(this)); + buff_d_.push_back(blob_pointer); } - return &(buff_d_[id]); + return buff_d_[id]; } } // namespace caffe From 295307efced8b5d50c44de0e8d65d2b05c07d309 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 27 Jun 2015 00:01:56 +0200 Subject: [PATCH 082/254] Device Context fix. --- include/caffe/device_context.hpp | 4 +++- include/caffe/solver.hpp | 2 +- src/caffe/common.cpp | 10 ++++++++-- src/caffe/device_context.cpp | 15 +++++++++++++-- src/caffe/syncedmem.cpp | 2 ++ 5 files changed, 27 insertions(+), 6 deletions(-) diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index 74c45c0ac25..ca87ef4694f 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -29,8 +29,10 @@ class DeviceContext { template shared_ptr< Blob > Buffer(int id); - private: + int num_queues(); + void Init(); + private: std::vector workgroup_sizes_; int id_; Backend backend_; diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index e010eb45ccb..47a5ca17c62 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -4,8 +4,8 @@ #include #include -#include "caffe/net.hpp" #include "caffe/device_context.hpp" +#include "caffe/net.hpp" namespace caffe { diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index b5444ce4e6f..72c8533285a 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -260,7 +260,12 @@ void Caffe::SetDevices(std::vector device_ids) { cudaGetDeviceCount(&cuda_device_count); #endif // USE_CUDA for (int i = 0; i < cuda_device_count; ++i) { - Get().device_contexts_.push_back(DeviceContext(i, Backend::BACKEND_CUDA)); + Get().device_contexts_.emplace_back(DeviceContext(i, Backend::BACKEND_CUDA)); + for(int j = 0; j < device_ids.size(); ++j) { + if(device_ids[j] == i) { + Caffe::GetDeviceContext(i)->Init(); + } + } #ifdef USE_GREENTEA // Dummy to have same vector size as device contexts viennacl::ocl::program program; @@ -286,7 +291,7 @@ void Caffe::SetDevices(std::vector device_ids) { for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { platform_devices.push_back( std::make_tuple(platforms[platform_id], devices[device_id])); - Get().device_contexts_.push_back( + Get().device_contexts_.emplace_back( DeviceContext(cuda_device_count + greentea_device_count, Backend::BACKEND_OpenCL)); // Check if this device is really used and initialize @@ -309,6 +314,7 @@ void Caffe::SetDevices(std::vector device_ids) { for (int q = 0; q < GREENTEA_QUEUE_COUNT - 1; ++q) { ctx.add_queue(ctx.current_device()); } + Caffe::GetDeviceContext(device_id)->Init(); is_used = true; } } diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index 00ddd31c68a..e79493e3efb 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -15,12 +15,10 @@ namespace caffe { DeviceContext::DeviceContext() : workgroup_sizes_(3, 0), id_(0), backend_(Backend::BACKEND_CUDA) { - this->Init(); } DeviceContext::DeviceContext(int id, Backend backend) : workgroup_sizes_(3, 0), id_(id), backend_(backend) { - this->Init(); } void DeviceContext::Init() { @@ -53,6 +51,19 @@ int DeviceContext::WorkgroupSize(int id) { return 0; } +int DeviceContext::num_queues() { + if (backend_ == BACKEND_CUDA) { +#ifdef USE_CUDA + return 1; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + return GREENTEA_QUEUE_COUNT; +#endif // USE_GREENTEA + } + return 1; +} + template<> shared_ptr< Blob > DeviceContext::Buffer(int id) { if (buff_f_.size() <= id) { diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 0e4b7332078..b9cdd062fb4 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -105,6 +105,8 @@ inline void SyncedMemory::to_gpu() { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); int alpha = 0; + // REMOVE: + std::cout << device_context_->id() << std::endl; greentea_memset(device_context_->id(), size_, alpha, cl_gpu_mem_, 0); } gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); From 9375b7606f71f423e20122541e78ba791991cbe4 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 27 Jun 2015 03:14:02 +0200 Subject: [PATCH 083/254] OpenCL speedup with multi-queue execution. --- include/caffe/device_context.hpp | 4 ++ include/caffe/greentea/greentea.hpp | 2 - include/caffe/vision_layers.hpp | 2 + src/caffe/common.cpp | 7 ++-- src/caffe/device_context.cpp | 43 +++++++++++++++++++- src/caffe/greentea/greentea.cpp | 8 ---- src/caffe/layers/base_conv_layer.cpp | 53 +++++++++++++++++++------ src/caffe/layers/conv_layer.cu | 59 ++++++---------------------- src/caffe/layers/conv_sk_layer.cu | 2 - src/caffe/syncedmem.cpp | 2 - 10 files changed, 103 insertions(+), 79 deletions(-) diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index ca87ef4694f..140e9f35c66 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -24,15 +24,19 @@ class DeviceContext { explicit DeviceContext(int id, Backend backend); Backend backend() const; int id() const; + int current_queue_id(); int WorkgroupSize(int id); template shared_ptr< Blob > Buffer(int id); int num_queues(); + void SwitchQueue(int id); + void FinishQueues(); void Init(); private: + int current_queue_id_; std::vector workgroup_sizes_; int id_; Backend backend_; diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 14e16f79fd7..061e3e4e381 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -58,8 +58,6 @@ struct is_same { #ifdef USE_GREENTEA -void FinishQueues(viennacl::ocl::context *ctx); - #ifdef USE_CLBLAS #define GREENTEA_CL_BLAS_CHECK(condition) \ {clblasStatus status = condition; \ diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index aa16e37804f..c8775fd298d 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -108,6 +108,8 @@ class BaseConvolutionLayer : public Layer { const Dtype* output, const int output_off, Dtype* weights); void backward_gpu_bias(Dtype* bias, const Dtype* input, const int input_off); + + shared_ptr< Blob > col_buffer(); #endif // reverse_dimensions should return true iff we are implementing deconv, so diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 72c8533285a..80d7d5de60e 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -260,9 +260,10 @@ void Caffe::SetDevices(std::vector device_ids) { cudaGetDeviceCount(&cuda_device_count); #endif // USE_CUDA for (int i = 0; i < cuda_device_count; ++i) { - Get().device_contexts_.emplace_back(DeviceContext(i, Backend::BACKEND_CUDA)); - for(int j = 0; j < device_ids.size(); ++j) { - if(device_ids[j] == i) { + Get().device_contexts_.emplace_back( + DeviceContext(i, Backend::BACKEND_CUDA)); + for (int j = 0; j < device_ids.size(); ++j) { + if (device_ids[j] == i) { Caffe::GetDeviceContext(i)->Init(); } } diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index e79493e3efb..b9e4d9d300b 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -14,11 +14,13 @@ namespace caffe { DeviceContext::DeviceContext() - : workgroup_sizes_(3, 0), id_(0), backend_(Backend::BACKEND_CUDA) { + : current_queue_id_(0), workgroup_sizes_(3, 0), id_(0), + backend_(Backend::BACKEND_CUDA) { } DeviceContext::DeviceContext(int id, Backend backend) - : workgroup_sizes_(3, 0), id_(id), backend_(backend) { + : current_queue_id_(0), workgroup_sizes_(3, 0), id_(id), + backend_(backend) { } void DeviceContext::Init() { @@ -82,4 +84,41 @@ shared_ptr< Blob > DeviceContext::Buffer(int id) { return buff_d_[id]; } +int DeviceContext::current_queue_id() { + return current_queue_id_; +} + +void DeviceContext::SwitchQueue(int id) { + if (backend_ == BACKEND_CUDA) { +#ifdef USE_CUDA + (void) id; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(id_); + ctx.switch_queue(id % num_queues()); + current_queue_id_ = id % num_queues(); +#endif // USE_GREENTEA + } +} + +void DeviceContext::FinishQueues() { + if (backend_ == BACKEND_CUDA) { +#ifdef USE_CUDA +#endif // USE_CUDA + } else { + #ifdef USE_GREENTEA + viennacl::ocl::context &ctx = + viennacl::ocl::get_context(id_); + for (int i = 0; i < num_queues(); ++i) { + ctx.switch_queue(i); + ctx.get_queue().finish(); + } + ctx.switch_queue(0); + current_queue_id_ = 0; + #endif // USE_GREENTEA + } +} + } // namespace caffe diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index a8547c81a5a..da516dd8324 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -28,14 +28,6 @@ viennacl::ocl::handle WrapHandle(cl_mem in, } } -void FinishQueues(viennacl::ocl::context *ctx) { - for (int i = 0; i < GREENTEA_QUEUE_COUNT; ++i) { - ctx->switch_queue(i); - ctx->get_queue().finish(); - } - ctx->switch_queue(0); -} - #endif diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 6622a38834b..f4ef196615a 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -148,10 +148,25 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, // The im2col result buffer will only hold one image at a time to avoid // overly large memory usage. In the special case of 1x1 convolution // it goes lazily unused to save memory. - if (reverse_dimensions()) { - col_buffer_.Reshape(1, kernel_dim_, height_, width_); + if (Caffe::mode() == Caffe::Brew::CPU) { + if (reverse_dimensions()) { + col_buffer_.Reshape(1, kernel_dim_, height_, width_); + } else { + col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_); + } } else { - col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_); + // Shared column buffer per device-queue across all layers on that device + for (int i = 0; i < this->device_context_->num_queues(); ++i) { + if (reverse_dimensions()) { + shared_ptr< Blob > buffer = + this->device_context_->template Buffer(i); + buffer->Reshape(1, kernel_dim_, height_, width_); + } else { + shared_ptr< Blob > buffer = + this->device_context_->template Buffer(i); + buffer->Reshape(1, kernel_dim_, height_out_, width_out_); + } + } } // Set up the all ones "bias multiplier" for adding biases by BLAS if (bias_term_) { @@ -256,9 +271,9 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, #ifdef USE_CUDA if (!is_1x1_) { if (!skip_im2col) { - conv_im2col_gpu(input + input_off, col_buffer_.mutable_gpu_data()); + conv_im2col_gpu(input + input_off, col_buffer()->mutable_gpu_data()); } - col_buff = col_buffer_.gpu_data(); + col_buff = col_buffer()->gpu_data(); } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm( @@ -274,9 +289,9 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, if (!is_1x1_) { if (!skip_im2col) { greentea_conv_im2col_gpu(input, input_off, - col_buffer_.mutable_gpu_data(), 0); + col_buffer()->mutable_gpu_data(), 0); } - col_buff = col_buffer_.gpu_data(); + col_buff = col_buffer()->gpu_data(); } for (int g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, @@ -321,7 +336,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, const Dtype* weights, Dtype* input, const int input_off) { - Dtype* col_buff = col_buffer_.mutable_gpu_data(); + Dtype* col_buff = col_buffer()->mutable_gpu_data(); if (is_1x1_) { col_buff = input; } @@ -367,8 +382,8 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (!is_1x1_) { - conv_im2col_gpu(input + input_off, col_buffer_.mutable_gpu_data()); - col_buff = col_buffer_.gpu_data(); + conv_im2col_gpu(input + input_off, col_buffer()->mutable_gpu_data()); + col_buff = col_buffer()->gpu_data(); } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm( @@ -382,9 +397,9 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, } else { #ifdef USE_GREENTEA if (!is_1x1_) { - greentea_conv_im2col_gpu(input, input_off, col_buffer_.mutable_gpu_data(), - 0); - col_buff = col_buffer_.gpu_data(); + greentea_conv_im2col_gpu(input, input_off, + col_buffer()->mutable_gpu_data(), 0); + col_buff = col_buffer()->gpu_data(); } for (int g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, @@ -422,6 +437,18 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, } } +template +shared_ptr< Blob > BaseConvolutionLayer::col_buffer() { + if (this->device_context_->backend() == BACKEND_CUDA) { + return this->device_context_->template Buffer(0); + } else { + viennacl::ocl::context ctx = + viennacl::ocl::get_context(this->device_context_->id()); + return this->device_context_-> + template Buffer(this->device_context_->current_queue_id()); + } +} + #endif // !CPU_ONLY INSTANTIATE_CLASS(BaseConvolutionLayer); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 9367e3438ad..83001595c37 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -21,15 +21,11 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); + // Multi queue execution, all previous work needs to be done first + this->device_context_->FinishQueues(); for (int n = 0; n < this->num_; ++n) { -#ifdef USE_GREENTEA - if (this->device_context_->backend() == BACKEND_OpenCL) { - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_->id()); - // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); - } -#endif // USE_GREENTEA - + // Multi queue execution, go through work queues + this->device_context_->SwitchQueue(n); this->forward_gpu_gemm(bottom_data, bottom[i]->offset(n), weight, top_data, top[i]->offset(n)); if (this->bias_term_) { @@ -37,13 +33,8 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, this->forward_gpu_bias(top_data, top[i]->offset(n), bias); } } -#ifdef USE_GREENTEA - if (this->device_context_->backend() == BACKEND_OpenCL) { - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_->id()); - FinishQueues(&ctx); - } -#endif // USE_GREENTEA + // Multi queue execution, finish all queues + this->device_context_->FinishQueues(); } } @@ -57,38 +48,20 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + // Multi queue execution, all previous work needs to be done first + this->device_context_->FinishQueues(); for (int n = 0; n < this->num_; ++n) { -#ifdef USE_GREENTEA - if (this->device_context_->backend() == BACKEND_OpenCL) { - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_->id()); - // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); - } -#endif // USE_GREENTEA - + // Multi queue execution, go through work queues + this->device_context_->SwitchQueue(n); this->backward_gpu_bias(bias_diff, top_diff, top[i]->offset(n)); - -#ifdef USE_GREENTEA - if (this->device_context_->backend() == BACKEND_OpenCL) { - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_->id()); - FinishQueues(&ctx); - } -#endif // USE_GREENTEA } + // Multi queue execution, finish all queues + this->device_context_->FinishQueues(); } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { -#ifdef USE_GREENTEA - if (this->device_context_->backend() == BACKEND_OpenCL) { - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_->id()); - // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); - } -#endif // USE_GREENTEA - // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data, bottom[i]->offset(n), @@ -99,14 +72,6 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, this->backward_gpu_gemm(top_diff, top[i]->offset(n), weight, bottom_diff, bottom[i]->offset(n)); } - -#ifdef USE_GREENTEA - if (this->device_context_->backend() == BACKEND_OpenCL) { - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(this->device_context_->id()); - FinishQueues(&ctx); - } -#endif // USE_GREENTEA } } } diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 9b18b9060ac..7326385eb0c 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -70,7 +70,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { - // ctx.switch_queue(n % GREENTEA_QUEUE_COUNT); // First, im2col greentea_im2col_sk_gpu(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, @@ -96,7 +95,6 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, (Dtype) 1., top_data, top[i]->offset(n)); } } - // FinishQueues(&ctx); } #endif } diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index b9cdd062fb4..0e4b7332078 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -105,8 +105,6 @@ inline void SyncedMemory::to_gpu() { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); int alpha = 0; - // REMOVE: - std::cout << device_context_->id() << std::endl; greentea_memset(device_context_->id(), size_, alpha, cl_gpu_mem_, 0); } gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); From 5c13fa3cb31aceb7ff890c19daae9dfe350110b6 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 27 Jun 2015 03:29:37 +0200 Subject: [PATCH 084/254] CPU_ONLY and CUDA build fix. --- include/caffe/syncedmem.hpp | 4 ++-- src/caffe/layers/base_conv_layer.cpp | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index a4b0082897e..e926f6dd502 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -73,10 +73,10 @@ class SyncedMemory { SyncedMemory() : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), own_cpu_data_(false), device_context_(Caffe::GetDefaultDeviceContext()) {} - explicit SyncedMemory(DeviceContext device_context) + explicit SyncedMemory(DeviceContext *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), own_cpu_data_(false), device_context_(device_context) {} - explicit SyncedMemory(size_t size, DeviceContext device_context) + explicit SyncedMemory(size_t size, DeviceContext *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), own_cpu_data_(false), device_context_(device_context) {} #endif diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index f4ef196615a..73837c8482a 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -440,12 +440,16 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, template shared_ptr< Blob > BaseConvolutionLayer::col_buffer() { if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA return this->device_context_->template Buffer(0); +#endif // USE_CUDA } else { +#ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context(this->device_context_->id()); return this->device_context_-> template Buffer(this->device_context_->current_queue_id()); +#endif // USE_GREENTEA } } From 3769bd759984620c8d63a42d0cd4e7a403cd9c17 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 27 Jun 2015 03:48:22 +0200 Subject: [PATCH 085/254] Small fix. --- src/caffe/layers/base_conv_layer.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 73837c8482a..8d43eaf7b75 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -451,6 +451,7 @@ shared_ptr< Blob > BaseConvolutionLayer::col_buffer() { template Buffer(this->device_context_->current_queue_id()); #endif // USE_GREENTEA } + return NULL; } #endif // !CPU_ONLY From 7d95d57fb38dfd69d9a8f65b1ce02932e5507107 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 27 Jun 2015 23:58:26 +0200 Subject: [PATCH 086/254] ConvSK memory reduction. --- include/caffe/vision_layers.hpp | 2 ++ src/caffe/layers/base_conv_layer.cpp | 11 ----------- src/caffe/layers/conv_sk_layer.cpp | 11 ++++++++++- src/caffe/layers/conv_sk_layer.cu | 18 ++++++++++++------ 4 files changed, 24 insertions(+), 18 deletions(-) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index c8775fd298d..9e42f4dd15c 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -233,6 +233,8 @@ class ConvolutionSKLayer : public Layer { const vector& propagate_down, const vector*>& bottom); + shared_ptr< Blob > col_buffer(); + int kernel_h_, kernel_w_; int stride_h_, stride_w_; int channels_; diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 8d43eaf7b75..4f9e0921743 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -439,19 +439,8 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, template shared_ptr< Blob > BaseConvolutionLayer::col_buffer() { - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - return this->device_context_->template Buffer(0); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context ctx = - viennacl::ocl::get_context(this->device_context_->id()); return this->device_context_-> template Buffer(this->device_context_->current_queue_id()); -#endif // USE_GREENTEA - } - return NULL; } #endif // !CPU_ONLY diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index 20a61e7ca78..aae757e91c6 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -83,8 +83,17 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; - col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, + if (Caffe::mode() == Caffe::Brew::CPU) { + col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, width_out); + } else { + // Shared column buffer per device-queue across all layers on that device + for (int i = 0; i < this->device_context_->num_queues(); ++i) { + shared_ptr< Blob > buffer = + this->device_context_->template Buffer(i); + buffer->Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, width_out); + } + } // Set the parameters CHECK_EQ(num_output_ % group_, 0) diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu index 7326385eb0c..dd58b55914e 100644 --- a/src/caffe/layers/conv_sk_layer.cu +++ b/src/caffe/layers/conv_sk_layer.cu @@ -23,7 +23,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); - Dtype* col_data = col_buffer_.mutable_gpu_data(); + Dtype* col_data = col_buffer()->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; int col_offset = K_ * N_; @@ -62,7 +62,7 @@ void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); - cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); + cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); int weight_offset = M_ * K_; @@ -137,8 +137,8 @@ void ConvolutionSKLayer::Backward_gpu( if (!top_diff) { top_diff = top[i]->gpu_diff(); } - Dtype* col_data = col_buffer_.mutable_gpu_data(); - Dtype* col_diff = col_buffer_.mutable_gpu_diff(); + Dtype* col_data = col_buffer()->mutable_gpu_data(); + Dtype* col_diff = col_buffer()->mutable_gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < num_; ++n) { @@ -220,8 +220,8 @@ void ConvolutionSKLayer::Backward_gpu( if (!top_diff) { top_diff = (cl_mem) (top[i]->gpu_diff()); } - cl_mem col_data = (cl_mem) (col_buffer_.mutable_gpu_data()); - cl_mem col_diff = (cl_mem) (col_buffer_.mutable_gpu_diff()); + cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); + cl_mem col_diff = (cl_mem) (col_buffer()->mutable_gpu_diff()); const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem bottom_diff = (cl_mem) (bottom[i]->mutable_gpu_diff()); @@ -271,6 +271,12 @@ void ConvolutionSKLayer::Backward_gpu( } } +template +shared_ptr< Blob > ConvolutionSKLayer::col_buffer() { + return this->device_context_-> + template Buffer(this->device_context_->current_queue_id()); +} + INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionSKLayer); } // namespace caffe From 1f2f12920123cced79a7d24e8c505fea969f6678 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 28 Jun 2015 17:16:20 -0400 Subject: [PATCH 087/254] Memory usage recording. --- include/caffe/device_context.hpp | 8 ++++++++ src/caffe/device_context.cpp | 25 +++++++++++++++++++++++-- src/caffe/layers/conv_sk_layer.cpp | 3 ++- src/caffe/syncedmem.cpp | 9 +++++++++ 4 files changed, 42 insertions(+), 3 deletions(-) diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index 140e9f35c66..0a6d76831de 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -35,11 +35,19 @@ class DeviceContext { void FinishQueues(); void Init(); + + size_t memory_usage(); + size_t peak_memory_usage(); + void IncreaseMemoryUsage(size_t bytes); + void DecreaseMemoryUsage(size_t bytes); + private: int current_queue_id_; std::vector workgroup_sizes_; int id_; Backend backend_; + size_t memory_usage_; + size_t peak_memory_usage_; std::vector< shared_ptr< Blob > > buff_f_; std::vector< shared_ptr< Blob > > buff_d_; }; diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index b9e4d9d300b..b75c0f6ea28 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -15,12 +15,14 @@ namespace caffe { DeviceContext::DeviceContext() : current_queue_id_(0), workgroup_sizes_(3, 0), id_(0), - backend_(Backend::BACKEND_CUDA) { + backend_(Backend::BACKEND_CUDA), + memory_usage_(0), peak_memory_usage_(0) { } DeviceContext::DeviceContext(int id, Backend backend) : current_queue_id_(0), workgroup_sizes_(3, 0), id_(id), - backend_(backend) { + backend_(backend), + memory_usage_(0), peak_memory_usage_(0) { } void DeviceContext::Init() { @@ -121,4 +123,23 @@ void DeviceContext::FinishQueues() { } } +size_t DeviceContext::memory_usage() { + return memory_usage_; +} + +size_t DeviceContext::peak_memory_usage() { + return peak_memory_usage_; +} + +void DeviceContext::IncreaseMemoryUsage(size_t bytes) { + memory_usage_ += bytes; + if (memory_usage_ > peak_memory_usage_) { + peak_memory_usage_ = memory_usage_; + } +} + +void DeviceContext::DecreaseMemoryUsage(size_t bytes) { + memory_usage_ -= bytes; +} + } // namespace caffe diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index aae757e91c6..ebd0d7f213b 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -91,7 +91,8 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, for (int i = 0; i < this->device_context_->num_queues(); ++i) { shared_ptr< Blob > buffer = this->device_context_->template Buffer(i); - buffer->Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, width_out); + buffer->Reshape(1, channels_ * kernel_h_ * kernel_w_, + height_out, width_out); } } diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 0e4b7332078..6dc9d9dc340 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -24,10 +24,12 @@ SyncedMemory::~SyncedMemory() { if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaFree(gpu_ptr_)); + device_context_->DecreaseMemoryUsage(size_); #endif // USE_CUDA } else { #ifdef USE_GREENTEA clReleaseMemObject(cl_gpu_mem_); + device_context_->DecreaseMemoryUsage(size_); #endif // USE_GREENTEA } } @@ -84,6 +86,7 @@ inline void SyncedMemory::to_gpu() { if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + device_context_->IncreaseMemoryUsage(size_); caffe_gpu_memset(size_, 0, gpu_ptr_); #endif // USE_CUDA } else { @@ -101,9 +104,11 @@ inline void SyncedMemory::to_gpu() { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, size_, cpu_ptr_, &err); + device_context_->IncreaseMemoryUsage(size_); } else { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); + device_context_->IncreaseMemoryUsage(size_); int alpha = 0; greentea_memset(device_context_->id(), size_, alpha, cl_gpu_mem_, 0); } @@ -119,6 +124,7 @@ inline void SyncedMemory::to_gpu() { #ifdef USE_CUDA if (gpu_ptr_ == NULL) { CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + device_context_->IncreaseMemoryUsage(size_); } caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_); #endif // USE_CUDA @@ -137,9 +143,12 @@ inline void SyncedMemory::to_gpu() { cl_gpu_mem_ = clCreateBuffer( ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, size_, cpu_ptr_, &err); + device_context_->IncreaseMemoryUsage(size_); + } else { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); + device_context_->IncreaseMemoryUsage(size_); } gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); From d70924c38b5a0d191d2d1f69f08a876630e0ae5d Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 28 Jun 2015 18:23:26 -0400 Subject: [PATCH 088/254] CUDA crash on exit fix. --- src/caffe/common.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 80d7d5de60e..bbbe2c1b5d0 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -132,6 +132,9 @@ Caffe::Caffe() } Caffe::~Caffe() { + // Make sure all device contexts and + // dependent memory blocks are freed properly + device_contexts_.clear(); #ifdef USE_CUDA if (cublas_handle_) CUBLAS_CHECK(cublasDestroy(cublas_handle_)); From 21857ecca3096ca0b87730de6dca02fe64a0b8f7 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 28 Jun 2015 18:32:03 -0400 Subject: [PATCH 089/254] Memory free success check removed. --- src/caffe/syncedmem.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 6dc9d9dc340..111f708a16a 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -23,7 +23,7 @@ SyncedMemory::~SyncedMemory() { if (gpu_ptr_) { if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA - CUDA_CHECK(cudaFree(gpu_ptr_)); + cudaFree(gpu_ptr_); device_context_->DecreaseMemoryUsage(size_); #endif // USE_CUDA } else { From 429d0483dc2fbb4fab08441b6f11399e33e3bb0e Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 28 Jun 2015 18:54:10 -0400 Subject: [PATCH 090/254] Device synchronization also for CUDA. --- src/caffe/common.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index bbbe2c1b5d0..b8b1435da01 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -171,16 +171,18 @@ void Caffe::set_random_seed(const unsigned int seed) { } void Caffe::Synchronize(int device_id) { -#ifdef USE_GREENTEA DeviceContext * device_context = Caffe::GetDeviceContext(device_id); - if (device_context->backend() == BACKEND_OpenCL) { + if (device_context->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + cudaDeviceSynchronize(); +#endif + } else { +#ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( GetDeviceContext(device_id)->id()); ctx.get_queue().finish(); - } -#else - (void) device_id; #endif + } } void Caffe::EnumerateDevices() { From 362c6a30baecc0a5a2d3b26e626fa2de830fe232 Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 30 Jun 2015 11:12:41 -0400 Subject: [PATCH 091/254] ND-SK preparations. --- include/caffe/blob.hpp | 2 + include/caffe/device_context.hpp | 1 + include/caffe/loss_layers.hpp | 2 +- include/caffe/util/im2col.hpp | 30 ++- include/caffe/vision_layers.hpp | 209 +++++++++++++++- src/caffe/blob.cpp | 11 + src/caffe/device_context.cpp | 4 + src/caffe/layers/base_conv_layer.cpp | 19 +- src/caffe/layers/base_conv_nd_layer.cpp | 314 ++++++++++++++++++++++++ src/caffe/layers/conv_nd_layer.cpp | 45 ++++ src/caffe/layers/conv_nd_layer.cu | 71 ++++++ src/caffe/layers/conv_sk_layer.cpp | 21 +- src/caffe/layers/deconv_nd_layer.cpp | 48 ++++ src/caffe/layers/deconv_nd_layer.cu | 72 ++++++ src/caffe/layers/im2col_layer.cpp | 19 +- src/caffe/layers/softmax_loss_layer.cpp | 6 + src/caffe/layers/softmax_loss_layer.cu | 19 +- src/caffe/proto/caffe.proto | 22 +- src/caffe/util/im2col.cu | 225 ++++++++++++++++- src/caffe/util/upgrade_proto.cpp | 6 +- 20 files changed, 1098 insertions(+), 48 deletions(-) create mode 100644 src/caffe/layers/base_conv_nd_layer.cpp create mode 100644 src/caffe/layers/conv_nd_layer.cpp create mode 100644 src/caffe/layers/conv_nd_layer.cu create mode 100644 src/caffe/layers/deconv_nd_layer.cpp create mode 100644 src/caffe/layers/deconv_nd_layer.cu diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 94028e8b7e6..68a119baf6a 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -247,6 +247,7 @@ class Blob { const Dtype* cpu_data() const; void set_cpu_data(Dtype* data); + const int* gpu_shape() const; const Dtype* gpu_data() const; const Dtype* cpu_diff() const; const Dtype* gpu_diff() const; @@ -301,6 +302,7 @@ class Blob { protected: shared_ptr data_; shared_ptr diff_; + shared_ptr shape_data_; vector shape_; int count_; int capacity_; diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index 0a6d76831de..a948490f977 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -40,6 +40,7 @@ class DeviceContext { size_t peak_memory_usage(); void IncreaseMemoryUsage(size_t bytes); void DecreaseMemoryUsage(size_t bytes); + void ResetPeakMemoryUsage(); private: int current_queue_id_; diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 86c34241168..ef48c0acd42 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -703,7 +703,7 @@ class SoftmaxWithLossLayer : public LossLayer { virtual inline const char* type() const { return "SoftmaxWithLoss"; } virtual inline int ExactNumTopBlobs() const { return -1; } virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } + virtual inline int MaxTopBlobs() const { return 3; } protected: /// @copydoc SoftmaxWithLossLayer diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index 7cf05a84ade..d95b324b88e 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -17,10 +17,10 @@ void col2im_cpu(const Dtype* data_col, const int channels, const int height, template void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, - const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, - Dtype* data_col); + const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_col); template void im2col_gpu(const Dtype* data_im, const int channels, const int height, @@ -30,10 +30,10 @@ void im2col_gpu(const Dtype* data_im, const int channels, const int height, template void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, - Dtype* data_im); + const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_im); template void col2im_gpu(const Dtype* data_col, const int channels, const int height, @@ -41,6 +41,20 @@ void col2im_gpu(const Dtype* data_col, const int channels, const int height, const int pad_h, const int pad_w, const int stride_h, const int stride_w, Dtype* data_im); + +template +void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_col); + +template +void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, + const int im_size, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_im); + + } // namespace caffe #endif // CAFFE_UTIL_IM2COL_HPP_ diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 9e42f4dd15c..333e5673d6d 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -200,10 +200,164 @@ class BaseConvolutionLayer : public Layer { Blob bias_multiplier_; }; + +/** + * @brief Abstract base class that factors out the BLAS code common to + * ConvolutionLayer and DeconvolutionLayer for N dimensions. + */ +template +class BaseConvolutionNDLayer : public Layer { + public: + explicit BaseConvolutionNDLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline bool EqualNumBottomTopBlobs() const { return true; } + + protected: + // Helper functions that abstract away the column buffer and gemm arguments. + // The last argument in forward_cpu_gemm is so that we can skip the im2col if + // we just called weight_cpu_gemm with the same input. + +#ifndef CPU_ONLY + void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* col_output); + void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* + weights); + void backward_gpu_bias(Dtype* bias, const Dtype* input); +#endif // !CPU_ONLY + + // reverse_dimensions should return true iff we are implementing deconv, so + // that conv helpers know which dimensions are which. + virtual bool reverse_dimensions() = 0; + // Compute height_out_ and width_out_ from other parameters. + virtual void compute_output_shape() = 0; + + /// @brief The spatial dimensions of a filter kernel. + Blob kernel_shape_; + /// @brief The spatial dimensions of the stride. + Blob stride_; + /// @brief The spatial dimensions of the padding. + Blob pad_; + /// @brief The spatial dimensions of the convolution input. + Blob conv_input_shape_; + /// @brief The spatial dimensions of the input. + Blob input_shape_; + /// @brief The spatial dimensions of the col_buffer. + vector col_buffer_shape_; + /// @brief The spatial dimensions of the output. + vector output_shape_; + + int num_spatial_axes_; + int bottom_dim_; + int top_dim_; + + int channel_axis_; + int num_; + int channels_; + int group_; + int num_output_; + bool bias_term_; + bool is_1x1_; + + private: + // wrap im2col/col2im so we don't have to remember the (long) argument lists +#ifndef CPU_ONLY +#ifdef USE_CUDA + inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { + im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), col_buff); + } + inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { + col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), + data); + } +#endif // USE_CUDA +#ifdef USE_GREENTEA +#endif // USE_GREENTEA +#endif // !CPU_ONLY + + int num_kernels_im2col_; + int num_kernels_col2im_; + int conv_out_channels_; + int conv_in_channels_; + int conv_out_spatial_dim_; + int out_spatial_dim_; + int kernel_dim_; + int weight_offset_; + int col_offset_; + int output_offset_; + + Blob col_buffer_; + Blob bias_multiplier_; +}; + +template +class ConvolutionNDLayer : public BaseConvolutionNDLayer { + public: + explicit ConvolutionNDLayer(const LayerParameter& param) + : BaseConvolutionNDLayer(param) { + } + + virtual inline const char* type() const { + return "ConvolutionND"; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual inline bool reverse_dimensions() { + return false; + } + virtual void compute_output_shape(); +}; + +template +class DeconvolutionNDLayer : public BaseConvolutionNDLayer { + public: + explicit DeconvolutionNDLayer(const LayerParameter& param) + : BaseConvolutionNDLayer(param) {} + + virtual inline const char* type() const { return "DeconvolutionND"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return true; } + virtual void compute_output_shape(); +}; + + /** * @brief Convolves the input image for pixelwise classification. * - * Layer introduced by Hongsheng et al. + * Layer introduced by Li, Hongsheng et al. */ template class ConvolutionSKLayer : public Layer { @@ -249,6 +403,57 @@ class ConvolutionSKLayer : public Layer { int M_, K_, N_; }; + +/** + * @brief Convolves the input image for pixelwise classification. + * + * Layer introduced by Hongsheng et al. + */ +template +class ConvolutionNDSKLayer : public Layer { + public: + explicit ConvolutionNDSKLayer(const LayerParameter& param) + : Layer(param) { + } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { + return "ConvolutionNDSK"; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + shared_ptr< Blob > col_buffer(); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int channels_; + int group_; + int height_, width_; + int pad_h_, pad_w_; + int kstride_h_, kstride_w_; + int num_, num_output_; + Blob col_buffer_; + Blob bias_multiplier_; + bool bias_term_; + int M_, K_, N_; +}; + + /** * @brief Convolves the input image with a bank of learned filters, * and (optionally) adds biases. @@ -321,6 +526,8 @@ class ConvolutionLayer : public BaseConvolutionLayer { virtual void compute_output_shape(); }; + + /** * @brief Convolve the input with a bank of learned filters, and (optionally) * add biases, treating filters and convolution parameters in the diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 3692bf8be74..6a20f651155 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -30,11 +30,16 @@ bool Blob::Reshape(const vector& shape) { CHECK_LE(shape.size(), kMaxBlobAxes); count_ = 1; shape_.resize(shape.size()); + shape_data_.reset(new SyncedMemory(shape.size() + * sizeof(int), device_context_)); + int* shape_data = static_cast(shape_data_->mutable_cpu_data()); + for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); CHECK_LE(shape[i], INT_MAX / count_)<< "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; + shape_data[i] = shape[i]; } if (count_ > capacity_) { capacity_ = count_; @@ -75,6 +80,12 @@ Blob::Blob(const vector& shape, DeviceContext *device_context) Reshape(shape); } +template +const int* Blob::gpu_shape() const { + CHECK(shape_data_); + return (const int*)shape_data_->gpu_data(); +} + template const Dtype* Blob::cpu_data() const { CHECK(data_); diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index b75c0f6ea28..e9de6226a14 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -142,4 +142,8 @@ void DeviceContext::DecreaseMemoryUsage(size_t bytes) { memory_usage_ -= bytes; } +void DeviceContext::ResetPeakMemoryUsage() { + peak_memory_usage_ = memory_usage_; +} + } // namespace caffe diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 4f9e0921743..b7654384763 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -21,22 +21,23 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, << "corresponding to (num, channels, height, width)"; // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); - CHECK(!conv_param.has_kernel_size() != + CHECK(!(conv_param.kernel_size_size() > 0) != !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; - CHECK(conv_param.has_kernel_size() || + CHECK((conv_param.kernel_size_size() > 0) || (conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; - CHECK((!conv_param.has_pad() && conv_param.has_pad_h() + CHECK((!(conv_param.pad_size() > 0) && conv_param.has_pad_h() && conv_param.has_pad_w()) || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; - CHECK((!conv_param.has_stride() && conv_param.has_stride_h() + CHECK((!(conv_param.stride_size() > 0) && conv_param.has_stride_h() && conv_param.has_stride_w()) || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; - if (conv_param.has_kernel_size()) { - kernel_h_ = kernel_w_ = conv_param.kernel_size(); + if (conv_param.kernel_size_size() > 0) { + kernel_h_ = kernel_w_ = conv_param.kernel_size_size() > 0 ? + conv_param.kernel_size().Get(0) : 1; } else { kernel_h_ = conv_param.kernel_h(); kernel_w_ = conv_param.kernel_w(); @@ -44,13 +45,15 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; if (!conv_param.has_pad_h()) { - pad_h_ = pad_w_ = conv_param.pad(); + pad_h_ = pad_w_ = conv_param.pad_size() > 0 ? + conv_param.pad().Get(0) : 0; } else { pad_h_ = conv_param.pad_h(); pad_w_ = conv_param.pad_w(); } if (!conv_param.has_stride_h()) { - stride_h_ = stride_w_ = conv_param.stride(); + stride_h_ = stride_w_ = conv_param.stride_size() > 0 ? + conv_param.stride().Get(0) : 1; } else { stride_h_ = conv_param.stride_h(); stride_w_ = conv_param.stride_w(); diff --git a/src/caffe/layers/base_conv_nd_layer.cpp b/src/caffe/layers/base_conv_nd_layer.cpp new file mode 100644 index 00000000000..280fea27767 --- /dev/null +++ b/src/caffe/layers/base_conv_nd_layer.cpp @@ -0,0 +1,314 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void BaseConvolutionNDLayer::LayerSetUp( + const vector*>& bottom, + const vector*>& top) { + // Configure the kernel size, padding, stride, and inputs. + ConvolutionParameter conv_param = this->layer_param_.convolution_param(); + channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); + const int first_spatial_axis = channel_axis_ + 1; + const int num_axes = bottom[0]->num_axes(); + num_spatial_axes_ = num_axes - first_spatial_axis; + CHECK_GE(num_spatial_axes_, 1); + // Setup input dimensions (input_shape_). + vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); + input_shape_.Reshape(bottom_dim_blob_shape); + int* input_shape_data = input_shape_.mutable_cpu_data(); + for (int i = 0; i < num_spatial_axes_ + 1; ++i) { + input_shape_data[i] = bottom[0]->shape(channel_axis_ + i); + } + vector spatial_dim_blob_shape(1, num_spatial_axes_); + // Setup filter kernel dimensions (kernel_shape_). + kernel_shape_.Reshape(spatial_dim_blob_shape); + int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "kernel_h & kernel_w can only be used for 2D convolution."; + CHECK_EQ(0, conv_param.kernel_size_size()) + << "Either kernel_size or kernel_h/w should be specified; not both."; + kernel_shape_data[0] = conv_param.kernel_h(); + kernel_shape_data[1] = conv_param.kernel_w(); + } else { + const int num_kernel_dims = conv_param.kernel_size_size(); + CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_) + << "kernel_size must be specified once, or once per spatial dimension " + << "(kernel_size specified " << num_kernel_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + for (int i = 0; i < num_spatial_axes_; ++i) { + kernel_shape_data[i] = + conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); + } + } + for (int i = 0; i < num_spatial_axes_; ++i) { + CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; + } + // Setup stride dimensions (stride_). + stride_.Reshape(spatial_dim_blob_shape); + int* stride_data = stride_.mutable_cpu_data(); + if (conv_param.has_stride_h() || conv_param.has_stride_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "stride_h & stride_w can only be used for 2D convolution."; + CHECK_EQ(0, conv_param.stride_size()) + << "Either stride or stride_h/w should be specified; not both."; + stride_data[0] = conv_param.stride_h(); + stride_data[1] = conv_param.stride_w(); + } else { + const int num_stride_dims = conv_param.stride_size(); + CHECK(num_stride_dims == 0 || num_stride_dims == 1 || + num_stride_dims == num_spatial_axes_) + << "stride must be specified once, or once per spatial dimension " + << "(stride specified " << num_stride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultStride = 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : + conv_param.stride((num_stride_dims == 1) ? 0 : i); + CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; + } + } + // Setup pad dimensions (pad_). + pad_.Reshape(spatial_dim_blob_shape); + int* pad_data = pad_.mutable_cpu_data(); + if (conv_param.has_pad_h() || conv_param.has_pad_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "pad_h & pad_w can only be used for 2D convolution."; + CHECK_EQ(0, conv_param.pad_size()) + << "Either pad or pad_h/w should be specified; not both."; + pad_data[0] = conv_param.pad_h(); + pad_data[1] = conv_param.pad_w(); + } else { + const int num_pad_dims = conv_param.pad_size(); + CHECK(num_pad_dims == 0 || num_pad_dims == 1 || + num_pad_dims == num_spatial_axes_) + << "pad must be specified once, or once per spatial dimension " + << "(pad specified " << num_pad_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultPad = 0; + for (int i = 0; i < num_spatial_axes_; ++i) { + pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : + conv_param.pad((num_pad_dims == 1) ? 0 : i); + } + } + // Special case: im2col is the identity for 1x1 convolution with stride 1 + // and no padding, so flag for skipping the buffer and transformation. + is_1x1_ = true; + for (int i = 0; i < num_spatial_axes_; ++i) { + is_1x1_ &= + kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0; + if (!is_1x1_) { break; } + } + // Configure output channels and groups. + channels_ = bottom[0]->shape(channel_axis_); + num_output_ = this->layer_param_.convolution_param().num_output(); + CHECK_GT(num_output_, 0); + group_ = this->layer_param_.convolution_param().group(); + CHECK_EQ(channels_ % group_, 0); + CHECK_EQ(num_output_ % group_, 0) + << "Number of output should be multiples of group."; + if (reverse_dimensions()) { + conv_out_channels_ = channels_; + conv_in_channels_ = num_output_; + } else { + conv_out_channels_ = num_output_; + conv_in_channels_ = channels_; + } + // Handle the parameters: weights and biases. + // - blobs_[0] holds the filter weights + // - blobs_[1] holds the biases (optional) + bias_term_ = this->layer_param_.convolution_param().bias_term(); + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + if (bias_term_) { + this->blobs_.resize(2); + } else { + this->blobs_.resize(1); + } + // Initialize and fill the weights: + // output channels x input channels per-group x kernel height x kernel width + vector weight_shape(2); + weight_shape[0] = conv_out_channels_; + weight_shape[1] = conv_in_channels_ / group_; + for (int i = 0; i < num_spatial_axes_; ++i) { + weight_shape.push_back(kernel_shape_data[i]); + } + this->blobs_[0].reset(new Blob(weight_shape)); + shared_ptr > weight_filler(GetFiller( + this->layer_param_.convolution_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); + // If necessary, initialize and fill the biases. + if (bias_term_) { + vector bias_shape(1, num_output_); + this->blobs_[1].reset(new Blob(bias_shape)); + shared_ptr > bias_filler(GetFiller( + this->layer_param_.convolution_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); + } + } + // Propagate gradients to the parameters (as directed by backward pass). + this->param_propagate_down_.resize(this->blobs_.size(), true); +} + +template +void BaseConvolutionNDLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + ConvolutionParameter conv_param = this->layer_param_.convolution_param(); + channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); + const int first_spatial_axis = channel_axis_ + 1; + const int num_axes = bottom[0]->num_axes(); + num_spatial_axes_ = num_axes - first_spatial_axis; + CHECK_GE(num_spatial_axes_, 1); + num_ = bottom[0]->count(0, channel_axis_); + CHECK_EQ(bottom[0]->shape(channel_axis_), channels_) + << "Input size incompatible with convolution kernel."; + // TODO: generalize to handle inputs of different shapes. + for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { + CHECK(bottom[0]->shape() == bottom[bottom_id]->shape()) + << "All inputs must have the same shape."; + } + // Shape the tops. + compute_output_shape(); + vector top_shape = bottom[0]->shape(); + top_shape[channel_axis_] = num_output_; + top_shape.resize(first_spatial_axis); // Discard input spatial axes. + for (int i = 0; i < num_spatial_axes_; ++i) { + top_shape.push_back(output_shape_[i]); + } + for (int top_id = 0; top_id < top.size(); ++top_id) { + top[top_id]->Reshape(top_shape); + } + if (reverse_dimensions()) { + conv_out_spatial_dim_ = bottom[0]->count(first_spatial_axis); + } else { + conv_out_spatial_dim_ = top[0]->count(first_spatial_axis); + } + const int* kernel_shape_data = kernel_shape_.cpu_data(); + kernel_dim_ = conv_in_channels_; + for (int i = 0; i < num_spatial_axes_; ++i) { + kernel_dim_ *= kernel_shape_data[i]; + } + weight_offset_ = conv_out_channels_ * kernel_dim_ / group_ / group_; + col_offset_ = kernel_dim_ * conv_out_spatial_dim_ / group_; + output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_; + // Setup input dimensions (conv_input_shape_). + vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); + conv_input_shape_.Reshape(bottom_dim_blob_shape); + int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data(); + for (int i = 0; i < num_spatial_axes_ + 1; ++i) { + if (reverse_dimensions()) { + conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i); + } else { + conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i); + } + } + // The im2col result buffer will only hold one image at a time to avoid + // overly large memory usage. In the special case of 1x1 convolution + // it goes lazily unused to save memory. + col_buffer_shape_.clear(); + col_buffer_shape_.push_back(kernel_dim_); + const int* input_shape_data = input_shape_.cpu_data() + 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + if (reverse_dimensions()) { + col_buffer_shape_.push_back(input_shape_data[i]); + } else { + col_buffer_shape_.push_back(output_shape_[i]); + } + } + col_buffer_.Reshape(col_buffer_shape_); + bottom_dim_ = bottom[0]->count(channel_axis_); + top_dim_ = top[0]->count(channel_axis_); + num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_; + num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_; + // Set up the all ones "bias multiplier" for adding biases by BLAS + out_spatial_dim_ = top[0]->count(first_spatial_axis); + if (bias_term_) { + vector bias_multiplier_shape(1, out_spatial_dim_); + bias_multiplier_.Reshape(bias_multiplier_shape); + caffe_set(bias_multiplier_.count(), Dtype(1), + bias_multiplier_.mutable_cpu_data()); + } +} + +#ifndef CPU_ONLY + +template +void BaseConvolutionNDLayer::forward_gpu_gemm(const Dtype* input, + const Dtype* weights, Dtype* output, bool skip_im2col) { + const Dtype* col_buff = input; + if (!is_1x1_) { + if (!skip_im2col) { + conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + } + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / + group_, conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, + (Dtype)0., output + output_offset_ * g); + } +} + +template +void BaseConvolutionNDLayer::forward_gpu_bias(Dtype* output, + const Dtype* bias) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, + out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), + (Dtype)1., output); +} + +template +void BaseConvolutionNDLayer::backward_gpu_gemm(const Dtype* output, + const Dtype* weights, Dtype* input) { + Dtype* col_buff = col_buffer_.mutable_gpu_data(); + if (is_1x1_) { + col_buff = input; + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, + (Dtype)0., col_buff + col_offset_ * g); + } + if (!is_1x1_) { + conv_col2im_gpu(col_buff, input); + } +} + +template +void BaseConvolutionNDLayer::weight_gpu_gemm(const Dtype* input, + const Dtype* output, Dtype* weights) { + const Dtype* col_buff = input; + if (!is_1x1_) { + conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, + (Dtype)1., weights + weight_offset_ * g); + } +} + +template +void BaseConvolutionNDLayer::backward_gpu_bias(Dtype* bias, + const Dtype* input) { + caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., + input, bias_multiplier_.gpu_data(), 1., bias); +} + +#endif // !CPU_ONLY + +INSTANTIATE_CLASS(BaseConvolutionNDLayer); + +} // namespace caffe diff --git a/src/caffe/layers/conv_nd_layer.cpp b/src/caffe/layers/conv_nd_layer.cpp new file mode 100644 index 00000000000..6c3bf8a40ee --- /dev/null +++ b/src/caffe/layers/conv_nd_layer.cpp @@ -0,0 +1,45 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ConvolutionNDLayer::compute_output_shape() { + // input_shape_ + 1 to skip channel axis + const int* input_shape_data = this->input_shape_.cpu_data() + 1; + const int* kernel_shape_data = this->kernel_shape_.cpu_data(); + const int* stride_data = this->stride_.cpu_data(); + const int* pad_data = this->pad_.cpu_data(); + this->output_shape_.clear(); + for (int i = 0; i < this->num_spatial_axes_; ++i) { + const int input_dim = input_shape_data[i]; + const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) + / stride_data[i] + 1; + this->output_shape_.push_back(output_dim); + } +} + +template +void ConvolutionNDLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + NOT_IMPLEMENTED; +} + +template +void ConvolutionNDLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; +} + +#ifdef CPU_ONLY +STUB_GPU(ConvolutionNDLayer); +#endif + +INSTANTIATE_CLASS(ConvolutionNDLayer); + +} // namespace caffe diff --git a/src/caffe/layers/conv_nd_layer.cu b/src/caffe/layers/conv_nd_layer.cu new file mode 100644 index 00000000000..851331b5da1 --- /dev/null +++ b/src/caffe/layers/conv_nd_layer.cu @@ -0,0 +1,71 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void ConvolutionNDLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, + top_data + n * this->top_dim_); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->gpu_data(); + this->forward_gpu_bias(top_data + n * this->top_dim_, bias); + } + } + } +} + +template +void ConvolutionNDLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, + top_diff + n * this->top_dim_, weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight, + bottom_diff + n * this->bottom_dim_); + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionNDLayer); + +} // namespace caffe diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index ebd0d7f213b..fbda73e1c37 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -13,25 +13,25 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { ConvolutionParameter conv_param = this->layer_param_.convolution_param(); CHECK( - !conv_param.has_kernel_size() + !(conv_param.kernel_size_size() > 0) != !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; CHECK( - conv_param.has_kernel_size() + (conv_param.kernel_size_size() > 0) || (conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; CHECK( - (!conv_param.has_pad() && conv_param.has_pad_h() + (!(conv_param.pad_size() > 9) && conv_param.has_pad_h() && conv_param.has_pad_w()) || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; CHECK( - (!conv_param.has_stride() && conv_param.has_stride_h() + (!(conv_param.stride_size() > 0) && conv_param.has_stride_h() && conv_param.has_stride_w()) || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; - if (conv_param.has_kernel_size()) { - kernel_h_ = kernel_w_ = conv_param.kernel_size(); + if (conv_param.kernel_size_size() > 0) { + kernel_h_ = kernel_w_ = conv_param.kernel_size().Get(0); } else { kernel_h_ = conv_param.kernel_h(); kernel_w_ = conv_param.kernel_w(); @@ -39,7 +39,8 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; if (!conv_param.has_pad_h()) { - pad_h_ = pad_w_ = conv_param.pad(); + pad_h_ = pad_w_ = conv_param.pad_size() > 0 ? + conv_param.pad().Get(0) : 0; } else { pad_h_ = conv_param.pad_h(); pad_w_ = conv_param.pad_w(); @@ -47,13 +48,15 @@ void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, CHECK_EQ(pad_h_, 0)<< "pad_h_ must be 0"; CHECK_EQ(pad_w_, 0)<< "pad_w_ must be 0"; if (!conv_param.has_stride_h()) { - stride_h_ = stride_w_ = conv_param.stride(); + stride_h_ = stride_w_ = conv_param.stride_size() > 0 ? + conv_param.stride().Get(0) : 1; } else { stride_h_ = conv_param.stride_h(); stride_w_ = conv_param.stride_w(); } if (!conv_param.has_kstride_h()) { - kstride_h_ = kstride_w_ = conv_param.kstride(); + kstride_h_ = kstride_w_ = conv_param.kstride_size() > 0 ? + conv_param.kstride().Get(0) : 0; } else { kstride_h_ = conv_param.kstride_h(); kstride_w_ = conv_param.kstride_w(); diff --git a/src/caffe/layers/deconv_nd_layer.cpp b/src/caffe/layers/deconv_nd_layer.cpp new file mode 100644 index 00000000000..d5684a6163b --- /dev/null +++ b/src/caffe/layers/deconv_nd_layer.cpp @@ -0,0 +1,48 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DeconvolutionNDLayer::compute_output_shape() { + // input_shape_ + 1 to skip channel axis + const int* input_shape_data = this->input_shape_.cpu_data() + 1; + const int* kernel_shape_data = this->kernel_shape_.cpu_data(); + const int* stride_data = this->stride_.cpu_data(); + const int* pad_data = this->pad_.cpu_data(); + this->output_shape_.clear(); + for (int i = 0; i < this->num_spatial_axes_; ++i) { + const int input_dim = input_shape_data[i]; + const int output_dim = stride_data[i] * (input_dim - 1) + + kernel_shape_data[i] - 2 * pad_data[i]; + this->output_shape_.push_back(output_dim); + } +} + +template +void DeconvolutionNDLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + NOT_IMPLEMENTED; +} + +template +void DeconvolutionNDLayer::Backward_cpu( + const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; +} + +#ifdef CPU_ONLY +STUB_GPU(DeconvolutionNDLayer); +#endif + +INSTANTIATE_CLASS(DeconvolutionNDLayer); +REGISTER_LAYER_CLASS(DeconvolutionND); + +} // namespace caffe diff --git a/src/caffe/layers/deconv_nd_layer.cu b/src/caffe/layers/deconv_nd_layer.cu new file mode 100644 index 00000000000..9908ebd5296 --- /dev/null +++ b/src/caffe/layers/deconv_nd_layer.cu @@ -0,0 +1,72 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DeconvolutionNDLayer::Forward_gpu( + const vector*>& bottom, + const vector*>& top) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, + top_data + n * this->top_dim_); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->gpu_data(); + this->forward_gpu_bias(top_data + n * this->top_dim_, bias); + } + } + } +} + +template +void DeconvolutionNDLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(top_diff + n * this->top_dim_, + bottom_data + n * this->bottom_dim_, weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->forward_gpu_gemm(top_diff + n * this->top_dim_, weight, + bottom_diff + n * this->bottom_dim_); + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(DeconvolutionNDLayer); + +} // namespace caffe diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 1c802714e33..cc3fe413264 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -11,22 +11,23 @@ template void Im2colLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { ConvolutionParameter conv_param = this->layer_param_.convolution_param(); - CHECK(!conv_param.has_kernel_size() != + CHECK(!(conv_param.kernel_size_size() > 0) != !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; - CHECK(conv_param.has_kernel_size() || + CHECK((conv_param.kernel_size_size() > 0) || (conv_param.has_kernel_h() && conv_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; - CHECK((!conv_param.has_pad() && conv_param.has_pad_h() + CHECK((!(conv_param.pad_size() > 0) && conv_param.has_pad_h() && conv_param.has_pad_w()) || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; - CHECK((!conv_param.has_stride() && conv_param.has_stride_h() + CHECK((!(conv_param.stride_size() > 0) && conv_param.has_stride_h() && conv_param.has_stride_w()) || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; - if (conv_param.has_kernel_size()) { - kernel_h_ = kernel_w_ = conv_param.kernel_size(); + if (conv_param.kernel_size_size() > 0) { + kernel_h_ = kernel_w_ = conv_param.kernel_size_size() > 0 ? + conv_param.kernel_size().Get(0) : 1; } else { kernel_h_ = conv_param.kernel_h(); kernel_w_ = conv_param.kernel_w(); @@ -34,13 +35,15 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; if (!conv_param.has_pad_h()) { - pad_h_ = pad_w_ = conv_param.pad(); + pad_h_ = pad_w_ = conv_param.pad_size() > 0 ? + conv_param.pad().Get(0) : 0; } else { pad_h_ = conv_param.pad_h(); pad_w_ = conv_param.pad_w(); } if (!conv_param.has_stride_h()) { - stride_h_ = stride_w_ = conv_param.stride(); + stride_h_ = stride_w_ = conv_param.stride_size() > 0 ? + conv_param.stride_size() : 1; } else { stride_h_ = conv_param.stride_h(); stride_w_ = conv_param.stride_w(); diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index ba312f67fbc..fede08a4855 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -117,6 +117,12 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, } else { caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } + if (bottom.size() == 3) { + // TODO: Correct this for easy diff scaling + std::cout << "Size: " << bottom[0]->count() << std::endl; + const Dtype* weight = bottom[2]->cpu_data(); + caffe_mul(bottom[2]->count(), bottom_diff, weight, bottom_diff); + } } } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 6fa00eaa9ec..7f4554c0bb2 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -186,6 +186,12 @@ void SoftmaxWithLossLayer::Backward_gpu( } else { caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } + if (bottom.size() == 3) { + // TODO: Correct this for easy diff scaling + std::cout << "Size: " << bottom[0]->count() << std::endl; + const Dtype* weight = bottom[2]->gpu_data(); + caffe_gpu_mul(bottom[2]->count(), bottom_diff, weight, bottom_diff); + } #endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -218,14 +224,21 @@ void SoftmaxWithLossLayer::Backward_gpu( const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; - greentea_gpu_asum(this->device_context_->id(), + greentea_gpu_asum(this->device_context_->id(), nthreads, counts, 0, &count); - greentea_gpu_scal(this->device_context_->id(), + greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / count, bottom_diff, 0); } else { - greentea_gpu_scal(this->device_context_->id(), + greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / num, bottom_diff, 0); } + if (bottom.size() == 3) { + // TODO: Correct this for easy diff scaling + std::cout << "Size: " << bottom[0]->count() << std::endl; + const cl_mem weight = (cl_mem)(bottom[2]->gpu_data()); + greentea_gpu_mul(this->device_context_->id(), + bottom[2]->count(), bottom_diff, 0, weight, 0, bottom_diff, 0); + } #endif // USE_GREENTEA } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index a26a74107d7..d459220eb24 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -443,14 +443,14 @@ message ConvolutionParameter { optional bool bias_term = 2 [default = true]; // whether to have bias terms // Pad, kernel size, and stride are all given as a single value for equal // dimensions in height and width or as Y, X pairs. - optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X) + repeated uint32 pad = 3; // The padding size (equal in Y, X) optional uint32 pad_h = 9 [default = 0]; // The padding height optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 4; // The kernel size (square) + repeated uint32 kernel_size = 4; // The kernel size (square) optional uint32 kernel_h = 11; // The kernel height optional uint32 kernel_w = 12; // The kernel width optional uint32 group = 5 [default = 1]; // The group size for group conv - optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X) + repeated uint32 stride = 6; // The stride (equal in Y, X) optional uint32 stride_h = 13; // The stride height optional uint32 stride_w = 14; // The stride width optional FillerParameter weight_filler = 7; // The filler for the weight @@ -461,9 +461,22 @@ message ConvolutionParameter { CUDNN = 2; } optional Engine engine = 15 [default = DEFAULT]; - optional uint32 kstride = 16 [default = 0]; + + // Strided kernel parameters + repeated uint32 kstride = 16; optional uint32 kstride_h = 17 [default = 0]; optional uint32 kstride_w = 18 [default = 0]; + + // The axis to interpret as "channels" when performing convolution. + // Preceding dimensions are treated as independent inputs; + // succeeding dimensions are treated as "spatial". + // With (N, C, H, W) inputs, and axis == 1 (the default), we perform + // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for + // groups g>1) filters across the spatial axes (H, W) of the input. + // With (N, C, D, H, W) inputs, and axis == 1, we perform + // N independent 3D convolutions, sliding (C/g)-channels + // filters across the spatial axes (D, H, W) of the input. + optional int32 axis = 19 [default = 1]; } message DataParameter { @@ -690,6 +703,7 @@ message PoolingParameter { optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) optional uint32 stride_h = 7; // The stride height optional uint32 stride_w = 8; // The stride width + optional uint32 stride_d = 16; // The stride depth enum Engine { DEFAULT = 0; CAFFE = 1; diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 501e2fe4e01..8be171c91ce 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -2,6 +2,7 @@ #include #include #include +#include #include "caffe/common.hpp" #include "caffe/util/im2col.hpp" @@ -58,9 +59,6 @@ void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; - // LOG(INFO) << "ext_height = " << ext_kernel_h; - // LOG(INFO) << "ext_width = " << ext_kernel_w; - // NOLINT_NEXT_LINE(whitespace/operators) im2col_sk_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( @@ -316,5 +314,226 @@ template void col2im_gpu(const double* data_col, const int channels, const int stride_h, const int stride_w, double* data_im); + +template +__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, + const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_col) { + int d_temp[num_axes]; // NOLINT(runtime/arrays) + int d_iter[num_axes]; // NOLINT(runtime/arrays) + int i; + CUDA_KERNEL_LOOP(index, n) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + Dtype* data_col_ptr = data_col + channel_out; + const Dtype* data_im_ptr = data_im + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { break; } + } + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } // CUDA_KERNEL_LOOP(index, n) +} + +template +__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col, + const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_im) { + int d_im[num_axes]; // NOLINT(runtime/arrays) + int d_col_iter[num_axes]; // NOLINT(runtime/arrays) + int d_col_start[num_axes]; // NOLINT(runtime/arrays) + int d_col_end[num_axes]; // NOLINT(runtime/arrays) + CUDA_KERNEL_LOOP(index, n) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += + (d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} + +template +void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, Dtype* data_col) { + switch (num_spatial_axes) { + case 1: + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( + num_kernels, data_im, im_shape, col_shape, + kernel_shape, pad, stride, data_col); + break; + case 2: + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( + num_kernels, data_im, im_shape, col_shape, + kernel_shape, pad, stride, data_col); + break; + case 3: + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( + num_kernels, data_im, im_shape, col_shape, + kernel_shape, pad, stride, data_col); + break; + default: { + LOG(FATAL) << "im2col_nd_gpu does not support computation with " + << num_spatial_axes << " spatial axes"; + } + } + CUDA_POST_KERNEL_CHECK; +} + +// Explicit instantiation +template void im2col_nd_gpu(const float* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + float* data_col); +template void im2col_nd_gpu(const double* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + double* data_col); + + +template +void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, + const int im_size, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_im) { + switch (num_spatial_axes) { + case 1: + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( + im_size, data_col, im_shape, col_shape, + kernel_shape, pad, stride, data_im); + break; + case 2: + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( + im_size, data_col, im_shape, col_shape, + kernel_shape, pad, stride, data_im); + break; + case 3: + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( + im_size, data_col, im_shape, col_shape, + kernel_shape, pad, stride, data_im); + break; + default: { + LOG(FATAL) << "im2col_gpu does not support computation with " + << num_spatial_axes << " spatial axes"; + } + } + CUDA_POST_KERNEL_CHECK; +} + +// Explicit instantiation +template void col2im_nd_gpu(const float* data_col, const int num_spatial_axes, + const int im_size, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + float* data_im); +template void col2im_nd_gpu(const double* data_col, const int num_spatial_axes, + const int im_size, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + double* data_im); + #endif // USE_CUDA } // namespace caffe diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 4714158ca09..3425f5f548c 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -193,7 +193,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, } if (v0_layer_param.has_pad()) { if (type == "conv") { - layer_param->mutable_convolution_param()->set_pad(v0_layer_param.pad()); + layer_param->mutable_convolution_param()->add_pad(v0_layer_param.pad()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_pad(v0_layer_param.pad()); } else { @@ -203,7 +203,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, } if (v0_layer_param.has_kernelsize()) { if (type == "conv") { - layer_param->mutable_convolution_param()->set_kernel_size( + layer_param->mutable_convolution_param()->add_kernel_size( v0_layer_param.kernelsize()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_kernel_size( @@ -224,7 +224,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, } if (v0_layer_param.has_stride()) { if (type == "conv") { - layer_param->mutable_convolution_param()->set_stride( + layer_param->mutable_convolution_param()->add_stride( v0_layer_param.stride()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_stride( From ba8e709ca6a2d99edf0c1488193a9dacc58d1a66 Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 30 Jun 2015 11:34:31 -0400 Subject: [PATCH 092/254] CPU only copy. --- src/caffe/layers/base_data_layer.cpp | 4 ++-- src/caffe/layers/concat_layer.cpp | 4 ++-- src/caffe/layers/dropout_layer.cpp | 4 ++-- src/caffe/layers/eltwise_layer.cpp | 4 ++-- src/caffe/layers/filter_layer.cpp | 4 ++-- src/caffe/layers/log_layer.cpp | 4 ++-- src/caffe/layers/lrn_layer.cpp | 3 ++- src/caffe/layers/mvn_layer.cpp | 2 +- src/caffe/layers/power_layer.cpp | 4 ++-- src/caffe/layers/prelu_layer.cpp | 2 +- src/caffe/layers/slice_layer.cpp | 4 ++-- src/caffe/layers/softmax_layer.cpp | 6 +++--- src/caffe/layers/softmax_loss_layer.cpp | 2 +- src/caffe/layers/split_layer.cpp | 2 +- src/caffe/solver.cpp | 7 ++++--- src/caffe/test/test_math_functions.cpp | 2 +- 16 files changed, 30 insertions(+), 28 deletions(-) diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 441dbf10ecb..7258b9fd60b 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -65,14 +65,14 @@ void BasePrefetchingDataLayer::Forward_cpu( // Reshape to loaded data. top[0]->ReshapeLike(prefetch_data_); // Copy the data - caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), + caffe_cpu_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(prefetch_label_); // Copy the labels. - caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), + caffe_cpu_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_cpu_data()); } // Start a new prefetch thread diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 1cac8fc3387..409b35cf5f9 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -60,7 +60,7 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->cpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, + caffe_cpu_copy(bottom_concat_axis * concat_input_size_, bottom_data + n * bottom_concat_axis * concat_input_size_, top_data + (n * top_concat_axis + offset_concat_axis) * concat_input_size_); @@ -80,7 +80,7 @@ void ConcatLayer::Backward_cpu(const vector*>& top, Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + caffe_cpu_copy(bottom_concat_axis * concat_input_size_, top_diff + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, bottom_diff + n * bottom_concat_axis * concat_input_size_); } diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index ec1256fd2fa..1c3f2c216d6 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -44,7 +44,7 @@ void DropoutLayer::Forward_cpu(const vector*>& bottom, top_data[i] = bottom_data[i] * mask[i] * scale_; } } else { - caffe_copy(bottom[0]->count(), bottom_data, top_data); + caffe_cpu_copy(bottom[0]->count(), bottom_data, top_data); } } @@ -62,7 +62,7 @@ void DropoutLayer::Backward_cpu(const vector*>& top, bottom_diff[i] = top_diff[i] * mask[i] * scale_; } } else { - caffe_copy(top[0]->count(), top_diff, bottom_diff); + caffe_cpu_copy(top[0]->count(), top_diff, bottom_diff); } } } diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index a80700736bd..e2e0be79587 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -115,7 +115,7 @@ void EltwiseLayer::Backward_cpu(const vector*>& top, for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { - caffe_copy(count, bottom[j]->cpu_data(), bottom_diff); + caffe_cpu_copy(count, bottom[j]->cpu_data(), bottom_diff); initialized = true; } else { caffe_mul(count, bottom[j]->cpu_data(), bottom_diff, @@ -129,7 +129,7 @@ void EltwiseLayer::Backward_cpu(const vector*>& top, break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1)) { - caffe_copy(count, top_diff, bottom_diff); + caffe_cpu_copy(count, top_diff, bottom_diff); } else { caffe_cpu_scale(count, coeffs_[i], top_diff, bottom_diff); } diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index be1db32dbaa..7a2d91fbe19 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -71,7 +71,7 @@ void FilterLayer::Forward_cpu(const vector*>& bottom, for (int n = 0; n < new_tops_num; ++n) { int data_offset_top = n * dim; int data_offset_bottom = indices_to_forward_[n] * bottom[t]->count(1); - caffe_copy(dim, bottom_data + data_offset_bottom, + caffe_cpu_copy(dim, bottom_data + data_offset_bottom, top_data + data_offset_top); } } @@ -108,7 +108,7 @@ void FilterLayer::Backward_cpu(const vector*>& top, } else { // this data was been forwarded data_offset_top = next_to_backward_offset * dim; next_to_backward_offset++; // point to next forwarded item index - caffe_copy(dim, top[i]->mutable_cpu_diff() + data_offset_top, + caffe_cpu_copy(dim, top[i]->mutable_cpu_diff() + data_offset_top, bottom[i]->mutable_cpu_diff() + data_offset_bottom); } } diff --git a/src/caffe/layers/log_layer.cpp b/src/caffe/layers/log_layer.cpp index 55a227f6226..3ca25d0946f 100644 --- a/src/caffe/layers/log_layer.cpp +++ b/src/caffe/layers/log_layer.cpp @@ -41,7 +41,7 @@ void LogLayer::Forward_cpu(const vector*>& bottom, if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { caffe_log(count, bottom_data, top_data); } else { - caffe_copy(count, bottom_data, top_data); + caffe_cpu_copy(count, bottom_data, top_data); if (input_scale_ != Dtype(1)) { caffe_scal(count, input_scale_, top_data); } @@ -63,7 +63,7 @@ void LogLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - caffe_copy(count, bottom_data, bottom_diff); + caffe_cpu_copy(count, bottom_data, bottom_diff); if (input_scale_ != Dtype(1)) { caffe_scal(count, input_scale_, bottom_diff); } diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index cb6961cebac..08821ef3d79 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -133,7 +133,8 @@ void LRNLayer::CrossChannelForward_cpu( } for (int c = 1; c < channels_; ++c) { // copy previous scale - caffe_copy(height_ * width_, scale_data + scale_.offset(n, c - 1), + caffe_cpu_copy(height_ * width_, + scale_data + scale_.offset(n, c - 1), scale_data + scale_.offset(n, c)); // add head caffe_axpy( diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 3e79bddcdde..ab645ce0bb3 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -130,7 +130,7 @@ void MVNLayer::Backward_cpu(const vector*>& top, caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff); } else { - caffe_copy(temp_.count(), top_diff, bottom_diff); + caffe_cpu_copy(temp_.count(), top_diff, bottom_diff); } } diff --git a/src/caffe/layers/power_layer.cpp b/src/caffe/layers/power_layer.cpp index 4fe34c49f32..347d9a12aeb 100644 --- a/src/caffe/layers/power_layer.cpp +++ b/src/caffe/layers/power_layer.cpp @@ -30,7 +30,7 @@ void PowerLayer::Forward_cpu(const vector*>& bottom, return; } const Dtype* bottom_data = bottom[0]->cpu_data(); - caffe_copy(count, bottom_data, top_data); + caffe_cpu_copy(count, bottom_data, top_data); if (scale_ != Dtype(1)) { caffe_scal(count, scale_, top_data); } @@ -74,7 +74,7 @@ void PowerLayer::Backward_cpu(const vector*>& top, caffe_div(count, top_data, bottom_data, bottom_diff); caffe_scal(count, power_, bottom_diff); } else { - caffe_copy(count, bottom_data, bottom_diff); + caffe_cpu_copy(count, bottom_data, bottom_diff); if (scale_ != Dtype(1)) { caffe_scal(count, scale_, bottom_diff); } diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index ac59792c996..5ec4d9bd61b 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -76,7 +76,7 @@ void PReLULayer::Forward_cpu(const vector*>& bottom, // For in-place computation if (bottom[0] == top[0]) { - caffe_copy(count, bottom_data, bottom_memory_.mutable_cpu_data()); + caffe_cpu_copy(count, bottom_data, bottom_memory_.mutable_cpu_data()); } // if channel_shared, channel index in the following computation becomes diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index e4418c9cf9c..418361f8cf8 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -82,7 +82,7 @@ void SliceLayer::Forward_cpu(const vector*>& bottom, const int top_offset = n * top_slice_axis * slice_size_; const int bottom_offset = (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, + caffe_cpu_copy(top_slice_axis * slice_size_, bottom_data + bottom_offset, top_data + top_offset); } offset_slice_axis += top_slice_axis; @@ -103,7 +103,7 @@ void SliceLayer::Backward_cpu(const vector*>& top, const int top_offset = n * top_slice_axis * slice_size_; const int bottom_offset = (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, + caffe_cpu_copy(top_slice_axis * slice_size_, top_diff + top_offset, bottom_diff + bottom_offset); } offset_slice_axis += top_slice_axis; diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index 04712c9e653..fbd378102f6 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -32,12 +32,12 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, Dtype* scale_data = scale_.mutable_cpu_data(); int channels = bottom[0]->shape(softmax_axis_); int dim = bottom[0]->count() / outer_num_; - caffe_copy(bottom[0]->count(), bottom_data, top_data); + caffe_cpu_copy(bottom[0]->count(), bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. for (int i = 0; i < outer_num_; ++i) { // initialize scale_data to the first plane - caffe_copy(inner_num_, bottom_data + i * dim, scale_data); + caffe_cpu_copy(inner_num_, bottom_data + i * dim, scale_data); for (int j = 0; j < channels; j++) { for (int k = 0; k < inner_num_; k++) { scale_data[k] = std::max(scale_data[k], @@ -70,7 +70,7 @@ void SoftmaxLayer::Backward_cpu(const vector*>& top, Dtype* scale_data = scale_.mutable_cpu_data(); int channels = top[0]->shape(softmax_axis_); int dim = top[0]->count() / outer_num_; - caffe_copy(top[0]->count(), top_diff, bottom_diff); + caffe_cpu_copy(top[0]->count(), top_diff, bottom_diff); for (int i = 0; i < outer_num_; ++i) { // compute dot(top_diff, top_data) and subtract them from the bottom diff for (int k = 0; k < inner_num_; ++k) { diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index fede08a4855..3c303cd0cae 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -93,7 +93,7 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const Dtype* prob_data = prob_.cpu_data(); - caffe_copy(prob_.count(), prob_data, bottom_diff); + caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); const Dtype* label = bottom[1]->cpu_data(); int dim = prob_.count() / outer_num_; int count = 0; diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 272cb59cd37..59a821976c8 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -36,7 +36,7 @@ void SplitLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } if (top.size() == 1) { - caffe_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff()); + caffe_cpu_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff()); return; } caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(), diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index d8e53f46e33..9c7dce7e2e2 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -652,7 +652,8 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { caffe_cpu_axpby(net_params[param_id]->count(), local_rate, net_params[param_id]->cpu_diff(), momentum, history_[param_id]->mutable_cpu_data()); - caffe_copy(net_params[param_id]->count(), history_[param_id]->cpu_data(), + caffe_cpu_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), net_params[param_id]->mutable_cpu_diff()); break; } @@ -722,7 +723,7 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { switch (Caffe::mode()) { case Caffe::CPU: { // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), + caffe_cpu_copy(net_params[param_id]->count(), this->history_[param_id]->cpu_data(), this->update_[param_id]->mutable_cpu_data()); @@ -737,7 +738,7 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { this->update_[param_id]->mutable_cpu_data()); // copy - caffe_copy(net_params[param_id]->count(), + caffe_cpu_copy(net_params[param_id]->count(), this->update_[param_id]->cpu_data(), net_params[param_id]->mutable_cpu_diff()); break; diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 536cb200fd2..211318cf9b9 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -149,7 +149,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); - caffe_copy(n, bottom_data, top_data); + caffe_cpu_copy(n, bottom_data, top_data); for (int i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); } From e7204a062a43868c8b9ce9378bc98169abfb0700 Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 30 Jun 2015 12:54:38 -0400 Subject: [PATCH 093/254] Test fix for ND kernels. --- src/caffe/layers/base_conv_layer.cpp | 6 +- src/caffe/test/test_convolution_layer.cpp | 67 +++++++++++---------- src/caffe/test/test_deconvolution_layer.cpp | 12 ++-- src/caffe/test/test_im2col_layer.cpp | 16 ++--- 4 files changed, 52 insertions(+), 49 deletions(-) diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index b7654384763..b9883d804cf 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -37,7 +37,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, << "Stride is stride OR stride_h and stride_w are required."; if (conv_param.kernel_size_size() > 0) { kernel_h_ = kernel_w_ = conv_param.kernel_size_size() > 0 ? - conv_param.kernel_size().Get(0) : 1; + conv_param.kernel_size(0) : 1; } else { kernel_h_ = conv_param.kernel_h(); kernel_w_ = conv_param.kernel_w(); @@ -46,14 +46,14 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; if (!conv_param.has_pad_h()) { pad_h_ = pad_w_ = conv_param.pad_size() > 0 ? - conv_param.pad().Get(0) : 0; + conv_param.pad(0) : 0; } else { pad_h_ = conv_param.pad_h(); pad_w_ = conv_param.pad_w(); } if (!conv_param.has_stride_h()) { stride_h_ = stride_w_ = conv_param.stride_size() > 0 ? - conv_param.stride().Get(0) : 1; + conv_param.stride(0) : 1; } else { stride_h_ = conv_param.stride_h(); stride_w_ = conv_param.stride_w(); diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 5eee3754b17..0ebd1cdf40e 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -21,22 +21,25 @@ void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, Blob* out) { // Kernel size, stride, and pad int kernel_h, kernel_w; - if (conv_param->has_kernel_size()) { - kernel_h = kernel_w = conv_param->kernel_size(); + if (conv_param->kernel_size_size() > 0) { + kernel_h = kernel_w = conv_param->kernel_size_size() > 0 ? + conv_param->kernel_size(0) : 1; } else { kernel_h = conv_param->kernel_h(); kernel_w = conv_param->kernel_w(); } int pad_h, pad_w; if (!conv_param->has_pad_h()) { - pad_h = pad_w = conv_param->pad(); + pad_h = pad_w = conv_param->pad_size() > 0 ? + conv_param->pad(0) : 0; } else { pad_h = conv_param->pad_h(); pad_w = conv_param->pad_w(); } int stride_h, stride_w; if (!conv_param->has_stride_h()) { - stride_h = stride_w = conv_param->stride(); + stride_h = stride_w = conv_param->stride_size() > 0 ? + conv_param->stride(0) : 1; } else { stride_h = conv_param->stride_h(); stride_w = conv_param->stride_w(); @@ -150,8 +153,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(4); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); @@ -188,8 +191,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("constant"); @@ -222,8 +225,8 @@ TYPED_TEST(ConvolutionLayerTest, Test1x1Convolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(1); - convolution_param->set_stride(1); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("constant"); @@ -249,8 +252,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); @@ -288,8 +291,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(1); convolution_param->set_bias_term(false); shared_ptr > layer( @@ -375,8 +378,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGradient) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(2); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); @@ -393,8 +396,8 @@ TYPED_TEST(ConvolutionLayerTest, Test1x1Gradient) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->set_kernel_size(1); - convolution_param->set_stride(1); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); convolution_param->set_num_output(2); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); @@ -409,8 +412,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); @@ -473,8 +476,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(4); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); @@ -512,8 +515,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("constant"); @@ -547,8 +550,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); @@ -588,8 +591,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(1); convolution_param->set_bias_term(false); shared_ptr > layer( @@ -676,8 +679,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(2); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); @@ -693,8 +696,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp index fc63d5efbe3..5b1d5d2f375 100644 --- a/src/caffe/test/test_deconvolution_layer.cpp +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -58,8 +58,8 @@ TYPED_TEST(DeconvolutionLayerTest, TestSetup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(4); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); @@ -96,8 +96,8 @@ TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("constant"); convolution_param->mutable_weight_filler()->set_value(1); @@ -144,8 +144,8 @@ TYPED_TEST(DeconvolutionLayerTest, TestGradient) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->set_kernel_size(2); - convolution_param->set_stride(1); + convolution_param->add_kernel_size(2); + convolution_param->add_stride(1); convolution_param->set_num_output(1); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index f50abe103f8..a9db55c1ca7 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -41,8 +41,8 @@ TYPED_TEST(Im2colLayerTest, TestSetup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); Im2colLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); @@ -56,8 +56,8 @@ TYPED_TEST(Im2colLayerTest, TestForward) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); Im2colLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -73,8 +73,8 @@ TYPED_TEST(Im2colLayerTest, TestGradient) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->set_kernel_size(3); - convolution_param->set_stride(2); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); Im2colLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, @@ -89,7 +89,7 @@ TYPED_TEST(Im2colLayerTest, TestRect) { layer_param.mutable_convolution_param(); convolution_param->set_kernel_h(5); convolution_param->set_kernel_w(3); - convolution_param->set_stride(2); + convolution_param->add_stride(2); Im2colLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -108,7 +108,7 @@ TYPED_TEST(Im2colLayerTest, TestRectGradient) { layer_param.mutable_convolution_param(); convolution_param->set_kernel_h(5); convolution_param->set_kernel_w(3); - convolution_param->set_stride(2); + convolution_param->add_stride(2); Im2colLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, From d66c1127afbc808f29174cb1ec08aef574ed8a9e Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 30 Jun 2015 22:14:42 -0400 Subject: [PATCH 094/254] Preparations for Malis Loss. --- include/caffe/loss_layers.hpp | 47 ++++ src/caffe/layers/im2col_layer.cpp | 6 +- src/caffe/layers/malis_loss_layer.cpp | 349 ++++++++++++++++++++++++++ 3 files changed, 399 insertions(+), 3 deletions(-) create mode 100644 src/caffe/layers/malis_loss_layer.cpp diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index ef48c0acd42..26fc45f4d12 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -1,6 +1,7 @@ #ifndef CAFFE_LOSS_LAYERS_HPP_ #define CAFFE_LOSS_LAYERS_HPP_ +#include #include #include #include @@ -763,6 +764,52 @@ class SoftmaxWithLossLayer : public LossLayer { int softmax_axis_, outer_num_, inner_num_; }; + +template +class MalisLossLayer : public LossLayer { + public: + explicit MalisLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MalisLoss"; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// The internal SoftmaxLayer used to map predictions to a distribution. + shared_ptr > softmax_layer_; + /// prob stores the output probability predictions from the SoftmaxLayer. + Blob prob_; + /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_bottom_vec_; + /// top vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_top_vec_; + + int softmax_axis_, outer_num_, inner_num_; + + private: + void FindBlobs(const cv::Mat &binary, + std::vector > &blobs); + + void Malis(Dtype* conn_data, int conn_num_dims, int* conn_dims, + int conn_num_elements, + Dtype* nhood_data, int nhood_num_dims, int* nhood_dims, + int* seg_data, int seg_num_dims, int* seg_dims, + int seg_num_elements, + bool pos, Dtype* dloss_data, Dtype* loss_out, + Dtype *classerr_out, Dtype *rand_index_out, + Dtype margin = 0.3); +}; + + } // namespace caffe #endif // CAFFE_LOSS_LAYERS_HPP_ diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index cc3fe413264..ed8992c0b48 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -27,7 +27,7 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, << "Stride is stride OR stride_h and stride_w are required."; if (conv_param.kernel_size_size() > 0) { kernel_h_ = kernel_w_ = conv_param.kernel_size_size() > 0 ? - conv_param.kernel_size().Get(0) : 1; + conv_param.kernel_size(0) : 1; } else { kernel_h_ = conv_param.kernel_h(); kernel_w_ = conv_param.kernel_w(); @@ -36,14 +36,14 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; if (!conv_param.has_pad_h()) { pad_h_ = pad_w_ = conv_param.pad_size() > 0 ? - conv_param.pad().Get(0) : 0; + conv_param.pad(0) : 0; } else { pad_h_ = conv_param.pad_h(); pad_w_ = conv_param.pad_w(); } if (!conv_param.has_stride_h()) { stride_h_ = stride_w_ = conv_param.stride_size() > 0 ? - conv_param.stride_size() : 1; + conv_param.stride(0) : 1; } else { stride_h_ = conv_param.stride_h(); stride_w_ = conv_param.stride_w(); diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp new file mode 100644 index 00000000000..c6358d8a143 --- /dev/null +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -0,0 +1,349 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +#include + +namespace caffe { + +template +class MalisAffinityGraphCompare{ + private: + const Dtype * mEdgeWeightArray; + public: + MalisAffinityGraphCompare(const Dtype * EdgeWeightArray){ + mEdgeWeightArray = EdgeWeightArray; + } + bool operator() (const int ind1, const int ind2) const { + return (mEdgeWeightArray[ind1] > mEdgeWeightArray[ind2]); + } +}; + + +// Derived from https://github.com/srinituraga/malis/blob/master/matlab/malis_loss_mex.cpp +// conn_data: 4d connectivity graph [y * x * z * #edges] +// nhood_data: graph neighborhood descriptor [3 * #edges] +// seg_data: true target segmentation [y * x * z] +// pos: is this a positive example pass [true] or a negative example pass [false] ? +// margin: sq-sq loss margin [0.3] +template +void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int* conn_dims, int conn_num_elements, + Dtype* nhood_data, int nhood_num_dims, int* nhood_dims, + int* seg_data, int seg_num_dims, int* seg_dims, int seg_num_elements, + bool pos, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, + Dtype margin) { + if (nhood_num_dims != 2) { + LOG(FATAL)<<"wrong size for nhood"; + } + if ((nhood_dims[1] != (conn_num_dims - 1)) + || (nhood_dims[0] != conn_dims[conn_num_dims - 1])) { + LOG(FATAL)<<"nhood and conn dimensions don't match"; + } + + /* Cache for speed to access neighbors */ + int nVert = 1; + for (int i = 0; i < conn_num_dims - 1; ++i) + nVert = nVert * conn_dims[i]; + + vector prodDims(conn_num_dims - 1); + prodDims[0] = 1; + for (int i = 1; i < conn_num_dims - 1; ++i) + prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; + + /* convert n-d offset vectors into linear array offset scalars */ + vector nHood(nhood_dims[0]); + for (int i = 0; i < nhood_dims[0]; ++i) { + nHood[i] = 0; + for (int j = 0; j < nhood_dims[1]; ++j) { + nHood[i] += (int32_t) nhood_data[i + j * nhood_dims[0]] * prodDims[j]; + } + } + + /* Disjoint sets and sparse overlap vectors */ + vector > overlap(nVert); + vector rank(nVert); + vector parent(nVert); + map segSizes; + int nLabeledVert = 0; + int nPairPos = 0; + boost::disjoint_sets dsets(&rank[0], &parent[0]); + for (int i = 0; i < nVert; ++i) { + dsets.make_set(i); + if (0 != seg_data[i]) { + overlap[i].insert(pair(seg_data[i], 1)); + ++nLabeledVert; + ++segSizes[seg_data[i]]; + nPairPos += (segSizes[seg_data[i]] - 1); + } + } + int nPairTot = (nLabeledVert * (nLabeledVert - 1)) / 2; + int nPairNeg = nPairTot - nPairPos; + int nPairNorm; + if (pos) { + nPairNorm = nPairPos; + } else { + nPairNorm = nPairNeg; + } + + /* Sort all the edges in increasing order of weight */ + std::vector pqueue( + static_cast(3) * (conn_dims[0] - 1) * (conn_dims[1] - 1) + * (conn_dims[2] - 1)); + int j = 0; + for (int d = 0, i = 0; d < conn_dims[3]; ++d) + for (int z = 0; z < conn_dims[2]; ++z) + for (int y = 0; y < conn_dims[1]; ++y) + for (int x = 0; x < conn_dims[0]; ++x, ++i) { + if (x > 0 && y > 0 && z > 0) + pqueue[j++] = i; + } + sort(pqueue.begin(), pqueue.end(), MalisAffinityGraphCompare(conn_data)); + + /* Start MST */ + int minEdge; + int e, v1, v2; + int set1, set2; + int nPair = 0; + double loss = 0, dl = 0; + int nPairIncorrect = 0; + map::iterator it1, it2; + + /* Start Kruskal's */ + for (int i = 0; i < pqueue.size(); ++i) { + minEdge = pqueue[i]; + e = minEdge / nVert; + v1 = minEdge % nVert; + v2 = v1 + nHood[e]; + + set1 = dsets.find_set(v1); + set2 = dsets.find_set(v2); + if (set1 != set2) { + dsets.link(set1, set2); + + /* compute the dloss for this MST edge */ + for (it1 = overlap[set1].begin(); it1 != overlap[set1].end(); ++it1) { + for (it2 = overlap[set2].begin(); it2 != overlap[set2].end(); ++it2) { + + nPair = it1->second * it2->second; + + if (pos && (it1->first == it2->first)) { + // +ve example pairs + // Sq-Sq loss is used here + dl = std::max(0.0, 0.5 + margin - conn_data[minEdge]); + loss += 0.5 * dl * dl * nPair; + dloss_data[minEdge] += dl * nPair; + if (conn_data[minEdge] <= 0.5) { // an error + nPairIncorrect += nPair; + } + + } else if ((!pos) && (it1->first != it2->first)) { + // -ve example pairs + // Sq-Sq loss is used here + dl = -std::max(0.0, conn_data[minEdge] - 0.5 + margin); + loss += 0.5 * dl * dl * nPair; + dloss_data[minEdge] += dl * nPair; + if (conn_data[minEdge] > 0.5) { // an error + nPairIncorrect += nPair; + } + } + } + } + dloss_data[minEdge] /= nPairNorm; + /* HARD-CODED ALERT!! + * The derivative of the activation function is also multiplied here. + * Assumes the logistic nonlinear activation function. + */ + dloss_data[minEdge] *= conn_data[minEdge] * (1 - conn_data[minEdge]); // DSigmoid + + /* move the pixel bags of the non-representative to the representative */ + if (dsets.find_set(set1) == set2) // make set1 the rep to keep and set2 the rep to empty + std::swap(set1, set2); + + it2 = overlap[set2].begin(); + while (it2 != overlap[set2].end()) { + it1 = overlap[set1].find(it2->first); + if (it1 == overlap[set1].end()) { + overlap[set1].insert(pair(it2->first, it2->second)); + } else { + it1->second += it2->second; + } + overlap[set2].erase(it2++); + } + } // end link + } // end while + + /* Return items */ + double classerr, randIndex; + loss /= nPairNorm; + *loss_out = loss; + classerr = (double) nPairIncorrect / (double) nPairNorm; + *classerr_out = classerr; + randIndex = 1.0 - ((double) nPairIncorrect / (double) nPairNorm); + *rand_index_out = randIndex; +} + + +// Derived from http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp +template +void MalisLossLayer::FindBlobs(const cv::Mat &binary, + std::vector > &blobs) { + blobs.clear(); + +// Fill the label_image with the blobs +// 0 - background +// 1 - unlabelled foreground +// 2+ - labelled foreground + + cv::Mat label_image; + binary.convertTo(label_image, CV_32SC1); + + int label_count = 2; // starts at 2 because 0,1 are used already + + for (int y = 0; y < label_image.rows; y++) { + int *row = (int*) label_image.ptr(y); + for (int x = 0; x < label_image.cols; x++) { + if (row[x] != 1) { + continue; + } + + cv::Rect rect; + cv::floodFill(label_image, cv::Point(x, y), label_count, &rect, 0, 0, 4); + + std::vector blob; + + for (int i = rect.y; i < (rect.y + rect.height); i++) { + int *row2 = (int*) label_image.ptr(i); + for (int j = rect.x; j < (rect.x + rect.width); j++) { + if (row2[j] != label_count) { + continue; + } + + blob.push_back(cv::Point2i(j, i)); + } + } + + blobs.push_back(blob); + + label_count++; + } + } +} + + + +template +void MalisLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + + // Set up the softmax layer + LayerParameter softmax_param(this->layer_param_); + softmax_param.set_type("Softmax"); + softmax_layer_ = LayerRegistry::CreateLayer(softmax_param); + softmax_bottom_vec_.clear(); + softmax_bottom_vec_.push_back(bottom[0]); + softmax_top_vec_.clear(); + softmax_top_vec_.push_back(&prob_); + softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_); +} + +template +void MalisLossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + LossLayer::Reshape(bottom, top); + softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); + softmax_axis_ = + bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); + outer_num_ = bottom[0]->count(0, softmax_axis_); + inner_num_ = bottom[0]->count(softmax_axis_ + 1); + CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) + << "Number of labels must match number of predictions; " + << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), " + << "label count (number of labels) must be N*H*W, " + << "with integer values in {0, 1, ..., C-1}."; + if (top.size() >= 2) { + // softmax output + top[1]->ReshapeLike(*bottom[0]); + } +} + +template +void MalisLossLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + // The forward pass computes the softmax prob values. + softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); + const Dtype* prob_data = prob_.cpu_data(); + const Dtype* label = bottom[1]->cpu_data(); + int dim = prob_.count() / outer_num_; + int count = 0; + Dtype loss = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; j++) { + const int label_value = static_cast(label[i * inner_num_ + j]); + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, prob_.shape(softmax_axis_)); + loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j], + Dtype(FLT_MIN))); + ++count; + } + } + top[0]->mutable_cpu_data()[0] = loss / count; + if (top.size() == 2) { + top[1]->ShareData(prob_); + } +} + +template +void MalisLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + + if (propagate_down[0]) { + + // Diff to propagate to (size w * h * c) + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + + // The predictions (size w * h * c) + const Dtype* prob_data = prob_.cpu_data(); + + // Labels (size w * h, c values) + const Dtype* label = bottom[1]->cpu_data(); + + + caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); + int dim = prob_.count() / outer_num_; + int count = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = static_cast(label[i * inner_num_ + j]); + bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; + ++count; + } + } + // Scale gradient + const Dtype loss_weight = top[0]->cpu_diff()[0]; + caffe_scal(prob_.count(), loss_weight / count, bottom_diff); + } +} + + +INSTANTIATE_CLASS(MalisLossLayer); +REGISTER_LAYER_CLASS(MalisLoss); + +} // namespace caffe From b041f84c09712cfb91873266e61ba33e8b204bdb Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 30 Jun 2015 22:18:44 -0400 Subject: [PATCH 095/254] Cleanup after merge with bvlc::master. --- include/caffe/filler.hpp | 57 ---------------------------------------- 1 file changed, 57 deletions(-) diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index 2eeb2417a99..5475fa326a2 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -179,61 +179,6 @@ class XavierFiller : public Filler { }; - -/*! -@brief Fills a Blob with coefficients of bilinear interpolation for upsampling. -This is intended to be used in DeconvolutionLayer acting as UpsamplingLayer. -You can upsample a feature map with shape of (B, C, H, W) by any integer factor -using the following proto. -\code -layer { - name: "upsample", type: "Deconvolution" - bottom: "{{bottom_name}}" top: "{{top_name}}" - convolution_param { - kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} - num_output: {{C}} group: {{C}} - pad: {{ceil((factor - 1) / 2.)}} - weight_filler: { type: "bilinear_upsampling" } bias_term: false - } - param { lr_mult: 0 decay_mult: 0 } -} -\endcode -Please use this by replacing `{{}}` with your values. By specifying -`num_output: {{C}} group: {{C}}`, it behaves as -channel-wise convolution. The filter shape of this deconvolution layer will be -(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) -interpolation kernel for every channel of the filter identically. The resulting -shape of the top feature map will be (B, C, factor * H, factor * W). -Note that the learning rate and the -weight decay are set to 0 in order to keep coefficient values of bilinear -interpolation unchanged during training. If you apply this to an image, this -operation is equivalent to the following call in Python with Scikit.Image. -\code{.py} -out = skimage.transform.rescale(img, factor, mode='constant', cval=0) -\endcode - */ -template -class BilinearFiller : public Filler { - public: - explicit BilinearFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; - CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; - Dtype* data = blob->mutable_cpu_data(); - int f = ceil(blob->width() / 2.); - float c = (2 * f - 1 - f % 2) / (2. * f); - for (int i = 0; i < blob->count(); ++i) { - float x = i % blob->width(); - float y = (i / blob->width()) % blob->height(); - data[i] = ((1.0 - fabs(x / f - c)) * (1.0 - fabs(y / f - c))); - } - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - - /** * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming @@ -354,8 +299,6 @@ Filler* GetFiller(const FillerParameter& param) { return new BilinearFiller(param); } else if (type == "msra") { return new MSRAFiller(param); - } else if (type == "bilinear") { - return new BilinearFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } From 0e7540b09a3946322bc9df009cf463d572ea5802 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 1 Jul 2015 16:03:52 -0400 Subject: [PATCH 096/254] Malis loss layer for testing. --- Makefile | 4 +- include/caffe/loss_layers.hpp | 20 +- src/caffe/layers/malis_loss_layer.cpp | 354 +++++++++++++++++++------- 3 files changed, 270 insertions(+), 108 deletions(-) diff --git a/Makefile b/Makefile index b82ca592ee7..7d0cd050768 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ ifeq ($(CPU_ONLY),1) USE_GREENTEA := 0 endif -CXXFLAGS += -std=c++11 -Wno-deprecated-declarations -LINKFLAGS += -std=c++11 -Wno-deprecated-declarations +CXXFLAGS += -std=c++11 -fopenmp -Wno-deprecated-declarations +LINKFLAGS += -std=c++11 -fopenmp -Wno-deprecated-declarations NVCCFLAGS += -Xcompiler "-Wno-deprecated-declarations" -Xlinker "-Wno-deprecated-declarations" -Xarchive "-Wno-deprecated-declarations" -Xnvlink "-Wno-deprecated-declarations" BUILD_DIR_LINK := $(BUILD_DIR) diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 26fc45f4d12..90e12aa24d8 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -704,7 +704,7 @@ class SoftmaxWithLossLayer : public LossLayer { virtual inline const char* type() const { return "SoftmaxWithLoss"; } virtual inline int ExactNumTopBlobs() const { return -1; } virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 3; } + virtual inline int MaxTopBlobs() const { return 2; } protected: /// @copydoc SoftmaxWithLossLayer @@ -776,7 +776,9 @@ class MalisLossLayer : public LossLayer { const vector*>& top); virtual inline const char* type() const { return "MalisLoss"; } - virtual inline int ExactNumTopBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -795,15 +797,17 @@ class MalisLossLayer : public LossLayer { int softmax_axis_, outer_num_, inner_num_; + int conn_num_dims_; + std::vector conn_dims_; + std::vector nhood_data_; + std::vector nhood_dims_; + private: - void FindBlobs(const cv::Mat &binary, - std::vector > &blobs); + cv::Mat FindBlobs(const cv::Mat &input, + std::vector > *blobs); void Malis(Dtype* conn_data, int conn_num_dims, int* conn_dims, - int conn_num_elements, - Dtype* nhood_data, int nhood_num_dims, int* nhood_dims, - int* seg_data, int seg_num_dims, int* seg_dims, - int seg_num_elements, + int* nhood_data, int* nhood_dims, int* seg_data, bool pos, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, Dtype margin = 0.3); diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index c6358d8a143..1b139d31056 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -1,66 +1,66 @@ +#include +#include +#include + #include #include -#include -#include -#include #include -#include -#include -#include +#include #include +#include +#include +#include + + #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#include - namespace caffe { -template -class MalisAffinityGraphCompare{ - private: +template +class MalisAffinityGraphCompare { + private: const Dtype * mEdgeWeightArray; - public: - MalisAffinityGraphCompare(const Dtype * EdgeWeightArray){ - mEdgeWeightArray = EdgeWeightArray; - } - bool operator() (const int ind1, const int ind2) const { - return (mEdgeWeightArray[ind1] > mEdgeWeightArray[ind2]); - } + public: + explicit MalisAffinityGraphCompare(const Dtype * EdgeWeightArray) { + mEdgeWeightArray = EdgeWeightArray; + } + bool operator()(const int ind1, const int ind2) const { + return (mEdgeWeightArray[ind1] > mEdgeWeightArray[ind2]); + } }; - // Derived from https://github.com/srinituraga/malis/blob/master/matlab/malis_loss_mex.cpp // conn_data: 4d connectivity graph [y * x * z * #edges] // nhood_data: graph neighborhood descriptor [3 * #edges] // seg_data: true target segmentation [y * x * z] -// pos: is this a positive example pass [true] or a negative example pass [false] ? +// pos: is this a positive example pass [true] or +// a negative example pass [false] ? // margin: sq-sq loss margin [0.3] -template -void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int* conn_dims, int conn_num_elements, - Dtype* nhood_data, int nhood_num_dims, int* nhood_dims, - int* seg_data, int seg_num_dims, int* seg_dims, int seg_num_elements, - bool pos, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, +template +void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, + int* conn_dims, int* nhood_data, + int* nhood_dims, int* seg_data, bool pos, + Dtype* dloss_data, Dtype* loss_out, + Dtype *classerr_out, Dtype *rand_index_out, Dtype margin) { - if (nhood_num_dims != 2) { - LOG(FATAL)<<"wrong size for nhood"; - } if ((nhood_dims[1] != (conn_num_dims - 1)) || (nhood_dims[0] != conn_dims[conn_num_dims - 1])) { - LOG(FATAL)<<"nhood and conn dimensions don't match"; + LOG(FATAL) << "nhood and conn dimensions don't match"; } /* Cache for speed to access neighbors */ int nVert = 1; for (int i = 0; i < conn_num_dims - 1; ++i) - nVert = nVert * conn_dims[i]; + nVert = nVert * conn_dims[i]; vector prodDims(conn_num_dims - 1); prodDims[0] = 1; for (int i = 1; i < conn_num_dims - 1; ++i) - prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; + prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; /* convert n-d offset vectors into linear array offset scalars */ vector nHood(nhood_dims[0]); @@ -100,16 +100,17 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int* conn /* Sort all the edges in increasing order of weight */ std::vector pqueue( static_cast(3) * (conn_dims[0] - 1) * (conn_dims[1] - 1) - * (conn_dims[2] - 1)); + * (conn_dims[2] - 1)); int j = 0; for (int d = 0, i = 0; d < conn_dims[3]; ++d) - for (int z = 0; z < conn_dims[2]; ++z) - for (int y = 0; y < conn_dims[1]; ++y) - for (int x = 0; x < conn_dims[0]; ++x, ++i) { - if (x > 0 && y > 0 && z > 0) - pqueue[j++] = i; - } - sort(pqueue.begin(), pqueue.end(), MalisAffinityGraphCompare(conn_data)); + for (int z = 0; z < conn_dims[2]; ++z) + for (int y = 0; y < conn_dims[1]; ++y) + for (int x = 0; x < conn_dims[0]; ++x, ++i) { + if (x > 0 && y > 0 && z > 0) + pqueue[j++] = i; + } + sort(pqueue.begin(), pqueue.end(), + MalisAffinityGraphCompare(conn_data)); /* Start MST */ int minEdge; @@ -135,7 +136,6 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int* conn /* compute the dloss for this MST edge */ for (it1 = overlap[set1].begin(); it1 != overlap[set1].end(); ++it1) { for (it2 = overlap[set2].begin(); it2 != overlap[set2].end(); ++it2) { - nPair = it1->second * it2->second; if (pos && (it1->first == it2->first)) { @@ -165,11 +165,14 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int* conn * The derivative of the activation function is also multiplied here. * Assumes the logistic nonlinear activation function. */ - dloss_data[minEdge] *= conn_data[minEdge] * (1 - conn_data[minEdge]); // DSigmoid - + // dloss_data[minEdge] *= + // conn_data[minEdge] * (1 - conn_data[minEdge]); // DSigmoid + // Don't pre-multiply derivative, will be done + // later in the softmax backward /* move the pixel bags of the non-representative to the representative */ - if (dsets.find_set(set1) == set2) // make set1 the rep to keep and set2 the rep to empty - std::swap(set1, set2); + // make set1 the rep to keep and set2 the rep to empty + if (dsets.find_set(set1) == set2) + std::swap(set1, set2); it2 = overlap[set2].begin(); while (it2 != overlap[set2].end()) { @@ -188,33 +191,32 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int* conn double classerr, randIndex; loss /= nPairNorm; *loss_out = loss; - classerr = (double) nPairIncorrect / (double) nPairNorm; + classerr = static_cast(nPairIncorrect) + / static_cast(nPairNorm); *classerr_out = classerr; - randIndex = 1.0 - ((double) nPairIncorrect / (double) nPairNorm); + randIndex = 1.0 - static_cast(nPairIncorrect) + / static_cast(nPairNorm); *rand_index_out = randIndex; } +// Derived from +// http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp +template +cv::Mat MalisLossLayer::FindBlobs( + const cv::Mat &input, std::vector > *blobs) { + blobs->clear(); -// Derived from http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp -template -void MalisLossLayer::FindBlobs(const cv::Mat &binary, - std::vector > &blobs) { - blobs.clear(); - -// Fill the label_image with the blobs -// 0 - background -// 1 - unlabelled foreground -// 2+ - labelled foreground + // Fill the label_image with the blobs cv::Mat label_image; - binary.convertTo(label_image, CV_32SC1); + input.convertTo(label_image, CV_32SC1); - int label_count = 2; // starts at 2 because 0,1 are used already + int label_count = 2; for (int y = 0; y < label_image.rows; y++) { - int *row = (int*) label_image.ptr(y); + int *row = reinterpret_cast(label_image.ptr(y)); for (int x = 0; x < label_image.cols; x++) { - if (row[x] != 1) { + if (row[x] > 1) { continue; } @@ -223,29 +225,30 @@ void MalisLossLayer::FindBlobs(const cv::Mat &binary, std::vector blob; +#pragma omp parallel for for (int i = rect.y; i < (rect.y + rect.height); i++) { - int *row2 = (int*) label_image.ptr(i); + int *row2 = reinterpret_cast(label_image.ptr(i)); for (int j = rect.x; j < (rect.x + rect.width); j++) { if (row2[j] != label_count) { continue; } - +#pragma omp critical blob.push_back(cv::Point2i(j, i)); } } - blobs.push_back(blob); + blobs->push_back(blob); label_count++; } } -} - + return label_image; +} -template -void MalisLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { +template +void MalisLossLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { LossLayer::LayerSetUp(bottom, top); // Set up the softmax layer @@ -259,29 +262,47 @@ void MalisLossLayer::LayerSetUp( softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_); } -template -void MalisLossLayer::Reshape( - const vector*>& bottom, const vector*>& top) { +template +void MalisLossLayer::Reshape(const vector*>& bottom, + const vector*>& top) { LossLayer::Reshape(bottom, top); softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); - softmax_axis_ = - bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); + softmax_axis_ = bottom[0]->CanonicalAxisIndex( + this->layer_param_.softmax_param().axis()); outer_num_ = bottom[0]->count(0, softmax_axis_); inner_num_ = bottom[0]->count(softmax_axis_ + 1); CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) - << "Number of labels must match number of predictions; " - << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), " - << "label count (number of labels) must be N*H*W, " - << "with integer values in {0, 1, ..., C-1}."; + << "Number of labels must match number of predictions; " + << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), " + << "label count (number of labels) must be N*H*W, " + << "with integer values in {0, 1, ..., C-1}."; if (top.size() >= 2) { // softmax output top[1]->ReshapeLike(*bottom[0]); } + + conn_num_dims_ = 4; + conn_dims_.push_back(bottom[0]->width() - 1); // X-axis + conn_dims_.push_back(bottom[0]->height() - 1); // Y-axis + conn_dims_.push_back(1); // Z-axis + conn_dims_.push_back(2); // #edges + + nhood_dims_.push_back(2); // #edges + nhood_dims_.push_back(3); // 3 dimensional + + nhood_data_.push_back(1); // Edge 1, X + nhood_data_.push_back(0); // Edge 2, X + + nhood_data_.push_back(0); // Edge 1, Y + nhood_data_.push_back(1); // Edge 2, Y + + nhood_data_.push_back(0); // Edge 1, Z + nhood_data_.push_back(0); // Edge 2, Z } -template -void MalisLossLayer::Forward_cpu( - const vector*>& bottom, const vector*>& top) { +template +void MalisLossLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { // The forward pass computes the softmax prob values. softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.cpu_data(); @@ -294,8 +315,9 @@ void MalisLossLayer::Forward_cpu( const int label_value = static_cast(label[i * inner_num_ + j]); DCHECK_GE(label_value, 0); DCHECK_LT(label_value, prob_.shape(softmax_axis_)); - loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j], - Dtype(FLT_MIN))); + loss -= log( + std::max(prob_data[i * dim + label_value * inner_num_ + j], + Dtype(FLT_MIN))); ++count; } } @@ -305,17 +327,19 @@ void MalisLossLayer::Forward_cpu( } } -template +template void MalisLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { + std::cout << "Outer dim: " << outer_num_ << std::endl; + std::cout << "Inner dim: " << inner_num_ << std::endl; if (propagate_down[1]) { - LOG(FATAL) << this->type() - << " Layer cannot backpropagate to label inputs."; + LOG(FATAL)<< this->type() + << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - // Diff to propagate to (size w * h * c) Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); @@ -325,24 +349,158 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, // Labels (size w * h, c values) const Dtype* label = bottom[1]->cpu_data(); + cv::namedWindow("labelled"); + cv::namedWindow("prob"); + cv::namedWindow("diff"); + + cv::Mat img(bottom[1]->height(), bottom[1]->width(), CV_8SC1); +#pragma omp parallel for + for (int y = 0; y < bottom[1]->height(); ++y) { + for (int x = 0; x < bottom[1]->width(); ++x) { + img.at(y, x) = label[y * bottom[1]->width() + x]; + } + } + + std::vector > blobs; + + cv::Mat seg = FindBlobs(img, &blobs); + + // This is for debugging only: + { + cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3); + for (size_t i = 0; i < blobs.size(); i++) { + unsigned char r = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT + unsigned char g = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT + unsigned char b = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT + + for (size_t j = 0; j < blobs[i].size(); j++) { + int x = blobs[i][j].x; + int y = blobs[i][j].y; + + output.at(y, x)[0] = b; + output.at(y, x)[1] = g; + output.at(y, x)[2] = r; + } + } + cv::imshow("labelled", output); + cv::waitKey(100); + } + + Dtype loss_out = 0; + Dtype classerr_out = 0; + Dtype rand_index_out = 0; + + std::vector conn_data_pos( + 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + std::vector conn_data_neg( + 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + std::vector dloss_pos( + 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + std::vector dloss_neg( + 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + + // Construct positive and negative affinity graph +#pragma omp parallel for + for (int i = 0; i < bottom[0]->height() - 1; ++i) { + for (int j = 0; j < bottom[0]->width() - 1; ++j) { + // Center + Dtype p0 = prob_data[i * bottom[0]->width() + j]; + // Right + Dtype p1 = prob_data[i * bottom[0]->width() + (j + 1)]; + // Bottom + Dtype p2 = prob_data[(i + 1) * bottom[0]->width() + j]; + + // Center + Dtype g0 = label[i * bottom[0]->width() + j]; + // Right + Dtype g1 = label[i * bottom[0]->width() + (j + 1)]; + // Bottom + Dtype g2 = label[(i + 1) * bottom[0]->width() + j]; + + conn_data_pos[i * (bottom[0]->width() - 1) + j] = std::max( + 1.0 - std::fabs(p0 - p1), 1.0 - std::fabs(g0 - g1)); + conn_data_neg[i * (bottom[0]->width() - 1) + j] = std::min( + 1.0 - std::fabs(p0 - p1), 1.0 - std::fabs(g0 - g1)); + conn_data_pos[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) + + i * (bottom[0]->width() - 1) + j] = std::max( + 1.0 - std::fabs(p0 - p2), 1.0 - std::fabs(g0 - g2)); + conn_data_neg[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) + + i * (bottom[0]->width() - 1) + j] = std::min( + 1.0 - std::fabs(p0 - p2), 1.0 - std::fabs(g0 - g2)); + } + } + + std::cout << "Before MALIS 1" << std::endl; + + Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], + &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), + true, &dloss_pos[0], + &loss_out, &classerr_out, &rand_index_out); + + std::cout << "Before MALIS 2" << std::endl; + + Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], + &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), + false, &dloss_neg[0], + &loss_out, &classerr_out, &rand_index_out); + + std::cout << "Before PROB BACK" << std::endl; caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); - int dim = prob_.count() / outer_num_; - int count = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; ++j) { - const int label_value = static_cast(label[i * inner_num_ + j]); - bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; - ++count; + + std::cout << "Before LOSS BACK" << std::endl; + + // Spread out the losses to pixels + for (int i = 0; i < bottom[0]->height() - 1; ++i) { + for (int j = 0; j < bottom[0]->width() - 1; ++j) { + Dtype lxp = dloss_pos[i * (bottom[0]->width() - 1) + j]; + Dtype lxn = dloss_neg[i * (bottom[0]->width() - 1) + j]; + + Dtype lyp = dloss_pos[(bottom[0]->width() - 1) + * (bottom[0]->height() - 1) + i * (bottom[0]->width() - 1) + j]; + Dtype lyn = dloss_neg[(bottom[0]->width() - 1) + * (bottom[0]->height() - 1) + i * (bottom[0]->width() - 1) + j]; + + // Pick labels + const int l0 = static_cast + (label[i * bottom[0]->width() + j]); + const int l1 = static_cast + (label[i * bottom[0]->width() + (j + 1)]); + const int l2 = static_cast + (label[(i + 1) * bottom[0]->width() + j]); + + // Center + bottom_diff[l0 * inner_num_ + i * bottom[0]->width() + j] += 0.5 + * (lxp + lxn + lyp + lyn); + + // Right + bottom_diff[l1 * inner_num_ + i * bottom[0]->width() + (j + 1)] += 0.5 + * (lxp + lxn); + + // Bottom + bottom_diff[l2 * inner_num_ + (i + 1) * bottom[0]->width() + j] += 0.5 + * (lyp + lyn); } } - // Scale gradient - const Dtype loss_weight = top[0]->cpu_diff()[0]; - caffe_scal(prob_.count(), loss_weight / count, bottom_diff); + + Dtype* prob_rd = prob_.mutable_cpu_data(); + + cv::Mat wrapped_1(bottom[0]->height(), bottom[0]->width(), + cv::DataType::type, + prob_rd, sizeof(Dtype) * bottom[0]->width()); + cv::imshow("prob", wrapped_1); + cv::waitKey(100); + + cv::Mat wrapped_2(bottom[0]->height(), bottom[0]->width(), + cv::DataType::type, + bottom_diff, sizeof(Dtype) * bottom[0]->width()); + cv::imshow("diff", wrapped_2); + cv::waitKey(100); + + std::cout << "After LOSS BACK" << std::endl; } } - INSTANTIATE_CLASS(MalisLossLayer); REGISTER_LAYER_CLASS(MalisLoss); From 0409ee41e0a77039576c377512eb01e4f2e50999 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 2 Jul 2015 11:15:14 -0400 Subject: [PATCH 097/254] Makefile adaptions, defaults to ViennaCLBLAS. --- Makefile | 5 --- Makefile.config.example | 6 +-- .../greentea/greentea_math_functions.cpp | 21 ++++------ src/caffe/layers/malis_loss_layer.cpp | 38 +++++++++++++++---- 4 files changed, 38 insertions(+), 32 deletions(-) diff --git a/Makefile b/Makefile index 7d0cd050768..19fa62b93cc 100644 --- a/Makefile +++ b/Makefile @@ -199,11 +199,6 @@ ifeq ($(USE_GREENTEA),1) COMMON_FLAGS += -DUSE_CLBLAS endif - # Use ViennaCL BLAS - ifeq ($(USE_VIENNACLBLAS), 1) - COMMON_FLAGS += -DUSE_VIENNACLBLAS - endif - # Requires valid OpenCL library LIBRARY_DIRS += $(CLLIBS) # Requires valid OpenCL headers and valid ViennaCL diff --git a/Makefile.config.example b/Makefile.config.example index e439eb042a2..e7cb96897df 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -12,13 +12,9 @@ USE_GREENTEA := 0 # Folder of the ViennaCL header-only library VIENNACL_DIR = ../ViennaCL -# Either set CLBLAS or VIENNACLBLAS to 1, not both. -# If you want to use OpenCL/Greentea on the CPU only, you can also disable both. -# When both are disabled, GPUs won't work. CPUs always use CBLAS (Atlas, MKL or OpenBLAS). -# The chosen BLAS library needs to be compiled and installed from source. +# Either set clBLAS to 1 or it will use ViennaclBLAS. # CLBLAS should be faster, especially on AMD cards. USE_CLBLAS := 0 -USE_VIENNACLBLAS := 0 # Enable or disable double precision support for OpenCL/Greentea GREENTEA_DOUBLE_SUPPORT := 1 diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index d8584718687..02f8ff8168a 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -33,9 +33,7 @@ #ifdef USE_CLBLAS #include -#endif - -#ifdef USE_VIENNACLBLAS +#else #include "viennacl/detail/matrix_def.hpp" #include "viennacl/detail/vector_def.hpp" #include "viennacl/linalg/inner_prod.hpp" @@ -174,7 +172,7 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; -#ifdef USE_VIENNACLBLAS +#ifndef USE_CLBLAS typedef typename viennacl::matrix_base::size_type size_type; typedef typename viennacl::matrix_base::size_type difference_type; @@ -211,8 +209,7 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, viennacl::linalg::prod_impl(matA, matB, matC, alpha, beta); -#endif -#ifdef USE_CLBLAS +#else clblasOrder clOrder = clblasRowMajor; clblasTranspose clTransA = (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; @@ -272,7 +269,7 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, caffe_cpu_gemv(TransA, M, N, alpha, Aptr + offA, xptr + offx, beta, yptr + offy); } else { -#ifdef USE_VIENNACLBLAS +#ifndef USE_CLBLAS typedef typename viennacl::vector_base::size_type size_type; typedef typename viennacl::vector_base::size_type difference_type; @@ -294,9 +291,7 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, else v2 += alpha * viennacl::linalg::prod(mat, v1); -#endif - -#ifdef USE_CLBLAS +#else clblasTranspose clTransA = (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; @@ -345,7 +340,7 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, caffe_axpy(N, alpha, Xptr + offX, Yptr + offY); } else { -#ifdef USE_VIENNACLBLAS +#ifndef USE_CLBLAS typedef typename viennacl::vector_base::size_type size_type; typedef typename viennacl::vector_base::size_type difference_type; @@ -357,9 +352,7 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, v2 += alpha * v1; -#endif - -#ifdef USE_CLBLAS +#else cl_command_queue queue = ctx.get_queue().handle().get(); if (std::is_same::value) { diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 1b139d31056..4f49d4f405a 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -11,8 +11,6 @@ #include #include - - #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/util/math_functions.hpp" @@ -121,6 +119,8 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int nPairIncorrect = 0; map::iterator it1, it2; + std::cout << "Pqueue size: " << pqueue.size() << std::endl; + /* Start Kruskal's */ for (int i = 0; i < pqueue.size(); ++i) { minEdge = pqueue[i]; @@ -211,12 +211,14 @@ cv::Mat MalisLossLayer::FindBlobs( cv::Mat label_image; input.convertTo(label_image, CV_32SC1); - int label_count = 2; + // Segment into label numbers higher than the original label numbers + int label_count = prob_.channels(); for (int y = 0; y < label_image.rows; y++) { int *row = reinterpret_cast(label_image.ptr(y)); for (int x = 0; x < label_image.cols; x++) { - if (row[x] > 1) { + // Skip background and already labeled areas + if (row[x] >= prob_.channels() || row[x] == 0) { continue; } @@ -417,15 +419,22 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, // Bottom Dtype g2 = label[(i + 1) * bottom[0]->width() + j]; - conn_data_pos[i * (bottom[0]->width() - 1) + j] = std::max( + // X positive + conn_data_pos[i * (bottom[0]->width() - 1) + j] = std::min( 1.0 - std::fabs(p0 - p1), 1.0 - std::fabs(g0 - g1)); - conn_data_neg[i * (bottom[0]->width() - 1) + j] = std::min( + + // X negative + conn_data_neg[i * (bottom[0]->width() - 1) + j] = std::max( 1.0 - std::fabs(p0 - p1), 1.0 - std::fabs(g0 - g1)); + + // Y positive conn_data_pos[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) - + i * (bottom[0]->width() - 1) + j] = std::max( + + i * (bottom[0]->width() - 1) + j] = std::min( 1.0 - std::fabs(p0 - p2), 1.0 - std::fabs(g0 - g2)); + + // Y negative conn_data_neg[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) - + i * (bottom[0]->width() - 1) + j] = std::min( + + i * (bottom[0]->width() - 1) + j] = std::max( 1.0 - std::fabs(p0 - p2), 1.0 - std::fabs(g0 - g2)); } } @@ -444,6 +453,19 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, false, &dloss_neg[0], &loss_out, &classerr_out, &rand_index_out); + + auto minmax = std::minmax_element(dloss_neg.begin(),dloss_neg.end()); + + std::cout << "DLoss_neg min/max: " << + dloss_neg[minmax.first - dloss_neg.begin()] << " " << + dloss_neg[minmax.second - dloss_neg.begin()] << std::endl; + + minmax = std::minmax_element(dloss_pos.begin(),dloss_pos.end()); + + std::cout << "DLoss_pos min/max: " << + dloss_pos[minmax.first - dloss_pos.begin()] << " " << + dloss_pos[minmax.second - dloss_pos.begin()] << std::endl; + std::cout << "Before PROB BACK" << std::endl; caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); From 5ab1159cb10f4b5ee8fbe5522b7a1011d3fa9ae7 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 2 Jul 2015 11:17:28 -0400 Subject: [PATCH 098/254] ViennaCLBLAS flag removed --- .../greentea/greentea_math_functions.cpp | 24 +++++++------------ 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 02f8ff8168a..3e2613b2b33 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -436,7 +436,7 @@ void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, caffe_scal(N, alpha, xptr + offx); } else { -#ifdef USE_VIENNACLBLAS +#ifndef USE_CLBLAS typedef typename viennacl::vector_base::size_type size_type; typedef typename viennacl::vector_base::size_type difference_type; @@ -446,9 +446,7 @@ void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, v1 *= alpha; -#endif - -#ifdef USE_CLBLAS +#else cl_command_queue queue = ctx.get_queue().handle().get(); if (std::is_same::value) { @@ -502,8 +500,7 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, *out = caffe_cpu_dot(n, Xptr + offX, Yptr + offY); } else { -#ifdef USE_VIENNACLBLAS - +#ifndef USE_CLBLAS typedef typename viennacl::vector_base::size_type size_type; typedef typename viennacl::vector_base::size_type difference_type; @@ -514,9 +511,7 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, *out = viennacl::linalg::inner_prod(v1, v2); -#endif - -#ifdef USE_CLBLAS +#else cl_command_queue queue = ctx.get_queue().handle().get(); cl_int err; @@ -566,7 +561,7 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, *Y = caffe_cpu_asum(n, Xptr + offX); } else { -#ifdef USE_VIENNACLBLAS +#ifndef USE_CLBLAS typedef typename viennacl::vector_base::size_type size_type; typedef typename viennacl::vector_base::size_type difference_type; @@ -576,9 +571,7 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, *Y = viennacl::linalg::norm_1(v1); -#endif - -#ifdef USE_CLBLAS +#else cl_command_queue queue = ctx.get_queue().handle().get(); cl_int err; @@ -627,7 +620,7 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, caffe_cpu_scale(n, alpha, Xptr + offX, Yptr + offY); } else { -#ifdef USE_VIENNACLBLAS +#ifndef USE_CLBLAS typedef typename viennacl::vector_base::size_type size_type; typedef typename viennacl::vector_base::size_type difference_type; @@ -639,9 +632,8 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, v2 = v1 * alpha; -#endif +#else -#ifdef USE_CLBLAS viennacl::ocl::context ctx = viennacl::ocl::get_context(ctx_id); cl_command_queue queue = ctx.get_queue().handle().get(); From 208dfcd9c72d83b9884f7b8744ec64534106ca48 Mon Sep 17 00:00:00 2001 From: Jan Funke Date: Thu, 2 Jul 2015 11:28:18 -0400 Subject: [PATCH 099/254] added CMake support for Greentea For this, three new options were added: USE_CUDA, USE_GREENTEA, and USE_CLBLAS. These options are now set in caffe_config.h, which is included in caffe_common.hpp. --- CMakeLists.txt | 5 +- cmake/Cuda.cmake | 4 +- cmake/Dependencies.cmake | 23 +++++ cmake/Modules/FindOpenCL.cmake | 90 +++++++++++++++++++ cmake/Modules/FindViennaCL.cmake | 42 +++++++++ cmake/Modules/FindclBLAS.cmake | 53 +++++++++++ cmake/Templates/caffe_config.h.in | 11 ++- include/caffe/common.hpp | 4 + include/caffe/greentea/cl_kernels.hpp | 1 + include/caffe/util/math_functions.hpp | 6 -- src/caffe/greentea/cl_kernels.cpp | 1 + src/caffe/greentea/cl_kernels.sh | 2 + src/caffe/greentea/greentea.cpp | 1 + src/caffe/greentea/greentea_im2col.cpp | 1 + .../greentea/greentea_math_functions.cpp | 4 +- src/caffe/util/math_functions.cu | 13 +-- 16 files changed, 243 insertions(+), 18 deletions(-) create mode 100644 cmake/Modules/FindOpenCL.cmake create mode 100644 cmake/Modules/FindViennaCL.cmake create mode 100644 cmake/Modules/FindclBLAS.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index ab1188f581f..ff842282e2d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,7 +18,8 @@ include(cmake/ConfigGen.cmake) caffe_option(CPU_ONLY "Build Caffe wihtout CUDA and OpenCL support" OFF) caffe_option(USE_CUDA "Build Caffe with CUDA support" ON) caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" ON) -caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" ON IF USE_CUDA AND NOT CPU_ONLY) +caffe_option(USE_CLBLAS "Build Caffe with clBLAS support (instead of using ViennaClBLAS)" OFF) +caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" OFF) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) caffe_option(BUILD_python "Build Python wrapper" ON) set(python_version "2" CACHE STRING "Specify which python version to use") @@ -31,7 +32,7 @@ include(cmake/Dependencies.cmake) # ---[ Flags if(UNIX OR APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11 -DCMAKE_BUILD") endif() if(USE_libstdcpp) diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake index ff58d31c166..f5477884e50 100644 --- a/cmake/Cuda.cmake +++ b/cmake/Cuda.cmake @@ -145,7 +145,7 @@ macro(caffe_cuda_compile objlist_variable) endforeach() if(UNIX OR APPLE) - list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC) + list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC -std=c++11) endif() if(APPLE) @@ -213,6 +213,8 @@ if(USE_CUDNN) add_definitions(-DUSE_CUDNN) include_directories(SYSTEM ${CUDNN_INCLUDE}) list(APPEND Caffe_LINKER_LIBS ${CUDNN_LIBRARY}) + else() + message(FATAL_ERROR "CuDNN requested, but not found.") endif() endif() diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index 4f9afc9b3a2..5177b598a3b 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -56,6 +56,29 @@ if(NOT HAVE_CUDA) add_definitions(-DCPU_ONLY) endif() +# ---[ ViennaCL +if (USE_GREENTEA) + find_package(ViennaCL) + if (NOT ViennaCL_FOUND) + message(FATAL_ERROR "ViennaCL required for GREENTEA but not found.") + endif() + include_directories(SYSTEM ${ViennaCL_INCLUDE_DIRS}) + list(APPEND Caffe_LINKER_LIBS ${ViennaCL_LIBRARIES}) + set(HAVE_VIENNACL TRUE) + set(VIENNACL_WITH_OPENCL ${ViennaCL_WITH_OPENCL}) +endif() + +# ---[ clBLAS +if (USE_CLBLAS) + find_package(clBLAS) + if (NOT CLBLAS_FOUND) + message(FATAL_ERROR "clBLAS required but not found.") + endif() + include_directories(SYSTEM ${CLBLAS_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS ${CLBLAS_LIBRARY}) + set(HAVE_CLBLAS TRUE) +endif() + # ---[ OpenCV find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs) if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found diff --git a/cmake/Modules/FindOpenCL.cmake b/cmake/Modules/FindOpenCL.cmake new file mode 100644 index 00000000000..27054a327ce --- /dev/null +++ b/cmake/Modules/FindOpenCL.cmake @@ -0,0 +1,90 @@ +# This file taken from FindOpenCL project @ http://gitorious.com/findopencl +# +# - Try to find OpenCL +# This module tries to find an OpenCL implementation on your system. It supports +# AMD / ATI, Apple and NVIDIA implementations, but shoudl work, too. +# +# Once done this will define +# OPENCL_FOUND - system has OpenCL +# OPENCL_INCLUDE_DIRS - the OpenCL include directory +# OPENCL_LIBRARIES - link these to use OpenCL +# +# WIN32 should work, but is untested + +FIND_PACKAGE( PackageHandleStandardArgs ) + +SET (OPENCL_VERSION_STRING "0.1.0") +SET (OPENCL_VERSION_MAJOR 0) +SET (OPENCL_VERSION_MINOR 1) +SET (OPENCL_VERSION_PATCH 0) + +IF (APPLE) + + FIND_LIBRARY(OPENCL_LIBRARIES OpenCL DOC "OpenCL lib for OSX") + FIND_PATH(OPENCL_INCLUDE_DIRS OpenCL/cl.h DOC "Include for OpenCL on OSX") + FIND_PATH(_OPENCL_CPP_INCLUDE_DIRS OpenCL/cl.hpp DOC "Include for OpenCL CPP bindings on OSX") + +ELSE (APPLE) + + IF (WIN32) + + FIND_PATH(OPENCL_INCLUDE_DIRS CL/cl.h) + FIND_PATH(_OPENCL_CPP_INCLUDE_DIRS CL/cl.hpp) + + # The AMD SDK currently installs both x86 and x86_64 libraries + # This is only a hack to find out architecture + IF( ${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64" ) + SET(OPENCL_LIB_DIR "$ENV{ATISTREAMSDKROOT}/lib/x86_64") + SET(OPENCL_LIB_DIR "$ENV{ATIINTERNALSTREAMSDKROOT}/lib/x86_64") + ELSE (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64") + SET(OPENCL_LIB_DIR "$ENV{ATISTREAMSDKROOT}/lib/x86") + SET(OPENCL_LIB_DIR "$ENV{ATIINTERNALSTREAMSDKROOT}/lib/x86") + ENDIF( ${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64" ) + + # find out if the user asked for a 64-bit build, and use the corresponding + # 64 or 32 bit NVIDIA library paths to the search: + STRING(REGEX MATCH "Win64" ISWIN64 ${CMAKE_GENERATOR}) + IF("${ISWIN64}" STREQUAL "Win64") + FIND_LIBRARY(OPENCL_LIBRARIES OpenCL.lib ${OPENCL_LIB_DIR} $ENV{CUDA_LIB_PATH} $ENV{CUDA_PATH}/lib/x64) + ELSE("${ISWIN64}" STREQUAL "Win64") + FIND_LIBRARY(OPENCL_LIBRARIES OpenCL.lib ${OPENCL_LIB_DIR} $ENV{CUDA_LIB_PATH} $ENV{CUDA_PATH}/lib/Win32) + ENDIF("${ISWIN64}" STREQUAL "Win64") + + GET_FILENAME_COMPONENT(_OPENCL_INC_CAND ${OPENCL_LIB_DIR}/../../include ABSOLUTE) + + # On Win32 search relative to the library + FIND_PATH(OPENCL_INCLUDE_DIRS CL/cl.h PATHS "${_OPENCL_INC_CAND}" $ENV{CUDA_INC_PATH} $ENV{CUDA_PATH}/include) + FIND_PATH(_OPENCL_CPP_INCLUDE_DIRS CL/cl.hpp PATHS "${_OPENCL_INC_CAND}" $ENV{CUDA_INC_PATH} $ENV{CUDA_PATH}/include) + + ELSE (WIN32) + + # Unix style platforms + FIND_LIBRARY(OPENCL_LIBRARIES OpenCL + ENV LD_LIBRARY_PATH + ) + + GET_FILENAME_COMPONENT(OPENCL_LIB_DIR ${OPENCL_LIBRARIES} PATH) + GET_FILENAME_COMPONENT(_OPENCL_INC_CAND ${OPENCL_LIB_DIR}/../../include ABSOLUTE) + + # The AMD SDK currently does not place its headers + # in /usr/include, therefore also search relative + # to the library + FIND_PATH(OPENCL_INCLUDE_DIRS CL/cl.h PATHS ${_OPENCL_INC_CAND} "/usr/local/cuda/include") + FIND_PATH(_OPENCL_CPP_INCLUDE_DIRS CL/cl.hpp PATHS ${_OPENCL_INC_CAND} "/usr/local/cuda/include") + + ENDIF (WIN32) + +ENDIF (APPLE) + +FIND_PACKAGE_HANDLE_STANDARD_ARGS( OpenCL DEFAULT_MSG OPENCL_LIBRARIES OPENCL_INCLUDE_DIRS ) + +IF( _OPENCL_CPP_INCLUDE_DIRS ) + SET( OPENCL_HAS_CPP_BINDINGS TRUE ) + LIST( APPEND OPENCL_INCLUDE_DIRS ${_OPENCL_CPP_INCLUDE_DIRS} ) + # This is often the same, so clean up + LIST( REMOVE_DUPLICATES OPENCL_INCLUDE_DIRS ) +ENDIF( _OPENCL_CPP_INCLUDE_DIRS ) + +MARK_AS_ADVANCED( + OPENCL_INCLUDE_DIRS +) diff --git a/cmake/Modules/FindViennaCL.cmake b/cmake/Modules/FindViennaCL.cmake new file mode 100644 index 00000000000..82e3e2dc6ac --- /dev/null +++ b/cmake/Modules/FindViennaCL.cmake @@ -0,0 +1,42 @@ +SET(ViennaCL_WITH_OPENCL TRUE) + +SET(VIENNACL_INCLUDE_SEARCH_PATHS + /usr/include + /usr/local/include + /opt/ViennaCL/include + $ENV{VIENNACL_HOME} + $ENV{VIENNACL_HOME}/include +) + +FIND_PATH(ViennaCL_INCLUDE_DIR NAMES viennacl/forwards.h PATHS ${VIENNACL_INCLUDE_SEARCH_PATHS}) + +SET(ViennaCL_FOUND ON) + +# Check include files +IF(NOT ViennaCL_INCLUDE_DIR) + SET(ViennaCL_FOUND OFF) + MESSAGE(STATUS "Could not find ViennaCL include. Turning ViennaCL_FOUND off") +ENDIF() + +IF (ViennaCL_FOUND) + IF (NOT ViennaCL_FIND_QUIETLY) + MESSAGE(STATUS "Found ViennaCL include: ${ViennaCL_INCLUDE_DIR}") + ENDIF (NOT ViennaCL_FIND_QUIETLY) +ELSE (ViennaCL_FOUND) + IF (ViennaCL_FIND_REQUIRED) + MESSAGE(FATAL_ERROR "Could not find ViennaCL") + ENDIF (ViennaCL_FIND_REQUIRED) +ENDIF (ViennaCL_FOUND) + +IF(ViennaCL_WITH_OPENCL) + find_package(OpenCL REQUIRED) +ENDIF(ViennaCL_WITH_OPENCL) + +set(ViennaCL_INCLUDE_DIRS ${ViennaCL_INCLUDE_DIR} ${OPENCL_INCLUDE_DIRS}) +set(ViennaCL_LIBRARIES ${OPENCL_LIBRARIES}) + +MARK_AS_ADVANCED( + ViennaCL_INCLUDE_DIR + ViennaCL_INCLUDE_DIRS + ViennaCL_LIBRARIES +) diff --git a/cmake/Modules/FindclBLAS.cmake b/cmake/Modules/FindclBLAS.cmake new file mode 100644 index 00000000000..b9766fb6854 --- /dev/null +++ b/cmake/Modules/FindclBLAS.cmake @@ -0,0 +1,53 @@ +SET(CLBLAS_INCLUDE_SEARCH_PATHS + /usr/include + /usr/local/include + /opt/clBLAS/include + $ENV{CLBLAS_HOME} + $ENV{CLBLAS_HOME}/include +) + +SET(CLBLAS_LIB_SEARCH_PATHS + /lib/ + /lib64/ + /usr/lib + /usr/lib64 + /usr/local/lib + /usr/local/lib64 + /opt/clBLAS/lib + $ENV{CLBLAS_HOME} + $ENV{CLBLAS_HOME}/lib + ) + +FIND_PATH(CLBLAS_INCLUDE_DIR NAMES clBLAS.h PATHS ${CLBLAS_INCLUDE_SEARCH_PATHS}) +FIND_LIBRARY(CLBLAS_LIBRARY NAMES clBLAS PATHS ${CLBLAS_LIB_SEARCH_PATHS}) + +SET(CLBLAS_FOUND ON) + +# Check include files +IF(NOT CLBLAS_INCLUDE_DIR) + SET(CLBLAS_FOUND OFF) + MESSAGE(STATUS "Could not find CLBLAS include. Turning CLBLAS_FOUND off") +ENDIF() + +# Check libraries +IF(NOT CLBLAS_LIBRARY) + SET(CLBLAS_FOUND OFF) + MESSAGE(STATUS "Could not find CLBLAS lib. Turning CLBLAS_FOUND off") +ENDIF() + +IF (CLBLAS_FOUND) + IF (NOT CLBLAS_FIND_QUIETLY) + MESSAGE(STATUS "Found CLBLAS libraries: ${CLBLAS_LIBRARY}") + MESSAGE(STATUS "Found CLBLAS include: ${CLBLAS_INCLUDE_DIR}") + ENDIF (NOT CLBLAS_FIND_QUIETLY) +ELSE (CLBLAS_FOUND) + IF (CLBLAS_FIND_REQUIRED) + MESSAGE(FATAL_ERROR "Could not find CLBLAS") + ENDIF (CLBLAS_FIND_REQUIRED) +ENDIF (CLBLAS_FOUND) + +MARK_AS_ADVANCED( + CLBLAS_INCLUDE_DIR + CLBLAS_LIBRARY +) + diff --git a/cmake/Templates/caffe_config.h.in b/cmake/Templates/caffe_config.h.in index 6039e8f6b21..609f69bcd22 100644 --- a/cmake/Templates/caffe_config.h.in +++ b/cmake/Templates/caffe_config.h.in @@ -6,12 +6,21 @@ /* NVIDA Cuda */ #cmakedefine HAVE_CUDA +#cmakedefine USE_CUDA + +/* OpenCl kernels */ +#cmakedefine USE_GREENTEA +#cmakedefine VIENNACL_WITH_OPENCL + +/* clBLAS */ +#cmakedefine HAVE_CLBLAS +#cmakedefine USE_CLBLAS /* NVIDA cuDNN */ #cmakedefine HAVE_CUDNN #cmakedefine USE_CUDNN -/* NVIDA cuDNN */ +/* Disable CUDA and OpenCL */ #cmakedefine CPU_ONLY /* Test device */ diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index eb9dba1ec4f..b16b5e4f136 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -1,6 +1,10 @@ #ifndef CAFFE_COMMON_HPP_ #define CAFFE_COMMON_HPP_ +#ifdef CMAKE_BUILD + #include "caffe_config.h" +#endif + #include #include #include diff --git a/include/caffe/greentea/cl_kernels.hpp b/include/caffe/greentea/cl_kernels.hpp index 31d42c40d0d..0b7cf3c4891 100644 --- a/include/caffe/greentea/cl_kernels.hpp +++ b/include/caffe/greentea/cl_kernels.hpp @@ -1,4 +1,5 @@ // AUTOMATICALLY GENERATED FILE, DO NOT EDIT +#include "caffe/common.hpp" #ifdef USE_GREENTEA #ifndef GREENTEA_CL_KERNELS_HPP_ #define GREENTEA_CL_KERNELS_HPP_ diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index c1cca3a9f1b..4afb8667f17 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -177,13 +177,7 @@ template void caffe_gpu_set(const int N, const Dtype alpha, Dtype *X); inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) { -#ifndef CPU_ONLY -#ifdef USE_CUDA CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT(caffe/alt_fn) -#endif // USE_CUDA -#else - NO_GPU; -#endif } template diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 1d1f46d03a9..e7950f01f8a 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -1,4 +1,5 @@ // AUTOMATICALLY GENERATED FILE, DO NOT EDIT +#include "caffe/common.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/cl_kernels.hpp" #include diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index 68add4ec26c..f7c1478bf39 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -11,7 +11,9 @@ SOURCE='src/caffe/greentea/cl_kernels.cpp' echo "// AUTOMATICALLY GENERATED FILE, DO NOT EDIT" > $HEADER echo "// AUTOMATICALLY GENERATED FILE, DO NOT EDIT" > $SOURCE +echo "#include \"caffe/common.hpp\"" >> $HEADER echo "#ifdef USE_GREENTEA" >> $HEADER +echo "#include \"caffe/common.hpp\"" >> $SOURCE echo "#ifdef USE_GREENTEA" >> $SOURCE echo "#ifndef GREENTEA_CL_KERNELS_HPP_" >> $HEADER diff --git a/src/caffe/greentea/greentea.cpp b/src/caffe/greentea/greentea.cpp index da516dd8324..713d13c8b40 100644 --- a/src/caffe/greentea/greentea.cpp +++ b/src/caffe/greentea/greentea.cpp @@ -5,6 +5,7 @@ * Author: Fabian Tschopp */ +#include "caffe/common.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/util/device_alternate.hpp" diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index e49766e185f..6baeb4af8eb 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -4,6 +4,7 @@ * Created on: Apr 8, 2015 * Author: Fabian Tschopp */ +#include "caffe/common.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 3e2613b2b33..2b4797424e5 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -5,6 +5,8 @@ * Author: Fabian Tschopp */ +#include "caffe/common.hpp" + #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" @@ -21,8 +23,6 @@ #include #include -#include "caffe/common.hpp" - #include "viennacl/backend/opencl.hpp" #include "viennacl/ocl/backend.hpp" #include "viennacl/ocl/context.hpp" diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index eac7c3f3f80..e5796a68665 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -1,9 +1,3 @@ -#ifdef USE_CUDA -#include // CUDA's, not caffe's, for fabs, signbit -#include -#include // thrust::plus -#include - #include #include #include @@ -12,6 +6,13 @@ #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" +#ifdef USE_CUDA + +#include // CUDA's, not caffe's, for fabs, signbit +#include +#include // thrust::plus +#include + namespace caffe { template<> From eeeae599c56508ee8a6069d4c24c5c92df150a12 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 2 Jul 2015 14:30:31 -0400 Subject: [PATCH 100/254] OpenCL build support in CMAKE. --- CMakeLists.txt | 4 +- Makefile | 16 ++-- cmake/ConfigGen.cmake | 3 - cmake/Cuda.cmake | 5 +- cmake/Dependencies.cmake | 3 - cmake/Modules/FindOpenCL.cmake | 8 +- cmake/Modules/FindViennaCL.cmake | 1 + cmake/Targets.cmake | 2 + cmake/Templates/caffe_config.h.in | 5 ++ include/caffe/neuron_layers.hpp | 5 +- include/caffe/vision_layers.hpp | 1 + src/caffe/CMakeLists.txt | 2 +- src/caffe/layers/absval_layer.cpp | 4 + src/caffe/layers/base_conv_nd_layer.cpp | 83 ++++++++++++------- src/caffe/layers/base_data_layer.cpp | 4 + src/caffe/layers/bnll_layer.cpp | 6 +- src/caffe/layers/bnll_layer.cu | 2 - src/caffe/layers/concat_layer.cpp | 4 + src/caffe/layers/contrastive_loss_layer.cpp | 4 + src/caffe/layers/conv_layer.cpp | 4 + src/caffe/layers/conv_nd_layer.cpp | 4 + src/caffe/layers/conv_nd_layer.cu | 56 +++++++------ src/caffe/layers/conv_sk_layer.cpp | 4 + src/caffe/layers/deconv_layer.cpp | 4 + src/caffe/layers/deconv_nd_layer.cpp | 4 + src/caffe/layers/deconv_nd_layer.cu | 56 +++++++------ src/caffe/layers/dropout_layer.cpp | 4 + src/caffe/layers/eltwise_layer.cpp | 4 + src/caffe/layers/euclidean_loss_layer.cpp | 4 + src/caffe/layers/exp_layer.cpp | 4 + src/caffe/layers/filter_layer.cpp | 4 + src/caffe/layers/hdf5_data_layer.cpp | 4 + src/caffe/layers/hdf5_output_layer.cpp | 4 + src/caffe/layers/im2col_layer.cpp | 4 + src/caffe/layers/inner_product_layer.cpp | 4 + src/caffe/layers/log_layer.cpp | 4 + src/caffe/layers/lrn_layer.cpp | 4 + src/caffe/layers/malis_loss_layer.cpp | 15 +++- src/caffe/layers/mergecrop_layer.cpp | 4 + src/caffe/layers/mvn_layer.cpp | 4 + src/caffe/layers/pooling_layer.cpp | 4 + src/caffe/layers/pooling_sk_layer.cpp | 4 + src/caffe/layers/power_layer.cpp | 4 + src/caffe/layers/prelu_layer.cpp | 4 + src/caffe/layers/reduction_layer.cpp | 4 + src/caffe/layers/relu_layer.cpp | 4 + .../sigmoid_cross_entropy_loss_layer.cpp | 4 + src/caffe/layers/sigmoid_layer.cpp | 4 + src/caffe/layers/silence_layer.cpp | 4 + src/caffe/layers/slice_layer.cpp | 4 + src/caffe/layers/softmax_layer.cpp | 4 + src/caffe/layers/softmax_loss_layer.cpp | 4 + src/caffe/layers/split_layer.cpp | 4 + src/caffe/layers/tanh_layer.cpp | 4 + src/caffe/layers/threshold_layer.cpp | 3 + 55 files changed, 310 insertions(+), 110 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ff842282e2d..64a06f06a94 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,11 +32,11 @@ include(cmake/Dependencies.cmake) # ---[ Flags if(UNIX OR APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11 -DCMAKE_BUILD") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11 -fopenmp -DCMAKE_BUILD") endif() if(USE_libstdcpp) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++ -std=c++11") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++ -std=c++11 -fopenmp") message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)") endif() diff --git a/Makefile b/Makefile index 19fa62b93cc..707390f7a27 100644 --- a/Makefile +++ b/Makefile @@ -373,6 +373,17 @@ ifeq ($(CPU_ONLY), 1) COMMON_FLAGS += -DCPU_ONLY endif +# Grentea but not CUDA configuration +ifeq ($(USE_CUDA), 0) + ifeq ($(USE_GREENTEA), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*CUDNN*" + endif +endif + # Python layer support ifeq ($(WITH_PYTHON_LAYER), 1) COMMON_FLAGS += -DWITH_PYTHON_LAYER @@ -606,11 +617,6 @@ ifeq ($(USE_CUDA), 1) $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) -else - @ echo CXX $< - $(Q)$(CXX) $(CXXFLAGS) -c -x c++ $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) endif $(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ diff --git a/cmake/ConfigGen.cmake b/cmake/ConfigGen.cmake index a9101e34350..c8d28dc51aa 100644 --- a/cmake/ConfigGen.cmake +++ b/cmake/ConfigGen.cmake @@ -53,13 +53,10 @@ function(caffe_generate_export_configs) set(Caffe_DEFINITIONS "") if(NOT HAVE_CUDA) set(HAVE_CUDA FALSE) - list(APPEND Caffe_DEFINITIONS -DCPU_ONLY) endif() if(NOT HAVE_CUDNN) set(HAVE_CUDNN FALSE) - else() - list(APPEND DEFINITIONS -DUSE_CUDNN) endif() if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake index f5477884e50..07b047438e0 100644 --- a/cmake/Cuda.cmake +++ b/cmake/Cuda.cmake @@ -1,4 +1,4 @@ -if(CPU_ONLY) +if(CPU_ONLY OR NOT USE_CUDA) return() endif() @@ -149,7 +149,7 @@ macro(caffe_cuda_compile objlist_variable) endif() if(APPLE) - list(APPEND CUDA_NVCC_FLAGS -Xcompiler -Wno-unused-function) + list(APPEND CUDA_NVCC_FLAGS -std=c++11 -Xcompiler -Wno-unused-function) endif() cuda_compile(cuda_objcs ${ARGN}) @@ -210,7 +210,6 @@ list(APPEND Caffe_LINKER_LIBS ${CUDA_CUDART_LIBRARY} if(USE_CUDNN) detect_cuDNN() if(HAVE_CUDNN) - add_definitions(-DUSE_CUDNN) include_directories(SYSTEM ${CUDNN_INCLUDE}) list(APPEND Caffe_LINKER_LIBS ${CUDNN_LIBRARY}) else() diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index 5177b598a3b..2a9d9545bc6 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -51,9 +51,6 @@ if(NOT HAVE_CUDA) else() message("-- CUDA is not detected by cmake. Building without it...") endif() - - # TODO: remove this not cross platform define in future. Use caffe_config.h instead. - add_definitions(-DCPU_ONLY) endif() # ---[ ViennaCL diff --git a/cmake/Modules/FindOpenCL.cmake b/cmake/Modules/FindOpenCL.cmake index 27054a327ce..cc7a0a2a2dc 100644 --- a/cmake/Modules/FindOpenCL.cmake +++ b/cmake/Modules/FindOpenCL.cmake @@ -34,11 +34,11 @@ ELSE (APPLE) # The AMD SDK currently installs both x86 and x86_64 libraries # This is only a hack to find out architecture IF( ${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64" ) - SET(OPENCL_LIB_DIR "$ENV{ATISTREAMSDKROOT}/lib/x86_64") - SET(OPENCL_LIB_DIR "$ENV{ATIINTERNALSTREAMSDKROOT}/lib/x86_64") + SET(OPENCL_LIB_DIR "$ENV{AMDAPPSDKROOT}/lib/x86_64") + SET(OPENCL_LIB_DIR "$ENV{AMDAPPSDKROOT}/lib/x86_64") ELSE (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64") - SET(OPENCL_LIB_DIR "$ENV{ATISTREAMSDKROOT}/lib/x86") - SET(OPENCL_LIB_DIR "$ENV{ATIINTERNALSTREAMSDKROOT}/lib/x86") + SET(OPENCL_LIB_DIR "$ENV{AMDAPPSDKROOT}/lib/x86") + SET(OPENCL_LIB_DIR "$ENV{AMDAPPSDKROOT}/lib/x86") ENDIF( ${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64" ) # find out if the user asked for a 64-bit build, and use the corresponding diff --git a/cmake/Modules/FindViennaCL.cmake b/cmake/Modules/FindViennaCL.cmake index 82e3e2dc6ac..d9aa4b91b3f 100644 --- a/cmake/Modules/FindViennaCL.cmake +++ b/cmake/Modules/FindViennaCL.cmake @@ -1,6 +1,7 @@ SET(ViennaCL_WITH_OPENCL TRUE) SET(VIENNACL_INCLUDE_SEARCH_PATHS + .. /usr/include /usr/local/include /opt/ViennaCL/include diff --git a/cmake/Targets.cmake b/cmake/Targets.cmake index ed0ff9660fd..60b82acce07 100644 --- a/cmake/Targets.cmake +++ b/cmake/Targets.cmake @@ -54,12 +54,14 @@ function(caffe_pickup_caffe_sources root) caffe_source_group("Include" GLOB "${root}/include/caffe/*.h*") caffe_source_group("Include\\Util" GLOB "${root}/include/caffe/util/*.h*") caffe_source_group("Include" GLOB "${PROJECT_BINARY_DIR}/caffe_config.h*") + caffe_source_group("Include" GLOB "${root}/include/caffe/greentea/*.hpp") caffe_source_group("Source" GLOB "${root}/src/caffe/*.cpp") caffe_source_group("Source\\Util" GLOB "${root}/src/caffe/util/*.cpp") caffe_source_group("Source\\Layers" GLOB "${root}/src/caffe/layers/*.cpp") caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/layers/*.cu") caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/util/*.cu") caffe_source_group("Source\\Proto" GLOB "${root}/src/caffe/proto/*.proto") + caffe_source_group("Source" GLOB "${root}/src/caffe/greentea*.cpp") # source groups for test target caffe_source_group("Include" GLOB "${root}/include/caffe/test/test_*.h*") diff --git a/cmake/Templates/caffe_config.h.in b/cmake/Templates/caffe_config.h.in index 609f69bcd22..7407a19218c 100644 --- a/cmake/Templates/caffe_config.h.in +++ b/cmake/Templates/caffe_config.h.in @@ -1,3 +1,6 @@ +#ifndef CAFFE_CONFIG_HPP_ +#define CAFFE_CONFIG_HPP_ + /* Sources directory */ #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" @@ -39,3 +42,5 @@ /* Matlab */ #cmakedefine HAVE_MATLAB + +#endif // CAFFE_CONFIG_HPP_ \ No newline at end of file diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index c2e0774aaa2..f43a7c19767 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -14,7 +14,6 @@ #define HDF5_DATA_LABEL_NAME "label" namespace caffe { - /** * @brief An interface for layers that take one blob as input (@f$ x @f$) * and produce one equally-sized blob as output (@f$ y @f$), where @@ -85,6 +84,10 @@ class AbsValLayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); }; + +const float kBNLL_THRESHOLD = 50.; + + /** * @brief Computes @f$ y = x + \log(1 + \exp(-x)) @f$ if @f$ x > 0 @f$; * @f$ y = \log(1 + \exp(x)) @f$ otherwise. diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 333e5673d6d..22e410ae806 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -21,6 +21,7 @@ namespace caffe { + template class MergeCropLayer : public Layer { public: diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt index 40e6c11f5b0..51c6f585759 100644 --- a/src/caffe/CMakeLists.txt +++ b/src/caffe/CMakeLists.txt @@ -12,7 +12,7 @@ caffe_default_properties(proto) # creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR}) -if(HAVE_CUDA) +if(USE_CUDA AND HAVE_CUDA) caffe_cuda_compile(cuda_objs ${cuda}) list(APPEND srcs ${cuda_objs} ${cuda}) endif() diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp index 5ce28c9e2b4..57b77452519 100644 --- a/src/caffe/layers/absval_layer.cpp +++ b/src/caffe/layers/absval_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/neuron_layers.hpp" #include "caffe/util/math_functions.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "absval_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/base_conv_nd_layer.cpp b/src/caffe/layers/base_conv_nd_layer.cpp index 280fea27767..61197b25edf 100644 --- a/src/caffe/layers/base_conv_nd_layer.cpp +++ b/src/caffe/layers/base_conv_nd_layer.cpp @@ -244,26 +244,35 @@ template void BaseConvolutionNDLayer::forward_gpu_gemm(const Dtype* input, const Dtype* weights, Dtype* output, bool skip_im2col) { const Dtype* col_buff = input; - if (!is_1x1_) { - if (!skip_im2col) { - conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (!is_1x1_) { + if (!skip_im2col) { + conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + } + col_buff = col_buffer_.gpu_data(); } - col_buff = col_buffer_.gpu_data(); - } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, - (Dtype)0., output + output_offset_ * g); + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / + group_, conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, + (Dtype)0., output + output_offset_ * g); + } +#endif // USE_CUDA } } template void BaseConvolutionNDLayer::forward_gpu_bias(Dtype* output, const Dtype* bias) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), - (Dtype)1., output); + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, + out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), + (Dtype)1., output); +#endif // USE_CUDA + } } template @@ -273,14 +282,18 @@ void BaseConvolutionNDLayer::backward_gpu_gemm(const Dtype* output, if (is_1x1_) { col_buff = input; } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, - (Dtype)0., col_buff + col_offset_ * g); - } - if (!is_1x1_) { - conv_col2im_gpu(col_buff, input); + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, + conv_out_spatial_dim_, conv_out_channels_ / group_, + (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, + (Dtype)0., col_buff + col_offset_ * g); + } + if (!is_1x1_) { + conv_col2im_gpu(col_buff, input); + } +#endif // USE_CUDA } } @@ -288,23 +301,31 @@ template void BaseConvolutionNDLayer::weight_gpu_gemm(const Dtype* input, const Dtype* output, Dtype* weights) { const Dtype* col_buff = input; - if (!is_1x1_) { - conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); - col_buff = col_buffer_.gpu_data(); - } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, - (Dtype)1., weights + weight_offset_ * g); + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (!is_1x1_) { + conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + col_buff = col_buffer_.gpu_data(); + } + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, + (Dtype)1., weights + weight_offset_ * g); + } +#endif // USE_CUDA } } template void BaseConvolutionNDLayer::backward_gpu_bias(Dtype* bias, const Dtype* input) { - caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., input, bias_multiplier_.gpu_data(), 1., bias); +#endif // USE_CUDA + } } #endif // !CPU_ONLY diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 7258b9fd60b..cf1fc76deea 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/data_layers.hpp" #include "caffe/util/io.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "base_data_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp index 9ba0ea9a715..e04102728c9 100644 --- a/src/caffe/layers/bnll_layer.cpp +++ b/src/caffe/layers/bnll_layer.cpp @@ -4,9 +4,11 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -namespace caffe { +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "bnll_layer.cu" +#endif -const float kBNLL_THRESHOLD = 50.; +namespace caffe { template void BNLLLayer::Forward_cpu(const vector*>& bottom, diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index 69558505ddd..f7ce47d9f69 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -11,8 +11,6 @@ namespace caffe { -const float kBNLL_THRESHOLD = 50.; - #ifdef USE_CUDA template __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 409b35cf5f9..4696e00886b 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "concat_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 25e167819d3..726051a8a75 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "contrastive_loss_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 928ef5ee468..f791c48eece 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "conv_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/conv_nd_layer.cpp b/src/caffe/layers/conv_nd_layer.cpp index 6c3bf8a40ee..4469a41eb24 100644 --- a/src/caffe/layers/conv_nd_layer.cpp +++ b/src/caffe/layers/conv_nd_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "conv_nd_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/conv_nd_layer.cu b/src/caffe/layers/conv_nd_layer.cu index 851331b5da1..c1afc535f83 100644 --- a/src/caffe/layers/conv_nd_layer.cu +++ b/src/caffe/layers/conv_nd_layer.cu @@ -31,38 +31,42 @@ void ConvolutionNDLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->gpu_diff(); - // Bias gradient, if necessary. + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (this->param_propagate_down_[0]) { + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } if (this->bias_term_ && this->param_propagate_down_[1]) { - Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); - } + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); } - if (this->param_propagate_down_[0] || propagate_down[i]) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, - top_diff + n * this->top_dim_, weight_diff); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } - // gradient w.r.t. bottom data, if necessary. - if (propagate_down[i]) { - this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_); + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, + top_diff + n * this->top_dim_, weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight, + bottom_diff + n * this->bottom_dim_); + } } } } +#endif // USE_CUDA } } diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index fbda73e1c37..c6bf7e23bc9 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "conv_sk_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index a4612963b6b..101f440312e 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "deconv_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/deconv_nd_layer.cpp b/src/caffe/layers/deconv_nd_layer.cpp index d5684a6163b..8cf2fa40924 100644 --- a/src/caffe/layers/deconv_nd_layer.cpp +++ b/src/caffe/layers/deconv_nd_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "deconv_nd_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/deconv_nd_layer.cu b/src/caffe/layers/deconv_nd_layer.cu index 9908ebd5296..c47101c85f0 100644 --- a/src/caffe/layers/deconv_nd_layer.cu +++ b/src/caffe/layers/deconv_nd_layer.cu @@ -32,38 +32,42 @@ void DeconvolutionNDLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->gpu_diff(); - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - // Bias gradient, if necessary. + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (this->param_propagate_down_[0]) { + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } if (this->bias_term_ && this->param_propagate_down_[1]) { - Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); - } + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); } - if (this->param_propagate_down_[0] || propagate_down[i]) { - for (int n = 0; n < this->num_; ++n) { - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(top_diff + n * this->top_dim_, - bottom_data + n * this->bottom_dim_, weight_diff); + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } - // gradient w.r.t. bottom data, if necessary. - if (propagate_down[i]) { - this->forward_gpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_); + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(top_diff + n * this->top_dim_, + bottom_data + n * this->bottom_dim_, weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->forward_gpu_gemm(top_diff + n * this->top_dim_, weight, + bottom_diff + n * this->bottom_dim_); + } } } } +#endif // USE CUDA } } diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index 1c3f2c216d6..d5922e87352 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -8,6 +8,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "dropout_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index e2e0be79587..9bdee1b29e4 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "eltwise_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 80efa31b22c..7075d0bb5c4 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "euclidean_loss_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/exp_layer.cpp b/src/caffe/layers/exp_layer.cpp index c7e7c60cfad..73ffc7794c7 100644 --- a/src/caffe/layers/exp_layer.cpp +++ b/src/caffe/layers/exp_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "exp_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index 7a2d91fbe19..6cb4a6ef763 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "filter_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index fadd2179e49..0d984924176 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -18,6 +18,10 @@ #include "caffe/layer.hpp" #include "caffe/util/io.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "hdf5_data_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index cb5f0e0c7ee..f6b09b6ed62 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -9,6 +9,10 @@ #include "caffe/util/io.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "hdf5_output_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index ed8992c0b48..362933dac87 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/im2col.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "im2col_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index f9bf6c32997..7c9d1468587 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -7,6 +7,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "inner_product_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/log_layer.cpp b/src/caffe/layers/log_layer.cpp index 3ca25d0946f..b3c2b3f865b 100644 --- a/src/caffe/layers/log_layer.cpp +++ b/src/caffe/layers/log_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/neuron_layers.hpp" #include "caffe/util/math_functions.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "log_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 08821ef3d79..6c3280b13f3 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "lrn_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 4f49d4f405a..fcf32287827 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -439,6 +439,19 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, } } + auto minmax = std::minmax_element(conn_data_neg.begin(),conn_data_neg.end()); + + std::cout << "Conndata neg min/max: " << + conn_data_neg[minmax.first - conn_data_neg.begin()] << " " << + conn_data_neg[minmax.second - conn_data_neg.begin()] << std::endl; + + minmax = std::minmax_element(dloss_pos.begin(),dloss_pos.end()); + + std::cout << "Conndata pos min/max: " << + conn_data_pos[minmax.first - conn_data_pos.begin()] << " " << + conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl; + + std::cout << "Before MALIS 1" << std::endl; Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], @@ -454,7 +467,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, &loss_out, &classerr_out, &rand_index_out); - auto minmax = std::minmax_element(dloss_neg.begin(),dloss_neg.end()); + minmax = std::minmax_element(dloss_neg.begin(),dloss_neg.end()); std::cout << "DLoss_neg min/max: " << dloss_neg[minmax.first - dloss_neg.begin()] << " " << diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index 9af459dd5bd..c25c6dd4e03 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "mergecrop_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index ab645ce0bb3..faa05f1c88c 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "mvn_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index c8d41499455..f48ef961c94 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -8,6 +8,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "pooling_layer.cu" +#endif + namespace caffe { using std::min; diff --git a/src/caffe/layers/pooling_sk_layer.cpp b/src/caffe/layers/pooling_sk_layer.cpp index 8527eec4eec..6d17be8083e 100644 --- a/src/caffe/layers/pooling_sk_layer.cpp +++ b/src/caffe/layers/pooling_sk_layer.cpp @@ -8,6 +8,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "pooling_sk_layer.cu" +#endif + namespace caffe { using std::min; diff --git a/src/caffe/layers/power_layer.cpp b/src/caffe/layers/power_layer.cpp index 347d9a12aeb..5ddf8bc4927 100644 --- a/src/caffe/layers/power_layer.cpp +++ b/src/caffe/layers/power_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "power_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 5ec4d9bd61b..2b09cae70dc 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "prelu_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/reduction_layer.cpp b/src/caffe/layers/reduction_layer.cpp index 8ae6329ebe4..36ddaf008e4 100644 --- a/src/caffe/layers/reduction_layer.cpp +++ b/src/caffe/layers/reduction_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "reduction_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/relu_layer.cpp b/src/caffe/layers/relu_layer.cpp index cc00319a578..b94aaeef200 100644 --- a/src/caffe/layers/relu_layer.cpp +++ b/src/caffe/layers/relu_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "relu_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index cc236fe1e8e..727d1cd9d5e 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -6,6 +6,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "sigmoid_cross_entropy_loss_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/sigmoid_layer.cpp b/src/caffe/layers/sigmoid_layer.cpp index 48c384905bf..98af4d439fd 100644 --- a/src/caffe/layers/sigmoid_layer.cpp +++ b/src/caffe/layers/sigmoid_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "sigmoid_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/silence_layer.cpp b/src/caffe/layers/silence_layer.cpp index 4abf9eff4a2..fa02212ea20 100644 --- a/src/caffe/layers/silence_layer.cpp +++ b/src/caffe/layers/silence_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "silence_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 418361f8cf8..7bc028a58f3 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "slice_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index fbd378102f6..57344822700 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -5,6 +5,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "softmax_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index 3c303cd0cae..9bfb65d8a1f 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -7,6 +7,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "softmax_loss_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 59a821976c8..d2117a220e8 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -4,6 +4,10 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "split_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/tanh_layer.cpp b/src/caffe/layers/tanh_layer.cpp index ee5ed773c74..ebcb5f6c249 100644 --- a/src/caffe/layers/tanh_layer.cpp +++ b/src/caffe/layers/tanh_layer.cpp @@ -7,6 +7,10 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "tanh_layer.cu" +#endif + namespace caffe { template diff --git a/src/caffe/layers/threshold_layer.cpp b/src/caffe/layers/threshold_layer.cpp index 2365e7b9c72..a6f976899a3 100644 --- a/src/caffe/layers/threshold_layer.cpp +++ b/src/caffe/layers/threshold_layer.cpp @@ -3,6 +3,9 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "threshold_layer.cu" +#endif namespace caffe { From 7ed15762ea8a8ee22a3b68e5218094f81e5fabf0 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 2 Jul 2015 17:06:19 -0400 Subject: [PATCH 101/254] Python ND kernel fix, travis update for OpenCL, test parameter relaxation for OpenCL-Intel. --- python/caffe/net_spec.py | 6 ++++-- scripts/travis/travis_install.sh | 3 +++ src/caffe/test/test_gradient_based_solver.cpp | 2 +- src/caffe/test/test_mvn_layer.cpp | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py index f54328d56f1..7a1d18d02aa 100644 --- a/python/caffe/net_spec.py +++ b/python/caffe/net_spec.py @@ -71,8 +71,10 @@ def assign_proto(proto, name, val): for k, v in val.iteritems(): assign_proto(getattr(proto, name), k, v) else: - setattr(proto, name, val) - + try: + setattr(proto, name, val) + except (AttributeError): + getattr(proto, name).append(val) class Top(object): """A Top specifies a single output blob (which could be one of several diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index e8adc101eca..c23510a2838 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -13,6 +13,8 @@ add-apt-repository -y ppa:tuleu/precise-backports add-apt-repository -y ppa:boost-latest/ppa # This ppa is for g++ 4.9 add-apt-repository -y ppa:ubuntu-toolchain-r/test +# This ppa is for ViennaCL +add-apt-repository -y ppa:tsmithe/pyviennacl apt-get -y update apt-get install \ @@ -22,6 +24,7 @@ apt-get install \ libboost1.54-dev libboost-system1.54-dev libboost-python1.54-dev libboost-thread1.54-dev \ libprotobuf-dev protobuf-compiler \ libatlas-dev libatlas-base-dev \ + libviennacl-dev ocl-icd-libopencl1 \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ bc diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index c9135d64e70..5ecb41ab93b 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -241,7 +241,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { const Blob& solver_updated_weights = *param_blobs[0]; ASSERT_EQ(D, solver_updated_weights.count()); const double kPrecision = 1e-2; - const double kMinPrecision = 1e-7; + const double kMinPrecision = 1e-5; for (int i = 0; i < D; ++i) { const Dtype expected_updated_weight = updated_weights.cpu_data()[i]; const Dtype solver_updated_weight = solver_updated_weights.cpu_data()[i]; diff --git a/src/caffe/test/test_mvn_layer.cpp b/src/caffe/test/test_mvn_layer.cpp index 933b4326417..c89cc9265ec 100644 --- a/src/caffe/test/test_mvn_layer.cpp +++ b/src/caffe/test/test_mvn_layer.cpp @@ -141,7 +141,7 @@ TYPED_TEST(MVNLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; MVNLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); + GradientChecker checker(1e-2, 1e-2); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } From d72345817cf5156257eda946b2562a0642642b5f Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 2 Jul 2015 18:19:33 -0400 Subject: [PATCH 102/254] OpenCL in travis and LINT fix. --- scripts/travis/travis_install.sh | 3 ++- src/caffe/layers/absval_layer.cpp | 7 ++++--- src/caffe/layers/base_conv_nd_layer.cpp | 3 ++- src/caffe/layers/base_data_layer.cpp | 6 +++--- src/caffe/layers/bnll_layer.cpp | 6 +++--- src/caffe/layers/malis_loss_layer.cpp | 9 +++++---- 6 files changed, 19 insertions(+), 15 deletions(-) diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh index c23510a2838..2b839f60c7c 100755 --- a/scripts/travis/travis_install.sh +++ b/scripts/travis/travis_install.sh @@ -24,7 +24,8 @@ apt-get install \ libboost1.54-dev libboost-system1.54-dev libboost-python1.54-dev libboost-thread1.54-dev \ libprotobuf-dev protobuf-compiler \ libatlas-dev libatlas-base-dev \ - libviennacl-dev ocl-icd-libopencl1 \ + fglrx opencl-headers \ + libviennacl-dev \ libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ bc diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp index 57b77452519..640bbe9ebd0 100644 --- a/src/caffe/layers/absval_layer.cpp +++ b/src/caffe/layers/absval_layer.cpp @@ -1,12 +1,13 @@ +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "absval_layer.cu" +#endif + #include #include "caffe/layer.hpp" #include "caffe/neuron_layers.hpp" #include "caffe/util/math_functions.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "absval_layer.cu" -#endif namespace caffe { diff --git a/src/caffe/layers/base_conv_nd_layer.cpp b/src/caffe/layers/base_conv_nd_layer.cpp index 61197b25edf..0d3dd22dc90 100644 --- a/src/caffe/layers/base_conv_nd_layer.cpp +++ b/src/caffe/layers/base_conv_nd_layer.cpp @@ -308,7 +308,8 @@ void BaseConvolutionNDLayer::weight_gpu_gemm(const Dtype* input, col_buff = col_buffer_.gpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, + caffe_gpu_gemm(CblasNoTrans, CblasTrans, + conv_out_channels_ / group_, kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, (Dtype)1., weights + weight_offset_ * g); diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index cf1fc76deea..fb9ac6433d7 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -1,13 +1,13 @@ #include #include -#include "caffe/data_layers.hpp" -#include "caffe/util/io.hpp" - #if defined(USE_GREENTEA) && !defined(USE_CUDA) #include "base_data_layer.cu" #endif +#include "caffe/data_layers.hpp" +#include "caffe/util/io.hpp" + namespace caffe { template diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp index e04102728c9..732a5accfc3 100644 --- a/src/caffe/layers/bnll_layer.cpp +++ b/src/caffe/layers/bnll_layer.cpp @@ -1,13 +1,13 @@ #include #include -#include "caffe/layer.hpp" -#include "caffe/vision_layers.hpp" - #if defined(USE_GREENTEA) && !defined(USE_CUDA) #include "bnll_layer.cu" #endif +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + namespace caffe { template diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index fcf32287827..0f085106730 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -439,13 +439,14 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, } } - auto minmax = std::minmax_element(conn_data_neg.begin(),conn_data_neg.end()); + auto minmax = std::minmax_element(conn_data_neg.begin(), + conn_data_neg.end()); std::cout << "Conndata neg min/max: " << conn_data_neg[minmax.first - conn_data_neg.begin()] << " " << conn_data_neg[minmax.second - conn_data_neg.begin()] << std::endl; - minmax = std::minmax_element(dloss_pos.begin(),dloss_pos.end()); + minmax = std::minmax_element(dloss_pos.begin(), dloss_pos.end()); std::cout << "Conndata pos min/max: " << conn_data_pos[minmax.first - conn_data_pos.begin()] << " " << @@ -467,13 +468,13 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, &loss_out, &classerr_out, &rand_index_out); - minmax = std::minmax_element(dloss_neg.begin(),dloss_neg.end()); + minmax = std::minmax_element(dloss_neg.begin(), dloss_neg.end()); std::cout << "DLoss_neg min/max: " << dloss_neg[minmax.first - dloss_neg.begin()] << " " << dloss_neg[minmax.second - dloss_neg.begin()] << std::endl; - minmax = std::minmax_element(dloss_pos.begin(),dloss_pos.end()); + minmax = std::minmax_element(dloss_pos.begin(), dloss_pos.end()); std::cout << "DLoss_pos min/max: " << dloss_pos[minmax.first - dloss_pos.begin()] << " " << From 0c01e55a03aeec4e640d66a4ac20245ff6c7244d Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 09:37:26 -0400 Subject: [PATCH 103/254] Merge with upstream, OpenCL kernel update, ViennaCL include fix. --- Makefile | 6 +- Makefile.config.example | 4 +- include/caffe/loss_layers.hpp | 3 +- src/caffe/device_context.cpp | 5 +- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels.sh | 4 +- .../greentea/greentea_math_functions.cpp | 2 - src/caffe/layers/malis_loss_layer.cpp | 243 ++++++++++-------- src/caffe/util/math_functions.cpp | 2 +- 9 files changed, 148 insertions(+), 125 deletions(-) diff --git a/Makefile b/Makefile index 707390f7a27..7e3569734e2 100644 --- a/Makefile +++ b/Makefile @@ -209,14 +209,10 @@ ifeq ($(USE_GREENTEA),1) COMMON_FLAGS += -DUSE_GREENTEA -DVIENNACL_WITH_OPENCL # Viennacl runtime debug output - ifeq ($(DEBUG), 1) + ifeq ($(VIENNACL_DEBUG), 1) COMMON_FLAGS += -DVIENNACL_DEBUG_ALL endif - ifeq ($(GREENTEA_DOUBLE_SUPPORT), 1) - COMMON_FLAGS += -DGREENTEA_DOUBLE_SUPPORT - endif - CL_KERNELS_CPP = src/caffe/greentea/cl_kernels.cpp CL_KERNELS = src/caffe/greentea/cl_kernels/*.cl CL_HEADERS = src/caffe/greentea/cl_headers/*.cl diff --git a/Makefile.config.example b/Makefile.config.example index e7cb96897df..46d41d478ab 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -16,9 +16,6 @@ VIENNACL_DIR = ../ViennaCL # CLBLAS should be faster, especially on AMD cards. USE_CLBLAS := 0 -# Enable or disable double precision support for OpenCL/Greentea -GREENTEA_DOUBLE_SUPPORT := 1 - # cuDNN acceleration switch (uncomment to build with cuDNN). # USE_CUDNN := 1 @@ -103,6 +100,7 @@ DISTRIBUTE_DIR := distribute # Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171 # DEBUG := 1 +# VIENNACL_DEBUG := 0 # The ID of the GPU that 'make runtest' will use to run unit tests. TEST_GPUID := 0 diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 90e12aa24d8..3ba8858a47a 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -803,8 +803,7 @@ class MalisLossLayer : public LossLayer { std::vector nhood_dims_; private: - cv::Mat FindBlobs(const cv::Mat &input, - std::vector > *blobs); + cv::Mat FindBlobs(const cv::Mat &input); void Malis(Dtype* conn_data, int conn_num_dims, int* conn_dims, int* nhood_data, int* nhood_dims, int* seg_data, diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index e9de6226a14..1118fca065b 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -26,7 +26,7 @@ DeviceContext::DeviceContext(int id, Backend backend) } void DeviceContext::Init() { -if(backend_ == BACKEND_CUDA) { + if (backend_ == BACKEND_CUDA) { #ifdef USE_CUDA workgroup_sizes_[0] = CAFFE_CUDA_NUM_THREADS; #endif // USE_CUDA @@ -34,7 +34,8 @@ if(backend_ == BACKEND_CUDA) { #ifdef USE_GREENTEA std::vector temp(3); clGetDeviceInfo(viennacl::ocl::get_context(id_).devices()[0].id(), - CL_DEVICE_MAX_WORK_ITEM_SIZES, sizeof(size_t), &temp[0], NULL); + CL_DEVICE_MAX_WORK_ITEM_SIZES, + sizeof(size_t), &temp[0], NULL); workgroup_sizes_[0] = temp[0]; workgroup_sizes_[1] = temp[1]; workgroup_sizes_[2] = temp[2]; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index e7950f01f8a..2b44727f9dd 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -65,7 +65,6 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << pooling_sk_float << "\n\n"; // NOLINT ss << slice_float << "\n\n"; // NOLINT ss << softmax_loss_float << "\n\n"; // NOLINT -#ifdef GREENTEA_DOUBLE_SUPPORT ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; // NOLINT ss << "#undef Dtype" << "\n\n"; // NOLINT ss << "#define Dtype double" << "\n\n"; // NOLINT @@ -88,10 +87,9 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << slice_double << "\n\n"; // NOLINT ss << softmax_loss_double << "\n\n"; // NOLINT ss << "#endif" << "\n\n"; -#endif // GREENTEA_DOUBLE_SUPPORT std::string kernel_string = ss.str(); const char* kernel_program = kernel_string.c_str(); - ctx->build_options("-cl-fast-relaxed-math -cl-mad-enable"); + // ctx->build_options("-cl-fast-relaxed-math -cl-mad-enable"); viennacl::ocl::program &program = ctx->add_program(kernel_program, "kernel_program"); return program; diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index f7c1478bf39..2a5146c8f57 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -95,7 +95,6 @@ do done shopt -s nullglob -echo "#ifdef GREENTEA_DOUBLE_SUPPORT" >> $SOURCE echo " ss << \"#ifdef DOUBLE_SUPPORT_AVAILABLE\" << \"\\n\\n\"; // NOLINT" >> $SOURCE echo " ss << \"#undef Dtype\" << \"\\n\\n\"; // NOLINT" >> $SOURCE echo " ss << \"#define Dtype double\" << \"\\n\\n\"; // NOLINT" >> $SOURCE @@ -107,11 +106,10 @@ do echo " ss << ${CL_KERNEL_NAME}_double << \"\\n\\n\"; // NOLINT" >> $SOURCE done echo " ss << \"#endif\" << \"\\n\\n\";" >> $SOURCE -echo "#endif // GREENTEA_DOUBLE_SUPPORT" >> $SOURCE echo " std::string kernel_string = ss.str();" >> $SOURCE echo " const char* kernel_program = kernel_string.c_str();" >> $SOURCE -echo " ctx->build_options(\"-cl-fast-relaxed-math -cl-mad-enable\");" >> $SOURCE +echo " // ctx->build_options(\"-cl-fast-relaxed-math -cl-mad-enable\");" >> $SOURCE echo " viennacl::ocl::program &program = ctx->add_program(kernel_program," >> $SOURCE echo " \"kernel_program\");" >> $SOURCE echo " return program;" >> $SOURCE diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 2b4797424e5..12976f3367f 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -34,8 +34,6 @@ #ifdef USE_CLBLAS #include #else -#include "viennacl/detail/matrix_def.hpp" -#include "viennacl/detail/vector_def.hpp" #include "viennacl/linalg/inner_prod.hpp" #include "viennacl/linalg/norm_1.hpp" #include "viennacl/linalg/norm_2.hpp" diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 0f085106730..2cfe6d80832 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -51,44 +51,50 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } /* Cache for speed to access neighbors */ - int nVert = 1; - for (int i = 0; i < conn_num_dims - 1; ++i) - nVert = nVert * conn_dims[i]; + // nVert stores (x * y * z) + long nVert = 1; + for (long i = 0; i < conn_num_dims - 1; ++i) { + nVert = nVert * conn_dims[i]; + } - vector prodDims(conn_num_dims - 1); + // prodDims stores x, x*y, x*y*z offsets + vector prodDims(conn_num_dims - 1); prodDims[0] = 1; - for (int i = 1; i < conn_num_dims - 1; ++i) - prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; + for (long i = 1; i < conn_num_dims - 1; ++i) { + prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; + } /* convert n-d offset vectors into linear array offset scalars */ + // nHood is a vector of size #edges vector nHood(nhood_dims[0]); - for (int i = 0; i < nhood_dims[0]; ++i) { + for (long i = 0; i < nhood_dims[0]; ++i) { nHood[i] = 0; - for (int j = 0; j < nhood_dims[1]; ++j) { + for (long j = 0; j < nhood_dims[1]; ++j) { nHood[i] += (int32_t) nhood_data[i + j * nhood_dims[0]] * prodDims[j]; } } /* Disjoint sets and sparse overlap vectors */ - vector > overlap(nVert); - vector rank(nVert); - vector parent(nVert); - map segSizes; - int nLabeledVert = 0; - int nPairPos = 0; - boost::disjoint_sets dsets(&rank[0], &parent[0]); - for (int i = 0; i < nVert; ++i) { + vector > overlap(nVert); + vector rank(nVert); + vector parent(nVert); + map segSizes; + long nLabeledVert = 0; + long nPairPos = 0; + boost::disjoint_sets dsets(&rank[0], &parent[0]); + // Loop over all seg data items + for (long i = 0; i < nVert; ++i) { dsets.make_set(i); if (0 != seg_data[i]) { - overlap[i].insert(pair(seg_data[i], 1)); + overlap[i].insert(pair(seg_data[i], 1)); ++nLabeledVert; ++segSizes[seg_data[i]]; nPairPos += (segSizes[seg_data[i]] - 1); } } - int nPairTot = (nLabeledVert * (nLabeledVert - 1)) / 2; - int nPairNeg = nPairTot - nPairPos; - int nPairNorm; + long nPairTot = (nLabeledVert * (nLabeledVert - 1)) / 2; + long nPairNeg = nPairTot - nPairPos; + long nPairNorm; if (pos) { nPairNorm = nPairPos; } else { @@ -96,33 +102,42 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } /* Sort all the edges in increasing order of weight */ - std::vector pqueue( - static_cast(3) * (conn_dims[0] - 1) * (conn_dims[1] - 1) - * (conn_dims[2] - 1)); - int j = 0; - for (int d = 0, i = 0; d < conn_dims[3]; ++d) - for (int z = 0; z < conn_dims[2]; ++z) - for (int y = 0; y < conn_dims[1]; ++y) - for (int x = 0; x < conn_dims[0]; ++x, ++i) { - if (x > 0 && y > 0 && z > 0) - pqueue[j++] = i; + std::vector pqueue( + conn_dims[3] * std::max((conn_dims[0] - 1), 1) + * std::max((conn_dims[1] - 1), 1) + * std::max((conn_dims[2] - 1), 1)); + long j = 0; + // Loop over #edges + for (long d = 0, i = 0; d < conn_dims[3]; ++d) { + // Loop over Z + for (long z = 0; z < conn_dims[2]; ++z) { + // Loop over Y + for (long y = 0; y < conn_dims[1]; ++y) { + // Loop over X + for (long x = 0; x < conn_dims[0]; ++x, ++i) { + if (x < std::max(conn_dims[0] - 1, 1) && + y < std::max(conn_dims[1] - 1, 1) && + z < std::max(conn_dims[2] - 1, 1)) { + pqueue[j++] = i; + } + } + } + } } sort(pqueue.begin(), pqueue.end(), MalisAffinityGraphCompare(conn_data)); /* Start MST */ - int minEdge; - int e, v1, v2; - int set1, set2; - int nPair = 0; + long minEdge; + long e, v1, v2; + long set1, set2; + long nPair = 0; double loss = 0, dl = 0; - int nPairIncorrect = 0; - map::iterator it1, it2; - - std::cout << "Pqueue size: " << pqueue.size() << std::endl; + long nPairIncorrect = 0; + map::iterator it1, it2; /* Start Kruskal's */ - for (int i = 0; i < pqueue.size(); ++i) { + for (long i = 0; i < pqueue.size(); ++i) { minEdge = pqueue[i]; e = minEdge / nVert; v1 = minEdge % nVert; @@ -130,6 +145,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, set1 = dsets.find_set(v1); set2 = dsets.find_set(v2); + if (set1 != set2) { dsets.link(set1, set2); @@ -169,10 +185,12 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, // conn_data[minEdge] * (1 - conn_data[minEdge]); // DSigmoid // Don't pre-multiply derivative, will be done // later in the softmax backward + /* move the pixel bags of the non-representative to the representative */ // make set1 the rep to keep and set2 the rep to empty - if (dsets.find_set(set1) == set2) - std::swap(set1, set2); + if (dsets.find_set(set1) == set2) { + std::swap(set1, set2); + } it2 = overlap[set2].begin(); while (it2 != overlap[set2].end()) { @@ -202,9 +220,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, // Derived from // http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp template -cv::Mat MalisLossLayer::FindBlobs( - const cv::Mat &input, std::vector > *blobs) { - blobs->clear(); +cv::Mat MalisLossLayer::FindBlobs(const cv::Mat &input) { // Fill the label_image with the blobs @@ -221,30 +237,11 @@ cv::Mat MalisLossLayer::FindBlobs( if (row[x] >= prob_.channels() || row[x] == 0) { continue; } - cv::Rect rect; cv::floodFill(label_image, cv::Point(x, y), label_count, &rect, 0, 0, 4); - - std::vector blob; - -#pragma omp parallel for - for (int i = rect.y; i < (rect.y + rect.height); i++) { - int *row2 = reinterpret_cast(label_image.ptr(i)); - for (int j = rect.x; j < (rect.x + rect.width); j++) { - if (row2[j] != label_count) { - continue; - } -#pragma omp critical - blob.push_back(cv::Point2i(j, i)); - } - } - - blobs->push_back(blob); - label_count++; } } - return label_image; } @@ -284,8 +281,8 @@ void MalisLossLayer::Reshape(const vector*>& bottom, } conn_num_dims_ = 4; - conn_dims_.push_back(bottom[0]->width() - 1); // X-axis - conn_dims_.push_back(bottom[0]->height() - 1); // Y-axis + conn_dims_.push_back(bottom[0]->width()); // X-axis + conn_dims_.push_back(bottom[0]->height()); // Y-axis conn_dims_.push_back(1); // Z-axis conn_dims_.push_back(2); // #edges @@ -333,9 +330,6 @@ template void MalisLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - std::cout << "Outer dim: " << outer_num_ << std::endl; - std::cout << "Inner dim: " << inner_num_ << std::endl; - if (propagate_down[1]) { LOG(FATAL)<< this->type() << " Layer cannot backpropagate to label inputs."; @@ -351,9 +345,9 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, // Labels (size w * h, c values) const Dtype* label = bottom[1]->cpu_data(); - cv::namedWindow("labelled"); - cv::namedWindow("prob"); - cv::namedWindow("diff"); + // cv::namedWindow("labelled"); + // cv::namedWindow("prob"); + // cv::namedWindow("diff"); cv::Mat img(bottom[1]->height(), bottom[1]->width(), CV_8SC1); #pragma omp parallel for @@ -363,30 +357,54 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, } } - std::vector > blobs; - - cv::Mat seg = FindBlobs(img, &blobs); + cv::Mat seg = FindBlobs(img); // This is for debugging only: - { - cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3); - for (size_t i = 0; i < blobs.size(); i++) { + /*{ + std::vector labels; + + for(int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { + int val = reinterpret_cast(seg.ptr(0))[i]; + bool found = false; + for(int j = 0; j < labels.size(); ++j) { + if(val == labels[j]) { + found = true; + } + } + if(found == false) { + labels.push_back(val); + } + } + + std::vector colors; + + for(int i = 0; i < labels.size(); ++i) { unsigned char r = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT unsigned char g = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT unsigned char b = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - for (size_t j = 0; j < blobs[i].size(); j++) { - int x = blobs[i][j].x; - int y = blobs[i][j].y; + cv::Vec3b color(r,g,b); + colors.push_back(color); + } + + cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3); - output.at(y, x)[0] = b; - output.at(y, x)[1] = g; - output.at(y, x)[2] = r; + for(int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { + int val = reinterpret_cast(seg.ptr(0))[i]; + if(val == 0) { + output.at(i) = cv::Vec3b(0,0,0); + continue; + } + for(int j = 0; j < labels.size(); ++j) { + if(val == labels[j]) { + output.at(i) = colors[j]; + } } } + cv::imshow("labelled", output); cv::waitKey(100); - } + }*/ Dtype loss_out = 0; Dtype classerr_out = 0; @@ -406,11 +424,11 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, for (int i = 0; i < bottom[0]->height() - 1; ++i) { for (int j = 0; j < bottom[0]->width() - 1; ++j) { // Center - Dtype p0 = prob_data[i * bottom[0]->width() + j]; + Dtype p0 = prob_data[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + j]; // Right - Dtype p1 = prob_data[i * bottom[0]->width() + (j + 1)]; + Dtype p1 = prob_data[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + (j + 1)]; // Bottom - Dtype p2 = prob_data[(i + 1) * bottom[0]->width() + j]; + Dtype p2 = prob_data[bottom[0]->width() * bottom[0]->height() + (i + 1) * bottom[0]->width() + j]; // Center Dtype g0 = label[i * bottom[0]->width() + j]; @@ -420,25 +438,37 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, Dtype g2 = label[(i + 1) * bottom[0]->width() + j]; // X positive - conn_data_pos[i * (bottom[0]->width() - 1) + j] = std::min( - 1.0 - std::fabs(p0 - p1), 1.0 - std::fabs(g0 - g1)); + conn_data_pos[i * (bottom[0]->width() - 1) + j] = std::max( + (p0 + p1) / 2.0, (g0 + g1) / 2.0); // X negative - conn_data_neg[i * (bottom[0]->width() - 1) + j] = std::max( - 1.0 - std::fabs(p0 - p1), 1.0 - std::fabs(g0 - g1)); + conn_data_neg[i * (bottom[0]->width() - 1) + j] = std::min( + (p0 + p1) / 2.0, (g0 + g1) / 2.0); // Y positive conn_data_pos[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) - + i * (bottom[0]->width() - 1) + j] = std::min( - 1.0 - std::fabs(p0 - p2), 1.0 - std::fabs(g0 - g2)); + + i * (bottom[0]->width() - 1) + j] = std::max( + (p0 + p2) / 2.0, (g0 + g2) / 2.0); // Y negative conn_data_neg[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) - + i * (bottom[0]->width() - 1) + j] = std::max( - 1.0 - std::fabs(p0 - p2), 1.0 - std::fabs(g0 - g2)); + + i * (bottom[0]->width() - 1) + j] = std::min( + (p0 + p2) / 2.0, (g0 + g2) / 2.0); } } + /*cv::Mat cd_pos(bottom[0]->height()-1, bottom[0]->width()-1, + cv::DataType::type, + &conn_data_pos[0], sizeof(Dtype) * (bottom[0]->width()-1)); + cv::imshow("prob", cd_pos); + cv::waitKey(100); + + cv::Mat cd_neg(bottom[0]->height()-1, bottom[0]->width()-1, + cv::DataType::type, + &conn_data_neg[0], sizeof(Dtype) * (bottom[0]->width()-1)); + cv::imshow("diff", cd_neg); + cv::waitKey(0); + auto minmax = std::minmax_element(conn_data_neg.begin(), conn_data_neg.end()); @@ -446,21 +476,22 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, conn_data_neg[minmax.first - conn_data_neg.begin()] << " " << conn_data_neg[minmax.second - conn_data_neg.begin()] << std::endl; - minmax = std::minmax_element(dloss_pos.begin(), dloss_pos.end()); + minmax = std::minmax_element(conn_data_pos.begin(), + conn_data_pos.end()); std::cout << "Conndata pos min/max: " << conn_data_pos[minmax.first - conn_data_pos.begin()] << " " << conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl; - std::cout << "Before MALIS 1" << std::endl; + std::cout << "Before MALIS 1" << std::endl;*/ Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), true, &dloss_pos[0], &loss_out, &classerr_out, &rand_index_out); - std::cout << "Before MALIS 2" << std::endl; + //std::cout << "Before MALIS 2" << std::endl; Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), @@ -468,7 +499,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, &loss_out, &classerr_out, &rand_index_out); - minmax = std::minmax_element(dloss_neg.begin(), dloss_neg.end()); + /*minmax = std::minmax_element(dloss_neg.begin(), dloss_neg.end()); std::cout << "DLoss_neg min/max: " << dloss_neg[minmax.first - dloss_neg.begin()] << " " << @@ -476,15 +507,15 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, minmax = std::minmax_element(dloss_pos.begin(), dloss_pos.end()); - std::cout << "DLoss_pos min/max: " << + /*std::cout << "DLoss_pos min/max: " << dloss_pos[minmax.first - dloss_pos.begin()] << " " << dloss_pos[minmax.second - dloss_pos.begin()] << std::endl; std::cout << "Before PROB BACK" << std::endl; - caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); + //caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); - std::cout << "Before LOSS BACK" << std::endl; + std::cout << "Before LOSS BACK" << std::endl;*/ // Spread out the losses to pixels for (int i = 0; i < bottom[0]->height() - 1; ++i) { @@ -519,7 +550,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, } } - Dtype* prob_rd = prob_.mutable_cpu_data(); + /*Dtype* prob_rd = prob_.mutable_cpu_data(); cv::Mat wrapped_1(bottom[0]->height(), bottom[0]->width(), cv::DataType::type, @@ -533,7 +564,11 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, cv::imshow("diff", wrapped_2); cv::waitKey(100); - std::cout << "After LOSS BACK" << std::endl; + std::cout << "After LOSS BACK" << std::endl;*/ + + const Dtype loss_weight = top[0]->cpu_diff()[0]; + caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); + } } diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 27f7b50302d..cd9aa25c9de 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -260,7 +260,7 @@ double caffe_nextafter(const double b); void caffe_rng_uniform(const int n, unsigned int* r) { CHECK_GE(n, 0); CHECK(r); - boost::uniform_int random_distribution(INT32_MIN, INT32_MAX); + boost::uniform_int random_distribution(0, UINT32_MAX); boost::variate_generator> variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { From e9716c0c583962723270ab20320d891a9b10f3ec Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 11:17:25 -0400 Subject: [PATCH 104/254] Random fix in OpenCL, CPU_ONLY test fix. --- CMakeLists.txt | 4 ++-- include/caffe/device_context.hpp | 4 ++++ include/caffe/greentea/greentea.hpp | 4 ++++ include/caffe/util/device_alternate.hpp | 4 ++++ src/caffe/common.cpp | 5 +++-- src/caffe/greentea/cl_kernels.cpp | 4 ++-- src/caffe/greentea/cl_kernels/dropout.cl | 2 +- src/caffe/layers/malis_loss_layer.cpp | 2 +- src/caffe/syncedmem.cpp | 20 ++++++++++++-------- src/caffe/util/math_functions.cpp | 4 ++-- 10 files changed, 35 insertions(+), 18 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 64a06f06a94..cc7a72323ef 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,8 +16,8 @@ include(cmake/ConfigGen.cmake) # ---[ Options caffe_option(CPU_ONLY "Build Caffe wihtout CUDA and OpenCL support" OFF) -caffe_option(USE_CUDA "Build Caffe with CUDA support" ON) -caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" ON) +caffe_option(USE_CUDA "Build Caffe with CUDA support" OFF IF CPU_ONLY) +caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" OFF IF CPU_ONLY) caffe_option(USE_CLBLAS "Build Caffe with clBLAS support (instead of using ViennaClBLAS)" OFF) caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" OFF) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index a948490f977..7177a660d70 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -8,6 +8,10 @@ #ifndef CAFFE_DEVICE_CONTEXT_HPP_ #define CAFFE_DEVICE_CONTEXT_HPP_ +#ifdef CMAKE_BUILD + #include "caffe_config.h" +#endif + #include #include #include "caffe/blob.hpp" diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 061e3e4e381..d24847d0a9f 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -8,6 +8,10 @@ #ifndef CAFFE_GREENTEA_HPP_ #define CAFFE_GREENTEA_HPP_ +#ifdef CMAKE_BUILD + #include "caffe_config.h" +#endif + #include // Define ViennaCL/GreenTea flags diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index f7ea5c7f9b9..c67994a9de4 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -1,6 +1,10 @@ #ifndef CAFFE_UTIL_DEVICE_ALTERNATE_H_ #define CAFFE_UTIL_DEVICE_ALTERNATE_H_ +#ifdef CMAKE_BUILD + #include "caffe_config.h" +#endif + #ifdef CPU_ONLY // CPU-only Caffe. #include diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index b8b1435da01..a7f397c3235 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -60,7 +60,7 @@ DeviceContext *Caffe::GetDefaultDeviceContext() { #ifdef CPU_ONLY // CPU-only Caffe. Caffe::Caffe() -: random_generator_(), mode_(Caffe::CPU) {} +: random_generator_(), mode_(Caffe::CPU), default_device_context_(nullptr) {} Caffe::~Caffe() {} @@ -114,7 +114,8 @@ Caffe::Caffe() curand_generator_(NULL), #endif // USE_CUDA random_generator_(), - mode_(Caffe::CPU) { + mode_(Caffe::CPU), + default_device_context_(nullptr) { // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). #ifdef USE_CUDA diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 2b44727f9dd..01fff6b8f55 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -13,7 +13,7 @@ std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#e std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT @@ -31,7 +31,7 @@ std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n# std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * (mask[index] > threshold) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/dropout.cl b/src/caffe/greentea/cl_kernels/dropout.cl index acb79214b41..fa69290df8f 100644 --- a/src/caffe/greentea/cl_kernels/dropout.cl +++ b/src/caffe/greentea/cl_kernels/dropout.cl @@ -9,7 +9,7 @@ __kernel void TEMPLATE(dropout_forward,Dtype)(const int n, const Dtype scale, __global Dtype* out) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - out[index] = in[index] * (mask[index] > threshold) * scale; + out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; } } diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 2cfe6d80832..0233f171fdb 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -507,7 +507,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, minmax = std::minmax_element(dloss_pos.begin(), dloss_pos.end()); - /*std::cout << "DLoss_pos min/max: " << + std::cout << "DLoss_pos min/max: " << dloss_pos[minmax.first - dloss_pos.begin()] << " " << dloss_pos[minmax.second - dloss_pos.begin()] << std::endl; diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 111f708a16a..2200035caa1 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -114,7 +114,7 @@ inline void SyncedMemory::to_gpu() { } gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); -#endif +#endif // USE_GREENTEA } head_ = HEAD_AT_GPU; break; @@ -183,16 +183,20 @@ void SyncedMemory::set_cpu_data(void* data) { CaffeFreeHost(cpu_ptr_); } cpu_ptr_ = data; - if (device_context_->backend() == Backend::BACKEND_OpenCL) { +#ifndef CPU_ONLY #ifdef USE_GREENTEA - viennacl::ocl::context ctx = viennacl::ocl::get_context( - device_context_->id()); - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // If host memory is released and shared - gpu_ptr_ = NULL; + if (Caffe::mode() == Caffe::Brew::GPU) { + if(device_context_->backend() == Backend::BACKEND_OpenCL) { + viennacl::ocl::context ctx = viennacl::ocl::get_context( + device_context_->id()); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + // If host memory is released and shared + gpu_ptr_ = nullptr; + } } -#endif } +#endif // USE_GREENTEA +#endif // !CPU_ONLY head_ = HEAD_AT_CPU; own_cpu_data_ = false; } diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index cd9aa25c9de..bd92b39b914 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -260,8 +260,8 @@ double caffe_nextafter(const double b); void caffe_rng_uniform(const int n, unsigned int* r) { CHECK_GE(n, 0); CHECK(r); - boost::uniform_int random_distribution(0, UINT32_MAX); - boost::variate_generator> + boost::uniform_int random_distribution(INT32_MIN, INT32_MAX); + boost::variate_generator> variate_generator(caffe_rng(), random_distribution); for (int i = 0; i < n; ++i) { r[i] = variate_generator(); From 19bbb938163a689f4ad5530f5727fdffe79cee52 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 11:49:53 -0400 Subject: [PATCH 105/254] Lint fix, test fix. --- src/caffe/layers/malis_loss_layer.cpp | 79 +++++++++++++------------ src/caffe/syncedmem.cpp | 4 +- src/caffe/test/test_mergecrop_layer.cpp | 25 ++++---- 3 files changed, 54 insertions(+), 54 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 0233f171fdb..8c1faf24fa9 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -52,49 +52,49 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, /* Cache for speed to access neighbors */ // nVert stores (x * y * z) - long nVert = 1; - for (long i = 0; i < conn_num_dims - 1; ++i) { + int64_t nVert = 1; + for (int64_t i = 0; i < conn_num_dims - 1; ++i) { nVert = nVert * conn_dims[i]; } // prodDims stores x, x*y, x*y*z offsets - vector prodDims(conn_num_dims - 1); + vector prodDims(conn_num_dims - 1); prodDims[0] = 1; - for (long i = 1; i < conn_num_dims - 1; ++i) { + for (int64_t i = 1; i < conn_num_dims - 1; ++i) { prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; } /* convert n-d offset vectors into linear array offset scalars */ // nHood is a vector of size #edges vector nHood(nhood_dims[0]); - for (long i = 0; i < nhood_dims[0]; ++i) { + for (int64_t i = 0; i < nhood_dims[0]; ++i) { nHood[i] = 0; - for (long j = 0; j < nhood_dims[1]; ++j) { + for (int64_t j = 0; j < nhood_dims[1]; ++j) { nHood[i] += (int32_t) nhood_data[i + j * nhood_dims[0]] * prodDims[j]; } } /* Disjoint sets and sparse overlap vectors */ - vector > overlap(nVert); - vector rank(nVert); - vector parent(nVert); - map segSizes; - long nLabeledVert = 0; - long nPairPos = 0; - boost::disjoint_sets dsets(&rank[0], &parent[0]); + vector > overlap(nVert); + vector rank(nVert); + vector parent(nVert); + map segSizes; + int64_t nLabeledVert = 0; + int64_t nPairPos = 0; + boost::disjoint_sets dsets(&rank[0], &parent[0]); // Loop over all seg data items - for (long i = 0; i < nVert; ++i) { + for (int64_t i = 0; i < nVert; ++i) { dsets.make_set(i); if (0 != seg_data[i]) { - overlap[i].insert(pair(seg_data[i], 1)); + overlap[i].insert(pair(seg_data[i], 1)); ++nLabeledVert; ++segSizes[seg_data[i]]; nPairPos += (segSizes[seg_data[i]] - 1); } } - long nPairTot = (nLabeledVert * (nLabeledVert - 1)) / 2; - long nPairNeg = nPairTot - nPairPos; - long nPairNorm; + int64_t nPairTot = (nLabeledVert * (nLabeledVert - 1)) / 2; + int64_t nPairNeg = nPairTot - nPairPos; + int64_t nPairNorm; if (pos) { nPairNorm = nPairPos; } else { @@ -102,19 +102,19 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } /* Sort all the edges in increasing order of weight */ - std::vector pqueue( + std::vector pqueue( conn_dims[3] * std::max((conn_dims[0] - 1), 1) * std::max((conn_dims[1] - 1), 1) * std::max((conn_dims[2] - 1), 1)); - long j = 0; + int64_t j = 0; // Loop over #edges - for (long d = 0, i = 0; d < conn_dims[3]; ++d) { + for (int64_t d = 0, i = 0; d < conn_dims[3]; ++d) { // Loop over Z - for (long z = 0; z < conn_dims[2]; ++z) { + for (int64_t z = 0; z < conn_dims[2]; ++z) { // Loop over Y - for (long y = 0; y < conn_dims[1]; ++y) { + for (int64_t y = 0; y < conn_dims[1]; ++y) { // Loop over X - for (long x = 0; x < conn_dims[0]; ++x, ++i) { + for (int64_t x = 0; x < conn_dims[0]; ++x, ++i) { if (x < std::max(conn_dims[0] - 1, 1) && y < std::max(conn_dims[1] - 1, 1) && z < std::max(conn_dims[2] - 1, 1)) { @@ -128,16 +128,16 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, MalisAffinityGraphCompare(conn_data)); /* Start MST */ - long minEdge; - long e, v1, v2; - long set1, set2; - long nPair = 0; + int64_t minEdge; + int64_t e, v1, v2; + int64_t set1, set2; + int64_t nPair = 0; double loss = 0, dl = 0; - long nPairIncorrect = 0; - map::iterator it1, it2; + int64_t nPairIncorrect = 0; + map::iterator it1, it2; /* Start Kruskal's */ - for (long i = 0; i < pqueue.size(); ++i) { + for (int64_t i = 0; i < pqueue.size(); ++i) { minEdge = pqueue[i]; e = minEdge / nVert; v1 = minEdge % nVert; @@ -221,9 +221,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, // http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp template cv::Mat MalisLossLayer::FindBlobs(const cv::Mat &input) { - // Fill the label_image with the blobs - cv::Mat label_image; input.convertTo(label_image, CV_32SC1); @@ -424,11 +422,17 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, for (int i = 0; i < bottom[0]->height() - 1; ++i) { for (int j = 0; j < bottom[0]->width() - 1; ++j) { // Center - Dtype p0 = prob_data[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + j]; + Dtype p0 = prob_data[bottom[0]->width() + * bottom[0]->height() + + i * bottom[0]->width() + j]; // Right - Dtype p1 = prob_data[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + (j + 1)]; + Dtype p1 = prob_data[bottom[0]->width() + * bottom[0]->height() + + i * bottom[0]->width() + (j + 1)]; // Bottom - Dtype p2 = prob_data[bottom[0]->width() * bottom[0]->height() + (i + 1) * bottom[0]->width() + j]; + Dtype p2 = prob_data[bottom[0]->width() + * bottom[0]->height() + + (i + 1) * bottom[0]->width() + j]; // Center Dtype g0 = label[i * bottom[0]->width() + j]; @@ -491,7 +495,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, true, &dloss_pos[0], &loss_out, &classerr_out, &rand_index_out); - //std::cout << "Before MALIS 2" << std::endl; + // std::cout << "Before MALIS 2" << std::endl; Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), @@ -568,7 +572,6 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, const Dtype loss_weight = top[0]->cpu_diff()[0]; caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); - } } diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 2200035caa1..9718d09e222 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -186,12 +186,12 @@ void SyncedMemory::set_cpu_data(void* data) { #ifndef CPU_ONLY #ifdef USE_GREENTEA if (Caffe::mode() == Caffe::Brew::GPU) { - if(device_context_->backend() == Backend::BACKEND_OpenCL) { + if (device_context_->backend() == Backend::BACKEND_OpenCL) { viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_->id()); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // If host memory is released and shared gpu_ptr_ = nullptr; + ctx.get_queue().finish(); } } } diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp index 6a42c61112f..6010e41ca8b 100644 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -15,12 +15,10 @@ namespace caffe { template class MergeCropLayerTest : public GPUDeviceTest { - typedef TypeParam Dtype; - protected: MergeCropLayerTest() - : blob_bottom_a_(new Blob()), blob_bottom_b_(new Blob()), - blob_top_(new Blob()) { + : blob_bottom_a_(new Blob()), blob_bottom_b_(new Blob()), + blob_top_(new Blob()) { } virtual void SetUp() { @@ -72,7 +70,7 @@ class MergeCropLayerTest : public GPUDeviceTest { } LayerParameter layer_param; - MergeCropLayer layer(layer_param); + MergeCropLayer layer(layer_param); layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); @@ -153,11 +151,11 @@ class MergeCropLayerTest : public GPUDeviceTest { } LayerParameter layer_param; - MergeCropLayer layer(layer_param); + MergeCropLayer layer(layer_param); layer.SetUp(blob_bottom_vec_, blob_top_vec_); layer.Forward(blob_bottom_vec_, blob_top_vec_); - caffe_cpu_copy(blob_top_->count(), blob_top_->cpu_data(), + caffe_cpu_copy(blob_top_->count(), blob_top_->cpu_data(), blob_top_->mutable_cpu_diff()); vector propagate_down(blob_bottom_vec_.size(), true); @@ -178,20 +176,19 @@ class MergeCropLayerTest : public GPUDeviceTest { } } - Blob* const blob_bottom_a_; - Blob* const blob_bottom_b_; - Blob* const blob_top_; + Blob* const blob_bottom_a_; + Blob* const blob_bottom_b_; + Blob* const blob_top_; - vector*> blob_bottom_vec_; - vector*> blob_top_vec_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; }; TYPED_TEST_CASE(MergeCropLayerTest, TestDtypes); TYPED_TEST(MergeCropLayerTest, TestSetup) { - typedef TypeParam Dtype; LayerParameter layer_param; - MergeCropLayer layer(layer_param); + MergeCropLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_a_->num()); From d7c5213409db512c361ccaee03ac1b4e556355ab Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 11:50:21 -0400 Subject: [PATCH 106/254] . --- src/caffe/test/test_mergecrop_layer.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp index 6010e41ca8b..4ff8fb6d827 100644 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -17,7 +17,8 @@ template class MergeCropLayerTest : public GPUDeviceTest { protected: MergeCropLayerTest() - : blob_bottom_a_(new Blob()), blob_bottom_b_(new Blob()), + : blob_bottom_a_(new Blob()), + blob_bottom_b_(new Blob()), blob_top_(new Blob()) { } From fab66e9001c1692d6489ca19969d56423f0bb1b7 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 12:10:53 -0400 Subject: [PATCH 107/254] Limit travis resources, test fix in CPU_ONLY mode. --- scripts/travis/travis_build_and_test.sh | 3 ++- src/caffe/syncedmem.cpp | 2 +- src/caffe/test/test_mergecrop_layer.cpp | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh index 8ff63f31fdd..fe63eb9daaf 100755 --- a/scripts/travis/travis_build_and_test.sh +++ b/scripts/travis/travis_build_and_test.sh @@ -2,7 +2,8 @@ # Script called by Travis to do a CPU-only build of and test Caffe. set -e -MAKE="make --jobs=$NUM_THREADS --keep-going" +# Limit jobs to stay within available RAM/Swap +MAKE="make --jobs=2 --keep-going" if $WITH_CMAKE; then mkdir build diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 9718d09e222..cfc3171b344 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -70,7 +70,7 @@ inline void SyncedMemory::to_cpu() { head_ = SYNCED; #else NO_GPU; -#endif +#endif // !CPU_ONLY break; } case HEAD_AT_CPU: diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp index 4ff8fb6d827..623bed878fc 100644 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -1,4 +1,3 @@ -#ifndef CPU_ONLY // CPU-GPU test #include #include @@ -11,6 +10,8 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifndef CPU_ONLY // CPU-GPU test + namespace caffe { template From 303f9dac2c114089a0d79b6a30f74b8437e85564 Mon Sep 17 00:00:00 2001 From: Jan Funke Date: Fri, 3 Jul 2015 14:09:27 -0400 Subject: [PATCH 108/254] fixed compilation error with ViennaCl 1.5.1 --- .../greentea/greentea_math_functions.cpp | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 12976f3367f..0a949df2807 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -44,6 +44,19 @@ #include "viennacl/vector.hpp" #endif +// ViennaCL 1.5.1 compability fix +#ifndef VIENNACL_MINOR_VERSION +#define VIENNACL_MINOR_VERSION 5 +#endif + +#if VIENNACL_MINOR_VERSION > 5 +#define VCL_ROW_MAJOR , true +#define VCL_COL_MAJOR , false +#else +#define VCL_ROW_MAJOR +#define VCL_COL_MAJOR +#endif + namespace caffe { void greentea_memset(const int ctx_id, const size_t N, const int alpha, @@ -183,16 +196,16 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, viennacl::matrix_base matA(A, ctx, A_size1, size_type(0), difference_type(1), size_type(M), - A_size2, size_type(offA), difference_type(1), size_type(lda), true); + A_size2, size_type(offA), difference_type(1), size_type(lda) VCL_ROW_MAJOR); viennacl::matrix_base matB(B, ctx, B_size1, size_type(0), difference_type(1), size_type(K), - B_size2, size_type(offB), difference_type(1), size_type(ldb), true); + B_size2, size_type(offB), difference_type(1), size_type(ldb) VCL_ROW_MAJOR); viennacl::matrix_base matC(C, ctx, size_type(M), size_type(0), difference_type(1), size_type(M), size_type(N), size_type(offC), difference_type(1), - size_type(ldc), true); + size_type(ldc) VCL_ROW_MAJOR); if (TransA == CblasTrans && TransB == CblasTrans) viennacl::linalg::prod_impl(viennacl::trans(matA), viennacl::trans(matB), @@ -282,7 +295,7 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, size_type(M), size_type(0), difference_type(1), size_type(M), size_type(N), size_type(offA), - difference_type(1), size_type(N), true); + difference_type(1), size_type(N) VCL_ROW_MAJOR); v2 *= beta; if (TransA == CblasTrans) v2 += alpha * viennacl::linalg::prod(viennacl::trans(mat), v1); From f1050ef0fe8e5c0e5f38a3cf3a98ad1b977bb6d7 Mon Sep 17 00:00:00 2001 From: Jan Funke Date: Fri, 3 Jul 2015 14:09:50 -0400 Subject: [PATCH 109/254] fixed segfault in CPU OpenCL version --- src/caffe/syncedmem.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index cfc3171b344..efc5aa4dd28 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -179,24 +179,24 @@ const void* SyncedMemory::cpu_data() { void SyncedMemory::set_cpu_data(void* data) { CHECK(data); - if (own_cpu_data_) { - CaffeFreeHost(cpu_ptr_); - } - cpu_ptr_ = data; #ifndef CPU_ONLY #ifdef USE_GREENTEA if (Caffe::mode() == Caffe::Brew::GPU) { if (device_context_->backend() == Backend::BACKEND_OpenCL) { viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_->id()); + ctx.get_queue().finish(); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { gpu_ptr_ = nullptr; - ctx.get_queue().finish(); } } } #endif // USE_GREENTEA #endif // !CPU_ONLY + if (own_cpu_data_) { + CaffeFreeHost(cpu_ptr_); + } + cpu_ptr_ = data; head_ = HEAD_AT_CPU; own_cpu_data_ = false; } From 5fd41b03869fe1635e0ebfc35adc6a9e767c1831 Mon Sep 17 00:00:00 2001 From: Jan Funke Date: Fri, 3 Jul 2015 14:02:19 -0400 Subject: [PATCH 110/254] fixed visibility of caffe options in CMakeLists.txt --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cc7a72323ef..64a06f06a94 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,8 +16,8 @@ include(cmake/ConfigGen.cmake) # ---[ Options caffe_option(CPU_ONLY "Build Caffe wihtout CUDA and OpenCL support" OFF) -caffe_option(USE_CUDA "Build Caffe with CUDA support" OFF IF CPU_ONLY) -caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" OFF IF CPU_ONLY) +caffe_option(USE_CUDA "Build Caffe with CUDA support" ON) +caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" ON) caffe_option(USE_CLBLAS "Build Caffe with clBLAS support (instead of using ViennaClBLAS)" OFF) caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" OFF) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) From e353ba3445d8d91a153698ca29c876697849845a Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 15:47:51 -0400 Subject: [PATCH 111/254] Makefile fix. --- src/caffe/greentea/greentea_math_functions.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 0a949df2807..25e87554aa1 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -196,11 +196,13 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, viennacl::matrix_base matA(A, ctx, A_size1, size_type(0), difference_type(1), size_type(M), - A_size2, size_type(offA), difference_type(1), size_type(lda) VCL_ROW_MAJOR); + A_size2, size_type(offA), difference_type(1), size_type(lda) + VCL_ROW_MAJOR); viennacl::matrix_base matB(B, ctx, B_size1, size_type(0), difference_type(1), size_type(K), - B_size2, size_type(offB), difference_type(1), size_type(ldb) VCL_ROW_MAJOR); + B_size2, size_type(offB), difference_type(1), size_type(ldb) + VCL_ROW_MAJOR); viennacl::matrix_base matC(C, ctx, size_type(M), size_type(0), difference_type(1), size_type(M), @@ -295,7 +297,8 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, size_type(M), size_type(0), difference_type(1), size_type(M), size_type(N), size_type(offA), - difference_type(1), size_type(N) VCL_ROW_MAJOR); + difference_type(1), size_type(N) + VCL_ROW_MAJOR); v2 *= beta; if (TransA == CblasTrans) v2 += alpha * viennacl::linalg::prod(viennacl::trans(mat), v1); From 6f77a9ca1dbe3cc2ae194420b0756c0ed1e2050d Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 18:06:58 -0400 Subject: [PATCH 112/254] Definition fix for CMAKE build. --- include/caffe/util/device_alternate.hpp | 6 ++++-- src/caffe/device_context.cpp | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index c67994a9de4..199a5091de9 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -7,6 +7,8 @@ #ifdef CPU_ONLY // CPU-only Caffe. +extern const int CAFFE_CUDA_NUM_THREADS = 0; + #include // Stub out GPU calls as unavailable. @@ -90,9 +92,9 @@ const char* curandGetErrorString(curandStatus_t error); // Use 1024 threads per block, which requires cuda sm_2x or above, // or fall back to attempt compatibility (best of luck to you). #if __CUDA_ARCH__ >= 200 - const int CAFFE_CUDA_NUM_THREADS = 1024; + extern const int CAFFE_CUDA_NUM_THREADS = 1024; #else - const int CAFFE_CUDA_NUM_THREADS = 512; + extern const int CAFFE_CUDA_NUM_THREADS = 512; #endif // CDT hacks: allow proper code formatting and remove errors in CDT diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index 1118fca065b..30160e936bb 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -10,7 +10,6 @@ #include "caffe/greentea/greentea.hpp" #include "caffe/util/device_alternate.hpp" - namespace caffe { DeviceContext::DeviceContext() @@ -26,6 +25,7 @@ DeviceContext::DeviceContext(int id, Backend backend) } void DeviceContext::Init() { +#ifndef CPU_ONLY if (backend_ == BACKEND_CUDA) { #ifdef USE_CUDA workgroup_sizes_[0] = CAFFE_CUDA_NUM_THREADS; @@ -41,6 +41,7 @@ void DeviceContext::Init() { workgroup_sizes_[2] = temp[2]; #endif // USE_GREENTEA } +#endif // !CPU_ONLY } Backend DeviceContext::backend() const { From 7ea0252ae96f3af5ac8120aa8402b9909ae0b467 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 18:41:43 -0400 Subject: [PATCH 113/254] Changed CUDA threads variable to a define. --- include/caffe/util/device_alternate.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 199a5091de9..665d5f61b6f 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -7,7 +7,7 @@ #ifdef CPU_ONLY // CPU-only Caffe. -extern const int CAFFE_CUDA_NUM_THREADS = 0; +#define CAFFE_CUDA_NUM_THREADS 0 #include @@ -92,9 +92,9 @@ const char* curandGetErrorString(curandStatus_t error); // Use 1024 threads per block, which requires cuda sm_2x or above, // or fall back to attempt compatibility (best of luck to you). #if __CUDA_ARCH__ >= 200 - extern const int CAFFE_CUDA_NUM_THREADS = 1024; +#define CAFFE_CUDA_NUM_THREADS 1024 #else - extern const int CAFFE_CUDA_NUM_THREADS = 512; +#define CAFFE_CUDA_NUM_THREADS 512 #endif // CDT hacks: allow proper code formatting and remove errors in CDT From 32ee1ead8efd518cd03f5b386a06b2b9ff5ef71b Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 19:06:27 -0400 Subject: [PATCH 114/254] Fixed Malis loss: working for 2 labels. --- src/caffe/layers/malis_loss_layer.cpp | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 8c1faf24fa9..97d9aaf9ae7 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -176,18 +176,9 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } } } + dloss_data[minEdge] /= nPairNorm; - /* HARD-CODED ALERT!! - * The derivative of the activation function is also multiplied here. - * Assumes the logistic nonlinear activation function. - */ - // dloss_data[minEdge] *= - // conn_data[minEdge] * (1 - conn_data[minEdge]); // DSigmoid - // Don't pre-multiply derivative, will be done - // later in the softmax backward - - /* move the pixel bags of the non-representative to the representative */ - // make set1 the rep to keep and set2 the rep to empty + if (dsets.find_set(set1) == set2) { std::swap(set1, set2); } @@ -200,8 +191,8 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } else { it1->second += it2->second; } - overlap[set2].erase(it2++); } + overlap[set2].clear(); } // end link } // end while From 823e19ec4734f8b00adaf493de2ea00aa70f0fce Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 19:19:30 -0400 Subject: [PATCH 115/254] Malis loss, loop fix. --- src/caffe/layers/malis_loss_layer.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 97d9aaf9ae7..342bd232e26 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -183,11 +183,12 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, std::swap(set1, set2); } - it2 = overlap[set2].begin(); - while (it2 != overlap[set2].end()) { + for (it2 = overlap[set2].begin(); + it2 != overlap[set2].end(); ++it2) { it1 = overlap[set1].find(it2->first); if (it1 == overlap[set1].end()) { - overlap[set1].insert(pair(it2->first, it2->second)); + overlap[set1].insert(pair + (it2->first, it2->second)); } else { it1->second += it2->second; } From e3fced9d93ce096a178ecef97eb01290c87c33d5 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 3 Jul 2015 23:45:59 -0400 Subject: [PATCH 116/254] Malis loss layer corrected array sizes. --- src/caffe/layers/malis_loss_layer.cpp | 75 +++++++++++++++------------ 1 file changed, 41 insertions(+), 34 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 342bd232e26..36f2a1ef371 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -26,7 +26,7 @@ class MalisAffinityGraphCompare { explicit MalisAffinityGraphCompare(const Dtype * EdgeWeightArray) { mEdgeWeightArray = EdgeWeightArray; } - bool operator()(const int ind1, const int ind2) const { + bool operator()(const int64_t& ind1, const int64_t& ind2) const { return (mEdgeWeightArray[ind1] > mEdgeWeightArray[ind2]); } }; @@ -58,7 +58,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } // prodDims stores x, x*y, x*y*z offsets - vector prodDims(conn_num_dims - 1); + std::vector prodDims(conn_num_dims - 1); prodDims[0] = 1; for (int64_t i = 1; i < conn_num_dims - 1; ++i) { prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; @@ -66,7 +66,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, /* convert n-d offset vectors into linear array offset scalars */ // nHood is a vector of size #edges - vector nHood(nhood_dims[0]); + std::vector nHood(nhood_dims[0]); for (int64_t i = 0; i < nhood_dims[0]; ++i) { nHood[i] = 0; for (int64_t j = 0; j < nhood_dims[1]; ++j) { @@ -75,10 +75,10 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } /* Disjoint sets and sparse overlap vectors */ - vector > overlap(nVert); - vector rank(nVert); - vector parent(nVert); - map segSizes; + std::vector > overlap(nVert); + std::vector rank(nVert); + std::vector parent(nVert); + std::map segSizes; int64_t nLabeledVert = 0; int64_t nPairPos = 0; boost::disjoint_sets dsets(&rank[0], &parent[0]); @@ -86,7 +86,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, for (int64_t i = 0; i < nVert; ++i) { dsets.make_set(i); if (0 != seg_data[i]) { - overlap[i].insert(pair(seg_data[i], 1)); + overlap[i].insert(std::pair(seg_data[i], 1)); ++nLabeledVert; ++segSizes[seg_data[i]]; nPairPos += (segSizes[seg_data[i]] - 1); @@ -124,7 +124,10 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, } } } - sort(pqueue.begin(), pqueue.end(), + + pqueue.resize(j); + + std::sort(pqueue.begin(), pqueue.end(), MalisAffinityGraphCompare(conn_data)); /* Start MST */ @@ -134,18 +137,26 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, int64_t nPair = 0; double loss = 0, dl = 0; int64_t nPairIncorrect = 0; - map::iterator it1, it2; + std::map::iterator it1, it2; /* Start Kruskal's */ for (int64_t i = 0; i < pqueue.size(); ++i) { minEdge = pqueue[i]; + // nVert = x * y * z, minEdge in [0, x * y * z * #edges] + + // e: edge dimension (0: X, 1: Y, 2: Z) e = minEdge / nVert; + + // v1: node at edge beginning v1 = minEdge % nVert; + + // v2: neighborhood node at edge e v2 = v1 + nHood[e]; set1 = dsets.find_set(v1); set2 = dsets.find_set(v2); + if (set1 != set2) { dsets.link(set1, set2); @@ -401,13 +412,13 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, Dtype rand_index_out = 0; std::vector conn_data_pos( - 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + 2 * bottom[0]->height() * bottom[0]->width()); std::vector conn_data_neg( - 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + 2 * bottom[0]->height() * bottom[0]->width()); std::vector dloss_pos( - 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + 2 * bottom[0]->height() * bottom[0]->width()); std::vector dloss_neg( - 2 * (bottom[0]->height() - 1) * (bottom[0]->width() - 1)); + 2 * bottom[0]->height() * bottom[0]->width()); // Construct positive and negative affinity graph #pragma omp parallel for @@ -434,21 +445,21 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, Dtype g2 = label[(i + 1) * bottom[0]->width() + j]; // X positive - conn_data_pos[i * (bottom[0]->width() - 1) + j] = std::max( + conn_data_pos[i * bottom[0]->width() + j] = std::max( (p0 + p1) / 2.0, (g0 + g1) / 2.0); // X negative - conn_data_neg[i * (bottom[0]->width() - 1) + j] = std::min( + conn_data_neg[i * bottom[0]->width() + j] = std::min( (p0 + p1) / 2.0, (g0 + g1) / 2.0); // Y positive - conn_data_pos[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) - + i * (bottom[0]->width() - 1) + j] = std::max( + conn_data_pos[bottom[0]->width() * bottom[0]->height() + + i * bottom[0]->width() + j] = std::max( (p0 + p2) / 2.0, (g0 + g2) / 2.0); // Y negative - conn_data_neg[(bottom[0]->width() - 1) * (bottom[0]->height() - 1) - + i * (bottom[0]->width() - 1) + j] = std::min( + conn_data_neg[bottom[0]->width() * bottom[0]->height() + + i * bottom[0]->width() + j] = std::min( (p0 + p2) / 2.0, (g0 + g2) / 2.0); } } @@ -477,21 +488,17 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, std::cout << "Conndata pos min/max: " << conn_data_pos[minmax.first - conn_data_pos.begin()] << " " << - conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl; + conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl;*/ - std::cout << "Before MALIS 1" << std::endl;*/ - - Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], + Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), - true, &dloss_pos[0], + false, &dloss_neg[0], &loss_out, &classerr_out, &rand_index_out); - // std::cout << "Before MALIS 2" << std::endl; - - Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], + Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), - false, &dloss_neg[0], + true, &dloss_pos[0], &loss_out, &classerr_out, &rand_index_out); @@ -516,13 +523,13 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, // Spread out the losses to pixels for (int i = 0; i < bottom[0]->height() - 1; ++i) { for (int j = 0; j < bottom[0]->width() - 1; ++j) { - Dtype lxp = dloss_pos[i * (bottom[0]->width() - 1) + j]; - Dtype lxn = dloss_neg[i * (bottom[0]->width() - 1) + j]; + Dtype lxp = dloss_pos[i * bottom[0]->width() + j]; + Dtype lxn = dloss_neg[i * bottom[0]->width() + j]; - Dtype lyp = dloss_pos[(bottom[0]->width() - 1) - * (bottom[0]->height() - 1) + i * (bottom[0]->width() - 1) + j]; + Dtype lyp = dloss_pos[bottom[0]->width() + * bottom[0]->height() + i * bottom[0]->width() + j]; Dtype lyn = dloss_neg[(bottom[0]->width() - 1) - * (bottom[0]->height() - 1) + i * (bottom[0]->width() - 1) + j]; + * bottom[0]->height() + i * bottom[0]->width() + j]; // Pick labels const int l0 = static_cast From fe419ebefe15c0bb59eb787ef7445d326e434b0d Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 4 Jul 2015 02:15:17 -0400 Subject: [PATCH 117/254] Malis loss layer improvement. --- src/caffe/layers/malis_loss_layer.cpp | 69 +++++++++++++++++---------- 1 file changed, 45 insertions(+), 24 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 36f2a1ef371..5b241b980b4 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -45,6 +45,8 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, Dtype margin) { + Dtype threshold = 0.5; + if ((nhood_dims[1] != (conn_num_dims - 1)) || (nhood_dims[0] != conn_dims[conn_num_dims - 1])) { LOG(FATAL) << "nhood and conn dimensions don't match"; @@ -167,21 +169,19 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, if (pos && (it1->first == it2->first)) { // +ve example pairs - // Sq-Sq loss is used here - dl = std::max(0.0, 0.5 + margin - conn_data[minEdge]); - loss += 0.5 * dl * dl * nPair; - dloss_data[minEdge] += dl * nPair; - if (conn_data[minEdge] <= 0.5) { // an error + dl = std::max(Dtype(0.0), threshold + margin - conn_data[minEdge]); + loss += dl * nPair; + dloss_data[minEdge] -= (dl > 0) * nPair; + if (conn_data[minEdge] <= threshold) { // an error nPairIncorrect += nPair; } } else if ((!pos) && (it1->first != it2->first)) { // -ve example pairs - // Sq-Sq loss is used here - dl = -std::max(0.0, conn_data[minEdge] - 0.5 + margin); - loss += 0.5 * dl * dl * nPair; - dloss_data[minEdge] += dl * nPair; - if (conn_data[minEdge] > 0.5) { // an error + dl = std::max(Dtype(0.0), conn_data[minEdge] - threshold + margin); + loss += dl * nPair; + dloss_data[minEdge] += (dl > 0) * nPair; + if (conn_data[minEdge] > threshold) { // an error nPairIncorrect += nPair; } } @@ -347,7 +347,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, const Dtype* label = bottom[1]->cpu_data(); // cv::namedWindow("labelled"); - // cv::namedWindow("prob"); + cv::namedWindow("prob"); // cv::namedWindow("diff"); cv::Mat img(bottom[1]->height(), bottom[1]->width(), CV_8SC1); @@ -532,45 +532,66 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, * bottom[0]->height() + i * bottom[0]->width() + j]; // Pick labels - const int l0 = static_cast + /*const int l0 = static_cast (label[i * bottom[0]->width() + j]); const int l1 = static_cast (label[i * bottom[0]->width() + (j + 1)]); const int l2 = static_cast - (label[(i + 1) * bottom[0]->width() + j]); + (label[(i + 1) * bottom[0]->width() + j]);*/ + + // Center + bottom_diff[0 * inner_num_ + i * bottom[0]->width() + j] -= 0.5 + * (lxp + lxn + lyp + lyn); + + // Right + bottom_diff[0 * inner_num_ + i * bottom[0]->width() + (j + 1)] -= 0.5 + * (lxp + lxn); + + // Bottom + bottom_diff[0 * inner_num_ + (i + 1) * bottom[0]->width() + j] -= 0.5 + * (lyp + lyn); + // Center - bottom_diff[l0 * inner_num_ + i * bottom[0]->width() + j] += 0.5 + bottom_diff[1 * inner_num_ + i * bottom[0]->width() + j] += 0.5 * (lxp + lxn + lyp + lyn); // Right - bottom_diff[l1 * inner_num_ + i * bottom[0]->width() + (j + 1)] += 0.5 + bottom_diff[1 * inner_num_ + i * bottom[0]->width() + (j + 1)] += 0.5 * (lxp + lxn); // Bottom - bottom_diff[l2 * inner_num_ + (i + 1) * bottom[0]->width() + j] += 0.5 + bottom_diff[1 * inner_num_ + (i + 1) * bottom[0]->width() + j] += 0.5 * (lyp + lyn); } } /*Dtype* prob_rd = prob_.mutable_cpu_data(); - cv::Mat wrapped_1(bottom[0]->height(), bottom[0]->width(), + cv::Mat wrapped_prob(bottom[0]->height(), bottom[0]->width(), cv::DataType::type, prob_rd, sizeof(Dtype) * bottom[0]->width()); - cv::imshow("prob", wrapped_1); + cv::imshow("prob", wrapped_prob); cv::waitKey(100); - cv::Mat wrapped_2(bottom[0]->height(), bottom[0]->width(), + cv::Mat wrapped_diff(bottom[0]->height(), bottom[0]->width(), cv::DataType::type, bottom_diff, sizeof(Dtype) * bottom[0]->width()); - cv::imshow("diff", wrapped_2); - cv::waitKey(100); - std::cout << "After LOSS BACK" << std::endl;*/ + double minVal, maxVal; + cv::minMaxLoc(wrapped_diff, &minVal, &maxVal); + + std::cout << "Max loss: " << maxVal << std::endl; + std::cout << "Min loss: " << minVal << std::endl; - const Dtype loss_weight = top[0]->cpu_diff()[0]; - caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); + cv::Mat tmp; + wrapped_diff.convertTo(tmp, CV_32FC1, 1.0 / (maxVal - minVal), + -minVal * 1.0 / (maxVal - minVal)); + + cv::imshow("diff", tmp); + cv::waitKey(0); + + std::cout << "After LOSS BACK" << std::endl;*/ } } From 6c10407d580820cd581215a128aa95dbdb6eb39a Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 4 Jul 2015 02:17:29 -0400 Subject: [PATCH 118/254] Comment out cv window. --- src/caffe/layers/malis_loss_layer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 5b241b980b4..ce08214e6b4 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -347,7 +347,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, const Dtype* label = bottom[1]->cpu_data(); // cv::namedWindow("labelled"); - cv::namedWindow("prob"); + // cv::namedWindow("prob"); // cv::namedWindow("diff"); cv::Mat img(bottom[1]->height(), bottom[1]->width(), CV_8SC1); From ec51b23ca5beed91bf37f062627fb2b515a8c055 Mon Sep 17 00:00:00 2001 From: Alastair Harrison Date: Sun, 5 Jul 2015 19:05:46 +0200 Subject: [PATCH 119/254] Fix OpenCL inclusion on OSX - When linking OpenCL from an OSX framework, the include path needs to be 'OpenCL/cl.h', rather than the more common 'CL/cl.h' found on linux platforms. --- include/caffe/greentea/greentea.hpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index d24847d0a9f..5e62eeacc9b 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -24,7 +24,12 @@ #define VIENNACL_WITH_OPENCL #endif +#ifndef __APPLE__ #include "CL/cl.h" +#else +#include "OpenCL/cl.h" +#endif + #include "viennacl/backend/opencl.hpp" #include "viennacl/ocl/backend.hpp" #include "viennacl/ocl/context.hpp" From e4f92650b5bd8f5dd9c4ecd7ee507b42d293be17 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 5 Jul 2015 21:01:18 -0400 Subject: [PATCH 120/254] Update README.md --- README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/README.md b/README.md index ebec286d550..39f840f3a89 100644 --- a/README.md +++ b/README.md @@ -32,3 +32,21 @@ Please cite Caffe in your publications if it helps your research: Title = {Caffe: Convolutional Architecture for Fast Feature Embedding}, Year = {2014} } + +## Additional Notes +This fork of Caffe contains an OpenCL backend and additional layers for fast image segmentation. +This work is partially supported by: +- AMD +- HHMI Janelia +- UZH, INI +- ETH Zurich + +For a C++ frontend and models to use for image semgentation with this fork, see: +- Frontend: https://github.com/naibaf7/caffe_neural_tool +- Models: https://github.com/naibaf7/caffe_neural_models + +## OpenCL backend +The backend is supposed to work with all vendors. Note however there might be problems with libOpenCL.so provided by nVidia. +It is recommended to install another OpenCL implementation after installing nVidia drivers. Possibilities are: +- Intel OpenCL, recommended if you have an Intel CPU along the nVidia GPU. +- AMD APP SDK (OpenCL), recommended if you have an AMD GPU or CPU. From 95a0194364c1f62eb3a16687dd85770bfe691588 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 5 Jul 2015 21:02:29 -0400 Subject: [PATCH 121/254] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 39f840f3a89..03aa129d8e1 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ This work is partially supported by: - UZH, INI - ETH Zurich -For a C++ frontend and models to use for image semgentation with this fork, see: +For a C++ frontend and models to use for image segmentation with this fork, see: - Frontend: https://github.com/naibaf7/caffe_neural_tool - Models: https://github.com/naibaf7/caffe_neural_models From 75c1e9b18cabec8eb71eb7952f9080fc82147e6e Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 5 Jul 2015 21:04:47 -0400 Subject: [PATCH 122/254] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 03aa129d8e1..7ed882624b6 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ For a C++ frontend and models to use for image segmentation with this fork, see: - Frontend: https://github.com/naibaf7/caffe_neural_tool - Models: https://github.com/naibaf7/caffe_neural_models -## OpenCL backend +## OpenCL Backend The backend is supposed to work with all vendors. Note however there might be problems with libOpenCL.so provided by nVidia. It is recommended to install another OpenCL implementation after installing nVidia drivers. Possibilities are: - Intel OpenCL, recommended if you have an Intel CPU along the nVidia GPU. From d197026bf18db9c243044a33868ea2469ca0b430 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 5 Jul 2015 21:06:26 -0400 Subject: [PATCH 123/254] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7ed882624b6..df16adeea23 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ For a C++ frontend and models to use for image segmentation with this fork, see: - Models: https://github.com/naibaf7/caffe_neural_models ## OpenCL Backend -The backend is supposed to work with all vendors. Note however there might be problems with libOpenCL.so provided by nVidia. -It is recommended to install another OpenCL implementation after installing nVidia drivers. Possibilities are: +The backend is supposed to work with all vendors. Note however there may be problems with libOpenCL.so provided by nVidia. +It is therefore recommended to install another OpenCL implementation after installing nVidia drivers. Possibilities are: - Intel OpenCL, recommended if you have an Intel CPU along the nVidia GPU. - AMD APP SDK (OpenCL), recommended if you have an AMD GPU or CPU. From 723f4cdd983f2d0dcef9039e66d3ed6fa572b08c Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 6 Jul 2015 16:24:10 +0200 Subject: [PATCH 124/254] Malis debugging version. --- src/caffe/layers/malis_loss_layer.cpp | 135 ++++++++++++++++---------- 1 file changed, 86 insertions(+), 49 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index ce08214e6b4..4ac93ad72d9 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -6,11 +6,13 @@ #include #include #include +#include #include #include #include #include + #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/util/math_functions.hpp" @@ -346,9 +348,11 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, // Labels (size w * h, c values) const Dtype* label = bottom[1]->cpu_data(); - // cv::namedWindow("labelled"); - // cv::namedWindow("prob"); - // cv::namedWindow("diff"); + cv::namedWindow("labelled"); + cv::namedWindow("cdn"); + cv::namedWindow("cdp"); + cv::namedWindow("prob"); + cv::namedWindow("diff"); cv::Mat img(bottom[1]->height(), bottom[1]->width(), CV_8SC1); #pragma omp parallel for @@ -361,7 +365,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, cv::Mat seg = FindBlobs(img); // This is for debugging only: - /*{ + { std::vector labels; for(int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { @@ -404,8 +408,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, } cv::imshow("labelled", output); - cv::waitKey(100); - }*/ + } Dtype loss_out = 0; Dtype classerr_out = 0; @@ -445,37 +448,29 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, Dtype g2 = label[(i + 1) * bottom[0]->width() + j]; // X positive - conn_data_pos[i * bottom[0]->width() + j] = std::max( - (p0 + p1) / 2.0, (g0 + g1) / 2.0); + conn_data_pos[i * bottom[0]->width() + j] = std::min( + std::min(1.0 - std::fabs(p0 - p1), (p0 + p1) / 2.0), + std::min(1.0 - std::fabs(g0 - g1), (g0 + g1) / 2.0)); // X negative - conn_data_neg[i * bottom[0]->width() + j] = std::min( - (p0 + p1) / 2.0, (g0 + g1) / 2.0); + conn_data_neg[i * bottom[0]->width() + j] = std::max( + std::min(1.0 - std::fabs(p0 - p1), (p0 + p1) / 2.0), + std::min(1.0 - std::fabs(g0 - g1), (g0 + g1) / 2.0)); // Y positive conn_data_pos[bottom[0]->width() * bottom[0]->height() - + i * bottom[0]->width() + j] = std::max( - (p0 + p2) / 2.0, (g0 + g2) / 2.0); + + i * bottom[0]->width() + j] = std::min( + std::min(1.0 - std::fabs(p0 - p2), (p0 + p2) / 2.0), + std::min(1.0 - std::fabs(g0 - g2), (g0 + g2) / 2.0)); // Y negative conn_data_neg[bottom[0]->width() * bottom[0]->height() - + i * bottom[0]->width() + j] = std::min( - (p0 + p2) / 2.0, (g0 + g2) / 2.0); + + i * bottom[0]->width() + j] = std::max( + std::min(1.0 - std::fabs(p0 - p2), (p0 + p2) / 2.0), + std::min(1.0 - std::fabs(g0 - g2), (g0 + g2) / 2.0)); } } - /*cv::Mat cd_pos(bottom[0]->height()-1, bottom[0]->width()-1, - cv::DataType::type, - &conn_data_pos[0], sizeof(Dtype) * (bottom[0]->width()-1)); - cv::imshow("prob", cd_pos); - cv::waitKey(100); - - cv::Mat cd_neg(bottom[0]->height()-1, bottom[0]->width()-1, - cv::DataType::type, - &conn_data_neg[0], sizeof(Dtype) * (bottom[0]->width()-1)); - cv::imshow("diff", cd_neg); - cv::waitKey(0); - auto minmax = std::minmax_element(conn_data_neg.begin(), conn_data_neg.end()); @@ -488,7 +483,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, std::cout << "Conndata pos min/max: " << conn_data_pos[minmax.first - conn_data_pos.begin()] << " " << - conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl;*/ + conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl; Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], @@ -496,13 +491,21 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, false, &dloss_neg[0], &loss_out, &classerr_out, &rand_index_out); + std::cout << "Loss: " << loss_out << std::endl; + std::cout << "Class: " << classerr_out << std::endl; + std::cout << "Rand: " << rand_index_out << std::endl; + Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), true, &dloss_pos[0], &loss_out, &classerr_out, &rand_index_out); + std::cout << "Loss: " << loss_out << std::endl; + std::cout << "Class: " << classerr_out << std::endl; + std::cout << "Rand: " << rand_index_out << std::endl; - /*minmax = std::minmax_element(dloss_neg.begin(), dloss_neg.end()); + + minmax = std::minmax_element(dloss_neg.begin(), dloss_neg.end()); std::cout << "DLoss_neg min/max: " << dloss_neg[minmax.first - dloss_neg.begin()] << " " << @@ -518,7 +521,33 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, //caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); - std::cout << "Before LOSS BACK" << std::endl;*/ + std::cout << "Before LOSS BACK" << std::endl; + + cv::Mat cd_pos(bottom[0]->height(), bottom[0]->width(), + cv::DataType::type, + &dloss_pos[0], sizeof(Dtype) * bottom[0]->width()); + + double minVal, maxVal; + cv::Mat tmp; + + cv::minMaxLoc(cd_pos, &minVal, &maxVal); + cd_pos.convertTo(tmp, CV_32FC1, 1.0 / (maxVal - minVal), + -minVal * 1.0 / (maxVal - minVal)); + + cv::imshow("cdp", tmp); + + cv::Mat cd_neg(bottom[0]->height(), bottom[0]->width(), + cv::DataType::type, + &dloss_neg[0], sizeof(Dtype) * bottom[0]->width()); + + cv::minMaxLoc(cd_neg, &minVal, &maxVal); + + cd_neg.convertTo(tmp, CV_32FC1, 1.0 / (maxVal - minVal), + -minVal * 1.0 / (maxVal - minVal)); + cv::imshow("cdn", tmp); + + // Clear the diff + caffe_set(bottom[0]->count(), Dtype(0.0), bottom_diff); // Spread out the losses to pixels for (int i = 0; i < bottom[0]->height() - 1; ++i) { @@ -528,70 +557,78 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, Dtype lyp = dloss_pos[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + j]; - Dtype lyn = dloss_neg[(bottom[0]->width() - 1) + Dtype lyn = dloss_neg[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + j]; - // Pick labels - /*const int l0 = static_cast - (label[i * bottom[0]->width() + j]); + // Pick label scalings + const int l0 = static_cast + (label[i * bottom[0]->width() + j]) * 2 - 1; const int l1 = static_cast - (label[i * bottom[0]->width() + (j + 1)]); + (label[i * bottom[0]->width() + (j + 1)]) * 2 - 1; const int l2 = static_cast - (label[(i + 1) * bottom[0]->width() + j]);*/ + (label[(i + 1) * bottom[0]->width() + j]) * 2 - 1; // Center bottom_diff[0 * inner_num_ + i * bottom[0]->width() + j] -= 0.5 - * (lxp + lxn + lyp + lyn); + * (lxp + lxn + lyp + lyn); // Right bottom_diff[0 * inner_num_ + i * bottom[0]->width() + (j + 1)] -= 0.5 - * (lxp + lxn); + * (lxp + lxn); // Bottom bottom_diff[0 * inner_num_ + (i + 1) * bottom[0]->width() + j] -= 0.5 - * (lyp + lyn); + * (lyp + lyn); // Center bottom_diff[1 * inner_num_ + i * bottom[0]->width() + j] += 0.5 - * (lxp + lxn + lyp + lyn); + * (lxp + lxn + lyp + lyn); // Right bottom_diff[1 * inner_num_ + i * bottom[0]->width() + (j + 1)] += 0.5 - * (lxp + lxn); + * (lxp + lxn); // Bottom bottom_diff[1 * inner_num_ + (i + 1) * bottom[0]->width() + j] += 0.5 - * (lyp + lyn); + * (lyp + lyn); } } - /*Dtype* prob_rd = prob_.mutable_cpu_data(); + Dtype* prob_rd = prob_.mutable_cpu_data(); cv::Mat wrapped_prob(bottom[0]->height(), bottom[0]->width(), cv::DataType::type, prob_rd, sizeof(Dtype) * bottom[0]->width()); cv::imshow("prob", wrapped_prob); - cv::waitKey(100); cv::Mat wrapped_diff(bottom[0]->height(), bottom[0]->width(), cv::DataType::type, bottom_diff, sizeof(Dtype) * bottom[0]->width()); - double minVal, maxVal; cv::minMaxLoc(wrapped_diff, &minVal, &maxVal); std::cout << "Max loss: " << maxVal << std::endl; std::cout << "Min loss: " << minVal << std::endl; - cv::Mat tmp; - wrapped_diff.convertTo(tmp, CV_32FC1, 1.0 / (maxVal - minVal), - -minVal * 1.0 / (maxVal - minVal)); + + Dtype sum = std::accumulate(bottom_diff,bottom_diff+bottom[0]->height()*bottom[0]->width(),0.0); + Dtype mean = sum / (bottom[0]->width()*bottom[0]->height()); + + std::vector msd(bottom[0]->height()*bottom[0]->width()); + std::transform(bottom_diff,bottom_diff+(bottom[0]->height()*bottom[0]->width()),msd.begin(),std::bind2nd(std::minus(), mean)); + + Dtype sqsum = std::inner_product(msd.begin(), msd.end(), msd.begin(), 0.0); + Dtype stdev = std::sqrt(sqsum/(bottom[0]->width()*bottom[0]->height())); + + + wrapped_diff.convertTo(tmp, CV_32FC1, 1.0 / (2.0 * stdev), + (stdev - mean) * 1.0 / (2.0 * stdev)); cv::imshow("diff", tmp); - cv::waitKey(0); + cv::waitKey(2); - std::cout << "After LOSS BACK" << std::endl;*/ + std::cout << "After LOSS BACK" << std::endl; } } From 5da63bb97c63bd2a2ffdfa023f67a1c11d370cb2 Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 6 Jul 2015 23:45:31 -0400 Subject: [PATCH 125/254] Disabled debug in Malis-loss layer. --- src/caffe/layers/malis_loss_layer.cpp | 69 ++++++++++++++++----------- 1 file changed, 42 insertions(+), 27 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 4ac93ad72d9..036df9adb7c 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -18,6 +19,8 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +//#define CAFFE_MALIS_DEBUG + namespace caffe { template @@ -283,6 +286,10 @@ void MalisLossLayer::Reshape(const vector*>& bottom, top[1]->ReshapeLike(*bottom[0]); } + conn_dims_.clear(); + nhood_dims_.clear(); + nhood_data_.clear(); + conn_num_dims_ = 4; conn_dims_.push_back(bottom[0]->width()); // X-axis conn_dims_.push_back(bottom[0]->height()); // Y-axis @@ -348,11 +355,13 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, // Labels (size w * h, c values) const Dtype* label = bottom[1]->cpu_data(); +#ifdef CAFFE_MALIS_DEBUG cv::namedWindow("labelled"); cv::namedWindow("cdn"); cv::namedWindow("cdp"); cv::namedWindow("prob"); cv::namedWindow("diff"); +#endif cv::Mat img(bottom[1]->height(), bottom[1]->width(), CV_8SC1); #pragma omp parallel for @@ -364,44 +373,45 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, cv::Mat seg = FindBlobs(img); +#ifdef CAFFE_MALIS_DEBUG // This is for debugging only: { std::vector labels; - for(int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { + for (int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { int val = reinterpret_cast(seg.ptr(0))[i]; bool found = false; - for(int j = 0; j < labels.size(); ++j) { - if(val == labels[j]) { + for (int j = 0; j < labels.size(); ++j) { + if (val == labels[j]) { found = true; } } - if(found == false) { + if (found == false) { labels.push_back(val); } } std::vector colors; - for(int i = 0; i < labels.size(); ++i) { + for (int i = 0; i < labels.size(); ++i) { unsigned char r = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT unsigned char g = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT unsigned char b = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - cv::Vec3b color(r,g,b); + cv::Vec3b color(r, g, b); colors.push_back(color); } cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3); - for(int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { + for (int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { int val = reinterpret_cast(seg.ptr(0))[i]; - if(val == 0) { - output.at(i) = cv::Vec3b(0,0,0); + if (val == 0) { + output.at(i) = cv::Vec3b(0, 0, 0); continue; } - for(int j = 0; j < labels.size(); ++j) { - if(val == labels[j]) { + for (int j = 0; j < labels.size(); ++j) { + if (val == labels[j]) { output.at(i) = colors[j]; } } @@ -409,6 +419,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, cv::imshow("labelled", output); } +#endif Dtype loss_out = 0; Dtype classerr_out = 0; @@ -471,6 +482,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, } } +#ifdef CAFFE_MALIS_DEBUG auto minmax = std::minmax_element(conn_data_neg.begin(), conn_data_neg.end()); @@ -484,27 +496,29 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, std::cout << "Conndata pos min/max: " << conn_data_pos[minmax.first - conn_data_pos.begin()] << " " << conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl; - +#endif Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), false, &dloss_neg[0], &loss_out, &classerr_out, &rand_index_out); +#ifdef CAFFE_MALIS_DEBUG std::cout << "Loss: " << loss_out << std::endl; std::cout << "Class: " << classerr_out << std::endl; std::cout << "Rand: " << rand_index_out << std::endl; +#endif Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), true, &dloss_pos[0], &loss_out, &classerr_out, &rand_index_out); +#ifdef MALIS_DEBUG std::cout << "Loss: " << loss_out << std::endl; std::cout << "Class: " << classerr_out << std::endl; std::cout << "Rand: " << rand_index_out << std::endl; - minmax = std::minmax_element(dloss_neg.begin(), dloss_neg.end()); std::cout << "DLoss_neg min/max: " << @@ -517,12 +531,6 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, dloss_pos[minmax.first - dloss_pos.begin()] << " " << dloss_pos[minmax.second - dloss_pos.begin()] << std::endl; - std::cout << "Before PROB BACK" << std::endl; - - //caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); - - std::cout << "Before LOSS BACK" << std::endl; - cv::Mat cd_pos(bottom[0]->height(), bottom[0]->width(), cv::DataType::type, &dloss_pos[0], sizeof(Dtype) * bottom[0]->width()); @@ -545,6 +553,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, cd_neg.convertTo(tmp, CV_32FC1, 1.0 / (maxVal - minVal), -minVal * 1.0 / (maxVal - minVal)); cv::imshow("cdn", tmp); +#endif // Clear the diff caffe_set(bottom[0]->count(), Dtype(0.0), bottom_diff); @@ -561,12 +570,12 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, * bottom[0]->height() + i * bottom[0]->width() + j]; // Pick label scalings - const int l0 = static_cast + /*const int l0 = static_cast (label[i * bottom[0]->width() + j]) * 2 - 1; const int l1 = static_cast (label[i * bottom[0]->width() + (j + 1)]) * 2 - 1; const int l2 = static_cast - (label[(i + 1) * bottom[0]->width() + j]) * 2 - 1; + (label[(i + 1) * bottom[0]->width() + j]) * 2 - 1;*/ // Center bottom_diff[0 * inner_num_ + i * bottom[0]->width() + j] -= 0.5 @@ -595,6 +604,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, } } +#ifdef CAFFE_MALIS_DEBUG Dtype* prob_rd = prob_.mutable_cpu_data(); cv::Mat wrapped_prob(bottom[0]->height(), bottom[0]->width(), @@ -612,14 +622,20 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, std::cout << "Min loss: " << minVal << std::endl; - Dtype sum = std::accumulate(bottom_diff,bottom_diff+bottom[0]->height()*bottom[0]->width(),0.0); + Dtype sum = std::accumulate(bottom_diff, + bottom_diff + + bottom[0]->height() * bottom[0]->width(), + 0.0); + Dtype mean = sum / (bottom[0]->width()*bottom[0]->height()); std::vector msd(bottom[0]->height()*bottom[0]->width()); - std::transform(bottom_diff,bottom_diff+(bottom[0]->height()*bottom[0]->width()),msd.begin(),std::bind2nd(std::minus(), mean)); + std::transform(bottom_diff, + bottom_diff + (bottom[0]->height()*bottom[0]->width()), + msd.begin(), std::bind2nd(std::minus(), mean)); - Dtype sqsum = std::inner_product(msd.begin(), msd.end(), msd.begin(), 0.0); - Dtype stdev = std::sqrt(sqsum/(bottom[0]->width()*bottom[0]->height())); + Dtype sqsum = std::inner_product(msd.begin(), msd.end(), msd.begin(), 0.0); + Dtype stdev = std::sqrt(sqsum/(bottom[0]->width()*bottom[0]->height())); wrapped_diff.convertTo(tmp, CV_32FC1, 1.0 / (2.0 * stdev), @@ -627,8 +643,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, cv::imshow("diff", tmp); cv::waitKey(2); - - std::cout << "After LOSS BACK" << std::endl; +#endif } } From da4b704dd72e8802bf90298c5da2843efbc6d074 Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 7 Jul 2015 00:54:34 -0400 Subject: [PATCH 126/254] Preparations for ND-SK kernels and build system fixes. --- src/caffe/layers/absval_layer.cpp | 7 +- src/caffe/layers/base_conv_nd_layer.cpp | 27 +++- src/caffe/layers/base_data_layer.cpp | 8 +- src/caffe/layers/bnll_layer.cpp | 8 +- src/caffe/layers/conv_nd_layer.cpp | 5 +- src/caffe/layers/malis_loss_layer.cpp | 3 +- src/caffe/test/test_convolution_nd_layer.cpp | 128 +++++++++++++++++++ src/caffe/util/im2col.cu | 99 ++++++-------- 8 files changed, 208 insertions(+), 77 deletions(-) create mode 100644 src/caffe/test/test_convolution_nd_layer.cpp diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp index 640bbe9ebd0..26819a34405 100644 --- a/src/caffe/layers/absval_layer.cpp +++ b/src/caffe/layers/absval_layer.cpp @@ -1,13 +1,12 @@ -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "absval_layer.cu" -#endif - #include #include "caffe/layer.hpp" #include "caffe/neuron_layers.hpp" #include "caffe/util/math_functions.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "absval_layer.cu" // NOLINT +#endif namespace caffe { diff --git a/src/caffe/layers/base_conv_nd_layer.cpp b/src/caffe/layers/base_conv_nd_layer.cpp index 0d3dd22dc90..3686ce281f1 100644 --- a/src/caffe/layers/base_conv_nd_layer.cpp +++ b/src/caffe/layers/base_conv_nd_layer.cpp @@ -98,12 +98,37 @@ void BaseConvolutionNDLayer::LayerSetUp( conv_param.pad((num_pad_dims == 1) ? 0 : i); } } + // Setup kernel stride dimensions + kstride_.Reshape(spatial_dim_blob_shape); + int* kstride_data = kstride_.mutable_cpu_data(); + if (conv_param.has_kstride_h() || conv_param.has_kstride_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "kstride_h & kstride_w can only be used for 2D convolution."; + CHECK_EQ(0, conv_param.kstride_size()) + << "Etiher kstride or kstirde_h/w should be specified; not both."; + kstride_data[0] = conv_param.pad_h(); + kstride_data[1] = conv_param.pad_w(); + } else { + const int num_kstride_dims = conv_param.kstride_size(); + CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || + num_kstride_dims == num_spatial_axes_) + << "kstride must be specified once, or once per spatial dimension " + << "(kstride specified " << num_kstride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultKstride = 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : + conv_param.kstride((num_kstride_dims == 1) ? 0 : i); + } + } + // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. is_1x1_ = true; for (int i = 0; i < num_spatial_axes_; ++i) { is_1x1_ &= - kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0; + kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0 + && kstride_data[i] == 1; if (!is_1x1_) { break; } } // Configure output channels and groups. diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index fb9ac6433d7..8d9a5bd2ebc 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -1,13 +1,13 @@ #include #include -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "base_data_layer.cu" -#endif - #include "caffe/data_layers.hpp" #include "caffe/util/io.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "base_data_layer.cu" // NOLINT +#endif + namespace caffe { template diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp index 732a5accfc3..4440fd8f947 100644 --- a/src/caffe/layers/bnll_layer.cpp +++ b/src/caffe/layers/bnll_layer.cpp @@ -1,13 +1,13 @@ #include #include -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "bnll_layer.cu" -#endif - #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" +#if defined(USE_GREENTEA) && !defined(USE_CUDA) +#include "bnll_layer.cu" // NOLINT +#endif + namespace caffe { template diff --git a/src/caffe/layers/conv_nd_layer.cpp b/src/caffe/layers/conv_nd_layer.cpp index 4469a41eb24..8fe43541cd5 100644 --- a/src/caffe/layers/conv_nd_layer.cpp +++ b/src/caffe/layers/conv_nd_layer.cpp @@ -19,10 +19,13 @@ void ConvolutionNDLayer::compute_output_shape() { const int* kernel_shape_data = this->kernel_shape_.cpu_data(); const int* stride_data = this->stride_.cpu_data(); const int* pad_data = this->pad_.cpu_data(); + const int* kstride_data = this->kstride_.cpu_data(); this->output_shape_.clear(); for (int i = 0; i < this->num_spatial_axes_; ++i) { const int input_dim = input_shape_data[i]; - const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) + const int ext_kernel_shape = (kernel_shape_data[i] - 1) + * kstride_data[i] + 1; + const int output_dim = (input_dim + 2 * pad_data[i] - ext_kernel_shape) / stride_data[i] + 1; this->output_shape_.push_back(output_dim); } diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 036df9adb7c..e5108a50065 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -19,7 +20,7 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -//#define CAFFE_MALIS_DEBUG +// #define CAFFE_MALIS_DEBUG namespace caffe { diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp new file mode 100644 index 00000000000..56e5b205527 --- /dev/null +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -0,0 +1,128 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +#ifndef CPU_ONLY // CPU-GPU test + +namespace caffe { + +template +class ConvolutionNDLayerTest : public GPUDeviceTest { + protected: + ConvolutionNDLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) { + } + + virtual void SetUp() { + BlobShape shape; + shape.add_dim(1); // Batch + shape.add_dim(1); // Channels + shape.add_dim(5); // Depth + shape.add_dim(5); // Height + shape.add_dim(5); // Width + blob_bottom_->Reshape(shape); + // fill the values + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~ConvolutionNDLayerTest() { + delete blob_bottom_; + delete blob_top_; + } + + void TestForward() { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + + convolution_param->add_kernel_size(3); + convolution_param->add_kernel_size(3); + convolution_param->add_kernel_size(3); + + convolution_param->add_kstride(2); + convolution_param->add_kstride(2); + convolution_param->add_kstride(2); + + convolution_param->set_num_output(4); + + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0); + + ConvolutionNDLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + int d = blob_bottom_->shape(2); + int h = blob_bottom_->shape(3); + int w = blob_bottom_->shape(4); + + TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); + + for (int cd = 0; cd < d; ++cd) { + for (int ch = 0; ch < h; ++ch) { + for (int cw = 0; cw < w; ++cw) { + bottom_data[cw + ch * w + cd * w * h] = + cw + ch * w + cd * w * h; + } + } + } + + } + + void TestBackward() { + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(ConvolutionNDLayerTest, TestDtypes); + +TYPED_TEST(ConvolutionNDLayerTest, TestSetup) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + + convolution_param->add_kernel_size(3); + convolution_param->add_kernel_size(3); + convolution_param->add_kernel_size(3); + + convolution_param->add_kstride(2); + convolution_param->add_kstride(2); + convolution_param->add_kstride(2); + + convolution_param->set_num_output(4); + + + ConvolutionNDLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(1, this->blob_top_->shape(2)); + EXPECT_EQ(1, this->blob_top_->shape(3)); + EXPECT_EQ(1, this->blob_top_->shape(4)); +} + +TYPED_TEST(ConvolutionNDLayerTest, TestForward) { + this->TestForward(); +} + +TYPED_TEST(ConvolutionNDLayerTest, TestBackward) { + this->TestBackward(); +} + +} // namespace caffe +#endif // !CPU_ONLY diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 8be171c91ce..88a25643c9c 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -315,13 +315,15 @@ template void col2im_gpu(const double* data_col, const int channels, double* data_im); -template -__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, +template +__global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, + const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, + const int* kstride, Dtype* data_col) { - int d_temp[num_axes]; // NOLINT(runtime/arrays) - int d_iter[num_axes]; // NOLINT(runtime/arrays) + int d_temp[6]; // NOLINT(runtime/arrays) + int d_iter[6]; // NOLINT(runtime/arrays) int i; CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate @@ -354,6 +356,8 @@ __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; if (!in_range) { break; } } + + // Write column data if (in_range) { int data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { @@ -364,14 +368,19 @@ __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, } else { *data_col_ptr = 0; } + data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { - const int d_max = kernel_shape[i]; + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 - ++d_iter[i]; + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; incremented = true; break; } @@ -380,15 +389,17 @@ __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, } // CUDA_KERNEL_LOOP(index, n) } -template -__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col, +template +__global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, + const Dtype* data_col, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, + const int* kstride, Dtype* data_im) { - int d_im[num_axes]; // NOLINT(runtime/arrays) - int d_col_iter[num_axes]; // NOLINT(runtime/arrays) - int d_col_start[num_axes]; // NOLINT(runtime/arrays) - int d_col_end[num_axes]; // NOLINT(runtime/arrays) + int d_im[6]; // NOLINT(runtime/arrays) + int d_col_iter[6]; // NOLINT(runtime/arrays) + int d_col_start[6]; // NOLINT(runtime/arrays) + int d_col_end[6]; // NOLINT(runtime/arrays) CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. @@ -454,31 +465,12 @@ template void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, Dtype* data_col) { - switch (num_spatial_axes) { - case 1: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 2: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 3: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - default: { - LOG(FATAL) << "im2col_nd_gpu does not support computation with " - << num_spatial_axes << " spatial axes"; - } - } + const int* pad, const int* stride, + const int* kstride, Dtype* data_col) { + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( + num_kernels, num_spatial_axes, data_im, im_shape, col_shape, + kernel_shape, pad, stride, kstride, data_col); CUDA_POST_KERNEL_CHECK; } @@ -486,42 +478,23 @@ void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, template void im2col_nd_gpu(const float* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, - float* data_col); + const int* kstride, float* data_col); template void im2col_nd_gpu(const double* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, - double* data_col); + const int* kstride, double* data_col); template void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, + const int* kstride, Dtype* data_im) { - switch (num_spatial_axes) { - case 1: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 2: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 3: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - default: { - LOG(FATAL) << "im2col_gpu does not support computation with " - << num_spatial_axes << " spatial axes"; - } - } + im_size, num_spatial_axes, data_col, im_shape, col_shape, + kernel_shape, pad, stride, kstride, data_im); CUDA_POST_KERNEL_CHECK; } @@ -529,10 +502,12 @@ void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, template void col2im_nd_gpu(const float* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, + const int* kstride, float* data_im); template void col2im_nd_gpu(const double* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, + const int* kstride, double* data_im); #endif // USE_CUDA From 6e34c7d12718e65c97a735faab3f58ac2261da9e Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 7 Jul 2015 00:55:34 -0400 Subject: [PATCH 127/254] Lint fix. --- CMakeLists.txt | 4 ++-- cmake/Dependencies.cmake | 5 +++++ include/caffe/util/im2col.hpp | 4 ++-- include/caffe/vision_layers.hpp | 6 ++++-- src/caffe/test/test_convolution_nd_layer.cpp | 1 - 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 64a06f06a94..ff842282e2d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,11 +32,11 @@ include(cmake/Dependencies.cmake) # ---[ Flags if(UNIX OR APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11 -fopenmp -DCMAKE_BUILD") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -std=c++11 -DCMAKE_BUILD") endif() if(USE_libstdcpp) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++ -std=c++11 -fopenmp") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++ -std=c++11") message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)") endif() diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index 2a9d9545bc6..4da50683c64 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -85,6 +85,11 @@ include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS}) list(APPEND Caffe_LINKER_LIBS ${OpenCV_LIBS}) message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})") +# ---[ OpenMP +find_package(OpenMP QUIET) +# If OpenMP is not found then OpenMP_CXX_FLAGS will be empty +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") + # ---[ BLAS if(NOT APPLE) set(BLAS "Atlas" CACHE STRING "Selected BLAS library") diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index d95b324b88e..07678206600 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -46,13 +46,13 @@ template void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col); + const int* kstride, Dtype* data_col); template void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_im); + const int* kstride, Dtype* data_im); } // namespace caffe diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 22e410ae806..a4c1f2ebae4 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -248,6 +248,8 @@ class BaseConvolutionNDLayer : public Layer { Blob stride_; /// @brief The spatial dimensions of the padding. Blob pad_; + /// @brief The spatial dimension of the kernel stride. + Blob kstride_; /// @brief The spatial dimensions of the convolution input. Blob conv_input_shape_; /// @brief The spatial dimensions of the input. @@ -277,13 +279,13 @@ class BaseConvolutionNDLayer : public Layer { im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), col_buff); + stride_.gpu_data(), kstride_.gpu_data(), col_buff); } inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - data); + kstride_.gpu_data(), data); } #endif // USE_CUDA #ifdef USE_GREENTEA diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp index 56e5b205527..76f570f9a8d 100644 --- a/src/caffe/test/test_convolution_nd_layer.cpp +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -77,7 +77,6 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { } } } - } void TestBackward() { From a4c970360eefe9f6a9035f8261b7ab011e92c47b Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 7 Jul 2015 13:22:42 -0400 Subject: [PATCH 128/254] Fixed CMAKE build. --- CMakeLists.txt | 8 ++++++++ Makefile | 16 +++++----------- cmake/Targets.cmake | 10 ++++++++++ src/caffe/layers/absval_layer.cpp | 4 ---- src/caffe/layers/base_data_layer.cpp | 4 ---- src/caffe/layers/bnll_layer.cpp | 4 ---- src/caffe/layers/concat_layer.cpp | 4 ---- src/caffe/layers/contrastive_loss_layer.cpp | 4 ---- src/caffe/layers/conv_layer.cpp | 4 ---- src/caffe/layers/conv_nd_layer.cpp | 4 ---- src/caffe/layers/conv_sk_layer.cpp | 4 ---- src/caffe/layers/deconv_layer.cpp | 4 ---- src/caffe/layers/deconv_nd_layer.cpp | 4 ---- src/caffe/layers/dropout_layer.cpp | 4 ---- src/caffe/layers/eltwise_layer.cpp | 4 ---- src/caffe/layers/euclidean_loss_layer.cpp | 4 ---- src/caffe/layers/exp_layer.cpp | 4 ---- src/caffe/layers/filter_layer.cpp | 4 ---- src/caffe/layers/hdf5_data_layer.cpp | 4 ---- src/caffe/layers/hdf5_output_layer.cpp | 4 ---- src/caffe/layers/im2col_layer.cpp | 4 ---- src/caffe/layers/inner_product_layer.cpp | 4 ---- src/caffe/layers/log_layer.cpp | 4 ---- src/caffe/layers/lrn_layer.cpp | 4 ---- src/caffe/layers/mergecrop_layer.cpp | 4 ---- src/caffe/layers/mvn_layer.cpp | 4 ---- src/caffe/layers/pooling_layer.cpp | 4 ---- src/caffe/layers/pooling_sk_layer.cpp | 4 ---- src/caffe/layers/power_layer.cpp | 4 ---- src/caffe/layers/prelu_layer.cpp | 4 ---- src/caffe/layers/reduction_layer.cpp | 4 ---- src/caffe/layers/relu_layer.cpp | 4 ---- .../layers/sigmoid_cross_entropy_loss_layer.cpp | 4 ---- src/caffe/layers/sigmoid_layer.cpp | 4 ---- src/caffe/layers/silence_layer.cpp | 4 ---- src/caffe/layers/slice_layer.cpp | 4 ---- src/caffe/layers/softmax_layer.cpp | 4 ---- src/caffe/layers/softmax_loss_layer.cpp | 4 ---- src/caffe/layers/split_layer.cpp | 4 ---- src/caffe/layers/tanh_layer.cpp | 4 ---- src/caffe/layers/threshold_layer.cpp | 4 ---- 41 files changed, 23 insertions(+), 163 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ff842282e2d..5eababf5480 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,6 +27,14 @@ caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE) caffe_option(BUILD_docs "Build documentation" ON IF UNIX OR APPLE) caffe_option(BUILD_python_layer "Build the caffe python layer" ON) +# ---[ Flag consistency check +if(CPU_ONLY) + set(USE_CUDA OFF) + set(USE_GREENTEA OFF) + set(USE_CUDNN OFF) + set(USE_CLBLAS OFF) +endif() + # ---[ Dependencies include(cmake/Dependencies.cmake) diff --git a/Makefile b/Makefile index 7e3569734e2..a217bde40a5 100644 --- a/Makefile +++ b/Makefile @@ -369,17 +369,6 @@ ifeq ($(CPU_ONLY), 1) COMMON_FLAGS += -DCPU_ONLY endif -# Grentea but not CUDA configuration -ifeq ($(USE_CUDA), 0) - ifeq ($(USE_GREENTEA), 1) - OBJS := $(PROTO_OBJS) $(CXX_OBJS) - TEST_OBJS := $(TEST_CXX_OBJS) - TEST_BINS := $(TEST_CXX_BINS) - ALL_WARNS := $(ALL_CXX_WARNS) - TEST_FILTER := --gtest_filter="-*CUDNN*" - endif -endif - # Python layer support ifeq ($(WITH_PYTHON_LAYER), 1) COMMON_FLAGS += -DWITH_PYTHON_LAYER @@ -613,6 +602,11 @@ ifeq ($(USE_CUDA), 1) $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) +else + @ echo CXX $< + $(Q)$(CXX) $(CXXFLAGS) -c -x c++ $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) endif $(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ diff --git a/cmake/Targets.cmake b/cmake/Targets.cmake index 60b82acce07..5f1790f0f1c 100644 --- a/cmake/Targets.cmake +++ b/cmake/Targets.cmake @@ -89,6 +89,16 @@ function(caffe_pickup_caffe_sources root) file(GLOB_RECURSE proto_files ${root}/src/caffe/*.proto) list(APPEND srcs ${proto_files}) + # OpenCL but not CUDA backend tweak + if(USE_GREENTEA AND NOT USE_CUDA) + SET_SOURCE_FILES_PROPERTIES(${cuda} PROPERTIES LANGUAGE CXX) + SET_SOURCE_FILES_PROPERTIES(${cuda} PROPERTIES COMPILE_FLAGS "-x c++") + SET_SOURCE_FILES_PROPERTIES(${test_cuda} PROPERTIES LANGUAGE CXX) + SET_SOURCE_FILES_PROPERTIES(${test_cuda} PROPERTIES COMPILE_FLAGS "-x c++") + list(APPEND srcs ${cuda}) + list(APPEND test_srcs ${test_cuda}) + endif() + # convet to absolute paths caffe_convert_absolute_paths(srcs) caffe_convert_absolute_paths(cuda) diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp index 26819a34405..5ce28c9e2b4 100644 --- a/src/caffe/layers/absval_layer.cpp +++ b/src/caffe/layers/absval_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/neuron_layers.hpp" #include "caffe/util/math_functions.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "absval_layer.cu" // NOLINT -#endif - namespace caffe { template diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 8d9a5bd2ebc..7258b9fd60b 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/data_layers.hpp" #include "caffe/util/io.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "base_data_layer.cu" // NOLINT -#endif - namespace caffe { template diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp index 4440fd8f947..75e8650d044 100644 --- a/src/caffe/layers/bnll_layer.cpp +++ b/src/caffe/layers/bnll_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "bnll_layer.cu" // NOLINT -#endif - namespace caffe { template diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 4696e00886b..409b35cf5f9 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "concat_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 726051a8a75..25e167819d3 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "contrastive_loss_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index f791c48eece..928ef5ee468 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "conv_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/conv_nd_layer.cpp b/src/caffe/layers/conv_nd_layer.cpp index 8fe43541cd5..aa521fdb4a6 100644 --- a/src/caffe/layers/conv_nd_layer.cpp +++ b/src/caffe/layers/conv_nd_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "conv_nd_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp index c6bf7e23bc9..fbda73e1c37 100644 --- a/src/caffe/layers/conv_sk_layer.cpp +++ b/src/caffe/layers/conv_sk_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "conv_sk_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 101f440312e..a4612963b6b 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "deconv_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/deconv_nd_layer.cpp b/src/caffe/layers/deconv_nd_layer.cpp index 8cf2fa40924..d5684a6163b 100644 --- a/src/caffe/layers/deconv_nd_layer.cpp +++ b/src/caffe/layers/deconv_nd_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "deconv_nd_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index d5922e87352..1c3f2c216d6 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -8,10 +8,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "dropout_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index 9bdee1b29e4..e2e0be79587 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "eltwise_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 7075d0bb5c4..80efa31b22c 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "euclidean_loss_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/exp_layer.cpp b/src/caffe/layers/exp_layer.cpp index 73ffc7794c7..c7e7c60cfad 100644 --- a/src/caffe/layers/exp_layer.cpp +++ b/src/caffe/layers/exp_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "exp_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index 6cb4a6ef763..7a2d91fbe19 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "filter_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 0d984924176..fadd2179e49 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -18,10 +18,6 @@ #include "caffe/layer.hpp" #include "caffe/util/io.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "hdf5_data_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index f6b09b6ed62..cb5f0e0c7ee 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -9,10 +9,6 @@ #include "caffe/util/io.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "hdf5_output_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 362933dac87..ed8992c0b48 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/im2col.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "im2col_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 7c9d1468587..f9bf6c32997 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -7,10 +7,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "inner_product_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/log_layer.cpp b/src/caffe/layers/log_layer.cpp index b3c2b3f865b..3ca25d0946f 100644 --- a/src/caffe/layers/log_layer.cpp +++ b/src/caffe/layers/log_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/neuron_layers.hpp" #include "caffe/util/math_functions.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "log_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 6c3280b13f3..08821ef3d79 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "lrn_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index c25c6dd4e03..9af459dd5bd 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "mergecrop_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index faa05f1c88c..ab645ce0bb3 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "mvn_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index f48ef961c94..c8d41499455 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -8,10 +8,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "pooling_layer.cu" -#endif - namespace caffe { using std::min; diff --git a/src/caffe/layers/pooling_sk_layer.cpp b/src/caffe/layers/pooling_sk_layer.cpp index 6d17be8083e..8527eec4eec 100644 --- a/src/caffe/layers/pooling_sk_layer.cpp +++ b/src/caffe/layers/pooling_sk_layer.cpp @@ -8,10 +8,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "pooling_sk_layer.cu" -#endif - namespace caffe { using std::min; diff --git a/src/caffe/layers/power_layer.cpp b/src/caffe/layers/power_layer.cpp index 5ddf8bc4927..347d9a12aeb 100644 --- a/src/caffe/layers/power_layer.cpp +++ b/src/caffe/layers/power_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "power_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 2b09cae70dc..5ec4d9bd61b 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "prelu_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/reduction_layer.cpp b/src/caffe/layers/reduction_layer.cpp index 36ddaf008e4..8ae6329ebe4 100644 --- a/src/caffe/layers/reduction_layer.cpp +++ b/src/caffe/layers/reduction_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "reduction_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/relu_layer.cpp b/src/caffe/layers/relu_layer.cpp index b94aaeef200..cc00319a578 100644 --- a/src/caffe/layers/relu_layer.cpp +++ b/src/caffe/layers/relu_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "relu_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index 727d1cd9d5e..cc236fe1e8e 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -6,10 +6,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "sigmoid_cross_entropy_loss_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/sigmoid_layer.cpp b/src/caffe/layers/sigmoid_layer.cpp index 98af4d439fd..48c384905bf 100644 --- a/src/caffe/layers/sigmoid_layer.cpp +++ b/src/caffe/layers/sigmoid_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "sigmoid_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/silence_layer.cpp b/src/caffe/layers/silence_layer.cpp index fa02212ea20..4abf9eff4a2 100644 --- a/src/caffe/layers/silence_layer.cpp +++ b/src/caffe/layers/silence_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "silence_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 7bc028a58f3..418361f8cf8 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "slice_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index 57344822700..fbd378102f6 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -5,10 +5,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "softmax_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index 9bfb65d8a1f..3c303cd0cae 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -7,10 +7,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "softmax_loss_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index d2117a220e8..59a821976c8 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -4,10 +4,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "split_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/tanh_layer.cpp b/src/caffe/layers/tanh_layer.cpp index ebcb5f6c249..ee5ed773c74 100644 --- a/src/caffe/layers/tanh_layer.cpp +++ b/src/caffe/layers/tanh_layer.cpp @@ -7,10 +7,6 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "tanh_layer.cu" -#endif - namespace caffe { template diff --git a/src/caffe/layers/threshold_layer.cpp b/src/caffe/layers/threshold_layer.cpp index a6f976899a3..8f72d6c4a51 100644 --- a/src/caffe/layers/threshold_layer.cpp +++ b/src/caffe/layers/threshold_layer.cpp @@ -3,10 +3,6 @@ #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#if defined(USE_GREENTEA) && !defined(USE_CUDA) -#include "threshold_layer.cu" -#endif - namespace caffe { template From 7fa198d73af5e199db2f86d0acbf10807b482736 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 8 Jul 2015 01:12:38 -0400 Subject: [PATCH 129/254] Separated Malis loss layer into connected component, affinity and malis loss layer for more flexibility with different activation functions and network outputs (affinity graph or pixel classification map). --- include/caffe/loss_layers.hpp | 29 +- include/caffe/vision_layers.hpp | 78 ++- src/caffe/layers/affinity_layer.cpp | 177 +++++++ .../layers/connected_component_layer.cpp | 97 ++++ src/caffe/layers/malis_loss_layer.cpp | 485 +++++------------- src/caffe/proto/caffe.proto | 9 + 6 files changed, 505 insertions(+), 370 deletions(-) create mode 100644 src/caffe/layers/affinity_layer.cpp create mode 100644 src/caffe/layers/connected_component_layer.cpp diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 3ba8858a47a..87a3e9823e3 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -776,6 +776,7 @@ class MalisLossLayer : public LossLayer { const vector*>& top); virtual inline const char* type() const { return "MalisLoss"; } + virtual inline int ExactNumBottomBlobs() const { return 3; } virtual inline int ExactNumTopBlobs() const { return -1; } virtual inline int MinTopBlobs() const { return 1; } virtual inline int MaxTopBlobs() const { return 2; } @@ -786,30 +787,22 @@ class MalisLossLayer : public LossLayer { virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - /// The internal SoftmaxLayer used to map predictions to a distribution. - shared_ptr > softmax_layer_; - /// prob stores the output probability predictions from the SoftmaxLayer. - Blob prob_; - /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward - vector*> softmax_bottom_vec_; - /// top vector holder used in call to the underlying SoftmaxLayer::Forward - vector*> softmax_top_vec_; - - int softmax_axis_, outer_num_, inner_num_; + private: + void Malis(const Dtype* conn_data, const int conn_num_dims, + const int* conn_dims, + const int* nhood_data, const int* nhood_dims, + const Dtype* seg_data, + const bool pos, Dtype* dloss_data, Dtype* loss_out, + Dtype *classerr_out, Dtype *rand_index_out, + Dtype margin, Dtype threshold); int conn_num_dims_; std::vector conn_dims_; std::vector nhood_data_; std::vector nhood_dims_; - private: - cv::Mat FindBlobs(const cv::Mat &input); - - void Malis(Dtype* conn_data, int conn_num_dims, int* conn_dims, - int* nhood_data, int* nhood_dims, int* seg_data, - bool pos, Dtype* dloss_data, Dtype* loss_out, - Dtype *classerr_out, Dtype *rand_index_out, - Dtype margin = 0.3); + Blob dloss_pos_; + Blob dloss_neg_; }; diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index a4c1f2ebae4..17a46c129fd 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -21,12 +21,47 @@ namespace caffe { +/** + * @brief Computes a one edge per dimension 2D affinity graph + * for a given segmentation/label map + */ +template +class AffinityLayer : public Layer { + public: + explicit AffinityLayer(const LayerParameter& param) + : Layer(param) { + } + + virtual inline const char* type() const { + return "Affinity"; + } + + protected: + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + private: + std::vector< shared_ptr< Blob > > min_index_; + std::vector offsets_; +}; + +/** + * @brief Computes a connected components map from a segmentation map. + */ template -class MergeCropLayer : public Layer { +class ConnectedComponentLayer : public Layer { public: - explicit MergeCropLayer(const LayerParameter& param) - : Layer(param) { + explicit ConnectedComponentLayer(const LayerParameter& param) + : Layer(param) { } virtual void LayerSetUp(const vector*>& bottom, @@ -36,13 +71,48 @@ class MergeCropLayer : public Layer { const vector*>& top); virtual inline int ExactNumBottomBlobs() const { - return 2; + return 1; } virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline const char* type() const { + return "ConnectedComponent"; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + private: + cv::Mat FindBlobs(const int maxlabel, const cv::Mat &input); +}; + +/** + * @brief Merges and crops feature maps for U-Net architectures. + */ +template +class MergeCropLayer : public Layer { + public: + explicit MergeCropLayer(const LayerParameter& param) + : Layer(param) { + } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { + return 2; + } + virtual inline const char* type() const { return "MergeCrop"; } diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp new file mode 100644 index 00000000000..48bdb5319fb --- /dev/null +++ b/src/caffe/layers/affinity_layer.cpp @@ -0,0 +1,177 @@ +#include +#include +#include + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +// #define CAFFE_AFFINITY_DEBUG + +namespace caffe { + +template +void AffinityLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + offsets_.clear(); + offsets_.resize(bottom.size()); + if (this->layer_param().has_affinity_param()) { + AffinityParameter affinity_param = this->layer_param().affinity_param(); + for (int i = 0; i < + std::min(static_cast(bottom.size()), + static_cast(affinity_param.offset_size())); ++i) { + offsets_[i] = affinity_param.offset(i); + } + } + +#ifdef CAFFE_AFFINITY_DEBUG + cv::namedWindow("prob"); + cv::namedWindow("diff"); +#endif +} + +template +void AffinityLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + min_index_.clear(); + for (int bidx = 0; bidx < bottom.size(); ++bidx) { + // 1, #edges, height, width + top[bidx]->Reshape(1, 2, bottom[bidx]->height(), bottom[bidx]->width()); + + shared_ptr > blob_pointer( + new Blob(this->device_context())); + min_index_.push_back(blob_pointer); + + // 1, #edges, height, width + min_index_[bidx]->Reshape(1, 2, bottom[bidx]->height(), + bottom[bidx]->width()); + } +} + +template +void AffinityLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + for (int bidx = 0; bidx < bottom.size(); ++bidx) { + const Dtype* bottom_data = bottom[bidx]->cpu_data(); + Dtype* top_data = top[bidx]->mutable_cpu_data(); + Dtype* min_data = min_index_[bidx]->mutable_cpu_data(); + + int inner_num = bottom[bidx]->width() + * bottom[bidx]->height(); + + int xmin, ymin; + + // Construct affinity graph +#pragma omp parallel for + for (int i = 0; i < bottom[bidx]->height() - 1; ++i) { + for (int j = 0; j < bottom[bidx]->width() - 1; ++j) { + // Center + Dtype p0 = bottom_data[offsets_[bidx] * inner_num + + i * bottom[bidx]->width() + j]; + // Right + Dtype p1 = bottom_data[offsets_[bidx] * inner_num + + i * bottom[bidx]->width() + (j + 1)]; + // Bottom + Dtype p2 = bottom_data[offsets_[bidx] * inner_num + + (i + 1) * bottom[bidx]->width() + j]; + + // X edge + top_data[i * bottom[bidx]->width() + j] = std::min(p0, p1); + xmin = p0 < p1 ? 0 : 1; + min_data[i * bottom[bidx]->width() + j] = xmin; + + // Y edge + top_data[inner_num + + i * bottom[bidx]->width() + j] = std::min(p0, p2); + ymin = p0 < p2 ? 0 : 1; + min_data[inner_num + + i * bottom[bidx]->width() + j] = ymin; + } + } + } +} + +template +void AffinityLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + for (int bidx = 0; bidx < bottom.size(); ++bidx) { + if (propagate_down[bidx]) { + const Dtype* top_diff = top[bidx]->cpu_diff(); + Dtype* bottom_diff = bottom[bidx]->mutable_cpu_diff(); + const Dtype* min_data = min_index_[bidx]->cpu_diff(); + + caffe_set(bottom[0]->count(), Dtype(0.0), bottom_diff); + + int inner_num = bottom[bidx]->width() + * bottom[bidx]->height(); + + // Spread out the affinity losses to pixels + for (int i = 0; i < bottom[0]->height() - 1; ++i) { + for (int j = 0; j < bottom[0]->width() - 1; ++j) { + Dtype lx = top_diff[i * bottom[0]->width() + j]; + Dtype ly = top_diff[inner_num + i * bottom[0]->width() + j]; + + int mx = min_data[i * bottom[0]->width() + j]; + int my = min_data[bottom[0]->width() + * bottom[0]->height() + i * bottom[0]->width() + j]; + + // Only propagate to min index contributor of affinity graph + bottom_diff[0 * inner_num + i * bottom[0]->width() + (j + mx)] -= lx; + bottom_diff[0 * inner_num + (i + my) * bottom[0]->width() + j] -= ly; + bottom_diff[1 * inner_num + i * bottom[0]->width() + (j + mx)] += lx; + bottom_diff[1 * inner_num + (i + my) * bottom[0]->width() + j] += ly; + } + } +#ifdef CAFFE_AFFINITY_DEBUG + { + cv::Mat tmp; + + Dtype* prob_rd = bottom[bidx]->mutable_cpu_data(); + + cv::Mat wrapped_prob(bottom[0]->height(), bottom[0]->width(), + cv::DataType::type, + prob_rd, sizeof(Dtype) * bottom[0]->width()); + cv::imshow("prob", wrapped_prob); + + cv::Mat wrapped_diff(bottom[0]->height(), bottom[0]->width(), + cv::DataType::type, + bottom_diff, sizeof(Dtype) * bottom[0]->width()); + + Dtype sum = std::accumulate(bottom_diff, + bottom_diff + + bottom[0]->height() * bottom[0]->width(), + 0.0); + + Dtype mean = sum / (bottom[0]->width()*bottom[0]->height()); + + std::vector msd(bottom[0]->height() * bottom[0]->width()); + std::transform(bottom_diff, + bottom_diff + (bottom[0]->height()*bottom[0]->width()), + msd.begin(), std::bind2nd(std::minus(), mean)); + + Dtype sqsum = std::inner_product(msd.begin(), + msd.end(), msd.begin(), 0.0); + Dtype stdev = std::sqrt(sqsum / (bottom[0]->width() + * bottom[0]->height())); + + wrapped_diff.convertTo(tmp, CV_32FC1, 1.0 / (2.0 * stdev), + (stdev - mean) * 1.0 / (2.0 * stdev)); + + cv::imshow("diff", tmp); + cv::waitKey(2); + } +#endif + } + } +} + +INSTANTIATE_CLASS(AffinityLayer); +REGISTER_LAYER_CLASS(Affinity); + +} // namespace caffe diff --git a/src/caffe/layers/connected_component_layer.cpp b/src/caffe/layers/connected_component_layer.cpp new file mode 100644 index 00000000000..2992ccb3573 --- /dev/null +++ b/src/caffe/layers/connected_component_layer.cpp @@ -0,0 +1,97 @@ +#include +#include + +#include + +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +// Derived from +// http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp +template +cv::Mat ConnectedComponentLayer::FindBlobs(int maxlabel, + const cv::Mat &input) { + // Fill the label_image with the blobs + cv::Mat label_image; + input.convertTo(label_image, CV_32SC1); + + int label_count = maxlabel + 1; + + // Segment into label numbers higher than the original label numbers + for (int y = 0; y < label_image.rows; y++) { + int *row = reinterpret_cast(label_image.ptr(y)); + for (int x = 0; x < label_image.cols; x++) { + // Skip background and already labeled areas + if (row[x] > maxlabel || row[x] == 0) { + continue; + } + cv::Rect rect; + cv::floodFill(label_image, cv::Point(x, y), label_count, &rect, 0, 0, 4); + label_count++; + } + } + return label_image; +} + +template +void ConnectedComponentLayer::LayerSetUp( + const vector*>& bottom, + const vector*>& top) { +} + +template +void ConnectedComponentLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + top[0]->ReshapeLike(*bottom[0]); +} + +template +void ConnectedComponentLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + + cv::Mat img(bottom[0]->height(), bottom[0]->width(), CV_8SC1); + + for (int nc = 0; nc < bottom[0]->num() * bottom[0]->channels(); ++nc) { + int maxlabel = 0; + for (int y = 0; y < bottom[0]->height(); ++y) { + for (int x = 0; x < bottom[0]->width(); ++x) { + int val = bottom_data[nc * bottom[0]->width() * bottom[0]->height() + + bottom[0]->width() * y + x]; + if (val > maxlabel) { + maxlabel = val; + } + img.at(y, x) = val; + } + } + cv::Mat seg = FindBlobs(maxlabel, img); +#pragma omp parallel for + for (int y = 0; y < seg.rows; ++y) { + for (int x = 0; x < seg.cols; ++x) { + top_data[nc * bottom[0]->width() * bottom[0]->height() + + bottom[0]->width() * y + x] = seg.at(y, x); + } + } + } +} + +template +void ConnectedComponentLayer::Backward_cpu( + const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + // Nothing to do, don't backpropagate to labels + return; +} + +INSTANTIATE_CLASS(ConnectedComponentLayer); +REGISTER_LAYER_CLASS(ConnectedComponent); + +} // namespace caffe diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index e5108a50065..0724e8ff7a5 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -14,7 +14,6 @@ #include #include - #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/util/math_functions.hpp" @@ -45,14 +44,14 @@ class MalisAffinityGraphCompare { // a negative example pass [false] ? // margin: sq-sq loss margin [0.3] template -void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, - int* conn_dims, int* nhood_data, - int* nhood_dims, int* seg_data, bool pos, +void MalisLossLayer::Malis(const Dtype* conn_data, + const int conn_num_dims, + const int* conn_dims, const int* nhood_data, + const int* nhood_dims, const Dtype* seg_data, + const bool pos, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, - Dtype margin) { - Dtype threshold = 0.5; - + Dtype margin, Dtype threshold) { if ((nhood_dims[1] != (conn_num_dims - 1)) || (nhood_dims[0] != conn_dims[conn_num_dims - 1])) { LOG(FATAL) << "nhood and conn dimensions don't match"; @@ -177,6 +176,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, // +ve example pairs dl = std::max(Dtype(0.0), threshold + margin - conn_data[minEdge]); loss += dl * nPair; + // Only use indicator for loss dloss_data[minEdge] -= (dl > 0) * nPair; if (conn_data[minEdge] <= threshold) { // an error nPairIncorrect += nPair; @@ -186,6 +186,7 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, // -ve example pairs dl = std::max(Dtype(0.0), conn_data[minEdge] - threshold + margin); loss += dl * nPair; + // Only use indicator for loss dloss_data[minEdge] += (dl > 0) * nPair; if (conn_data[minEdge] > threshold) { // an error nPairIncorrect += nPair; @@ -226,64 +227,24 @@ void MalisLossLayer::Malis(Dtype* conn_data, int conn_num_dims, *rand_index_out = randIndex; } -// Derived from -// http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp -template -cv::Mat MalisLossLayer::FindBlobs(const cv::Mat &input) { - // Fill the label_image with the blobs - cv::Mat label_image; - input.convertTo(label_image, CV_32SC1); - - // Segment into label numbers higher than the original label numbers - int label_count = prob_.channels(); - - for (int y = 0; y < label_image.rows; y++) { - int *row = reinterpret_cast(label_image.ptr(y)); - for (int x = 0; x < label_image.cols; x++) { - // Skip background and already labeled areas - if (row[x] >= prob_.channels() || row[x] == 0) { - continue; - } - cv::Rect rect; - cv::floodFill(label_image, cv::Point(x, y), label_count, &rect, 0, 0, 4); - label_count++; - } - } - return label_image; -} template void MalisLossLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); - // Set up the softmax layer - LayerParameter softmax_param(this->layer_param_); - softmax_param.set_type("Softmax"); - softmax_layer_ = LayerRegistry::CreateLayer(softmax_param); - softmax_bottom_vec_.clear(); - softmax_bottom_vec_.push_back(bottom[0]); - softmax_top_vec_.clear(); - softmax_top_vec_.push_back(&prob_); - softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_); +#ifdef CAFFE_MALIS_DEBUG + cv::namedWindow("labelled"); + cv::namedWindow("test"); +#endif } template void MalisLossLayer::Reshape(const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); - softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); - softmax_axis_ = bottom[0]->CanonicalAxisIndex( - this->layer_param_.softmax_param().axis()); - outer_num_ = bottom[0]->count(0, softmax_axis_); - inner_num_ = bottom[0]->count(softmax_axis_ + 1); - CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) - << "Number of labels must match number of predictions; " - << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), " - << "label count (number of labels) must be N*H*W, " - << "with integer values in {0, 1, ..., C-1}."; + if (top.size() >= 2) { - // softmax output top[1]->ReshapeLike(*bottom[0]); } @@ -308,343 +269,171 @@ void MalisLossLayer::Reshape(const vector*>& bottom, nhood_data_.push_back(0); // Edge 1, Z nhood_data_.push_back(0); // Edge 2, Z + + dloss_pos_.Reshape( + 1, 2, bottom[0]->height(), bottom[0]->width()); + dloss_neg_.Reshape( + 1, 2, bottom[0]->height(), bottom[0]->width()); } template void MalisLossLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - // The forward pass computes the softmax prob values. - softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); - const Dtype* prob_data = prob_.cpu_data(); - const Dtype* label = bottom[1]->cpu_data(); - int dim = prob_.count() / outer_num_; - int count = 0; - Dtype loss = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; j++) { - const int label_value = static_cast(label[i * inner_num_ + j]); - DCHECK_GE(label_value, 0); - DCHECK_LT(label_value, prob_.shape(softmax_axis_)); - loss -= log( - std::max(prob_data[i * dim + label_value * inner_num_ + j], - Dtype(FLT_MIN))); - ++count; - } - } - top[0]->mutable_cpu_data()[0] = loss / count; - if (top.size() == 2) { - top[1]->ShareData(prob_); - } -} - -template -void MalisLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { - if (propagate_down[1]) { - LOG(FATAL)<< this->type() - << " Layer cannot backpropagate to label inputs."; - } - - if (propagate_down[0]) { - // Diff to propagate to (size w * h * c) - Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - - // The predictions (size w * h * c) - const Dtype* prob_data = prob_.cpu_data(); - - // Labels (size w * h, c values) - const Dtype* label = bottom[1]->cpu_data(); - #ifdef CAFFE_MALIS_DEBUG - cv::namedWindow("labelled"); - cv::namedWindow("cdn"); - cv::namedWindow("cdp"); - cv::namedWindow("prob"); - cv::namedWindow("diff"); -#endif - - cv::Mat img(bottom[1]->height(), bottom[1]->width(), CV_8SC1); -#pragma omp parallel for - for (int y = 0; y < bottom[1]->height(); ++y) { - for (int x = 0; x < bottom[1]->width(); ++x) { - img.at(y, x) = label[y * bottom[1]->width() + x]; - } - } - - cv::Mat seg = FindBlobs(img); - -#ifdef CAFFE_MALIS_DEBUG - // This is for debugging only: - { - std::vector labels; - - for (int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { - int val = reinterpret_cast(seg.ptr(0))[i]; - bool found = false; - for (int j = 0; j < labels.size(); ++j) { - if (val == labels[j]) { - found = true; - } - } - if (found == false) { - labels.push_back(val); + // This is for debugging only: + { + std::vector labels; + const Dtype* seg_data = bottom[2]->cpu_data(); + for (int i = 0; i < bottom[2]->height() * bottom[2]->width(); ++i) { + int val = static_cast(seg_data[i]); + bool found = false; + for (int j = 0; j < labels.size(); ++j) { + if (val == labels[j]) { + found = true; } } - - std::vector colors; - - for (int i = 0; i < labels.size(); ++i) { - unsigned char r = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - unsigned char g = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - unsigned char b = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - - cv::Vec3b color(r, g, b); - colors.push_back(color); + if (found == false) { + labels.push_back(val); } + } - cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3); + std::vector colors; - for (int i = 0; i < bottom[1]->height() *bottom[1]->width(); ++i) { - int val = reinterpret_cast(seg.ptr(0))[i]; - if (val == 0) { - output.at(i) = cv::Vec3b(0, 0, 0); - continue; - } - for (int j = 0; j < labels.size(); ++j) { - if (val == labels[j]) { - output.at(i) = colors[j]; - } - } - } + for (int i = 0; i < labels.size(); ++i) { + unsigned char r = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT + unsigned char g = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT + unsigned char b = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - cv::imshow("labelled", output); + cv::Vec3b color(r, g, b); + colors.push_back(color); } -#endif - Dtype loss_out = 0; - Dtype classerr_out = 0; - Dtype rand_index_out = 0; + cv::Mat output = cv::Mat::zeros(cv::Size(bottom[1]->height(), + bottom[1]->width()), CV_8UC3); - std::vector conn_data_pos( - 2 * bottom[0]->height() * bottom[0]->width()); - std::vector conn_data_neg( - 2 * bottom[0]->height() * bottom[0]->width()); - std::vector dloss_pos( - 2 * bottom[0]->height() * bottom[0]->width()); - std::vector dloss_neg( - 2 * bottom[0]->height() * bottom[0]->width()); + const Dtype* imgdata = bottom[2]->cpu_data(); - // Construct positive and negative affinity graph -#pragma omp parallel for - for (int i = 0; i < bottom[0]->height() - 1; ++i) { - for (int j = 0; j < bottom[0]->width() - 1; ++j) { - // Center - Dtype p0 = prob_data[bottom[0]->width() - * bottom[0]->height() - + i * bottom[0]->width() + j]; - // Right - Dtype p1 = prob_data[bottom[0]->width() - * bottom[0]->height() - + i * bottom[0]->width() + (j + 1)]; - // Bottom - Dtype p2 = prob_data[bottom[0]->width() - * bottom[0]->height() - + (i + 1) * bottom[0]->width() + j]; - - // Center - Dtype g0 = label[i * bottom[0]->width() + j]; - // Right - Dtype g1 = label[i * bottom[0]->width() + (j + 1)]; - // Bottom - Dtype g2 = label[(i + 1) * bottom[0]->width() + j]; - - // X positive - conn_data_pos[i * bottom[0]->width() + j] = std::min( - std::min(1.0 - std::fabs(p0 - p1), (p0 + p1) / 2.0), - std::min(1.0 - std::fabs(g0 - g1), (g0 + g1) / 2.0)); - - // X negative - conn_data_neg[i * bottom[0]->width() + j] = std::max( - std::min(1.0 - std::fabs(p0 - p1), (p0 + p1) / 2.0), - std::min(1.0 - std::fabs(g0 - g1), (g0 + g1) / 2.0)); - - // Y positive - conn_data_pos[bottom[0]->width() * bottom[0]->height() - + i * bottom[0]->width() + j] = std::min( - std::min(1.0 - std::fabs(p0 - p2), (p0 + p2) / 2.0), - std::min(1.0 - std::fabs(g0 - g2), (g0 + g2) / 2.0)); - - // Y negative - conn_data_neg[bottom[0]->width() * bottom[0]->height() - + i * bottom[0]->width() + j] = std::max( - std::min(1.0 - std::fabs(p0 - p2), (p0 + p2) / 2.0), - std::min(1.0 - std::fabs(g0 - g2), (g0 + g2) / 2.0)); + for (int i = 0; i < bottom[1]->height() * bottom[1]->width(); ++i) { + int val = imgdata[i]; + if (val == 0) { + output.at(i) = cv::Vec3b(0, 0, 0); + continue; + } + for (int j = 0; j < labels.size(); ++j) { + if (val == labels[j]) { + output.at(i) = colors[j]; + } } } + cv::imshow("labelled", output); + } +#endif -#ifdef CAFFE_MALIS_DEBUG - auto minmax = std::minmax_element(conn_data_neg.begin(), - conn_data_neg.end()); - - std::cout << "Conndata neg min/max: " << - conn_data_neg[minmax.first - conn_data_neg.begin()] << " " << - conn_data_neg[minmax.second - conn_data_neg.begin()] << std::endl; - - minmax = std::minmax_element(conn_data_pos.begin(), - conn_data_pos.end()); + int inner_num = bottom[0]->width() * bottom[0]->height(); - std::cout << "Conndata pos min/max: " << - conn_data_pos[minmax.first - conn_data_pos.begin()] << " " << - conn_data_pos[minmax.second - conn_data_pos.begin()] << std::endl; -#endif + // Predicted affinity + const Dtype* affinity_prob_x = bottom[0]->cpu_data(); + const Dtype* affinity_prob_y = bottom[0]->cpu_data() + inner_num; - Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], - &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), - false, &dloss_neg[0], - &loss_out, &classerr_out, &rand_index_out); + // Effective affinity + const Dtype* affinity_x = bottom[1]->cpu_data(); + const Dtype* affinity_y = bottom[1]->cpu_data() + inner_num; #ifdef CAFFE_MALIS_DEBUG - std::cout << "Loss: " << loss_out << std::endl; - std::cout << "Class: " << classerr_out << std::endl; - std::cout << "Rand: " << rand_index_out << std::endl; + {Dtype* prob_rd = bottom[0]->mutable_cpu_data(); + cv::Mat wrapped(bottom[0]->height(), bottom[0]->width(), + cv::DataType::type, + prob_rd, sizeof(Dtype) * bottom[0]->width()); + cv::imshow("test", wrapped);} #endif - Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], - &nhood_dims_[0], reinterpret_cast(seg.ptr(0)), - true, &dloss_pos[0], - &loss_out, &classerr_out, &rand_index_out); - -#ifdef MALIS_DEBUG - std::cout << "Loss: " << loss_out << std::endl; - std::cout << "Class: " << classerr_out << std::endl; - std::cout << "Rand: " << rand_index_out << std::endl; - - minmax = std::minmax_element(dloss_neg.begin(), dloss_neg.end()); + // Connection data + std::vector conn_data_pos( + 2 * bottom[0]->height() * bottom[0]->width()); + std::vector conn_data_neg( + 2 * bottom[0]->height() * bottom[0]->width()); - std::cout << "DLoss_neg min/max: " << - dloss_neg[minmax.first - dloss_neg.begin()] << " " << - dloss_neg[minmax.second - dloss_neg.begin()] << std::endl; + // Construct positive and negative affinity graph +#pragma omp parallel for + for (int i = 0; i < bottom[0]->height() - 1; ++i) { + for (int j = 0; j < bottom[0]->width() - 1; ++j) { + // X positive + conn_data_pos[i * bottom[0]->width() + j] = std::min( + affinity_prob_x[i * bottom[0]->width() + j], + affinity_x[i * bottom[0]->width() + j]); + + // X negative + conn_data_neg[i * bottom[0]->width() + j] = std::max( + affinity_prob_x[i * bottom[0]->width() + j], + affinity_x[i * bottom[0]->width() + j]); + + // Y positive + conn_data_pos[inner_num + + i * bottom[0]->width() + j] = std::min( + affinity_prob_y[i * bottom[0]->width() + j], + affinity_y[i * bottom[0]->width() + j]); + + // Y negative + conn_data_neg[inner_num + + i * bottom[0]->width() + j] = std::max( + affinity_prob_y[i * bottom[0]->width() + j], + affinity_y[i * bottom[0]->width() + j]); + } + } - minmax = std::minmax_element(dloss_pos.begin(), dloss_pos.end()); + Dtype loss = 0; - std::cout << "DLoss_pos min/max: " << - dloss_pos[minmax.first - dloss_pos.begin()] << " " << - dloss_pos[minmax.second - dloss_pos.begin()] << std::endl; + Dtype loss_out = 0; + Dtype classerr_out = 0; + Dtype rand_index_out = 0; - cv::Mat cd_pos(bottom[0]->height(), bottom[0]->width(), - cv::DataType::type, - &dloss_pos[0], sizeof(Dtype) * bottom[0]->width()); + Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], + &nhood_dims_[0], bottom[2]->cpu_data(), + false, dloss_neg_.mutable_cpu_data(), + &loss_out, &classerr_out, &rand_index_out, 0.3, 0.5); - double minVal, maxVal; - cv::Mat tmp; + loss += loss_out; - cv::minMaxLoc(cd_pos, &minVal, &maxVal); - cd_pos.convertTo(tmp, CV_32FC1, 1.0 / (maxVal - minVal), - -minVal * 1.0 / (maxVal - minVal)); + Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], + &nhood_dims_[0], bottom[2]->cpu_data(), + true, dloss_pos_.mutable_cpu_data(), + &loss_out, &classerr_out, &rand_index_out, 0.3, 0.5); - cv::imshow("cdp", tmp); + loss += loss_out; - cv::Mat cd_neg(bottom[0]->height(), bottom[0]->width(), - cv::DataType::type, - &dloss_neg[0], sizeof(Dtype) * bottom[0]->width()); + top[0]->mutable_cpu_data()[0] = loss; - cv::minMaxLoc(cd_neg, &minVal, &maxVal); + if (top.size() == 2) { + top[1]->ShareData(*(bottom[0])); + } +} - cd_neg.convertTo(tmp, CV_32FC1, 1.0 / (maxVal - minVal), - -minVal * 1.0 / (maxVal - minVal)); - cv::imshow("cdn", tmp); -#endif +template +void MalisLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + // Diff to propagate to (size w * h * c) + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* dloss_pos_data = dloss_pos_.cpu_data(); + const Dtype* dloss_neg_data = dloss_neg_.cpu_data(); // Clear the diff caffe_set(bottom[0]->count(), Dtype(0.0), bottom_diff); - // Spread out the losses to pixels - for (int i = 0; i < bottom[0]->height() - 1; ++i) { - for (int j = 0; j < bottom[0]->width() - 1; ++j) { - Dtype lxp = dloss_pos[i * bottom[0]->width() + j]; - Dtype lxn = dloss_neg[i * bottom[0]->width() + j]; - - Dtype lyp = dloss_pos[bottom[0]->width() - * bottom[0]->height() + i * bottom[0]->width() + j]; - Dtype lyn = dloss_neg[bottom[0]->width() - * bottom[0]->height() + i * bottom[0]->width() + j]; - - // Pick label scalings - /*const int l0 = static_cast - (label[i * bottom[0]->width() + j]) * 2 - 1; - const int l1 = static_cast - (label[i * bottom[0]->width() + (j + 1)]) * 2 - 1; - const int l2 = static_cast - (label[(i + 1) * bottom[0]->width() + j]) * 2 - 1;*/ - - // Center - bottom_diff[0 * inner_num_ + i * bottom[0]->width() + j] -= 0.5 - * (lxp + lxn + lyp + lyn); - - // Right - bottom_diff[0 * inner_num_ + i * bottom[0]->width() + (j + 1)] -= 0.5 - * (lxp + lxn); - - // Bottom - bottom_diff[0 * inner_num_ + (i + 1) * bottom[0]->width() + j] -= 0.5 - * (lyp + lyn); - - - // Center - bottom_diff[1 * inner_num_ + i * bottom[0]->width() + j] += 0.5 - * (lxp + lxn + lyp + lyn); - - // Right - bottom_diff[1 * inner_num_ + i * bottom[0]->width() + (j + 1)] += 0.5 - * (lxp + lxn); - - // Bottom - bottom_diff[1 * inner_num_ + (i + 1) * bottom[0]->width() + j] += 0.5 - * (lyp + lyn); + int inner_num = bottom[0]->height() * bottom[0]->width(); + +#pragma omp parallel for + for (int i = 0; i < bottom[0]->height(); ++i) { + for (int j = 0; j < bottom[0]->width(); ++j) { + bottom_diff[i * bottom[0]->width() + j] = + dloss_pos_data[i * bottom[0]->width() + j] + + dloss_neg_data[i * bottom[0]->width() + j]; + bottom_diff[inner_num + i * bottom[0]->width() + j] = + dloss_pos_data[inner_num + i * bottom[0]->width() + j] + + dloss_neg_data[inner_num + i * bottom[0]->width() + j]; } } - -#ifdef CAFFE_MALIS_DEBUG - Dtype* prob_rd = prob_.mutable_cpu_data(); - - cv::Mat wrapped_prob(bottom[0]->height(), bottom[0]->width(), - cv::DataType::type, - prob_rd, sizeof(Dtype) * bottom[0]->width()); - cv::imshow("prob", wrapped_prob); - - cv::Mat wrapped_diff(bottom[0]->height(), bottom[0]->width(), - cv::DataType::type, - bottom_diff, sizeof(Dtype) * bottom[0]->width()); - - cv::minMaxLoc(wrapped_diff, &minVal, &maxVal); - - std::cout << "Max loss: " << maxVal << std::endl; - std::cout << "Min loss: " << minVal << std::endl; - - - Dtype sum = std::accumulate(bottom_diff, - bottom_diff - + bottom[0]->height() * bottom[0]->width(), - 0.0); - - Dtype mean = sum / (bottom[0]->width()*bottom[0]->height()); - - std::vector msd(bottom[0]->height()*bottom[0]->width()); - std::transform(bottom_diff, - bottom_diff + (bottom[0]->height()*bottom[0]->width()), - msd.begin(), std::bind2nd(std::minus(), mean)); - - Dtype sqsum = std::inner_product(msd.begin(), msd.end(), msd.begin(), 0.0); - Dtype stdev = std::sqrt(sqsum/(bottom[0]->width()*bottom[0]->height())); - - - wrapped_diff.convertTo(tmp, CV_32FC1, 1.0 / (2.0 * stdev), - (stdev - mean) * 1.0 / (2.0 * stdev)); - - cv::imshow("diff", tmp); - cv::waitKey(2); -#endif } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index d459220eb24..4f3e0e66ac4 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -355,6 +355,7 @@ message LayerParameter { optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; + optional AffinityParameter affinity_param = 137; } // Message that stores parameters used to apply transformation @@ -1029,6 +1030,7 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; + optional AffinityParameter affinity_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters @@ -1134,3 +1136,10 @@ message PReLUParameter { // Whether or not slope paramters are shared across channels. optional bool channel_shared = 2 [default = false]; } + +message AffinityParameter { + // Offset parameter to change the channel to use for creating an affinity graph + // Defined once per bottom blob + repeated int32 offset = 1; +} + From bcf9d3f2727190f3edbd2a816c75fc416a472506 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 8 Jul 2015 16:13:32 +0200 Subject: [PATCH 130/254] Resetting loss memory blobs. --- src/caffe/layers/affinity_layer.cpp | 2 +- src/caffe/layers/malis_loss_layer.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp index 48bdb5319fb..cd80fea2485 100644 --- a/src/caffe/layers/affinity_layer.cpp +++ b/src/caffe/layers/affinity_layer.cpp @@ -11,7 +11,7 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -// #define CAFFE_AFFINITY_DEBUG +#define CAFFE_AFFINITY_DEBUG namespace caffe { diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 0724e8ff7a5..65f68bc386e 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -19,7 +19,7 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -// #define CAFFE_MALIS_DEBUG +#define CAFFE_MALIS_DEBUG namespace caffe { From 52818c7860fd14ff6168c7430b83682fffb1cb9a Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 8 Jul 2015 10:14:20 -0400 Subject: [PATCH 131/254] MALIS loss memory resetting. --- src/caffe/layers/malis_loss_layer.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 0724e8ff7a5..59b40c854a9 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -387,6 +387,9 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, Dtype classerr_out = 0; Dtype rand_index_out = 0; + caffe_set(dloss_neg_.count(), Dtype(0.0), dloss_neg_.mutable_cpu_data()); + caffe_set(dloss_pos_.count(), Dtype(0.0), dloss_pos_.mutable_cpu_data()); + Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], bottom[2]->cpu_data(), false, dloss_neg_.mutable_cpu_data(), From 614bff7ab66dcb8950947fa394d78ca969244d7d Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 8 Jul 2015 10:15:01 -0400 Subject: [PATCH 132/254] MALIS loss memory reset. --- src/caffe/layers/affinity_layer.cpp | 2 +- src/caffe/layers/malis_loss_layer.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp index cd80fea2485..48bdb5319fb 100644 --- a/src/caffe/layers/affinity_layer.cpp +++ b/src/caffe/layers/affinity_layer.cpp @@ -11,7 +11,7 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#define CAFFE_AFFINITY_DEBUG +// #define CAFFE_AFFINITY_DEBUG namespace caffe { diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index db77ad22dcf..59b40c854a9 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -19,7 +19,7 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -#define CAFFE_MALIS_DEBUG +// #define CAFFE_MALIS_DEBUG namespace caffe { From a136e4bd11945bddb6cf4c9082c8808d87e679bb Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 8 Jul 2015 10:58:11 -0400 Subject: [PATCH 133/254] Caffe tool initialization fix. --- tools/caffe.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 7a65077d350..cd20b1e6018 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -116,8 +116,8 @@ int train() { // Set device id and mode if (FLAGS_gpu >= 0) { LOG(INFO) << "Use GPU with device ID " << FLAGS_gpu; - Caffe::SetDevice(FLAGS_gpu); Caffe::set_mode(Caffe::GPU); + Caffe::SetDevice(FLAGS_gpu); } else { LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); @@ -150,8 +150,8 @@ int test() { // Set device id and mode if (FLAGS_gpu >= 0) { LOG(INFO) << "Use GPU with device ID " << FLAGS_gpu; - Caffe::SetDevice(FLAGS_gpu); Caffe::set_mode(Caffe::GPU); + Caffe::SetDevice(FLAGS_gpu); } else { LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); @@ -215,8 +215,8 @@ int time() { // Set device id and mode if (FLAGS_gpu >= 0) { LOG(INFO) << "Use GPU with device ID " << FLAGS_gpu; - Caffe::SetDevice(FLAGS_gpu); Caffe::set_mode(Caffe::GPU); + Caffe::SetDevice(FLAGS_gpu); } else { LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); From 5f6ce45f550222db1c770c7e391815fa9dbc7313 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 8 Jul 2015 14:07:12 -0400 Subject: [PATCH 134/254] TryCatch block for broken OpenCL implementations. --- src/caffe/common.cpp | 93 ++++++++++++-------- src/caffe/layers/conv_nd_layer.cu | 4 + src/caffe/test/test_convolution_nd_layer.cpp | 11 +++ 3 files changed, 69 insertions(+), 39 deletions(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index a7f397c3235..70c94d0032f 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -205,11 +205,17 @@ void Caffe::EnumerateDevices() { for (std::size_t platform_id = 0; platform_id < platforms.size(); ++platform_id) { typedef std::vector devices_type; - devices_type devices = platforms[platform_id].devices(CL_DEVICE_TYPE_ALL); - for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { - platform_devices.push_back( - std::make_tuple(platforms[platform_id], devices[device_id])); - greentea_device_count++; + try { + devices_type devices = platforms[platform_id].devices(CL_DEVICE_TYPE_ALL); + for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { + platform_devices.push_back( + std::make_tuple(platforms[platform_id], devices[device_id])); + greentea_device_count++; + } + } catch (...) { + LOG(INFO)<< "OpenCL platform: " + << platforms[platform_id].info() + << " does not work correctly."; } } #endif @@ -294,43 +300,52 @@ void Caffe::SetDevices(std::vector device_ids) { for (std::size_t platform_id = 0; platform_id < platforms.size(); ++platform_id) { typedef std::vector devices_type; - devices_type devices = platforms[platform_id].devices(CL_DEVICE_TYPE_ALL); - for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { - platform_devices.push_back( - std::make_tuple(platforms[platform_id], devices[device_id])); - Get().device_contexts_.emplace_back( - DeviceContext(cuda_device_count + greentea_device_count, - Backend::BACKEND_OpenCL)); - // Check if this device is really used and initialize - bool is_used = false; - for (int i = 0; i < device_ids.size(); ++i) { - int device_id = device_ids[i]; - if (device_id == cuda_device_count + greentea_device_count) { - // Setup actual context and compile kernels for this device - viennacl::ocl::setup_context( - device_id, std::get<1>(platform_devices[greentea_device_count])); - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - static_cast(device_id)); - viennacl::ocl::program & program = RegisterKernels(&ctx); - Get().ocl_programs_.push_back(program); - // viennacl::ocl::switch_context(device_id); - // viennacl::ocl::switch_device(std::get<1> - // (platform_devices[device_id - cuda_device_count])); - - // Add defined number of queues - for (int q = 0; q < GREENTEA_QUEUE_COUNT - 1; ++q) { - ctx.add_queue(ctx.current_device()); + try { + devices_type devices = platforms[platform_id].devices( + CL_DEVICE_TYPE_ALL); + for (std::size_t device_id = 0; device_id < devices.size(); + ++device_id) { + platform_devices.push_back( + std::make_tuple(platforms[platform_id], devices[device_id])); + Get().device_contexts_.emplace_back( + DeviceContext(cuda_device_count + greentea_device_count, + Backend::BACKEND_OpenCL)); + // Check if this device is really used and initialize + bool is_used = false; + for (int i = 0; i < device_ids.size(); ++i) { + int device_id = device_ids[i]; + if (device_id == cuda_device_count + greentea_device_count) { + // Setup actual context and compile kernels for this device + viennacl::ocl::setup_context( + device_id, std::get<1>( + platform_devices[greentea_device_count])); + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + static_cast(device_id)); + viennacl::ocl::program & program = RegisterKernels(&ctx); + Get().ocl_programs_.push_back(program); + // viennacl::ocl::switch_context(device_id); + // viennacl::ocl::switch_device(std::get<1> + // (platform_devices[device_id - cuda_device_count])); + + // Add defined number of queues + for (int q = 0; q < GREENTEA_QUEUE_COUNT - 1; ++q) { + ctx.add_queue(ctx.current_device()); + } + Caffe::GetDeviceContext(device_id)->Init(); + is_used = true; } - Caffe::GetDeviceContext(device_id)->Init(); - is_used = true; } + // Device not used, dummy + if (!is_used) { + viennacl::ocl::program program; + Get().ocl_programs_.push_back(program); + } + greentea_device_count++; } - // Device not used, dummy - if (!is_used) { - viennacl::ocl::program program; - Get().ocl_programs_.push_back(program); - } - greentea_device_count++; + } catch (...) { + LOG(INFO)<< "OpenCL platform: " + << platforms[platform_id].info() + << " does not work correctly."; } } #endif // USE_GREENTEA diff --git a/src/caffe/layers/conv_nd_layer.cu b/src/caffe/layers/conv_nd_layer.cu index c1afc535f83..d13f202055e 100644 --- a/src/caffe/layers/conv_nd_layer.cu +++ b/src/caffe/layers/conv_nd_layer.cu @@ -67,6 +67,10 @@ void ConvolutionNDLayer::Backward_gpu(const vector*>& top, } } #endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + // TODO +#endif // USE_GREENTEA } } diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp index 76f570f9a8d..1bbe684e4fb 100644 --- a/src/caffe/test/test_convolution_nd_layer.cpp +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -69,14 +69,25 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); + TypeParam checksum = 0; + for (int cd = 0; cd < d; ++cd) { for (int ch = 0; ch < h; ++ch) { for (int cw = 0; cw < w; ++cw) { bottom_data[cw + ch * w + cd * w * h] = cw + ch * w + cd * w * h; + if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { + checksum += cw + ch * w + cd * w * h; + } } } } + + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + const TypeParam *top_data = blob_top_->cpu_data(); + + EXPECT_EQ(checksum, top_data[0]); } void TestBackward() { From 5bf9e26271018c4f81e739d034102e6d09528056 Mon Sep 17 00:00:00 2001 From: Srini Turaga Date: Wed, 8 Jul 2015 14:43:30 -0400 Subject: [PATCH 135/254] My Makefile configuration. Without OpenMP for now. --- Makefile.config.srini | 113 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 Makefile.config.srini diff --git a/Makefile.config.srini b/Makefile.config.srini new file mode 100644 index 00000000000..c4125eba5bb --- /dev/null +++ b/Makefile.config.srini @@ -0,0 +1,113 @@ +## Refer to http://caffe.berkeleyvision.org/installation.html +# Contributions simplifying and improving our build system are welcome! + +# GreenTea (ViennaCL/OpenCL) backend switch + +# Enable the CUDA backend +USE_CUDA := 1 + +# Enable the OpenCL/Greentea backend +USE_GREENTEA := 0 + +# Folder of the ViennaCL header-only library +VIENNACL_DIR = ../ViennaCL + +# Either set clBLAS to 1 or it will use ViennaclBLAS. +# CLBLAS should be faster, especially on AMD cards. +USE_CLBLAS := 0 + +# cuDNN acceleration switch (uncomment to build with cuDNN). +USE_CUDNN := 0 + +# CPU-only switch (uncomment to build without GPU support). +# CPU_ONLY := 1 + +# To customize your choice of compiler, uncomment and set the following. +# N.B. the default for Linux is g++ and the default for OSX is clang++ +# CUSTOM_CXX := g++ + +# CUDA directory contains bin/ and lib/ directories that we need. +CUDA_DIR := /usr/local/cuda-7.0 +# On Ubuntu 14.04, if cuda tools are installed via +# "sudo apt-get install nvidia-cuda-toolkit" then use this instead: +# CUDA_DIR := /usr + +# CUDA architecture setting: going with all of them. +# For CUDA < 6.0, comment the *_50 lines for compatibility. +CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ + -gencode arch=compute_20,code=sm_21 \ + -gencode arch=compute_30,code=sm_30 \ + -gencode arch=compute_35,code=sm_35 \ + -gencode arch=compute_50,code=sm_50 \ + -gencode arch=compute_50,code=compute_50 + +# BLAS choice: +# atlas for ATLAS (default) +# mkl for MKL +# open for OpenBlas +# BLAS := atlas +BLAS := mkl +# Custom (MKL/ATLAS/OpenBLAS) include and lib directories. +# Leave commented to accept the defaults for your choice of BLAS +# (which should work)! +# BLAS_INCLUDE := /path/to/your/blas +# BLAS_LIB := /path/to/your/blas +BLAS_INCLUDE := /usr/local/mkl/include +BLAS_LIB := /usr/local/mkl/lib + +# Homebrew puts openblas in a directory that is not on the standard search path +# BLAS_INCLUDE := $(shell brew --prefix openblas)/include +# BLAS_LIB := $(shell brew --prefix openblas)/lib + +# This is required only if you will compile the matlab interface. +# MATLAB directory should contain the mex binary in /bin. +# MATLAB_DIR := /usr/local +# MATLAB_DIR := /Applications/MATLAB_R2012b.app +MATLAB_DIR := /opt/MATLAB/R2015a/ + +# NOTE: this is required only if you will compile the python interface. +# We need to be able to find Python.h and numpy/arrayobject.h. +# PYTHON_INCLUDE := /usr/include/python2.7 \ + /usr/lib/python2.7/dist-packages/numpy/core/include +# Anaconda Python distribution is quite popular. Include path: +# Verify anaconda location, sometimes it's in root. +ANACONDA_HOME := $(HOME)/anaconda +PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ + $(ANACONDA_HOME)/include/python2.7 \ + $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \ + +# We need to be able to find libpythonX.X.so or .dylib. +# PYTHON_LIB := /usr/lib +PYTHON_LIB := $(ANACONDA_HOME)/lib + +# Homebrew installs numpy in a non standard path (keg only) +# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include +# PYTHON_LIB += $(shell brew --prefix numpy)/lib + +# Uncomment to support layers written in Python (will link against Python libs) +# WITH_PYTHON_LAYER := 1 + +# Whatever else you find you need goes here. +INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include $(HOME)/include +LIBRARY_DIRS := /opt/rh/devtoolset-3/root/usr/lib/gcc/x86_64-redhat-linux/4.9.1/ $(PYTHON_LIB) /usr/lib64 /usr/lib $(HOME)/lib $(HOME)/lib64 /usr/local/lib + +# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies +# INCLUDE_DIRS += $(shell brew --prefix)/include +# LIBRARY_DIRS += $(shell brew --prefix)/lib + +# Uncomment to use `pkg-config` to specify OpenCV library paths. +# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.) +# USE_PKG_CONFIG := 1 + +BUILD_DIR := build +DISTRIBUTE_DIR := distribute + +# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171 +# DEBUG := 1 +# VIENNACL_DEBUG := 0 + +# The ID of the GPU that 'make runtest' will use to run unit tests. +TEST_GPUID := 0 + +# enable pretty build (comment to see full commands) +#Q ?= @ From b06162c9af604bc93a468a684076b2d38bdc946e Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 8 Jul 2015 17:08:09 -0400 Subject: [PATCH 136/254] ND-SK forward/backward layers for CUDA and OpenCL. --- include/caffe/greentea/greentea_im2col.hpp | 20 +++ include/caffe/vision_layers.hpp | 58 ++++++- src/caffe/greentea/cl_kernels.cpp | 4 + src/caffe/greentea/cl_kernels/im2col_nd.cl | 160 ++++++++++++++++++ src/caffe/greentea/greentea_im2col.cpp | 79 +++++++++ src/caffe/layers/base_conv_nd_layer.cpp | 163 +++++++++++++++---- src/caffe/layers/conv_nd_layer.cu | 75 +++++---- src/caffe/layers/deconv_nd_layer.cu | 40 +++-- src/caffe/test/test_convolution_nd_layer.cpp | 1 + 9 files changed, 520 insertions(+), 80 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/im2col_nd.cl diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index b5fad1335b6..87fd44d816b 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -61,6 +61,26 @@ void greentea_col2im_gpu(viennacl::ocl::program *prog, const int stride_w, cl_mem data_im, const int data_im_off); +template +void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, const int num_spatial_axes, + const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem kstride, cl_mem data_col, int data_col_off); + +template +void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, const int num_spatial_axes, + const int im_size, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_im, + int data_off); + + + } // namespace caffe #endif // USE_GREENTEA diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 17a46c129fd..61d0e0e1556 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -296,14 +296,19 @@ class BaseConvolutionNDLayer : public Layer { // we just called weight_cpu_gemm with the same input. #ifndef CPU_ONLY - void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); - void forward_gpu_bias(Dtype* output, const Dtype* bias); - void backward_gpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* col_output); - void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* - weights); - void backward_gpu_bias(Dtype* bias, const Dtype* input); + void forward_gpu_gemm(const Dtype* col_input, const int col_input_off, + const Dtype* weights, Dtype* output, + const int output_off, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const int output_off, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const int input_off, + const Dtype* weights, Dtype* col_output, + const int col_output_off); + void weight_gpu_gemm(const Dtype* col_input, const int col_input_off, + const Dtype* output, const int output_off, + Dtype* weights); + void backward_gpu_bias(Dtype* bias, const Dtype* input, const int input_off); + + shared_ptr< Blob > col_buffer(); #endif // !CPU_ONLY // reverse_dimensions should return true iff we are implementing deconv, so @@ -359,6 +364,43 @@ class BaseConvolutionNDLayer : public Layer { } #endif // USE_CUDA #ifdef USE_GREENTEA + inline void greentea_conv_im2col_gpu(const Dtype* data, const int data_off, + Dtype* col_buff, + const int col_buff_off) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + greentea_im2col_nd_gpu(&program, &ctx, (cl_mem)data, data_off, + num_spatial_axes_, + num_kernels_im2col_, + (cl_mem)(conv_input_shape_.gpu_data()), + (cl_mem)(col_buffer_.gpu_shape()), + (cl_mem)(kernel_shape_.gpu_data()), + (cl_mem)(pad_.gpu_data()), + (cl_mem)(stride_.gpu_data()), + (cl_mem)(kstride_.gpu_data()), + (cl_mem) col_buff, col_buff_off); + } + inline void greentea_conv_col2im_gpu(const Dtype* col_buff, + const int col_buff_off, Dtype* data, + const int data_off) { + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + greentea_col2im_nd_gpu(&program, &ctx, + (cl_mem) col_buff, col_buff_off, + num_spatial_axes_, + num_kernels_col2im_, + (cl_mem)(conv_input_shape_.gpu_data()), + (cl_mem)(col_buffer_.gpu_shape()), + (cl_mem)(kernel_shape_.gpu_data()), + (cl_mem)(pad_.gpu_data()), + (cl_mem)(stride_.gpu_data()), + (cl_mem)(kstride_.gpu_data()), + (cl_mem) data, data_off); + } #endif // USE_GREENTEA #endif // !CPU_ONLY diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 01fff6b8f55..4b5e07921a3 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -17,6 +17,7 @@ std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset + data_col_off];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_off] = val;\n }\n}"; // NOLINT std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT @@ -35,6 +36,7 @@ std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset + data_col_off];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_off] = val;\n }\n}"; // NOLINT std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT @@ -57,6 +59,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << eltwise_float << "\n\n"; // NOLINT ss << fillbuffer_float << "\n\n"; // NOLINT ss << im2col_float << "\n\n"; // NOLINT + ss << im2col_nd_float << "\n\n"; // NOLINT ss << im2col_sk_float << "\n\n"; // NOLINT ss << lrn_float << "\n\n"; // NOLINT ss << math_float << "\n\n"; // NOLINT @@ -78,6 +81,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << eltwise_double << "\n\n"; // NOLINT ss << fillbuffer_double << "\n\n"; // NOLINT ss << im2col_double << "\n\n"; // NOLINT + ss << im2col_nd_double << "\n\n"; // NOLINT ss << im2col_sk_double << "\n\n"; // NOLINT ss << lrn_double << "\n\n"; // NOLINT ss << math_double << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl new file mode 100644 index 00000000000..0bab9c39bc1 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -0,0 +1,160 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index + data_off] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset + data_col_off]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index + data_off] = val; + } +} diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 6baeb4af8eb..7283f08af18 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -232,5 +232,84 @@ template void greentea_col2im_gpu(viennacl::ocl::program *prog, const int stride_w, cl_mem data_im, const int data_im_off); + + + +template +void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, const int num_spatial_axes, + const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem kstride, cl_mem data_col, int data_col_off) { + viennacl::ocl::kernel &kernel = prog->get_kernel( + CL_KERNEL_SELECT("im2col_nd")); + + viennacl::ocl::enqueue( + kernel(num_kernels, num_spatial_axes, + WrapHandle(data_im, ctx), data_off, WrapHandle(im_shape, ctx), + WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), + WrapHandle(pad, ctx), WrapHandle(stride, ctx), + WrapHandle(kstride, ctx), WrapHandle(data_col, ctx), + data_col_off), + ctx->get_queue()); +} + +// Explicit instantiation +template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, + const int num_spatial_axes, const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem kstride, cl_mem data_col, int data_col_off); + +template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, + const int num_spatial_axes, const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem kstride, cl_mem data_col, int data_col_off); + +template +void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, const int num_spatial_axes, + const int im_size, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_im, + int data_off) { + viennacl::ocl::kernel &kernel = prog->get_kernel( + CL_KERNEL_SELECT("col2im_nd")); + + viennacl::ocl::enqueue( + kernel(im_size, num_spatial_axes, + WrapHandle(data_col, ctx), data_col_off, WrapHandle(im_shape, ctx), + WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), + WrapHandle(pad, ctx), WrapHandle(stride, ctx), + WrapHandle(kstride, ctx), WrapHandle(data_im, ctx), + data_off), + ctx->get_queue()); +} + +// Explicit instantiation +template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, const int num_spatial_axes, + const int im_size, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_im, + int data_off); +template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, const int num_spatial_axes, + const int im_size, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_im, + int data_off); + + } // namespace caffe #endif diff --git a/src/caffe/layers/base_conv_nd_layer.cpp b/src/caffe/layers/base_conv_nd_layer.cpp index 3686ce281f1..162cb69a5b0 100644 --- a/src/caffe/layers/base_conv_nd_layer.cpp +++ b/src/caffe/layers/base_conv_nd_layer.cpp @@ -249,6 +249,11 @@ void BaseConvolutionNDLayer::Reshape(const vector*>& bottom, } } col_buffer_.Reshape(col_buffer_shape_); + if (Caffe::mode() == Caffe::Brew::GPU) { + shared_ptr< Blob > buffer = + this->device_context_->template Buffer(0); + buffer->Reshape(col_buffer_shape_); + } bottom_dim_ = bottom[0]->count(channel_axis_); top_dim_ = top[0]->count(channel_axis_); num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_; @@ -265,95 +270,189 @@ void BaseConvolutionNDLayer::Reshape(const vector*>& bottom, #ifndef CPU_ONLY -template +template void BaseConvolutionNDLayer::forward_gpu_gemm(const Dtype* input, - const Dtype* weights, Dtype* output, bool skip_im2col) { + const int input_off, + const Dtype* weights, + Dtype* output, + const int output_off, + bool skip_im2col) { const Dtype* col_buff = input; - if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (!is_1x1_) { if (!skip_im2col) { - conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); + conv_im2col_gpu(input + input_off, col_buffer()->mutable_gpu_data()); } - col_buff = col_buffer_.gpu_data(); + col_buff = col_buffer()->gpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, - (Dtype)0., output + output_offset_ * g); + caffe_gpu_gemm( + CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, + conv_out_spatial_dim_, kernel_dim_ / group_, (Dtype) 1., + weights + weight_offset_ * g, + col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 0., + output + output_off + output_offset_ * g); } #endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + if (!is_1x1_) { + if (!skip_im2col) { + greentea_conv_im2col_gpu(input, input_off, + col_buffer()->mutable_gpu_data(), 0); + } + col_buff = col_buffer()->gpu_data(); + } + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, + CblasNoTrans, conv_out_channels_ / group_, + conv_out_spatial_dim_, kernel_dim_ / group_, + (Dtype) 1., (cl_mem) weights, weight_offset_ * g, + (cl_mem) col_buff, + (is_1x1_ ? input_off : 0) + col_offset_ * g, + (Dtype) 0., (cl_mem) output, + output_off + output_offset_ * g); + } +#endif // USE_GREENTEA } } -template +template void BaseConvolutionNDLayer::forward_gpu_bias(Dtype* output, - const Dtype* bias) { + const int output_off, + const Dtype* bias) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), - (Dtype)1., output); + out_spatial_dim_, 1, (Dtype) 1., bias, + bias_multiplier_.gpu_data(), (Dtype) 1., + output + output_off); #endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, + CblasNoTrans, num_output_, + out_spatial_dim_, 1, (Dtype) 1., + (cl_mem) bias, 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (Dtype) 1., (cl_mem) output, output_off); +#endif // USE_GREENTEA } } -template + +template void BaseConvolutionNDLayer::backward_gpu_gemm(const Dtype* output, - const Dtype* weights, Dtype* input) { - Dtype* col_buff = col_buffer_.mutable_gpu_data(); + const int output_off, + const Dtype* weights, + Dtype* input, + const int input_off) { + Dtype* col_buff = col_buffer()->mutable_gpu_data(); if (is_1x1_) { col_buff = input; } if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, conv_out_channels_ / group_, - (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, - (Dtype)0., col_buff + col_offset_ * g); + caffe_gpu_gemm( + CblasTrans, CblasNoTrans, kernel_dim_ / group_, conv_out_spatial_dim_, + conv_out_channels_ / group_, (Dtype) 1., weights + weight_offset_ * g, + output + output_off + output_offset_ * g, (Dtype) 0., + col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g); } if (!is_1x1_) { - conv_col2im_gpu(col_buff, input); + conv_col2im_gpu(col_buff, input + input_off); } #endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_->id(), CblasTrans, + CblasNoTrans, kernel_dim_ / group_, + conv_out_spatial_dim_, + conv_out_channels_ / group_, (Dtype) 1., + (cl_mem) weights, weight_offset_ * g, + (cl_mem) output, output_off + output_offset_ * g, + (Dtype) 0., (cl_mem) col_buff, + (is_1x1_ ? input_off : 0) + col_offset_ * g); + } + if (!is_1x1_) { + greentea_conv_col2im_gpu(col_buff, 0, input, input_off); + } +#endif // USE_GREENTEA } } -template +template void BaseConvolutionNDLayer::weight_gpu_gemm(const Dtype* input, - const Dtype* output, Dtype* weights) { + const int input_off, + const Dtype* output, + const int output_off, + Dtype* weights) { const Dtype* col_buff = input; if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (!is_1x1_) { - conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); - col_buff = col_buffer_.gpu_data(); + conv_im2col_gpu(input + input_off, col_buffer()->mutable_gpu_data()); + col_buff = col_buffer()->gpu_data(); } for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasTrans, - conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, - (Dtype)1., weights + weight_offset_ * g); + caffe_gpu_gemm( + CblasNoTrans, CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype) 1., + output + output_off + output_offset_ * g, + col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., + weights + weight_offset_ * g); } #endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + if (!is_1x1_) { + greentea_conv_im2col_gpu(input, input_off, + col_buffer()->mutable_gpu_data(), 0); + col_buff = col_buffer()->gpu_data(); + } + for (int g = 0; g < group_; ++g) { + greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, + CblasTrans, conv_out_channels_ / group_, + kernel_dim_ / group_, conv_out_spatial_dim_, + (Dtype) 1., (cl_mem) output, + output_off + output_offset_ * g, + (cl_mem) col_buff, + (is_1x1_ ? input_off : 0) + col_offset_ * g, + (Dtype) 1., (cl_mem) weights, + weight_offset_ * g); + } +#endif // USE_GREENTEA } } template void BaseConvolutionNDLayer::backward_gpu_bias(Dtype* bias, - const Dtype* input) { + const Dtype* input, const int input_off) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., - input, bias_multiplier_.gpu_data(), 1., bias); + input + input_off, bias_multiplier_.gpu_data(), 1., bias); #endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, + num_output_, out_spatial_dim_, 1., + (cl_mem) input, input_off, + (cl_mem) (bias_multiplier_.gpu_data()), 0, 1., + (cl_mem) bias, 0); +#endif // USE_GREENTEA } } +template +shared_ptr< Blob > BaseConvolutionNDLayer::col_buffer() { + return this->device_context_-> + template Buffer(this->device_context_->current_queue_id()); +} + #endif // !CPU_ONLY INSTANTIATE_CLASS(BaseConvolutionNDLayer); diff --git a/src/caffe/layers/conv_nd_layer.cu b/src/caffe/layers/conv_nd_layer.cu index d13f202055e..e71ac43af1b 100644 --- a/src/caffe/layers/conv_nd_layer.cu +++ b/src/caffe/layers/conv_nd_layer.cu @@ -6,6 +6,12 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { template @@ -16,11 +22,11 @@ void ConvolutionNDLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { - this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); + this->forward_gpu_gemm(bottom_data, n * this->bottom_dim_, weight, + top_data, n * this->top_dim_); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data + n * this->top_dim_, bias); + this->forward_gpu_bias(top_data, n * this->top_dim_, bias); } } } @@ -40,38 +46,47 @@ void ConvolutionNDLayer::Backward_gpu(const vector*>& top, caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), this->blobs_[1]->mutable_gpu_diff()); } - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->gpu_diff(); - // Bias gradient, if necessary. - if (this->bias_term_ && this->param_propagate_down_[1]) { - Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); - } - } - if (this->param_propagate_down_[0] || propagate_down[i]) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, - top_diff + n * this->top_dim_, weight_diff); - } - // gradient w.r.t. bottom data, if necessary. - if (propagate_down[i]) { - this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_); - } - } - } - } #endif // USE_CUDA } else { #ifdef USE_GREENTEA - // TODO + if (this->param_propagate_down_[0]) { + greentea_gpu_set(this->device_context_->id(), + this->blobs_[0]->count(), Dtype(0), + (cl_mem)weight_diff, 0); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + greentea_gpu_set(this->device_context_->id(), + this->blobs_[1]->count(), Dtype(0), + (cl_mem)(this->blobs_[1]->mutable_gpu_diff()), 0); + } #endif // USE_GREENTEA } + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Bias gradient, if necessary. + if (this->bias_term_ && this->param_propagate_down_[1]) { + Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); + } + } + if (this->param_propagate_down_[0] || propagate_down[i]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. + if (this->param_propagate_down_[0]) { + this->weight_gpu_gemm(bottom_data, n * this->bottom_dim_, + top_diff, n * this->top_dim_, weight_diff); + } + // gradient w.r.t. bottom data, if necessary. + if (propagate_down[i]) { + this->backward_gpu_gemm(top_diff, n * this->top_dim_, weight, + bottom_diff, n * this->bottom_dim_); + } + } + } + } } INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionNDLayer); diff --git a/src/caffe/layers/deconv_nd_layer.cu b/src/caffe/layers/deconv_nd_layer.cu index c47101c85f0..fb0e2b48c8b 100644 --- a/src/caffe/layers/deconv_nd_layer.cu +++ b/src/caffe/layers/deconv_nd_layer.cu @@ -6,6 +6,12 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_im2col.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { template @@ -17,11 +23,11 @@ void DeconvolutionNDLayer::Forward_gpu( const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); + this->backward_gpu_gemm(bottom_data, n * this->bottom_dim_, weight, + top_data, n * this->top_dim_); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data + n * this->top_dim_, bias); + this->forward_gpu_bias(top_data, n * this->top_dim_, bias); } } } @@ -41,6 +47,22 @@ void DeconvolutionNDLayer::Backward_gpu(const vector*>& top, caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), this->blobs_[1]->mutable_gpu_diff()); } +#endif // USE CUDA + } else { +#ifdef USE_GREENTEA + if (this->param_propagate_down_[0]) { + greentea_gpu_set(this->device_context_->id(), + this->blobs_[0]->count(), Dtype(0), + (cl_mem) weight_diff, 0); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + greentea_gpu_set(this->device_context_->id(), + this->blobs_[1]->count(), Dtype(0), + (cl_mem) (this->blobs_[1]->mutable_gpu_diff()), + 0); + } +#endif // USE_GREENTEA + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); @@ -49,26 +71,24 @@ void DeconvolutionNDLayer::Backward_gpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); + this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(top_diff + n * this->top_dim_, - bottom_data + n * this->bottom_dim_, weight_diff); + this->weight_gpu_gemm(top_diff, n * this->top_dim_, + bottom_data, n * this->bottom_dim_, weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { - this->forward_gpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_); + this->forward_gpu_gemm(top_diff, n * this->top_dim_, weight, + bottom_diff, n * this->bottom_dim_); } } } } -#endif // USE CUDA - } } INSTANTIATE_LAYER_GPU_FUNCS(DeconvolutionNDLayer); diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp index 1bbe684e4fb..a82493b872f 100644 --- a/src/caffe/test/test_convolution_nd_layer.cpp +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -91,6 +91,7 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { } void TestBackward() { + // TODO } Blob* const blob_bottom_; From fa384d476408d00196cc2c71da272b9acf1dfd9c Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 9 Jul 2015 01:18:58 -0400 Subject: [PATCH 137/254] Fixed prefetch thread conflicts with OpenCL. --- src/caffe/layers/base_data_layer.cu | 4 ++-- src/caffe/syncedmem.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 0fa571e8f3d..135ade11d59 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -34,14 +34,14 @@ void BasePrefetchingDataLayer::Forward_gpu( top[0]->ReshapeLike(this->prefetch_data_); // Copy the data greentea_copy(prefetch_data_.count(), - (cl_mem) (prefetch_data_.gpu_data()), 0, + prefetch_data_.cpu_data(), (cl_mem) (top[0]->mutable_gpu_data()), 0, &ctx); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(prefetch_label_); // Copy the labels. greentea_copy(prefetch_label_.count(), - (cl_mem) (prefetch_label_.gpu_data()), 0, + prefetch_label_.cpu_data(), (cl_mem) (top[1]->mutable_gpu_data()), 0, &ctx); } #endif // USE_GREENTEA diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index efc5aa4dd28..173731a967f 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -181,7 +181,7 @@ void SyncedMemory::set_cpu_data(void* data) { CHECK(data); #ifndef CPU_ONLY #ifdef USE_GREENTEA - if (Caffe::mode() == Caffe::Brew::GPU) { + if (Caffe::mode() == Caffe::Brew::GPU && gpu_ptr_ != nullptr) { if (device_context_->backend() == Backend::BACKEND_OpenCL) { viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_->id()); From d732db371e78bd556eafa02aa693050d2d078fd7 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 9 Jul 2015 02:04:17 -0400 Subject: [PATCH 138/254] Fixed test binary for CMAKE compilation. --- src/caffe/test/test_caffe_main.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index ff169ad10c6..2107c7d26db 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -4,6 +4,10 @@ #include "caffe/caffe.hpp" #include "caffe/test/test_caffe_main.hpp" +#ifndef TEST_DEVICE +#define TEST_DEVICE 0 +#endif + namespace caffe { #ifndef CPU_ONLY #ifdef USE_CUDA From 0313a56dcab2a7340e278b008547ca9eb10ee61c Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 9 Jul 2015 11:26:59 -0400 Subject: [PATCH 139/254] Fixed CMAKE test target build (runtest). --- .gitignore | 1 + cmake/Templates/caffe_config.h.in | 13 +++---------- src/caffe/test/CMakeLists.txt | 8 ++++++-- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index 28f2aca854b..280e1455a4f 100644 --- a/.gitignore +++ b/.gitignore @@ -76,6 +76,7 @@ distribute/* python/caffe/proto/ cmake_build .cmake_build +*.gen.cmake # Generated documentation docs/_site diff --git a/cmake/Templates/caffe_config.h.in b/cmake/Templates/caffe_config.h.in index 7407a19218c..4408745be51 100644 --- a/cmake/Templates/caffe_config.h.in +++ b/cmake/Templates/caffe_config.h.in @@ -29,16 +29,9 @@ /* Test device */ #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} -/* Temporary (TODO: remove) */ -#if 1 - #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" - #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" - #define CMAKE_EXT ".gen.cmake" -#else - #define CMAKE_SOURCE_DIR "src/" - #define EXAMPLES_SOURCE_DIR "examples/" - #define CMAKE_EXT "" -#endif +#define CMAKE_SOURCE_DIR "src/" +#define EXAMPLES_SOURCE_DIR "examples/" +#define CMAKE_EXT "" /* Matlab */ #cmakedefine HAVE_MATLAB diff --git a/src/caffe/test/CMakeLists.txt b/src/caffe/test/CMakeLists.txt index 35a803f2f41..b8212fb7e86 100644 --- a/src/caffe/test/CMakeLists.txt +++ b/src/caffe/test/CMakeLists.txt @@ -18,11 +18,15 @@ caffe_leave_only_selected_tests(test_cuda ${BUILD_only_tests}) set(the_target test.testbin) set(test_args --gtest_shuffle) -if(HAVE_CUDA) +if(HAVE_CUDA AND USE_CUDA) caffe_cuda_compile(test_cuda_objs ${test_cuda}) list(APPEND test_srcs ${test_cuda_objs} ${test_cuda}) else() - list(APPEND test_args --gtest_filter="-*GPU*") + if(USE_GREENTEA) + list(APPEND test_srcs ${test_cuda_objs} ${test_cuda}) + else() + list(APPEND test_args --gtest_filter="-*GPU*") + endif() endif() # ---[ Adding test target From 52bd7918f24168b19cad35920e0c717a25125523 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 10 Jul 2015 00:32:41 -0400 Subject: [PATCH 140/254] Changed CPU-OpenCL memory paradigmas, fixed OpenCL kernel for MVN. --- include/caffe/syncedmem.hpp | 8 ++++- src/caffe/greentea/cl_kernels.cpp | 4 +-- src/caffe/greentea/cl_kernels/math.cl | 10 +++--- src/caffe/syncedmem.cpp | 49 ++++++++++++++++----------- 4 files changed, 43 insertions(+), 28 deletions(-) diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index e926f6dd502..06b3e6221ff 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -8,6 +8,9 @@ #include "caffe/greentea/greentea.hpp" +#define OPENCL_PAGE_ALIGN 4096 +#define OPENCL_CACHE_ALIGN 64 + namespace caffe { // Theoretically, CaffeMallocHost and CaffeFreeHost should simply call the @@ -25,7 +28,10 @@ namespace caffe { // does not seem to create a memory bottleneck here. inline void CaffeMallocHost(void** ptr, size_t size) { - *ptr = malloc(size); + // Make sure the memory is zero-copy usable in OpenCL + // All OpenCL/CUDA memory copy operations might profit from this. + posix_memalign(ptr, OPENCL_PAGE_ALIGN, + ((size - 1)/OPENCL_CACHE_ALIGN + 1) * OPENCL_CACHE_ALIGN); CHECK(*ptr) << "host allocation of size " << size << " failed"; } diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 4b5e07921a3..ab9b7feace1 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -20,7 +20,7 @@ std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset + data_col_off];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_off] = val;\n }\n}"; // NOLINT std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT @@ -39,7 +39,7 @@ std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset + data_col_off];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_off] = val;\n }\n}"; // NOLINT std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(a[offa + index] < 0 && alpha < 1 && alpha > -1) {\n y[offy + index] = NAN;\n } else {\n y[offy + index] = pow(a[offa + index], alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index 63ceba5fe41..0b3be7d0cff 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -77,11 +77,11 @@ __kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, __global Dtype* y, const int offy) { for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - if(a[offa + index] < 0 && alpha < 1 && alpha > -1) { - y[offy + index] = NAN; - } else { - y[offy + index] = pow(a[offa + index], alpha); - } + if(alpha == 2.0) { + y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); + } else { + y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha); + } } } diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 173731a967f..977d96b9678 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -15,25 +15,34 @@ namespace caffe { SyncedMemory::~SyncedMemory() { - if (cpu_ptr_ && own_cpu_data_) { - CaffeFreeHost(cpu_ptr_); - } - #ifndef CPU_ONLY if (gpu_ptr_) { if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA + // Free device memory cudaFree(gpu_ptr_); device_context_->DecreaseMemoryUsage(size_); #endif // USE_CUDA } else { #ifdef USE_GREENTEA + // Free device memory clReleaseMemObject(cl_gpu_mem_); device_context_->DecreaseMemoryUsage(size_); + viennacl::ocl::context ctx = viennacl::ocl::get_context( + device_context_->id()); + ctx.get_queue().finish(); + // Special case, return to avoid double-freeing + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { + return; + } #endif // USE_GREENTEA } } -#endif // CPU_ONLY +#endif // !CPU_ONLY + // Free host memory + if (cpu_ptr_ && own_cpu_data_) { + CaffeFreeHost(cpu_ptr_); + } } inline void SyncedMemory::to_cpu() { @@ -61,7 +70,7 @@ inline void SyncedMemory::to_cpu() { device_context_->id()); ctx.get_queue().finish(); // On the CPU, memory is shared (and no copy needed) - if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { + if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU || !own_cpu_data_) { greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, 0, cpu_ptr_, &ctx); } ctx.get_queue().finish(); @@ -100,6 +109,7 @@ inline void SyncedMemory::to_gpu() { if (cpu_ptr_ == NULL) { CaffeMallocHost(&cpu_ptr_, size_); caffe_memset(size_, 0, cpu_ptr_); + own_cpu_data_ = true; } cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, @@ -135,16 +145,11 @@ inline void SyncedMemory::to_gpu() { ctx.get_queue().finish(); if (gpu_ptr_ == NULL) { cl_int err; - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // CPU memory is shared - if (cpu_ptr_ == NULL) { - CaffeMallocHost(&cpu_ptr_, size_); - } + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU && own_cpu_data_) { cl_gpu_mem_ = clCreateBuffer( ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, size_, cpu_ptr_, &err); device_context_->IncreaseMemoryUsage(size_); - } else { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, size_, NULL, &err); @@ -154,7 +159,7 @@ inline void SyncedMemory::to_gpu() { ctx.get_queue().finish(); } // On the CPU, memory is shared (and no copy needed) - if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU) { + if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU || !own_cpu_data_) { greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, &ctx); } ctx.get_queue().finish(); @@ -181,14 +186,18 @@ void SyncedMemory::set_cpu_data(void* data) { CHECK(data); #ifndef CPU_ONLY #ifdef USE_GREENTEA - if (Caffe::mode() == Caffe::Brew::GPU && gpu_ptr_ != nullptr) { - if (device_context_->backend() == Backend::BACKEND_OpenCL) { - viennacl::ocl::context ctx = viennacl::ocl::get_context( - device_context_->id()); + if (Caffe::mode() == Caffe::Brew::GPU && + device_context_->backend() == Backend::BACKEND_OpenCL) { + viennacl::ocl::context ctx = viennacl::ocl::get_context( + device_context_->id()); + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { ctx.get_queue().finish(); - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - gpu_ptr_ = nullptr; - } + cpu_ptr_ = data; + gpu_ptr_ = nullptr; + head_ = HEAD_AT_CPU; + own_cpu_data_ = false; + // Return, skipping release of the host memory + return; } } #endif // USE_GREENTEA From ffda2d570c545cc8cc0510d2db603d2425a1004e Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 11 Jul 2015 16:33:50 -0400 Subject: [PATCH 141/254] Unit-tested ND-SK convolution layer, improved CPU memory model on OpenCL, preparations for ND-SK pooling layer. --- include/caffe/syncedmem.hpp | 5 +- include/caffe/vision_layers.hpp | 108 +++---- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/im2col_nd.cl | 65 ++-- src/caffe/greentea/greentea_im2col.cpp | 1 + .../greentea/greentea_math_functions.cpp | 158 +++++++--- src/caffe/layers/lrn_layer.cpp | 4 +- src/caffe/layers/malis_loss_layer.cpp | 8 +- src/caffe/layers/pooling_layer.cpp | 20 +- src/caffe/layers/pooling_nd_layer.cpp | 134 +++++++++ src/caffe/layers/pooling_nd_layer.cu | 282 ++++++++++++++++++ src/caffe/layers/pooling_sk_layer.cpp | 21 +- src/caffe/proto/caffe.proto | 13 +- src/caffe/syncedmem.cpp | 85 ++---- src/caffe/test/test_convolution_nd_layer.cpp | 61 +++- .../test/test_maxpool_dropout_layers.cpp | 12 +- src/caffe/test/test_pooling_layer.cpp | 36 +-- src/caffe/test/test_stochastic_pooling.cpp | 16 +- src/caffe/util/im2col.cu | 73 +++-- src/caffe/util/upgrade_proto.cpp | 6 +- 20 files changed, 835 insertions(+), 277 deletions(-) create mode 100644 src/caffe/layers/pooling_nd_layer.cpp create mode 100644 src/caffe/layers/pooling_nd_layer.cu diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 06b3e6221ff..8ab0e0ec033 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -30,8 +30,9 @@ namespace caffe { inline void CaffeMallocHost(void** ptr, size_t size) { // Make sure the memory is zero-copy usable in OpenCL // All OpenCL/CUDA memory copy operations might profit from this. - posix_memalign(ptr, OPENCL_PAGE_ALIGN, - ((size - 1)/OPENCL_CACHE_ALIGN + 1) * OPENCL_CACHE_ALIGN); + CHECK_EQ(0, posix_memalign(ptr, OPENCL_PAGE_ALIGN, + ((size - 1)/OPENCL_CACHE_ALIGN + 1) * OPENCL_CACHE_ALIGN)) + << "Host memory allocation error"; CHECK(*ptr) << "host allocation of size " << size << " failed"; } diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 61d0e0e1556..5b665e8fb94 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -518,57 +518,6 @@ class ConvolutionSKLayer : public Layer { int M_, K_, N_; }; - -/** - * @brief Convolves the input image for pixelwise classification. - * - * Layer introduced by Hongsheng et al. - */ -template -class ConvolutionNDSKLayer : public Layer { - public: - explicit ConvolutionNDSKLayer(const LayerParameter& param) - : Layer(param) { - } - - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { - return "ConvolutionNDSK"; - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - - shared_ptr< Blob > col_buffer(); - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int channels_; - int group_; - int height_, width_; - int pad_h_, pad_w_; - int kstride_h_, kstride_w_; - int num_, num_output_; - Blob col_buffer_; - Blob bias_multiplier_; - bool bias_term_; - int M_, K_, N_; -}; - - /** * @brief Convolves the input image with a bank of learned filters, * and (optionally) adds biases. @@ -925,6 +874,63 @@ class PoolingSKLayer : public Layer { Blob max_idx_; }; + +/** + * @brief Pools the input image by taking the max, average, etc. within regions. + * + * For whole image processing, reducing redundancy. + */ +template +class PoolingNDLayer : public Layer { + public: + explicit PoolingNDLayer(const LayerParameter& param) + : Layer(param) { + } + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + virtual inline const char* type() const { + return "PoolingND"; + } + virtual inline int ExactNumBottomBlobs() const { + return 1; + } + virtual inline int MinTopBlobs() const { + return 1; + } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return + (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) ? 2 : 1; + } + + int max_top_blobs_; + int pad_h_, pad_w_; + int channels_; + int height_, width_; + int pooled_height_, pooled_width_; + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int kstride_h_, kstride_w_; + Blob max_idx_; +}; + /** * @brief Pools the input image by taking the max, average, etc. within regions. * diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index ab9b7feace1..53112ac6a94 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -17,7 +17,7 @@ std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset + data_col_off];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT @@ -36,7 +36,7 @@ std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset + data_col_off];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index 0bab9c39bc1..fbc899d9cc9 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -95,9 +95,19 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, __global Dtype* data_im, const int data_off) { int d_im[6]; + int d_col_size[6]; int d_col_iter[6]; int d_col_start[6]; int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. @@ -110,20 +120,29 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, // Calculate col start/end indices. bool done = false; for (int i = 0; i < num_axes; ++i) { - d_col_start[i] = d_col_iter[i] = + // Old: + /*d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? - 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); - if (d_col_start[i] >= d_col_end[i]) { + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. - data_im[index + data_off] = 0; + data_im[index] = 0; done = true; - break; // for (int i = 0; i < num_axes; ++i) + break; // for (int i = 0; i < num_axes; ++i) } } if (done) { - continue; + continue; // CUDA_KERNEL_LOOP(index, n) } // Loop over the col to compute the output val. Dtype val = 0; @@ -131,30 +150,30 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, do { // Compute the final offset. int final_offset = 0; - int kernel_shape_prod = 1; + int coeff_prod = 1; for (int i = num_axes - 1; i >= 0; --i) { - final_offset += (d_im[i] - d_col_iter[i] * stride[i]) - * kernel_shape_prod; - kernel_shape_prod *= kernel_shape[i]; + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; } - final_offset += kernel_shape_prod * channel_im; - for (int i = 0; i < num_axes; ++i) { - final_offset *= col_shape[i + 1]; - final_offset += d_col_iter[i]; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; } - val += data_col[final_offset + data_col_off]; + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { - const int d_max = d_col_end[i]; - if (d_col_iter[i] == d_max - 1) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { d_col_iter[i] = d_col_start[i]; - } else { // d_col_iter[i] < d_max - 1 - ++d_col_iter[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; incremented = true; break; // for (int i = num_axes - 1; i >= 0; --i) } } // for (int i = num_axes - 1; i >= 0; --i) - } while (incremented); - data_im[index + data_off] = val; - } + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 7283f08af18..1ac9f759cfd 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -302,6 +302,7 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, cl_mem data_im, int data_off); + template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int data_col_off, const int num_spatial_axes, diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 25e87554aa1..71f7b2cd35c 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -169,15 +169,29 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // Make sure the OpenCL queue is empty before using CBLAS - ctx.get_queue().finish(); - Dtype *Aptr, *Bptr, *Cptr; - clGetMemObjectInfo(A, CL_MEM_HOST_PTR, sizeof(Dtype*), &Aptr, NULL); - clGetMemObjectInfo(B, CL_MEM_HOST_PTR, sizeof(Dtype*), &Bptr, NULL); - clGetMemObjectInfo(C, CL_MEM_HOST_PTR, sizeof(Dtype*), &Cptr, NULL); - - caffe_cpu_gemm(TransA, TransB, M, N, K, alpha, Aptr + offA, - Bptr + offB, beta, Cptr + offC); + Dtype* Aptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + A, true, CL_MAP_READ, sizeof(Dtype) * offA, + sizeof(Dtype) * M * K, 0, NULL, NULL, NULL)); + Dtype* Bptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + B, true, CL_MAP_READ, sizeof(Dtype) * offB, + sizeof(Dtype) * N * K, 0, NULL, NULL, NULL)); + Dtype* Cptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + C, true, CL_MAP_READ | CL_MAP_WRITE, + sizeof(Dtype) * offC, + sizeof(Dtype) * M * N, 0, NULL, NULL, NULL)); + + caffe_cpu_gemm(TransA, TransB, M, N, K, alpha, Aptr, + Bptr, beta, Cptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + A, Aptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + B, Bptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + C, Cptr, 0, NULL, NULL); } else { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; @@ -272,15 +286,31 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // Make sure the OpenCL queue is empty before using CBLAS - ctx.get_queue().finish(); - Dtype *Aptr, *xptr, *yptr; - clGetMemObjectInfo(A, CL_MEM_HOST_PTR, sizeof(Dtype*), &Aptr, NULL); - clGetMemObjectInfo(x, CL_MEM_HOST_PTR, sizeof(Dtype*), &xptr, NULL); - clGetMemObjectInfo(y, CL_MEM_HOST_PTR, sizeof(Dtype*), &yptr, NULL); - - caffe_cpu_gemv(TransA, M, N, alpha, Aptr + offA, xptr + offx, beta, - yptr + offy); + Dtype* Aptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + A, true, CL_MAP_READ, sizeof(Dtype) * offA, + sizeof(Dtype) * M * N, 0, NULL, NULL, NULL)); + Dtype* xptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + x, true, CL_MAP_READ, sizeof(Dtype) * offx, + sizeof(Dtype) * (TransA == CblasTrans) ? M : N, + 0, NULL, NULL, NULL)); + Dtype* yptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + y, true, CL_MAP_READ | CL_MAP_WRITE, + sizeof(Dtype) * offy, + sizeof(Dtype) * (TransA == CblasTrans) ? N : M, + 0, NULL, NULL, NULL)); + + caffe_cpu_gemv(TransA, M, N, alpha, Aptr, xptr, beta, + yptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + A, Aptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + x, xptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + y, yptr, 0, NULL, NULL); } else { #ifndef USE_CLBLAS @@ -346,13 +376,21 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // Make sure the OpenCL queue is empty before using CBLAS - ctx.get_queue().finish(); - Dtype *Xptr, *Yptr; - clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); - clGetMemObjectInfo(Y, CL_MEM_HOST_PTR, sizeof(Dtype*), &Yptr, NULL); - - caffe_axpy(N, alpha, Xptr + offX, Yptr + offY); + Dtype* Xptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + X, true, CL_MAP_READ, sizeof(Dtype) * offX, + sizeof(Dtype) * N, 0, NULL, NULL, NULL)); + Dtype* Yptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + Y, true, CL_MAP_WRITE, sizeof(Dtype) * offY, + sizeof(Dtype) * N, 0, NULL, NULL, NULL)); + + caffe_axpy(N, alpha, Xptr, Yptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + X, Xptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + Y, Yptr, 0, NULL, NULL); } else { #ifndef USE_CLBLAS @@ -443,12 +481,16 @@ void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // Make sure the OpenCL queue is empty before using CBLAS - ctx.get_queue().finish(); - Dtype *xptr; - clGetMemObjectInfo(x, CL_MEM_HOST_PTR, sizeof(Dtype*), &xptr, NULL); - caffe_scal(N, alpha, xptr + offx); + Dtype* xptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + x, true, CL_MAP_READ | CL_MAP_WRITE, + sizeof(Dtype) * offx, + sizeof(Dtype) * N, 0, NULL, NULL, NULL)); + + caffe_scal(N, alpha, xptr); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + x, xptr, 0, NULL, NULL); } else { #ifndef USE_CLBLAS @@ -506,13 +548,22 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // Make sure the OpenCL queue is empty before using CBLAS - ctx.get_queue().finish(); - Dtype *Xptr, *Yptr; - clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); - clGetMemObjectInfo(Y, CL_MEM_HOST_PTR, sizeof(Dtype*), &Yptr, NULL); + Dtype* Xptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + X, true, CL_MAP_READ, sizeof(Dtype) * offX, + sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + Dtype* Yptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + Y, true, CL_MAP_READ, sizeof(Dtype) * offY, + sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + + *out = caffe_cpu_dot(n, Xptr, Yptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + X, Xptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + Y, Yptr, 0, NULL, NULL); - *out = caffe_cpu_dot(n, Xptr + offX, Yptr + offY); } else { #ifndef USE_CLBLAS typedef typename viennacl::vector_base::size_type size_type; @@ -568,12 +619,15 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // Make sure the OpenCL queue is empty before using CBLAS - ctx.get_queue().finish(); - Dtype* Xptr; - clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); - *Y = caffe_cpu_asum(n, Xptr + offX); + Dtype* Xptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + X, true, CL_MAP_READ, sizeof(Dtype) * offX, + sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + *Y = caffe_cpu_asum(n, Xptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + X, Xptr, 0, NULL, NULL); } else { #ifndef USE_CLBLAS @@ -626,13 +680,21 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // Make sure the OpenCL queue is empty before using CBLAS - ctx.get_queue().finish(); - Dtype *Xptr, *Yptr; - clGetMemObjectInfo(X, CL_MEM_HOST_PTR, sizeof(Dtype*), &Xptr, NULL); - clGetMemObjectInfo(Y, CL_MEM_HOST_PTR, sizeof(Dtype*), &Yptr, NULL); - caffe_cpu_scale(n, alpha, Xptr + offX, Yptr + offY); - + Dtype* Xptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + X, true, CL_MAP_READ, sizeof(Dtype) * offX, + sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + Dtype* Yptr = reinterpret_cast( + clEnqueueMapBuffer(ctx.get_queue().handle().get(), + Y, true, CL_MAP_WRITE, sizeof(Dtype) * offY, + sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + + caffe_cpu_scale(n, alpha, Xptr, Yptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + X, Xptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), + Y, Yptr, 0, NULL, NULL); } else { #ifndef USE_CLBLAS diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 08821ef3d79..0000be5c88b 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -39,8 +39,8 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, LayerParameter pool_param; pool_param.mutable_pooling_param()->set_pool( PoolingParameter_PoolMethod_AVE); - pool_param.mutable_pooling_param()->set_pad(pre_pad_); - pool_param.mutable_pooling_param()->set_kernel_size(size_); + pool_param.mutable_pooling_param()->add_pad(pre_pad_); + pool_param.mutable_pooling_param()->add_kernel_size(size_); pool_layer_.reset(new PoolingLayer(pool_param)); pool_layer_->SetUp(square_top_vec_, pool_top_vec_); // Set up power_layer_ to compute (1 + alpha_/N^2 s)^-beta_, where s is diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 59b40c854a9..96ff4ae134a 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -176,8 +176,8 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // +ve example pairs dl = std::max(Dtype(0.0), threshold + margin - conn_data[minEdge]); loss += dl * nPair; - // Only use indicator for loss - dloss_data[minEdge] -= (dl > 0) * nPair; + // Use hinge loss + dloss_data[minEdge] -= dl * nPair; if (conn_data[minEdge] <= threshold) { // an error nPairIncorrect += nPair; } @@ -186,8 +186,8 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // -ve example pairs dl = std::max(Dtype(0.0), conn_data[minEdge] - threshold + margin); loss += dl * nPair; - // Only use indicator for loss - dloss_data[minEdge] += (dl > 0) * nPair; + // Use hinge loss + dloss_data[minEdge] += dl * nPair; if (conn_data[minEdge] > threshold) { // an error nPairIncorrect += nPair; } diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index c8d41499455..198f94369e6 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -18,22 +18,22 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { PoolingParameter pool_param = this->layer_param_.pooling_param(); if (pool_param.global_pooling()) { - CHECK(!(pool_param.has_kernel_size() || + CHECK(!((pool_param.kernel_size_size() > 0) || pool_param.has_kernel_h() || pool_param.has_kernel_w())) << "With Global_pooling: true Filter size cannot specified"; } else { - CHECK(!pool_param.has_kernel_size() != + CHECK(!(pool_param.kernel_size_size() > 0) != !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; - CHECK(pool_param.has_kernel_size() || + CHECK((pool_param.kernel_size_size() > 0) || (pool_param.has_kernel_h() && pool_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; } - CHECK((!pool_param.has_pad() && pool_param.has_pad_h() + CHECK((!(pool_param.pad_size() > 0) && pool_param.has_pad_h() && pool_param.has_pad_w()) || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; - CHECK((!pool_param.has_stride() && pool_param.has_stride_h() + CHECK((!(pool_param.stride_size() > 0) && pool_param.has_stride_h() && pool_param.has_stride_w()) || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; @@ -42,8 +42,8 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, kernel_h_ = bottom[0]->height(); kernel_w_ = bottom[0]->width(); } else { - if (pool_param.has_kernel_size()) { - kernel_h_ = kernel_w_ = pool_param.kernel_size(); + if (pool_param.kernel_size_size() > 0) { + kernel_h_ = kernel_w_ = pool_param.kernel_size(0); } else { kernel_h_ = pool_param.kernel_h(); kernel_w_ = pool_param.kernel_w(); @@ -52,13 +52,15 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; if (!pool_param.has_pad_h()) { - pad_h_ = pad_w_ = pool_param.pad(); + pad_h_ = pad_w_ = pool_param.pad_size() > 0 ? + pool_param.pad(0) : 0; } else { pad_h_ = pool_param.pad_h(); pad_w_ = pool_param.pad_w(); } if (!pool_param.has_stride_h()) { - stride_h_ = stride_w_ = pool_param.stride(); + stride_h_ = stride_w_ = pool_param.stride_size() > 0 ? + pool_param.stride(0) : 1; } else { stride_h_ = pool_param.stride_h(); stride_w_ = pool_param.stride_w(); diff --git a/src/caffe/layers/pooling_nd_layer.cpp b/src/caffe/layers/pooling_nd_layer.cpp new file mode 100644 index 00000000000..378297ac0fe --- /dev/null +++ b/src/caffe/layers/pooling_nd_layer.cpp @@ -0,0 +1,134 @@ +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +using std::min; +using std::max; + +template +void PoolingNDLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + // Set the max number of top blobs before calling base Layer::SetUp. + // If doing MAX pooling, we can optionally output an extra top Blob + // for the mask. Otherwise, we only have one top Blob. + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + max_top_blobs_ = 2; + } else { + max_top_blobs_ = 1; + } + PoolingParameter pool_param = this->layer_param_.pooling_param(); + CHECK(!(pool_param.kernel_size_size() > 0) != + !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK((pool_param.kernel_size_size() > 0) || + (pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + CHECK((!(pool_param.pad_size() > 0) && pool_param.has_pad_h() + && pool_param.has_pad_w()) + || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!(pool_param.stride_size() > 0) && pool_param.has_stride_h() + && pool_param.has_stride_w()) + || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + if (pool_param.kernel_size_size() > 0) { + kernel_h_ = kernel_w_ = pool_param.kernel_size(0); + } else { + kernel_h_ = pool_param.kernel_h(); + kernel_w_ = pool_param.kernel_w(); + } + CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; + if (!pool_param.has_pad_h()) { + pad_h_ = pad_w_ = pool_param.pad_size() > 0 ? + pool_param.pad(0) : 0; + } else { + pad_h_ = pool_param.pad_h(); + pad_w_ = pool_param.pad_w(); + } + CHECK_EQ(pad_h_, 0); + CHECK_EQ(pad_w_, 0); + if (!pool_param.has_stride_h()) { + stride_h_ = stride_w_ = pool_param.stride_size() > 0 ? + pool_param.stride(0) : 1; + } else { + stride_h_ = pool_param.stride_h(); + stride_w_ = pool_param.stride_w(); + } + if (pad_h_ != 0 || pad_w_ != 0) { + CHECK(this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE + || this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) + << "Padding implemented only for average and max pooling."; + CHECK_LT(pad_h_, kernel_h_); + CHECK_LT(pad_w_, kernel_w_); + } + if (!pool_param.has_kstride_h()) { + kstride_h_ = kstride_w_ = pool_param.kstride_size() > 0 ? + pool_param.kstride(0) : 1; + } else { + kstride_h_ = pool_param.kstride_h(); + kstride_w_ = pool_param.kstride_w(); + } + + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + pooled_height_ = static_cast(ceil( + static_cast(height_ + 2 * pad_h_ - ext_kernel_h) / stride_h_)) + 1; + pooled_width_ = static_cast(ceil( + static_cast(width_ + 2 * pad_w_ - ext_kernel_w) / stride_w_)) + 1; + + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); + if (top.size() > 1) { + top[1]->ReshapeLike(*top[0]); + } + // If max pooling, we will initialize the vector index part. + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX && top.size() == 1) { + max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + } +} + +template +void PoolingNDLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + LayerSetUp(bottom, top); +} + +template +void PoolingNDLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + LOG(FATAL)<< "Forward_cpu() not implemented in PoolingNDLayer."; +} + +template +void PoolingNDLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + LOG(FATAL)<< "Backward_cpu() not implemented in PoolingNDLayer."; + return; +} + +#ifdef CPU_ONLY +STUB_GPU(PoolingNDLayer); +#endif + +INSTANTIATE_CLASS(PoolingNDLayer); +REGISTER_LAYER_CLASS(PoolingND); + +} // namespace caffe diff --git a/src/caffe/layers/pooling_nd_layer.cu b/src/caffe/layers/pooling_nd_layer.cu new file mode 100644 index 00000000000..9907f7f3e64 --- /dev/null +++ b/src/caffe/layers/pooling_nd_layer.cu @@ -0,0 +1,282 @@ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif // USE_GREENTEA + +namespace caffe { + +#ifdef USE_CUDA +template +__global__ void MaxPoolNDForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* top_data, int* mask, Dtype* top_mask) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + bottom_data += (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data[maxidx]; + } + } + } + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} +#endif // USE_CUDA + +template +void PoolingNDLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int count = top[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + int* mask = NULL; + Dtype* top_mask = NULL; + + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolNDForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, top_data, + mask, top_mask); + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + CUDA_POST_KERNEL_CHECK; + +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_forward_nd")); + viennacl::ocl::enqueue( + oclk_max_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), + bottom[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapHandle((cl_mem) top_data, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx)), + ctx.get_queue()); + } + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } +#endif // USE_GREENTEA + } +} + +#ifdef USE_CUDA +template +__global__ void MaxPoolNDBackward(const int nthreads, const Dtype* top_diff, + const int* mask, const Dtype* top_mask, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff += offset; + if (mask) { + mask += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } else { + mask += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} +#endif // USE_CUDA + +template +void PoolingNDLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + const int* mask = NULL; + const Dtype* top_mask = NULL; + + int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; + int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_set(count, Dtype(0.), bottom_diff); + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolNDBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + bottom_diff); + break; + default: + LOG(FATAL)<< + "Unknown or unsupported pooling method in Backward_gpu()."; + } + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), + (cl_mem) bottom_diff, 0); + + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_backward_nd")); + viennacl::ocl::enqueue( + oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx), + top[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); + } + break; + default: + LOG(FATAL)<< + "Unknown or unsupported pooling method in Backward_gpu()."; + } +#endif // USE_GREENTEA + } + } + +INSTANTIATE_LAYER_GPU_FUNCS(PoolingNDLayer); + +} // namespace caffe diff --git a/src/caffe/layers/pooling_sk_layer.cpp b/src/caffe/layers/pooling_sk_layer.cpp index 8527eec4eec..14d87b59586 100644 --- a/src/caffe/layers/pooling_sk_layer.cpp +++ b/src/caffe/layers/pooling_sk_layer.cpp @@ -26,22 +26,22 @@ void PoolingSKLayer::LayerSetUp(const vector*>& bottom, max_top_blobs_ = 1; } PoolingParameter pool_param = this->layer_param_.pooling_param(); - CHECK(!pool_param.has_kernel_size() != + CHECK(!(pool_param.kernel_size_size() > 0) != !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; - CHECK(pool_param.has_kernel_size() || + CHECK((pool_param.kernel_size_size() > 0) || (pool_param.has_kernel_h() && pool_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; - CHECK((!pool_param.has_pad() && pool_param.has_pad_h() + CHECK((!(pool_param.pad_size() > 0) && pool_param.has_pad_h() && pool_param.has_pad_w()) || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) << "pad is pad OR pad_h and pad_w are required."; - CHECK((!pool_param.has_stride() && pool_param.has_stride_h() + CHECK((!(pool_param.stride_size() > 0) && pool_param.has_stride_h() && pool_param.has_stride_w()) || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) << "Stride is stride OR stride_h and stride_w are required."; - if (pool_param.has_kernel_size()) { - kernel_h_ = kernel_w_ = pool_param.kernel_size(); + if (pool_param.kernel_size_size() > 0) { + kernel_h_ = kernel_w_ = pool_param.kernel_size(0); } else { kernel_h_ = pool_param.kernel_h(); kernel_w_ = pool_param.kernel_w(); @@ -49,7 +49,8 @@ void PoolingSKLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; if (!pool_param.has_pad_h()) { - pad_h_ = pad_w_ = pool_param.pad(); + pad_h_ = pad_w_ = pool_param.pad_size() > 0 ? + pool_param.pad(0) : 0; } else { pad_h_ = pool_param.pad_h(); pad_w_ = pool_param.pad_w(); @@ -57,7 +58,8 @@ void PoolingSKLayer::LayerSetUp(const vector*>& bottom, CHECK_EQ(pad_h_, 0); CHECK_EQ(pad_w_, 0); if (!pool_param.has_stride_h()) { - stride_h_ = stride_w_ = pool_param.stride(); + stride_h_ = stride_w_ = pool_param.stride_size() > 0 ? + pool_param.stride(0) : 1; } else { stride_h_ = pool_param.stride_h(); stride_w_ = pool_param.stride_w(); @@ -72,7 +74,8 @@ void PoolingSKLayer::LayerSetUp(const vector*>& bottom, CHECK_LT(pad_w_, kernel_w_); } if (!pool_param.has_kstride_h()) { - kstride_h_ = kstride_w_ = pool_param.kstride(); + kstride_h_ = kstride_w_ = pool_param.kstride_size() > 0 ? + pool_param.kstride(0) : 1; } else { kstride_h_ = pool_param.kstride_h(); kstride_w_ = pool_param.kstride_w(); diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 4f3e0e66ac4..332fae807e1 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -695,16 +695,15 @@ message PoolingParameter { optional PoolMethod pool = 1 [default = MAX]; // The pooling method // Pad, kernel size, and stride are all given as a single value for equal // dimensions in height and width or as Y, X pairs. - optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) + repeated uint32 pad = 4; // The padding size (equal in Y, X), default 0 optional uint32 pad_h = 9 [default = 0]; // The padding height optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) + repeated uint32 kernel_size = 2; // The kernel size (square) optional uint32 kernel_h = 5; // The kernel height optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) + repeated uint32 stride = 3; // The stride (equal in Y, X), default 1 optional uint32 stride_h = 7; // The stride height optional uint32 stride_w = 8; // The stride width - optional uint32 stride_d = 16; // The stride depth enum Engine { DEFAULT = 0; CAFFE = 1; @@ -714,9 +713,9 @@ message PoolingParameter { // If global_pooling then it will pool over the size of the bottom by doing // kernel_h = bottom->height and kernel_w = bottom->width optional bool global_pooling = 12 [default = false]; - optional uint32 kstride = 13 [default = 0]; - optional uint32 kstride_h = 14 [default = 0]; - optional uint32 kstride_w = 15 [default = 0]; + repeated uint32 kstride = 13; // The kernel stride, default 1 + optional uint32 kstride_h = 14; + optional uint32 kstride_w = 15; } message PowerParameter { diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 977d96b9678..af3909fbd87 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -21,20 +21,21 @@ SyncedMemory::~SyncedMemory() { #ifdef USE_CUDA // Free device memory cudaFree(gpu_ptr_); + gpu_ptr_ = nullptr; device_context_->DecreaseMemoryUsage(size_); #endif // USE_CUDA } else { #ifdef USE_GREENTEA // Free device memory - clReleaseMemObject(cl_gpu_mem_); - device_context_->DecreaseMemoryUsage(size_); viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_->id()); ctx.get_queue().finish(); - // Special case, return to avoid double-freeing - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - return; - } + CHECK_EQ(CL_SUCCESS, clReleaseMemObject(cl_gpu_mem_)) + << "OpenCL memory corruption"; + gpu_ptr_ = nullptr; + cl_gpu_mem_ = nullptr; + ctx.get_queue().finish(); + device_context_->DecreaseMemoryUsage(size_); #endif // USE_GREENTEA } } @@ -42,6 +43,7 @@ SyncedMemory::~SyncedMemory() { // Free host memory if (cpu_ptr_ && own_cpu_data_) { CaffeFreeHost(cpu_ptr_); + cpu_ptr_ = nullptr; } } @@ -56,7 +58,7 @@ inline void SyncedMemory::to_cpu() { } case HEAD_AT_GPU: { #ifndef CPU_ONLY - if (cpu_ptr_ == NULL) { + if (cpu_ptr_ == nullptr) { CaffeMallocHost(&cpu_ptr_, size_); own_cpu_data_ = true; } @@ -68,11 +70,7 @@ inline void SyncedMemory::to_cpu() { #ifdef USE_GREENTEA viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_->id()); - ctx.get_queue().finish(); - // On the CPU, memory is shared (and no copy needed) - if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU || !own_cpu_data_) { - greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, 0, cpu_ptr_, &ctx); - } + greentea_gpu_memcpy(size_, (cl_mem) gpu_ptr_, 0, cpu_ptr_, &ctx); ctx.get_queue().finish(); #endif } @@ -105,23 +103,18 @@ inline void SyncedMemory::to_gpu() { ctx.get_queue().finish(); cl_int err; if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - // CPU memory is shared - if (cpu_ptr_ == NULL) { - CaffeMallocHost(&cpu_ptr_, size_); - caffe_memset(size_, 0, cpu_ptr_); - own_cpu_data_ = true; - } cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), - CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, - size_, cpu_ptr_, &err); - device_context_->IncreaseMemoryUsage(size_); + CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, + size_, nullptr, &err); } else { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - size_, NULL, &err); - device_context_->IncreaseMemoryUsage(size_); - int alpha = 0; - greentea_memset(device_context_->id(), size_, alpha, cl_gpu_mem_, 0); + size_, nullptr, &err); } + CHECK_EQ(0, err) << "OpenCL buffer allocation of size " + << size_ << " failed."; + device_context_->IncreaseMemoryUsage(size_); + int alpha = 0; + greentea_memset(device_context_->id(), size_, alpha, cl_gpu_mem_, 0); gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); #endif // USE_GREENTEA @@ -132,7 +125,7 @@ inline void SyncedMemory::to_gpu() { case HEAD_AT_CPU: { if (device_context_->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA - if (gpu_ptr_ == NULL) { + if (gpu_ptr_ == nullptr) { CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); device_context_->IncreaseMemoryUsage(size_); } @@ -143,25 +136,23 @@ inline void SyncedMemory::to_gpu() { viennacl::ocl::context ctx = viennacl::ocl::get_context( device_context_->id()); ctx.get_queue().finish(); - if (gpu_ptr_ == NULL) { + if (gpu_ptr_ == nullptr) { cl_int err; - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU && own_cpu_data_) { + if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { cl_gpu_mem_ = clCreateBuffer( - ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, - size_, cpu_ptr_, &err); - device_context_->IncreaseMemoryUsage(size_); + ctx.handle().get(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, + size_, nullptr, &err); } else { cl_gpu_mem_ = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - size_, NULL, &err); - device_context_->IncreaseMemoryUsage(size_); + size_, nullptr, &err); } + CHECK_EQ(0, err) << "OpenCL buffer allocation of size " + << size_ << " failed."; + device_context_->IncreaseMemoryUsage(size_); gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); } - // On the CPU, memory is shared (and no copy needed) - if (ctx.devices()[0].type() != CL_DEVICE_TYPE_CPU || !own_cpu_data_) { - greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, &ctx); - } + greentea_gpu_memcpy(size_, cpu_ptr_, (cl_mem) gpu_ptr_, 0, &ctx); ctx.get_queue().finish(); #endif // USE_GREENTEA } @@ -184,25 +175,7 @@ const void* SyncedMemory::cpu_data() { void SyncedMemory::set_cpu_data(void* data) { CHECK(data); -#ifndef CPU_ONLY -#ifdef USE_GREENTEA - if (Caffe::mode() == Caffe::Brew::GPU && - device_context_->backend() == Backend::BACKEND_OpenCL) { - viennacl::ocl::context ctx = viennacl::ocl::get_context( - device_context_->id()); - if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - ctx.get_queue().finish(); - cpu_ptr_ = data; - gpu_ptr_ = nullptr; - head_ = HEAD_AT_CPU; - own_cpu_data_ = false; - // Return, skipping release of the host memory - return; - } - } -#endif // USE_GREENTEA -#endif // !CPU_ONLY - if (own_cpu_data_) { + if (cpu_ptr_ && own_cpu_data_) { CaffeFreeHost(cpu_ptr_); } cpu_ptr_ = data; diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp index a82493b872f..6caded9b9f4 100644 --- a/src/caffe/test/test_convolution_nd_layer.cpp +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -30,6 +30,14 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { shape.add_dim(5); // Height shape.add_dim(5); // Width blob_bottom_->Reshape(shape); + + shape.add_dim(1); // Batch + shape.add_dim(1); // Channels + shape.add_dim(1); // Depth + shape.add_dim(1); // Height + shape.add_dim(1); // Width + blob_top_->Reshape(shape); + // fill the values blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); @@ -53,7 +61,9 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { convolution_param->add_kstride(2); convolution_param->add_kstride(2); - convolution_param->set_num_output(4); + convolution_param->set_num_output(1); + + convolution_param->set_axis(1); convolution_param->mutable_weight_filler()->set_type("constant"); convolution_param->mutable_weight_filler()->set_value(1); @@ -91,7 +101,54 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { } void TestBackward() { - // TODO + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + + convolution_param->add_kernel_size(3); + convolution_param->add_kernel_size(3); + convolution_param->add_kernel_size(3); + + convolution_param->add_kstride(2); + convolution_param->add_kstride(2); + convolution_param->add_kstride(2); + + convolution_param->set_num_output(1); + + convolution_param->set_axis(1); + + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0); + + ConvolutionNDLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam *top_diff = blob_top_->mutable_cpu_diff(); + + *top_diff = 1; + + std::vector prop_down; + prop_down.push_back(true); + + layer.Backward(this->blob_top_vec_, prop_down, this->blob_bottom_vec_); + + const TypeParam *bottom_diff = blob_bottom_->cpu_diff(); + + int d = blob_bottom_->shape(2); + int h = blob_bottom_->shape(3); + int w = blob_bottom_->shape(4); + + for (int cd = 0; cd < d; ++cd) { + for (int ch = 0; ch < h; ++ch) { + for (int cw = 0; cw < w; ++cw) { + if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { + EXPECT_EQ(1, bottom_diff[cw + ch * w + cd * w * h]); + } + } + } + } } Blob* const blob_bottom_; diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index 611d9790863..f1bc4bcc442 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -44,8 +44,8 @@ TYPED_TEST(MaxPoolingDropoutTest, TestSetup) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); PoolingLayer max_layer(layer_param); max_layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); DropoutLayer dropout_layer(layer_param); @@ -61,8 +61,8 @@ TYPED_TEST(MaxPoolingDropoutTest, TestForward) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -91,8 +91,8 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index ef861bc8e62..4a43772f9d3 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -46,7 +46,7 @@ class PoolingLayerTest : public MultiDeviceTest { void TestForwardSquare() { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(2); + pooling_param->add_kernel_size(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; @@ -374,8 +374,8 @@ TYPED_TEST(PoolingLayerTest, TestSetup) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); @@ -388,9 +388,9 @@ TYPED_TEST(PoolingLayerTest, TestSetupPadded) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); - pooling_param->set_pad(1); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); + pooling_param->add_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -461,8 +461,8 @@ TYPED_TEST(PoolingLayerTest, TestGradientMax) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); - pooling_param->set_pad(1); + pooling_param->add_stride(2); + pooling_param->add_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); PoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); @@ -476,9 +476,9 @@ TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); - pooling_param->set_pad(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); + pooling_param->add_pad(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); this->blob_bottom_->Reshape(1, 1, 3, 3); // Input: @@ -525,7 +525,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); + pooling_param->add_stride(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); this->blob_top_vec_.push_back(this->blob_top_mask_); PoolingLayer layer(layer_param); @@ -541,9 +541,9 @@ TYPED_TEST(PoolingLayerTest, TestForwardAve) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(1); - pooling_param->set_pad(1); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(1); + pooling_param->add_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); this->blob_bottom_->Reshape(1, 1, 3, 3); FillerParameter filler_param; @@ -577,7 +577,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientAve) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); + pooling_param->add_stride(2); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); PoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); @@ -595,8 +595,8 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); - pooling_param->set_pad(2); + pooling_param->add_stride(2); + pooling_param->add_pad(2); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); PoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index f84464c322c..d8a798c85ec 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -57,8 +57,8 @@ TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); @@ -80,8 +80,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -123,8 +123,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { LayerParameter layer_param; layer_param.set_phase(TEST); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -160,8 +160,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); PoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 88a25643c9c..aa95ce9a575 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -315,13 +315,13 @@ template void col2im_gpu(const double* data_col, const int channels, double* data_im); -template +template __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, - const Dtype* data_im, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, - Dtype* data_col) { + const Dtype* data_im, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, const int* kstride, + Dtype* data_col) { int d_temp[6]; // NOLINT(runtime/arrays) int d_iter[6]; // NOLINT(runtime/arrays) int i; @@ -389,17 +389,27 @@ __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, } // CUDA_KERNEL_LOOP(index, n) } -template +template __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, - const Dtype* data_col, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, - Dtype* data_im) { + const Dtype* data_col, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, const int* kstride, + Dtype* data_im) { int d_im[6]; // NOLINT(runtime/arrays) + int d_col_size[6]; // NOLINT(runtime/arrays) int d_col_iter[6]; // NOLINT(runtime/arrays) int d_col_start[6]; // NOLINT(runtime/arrays) int d_col_end[6]; // NOLINT(runtime/arrays) + int d_ext_patch[6]; // NOLINT(runtime/arrays) + int d_idx[6]; // NOLINT(runtime/arrays) + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. @@ -412,11 +422,20 @@ __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, // Calculate col start/end indices. bool done = false; for (int i = 0; i < num_axes; ++i) { - d_col_start[i] = d_col_iter[i] = + // Old: + /*d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); - if (d_col_start[i] >= d_col_end[i]) { + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; @@ -433,25 +452,25 @@ __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, do { // Compute the final offset. int final_offset = 0; - int kernel_shape_prod = 1; + int coeff_prod = 1; for (int i = num_axes - 1; i >= 0; --i) { - final_offset += - (d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod; - kernel_shape_prod *= kernel_shape[i]; + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; } - final_offset += kernel_shape_prod * channel_im; - for (int i = 0; i < num_axes; ++i) { - final_offset *= col_shape[i + 1]; - final_offset += d_col_iter[i]; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; } + final_offset += channel_im * coeff_prod; val += data_col[final_offset]; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { - const int d_max = d_col_end[i]; - if (d_col_iter[i] == d_max - 1) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { d_col_iter[i] = d_col_start[i]; - } else { // d_col_iter[i] < d_max - 1 - ++d_col_iter[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; incremented = true; break; // for (int i = num_axes - 1; i >= 0; --i) } diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 3425f5f548c..58748a2fc8f 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -195,7 +195,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, if (type == "conv") { layer_param->mutable_convolution_param()->add_pad(v0_layer_param.pad()); } else if (type == "pool") { - layer_param->mutable_pooling_param()->set_pad(v0_layer_param.pad()); + layer_param->mutable_pooling_param()->add_pad(v0_layer_param.pad()); } else { LOG(ERROR)<< "Unknown parameter pad for layer type " << type; is_fully_compatible = false; @@ -206,7 +206,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_convolution_param()->add_kernel_size( v0_layer_param.kernelsize()); } else if (type == "pool") { - layer_param->mutable_pooling_param()->set_kernel_size( + layer_param->mutable_pooling_param()->add_kernel_size( v0_layer_param.kernelsize()); } else { LOG(ERROR)<< "Unknown parameter kernelsize for layer type " << type; @@ -227,7 +227,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, layer_param->mutable_convolution_param()->add_stride( v0_layer_param.stride()); } else if (type == "pool") { - layer_param->mutable_pooling_param()->set_stride( + layer_param->mutable_pooling_param()->add_stride( v0_layer_param.stride()); } else { LOG(ERROR)<< "Unknown parameter stride for layer type " << type; From b7921dd977eb487fc8e2d94198dcaf7a72549125 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 11 Jul 2015 19:58:57 -0400 Subject: [PATCH 142/254] Update for CPU mode when compiling with Greentea or CUDA. --- src/caffe/common.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 70c94d0032f..e751370395a 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -172,17 +172,19 @@ void Caffe::set_random_seed(const unsigned int seed) { } void Caffe::Synchronize(int device_id) { - DeviceContext * device_context = Caffe::GetDeviceContext(device_id); - if (device_context->backend() == BACKEND_CUDA) { + if(Caffe::mode() == Brew::GPU) { + DeviceContext * device_context = Caffe::GetDeviceContext(device_id); + if (device_context->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - cudaDeviceSynchronize(); + cudaDeviceSynchronize(); #endif - } else { + } else { #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - GetDeviceContext(device_id)->id()); - ctx.get_queue().finish(); + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + GetDeviceContext(device_id)->id()); + ctx.get_queue().finish(); #endif + } } } From 7de2f44c537e06ec3b8f371b32798396ee645136 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 11 Jul 2015 19:59:44 -0400 Subject: [PATCH 143/254] LINT fix. --- src/caffe/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index e751370395a..c487257b4cf 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -172,7 +172,7 @@ void Caffe::set_random_seed(const unsigned int seed) { } void Caffe::Synchronize(int device_id) { - if(Caffe::mode() == Brew::GPU) { + if (Caffe::mode() == Brew::GPU) { DeviceContext * device_context = Caffe::GetDeviceContext(device_id); if (device_context->backend() == BACKEND_CUDA) { #ifdef USE_CUDA From d68582302f6c7d8d36614b4d4785f77646d893f6 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 15 Jul 2015 02:29:42 +0200 Subject: [PATCH 144/254] Revised ND-SK kernels for convolution and pooling. --- error.txt | 26 ++ include/caffe/vision_layers.hpp | 19 +- src/caffe/greentea/cl_kernels.cpp | 4 + src/caffe/greentea/cl_kernels/pooling_nd.cl | 177 ++++++++++++ src/caffe/layers/pooling_nd_layer.cpp | 158 +++++++---- src/caffe/layers/pooling_nd_layer.cu | 281 ++++++++++++-------- src/caffe/layers/pooling_sk_layer.cu | 2 +- src/caffe/proto/caffe.proto | 1 + src/caffe/test/test_pooling_nd_layer.cpp | 198 ++++++++++++++ src/caffe/util/im2col.cu | 4 +- 10 files changed, 697 insertions(+), 173 deletions(-) create mode 100644 error.txt create mode 100644 src/caffe/greentea/cl_kernels/pooling_nd.cl create mode 100644 src/caffe/test/test_pooling_nd_layer.cpp diff --git a/error.txt b/error.txt new file mode 100644 index 00000000000..97db1dd3620 --- /dev/null +++ b/error.txt @@ -0,0 +1,26 @@ +Setting to use device 1 +Note: Google Test filter = *ND* +[==========] Running 12 tests from 4 test cases. +[----------] Global test environment set-up. +[----------] 3 tests from ConvolutionNDLayerTest/0, where TypeParam = float +[ RUN ] ConvolutionNDLayerTest/0.TestSetup +[ OK ] ConvolutionNDLayerTest/0.TestSetup (0 ms) +[ RUN ] ConvolutionNDLayerTest/0.TestForward +[ OK ] ConvolutionNDLayerTest/0.TestForward (2 ms) +[ RUN ] ConvolutionNDLayerTest/0.TestBackward +[ OK ] ConvolutionNDLayerTest/0.TestBackward (2 ms) +[----------] 3 tests from ConvolutionNDLayerTest/0 (4 ms total) + +[----------] 3 tests from ConvolutionNDLayerTest/1, where TypeParam = double +[ RUN ] ConvolutionNDLayerTest/1.TestSetup +[ OK ] ConvolutionNDLayerTest/1.TestSetup (0 ms) +[ RUN ] ConvolutionNDLayerTest/1.TestForward +[ OK ] ConvolutionNDLayerTest/1.TestForward (1 ms) +[ RUN ] ConvolutionNDLayerTest/1.TestBackward +[ OK ] ConvolutionNDLayerTest/1.TestBackward (2 ms) +[----------] 3 tests from ConvolutionNDLayerTest/1 (3 ms total) + +[----------] 3 tests from PoolingNDLayerTest/0, where TypeParam = float +[ RUN ] PoolingNDLayerTest/0.TestSetup +[ OK ] PoolingNDLayerTest/0.TestSetup (0 ms) +[ RUN ] PoolingNDLayerTest/0.TestForward diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 5b665e8fb94..217ab0f6608 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -920,14 +920,19 @@ class PoolingNDLayer : public Layer { == PoolingParameter_PoolMethod_MAX) ? 2 : 1; } - int max_top_blobs_; - int pad_h_, pad_w_; + Blob kernel_shape_; + Blob ext_kernel_shape_; + Blob stride_; + Blob pad_; + Blob kstride_; + Blob size_; + Blob pooled_size_; + + int channel_axis_; + int num_spatial_axes_; int channels_; - int height_, width_; - int pooled_height_, pooled_width_; - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int kstride_h_, kstride_w_; + + int max_top_blobs_; Blob max_idx_; }; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 53112ac6a94..34813dcd0c5 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -23,6 +23,7 @@ std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#en std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT @@ -42,6 +43,7 @@ std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#e std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT @@ -65,6 +67,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << math_float << "\n\n"; // NOLINT ss << mergecrop_float << "\n\n"; // NOLINT ss << pooling_float << "\n\n"; // NOLINT + ss << pooling_nd_float << "\n\n"; // NOLINT ss << pooling_sk_float << "\n\n"; // NOLINT ss << slice_float << "\n\n"; // NOLINT ss << softmax_loss_float << "\n\n"; // NOLINT @@ -87,6 +90,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << math_double << "\n\n"; // NOLINT ss << mergecrop_double << "\n\n"; // NOLINT ss << pooling_double << "\n\n"; // NOLINT + ss << pooling_nd_double << "\n\n"; // NOLINT ss << pooling_sk_double << "\n\n"; // NOLINT ss << slice_double << "\n\n"; // NOLINT ss << softmax_loss_double << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/pooling_nd.cl b/src/caffe/greentea/cl_kernels/pooling_nd.cl new file mode 100644 index 00000000000..58c14403fac --- /dev/null +++ b/src/caffe/greentea/cl_kernels/pooling_nd.cl @@ -0,0 +1,177 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* bottom_data, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* top_data, + const int use_mask, + __global int* mask, __global Dtype* top_mask) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (use_mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* bottom_diff) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (use_mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} diff --git a/src/caffe/layers/pooling_nd_layer.cpp b/src/caffe/layers/pooling_nd_layer.cpp index 378297ac0fe..38f50a46554 100644 --- a/src/caffe/layers/pooling_nd_layer.cpp +++ b/src/caffe/layers/pooling_nd_layer.cpp @@ -26,81 +26,133 @@ void PoolingNDLayer::LayerSetUp(const vector*>& bottom, max_top_blobs_ = 1; } PoolingParameter pool_param = this->layer_param_.pooling_param(); + channel_axis_ = bottom[0]->CanonicalAxisIndex(pool_param.axis()); + channels_ = bottom[0]->shape(channel_axis_); + + const int first_spatial_axis = channel_axis_ + 1; + const int num_axes = bottom[0]->num_axes(); + num_spatial_axes_ = num_axes - first_spatial_axis; + CHECK_GE(num_spatial_axes_, 1); + vector size_shape(1, num_spatial_axes_); + + kernel_shape_.Reshape(size_shape); + int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + CHECK(!(pool_param.kernel_size_size() > 0) != !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; CHECK((pool_param.kernel_size_size() > 0) || (pool_param.has_kernel_h() && pool_param.has_kernel_w())) << "For non-square filters both kernel_h and kernel_w are required."; - CHECK((!(pool_param.pad_size() > 0) && pool_param.has_pad_h() - && pool_param.has_pad_w()) - || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) - << "pad is pad OR pad_h and pad_w are required."; - CHECK((!(pool_param.stride_size() > 0) && pool_param.has_stride_h() - && pool_param.has_stride_w()) - || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) - << "Stride is stride OR stride_h and stride_w are required."; - if (pool_param.kernel_size_size() > 0) { - kernel_h_ = kernel_w_ = pool_param.kernel_size(0); + + if (pool_param.has_kernel_h() && pool_param.has_kernel_w()) { + kernel_shape_data[0] = pool_param.kernel_h(); + kernel_shape_data[1] = pool_param.kernel_w(); } else { - kernel_h_ = pool_param.kernel_h(); - kernel_w_ = pool_param.kernel_w(); + const int num_kernel_dims = pool_param.kernel_size_size(); + CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_); + for (int i = 0; i < num_spatial_axes_; ++i) { + kernel_shape_data[i] = + pool_param.kernel_size((num_kernel_dims == 1) ? 0 : i); + CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; + } } - CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; - if (!pool_param.has_pad_h()) { - pad_h_ = pad_w_ = pool_param.pad_size() > 0 ? - pool_param.pad(0) : 0; + + // Setup stride dimensions (stride_). + stride_.Reshape(size_shape); + int* stride_data = stride_.mutable_cpu_data(); + if (pool_param.has_stride_h() || pool_param.has_stride_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "stride_h & stride_w can only be used for 2D convolution."; + CHECK_EQ(0, pool_param.stride_size()) + << "Either stride or stride_h/w should be specified; not both."; + stride_data[0] = pool_param.stride_h(); + stride_data[1] = pool_param.stride_w(); } else { - pad_h_ = pool_param.pad_h(); - pad_w_ = pool_param.pad_w(); + const int num_stride_dims = pool_param.stride_size(); + CHECK(num_stride_dims == 0 || num_stride_dims == 1 || + num_stride_dims == num_spatial_axes_) + << "stride must be specified once, or once per spatial dimension " + << "(stride specified " << num_stride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultStride = 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : + pool_param.stride((num_stride_dims == 1) ? 0 : i); + CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; + } } - CHECK_EQ(pad_h_, 0); - CHECK_EQ(pad_w_, 0); - if (!pool_param.has_stride_h()) { - stride_h_ = stride_w_ = pool_param.stride_size() > 0 ? - pool_param.stride(0) : 1; + // Setup pad dimensions (pad_). + pad_.Reshape(size_shape); + int* pad_data = pad_.mutable_cpu_data(); + if (pool_param.has_pad_h() || pool_param.has_pad_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "pad_h & pad_w can only be used for 2D convolution."; + CHECK_EQ(0, pool_param.pad_size()) + << "Either pad or pad_h/w should be specified; not both."; + pad_data[0] = pool_param.pad_h(); + pad_data[1] = pool_param.pad_w(); } else { - stride_h_ = pool_param.stride_h(); - stride_w_ = pool_param.stride_w(); - } - if (pad_h_ != 0 || pad_w_ != 0) { - CHECK(this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_AVE - || this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) - << "Padding implemented only for average and max pooling."; - CHECK_LT(pad_h_, kernel_h_); - CHECK_LT(pad_w_, kernel_w_); + const int num_pad_dims = pool_param.pad_size(); + CHECK(num_pad_dims == 0 || num_pad_dims == 1 || + num_pad_dims == num_spatial_axes_) + << "pad must be specified once, or once per spatial dimension " + << "(pad specified " << num_pad_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultPad = 0; + for (int i = 0; i < num_spatial_axes_; ++i) { + pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : + pool_param.pad((num_pad_dims == 1) ? 0 : i); + } } - if (!pool_param.has_kstride_h()) { - kstride_h_ = kstride_w_ = pool_param.kstride_size() > 0 ? - pool_param.kstride(0) : 1; + // Setup kernel stride dimensions + kstride_.Reshape(size_shape); + int* kstride_data = kstride_.mutable_cpu_data(); + if (pool_param.has_kstride_h() || pool_param.has_kstride_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "kstride_h & kstride_w can only be used for 2D convolution."; + CHECK_EQ(0, pool_param.kstride_size()) + << "Etiher kstride or kstirde_h/w should be specified; not both."; + kstride_data[0] = pool_param.pad_h(); + kstride_data[1] = pool_param.pad_w(); } else { - kstride_h_ = pool_param.kstride_h(); - kstride_w_ = pool_param.kstride_w(); + const int num_kstride_dims = pool_param.kstride_size(); + CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || + num_kstride_dims == num_spatial_axes_) + << "kstride must be specified once, or once per spatial dimension " + << "(kstride specified " << num_kstride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultKstride = 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : + pool_param.kstride((num_kstride_dims == 1) ? 0 : i); + } } - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; + size_.Reshape(size_shape); + pooled_size_.Reshape(size_shape); + ext_kernel_shape_.Reshape(size_shape); + int* size_data = size_.mutable_cpu_data(); + int* pooled_size_data = pooled_size_.mutable_cpu_data(); + int* ext_kernel_shape_data = ext_kernel_shape_.mutable_cpu_data(); - channels_ = bottom[0]->channels(); - height_ = bottom[0]->height(); - width_ = bottom[0]->width(); - pooled_height_ = static_cast(ceil( - static_cast(height_ + 2 * pad_h_ - ext_kernel_h) / stride_h_)) + 1; - pooled_width_ = static_cast(ceil( - static_cast(width_ + 2 * pad_w_ - ext_kernel_w) / stride_w_)) + 1; - - top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); + vector top_shape = bottom[0]->shape(); + for (int i = 0; i < num_spatial_axes_; ++i) { + size_data[i] = bottom[0]->shape(channel_axis_ + 1 + i); + ext_kernel_shape_data[i] = (kernel_shape_data[i] - 1) * kstride_data[i] + 1; + pooled_size_data[i] = static_cast(ceil( + static_cast(size_data[i] + 2 * pad_data[i] + - ext_kernel_shape_data[i]) / stride_data[i])) + 1; + top_shape[channel_axis_ + 1 + i] = pooled_size_data[i]; + } + top[0]->Reshape(top_shape); if (top.size() > 1) { top[1]->ReshapeLike(*top[0]); } // If max pooling, we will initialize the vector index part. if (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_MAX && top.size() == 1) { - max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + max_idx_.Reshape(top_shape); } } diff --git a/src/caffe/layers/pooling_nd_layer.cu b/src/caffe/layers/pooling_nd_layer.cu index 9907f7f3e64..96b9144efcf 100644 --- a/src/caffe/layers/pooling_nd_layer.cu +++ b/src/caffe/layers/pooling_nd_layer.cu @@ -15,38 +15,75 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void MaxPoolNDForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, - Dtype* top_data, int* mask, Dtype* top_mask) { - CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height); - int wend = min(wstart + ext_kernel_w, width); - hstart = max(hstart, 0); - wstart = max(wstart, 0); +__global__ void MaxPoolNDForward(const int n, const int num_axes, + const Dtype* bottom_data, + const int channels, const int* size, + const int* pooled_size, const int* kernel_size, + const int* ext_kernel_size, const int* stride, + const int* kstride, const int* pad, + Dtype* top_data, int* mask, Dtype* top_mask) { + int d_idx[6]; // NOLINT(runtime/arrays) + int d_start[6]; // NOLINT(runtime/arrays) + int d_end[6]; // NOLINT(runtime/arrays) + int d_iter[6]; // NOLINT(runtime/arrays) + int i; + + CUDA_KERNEL_LOOP(index, n) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + Dtype maxval = -FLT_MAX; int maxidx = -1; - bottom_data += (n * channels + c) * height * width; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { - if (bottom_data[h * width + w] > maxval) { - maxidx = h * width + w; - maxval = bottom_data[maxidx]; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; } } - } + } while (incremented); + top_data[index] = maxval; if (mask) { mask[index] = maxidx; @@ -68,9 +105,6 @@ void PoolingNDLayer::Forward_gpu(const vector*>& bottom, int* mask = NULL; Dtype* top_mask = NULL; - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA switch (this->layer_param_.pooling_param().pool()) { @@ -82,13 +116,12 @@ void PoolingNDLayer::Forward_gpu(const vector*>& bottom, } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolNDForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, top_data, - mask, top_mask); + CAFFE_CUDA_NUM_THREADS)( + count, num_spatial_axes_, bottom_data, + channels_, size_.gpu_data(), pooled_size_.gpu_data(), + kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), + top_data, mask, top_mask); break; default: { LOG(FATAL)<< "Unknown pooling method."; @@ -114,17 +147,20 @@ void PoolingNDLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( CL_KERNEL_SELECT("max_pool_forward_nd")); viennacl::ocl::enqueue( - oclk_max_pool_forward(count, - WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - WrapHandle((cl_mem) top_data, &ctx), + oclk_max_pool_forward(count, num_spatial_axes_, + WrapHandle((cl_mem)bottom_data, &ctx), + channels_, + WrapHandle((cl_mem)(size_.gpu_data()), &ctx), + WrapHandle((cl_mem)(pooled_size_.gpu_data()), &ctx), + WrapHandle((cl_mem)(kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem)(ext_kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem)(stride_.gpu_data()), &ctx), + WrapHandle((cl_mem)(kstride_.gpu_data()), &ctx), + WrapHandle((cl_mem)(pad_.gpu_data()), &ctx), + WrapHandle((cl_mem)top_data, &ctx), mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, &ctx), - WrapHandle((cl_mem) top_mask, &ctx)), + WrapHandle((cl_mem)mask, &ctx), + WrapHandle((cl_mem)top_mask, &ctx)), ctx.get_queue()); } break; @@ -138,58 +174,84 @@ void PoolingNDLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void MaxPoolNDBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, - Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { +__global__ void MaxPoolNDBackward(const int n, const int num_axes, + const Dtype* top_diff, const int* mask, + const Dtype* top_mask, + const int channels, const int* size, + const int* pooled_size, + const int* kernel_size, + const int* ext_kernel_size, const int* stride, + const int* kstride, const int* pad, + Dtype* bottom_diff) { + int d_idx[6]; // NOLINT(runtime/arrays) + int d_start[6]; // NOLINT(runtime/arrays) + int d_end[6]; // NOLINT(runtime/arrays) + int d_iter[6]; // NOLINT(runtime/arrays) + int i; + + CUDA_KERNEL_LOOP(index, n) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; - int pooled_height_1 = pooled_height - 1; - int pooled_width_1 = pooled_width - 1; - int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; - int phend = - (h >= pooled_height) ? - pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; - int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; - int pwend = - (w >= pooled_width) ? - pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); Dtype gradient = 0; - int offset = (n * channels + c) * pooled_height * pooled_width; - top_diff += offset; - if (mask) { - mask += offset; - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { - if (mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; - } + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; } } - } else { - mask += offset; - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { - if (top_mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; - } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; } } - } + } while (incremented); bottom_diff[index] = gradient; } } @@ -207,9 +269,6 @@ void PoolingNDLayer::Backward_gpu(const vector*>& top, const int* mask = NULL; const Dtype* top_mask = NULL; - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(count, Dtype(0.), bottom_diff); @@ -222,12 +281,11 @@ void PoolingNDLayer::Backward_gpu(const vector*>& top, } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolNDBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, top_diff, mask, top_mask, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, + CAFFE_CUDA_NUM_THREADS)( + count, num_spatial_axes_, top_diff, mask, top_mask, + channels_, size_.gpu_data(), pooled_size_.gpu_data(), + kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), bottom_diff); break; default: @@ -256,16 +314,19 @@ void PoolingNDLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( CL_KERNEL_SELECT("max_pool_backward_nd")); viennacl::ocl::enqueue( - oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, &ctx), - WrapHandle((cl_mem) top_mask, &ctx), - top[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - WrapHandle((cl_mem) bottom_diff, &ctx)), + oclk_max_pool_backward( + count, num_spatial_axes_, WrapHandle((cl_mem) top_diff, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx), channels_, + WrapHandle((cl_mem) (size_.gpu_data()), &ctx), + WrapHandle((cl_mem) (pooled_size_.gpu_data()), &ctx), + WrapHandle((cl_mem) (kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem) (ext_kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem) (stride_.gpu_data()), &ctx), + WrapHandle((cl_mem) (kstride_.gpu_data()), &ctx), + WrapHandle((cl_mem) (pad_.gpu_data()), &ctx), + WrapHandle((cl_mem) bottom_diff, &ctx)), ctx.get_queue()); } break; diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu index 0762536272f..60e5e224ef6 100644 --- a/src/caffe/layers/pooling_sk_layer.cu +++ b/src/caffe/layers/pooling_sk_layer.cu @@ -377,7 +377,7 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } } } else { - mask += offset; + top_mask += offset; for (int ph = phstart; ph <= phend; ph += kstride_h) { for (int pw = pwstart; pw <= pwend; pw += kstride_w) { if (top_mask[ph * pooled_width + pw] == h * width + w) { diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 332fae807e1..e46f62bc32b 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -716,6 +716,7 @@ message PoolingParameter { repeated uint32 kstride = 13; // The kernel stride, default 1 optional uint32 kstride_h = 14; optional uint32 kstride_w = 15; + optional int32 axis = 16 [default = 1]; } message PowerParameter { diff --git a/src/caffe/test/test_pooling_nd_layer.cpp b/src/caffe/test/test_pooling_nd_layer.cpp new file mode 100644 index 00000000000..7598f84e729 --- /dev/null +++ b/src/caffe/test/test_pooling_nd_layer.cpp @@ -0,0 +1,198 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +#ifndef CPU_ONLY // CPU-GPU test + +namespace caffe { + +template +class PoolingNDLayerTest : public GPUDeviceTest { + protected: + PoolingNDLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) { + } + + virtual void SetUp() { + BlobShape shape; + shape.add_dim(1); // Batch + shape.add_dim(1); // Channels + shape.add_dim(5); // Depth + shape.add_dim(5); // Height + shape.add_dim(5); // Width + blob_bottom_->Reshape(shape); + + shape.add_dim(1); // Batch + shape.add_dim(1); // Channels + shape.add_dim(1); // Depth + shape.add_dim(1); // Height + shape.add_dim(1); // Width + blob_top_->Reshape(shape); + + // fill the values + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~PoolingNDLayerTest() { + delete blob_bottom_; + delete blob_top_; + } + + void TestForward() { + LayerParameter layer_param; + PoolingParameter* pooling_param = + layer_param.mutable_pooling_param(); + + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + + pooling_param->set_axis(1); + + PoolingNDLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + int d = blob_bottom_->shape(2); + int h = blob_bottom_->shape(3); + int w = blob_bottom_->shape(4); + + TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); + + TypeParam maxval = 0; + + for (int cd = 0; cd < d; ++cd) { + for (int ch = 0; ch < h; ++ch) { + for (int cw = 0; cw < w; ++cw) { + bottom_data[cw + ch * w + cd * w * h] = + cw + ch * w + cd * w * h; + if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { + maxval = std::max((TypeParam)(cw + ch * w + cd * w * h), maxval); + } + } + } + } + + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + const TypeParam *top_data = blob_top_->cpu_data(); + + EXPECT_EQ(maxval, top_data[0]); + } + + void TestBackward() { + LayerParameter layer_param; + PoolingParameter* pooling_param = + layer_param.mutable_pooling_param(); + + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + + pooling_param->set_axis(1); + + PoolingNDLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + int d = blob_bottom_->shape(2); + int h = blob_bottom_->shape(3); + int w = blob_bottom_->shape(4); + + TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); + + TypeParam maxval = 0; + + for (int cd = 0; cd < d; ++cd) { + for (int ch = 0; ch < h; ++ch) { + for (int cw = 0; cw < w; ++cw) { + bottom_data[cw + ch * w + cd * w * h] = + cw + ch * w + cd * w * h; + if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { + maxval = std::max((TypeParam)(cw + ch * w + cd * w * h), maxval); + } + } + } + } + + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam *top_diff = blob_top_->mutable_cpu_diff(); + top_diff[0] = maxval; + + std::vector prop_down; + prop_down.push_back(true); + + layer.Backward(this->blob_top_vec_, prop_down, this->blob_bottom_vec_); + + const TypeParam *bottom_diff = blob_bottom_->cpu_diff(); + + for (int cd = 0; cd < d; ++cd) { + for (int ch = 0; ch < h; ++ch) { + for (int cw = 0; cw < w; ++cw) { + if (maxval == cw + ch * w + cd * w * h) { + EXPECT_EQ(maxval, bottom_diff[cw + ch * w + cd * w * h]); + } + } + } + } + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(PoolingNDLayerTest, TestDtypes); + +TYPED_TEST(PoolingNDLayerTest, TestSetup) { + LayerParameter layer_param; + PoolingParameter* Pooling_param = + layer_param.mutable_pooling_param(); + + Pooling_param->add_kernel_size(3); + Pooling_param->add_kernel_size(3); + Pooling_param->add_kernel_size(3); + + Pooling_param->add_kstride(2); + Pooling_param->add_kstride(2); + Pooling_param->add_kstride(2); + + + PoolingNDLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(1, this->blob_top_->shape(2)); + EXPECT_EQ(1, this->blob_top_->shape(3)); + EXPECT_EQ(1, this->blob_top_->shape(4)); +} + +TYPED_TEST(PoolingNDLayerTest, TestForward) { + this->TestForward(); +} + +TYPED_TEST(PoolingNDLayerTest, TestBackward) { + this->TestBackward(); +} + +} // namespace caffe +#endif // !CPU_ONLY diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index aa95ce9a575..40c46e099e8 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -375,9 +375,9 @@ __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, // Old: const int d_max = kernel_shape[i]; // New (strided, limit is the external kernel size): const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; - if (d_iter[i] == d_max - 1) { + if (d_iter[i] > d_max - kstride[i]) { d_iter[i] = 0; - } else { // d_iter[i] < d_max - 1 + } else { // d_iter[i] <= d_max - kstride[i] // Old: ++d_iter[i]; // New (strided, increment by the stride each time): d_iter[i] += kstride[i]; From ad781eea63500327f1b61fb3381608345cdb0892 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 15 Jul 2015 02:43:57 +0200 Subject: [PATCH 145/254] Removed logfile. --- error.txt | 26 -------------------------- 1 file changed, 26 deletions(-) delete mode 100644 error.txt diff --git a/error.txt b/error.txt deleted file mode 100644 index 97db1dd3620..00000000000 --- a/error.txt +++ /dev/null @@ -1,26 +0,0 @@ -Setting to use device 1 -Note: Google Test filter = *ND* -[==========] Running 12 tests from 4 test cases. -[----------] Global test environment set-up. -[----------] 3 tests from ConvolutionNDLayerTest/0, where TypeParam = float -[ RUN ] ConvolutionNDLayerTest/0.TestSetup -[ OK ] ConvolutionNDLayerTest/0.TestSetup (0 ms) -[ RUN ] ConvolutionNDLayerTest/0.TestForward -[ OK ] ConvolutionNDLayerTest/0.TestForward (2 ms) -[ RUN ] ConvolutionNDLayerTest/0.TestBackward -[ OK ] ConvolutionNDLayerTest/0.TestBackward (2 ms) -[----------] 3 tests from ConvolutionNDLayerTest/0 (4 ms total) - -[----------] 3 tests from ConvolutionNDLayerTest/1, where TypeParam = double -[ RUN ] ConvolutionNDLayerTest/1.TestSetup -[ OK ] ConvolutionNDLayerTest/1.TestSetup (0 ms) -[ RUN ] ConvolutionNDLayerTest/1.TestForward -[ OK ] ConvolutionNDLayerTest/1.TestForward (1 ms) -[ RUN ] ConvolutionNDLayerTest/1.TestBackward -[ OK ] ConvolutionNDLayerTest/1.TestBackward (2 ms) -[----------] 3 tests from ConvolutionNDLayerTest/1 (3 ms total) - -[----------] 3 tests from PoolingNDLayerTest/0, where TypeParam = float -[ RUN ] PoolingNDLayerTest/0.TestSetup -[ OK ] PoolingNDLayerTest/0.TestSetup (0 ms) -[ RUN ] PoolingNDLayerTest/0.TestForward From d99153d051954e7ac394cc2fc5ddee7117322165 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 18 Jul 2015 04:19:02 +0200 Subject: [PATCH 146/254] Fixed draw_net.py for ND, SK and new repeated fields in those layers. --- .gitignore | 1 + python/caffe/draw.py | 30 +++++++++++++++++------------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index 280e1455a4f..887277debc5 100644 --- a/.gitignore +++ b/.gitignore @@ -67,6 +67,7 @@ models/* *lmdb # build, distribute, and bins (+ python proto bindings) +cmake_build build .build_debug/* .build_release/* diff --git a/python/caffe/draw.py b/python/caffe/draw.py index 324929deca4..6959c268a09 100644 --- a/python/caffe/draw.py +++ b/python/caffe/draw.py @@ -40,7 +40,7 @@ def get_edge_label(layer): if layer.type == 'Data': edge_label = 'Batch ' + str(layer.data_param.batch_size) - elif layer.type == 'Convolution': + elif layer.type == 'Convolution' or layer.type == 'ConvolutionND' or layer.type == 'ConvolutionSK': edge_label = str(layer.convolution_param.num_output) elif layer.type == 'InnerProduct': edge_label = str(layer.inner_product_param.num_output) @@ -74,32 +74,36 @@ def get_layer_label(layer, rankdir): # horizontal space is not; separate words with newlines separator = '\\n' - if layer.type == 'Convolution': + if layer.type == 'Convolution' or layer.type == 'ConvolutionND' or layer.type == 'ConvolutionSK': # Outer double quotes needed or else colon characters don't parse # properly - node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\ + node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d%skstride: %d"' %\ (layer.name, separator, layer.type, separator, - layer.convolution_param.kernel_size, + layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size) > 0 else 1, separator, - layer.convolution_param.stride, + layer.convolution_param.stride[0] if len(layer.convolution_param.stride) > 0 else 1, separator, - layer.convolution_param.pad) - elif layer.type == 'Pooling': + layer.convolution_param.pad[0] if len(layer.convolution_param.pad) > 0 else 0, + separator, + layer.convolution_param.kstride[0] if len(layer.convolution_param.kstride) > 0 else 1) + elif layer.type == 'Pooling' or layer.type == 'PoolingND' or layer.type == 'PoolingSK': pooling_types_dict = get_pooling_types_dict() - node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\ + node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d%skstride: %d"' %\ (layer.name, separator, pooling_types_dict[layer.pooling_param.pool], layer.type, separator, - layer.pooling_param.kernel_size, + layer.pooling_param.kernel_size[0] if len(layer.pooling_param.kernel_size) > 0 else 1, + separator, + layer.pooling_param.stride[0] if len(layer.pooling_param.stride) > 0 else 1, separator, - layer.pooling_param.stride, + layer.pooling_param.pad[0] if len(layer.pooling_param.pad) > 0 else 0, separator, - layer.pooling_param.pad) + layer.pooling_param.kstride[0] if len(layer.pooling_param.kstride) > 0 else 1) else: node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type) return node_label @@ -109,9 +113,9 @@ def choose_color_by_layertype(layertype): """Define colors for nodes based on the layer type. """ color = '#6495ED' # Default - if layertype == 'Convolution': + if layertype == 'Convolution' or layertype == 'ConvolutionND' or layertype == 'ConvolutionSK': color = '#FF5050' - elif layertype == 'Pooling': + elif layertype == 'Pooling' or layertype == 'PoolingND' or layertype == 'PoolingSK': color = '#FF9900' elif layertype == 'InnerProduct': color = '#CC33FF' From f5ab436d235d24e1e492647e3c6189f6f5c1ae24 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 18 Jul 2015 17:09:54 +0200 Subject: [PATCH 147/254] Updated MergeCrop layer parameters. --- include/caffe/vision_layers.hpp | 4 ++ protoc_generator.sh | 2 +- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/mergecrop.cl | 26 ++++++++---- src/caffe/layers/mergecrop_layer.cpp | 32 ++++++++++++++- src/caffe/layers/mergecrop_layer.cu | 48 ++++++++++++++++------ src/caffe/proto/caffe.proto | 7 ++++ 7 files changed, 99 insertions(+), 24 deletions(-) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 217ab0f6608..43eade85b91 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -128,6 +128,10 @@ class MergeCropLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + + private: + Blob forward; + Blob backward; }; /** diff --git a/protoc_generator.sh b/protoc_generator.sh index 1c94ccb5530..f19a4b7bb98 100644 --- a/protoc_generator.sh +++ b/protoc_generator.sh @@ -1,3 +1,3 @@ protoc src/caffe/proto/caffe.proto --cpp_out=. -mkdir include/caffe/proto +mkdir -p include/caffe/proto mv src/caffe/proto/caffe.pb.h include/caffe/proto diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 34813dcd0c5..e1e46692c24 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -21,7 +21,7 @@ std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\ std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT +std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a, const int forward_a,\n __global const Dtype* bottom_b, const int forward_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = forward_a == 1 ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b)\n + width_b * (h + pad_h) + pad_w + w;\n top[index] = forward_b == 1 ? bottom_b[bidx] : 0;\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n int backward_a,\n __global Dtype* bottom_b,\n int backward_b,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = backward_a == 1 ? top[index] : 0;\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b)\n + width_b * (h + pad_h) + pad_w + w;\n bottom_b[bidx] = backward_b == 1 ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT @@ -41,7 +41,7 @@ std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a,\n __global const Dtype* bottom_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = bottom_a[aidx];\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx =\n (((batch_id) * channels_b + channel_id) * height_b\n * width_b) + width_b * (h + pad_h) + pad_w + w;\n top[index] = bottom_b[bidx];\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = top[index];\n }\n }\n}"; // NOLINT +std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(\n const int nthreads, __global const Dtype* bottom_a, const int forward_a,\n __global const Dtype* bottom_b, const int forward_b,\n __global Dtype* top,\n int num, int channels_a, int channels_b, int height_a, int width_a,\n int height_b, int width_b) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n top[index] = forward_a == 1 ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b)\n + width_b * (h + pad_h) + pad_w + w;\n top[index] = forward_b == 1 ? bottom_b[bidx] : 0;\n }\n }\n\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n__global Dtype* bottom_a,\n int backward_a,\n __global Dtype* bottom_b,\n int backward_b,\n __global const Dtype* top,\n int num, int channels_a,\n int channels_b, int height_a,\n int width_a, int height_b,\n int width_b) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * height_a * width_a);\n\n int pad_h = (height_b - height_a) / 2;\n int pad_w = (width_b - width_a) / 2;\n\n int bottom_id = ((index\n - batch_id * (channels_a + channels_b) * height_a * width_a)\n / (channels_a * height_a * width_a)) % 2;\n\n int h = ((index / width_a) % height_a);\n int w = (index % width_a);\n\n if (bottom_id == 0) {\n int channel_id = (index / ((width_a * height_a)) % channels_a);\n int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h)\n * width_a + w);\n bottom_a[aidx] = backward_a == 1 ? top[index] : 0;\n } else {\n int channel_id = (index / ((width_a * height_a)) % channels_b);\n int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b)\n + width_b * (h + pad_h) + pad_w + w;\n bottom_b[bidx] = backward_b == 1 ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/mergecrop.cl b/src/caffe/greentea/cl_kernels/mergecrop.cl index cbd48c624ed..001f7bce368 100644 --- a/src/caffe/greentea/cl_kernels/mergecrop.cl +++ b/src/caffe/greentea/cl_kernels/mergecrop.cl @@ -3,8 +3,8 @@ #endif __kernel void TEMPLATE(merge_copy_forward, Dtype)( - const int nthreads, __global const Dtype* bottom_a, - __global const Dtype* bottom_b, + const int nthreads, __global const Dtype* bottom_a, const int forward_a, + __global const Dtype* bottom_b, const int forward_b, __global Dtype* top, int num, int channels_a, int channels_b, int height_a, int width_a, int height_b, int width_b) { @@ -28,13 +28,12 @@ __kernel void TEMPLATE(merge_copy_forward, Dtype)( int channel_id = (index / ((width_a * height_a)) % channels_a); int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) * width_a + w); - top[index] = bottom_a[aidx]; + top[index] = forward_a == 1 ? bottom_a[aidx] : 0; } else { int channel_id = (index / ((width_a * height_a)) % channels_b); - int bidx = - (((batch_id) * channels_b + channel_id) * height_b - * width_b) + width_b * (h + pad_h) + pad_w + w; - top[index] = bottom_b[bidx]; + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + top[index] = forward_b == 1 ? bottom_b[bidx] : 0; } } @@ -42,6 +41,9 @@ __kernel void TEMPLATE(merge_copy_forward, Dtype)( __kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, __global Dtype* bottom_a, + int backward_a, + __global Dtype* bottom_b, + int backward_b, __global const Dtype* top, int num, int channels_a, int channels_b, int height_a, @@ -51,6 +53,9 @@ __global Dtype* bottom_a, index += get_global_size(0)) { int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + int bottom_id = ((index - batch_id * (channels_a + channels_b) * height_a * width_a) / (channels_a * height_a * width_a)) % 2; @@ -62,7 +67,12 @@ __global Dtype* bottom_a, int channel_id = (index / ((width_a * height_a)) % channels_a); int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) * width_a + w); - bottom_a[aidx] = top[index]; + bottom_a[aidx] = backward_a == 1 ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b == 1 ? top[index] : 0; } } } diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index 9af459dd5bd..ee6fa7ac50c 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -9,7 +9,37 @@ namespace caffe { template void MergeCropLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - // Nothing to do here, other than the reshaping + std::vector forward_shape(1); + forward_shape[0] = 2; + + std::vector backward_shape(1); + backward_shape[0] = 2; + + forward.Reshape(forward_shape); + backward.Reshape(backward_shape); + + int* forward_data = forward.mutable_cpu_data(); + int* backward_data = backward.mutable_cpu_data(); + + // By default, forward both a and b + forward_data[0] = 1; + forward_data[1] = 1; + + // By default, backward a and do not backward b + backward_data[0] = 1; + backward_data[1] = 0; + + + if (this->layer_param_.has_mergecrop_param()) { + MergeCropParameter mergecrop_param = this->layer_param_.mergecrop_param(); + for (int i = 0; i < mergecrop_param.forward_size(); ++i) { + forward_data[i] = mergecrop_param.forward(i); + } + for (int i = 0; i < mergecrop_param.backward_size(); ++i) { + backward_data[i] = mergecrop_param.backward(i); + } + } + Reshape(bottom, top); } diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index f55680d8de7..2d499788425 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -14,9 +14,10 @@ namespace caffe { #ifdef USE_CUDA template __global__ void CopyForward(const int nthreads, const Dtype* bottom_a, - const Dtype* bottom_b, Dtype* top, int num, - int channels_a, int channels_b, int height_a, - int width_a, int height_b, int width_b) { + bool forward_a, const Dtype* bottom_b, + bool forward_b, Dtype* top, int num, int channels_a, + int channels_b, int height_a, int width_a, + int height_b, int width_b) { CUDA_KERNEL_LOOP(index, nthreads) { int pad_h = (height_b - height_a) / 2; int pad_w = (width_b - width_a) / 2; @@ -34,22 +35,26 @@ __global__ void CopyForward(const int nthreads, const Dtype* bottom_a, int channel_id = (index / ((width_a * height_a)) % channels_a); int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) * width_a + w); - top[index] = bottom_a[aidx]; + top[index] = forward_a ? bottom_a[aidx] : 0; } else { int channel_id = (index / ((width_a * height_a)) % channels_b); int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + width_b * (h + pad_h) + pad_w + w; - top[index] = bottom_b[bidx]; + top[index] = forward_b ? bottom_b[bidx] : 0; } } } template __global__ void CopyBackward(const int nthreads, Dtype* bottom_a, + bool backward_a, Dtype* bottom_b, bool backward_b, const Dtype* top, int num, int channels_a, int channels_b, int height_a, int width_a, int height_b, int width_b) { CUDA_KERNEL_LOOP(index, nthreads) { + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); int bottom_id = ((index @@ -63,7 +68,12 @@ __global__ void CopyBackward(const int nthreads, Dtype* bottom_a, int channel_id = (index / ((width_a * height_a)) % channels_a); int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) * width_a + w); - bottom_a[aidx] = top[index]; + bottom_a[aidx] = backward_a ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b ? top[index] : 0; } } } @@ -72,6 +82,8 @@ __global__ void CopyBackward(const int nthreads, Dtype* bottom_a, template void MergeCropLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { + int* forward_data = forward.mutable_cpu_data(); + int count = top[0]->count(); const Dtype* bottom_data_a = bottom[0]->gpu_data(); @@ -94,8 +106,9 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CopyForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS) ( - count, bottom_data_a, bottom_data_b, top_data, num, channels_a, + CAFFE_CUDA_NUM_THREADS) ( + count, bottom_data_a, forward_data[0], bottom_data_b, + forward_data[1], top_data, num, channels_a, channels_b, height_a, width_a, height_b, width_b); #endif // USE_CUDA } else { @@ -109,9 +122,11 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, CL_KERNEL_SELECT("merge_copy_forward")); viennacl::ocl::enqueue( oclk_copy_forward(count, WrapHandle((cl_mem) bottom_data_a, &ctx), + forward_data[0], WrapHandle((cl_mem) bottom_data_b, &ctx), - WrapHandle((cl_mem) top_data, &ctx), num, channels_a, - channels_b, height_a, width_a, height_b, width_b), + forward_data[1], WrapHandle((cl_mem) top_data, &ctx), + num, channels_a, channels_b, height_a, width_a, + height_b, width_b), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA @@ -125,9 +140,13 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, if (!propagate_down[0]) { return; } + + int* backward_data = backward.mutable_cpu_data(); + int count = top[0]->count(); Dtype* bottom_diff_a = bottom[0]->mutable_gpu_diff(); + Dtype* bottom_diff_b = bottom[1]->mutable_gpu_diff(); const Dtype* top_diff = top[0]->gpu_diff(); int num = bottom[0]->num(); @@ -146,8 +165,10 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS) ( - count, bottom_diff_a, top_diff, num, channels_a, channels_b, height_a, + CAFFE_CUDA_NUM_THREADS) ( + count, bottom_diff_a, backward_data[0], + bottom_diff_b, backward_data[1], top_diff, num, + channels_a, channels_b, height_a, width_a, height_b, width_b); #endif // USE_CUDA } else { @@ -161,6 +182,9 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, CL_KERNEL_SELECT("merge_copy_backward")); viennacl::ocl::enqueue( oclk_copy_backward(count, WrapHandle((cl_mem) bottom_diff_a, &ctx), + backward_data[0], + WrapHandle((cl_mem) bottom_diff_b, &ctx), + backward_data[1], WrapHandle((cl_mem) top_diff, &ctx), num, channels_a, channels_b, height_a, width_a, height_b, width_b), ctx.get_queue()); diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index e46f62bc32b..c195e3253aa 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -356,6 +356,7 @@ message LayerParameter { optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; optional AffinityParameter affinity_param = 137; + optional MergeCropParameter mergecrop_param = 138; } // Message that stores parameters used to apply transformation @@ -1143,3 +1144,9 @@ message AffinityParameter { repeated int32 offset = 1; } +message MergeCropParameter { + // Forward and backward enable/disable + // Defined once per bottom blob + repeated bool forward = 1; + repeated bool backward = 2; +} From b2bceb89e09f885ecd1cc4168162aec9f3c67c7f Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 18 Jul 2015 23:18:33 +0200 Subject: [PATCH 148/254] Fixed loss division by zero bug. --- src/caffe/layers/softmax_loss_layer.cu | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 7f4554c0bb2..2761efa4681 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -182,7 +182,10 @@ void SoftmaxWithLossLayer::Backward_gpu( if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); - caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); + // Fix the division by zero bug + if (count > 0) { + caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); + } } else { caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } @@ -226,8 +229,11 @@ void SoftmaxWithLossLayer::Backward_gpu( Dtype count; greentea_gpu_asum(this->device_context_->id(), nthreads, counts, 0, &count); - greentea_gpu_scal(this->device_context_->id(), + // Fix the division by zero bug + if (count > 0) { + greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / count, bottom_diff, 0); + } } else { greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / num, bottom_diff, 0); From 5bc7afeb5f5c5d595bb39165976b77698c66c2cf Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 18 Jul 2015 23:49:04 +0200 Subject: [PATCH 149/254] 2nd attempt fixing zero valid labels present. --- src/caffe/layers/softmax_loss_layer.cu | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 2761efa4681..51fc5fe135a 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -183,9 +183,10 @@ void SoftmaxWithLossLayer::Backward_gpu( Dtype count; caffe_gpu_asum(nthreads, counts, &count); // Fix the division by zero bug - if (count > 0) { - caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); + if (count == 0) { + count = num; } + caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } @@ -228,15 +229,16 @@ void SoftmaxWithLossLayer::Backward_gpu( if (normalize_) { Dtype count; greentea_gpu_asum(this->device_context_->id(), - nthreads, counts, 0, &count); + nthreads, counts, 0, &count); // Fix the division by zero bug - if (count > 0) { - greentea_gpu_scal(this->device_context_->id(), - prob_.count(), loss_weight / count, bottom_diff, 0); + if (count == 0) { + count = num; } + greentea_gpu_scal(this->device_context_->id(), + prob_.count(), loss_weight / count, bottom_diff, 0); } else { greentea_gpu_scal(this->device_context_->id(), - prob_.count(), loss_weight / num, bottom_diff, 0); + prob_.count(), loss_weight / num, bottom_diff, 0); } if (bottom.size() == 3) { // TODO: Correct this for easy diff scaling From 642ea00495fbc94610124644a44da5b500e84756 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 19 Jul 2015 00:35:55 +0200 Subject: [PATCH 150/254] NaN failure. --- src/caffe/layers/softmax_loss_layer.cu | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 51fc5fe135a..55079db4c67 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -68,6 +68,9 @@ void SoftmaxWithLossLayer::Forward_gpu( if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); + if (count == 0) { + count = num; + } loss /= count; } else { loss /= num; @@ -111,6 +114,9 @@ void SoftmaxWithLossLayer::Forward_gpu( Dtype count; greentea_gpu_asum(this->device_context_->id(), nthreads, counts, 0, &count); + if (count == 0) { + count = num; + } loss /= count; } else { loss /= num; From e060fbf65c5d5cf1f9440cf3badb3d3475751e98 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 19 Jul 2015 01:05:56 +0200 Subject: [PATCH 151/254] Another attempt at NaN loss fixing. --- src/caffe/layers/softmax_loss_layer.cpp | 12 ++++++++++-- src/caffe/layers/softmax_loss_layer.cu | 25 ++++++++++++++----------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index 3c303cd0cae..96b9edfd6a5 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -74,7 +74,11 @@ void SoftmaxWithLossLayer::Forward_cpu( } } if (normalize_) { - top[0]->mutable_cpu_data()[0] = loss / count; + if (count == 0) { + top[0]->mutable_cpu_data()[0] = 0; + } else { + top[0]->mutable_cpu_data()[0] = loss / count; + } } else { top[0]->mutable_cpu_data()[0] = loss / outer_num_; } @@ -113,7 +117,11 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, // Scale gradient const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { - caffe_scal(prob_.count(), loss_weight / count, bottom_diff); + if (count == 0) { + caffe_set(prob_.count(), 0.0, bottom_diff); + } else { + caffe_scal(prob_.count(), loss_weight / count, bottom_diff); + } } else { caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 55079db4c67..e6d3028361a 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -69,9 +69,10 @@ void SoftmaxWithLossLayer::Forward_gpu( Dtype count; caffe_gpu_asum(nthreads, counts, &count); if (count == 0) { - count = num; + loss = 0; + } else { + loss /= count; } - loss /= count; } else { loss /= num; } @@ -115,9 +116,10 @@ void SoftmaxWithLossLayer::Forward_gpu( greentea_gpu_asum(this->device_context_->id(), nthreads, counts, 0, &count); if (count == 0) { - count = num; + loss = 0; + } else { + loss /= count; } - loss /= count; } else { loss /= num; } @@ -188,11 +190,11 @@ void SoftmaxWithLossLayer::Backward_gpu( if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); - // Fix the division by zero bug if (count == 0) { - count = num; + caffe_gpu_set(prob_.count(), 0.0, bottom_diff); + } else { + caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } - caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } @@ -236,12 +238,13 @@ void SoftmaxWithLossLayer::Backward_gpu( Dtype count; greentea_gpu_asum(this->device_context_->id(), nthreads, counts, 0, &count); - // Fix the division by zero bug if (count == 0) { - count = num; - } - greentea_gpu_scal(this->device_context_->id(), + greentea_gpu_set(this->device_context_->id(), + prob_.count(), 0.0, bottom_diff, 0); + } else { + greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / count, bottom_diff, 0); + } } else { greentea_gpu_scal(this->device_context_->id(), prob_.count(), loss_weight / num, bottom_diff, 0); From 51d9be64d5e4435a5e1c6bef538c71d34e62605e Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 20 Jul 2015 17:41:51 +0200 Subject: [PATCH 152/254] Fixed div by zero bug in MALIS loss function. --- src/caffe/layers/malis_loss_layer.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 96ff4ae134a..01240cefedb 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -195,7 +195,11 @@ void MalisLossLayer::Malis(const Dtype* conn_data, } } - dloss_data[minEdge] /= nPairNorm; + if (nPairNorm > 0) { + dloss_data[minEdge] /= nPairNorm; + } else { + dloss_data[minEdge] = 0; + } if (dsets.find_set(set1) == set2) { std::swap(set1, set2); @@ -217,7 +221,11 @@ void MalisLossLayer::Malis(const Dtype* conn_data, /* Return items */ double classerr, randIndex; - loss /= nPairNorm; + if (nPairNorm > 0) { + loss /= nPairNorm; + } else { + loss = 0; + } *loss_out = loss; classerr = static_cast(nPairIncorrect) / static_cast(nPairNorm); From 2970da7d30ec0e91b763850e0cf9073ddd669076 Mon Sep 17 00:00:00 2001 From: "tzu.ta.lin" Date: Wed, 22 Jul 2015 14:21:17 +0800 Subject: [PATCH 153/254] Include CUDA only if uing CUDA. Using OpenCL and CPU only didn't need to include CUDA header and libs --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a2b800472a1..e61dafd0402 100644 --- a/Makefile +++ b/Makefile @@ -236,7 +236,7 @@ endif CUDA_LIB_DIR += $(CUDA_DIR)/lib INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include -ifneq ($(CPU_ONLY), 1) +ifeq ($(USE_CUDA), 1) INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) LIBRARY_DIRS += $(CUDA_LIB_DIR) LIBRARIES += cudart cublas curand From 3a2e976150a94f3a3f39ed840a0906394c64fa5b Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 22 Jul 2015 20:34:27 +0200 Subject: [PATCH 154/254] Updated drawing scripts for ND-SK networks. --- python/caffe/draw.py | 18 +++++++++++++----- python/draw_net.py | 15 ++++++++++++++- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/python/caffe/draw.py b/python/caffe/draw.py index 6959c268a09..ae32c5acedc 100644 --- a/python/caffe/draw.py +++ b/python/caffe/draw.py @@ -122,7 +122,7 @@ def choose_color_by_layertype(layertype): return color -def get_pydot_graph(caffe_net, rankdir, label_edges=True): +def get_pydot_graph(caffe_net, rankdir, margin, page, pagesize, size, label_edges=True): """Create a data structure which represents the `caffe_net`. Parameters @@ -137,9 +137,17 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True): ------- pydot graph object """ + pydot_graph = pydot.Dot(caffe_net.name, graph_type='digraph', rankdir=rankdir) + + if margin != '': pydot_graph.set('margin',margin) + if page != '': pydot_graph.set('page', page) + if pagesize != '': pydot_graph.set('pagesize', pagesize) + if size != '': pydot_graph.set('size', size) + + pydot_nodes = {} pydot_edges = [] for layer in caffe_net.layer: @@ -181,7 +189,7 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True): return pydot_graph -def draw_net(caffe_net, rankdir, ext='png'): +def draw_net(caffe_net, rankdir, margin, page, pagesize, size, ext='png'): """Draws a caffe net and returns the image string encoded using the given extension. @@ -196,10 +204,10 @@ def draw_net(caffe_net, rankdir, ext='png'): string : Postscript representation of the graph. """ - return get_pydot_graph(caffe_net, rankdir).create(format=ext) + return get_pydot_graph(caffe_net, rankdir, margin, page, pagesize, size).create(format=ext) -def draw_net_to_file(caffe_net, filename, rankdir='LR'): +def draw_net_to_file(caffe_net, filename, rankdir='LR', margin='', page='', pagesize='', size=''): """Draws a caffe net, and saves it to file using the format given as the file extension. Use '.raw' to output raw text that you can manually feed to graphviz to draw graphs. @@ -214,4 +222,4 @@ def draw_net_to_file(caffe_net, filename, rankdir='LR'): """ ext = filename[filename.rfind('.')+1:] with open(filename, 'wb') as fid: - fid.write(draw_net(caffe_net, rankdir, ext)) + fid.write(draw_net(caffe_net, rankdir, margin, page, pagesize, size, ext)) diff --git a/python/draw_net.py b/python/draw_net.py index ec76a744da3..811274d976b 100755 --- a/python/draw_net.py +++ b/python/draw_net.py @@ -28,6 +28,18 @@ def parse_args(): 'http://www.graphviz.org/doc/info/' 'attrs.html#k:rankdir'), default='LR') + parser.add_argument('--margin', + help=('Margin parameter'), + default='') + parser.add_argument('--page', + help=('Page parameter'), + default='') + parser.add_argument('--pagesize', + help=('Pagesize parameter'), + default='') + parser.add_argument('--size', + help=('Size parameter'), + default='') args = parser.parse_args() return args @@ -38,7 +50,8 @@ def main(): net = caffe_pb2.NetParameter() text_format.Merge(open(args.input_net_proto_file).read(), net) print('Drawing net to %s' % args.output_image_file) - caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir) + caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir, + args.margin, args.page, args.pagesize, args.size) if __name__ == '__main__': From 9ebb32cfa2d957b03ee59b2a41d26db52b4df2bc Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 2 Aug 2015 15:08:22 +0200 Subject: [PATCH 155/254] Makefile fix for OS X. --- Makefile | 6 +++++- include/caffe/layer.hpp | 11 +++++++++++ include/caffe/vision_layers.hpp | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e61dafd0402..d7afcb149bd 100644 --- a/Makefile +++ b/Makefile @@ -204,7 +204,11 @@ ifeq ($(USE_GREENTEA),1) # Requires valid OpenCL headers and valid ViennaCL INCLUDE_DIRS += $(CLLINC) $(VIENNACL_DIR) # Requires OpenCL compile library flag and librt - LIBRARIES += OpenCL rt + ifeq ($(OS_X), 1) + LDFLAGS += -framework OpenCL + else + LIBRARIES += OpenCL rt + endif # Additional flags COMMON_FLAGS += -DUSE_GREENTEA -DVIENNACL_WITH_OPENCL diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index bb0f5457b42..71a0e0afe92 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -320,6 +320,17 @@ class Layer { return device_context_; } + /** + * @brief Returns the estimated floating point operations of this layer + */ + virtual size_t ForwardFlops() { + return 0; + } + + virtual size_t BackwardFlops() { + return 0; + } + protected: /** The protobuf that stores the layer parameters */ LayerParameter layer_param_; diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 43eade85b91..72171a81f1c 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -294,6 +294,7 @@ class BaseConvolutionNDLayer : public Layer { virtual inline int MinTopBlobs() const { return 1; } virtual inline bool EqualNumBottomTopBlobs() const { return true; } + protected: // Helper functions that abstract away the column buffer and gemm arguments. // The last argument in forward_cpu_gemm is so that we can skip the im2col if @@ -434,6 +435,20 @@ class ConvolutionNDLayer : public BaseConvolutionNDLayer { return "ConvolutionND"; } + virtual size_t ForwardFlops() { + size_t group = this->group_; + size_t N = 1; + size_t M = this->num_output_ / group; + size_t K = this->channels_; + const int* kshape = this->kernel_shape_.cpu_data(); + for (int i = 0; i < this->output_shape_.size(); ++i) { + N *= this->output_shape_[i]; + K *= kshape[i]; + } + K /= group; + return group* (M * N * (2 * K - 1)); + } + protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); @@ -494,6 +509,14 @@ class ConvolutionSKLayer : public Layer { return "ConvolutionSK"; } + virtual size_t ForwardFlops() { + size_t M = this->M_; + size_t N = this->N_; + size_t K = this->K_; + size_t group = this->group_; + return group * (M * N * (2 * K - 1)); + } + protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); @@ -577,6 +600,15 @@ class ConvolutionLayer : public BaseConvolutionLayer { return "Convolution"; } + virtual size_t ForwardFlops() { + size_t group = this->group_; + size_t N = this->height_out_ * this->width_out_; + size_t M = this->num_output_ / group; + size_t K = this->channels_ * this->kernel_h_ * this->kernel_w_; + K /= group; + return group * (M * N * (2 * K - 1)); + } + protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); From 19f756b0a7d5137c1153a11890dc89d8f91f27ce Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 20:14:29 +0200 Subject: [PATCH 156/254] Removed include for CUDA when not present. --- src/caffe/parallel.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index ca0e32b01c1..145cfd27fd3 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -1,6 +1,8 @@ #ifndef CPU_ONLY +#ifdef USE_CUDA #include -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY #include #include #include From 5bfcdf65371c0cbfa065e778450d9512b96eadb1 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 20:20:34 +0200 Subject: [PATCH 157/254] Excluded CUDA Multi-GPU code in Greentea-only build. --- src/caffe/parallel.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index 145cfd27fd3..127592172ee 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -81,6 +81,7 @@ template GPUParams::GPUParams(shared_ptr > root_solver, int device) : Params(root_solver) { #ifndef CPU_ONLY +#ifdef USE_CUDA int initial_device; CUDA_CHECK(cudaGetDevice(&initial_device)); @@ -96,6 +97,7 @@ GPUParams::GPUParams(shared_ptr > root_solver, int device) caffe_gpu_set(size_, Dtype(0), diff_); CUDA_CHECK(cudaSetDevice(initial_device)); +#endif // USE_CUDA #else NO_GPU; #endif @@ -104,9 +106,11 @@ GPUParams::GPUParams(shared_ptr > root_solver, int device) template GPUParams::~GPUParams() { #ifndef CPU_ONLY +#ifdef USE_CUDA CUDA_CHECK(cudaFree(data_)); CUDA_CHECK(cudaFree(diff_)); -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY } template @@ -221,6 +225,7 @@ P2PSync::P2PSync(shared_ptr > root_solver, : GPUParams(root_solver, param.device_id()), parent_(parent), children_(), queue_(), initial_iter_(root_solver->iter()), solver_() { #ifndef CPU_ONLY +#ifdef USE_CUDA int initial_device; CUDA_CHECK(cudaGetDevice(&initial_device)); const int self = param.device_id(); @@ -253,6 +258,7 @@ P2PSync::P2PSync(shared_ptr > root_solver, } CUDA_CHECK(cudaSetDevice(initial_device)); +#endif // USE_CUDA #else NO_GPU; #endif @@ -261,6 +267,7 @@ P2PSync::P2PSync(shared_ptr > root_solver, template P2PSync::~P2PSync() { #ifndef CPU_ONLY +#ifdef USE_CUDA int initial_device; CUDA_CHECK(cudaGetDevice(&initial_device)); const int self = solver_->param().device_id(); @@ -277,7 +284,8 @@ P2PSync::~P2PSync() { } CUDA_CHECK(cudaSetDevice(initial_device)); -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY } template From afc89be1757c2b65da2b58c01f89c1d2568a4b3c Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 20:31:57 +0200 Subject: [PATCH 158/254] GPU device vector fixed in caffe.cpp --- src/caffe/layers/split_layer.cu | 2 -- tools/caffe.cpp | 8 +++++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu index 7d1676466cf..c725612359f 100644 --- a/src/caffe/layers/split_layer.cu +++ b/src/caffe/layers/split_layer.cu @@ -41,8 +41,6 @@ void SplitLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); if (top.size() == 1) { greentea_copy(count_, (cl_mem) (top[0]->gpu_diff()), 0, diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 2c337778cca..74f07d4e940 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -12,6 +12,7 @@ namespace bp = boost::python; #include "boost/algorithm/string.hpp" #include "caffe/caffe.hpp" +#include "caffe/device_context.hpp" using caffe::Blob; using caffe::Caffe; @@ -22,6 +23,7 @@ using caffe::shared_ptr; using caffe::string; using caffe::Timer; using caffe::vector; +using caffe::DeviceContext; using std::ostringstream; DEFINE_string(gpu, "", @@ -187,7 +189,11 @@ int train() { if (gpus.size() > 1) { caffe::P2PSync sync(solver, NULL, solver->param()); - sync.run(gpus); + std::vector devices; + for (int i = 0; i < gpus.size(); ++i) { + devices.push_back(Caffe::Get().GetDeviceContext(i)); + } + sync.run(devices); } else { LOG(INFO) << "Starting Optimization"; solver->Solve(); From c066af76be234a8dcb5f253ab97447ab128ff268 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 20:43:48 +0200 Subject: [PATCH 159/254] Fixed gradient solver test CUDA exclusions. --- src/caffe/test/test_gradient_based_solver.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index c837c76b750..a18fca33a0f 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -75,10 +75,13 @@ class GradientBasedSolverTest : public MultiDeviceTest { ostringstream proto; int device_id = 0; #ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { +#ifdef USE_CUDA + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { CUDA_CHECK(cudaGetDevice(&device_id)); } -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY proto << "snapshot_after_train: " << snapshot << " " "max_iter: " << num_iters << " " @@ -473,10 +476,13 @@ class GradientBasedSolverTest : public MultiDeviceTest { // Test over all numbers of devices. int available_devices = 1; #ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { +#ifdef USE_CUDA + if (Caffe::mode() == Caffe::GPU && + Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { CUDA_CHECK(cudaGetDeviceCount(&available_devices)); } -#endif +#endif // USE_CUDA +#endif // !CPU_ONLY for (int devices = 1; devices <= available_devices; ++devices) { // Configure batch size for single / multi device equivalence. // Constant data is needed for multi device as for accumulation. From a8ca694c211bf3c31c91803c582a6d54c05611b4 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 21:14:24 +0200 Subject: [PATCH 160/254] CPU only fix --- include/caffe/common.hpp | 2 +- src/caffe/common.cpp | 33 ++++++++++++++++++--------------- tools/caffe.cpp | 2 +- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 1f4cd66d10d..b6480501d4d 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -112,7 +112,7 @@ class Caffe { // Thread local context for Caffe. Moved to common.cpp instead of // including boost/thread.hpp to avoid a boost/NVCC issues (#1009, #1010) // on OSX. Also fails on Linux with CUDA 7.0.18. - static Caffe& Get(); + static Caffe& Get(bool get_global = false); enum Brew { CPU, GPU }; diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index aa851052c82..a214a240ce4 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -23,18 +23,22 @@ static boost::thread_specific_ptr thread_instance_; static shared_ptr global_instance_; -Caffe& Caffe::Get() { +Caffe& Caffe::Get(bool get_global) { if (!global_instance_.get()) { // The first call must be single threaded global_instance_.reset(new Caffe()); } - if (!thread_instance_.get()) { - // Every thread initially gets a copy of the global initialization - // Later, every thread can switch to a different default device - thread_instance_.reset(new Caffe(*(global_instance_.get()))); + if (get_global) { + return *(global_instance_.get()); + } else { + if (!thread_instance_.get()) { + // Every thread initially gets a copy of the global initialization + // Later, every thread can switch to a different default device + thread_instance_.reset(new Caffe(*(global_instance_.get()))); + } + return *(thread_instance_.get()); } - return *(thread_instance_.get()); } // random seeding @@ -111,8 +115,7 @@ Caffe::Caffe() mode_(Caffe::CPU), default_device_context_(nullptr), solver_count_(1), - root_solver_(true), - is_master_(true) {} + root_solver_(true) {} Caffe::~Caffe() {} @@ -329,9 +332,9 @@ int Caffe::EnumerateDevices(bool silent) { void Caffe::SetDevices(std::vector device_ids) { int initcount = 0; - Get().device_contexts_.clear(); + Get(true).device_contexts_.clear(); #ifdef USE_GREENTEA - Get().ocl_programs_.clear(); + Get(true).ocl_programs_.clear(); #endif int cuda_device_count = 0; #ifdef USE_CUDA @@ -342,7 +345,7 @@ void Caffe::SetDevices(std::vector device_ids) { if (device_ids[j] == i) { shared_ptr device( new DeviceContext(i, initcount, Backend::BACKEND_CUDA)); - Get().device_contexts_.emplace_back(device); + Get(true).device_contexts_.emplace_back(device); Caffe::GetDeviceContext(initcount)->Init(); ++initcount; } @@ -350,7 +353,7 @@ void Caffe::SetDevices(std::vector device_ids) { #ifdef USE_GREENTEA // Dummy to have same vector size as device contexts viennacl::ocl::program program; - Get().ocl_programs_.push_back(program); + Get(true).ocl_programs_.push_back(program); #endif // USE_GREENTEA } @@ -387,7 +390,7 @@ void Caffe::SetDevices(std::vector device_ids) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( static_cast(device_id)); viennacl::ocl::program & program = RegisterKernels(&ctx); - Get().ocl_programs_.push_back(program); + Get(true).ocl_programs_.push_back(program); // viennacl::ocl::switch_context(device_id); // viennacl::ocl::switch_device(std::get<1> // (platform_devices[device_id - cuda_device_count])); @@ -399,7 +402,7 @@ void Caffe::SetDevices(std::vector device_ids) { shared_ptr device( new DeviceContext(cuda_device_count + greentea_device_count, initcount, Backend::BACKEND_OpenCL)); - Get().device_contexts_.emplace_back(device); + Get(true).device_contexts_.emplace_back(device); Caffe::GetDeviceContext(initcount)->Init(); ++initcount; is_used = true; @@ -408,7 +411,7 @@ void Caffe::SetDevices(std::vector device_ids) { // Device not used, dummy if (!is_used) { viennacl::ocl::program program; - Get().ocl_programs_.push_back(program); + Get(true).ocl_programs_.push_back(program); } greentea_device_count++; } diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 74f07d4e940..ff75c5883c4 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -191,7 +191,7 @@ int train() { caffe::P2PSync sync(solver, NULL, solver->param()); std::vector devices; for (int i = 0; i < gpus.size(); ++i) { - devices.push_back(Caffe::Get().GetDeviceContext(i)); + devices.push_back(Caffe::Get(true).GetDeviceContext(i)); } sync.run(devices); } else { From 5cc81477b4d6a301ebf3c188be22e928e56502de Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 21:16:23 +0200 Subject: [PATCH 161/254] Fixed CPU only (second). --- src/caffe/common.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index a214a240ce4..18631a3f789 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -135,7 +135,8 @@ void Caffe::DeviceQuery() { void Caffe::Synchronize(int device_id) { } -void Caffe::EnumerateDevices() { +int Caffe::EnumerateDevices(bool silent) { + return 0; } class Caffe::RNG::Generator { From 39300179ded5413f1516c1249e75bfdf1869378e Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 21:37:42 +0200 Subject: [PATCH 162/254] Fixed CPU only (3rd). --- src/caffe/common.cpp | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 18631a3f789..efff0a8358b 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -107,6 +107,21 @@ Caffe::Caffe(const Caffe &obj) { #endif // USE_CUDA } +void Caffe::SelectDevice(DeviceContext* device_context) { +#ifndef CPU_ONLY + Get().default_device_context_ = device_context; + if (device_context->backend() == Backend::BACKEND_CUDA) { +#ifdef USE_CUDA + CUDA_CHECK(cudaSetDevice(device_context->id())); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + +#endif // USE_GREENTEA + } +#endif // !CPU_ONLY +} + #ifdef CPU_ONLY // CPU-only Caffe. @@ -466,19 +481,6 @@ void Caffe::SetDevice(const int device_id) { } } -void Caffe::SelectDevice(DeviceContext* device_context) { - Get().default_device_context_ = device_context; - if (device_context->backend() == Backend::BACKEND_CUDA) { -#ifdef USE_CUDA - CUDA_CHECK(cudaSetDevice(device_context->id())); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - -#endif // USE_GREENTEA - } -} - // TODO: Fix this for the new backend void Caffe::DeviceQuery() { if (Get().default_device_context_->backend() == BACKEND_CUDA) { From 6d0ec674f07b62b390c7c6315856327014f32064 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 15 Aug 2015 21:48:12 +0200 Subject: [PATCH 163/254] CPU only initialization fix. --- tools/caffe.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index ff75c5883c4..9e51818620d 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -108,6 +108,7 @@ int device_query() { // If no gpu is specified, enumerate all the devices. caffe::Caffe::EnumerateDevices(); } else { +#ifndef CPU_ONLY LOG(INFO) << "Querying GPUs " << FLAGS_gpu; vector gpus; get_gpus(&gpus); @@ -116,6 +117,7 @@ int device_query() { caffe::Caffe::SetDevice(i); caffe::Caffe::DeviceQuery(); } +#endif // !CPU_ONLY } return 0; } @@ -162,6 +164,7 @@ int train() { if (gpus.size() == 0) { Caffe::set_mode(Caffe::CPU); } else { +#ifndef CPU_ONLY // Load all devices that will be used Caffe::SetDevices(gpus); @@ -176,6 +179,7 @@ int train() { Caffe::SetDevice(0); Caffe::set_mode(Caffe::GPU); Caffe::set_solver_count(gpus.size()); +#endif // !CPU_ONLY } shared_ptr > solver(caffe::GetSolver(solver_param)); @@ -213,10 +217,12 @@ int test() { vector gpus; get_gpus(&gpus); if (gpus.size() != 0) { +#ifndef CPU_ONLY LOG(INFO) << "Use GPU with device ID " << gpus[0]; Caffe::SetDevices(gpus); Caffe::set_mode(Caffe::GPU); Caffe::SetDevice(0); +#endif // !CPU_ONLY } else { LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); @@ -281,10 +287,12 @@ int time() { vector gpus; get_gpus(&gpus); if (gpus.size() != 0) { +#ifndef CPU_ONLY LOG(INFO) << "Use GPU with device ID " << gpus[0]; Caffe::SetDevices(gpus); Caffe::set_mode(Caffe::GPU); Caffe::SetDevice(0); +#endif // !CPU_ONLY } else { LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); From 4e709ea86c158b3a57ecdb1ffee55e07b29fd4ea Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 16 Aug 2015 06:12:12 +0200 Subject: [PATCH 164/254] Updated pointer types for Caffe object. --- include/caffe/common.hpp | 14 +- include/caffe/device_context.hpp | 8 + src/caffe/common.cpp | 142 +++++++++--------- src/caffe/device_context.cpp | 34 ++++- .../greentea/greentea_math_functions.cpp | 1 + src/caffe/test/test_caffe_main.cpp | 2 +- tools/caffe.cpp | 10 +- 7 files changed, 114 insertions(+), 97 deletions(-) diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index b6480501d4d..62854f34b63 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -106,13 +106,14 @@ void GlobalInit(int* pargc, char*** pargv); // caffe is going to use for cublas, curand, etc. class Caffe { public: + Caffe(); Caffe(const Caffe &obj); ~Caffe(); // Thread local context for Caffe. Moved to common.cpp instead of // including boost/thread.hpp to avoid a boost/NVCC issues (#1009, #1010) // on OSX. Also fails on Linux with CUDA 7.0.18. - static Caffe& Get(bool get_global = false); + static Caffe& Get(); enum Brew { CPU, GPU }; @@ -201,21 +202,12 @@ class Caffe { // The shared ptrs are being referenced on every thread, // while the default device will be handled thread local - vector > device_contexts_; + static vector > device_contexts_; DeviceContext* default_device_context_; shared_ptr cpu_device_context_; int solver_count_; bool root_solver_; - -#ifdef USE_GREENTEA - vector ocl_programs_; - viennacl::ocl::program default_ocl_program_; -#endif // USE_GREENTEA - - private: - // The private constructor to avoid duplicate instantiation. - Caffe(); }; } // namespace caffe diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index 4a8b623a6e6..f10d4d77497 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -32,6 +32,11 @@ class DeviceContext { int current_queue_id(); int WorkgroupSize(int id); +#ifdef USE_GREENTEA + viennacl::ocl::program &program(); + void SetProgram(); +#endif // USE_GREENTEA + template shared_ptr< Blob > Buffer(int id); @@ -57,6 +62,9 @@ class DeviceContext { size_t peak_memory_usage_; std::vector< shared_ptr< Blob > > buff_f_; std::vector< shared_ptr< Blob > > buff_d_; +#ifdef USE_GREENTEA + viennacl::ocl::program ocl_program_; +#endif // USE_GREENTEA }; } // namespace caffe diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index efff0a8358b..4018c6e6358 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -21,24 +22,27 @@ namespace caffe { // Make sure each thread can have different values. static boost::thread_specific_ptr thread_instance_; -static shared_ptr global_instance_; +// Pointer to the global instance of Caffe +static Caffe* global_instance_; +static std::atomic first(true); -Caffe& Caffe::Get(bool get_global) { - if (!global_instance_.get()) { +// Device contexts are initialized once and shared on all threads +std::vector< shared_ptr > Caffe::device_contexts_; + +Caffe& Caffe::Get() { + if (first.exchange(false)) { // The first call must be single threaded - global_instance_.reset(new Caffe()); + // and defines the global instance + thread_instance_.reset(new Caffe()); + global_instance_ = thread_instance_.get(); } - - if (get_global) { - return *(global_instance_.get()); - } else { - if (!thread_instance_.get()) { - // Every thread initially gets a copy of the global initialization - // Later, every thread can switch to a different default device - thread_instance_.reset(new Caffe(*(global_instance_.get()))); - } - return *(thread_instance_.get()); + if (!thread_instance_.get()) { + // Every thread initially gets a copy of the global initialization. + // Later, every thread can switch to a different default device + // or change other aspects of the Caffe object + thread_instance_.reset(new Caffe(*global_instance_)); } + return *(thread_instance_.get()); } // random seeding @@ -71,8 +75,11 @@ void GlobalInit(int* pargc, char*** pargv) { } DeviceContext *Caffe::GetDeviceContext(int id) { - return id == -1 ? Get().default_device_context_: - Get().device_contexts_[id].get(); + // The default device context is thread-local + // The list of device contexts is global + return + id == -1 ? + Get().default_device_context_ : Get().device_contexts_[id].get(); } DeviceContext *Caffe::GetDefaultDeviceContext() { @@ -86,11 +93,11 @@ DeviceContext *Caffe::GetCPUDeviceContext() { // Copy constructor for thread-local copy Caffe::Caffe(const Caffe &obj) { mode_ = obj.mode_; - device_contexts_ = obj.device_contexts_; default_device_context_ = obj.default_device_context_; cpu_device_context_ = obj.cpu_device_context_; root_solver_ = obj.root_solver_; solver_count_ = obj.solver_count_; + // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). #ifdef USE_CUDA @@ -99,10 +106,11 @@ Caffe::Caffe(const Caffe &obj) { } // Try to create a curand handler. if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT) - != CURAND_STATUS_SUCCESS || - curandSetPseudoRandomGeneratorSeed(curand_generator_, cluster_seedgen()) - != CURAND_STATUS_SUCCESS) { - LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; + != CURAND_STATUS_SUCCESS + || curandSetPseudoRandomGeneratorSeed(curand_generator_, + cluster_seedgen()) + != CURAND_STATUS_SUCCESS) { + LOG(ERROR)<< "Cannot create Curand generator. Curand won't be available."; } #endif // USE_CUDA } @@ -110,6 +118,7 @@ Caffe::Caffe(const Caffe &obj) { void Caffe::SelectDevice(DeviceContext* device_context) { #ifndef CPU_ONLY Get().default_device_context_ = device_context; + if (device_context->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaSetDevice(device_context->id())); @@ -122,15 +131,14 @@ void Caffe::SelectDevice(DeviceContext* device_context) { #endif // !CPU_ONLY } - #ifdef CPU_ONLY // CPU-only Caffe. Caffe::Caffe() : random_generator_(), - mode_(Caffe::CPU), - default_device_context_(nullptr), - solver_count_(1), - root_solver_(true) {} +mode_(Caffe::CPU), +default_device_context_(nullptr), +solver_count_(1), +root_solver_(true) {} Caffe::~Caffe() {} @@ -185,11 +193,9 @@ Caffe::Caffe() curand_generator_(NULL), #endif // USE_CUDA random_generator_(), - mode_(Caffe::CPU), - default_device_context_(nullptr), + mode_(Caffe::CPU), default_device_context_(nullptr), cpu_device_context_(new DeviceContext(-1, -1, Backend::BACKEND_CPU)), - solver_count_(1), - root_solver_(true) { + solver_count_(1), root_solver_(true) { // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). #ifdef USE_CUDA @@ -209,7 +215,9 @@ Caffe::Caffe() Caffe::~Caffe() { // Make sure all device contexts and // dependent memory blocks are freed properly - device_contexts_.clear(); + if (this == global_instance_) { + device_contexts_.clear(); + } #ifdef USE_CUDA if (cublas_handle_) CUBLAS_CHECK(cublasDestroy(cublas_handle_)); @@ -277,7 +285,7 @@ int Caffe::EnumerateDevices(bool silent) { platforms_type platforms = viennacl::ocl::get_platforms(); std::vector> platform_devices; + viennacl::ocl::device>> platform_devices; // Loop through devices for (std::size_t platform_id = 0; platform_id < platforms.size(); @@ -348,10 +356,7 @@ int Caffe::EnumerateDevices(bool silent) { void Caffe::SetDevices(std::vector device_ids) { int initcount = 0; - Get(true).device_contexts_.clear(); -#ifdef USE_GREENTEA - Get(true).ocl_programs_.clear(); -#endif + Get().device_contexts_.clear(); int cuda_device_count = 0; #ifdef USE_CUDA cudaGetDeviceCount(&cuda_device_count); @@ -361,16 +366,16 @@ void Caffe::SetDevices(std::vector device_ids) { if (device_ids[j] == i) { shared_ptr device( new DeviceContext(i, initcount, Backend::BACKEND_CUDA)); - Get(true).device_contexts_.emplace_back(device); - Caffe::GetDeviceContext(initcount)->Init(); + Get().device_contexts_.emplace_back(device); + device->Init(); + ++initcount; + } else { + // Temporary until device abstraction is done + shared_ptr device(new DeviceContext()); + Get().device_contexts_.emplace_back(device); ++initcount; } } -#ifdef USE_GREENTEA - // Dummy to have same vector size as device contexts - viennacl::ocl::program program; - Get(true).ocl_programs_.push_back(program); -#endif // USE_GREENTEA } // Initialize GreenTea devices @@ -380,8 +385,8 @@ void Caffe::SetDevices(std::vector device_ids) { typedef std::vector platforms_type; platforms_type platforms = viennacl::ocl::get_platforms(); - std::vector> platform_devices; + std::vector< std::tuple > platform_devices; // Loop through devices for (std::size_t platform_id = 0; platform_id < platforms.size(); @@ -389,46 +394,32 @@ void Caffe::SetDevices(std::vector device_ids) { typedef std::vector devices_type; try { devices_type devices = platforms[platform_id].devices( - CL_DEVICE_TYPE_ALL); - for (std::size_t device_id = 0; device_id < devices.size(); - ++device_id) { + CL_DEVICE_TYPE_ALL); + for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { platform_devices.push_back( std::make_tuple(platforms[platform_id], devices[device_id])); // Check if this device is really used and initialize - bool is_used = false; for (int i = 0; i < device_ids.size(); ++i) { int device_id = device_ids[i]; if (device_id == cuda_device_count + greentea_device_count) { // Setup actual context and compile kernels for this device viennacl::ocl::setup_context( - device_id, std::get<1>( - platform_devices[greentea_device_count])); - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - static_cast(device_id)); - viennacl::ocl::program & program = RegisterKernels(&ctx); - Get(true).ocl_programs_.push_back(program); - // viennacl::ocl::switch_context(device_id); - // viennacl::ocl::switch_device(std::get<1> - // (platform_devices[device_id - cuda_device_count])); - - // Add defined number of queues - for (int q = 0; q < GREENTEA_QUEUE_COUNT - 1; ++q) { - ctx.add_queue(ctx.current_device()); - } + device_id, + std::get<1>(platform_devices[greentea_device_count])); + shared_ptr device( - new DeviceContext(cuda_device_count + greentea_device_count, - initcount, Backend::BACKEND_OpenCL)); - Get(true).device_contexts_.emplace_back(device); - Caffe::GetDeviceContext(initcount)->Init(); + new DeviceContext(device_id, + initcount, Backend::BACKEND_OpenCL)); + Get().device_contexts_.emplace_back(device); + device->Init(); + ++initcount; + } else { + // Temporary until device abstraction is done + shared_ptr device(new DeviceContext()); + Get().device_contexts_.emplace_back(device); ++initcount; - is_used = true; } } - // Device not used, dummy - if (!is_used) { - viennacl::ocl::program program; - Get(true).ocl_programs_.push_back(program); - } greentea_device_count++; } } catch (...) { @@ -442,7 +433,10 @@ void Caffe::SetDevices(std::vector device_ids) { #ifdef USE_GREENTEA viennacl::ocl::program & Caffe::GetDeviceProgram(int id) { - return id == -1 ? Get().default_ocl_program_ : Get().ocl_programs_[id]; + return + id == -1 ? + Get().default_device_context_->program() : + Get().GetDeviceContext(id)->program(); } #endif // USE_GREENTEA diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index 341fb53c481..1a17d96a2b9 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -10,6 +10,10 @@ #include "caffe/greentea/greentea.hpp" #include "caffe/util/device_alternate.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/cl_kernels.hpp" +#endif // USE_GREENTEA + namespace caffe { DeviceContext::DeviceContext() @@ -32,13 +36,21 @@ void DeviceContext::Init() { #endif // USE_CUDA } else { #ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context(id_); + std::vector temp(3); - clGetDeviceInfo(viennacl::ocl::get_context(id_).devices()[0].id(), + clGetDeviceInfo(ctx.devices()[0].id(), CL_DEVICE_MAX_WORK_ITEM_SIZES, sizeof(size_t), &temp[0], NULL); workgroup_sizes_[0] = temp[0]; workgroup_sizes_[1] = temp[1]; workgroup_sizes_[2] = temp[2]; + + SetProgram(); + + for (int q = 0; q < GREENTEA_QUEUE_COUNT - 1; ++q) { + ctx.add_queue(ctx.devices()[0]); + } #endif // USE_GREENTEA } #endif // !CPU_ONLY @@ -116,16 +128,15 @@ void DeviceContext::FinishQueues() { #ifdef USE_CUDA #endif // USE_CUDA } else { - #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(id_); +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context(id_); for (int i = 0; i < num_queues(); ++i) { - ctx.switch_queue(i); + ctx.switch_queue(0); ctx.get_queue().finish(); } ctx.switch_queue(0); current_queue_id_ = 0; - #endif // USE_GREENTEA +#endif // USE_GREENTEA } } @@ -152,4 +163,15 @@ void DeviceContext::ResetPeakMemoryUsage() { peak_memory_usage_ = memory_usage_; } +#ifdef USE_GREENTEA +viennacl::ocl::program &DeviceContext::program() { + return ocl_program_; + } + +void DeviceContext::SetProgram() { + ocl_program_ = RegisterKernels(&(viennacl::ocl::get_context( + static_cast(id_)))); +} +#endif // USE_GREENTEA + } // namespace caffe diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 71f7b2cd35c..d7b000ebca2 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -63,6 +63,7 @@ void greentea_memset(const int ctx_id, const size_t N, const int alpha, cl_mem X, const int offX) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + // OpenCL Version >= 1.2 approach // clEnqueueFillBuffer(ctx.get_queue().handle().get(), X, &alpha, sizeof(int), // offX, N, 0, NULL, NULL); diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index 3c5f03c4715..fb6df8cf17b 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -40,7 +40,7 @@ int main(int argc, char** argv) { } cout << "Setting to use device " << device << endl; Caffe::SetDevices(std::vector{device}); - Caffe::SetDevice(0); + Caffe::SetDevice(device); #endif // invoke the test. return RUN_ALL_TESTS(); diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 9e51818620d..f6769b906ad 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -114,7 +114,7 @@ int device_query() { get_gpus(&gpus); Caffe::SetDevices(gpus); for (int i = 0; i < gpus.size(); ++i) { - caffe::Caffe::SetDevice(i); + caffe::Caffe::SetDevice(gpus[i]); caffe::Caffe::DeviceQuery(); } #endif // !CPU_ONLY @@ -176,7 +176,7 @@ int train() { solver_param.set_device_id(gpus[0]); // Initialize the first device - Caffe::SetDevice(0); + Caffe::SetDevice(gpus[0]); Caffe::set_mode(Caffe::GPU); Caffe::set_solver_count(gpus.size()); #endif // !CPU_ONLY @@ -195,7 +195,7 @@ int train() { caffe::P2PSync sync(solver, NULL, solver->param()); std::vector devices; for (int i = 0; i < gpus.size(); ++i) { - devices.push_back(Caffe::Get(true).GetDeviceContext(i)); + devices.push_back(Caffe::Get().GetDeviceContext(i)); } sync.run(devices); } else { @@ -221,7 +221,7 @@ int test() { LOG(INFO) << "Use GPU with device ID " << gpus[0]; Caffe::SetDevices(gpus); Caffe::set_mode(Caffe::GPU); - Caffe::SetDevice(0); + Caffe::SetDevice(gpus[0]); #endif // !CPU_ONLY } else { LOG(INFO) << "Use CPU."; @@ -291,7 +291,7 @@ int time() { LOG(INFO) << "Use GPU with device ID " << gpus[0]; Caffe::SetDevices(gpus); Caffe::set_mode(Caffe::GPU); - Caffe::SetDevice(0); + Caffe::SetDevice(gpus[0]); #endif // !CPU_ONLY } else { LOG(INFO) << "Use CPU."; From 2806b26006f66a92916427c35cd2710fcad08a6f Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 16 Aug 2015 15:01:12 +0200 Subject: [PATCH 165/254] Python GPU device initialization compability fix. --- src/caffe/common.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 4018c6e6358..856ee3b7bdd 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -441,6 +441,13 @@ viennacl::ocl::program & Caffe::GetDeviceProgram(int id) { #endif // USE_GREENTEA void Caffe::SetDevice(const int device_id) { + // Fix for compability to python and other interfaces that do not + // know or call SetDevices directly + if (Get().device_contexts_.size() == 0) { + // No device has been initialized so far + Caffe::SetDevices(std::vector { device_id }); + } + Get().default_device_context_ = GetDeviceContext(device_id); if (Get().default_device_context_->backend() == Backend::BACKEND_CUDA) { From e8eaeecc45018d4855b3018571dab650f30c1e33 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 17 Aug 2015 03:43:45 +0200 Subject: [PATCH 166/254] cuDNN compability fix. --- src/caffe/layer_factory.cpp | 2 +- src/caffe/test/test_pooling_layer.cpp | 42 +++++++++++++-------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 5c4262293d6..1fb19d4cffb 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -66,7 +66,7 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { PoolingParameter p_param = param.pooling_param(); - if (p_param.pad() || p_param.pad_h() || p_param.pad_w() || + if (p_param.pad(0) || p_param.pad_h() || p_param.pad_w() || param.top_size() > 1) { LOG(INFO) << "CUDNN does not support padding or multiple tops. " << "Using Caffe's own pooling layer."; diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 4a43772f9d3..89dd6060c3e 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -417,8 +417,8 @@ TYPED_TEST(PoolingLayerTest, TestSetupGlobalPooling) { /* TYPED_TEST(PoolingLayerTest, PrintBackward) { LayerParameter layer_param; - layer_param.set_kernelsize(3); - layer_param.set_stride(2); + layer_param.add_kernel_size(3); + layer_param.add_stride(2); layer_param.set_pool(LayerParameter_PoolMethod_MAX); PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -638,7 +638,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { void TestForwardSquare() { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(2); + pooling_param->add_kernel_size(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); const int num = 2; const int channels = 2; @@ -966,8 +966,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); CuDNNPoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); @@ -981,9 +981,9 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); - pooling_param->set_pad(1); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); + pooling_param->add_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); CuDNNPoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -1047,9 +1047,9 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); + pooling_param->add_stride(2); // currenty, cuDNN pooling does not support padding - pooling_param->set_pad(0); + pooling_param->add_pad(0); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); @@ -1064,9 +1064,9 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(2); - pooling_param->set_pad(2); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(2); + pooling_param->add_pad(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); this->blob_bottom_->Reshape(1, 1, 3, 3); // Input: @@ -1114,8 +1114,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); - pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + pooling_param->add_stride(2); + pooling_param->add_pool(PoolingParameter_PoolMethod_MAX); this->blob_top_vec_.push_back(this->blob_top_mask_); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); @@ -1131,11 +1131,11 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->set_kernel_size(3); - pooling_param->set_stride(1); + pooling_param->add_kernel_size(3); + pooling_param->add_stride(1); // Currently, cuDNN pooling does not support padding, so we use // a simplified version of this test. - pooling_param->set_pad(0); + pooling_param->add_pad(0); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); this->blob_bottom_->Reshape(1, 1, 3, 3); FillerParameter filler_param; @@ -1162,7 +1162,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); + pooling_param->add_stride(2); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); @@ -1181,8 +1181,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); pooling_param->set_kernel_w(kernel_w); - pooling_param->set_stride(2); - pooling_param->set_pad(2); + pooling_param->add_stride(2); + pooling_param->add_pad(2); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); From ced83da0a8e494aa9caf662d1f61a23c61bfb572 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 20 Aug 2015 01:40:27 +0200 Subject: [PATCH 167/254] Fix includes for Greentea build with CMake. --- include/caffe/parallel.hpp | 4 ++++ src/caffe/parallel.cpp | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp index 74711ce1680..90b0d7c0ca5 100644 --- a/include/caffe/parallel.hpp +++ b/include/caffe/parallel.hpp @@ -1,6 +1,10 @@ #ifndef CAFFE_PARALLEL_HPP_ #define CAFFE_PARALLEL_HPP_ +#ifdef CMAKE_BUILD + #include "caffe_config.h" +#endif + #include #include diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index 0b1c1d3a6dc..6a0ddfabdb1 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -1,3 +1,7 @@ +#ifdef CMAKE_BUILD + #include "caffe_config.h" +#endif + #ifndef CPU_ONLY #ifdef USE_CUDA #include From 2d24b88aaa71ecf1938b0e155a1dc73a708294a4 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 20 Aug 2015 22:10:29 +0200 Subject: [PATCH 168/254] CPU only fix. --- include/caffe/common.hpp | 2 +- src/caffe/common.cpp | 5 +++-- src/caffe/device_context.cpp | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 62854f34b63..0c2812f3e99 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -203,8 +203,8 @@ class Caffe { // The shared ptrs are being referenced on every thread, // while the default device will be handled thread local static vector > device_contexts_; - DeviceContext* default_device_context_; shared_ptr cpu_device_context_; + DeviceContext* default_device_context_; int solver_count_; bool root_solver_; diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 856ee3b7bdd..f5baf0749ad 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -123,7 +123,7 @@ void Caffe::SelectDevice(DeviceContext* device_context) { #ifdef USE_CUDA CUDA_CHECK(cudaSetDevice(device_context->id())); #endif // USE_CUDA - } else { + } else if (device_context->backend() == Backend::BACKEND_OpenCL) { #ifdef USE_GREENTEA #endif // USE_GREENTEA @@ -193,8 +193,9 @@ Caffe::Caffe() curand_generator_(NULL), #endif // USE_CUDA random_generator_(), - mode_(Caffe::CPU), default_device_context_(nullptr), + mode_(Caffe::CPU), cpu_device_context_(new DeviceContext(-1, -1, Backend::BACKEND_CPU)), + default_device_context_(cpu_device_context_.get()), solver_count_(1), root_solver_(true) { // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index 1a17d96a2b9..742bd5b56fa 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -18,7 +18,7 @@ namespace caffe { DeviceContext::DeviceContext() : current_queue_id_(0), workgroup_sizes_(3, 0), id_(0), - list_id_(0), backend_(Backend::BACKEND_CUDA), + list_id_(0), backend_(Backend::BACKEND_CPU), memory_usage_(0), peak_memory_usage_(0) { } From e828cf27616772b119897830df49eb5638568a03 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 21 Aug 2015 00:44:27 +0200 Subject: [PATCH 169/254] Critical bugfix with OpenCL queues. --- src/caffe/device_context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index 742bd5b56fa..14b40d61628 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -131,7 +131,7 @@ void DeviceContext::FinishQueues() { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(id_); for (int i = 0; i < num_queues(); ++i) { - ctx.switch_queue(0); + ctx.switch_queue(i); ctx.get_queue().finish(); } ctx.switch_queue(0); From f84c2a4fb8d633bc7d8fc9771eb06a3cf2215212 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sun, 23 Aug 2015 02:38:03 +0200 Subject: [PATCH 170/254] Catching up with BVLC master. --- include/caffe/solver.hpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 588c502f22a..99694f5cec8 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -10,9 +10,6 @@ namespace caffe { /** -<<<<<<< HEAD - * @brief An interface for classes that perform optimization on Nets. -======= * @brief Enumeration of actions that a client of the Solver may request by * implementing the Solver's action request function, which a * a client may optionally provide in order to request early termination @@ -36,7 +33,6 @@ typedef boost::function ActionCallback; /** * @brief An interface for classes that perform optimization on Net%s. ->>>>>>> 0dfc5dac3d8bf17f833e21ae6ce7bc3ea19a03fa * * Requires implementation of ApplyUpdate to compute a parameter update * given the current state of the Net parameters. @@ -124,9 +120,6 @@ class Solver { // in data parallelism const Solver* const root_solver_; -<<<<<<< HEAD -DISABLE_COPY_AND_ASSIGN(Solver); -======= // A function that can be set by a client of the Solver to provide indication // that it wants a snapshot saved and/or to exit early. ActionCallback action_request_function_; @@ -135,7 +128,6 @@ DISABLE_COPY_AND_ASSIGN(Solver); bool requested_early_exit_; DISABLE_COPY_AND_ASSIGN(Solver); ->>>>>>> 0dfc5dac3d8bf17f833e21ae6ce7bc3ea19a03fa }; From ac9d92a97a87d96ce178dadc803144dfb7c80fb0 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 26 Aug 2015 01:06:00 +0200 Subject: [PATCH 171/254] Synchronization bugfix. --- src/caffe/layers/conv_layer.cu | 2 ++ tools/caffe.cpp | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 5e35c454e41..5c4e17df5be 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -65,6 +65,8 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { + // Multi queue execution, all previous work needs to be done first + this->device_context_->FinishQueues(); for (int n = 0; n < this->num_; ++n) { // Multi queue execution, go through work queues this->device_context_->SwitchQueue(n); diff --git a/tools/caffe.cpp b/tools/caffe.cpp index e1909fe74b6..fa3ee5c74c0 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -364,7 +364,7 @@ int time() { for (int i = 0; i < layers.size(); ++i) { timer.Start(); layers[i]->Forward(bottom_vecs[i], top_vecs[i]); - Caffe::Synchronize(0); + Caffe::Synchronize(Caffe::GetDefaultDeviceContext()->id()); forward_time_per_layer[i] += timer.MicroSeconds(); } forward_time += forward_timer.MicroSeconds(); @@ -373,7 +373,7 @@ int time() { timer.Start(); layers[i]->Backward(top_vecs[i], bottom_need_backward[i], bottom_vecs[i]); - Caffe::Synchronize(0); + Caffe::Synchronize(Caffe::GetDefaultDeviceContext()->id()); backward_time_per_layer[i] += timer.MicroSeconds(); } backward_time += backward_timer.MicroSeconds(); From eb2bf8daa987c2ef5c6a51739a13a342119d5be5 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 26 Aug 2015 03:08:57 +0200 Subject: [PATCH 172/254] Embed layer OpenCL preparations. Will still fail during runtest. OpenCL kernels missing. --- src/caffe/layers/embed_layer.cu | 72 +++++++++++++++++++++++++---- src/caffe/proto/caffe.proto | 2 +- src/caffe/test/test_embed_layer.cpp | 4 -- 3 files changed, 64 insertions(+), 14 deletions(-) diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu index 672fb9c608c..4081210cd45 100644 --- a/src/caffe/layers/embed_layer.cu +++ b/src/caffe/layers/embed_layer.cu @@ -5,11 +5,20 @@ #include "caffe/common_layers.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" +#ifdef USE_CUDA #include "caffe/util/gpu_util.cuh" +#endif // USE_CUDA #include "caffe/util/math_functions.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + + namespace caffe { +#ifdef USE_CUDA template __global__ void EmbedForward(const int nthreads, const Dtype* bottom_data, const Dtype* weight, const int M, const int N, const int K, @@ -40,6 +49,7 @@ __global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data, caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index); } } +#endif // USE_CUDA template void EmbedLayer::Forward_gpu(const vector*>& bottom, @@ -48,14 +58,33 @@ void EmbedLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const int count = top[0]->count(); - EmbedForward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - count, bottom_data, weight, M_, N_, K_, top_data); - if (bias_term_) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1), - bias_multiplier_.gpu_data(), - this->blobs_[1]->gpu_data(), Dtype(1), top_data); - } + + if(this->device_context()->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + + EmbedForward // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, weight, M_, N_, K_, top_data); + if (bias_term_) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1), + bias_multiplier_.gpu_data(), + this->blobs_[1]->gpu_data(), Dtype(1), top_data); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + // TODO: Implement OpenCL kernel + + if (bias_term_) { + greentea_gpu_gemm(this->device_context()->id(), CblasNoTrans, + CblasNoTrans, M_, N_, 1, Dtype(1), + (cl_mem) (bias_multiplier_.gpu_data()), 0, + (cl_mem) (this->blobs_[1]->gpu_data()), 0, + Dtype(1), (cl_mem) top_data, 0); + } + +#endif // USE_GREENTEA + } } template @@ -68,18 +97,43 @@ void EmbedLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + + if(this->device_context()->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA EmbedBackward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( + CUDA_KERNEL(CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS)( top_count, bottom_data, top_diff, M_, N_, K_, weight_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + // TODO: Implement OpenCL kernel +#endif // USE_GREENTEA + } + } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); + + if(this->device_context()->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA caffe_gpu_gemv(CblasTrans, M_, N_, Dtype(1), top_diff, bias_multiplier_.gpu_data(), Dtype(1), bias_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_gemv(this->device_context()->id(), CblasTrans, M_, N_, + Dtype(1), (cl_mem) top_diff, 0, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + Dtype(1), (cl_mem) bias_diff, 0); +#endif // USE_GREENTEA + } + + } } INSTANTIATE_LAYER_GPU_FUNCS(EmbedLayer); } // namespace caffe + diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 8f2300262db..56fb504a6cc 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -388,8 +388,8 @@ message LayerParameter { optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; - optional AffinityParameter affinity_param = 137; optional MergeCropParameter mergecrop_param = 138; + optional AffinityParameter affinity_param = 139; } // Message that stores parameters used to apply transformation diff --git a/src/caffe/test/test_embed_layer.cpp b/src/caffe/test/test_embed_layer.cpp index 7a4fb9800f2..577079867a7 100644 --- a/src/caffe/test/test_embed_layer.cpp +++ b/src/caffe/test/test_embed_layer.cpp @@ -13,10 +13,6 @@ namespace caffe { -#ifndef CPU_ONLY -extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; -#endif - template class EmbedLayerTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; From 78c4b813589607237a7bb7cdd4275f9f7e610339 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 26 Aug 2015 03:19:17 +0200 Subject: [PATCH 173/254] OpenCL/CPU fix. --- src/caffe/layers/embed_layer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/layers/embed_layer.cpp b/src/caffe/layers/embed_layer.cpp index be6b2cd2727..e4891f3f148 100644 --- a/src/caffe/layers/embed_layer.cpp +++ b/src/caffe/layers/embed_layer.cpp @@ -76,7 +76,7 @@ void EmbedLayer::Forward_cpu(const vector*>& bottom, DCHECK_GE(index, 0); DCHECK_LT(index, K_); DCHECK_EQ(static_cast(index), bottom_data[n]) << "non-integer input"; - caffe_copy(N_, weight + index * N_, top_data + n * N_); + caffe_cpu_copy(N_, weight + index * N_, top_data + n * N_); } if (bias_term_) { const Dtype* bias = this->blobs_[1]->cpu_data(); From 84757de6b16ba0947fee8901f6d57c9cc9f819ca Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 26 Aug 2015 03:21:18 +0200 Subject: [PATCH 174/254] Cleanup in embed_layer.cu. --- src/caffe/layers/embed_layer.cu | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu index 4081210cd45..74dc84991e5 100644 --- a/src/caffe/layers/embed_layer.cu +++ b/src/caffe/layers/embed_layer.cu @@ -32,11 +32,6 @@ __global__ void EmbedForward(const int nthreads, const Dtype* bottom_data, } } -template -__global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data, - const Dtype* top_diff, const int M, const int N, const int K, - Dtype* weight_diff); - template __global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data, const Dtype* top_diff, const int M, const int N, const int K, From 765f5b15f5a1dbf6fa21e292c88b95f159d16377 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 26 Aug 2015 16:16:08 +0200 Subject: [PATCH 175/254] EmbedLayer and TileLayer support in OpenCL. --- src/caffe/greentea/cl_headers/header.cl | 6 +++ src/caffe/greentea/cl_kernels.cpp | 12 ++++- src/caffe/greentea/cl_kernels/embed.cl | 68 +++++++++++++++++++++++++ src/caffe/greentea/cl_kernels/tile.cl | 39 ++++++++++++++ src/caffe/layers/embed_layer.cu | 26 +++++++++- src/caffe/layers/tile_layer.cu | 61 +++++++++++++++++++--- src/caffe/proto/caffe.proto | 4 +- 7 files changed, 204 insertions(+), 12 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/embed.cl create mode 100644 src/caffe/greentea/cl_kernels/tile.cl diff --git a/src/caffe/greentea/cl_headers/header.cl b/src/caffe/greentea/cl_headers/header.cl index 19c4325eaf6..ddf296b3a8a 100644 --- a/src/caffe/greentea/cl_headers/header.cl +++ b/src/caffe/greentea/cl_headers/header.cl @@ -15,6 +15,7 @@ #define CLK_LOCAL_MEM_FENCE #define Dtype float #define barrier(x) +#define atomic_cmpxchg(x, y, z) x #endif #define CONCAT(A,B) A##_##B @@ -28,3 +29,8 @@ #pragma OPENCL EXTENSION cl_amd_fp64 : enable #define DOUBLE_SUPPORT_AVAILABLE #endif + +#if defined(cl_khr_int64_base_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#define ATOMICS_64_AVAILABLE +#endif diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index e1e46692c24..6330346b861 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -5,8 +5,8 @@ #include #include namespace caffe { -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; // NOLINT -std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif"; // NOLINT +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT @@ -15,6 +15,7 @@ std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (Dtype == float)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#else\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#endif\n#endif\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT @@ -27,6 +28,7 @@ std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n const int tile_size, const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int n = index / tile_size / num_tiles / bottom_tile_axis;\n const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int tile_size,\n const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size) % bottom_tile_axis;\n const int n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT @@ -35,6 +37,7 @@ std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (Dtype == float)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#else\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#endif\n#endif\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT @@ -47,6 +50,7 @@ std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.c std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n const int tile_size, const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int n = index / tile_size / num_tiles / bottom_tile_axis;\n const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int tile_size,\n const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size) % bottom_tile_axis;\n const int n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; ss << header << "\n\n"; // NOLINT @@ -59,6 +63,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << contrastive_loss_float << "\n\n"; // NOLINT ss << dropout_float << "\n\n"; // NOLINT ss << eltwise_float << "\n\n"; // NOLINT + ss << embed_float << "\n\n"; // NOLINT ss << fillbuffer_float << "\n\n"; // NOLINT ss << im2col_float << "\n\n"; // NOLINT ss << im2col_nd_float << "\n\n"; // NOLINT @@ -71,6 +76,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << pooling_sk_float << "\n\n"; // NOLINT ss << slice_float << "\n\n"; // NOLINT ss << softmax_loss_float << "\n\n"; // NOLINT + ss << tile_float << "\n\n"; // NOLINT ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; // NOLINT ss << "#undef Dtype" << "\n\n"; // NOLINT ss << "#define Dtype double" << "\n\n"; // NOLINT @@ -82,6 +88,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << contrastive_loss_double << "\n\n"; // NOLINT ss << dropout_double << "\n\n"; // NOLINT ss << eltwise_double << "\n\n"; // NOLINT + ss << embed_double << "\n\n"; // NOLINT ss << fillbuffer_double << "\n\n"; // NOLINT ss << im2col_double << "\n\n"; // NOLINT ss << im2col_nd_double << "\n\n"; // NOLINT @@ -94,6 +101,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << pooling_sk_double << "\n\n"; // NOLINT ss << slice_double << "\n\n"; // NOLINT ss << softmax_loss_double << "\n\n"; // NOLINT + ss << tile_double << "\n\n"; // NOLINT ss << "#endif" << "\n\n"; std::string kernel_string = ss.str(); const char* kernel_program = kernel_string.c_str(); diff --git a/src/caffe/greentea/cl_kernels/embed.cl b/src/caffe/greentea/cl_kernels/embed.cl new file mode 100644 index 00000000000..5303c95b282 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/embed.cl @@ -0,0 +1,68 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* weight, + const int M, const int N, + const int K, + __global Dtype* top_data) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + top_data[top_index] = weight[weight_index]; + } + } + +// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html +#if (Dtype == float) +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned int intVal; + Dtype floatVal; + } newVal; + union { + unsigned int intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#else +#ifdef ATOMICS_64_AVAILABLE +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned long intVal; + Dtype floatVal; + } newVal; + union { + unsigned long intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#endif +#endif + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} diff --git a/src/caffe/greentea/cl_kernels/tile.cl b/src/caffe/greentea/cl_kernels/tile.cl new file mode 100644 index 00000000000..0332503f6fe --- /dev/null +++ b/src/caffe/greentea/cl_kernels/tile.cl @@ -0,0 +1,39 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + + +__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, + const int tile_size, const int num_tiles, + const int bottom_tile_axis, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = bottom_data[bottom_index]; + } +} + + +__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int tile_size, + const int num_tiles, + const int bottom_tile_axis, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; + } + } +} diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu index 74dc84991e5..f10e17ac206 100644 --- a/src/caffe/layers/embed_layer.cu +++ b/src/caffe/layers/embed_layer.cu @@ -68,7 +68,18 @@ void EmbedLayer::Forward_gpu(const vector*>& bottom, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - // TODO: Implement OpenCL kernel + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + viennacl::ocl::kernel &oclk_embed = program.get_kernel( + CL_KERNEL_SELECT("embed_forward")); + viennacl::ocl::enqueue( + oclk_embed(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) weight, &ctx), M_, N_, K_, + WrapHandle((cl_mem) top_data, &ctx)), + ctx.get_queue()); if (bias_term_) { greentea_gpu_gemm(this->device_context()->id(), CblasNoTrans, @@ -101,7 +112,18 @@ void EmbedLayer::Backward_gpu(const vector*>& top, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - // TODO: Implement OpenCL kernel + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + viennacl::ocl::kernel &oclk_embed = program.get_kernel( + CL_KERNEL_SELECT("embed_backward")); + viennacl::ocl::enqueue( + oclk_embed(top_count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) top_diff, &ctx), M_, N_, K_, + WrapHandle((cl_mem) weight_diff, &ctx)), + ctx.get_queue()); #endif // USE_GREENTEA } diff --git a/src/caffe/layers/tile_layer.cu b/src/caffe/layers/tile_layer.cu index 7fd3bc47d0f..561e14bba72 100644 --- a/src/caffe/layers/tile_layer.cu +++ b/src/caffe/layers/tile_layer.cu @@ -4,8 +4,15 @@ #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + + namespace caffe { +#ifdef USE_CUDA template __global__ void Tile(const int nthreads, const Dtype* bottom_data, const int tile_size, const int num_tiles, const int bottom_tile_axis, @@ -18,6 +25,7 @@ __global__ void Tile(const int nthreads, const Dtype* bottom_data, top_data[index] = bottom_data[bottom_index]; } } +#endif // USE_CUDA template void TileLayer::Forward_gpu( @@ -26,11 +34,31 @@ void TileLayer::Forward_gpu( Dtype* top_data = top[0]->mutable_gpu_data(); const int bottom_tile_axis = bottom[0]->shape(axis_); const int nthreads = top[0]->count(); - Tile // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data); + if (this->device_context()->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + Tile // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + viennacl::ocl::kernel &oclk_tile = program.get_kernel( + CL_KERNEL_SELECT("tile")); + viennacl::ocl::enqueue( + oclk_tile(nthreads, WrapHandle((cl_mem) bottom_data, &ctx), inner_dim_, + tiles_, bottom_tile_axis, + WrapHandle((cl_mem) top_data, &ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } } +#ifdef USE_CUDA template __global__ void TileBackward(const int nthreads, const Dtype* top_diff, const int tile_size, const int num_tiles, const int bottom_tile_axis, @@ -47,6 +75,7 @@ __global__ void TileBackward(const int nthreads, const Dtype* top_diff, } } } +#endif // USE_CUDA template void TileLayer::Backward_gpu(const vector*>& top, @@ -57,9 +86,29 @@ void TileLayer::Backward_gpu(const vector*>& top, const int bottom_tile_axis = bottom[0]->shape(axis_); const int tile_size = inner_dim_ / bottom_tile_axis; const int nthreads = bottom[0]->count(); - TileBackward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff); + + if (this->device_context()->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + TileBackward // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( + nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + viennacl::ocl::kernel &oclk_tile = program.get_kernel( + CL_KERNEL_SELECT("tile_backward")); + viennacl::ocl::enqueue( + oclk_tile(nthreads, WrapHandle((cl_mem) top_diff, &ctx), tile_size, + tiles_, bottom_tile_axis, + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } } INSTANTIATE_LAYER_GPU_FUNCS(TileLayer); diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index cc751fbf491..2ab65b657eb 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -389,8 +389,8 @@ message LayerParameter { optional ThresholdParameter threshold_param = 128; optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; - optional MergeCropParameter mergecrop_param = 138; - optional AffinityParameter affinity_param = 139; + optional MergeCropParameter mergecrop_param = 139; + optional AffinityParameter affinity_param = 140; } // Message that stores parameters used to apply transformation From 522bf16a365b494013d66a66909ee71115e154d9 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Wed, 26 Aug 2015 23:42:56 +0200 Subject: [PATCH 176/254] Cleanup. --- src/caffe/layers/embed_layer.cu | 12 +++--------- src/caffe/layers/mvn_layer.cu | 16 +++++++++------- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu index f10e17ac206..31ee48dfbcc 100644 --- a/src/caffe/layers/embed_layer.cu +++ b/src/caffe/layers/embed_layer.cu @@ -53,8 +53,7 @@ void EmbedLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const int count = top[0]->count(); - - if(this->device_context()->backend() == BACKEND_CUDA) { + if (this->device_context()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA EmbedForward // NOLINT_NEXT_LINE(whitespace/operators) @@ -103,8 +102,7 @@ void EmbedLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - - if(this->device_context()->backend() == BACKEND_CUDA) { + if (this->device_context()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA EmbedBackward // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS)( @@ -126,13 +124,11 @@ void EmbedLayer::Backward_gpu(const vector*>& top, ctx.get_queue()); #endif // USE_GREENTEA } - } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - - if(this->device_context()->backend() == BACKEND_CUDA) { + if (this->device_context()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemv(CblasTrans, M_, N_, Dtype(1), top_diff, bias_multiplier_.gpu_data(), Dtype(1), bias_diff); @@ -145,8 +141,6 @@ void EmbedLayer::Backward_gpu(const vector*>& top, Dtype(1), (cl_mem) bias_diff, 0); #endif // USE_GREENTEA } - - } } diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index aa68e4df4a1..448b4635794 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -201,11 +201,13 @@ void MVNLayer::Backward_gpu(const vector*>& top, caffe_gpu_div(temp_.count(), bottom_diff, temp_.gpu_data(), bottom_diff); } else { caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, top_diff, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); + sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., - mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., - temp_.mutable_gpu_data()); - caffe_gpu_add(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff); + mean_.gpu_data(), sum_multiplier_.gpu_data(), 0., + temp_.mutable_gpu_data()); + caffe_gpu_add(temp_.count(), top_diff, temp_.gpu_data(), + bottom_diff); } #endif // USE_CUDA } else { @@ -269,9 +271,9 @@ void MVNLayer::Backward_gpu(const vector*>& top, (cl_mem) (mean_.gpu_data()), 0, (cl_mem) (sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_add(his->device_context_->id(), temp_.count(), - (cl_mem) top_diff, 0, (cl_mem) (temp_.gpu_data()), 0, - (cl_mem) (bottom_diff), 0); + greentea_gpu_add(this->device_context_->id(), temp_.count(), + (cl_mem) top_diff, 0, (cl_mem) (temp_.gpu_data()), + 0, (cl_mem) (bottom_diff), 0); } #endif // USE_GREENTEA } From cfbd0f69a62c5b656fff6b025925887aa60d65b5 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 27 Aug 2015 01:17:48 +0200 Subject: [PATCH 177/254] Device capability check for EmbedLayer (64 bit atomics). --- include/caffe/device_context.hpp | 2 + log.txt | 11916 ++++++++++++++++++++++ src/caffe/device_context.cpp | 52 +- src/caffe/greentea/cl_headers/header.cl | 2 + src/caffe/greentea/cl_kernels.cpp | 11 +- src/caffe/greentea/cl_kernels.sh | 3 + src/caffe/greentea/cl_kernels/embed.cl | 24 +- src/caffe/test/test_embed_layer.cpp | 12 + 8 files changed, 12000 insertions(+), 22 deletions(-) create mode 100644 log.txt diff --git a/include/caffe/device_context.hpp b/include/caffe/device_context.hpp index f10d4d77497..f095807d2b7 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device_context.hpp @@ -13,6 +13,7 @@ #endif #include +#include #include #include "caffe/blob.hpp" #include "caffe/greentea/greentea.hpp" @@ -51,6 +52,7 @@ class DeviceContext { void IncreaseMemoryUsage(size_t bytes); void DecreaseMemoryUsage(size_t bytes); void ResetPeakMemoryUsage(); + bool CheckCapability(std::string cap); private: int current_queue_id_; diff --git a/log.txt b/log.txt new file mode 100644 index 00000000000..948175bc454 --- /dev/null +++ b/log.txt @@ -0,0 +1,11916 @@ +Setting to use device 1 +Build Status = -2 ( Err = -42 ) +Log: ptxas application ptx input, line 10702; error : Call has wrong number of parameters +ptxas fatal : Ptx assembly aborted due to errors + +Sources: #ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define __constant +#define __local +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define get_local_id(x) 0 +#define get_local_size(x) 0 +#define FLT_MAX 0 +#define FLT_MIN 0 +#define cl_khr_fp64 +#define cl_amd_fp64 +#define DOUBLE_SUPPORT_AVAILABLE +#define CLK_LOCAL_MEM_FENCE +#define Dtype float +#define barrier(x) +#define atomic_cmpxchg(x, y, z) x +#endif + +#define CONCAT(A,B) A##_##B +#define TEMPLATE(name,type) CONCAT(name,type) + +#define TYPE_FLOAT 1 +#define TYPE_DOUBLE 2 + +#if defined(cl_khr_fp64) +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#elif defined(cl_amd_fp64) +#pragma OPENCL EXTENSION cl_amd_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#endif + +#if defined(cl_khr_int64_base_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#define ATOMICS_64_AVAILABLE +#endif + +#define Dtype float + +#define TYPE TYPE_FLOAT + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} + +__kernel void TEMPLATE(relu_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + } +} + +__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = tanh(in[index]); + } +} + +__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > threshold ? 1 : 0; + } +} + +__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in, + __global Dtype* out, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + } +} + +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, + __global const Dtype* in_diff, const int in_diff_off, + __global const Dtype* in_data, const int in_data_off, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + } +} + +__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff) { + Dtype kBNLL_THRESHOLD = 50.; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); + } + out[index] = maxval; + } +} + +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, + const int channels, + const int spatial_dim, + __global const Dtype* channel_max, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, + const int channels, const int spatial_dim, + __global const Dtype* channel_sum, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data_1, + __global const Dtype* data_2, + __global Dtype* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, + const int forward, const int num_concats, + const int concat_size, + const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, + __global Dtype* out_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward == 1) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, + const Dtype margin, const int legacy_version, + const Dtype alpha, __global const Dtype* y, + __global const Dtype* diff, __global const Dtype* dist_sq, + __global Dtype *bottom_diff) { + for (int i = get_global_id(0); i < count; + i += get_global_size(0)) { + int n = i / channels; // the num index, to access y and dist_sq + if ((int)(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist = 0.0; + Dtype beta = 0.0; + if (legacy_version == 1) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + 1e-4) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, + __global const Dtype* in, + __global const unsigned int* mask, + const unsigned int threshold, + const Dtype scale, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; + } +} + +__kernel void TEMPLATE(dropout_backward,Dtype)( + const int n, __global const Dtype* in_diff, + __global const unsigned int* mask, const unsigned int threshold, + const Dtype scale, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(eltwise_max_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int blob_idx, + __global Dtype* top_data, + __global int* mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int blob_idx, + __global const int* mask, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* weight, + const int M, const int N, + const int K, + __global Dtype* top_data) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + top_data[top_index] = weight[weight_index]; + } + } + +// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html +#if (TYPE == TYPE_FLOAT) +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned int intVal; + Dtype floatVal; + } newVal; + union { + unsigned int intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#if (TYPE == TYPE_DOUBLE) +#ifdef ATOMICS_64_AVAILABLE +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned long intVal; + Dtype floatVal; + } newVal; + union { + unsigned long intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#endif + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_col, const int data_col_off) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col + data_col_off; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_im_off; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_im, const int data_im_off) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + int offset = data_col_off + + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index + data_im_off] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_size[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + // Old: + /*d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int coeff_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; + } + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; + } + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, + __global const Dtype* data_im, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_offset; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + (*data_col_ptr) = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} + +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, + __global const Dtype* data_col, + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, + const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_im, + const int data_offset) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; + } + } + + data_im[data_offset + index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, + __global const Dtype* in, + __global const Dtype* scale, + const Dtype negative_beta, + __global Dtype* out) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + __global Dtype* const scale) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* in_off = in + offset; + __global Dtype* scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* top_data, + __global const Dtype* scale, + __global const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* bottom_off = bottom_data + offset; + __global const Dtype* top_off = top_data + offset; + __global const Dtype* scale_off = scale + offset; + __global Dtype* top_diff_off = top_diff + offset; + __global Dtype* bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] * b[index + offb]; + } +} + +__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] / b[index + offb]; + } +} + +__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__global Dtype* Y, + const int offY) { + for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + Y[offY + index] += alpha; + } +} + +__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] + b[offb + index]; + } +} + +__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] - b[offb + index]; + } +} + +__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = fabs((Dtype)(a[offa + index])); + } +} + +__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = exp(a[offa + index]); + } +} + +__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = log(a[offa + index]); + } +} + +__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, + const int offa, Dtype alpha, + __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + if(alpha == 2.0) { + y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); + } else { + y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha); + } + } +} + +__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = (0.0 < x[index + offx]) + - (x[index + offx] < 0.0); + } +} + +__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = signbit(x[index + offx]); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(merge_copy_forward, Dtype)( + const int nthreads, __global const Dtype* bottom_a, const int forward_a, + __global const Dtype* bottom_b, const int forward_b, + __global Dtype* top, + int num, int channels_a, int channels_b, int height_a, int width_a, + int height_b, int width_b) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = forward_a == 1 ? bottom_a[aidx] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + top[index] = forward_b == 1 ? bottom_b[bidx] : 0; + } + } + +} + +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, +__global Dtype* bottom_a, + int backward_a, + __global Dtype* bottom_b, + int backward_b, + __global const Dtype* top, + int num, int channels_a, + int channels_b, int height_a, + int width_a, int height_b, + int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = backward_a == 1 ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b == 1 ? top[index] : 0; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, __global int* mask, __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_slice[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(ave_pool_forward,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_train,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* rand_idx, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + offset; + if (use_mask == 1) { + __global const int* mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + __global const Dtype* top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* const top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(sto_pool_backward,Dtype)( + const int nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* rand_idx_slice = rand_idx + + (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] + * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* bottom_data, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* top_data, + const int use_mask, + __global int* mask, __global Dtype* top_mask) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (use_mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* bottom_diff) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (use_mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__global Dtype* bottom_data, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, + __global int* mask, + __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, (int) 0); + wstart = max(wstart, (int) 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global Dtype* bottom_data_ptr = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data_ptr[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data_ptr[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(max_pool_backward_sk,Dtype)( + const int nthreads, __global const Dtype* top_diff, const int use_mask, + __global const int* mask, __global const Dtype* top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* bottom_diff) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + __global const int* mask_ptr = mask; + __global const Dtype* top_diff_ptr = top_diff; + +// find out the local index +// find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff_ptr += offset; + if (use_mask == 1) { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask_ptr[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } else { + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data_ptr[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, __global Dtype* rand_idx, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data_ptr[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + cumvalues += bottom_data_ptr[h * width + w] + * bottom_data_ptr[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } + +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(slice,Dtype)(const int nthreads, + __global const Dtype* in_data, + const int forward, const int num_slices, + const int slice_size, + const int bottom_slice_axis, + const int top_slice_axis, + const int offset_slice_axis, + __global Dtype* out_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward == 1) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(softmax_loss_forward,Dtype)( + int n, __global const Dtype* prob_data, __global const Dtype* label, + __global Dtype* loss, + const int num, const int dim, const int spatial_dim, + const int has_ignore_label_, const int ignore_label_, + __global Dtype* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype) FLT_MIN)); + counts[index] = 1; + } + } +} + +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, + __global const Dtype* top, + __global const Dtype* label, + __global Dtype* bottom_diff, + const int num, + const int dim, + const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global Dtype* counts) { + + const int channels = dim / spatial_dim; + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + + +__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, + const int tile_size, const int num_tiles, + const int bottom_tile_axis, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = bottom_data[bottom_index]; + } +} + + +__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int tile_size, + const int num_tiles, + const int bottom_tile_axis, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; + } + } +} + +#ifdef DOUBLE_SUPPORT_AVAILABLE + +#undef Dtype + +#define Dtype double + +#undef TYPE + +#define TYPE TYPE_DOUBLE + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} + +__kernel void TEMPLATE(relu_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + } +} + +__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = tanh(in[index]); + } +} + +__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > threshold ? 1 : 0; + } +} + +__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in, + __global Dtype* out, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + } +} + +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, + __global const Dtype* in_diff, const int in_diff_off, + __global const Dtype* in_data, const int in_data_off, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + } +} + +__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff) { + Dtype kBNLL_THRESHOLD = 50.; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); + } + out[index] = maxval; + } +} + +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, + const int channels, + const int spatial_dim, + __global const Dtype* channel_max, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, + const int channels, const int spatial_dim, + __global const Dtype* channel_sum, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data_1, + __global const Dtype* data_2, + __global Dtype* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, + const int forward, const int num_concats, + const int concat_size, + const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, + __global Dtype* out_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward == 1) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, + const Dtype margin, const int legacy_version, + const Dtype alpha, __global const Dtype* y, + __global const Dtype* diff, __global const Dtype* dist_sq, + __global Dtype *bottom_diff) { + for (int i = get_global_id(0); i < count; + i += get_global_size(0)) { + int n = i / channels; // the num index, to access y and dist_sq + if ((int)(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist = 0.0; + Dtype beta = 0.0; + if (legacy_version == 1) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + 1e-4) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, + __global const Dtype* in, + __global const unsigned int* mask, + const unsigned int threshold, + const Dtype scale, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; + } +} + +__kernel void TEMPLATE(dropout_backward,Dtype)( + const int n, __global const Dtype* in_diff, + __global const unsigned int* mask, const unsigned int threshold, + const Dtype scale, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(eltwise_max_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int blob_idx, + __global Dtype* top_data, + __global int* mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int blob_idx, + __global const int* mask, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* weight, + const int M, const int N, + const int K, + __global Dtype* top_data) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + top_data[top_index] = weight[weight_index]; + } + } + +// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html +#if (TYPE == TYPE_FLOAT) +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned int intVal; + Dtype floatVal; + } newVal; + union { + unsigned int intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#if (TYPE == TYPE_DOUBLE) +#ifdef ATOMICS_64_AVAILABLE +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned long intVal; + Dtype floatVal; + } newVal; + union { + unsigned long intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#endif + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_col, const int data_col_off) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col + data_col_off; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_im_off; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_im, const int data_im_off) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + int offset = data_col_off + + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index + data_im_off] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_size[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + // Old: + /*d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int coeff_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; + } + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; + } + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, + __global const Dtype* data_im, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_offset; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + (*data_col_ptr) = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} + +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, + __global const Dtype* data_col, + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, + const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_im, + const int data_offset) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; + } + } + + data_im[data_offset + index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, + __global const Dtype* in, + __global const Dtype* scale, + const Dtype negative_beta, + __global Dtype* out) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + __global Dtype* const scale) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* in_off = in + offset; + __global Dtype* scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* top_data, + __global const Dtype* scale, + __global const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* bottom_off = bottom_data + offset; + __global const Dtype* top_off = top_data + offset; + __global const Dtype* scale_off = scale + offset; + __global Dtype* top_diff_off = top_diff + offset; + __global Dtype* bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] * b[index + offb]; + } +} + +__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] / b[index + offb]; + } +} + +__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__global Dtype* Y, + const int offY) { + for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + Y[offY + index] += alpha; + } +} + +__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] + b[offb + index]; + } +} + +__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] - b[offb + index]; + } +} + +__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = fabs((Dtype)(a[offa + index])); + } +} + +__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = exp(a[offa + index]); + } +} + +__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = log(a[offa + index]); + } +} + +__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, + const int offa, Dtype alpha, + __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + if(alpha == 2.0) { + y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); + } else { + y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha); + } + } +} + +__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = (0.0 < x[index + offx]) + - (x[index + offx] < 0.0); + } +} + +__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = signbit(x[index + offx]); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(merge_copy_forward, Dtype)( + const int nthreads, __global const Dtype* bottom_a, const int forward_a, + __global const Dtype* bottom_b, const int forward_b, + __global Dtype* top, + int num, int channels_a, int channels_b, int height_a, int width_a, + int height_b, int width_b) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = forward_a == 1 ? bottom_a[aidx] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + top[index] = forward_b == 1 ? bottom_b[bidx] : 0; + } + } + +} + +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, +__global Dtype* bottom_a, + int backward_a, + __global Dtype* bottom_b, + int backward_b, + __global const Dtype* top, + int num, int channels_a, + int channels_b, int height_a, + int width_a, int height_b, + int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = backward_a == 1 ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b == 1 ? top[index] : 0; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, __global int* mask, __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_slice[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(ave_pool_forward,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_train,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* rand_idx, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + offset; + if (use_mask == 1) { + __global const int* mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + __global const Dtype* top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* const top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(sto_pool_backward,Dtype)( + const int nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* rand_idx_slice = rand_idx + + (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] + * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* bottom_data, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* top_data, + const int use_mask, + __global int* mask, __global Dtype* top_mask) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (use_mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* bottom_diff) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (use_mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__global Dtype* bottom_data, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, + __global int* mask, + __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, (int) 0); + wstart = max(wstart, (int) 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global Dtype* bottom_data_ptr = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data_ptr[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data_ptr[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(max_pool_backward_sk,Dtype)( + const int nthreads, __global const Dtype* top_diff, const int use_mask, + __global const int* mask, __global const Dtype* top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* bottom_diff) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + __global const int* mask_ptr = mask; + __global const Dtype* top_diff_ptr = top_diff; + +// find out the local index +// find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff_ptr += offset; + if (use_mask == 1) { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask_ptr[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } else { + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data_ptr[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, __global Dtype* rand_idx, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data_ptr[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + cumvalues += bottom_data_ptr[h * width + w] + * bottom_data_ptr[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } + +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(slice,Dtype)(const int nthreads, + __global const Dtype* in_data, + const int forward, const int num_slices, + const int slice_size, + const int bottom_slice_axis, + const int top_slice_axis, + const int offset_slice_axis, + __global Dtype* out_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward == 1) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(softmax_loss_forward,Dtype)( + int n, __global const Dtype* prob_data, __global const Dtype* label, + __global Dtype* loss, + const int num, const int dim, const int spatial_dim, + const int has_ignore_label_, const int ignore_label_, + __global Dtype* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype) FLT_MIN)); + counts[index] = 1; + } + } +} + +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, + __global const Dtype* top, + __global const Dtype* label, + __global Dtype* bottom_diff, + const int num, + const int dim, + const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global Dtype* counts) { + + const int channels = dim / spatial_dim; + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + + +__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, + const int tile_size, const int num_tiles, + const int bottom_tile_axis, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = bottom_data[bottom_index]; + } +} + + +__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int tile_size, + const int num_tiles, + const int bottom_tile_axis, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; + } + } +} + +#endif + + +Build Status = -2 ( Err = -42 ) +Log: ptxas application ptx input, line 10702; error : Call has wrong number of parameters +ptxas fatal : Ptx assembly aborted due to errors + +Sources: #ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define __constant +#define __local +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define get_local_id(x) 0 +#define get_local_size(x) 0 +#define FLT_MAX 0 +#define FLT_MIN 0 +#define cl_khr_fp64 +#define cl_amd_fp64 +#define DOUBLE_SUPPORT_AVAILABLE +#define CLK_LOCAL_MEM_FENCE +#define Dtype float +#define barrier(x) +#define atomic_cmpxchg(x, y, z) x +#endif + +#define CONCAT(A,B) A##_##B +#define TEMPLATE(name,type) CONCAT(name,type) + +#define TYPE_FLOAT 1 +#define TYPE_DOUBLE 2 + +#if defined(cl_khr_fp64) +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#elif defined(cl_amd_fp64) +#pragma OPENCL EXTENSION cl_amd_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#endif + +#if defined(cl_khr_int64_base_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#define ATOMICS_64_AVAILABLE +#endif + +#define Dtype float + +#define TYPE TYPE_FLOAT + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} + +__kernel void TEMPLATE(relu_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + } +} + +__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = tanh(in[index]); + } +} + +__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > threshold ? 1 : 0; + } +} + +__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in, + __global Dtype* out, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + } +} + +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, + __global const Dtype* in_diff, const int in_diff_off, + __global const Dtype* in_data, const int in_data_off, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + } +} + +__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff) { + Dtype kBNLL_THRESHOLD = 50.; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); + } + out[index] = maxval; + } +} + +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, + const int channels, + const int spatial_dim, + __global const Dtype* channel_max, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, + const int channels, const int spatial_dim, + __global const Dtype* channel_sum, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data_1, + __global const Dtype* data_2, + __global Dtype* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, + const int forward, const int num_concats, + const int concat_size, + const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, + __global Dtype* out_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward == 1) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, + const Dtype margin, const int legacy_version, + const Dtype alpha, __global const Dtype* y, + __global const Dtype* diff, __global const Dtype* dist_sq, + __global Dtype *bottom_diff) { + for (int i = get_global_id(0); i < count; + i += get_global_size(0)) { + int n = i / channels; // the num index, to access y and dist_sq + if ((int)(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist = 0.0; + Dtype beta = 0.0; + if (legacy_version == 1) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + 1e-4) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, + __global const Dtype* in, + __global const unsigned int* mask, + const unsigned int threshold, + const Dtype scale, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; + } +} + +__kernel void TEMPLATE(dropout_backward,Dtype)( + const int n, __global const Dtype* in_diff, + __global const unsigned int* mask, const unsigned int threshold, + const Dtype scale, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(eltwise_max_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int blob_idx, + __global Dtype* top_data, + __global int* mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int blob_idx, + __global const int* mask, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* weight, + const int M, const int N, + const int K, + __global Dtype* top_data) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + top_data[top_index] = weight[weight_index]; + } + } + +// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html +#if (TYPE == TYPE_FLOAT) +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned int intVal; + Dtype floatVal; + } newVal; + union { + unsigned int intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#if (TYPE == TYPE_DOUBLE) +#ifdef ATOMICS_64_AVAILABLE +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned long intVal; + Dtype floatVal; + } newVal; + union { + unsigned long intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#endif + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_col, const int data_col_off) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col + data_col_off; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_im_off; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_im, const int data_im_off) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + int offset = data_col_off + + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index + data_im_off] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_size[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + // Old: + /*d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int coeff_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; + } + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; + } + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, + __global const Dtype* data_im, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_offset; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + (*data_col_ptr) = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} + +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, + __global const Dtype* data_col, + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, + const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_im, + const int data_offset) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; + } + } + + data_im[data_offset + index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, + __global const Dtype* in, + __global const Dtype* scale, + const Dtype negative_beta, + __global Dtype* out) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + __global Dtype* const scale) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* in_off = in + offset; + __global Dtype* scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* top_data, + __global const Dtype* scale, + __global const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* bottom_off = bottom_data + offset; + __global const Dtype* top_off = top_data + offset; + __global const Dtype* scale_off = scale + offset; + __global Dtype* top_diff_off = top_diff + offset; + __global Dtype* bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] * b[index + offb]; + } +} + +__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] / b[index + offb]; + } +} + +__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__global Dtype* Y, + const int offY) { + for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + Y[offY + index] += alpha; + } +} + +__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] + b[offb + index]; + } +} + +__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] - b[offb + index]; + } +} + +__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = fabs((Dtype)(a[offa + index])); + } +} + +__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = exp(a[offa + index]); + } +} + +__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = log(a[offa + index]); + } +} + +__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, + const int offa, Dtype alpha, + __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + if(alpha == 2.0) { + y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); + } else { + y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha); + } + } +} + +__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = (0.0 < x[index + offx]) + - (x[index + offx] < 0.0); + } +} + +__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = signbit(x[index + offx]); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(merge_copy_forward, Dtype)( + const int nthreads, __global const Dtype* bottom_a, const int forward_a, + __global const Dtype* bottom_b, const int forward_b, + __global Dtype* top, + int num, int channels_a, int channels_b, int height_a, int width_a, + int height_b, int width_b) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = forward_a == 1 ? bottom_a[aidx] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + top[index] = forward_b == 1 ? bottom_b[bidx] : 0; + } + } + +} + +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, +__global Dtype* bottom_a, + int backward_a, + __global Dtype* bottom_b, + int backward_b, + __global const Dtype* top, + int num, int channels_a, + int channels_b, int height_a, + int width_a, int height_b, + int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = backward_a == 1 ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b == 1 ? top[index] : 0; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, __global int* mask, __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_slice[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(ave_pool_forward,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_train,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* rand_idx, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + offset; + if (use_mask == 1) { + __global const int* mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + __global const Dtype* top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* const top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(sto_pool_backward,Dtype)( + const int nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* rand_idx_slice = rand_idx + + (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] + * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* bottom_data, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* top_data, + const int use_mask, + __global int* mask, __global Dtype* top_mask) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (use_mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* bottom_diff) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (use_mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__global Dtype* bottom_data, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, + __global int* mask, + __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, (int) 0); + wstart = max(wstart, (int) 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global Dtype* bottom_data_ptr = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data_ptr[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data_ptr[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(max_pool_backward_sk,Dtype)( + const int nthreads, __global const Dtype* top_diff, const int use_mask, + __global const int* mask, __global const Dtype* top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* bottom_diff) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + __global const int* mask_ptr = mask; + __global const Dtype* top_diff_ptr = top_diff; + +// find out the local index +// find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff_ptr += offset; + if (use_mask == 1) { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask_ptr[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } else { + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data_ptr[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, __global Dtype* rand_idx, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data_ptr[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + cumvalues += bottom_data_ptr[h * width + w] + * bottom_data_ptr[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } + +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(slice,Dtype)(const int nthreads, + __global const Dtype* in_data, + const int forward, const int num_slices, + const int slice_size, + const int bottom_slice_axis, + const int top_slice_axis, + const int offset_slice_axis, + __global Dtype* out_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward == 1) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(softmax_loss_forward,Dtype)( + int n, __global const Dtype* prob_data, __global const Dtype* label, + __global Dtype* loss, + const int num, const int dim, const int spatial_dim, + const int has_ignore_label_, const int ignore_label_, + __global Dtype* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype) FLT_MIN)); + counts[index] = 1; + } + } +} + +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, + __global const Dtype* top, + __global const Dtype* label, + __global Dtype* bottom_diff, + const int num, + const int dim, + const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global Dtype* counts) { + + const int channels = dim / spatial_dim; + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + + +__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, + const int tile_size, const int num_tiles, + const int bottom_tile_axis, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = bottom_data[bottom_index]; + } +} + + +__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int tile_size, + const int num_tiles, + const int bottom_tile_axis, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; + } + } +} + +#ifdef DOUBLE_SUPPORT_AVAILABLE + +#undef Dtype + +#define Dtype double + +#undef TYPE + +#define TYPE TYPE_DOUBLE + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} + +__kernel void TEMPLATE(relu_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + } +} + +__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = tanh(in[index]); + } +} + +__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > threshold ? 1 : 0; + } +} + +__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in, + __global Dtype* out, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + } +} + +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, + __global const Dtype* in_diff, const int in_diff_off, + __global const Dtype* in_data, const int in_data_off, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + } +} + +__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff) { + Dtype kBNLL_THRESHOLD = 50.; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); + } + out[index] = maxval; + } +} + +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, + const int channels, + const int spatial_dim, + __global const Dtype* channel_max, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, + const int channels, const int spatial_dim, + __global const Dtype* channel_sum, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data_1, + __global const Dtype* data_2, + __global Dtype* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, + const int forward, const int num_concats, + const int concat_size, + const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, + __global Dtype* out_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward == 1) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, + const Dtype margin, const int legacy_version, + const Dtype alpha, __global const Dtype* y, + __global const Dtype* diff, __global const Dtype* dist_sq, + __global Dtype *bottom_diff) { + for (int i = get_global_id(0); i < count; + i += get_global_size(0)) { + int n = i / channels; // the num index, to access y and dist_sq + if ((int)(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist = 0.0; + Dtype beta = 0.0; + if (legacy_version == 1) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + 1e-4) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, + __global const Dtype* in, + __global const unsigned int* mask, + const unsigned int threshold, + const Dtype scale, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; + } +} + +__kernel void TEMPLATE(dropout_backward,Dtype)( + const int n, __global const Dtype* in_diff, + __global const unsigned int* mask, const unsigned int threshold, + const Dtype scale, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(eltwise_max_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int blob_idx, + __global Dtype* top_data, + __global int* mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int blob_idx, + __global const int* mask, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* weight, + const int M, const int N, + const int K, + __global Dtype* top_data) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + top_data[top_index] = weight[weight_index]; + } + } + +// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html +#if (TYPE == TYPE_FLOAT) +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned int intVal; + Dtype floatVal; + } newVal; + union { + unsigned int intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#if (TYPE == TYPE_DOUBLE) +#ifdef ATOMICS_64_AVAILABLE +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned long intVal; + Dtype floatVal; + } newVal; + union { + unsigned long intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#endif + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_col, const int data_col_off) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col + data_col_off; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_im_off; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_im, const int data_im_off) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + int offset = data_col_off + + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index + data_im_off] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_size[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + // Old: + /*d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int coeff_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; + } + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; + } + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, + __global const Dtype* data_im, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_offset; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + (*data_col_ptr) = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} + +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, + __global const Dtype* data_col, + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, + const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_im, + const int data_offset) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; + } + } + + data_im[data_offset + index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, + __global const Dtype* in, + __global const Dtype* scale, + const Dtype negative_beta, + __global Dtype* out) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + __global Dtype* const scale) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* in_off = in + offset; + __global Dtype* scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* top_data, + __global const Dtype* scale, + __global const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* bottom_off = bottom_data + offset; + __global const Dtype* top_off = top_data + offset; + __global const Dtype* scale_off = scale + offset; + __global Dtype* top_diff_off = top_diff + offset; + __global Dtype* bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] * b[index + offb]; + } +} + +__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] / b[index + offb]; + } +} + +__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__global Dtype* Y, + const int offY) { + for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + Y[offY + index] += alpha; + } +} + +__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] + b[offb + index]; + } +} + +__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] - b[offb + index]; + } +} + +__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = fabs((Dtype)(a[offa + index])); + } +} + +__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = exp(a[offa + index]); + } +} + +__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = log(a[offa + index]); + } +} + +__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, + const int offa, Dtype alpha, + __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + if(alpha == 2.0) { + y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); + } else { + y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha); + } + } +} + +__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = (0.0 < x[index + offx]) + - (x[index + offx] < 0.0); + } +} + +__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = signbit(x[index + offx]); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(merge_copy_forward, Dtype)( + const int nthreads, __global const Dtype* bottom_a, const int forward_a, + __global const Dtype* bottom_b, const int forward_b, + __global Dtype* top, + int num, int channels_a, int channels_b, int height_a, int width_a, + int height_b, int width_b) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = forward_a == 1 ? bottom_a[aidx] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + top[index] = forward_b == 1 ? bottom_b[bidx] : 0; + } + } + +} + +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, +__global Dtype* bottom_a, + int backward_a, + __global Dtype* bottom_b, + int backward_b, + __global const Dtype* top, + int num, int channels_a, + int channels_b, int height_a, + int width_a, int height_b, + int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = backward_a == 1 ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b == 1 ? top[index] : 0; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, __global int* mask, __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_slice[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(ave_pool_forward,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_train,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* rand_idx, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + offset; + if (use_mask == 1) { + __global const int* mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + __global const Dtype* top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* const top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(sto_pool_backward,Dtype)( + const int nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* rand_idx_slice = rand_idx + + (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] + * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* bottom_data, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* top_data, + const int use_mask, + __global int* mask, __global Dtype* top_mask) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (use_mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* bottom_diff) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (use_mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__global Dtype* bottom_data, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, + __global int* mask, + __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, (int) 0); + wstart = max(wstart, (int) 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global Dtype* bottom_data_ptr = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data_ptr[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data_ptr[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(max_pool_backward_sk,Dtype)( + const int nthreads, __global const Dtype* top_diff, const int use_mask, + __global const int* mask, __global const Dtype* top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* bottom_diff) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + __global const int* mask_ptr = mask; + __global const Dtype* top_diff_ptr = top_diff; + +// find out the local index +// find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff_ptr += offset; + if (use_mask == 1) { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask_ptr[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } else { + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data_ptr[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, __global Dtype* rand_idx, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data_ptr[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + cumvalues += bottom_data_ptr[h * width + w] + * bottom_data_ptr[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } + +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(slice,Dtype)(const int nthreads, + __global const Dtype* in_data, + const int forward, const int num_slices, + const int slice_size, + const int bottom_slice_axis, + const int top_slice_axis, + const int offset_slice_axis, + __global Dtype* out_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward == 1) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(softmax_loss_forward,Dtype)( + int n, __global const Dtype* prob_data, __global const Dtype* label, + __global Dtype* loss, + const int num, const int dim, const int spatial_dim, + const int has_ignore_label_, const int ignore_label_, + __global Dtype* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype) FLT_MIN)); + counts[index] = 1; + } + } +} + +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, + __global const Dtype* top, + __global const Dtype* label, + __global Dtype* bottom_diff, + const int num, + const int dim, + const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global Dtype* counts) { + + const int channels = dim / spatial_dim; + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + + +__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, + const int tile_size, const int num_tiles, + const int bottom_tile_axis, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = bottom_data[bottom_index]; + } +} + + +__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int tile_size, + const int num_tiles, + const int bottom_tile_axis, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; + } + } +} + +#endif + + +Build Status = -2 ( Err = -42 ) +Log: ptxas application ptx input, line 10702; error : Call has wrong number of parameters +ptxas fatal : Ptx assembly aborted due to errors + +Sources: #ifndef __OPENCL_VERSION__ +#define __kernel +#define __global +#define __constant +#define __local +#define get_global_id(x) 0 +#define get_global_size(x) 0 +#define get_local_id(x) 0 +#define get_local_size(x) 0 +#define FLT_MAX 0 +#define FLT_MIN 0 +#define cl_khr_fp64 +#define cl_amd_fp64 +#define DOUBLE_SUPPORT_AVAILABLE +#define CLK_LOCAL_MEM_FENCE +#define Dtype float +#define barrier(x) +#define atomic_cmpxchg(x, y, z) x +#endif + +#define CONCAT(A,B) A##_##B +#define TEMPLATE(name,type) CONCAT(name,type) + +#define TYPE_FLOAT 1 +#define TYPE_DOUBLE 2 + +#if defined(cl_khr_fp64) +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#elif defined(cl_amd_fp64) +#pragma OPENCL EXTENSION cl_amd_fp64 : enable +#define DOUBLE_SUPPORT_AVAILABLE +#endif + +#if defined(cl_khr_int64_base_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#define ATOMICS_64_AVAILABLE +#endif + +#define Dtype float + +#define TYPE TYPE_FLOAT + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} + +__kernel void TEMPLATE(relu_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + } +} + +__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = tanh(in[index]); + } +} + +__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > threshold ? 1 : 0; + } +} + +__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in, + __global Dtype* out, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + } +} + +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, + __global const Dtype* in_diff, const int in_diff_off, + __global const Dtype* in_data, const int in_data_off, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + } +} + +__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff) { + Dtype kBNLL_THRESHOLD = 50.; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); + } + out[index] = maxval; + } +} + +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, + const int channels, + const int spatial_dim, + __global const Dtype* channel_max, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, + const int channels, const int spatial_dim, + __global const Dtype* channel_sum, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data_1, + __global const Dtype* data_2, + __global Dtype* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, + const int forward, const int num_concats, + const int concat_size, + const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, + __global Dtype* out_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward == 1) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, + const Dtype margin, const int legacy_version, + const Dtype alpha, __global const Dtype* y, + __global const Dtype* diff, __global const Dtype* dist_sq, + __global Dtype *bottom_diff) { + for (int i = get_global_id(0); i < count; + i += get_global_size(0)) { + int n = i / channels; // the num index, to access y and dist_sq + if ((int)(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist = 0.0; + Dtype beta = 0.0; + if (legacy_version == 1) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + 1e-4) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, + __global const Dtype* in, + __global const unsigned int* mask, + const unsigned int threshold, + const Dtype scale, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; + } +} + +__kernel void TEMPLATE(dropout_backward,Dtype)( + const int n, __global const Dtype* in_diff, + __global const unsigned int* mask, const unsigned int threshold, + const Dtype scale, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(eltwise_max_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int blob_idx, + __global Dtype* top_data, + __global int* mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int blob_idx, + __global const int* mask, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* weight, + const int M, const int N, + const int K, + __global Dtype* top_data) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + top_data[top_index] = weight[weight_index]; + } + } + +// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html +#if (TYPE == TYPE_FLOAT) +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned int intVal; + Dtype floatVal; + } newVal; + union { + unsigned int intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#if (TYPE == TYPE_DOUBLE) +#ifdef ATOMICS_64_AVAILABLE +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned long intVal; + Dtype floatVal; + } newVal; + union { + unsigned long intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#endif + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_col, const int data_col_off) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col + data_col_off; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_im_off; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_im, const int data_im_off) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + int offset = data_col_off + + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index + data_im_off] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_size[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + // Old: + /*d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int coeff_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; + } + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; + } + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, + __global const Dtype* data_im, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_offset; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + (*data_col_ptr) = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} + +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, + __global const Dtype* data_col, + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, + const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_im, + const int data_offset) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; + } + } + + data_im[data_offset + index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, + __global const Dtype* in, + __global const Dtype* scale, + const Dtype negative_beta, + __global Dtype* out) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + __global Dtype* const scale) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* in_off = in + offset; + __global Dtype* scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* top_data, + __global const Dtype* scale, + __global const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* bottom_off = bottom_data + offset; + __global const Dtype* top_off = top_data + offset; + __global const Dtype* scale_off = scale + offset; + __global Dtype* top_diff_off = top_diff + offset; + __global Dtype* bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] * b[index + offb]; + } +} + +__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] / b[index + offb]; + } +} + +__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__global Dtype* Y, + const int offY) { + for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + Y[offY + index] += alpha; + } +} + +__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] + b[offb + index]; + } +} + +__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] - b[offb + index]; + } +} + +__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = fabs((Dtype)(a[offa + index])); + } +} + +__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = exp(a[offa + index]); + } +} + +__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = log(a[offa + index]); + } +} + +__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, + const int offa, Dtype alpha, + __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + if(alpha == 2.0) { + y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); + } else { + y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha); + } + } +} + +__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = (0.0 < x[index + offx]) + - (x[index + offx] < 0.0); + } +} + +__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = signbit(x[index + offx]); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(merge_copy_forward, Dtype)( + const int nthreads, __global const Dtype* bottom_a, const int forward_a, + __global const Dtype* bottom_b, const int forward_b, + __global Dtype* top, + int num, int channels_a, int channels_b, int height_a, int width_a, + int height_b, int width_b) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = forward_a == 1 ? bottom_a[aidx] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + top[index] = forward_b == 1 ? bottom_b[bidx] : 0; + } + } + +} + +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, +__global Dtype* bottom_a, + int backward_a, + __global Dtype* bottom_b, + int backward_b, + __global const Dtype* top, + int num, int channels_a, + int channels_b, int height_a, + int width_a, int height_b, + int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = backward_a == 1 ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b == 1 ? top[index] : 0; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, __global int* mask, __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_slice[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(ave_pool_forward,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_train,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* rand_idx, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + offset; + if (use_mask == 1) { + __global const int* mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + __global const Dtype* top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* const top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(sto_pool_backward,Dtype)( + const int nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* rand_idx_slice = rand_idx + + (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] + * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* bottom_data, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* top_data, + const int use_mask, + __global int* mask, __global Dtype* top_mask) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (use_mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* bottom_diff) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (use_mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__global Dtype* bottom_data, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, + __global int* mask, + __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, (int) 0); + wstart = max(wstart, (int) 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global Dtype* bottom_data_ptr = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data_ptr[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data_ptr[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(max_pool_backward_sk,Dtype)( + const int nthreads, __global const Dtype* top_diff, const int use_mask, + __global const int* mask, __global const Dtype* top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* bottom_diff) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + __global const int* mask_ptr = mask; + __global const Dtype* top_diff_ptr = top_diff; + +// find out the local index +// find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff_ptr += offset; + if (use_mask == 1) { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask_ptr[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } else { + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data_ptr[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, __global Dtype* rand_idx, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data_ptr[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + cumvalues += bottom_data_ptr[h * width + w] + * bottom_data_ptr[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } + +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(slice,Dtype)(const int nthreads, + __global const Dtype* in_data, + const int forward, const int num_slices, + const int slice_size, + const int bottom_slice_axis, + const int top_slice_axis, + const int offset_slice_axis, + __global Dtype* out_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward == 1) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(softmax_loss_forward,Dtype)( + int n, __global const Dtype* prob_data, __global const Dtype* label, + __global Dtype* loss, + const int num, const int dim, const int spatial_dim, + const int has_ignore_label_, const int ignore_label_, + __global Dtype* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype) FLT_MIN)); + counts[index] = 1; + } + } +} + +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, + __global const Dtype* top, + __global const Dtype* label, + __global Dtype* bottom_diff, + const int num, + const int dim, + const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global Dtype* counts) { + + const int channels = dim / spatial_dim; + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + + +__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, + const int tile_size, const int num_tiles, + const int bottom_tile_axis, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = bottom_data[bottom_index]; + } +} + + +__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int tile_size, + const int num_tiles, + const int bottom_tile_axis, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; + } + } +} + +#ifdef DOUBLE_SUPPORT_AVAILABLE + +#undef Dtype + +#define Dtype double + +#undef TYPE + +#define TYPE TYPE_DOUBLE + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(relu_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; + } +} + +__kernel void TEMPLATE(relu_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + Dtype negative_slope) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + } +} + +__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = tanh(in[index]); + } +} + +__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = 1. / (1. + exp(-in[index])); + } +} + +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* out_data, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] > threshold ? 1 : 0; + } +} + +__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in, + __global Dtype* out, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; + } +} + +__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, + const int dim, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff, + __global const Dtype* slope_data, + const int div_factor) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int c = (index / dim) % channels / div_factor; + out_diff[index] = in_diff[index] + * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + } +} + +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, + __global const Dtype* in_diff, const int in_diff_off, + __global const Dtype* in_data, const int in_data_off, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, + __global const Dtype* in, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = + in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + } +} + +__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, + __global const Dtype* in_diff, + __global const Dtype* in_data, + __global Dtype* out_diff) { + Dtype kBNLL_THRESHOLD = 50.; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + float maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); + } + out[index] = maxval; + } +} + +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, + const int channels, + const int spatial_dim, + __global const Dtype* channel_max, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] -= channel_max[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + out[index] = exp(data[index]); + } +} + +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data, + __global Dtype* channel_sum) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, + const int channels, const int spatial_dim, + __global const Dtype* channel_sum, + __global Dtype* data) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, + const int spatial_dim, + __global const Dtype* data_1, + __global const Dtype* data_2, + __global Dtype* channel_dot) { + for (int index = get_global_id(0); index < num * spatial_dim; index += + get_global_size(0)) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype dot = 0; + for (int c = 0; c < channels; ++c) { + dot += (data_1[(n * channels + c) * spatial_dim + s] + * data_2[(n * channels + c) * spatial_dim + s]); + } + channel_dot[index] = dot; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, + const int forward, const int num_concats, + const int concat_size, + const int top_concat_axis, + const int bottom_concat_axis, + const int offset_concat_axis, + __global Dtype* out_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward == 1) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, + const Dtype margin, const int legacy_version, + const Dtype alpha, __global const Dtype* y, + __global const Dtype* diff, __global const Dtype* dist_sq, + __global Dtype *bottom_diff) { + for (int i = get_global_id(0); i < count; + i += get_global_size(0)) { + int n = i / channels; // the num index, to access y and dist_sq + if ((int)(y[n])) { // similar pairs + bottom_diff[i] = alpha * diff[i]; + } else { // dissimilar pairs + Dtype mdist = 0.0; + Dtype beta = 0.0; + if (legacy_version == 1) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + 1e-4) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; + } else { + bottom_diff[i] = 0; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, + __global const Dtype* in, + __global const unsigned int* mask, + const unsigned int threshold, + const Dtype scale, + __global Dtype* out) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; + } +} + +__kernel void TEMPLATE(dropout_backward,Dtype)( + const int n, __global const Dtype* in_diff, + __global const unsigned int* mask, const unsigned int threshold, + const Dtype scale, + __global Dtype* out_diff) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(eltwise_max_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int blob_idx, + __global Dtype* top_data, + __global int* mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype maxval = -FLT_MAX; + int maxidx = -1; + if (bottom_data_a[index] > bottom_data_b[index]) { + // only update for very first bottom_data blob (blob_idx == 0) + if (blob_idx == 0) { + maxval = bottom_data_a[index]; + top_data[index] = maxval; + maxidx = blob_idx; + mask[index] = maxidx; + } + } else { + maxval = bottom_data_b[index]; + top_data[index] = maxval; + maxidx = blob_idx + 1; + mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int blob_idx, + __global const int* mask, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + Dtype gradient = 0; + if (mask[index] == blob_idx) { + gradient += top_diff[index]; + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* weight, + const int M, const int N, + const int K, + __global Dtype* top_data) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + top_data[top_index] = weight[weight_index]; + } + } + +// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html +#if (TYPE == TYPE_FLOAT) +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned int intVal; + Dtype floatVal; + } newVal; + union { + unsigned int intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#if (TYPE == TYPE_DOUBLE) +#ifdef ATOMICS_64_AVAILABLE +inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { + union { + unsigned long intVal; + Dtype floatVal; + } newVal; + union { + unsigned long intVal; + Dtype floatVal; + } prevVal; + do { + prevVal.floatVal = *source; + newVal.floatVal = prevVal.floatVal + operand; + } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); +} +#endif + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, + const int offx) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + x[index + offx] = alpha; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_col, const int data_col_off) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col + data_col_off; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_im_off; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } +} + +__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off, + const int height, const int width, const int channels, + const int patch_h, const int patch_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + __global Dtype* data_im, const int data_im_off) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int w_col_end = min(w / stride_w + 1, width_col); + int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int h_col_end = min(h / stride_h + 1, height_col); + int offset = data_col_off + + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; + int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int coeff_w_col = (1 - stride_w * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index + data_im_off] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_size[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + // Old: + /*d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int coeff_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; + } + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; + } + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, + __global const Dtype* data_im, + const int data_offset, const int height, + const int width, const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_col) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int w_out = index % width_col; + int h_index = index / width_col; + int h_out = h_index % height_col; + int channel_in = h_index / height_col; + int channel_out = channel_in * kernel_h * kernel_w; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + __global Dtype* data_col_ptr = data_col; + data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; + __global const Dtype* data_im_ptr = data_im + data_offset; + data_im_ptr += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ext_kernel_h; i += kstride_h) { + for (int j = 0; j < ext_kernel_w; j += kstride_w) { + int h = h_in + i; + int w = w_in + j; + (*data_col_ptr) = + (h >= 0 && w >= 0 && h < height && w < width) ? + data_im_ptr[i * width + j] : 0; + data_col_ptr += height_col * width_col; + } + } + } + +} + +__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, + __global const Dtype* data_col, + const int height, const int width, + const int channels, const int patch_h, + const int patch_w, + const int ext_patch_h, + const int ext_patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, + const int height_col, + const int width_col, + __global Dtype* data_im, + const int data_offset) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + Dtype val = 0; + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = index / (width * height); + // compute the start and end of the output + int width_col_1 = width_col - 1; + int height_col_1 = height_col - 1; + int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int w_col_end = + (w >= width_col) ? + width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; + int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int h_col_end = + (h >= height_col) ? + height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; + int w_num = (w - w_col_start) / kstride_w; + int h_num = (h - h_col_start) / kstride_h; + + int coeff_w_idx = height_col * width_col; + int coeff_h_idx = patch_w * coeff_w_idx; + int offset = c * patch_h * coeff_h_idx; + for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + kstride_h, --h_idx) { + for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + kstride_w, --w_idx) { + //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + + h_col * width_col + w_col]; + } + } + + data_im[data_offset + index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, + __global const Dtype* in, + __global const Dtype* scale, + const Dtype negative_beta, + __global Dtype* out) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, + const int num, const int channels, + const int height, const int width, const int size, + const Dtype alpha_over_size, const Dtype k, + __global Dtype* const scale) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* in_off = in + offset; + __global Dtype* scale_off = scale + offset; + int head = 0; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad && head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in_off[head * step] * in_off[head * step]; + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; + } + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + ++head; + } + } +} + +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, + __global const Dtype* bottom_data, + __global const Dtype* top_data, + __global const Dtype* scale, + __global const Dtype* top_diff, const int num, + const int channels, const int height, + const int width, const int size, + const Dtype negative_beta, + const Dtype cache_ratio, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + __global const Dtype* bottom_off = bottom_data + offset; + __global const Dtype* top_off = top_data + offset; + __global const Dtype* scale_off = scale + offset; + __global Dtype* top_diff_off = top_diff + offset; + __global Dtype* bottom_diff_off = bottom_diff + offset; + int head = 0; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad && head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff_off[head * step] * top_off[head * step] + / scale_off[head * step]; + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + if (head - size >= 0) { + accum_ratio -= top_diff_off[(head - size) * step] + * top_off[(head - size) * step] / scale_off[(head - size) * step]; + } + bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) + * step] * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] * b[index + offb]; + } +} + +__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, + const int offa, + __global Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = a[index + offa] / b[index + offb]; + } +} + +__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__global Dtype* Y, + const int offY) { + for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + Y[offY + index] += alpha; + } +} + +__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] + b[offb + index]; + } +} + +__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, + const int offa, __global const Dtype* b, + const int offb, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = a[offa + index] - b[offb + index]; + } +} + +__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = fabs((Dtype)(a[offa + index])); + } +} + +__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = exp(a[offa + index]); + } +} + +__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, + const int offa, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[offy + index] = log(a[offa + index]); + } +} + +__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, + const int offa, Dtype alpha, + __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + if(alpha == 2.0) { + y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); + } else { + y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha); + } + } +} + +__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = (0.0 < x[index + offx]) + - (x[index + offx] < 0.0); + } +} + +__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, + const int offx, __global Dtype* y, + const int offy) { + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + y[index + offy] = signbit(x[index + offx]); + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(merge_copy_forward, Dtype)( + const int nthreads, __global const Dtype* bottom_a, const int forward_a, + __global const Dtype* bottom_b, const int forward_b, + __global Dtype* top, + int num, int channels_a, int channels_b, int height_a, int width_a, + int height_b, int width_b) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + top[index] = forward_a == 1 ? bottom_a[aidx] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + top[index] = forward_b == 1 ? bottom_b[bidx] : 0; + } + } + +} + +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, +__global Dtype* bottom_a, + int backward_a, + __global Dtype* bottom_b, + int backward_b, + __global const Dtype* top, + int num, int channels_a, + int channels_b, int height_a, + int width_a, int height_b, + int width_b) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int batch_id = index / ((channels_a + channels_b) * height_a * width_a); + + int pad_h = (height_b - height_a) / 2; + int pad_w = (width_b - width_a) / 2; + + int bottom_id = ((index + - batch_id * (channels_a + channels_b) * height_a * width_a) + / (channels_a * height_a * width_a)) % 2; + + int h = ((index / width_a) % height_a); + int w = (index % width_a); + + if (bottom_id == 0) { + int channel_id = (index / ((width_a * height_a)) % channels_a); + int aidx = ((((batch_id) * channels_a + channel_id) * height_a + h) + * width_a + w); + bottom_a[aidx] = backward_a == 1 ? top[index] : 0; + } else { + int channel_id = (index / ((width_a * height_a)) % channels_b); + int bidx = (((batch_id) * channels_b + channel_id) * height_b * width_b) + + width_b * (h + pad_h) + pad_w + w; + bottom_b[bidx] = backward_b == 1 ? top[index] : 0; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, __global int* mask, __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_slice[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_slice[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(ave_pool_forward,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + const int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_slice[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_train,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* rand_idx, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + } + } + const float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_slice[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test,Dtype)( + const int nthreads, __global const Dtype* const bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_slice = bottom_data + + (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + Dtype gradient = 0; + const int offset = (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + offset; + if (use_mask == 1) { + __global const int* mask_slice = mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } else { + __global const Dtype* top_mask_slice = top_mask + offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* const top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + kernel_h, height + pad_h); + int wend = min(wstart + kernel_w, width + pad_w); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(sto_pool_backward,Dtype)( + const int nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + // find out the local index + // find out the local offset + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); + Dtype gradient = 0; + __global const Dtype* rand_idx_slice = rand_idx + + (n * channels + c) * pooled_height * pooled_width; + __global const Dtype* top_diff_slice = top_diff + + (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff_slice[ph * pooled_width + pw] + * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* bottom_data, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* top_data, + const int use_mask, + __global int* mask, __global Dtype* top_mask) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (use_mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + + +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, + const int num_axes, + const __global Dtype* top_diff, + const int use_mask, + __global const int* mask, + __global const Dtype* top_mask, + const int channels, + __global const int* size, + __global const int* pooled_size, + __global const int* kernel_size, + __global const int* ext_kernel_size, + __global const int* stride, + __global const int* kstride, + __global const int* pad, + __global Dtype* bottom_diff) { + int d_idx[6]; + int d_start[6]; + int d_end[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (use_mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__global Dtype* bottom_data, + const int num, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int kernel_h, + const int kernel_w, + const int ext_kernel_h, + const int ext_kernel_w, + const int stride_h, + const int stride_w, + const int kstride_h, + const int kstride_w, + const int pad_h, + const int pad_w, + __global Dtype* top_data, + const int use_mask, + __global int* mask, + __global Dtype* top_mask) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, (int) 0); + wstart = max(wstart, (int) 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + __global Dtype* bottom_data_ptr = bottom_data + + (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data_ptr[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data_ptr[maxidx]; + } + } + } + top_data[index] = maxval; + if (use_mask == 1) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +__kernel void TEMPLATE(max_pool_backward_sk,Dtype)( + const int nthreads, __global const Dtype* top_diff, const int use_mask, + __global const int* mask, __global const Dtype* top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* bottom_diff) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + __global const int* mask_ptr = mask; + __global const Dtype* top_diff_ptr = top_diff; + +// find out the local index +// find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff_ptr += offset; + if (use_mask == 1) { + mask_ptr += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask_ptr[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } else { + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_ptr[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, const int pad_h, const int pad_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data_ptr[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, __global Dtype* rand_idx, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data_ptr[h * width + w]; + h = hend; + w = wend; + } + } + } + } +} + +__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( + const int nthreads, __global const Dtype* bottom_data, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, const int kstride_h, + const int kstride_w, + __global Dtype* top_data) { + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + __global const Dtype* bottom_data_ptr = bottom_data; + bottom_data_ptr += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data_ptr[h * width + w]; + cumvalues += bottom_data_ptr[h * width + w] + * bottom_data_ptr[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } + +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(slice,Dtype)(const int nthreads, + __global const Dtype* in_data, + const int forward, const int num_slices, + const int slice_size, + const int bottom_slice_axis, + const int top_slice_axis, + const int offset_slice_axis, + __global Dtype* out_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if (forward == 1) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(softmax_loss_forward,Dtype)( + int n, __global const Dtype* prob_data, __global const Dtype* label, + __global Dtype* loss, + const int num, const int dim, const int spatial_dim, + const int has_ignore_label_, const int ignore_label_, + __global Dtype* counts) { + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + loss[index] = 0; + counts[index] = 0; + } else { + loss[index] = -log( + max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), + (Dtype) FLT_MIN)); + counts[index] = 1; + } + } +} + +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, + __global const Dtype* top, + __global const Dtype* label, + __global Dtype* bottom_diff, + const int num, + const int dim, + const int spatial_dim, + const int has_ignore_label_, + const int ignore_label_, + __global Dtype* counts) { + + const int channels = dim / spatial_dim; + + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = (int) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; + } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; + } + } + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + + +__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, + const int tile_size, const int num_tiles, + const int bottom_tile_axis, + __global Dtype* top_data) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = bottom_data[bottom_index]; + } +} + + +__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, + __global const Dtype* top_diff, + const int tile_size, + const int num_tiles, + const int bottom_tile_axis, + __global Dtype* bottom_diff) { + for (int index = get_global_id(0); index < nthreads; + index += get_global_size(0)) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; + } + } +} + +#endif + + +Note: Google Test filter = *Embed* +[==========] Running 20 tests from 4 test cases. +[----------] Global test environment set-up. +[----------] 5 tests from EmbedLayerTest/0, where TypeParam = caffe::CPUDevice +[ RUN ] EmbedLayerTest/0.TestSetUp +[ OK ] EmbedLayerTest/0.TestSetUp (1 ms) +[ RUN ] EmbedLayerTest/0.TestForward +[ OK ] EmbedLayerTest/0.TestForward (0 ms) +[ RUN ] EmbedLayerTest/0.TestForwardWithBias +[ OK ] EmbedLayerTest/0.TestForwardWithBias (0 ms) +[ RUN ] EmbedLayerTest/0.TestGradient +[ OK ] EmbedLayerTest/0.TestGradient (7 ms) +[ RUN ] EmbedLayerTest/0.TestGradientWithBias +[ OK ] EmbedLayerTest/0.TestGradientWithBias (12 ms) +[----------] 5 tests from EmbedLayerTest/0 (20 ms total) + +[----------] 5 tests from EmbedLayerTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] EmbedLayerTest/1.TestSetUp +[ OK ] EmbedLayerTest/1.TestSetUp (0 ms) +[ RUN ] EmbedLayerTest/1.TestForward +[ OK ] EmbedLayerTest/1.TestForward (0 ms) +[ RUN ] EmbedLayerTest/1.TestForwardWithBias +[ OK ] EmbedLayerTest/1.TestForwardWithBias (0 ms) +[ RUN ] EmbedLayerTest/1.TestGradient +[ OK ] EmbedLayerTest/1.TestGradient (7 ms) +[ RUN ] EmbedLayerTest/1.TestGradientWithBias +[ OK ] EmbedLayerTest/1.TestGradientWithBias (12 ms) +[----------] 5 tests from EmbedLayerTest/1 (19 ms total) + +[----------] 5 tests from EmbedLayerTest/2, where TypeParam = caffe::GPUDevice +[ RUN ] EmbedLayerTest/2.TestSetUp +[ OK ] EmbedLayerTest/2.TestSetUp (0 ms) +[ RUN ] EmbedLayerTest/2.TestForward +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/2.TestForward, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] EmbedLayerTest/2.TestForwardWithBias +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/2.TestForwardWithBias, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] EmbedLayerTest/2.TestGradient +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/2.TestGradient, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] EmbedLayerTest/2.TestGradientWithBias +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/2.TestGradientWithBias, where TypeParam = caffe::GPUDevice (0 ms) +[----------] 5 tests from EmbedLayerTest/2 (1 ms total) + +[----------] 5 tests from EmbedLayerTest/3, where TypeParam = caffe::GPUDevice +[ RUN ] EmbedLayerTest/3.TestSetUp +[ OK ] EmbedLayerTest/3.TestSetUp (0 ms) +[ RUN ] EmbedLayerTest/3.TestForward +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/3.TestForward, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] EmbedLayerTest/3.TestForwardWithBias +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/3.TestForwardWithBias, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] EmbedLayerTest/3.TestGradient +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/3.TestGradient, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] EmbedLayerTest/3.TestGradientWithBias +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] EmbedLayerTest/3.TestGradientWithBias, where TypeParam = caffe::GPUDevice (1 ms) +[----------] 5 tests from EmbedLayerTest/3 (2 ms total) + +[----------] Global test environment tear-down +[==========] 20 tests from 4 test cases ran. (42 ms total) +[ PASSED ] 12 tests. +[ FAILED ] 8 tests, listed below: +[ FAILED ] EmbedLayerTest/2.TestForward, where TypeParam = caffe::GPUDevice +[ FAILED ] EmbedLayerTest/2.TestForwardWithBias, where TypeParam = caffe::GPUDevice +[ FAILED ] EmbedLayerTest/2.TestGradient, where TypeParam = caffe::GPUDevice +[ FAILED ] EmbedLayerTest/2.TestGradientWithBias, where TypeParam = caffe::GPUDevice +[ FAILED ] EmbedLayerTest/3.TestForward, where TypeParam = caffe::GPUDevice +[ FAILED ] EmbedLayerTest/3.TestForwardWithBias, where TypeParam = caffe::GPUDevice +[ FAILED ] EmbedLayerTest/3.TestGradient, where TypeParam = caffe::GPUDevice +[ FAILED ] EmbedLayerTest/3.TestGradientWithBias, where TypeParam = caffe::GPUDevice + + 8 FAILED TESTS + YOU HAVE 2 DISABLED TESTS + diff --git a/src/caffe/device_context.cpp b/src/caffe/device_context.cpp index 14b40d61628..6c939af058f 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device_context.cpp @@ -5,6 +5,8 @@ * Author: Fabian Tschopp */ +#include +#include #include #include "caffe/device_context.hpp" #include "caffe/greentea/greentea.hpp" @@ -17,15 +19,13 @@ namespace caffe { DeviceContext::DeviceContext() - : current_queue_id_(0), workgroup_sizes_(3, 0), id_(0), - list_id_(0), backend_(Backend::BACKEND_CPU), - memory_usage_(0), peak_memory_usage_(0) { + : current_queue_id_(0), workgroup_sizes_(3, 0), id_(0), list_id_(0), + backend_(Backend::BACKEND_CPU), memory_usage_(0), peak_memory_usage_(0) { } DeviceContext::DeviceContext(int id, int list_id, Backend backend) - : current_queue_id_(0), workgroup_sizes_(3, 0), id_(id), - list_id_(list_id), backend_(backend), - memory_usage_(0), peak_memory_usage_(0) { + : current_queue_id_(0), workgroup_sizes_(3, 0), id_(id), list_id_(list_id), + backend_(backend), memory_usage_(0), peak_memory_usage_(0) { } void DeviceContext::Init() { @@ -87,7 +87,7 @@ int DeviceContext::num_queues() { } template<> -shared_ptr< Blob > DeviceContext::Buffer(int id) { +shared_ptr > DeviceContext::Buffer(int id) { if (buff_f_.size() <= id) { shared_ptr > blob_pointer(new Blob(this)); buff_f_.push_back(blob_pointer); @@ -96,7 +96,7 @@ shared_ptr< Blob > DeviceContext::Buffer(int id) { } template<> -shared_ptr< Blob > DeviceContext::Buffer(int id) { +shared_ptr > DeviceContext::Buffer(int id) { if (buff_d_.size() <= id) { shared_ptr > blob_pointer(new Blob(this)); buff_d_.push_back(blob_pointer); @@ -115,8 +115,7 @@ void DeviceContext::SwitchQueue(int id) { #endif // USE_CUDA } else { #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = - viennacl::ocl::get_context(id_); + viennacl::ocl::context &ctx = viennacl::ocl::get_context(id_); ctx.switch_queue(id % num_queues()); current_queue_id_ = id % num_queues(); #endif // USE_GREENTEA @@ -163,15 +162,40 @@ void DeviceContext::ResetPeakMemoryUsage() { peak_memory_usage_ = memory_usage_; } +bool DeviceContext::CheckCapability(std::string cap) { + if (backend_ == BACKEND_OpenCL) { #ifdef USE_GREENTEA -viennacl::ocl::program &DeviceContext::program() { - return ocl_program_; + viennacl::ocl::context &ctx = viennacl::ocl::get_context(id_); + + size_t size; + size_t max_size = 1024 * 1024; + clGetDeviceInfo(ctx.devices()[0].id(), CL_DEVICE_EXTENSIONS, + 0, NULL, &size); + + // Cap at 1 MB to capture faulty OpenCL implementations (nVidia) + std::vector exts(std::min(size, max_size)); + + clGetDeviceInfo(ctx.devices()[0].id(), CL_DEVICE_EXTENSIONS, + size, &(exts[0]), NULL); + + std::string extsstr(&(exts[0])); + return extsstr.find(cap) != std::string::npos; +#endif } + return true; +} + +#ifdef USE_GREENTEA +viennacl::ocl::program &DeviceContext::program() { + return ocl_program_; +} void DeviceContext::SetProgram() { - ocl_program_ = RegisterKernels(&(viennacl::ocl::get_context( - static_cast(id_)))); + ocl_program_ = RegisterKernels( + &(viennacl::ocl::get_context(static_cast(id_)))); } + + #endif // USE_GREENTEA } // namespace caffe diff --git a/src/caffe/greentea/cl_headers/header.cl b/src/caffe/greentea/cl_headers/header.cl index ddf296b3a8a..d2457dea2a7 100644 --- a/src/caffe/greentea/cl_headers/header.cl +++ b/src/caffe/greentea/cl_headers/header.cl @@ -21,6 +21,8 @@ #define CONCAT(A,B) A##_##B #define TEMPLATE(name,type) CONCAT(name,type) +#define TYPE_FLOAT 1 +#define TYPE_DOUBLE 2 #if defined(cl_khr_fp64) #pragma OPENCL EXTENSION cl_khr_fp64 : enable diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 6330346b861..0ed5285bcd4 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -5,8 +5,8 @@ #include #include namespace caffe { -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT -std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT @@ -15,7 +15,7 @@ std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (Dtype == float)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#else\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#endif\n#endif\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}"; // NOLINT +std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT @@ -37,7 +37,7 @@ std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (Dtype == float)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#else\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n#endif\n#endif\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}"; // NOLINT +std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT @@ -55,6 +55,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; ss << header << "\n\n"; // NOLINT ss << "#define Dtype float" << "\n\n"; // NOLINT + ss << "#define TYPE TYPE_FLOAT" << "\n\n"; // NOLINT ss << activation_float << "\n\n"; // NOLINT ss << auxiliary_float << "\n\n"; // NOLINT ss << bnll_float << "\n\n"; // NOLINT @@ -80,6 +81,8 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << "#ifdef DOUBLE_SUPPORT_AVAILABLE" << "\n\n"; // NOLINT ss << "#undef Dtype" << "\n\n"; // NOLINT ss << "#define Dtype double" << "\n\n"; // NOLINT + ss << "#undef TYPE" << "\n\n"; // NOLINT + ss << "#define TYPE TYPE_DOUBLE" << "\n\n"; // NOLINT ss << activation_double << "\n\n"; // NOLINT ss << auxiliary_double << "\n\n"; // NOLINT ss << bnll_double << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index 2a5146c8f57..45782a61286 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -86,6 +86,7 @@ done shopt -s nullglob echo " ss << \"#define Dtype float\" << \"\\n\\n\"; // NOLINT" >> $SOURCE +echo " ss << \"#define TYPE TYPE_FLOAT\" << \"\\n\\n\"; // NOLINT" >> $SOURCE for CL_KERNEL in $CL_KERNELDIR do CL_KERNEL_NAME=`echo $CL_KERNEL` @@ -98,6 +99,8 @@ shopt -s nullglob echo " ss << \"#ifdef DOUBLE_SUPPORT_AVAILABLE\" << \"\\n\\n\"; // NOLINT" >> $SOURCE echo " ss << \"#undef Dtype\" << \"\\n\\n\"; // NOLINT" >> $SOURCE echo " ss << \"#define Dtype double\" << \"\\n\\n\"; // NOLINT" >> $SOURCE +echo " ss << \"#undef TYPE\" << \"\\n\\n\"; // NOLINT" >> $SOURCE +echo " ss << \"#define TYPE TYPE_DOUBLE\" << \"\\n\\n\"; // NOLINT" >> $SOURCE for CL_KERNEL in $CL_KERNELDIR do CL_KERNEL_NAME=`echo $CL_KERNEL` diff --git a/src/caffe/greentea/cl_kernels/embed.cl b/src/caffe/greentea/cl_kernels/embed.cl index 5303c95b282..6b4ded93225 100644 --- a/src/caffe/greentea/cl_kernels/embed.cl +++ b/src/caffe/greentea/cl_kernels/embed.cl @@ -19,7 +19,7 @@ __kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, } // atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html -#if (Dtype == float) +#if (TYPE == TYPE_FLOAT) inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { union { unsigned int intVal; @@ -34,7 +34,23 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#else + +__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int M, const int N, const int K, + __global Dtype* weight_diff) { + for (int top_index = get_global_id(0); top_index < nthreads; + top_index += get_global_size(0)) { + const int n = top_index / N; + const int d = top_index % N; + const int index = (int)(bottom_data[n]); + const int weight_index = index * N + d; + + TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); + } +} +#endif + +#if (TYPE == TYPE_DOUBLE) #ifdef ATOMICS_64_AVAILABLE inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { union { @@ -50,8 +66,6 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#endif -#endif __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, __global const Dtype* top_diff, const int M, const int N, const int K, @@ -66,3 +80,5 @@ __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); } } +#endif +#endif diff --git a/src/caffe/test/test_embed_layer.cpp b/src/caffe/test/test_embed_layer.cpp index 577079867a7..8b97b5f0fb3 100644 --- a/src/caffe/test/test_embed_layer.cpp +++ b/src/caffe/test/test_embed_layer.cpp @@ -137,6 +137,12 @@ TYPED_TEST(EmbedLayerTest, TestForwardWithBias) { TYPED_TEST(EmbedLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; + // Skip the test on unsupported OpenCL devices with double + if (!Caffe::GetDefaultDeviceContext()-> + CheckCapability("cl_khr_int64_base_atomics") + && is_same::value) { + return; + } LayerParameter layer_param; EmbedParameter* embed_param = layer_param.mutable_embed_param(); embed_param->set_num_output(10); @@ -157,6 +163,12 @@ TYPED_TEST(EmbedLayerTest, TestGradient) { TYPED_TEST(EmbedLayerTest, TestGradientWithBias) { typedef typename TypeParam::Dtype Dtype; + // Skip the test on unsupported OpenCL devices with double + if (!Caffe::GetDefaultDeviceContext()-> + CheckCapability("cl_khr_int64_base_atomics") + && is_same::value) { + return; + } LayerParameter layer_param; EmbedParameter* embed_param = layer_param.mutable_embed_param(); embed_param->set_num_output(10); From b5eec63289469e0d2b6ddbbe914931fff85c2568 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 15 Sep 2015 01:00:08 +0200 Subject: [PATCH 178/254] Update README.md --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index df16adeea23..d07b953115c 100644 --- a/README.md +++ b/README.md @@ -50,3 +50,7 @@ The backend is supposed to work with all vendors. Note however there may be prob It is therefore recommended to install another OpenCL implementation after installing nVidia drivers. Possibilities are: - Intel OpenCL, recommended if you have an Intel CPU along the nVidia GPU. - AMD APP SDK (OpenCL), recommended if you have an AMD GPU or CPU. + +## Technical Report +Available on arXiv: +http://arxiv.org/abs/1509.03371 From dd07c6fdb70138a3832111ca0226fdffb053c2d9 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 22 Sep 2015 03:21:34 +0200 Subject: [PATCH 179/254] Change DeviceContext to device, refactoring convolution layer. --- include/caffe/blob.hpp | 18 +- include/caffe/caffe.hpp | 2 +- include/caffe/common.hpp | 16 +- include/caffe/data_transformer.hpp | 4 +- include/caffe/dev_ptr.hpp | 42 ++ .../caffe/{device_context.hpp => device.hpp} | 6 +- include/caffe/greentea/greentea_im2col.hpp | 65 +-- include/caffe/internal_thread.hpp | 8 +- include/caffe/layer.hpp | 6 +- include/caffe/net.hpp | 2 +- include/caffe/parallel.hpp | 14 +- include/caffe/solver.hpp | 4 +- include/caffe/syncedmem.hpp | 14 +- include/caffe/test/test_caffe_main.hpp | 2 +- include/caffe/util/im2col.hpp | 53 ++- include/caffe/vision_layers.hpp | 394 ++---------------- src/caffe/blob.cpp | 9 +- src/caffe/common.cpp | 43 +- src/caffe/data_reader.cpp | 2 +- src/caffe/data_transformer.cpp | 2 +- src/caffe/{device_context.cpp => device.cpp} | 45 +- src/caffe/greentea/cl_kernels.cpp | 8 +- src/caffe/greentea/cl_kernels/im2col_nd.cl | 112 ++--- src/caffe/greentea/cl_kernels/im2col_ndsk.cl | 179 ++++++++ src/caffe/greentea/greentea_im2col.cpp | 25 +- src/caffe/internal_thread.cpp | 4 +- src/caffe/layer_factory.cpp | 12 +- src/caffe/layers/base_conv_layer.cpp | 2 +- src/caffe/layers/im2col_layer.cu | 28 +- src/caffe/net.cpp | 4 +- src/caffe/parallel.cpp | 8 +- src/caffe/solver.cpp | 8 +- src/caffe/syncedmem.cpp | 7 +- src/caffe/test/test_common.cpp | 12 +- src/caffe/test/test_convolution_layer.cpp | 179 ++++---- src/caffe/test/test_convolution_nd_layer.cpp | 6 +- src/caffe/test/test_embed_layer.cpp | 4 +- src/caffe/test/test_gradient_based_solver.cpp | 6 +- src/caffe/test/test_im2col_kernel.cu | 17 +- src/caffe/test/test_internal_thread.cpp | 8 +- src/caffe/test/test_math_functions.cpp | 12 +- .../test/test_random_number_generator.cpp | 14 +- src/caffe/test/test_syncedmem.cpp | 20 +- src/caffe/test/test_util_blas.cpp | 16 +- src/caffe/util/benchmark.cpp | 14 +- src/caffe/util/im2col.cu | 337 +++++++++++---- tools/caffe.cpp | 11 +- 47 files changed, 952 insertions(+), 852 deletions(-) create mode 100644 include/caffe/dev_ptr.hpp rename include/caffe/{device_context.hpp => device.hpp} (92%) rename src/caffe/{device_context.cpp => device.cpp} (79%) create mode 100644 src/caffe/greentea/cl_kernels/im2col_ndsk.cl diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 70eb93277cb..c2009646af6 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -21,7 +21,7 @@ const int kMaxBlobAxes = 32; namespace caffe { -class DeviceContext; +class device; /** * @brief A wrapper around SyncedMemory holders serving as the basic @@ -38,9 +38,9 @@ class Blob { diff_(), count_(0), capacity_(0), - device_context_(Caffe::GetDefaultDeviceContext()) { + device_context_(Caffe::GetDefaultDevice()) { } - explicit Blob(DeviceContext *device_context) + explicit Blob(device *device_context) : data_(), diff_(), count_(0), @@ -48,10 +48,10 @@ class Blob { device_context_(device_context) { } explicit Blob(const int num, const int channels, const int height, - const int width, DeviceContext *device_context = - Caffe::GetDefaultDeviceContext()); - explicit Blob(const vector& shape, DeviceContext *device_context = - Caffe::GetDefaultDeviceContext()); + const int width, device *device_context = + Caffe::GetDefaultDevice()); + explicit Blob(const vector& shape, device *device_context = + Caffe::GetDefaultDevice()); /** * @brief Change the dimensions of the blob, allocating new memory if @@ -297,7 +297,7 @@ class Blob { /** * @brief Return the device context to which this blob and shared memory belongs */ - DeviceContext *device_context(); + device *device_context(); protected: shared_ptr data_; @@ -306,7 +306,7 @@ class Blob { vector shape_; int count_; int capacity_; - DeviceContext *device_context_; + device *device_context_; DISABLE_COPY_AND_ASSIGN(Blob); }; diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp index a7ba81691ba..60c4ba192d9 100644 --- a/include/caffe/caffe.hpp +++ b/include/caffe/caffe.hpp @@ -6,7 +6,6 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" -#include "caffe/device_context.hpp" #include "caffe/filler.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/layer.hpp" @@ -18,6 +17,7 @@ #include "caffe/util/benchmark.hpp" #include "caffe/util/io.hpp" #include "caffe/vision_layers.hpp" +#include "device.hpp" diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 0c2812f3e99..25c2c36767e 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -77,7 +77,7 @@ namespace cv {class Mat;} namespace caffe { -class DeviceContext; +class device; // We will use the boost shared_ptr instead of the new C++11 one mainly // because cuda does not work (at least now) well with C++11 features. @@ -161,7 +161,7 @@ class Caffe { // requires us to reset those values. static void SetDevice(const int device_id); // Switch the current device - static void SelectDevice(DeviceContext* device_context); + static void SelectDevice(device* device_context); // Prints the current GPU status. static void DeviceQuery(); // Parallel training info @@ -171,8 +171,8 @@ class Caffe { inline static void set_root_solver(bool val) { Get().root_solver_ = val; } // Get the default device - static DeviceContext *GetDefaultDeviceContext(); - static DeviceContext *GetCPUDeviceContext(); + static device *GetDefaultDevice(); + static device *GetCPUDeviceContext(); // Prints info about all devices static int EnumerateDevices(bool silent = false); @@ -182,7 +182,7 @@ class Caffe { static void Synchronize(int device_id); // Get a device context - static DeviceContext *GetDeviceContext(int id); + static device *GetDeviceContext(int id); // Get a device OpenCL program #ifdef USE_GREENTEA @@ -202,9 +202,9 @@ class Caffe { // The shared ptrs are being referenced on every thread, // while the default device will be handled thread local - static vector > device_contexts_; - shared_ptr cpu_device_context_; - DeviceContext* default_device_context_; + static vector > device_contexts_; + shared_ptr cpu_device_context_; + device* default_device_context_; int solver_count_; bool root_solver_; diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 9769c9c597c..e21764eb803 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -17,7 +17,7 @@ template class DataTransformer { public: explicit DataTransformer(const TransformationParameter& param, Phase phase, - DeviceContext *device_context); + device *device_context); virtual ~DataTransformer() { } @@ -148,7 +148,7 @@ class DataTransformer { Phase phase_; Blob data_mean_; vector mean_values_; - DeviceContext *device_context_; + device *device_context_; }; } // namespace caffe diff --git a/include/caffe/dev_ptr.hpp b/include/caffe/dev_ptr.hpp new file mode 100644 index 00000000000..5388b853ecb --- /dev/null +++ b/include/caffe/dev_ptr.hpp @@ -0,0 +1,42 @@ +#ifndef CAFFE_DEVPTR_HPP_ +#define CAFFE_DEVPTR_HPP_ + +namespace caffe { + +/* + * dev_ptr class should be constructed similarly to shared_ptr of Boost. + * (but excluding the smart pointer features, so memory management + * is explicit, and only support types (float, void, double, char, int, ...)) + * It should be possible to use this object just like pointers, + * independently of the backend and device used. + * Dereferencing (although inefficient on some backends) should also + * be supported. + * */ +template class dev_ptr { + public: + // Explicit constructors and destructors + virtual dev_ptr(); + virtual dev_ptr(dev_ptr const& other); + virtual ~dev_ptr(); + + /* Comparators should act like comparators on normal pointers. + /* This can depend on the offset and cl_mem object for OpenCL, + /* and wrap around pointer comparison for CPU and CUDA. + * */ + template virtual inline bool operator==(dev_ptr const &a, + dev_ptr const &b); + template virtual inline bool operator!=(dev_ptr const &a, + dev_ptr const &b); + template virtual inline bool operator>(dev_ptr const &a, + dev_ptr const &b); + // TODO: Remaining cases + + // TODO: Dereference, increment, bracket and other C++ operators + + // TODO: Explicit casting template conversions +}; + +} // namespace caffe + + +#endif /* CAFFE_DEVPTR_HPP_ */ diff --git a/include/caffe/device_context.hpp b/include/caffe/device.hpp similarity index 92% rename from include/caffe/device_context.hpp rename to include/caffe/device.hpp index f095807d2b7..100579ef76d 100644 --- a/include/caffe/device_context.hpp +++ b/include/caffe/device.hpp @@ -23,10 +23,10 @@ using std::vector; namespace caffe { -class DeviceContext { +class device { public: - explicit DeviceContext(); - explicit DeviceContext(int id, int list_id, Backend backend); + explicit device(); + explicit device(int id, int list_id, Backend backend); Backend backend() const; int id() const; int list_id() const; diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index 87fd44d816b..811ca9feea8 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -1,10 +1,3 @@ -/* - * greentea_im2col.hpp - * - * Created on: Apr 8, 2015 - * Author: fabian - */ - #ifndef GREENTEA_IM2COL_HPP_ #define GREENTEA_IM2COL_HPP_ #ifdef USE_GREENTEA @@ -19,6 +12,27 @@ namespace caffe { +template +void greentea_im2col_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + const cl_mem data_im, const int data_im_off, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, cl_mem data_col, + const int data_col_off); + +template +void greentea_col2im_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + const cl_mem data_col, const int data_col_off, + const int channels, const int height, const int width, + const int patch_h, const int patch_w, const int pad_h, + const int pad_w, const int stride_h, + const int stride_w, cl_mem data_im, + const int data_im_off); + + template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, @@ -42,27 +56,26 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, cl_mem data_im, const int data_offset); template -void greentea_im2col_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, - const cl_mem data_im, const int data_im_off, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, cl_mem data_col, - const int data_col_off); +void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, const int num_spatial_axes, + const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem data_col, int data_col_off); template -void greentea_col2im_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, - const cl_mem data_col, const int data_col_off, - const int channels, const int height, const int width, - const int patch_h, const int patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, cl_mem data_im, - const int data_im_off); +void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, const int num_spatial_axes, + const int im_size, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem data_im, + int data_off); + template -void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, +void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int data_off, const int num_spatial_axes, const int num_kernels, @@ -71,7 +84,7 @@ void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, cl_mem kstride, cl_mem data_col, int data_col_off); template -void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, +void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int data_col_off, const int num_spatial_axes, const int im_size, cl_mem im_shape, @@ -79,8 +92,6 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, cl_mem stride, cl_mem kstride, cl_mem data_im, int data_off); - - } // namespace caffe #endif // USE_GREENTEA diff --git a/include/caffe/internal_thread.hpp b/include/caffe/internal_thread.hpp index efa8c44e1c5..ffaf18eaa49 100644 --- a/include/caffe/internal_thread.hpp +++ b/include/caffe/internal_thread.hpp @@ -2,7 +2,7 @@ #define CAFFE_INTERNAL_THREAD_HPP_ #include "caffe/common.hpp" -#include "caffe/device_context.hpp" +#include "device.hpp" /** Forward declare boost::thread instead of including boost/thread.hpp @@ -31,7 +31,7 @@ class InternalThread { * thread values, e.g. device id, solver index etc. The random seed * is initialized using caffe_rng_rand. */ - void StartInternalThread(DeviceContext* device_context); + void StartInternalThread(device* device_context); /** Will not return until the internal thread has exited. */ void StopInternalThread(); @@ -47,10 +47,10 @@ class InternalThread { /* Should be tested when running loops to exit when requested. */ bool must_stop(); - DeviceContext* thread_device_context_; + device* thread_device_context_; private: - void entry(DeviceContext* device_context, Caffe::Brew mode, int rand_seed, + void entry(device* device_context, Caffe::Brew mode, int rand_seed, int solver_count, bool root_solver); shared_ptr thread_; diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index f66873faec3..3915ca210e3 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -7,12 +7,12 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" -#include "caffe/device_context.hpp" #include "caffe/layer_factory.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/device_alternate.hpp" #include "caffe/greentea/greentea.hpp" +#include "device.hpp" /** Forward declare boost::thread instead of including boost/thread.hpp @@ -348,7 +348,7 @@ class Layer { /** * @brief Returns the device context this layer runs on */ - inline DeviceContext *device_context() { + inline device *device_context() { return device_context_; } @@ -378,7 +378,7 @@ class Layer { vector loss_; /** Device context */ - DeviceContext *device_context_; + device *device_context_; /** @brief Using the CPU device, compute the layer output. */ virtual void Forward_cpu(const vector*>& bottom, diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index f4252b65489..5b06224bf98 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -9,9 +9,9 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" -#include "caffe/device_context.hpp" #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" +#include "device.hpp" namespace caffe { diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp index 90b0d7c0ca5..b0bd395f50c 100644 --- a/include/caffe/parallel.hpp +++ b/include/caffe/parallel.hpp @@ -65,24 +65,24 @@ class GPUParams : public Params { class DevicePair { public: - DevicePair(DeviceContext* parent, DeviceContext* device) + DevicePair(device* parent, device* device) : parent_(parent), device_(device) { } - inline DeviceContext* parent() { + inline device* parent() { return parent_; } - inline DeviceContext* device() { + inline device* device() { return device_; } // Group GPUs in pairs, by proximity depending on machine's topology - static void compute(const vector devices, + static void compute(const vector devices, vector* pairs); protected: - DeviceContext* parent_; - DeviceContext* device_; + device* parent_; + device* device_; }; // Synchronous data parallelism using map-reduce between local GPUs. @@ -98,7 +98,7 @@ class P2PSync : public GPUParams, public Solver::Callback, return solver_; } - void run(const vector& gpus); + void run(const vector& gpus); protected: void on_start(); diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index f37dfea039c..28756c9a6ca 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -4,8 +4,8 @@ #include #include -#include "caffe/device_context.hpp" #include "caffe/net.hpp" +#include "device.hpp" namespace caffe { @@ -115,7 +115,7 @@ class Solver { int current_step_; shared_ptr > net_; vector > > test_nets_; - DeviceContext *device_context_; + device *device_context_; vector callbacks_; // The root solver that holds root nets (actually containing shared layers) diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 027a64e42e6..0b72308aa8c 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -32,10 +32,10 @@ class SyncedMemory { head_(UNINITIALIZED), own_cpu_data_(false), own_gpu_data_(false), - device_context_(Caffe::GetDefaultDeviceContext()), + device_context_(Caffe::GetDefaultDevice()), cl_gpu_mem_(NULL) { } - explicit SyncedMemory(DeviceContext *device_context) + explicit SyncedMemory(device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), @@ -45,7 +45,7 @@ class SyncedMemory { device_context_(device_context), cl_gpu_mem_(NULL) { } - explicit SyncedMemory(size_t size, DeviceContext *device_context) + explicit SyncedMemory(size_t size, device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), @@ -63,9 +63,9 @@ class SyncedMemory { head_(UNINITIALIZED), own_cpu_data_(false), own_gpu_data_(false), - device_context_(Caffe::GetDefaultDeviceContext()) { + device_context_(Caffe::GetDefaultDevice()) { } - explicit SyncedMemory(DeviceContext *device_context) + explicit SyncedMemory(device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), @@ -74,7 +74,7 @@ class SyncedMemory { own_gpu_data_(false), device_context_(device_context) { } - explicit SyncedMemory(size_t size, DeviceContext *device_context) + explicit SyncedMemory(size_t size, device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), @@ -121,7 +121,7 @@ class SyncedMemory { SyncedHead head_; bool own_cpu_data_; bool own_gpu_data_; - DeviceContext *device_context_; + device *device_context_; #ifdef USE_GREENTEA cl_mem cl_gpu_mem_; diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index 1a0af805538..063e31cff21 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -9,8 +9,8 @@ #include #include +#include "../device.hpp" #include "caffe/common.hpp" -#include "caffe/device_context.hpp" using std::cout; using std::endl; diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index c5be2fa24d4..d6a12d706b0 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -16,18 +16,36 @@ void col2im_cpu(const Dtype* data_col, const int channels, const int height, const int stride_w, Dtype* data_im); template -void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, - const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, +void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes, + const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, Dtype* data_col); +template +void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, + const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_im); + template void im2col_gpu(const Dtype* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, Dtype* data_col); +template +void col2im_gpu(const Dtype* data_col, const int channels, const int height, + const int width, const int patch_h, const int patch_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, Dtype* data_im); + +template +void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, + const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int kstride_h, const int kstride_w, + Dtype* data_col); + template void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, @@ -35,30 +53,31 @@ void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, const int stride_w, const int kstride_h, const int kstride_w, Dtype* data_im); -template -void col2im_gpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im); - template void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, const int* kstride, - Dtype* data_col); + const int* pad, const int* stride, Dtype* data_col); template void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, Dtype* data_im); + Dtype* data_im); template -void col2im_gpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im); +void im2col_ndsk_gpu(const Dtype* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, const int* kstride, + Dtype* data_col); + +template +void col2im_ndsk_gpu(const Dtype* data_col, const int num_spatial_axes, + const int im_size, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, const int* kstride, + Dtype* data_im); } // namespace caffe diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index f6c907ff106..ce159122e2f 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -9,11 +9,11 @@ #include "caffe/common.hpp" #include "caffe/common_layers.hpp" #include "caffe/data_layers.hpp" -#include "caffe/device_context.hpp" #include "caffe/layer.hpp" #include "caffe/loss_layers.hpp" #include "caffe/neuron_layers.hpp" #include "caffe/proto/caffe.pb.h" +#include "device.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" @@ -199,6 +199,8 @@ class BaseConvolutionLayer : public Layer { Blob stride_; /// @brief The spatial dimensions of the padding. Blob pad_; + /// @brief The spatial dimension of the kernel stride. + Blob kstride_; /// @brief The spatial dimensions of the convolution input. Blob conv_input_shape_; /// @brief The spatial dimensions of the input. @@ -256,16 +258,25 @@ class BaseConvolutionLayer : public Layer { #ifdef USE_CUDA inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - im2col_gpu(data, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], col_buff); + if(this->use_skernel_) { + im2col_sk_gpu(data, conv_in_channels_, conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + kstride_.cpu_data()[0], kstride_.cpu_data()[1], col_buff); + } else { + im2col_gpu(data, conv_in_channels_, conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], col_buff); + } } else { im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), kstride_.gpu_data(), col_buff); + stride_.gpu_data(), col_buff); } } inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { @@ -279,7 +290,6 @@ class BaseConvolutionLayer : public Layer { col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - kstride_.gpu_data(), data); } } @@ -293,9 +303,13 @@ class BaseConvolutionLayer : public Layer { viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); greentea_im2col_gpu(&program, &ctx, (cl_mem) data, data_off, - conv_in_channels_, conv_in_height_, - conv_in_width_, kernel_h_, kernel_w_, pad_h_, - pad_w_, stride_h_, stride_w_, (cl_mem) col_buff, + conv_in_channels_, + conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], (cl_mem) col_buff, col_buff_off); } inline void greentea_conv_col2im_gpu(const Dtype* col_buff, @@ -306,10 +320,13 @@ class BaseConvolutionLayer : public Layer { viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); greentea_col2im_gpu(&program, &ctx, (cl_mem) col_buff, col_buff_off, - conv_in_channels_, conv_in_height_, - conv_in_width_, kernel_h_, kernel_w_, pad_h_, - pad_w_, stride_h_, stride_w_, (cl_mem) data, - data_off); + conv_in_channels_, + conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], (cl_mem) data, data_off); } #endif // USE_GREENTEA #endif // !CPU_ONLY @@ -323,168 +340,24 @@ class BaseConvolutionLayer : public Layer { int col_offset_; int output_offset_; + bool use_skernel_; + Blob col_buffer_; Blob bias_multiplier_; }; -/** - * @brief Abstract base class that factors out the BLAS code common to - * ConvolutionLayer and DeconvolutionLayer for N dimensions. - */ -template -class BaseConvolutionNDLayer : public Layer { - public: - explicit BaseConvolutionNDLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline bool EqualNumBottomTopBlobs() const { return true; } - - - protected: - // Helper functions that abstract away the column buffer and gemm arguments. - // The last argument in forward_cpu_gemm is so that we can skip the im2col if - // we just called weight_cpu_gemm with the same input. - -#ifndef CPU_ONLY - void forward_gpu_gemm(const Dtype* col_input, const int col_input_off, - const Dtype* weights, Dtype* output, - const int output_off, bool skip_im2col = false); - void forward_gpu_bias(Dtype* output, const int output_off, const Dtype* bias); - void backward_gpu_gemm(const Dtype* input, const int input_off, - const Dtype* weights, Dtype* col_output, - const int col_output_off); - void weight_gpu_gemm(const Dtype* col_input, const int col_input_off, - const Dtype* output, const int output_off, - Dtype* weights); - void backward_gpu_bias(Dtype* bias, const Dtype* input, const int input_off); - - shared_ptr< Blob > col_buffer(); -#endif // !CPU_ONLY - - // reverse_dimensions should return true iff we are implementing deconv, so - // that conv helpers know which dimensions are which. - virtual bool reverse_dimensions() = 0; - // Compute height_out_ and width_out_ from other parameters. - virtual void compute_output_shape() = 0; - - /// @brief The spatial dimensions of a filter kernel. - Blob kernel_shape_; - /// @brief The spatial dimensions of the stride. - Blob stride_; - /// @brief The spatial dimensions of the padding. - Blob pad_; - /// @brief The spatial dimension of the kernel stride. - Blob kstride_; - /// @brief The spatial dimensions of the convolution input. - Blob conv_input_shape_; - /// @brief The spatial dimensions of the input. - Blob input_shape_; - /// @brief The spatial dimensions of the col_buffer. - vector col_buffer_shape_; - /// @brief The spatial dimensions of the output. - vector output_shape_; - - int num_spatial_axes_; - int bottom_dim_; - int top_dim_; - - int channel_axis_; - int num_; - int channels_; - int group_; - int num_output_; - bool bias_term_; - bool is_1x1_; - - private: - // wrap im2col/col2im so we don't have to remember the (long) argument lists -#ifndef CPU_ONLY -#ifdef USE_CUDA - inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { - im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), kstride_.gpu_data(), col_buff); - } - inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { - col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - kstride_.gpu_data(), data); - } -#endif // USE_CUDA -#ifdef USE_GREENTEA - inline void greentea_conv_im2col_gpu(const Dtype* data, const int data_off, - Dtype* col_buff, - const int col_buff_off) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - greentea_im2col_nd_gpu(&program, &ctx, (cl_mem)data, data_off, - num_spatial_axes_, - num_kernels_im2col_, - (cl_mem)(conv_input_shape_.gpu_data()), - (cl_mem)(col_buffer_.gpu_shape()), - (cl_mem)(kernel_shape_.gpu_data()), - (cl_mem)(pad_.gpu_data()), - (cl_mem)(stride_.gpu_data()), - (cl_mem)(kstride_.gpu_data()), - (cl_mem) col_buff, col_buff_off); - } - inline void greentea_conv_col2im_gpu(const Dtype* col_buff, - const int col_buff_off, Dtype* data, - const int data_off) { - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - greentea_col2im_nd_gpu(&program, &ctx, - (cl_mem) col_buff, col_buff_off, - num_spatial_axes_, - num_kernels_col2im_, - (cl_mem)(conv_input_shape_.gpu_data()), - (cl_mem)(col_buffer_.gpu_shape()), - (cl_mem)(kernel_shape_.gpu_data()), - (cl_mem)(pad_.gpu_data()), - (cl_mem)(stride_.gpu_data()), - (cl_mem)(kstride_.gpu_data()), - (cl_mem) data, data_off); - } -#endif // USE_GREENTEA -#endif // !CPU_ONLY - - int num_kernels_im2col_; - int num_kernels_col2im_; - int conv_out_channels_; - int conv_in_channels_; - int conv_out_spatial_dim_; - int out_spatial_dim_; - int kernel_dim_; - int weight_offset_; - int col_offset_; - int output_offset_; - Blob col_buffer_; - Blob bias_multiplier_; -}; template -class ConvolutionNDLayer : public BaseConvolutionNDLayer { +class ConvolutionLayer : public BaseConvolutionLayer { public: - explicit ConvolutionNDLayer(const LayerParameter& param) - : BaseConvolutionNDLayer(param) { + explicit ConvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) { } virtual inline const char* type() const { - return "ConvolutionND"; + return "Convolution"; } virtual size_t ForwardFlops() { @@ -519,12 +392,12 @@ class ConvolutionNDLayer : public BaseConvolutionNDLayer { }; template -class DeconvolutionNDLayer : public BaseConvolutionNDLayer { +class DeconvolutionLayer : public BaseConvolutionLayer { public: - explicit DeconvolutionNDLayer(const LayerParameter& param) - : BaseConvolutionNDLayer(param) {} + explicit DeconvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} - virtual inline const char* type() const { return "DeconvolutionND"; } + virtual inline const char* type() const { return "Deconvolution"; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -539,189 +412,6 @@ class DeconvolutionNDLayer : public BaseConvolutionNDLayer { virtual void compute_output_shape(); }; - -/** - * @brief Convolves the input image for pixelwise classification. - * - * Layer introduced by Li, Hongsheng et al. - */ -template -class ConvolutionSKLayer : public Layer { - public: - explicit ConvolutionSKLayer(const LayerParameter& param) - : Layer(param) { - } - - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { - return "ConvolutionSK"; - } - - virtual size_t ForwardFlops() { - size_t M = this->M_; - size_t N = this->N_; - size_t K = this->K_; - size_t group = this->group_; - return group * (M * N * (2 * K - 1)); - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - - shared_ptr< Blob > col_buffer(); - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int channels_; - int group_; - int height_, width_; - int pad_h_, pad_w_; - int kstride_h_, kstride_w_; - int num_, num_output_; - Blob col_buffer_; - Blob bias_multiplier_; - bool bias_term_; - int M_, K_, N_; -}; - -/** - * @brief Convolves the input image with a bank of learned filters, - * and (optionally) adds biases. - * - * Caffe convolves by reduction to matrix multiplication. This achieves - * high-throughput and generality of input and filter dimensions but comes at - * the cost of memory for matrices. This makes use of efficiency in BLAS. - * - * The input is "im2col" transformed to a channel K' x H x W data matrix - * for multiplication with the N x K' x H x W filter matrix to yield a - * N' x H x W output matrix that is then "col2im" restored. K' is the - * input channel * kernel height * kernel width dimension of the unrolled - * inputs so that the im2col matrix has a column for each input region to - * be filtered. col2im restores the output spatial structure by rolling up - * the output channel N' columns of the output matrix. - */ -template -class ConvolutionLayer : public BaseConvolutionLayer { - public: - /** - * @param param provides ConvolutionParameter convolution_param, - * with ConvolutionLayer options: - * - num_output. The number of filters. - * - kernel_size / kernel_h / kernel_w. The filter dimensions, given by - * kernel_size for square filters or kernel_h and kernel_w for rectangular - * filters. - * - stride / stride_h / stride_w (\b optional, default 1). The filter - * stride, given by stride_size for equal dimensions or stride_h and stride_w - * for different strides. By default the convolution is dense with stride 1. - * - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for - * convolution, given by pad for equal dimensions or pad_h and pad_w for - * different padding. Input padding is computed implicitly instead of - * actually padding. - * - group (\b optional, default 1). The number of filter groups. Group - * convolution is a method for reducing parameterization by selectively - * connecting input and output channels. The input and output channel dimensions must be divisible - * by the number of groups. For group @f$ \geq 1 @f$, the - * convolutional filters' input and output channels are separated s.t. each - * group takes 1 / group of the input channels and makes 1 / group of the - * output channels. Concretely 4 input channels, 8 output channels, and - * 2 groups separate input channels 1-2 and output channels 1-4 into the - * first group and input channels 3-4 and output channels 5-8 into the second - * group. - * - bias_term (\b optional, default true). Whether to have a bias. - * - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library - * kernels + stream parallelism) engines. - */ - explicit ConvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) { - } - - virtual inline const char* type() const { - return "Convolution"; - } - - virtual size_t ForwardFlops() { - size_t group = this->group_; - size_t N = this->height_out_ * this->width_out_; - size_t M = this->num_output_ / group; - size_t K = this->channels_ * this->kernel_h_ * this->kernel_w_; - K /= group; - return group * (M * N * (2 * K - 1)); - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual inline bool reverse_dimensions() { - return false; - } - virtual void compute_output_shape(); -}; - - - -/** - * @brief Convolve the input with a bank of learned filters, and (optionally) - * add biases, treating filters and convolution parameters in the - * opposite sense as ConvolutionLayer. - * - * ConvolutionLayer computes each output value by dotting an input window with - * a filter; DeconvolutionLayer multiplies each input value by a filter - * elementwise, and sums over the resulting output windows. In other words, - * DeconvolutionLayer is ConvolutionLayer with the forward and backward passes - * reversed. DeconvolutionLayer reuses ConvolutionParameter for its - * parameters, but they take the opposite sense as in ConvolutionLayer (so - * padding is removed from the output rather than added to the input, and - * stride results in upsampling rather than downsampling). - */ -template -class DeconvolutionLayer : public BaseConvolutionLayer { - public: - explicit DeconvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) { - } - - virtual inline const char* type() const { - return "Deconvolution"; - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual inline bool reverse_dimensions() { - return true; - } - virtual void compute_output_shape(); -}; - #ifdef USE_CUDNN /* * @brief cuDNN implementation of ConvolutionLayer. diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index f0029d97416..6726de42169 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -2,8 +2,9 @@ #include #include "caffe/blob.hpp" + +#include "../../include/caffe/device.hpp" #include "caffe/common.hpp" -#include "caffe/device_context.hpp" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" @@ -68,14 +69,14 @@ bool Blob::ReshapeLike(const Blob& other) { template Blob::Blob(const int num, const int channels, const int height, - const int width, DeviceContext *device_context) + const int width, device *device_context) // capacity_ must be initialized before calling Reshape : capacity_(0), device_context_(device_context) { Reshape(num, channels, height, width); } template -Blob::Blob(const vector& shape, DeviceContext *device_context) +Blob::Blob(const vector& shape, device *device_context) // capacity_ must be initialized before calling Reshape : capacity_(0), device_context_(device_context) { Reshape(shape); @@ -208,7 +209,7 @@ template<> unsigned int Blob::asum_data() const { } template -DeviceContext *Blob::device_context() { +device *Blob::device_context() { return device_context_; } diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 1627cd7e438..7e5fe9a4874 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -9,7 +9,8 @@ #include #include "caffe/common.hpp" -#include "caffe/device_context.hpp" + +#include "caffe/device.hpp" #include "caffe/util/rng.hpp" #ifdef USE_GREENTEA @@ -29,7 +30,7 @@ static Caffe* global_instance_; static std::atomic first(true); // Device contexts are initialized once and shared on all threads -std::vector< shared_ptr > Caffe::device_contexts_; +std::vector< shared_ptr > Caffe::device_contexts_; Caffe& Caffe::Get() { if (first.exchange(false)) { @@ -76,7 +77,7 @@ void GlobalInit(int* pargc, char*** pargv) { ::google::InstallFailureSignalHandler(); } -DeviceContext *Caffe::GetDeviceContext(int id) { +device *Caffe::GetDeviceContext(int id) { // The default device context is thread-local // The list of device contexts is global return @@ -84,11 +85,11 @@ DeviceContext *Caffe::GetDeviceContext(int id) { Get().default_device_context_ : Get().device_contexts_[id].get(); } -DeviceContext *Caffe::GetDefaultDeviceContext() { +device *Caffe::GetDefaultDevice() { return Get().default_device_context_; } -DeviceContext *Caffe::GetCPUDeviceContext() { +device *Caffe::GetCPUDeviceContext() { return Get().cpu_device_context_.get(); } @@ -117,7 +118,7 @@ Caffe::Caffe(const Caffe &obj) { #endif // USE_CUDA } -void Caffe::SelectDevice(DeviceContext* device_context) { +void Caffe::SelectDevice(device* device_context) { #ifndef CPU_ONLY Get().default_device_context_ = device_context; @@ -196,7 +197,7 @@ Caffe::Caffe() #endif // USE_CUDA random_generator_(), mode_(Caffe::CPU), - cpu_device_context_(new DeviceContext(-1, -1, Backend::BACKEND_CPU)), + cpu_device_context_(new device(-1, -1, Backend::BACKEND_CPU)), default_device_context_(cpu_device_context_.get()), solver_count_(1), root_solver_(true) { // Try to create a cublas handler, and report an error if failed (but we will @@ -233,7 +234,7 @@ Caffe::~Caffe() { } void Caffe::set_random_seed(const unsigned int seed) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Curand seed static bool g_curand_availability_logged = false; @@ -260,7 +261,7 @@ void Caffe::set_random_seed(const unsigned int seed) { void Caffe::Synchronize(int device_id) { if (Caffe::mode() == Brew::GPU) { - DeviceContext * device_context = Caffe::GetDeviceContext(device_id); + device * device_context = Caffe::GetDeviceContext(device_id); if (device_context->backend() == BACKEND_CUDA) { #ifdef USE_CUDA cudaDeviceSynchronize(); @@ -367,15 +368,15 @@ void Caffe::SetDevices(std::vector device_ids) { for (int i = 0; i < cuda_device_count; ++i) { for (int j = 0; j < device_ids.size(); ++j) { if (device_ids[j] == i) { - shared_ptr device( - new DeviceContext(i, initcount, Backend::BACKEND_CUDA)); - Get().device_contexts_.emplace_back(device); - device->Init(); + shared_ptr dev( + new device(i, initcount, Backend::BACKEND_CUDA)); + Get().device_contexts_.emplace_back(dev); + dev->Init(); ++initcount; } else { // Temporary until device abstraction is done - shared_ptr device(new DeviceContext()); - Get().device_contexts_.emplace_back(device); + shared_ptr dev(new device()); + Get().device_contexts_.emplace_back(dev); ++initcount; } } @@ -410,16 +411,16 @@ void Caffe::SetDevices(std::vector device_ids) { device_id, std::get<1>(platform_devices[greentea_device_count])); - shared_ptr device( - new DeviceContext(device_id, + shared_ptr dev( + new device(device_id, initcount, Backend::BACKEND_OpenCL)); - Get().device_contexts_.emplace_back(device); - device->Init(); + Get().device_contexts_.emplace_back(dev); + dev->Init(); ++initcount; } else { // Temporary until device abstraction is done - shared_ptr device(new DeviceContext()); - Get().device_contexts_.emplace_back(device); + shared_ptr dev(new device()); + Get().device_contexts_.emplace_back(dev); ++initcount; } } diff --git a/src/caffe/data_reader.cpp b/src/caffe/data_reader.cpp index 9aff1050ffb..e237a5b1cb6 100644 --- a/src/caffe/data_reader.cpp +++ b/src/caffe/data_reader.cpp @@ -63,7 +63,7 @@ DataReader::QueuePair::~QueuePair() { DataReader::Body::Body(const LayerParameter& param) : param_(param), new_queue_pairs_() { - StartInternalThread(Caffe::Get().GetDefaultDeviceContext()); + StartInternalThread(Caffe::Get().GetDefaultDevice()); } DataReader::Body::~Body() { diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index d2768d27d09..4c968af6063 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -15,7 +15,7 @@ namespace caffe { template DataTransformer::DataTransformer(const TransformationParameter& param, Phase phase, - DeviceContext *device_context) + device *device_context) : param_(param), phase_(phase), device_context_(device_context) { // check if we want to use mean_file diff --git a/src/caffe/device_context.cpp b/src/caffe/device.cpp similarity index 79% rename from src/caffe/device_context.cpp rename to src/caffe/device.cpp index 6c939af058f..828e92e2bd4 100644 --- a/src/caffe/device_context.cpp +++ b/src/caffe/device.cpp @@ -8,7 +8,8 @@ #include #include #include -#include "caffe/device_context.hpp" + +#include "caffe/device.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/util/device_alternate.hpp" @@ -18,17 +19,17 @@ namespace caffe { -DeviceContext::DeviceContext() +device::device() : current_queue_id_(0), workgroup_sizes_(3, 0), id_(0), list_id_(0), backend_(Backend::BACKEND_CPU), memory_usage_(0), peak_memory_usage_(0) { } -DeviceContext::DeviceContext(int id, int list_id, Backend backend) +device::device(int id, int list_id, Backend backend) : current_queue_id_(0), workgroup_sizes_(3, 0), id_(id), list_id_(list_id), backend_(backend), memory_usage_(0), peak_memory_usage_(0) { } -void DeviceContext::Init() { +void device::Init() { #ifndef CPU_ONLY if (backend_ == BACKEND_CUDA) { #ifdef USE_CUDA @@ -56,24 +57,24 @@ void DeviceContext::Init() { #endif // !CPU_ONLY } -Backend DeviceContext::backend() const { +Backend device::backend() const { return backend_; } -int DeviceContext::id() const { +int device::id() const { return id_; } -int DeviceContext::list_id() const { +int device::list_id() const { return list_id_; } -int DeviceContext::WorkgroupSize(int id) { +int device::WorkgroupSize(int id) { return workgroup_sizes_[id]; return 0; } -int DeviceContext::num_queues() { +int device::num_queues() { if (backend_ == BACKEND_CUDA) { #ifdef USE_CUDA return 1; @@ -87,7 +88,7 @@ int DeviceContext::num_queues() { } template<> -shared_ptr > DeviceContext::Buffer(int id) { +shared_ptr > device::Buffer(int id) { if (buff_f_.size() <= id) { shared_ptr > blob_pointer(new Blob(this)); buff_f_.push_back(blob_pointer); @@ -96,7 +97,7 @@ shared_ptr > DeviceContext::Buffer(int id) { } template<> -shared_ptr > DeviceContext::Buffer(int id) { +shared_ptr > device::Buffer(int id) { if (buff_d_.size() <= id) { shared_ptr > blob_pointer(new Blob(this)); buff_d_.push_back(blob_pointer); @@ -104,11 +105,11 @@ shared_ptr > DeviceContext::Buffer(int id) { return buff_d_[id]; } -int DeviceContext::current_queue_id() { +int device::current_queue_id() { return current_queue_id_; } -void DeviceContext::SwitchQueue(int id) { +void device::SwitchQueue(int id) { if (backend_ == BACKEND_CUDA) { #ifdef USE_CUDA (void) id; @@ -122,7 +123,7 @@ void DeviceContext::SwitchQueue(int id) { } } -void DeviceContext::FinishQueues() { +void device::FinishQueues() { if (backend_ == BACKEND_CUDA) { #ifdef USE_CUDA #endif // USE_CUDA @@ -139,30 +140,30 @@ void DeviceContext::FinishQueues() { } } -size_t DeviceContext::memory_usage() { +size_t device::memory_usage() { return memory_usage_; } -size_t DeviceContext::peak_memory_usage() { +size_t device::peak_memory_usage() { return peak_memory_usage_; } -void DeviceContext::IncreaseMemoryUsage(size_t bytes) { +void device::IncreaseMemoryUsage(size_t bytes) { memory_usage_ += bytes; if (memory_usage_ > peak_memory_usage_) { peak_memory_usage_ = memory_usage_; } } -void DeviceContext::DecreaseMemoryUsage(size_t bytes) { +void device::DecreaseMemoryUsage(size_t bytes) { memory_usage_ -= bytes; } -void DeviceContext::ResetPeakMemoryUsage() { +void device::ResetPeakMemoryUsage() { peak_memory_usage_ = memory_usage_; } -bool DeviceContext::CheckCapability(std::string cap) { +bool device::CheckCapability(std::string cap) { if (backend_ == BACKEND_OpenCL) { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(id_); @@ -186,11 +187,11 @@ bool DeviceContext::CheckCapability(std::string cap) { } #ifdef USE_GREENTEA -viennacl::ocl::program &DeviceContext::program() { +viennacl::ocl::program &device::program() { return ocl_program_; } -void DeviceContext::SetProgram() { +void device::SetProgram() { ocl_program_ = RegisterKernels( &(viennacl::ocl::get_context(static_cast(id_)))); } diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 0ed5285bcd4..25d5b1feb3c 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -18,7 +18,8 @@ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n Dtype* data_col_ptr = data_col + channel_out;\n const Dtype* data_im_ptr = data_im + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes,\n __global const Dtype* data_col,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT +std::string im2col_ndsk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT @@ -40,7 +41,8 @@ std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n } // CUDA_KERNEL_LOOP(index, n)\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n Dtype* data_col_ptr = data_col + channel_out;\n const Dtype* data_im_ptr = data_im + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes,\n __global const Dtype* data_col,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT +std::string im2col_ndsk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT @@ -68,6 +70,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << fillbuffer_float << "\n\n"; // NOLINT ss << im2col_float << "\n\n"; // NOLINT ss << im2col_nd_float << "\n\n"; // NOLINT + ss << im2col_ndsk_float << "\n\n"; // NOLINT ss << im2col_sk_float << "\n\n"; // NOLINT ss << lrn_float << "\n\n"; // NOLINT ss << math_float << "\n\n"; // NOLINT @@ -95,6 +98,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << fillbuffer_double << "\n\n"; // NOLINT ss << im2col_double << "\n\n"; // NOLINT ss << im2col_nd_double << "\n\n"; // NOLINT + ss << im2col_ndsk_double << "\n\n"; // NOLINT ss << im2col_sk_double << "\n\n"; // NOLINT ss << lrn_double << "\n\n"; // NOLINT ss << math_double << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index fbc899d9cc9..4dfc96781e1 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -3,16 +3,16 @@ #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, - __global const Dtype* data_im, - const int data_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, - __global const int* kstride, - __global Dtype* data_col, - const int data_col_off) { + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; int d_iter[6]; int i; @@ -38,8 +38,8 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, data_col_inc *= col_shape[i + 1]; d_iter[i] = 0; } - __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; - __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + Dtype* data_col_ptr = data_col + channel_out; + const Dtype* data_im_ptr = data_im + channel_in; bool incremented; do { bool in_range = true; @@ -50,8 +50,6 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, break; } } - - // Write column data if (in_range) { int data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { @@ -62,19 +60,14 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } else { *data_col_ptr = 0; } - data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { - // Old: const int d_max = kernel_shape[i]; - // New (strided, limit is the external kernel size): - const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + const int d_max = kernel_shape[i]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 - // Old: ++d_iter[i]; - // New (strided, increment by the stride each time): - d_iter[i] += kstride[i]; + ++d_iter[i]; incremented = true; break; } @@ -83,30 +76,20 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, - __global const Dtype* data_col, - const int data_col_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, - __global const int* kstride, - __global Dtype* data_im, - const int data_off) { + + +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + __global const Dtype* data_col, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im) { int d_im[6]; - int d_col_size[6]; int d_col_iter[6]; int d_col_start[6]; int d_col_end[6]; - int d_ext_patch[6]; - int d_idx[6]; - - for (int i = num_axes - 1; i >= 0; --i) { - d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; - d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) - / stride[i] + 1; - } for (int index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate @@ -120,20 +103,11 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, // Calculate col start/end indices. bool done = false; for (int i = 0; i < num_axes; ++i) { - // Old: - /*d_col_start[i] = d_col_iter[i] = + d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? - 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ - // New: - d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? - d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; - d_col_iter[i] = d_col_start[i]; - d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; - d_col_end[i] = (d_im[i] >= d_col_size[i]) ? - (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) - % kstride[i] : d_im[i]; - if (d_col_start[i] > d_col_end[i]) { + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; @@ -142,7 +116,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } } if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) + continue; } // Loop over the col to compute the output val. Dtype val = 0; @@ -150,30 +124,30 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, do { // Compute the final offset. int final_offset = 0; - int coeff_prod = 1; + int kernel_shape_prod = 1; for (int i = num_axes - 1; i >= 0; --i) { - final_offset += d_col_iter[i] * coeff_prod; - coeff_prod *= d_col_size[i]; + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; } - for (int i = num_axes - 1; i >= 0; --i) { - final_offset += d_idx[i] * coeff_prod; - coeff_prod *= kernel_shape[i]; + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; } - final_offset += channel_im * coeff_prod; val += data_col[final_offset]; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { - if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; - d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; - } else { // d_col_iter[i] <= d_max - kstride[1] - d_col_iter[i] += kstride[i]; - --d_idx[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; incremented = true; break; // for (int i = num_axes - 1; i >= 0; --i) } } // for (int i = num_axes - 1; i >= 0; --i) - } while (incremented); + } while (incremented); data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) + } } diff --git a/src/caffe/greentea/cl_kernels/im2col_ndsk.cl b/src/caffe/greentea/cl_kernels/im2col_ndsk.cl new file mode 100644 index 00000000000..1db4c7b01c5 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/im2col_ndsk.cl @@ -0,0 +1,179 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + + // Write column data + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + // Old: const int d_max = kernel_shape[i]; + // New (strided, limit is the external kernel size): + const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + // Old: ++d_iter[i]; + // New (strided, increment by the stride each time): + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_im, + const int data_off) { + int d_im[6]; + int d_col_size[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + int d_ext_patch[6]; + int d_idx[6]; + + for (int i = num_axes - 1; i >= 0; --i) { + d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + / stride[i] + 1; + } + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + // Old: + /*d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + // New: + d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + d_col_end[i] = (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) + % kstride[i] : d_im[i]; + if (d_col_start[i] > d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int coeff_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_col_iter[i] * coeff_prod; + coeff_prod *= d_col_size[i]; + } + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += d_idx[i] * coeff_prod; + coeff_prod *= kernel_shape[i]; + } + final_offset += channel_im * coeff_prod; + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + if (d_col_iter[i] > d_col_end[i] - kstride[i]) { + d_col_iter[i] = d_col_start[i]; + d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; + } else { // d_col_iter[i] <= d_max - kstride[1] + d_col_iter[i] += kstride[i]; + --d_idx[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } +} diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 1ac9f759cfd..e7433f79fb7 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -29,23 +29,6 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("im2col_sk")); - /*std::cout << "num_kernels: " << num_kernels << std::endl; - std::cout << "data_offset: " << data_offset << std::endl; - std::cout << "height: " << height << std::endl; - std::cout << "width: " << width << std::endl; - std::cout << "kernel_h: " << kernel_h << std::endl; - std::cout << "kernel_w: " << kernel_w << std::endl; - std::cout << "ext_kernel_h: " << ext_kernel_h << std::endl; - std::cout << "ext_kernel_w: " << ext_kernel_w << std::endl; - std::cout << "pad_h: " << pad_h << std::endl; - std::cout << "pad_w: " << pad_w << std::endl; - std::cout << "stride_h: " << stride_h << std::endl; - std::cout << "stride_w: " << stride_w << std::endl; - std::cout << "kstride_h: " << kstride_h << std::endl; - std::cout << "kstride_w: " << kstride_w << std::endl; - std::cout << "height_col: " << height_col << std::endl; - std::cout << "width_col: " << width_col << std::endl;*/ - viennacl::ocl::enqueue( kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, @@ -236,7 +219,7 @@ template void greentea_col2im_gpu(viennacl::ocl::program *prog, template -void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, +void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int data_off, const int num_spatial_axes, const int num_kernels, @@ -274,7 +257,7 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, cl_mem kstride, cl_mem data_col, int data_col_off); template -void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, +void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int data_col_off, const int num_spatial_axes, const int im_size, cl_mem im_shape, @@ -295,7 +278,7 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, } // Explicit instantiation -template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, +template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int data_col_off, const int num_spatial_axes, const int im_size, cl_mem im_shape, @@ -303,7 +286,7 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, cl_mem stride, cl_mem kstride, cl_mem data_im, int data_off); -template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, +template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int data_col_off, const int num_spatial_axes, const int im_size, cl_mem im_shape, diff --git a/src/caffe/internal_thread.cpp b/src/caffe/internal_thread.cpp index cf606ef790c..6bd83c8cb92 100644 --- a/src/caffe/internal_thread.cpp +++ b/src/caffe/internal_thread.cpp @@ -18,7 +18,7 @@ bool InternalThread::must_stop() { return thread_ && thread_->interruption_requested(); } -void InternalThread::StartInternalThread(DeviceContext* device_context) { +void InternalThread::StartInternalThread(device* device_context) { CHECK(!is_started()) << "Threads should persist and not be restarted."; thread_device_context_ = device_context; @@ -37,7 +37,7 @@ void InternalThread::StartInternalThread(DeviceContext* device_context) { } } -void InternalThread::entry(DeviceContext* device_context, Caffe::Brew mode, +void InternalThread::entry(device* device_context, Caffe::Brew mode, int rand_seed, int solver_count, bool root_solver) { Caffe::SelectDevice(device_context); Caffe::set_mode(mode); diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 1fb19d4cffb..0d15fb5b071 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -37,7 +37,7 @@ shared_ptr > GetConvolutionLayer(const LayerParameter& param) { #endif } if (engine == ConvolutionParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { return shared_ptr >(new ConvolutionLayer(param)); #ifdef USE_CUDNN } else if (engine == ConvolutionParameter_Engine_CUDNN) { @@ -61,7 +61,7 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { #endif } if (engine == PoolingParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { return shared_ptr >(new PoolingLayer(param)); #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { @@ -92,7 +92,7 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { #endif } if (engine == ReLUParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { return shared_ptr >(new ReLULayer(param)); #ifdef USE_CUDNN } else if (engine == ReLUParameter_Engine_CUDNN) { @@ -116,7 +116,7 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { #endif } if (engine == SigmoidParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { return shared_ptr >(new SigmoidLayer(param)); #ifdef USE_CUDNN } else if (engine == SigmoidParameter_Engine_CUDNN) { @@ -140,7 +140,7 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { #endif } if (engine == SoftmaxParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { return shared_ptr >(new SoftmaxLayer(param)); #ifdef USE_CUDNN } else if (engine == SoftmaxParameter_Engine_CUDNN) { @@ -164,7 +164,7 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { #endif } if (engine == TanHParameter_Engine_CAFFE - || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) { + || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { return shared_ptr >(new TanHLayer(param)); #ifdef USE_CUDNN } else if (engine == TanHParameter_Engine_CUDNN) { diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 8b0b2cf5fd8..cd7fa865890 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -419,7 +419,7 @@ void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, #ifdef USE_GREENTEA greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num_output_, - height_out_ * width_out_, 1, (Dtype) 1., + out_spatial_dim_, 1, (Dtype) 1., (cl_mem) bias, 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., (cl_mem) output, output_off); diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index b0795204f30..3882c64dd92 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -49,19 +49,23 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, for (int n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - greentea_im2col_gpu(&program, &ctx, (cl_mem)bottom_data, n * bottom_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - (cl_mem)top_data, n * top_dim_); + greentea_im2col_gpu(&program, &ctx, (cl_mem) bottom_data, + n * bottom_dim_, channels_, + bottom[0]->shape(channel_axis_ + 1), + bottom[0]->shape(channel_axis_ + 2), + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], (cl_mem) top_data, + n * top_dim_); } else { - greentea_im2col_nd_gpu(&program, &ctx, (cl_mem)bottom_data, n * bottom_dim_, num_spatial_axes_, - num_kernels, bottom[0]->gpu_shape() + channel_axis_, - top[0]->gpu_shape() + channel_axis_, - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), (cl_mem)top_data, n * top_dim_); + greentea_im2col_nd_gpu(&program, &ctx, (cl_mem) bottom_data, + n * bottom_dim_, num_spatial_axes_, num_kernels, + bottom[0]->gpu_shape() + channel_axis_, + top[0]->gpu_shape() + channel_axis_, + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), (cl_mem) top_data, + n * top_dim_); } } diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 9759136c909..be429ee5321 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -1045,14 +1045,14 @@ void Net::ClearParamDiffs() { break; case Caffe::GPU: #ifndef CPU_ONLY - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(blob->count(), static_cast(0), blob->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_set(Caffe::GetDefaultDeviceContext()->id(), + greentea_gpu_set(Caffe::GetDefaultDevice()->id(), blob->count(), static_cast(0), (cl_mem)(blob->mutable_gpu_diff()), 0); #endif // USE_GREENTEA diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index 6a0ddfabdb1..8ea2a2b2000 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -125,10 +125,10 @@ void GPUParams::configure(Solver* solver) const { apply_buffers(net, diff_, size_, replace_gpu_diff); } -void DevicePair::compute(const vector devices, +void DevicePair::compute(const vector devices, vector* pairs) { #ifndef CPU_ONLY - vector remaining(devices); + vector remaining(devices); // Depth for reduction tree int remaining_depth = static_cast(ceil(log2(remaining.size()))); @@ -412,7 +412,7 @@ void P2PSync::on_gradients_ready() { } template -void P2PSync::run(const vector& gpus) { +void P2PSync::run(const vector& gpus) { // Pair devices for map-reduce synchronization vector pairs; DevicePair::compute(gpus, &pairs); @@ -451,7 +451,7 @@ void P2PSync::run(const vector& gpus) { LOG(INFO)<< "Starting Optimization"; for (int i = 1; i < syncs.size(); ++i) { - syncs[i]->StartInternalThread(Caffe::GetDefaultDeviceContext()); + syncs[i]->StartInternalThread(Caffe::GetDefaultDevice()); } // Run root solver on current thread diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index e5fd769784e..892d136194c 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -49,7 +49,7 @@ Solver::Solver(const string& param_file, const Solver* root_solver) template void Solver::Init(const SolverParameter& param) { - device_context_ = Caffe::GetDefaultDeviceContext(); + device_context_ = Caffe::GetDefaultDevice(); CHECK(Caffe::root_solver() || root_solver_) << "root_solver_ needs to be set for all non-root solvers"; LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " @@ -559,13 +559,13 @@ void SGDSolver::PreSolve() { const vector& shape = net_params[i]->shape(); history_.push_back( shared_ptr>( - new Blob(shape, Caffe::GetDefaultDeviceContext()))); + new Blob(shape, Caffe::GetDefaultDevice()))); update_.push_back( shared_ptr>( - new Blob(shape, Caffe::GetDefaultDeviceContext()))); + new Blob(shape, Caffe::GetDefaultDevice()))); temp_.push_back( shared_ptr>( - new Blob(shape, Caffe::GetDefaultDeviceContext()))); + new Blob(shape, Caffe::GetDefaultDevice()))); } } diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 67d837a6293..ebeff043ab7 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -1,9 +1,10 @@ #include #include "caffe/common.hpp" -#include "caffe/device_context.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/syncedmem.hpp" + +#include "../../include/caffe/device.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA @@ -22,7 +23,7 @@ namespace caffe { void CaffeMallocHost(void** ptr, size_t size) { #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaMallocHost(ptr, size)); return; @@ -43,7 +44,7 @@ void CaffeMallocHost(void** ptr, size_t size) { void CaffeFreeHost(void* ptr) { #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA cudaFreeHost(ptr); return; diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index 9481adcf8ed..da3e937866e 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -15,7 +15,7 @@ class CommonTest : public ::testing::Test {}; #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestCublasHandlerGPU) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA int cuda_device_id; CUDA_CHECK(cudaGetDevice(&cuda_device_id)); @@ -34,8 +34,8 @@ TEST_F(CommonTest, TestBrewMode) { } TEST_F(CommonTest, TestRandSeedCPU) { - SyncedMemory data_a(10 * sizeof(int), Caffe::GetDefaultDeviceContext()); - SyncedMemory data_b(10 * sizeof(int), Caffe::GetDefaultDeviceContext()); + SyncedMemory data_a(10 * sizeof(int), Caffe::GetDefaultDevice()); + SyncedMemory data_b(10 * sizeof(int), Caffe::GetDefaultDevice()); Caffe::set_random_seed(1701); caffe_rng_bernoulli(10, 0.5, static_cast(data_a.mutable_cpu_data())); @@ -51,14 +51,14 @@ TEST_F(CommonTest, TestRandSeedCPU) { #ifndef CPU_ONLY // GPU Caffe singleton test. TEST_F(CommonTest, TestRandSeedGPU) { - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA SyncedMemory data_a(10 * sizeof(unsigned int), - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); SyncedMemory data_b(10 * sizeof(unsigned int), - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); Caffe::set_random_seed(1701); CURAND_CHECK(curandGenerate(Caffe::curand_generator(), static_cast(data_a.mutable_gpu_data()), 10)); diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 0c2596fbb86..1be0272d356 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -835,94 +835,95 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { - // Test separable convolution by computing the Sobel operator - // as a single filter then comparing the result - // as the convolution of two rectangular filters. - // Fill bottoms with identical Gaussian noise. - shared_ptr > filler; - FillerParameter filler_param; - filler_param.set_value(1.); - filler.reset(new GaussianFiller(filler_param)); - filler->Fill(this->blob_bottom_); - this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); - // Compute Sobel G_x operator as 3 x 3 convolution. - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(1); - convolution_param->set_bias_term(false); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); - TypeParam* weights = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 9; // 3 x 3 filter - weights[i + 0] = -1; - weights[i + 1] = 0; - weights[i + 2] = 1; - weights[i + 3] = -2; - weights[i + 4] = 0; - weights[i + 5] = 2; - weights[i + 6] = -1; - weights[i + 7] = 0; - weights[i + 8] = 1; - } - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. - // (1) the [1 2 1] column filter - vector*> sep_blob_bottom_vec; - vector*> sep_blob_top_vec; - shared_ptr > blob_sep(new Blob()); - sep_blob_bottom_vec.push_back(this->blob_bottom_2_); - sep_blob_top_vec.push_back(this->blob_top_2_); - convolution_param->clear_kernel_size(); - convolution_param->clear_stride(); - convolution_param->set_kernel_h(3); - convolution_param->set_kernel_w(1); - convolution_param->set_stride_h(2); - convolution_param->set_stride_w(1); - convolution_param->set_num_output(1); - convolution_param->set_bias_term(false); - layer.reset(new CuDNNConvolutionLayer(layer_param)); - layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); - TypeParam* weights_1 = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 3; // 3 x 1 filter - weights_1[i + 0] = 1; - weights_1[i + 1] = 2; - weights_1[i + 2] = 1; - } - layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); - layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); - // (2) the [-1 0 1] row filter - blob_sep->CopyFrom(*this->blob_top_2_, false, true); - sep_blob_bottom_vec.clear(); - sep_blob_bottom_vec.push_back(blob_sep.get()); - convolution_param->set_kernel_h(1); - convolution_param->set_kernel_w(3); - convolution_param->set_stride_h(1); - convolution_param->set_stride_w(2); - convolution_param->set_num_output(1); - convolution_param->set_bias_term(false); - layer.reset(new CuDNNConvolutionLayer(layer_param)); - layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 1, 1, 3)); - TypeParam* weights_2 = layer->blobs()[0]->mutable_cpu_data(); - weights_2[0] = -1; - weights_2[1] = 0; - weights_2[2] = 1; - layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); - layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); - // Test equivalence of full and separable filters. - const TypeParam* top_data = this->blob_top_->cpu_data(); - const TypeParam* sep_top_data = this->blob_top_2_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); + // Test separable convolution by computing the Sobel operator + // as a single filter then comparing the result + // as the convolution of two rectangular filters. + // Fill bottoms with identical Gaussian noise. + shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new GaussianFiller(filler_param)); + filler->Fill(this->blob_bottom_); + this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); + // Compute Sobel G_x operator as 3 x 3 convolution. + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); + TypeParam* weights = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 9; // 3 x 3 filter + weights[i + 0] = -1; + weights[i + 1] = 0; + weights[i + 2] = 1; + weights[i + 3] = -2; + weights[i + 4] = 0; + weights[i + 5] = 2; + weights[i + 6] = -1; + weights[i + 7] = 0; + weights[i + 8] = 1; + } + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. + // (1) the [1 2 1] column filter + vector*> sep_blob_bottom_vec; + vector*> sep_blob_top_vec; + shared_ptr > blob_sep(new Blob()); + sep_blob_bottom_vec.push_back(this->blob_bottom_2_); + sep_blob_top_vec.push_back(this->blob_top_2_); + convolution_param->clear_kernel_size(); + convolution_param->clear_stride(); + convolution_param->set_kernel_h(3); + convolution_param->set_kernel_w(1); + convolution_param->set_stride_h(2); + convolution_param->set_stride_w(1); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new CuDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); + TypeParam* weights_1 = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 3 x 1 filter + weights_1[i + 0] = 1; + weights_1[i + 1] = 2; + weights_1[i + 2] = 1; + } + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // (2) the [-1 0 1] row filter + blob_sep->CopyFrom(*this->blob_top_2_, false, true); + sep_blob_bottom_vec.clear(); + sep_blob_bottom_vec.push_back(blob_sep.get()); + convolution_param->set_kernel_h(1); + convolution_param->set_kernel_w(3); + convolution_param->set_stride_h(1); + convolution_param->set_stride_w(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new CuDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 1, 1, 3)); + TypeParam* weights_2 = layer->blobs()[0]->mutable_cpu_data(); + weights_2[0] = -1; + weights_2[1] = 0; + weights_2[2] = 1; + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // Test equivalence of full and separable filters. + const TypeParam* top_data = this->blob_top_->cpu_data(); + const TypeParam* sep_top_data = this->blob_top_2_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); + } } } @@ -958,7 +959,5 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } - #endif - } // namespace caffe diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp index 6caded9b9f4..44b8170d46b 100644 --- a/src/caffe/test/test_convolution_nd_layer.cpp +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -70,7 +70,7 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { convolution_param->mutable_bias_filler()->set_type("constant"); convolution_param->mutable_bias_filler()->set_value(0); - ConvolutionNDLayer layer(layer_param); + ConvolutionLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); int d = blob_bottom_->shape(2); @@ -122,7 +122,7 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { convolution_param->mutable_bias_filler()->set_type("constant"); convolution_param->mutable_bias_filler()->set_value(0); - ConvolutionNDLayer layer(layer_param); + ConvolutionLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam *top_diff = blob_top_->mutable_cpu_diff(); @@ -176,7 +176,7 @@ TYPED_TEST(ConvolutionNDLayerTest, TestSetup) { convolution_param->set_num_output(4); - ConvolutionNDLayer layer(layer_param); + ConvolutionLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(1, this->blob_top_->shape(2)); diff --git a/src/caffe/test/test_embed_layer.cpp b/src/caffe/test/test_embed_layer.cpp index 8b97b5f0fb3..5edc558e838 100644 --- a/src/caffe/test/test_embed_layer.cpp +++ b/src/caffe/test/test_embed_layer.cpp @@ -138,7 +138,7 @@ TYPED_TEST(EmbedLayerTest, TestForwardWithBias) { TYPED_TEST(EmbedLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; // Skip the test on unsupported OpenCL devices with double - if (!Caffe::GetDefaultDeviceContext()-> + if (!Caffe::GetDefaultDevice()-> CheckCapability("cl_khr_int64_base_atomics") && is_same::value) { return; @@ -164,7 +164,7 @@ TYPED_TEST(EmbedLayerTest, TestGradient) { TYPED_TEST(EmbedLayerTest, TestGradientWithBias) { typedef typename TypeParam::Dtype Dtype; // Skip the test on unsupported OpenCL devices with double - if (!Caffe::GetDefaultDeviceContext()-> + if (!Caffe::GetDefaultDevice()-> CheckCapability("cl_khr_int64_base_atomics") && is_same::value) { return; diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index a18fca33a0f..aa130d8de80 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -77,7 +77,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { #ifndef CPU_ONLY #ifdef USE_CUDA if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { CUDA_CHECK(cudaGetDevice(&device_id)); } #endif // USE_CUDA @@ -198,7 +198,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { this->solver_->Solve(); } else { LOG(INFO) << "Multi-GPU test on " << devices << " devices"; - vector gpus; + vector gpus; // put current device at the beginning int device_id = solver_->param().device_id(); gpus.push_back(Caffe::Get().GetDeviceContext(device_id)); @@ -478,7 +478,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { #ifndef CPU_ONLY #ifdef USE_CUDA if (Caffe::mode() == Caffe::GPU && - Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { CUDA_CHECK(cudaGetDeviceCount(&available_devices)); } #endif // USE_CUDA diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index a117f33864d..31e288e0489 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -200,14 +200,15 @@ TYPED_TEST(Im2colKernelTest, TestND) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_nd_gpu_kernelCUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS) ( - num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), - this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, - this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), - this->blob_stride_->gpu_data(), - top_data_gpu + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel + CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS) ( + num_kernels, 2, bottom_data_gpu + this->blob_bottom_->offset(n), + this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, + this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), + this->blob_stride_->gpu_data(), + top_data_gpu + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version diff --git a/src/caffe/test/test_internal_thread.cpp b/src/caffe/test/test_internal_thread.cpp index ca546983dcf..1c1cc9e44f4 100644 --- a/src/caffe/test/test_internal_thread.cpp +++ b/src/caffe/test/test_internal_thread.cpp @@ -14,7 +14,7 @@ class InternalThreadTest : public ::testing::Test {}; TEST_F(InternalThreadTest, TestStartAndExit) { InternalThread thread; EXPECT_FALSE(thread.is_started()); - thread.StartInternalThread(Caffe::Get().GetDefaultDeviceContext()); + thread.StartInternalThread(Caffe::Get().GetDefaultDevice()); EXPECT_TRUE(thread.is_started()); thread.StopInternalThread(); EXPECT_FALSE(thread.is_started()); @@ -35,17 +35,17 @@ class TestThreadB : public InternalThread { TEST_F(InternalThreadTest, TestRandomSeed) { TestThreadA t1; Caffe::set_random_seed(9658361); - t1.StartInternalThread(Caffe::Get().GetDefaultDeviceContext()); + t1.StartInternalThread(Caffe::Get().GetDefaultDevice()); t1.StopInternalThread(); TestThreadA t2; Caffe::set_random_seed(9658361); - t2.StartInternalThread(Caffe::Get().GetDefaultDeviceContext()); + t2.StartInternalThread(Caffe::Get().GetDefaultDevice()); t2.StopInternalThread(); TestThreadB t3; Caffe::set_random_seed(3435563); - t3.StartInternalThread(Caffe::Get().GetDefaultDeviceContext()); + t3.StartInternalThread(Caffe::Get().GetDefaultDevice()); t3.StopInternalThread(); } diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 211318cf9b9..130625f09cb 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -186,7 +186,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { } TypeParam gpu_asum; - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -204,7 +204,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { TYPED_TEST(GPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -229,7 +229,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { int n = this->blob_bottom_->count(); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -254,7 +254,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { TYPED_TEST(GPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -281,7 +281,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_scale(n, alpha, this->blob_bottom_->gpu_data(), @@ -307,7 +307,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestCopy) { const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(n, bottom_data, top_data); diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 7460990f523..eaba811475c 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -24,13 +24,13 @@ class RandomNumberGeneratorTest : public ::testing::Test { sample_size_(10000), seed_(1701), data_(new SyncedMemory(sample_size_ * sizeof(Dtype), - Caffe::GetDefaultDeviceContext())), + Caffe::GetDefaultDevice())), data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype), - Caffe::GetDefaultDeviceContext())), + Caffe::GetDefaultDevice())), int_data_(new SyncedMemory(sample_size_ * sizeof(int), - Caffe::GetDefaultDeviceContext())), + Caffe::GetDefaultDevice())), int_data_2_(new SyncedMemory(sample_size_ * sizeof(int), - Caffe::GetDefaultDeviceContext())) {} + Caffe::GetDefaultDevice())) {} virtual void SetUp() { Caffe::set_random_seed(this->seed_); @@ -183,7 +183,7 @@ class RandomNumberGeneratorTest : public ::testing::Test { void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu_data) { Dtype* rng_data = static_cast(gpu_data); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -201,7 +201,7 @@ class RandomNumberGeneratorTest : public ::testing::Test { CHECK_GE(upper, lower); Dtype* rng_data = static_cast(gpu_data); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -219,7 +219,7 @@ class RandomNumberGeneratorTest : public ::testing::Test { // caffe_gpu_rng_uniform. void RngUniformIntFillGPU(void* gpu_data) { unsigned int* rng_data = static_cast(gpu_data); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index 0800410853c..c9c16360d64 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -21,11 +21,11 @@ class SyncedMemoryTest : public ::testing::Test { }; TEST_F(SyncedMemoryTest, TestInitialization) { - SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); + SyncedMemory mem(10, Caffe::GetDefaultDevice()); EXPECT_EQ(mem.head(), SyncedMemory::UNINITIALIZED); EXPECT_EQ(mem.size(), 10); SyncedMemory* p_mem = new SyncedMemory(10 * sizeof(float), - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); EXPECT_EQ(p_mem->size(), 10 * sizeof(float)); delete p_mem; } @@ -33,7 +33,7 @@ TEST_F(SyncedMemoryTest, TestInitialization) { #ifndef CPU_ONLY // GPU test TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) { - SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); + SyncedMemory mem(10, Caffe::GetDefaultDevice()); EXPECT_TRUE(mem.cpu_data()); EXPECT_TRUE(mem.gpu_data()); EXPECT_TRUE(mem.mutable_cpu_data()); @@ -43,7 +43,7 @@ TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) { #endif TEST_F(SyncedMemoryTest, TestAllocationCPU) { - SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); + SyncedMemory mem(10, Caffe::GetDefaultDevice()); EXPECT_TRUE(mem.cpu_data()); EXPECT_TRUE(mem.mutable_cpu_data()); } @@ -51,7 +51,7 @@ TEST_F(SyncedMemoryTest, TestAllocationCPU) { #ifndef CPU_ONLY // GPU test TEST_F(SyncedMemoryTest, TestAllocationGPU) { - SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); + SyncedMemory mem(10, Caffe::GetDefaultDevice()); EXPECT_TRUE(mem.gpu_data()); EXPECT_TRUE(mem.mutable_gpu_data()); } @@ -59,7 +59,7 @@ TEST_F(SyncedMemoryTest, TestAllocationGPU) { #endif TEST_F(SyncedMemoryTest, TestCPUWrite) { - SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); + SyncedMemory mem(10, Caffe::GetDefaultDevice()); void* cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); caffe_memset(mem.size(), 1, cpu_data); @@ -78,7 +78,7 @@ TEST_F(SyncedMemoryTest, TestCPUWrite) { #ifndef CPU_ONLY // GPU test TEST_F(SyncedMemoryTest, TestGPURead) { - SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); + SyncedMemory mem(10, Caffe::GetDefaultDevice()); void* cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); caffe_memset(mem.size(), 1, cpu_data); @@ -87,7 +87,7 @@ TEST_F(SyncedMemoryTest, TestGPURead) { // check if values are the same char* recovered_value = new char[10]; - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -132,11 +132,11 @@ TEST_F(SyncedMemoryTest, TestGPURead) { } TEST_F(SyncedMemoryTest, TestGPUWrite) { - SyncedMemory mem(10, Caffe::GetDefaultDeviceContext()); + SyncedMemory mem(10, Caffe::GetDefaultDevice()); void* gpu_data = mem.mutable_gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index fbf434b1e33..61b1f1e1086 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -18,11 +18,11 @@ class GemmTest : public ::testing::Test {}; TYPED_TEST_CASE(GemmTest, TestDtypes); TYPED_TEST(GemmTest, TestGemmCPUGPU) { - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); - Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); - Blob B(1, 1, 3, 4, Caffe::GetDefaultDeviceContext()); - Blob C(1, 1, 2, 4, Caffe::GetDefaultDeviceContext()); + Blob A(1, 1, 2, 3, Caffe::GetDefaultDevice()); + Blob B(1, 1, 3, 4, Caffe::GetDefaultDevice()); + Blob C(1, 1, 2, 4, Caffe::GetDefaultDevice()); TypeParam data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6}; TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12}; @@ -145,11 +145,11 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { TYPED_TEST(GemmTest, TestGemvCPUGPU) { - DeviceContext *dc = Caffe::GetDefaultDeviceContext(); + device *dc = Caffe::GetDefaultDevice(); - Blob A(1, 1, 2, 3, Caffe::GetDefaultDeviceContext()); - Blob x(1, 1, 1, 3, Caffe::GetDefaultDeviceContext()); - Blob y(1, 1, 1, 2, Caffe::GetDefaultDeviceContext()); + Blob A(1, 1, 2, 3, Caffe::GetDefaultDevice()); + Blob x(1, 1, 1, 3, Caffe::GetDefaultDevice()); + Blob y(1, 1, 1, 2, Caffe::GetDefaultDevice()); TypeParam data[6] = {1, 2, 3, 4, 5, 6}; TypeParam result_2[2] = {14, 32}; TypeParam result_3[3] = {9, 12, 15}; diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index 6c72b720d93..e542c982049 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -1,7 +1,7 @@ #include #include "caffe/common.hpp" -#include "caffe/device_context.hpp" +#include "caffe/device.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { @@ -13,7 +13,7 @@ Timer::Timer() Timer::~Timer() { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventDestroy(start_gpu_)); @@ -28,7 +28,7 @@ Timer::~Timer() { void Timer::Start() { if (!running()) { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(start_gpu_, 0)); @@ -47,7 +47,7 @@ void Timer::Start() { void Timer::Stop() { if (running()) { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); @@ -72,7 +72,7 @@ float Timer::MicroSeconds() { Stop(); } if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, @@ -98,7 +98,7 @@ float Timer::MilliSeconds() { Stop(); } if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, @@ -120,7 +120,7 @@ float Timer::Seconds() { void Timer::Init() { if (!initted()) { if (Caffe::mode() == Caffe::GPU - && Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifndef CPU_ONLY #ifdef USE_CUDA CUDA_CHECK(cudaEventCreate(&start_gpu_)); diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 40c46e099e8..6964c6dd061 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -61,7 +61,7 @@ void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, // NOLINT_NEXT_LINE(whitespace/operators) im2col_sk_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), - CAFFE_CUDA_NUM_THREADS)( + CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, height, width, kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, @@ -131,7 +131,7 @@ void im2col_gpu(const Dtype* data_im, const int channels, const int height, int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), - CAFFE_CUDA_NUM_THREADS)( + CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); @@ -205,8 +205,7 @@ void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, const int stride_w, const int kstride_h, const int kstride_w, Dtype* data_im) { if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) - LOG(FATAL) - << "stride greater than 1 or pad greater" + LOG(FATAL)<< "stride greater than 1 or pad greater" << " than 0 not tested in col2im_sk_gpu()."; int ext_patch_h = (patch_h - 1) * kstride_h + 1; int ext_patch_w = (patch_w - 1) * kstride_w + 1; @@ -215,7 +214,7 @@ void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, int num_kernels = channels * height * width; col2im_sk_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), - CAFFE_CUDA_NUM_THREADS)( + CAFFE_CUDA_NUM_THREADS)( num_kernels, data_col, height, width, channels, patch_h, patch_w, ext_patch_h, ext_patch_w, pad_h, pad_w, stride_h, stride_w, kstride_h, kstride_w, @@ -252,21 +251,13 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, int w = index % width + pad_w; int h = (index / width) % height + pad_h; int c = index / (width * height); + // compute the start and end of the output int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); - /* - for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { - for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { - // the col location: [c * width * height + h_out, w_out] - int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize - + (w - w_col * stride_w); - val += data_col[(c_col * height_col + h_col) * width_col + w_col]; - } - } - */ + // equivalent implementation int offset = (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; @@ -293,7 +284,7 @@ void col2im_gpu(const Dtype* data_col, const int channels, const int height, // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) col2im_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), - CAFFE_CUDA_NUM_THREADS)( + CAFFE_CUDA_NUM_THREADS)( num_kernels, data_col, height, width, channels, patch_h, patch_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im); @@ -314,14 +305,14 @@ template void col2im_gpu(const double* data_col, const int channels, const int stride_h, const int stride_w, double* data_im); - template -__global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, - const Dtype* data_im, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, const int* kstride, - Dtype* data_col) { +__global__ void im2col_ndsk_gpu_kernel(const int n, const int num_axes, + const Dtype* data_im, + const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, const int* kstride, + Dtype* data_col) { int d_temp[6]; // NOLINT(runtime/arrays) int d_iter[6]; // NOLINT(runtime/arrays) int i; @@ -354,7 +345,9 @@ __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, for (i = 0; i < num_axes; ++i) { const int d_iter_im = d_iter[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; - if (!in_range) { break; } + if (!in_range) { + break; + } } // Write column data @@ -390,12 +383,13 @@ __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, } template -__global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, - const Dtype* data_col, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, const int* kstride, - Dtype* data_im) { +__global__ void col2im_ndsk_gpu_kernel(const int n, const int num_axes, + const Dtype* data_col, + const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, const int* kstride, + Dtype* data_im) { int d_im[6]; // NOLINT(runtime/arrays) int d_col_size[6]; // NOLINT(runtime/arrays) int d_col_iter[6]; // NOLINT(runtime/arrays) @@ -406,8 +400,8 @@ __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, for (int i = num_axes - 1; i >= 0; --i) { d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; - d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) - / stride[i] + 1; + d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) / stride[i] + + 1; } CUDA_KERNEL_LOOP(index, n) { @@ -424,17 +418,20 @@ __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, for (int i = 0; i < num_axes; ++i) { // Old: /*d_col_start[i] = d_col_iter[i] = - (d_im[i] < kernel_shape[i]) ? - 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ // New: - d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? - d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; + d_col_start[i] = + (d_im[i] < d_ext_patch[i]) ? + d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1; d_col_iter[i] = d_col_start[i]; d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; - d_col_end[i] = (d_im[i] >= d_col_size[i]) ? - (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) - % kstride[i] : d_im[i]; + d_col_end[i] = + (d_im[i] >= d_col_size[i]) ? + (d_col_size[i] - 1) + - ((d_col_size[i] - 1) - d_col_start[i]) % kstride[i] : + d_im[i]; if (d_col_start[i] > d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. @@ -454,7 +451,7 @@ __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, int final_offset = 0; int coeff_prod = 1; for (int i = num_axes - 1; i >= 0; --i) { - final_offset += d_col_iter[i] * coeff_prod; + final_offset += d_col_iter[i] * coeff_prod; coeff_prod *= d_col_size[i]; } for (int i = num_axes - 1; i >= 0; --i) { @@ -475,59 +472,251 @@ __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, break; // for (int i = num_axes - 1; i >= 0; --i) } } // for (int i = num_axes - 1; i >= 0; --i) - } while (incremented); + } while (incremented); data_im[index] = val; } // CUDA_KERNEL_LOOP(index, n) } +template +void im2col_ndsk_gpu(const Dtype* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, const int* kstride, + Dtype* data_col) { + im2col_ndsk_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( + num_kernels, num_spatial_axes, data_im, im_shape, col_shape, + kernel_shape, pad, stride, kstride, data_col); + CUDA_POST_KERNEL_CHECK; +} + +// Explicit instantiation +template void im2col_ndsk_gpu(const float* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, + const int* kstride, float* data_col); +template void im2col_ndsk_gpu(const double* data_im, const int num_spatial_axes, + const int num_kernels, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, + const int* kstride, double* data_col); + +template +void col2im_ndsk_gpu(const Dtype* data_col, const int num_spatial_axes, + const int im_size, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, const int* kstride, + Dtype* data_im) { + col2im_ndsk_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( + im_size, num_spatial_axes, data_col, im_shape, col_shape, + kernel_shape, pad, stride, kstride, data_im); + CUDA_POST_KERNEL_CHECK; +} + +// Explicit instantiation +template void col2im_ndsk_gpu(const float* data_col, const int num_axes, + const int im_size, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, + const int* kstride, float* data_im); +template void col2im_ndsk_gpu(const double* data_col, const int num_axes, + const int im_size, const int* im_shape, + const int* col_shape, const int* kernel_shape, + const int* pad, const int* stride, + const int* kstride, double* data_im); + +template +__global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, + const Dtype* data_im, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, Dtype* data_col) { + int d_temp[6]; // NOLINT(runtime/arrays) + int d_iter[6]; // NOLINT(runtime/arrays) + int i; + CUDA_KERNEL_LOOP(index, n) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + Dtype* data_col_ptr = data_col + channel_out; + const Dtype* data_im_ptr = data_im + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } // CUDA_KERNEL_LOOP(index, n) +} + template void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, - const int* kstride, Dtype* data_col) { + const int* pad, const int* stride, Dtype* data_col) { im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( - num_kernels, num_spatial_axes, data_im, im_shape, col_shape, - kernel_shape, pad, stride, kstride, data_col); + CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( + num_kernels, data_im, im_shape, col_shape, + kernel_shape, pad, stride, data_col); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation -template void im2col_nd_gpu(const float* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, float* data_col); -template void im2col_nd_gpu(const double* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, double* data_col); +template void im2col_nd_gpu(const float* data_im, + const int num_spatial_axes, + const int col_size, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, float* data_col); +template void im2col_nd_gpu(const double* data_im, + const int num_spatial_axes, + const int col_size, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, double* data_col); +template +__global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, + const Dtype* data_col, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, Dtype* data_im) { + int d_im[6]; // NOLINT(runtime/arrays) + int d_col_iter[6]; // NOLINT(runtime/arrays) + int d_col_start[6]; // NOLINT(runtime/arrays) + int d_col_end[6]; // NOLINT(runtime/arrays) + CUDA_KERNEL_LOOP(index, n) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; // CUDA_KERNEL_LOOP(index, n) + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } // CUDA_KERNEL_LOOP(index, n) +} -template +template void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, - Dtype* data_im) { - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( - im_size, num_spatial_axes, data_col, im_shape, col_shape, - kernel_shape, pad, stride, kstride, data_im); + const int im_size, const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_im) { + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( + im_size, data_col, im_shape, col_shape, + kernel_shape, pad, stride, data_im); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation -template void col2im_nd_gpu(const float* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, - float* data_im); -template void col2im_nd_gpu(const double* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - const int* kstride, - double* data_im); +template void col2im_nd_gpu(const float* data_col, + const int num_spatial_axes, + const int im_size, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, float* data_im); +template void col2im_nd_gpu(const double* data_col, + const int num_spatial_axes, + const int im_size, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, double* data_im); #endif // USE_CUDA } // namespace caffe diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 26a62a5c5f5..da19f198ed6 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -1,3 +1,4 @@ + #ifdef WITH_PYTHON_LAYER #include "boost/python.hpp" namespace bp = boost::python; @@ -12,7 +13,7 @@ namespace bp = boost::python; #include "boost/algorithm/string.hpp" #include "caffe/caffe.hpp" -#include "caffe/device_context.hpp" +#include "caffe/device.hpp" #include "caffe/util/signal_handler.h" using caffe::Blob; @@ -24,7 +25,7 @@ using caffe::shared_ptr; using caffe::string; using caffe::Timer; using caffe::vector; -using caffe::DeviceContext; +using caffe::device; using std::ostringstream; DEFINE_string(gpu, "", @@ -224,7 +225,7 @@ int train() { if (gpus.size() > 1) { caffe::P2PSync sync(solver, NULL, solver->param()); - std::vector devices; + std::vector devices; for (int i = 0; i < gpus.size(); ++i) { devices.push_back(Caffe::Get().GetDeviceContext(i)); } @@ -365,7 +366,7 @@ int time() { for (int i = 0; i < layers.size(); ++i) { timer.Start(); layers[i]->Forward(bottom_vecs[i], top_vecs[i]); - Caffe::Synchronize(Caffe::GetDefaultDeviceContext()->id()); + Caffe::Synchronize(Caffe::GetDefaultDevice()->id()); forward_time_per_layer[i] += timer.MicroSeconds(); } forward_time += forward_timer.MicroSeconds(); @@ -374,7 +375,7 @@ int time() { timer.Start(); layers[i]->Backward(top_vecs[i], bottom_need_backward[i], bottom_vecs[i]); - Caffe::Synchronize(Caffe::GetDefaultDeviceContext()->id()); + Caffe::Synchronize(Caffe::GetDefaultDevice()->id()); backward_time_per_layer[i] += timer.MicroSeconds(); } backward_time += backward_timer.MicroSeconds(); From 05641a3b9b75b3f73be54ce24452cf9cc9fbb4a7 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 22 Sep 2015 04:25:52 +0200 Subject: [PATCH 180/254] Fusing convolution layer code. --- include/caffe/common.hpp | 2 +- include/caffe/greentea/greentea_im2col.hpp | 80 ++-- include/caffe/parallel.hpp | 10 +- include/caffe/vision_layers.hpp | 2 +- src/caffe/common.cpp | 2 +- src/caffe/greentea/greentea_im2col.cpp | 200 ++++++--- src/caffe/layer_factory.cpp | 5 - src/caffe/layers/base_conv_layer.cpp | 161 ++++---- src/caffe/layers/base_conv_nd_layer.cpp | 460 --------------------- src/caffe/layers/conv_nd_layer.cpp | 48 --- src/caffe/layers/conv_nd_layer.cu | 94 ----- src/caffe/layers/conv_sk_layer.cpp | 179 -------- src/caffe/layers/conv_sk_layer.cu | 282 ------------- src/caffe/layers/deconv_nd_layer.cpp | 48 --- src/caffe/layers/deconv_nd_layer.cu | 96 ----- src/caffe/layers/im2col_layer.cu | 45 +- src/caffe/parallel.cpp | 12 +- 17 files changed, 321 insertions(+), 1405 deletions(-) delete mode 100644 src/caffe/layers/base_conv_nd_layer.cpp delete mode 100644 src/caffe/layers/conv_nd_layer.cpp delete mode 100644 src/caffe/layers/conv_nd_layer.cu delete mode 100644 src/caffe/layers/conv_sk_layer.cpp delete mode 100644 src/caffe/layers/conv_sk_layer.cu delete mode 100644 src/caffe/layers/deconv_nd_layer.cpp delete mode 100644 src/caffe/layers/deconv_nd_layer.cu diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 25c2c36767e..aa700fe9cfa 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -172,7 +172,7 @@ class Caffe { // Get the default device static device *GetDefaultDevice(); - static device *GetCPUDeviceContext(); + static device *GetCPUDevice(); // Prints info about all devices static int EnumerateDevices(bool silent = false); diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index 811ca9feea8..4c7e4b0d33b 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -14,43 +14,39 @@ namespace caffe { template void greentea_im2col_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, - const cl_mem data_im, const int data_im_off, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, cl_mem data_col, - const int data_col_off); + viennacl::ocl::context *ctx, const cl_mem data_im, + const int data_im_off, const int channels, + const int height, const int width, const int kernel_h, + const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + cl_mem data_col, const int data_col_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, - const cl_mem data_col, const int data_col_off, - const int channels, const int height, const int width, - const int patch_h, const int patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, cl_mem data_im, + viennacl::ocl::context *ctx, const cl_mem data_col, + const int data_col_off, const int channels, + const int height, const int width, const int patch_h, + const int patch_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, cl_mem data_im, const int data_im_off); - template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, - const cl_mem data_im, const int data_offset, - const int channels, const int height, - const int width, const int kernel_h, - const int kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, cl_mem data_col); + viennacl::ocl::context *ctx, const cl_mem data_im, + const int data_offset, const int channels, + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + cl_mem data_col); template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, - const cl_mem data_col, const int channels, - const int height, const int width, - const int patch_h, const int patch_w, - const int pad_h, const int pad_w, + viennacl::ocl::context *ctx, const cl_mem data_col, + const int channels, const int height, + const int width, const int patch_h, + const int patch_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int kstride_h, const int kstride_w, cl_mem data_im, const int data_offset); @@ -59,7 +55,7 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int data_off, const int num_spatial_axes, - const int num_kernels, + const int channel_axis, const int num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_col, int data_col_off); @@ -70,27 +66,25 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, const int data_col_off, const int num_spatial_axes, const int im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem data_im, - int data_off); - + cl_mem stride, cl_mem data_im, int data_off); template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, const int num_spatial_axes, - const int num_kernels, - cl_mem im_shape, cl_mem col_shape, - cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_col, int data_col_off); + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, const int num_spatial_axes, + const int num_kernels, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_col, + int data_col_off); template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, const int num_spatial_axes, - const int im_size, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem kstride, cl_mem data_im, - int data_off); + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, + const int num_spatial_axes, const int im_size, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem kstride, cl_mem data_im, int data_off); } // namespace caffe diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp index b0bd395f50c..56e8dda18b3 100644 --- a/include/caffe/parallel.hpp +++ b/include/caffe/parallel.hpp @@ -65,14 +65,16 @@ class GPUParams : public Params { class DevicePair { public: - DevicePair(device* parent, device* device) + DevicePair(device* parent, device* dev) : parent_(parent), - device_(device) { + device_(dev) { } - inline device* parent() { + + inline device* getParent() { return parent_; } - inline device* device() { + + inline device* getDevice() { return device_; } diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index eeb5d17d8e7..c403e178faf 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -261,7 +261,7 @@ class BaseConvolutionLayer : public Layer { #ifdef USE_CUDA inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - if(this->use_skernel_) { + if (this->use_skernel_) { im2col_sk_gpu(data, conv_in_channels_, conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 7e5fe9a4874..745ff05bade 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -89,7 +89,7 @@ device *Caffe::GetDefaultDevice() { return Get().default_device_context_; } -device *Caffe::GetCPUDeviceContext() { +device *Caffe::GetCPUDevice() { return Get().cpu_device_context_.get(); } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index e7433f79fb7..c6bca93860b 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -77,9 +77,8 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, const int kstride_h, const int kstride_w, cl_mem data_im, const int data_offset) { if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) { - LOG(FATAL) - << "stride greater than 1 or pad greater than 0" - << " not tested in col2im_sk_gpu()."; + LOG(FATAL)<< "stride greater than 1 or pad greater than 0" + << " not tested in col2im_sk_gpu()."; } int ext_patch_h = (patch_h - 1) * kstride_h + 1; @@ -215,85 +214,180 @@ template void greentea_col2im_gpu(viennacl::ocl::program *prog, const int stride_w, cl_mem data_im, const int data_im_off); +template +void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, const int num_spatial_axes, + const int num_kernels, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem data_col, + int data_col_off) { + viennacl::ocl::kernel &kernel = prog->get_kernel( + CL_KERNEL_SELECT("im2col_nd")); + viennacl::ocl::enqueue( + kernel(num_kernels, num_spatial_axes, WrapHandle(data_im, ctx), data_off, + WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), + WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), + WrapHandle(stride, ctx), WrapHandle(data_col, ctx), data_col_off), + ctx->get_queue()); +} +// Explicit instantiation +template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + cl_mem data_im, const int data_off, + const int num_spatial_axes, + const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem data_col, + int data_col_off); + +template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + cl_mem data_im, const int data_off, + const int num_spatial_axes, + const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem data_col, + int data_col_off); template -void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, const int num_spatial_axes, - const int num_kernels, +void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, const int num_spatial_axes, + const int channel_axis_, const int im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_col, int data_col_off) { + cl_mem data_im, int data_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( - CL_KERNEL_SELECT("im2col_nd")); + CL_KERNEL_SELECT("col2im_nd")); viennacl::ocl::enqueue( - kernel(num_kernels, num_spatial_axes, - WrapHandle(data_im, ctx), data_off, WrapHandle(im_shape, ctx), + kernel(im_size, num_spatial_axes, channel_axis_, + WrapHandle(data_col, ctx), data_col_off, WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), WrapHandle(stride, ctx), - WrapHandle(kstride, ctx), WrapHandle(data_col, ctx), - data_col_off), + WrapHandle(data_im, ctx), data_off), ctx->get_queue()); } // Explicit instantiation -template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, - const int num_spatial_axes, const int num_kernels, - cl_mem im_shape, cl_mem col_shape, - cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_col, int data_col_off); +template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + cl_mem data_col, + const int data_col_off, + const int num_spatial_axes, + const int channel_axis, + const int im_size, cl_mem im_shape, + cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem data_im, + int data_off); + +template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + cl_mem data_col, + const int data_col_off, + const int num_spatial_axes, + const int channel_axis, + const int im_size, cl_mem im_shape, + cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem data_im, + int data_off); -template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, - const int num_spatial_axes, const int num_kernels, - cl_mem im_shape, cl_mem col_shape, - cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_col, int data_col_off); +template +void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, const int num_spatial_axes, + const int num_kernels, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_col, + int data_col_off) { + viennacl::ocl::kernel &kernel = prog->get_kernel( + CL_KERNEL_SELECT("im2col_ndsk")); + + viennacl::ocl::enqueue( + kernel(num_kernels, num_spatial_axes, WrapHandle(data_im, ctx), data_off, + WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), + WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), + WrapHandle(stride, ctx), WrapHandle(kstride, ctx), + WrapHandle(data_col, ctx), data_col_off), + ctx->get_queue()); +} + +// Explicit instantiation +template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + cl_mem data_im, + const int data_off, + const int num_spatial_axes, + const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, + cl_mem data_col, + int data_col_off); + +template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, + viennacl::ocl::context *ctx, + cl_mem data_im, + const int data_off, + const int num_spatial_axes, + const int num_kernels, + cl_mem im_shape, + cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, + cl_mem data_col, + int data_col_off); template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, const int num_spatial_axes, - const int im_size, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem kstride, cl_mem data_im, - int data_off) { + viennacl::ocl::context *ctx, cl_mem data_col, + const int data_col_off, + const int num_spatial_axes, const int im_size, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem kstride, cl_mem data_im, int data_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( - CL_KERNEL_SELECT("col2im_nd")); + CL_KERNEL_SELECT("col2im_ndsk")); viennacl::ocl::enqueue( - kernel(im_size, num_spatial_axes, - WrapHandle(data_col, ctx), data_col_off, WrapHandle(im_shape, ctx), - WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), - WrapHandle(pad, ctx), WrapHandle(stride, ctx), - WrapHandle(kstride, ctx), WrapHandle(data_im, ctx), - data_off), + kernel(im_size, num_spatial_axes, WrapHandle(data_col, ctx), data_col_off, + WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), + WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), + WrapHandle(stride, ctx), WrapHandle(kstride, ctx), + WrapHandle(data_im, ctx), data_off), ctx->get_queue()); } // Explicit instantiation template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, const int num_spatial_axes, - const int im_size, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem kstride, cl_mem data_im, - int data_off); + viennacl::ocl::context *ctx, + cl_mem data_col, + const int data_col_off, + const int num_spatial_axes, + const int im_size, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, + cl_mem data_im, int data_off); template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, const int num_spatial_axes, - const int im_size, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem kstride, cl_mem data_im, - int data_off); - + viennacl::ocl::context *ctx, + cl_mem data_col, + const int data_col_off, + const int num_spatial_axes, + const int im_size, + cl_mem im_shape, + cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, + cl_mem data_im, int data_off); } // namespace caffe #endif diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 0d15fb5b071..042f9ecde0e 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -16,11 +16,6 @@ namespace caffe { -template -shared_ptr > GetConvolutionSKLayer(const LayerParameter& param) { - return shared_ptr>(new ConvolutionSKLayer(param)); -} - template shared_ptr > GetPoolingSKLayer(const LayerParameter& param) { return shared_ptr>(new PoolingSKLayer(param)); diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 53f5b4ac0d4..f078656e68d 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -17,7 +17,7 @@ namespace caffe { template void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); force_nd_im2col_ = conv_param.force_nd_im2col(); @@ -33,46 +33,46 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { CHECK_EQ(num_spatial_axes_, 2) - << "kernel_h & kernel_w can only be used for 2D convolution."; + << "kernel_h & kernel_w can only be used for 2D convolution."; CHECK_EQ(0, conv_param.kernel_size_size()) - << "Either kernel_size or kernel_h/w should be specified; not both."; + << "Either kernel_size or kernel_h/w should be specified; not both."; kernel_shape_data[0] = conv_param.kernel_h(); kernel_shape_data[1] = conv_param.kernel_w(); } else { const int num_kernel_dims = conv_param.kernel_size_size(); CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_) - << "kernel_size must be specified once, or once per spatial dimension " - << "(kernel_size specified " << num_kernel_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - for (int i = 0; i < num_spatial_axes_; ++i) { - kernel_shape_data[i] = - conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); - } + << "kernel_size must be specified once, or once per spatial dimension " + << "(kernel_size specified " << num_kernel_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + for (int i = 0; i < num_spatial_axes_; ++i) { + kernel_shape_data[i] = + conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); + } } for (int i = 0; i < num_spatial_axes_; ++i) { - CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; + CHECK_GT(kernel_shape_data[i], 0)<< "Filter dimensions must be nonzero."; } // Setup stride dimensions (stride_). stride_.Reshape(spatial_dim_blob_shape); int* stride_data = stride_.mutable_cpu_data(); if (conv_param.has_stride_h() || conv_param.has_stride_w()) { CHECK_EQ(num_spatial_axes_, 2) - << "stride_h & stride_w can only be used for 2D convolution."; + << "stride_h & stride_w can only be used for 2D convolution."; CHECK_EQ(0, conv_param.stride_size()) - << "Either stride or stride_h/w should be specified; not both."; + << "Either stride or stride_h/w should be specified; not both."; stride_data[0] = conv_param.stride_h(); stride_data[1] = conv_param.stride_w(); } else { const int num_stride_dims = conv_param.stride_size(); CHECK(num_stride_dims == 0 || num_stride_dims == 1 || - num_stride_dims == num_spatial_axes_) - << "stride must be specified once, or once per spatial dimension " - << "(stride specified " << num_stride_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; + num_stride_dims == num_spatial_axes_) + << "stride must be specified once, or once per spatial dimension " + << "(stride specified " << num_stride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; const int kDefaultStride = 1; for (int i = 0; i < num_spatial_axes_; ++i) { stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : - conv_param.stride((num_stride_dims == 1) ? 0 : i); + conv_param.stride((num_stride_dims == 1) ? 0 : i); CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; } } @@ -81,31 +81,67 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, int* pad_data = pad_.mutable_cpu_data(); if (conv_param.has_pad_h() || conv_param.has_pad_w()) { CHECK_EQ(num_spatial_axes_, 2) - << "pad_h & pad_w can only be used for 2D convolution."; + << "pad_h & pad_w can only be used for 2D convolution."; CHECK_EQ(0, conv_param.pad_size()) - << "Either pad or pad_h/w should be specified; not both."; + << "Either pad or pad_h/w should be specified; not both."; pad_data[0] = conv_param.pad_h(); pad_data[1] = conv_param.pad_w(); } else { const int num_pad_dims = conv_param.pad_size(); CHECK(num_pad_dims == 0 || num_pad_dims == 1 || - num_pad_dims == num_spatial_axes_) - << "pad must be specified once, or once per spatial dimension " - << "(pad specified " << num_pad_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; + num_pad_dims == num_spatial_axes_) + << "pad must be specified once, or once per spatial dimension " + << "(pad specified " << num_pad_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; const int kDefaultPad = 0; for (int i = 0; i < num_spatial_axes_; ++i) { pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : - conv_param.pad((num_pad_dims == 1) ? 0 : i); + conv_param.pad((num_pad_dims == 1) ? 0 : i); } } + + // Setup kernel stride dimensions + kstride_.Reshape(spatial_dim_blob_shape); + int* kstride_data = kstride_.mutable_cpu_data(); + if (conv_param.has_kstride_h() || conv_param.has_kstride_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "kstride_h & kstride_w can only be used for 2D convolution."; + CHECK_EQ(0, conv_param.kstride_size()) + << "Etiher kstride or kstirde_h/w should be specified; not both."; + kstride_data[0] = conv_param.pad_h(); + kstride_data[1] = conv_param.pad_w(); + } else { + const int num_kstride_dims = conv_param.kstride_size(); + CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || + num_kstride_dims == num_spatial_axes_) + << "kstride must be specified once, or once per spatial dimension " + << "(kstride specified " << num_kstride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultKstride = 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : + conv_param.kstride((num_kstride_dims == 1) ? 0 : i); + } + } + + // Different 2D and ND im2col/col2im kernels for strided kernels + use_skernel_ = false; + for (int i = 0; i < num_spatial_axes_; ++i) { + use_skernel_ |= (kstride_data[i] != 1); + if (use_skernel_) { + break; + } + } + // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. is_1x1_ = true; for (int i = 0; i < num_spatial_axes_; ++i) { - is_1x1_ &= - kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0; - if (!is_1x1_) { break; } + is_1x1_ &= kernel_shape_data[i] == 1 && stride_data[i] == 1 + && pad_data[i] == 0; + if (!is_1x1_) { + break; + } } // Configure output channels and groups. channels_ = bottom[0]->shape(channel_axis_); @@ -114,7 +150,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, group_ = this->layer_param_.convolution_param().group(); CHECK_EQ(channels_ % group_, 0); CHECK_EQ(num_output_ % group_, 0) - << "Number of output should be multiples of group."; + << "Number of output should be multiples of group."; if (reverse_dimensions()) { conv_out_channels_ = channels_; conv_in_channels_ = num_output_; @@ -139,14 +175,14 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, if (weight_shape != this->blobs_[0]->shape()) { Blob weight_shaped_blob(weight_shape); LOG(FATAL) << "Incorrect weight shape: expected shape " - << weight_shaped_blob.shape_string() << "; instead, shape was " - << this->blobs_[0]->shape_string(); + << weight_shaped_blob.shape_string() << "; instead, shape was " + << this->blobs_[0]->shape_string(); } if (bias_term_ && bias_shape != this->blobs_[1]->shape()) { Blob bias_shaped_blob(bias_shape); LOG(FATAL) << "Incorrect bias shape: expected shape " - << bias_shaped_blob.shape_string() << "; instead, shape was " - << this->blobs_[1]->shape_string(); + << bias_shaped_blob.shape_string() << "; instead, shape was " + << this->blobs_[1]->shape_string(); } LOG(INFO) << "Skipping parameter initialization"; } else { @@ -177,13 +213,13 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, template void BaseConvolutionLayer::Reshape(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const int first_spatial_axis = channel_axis_ + 1; CHECK_EQ(bottom[0]->num_axes(), first_spatial_axis + num_spatial_axes_) - << "bottom num_axes may not change."; + << "bottom num_axes may not change."; num_ = bottom[0]->count(0, channel_axis_); CHECK_EQ(bottom[0]->shape(channel_axis_), channels_) - << "Input size incompatible with convolution kernel."; + << "Input size incompatible with convolution kernel."; // TODO: generalize to handle inputs of different shapes. for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { CHECK(bottom[0]->shape() == bottom[bottom_id]->shape()) @@ -193,7 +229,7 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, bottom_shape_ = &bottom[0]->shape(); compute_output_shape(); vector top_shape(bottom[0]->shape().begin(), - bottom[0]->shape().begin() + channel_axis_); + bottom[0]->shape().begin() + channel_axis_); top_shape.push_back(num_output_); for (int i = 0; i < num_spatial_axes_; ++i) { top_shape.push_back(output_shape_[i]); @@ -234,23 +270,13 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, } if (Caffe::mode() == Caffe::Brew::CPU) { - if (reverse_dimensions()) { - col_buffer_.Reshape(1, kernel_dim_, height_, width_); - } else { - col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_); - } + col_buffer_.Reshape(col_buffer_shape_); } else { // Shared column buffer per device-queue across all layers on that device for (int i = 0; i < this->device_context_->num_queues(); ++i) { - if (reverse_dimensions()) { - shared_ptr< Blob > buffer = - this->device_context_->template Buffer(i); - buffer->Reshape(1, kernel_dim_, height_, width_); - } else { - shared_ptr< Blob > buffer = - this->device_context_->template Buffer(i); - buffer->Reshape(1, kernel_dim_, height_out_, width_out_); - } + shared_ptr > buffer = this->device_context_ + ->template Buffer(i); + buffer->Reshape(col_buffer_shape_); } } @@ -412,9 +438,8 @@ void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, } else { #ifdef USE_GREENTEA greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, - CblasNoTrans, num_output_, - out_spatial_dim_, 1, (Dtype) 1., - (cl_mem) bias, 0, + CblasNoTrans, num_output_, out_spatial_dim_, 1, + (Dtype) 1., (cl_mem) bias, 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., (cl_mem) output, output_off); #endif // USE_GREENTEA @@ -478,8 +503,8 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm( - CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_, conv_out_spatial_dim_, (Dtype) 1., + CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_, + conv_out_spatial_dim_, (Dtype) 1., output + output_off + output_offset_ * g, col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., weights + weight_offset_ * g); @@ -495,9 +520,8 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, for (int g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_, conv_out_spatial_dim_, - (Dtype) 1., (cl_mem) output, - output_off + output_offset_ * g, + kernel_dim_, conv_out_spatial_dim_, (Dtype) 1., + (cl_mem) output, output_off + output_offset_ * g, (cl_mem) col_buff, (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., (cl_mem) weights, @@ -513,25 +537,24 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, const int input_off) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, - 1., input + input_off, bias_multiplier_.gpu_data(), - 1., bias); + caffe_gpu_gemv(CblasNoTrans, num_output_, conv_out_spatial_dim_, 1., + input + input_off, bias_multiplier_.gpu_data(), 1., + bias); #endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, - num_output_, out_spatial_dim_, 1., - (cl_mem) input, input_off, - (cl_mem) (bias_multiplier_.gpu_data()), 0, 1., - (cl_mem) bias, 0); + num_output_, out_spatial_dim_, 1., (cl_mem) input, + input_off, (cl_mem) (bias_multiplier_.gpu_data()), + 0, 1., (cl_mem) bias, 0); #endif // USE_GREENTEA } } template -shared_ptr< Blob > BaseConvolutionLayer::col_buffer() { - return this->device_context_-> - template Buffer(this->device_context_->current_queue_id()); +shared_ptr > BaseConvolutionLayer::col_buffer() { + return this->device_context_->template Buffer( + this->device_context_->current_queue_id()); } #endif // !CPU_ONLY diff --git a/src/caffe/layers/base_conv_nd_layer.cpp b/src/caffe/layers/base_conv_nd_layer.cpp deleted file mode 100644 index 162cb69a5b0..00000000000 --- a/src/caffe/layers/base_conv_nd_layer.cpp +++ /dev/null @@ -1,460 +0,0 @@ -#include - -#include "caffe/filler.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -void BaseConvolutionNDLayer::LayerSetUp( - const vector*>& bottom, - const vector*>& top) { - // Configure the kernel size, padding, stride, and inputs. - ConvolutionParameter conv_param = this->layer_param_.convolution_param(); - channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); - const int first_spatial_axis = channel_axis_ + 1; - const int num_axes = bottom[0]->num_axes(); - num_spatial_axes_ = num_axes - first_spatial_axis; - CHECK_GE(num_spatial_axes_, 1); - // Setup input dimensions (input_shape_). - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - input_shape_.Reshape(bottom_dim_blob_shape); - int* input_shape_data = input_shape_.mutable_cpu_data(); - for (int i = 0; i < num_spatial_axes_ + 1; ++i) { - input_shape_data[i] = bottom[0]->shape(channel_axis_ + i); - } - vector spatial_dim_blob_shape(1, num_spatial_axes_); - // Setup filter kernel dimensions (kernel_shape_). - kernel_shape_.Reshape(spatial_dim_blob_shape); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); - if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "kernel_h & kernel_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.kernel_size_size()) - << "Either kernel_size or kernel_h/w should be specified; not both."; - kernel_shape_data[0] = conv_param.kernel_h(); - kernel_shape_data[1] = conv_param.kernel_w(); - } else { - const int num_kernel_dims = conv_param.kernel_size_size(); - CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_) - << "kernel_size must be specified once, or once per spatial dimension " - << "(kernel_size specified " << num_kernel_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - for (int i = 0; i < num_spatial_axes_; ++i) { - kernel_shape_data[i] = - conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); - } - } - for (int i = 0; i < num_spatial_axes_; ++i) { - CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; - } - // Setup stride dimensions (stride_). - stride_.Reshape(spatial_dim_blob_shape); - int* stride_data = stride_.mutable_cpu_data(); - if (conv_param.has_stride_h() || conv_param.has_stride_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "stride_h & stride_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.stride_size()) - << "Either stride or stride_h/w should be specified; not both."; - stride_data[0] = conv_param.stride_h(); - stride_data[1] = conv_param.stride_w(); - } else { - const int num_stride_dims = conv_param.stride_size(); - CHECK(num_stride_dims == 0 || num_stride_dims == 1 || - num_stride_dims == num_spatial_axes_) - << "stride must be specified once, or once per spatial dimension " - << "(stride specified " << num_stride_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultStride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { - stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : - conv_param.stride((num_stride_dims == 1) ? 0 : i); - CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; - } - } - // Setup pad dimensions (pad_). - pad_.Reshape(spatial_dim_blob_shape); - int* pad_data = pad_.mutable_cpu_data(); - if (conv_param.has_pad_h() || conv_param.has_pad_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "pad_h & pad_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.pad_size()) - << "Either pad or pad_h/w should be specified; not both."; - pad_data[0] = conv_param.pad_h(); - pad_data[1] = conv_param.pad_w(); - } else { - const int num_pad_dims = conv_param.pad_size(); - CHECK(num_pad_dims == 0 || num_pad_dims == 1 || - num_pad_dims == num_spatial_axes_) - << "pad must be specified once, or once per spatial dimension " - << "(pad specified " << num_pad_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultPad = 0; - for (int i = 0; i < num_spatial_axes_; ++i) { - pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : - conv_param.pad((num_pad_dims == 1) ? 0 : i); - } - } - // Setup kernel stride dimensions - kstride_.Reshape(spatial_dim_blob_shape); - int* kstride_data = kstride_.mutable_cpu_data(); - if (conv_param.has_kstride_h() || conv_param.has_kstride_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "kstride_h & kstride_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.kstride_size()) - << "Etiher kstride or kstirde_h/w should be specified; not both."; - kstride_data[0] = conv_param.pad_h(); - kstride_data[1] = conv_param.pad_w(); - } else { - const int num_kstride_dims = conv_param.kstride_size(); - CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || - num_kstride_dims == num_spatial_axes_) - << "kstride must be specified once, or once per spatial dimension " - << "(kstride specified " << num_kstride_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultKstride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { - kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : - conv_param.kstride((num_kstride_dims == 1) ? 0 : i); - } - } - - // Special case: im2col is the identity for 1x1 convolution with stride 1 - // and no padding, so flag for skipping the buffer and transformation. - is_1x1_ = true; - for (int i = 0; i < num_spatial_axes_; ++i) { - is_1x1_ &= - kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0 - && kstride_data[i] == 1; - if (!is_1x1_) { break; } - } - // Configure output channels and groups. - channels_ = bottom[0]->shape(channel_axis_); - num_output_ = this->layer_param_.convolution_param().num_output(); - CHECK_GT(num_output_, 0); - group_ = this->layer_param_.convolution_param().group(); - CHECK_EQ(channels_ % group_, 0); - CHECK_EQ(num_output_ % group_, 0) - << "Number of output should be multiples of group."; - if (reverse_dimensions()) { - conv_out_channels_ = channels_; - conv_in_channels_ = num_output_; - } else { - conv_out_channels_ = num_output_; - conv_in_channels_ = channels_; - } - // Handle the parameters: weights and biases. - // - blobs_[0] holds the filter weights - // - blobs_[1] holds the biases (optional) - bias_term_ = this->layer_param_.convolution_param().bias_term(); - if (this->blobs_.size() > 0) { - LOG(INFO) << "Skipping parameter initialization"; - } else { - if (bias_term_) { - this->blobs_.resize(2); - } else { - this->blobs_.resize(1); - } - // Initialize and fill the weights: - // output channels x input channels per-group x kernel height x kernel width - vector weight_shape(2); - weight_shape[0] = conv_out_channels_; - weight_shape[1] = conv_in_channels_ / group_; - for (int i = 0; i < num_spatial_axes_; ++i) { - weight_shape.push_back(kernel_shape_data[i]); - } - this->blobs_[0].reset(new Blob(weight_shape)); - shared_ptr > weight_filler(GetFiller( - this->layer_param_.convolution_param().weight_filler())); - weight_filler->Fill(this->blobs_[0].get()); - // If necessary, initialize and fill the biases. - if (bias_term_) { - vector bias_shape(1, num_output_); - this->blobs_[1].reset(new Blob(bias_shape)); - shared_ptr > bias_filler(GetFiller( - this->layer_param_.convolution_param().bias_filler())); - bias_filler->Fill(this->blobs_[1].get()); - } - } - // Propagate gradients to the parameters (as directed by backward pass). - this->param_propagate_down_.resize(this->blobs_.size(), true); -} - -template -void BaseConvolutionNDLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - ConvolutionParameter conv_param = this->layer_param_.convolution_param(); - channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); - const int first_spatial_axis = channel_axis_ + 1; - const int num_axes = bottom[0]->num_axes(); - num_spatial_axes_ = num_axes - first_spatial_axis; - CHECK_GE(num_spatial_axes_, 1); - num_ = bottom[0]->count(0, channel_axis_); - CHECK_EQ(bottom[0]->shape(channel_axis_), channels_) - << "Input size incompatible with convolution kernel."; - // TODO: generalize to handle inputs of different shapes. - for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { - CHECK(bottom[0]->shape() == bottom[bottom_id]->shape()) - << "All inputs must have the same shape."; - } - // Shape the tops. - compute_output_shape(); - vector top_shape = bottom[0]->shape(); - top_shape[channel_axis_] = num_output_; - top_shape.resize(first_spatial_axis); // Discard input spatial axes. - for (int i = 0; i < num_spatial_axes_; ++i) { - top_shape.push_back(output_shape_[i]); - } - for (int top_id = 0; top_id < top.size(); ++top_id) { - top[top_id]->Reshape(top_shape); - } - if (reverse_dimensions()) { - conv_out_spatial_dim_ = bottom[0]->count(first_spatial_axis); - } else { - conv_out_spatial_dim_ = top[0]->count(first_spatial_axis); - } - const int* kernel_shape_data = kernel_shape_.cpu_data(); - kernel_dim_ = conv_in_channels_; - for (int i = 0; i < num_spatial_axes_; ++i) { - kernel_dim_ *= kernel_shape_data[i]; - } - weight_offset_ = conv_out_channels_ * kernel_dim_ / group_ / group_; - col_offset_ = kernel_dim_ * conv_out_spatial_dim_ / group_; - output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_; - // Setup input dimensions (conv_input_shape_). - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - conv_input_shape_.Reshape(bottom_dim_blob_shape); - int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data(); - for (int i = 0; i < num_spatial_axes_ + 1; ++i) { - if (reverse_dimensions()) { - conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i); - } else { - conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i); - } - } - // The im2col result buffer will only hold one image at a time to avoid - // overly large memory usage. In the special case of 1x1 convolution - // it goes lazily unused to save memory. - col_buffer_shape_.clear(); - col_buffer_shape_.push_back(kernel_dim_); - const int* input_shape_data = input_shape_.cpu_data() + 1; - for (int i = 0; i < num_spatial_axes_; ++i) { - if (reverse_dimensions()) { - col_buffer_shape_.push_back(input_shape_data[i]); - } else { - col_buffer_shape_.push_back(output_shape_[i]); - } - } - col_buffer_.Reshape(col_buffer_shape_); - if (Caffe::mode() == Caffe::Brew::GPU) { - shared_ptr< Blob > buffer = - this->device_context_->template Buffer(0); - buffer->Reshape(col_buffer_shape_); - } - bottom_dim_ = bottom[0]->count(channel_axis_); - top_dim_ = top[0]->count(channel_axis_); - num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_; - num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_; - // Set up the all ones "bias multiplier" for adding biases by BLAS - out_spatial_dim_ = top[0]->count(first_spatial_axis); - if (bias_term_) { - vector bias_multiplier_shape(1, out_spatial_dim_); - bias_multiplier_.Reshape(bias_multiplier_shape); - caffe_set(bias_multiplier_.count(), Dtype(1), - bias_multiplier_.mutable_cpu_data()); - } -} - -#ifndef CPU_ONLY - -template -void BaseConvolutionNDLayer::forward_gpu_gemm(const Dtype* input, - const int input_off, - const Dtype* weights, - Dtype* output, - const int output_off, - bool skip_im2col) { - const Dtype* col_buff = input; - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - if (!is_1x1_) { - if (!skip_im2col) { - conv_im2col_gpu(input + input_off, col_buffer()->mutable_gpu_data()); - } - col_buff = col_buffer()->gpu_data(); - } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm( - CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, - conv_out_spatial_dim_, kernel_dim_ / group_, (Dtype) 1., - weights + weight_offset_ * g, - col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 0., - output + output_off + output_offset_ * g); - } -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - if (!is_1x1_) { - if (!skip_im2col) { - greentea_conv_im2col_gpu(input, input_off, - col_buffer()->mutable_gpu_data(), 0); - } - col_buff = col_buffer()->gpu_data(); - } - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, - CblasNoTrans, conv_out_channels_ / group_, - conv_out_spatial_dim_, kernel_dim_ / group_, - (Dtype) 1., (cl_mem) weights, weight_offset_ * g, - (cl_mem) col_buff, - (is_1x1_ ? input_off : 0) + col_offset_ * g, - (Dtype) 0., (cl_mem) output, - output_off + output_offset_ * g); - } -#endif // USE_GREENTEA - } -} - -template -void BaseConvolutionNDLayer::forward_gpu_bias(Dtype* output, - const int output_off, - const Dtype* bias) { - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - out_spatial_dim_, 1, (Dtype) 1., bias, - bias_multiplier_.gpu_data(), (Dtype) 1., - output + output_off); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, - CblasNoTrans, num_output_, - out_spatial_dim_, 1, (Dtype) 1., - (cl_mem) bias, 0, - (cl_mem) (bias_multiplier_.gpu_data()), 0, - (Dtype) 1., (cl_mem) output, output_off); -#endif // USE_GREENTEA - } -} - - -template -void BaseConvolutionNDLayer::backward_gpu_gemm(const Dtype* output, - const int output_off, - const Dtype* weights, - Dtype* input, - const int input_off) { - Dtype* col_buff = col_buffer()->mutable_gpu_data(); - if (is_1x1_) { - col_buff = input; - } - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm( - CblasTrans, CblasNoTrans, kernel_dim_ / group_, conv_out_spatial_dim_, - conv_out_channels_ / group_, (Dtype) 1., weights + weight_offset_ * g, - output + output_off + output_offset_ * g, (Dtype) 0., - col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g); - } - if (!is_1x1_) { - conv_col2im_gpu(col_buff, input + input_off); - } -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_->id(), CblasTrans, - CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, - conv_out_channels_ / group_, (Dtype) 1., - (cl_mem) weights, weight_offset_ * g, - (cl_mem) output, output_off + output_offset_ * g, - (Dtype) 0., (cl_mem) col_buff, - (is_1x1_ ? input_off : 0) + col_offset_ * g); - } - if (!is_1x1_) { - greentea_conv_col2im_gpu(col_buff, 0, input, input_off); - } -#endif // USE_GREENTEA - } -} - -template -void BaseConvolutionNDLayer::weight_gpu_gemm(const Dtype* input, - const int input_off, - const Dtype* output, - const int output_off, - Dtype* weights) { - const Dtype* col_buff = input; - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - if (!is_1x1_) { - conv_im2col_gpu(input + input_off, col_buffer()->mutable_gpu_data()); - col_buff = col_buffer()->gpu_data(); - } - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm( - CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype) 1., - output + output_off + output_offset_ * g, - col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g, (Dtype) 1., - weights + weight_offset_ * g); - } -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - if (!is_1x1_) { - greentea_conv_im2col_gpu(input, input_off, - col_buffer()->mutable_gpu_data(), 0); - col_buff = col_buffer()->gpu_data(); - } - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, - CblasTrans, conv_out_channels_ / group_, - kernel_dim_ / group_, conv_out_spatial_dim_, - (Dtype) 1., (cl_mem) output, - output_off + output_offset_ * g, - (cl_mem) col_buff, - (is_1x1_ ? input_off : 0) + col_offset_ * g, - (Dtype) 1., (cl_mem) weights, - weight_offset_ * g); - } -#endif // USE_GREENTEA - } -} - -template -void BaseConvolutionNDLayer::backward_gpu_bias(Dtype* bias, - const Dtype* input, const int input_off) { - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., - input + input_off, bias_multiplier_.gpu_data(), 1., bias); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, - num_output_, out_spatial_dim_, 1., - (cl_mem) input, input_off, - (cl_mem) (bias_multiplier_.gpu_data()), 0, 1., - (cl_mem) bias, 0); -#endif // USE_GREENTEA - } -} - -template -shared_ptr< Blob > BaseConvolutionNDLayer::col_buffer() { - return this->device_context_-> - template Buffer(this->device_context_->current_queue_id()); -} - -#endif // !CPU_ONLY - -INSTANTIATE_CLASS(BaseConvolutionNDLayer); - -} // namespace caffe diff --git a/src/caffe/layers/conv_nd_layer.cpp b/src/caffe/layers/conv_nd_layer.cpp deleted file mode 100644 index aa521fdb4a6..00000000000 --- a/src/caffe/layers/conv_nd_layer.cpp +++ /dev/null @@ -1,48 +0,0 @@ -#include - -#include "caffe/filler.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -void ConvolutionNDLayer::compute_output_shape() { - // input_shape_ + 1 to skip channel axis - const int* input_shape_data = this->input_shape_.cpu_data() + 1; - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int* stride_data = this->stride_.cpu_data(); - const int* pad_data = this->pad_.cpu_data(); - const int* kstride_data = this->kstride_.cpu_data(); - this->output_shape_.clear(); - for (int i = 0; i < this->num_spatial_axes_; ++i) { - const int input_dim = input_shape_data[i]; - const int ext_kernel_shape = (kernel_shape_data[i] - 1) - * kstride_data[i] + 1; - const int output_dim = (input_dim + 2 * pad_data[i] - ext_kernel_shape) - / stride_data[i] + 1; - this->output_shape_.push_back(output_dim); - } -} - -template -void ConvolutionNDLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { - NOT_IMPLEMENTED; -} - -template -void ConvolutionNDLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - NOT_IMPLEMENTED; -} - -#ifdef CPU_ONLY -STUB_GPU(ConvolutionNDLayer); -#endif - -INSTANTIATE_CLASS(ConvolutionNDLayer); - -} // namespace caffe diff --git a/src/caffe/layers/conv_nd_layer.cu b/src/caffe/layers/conv_nd_layer.cu deleted file mode 100644 index e71ac43af1b..00000000000 --- a/src/caffe/layers/conv_nd_layer.cu +++ /dev/null @@ -1,94 +0,0 @@ -#include - -#include "caffe/filler.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_im2col.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#endif - -namespace caffe { - -template -void ConvolutionNDLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const Dtype* weight = this->blobs_[0]->gpu_data(); - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* top_data = top[i]->mutable_gpu_data(); - for (int n = 0; n < this->num_; ++n) { - this->forward_gpu_gemm(bottom_data, n * this->bottom_dim_, weight, - top_data, n * this->top_dim_); - if (this->bias_term_) { - const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data, n * this->top_dim_, bias); - } - } - } -} - -template -void ConvolutionNDLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - const Dtype* weight = this->blobs_[0]->gpu_data(); - Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - if (this->param_propagate_down_[0]) { - greentea_gpu_set(this->device_context_->id(), - this->blobs_[0]->count(), Dtype(0), - (cl_mem)weight_diff, 0); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - greentea_gpu_set(this->device_context_->id(), - this->blobs_[1]->count(), Dtype(0), - (cl_mem)(this->blobs_[1]->mutable_gpu_diff()), 0); - } -#endif // USE_GREENTEA - } - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->gpu_diff(); - // Bias gradient, if necessary. - if (this->bias_term_ && this->param_propagate_down_[1]) { - Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); - } - } - if (this->param_propagate_down_[0] || propagate_down[i]) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(bottom_data, n * this->bottom_dim_, - top_diff, n * this->top_dim_, weight_diff); - } - // gradient w.r.t. bottom data, if necessary. - if (propagate_down[i]) { - this->backward_gpu_gemm(top_diff, n * this->top_dim_, weight, - bottom_diff, n * this->bottom_dim_); - } - } - } - } -} - -INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionNDLayer); - -} // namespace caffe diff --git a/src/caffe/layers/conv_sk_layer.cpp b/src/caffe/layers/conv_sk_layer.cpp deleted file mode 100644 index fbda73e1c37..00000000000 --- a/src/caffe/layers/conv_sk_layer.cpp +++ /dev/null @@ -1,179 +0,0 @@ -#include - -#include "caffe/filler.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -void ConvolutionSKLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - ConvolutionParameter conv_param = this->layer_param_.convolution_param(); - CHECK( - !(conv_param.kernel_size_size() > 0) - != !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) - << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; - CHECK( - (conv_param.kernel_size_size() > 0) - || (conv_param.has_kernel_h() && conv_param.has_kernel_w())) - << "For non-square filters both kernel_h and kernel_w are required."; - CHECK( - (!(conv_param.pad_size() > 9) && conv_param.has_pad_h() - && conv_param.has_pad_w()) - || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) - << "pad is pad OR pad_h and pad_w are required."; - CHECK( - (!(conv_param.stride_size() > 0) && conv_param.has_stride_h() - && conv_param.has_stride_w()) - || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) - << "Stride is stride OR stride_h and stride_w are required."; - if (conv_param.kernel_size_size() > 0) { - kernel_h_ = kernel_w_ = conv_param.kernel_size().Get(0); - } else { - kernel_h_ = conv_param.kernel_h(); - kernel_w_ = conv_param.kernel_w(); - } - CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; - if (!conv_param.has_pad_h()) { - pad_h_ = pad_w_ = conv_param.pad_size() > 0 ? - conv_param.pad().Get(0) : 0; - } else { - pad_h_ = conv_param.pad_h(); - pad_w_ = conv_param.pad_w(); - } - CHECK_EQ(pad_h_, 0)<< "pad_h_ must be 0"; - CHECK_EQ(pad_w_, 0)<< "pad_w_ must be 0"; - if (!conv_param.has_stride_h()) { - stride_h_ = stride_w_ = conv_param.stride_size() > 0 ? - conv_param.stride().Get(0) : 1; - } else { - stride_h_ = conv_param.stride_h(); - stride_w_ = conv_param.stride_w(); - } - if (!conv_param.has_kstride_h()) { - kstride_h_ = kstride_w_ = conv_param.kstride_size() > 0 ? - conv_param.kstride().Get(0) : 0; - } else { - kstride_h_ = conv_param.kstride_h(); - kstride_w_ = conv_param.kstride_w(); - } - group_ = this->layer_param_.convolution_param().group(); - num_ = bottom[0]->num(); - channels_ = bottom[0]->channels(); - height_ = bottom[0]->height(); - width_ = bottom[0]->width(); - // TODO: generalize to handle inputs of different shapes. - for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { - CHECK_EQ(num_, bottom[bottom_id]->num())<< "Inputs must have same num."; - CHECK_EQ(channels_, bottom[bottom_id]->channels()) - << "Inputs must have same channels."; - CHECK_EQ(height_, bottom[bottom_id]->height()) - << "Inputs must have same height."; - CHECK_EQ(width_, bottom[bottom_id]->width()) - << "Inputs must have same width."; - } - num_output_ = this->layer_param_.convolution_param().num_output(); - CHECK_GT(num_output_, 0); - CHECK_EQ(channels_ % group_, 0); - // The im2col result buffer would only hold one image at a time to avoid - // overly large memory usage. - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - int height_out = (height_ - ext_kernel_h) / stride_h_ + 1; - int width_out = (width_ - ext_kernel_w) / stride_w_ + 1; - - if (Caffe::mode() == Caffe::Brew::CPU) { - col_buffer_.Reshape(1, channels_ * kernel_h_ * kernel_w_, height_out, - width_out); - } else { - // Shared column buffer per device-queue across all layers on that device - for (int i = 0; i < this->device_context_->num_queues(); ++i) { - shared_ptr< Blob > buffer = - this->device_context_->template Buffer(i); - buffer->Reshape(1, channels_ * kernel_h_ * kernel_w_, - height_out, width_out); - } - } - - // Set the parameters - CHECK_EQ(num_output_ % group_, 0) - << "Number of output should be multiples of group."; - bias_term_ = this->layer_param_.convolution_param().bias_term(); - // Figure out the dimensions for individual gemms. - M_ = num_output_ / group_; - K_ = channels_ * kernel_h_ * kernel_w_ / group_; - N_ = height_out * width_out; - for (int top_id = 0; top_id < top.size(); ++top_id) { - top[top_id]->Reshape(num_, num_output_, height_out, width_out); - } - // Check if we need to set up the weights - if (this->blobs_.size() > 0) { - } else { - if (bias_term_) { - this->blobs_.resize(2); - } else { - this->blobs_.resize(1); - } - // Intialize the weight - this->blobs_[0].reset( - new Blob(num_output_, channels_ / group_, kernel_h_, kernel_w_, - this->device_context_)); - // fill the weights - shared_ptr > weight_filler( - GetFiller( - this->layer_param_.convolution_param().weight_filler())); - weight_filler->Fill(this->blobs_[0].get()); - // If necessary, initialize and fill the bias term - if (bias_term_) { - this->blobs_[1].reset( - new Blob(1, 1, 1, num_output_, this->device_context_)); - shared_ptr > bias_filler( - GetFiller( - this->layer_param_.convolution_param().bias_filler())); - bias_filler->Fill(this->blobs_[1].get()); - } - } - // Set up the all ones "bias multiplier" for adding bias using blas - if (bias_term_) { - bool reshaped = bias_multiplier_.Reshape(1, 1, 1, N_); - // This will trigger a memory copy if in GPU mode, - // which may not be necessary. - // Thus omit to set the values if not necessary. - if (reshaped) { - caffe_set(N_, Dtype(1), bias_multiplier_.mutable_cpu_data()); - } - } - this->param_propagate_down_.resize(this->blobs_.size(), true); -} - -template -void ConvolutionSKLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - LayerSetUp(bottom, top); -} - -template -void ConvolutionSKLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { - LOG(FATAL)<< "Foward_cpu() not implemented for ConvlutionSKLayer."; -} - -template -void ConvolutionSKLayer::Backward_cpu( - const vector*>& top, const vector& propagate_down, - const vector*>& bottom) { - LOG(FATAL)<< " Backward_cpu() not implemented for ConvolutionSKLayer."; -} - -#ifdef CPU_ONLY -STUB_GPU(ConvolutionSKLayer); -#endif - -INSTANTIATE_CLASS(ConvolutionSKLayer); -REGISTER_LAYER_CLASS(ConvolutionSK); - -} // namespace caffe diff --git a/src/caffe/layers/conv_sk_layer.cu b/src/caffe/layers/conv_sk_layer.cu deleted file mode 100644 index dd58b55914e..00000000000 --- a/src/caffe/layers/conv_sk_layer.cu +++ /dev/null @@ -1,282 +0,0 @@ -#include - -#include "caffe/filler.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_im2col.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#endif - -namespace caffe { - -template -void ConvolutionSKLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - // CUDA backend code - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* top_data = top[i]->mutable_gpu_data(); - Dtype* col_data = col_buffer()->mutable_gpu_data(); - const Dtype* weight = this->blobs_[0]->gpu_data(); - int weight_offset = M_ * K_; - int col_offset = K_ * N_; - int top_offset = M_ * N_; - - for (int n = 0; n < num_; ++n) { - // First, im2col - im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, - stride_w_, kstride_h_, kstride_w_, col_data); - // Second, innerproduct with groups - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, K_, - (Dtype) 1., weight + weight_offset * g, - col_data + col_offset * g, (Dtype) 0., - top_data + top[i]->offset(n) + top_offset * g); - } - // Third, add bias - if (bias_term_) { - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, - (Dtype) 1., this->blobs_[1]->gpu_data(), - bias_multiplier_.gpu_data(), (Dtype) 1., - top_data + top[i]->offset(n)); - } - } - } -#endif - } else { - // GreenTea backend code -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - - for (int i = 0; i < bottom.size(); ++i) { - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); - cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); - cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); - const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); - - int weight_offset = M_ * K_; - int col_offset = K_ * N_; - int top_offset = M_ * N_; - - for (int n = 0; n < num_; ++n) { - // First, im2col - greentea_im2col_sk_gpu(&program, &ctx, bottom_data, - bottom[i]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, - pad_w_, stride_h_, stride_w_, kstride_h_, - kstride_w_, col_data); - - // Second, innerproduct with groups - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, - CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, - weight_offset * g, col_data, col_offset * g, - (Dtype) 0., top_data, - top[i]->offset(n) + top_offset * g); - } - - // Third, add bias - if (bias_term_) { - greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, - CblasNoTrans, num_output_, N_, 1, (Dtype) 1., - (cl_mem) (this->blobs_[1]->gpu_data()), 0, - (cl_mem) (bias_multiplier_.gpu_data()), 0, - (Dtype) 1., top_data, top[i]->offset(n)); - } - } - } -#endif - } -} - -template -void ConvolutionSKLayer::Backward_gpu( - const vector*>& top, const vector& propagate_down, - const vector*>& bottom) { - - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - const Dtype* weight = NULL; - Dtype* weight_diff = NULL; - if (this->param_propagate_down_[0]) { - weight = this->blobs_[0]->gpu_data(); - weight_diff = this->blobs_[0]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - Dtype* bias_diff = NULL; - if (bias_term_ && this->param_propagate_down_[1]) { - bias_diff = this->blobs_[1]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); - } - const int weight_offset = M_ * K_; - const int col_offset = K_ * N_; - const int top_offset = M_ * N_; - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = NULL; - // Bias gradient, if necessary. - if (bias_term_ && this->param_propagate_down_[1]) { - top_diff = top[i]->gpu_diff(); - for (int n = 0; n < num_; ++n) { - caffe_gpu_gemv(CblasNoTrans, num_output_, N_, 1., - top_diff + top[0]->offset(n), - bias_multiplier_.gpu_data(), 1., bias_diff); - } - } - if (this->param_propagate_down_[0] || propagate_down[i]) { - if (!top_diff) { - top_diff = top[i]->gpu_diff(); - } - Dtype* col_data = col_buffer()->mutable_gpu_data(); - Dtype* col_diff = col_buffer()->mutable_gpu_diff(); - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - for (int n = 0; n < num_; ++n) { - // Since we saved memory in the forward pass by not storing all col - // data, we will need to recompute them. - im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, - stride_w_, kstride_h_, kstride_w_, col_data); - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm( - CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., - top_diff + top[i]->offset(n) + top_offset * g, - col_data + col_offset * g, (Dtype) 1., - weight_diff + weight_offset * g); - } - } - // gradient w.r.t. bottom data, if necessary - if (propagate_down[i]) { - for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm( - CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., - weight + weight_offset * g, - top_diff + top[i]->offset(n) + top_offset * g, (Dtype) 0., - col_diff + col_offset * g); - } - // col2im back to the data - col2im_sk_gpu(col_diff, channels_, height_, width_, kernel_h_, - kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, - kstride_h_, kstride_w_, - bottom_diff + bottom[i]->offset(n)); - } - } - } - } -#endif - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - - cl_mem weight = NULL; - cl_mem weight_diff = NULL; - - if (this->param_propagate_down_[0]) { - weight = (cl_mem) (this->blobs_[0]->gpu_data()); - weight_diff = (cl_mem) (this->blobs_[0]->mutable_gpu_diff()); - greentea_gpu_set(this->device_context_->id(), this->blobs_[0]->count(), - Dtype(0), weight_diff, 0); - } - - cl_mem bias_diff = NULL; - - if (bias_term_ && this->param_propagate_down_[1]) { - bias_diff = (cl_mem) (this->blobs_[1]->mutable_gpu_diff()); - greentea_gpu_set(this->device_context_->id(), this->blobs_[1]->count(), - Dtype(0), bias_diff, 0); - } - const int weight_offset = M_ * K_; - const int col_offset = K_ * N_; - const int top_offset = M_ * N_; - for (int i = 0; i < top.size(); ++i) { - cl_mem top_diff = NULL; - // Bias gradient, if necessary. - if (bias_term_ && this->param_propagate_down_[1]) { - top_diff = (cl_mem) (top[i]->gpu_diff()); - for (int n = 0; n < num_; ++n) { - greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, - num_output_, N_, (Dtype) 1., top_diff, - top[0]->offset(n), - (cl_mem) (bias_multiplier_.gpu_data()), 0, - (Dtype) 1., bias_diff, 0); - } - } - if (this->param_propagate_down_[0] || propagate_down[i]) { - if (!top_diff) { - top_diff = (cl_mem) (top[i]->gpu_diff()); - } - cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); - cl_mem col_diff = (cl_mem) (col_buffer()->mutable_gpu_diff()); - const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); - cl_mem bottom_diff = (cl_mem) (bottom[i]->mutable_gpu_diff()); - - for (int n = 0; n < num_; ++n) { - // Since we saved memory in the forward pass by not storing all col - // data, we will need to recompute them. - greentea_im2col_sk_gpu(&program, &ctx, bottom_data, - bottom[i]->offset(n), channels_, - height_, width_, kernel_h_, kernel_w_, - pad_h_, pad_w_, stride_h_, stride_w_, - kstride_h_, kstride_w_, col_data); - - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_->id(), - CblasNoTrans, - CblasTrans, M_, K_, N_, (Dtype) 1., - top_diff, - top[i]->offset(n) + top_offset * g, - col_data, col_offset * g, (Dtype) 1., - weight_diff, weight_offset * g); - } - } - // gradient w.r.t. bottom data, if necessary - if (propagate_down[i]) { - for (int g = 0; g < group_; ++g) { - greentea_gpu_gemm(this->device_context_->id(), - CblasTrans, - CblasNoTrans, K_, N_, M_, (Dtype) 1., - weight, weight_offset * g, top_diff, - top[i]->offset(n) + top_offset * g, - (Dtype) 0., col_diff, col_offset * g); - } - // col2im back to the data - greentea_col2im_sk_gpu(&program, &ctx, col_diff, channels_, - height_, width_, - kernel_h_, kernel_w_, - pad_h_, pad_w_, stride_h_, stride_w_, - kstride_h_, kstride_w_, bottom_diff, - bottom[i]->offset(n)); - } - } - } - } -#endif - } -} - -template -shared_ptr< Blob > ConvolutionSKLayer::col_buffer() { - return this->device_context_-> - template Buffer(this->device_context_->current_queue_id()); -} - -INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionSKLayer); - -} // namespace caffe diff --git a/src/caffe/layers/deconv_nd_layer.cpp b/src/caffe/layers/deconv_nd_layer.cpp deleted file mode 100644 index d5684a6163b..00000000000 --- a/src/caffe/layers/deconv_nd_layer.cpp +++ /dev/null @@ -1,48 +0,0 @@ -#include - -#include "caffe/filler.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -void DeconvolutionNDLayer::compute_output_shape() { - // input_shape_ + 1 to skip channel axis - const int* input_shape_data = this->input_shape_.cpu_data() + 1; - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int* stride_data = this->stride_.cpu_data(); - const int* pad_data = this->pad_.cpu_data(); - this->output_shape_.clear(); - for (int i = 0; i < this->num_spatial_axes_; ++i) { - const int input_dim = input_shape_data[i]; - const int output_dim = stride_data[i] * (input_dim - 1) - + kernel_shape_data[i] - 2 * pad_data[i]; - this->output_shape_.push_back(output_dim); - } -} - -template -void DeconvolutionNDLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - NOT_IMPLEMENTED; -} - -template -void DeconvolutionNDLayer::Backward_cpu( - const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - NOT_IMPLEMENTED; -} - -#ifdef CPU_ONLY -STUB_GPU(DeconvolutionNDLayer); -#endif - -INSTANTIATE_CLASS(DeconvolutionNDLayer); -REGISTER_LAYER_CLASS(DeconvolutionND); - -} // namespace caffe diff --git a/src/caffe/layers/deconv_nd_layer.cu b/src/caffe/layers/deconv_nd_layer.cu deleted file mode 100644 index fb0e2b48c8b..00000000000 --- a/src/caffe/layers/deconv_nd_layer.cu +++ /dev/null @@ -1,96 +0,0 @@ -#include - -#include "caffe/filler.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_im2col.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#endif - -namespace caffe { - -template -void DeconvolutionNDLayer::Forward_gpu( - const vector*>& bottom, - const vector*>& top) { - const Dtype* weight = this->blobs_[0]->gpu_data(); - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* top_data = top[i]->mutable_gpu_data(); - for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_gemm(bottom_data, n * this->bottom_dim_, weight, - top_data, n * this->top_dim_); - if (this->bias_term_) { - const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data, n * this->top_dim_, bias); - } - } - } -} - -template -void DeconvolutionNDLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - const Dtype* weight = this->blobs_[0]->gpu_data(); - Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } -#endif // USE CUDA - } else { -#ifdef USE_GREENTEA - if (this->param_propagate_down_[0]) { - greentea_gpu_set(this->device_context_->id(), - this->blobs_[0]->count(), Dtype(0), - (cl_mem) weight_diff, 0); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - greentea_gpu_set(this->device_context_->id(), - this->blobs_[1]->count(), Dtype(0), - (cl_mem) (this->blobs_[1]->mutable_gpu_diff()), - 0); - } -#endif // USE_GREENTEA - } - for (int i = 0; i < top.size(); ++i) { - const Dtype* top_diff = top[i]->gpu_diff(); - const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - // Bias gradient, if necessary. - if (this->bias_term_ && this->param_propagate_down_[1]) { - Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); - } - } - if (this->param_propagate_down_[0] || propagate_down[i]) { - for (int n = 0; n < this->num_; ++n) { - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(top_diff, n * this->top_dim_, - bottom_data, n * this->bottom_dim_, weight_diff); - } - // gradient w.r.t. bottom data, if necessary. - if (propagate_down[i]) { - this->forward_gpu_gemm(top_diff, n * this->top_dim_, weight, - bottom_diff, n * this->bottom_dim_); - } - } - } - } -} - -INSTANTIATE_LAYER_GPU_FUNCS(DeconvolutionNDLayer); - -} // namespace caffe diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 3882c64dd92..2fa86bbe966 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -92,18 +92,18 @@ void Im2colLayer::Backward_gpu(const vector*>& top, for (int n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { col2im_gpu(top_diff + n * top_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - bottom_diff + n * bottom_dim_); + bottom[0]->shape(channel_axis_ + 1), + bottom[0]->shape(channel_axis_ + 2), + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + bottom_diff + n * bottom_dim_); } else { col2im_nd_gpu(top_diff + n * top_dim_, num_spatial_axes_, bottom_dim_, - bottom[0]->gpu_shape() + channel_axis_, - top[0]->gpu_shape() + channel_axis_, - kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - bottom_diff + n * bottom_dim_); + bottom[0]->gpu_shape() + channel_axis_, + top[0]->gpu_shape() + channel_axis_, + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), bottom_diff + n * bottom_dim_); } } #endif // USE_CUDA @@ -115,11 +115,26 @@ void Im2colLayer::Backward_gpu(const vector*>& top, this->device_context_->id()); for (int n = 0; n < top[0]->num(); ++n) { - greentea_col2im_gpu(&program, &ctx, (cl_mem) top_diff, - top[0]->offset(n), channels_, height_, width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, (cl_mem) bottom_diff, - bottom[0]->offset(n)); + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { + greentea_col2im_gpu(&program, &ctx, (cl_mem) top_diff, + n * top_dim_, channels_, + bottom[0]->shape(channel_axis_ + 1), + bottom[0]->shape(channel_axis_ + 2), + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + (cl_mem) bottom_diff, n * bottom_dim_); + } else { + greentea_col2im_nd_gpu(&program, &ctx, (cl_mem) top_diff, n * top_dim_, + num_spatial_axes_, channel_axis_, bottom_dim_, + (cl_mem)(bottom[0]->gpu_shape()), + (cl_mem)(top[0]->gpu_shape()), + (cl_mem)(kernel_shape_.gpu_data()), + (cl_mem)(pad_.gpu_data()), + stride_.gpu_data(), (cl_mem) bottom_diff, + n * bottom_dim_); + } } #endif // USE_GREENTEA } diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index 8ea2a2b2000..bb7a73d95c3 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -208,13 +208,13 @@ void DevicePair::compute(const vector devices, CHECK_EQ(remaining.size(), 1); pairs->insert(pairs->begin(), - DevicePair(Caffe::Get().GetCPUDeviceContext(), remaining[0])); + DevicePair(Caffe::Get().GetCPUDevice(), remaining[0])); CHECK(pairs->size() == devices.size()); for (int i = 0; i < pairs->size(); ++i) { - CHECK((*pairs)[i].parent() != (*pairs)[i].device()); + CHECK((*pairs)[i].getParent() != (*pairs)[i].getDevice()); for (int j = i + 1; j < pairs->size(); ++j) { - CHECK((*pairs)[i].device() != (*pairs)[j].device()); + CHECK((*pairs)[i].getDevice() != (*pairs)[j].getDevice()); } } #else @@ -418,7 +418,7 @@ void P2PSync::run(const vector& gpus) { DevicePair::compute(gpus, &pairs); ostringstream s; for (int i = 1; i < pairs.size(); ++i) { - s << (i == 1 ? "" : ", ") << pairs[i].parent() << ":" << pairs[i].device(); + s << (i == 1 ? "" : ", ") << pairs[i].getParent() << ":" << pairs[i].getDevice(); } LOG(INFO)<< "GPUs pairs " << s.str(); @@ -434,13 +434,13 @@ void P2PSync::run(const vector& gpus) { P2PSync* sync = j == 0 ? this : syncs[j].get(); if (sync) { const SolverParameter& p = sync->solver()->param(); - if (p.device_id() == pairs[i].parent()->list_id()) { + if (p.device_id() == pairs[i].getParent()->list_id()) { parent = sync; } } } if (parent) { - param.set_device_id(pairs[i].device()->list_id()); + param.set_device_id(pairs[i].getDevice()->list_id()); syncs[i].reset(new P2PSync(solver_, parent, param)); parent->children_.push_back((P2PSync*) syncs[i].get()); } From eab0c29a1e4acd04bffd7681e2aa19179734a2f9 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Thu, 24 Sep 2015 19:05:36 +0200 Subject: [PATCH 181/254] Consolidation of PoolingND, PoolingSK, Pooling layers. --- include/caffe/greentea/greentea_im2col.hpp | 7 +- include/caffe/vision_layers.hpp | 122 +- python/caffe/draw.py | 10 +- src/caffe/greentea/greentea_im2col.cpp | 14 +- src/caffe/layer_factory.cpp | 5 - src/caffe/layers/base_conv_layer.cpp | 5 +- src/caffe/layers/im2col_layer.cu | 88 +- src/caffe/layers/pooling_layer.cpp | 268 +++-- src/caffe/layers/pooling_layer.cu | 1237 ++++++++++++++++---- src/caffe/layers/pooling_nd_layer.cpp | 186 --- src/caffe/layers/pooling_nd_layer.cu | 343 ------ src/caffe/layers/pooling_sk_layer.cpp | 142 --- src/caffe/layers/pooling_sk_layer.cu | 478 -------- src/caffe/parallel.cpp | 3 +- src/caffe/proto/caffe.proto | 2 - src/caffe/test/test_data_transformer.cpp | 14 +- src/caffe/test/test_im2col_kernel.cu | 26 +- src/caffe/test/test_pooling_nd_layer.cpp | 22 +- src/caffe/util/im2col.cu | 4 +- src/caffe/util/upgrade_proto.cpp | 4 - 20 files changed, 1289 insertions(+), 1691 deletions(-) delete mode 100644 src/caffe/layers/pooling_nd_layer.cpp delete mode 100644 src/caffe/layers/pooling_nd_layer.cu delete mode 100644 src/caffe/layers/pooling_sk_layer.cpp delete mode 100644 src/caffe/layers/pooling_sk_layer.cu diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index 4c7e4b0d33b..68b4a9a37e4 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -64,9 +64,10 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int data_col_off, const int num_spatial_axes, - const int im_size, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem data_im, int data_off); + const int channel_axis, const int im_size, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem data_im, int data_off); template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index c403e178faf..41f1b290dcf 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -615,67 +615,9 @@ class LRNLayer : public Layer { * For whole image processing, reducing redundancy. */ template -class PoolingSKLayer : public Layer { - public: - explicit PoolingSKLayer(const LayerParameter& param) - : Layer(param) { - } - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - - virtual inline const char* type() const { - return "PoolingSK"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int MinTopBlobs() const { - return 1; - } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { - return - (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) ? 2 : 1; - } - - int max_top_blobs_; - int pad_h_, pad_w_; - int channels_; - int height_, width_; - int pooled_height_, pooled_width_; - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int kstride_h_, kstride_w_; - Blob rand_idx_; - Blob max_idx_; -}; - - -/** - * @brief Pools the input image by taking the max, average, etc. within regions. - * - * For whole image processing, reducing redundancy. - */ -template -class PoolingNDLayer : public Layer { +class PoolingLayer : public Layer { public: - explicit PoolingNDLayer(const LayerParameter& param) + explicit PoolingLayer(const LayerParameter& param) : Layer(param) { } virtual void LayerSetUp(const vector*>& bottom, @@ -696,7 +638,7 @@ class PoolingNDLayer : public Layer { const vector*>& bottom); virtual inline const char* type() const { - return "PoolingND"; + return "Pooling"; } virtual inline int ExactNumBottomBlobs() const { return 1; @@ -724,62 +666,10 @@ class PoolingNDLayer : public Layer { int num_spatial_axes_; int channels_; - int max_top_blobs_; - Blob max_idx_; -}; - -/** - * @brief Pools the input image by taking the max, average, etc. within regions. - * - * TODO(dox): thorough documentation for Forward, Backward, and proto params. - */ -template -class PoolingLayer : public Layer { - public: - explicit PoolingLayer(const LayerParameter& param) - : Layer(param) { - } - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { - return "Pooling"; - } - virtual inline int ExactNumBottomBlobs() const { - return 1; - } - virtual inline int MinTopBlobs() const { - return 1; - } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { - return - (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) ? 2 : 1; - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom); - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int pad_h_, pad_w_; - int channels_; - int height_, width_; - int pooled_height_, pooled_width_; + bool use_skernel_; bool global_pooling_; + + int max_top_blobs_; Blob rand_idx_; Blob max_idx_; }; diff --git a/python/caffe/draw.py b/python/caffe/draw.py index 64c6d01e409..6887ee508a7 100644 --- a/python/caffe/draw.py +++ b/python/caffe/draw.py @@ -40,7 +40,7 @@ def get_edge_label(layer): if layer.type == 'Data': edge_label = 'Batch ' + str(layer.data_param.batch_size) - elif layer.type == 'Convolution' or layer.type == 'ConvolutionND' or layer.type == 'ConvolutionSK' or layer.type == 'Deconvolution': + elif layer.type == 'Convolution' or layer.type == 'Deconvolution': edge_label = str(layer.convolution_param.num_output) elif layer.type == 'InnerProduct': edge_label = str(layer.inner_product_param.num_output) @@ -74,7 +74,7 @@ def get_layer_label(layer, rankdir): # horizontal space is not; separate words with newlines separator = '\\n' - if layer.type == 'Convolution' or layer.type == 'ConvolutionND' or layer.type == 'ConvolutionSK' or layer.type == 'Deconvolution': + if layer.type == 'Convolution' or layer.type == 'Deconvolution': # Outer double quotes needed or else colon characters don't parse # properly node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d%skstride: %d"' %\ @@ -89,7 +89,7 @@ def get_layer_label(layer, rankdir): layer.convolution_param.pad[0] if len(layer.convolution_param.pad) > 0 else 0, separator, layer.convolution_param.kstride[0] if len(layer.convolution_param.kstride) > 0 else 1) - elif layer.type == 'Pooling' or layer.type == 'PoolingND' or layer.type == 'PoolingSK': + elif layer.type == 'Pooling': pooling_types_dict = get_pooling_types_dict() node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d%skstride: %d"' %\ (layer.name, @@ -113,9 +113,9 @@ def choose_color_by_layertype(layertype): """Define colors for nodes based on the layer type. """ color = '#6495ED' # Default - if layertype == 'Convolution' or layertype == 'ConvolutionND' or layertype == 'ConvolutionSK' or layertype == 'Deconvolution': + if layertype == 'Convolution' or layertype == 'Deconvolution': color = '#FF5050' - elif layertype == 'Pooling' or layertype == 'PoolingND' or layertype == 'PoolingSK': + elif layertype == 'Pooling': color = '#FF9900' elif layertype == 'InnerProduct': color = '#CC33FF' diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index c6bca93860b..6a699477fb7 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -216,12 +216,12 @@ template void greentea_col2im_gpu(viennacl::ocl::program *prog, template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, const int num_spatial_axes, - const int num_kernels, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem data_col, - int data_col_off) { + viennacl::ocl::context *ctx, cl_mem data_im, + const int data_off, const int num_spatial_axes, + const int channel_axis, const int num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem data_col, int data_col_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("im2col_nd")); @@ -238,6 +238,7 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int data_off, const int num_spatial_axes, + const int channel_axis, const int num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, @@ -248,6 +249,7 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int data_off, const int num_spatial_axes, + const int channel_axis, const int num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 042f9ecde0e..dca64e87b4c 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -16,11 +16,6 @@ namespace caffe { -template -shared_ptr > GetPoolingSKLayer(const LayerParameter& param) { - return shared_ptr>(new PoolingSKLayer(param)); -} - // Get convolution layer according to engine. template shared_ptr > GetConvolutionLayer(const LayerParameter& param) { diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index f078656e68d..2e9a53b095a 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -269,9 +269,8 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, } } - if (Caffe::mode() == Caffe::Brew::CPU) { - col_buffer_.Reshape(col_buffer_shape_); - } else { + col_buffer_.Reshape(col_buffer_shape_); + if (Caffe::mode() == Caffe::Brew::GPU) { // Shared column buffer per device-queue across all layers on that device for (int i = 0; i < this->device_context_->num_queues(); ++i) { shared_ptr > buffer = this->device_context_ diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 2fa86bbe966..76789c73993 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -24,7 +24,7 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA for (int n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - im2col_gpu(bottom_data + n * bottom_dim_, channels_, + im2col_gpu(bottom_data + n * bottom_dim_, channels_, bottom[0]->shape(channel_axis_ + 1), bottom[0]->shape(channel_axis_ + 2), kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], @@ -32,7 +32,7 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, stride_.cpu_data()[0], stride_.cpu_data()[1], top_data + n * top_dim_); } else { - im2col_nd_gpu(bottom_data + n * bottom_dim_, num_spatial_axes_, + im2col_nd_gpu(bottom_data + n * bottom_dim_, num_spatial_axes_, num_kernels, bottom[0]->gpu_shape() + channel_axis_, top[0]->gpu_shape() + channel_axis_, kernel_shape_.gpu_data(), pad_.gpu_data(), @@ -49,33 +49,27 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, for (int n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - greentea_im2col_gpu(&program, &ctx, (cl_mem) bottom_data, - n * bottom_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], - kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], - pad_.cpu_data()[1], stride_.cpu_data()[0], - stride_.cpu_data()[1], (cl_mem) top_data, - n * top_dim_); + greentea_im2col_gpu(&program, &ctx, (cl_mem) bottom_data, + n * bottom_dim_, channels_, + bottom[0]->shape(channel_axis_ + 1), + bottom[0]->shape(channel_axis_ + 2), + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + (cl_mem) top_data, n * top_dim_); } else { - greentea_im2col_nd_gpu(&program, &ctx, (cl_mem) bottom_data, - n * bottom_dim_, num_spatial_axes_, num_kernels, - bottom[0]->gpu_shape() + channel_axis_, - top[0]->gpu_shape() + channel_axis_, - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), (cl_mem) top_data, - n * top_dim_); + greentea_im2col_nd_gpu(&program, &ctx, (cl_mem) bottom_data, + n * bottom_dim_, num_spatial_axes_, + channel_axis_, num_kernels, + (cl_mem) (bottom[0]->gpu_shape()), + (cl_mem) (top[0]->gpu_shape()), + (cl_mem) (kernel_shape_.gpu_data()), + (cl_mem) (pad_.gpu_data()), + (cl_mem) (stride_.gpu_data()), + (cl_mem) top_data, n * top_dim_); } } - - for (int n = 0; n < bottom[0]->num(); ++n) { - greentea_im2col_gpu(&program, &ctx, (cl_mem) bottom_data, - bottom[0]->offset(n), channels_, height_, - width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, (cl_mem) top_data, - top[0]->offset(n)); - } #endif // USE_GREENTEA } } @@ -91,19 +85,20 @@ void Im2colLayer::Backward_gpu(const vector*>& top, #ifdef USE_CUDA for (int n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_gpu(top_diff + n * top_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - bottom_diff + n * bottom_dim_); + col2im_gpu(top_diff + n * top_dim_, channels_, + bottom[0]->shape(channel_axis_ + 1), + bottom[0]->shape(channel_axis_ + 2), + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], bottom_diff + n * bottom_dim_); } else { - col2im_nd_gpu(top_diff + n * top_dim_, num_spatial_axes_, bottom_dim_, - bottom[0]->gpu_shape() + channel_axis_, - top[0]->gpu_shape() + channel_axis_, - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), bottom_diff + n * bottom_dim_); + col2im_nd_gpu(top_diff + n * top_dim_, num_spatial_axes_, + bottom_dim_, + bottom[0]->gpu_shape() + channel_axis_, + top[0]->gpu_shape() + channel_axis_, + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), bottom_diff + n * bottom_dim_); } } #endif // USE_CUDA @@ -126,14 +121,15 @@ void Im2colLayer::Backward_gpu(const vector*>& top, stride_.cpu_data()[0], stride_.cpu_data()[1], (cl_mem) bottom_diff, n * bottom_dim_); } else { - greentea_col2im_nd_gpu(&program, &ctx, (cl_mem) top_diff, n * top_dim_, - num_spatial_axes_, channel_axis_, bottom_dim_, - (cl_mem)(bottom[0]->gpu_shape()), - (cl_mem)(top[0]->gpu_shape()), - (cl_mem)(kernel_shape_.gpu_data()), - (cl_mem)(pad_.gpu_data()), - stride_.gpu_data(), (cl_mem) bottom_diff, - n * bottom_dim_); + greentea_col2im_nd_gpu(&program, &ctx, (cl_mem) top_diff, + n * top_dim_, num_spatial_axes_, + channel_axis_, bottom_dim_, + (cl_mem) (bottom[0]->gpu_shape()), + (cl_mem) (top[0]->gpu_shape()), + (cl_mem) (kernel_shape_.gpu_data()), + (cl_mem) (pad_.gpu_data()), + (cl_mem) (stride_.gpu_data()), + (cl_mem) bottom_diff, n * bottom_dim_); } } #endif // USE_GREENTEA diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 198f94369e6..b4e918318da 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -13,125 +13,222 @@ namespace caffe { using std::min; using std::max; -template +template void PoolingLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { + // Set the max number of top blobs before calling base Layer::SetUp. + // If doing MAX pooling, we can optionally output an extra top Blob + // for the mask. Otherwise, we only have one top Blob. + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + max_top_blobs_ = 2; + } else { + max_top_blobs_ = 1; + } PoolingParameter pool_param = this->layer_param_.pooling_param(); + channel_axis_ = bottom[0]->CanonicalAxisIndex(pool_param.axis()); + channels_ = bottom[0]->shape(channel_axis_); + + const int first_spatial_axis = channel_axis_ + 1; + const int num_axes = bottom[0]->num_axes(); + num_spatial_axes_ = num_axes - first_spatial_axis; + CHECK_GE(num_spatial_axes_, 1); + vector size_shape(1, num_spatial_axes_); + + kernel_shape_.Reshape(size_shape); + int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + if (pool_param.global_pooling()) { + global_pooling_ = true; CHECK(!((pool_param.kernel_size_size() > 0) || pool_param.has_kernel_h() || pool_param.has_kernel_w())) << "With Global_pooling: true Filter size cannot specified"; } else { CHECK(!(pool_param.kernel_size_size() > 0) != - !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; CHECK((pool_param.kernel_size_size() > 0) || - (pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "For non-square filters both kernel_h and kernel_w are required."; + (pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + if (pool_param.has_kernel_h() && pool_param.has_kernel_w()) { + kernel_shape_data[0] = pool_param.kernel_h(); + kernel_shape_data[1] = pool_param.kernel_w(); + } else { + const int num_kernel_dims = pool_param.kernel_size_size(); + CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_); + for (int i = 0; i < num_spatial_axes_; ++i) { + kernel_shape_data[i] = pool_param.kernel_size( + (num_kernel_dims == 1) ? 0 : i); + CHECK_GT(kernel_shape_data[i], 0)<< "Filter dimensions must be nonzero."; + } + } + } + + size_.Reshape(size_shape); + int* size_data = size_.mutable_cpu_data(); + + vector top_shape = bottom[0]->shape(); + for (int i = 0; i < num_spatial_axes_; ++i) { + size_data[i] = bottom[0]->shape(channel_axis_ + 1 + i); + } + top[0]->Reshape(top_shape); + if (top.size() > 1) { + top[1]->ReshapeLike(*top[0]); } - CHECK((!(pool_param.pad_size() > 0) && pool_param.has_pad_h() - && pool_param.has_pad_w()) - || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) - << "pad is pad OR pad_h and pad_w are required."; - CHECK((!(pool_param.stride_size() > 0) && pool_param.has_stride_h() - && pool_param.has_stride_w()) - || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) - << "Stride is stride OR stride_h and stride_w are required."; - global_pooling_ = pool_param.global_pooling(); + if (global_pooling_) { - kernel_h_ = bottom[0]->height(); - kernel_w_ = bottom[0]->width(); - } else { - if (pool_param.kernel_size_size() > 0) { - kernel_h_ = kernel_w_ = pool_param.kernel_size(0); - } else { - kernel_h_ = pool_param.kernel_h(); - kernel_w_ = pool_param.kernel_w(); + for (int i = 0; i < num_spatial_axes_; ++i) { + kernel_shape_data[i] = size_data[i]; } } - CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; - if (!pool_param.has_pad_h()) { - pad_h_ = pad_w_ = pool_param.pad_size() > 0 ? - pool_param.pad(0) : 0; + + // Setup stride dimensions (stride_). + stride_.Reshape(size_shape); + int* stride_data = stride_.mutable_cpu_data(); + if (pool_param.has_stride_h() || pool_param.has_stride_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "stride_h & stride_w can only be used for 2D convolution."; + CHECK_EQ(0, pool_param.stride_size()) + << "Either stride or stride_h/w should be specified; not both."; + stride_data[0] = pool_param.stride_h(); + stride_data[1] = pool_param.stride_w(); } else { - pad_h_ = pool_param.pad_h(); - pad_w_ = pool_param.pad_w(); + const int num_stride_dims = pool_param.stride_size(); + CHECK(num_stride_dims == 0 || num_stride_dims == 1 || + num_stride_dims == num_spatial_axes_) + << "stride must be specified once, or once per spatial dimension " + << "(stride specified " << num_stride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultStride = 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : + pool_param.stride((num_stride_dims == 1) ? 0 : i); + CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; + } } - if (!pool_param.has_stride_h()) { - stride_h_ = stride_w_ = pool_param.stride_size() > 0 ? - pool_param.stride(0) : 1; + // Setup pad dimensions (pad_). + pad_.Reshape(size_shape); + int* pad_data = pad_.mutable_cpu_data(); + if (pool_param.has_pad_h() || pool_param.has_pad_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "pad_h & pad_w can only be used for 2D convolution."; + CHECK_EQ(0, pool_param.pad_size()) + << "Either pad or pad_h/w should be specified; not both."; + pad_data[0] = pool_param.pad_h(); + pad_data[1] = pool_param.pad_w(); } else { - stride_h_ = pool_param.stride_h(); - stride_w_ = pool_param.stride_w(); + const int num_pad_dims = pool_param.pad_size(); + CHECK(num_pad_dims == 0 || num_pad_dims == 1 || + num_pad_dims == num_spatial_axes_) + << "pad must be specified once, or once per spatial dimension " + << "(pad specified " << num_pad_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultPad = 0; + for (int i = 0; i < num_spatial_axes_; ++i) { + pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : + pool_param.pad((num_pad_dims == 1) ? 0 : i); + } } - if (global_pooling_) { - CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1) - << "With Global_pooling: true; only pad = 0 and stride = 1"; + // Setup kernel stride dimensions + kstride_.Reshape(size_shape); + int* kstride_data = kstride_.mutable_cpu_data(); + if (pool_param.has_kstride_h() || pool_param.has_kstride_w()) { + CHECK_EQ(num_spatial_axes_, 2) + << "kstride_h & kstride_w can only be used for 2D convolution."; + CHECK_EQ(0, pool_param.kstride_size()) + << "Etiher kstride or kstirde_h/w should be specified; not both."; + kstride_data[0] = pool_param.pad_h(); + kstride_data[1] = pool_param.pad_w(); + } else { + const int num_kstride_dims = pool_param.kstride_size(); + CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || + num_kstride_dims == num_spatial_axes_) + << "kstride must be specified once, or once per spatial dimension " + << "(kstride specified " << num_kstride_dims << " times; " + << num_spatial_axes_ << " spatial dims);"; + const int kDefaultKstride = 1; + for (int i = 0; i < num_spatial_axes_; ++i) { + kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : + pool_param.kstride((num_kstride_dims == 1) ? 0 : i); + } } - if (pad_h_ != 0 || pad_w_ != 0) { - CHECK(this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_AVE - || this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) - << "Padding implemented only for average and max pooling."; - CHECK_LT(pad_h_, kernel_h_); - CHECK_LT(pad_w_, kernel_w_); + + // Different 2D and ND im2col/col2im kernels for strided kernels + use_skernel_ = false; + for (int i = 0; i < num_spatial_axes_; ++i) { + use_skernel_ |= (kstride_data[i] != 1); + if (use_skernel_) { + break; + } } } + template void PoolingLayer::Reshape(const vector*>& bottom, const vector*>& top) { - CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " - << "corresponding to (num, channels, height, width)"; - channels_ = bottom[0]->channels(); - height_ = bottom[0]->height(); - width_ = bottom[0]->width(); + vector size_shape(1, num_spatial_axes_); + + size_.Reshape(size_shape); + pooled_size_.Reshape(size_shape); + ext_kernel_shape_.Reshape(size_shape); + int* size_data = size_.mutable_cpu_data(); + int* pooled_size_data = pooled_size_.mutable_cpu_data(); + int* ext_kernel_shape_data = ext_kernel_shape_.mutable_cpu_data(); + int* kstride_data = kstride_.mutable_cpu_data(); + int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + int* pad_data = pad_.mutable_cpu_data(); + int* stride_data = stride_.mutable_cpu_data(); + if (global_pooling_) { - kernel_h_ = bottom[0]->height(); - kernel_w_ = bottom[0]->width(); - } - pooled_height_ = static_cast(ceil(static_cast( - height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; - pooled_width_ = static_cast(ceil(static_cast( - width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; - if (pad_h_ || pad_w_) { - // If we have padding, ensure that the last pooling starts strictly - // inside the image (instead of at the padding); otherwise clip the last. - if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) { - --pooled_height_; + for (int i = 0; i < num_spatial_axes_; ++i) { + kernel_shape_data[i] = size_data[i]; } - if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) { - --pooled_width_; - } - CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_); - CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_); } - top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + + vector top_shape = bottom[0]->shape(); + for (int i = 0; i < num_spatial_axes_; ++i) { + size_data[i] = bottom[0]->shape(channel_axis_ + 1 + i); + ext_kernel_shape_data[i] = (kernel_shape_data[i] - 1) * kstride_data[i] + 1; + pooled_size_data[i] = static_cast(ceil( + static_cast(size_data[i] + 2 * pad_data[i] + - ext_kernel_shape_data[i]) / stride_data[i])) + 1; + top_shape[channel_axis_ + 1 + i] = pooled_size_data[i]; + } + top[0]->Reshape(top_shape); if (top.size() > 1) { top[1]->ReshapeLike(*top[0]); } + // If max pooling, we will initialize the vector index part. - if (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX && top.size() == 1) { - max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + if (this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX && top.size() == 1) { + max_idx_.Reshape(top_shape); } + // If stochastic pooling, we will initialize the random index part. if (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_STOCHASTIC) { - rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); + rand_idx_.Reshape(top_shape); } } -// TODO(Yangqing): Is there a faster way to do pooling in the channel-first -// case? template void PoolingLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { + + int kernel_h_ = kernel_shape_.cpu_data()[0]; + int kernel_w_ = kernel_shape_.cpu_data()[1]; + int stride_h_ = stride_.cpu_data()[0]; + int stride_w_ = stride_.cpu_data()[1]; + int pad_h_ = pad_.cpu_data()[0]; + int pad_w_ = pad_.cpu_data()[1]; + int height_ = size_.cpu_data()[0]; + int width_ = size_.cpu_data()[1]; + int pooled_height_ = pooled_size_.cpu_data()[0]; + int pooled_width_ = pooled_size_.cpu_data()[1]; + + const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); const int top_count = top[0]->count(); @@ -234,6 +331,18 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, template void PoolingLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { + + int kernel_h_ = kernel_shape_.cpu_data()[0]; + int kernel_w_ = kernel_shape_.cpu_data()[1]; + int stride_h_ = stride_.cpu_data()[0]; + int stride_w_ = stride_.cpu_data()[1]; + int pad_h_ = pad_.cpu_data()[0]; + int pad_w_ = pad_.cpu_data()[1]; + int height_ = size_.cpu_data()[0]; + int width_ = size_.cpu_data()[1]; + int pooled_height_ = pooled_size_.cpu_data()[0]; + int pooled_width_ = pooled_size_.cpu_data()[1]; + if (!propagate_down[0]) { return; } @@ -311,11 +420,10 @@ void PoolingLayer::Backward_cpu(const vector*>& top, } } - #ifdef CPU_ONLY STUB_GPU(PoolingLayer); #endif INSTANTIATE_CLASS(PoolingLayer); -} // namespace caffe +} // namespace caffe diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index ddce6a65eb7..d0003f6c846 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -6,6 +6,11 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif // USE_GREENTEA + namespace caffe { #ifdef USE_CUDA @@ -163,154 +168,7 @@ __global__ void StoPoolForwardTest(const int nthreads, top_data[index] = cumvalues / cumsum; } } -#endif // USE_CUDA -template -void PoolingLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - int count = top[0]->count(); - // We'll output the mask to top[1] if it's of size >1. - const bool use_top_mask = top.size() > 1; - int* mask = NULL; - Dtype* top_mask = NULL; - - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, - mask, top_mask); - break; - case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - if (this->phase_ == TRAIN) { - // We need to create the random index as well. - caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), - rand_idx_.mutable_gpu_data()); - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, - rand_idx_.mutable_gpu_data(), top_data); - } else { - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, top_data); - } - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; - } - } - CUDA_POST_KERNEL_CHECK; -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: { - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_forward")); - viennacl::ocl::enqueue( - oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - WrapHandle((cl_mem) top_data, &ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, &ctx), - WrapHandle((cl_mem) top_mask, &ctx)), - ctx.get_queue()); - } - break; - case PoolingParameter_PoolMethod_AVE: { - viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("ave_pool_forward")); - viennacl::ocl::enqueue( - oclk_ave_pool_forward(count, - WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - WrapHandle((cl_mem)top_data, &ctx)), - ctx.get_queue()); - } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: { - if (this->phase_ == caffe::TRAIN) { - // We need to create the random index as well. - greentea_gpu_rng_uniform(this->device_context_->id(), count, - Dtype(0), Dtype(1), - (cl_mem)(rand_idx_.mutable_gpu_data()), 0); - - viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("sto_pool_forward_train")); - viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, - WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, - stride_h_, stride_w_, - WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx), - WrapHandle((cl_mem)top_data, &ctx)), - ctx.get_queue()); - } else { - viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("sto_pool_forward_test")); - viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, - WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, - height_, width_, pooled_height_, - pooled_width_, kernel_h_, kernel_w_, - stride_h_, stride_w_, WrapHandle((cl_mem)top_data, &ctx)), - ctx.get_queue()); - } - } - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; - } - } -#endif // USE_GREENTEA - } -} - -#ifdef USE_CUDA template __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, @@ -433,15 +291,770 @@ __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, bottom_diff[index] = gradient; } } + +template +__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* top_data, int* mask, Dtype* top_mask) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height); + int wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + bottom_data += (n * channels + c) * height * width; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + if (bottom_data[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data[maxidx]; + } + } + } + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +template +__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h - pad_h; + int wstart = pw * stride_w - pad_w; + int hend = min(hstart + ext_kernel_h, height + pad_h); + int wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + bottom_data += (n * channels + c) * height * width; + int pool_size = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data[h * width + w]; + ++pool_size; + } + } + top_data[index] = aveval / pool_size; + } +} + +template +__global__ void StoPoolForwardTrain(const int nthreads, + const Dtype* bottom_data, const int num, + const int channels, const int height, + const int width, const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, + const int ext_kernel_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, Dtype* rand_idx, + Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + Dtype cumsum = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. + cumsum = 0; + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data[h * width + w]; + return; + } + } + } + } +} + +template +__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, + const int height, const int width, + const int pooled_height, + const int pooled_width, const int kernel_h, + const int kernel_w, const int ext_kernel_h, + const int ext_kernel_w, const int stride_h, + const int stride_w, const int kstride_h, + const int kstride_w, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + ext_kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + ext_kernel_w, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; h += kstride_h) { + for (int w = wstart; w < wend; w += kstride_w) { + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + +template +__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, + const int* mask, const Dtype* top_mask, + const int num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, + const int ext_kernel_h, const int ext_kernel_w, + const int stride_h, const int stride_w, + const int kstride_h, const int kstride_w, + const int pad_h, const int pad_w, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int pooled_height_1 = pooled_height - 1; + int pooled_width_1 = pooled_width - 1; + int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int phend = + (h >= pooled_height) ? + pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; + int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int pwend = + (w >= pooled_width) ? + pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff += offset; + if (mask) { + mask += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } else { + top_mask += offset; + for (int ph = phstart; ph <= phend; ph += kstride_h) { + for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +template +__global__ void MaxPoolNDForward(const int n, const int num_axes, + const Dtype* bottom_data, + const int channels, const int* size, + const int* pooled_size, const int* kernel_size, + const int* ext_kernel_size, const int* stride, + const int* kstride, const int* pad, + Dtype* top_data, int* mask, Dtype* top_mask) { + int d_idx[6]; // NOLINT(runtime/arrays) + int d_start[6]; // NOLINT(runtime/arrays) + int d_end[6]; // NOLINT(runtime/arrays) + int d_iter[6]; // NOLINT(runtime/arrays) + int i; + + CUDA_KERNEL_LOOP(index, n) { + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = index % pooled_size[i]; + d_start[i] = d_idx[i] * stride[i] - pad[i]; + d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); + d_start[i] = max(d_start[i], 0); + num /= pooled_size[i]; + offset *= size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] >= d_end[i]) { + top_data[index] = -FLT_MAX; + if (mask) { + mask[index] = -1; + } else { + top_mask[index] = -1; + } + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype maxval = -FLT_MAX; + int maxidx = -1; + int final_offset = 0; + + bool incremented; + do { + final_offset = offset; + int size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * size_prod; + size_prod *= size[i]; + } + + if (bottom_data[final_offset] > maxval) { + maxidx = final_offset; + maxval = bottom_data[maxidx]; + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] >= d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +template +__global__ void MaxPoolNDBackward(const int n, const int num_axes, + const Dtype* top_diff, const int* mask, + const Dtype* top_mask, + const int channels, const int* size, + const int* pooled_size, + const int* kernel_size, + const int* ext_kernel_size, const int* stride, + const int* kstride, const int* pad, + Dtype* bottom_diff) { + int d_idx[6]; // NOLINT(runtime/arrays) + int d_start[6]; // NOLINT(runtime/arrays) + int d_end[6]; // NOLINT(runtime/arrays) + int d_iter[6]; // NOLINT(runtime/arrays) + int i; + + CUDA_KERNEL_LOOP(index, n) { + // find out the local index + // find out the local offset + int offset = 1; + int num = index; + for (i = num_axes - 1; i >= 0; --i) { + d_idx[i] = num % size[i]; + d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % + kstride[i] : d_idx[i]; + num /= size[i]; + offset *= pooled_size[i]; + d_iter[i] = d_start[i]; + + if (d_start[i] > d_end[i]) { + bottom_diff[index] = 0; + return; + } + } + int chan = num % channels; + num /= channels; + offset *= (num * channels + chan); + + Dtype gradient = 0; + int final_offset = 0; + int im_offset = 0; + + bool incremented; + do { + final_offset = offset; + im_offset = 0; + int size_prod = 1; + int pooled_size_prod = 1; + for (i = num_axes - 1; i >= 0; --i) { + final_offset += d_iter[i] * pooled_size_prod; + im_offset += d_idx[i] * size_prod; + size_prod *= size[i]; + pooled_size_prod *= pooled_size[i]; + } + + if (mask) { + if (mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } else { + if (top_mask[final_offset] == im_offset) { + gradient += top_diff[final_offset]; + } + } + + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + if (d_iter[i] > d_end[i] - kstride[i]) { + d_iter[i] = d_start[i]; + } else { + d_iter[i] += kstride[i]; + incremented = true; + break; + } + } + } while (incremented); + bottom_diff[index] = gradient; + } +} +#endif // USE_CUDA + + + +template +void PoolingLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int count = top[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + int* mask = NULL; + Dtype* top_mask = NULL; + + if (this->device_context_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + + if(num_spatial_axes_ == 2) { + + int kernel_h_ = kernel_shape_.cpu_data()[0]; + int kernel_w_ = kernel_shape_.cpu_data()[1]; + int stride_h_ = stride_.cpu_data()[0]; + int stride_w_ = stride_.cpu_data()[1]; + int pad_h_ = pad_.cpu_data()[0]; + int pad_w_ = pad_.cpu_data()[1]; + int kstride_h_ = kstride_.cpu_data()[0]; + int kstride_w_ = kstride_.cpu_data()[1]; + int height_ = size_.cpu_data()[0]; + int width_ = size_.cpu_data()[1]; + int pooled_height_ = pooled_size_.cpu_data()[0]; + int pooled_width_ = pooled_size_.cpu_data()[1]; + int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + + // 2D case + if(use_skernel_) { + // 2D-SK case + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, top_data, + mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == caffe::TRAIN) { + // We need to create the random index as well. + caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + rand_idx_.mutable_gpu_data(), top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, top_data); + } + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + CUDA_POST_KERNEL_CHECK; + } else { + // 2D case + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, + mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == TRAIN) { + // We need to create the random index as well. + caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, + rand_idx_.mutable_gpu_data(), top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, top_data); + } + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + CUDA_POST_KERNEL_CHECK; + } + } else { + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolNDForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, num_spatial_axes_, bottom_data, + channels_, size_.gpu_data(), pooled_size_.gpu_data(), + kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), + top_data, mask, top_mask); + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + } + CUDA_POST_KERNEL_CHECK; + #endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + if(num_spatial_axes_ == 2) { + int kernel_h_ = kernel_shape_.cpu_data()[0]; + int kernel_w_ = kernel_shape_.cpu_data()[1]; + int stride_h_ = stride_.cpu_data()[0]; + int stride_w_ = stride_.cpu_data()[1]; + int pad_h_ = pad_.cpu_data()[0]; + int pad_w_ = pad_.cpu_data()[1]; + int kstride_h_ = kstride_.cpu_data()[0]; + int kstride_w_ = kstride_.cpu_data()[1]; + int height_ = size_.cpu_data()[0]; + int width_ = size_.cpu_data()[1]; + int pooled_height_ = pooled_size_.cpu_data()[0]; + int pooled_width_ = pooled_size_.cpu_data()[1]; + int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + + // 2D case + if(use_skernel_) { + // 2D-SK case + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_forward_sk")); + viennacl::ocl::enqueue( + oclk_max_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), + bottom[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapHandle((cl_mem) top_data, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx)), + ctx.get_queue()); + } + break; + case PoolingParameter_PoolMethod_AVE: { + viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("ave_pool_forward_sk")); + viennacl::ocl::enqueue( + oclk_ave_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, WrapHandle((cl_mem)top_data, &ctx)), + ctx.get_queue()); + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: { + if (this->phase_ == caffe::TRAIN) { + // We need to create the random index as well. + greentea_gpu_rng_uniform(this->device_context_->id(), count, + Dtype(0), Dtype(1), + (cl_mem)(rand_idx_.mutable_gpu_data()), 0); + + viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("sto_pool_forward_train_sk")); + viennacl::ocl::enqueue( + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx), + WrapHandle((cl_mem)(top_data), &ctx)), + ctx.get_queue()); + } else { + viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("sto_pool_forward_test_sk")); + viennacl::ocl::enqueue( + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + WrapHandle((cl_mem)top_data, &ctx)), + ctx.get_queue()); + } + } + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + } else { + // 2D case + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_forward")); + viennacl::ocl::enqueue( + oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), + bottom[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + WrapHandle((cl_mem) top_data, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx)), + ctx.get_queue()); + } + break; + case PoolingParameter_PoolMethod_AVE: { + viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("ave_pool_forward")); + viennacl::ocl::enqueue( + oclk_ave_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + WrapHandle((cl_mem)top_data, &ctx)), + ctx.get_queue()); + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: { + if (this->phase_ == caffe::TRAIN) { + // We need to create the random index as well. + greentea_gpu_rng_uniform(this->device_context_->id(), count, + Dtype(0), Dtype(1), + (cl_mem)(rand_idx_.mutable_gpu_data()), 0); + + viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("sto_pool_forward_train")); + viennacl::ocl::enqueue( + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, + stride_h_, stride_w_, + WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx), + WrapHandle((cl_mem)top_data, &ctx)), + ctx.get_queue()); + } else { + viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("sto_pool_forward_test")); + viennacl::ocl::enqueue( + oclk_sto_pool_forward(count, + WrapHandle((cl_mem)bottom_data, &ctx), + bottom[0]->num(), channels_, + height_, width_, pooled_height_, + pooled_width_, kernel_h_, kernel_w_, + stride_h_, stride_w_, WrapHandle((cl_mem)top_data, &ctx)), + ctx.get_queue()); + } + } + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + } + } else { + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_forward_nd")); + viennacl::ocl::enqueue( + oclk_max_pool_forward(count, num_spatial_axes_, + WrapHandle((cl_mem)bottom_data, &ctx), + channels_, + WrapHandle((cl_mem)(size_.gpu_data()), &ctx), + WrapHandle((cl_mem)(pooled_size_.gpu_data()), &ctx), + WrapHandle((cl_mem)(kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem)(ext_kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem)(stride_.gpu_data()), &ctx), + WrapHandle((cl_mem)(kstride_.gpu_data()), &ctx), + WrapHandle((cl_mem)(pad_.gpu_data()), &ctx), + WrapHandle((cl_mem)top_data, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem)mask, &ctx), + WrapHandle((cl_mem)top_mask, &ctx)), + ctx.get_queue()); + } + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + } + +#endif // USE_GREENTEA + } +} template void PoolingLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { - return; - } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); @@ -453,106 +1066,250 @@ void PoolingLayer::Backward_gpu(const vector*>& top, if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(count, Dtype(0.), bottom_diff); - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->gpu_data(); + + if(num_spatial_axes_ == 2) { + + int kernel_h_ = kernel_shape_.cpu_data()[0]; + int kernel_w_ = kernel_shape_.cpu_data()[1]; + int stride_h_ = stride_.cpu_data()[0]; + int stride_w_ = stride_.cpu_data()[1]; + int pad_h_ = pad_.cpu_data()[0]; + int pad_w_ = pad_.cpu_data()[1]; + int kstride_h_ = kstride_.cpu_data()[0]; + int kstride_w_ = kstride_.cpu_data()[1]; + int height_ = size_.cpu_data()[0]; + int width_ = size_.cpu_data()[1]; + int pooled_height_ = pooled_size_.cpu_data()[0]; + int pooled_width_ = pooled_size_.cpu_data()[1]; + int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + + if(use_skernel_) { + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + bottom_diff); + break; + default: + LOG(FATAL)<< + "Unknown or unsupported pooling method in Backward_gpu()."; + } + CUDA_POST_KERNEL_CHECK; } else { - mask = max_idx_.gpu_data(); + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, + bottom_diff); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, top_diff, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, rand_idx_.gpu_data(), top_diff, + top[0]->num(), channels_, height_, width_, pooled_height_, + pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, + bottom_diff); + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + CUDA_POST_KERNEL_CHECK; } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, top_diff, mask, top_mask, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, - bottom_diff); - break; - case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, top_diff, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, rand_idx_.gpu_data(), top_diff, - top[0]->num(), channels_, height_, width_, pooled_height_, - pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, - bottom_diff); - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; - } + } else { + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolNDBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, num_spatial_axes_, top_diff, mask, top_mask, + channels_, size_.gpu_data(), pooled_size_.gpu_data(), + kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), + bottom_diff); + break; + default: + LOG(FATAL)<< + "Unknown or unsupported pooling method in Backward_gpu()."; + } + CUDA_POST_KERNEL_CHECK; } - CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_context_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_context_->id()); + + greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), + (cl_mem) bottom_diff, 0); + + if(num_spatial_axes_ == 2) { - greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), - (cl_mem) bottom_diff, 0); - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: { - if (use_top_mask) { - top_mask = top[1]->gpu_data(); + int kernel_h_ = kernel_shape_.cpu_data()[0]; + int kernel_w_ = kernel_shape_.cpu_data()[1]; + int stride_h_ = stride_.cpu_data()[0]; + int stride_w_ = stride_.cpu_data()[1]; + int pad_h_ = pad_.cpu_data()[0]; + int pad_w_ = pad_.cpu_data()[1]; + int kstride_h_ = kstride_.cpu_data()[0]; + int kstride_w_ = kstride_.cpu_data()[1]; + int height_ = size_.cpu_data()[0]; + int width_ = size_.cpu_data()[1]; + int pooled_height_ = pooled_size_.cpu_data()[0]; + int pooled_width_ = pooled_size_.cpu_data()[1]; + int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + + if(use_skernel_) { + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_backward_sk")); + viennacl::ocl::enqueue( + oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx), + top[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, ext_kernel_h, ext_kernel_w, + stride_h_, stride_w_, kstride_h_, kstride_w_, + pad_h_, pad_w_, + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); + } + break; + default: + LOG(FATAL)<< + "Unknown or unsupported pooling method in Backward_gpu()."; + } } else { - mask = max_idx_.gpu_data(); + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_backward")); + viennacl::ocl::enqueue( + oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + mask == NULL ? 0 : 1, + WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx), + top[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, + pad_w_, + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); + } + break; + case PoolingParameter_PoolMethod_AVE: { + viennacl::ocl::kernel &oclk_ave_pool_backward = program.get_kernel( + CL_KERNEL_SELECT("ave_pool_backward")); + viennacl::ocl::enqueue( + oclk_ave_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + top[0]->num(), channels_, height_, width_, + pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, + pad_w_, + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: { + viennacl::ocl::kernel &oclk_sto_pool_backward = program.get_kernel( + CL_KERNEL_SELECT("sto_pool_backward")); + viennacl::ocl::enqueue( + oclk_sto_pool_backward( + count, WrapHandle((cl_mem) (rand_idx_.gpu_data()), &ctx), + WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); + } + break; + default: { + LOG(FATAL)<< "Unknown pooling method."; + } + } + } + } else { + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: { + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_.gpu_data(); + } + viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( + CL_KERNEL_SELECT("max_pool_backward_nd")); + viennacl::ocl::enqueue( + oclk_max_pool_backward( + count, num_spatial_axes_, WrapHandle((cl_mem) top_diff, &ctx), + mask == NULL ? 0 : 1, WrapHandle((cl_mem) mask, &ctx), + WrapHandle((cl_mem) top_mask, &ctx), channels_, + WrapHandle((cl_mem) (size_.gpu_data()), &ctx), + WrapHandle((cl_mem) (pooled_size_.gpu_data()), &ctx), + WrapHandle((cl_mem) (kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem) (ext_kernel_shape_.gpu_data()), &ctx), + WrapHandle((cl_mem) (stride_.gpu_data()), &ctx), + WrapHandle((cl_mem) (kstride_.gpu_data()), &ctx), + WrapHandle((cl_mem) (pad_.gpu_data()), &ctx), + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); + } + break; + default: + LOG(FATAL)<< "Unknown or unsupported pooling method in Backward_gpu()."; } - viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_backward")); - viennacl::ocl::enqueue( - oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, &ctx), - WrapHandle((cl_mem) top_mask, &ctx), - top[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, - pad_w_, - WrapHandle((cl_mem) bottom_diff, &ctx)), - ctx.get_queue()); - } - break; - case PoolingParameter_PoolMethod_AVE: { - viennacl::ocl::kernel &oclk_ave_pool_backward = program.get_kernel( - CL_KERNEL_SELECT("ave_pool_backward")); - viennacl::ocl::enqueue( - oclk_ave_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), - top[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, - pad_w_, - WrapHandle((cl_mem) bottom_diff, &ctx)), - ctx.get_queue()); - } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: { - viennacl::ocl::kernel &oclk_sto_pool_backward = program.get_kernel( - CL_KERNEL_SELECT("sto_pool_backward")); - viennacl::ocl::enqueue( - oclk_sto_pool_backward( - count, WrapHandle((cl_mem) (rand_idx_.gpu_data()), &ctx), - WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, - WrapHandle((cl_mem) bottom_diff, &ctx)), - ctx.get_queue()); - } - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; } - } -#endif +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/pooling_nd_layer.cpp b/src/caffe/layers/pooling_nd_layer.cpp deleted file mode 100644 index 38f50a46554..00000000000 --- a/src/caffe/layers/pooling_nd_layer.cpp +++ /dev/null @@ -1,186 +0,0 @@ -#include -#include -#include - -#include "caffe/common.hpp" -#include "caffe/layer.hpp" -#include "caffe/syncedmem.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -using std::min; -using std::max; - -template -void PoolingNDLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - // Set the max number of top blobs before calling base Layer::SetUp. - // If doing MAX pooling, we can optionally output an extra top Blob - // for the mask. Otherwise, we only have one top Blob. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) { - max_top_blobs_ = 2; - } else { - max_top_blobs_ = 1; - } - PoolingParameter pool_param = this->layer_param_.pooling_param(); - channel_axis_ = bottom[0]->CanonicalAxisIndex(pool_param.axis()); - channels_ = bottom[0]->shape(channel_axis_); - - const int first_spatial_axis = channel_axis_ + 1; - const int num_axes = bottom[0]->num_axes(); - num_spatial_axes_ = num_axes - first_spatial_axis; - CHECK_GE(num_spatial_axes_, 1); - vector size_shape(1, num_spatial_axes_); - - kernel_shape_.Reshape(size_shape); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); - - CHECK(!(pool_param.kernel_size_size() > 0) != - !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; - CHECK((pool_param.kernel_size_size() > 0) || - (pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "For non-square filters both kernel_h and kernel_w are required."; - - if (pool_param.has_kernel_h() && pool_param.has_kernel_w()) { - kernel_shape_data[0] = pool_param.kernel_h(); - kernel_shape_data[1] = pool_param.kernel_w(); - } else { - const int num_kernel_dims = pool_param.kernel_size_size(); - CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_); - for (int i = 0; i < num_spatial_axes_; ++i) { - kernel_shape_data[i] = - pool_param.kernel_size((num_kernel_dims == 1) ? 0 : i); - CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; - } - } - - // Setup stride dimensions (stride_). - stride_.Reshape(size_shape); - int* stride_data = stride_.mutable_cpu_data(); - if (pool_param.has_stride_h() || pool_param.has_stride_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "stride_h & stride_w can only be used for 2D convolution."; - CHECK_EQ(0, pool_param.stride_size()) - << "Either stride or stride_h/w should be specified; not both."; - stride_data[0] = pool_param.stride_h(); - stride_data[1] = pool_param.stride_w(); - } else { - const int num_stride_dims = pool_param.stride_size(); - CHECK(num_stride_dims == 0 || num_stride_dims == 1 || - num_stride_dims == num_spatial_axes_) - << "stride must be specified once, or once per spatial dimension " - << "(stride specified " << num_stride_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultStride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { - stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : - pool_param.stride((num_stride_dims == 1) ? 0 : i); - CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; - } - } - // Setup pad dimensions (pad_). - pad_.Reshape(size_shape); - int* pad_data = pad_.mutable_cpu_data(); - if (pool_param.has_pad_h() || pool_param.has_pad_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "pad_h & pad_w can only be used for 2D convolution."; - CHECK_EQ(0, pool_param.pad_size()) - << "Either pad or pad_h/w should be specified; not both."; - pad_data[0] = pool_param.pad_h(); - pad_data[1] = pool_param.pad_w(); - } else { - const int num_pad_dims = pool_param.pad_size(); - CHECK(num_pad_dims == 0 || num_pad_dims == 1 || - num_pad_dims == num_spatial_axes_) - << "pad must be specified once, or once per spatial dimension " - << "(pad specified " << num_pad_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultPad = 0; - for (int i = 0; i < num_spatial_axes_; ++i) { - pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : - pool_param.pad((num_pad_dims == 1) ? 0 : i); - } - } - // Setup kernel stride dimensions - kstride_.Reshape(size_shape); - int* kstride_data = kstride_.mutable_cpu_data(); - if (pool_param.has_kstride_h() || pool_param.has_kstride_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "kstride_h & kstride_w can only be used for 2D convolution."; - CHECK_EQ(0, pool_param.kstride_size()) - << "Etiher kstride or kstirde_h/w should be specified; not both."; - kstride_data[0] = pool_param.pad_h(); - kstride_data[1] = pool_param.pad_w(); - } else { - const int num_kstride_dims = pool_param.kstride_size(); - CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || - num_kstride_dims == num_spatial_axes_) - << "kstride must be specified once, or once per spatial dimension " - << "(kstride specified " << num_kstride_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultKstride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { - kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : - pool_param.kstride((num_kstride_dims == 1) ? 0 : i); - } - } - - size_.Reshape(size_shape); - pooled_size_.Reshape(size_shape); - ext_kernel_shape_.Reshape(size_shape); - int* size_data = size_.mutable_cpu_data(); - int* pooled_size_data = pooled_size_.mutable_cpu_data(); - int* ext_kernel_shape_data = ext_kernel_shape_.mutable_cpu_data(); - - vector top_shape = bottom[0]->shape(); - for (int i = 0; i < num_spatial_axes_; ++i) { - size_data[i] = bottom[0]->shape(channel_axis_ + 1 + i); - ext_kernel_shape_data[i] = (kernel_shape_data[i] - 1) * kstride_data[i] + 1; - pooled_size_data[i] = static_cast(ceil( - static_cast(size_data[i] + 2 * pad_data[i] - - ext_kernel_shape_data[i]) / stride_data[i])) + 1; - top_shape[channel_axis_ + 1 + i] = pooled_size_data[i]; - } - top[0]->Reshape(top_shape); - if (top.size() > 1) { - top[1]->ReshapeLike(*top[0]); - } - // If max pooling, we will initialize the vector index part. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX && top.size() == 1) { - max_idx_.Reshape(top_shape); - } -} - -template -void PoolingNDLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - LayerSetUp(bottom, top); -} - -template -void PoolingNDLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { - LOG(FATAL)<< "Forward_cpu() not implemented in PoolingNDLayer."; -} - -template -void PoolingNDLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { - LOG(FATAL)<< "Backward_cpu() not implemented in PoolingNDLayer."; - return; -} - -#ifdef CPU_ONLY -STUB_GPU(PoolingNDLayer); -#endif - -INSTANTIATE_CLASS(PoolingNDLayer); -REGISTER_LAYER_CLASS(PoolingND); - -} // namespace caffe diff --git a/src/caffe/layers/pooling_nd_layer.cu b/src/caffe/layers/pooling_nd_layer.cu deleted file mode 100644 index 96b9144efcf..00000000000 --- a/src/caffe/layers/pooling_nd_layer.cu +++ /dev/null @@ -1,343 +0,0 @@ -#include -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#endif // USE_GREENTEA - -namespace caffe { - -#ifdef USE_CUDA -template -__global__ void MaxPoolNDForward(const int n, const int num_axes, - const Dtype* bottom_data, - const int channels, const int* size, - const int* pooled_size, const int* kernel_size, - const int* ext_kernel_size, const int* stride, - const int* kstride, const int* pad, - Dtype* top_data, int* mask, Dtype* top_mask) { - int d_idx[6]; // NOLINT(runtime/arrays) - int d_start[6]; // NOLINT(runtime/arrays) - int d_end[6]; // NOLINT(runtime/arrays) - int d_iter[6]; // NOLINT(runtime/arrays) - int i; - - CUDA_KERNEL_LOOP(index, n) { - int offset = 1; - int num = index; - for (i = num_axes - 1; i >= 0; --i) { - d_idx[i] = index % pooled_size[i]; - d_start[i] = d_idx[i] * stride[i] - pad[i]; - d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); - d_start[i] = max(d_start[i], 0); - num /= pooled_size[i]; - offset *= size[i]; - d_iter[i] = d_start[i]; - - if (d_start[i] >= d_end[i]) { - top_data[index] = -FLT_MAX; - if (mask) { - mask[index] = -1; - } else { - top_mask[index] = -1; - } - return; - } - } - int chan = num % channels; - num /= channels; - offset *= (num * channels + chan); - - Dtype maxval = -FLT_MAX; - int maxidx = -1; - int final_offset = 0; - - bool incremented; - do { - final_offset = offset; - int size_prod = 1; - for (i = num_axes - 1; i >= 0; --i) { - final_offset += d_iter[i] * size_prod; - size_prod *= size[i]; - } - - if (bottom_data[final_offset] > maxval) { - maxidx = final_offset; - maxval = bottom_data[maxidx]; - } - - incremented = false; - for (i = num_axes - 1; i >= 0; --i) { - if (d_iter[i] >= d_end[i] - kstride[i]) { - d_iter[i] = d_start[i]; - } else { - d_iter[i] += kstride[i]; - incremented = true; - break; - } - } - } while (incremented); - - top_data[index] = maxval; - if (mask) { - mask[index] = maxidx; - } else { - top_mask[index] = maxidx; - } - } -} -#endif // USE_CUDA - -template -void PoolingNDLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - int count = top[0]->count(); - // We'll output the mask to top[1] if it's of size >1. - const bool use_top_mask = top.size() > 1; - int* mask = NULL; - Dtype* top_mask = NULL; - - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolNDForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, num_spatial_axes_, bottom_data, - channels_, size_.gpu_data(), pooled_size_.gpu_data(), - kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), - stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), - top_data, mask, top_mask); - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; - } - } - CUDA_POST_KERNEL_CHECK; - -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: { - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_forward_nd")); - viennacl::ocl::enqueue( - oclk_max_pool_forward(count, num_spatial_axes_, - WrapHandle((cl_mem)bottom_data, &ctx), - channels_, - WrapHandle((cl_mem)(size_.gpu_data()), &ctx), - WrapHandle((cl_mem)(pooled_size_.gpu_data()), &ctx), - WrapHandle((cl_mem)(kernel_shape_.gpu_data()), &ctx), - WrapHandle((cl_mem)(ext_kernel_shape_.gpu_data()), &ctx), - WrapHandle((cl_mem)(stride_.gpu_data()), &ctx), - WrapHandle((cl_mem)(kstride_.gpu_data()), &ctx), - WrapHandle((cl_mem)(pad_.gpu_data()), &ctx), - WrapHandle((cl_mem)top_data, &ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem)mask, &ctx), - WrapHandle((cl_mem)top_mask, &ctx)), - ctx.get_queue()); - } - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; - } - } -#endif // USE_GREENTEA - } -} - -#ifdef USE_CUDA -template -__global__ void MaxPoolNDBackward(const int n, const int num_axes, - const Dtype* top_diff, const int* mask, - const Dtype* top_mask, - const int channels, const int* size, - const int* pooled_size, - const int* kernel_size, - const int* ext_kernel_size, const int* stride, - const int* kstride, const int* pad, - Dtype* bottom_diff) { - int d_idx[6]; // NOLINT(runtime/arrays) - int d_start[6]; // NOLINT(runtime/arrays) - int d_end[6]; // NOLINT(runtime/arrays) - int d_iter[6]; // NOLINT(runtime/arrays) - int i; - - CUDA_KERNEL_LOOP(index, n) { - // find out the local index - // find out the local offset - int offset = 1; - int num = index; - for (i = num_axes - 1; i >= 0; --i) { - d_idx[i] = num % size[i]; - d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? - d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; - d_end[i] = (d_idx[i] >= pooled_size[i]) ? - (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % - kstride[i] : d_idx[i]; - num /= size[i]; - offset *= pooled_size[i]; - d_iter[i] = d_start[i]; - - if (d_start[i] > d_end[i]) { - bottom_diff[index] = 0; - return; - } - } - int chan = num % channels; - num /= channels; - offset *= (num * channels + chan); - - Dtype gradient = 0; - int final_offset = 0; - int im_offset = 0; - - bool incremented; - do { - final_offset = offset; - im_offset = 0; - int size_prod = 1; - int pooled_size_prod = 1; - for (i = num_axes - 1; i >= 0; --i) { - final_offset += d_iter[i] * pooled_size_prod; - im_offset += d_idx[i] * size_prod; - size_prod *= size[i]; - pooled_size_prod *= pooled_size[i]; - } - - if (mask) { - if (mask[final_offset] == im_offset) { - gradient += top_diff[final_offset]; - } - } else { - if (top_mask[final_offset] == im_offset) { - gradient += top_diff[final_offset]; - } - } - - incremented = false; - for (i = num_axes - 1; i >= 0; --i) { - if (d_iter[i] > d_end[i] - kstride[i]) { - d_iter[i] = d_start[i]; - } else { - d_iter[i] += kstride[i]; - incremented = true; - break; - } - } - } while (incremented); - bottom_diff[index] = gradient; - } -} -#endif // USE_CUDA - -template -void PoolingNDLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { - const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); - // We'll output the mask to top[1] if it's of size >1. - const bool use_top_mask = top.size() > 1; - const int* mask = NULL; - const Dtype* top_mask = NULL; - - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_gpu_set(count, Dtype(0.), bottom_diff); - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolNDBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, num_spatial_axes_, top_diff, mask, top_mask, - channels_, size_.gpu_data(), pooled_size_.gpu_data(), - kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), - stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), - bottom_diff); - break; - default: - LOG(FATAL)<< - "Unknown or unsupported pooling method in Backward_gpu()."; - } - CUDA_POST_KERNEL_CHECK; -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - - greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), - (cl_mem) bottom_diff, 0); - - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: { - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); - } - viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_backward_nd")); - viennacl::ocl::enqueue( - oclk_max_pool_backward( - count, num_spatial_axes_, WrapHandle((cl_mem) top_diff, &ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, &ctx), - WrapHandle((cl_mem) top_mask, &ctx), channels_, - WrapHandle((cl_mem) (size_.gpu_data()), &ctx), - WrapHandle((cl_mem) (pooled_size_.gpu_data()), &ctx), - WrapHandle((cl_mem) (kernel_shape_.gpu_data()), &ctx), - WrapHandle((cl_mem) (ext_kernel_shape_.gpu_data()), &ctx), - WrapHandle((cl_mem) (stride_.gpu_data()), &ctx), - WrapHandle((cl_mem) (kstride_.gpu_data()), &ctx), - WrapHandle((cl_mem) (pad_.gpu_data()), &ctx), - WrapHandle((cl_mem) bottom_diff, &ctx)), - ctx.get_queue()); - } - break; - default: - LOG(FATAL)<< - "Unknown or unsupported pooling method in Backward_gpu()."; - } -#endif // USE_GREENTEA - } - } - -INSTANTIATE_LAYER_GPU_FUNCS(PoolingNDLayer); - -} // namespace caffe diff --git a/src/caffe/layers/pooling_sk_layer.cpp b/src/caffe/layers/pooling_sk_layer.cpp deleted file mode 100644 index 14d87b59586..00000000000 --- a/src/caffe/layers/pooling_sk_layer.cpp +++ /dev/null @@ -1,142 +0,0 @@ -#include -#include -#include - -#include "caffe/common.hpp" -#include "caffe/layer.hpp" -#include "caffe/syncedmem.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -using std::min; -using std::max; - -template -void PoolingSKLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - // Set the max number of top blobs before calling base Layer::SetUp. - // If doing MAX pooling, we can optionally output an extra top Blob - // for the mask. Otherwise, we only have one top Blob. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) { - max_top_blobs_ = 2; - } else { - max_top_blobs_ = 1; - } - PoolingParameter pool_param = this->layer_param_.pooling_param(); - CHECK(!(pool_param.kernel_size_size() > 0) != - !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; - CHECK((pool_param.kernel_size_size() > 0) || - (pool_param.has_kernel_h() && pool_param.has_kernel_w())) - << "For non-square filters both kernel_h and kernel_w are required."; - CHECK((!(pool_param.pad_size() > 0) && pool_param.has_pad_h() - && pool_param.has_pad_w()) - || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) - << "pad is pad OR pad_h and pad_w are required."; - CHECK((!(pool_param.stride_size() > 0) && pool_param.has_stride_h() - && pool_param.has_stride_w()) - || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) - << "Stride is stride OR stride_h and stride_w are required."; - if (pool_param.kernel_size_size() > 0) { - kernel_h_ = kernel_w_ = pool_param.kernel_size(0); - } else { - kernel_h_ = pool_param.kernel_h(); - kernel_w_ = pool_param.kernel_w(); - } - CHECK_GT(kernel_h_, 0)<< "Filter dimensions cannot be zero."; - CHECK_GT(kernel_w_, 0)<< "Filter dimensions cannot be zero."; - if (!pool_param.has_pad_h()) { - pad_h_ = pad_w_ = pool_param.pad_size() > 0 ? - pool_param.pad(0) : 0; - } else { - pad_h_ = pool_param.pad_h(); - pad_w_ = pool_param.pad_w(); - } - CHECK_EQ(pad_h_, 0); - CHECK_EQ(pad_w_, 0); - if (!pool_param.has_stride_h()) { - stride_h_ = stride_w_ = pool_param.stride_size() > 0 ? - pool_param.stride(0) : 1; - } else { - stride_h_ = pool_param.stride_h(); - stride_w_ = pool_param.stride_w(); - } - if (pad_h_ != 0 || pad_w_ != 0) { - CHECK(this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_AVE - || this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) - << "Padding implemented only for average and max pooling."; - CHECK_LT(pad_h_, kernel_h_); - CHECK_LT(pad_w_, kernel_w_); - } - if (!pool_param.has_kstride_h()) { - kstride_h_ = kstride_w_ = pool_param.kstride_size() > 0 ? - pool_param.kstride(0) : 1; - } else { - kstride_h_ = pool_param.kstride_h(); - kstride_w_ = pool_param.kstride_w(); - } - - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - - channels_ = bottom[0]->channels(); - height_ = bottom[0]->height(); - width_ = bottom[0]->width(); - pooled_height_ = static_cast(ceil( - static_cast(height_ + 2 * pad_h_ - ext_kernel_h) / stride_h_)) + 1; - pooled_width_ = static_cast(ceil( - static_cast(width_ + 2 * pad_w_ - ext_kernel_w) / stride_w_)) + 1; - - top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); - if (top.size() > 1) { - top[1]->ReshapeLike(*top[0]); - } - // If max pooling, we will initialize the vector index part. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX && top.size() == 1) { - max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); - } - // If stochastic pooling, we will initialize the random index part. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_STOCHASTIC) { - rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, - pooled_width_); - } -} - -template -void PoolingSKLayer::Reshape(const vector*>& bottom, - const vector*>& top) { - LayerSetUp(bottom, top); -} - -// TODO(Yangqing): Is there a faster way to do pooling in the channel-first -// case? -template -void PoolingSKLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { - LOG(FATAL)<< "Forward_cpu() not implemented in PoolingSKLayer."; -} - -template -void PoolingSKLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { - LOG(FATAL)<< "Backward_cpu() not implemented in PoolingSKLayer."; - return; -} - -#ifdef CPU_ONLY -STUB_GPU(PoolingSKLayer); -#endif - -INSTANTIATE_CLASS(PoolingSKLayer); -REGISTER_LAYER_CLASS(PoolingSK); - -} // namespace caffe diff --git a/src/caffe/layers/pooling_sk_layer.cu b/src/caffe/layers/pooling_sk_layer.cu deleted file mode 100644 index 60e5e224ef6..00000000000 --- a/src/caffe/layers/pooling_sk_layer.cu +++ /dev/null @@ -1,478 +0,0 @@ -#include -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#endif // USE_GREENTEA - -namespace caffe { - -#ifdef USE_CUDA -template -__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, - Dtype* top_data, int* mask, Dtype* top_mask) { - CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height); - int wend = min(wstart + ext_kernel_w, width); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - Dtype maxval = -FLT_MAX; - int maxidx = -1; - bottom_data += (n * channels + c) * height * width; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { - if (bottom_data[h * width + w] > maxval) { - maxidx = h * width + w; - maxval = bottom_data[maxidx]; - } - } - } - top_data[index] = maxval; - if (mask) { - mask[index] = maxidx; - } else { - top_mask[index] = maxidx; - } - } -} - -template -__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, - Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height + pad_h); - int wend = min(wstart + ext_kernel_w, width + pad_w); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height); - wend = min(wend, width); - Dtype aveval = 0; - bottom_data += (n * channels + c) * height * width; - int pool_size = 0; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - aveval += bottom_data[h * width + w]; - ++pool_size; - } - } - top_data[index] = aveval / pool_size; - } -} - -template -__global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* bottom_data, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, Dtype* rand_idx, - Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + ext_kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + ext_kernel_w, width); - Dtype cumsum = 0.; - bottom_data += (n * channels + c) * height * width; - // First pass: get sum - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { - cumsum += bottom_data[h * width + w]; - } - } - float thres = rand_idx[index] * cumsum; - // Second pass: get value, and set index. - cumsum = 0; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { - cumsum += bottom_data[h * width + w]; - if (cumsum >= thres) { - rand_idx[index] = ((n * channels + c) * height + h) * width + w; - top_data[index] = bottom_data[h * width + w]; - return; - } - } - } - } -} - -template -__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + ext_kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + ext_kernel_w, width); - // We set cumsum to be 0 to avoid divide-by-zero problems - Dtype cumsum = FLT_MIN; - Dtype cumvalues = 0.; - bottom_data += (n * channels + c) * height * width; - // First pass: get sum - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { - cumsum += bottom_data[h * width + w]; - cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; - } - } - top_data[index] = cumvalues / cumsum; - } -} -#endif // USE_CUDA - -template -void PoolingSKLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - int count = top[0]->count(); - // We'll output the mask to top[1] if it's of size >1. - const bool use_top_mask = top.size() > 1; - int* mask = NULL; - Dtype* top_mask = NULL; - - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, top_data, - mask, top_mask); - break; - case PoolingParameter_PoolMethod_AVE: - // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, top_data); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - if (this->phase_ == caffe::TRAIN) { - // We need to create the random index as well. - caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), - rand_idx_.mutable_gpu_data()); - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - rand_idx_.mutable_gpu_data(), top_data); - } else { - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, top_data); - } - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; - } - } - CUDA_POST_KERNEL_CHECK; - -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: { - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_forward_sk")); - viennacl::ocl::enqueue( - oclk_max_pool_forward(count, - WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - WrapHandle((cl_mem) top_data, &ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, &ctx), - WrapHandle((cl_mem) top_mask, &ctx)), - ctx.get_queue()); - } - break; - case PoolingParameter_PoolMethod_AVE: { - viennacl::ocl::kernel &oclk_ave_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("ave_pool_forward_sk")); - viennacl::ocl::enqueue( - oclk_ave_pool_forward(count, - WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, WrapHandle((cl_mem)top_data, &ctx)), - ctx.get_queue()); - } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: { - if (this->phase_ == caffe::TRAIN) { - // We need to create the random index as well. - greentea_gpu_rng_uniform(this->device_context_->id(), count, - Dtype(0), Dtype(1), - (cl_mem)(rand_idx_.mutable_gpu_data()), 0); - - viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("sto_pool_forward_train_sk")); - viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, - WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - WrapHandle((cl_mem)(rand_idx_.mutable_gpu_data()), &ctx), - WrapHandle((cl_mem)(top_data), &ctx)), - ctx.get_queue()); - } else { - viennacl::ocl::kernel &oclk_sto_pool_forward = program.get_kernel( - CL_KERNEL_SELECT("sto_pool_forward_test_sk")); - viennacl::ocl::enqueue( - oclk_sto_pool_forward(count, - WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - WrapHandle((cl_mem)top_data, &ctx)), - ctx.get_queue()); - } - } - break; - default: { - LOG(FATAL)<< "Unknown pooling method."; - } - } -#endif // USE_GREENTEA - } -} - -#ifdef USE_CUDA -template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, - Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { - // find out the local index - // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - - int pooled_height_1 = pooled_height - 1; - int pooled_width_1 = pooled_width - 1; - int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; - int phend = - (h >= pooled_height) ? - pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; - int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; - int pwend = - (w >= pooled_width) ? - pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; - - Dtype gradient = 0; - int offset = (n * channels + c) * pooled_height * pooled_width; - top_diff += offset; - if (mask) { - mask += offset; - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { - if (mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; - } - } - } - } else { - top_mask += offset; - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { - if (top_mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; - } - } - } - } - bottom_diff[index] = gradient; - } -} -#endif // USE_CUDA - -template -void PoolingSKLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, - const vector*>& bottom) { - const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); - // We'll output the mask to top[1] if it's of size >1. - const bool use_top_mask = top.size() > 1; - const int* mask = NULL; - const Dtype* top_mask = NULL; - - int ext_kernel_h = (kernel_h_ - 1) * kstride_h_ + 1; - int ext_kernel_w = (kernel_w_ - 1) * kstride_w_ + 1; - - if (this->device_context_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_gpu_set(count, Dtype(0.), bottom_diff); - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, top_diff, mask, top_mask, top[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, - kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - bottom_diff); - break; - default: - LOG(FATAL)<< - "Unknown or unsupported pooling method in Backward_gpu()."; - } - CUDA_POST_KERNEL_CHECK; -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_->id()); - - greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), - (cl_mem) bottom_diff, 0); - - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: { - if (use_top_mask) { - top_mask = top[1]->gpu_data(); - } else { - mask = max_idx_.gpu_data(); - } - viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( - CL_KERNEL_SELECT("max_pool_backward_sk")); - viennacl::ocl::enqueue( - oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), - mask == NULL ? 0 : 1, - WrapHandle((cl_mem) mask, &ctx), - WrapHandle((cl_mem) top_mask, &ctx), - top[0]->num(), channels_, height_, width_, - pooled_height_, pooled_width_, kernel_h_, - kernel_w_, ext_kernel_h, ext_kernel_w, - stride_h_, stride_w_, kstride_h_, kstride_w_, - pad_h_, pad_w_, - WrapHandle((cl_mem) bottom_diff, &ctx)), - ctx.get_queue()); - } - break; - default: - LOG(FATAL)<< - "Unknown or unsupported pooling method in Backward_gpu()."; - } -#endif // USE_GREENTEA - } - } - -INSTANTIATE_LAYER_GPU_FUNCS(PoolingSKLayer); - -} // namespace caffe diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index bb7a73d95c3..159be8c7324 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -418,7 +418,8 @@ void P2PSync::run(const vector& gpus) { DevicePair::compute(gpus, &pairs); ostringstream s; for (int i = 1; i < pairs.size(); ++i) { - s << (i == 1 ? "" : ", ") << pairs[i].getParent() << ":" << pairs[i].getDevice(); + s << (i == 1 ? "" : ", ") << pairs[i].getParent() << ":" + << pairs[i].getDevice(); } LOG(INFO)<< "GPUs pairs " << s.str(); diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 85729895dd3..baff87c9bfd 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -1071,8 +1071,6 @@ message V1LayerParameter { TANH = 23; WINDOW_DATA = 24; THRESHOLD = 31; - CONVOLUTION_SK = 40; - POOLING_SK = 41; } optional LayerType type = 5; repeated BlobProto blobs = 6; diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index 32eec39ee49..dc362d02574 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -42,7 +42,7 @@ class DataTransformTest : public ::testing::Test { // Get crop sequence with Caffe seed 1701. DataTransformer* transformer = new DataTransformer(transform_param, phase, - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); const int crop_size = transform_param.crop_size(); Caffe::set_random_seed(seed_); transformer->InitRand(); @@ -95,7 +95,7 @@ TYPED_TEST(DataTransformTest, TestEmptyTransform) { Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -120,7 +120,7 @@ TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { Blob* blob = new Blob(1, 3, 4, 5); DataTransformer* transformer = new DataTransformer(transform_param, TEST, - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); @@ -146,7 +146,7 @@ TYPED_TEST(DataTransformTest, TestCropSize) { FillDatum(label, channels, height, width, unique_pixels, &datum); DataTransformer* transformer = new DataTransformer(transform_param, TEST, - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); transformer->InitRand(); Blob* blob = new Blob(1, channels, crop_size, crop_size); @@ -288,7 +288,7 @@ TYPED_TEST(DataTransformTest, TestMeanValue) { Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); for (int j = 0; j < blob->count(); ++j) { @@ -312,7 +312,7 @@ TYPED_TEST(DataTransformTest, TestMeanValues) { Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); for (int c = 0; c < channels; ++c) { @@ -353,7 +353,7 @@ TYPED_TEST(DataTransformTest, TestMeanFile) { Blob* blob = new Blob(1, channels, height, width); DataTransformer* transformer = new DataTransformer(transform_param, TEST, - Caffe::GetDefaultDeviceContext()); + Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); for (int j = 0; j < blob->count(); ++j) { diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 31e288e0489..16de869c0dd 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -16,19 +16,21 @@ namespace caffe { // Forward declare kernel functions #ifdef USE_CUDA -template +template __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, - Dtype* data_col); - -template -__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col); + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + Dtype* data_col); + +template +__global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, + const Dtype* data_im, const int* im_shape, + const int* col_shape, + const int* kernel_shape, const int* pad, + const int* stride, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; #endif // USE_CUDA diff --git a/src/caffe/test/test_pooling_nd_layer.cpp b/src/caffe/test/test_pooling_nd_layer.cpp index 7598f84e729..810b45305a7 100644 --- a/src/caffe/test/test_pooling_nd_layer.cpp +++ b/src/caffe/test/test_pooling_nd_layer.cpp @@ -64,7 +64,7 @@ class PoolingNDLayerTest : public GPUDeviceTest { pooling_param->set_axis(1); - PoolingNDLayer layer(layer_param); + PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); int d = blob_bottom_->shape(2); @@ -109,7 +109,7 @@ class PoolingNDLayerTest : public GPUDeviceTest { pooling_param->set_axis(1); - PoolingNDLayer layer(layer_param); + PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); int d = blob_bottom_->shape(2); @@ -166,19 +166,21 @@ TYPED_TEST_CASE(PoolingNDLayerTest, TestDtypes); TYPED_TEST(PoolingNDLayerTest, TestSetup) { LayerParameter layer_param; - PoolingParameter* Pooling_param = + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - Pooling_param->add_kernel_size(3); - Pooling_param->add_kernel_size(3); - Pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); - Pooling_param->add_kstride(2); - Pooling_param->add_kstride(2); - Pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - PoolingNDLayer layer(layer_param); + + PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(1, this->blob_top_->shape(2)); diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 6964c6dd061..8e0778e977c 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -602,7 +602,7 @@ void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int* pad, const int* stride, Dtype* data_col) { im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( - num_kernels, data_im, im_shape, col_shape, + num_kernels, num_spatial_axes, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); CUDA_POST_KERNEL_CHECK; } @@ -699,7 +699,7 @@ void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, Dtype* data_im) { col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( - im_size, data_col, im_shape, col_shape, + im_size, num_spatial_axes, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); CUDA_POST_KERNEL_CHECK; } diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 1dacbda0a04..6874827edb9 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -939,10 +939,6 @@ const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { return "WindowData"; case V1LayerParameter_LayerType_THRESHOLD: return "Threshold"; - case V1LayerParameter_LayerType_CONVOLUTION_SK: - return "ConvolutionSK"; - case V1LayerParameter_LayerType_POOLING_SK: - return "PoolingSK"; default: LOG(FATAL)<< "Unknown V1LayerParameter layer type: " << type; return ""; From d8d537353c0e23c01c068c80ad27ca3ae07830d1 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Fri, 25 Sep 2015 04:56:54 +0200 Subject: [PATCH 182/254] Grouped convolution fix. --- include/caffe/blob.hpp | 2 +- include/caffe/filler.hpp | 2 +- include/caffe/layer.hpp | 2 +- include/caffe/parallel.hpp | 4 +-- include/caffe/vision_layers.hpp | 49 ++++++++++++++++++++-------- src/caffe/blob.cpp | 2 +- src/caffe/layers/affinity_layer.cpp | 2 +- src/caffe/layers/base_conv_layer.cpp | 7 ++-- src/caffe/layers/base_data_layer.cpp | 8 ++--- src/caffe/layers/conv_layer.cpp | 4 ++- src/caffe/layers/embed_layer.cu | 10 +++--- src/caffe/layers/pooling_layer.cpp | 1 + src/caffe/layers/tile_layer.cu | 4 +-- src/caffe/parallel.cpp | 12 +++---- 14 files changed, 67 insertions(+), 42 deletions(-) diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index c2009646af6..b5a1d4fb1ec 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -297,7 +297,7 @@ class Blob { /** * @brief Return the device context to which this blob and shared memory belongs */ - device *device_context(); + device *get_device(); protected: shared_ptr data_; diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index 5475fa326a2..c9ef243868d 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -92,7 +92,7 @@ class GaussianFiller : public Filler { Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); rand_vec_.reset( new SyncedMemory(blob->count() * sizeof(int), - blob->device_context())); + blob->get_device())); int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); for (int i = 0; i < blob->count(); ++i) { diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 3915ca210e3..cf97409c174 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -348,7 +348,7 @@ class Layer { /** * @brief Returns the device context this layer runs on */ - inline device *device_context() { + inline device *get_device() { return device_context_; } diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp index 56e8dda18b3..6a8aeecb38d 100644 --- a/include/caffe/parallel.hpp +++ b/include/caffe/parallel.hpp @@ -70,11 +70,11 @@ class DevicePair { device_(dev) { } - inline device* getParent() { + inline device* get_parent() { return parent_; } - inline device* getDevice() { + inline device* get_device() { return device_; } diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 41f1b290dcf..6caf08dcc94 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -276,24 +276,47 @@ class BaseConvolutionLayer : public Layer { stride_.cpu_data()[1], col_buff); } } else { - im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), col_buff); + if (this->use_skernel_) { + im2col_ndsk_gpu(data, num_spatial_axes_, num_kernels_im2col_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), col_buff); + } else { + im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), col_buff); + } } } inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_gpu(col_buff, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], data); + if (this->use_skernel_) { + col2im_sk_gpu(col_buff, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + kstride_.gpu_data()[0], kstride_.gpu_data()[1], data); + } else { + col2im_gpu(col_buff, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], data); + } } else { - col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - data); + if (this->use_skernel_) { + col2im_ndsk_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), data); + } else { + col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), data); + } } } #endif // USE_CUDA diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 6726de42169..d52daaf01ad 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -209,7 +209,7 @@ template<> unsigned int Blob::asum_data() const { } template -device *Blob::device_context() { +device *Blob::get_device() { return device_context_; } diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp index 48bdb5319fb..94e7c970ddc 100644 --- a/src/caffe/layers/affinity_layer.cpp +++ b/src/caffe/layers/affinity_layer.cpp @@ -44,7 +44,7 @@ void AffinityLayer::Reshape(const vector*>& bottom, top[bidx]->Reshape(1, 2, bottom[bidx]->height(), bottom[bidx]->width()); shared_ptr > blob_pointer( - new Blob(this->device_context())); + new Blob(this->get_device())); min_index_.push_back(blob_pointer); // 1, #edges, height, width diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 2e9a53b095a..a8d4c2d32c6 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -412,7 +412,7 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, for (int g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_context_->id(), CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, - conv_out_spatial_dim_, kernel_dim_ / group_, + conv_out_spatial_dim_, kernel_dim_, (Dtype) 1., (cl_mem) weights, weight_offset_ * g, (cl_mem) col_buff, (is_1x1_ ? input_off : 0) + col_offset_ * g, @@ -460,7 +460,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, for (int g = 0; g < group_; ++g) { caffe_gpu_gemm( CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, - conv_out_channels_, (Dtype) 1., weights + weight_offset_ * g, + conv_out_channels_ / group_, (Dtype) 1., weights + weight_offset_ * g, output + output_off + output_offset_ * g, (Dtype) 0., col_buff + (is_1x1_ ? input_off : 0) + col_offset_ * g); } @@ -472,8 +472,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, #ifdef USE_GREENTEA for (int g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_context_->id(), CblasTrans, - CblasNoTrans, kernel_dim_ / group_, - conv_out_spatial_dim_, + CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_ / group_, (Dtype) 1., (cl_mem) weights, weight_offset_ * g, (cl_mem) output, output_off + output_offset_ * g, diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index cdcc12ca8a2..194e9d6a281 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -67,7 +67,7 @@ void BasePrefetchingDataLayer::LayerSetUp( #endif DLOG(INFO) << "Initializing prefetch"; this->data_transformer_->InitRand(); - StartInternalThread(this->device_context()); + StartInternalThread(this->get_device()); DLOG(INFO) << "Prefetch initialized."; } @@ -77,7 +77,7 @@ void BasePrefetchingDataLayer::InternalThreadEntry() { #ifdef USE_CUDA cudaStream_t stream; if (Caffe::mode() == Caffe::GPU) { - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); } } @@ -91,7 +91,7 @@ void BasePrefetchingDataLayer::InternalThreadEntry() { #ifndef CPU_ONLY #ifdef USE_CUDA if (Caffe::mode() == Caffe::GPU) { - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { batch->data_.data().get()->async_gpu_push(stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } @@ -106,7 +106,7 @@ void BasePrefetchingDataLayer::InternalThreadEntry() { #ifndef CPU_ONLY #ifdef USE_CUDA if (Caffe::mode() == Caffe::GPU) { - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { CUDA_CHECK(cudaStreamDestroy(stream)); } } diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index fb50bb095ed..160df1551fd 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -13,11 +13,13 @@ void ConvolutionLayer::compute_output_shape() { const int* kernel_shape_data = this->kernel_shape_.cpu_data(); const int* stride_data = this->stride_.cpu_data(); const int* pad_data = this->pad_.cpu_data(); + const int* kstride_data = this->kstride_.cpu_data(); this->output_shape_.clear(); for (int i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis const int input_dim = this->input_shape(i + 1); - const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) + const int output_dim = (input_dim + 2 * pad_data[i] + - ((kernel_shape_data[i] - 1) * kstride_data[i] + 1)) / stride_data[i] + 1; this->output_shape_.push_back(output_dim); } diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu index fc88bfe0a59..6853ba301d2 100644 --- a/src/caffe/layers/embed_layer.cu +++ b/src/caffe/layers/embed_layer.cu @@ -53,7 +53,7 @@ void EmbedLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const int count = top[0]->count(); - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA EmbedForward // NOLINT_NEXT_LINE(whitespace/operators) @@ -81,7 +81,7 @@ void EmbedLayer::Forward_gpu(const vector*>& bottom, ctx.get_queue()); if (bias_term_) { - greentea_gpu_gemm(this->device_context()->id(), CblasNoTrans, + greentea_gpu_gemm(this->get_device()->id(), CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1), (cl_mem) (bias_multiplier_.gpu_data()), 0, (cl_mem) (this->blobs_[1]->gpu_data()), 0, @@ -101,7 +101,7 @@ void EmbedLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA EmbedBackward // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS)( @@ -127,14 +127,14 @@ void EmbedLayer::Backward_gpu(const vector*>& top, if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemv(CblasTrans, M_, N_, Dtype(1), top_diff, bias_multiplier_.gpu_data(), Dtype(1), bias_diff); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_gemv(this->device_context()->id(), CblasTrans, M_, N_, + greentea_gpu_gemv(this->get_device()->id(), CblasTrans, M_, N_, Dtype(1), (cl_mem) top_diff, 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, Dtype(1), (cl_mem) bias_diff, 0); diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index b4e918318da..327a2024503 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -44,6 +44,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, pool_param.has_kernel_h() || pool_param.has_kernel_w())) << "With Global_pooling: true Filter size cannot specified"; } else { + global_pooling_ = false; CHECK(!(pool_param.kernel_size_size() > 0) != !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; diff --git a/src/caffe/layers/tile_layer.cu b/src/caffe/layers/tile_layer.cu index 561e14bba72..33f29383082 100644 --- a/src/caffe/layers/tile_layer.cu +++ b/src/caffe/layers/tile_layer.cu @@ -34,7 +34,7 @@ void TileLayer::Forward_gpu( Dtype* top_data = top[0]->mutable_gpu_data(); const int bottom_tile_axis = bottom[0]->shape(axis_); const int nthreads = top[0]->count(); - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA Tile // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( @@ -87,7 +87,7 @@ void TileLayer::Backward_gpu(const vector*>& top, const int tile_size = inner_dim_ / bottom_tile_axis; const int nthreads = bottom[0]->count(); - if (this->device_context()->backend() == BACKEND_CUDA) { + if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA TileBackward // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)( diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index 159be8c7324..c7c36c34d3f 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -212,9 +212,9 @@ void DevicePair::compute(const vector devices, CHECK(pairs->size() == devices.size()); for (int i = 0; i < pairs->size(); ++i) { - CHECK((*pairs)[i].getParent() != (*pairs)[i].getDevice()); + CHECK((*pairs)[i].get_parent() != (*pairs)[i].get_device()); for (int j = i + 1; j < pairs->size(); ++j) { - CHECK((*pairs)[i].getDevice() != (*pairs)[j].getDevice()); + CHECK((*pairs)[i].get_device() != (*pairs)[j].get_device()); } } #else @@ -418,8 +418,8 @@ void P2PSync::run(const vector& gpus) { DevicePair::compute(gpus, &pairs); ostringstream s; for (int i = 1; i < pairs.size(); ++i) { - s << (i == 1 ? "" : ", ") << pairs[i].getParent() << ":" - << pairs[i].getDevice(); + s << (i == 1 ? "" : ", ") << pairs[i].get_parent() << ":" + << pairs[i].get_device(); } LOG(INFO)<< "GPUs pairs " << s.str(); @@ -435,13 +435,13 @@ void P2PSync::run(const vector& gpus) { P2PSync* sync = j == 0 ? this : syncs[j].get(); if (sync) { const SolverParameter& p = sync->solver()->param(); - if (p.device_id() == pairs[i].getParent()->list_id()) { + if (p.device_id() == pairs[i].get_parent()->list_id()) { parent = sync; } } } if (parent) { - param.set_device_id(pairs[i].getDevice()->list_id()); + param.set_device_id(pairs[i].get_device()->list_id()); syncs[i].reset(new P2PSync(solver_, parent, param)); parent->children_.push_back((P2PSync*) syncs[i].get()); } From 117b029c65b1655f782c32e8fdc63942ea4fedb9 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 26 Sep 2015 23:48:20 +0200 Subject: [PATCH 183/254] Removed temporary txt file. --- Greentea_Building_Blocks.txt | 25 ------------------------- 1 file changed, 25 deletions(-) delete mode 100644 Greentea_Building_Blocks.txt diff --git a/Greentea_Building_Blocks.txt b/Greentea_Building_Blocks.txt deleted file mode 100644 index 22aed3de73e..00000000000 --- a/Greentea_Building_Blocks.txt +++ /dev/null @@ -1,25 +0,0 @@ -GREENTEA BUILDING BLOCKS: - -viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_context_.id()); -viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_context_.id()); - -viennacl::ocl::kernel &oclk_kernel = program.get_kernel( - CL_KERNEL_SELECT("kernel")); -viennacl::ocl::enqueue( - oclk_kernel(WrapHandle((cl_mem) data, ctx)), - ctx.get_queue()); - -if (this->device_context_.backend() == BACKEND_CUDA) { -#ifdef USE_CUDA -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA -#endif // USE_GREENTEA -} - -#ifdef USE_GREENTEA -#include "caffe/greentea/greentea.hpp" -#include "caffe/greentea/greentea_math_functions.hpp" -#endif From 6afda54a3497ca4abbb4cdc520aad5d252819c0a Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 28 Sep 2015 05:20:29 +0200 Subject: [PATCH 184/254] Convolution bias fix. --- include/caffe/vision_layers.hpp | 13 +++++++------ src/caffe/layers/base_conv_layer.cpp | 2 +- src/caffe/layers/deconv_layer.cpp | 5 +++-- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 6caf08dcc94..79bc8d07936 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -278,17 +278,18 @@ class BaseConvolutionLayer : public Layer { } else { if (this->use_skernel_) { im2col_ndsk_gpu(data, num_spatial_axes_, num_kernels_im2col_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), kstride_.gpu_data(), col_buff); + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), col_buff); } else { im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), col_buff); + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), col_buff); } } } + inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { if (this->use_skernel_) { diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 4f141baf4fa..63a86fdb8e8 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -535,7 +535,7 @@ void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, const int input_off) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_gemv(CblasNoTrans, num_output_, conv_out_spatial_dim_, 1., + caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., input + input_off, bias_multiplier_.gpu_data(), 1., bias); #endif // USE_CUDA diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 91aabb315b2..4e6d6c878b0 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -8,17 +8,18 @@ namespace caffe { -template +template void DeconvolutionLayer::compute_output_shape() { const int* kernel_shape_data = this->kernel_shape_.cpu_data(); const int* stride_data = this->stride_.cpu_data(); const int* pad_data = this->pad_.cpu_data(); + const int* kstride_data = this->kstride_.cpu_data(); this->output_shape_.clear(); for (int i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis const int input_dim = this->input_shape(i + 1); const int output_dim = stride_data[i] * (input_dim - 1) - + kernel_shape_data[i] - 2 * pad_data[i]; + + ((kernel_shape_data[i] - 1) * kstride_data[i] + 1) - 2 * pad_data[i]; this->output_shape_.push_back(output_dim); } } From 04bce355d0baf8165673dab8d6d3d8f55186068e Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 28 Sep 2015 16:00:50 +0200 Subject: [PATCH 185/254] SPP/Pooling layer padding fix. --- src/caffe/layers/pooling_layer.cpp | 40 +++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 327a2024503..bb9ba6c541b 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -16,26 +16,30 @@ using std::max; template void PoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { + + PoolingParameter pool_param = this->layer_param_.pooling_param(); + // Set the max number of top blobs before calling base Layer::SetUp. // If doing MAX pooling, we can optionally output an extra top Blob // for the mask. Otherwise, we only have one top Blob. - if (this->layer_param_.pooling_param().pool() - == PoolingParameter_PoolMethod_MAX) { + if (pool_param.pool() == PoolingParameter_PoolMethod_MAX) { max_top_blobs_ = 2; } else { max_top_blobs_ = 1; } - PoolingParameter pool_param = this->layer_param_.pooling_param(); + channel_axis_ = bottom[0]->CanonicalAxisIndex(pool_param.axis()); channels_ = bottom[0]->shape(channel_axis_); const int first_spatial_axis = channel_axis_ + 1; const int num_axes = bottom[0]->num_axes(); num_spatial_axes_ = num_axes - first_spatial_axis; - CHECK_GE(num_spatial_axes_, 1); - vector size_shape(1, num_spatial_axes_); + CHECK_GE(num_spatial_axes_, 0); - kernel_shape_.Reshape(size_shape); + vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); + vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1)); + + kernel_shape_.Reshape(spatial_dim_blob_shape); int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); if (pool_param.global_pooling()) { @@ -65,7 +69,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } } - size_.Reshape(size_shape); + size_.Reshape(spatial_dim_blob_shape); int* size_data = size_.mutable_cpu_data(); vector top_shape = bottom[0]->shape(); @@ -84,7 +88,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } // Setup stride dimensions (stride_). - stride_.Reshape(size_shape); + stride_.Reshape(spatial_dim_blob_shape); int* stride_data = stride_.mutable_cpu_data(); if (pool_param.has_stride_h() || pool_param.has_stride_w()) { CHECK_EQ(num_spatial_axes_, 2) @@ -107,8 +111,9 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; } } + // Setup pad dimensions (pad_). - pad_.Reshape(size_shape); + pad_.Reshape(spatial_dim_blob_shape); int* pad_data = pad_.mutable_cpu_data(); if (pool_param.has_pad_h() || pool_param.has_pad_w()) { CHECK_EQ(num_spatial_axes_, 2) @@ -130,16 +135,17 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, pool_param.pad((num_pad_dims == 1) ? 0 : i); } } + // Setup kernel stride dimensions - kstride_.Reshape(size_shape); + kstride_.Reshape(spatial_dim_blob_shape); int* kstride_data = kstride_.mutable_cpu_data(); if (pool_param.has_kstride_h() || pool_param.has_kstride_w()) { CHECK_EQ(num_spatial_axes_, 2) << "kstride_h & kstride_w can only be used for 2D convolution."; CHECK_EQ(0, pool_param.kstride_size()) << "Etiher kstride or kstirde_h/w should be specified; not both."; - kstride_data[0] = pool_param.pad_h(); - kstride_data[1] = pool_param.pad_w(); + kstride_data[0] = pool_param.kstride_h(); + kstride_data[1] = pool_param.kstride_w(); } else { const int num_kstride_dims = pool_param.kstride_size(); CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || @@ -194,6 +200,16 @@ void PoolingLayer::Reshape(const vector*>& bottom, pooled_size_data[i] = static_cast(ceil( static_cast(size_data[i] + 2 * pad_data[i] - ext_kernel_shape_data[i]) / stride_data[i])) + 1; + if (pad_data[i] > 0) { + // If we have padding, ensure that the last pooling starts strictly + // inside the image (instead of at the padding); otherwise clip the last. + if ((pooled_size_data[i] - 1) * stride_data[i] + >= size_data[i] + pad_data[i]) { + --pooled_size_data[i]; + } + CHECK_LT((pooled_size_data[i] - 1) * stride_data[i], + size_data[i] + pad_data[i]); + } top_shape[channel_axis_ + 1 + i] = pooled_size_data[i]; } top[0]->Reshape(top_shape); From d06ee17299f0f1c83642d60aafab74974376abb4 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 2 Oct 2015 22:33:47 +0200 Subject: [PATCH 186/254] OpenCL convolution adapted. --- include/caffe/vision_layers.hpp | 124 +- log.txt | 1598 ++++++++++++++++++-- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/im2col_nd.cl | 36 +- src/caffe/greentea/greentea_im2col.cpp | 18 +- src/caffe/layers/pooling_layer.cpp | 6 +- src/caffe/layers/pooling_layer.cu | 52 +- src/caffe/util/im2col.cu | 2 - 8 files changed, 1650 insertions(+), 190 deletions(-) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 79bc8d07936..84d675f567d 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -323,22 +323,63 @@ class BaseConvolutionLayer : public Layer { #endif // USE_CUDA #ifdef USE_GREENTEA inline void greentea_conv_im2col_gpu(const Dtype* data, const int data_off, - Dtype* col_buff, - const int col_buff_off) { + Dtype* col_buff, const int col_buff_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); - greentea_im2col_gpu(&program, &ctx, (cl_mem) data, data_off, - conv_in_channels_, - conv_input_shape_.cpu_data()[1], - conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], - kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], - pad_.cpu_data()[1], stride_.cpu_data()[0], - stride_.cpu_data()[1], (cl_mem) col_buff, - col_buff_off); + + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { + if (this->use_skernel_) { + greentea_im2col_sk_gpu(&program, &ctx, (cl_mem) data, data_off, + conv_in_channels_, + conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], + stride_.cpu_data()[1], + kstride_.cpu_data()[0], + kstride_.cpu_data()[1], + (cl_mem) col_buff); + } else { + greentea_im2col_gpu(&program, &ctx, (cl_mem) data, data_off, + conv_in_channels_, + conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + (cl_mem) col_buff, col_buff_off); + } + } else { + if (this->use_skernel_) { + greentea_im2col_ndsk_gpu(&program, &ctx, (cl_mem) data, data_off, + num_spatial_axes_, num_kernels_im2col_, + (cl_mem) (conv_input_shape_.gpu_data()), + (cl_mem) (col_buffer_.gpu_shape()), + (cl_mem) (kernel_shape_.gpu_data()), + (cl_mem) (pad_.gpu_data()), + (cl_mem) (stride_.gpu_data()), + (cl_mem) (kstride_.gpu_data()), + (cl_mem) col_buff, col_buff_off); + } else { + greentea_im2col_nd_gpu(&program, &ctx, (cl_mem) data, data_off, + num_spatial_axes_, + 0, + num_kernels_im2col_, + (cl_mem) (conv_input_shape_.gpu_data()), + (cl_mem) (col_buffer_.gpu_shape()), + (cl_mem) (kernel_shape_.gpu_data()), + (cl_mem) (pad_.gpu_data()), + (cl_mem) (stride_.gpu_data()), + (cl_mem) col_buff, col_buff_off); + } + } } + inline void greentea_conv_col2im_gpu(const Dtype* col_buff, const int col_buff_off, Dtype* data, const int data_off) { @@ -346,14 +387,59 @@ class BaseConvolutionLayer : public Layer { this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); - greentea_col2im_gpu(&program, &ctx, (cl_mem) col_buff, col_buff_off, - conv_in_channels_, - conv_input_shape_.cpu_data()[1], - conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], - kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], - pad_.cpu_data()[1], stride_.cpu_data()[0], - stride_.cpu_data()[1], (cl_mem) data, data_off); + + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { + if (this->use_skernel_) { + greentea_col2im_sk_gpu(&program, &ctx, (cl_mem) col_buff, + conv_in_channels_, + conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], + stride_.cpu_data()[1], + kstride_.gpu_data()[0], + kstride_.gpu_data()[1], (cl_mem) data, + data_off); + } else { + greentea_col2im_gpu(&program, &ctx, (cl_mem) col_buff, + col_buff_off, conv_in_channels_, + conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + (cl_mem) data, data_off); + } + } else { + if (this->use_skernel_) { + greentea_col2im_ndsk_gpu(&program, &ctx, (cl_mem) col_buff, + col_buff_off, num_spatial_axes_, + num_kernels_col2im_, + (cl_mem) (conv_input_shape_.gpu_data()), + (cl_mem) (col_buffer_.gpu_shape()), + (cl_mem) (kernel_shape_.gpu_data()), + (cl_mem) (pad_.gpu_data()), + (cl_mem) (stride_.gpu_data()), + (cl_mem) (kstride_.gpu_data()), + (cl_mem) data, + data_off); + } else { + greentea_col2im_nd_gpu(&program, &ctx, (cl_mem) col_buff, + col_buff_off, num_spatial_axes_, + 0, + num_kernels_col2im_, + (cl_mem) (conv_input_shape_.gpu_data()), + (cl_mem) (col_buffer_.gpu_shape()), + (cl_mem) (kernel_shape_.gpu_data()), + (cl_mem) (pad_.gpu_data()), + (cl_mem) (stride_.gpu_data()), + (cl_mem) data, + data_off); + } + } } #endif // USE_GREENTEA #endif // !CPU_ONLY diff --git a/log.txt b/log.txt index 948175bc454..631be5af6f7 100644 --- a/log.txt +++ b/log.txt @@ -1,7 +1,46 @@ +.build_release/tools/caffe +caffe: command line brew +usage: caffe + +commands: + train train or finetune a model + test score a model + device_query show GPU diagnostic information + time benchmark model execution time + + Flags from tools/caffe.cpp: + -gpu (Optional; run in GPU mode on given device IDs separated by ','.Use + '-gpu all' to run on all available GPUs. The effective training batch + size is multiplied by the number of devices.) type: string default: "" + -iterations (The number of iterations to run.) type: int32 default: 50 + -model (The model definition protocol buffer text file..) type: string + default: "" + -sighup_effect (Optional; action to take when a SIGHUP signal is received: + snapshot, stop or none.) type: string default: "snapshot" + -sigint_effect (Optional; action to take when a SIGINT signal is received: + snapshot, stop or none.) type: string default: "stop" + -snapshot (Optional; the snapshot solver state to resume training.) + type: string default: "" + -solver (The solver definition protocol buffer text file.) type: string + default: "" + -weights (Optional; the pretrained weights to initialize finetuning, + separated by ','. Cannot be set simultaneously with snapshot.) + type: string default: "" +.build_release/test/test_all.testbin 1 --gtest_shuffle Setting to use device 1 -Build Status = -2 ( Err = -42 ) -Log: ptxas application ptx input, line 10702; error : Call has wrong number of parameters -ptxas fatal : Ptx assembly aborted due to errors +Build Status = -2 ( Err = -11 ) +Log: :1082:21: warning: initializing '__global float *__attribute__((address_space(16776963)))' with an expression of type 'const __global float *' discards qualifiers + __global Dtype* top_diff_off = top_diff + offset; + ^ ~~~~~~~~~~~~~~~~~ +:2752:15: error: conflicting types for 'col2im_nd_gpu_kernel' +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + ^ +:648:15: note: previous definition is here +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + ^ +:3186:21: warning: initializing '__global double *__attribute__((address_space(16776963)))' with an expression of type 'const __global double *' discards qualifiers + __global Dtype* top_diff_off = top_diff + offset; + ^ ~~~~~~~~~~~~~~~~~ Sources: #ifndef __OPENCL_VERSION__ #define __kernel @@ -470,7 +509,6 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#endif __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, __global const Dtype* top_diff, const int M, const int N, const int K, @@ -486,6 +524,7 @@ __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const } } #endif +#endif #ifndef __OPENCL_VERSION__ #include "header.cl" @@ -575,6 +614,160 @@ __kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_col, + const int data_col_off) { + + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + channel_out; + __global const Dtype* data_im_ptr = data_im + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + + + +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + __global const Dtype* data_col, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im) { + int d_im[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_im, const int data_off, __global const int* im_shape, @@ -655,7 +848,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_col, const int data_col_off, __global const int* im_shape, @@ -714,7 +907,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } } if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) + continue; } // Loop over the col to compute the output val. Dtype val = 0; @@ -747,7 +940,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) + } } #ifndef __OPENCL_VERSION__ @@ -2420,7 +2613,6 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#endif __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, __global const Dtype* top_diff, const int M, const int N, const int K, @@ -2436,6 +2628,7 @@ __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const } } #endif +#endif #ifndef __OPENCL_VERSION__ #include "header.cl" @@ -2525,6 +2718,160 @@ __kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_col, + const int data_col_off) { + + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + channel_out; + __global const Dtype* data_im_ptr = data_im + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + + + +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + __global const Dtype* data_col, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im) { + int d_im[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_im, const int data_off, __global const int* im_shape, @@ -2605,7 +2952,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_col, const int data_col_off, __global const int* im_shape, @@ -2664,7 +3011,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } } if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) + continue; } // Loop over the col to compute the output val. Dtype val = 0; @@ -2697,7 +3044,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) + } } #ifndef __OPENCL_VERSION__ @@ -3939,9 +4286,19 @@ __kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, #endif -Build Status = -2 ( Err = -42 ) -Log: ptxas application ptx input, line 10702; error : Call has wrong number of parameters -ptxas fatal : Ptx assembly aborted due to errors +Build Status = -2 ( Err = -11 ) +Log: :1082:21: warning: initializing '__global float *__attribute__((address_space(16776963)))' with an expression of type 'const __global float *' discards qualifiers + __global Dtype* top_diff_off = top_diff + offset; + ^ ~~~~~~~~~~~~~~~~~ +:2752:15: error: conflicting types for 'col2im_nd_gpu_kernel' +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + ^ +:648:15: note: previous definition is here +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + ^ +:3186:21: warning: initializing '__global double *__attribute__((address_space(16776963)))' with an expression of type 'const __global double *' discards qualifiers + __global Dtype* top_diff_off = top_diff + offset; + ^ ~~~~~~~~~~~~~~~~~ Sources: #ifndef __OPENCL_VERSION__ #define __kernel @@ -4410,7 +4767,6 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#endif __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, __global const Dtype* top_diff, const int M, const int N, const int K, @@ -4426,6 +4782,7 @@ __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const } } #endif +#endif #ifndef __OPENCL_VERSION__ #include "header.cl" @@ -4515,16 +4872,16 @@ __kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, - __global const Dtype* data_im, - const int data_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, - __global const int* kstride, - __global Dtype* data_col, - const int data_col_off) { + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; int d_iter[6]; int i; @@ -4550,8 +4907,8 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, data_col_inc *= col_shape[i + 1]; d_iter[i] = 0; } - __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; - __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + __global Dtype* data_col_ptr = data_col + channel_out; + __global const Dtype* data_im_ptr = data_im + channel_in; bool incremented; do { bool in_range = true; @@ -4562,7 +4919,161 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, break; } } - + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + + + +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + __global const Dtype* data_col, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im) { + int d_im[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global const int* kstride, + __global Dtype* data_col, + const int data_col_off) { + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + // Write column data if (in_range) { int data_im_offset = d_iter[0]; @@ -4595,7 +5106,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_col, const int data_col_off, __global const int* im_shape, @@ -4654,7 +5165,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } } if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) + continue; } // Loop over the col to compute the output val. Dtype val = 0; @@ -4687,7 +5198,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) + } } #ifndef __OPENCL_VERSION__ @@ -6360,7 +6871,6 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#endif __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, __global const Dtype* top_diff, const int M, const int N, const int K, @@ -6376,6 +6886,7 @@ __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const } } #endif +#endif #ifndef __OPENCL_VERSION__ #include "header.cl" @@ -6465,6 +6976,160 @@ __kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_col, + const int data_col_off) { + + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + channel_out; + __global const Dtype* data_im_ptr = data_im + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + + + +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + __global const Dtype* data_col, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im) { + int d_im[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_im, const int data_off, __global const int* im_shape, @@ -6545,7 +7210,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_col, const int data_col_off, __global const int* im_shape, @@ -6604,7 +7269,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } } if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) + continue; } // Loop over the col to compute the output val. Dtype val = 0; @@ -6637,7 +7302,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) + } } #ifndef __OPENCL_VERSION__ @@ -7879,9 +8544,19 @@ __kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, #endif -Build Status = -2 ( Err = -42 ) -Log: ptxas application ptx input, line 10702; error : Call has wrong number of parameters -ptxas fatal : Ptx assembly aborted due to errors +Build Status = -2 ( Err = -11 ) +Log: :1082:21: warning: initializing '__global float *__attribute__((address_space(16776963)))' with an expression of type 'const __global float *' discards qualifiers + __global Dtype* top_diff_off = top_diff + offset; + ^ ~~~~~~~~~~~~~~~~~ +:2752:15: error: conflicting types for 'col2im_nd_gpu_kernel' +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + ^ +:648:15: note: previous definition is here +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + ^ +:3186:21: warning: initializing '__global double *__attribute__((address_space(16776963)))' with an expression of type 'const __global double *' discards qualifiers + __global Dtype* top_diff_off = top_diff + offset; + ^ ~~~~~~~~~~~~~~~~~ Sources: #ifndef __OPENCL_VERSION__ #define __kernel @@ -8350,7 +9025,6 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#endif __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, __global const Dtype* top_diff, const int M, const int N, const int K, @@ -8366,6 +9040,7 @@ __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const } } #endif +#endif #ifndef __OPENCL_VERSION__ #include "header.cl" @@ -8455,12 +9130,166 @@ __kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, - __global const Dtype* data_im, - const int data_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_col, + const int data_col_off) { + + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + channel_out; + __global const Dtype* data_im_ptr = data_im + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + + + +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + __global const Dtype* data_col, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im) { + int d_im[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, __global const int* stride, __global const int* kstride, __global Dtype* data_col, @@ -8535,7 +9364,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_col, const int data_col_off, __global const int* im_shape, @@ -8594,7 +9423,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } } if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) + continue; } // Loop over the col to compute the output val. Dtype val = 0; @@ -8627,7 +9456,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) + } } #ifndef __OPENCL_VERSION__ @@ -10300,7 +11129,6 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt newVal.floatVal = prevVal.floatVal + operand; } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -#endif __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, __global const Dtype* top_diff, const int M, const int N, const int K, @@ -10316,6 +11144,7 @@ __kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const } } #endif +#endif #ifndef __OPENCL_VERSION__ #include "header.cl" @@ -10405,6 +11234,160 @@ __kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + __global const Dtype* data_im, + const int data_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_col, + const int data_col_off) { + + int d_temp[6]; + int d_iter[6]; + int i; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_in = index; + int channel_out = 1; + for (i = num_axes - 1; i >= 0; --i) { + d_temp[i] = channel_in % col_shape[i + 1]; + channel_in /= col_shape[i + 1]; + channel_out *= kernel_shape[i]; + } + channel_out *= channel_in; + int data_col_inc = 1; + for (i = 0; i < num_axes; ++i) { + channel_out *= col_shape[i + 1]; + channel_out += d_temp[i]; + d_temp[i] = d_temp[i] * stride[i] - pad[i]; + channel_in *= im_shape[i + 1]; + channel_in += d_temp[i]; + data_col_inc *= col_shape[i + 1]; + d_iter[i] = 0; + } + __global Dtype* data_col_ptr = data_col + channel_out; + __global const Dtype* data_im_ptr = data_im + channel_in; + bool incremented; + do { + bool in_range = true; + for (i = 0; i < num_axes; ++i) { + const int d_iter_im = d_iter[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + if (!in_range) { + break; + } + } + if (in_range) { + int data_im_offset = d_iter[0]; + for (i = 1; i < num_axes; ++i) { + data_im_offset *= im_shape[i + 1]; + data_im_offset += d_iter[i]; + } + *data_col_ptr = data_im_ptr[data_im_offset]; + } else { + *data_col_ptr = 0; + } + data_col_ptr += data_col_inc; + incremented = false; + for (i = num_axes - 1; i >= 0; --i) { + const int d_max = kernel_shape[i]; + if (d_iter[i] == d_max - 1) { + d_iter[i] = 0; + } else { // d_iter[i] < d_max - 1 + ++d_iter[i]; + incremented = true; + break; + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); // do + } +} + + + +__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, + __global const Dtype* data_col, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im) { + int d_im[6]; + int d_col_iter[6]; + int d_col_start[6]; + int d_col_end[6]; + + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with intermediate + // computations used to compute the spatial indices. + int channel_im = index; + // Calculate d_im (image dimensions). + for (int i = num_axes - 1; i >= 0; --i) { + d_im[i] = channel_im % im_shape[i + 1] + pad[i]; + channel_im /= im_shape[i + 1]; + } + // Calculate col start/end indices. + bool done = false; + for (int i = 0; i < num_axes; ++i) { + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_shape[i]) ? + 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { + // Skip computation if the dimension is 0 at any spatial axis -- + // final val will be 0. + data_im[index] = 0; + done = true; + break; // for (int i = 0; i < num_axes; ++i) + } + } + if (done) { + continue; + } + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + do { + // Compute the final offset. + int final_offset = 0; + int kernel_shape_prod = 1; + for (int i = num_axes - 1; i >= 0; --i) { + final_offset += (d_im[i] - d_col_iter[i] * stride[i]) + * kernel_shape_prod; + kernel_shape_prod *= kernel_shape[i]; + } + final_offset += kernel_shape_prod * channel_im; + for (int i = 0; i < num_axes; ++i) { + final_offset *= col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[final_offset]; + incremented = false; + for (int i = num_axes - 1; i >= 0; --i) { + const int d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int i = num_axes - 1; i >= 0; --i) + } + } // for (int i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[index] = val; + } +} + +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_im, const int data_off, __global const int* im_shape, @@ -10485,7 +11468,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, __global const Dtype* data_col, const int data_col_off, __global const int* im_shape, @@ -10544,7 +11527,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } } if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) + continue; } // Loop over the col to compute the output val. Dtype val = 0; @@ -10577,7 +11560,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) + } } #ifndef __OPENCL_VERSION__ @@ -11819,98 +12802,473 @@ __kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, #endif -Note: Google Test filter = *Embed* -[==========] Running 20 tests from 4 test cases. +Note: Randomizing tests' orders with a seed of 36961 . +[==========] Running 1647 tests from 240 test cases. [----------] Global test environment set-up. -[----------] 5 tests from EmbedLayerTest/0, where TypeParam = caffe::CPUDevice -[ RUN ] EmbedLayerTest/0.TestSetUp -[ OK ] EmbedLayerTest/0.TestSetUp (1 ms) -[ RUN ] EmbedLayerTest/0.TestForward -[ OK ] EmbedLayerTest/0.TestForward (0 ms) -[ RUN ] EmbedLayerTest/0.TestForwardWithBias -[ OK ] EmbedLayerTest/0.TestForwardWithBias (0 ms) -[ RUN ] EmbedLayerTest/0.TestGradient -[ OK ] EmbedLayerTest/0.TestGradient (7 ms) -[ RUN ] EmbedLayerTest/0.TestGradientWithBias -[ OK ] EmbedLayerTest/0.TestGradientWithBias (12 ms) -[----------] 5 tests from EmbedLayerTest/0 (20 ms total) - -[----------] 5 tests from EmbedLayerTest/1, where TypeParam = caffe::CPUDevice -[ RUN ] EmbedLayerTest/1.TestSetUp -[ OK ] EmbedLayerTest/1.TestSetUp (0 ms) -[ RUN ] EmbedLayerTest/1.TestForward -[ OK ] EmbedLayerTest/1.TestForward (0 ms) -[ RUN ] EmbedLayerTest/1.TestForwardWithBias -[ OK ] EmbedLayerTest/1.TestForwardWithBias (0 ms) -[ RUN ] EmbedLayerTest/1.TestGradient -[ OK ] EmbedLayerTest/1.TestGradient (7 ms) -[ RUN ] EmbedLayerTest/1.TestGradientWithBias -[ OK ] EmbedLayerTest/1.TestGradientWithBias (12 ms) -[----------] 5 tests from EmbedLayerTest/1 (19 ms total) - -[----------] 5 tests from EmbedLayerTest/2, where TypeParam = caffe::GPUDevice -[ RUN ] EmbedLayerTest/2.TestSetUp -[ OK ] EmbedLayerTest/2.TestSetUp (0 ms) -[ RUN ] EmbedLayerTest/2.TestForward +[----------] 4 tests from BlobSimpleTest/1, where TypeParam = double +[ RUN ] BlobSimpleTest/1.TestPointersCPUGPU +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] BlobSimpleTest/1.TestPointersCPUGPU, where TypeParam = double (0 ms) +[ RUN ] BlobSimpleTest/1.TestLegacyBlobProtoShapeEquals +[ OK ] BlobSimpleTest/1.TestLegacyBlobProtoShapeEquals (0 ms) +[ RUN ] BlobSimpleTest/1.TestInitialization +[ OK ] BlobSimpleTest/1.TestInitialization (0 ms) +[ RUN ] BlobSimpleTest/1.TestReshape +[ OK ] BlobSimpleTest/1.TestReshape (0 ms) +[----------] 4 tests from BlobSimpleTest/1 (1 ms total) + +[----------] 10 tests from ConcatLayerTest/2, where TypeParam = caffe::GPUDevice +[ RUN ] ConcatLayerTest/2.TestSetupChannels +[ OK ] ConcatLayerTest/2.TestSetupChannels (0 ms) +[ RUN ] ConcatLayerTest/2.TestForwardTrivial +[ OK ] ConcatLayerTest/2.TestForwardTrivial (0 ms) +[ RUN ] ConcatLayerTest/2.TestForwardChannels Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/2.TestForward, where TypeParam = caffe::GPUDevice (0 ms) -[ RUN ] EmbedLayerTest/2.TestForwardWithBias +[ FAILED ] ConcatLayerTest/2.TestForwardChannels, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConcatLayerTest/2.TestGradientChannels Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/2.TestForwardWithBias, where TypeParam = caffe::GPUDevice (0 ms) -[ RUN ] EmbedLayerTest/2.TestGradient +[ FAILED ] ConcatLayerTest/2.TestGradientChannels, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] ConcatLayerTest/2.TestForwardNum Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/2.TestGradient, where TypeParam = caffe::GPUDevice (0 ms) -[ RUN ] EmbedLayerTest/2.TestGradientWithBias +[ FAILED ] ConcatLayerTest/2.TestForwardNum, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConcatLayerTest/2.TestGradientChannelsBottomOneOnly Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/2.TestGradientWithBias, where TypeParam = caffe::GPUDevice (0 ms) -[----------] 5 tests from EmbedLayerTest/2 (1 ms total) +[ FAILED ] ConcatLayerTest/2.TestGradientChannelsBottomOneOnly, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConcatLayerTest/2.TestSetupChannelsNegativeIndexing +[ OK ] ConcatLayerTest/2.TestSetupChannelsNegativeIndexing (0 ms) +[ RUN ] ConcatLayerTest/2.TestSetupNum +[ OK ] ConcatLayerTest/2.TestSetupNum (0 ms) +[ RUN ] ConcatLayerTest/2.TestGradientNum +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConcatLayerTest/2.TestGradientNum, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConcatLayerTest/2.TestGradientTrivial +[ OK ] ConcatLayerTest/2.TestGradientTrivial (2 ms) +[----------] 10 tests from ConcatLayerTest/2 (4 ms total) + +[----------] 1 test from HDF5OutputLayerTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] HDF5OutputLayerTest/1.TestForward +[ OK ] HDF5OutputLayerTest/1.TestForward (3 ms) +[----------] 1 test from HDF5OutputLayerTest/1 (3 ms total) + +[----------] 5 tests from DeconvolutionLayerTest/2, where TypeParam = caffe::GPUDevice +[ RUN ] DeconvolutionLayerTest/2.TestSetup +[ OK ] DeconvolutionLayerTest/2.TestSetup (0 ms) +[ RUN ] DeconvolutionLayerTest/2.TestGradient +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] DeconvolutionLayerTest/2.TestGradient, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] DeconvolutionLayerTest/2.TestSimpleDeconvolution +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] DeconvolutionLayerTest/2.TestSimpleDeconvolution, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] DeconvolutionLayerTest/2.TestGradient3D +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] DeconvolutionLayerTest/2.TestGradient3D, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] DeconvolutionLayerTest/2.TestNDAgainst2D +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] DeconvolutionLayerTest/2.TestNDAgainst2D, where TypeParam = caffe::GPUDevice (5 ms) +[----------] 5 tests from DeconvolutionLayerTest/2 (8 ms total) -[----------] 5 tests from EmbedLayerTest/3, where TypeParam = caffe::GPUDevice -[ RUN ] EmbedLayerTest/3.TestSetUp -[ OK ] EmbedLayerTest/3.TestSetUp (0 ms) -[ RUN ] EmbedLayerTest/3.TestForward +[----------] 1 test from HDF5DataLayerTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] HDF5DataLayerTest/1.TestRead +[ OK ] HDF5DataLayerTest/1.TestRead (3 ms) +[----------] 1 test from HDF5DataLayerTest/1 (3 ms total) + +[----------] 8 tests from RMSPropSolverTest/2, where TypeParam = caffe::GPUDevice +[ RUN ] RMSPropSolverTest/2.TestLeastSquaresUpdateWithEverythingAccum +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestLeastSquaresUpdateWithEverythingAccum, where TypeParam = caffe::GPUDevice (2 ms) +[ RUN ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithEverythingShare +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithEverythingShare, where TypeParam = caffe::GPUDevice (3 ms) +[ RUN ] RMSPropSolverTest/2.TestSnapshot +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestSnapshot, where TypeParam = caffe::GPUDevice (2 ms) +[ RUN ] RMSPropSolverTest/2.TestSnapshotShare +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestSnapshotShare, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] RMSPropSolverTest/2.TestLeastSquaresUpdateWithEverythingAccumShare +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestLeastSquaresUpdateWithEverythingAccumShare, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithEverything +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithEverything, where TypeParam = caffe::GPUDevice (2 ms) +[ RUN ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithRmsDecay +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithRmsDecay, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithWeightDecay +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] RMSPropSolverTest/2.TestRMSPropLeastSquaresUpdateWithWeightDecay, where TypeParam = caffe::GPUDevice (1 ms) +[----------] 8 tests from RMSPropSolverTest/2 (15 ms total) + +[----------] 2 tests from GemmTest/0, where TypeParam = float +[ RUN ] GemmTest/0.TestGemvCPUGPU +[ OK ] GemmTest/0.TestGemvCPUGPU (3 ms) +[ RUN ] GemmTest/0.TestGemmCPUGPU +[ OK ] GemmTest/0.TestGemmCPUGPU (2 ms) +[----------] 2 tests from GemmTest/0 (5 ms total) + +[----------] 3 tests from BlobMathTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] BlobMathTest/1.TestScaleData +[ OK ] BlobMathTest/1.TestScaleData (1 ms) +[ RUN ] BlobMathTest/1.TestSumOfSquares +[ OK ] BlobMathTest/1.TestSumOfSquares (0 ms) +[ RUN ] BlobMathTest/1.TestAsum +[ OK ] BlobMathTest/1.TestAsum (0 ms) +[----------] 3 tests from BlobMathTest/1 (1 ms total) + +[----------] 3 tests from FilterLayerTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] FilterLayerTest/1.TestReshape +[ OK ] FilterLayerTest/1.TestReshape (0 ms) +[ RUN ] FilterLayerTest/1.TestGradient +[ OK ] FilterLayerTest/1.TestGradient (212 ms) +[ RUN ] FilterLayerTest/1.TestForward +[ OK ] FilterLayerTest/1.TestForward (0 ms) +[----------] 3 tests from FilterLayerTest/1 (212 ms total) + +[----------] 3 tests from PaddingLayerUpgradeTest +[ RUN ] PaddingLayerUpgradeTest.TestImageNet +[ OK ] PaddingLayerUpgradeTest.TestImageNet (1 ms) +[ RUN ] PaddingLayerUpgradeTest.TestSimple +[ OK ] PaddingLayerUpgradeTest.TestSimple (1 ms) +[ RUN ] PaddingLayerUpgradeTest.TestTwoTops +[ OK ] PaddingLayerUpgradeTest.TestTwoTops (0 ms) +[----------] 3 tests from PaddingLayerUpgradeTest (2 ms total) + +[----------] 5 tests from DeconvolutionLayerTest/0, where TypeParam = caffe::CPUDevice +[ RUN ] DeconvolutionLayerTest/0.TestGradient +[ OK ] DeconvolutionLayerTest/0.TestGradient (533 ms) +[ RUN ] DeconvolutionLayerTest/0.TestGradient3D +[ OK ] DeconvolutionLayerTest/0.TestGradient3D (135 ms) +[ RUN ] DeconvolutionLayerTest/0.TestSetup +[ OK ] DeconvolutionLayerTest/0.TestSetup (1 ms) +[ RUN ] DeconvolutionLayerTest/0.TestNDAgainst2D +[ OK ] DeconvolutionLayerTest/0.TestNDAgainst2D (802 ms) +[ RUN ] DeconvolutionLayerTest/0.TestSimpleDeconvolution +[ OK ] DeconvolutionLayerTest/0.TestSimpleDeconvolution (1 ms) +[----------] 5 tests from DeconvolutionLayerTest/0 (1472 ms total) + +[----------] 3 tests from FilterLayerTest/3, where TypeParam = caffe::GPUDevice +[ RUN ] FilterLayerTest/3.TestReshape +[ OK ] FilterLayerTest/3.TestReshape (0 ms) +[ RUN ] FilterLayerTest/3.TestGradient +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] FilterLayerTest/3.TestGradient, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] FilterLayerTest/3.TestForward +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] FilterLayerTest/3.TestForward, where TypeParam = caffe::GPUDevice (0 ms) +[----------] 3 tests from FilterLayerTest/3 (1 ms total) + +[----------] 1 test from SolverTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] SolverTest/1.TestInitTrainTestNets +[ OK ] SolverTest/1.TestInitTrainTestNets (1 ms) +[----------] 1 test from SolverTest/1 (1 ms total) + +[----------] 12 tests from ConvolutionLayerTest/2, where TypeParam = caffe::GPUDevice +[ RUN ] ConvolutionLayerTest/2.TestSimpleConvolution +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestSimpleConvolution, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConvolutionLayerTest/2.TestSetup +[ OK ] ConvolutionLayerTest/2.TestSetup (0 ms) +[ RUN ] ConvolutionLayerTest/2.TestSimple3DConvolution +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestSimple3DConvolution, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConvolutionLayerTest/2.TestGradient +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestGradient, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] ConvolutionLayerTest/2.TestSimpleConvolutionGroup +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestSimpleConvolutionGroup, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] ConvolutionLayerTest/2.TestNDAgainst2D +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestNDAgainst2D, where TypeParam = caffe::GPUDevice (2 ms) +[ RUN ] ConvolutionLayerTest/2.TestSobelConvolution +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestSobelConvolution, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] ConvolutionLayerTest/2.TestGradientGroup +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestGradientGroup, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConvolutionLayerTest/2.TestGradient3D +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.TestGradient3D, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] ConvolutionLayerTest/2.Test1x1Gradient +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.Test1x1Gradient, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] ConvolutionLayerTest/2.Test1x1Convolution +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.Test1x1Convolution, where TypeParam = caffe::GPUDevice (0 ms) +[ RUN ] ConvolutionLayerTest/2.Test0DConvolution +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] ConvolutionLayerTest/2.Test0DConvolution, where TypeParam = caffe::GPUDevice (0 ms) +[----------] 12 tests from ConvolutionLayerTest/2 (9 ms total) + +[----------] 44 tests from NeuronLayerTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] NeuronLayerTest/1.TestAbsGradient +[ OK ] NeuronLayerTest/1.TestAbsGradient (1 ms) +[ RUN ] NeuronLayerTest/1.TestDropoutHalf +[ OK ] NeuronLayerTest/1.TestDropoutHalf (0 ms) +[ RUN ] NeuronLayerTest/1.TestReLUGradient +[ OK ] NeuronLayerTest/1.TestReLUGradient (2 ms) +[ RUN ] NeuronLayerTest/1.TestDropoutGradient +[ OK ] NeuronLayerTest/1.TestDropoutGradient (2 ms) +[ RUN ] NeuronLayerTest/1.TestPReLUParam +[ OK ] NeuronLayerTest/1.TestPReLUParam (0 ms) +[ RUN ] NeuronLayerTest/1.TestExpGradientBase2Scale3 +[ OK ] NeuronLayerTest/1.TestExpGradientBase2Scale3 (2 ms) +[ RUN ] NeuronLayerTest/1.TestLogGradientBase2Scale3 +[ OK ] NeuronLayerTest/1.TestLogGradientBase2Scale3 (3 ms) +[ RUN ] NeuronLayerTest/1.TestExpGradient +[ OK ] NeuronLayerTest/1.TestExpGradient (2 ms) +[ RUN ] NeuronLayerTest/1.TestPReLUInPlace +[ OK ] NeuronLayerTest/1.TestPReLUInPlace (0 ms) +[ RUN ] NeuronLayerTest/1.TestExpLayerBase2Shift1 +[ OK ] NeuronLayerTest/1.TestExpLayerBase2Shift1 (0 ms) +[ RUN ] NeuronLayerTest/1.TestTanHGradient +[ OK ] NeuronLayerTest/1.TestTanHGradient (3 ms) +[ RUN ] NeuronLayerTest/1.TestExpGradientBase2Shift1Scale3 +[ OK ] NeuronLayerTest/1.TestExpGradientBase2Shift1Scale3 (3 ms) +[ RUN ] NeuronLayerTest/1.TestLogLayerBase2Shift1 +[ OK ] NeuronLayerTest/1.TestLogLayerBase2Shift1 (0 ms) +[ RUN ] NeuronLayerTest/1.TestReLU +[ OK ] NeuronLayerTest/1.TestReLU (0 ms) +[ RUN ] NeuronLayerTest/1.TestExpGradientBase2 +[ OK ] NeuronLayerTest/1.TestExpGradientBase2 (2 ms) +[ RUN ] NeuronLayerTest/1.TestDropoutThreeQuarters +[ OK ] NeuronLayerTest/1.TestDropoutThreeQuarters (0 ms) +[ RUN ] NeuronLayerTest/1.TestExpGradientBase2Shift1 +[ OK ] NeuronLayerTest/1.TestExpGradientBase2Shift1 (2 ms) +[ RUN ] NeuronLayerTest/1.TestLogGradient +[ OK ] NeuronLayerTest/1.TestLogGradient (3 ms) +[ RUN ] NeuronLayerTest/1.TestBNLLGradient +[ OK ] NeuronLayerTest/1.TestBNLLGradient (4 ms) +[ RUN ] NeuronLayerTest/1.TestLogGradientBase2 +[ OK ] NeuronLayerTest/1.TestLogGradientBase2 (3 ms) +[ RUN ] NeuronLayerTest/1.TestSigmoidGradient +[ OK ] NeuronLayerTest/1.TestSigmoidGradient (2 ms) +[ RUN ] NeuronLayerTest/1.TestExpLayer +[ OK ] NeuronLayerTest/1.TestExpLayer (0 ms) +[ RUN ] NeuronLayerTest/1.TestPReLUForward +[ OK ] NeuronLayerTest/1.TestPReLUForward (0 ms) +[ RUN ] NeuronLayerTest/1.TestLogGradientBase2Shift1 +[ OK ] NeuronLayerTest/1.TestLogGradientBase2Shift1 (3 ms) +[ RUN ] NeuronLayerTest/1.TestExpLayerBase2 +[ OK ] NeuronLayerTest/1.TestExpLayerBase2 (0 ms) +[ RUN ] NeuronLayerTest/1.TestLogLayerBase2Shift1Scale3 +[ OK ] NeuronLayerTest/1.TestLogLayerBase2Shift1Scale3 (0 ms) +[ RUN ] NeuronLayerTest/1.TestReLUGradientWithNegativeSlope +[ OK ] NeuronLayerTest/1.TestReLUGradientWithNegativeSlope (1 ms) +[ RUN ] NeuronLayerTest/1.TestLogLayer +[ OK ] NeuronLayerTest/1.TestLogLayer (0 ms) +[ RUN ] NeuronLayerTest/1.TestPReLUGradientChannelShared +[ OK ] NeuronLayerTest/1.TestPReLUGradientChannelShared (93 ms) +[ RUN ] NeuronLayerTest/1.TestPReLUGradient +[ OK ] NeuronLayerTest/1.TestPReLUGradient (92 ms) +[ RUN ] NeuronLayerTest/1.TestDropoutGradientTest +[ OK ] NeuronLayerTest/1.TestDropoutGradientTest (2 ms) +[ RUN ] NeuronLayerTest/1.TestBNLL +[ OK ] NeuronLayerTest/1.TestBNLL (0 ms) +[ RUN ] NeuronLayerTest/1.TestDropoutTestPhase +[ OK ] NeuronLayerTest/1.TestDropoutTestPhase (0 ms) +[ RUN ] NeuronLayerTest/1.TestPReLUForwardChannelShared +[ OK ] NeuronLayerTest/1.TestPReLUForwardChannelShared (1 ms) +[ RUN ] NeuronLayerTest/1.TestLogGradientBase2Shift1Scale3 +[ OK ] NeuronLayerTest/1.TestLogGradientBase2Shift1Scale3 (2 ms) +[ RUN ] NeuronLayerTest/1.TestTanH +[ OK ] NeuronLayerTest/1.TestTanH (0 ms) +[ RUN ] NeuronLayerTest/1.TestSigmoid +[ OK ] NeuronLayerTest/1.TestSigmoid (0 ms) +[ RUN ] NeuronLayerTest/1.TestExpLayerBase2Shift1Scale3 +[ OK ] NeuronLayerTest/1.TestExpLayerBase2Shift1Scale3 (0 ms) +[ RUN ] NeuronLayerTest/1.TestPReLUConsistencyReLU +[ OK ] NeuronLayerTest/1.TestPReLUConsistencyReLU (0 ms) +[ RUN ] NeuronLayerTest/1.TestExpLayerBase2Scale3 +[ OK ] NeuronLayerTest/1.TestExpLayerBase2Scale3 (1 ms) +[ RUN ] NeuronLayerTest/1.TestLogLayerBase2 +[ OK ] NeuronLayerTest/1.TestLogLayerBase2 (0 ms) +[ RUN ] NeuronLayerTest/1.TestAbsVal +[ OK ] NeuronLayerTest/1.TestAbsVal (0 ms) +[ RUN ] NeuronLayerTest/1.TestLogLayerBase2Scale3 +[ OK ] NeuronLayerTest/1.TestLogLayerBase2Scale3 (0 ms) +[ RUN ] NeuronLayerTest/1.TestReLUWithNegativeSlope +[ OK ] NeuronLayerTest/1.TestReLUWithNegativeSlope (0 ms) +[----------] 44 tests from NeuronLayerTest/1 (237 ms total) + +[----------] 6 tests from MVNLayerTest/3, where TypeParam = caffe::GPUDevice +[ RUN ] MVNLayerTest/3.TestForward +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] MVNLayerTest/3.TestForward, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] MVNLayerTest/3.TestGradientMeanOnly +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] MVNLayerTest/3.TestGradientMeanOnly, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] MVNLayerTest/3.TestForwardAcrossChannels +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] MVNLayerTest/3.TestForwardAcrossChannels, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] MVNLayerTest/3.TestGradientAcrossChannels +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] MVNLayerTest/3.TestGradientAcrossChannels, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] MVNLayerTest/3.TestForwardMeanOnly +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] MVNLayerTest/3.TestForwardMeanOnly, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] MVNLayerTest/3.TestGradient +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] MVNLayerTest/3.TestGradient, where TypeParam = caffe::GPUDevice (1 ms) +[----------] 6 tests from MVNLayerTest/3 (7 ms total) + +[----------] 3 tests from GPUStochasticPoolingLayerTest/1, where TypeParam = double +[ RUN ] GPUStochasticPoolingLayerTest/1.TestStochasticTestPhase +Number of kernels in program: 0 +unknown file: Failure +Unknown C++ exception thrown in the test body. +[ FAILED ] GPUStochasticPoolingLayerTest/1.TestStochasticTestPhase, where TypeParam = double (1 ms) +[ RUN ] GPUStochasticPoolingLayerTest/1.TestStochastic Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/3.TestForward, where TypeParam = caffe::GPUDevice (0 ms) -[ RUN ] EmbedLayerTest/3.TestForwardWithBias +[ FAILED ] GPUStochasticPoolingLayerTest/1.TestStochastic, where TypeParam = double (1 ms) +[ RUN ] GPUStochasticPoolingLayerTest/1.TestGradient Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/3.TestForwardWithBias, where TypeParam = caffe::GPUDevice (0 ms) -[ RUN ] EmbedLayerTest/3.TestGradient +[ FAILED ] GPUStochasticPoolingLayerTest/1.TestGradient, where TypeParam = double (1 ms) +[----------] 3 tests from GPUStochasticPoolingLayerTest/1 (3 ms total) + +[----------] 3 tests from BlobMathTest/0, where TypeParam = caffe::CPUDevice +[ RUN ] BlobMathTest/0.TestAsum +[ OK ] BlobMathTest/0.TestAsum (0 ms) +[ RUN ] BlobMathTest/0.TestSumOfSquares +[ OK ] BlobMathTest/0.TestSumOfSquares (0 ms) +[ RUN ] BlobMathTest/0.TestScaleData +[ OK ] BlobMathTest/0.TestScaleData (0 ms) +[----------] 3 tests from BlobMathTest/0 (1 ms total) + +[----------] 2 tests from HingeLossLayerTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] HingeLossLayerTest/1.TestGradientL1 +[ OK ] HingeLossLayerTest/1.TestGradientL1 (0 ms) +[ RUN ] HingeLossLayerTest/1.TestGradientL2 +[ OK ] HingeLossLayerTest/1.TestGradientL2 (1 ms) +[----------] 2 tests from HingeLossLayerTest/1 (1 ms total) + +[----------] 2 tests from SoftmaxLayerTest/0, where TypeParam = caffe::CPUDevice +[ RUN ] SoftmaxLayerTest/0.TestForward +[ OK ] SoftmaxLayerTest/0.TestForward (0 ms) +[ RUN ] SoftmaxLayerTest/0.TestGradient +[ OK ] SoftmaxLayerTest/0.TestGradient (174 ms) +[----------] 2 tests from SoftmaxLayerTest/0 (174 ms total) + +[----------] 4 tests from NetUpgradeTest +[ RUN ] NetUpgradeTest.TestImageNet +[ OK ] NetUpgradeTest.TestImageNet (2 ms) +[ RUN ] NetUpgradeTest.TestAllParams +[ OK ] NetUpgradeTest.TestAllParams (1 ms) +[ RUN ] NetUpgradeTest.TestSimple +[ OK ] NetUpgradeTest.TestSimple (1 ms) +[ RUN ] NetUpgradeTest.TestUpgradeV1LayerType +[ OK ] NetUpgradeTest.TestUpgradeV1LayerType (3 ms) +[----------] 4 tests from NetUpgradeTest (7 ms total) + +[----------] 4 tests from InnerProductLayerTest/0, where TypeParam = caffe::CPUDevice +[ RUN ] InnerProductLayerTest/0.TestForward +[ OK ] InnerProductLayerTest/0.TestForward (0 ms) +[ RUN ] InnerProductLayerTest/0.TestGradient +[ OK ] InnerProductLayerTest/0.TestGradient (87 ms) +[ RUN ] InnerProductLayerTest/0.TestSetUp +[ OK ] InnerProductLayerTest/0.TestSetUp (0 ms) +[ RUN ] InnerProductLayerTest/0.TestForwardNoBatch +[ OK ] InnerProductLayerTest/0.TestForwardNoBatch (0 ms) +[----------] 4 tests from InnerProductLayerTest/0 (88 ms total) + +[----------] 2 tests from HingeLossLayerTest/2, where TypeParam = caffe::GPUDevice +[ RUN ] HingeLossLayerTest/2.TestGradientL2 +[ OK ] HingeLossLayerTest/2.TestGradientL2 (198 ms) +[ RUN ] HingeLossLayerTest/2.TestGradientL1 +[ OK ] HingeLossLayerTest/2.TestGradientL1 (17 ms) +[----------] 2 tests from HingeLossLayerTest/2 (216 ms total) + +[----------] 2 tests from SigmoidCrossEntropyLossLayerTest/3, where TypeParam = caffe::GPUDevice +[ RUN ] SigmoidCrossEntropyLossLayerTest/3.TestGradient Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/3.TestGradient, where TypeParam = caffe::GPUDevice (0 ms) -[ RUN ] EmbedLayerTest/3.TestGradientWithBias +[ FAILED ] SigmoidCrossEntropyLossLayerTest/3.TestGradient, where TypeParam = caffe::GPUDevice (1 ms) +[ RUN ] SigmoidCrossEntropyLossLayerTest/3.TestSigmoidCrossEntropyLoss Number of kernels in program: 0 unknown file: Failure Unknown C++ exception thrown in the test body. -[ FAILED ] EmbedLayerTest/3.TestGradientWithBias, where TypeParam = caffe::GPUDevice (1 ms) -[----------] 5 tests from EmbedLayerTest/3 (2 ms total) - -[----------] Global test environment tear-down -[==========] 20 tests from 4 test cases ran. (42 ms total) -[ PASSED ] 12 tests. -[ FAILED ] 8 tests, listed below: -[ FAILED ] EmbedLayerTest/2.TestForward, where TypeParam = caffe::GPUDevice -[ FAILED ] EmbedLayerTest/2.TestForwardWithBias, where TypeParam = caffe::GPUDevice -[ FAILED ] EmbedLayerTest/2.TestGradient, where TypeParam = caffe::GPUDevice -[ FAILED ] EmbedLayerTest/2.TestGradientWithBias, where TypeParam = caffe::GPUDevice -[ FAILED ] EmbedLayerTest/3.TestForward, where TypeParam = caffe::GPUDevice -[ FAILED ] EmbedLayerTest/3.TestForwardWithBias, where TypeParam = caffe::GPUDevice -[ FAILED ] EmbedLayerTest/3.TestGradient, where TypeParam = caffe::GPUDevice -[ FAILED ] EmbedLayerTest/3.TestGradientWithBias, where TypeParam = caffe::GPUDevice - - 8 FAILED TESTS - YOU HAVE 2 DISABLED TESTS +[ FAILED ] SigmoidCrossEntropyLossLayerTest/3.TestSigmoidCrossEntropyLoss, where TypeParam = caffe::GPUDevice (0 ms) +[----------] 2 tests from SigmoidCrossEntropyLossLayerTest/3 (1 ms total) +[----------] 2 tests from SoftmaxLayerTest/1, where TypeParam = caffe::CPUDevice +[ RUN ] SoftmaxLayerTest/1.TestGradient +Makefile:573: recipe for target 'runtest' failed diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 25d5b1feb3c..b3506ba17cf 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -18,7 +18,7 @@ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n Dtype* data_col_ptr = data_col + channel_out;\n const Dtype* data_im_ptr = data_im + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes,\n __global const Dtype* data_col,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = data_col_off;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_ndsk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT @@ -41,7 +41,7 @@ std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n Dtype* data_col_ptr = data_col + channel_out;\n const Dtype* data_im_ptr = data_im + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes,\n __global const Dtype* data_col,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = data_col_off;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_ndsk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index 4dfc96781e1..bb2c93e6556 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -3,6 +3,7 @@ #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, + const int channel_axis, __global const Dtype* data_im, const int data_off, __global const int* im_shape, @@ -17,6 +18,9 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, int d_iter[6]; int i; + im_shape += channel_axis; + col_shape += channel_axis; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. @@ -38,8 +42,8 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, data_col_inc *= col_shape[i + 1]; d_iter[i] = 0; } - Dtype* data_col_ptr = data_col + channel_out; - const Dtype* data_im_ptr = data_im + channel_in; + __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; + __global const Dtype* data_im_ptr = data_im + data_off + channel_in; bool incremented; do { bool in_range = true; @@ -78,19 +82,25 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, -__kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, - __global const Dtype* data_col, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, - __global Dtype* data_im) { +__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, + const int channel_axis, + __global const Dtype* data_col, + const int data_col_off, + __global const int* im_shape, + __global const int* col_shape, + __global const int* kernel_shape, + __global const int* pad, + __global const int* stride, + __global Dtype* data_im, + const int data_im_off) { int d_im[6]; int d_col_iter[6]; int d_col_start[6]; int d_col_end[6]; + im_shape += channel_axis; + col_shape += channel_axis; + for (int index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. @@ -110,7 +120,7 @@ __kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. - data_im[index] = 0; + data_im[index + data_im_off] = 0; done = true; break; // for (int i = 0; i < num_axes; ++i) } @@ -123,7 +133,7 @@ __kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, bool incremented = true; do { // Compute the final offset. - int final_offset = 0; + int final_offset = data_col_off; int kernel_shape_prod = 1; for (int i = num_axes - 1; i >= 0; --i) { final_offset += (d_im[i] - d_col_iter[i] * stride[i]) @@ -148,6 +158,6 @@ __kernel void col2im_nd_gpu_kernel(const int n, const int num_axes, } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); - data_im[index] = val; + data_im[index + data_im_off] = val; } } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 6a699477fb7..73aab5c7b1c 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -218,7 +218,8 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int data_off, const int num_spatial_axes, - const int channel_axis, const int num_kernels, + const int channel_axis, + const int num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_col, int data_col_off) { @@ -226,10 +227,11 @@ void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, CL_KERNEL_SELECT("im2col_nd")); viennacl::ocl::enqueue( - kernel(num_kernels, num_spatial_axes, WrapHandle(data_im, ctx), data_off, - WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), - WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), - WrapHandle(stride, ctx), WrapHandle(data_col, ctx), data_col_off), + kernel(num_kernels, num_spatial_axes, channel_axis, + WrapHandle(data_im, ctx), data_off, WrapHandle(im_shape, ctx), + WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), + WrapHandle(pad, ctx), WrapHandle(stride, ctx), + WrapHandle(data_col, ctx), data_col_off), ctx->get_queue()); } @@ -260,7 +262,7 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int data_col_off, const int num_spatial_axes, - const int channel_axis_, const int im_size, + const int channel_axis, const int im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_im, int data_off) { @@ -268,8 +270,8 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, CL_KERNEL_SELECT("col2im_nd")); viennacl::ocl::enqueue( - kernel(im_size, num_spatial_axes, channel_axis_, - WrapHandle(data_col, ctx), data_col_off, WrapHandle(im_shape, ctx), + kernel(im_size, num_spatial_axes, channel_axis, WrapHandle(data_col, ctx), + data_col_off, WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), WrapHandle(stride, ctx), WrapHandle(data_im, ctx), data_off), diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index bb9ba6c541b..249a9b35271 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -16,7 +16,6 @@ using std::max; template void PoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - PoolingParameter pool_param = this->layer_param_.pooling_param(); // Set the max number of top blobs before calling base Layer::SetUp. @@ -64,7 +63,8 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, for (int i = 0; i < num_spatial_axes_; ++i) { kernel_shape_data[i] = pool_param.kernel_size( (num_kernel_dims == 1) ? 0 : i); - CHECK_GT(kernel_shape_data[i], 0)<< "Filter dimensions must be nonzero."; + CHECK_GT(kernel_shape_data[i], 0) + << "Filter dimensions must be nonzero."; } } } @@ -233,7 +233,6 @@ void PoolingLayer::Reshape(const vector*>& bottom, template void PoolingLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; int kernel_w_ = kernel_shape_.cpu_data()[1]; int stride_h_ = stride_.cpu_data()[0]; @@ -348,7 +347,6 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, template void PoolingLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; int kernel_w_ = kernel_shape_.cpu_data()[1]; int stride_h_ = stride_.cpu_data()[0]; diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index d0003f6c846..d6079164519 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -685,8 +685,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - if(num_spatial_axes_ == 2) { - + if (num_spatial_axes_ == 2) { int kernel_h_ = kernel_shape_.cpu_data()[0]; int kernel_w_ = kernel_shape_.cpu_data()[1]; int stride_h_ = stride_.cpu_data()[0]; @@ -703,7 +702,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; // 2D case - if(use_skernel_) { + if (use_skernel_) { // 2D-SK case switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: @@ -844,7 +843,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); - if(num_spatial_axes_ == 2) { + if (num_spatial_axes_ == 2) { int kernel_h_ = kernel_shape_.cpu_data()[0]; int kernel_w_ = kernel_shape_.cpu_data()[1]; int stride_h_ = stride_.cpu_data()[0]; @@ -861,7 +860,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; // 2D case - if(use_skernel_) { + if (use_skernel_) { // 2D-SK case switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { @@ -951,7 +950,8 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( CL_KERNEL_SELECT("max_pool_forward")); viennacl::ocl::enqueue( - oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), + oclk_max_pool_forward(count, + WrapHandle((cl_mem) bottom_data, &ctx), bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, @@ -1067,8 +1067,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, #ifdef USE_CUDA caffe_gpu_set(count, Dtype(0.), bottom_diff); - if(num_spatial_axes_ == 2) { - + if (num_spatial_axes_ == 2) { int kernel_h_ = kernel_shape_.cpu_data()[0]; int kernel_w_ = kernel_shape_.cpu_data()[1]; int stride_h_ = stride_.cpu_data()[0]; @@ -1084,7 +1083,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; - if(use_skernel_) { + if (use_skernel_) { switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { @@ -1180,8 +1179,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, greentea_gpu_set(this->device_context_->id(), count, Dtype(0.), (cl_mem) bottom_diff, 0); - if(num_spatial_axes_ == 2) { - + if (num_spatial_axes_ == 2) { int kernel_h_ = kernel_shape_.cpu_data()[0]; int kernel_w_ = kernel_shape_.cpu_data()[1]; int stride_h_ = stride_.cpu_data()[0]; @@ -1197,7 +1195,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; - if(use_skernel_) { + if (use_skernel_) { switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { @@ -1205,10 +1203,12 @@ void PoolingLayer::Backward_gpu(const vector*>& top, } else { mask = max_idx_.gpu_data(); } - viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( + viennacl::ocl::kernel &oclk_max_pool_backward = + program.get_kernel( CL_KERNEL_SELECT("max_pool_backward_sk")); viennacl::ocl::enqueue( - oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + oclk_max_pool_backward(count, + WrapHandle((cl_mem) top_diff, &ctx), mask == NULL ? 0 : 1, WrapHandle((cl_mem) mask, &ctx), WrapHandle((cl_mem) top_mask, &ctx), @@ -1233,10 +1233,12 @@ void PoolingLayer::Backward_gpu(const vector*>& top, } else { mask = max_idx_.gpu_data(); } - viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel( + viennacl::ocl::kernel &oclk_max_pool_backward = + program.get_kernel( CL_KERNEL_SELECT("max_pool_backward")); viennacl::ocl::enqueue( - oclk_max_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + oclk_max_pool_backward(count, + WrapHandle((cl_mem) top_diff, &ctx), mask == NULL ? 0 : 1, WrapHandle((cl_mem) mask, &ctx), WrapHandle((cl_mem) top_mask, &ctx), @@ -1249,10 +1251,12 @@ void PoolingLayer::Backward_gpu(const vector*>& top, } break; case PoolingParameter_PoolMethod_AVE: { - viennacl::ocl::kernel &oclk_ave_pool_backward = program.get_kernel( + viennacl::ocl::kernel &oclk_ave_pool_backward = + program.get_kernel( CL_KERNEL_SELECT("ave_pool_backward")); viennacl::ocl::enqueue( - oclk_ave_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), + oclk_ave_pool_backward(count, + WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, @@ -1262,12 +1266,14 @@ void PoolingLayer::Backward_gpu(const vector*>& top, } break; case PoolingParameter_PoolMethod_STOCHASTIC: { - viennacl::ocl::kernel &oclk_sto_pool_backward = program.get_kernel( + viennacl::ocl::kernel &oclk_sto_pool_backward = + program.get_kernel( CL_KERNEL_SELECT("sto_pool_backward")); viennacl::ocl::enqueue( oclk_sto_pool_backward( count, WrapHandle((cl_mem) (rand_idx_.gpu_data()), &ctx), - WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), channels_, + WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), + channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, WrapHandle((cl_mem) bottom_diff, &ctx)), @@ -1291,7 +1297,8 @@ void PoolingLayer::Backward_gpu(const vector*>& top, CL_KERNEL_SELECT("max_pool_backward_nd")); viennacl::ocl::enqueue( oclk_max_pool_backward( - count, num_spatial_axes_, WrapHandle((cl_mem) top_diff, &ctx), + count, num_spatial_axes_, + WrapHandle((cl_mem) top_diff, &ctx), mask == NULL ? 0 : 1, WrapHandle((cl_mem) mask, &ctx), WrapHandle((cl_mem) top_mask, &ctx), channels_, WrapHandle((cl_mem) (size_.gpu_data()), &ctx), @@ -1306,7 +1313,8 @@ void PoolingLayer::Backward_gpu(const vector*>& top, } break; default: - LOG(FATAL)<< "Unknown or unsupported pooling method in Backward_gpu()."; + LOG(FATAL) + << "Unknown or unsupported pooling method in Backward_gpu()."; } } #endif // USE_GREENTEA diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 58251bf06d8..e229d41ae65 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -509,7 +509,6 @@ void col2im_ndsk_gpu(const Dtype* data_col, const int num_spatial_axes, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* kstride, Dtype* data_im) { - col2im_ndsk_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, num_spatial_axes, data_col, im_shape, col_shape, @@ -699,7 +698,6 @@ void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, Dtype* data_im) { - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, num_spatial_axes, data_col, im_shape, col_shape, From eb4409135c685ff8071de2352c89fe92214802d2 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 3 Oct 2015 00:44:00 +0200 Subject: [PATCH 187/254] im2col_nd kernel fix in OpenCL --- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/im2col_nd.cl | 35 ++-- src/caffe/test/test_im2col_kernel.cu | 192 +++++++++++---------- 3 files changed, 118 insertions(+), 113 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index b3506ba17cf..8ece4de80d8 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -18,7 +18,7 @@ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = data_col_off;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape_ptr[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape_ptr[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_ndsk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT @@ -41,7 +41,7 @@ std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n im_shape += channel_axis;\n col_shape += channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = data_col_off;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape_ptr[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape_ptr[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT std::string im2col_ndsk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index bb2c93e6556..9bbb7046ee2 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -18,8 +18,8 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, int d_iter[6]; int i; - im_shape += channel_axis; - col_shape += channel_axis; + __global const int* im_shape_ptr = im_shape + channel_axis; + __global const int* col_shape_ptr = col_shape + channel_axis; for (int index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate @@ -27,19 +27,19 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, int channel_in = index; int channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { - d_temp[i] = channel_in % col_shape[i + 1]; - channel_in /= col_shape[i + 1]; + d_temp[i] = channel_in % col_shape_ptr[i + 1]; + channel_in /= col_shape_ptr[i + 1]; channel_out *= kernel_shape[i]; } channel_out *= channel_in; int data_col_inc = 1; for (i = 0; i < num_axes; ++i) { - channel_out *= col_shape[i + 1]; + channel_out *= col_shape_ptr[i + 1]; channel_out += d_temp[i]; d_temp[i] = d_temp[i] * stride[i] - pad[i]; - channel_in *= im_shape[i + 1]; + channel_in *= im_shape_ptr[i + 1]; channel_in += d_temp[i]; - data_col_inc *= col_shape[i + 1]; + data_col_inc *= col_shape_ptr[i + 1]; d_iter[i] = 0; } __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; @@ -49,7 +49,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, bool in_range = true; for (i = 0; i < num_axes; ++i) { const int d_iter_im = d_iter[i] + d_temp[i]; - in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1]; if (!in_range) { break; } @@ -57,7 +57,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, if (in_range) { int data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { - data_im_offset *= im_shape[i + 1]; + data_im_offset *= im_shape_ptr[i + 1]; data_im_offset += d_iter[i]; } *data_col_ptr = data_im_ptr[data_im_offset]; @@ -98,8 +98,9 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, int d_col_start[6]; int d_col_end[6]; - im_shape += channel_axis; - col_shape += channel_axis; + __global const int* im_shape_ptr = im_shape + channel_axis; + __global const int* col_shape_ptr = col_shape + channel_axis; + __global Dtype* data_col_ptr = data_col + data_col_off; for (int index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate @@ -107,8 +108,8 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, int channel_im = index; // Calculate d_im (image dimensions). for (int i = num_axes - 1; i >= 0; --i) { - d_im[i] = channel_im % im_shape[i + 1] + pad[i]; - channel_im /= im_shape[i + 1]; + d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i]; + channel_im /= im_shape_ptr[i + 1]; } // Calculate col start/end indices. bool done = false; @@ -116,7 +117,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape_ptr[i + 1]); if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. @@ -133,7 +134,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, bool incremented = true; do { // Compute the final offset. - int final_offset = data_col_off; + int final_offset = 0; int kernel_shape_prod = 1; for (int i = num_axes - 1; i >= 0; --i) { final_offset += (d_im[i] - d_col_iter[i] * stride[i]) @@ -142,10 +143,10 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, } final_offset += kernel_shape_prod * channel_im; for (int i = 0; i < num_axes; ++i) { - final_offset *= col_shape[i + 1]; + final_offset *= col_shape_ptr[i + 1]; final_offset += d_col_iter[i]; } - val += data_col[final_offset]; + val += data_col_ptr[final_offset]; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { const int d_max = d_col_end[i]; diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 16de869c0dd..0f5259f0c01 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -98,46 +98,31 @@ class Im2colKernelTest : public GPUDeviceTest { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); - TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); - - // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), - this->channels_, this->height_, this->width_, - this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, - this->stride_, this->stride_, - cpu_data + this->blob_top_cpu_->offset(n)); - } - - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { + if(Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + // Reshape the blobs to correct size for im2col output + this->blob_top_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); + + this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); + + const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); + TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); + + // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_cpu(this->blob_bottom_->cpu_data() - + this->blob_bottom_->offset(n), + im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, cpu_data + this->blob_top_cpu_->offset(n)); } - // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); @@ -145,25 +130,42 @@ TYPED_TEST(Im2colKernelTest, Test2D) { // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { - int grid_dim = default_grid_dim/grid_div; - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel - CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( - num_kernels, bottom_data + this->blob_bottom_->offset(n), - this->height_, this->width_, this->kernel_size_, this->kernel_size_, - this->pad_, this->pad_, this->stride_, this->stride_, - this->height_col_, this->width_col_, - top_data + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; + im2col_cpu(this->blob_bottom_->cpu_data() + + this->blob_bottom_->offset(n), + this->channels_, this->height_, this->width_, + this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, + this->stride_, this->stride_, + cpu_data + this->blob_top_cpu_->offset(n)); } - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = cpu_data[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; + + // GPU version + int num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + + // Launch with different grid sizes + for (int grid_div = 2; grid_div <= 8; grid_div++) { + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + int grid_dim = default_grid_dim/grid_div; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel + CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( + num_kernels, bottom_data + this->blob_bottom_->offset(n), + this->height_, this->width_, this->kernel_size_, this->kernel_size_, + this->pad_, this->pad_, this->stride_, this->stride_, + this->height_col_, this->width_col_, + top_data + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; + } + + // Compare results against CPU version + for (int i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = cpu_data[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; + } } } } @@ -171,55 +173,57 @@ TYPED_TEST(Im2colKernelTest, Test2D) { } TYPED_TEST(Im2colKernelTest, TestND) { - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - this->blob_top_cpu_->ReshapeLike(*this->blob_top_); - - const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); - TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); - - // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, - this->blob_bottom_->shape().data() + 1, - this->blob_top_cpu_->shape().data() + 1, - this->blob_kernel_shape_->cpu_data(), - this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), - top_data_cpu + this->blob_top_cpu_->offset(n)); - } + if(Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + // Reshape the blobs to correct size for im2col output + this->blob_top_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); + this->blob_top_cpu_->ReshapeLike(*this->blob_top_); - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { + const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); + TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); + + // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { - const int grid_dim = default_grid_dim / grid_div; - TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_nd_gpu_kernel - CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS) ( - num_kernels, 2, bottom_data_gpu + this->blob_bottom_->offset(n), - this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, - this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), - this->blob_stride_->gpu_data(), - top_data_gpu + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; + im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, + this->blob_bottom_->shape().data() + 1, + this->blob_top_cpu_->shape().data() + 1, + this->blob_kernel_shape_->cpu_data(), + this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), + top_data_cpu + this->blob_top_cpu_->offset(n)); } - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = top_data_cpu[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; + // GPU version + int num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); + + // Launch with different grid sizes + for (int grid_div = 2; grid_div <= 8; grid_div++) { + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + const int grid_dim = default_grid_dim / grid_div; + TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel + CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS) ( + num_kernels, 2, bottom_data_gpu + this->blob_bottom_->offset(n), + this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, + this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), + this->blob_stride_->gpu_data(), + top_data_gpu + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; + } + + // Compare results against CPU version + for (int i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = top_data_cpu[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; + } } } } From f9f872073e34488b46beb6cbb967c0f99974b0ad Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 4 Oct 2015 15:01:24 +0200 Subject: [PATCH 188/254] CUDA im2col fix. --- include/caffe/vision_layers.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 84d675f567d..a51b1a9a1a2 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -298,7 +298,7 @@ class BaseConvolutionLayer : public Layer { kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], pad_.cpu_data()[1], stride_.cpu_data()[0], stride_.cpu_data()[1], - kstride_.gpu_data()[0], kstride_.gpu_data()[1], data); + kstride_.cpu_data()[0], kstride_.cpu_data()[1], data); } else { col2im_gpu(col_buff, conv_in_channels_, conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], @@ -399,8 +399,8 @@ class BaseConvolutionLayer : public Layer { pad_.cpu_data()[0], pad_.cpu_data()[1], stride_.cpu_data()[0], stride_.cpu_data()[1], - kstride_.gpu_data()[0], - kstride_.gpu_data()[1], (cl_mem) data, + kstride_.cpu_data()[0], + kstride_.cpu_data()[1], (cl_mem) data, data_off); } else { greentea_col2im_gpu(&program, &ctx, (cl_mem) col_buff, From 61a05f312be151981e695250db7683c9fde7d35c Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 11 Oct 2015 03:37:09 +0200 Subject: [PATCH 189/254] Python update, Lint fixes. --- .gitignore | 3 + include/caffe/data_layers.hpp | 15 ++-- include/caffe/dev_ptr.hpp | 1 - include/caffe/solver.hpp | 5 ++ include/caffe/vision_layers.hpp | 3 +- python/caffe/_caffe.cpp | 31 ++++---- src/caffe/cuda/cuda_dev_ptr.cpp | 1 - src/caffe/layers/memory_data_layer.cpp | 105 +++++++++++++++---------- src/caffe/opencl/ocl_dev_ptr.cpp | 1 - src/caffe/proto/caffe.proto | 4 + src/caffe/test/test_im2col_kernel.cu | 10 ++- 11 files changed, 111 insertions(+), 68 deletions(-) diff --git a/.gitignore b/.gitignore index c8af6d0b452..8e20be3bbbf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,8 @@ ## General +# Fuse files +*.fuse* + # Compiled Object files *.slo *.lo diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 90fd0d19917..678b1cb79d3 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -284,16 +284,21 @@ class MemoryDataLayer : public BaseDataLayer { void Reset(Dtype* data, Dtype* label, int n); void set_batch_size(int new_size); - int batch_size() { return batch_size_; } - int channels() { return channels_; } - int height() { return height_; } - int width() { return width_; } + vector shape() { return shape_; } + vector label_shape() { return label_shape_; } + int batch_size() { return shape_[0]; } + int channels() { return shape_[1]; } + int height() { return shape_[2]; } + int width() { return shape_[3]; } protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); - int batch_size_, channels_, height_, width_, size_; + vector shape_; + vector label_shape_; + int size_; + Dtype* data_; Dtype* labels_; int n_; diff --git a/include/caffe/dev_ptr.hpp b/include/caffe/dev_ptr.hpp index f89c029a686..9134aa20dbb 100644 --- a/include/caffe/dev_ptr.hpp +++ b/include/caffe/dev_ptr.hpp @@ -16,7 +16,6 @@ namespace caffe { * */ template class dev_ptr { public: - virtual Type* get() = 0; virtual std::ptrdiff_t off() = 0; diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 28756c9a6ca..9741d660d03 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -73,6 +73,11 @@ class Solver { int iter() { return iter_; } + + int max_iter() { + return param_.max_iter(); + } + virtual void SnapshotSolverState(const string& model_filename) = 0; diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index a51b1a9a1a2..34f59f9012e 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -323,7 +323,8 @@ class BaseConvolutionLayer : public Layer { #endif // USE_CUDA #ifdef USE_GREENTEA inline void greentea_conv_im2col_gpu(const Dtype* data, const int data_off, - Dtype* col_buff, const int col_buff_off) { + Dtype* col_buff, + const int col_buff_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index ccd5776ac40..89635523605 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -50,24 +50,25 @@ static void CheckFile(const string& filename) { } void CheckContiguousArray(PyArrayObject* arr, string name, - int channels, int height, int width) { + vector shape) { if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) { throw std::runtime_error(name + " must be C contiguous"); } + // This must not hold anymore + /* if (PyArray_NDIM(arr) != 4) { throw std::runtime_error(name + " must be 4-d"); } + */ if (PyArray_TYPE(arr) != NPY_FLOAT32) { throw std::runtime_error(name + " must be float32"); } - if (PyArray_DIMS(arr)[1] != channels) { - throw std::runtime_error(name + " has wrong number of channels"); - } - if (PyArray_DIMS(arr)[2] != height) { - throw std::runtime_error(name + " has wrong height"); - } - if (PyArray_DIMS(arr)[3] != width) { - throw std::runtime_error(name + " has wrong width"); + for (int i = 1; i < PyArray_NDIM(arr); ++i) { + if (PyArray_DIMS(arr)[i] != shape[i]) { + throw std::runtime_error( + "Shape dimension " + i + " has wrong size (" + PyArray_DIMS(arr)[i] + + " vs. " + shape[i] + ")"); + } } } @@ -99,11 +100,11 @@ void Net_Save(const Net& net, string filename) { WriteProtoToBinaryFile(net_param, filename.c_str()); } -void Net_SetInputArrays(Net* net, bp::object data_obj, +void Net_SetInputArrays(Net* net, int index, bp::object data_obj, bp::object labels_obj) { // check that this network has an input MemoryDataLayer shared_ptr > md_layer = - boost::dynamic_pointer_cast >(net->layers()[0]); + boost::dynamic_pointer_cast >(net->layers()[index]); if (!md_layer) { throw std::runtime_error("set_input_arrays may only be called if the" " first layer is a MemoryDataLayer"); @@ -114,9 +115,8 @@ void Net_SetInputArrays(Net* net, bp::object data_obj, reinterpret_cast(data_obj.ptr()); PyArrayObject* labels_arr = reinterpret_cast(labels_obj.ptr()); - CheckContiguousArray(data_arr, "data array", md_layer->channels(), - md_layer->height(), md_layer->width()); - CheckContiguousArray(labels_arr, "labels array", 1, 1, 1); + CheckContiguousArray(data_arr, "data array", md_layer->shape()); + CheckContiguousArray(labels_arr, "labels array", md_layer->label_shape()); if (PyArray_DIMS(data_arr)[0] != PyArray_DIMS(labels_arr)[0]) { throw std::runtime_error("data and labels must have the same first" " dimension"); @@ -244,7 +244,7 @@ BOOST_PYTHON_MODULE(_caffe) { bp::make_function(&Net::output_blob_indices, bp::return_value_policy())) .def("_set_input_arrays", &Net_SetInputArrays, - bp::with_custodian_and_ward<1, 2, bp::with_custodian_and_ward<1, 3> >()) + bp::with_custodian_and_ward<1, 3, bp::with_custodian_and_ward<1, 4> >()) .def("save", &Net_Save); bp::class_, shared_ptr >, boost::noncopyable>( @@ -280,6 +280,7 @@ BOOST_PYTHON_MODULE(_caffe) { bp::class_, shared_ptr >, boost::noncopyable>( "Solver", bp::no_init) .add_property("net", &Solver::net) + .add_property("max_iter", &Solver::max_iter) .add_property("test_nets", bp::make_function(&Solver::test_nets, bp::return_internal_reference<>())) .add_property("iter", &Solver::iter) diff --git a/src/caffe/cuda/cuda_dev_ptr.cpp b/src/caffe/cuda/cuda_dev_ptr.cpp index 25c2bf36cd3..1b92d2a9888 100644 --- a/src/caffe/cuda/cuda_dev_ptr.cpp +++ b/src/caffe/cuda/cuda_dev_ptr.cpp @@ -7,7 +7,6 @@ namespace caffe { template cuda_dev_ptr::cuda_dev_ptr(Type* ptr) : raw_ptr_(ptr) { - } template diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index 2370aa04d3b..b6cb0752fb5 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -10,37 +10,56 @@ namespace caffe { -template +template void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, - const vector*>& top) { - batch_size_ = this->layer_param_.memory_data_param().batch_size(); - channels_ = this->layer_param_.memory_data_param().channels(); - height_ = this->layer_param_.memory_data_param().height(); - width_ = this->layer_param_.memory_data_param().width(); - size_ = channels_ * height_ * width_; - CHECK_GT(batch_size_ * size_, 0) << - "batch_size, channels, height, and width must be specified and" - " positive in memory_data_param"; - vector label_shape(1, batch_size_); - top[0]->Reshape(batch_size_, channels_, height_, width_); - top[1]->Reshape(label_shape); - added_data_.Reshape(batch_size_, channels_, height_, width_); - added_label_.Reshape(label_shape); + const vector*>& top) { + MemoryDataParameter mem_param = this->layer_param_.memory_data_param(); + + // Old 4D parameters + if (mem_param.has_batch_size() && mem_param.has_channels() + && mem_param.has_height() && mem_param.has_width()) { + shape_.clear(); + shape_.push_back(mem_param.batch_size()); + shape_.push_back(mem_param.channels()); + shape_.push_back(mem_param.height()); + shape_.push_back(mem_param.width()); + } + + // New ND parameters + if (mem_param.dim_size() > 0) { + shape_.clear(); + for (int i = 1; i < mem_param.dim_size(); ++i) { + shape_.push_back(mem_param.dim(i)); + } + } + + // Labels have shape batch_size, 1, 1, ..., 1 + label_shape_.push_back(shape_[0]); + // All sizes except the batch index + for (int i = 1; i < shape_.size(); ++i) { + size_ *= shape_[i]; + label_shape_.push_back(1); + } + + top[0]->Reshape(shape_); + top[1]->Reshape(label_shape_); + added_data_.Reshape(shape_); + added_label_.Reshape(label_shape_); data_ = NULL; labels_ = NULL; added_data_.cpu_data(); added_label_.cpu_data(); } -template +template void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { CHECK(!has_new_data_) << - "Can't add data until current data has been consumed."; + "Can't add data until current data has been consumed."; size_t num = datum_vector.size(); - CHECK_GT(num, 0) << "There is no datum to add."; - CHECK_EQ(num % batch_size_, 0) << - "The added data must be a multiple of the batch size."; - added_data_.Reshape(num, channels_, height_, width_); + CHECK_GT(num, 0)<< "There is no datum to add."; + CHECK_EQ(num % shape_[0], 0)<< + "The added data must be a multiple of the batch size."; + added_data_.Reshape(shape_); added_label_.Reshape(num, 1, 1, 1); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(datum_vector, &added_data_); @@ -61,12 +80,16 @@ void MemoryDataLayer::AddMatVector(const vector& mat_vector, const vector& labels) { size_t num = mat_vector.size(); CHECK(!has_new_data_) << - "Can't add mat until current data has been consumed."; + "Can't add mat until current data has been consumed."; CHECK_GT(num, 0) << "There is no mat to add"; - CHECK_EQ(num % batch_size_, 0) << - "The added data must be a multiple of the batch size."; - added_data_.Reshape(num, channels_, height_, width_); - added_label_.Reshape(num, 1, 1, 1); + CHECK_EQ(num % shape_[0], 0) << + "The added data must be a multiple of the batch size."; + vector added_shape = shape_; + added_shape[0] = num; + added_data_.Reshape(added_shape); + vector added_label_shape = label_shape_; + added_label_shape[0] = num; + added_label_.Reshape(label_shape_); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(mat_vector, &added_data_); // Copy Labels @@ -81,15 +104,15 @@ void MemoryDataLayer::AddMatVector(const vector& mat_vector, } #endif // USE_OPENCV -template +template void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { CHECK(data); CHECK(labels); - CHECK_EQ(n % batch_size_, 0) << "n must be a multiple of batch size"; + CHECK_EQ(n % shape_[0], 0)<< "n must be a multiple of batch size"; // Warn with transformation parameters since a memory array is meant to // be generic and no transformations are done with Reset(). if (this->layer_param_.has_transform_param()) { - LOG(WARNING) << this->type() << " does not transform array data on Reset()"; + LOG(WARNING)<< this->type() << " does not transform array data on Reset()"; } data_ = data; labels_ = labels; @@ -97,26 +120,28 @@ void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { pos_ = 0; } -template +template void MemoryDataLayer::set_batch_size(int new_size) { CHECK(!has_new_data_) << - "Can't change batch_size until current data has been consumed."; - batch_size_ = new_size; - added_data_.Reshape(batch_size_, channels_, height_, width_); - added_label_.Reshape(batch_size_, 1, 1, 1); + "Can't change batch_size until current data has been consumed."; + shape_[0] = new_size; + label_shape_[0] = new_size; + added_data_.Reshape(shape_); + added_label_.Reshape(label_shape_); } -template +template void MemoryDataLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset"; - top[0]->Reshape(batch_size_, channels_, height_, width_); - top[1]->Reshape(batch_size_, 1, 1, 1); + top[0]->Reshape(shape_); + top[1]->Reshape(label_shape_); top[0]->set_cpu_data(data_ + pos_ * size_); top[1]->set_cpu_data(labels_ + pos_); - pos_ = (pos_ + batch_size_) % n_; - if (pos_ == 0) + pos_ = (pos_ + shape_[0]) % n_; + if (pos_ == 0) { has_new_data_ = false; + } } INSTANTIATE_CLASS(MemoryDataLayer); diff --git a/src/caffe/opencl/ocl_dev_ptr.cpp b/src/caffe/opencl/ocl_dev_ptr.cpp index 5a5f0cf5957..a9965366adf 100644 --- a/src/caffe/opencl/ocl_dev_ptr.cpp +++ b/src/caffe/opencl/ocl_dev_ptr.cpp @@ -7,7 +7,6 @@ namespace caffe { template ocl_dev_ptr::ocl_dev_ptr(cl_mem ocl_mem) : ocl_mem_(ocl_mem) { - } template diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 4233cf34547..94f9df9effa 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -745,6 +745,10 @@ message MemoryDataParameter { optional uint32 channels = 2; optional uint32 height = 3; optional uint32 width = 4; + // Dim works in the following order (examples): + // batch_size, channels, height, width + // batch_size, channels, Z, Y, X + repeated uint32 dim = 5; } message MVNParameter { diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 0f5259f0c01..fd6264d6d6e 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -98,7 +98,7 @@ class Im2colKernelTest : public GPUDeviceTest { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { - if(Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, @@ -173,7 +173,7 @@ TYPED_TEST(Im2colKernelTest, Test2D) { } TYPED_TEST(Im2colKernelTest, TestND) { - if(Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, @@ -209,8 +209,10 @@ TYPED_TEST(Im2colKernelTest, TestND) { im2col_nd_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS) ( num_kernels, 2, bottom_data_gpu + this->blob_bottom_->offset(n), - this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, - this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), + this->blob_bottom_->gpu_shape() + 1, + this->blob_top_->gpu_shape() + 1, + this->blob_kernel_shape_->gpu_data(), + this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; From faf003ab576f15cdce5b8c53a5db2d0b7c903385 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 11 Oct 2015 04:14:07 +0200 Subject: [PATCH 190/254] Pycaffe check update. --- python/caffe/_caffe.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 89635523605..38ad54a120e 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -54,7 +54,7 @@ void CheckContiguousArray(PyArrayObject* arr, string name, if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) { throw std::runtime_error(name + " must be C contiguous"); } - // This must not hold anymore + // This does not have to hold anymore /* if (PyArray_NDIM(arr) != 4) { throw std::runtime_error(name + " must be 4-d"); @@ -66,8 +66,9 @@ void CheckContiguousArray(PyArrayObject* arr, string name, for (int i = 1; i < PyArray_NDIM(arr); ++i) { if (PyArray_DIMS(arr)[i] != shape[i]) { throw std::runtime_error( - "Shape dimension " + i + " has wrong size (" + PyArray_DIMS(arr)[i] - + " vs. " + shape[i] + ")"); + "Shape dimension " + std::to_string(i) + " has wrong size (" + + std::to_string(static_cast(PyArray_DIMS(arr)[i])) + " vs. " + + std::to_string(shape[i]) + ")"); } } } From d62911cda1e12684c5bdddc854c7c34990a2560e Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 12 Oct 2015 03:24:21 +0200 Subject: [PATCH 191/254] LINT fix. --- src/caffe/greentea/cl_kernels.cpp | 4 ++-- src/caffe/greentea/cl_kernels/mergecrop.cl | 8 ++++---- src/caffe/layers/mergecrop_layer.cu | 17 ++++++++--------- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 5a9297bc546..58715c814ab 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -23,7 +23,7 @@ std::string im2col_ndsk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.c std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads,\n const int dims,\n __global const Dtype* bottom_a,\n const int forward_a,\n __global const Dtype* bottom_b,\n const int forward_b,\n __global Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6]; // NOLINT(runtime/arrays)\n int tmp_idx[6]; // NOLINT(runtime/arrays)\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n const int dims,\n __global Dtype* bottom_a,\n const int backward_a,\n __global Dtype* bottom_b,\n const int backward_b,\n __global const Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6]; // NOLINT(runtime/arrays)\n int tmp_idx[6]; // NOLINT(runtime/arrays)\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT +std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads,\n const int dims,\n __global const Dtype* bottom_a,\n const int forward_a,\n __global const Dtype* bottom_b,\n const int forward_b,\n __global Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n const int dims,\n __global Dtype* bottom_a,\n const int backward_a,\n __global Dtype* bottom_b,\n const int backward_b,\n __global const Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT @@ -46,7 +46,7 @@ std::string im2col_ndsk_double = "#ifndef __OPENCL_VERSION__\n#include \"header. std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads,\n const int dims,\n __global const Dtype* bottom_a,\n const int forward_a,\n __global const Dtype* bottom_b,\n const int forward_b,\n __global Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6]; // NOLINT(runtime/arrays)\n int tmp_idx[6]; // NOLINT(runtime/arrays)\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n const int dims,\n __global Dtype* bottom_a,\n const int backward_a,\n __global Dtype* bottom_b,\n const int backward_b,\n __global const Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6]; // NOLINT(runtime/arrays)\n int tmp_idx[6]; // NOLINT(runtime/arrays)\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT +std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads,\n const int dims,\n __global const Dtype* bottom_a,\n const int forward_a,\n __global const Dtype* bottom_b,\n const int forward_b,\n __global Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n const int dims,\n __global Dtype* bottom_a,\n const int backward_a,\n __global Dtype* bottom_b,\n const int backward_b,\n __global const Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/mergecrop.cl b/src/caffe/greentea/cl_kernels/mergecrop.cl index e44f5802120..061560d5a92 100644 --- a/src/caffe/greentea/cl_kernels/mergecrop.cl +++ b/src/caffe/greentea/cl_kernels/mergecrop.cl @@ -14,8 +14,8 @@ __kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads, const int channels_b, __global const int* shape_a, __global const int* shape_b) { - int pad[6]; // NOLINT(runtime/arrays) - int tmp_idx[6]; // NOLINT(runtime/arrays) + int pad[6]; + int tmp_idx[6]; int size_a = 1; int size_b = 1; @@ -69,8 +69,8 @@ __kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, const int channels_b, __global const int* shape_a, __global const int* shape_b) { - int pad[6]; // NOLINT(runtime/arrays) - int tmp_idx[6]; // NOLINT(runtime/arrays) + int pad[6]; + int tmp_idx[6]; int size_a = 1; int size_b = 1; diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index 2a1f60d70a5..a208ce20993 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -135,8 +135,7 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, forward_[0], bottom_data_b, forward_[1], top_data, num, channels_a, channels_b, shape_a_.gpu_data(), shape_b_.gpu_data()); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -151,8 +150,8 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, oclk_copy_forward(count, spatial_dims, WrapHandle((cl_mem) bottom_data_a, &ctx), forward_[0], WrapHandle((cl_mem) bottom_data_b, &ctx), forward_[1], - WrapHandle((cl_mem) top_data, &ctx), - num, channels_a, channels_b, + WrapHandle((cl_mem) top_data, &ctx), num, channels_a, + channels_b, WrapHandle((cl_mem) (shape_a_.gpu_data()), &ctx), WrapHandle((cl_mem) (shape_b_.gpu_data()), &ctx)), ctx.get_queue()); @@ -196,8 +195,7 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, count, spatial_dims, bottom_diff_a, backward_[0], bottom_diff_b, backward_[1], top_diff, num, channels_a, channels_b, shape_a_.gpu_data(), shape_b_.gpu_data()); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -210,9 +208,10 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, CL_KERNEL_SELECT("merge_copy_backward")); viennacl::ocl::enqueue( oclk_copy_backward(count, spatial_dims, - WrapHandle((cl_mem) bottom_diff_a, &ctx), backward_[0], - WrapHandle((cl_mem) bottom_diff_b, &ctx), backward_[1], - WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle((cl_mem) bottom_diff_a, &ctx), + backward_[0], + WrapHandle((cl_mem) bottom_diff_b, &ctx), + backward_[1], WrapHandle((cl_mem) top_diff, &ctx), num, channels_a, channels_b, WrapHandle((cl_mem) (shape_a_.gpu_data()), &ctx), WrapHandle((cl_mem) (shape_b_.gpu_data()), &ctx)), From 977bd72675d4822d4eb68f36be2b56a0116c6844 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 15 Oct 2015 01:56:41 +0200 Subject: [PATCH 192/254] Reworked MALIS (flexibility/feature), added batch reindex layer for OpenCL. --- include/caffe/loss_layers.hpp | 7 +- src/caffe/greentea/cl_kernels.cpp | 4 + .../greentea/cl_kernels/batch_reindex.cl | 34 +++ src/caffe/layers/batch_reindex_layer.cu | 72 +++++- src/caffe/layers/malis_loss_layer.cpp | 210 +++++++++--------- 5 files changed, 215 insertions(+), 112 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/batch_reindex.cl diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index c357ca14495..c6e35525e2e 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -781,7 +781,9 @@ class MalisLossLayer : public LossLayer { const vector*>& top); virtual inline const char* type() const { return "MalisLoss"; } - virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return 3; } + virtual inline int MaxBottomBlobs() const { return 4; } virtual inline int ExactNumTopBlobs() const { return -1; } virtual inline int MinTopBlobs() const { return 1; } virtual inline int MaxTopBlobs() const { return 2; } @@ -801,11 +803,14 @@ class MalisLossLayer : public LossLayer { Dtype *classerr_out, Dtype *rand_index_out, Dtype margin, Dtype threshold); + int nedges_; int conn_num_dims_; std::vector conn_dims_; std::vector nhood_data_; std::vector nhood_dims_; + Blob affinity_pos_; + Blob affinity_neg_; Blob dloss_pos_; Blob dloss_neg_; }; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 58715c814ab..c28a5ec8ca7 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -9,6 +9,7 @@ std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __gl std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n int in_n = (int) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n out[index] = 0;\n int lower = (int) (begins[n]);\n int upper = lower + (int) (counts[n]);\n for (int i = lower; i < upper; ++i) {\n int in_n = (int) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT @@ -32,6 +33,7 @@ std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header. std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n const int tile_size, const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int n = index / tile_size / num_tiles / bottom_tile_axis;\n const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int tile_size,\n const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size) % bottom_tile_axis;\n const int n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n int in_n = (int) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n out[index] = 0;\n int lower = (int) (begins[n]);\n int upper = lower + (int) (counts[n]);\n for (int i = lower; i < upper; ++i) {\n int in_n = (int) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT @@ -60,6 +62,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << "#define TYPE TYPE_FLOAT" << "\n\n"; // NOLINT ss << activation_float << "\n\n"; // NOLINT ss << auxiliary_float << "\n\n"; // NOLINT + ss << batch_reindex_float << "\n\n"; // NOLINT ss << bnll_float << "\n\n"; // NOLINT ss << channel_float << "\n\n"; // NOLINT ss << concat_float << "\n\n"; // NOLINT @@ -88,6 +91,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << "#define TYPE TYPE_DOUBLE" << "\n\n"; // NOLINT ss << activation_double << "\n\n"; // NOLINT ss << auxiliary_double << "\n\n"; // NOLINT + ss << batch_reindex_double << "\n\n"; // NOLINT ss << bnll_double << "\n\n"; // NOLINT ss << channel_double << "\n\n"; // NOLINT ss << concat_double << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/batch_reindex.cl b/src/caffe/greentea/cl_kernels/batch_reindex.cl new file mode 100644 index 00000000000..44733a65494 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/batch_reindex.cl @@ -0,0 +1,34 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(br_forward,Dtype)(const int count, const int inner_dim, + __global const Dtype* in, + __global const Dtype* permut, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / (inner_dim); + int in_n = (int) (permut[n]); + out[index] = in[in_n * (inner_dim) + index % (inner_dim)]; + } +} + +__kernel void TEMPLATE(br_backward,Dtype)(const int count, const int inner_dim, + __global const Dtype* in, + __global const Dtype* top_indexes, + __global const Dtype* begins, + __global const Dtype* counts, + __global Dtype* out) { + for (int index = get_global_id(0); index < count; + index += get_global_size(0)) { + int n = index / (inner_dim); + out[index] = 0; + int lower = (int) (begins[n]); + int upper = lower + (int) (counts[n]); + for (int i = lower; i < upper; ++i) { + int in_n = (int) (top_indexes[i]); + out[index] += in[in_n * (inner_dim) + index % (inner_dim)]; + } + } +} diff --git a/src/caffe/layers/batch_reindex_layer.cu b/src/caffe/layers/batch_reindex_layer.cu index c418cab9042..cb8436ff6c8 100644 --- a/src/caffe/layers/batch_reindex_layer.cu +++ b/src/caffe/layers/batch_reindex_layer.cu @@ -8,6 +8,7 @@ namespace caffe { +#ifdef USE_CUDA template __global__ void BRForward(const int count, const int inner_dim, const Dtype* in, const Dtype* permut, Dtype* out) { @@ -17,6 +18,7 @@ __global__ void BRForward(const int count, const int inner_dim, const Dtype* in, out[index] = in[in_n * (inner_dim) + index % (inner_dim)]; } } +#endif // USE_CUDA template void BatchReindexLayer::Forward_gpu(const vector*>& bottom, @@ -27,13 +29,36 @@ void BatchReindexLayer::Forward_gpu(const vector*>& bottom, return; } int threads = top[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - BRForward <<>>( - top[0]->count(), bottom[0]->count() / bottom[0]->shape(0), - bottom[0]->gpu_data(), bottom[1]->gpu_data(), top[0]->mutable_gpu_data()); - CUDA_POST_KERNEL_CHECK; + + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + BRForward CUDA_KERNEL(CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS) ( + top[0]->count(), bottom[0]->count() / bottom[0]->shape(0), + bottom[0]->gpu_data(), bottom[1]->gpu_data(), top[0]->mutable_gpu_data()); + CUDA_POST_KERNEL_CHECK; +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_->id()); + + viennacl::ocl::kernel &oclk_br = program.get_kernel( + CL_KERNEL_SELECT("br_forward")); + viennacl::ocl::enqueue( + oclk_br(top[0]->count(), bottom[0]->count() / bottom[0]->shape(0), + WrapHandle((cl_mem) (bottom[0]->gpu_data()), &ctx), + WrapHandle((cl_mem) (bottom[1]->gpu_data()), &ctx), + WrapHandle((cl_mem) (top[0]->mutable_gpu_data()), &ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + } +#ifdef USE_CUDA template __global__ void BRBackward(const int count, const int inner_dim, const Dtype* in, const Dtype* top_indexes, @@ -50,6 +75,7 @@ __global__ void BRBackward(const int count, const int inner_dim, } } } +#endif // USE_CUDA template void BatchReindexLayer::Backward_gpu( @@ -94,12 +120,36 @@ void BatchReindexLayer::Backward_gpu( } int threads = bottom[0]->count(); - // NOLINT_NEXT_LINE(whitespace/operators) - BRBackward <<>>( - bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0), - top[0]->gpu_diff(), top_indexes.gpu_data(), begins.gpu_data(), - counts.gpu_data(), bottom[0]->mutable_gpu_diff()); - CUDA_POST_KERNEL_CHECK; + + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // NOLINT_NEXT_LINE(whitespace/operators) + BRBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS) ( + bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0), + top[0]->gpu_diff(), top_indexes.gpu_data(), begins.gpu_data(), + counts.gpu_data(), bottom[0]->mutable_gpu_diff()); + CUDA_POST_KERNEL_CHECK; +#endif //USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( + this->device_->id()); + + viennacl::ocl::kernel &oclk_br = program.get_kernel( + CL_KERNEL_SELECT("br_backward")); + viennacl::ocl::enqueue( + oclk_br(bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0), + WrapHandle((cl_mem)(top[0]->gpu_diff()), &ctx), + WrapHandle((cl_mem)(top_indexes.gpu_data()), &ctx), + WrapHandle((cl_mem)(begins.gpu_data()), &ctx), + WrapHandle((cl_mem)(counts.gpu_data()), &ctx), + WrapHandle((cl_mem)(bottom[0]->mutable_gpu_diff()), &ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } + } INSTANTIATE_LAYER_GPU_FUNCS(BatchReindexLayer); diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 01240cefedb..aba3f2c05b8 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -77,7 +77,7 @@ void MalisLossLayer::Malis(const Dtype* conn_data, for (int64_t i = 0; i < nhood_dims[0]; ++i) { nHood[i] = 0; for (int64_t j = 0; j < nhood_dims[1]; ++j) { - nHood[i] += (int32_t) nhood_data[i + j * nhood_dims[0]] * prodDims[j]; + nHood[i] += (int32_t) nhood_data[j + i * nhood_dims[1]] * prodDims[j]; } } @@ -115,16 +115,20 @@ void MalisLossLayer::Malis(const Dtype* conn_data, * std::max((conn_dims[2] - 1), 1)); int64_t j = 0; // Loop over #edges - for (int64_t d = 0, i = 0; d < conn_dims[3]; ++d) { + for (int64_t d = 0, i = 0; d < conn_dims[0]; ++d) { // Loop over Z - for (int64_t z = 0; z < conn_dims[2]; ++z) { + for (int64_t z = 0; z < conn_dims[1]; ++z) { // Loop over Y - for (int64_t y = 0; y < conn_dims[1]; ++y) { + for (int64_t y = 0; y < conn_dims[2]; ++y) { // Loop over X - for (int64_t x = 0; x < conn_dims[0]; ++x, ++i) { - if (x < std::max(conn_dims[0] - 1, 1) && - y < std::max(conn_dims[1] - 1, 1) && - z < std::max(conn_dims[2] - 1, 1)) { + for (int64_t x = 0; x < conn_dims[3]; ++x, ++i) { + // Out-of-bounds check: + if (!((z + nhood_data[d * 3 + 0] < 0) + ||(z + nhood_data[d * 3 + 0] >= conn_dims[1]) + ||(y + nhood_data[d * 3 + 1] < 0) + ||(y + nhood_data[d * 3 + 1] >= conn_dims[2]) + ||(x + nhood_data[d * 3 + 2] < 0) + ||(x + nhood_data[d * 3 + 2] >= conn_dims[3]))) { pqueue[j++] = i; } } @@ -241,6 +245,16 @@ void MalisLossLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); + // Expected inputs: + // Required (bottom 0 to 2): + // Bottom 0: Predicted affinity, shaped (batch size, #edges, (Z), (Y), X) + // Bottom 1: Segmented ground truth, shaped (batch size, 1, (Z), (Y), X) + // Bottom 2: Ground truth affinity, shaped (batch size, #edges, (Z), (Y), X) + + // Optional (bottom 3): + // Bottom 3: Edge connectivity, size #edges * 3, shaped (Z,Y,X);(Z,Y,X);... + // (this means pairs of 3 per edge) + #ifdef CAFFE_MALIS_DEBUG cv::namedWindow("labelled"); cv::namedWindow("test"); @@ -256,32 +270,40 @@ void MalisLossLayer::Reshape(const vector*>& bottom, top[1]->ReshapeLike(*bottom[0]); } + // Up to 5 dimensional; supported modes: + // batch, channels (edges), Z, Y, X => 3D affinity + // batch, channels (edges), Y, X => 2D affinity + // batch, channels (edges), X => 1D affinity + vector shape = bottom[0]->shape(); + conn_dims_.clear(); nhood_dims_.clear(); - nhood_data_.clear(); + // #edges, Z, Y, X specification (4 dimensions) conn_num_dims_ = 4; - conn_dims_.push_back(bottom[0]->width()); // X-axis - conn_dims_.push_back(bottom[0]->height()); // Y-axis - conn_dims_.push_back(1); // Z-axis - conn_dims_.push_back(2); // #edges - - nhood_dims_.push_back(2); // #edges - nhood_dims_.push_back(3); // 3 dimensional - - nhood_data_.push_back(1); // Edge 1, X - nhood_data_.push_back(0); // Edge 2, X - nhood_data_.push_back(0); // Edge 1, Y - nhood_data_.push_back(1); // Edge 2, Y - - nhood_data_.push_back(0); // Edge 1, Z - nhood_data_.push_back(0); // Edge 2, Z - - dloss_pos_.Reshape( - 1, 2, bottom[0]->height(), bottom[0]->width()); - dloss_neg_.Reshape( - 1, 2, bottom[0]->height(), bottom[0]->width()); + // Channel axis equals number of edges + nedges_ = shape[1]; + + // #edges + conn_dims_.push_back(nedges_); + // Z-axis + conn_dims_.push_back(shape.size() >= 5 ? shape[shape.size() - 3] : 1); + // Y-axis + conn_dims_.push_back(shape.size() >= 4 ? shape[shape.size() - 2] : 1); + // X-axis + conn_dims_.push_back(shape.size() >= 3 ? shape[shape.size() - 1] : 1); + + // #edges + nhood_dims_.push_back(nedges_); + // 3 dimensional (always, to simplify things; + // can just set unused spatials to 0) + nhood_dims_.push_back(3); + + affinity_pos_.Reshape(shape); + affinity_neg_.Reshape(shape); + dloss_pos_.Reshape(shape); + dloss_neg_.Reshape(shape); } template @@ -337,82 +359,80 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, } #endif - int inner_num = bottom[0]->width() * bottom[0]->height(); + // Set up the neighborhood + nhood_data_.clear(); + if (bottom.size() == 4) { + // Custom edges + for (int i = 0; i < nedges_; ++i) { + // Z edge direction + nhood_data_.push_back(bottom[3]->cpu_data()[i * 3 + 0]); + // Y edge direction + nhood_data_.push_back(bottom[3]->cpu_data()[i * 3 + 1]); + // X edge direction + nhood_data_.push_back(bottom[3]->cpu_data()[i * 3 + 2]); + } + } else { + // Dimension primary edges (+Z, +Y, +X) only: + // 1 edge: +X (0,0,1) + // 2 edges: +Y, +X (0,1,0); (0,0,1) + // 3 edges: +Z, +Y, +X (1,0,0); (0,1,0); (0,0,1) + for (int i = 3 - nedges_; i < 3; ++i) { + nhood_data_.push_back((i + 0) % 3 == 0 ? 1 : 0); + nhood_data_.push_back((i + 1) % 3 == 0 ? 1 : 0); + nhood_data_.push_back((i + 2) % 3 == 0 ? 1 : 0); + } + } // Predicted affinity - const Dtype* affinity_prob_x = bottom[0]->cpu_data(); - const Dtype* affinity_prob_y = bottom[0]->cpu_data() + inner_num; + const Dtype* affinity_prob = bottom[0]->cpu_data(); // Effective affinity - const Dtype* affinity_x = bottom[1]->cpu_data(); - const Dtype* affinity_y = bottom[1]->cpu_data() + inner_num; - -#ifdef CAFFE_MALIS_DEBUG - {Dtype* prob_rd = bottom[0]->mutable_cpu_data(); - cv::Mat wrapped(bottom[0]->height(), bottom[0]->width(), - cv::DataType::type, - prob_rd, sizeof(Dtype) * bottom[0]->width()); - cv::imshow("test", wrapped);} -#endif + const Dtype* affinity = bottom[1]->cpu_data(); - // Connection data - std::vector conn_data_pos( - 2 * bottom[0]->height() * bottom[0]->width()); - std::vector conn_data_neg( - 2 * bottom[0]->height() * bottom[0]->width()); + Dtype* affinity_data_pos = affinity_pos_.mutable_cpu_data(); + Dtype* affinity_data_neg = affinity_neg_.mutable_cpu_data(); - // Construct positive and negative affinity graph #pragma omp parallel for - for (int i = 0; i < bottom[0]->height() - 1; ++i) { - for (int j = 0; j < bottom[0]->width() - 1; ++j) { - // X positive - conn_data_pos[i * bottom[0]->width() + j] = std::min( - affinity_prob_x[i * bottom[0]->width() + j], - affinity_x[i * bottom[0]->width() + j]); - - // X negative - conn_data_neg[i * bottom[0]->width() + j] = std::max( - affinity_prob_x[i * bottom[0]->width() + j], - affinity_x[i * bottom[0]->width() + j]); - - // Y positive - conn_data_pos[inner_num - + i * bottom[0]->width() + j] = std::min( - affinity_prob_y[i * bottom[0]->width() + j], - affinity_y[i * bottom[0]->width() + j]); - - // Y negative - conn_data_neg[inner_num - + i * bottom[0]->width() + j] = std::max( - affinity_prob_y[i * bottom[0]->width() + j], - affinity_y[i * bottom[0]->width() + j]); - } + for (int i = 0; i < bottom[0]->count(); ++i) { + affinity_data_pos[i] = std::min(affinity_prob[i], affinity[i]); + affinity_data_neg[i] = std::max(affinity_prob[i], affinity[i]); + } + + size_t batch_offset = 1; + for (int i = 1; i < bottom[0]->shape().size(); ++i) { + batch_offset *= bottom[0]->shape()[i]; } Dtype loss = 0; - Dtype loss_out = 0; - Dtype classerr_out = 0; - Dtype rand_index_out = 0; +#pragma omp parallel for reduction(+:loss) + for (int batch = 0; batch < bottom[0]->shape()[0]; ++batch) { + Dtype loss_out = 0; + Dtype classerr_out = 0; + Dtype rand_index_out = 0; - caffe_set(dloss_neg_.count(), Dtype(0.0), dloss_neg_.mutable_cpu_data()); - caffe_set(dloss_pos_.count(), Dtype(0.0), dloss_pos_.mutable_cpu_data()); + caffe_set(dloss_neg_.count(), Dtype(0.0), dloss_neg_.mutable_cpu_data()); + caffe_set(dloss_pos_.count(), Dtype(0.0), dloss_pos_.mutable_cpu_data()); - Malis(&conn_data_neg[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], - &nhood_dims_[0], bottom[2]->cpu_data(), - false, dloss_neg_.mutable_cpu_data(), - &loss_out, &classerr_out, &rand_index_out, 0.3, 0.5); + Malis(&affinity_data_neg[batch_offset * batch], conn_num_dims_, + &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], + bottom[2]->cpu_data() + batch_offset * batch, false, + dloss_neg_.mutable_cpu_data() + batch_offset * batch, &loss_out, + &classerr_out, &rand_index_out, 0.3, 0.5); - loss += loss_out; + loss += loss_out; - Malis(&conn_data_pos[0], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], - &nhood_dims_[0], bottom[2]->cpu_data(), - true, dloss_pos_.mutable_cpu_data(), - &loss_out, &classerr_out, &rand_index_out, 0.3, 0.5); + Malis(&affinity_data_pos[batch_offset * batch], conn_num_dims_, + &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], + bottom[2]->cpu_data() + batch_offset * batch, true, + dloss_pos_.mutable_cpu_data() + batch_offset * batch, &loss_out, + &classerr_out, &rand_index_out, 0.3, 0.5); - loss += loss_out; + loss += loss_out; + } - top[0]->mutable_cpu_data()[0] = loss; + // Normalized loss over batch size + top[0]->mutable_cpu_data()[0] = loss / ((Dtype)bottom[0]->shape()[0]); if (top.size() == 2) { top[1]->ShareData(*(bottom[0])); @@ -424,7 +444,6 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (propagate_down[0]) { - // Diff to propagate to (size w * h * c) Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const Dtype* dloss_pos_data = dloss_pos_.cpu_data(); const Dtype* dloss_neg_data = dloss_neg_.cpu_data(); @@ -432,18 +451,9 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, // Clear the diff caffe_set(bottom[0]->count(), Dtype(0.0), bottom_diff); - int inner_num = bottom[0]->height() * bottom[0]->width(); - #pragma omp parallel for - for (int i = 0; i < bottom[0]->height(); ++i) { - for (int j = 0; j < bottom[0]->width(); ++j) { - bottom_diff[i * bottom[0]->width() + j] = - dloss_pos_data[i * bottom[0]->width() + j] + - dloss_neg_data[i * bottom[0]->width() + j]; - bottom_diff[inner_num + i * bottom[0]->width() + j] = - dloss_pos_data[inner_num + i * bottom[0]->width() + j] + - dloss_neg_data[inner_num + i * bottom[0]->width() + j]; - } + for (int i = 0; i < bottom[0]->count(); ++i) { + bottom_diff[i] = dloss_pos_data[i] + dloss_neg_data[i]; } } } From 202473d56a06dd6d89292af21631f5479f48ab5a Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 15 Oct 2015 02:32:50 +0200 Subject: [PATCH 193/254] Memory Data Layer fix. --- src/caffe/layers/memory_data_layer.cpp | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index c35ba47d5f1..7d32f566105 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -15,15 +15,12 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, const vector*>& top) { MemoryDataParameter mem_param = this->layer_param_.memory_data_param(); - // Old 4D parameters - if (mem_param.has_batch_size() && mem_param.has_channels() - && mem_param.has_height() && mem_param.has_width()) { - shape_.clear(); - shape_.push_back(mem_param.batch_size()); - shape_.push_back(mem_param.channels()); - shape_.push_back(mem_param.height()); - shape_.push_back(mem_param.width()); - } + // Old 4D (2D spatial) parameters + shape_.clear(); + shape_.push_back(mem_param.batch_size()); + shape_.push_back(mem_param.channels()); + shape_.push_back(mem_param.height()); + shape_.push_back(mem_param.width()); // New ND parameters if (mem_param.dim_size() > 0) { @@ -35,6 +32,7 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, // Labels have shape batch_size, 1, 1, ..., 1 label_shape_.push_back(shape_[0]); + size_ = 1; // All sizes except the batch index for (int i = 1; i < shape_.size(); ++i) { size_ *= shape_[i]; @@ -59,8 +57,12 @@ void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { CHECK_GT(num, 0)<< "There is no datum to add."; CHECK_EQ(num % shape_[0], 0)<< "The added data must be a multiple of the batch size."; - added_data_.Reshape(shape_); - added_label_.Reshape(num, 1, 1, 1); + vector added_shape = shape_; + added_shape[0] = num; + added_data_.Reshape(added_shape); + vector added_label_shape = label_shape_; + added_label_shape[0] = num; + added_label_.Reshape(added_label_shape); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(datum_vector, &added_data_); // Copy Labels @@ -89,7 +91,7 @@ void MemoryDataLayer::AddMatVector(const vector& mat_vector, added_data_.Reshape(added_shape); vector added_label_shape = label_shape_; added_label_shape[0] = num; - added_label_.Reshape(label_shape_); + added_label_.Reshape(added_label_shape); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(mat_vector, &added_data_); // Copy Labels From 288e98150fbde2e89979b6a32e89e74301fda07c Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 16 Oct 2015 03:26:10 +0200 Subject: [PATCH 194/254] LINT fix. --- src/caffe/layers/batch_reindex_layer.cu | 13 +++++++------ src/caffe/layers/malis_loss_layer.cpp | 3 ++- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/caffe/layers/batch_reindex_layer.cu b/src/caffe/layers/batch_reindex_layer.cu index cb8436ff6c8..9d7ead651bd 100644 --- a/src/caffe/layers/batch_reindex_layer.cu +++ b/src/caffe/layers/batch_reindex_layer.cu @@ -33,9 +33,11 @@ void BatchReindexLayer::Forward_gpu(const vector*>& bottom, if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - BRForward CUDA_KERNEL(CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS) ( + BRForward CUDA_KERNEL(CAFFE_GET_BLOCKS(threads), + CAFFE_CUDA_NUM_THREADS) ( top[0]->count(), bottom[0]->count() / bottom[0]->shape(0), - bottom[0]->gpu_data(), bottom[1]->gpu_data(), top[0]->mutable_gpu_data()); + bottom[0]->gpu_data(), bottom[1]->gpu_data(), + top[0]->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { @@ -55,7 +57,6 @@ void BatchReindexLayer::Forward_gpu(const vector*>& bottom, ctx.get_queue()); #endif // USE_GREENTEA } - } #ifdef USE_CUDA @@ -124,12 +125,13 @@ void BatchReindexLayer::Backward_gpu( if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) - BRBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS) ( + BRBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(threads), + CAFFE_CUDA_NUM_THREADS) ( bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0), top[0]->gpu_diff(), top_indexes.gpu_data(), begins.gpu_data(), counts.gpu_data(), bottom[0]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; -#endif //USE_CUDA +#endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( @@ -149,7 +151,6 @@ void BatchReindexLayer::Backward_gpu( ctx.get_queue()); #endif // USE_GREENTEA } - } INSTANTIATE_LAYER_GPU_FUNCS(BatchReindexLayer); diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index aba3f2c05b8..e9ce44bff42 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -432,7 +432,8 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, } // Normalized loss over batch size - top[0]->mutable_cpu_data()[0] = loss / ((Dtype)bottom[0]->shape()[0]); + top[0]->mutable_cpu_data()[0] = loss + / (static_cast(bottom[0]->shape()[0])); if (top.size() == 2) { top[1]->ShareData(*(bottom[0])); From dc1168107f6af6edba1a9cb2d0fb0c7a81d6b86d Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 21 Oct 2015 02:16:52 +0200 Subject: [PATCH 195/254] 64 bit indexing support throughout Caffe. --- examples/cifar10/convert_cifar_data.cpp | 24 +- .../cpp_classification/classification.cpp | 40 +- examples/mnist/convert_mnist_data.cpp | 7 +- .../siamese/convert_mnist_siamese_data.cpp | 10 +- include/caffe/blob.hpp | 75 +- include/caffe/caffe.hpp | 2 + include/caffe/common.hpp | 9 +- include/caffe/common_layers.hpp | 134 +- include/caffe/cuda/cuda_dev_ptr.hpp | 2 +- include/caffe/data_layers.hpp | 76 +- include/caffe/data_reader.hpp | 2 +- include/caffe/data_transformer.hpp | 10 +- include/caffe/definitions.hpp | 14 + include/caffe/dev_ptr.hpp | 5 +- include/caffe/device.hpp | 12 +- include/caffe/filler.hpp | 34 +- include/caffe/greentea/greentea_im2col.hpp | 68 +- .../greentea/greentea_math_functions.hpp | 142 +- include/caffe/internal_thread.hpp | 4 +- include/caffe/layer.hpp | 49 +- include/caffe/loss_layers.hpp | 70 +- include/caffe/net.hpp | 72 +- include/caffe/neuron_layers.hpp | 12 +- include/caffe/opencl/ocl_dev_ptr.hpp | 4 +- include/caffe/parallel.hpp | 4 +- include/caffe/sgd_solvers.hpp | 148 ++ include/caffe/solver.hpp | 186 +- include/caffe/solver_factory.hpp | 137 ++ include/caffe/syncedmem.hpp | 10 +- .../caffe/test/test_gradient_check_util.hpp | 60 +- include/caffe/util/blocking_queue.hpp | 2 +- include/caffe/util/cudnn.hpp | 20 +- include/caffe/util/db_lmdb.hpp | 4 +- include/caffe/util/device_alternate.hpp | 4 +- include/caffe/util/im2col.hpp | 93 +- include/caffe/util/insert_splits.hpp | 6 +- include/caffe/util/io.hpp | 26 +- include/caffe/util/math_functions.hpp | 133 +- include/caffe/util/mkl_alternate.hpp | 36 +- include/caffe/vision_layers.hpp | 226 +-- log.txt | 1 + matlab/+caffe/+test/test_io.m | 18 + matlab/+caffe/private/caffe_.cpp | 54 +- src/caffe/blob.cpp | 92 +- src/caffe/common.cpp | 22 +- src/caffe/cuda/cuda_dev_ptr.cpp | 2 +- src/caffe/data_reader.cpp | 10 +- src/caffe/data_transformer.cpp | 206 +- src/caffe/device.cpp | 12 +- src/caffe/greentea/cl_headers/header.cl | 8 + src/caffe/greentea/cl_kernels.cpp | 100 +- src/caffe/greentea/cl_kernels/activation.cl | 56 +- src/caffe/greentea/cl_kernels/auxiliary.cl | 4 +- .../greentea/cl_kernels/batch_reindex.cl | 22 +- src/caffe/greentea/cl_kernels/bnll.cl | 8 +- src/caffe/greentea/cl_kernels/channel.cl | 62 +- src/caffe/greentea/cl_kernels/concat.cl | 22 +- .../greentea/cl_kernels/contrastive_loss.cl | 10 +- src/caffe/greentea/cl_kernels/dropout.cl | 14 +- src/caffe/greentea/cl_kernels/eltwise.cl | 18 +- src/caffe/greentea/cl_kernels/embed.cl | 48 +- src/caffe/greentea/cl_kernels/fillbuffer.cl | 12 +- src/caffe/greentea/cl_kernels/im2col.cl | 76 +- src/caffe/greentea/cl_kernels/im2col_nd.cl | 100 +- src/caffe/greentea/cl_kernels/im2col_ndsk.cl | 100 +- src/caffe/greentea/cl_kernels/im2col_sk.cl | 108 +- src/caffe/greentea/cl_kernels/lrn.cl | 54 +- src/caffe/greentea/cl_kernels/math.cl | 94 +- src/caffe/greentea/cl_kernels/mergecrop.cl | 104 +- src/caffe/greentea/cl_kernels/pooling.cl | 294 +-- src/caffe/greentea/cl_kernels/pooling_nd.cl | 106 +- src/caffe/greentea/cl_kernels/pooling_sk.cl | 226 +-- src/caffe/greentea/cl_kernels/slice.cl | 22 +- src/caffe/greentea/cl_kernels/softmax_loss.cl | 38 +- src/caffe/greentea/cl_kernels/tile.cl | 36 +- src/caffe/greentea/greentea_im2col.cpp | 306 +-- .../greentea/greentea_math_functions.cpp | 572 +++--- src/caffe/internal_thread.cpp | 6 +- src/caffe/layers/absval_layer.cpp | 4 +- src/caffe/layers/absval_layer.cu | 4 +- src/caffe/layers/accuracy_layer.cpp | 30 +- src/caffe/layers/affinity_layer.cpp | 30 +- src/caffe/layers/argmax_layer.cpp | 16 +- src/caffe/layers/base_conv_layer.cpp | 106 +- src/caffe/layers/base_data_layer.cpp | 6 +- src/caffe/layers/batch_reindex_layer.cpp | 26 +- src/caffe/layers/batch_reindex_layer.cu | 32 +- src/caffe/layers/bnll_layer.cpp | 8 +- src/caffe/layers/bnll_layer.cu | 8 +- src/caffe/layers/concat_layer.cpp | 32 +- src/caffe/layers/concat_layer.cu | 44 +- .../layers/connected_component_layer.cpp | 26 +- src/caffe/layers/contrastive_loss_layer.cpp | 20 +- src/caffe/layers/contrastive_loss_layer.cu | 18 +- src/caffe/layers/conv_layer.cpp | 24 +- src/caffe/layers/conv_layer.cu | 12 +- src/caffe/layers/cudnn_conv_layer.cpp | 64 +- src/caffe/layers/cudnn_conv_layer.cu | 8 +- src/caffe/layers/cudnn_lcn_layer.cpp | 2 +- src/caffe/layers/cudnn_relu_layer.cpp | 8 +- src/caffe/layers/cudnn_sigmoid_layer.cpp | 8 +- src/caffe/layers/cudnn_softmax_layer.cpp | 8 +- src/caffe/layers/cudnn_tanh_layer.cpp | 8 +- src/caffe/layers/data_layer.cpp | 18 +- src/caffe/layers/deconv_layer.cpp | 24 +- src/caffe/layers/deconv_layer.cu | 10 +- src/caffe/layers/dropout_layer.cpp | 14 +- src/caffe/layers/dropout_layer.cu | 26 +- src/caffe/layers/dummy_data_layer.cpp | 24 +- src/caffe/layers/eltwise_layer.cpp | 30 +- src/caffe/layers/eltwise_layer.cu | 40 +- src/caffe/layers/embed_layer.cpp | 20 +- src/caffe/layers/embed_layer.cu | 28 +- src/caffe/layers/euclidean_loss_layer.cpp | 4 +- src/caffe/layers/euclidean_loss_layer.cu | 4 +- src/caffe/layers/exp_layer.cpp | 4 +- src/caffe/layers/exp_layer.cu | 4 +- src/caffe/layers/filter_layer.cpp | 44 +- src/caffe/layers/filter_layer.cu | 28 +- src/caffe/layers/flatten_layer.cpp | 12 +- src/caffe/layers/hdf5_data_layer.cpp | 34 +- src/caffe/layers/hdf5_data_layer.cu | 8 +- src/caffe/layers/hdf5_output_layer.cpp | 6 +- src/caffe/layers/hdf5_output_layer.cu | 6 +- src/caffe/layers/hinge_loss_layer.cpp | 24 +- src/caffe/layers/im2col_layer.cpp | 48 +- src/caffe/layers/im2col_layer.cu | 10 +- src/caffe/layers/image_data_layer.cpp | 34 +- src/caffe/layers/infogain_loss_layer.cpp | 24 +- src/caffe/layers/inner_product_layer.cpp | 16 +- src/caffe/layers/log_layer.cpp | 4 +- src/caffe/layers/log_layer.cu | 4 +- src/caffe/layers/loss_layer.cpp | 2 +- src/caffe/layers/lrn_layer.cpp | 18 +- src/caffe/layers/lrn_layer.cu | 54 +- src/caffe/layers/malis_loss_layer.cpp | 44 +- src/caffe/layers/memory_data_layer.cpp | 26 +- src/caffe/layers/mergecrop_layer.cpp | 14 +- src/caffe/layers/mergecrop_layer.cu | 112 +- .../multinomial_logistic_loss_layer.cpp | 16 +- src/caffe/layers/mvn_layer.cpp | 8 +- src/caffe/layers/mvn_layer.cu | 8 +- src/caffe/layers/pooling_layer.cpp | 214 +-- src/caffe/layers/pooling_layer.cu | 708 +++---- src/caffe/layers/power_layer.cpp | 4 +- src/caffe/layers/power_layer.cu | 4 +- src/caffe/layers/prelu_layer.cpp | 38 +- src/caffe/layers/prelu_layer.cu | 40 +- src/caffe/layers/reduction_layer.cpp | 8 +- src/caffe/layers/reduction_layer.cu | 18 +- src/caffe/layers/relu_layer.cpp | 8 +- src/caffe/layers/relu_layer.cu | 8 +- src/caffe/layers/reshape_layer.cpp | 42 +- .../sigmoid_cross_entropy_loss_layer.cpp | 10 +- .../sigmoid_cross_entropy_loss_layer.cu | 4 +- src/caffe/layers/sigmoid_layer.cpp | 8 +- src/caffe/layers/sigmoid_layer.cu | 8 +- src/caffe/layers/silence_layer.cpp | 2 +- src/caffe/layers/silence_layer.cu | 2 +- src/caffe/layers/slice_layer.cpp | 48 +- src/caffe/layers/slice_layer.cu | 40 +- src/caffe/layers/softmax_layer.cpp | 24 +- src/caffe/layers/softmax_layer.cu | 66 +- src/caffe/layers/softmax_loss_layer.cpp | 22 +- src/caffe/layers/softmax_loss_layer.cu | 64 +- src/caffe/layers/split_layer.cpp | 6 +- src/caffe/layers/split_layer.cu | 6 +- src/caffe/layers/spp_layer.cpp | 28 +- src/caffe/layers/tanh_layer.cpp | 8 +- src/caffe/layers/tanh_layer.cu | 8 +- src/caffe/layers/threshold_layer.cpp | 4 +- src/caffe/layers/threshold_layer.cu | 4 +- src/caffe/layers/tile_layer.cpp | 10 +- src/caffe/layers/tile_layer.cu | 36 +- src/caffe/layers/window_data_layer.cpp | 130 +- src/caffe/net.cpp | 196 +- src/caffe/opencl/ocl_dev_ptr.cpp | 2 +- src/caffe/parallel.cpp | 6 +- src/caffe/proto/caffe.proto | 253 ++- src/caffe/solver.cpp | 1185 +----------- src/caffe/solvers/adadelta_solver.cpp | 243 +++ src/caffe/solvers/adagrad_solver.cpp | 129 ++ src/caffe/solvers/adam_solver.cpp | 144 ++ src/caffe/solvers/nesterov_solver.cpp | 107 ++ src/caffe/solvers/rmsprop_solver.cpp | 126 ++ src/caffe/solvers/sgd_solver.cpp | 416 +++++ src/caffe/syncedmem.cpp | 4 +- src/caffe/test/test_accuracy_layer.cpp | 92 +- src/caffe/test/test_argmax_layer.cpp | 92 +- src/caffe/test/test_batch_reindex_layer.cpp | 30 +- src/caffe/test/test_blob.cpp | 6 +- src/caffe/test/test_concat_layer.cpp | 32 +- .../test/test_contrastive_loss_layer.cpp | 18 +- src/caffe/test/test_convolution_layer.cpp | 136 +- src/caffe/test/test_convolution_nd_layer.cpp | 24 +- src/caffe/test/test_data_layer.cpp | 86 +- src/caffe/test/test_data_transformer.cpp | 180 +- src/caffe/test/test_db.cpp | 2 +- src/caffe/test/test_deconvolution_layer.cpp | 26 +- src/caffe/test/test_dummy_data_layer.cpp | 34 +- src/caffe/test/test_eltwise_layer.cpp | 16 +- src/caffe/test/test_embed_layer.cpp | 38 +- src/caffe/test/test_filler.cpp | 32 +- src/caffe/test/test_filter_layer.cpp | 10 +- src/caffe/test/test_flatten_layer.cpp | 2 +- src/caffe/test/test_gradient_based_solver.cpp | 1663 ++++++++--------- src/caffe/test/test_hdf5_output_layer.cpp | 16 +- src/caffe/test/test_hdf5data_layer.cpp | 32 +- src/caffe/test/test_hinge_loss_layer.cpp | 2 +- src/caffe/test/test_im2col_kernel.cu | 90 +- src/caffe/test/test_im2col_layer.cpp | 4 +- src/caffe/test/test_image_data_layer.cpp | 20 +- src/caffe/test/test_infogain_loss_layer.cpp | 2 +- src/caffe/test/test_inner_product_layer.cpp | 8 +- src/caffe/test/test_io.cpp | 58 +- src/caffe/test/test_lrn_layer.cpp | 112 +- src/caffe/test/test_math_functions.cpp | 62 +- .../test/test_maxpool_dropout_layers.cpp | 10 +- src/caffe/test/test_memory_data_layer.cpp | 116 +- src/caffe/test/test_mergecrop_layer.cpp | 86 +- .../test_multinomial_logistic_loss_layer.cpp | 2 +- src/caffe/test/test_mvn_layer.cpp | 48 +- src/caffe/test/test_net.cpp | 82 +- src/caffe/test/test_neuron_layer.cpp | 72 +- src/caffe/test/test_pooling_layer.cpp | 108 +- src/caffe/test/test_pooling_nd_layer.cpp | 30 +- src/caffe/test/test_power_layer.cpp | 4 +- .../test/test_random_number_generator.cpp | 102 +- src/caffe/test/test_reduction_layer.cpp | 28 +- src/caffe/test/test_reshape_layer.cpp | 8 +- .../test_sigmoid_cross_entropy_loss_layer.cpp | 10 +- src/caffe/test/test_slice_layer.cpp | 44 +- src/caffe/test/test_softmax_layer.cpp | 24 +- .../test/test_softmax_with_loss_layer.cpp | 4 +- src/caffe/test/test_solver.cpp | 1 + src/caffe/test/test_solver_factory.cpp | 50 + src/caffe/test/test_split_layer.cpp | 2 +- src/caffe/test/test_stochastic_pooling.cpp | 40 +- src/caffe/test/test_syncedmem.cpp | 14 +- src/caffe/test/test_tanh_layer.cpp | 2 +- src/caffe/test/test_threshold_layer.cpp | 4 +- src/caffe/test/test_tile_layer.cpp | 50 +- src/caffe/test/test_upgrade_proto.cpp | 2 +- src/caffe/test/test_util_blas.cpp | 24 +- src/caffe/util/blocking_queue.cpp | 2 +- src/caffe/util/db_lmdb.cpp | 4 +- src/caffe/util/hdf5.cpp | 16 +- src/caffe/util/im2col.cpp | 168 +- src/caffe/util/im2col.cu | 581 +++--- src/caffe/util/insert_splits.cpp | 56 +- src/caffe/util/io.cpp | 42 +- src/caffe/util/math_functions.cpp | 218 +-- src/caffe/util/math_functions.cu | 136 +- src/caffe/util/upgrade_proto.cpp | 58 +- src/gtest/gtest_main.cc | 2 +- tools/caffe.cpp | 24 +- tools/compute_image_mean.cpp | 24 +- tools/convert_imageset.cpp | 20 +- tools/extra/extract_seconds.py | 8 +- tools/extra/plot_training_log.py.example | 2 +- tools/extra/resize_and_crop_images.py | 8 +- tools/extract_features.cpp | 36 +- 262 files changed, 8211 insertions(+), 7948 deletions(-) create mode 100644 include/caffe/definitions.hpp create mode 100644 include/caffe/sgd_solvers.hpp create mode 100644 include/caffe/solver_factory.hpp create mode 100644 log.txt create mode 100644 matlab/+caffe/+test/test_io.m create mode 100644 src/caffe/solvers/adadelta_solver.cpp create mode 100644 src/caffe/solvers/adagrad_solver.cpp create mode 100644 src/caffe/solvers/adam_solver.cpp create mode 100644 src/caffe/solvers/nesterov_solver.cpp create mode 100644 src/caffe/solvers/rmsprop_solver.cpp create mode 100644 src/caffe/solvers/sgd_solver.cpp create mode 100644 src/caffe/test/test_solver_factory.cpp diff --git a/examples/cifar10/convert_cifar_data.cpp b/examples/cifar10/convert_cifar_data.cpp index f4c42e4d2e7..5e25447388c 100644 --- a/examples/cifar10/convert_cifar_data.cpp +++ b/examples/cifar10/convert_cifar_data.cpp @@ -22,12 +22,12 @@ using boost::scoped_ptr; using std::string; namespace db = caffe::db; -const int kCIFARSize = 32; -const int kCIFARImageNBytes = 3072; -const int kCIFARBatchSize = 10000; -const int kCIFARTrainBatches = 5; +const int_tp kCIFARSize = 32; +const int_tp kCIFARImageNBytes = 3072; +const int_tp kCIFARBatchSize = 10000; +const int_tp kCIFARTrainBatches = 5; -void read_image(std::ifstream* file, int* label, char* buffer) { +void read_image(std::ifstream* file, int_tp* label, char* buffer) { char label_char; file->read(&label_char, 1); *label = label_char; @@ -41,7 +41,7 @@ void convert_dataset(const string& input_folder, const string& output_folder, train_db->Open(output_folder + "/cifar10_train_" + db_type, db::NEW); scoped_ptr txn(train_db->NewTransaction()); // Data buffer - int label; + int_tp label; char str_buffer[kCIFARImageNBytes]; Datum datum; datum.set_channels(3); @@ -49,18 +49,18 @@ void convert_dataset(const string& input_folder, const string& output_folder, datum.set_width(kCIFARSize); LOG(INFO) << "Writing Training data"; - for (int fileid = 0; fileid < kCIFARTrainBatches; ++fileid) { + for (int_tp fileid = 0; fileid < kCIFARTrainBatches; ++fileid) { // Open files LOG(INFO) << "Training Batch " << fileid + 1; - snprintf(str_buffer, kCIFARImageNBytes, "/data_batch_%d.bin", fileid + 1); + snprintf(str_buffer, kCIFARImageNBytes, "/data_batch_%zd.bin", fileid + 1); std::ifstream data_file((input_folder + str_buffer).c_str(), std::ios::in | std::ios::binary); CHECK(data_file) << "Unable to open train file #" << fileid + 1; - for (int itemid = 0; itemid < kCIFARBatchSize; ++itemid) { + for (int_tp itemid = 0; itemid < kCIFARBatchSize; ++itemid) { read_image(&data_file, &label, str_buffer); datum.set_label(label); datum.set_data(str_buffer, kCIFARImageNBytes); - int length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", + int_tp length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", fileid * kCIFARBatchSize + itemid); string out; CHECK(datum.SerializeToString(&out)); @@ -78,11 +78,11 @@ void convert_dataset(const string& input_folder, const string& output_folder, std::ifstream data_file((input_folder + "/test_batch.bin").c_str(), std::ios::in | std::ios::binary); CHECK(data_file) << "Unable to open test file."; - for (int itemid = 0; itemid < kCIFARBatchSize; ++itemid) { + for (int_tp itemid = 0; itemid < kCIFARBatchSize; ++itemid) { read_image(&data_file, &label, str_buffer); datum.set_label(label); datum.set_data(str_buffer, kCIFARImageNBytes); - int length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", itemid); + int_tp length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", itemid); string out; CHECK(datum.SerializeToString(&out)); txn->Put(string(str_buffer, length), out); diff --git a/examples/cpp_classification/classification.cpp b/examples/cpp_classification/classification.cpp index de48fb692c8..f2aec825e03 100644 --- a/examples/cpp_classification/classification.cpp +++ b/examples/cpp_classification/classification.cpp @@ -25,7 +25,7 @@ class Classifier { const string& mean_file, const string& label_file); - std::vector Classify(const cv::Mat& img, int N = 5); + std::vector Classify(const cv::Mat& img, int_tp N = 5); private: void SetMean(const string& mean_file); @@ -40,7 +40,7 @@ class Classifier { private: shared_ptr > net_; cv::Size input_geometry_; - int num_channels_; + int_tp num_channels_; cv::Mat mean_; std::vector labels_; }; @@ -83,33 +83,33 @@ Classifier::Classifier(const string& model_file, << "Number of labels is different from the output layer dimension."; } -static bool PairCompare(const std::pair& lhs, - const std::pair& rhs) { +static bool PairCompare(const std::pair& lhs, + const std::pair& rhs) { return lhs.first > rhs.first; } /* Return the indices of the top N values of vector v. */ -static std::vector Argmax(const std::vector& v, int N) { - std::vector > pairs; - for (size_t i = 0; i < v.size(); ++i) +static std::vector Argmax(const std::vector& v, int_tp N) { + std::vector > pairs; + for (uint_tp i = 0; i < v.size(); ++i) pairs.push_back(std::make_pair(v[i], i)); std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(), PairCompare); - std::vector result; - for (int i = 0; i < N; ++i) + std::vector result; + for (int_tp i = 0; i < N; ++i) result.push_back(pairs[i].second); return result; } /* Return the top N predictions. */ -std::vector Classifier::Classify(const cv::Mat& img, int N) { +std::vector Classifier::Classify(const cv::Mat& img, int_tp N) { std::vector output = Predict(img); - N = std::min(labels_.size(), N); - std::vector maxN = Argmax(output, N); + N = std::min(labels_.size(), N); + std::vector maxN = Argmax(output, N); std::vector predictions; - for (int i = 0; i < N; ++i) { - int idx = maxN[i]; + for (int_tp i = 0; i < N; ++i) { + int_tp idx = maxN[i]; predictions.push_back(std::make_pair(labels_[idx], output[idx])); } @@ -130,7 +130,7 @@ void Classifier::SetMean(const string& mean_file) { /* The format of the mean file is planar 32-bit float BGR or grayscale. */ std::vector channels; float* data = mean_blob.mutable_cpu_data(); - for (int i = 0; i < num_channels_; ++i) { + for (int_tp i = 0; i < num_channels_; ++i) { /* Extract an individual channel. */ cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data); channels.push_back(channel); @@ -176,10 +176,10 @@ std::vector Classifier::Predict(const cv::Mat& img) { void Classifier::WrapInputLayer(std::vector* input_channels) { Blob* input_layer = net_->input_blobs()[0]; - int width = input_layer->width(); - int height = input_layer->height(); + int_tp width = input_layer->width(); + int_tp height = input_layer->height(); float* input_data = input_layer->mutable_cpu_data(); - for (int i = 0; i < input_layer->channels(); ++i) { + for (int_tp i = 0; i < input_layer->channels(); ++i) { cv::Mat channel(height, width, CV_32FC1, input_data); input_channels->push_back(channel); input_data += width * height; @@ -252,14 +252,14 @@ int main(int argc, char** argv) { std::vector predictions = classifier.Classify(img); /* Print the top N predictions. */ - for (size_t i = 0; i < predictions.size(); ++i) { + for (uint_tp i = 0; i < predictions.size(); ++i) { Prediction p = predictions[i]; std::cout << std::fixed << std::setprecision(4) << p.second << " - \"" << p.first << "\"" << std::endl; } } #else -int main(int argc, char** argv) { +int_tp main(int_tp argc, char** argv) { LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV."; } #endif // USE_OPENCV diff --git a/examples/mnist/convert_mnist_data.cpp b/examples/mnist/convert_mnist_data.cpp index 8f29bafde85..bbc8011eb84 100644 --- a/examples/mnist/convert_mnist_data.cpp +++ b/examples/mnist/convert_mnist_data.cpp @@ -22,6 +22,7 @@ #include // NOLINT(readability/streams) #include +#include "caffe/definitions.hpp" #include "caffe/proto/caffe.pb.h" #if defined(USE_LEVELDB) && defined(USE_LMDB) @@ -107,8 +108,8 @@ void convert_dataset(const char* image_filename, const char* label_filename, // Storing to db char label; char* pixels = new char[rows * cols]; - int count = 0; - const int kMaxKeyLength = 10; + int_tp count = 0; + const int_tp kMaxKeyLength = 10; char key_cstr[kMaxKeyLength]; string value; @@ -118,7 +119,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int item_id = 0; item_id < num_items; ++item_id) { + for (int_tp item_id = 0; item_id < num_items; ++item_id) { image_file.read(pixels, rows * cols); label_file.read(&label, 1); datum.set_data(pixels, rows*cols); diff --git a/examples/siamese/convert_mnist_siamese_data.cpp b/examples/siamese/convert_mnist_siamese_data.cpp index ad08036fb08..2881dd345e0 100644 --- a/examples/siamese/convert_mnist_siamese_data.cpp +++ b/examples/siamese/convert_mnist_siamese_data.cpp @@ -75,7 +75,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, char label_i; char label_j; char* pixels = new char[2 * rows * cols]; - const int kMaxKeyLength = 10; + const int_tp kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; @@ -85,9 +85,9 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair - int j = caffe::caffe_rng_rand() % num_items; + for (int_tp itemid = 0; itemid < num_items; ++itemid) { + int_tp i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int_tp j = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, @@ -124,7 +124,7 @@ int main(int argc, char** argv) { return 0; } #else -int main(int argc, char** argv) { +int_tp main(int_tp argc, char** argv) { LOG(FATAL) << "This example requires LevelDB; compile with USE_LEVELDB."; } #endif // USE_LEVELDB diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index 65d2f4c625f..b623ed4e338 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -6,6 +6,7 @@ #include #include "caffe/common.hpp" +#include "caffe/definitions.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/syncedmem.hpp" @@ -17,7 +18,7 @@ #include "caffe/greentea/greentea_math_functions.hpp" #endif -const int kMaxBlobAxes = 32; +const int_tp kMaxBlobAxes = 32; namespace caffe { @@ -47,10 +48,10 @@ class Blob { capacity_(0), device_(device_context) { } - explicit Blob(const int num, const int channels, const int height, - const int width, device *device_context = + explicit Blob(const int_tp num, const int_tp channels, const int_tp height, + const int_tp width, device *device_context = Caffe::GetDefaultDevice()); - explicit Blob(const vector& shape, device *device_context = + explicit Blob(const vector& shape, device *device_context = Caffe::GetDefaultDevice()); /** @@ -69,20 +70,20 @@ class Blob { * * Reshape returns true if new memory was allocated. */ - bool Reshape(const vector& shape); + bool Reshape(const vector& shape); bool Reshape(const BlobShape& shape); - bool Reshape(const int num, const int channels, const int height, - const int width); + bool Reshape(const int_tp num, const int_tp channels, const int_tp height, + const int_tp width); bool ReshapeLike(const Blob& other); inline string shape_string() const { ostringstream stream; - for (int i = 0; i < shape_.size(); ++i) { + for (int_tp i = 0; i < shape_.size(); ++i) { stream << shape_[i] << " "; } stream << "(" << count_ << ")"; return stream.str(); } - inline const vector& shape() const { + inline const vector& shape() const { return shape_; } /** @@ -93,13 +94,13 @@ class Blob { * "canonicalized" using CanonicalAxisIndex. * Dies on out of range index. */ - inline int shape(int index) const { + inline int_tp shape(int_tp index) const { return shape_[CanonicalAxisIndex(index)]; } - inline int num_axes() const { + inline int_tp num_axes() const { return shape_.size(); } - inline int count() const { + inline int_tp count() const { return count_; } @@ -111,14 +112,14 @@ class Blob { * * @param end_axis The first axis to exclude from the slice. */ - inline int count(int start_axis, int end_axis) const { + inline int_tp count(int_tp start_axis, int_tp end_axis) const { CHECK_LE(start_axis, end_axis); CHECK_GE(start_axis, 0); CHECK_GE(end_axis, 0); CHECK_LE(start_axis, num_axes()); CHECK_LE(end_axis, num_axes()); - int count = 1; - for (int i = start_axis; i < end_axis; ++i) { + int_tp count = 1; + for (int_tp i = start_axis; i < end_axis; ++i) { count *= shape(i); } return count; @@ -129,7 +130,7 @@ class Blob { * * @param start_axis The first axis to include in the slice. */ - inline int count(int start_axis) const { + inline int_tp count(int_tp start_axis) const { return count(start_axis, num_axes()); } @@ -144,7 +145,7 @@ class Blob { * the second to last if index == -2, etc. * Dies on out of range index. */ - inline int CanonicalAxisIndex(int axis_index) const { + inline int_tp CanonicalAxisIndex(int_tp axis_index) const { CHECK_GE(axis_index, -num_axes()) <<"axis " << axis_index << " out of range for " << num_axes() @@ -159,14 +160,14 @@ class Blob { } /// @brief Deprecated legacy shape accessor num: use shape(0) instead. - inline int num() const {return LegacyShape(0);} + inline int_tp num() const {return LegacyShape(0);} /// @brief Deprecated legacy shape accessor channels: use shape(1) instead. - inline int channels() const {return LegacyShape(1);} + inline int_tp channels() const {return LegacyShape(1);} /// @brief Deprecated legacy shape accessor height: use shape(2) instead. - inline int height() const {return LegacyShape(2);} + inline int_tp height() const {return LegacyShape(2);} /// @brief Deprecated legacy shape accessor width: use shape(3) instead. - inline int width() const {return LegacyShape(3);} - inline int LegacyShape(int index) const { + inline int_tp width() const {return LegacyShape(3);} + inline int_tp LegacyShape(int_tp index) const { CHECK_LE(num_axes(), 4) << "Cannot use legacy accessors on Blobs with > 4 axes."; CHECK_LT(index, 4); @@ -179,8 +180,8 @@ class Blob { } return shape(index); } - inline int offset(const int n, const int c = 0, const int h = 0, - const int w = 0) const { + inline int_tp offset(const int_tp n, const int_tp c = 0, const int_tp h = 0, + const int_tp w = 0) const { CHECK_GE(n, 0); CHECK_LE(n, num()); CHECK_GE(channels(), 0); @@ -192,10 +193,10 @@ class Blob { return ((n * channels() + c) * height() + h) * width() + w; } - inline int offset(const vector& indices) const { + inline int_tp offset(const vector& indices) const { CHECK_LE(indices.size(), num_axes()); - int offset = 0; - for (int i = 0; i < num_axes(); ++i) { + int_tp offset = 0; + for (int_tp i = 0; i < num_axes(); ++i) { offset *= shape(i); if (indices.size() > i) { CHECK_GE(indices[i], 0); @@ -217,21 +218,21 @@ class Blob { void CopyFrom(const Blob& source, bool copy_diff = false, bool reshape = false); - inline Dtype data_at(const int n, const int c, const int h, - const int w) const { + inline Dtype data_at(const int_tp n, const int_tp c, const int_tp h, + const int_tp w) const { return cpu_data()[offset(n, c, h, w)]; } - inline Dtype diff_at(const int n, const int c, const int h, - const int w) const { + inline Dtype diff_at(const int_tp n, const int_tp c, const int_tp h, + const int_tp w) const { return cpu_diff()[offset(n, c, h, w)]; } - inline Dtype data_at(const vector& index) const { + inline Dtype data_at(const vector& index) const { return cpu_data()[offset(index)]; } - inline Dtype diff_at(const vector& index) const { + inline Dtype diff_at(const vector& index) const { return cpu_diff()[offset(index)]; } @@ -247,7 +248,7 @@ class Blob { const Dtype* cpu_data() const; void set_cpu_data(Dtype* data); - const int* gpu_shape() const; + const int_tp* gpu_shape() const; const Dtype* gpu_data() const; const Dtype* cpu_diff() const; const Dtype* gpu_diff() const; @@ -303,9 +304,9 @@ class Blob { shared_ptr data_; shared_ptr diff_; shared_ptr shape_data_; - vector shape_; - int count_; - int capacity_; + vector shape_; + uint_tp count_; + uint_tp capacity_; device *device_; DISABLE_COPY_AND_ASSIGN(Blob); diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp index 60c4ba192d9..9e92125765d 100644 --- a/include/caffe/caffe.hpp +++ b/include/caffe/caffe.hpp @@ -6,6 +6,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" +#include "caffe/definitions.hpp" #include "caffe/filler.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/layer.hpp" @@ -14,6 +15,7 @@ #include "caffe/parallel.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/solver.hpp" +#include "caffe/solver_factory.hpp" #include "caffe/util/benchmark.hpp" #include "caffe/util/io.hpp" #include "caffe/vision_layers.hpp" diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index d6e63f98f86..4c7a238afa9 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -21,6 +21,7 @@ #include // pair #include +#include "caffe/definitions.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/util/device_alternate.hpp" @@ -122,7 +123,7 @@ class Caffe { class RNG { public: RNG(); - explicit RNG(unsigned int seed); + explicit RNG(size_t); explicit RNG(const RNG&); RNG& operator=(const RNG&); void* generator(); @@ -144,6 +145,9 @@ class Caffe { inline static curandGenerator_t curand_generator() { return Get().curand_generator_; } + inline static curandGenerator_t curand_generator64() { + return Get().curand_generator64_; + } #endif // USE_CUDA #endif // !CPU_ONLY @@ -156,7 +160,7 @@ class Caffe { // it personally but better to note it here in the header file. inline static void set_mode(Brew mode) { Get().mode_ = mode; } // Sets the random seed of both boost and curand - static void set_random_seed(const unsigned int seed); + static void set_random_seed(const size_t seed); // Sets the device. Since we have cublas and curand stuff, set device also // requires us to reset those values. static void SetDevice(const int device_id); @@ -194,6 +198,7 @@ class Caffe { #ifdef USE_CUDA cublasHandle_t cublas_handle_; curandGenerator_t curand_generator_; + curandGenerator_t curand_generator64_; #endif // USE_CUDA #endif // !CPU_ONLY shared_ptr random_generator_; diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index 21a27d759a8..6b61dd31715 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -37,7 +37,7 @@ class ArgMaxLayer : public Layer { * - out_max_val (\b optional bool, default false). * if set, output a vector of pairs (max_ind, max_val) unless axis is set then * output max_val along the specified axis. - * - axis (\b optional int). + * - axis (\b optional int_tp). * if set, maximise along the specified axis else maximise the flattened * trailing dimensions for each index of the first / num dimension. */ @@ -49,8 +49,8 @@ class ArgMaxLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "ArgMax"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: /** @@ -73,16 +73,16 @@ class ArgMaxLayer : public Layer { NOT_IMPLEMENTED; } bool out_max_val_; - size_t top_k_; + uint_tp top_k_; bool has_axis_; - int axis_; + int_tp axis_; }; /** * @brief Index into the input blob along its first axis. * * This layer can be used to select, reorder, and even replicate examples in a - * batch. The second blob is cast to int and treated as an index into the + * batch. The second blob is cast to int_tp and treated as an index into the * first axis of the first blob. */ template @@ -94,8 +94,8 @@ class BatchReindexLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "BatchReindex"; } - virtual inline int ExactNumBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 2; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: /** @@ -137,12 +137,12 @@ class BatchReindexLayer : public Layer { private: struct pair_sort_first { - bool operator()(const std::pair &left, - const std::pair &right) { + bool operator()(const std::pair &left, + const std::pair &right) { return left.first < right.first; } }; - void check_batch_reindex(int initial_num, int final_num, + void check_batch_reindex(int_tp initial_num, int_tp final_num, const Dtype* ridx_data); }; @@ -162,8 +162,8 @@ class ConcatLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Concat"; } - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp MinBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: /** @@ -214,10 +214,10 @@ class ConcatLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - int count_; - int num_concats_; - int concat_input_size_; - int concat_axis_; + int_tp count_; + int_tp num_concats_; + int_tp concat_input_size_; + int_tp concat_axis_; }; /** @@ -237,8 +237,8 @@ class EltwiseLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Eltwise"; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp MinBottomBlobs() const { return 2; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -252,7 +252,7 @@ class EltwiseLayer : public Layer { EltwiseParameter_EltwiseOp op_; vector coeffs_; - Blob max_idx_; + Blob max_idx_; bool stable_prod_grad_; }; @@ -275,8 +275,8 @@ class EmbedLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Embed"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -288,9 +288,9 @@ class EmbedLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - int M_; - int K_; - int N_; + int_tp M_; + int_tp K_; + int_tp N_; bool bias_term_; Blob bias_multiplier_; }; @@ -312,8 +312,8 @@ class FilterLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Filter"; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline int_tp MinBottomBlobs() const { return 2; } + virtual inline int_tp MinTopBlobs() const { return 1; } protected: /** @@ -355,7 +355,7 @@ class FilterLayer : public Layer { const vector& propagate_down, const vector*>& bottom); bool first_reshape_; - vector indices_to_forward_; + vector indices_to_forward_; }; /** @@ -377,8 +377,8 @@ class FlattenLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Flatten"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: /** @@ -422,8 +422,8 @@ class InnerProductLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "InnerProduct"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -435,9 +435,9 @@ class InnerProductLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - int M_; - int K_; - int N_; + int_tp M_; + int_tp K_; + int_tp N_; bool bias_term_; Blob bias_multiplier_; }; @@ -456,8 +456,8 @@ class MVNLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "MVN"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -493,8 +493,8 @@ class ReshapeLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Reshape"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -507,11 +507,11 @@ class ReshapeLayer : public Layer { const vector& propagate_down, const vector*>& bottom) {} /// @brief vector of axes indices whose dimensions we'll copy from the bottom - vector copy_axes_; + vector copy_axes_; /// @brief the index of the axis whose dimension we infer, or -1 if none - int inferred_axis_; + int_tp inferred_axis_; /// @brief the product of the "constant" output dimensions - int constant_count_; + int_tp constant_count_; }; /** @@ -532,8 +532,8 @@ class ReductionLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Reduction"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -550,11 +550,11 @@ class ReductionLayer : public Layer { /// @brief a scalar coefficient applied to all outputs Dtype coeff_; /// @brief the index of the first input axis to reduce - int axis_; + int_tp axis_; /// @brief the number of reductions performed - int num_; + int_tp num_; /// @brief the input size of each reduction - int dim_; + int_tp dim_; /// @brief a helper Blob used for summation (op_ == SUM) Blob sum_multiplier_; }; @@ -572,8 +572,8 @@ class SilenceLayer : public Layer { const vector*>& top) {} virtual inline const char* type() const { return "Silence"; } - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 0; } + virtual inline int_tp MinBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 0; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -602,8 +602,8 @@ class SoftmaxLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Softmax"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -615,9 +615,9 @@ class SoftmaxLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - int outer_num_; - int inner_num_; - int softmax_axis_; + int_tp outer_num_; + int_tp inner_num_; + int_tp softmax_axis_; /// sum_multiplier is used to carry out sum using BLAS Blob sum_multiplier_; /// scale is an intermediate Blob to hold temporary results. @@ -668,8 +668,8 @@ class SplitLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Split"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp MinTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -681,7 +681,7 @@ class SplitLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - int count_; + int_tp count_; }; /** @@ -701,8 +701,8 @@ class SliceLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Slice"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp MinTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -714,11 +714,11 @@ class SliceLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - int count_; - int num_slices_; - int slice_size_; - int slice_axis_; - vector slice_point_; + int_tp count_; + int_tp num_slices_; + int_tp slice_size_; + int_tp slice_axis_; + vector slice_point_; }; /** @@ -733,8 +733,8 @@ class TileLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Tile"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -747,7 +747,7 @@ class TileLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - unsigned int axis_, tiles_, outer_dim_, inner_dim_; + uint_tp axis_, tiles_, outer_dim_, inner_dim_; }; } // namespace caffe diff --git a/include/caffe/cuda/cuda_dev_ptr.hpp b/include/caffe/cuda/cuda_dev_ptr.hpp index 20b24682cb8..7c1ad7eeed1 100644 --- a/include/caffe/cuda/cuda_dev_ptr.hpp +++ b/include/caffe/cuda/cuda_dev_ptr.hpp @@ -12,7 +12,7 @@ template class cuda_dev_ptr : public dev_ptr { explicit cuda_dev_ptr(Type* ptr); void* get(); - std::ptrdiff_t off(); + int_tp off(); private: Type* raw_ptr_; diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 678b1cb79d3..81d1e83a602 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -75,7 +75,7 @@ class BasePrefetchingDataLayer : const vector*>& top); // Prefetches batches (asynchronously if to GPU memory) - static const int PREFETCH_COUNT = 3; + static const int_tp PREFETCH_COUNT = 3; protected: virtual void InternalThreadEntry(); @@ -98,9 +98,9 @@ class DataLayer : public BasePrefetchingDataLayer { // DataLayer uses DataReader instead for sharing for parallelism virtual inline bool ShareInParallel() const { return false; } virtual inline const char* type() const { return "Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } + virtual inline int_tp ExactNumBottomBlobs() const { return 0; } + virtual inline int_tp MinTopBlobs() const { return 1; } + virtual inline int_tp MaxTopBlobs() const { return 2; } protected: virtual void load_batch(Batch* batch); @@ -127,8 +127,8 @@ class DummyDataLayer : public Layer { const vector*>& top) {} virtual inline const char* type() const { return "DummyData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 0; } + virtual inline int_tp MinTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -162,8 +162,8 @@ class HDF5DataLayer : public Layer { const vector*>& top) {} virtual inline const char* type() const { return "HDF5Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 0; } + virtual inline int_tp MinTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -177,12 +177,12 @@ class HDF5DataLayer : public Layer { virtual void LoadHDF5FileData(const char* filename); std::vector hdf_filenames_; - unsigned int num_files_; - unsigned int current_file_; + uint_tp num_files_; + uint_tp current_file_; hsize_t current_row_; std::vector > > hdf_blobs_; - std::vector data_permutation_; - std::vector file_permutation_; + std::vector data_permutation_; + std::vector file_permutation_; }; /** @@ -206,8 +206,8 @@ class HDF5OutputLayer : public Layer { virtual inline const char* type() const { return "HDF5Output"; } // TODO: no limit on the number of blobs - virtual inline int ExactNumBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 0; } + virtual inline int_tp ExactNumBottomBlobs() const { return 2; } + virtual inline int_tp ExactNumTopBlobs() const { return 0; } inline std::string file_name() const { return file_name_; } @@ -244,16 +244,16 @@ class ImageDataLayer : public BasePrefetchingDataLayer { const vector*>& top); virtual inline const char* type() const { return "ImageData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } + virtual inline int_tp ExactNumBottomBlobs() const { return 0; } + virtual inline int_tp ExactNumTopBlobs() const { return 2; } protected: shared_ptr prefetch_rng_; virtual void ShuffleImages(); virtual void load_batch(Batch* batch); - vector > lines_; - int lines_id_; + vector > lines_; + int_tp lines_id_; }; /** @@ -270,39 +270,39 @@ class MemoryDataLayer : public BaseDataLayer { const vector*>& top); virtual inline const char* type() const { return "MemoryData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } + virtual inline int_tp ExactNumBottomBlobs() const { return 0; } + virtual inline int_tp ExactNumTopBlobs() const { return 2; } virtual void AddDatumVector(const vector& datum_vector); #ifdef USE_OPENCV virtual void AddMatVector(const vector& mat_vector, - const vector& labels); + const vector& labels); #endif // USE_OPENCV // Reset should accept const pointers, but can't, because the memory // will be given to Blob, which is mutable - void Reset(Dtype* data, Dtype* label, int n); - void set_batch_size(int new_size); + void Reset(Dtype* data, Dtype* label, int_tp n); + void set_batch_size(int_tp new_size); - vector shape() { return shape_; } - vector label_shape() { return label_shape_; } - int batch_size() { return shape_[0]; } - int channels() { return shape_[1]; } - int height() { return shape_[2]; } - int width() { return shape_[3]; } + vector shape() { return shape_; } + vector label_shape() { return label_shape_; } + int_tp batch_size() { return shape_[0]; } + int_tp channels() { return shape_[1]; } + int_tp height() { return shape_[2]; } + int_tp width() { return shape_[3]; } protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); - vector shape_; - vector label_shape_; - int size_; + vector shape_; + vector label_shape_; + int_tp size_; Dtype* data_; Dtype* labels_; - int n_; - size_t pos_; + int_tp n_; + uint_tp pos_; Blob added_data_; Blob added_label_; bool has_new_data_; @@ -324,15 +324,15 @@ class WindowDataLayer : public BasePrefetchingDataLayer { const vector*>& top); virtual inline const char* type() const { return "WindowData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } + virtual inline int_tp ExactNumBottomBlobs() const { return 0; } + virtual inline int_tp ExactNumTopBlobs() const { return 2; } protected: - virtual unsigned int PrefetchRand(); + virtual uint_tp PrefetchRand(); virtual void load_batch(Batch* batch); shared_ptr prefetch_rng_; - vector > > image_database_; + vector > > image_database_; enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; vector > fg_windows_; vector > bg_windows_; diff --git a/include/caffe/data_reader.hpp b/include/caffe/data_reader.hpp index 8ed5542cb8d..ecb9f81aaec 100644 --- a/include/caffe/data_reader.hpp +++ b/include/caffe/data_reader.hpp @@ -36,7 +36,7 @@ class DataReader { // Queue pairs are shared between a body and its readers class QueuePair { public: - explicit QueuePair(int size); + explicit QueuePair(int_tp size); ~QueuePair(); BlockingQueue free_; diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 36043b94cf7..47a64fd53e8 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -99,7 +99,7 @@ class DataTransformer { * @param datum * Datum containing the data to be transformed. */ - vector InferBlobShape(const Datum& datum); + vector InferBlobShape(const Datum& datum); /** * @brief Infers the shape of transformed_blob will have when * the transformation is applied to the data. @@ -108,7 +108,7 @@ class DataTransformer { * @param datum_vector * A vector of Datum containing the data to be transformed. */ - vector InferBlobShape(const vector & datum_vector); + vector InferBlobShape(const vector & datum_vector); /** * @brief Infers the shape of transformed_blob will have when * the transformation is applied to the data. @@ -118,7 +118,7 @@ class DataTransformer { * A vector of Mat containing the data to be transformed. */ #ifdef USE_OPENCV - vector InferBlobShape(const vector & mat_vector); + vector InferBlobShape(const vector & mat_vector); /** * @brief Infers the shape of transformed_blob will have when * the transformation is applied to the data. @@ -126,7 +126,7 @@ class DataTransformer { * @param cv_img * cv::Mat containing the data to be transformed. */ - vector InferBlobShape(const cv::Mat& cv_img); + vector InferBlobShape(const cv::Mat& cv_img); #endif // USE_OPENCV protected: @@ -138,7 +138,7 @@ class DataTransformer { * @return * A uniformly random integer value from ({0, 1, ..., n-1}). */ - virtual int Rand(int n); + virtual int_tp Rand(int_tp n); void Transform(const Datum& datum, Dtype* transformed_data); // Tranformation parameters diff --git a/include/caffe/definitions.hpp b/include/caffe/definitions.hpp new file mode 100644 index 00000000000..759955ef393 --- /dev/null +++ b/include/caffe/definitions.hpp @@ -0,0 +1,14 @@ +#ifndef CAFFE_DEFINITIONS_HPP_ +#define CAFFE_DEFINITIONS_HPP_ + +#include + +// Types used for parameters, offset computations and so on +#define int_tp int64_t +#define uint_tp uint64_t + +// Definitions used to cast the types above as needed +#define int_tpc long long +#define uint_tpc unsigned long long + +#endif /* CAFFE_DEFINITIONS_HPP_ */ diff --git a/include/caffe/dev_ptr.hpp b/include/caffe/dev_ptr.hpp index 9134aa20dbb..db83797d0eb 100644 --- a/include/caffe/dev_ptr.hpp +++ b/include/caffe/dev_ptr.hpp @@ -2,13 +2,14 @@ #define CAFFE_DEVPTR_HPP_ #include +#include "caffe/definitions.hpp" namespace caffe { /* * dev_ptr class should be constructed similarly to shared_ptr of Boost. * (but excluding the smart pointer features, so memory management - * is explicit, and only support types (float, void, double, char, int, ...)) + * is explicit, and only support types (float, void, double, char, int_tp, ...)) * It should be possible to use this object just like pointers, * independently of the backend and device used. * Dereferencing (although inefficient on some backends) should also @@ -17,7 +18,7 @@ namespace caffe { template class dev_ptr { public: virtual Type* get() = 0; - virtual std::ptrdiff_t off() = 0; + virtual std::size_t off() = 0; // Comparators virtual inline bool operator==(dev_ptr const &other) = 0; diff --git a/include/caffe/device.hpp b/include/caffe/device.hpp index cdb483bcced..d48ad251ac5 100644 --- a/include/caffe/device.hpp +++ b/include/caffe/device.hpp @@ -47,10 +47,10 @@ class device { void Init(); - size_t memory_usage(); - size_t peak_memory_usage(); - void IncreaseMemoryUsage(size_t bytes); - void DecreaseMemoryUsage(size_t bytes); + uint_tp memory_usage(); + uint_tp peak_memory_usage(); + void IncreaseMemoryUsage(uint_tp bytes); + void DecreaseMemoryUsage(uint_tp bytes); void ResetPeakMemoryUsage(); bool CheckCapability(std::string cap); @@ -60,8 +60,8 @@ class device { int id_; int list_id_; Backend backend_; - size_t memory_usage_; - size_t peak_memory_usage_; + uint_tp memory_usage_; + uint_tp peak_memory_usage_; std::vector< shared_ptr< Blob > > buff_f_; std::vector< shared_ptr< Blob > > buff_d_; #ifdef USE_GREENTEA diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index c9ef243868d..85dcac2c01f 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -39,10 +39,10 @@ class ConstantFiller : public Filler { } virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); - const int count = blob->count(); + const int_tp count = blob->count(); const Dtype value = this->filler_param_.value(); CHECK(count); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { data[i] = value; } CHECK_EQ(this->filler_param_.sparse(), -1) @@ -80,7 +80,7 @@ class GaussianFiller : public Filler { caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); - int sparse = this->filler_param_.sparse(); + int_tp sparse = this->filler_param_.sparse(); CHECK_GE(sparse, -1); if (sparse >= 0) { // Sparse initialization is implemented for "weight" blobs; i.e. matrices. @@ -88,14 +88,14 @@ class GaussianFiller : public Filler { // number of outputs. The 'sparse' variable specifies the mean number // of non-zero input weights for a given output. CHECK_GE(blob->num_axes(), 1); - const int num_outputs = blob->shape(0); + const int_tp num_outputs = blob->shape(0); Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); rand_vec_.reset( - new SyncedMemory(blob->count() * sizeof(int), + new SyncedMemory(blob->count() * sizeof(int_tp), blob->get_device())); - int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); + int_tp* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); - for (int i = 0; i < blob->count(); ++i) { + for (int_tp i = 0; i < blob->count(); ++i) { data[i] *= mask[i]; } } @@ -120,14 +120,14 @@ class PositiveUnitballFiller : public Filler { caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); // We expect the filler to not be called very frequently, so we will // just use a simple implementation - int dim = blob->count() / blob->num(); + int_tp dim = blob->count() / blob->num(); CHECK(dim); - for (int i = 0; i < blob->num(); ++i) { + for (int_tp i = 0; i < blob->num(); ++i) { Dtype sum = 0; - for (int j = 0; j < dim; ++j) { + for (int_tp j = 0; j < dim; ++j) { sum += data[i * dim + j]; } - for (int j = 0; j < dim; ++j) { + for (int_tp j = 0; j < dim; ++j) { data[i * dim + j] /= sum; } } @@ -160,8 +160,8 @@ class XavierFiller : public Filler { } virtual void Fill(Blob* blob) { CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); + int_tp fan_in = blob->count() / blob->num(); + int_tp fan_out = blob->count() / blob->channels(); Dtype n = fan_in; // default to fan_in if (this->filler_param_.variance_norm() == FillerParameter_VarianceNorm_AVERAGE) { @@ -204,8 +204,8 @@ class MSRAFiller : public Filler { } virtual void Fill(Blob* blob) { CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); + int_tp fan_in = blob->count() / blob->num(); + int_tp fan_out = blob->count() / blob->channels(); Dtype n = fan_in; // default to fan_in if (this->filler_param_.variance_norm() == FillerParameter_VarianceNorm_AVERAGE) { @@ -264,9 +264,9 @@ class BilinearFiller : public Filler { CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; Dtype* data = blob->mutable_cpu_data(); - int f = ceil(blob->width() / 2.); + int_tp f = ceil(blob->width() / 2.); float c = (2 * f - 1 - f % 2) / (2. * f); - for (int i = 0; i < blob->count(); ++i) { + for (int_tp i = 0; i < blob->count(); ++i) { float x = i % blob->width(); float y = (i / blob->width()) % blob->height(); data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index 68b4a9a37e4..e89394c16a5 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -15,77 +15,77 @@ namespace caffe { template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_im_off, const int channels, - const int height, const int width, const int kernel_h, - const int kernel_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - cl_mem data_col, const int data_col_off); + const int_tp data_im_off, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + cl_mem data_col, const int_tp data_col_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int data_col_off, const int channels, - const int height, const int width, const int patch_h, - const int patch_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, cl_mem data_im, - const int data_im_off); + const int_tp data_col_off, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, cl_mem data_im, + const int_tp data_im_off); template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_offset, const int channels, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, + const int_tp data_offset, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, cl_mem data_col); template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int channels, const int height, - const int width, const int patch_h, - const int patch_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - cl_mem data_im, const int data_offset); + const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, + cl_mem data_im, const int_tp data_offset); template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, const int num_spatial_axes, - const int channel_axis, const int num_kernels, + const int_tp data_off, const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem data_col, int data_col_off); + cl_mem data_col, int_tp data_col_off); template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, const int num_spatial_axes, - const int channel_axis, const int im_size, + const int_tp data_col_off, const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem data_im, int data_off); + cl_mem data_im, int_tp data_off); template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, const int num_spatial_axes, - const int num_kernels, cl_mem im_shape, + const int_tp data_off, const int_tp num_spatial_axes, + const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, cl_mem data_col, - int data_col_off); + int_tp data_col_off); template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, - const int num_spatial_axes, const int im_size, + const int_tp data_col_off, + const int_tp num_spatial_axes, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_im, int data_off); + cl_mem kstride, cl_mem data_im, int_tp data_off); } // namespace caffe diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 29d02a71e63..488137cf8ab 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -18,136 +18,136 @@ namespace caffe { -void greentea_memset(const int ctx_id, const size_t N, const int alpha, - cl_mem X, const int offX); +void greentea_memset(const int_tp ctx_id, const uint_tp N, const int_tp alpha, + cl_mem X, const int_tp offX); -void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, +void greentea_gpu_memcpy(const uint_tp N, const cl_mem X, const int_tp offX, void *Y, viennacl::ocl::context *ctx); -void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, - const int offY, viennacl::ocl::context *ctx); +void greentea_gpu_memcpy(const uint_tp N, const void* X, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); -void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, - cl_mem Y, const int offY, +void greentea_gpu_memcpy(const uint_tp N, const cl_mem X, const int_tp offX, + cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx); template -void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, - const int offY, viennacl::ocl::context *ctx); +void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); template -void greentea_copy(const int N, const cl_mem X, const int offX, Dtype* Y, +void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, Dtype* Y, viennacl::ocl::context *ctx); template -void greentea_copy(const int N, const Dtype* X, cl_mem Y, const int offY, +void greentea_copy(const int_tp N, const Dtype* X, cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx); template -void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const Dtype alpha, const cl_mem A, - const int offA, const cl_mem B, const int offB, - const Dtype beta, cl_mem C, const int offC); +void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int_tp M, const int_tp N, + const int_tp K, const Dtype alpha, const cl_mem A, + const int_tp offA, const cl_mem B, const int_tp offB, + const Dtype beta, cl_mem C, const int_tp offC); template -void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, - const int M, const int N, const Dtype alpha, - const cl_mem A, const int offA, const cl_mem x, - const int offx, const Dtype beta, cl_mem y, - const int offy); +void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, + const int_tp M, const int_tp N, const Dtype alpha, + const cl_mem A, const int_tp offA, const cl_mem x, + const int_tp offx, const Dtype beta, cl_mem y, + const int_tp offy); template -void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, - const cl_mem x, const int offx, cl_mem y, - const int offy); +void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, + const cl_mem x, const int_tp offx, cl_mem y, + const int_tp offy); template -void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy); +void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, - cl_mem x, int offx); +void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, + cl_mem x, int_tp offx); template -void greentea_gpu_axpby(const int ctx_id, const int N, const Dtype alpha, - const cl_mem X, const int offX, const Dtype beta, - cl_mem Y, const int offY); +void greentea_gpu_axpby(const int_tp ctx_id, const int_tp N, const Dtype alpha, + const cl_mem X, const int_tp offX, const Dtype beta, + cl_mem Y, const int_tp offY); template -void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, - const int offX, const cl_mem Y, const int offY, +void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, const cl_mem X, + const int_tp offX, const cl_mem Y, const int_tp offY, Dtype* out); template -void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, - const int offX, Dtype* Y); +void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, const cl_mem X, + const int_tp offX, Dtype* Y); template -void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, - const cl_mem X, const int offX, cl_mem Y, - const int offY); +void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const Dtype alpha, + const cl_mem X, const int_tp offX, cl_mem Y, + const int_tp offY); template -void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, - cl_mem Y, const int offY); +void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const Dtype alpha, + cl_mem Y, const int_tp offY); template -void greentea_gpu_add_scalar(const int ctx_id, const int N, const Dtype alpha, - cl_mem Y, const int offY); +void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const Dtype alpha, + cl_mem Y, const int_tp offY); template -void greentea_gpu_add(const int ctx_id, const int n, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy); +void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_sub(const int ctx_id, const int n, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy); +void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_div(const int ctx_id, const int N, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy); +void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_abs(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, const int offy); +void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, cl_mem y, const int_tp offy); template -void greentea_gpu_exp(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, const int offy); +void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, cl_mem y, const int_tp offy); template -void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, - const int offa, const Dtype alpha, cl_mem y, - const int offy); +void greentea_gpu_powx(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, const Dtype alpha, cl_mem y, + const int_tp offy); template -void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, const int offy); +void greentea_gpu_log(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, cl_mem y, const int_tp offy); template -void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy); +void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, const cl_mem x, int_tp offx, + cl_mem y, const int_tp offy); template -void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, - int offx, cl_mem y, const int offy); +void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, const cl_mem x, + int_tp offx, cl_mem y, const int_tp offy); template -void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, - const Dtype b, cl_mem r, const int offr); +void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, const Dtype a, + const Dtype b, cl_mem r, const int_tp offr); -void greentea_gpu_rng_uniform(const int ctx_id, const int n, cl_mem r, - int offr); +void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, cl_mem r, + int_tp offr); template -void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, - const Dtype sigma, cl_mem r, const int offr); +void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, const Dtype mu, + const Dtype sigma, cl_mem r, const int_tp offr); } // namespace caffe diff --git a/include/caffe/internal_thread.hpp b/include/caffe/internal_thread.hpp index 3f32a7428bb..533150a5d65 100644 --- a/include/caffe/internal_thread.hpp +++ b/include/caffe/internal_thread.hpp @@ -50,8 +50,8 @@ class InternalThread { device* thread_device_; private: - void entry(device* device_context, Caffe::Brew mode, int rand_seed, - int solver_count, bool root_solver); + void entry(device* device_context, Caffe::Brew mode, int_tp rand_seed, + int_tp solver_count, bool root_solver); shared_ptr thread_; }; diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 9e37e237210..26e1acb47ea 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -7,6 +7,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" +#include "caffe/definitions.hpp" #include "caffe/layer_factory.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/device_alternate.hpp" @@ -48,7 +49,7 @@ class Layer { phase_ = param.phase(); if (layer_param_.blobs_size() > 0) { blobs_.resize(layer_param_.blobs_size()); - for (int i = 0; i < layer_param_.blobs_size(); ++i) { + for (int_tp i = 0; i < layer_param_.blobs_size(); ++i) { blobs_[i].reset(new Blob()); blobs_[i]->FromProto(layer_param_.blobs(i)); } @@ -206,14 +207,14 @@ class Layer { /** * @brief Returns the scalar loss associated with a top blob at a given index. */ - inline Dtype loss(const int top_index) const { + inline Dtype loss(const int_tp top_index) const { return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0); } /** * @brief Sets the loss associated with a top blob at a given index. */ - inline void set_loss(const int top_index, const Dtype value) { + inline void set_loss(const int_tp top_index, const Dtype value) { if (loss_.size() <= top_index) { loss_.resize(top_index + 1, Dtype(0)); } @@ -234,7 +235,7 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some exact number of bottom blobs. */ - virtual inline int ExactNumBottomBlobs() const { + virtual inline int_tp ExactNumBottomBlobs() const { return -1; } /** @@ -244,7 +245,7 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some minimum number of bottom blobs. */ - virtual inline int MinBottomBlobs() const { + virtual inline int_tp MinBottomBlobs() const { return -1; } /** @@ -254,7 +255,7 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some maximum number of bottom blobs. */ - virtual inline int MaxBottomBlobs() const { + virtual inline int_tp MaxBottomBlobs() const { return -1; } /** @@ -264,7 +265,7 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some exact number of top blobs. */ - virtual inline int ExactNumTopBlobs() const { + virtual inline int_tp ExactNumTopBlobs() const { return -1; } /** @@ -274,7 +275,7 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some minimum number of top blobs. */ - virtual inline int MinTopBlobs() const { + virtual inline int_tp MinTopBlobs() const { return -1; } /** @@ -284,7 +285,7 @@ class Layer { * This method should be overridden to return a non-negative value if your * layer expects some maximum number of top blobs. */ - virtual inline int MaxTopBlobs() const { + virtual inline int_tp MaxTopBlobs() const { return -1; } /** @@ -318,7 +319,7 @@ class Layer { * setting and backpropagate to blob i only if it needs gradient information * (as is done when force_backward == false). */ - virtual inline bool AllowForceBackward(const int bottom_index) const { + virtual inline bool AllowForceBackward(const int_tp bottom_index) const { return true; } @@ -329,7 +330,7 @@ class Layer { * You can safely ignore false values and always compute gradients * for all parameters, but possibly with wasteful computation. */ - inline bool param_propagate_down(const int param_id) { + inline bool param_propagate_down(const int_tp param_id) { return (param_propagate_down_.size() > param_id) ? param_propagate_down_[param_id] : false; @@ -338,7 +339,7 @@ class Layer { * @brief Sets whether the layer should compute gradients w.r.t. a * parameter at a particular index given by param_id. */ - inline void set_param_propagate_down(const int param_id, const bool value) { + inline void set_param_propagate_down(const int_tp param_id, const bool value) { if (param_propagate_down_.size() <= param_id) { param_propagate_down_.resize(param_id + 1, true); } @@ -355,11 +356,11 @@ class Layer { /** * @brief Returns the estimated floating point operations of this layer */ - virtual size_t ForwardFlops() { + virtual uint_tp ForwardFlops() { return 0; } - virtual size_t BackwardFlops() { + virtual uint_tp BackwardFlops() { return 0; } @@ -461,15 +462,15 @@ class Layer { * the loss function. Store non-zero loss weights in the diff blob. */ inline void SetLossWeights(const vector*>& top) { - const int num_loss_weights = layer_param_.loss_weight_size(); + const int_tp num_loss_weights = layer_param_.loss_weight_size(); if (num_loss_weights) { CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be " "unspecified or specified once per top blob."; - for (int top_id = 0; top_id < top.size(); ++top_id) { + for (int_tp top_id = 0; top_id < top.size(); ++top_id) { const Dtype loss_weight = layer_param_.loss_weight(top_id); if (loss_weight == Dtype(0)) {continue;} this->set_loss(top_id, loss_weight); - const int count = top[top_id]->count(); + const int_tp count = top[top_id]->count(); Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff(); caffe_set(count, loss_weight, loss_multiplier); } @@ -506,11 +507,11 @@ inline Dtype Layer::Forward(const vector*>& bottom, switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); - for (int top_id = 0; top_id < top.size(); ++top_id) { + for (int_tp top_id = 0; top_id < top.size(); ++top_id) { if (!this->loss(top_id)) { continue; } - const int count = top[top_id]->count(); + const int_tp count = top[top_id]->count(); const Dtype* data = top[top_id]->cpu_data(); const Dtype* loss_weights = top[top_id]->cpu_diff(); loss += caffe_cpu_dot(count, data, loss_weights); @@ -521,11 +522,11 @@ inline Dtype Layer::Forward(const vector*>& bottom, #ifndef CPU_ONLY if (device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - for (int top_id = 0; top_id < top.size(); ++top_id) { + for (int_tp top_id = 0; top_id < top.size(); ++top_id) { if (!this->loss(top_id)) { continue; } - const int count = top[top_id]->count(); + const int_tp count = top[top_id]->count(); const Dtype* data = top[top_id]->gpu_data(); const Dtype* loss_weights = top[top_id]->gpu_diff(); Dtype blob_loss = 0; @@ -535,11 +536,11 @@ inline Dtype Layer::Forward(const vector*>& bottom, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - for (int top_id = 0; top_id < top.size(); ++top_id) { + for (int_tp top_id = 0; top_id < top.size(); ++top_id) { if (!this->loss(top_id)) { continue; } - const int count = top[top_id]->count(); + const int_tp count = top[top_id]->count(); cl_mem data = (cl_mem) (top[top_id]->gpu_data()); cl_mem loss_weights = (cl_mem) (top[top_id]->gpu_diff()); Dtype blob_loss = 0; @@ -580,7 +581,7 @@ void Layer::ToProto(LayerParameter* param, bool write_diff) { param->Clear(); param->CopyFrom(layer_param_); param->clear_blobs(); - for (int i = 0; i < blobs_.size(); ++i) { + for (int_tp i = 0; i < blobs_.size(); ++i) { blobs_[i]->ToProto(param->add_blobs(), write_diff); } } diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index c6e35525e2e..534132c916d 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -39,12 +39,12 @@ class AccuracyLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Accuracy"; } - virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int_tp ExactNumBottomBlobs() const { return 2; } // If there are two top blobs, then the second blob will contain // accuracies per class. - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlos() const { return 2; } + virtual inline int_tp MinTopBlobs() const { return 1; } + virtual inline int_tp MaxTopBlos() const { return 2; } protected: /** @@ -78,19 +78,19 @@ class AccuracyLayer : public Layer { /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < propagate_down.size(); ++i) { + for (int_tp i = 0; i < propagate_down.size(); ++i) { if (propagate_down[i]) { NOT_IMPLEMENTED; } } } - int label_axis_, outer_num_, inner_num_; + int_tp label_axis_, outer_num_, inner_num_; - int top_k_; + int_tp top_k_; /// Whether to ignore instances with a certain label. bool has_ignore_label_; /// The label indicating that an instance should be ignored. - int ignore_label_; + int_tp ignore_label_; /// Keeps counts of the number of samples per class. Blob nums_buffer_; }; @@ -113,7 +113,7 @@ class LossLayer : public Layer { virtual void Reshape( const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int_tp ExactNumBottomBlobs() const { return 2; } /** * @brief For convenience and backwards compatibility, instruct the Net to @@ -122,12 +122,12 @@ class LossLayer : public Layer { * one in the prototxt, etc.). */ virtual inline bool AutoTopBlobs() const { return true; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } /** * We usually cannot backpropagate to the labels; ignore force_backward for * these inputs. */ - virtual inline bool AllowForceBackward(const int bottom_index) const { + virtual inline bool AllowForceBackward(const int_tp bottom_index) const { return bottom_index != 1; } }; @@ -164,13 +164,13 @@ class ContrastiveLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline int_tp ExactNumBottomBlobs() const { return 3; } virtual inline const char* type() const { return "ContrastiveLoss"; } /** * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate * to the first two inputs. */ - virtual inline bool AllowForceBackward(const int bottom_index) const { + virtual inline bool AllowForceBackward(const int_tp bottom_index) const { return bottom_index != 2; } @@ -256,7 +256,7 @@ class EuclideanLossLayer : public LossLayer { * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate * to both inputs -- override to return true and always allow force_backward. */ - virtual inline bool AllowForceBackward(const int bottom_index) const { + virtual inline bool AllowForceBackward(const int_tp bottom_index) const { return true; } @@ -440,9 +440,9 @@ class InfogainLossLayer : public LossLayer { // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should // be the infogain matrix. (Otherwise the infogain matrix is loaded from a // file specified by LayerParameter.) - virtual inline int ExactNumBottomBlobs() const { return -1; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int MaxBottomBlobs() const { return 3; } + virtual inline int_tp ExactNumBottomBlobs() const { return -1; } + virtual inline int_tp MinBottomBlobs() const { return 2; } + virtual inline int_tp MaxBottomBlobs() const { return 3; } virtual inline const char* type() const { return "InfogainLoss"; } @@ -708,9 +708,9 @@ class SoftmaxWithLossLayer : public LossLayer { const vector*>& top); virtual inline const char* type() const { return "SoftmaxWithLoss"; } - virtual inline int ExactNumTopBlobs() const { return -1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } + virtual inline int_tp ExactNumTopBlobs() const { return -1; } + virtual inline int_tp MinTopBlobs() const { return 1; } + virtual inline int_tp MaxTopBlobs() const { return 2; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -761,12 +761,12 @@ class SoftmaxWithLossLayer : public LossLayer { /// Whether to ignore instances with a certain label. bool has_ignore_label_; /// The label indicating that an instance should be ignored. - int ignore_label_; + int_tp ignore_label_; /// Whether to normalize the loss by the total number of values present /// (otherwise just by the batch size). bool normalize_; - int softmax_axis_, outer_num_, inner_num_; + int_tp softmax_axis_, outer_num_, inner_num_; }; @@ -781,12 +781,12 @@ class MalisLossLayer : public LossLayer { const vector*>& top); virtual inline const char* type() const { return "MalisLoss"; } - virtual inline int ExactNumBottomBlobs() const { return -1; } - virtual inline int MinBottomBlobs() const { return 3; } - virtual inline int MaxBottomBlobs() const { return 4; } - virtual inline int ExactNumTopBlobs() const { return -1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } + virtual inline int_tp ExactNumBottomBlobs() const { return -1; } + virtual inline int_tp MinBottomBlobs() const { return 3; } + virtual inline int_tp MaxBottomBlobs() const { return 4; } + virtual inline int_tp ExactNumTopBlobs() const { return -1; } + virtual inline int_tp MinTopBlobs() const { return 1; } + virtual inline int_tp MaxTopBlobs() const { return 2; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -795,19 +795,19 @@ class MalisLossLayer : public LossLayer { const vector& propagate_down, const vector*>& bottom); private: - void Malis(const Dtype* conn_data, const int conn_num_dims, - const int* conn_dims, - const int* nhood_data, const int* nhood_dims, + void Malis(const Dtype* conn_data, const int_tp conn_num_dims, + const int_tp* conn_dims, + const int_tp* nhood_data, const int_tp* nhood_dims, const Dtype* seg_data, const bool pos, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, Dtype margin, Dtype threshold); - int nedges_; - int conn_num_dims_; - std::vector conn_dims_; - std::vector nhood_data_; - std::vector nhood_dims_; + int_tp nedges_; + int_tp conn_num_dims_; + std::vector conn_dims_; + std::vector nhood_data_; + std::vector nhood_dims_; Blob affinity_pos_; Blob affinity_neg_; diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 5b06224bf98..65927f5f848 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -48,9 +48,9 @@ class Net { * the middle may be incorrect if all of the layers of a fan-in are not * included. */ - Dtype ForwardFromTo(int start, int end); - Dtype ForwardFrom(int start); - Dtype ForwardTo(int end); + Dtype ForwardFromTo(int_tp start, int_tp end); + Dtype ForwardFrom(int_tp start); + Dtype ForwardTo(int_tp end); /// @brief Run forward using a set of bottom blobs, and return the result. const vector*>& Forward(const vector*> & bottom, Dtype* loss = NULL); @@ -72,9 +72,9 @@ class Net { * provided during the forward pass. */ void Backward(); - void BackwardFromTo(int start, int end); - void BackwardFrom(int start); - void BackwardTo(int end); + void BackwardFromTo(int_tp start, int_tp end); + void BackwardFrom(int_tp start); + void BackwardTo(int_tp end); /** * @brief Reshape all layers from bottom to top. @@ -185,17 +185,17 @@ class Net { inline const vector& has_params_decay() const { return has_params_decay_; } - const map& param_names_index() const { + const map& param_names_index() const { return param_names_index_; } - inline const vector& param_owners() const { + inline const vector& param_owners() const { return param_owners_; } /// @brief Input and output blob numbers - inline int num_inputs() const { + inline int_tp num_inputs() const { return net_input_blobs_.size(); } - inline int num_outputs() const { + inline int_tp num_outputs() const { return net_output_blobs_.size(); } inline const vector*>& input_blobs() const { @@ -204,10 +204,10 @@ class Net { inline const vector*>& output_blobs() const { return net_output_blobs_; } - inline const vector& input_blob_indices() const { + inline const vector& input_blob_indices() const { return net_input_blob_indices_; } - inline const vector& output_blob_indices() const { + inline const vector& output_blob_indices() const { return net_output_blob_indices_; } bool has_blob(const string& blob_name) const; @@ -233,25 +233,25 @@ class Net { protected: // Helpers for Init. /// @brief Append a new input or top blob to the net. - void AppendTop(const NetParameter& param, const int layer_id, - const int top_id, set* available_blobs, - map* blob_name_to_idx); + void AppendTop(const NetParameter& param, const int_tp layer_id, + const int_tp top_id, set* available_blobs, + map* blob_name_to_idx); /// @brief Append a new bottom blob to the net. - int AppendBottom(const NetParameter& param, const int layer_id, - const int bottom_id, set* available_blobs, - map* blob_name_to_idx); + int_tp AppendBottom(const NetParameter& param, const int_tp layer_id, + const int_tp bottom_id, set* available_blobs, + map* blob_name_to_idx); /// @brief Append a new parameter blob to the net. - void AppendParam(const NetParameter& param, const int layer_id, - const int param_id); + void AppendParam(const NetParameter& param, const int_tp layer_id, + const int_tp param_id); /// @brief Helper for displaying debug info in Forward about input Blobs. - void InputDebugInfo(const int layer_id); + void InputDebugInfo(const int_tp layer_id); /// @brief Helper for displaying debug info in Forward. - void ForwardDebugInfo(const int layer_id); + void ForwardDebugInfo(const int_tp layer_id); /// @brief Helper for displaying debug info in Backward. - void BackwardDebugInfo(const int layer_id); + void BackwardDebugInfo(const int_tp layer_id); /// @brief Helper for displaying debug info in Update. - void UpdateDebugInfo(const int param_id); + void UpdateDebugInfo(const int_tp param_id); /// @brief The network name string name_; @@ -260,33 +260,33 @@ class Net { /// @brief Individual layers in the net vector > > layers_; vector layer_names_; - map layer_names_index_; + map layer_names_index_; vector layer_need_backward_; /// @brief the blobs storing intermediate results between the layer. vector > > blobs_; vector blob_names_; - map blob_names_index_; + map blob_names_index_; vector blob_need_backward_; /// bottom_vecs stores the vectors containing the input for each layer. /// They don't actually host the blobs (blobs_ does), so we simply store /// pointers. vector*> > bottom_vecs_; - vector > bottom_id_vecs_; + vector > bottom_id_vecs_; vector > bottom_need_backward_; /// top_vecs stores the vectors containing the output for each layer vector*> > top_vecs_; - vector > top_id_vecs_; + vector > top_id_vecs_; /// Vector of weight in the loss (or objective) function of each net blob, /// indexed by blob_id. vector blob_loss_weights_; - vector > param_id_vecs_; - vector param_owners_; + vector > param_id_vecs_; + vector param_owners_; vector param_display_names_; - vector > param_layer_indices_; - map param_names_index_; + vector > param_layer_indices_; + map param_names_index_; /// blob indices for the input and the output of the net - vector net_input_blob_indices_; - vector net_output_blob_indices_; + vector net_input_blob_indices_; + vector net_output_blob_indices_; vector*> net_input_blobs_; vector*> net_output_blobs_; /// The parameters in the network. @@ -299,7 +299,7 @@ class Net { * if and only if params_[i] is an "owner"; otherwise, params_[i] is a sharer * and learnable_params_[learnable_param_ids_[i]] gives its owner. */ - vector learnable_param_ids_; + vector learnable_param_ids_; /// the learning rate multipliers for learnable_params_ vector params_lr_; vector has_params_lr_; @@ -307,7 +307,7 @@ class Net { vector params_weight_decay_; vector has_params_decay_; /// The bytes of memory used by this net - size_t memory_used_; + uint_tp memory_used_; /// Whether to compute and display debug info for the net. bool debug_info_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index f43a7c19767..3c0fd11acd8 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -28,8 +28,8 @@ class NeuronLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } }; /** @@ -51,8 +51,8 @@ class AbsValLayer : public NeuronLayer { const vector*>& top); virtual inline const char* type() const { return "AbsVal"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: /// @copydoc AbsValLayer @@ -198,12 +198,12 @@ class DropoutLayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ - Blob rand_vec_; + Blob rand_vec_; /// the probability @f$ p @f$ of dropping any input Dtype threshold_; /// the scale for undropped inputs at train time @f$ 1 / (1 - p) @f$ Dtype scale_; - unsigned int uint_thres_; + uint_tp uint_thres_; }; /** diff --git a/include/caffe/opencl/ocl_dev_ptr.hpp b/include/caffe/opencl/ocl_dev_ptr.hpp index 4adfd3d3215..20962d0df2b 100644 --- a/include/caffe/opencl/ocl_dev_ptr.hpp +++ b/include/caffe/opencl/ocl_dev_ptr.hpp @@ -16,11 +16,11 @@ template class ocl_dev_ptr : public dev_ptr { public: explicit ocl_dev_ptr(cl_mem ocl_mem); Type* get(); - std::ptrdiff_t off(); + std::size_t off(); private: cl_mem ocl_mem_; - std::ptrdiff_t off_; + std::size_t off_; }; } // namespace caffe diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp index 6a8aeecb38d..f1dcb905ca8 100644 --- a/include/caffe/parallel.hpp +++ b/include/caffe/parallel.hpp @@ -30,7 +30,7 @@ class Params { virtual ~Params() { } - inline size_t size() const { + inline uint_tp size() const { return size_; } inline Dtype* data() const { @@ -41,7 +41,7 @@ class Params { } protected: - const size_t size_; // Size of buffers + const uint_tp size_; // Size of buffers Dtype* data_; // Network parameters Dtype* diff_; // Gradient diff --git a/include/caffe/sgd_solvers.hpp b/include/caffe/sgd_solvers.hpp new file mode 100644 index 00000000000..1f2d05964bd --- /dev/null +++ b/include/caffe/sgd_solvers.hpp @@ -0,0 +1,148 @@ +#ifndef CAFFE_SGD_SOLVERS_HPP_ +#define CAFFE_SGD_SOLVERS_HPP_ + +#include +#include + +#include "caffe/solver.hpp" + +namespace caffe { + +/** + * @brief Optimizes the parameters of a Net using + * stochastic gradient descent (SGD) with momentum. + */ +template +class SGDSolver : public Solver { + public: + explicit SGDSolver(const SolverParameter& param) + : Solver(param) { PreSolve(); } + explicit SGDSolver(const string& param_file) + : Solver(param_file) { PreSolve(); } + virtual inline const char* type() const { return "SGD"; } + + const vector > >& history() { return history_; } + + protected: + void PreSolve(); + Dtype GetLearningRate(); + virtual void ApplyUpdate(); + virtual void Normalize(uint_tp param_id); + virtual void Regularize(uint_tp param_id); + virtual void ComputeUpdateValue(uint_tp param_id, Dtype rate); + virtual void ClipGradients(); + virtual void SnapshotSolverState(const string& model_filename); + virtual void SnapshotSolverStateToBinaryProto(const string& model_filename); + virtual void SnapshotSolverStateToHDF5(const string& model_filename); + virtual void RestoreSolverStateFromHDF5(const string& state_file); + virtual void RestoreSolverStateFromBinaryProto(const string& state_file); + // history maintains the historical momentum data. + // update maintains update related data and is not needed in snapshots. + // temp maintains other information that might be needed in computation + // of gradients/updates and is not needed in snapshots + vector > > history_, update_, temp_; + + DISABLE_COPY_AND_ASSIGN(SGDSolver); +}; + +template +class NesterovSolver : public SGDSolver { + public: + explicit NesterovSolver(const SolverParameter& param) + : SGDSolver(param) {} + explicit NesterovSolver(const string& param_file) + : SGDSolver(param_file) {} + virtual inline const char* type() const { return "Nesterov"; } + + protected: + virtual void ComputeUpdateValue(uint_tp param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(NesterovSolver); +}; + +template +class AdaGradSolver : public SGDSolver { + public: + explicit AdaGradSolver(const SolverParameter& param) + : SGDSolver(param) { constructor_sanity_check(); } + explicit AdaGradSolver(const string& param_file) + : SGDSolver(param_file) { constructor_sanity_check(); } + virtual inline const char* type() const { return "AdaGrad"; } + + protected: + virtual void ComputeUpdateValue(uint_tp param_id, Dtype rate); + void constructor_sanity_check() { + CHECK_EQ(0, this->param_.momentum()) + << "Momentum cannot be used with AdaGrad."; + } + + DISABLE_COPY_AND_ASSIGN(AdaGradSolver); +}; + + +template +class RMSPropSolver : public SGDSolver { + public: + explicit RMSPropSolver(const SolverParameter& param) + : SGDSolver(param) { constructor_sanity_check(); } + explicit RMSPropSolver(const string& param_file) + : SGDSolver(param_file) { constructor_sanity_check(); } + virtual inline const char* type() const { return "RMSProp"; } + + protected: + virtual void ComputeUpdateValue(uint_tp param_id, Dtype rate); + void constructor_sanity_check() { + CHECK_EQ(0, this->param_.momentum()) + << "Momentum cannot be used with RMSProp."; + CHECK_GE(this->param_.rms_decay(), 0) + << "rms_decay should lie between 0 and 1."; + CHECK_LT(this->param_.rms_decay(), 1) + << "rms_decay should lie between 0 and 1."; + } + + DISABLE_COPY_AND_ASSIGN(RMSPropSolver); +}; + +template +class AdaDeltaSolver : public SGDSolver { + public: + explicit AdaDeltaSolver(const SolverParameter& param) + : SGDSolver(param) { AdaDeltaPreSolve(); } + explicit AdaDeltaSolver(const string& param_file) + : SGDSolver(param_file) { AdaDeltaPreSolve(); } + virtual inline const char* type() const { return "AdaDelta"; } + + protected: + void AdaDeltaPreSolve(); + virtual void ComputeUpdateValue(uint_tp param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(AdaDeltaSolver); +}; + +/** + * @brief AdamSolver, an algorithm for first-order gradient-based optimization + * of stochastic objective functions, based on adaptive estimates of + * lower-order moments. Described in [1]. + * + * [1] D. P. Kingma and J. L. Ba, "ADAM: A Method for Stochastic Optimization." + * arXiv preprint arXiv:1412.6980v8 (2014). + */ +template +class AdamSolver : public SGDSolver { + public: + explicit AdamSolver(const SolverParameter& param) + : SGDSolver(param) { AdamPreSolve();} + explicit AdamSolver(const string& param_file) + : SGDSolver(param_file) { AdamPreSolve(); } + virtual inline const char* type() const { return "Adam"; } + + protected: + void AdamPreSolve(); + virtual void ComputeUpdateValue(uint_tp param_id, Dtype rate); + + DISABLE_COPY_AND_ASSIGN(AdamSolver); +}; + +} // namespace caffe + +#endif // CAFFE_SGD_SOLVERS_HPP_ diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 72387eb1c60..bdc95eb9b7e 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -5,6 +5,7 @@ #include #include "caffe/net.hpp" +#include "caffe/solver_factory.hpp" #include "device.hpp" namespace caffe { @@ -58,7 +59,7 @@ class Solver { inline void Solve(const string resume_file) { Solve(resume_file.c_str()); } - void Step(int iters); + void Step(int_tp iters); // The Restore method simply dispatches to one of the // RestoreSolverStateFrom___ protected methods. You should implement these // methods to restore the state from the appropriate snapshot type. @@ -70,11 +71,11 @@ class Solver { return test_nets_; } - int iter() { + int_tp iter() { return iter_; } - int max_iter() { + int_tp max_iter() { return param_.max_iter(); } @@ -97,6 +98,13 @@ class Solver { void CheckSnapshotWritePermissions(); + /** + * @brief Returns the solver type. + */ + virtual inline const char* type() const { + return ""; + } + protected: // Make and apply the update value for the current iteration. virtual void ApplyUpdate() = 0; @@ -110,14 +118,14 @@ class Solver { string SnapshotToHDF5(); // The test routine void TestAll(); - void Test(const int test_net_id = 0); + void Test(const int_tp test_net_id = 0); virtual void RestoreSolverStateFromHDF5(const string& state_file) = 0; virtual void RestoreSolverStateFromBinaryProto(const string& state_file) = 0; - void DisplayOutputBlobs(const int net_id); + void DisplayOutputBlobs(const int_tp net_id); SolverParameter param_; - int iter_; - int current_step_; + int_tp iter_; + int_tp current_step_; shared_ptr > net_; vector > > test_nets_; device *device_; @@ -164,170 +172,6 @@ class WorkerSolver : public Solver { } }; -/** - * @brief Optimizes the parameters of a Net using - * stochastic gradient descent (SGD) with momentum. - */ -template -class SGDSolver : public Solver { - public: - explicit SGDSolver(const SolverParameter& param) - : Solver(param) { - PreSolve(); - } - explicit SGDSolver(const string& param_file) - : Solver(param_file) { - PreSolve(); - } - - const vector > >& history() { - return history_; - } - - protected: - void PreSolve(); - Dtype GetLearningRate(); - virtual void ApplyUpdate(); - virtual void Normalize(int param_id); - virtual void Regularize(int param_id); - virtual void ComputeUpdateValue(int param_id, Dtype rate); - virtual void ClipGradients(); - virtual void SnapshotSolverState(const string& model_filename); - virtual void SnapshotSolverStateToBinaryProto(const string& model_filename); - virtual void SnapshotSolverStateToHDF5(const string& model_filename); - virtual void RestoreSolverStateFromHDF5(const string& state_file); - virtual void RestoreSolverStateFromBinaryProto(const string& state_file); - // history maintains the historical momentum data. - // update maintains update related data and is not needed in snapshots. - // temp maintains other information that might be needed in computation - // of gradients/updates and is not needed in snapshots - vector > > history_, update_, temp_; - -DISABLE_COPY_AND_ASSIGN(SGDSolver); -}; - -template -class NesterovSolver : public SGDSolver { - public: - explicit NesterovSolver(const SolverParameter& param) - : SGDSolver(param) { - } - explicit NesterovSolver(const string& param_file) - : SGDSolver(param_file) { - } - - protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); - -DISABLE_COPY_AND_ASSIGN(NesterovSolver); -}; - -template -class AdaGradSolver : public SGDSolver { - public: - explicit AdaGradSolver(const SolverParameter& param) - : SGDSolver(param) { - constructor_sanity_check(); - } - explicit AdaGradSolver(const string& param_file) - : SGDSolver(param_file) { - constructor_sanity_check(); - } - - protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); - void constructor_sanity_check() { - CHECK_EQ(0, this->param_.momentum()) - << "Momentum cannot be used with AdaGrad."; - } - - DISABLE_COPY_AND_ASSIGN(AdaGradSolver); -}; - - -template -class RMSPropSolver : public SGDSolver { - public: - explicit RMSPropSolver(const SolverParameter& param) - : SGDSolver(param) { constructor_sanity_check(); } - explicit RMSPropSolver(const string& param_file) - : SGDSolver(param_file) { constructor_sanity_check(); } - - protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); - void constructor_sanity_check() { - CHECK_EQ(0, this->param_.momentum()) - << "Momentum cannot be used with RMSProp."; - CHECK_GE(this->param_.rms_decay(), 0) - << "rms_decay should lie between 0 and 1."; - CHECK_LT(this->param_.rms_decay(), 1) - << "rms_decay should lie between 0 and 1."; - } - - DISABLE_COPY_AND_ASSIGN(RMSPropSolver); -}; - -template -class AdaDeltaSolver : public SGDSolver { - public: - explicit AdaDeltaSolver(const SolverParameter& param) - : SGDSolver(param) { AdaDeltaPreSolve(); } - explicit AdaDeltaSolver(const string& param_file) - : SGDSolver(param_file) { AdaDeltaPreSolve(); } - - protected: - void AdaDeltaPreSolve(); - virtual void ComputeUpdateValue(int param_id, Dtype rate); - - DISABLE_COPY_AND_ASSIGN(AdaDeltaSolver); -}; - -/** - * @brief AdamSolver, an algorithm for first-order gradient-based optimization - * of stochastic objective functions, based on adaptive estimates of - * lower-order moments. Described in [1]. - * - * [1] D. P. Kingma and J. L. Ba, "ADAM: A Method for Stochastic Optimization." - * arXiv preprint arXiv:1412.6980v8 (2014). - */ -template -class AdamSolver : public SGDSolver { - public: - explicit AdamSolver(const SolverParameter& param) - : SGDSolver(param) { AdamPreSolve();} - explicit AdamSolver(const string& param_file) - : SGDSolver(param_file) { AdamPreSolve(); } - - protected: - void AdamPreSolve(); - virtual void ComputeUpdateValue(int param_id, Dtype rate); - - DISABLE_COPY_AND_ASSIGN(AdamSolver); -}; - -template -Solver* GetSolver(const SolverParameter& param) { - SolverParameter_SolverType type = param.solver_type(); - - switch (type) { - case SolverParameter_SolverType_SGD: - return new SGDSolver(param); - case SolverParameter_SolverType_NESTEROV: - return new NesterovSolver(param); - case SolverParameter_SolverType_ADAGRAD: - return new AdaGradSolver(param); - case SolverParameter_SolverType_RMSPROP: - return new RMSPropSolver(param); - case SolverParameter_SolverType_ADADELTA: - return new AdaDeltaSolver(param); - case SolverParameter_SolverType_ADAM: - return new AdamSolver(param); - default: - LOG(FATAL) << "Unknown SolverType: " << type; - } - return (Solver*) NULL; -} - } // namespace caffe #endif // CAFFE_OPTIMIZATION_SOLVER_HPP_ diff --git a/include/caffe/solver_factory.hpp b/include/caffe/solver_factory.hpp new file mode 100644 index 00000000000..cfff721af40 --- /dev/null +++ b/include/caffe/solver_factory.hpp @@ -0,0 +1,137 @@ +/** + * @brief A solver factory that allows one to register solvers, similar to + * layer factory. During runtime, registered solvers could be called by passing + * a SolverParameter protobuffer to the CreateSolver function: + * + * SolverRegistry::CreateSolver(param); + * + * There are two ways to register a solver. Assuming that we have a solver like: + * + * template + * class MyAwesomeSolver : public Solver { + * // your implementations + * }; + * + * and its type is its C++ class name, but without the "Solver" at the end + * ("MyAwesomeSolver" -> "MyAwesome"). + * + * If the solver is going to be created simply by its constructor, in your c++ + * file, add the following line: + * + * REGISTER_SOLVER_CLASS(MyAwesome); + * + * Or, if the solver is going to be created by another creator function, in the + * format of: + * + * template + * Solver GetMyAwesomeSolver(const SolverParameter& param) { + * // your implementation + * } + * + * then you can register the creator function instead, like + * + * REGISTER_SOLVER_CREATOR(MyAwesome, GetMyAwesomeSolver) + * + * Note that each solver type should only be registered once. + */ + +#ifndef CAFFE_SOLVER_FACTORY_H_ +#define CAFFE_SOLVER_FACTORY_H_ + +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template +class Solver; + +template +class SolverRegistry { + public: + typedef Solver* (*Creator)(const SolverParameter&); + typedef std::map CreatorRegistry; + + static CreatorRegistry& Registry() { + static CreatorRegistry* g_registry_ = new CreatorRegistry(); + return *g_registry_; + } + + // Adds a creator. + static void AddCreator(const string& type, Creator creator) { + CreatorRegistry& registry = Registry(); + CHECK_EQ(registry.count(type), 0) + << "Solver type " << type << " already registered."; + registry[type] = creator; + } + + // Get a solver using a SolverParameter. + static Solver* CreateSolver(const SolverParameter& param) { + const string& type = param.type(); + CreatorRegistry& registry = Registry(); + CHECK_EQ(registry.count(type), 1) << "Unknown solver type: " << type + << " (known types: " << SolverTypeListString() << ")"; + return registry[type](param); + } + + static vector SolverTypeList() { + CreatorRegistry& registry = Registry(); + vector solver_types; + for (typename CreatorRegistry::iterator iter = registry.begin(); + iter != registry.end(); ++iter) { + solver_types.push_back(iter->first); + } + return solver_types; + } + + private: + // Solver registry should never be instantiated - everything is done with its + // static variables. + SolverRegistry() {} + + static string SolverTypeListString() { + vector solver_types = SolverTypeList(); + string solver_types_str; + for (vector::iterator iter = solver_types.begin(); + iter != solver_types.end(); ++iter) { + if (iter != solver_types.begin()) { + solver_types_str += ", "; + } + solver_types_str += *iter; + } + return solver_types_str; + } +}; + + +template +class SolverRegisterer { + public: + SolverRegisterer(const string& type, + Solver* (*creator)(const SolverParameter&)) { + // LOG(INFO) << "Registering solver type: " << type; + SolverRegistry::AddCreator(type, creator); + } +}; + + +#define REGISTER_SOLVER_CREATOR(type, creator) \ + static SolverRegisterer g_creator_f_##type(#type, creator); \ + static SolverRegisterer g_creator_d_##type(#type, creator) \ + +#define REGISTER_SOLVER_CLASS(type) \ + template \ + Solver* Creator_##type##Solver( \ + const SolverParameter& param) \ + { \ + return new type##Solver(param); \ + } \ + REGISTER_SOLVER_CREATOR(type, Creator_##type##Solver) + +} // namespace caffe + +#endif // CAFFE_SOLVER_FACTORY_H_ diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 7389ac7d93c..989243a9dc7 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -12,7 +12,7 @@ namespace caffe { -void CaffeMallocHost(void** ptr, size_t size); +void CaffeMallocHost(void** ptr, uint_tp size); void CaffeFreeHost(void* ptr); @@ -45,7 +45,7 @@ class SyncedMemory { device_(device_context), cl_gpu_mem_(NULL) { } - explicit SyncedMemory(size_t size, device *device_context) + explicit SyncedMemory(uint_tp size, device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), @@ -74,7 +74,7 @@ class SyncedMemory { own_gpu_data_(false), device_(device_context) { } - explicit SyncedMemory(size_t size, device *device_context) + explicit SyncedMemory(uint_tp size, device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), @@ -101,7 +101,7 @@ class SyncedMemory { SyncedHead head() { return head_; } - size_t size() { + uint_tp size() { return size_; } @@ -117,7 +117,7 @@ class SyncedMemory { void* cpu_ptr_; void* gpu_ptr_; - size_t size_; + uint_tp size_; SyncedHead head_; bool own_cpu_data_; bool own_gpu_data_; diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 2b9392880e7..de1a086e129 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -22,7 +22,7 @@ class GradientChecker { // kink - kink_range <= |feature value| <= kink + kink_range, // which accounts for all nonsmoothness in use by caffe GradientChecker(const Dtype stepsize, const Dtype threshold, - const unsigned int seed = 1701, const Dtype kink = 0., + const uint_tp seed = 1701, const Dtype kink = 0., const Dtype kink_range = -1) : stepsize_(stepsize), threshold_(threshold), @@ -35,14 +35,14 @@ class GradientChecker { // Note that after the gradient check, we do not guarantee that the data // stored in the layer parameters and the blobs are unchanged. void CheckGradient(Layer* layer, const vector*>& bottom, - const vector*>& top, int check_bottom = -1) { + const vector*>& top, int_tp check_bottom = -1) { layer->SetUp(bottom, top); CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1); } void CheckGradientExhaustive(Layer* layer, const vector*>& bottom, const vector*>& top, - int check_bottom = -1); + int_tp check_bottom = -1); // CheckGradientEltwise can be used to test layers that perform element-wise // computation only (e.g., neuron layers) -- where (d y_i) / (d x_j) = 0 when @@ -57,8 +57,8 @@ class GradientChecker { // param Blobs. Otherwise (if check_bottom < -1), check only param Blobs. void CheckGradientSingle(Layer* layer, const vector*>& bottom, - const vector*>& top, int check_bottom, - int top_id, int top_data_id, bool element_wise = + const vector*>& top, int_tp check_bottom, + int_tp top_id, int_tp top_data_id, bool element_wise = false); // Checks the gradient of a network. This network should not have any data @@ -70,11 +70,11 @@ class GradientChecker { protected: Dtype GetObjAndGradient(const Layer& layer, - const vector*>& top, int top_id = -1, - int top_data_id = -1); + const vector*>& top, int_tp top_id = -1, + int_tp top_data_id = -1); Dtype stepsize_; Dtype threshold_; - unsigned int seed_; + uint_tp seed_; Dtype kink_; Dtype kink_range_; }; @@ -82,14 +82,14 @@ class GradientChecker { template void GradientChecker::CheckGradientSingle( Layer* layer, const vector*>& bottom, - const vector*>& top, int check_bottom, int top_id, - int top_data_id, bool element_wise) { + const vector*>& top, int_tp check_bottom, int_tp top_id, + int_tp top_data_id, bool element_wise) { if (element_wise) { CHECK_EQ(0, layer->blobs().size()); CHECK_LE(0, top_id); CHECK_LE(0, top_data_id); - const int top_count = top[top_id]->count(); - for (int blob_id = 0; blob_id < bottom.size(); ++blob_id) { + const int_tp top_count = top[top_id]->count(); + for (int_tp blob_id = 0; blob_id < bottom.size(); ++blob_id) { CHECK_EQ(top_count, bottom[blob_id]->count()); } } @@ -97,13 +97,13 @@ void GradientChecker::CheckGradientSingle( // parameter blobs. vector*> blobs_to_check; vector propagate_down(bottom.size(), check_bottom == -1); - for (int i = 0; i < layer->blobs().size(); ++i) { + for (int_tp i = 0; i < layer->blobs().size(); ++i) { Blob* blob = layer->blobs()[i].get(); caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); blobs_to_check.push_back(blob); } if (check_bottom == -1) { - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { blobs_to_check.push_back(bottom[i]); } } else if (check_bottom >= 0) { @@ -123,12 +123,12 @@ void GradientChecker::CheckGradientSingle( // Store computed gradients for all checked blobs vector > > computed_gradient_blobs( blobs_to_check.size()); - for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { + for (int_tp blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { Blob* current_blob = blobs_to_check[blob_id]; computed_gradient_blobs[blob_id].reset(new Blob()); computed_gradient_blobs[blob_id]->ReshapeLike( *current_blob); - const int count = blobs_to_check[blob_id]->count(); + const int_tp count = blobs_to_check[blob_id]->count(); const Dtype* diff = blobs_to_check[blob_id]->cpu_diff(); Dtype* computed_gradients = computed_gradient_blobs[blob_id] ->mutable_cpu_data(); @@ -138,13 +138,13 @@ void GradientChecker::CheckGradientSingle( // Compute derivative of top w.r.t. each bottom and parameter input using // finite differencing. // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs."; - for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { + for (int_tp blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { Blob* current_blob = blobs_to_check[blob_id]; const Dtype* computed_gradients = computed_gradient_blobs[blob_id]->cpu_data(); // LOG(ERROR) << "Blob " << blob_id << ": checking " // << current_blob->count() << " parameters."; - for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) { + for (int_tp feat_id = 0; feat_id < current_blob->count(); ++feat_id) { // For an element-wise layer, we only need to do finite differencing to // compute the derivative of top[top_id][top_data_id] w.r.t. // bottom[blob_id][i] only for i == top_data_id. For any other @@ -198,13 +198,13 @@ void GradientChecker::CheckGradientSingle( template void GradientChecker::CheckGradientExhaustive( Layer* layer, const vector*>& bottom, - const vector*>& top, int check_bottom) { + const vector*>& top, int_tp check_bottom) { layer->SetUp(bottom, top); CHECK_GT(top.size(), 0)<< "Exhaustive mode requires at least one top blob."; // LOG(ERROR) << "Exhaustive Mode."; - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count(); - for (int j = 0; j < top[i]->count(); ++j) { + for (int_tp j = 0; j < top[i]->count(); ++j) { // LOG(ERROR) << "Exhaustive: blob " << i << " data " << j; CheckGradientSingle(layer, bottom, top, check_bottom, i, j); } @@ -217,10 +217,10 @@ void GradientChecker::CheckGradientEltwise( const vector*>& top) { layer->SetUp(bottom, top); CHECK_GT(top.size(), 0)<< "Eltwise mode requires at least one top blob."; - const int check_bottom = -1; + const int_tp check_bottom = -1; const bool element_wise = true; - for (int i = 0; i < top.size(); ++i) { - for (int j = 0; j < top[i]->count(); ++j) { + for (int_tp i = 0; i < top.size(); ++i) { + for (int_tp j = 0; j < top[i]->count(); ++j) { CheckGradientSingle(layer, bottom, top, check_bottom, i, j, element_wise); } } @@ -232,7 +232,7 @@ void GradientChecker::CheckGradientNet( const vector > >& layers = net.layers(); vector*> >& bottom_vecs = net.bottom_vecs(); vector*> >& top_vecs = net.top_vecs(); - for (int i = 0; i < layers.size(); ++i) { + for (int_tp i = 0; i < layers.size(); ++i) { net.Forward(input); LOG(ERROR)<< "Checking gradient for " << layers[i]->layer_param().name(); CheckGradientExhaustive(*(layers[i].get()), bottom_vecs[i], top_vecs[i]); @@ -242,16 +242,16 @@ void GradientChecker::CheckGradientNet( template Dtype GradientChecker::GetObjAndGradient(const Layer& layer, const vector*>& top, - int top_id, int top_data_id) { + int_tp top_id, int_tp top_data_id) { Dtype loss = 0; if (top_id < 0) { // the loss will be half of the sum of squares of all outputs - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { Blob* top_blob = top[i]; const Dtype* top_blob_data = top_blob->cpu_data(); Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); - int count = top_blob->count(); - for (int j = 0; j < count; ++j) { + int_tp count = top_blob->count(); + for (int_tp j = 0; j < count; ++j) { loss += top_blob_data[j] * top_blob_data[j]; } // set the diff: simply the data. @@ -260,7 +260,7 @@ Dtype GradientChecker::GetObjAndGradient(const Layer& layer, loss /= 2.; } else { // the loss will be the top_data_id-th element in the top_id-th blob. - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { Blob* top_blob = top[i]; Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); caffe_set(top_blob->count(), Dtype(0), top_blob_diff); diff --git a/include/caffe/util/blocking_queue.hpp b/include/caffe/util/blocking_queue.hpp index 955e12cc567..06507f1fd34 100644 --- a/include/caffe/util/blocking_queue.hpp +++ b/include/caffe/util/blocking_queue.hpp @@ -26,7 +26,7 @@ class BlockingQueue { // Return element without removing it T peek(); - size_t size() const; + uint_tp size() const; protected: /** diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index b531dd5fa7a..57a5e3f7e28 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -67,26 +67,26 @@ inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) { template inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, - int n, int c, int h, int w, - int stride_n, int stride_c, int stride_h, int stride_w) { + int_tp n, int_tp c, int_tp h, int_tp w, + int_tp stride_n, int_tp stride_c, int_tp stride_h, int_tp stride_w) { CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(*desc, dataType::type, n, c, h, w, stride_n, stride_c, stride_h, stride_w)); } template inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, - int n, int c, int h, int w) { - const int stride_w = 1; - const int stride_h = w * stride_w; - const int stride_c = h * stride_h; - const int stride_n = c * stride_c; + int_tp n, int_tp c, int_tp h, int_tp w) { + const int_tp stride_w = 1; + const int_tp stride_h = w * stride_w; + const int_tp stride_c = h * stride_h; + const int_tp stride_n = c * stride_c; setTensor4dDesc(desc, n, c, h, w, stride_n, stride_c, stride_h, stride_w); } template inline void createFilterDesc(cudnnFilterDescriptor_t* desc, - int n, int c, int h, int w) { + int_tp n, int_tp c, int_tp h, int_tp w) { CUDNN_CHECK(cudnnCreateFilterDescriptor(desc)); CUDNN_CHECK(cudnnSetFilter4dDescriptor(*desc, dataType::type, n, c, h, w)); @@ -100,7 +100,7 @@ inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) { template inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, cudnnTensorDescriptor_t bottom, cudnnFilterDescriptor_t filter, - int pad_h, int pad_w, int stride_h, int stride_w) { + int_tp pad_h, int_tp pad_w, int_tp stride_h, int_tp stride_w) { CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv, pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION)); } @@ -108,7 +108,7 @@ inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, template inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, PoolingParameter_PoolMethod poolmethod, cudnnPoolingMode_t* mode, - int h, int w, int pad_h, int pad_w, int stride_h, int stride_w) { + int_tp h, int_tp w, int_tp pad_h, int_tp pad_w, int_tp stride_h, int_tp stride_w) { switch (poolmethod) { case PoolingParameter_PoolMethod_MAX: *mode = CUDNN_POOLING_MAX; diff --git a/include/caffe/util/db_lmdb.hpp b/include/caffe/util/db_lmdb.hpp index 4e1568ace50..96ba2cbc8fb 100644 --- a/include/caffe/util/db_lmdb.hpp +++ b/include/caffe/util/db_lmdb.hpp @@ -10,7 +10,7 @@ namespace caffe { namespace db { -inline void MDB_CHECK(int mdb_status) { +inline void MDB_CHECK(int_tp mdb_status) { CHECK_EQ(mdb_status, MDB_SUCCESS) << mdb_strerror(mdb_status); } @@ -37,7 +37,7 @@ class LMDBCursor : public Cursor { private: void Seek(MDB_cursor_op op) { - int mdb_status = mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, op); + int_tp mdb_status = mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, op); if (mdb_status == MDB_NOTFOUND) { valid_ = false; } else { diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 665d5f61b6f..1c67ee90da3 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -75,7 +75,7 @@ void classname::funcname##_##gpu(const vector*>& top, \ // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ - for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + for (int_tp i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) @@ -106,7 +106,7 @@ const char* curandGetErrorString(curandStatus_t error); #endif // CUDA: number of blocks for threads. -inline int CAFFE_GET_BLOCKS(const int N) { +inline int_tp CAFFE_GET_BLOCKS(const int_tp N) { return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; } diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index d6a12d706b0..7f07cfcd6ef 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -1,82 +1,85 @@ #ifndef _CAFFE_UTIL_IM2COL_HPP_ #define _CAFFE_UTIL_IM2COL_HPP_ +#include +#include "caffe/definitions.hpp" + namespace caffe { template -void im2col_cpu(const Dtype* data_im, const int channels, const int height, - const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_col); +void im2col_cpu(const Dtype* data_im, const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, Dtype* data_col); template -void col2im_cpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im); +void col2im_cpu(const Dtype* data_col, const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, Dtype* data_im); template -void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, +void im2col_nd_cpu(const Dtype* data_im, const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, Dtype* data_col); template -void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, +void col2im_nd_cpu(const Dtype* data_col, const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, Dtype* data_im); template -void im2col_gpu(const Dtype* data_im, const int channels, const int height, - const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_col); +void im2col_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, Dtype* data_col); template -void col2im_gpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im); +void col2im_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, Dtype* data_im); template -void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, - const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, +void im2col_sk_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, Dtype* data_col); template -void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, +void col2im_sk_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, Dtype* data_im); template -void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, Dtype* data_col); +void im2col_nd_gpu(const Dtype* data_im, const int_tp num_spatial_axes, + const int_tp num_kernels, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, Dtype* data_col); template -void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, +void col2im_nd_gpu(const Dtype* data_col, const int_tp num_spatial_axes, + const int_tp im_size, const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, Dtype* data_im); template -void im2col_ndsk_gpu(const Dtype* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, const int* kstride, +void im2col_ndsk_gpu(const Dtype* data_im, const int_tp num_spatial_axes, + const int_tp num_kernels, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, const int_tp* kstride, Dtype* data_col); template -void col2im_ndsk_gpu(const Dtype* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, const int* kstride, +void col2im_ndsk_gpu(const Dtype* data_col, const int_tp num_spatial_axes, + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, const int_tp* kstride, Dtype* data_im); } // namespace caffe diff --git a/include/caffe/util/insert_splits.hpp b/include/caffe/util/insert_splits.hpp index 446abb817be..1def1fc3b95 100644 --- a/include/caffe/util/insert_splits.hpp +++ b/include/caffe/util/insert_splits.hpp @@ -12,14 +12,14 @@ namespace caffe { void InsertSplits(const NetParameter& param, NetParameter* param_split); void ConfigureSplitLayer(const string& layer_name, const string& blob_name, - const int blob_idx, const int split_count, const float loss_weight, + const int_tp blob_idx, const int_tp split_count, const float loss_weight, LayerParameter* split_layer_param); string SplitLayerName(const string& layer_name, const string& blob_name, - const int blob_idx); + const int_tp blob_idx); string SplitBlobName(const string& layer_name, const string& blob_name, - const int blob_idx, const int split_idx); + const int_tp blob_idx, const int_tp split_idx); } // namespace caffe diff --git a/include/caffe/util/io.hpp b/include/caffe/util/io.hpp index 6070b4c7f3a..45c9a92fbdf 100644 --- a/include/caffe/util/io.hpp +++ b/include/caffe/util/io.hpp @@ -20,7 +20,7 @@ inline void MakeTempFilename(string* temp_filename) { char* temp_filename_cstr = new char[temp_filename->size() + 1]; // NOLINT_NEXT_LINE(runtime/printf) strcpy(temp_filename_cstr, temp_filename->c_str()); - int fd = mkstemp(temp_filename_cstr); + int_tp fd = mkstemp(temp_filename_cstr); CHECK_GE(fd, 0) << "Failed to open a temporary file at: " << *temp_filename; close(fd); *temp_filename = temp_filename_cstr; @@ -81,38 +81,38 @@ inline void WriteProtoToBinaryFile( WriteProtoToBinaryFile(proto, filename.c_str()); } -bool ReadFileToDatum(const string& filename, const int label, Datum* datum); +bool ReadFileToDatum(const string& filename, const int_tp label, Datum* datum); inline bool ReadFileToDatum(const string& filename, Datum* datum) { return ReadFileToDatum(filename, -1, datum); } -bool ReadImageToDatum(const string& filename, const int label, - const int height, const int width, const bool is_color, +bool ReadImageToDatum(const string& filename, const int_tp label, + const int_tp height, const int_tp width, const bool is_color, const std::string & encoding, Datum* datum); -inline bool ReadImageToDatum(const string& filename, const int label, - const int height, const int width, const bool is_color, Datum* datum) { +inline bool ReadImageToDatum(const string& filename, const int_tp label, + const int_tp height, const int_tp width, const bool is_color, Datum* datum) { return ReadImageToDatum(filename, label, height, width, is_color, "", datum); } -inline bool ReadImageToDatum(const string& filename, const int label, - const int height, const int width, Datum* datum) { +inline bool ReadImageToDatum(const string& filename, const int_tp label, + const int_tp height, const int_tp width, Datum* datum) { return ReadImageToDatum(filename, label, height, width, true, datum); } -inline bool ReadImageToDatum(const string& filename, const int label, +inline bool ReadImageToDatum(const string& filename, const int_tp label, const bool is_color, Datum* datum) { return ReadImageToDatum(filename, label, 0, 0, is_color, datum); } -inline bool ReadImageToDatum(const string& filename, const int label, +inline bool ReadImageToDatum(const string& filename, const int_tp label, Datum* datum) { return ReadImageToDatum(filename, label, 0, 0, true, datum); } -inline bool ReadImageToDatum(const string& filename, const int label, +inline bool ReadImageToDatum(const string& filename, const int_tp label, const std::string & encoding, Datum* datum) { return ReadImageToDatum(filename, label, 0, 0, true, encoding, datum); } @@ -122,10 +122,10 @@ bool DecodeDatum(Datum* datum, bool is_color); #ifdef USE_OPENCV cv::Mat ReadImageToCVMat(const string& filename, - const int height, const int width, const bool is_color); + const int_tp height, const int_tp width, const bool is_color); cv::Mat ReadImageToCVMat(const string& filename, - const int height, const int width); + const int_tp height, const int_tp width); cv::Mat ReadImageToCVMat(const string& filename, const bool is_color); diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 4afb8667f17..aaeea6f39ea 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -16,100 +16,97 @@ namespace caffe { // limitation that the data has to be contiguous in memory. template void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int M, const int N, const int K, const Dtype alpha, + const int_tp M, const int_tp N, const int_tp K, const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, Dtype* C); template -void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, const int_tp N, const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, Dtype* y); template -void caffe_axpy(const int N, const Dtype alpha, const Dtype* X, Dtype* Y); +void caffe_axpy(const int_tp N, const Dtype alpha, const Dtype* X, Dtype* Y); template -void caffe_cpu_axpby(const int N, const Dtype alpha, const Dtype* X, +void caffe_cpu_axpby(const int_tp N, const Dtype alpha, const Dtype* X, const Dtype beta, Dtype* Y); template -void caffe_cpu_copy(const int N, const Dtype* X, Dtype* Y); +void caffe_cpu_copy(const int_tp N, const Dtype* X, Dtype* Y); template -void caffe_copy(const int N, const Dtype *X, Dtype *Y); +void caffe_copy(const int_tp N, const Dtype *X, Dtype *Y); template -void caffe_set(const int N, const Dtype alpha, Dtype *X); +void caffe_set(const int_tp N, const Dtype alpha, Dtype *X); -inline void caffe_memset(const size_t N, const int alpha, void* X) { +inline void caffe_memset(const uint_tp N, const int_tp alpha, void* X) { memset(X, alpha, N); // NOLINT(caffe/alt_fn) } template -void caffe_add_scalar(const int N, const Dtype alpha, Dtype *X); +void caffe_add_scalar(const int_tp N, const Dtype alpha, Dtype *X); template -void caffe_scal(const int N, const Dtype alpha, Dtype *X); +void caffe_scal(const int_tp N, const Dtype alpha, Dtype *X); template -void caffe_sqr(const int N, const Dtype* a, Dtype* y); +void caffe_sqr(const int_tp N, const Dtype* a, Dtype* y); template -void caffe_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_add(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_sub(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_mul(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_div(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); +void caffe_powx(const int_tp n, const Dtype* a, const Dtype b, Dtype* y); -unsigned int caffe_rng_rand(); +uint_tp caffe_rng_rand(); template Dtype caffe_nextafter(const Dtype b); -void caffe_rng_uniform(const int n, unsigned int* r); +void caffe_rng_uniform(const int_tp n, uint_tp* r); template -void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); +void caffe_rng_uniform(const int_tp n, const Dtype a, const Dtype b, Dtype* r); template -void caffe_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, +void caffe_rng_gaussian(const int_tp n, const Dtype mu, const Dtype sigma, Dtype* r); -template -void caffe_rng_bernoulli(const int n, const Dtype p, int* r); - -template -void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); +template +void caffe_rng_bernoulli(const int_tp n, const Dtype p, Itype* r); template -void caffe_exp(const int n, const Dtype* a, Dtype* y); +void caffe_exp(const int_tp n, const Dtype* a, Dtype* y); template -void caffe_log(const int n, const Dtype* a, Dtype* y); +void caffe_log(const int_tp n, const Dtype* a, Dtype* y); template -void caffe_abs(const int n, const Dtype* a, Dtype* y); +void caffe_abs(const int_tp n, const Dtype* a, Dtype* y); template -Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y); +Dtype caffe_cpu_dot(const int_tp n, const Dtype* x, const Dtype* y); template -Dtype caffe_cpu_strided_dot(const int n, const Dtype* x, const int incx, - const Dtype* y, const int incy); +Dtype caffe_cpu_strided_dot(const int_tp n, const Dtype* x, const int_tp incx, + const Dtype* y, const int_tp incy); template -int caffe_cpu_hamming_distance(const int n, const Dtype* x, const Dtype* y); +int_tp caffe_cpu_hamming_distance(const int_tp n, const Dtype* x, const Dtype* y); // Returns the sum of the absolute values of the elements of vector x template -Dtype caffe_cpu_asum(const int n, const Dtype* x); +Dtype caffe_cpu_asum(const int_tp n, const Dtype* x); // the branchless, type-safe version from // http://stackoverflow.com/questions/1903954/is-there-a-standard-sign-function-signum-sgn-in-c-c @@ -126,9 +123,9 @@ inline int8_t caffe_sign(Dtype val) { // So they have to be pasted here temporarily. #define DEFINE_CAFFE_CPU_UNARY_FUNC(name, operation) \ template \ - void caffe_cpu_##name(const int n, const Dtype* x, Dtype* y) { \ + void caffe_cpu_##name(const int_tp n, const Dtype* x, Dtype* y) { \ CHECK_GT(n, 0); CHECK(x); CHECK(y); \ - for (int i = 0; i < n; ++i) { \ + for (int_tp i = 0; i < n; ++i) { \ operation; \ } \ } @@ -146,7 +143,7 @@ DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i])); template -void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); +void caffe_cpu_scale(const int_tp n, const Dtype alpha, const Dtype *x, Dtype* y); #ifndef CPU_ONLY // GPU #ifdef USE_CUDA @@ -156,63 +153,65 @@ void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); // gpu code under the hood. template void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int M, const int N, const int K, const Dtype alpha, + const int_tp M, const int_tp N, const int_tp K, const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, Dtype* C); template -void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, const int_tp N, const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, Dtype* y); template -void caffe_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, Dtype* Y); +void caffe_gpu_axpy(const int_tp N, const Dtype alpha, const Dtype* X, Dtype* Y); template -void caffe_gpu_axpby(const int N, const Dtype alpha, const Dtype* X, +void caffe_gpu_axpby(const int_tp N, const Dtype alpha, const Dtype* X, const Dtype beta, Dtype* Y); -void caffe_gpu_memcpy(const size_t N, const void *X, void *Y); +void caffe_gpu_memcpy(const uint_tp N, const void *X, void *Y); template -void caffe_gpu_set(const int N, const Dtype alpha, Dtype *X); +void caffe_gpu_set(const int_tp N, const Dtype alpha, Dtype *X); -inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) { +inline void caffe_gpu_memset(const uint_tp N, const int_tp alpha, void* X) { CUDA_CHECK(cudaMemset(X, alpha, N)); // NOLINT(caffe/alt_fn) } template -void caffe_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X); +void caffe_gpu_add_scalar(const int_tp N, const Dtype alpha, Dtype *X); template -void caffe_gpu_scal(const int N, const Dtype alpha, Dtype *X); +void caffe_gpu_scal(const int_tp N, const Dtype alpha, Dtype *X); template -void caffe_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_gpu_add(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_gpu_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_gpu_sub(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_gpu_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_gpu_mul(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_gpu_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_gpu_div(const int_tp N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); +void caffe_gpu_abs(const int_tp n, const Dtype* a, Dtype* y); template -void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); +void caffe_gpu_exp(const int_tp n, const Dtype* a, Dtype* y); template -void caffe_gpu_log(const int n, const Dtype* a, Dtype* y); +void caffe_gpu_log(const int_tp n, const Dtype* a, Dtype* y); template -void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); +void caffe_gpu_powx(const int_tp n, const Dtype* a, const Dtype b, Dtype* y); // caffe_gpu_rng_uniform with two arguments generates integers in the range // [0, UINT_MAX]. -void caffe_gpu_rng_uniform(const int n, unsigned int* r); +void caffe_gpu_rng_uniform(const int_tp n, unsigned int* r); +void caffe_gpu_rng_uniform(const int_tp n, unsigned long long* r); + // caffe_gpu_rng_uniform with four arguments generates floats in the range // (a, b] (strictly greater than a, less than or equal to b) due to the @@ -220,52 +219,52 @@ void caffe_gpu_rng_uniform(const int n, unsigned int* r); // curandGenerateUniform; with other limits will shift and scale the outputs // appropriately after calling curandGenerateUniform. template -void caffe_gpu_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); +void caffe_gpu_rng_uniform(const int_tp n, const Dtype a, const Dtype b, Dtype* r); template -void caffe_gpu_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, +void caffe_gpu_rng_gaussian(const int_tp n, const Dtype mu, const Dtype sigma, Dtype* r); template -void caffe_gpu_rng_bernoulli(const int n, const Dtype p, int* r); +void caffe_gpu_rng_bernoulli(const int_tp n, const Dtype p, int_tp* r); template -void caffe_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out); +void caffe_gpu_dot(const int_tp n, const Dtype* x, const Dtype* y, Dtype* out); template -uint32_t caffe_gpu_hamming_distance(const int n, const Dtype* x, +uint32_t caffe_gpu_hamming_distance(const int_tp n, const Dtype* x, const Dtype* y); template -void caffe_gpu_asum(const int n, const Dtype* x, Dtype* y); +void caffe_gpu_asum(const int_tp n, const Dtype* x, Dtype* y); template -void caffe_gpu_sign(const int n, const Dtype* x, Dtype* y); +void caffe_gpu_sign(const int_tp n, const Dtype* x, Dtype* y); template -void caffe_gpu_sgnbit(const int n, const Dtype* x, Dtype* y); +void caffe_gpu_sgnbit(const int_tp n, const Dtype* x, Dtype* y); template -void caffe_gpu_fabs(const int n, const Dtype* x, Dtype* y); +void caffe_gpu_fabs(const int_tp n, const Dtype* x, Dtype* y); template -void caffe_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); +void caffe_gpu_scale(const int_tp n, const Dtype alpha, const Dtype *x, Dtype* y); #define DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(name, operation) \ template \ -__global__ void name##_kernel(const int n, const Dtype* x, Dtype* y) { \ +__global__ void name##_kernel(const int_tp n, const Dtype* x, Dtype* y) { \ CUDA_KERNEL_LOOP(index, n) { \ operation; \ } \ } \ template <> \ -void caffe_gpu_##name(const int n, const float* x, float* y) { \ +void caffe_gpu_##name(const int_tp n, const float* x, float* y) { \ /* NOLINT_NEXT_LINE(whitespace/operators) */ \ name##_kernel<<>>( \ n, x, y); \ } \ template <> \ -void caffe_gpu_##name(const int n, const double* x, double* y) { \ +void caffe_gpu_##name(const int_tp n, const double* x, double* y) { \ /* NOLINT_NEXT_LINE(whitespace/operators) */ \ name##_kernel<<>>( \ n, x, y); \ diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 3355b6658a3..6db58cb665a 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -18,16 +18,16 @@ extern "C" { // be in the form e.g. y[i] = sqrt(a[i]) #define DEFINE_VSL_UNARY_FUNC(name, operation) \ template \ - void v##name(const int n, const Dtype* a, Dtype* y) { \ + void v##name(const int_tp n, const Dtype* a, Dtype* y) { \ CHECK_GT(n, 0); CHECK(a); CHECK(y); \ - for (int i = 0; i < n; ++i) { operation; } \ + for (int_tp i = 0; i < n; ++i) { operation; } \ } \ inline void vs##name( \ - const int n, const float* a, float* y) { \ + const int_tp n, const float* a, float* y) { \ v##name(n, a, y); \ } \ inline void vd##name( \ - const int n, const double* a, double* y) { \ + const int_tp n, const double* a, double* y) { \ v##name(n, a, y); \ } @@ -40,16 +40,16 @@ DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); // The operation should be in the form e.g. y[i] = pow(a[i], b) #define DEFINE_VSL_UNARY_FUNC_WITH_PARAM(name, operation) \ template \ - void v##name(const int n, const Dtype* a, const Dtype b, Dtype* y) { \ + void v##name(const int_tp n, const Dtype* a, const Dtype b, Dtype* y) { \ CHECK_GT(n, 0); CHECK(a); CHECK(y); \ - for (int i = 0; i < n; ++i) { operation; } \ + for (int_tp i = 0; i < n; ++i) { operation; } \ } \ inline void vs##name( \ - const int n, const float* a, const float b, float* y) { \ + const int_tp n, const float* a, const float b, float* y) { \ v##name(n, a, b, y); \ } \ inline void vd##name( \ - const int n, const double* a, const float b, double* y) { \ + const int_tp n, const double* a, const float b, double* y) { \ v##name(n, a, b, y); \ } @@ -59,16 +59,16 @@ DEFINE_VSL_UNARY_FUNC_WITH_PARAM(Powx, y[i] = pow(a[i], b)); // be in the form e.g. y[i] = a[i] + b[i] #define DEFINE_VSL_BINARY_FUNC(name, operation) \ template \ - void v##name(const int n, const Dtype* a, const Dtype* b, Dtype* y) { \ + void v##name(const int_tp n, const Dtype* a, const Dtype* b, Dtype* y) { \ CHECK_GT(n, 0); CHECK(a); CHECK(b); CHECK(y); \ - for (int i = 0; i < n; ++i) { operation; } \ + for (int_tp i = 0; i < n; ++i) { operation; } \ } \ inline void vs##name( \ - const int n, const float* a, const float* b, float* y) { \ + const int_tp n, const float* a, const float* b, float* y) { \ v##name(n, a, b, y); \ } \ inline void vd##name( \ - const int n, const double* a, const double* b, double* y) { \ + const int_tp n, const double* a, const double* b, double* y) { \ v##name(n, a, b, y); \ } @@ -80,15 +80,15 @@ DEFINE_VSL_BINARY_FUNC(Div, y[i] = a[i] / b[i]); // In addition, MKL comes with an additional function axpby that is not present // in standard blas. We will simply use a two-step (inefficient, of course) way // to mimic that. -inline void cblas_saxpby(const int N, const float alpha, const float* X, - const int incX, const float beta, float* Y, - const int incY) { +inline void cblas_saxpby(const int_tp N, const float alpha, const float* X, + const int_tp incX, const float beta, float* Y, + const int_tp incY) { cblas_sscal(N, beta, Y, incY); cblas_saxpy(N, alpha, X, incX, Y, incY); } -inline void cblas_daxpby(const int N, const double alpha, const double* X, - const int incX, const double beta, double* Y, - const int incY) { +inline void cblas_daxpby(const int_tp N, const double alpha, const double* X, + const int_tp incX, const double beta, double* Y, + const int_tp incY) { cblas_dscal(N, beta, Y, incY); cblas_daxpy(N, alpha, X, incX, Y, incY); } diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 06f5ab5e1c0..1b0762ef829 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -51,7 +51,7 @@ class AffinityLayer : public Layer { private: std::vector< shared_ptr< Blob > > min_index_; - std::vector offsets_; + std::vector offsets_; }; /** @@ -70,11 +70,11 @@ class ConnectedComponentLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { + virtual inline int_tp ExactNumTopBlobs() const { return 1; } @@ -90,7 +90,7 @@ class ConnectedComponentLayer : public Layer { const vector*>& bottom); private: - cv::Mat FindBlobs(const int maxlabel, const cv::Mat &input); + cv::Mat FindBlobs(const int_tp maxlabel, const cv::Mat &input); }; /** @@ -109,7 +109,7 @@ class MergeCropLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { + virtual inline int_tp ExactNumBottomBlobs() const { return 2; } @@ -130,10 +130,10 @@ class MergeCropLayer : public Layer { const vector*>& bottom); private: - vector forward_; - vector backward_; - Blob shape_a_; - Blob shape_b_; + vector forward_; + vector backward_; + Blob shape_a_; + Blob shape_b_; }; /** @@ -151,10 +151,10 @@ class BaseConvolutionLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top); - virtual inline int MinBottomBlobs() const { + virtual inline int_tp MinBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { + virtual inline int_tp MinTopBlobs() const { return 1; } virtual inline bool EqualNumBottomTopBlobs() const { @@ -174,23 +174,23 @@ class BaseConvolutionLayer : public Layer { void backward_cpu_bias(Dtype* bias, const Dtype* input); #ifndef CPU_ONLY - void forward_gpu_gemm(const Dtype* col_input, const int col_input_off, + void forward_gpu_gemm(const Dtype* col_input, const int_tp col_input_off, const Dtype* weights, Dtype* output, - const int output_off, bool skip_im2col = false); - void forward_gpu_bias(Dtype* output, const int output_off, const Dtype* bias); - void backward_gpu_gemm(const Dtype* input, const int input_off, + const int_tp output_off, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const int_tp output_off, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const int_tp input_off, const Dtype* weights, Dtype* col_output, - const int col_output_off); - void weight_gpu_gemm(const Dtype* col_input, const int col_input_off, - const Dtype* output, const int output_off, + const int_tp col_output_off); + void weight_gpu_gemm(const Dtype* col_input, const int_tp col_input_off, + const Dtype* output, const int_tp output_off, Dtype* weights); - void backward_gpu_bias(Dtype* bias, const Dtype* input, const int input_off); + void backward_gpu_bias(Dtype* bias, const Dtype* input, const int_tp input_off); shared_ptr< Blob > col_buffer(); #endif /// @brief The spatial dimensions of the input. - inline int input_shape(int i) { + inline int_tp input_shape(int_tp i) { return (*bottom_shape_)[channel_axis_ + i]; } // reverse_dimensions should return true iff we are implementing deconv, so @@ -200,32 +200,32 @@ class BaseConvolutionLayer : public Layer { virtual void compute_output_shape() = 0; /// @brief The spatial dimensions of a filter kernel. - Blob kernel_shape_; + Blob kernel_shape_; /// @brief The spatial dimensions of the stride. - Blob stride_; + Blob stride_; /// @brief The spatial dimensions of the padding. - Blob pad_; + Blob pad_; /// @brief The spatial dimension of the kernel stride. - Blob kstride_; + Blob kstride_; /// @brief The spatial dimensions of the convolution input. - Blob conv_input_shape_; + Blob conv_input_shape_; /// @brief The spatial dimensions of the col_buffer. - vector col_buffer_shape_; + vector col_buffer_shape_; /// @brief The spatial dimensions of the output. - vector output_shape_; - const vector* bottom_shape_; - - int num_spatial_axes_; - int bottom_dim_; - int top_dim_; - - int channel_axis_; - int num_; - int channels_; - int group_; - int out_spatial_dim_; - int weight_offset_; - int num_output_; + vector output_shape_; + const vector* bottom_shape_; + + int_tp num_spatial_axes_; + int_tp bottom_dim_; + int_tp top_dim_; + + int_tp channel_axis_; + int_tp num_; + int_tp channels_; + int_tp group_; + int_tp out_spatial_dim_; + int_tp weight_offset_; + int_tp num_output_; bool bias_term_; bool is_1x1_; bool force_nd_im2col_; @@ -324,9 +324,9 @@ class BaseConvolutionLayer : public Layer { } #endif // USE_CUDA #ifdef USE_GREENTEA - inline void greentea_conv_im2col_gpu(const Dtype* data, const int data_off, + inline void greentea_conv_im2col_gpu(const Dtype* data, const int_tp data_off, Dtype* col_buff, - const int col_buff_off) { + const int_tp col_buff_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( @@ -384,8 +384,8 @@ class BaseConvolutionLayer : public Layer { } inline void greentea_conv_col2im_gpu(const Dtype* col_buff, - const int col_buff_off, Dtype* data, - const int data_off) { + const int_tp col_buff_off, Dtype* data, + const int_tp data_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( @@ -447,14 +447,14 @@ class BaseConvolutionLayer : public Layer { #endif // USE_GREENTEA #endif // !CPU_ONLY - int num_kernels_im2col_; - int num_kernels_col2im_; - int conv_out_channels_; - int conv_in_channels_; - int conv_out_spatial_dim_; - int kernel_dim_; - int col_offset_; - int output_offset_; + int_tp num_kernels_im2col_; + int_tp num_kernels_col2im_; + int_tp conv_out_channels_; + int_tp conv_in_channels_; + int_tp conv_out_spatial_dim_; + int_tp kernel_dim_; + int_tp col_offset_; + int_tp output_offset_; bool use_skernel_; @@ -476,13 +476,13 @@ class ConvolutionLayer : public BaseConvolutionLayer { return "Convolution"; } - virtual size_t ForwardFlops() { - size_t group = this->group_; - size_t N = 1; - size_t M = this->num_output_ / group; - size_t K = this->channels_; - const int* kshape = this->kernel_shape_.cpu_data(); - for (int i = 0; i < this->output_shape_.size(); ++i) { + virtual uint_tp ForwardFlops() { + uint_tp group = this->group_; + uint_tp N = 1; + uint_tp M = this->num_output_ / group; + uint_tp K = this->channels_; + const int_tp* kshape = this->kernel_shape_.cpu_data(); + for (int_tp i = 0; i < this->output_shape_.size(); ++i) { N *= this->output_shape_[i]; K *= kshape[i]; } @@ -573,12 +573,12 @@ class CuDNNConvolutionLayer : public ConvolutionLayer { cudnnTensorDescriptor_t bias_desc_; cudnnFilterDescriptor_t filter_desc_; vector conv_descs_; - int bottom_offset_, top_offset_, bias_offset_; + int_tp bottom_offset_, top_offset_, bias_offset_; - size_t *workspace_fwd_sizes_; - size_t *workspace_bwd_data_sizes_; - size_t *workspace_bwd_filter_sizes_; - size_t workspaceSizeInBytes; // size of underlying storage + uint_tp *workspace_fwd_sizes_; + uint_tp *workspace_bwd_data_sizes_; + uint_tp *workspace_bwd_filter_sizes_; + uint_tp workspaceSizeInBytes; // size of underlying storage void *workspaceData; // underlying storage void **workspace; // aliases into workspaceData }; @@ -605,10 +605,10 @@ class Im2colLayer : public Layer { virtual inline const char* type() const { return "Im2col"; } - virtual inline int ExactNumBottomBlobs() const { + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { + virtual inline int_tp ExactNumTopBlobs() const { return 1; } @@ -625,19 +625,19 @@ class Im2colLayer : public Layer { const vector*>& bottom); /// @brief The spatial dimensions of a filter kernel. - Blob kernel_shape_; + Blob kernel_shape_; /// @brief The spatial dimensions of the stride. - Blob stride_; + Blob stride_; /// @brief The spatial dimensions of the padding. - Blob pad_; + Blob pad_; - int num_spatial_axes_; - int bottom_dim_; - int top_dim_; + int_tp num_spatial_axes_; + int_tp bottom_dim_; + int_tp top_dim_; - int channel_axis_; - int num_; - int channels_; + int_tp channel_axis_; + int_tp num_; + int_tp channels_; bool force_nd_im2col_; }; @@ -665,10 +665,10 @@ class LRNLayer : public Layer { virtual inline const char* type() const { return "LRN"; } - virtual inline int ExactNumBottomBlobs() const { + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { + virtual inline int_tp ExactNumTopBlobs() const { return 1; } @@ -700,15 +700,15 @@ class LRNLayer : public Layer { const vector& propagate_down, const vector*>& bottom); - int size_; - int pre_pad_; + int_tp size_; + int_tp pre_pad_; Dtype alpha_; Dtype beta_; Dtype k_; - int num_; - int channels_; - int height_; - int width_; + int_tp num_; + int_tp channels_; + int_tp height_; + int_tp width_; // Fields used for normalization ACROSS_CHANNELS // scale_ stores the intermediate summing results @@ -757,7 +757,7 @@ class CuDNNLRNLayer : public LRNLayer { cudnnLRNDescriptor_t norm_desc_; cudnnTensorDescriptor_t bottom_desc_, top_desc_; - int size_; + int_tp size_; Dtype alpha_, beta_, k_; }; @@ -784,10 +784,10 @@ class CuDNNLCNLayer : public LRNLayer { cudnnLRNDescriptor_t norm_desc_; cudnnTensorDescriptor_t bottom_desc_, top_desc_; - int size_, pre_pad_; + int_tp size_, pre_pad_; Dtype alpha_, beta_, k_; - size_t tempDataSize; + uint_tp tempDataSize; void *tempData1, *tempData2; }; @@ -824,38 +824,38 @@ class PoolingLayer : public Layer { virtual inline const char* type() const { return "Pooling"; } - virtual inline int ExactNumBottomBlobs() const { + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { + virtual inline int_tp MinTopBlobs() const { return 1; } // MAX POOL layers can output an extra top blob for the mask; // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { + virtual inline int_tp MaxTopBlobs() const { return (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_MAX) ? 2 : 1; } - Blob kernel_shape_; - Blob ext_kernel_shape_; - Blob stride_; - Blob pad_; - Blob kstride_; - Blob size_; - Blob pooled_size_; + Blob kernel_shape_; + Blob ext_kernel_shape_; + Blob stride_; + Blob pad_; + Blob kstride_; + Blob size_; + Blob pooled_size_; - int channel_axis_; - int num_spatial_axes_; - int channels_; + int_tp channel_axis_; + int_tp num_spatial_axes_; + int_tp channels_; bool use_skernel_; bool global_pooling_; - int max_top_blobs_; + int_tp max_top_blobs_; Blob rand_idx_; - Blob max_idx_; + Blob max_idx_; }; #ifdef USE_CUDNN @@ -874,8 +874,8 @@ class CuDNNPoolingLayer : public PoolingLayer { const vector*>& top); virtual ~CuDNNPoolingLayer(); // Currently, cuDNN does not support the extra top blob. - virtual inline int MinTopBlobs() const {return -1;} - virtual inline int ExactNumTopBlobs() const {return 1;} + virtual inline int_tp MinTopBlobs() const {return -1;} + virtual inline int_tp ExactNumTopBlobs() const {return 1;} protected: virtual void Forward_gpu(const vector*>& bottom, @@ -909,8 +909,8 @@ class SPPLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "SPP"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp ExactNumBottomBlobs() const { return 1; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -920,16 +920,16 @@ class SPPLayer : public Layer { const vector*>& bottom); // calculates the kernel and stride dimensions for the pooling layer, // returns a correctly configured LayerParameter for a PoolingLayer - virtual LayerParameter GetPoolingParam(const int pyramid_level, - const int bottom_h, const int bottom_w, + virtual LayerParameter GetPoolingParam(const int_tp pyramid_level, + const int_tp bottom_h, const int_tp bottom_w, const SPPParameter spp_param); - int pyramid_height_; - int bottom_h_, bottom_w_; - int num_; - int channels_; - int kernel_h_, kernel_w_; - int pad_h_, pad_w_; + int_tp pyramid_height_; + int_tp bottom_h_, bottom_w_; + int_tp num_; + int_tp channels_; + int_tp kernel_h_, kernel_w_; + int_tp pad_h_, pad_w_; bool reshaped_first_time_; /// the internal Split layer that feeds the pooling layers diff --git a/log.txt b/log.txt new file mode 100644 index 00000000000..7568e4c0a76 --- /dev/null +++ b/log.txt @@ -0,0 +1 @@ +[57261.014178] [WARN]Received Interrupt signal. diff --git a/matlab/+caffe/+test/test_io.m b/matlab/+caffe/+test/test_io.m new file mode 100644 index 00000000000..2c34bd1e938 --- /dev/null +++ b/matlab/+caffe/+test/test_io.m @@ -0,0 +1,18 @@ +classdef test_io < matlab.unittest.TestCase + methods (Test) + function test_read_write_mean(self) + % randomly generate mean data + width = 200; + height = 300; + channels = 3; + mean_data_write = 255 * rand(width, height, channels, 'single'); + % write mean data to binary proto + mean_proto_file = tempname(); + caffe.io.write_mean(mean_data_write, mean_proto_file); + % read mean data from saved binary proto and test whether they are equal + mean_data_read = caffe.io.read_mean(mean_proto_file); + self.verifyEqual(mean_data_write, mean_data_read) + delete(mean_proto_file); + end + end +end diff --git a/matlab/+caffe/private/caffe_.cpp b/matlab/+caffe/private/caffe_.cpp index 7883f79ebd9..c3d30e83c88 100644 --- a/matlab/+caffe/private/caffe_.cpp +++ b/matlab/+caffe/private/caffe_.cpp @@ -17,7 +17,7 @@ #include "caffe/caffe.hpp" -#define MEX_ARGS int nlhs, mxArray **plhs, int nrhs, const mxArray **prhs +#define MEX_ARGS int_tp nlhs, mxArray **plhs, int_tp nrhs, const mxArray **prhs using namespace caffe; // NOLINT(build/namespaces) @@ -78,9 +78,9 @@ static void mx_mat_to_blob(const mxArray* mx_mat, Blob* blob, // Copy Blob data or diff to matlab array static mxArray* blob_to_mx_mat(const Blob* blob, WhichMemory data_or_diff) { - const int num_axes = blob->num_axes(); + const int_tp num_axes = blob->num_axes(); vector dims(num_axes); - for (int blob_axis = 0, mat_axis = num_axes - 1; blob_axis < num_axes; + for (int_tp blob_axis = 0, mat_axis = num_axes - 1; blob_axis < num_axes; ++blob_axis, --mat_axis) { dims[mat_axis] = static_cast(blob->shape(blob_axis)); } @@ -106,11 +106,11 @@ static mxArray* blob_to_mx_mat(const Blob* blob, return mx_mat; } -// Convert vector to matlab row vector -static mxArray* int_vec_to_mx_vec(const vector& int_vec) { +// Convert vector to matlab row vector +static mxArray* int_vec_to_mx_vec(const vector& int_vec) { mxArray* mx_vec = mxCreateDoubleMatrix(int_vec.size(), 1, mxREAL); double* vec_mem_ptr = mxGetPr(mx_vec); - for (int i = 0; i < int_vec.size(); i++) { + for (int_tp i = 0; i < int_vec.size(); i++) { vec_mem_ptr[i] = static_cast(int_vec[i]); } return mx_vec; @@ -119,7 +119,7 @@ static mxArray* int_vec_to_mx_vec(const vector& int_vec) { // Convert vector to matlab cell vector of strings static mxArray* str_vec_to_mx_strcell(const vector& str_vec) { mxArray* mx_strcell = mxCreateCellMatrix(str_vec.size(), 1); - for (int i = 0; i < str_vec.size(); i++) { + for (int_tp i = 0; i < str_vec.size(); i++) { mxSetCell(mx_strcell, i, mxCreateString(str_vec[i].c_str())); } return mx_strcell; @@ -145,15 +145,15 @@ static T* handle_to_ptr(const mxArray* mx_handle) { // Create a handle struct vector, without setting up each handle in it template -static mxArray* create_handle_vec(int ptr_num) { - const int handle_field_num = 2; +static mxArray* create_handle_vec(int_tp ptr_num) { + const int_tp handle_field_num = 2; const char* handle_fields[handle_field_num] = { "ptr", "init_key" }; return mxCreateStructMatrix(ptr_num, 1, handle_field_num, handle_fields); } // Set up a handle in a handle struct vector by its index template -static void setup_handle(const T* ptr, int index, mxArray* mx_handle_vec) { +static void setup_handle(const T* ptr, int_tp index, mxArray* mx_handle_vec) { mxArray* mx_ptr = mxCreateNumericMatrix(1, 1, mxUINT64_CLASS, mxREAL); *reinterpret_cast(mxGetData(mx_ptr)) = reinterpret_cast(ptr); @@ -173,7 +173,7 @@ static mxArray* ptr_to_handle(const T* ptr) { template static mxArray* ptr_vec_to_handle_vec(const vector >& ptr_vec) { mxArray* mx_handle_vec = create_handle_vec(ptr_vec.size()); - for (int i = 0; i < ptr_vec.size(); i++) { + for (int_tp i = 0; i < ptr_vec.size(); i++) { setup_handle(ptr_vec[i].get(), i, mx_handle_vec); } return mx_handle_vec; @@ -199,7 +199,7 @@ static void solver_get_attr(MEX_ARGS) { mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]), "Usage: caffe_('solver_get_attr', hSolver)"); Solver* solver = handle_to_ptr >(prhs[0]); - const int solver_attr_num = 2; + const int_tp solver_attr_num = 2; const char* solver_attrs[solver_attr_num] = { "hNet_net", "hNet_test_nets" }; mxArray* mx_solver_attr = mxCreateStructMatrix(1, 1, solver_attr_num, solver_attrs); @@ -242,7 +242,7 @@ static void solver_step(MEX_ARGS) { mxCHECK(nrhs == 2 && mxIsStruct(prhs[0]) && mxIsDouble(prhs[1]), "Usage: caffe_('solver_step', hSolver, iters)"); Solver* solver = handle_to_ptr >(prhs[0]); - int iters = mxGetScalar(prhs[1]); + int_tp iters = mxGetScalar(prhs[1]); solver->Step(iters); } @@ -273,7 +273,7 @@ static void net_get_attr(MEX_ARGS) { mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]), "Usage: caffe_('net_get_attr', hNet)"); Net* net = handle_to_ptr >(prhs[0]); - const int net_attr_num = 6; + const int_tp net_attr_num = 6; const char* net_attrs[net_attr_num] = { "hLayer_layers", "hBlob_blobs", "input_blob_indices", "output_blob_indices", "layer_names", "blob_names"}; mxArray* mx_net_attr = mxCreateStructMatrix(1, 1, net_attr_num, @@ -345,7 +345,7 @@ static void layer_get_attr(MEX_ARGS) { mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]), "Usage: caffe_('layer_get_attr', hLayer)"); Layer* layer = handle_to_ptr >(prhs[0]); - const int layer_attr_num = 1; + const int_tp layer_attr_num = 1; const char* layer_attrs[layer_attr_num] = { "hBlob_blobs" }; mxArray* mx_layer_attr = mxCreateStructMatrix(1, 1, layer_attr_num, layer_attrs); @@ -367,10 +367,10 @@ static void blob_get_shape(MEX_ARGS) { mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]), "Usage: caffe_('blob_get_shape', hBlob)"); Blob* blob = handle_to_ptr >(prhs[0]); - const int num_axes = blob->num_axes(); + const int_tp num_axes = blob->num_axes(); mxArray* mx_shape = mxCreateDoubleMatrix(1, num_axes, mxREAL); double* shape_mem_mtr = mxGetPr(mx_shape); - for (int blob_axis = 0, mat_axis = num_axes - 1; blob_axis < num_axes; + for (int_tp blob_axis = 0, mat_axis = num_axes - 1; blob_axis < num_axes; ++blob_axis, --mat_axis) { shape_mem_mtr[mat_axis] = static_cast(blob->shape(blob_axis)); } @@ -384,11 +384,11 @@ static void blob_reshape(MEX_ARGS) { Blob* blob = handle_to_ptr >(prhs[0]); const mxArray* mx_shape = prhs[1]; double* shape_mem_mtr = mxGetPr(mx_shape); - const int num_axes = mxGetNumberOfElements(mx_shape); - vector blob_shape(num_axes); - for (int blob_axis = 0, mat_axis = num_axes - 1; blob_axis < num_axes; + const int_tp num_axes = mxGetNumberOfElements(mx_shape); + vector blob_shape(num_axes); + for (int_tp blob_axis = 0, mat_axis = num_axes - 1; blob_axis < num_axes; ++blob_axis, --mat_axis) { - blob_shape[blob_axis] = static_cast(shape_mem_mtr[mat_axis]); + blob_shape[blob_axis] = static_cast(shape_mem_mtr[mat_axis]); } blob->Reshape(blob_shape); } @@ -441,7 +441,7 @@ static void set_mode_gpu(MEX_ARGS) { static void set_device(MEX_ARGS) { mxCHECK(nrhs == 1 && mxIsDouble(prhs[0]), "Usage: caffe_('set_device', device_id)"); - int device_id = static_cast(mxGetScalar(prhs[0])); + int_tp device_id = static_cast(mxGetScalar(prhs[0])); Caffe::SetDevice(device_id); } @@ -483,12 +483,12 @@ static void write_mean(MEX_ARGS) { mxCHECK(nrhs == 2 && mxIsSingle(prhs[0]) && mxIsChar(prhs[1]), "Usage: caffe_('write_mean', mean_data, mean_proto_file)"); char* mean_proto_file = mxArrayToString(prhs[1]); - int ndims = mxGetNumberOfDimensions(prhs[0]); + int_tp ndims = mxGetNumberOfDimensions(prhs[0]); mxCHECK(ndims >= 2 && ndims <= 3, "mean_data must have at 2 or 3 dimensions"); const mwSize *dims = mxGetDimensions(prhs[0]); - int width = dims[0]; - int height = dims[1]; - int channels; + int_tp width = dims[0]; + int_tp height = dims[1]; + int_tp channels; if (ndims == 3) channels = dims[2]; else @@ -554,7 +554,7 @@ void mexFunction(MEX_ARGS) { char* cmd = mxArrayToString(prhs[0]); bool dispatched = false; // Dispatch to cmd handler - for (int i = 0; handlers[i].func != NULL; i++) { + for (int_tp i = 0; handlers[i].func != NULL; i++) { if (handlers[i].cmd.compare(cmd) == 0) { handlers[i].func(nlhs, plhs, nrhs-1, prhs+1); dispatched = true; diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 2ba59737405..7d1840127ae 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -16,9 +16,9 @@ namespace caffe { template -bool Blob::Reshape(const int num, const int channels, const int height, - const int width) { - vector shape(4); +bool Blob::Reshape(const int_tp num, const int_tp channels, const int_tp height, + const int_tp width) { + vector shape(4); shape[0] = num; shape[1] = channels; shape[2] = height; @@ -27,16 +27,16 @@ bool Blob::Reshape(const int num, const int channels, const int height, } template -bool Blob::Reshape(const vector& shape) { +bool Blob::Reshape(const vector& shape) { CHECK_LE(shape.size(), kMaxBlobAxes); count_ = 1; shape_.resize(shape.size()); - if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) { + if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int_tp)) { shape_data_.reset( - new SyncedMemory(shape.size() * sizeof(int), device_)); + new SyncedMemory(shape.size() * sizeof(int_tp), device_)); } - int* shape_data = static_cast(shape_data_->mutable_cpu_data()); - for (int i = 0; i < shape.size(); ++i) { + int_tp* shape_data = static_cast(shape_data_->mutable_cpu_data()); + for (int_tp i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); CHECK_LE(shape[i], INT_MAX / count_)<< "blob size exceeds INT_MAX"; count_ *= shape[i]; @@ -55,8 +55,8 @@ bool Blob::Reshape(const vector& shape) { template bool Blob::Reshape(const BlobShape& shape) { CHECK_LE(shape.dim_size(), kMaxBlobAxes); - vector shape_vec(shape.dim_size()); - for (int i = 0; i < shape.dim_size(); ++i) { + vector shape_vec(shape.dim_size()); + for (int_tp i = 0; i < shape.dim_size(); ++i) { shape_vec[i] = shape.dim(i); } return Reshape(shape_vec); @@ -68,24 +68,24 @@ bool Blob::ReshapeLike(const Blob& other) { } template -Blob::Blob(const int num, const int channels, const int height, - const int width, device *device_context) +Blob::Blob(const int_tp num, const int_tp channels, const int_tp height, + const int_tp width, device *device_context) // capacity_ must be initialized before calling Reshape : capacity_(0), device_(device_context) { Reshape(num, channels, height, width); } template -Blob::Blob(const vector& shape, device *device_context) +Blob::Blob(const vector& shape, device *device_context) // capacity_ must be initialized before calling Reshape : capacity_(0), device_(device_context) { Reshape(shape); } template -const int* Blob::gpu_shape() const { +const int_tp* Blob::gpu_shape() const { CHECK(shape_data_); - return (const int*)shape_data_->gpu_data(); + return (const int_tp*)shape_data_->gpu_data(); } template @@ -156,11 +156,11 @@ void Blob::ShareDiff(const Blob& other) { // The "update" method is used for parameter blobs in a Net, which are stored // as Blob or Blob -- hence we do not define it for -// Blob or Blob. -template<> void Blob::Update() { +// Blob or Blob. +template<> void Blob::Update() { NOT_IMPLEMENTED; } -template<> void Blob::Update() { +template<> void Blob::Update() { NOT_IMPLEMENTED; } @@ -203,7 +203,7 @@ void Blob::Update() { } } -template<> unsigned int Blob::asum_data() const { +template<> uint_tp Blob::asum_data() const { NOT_IMPLEMENTED; return 0; } @@ -213,7 +213,7 @@ device *Blob::get_device() { return device_; } -template<> int Blob::asum_data() const { +template<> int_tp Blob::asum_data() const { NOT_IMPLEMENTED; return 0; } @@ -255,12 +255,12 @@ Dtype Blob::asum_data() const { return 0; } -template<> unsigned int Blob::asum_diff() const { +template<> uint_tp Blob::asum_diff() const { NOT_IMPLEMENTED; return 0; } -template<> int Blob::asum_diff() const { +template<> int_tp Blob::asum_diff() const { NOT_IMPLEMENTED; return 0; } @@ -302,12 +302,12 @@ Dtype Blob::asum_diff() const { return 0; } -template<> unsigned int Blob::sumsq_data() const { +template<> uint_tp Blob::sumsq_data() const { NOT_IMPLEMENTED; return 0; } -template<> int Blob::sumsq_data() const { +template<> int_tp Blob::sumsq_data() const { NOT_IMPLEMENTED; return 0; } @@ -352,12 +352,12 @@ Dtype Blob::sumsq_data() const { return sumsq; } -template<> unsigned int Blob::sumsq_diff() const { +template<> uint_tp Blob::sumsq_diff() const { NOT_IMPLEMENTED; return 0; } -template<> int Blob::sumsq_diff() const { +template<> int_tp Blob::sumsq_diff() const { NOT_IMPLEMENTED; return 0; } @@ -402,11 +402,11 @@ Dtype Blob::sumsq_diff() const { return sumsq; } -template<> void Blob::scale_data(unsigned int scale_factor) { +template<> void Blob::scale_data(uint_tp scale_factor) { NOT_IMPLEMENTED; } -template<> void Blob::scale_data(int scale_factor) { +template<> void Blob::scale_data(int_tp scale_factor) { NOT_IMPLEMENTED; } @@ -448,11 +448,11 @@ void Blob::scale_data(Dtype scale_factor) { } } -template<> void Blob::scale_diff(unsigned int scale_factor) { +template<> void Blob::scale_diff(uint_tp scale_factor) { NOT_IMPLEMENTED; } -template<> void Blob::scale_diff(int scale_factor) { +template<> void Blob::scale_diff(int_tp scale_factor) { NOT_IMPLEMENTED; } @@ -509,8 +509,8 @@ bool Blob::ShapeEquals(const BlobProto& other) { && LegacyShape(-2) == other.height() && LegacyShape(-1) == other.width(); } - vector other_shape(other.shape().dim_size()); - for (int i = 0; i < other.shape().dim_size(); ++i) { + vector other_shape(other.shape().dim_size()); + for (int_tp i = 0; i < other.shape().dim_size(); ++i) { other_shape[i] = other.shape().dim(i); } return shape_ == other_shape; @@ -570,7 +570,7 @@ void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { template void Blob::FromProto(const BlobProto& proto, bool reshape) { if (reshape) { - vector shape; + vector shape; if (proto.has_num() || proto.has_channels() || proto.has_height() || proto.has_width()) { // Using deprecated 4D Blob dimensions -- @@ -582,7 +582,7 @@ void Blob::FromProto(const BlobProto& proto, bool reshape) { shape[3] = proto.width(); } else { shape.resize(proto.shape().dim_size()); - for (int i = 0; i < proto.shape().dim_size(); ++i) { + for (int_tp i = 0; i < proto.shape().dim_size(); ++i) { shape[i] = proto.shape().dim(i); } } @@ -594,25 +594,25 @@ void Blob::FromProto(const BlobProto& proto, bool reshape) { Dtype* data_vec = mutable_cpu_data(); if (proto.double_data_size() > 0) { CHECK_EQ(count_, proto.double_data_size()); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { data_vec[i] = proto.double_data(i); } } else { CHECK_EQ(count_, proto.data_size()); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { data_vec[i] = proto.data(i); } } if (proto.double_diff_size() > 0) { CHECK_EQ(count_, proto.double_diff_size()); Dtype* diff_vec = mutable_cpu_diff(); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { diff_vec[i] = proto.double_diff(i); } } else if (proto.diff_size() > 0) { CHECK_EQ(count_, proto.diff_size()); Dtype* diff_vec = mutable_cpu_diff(); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { diff_vec[i] = proto.diff(i); } } @@ -621,18 +621,18 @@ void Blob::FromProto(const BlobProto& proto, bool reshape) { template <> void Blob::ToProto(BlobProto* proto, bool write_diff) const { proto->clear_shape(); - for (int i = 0; i < shape_.size(); ++i) { + for (int_tp i = 0; i < shape_.size(); ++i) { proto->mutable_shape()->add_dim(shape_[i]); } proto->clear_double_data(); proto->clear_double_diff(); const double* data_vec = cpu_data(); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { proto->add_double_data(data_vec[i]); } if (write_diff) { const double* diff_vec = cpu_diff(); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { proto->add_double_diff(diff_vec[i]); } } @@ -641,26 +641,26 @@ void Blob::ToProto(BlobProto* proto, bool write_diff) const { template <> void Blob::ToProto(BlobProto* proto, bool write_diff) const { proto->clear_shape(); - for (int i = 0; i < shape_.size(); ++i) { + for (int_tp i = 0; i < shape_.size(); ++i) { proto->mutable_shape()->add_dim(shape_[i]); } proto->clear_data(); proto->clear_diff(); const float* data_vec = cpu_data(); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { proto->add_data(data_vec[i]); } if (write_diff) { const float* diff_vec = cpu_diff(); - for (int i = 0; i < count_; ++i) { + for (int_tp i = 0; i < count_; ++i) { proto->add_diff(diff_vec[i]); } } } INSTANTIATE_CLASS(Blob); -template class Blob; -template class Blob; +template class Blob; +template class Blob; } // namespace caffe diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index 5235effaf9e..aa4d0867c66 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -145,7 +145,7 @@ root_solver_(true) {} Caffe::~Caffe() {} -void Caffe::set_random_seed(const unsigned int seed) { +void Caffe::set_random_seed(const size_t seed) { // RNG seed Get().random_generator_.reset(new RNG(seed)); } @@ -168,7 +168,7 @@ int Caffe::EnumerateDevices(bool silent) { class Caffe::RNG::Generator { public: Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} - explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {} + explicit Generator(size_t seed) : rng_(new caffe::rng_t(seed)) {} caffe::rng_t* rng() {return rng_.get();} private: shared_ptr rng_; @@ -176,7 +176,7 @@ class Caffe::RNG::Generator { Caffe::RNG::RNG() : generator_(new Generator()) {} -Caffe::RNG::RNG(unsigned int seed) : generator_(new Generator(seed)) {} +Caffe::RNG::RNG(size_t seed) : generator_(new Generator(seed)) {} Caffe::RNG& Caffe::RNG::operator=(const RNG& other) { generator_ = other.generator_; @@ -213,6 +213,12 @@ Caffe::Caffe() != CURAND_STATUS_SUCCESS) { LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; } + if (curandCreateGenerator(&curand_generator64_, CURAND_RNG_QUASI_SOBOL64) + != CURAND_STATUS_SUCCESS || + curandSetPseudoRandomGeneratorSeed(curand_generator64_, cluster_seedgen()) + != CURAND_STATUS_SUCCESS) { + LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; + } #endif // USE_CUDA } @@ -233,7 +239,7 @@ Caffe::~Caffe() { #endif // USE_CUDA } -void Caffe::set_random_seed(const unsigned int seed) { +void Caffe::set_random_seed(const size_t seed) { if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Curand seed @@ -393,13 +399,13 @@ void Caffe::SetDevices(std::vector device_ids) { viennacl::ocl::device> > platform_devices; // Loop through devices - for (std::size_t platform_id = 0; platform_id < platforms.size(); + for (int platform_id = 0; platform_id < platforms.size(); ++platform_id) { typedef std::vector devices_type; try { devices_type devices = platforms[platform_id].devices( CL_DEVICE_TYPE_ALL); - for (std::size_t device_id = 0; device_id < devices.size(); ++device_id) { + for (int device_id = 0; device_id < devices.size(); ++device_id) { platform_devices.push_back( std::make_tuple(platforms[platform_id], devices[device_id])); // Check if this device is really used and initialize @@ -536,7 +542,7 @@ class Caffe::RNG::Generator { Generator() : rng_(new caffe::rng_t(cluster_seedgen())) { } - explicit Generator(unsigned int seed) + explicit Generator(size_t seed) : rng_(new caffe::rng_t(seed)) { } caffe::rng_t* rng() { @@ -550,7 +556,7 @@ Caffe::RNG::RNG() : generator_(new Generator()) { } -Caffe::RNG::RNG(unsigned int seed) +Caffe::RNG::RNG(size_t seed) : generator_(new Generator(seed)) { } diff --git a/src/caffe/cuda/cuda_dev_ptr.cpp b/src/caffe/cuda/cuda_dev_ptr.cpp index 1b92d2a9888..656cf4ead01 100644 --- a/src/caffe/cuda/cuda_dev_ptr.cpp +++ b/src/caffe/cuda/cuda_dev_ptr.cpp @@ -15,7 +15,7 @@ void* cuda_dev_ptr::get() { } template -std::ptrdiff_t cuda_dev_ptr::off() { +int_tp cuda_dev_ptr::off() { return 0; } diff --git a/src/caffe/data_reader.cpp b/src/caffe/data_reader.cpp index e237a5b1cb6..2818f7a3c14 100644 --- a/src/caffe/data_reader.cpp +++ b/src/caffe/data_reader.cpp @@ -41,9 +41,9 @@ DataReader::~DataReader() { // -DataReader::QueuePair::QueuePair(int size) { +DataReader::QueuePair::QueuePair(int_tp size) { // Initialize the free queue with requested number of datums - for (int i = 0; i < size; ++i) { + for (int_tp i = 0; i < size; ++i) { free_.push(new Datum()); } } @@ -76,19 +76,19 @@ void DataReader::Body::InternalThreadEntry() { shared_ptr cursor(db->NewCursor()); vector > qps; try { - int solver_count = param_.phase() == TRAIN ? Caffe::solver_count() : 1; + int_tp solver_count = param_.phase() == TRAIN ? Caffe::solver_count() : 1; // To ensure deterministic runs, only start running once all solvers // are ready. But solvers need to peek on one item during initialization, // so read one item, then wait for the next solver. - for (int i = 0; i < solver_count; ++i) { + for (int_tp i = 0; i < solver_count; ++i) { shared_ptr qp(new_queue_pairs_.pop()); read_one(cursor.get(), qp.get()); qps.push_back(qp); } // Main loop while (!must_stop()) { - for (int i = 0; i < solver_count; ++i) { + for (int_tp i = 0; i < solver_count; ++i) { read_one(cursor.get(), qps[i].get()); } // Check no additional readers have been created. This can happen if diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 95b7e76c840..c959369c50d 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -34,7 +34,7 @@ DataTransformer::DataTransformer(const TransformationParameter& param, if (param_.mean_value_size() > 0) { CHECK(param_.has_mean_file() == false) << "Cannot specify mean_file and mean_value at the same time"; - for (int c = 0; c < param_.mean_value_size(); ++c) { + for (int_tp c = 0; c < param_.mean_value_size(); ++c) { mean_values_.push_back(param_.mean_value(c)); } } @@ -44,11 +44,11 @@ template void DataTransformer::Transform(const Datum& datum, Dtype* transformed_data) { const string& data = datum.data(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); + const int_tp datum_channels = datum.channels(); + const int_tp datum_height = datum.height(); + const int_tp datum_width = datum.width(); - const int crop_size = param_.crop_size(); + const int_tp crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -72,17 +72,17 @@ void DataTransformer::Transform(const Datum& datum, << datum_channels; if (datum_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity - for (int c = 1; c < datum_channels; ++c) { + for (int_tp c = 1; c < datum_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } - int height = datum_height; - int width = datum_width; + int_tp height = datum_height; + int_tp width = datum_width; - int h_off = 0; - int w_off = 0; + int_tp h_off = 0; + int_tp w_off = 0; if (crop_size) { height = crop_size; width = crop_size; @@ -97,10 +97,10 @@ void DataTransformer::Transform(const Datum& datum, } Dtype datum_element; - int top_index, data_index; - for (int c = 0; c < datum_channels; ++c) { - for (int h = 0; h < height; ++h) { - for (int w = 0; w < width; ++w) { + int_tp top_index, data_index; + for (int_tp c = 0; c < datum_channels; ++c) { + for (int_tp h = 0; h < height; ++h) { + for (int_tp w = 0; w < width; ++w) { data_index = (c * datum_height + h_off + h) * datum_width + w_off + w; if (do_mirror) { top_index = (c * height + h) * width + (width - 1 - w); @@ -156,16 +156,16 @@ void DataTransformer::Transform(const Datum& datum, } } - const int crop_size = param_.crop_size(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); + const int_tp crop_size = param_.crop_size(); + const int_tp datum_channels = datum.channels(); + const int_tp datum_height = datum.height(); + const int_tp datum_width = datum.width(); // Check dimensions. - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - const int num = transformed_blob->num(); + const int_tp channels = transformed_blob->channels(); + const int_tp height = transformed_blob->height(); + const int_tp width = transformed_blob->width(); + const int_tp num = transformed_blob->num(); CHECK_EQ(channels, datum_channels); CHECK_LE(height, datum_height); @@ -187,18 +187,18 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const vector & datum_vector, Blob* transformed_blob) { - const int datum_num = datum_vector.size(); - const int num = transformed_blob->num(); - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); + const int_tp datum_num = datum_vector.size(); + const int_tp num = transformed_blob->num(); + const int_tp channels = transformed_blob->channels(); + const int_tp height = transformed_blob->height(); + const int_tp width = transformed_blob->width(); CHECK_GT(datum_num, 0)<< "There is no datum to add"; CHECK_LE(datum_num, num)<< "The size of datum_vector must be no greater than transformed_blob->num()"; Blob uni_blob(1, channels, height, width, device_); - for (int item_id = 0; item_id < datum_num; ++item_id) { - int offset = transformed_blob->offset(item_id); + for (int_tp item_id = 0; item_id < datum_num; ++item_id) { + int_tp offset = transformed_blob->offset(item_id); uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); Transform(datum_vector[item_id], &uni_blob); } @@ -208,18 +208,18 @@ void DataTransformer::Transform(const vector & datum_vector, template void DataTransformer::Transform(const vector & mat_vector, Blob* transformed_blob) { - const int mat_num = mat_vector.size(); - const int num = transformed_blob->num(); - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); + const int_tp mat_num = mat_vector.size(); + const int_tp num = transformed_blob->num(); + const int_tp channels = transformed_blob->channels(); + const int_tp height = transformed_blob->height(); + const int_tp width = transformed_blob->width(); CHECK_GT(mat_num, 0)<< "There is no MAT to add"; CHECK_EQ(mat_num, num)<< "The size of mat_vector must be equals to transformed_blob->num()"; Blob uni_blob(1, channels, height, width, device_); - for (int item_id = 0; item_id < mat_num; ++item_id) { - int offset = transformed_blob->offset(item_id); + for (int_tp item_id = 0; item_id < mat_num; ++item_id) { + int_tp offset = transformed_blob->offset(item_id); uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); Transform(mat_vector[item_id], &uni_blob); } @@ -228,16 +228,16 @@ void DataTransformer::Transform(const vector & mat_vector, template void DataTransformer::Transform(const cv::Mat& cv_img, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); - const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; + const int_tp crop_size = param_.crop_size(); + const int_tp img_channels = cv_img.channels(); + const int_tp img_height = cv_img.rows; + const int_tp img_width = cv_img.cols; // Check dimensions. - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - const int num = transformed_blob->num(); + const int_tp channels = transformed_blob->channels(); + const int_tp height = transformed_blob->height(); + const int_tp width = transformed_blob->width(); + const int_tp num = transformed_blob->num(); CHECK_EQ(channels, img_channels); CHECK_LE(height, img_height); @@ -270,14 +270,14 @@ void DataTransformer::Transform(const cv::Mat& cv_img, << img_channels; if (img_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity - for (int c = 1; c < img_channels; ++c) { + for (int_tp c = 1; c < img_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } } - int h_off = 0; - int w_off = 0; + int_tp h_off = 0; + int_tp w_off = 0; cv::Mat cv_cropped_img = cv_img; if (crop_size) { CHECK_EQ(crop_size, height); @@ -300,18 +300,18 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK(cv_cropped_img.data); Dtype* transformed_data = transformed_blob->mutable_cpu_data(); - int top_index; - for (int h = 0; h < height; ++h) { + int_tp top_index; + for (int_tp h = 0; h < height; ++h) { const uchar* ptr = cv_cropped_img.ptr(h); - int img_index = 0; - for (int w = 0; w < width; ++w) { - for (int c = 0; c < img_channels; ++c) { + int_tp img_index = 0; + for (int_tp w = 0; w < width; ++w) { + for (int_tp c = 0; c < img_channels; ++c) { if (do_mirror) { top_index = (c * height + h) * width + (width - 1 - w); } else { top_index = (c * height + h) * width + w; } - // int top_index = (c * height + h) * width + w; + // int_tp top_index = (c * height + h) * width + w; Dtype pixel; if (cv_img.depth() == CV_8U) { pixel = static_cast(ptr[img_index++]); @@ -320,7 +320,7 @@ void DataTransformer::Transform(const cv::Mat& cv_img, [img_index++]); } if (has_mean_file) { - int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; + int_tp mean_index = (c * img_height + h_off + h) * img_width + w_off + w; transformed_data[top_index] = (pixel - mean[mean_index]) * scale; } else { if (has_mean_values) { @@ -338,11 +338,11 @@ void DataTransformer::Transform(const cv::Mat& cv_img, template void DataTransformer::Transform(Blob* input_blob, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); - const int input_num = input_blob->num(); - const int input_channels = input_blob->channels(); - const int input_height = input_blob->height(); - const int input_width = input_blob->width(); + const int_tp crop_size = param_.crop_size(); + const int_tp input_num = input_blob->num(); + const int_tp input_channels = input_blob->channels(); + const int_tp input_height = input_blob->height(); + const int_tp input_width = input_blob->width(); if (transformed_blob->count() == 0) { // Initialize transformed_blob with the right shape. @@ -355,11 +355,11 @@ void DataTransformer::Transform(Blob* input_blob, } } - const int num = transformed_blob->num(); - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - const int size = transformed_blob->count(); + const int_tp num = transformed_blob->num(); + const int_tp channels = transformed_blob->channels(); + const int_tp height = transformed_blob->height(); + const int_tp width = transformed_blob->width(); + const int_tp size = transformed_blob->count(); CHECK_LE(input_num, num); CHECK_EQ(input_channels, channels); @@ -372,8 +372,8 @@ void DataTransformer::Transform(Blob* input_blob, const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; - int h_off = 0; - int w_off = 0; + int_tp h_off = 0; + int_tp w_off = 0; if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); @@ -395,8 +395,8 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_EQ(input_channels, data_mean_.channels()); CHECK_EQ(input_height, data_mean_.height()); CHECK_EQ(input_width, data_mean_.width()); - for (int n = 0; n < input_num; ++n) { - int offset = input_blob->offset(n); + for (int_tp n = 0; n < input_num; ++n) { + int_tp offset = input_blob->offset(n); caffe_sub(data_mean_.count(), input_data + offset, data_mean_.cpu_data(), input_data + offset); } @@ -409,9 +409,9 @@ void DataTransformer::Transform(Blob* input_blob, if (mean_values_.size() == 1) { caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); } else { - for (int n = 0; n < input_num; ++n) { - for (int c = 0; c < input_channels; ++c) { - int offset = input_blob->offset(n, c); + for (int_tp n = 0; n < input_num; ++n) { + for (int_tp c = 0; c < input_channels; ++c) { + int_tp offset = input_blob->offset(n, c); caffe_add_scalar(input_height * input_width, -(mean_values_[c]), input_data + offset); } @@ -421,22 +421,22 @@ void DataTransformer::Transform(Blob* input_blob, Dtype* transformed_data = transformed_blob->mutable_cpu_data(); - for (int n = 0; n < input_num; ++n) { - int top_index_n = n * channels; - int data_index_n = n * channels; - for (int c = 0; c < channels; ++c) { - int top_index_c = (top_index_n + c) * height; - int data_index_c = (data_index_n + c) * input_height + h_off; - for (int h = 0; h < height; ++h) { - int top_index_h = (top_index_c + h) * width; - int data_index_h = (data_index_c + h) * input_width + w_off; + for (int_tp n = 0; n < input_num; ++n) { + int_tp top_index_n = n * channels; + int_tp data_index_n = n * channels; + for (int_tp c = 0; c < channels; ++c) { + int_tp top_index_c = (top_index_n + c) * height; + int_tp data_index_c = (data_index_n + c) * input_height + h_off; + for (int_tp h = 0; h < height; ++h) { + int_tp top_index_h = (top_index_c + h) * width; + int_tp data_index_h = (data_index_c + h) * input_width + w_off; if (do_mirror) { - int top_index_w = top_index_h + width - 1; - for (int w = 0; w < width; ++w) { + int_tp top_index_w = top_index_h + width - 1; + for (int_tp w = 0; w < width; ++w) { transformed_data[top_index_w - w] = input_data[data_index_h + w]; } } else { - for (int w = 0; w < width; ++w) { + for (int_tp w = 0; w < width; ++w) { transformed_data[top_index_h + w] = input_data[data_index_h + w]; } } @@ -450,7 +450,7 @@ void DataTransformer::Transform(Blob* input_blob, } template -vector DataTransformer::InferBlobShape(const Datum& datum) { +vector DataTransformer::InferBlobShape(const Datum& datum) { if (datum.encoded()) { #ifdef USE_OPENCV CHECK(!(param_.force_color() && param_.force_gray())) @@ -468,16 +468,16 @@ vector DataTransformer::InferBlobShape(const Datum& datum) { LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; #endif // USE_OPENCV } - const int crop_size = param_.crop_size(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); + const int_tp crop_size = param_.crop_size(); + const int_tp datum_channels = datum.channels(); + const int_tp datum_height = datum.height(); + const int_tp datum_width = datum.width(); // Check dimensions. CHECK_GT(datum_channels, 0); CHECK_GE(datum_height, crop_size); CHECK_GE(datum_width, crop_size); // Build BlobShape. - vector shape(4); + vector shape(4); shape[0] = 1; shape[1] = datum_channels; shape[2] = (crop_size)? crop_size: datum_height; @@ -486,12 +486,12 @@ vector DataTransformer::InferBlobShape(const Datum& datum) { } template -vector DataTransformer::InferBlobShape( +vector DataTransformer::InferBlobShape( const vector & datum_vector) { - const int num = datum_vector.size(); + const int_tp num = datum_vector.size(); CHECK_GT(num, 0) << "There is no datum to in the vector"; // Use first datum in the vector to InferBlobShape. - vector shape = InferBlobShape(datum_vector[0]); + vector shape = InferBlobShape(datum_vector[0]); // Adjust num to the size of the vector. shape[0] = num; return shape; @@ -499,17 +499,17 @@ vector DataTransformer::InferBlobShape( #ifdef USE_OPENCV template -vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { - const int crop_size = param_.crop_size(); - const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; +vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { + const int_tp crop_size = param_.crop_size(); + const int_tp img_channels = cv_img.channels(); + const int_tp img_height = cv_img.rows; + const int_tp img_width = cv_img.cols; // Check dimensions. CHECK_GT(img_channels, 0); CHECK_GE(img_height, crop_size); CHECK_GE(img_width, crop_size); // Build BlobShape. - vector shape(4); + vector shape(4); shape[0] = 1; shape[1] = img_channels; shape[2] = (crop_size)? crop_size: img_height; @@ -518,12 +518,12 @@ vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { } template -vector DataTransformer::InferBlobShape( +vector DataTransformer::InferBlobShape( const vector & mat_vector) { - const int num = mat_vector.size(); + const int_tp num = mat_vector.size(); CHECK_GT(num, 0) << "There is no cv_img to in the vector"; // Use first cv_img in the vector to InferBlobShape. - vector shape = InferBlobShape(mat_vector[0]); + vector shape = InferBlobShape(mat_vector[0]); // Adjust num to the size of the vector. shape[0] = num; return shape; @@ -535,7 +535,7 @@ void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || (phase_ == TRAIN && param_.crop_size()); if (needs_rand) { - const unsigned int rng_seed = caffe_rng_rand(); + const uint_tp rng_seed = caffe_rng_rand(); rng_.reset(new Caffe::RNG(rng_seed)); } else { rng_.reset(); @@ -543,7 +543,7 @@ void DataTransformer::InitRand() { } template -int DataTransformer::Rand(int n) { +int_tp DataTransformer::Rand(int_tp n) { CHECK(rng_); CHECK_GT(n, 0); caffe::rng_t* rng = static_cast(rng_->generator()); diff --git a/src/caffe/device.cpp b/src/caffe/device.cpp index 828e92e2bd4..49cb4eef087 100644 --- a/src/caffe/device.cpp +++ b/src/caffe/device.cpp @@ -39,10 +39,10 @@ void device::Init() { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(id_); - std::vector temp(3); + std::vector temp(3); clGetDeviceInfo(ctx.devices()[0].id(), CL_DEVICE_MAX_WORK_ITEM_SIZES, - sizeof(size_t), &temp[0], NULL); + sizeof(uint_tp), &temp[0], NULL); workgroup_sizes_[0] = temp[0]; workgroup_sizes_[1] = temp[1]; workgroup_sizes_[2] = temp[2]; @@ -140,22 +140,22 @@ void device::FinishQueues() { } } -size_t device::memory_usage() { +uint_tp device::memory_usage() { return memory_usage_; } -size_t device::peak_memory_usage() { +uint_tp device::peak_memory_usage() { return peak_memory_usage_; } -void device::IncreaseMemoryUsage(size_t bytes) { +void device::IncreaseMemoryUsage(uint_tp bytes) { memory_usage_ += bytes; if (memory_usage_ > peak_memory_usage_) { peak_memory_usage_ = memory_usage_; } } -void device::DecreaseMemoryUsage(size_t bytes) { +void device::DecreaseMemoryUsage(uint_tp bytes) { memory_usage_ -= bytes; } diff --git a/src/caffe/greentea/cl_headers/header.cl b/src/caffe/greentea/cl_headers/header.cl index d2457dea2a7..d4851cb16e2 100644 --- a/src/caffe/greentea/cl_headers/header.cl +++ b/src/caffe/greentea/cl_headers/header.cl @@ -18,6 +18,14 @@ #define atomic_cmpxchg(x, y, z) x #endif +// Types used for parameters, offset computations and so on +#define int_tp long +#define uint_tp unsigned long + +// Definitions used to cast the types above as needed +#define int_tpc long +#define uint_tpc unsigned long + #define CONCAT(A,B) A##_##B #define TEMPLATE(name,type) CONCAT(name,type) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index c28a5ec8ca7..baba5da7f3a 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -5,56 +5,56 @@ #include #include namespace caffe { -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT -std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT -std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT -std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n int in_n = (int) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n out[index] = 0;\n int lower = (int) (begins[n]);\n int upper = lower + (int) (counts[n]);\n for (int i = lower; i < upper; ++i) {\n int in_n = (int) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT -std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT -std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT -std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT -std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT -std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape_ptr[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape_ptr[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_ndsk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT -std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT -std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads,\n const int dims,\n __global const Dtype* bottom_a,\n const int forward_a,\n __global const Dtype* bottom_b,\n const int forward_b,\n __global Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n const int dims,\n __global Dtype* bottom_a,\n const int backward_a,\n __global Dtype* bottom_b,\n const int backward_b,\n __global const Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT -std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT -std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n const int tile_size, const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int n = index / tile_size / num_tiles / bottom_tile_axis;\n const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int tile_size,\n const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size) % bottom_tile_axis;\n const int n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels,\n const int dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int div_factor) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n,\n __global const Dtype* in_diff, const int in_diff_off,\n __global const Dtype* in_data, const int in_data_off,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT -std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT -std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n int in_n = (int) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int count, const int inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / (inner_dim);\n out[index] = 0;\n int lower = (int) (begins[n]);\n int upper = lower + (int) (counts[n]);\n for (int i = lower; i < upper; ++i) {\n int in_n = (int) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT -std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num,\n const int channels,\n const int spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype sum = 0;\n for (int c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num,\n const int channels, const int spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int n = index / channels / spatial_dim;\n int s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels,\n const int spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int n = index / spatial_dim;\n int s = index % spatial_dim;\n Dtype dot = 0;\n for (int c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT -std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data,\n const int forward, const int num_concats,\n const int concat_size,\n const int top_concat_axis,\n const int bottom_concat_axis,\n const int offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_concat_size = concat_size * bottom_concat_axis;\n const int concat_num = index / total_concat_size;\n const int concat_index = index % total_concat_size;\n const int top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT -std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int n = i / channels; // the num index, to access y and dist_sq\n if ((int)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int n,\n __global const Dtype* in,\n __global const unsigned int* mask,\n const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int n, __global const Dtype* in_diff,\n __global const unsigned int* mask, const unsigned int threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT -std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int blob_idx,\n __global Dtype* top_data,\n __global int* mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int blob_idx,\n __global const int* mask,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int M, const int N,\n const int K,\n __global Dtype* top_data) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned int intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned int intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int M, const int N, const int K,\n __global Dtype* weight_diff) {\n for (int top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int n = top_index / N;\n const int d = top_index % N;\n const int index = (int)(bottom_data[n]);\n const int weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT -std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x,\n const int offx) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off,\n const int height, const int width, const int kernel_h, const int kernel_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_col, const int data_col_off) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < kernel_h; ++i) {\n for (int j = 0; j < kernel_w; ++j) {\n int h = h_in + i;\n int w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off,\n const int height, const int width, const int channels,\n const int patch_h, const int patch_w,\n const int pad_h, const int pad_w,\n const int stride_h, const int stride_w,\n const int height_col, const int width_col,\n __global Dtype* data_im, const int data_im_off) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int w_col_end = min(w / stride_w + 1, width_col);\n int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int h_col_end = min(h / stride_h + 1, height_col);\n int offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_col,\n const int data_col_off) {\n\n int d_temp[6];\n int d_iter[6];\n int i;\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes,\n const int channel_axis,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global Dtype* data_im,\n const int data_im_off) {\n int d_im[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n\n __global const int* im_shape_ptr = im_shape + channel_axis;\n __global const int* col_shape_ptr = col_shape + channel_axis;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape_ptr[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int kernel_shape_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int i = 0; i < num_axes; ++i) {\n final_offset *= col_shape_ptr[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n const int d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT -std::string im2col_ndsk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_im,\n const int data_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_col,\n const int data_col_off) {\n int d_temp[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_in = index;\n int channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes,\n __global const Dtype* data_col,\n const int data_col_off,\n __global const int* im_shape,\n __global const int* col_shape,\n __global const int* kernel_shape,\n __global const int* pad,\n __global const int* stride,\n __global const int* kstride,\n __global Dtype* data_im,\n const int data_off) {\n int d_im[6];\n int d_col_size[6];\n int d_col_iter[6];\n int d_col_start[6];\n int d_col_end[6];\n int d_ext_patch[6];\n int d_idx[6];\n\n for (int i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int channel_im = index;\n // Calculate d_im (image dimensions).\n for (int i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int final_offset = 0;\n int coeff_prod = 1;\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int i = num_axes - 1; i >= 0; --i)\n }\n } // for (int i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT -std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int n,\n __global const Dtype* data_im,\n const int data_offset, const int height,\n const int width, const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_col) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int w_out = index % width_col;\n int h_index = index / width_col;\n int h_out = h_index % height_col;\n int channel_in = h_index / height_col;\n int channel_out = channel_in * kernel_h * kernel_w;\n int h_in = h_out * stride_h - pad_h;\n int w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int j = 0; j < ext_kernel_w; j += kstride_w) {\n int h = h_in + i;\n int w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int n,\n __global const Dtype* data_col,\n const int height, const int width,\n const int channels, const int patch_h,\n const int patch_w,\n const int ext_patch_h,\n const int ext_patch_w, const int pad_h,\n const int pad_w, const int stride_h,\n const int stride_w, const int kstride_h,\n const int kstride_w,\n const int height_col,\n const int width_col,\n __global Dtype* data_im,\n const int data_offset) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int w = index % width + pad_w;\n int h = (index / width) % height + pad_h;\n int c = index / (width * height);\n // compute the start and end of the output\n int width_col_1 = width_col - 1;\n int height_col_1 = height_col - 1;\n int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int w_num = (w - w_col_start) / kstride_w;\n int h_num = (h - h_col_start) / kstride_h;\n\n int coeff_w_idx = height_col * width_col;\n int coeff_h_idx = patch_w * coeff_w_idx;\n int offset = c * patch_h * coeff_h_idx;\n for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT -std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in,\n const int num, const int channels,\n const int height, const int width, const int size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int head = 0;\n const int pre_pad = (size - 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int num,\n const int channels, const int height,\n const int width, const int size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int n = index / width / height;\n const int offset = (n * channels * height + h) * width + w;\n const int step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int head = 0;\n const int pre_pad = size - (size + 1) / 2;\n const int post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a,\n const int offa,\n __global Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha,\n__global Dtype* Y,\n const int offY) {\n for (int index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global const Dtype* b,\n const int offb, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a,\n const int offa, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a,\n const int offa, Dtype alpha,\n __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x,\n const int offx, __global Dtype* y,\n const int offy) {\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads,\n const int dims,\n __global const Dtype* bottom_a,\n const int forward_a,\n __global const Dtype* bottom_b,\n const int forward_b,\n __global Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads,\n const int dims,\n __global Dtype* bottom_a,\n const int backward_a,\n __global Dtype* bottom_b,\n const int backward_b,\n __global const Dtype* top,\n const int num,\n const int channels_a,\n const int channels_b,\n __global const int* shape_a,\n __global const int* shape_b) {\n int pad[6];\n int tmp_idx[6];\n int size_a = 1;\n int size_b = 1;\n\n for (int i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int batch_id = index / ((channels_a + channels_b) * size_a);\n int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int counter = index;\n for (int i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int channel_id = (index / size_a) % channels_a;\n int aidx = batch_id * channels_a + channel_id;\n for (int i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int channel_id = (index / size_a) % channels_b;\n int bidx = (batch_id * channels_b + channel_id) * size_b;\n int btemp = 1;\n for (int i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int* mask, __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n const int hend = min(hstart + kernel_h, height);\n const int wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w, const int pad_h,\n const int pad_w, __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n const int pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int nthreads, __global const Dtype* const bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int stride_h, const int stride_w,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int pw = index % pooled_width;\n const int ph = (index / pooled_width) % pooled_height;\n const int c = (index / pooled_width / pooled_height) % channels;\n const int n = index / pooled_width / pooled_height / channels;\n const int hstart = ph * stride_h;\n const int hend = min(hstart + kernel_h, height);\n const int wstart = pw * stride_w;\n const int wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int* mask_slice = mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int stride_h,\n const int stride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width + pad_w;\n const int h = (index / width) % height + pad_h;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + kernel_h, height + pad_h);\n int wend = min(wstart + kernel_w, width + pad_w);\n int pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int num, const int channels,\n const int height, const int width, const int pooled_height,\n const int pooled_width, const int kernel_h, const int kernel_w,\n const int stride_h, const int stride_w, __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int w = index % width;\n const int h = (index / width) % height;\n const int c = (index / width / height) % channels;\n const int n = index / width / height / channels;\n const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int phend = min(h / stride_h + 1, pooled_height);\n const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int ph = phstart; ph < phend; ++ph) {\n for (int pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* bottom_data,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask, __global Dtype* top_mask) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n int final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n,\n const int num_axes,\n const __global Dtype* top_diff,\n const int use_mask,\n __global const int* mask,\n __global const Dtype* top_mask,\n const int channels,\n __global const int* size,\n __global const int* pooled_size,\n __global const int* kernel_size,\n __global const int* ext_kernel_size,\n __global const int* stride,\n __global const int* kstride,\n __global const int* pad,\n __global Dtype* bottom_diff) {\n int d_idx[6];\n int d_start[6];\n int d_end[6];\n int d_iter[6];\n int i;\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int offset = 1;\n int num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int final_offset = 0;\n int im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int size_prod = 1;\n int pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads,\n__global Dtype* bottom_data,\n const int num,\n const int channels,\n const int height,\n const int width,\n const int pooled_height,\n const int pooled_width,\n const int kernel_h,\n const int kernel_w,\n const int ext_kernel_h,\n const int ext_kernel_w,\n const int stride_h,\n const int stride_w,\n const int kstride_h,\n const int kstride_w,\n const int pad_h,\n const int pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int* mask,\n __global Dtype* top_mask) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height);\n int wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int) 0);\n wstart = max(wstart, (int) 0);\n Dtype maxval = -FLT_MAX;\n int maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int* mask, __global const Dtype* top_mask, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* bottom_diff) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int w = index % width;\n int h = (index / width) % height;\n int c = (index / width / height) % channels;\n int n = index / width / height / channels;\n\n int pooled_height_1 = pooled_height - 1;\n int pooled_width_1 = pooled_width - 1;\n int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int ph = phstart; ph <= phend; ph += kstride_h) {\n for (int pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, const int pad_h, const int pad_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h - pad_h;\n int wstart = pw * stride_w - pad_w;\n int hend = min(hstart + ext_kernel_h, height + pad_h);\n int wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0);\n wstart = max(wstart, 0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int pool_size = 0;\n for (int h = hstart; h < hend; ++h) {\n for (int w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int nthreads, __global const Dtype* bottom_data, const int num,\n const int channels, const int height, const int width,\n const int pooled_height, const int pooled_width, const int kernel_h,\n const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,\n const int stride_h, const int stride_w, const int kstride_h,\n const int kstride_w,\n __global Dtype* top_data) {\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int pw = index % pooled_width;\n int ph = (index / pooled_width) % pooled_height;\n int c = (index / pooled_width / pooled_height) % channels;\n int n = index / pooled_width / pooled_height / channels;\n int hstart = ph * stride_h;\n int hend = min(hstart + ext_kernel_h, height);\n int wstart = pw * stride_w;\n int wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int h = hstart; h < hend; h += kstride_h) {\n for (int w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT -std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int nthreads,\n __global const Dtype* in_data,\n const int forward, const int num_slices,\n const int slice_size,\n const int bottom_slice_axis,\n const int top_slice_axis,\n const int offset_slice_axis,\n __global Dtype* out_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int total_slice_size = slice_size * top_slice_axis;\n const int slice_num = index / total_slice_size;\n const int slice_index = index % total_slice_size;\n const int bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int num, const int dim, const int spatial_dim,\n const int has_ignore_label_, const int ignore_label_,\n __global Dtype* counts) {\n\n for (int index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int num,\n const int dim,\n const int spatial_dim,\n const int has_ignore_label_,\n const int ignore_label_,\n __global Dtype* counts) {\n\n const int channels = dim / spatial_dim;\n\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int n = index / spatial_dim;\n const int s = index % spatial_dim;\n const int label_value = (int) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT -std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data,\n const int tile_size, const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* top_data) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int n = index / tile_size / num_tiles / bottom_tile_axis;\n const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads,\n __global const Dtype* top_diff,\n const int tile_size,\n const int num_tiles,\n const int bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int d = index % tile_size;\n const int b = (index / tile_size) % bottom_tile_axis;\n const int n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff, const int_tp in_diff_off,\n __global const Dtype* in_data, const int_tp in_data_off,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT +std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT +std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT +std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int_tp forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int_tp legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT +std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT +std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n, __global const Dtype* data_im, const int_tp data_im_off,\n const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h, const int_tp stride_w,\n const int_tp height_col, const int_tp width_col,\n __global Dtype* data_col, const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n, __global const Dtype* data_col, const int_tp data_col_off,\n const int_tp height, const int_tp width, const int_tp channels,\n const int_tp patch_h, const int_tp patch_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h, const int_tp stride_w,\n const int_tp height_col, const int_tp width_col,\n __global Dtype* data_im, const int_tp data_im_off) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int_tp w_col_end = min(w / stride_w + 1, width_col);\n int_tp h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int_tp h_col_end = min(h / stride_h + 1, height_col);\n int_tp offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int_tp coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int_tp coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int_tp h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape_ptr[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= col_shape_ptr[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_ndsk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global Dtype* data_im,\n const int_tp data_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT +std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += kstride_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp height, const int_tp width,\n const int_tp channels, const int_tp patch_h,\n const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int_tp w_num = (w - w_col_start) / kstride_w;\n int_tp h_num = (h - h_col_start) / kstride_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT +std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int_tp forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int_tp has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int_tp has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff, const int_tp in_diff_off,\n __global const Dtype* in_data, const int_tp in_data_off,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0);\n }\n}"; // NOLINT +std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT +std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT +std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int_tp forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int_tp legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT +std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT +std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n, __global const Dtype* data_im, const int_tp data_im_off,\n const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h, const int_tp stride_w,\n const int_tp height_col, const int_tp width_col,\n __global Dtype* data_col, const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n, __global const Dtype* data_col, const int_tp data_col_off,\n const int_tp height, const int_tp width, const int_tp channels,\n const int_tp patch_h, const int_tp patch_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h, const int_tp stride_w,\n const int_tp height_col, const int_tp width_col,\n __global Dtype* data_im, const int_tp data_im_off) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;\n int_tp w_col_end = min(w / stride_w + 1, width_col);\n int_tp h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;\n int_tp h_col_end = min(h / stride_h + 1, height_col);\n int_tp offset = data_col_off +\n (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;\n int_tp coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;\n int_tp coeff_w_col = (1 - stride_w * height_col * width_col);\n for (int_tp h_col = h_col_start; h_col < h_col_end; ++h_col) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; ++w_col) {\n val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];\n }\n }\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape_ptr[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index + data_im_off] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += (d_im[i] - d_col_iter[i] * stride[i])\n * kernel_shape_prod;\n kernel_shape_prod *= kernel_shape[i];\n }\n final_offset += kernel_shape_prod * channel_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= col_shape_ptr[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index + data_im_off] = val;\n }\n}"; // NOLINT +std::string im2col_ndsk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * kstride[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global Dtype* data_im,\n const int_tp data_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % kstride[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % kstride[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - kstride[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i];\n } else { // d_col_iter[i] <= d_max - kstride[1]\n d_col_iter[i] += kstride[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT +std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_sk,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += kstride_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += kstride_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im_sk,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp height, const int_tp width,\n const int_tp channels, const int_tp patch_h,\n const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % kstride_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % kstride_h : h;\n int_tp w_num = (w - w_col_start) / kstride_w;\n int_tp h_num = (h - h_col_start) / kstride_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n kstride_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n kstride_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT +std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int_tp forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int_tp has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int_tp has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; ss << header << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index c56cf3c1a79..067a92853e4 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -2,101 +2,101 @@ #include "header.cl" #endif -__kernel void TEMPLATE(relu_forward,Dtype)(const int n, +__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n, __global const Dtype* in, __global Dtype* out, Dtype negative_slope) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } -__kernel void TEMPLATE(relu_backward,Dtype)(const int n, +__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n, __global const Dtype* in_diff, __global const Dtype* in_data, __global Dtype* out_diff, Dtype negative_slope) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } -__kernel void TEMPLATE(tanh_forward,Dtype)(const int n, +__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n, __global const Dtype* in, __global Dtype* out) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = tanh(in[index]); } } -__kernel void TEMPLATE(tanh_backward,Dtype)(const int n, +__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n, __global const Dtype* in_diff, __global const Dtype* out_data, __global Dtype* out_diff) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { Dtype tanhx = out_data[index]; out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); } } -__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int n, +__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n, __global const Dtype* in, __global Dtype* out) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = 1. / (1. + exp(-in[index])); } } -__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int n, +__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n, __global const Dtype* in_diff, __global const Dtype* out_data, __global Dtype* out_diff) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { const Dtype sigmoid_x = out_data[index]; out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); } } -__kernel void TEMPLATE(threshold,Dtype)(const int n, const Dtype threshold, +__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold, __global const Dtype* in, __global Dtype* out) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = in[index] > threshold ? 1 : 0; } } -__kernel void TEMPLATE(prelu_forward,Dtype)(const int n, const int channels, - const int dim, +__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels, + const int_tp dim, __global const Dtype* in, __global Dtype* out, __global const Dtype* slope_data, - const int div_factor) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - int c = (index / dim) % channels / div_factor; + const int_tp div_factor) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + int_tp c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } -__kernel void TEMPLATE(prelu_backward,Dtype)(const int n, const int channels, - const int dim, +__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels, + const int_tp dim, __global const Dtype* in_diff, __global const Dtype* in_data, __global Dtype* out_diff, __global const Dtype* slope_data, - const int div_factor) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - int c = (index / dim) % channels / div_factor; + const int_tp div_factor) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + int_tp c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } -__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int n, - __global const Dtype* in_diff, const int in_diff_off, - __global const Dtype* in_data, const int in_data_off, +__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, + __global const Dtype* in_diff, const int_tp in_diff_off, + __global const Dtype* in_data, const int_tp in_data_off, __global Dtype* out_diff) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out_diff[index] = in_diff[index + in_diff_off] * in_data[index + in_data_off] * (in_data[index + in_data_off] <= 0); } } diff --git a/src/caffe/greentea/cl_kernels/auxiliary.cl b/src/caffe/greentea/cl_kernels/auxiliary.cl index 2c3babe1fa6..940cecb7c5f 100644 --- a/src/caffe/greentea/cl_kernels/auxiliary.cl +++ b/src/caffe/greentea/cl_kernels/auxiliary.cl @@ -2,8 +2,8 @@ #include "header.cl" #endif -__kernel void TEMPLATE(gpu_set,Dtype)(const int n, const Dtype alpha, __global Dtype* y) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[index] = alpha; } } diff --git a/src/caffe/greentea/cl_kernels/batch_reindex.cl b/src/caffe/greentea/cl_kernels/batch_reindex.cl index 44733a65494..9cc8dc2a299 100644 --- a/src/caffe/greentea/cl_kernels/batch_reindex.cl +++ b/src/caffe/greentea/cl_kernels/batch_reindex.cl @@ -2,32 +2,32 @@ #include "header.cl" #endif -__kernel void TEMPLATE(br_forward,Dtype)(const int count, const int inner_dim, +__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim, __global const Dtype* in, __global const Dtype* permut, __global Dtype* out) { - for (int index = get_global_id(0); index < count; + for (int_tp index = get_global_id(0); index < count; index += get_global_size(0)) { - int n = index / (inner_dim); - int in_n = (int) (permut[n]); + int_tp n = index / (inner_dim); + int_tp in_n = (int_tp) (permut[n]); out[index] = in[in_n * (inner_dim) + index % (inner_dim)]; } } -__kernel void TEMPLATE(br_backward,Dtype)(const int count, const int inner_dim, +__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim, __global const Dtype* in, __global const Dtype* top_indexes, __global const Dtype* begins, __global const Dtype* counts, __global Dtype* out) { - for (int index = get_global_id(0); index < count; + for (int_tp index = get_global_id(0); index < count; index += get_global_size(0)) { - int n = index / (inner_dim); + int_tp n = index / (inner_dim); out[index] = 0; - int lower = (int) (begins[n]); - int upper = lower + (int) (counts[n]); - for (int i = lower; i < upper; ++i) { - int in_n = (int) (top_indexes[i]); + int_tp lower = (int_tp) (begins[n]); + int_tp upper = lower + (int_tp) (counts[n]); + for (int_tp i = lower; i < upper; ++i) { + int_tp in_n = (int_tp) (top_indexes[i]); out[index] += in[in_n * (inner_dim) + index % (inner_dim)]; } } diff --git a/src/caffe/greentea/cl_kernels/bnll.cl b/src/caffe/greentea/cl_kernels/bnll.cl index 99e0094d41e..a5a34644494 100644 --- a/src/caffe/greentea/cl_kernels/bnll.cl +++ b/src/caffe/greentea/cl_kernels/bnll.cl @@ -2,22 +2,22 @@ #include "header.cl" #endif -__kernel void TEMPLATE(bnll_forward,Dtype)(const int n, +__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n, __global const Dtype* in, __global Dtype* out) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } -__kernel void TEMPLATE(bnll_backward,Dtype)(const int n, +__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n, __global const Dtype* in_diff, __global const Dtype* in_data, __global Dtype* out_diff) { Dtype kBNLL_THRESHOLD = 50.; - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD)); out_diff[index] = in_diff[index] * expval / (expval + 1.); } diff --git a/src/caffe/greentea/cl_kernels/channel.cl b/src/caffe/greentea/cl_kernels/channel.cl index e1a5b0c3160..bf65f536fb1 100644 --- a/src/caffe/greentea/cl_kernels/channel.cl +++ b/src/caffe/greentea/cl_kernels/channel.cl @@ -2,82 +2,82 @@ #include "header.cl" #endif -__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int num, const int channels, - const int spatial_dim, +__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels, + const int_tp spatial_dim, __global const Dtype* data, __global Dtype* out) { - for (int index = get_global_id(0); index < num * spatial_dim; index += + for (int_tp index = get_global_id(0); index < num * spatial_dim; index += get_global_size(0)) { - int n = index / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / spatial_dim; + int_tp s = index % spatial_dim; float maxval = -FLT_MAX; - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval); } out[index] = maxval; } } -__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int count, const int num, - const int channels, - const int spatial_dim, +__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num, + const int_tp channels, + const int_tp spatial_dim, __global const Dtype* channel_max, __global Dtype* data) { - for (int index = get_global_id(0); index < count; + for (int_tp index = get_global_id(0); index < count; index += get_global_size(0)) { - int n = index / channels / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / channels / spatial_dim; + int_tp s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } -__kernel void TEMPLATE(kernel_exp,Dtype)(const int count, __global const Dtype* data, +__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data, __global Dtype* out) { - for (int index = get_global_id(0); index < count; + for (int_tp index = get_global_id(0); index < count; index += get_global_size(0)) { out[index] = exp(data[index]); } } -__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int num, const int channels, - const int spatial_dim, +__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels, + const int_tp spatial_dim, __global const Dtype* data, __global Dtype* channel_sum) { - for (int index = get_global_id(0); index < num * spatial_dim; index += + for (int_tp index = get_global_id(0); index < num * spatial_dim; index += get_global_size(0)) { - int n = index / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / spatial_dim; + int_tp s = index % spatial_dim; Dtype sum = 0; - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } -__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int count, const int num, - const int channels, const int spatial_dim, +__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num, + const int_tp channels, const int_tp spatial_dim, __global const Dtype* channel_sum, __global Dtype* data) { - for (int index = get_global_id(0); index < count; + for (int_tp index = get_global_id(0); index < count; index += get_global_size(0)) { - int n = index / channels / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / channels / spatial_dim; + int_tp s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } -__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int num, const int channels, - const int spatial_dim, +__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels, + const int_tp spatial_dim, __global const Dtype* data_1, __global const Dtype* data_2, __global Dtype* channel_dot) { - for (int index = get_global_id(0); index < num * spatial_dim; index += + for (int_tp index = get_global_id(0); index < num * spatial_dim; index += get_global_size(0)) { - int n = index / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / spatial_dim; + int_tp s = index % spatial_dim; Dtype dot = 0; - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } diff --git a/src/caffe/greentea/cl_kernels/concat.cl b/src/caffe/greentea/cl_kernels/concat.cl index d08db5df6be..14e7ae7324f 100644 --- a/src/caffe/greentea/cl_kernels/concat.cl +++ b/src/caffe/greentea/cl_kernels/concat.cl @@ -2,20 +2,20 @@ #include "header.cl" #endif -__kernel void TEMPLATE(concat,Dtype)(const int nthreads, __global const Dtype* in_data, - const int forward, const int num_concats, - const int concat_size, - const int top_concat_axis, - const int bottom_concat_axis, - const int offset_concat_axis, +__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data, + const int_tp forward, const int_tp num_concats, + const int_tp concat_size, + const int_tp top_concat_axis, + const int_tp bottom_concat_axis, + const int_tp offset_concat_axis, __global Dtype* out_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - const int total_concat_size = concat_size * bottom_concat_axis; - const int concat_num = index / total_concat_size; - const int concat_index = index % total_concat_size; - const int top_index = concat_index + const int_tp total_concat_size = concat_size * bottom_concat_axis; + const int_tp concat_num = index / total_concat_size; + const int_tp concat_index = index % total_concat_size; + const int_tp top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward == 1) { out_data[top_index] = in_data[index]; diff --git a/src/caffe/greentea/cl_kernels/contrastive_loss.cl b/src/caffe/greentea/cl_kernels/contrastive_loss.cl index fd47c607020..16301731799 100644 --- a/src/caffe/greentea/cl_kernels/contrastive_loss.cl +++ b/src/caffe/greentea/cl_kernels/contrastive_loss.cl @@ -2,15 +2,15 @@ #include "header.cl" #endif -__kernel void TEMPLATE(cll_backward,Dtype)(const int count, const int channels, - const Dtype margin, const int legacy_version, +__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels, + const Dtype margin, const int_tp legacy_version, const Dtype alpha, __global const Dtype* y, __global const Dtype* diff, __global const Dtype* dist_sq, __global Dtype *bottom_diff) { - for (int i = get_global_id(0); i < count; + for (int_tp i = get_global_id(0); i < count; i += get_global_size(0)) { - int n = i / channels; // the num index, to access y and dist_sq - if ((int)(y[n])) { // similar pairs + int_tp n = i / channels; // the num index, to access y and dist_sq + if ((int_tp)(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs Dtype mdist = 0.0; diff --git a/src/caffe/greentea/cl_kernels/dropout.cl b/src/caffe/greentea/cl_kernels/dropout.cl index fa69290df8f..c686401b02c 100644 --- a/src/caffe/greentea/cl_kernels/dropout.cl +++ b/src/caffe/greentea/cl_kernels/dropout.cl @@ -2,23 +2,23 @@ #include "header.cl" #endif -__kernel void TEMPLATE(dropout_forward,Dtype)(const int n, +__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n, __global const Dtype* in, - __global const unsigned int* mask, - const unsigned int threshold, + __global const uint_tp* mask, + const uint_tp threshold, const Dtype scale, __global Dtype* out) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; } } __kernel void TEMPLATE(dropout_backward,Dtype)( - const int n, __global const Dtype* in_diff, - __global const unsigned int* mask, const unsigned int threshold, + const int_tp n, __global const Dtype* in_diff, + __global const uint_tp* mask, const uint_tp threshold, const Dtype scale, __global Dtype* out_diff) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } diff --git a/src/caffe/greentea/cl_kernels/eltwise.cl b/src/caffe/greentea/cl_kernels/eltwise.cl index fe99a4e649b..7a075cb5e75 100644 --- a/src/caffe/greentea/cl_kernels/eltwise.cl +++ b/src/caffe/greentea/cl_kernels/eltwise.cl @@ -3,14 +3,14 @@ #endif __kernel void TEMPLATE(eltwise_max_forward,Dtype)( - const int nthreads, __global const Dtype* bottom_data_a, - __global const Dtype* bottom_data_b, const int blob_idx, + const int_tp nthreads, __global const Dtype* bottom_data_a, + __global const Dtype* bottom_data_b, const int_tp blob_idx, __global Dtype* top_data, - __global int* mask) { - for (int index = get_global_id(0); index < nthreads; + __global int_tp* mask) { + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { Dtype maxval = -FLT_MAX; - int maxidx = -1; + int_tp maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { @@ -28,12 +28,12 @@ __kernel void TEMPLATE(eltwise_max_forward,Dtype)( } } -__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int nthreads, +__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads, __global const Dtype* top_diff, - const int blob_idx, - __global const int* mask, + const int_tp blob_idx, + __global const int_tp* mask, __global Dtype* bottom_diff) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { Dtype gradient = 0; if (mask[index] == blob_idx) { diff --git a/src/caffe/greentea/cl_kernels/embed.cl b/src/caffe/greentea/cl_kernels/embed.cl index 6b4ded93225..60029dcf179 100644 --- a/src/caffe/greentea/cl_kernels/embed.cl +++ b/src/caffe/greentea/cl_kernels/embed.cl @@ -2,18 +2,18 @@ #include "header.cl" #endif -__kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, +__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data, __global const Dtype* weight, - const int M, const int N, - const int K, + const int_tp M, const int_tp N, + const int_tp K, __global Dtype* top_data) { - for (int top_index = get_global_id(0); top_index < nthreads; + for (int_tp top_index = get_global_id(0); top_index < nthreads; top_index += get_global_size(0)) { - const int n = top_index / N; - const int d = top_index % N; - const int index = (int)(bottom_data[n]); - const int weight_index = index * N + d; + const int_tp n = top_index / N; + const int_tp d = top_index % N; + const int_tp index = (int_tp)(bottom_data[n]); + const int_tp weight_index = index * N + d; top_data[top_index] = weight[weight_index]; } } @@ -22,11 +22,11 @@ __kernel void TEMPLATE(embed_forward,Dtype)(const int nthreads, #if (TYPE == TYPE_FLOAT) inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) { union { - unsigned int intVal; + uint_tp intVal; Dtype floatVal; } newVal; union { - unsigned int intVal; + uint_tp intVal; Dtype floatVal; } prevVal; do { @@ -35,15 +35,15 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, - __global const Dtype* top_diff, const int M, const int N, const int K, +__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K, __global Dtype* weight_diff) { - for (int top_index = get_global_id(0); top_index < nthreads; + for (int_tp top_index = get_global_id(0); top_index < nthreads; top_index += get_global_size(0)) { - const int n = top_index / N; - const int d = top_index % N; - const int index = (int)(bottom_data[n]); - const int weight_index = index * N + d; + const int_tp n = top_index / N; + const int_tp d = top_index % N; + const int_tp index = (int_tp)(bottom_data[n]); + const int_tp weight_index = index * N + d; TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); } @@ -67,15 +67,15 @@ inline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dt } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } -__kernel void TEMPLATE(embed_backward,Dtype)(const int nthreads, __global const Dtype* bottom_data, - __global const Dtype* top_diff, const int M, const int N, const int K, +__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data, + __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K, __global Dtype* weight_diff) { - for (int top_index = get_global_id(0); top_index < nthreads; + for (int_tp top_index = get_global_id(0); top_index < nthreads; top_index += get_global_size(0)) { - const int n = top_index / N; - const int d = top_index % N; - const int index = (int)(bottom_data[n]); - const int weight_index = index * N + d; + const int_tp n = top_index / N; + const int_tp d = top_index % N; + const int_tp index = (int_tp)(bottom_data[n]); + const int_tp weight_index = index * N + d; TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index)); } diff --git a/src/caffe/greentea/cl_kernels/fillbuffer.cl b/src/caffe/greentea/cl_kernels/fillbuffer.cl index be95994518f..52d55a04a1a 100644 --- a/src/caffe/greentea/cl_kernels/fillbuffer.cl +++ b/src/caffe/greentea/cl_kernels/fillbuffer.cl @@ -2,16 +2,16 @@ #include "header.cl" #endif -__kernel void TEMPLATE(fillbuffer,Dtype)(const int n, const char alpha, __global char* x, - const int offx) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x, + const int_tp offx) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { x[index + offx] = alpha; } } -__kernel void TEMPLATE(fill,Dtype)(const int n, const Dtype alpha, __global Dtype* x, - const int offx) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x, + const int_tp offx) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { x[index + offx] = alpha; } } diff --git a/src/caffe/greentea/cl_kernels/im2col.cl b/src/caffe/greentea/cl_kernels/im2col.cl index 6bfb24a033f..688acd1e69d 100644 --- a/src/caffe/greentea/cl_kernels/im2col.cl +++ b/src/caffe/greentea/cl_kernels/im2col.cl @@ -2,29 +2,29 @@ #include "header.cl" #endif -__kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, const int data_im_off, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, - __global Dtype* data_col, const int data_col_off) { +__kernel void TEMPLATE(im2col,Dtype)(const int_tp n, __global const Dtype* data_im, const int_tp data_im_off, + const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp height_col, const int_tp width_col, + __global Dtype* data_col, const int_tp data_col_off) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - int w_out = index % width_col; - int h_index = index / width_col; - int h_out = h_index % height_col; - int channel_in = h_index / height_col; - int channel_out = channel_in * kernel_h * kernel_w; - int h_in = h_out * stride_h - pad_h; - int w_in = w_out * stride_w - pad_w; + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + int_tp w_out = index % width_col; + int_tp h_index = index / width_col; + int_tp h_out = h_index % height_col; + int_tp channel_in = h_index / height_col; + int_tp channel_out = channel_in * kernel_h * kernel_w; + int_tp h_in = h_out * stride_h - pad_h; + int_tp w_in = w_out * stride_w - pad_w; __global Dtype* data_col_ptr = data_col + data_col_off; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; __global const Dtype* data_im_ptr = data_im + data_im_off; data_im_ptr += (channel_in * height + h_in) * width + w_in; - for (int i = 0; i < kernel_h; ++i) { - for (int j = 0; j < kernel_w; ++j) { - int h = h_in + i; - int w = w_in + j; + for (int_tp i = 0; i < kernel_h; ++i) { + for (int_tp j = 0; j < kernel_w; ++j) { + int_tp h = h_in + i; + int_tp w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; @@ -33,29 +33,29 @@ __kernel void TEMPLATE(im2col,Dtype)(const int n, __global const Dtype* data_im, } } -__kernel void TEMPLATE(col2im,Dtype)(const int n, __global const Dtype* data_col, const int data_col_off, - const int height, const int width, const int channels, - const int patch_h, const int patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, - __global Dtype* data_im, const int data_im_off) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(col2im,Dtype)(const int_tp n, __global const Dtype* data_col, const int_tp data_col_off, + const int_tp height, const int_tp width, const int_tp channels, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp height_col, const int_tp width_col, + __global Dtype* data_im, const int_tp data_im_off) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { Dtype val = 0; - int w = index % width + pad_w; - int h = (index / width) % height + pad_h; - int c = index / (width * height); + int_tp w = index % width + pad_w; + int_tp h = (index / width) % height + pad_h; + int_tp c = index / (width * height); // compute the start and end of the output - int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; - int w_col_end = min(w / stride_w + 1, width_col); - int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; - int h_col_end = min(h / stride_h + 1, height_col); - int offset = data_col_off + + int_tp w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int_tp w_col_end = min(w / stride_w + 1, width_col); + int_tp h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int_tp h_col_end = min(h / stride_h + 1, height_col); + int_tp offset = data_col_off + (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; - int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; - int coeff_w_col = (1 - stride_w * height_col * width_col); - for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { - for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + int_tp coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int_tp coeff_w_col = (1 - stride_w * height_col * width_col); + for (int_tp h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int_tp w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; } } diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index 9bbb7046ee2..7774b9f005e 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -2,37 +2,37 @@ #include "header.cl" #endif -__kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, - const int channel_axis, +__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, + const int_tp channel_axis, __global const Dtype* data_im, - const int data_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, + const int_tp data_off, + __global const int_tp* im_shape, + __global const int_tp* col_shape, + __global const int_tp* kernel_shape, + __global const int_tp* pad, + __global const int_tp* stride, __global Dtype* data_col, - const int data_col_off) { + const int_tp data_col_off) { - int d_temp[6]; - int d_iter[6]; - int i; + int_tp d_temp[6]; + int_tp d_iter[6]; + int_tp i; - __global const int* im_shape_ptr = im_shape + channel_axis; - __global const int* col_shape_ptr = col_shape + channel_axis; + __global const int_tp* im_shape_ptr = im_shape + channel_axis; + __global const int_tp* col_shape_ptr = col_shape + channel_axis; - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_in = index; - int channel_out = 1; + int_tp channel_in = index; + int_tp channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { d_temp[i] = channel_in % col_shape_ptr[i + 1]; channel_in /= col_shape_ptr[i + 1]; channel_out *= kernel_shape[i]; } channel_out *= channel_in; - int data_col_inc = 1; + int_tp data_col_inc = 1; for (i = 0; i < num_axes; ++i) { channel_out *= col_shape_ptr[i + 1]; channel_out += d_temp[i]; @@ -48,14 +48,14 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, do { bool in_range = true; for (i = 0; i < num_axes; ++i) { - const int d_iter_im = d_iter[i] + d_temp[i]; + const int_tp d_iter_im = d_iter[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1]; if (!in_range) { break; } } if (in_range) { - int data_im_offset = d_iter[0]; + int_tp data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { data_im_offset *= im_shape_ptr[i + 1]; data_im_offset += d_iter[i]; @@ -67,7 +67,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { - const int d_max = kernel_shape[i]; + const int_tp d_max = kernel_shape[i]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 @@ -75,45 +75,45 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int n, const int num_axes, incremented = true; break; } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); // do } } -__kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, - const int channel_axis, +__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, + const int_tp channel_axis, __global const Dtype* data_col, - const int data_col_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, + const int_tp data_col_off, + __global const int_tp* im_shape, + __global const int_tp* col_shape, + __global const int_tp* kernel_shape, + __global const int_tp* pad, + __global const int_tp* stride, __global Dtype* data_im, - const int data_im_off) { - int d_im[6]; - int d_col_iter[6]; - int d_col_start[6]; - int d_col_end[6]; + const int_tp data_im_off) { + int_tp d_im[6]; + int_tp d_col_iter[6]; + int_tp d_col_start[6]; + int_tp d_col_end[6]; - __global const int* im_shape_ptr = im_shape + channel_axis; - __global const int* col_shape_ptr = col_shape + channel_axis; + __global const int_tp* im_shape_ptr = im_shape + channel_axis; + __global const int_tp* col_shape_ptr = col_shape + channel_axis; __global Dtype* data_col_ptr = data_col + data_col_off; - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_im = index; + int_tp channel_im = index; // Calculate d_im (image dimensions). - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i]; channel_im /= im_shape_ptr[i + 1]; } // Calculate col start/end indices. bool done = false; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; @@ -123,7 +123,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, // final val will be 0. data_im[index + data_im_off] = 0; done = true; - break; // for (int i = 0; i < num_axes; ++i) + break; // for (int_tp i = 0; i < num_axes; ++i) } } if (done) { @@ -134,30 +134,30 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int n, const int num_axes, bool incremented = true; do { // Compute the final offset. - int final_offset = 0; - int kernel_shape_prod = 1; - for (int i = num_axes - 1; i >= 0; --i) { + int_tp final_offset = 0; + int_tp kernel_shape_prod = 1; + for (int_tp i = num_axes - 1; i >= 0; --i) { final_offset += (d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod; kernel_shape_prod *= kernel_shape[i]; } final_offset += kernel_shape_prod * channel_im; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { final_offset *= col_shape_ptr[i + 1]; final_offset += d_col_iter[i]; } val += data_col_ptr[final_offset]; incremented = false; - for (int i = num_axes - 1; i >= 0; --i) { - const int d_max = d_col_end[i]; + for (int_tp i = num_axes - 1; i >= 0; --i) { + const int_tp d_max = d_col_end[i]; if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; } else { // d_col_iter[i] < d_max - 1 ++d_col_iter[i]; incremented = true; - break; // for (int i = num_axes - 1; i >= 0; --i) + break; // for (int_tp i = num_axes - 1; i >= 0; --i) } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index + data_im_off] = val; } diff --git a/src/caffe/greentea/cl_kernels/im2col_ndsk.cl b/src/caffe/greentea/cl_kernels/im2col_ndsk.cl index 1db4c7b01c5..6f455bd4c65 100644 --- a/src/caffe/greentea/cl_kernels/im2col_ndsk.cl +++ b/src/caffe/greentea/cl_kernels/im2col_ndsk.cl @@ -2,33 +2,33 @@ #include "header.cl" #endif -__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(im2col_ndsk, Dtype)(const int_tp n, const int_tp num_axes, __global const Dtype* data_im, - const int data_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, - __global const int* kstride, + const int_tp data_off, + __global const int_tp* im_shape, + __global const int_tp* col_shape, + __global const int_tp* kernel_shape, + __global const int_tp* pad, + __global const int_tp* stride, + __global const int_tp* kstride, __global Dtype* data_col, - const int data_col_off) { - int d_temp[6]; - int d_iter[6]; - int i; + const int_tp data_col_off) { + int_tp d_temp[6]; + int_tp d_iter[6]; + int_tp i; - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_in = index; - int channel_out = 1; + int_tp channel_in = index; + int_tp channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { d_temp[i] = channel_in % col_shape[i + 1]; channel_in /= col_shape[i + 1]; channel_out *= kernel_shape[i]; } channel_out *= channel_in; - int data_col_inc = 1; + int_tp data_col_inc = 1; for (i = 0; i < num_axes; ++i) { channel_out *= col_shape[i + 1]; channel_out += d_temp[i]; @@ -44,7 +44,7 @@ __kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, do { bool in_range = true; for (i = 0; i < num_axes; ++i) { - const int d_iter_im = d_iter[i] + d_temp[i]; + const int_tp d_iter_im = d_iter[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; if (!in_range) { break; @@ -53,7 +53,7 @@ __kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, // Write column data if (in_range) { - int data_im_offset = d_iter[0]; + int_tp data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { data_im_offset *= im_shape[i + 1]; data_im_offset += d_iter[i]; @@ -66,9 +66,9 @@ __kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { - // Old: const int d_max = kernel_shape[i]; + // Old: const int_tp d_max = kernel_shape[i]; // New (strided, limit is the external kernel size): - const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + const int_tp d_max = (kernel_shape[i] - 1) * kstride[i] + 1; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 @@ -78,48 +78,48 @@ __kernel void TEMPLATE(im2col_ndsk, Dtype)(const int n, const int num_axes, incremented = true; break; } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); // do } } -__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, +__kernel void TEMPLATE(col2im_ndsk, Dtype)(const int_tp n, const int_tp num_axes, __global const Dtype* data_col, - const int data_col_off, - __global const int* im_shape, - __global const int* col_shape, - __global const int* kernel_shape, - __global const int* pad, - __global const int* stride, - __global const int* kstride, + const int_tp data_col_off, + __global const int_tp* im_shape, + __global const int_tp* col_shape, + __global const int_tp* kernel_shape, + __global const int_tp* pad, + __global const int_tp* stride, + __global const int_tp* kstride, __global Dtype* data_im, - const int data_off) { - int d_im[6]; - int d_col_size[6]; - int d_col_iter[6]; - int d_col_start[6]; - int d_col_end[6]; - int d_ext_patch[6]; - int d_idx[6]; + const int_tp data_off) { + int_tp d_im[6]; + int_tp d_col_size[6]; + int_tp d_col_iter[6]; + int_tp d_col_start[6]; + int_tp d_col_end[6]; + int_tp d_ext_patch[6]; + int_tp d_idx[6]; - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) / stride[i] + 1; } - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_im = index; + int_tp channel_im = index; // Calculate d_im (image dimensions). - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { d_im[i] = channel_im % im_shape[i + 1] + pad[i]; channel_im /= im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { // Old: /*d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? @@ -138,7 +138,7 @@ __kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, // final val will be 0. data_im[index] = 0; done = true; - break; // for (int i = 0; i < num_axes; ++i) + break; // for (int_tp i = 0; i < num_axes; ++i) } } if (done) { @@ -149,20 +149,20 @@ __kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, bool incremented = true; do { // Compute the final offset. - int final_offset = 0; - int coeff_prod = 1; - for (int i = num_axes - 1; i >= 0; --i) { + int_tp final_offset = 0; + int_tp coeff_prod = 1; + for (int_tp i = num_axes - 1; i >= 0; --i) { final_offset += d_col_iter[i] * coeff_prod; coeff_prod *= d_col_size[i]; } - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { final_offset += d_idx[i] * coeff_prod; coeff_prod *= kernel_shape[i]; } final_offset += channel_im * coeff_prod; val += data_col[final_offset]; incremented = false; - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { if (d_col_iter[i] > d_col_end[i] - kstride[i]) { d_col_iter[i] = d_col_start[i]; d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; @@ -170,9 +170,9 @@ __kernel void TEMPLATE(col2im_ndsk, Dtype)(const int n, const int num_axes, d_col_iter[i] += kstride[i]; --d_idx[i]; incremented = true; - break; // for (int i = num_axes - 1; i >= 0; --i) + break; // for (int_tp i = num_axes - 1; i >= 0; --i) } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; } diff --git a/src/caffe/greentea/cl_kernels/im2col_sk.cl b/src/caffe/greentea/cl_kernels/im2col_sk.cl index fb76612d93d..2505d7114fe 100644 --- a/src/caffe/greentea/cl_kernels/im2col_sk.cl +++ b/src/caffe/greentea/cl_kernels/im2col_sk.cl @@ -2,36 +2,36 @@ #include "header.cl" #endif -__kernel void TEMPLATE(im2col_sk,Dtype)(const int n, +__kernel void TEMPLATE(im2col_sk,Dtype)(const int_tp n, __global const Dtype* data_im, - const int data_offset, const int height, - const int width, const int kernel_h, - const int kernel_w, - const int ext_kernel_h, - const int ext_kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, - const int height_col, - const int width_col, + const int_tp data_offset, const int_tp height, + const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, + const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, + const int_tp height_col, + const int_tp width_col, __global Dtype* data_col) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - int w_out = index % width_col; - int h_index = index / width_col; - int h_out = h_index % height_col; - int channel_in = h_index / height_col; - int channel_out = channel_in * kernel_h * kernel_w; - int h_in = h_out * stride_h - pad_h; - int w_in = w_out * stride_w - pad_w; + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + int_tp w_out = index % width_col; + int_tp h_index = index / width_col; + int_tp h_out = h_index % height_col; + int_tp channel_in = h_index / height_col; + int_tp channel_out = channel_in * kernel_h * kernel_w; + int_tp h_in = h_out * stride_h - pad_h; + int_tp w_in = w_out * stride_w - pad_w; __global Dtype* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; __global const Dtype* data_im_ptr = data_im + data_offset; data_im_ptr += (channel_in * height + h_in) * width + w_in; - for (int i = 0; i < ext_kernel_h; i += kstride_h) { - for (int j = 0; j < ext_kernel_w; j += kstride_w) { - int h = h_in + i; - int w = w_in + j; + for (int_tp i = 0; i < ext_kernel_h; i += kstride_h) { + for (int_tp j = 0; j < ext_kernel_w; j += kstride_w) { + int_tp h = h_in + i; + int_tp w = w_in + j; (*data_col_ptr) = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; @@ -42,49 +42,49 @@ __kernel void TEMPLATE(im2col_sk,Dtype)(const int n, } -__kernel void TEMPLATE(col2im_sk,Dtype)(const int n, +__kernel void TEMPLATE(col2im_sk,Dtype)(const int_tp n, __global const Dtype* data_col, - const int height, const int width, - const int channels, const int patch_h, - const int patch_w, - const int ext_patch_h, - const int ext_patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, - const int height_col, - const int width_col, + const int_tp height, const int_tp width, + const int_tp channels, const int_tp patch_h, + const int_tp patch_w, + const int_tp ext_patch_h, + const int_tp ext_patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, + const int_tp height_col, + const int_tp width_col, __global Dtype* data_im, - const int data_offset) { + const int_tp data_offset) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { Dtype val = 0; - int w = index % width + pad_w; - int h = (index / width) % height + pad_h; - int c = index / (width * height); + int_tp w = index % width + pad_w; + int_tp h = (index / width) % height + pad_h; + int_tp c = index / (width * height); // compute the start and end of the output - int width_col_1 = width_col - 1; - int height_col_1 = height_col - 1; - int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; - int w_col_end = + int_tp width_col_1 = width_col - 1; + int_tp height_col_1 = height_col - 1; + int_tp w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int_tp w_col_end = (w >= width_col) ? width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; - int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; - int h_col_end = + int_tp h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int_tp h_col_end = (h >= height_col) ? height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; - int w_num = (w - w_col_start) / kstride_w; - int h_num = (h - h_col_start) / kstride_h; + int_tp w_num = (w - w_col_start) / kstride_w; + int_tp h_num = (h - h_col_start) / kstride_h; - int coeff_w_idx = height_col * width_col; - int coeff_h_idx = patch_w * coeff_w_idx; - int offset = c * patch_h * coeff_h_idx; - for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + int_tp coeff_w_idx = height_col * width_col; + int_tp coeff_h_idx = patch_w * coeff_w_idx; + int_tp offset = c * patch_h * coeff_h_idx; + for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += kstride_h, --h_idx) { - for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += kstride_w, --w_idx) { - //int c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; - //int c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; + //int_tp c_col = c * patch_h * patch_w + (h - h_col) / kstride_h * patch_w + (w - w_col) / kstride_w; + //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + h_col * width_col + w_col]; diff --git a/src/caffe/greentea/cl_kernels/lrn.cl b/src/caffe/greentea/cl_kernels/lrn.cl index 6c898acd073..86ccaf5f8d7 100644 --- a/src/caffe/greentea/cl_kernels/lrn.cl +++ b/src/caffe/greentea/cl_kernels/lrn.cl @@ -2,35 +2,35 @@ #include "header.cl" #endif -__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int nthreads, +__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads, __global const Dtype* in, __global const Dtype* scale, const Dtype negative_beta, __global Dtype* out) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { out[index] = in[index] * pow(scale[index], negative_beta); } } -__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const Dtype* in, - const int num, const int channels, - const int height, const int width, const int size, +__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, const int_tp size, const Dtype alpha_over_size, const Dtype k, __global Dtype* const scale) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp n = index / width / height; + const int_tp offset = (n * channels * height + h) * width + w; + const int_tp step = height * width; __global const Dtype* in_off = in + offset; __global Dtype* scale_off = scale + offset; - int head = 0; - const int pre_pad = (size - 1) / 2; - const int post_pad = size - pre_pad - 1; + int_tp head = 0; + const int_tp pre_pad = (size - 1) / 2; + const int_tp post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values @@ -60,32 +60,32 @@ __kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int nthreads, __global const } } -__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int nthreads, +__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data, __global const Dtype* top_data, __global const Dtype* scale, - __global const Dtype* top_diff, const int num, - const int channels, const int height, - const int width, const int size, + __global const Dtype* top_diff, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp size, const Dtype negative_beta, const Dtype cache_ratio, __global Dtype* bottom_diff) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp n = index / width / height; + const int_tp offset = (n * channels * height + h) * width + w; + const int_tp step = height * width; __global const Dtype* bottom_off = bottom_data + offset; __global const Dtype* top_off = top_data + offset; __global const Dtype* scale_off = scale + offset; __global Dtype* top_diff_off = top_diff + offset; __global Dtype* bottom_diff_off = bottom_diff + offset; - int head = 0; - const int pre_pad = size - (size + 1) / 2; - const int post_pad = size - pre_pad - 1; + int_tp head = 0; + const int_tp pre_pad = size - (size + 1) / 2; + const int_tp post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index 0b3be7d0cff..d4a08e510ff 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -2,81 +2,81 @@ #include "header.cl" #endif -__kernel void TEMPLATE(mul,Dtype)(const int n, __global const Dtype* a, - const int offa, +__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, __global Dtype* b, - const int offb, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int_tp offb, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[index + offy] = a[index + offa] * b[index + offb]; } } -__kernel void TEMPLATE(div,Dtype)(const int n, __global const Dtype* a, - const int offa, +__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, __global Dtype* b, - const int offb, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int_tp offb, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[index + offy] = a[index + offa] / b[index + offb]; } } -__kernel void TEMPLATE(add_scalar,Dtype)(const int N, const Dtype alpha, +__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha, __global Dtype* Y, - const int offY) { - for (int index = get_global_id(0); index < N; index += get_global_size(0)) { + const int_tp offY) { + for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) { Y[offY + index] += alpha; } } -__kernel void TEMPLATE(add,Dtype)(const int n, __global const Dtype* a, - const int offa, __global const Dtype* b, - const int offb, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, __global const Dtype* b, + const int_tp offb, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[offy + index] = a[offa + index] + b[offb + index]; } } -__kernel void TEMPLATE(sub,Dtype)(const int n, __global const Dtype* a, - const int offa, __global const Dtype* b, - const int offb, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, __global const Dtype* b, + const int_tp offb, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[offy + index] = a[offa + index] - b[offb + index]; } } -__kernel void TEMPLATE(abs,Dtype)(const int n, __global const Dtype* a, - const int offa, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[offy + index] = fabs((Dtype)(a[offa + index])); } } -__kernel void TEMPLATE(exp,Dtype)(const int n, __global const Dtype* a, - const int offa, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[offy + index] = exp(a[offa + index]); } } -__kernel void TEMPLATE(log,Dtype)(const int n, __global const Dtype* a, - const int offa, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[offy + index] = log(a[offa + index]); } } -__kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, - const int offa, Dtype alpha, +__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a, + const int_tp offa, Dtype alpha, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { if(alpha == 2.0) { y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha); } else { @@ -85,19 +85,19 @@ __kernel void TEMPLATE(powx,Dtype)(const int n, __global const Dtype* a, } } -__kernel void TEMPLATE(sign,Dtype)(const int n, __global const Dtype* x, - const int offx, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x, + const int_tp offx, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[index + offy] = (0.0 < x[index + offx]) - (x[index + offx] < 0.0); } } -__kernel void TEMPLATE(sgnbit,Dtype)(const int n, __global const Dtype* x, - const int offx, __global Dtype* y, - const int offy) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { +__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x, + const int_tp offx, __global Dtype* y, + const int_tp offy) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { y[index + offy] = signbit(x[index + offx]); } } diff --git a/src/caffe/greentea/cl_kernels/mergecrop.cl b/src/caffe/greentea/cl_kernels/mergecrop.cl index 061560d5a92..d8d7289a8fd 100644 --- a/src/caffe/greentea/cl_kernels/mergecrop.cl +++ b/src/caffe/greentea/cl_kernels/mergecrop.cl @@ -2,53 +2,53 @@ #include "header.cl" #endif -__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads, - const int dims, +__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads, + const int_tp dims, __global const Dtype* bottom_a, - const int forward_a, + const int_tp forward_a, __global const Dtype* bottom_b, - const int forward_b, + const int_tp forward_b, __global Dtype* top, - const int num, - const int channels_a, - const int channels_b, - __global const int* shape_a, - __global const int* shape_b) { - int pad[6]; - int tmp_idx[6]; - int size_a = 1; - int size_b = 1; + const int_tp num, + const int_tp channels_a, + const int_tp channels_b, + __global const int_tp* shape_a, + __global const int_tp* shape_b) { + int_tp pad[6]; + int_tp tmp_idx[6]; + int_tp size_a = 1; + int_tp size_b = 1; - for (int i = 0; i < dims; ++i) { + for (int_tp i = 0; i < dims; ++i) { pad[i] = (shape_b[i] - shape_a[i]) / 2; size_a *= shape_a[i]; size_b *= shape_b[i]; } - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int batch_id = index / ((channels_a + channels_b) * size_a); - int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) + int_tp batch_id = index / ((channels_a + channels_b) * size_a); + int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) / (channels_a * size_a)) % 2; - int counter = index; - for (int i = dims - 1; i >= 0; --i) { + int_tp counter = index; + for (int_tp i = dims - 1; i >= 0; --i) { tmp_idx[i] = counter % shape_a[i]; counter /= shape_a[i]; } if (bottom_id == 0) { - int channel_id = (index / size_a) % channels_a; - int aidx = batch_id * channels_a + channel_id; - for (int i = 0; i < dims; ++i) { + int_tp channel_id = (index / size_a) % channels_a; + int_tp aidx = batch_id * channels_a + channel_id; + for (int_tp i = 0; i < dims; ++i) { aidx *= shape_a[i]; aidx += tmp_idx[i]; } top[index] = (forward_a == 1) ? bottom_a[aidx] : 0; } else { - int channel_id = (index / size_a) % channels_b; - int bidx = (batch_id * channels_b + channel_id) * size_b; - int btemp = 1; - for (int i = dims - 1; i >= 0; --i) { + int_tp channel_id = (index / size_a) % channels_b; + int_tp bidx = (batch_id * channels_b + channel_id) * size_b; + int_tp btemp = 1; + for (int_tp i = dims - 1; i >= 0; --i) { bidx += btemp * (tmp_idx[i] + pad[i]); btemp *= shape_b[i]; } @@ -57,53 +57,53 @@ __kernel void TEMPLATE(merge_copy_forward, Dtype)(const int nthreads, } } -__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int nthreads, - const int dims, +__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads, + const int_tp dims, __global Dtype* bottom_a, - const int backward_a, + const int_tp backward_a, __global Dtype* bottom_b, - const int backward_b, + const int_tp backward_b, __global const Dtype* top, - const int num, - const int channels_a, - const int channels_b, - __global const int* shape_a, - __global const int* shape_b) { - int pad[6]; - int tmp_idx[6]; - int size_a = 1; - int size_b = 1; + const int_tp num, + const int_tp channels_a, + const int_tp channels_b, + __global const int_tp* shape_a, + __global const int_tp* shape_b) { + int_tp pad[6]; + int_tp tmp_idx[6]; + int_tp size_a = 1; + int_tp size_b = 1; - for (int i = 0; i < dims; ++i) { + for (int_tp i = 0; i < dims; ++i) { pad[i] = (shape_b[i] - shape_a[i]) / 2; size_a *= shape_a[i]; size_b *= shape_b[i]; } - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int batch_id = index / ((channels_a + channels_b) * size_a); - int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) + int_tp batch_id = index / ((channels_a + channels_b) * size_a); + int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) / (channels_a * size_a)) % 2; - int counter = index; - for (int i = dims - 1; i >= 0; --i) { + int_tp counter = index; + for (int_tp i = dims - 1; i >= 0; --i) { tmp_idx[i] = counter % shape_a[i]; counter /= shape_a[i]; } if (bottom_id == 0) { - int channel_id = (index / size_a) % channels_a; - int aidx = batch_id * channels_a + channel_id; - for (int i = 0; i < dims; ++i) { + int_tp channel_id = (index / size_a) % channels_a; + int_tp aidx = batch_id * channels_a + channel_id; + for (int_tp i = 0; i < dims; ++i) { aidx *= shape_a[i]; aidx += tmp_idx[i]; } bottom_a[aidx] = (backward_a == 1) ? top[index] : 0; } else { - int channel_id = (index / size_a) % channels_b; - int bidx = (batch_id * channels_b + channel_id) * size_b; - int btemp = 1; - for (int i = dims - 1; i >= 0; --i) { + int_tp channel_id = (index / size_a) % channels_b; + int_tp bidx = (batch_id * channels_b + channel_id) * size_b; + int_tp btemp = 1; + for (int_tp i = dims - 1; i >= 0; --i) { bidx += btemp * (tmp_idx[i] + pad[i]); btemp *= shape_b[i]; } diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index 4e0f2a0ca10..372f6f288bf 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -3,31 +3,31 @@ #endif __kernel void TEMPLATE(max_pool_forward,Dtype)( - const int nthreads, __global const Dtype* bottom_data, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, const int stride_w, const int pad_h, - const int pad_w, + const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h, + const int_tp pad_w, __global Dtype* top_data, - const int use_mask, __global int* mask, __global Dtype* top_mask) { - for (int index = get_global_id(0); index < nthreads; + const int_tp use_mask, __global int_tp* mask, __global Dtype* top_mask) { + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - const int hend = min(hstart + kernel_h, height); - const int wend = min(wstart + kernel_w, width); - hstart = max(hstart, 0); - wstart = max(wstart, 0); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + const int_tp hend = min(hstart + kernel_h, height); + const int_tp wend = min(wstart + kernel_w, width); + hstart = max(hstart, 0L); + wstart = max(wstart, 0L); Dtype maxval = -FLT_MAX; - int maxidx = -1; + int_tp maxidx = -1; __global const Dtype* bottom_slice = bottom_data + (n * channels + c) * height * width; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; @@ -44,32 +44,32 @@ __kernel void TEMPLATE(max_pool_forward,Dtype)( } __kernel void TEMPLATE(ave_pool_forward,Dtype)( - const int nthreads, __global const Dtype* const bottom_data, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, const int stride_w, const int pad_h, - const int pad_w, __global Dtype* top_data) { - for (int index = get_global_id(0); index < nthreads; + const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h, + const int_tp pad_w, __global Dtype* top_data) { + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + kernel_h, height + pad_h); - int wend = min(wstart + kernel_w, width + pad_w); - const int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min(hstart + kernel_h, height + pad_h); + int_tp wend = min(wstart + kernel_w, width + pad_w); + const int_tp pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0L); + wstart = max(wstart, 0L); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; __global const Dtype* bottom_slice = bottom_data + (n * channels + c) * height * width; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } @@ -79,36 +79,36 @@ __kernel void TEMPLATE(ave_pool_forward,Dtype)( } __kernel void TEMPLATE(sto_pool_forward_train,Dtype)( - const int nthreads, __global const Dtype* bottom_data, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, const int stride_w, + const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, __global Dtype* rand_idx, __global Dtype* top_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + const int_tp hstart = ph * stride_h; + const int_tp hend = min(hstart + kernel_h, height); + const int_tp wstart = pw * stride_w; + const int_tp wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; __global const Dtype* bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; @@ -122,29 +122,29 @@ __kernel void TEMPLATE(sto_pool_forward_train,Dtype)( } __kernel void TEMPLATE(sto_pool_forward_test,Dtype)( - const int nthreads, __global const Dtype* const bottom_data, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, const int stride_w, + const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, __global Dtype* top_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + const int_tp hstart = ph * stride_h; + const int_tp hend = min(hstart + kernel_h, height); + const int_tp wstart = pw * stride_w; + const int_tp wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; __global const Dtype* bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } @@ -153,45 +153,45 @@ __kernel void TEMPLATE(sto_pool_forward_test,Dtype)( } } -__kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, +__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads, __global const Dtype* top_diff, - const int use_mask, - __global const int* mask, + const int_tp use_mask, + __global const int_tp* mask, __global const Dtype* top_mask, - const int num, - const int channels, - const int height, - const int width, - const int pooled_height, - const int pooled_width, - const int kernel_h, - const int kernel_w, - const int stride_h, - const int stride_w, - const int pad_h, - const int pad_w, + const int_tp num, + const int_tp channels, + const int_tp height, + const int_tp width, + const int_tp pooled_height, + const int_tp pooled_width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp pad_h, + const int_tp pad_w, __global Dtype* bottom_diff) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { // find out the local index // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp c = (index / width / height) % channels; + const int_tp n = index / width / height / channels; + const int_tp phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - const int phend = min((h + pad_h) / stride_h + 1, pooled_height); - const int pwstart = + const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int_tp pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; - const int offset = (n * channels + c) * pooled_height * pooled_width; + const int_tp offset = (n * channels + c) * pooled_height * pooled_width; __global const Dtype* top_diff_slice = top_diff + offset; if (use_mask == 1) { - __global const int* mask_slice = mask + offset; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + __global const int_tp* mask_slice = mask + offset; + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } @@ -199,8 +199,8 @@ __kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, } } else { __global const Dtype* top_mask_slice = top_mask + offset; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } @@ -211,44 +211,44 @@ __kernel void TEMPLATE(max_pool_backward,Dtype)(const int nthreads, } } -__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, +__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads, __global const Dtype* top_diff, - const int num, - const int channels, - const int height, - const int width, - const int pooled_height, - const int pooled_width, - const int kernel_h, - const int kernel_w, - const int stride_h, - const int stride_w, - const int pad_h, - const int pad_w, + const int_tp num, + const int_tp channels, + const int_tp height, + const int_tp width, + const int_tp pooled_height, + const int_tp pooled_width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp pad_h, + const int_tp pad_w, __global Dtype* bottom_diff) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { // find out the local index // find out the local offset - const int w = index % width + pad_w; - const int h = (index / width) % height + pad_h; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + const int_tp w = index % width + pad_w; + const int_tp h = (index / width) % height + pad_h; + const int_tp c = (index / width / height) % channels; + const int_tp n = index / width / height / channels; + const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int_tp phend = min(h / stride_h + 1, pooled_height); + const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int_tp pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; __global const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + kernel_h, height + pad_h); - int wend = min(wstart + kernel_w, width + pad_w); - int pool_size = (hend - hstart) * (wend - wstart); + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min(hstart + kernel_h, height + pad_h); + int_tp wend = min(wstart + kernel_w, width + pad_w); + int_tp pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } @@ -257,32 +257,32 @@ __kernel void TEMPLATE(ave_pool_backward,Dtype)(const int nthreads, } __kernel void TEMPLATE(sto_pool_backward,Dtype)( - const int nthreads, __global const Dtype* rand_idx, - __global const Dtype* const top_diff, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, __global Dtype* bottom_diff) { - for (int index = get_global_id(0); index < nthreads; + const int_tp nthreads, __global const Dtype* rand_idx, + __global const Dtype* const top_diff, const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) { + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { // find out the local index // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp c = (index / width / height) % channels; + const int_tp n = index / width / height / channels; + const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int_tp phend = min(h / stride_h + 1, pooled_height); + const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int_tp pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; __global const Dtype* rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; __global const Dtype* top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] - * (index == (int) (rand_idx_slice[ph * pooled_width + pw])); + * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; diff --git a/src/caffe/greentea/cl_kernels/pooling_nd.cl b/src/caffe/greentea/cl_kernels/pooling_nd.cl index 58c14403fac..40572cdcf0c 100644 --- a/src/caffe/greentea/cl_kernels/pooling_nd.cl +++ b/src/caffe/greentea/cl_kernels/pooling_nd.cl @@ -2,34 +2,34 @@ #include "header.cl" #endif -__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, - const int num_axes, - const __global Dtype* bottom_data, - const int channels, - __global const int* size, - __global const int* pooled_size, - __global const int* kernel_size, - __global const int* ext_kernel_size, - __global const int* stride, - __global const int* kstride, - __global const int* pad, +__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n, + const int_tp num_axes, + __global const Dtype* bottom_data, + const int_tp channels, + __global const int_tp* size, + __global const int_tp* pooled_size, + __global const int_tp* kernel_size, + __global const int_tp* ext_kernel_size, + __global const int_tp* stride, + __global const int_tp* kstride, + __global const int_tp* pad, __global Dtype* top_data, - const int use_mask, - __global int* mask, __global Dtype* top_mask) { - int d_idx[6]; - int d_start[6]; - int d_end[6]; - int d_iter[6]; - int i; - - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - int offset = 1; - int num = index; + const int_tp use_mask, + __global int_tp* mask, __global Dtype* top_mask) { + int_tp d_idx[6]; + int_tp d_start[6]; + int_tp d_end[6]; + int_tp d_iter[6]; + int_tp i; + + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + int_tp offset = 1; + int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = index % pooled_size[i]; d_start[i] = d_idx[i] * stride[i] - pad[i]; d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); - d_start[i] = max(d_start[i], 0); + d_start[i] = max(d_start[i], 0L); num /= pooled_size[i]; offset *= size[i]; d_iter[i] = d_start[i]; @@ -44,18 +44,18 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, return; } } - int chan = num % channels; + int_tp chan = num % channels; num /= channels; offset *= (num * channels + chan); Dtype maxval = -FLT_MAX; - int maxidx = -1; - int final_offset = 0; + int_tp maxidx = -1; + int_tp final_offset = 0; bool incremented; do { final_offset = offset; - int size_prod = 1; + int_tp size_prod = 1; for (i = num_axes - 1; i >= 0; --i) { final_offset += d_iter[i] * size_prod; size_prod *= size[i]; @@ -88,32 +88,32 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int n, } -__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, - const int num_axes, - const __global Dtype* top_diff, - const int use_mask, - __global const int* mask, +__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n, + const int_tp num_axes, + __global const Dtype* top_diff, + const int_tp use_mask, + __global const int_tp* mask, __global const Dtype* top_mask, - const int channels, - __global const int* size, - __global const int* pooled_size, - __global const int* kernel_size, - __global const int* ext_kernel_size, - __global const int* stride, - __global const int* kstride, - __global const int* pad, + const int_tp channels, + __global const int_tp* size, + __global const int_tp* pooled_size, + __global const int_tp* kernel_size, + __global const int_tp* ext_kernel_size, + __global const int_tp* stride, + __global const int_tp* kstride, + __global const int_tp* pad, __global Dtype* bottom_diff) { - int d_idx[6]; - int d_start[6]; - int d_end[6]; - int d_iter[6]; - int i; + int_tp d_idx[6]; + int_tp d_start[6]; + int_tp d_end[6]; + int_tp d_iter[6]; + int_tp i; - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { // find out the local index // find out the local offset - int offset = 1; - int num = index; + int_tp offset = 1; + int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = num % size[i]; d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? @@ -130,20 +130,20 @@ __kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int n, return; } } - int chan = num % channels; + int_tp chan = num % channels; num /= channels; offset *= (num * channels + chan); Dtype gradient = 0; - int final_offset = 0; - int im_offset = 0; + int_tp final_offset = 0; + int_tp im_offset = 0; bool incremented; do { final_offset = offset; im_offset = 0; - int size_prod = 1; - int pooled_size_prod = 1; + int_tp size_prod = 1; + int_tp pooled_size_prod = 1; for (i = num_axes - 1; i >= 0; --i) { final_offset += d_iter[i] * pooled_size_prod; im_offset += d_idx[i] * size_prod; diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index da98af58206..288e6ba5fa6 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -2,46 +2,46 @@ #include "header.cl" #endif -__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int nthreads, +__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads, __global Dtype* bottom_data, - const int num, - const int channels, - const int height, - const int width, - const int pooled_height, - const int pooled_width, - const int kernel_h, - const int kernel_w, - const int ext_kernel_h, - const int ext_kernel_w, - const int stride_h, - const int stride_w, - const int kstride_h, - const int kstride_w, - const int pad_h, - const int pad_w, + const int_tp num, + const int_tp channels, + const int_tp height, + const int_tp width, + const int_tp pooled_height, + const int_tp pooled_width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp ext_kernel_h, + const int_tp ext_kernel_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, + const int_tp pad_h, + const int_tp pad_w, __global Dtype* top_data, - const int use_mask, - __global int* mask, + const int_tp use_mask, + __global int_tp* mask, __global Dtype* top_mask) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height); - int wend = min(wstart + ext_kernel_w, width); - hstart = max(hstart, (int) 0); - wstart = max(wstart, (int) 0); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min(hstart + ext_kernel_h, height); + int_tp wend = min(wstart + ext_kernel_w, width); + hstart = max(hstart, (int_tp) 0); + wstart = max(wstart, (int_tp) 0); Dtype maxval = -FLT_MAX; - int maxidx = -1; + int_tp maxidx = -1; __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { if (bottom_data_ptr[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data_ptr[maxidx]; @@ -58,54 +58,54 @@ __global Dtype* bottom_data, } __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( - const int nthreads, __global const Dtype* top_diff, const int use_mask, - __global const int* mask, __global const Dtype* top_mask, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, const int kstride_h, - const int kstride_w, const int pad_h, const int pad_w, + const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask, + __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w, __global Dtype* bottom_diff) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - __global const int* mask_ptr = mask; + __global const int_tp* mask_ptr = mask; __global const Dtype* top_diff_ptr = top_diff; // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; + int_tp w = index % width; + int_tp h = (index / width) % height; + int_tp c = (index / width / height) % channels; + int_tp n = index / width / height / channels; - int pooled_height_1 = pooled_height - 1; - int pooled_width_1 = pooled_width - 1; - int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; - int phend = + int_tp pooled_height_1 = pooled_height - 1; + int_tp pooled_width_1 = pooled_width - 1; + int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int_tp phend = (h >= pooled_height) ? pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; - int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; - int pwend = + int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int_tp pwend = (w >= pooled_width) ? pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; Dtype gradient = 0; - int offset = (n * channels + c) * pooled_height * pooled_width; + int_tp offset = (n * channels + c) * pooled_height * pooled_width; top_diff_ptr += offset; if (use_mask == 1) { mask_ptr += offset; - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + for (int_tp ph = phstart; ph <= phend; ph += kstride_h) { + for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) { if (mask_ptr[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_ptr[ph * pooled_width + pw]; } } } } else { - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + for (int_tp ph = phstart; ph <= phend; ph += kstride_h) { + for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_ptr[ph * pooled_width + pw]; } @@ -117,35 +117,35 @@ __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( } __kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( - const int nthreads, __global const Dtype* bottom_data, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, const int kstride_h, - const int kstride_w, const int pad_h, const int pad_w, + const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w, __global Dtype* top_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height + pad_h); - int wend = min(wstart + ext_kernel_w, width + pad_w); - hstart = max(hstart, 0); - wstart = max(wstart, 0); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min(hstart + ext_kernel_h, height + pad_h); + int_tp wend = min(wstart + ext_kernel_w, width + pad_w); + hstart = max(hstart, 0L); + wstart = max(wstart, 0L); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; __global const Dtype* bottom_data_ptr = bottom_data; bottom_data_ptr += (n * channels + c) * height * width; - int pool_size = 0; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + int_tp pool_size = 0; + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { aveval += bottom_data_ptr[h * width + w]; ++pool_size; } @@ -155,38 +155,38 @@ __kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( } __kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( - const int nthreads, __global const Dtype* bottom_data, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, const int kstride_h, - const int kstride_w, __global Dtype* rand_idx, + const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, __global Dtype* rand_idx, __global Dtype* top_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + ext_kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + ext_kernel_w, width); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h; + int_tp hend = min(hstart + ext_kernel_h, height); + int_tp wstart = pw * stride_w; + int_tp wend = min(wstart + ext_kernel_w, width); Dtype cumsum = 0.; __global const Dtype* bottom_data_ptr = bottom_data; bottom_data_ptr += (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { cumsum += bottom_data_ptr[h * width + w]; } } float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { cumsum += bottom_data_ptr[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; @@ -200,32 +200,32 @@ __kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( } __kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( - const int nthreads, __global const Dtype* bottom_data, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, const int kstride_h, - const int kstride_w, + const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, __global Dtype* top_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + ext_kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + ext_kernel_w, width); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h; + int_tp hend = min(hstart + ext_kernel_h, height); + int_tp wstart = pw * stride_w; + int_tp wend = min(wstart + ext_kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; __global const Dtype* bottom_data_ptr = bottom_data; bottom_data_ptr += (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { cumsum += bottom_data_ptr[h * width + w]; cumvalues += bottom_data_ptr[h * width + w] * bottom_data_ptr[h * width + w]; diff --git a/src/caffe/greentea/cl_kernels/slice.cl b/src/caffe/greentea/cl_kernels/slice.cl index e9fa61e3fe0..5bb8b2f97d0 100644 --- a/src/caffe/greentea/cl_kernels/slice.cl +++ b/src/caffe/greentea/cl_kernels/slice.cl @@ -2,20 +2,20 @@ #include "header.cl" #endif -__kernel void TEMPLATE(slice,Dtype)(const int nthreads, +__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads, __global const Dtype* in_data, - const int forward, const int num_slices, - const int slice_size, - const int bottom_slice_axis, - const int top_slice_axis, - const int offset_slice_axis, + const int_tp forward, const int_tp num_slices, + const int_tp slice_size, + const int_tp bottom_slice_axis, + const int_tp top_slice_axis, + const int_tp offset_slice_axis, __global Dtype* out_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - const int total_slice_size = slice_size * top_slice_axis; - const int slice_num = index / total_slice_size; - const int slice_index = index % total_slice_size; - const int bottom_index = slice_index + const int_tp total_slice_size = slice_size * top_slice_axis; + const int_tp slice_num = index / total_slice_size; + const int_tp slice_index = index % total_slice_size; + const int_tp bottom_index = slice_index + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; if (forward == 1) { out_data[index] = in_data[bottom_index]; diff --git a/src/caffe/greentea/cl_kernels/softmax_loss.cl b/src/caffe/greentea/cl_kernels/softmax_loss.cl index fd027d3a2f6..100fd171644 100644 --- a/src/caffe/greentea/cl_kernels/softmax_loss.cl +++ b/src/caffe/greentea/cl_kernels/softmax_loss.cl @@ -3,16 +3,16 @@ #endif __kernel void TEMPLATE(softmax_loss_forward,Dtype)( - int n, __global const Dtype* prob_data, __global const Dtype* label, + int_tp n, __global const Dtype* prob_data, __global const Dtype* label, __global Dtype* loss, - const int num, const int dim, const int spatial_dim, - const int has_ignore_label_, const int ignore_label_, + const int_tp num, const int_tp dim, const int_tp spatial_dim, + const int_tp has_ignore_label_, const int_tp ignore_label_, __global Dtype* counts) { - for (int index = get_global_id(0); index < n; index += get_global_size(0)) { - const int n = index / spatial_dim; - const int s = index % spatial_dim; - const int label_value = (int) (label[n * spatial_dim + s]); + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + const int_tp n = index / spatial_dim; + const int_tp s = index % spatial_dim; + const int_tp label_value = (int_tp) (label[n * spatial_dim + s]); if (has_ignore_label_ == 1 && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; @@ -25,28 +25,28 @@ __kernel void TEMPLATE(softmax_loss_forward,Dtype)( } } -__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int nthreads, +__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads, __global const Dtype* top, __global const Dtype* label, __global Dtype* bottom_diff, - const int num, - const int dim, - const int spatial_dim, - const int has_ignore_label_, - const int ignore_label_, + const int_tp num, + const int_tp dim, + const int_tp spatial_dim, + const int_tp has_ignore_label_, + const int_tp ignore_label_, __global Dtype* counts) { - const int channels = dim / spatial_dim; + const int_tp channels = dim / spatial_dim; - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { { - const int n = index / spatial_dim; - const int s = index % spatial_dim; - const int label_value = (int) (label[n * spatial_dim + s]); + const int_tp n = index / spatial_dim; + const int_tp s = index % spatial_dim; + const int_tp label_value = (int_tp) (label[n * spatial_dim + s]); if (has_ignore_label_ == 1 && label_value == ignore_label_) { - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; diff --git a/src/caffe/greentea/cl_kernels/tile.cl b/src/caffe/greentea/cl_kernels/tile.cl index 0332503f6fe..a484efbd51f 100644 --- a/src/caffe/greentea/cl_kernels/tile.cl +++ b/src/caffe/greentea/cl_kernels/tile.cl @@ -3,35 +3,35 @@ #endif -__kernel void TEMPLATE(tile,Dtype)(const int nthreads, __global const Dtype* bottom_data, - const int tile_size, const int num_tiles, - const int bottom_tile_axis, +__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data, + const int_tp tile_size, const int_tp num_tiles, + const int_tp bottom_tile_axis, __global Dtype* top_data) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - const int d = index % tile_size; - const int b = (index / tile_size / num_tiles) % bottom_tile_axis; - const int n = index / tile_size / num_tiles / bottom_tile_axis; - const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + const int_tp d = index % tile_size; + const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int_tp n = index / tile_size / num_tiles / bottom_tile_axis; + const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d; top_data[index] = bottom_data[bottom_index]; } } -__kernel void TEMPLATE(tile_backward,Dtype)(const int nthreads, +__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads, __global const Dtype* top_diff, - const int tile_size, - const int num_tiles, - const int bottom_tile_axis, + const int_tp tile_size, + const int_tp num_tiles, + const int_tp bottom_tile_axis, __global Dtype* bottom_diff) { - for (int index = get_global_id(0); index < nthreads; + for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - const int d = index % tile_size; - const int b = (index / tile_size) % bottom_tile_axis; - const int n = index / tile_size / bottom_tile_axis; + const int_tp d = index % tile_size; + const int_tp b = (index / tile_size) % bottom_tile_axis; + const int_tp n = index / tile_size / bottom_tile_axis; bottom_diff[index] = 0; - int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; - for (int t = 0; t < num_tiles; ++t) { + int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int_tp t = 0; t < num_tiles; ++t) { bottom_diff[index] += top_diff[top_index]; top_index += bottom_tile_axis * tile_size; } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 73aab5c7b1c..ed74f6ac9fd 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -13,18 +13,18 @@ namespace caffe { template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_offset, const int channels, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, + const int_tp data_offset, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, cl_mem data_col) { - int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; - int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; - int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; - int num_kernels = channels * height_col * width_col; + int_tp ext_kernel_h = (kernel_h - 1) * kstride_h + 1; + int_tp ext_kernel_w = (kernel_w - 1) * kstride_w + 1; + int_tp height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; + int_tp num_kernels = channels * height_col * width_col; viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("im2col_sk")); @@ -41,51 +41,51 @@ void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_offset, - const int channels, - const int height, const int width, - const int kernel_h, - const int kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, - const int kstride_h, - const int kstride_w, + const int_tp data_offset, + const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, cl_mem data_col); template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_offset, - const int channels, - const int height, const int width, - const int kernel_h, - const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, - const int stride_w, - const int kstride_h, - const int kstride_w, + const int_tp data_offset, + const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, cl_mem data_col); template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int channels, const int height, - const int width, const int patch_h, - const int patch_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - cl_mem data_im, const int data_offset) { + const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, + cl_mem data_im, const int_tp data_offset) { if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) { LOG(FATAL)<< "stride greater than 1 or pad greater than 0" << " not tested in col2im_sk_gpu()."; } - int ext_patch_h = (patch_h - 1) * kstride_h + 1; - int ext_patch_w = (patch_w - 1) * kstride_w + 1; - int height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; - int num_kernels = channels * height * width; + int_tp ext_patch_h = (patch_h - 1) * kstride_h + 1; + int_tp ext_patch_w = (patch_w - 1) * kstride_w + 1; + int_tp height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; + int_tp num_kernels = channels * height * width; viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("col2im_sk")); @@ -101,44 +101,44 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int channels, - const int height, const int width, - const int patch_h, - const int patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, - const int kstride_h, - const int kstride_w, cl_mem data_im, - const int data_offset); + const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, cl_mem data_im, + const int_tp data_offset); template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int channels, - const int height, const int width, - const int patch_h, - const int patch_w, const int pad_h, - const int pad_w, - const int stride_h, - const int stride_w, - const int kstride_h, - const int kstride_w, + const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, cl_mem data_im, - const int data_offset); + const int_tp data_offset); template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_im_off, const int channels, - const int height, const int width, const int kernel_h, - const int kernel_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - cl_mem data_col, const int data_col_off) { + const int_tp data_im_off, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + cl_mem data_col, const int_tp data_col_off) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. - int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; - int num_kernels = channels * height_col * width_col; + int_tp height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; + int_tp num_kernels = channels * height_col * width_col; viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("im2col")); @@ -152,36 +152,36 @@ void greentea_im2col_gpu(viennacl::ocl::program *prog, template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_im_off, - const int channels, const int height, - const int width, const int kernel_h, - const int kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, cl_mem data_col, - const int data_col_off); + const int_tp data_im_off, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, cl_mem data_col, + const int_tp data_col_off); template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, - const int data_im_off, - const int channels, const int height, - const int width, const int kernel_h, - const int kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, cl_mem data_col, - const int data_col_off); + const int_tp data_im_off, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, cl_mem data_col, + const int_tp data_col_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int data_col_off, const int channels, - const int height, const int width, const int patch_h, - const int patch_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, cl_mem data_im, - const int data_im_off) { - int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; - int num_kernels = channels * height * width; + const int_tp data_col_off, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, cl_mem data_im, + const int_tp data_im_off) { + int_tp height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; + int_tp num_kernels = channels * height * width; viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("col2im")); @@ -196,33 +196,33 @@ void greentea_col2im_gpu(viennacl::ocl::program *prog, template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int data_col_off, - const int channels, const int height, - const int width, const int patch_h, - const int patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, cl_mem data_im, - const int data_im_off); + const int_tp data_col_off, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, cl_mem data_im, + const int_tp data_im_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int data_col_off, - const int channels, const int height, - const int width, const int patch_h, - const int patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, cl_mem data_im, - const int data_im_off); + const int_tp data_col_off, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, cl_mem data_im, + const int_tp data_im_off); template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, const int num_spatial_axes, - const int channel_axis, - const int num_kernels, + const int_tp data_off, const int_tp num_spatial_axes, + const int_tp channel_axis, + const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem data_col, int data_col_off) { + cl_mem data_col, int_tp data_col_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("im2col_nd")); @@ -238,34 +238,34 @@ void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, // Explicit instantiation template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, - cl_mem data_im, const int data_off, - const int num_spatial_axes, - const int channel_axis, - const int num_kernels, + cl_mem data_im, const int_tp data_off, + const int_tp num_spatial_axes, + const int_tp channel_axis, + const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_col, - int data_col_off); + int_tp data_col_off); template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, - cl_mem data_im, const int data_off, - const int num_spatial_axes, - const int channel_axis, - const int num_kernels, + cl_mem data_im, const int_tp data_off, + const int_tp num_spatial_axes, + const int_tp channel_axis, + const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_col, - int data_col_off); + int_tp data_col_off); template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, const int num_spatial_axes, - const int channel_axis, const int im_size, + const int_tp data_col_off, const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem data_im, int data_off) { + cl_mem data_im, int_tp data_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("col2im_nd")); @@ -282,35 +282,35 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, - const int num_spatial_axes, - const int channel_axis, - const int im_size, cl_mem im_shape, + const int_tp data_col_off, + const int_tp num_spatial_axes, + const int_tp channel_axis, + const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_im, - int data_off); + int_tp data_off); template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, - const int num_spatial_axes, - const int channel_axis, - const int im_size, cl_mem im_shape, + const int_tp data_col_off, + const int_tp num_spatial_axes, + const int_tp channel_axis, + const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_im, - int data_off); + int_tp data_off); template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, const int num_spatial_axes, - const int num_kernels, cl_mem im_shape, + const int_tp data_off, const int_tp num_spatial_axes, + const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, cl_mem data_col, - int data_col_off) { + int_tp data_col_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("im2col_ndsk")); @@ -327,36 +327,36 @@ void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, - const int num_spatial_axes, - const int num_kernels, + const int_tp data_off, + const int_tp num_spatial_axes, + const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, cl_mem data_col, - int data_col_off); + int_tp data_col_off); template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int data_off, - const int num_spatial_axes, - const int num_kernels, + const int_tp data_off, + const int_tp num_spatial_axes, + const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, cl_mem data_col, - int data_col_off); + int_tp data_col_off); template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, - const int num_spatial_axes, const int im_size, + const int_tp data_col_off, + const int_tp num_spatial_axes, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_im, int data_off) { + cl_mem kstride, cl_mem data_im, int_tp data_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("col2im_ndsk")); @@ -373,25 +373,25 @@ void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, - const int num_spatial_axes, - const int im_size, + const int_tp data_col_off, + const int_tp num_spatial_axes, + const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, - cl_mem data_im, int data_off); + cl_mem data_im, int_tp data_off); template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int data_col_off, - const int num_spatial_axes, - const int im_size, + const int_tp data_col_off, + const int_tp num_spatial_axes, + const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, - cl_mem data_im, int data_off); + cl_mem data_im, int_tp data_off); } // namespace caffe #endif diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index d7b000ebca2..89b65e7c4ad 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -59,26 +59,26 @@ namespace caffe { -void greentea_memset(const int ctx_id, const size_t N, const int alpha, - cl_mem X, const int offX) { +void greentea_memset(const int_tp ctx_id, const uint_tp N, const int_tp alpha, + cl_mem X, const int_tp offX) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); // OpenCL Version >= 1.2 approach - // clEnqueueFillBuffer(ctx.get_queue().handle().get(), X, &alpha, sizeof(int), + // clEnqueueFillBuffer(ctx.get_queue().handle().get(), X, &alpha, sizeof(int_tp), // offX, N, 0, NULL, NULL); // OpenCL Version < 1.2 fallback typedef float Dtype; viennacl::ocl::kernel &oclk_fill = program.get_kernel( CL_KERNEL_SELECT("fillbuffer")); viennacl::ocl::enqueue( - oclk_fill(static_cast(N), static_cast(alpha), + oclk_fill(static_cast(N), static_cast(alpha), WrapHandle(X, &ctx), offX), ctx.get_queue()); } // Copy from OpenCL buffer to main memory -void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, +void greentea_gpu_memcpy(const uint_tp N, const cl_mem X, const int_tp offX, void *Y, viennacl::ocl::context *ctx) { if (Y != NULL) { clEnqueueReadBuffer(ctx->get_queue().handle().get(), X, CL_TRUE, offX, N, Y, @@ -89,8 +89,8 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, } // Copy from main memory to OpenCL buffer -void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, - const int offY, viennacl::ocl::context *ctx) { +void greentea_gpu_memcpy(const uint_tp N, const void* X, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx) { if (X != NULL) { clEnqueueWriteBuffer(ctx->get_queue().handle().get(), Y, CL_TRUE, @@ -99,8 +99,8 @@ void greentea_gpu_memcpy(const size_t N, const void* X, cl_mem Y, } // Copy from OpenCL to OpenCL buffer -void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, - cl_mem Y, const int offY, +void greentea_gpu_memcpy(const uint_tp N, const cl_mem X, const int_tp offX, + cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx) { clEnqueueCopyBuffer(ctx->get_queue().handle().get(), X, Y, offX, offY, N, 0, NULL, @@ -108,65 +108,65 @@ void greentea_gpu_memcpy(const size_t N, const cl_mem X, const int offX, } template -void greentea_copy(const int N, const cl_mem X, const int offX, Dtype* Y, +void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, Dtype* Y, viennacl::ocl::context *ctx) { greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX * sizeof(Dtype), Y, ctx); } template -void greentea_copy(const int N, const Dtype* X, cl_mem Y, const int offY, +void greentea_copy(const int_tp N, const Dtype* X, cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx) { greentea_gpu_memcpy(sizeof(Dtype) * N, X, Y, offY * sizeof(Dtype), ctx); } // Copy from OpenCL buffer to OpenCL buffer template -void greentea_copy(const int N, const cl_mem X, const int offX, cl_mem Y, - const int offY, viennacl::ocl::context *ctx) { +void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx) { greentea_gpu_memcpy(sizeof(Dtype) * N, X, offX * sizeof(Dtype), Y, offY * sizeof(Dtype), ctx); } // Explicit instantiations -template void greentea_copy(const int N, const cl_mem X, const int offX, - int* Y, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const cl_mem X, - const int offX, unsigned int* Y, +template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, + int_tp* Y, viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, uint_tp* Y, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, +template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, float* Y, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, +template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, double* Y, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const int* X, cl_mem Y, - const int offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const unsigned int* X, - cl_mem Y, const int offY, +template void greentea_copy(const int_tp N, const int_tp* X, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const uint_tp* X, + cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const float* X, cl_mem Y, - const int offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const double* X, cl_mem Y, - const int offY, +template void greentea_copy(const int_tp N, const float* X, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const double* X, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, - cl_mem Y, const int offY, +template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, + cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const cl_mem X, - const int offX, cl_mem Y, - const int offY, +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, - cl_mem Y, const int offY, +template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, + cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int N, const cl_mem X, const int offX, - cl_mem Y, const int offY, +template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, + cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx); template -void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const Dtype alpha, const cl_mem A, - const int offA, const cl_mem B, const int offB, - const Dtype beta, cl_mem C, const int offC) { +void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int_tp M, const int_tp N, + const int_tp K, const Dtype alpha, const cl_mem A, + const int_tp offA, const cl_mem B, const int_tp offB, + const Dtype beta, cl_mem C, const int_tp offC) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -194,35 +194,35 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), C, Cptr, 0, NULL, NULL); } else { - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; - int ldc = N; + int_tp lda = (TransA == CblasNoTrans) ? K : M; + int_tp ldb = (TransB == CblasNoTrans) ? N : K; + int_tp ldc = N; #ifndef USE_CLBLAS - typedef typename viennacl::matrix_base::size_type size_type; - typedef typename viennacl::matrix_base::size_type difference_type; + typedef typename viennacl::matrix_base::uint_tpype uint_tpype; + typedef typename viennacl::matrix_base::uint_tpype difference_type; - size_type A_size1 = static_cast((TransA == CblasTrans) ? K : M); - size_type A_size2 = static_cast((TransA == CblasTrans) ? M : K); + uint_tpype A_size1 = static_cast((TransA == CblasTrans) ? K : M); + uint_tpype A_size2 = static_cast((TransA == CblasTrans) ? M : K); - size_type B_size1 = static_cast((TransB == CblasTrans) ? N : K); - size_type B_size2 = static_cast((TransB == CblasTrans) ? K : N); + uint_tpype B_size1 = static_cast((TransB == CblasTrans) ? N : K); + uint_tpype B_size2 = static_cast((TransB == CblasTrans) ? K : N); viennacl::matrix_base matA(A, ctx, - A_size1, size_type(0), difference_type(1), size_type(M), - A_size2, size_type(offA), difference_type(1), size_type(lda) + A_size1, uint_tpype(0), difference_type(1), uint_tpype(M), + A_size2, uint_tpype(offA), difference_type(1), uint_tpype(lda) VCL_ROW_MAJOR); viennacl::matrix_base matB(B, ctx, - B_size1, size_type(0), difference_type(1), size_type(K), - B_size2, size_type(offB), difference_type(1), size_type(ldb) + B_size1, uint_tpype(0), difference_type(1), uint_tpype(K), + B_size2, uint_tpype(offB), difference_type(1), uint_tpype(ldb) VCL_ROW_MAJOR); viennacl::matrix_base matC(C, ctx, - size_type(M), size_type(0), difference_type(1), size_type(M), - size_type(N), size_type(offC), difference_type(1), - size_type(ldc) VCL_ROW_MAJOR); + uint_tpype(M), uint_tpype(0), difference_type(1), uint_tpype(M), + uint_tpype(N), uint_tpype(offC), difference_type(1), + uint_tpype(ldc) VCL_ROW_MAJOR); if (TransA == CblasTrans && TransB == CblasTrans) viennacl::linalg::prod_impl(viennacl::trans(matA), viennacl::trans(matB), @@ -261,29 +261,29 @@ void greentea_gpu_gemm(const int ctx_id, const CBLAS_TRANSPOSE TransA, } } -template void greentea_gpu_gemm(const int ctx_id, +template void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int M, const int N, const int K, + const int_tp M, const int_tp N, const int_tp K, const float alpha, const cl_mem A, - const int offA, const cl_mem B, - const int offB, const float beta, - cl_mem C, const int offC); -template void greentea_gpu_gemm(const int ctx_id, + const int_tp offA, const cl_mem B, + const int_tp offB, const float beta, + cl_mem C, const int_tp offC); +template void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int M, const int N, const int K, + const int_tp M, const int_tp N, const int_tp K, const double alpha, const cl_mem A, - const int offA, const cl_mem B, - const int offB, const double beta, - cl_mem C, const int offC); + const int_tp offA, const cl_mem B, + const int_tp offB, const double beta, + cl_mem C, const int_tp offC); template -void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, - const int M, const int N, const Dtype alpha, - const cl_mem A, const int offA, const cl_mem x, - const int offx, const Dtype beta, cl_mem y, - const int offy) { +void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, + const int_tp M, const int_tp N, const Dtype alpha, + const cl_mem A, const int_tp offA, const cl_mem x, + const int_tp offx, const Dtype beta, cl_mem y, + const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -315,20 +315,20 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::size_type size_type; - typedef typename viennacl::vector_base::size_type difference_type; + typedef typename viennacl::vector_base::uint_tpype uint_tpype; + typedef typename viennacl::vector_base::uint_tpype difference_type; viennacl::vector_base v1(x, - size_type((TransA == CblasTrans) ? M : N), - size_type(offx), difference_type(1), ctx); + uint_tpype((TransA == CblasTrans) ? M : N), + uint_tpype(offx), difference_type(1), ctx); viennacl::vector_base v2(y, - size_type((TransA == CblasTrans) ? N : M), - size_type(offy), difference_type(1), ctx); + uint_tpype((TransA == CblasTrans) ? N : M), + uint_tpype(offy), difference_type(1), ctx); viennacl::matrix_base mat(A, ctx, - size_type(M), size_type(0), - difference_type(1), size_type(M), - size_type(N), size_type(offA), - difference_type(1), size_type(N) + uint_tpype(M), uint_tpype(0), + difference_type(1), uint_tpype(M), + uint_tpype(N), uint_tpype(offA), + difference_type(1), uint_tpype(N) VCL_ROW_MAJOR); v2 *= beta; if (TransA == CblasTrans) @@ -355,25 +355,25 @@ void greentea_gpu_gemv(const int ctx_id, const CBLAS_TRANSPOSE TransA, } } -template void greentea_gpu_gemv(const int ctx_id, +template void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, - const int M, const int N, + const int_tp M, const int_tp N, const float alpha, const cl_mem A, - const int offA, const cl_mem x, - const int offx, const float beta, - cl_mem y, const int offy); -template void greentea_gpu_gemv(const int ctx_id, + const int_tp offA, const cl_mem x, + const int_tp offx, const float beta, + cl_mem y, const int_tp offy); +template void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, - const int M, const int N, + const int_tp M, const int_tp N, const double alpha, const cl_mem A, - const int offA, const cl_mem x, - const int offx, const double beta, - cl_mem y, const int offy); + const int_tp offA, const cl_mem x, + const int_tp offx, const double beta, + cl_mem y, const int_tp offy); template -void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, - const cl_mem X, const int offX, cl_mem Y, - const int offY) { +void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, + const cl_mem X, const int_tp offX, cl_mem Y, + const int_tp offY) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -395,12 +395,12 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::size_type size_type; - typedef typename viennacl::vector_base::size_type difference_type; + typedef typename viennacl::vector_base::uint_tpype uint_tpype; + typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(X, size_type(N), size_type(offX), + viennacl::vector_base v1(X, uint_tpype(N), uint_tpype(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, size_type(N), size_type(offY), + viennacl::vector_base v2(Y, uint_tpype(N), uint_tpype(offY), difference_type(1), ctx); v2 += alpha * v1; @@ -421,19 +421,19 @@ void greentea_gpu_axpy(const int ctx_id, const int N, const Dtype alpha, } } -template void greentea_gpu_axpy(const int ctx_id, const int N, +template void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const float alpha, const cl_mem X, - const int offX, cl_mem Y, - const int offY); -template void greentea_gpu_axpy(const int ctx_id, const int N, + const int_tp offX, cl_mem Y, + const int_tp offY); +template void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const double alpha, const cl_mem X, - const int offX, cl_mem Y, - const int offY); + const int_tp offX, cl_mem Y, + const int_tp offY); template -void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy) { +void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -444,19 +444,19 @@ void greentea_gpu_mul(const int ctx_id, const int N, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_mul(const int ctx_id, const int N, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); -template void greentea_gpu_mul(const int ctx_id, const int N, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); +template void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); +template void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_div(const int ctx_id, const int N, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy) { +void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -467,18 +467,18 @@ void greentea_gpu_div(const int ctx_id, const int N, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_div(const int ctx_id, const int N, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); -template void greentea_gpu_div(const int ctx_id, const int N, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); +template void greentea_gpu_div(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); +template void greentea_gpu_div(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, - cl_mem x, int offx) { +void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, + cl_mem x, int_tp offx) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -495,11 +495,11 @@ void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::size_type size_type; - typedef typename viennacl::vector_base::size_type difference_type; + typedef typename viennacl::vector_base::uint_tpype uint_tpype; + typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(x, size_type(N), - size_type(offx), difference_type(1), ctx); + viennacl::vector_base v1(x, uint_tpype(N), + uint_tpype(offx), difference_type(1), ctx); v1 *= alpha; @@ -517,34 +517,34 @@ void greentea_gpu_scal(const int ctx_id, const int N, const Dtype alpha, } } -template void greentea_gpu_scal(const int ctx_id, const int N, +template void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const float alpha, cl_mem x, - const int offx); -template void greentea_gpu_scal(const int ctx_id, const int N, + const int_tp offx); +template void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const double alpha, cl_mem x, - const int offx); + const int_tp offx); template -void greentea_gpu_axpby(const int ctx_id, const int N, const Dtype alpha, - const cl_mem X, const int offX, const Dtype beta, - cl_mem Y, const int offY) { +void greentea_gpu_axpby(const int_tp ctx_id, const int_tp N, const Dtype alpha, + const cl_mem X, const int_tp offX, const Dtype beta, + cl_mem Y, const int_tp offY) { greentea_gpu_scal(ctx_id, N, beta, Y, offY); greentea_gpu_axpy(ctx_id, N, alpha, X, offX, Y, offY); } -template void greentea_gpu_axpby(const int ctx_id, const int N, +template void greentea_gpu_axpby(const int_tp ctx_id, const int_tp N, const float alpha, const cl_mem X, - const int offX, const float beta, - cl_mem Y, const int offY); + const int_tp offX, const float beta, + cl_mem Y, const int_tp offY); -template void greentea_gpu_axpby(const int ctx_id, const int N, +template void greentea_gpu_axpby(const int_tp ctx_id, const int_tp N, const double alpha, const cl_mem X, - const int offX, const double beta, - cl_mem Y, const int offY); + const int_tp offX, const double beta, + cl_mem Y, const int_tp offY); template -void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, - const int offX, const cl_mem Y, const int offY, +void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, const cl_mem X, + const int_tp offX, const cl_mem Y, const int_tp offY, Dtype* out) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); @@ -567,13 +567,13 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::size_type size_type; - typedef typename viennacl::vector_base::size_type difference_type; + typedef typename viennacl::vector_base::uint_tpype uint_tpype; + typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(X, size_type(n), - size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, size_type(n), - size_type(offY), difference_type(1), ctx); + viennacl::vector_base v1(X, uint_tpype(n), + uint_tpype(offX), difference_type(1), ctx); + viennacl::vector_base v2(Y, uint_tpype(n), + uint_tpype(offY), difference_type(1), ctx); *out = viennacl::linalg::inner_prod(v1, v2); @@ -605,18 +605,18 @@ void greentea_gpu_dot(const int ctx_id, const int n, const cl_mem X, } } -template void greentea_gpu_dot(const int ctx_id, const int n, - const cl_mem X, const int offX, - const cl_mem Y, const int offY, +template void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, + const cl_mem X, const int_tp offX, + const cl_mem Y, const int_tp offY, float* out); -template void greentea_gpu_dot(const int ctx_id, const int n, - const cl_mem X, const int offX, - const cl_mem Y, const int offY, +template void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, + const cl_mem X, const int_tp offX, + const cl_mem Y, const int_tp offY, double* out); template -void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, - const int offX, Dtype* Y) { +void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, const cl_mem X, + const int_tp offX, Dtype* Y) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -632,11 +632,11 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::size_type size_type; - typedef typename viennacl::vector_base::size_type difference_type; + typedef typename viennacl::vector_base::uint_tpype uint_tpype; + typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(X, size_type(n), - size_type(offX), difference_type(1), ctx); + viennacl::vector_base v1(X, uint_tpype(n), + uint_tpype(offX), difference_type(1), ctx); *Y = viennacl::linalg::norm_1(v1); @@ -667,17 +667,17 @@ void greentea_gpu_asum(const int ctx_id, const int n, const cl_mem X, } } -template void greentea_gpu_asum(const int ctx_id, const int n, - const cl_mem X, const int offX, +template void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, + const cl_mem X, const int_tp offX, float* Y); -template void greentea_gpu_asum(const int ctx_id, const int n, - const cl_mem X, const int offX, +template void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, + const cl_mem X, const int_tp offX, double* Y); template -void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, - const cl_mem X, const int offX, cl_mem Y, - const int offY) { +void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const Dtype alpha, + const cl_mem X, const int_tp offX, cl_mem Y, + const int_tp offY) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { @@ -699,13 +699,13 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::size_type size_type; - typedef typename viennacl::vector_base::size_type difference_type; + typedef typename viennacl::vector_base::uint_tpype uint_tpype; + typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(X, size_type(n), - size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, size_type(n), - size_type(offY), difference_type(1), ctx); + viennacl::vector_base v1(X, uint_tpype(n), + uint_tpype(offX), difference_type(1), ctx); + viennacl::vector_base v2(Y, uint_tpype(n), + uint_tpype(offY), difference_type(1), ctx); v2 = v1 * alpha; @@ -729,19 +729,19 @@ void greentea_gpu_scale(const int ctx_id, const int n, const Dtype alpha, } } -template void greentea_gpu_scale(const int ctx_id, const int n, +template void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const float alpha, const cl_mem X, - const int offX, cl_mem Y, - const int offY); + const int_tp offX, cl_mem Y, + const int_tp offY); -template void greentea_gpu_scale(const int ctx_id, const int n, +template void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const double alpha, const cl_mem X, - const int offX, cl_mem Y, - const int offY); + const int_tp offX, cl_mem Y, + const int_tp offY); template -void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, - cl_mem Y, const int offY) { +void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const Dtype alpha, + cl_mem Y, const int_tp offY) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); // OpenCL Version >= 1.2 approach @@ -756,18 +756,18 @@ void greentea_gpu_set(const int ctx_id, const int N, const Dtype alpha, ctx.get_queue()); } -template void greentea_gpu_set(const int ctx_id, const int N, - const int alpha, cl_mem Y, const int offY); -template void greentea_gpu_set(const int ctx_id, const int N, +template void greentea_gpu_set(const int_tp ctx_id, const int_tp N, + const int_tp alpha, cl_mem Y, const int_tp offY); +template void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const float alpha, cl_mem Y, - const int offY); -template void greentea_gpu_set(const int ctx_id, const int N, + const int_tp offY); +template void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const double alpha, cl_mem Y, - const int offY); + const int_tp offY); template -void greentea_gpu_add_scalar(const int ctx_id, const int N, const Dtype alpha, - cl_mem Y, const int offY) { +void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const Dtype alpha, + cl_mem Y, const int_tp offY) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -777,17 +777,17 @@ void greentea_gpu_add_scalar(const int ctx_id, const int N, const Dtype alpha, ctx.get_queue()); } -template void greentea_gpu_add_scalar(const int ctx_id, const int N, +template void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const float alpha, cl_mem Y, - const int offY); -template void greentea_gpu_add_scalar(const int ctx_id, const int N, + const int_tp offY); +template void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const double alpha, cl_mem Y, - const int offY); + const int_tp offY); template -void greentea_gpu_add(const int ctx_id, const int n, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy) { +void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -798,19 +798,19 @@ void greentea_gpu_add(const int ctx_id, const int n, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_add(const int ctx_id, const int n, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); -template void greentea_gpu_add(const int ctx_id, const int n, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); +template void greentea_gpu_add(const int_tp ctx_id, const int_tp n, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); +template void greentea_gpu_add(const int_tp ctx_id, const int_tp n, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_sub(const int ctx_id, const int n, const cl_mem a, - const int offa, const cl_mem b, const int offb, cl_mem y, - const int offy) { +void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, + const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -821,18 +821,18 @@ void greentea_gpu_sub(const int ctx_id, const int n, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_sub(const int ctx_id, const int n, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); -template void greentea_gpu_sub(const int ctx_id, const int n, - const cl_mem a, const int offa, - const cl_mem b, const int offb, cl_mem y, - const int offy); +template void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); +template void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, + const cl_mem a, const int_tp offa, + const cl_mem b, const int_tp offb, cl_mem y, + const int_tp offy); template -void greentea_gpu_abs(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, const int offy) { +void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -842,16 +842,16 @@ void greentea_gpu_abs(const int ctx_id, const int N, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_abs(const int ctx_id, const int N, - const cl_mem a, const int offa, cl_mem y, - const int offy); -template void greentea_gpu_abs(const int ctx_id, const int N, - const cl_mem a, const int offa, cl_mem y, - const int offy); +template void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, cl_mem y, + const int_tp offy); +template void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, cl_mem y, + const int_tp offy); template -void greentea_gpu_exp(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, const int offy) { +void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -861,17 +861,17 @@ void greentea_gpu_exp(const int ctx_id, const int N, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_exp(const int ctx_id, const int N, - const cl_mem a, const int offa, cl_mem y, - const int offy); -template void greentea_gpu_exp(const int ctx_id, const int N, - const cl_mem a, const int offa, cl_mem y, - const int offy); +template void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, cl_mem y, + const int_tp offy); +template void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, cl_mem y, + const int_tp offy); template -void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, - const int offa, const Dtype alpha, cl_mem y, - const int offy) { +void greentea_gpu_powx(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, const Dtype alpha, cl_mem y, + const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -882,18 +882,18 @@ void greentea_gpu_powx(const int ctx_id, const int N, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_powx(const int ctx_id, const int N, - const cl_mem a, const int offa, +template void greentea_gpu_powx(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, const float alpha, cl_mem y, - const int offy); -template void greentea_gpu_powx(const int ctx_id, const int N, - const cl_mem a, const int offa, + const int_tp offy); +template void greentea_gpu_powx(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, const double alpha, cl_mem y, - const int offy); + const int_tp offy); template -void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, - const int offa, cl_mem y, const int offy) { +void greentea_gpu_log(const int_tp ctx_id, const int_tp N, const cl_mem a, + const int_tp offa, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -903,16 +903,16 @@ void greentea_gpu_log(const int ctx_id, const int N, const cl_mem a, ctx.get_queue()); } -template void greentea_gpu_log(const int ctx_id, const int N, - const cl_mem a, const int offa, cl_mem y, - const int offy); -template void greentea_gpu_log(const int ctx_id, const int N, - const cl_mem a, const int offa, cl_mem y, - const int offy); +template void greentea_gpu_log(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, cl_mem y, + const int_tp offy); +template void greentea_gpu_log(const int_tp ctx_id, const int_tp N, + const cl_mem a, const int_tp offa, cl_mem y, + const int_tp offy); template -void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, - cl_mem y, const int offy) { +void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, const cl_mem x, int_tp offx, + cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -923,16 +923,16 @@ void greentea_gpu_sign(const int ctx_id, const int n, const cl_mem x, int offx, ctx.get_queue()); } -template void greentea_gpu_sign(const int ctx_id, const int n, - const cl_mem x, int offx, cl_mem y, - const int offy); -template void greentea_gpu_sign(const int ctx_id, const int n, - const cl_mem x, int offx, cl_mem y, - const int offy); +template void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, + const cl_mem x, int_tp offx, cl_mem y, + const int_tp offy); +template void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, + const cl_mem x, int_tp offx, cl_mem y, + const int_tp offy); template -void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, - int offx, cl_mem y, const int offy) { +void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, const cl_mem x, + int_tp offx, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -943,55 +943,55 @@ void greentea_gpu_sgnbit(const int ctx_id, const int n, const cl_mem x, ctx.get_queue()); } -template void greentea_gpu_sgnbit(const int ctx_id, const int n, - const cl_mem x, int offx, cl_mem y, - const int offy); -template void greentea_gpu_sgnbit(const int ctx_id, const int n, - const cl_mem x, int offx, cl_mem y, - const int offy); +template void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, + const cl_mem x, int_tp offx, cl_mem y, + const int_tp offy); +template void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, + const cl_mem x, int_tp offx, cl_mem y, + const int_tp offy); -void greentea_gpu_rng_uniform(const int ctx_id, const int n, cl_mem r, - int offr) { +void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, cl_mem r, + int_tp offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - std::vector random(n); //NOLINT + std::vector random(n); //NOLINT caffe_rng_uniform(n, &random[0]); - greentea_gpu_memcpy(sizeof(unsigned int) * n, &random[0], r, offr, &ctx); + greentea_gpu_memcpy(sizeof(uint_tp) * n, &random[0], r, offr, &ctx); } template -void greentea_gpu_rng_uniform(const int ctx_id, const int n, const Dtype a, - const Dtype b, cl_mem r, const int offr) { +void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, const Dtype a, + const Dtype b, cl_mem r, const int_tp offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); std::vector random(n); // NOLINT caffe_rng_uniform(n, a, b, &random[0]); greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, &ctx); } -template void greentea_gpu_rng_uniform(const int ctx_id, const int n, +template void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, const float a, const float b, - cl_mem r, const int offr); -template void greentea_gpu_rng_uniform(const int ctx_id, const int n, + cl_mem r, const int_tp offr); +template void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, const double a, const double b, - cl_mem r, const int offr); + cl_mem r, const int_tp offr); template -void greentea_gpu_rng_gaussian(const int ctx_id, const int n, const Dtype mu, - const Dtype sigma, cl_mem r, const int offr) { +void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, const Dtype mu, + const Dtype sigma, cl_mem r, const int_tp offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); std::vector random(n); // NOLINT caffe_rng_gaussian(n, mu, sigma, &random[0]); greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, &ctx); } -template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, +template void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, const float mu, const float sigma, cl_mem r, - const int offr); + const int_tp offr); -template void greentea_gpu_rng_gaussian(const int ctx_id, const int n, +template void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, const double mu, const double sigma, cl_mem r, - const int offr); + const int_tp offr); } // namespace caffe #endif diff --git a/src/caffe/internal_thread.cpp b/src/caffe/internal_thread.cpp index 87c4a969a9a..8622f46e711 100644 --- a/src/caffe/internal_thread.cpp +++ b/src/caffe/internal_thread.cpp @@ -24,8 +24,8 @@ void InternalThread::StartInternalThread(device* device_context) { thread_device_ = device_context; Caffe::Brew mode = Caffe::mode(); - int rand_seed = caffe_rng_rand(); - int solver_count = Caffe::solver_count(); + int_tp rand_seed = caffe_rng_rand(); + int_tp solver_count = Caffe::solver_count(); bool root_solver = Caffe::root_solver(); try { @@ -38,7 +38,7 @@ void InternalThread::StartInternalThread(device* device_context) { } void InternalThread::entry(device* device_context, Caffe::Brew mode, - int rand_seed, int solver_count, bool root_solver) { + int_tp rand_seed, int_tp solver_count, bool root_solver) { Caffe::SelectDevice(device_context); Caffe::set_mode(mode); Caffe::set_random_seed(rand_seed); diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp index 5ce28c9e2b4..f476cd01568 100644 --- a/src/caffe/layers/absval_layer.cpp +++ b/src/caffe/layers/absval_layer.cpp @@ -17,7 +17,7 @@ void AbsValLayer::LayerSetUp(const vector*>& bottom, template void AbsValLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { - const int count = top[0]->count(); + const int_tp count = top[0]->count(); Dtype* top_data = top[0]->mutable_cpu_data(); caffe_abs(count, bottom[0]->cpu_data(), top_data); } @@ -25,7 +25,7 @@ void AbsValLayer::Forward_cpu( template void AbsValLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - const int count = top[0]->count(); + const int_tp count = top[0]->count(); const Dtype* top_diff = top[0]->cpu_diff(); if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->cpu_data(); diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu index 4cc5fd2219b..7a9ec151889 100644 --- a/src/caffe/layers/absval_layer.cu +++ b/src/caffe/layers/absval_layer.cu @@ -14,7 +14,7 @@ namespace caffe { template void AbsValLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - const int count = top[0]->count(); + const int_tp count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -33,7 +33,7 @@ template void AbsValLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - const int count = top[0]->count(); + const int_tp count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index e2d8d9f8a24..f292c74cee6 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -36,11 +36,11 @@ void AccuracyLayer::Reshape( << "e.g., if label axis == 1 and prediction shape is (N, C, H, W), " << "label count (number of labels) must be N*H*W, " << "with integer values in {0, 1, ..., C-1}."; - vector top_shape(0); // Accuracy is a scalar; 0 axes. + vector top_shape(0); // Accuracy is a scalar; 0 axes. top[0]->Reshape(top_shape); if (top.size() > 1) { // Per-class accuracy is a vector; 1 axes. - vector top_shape_per_class(1); + vector top_shape_per_class(1); top_shape_per_class[0] = bottom[0]->shape(label_axis_); top[1]->Reshape(top_shape_per_class); nums_buffer_.Reshape(top_shape_per_class); @@ -53,19 +53,19 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, Dtype accuracy = 0; const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); - const int dim = bottom[0]->count() / outer_num_; - const int num_labels = bottom[0]->shape(label_axis_); + const int_tp dim = bottom[0]->count() / outer_num_; + const int_tp num_labels = bottom[0]->shape(label_axis_); vector maxval(top_k_+1); - vector max_id(top_k_+1); + vector max_id(top_k_+1); if (top.size() > 1) { caffe_set(nums_buffer_.count(), Dtype(0), nums_buffer_.mutable_cpu_data()); caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data()); } - int count = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; ++j) { - const int label_value = - static_cast(bottom_label[i * inner_num_ + j]); + int_tp count = 0; + for (int_tp i = 0; i < outer_num_; ++i) { + for (int_tp j = 0; j < inner_num_; ++j) { + const int_tp label_value = + static_cast(bottom_label[i * inner_num_ + j]); if (has_ignore_label_ && label_value == ignore_label_) { continue; } @@ -73,16 +73,16 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, DCHECK_GE(label_value, 0); DCHECK_LT(label_value, num_labels); // Top-k accuracy - std::vector > bottom_data_vector; - for (int k = 0; k < num_labels; ++k) { + std::vector > bottom_data_vector; + for (int_tp k = 0; k < num_labels; ++k) { bottom_data_vector.push_back(std::make_pair( bottom_data[i * dim + k * inner_num_ + j], k)); } std::partial_sort( bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, - bottom_data_vector.end(), std::greater >()); + bottom_data_vector.end(), std::greater >()); // check if true label is in top k predictions - for (int k = 0; k < top_k_; k++) { + for (int_tp k = 0; k < top_k_; k++) { if (bottom_data_vector[k].second == label_value) { ++accuracy; if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value]; @@ -96,7 +96,7 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, // LOG(INFO) << "Accuracy: " << accuracy; top[0]->mutable_cpu_data()[0] = accuracy / count; if (top.size() > 1) { - for (int i = 0; i < top[1]->count(); ++i) { + for (int_tp i = 0; i < top[1]->count(); ++i) { top[1]->mutable_cpu_data()[i] = nums_buffer_.cpu_data()[i] == 0 ? 0 : top[1]->cpu_data()[i] / nums_buffer_.cpu_data()[i]; diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp index 94e7c970ddc..8df157967c6 100644 --- a/src/caffe/layers/affinity_layer.cpp +++ b/src/caffe/layers/affinity_layer.cpp @@ -22,9 +22,9 @@ void AffinityLayer::LayerSetUp(const vector*>& bottom, offsets_.resize(bottom.size()); if (this->layer_param().has_affinity_param()) { AffinityParameter affinity_param = this->layer_param().affinity_param(); - for (int i = 0; i < - std::min(static_cast(bottom.size()), - static_cast(affinity_param.offset_size())); ++i) { + for (int_tp i = 0; i < + std::min(static_cast(bottom.size()), + static_cast(affinity_param.offset_size())); ++i) { offsets_[i] = affinity_param.offset(i); } } @@ -39,7 +39,7 @@ template void AffinityLayer::Reshape(const vector*>& bottom, const vector*>& top) { min_index_.clear(); - for (int bidx = 0; bidx < bottom.size(); ++bidx) { + for (int_tp bidx = 0; bidx < bottom.size(); ++bidx) { // 1, #edges, height, width top[bidx]->Reshape(1, 2, bottom[bidx]->height(), bottom[bidx]->width()); @@ -56,20 +56,20 @@ void AffinityLayer::Reshape(const vector*>& bottom, template void AffinityLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - for (int bidx = 0; bidx < bottom.size(); ++bidx) { + for (int_tp bidx = 0; bidx < bottom.size(); ++bidx) { const Dtype* bottom_data = bottom[bidx]->cpu_data(); Dtype* top_data = top[bidx]->mutable_cpu_data(); Dtype* min_data = min_index_[bidx]->mutable_cpu_data(); - int inner_num = bottom[bidx]->width() + int_tp inner_num = bottom[bidx]->width() * bottom[bidx]->height(); - int xmin, ymin; + int_tp xmin, ymin; // Construct affinity graph #pragma omp parallel for - for (int i = 0; i < bottom[bidx]->height() - 1; ++i) { - for (int j = 0; j < bottom[bidx]->width() - 1; ++j) { + for (int_tp i = 0; i < bottom[bidx]->height() - 1; ++i) { + for (int_tp j = 0; j < bottom[bidx]->width() - 1; ++j) { // Center Dtype p0 = bottom_data[offsets_[bidx] * inner_num + i * bottom[bidx]->width() + j]; @@ -100,7 +100,7 @@ template void AffinityLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - for (int bidx = 0; bidx < bottom.size(); ++bidx) { + for (int_tp bidx = 0; bidx < bottom.size(); ++bidx) { if (propagate_down[bidx]) { const Dtype* top_diff = top[bidx]->cpu_diff(); Dtype* bottom_diff = bottom[bidx]->mutable_cpu_diff(); @@ -108,17 +108,17 @@ void AffinityLayer::Backward_cpu(const vector*>& top, caffe_set(bottom[0]->count(), Dtype(0.0), bottom_diff); - int inner_num = bottom[bidx]->width() + int_tp inner_num = bottom[bidx]->width() * bottom[bidx]->height(); // Spread out the affinity losses to pixels - for (int i = 0; i < bottom[0]->height() - 1; ++i) { - for (int j = 0; j < bottom[0]->width() - 1; ++j) { + for (int_tp i = 0; i < bottom[0]->height() - 1; ++i) { + for (int_tp j = 0; j < bottom[0]->width() - 1; ++j) { Dtype lx = top_diff[i * bottom[0]->width() + j]; Dtype ly = top_diff[inner_num + i * bottom[0]->width() + j]; - int mx = min_data[i * bottom[0]->width() + j]; - int my = min_data[bottom[0]->width() + int_tp mx = min_data[i * bottom[0]->width() + j]; + int_tp my = min_data[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + j]; // Only propagate to min index contributor of affinity graph diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index 0c0a932dac7..29e8e363165 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -33,7 +33,7 @@ void ArgMaxLayer::LayerSetUp(const vector*>& bottom, template void ArgMaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { - std::vector shape(bottom[0]->num_axes(), 1); + std::vector shape(bottom[0]->num_axes(), 1); if (has_axis_) { // Produces max_ind or max_val per axis shape = bottom[0]->shape(); @@ -55,7 +55,7 @@ void ArgMaxLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - int dim, axis_dist; + int_tp dim, axis_dist; if (has_axis_) { dim = bottom[0]->shape(axis_); // Distance between values of axis in blob @@ -64,17 +64,17 @@ void ArgMaxLayer::Forward_cpu(const vector*>& bottom, dim = bottom[0]->count(1); axis_dist = 1; } - int num = bottom[0]->count() / dim; - std::vector > bottom_data_vector(dim); - for (int i = 0; i < num; ++i) { - for (int j = 0; j < dim; ++j) { + int_tp num = bottom[0]->count() / dim; + std::vector > bottom_data_vector(dim); + for (int_tp i = 0; i < num; ++i) { + for (int_tp j = 0; j < dim; ++j) { bottom_data_vector[j] = std::make_pair( bottom_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j); } std::partial_sort( bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, - bottom_data_vector.end(), std::greater >()); - for (int j = 0; j < top_k_; ++j) { + bottom_data_vector.end(), std::greater >()); + for (int_tp j = 0; j < top_k_; ++j) { if (out_max_val_) { if (has_axis_) { // Produces max_val per axis diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 5ea79ea0dbe..889edbc0528 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -22,15 +22,15 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, ConvolutionParameter conv_param = this->layer_param_.convolution_param(); force_nd_im2col_ = conv_param.force_nd_im2col(); channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); - const int first_spatial_axis = channel_axis_ + 1; - const int num_axes = bottom[0]->num_axes(); + const int_tp first_spatial_axis = channel_axis_ + 1; + const int_tp num_axes = bottom[0]->num_axes(); num_spatial_axes_ = num_axes - first_spatial_axis; CHECK_GE(num_spatial_axes_, 0); - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1)); + vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); + vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1L)); // Setup filter kernel dimensions (kernel_shape_). kernel_shape_.Reshape(spatial_dim_blob_shape); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { CHECK_EQ(num_spatial_axes_, 2) << "kernel_h & kernel_w can only be used for 2D convolution."; @@ -39,22 +39,22 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, kernel_shape_data[0] = conv_param.kernel_h(); kernel_shape_data[1] = conv_param.kernel_w(); } else { - const int num_kernel_dims = conv_param.kernel_size_size(); + const int_tp num_kernel_dims = conv_param.kernel_size_size(); CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_) << "kernel_size must be specified once, or once per spatial dimension " << "(kernel_size specified " << num_kernel_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { kernel_shape_data[i] = conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); } } - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { CHECK_GT(kernel_shape_data[i], 0)<< "Filter dimensions must be nonzero."; } // Setup stride dimensions (stride_). stride_.Reshape(spatial_dim_blob_shape); - int* stride_data = stride_.mutable_cpu_data(); + int_tp* stride_data = stride_.mutable_cpu_data(); if (conv_param.has_stride_h() || conv_param.has_stride_w()) { CHECK_EQ(num_spatial_axes_, 2) << "stride_h & stride_w can only be used for 2D convolution."; @@ -63,14 +63,14 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, stride_data[0] = conv_param.stride_h(); stride_data[1] = conv_param.stride_w(); } else { - const int num_stride_dims = conv_param.stride_size(); + const int_tp num_stride_dims = conv_param.stride_size(); CHECK(num_stride_dims == 0 || num_stride_dims == 1 || num_stride_dims == num_spatial_axes_) << "stride must be specified once, or once per spatial dimension " << "(stride specified " << num_stride_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultStride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultStride = 1; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : conv_param.stride((num_stride_dims == 1) ? 0 : i); CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; @@ -78,7 +78,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, } // Setup pad dimensions (pad_). pad_.Reshape(spatial_dim_blob_shape); - int* pad_data = pad_.mutable_cpu_data(); + int_tp* pad_data = pad_.mutable_cpu_data(); if (conv_param.has_pad_h() || conv_param.has_pad_w()) { CHECK_EQ(num_spatial_axes_, 2) << "pad_h & pad_w can only be used for 2D convolution."; @@ -87,14 +87,14 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, pad_data[0] = conv_param.pad_h(); pad_data[1] = conv_param.pad_w(); } else { - const int num_pad_dims = conv_param.pad_size(); + const int_tp num_pad_dims = conv_param.pad_size(); CHECK(num_pad_dims == 0 || num_pad_dims == 1 || num_pad_dims == num_spatial_axes_) << "pad must be specified once, or once per spatial dimension " << "(pad specified " << num_pad_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultPad = 0; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultPad = 0; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : conv_param.pad((num_pad_dims == 1) ? 0 : i); } @@ -102,7 +102,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Setup kernel stride dimensions kstride_.Reshape(spatial_dim_blob_shape); - int* kstride_data = kstride_.mutable_cpu_data(); + int_tp* kstride_data = kstride_.mutable_cpu_data(); if (conv_param.has_kstride_h() || conv_param.has_kstride_w()) { CHECK_EQ(num_spatial_axes_, 2) << "kstride_h & kstride_w can only be used for 2D convolution."; @@ -111,14 +111,14 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, kstride_data[0] = conv_param.kstride_h(); kstride_data[1] = conv_param.kstride_w(); } else { - const int num_kstride_dims = conv_param.kstride_size(); + const int_tp num_kstride_dims = conv_param.kstride_size(); CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || num_kstride_dims == num_spatial_axes_) << "kstride must be specified once, or once per spatial dimension " << "(kstride specified " << num_kstride_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultKstride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultKstride = 1; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : conv_param.kstride((num_kstride_dims == 1) ? 0 : i); } @@ -126,7 +126,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Different 2D and ND im2col/col2im kernels for strided kernels use_skernel_ = false; - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { use_skernel_ |= (kstride_data[i] != 1); if (use_skernel_) { break; @@ -136,7 +136,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. is_1x1_ = true; - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { is_1x1_ &= kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0; if (!is_1x1_) { @@ -161,14 +161,14 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Handle the parameters: weights and biases. // - blobs_[0] holds the filter weights // - blobs_[1] holds the biases (optional) - vector weight_shape(2); + vector weight_shape(2); weight_shape[0] = conv_out_channels_; weight_shape[1] = conv_in_channels_ / group_; - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { weight_shape.push_back(kernel_shape_data[i]); } bias_term_ = this->layer_param_.convolution_param().bias_term(); - vector bias_shape(bias_term_, num_output_); + vector bias_shape(bias_term_, num_output_); if (this->blobs_.size() > 0) { CHECK_EQ(1 + bias_term_, this->blobs_.size()) << "Incorrect number of weight blobs."; @@ -214,27 +214,27 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, template void BaseConvolutionLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int first_spatial_axis = channel_axis_ + 1; + const int_tp first_spatial_axis = channel_axis_ + 1; CHECK_EQ(bottom[0]->num_axes(), first_spatial_axis + num_spatial_axes_) << "bottom num_axes may not change."; num_ = bottom[0]->count(0, channel_axis_); CHECK_EQ(bottom[0]->shape(channel_axis_), channels_) << "Input size incompatible with convolution kernel."; // TODO: generalize to handle inputs of different shapes. - for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { + for (int_tp bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { CHECK(bottom[0]->shape() == bottom[bottom_id]->shape()) << "All inputs must have the same shape."; } // Shape the tops. bottom_shape_ = &bottom[0]->shape(); compute_output_shape(); - vector top_shape(bottom[0]->shape().begin(), + vector top_shape(bottom[0]->shape().begin(), bottom[0]->shape().begin() + channel_axis_); top_shape.push_back(num_output_); - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { top_shape.push_back(output_shape_[i]); } - for (int top_id = 0; top_id < top.size(); ++top_id) { + for (int_tp top_id = 0; top_id < top.size(); ++top_id) { top[top_id]->Reshape(top_shape); } if (reverse_dimensions()) { @@ -245,10 +245,10 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, col_offset_ = kernel_dim_ * conv_out_spatial_dim_; output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_; // Setup input dimensions (conv_input_shape_). - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); + vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); conv_input_shape_.Reshape(bottom_dim_blob_shape); - int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data(); - for (int i = 0; i < num_spatial_axes_ + 1; ++i) { + int_tp* conv_input_shape_data = conv_input_shape_.mutable_cpu_data(); + for (int_tp i = 0; i < num_spatial_axes_ + 1; ++i) { if (reverse_dimensions()) { conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i); } else { @@ -261,7 +261,7 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, col_buffer_shape_.clear(); col_buffer_shape_.push_back(kernel_dim_ * group_); - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { if (reverse_dimensions()) { col_buffer_shape_.push_back(input_shape(i + 1)); } else { @@ -272,7 +272,7 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, col_buffer_.Reshape(col_buffer_shape_); if (Caffe::mode() == Caffe::Brew::GPU) { // Shared column buffer per device-queue across all layers on that device - for (int i = 0; i < this->device_->num_queues(); ++i) { + for (int_tp i = 0; i < this->device_->num_queues(); ++i) { shared_ptr > buffer = this->device_ ->template Buffer(i); buffer->Reshape(col_buffer_shape_); @@ -287,7 +287,7 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, // Set up the all ones "bias multiplier" for adding biases by BLAS out_spatial_dim_ = top[0]->count(first_spatial_axis); if (bias_term_) { - vector bias_multiplier_shape(1, out_spatial_dim_); + vector bias_multiplier_shape(1, out_spatial_dim_); bool reshaped = bias_multiplier_.Reshape(bias_multiplier_shape); // This will trigger a memory copy if in GPU mode, // which may not be necessary. @@ -311,7 +311,7 @@ void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, } col_buff = col_buffer_.cpu_data(); } - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, conv_out_spatial_dim_, kernel_dim_, (Dtype) 1., weights + weight_offset_ * g, @@ -336,7 +336,7 @@ void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, if (is_1x1_) { col_buff = input; } - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_ / group_, (Dtype) 1., weights + weight_offset_ * g, @@ -357,7 +357,7 @@ void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); col_buff = col_buffer_.cpu_data(); } - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_, conv_out_spatial_dim_, (Dtype) 1., output + output_offset_ * g, @@ -377,10 +377,10 @@ void BaseConvolutionLayer::backward_cpu_bias(Dtype* bias, template void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, - const int input_off, + const int_tp input_off, const Dtype* weights, Dtype* output, - const int output_off, + const int_tp output_off, bool skip_im2col) { const Dtype* col_buff = input; if (this->device_->backend() == BACKEND_CUDA) { @@ -391,7 +391,7 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, } col_buff = col_buffer()->gpu_data(); } - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, conv_out_spatial_dim_, kernel_dim_, (Dtype) 1., @@ -409,7 +409,7 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, } col_buff = col_buffer()->gpu_data(); } - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_->id(), CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, conv_out_spatial_dim_, kernel_dim_, @@ -425,7 +425,7 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, template void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, - const int output_off, + const int_tp output_off, const Dtype* bias) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -447,17 +447,17 @@ void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, template void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, - const int output_off, + const int_tp output_off, const Dtype* weights, Dtype* input, - const int input_off) { + const int_tp input_off) { Dtype* col_buff = col_buffer()->mutable_gpu_data(); if (is_1x1_) { col_buff = input; } if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { caffe_gpu_gemm( CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_ / group_, (Dtype) 1., weights + weight_offset_ * g, @@ -470,7 +470,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_->id(), CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_ / group_, (Dtype) 1., @@ -488,9 +488,9 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, template void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, - const int input_off, + const int_tp input_off, const Dtype* output, - const int output_off, + const int_tp output_off, Dtype* weights) { const Dtype* col_buff = input; if (this->device_->backend() == BACKEND_CUDA) { @@ -499,7 +499,7 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, conv_im2col_gpu(input + input_off, col_buffer()->mutable_gpu_data()); col_buff = col_buffer()->gpu_data(); } - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { caffe_gpu_gemm( CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_, conv_out_spatial_dim_, (Dtype) 1., @@ -515,7 +515,7 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, col_buffer()->mutable_gpu_data(), 0); col_buff = col_buffer()->gpu_data(); } - for (int g = 0; g < group_; ++g) { + for (int_tp g = 0; g < group_; ++g) { greentea_gpu_gemm(this->device_->id(), CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_, conv_out_spatial_dim_, (Dtype) 1., @@ -532,7 +532,7 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, template void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, const Dtype* input, - const int input_off) { + const int_tp input_off) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 60c98c241c4..e9fdcb71942 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -36,7 +36,7 @@ BasePrefetchingDataLayer::BasePrefetchingDataLayer( : BaseDataLayer(param), prefetch_free_(), prefetch_full_() { - for (int i = 0; i < PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < PREFETCH_COUNT; ++i) { prefetch_free_.push(&prefetch_[i]); } } @@ -49,7 +49,7 @@ void BasePrefetchingDataLayer::LayerSetUp( // calls so that the prefetch thread does not accidentally make simultaneous // cudaMalloc calls when the main thread is running. In some GPUs this // seems to cause failures if we do not so. - for (int i = 0; i < PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < PREFETCH_COUNT; ++i) { prefetch_[i].data_.mutable_cpu_data(); if (this->output_labels_) { prefetch_[i].label_.mutable_cpu_data(); @@ -57,7 +57,7 @@ void BasePrefetchingDataLayer::LayerSetUp( } #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { - for (int i = 0; i < PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < PREFETCH_COUNT; ++i) { prefetch_[i].data_.mutable_gpu_data(); if (this->output_labels_) { prefetch_[i].label_.mutable_gpu_data(); diff --git a/src/caffe/layers/batch_reindex_layer.cpp b/src/caffe/layers/batch_reindex_layer.cpp index 3bf757c718d..6e89e7138a1 100644 --- a/src/caffe/layers/batch_reindex_layer.cpp +++ b/src/caffe/layers/batch_reindex_layer.cpp @@ -10,19 +10,19 @@ template void BatchReindexLayer::Reshape(const vector*>& bottom, const vector*>& top) { CHECK_EQ(1, bottom[1]->num_axes()); - vector newshape; + vector newshape; newshape.push_back(bottom[1]->shape(0)); - for (int i = 1; i < bottom[0]->shape().size(); ++i) { + for (int_tp i = 1; i < bottom[0]->shape().size(); ++i) { newshape.push_back(bottom[0]->shape()[i]); } top[0]->Reshape(newshape); } template -void BatchReindexLayer::check_batch_reindex(int initial_num, - int final_num, +void BatchReindexLayer::check_batch_reindex(int_tp initial_num, + int_tp final_num, const Dtype* ridx_data) { - for (int i = 0; i < final_num; ++i) { + for (int_tp i = 0; i < final_num; ++i) { CHECK_GE(ridx_data[i], 0) << "Index specified for reindex layer was negative."; CHECK_LT(ridx_data[i], initial_num) @@ -38,13 +38,13 @@ void BatchReindexLayer::Forward_cpu(const vector*>& bottom, if (top[0]->count() == 0) { return; } - int inner_dim = bottom[0]->count() / bottom[0]->shape(0); + int_tp inner_dim = bottom[0]->count() / bottom[0]->shape(0); const Dtype* in = bottom[0]->cpu_data(); const Dtype* permut = bottom[1]->cpu_data(); Dtype* out = top[0]->mutable_cpu_data(); - for (int index = 0; index < top[0]->count(); ++index) { - int n = index / (inner_dim); - int in_n = static_cast(permut[n]); + for (int_tp index = 0; index < top[0]->count(); ++index) { + int_tp n = index / (inner_dim); + int_tp in_n = static_cast(permut[n]); out[index] = in[in_n * (inner_dim) + index % (inner_dim)]; } } @@ -57,14 +57,14 @@ void BatchReindexLayer::Backward_cpu( if (!propagate_down[0]) { return; } - int inner_dim = bottom[0]->count() / bottom[0]->shape(0); + int_tp inner_dim = bottom[0]->count() / bottom[0]->shape(0); Dtype* bot_diff = bottom[0]->mutable_cpu_diff(); const Dtype* permut = bottom[1]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); caffe_set(bottom[0]->count(), Dtype(0), bot_diff); - for (int index = 0; index < top[0]->count(); ++index) { - int n = index / (inner_dim); - int in_n = static_cast(permut[n]); + for (int_tp index = 0; index < top[0]->count(); ++index) { + int_tp n = index / (inner_dim); + int_tp in_n = static_cast(permut[n]); bot_diff[in_n * (inner_dim) + index % (inner_dim)] += top_diff[index]; } } diff --git a/src/caffe/layers/batch_reindex_layer.cu b/src/caffe/layers/batch_reindex_layer.cu index 9d7ead651bd..994690c1069 100644 --- a/src/caffe/layers/batch_reindex_layer.cu +++ b/src/caffe/layers/batch_reindex_layer.cu @@ -10,11 +10,11 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void BRForward(const int count, const int inner_dim, const Dtype* in, +__global__ void BRForward(const int_tp count, const int_tp inner_dim, const Dtype* in, const Dtype* permut, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { - int n = index / (inner_dim); - int in_n = static_cast(permut[n]); + int_tp n = index / (inner_dim); + int_tp in_n = static_cast(permut[n]); out[index] = in[in_n * (inner_dim) + index % (inner_dim)]; } } @@ -28,7 +28,7 @@ void BatchReindexLayer::Forward_gpu(const vector*>& bottom, if (top[0]->count() == 0) { return; } - int threads = top[0]->count(); + int_tp threads = top[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -61,17 +61,17 @@ void BatchReindexLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void BRBackward(const int count, const int inner_dim, +__global__ void BRBackward(const int_tp count, const int_tp inner_dim, const Dtype* in, const Dtype* top_indexes, const Dtype* begins, const Dtype* counts, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { - int n = index / (inner_dim); + int_tp n = index / (inner_dim); out[index] = 0; - int lower = static_cast(begins[n]); - int upper = lower + static_cast(counts[n]); - for (int i = lower; i < upper; ++i) { - int in_n = static_cast(top_indexes[i]); + int_tp lower = static_cast(begins[n]); + int_tp upper = lower + static_cast(counts[n]); + for (int_tp i = lower; i < upper; ++i) { + int_tp in_n = static_cast(top_indexes[i]); out[index] += in[in_n * (inner_dim) + index % (inner_dim)]; } } @@ -87,10 +87,10 @@ void BatchReindexLayer::Backward_gpu( return; } - vector > mapping; + vector > mapping; const Dtype* perm = bottom[1]->cpu_data(); - for (int i = 0; i < bottom[1]->count(); ++i) { - mapping.push_back(pair(static_cast(perm[i]), i)); + for (int_tp i = 0; i < bottom[1]->count(); ++i) { + mapping.push_back(pair(static_cast(perm[i]), i)); } std::sort(mapping.begin(), mapping.end(), pair_sort_first()); @@ -101,7 +101,7 @@ void BatchReindexLayer::Backward_gpu( // k'th element of `begins` points to the location in `top_indexes` where the // list for the k'th example begin, and the k'th element of `counts` is the // length of that list. - vector shape; + vector shape; shape.push_back(bottom[1]->count()); Blob top_indexes(shape); shape[0] = bottom[0]->shape(0); @@ -112,7 +112,7 @@ void BatchReindexLayer::Backward_gpu( Dtype* b_data = begins.mutable_cpu_data(); caffe_set(begins.count(), Dtype(-1), b_data); caffe_set(counts.count(), Dtype(0), c_data); - for (int i = 0; i < mapping.size(); ++i) { + for (int_tp i = 0; i < mapping.size(); ++i) { t_i_data[i] = mapping[i].second; if (b_data[mapping[i].first] == -1) { b_data[mapping[i].first] = i; @@ -120,7 +120,7 @@ void BatchReindexLayer::Backward_gpu( c_data[mapping[i].first] += 1; } - int threads = bottom[0]->count(); + int_tp threads = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp index 75e8650d044..681df1657ce 100644 --- a/src/caffe/layers/bnll_layer.cpp +++ b/src/caffe/layers/bnll_layer.cpp @@ -11,8 +11,8 @@ void BNLLLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - const int count = bottom[0]->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = bottom[0]->count(); + for (int_tp i = 0; i < count; ++i) { top_data[i] = bottom_data[i] > 0 ? bottom_data[i] + log(1. + exp(-bottom_data[i])) : log(1. + exp(bottom_data[i])); @@ -27,9 +27,9 @@ void BNLLLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); Dtype expval; - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD))); bottom_diff[i] = top_diff[i] * expval / (expval + 1.); } diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index fba13b3e253..9236f3b6657 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -13,7 +13,7 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { +__global__ void BNLLForward(const int_tp n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? @@ -27,7 +27,7 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -56,7 +56,7 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void BNLLBackward(const int n, const Dtype* in_diff, +__global__ void BNLLBackward(const int_tp n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); @@ -73,7 +73,7 @@ void BNLLLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 0e267576746..f15b9df980c 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -17,10 +17,10 @@ void ConcatLayer::LayerSetUp(const vector*>& bottom, template void ConcatLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int num_axes = bottom[0]->num_axes(); + const int_tp num_axes = bottom[0]->num_axes(); const ConcatParameter& concat_param = this->layer_param_.concat_param(); if (concat_param.has_concat_dim()) { - concat_axis_ = static_cast(concat_param.concat_dim()); + concat_axis_ = static_cast(concat_param.concat_dim()); // Don't allow negative indexing for concat_dim, a uint32 -- almost // certainly unintended. CHECK_GE(concat_axis_, 0) << "casting concat_dim from uint32 to int32 " @@ -31,14 +31,14 @@ void ConcatLayer::Reshape(const vector*>& bottom, concat_axis_ = bottom[0]->CanonicalAxisIndex(concat_param.axis()); } // Initialize with the first blob. - vector top_shape = bottom[0]->shape(); + vector top_shape = bottom[0]->shape(); num_concats_ = bottom[0]->count(0, concat_axis_); concat_input_size_ = bottom[0]->count(concat_axis_ + 1); - int bottom_count_sum = bottom[0]->count(); - for (int i = 1; i < bottom.size(); ++i) { + int_tp bottom_count_sum = bottom[0]->count(); + for (int_tp i = 1; i < bottom.size(); ++i) { CHECK_EQ(num_axes, bottom[i]->num_axes()) << "All inputs must have the same #axes."; - for (int j = 0; j < num_axes; ++j) { + for (int_tp j = 0; j < num_axes; ++j) { if (j == concat_axis_) { continue; } CHECK_EQ(top_shape[j], bottom[i]->shape(j)) << "All inputs must have the same shape, except at concat_axis."; @@ -59,12 +59,12 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_cpu_data(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - for (int i = 0; i < bottom.size(); ++i) { + int_tp offset_concat_axis = 0; + const int_tp top_concat_axis = top[0]->shape(concat_axis_); + for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { + const int_tp bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int_tp n = 0; n < num_concats_; ++n) { caffe_cpu_copy(bottom_concat_axis * concat_input_size_, bottom_data + n * bottom_concat_axis * concat_input_size_, top_data + (n * top_concat_axis + offset_concat_axis) @@ -79,13 +79,13 @@ void ConcatLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->cpu_diff(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - for (int i = 0; i < bottom.size(); ++i) { - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + int_tp offset_concat_axis = 0; + const int_tp top_concat_axis = top[0]->shape(concat_axis_); + for (int_tp i = 0; i < bottom.size(); ++i) { + const int_tp bottom_concat_axis = bottom[i]->shape(concat_axis_); if (propagate_down[i]) { Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); - for (int n = 0; n < num_concats_; ++n) { + for (int_tp n = 0; n < num_concats_; ++n) { caffe_cpu_copy(bottom_concat_axis * concat_input_size_, top_diff + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, bottom_diff + n * bottom_concat_axis * concat_input_size_); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index b6bbbdaf5e4..44b759ff8ac 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -13,16 +13,16 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void Concat(const int nthreads, const Dtype* in_data, - const bool forward, const int num_concats, - const int concat_size, const int top_concat_axis, - const int bottom_concat_axis, - const int offset_concat_axis, Dtype* out_data) { +__global__ void Concat(const int_tp nthreads, const Dtype* in_data, + const bool forward, const int_tp num_concats, + const int_tp concat_size, const int_tp top_concat_axis, + const int_tp bottom_concat_axis, + const int_tp offset_concat_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int total_concat_size = concat_size * bottom_concat_axis; - const int concat_num = index / total_concat_size; - const int concat_index = index % total_concat_size; - const int top_index = concat_index + const int_tp total_concat_size = concat_size * bottom_concat_axis; + const int_tp concat_num = index / total_concat_size; + const int_tp concat_index = index % total_concat_size; + const int_tp top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; @@ -38,14 +38,14 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_gpu_data(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); + int_tp offset_concat_axis = 0; + const int_tp top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = true; - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; + const int_tp bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int_tp bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int_tp nthreads = bottom_concat_size * num_concats_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -82,16 +82,16 @@ void ConcatLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->gpu_diff(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); + int_tp offset_concat_axis = 0; + const int_tp top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; - for (int i = 0; i < bottom.size(); ++i) { - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + for (int_tp i = 0; i < bottom.size(); ++i) { + const int_tp bottom_concat_axis = bottom[i]->shape(concat_axis_); if (propagate_down[i]) { Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; + const int_tp bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int_tp bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int_tp nthreads = bottom_concat_size * num_concats_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/connected_component_layer.cpp b/src/caffe/layers/connected_component_layer.cpp index 2992ccb3573..e3b0042c380 100644 --- a/src/caffe/layers/connected_component_layer.cpp +++ b/src/caffe/layers/connected_component_layer.cpp @@ -13,18 +13,18 @@ namespace caffe { // Derived from // http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp template -cv::Mat ConnectedComponentLayer::FindBlobs(int maxlabel, +cv::Mat ConnectedComponentLayer::FindBlobs(int_tp maxlabel, const cv::Mat &input) { // Fill the label_image with the blobs cv::Mat label_image; input.convertTo(label_image, CV_32SC1); - int label_count = maxlabel + 1; + int_tp label_count = maxlabel + 1; // Segment into label numbers higher than the original label numbers - for (int y = 0; y < label_image.rows; y++) { - int *row = reinterpret_cast(label_image.ptr(y)); - for (int x = 0; x < label_image.cols; x++) { + for (int_tp y = 0; y < label_image.rows; y++) { + int_tp *row = reinterpret_cast(label_image.ptr(y)); + for (int_tp x = 0; x < label_image.cols; x++) { // Skip background and already labeled areas if (row[x] > maxlabel || row[x] == 0) { continue; @@ -59,11 +59,11 @@ void ConnectedComponentLayer::Forward_cpu( cv::Mat img(bottom[0]->height(), bottom[0]->width(), CV_8SC1); - for (int nc = 0; nc < bottom[0]->num() * bottom[0]->channels(); ++nc) { - int maxlabel = 0; - for (int y = 0; y < bottom[0]->height(); ++y) { - for (int x = 0; x < bottom[0]->width(); ++x) { - int val = bottom_data[nc * bottom[0]->width() * bottom[0]->height() + for (int_tp nc = 0; nc < bottom[0]->num() * bottom[0]->channels(); ++nc) { + int_tp maxlabel = 0; + for (int_tp y = 0; y < bottom[0]->height(); ++y) { + for (int_tp x = 0; x < bottom[0]->width(); ++x) { + int_tp val = bottom_data[nc * bottom[0]->width() * bottom[0]->height() + bottom[0]->width() * y + x]; if (val > maxlabel) { maxlabel = val; @@ -73,10 +73,10 @@ void ConnectedComponentLayer::Forward_cpu( } cv::Mat seg = FindBlobs(maxlabel, img); #pragma omp parallel for - for (int y = 0; y < seg.rows; ++y) { - for (int x = 0; x < seg.cols; ++x) { + for (int_tp y = 0; y < seg.rows; ++y) { + for (int_tp x = 0; x < seg.cols; ++x) { top_data[nc * bottom[0]->width() * bottom[0]->height() - + bottom[0]->width() * y + x] = seg.at(y, x); + + bottom[0]->width() * y + x] = seg.at(y, x); } } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 25e167819d3..0b83066013d 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -25,7 +25,7 @@ void ContrastiveLossLayer::LayerSetUp( dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); // vector of ones used to sum along channels summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) + for (int_tp i = 0; i < bottom[0]->channels(); ++i) summer_vec_.mutable_cpu_data()[i] = Dtype(1); } @@ -33,21 +33,21 @@ template void ContrastiveLossLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { - int count = bottom[0]->count(); + int_tp count = bottom[0]->count(); caffe_sub( count, bottom[0]->cpu_data(), // a bottom[1]->cpu_data(), // b diff_.mutable_cpu_data()); // a_i-b_i - const int channels = bottom[0]->channels(); + const int_tp channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); - for (int i = 0; i < bottom[0]->num(); ++i) { + for (int_tp i = 0; i < bottom[0]->num(); ++i) { dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_.cpu_data() + (i*channels), diff_.cpu_data() + (i*channels)); - if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs + if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs if (legacy_version) { @@ -68,16 +68,16 @@ void ContrastiveLossLayer::Backward_cpu(const vector*>& top, Dtype margin = this->layer_param_.contrastive_loss_param().margin(); bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version(); - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { + int_tp num = bottom[i]->num(); + int_tp channels = bottom[i]->channels(); + for (int_tp j = 0; j < num; ++j) { Dtype* bout = bottom[i]->mutable_cpu_diff(); - if (static_cast(bottom[2]->cpu_data()[j])) { // similar pairs + if (static_cast(bottom[2]->cpu_data()[j])) { // similar pairs caffe_cpu_axpby( channels, alpha, diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index c5935d8573a..039ff023921 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -16,7 +16,7 @@ namespace caffe { template void ContrastiveLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -55,8 +55,8 @@ void ContrastiveLossLayer::Forward_gpu( bool legacy_version = this->layer_param_.contrastive_loss_param() .legacy_version(); Dtype loss(0.0); - for (int i = 0; i < bottom[0]->num(); ++i) { - if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs + for (int_tp i = 0; i < bottom[0]->num(); ++i) { + if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs if (legacy_version) { @@ -74,14 +74,14 @@ void ContrastiveLossLayer::Forward_gpu( #ifdef USE_CUDA template -__global__ void CLLBackward(const int count, const int channels, +__global__ void CLLBackward(const int_tp count, const int_tp channels, const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { - int n = i / channels; // the num index, to access y and dist_sq - if (static_cast(y[n])) { // similar pairs + int_tp n = i / channels; // the num index, to access y and dist_sq + if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs Dtype mdist(0.0); @@ -108,10 +108,10 @@ template void ContrastiveLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { - const int count = bottom[0]->count(); - const int channels = bottom[0]->channels(); + const int_tp count = bottom[0]->count(); + const int_tp channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); const bool legacy_version = this->layer_param_.contrastive_loss_param() .legacy_version(); diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 160df1551fd..216faa9d07b 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -10,15 +10,15 @@ namespace caffe { template void ConvolutionLayer::compute_output_shape() { - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int* stride_data = this->stride_.cpu_data(); - const int* pad_data = this->pad_.cpu_data(); - const int* kstride_data = this->kstride_.cpu_data(); + const int_tp* kernel_shape_data = this->kernel_shape_.cpu_data(); + const int_tp* stride_data = this->stride_.cpu_data(); + const int_tp* pad_data = this->pad_.cpu_data(); + const int_tp* kstride_data = this->kstride_.cpu_data(); this->output_shape_.clear(); - for (int i = 0; i < this->num_spatial_axes_; ++i) { + for (int_tp i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis - const int input_dim = this->input_shape(i + 1); - const int output_dim = (input_dim + 2 * pad_data[i] + const int_tp input_dim = this->input_shape(i + 1); + const int_tp output_dim = (input_dim + 2 * pad_data[i] - ((kernel_shape_data[i] - 1) * kstride_data[i] + 1)) / stride_data[i] + 1; this->output_shape_.push_back(output_dim); @@ -29,10 +29,10 @@ template void ConvolutionLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* weight = this->blobs_[0]->cpu_data(); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, top_data + n * this->top_dim_); if (this->bias_term_) { @@ -48,19 +48,19 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_, diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 8fccb083471..86df781bd92 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -18,12 +18,12 @@ template void ConvolutionLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Multi queue execution, all previous work needs to be done first this->device_->FinishQueues(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { // Multi queue execution, go through work queues this->device_->SwitchQueue(n); this->forward_gpu_gemm(bottom_data, n * this->bottom_dim_, weight, @@ -43,19 +43,19 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data, n * this->bottom_dim_, @@ -66,7 +66,7 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, if (propagate_down[i]) { // Multi queue execution, all previous work needs to be done first this->device_->FinishQueues(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { // Multi queue execution, go through work queues this->device_->SwitchQueue(n); this->backward_gpu_gemm(top_diff, n * this->top_dim_, weight, diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index d7b1e0d651f..c1b7393a36b 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -32,16 +32,16 @@ void CuDNNConvolutionLayer::LayerSetUp( bwd_data_algo_ = new cudnnConvolutionBwdDataAlgo_t[bottom.size()]; // initialize size arrays - workspace_fwd_sizes_ = new size_t[bottom.size()]; - workspace_bwd_filter_sizes_ = new size_t[bottom.size()]; - workspace_bwd_data_sizes_ = new size_t[bottom.size()]; + workspace_fwd_sizes_ = new uint_tp[bottom.size()]; + workspace_bwd_filter_sizes_ = new uint_tp[bottom.size()]; + workspace_bwd_data_sizes_ = new uint_tp[bottom.size()]; // workspace data workspaceSizeInBytes = 0; workspaceData = NULL; workspace = new void*[this->group_ * CUDNN_STREAMS_PER_GROUP]; - for (size_t i = 0; i < bottom.size(); ++i) { + for (uint_tp i = 0; i < bottom.size(); ++i) { // initialize all to default algorithms fwd_algo_[i] = (cudnnConvolutionFwdAlgo_t)0; bwd_filter_algo_[i] = (cudnnConvolutionBwdFilterAlgo_t)0; @@ -52,7 +52,7 @@ void CuDNNConvolutionLayer::LayerSetUp( workspace_bwd_filter_sizes_[i] = 0; } - for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { + for (int_tp g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { CUDA_CHECK(cudaStreamCreate(&stream_[g])); CUDNN_CHECK(cudnnCreate(&handle_[g])); CUDNN_CHECK(cudnnSetStream(handle_[g], stream_[g])); @@ -63,15 +63,15 @@ void CuDNNConvolutionLayer::LayerSetUp( bias_offset_ = (this->num_output_ / this->group_); // Create filter descriptor. - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int kernel_h = kernel_shape_data[0]; - const int kernel_w = kernel_shape_data[1]; + const int_tp* kernel_shape_data = this->kernel_shape_.cpu_data(); + const int_tp kernel_h = kernel_shape_data[0]; + const int_tp kernel_w = kernel_shape_data[1]; cudnn::createFilterDesc(&filter_desc_, this->num_output_ / this->group_, this->channels_ / this->group_, kernel_h, kernel_w); // Create tensor descriptor(s) for data and corresponding convolution(s). - for (int i = 0; i < bottom.size(); i++) { + for (int_tp i = 0; i < bottom.size(); i++) { cudnnTensorDescriptor_t bottom_desc; cudnn::createTensor4dDesc(&bottom_desc); bottom_descs_.push_back(bottom_desc); @@ -101,22 +101,22 @@ void CuDNNConvolutionLayer::Reshape( << "Use 'engine: CAFFE' for general ND convolution."; bottom_offset_ = this->bottom_dim_ / this->group_; top_offset_ = this->top_dim_ / this->group_; - const int height = bottom[0]->shape(this->channel_axis_ + 1); - const int width = bottom[0]->shape(this->channel_axis_ + 2); - const int height_out = top[0]->shape(this->channel_axis_ + 1); - const int width_out = top[0]->shape(this->channel_axis_ + 2); - const int* pad_data = this->pad_.cpu_data(); - const int pad_h = pad_data[0]; - const int pad_w = pad_data[1]; - const int* stride_data = this->stride_.cpu_data(); - const int stride_h = stride_data[0]; - const int stride_w = stride_data[1]; + const int_tp height = bottom[0]->shape(this->channel_axis_ + 1); + const int_tp width = bottom[0]->shape(this->channel_axis_ + 2); + const int_tp height_out = top[0]->shape(this->channel_axis_ + 1); + const int_tp width_out = top[0]->shape(this->channel_axis_ + 2); + const int_tp* pad_data = this->pad_.cpu_data(); + const int_tp pad_h = pad_data[0]; + const int_tp pad_w = pad_data[1]; + const int_tp* stride_data = this->stride_.cpu_data(); + const int_tp stride_h = stride_data[0]; + const int_tp stride_w = stride_data[1]; // Specify workspace limit for kernels directly until we have a // planning strategy and a rewrite of Caffe's GPU memory mangagement - size_t workspace_limit_bytes = 8*1024*1024; + uint_tp workspace_limit_bytes = 8*1024*1024; - for (int i = 0; i < bottom.size(); i++) { + for (int_tp i = 0; i < bottom.size(); i++) { cudnn::setTensor4dDesc(&bottom_descs_[i], this->num_, this->channels_ / this->group_, height, width, @@ -173,11 +173,11 @@ void CuDNNConvolutionLayer::Reshape( } // reduce over all workspace sizes to get a maximum to allocate / reallocate - size_t total_workspace_fwd = 0; - size_t total_workspace_bwd_data = 0; - size_t total_workspace_bwd_filter = 0; + uint_tp total_workspace_fwd = 0; + uint_tp total_workspace_bwd_data = 0; + uint_tp total_workspace_bwd_filter = 0; - for (size_t i = 0; i < bottom.size(); i++) { + for (uint_tp i = 0; i < bottom.size(); i++) { total_workspace_fwd = std::max(total_workspace_fwd, workspace_fwd_sizes_[i]); total_workspace_bwd_data = std::max(total_workspace_bwd_data, @@ -186,11 +186,11 @@ void CuDNNConvolutionLayer::Reshape( workspace_bwd_filter_sizes_[i]); } // get max over all operations - size_t max_workspace = std::max(total_workspace_fwd, + uint_tp max_workspace = std::max(total_workspace_fwd, total_workspace_bwd_data); max_workspace = std::max(max_workspace, total_workspace_bwd_filter); // ensure all groups have enough workspace - size_t total_max_workspace = max_workspace * + uint_tp total_max_workspace = max_workspace * (this->group_ * CUDNN_STREAMS_PER_GROUP); // this is the total amount of storage needed over all groups + streams @@ -204,7 +204,7 @@ void CuDNNConvolutionLayer::Reshape( cudaError_t err = cudaMalloc(&(this->workspaceData), workspaceSizeInBytes); if (err != cudaSuccess) { // force zero memory path - for (int i = 0; i < bottom.size(); i++) { + for (int_tp i = 0; i < bottom.size(); i++) { workspace_fwd_sizes_[i] = 0; workspace_bwd_filter_sizes_[i] = 0; workspace_bwd_data_sizes_[i] = 0; @@ -214,7 +214,7 @@ void CuDNNConvolutionLayer::Reshape( } // NULL out all workspace pointers - for (int g = 0; g < (this->group_ * CUDNN_STREAMS_PER_GROUP); g++) { + for (int_tp g = 0; g < (this->group_ * CUDNN_STREAMS_PER_GROUP); g++) { workspace[g] = NULL; } // NULL out underlying data @@ -223,7 +223,7 @@ void CuDNNConvolutionLayer::Reshape( } // if we succeed in the allocation, set pointer aliases for workspaces - for (int g = 0; g < (this->group_ * CUDNN_STREAMS_PER_GROUP); g++) { + for (int_tp g = 0; g < (this->group_ * CUDNN_STREAMS_PER_GROUP); g++) { workspace[g] = reinterpret_cast(workspaceData) + g*max_workspace; } } @@ -240,7 +240,7 @@ CuDNNConvolutionLayer::~CuDNNConvolutionLayer() { // Check that handles have been setup before destroying. if (!handles_setup_) { return; } - for (int i = 0; i < bottom_descs_.size(); i++) { + for (int_tp i = 0; i < bottom_descs_.size(); i++) { cudnnDestroyTensorDescriptor(bottom_descs_[i]); cudnnDestroyTensorDescriptor(top_descs_[i]); cudnnDestroyConvolutionDescriptor(conv_descs_[i]); @@ -250,7 +250,7 @@ CuDNNConvolutionLayer::~CuDNNConvolutionLayer() { } cudnnDestroyFilterDescriptor(filter_desc_); - for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { + for (int_tp g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { cudaStreamDestroy(stream_[g]); cudnnDestroy(handle_[g]); } diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index f76c2c94a18..99f84819285 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -15,12 +15,12 @@ template void CuDNNConvolutionLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. - for (int g = 0; g < this->group_; g++) { + for (int_tp g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType::one, @@ -64,10 +64,10 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. - for (int g = 0; g < this->group_; g++) { + for (int_tp g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], diff --git a/src/caffe/layers/cudnn_lcn_layer.cpp b/src/caffe/layers/cudnn_lcn_layer.cpp index 866d810b9f9..c325f4c5cf4 100644 --- a/src/caffe/layers/cudnn_lcn_layer.cpp +++ b/src/caffe/layers/cudnn_lcn_layer.cpp @@ -40,7 +40,7 @@ void CuDNNLCNLayer::Reshape(const vector*>& bottom, CUDNN_CHECK(cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, k_)); // allocate / reallocate tempData buffers - size_t totalSizeInBytes = sizeof(Dtype)*bottom[0]->num()* \ + uint_tp totalSizeInBytes = sizeof(Dtype)*bottom[0]->num()* \ this->channels_*this->height_*this->width_; if (totalSizeInBytes > tempDataSize) { diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index 759d83984ef..e3537f30ac6 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -22,10 +22,10 @@ template void CuDNNReLULayer::Reshape(const vector*>& bottom, const vector*>& top) { ReLULayer::Reshape(bottom, top); - const int N = bottom[0]->num(); - const int K = bottom[0]->channels(); - const int H = bottom[0]->height(); - const int W = bottom[0]->width(); + const int_tp N = bottom[0]->num(); + const int_tp K = bottom[0]->channels(); + const int_tp H = bottom[0]->height(); + const int_tp W = bottom[0]->width(); cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); } diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp index 32637873d46..dda3717e22b 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cpp +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -22,10 +22,10 @@ template void CuDNNSigmoidLayer::Reshape(const vector*>& bottom, const vector*>& top) { SigmoidLayer::Reshape(bottom, top); - const int N = bottom[0]->num(); - const int K = bottom[0]->channels(); - const int H = bottom[0]->height(); - const int W = bottom[0]->width(); + const int_tp N = bottom[0]->num(); + const int_tp K = bottom[0]->channels(); + const int_tp H = bottom[0]->height(); + const int_tp W = bottom[0]->width(); cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); } diff --git a/src/caffe/layers/cudnn_softmax_layer.cpp b/src/caffe/layers/cudnn_softmax_layer.cpp index 77a3225adcd..393aa806d3f 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cpp +++ b/src/caffe/layers/cudnn_softmax_layer.cpp @@ -26,10 +26,10 @@ template void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { SoftmaxLayer::Reshape(bottom, top); - int N = this->outer_num_; - int K = bottom[0]->shape(this->softmax_axis_); - int H = this->inner_num_; - int W = 1; + int_tp N = this->outer_num_; + int_tp K = bottom[0]->shape(this->softmax_axis_); + int_tp H = this->inner_num_; + int_tp W = 1; cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); } diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp index 376faad324d..4e27336be79 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cpp +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -22,10 +22,10 @@ template void CuDNNTanHLayer::Reshape(const vector*>& bottom, const vector*>& top) { TanHLayer::Reshape(bottom, top); - const int N = bottom[0]->num(); - const int K = bottom[0]->channels(); - const int H = bottom[0]->height(); - const int W = bottom[0]->width(); + const int_tp N = bottom[0]->num(); + const int_tp K = bottom[0]->channels(); + const int_tp H = bottom[0]->height(); + const int_tp W = bottom[0]->width(); cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); } diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index c4050bfd62e..60d30538708 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -29,17 +29,17 @@ DataLayer::~DataLayer() { template void DataLayer::DataLayerSetUp(const vector*>& bottom, const vector*>& top) { - const int batch_size = this->layer_param_.data_param().batch_size(); + const int_tp batch_size = this->layer_param_.data_param().batch_size(); // Read a data point, and use it to initialize the top blob. Datum& datum = *(reader_.full().peek()); // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); + vector top_shape = this->data_transformer_->InferBlobShape(datum); this->transformed_data_.Reshape(top_shape); // Reshape top[0] and prefetch_data according to the batch_size. top_shape[0] = batch_size; top[0]->Reshape(top_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < this->PREFETCH_COUNT; ++i) { this->prefetch_[i].data_.Reshape(top_shape); } LOG(INFO)<< "output data size: " << top[0]->num() << "," @@ -47,9 +47,9 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, << top[0]->width(); // label if (this->output_labels_) { - vector label_shape(1, batch_size); + vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < this->PREFETCH_COUNT; ++i) { this->prefetch_[i].label_.Reshape(label_shape); } } @@ -68,10 +68,10 @@ void DataLayer::load_batch(Batch* batch) { // Reshape according to the first datum of each batch // on single input batches allows for inputs of varying dimension. - const int batch_size = this->layer_param_.data_param().batch_size(); + const int_tp batch_size = this->layer_param_.data_param().batch_size(); Datum& datum = *(reader_.full().peek()); // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); + vector top_shape = this->data_transformer_->InferBlobShape(datum); this->transformed_data_.Reshape(top_shape); // Reshape batch according to the batch_size. top_shape[0] = batch_size; @@ -83,14 +83,14 @@ void DataLayer::load_batch(Batch* batch) { if (this->output_labels_) { top_label = batch->label_.mutable_cpu_data(); } - for (int item_id = 0; item_id < batch_size; ++item_id) { + for (int_tp item_id = 0; item_id < batch_size; ++item_id) { timer.Start(); // get a datum Datum& datum = *(reader_.full().pop("Waiting for data")); read_time += timer.MicroSeconds(); timer.Start(); // Apply data transformations (mirror, scale, crop...) - int offset = batch->data_.offset(item_id); + int_tp offset = batch->data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); this->data_transformer_->Transform(datum, &(this->transformed_data_)); // Copy label. diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 4e6d6c878b0..20f416dfca9 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -10,15 +10,15 @@ namespace caffe { template void DeconvolutionLayer::compute_output_shape() { - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int* stride_data = this->stride_.cpu_data(); - const int* pad_data = this->pad_.cpu_data(); - const int* kstride_data = this->kstride_.cpu_data(); + const int_tp* kernel_shape_data = this->kernel_shape_.cpu_data(); + const int_tp* stride_data = this->stride_.cpu_data(); + const int_tp* pad_data = this->pad_.cpu_data(); + const int_tp* kstride_data = this->kstride_.cpu_data(); this->output_shape_.clear(); - for (int i = 0; i < this->num_spatial_axes_; ++i) { + for (int_tp i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis - const int input_dim = this->input_shape(i + 1); - const int output_dim = stride_data[i] * (input_dim - 1) + const int_tp input_dim = this->input_shape(i + 1); + const int_tp output_dim = stride_data[i] * (input_dim - 1) + ((kernel_shape_data[i] - 1) * kstride_data[i] + 1) - 2 * pad_data[i]; this->output_shape_.push_back(output_dim); } @@ -28,10 +28,10 @@ template void DeconvolutionLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* weight = this->blobs_[0]->cpu_data(); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { this->backward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, top_data + n * this->top_dim_); if (this->bias_term_) { @@ -47,19 +47,19 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { // Gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_cpu_gemm(top_diff + n * this->top_dim_, diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu index 8e3dec41539..52b4e2cd77d 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -18,10 +18,10 @@ template void DeconvolutionLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { this->backward_gpu_gemm(bottom_data, n * this->bottom_dim_, weight, top_data, n * this->top_dim_); if (this->bias_term_) { @@ -38,19 +38,19 @@ void DeconvolutionLayer::Backward_gpu( const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff, n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { - for (int n = 0; n < this->num_; ++n) { + for (int_tp n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(top_diff, n * this->top_dim_, bottom_data, diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index 1c3f2c216d6..41756b6da86 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -18,7 +18,7 @@ void DropoutLayer::LayerSetUp(const vector*>& bottom, DCHECK(threshold_ > 0.); DCHECK(threshold_ < 1.); scale_ = 1. / (1. - threshold_); - uint_thres_ = static_cast(UINT_MAX * threshold_); + uint_thres_ = static_cast(ULONG_MAX * threshold_); } template @@ -35,12 +35,12 @@ void DropoutLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - unsigned int* mask = rand_vec_.mutable_cpu_data(); - const int count = bottom[0]->count(); + uint_tp* mask = rand_vec_.mutable_cpu_data(); + const int_tp count = bottom[0]->count(); if (this->phase_ == TRAIN) { // Create random numbers caffe_rng_bernoulli(count, 1. - threshold_, mask); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { top_data[i] = bottom_data[i] * mask[i] * scale_; } } else { @@ -56,9 +56,9 @@ void DropoutLayer::Backward_cpu(const vector*>& top, const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); if (this->phase_ == TRAIN) { - const unsigned int* mask = rand_vec_.cpu_data(); - const int count = bottom[0]->count(); - for (int i = 0; i < count; ++i) { + const uint_tp* mask = rand_vec_.cpu_data(); + const int_tp count = bottom[0]->count(); + for (int_tp i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * mask[i] * scale_; } } else { diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index 81c3441d2c2..c353d1d50e5 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -12,9 +12,9 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void DropoutForward(const int n, const Dtype* in, - const unsigned int* mask, - const unsigned int threshold, const float scale, +__global__ void DropoutForward(const int_tp n, const Dtype* in, + const uint_tp* mask, + const uint_tp threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; @@ -27,14 +27,14 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->phase_ == TRAIN) { - unsigned int* mask = - static_cast(rand_vec_.mutable_gpu_data()); - caffe_gpu_rng_uniform(count, mask); + uint_tp* mask = + static_cast(rand_vec_.mutable_gpu_data()); + caffe_gpu_rng_uniform(count, (uint_tpc*)(mask)); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) DropoutForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -72,9 +72,9 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void DropoutBackward(const int n, const Dtype* in_diff, - const unsigned int* mask, - const unsigned int threshold, const float scale, +__global__ void DropoutBackward(const int_tp n, const Dtype* in_diff, + const uint_tp* mask, + const uint_tp threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); @@ -93,9 +93,9 @@ void DropoutLayer::Backward_gpu(const vector*>& top, if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->phase_ == TRAIN) { - const unsigned int* mask = static_cast(rand_vec_ + const uint_tp* mask = static_cast(rand_vec_ .gpu_data()); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) DropoutBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( @@ -114,7 +114,7 @@ void DropoutLayer::Backward_gpu(const vector*>& top, if (this->phase_ == TRAIN) { cl_mem mask = (cl_mem) (rand_vec_.gpu_data()); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); viennacl::ocl::kernel &oclk_dropout = program.get_kernel( CL_KERNEL_SELECT("dropout_backward")); viennacl::ocl::enqueue( diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp index 6b0d617464c..70d55e49488 100644 --- a/src/caffe/layers/dummy_data_layer.cpp +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -9,9 +9,9 @@ namespace caffe { template void DummyDataLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - const int num_top = top.size(); + const int_tp num_top = top.size(); const DummyDataParameter& param = this->layer_param_.dummy_data_param(); - const int num_data_filler = param.data_filler_size(); + const int_tp num_data_filler = param.data_filler_size(); CHECK(num_data_filler == 0 || num_data_filler == 1 || num_data_filler == num_top) << "Number of data fillers must be 0, 1 or equal to the number of tops: " @@ -66,7 +66,7 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, } else { refill_.resize(num_top); fillers_.resize(num_top); - for (int i = 0; i < num_top; ++i) { + for (int_tp i = 0; i < num_top; ++i) { fillers_[i].reset(GetFiller(param.data_filler(i))); // Refill on each iteration iff not using a constant filler, // but use the inverse of this rule for the first run. @@ -74,18 +74,18 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, (strcmp(param.data_filler(i).type().c_str(), "constant") == 0); } } - for (int i = 0; i < num_top; ++i) { + for (int_tp i = 0; i < num_top; ++i) { if (legacy_dims) { - const int num = (param.num_size() == 1) ? param.num(0) : param.num(i); - const int channels = + const int_tp num = (param.num_size() == 1) ? param.num(0) : param.num(i); + const int_tp channels = (param.channels_size() == 1) ? param.channels(0) : param.channels(i); - const int height = + const int_tp height = (param.height_size() == 1) ? param.height(0) : param.height(i); - const int width = + const int_tp width = (param.width_size() == 1) ? param.width(0) : param.width(i); top[i]->Reshape(num, channels, height, width); } else { - const int shape_index = (param.shape_size() == 1) ? 0 : i; + const int_tp shape_index = (param.shape_size() == 1) ? 0 : i; top[i]->Reshape(param.shape(shape_index)); } } @@ -93,7 +93,7 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, this->Forward(bottom, top); // Invert the inverted refill_ values to refill the desired (non-constant) // Blobs in every usual forward pass. - for (int i = 0; i < refill_.size(); ++i) { + for (int_tp i = 0; i < refill_.size(); ++i) { refill_[i] = !refill_[i]; } } @@ -101,8 +101,8 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, template void DummyDataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - for (int i = 0; i < top.size(); ++i) { - const int filler_id = (fillers_.size() > 1) ? i : 0; + for (int_tp i = 0; i < top.size(); ++i) { + const int_tp filler_id = (fillers_.size() > 1) ? i : 0; if (refill_[filler_id]) { fillers_[filler_id]->Fill(top[i]); } diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index e2e0be79587..26ed9d93dc3 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -21,7 +21,7 @@ void EltwiseLayer::LayerSetUp(const vector*>& bottom, // Blob-wise coefficients for the elementwise operation. coeffs_ = vector(bottom.size(), 1); if (this->layer_param().eltwise_param().coeff_size()) { - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { coeffs_[i] = this->layer_param().eltwise_param().coeff(i); } } @@ -31,7 +31,7 @@ void EltwiseLayer::LayerSetUp(const vector*>& bottom, template void EltwiseLayer::Reshape(const vector*>& bottom, const vector*>& top) { - for (int i = 1; i < bottom.size(); ++i) { + for (int_tp i = 1; i < bottom.size(); ++i) { CHECK(bottom[i]->shape() == bottom[0]->shape()); } top[0]->ReshapeLike(*bottom[0]); @@ -45,34 +45,34 @@ void EltwiseLayer::Reshape(const vector*>& bottom, template void EltwiseLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { - int* mask = NULL; + int_tp* mask = NULL; const Dtype* bottom_data_a = NULL; const Dtype* bottom_data_b = NULL; - const int count = top[0]->count(); + const int_tp count = top[0]->count(); Dtype* top_data = top[0]->mutable_cpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data); - for (int i = 2; i < bottom.size(); ++i) { + for (int_tp i = 2; i < bottom.size(); ++i) { caffe_mul(count, top_data, bottom[i]->cpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_set(count, Dtype(0), top_data); // TODO(shelhamer) does BLAS optimize to sum for coeff = 1? - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { caffe_axpy(count, coeffs_[i], bottom[i]->cpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: // Initialize mask = max_idx_.mutable_cpu_data(); - caffe_set(count, -1, mask); + caffe_set(count, -1L, mask); caffe_set(count, Dtype(-FLT_MAX), top_data); // bottom 0 & 1 bottom_data_a = bottom[0]->cpu_data(); bottom_data_b = bottom[1]->cpu_data(); - for (int idx = 0; idx < count; ++idx) { + for (int_tp idx = 0; idx < count; ++idx) { if (bottom_data_a[idx] > bottom_data_b[idx]) { top_data[idx] = bottom_data_a[idx]; // maxval mask[idx] = 0; // maxid @@ -82,9 +82,9 @@ void EltwiseLayer::Forward_cpu( } } // bottom 2++ - for (int blob_idx = 2; blob_idx < bottom.size(); ++blob_idx) { + for (int_tp blob_idx = 2; blob_idx < bottom.size(); ++blob_idx) { bottom_data_b = bottom[blob_idx]->cpu_data(); - for (int idx = 0; idx < count; ++idx) { + for (int_tp idx = 0; idx < count; ++idx) { if (bottom_data_b[idx] > top_data[idx]) { top_data[idx] = bottom_data_b[idx]; // maxval mask[idx] = blob_idx; // maxid @@ -100,11 +100,11 @@ void EltwiseLayer::Forward_cpu( template void EltwiseLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - const int* mask = NULL; - const int count = top[0]->count(); + const int_tp* mask = NULL; + const int_tp count = top[0]->count(); const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); @@ -112,7 +112,7 @@ void EltwiseLayer::Backward_cpu(const vector*>& top, case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; - for (int j = 0; j < bottom.size(); ++j) { + for (int_tp j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_cpu_copy(count, bottom[j]->cpu_data(), bottom_diff); @@ -136,7 +136,7 @@ void EltwiseLayer::Backward_cpu(const vector*>& top, break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.cpu_data(); - for (int index = 0; index < count; ++index) { + for (int_tp index = 0; index < count; ++index) { Dtype gradient = 0; if (mask[index] == i) { gradient += top_diff[index]; diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index fd8b0f73552..bb5e8b3335b 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -14,12 +14,12 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, - const Dtype* bottom_data_b, const int blob_idx, - Dtype* top_data, int* mask) { +__global__ void MaxForward(const int_tp nthreads, const Dtype* bottom_data_a, + const Dtype* bottom_data_b, const int_tp blob_idx, + Dtype* top_data, int_tp* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; - int maxidx = -1; + int_tp maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { @@ -41,8 +41,8 @@ __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, template void EltwiseLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - int* mask = NULL; - const int count = top[0]->count(); + int_tp* mask = NULL; + const int_tp count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); if (this->device_->backend() == BACKEND_CUDA) { @@ -51,14 +51,14 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); - for (int i = 2; i < bottom.size(); ++i) { + for (int_tp i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; @@ -69,7 +69,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, CAFFE_CUDA_NUM_THREADS)( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); - for (int i = 2; i < bottom.size(); ++i) { + for (int_tp i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( @@ -94,7 +94,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, count, (cl_mem)(bottom[0]->gpu_data()), 0, (cl_mem)(bottom[1]->gpu_data()), 0, (cl_mem)top_data, 0); - for (int i = 2; i < bottom.size(); ++i) { + for (int_tp i = 2; i < bottom.size(); ++i) { greentea_gpu_mul(this->device_->id(), count, (cl_mem)top_data, 0, (cl_mem)(bottom[i]->gpu_data()), 0, @@ -105,7 +105,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, case EltwiseParameter_EltwiseOp_SUM: { greentea_gpu_set(this->device_->id(), count, 0, (cl_mem)top_data, 0); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { greentea_gpu_axpy(this->device_->id(), count, coeffs_[i], (cl_mem)(bottom[i]->gpu_data()), @@ -127,7 +127,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); - for (int i = 2; i < bottom.size(); ++i) { + for (int_tp i = 2; i < bottom.size(); ++i) { viennacl::ocl::enqueue( oclk_max_forward(count, WrapHandle((cl_mem)(top_data), &ctx), WrapHandle((cl_mem)(bottom[i]->gpu_data()), &ctx), i-1, @@ -147,8 +147,8 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void MaxBackward(const int nthreads, const Dtype* top_diff, - const int blob_idx, const int* mask, +__global__ void MaxBackward(const int_tp nthreads, const Dtype* top_diff, + const int_tp blob_idx, const int_tp* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; @@ -164,14 +164,14 @@ template void EltwiseLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - const int* mask = NULL; - const int count = top[0]->count(); + const int_tp* mask = NULL; + const int_tp count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); @@ -179,7 +179,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; - for (int j = 0; j < bottom.size(); ++j) { + for (int_tp j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } @@ -223,7 +223,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); @@ -231,7 +231,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, case EltwiseParameter_EltwiseOp_PROD: { if (stable_prod_grad_) { bool initialized = false; - for (int j = 0; j < bottom.size(); ++j) { + for (int_tp j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } diff --git a/src/caffe/layers/embed_layer.cpp b/src/caffe/layers/embed_layer.cpp index e4891f3f148..aa8006e509c 100644 --- a/src/caffe/layers/embed_layer.cpp +++ b/src/caffe/layers/embed_layer.cpp @@ -28,7 +28,7 @@ void EmbedLayer::LayerSetUp(const vector*>& bottom, } // Initialize the weights -- // transposed from InnerProductLayer for spatial locality. - vector weight_shape(2); + vector weight_shape(2); weight_shape[0] = K_; weight_shape[1] = N_; this->blobs_[0].reset(new Blob(weight_shape)); @@ -38,7 +38,7 @@ void EmbedLayer::LayerSetUp(const vector*>& bottom, weight_filler->Fill(this->blobs_[0].get()); // If necessary, initialize and fill the bias term if (bias_term_) { - vector bias_shape(1, N_); + vector bias_shape(1, N_); this->blobs_[1].reset(new Blob(bias_shape)); shared_ptr > bias_filler(GetFiller( this->layer_param_.embed_param().bias_filler())); @@ -53,12 +53,12 @@ void EmbedLayer::Reshape(const vector*>& bottom, const vector*>& top) { // Figure out the dimensions M_ = bottom[0]->count(); - vector top_shape = bottom[0]->shape(); + vector top_shape = bottom[0]->shape(); top_shape.push_back(N_); top[0]->Reshape(top_shape); // Set up the bias multiplier if (bias_term_) { - vector bias_shape(1, M_); + vector bias_shape(1, M_); bias_multiplier_.Reshape(bias_shape); caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data()); } @@ -70,9 +70,9 @@ void EmbedLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - int index; - for (int n = 0; n < M_; ++n) { - index = static_cast(bottom_data[n]); + int_tp index; + for (int_tp n = 0; n < M_; ++n) { + index = static_cast(bottom_data[n]); DCHECK_GE(index, 0); DCHECK_LT(index, K_); DCHECK_EQ(static_cast(index), bottom_data[n]) << "non-integer input"; @@ -94,9 +94,9 @@ void EmbedLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - int index; - for (int n = 0; n < M_; ++n) { - index = static_cast(bottom_data[n]); + int_tp index; + for (int_tp n = 0; n < M_; ++n) { + index = static_cast(bottom_data[n]); DCHECK_GE(index, 0); DCHECK_LT(index, K_); DCHECK_EQ(static_cast(index), bottom_data[n]) diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu index 604b0d3c3aa..5714de0b7e0 100644 --- a/src/caffe/layers/embed_layer.cu +++ b/src/caffe/layers/embed_layer.cu @@ -20,27 +20,27 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void EmbedForward(const int nthreads, const Dtype* bottom_data, - const Dtype* weight, const int M, const int N, const int K, +__global__ void EmbedForward(const int_tp nthreads, const Dtype* bottom_data, + const Dtype* weight, const int_tp M, const int_tp N, const int_tp K, Dtype* top_data) { CUDA_KERNEL_LOOP(top_index, nthreads) { - const int n = top_index / N; - const int d = top_index % N; - const int index = static_cast(bottom_data[n]); - const int weight_index = index * N + d; + const int_tp n = top_index / N; + const int_tp d = top_index % N; + const int_tp index = static_cast(bottom_data[n]); + const int_tp weight_index = index * N + d; top_data[top_index] = weight[weight_index]; } } template -__global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data, - const Dtype* top_diff, const int M, const int N, const int K, +__global__ void EmbedBackward(const int_tp nthreads, const Dtype* bottom_data, + const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K, Dtype* weight_diff) { CUDA_KERNEL_LOOP(top_index, nthreads) { - const int n = top_index / N; - const int d = top_index % N; - const int index = static_cast(bottom_data[n]); - const int weight_index = index * N + d; + const int_tp n = top_index / N; + const int_tp d = top_index % N; + const int_tp index = static_cast(bottom_data[n]); + const int_tp weight_index = index * N + d; caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index); } } @@ -52,7 +52,7 @@ void EmbedLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); - const int count = top[0]->count(); + const int_tp count = top[0]->count(); if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -97,7 +97,7 @@ void EmbedLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input."; if (this->param_propagate_down_[0]) { - const int top_count = top[0]->count(); + const int_tp top_count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 80efa31b22c..3177475eabd 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -19,7 +19,7 @@ void EuclideanLossLayer::Reshape( template void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - int count = bottom[0]->count(); + int_tp count = bottom[0]->count(); caffe_sub( count, bottom[0]->cpu_data(), @@ -33,7 +33,7 @@ void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, template void EuclideanLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index 6311518ad11..72f839adde1 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -15,7 +15,7 @@ namespace caffe { template void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - int count = bottom[0]->count(); + int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -46,7 +46,7 @@ template void EuclideanLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); diff --git a/src/caffe/layers/exp_layer.cpp b/src/caffe/layers/exp_layer.cpp index c7e7c60cfad..dd27799555f 100644 --- a/src/caffe/layers/exp_layer.cpp +++ b/src/caffe/layers/exp_layer.cpp @@ -31,7 +31,7 @@ void ExpLayer::LayerSetUp(const vector*>& bottom, template void ExpLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); if (inner_scale_ == Dtype(1)) { @@ -49,7 +49,7 @@ template void ExpLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); diff --git a/src/caffe/layers/exp_layer.cu b/src/caffe/layers/exp_layer.cu index 85bdf2b67fc..c9e5894b24c 100644 --- a/src/caffe/layers/exp_layer.cu +++ b/src/caffe/layers/exp_layer.cu @@ -10,7 +10,7 @@ namespace caffe { template void ExpLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); @@ -54,7 +54,7 @@ void ExpLayer::Backward_gpu(const vector*>& top, if (!propagate_down[0]) { return; } - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index 7a2d91fbe19..9b087740673 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -19,12 +19,12 @@ void FilterLayer::Reshape(const vector*>& bottom, const vector*>& top) { // bottom[0...k-1] are the blobs to filter // bottom[last] is the "selector_blob" - int selector_index = bottom.size() - 1; - for (int i = 1; i < bottom[selector_index]->num_axes(); ++i) { + int_tp selector_index = bottom.size() - 1; + for (int_tp i = 1; i < bottom[selector_index]->num_axes(); ++i) { CHECK_EQ(bottom[selector_index]->shape(i), 1) << "Selector blob dimensions must be singletons (1), except the first"; } - for (int i = 0; i < bottom.size() - 1; ++i) { + for (int_tp i = 0; i < bottom.size() - 1; ++i) { CHECK_EQ(bottom[selector_index]->shape(0), bottom[i]->shape(0)) << "Each bottom should have the same 0th dimension as the selector blob"; } @@ -35,7 +35,7 @@ void FilterLayer::Reshape(const vector*>& bottom, // look for non-zero elements in bottom[0]. Items of each bottom that // have the same index as the items in bottom[0] with value == non-zero // will be forwarded - for (int item_id = 0; item_id < bottom[selector_index]->shape(0); ++item_id) { + for (int_tp item_id = 0; item_id < bottom[selector_index]->shape(0); ++item_id) { // we don't need an offset because item size == 1 const Dtype* tmp_data_selector = bottom_data_selector + item_id; if (*tmp_data_selector) { @@ -43,17 +43,17 @@ void FilterLayer::Reshape(const vector*>& bottom, } } // only filtered items will be forwarded - int new_tops_num = indices_to_forward_.size(); + int_tp new_tops_num = indices_to_forward_.size(); // init if (first_reshape_) { new_tops_num = bottom[0]->shape(0); first_reshape_ = false; } - for (int t = 0; t < top.size(); ++t) { - int num_axes = bottom[t]->num_axes(); - vector shape_top(num_axes); + for (int_tp t = 0; t < top.size(); ++t) { + int_tp num_axes = bottom[t]->num_axes(); + vector shape_top(num_axes); shape_top[0] = new_tops_num; - for (int ts = 1; ts < num_axes; ++ts) + for (int_tp ts = 1; ts < num_axes; ++ts) shape_top[ts] = bottom[t]->shape(ts); top[t]->Reshape(shape_top); } @@ -62,15 +62,15 @@ void FilterLayer::Reshape(const vector*>& bottom, template void FilterLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - int new_tops_num = indices_to_forward_.size(); + int_tp new_tops_num = indices_to_forward_.size(); // forward all filtered items for all bottoms but the Selector (bottom[last]) - for (int t = 0; t < top.size(); ++t) { + for (int_tp t = 0; t < top.size(); ++t) { const Dtype* bottom_data = bottom[t]->cpu_data(); Dtype* top_data = top[t]->mutable_cpu_data(); - int dim = bottom[t]->count() / bottom[t]->shape(0); - for (int n = 0; n < new_tops_num; ++n) { - int data_offset_top = n * dim; - int data_offset_bottom = indices_to_forward_[n] * bottom[t]->count(1); + int_tp dim = bottom[t]->count() / bottom[t]->shape(0); + for (int_tp n = 0; n < new_tops_num; ++n) { + int_tp data_offset_top = n * dim; + int_tp data_offset_bottom = indices_to_forward_[n] * bottom[t]->count(1); caffe_cpu_copy(dim, bottom_data + data_offset_bottom, top_data + data_offset_top); } @@ -84,16 +84,16 @@ void FilterLayer::Backward_cpu(const vector*>& top, LOG(FATAL) << this->type() << "Layer cannot backpropagate to filter index inputs"; } - for (int i = 0; i < top.size(); i++) { + for (int_tp i = 0; i < top.size(); i++) { // bottom[last] is the selector and never needs backpropagation // so we can iterate over top vector because top.size() == bottom.size() -1 if (propagate_down[i]) { - const int dim = top[i]->count() / top[i]->shape(0); - int next_to_backward_offset = 0; - int batch_offset = 0; - int data_offset_bottom = 0; - int data_offset_top = 0; - for (int n = 0; n < bottom[i]->shape(0); n++) { + const int_tp dim = top[i]->count() / top[i]->shape(0); + int_tp next_to_backward_offset = 0; + int_tp batch_offset = 0; + int_tp data_offset_bottom = 0; + int_tp data_offset_top = 0; + for (int_tp n = 0; n < bottom[i]->shape(0); n++) { data_offset_bottom = n * dim; if (next_to_backward_offset >= indices_to_forward_.size()) { // we already visited all items that were been forwarded, so diff --git a/src/caffe/layers/filter_layer.cu b/src/caffe/layers/filter_layer.cu index eecb8a5afc9..651961752be 100644 --- a/src/caffe/layers/filter_layer.cu +++ b/src/caffe/layers/filter_layer.cu @@ -9,15 +9,15 @@ namespace caffe { template void FilterLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - int new_tops_num = indices_to_forward_.size(); + int_tp new_tops_num = indices_to_forward_.size(); // forward all filtered items for all bottoms but the Selector (bottom[last]) - for (int t = 0; t < top.size(); ++t) { + for (int_tp t = 0; t < top.size(); ++t) { const Dtype* bottom_data = bottom[t]->gpu_data(); Dtype* top_data = top[t]->mutable_gpu_data(); - int dim = bottom[t]->count() / bottom[t]->shape(0); - for (int n = 0; n < new_tops_num; ++n) { - int data_offset_top = n * dim; - int data_offset_bottom = indices_to_forward_[n] * dim; + int_tp dim = bottom[t]->count() / bottom[t]->shape(0); + for (int_tp n = 0; n < new_tops_num; ++n) { + int_tp data_offset_top = n * dim; + int_tp data_offset_bottom = indices_to_forward_[n] * dim; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -45,19 +45,19 @@ void FilterLayer::Backward_gpu(const vector*>& top, LOG(FATAL)<< this->type() << "Layer cannot backpropagate to filter index inputs"; } - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { // bottom[last] is the selector and never needs backpropagation // so we can iterate over top vector because top.size() == bottom.size() -1 if (propagate_down[i]) { - const int dim = top[i]->count() / top[i]->shape(0); - int next_to_backward_offset = 0; - int batch_offset = 0; - int data_offset_bottom = 0; - int data_offset_top = 0; + const int_tp dim = top[i]->count() / top[i]->shape(0); + int_tp next_to_backward_offset = 0; + int_tp batch_offset = 0; + int_tp data_offset_bottom = 0; + int_tp data_offset_top = 0; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - for (int n = 0; n < bottom[i]->shape(0); ++n) { + for (int_tp n = 0; n < bottom[i]->shape(0); ++n) { if (next_to_backward_offset >= indices_to_forward_.size()) { // we already visited all items that were been forwarded, so // just set to zero remaining ones @@ -84,7 +84,7 @@ void FilterLayer::Backward_gpu(const vector*>& top, viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - for (int n = 0; n < bottom[i]->shape(0); ++n) { + for (int_tp n = 0; n < bottom[i]->shape(0); ++n) { if (next_to_backward_offset >= indices_to_forward_.size()) { // we already visited all items that were been forwarded, so // just set to zero remaining ones diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index f7e5c9c2172..f56335d15cb 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,17 +9,17 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int start_axis = bottom[0]->CanonicalAxisIndex( + const int_tp start_axis = bottom[0]->CanonicalAxisIndex( this->layer_param_.flatten_param().axis()); - const int end_axis = bottom[0]->CanonicalAxisIndex( + const int_tp end_axis = bottom[0]->CanonicalAxisIndex( this->layer_param_.flatten_param().end_axis()); - vector top_shape; - for (int i = 0; i < start_axis; ++i) { + vector top_shape; + for (int_tp i = 0; i < start_axis; ++i) { top_shape.push_back(bottom[0]->shape(i)); } - const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); + const int_tp flattened_dim = bottom[0]->count(start_axis, end_axis + 1); top_shape.push_back(flattened_dim); - for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { + for (int_tp i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { top_shape.push_back(bottom[0]->shape(i)); } top[0]->Reshape(top_shape); diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 714c6add955..7a51ec4eeb9 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -32,13 +32,13 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { LOG(FATAL) << "Failed opening HDF5 file: " << filename; } - int top_size = this->layer_param_.top_size(); + int_tp top_size = this->layer_param_.top_size(); hdf_blobs_.resize(top_size); - const int MIN_DATA_DIM = 1; - const int MAX_DATA_DIM = INT_MAX; + const int_tp MIN_DATA_DIM = 1; + const int_tp MAX_DATA_DIM = INT_MAX; - for (int i = 0; i < top_size; ++i) { + for (int_tp i = 0; i < top_size; ++i) { hdf_blobs_[i] = shared_ptr >(new Blob()); hdf5_load_nd_dataset(file_id, this->layer_param_.top(i).c_str(), MIN_DATA_DIM, MAX_DATA_DIM, hdf_blobs_[i].get()); @@ -49,14 +49,14 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { // MinTopBlobs==1 guarantees at least one top blob CHECK_GE(hdf_blobs_[0]->num_axes(), 1) << "Input must have at least 1 axis."; - const int num = hdf_blobs_[0]->shape(0); - for (int i = 1; i < top_size; ++i) { + const int_tp num = hdf_blobs_[0]->shape(0); + for (int_tp i = 1; i < top_size; ++i) { CHECK_EQ(hdf_blobs_[i]->shape(0), num); } // Default to identity permutation. data_permutation_.clear(); data_permutation_.resize(hdf_blobs_[0]->shape(0)); - for (int i = 0; i < hdf_blobs_[0]->shape(0); i++) + for (int_tp i = 0; i < hdf_blobs_[0]->shape(0); i++) data_permutation_[i] = i; // Shuffle if needed. @@ -98,7 +98,7 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, file_permutation_.clear(); file_permutation_.resize(num_files_); // Default to identity permutation. - for (int i = 0; i < num_files_; i++) { + for (int_tp i = 0; i < num_files_; i++) { file_permutation_[i] = i; } @@ -112,13 +112,13 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, current_row_ = 0; // Reshape blobs. - const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - const int top_size = this->layer_param_.top_size(); - vector top_shape; - for (int i = 0; i < top_size; ++i) { + const int_tp batch_size = this->layer_param_.hdf5_data_param().batch_size(); + const int_tp top_size = this->layer_param_.top_size(); + vector top_shape; + for (int_tp i = 0; i < top_size; ++i) { top_shape.resize(hdf_blobs_[i]->num_axes()); top_shape[0] = batch_size; - for (int j = 1; j < top_shape.size(); ++j) { + for (int_tp j = 1; j < top_shape.size(); ++j) { top_shape[j] = hdf_blobs_[i]->shape(j); } top[i]->Reshape(top_shape); @@ -128,8 +128,8 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, template void HDF5DataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - for (int i = 0; i < batch_size; ++i, ++current_row_) { + const int_tp batch_size = this->layer_param_.hdf5_data_param().batch_size(); + for (int_tp i = 0; i < batch_size; ++i, ++current_row_) { if (current_row_ == hdf_blobs_[0]->shape(0)) { if (num_files_ > 1) { ++current_file_; @@ -148,8 +148,8 @@ void HDF5DataLayer::Forward_cpu(const vector*>& bottom, if (this->layer_param_.hdf5_data_param().shuffle()) std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); } - for (int j = 0; j < this->layer_param_.top_size(); ++j) { - int data_dim = top[j]->count() / top[j]->shape(0); + for (int_tp j = 0; j < this->layer_param_.top_size(); ++j) { + int_tp data_dim = top[j]->count() / top[j]->shape(0); caffe_cpu_copy(data_dim, &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] * data_dim], &top[j]->mutable_cpu_data()[i * data_dim]); diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 6cd9bbe9302..a35bd8bdcdc 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -21,8 +21,8 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - for (int i = 0; i < batch_size; ++i, ++current_row_) { + const int_tp batch_size = this->layer_param_.hdf5_data_param().batch_size(); + for (int_tp i = 0; i < batch_size; ++i, ++current_row_) { if (current_row_ == hdf_blobs_[0]->shape(0)) { if (num_files_ > 1) { current_file_ += 1; @@ -42,8 +42,8 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); } - for (int j = 0; j < this->layer_param_.top_size(); ++j) { - int data_dim = top[j]->count() / top[j]->shape(0); + for (int_tp j = 0; j < this->layer_param_.top_size(); ++j) { + int_tp data_dim = top[j]->count() / top[j]->shape(0); caffe_copy( data_dim, &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index 9e8b5abba18..5b7e56c2b87 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -49,10 +49,10 @@ void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, bottom[0]->height(), bottom[0]->width()); label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), bottom[1]->height(), bottom[1]->width()); - const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); - const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + const int_tp data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int_tp label_datum_dim = bottom[1]->count() / bottom[1]->num(); - for (int i = 0; i < bottom[0]->num(); ++i) { + for (int_tp i = 0; i < bottom[0]->num(); ++i) { caffe_cpu_copy(data_datum_dim, &bottom[0]->cpu_data()[i * data_datum_dim], &data_blob_.mutable_cpu_data()[i * data_datum_dim]); caffe_cpu_copy(label_datum_dim, &bottom[1]->cpu_data()[i * label_datum_dim], diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index b888c46f7e9..5b744c7788d 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -21,10 +21,10 @@ void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, bottom[0]->height(), bottom[0]->width()); label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), bottom[1]->height(), bottom[1]->width()); - const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); - const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + const int_tp data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int_tp label_datum_dim = bottom[1]->count() / bottom[1]->num(); - for (int i = 0; i < bottom[0]->num(); ++i) { + for (int_tp i = 0; i < bottom[0]->num(); ++i) { caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], &data_blob_.mutable_cpu_data()[i * data_datum_dim]); caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], diff --git a/src/caffe/layers/hinge_loss_layer.cpp b/src/caffe/layers/hinge_loss_layer.cpp index ab94b84ccc4..e3fcdc7f77c 100644 --- a/src/caffe/layers/hinge_loss_layer.cpp +++ b/src/caffe/layers/hinge_loss_layer.cpp @@ -16,16 +16,16 @@ void HingeLossLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const Dtype* label = bottom[1]->cpu_data(); - int num = bottom[0]->num(); - int count = bottom[0]->count(); - int dim = count / num; + int_tp num = bottom[0]->num(); + int_tp count = bottom[0]->count(); + int_tp dim = count / num; caffe_cpu_copy(count, bottom_data, bottom_diff); - for (int i = 0; i < num; ++i) { - bottom_diff[i * dim + static_cast(label[i])] *= -1; + for (int_tp i = 0; i < num; ++i) { + bottom_diff[i * dim + static_cast(label[i])] *= -1; } - for (int i = 0; i < num; ++i) { - for (int j = 0; j < dim; ++j) { + for (int_tp i = 0; i < num; ++i) { + for (int_tp j = 0; j < dim; ++j) { bottom_diff[i * dim + j] = std::max( Dtype(0), 1 + bottom_diff[i * dim + j]); } @@ -53,12 +53,12 @@ void HingeLossLayer::Backward_cpu(const vector*>& top, if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const Dtype* label = bottom[1]->cpu_data(); - int num = bottom[0]->num(); - int count = bottom[0]->count(); - int dim = count / num; + int_tp num = bottom[0]->num(); + int_tp count = bottom[0]->count(); + int_tp dim = count / num; - for (int i = 0; i < num; ++i) { - bottom_diff[i * dim + static_cast(label[i])] *= -1; + for (int_tp i = 0; i < num; ++i) { + bottom_diff[i * dim + static_cast(label[i])] *= -1; } const Dtype loss_weight = top[0]->cpu_diff()[0]; diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 595c9dbbe5e..68905e5442c 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -12,15 +12,15 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { ConvolutionParameter conv_param = this->layer_param_.convolution_param(); force_nd_im2col_ = conv_param.force_nd_im2col(); - const int input_num_dims = bottom[0]->shape().size(); + const int_tp input_num_dims = bottom[0]->shape().size(); channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); - const int first_spatial_dim = channel_axis_ + 1; + const int_tp first_spatial_dim = channel_axis_ + 1; num_spatial_axes_ = input_num_dims - first_spatial_dim; CHECK_GE(num_spatial_axes_, 1); - vector dim_blob_shape(1, num_spatial_axes_); + vector dim_blob_shape(1, num_spatial_axes_); // Setup filter kernel dimensions (kernel_shape_). kernel_shape_.Reshape(dim_blob_shape); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { CHECK_EQ(num_spatial_axes_, 2) << "kernel_h & kernel_w can only be used for 2D convolution."; @@ -29,22 +29,22 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, kernel_shape_data[0] = conv_param.kernel_h(); kernel_shape_data[1] = conv_param.kernel_w(); } else { - const int num_kernel_dims = conv_param.kernel_size_size(); + const int_tp num_kernel_dims = conv_param.kernel_size_size(); CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_) << "kernel_size must be specified once, or once per spatial dimension " << "(kernel_size specified " << num_kernel_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { kernel_shape_data[i] = conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); } } - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; } // Setup stride dimensions (stride_). stride_.Reshape(dim_blob_shape); - int* stride_data = stride_.mutable_cpu_data(); + int_tp* stride_data = stride_.mutable_cpu_data(); if (conv_param.has_stride_h() || conv_param.has_stride_w()) { CHECK_EQ(num_spatial_axes_, 2) << "stride_h & stride_w can only be used for 2D convolution."; @@ -53,14 +53,14 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, stride_data[0] = conv_param.stride_h(); stride_data[1] = conv_param.stride_w(); } else { - const int num_stride_dims = conv_param.stride_size(); + const int_tp num_stride_dims = conv_param.stride_size(); CHECK(num_stride_dims == 0 || num_stride_dims == 1 || num_stride_dims == num_spatial_axes_) << "stride must be specified once, or once per spatial dimension " << "(stride specified " << num_stride_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultStride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultStride = 1; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : conv_param.stride((num_stride_dims == 1) ? 0 : i); CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; @@ -68,7 +68,7 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, } // Setup pad dimensions (pad_). pad_.Reshape(dim_blob_shape); - int* pad_data = pad_.mutable_cpu_data(); + int_tp* pad_data = pad_.mutable_cpu_data(); if (conv_param.has_pad_h() || conv_param.has_pad_w()) { CHECK_EQ(num_spatial_axes_, 2) << "pad_h & pad_w can only be used for 2D convolution."; @@ -77,14 +77,14 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, pad_data[0] = conv_param.pad_h(); pad_data[1] = conv_param.pad_w(); } else { - const int num_pad_dims = conv_param.pad_size(); + const int_tp num_pad_dims = conv_param.pad_size(); CHECK(num_pad_dims == 0 || num_pad_dims == 1 || num_pad_dims == num_spatial_axes_) << "pad must be specified once, or once per spatial dimension " << "(pad specified " << num_pad_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultPad = 0; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultPad = 0; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : conv_param.pad((num_pad_dims == 1) ? 0 : i); } @@ -94,14 +94,14 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, template void Im2colLayer::Reshape(const vector*>& bottom, const vector*>& top) { - vector top_shape = bottom[0]->shape(); - const int* kernel_shape_data = kernel_shape_.cpu_data(); - const int* stride_data = stride_.cpu_data(); - const int* pad_data = pad_.cpu_data(); - for (int i = 0; i < num_spatial_axes_; ++i) { + vector top_shape = bottom[0]->shape(); + const int_tp* kernel_shape_data = kernel_shape_.cpu_data(); + const int_tp* stride_data = stride_.cpu_data(); + const int_tp* pad_data = pad_.cpu_data(); + for (int_tp i = 0; i < num_spatial_axes_; ++i) { top_shape[channel_axis_] *= kernel_shape_data[i]; - const int input_dim = bottom[0]->shape(channel_axis_ + i + 1); - const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) + const int_tp input_dim = bottom[0]->shape(channel_axis_ + i + 1); + const int_tp output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) / stride_data[i] + 1; top_shape[channel_axis_ + i + 1] = output_dim; } @@ -118,7 +118,7 @@ void Im2colLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - for (int n = 0; n < num_; ++n) { + for (int_tp n = 0; n < num_; ++n) { DCHECK_EQ(bottom[0]->shape().size() - channel_axis_, num_spatial_axes_ + 1); DCHECK_EQ(top[0]->shape().size() - channel_axis_, num_spatial_axes_ + 1); DCHECK_EQ(kernel_shape_.count(), num_spatial_axes_); @@ -147,7 +147,7 @@ void Im2colLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - for (int n = 0; n < num_; ++n) { + for (int_tp n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { col2im_cpu(top_diff + n * top_dim_, channels_, bottom[0]->shape(channel_axis_ + 1), diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 1f326604d6a..0c54394cbb4 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -18,11 +18,11 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int num_kernels = channels_ * top[0]->count(channel_axis_ + 1); + const int_tp num_kernels = channels_ * top[0]->count(channel_axis_ + 1); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - for (int n = 0; n < num_; ++n) { + for (int_tp n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { im2col_gpu(bottom_data + n * bottom_dim_, channels_, bottom[0]->shape(channel_axis_ + 1), @@ -47,7 +47,7 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); - for (int n = 0; n < num_; ++n) { + for (int_tp n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { greentea_im2col_gpu(&program, &ctx, (cl_mem) bottom_data, n * bottom_dim_, channels_, @@ -83,7 +83,7 @@ void Im2colLayer::Backward_gpu(const vector*>& top, if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - for (int n = 0; n < num_; ++n) { + for (int_tp n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { col2im_gpu(top_diff + n * top_dim_, channels_, bottom[0]->shape(channel_axis_ + 1), @@ -109,7 +109,7 @@ void Im2colLayer::Backward_gpu(const vector*>& top, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); - for (int n = 0; n < top[0]->num(); ++n) { + for (int_tp n = 0; n < top[0]->num(); ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { greentea_col2im_gpu(&program, &ctx, (cl_mem) top_diff, n * top_dim_, channels_, diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 3d2190f8bbb..35dd5c399b8 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -24,8 +24,8 @@ ImageDataLayer::~ImageDataLayer() { template void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const vector*>& top) { - const int new_height = this->layer_param_.image_data_param().new_height(); - const int new_width = this->layer_param_.image_data_param().new_width(); + const int_tp new_height = this->layer_param_.image_data_param().new_height(); + const int_tp new_width = this->layer_param_.image_data_param().new_width(); const bool is_color = this->layer_param_.image_data_param().is_color(); string root_folder = this->layer_param_.image_data_param().root_folder(); @@ -37,7 +37,7 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, LOG(INFO) << "Opening file " << source; std::ifstream infile(source.c_str()); string filename; - int label; + int_tp label; while (infile >> filename >> label) { lines_.push_back(std::make_pair(filename, label)); } @@ -45,7 +45,7 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, if (this->layer_param_.image_data_param().shuffle()) { // randomly shuffle data LOG(INFO) << "Shuffling data"; - const unsigned int prefetch_rng_seed = caffe_rng_rand(); + const uint_tp prefetch_rng_seed = caffe_rng_rand(); prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); ShuffleImages(); } @@ -54,7 +54,7 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, lines_id_ = 0; // Check if we would need to randomly skip a few data points if (this->layer_param_.image_data_param().rand_skip()) { - unsigned int skip = caffe_rng_rand() % + uint_tp skip = caffe_rng_rand() % this->layer_param_.image_data_param().rand_skip(); LOG(INFO) << "Skipping first " << skip << " data points."; CHECK_GT(lines_.size(), skip) << "Not enough points to skip"; @@ -65,13 +65,13 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, new_height, new_width, is_color); CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; // Use data_transformer to infer the expected blob shape from a cv_image. - vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); this->transformed_data_.Reshape(top_shape); // Reshape prefetch_data and top[0] according to the batch_size. - const int batch_size = this->layer_param_.image_data_param().batch_size(); + const int_tp batch_size = this->layer_param_.image_data_param().batch_size(); CHECK_GT(batch_size, 0) << "Positive batch size required"; top_shape[0] = batch_size; - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < this->PREFETCH_COUNT; ++i) { this->prefetch_[i].data_.Reshape(top_shape); } top[0]->Reshape(top_shape); @@ -80,9 +80,9 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label - vector label_shape(1, batch_size); + vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < this->PREFETCH_COUNT; ++i) { this->prefetch_[i].label_.Reshape(label_shape); } } @@ -105,9 +105,9 @@ void ImageDataLayer::load_batch(Batch* batch) { CHECK(batch->data_.count()); CHECK(this->transformed_data_.count()); ImageDataParameter image_data_param = this->layer_param_.image_data_param(); - const int batch_size = image_data_param.batch_size(); - const int new_height = image_data_param.new_height(); - const int new_width = image_data_param.new_width(); + const int_tp batch_size = image_data_param.batch_size(); + const int_tp new_height = image_data_param.new_height(); + const int_tp new_width = image_data_param.new_width(); const bool is_color = image_data_param.is_color(); string root_folder = image_data_param.root_folder(); @@ -117,7 +117,7 @@ void ImageDataLayer::load_batch(Batch* batch) { new_height, new_width, is_color); CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; // Use data_transformer to infer the expected blob shape from a cv_img. - vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); this->transformed_data_.Reshape(top_shape); // Reshape batch according to the batch_size. top_shape[0] = batch_size; @@ -127,8 +127,8 @@ void ImageDataLayer::load_batch(Batch* batch) { Dtype* prefetch_label = batch->label_.mutable_cpu_data(); // datum scales - const int lines_size = lines_.size(); - for (int item_id = 0; item_id < batch_size; ++item_id) { + const int_tp lines_size = lines_.size(); + for (int_tp item_id = 0; item_id < batch_size; ++item_id) { // get a blob timer.Start(); CHECK_GT(lines_size, lines_id_); @@ -138,7 +138,7 @@ void ImageDataLayer::load_batch(Batch* batch) { read_time += timer.MicroSeconds(); timer.Start(); // Apply transformations (mirror, crop...) to the image - int offset = batch->data_.offset(item_id); + int_tp offset = batch->data_.offset(item_id); this->transformed_data_.set_cpu_data(prefetch_data + offset); this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); trans_time += timer.MicroSeconds(); diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index a1e0b40de0e..20c2cb8a5f4 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -37,8 +37,8 @@ void InfogainLossLayer::Reshape( CHECK_EQ(bottom[1]->channels(), 1); CHECK_EQ(bottom[1]->height(), 1); CHECK_EQ(bottom[1]->width(), 1); - const int num = bottom[0]->num(); - const int dim = bottom[0]->count() / num; + const int_tp num = bottom[0]->num(); + const int_tp dim = bottom[0]->count() / num; CHECK_EQ(infogain->num(), 1); CHECK_EQ(infogain->channels(), 1); CHECK_EQ(infogain->height(), dim); @@ -57,12 +57,12 @@ void InfogainLossLayer::Forward_cpu(const vector*>& bottom, } else { infogain_mat = bottom[2]->cpu_data(); } - int num = bottom[0]->num(); - int dim = bottom[0]->count() / bottom[0]->num(); + int_tp num = bottom[0]->num(); + int_tp dim = bottom[0]->count() / bottom[0]->num(); Dtype loss = 0; - for (int i = 0; i < num; ++i) { - int label = static_cast(bottom_label[i]); - for (int j = 0; j < dim; ++j) { + for (int_tp i = 0; i < num; ++i) { + int_tp label = static_cast(bottom_label[i]); + for (int_tp j = 0; j < dim; ++j) { Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); loss -= infogain_mat[label * dim + j] * log(prob); } @@ -92,12 +92,12 @@ void InfogainLossLayer::Backward_cpu(const vector*>& top, infogain_mat = bottom[2]->cpu_data(); } Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - int num = bottom[0]->num(); - int dim = bottom[0]->count() / bottom[0]->num(); + int_tp num = bottom[0]->num(); + int_tp dim = bottom[0]->count() / bottom[0]->num(); const Dtype scale = - top[0]->cpu_diff()[0] / num; - for (int i = 0; i < num; ++i) { - const int label = static_cast(bottom_label[i]); - for (int j = 0; j < dim; ++j) { + for (int_tp i = 0; i < num; ++i) { + const int_tp label = static_cast(bottom_label[i]); + for (int_tp j = 0; j < dim; ++j) { Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); bottom_diff[i * dim + j] = scale * infogain_mat[label * dim + j] / prob; } diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index b20c92e7d5a..11d05358b5e 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -12,10 +12,10 @@ namespace caffe { template void InnerProductLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - const int num_output = this->layer_param_.inner_product_param().num_output(); + const int_tp num_output = this->layer_param_.inner_product_param().num_output(); bias_term_ = this->layer_param_.inner_product_param().bias_term(); N_ = num_output; - const int axis = bottom[0]->CanonicalAxisIndex( + const int_tp axis = bottom[0]->CanonicalAxisIndex( this->layer_param_.inner_product_param().axis()); // Dimensions starting from "axis" are "flattened" into a single // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W), @@ -31,7 +31,7 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, this->blobs_.resize(1); } // Intialize the weight - vector weight_shape(2); + vector weight_shape(2); weight_shape[0] = N_; weight_shape[1] = K_; this->blobs_[0].reset(new Blob(weight_shape, @@ -42,7 +42,7 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, weight_filler->Fill(this->blobs_[0].get()); // If necessary, intiialize and fill the bias term if (bias_term_) { - vector bias_shape(1, N_); + vector bias_shape(1, N_); this->blobs_[1].reset(new Blob(bias_shape, this->device_)); shared_ptr > bias_filler(GetFiller( this->layer_param_.inner_product_param().bias_filler())); @@ -56,9 +56,9 @@ template void InnerProductLayer::Reshape(const vector*>& bottom, const vector*>& top) { // Figure out the dimensions - const int axis = bottom[0]->CanonicalAxisIndex( + const int_tp axis = bottom[0]->CanonicalAxisIndex( this->layer_param_.inner_product_param().axis()); - const int new_K = bottom[0]->count(axis); + const int_tp new_K = bottom[0]->count(axis); CHECK_EQ(K_, new_K) << "Input size incompatible with inner product parameters."; // The first "axis" dimensions are independent inner products; the total @@ -66,13 +66,13 @@ void InnerProductLayer::Reshape(const vector*>& bottom, M_ = bottom[0]->count(0, axis); // The top shape will be the bottom shape with the flattened axes dropped, // and replaced by a single axis with dimension num_output (N_). - vector top_shape = bottom[0]->shape(); + vector top_shape = bottom[0]->shape(); top_shape.resize(axis + 1); top_shape[axis] = N_; top[0]->Reshape(top_shape); // Set up the bias multiplier if (bias_term_) { - vector bias_shape(1, M_); + vector bias_shape(1, M_); bias_multiplier_.Reshape(bias_shape); caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data()); } diff --git a/src/caffe/layers/log_layer.cpp b/src/caffe/layers/log_layer.cpp index 3ca25d0946f..43da9788371 100644 --- a/src/caffe/layers/log_layer.cpp +++ b/src/caffe/layers/log_layer.cpp @@ -35,7 +35,7 @@ void LogLayer::LayerSetUp(const vector*>& bottom, template void LogLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { @@ -59,7 +59,7 @@ template void LogLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); diff --git a/src/caffe/layers/log_layer.cu b/src/caffe/layers/log_layer.cu index cc5b5bab3cd..5a9a85b8037 100644 --- a/src/caffe/layers/log_layer.cu +++ b/src/caffe/layers/log_layer.cu @@ -10,7 +10,7 @@ namespace caffe { template void LogLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); @@ -69,7 +69,7 @@ void LogLayer::Backward_gpu(const vector*>& top, if (!propagate_down[0]) { return; } - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index 3496a5c2a8a..d67a2ef2125 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -24,7 +24,7 @@ void LossLayer::Reshape( const vector*>& bottom, const vector*>& top) { CHECK_EQ(bottom[0]->num(), bottom[1]->num()) << "The data and label should have the same number."; - vector loss_shape(0); // Loss layers output a scalar; 0 axes. + vector loss_shape(0); // Loss layers output a scalar; 0 axes. top[0]->Reshape(loss_shape); } diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 97d86681873..e6ac8abe52c 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -112,7 +112,7 @@ void LRNLayer::CrossChannelForward_cpu( Dtype* top_data = top[0]->mutable_cpu_data(); Dtype* scale_data = scale_.mutable_cpu_data(); // start with the constant value - for (int i = 0; i < scale_.count(); ++i) { + for (int_tp i = 0; i < scale_.count(); ++i) { scale_data[i] = k_; } Blob padded_square(1, channels_ + size_ - 1, height_, width_, @@ -121,17 +121,17 @@ void LRNLayer::CrossChannelForward_cpu( caffe_set(padded_square.count(), Dtype(0), padded_square_data); Dtype alpha_over_size = alpha_ / size_; // go through the images - for (int n = 0; n < num_; ++n) { + for (int_tp n = 0; n < num_; ++n) { // compute the padded square caffe_sqr(channels_ * height_ * width_, bottom_data + bottom[0]->offset(n), padded_square_data + padded_square.offset(0, pre_pad_)); // Create the first channel scale - for (int c = 0; c < size_; ++c) { + for (int_tp c = 0; c < size_; ++c) { caffe_axpy(height_ * width_, alpha_over_size, padded_square_data + padded_square.offset(0, c), scale_data + scale_.offset(n, 0)); } - for (int c = 1; c < channels_; ++c) { + for (int_tp c = 1; c < channels_; ++c) { // copy previous scale caffe_cpu_copy(height_ * width_, scale_data + scale_.offset(n, c - 1), @@ -202,9 +202,9 @@ void LRNLayer::CrossChannelBackward_cpu( caffe_mul(scale_.count(), top_diff, bottom_diff, bottom_diff); // go through individual data - int inverse_pre_pad = size_ - (size_ + 1) / 2; - for (int n = 0; n < num_; ++n) { - int block_offset = scale_.offset(n); + int_tp inverse_pre_pad = size_ - (size_ + 1) / 2; + for (int_tp n = 0; n < num_; ++n) { + int_tp block_offset = scale_.offset(n); // first, compute diff_i * y_i / s_i caffe_mul( channels_ * height_ * width_, top_diff + block_offset, @@ -217,12 +217,12 @@ void LRNLayer::CrossChannelBackward_cpu( padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); // Now, compute the accumulated ratios and the bottom diff caffe_set(accum_ratio.count(), Dtype(0), accum_ratio_data); - for (int c = 0; c < size_ - 1; ++c) { + for (int_tp c = 0; c < size_ - 1; ++c) { caffe_axpy(height_ * width_, 1., padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); } - for (int c = 0; c < channels_; ++c) { + for (int_tp c = 0; c < channels_; ++c) { caffe_axpy( height_ * width_, 1., padded_ratio_data + padded_ratio.offset(0, c + size_ - 1), diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index adb50b27e7f..3899404a00d 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -8,23 +8,23 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void LRNFillScale(const int nthreads, const Dtype* const in, - const int num, const int channels, - const int height, const int width, const int size, +__global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, const int_tp size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp n = index / width / height; + const int_tp offset = (n * channels * height + h) * width + w; + const int_tp step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; - int head = 0; - const int pre_pad = (size - 1) / 2; - const int post_pad = size - pre_pad - 1; + int_tp head = 0; + const int_tp pre_pad = (size - 1) / 2; + const int_tp post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values @@ -73,7 +73,7 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, // TODO: check if it would be faster to just put it into the previous kernel. #ifdef USE_CUDA template -__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, +__global__ void LRNComputeOutput(const int_tp nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { @@ -94,7 +94,7 @@ void LRNLayer::CrossChannelForward_gpu( #ifdef USE_CUDA // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. - int n_threads = num_ * height_ * width_; + int_tp n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( @@ -117,7 +117,7 @@ void LRNLayer::CrossChannelForward_gpu( viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); - int n_threads = num_ * height_ * width_; + int_tp n_threads = num_ * height_ * width_; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_fill_scale")); viennacl::ocl::enqueue( @@ -160,31 +160,31 @@ void LRNLayer::Backward_gpu(const vector*>& top, #ifdef USE_CUDA template -__global__ void LRNComputeDiff(const int nthreads, +__global__ void LRNComputeDiff(const int_tp nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, - const Dtype* const top_diff, const int num, - const int channels, const int height, - const int width, const int size, + const Dtype* const top_diff, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp n = index / width / height; + const int_tp offset = (n * channels * height + h) * width + w; + const int_tp step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; - int head = 0; - const int pre_pad = size - (size + 1) / 2; - const int post_pad = size - pre_pad - 1; + int_tp head = 0; + const int_tp pre_pad = size - (size + 1) / 2; + const int_tp post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { @@ -224,7 +224,7 @@ template void LRNLayer::CrossChannelBackward_gpu( const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - int n_threads = num_ * height_ * width_; + int_tp n_threads = num_ * height_ * width_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index e9ce44bff42..4c2f510b26a 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -45,9 +45,9 @@ class MalisAffinityGraphCompare { // margin: sq-sq loss margin [0.3] template void MalisLossLayer::Malis(const Dtype* conn_data, - const int conn_num_dims, - const int* conn_dims, const int* nhood_data, - const int* nhood_dims, const Dtype* seg_data, + const int_tp conn_num_dims, + const int_tp* conn_dims, const int_tp* nhood_data, + const int_tp* nhood_dims, const Dtype* seg_data, const bool pos, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, @@ -110,9 +110,9 @@ void MalisLossLayer::Malis(const Dtype* conn_data, /* Sort all the edges in increasing order of weight */ std::vector pqueue( - conn_dims[3] * std::max((conn_dims[0] - 1), 1) - * std::max((conn_dims[1] - 1), 1) - * std::max((conn_dims[2] - 1), 1)); + conn_dims[3] * std::max((conn_dims[0] - 1), 1L) + * std::max((conn_dims[1] - 1), 1L) + * std::max((conn_dims[2] - 1), 1L)); int64_t j = 0; // Loop over #edges for (int64_t d = 0, i = 0; d < conn_dims[0]; ++d) { @@ -274,7 +274,7 @@ void MalisLossLayer::Reshape(const vector*>& bottom, // batch, channels (edges), Z, Y, X => 3D affinity // batch, channels (edges), Y, X => 2D affinity // batch, channels (edges), X => 1D affinity - vector shape = bottom[0]->shape(); + vector shape = bottom[0]->shape(); conn_dims_.clear(); nhood_dims_.clear(); @@ -312,12 +312,12 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, #ifdef CAFFE_MALIS_DEBUG // This is for debugging only: { - std::vector labels; + std::vector labels; const Dtype* seg_data = bottom[2]->cpu_data(); - for (int i = 0; i < bottom[2]->height() * bottom[2]->width(); ++i) { - int val = static_cast(seg_data[i]); + for (int_tp i = 0; i < bottom[2]->height() * bottom[2]->width(); ++i) { + int_tp val = static_cast(seg_data[i]); bool found = false; - for (int j = 0; j < labels.size(); ++j) { + for (int_tp j = 0; j < labels.size(); ++j) { if (val == labels[j]) { found = true; } @@ -329,7 +329,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, std::vector colors; - for (int i = 0; i < labels.size(); ++i) { + for (int_tp i = 0; i < labels.size(); ++i) { unsigned char r = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT unsigned char g = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT unsigned char b = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT @@ -343,13 +343,13 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, const Dtype* imgdata = bottom[2]->cpu_data(); - for (int i = 0; i < bottom[1]->height() * bottom[1]->width(); ++i) { - int val = imgdata[i]; + for (int_tp i = 0; i < bottom[1]->height() * bottom[1]->width(); ++i) { + int_tp val = imgdata[i]; if (val == 0) { output.at(i) = cv::Vec3b(0, 0, 0); continue; } - for (int j = 0; j < labels.size(); ++j) { + for (int_tp j = 0; j < labels.size(); ++j) { if (val == labels[j]) { output.at(i) = colors[j]; } @@ -363,7 +363,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, nhood_data_.clear(); if (bottom.size() == 4) { // Custom edges - for (int i = 0; i < nedges_; ++i) { + for (int_tp i = 0; i < nedges_; ++i) { // Z edge direction nhood_data_.push_back(bottom[3]->cpu_data()[i * 3 + 0]); // Y edge direction @@ -376,7 +376,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, // 1 edge: +X (0,0,1) // 2 edges: +Y, +X (0,1,0); (0,0,1) // 3 edges: +Z, +Y, +X (1,0,0); (0,1,0); (0,0,1) - for (int i = 3 - nedges_; i < 3; ++i) { + for (int_tp i = 3 - nedges_; i < 3; ++i) { nhood_data_.push_back((i + 0) % 3 == 0 ? 1 : 0); nhood_data_.push_back((i + 1) % 3 == 0 ? 1 : 0); nhood_data_.push_back((i + 2) % 3 == 0 ? 1 : 0); @@ -393,20 +393,20 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, Dtype* affinity_data_neg = affinity_neg_.mutable_cpu_data(); #pragma omp parallel for - for (int i = 0; i < bottom[0]->count(); ++i) { + for (int_tp i = 0; i < bottom[0]->count(); ++i) { affinity_data_pos[i] = std::min(affinity_prob[i], affinity[i]); affinity_data_neg[i] = std::max(affinity_prob[i], affinity[i]); } - size_t batch_offset = 1; - for (int i = 1; i < bottom[0]->shape().size(); ++i) { + uint_tp batch_offset = 1; + for (int_tp i = 1; i < bottom[0]->shape().size(); ++i) { batch_offset *= bottom[0]->shape()[i]; } Dtype loss = 0; #pragma omp parallel for reduction(+:loss) - for (int batch = 0; batch < bottom[0]->shape()[0]; ++batch) { + for (int_tp batch = 0; batch < bottom[0]->shape()[0]; ++batch) { Dtype loss_out = 0; Dtype classerr_out = 0; Dtype rand_index_out = 0; @@ -453,7 +453,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, caffe_set(bottom[0]->count(), Dtype(0.0), bottom_diff); #pragma omp parallel for - for (int i = 0; i < bottom[0]->count(); ++i) { + for (int_tp i = 0; i < bottom[0]->count(); ++i) { bottom_diff[i] = dloss_pos_data[i] + dloss_neg_data[i]; } } diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index 7d32f566105..fd0a8ea927c 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -25,7 +25,7 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, // New ND parameters if (mem_param.dim_size() > 0) { shape_.clear(); - for (int i = 0; i < mem_param.dim_size(); ++i) { + for (int_tp i = 0; i < mem_param.dim_size(); ++i) { shape_.push_back(mem_param.dim(i)); } } @@ -34,7 +34,7 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, label_shape_.push_back(shape_[0]); size_ = 1; // All sizes except the batch index - for (int i = 1; i < shape_.size(); ++i) { + for (int_tp i = 1; i < shape_.size(); ++i) { size_ *= shape_[i]; label_shape_.push_back(1); } @@ -53,21 +53,21 @@ template void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { CHECK(!has_new_data_) << "Can't add data until current data has been consumed."; - size_t num = datum_vector.size(); + uint_tp num = datum_vector.size(); CHECK_GT(num, 0)<< "There is no datum to add."; CHECK_EQ(num % shape_[0], 0)<< "The added data must be a multiple of the batch size."; - vector added_shape = shape_; + vector added_shape = shape_; added_shape[0] = num; added_data_.Reshape(added_shape); - vector added_label_shape = label_shape_; + vector added_label_shape = label_shape_; added_label_shape[0] = num; added_label_.Reshape(added_label_shape); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(datum_vector, &added_data_); // Copy Labels Dtype* top_label = added_label_.mutable_cpu_data(); - for (int item_id = 0; item_id < num; ++item_id) { + for (int_tp item_id = 0; item_id < num; ++item_id) { top_label[item_id] = datum_vector[item_id].label(); } // num_images == batch_size_ @@ -79,24 +79,24 @@ void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { #ifdef USE_OPENCV template void MemoryDataLayer::AddMatVector(const vector& mat_vector, - const vector& labels) { - size_t num = mat_vector.size(); + const vector& labels) { + uint_tp num = mat_vector.size(); CHECK(!has_new_data_) << "Can't add mat until current data has been consumed."; CHECK_GT(num, 0) << "There is no mat to add"; CHECK_EQ(num % shape_[0], 0) << "The added data must be a multiple of the batch size."; - vector added_shape = shape_; + vector added_shape = shape_; added_shape[0] = num; added_data_.Reshape(added_shape); - vector added_label_shape = label_shape_; + vector added_label_shape = label_shape_; added_label_shape[0] = num; added_label_.Reshape(added_label_shape); // Apply data transformations (mirror, scale, crop...) this->data_transformer_->Transform(mat_vector, &added_data_); // Copy Labels Dtype* top_label = added_label_.mutable_cpu_data(); - for (int item_id = 0; item_id < num; ++item_id) { + for (int_tp item_id = 0; item_id < num; ++item_id) { top_label[item_id] = labels[item_id]; } // num_images == batch_size_ @@ -107,7 +107,7 @@ void MemoryDataLayer::AddMatVector(const vector& mat_vector, #endif // USE_OPENCV template -void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { +void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int_tp n) { CHECK(data); CHECK(labels); CHECK_EQ(n % shape_[0], 0)<< "n must be a multiple of batch size"; @@ -123,7 +123,7 @@ void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { } template -void MemoryDataLayer::set_batch_size(int new_size) { +void MemoryDataLayer::set_batch_size(int_tp new_size) { CHECK(!has_new_data_) << "Can't change batch_size until current data has been consumed."; shape_[0] = new_size; diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index 25ee3b09146..e18a5caa900 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -20,10 +20,10 @@ void MergeCropLayer::LayerSetUp(const vector*>& bottom, if (this->layer_param_.has_mergecrop_param()) { MergeCropParameter mergecrop_param = this->layer_param_.mergecrop_param(); - for (int i = 0; i < mergecrop_param.forward_size(); ++i) { + for (int_tp i = 0; i < mergecrop_param.forward_size(); ++i) { forward_[i] = mergecrop_param.forward(i); } - for (int i = 0; i < mergecrop_param.backward_size(); ++i) { + for (int_tp i = 0; i < mergecrop_param.backward_size(); ++i) { backward_[i] = mergecrop_param.backward(i); } } @@ -38,10 +38,10 @@ void MergeCropLayer::Reshape(const vector*>& bottom, CHECK_EQ(bottom[0]->num(), bottom[1]->num()); // All channels of both inputs are copied - int channels = bottom[0]->channels() + bottom[1]->channels(); + int_tp channels = bottom[0]->channels() + bottom[1]->channels(); // Spatial of the smaller input, which should be input 0 - vector top_shape = bottom[0]->shape(); + vector top_shape = bottom[0]->shape(); top_shape[1] = channels; top[0]->Reshape(top_shape); @@ -49,10 +49,10 @@ void MergeCropLayer::Reshape(const vector*>& bottom, shape_a_.Reshape(1, 1, 1, top_shape.size() - 2); shape_b_.Reshape(1, 1, 1, top_shape.size() - 2); - int* shape_a_data = shape_a_.mutable_cpu_data(); - int* shape_b_data = shape_b_.mutable_cpu_data(); + int_tp* shape_a_data = shape_a_.mutable_cpu_data(); + int_tp* shape_b_data = shape_b_.mutable_cpu_data(); - for (int i = 0; i < top_shape.size() - 2; ++i) { + for (int_tp i = 0; i < top_shape.size() - 2; ++i) { shape_a_data[i] = bottom[0]->shape()[i + 2]; shape_b_data[i] = bottom[1]->shape()[i + 2]; } diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index a208ce20993..d231d5ec51e 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -13,46 +13,46 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void CopyForward(const int nthreads, const int dims, +__global__ void CopyForward(const int_tp nthreads, const int_tp dims, const Dtype* bottom_a, const bool forward_a, const Dtype* bottom_b, const bool forward_b, - Dtype* top, const int num, const int channels_a, - const int channels_b, const int* shape_a, - const int* shape_b) { - int pad[6]; // NOLINT(runtime/arrays) - int tmp_idx[6]; // NOLINT(runtime/arrays) - int size_a = 1; - int size_b = 1; - - for (int i = 0; i < dims; ++i) { + Dtype* top, const int_tp num, const int_tp channels_a, + const int_tp channels_b, const int_tp* shape_a, + const int_tp* shape_b) { + int_tp pad[6]; // NOLINT(runtime/arrays) + int_tp tmp_idx[6]; // NOLINT(runtime/arrays) + int_tp size_a = 1; + int_tp size_b = 1; + + for (int_tp i = 0; i < dims; ++i) { pad[i] = (shape_b[i] - shape_a[i]) / 2; size_a *= shape_a[i]; size_b *= shape_b[i]; } CUDA_KERNEL_LOOP(index, nthreads) { - int batch_id = index / ((channels_a + channels_b) * size_a); - int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) + int_tp batch_id = index / ((channels_a + channels_b) * size_a); + int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) / (channels_a * size_a)) % 2; - int counter = index; - for (int i = dims - 1; i >= 0; --i) { + int_tp counter = index; + for (int_tp i = dims - 1; i >= 0; --i) { tmp_idx[i] = counter % shape_a[i]; counter /= shape_a[i]; } if (bottom_id == 0) { - int channel_id = (index / size_a) % channels_a; - int aidx = batch_id * channels_a + channel_id; - for (int i = 0; i < dims; ++i) { + int_tp channel_id = (index / size_a) % channels_a; + int_tp aidx = batch_id * channels_a + channel_id; + for (int_tp i = 0; i < dims; ++i) { aidx *= shape_a[i]; aidx += tmp_idx[i]; } top[index] = forward_a ? bottom_a[aidx] : 0; } else { - int channel_id = (index / size_a) % channels_b; - int bidx = (batch_id * channels_b + channel_id) * size_b; - int btemp = 1; - for (int i = dims - 1; i >= 0; --i) { + int_tp channel_id = (index / size_a) % channels_b; + int_tp bidx = (batch_id * channels_b + channel_id) * size_b; + int_tp btemp = 1; + for (int_tp i = dims - 1; i >= 0; --i) { bidx += btemp * (tmp_idx[i] + pad[i]); btemp *= shape_b[i]; } @@ -62,46 +62,46 @@ __global__ void CopyForward(const int nthreads, const int dims, } template -__global__ void CopyBackward(const int nthreads, const int dims, +__global__ void CopyBackward(const int_tp nthreads, const int_tp dims, Dtype* bottom_a, const bool backward_a, Dtype* bottom_b, const bool backward_b, - const Dtype* top, const int num, - const int channels_a, const int channels_b, - const int* shape_a, const int* shape_b) { - int pad[6]; // NOLINT(runtime/arrays) - int tmp_idx[6]; // NOLINT(runtime/arrays) - int size_a = 1; - int size_b = 1; - - for (int i = 0; i < dims; ++i) { + const Dtype* top, const int_tp num, + const int_tp channels_a, const int_tp channels_b, + const int_tp* shape_a, const int_tp* shape_b) { + int_tp pad[6]; // NOLINT(runtime/arrays) + int_tp tmp_idx[6]; // NOLINT(runtime/arrays) + int_tp size_a = 1; + int_tp size_b = 1; + + for (int_tp i = 0; i < dims; ++i) { pad[i] = (shape_b[i] - shape_a[i]) / 2; size_a *= shape_a[i]; size_b *= shape_b[i]; } CUDA_KERNEL_LOOP(index, nthreads) { - int batch_id = index / ((channels_a + channels_b) * size_a); - int bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) + int_tp batch_id = index / ((channels_a + channels_b) * size_a); + int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a) / (channels_a * size_a)) % 2; - int counter = index; - for (int i = dims - 1; i >= 0; --i) { + int_tp counter = index; + for (int_tp i = dims - 1; i >= 0; --i) { tmp_idx[i] = counter % shape_a[i]; counter /= shape_a[i]; } if (bottom_id == 0) { - int channel_id = (index / size_a) % channels_a; - int aidx = batch_id * channels_a + channel_id; - for (int i = 0; i < dims; ++i) { + int_tp channel_id = (index / size_a) % channels_a; + int_tp aidx = batch_id * channels_a + channel_id; + for (int_tp i = 0; i < dims; ++i) { aidx *= shape_a[i]; aidx += tmp_idx[i]; } bottom_a[aidx] = backward_a ? top[index] : 0; } else { - int channel_id = (index / size_a) % channels_b; - int bidx = (batch_id * channels_b + channel_id) * size_b; - int btemp = 1; - for (int i = dims - 1; i >= 0; --i) { + int_tp channel_id = (index / size_a) % channels_b; + int_tp bidx = (batch_id * channels_b + channel_id) * size_b; + int_tp btemp = 1; + for (int_tp i = dims - 1; i >= 0; --i) { bidx += btemp * (tmp_idx[i] + pad[i]); btemp *= shape_b[i]; } @@ -114,18 +114,18 @@ __global__ void CopyBackward(const int nthreads, const int dims, template void MergeCropLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - int count = top[0]->count(); + int_tp count = top[0]->count(); const Dtype* bottom_data_a = bottom[0]->gpu_data(); const Dtype* bottom_data_b = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - int num = bottom[0]->num(); - int spatial_dims = bottom[0]->shape().size() - 2; + int_tp num = bottom[0]->num(); + int_tp spatial_dims = bottom[0]->shape().size() - 2; // All channels of both inputs are copied - int channels_a = bottom[0]->channels(); - int channels_b = bottom[1]->channels(); + int_tp channels_a = bottom[0]->channels(); + int_tp channels_b = bottom[1]->channels(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -168,25 +168,25 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, return; } - int count = top[0]->count(); + int_tp count = top[0]->count(); Dtype* bottom_diff_a = bottom[0]->mutable_gpu_diff(); Dtype* bottom_diff_b = bottom[1]->mutable_gpu_diff(); const Dtype* top_diff = top[0]->gpu_diff(); - int num = bottom[0]->num(); - int spatial_dims = bottom[0]->shape().size() - 2; + int_tp num = bottom[0]->num(); + int_tp spatial_dims = bottom[0]->shape().size() - 2; // All channels of both inputs are copied - int channels_a = bottom[0]->channels(); - int channels_b = bottom[1]->channels(); + int_tp channels_a = bottom[0]->channels(); + int_tp channels_b = bottom[1]->channels(); // Width and height of the smaller input, which should be input 0 - int height_a = bottom[0]->height(); - int width_a = bottom[0]->width(); + int_tp height_a = bottom[0]->height(); + int_tp width_a = bottom[0]->width(); - int height_b = bottom[1]->height(); - int width_b = bottom[1]->width(); + int_tp height_b = bottom[1]->height(); + int_tp width_b = bottom[1]->width(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/multinomial_logistic_loss_layer.cpp b/src/caffe/layers/multinomial_logistic_loss_layer.cpp index 4267a594a0f..37212808913 100644 --- a/src/caffe/layers/multinomial_logistic_loss_layer.cpp +++ b/src/caffe/layers/multinomial_logistic_loss_layer.cpp @@ -24,11 +24,11 @@ void MultinomialLogisticLossLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); - int num = bottom[0]->num(); - int dim = bottom[0]->count() / bottom[0]->num(); + int_tp num = bottom[0]->num(); + int_tp dim = bottom[0]->count() / bottom[0]->num(); Dtype loss = 0; - for (int i = 0; i < num; ++i) { - int label = static_cast(bottom_label[i]); + for (int_tp i = 0; i < num; ++i) { + int_tp label = static_cast(bottom_label[i]); Dtype prob = std::max( bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); loss -= log(prob); @@ -48,12 +48,12 @@ void MultinomialLogisticLossLayer::Backward_cpu( const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - int num = bottom[0]->num(); - int dim = bottom[0]->count() / bottom[0]->num(); + int_tp num = bottom[0]->num(); + int_tp dim = bottom[0]->count() / bottom[0]->num(); caffe_set(bottom[0]->count(), Dtype(0), bottom_diff); const Dtype scale = - top[0]->cpu_diff()[0] / num; - for (int i = 0; i < num; ++i) { - int label = static_cast(bottom_label[i]); + for (int_tp i = 0; i < num; ++i) { + int_tp label = static_cast(bottom_label[i]); Dtype prob = std::max( bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); bottom_diff[i * dim + label] = scale / prob; diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 61c2141ecd9..02ad3208973 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -34,13 +34,13 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - int num; + int_tp num; if (this->layer_param_.mvn_param().across_channels()) num = bottom[0]->num(); else num = bottom[0]->num() * bottom[0]->channels(); - int dim = bottom[0]->count() / num; + int_tp dim = bottom[0]->count() / num; // subtract mean caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, @@ -81,13 +81,13 @@ void MVNLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - int num; + int_tp num; if (this->layer_param_.mvn_param().across_channels()) num = bottom[0]->num(); else num = bottom[0]->num() * bottom[0]->channels(); - int dim = bottom[0]->count() / num; + int_tp dim = bottom[0]->count() / num; if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index d499ca887a6..09c59ee0b82 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -12,13 +12,13 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - int num; + int_tp num; if (this->layer_param_.mvn_param().across_channels()) num = bottom[0]->num(); else num = bottom[0]->num() * bottom[0]->channels(); - int dim = bottom[0]->count() / num; + int_tp dim = bottom[0]->count() / num; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -115,13 +115,13 @@ void MVNLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - int num; + int_tp num; if (this->layer_param_.mvn_param().across_channels()) num = bottom[0]->num(); else num = bottom[0]->num() * bottom[0]->channels(); - int dim = bottom[0]->count() / num; + int_tp dim = bottom[0]->count() / num; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 249a9b35271..62401d8b985 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -30,16 +30,16 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, channel_axis_ = bottom[0]->CanonicalAxisIndex(pool_param.axis()); channels_ = bottom[0]->shape(channel_axis_); - const int first_spatial_axis = channel_axis_ + 1; - const int num_axes = bottom[0]->num_axes(); + const int_tp first_spatial_axis = channel_axis_ + 1; + const int_tp num_axes = bottom[0]->num_axes(); num_spatial_axes_ = num_axes - first_spatial_axis; CHECK_GE(num_spatial_axes_, 0); - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1)); + vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); + vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1L)); kernel_shape_.Reshape(spatial_dim_blob_shape); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); if (pool_param.global_pooling()) { global_pooling_ = true; @@ -58,9 +58,9 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, kernel_shape_data[0] = pool_param.kernel_h(); kernel_shape_data[1] = pool_param.kernel_w(); } else { - const int num_kernel_dims = pool_param.kernel_size_size(); + const int_tp num_kernel_dims = pool_param.kernel_size_size(); CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_); - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { kernel_shape_data[i] = pool_param.kernel_size( (num_kernel_dims == 1) ? 0 : i); CHECK_GT(kernel_shape_data[i], 0) @@ -70,10 +70,10 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } size_.Reshape(spatial_dim_blob_shape); - int* size_data = size_.mutable_cpu_data(); + int_tp* size_data = size_.mutable_cpu_data(); - vector top_shape = bottom[0]->shape(); - for (int i = 0; i < num_spatial_axes_; ++i) { + vector top_shape = bottom[0]->shape(); + for (int_tp i = 0; i < num_spatial_axes_; ++i) { size_data[i] = bottom[0]->shape(channel_axis_ + 1 + i); } top[0]->Reshape(top_shape); @@ -82,14 +82,14 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, } if (global_pooling_) { - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { kernel_shape_data[i] = size_data[i]; } } // Setup stride dimensions (stride_). stride_.Reshape(spatial_dim_blob_shape); - int* stride_data = stride_.mutable_cpu_data(); + int_tp* stride_data = stride_.mutable_cpu_data(); if (pool_param.has_stride_h() || pool_param.has_stride_w()) { CHECK_EQ(num_spatial_axes_, 2) << "stride_h & stride_w can only be used for 2D convolution."; @@ -98,14 +98,14 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, stride_data[0] = pool_param.stride_h(); stride_data[1] = pool_param.stride_w(); } else { - const int num_stride_dims = pool_param.stride_size(); + const int_tp num_stride_dims = pool_param.stride_size(); CHECK(num_stride_dims == 0 || num_stride_dims == 1 || num_stride_dims == num_spatial_axes_) << "stride must be specified once, or once per spatial dimension " << "(stride specified " << num_stride_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultStride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultStride = 1; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : pool_param.stride((num_stride_dims == 1) ? 0 : i); CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; @@ -114,7 +114,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, // Setup pad dimensions (pad_). pad_.Reshape(spatial_dim_blob_shape); - int* pad_data = pad_.mutable_cpu_data(); + int_tp* pad_data = pad_.mutable_cpu_data(); if (pool_param.has_pad_h() || pool_param.has_pad_w()) { CHECK_EQ(num_spatial_axes_, 2) << "pad_h & pad_w can only be used for 2D convolution."; @@ -123,14 +123,14 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, pad_data[0] = pool_param.pad_h(); pad_data[1] = pool_param.pad_w(); } else { - const int num_pad_dims = pool_param.pad_size(); + const int_tp num_pad_dims = pool_param.pad_size(); CHECK(num_pad_dims == 0 || num_pad_dims == 1 || num_pad_dims == num_spatial_axes_) << "pad must be specified once, or once per spatial dimension " << "(pad specified " << num_pad_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultPad = 0; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultPad = 0; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : pool_param.pad((num_pad_dims == 1) ? 0 : i); } @@ -138,7 +138,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, // Setup kernel stride dimensions kstride_.Reshape(spatial_dim_blob_shape); - int* kstride_data = kstride_.mutable_cpu_data(); + int_tp* kstride_data = kstride_.mutable_cpu_data(); if (pool_param.has_kstride_h() || pool_param.has_kstride_w()) { CHECK_EQ(num_spatial_axes_, 2) << "kstride_h & kstride_w can only be used for 2D convolution."; @@ -147,14 +147,14 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, kstride_data[0] = pool_param.kstride_h(); kstride_data[1] = pool_param.kstride_w(); } else { - const int num_kstride_dims = pool_param.kstride_size(); + const int_tp num_kstride_dims = pool_param.kstride_size(); CHECK(num_kstride_dims == 0 || num_kstride_dims == 1 || num_kstride_dims == num_spatial_axes_) << "kstride must be specified once, or once per spatial dimension " << "(kstride specified " << num_kstride_dims << " times; " << num_spatial_axes_ << " spatial dims);"; - const int kDefaultKstride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { + const int_tp kDefaultKstride = 1; + for (int_tp i = 0; i < num_spatial_axes_; ++i) { kstride_data[i] = (num_kstride_dims == 0) ? kDefaultKstride : pool_param.kstride((num_kstride_dims == 1) ? 0 : i); } @@ -162,7 +162,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, // Different 2D and ND im2col/col2im kernels for strided kernels use_skernel_ = false; - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { use_skernel_ |= (kstride_data[i] != 1); if (use_skernel_) { break; @@ -174,30 +174,30 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, template void PoolingLayer::Reshape(const vector*>& bottom, const vector*>& top) { - vector size_shape(1, num_spatial_axes_); + vector size_shape(1, num_spatial_axes_); size_.Reshape(size_shape); pooled_size_.Reshape(size_shape); ext_kernel_shape_.Reshape(size_shape); - int* size_data = size_.mutable_cpu_data(); - int* pooled_size_data = pooled_size_.mutable_cpu_data(); - int* ext_kernel_shape_data = ext_kernel_shape_.mutable_cpu_data(); - int* kstride_data = kstride_.mutable_cpu_data(); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); - int* pad_data = pad_.mutable_cpu_data(); - int* stride_data = stride_.mutable_cpu_data(); + int_tp* size_data = size_.mutable_cpu_data(); + int_tp* pooled_size_data = pooled_size_.mutable_cpu_data(); + int_tp* ext_kernel_shape_data = ext_kernel_shape_.mutable_cpu_data(); + int_tp* kstride_data = kstride_.mutable_cpu_data(); + int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); + int_tp* pad_data = pad_.mutable_cpu_data(); + int_tp* stride_data = stride_.mutable_cpu_data(); if (global_pooling_) { - for (int i = 0; i < num_spatial_axes_; ++i) { + for (int_tp i = 0; i < num_spatial_axes_; ++i) { kernel_shape_data[i] = size_data[i]; } } - vector top_shape = bottom[0]->shape(); - for (int i = 0; i < num_spatial_axes_; ++i) { + vector top_shape = bottom[0]->shape(); + for (int_tp i = 0; i < num_spatial_axes_; ++i) { size_data[i] = bottom[0]->shape(channel_axis_ + 1 + i); ext_kernel_shape_data[i] = (kernel_shape_data[i] - 1) * kstride_data[i] + 1; - pooled_size_data[i] = static_cast(ceil( + pooled_size_data[i] = static_cast(ceil( static_cast(size_data[i] + 2 * pad_data[i] - ext_kernel_shape_data[i]) / stride_data[i])) + 1; if (pad_data[i] > 0) { @@ -233,24 +233,24 @@ void PoolingLayer::Reshape(const vector*>& bottom, template void PoolingLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; - int kernel_w_ = kernel_shape_.cpu_data()[1]; - int stride_h_ = stride_.cpu_data()[0]; - int stride_w_ = stride_.cpu_data()[1]; - int pad_h_ = pad_.cpu_data()[0]; - int pad_w_ = pad_.cpu_data()[1]; - int height_ = size_.cpu_data()[0]; - int width_ = size_.cpu_data()[1]; - int pooled_height_ = pooled_size_.cpu_data()[0]; - int pooled_width_ = pooled_size_.cpu_data()[1]; + int_tp kernel_h_ = kernel_shape_.cpu_data()[0]; + int_tp kernel_w_ = kernel_shape_.cpu_data()[1]; + int_tp stride_h_ = stride_.cpu_data()[0]; + int_tp stride_w_ = stride_.cpu_data()[1]; + int_tp pad_h_ = pad_.cpu_data()[0]; + int_tp pad_w_ = pad_.cpu_data()[1]; + int_tp height_ = size_.cpu_data()[0]; + int_tp width_ = size_.cpu_data()[1]; + int_tp pooled_height_ = pooled_size_.cpu_data()[0]; + int_tp pooled_width_ = pooled_size_.cpu_data()[1]; const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - const int top_count = top[0]->count(); + const int_tp top_count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; - int* mask = NULL; // suppress warnings about uninitalized variables + int_tp* mask = NULL; // suppress warnings about uninitalized variables Dtype* top_mask = NULL; // Different pooling methods. We explicitly do the switch outside the for // loop to save time, although this results in more code. @@ -262,24 +262,24 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, caffe_set(top_count, Dtype(-1), top_mask); } else { mask = max_idx_.mutable_cpu_data(); - caffe_set(top_count, -1, mask); + caffe_set(top_count, -1L, mask); } caffe_set(top_count, Dtype(-FLT_MAX), top_data); // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_); - int wend = min(wstart + kernel_w_, width_); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - const int pool_index = ph * pooled_width_ + pw; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - const int index = h * width_ + w; + for (int_tp n = 0; n < bottom[0]->num(); ++n) { + for (int_tp c = 0; c < channels_; ++c) { + for (int_tp ph = 0; ph < pooled_height_; ++ph) { + for (int_tp pw = 0; pw < pooled_width_; ++pw) { + int_tp hstart = ph * stride_h_ - pad_h_; + int_tp wstart = pw * stride_w_ - pad_w_; + int_tp hend = min(hstart + kernel_h_, height_); + int_tp wend = min(wstart + kernel_w_, width_); + hstart = max(hstart, 0L); + wstart = max(wstart, 0L); + const int_tp pool_index = ph * pooled_width_ + pw; + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { + const int_tp index = h * width_ + w; if (bottom_data[index] > top_data[pool_index]) { top_data[pool_index] = bottom_data[index]; if (use_top_mask) { @@ -304,25 +304,25 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, } break; case PoolingParameter_PoolMethod_AVE: - for (int i = 0; i < top_count; ++i) { + for (int_tp i = 0; i < top_count; ++i) { top_data[i] = 0; } // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); + for (int_tp n = 0; n < bottom[0]->num(); ++n) { + for (int_tp c = 0; c < channels_; ++c) { + for (int_tp ph = 0; ph < pooled_height_; ++ph) { + for (int_tp pw = 0; pw < pooled_width_; ++pw) { + int_tp hstart = ph * stride_h_ - pad_h_; + int_tp wstart = pw * stride_w_ - pad_w_; + int_tp hend = min(hstart + kernel_h_, height_ + pad_h_); + int_tp wend = min(wstart + kernel_w_, width_ + pad_w_); + int_tp pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0L); + wstart = max(wstart, 0L); hend = min(hend, height_); wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { top_data[ph * pooled_width_ + pw] += bottom_data[h * width_ + w]; } @@ -347,16 +347,16 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, template void PoolingLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; - int kernel_w_ = kernel_shape_.cpu_data()[1]; - int stride_h_ = stride_.cpu_data()[0]; - int stride_w_ = stride_.cpu_data()[1]; - int pad_h_ = pad_.cpu_data()[0]; - int pad_w_ = pad_.cpu_data()[1]; - int height_ = size_.cpu_data()[0]; - int width_ = size_.cpu_data()[1]; - int pooled_height_ = pooled_size_.cpu_data()[0]; - int pooled_width_ = pooled_size_.cpu_data()[1]; + int_tp kernel_h_ = kernel_shape_.cpu_data()[0]; + int_tp kernel_w_ = kernel_shape_.cpu_data()[1]; + int_tp stride_h_ = stride_.cpu_data()[0]; + int_tp stride_w_ = stride_.cpu_data()[1]; + int_tp pad_h_ = pad_.cpu_data()[0]; + int_tp pad_w_ = pad_.cpu_data()[1]; + int_tp height_ = size_.cpu_data()[0]; + int_tp width_ = size_.cpu_data()[1]; + int_tp pooled_height_ = pooled_size_.cpu_data()[0]; + int_tp pooled_width_ = pooled_size_.cpu_data()[1]; if (!propagate_down[0]) { return; @@ -368,7 +368,7 @@ void PoolingLayer::Backward_cpu(const vector*>& top, caffe_set(bottom[0]->count(), Dtype(0), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; - const int* mask = NULL; // suppress warnings about uninitialized variables + const int_tp* mask = NULL; // suppress warnings about uninitialized variables const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: @@ -378,12 +378,12 @@ void PoolingLayer::Backward_cpu(const vector*>& top, } else { mask = max_idx_.cpu_data(); } - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - const int index = ph * pooled_width_ + pw; - const int bottom_index = + for (int_tp n = 0; n < top[0]->num(); ++n) { + for (int_tp c = 0; c < channels_; ++c) { + for (int_tp ph = 0; ph < pooled_height_; ++ph) { + for (int_tp pw = 0; pw < pooled_width_; ++pw) { + const int_tp index = ph * pooled_width_ + pw; + const int_tp bottom_index = use_top_mask ? top_mask[index] : mask[index]; bottom_diff[bottom_index] += top_diff[index]; } @@ -400,21 +400,21 @@ void PoolingLayer::Backward_cpu(const vector*>& top, break; case PoolingParameter_PoolMethod_AVE: // The main loop - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); + for (int_tp n = 0; n < top[0]->num(); ++n) { + for (int_tp c = 0; c < channels_; ++c) { + for (int_tp ph = 0; ph < pooled_height_; ++ph) { + for (int_tp pw = 0; pw < pooled_width_; ++pw) { + int_tp hstart = ph * stride_h_ - pad_h_; + int_tp wstart = pw * stride_w_ - pad_w_; + int_tp hend = min(hstart + kernel_h_, height_ + pad_h_); + int_tp wend = min(wstart + kernel_w_, width_ + pad_w_); + int_tp pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0L); + wstart = max(wstart, 0L); hend = min(hend, height_); wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { bottom_diff[h * width_ + w] += top_diff[ph * pooled_width_ + pw] / pool_size; } diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 149afe735af..298d83007b0 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -15,32 +15,32 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void MaxPoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, - const int pad_w, Dtype* const top_data, - int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int_tp nthreads, + const Dtype* const bottom_data, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, + const int_tp stride_w, const int_tp pad_h, + const int_tp pad_w, Dtype* const top_data, + int_tp* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - const int hend = min(hstart + kernel_h, height); - const int wend = min(wstart + kernel_w, width); - hstart = max(hstart, 0); - wstart = max(wstart, 0); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + const int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)height); + const int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)width); + hstart = max((int_tpc)(hstart), (int_tpc)(0)); + wstart = max((int_tpc)(wstart), (int_tpc)(0)); Dtype maxval = -FLT_MAX; - int maxidx = -1; + int_tp maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; @@ -57,33 +57,33 @@ __global__ void MaxPoolForward(const int nthreads, } template -__global__ void AvePoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, - const int pad_w, Dtype* const top_data) { +__global__ void AvePoolForward(const int_tp nthreads, + const Dtype* const bottom_data, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, + const int_tp stride_w, const int_tp pad_h, + const int_tp pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + kernel_h, height + pad_h); - int wend = min(wstart + kernel_w, width + pad_w); - const int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height); - wend = min(wend, width); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)(height + pad_h)); + int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)(width + pad_w)); + const int_tp pool_size = (hend - hstart) * (wend - wstart); + hstart = max((int_tpc)(hstart), (int_tpc)(0)); + wstart = max((int_tpc)(wstart), (int_tpc)(0)); + hend = min((int_tpc)(hend), (int_tpc)(height)); + wend = min((int_tpc)(wend), (int_tpc)(width)); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } @@ -92,38 +92,38 @@ __global__ void AvePoolForward(const int nthreads, } template -__global__ void StoPoolForwardTrain(const int nthreads, +__global__ void StoPoolForwardTrain(const int_tp nthreads, const Dtype* const bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, Dtype* const rand_idx, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, + const int_tp stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + const int_tp hstart = ph * stride_h; + const int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)height); + const int_tp wstart = pw * stride_w; + const int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; @@ -136,31 +136,31 @@ __global__ void StoPoolForwardTrain(const int nthreads, } template -__global__ void StoPoolForwardTest(const int nthreads, +__global__ void StoPoolForwardTest(const int_tp nthreads, const Dtype* const bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, Dtype* const top_data) { + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, + const int_tp stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + const int_tp pw = index % pooled_width; + const int_tp ph = (index / pooled_width) % pooled_height; + const int_tp c = (index / pooled_width / pooled_height) % channels; + const int_tp n = index / pooled_width / pooled_height / channels; + const int_tp hstart = ph * stride_h; + const int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)height); + const int_tp wstart = pw * stride_w; + const int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } @@ -170,35 +170,35 @@ __global__ void StoPoolForwardTest(const int nthreads, } template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, - const int* const mask, - const Dtype* const top_mask, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, - const int pad_w, Dtype* const bottom_diff) { +__global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* const top_diff, + const int_tp* const mask, + const Dtype* const top_mask, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, + const int_tp stride_w, const int_tp pad_h, + const int_tp pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp c = (index / width / height) % channels; + const int_tp n = index / width / height / channels; + const int_tp phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - const int phend = min((h + pad_h) / stride_h + 1, pooled_height); - const int pwstart = + const int_tp phend = min((int_tpc)((h + pad_h) / stride_h + 1L), (int_tpc)pooled_height); + const int_tp pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + const int_tp pwend = min((int_tpc)((w + pad_w) / stride_w + 1L), (int_tpc)pooled_width); Dtype gradient = 0; - const int offset = (n * channels + c) * pooled_height * pooled_width; + const int_tp offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { - const int* const mask_slice = mask + offset; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + const int_tp* const mask_slice = mask + offset; + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } @@ -206,8 +206,8 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, } } else { const Dtype* const top_mask_slice = top_mask + offset; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } @@ -219,36 +219,36 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, } template -__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, - const int pad_h, const int pad_w, +__global__ void AvePoolBackward(const int_tp nthreads, const Dtype* const top_diff, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp pad_h, const int_tp pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width + pad_w; - const int h = (index / width) % height + pad_h; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + const int_tp w = index % width + pad_w; + const int_tp h = (index / width) % height + pad_h; + const int_tp c = (index / width / height) % channels; + const int_tp n = index / width / height / channels; + const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int_tp phend = min((int_tpc)(h / stride_h + 1), (int_tpc)(pooled_height)); + const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int_tp pwend = min((int_tpc)(w / stride_w + 1), (int_tpc)(pooled_width)); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + kernel_h, height + pad_h); - int wend = min(wstart + kernel_w, width + pad_w); - int pool_size = (hend - hstart) * (wend - wstart); + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)(height + pad_h)); + int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)(width + pad_w)); + int_tp pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } @@ -257,35 +257,35 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, } template -__global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, - const Dtype* const top_diff, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, - const int stride_w, Dtype* const bottom_diff) { +__global__ void StoPoolBackward(const int_tp nthreads, const Dtype* const rand_idx, + const Dtype* const top_diff, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp stride_h, + const int_tp stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + const int_tp w = index % width; + const int_tp h = (index / width) % height; + const int_tp c = (index / width / height) % channels; + const int_tp n = index / width / height / channels; + const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int_tp phend = min((int_tpc)(h / stride_h + 1), (int_tpc)pooled_height); + const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int_tp pwend = min((int_tpc)(w / stride_w + 1), (int_tpc)pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { + for (int_tp ph = phstart; ph < phend; ++ph) { + for (int_tp pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index - == static_cast(rand_idx_slice[ph * pooled_width + pw])); + == static_cast(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; @@ -293,32 +293,32 @@ __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, } template -__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, - Dtype* top_data, int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int_tp nthreads, const Dtype* bottom_data, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, + const int_tp pad_h, const int_tp pad_w, + Dtype* top_data, int_tp* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height); - int wend = min(wstart + ext_kernel_w, width); - hstart = max(hstart, 0); - wstart = max(wstart, 0); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)height); + int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)width); + hstart = max((int_tpc)hstart, (int_tpc)(0)); + wstart = max((int_tpc)wstart, (int_tpc)(0)); Dtype maxval = -FLT_MAX; - int maxidx = -1; + int_tp maxidx = -1; bottom_data += (n * channels + c) * height * width; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data[maxidx]; @@ -335,34 +335,34 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, } template -__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, +__global__ void AvePoolForward(const int_tp nthreads, const Dtype* bottom_data, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, + const int_tp pad_h, const int_tp pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h - pad_h; - int wstart = pw * stride_w - pad_w; - int hend = min(hstart + ext_kernel_h, height + pad_h); - int wend = min(wstart + ext_kernel_w, width + pad_w); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height); - wend = min(wend, width); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h - pad_h; + int_tp wstart = pw * stride_w - pad_w; + int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)(height + pad_h)); + int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)(width + pad_w)); + hstart = max((int_tpc)hstart, (int_tpc)(0)); + wstart = max((int_tpc)wstart, (int_tpc)(0)); + hend = min((int_tpc)hend, (int_tpc)height); + wend = min((int_tpc)wend, (int_tpc)width); Dtype aveval = 0; bottom_data += (n * channels + c) * height * width; - int pool_size = 0; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + int_tp pool_size = 0; + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { aveval += bottom_data[h * width + w]; ++pool_size; } @@ -372,38 +372,38 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, } template -__global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* bottom_data, const int num, - const int channels, const int height, - const int width, const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, Dtype* rand_idx, +__global__ void StoPoolForwardTrain(const int_tp nthreads, + const Dtype* bottom_data, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, Dtype* rand_idx, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + ext_kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + ext_kernel_w, width); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h; + int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)height); + int_tp wstart = pw * stride_w; + int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)width); Dtype cumsum = 0.; bottom_data += (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { cumsum += bottom_data[h * width + w]; } } float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { cumsum += bottom_data[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; @@ -416,31 +416,31 @@ __global__ void StoPoolForwardTrain(const int nthreads, } template -__global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, const int kernel_h, - const int kernel_w, const int ext_kernel_h, - const int ext_kernel_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, Dtype* top_data) { +__global__ void StoPoolForwardTest(const int_tp nthreads, const Dtype* bottom_data, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + ext_kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + ext_kernel_w, width); + int_tp pw = index % pooled_width; + int_tp ph = (index / pooled_width) % pooled_height; + int_tp c = (index / pooled_width / pooled_height) % channels; + int_tp n = index / pooled_width / pooled_height / channels; + int_tp hstart = ph * stride_h; + int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)height); + int_tp wstart = pw * stride_w; + int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; bottom_data += (n * channels + c) * height * width; // First pass: get sum - for (int h = hstart; h < hend; h += kstride_h) { - for (int w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += kstride_h) { + for (int_tp w = wstart; w < wend; w += kstride_w) { cumsum += bottom_data[h * width + w]; cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; } @@ -450,43 +450,43 @@ __global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, } template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, - const int num, const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, const int ext_kernel_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, - const int pad_h, const int pad_w, +__global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* top_diff, + const int_tp* mask, const Dtype* top_mask, + const int_tp num, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, + const int_tp pad_h, const int_tp pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; + int_tp w = index % width; + int_tp h = (index / width) % height; + int_tp c = (index / width / height) % channels; + int_tp n = index / width / height / channels; - int pooled_height_1 = pooled_height - 1; - int pooled_width_1 = pooled_width - 1; - int phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; - int phend = + int_tp pooled_height_1 = pooled_height - 1; + int_tp pooled_width_1 = pooled_width - 1; + int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int_tp phend = (h >= pooled_height) ? pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; - int pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; - int pwend = + int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int_tp pwend = (w >= pooled_width) ? pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; Dtype gradient = 0; - int offset = (n * channels + c) * pooled_height * pooled_width; + int_tp offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; if (mask) { mask += offset; - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + for (int_tp ph = phstart; ph <= phend; ph += kstride_h) { + for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) { if (mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } @@ -494,8 +494,8 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } } else { top_mask += offset; - for (int ph = phstart; ph <= phend; ph += kstride_h) { - for (int pw = pwstart; pw <= pwend; pw += kstride_w) { + for (int_tp ph = phstart; ph <= phend; ph += kstride_h) { + for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } @@ -507,27 +507,27 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } template -__global__ void MaxPoolNDForward(const int n, const int num_axes, +__global__ void MaxPoolNDForward(const int_tp n, const int_tp num_axes, const Dtype* bottom_data, - const int channels, const int* size, - const int* pooled_size, const int* kernel_size, - const int* ext_kernel_size, const int* stride, - const int* kstride, const int* pad, - Dtype* top_data, int* mask, Dtype* top_mask) { - int d_idx[6]; // NOLINT(runtime/arrays) - int d_start[6]; // NOLINT(runtime/arrays) - int d_end[6]; // NOLINT(runtime/arrays) - int d_iter[6]; // NOLINT(runtime/arrays) - int i; + const int_tp channels, const int_tp* size, + const int_tp* pooled_size, const int_tp* kernel_size, + const int_tp* ext_kernel_size, const int_tp* stride, + const int_tp* kstride, const int_tp* pad, + Dtype* top_data, int_tp* mask, Dtype* top_mask) { + int_tp d_idx[6]; // NOLINT(runtime/arrays) + int_tp d_start[6]; // NOLINT(runtime/arrays) + int_tp d_end[6]; // NOLINT(runtime/arrays) + int_tp d_iter[6]; // NOLINT(runtime/arrays) + int_tp i; CUDA_KERNEL_LOOP(index, n) { - int offset = 1; - int num = index; + int_tp offset = 1; + int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = index % pooled_size[i]; d_start[i] = d_idx[i] * stride[i] - pad[i]; - d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); - d_start[i] = max(d_start[i], 0); + d_end[i] = min((int_tpc)(d_start[i] + ext_kernel_size[i]), (int_tpc)(size[i])); + d_start[i] = max((int_tpc)(d_start[i]), (int_tpc)(0)); num /= pooled_size[i]; offset *= size[i]; d_iter[i] = d_start[i]; @@ -542,18 +542,18 @@ __global__ void MaxPoolNDForward(const int n, const int num_axes, return; } } - int chan = num % channels; + int_tp chan = num % channels; num /= channels; offset *= (num * channels + chan); Dtype maxval = -FLT_MAX; - int maxidx = -1; - int final_offset = 0; + int_tp maxidx = -1; + int_tp final_offset = 0; bool incremented; do { final_offset = offset; - int size_prod = 1; + int_tp size_prod = 1; for (i = num_axes - 1; i >= 0; --i) { final_offset += d_iter[i] * size_prod; size_prod *= size[i]; @@ -586,26 +586,26 @@ __global__ void MaxPoolNDForward(const int n, const int num_axes, } template -__global__ void MaxPoolNDBackward(const int n, const int num_axes, - const Dtype* top_diff, const int* mask, +__global__ void MaxPoolNDBackward(const int_tp n, const int_tp num_axes, + const Dtype* top_diff, const int_tp* mask, const Dtype* top_mask, - const int channels, const int* size, - const int* pooled_size, - const int* kernel_size, - const int* ext_kernel_size, const int* stride, - const int* kstride, const int* pad, + const int_tp channels, const int_tp* size, + const int_tp* pooled_size, + const int_tp* kernel_size, + const int_tp* ext_kernel_size, const int_tp* stride, + const int_tp* kstride, const int_tp* pad, Dtype* bottom_diff) { - int d_idx[6]; // NOLINT(runtime/arrays) - int d_start[6]; // NOLINT(runtime/arrays) - int d_end[6]; // NOLINT(runtime/arrays) - int d_iter[6]; // NOLINT(runtime/arrays) - int i; + int_tp d_idx[6]; // NOLINT(runtime/arrays) + int_tp d_start[6]; // NOLINT(runtime/arrays) + int_tp d_end[6]; // NOLINT(runtime/arrays) + int_tp d_iter[6]; // NOLINT(runtime/arrays) + int_tp i; CUDA_KERNEL_LOOP(index, n) { // find out the local index // find out the local offset - int offset = 1; - int num = index; + int_tp offset = 1; + int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = num % size[i]; d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? @@ -622,20 +622,20 @@ __global__ void MaxPoolNDBackward(const int n, const int num_axes, return; } } - int chan = num % channels; + int_tp chan = num % channels; num /= channels; offset *= (num * channels + chan); Dtype gradient = 0; - int final_offset = 0; - int im_offset = 0; + int_tp final_offset = 0; + int_tp im_offset = 0; bool incremented; do { final_offset = offset; im_offset = 0; - int size_prod = 1; - int pooled_size_prod = 1; + int_tp size_prod = 1; + int_tp pooled_size_prod = 1; for (i = num_axes - 1; i >= 0; --i) { final_offset += d_iter[i] * pooled_size_prod; im_offset += d_idx[i] * size_prod; @@ -676,30 +676,30 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - int count = top[0]->count(); + int_tp count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; - int* mask = NULL; + int_tp* mask = NULL; Dtype* top_mask = NULL; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (num_spatial_axes_ == 2) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; - int kernel_w_ = kernel_shape_.cpu_data()[1]; - int stride_h_ = stride_.cpu_data()[0]; - int stride_w_ = stride_.cpu_data()[1]; - int pad_h_ = pad_.cpu_data()[0]; - int pad_w_ = pad_.cpu_data()[1]; - int kstride_h_ = kstride_.cpu_data()[0]; - int kstride_w_ = kstride_.cpu_data()[1]; - int height_ = size_.cpu_data()[0]; - int width_ = size_.cpu_data()[1]; - int pooled_height_ = pooled_size_.cpu_data()[0]; - int pooled_width_ = pooled_size_.cpu_data()[1]; - int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; - int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + int_tp kernel_h_ = kernel_shape_.cpu_data()[0]; + int_tp kernel_w_ = kernel_shape_.cpu_data()[1]; + int_tp stride_h_ = stride_.cpu_data()[0]; + int_tp stride_w_ = stride_.cpu_data()[1]; + int_tp pad_h_ = pad_.cpu_data()[0]; + int_tp pad_w_ = pad_.cpu_data()[1]; + int_tp kstride_h_ = kstride_.cpu_data()[0]; + int_tp kstride_w_ = kstride_.cpu_data()[1]; + int_tp height_ = size_.cpu_data()[0]; + int_tp width_ = size_.cpu_data()[1]; + int_tp pooled_height_ = pooled_size_.cpu_data()[0]; + int_tp pooled_width_ = pooled_size_.cpu_data()[1]; + int_tp ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int_tp ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; // 2D case if (use_skernel_) { @@ -844,20 +844,20 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, this->device_->id()); if (num_spatial_axes_ == 2) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; - int kernel_w_ = kernel_shape_.cpu_data()[1]; - int stride_h_ = stride_.cpu_data()[0]; - int stride_w_ = stride_.cpu_data()[1]; - int pad_h_ = pad_.cpu_data()[0]; - int pad_w_ = pad_.cpu_data()[1]; - int kstride_h_ = kstride_.cpu_data()[0]; - int kstride_w_ = kstride_.cpu_data()[1]; - int height_ = size_.cpu_data()[0]; - int width_ = size_.cpu_data()[1]; - int pooled_height_ = pooled_size_.cpu_data()[0]; - int pooled_width_ = pooled_size_.cpu_data()[1]; - int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; - int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + int_tp kernel_h_ = kernel_shape_.cpu_data()[0]; + int_tp kernel_w_ = kernel_shape_.cpu_data()[1]; + int_tp stride_h_ = stride_.cpu_data()[0]; + int_tp stride_w_ = stride_.cpu_data()[1]; + int_tp pad_h_ = pad_.cpu_data()[0]; + int_tp pad_w_ = pad_.cpu_data()[1]; + int_tp kstride_h_ = kstride_.cpu_data()[0]; + int_tp kstride_w_ = kstride_.cpu_data()[1]; + int_tp height_ = size_.cpu_data()[0]; + int_tp width_ = size_.cpu_data()[1]; + int_tp pooled_height_ = pooled_size_.cpu_data()[0]; + int_tp pooled_width_ = pooled_size_.cpu_data()[1]; + int_tp ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int_tp ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; // 2D case if (use_skernel_) { @@ -1057,10 +1057,10 @@ void PoolingLayer::Backward_gpu(const vector*>& top, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; - const int* mask = NULL; + const int_tp* mask = NULL; const Dtype* top_mask = NULL; if (this->device_->backend() == BACKEND_CUDA) { @@ -1068,20 +1068,20 @@ void PoolingLayer::Backward_gpu(const vector*>& top, caffe_gpu_set(count, Dtype(0.), bottom_diff); if (num_spatial_axes_ == 2) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; - int kernel_w_ = kernel_shape_.cpu_data()[1]; - int stride_h_ = stride_.cpu_data()[0]; - int stride_w_ = stride_.cpu_data()[1]; - int pad_h_ = pad_.cpu_data()[0]; - int pad_w_ = pad_.cpu_data()[1]; - int kstride_h_ = kstride_.cpu_data()[0]; - int kstride_w_ = kstride_.cpu_data()[1]; - int height_ = size_.cpu_data()[0]; - int width_ = size_.cpu_data()[1]; - int pooled_height_ = pooled_size_.cpu_data()[0]; - int pooled_width_ = pooled_size_.cpu_data()[1]; - int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; - int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + int_tp kernel_h_ = kernel_shape_.cpu_data()[0]; + int_tp kernel_w_ = kernel_shape_.cpu_data()[1]; + int_tp stride_h_ = stride_.cpu_data()[0]; + int_tp stride_w_ = stride_.cpu_data()[1]; + int_tp pad_h_ = pad_.cpu_data()[0]; + int_tp pad_w_ = pad_.cpu_data()[1]; + int_tp kstride_h_ = kstride_.cpu_data()[0]; + int_tp kstride_w_ = kstride_.cpu_data()[1]; + int_tp height_ = size_.cpu_data()[0]; + int_tp width_ = size_.cpu_data()[1]; + int_tp pooled_height_ = pooled_size_.cpu_data()[0]; + int_tp pooled_width_ = pooled_size_.cpu_data()[1]; + int_tp ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int_tp ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; if (use_skernel_) { switch (this->layer_param_.pooling_param().pool()) { @@ -1180,20 +1180,20 @@ void PoolingLayer::Backward_gpu(const vector*>& top, (cl_mem) bottom_diff, 0); if (num_spatial_axes_ == 2) { - int kernel_h_ = kernel_shape_.cpu_data()[0]; - int kernel_w_ = kernel_shape_.cpu_data()[1]; - int stride_h_ = stride_.cpu_data()[0]; - int stride_w_ = stride_.cpu_data()[1]; - int pad_h_ = pad_.cpu_data()[0]; - int pad_w_ = pad_.cpu_data()[1]; - int kstride_h_ = kstride_.cpu_data()[0]; - int kstride_w_ = kstride_.cpu_data()[1]; - int height_ = size_.cpu_data()[0]; - int width_ = size_.cpu_data()[1]; - int pooled_height_ = pooled_size_.cpu_data()[0]; - int pooled_width_ = pooled_size_.cpu_data()[1]; - int ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; - int ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; + int_tp kernel_h_ = kernel_shape_.cpu_data()[0]; + int_tp kernel_w_ = kernel_shape_.cpu_data()[1]; + int_tp stride_h_ = stride_.cpu_data()[0]; + int_tp stride_w_ = stride_.cpu_data()[1]; + int_tp pad_h_ = pad_.cpu_data()[0]; + int_tp pad_w_ = pad_.cpu_data()[1]; + int_tp kstride_h_ = kstride_.cpu_data()[0]; + int_tp kstride_w_ = kstride_.cpu_data()[1]; + int_tp height_ = size_.cpu_data()[0]; + int_tp width_ = size_.cpu_data()[1]; + int_tp pooled_height_ = pooled_size_.cpu_data()[0]; + int_tp pooled_width_ = pooled_size_.cpu_data()[1]; + int_tp ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; + int_tp ext_kernel_w = ext_kernel_shape_.cpu_data()[0]; if (use_skernel_) { switch (this->layer_param_.pooling_param().pool()) { diff --git a/src/caffe/layers/power_layer.cpp b/src/caffe/layers/power_layer.cpp index 347d9a12aeb..ab4539452ea 100644 --- a/src/caffe/layers/power_layer.cpp +++ b/src/caffe/layers/power_layer.cpp @@ -22,7 +22,7 @@ template void PowerLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = top[0]->mutable_cpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); // Special case where we can ignore the input: scale or power is 0. if (diff_scale_ == Dtype(0)) { Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_); @@ -48,7 +48,7 @@ void PowerLayer::Backward_cpu(const vector*>& top, const vector*>& bottom) { if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* top_diff = top[0]->cpu_diff(); if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { caffe_set(count, diff_scale_, bottom_diff); diff --git a/src/caffe/layers/power_layer.cu b/src/caffe/layers/power_layer.cu index 546808f91c4..91bd6b83894 100644 --- a/src/caffe/layers/power_layer.cu +++ b/src/caffe/layers/power_layer.cu @@ -16,7 +16,7 @@ template void PowerLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -75,7 +75,7 @@ void PowerLayer::Backward_gpu(const vector*>& top, const vector*>& bottom) { if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); if (this->device_->backend() == BACKEND_CUDA) { diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index ac968a23359..8063295ff7d 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -13,17 +13,17 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, CHECK_GE(bottom[0]->num_axes(), 2) << "Number of axes of bottom blob must be >=2."; PReLUParameter prelu_param = this->layer_param().prelu_param(); - int channels = bottom[0]->channels(); + int_tp channels = bottom[0]->channels(); channel_shared_ = prelu_param.channel_shared(); if (this->blobs_.size() > 0) { LOG(INFO) << "Skipping parameter initialization"; } else { this->blobs_.resize(1); if (channel_shared_) { - this->blobs_[0].reset(new Blob(vector(0), + this->blobs_[0].reset(new Blob(vector(0), this->device_)); } else { - this->blobs_[0].reset(new Blob(vector(1, channels), + this->blobs_[0].reset(new Blob(vector(1, channels), this->device_)); } shared_ptr > filler; @@ -47,8 +47,8 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count(1))); - backward_buff_.Reshape(vector(1, bottom[0]->count(1))); + multiplier_.Reshape(vector(1, bottom[0]->count(1))); + backward_buff_.Reshape(vector(1, bottom[0]->count(1))); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -69,9 +69,9 @@ void PReLULayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - const int count = bottom[0]->count(); - const int dim = bottom[0]->count(2); - const int channels = bottom[0]->channels(); + const int_tp count = bottom[0]->count(); + const int_tp dim = bottom[0]->count(2); + const int_tp channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->cpu_data(); // For in-place computation @@ -81,9 +81,9 @@ void PReLULayer::Forward_cpu(const vector*>& bottom, // if channel_shared, channel index in the following computation becomes // always zero. - const int div_factor = channel_shared_ ? channels : 1; - for (int i = 0; i < count; ++i) { - int c = (i / dim) % channels / div_factor; + const int_tp div_factor = channel_shared_ ? channels : 1; + for (int_tp i = 0; i < count; ++i) { + int_tp c = (i / dim) % channels / div_factor; top_data[i] = std::max(bottom_data[i], Dtype(0)) + slope_data[c] * std::min(bottom_data[i], Dtype(0)); } @@ -96,9 +96,9 @@ void PReLULayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* slope_data = this->blobs_[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); - const int count = bottom[0]->count(); - const int dim = bottom[0]->count(2); - const int channels = bottom[0]->channels(); + const int_tp count = bottom[0]->count(); + const int_tp dim = bottom[0]->count(2); + const int_tp channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { @@ -107,7 +107,7 @@ void PReLULayer::Backward_cpu(const vector*>& top, // if channel_shared, channel index in the following computation becomes // always zero. - const int div_factor = channel_shared_ ? channels : 1; + const int_tp div_factor = channel_shared_ ? channels : 1; // Propagte to param // Since to write bottom diff will affect top diff if top and bottom blobs @@ -115,16 +115,16 @@ void PReLULayer::Backward_cpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); - for (int i = 0; i < count; ++i) { - int c = (i / dim) % channels / div_factor; + for (int_tp i = 0; i < count; ++i) { + int_tp c = (i / dim) % channels / div_factor; slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - for (int i = 0; i < count; ++i) { - int c = (i / dim) % channels / div_factor; + for (int_tp i = 0; i < count; ++i) { + int_tp c = (i / dim) % channels / div_factor; bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) + slope_data[c] * (bottom_data[i] <= 0)); } diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index 9d2982633ef..cf17acd0484 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -14,23 +14,23 @@ namespace caffe { #ifdef USE_CUDA // CUDA kernele for forward template -__global__ void PReLUForward(const int n, const int channels, const int dim, +__global__ void PReLUForward(const int_tp n, const int_tp channels, const int_tp dim, const Dtype* in, Dtype* out, - const Dtype* slope_data, const int div_factor) { + const Dtype* slope_data, const int_tp div_factor) { CUDA_KERNEL_LOOP(index, n) { - int c = (index / dim) % channels / div_factor; + int_tp c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward template -__global__ void PReLUBackward(const int n, const int channels, const int dim, +__global__ void PReLUBackward(const int_tp n, const int_tp channels, const int_tp dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, - const int div_factor) { + const int_tp div_factor) { CUDA_KERNEL_LOOP(index, n) { - int c = (index / dim) % channels / div_factor; + int_tp c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } @@ -38,7 +38,7 @@ __global__ void PReLUBackward(const int n, const int channels, const int dim, // CUDA kernel for element-wise parameter backward template -__global__ void PReLUParamBackward(const int n, const Dtype* in_diff, +__global__ void PReLUParamBackward(const int_tp n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); @@ -51,11 +51,11 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); - const int dim = bottom[0]->count(2); - const int channels = bottom[0]->channels(); + const int_tp count = bottom[0]->count(); + const int_tp dim = bottom[0]->count(2); + const int_tp channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); - const int div_factor = channel_shared_ ? channels : 1; + const int_tp div_factor = channel_shared_ ? channels : 1; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -101,9 +101,9 @@ void PReLULayer::Backward_gpu(const vector*>& top, const vector*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - const int count = bottom[0]->count(); - const int dim = bottom[0]->count(2); - const int channels = bottom[0]->channels(); + const int_tp count = bottom[0]->count(); + const int_tp dim = bottom[0]->count(2); + const int_tp channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { @@ -118,9 +118,9 @@ void PReLULayer::Backward_gpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); - int cdim = channels * dim; + int_tp cdim = channels * dim; Dtype dsum = 0.; - for (int n = 0; n < bottom[0]->num(); ++n) { + for (int_tp n = 0; n < bottom[0]->num(); ++n) { // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) PReLUParamBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(cdim), @@ -148,7 +148,7 @@ void PReLULayer::Backward_gpu(const vector*>& top, if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); - int div_factor = channel_shared_ ? channels : 1; + int_tp div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) PReLUBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( @@ -166,9 +166,9 @@ void PReLULayer::Backward_gpu(const vector*>& top, if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); - int cdim = channels * dim; + int_tp cdim = channels * dim; Dtype dsum = 0.; - for (int n = 0; n < bottom[0]->num(); ++n) { + for (int_tp n = 0; n < bottom[0]->num(); ++n) { viennacl::ocl::kernel &oclk_prelu_param = program.get_kernel( CL_KERNEL_SELECT("prelu_param_backward")); viennacl::ocl::enqueue( @@ -202,7 +202,7 @@ void PReLULayer::Backward_gpu(const vector*>& top, if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); - int div_factor = channel_shared_ ? channels : 1; + int_tp div_factor = channel_shared_ ? channels : 1; viennacl::ocl::kernel &oclk_prelu = program.get_kernel( CL_KERNEL_SELECT("prelu_backward")); diff --git a/src/caffe/layers/reduction_layer.cpp b/src/caffe/layers/reduction_layer.cpp index 8ae6329ebe4..0d0f939aaa1 100644 --- a/src/caffe/layers/reduction_layer.cpp +++ b/src/caffe/layers/reduction_layer.cpp @@ -23,7 +23,7 @@ void ReductionLayer::Reshape(const vector*>& bottom, // throw away any after that. // Note: currently reducing along non-tail axes is not supported; otherwise, // we'd need to also copy any axes following an "end_axis". - vector top_shape(bottom[0]->shape().begin(), + vector top_shape(bottom[0]->shape().begin(), bottom[0]->shape().begin() + axis_); top[0]->Reshape(top_shape); num_ = bottom[0]->count(0, axis_); @@ -31,7 +31,7 @@ void ReductionLayer::Reshape(const vector*>& bottom, CHECK_EQ(num_, top[0]->count()); if (op_ == ReductionParameter_ReductionOp_SUM || op_ == ReductionParameter_ReductionOp_MEAN) { - vector sum_mult_shape(1, dim_); + vector sum_mult_shape(1, dim_); sum_multiplier_.Reshape(sum_mult_shape); caffe_set(dim_, Dtype(1), sum_multiplier_.mutable_cpu_data()); } @@ -50,7 +50,7 @@ void ReductionLayer::Forward_cpu( mult_data = sum_multiplier_.cpu_data(); } Dtype* top_data = top[0]->mutable_cpu_data(); - for (int i = 0; i < num_; ++i) { + for (int_tp i = 0; i < num_; ++i) { switch (op_) { case ReductionParameter_ReductionOp_SUM: case ReductionParameter_ReductionOp_MEAN: @@ -98,7 +98,7 @@ void ReductionLayer::Backward_cpu(const vector*>& top, } const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - for (int i = 0; i < num_; ++i) { + for (int_tp i = 0; i < num_; ++i) { const Dtype bottom_coeff = (*top_diff) * coeff_; switch (op_) { case ReductionParameter_ReductionOp_SUM: diff --git a/src/caffe/layers/reduction_layer.cu b/src/caffe/layers/reduction_layer.cu index ec3b9cbd2ac..f97f0b98b80 100644 --- a/src/caffe/layers/reduction_layer.cu +++ b/src/caffe/layers/reduction_layer.cu @@ -13,8 +13,8 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* mult_data = NULL; - int bottom_data_off = 0; - int top_data_off = 0; + int_tp bottom_data_off = 0; + int_tp top_data_off = 0; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -22,7 +22,7 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, mult_data = sum_multiplier_.gpu_data(); } Dtype* top_data = top[0]->mutable_cpu_data(); - for (int i = 0; i < num_; ++i) { + for (int_tp i = 0; i < num_; ++i) { switch (op_) { case ReductionParameter_ReductionOp_SUM: case ReductionParameter_ReductionOp_MEAN: @@ -56,7 +56,7 @@ void ReductionLayer::Forward_gpu(const vector*>& bottom, mult_data = sum_multiplier_.gpu_data(); } Dtype* top_data = top[0]->mutable_cpu_data(); - for (int i = 0; i < num_; ++i) { + for (int_tp i = 0; i < num_; ++i) { switch (op_) { case ReductionParameter_ReductionOp_SUM: case ReductionParameter_ReductionOp_MEAN: @@ -118,13 +118,13 @@ void ReductionLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - int bottom_data_off = 0; - int bottom_diff_off = 0; - int top_diff_off = 0; + int_tp bottom_data_off = 0; + int_tp bottom_diff_off = 0; + int_tp top_diff_off = 0; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - for (int i = 0; i < num_; ++i) { + for (int_tp i = 0; i < num_; ++i) { const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_; switch (op_) { case ReductionParameter_ReductionOp_SUM: @@ -151,7 +151,7 @@ void ReductionLayer::Backward_gpu(const vector*>& top, #endif // USE_CUDA } else { #ifdef USE_GREENTEA - for (int i = 0; i < num_; ++i) { + for (int_tp i = 0; i < num_; ++i) { const Dtype bottom_coeff = (*(top_diff + top_diff_off)) * coeff_; switch (op_) { case ReductionParameter_ReductionOp_SUM: diff --git a/src/caffe/layers/relu_layer.cpp b/src/caffe/layers/relu_layer.cpp index cc00319a578..9a8f4ccb4dd 100644 --- a/src/caffe/layers/relu_layer.cpp +++ b/src/caffe/layers/relu_layer.cpp @@ -11,9 +11,9 @@ void ReLULayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { top_data[i] = std::max(bottom_data[i], Dtype(0)) + negative_slope * std::min(bottom_data[i], Dtype(0)); } @@ -27,9 +27,9 @@ void ReLULayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) + negative_slope * (bottom_data[i] <= 0)); } diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index 4590217bfbe..ccd349acc0c 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -13,7 +13,7 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, +__global__ void ReLUForward(const int_tp n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; @@ -26,7 +26,7 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -60,7 +60,7 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void ReLUBackward(const int n, const Dtype* in_diff, +__global__ void ReLUBackward(const int_tp n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { @@ -78,7 +78,7 @@ void ReLULayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/reshape_layer.cpp b/src/caffe/layers/reshape_layer.cpp index ffe970f2689..e71b7ed27af 100644 --- a/src/caffe/layers/reshape_layer.cpp +++ b/src/caffe/layers/reshape_layer.cpp @@ -11,10 +11,10 @@ void ReshapeLayer::LayerSetUp(const vector*>& bottom, inferred_axis_ = -1; copy_axes_.clear(); const BlobShape& top_blob_shape = this->layer_param_.reshape_param().shape(); - const int top_num_axes = top_blob_shape.dim_size(); + const int_tp top_num_axes = top_blob_shape.dim_size(); constant_count_ = 1; - for (int i = 0; i < top_num_axes; ++i) { - const int top_dim = top_blob_shape.dim(i); + for (int_tp i = 0; i < top_num_axes; ++i) { + const int_tp top_dim = top_blob_shape.dim(i); if (top_dim == 0) { copy_axes_.push_back(i); } else if (top_dim == -1) { @@ -30,36 +30,36 @@ void ReshapeLayer::LayerSetUp(const vector*>& bottom, template void ReshapeLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int input_start_axis = this->layer_param_.reshape_param().axis(); - const int start_axis = (input_start_axis >= 0) ? input_start_axis : + const int_tp input_start_axis = this->layer_param_.reshape_param().axis(); + const int_tp start_axis = (input_start_axis >= 0) ? input_start_axis : bottom[0]->num_axes() + input_start_axis + 1; CHECK_GE(start_axis, 0) << "axis " << input_start_axis << " out of range"; CHECK_LE(start_axis, bottom[0]->num_axes()) << "axis " << input_start_axis << " out of range for " << bottom[0]->num_axes() << "-D input blob"; - const int num_axes = this->layer_param_.reshape_param().num_axes(); + const int_tp num_axes = this->layer_param_.reshape_param().num_axes(); CHECK_GE(num_axes, -1) << "num_axes must be >= 0, or -1 for all"; - const int end_axis = + const int_tp end_axis = (num_axes == -1) ? bottom[0]->num_axes() : (start_axis + num_axes); CHECK_LE(end_axis, bottom[0]->num_axes()) << "end_axis = axis + num_axes is out of range"; - const int num_axes_replaced = end_axis - start_axis; - const int num_axes_retained = bottom[0]->num_axes() - num_axes_replaced; + const int_tp num_axes_replaced = end_axis - start_axis; + const int_tp num_axes_retained = bottom[0]->num_axes() - num_axes_replaced; const BlobShape& top_blob_shape = this->layer_param_.reshape_param().shape(); - const int num_new_axes = top_blob_shape.dim_size(); - vector top_shape(num_axes_retained + num_new_axes); - int top_shape_index = 0; - for (int i = 0; i < start_axis; ++i) { + const int_tp num_new_axes = top_blob_shape.dim_size(); + vector top_shape(num_axes_retained + num_new_axes); + int_tp top_shape_index = 0; + for (int_tp i = 0; i < start_axis; ++i) { top_shape[top_shape_index++] = bottom[0]->shape(i); } - for (int i = 0; i < num_new_axes; ++i) { + for (int_tp i = 0; i < num_new_axes; ++i) { top_shape[top_shape_index++] = top_blob_shape.dim(i); } - for (int i = end_axis; i < bottom[0]->num_axes(); ++i) { + for (int_tp i = end_axis; i < bottom[0]->num_axes(); ++i) { top_shape[top_shape_index++] = bottom[0]->shape(i); } CHECK_EQ(top_shape_index, top_shape.size()); - for (int i = 0; i < copy_axes_.size(); ++i) { - const int copy_axis_index = copy_axes_[i]; + for (int_tp i = 0; i < copy_axes_.size(); ++i) { + const int_tp copy_axis_index = copy_axes_[i]; CHECK_GT(bottom[0]->num_axes(), start_axis + copy_axis_index) << "new shape contains a 0, but there was no corresponding bottom axis " << "to copy"; @@ -69,17 +69,17 @@ void ReshapeLayer::Reshape(const vector*>& bottom, if (inferred_axis_ >= 0) { // A -1 dim was specified; infer the correct dimension by computing the // product of the other dimensions. - int explicit_count = constant_count_; + int_tp explicit_count = constant_count_; explicit_count *= bottom[0]->count(0, start_axis); explicit_count *= bottom[0]->count(end_axis); - for (int i = 0; i < copy_axes_.size(); ++i) { - const int copy_axis_index = copy_axes_[i]; + for (int_tp i = 0; i < copy_axes_.size(); ++i) { + const int_tp copy_axis_index = copy_axes_[i]; explicit_count *= top_shape[start_axis + copy_axis_index]; } CHECK_EQ(0, bottom[0]->count() % explicit_count) << "bottom count (" << bottom[0]->count() << ") must be divisible by the product of " << "the specified dimensions (" << explicit_count << ")"; - const int inferred_dim = bottom[0]->count() / explicit_count; + const int_tp inferred_dim = bottom[0]->count() / explicit_count; top_shape[start_axis + inferred_axis_] = inferred_dim; } top[0]->Reshape(top_shape); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index cc236fe1e8e..0c717d29945 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -35,13 +35,13 @@ void SigmoidCrossEntropyLossLayer::Forward_cpu( sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); // Compute the loss (negative log likelihood) - const int count = bottom[0]->count(); - const int num = bottom[0]->num(); + const int_tp count = bottom[0]->count(); + const int_tp num = bottom[0]->num(); // Stable version of loss computation from input data const Dtype* input_data = bottom[0]->cpu_data(); const Dtype* target = bottom[1]->cpu_data(); Dtype loss = 0; - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); } @@ -58,8 +58,8 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( } if (propagate_down[0]) { // First, compute the diff - const int count = bottom[0]->count(); - const int num = bottom[0]->num(); + const int_tp count = bottom[0]->count(); + const int_tp num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data(); const Dtype* target = bottom[1]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 41420fb5235..16974023a87 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -22,8 +22,8 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - const int count = bottom[0]->count(); - const int num = bottom[0]->num(); + const int_tp count = bottom[0]->count(); + const int_tp num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); diff --git a/src/caffe/layers/sigmoid_layer.cpp b/src/caffe/layers/sigmoid_layer.cpp index 48c384905bf..a7c2c9d78d7 100644 --- a/src/caffe/layers/sigmoid_layer.cpp +++ b/src/caffe/layers/sigmoid_layer.cpp @@ -17,8 +17,8 @@ void SigmoidLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - const int count = bottom[0]->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = bottom[0]->count(); + for (int_tp i = 0; i < count; ++i) { top_data[i] = sigmoid(bottom_data[i]); } } @@ -31,8 +31,8 @@ void SigmoidLayer::Backward_cpu(const vector*>& top, const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const int count = bottom[0]->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = bottom[0]->count(); + for (int_tp i = 0; i < count; ++i) { const Dtype sigmoid_x = top_data[i]; bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x); } diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index 3ec498b5a5b..695234f9392 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -9,7 +9,7 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { +__global__ void SigmoidForward(const int_tp n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = 1. / (1. + exp(-in[index])); } @@ -21,7 +21,7 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -56,7 +56,7 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void SigmoidBackward(const int n, const Dtype* in_diff, +__global__ void SigmoidBackward(const int_tp n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { const Dtype sigmoid_x = out_data[index]; @@ -73,7 +73,7 @@ void SigmoidLayer::Backward_gpu(const vector*>& top, const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/silence_layer.cpp b/src/caffe/layers/silence_layer.cpp index 7e70ab4329e..dcbc5ea81c1 100644 --- a/src/caffe/layers/silence_layer.cpp +++ b/src/caffe/layers/silence_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void SilenceLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { caffe_set(bottom[i]->count(), Dtype(0), bottom[i]->mutable_cpu_diff()); diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index d0a2fdbb26e..d332dadd079 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -21,7 +21,7 @@ template void SilenceLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 53fd685877c..51c7e6c0006 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -22,10 +22,10 @@ void SliceLayer::LayerSetUp(const vector*>& bottom, template void SliceLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int num_axes = bottom[0]->num_axes(); + const int_tp num_axes = bottom[0]->num_axes(); const SliceParameter& slice_param = this->layer_param_.slice_param(); if (slice_param.has_slice_dim()) { - slice_axis_ = static_cast(slice_param.slice_dim()); + slice_axis_ = static_cast(slice_param.slice_dim()); // Don't allow negative indexing for slice_dim, a uint32 -- almost // certainly unintended. CHECK_GE(slice_axis_, 0) << "casting slice_dim from uint32 to int32 " @@ -35,23 +35,23 @@ void SliceLayer::Reshape(const vector*>& bottom, } else { slice_axis_ = bottom[0]->CanonicalAxisIndex(slice_param.axis()); } - vector top_shape = bottom[0]->shape(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + vector top_shape = bottom[0]->shape(); + const int_tp bottom_slice_axis = bottom[0]->shape(slice_axis_); num_slices_ = bottom[0]->count(0, slice_axis_); slice_size_ = bottom[0]->count(slice_axis_ + 1); - int count = 0; + int_tp count = 0; if (slice_point_.size() != 0) { CHECK_EQ(slice_point_.size(), top.size() - 1); CHECK_LE(top.size(), bottom_slice_axis); - int prev = 0; - vector slices; - for (int i = 0; i < slice_point_.size(); ++i) { + int_tp prev = 0; + vector slices; + for (int_tp i = 0; i < slice_point_.size(); ++i) { CHECK_GT(slice_point_[i], prev); slices.push_back(slice_point_[i] - prev); prev = slice_point_[i]; } slices.push_back(bottom_slice_axis - prev); - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { top_shape[slice_axis_] = slices[i]; top[i]->Reshape(top_shape); count += top[i]->count(); @@ -61,7 +61,7 @@ void SliceLayer::Reshape(const vector*>& bottom, << "Number of top blobs (" << top.size() << ") should evenly " << "divide input slice axis (" << bottom_slice_axis << ")"; top_shape[slice_axis_] = bottom_slice_axis / top.size(); - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { top[i]->Reshape(top_shape); count += top[i]->count(); } @@ -77,15 +77,15 @@ template void SliceLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { if (top.size() == 1) { return; } - int offset_slice_axis = 0; + int_tp offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->cpu_data(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - for (int i = 0; i < top.size(); ++i) { + const int_tp bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int_tp i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_cpu_data(); - const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = + const int_tp top_slice_axis = top[i]->shape(slice_axis_); + for (int_tp n = 0; n < num_slices_; ++n) { + const int_tp top_offset = n * top_slice_axis * slice_size_; + const int_tp bottom_offset = (n * bottom_slice_axis + offset_slice_axis) * slice_size_; caffe_cpu_copy(top_slice_axis * slice_size_, bottom_data + bottom_offset, top_data + top_offset); @@ -98,15 +98,15 @@ template void SliceLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0] || top.size() == 1) { return; } - int offset_slice_axis = 0; + int_tp offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - for (int i = 0; i < top.size(); ++i) { + const int_tp bottom_slice_axis = bottom[0]->shape(slice_axis_); + for (int_tp i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); - const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = + const int_tp top_slice_axis = top[i]->shape(slice_axis_); + for (int_tp n = 0; n < num_slices_; ++n) { + const int_tp top_offset = n * top_slice_axis * slice_size_; + const int_tp bottom_offset = (n * bottom_slice_axis + offset_slice_axis) * slice_size_; caffe_cpu_copy(top_slice_axis * slice_size_, top_diff + top_offset, bottom_diff + bottom_offset); diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 05f2568b438..de17c09fb32 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -8,16 +8,16 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void Slice(const int nthreads, const Dtype* in_data, - const bool forward, const int num_slices, - const int slice_size, const int bottom_slice_axis, - const int top_slice_axis, const int offset_slice_axis, +__global__ void Slice(const int_tp nthreads, const Dtype* in_data, + const bool forward, const int_tp num_slices, + const int_tp slice_size, const int_tp bottom_slice_axis, + const int_tp top_slice_axis, const int_tp offset_slice_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int total_slice_size = slice_size * top_slice_axis; - const int slice_num = index / total_slice_size; - const int slice_index = index % total_slice_size; - const int bottom_index = slice_index + const int_tp total_slice_size = slice_size * top_slice_axis; + const int_tp slice_num = index / total_slice_size; + const int_tp slice_index = index % total_slice_size; + const int_tp bottom_index = slice_index + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; if (forward) { out_data[index] = in_data[bottom_index]; @@ -32,15 +32,15 @@ template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { if (top.size() == 1) { return; } - int offset_slice_axis = 0; + int_tp offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const int_tp bottom_slice_axis = bottom[0]->shape(slice_axis_); const bool kForward = true; - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); - const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; + const int_tp top_slice_axis = top[i]->shape(slice_axis_); + const int_tp top_slice_size = top_slice_axis * slice_size_; + const int_tp nthreads = top_slice_size * num_slices_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -75,15 +75,15 @@ template void SliceLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0] || top.size() == 1) { return; } - int offset_slice_axis = 0; + int_tp offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const int_tp bottom_slice_axis = bottom[0]->shape(slice_axis_); const bool kForward = false; - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); - const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; + const int_tp top_slice_axis = top[i]->shape(slice_axis_); + const int_tp top_slice_size = top_slice_axis * slice_size_; + const int_tp nthreads = top_slice_size * num_slices_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index fbd378102f6..0c5fdac9c8f 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -13,13 +13,13 @@ void SoftmaxLayer::Reshape(const vector*>& bottom, softmax_axis_ = bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis()); top[0]->ReshapeLike(*bottom[0]); - vector mult_dims(1, bottom[0]->shape(softmax_axis_)); + vector mult_dims(1, bottom[0]->shape(softmax_axis_)); sum_multiplier_.Reshape(mult_dims); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); outer_num_ = bottom[0]->count(0, softmax_axis_); inner_num_ = bottom[0]->count(softmax_axis_ + 1); - vector scale_dims = bottom[0]->shape(); + vector scale_dims = bottom[0]->shape(); scale_dims[softmax_axis_] = 1; scale_.Reshape(scale_dims); } @@ -30,16 +30,16 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); Dtype* scale_data = scale_.mutable_cpu_data(); - int channels = bottom[0]->shape(softmax_axis_); - int dim = bottom[0]->count() / outer_num_; + int_tp channels = bottom[0]->shape(softmax_axis_); + int_tp dim = bottom[0]->count() / outer_num_; caffe_cpu_copy(bottom[0]->count(), bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. - for (int i = 0; i < outer_num_; ++i) { + for (int_tp i = 0; i < outer_num_; ++i) { // initialize scale_data to the first plane caffe_cpu_copy(inner_num_, bottom_data + i * dim, scale_data); - for (int j = 0; j < channels; j++) { - for (int k = 0; k < inner_num_; k++) { + for (int_tp j = 0; j < channels; j++) { + for (int_tp k = 0; k < inner_num_; k++) { scale_data[k] = std::max(scale_data[k], bottom_data[i * dim + j * inner_num_ + k]); } @@ -53,7 +53,7 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, caffe_cpu_gemv(CblasTrans, channels, inner_num_, 1., top_data, sum_multiplier_.cpu_data(), 0., scale_data); // division - for (int j = 0; j < channels; j++) { + for (int_tp j = 0; j < channels; j++) { caffe_div(inner_num_, top_data, scale_data, top_data); top_data += inner_num_; } @@ -68,12 +68,12 @@ void SoftmaxLayer::Backward_cpu(const vector*>& top, const Dtype* top_data = top[0]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); Dtype* scale_data = scale_.mutable_cpu_data(); - int channels = top[0]->shape(softmax_axis_); - int dim = top[0]->count() / outer_num_; + int_tp channels = top[0]->shape(softmax_axis_); + int_tp dim = top[0]->count() / outer_num_; caffe_cpu_copy(top[0]->count(), top_diff, bottom_diff); - for (int i = 0; i < outer_num_; ++i) { + for (int_tp i = 0; i < outer_num_; ++i) { // compute dot(top_diff, top_data) and subtract them from the bottom diff - for (int k = 0; k < inner_num_; ++k) { + for (int_tp k = 0; k < inner_num_; ++k) { scale_data[k] = caffe_cpu_strided_dot(channels, bottom_diff + i * dim + k, inner_num_, top_data + i * dim + k, inner_num_); diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index dd59997cca9..60ec1deeadf 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -19,14 +19,14 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void kernel_channel_max(const int num, const int channels, - const int spatial_dim, const Dtype* data, +__global__ void kernel_channel_max(const int_tp num, const int_tp channels, + const int_tp spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { - int n = index / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / spatial_dim; + int_tp s = index % spatial_dim; Dtype maxval = -FLT_MAX; - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; @@ -34,33 +34,33 @@ __global__ void kernel_channel_max(const int num, const int channels, } template -__global__ void kernel_channel_subtract(const int count, const int num, - const int channels, - const int spatial_dim, +__global__ void kernel_channel_subtract(const int_tp count, const int_tp num, + const int_tp channels, + const int_tp spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { - int n = index / channels / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / channels / spatial_dim; + int_tp s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template -__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { +__global__ void kernel_exp(const int_tp count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template -__global__ void kernel_channel_sum(const int num, const int channels, - const int spatial_dim, const Dtype* data, +__global__ void kernel_channel_sum(const int_tp num, const int_tp channels, + const int_tp spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { - int n = index / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / spatial_dim; + int_tp s = index % spatial_dim; Dtype sum = 0; - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; @@ -68,25 +68,25 @@ __global__ void kernel_channel_sum(const int num, const int channels, } template -__global__ void kernel_channel_div(const int count, const int num, - const int channels, const int spatial_dim, +__global__ void kernel_channel_div(const int_tp count, const int_tp num, + const int_tp channels, const int_tp spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { - int n = index / channels / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / channels / spatial_dim; + int_tp s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template -__global__ void kernel_channel_dot(const int num, const int channels, - const int spatial_dim, const Dtype* data_1, +__global__ void kernel_channel_dot(const int_tp num, const int_tp channels, + const int_tp spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { - int n = index / spatial_dim; - int s = index % spatial_dim; + int_tp n = index / spatial_dim; + int_tp s = index % spatial_dim; Dtype dot = 0; - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } @@ -101,10 +101,10 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); - int count = bottom[0]->count(); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - int spatial_dim = bottom[0]->height() * bottom[0]->width(); + int_tp count = bottom[0]->count(); + int_tp num = bottom[0]->num(); + int_tp channels = bottom[0]->channels(); + int_tp spatial_dim = bottom[0]->height() * bottom[0]->width(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -202,10 +202,10 @@ template void SoftmaxLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - int count = top[0]->count(); - int num = top[0]->num(); - int channels = top[0]->channels(); - int spatial_dim = top[0]->height() * top[0]->width(); + int_tp count = top[0]->count(); + int_tp num = top[0]->num(); + int_tp channels = top[0]->channels(); + int_tp spatial_dim = top[0]->height() * top[0]->width(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index 4b78d6f8220..1bd392f00b6 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -57,12 +57,12 @@ void SoftmaxWithLossLayer::Forward_cpu( softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.cpu_data(); const Dtype* label = bottom[1]->cpu_data(); - int dim = prob_.count() / outer_num_; - int count = 0; + int_tp dim = prob_.count() / outer_num_; + int_tp count = 0; Dtype loss = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; j++) { - const int label_value = static_cast(label[i * inner_num_ + j]); + for (int_tp i = 0; i < outer_num_; ++i) { + for (int_tp j = 0; j < inner_num_; j++) { + const int_tp label_value = static_cast(label[i * inner_num_ + j]); if (has_ignore_label_ && label_value == ignore_label_) { continue; } @@ -99,13 +99,13 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, const Dtype* prob_data = prob_.cpu_data(); caffe_cpu_copy(prob_.count(), prob_data, bottom_diff); const Dtype* label = bottom[1]->cpu_data(); - int dim = prob_.count() / outer_num_; - int count = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; ++j) { - const int label_value = static_cast(label[i * inner_num_ + j]); + int_tp dim = prob_.count() / outer_num_; + int_tp count = 0; + for (int_tp i = 0; i < outer_num_; ++i) { + for (int_tp j = 0; j < inner_num_; ++j) { + const int_tp label_value = static_cast(label[i * inner_num_ + j]); if (has_ignore_label_ && label_value == ignore_label_) { - for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { + for (int_tp c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { bottom_diff[i * dim + c * inner_num_ + j] = 0; } } else { diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 216787acd3c..bfa577c3085 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -15,17 +15,17 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void SoftmaxLossForwardGPU(const int nthreads, +__global__ void SoftmaxLossForwardGPU(const int_tp nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, - const int num, const int dim, - const int spatial_dim, + const int_tp num, const int_tp dim, + const int_tp spatial_dim, const bool has_ignore_label_, - const int ignore_label_, Dtype* counts) { + const int_tp ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { - const int n = index / spatial_dim; - const int s = index % spatial_dim; - const int label_value = static_cast(label[n * spatial_dim + s]); + const int_tp n = index / spatial_dim; + const int_tp s = index % spatial_dim; + const int_tp label_value = static_cast(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; @@ -48,10 +48,10 @@ void SoftmaxWithLossLayer::Forward_gpu( #ifdef USE_CUDA const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); - const int num = prob_.num(); - const int dim = prob_.count() / num; - const int spatial_dim = prob_.height() * prob_.width(); - const int nthreads = num * spatial_dim; + const int_tp num = prob_.num(); + const int_tp dim = prob_.count() / num; + const int_tp spatial_dim = prob_.height() * prob_.width(); + const int_tp nthreads = num * spatial_dim; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. @@ -90,10 +90,10 @@ void SoftmaxWithLossLayer::Forward_gpu( cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); - const int num = prob_.num(); - const int dim = prob_.count() / num; - const int spatial_dim = prob_.height() * prob_.width(); - const int nthreads = num * spatial_dim; + const int_tp num = prob_.num(); + const int_tp dim = prob_.count() / num; + const int_tp spatial_dim = prob_.height() * prob_.width(); + const int_tp nthreads = num * spatial_dim; cl_mem loss_data = (cl_mem) (bottom[0]->mutable_gpu_diff()); cl_mem counts = (cl_mem) (prob_.mutable_gpu_diff()); @@ -133,21 +133,21 @@ void SoftmaxWithLossLayer::Forward_gpu( #ifdef USE_CUDA template -__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, +__global__ void SoftmaxLossBackwardGPU(const int_tp nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, - const int num, const int dim, - const int spatial_dim, + const int_tp num, const int_tp dim, + const int_tp spatial_dim, const bool has_ignore_label_, - const int ignore_label_, Dtype* counts) { - const int channels = dim / spatial_dim; + const int_tp ignore_label_, Dtype* counts) { + const int_tp channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { - const int n = index / spatial_dim; - const int s = index % spatial_dim; - const int label_value = static_cast(label[n * spatial_dim + s]); + const int_tp n = index / spatial_dim; + const int_tp s = index % spatial_dim; + const int_tp label_value = static_cast(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { - for (int c = 0; c < channels; ++c) { + for (int_tp c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; @@ -175,10 +175,10 @@ void SoftmaxWithLossLayer::Backward_gpu( const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); - const int num = prob_.num(); - const int dim = prob_.count() / num; - const int spatial_dim = prob_.height() * prob_.width(); - const int nthreads = num * spatial_dim; + const int_tp num = prob_.num(); + const int_tp dim = prob_.count() / num; + const int_tp spatial_dim = prob_.height() * prob_.width(); + const int_tp nthreads = num * spatial_dim; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); @@ -218,10 +218,10 @@ void SoftmaxWithLossLayer::Backward_gpu( greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); - const int num = prob_.num(); - const int dim = prob_.count() / num; - const int spatial_dim = prob_.height() * prob_.width(); - const int nthreads = num * spatial_dim; + const int_tp num = prob_.num(); + const int_tp dim = prob_.count() / num; + const int_tp spatial_dim = prob_.height() * prob_.width(); + const int_tp nthreads = num * spatial_dim; cl_mem counts = (cl_mem)(prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 59a821976c8..f4a80b65ea1 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -10,7 +10,7 @@ template void SplitLayer::Reshape(const vector*>& bottom, const vector*>& top) { count_ = bottom[0]->count(); - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { // Do not allow in-place computation in the SplitLayer. Instead, share data // by reference in the forward pass, and keep separate diff allocations in // the backward pass. (Technically, it should be possible to share the diff @@ -26,7 +26,7 @@ void SplitLayer::Reshape(const vector*>& bottom, template void SplitLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { top[i]->ShareData(*bottom[0]); } } @@ -42,7 +42,7 @@ void SplitLayer::Backward_cpu(const vector*>& top, caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(), bottom[0]->mutable_cpu_diff()); // Add remaining top blob diffs. - for (int i = 2; i < top.size(); ++i) { + for (int_tp i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff); diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu index 15fc064dedc..ebc945a6c53 100644 --- a/src/caffe/layers/split_layer.cu +++ b/src/caffe/layers/split_layer.cu @@ -9,7 +9,7 @@ namespace caffe { template void SplitLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - for (int i = 0; i < top.size(); ++i) { + for (int_tp i = 0; i < top.size(); ++i) { top[i]->ShareData(*bottom[0]); } } @@ -31,7 +31,7 @@ void SplitLayer::Backward_gpu(const vector*>& top, caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), bottom[0]->mutable_gpu_diff()); // Add remaining top blob diffs. - for (int i = 2; i < top.size(); ++i) { + for (int_tp i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); @@ -52,7 +52,7 @@ void SplitLayer::Backward_gpu(const vector*>& top, (cl_mem) (top[1]->gpu_diff()), 0, (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); // Add remaining top blob diffs. - for (int i = 2; i < top.size(); ++i) { + for (int_tp i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); greentea_gpu_axpy(this->device_->id(), count_, Dtype(1.), diff --git a/src/caffe/layers/spp_layer.cpp b/src/caffe/layers/spp_layer.cpp index d7622910495..6c5a118f086 100644 --- a/src/caffe/layers/spp_layer.cpp +++ b/src/caffe/layers/spp_layer.cpp @@ -14,25 +14,25 @@ using std::min; using std::max; template -LayerParameter SPPLayer::GetPoolingParam(const int pyramid_level, - const int bottom_h, const int bottom_w, const SPPParameter spp_param) { +LayerParameter SPPLayer::GetPoolingParam(const int_tp pyramid_level, + const int_tp bottom_h, const int_tp bottom_w, const SPPParameter spp_param) { LayerParameter pooling_param; - int num_bins = pow(2, pyramid_level); + int_tp num_bins = pow(2, pyramid_level); // find padding and kernel size so that the pooling is // performed across the entire image - int kernel_h = ceil(bottom_h / static_cast(num_bins)); + int_tp kernel_h = ceil(bottom_h / static_cast(num_bins)); // remainder_h is the min number of pixels that need to be padded before // entire image height is pooled over with the chosen kernel dimension - int remainder_h = kernel_h * num_bins - bottom_h; + int_tp remainder_h = kernel_h * num_bins - bottom_h; // pooling layer pads (2 * pad_h) pixels on the top and bottom of the // image. - int pad_h = (remainder_h + 1) / 2; + int_tp pad_h = (remainder_h + 1) / 2; // similar logic for width - int kernel_w = ceil(bottom_w / static_cast(num_bins)); - int remainder_w = kernel_w * num_bins - bottom_w; - int pad_w = (remainder_w + 1) / 2; + int_tp kernel_w = ceil(bottom_w / static_cast(num_bins)); + int_tp remainder_w = kernel_w * num_bins - bottom_w; + int_tp pad_w = (remainder_w + 1) / 2; pooling_param.mutable_pooling_param()->set_pad_h(pad_h); pooling_param.mutable_pooling_param()->set_pad_w(pad_w); @@ -95,7 +95,7 @@ void SPPLayer::LayerSetUp(const vector*>& bottom, return; } // split layer output holders setup - for (int i = 0; i < pyramid_height_; i++) { + for (int_tp i = 0; i < pyramid_height_; i++) { split_top_vec_.push_back(new Blob()); } @@ -104,7 +104,7 @@ void SPPLayer::LayerSetUp(const vector*>& bottom, split_layer_.reset(new SplitLayer(split_param)); split_layer_->SetUp(bottom, split_top_vec_); - for (int i = 0; i < pyramid_height_; i++) { + for (int_tp i = 0; i < pyramid_height_; i++) { // pooling layer input holders setup pooling_bottom_vecs_.push_back(new vector*>); pooling_bottom_vecs_[i]->push_back(split_top_vec_[i]); @@ -168,7 +168,7 @@ void SPPLayer::Reshape(const vector*>& bottom, return; } split_layer_->Reshape(bottom, split_top_vec_); - for (int i = 0; i < pyramid_height_; i++) { + for (int_tp i = 0; i < pyramid_height_; i++) { LayerParameter pooling_param = GetPoolingParam( i, bottom_h_, bottom_w_, spp_param); @@ -192,7 +192,7 @@ void SPPLayer::Forward_cpu(const vector*>& bottom, return; } split_layer_->Forward(bottom, split_top_vec_); - for (int i = 0; i < pyramid_height_; i++) { + for (int_tp i = 0; i < pyramid_height_; i++) { pooling_layers_[i]->Forward( *pooling_bottom_vecs_[i], *pooling_top_vecs_[i]); flatten_layers_[i]->Forward( @@ -213,7 +213,7 @@ void SPPLayer::Backward_cpu(const vector*>& top, } vector concat_propagate_down(pyramid_height_, true); concat_layer_->Backward(top, concat_propagate_down, concat_bottom_vec_); - for (int i = 0; i < pyramid_height_; i++) { + for (int_tp i = 0; i < pyramid_height_; i++) { flatten_layers_[i]->Backward( *flatten_top_vecs_[i], propagate_down, *pooling_top_vecs_[i]); pooling_layers_[i]->Backward( diff --git a/src/caffe/layers/tanh_layer.cpp b/src/caffe/layers/tanh_layer.cpp index ee5ed773c74..5d558643fd6 100644 --- a/src/caffe/layers/tanh_layer.cpp +++ b/src/caffe/layers/tanh_layer.cpp @@ -14,8 +14,8 @@ void TanHLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - const int count = bottom[0]->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = bottom[0]->count(); + for (int_tp i = 0; i < count; ++i) { top_data[i] = tanh(bottom_data[i]); } } @@ -28,9 +28,9 @@ void TanHLayer::Backward_cpu(const vector*>& top, const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); Dtype tanhx; - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { tanhx = top_data[i]; bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx); } diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index 8431458746e..85f54cd759c 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -11,7 +11,7 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) { +__global__ void TanHForward(const int_tp n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = tanh(in[index]); } @@ -23,7 +23,7 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -52,7 +52,7 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_CUDA template -__global__ void TanHBackward(const int n, const Dtype* in_diff, +__global__ void TanHBackward(const int_tp n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype tanhx = out_data[index]; @@ -69,7 +69,7 @@ void TanHLayer::Backward_gpu(const vector*>& top, const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/threshold_layer.cpp b/src/caffe/layers/threshold_layer.cpp index 8f72d6c4a51..93dcc6ab2c7 100644 --- a/src/caffe/layers/threshold_layer.cpp +++ b/src/caffe/layers/threshold_layer.cpp @@ -17,8 +17,8 @@ void ThresholdLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - const int count = bottom[0]->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = bottom[0]->count(); + for (int_tp i = 0; i < count; ++i) { top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0); } } diff --git a/src/caffe/layers/threshold_layer.cu b/src/caffe/layers/threshold_layer.cu index fc91eea9894..03dc9900bfb 100644 --- a/src/caffe/layers/threshold_layer.cu +++ b/src/caffe/layers/threshold_layer.cu @@ -13,7 +13,7 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void ThresholdForward(const int n, const Dtype threshold, +__global__ void ThresholdForward(const int_tp n, const Dtype threshold, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > threshold ? 1 : 0; @@ -26,7 +26,7 @@ void ThresholdLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int count = bottom[0]->count(); + const int_tp count = bottom[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/tile_layer.cpp b/src/caffe/layers/tile_layer.cpp index f55008cc53a..9d2133a7ecc 100644 --- a/src/caffe/layers/tile_layer.cpp +++ b/src/caffe/layers/tile_layer.cpp @@ -14,7 +14,7 @@ void TileLayer::Reshape( CHECK(tile_param.has_tiles()) << "Number of tiles must be specified"; tiles_ = tile_param.tiles(); CHECK_GT(tiles_, 0) << "Number of tiles must be positive."; - vector top_shape = bottom[0]->shape(); + vector top_shape = bottom[0]->shape(); top_shape[axis_] = bottom[0]->shape(axis_) * tiles_; top[0]->Reshape(top_shape); outer_dim_ = bottom[0]->count(0, axis_); @@ -26,8 +26,8 @@ void TileLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - for (int i = 0; i < outer_dim_; ++i) { - for (int t = 0; t < tiles_; ++t) { + for (int_tp i = 0; i < outer_dim_; ++i) { + for (int_tp t = 0; t < tiles_; ++t) { caffe_copy(inner_dim_, bottom_data, top_data); top_data += inner_dim_; } @@ -41,10 +41,10 @@ void TileLayer::Backward_cpu(const vector*>& top, if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - for (int i = 0; i < outer_dim_; ++i) { + for (int_tp i = 0; i < outer_dim_; ++i) { caffe_copy(inner_dim_, top_diff, bottom_diff); top_diff += inner_dim_; - for (int t = 1; t < tiles_; ++t) { + for (int_tp t = 1; t < tiles_; ++t) { caffe_axpy(inner_dim_, Dtype(1), top_diff, bottom_diff); top_diff += inner_dim_; } diff --git a/src/caffe/layers/tile_layer.cu b/src/caffe/layers/tile_layer.cu index 2812248f005..3abf28ab829 100644 --- a/src/caffe/layers/tile_layer.cu +++ b/src/caffe/layers/tile_layer.cu @@ -14,14 +14,14 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void Tile(const int nthreads, const Dtype* bottom_data, - const int tile_size, const int num_tiles, const int bottom_tile_axis, +__global__ void Tile(const int_tp nthreads, const Dtype* bottom_data, + const int_tp tile_size, const int_tp num_tiles, const int_tp bottom_tile_axis, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int d = index % tile_size; - const int b = (index / tile_size / num_tiles) % bottom_tile_axis; - const int n = index / tile_size / num_tiles / bottom_tile_axis; - const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + const int_tp d = index % tile_size; + const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int_tp n = index / tile_size / num_tiles / bottom_tile_axis; + const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d; top_data[index] = bottom_data[bottom_index]; } } @@ -32,8 +32,8 @@ void TileLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int bottom_tile_axis = bottom[0]->shape(axis_); - const int nthreads = top[0]->count(); + const int_tp bottom_tile_axis = bottom[0]->shape(axis_); + const int_tp nthreads = top[0]->count(); if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA Tile // NOLINT_NEXT_LINE(whitespace/operators) @@ -60,16 +60,16 @@ void TileLayer::Forward_gpu( #ifdef USE_CUDA template -__global__ void TileBackward(const int nthreads, const Dtype* top_diff, - const int tile_size, const int num_tiles, const int bottom_tile_axis, +__global__ void TileBackward(const int_tp nthreads, const Dtype* top_diff, + const int_tp tile_size, const int_tp num_tiles, const int_tp bottom_tile_axis, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { - const int d = index % tile_size; - const int b = (index / tile_size) % bottom_tile_axis; - const int n = index / tile_size / bottom_tile_axis; + const int_tp d = index % tile_size; + const int_tp b = (index / tile_size) % bottom_tile_axis; + const int_tp n = index / tile_size / bottom_tile_axis; bottom_diff[index] = 0; - int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; - for (int t = 0; t < num_tiles; ++t) { + int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int_tp t = 0; t < num_tiles; ++t) { bottom_diff[index] += top_diff[top_index]; top_index += bottom_tile_axis * tile_size; } @@ -83,9 +83,9 @@ void TileLayer::Backward_gpu(const vector*>& top, if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int bottom_tile_axis = bottom[0]->shape(axis_); - const int tile_size = inner_dim_ / bottom_tile_axis; - const int nthreads = bottom[0]->count(); + const int_tp bottom_tile_axis = bottom[0]->shape(axis_); + const int_tp tile_size = inner_dim_ / bottom_tile_axis; + const int_tp nthreads = bottom[0]->count(); if (this->get_device()->backend() == BACKEND_CUDA) { #ifdef USE_CUDA diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index f8db61c9258..f519da8e0f5 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -68,7 +68,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, this->transform_param_.mirror() || this->transform_param_.crop_size(); if (prefetch_needs_rand) { - const unsigned int prefetch_rng_seed = caffe_rng_rand(); + const uint_tp prefetch_rng_seed = caffe_rng_rand(); prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); } else { prefetch_rng_.reset(); @@ -78,11 +78,11 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, CHECK(infile.good()) << "Failed to open window file " << this->layer_param_.window_data_param().source() << std::endl; - map label_hist; + map label_hist; label_hist.insert(std::make_pair(0, 0)); string hashtag; - int image_index, channels; + int_tp image_index, channels; if (!(infile >> hashtag >> image_index)) { LOG(FATAL) << "Window file is empty"; } @@ -93,7 +93,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, infile >> image_path; image_path = root_folder + image_path; // read image dimensions - vector image_size(3); + vector image_size(3); infile >> image_size[0] >> image_size[1] >> image_size[2]; channels = image_size[0]; image_database_.push_back(std::make_pair(image_path, image_size)); @@ -107,14 +107,14 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, image_database_cache_.push_back(std::make_pair(image_path, datum)); } // read each box - int num_windows; + int_tp num_windows; infile >> num_windows; const float fg_threshold = this->layer_param_.window_data_param().fg_threshold(); const float bg_threshold = this->layer_param_.window_data_param().bg_threshold(); - for (int i = 0; i < num_windows; ++i) { - int label, x1, y1, x2, y2; + for (int_tp i = 0; i < num_windows; ++i) { + int_tp label, x1, y1, x2, y2; float overlap; infile >> label >> overlap >> x1 >> y1 >> x2 >> y2; @@ -129,7 +129,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, // add window to foreground list or background list if (overlap >= fg_threshold) { - int label = window[WindowDataLayer::LABEL]; + int_tp label = window[WindowDataLayer::LABEL]; CHECK_GT(label, 0); fg_windows_.push_back(window); label_hist.insert(std::make_pair(label, 0)); @@ -155,7 +155,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, LOG(INFO) << "Number of images: " << image_index+1; - for (map::iterator it = label_hist.begin(); + for (map::iterator it = label_hist.begin(); it != label_hist.end(); ++it) { LOG(INFO) << "class " << it->first << " has " << label_hist[it->first] << " samples"; @@ -168,11 +168,11 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, << this->layer_param_.window_data_param().crop_mode(); // image - const int crop_size = this->transform_param_.crop_size(); + const int_tp crop_size = this->transform_param_.crop_size(); CHECK_GT(crop_size, 0); - const int batch_size = this->layer_param_.window_data_param().batch_size(); + const int_tp batch_size = this->layer_param_.window_data_param().batch_size(); top[0]->Reshape(batch_size, channels, crop_size, crop_size); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) + for (int_tp i = 0; i < this->PREFETCH_COUNT; ++i) this->prefetch_[i].data_.Reshape( batch_size, channels, crop_size, crop_size); @@ -180,9 +180,9 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label - vector label_shape(1, batch_size); + vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + for (int_tp i = 0; i < this->PREFETCH_COUNT; ++i) { this->prefetch_[i].label_.Reshape(label_shape); } @@ -200,14 +200,14 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, if (has_mean_values_) { CHECK(has_mean_file_ == false) << "Cannot specify mean_file and mean_value at the same time"; - for (int c = 0; c < this->transform_param_.mean_value_size(); ++c) { + for (int_tp c = 0; c < this->transform_param_.mean_value_size(); ++c) { mean_values_.push_back(this->transform_param_.mean_value(c)); } CHECK(mean_values_.size() == 1 || mean_values_.size() == channels) << "Specify either 1 mean_value or as many as channels: " << channels; if (channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity - for (int c = 1; c < channels; ++c) { + for (int_tp c = 1; c < channels; ++c) { mean_values_.push_back(mean_values_[0]); } } @@ -215,7 +215,7 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, } template -unsigned int WindowDataLayer::PrefetchRand() { +uint_tp WindowDataLayer::PrefetchRand() { CHECK(prefetch_rng_); caffe::rng_t* prefetch_rng = static_cast(prefetch_rng_->generator()); @@ -235,16 +235,16 @@ void WindowDataLayer::load_batch(Batch* batch) { Dtype* top_data = batch->data_.mutable_cpu_data(); Dtype* top_label = batch->label_.mutable_cpu_data(); const Dtype scale = this->layer_param_.window_data_param().scale(); - const int batch_size = this->layer_param_.window_data_param().batch_size(); - const int context_pad = this->layer_param_.window_data_param().context_pad(); - const int crop_size = this->transform_param_.crop_size(); + const int_tp batch_size = this->layer_param_.window_data_param().batch_size(); + const int_tp context_pad = this->layer_param_.window_data_param().context_pad(); + const int_tp crop_size = this->transform_param_.crop_size(); const bool mirror = this->transform_param_.mirror(); const float fg_fraction = this->layer_param_.window_data_param().fg_fraction(); Dtype* mean = NULL; - int mean_off = 0; - int mean_width = 0; - int mean_height = 0; + int_tp mean_off = 0; + int_tp mean_width = 0; + int_tp mean_height = 0; if (this->has_mean_file_) { mean = this->data_mean_.mutable_cpu_data(); mean_off = (this->data_mean_.width() - crop_size) / 2; @@ -259,17 +259,17 @@ void WindowDataLayer::load_batch(Batch* batch) { // zero out batch caffe_set(batch->data_.count(), Dtype(0), top_data); - const int num_fg = static_cast(static_cast(batch_size) + const int_tp num_fg = static_cast(static_cast(batch_size) * fg_fraction); - const int num_samples[2] = { batch_size - num_fg, num_fg }; + const int_tp num_samples[2] = { batch_size - num_fg, num_fg }; - int item_id = 0; + int_tp item_id = 0; // sample from bg set then fg set - for (int is_fg = 0; is_fg < 2; ++is_fg) { - for (int dummy = 0; dummy < num_samples[is_fg]; ++dummy) { + for (int_tp is_fg = 0; is_fg < 2; ++is_fg) { + for (int_tp dummy = 0; dummy < num_samples[is_fg]; ++dummy) { // sample a window timer.Start(); - const unsigned int rand_index = PrefetchRand(); + const uint_tp rand_index = PrefetchRand(); vector window = (is_fg) ? fg_windows_[rand_index % fg_windows_.size()] : bg_windows_[rand_index % bg_windows_.size()]; @@ -277,7 +277,7 @@ void WindowDataLayer::load_batch(Batch* batch) { bool do_mirror = mirror && PrefetchRand() % 2; // load the image containing the window - pair > image = + pair > image = image_database_[window[WindowDataLayer::IMAGE_INDEX]]; cv::Mat cv_img; @@ -294,16 +294,16 @@ void WindowDataLayer::load_batch(Batch* batch) { } read_time += timer.MicroSeconds(); timer.Start(); - const int channels = cv_img.channels(); + const int_tp channels = cv_img.channels(); // crop window out of image and warp it - int x1 = window[WindowDataLayer::X1]; - int y1 = window[WindowDataLayer::Y1]; - int x2 = window[WindowDataLayer::X2]; - int y2 = window[WindowDataLayer::Y2]; + int_tp x1 = window[WindowDataLayer::X1]; + int_tp y1 = window[WindowDataLayer::Y1]; + int_tp x2 = window[WindowDataLayer::X2]; + int_tp y2 = window[WindowDataLayer::Y2]; - int pad_w = 0; - int pad_h = 0; + int_tp pad_w = 0; + int_tp pad_h = 0; if (context_pad > 0 || use_square) { // scale factor by which to expand the original region // such that after warping the expanded region to crop_size x crop_size @@ -323,20 +323,20 @@ void WindowDataLayer::load_batch(Batch* batch) { half_height = half_width; } } - x1 = static_cast(round(center_x - half_width*context_scale)); - x2 = static_cast(round(center_x + half_width*context_scale)); - y1 = static_cast(round(center_y - half_height*context_scale)); - y2 = static_cast(round(center_y + half_height*context_scale)); + x1 = static_cast(round(center_x - half_width*context_scale)); + x2 = static_cast(round(center_x + half_width*context_scale)); + y1 = static_cast(round(center_y - half_height*context_scale)); + y2 = static_cast(round(center_y + half_height*context_scale)); // the expanded region may go outside of the image // so we compute the clipped (expanded) region and keep track of // the extent beyond the image - int unclipped_height = y2-y1+1; - int unclipped_width = x2-x1+1; - int pad_x1 = std::max(0, -x1); - int pad_y1 = std::max(0, -y1); - int pad_x2 = std::max(0, x2 - cv_img.cols + 1); - int pad_y2 = std::max(0, y2 - cv_img.rows + 1); + int_tp unclipped_height = y2-y1+1; + int_tp unclipped_width = x2-x1+1; + int_tp pad_x1 = std::max(0L, -x1); + int_tp pad_y1 = std::max(0L, -y1); + int_tp pad_x2 = std::max(0L, x2 - cv_img.cols + 1); + int_tp pad_y2 = std::max(0L, y2 - cv_img.rows + 1); // clip bounds x1 = x1 + pad_x1; x2 = x2 - pad_x2; @@ -347,8 +347,8 @@ void WindowDataLayer::load_batch(Batch* batch) { CHECK_LT(x2, cv_img.cols); CHECK_LT(y2, cv_img.rows); - int clipped_height = y2-y1+1; - int clipped_width = x2-x1+1; + int_tp clipped_height = y2-y1+1; + int_tp clipped_width = x2-x1+1; // scale factors that would be used to warp the unclipped // expanded region @@ -359,13 +359,13 @@ void WindowDataLayer::load_batch(Batch* batch) { // size to warp the clipped expanded region to cv_crop_size.width = - static_cast(round(static_cast(clipped_width)*scale_x)); + static_cast(round(static_cast(clipped_width)*scale_x)); cv_crop_size.height = - static_cast(round(static_cast(clipped_height)*scale_y)); - pad_x1 = static_cast(round(static_cast(pad_x1)*scale_x)); - pad_x2 = static_cast(round(static_cast(pad_x2)*scale_x)); - pad_y1 = static_cast(round(static_cast(pad_y1)*scale_y)); - pad_y2 = static_cast(round(static_cast(pad_y2)*scale_y)); + static_cast(round(static_cast(clipped_height)*scale_y)); + pad_x1 = static_cast(round(static_cast(pad_x1)*scale_x)); + pad_x2 = static_cast(round(static_cast(pad_x2)*scale_x)); + pad_y1 = static_cast(round(static_cast(pad_y1)*scale_y)); + pad_y2 = static_cast(round(static_cast(pad_y2)*scale_y)); pad_h = pad_y1; // if we're mirroring, we mirror the padding too (to be pedantic) @@ -396,17 +396,17 @@ void WindowDataLayer::load_batch(Batch* batch) { } // copy the warped window into top_data - for (int h = 0; h < cv_cropped_img.rows; ++h) { + for (int_tp h = 0; h < cv_cropped_img.rows; ++h) { const uchar* ptr = cv_cropped_img.ptr(h); - int img_index = 0; - for (int w = 0; w < cv_cropped_img.cols; ++w) { - for (int c = 0; c < channels; ++c) { - int top_index = ((item_id * channels + c) * crop_size + h + pad_h) + int_tp img_index = 0; + for (int_tp w = 0; w < cv_cropped_img.cols; ++w) { + for (int_tp c = 0; c < channels; ++c) { + int_tp top_index = ((item_id * channels + c) * crop_size + h + pad_h) * crop_size + w + pad_w; - // int top_index = (c * height + h) * width + w; + // int_tp top_index = (c * height + h) * width + w; Dtype pixel = static_cast(ptr[img_index++]); if (this->has_mean_file_) { - int mean_index = (c * mean_height + h + mean_off + pad_h) + int_tp mean_index = (c * mean_height + h + mean_off + pad_h) * mean_width + w + mean_off + pad_w; top_data[top_index] = (pixel - mean[mean_index]) * scale; } else { @@ -443,9 +443,9 @@ void WindowDataLayer::load_batch(Batch* batch) { std::ofstream top_data_file((string("dump/") + file_id + string("_data.txt")).c_str(), std::ofstream::out | std::ofstream::binary); - for (int c = 0; c < channels; ++c) { - for (int h = 0; h < crop_size; ++h) { - for (int w = 0; w < crop_size; ++w) { + for (int_tp c = 0; c < channels; ++c) { + for (int_tp h = 0; h < crop_size; ++h) { + for (int_tp w = 0; w < crop_size; ++w) { top_data_file.write(reinterpret_cast( &top_data[((item_id * channels + c) * crop_size + h) * crop_size + w]), diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index be429ee5321..304ce79158f 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -56,7 +56,7 @@ void Net::Init(const NetParameter& in_param) { InsertSplits(filtered_param, ¶m); // Basically, build all the layers and set up its connections. name_ = param.name(); - map blob_name_to_idx; + map blob_name_to_idx; set available_blobs; CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0) << "Must specify either input_shape OR deprecated input_dim, not both."; @@ -70,8 +70,8 @@ void Net::Init(const NetParameter& in_param) { } memory_used_ = 0; // set the input blobs - for (int input_id = 0; input_id < param.input_size(); ++input_id) { - const int layer_id = -1; // inputs have fake layer ID -1 + for (int_tp input_id = 0; input_id < param.input_size(); ++input_id) { + const int_tp layer_id = -1; // inputs have fake layer ID -1 AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); } DLOG_IF(INFO, Caffe::root_solver()) @@ -83,7 +83,7 @@ void Net::Init(const NetParameter& in_param) { param_id_vecs_.resize(param.layer_size()); top_id_vecs_.resize(param.layer_size()); bottom_need_backward_.resize(param.layer_size()); - for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { + for (int_tp layer_id = 0; layer_id < param.layer_size(); ++layer_id) { // For non-root solvers, whether this layer is shared from root_net_. bool share_from_root = !Caffe::root_solver() && root_net_->layers_[layer_id]->ShareInParallel(); @@ -112,15 +112,15 @@ void Net::Init(const NetParameter& in_param) { bool need_backward = false; // Figure out this layer's input and output - for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); + for (int_tp bottom_id = 0; bottom_id < layer_param.bottom_size(); ++bottom_id) { - const int blob_id = AppendBottom(param, layer_id, bottom_id, + const int_tp blob_id = AppendBottom(param, layer_id, bottom_id, &available_blobs, &blob_name_to_idx); // If a blob needs backward, this layer should provide it. need_backward |= blob_need_backward_[blob_id]; } - int num_top = layer_param.top_size(); - for (int top_id = 0; top_id < num_top; ++top_id) { + int_tp num_top = layer_param.top_size(); + for (int_tp top_id = 0; top_id < num_top; ++top_id) { AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx); } // If the layer specifies that AutoTopBlobs() -> true and the LayerParameter @@ -128,7 +128,7 @@ void Net::Init(const NetParameter& in_param) { // ExactNumTopBlobs() or MinTopBlobs()), allocate them here. Layer* layer = layers_[layer_id].get(); if (layer->AutoTopBlobs()) { - const int needed_num_top = std::max(layer->MinTopBlobs(), + const int_tp needed_num_top = std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs()); for (; num_top < needed_num_top; ++num_top) { // Add "anonymous" top blobs -- do not modify available_blobs or @@ -142,7 +142,7 @@ void Net::Init(const NetParameter& in_param) { // Set up size of top blobs using root_net_ const vector*>& base_top = root_net_->top_vecs_[layer_id]; const vector*>& this_top = this->top_vecs_[layer_id]; - for (int top_id = 0; top_id < base_top.size(); ++top_id) { + for (int_tp top_id = 0; top_id < base_top.size(); ++top_id) { this_top[top_id]->ReshapeLike(*base_top[top_id]); LOG(INFO) << "Created top blob " << top_id << " (shape: " << this_top[top_id]->shape_string() << ") for shared layer " @@ -154,7 +154,7 @@ void Net::Init(const NetParameter& in_param) { if (Caffe::root_solver()) { LOG(INFO) << "Setting up " << layer_names_[layer_id]; } - for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + for (int_tp top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); } @@ -174,12 +174,12 @@ void Net::Init(const NetParameter& in_param) { DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); } - const int param_size = layer_param.param_size(); - const int num_param_blobs = layers_[layer_id]->blobs().size(); + const int_tp param_size = layer_param.param_size(); + const int_tp num_param_blobs = layers_[layer_id]->blobs().size(); CHECK_LE(param_size, num_param_blobs) << "Too many params specified for layer " << layer_param.name(); ParamSpec default_param_spec; - for (int param_id = 0; param_id < num_param_blobs; ++param_id) { + for (int_tp param_id = 0; param_id < num_param_blobs; ++param_id) { const ParamSpec* param_spec = (param_id < param_size) ? &layer_param.param(param_id) : &default_param_spec; const bool param_need_backward = param_spec->lr_mult() != 0; @@ -187,13 +187,13 @@ void Net::Init(const NetParameter& in_param) { layers_[layer_id]->set_param_propagate_down(param_id, param_need_backward); } - for (int param_id = 0; param_id < num_param_blobs; ++param_id) { + for (int_tp param_id = 0; param_id < num_param_blobs; ++param_id) { AppendParam(param, layer_id, param_id); } // Finally, set the backward flag layer_need_backward_.push_back(need_backward); if (need_backward) { - for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) { + for (int_tp top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) { blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true; } } @@ -207,10 +207,10 @@ void Net::Init(const NetParameter& in_param) { // computation for the entire layer set blobs_under_loss; set blobs_skip_backp; - for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { + for (int_tp layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { bool layer_contributes_loss = false; bool layer_skip_propagate_down = true; - for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + for (int_tp top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; if (layers_[layer_id]->loss(top_id) || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { @@ -226,7 +226,7 @@ void Net::Init(const NetParameter& in_param) { // don't need backpropagation if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { layer_need_backward_[layer_id] = false; - for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + for (int_tp bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); ++bottom_id) { bottom_need_backward_[layer_id][bottom_id] = false; } @@ -244,7 +244,7 @@ void Net::Init(const NetParameter& in_param) { << " does not need backward computation."; } } - for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + for (int_tp bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); ++bottom_id) { if (layer_contributes_loss) { const string& blob_name = @@ -262,9 +262,9 @@ void Net::Init(const NetParameter& in_param) { } // Handle force_backward if needed. if (param.force_backward()) { - for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { + for (int_tp layer_id = 0; layer_id < layers_.size(); ++layer_id) { layer_need_backward_[layer_id] = true; - for (int bottom_id = 0; + for (int_tp bottom_id = 0; bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) { bottom_need_backward_[layer_id][bottom_id] = bottom_need_backward_[layer_id][bottom_id] @@ -273,7 +273,7 @@ void Net::Init(const NetParameter& in_param) { blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] || bottom_need_backward_[layer_id][bottom_id]; } - for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + for (int_tp param_id = 0; param_id < layers_[layer_id]->blobs().size(); ++param_id) { layers_[layer_id]->set_param_propagate_down(param_id, true); } @@ -288,10 +288,10 @@ void Net::Init(const NetParameter& in_param) { net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); net_output_blob_indices_.push_back(blob_name_to_idx[*it]); } - for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) { + for (uint_tp blob_id = 0; blob_id < blob_names_.size(); ++blob_id) { blob_names_index_[blob_names_[blob_id]] = blob_id; } - for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { + for (uint_tp layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { layer_names_index_[layer_names_[layer_id]] = layer_id; } ShareWeights(); @@ -308,7 +308,7 @@ void Net::FilterNet(const NetParameter& param, NetState net_state(param.state()); param_filtered->CopyFrom(param); param_filtered->clear_layer(); - for (int i = 0; i < param.layer_size(); ++i) { + for (int_tp i = 0; i < param.layer_size(); ++i) { const LayerParameter& layer_param = param.layer(i); const string& layer_name = layer_param.name(); CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0) @@ -316,12 +316,12 @@ void Net::FilterNet(const NetParameter& param, // If no include rules are specified, the layer is included by default and // only excluded if it meets one of the exclude rules. bool layer_included = (layer_param.include_size() == 0); - for (int j = 0; layer_included && j < layer_param.exclude_size(); ++j) { + for (int_tp j = 0; layer_included && j < layer_param.exclude_size(); ++j) { if (StateMeetsRule(net_state, layer_param.exclude(j), layer_name)) { layer_included = false; } } - for (int j = 0; !layer_included && j < layer_param.include_size(); ++j) { + for (int_tp j = 0; !layer_included && j < layer_param.include_size(); ++j) { if (StateMeetsRule(net_state, layer_param.include(j), layer_name)) { layer_included = true; } @@ -370,10 +370,10 @@ bool Net::StateMeetsRule(const NetState& state, const NetStateRule& rule, } // Check whether the rule is broken due to stage. The NetState must // contain ALL of the rule's stages to meet it. - for (int i = 0; i < rule.stage_size(); ++i) { + for (int_tp i = 0; i < rule.stage_size(); ++i) { // Check that the NetState contains the rule's ith stage. bool has_stage = false; - for (int j = 0; !has_stage && j < state.stage_size(); ++j) { + for (int_tp j = 0; !has_stage && j < state.stage_size(); ++j) { if (rule.stage(i) == state.stage(j)) {has_stage = true;} } if (!has_stage) { @@ -386,10 +386,10 @@ bool Net::StateMeetsRule(const NetState& state, const NetStateRule& rule, } // Check whether the rule is broken due to not_stage. The NetState must // contain NONE of the rule's not_stages to meet it. - for (int i = 0; i < rule.not_stage_size(); ++i) { + for (int_tp i = 0; i < rule.not_stage_size(); ++i) { // Check that the NetState contains the rule's ith not_stage. bool has_stage = false; - for (int j = 0; !has_stage && j < state.stage_size(); ++j) { + for (int_tp j = 0; !has_stage && j < state.stage_size(); ++j) { if (rule.not_stage(i) == state.stage(j)) {has_stage = true;} } if (has_stage) { @@ -406,9 +406,9 @@ bool Net::StateMeetsRule(const NetState& state, const NetStateRule& rule, // Helper for Net::Init: add a new input or top blob to the net. (Inputs have // layer_id == -1, tops have layer_id >= 0.) template -void Net::AppendTop(const NetParameter& param, const int layer_id, - const int top_id, set* available_blobs, - map* blob_name_to_idx) { +void Net::AppendTop(const NetParameter& param, const int_tp layer_id, + const int_tp top_id, set* available_blobs, + map* blob_name_to_idx) { shared_ptr layer_param( (layer_id >= 0) ? (new LayerParameter(param.layer(layer_id))) : NULL); const string& blob_name = @@ -441,7 +441,7 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, } } shared_ptr > blob_pointer(new Blob()); - const int blob_id = blobs_.size(); + const int_tp blob_id = blobs_.size(); blobs_.push_back(blob_pointer); blob_names_.push_back(blob_name); blob_need_backward_.push_back(false); @@ -471,16 +471,16 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, // Helper for Net::Init: add a new bottom blob to the net. template -int Net::AppendBottom(const NetParameter& param, const int layer_id, - const int bottom_id, set* available_blobs, - map* blob_name_to_idx) { +int_tp Net::AppendBottom(const NetParameter& param, const int_tp layer_id, + const int_tp bottom_id, set* available_blobs, + map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '" << layer_param.name() << "', bottom index " << bottom_id << ")"; } - const int blob_id = (*blob_name_to_idx)[blob_name]; + const int_tp blob_id = (*blob_name_to_idx)[blob_name]; if (Caffe::root_solver()) { LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; } @@ -497,10 +497,10 @@ int Net::AppendBottom(const NetParameter& param, const int layer_id, } template -void Net::AppendParam(const NetParameter& param, const int layer_id, - const int param_id) { +void Net::AppendParam(const NetParameter& param, const int_tp layer_id, + const int_tp param_id) { const LayerParameter& layer_param = layers_[layer_id]->layer_param(); - const int param_size = layer_param.param_size(); + const int_tp param_size = layer_param.param_size(); string param_name = (param_size > param_id) ? layer_param.param(param_id).name() : ""; if (param_name.size()) { @@ -510,7 +510,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, param_display_name << param_id; param_display_names_.push_back(param_display_name.str()); } - const int net_param_id = params_.size(); + const int_tp net_param_id = params_.size(); params_.push_back(layers_[layer_id]->blobs()[param_id]); param_id_vecs_[layer_id].push_back(net_param_id); param_layer_indices_.push_back(make_pair(layer_id, param_id)); @@ -526,7 +526,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, if (param_name.size()) { param_names_index_[param_name] = net_param_id; } - const int learnable_param_id = learnable_params_.size(); + const int_tp learnable_param_id = learnable_params_.size(); learnable_params_.push_back(params_[net_param_id].get()); learnable_param_ids_.push_back(learnable_param_id); has_params_lr_.push_back(param_spec->has_lr_mult()); @@ -535,12 +535,12 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, params_weight_decay_.push_back(param_spec->decay_mult()); } else { // Named param blob with name we've seen before: share params - const int owner_net_param_id = param_names_index_[param_name]; + const int_tp owner_net_param_id = param_names_index_[param_name]; param_owners_.push_back(owner_net_param_id); - const pair& owner_index = + const pair& owner_index = param_layer_indices_[owner_net_param_id]; - const int owner_layer_id = owner_index.first; - const int owner_param_id = owner_index.second; + const int_tp owner_layer_id = owner_index.first; + const int_tp owner_param_id = owner_index.second; LOG_IF(INFO, Caffe::root_solver()) << "Sharing parameters '" << param_name << "' owned by " << "layer '" << layer_names_[owner_layer_id] << "', param " @@ -548,7 +548,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, Blob* this_blob = layers_[layer_id]->blobs()[param_id].get(); Blob* owner_blob = layers_[owner_layer_id]->blobs()[owner_param_id] .get(); - const int param_size = layer_param.param_size(); + const int_tp param_size = layer_param.param_size(); if (param_size > param_id && (layer_param.param(param_id).share_mode() == ParamSpec_DimCheckMode_PERMISSIVE)) { @@ -569,7 +569,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, << "expects shape " << this_blob->shape_string(); } - const int learnable_param_id = learnable_param_ids_[owner_net_param_id]; + const int_tp learnable_param_id = learnable_param_ids_[owner_net_param_id]; learnable_param_ids_.push_back(learnable_param_id); if (param_spec->has_lr_mult()) { if (has_params_lr_[learnable_param_id]) { @@ -594,16 +594,16 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, } template -Dtype Net::ForwardFromTo(int start, int end) { +Dtype Net::ForwardFromTo(int_tp start, int_tp end) { CHECK_GE(start, 0); CHECK_LT(end, layers_.size()); Dtype loss = 0; if (debug_info_) { - for (int i = 0; i < net_input_blobs_.size(); ++i) { + for (int_tp i = 0; i < net_input_blobs_.size(); ++i) { InputDebugInfo(i); } } - for (int i = start; i <= end; ++i) { + for (int_tp i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; @@ -615,12 +615,12 @@ Dtype Net::ForwardFromTo(int start, int end) { } template -Dtype Net::ForwardFrom(int start) { +Dtype Net::ForwardFrom(int_tp start) { return ForwardFromTo(start, layers_.size() - 1); } template -Dtype Net::ForwardTo(int end) { +Dtype Net::ForwardTo(int_tp end) { return ForwardFromTo(0, end); } @@ -638,7 +638,7 @@ template const vector*>& Net::Forward( const vector*> & bottom, Dtype* loss) { // Copy bottom to internal bottom - for (int i = 0; i < bottom.size(); ++i) { + for (int_tp i = 0; i < bottom.size(); ++i) { net_input_blobs_[i]->CopyFrom(*bottom[i]); } return ForwardPrefilled(loss); @@ -651,13 +651,13 @@ string Net::Forward(const string& input_blob_protos, Dtype* loss) { blob_proto_vec.ParseFromString(input_blob_protos); CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size()) << "Incorrect input size."; - for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { + for (int_tp i = 0; i < blob_proto_vec.blobs_size(); ++i) { net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); } } ForwardPrefilled(loss); blob_proto_vec.Clear(); - for (int i = 0; i < net_output_blobs_.size(); ++i) { + for (int_tp i = 0; i < net_output_blobs_.size(); ++i) { net_output_blobs_[i]->ToProto(blob_proto_vec.add_blobs()); } string output; @@ -666,10 +666,10 @@ string Net::Forward(const string& input_blob_protos, Dtype* loss) { } template -void Net::BackwardFromTo(int start, int end) { +void Net::BackwardFromTo(int_tp start, int_tp end) { CHECK_GE(end, 0); CHECK_LT(start, layers_.size()); - for (int i = start; i >= end; --i) { + for (int_tp i = start; i >= end; --i) { if (layer_need_backward_[i]) { layers_[i]->Backward(top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); @@ -681,7 +681,7 @@ void Net::BackwardFromTo(int start, int end) { } template -void Net::InputDebugInfo(const int input_id) { +void Net::InputDebugInfo(const int_tp input_id) { const Blob& blob = *net_input_blobs_[input_id]; const string& blob_name = blob_names_[net_input_blob_indices_[input_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); @@ -692,8 +692,8 @@ void Net::InputDebugInfo(const int input_id) { } template -void Net::ForwardDebugInfo(const int layer_id) { - for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { +void Net::ForwardDebugInfo(const int_tp layer_id) { + for (int_tp top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const Blob& blob = *top_vecs_[layer_id][top_id]; const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); @@ -704,10 +704,10 @@ void Net::ForwardDebugInfo(const int layer_id) { << " data: " << data_abs_val_mean; } } - for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + for (int_tp param_id = 0; param_id < layers_[layer_id]->blobs().size(); ++param_id) { const Blob& blob = *layers_[layer_id]->blobs()[param_id]; - const int net_param_id = param_id_vecs_[layer_id][param_id]; + const int_tp net_param_id = param_id_vecs_[layer_id][param_id]; const string& blob_name = param_display_names_[net_param_id]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); if (Caffe::root_solver()) { @@ -720,9 +720,9 @@ void Net::ForwardDebugInfo(const int layer_id) { } template -void Net::BackwardDebugInfo(const int layer_id) { +void Net::BackwardDebugInfo(const int_tp layer_id) { const vector*>& bottom_vec = bottom_vecs_[layer_id]; - for (int bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) { + for (int_tp bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) { if (!bottom_need_backward_[layer_id][bottom_id]) { continue; } @@ -736,7 +736,7 @@ void Net::BackwardDebugInfo(const int layer_id) { << " diff: " << diff_abs_val_mean; } } - for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + for (int_tp param_id = 0; param_id < layers_[layer_id]->blobs().size(); ++param_id) { if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; @@ -753,9 +753,9 @@ void Net::BackwardDebugInfo(const int layer_id) { } template -void Net::UpdateDebugInfo(const int param_id) { +void Net::UpdateDebugInfo(const int_tp param_id) { const Blob& blob = *params_[param_id]; - const int param_owner = param_owners_[param_id]; + const int_tp param_owner = param_owners_[param_id]; const string& layer_name = layer_names_[param_layer_indices_[param_id].first]; const string& param_display_name = param_display_names_[param_id]; const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); @@ -782,11 +782,11 @@ void Net::UpdateDebugInfo(const int param_id) { template void Net::ShareTrainedLayersWith(const Net* other) { - int num_source_layers = other->layers().size(); - for (int i = 0; i < num_source_layers; ++i) { + int_tp num_source_layers = other->layers().size(); + for (int_tp i = 0; i < num_source_layers; ++i) { Layer* source_layer = other->layers()[i].get(); const string& source_layer_name = other->layer_names()[i]; - int target_layer_id = 0; + int_tp target_layer_id = 0; while (target_layer_id != layer_names_.size() && layer_names_[target_layer_id] != source_layer_name) { ++target_layer_id; @@ -800,7 +800,7 @@ void Net::ShareTrainedLayersWith(const Net* other) { ->blobs(); CHECK_EQ(target_blobs.size(), source_layer->blobs().size()) << "Incompatible number of blobs for layer " << source_layer_name; - for (int j = 0; j < target_blobs.size(); ++j) { + for (int_tp j = 0; j < target_blobs.size(); ++j) { Blob* source_blob = source_layer->blobs()[j].get(); CHECK(target_blobs[j]->shape() == source_blob->shape()) << "Cannot share param " << j << " weights from layer '" @@ -813,12 +813,12 @@ void Net::ShareTrainedLayersWith(const Net* other) { } template -void Net::BackwardFrom(int start) { +void Net::BackwardFrom(int_tp start) { BackwardFromTo(start, 0); } template -void Net::BackwardTo(int end) { +void Net::BackwardTo(int_tp end) { BackwardFromTo(layers_.size() - 1, end); } @@ -827,7 +827,7 @@ void Net::Backward() { BackwardFromTo(layers_.size() - 1, 0); if (debug_info_) { Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; - for (int i = 0; i < learnable_params_.size(); ++i) { + for (int_tp i = 0; i < learnable_params_.size(); ++i) { asum_data += learnable_params_[i]->asum_data(); asum_diff += learnable_params_[i]->asum_diff(); sumsq_data += learnable_params_[i]->sumsq_data(); @@ -843,18 +843,18 @@ void Net::Backward() { template void Net::Reshape() { - for (int i = 0; i < layers_.size(); ++i) { + for (int_tp i = 0; i < layers_.size(); ++i) { layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); } } template void Net::CopyTrainedLayersFrom(const NetParameter& param) { - int num_source_layers = param.layer_size(); - for (int i = 0; i < num_source_layers; ++i) { + int_tp num_source_layers = param.layer_size(); + for (int_tp i = 0; i < num_source_layers; ++i) { const LayerParameter& source_layer = param.layer(i); const string& source_layer_name = source_layer.name(); - int target_layer_id = 0; + int_tp target_layer_id = 0; while (target_layer_id != layer_names_.size() && layer_names_[target_layer_id] != source_layer_name) { ++target_layer_id; @@ -868,7 +868,7 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { ->blobs(); CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) << "Incompatible number of blobs for layer " << source_layer_name; - for (int j = 0; j < target_blobs.size(); ++j) { + for (int_tp j = 0; j < target_blobs.size(); ++j) { if (!target_blobs[j]->ShapeEquals(source_layer.blobs(j))) { Blob source_blob; const bool kReshape = true; @@ -911,14 +911,14 @@ void Net::CopyTrainedLayersFromHDF5(const string trained_filename) { CHECK_GE(file_hid, 0) << "Couldn't open " << trained_filename; hid_t data_hid = H5Gopen2(file_hid, "data", H5P_DEFAULT); CHECK_GE(data_hid, 0) << "Error reading weights from " << trained_filename; - int num_layers = hdf5_get_num_links(data_hid); - for (int i = 0; i < num_layers; ++i) { + int_tp num_layers = hdf5_get_num_links(data_hid); + for (int_tp i = 0; i < num_layers; ++i) { string source_layer_name = hdf5_get_name_by_idx(data_hid, i); if (!layer_names_index_.count(source_layer_name)) { DLOG(INFO) << "Ignoring source layer " << source_layer_name; continue; } - int target_layer_id = layer_names_index_[source_layer_name]; + int_tp target_layer_id = layer_names_index_[source_layer_name]; DLOG(INFO) << "Copying source layer " << source_layer_name; vector > >& target_blobs = layers_[target_layer_id]->blobs(); @@ -927,14 +927,14 @@ void Net::CopyTrainedLayersFromHDF5(const string trained_filename) { CHECK_GE(layer_hid, 0) << "Error reading weights from " << trained_filename; // Check that source layer doesn't have more params than target layer - int num_source_params = hdf5_get_num_links(layer_hid); + int_tp num_source_params = hdf5_get_num_links(layer_hid); CHECK_LE(num_source_params, target_blobs.size()) << "Incompatible number of blobs for layer " << source_layer_name; - for (int j = 0; j < target_blobs.size(); ++j) { + for (int_tp j = 0; j < target_blobs.size(); ++j) { ostringstream oss; oss << j; string dataset_name = oss.str(); - int target_net_param_id = param_id_vecs_[target_layer_id][j]; + int_tp target_net_param_id = param_id_vecs_[target_layer_id][j]; if (!H5Lexists(layer_hid, dataset_name.c_str(), H5P_DEFAULT)) { // Target param doesn't exist in source weights... if (param_owners_[target_net_param_id] != -1) { @@ -959,11 +959,11 @@ void Net::ToProto(NetParameter* param, bool write_diff) const { param->Clear(); param->set_name(name_); // Add bottom and top - for (int i = 0; i < net_input_blob_indices_.size(); ++i) { + for (int_tp i = 0; i < net_input_blob_indices_.size(); ++i) { param->add_input(blob_names_[net_input_blob_indices_[i]]); } DLOG(INFO)<< "Serializing " << layers_.size() << " layers"; - for (int i = 0; i < layers_.size(); ++i) { + for (int_tp i = 0; i < layers_.size(); ++i) { LayerParameter* layer_param = param->add_layer(); layers_[i]->ToProto(layer_param, write_diff); } @@ -985,7 +985,7 @@ void Net::ToHDF5(const string& filename, bool write_diff) const { H5P_DEFAULT); CHECK_GE(diff_hid, 0) << "Error saving weights to " << filename << "."; } - for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { + for (int_tp layer_id = 0; layer_id < layers_.size(); ++layer_id) { const LayerParameter& layer_param = layers_[layer_id]->layer_param(); string layer_name = layer_param.name(); hid_t layer_data_hid = H5Gcreate2(data_hid, layer_name.c_str(), @@ -999,11 +999,11 @@ void Net::ToHDF5(const string& filename, bool write_diff) const { CHECK_GE(layer_diff_hid, 0) << "Error saving weights to " << filename << "."; } - int num_params = layers_[layer_id]->blobs().size(); - for (int param_id = 0; param_id < num_params; ++param_id) { + int_tp num_params = layers_[layer_id]->blobs().size(); + for (int_tp param_id = 0; param_id < num_params; ++param_id) { ostringstream dataset_name; dataset_name << param_id; - const int net_param_id = param_id_vecs_[layer_id][param_id]; + const int_tp net_param_id = param_id_vecs_[layer_id][param_id]; if (param_owners_[net_param_id] == -1) { // Only save params that own themselves hdf5_save_nd_dataset(layer_data_hid, dataset_name.str(), @@ -1029,14 +1029,14 @@ void Net::ToHDF5(const string& filename, bool write_diff) const { template void Net::Update() { - for (int i = 0; i < learnable_params_.size(); ++i) { + for (int_tp i = 0; i < learnable_params_.size(); ++i) { learnable_params_[i]->Update(); } } template void Net::ClearParamDiffs() { - for (int i = 0; i < learnable_params_.size(); ++i) { + for (int_tp i = 0; i < learnable_params_.size(); ++i) { Blob* blob = learnable_params_[i]; switch (Caffe::mode()) { case Caffe::CPU: @@ -1067,7 +1067,7 @@ void Net::ClearParamDiffs() { template void Net::ShareWeights() { - for (int i = 0; i < params_.size(); ++i) { + for (int_tp i = 0; i < params_.size(); ++i) { if (param_owners_[i] < 0) { continue; } params_[i]->ShareData(*params_[param_owners_[i]]); params_[i]->ShareDiff(*params_[param_owners_[i]]); diff --git a/src/caffe/opencl/ocl_dev_ptr.cpp b/src/caffe/opencl/ocl_dev_ptr.cpp index a9965366adf..361acb4c471 100644 --- a/src/caffe/opencl/ocl_dev_ptr.cpp +++ b/src/caffe/opencl/ocl_dev_ptr.cpp @@ -15,7 +15,7 @@ Type* ocl_dev_ptr::get() { } template -std::ptrdiff_t ocl_dev_ptr::off() { +std::size_t ocl_dev_ptr::off() { return 0; } diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index c7c36c34d3f..ef45be93bcb 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -34,7 +34,7 @@ enum Op { template static void apply_buffers(const vector*>& blobs, Dtype* buffer, - size_t total_size, Op op) { + uint_tp total_size, Op op) { Dtype* ptr = buffer; for (int i = 0; i < blobs.size(); ++i) { int size = blobs[i]->count(); @@ -67,8 +67,8 @@ static void apply_buffers(const vector*>& blobs, Dtype* buffer, // Buffer size necessary to store given blobs template -static size_t total_size(const vector*>& params) { - size_t size = 0; +static uint_tp total_size(const vector*>& params) { + uint_tp size = 0; for (int i = 0; i < params.size(); ++i) size += params[i]->count(); // Size have at least one byte, otherwise cudaMalloc fails if net has no diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index dcb23109f70..e552ce6a5ed 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -15,10 +15,10 @@ message BlobProto { repeated double double_diff = 9 [packed = true]; // 4D dimensions -- deprecated. Use "shape" instead. - optional int32 num = 1 [default = 0]; - optional int32 channels = 2 [default = 0]; - optional int32 height = 3 [default = 0]; - optional int32 width = 4 [default = 0]; + optional int64 num = 1 [default = 0]; + optional int64 channels = 2 [default = 0]; + optional int64 height = 3 [default = 0]; + optional int64 width = 4 [default = 0]; } // The BlobProtoVector is simply a way to pass multiple blobproto instances @@ -28,12 +28,12 @@ message BlobProtoVector { } message Datum { - optional int32 channels = 1; - optional int32 height = 2; - optional int32 width = 3; + optional int64 channels = 1; + optional int64 height = 2; + optional int64 width = 3; // the actual image data, in bytes optional bytes data = 4; - optional int32 label = 5; + optional int64 label = 5; // Optionally, the datum could also hold float data. repeated float float_data = 6; // If true data contains an encoded image that need to be decoded @@ -50,7 +50,7 @@ message FillerParameter { optional float std = 6 [default = 1]; // the std value in Gaussian filler // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [default = -1]; + optional int64 sparse = 7 [default = -1]; // Normalize the filler variance by fan_in, fan_out, or their average. // Applies to 'xavier' and 'msra' fillers. enum VarianceNorm { @@ -72,7 +72,7 @@ message NetParameter { // If specified, for each input blob there should be four // values specifying the num, channels, height and width of the input blob. // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; + repeated int64 input_dim = 4; // Whether the network will force every layer to carry out backward operation. // If set False, then whether to carry out backward is determined // automatically according to the net structure and learning rates. @@ -97,7 +97,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 40 (last added: momentum2) +// SolverParameter next available ID: 41 (last added: type) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -135,10 +135,10 @@ message SolverParameter { repeated NetState test_state = 27; // The number of iterations for each test net. - repeated int32 test_iter = 3; + repeated int64 test_iter = 3; // The number of iterations between two testing phases. - optional int32 test_interval = 4 [default = 0]; + optional int64 test_interval = 4 [default = 0]; optional bool test_compute_loss = 19 [default = false]; // If true, run an initial test pass before the first iteration, // ensuring memory availability and printing the starting value of the loss. @@ -146,12 +146,12 @@ message SolverParameter { optional float base_lr = 5; // The base learning rate // the number of iterations between displaying info. If display = 0, no info // will be displayed. - optional int32 display = 6; + optional int64 display = 6; // Display the loss averaged over the last average_loss iterations - optional int32 average_loss = 33 [default = 1]; - optional int32 max_iter = 7; // the maximum number of iterations + optional int64 average_loss = 33 [default = 1]; + optional int64 max_iter = 7; // the maximum number of iterations // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; + optional int64 iter_size = 36 [default = 1]; // The learning rate decay policy. The currently implemented learning rate // policies are as follows: @@ -177,15 +177,15 @@ message SolverParameter { // controlled by weight_decay optional string regularization_type = 29 [default = "L2"]; // the stepsize for learning rate policy "step" - optional int32 stepsize = 13; + optional int64 stepsize = 13; // the stepsize for learning rate policy "multistep" - repeated int32 stepvalue = 34; + repeated int64 stepvalue = 34; // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, // whenever their actual L2 norm is larger. optional float clip_gradients = 35 [default = -1]; - optional int32 snapshot = 14 [default = 0]; // The snapshot interval + optional int64 snapshot = 14 [default = 0]; // The snapshot interval optional string snapshot_prefix = 15; // The prefix for the snapshot. // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. @@ -202,22 +202,15 @@ message SolverParameter { } optional SolverMode solver_mode = 17 [default = GPU]; // the device_id will that be used in GPU mode. Use device_id = 0 in default. - optional int32 device_id = 18 [default = 0]; + optional int64 device_id = 18 [default = 0]; // If non-negative, the seed with which the Solver will initialize the Caffe // random number generator -- useful for reproducible results. Otherwise, // (and by default) initialize using a seed derived from the system clock. optional int64 random_seed = 20 [default = -1]; - // Solver type - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - optional SolverType solver_type = 30 [default = SGD]; + // type of the solver + optional string type = 40 [default = "SGD"]; + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam optional float delta = 31 [default = 1e-8]; // parameters for the Adam solver @@ -237,10 +230,10 @@ message SolverParameter { // A message that stores the solver snapshots message SolverState { - optional int32 iter = 1; // The current iteration + optional int64 iter = 1; // The current iteration optional string learned_net = 2; // The file that stores the learned net. repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 [default = 0]; // The current step for learning rate + optional int64 current_step = 4 [default = 0]; // The current step for learning rate } enum Phase { @@ -250,7 +243,7 @@ enum Phase { message NetState { optional Phase phase = 1 [default = TEST]; - optional int32 level = 2 [default = 0]; + optional int64 level = 2 [default = 0]; repeated string stage = 3; } @@ -261,8 +254,8 @@ message NetStateRule { // Set the minimum and/or maximum levels in which the layer should be used. // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; + optional int64 min_level = 2; + optional int64 max_level = 3; // Customizable sets of stages to include or exclude. // The net must have ALL of the specified stages and NONE of the specified @@ -336,9 +329,9 @@ message LayerParameter { repeated NetStateRule exclude = 9; // Parameters for Greentea - optional int32 device = 95 [default = -1]; + optional int64 device = 95 [default = -1]; // Parameters for Splitnet - optional int32 buffer = 96 [default = -1]; + optional int64 buffer = 96 [default = -1]; // Parameters for data pre-processing. optional TransformationParameter transform_param = 100; @@ -403,7 +396,7 @@ message TransformationParameter { // Specify if we want to randomly mirror data. optional bool mirror = 2 [default = false]; // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [default = 0]; + optional uint64 crop_size = 3 [default = 0]; // mean_file and mean_value cannot be specified at the same time optional string mean_file = 4; // if specified can be repeated once (would substract it from all the channels) @@ -419,7 +412,7 @@ message TransformationParameter { // Message that stores parameters shared by loss layers message LossParameter { // If specified, ignore instances with the given label. - optional int32 ignore_label = 1; + optional int64 ignore_label = 1; // If true, normalize each batch across all instances (including spatial // dimesions, but not ignored instances); else, divide by batch size only. optional bool normalize = 2 [default = true]; @@ -432,28 +425,28 @@ message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring // class (i.e. argmax). - optional uint32 top_k = 1 [default = 1]; + optional uint64 top_k = 1 [default = 1]; // The "label" axis of the prediction blob, whose argmax corresponds to the // predicted label -- may be negative to index from the end (e.g., -1 for the // last axis). For example, if axis == 1 and the predictions are // (N x C x H x W), the label blob is expected to contain N*H*W ground truth // labels with integer values in {0, 1, ..., C-1}. - optional int32 axis = 2 [default = 1]; + optional int64 axis = 2 [default = 1]; // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; + optional int64 ignore_label = 3; } message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; - optional uint32 top_k = 2 [default = 1]; + optional uint64 top_k = 2 [default = 1]; // The axis along which to maximise -- may be negative to index from the // end (e.g., -1 for the last axis). // By default ArgMaxLayer maximizes over the flattened trailing dimensions // for each index of the first / num dimension. - optional int32 axis = 3; + optional int64 axis = 3; } message ConcatParameter { @@ -461,10 +454,10 @@ message ConcatParameter { // end (e.g., -1 for the last axis). Other axes must have the // same dimension for all the bottom blobs. // By default, ConcatLayer concatenates blobs along the "channels" axis ( - optional int32 axis = 2 [default = 1]; + optional int64 axis = 2 [default = 1]; // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [default = 1]; + optional uint64 concat_dim = 1 [default = 1]; } message ContrastiveLossParameter { @@ -480,25 +473,25 @@ message ContrastiveLossParameter { } message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer + optional uint64 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms // Pad, kernel size, and stride are all given as a single value for equal // dimensions in all spatial dimensions, or once per spatial dimension. - repeated uint32 pad = 3; // The padding size; defaults to 0 - repeated uint32 kernel_size = 4; // The kernel size - repeated uint32 stride = 6; // The stride; defaults to 1 + repeated uint64 pad = 3; // The padding size; defaults to 0 + repeated uint64 kernel_size = 4; // The kernel size + repeated uint64 stride = 6; // The stride; defaults to 1 // For 2D convolution only, the *_h and *_w versions may also be used to // specify both spatial dimensions. - optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) - optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) - optional uint32 kernel_h = 11; // The kernel height (2D only) - optional uint32 kernel_w = 12; // The kernel width (2D only) - optional uint32 stride_h = 13; // The stride height (2D only) - optional uint32 stride_w = 14; // The stride width (2D only) + optional uint64 pad_h = 9 [default = 0]; // The padding height (2D only) + optional uint64 pad_w = 10 [default = 0]; // The padding width (2D only) + optional uint64 kernel_h = 11; // The kernel height (2D only) + optional uint64 kernel_w = 12; // The kernel width (2D only) + optional uint64 stride_h = 13; // The stride height (2D only) + optional uint64 stride_w = 14; // The stride width (2D only) - optional uint32 group = 5 [default = 1]; // The group size for group conv + optional uint64 group = 5 [default = 1]; // The group size for group conv optional FillerParameter weight_filler = 7; // The filler for the weight optional FillerParameter bias_filler = 8; // The filler for the bias @@ -510,9 +503,9 @@ message ConvolutionParameter { optional Engine engine = 15 [default = DEFAULT]; // Strided kernel parameters - repeated uint32 kstride = 18; - optional uint32 kstride_h = 19 [default = 1]; - optional uint32 kstride_w = 20 [default = 1]; + repeated uint64 kstride = 18; + optional uint64 kstride_h = 19 [default = 1]; + optional uint64 kstride_w = 20 [default = 1]; // The axis to interpret as "channels" when performing convolution. // Preceding dimensions are treated as independent inputs; @@ -523,7 +516,7 @@ message ConvolutionParameter { // With (N, C, D, H, W) inputs, and axis == 1, we perform // N independent 3D convolutions, sliding (C/g)-channels // filters across the spatial axes (D, H, W) of the input. - optional int32 axis = 16 [default = 1]; + optional int64 axis = 16 [default = 1]; // Whether to force use of the general ND convolution, even if a specific // implementation for blobs of the appropriate number of spatial dimensions @@ -541,13 +534,13 @@ message DataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. - optional uint32 batch_size = 4; + optional uint64 batch_size = 4; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. // DEPRECATED. Each solver accesses a different subset of the database. - optional uint32 rand_skip = 7 [default = 0]; + optional uint64 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do // simple scaling and subtracting the data mean, if provided. Note that the @@ -556,7 +549,7 @@ message DataParameter { optional string mean_file = 3; // DEPRECATED. See TransformationParameter. Specify if we would like to randomly // crop an image. - optional uint32 crop_size = 5 [default = 0]; + optional uint64 crop_size = 5 [default = 0]; // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror // data. optional bool mirror = 6 [default = false]; @@ -564,7 +557,7 @@ message DataParameter { optional bool force_encoded_color = 9 [default = false]; // Prefetch queue (Number of batches to prefetch to host memory, increase if // data access bandwidth varies). - optional uint32 prefetch = 10 [default = 4]; + optional uint64 prefetch = 10 [default = 4]; } message DropoutParameter { @@ -585,10 +578,10 @@ message DummyDataParameter { repeated BlobShape shape = 6; // 4D dimensions -- deprecated. Use "shape" instead. - repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; + repeated uint64 num = 2; + repeated uint64 channels = 3; + repeated uint64 height = 4; + repeated uint64 width = 5; } message EltwiseParameter { @@ -607,11 +600,11 @@ message EltwiseParameter { // Message that stores parameters used by EmbedLayer message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer + optional uint64 num_output = 1; // The number of outputs for the layer // The input is given as integers to be interpreted as one-hot // vector indices with dimension num_input. Hence num_input should be // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; + optional uint64 input_dim = 2; optional bool bias_term = 3 [default = true]; // Whether to use a bias term optional FillerParameter weight_filler = 4; // The filler for the weight @@ -633,12 +626,12 @@ message ExpParameter { message FlattenParameter { // The first axis to flatten: all preceding axes are retained in the output. // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; + optional int64 axis = 1 [default = 1]; // The last axis to flatten: all following axes are retained in the output. // May be negative to index from the end (e.g., the default -1 for the last // axis). - optional int32 end_axis = 2 [default = -1]; + optional int64 end_axis = 2 [default = -1]; } // Message that stores parameters used by HDF5DataLayer @@ -646,7 +639,7 @@ message HDF5DataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. - optional uint32 batch_size = 2; + optional uint64 batch_size = 2; // Specify whether to shuffle the data. // If shuffle == true, the ordering of the HDF5 files is shuffled, @@ -673,17 +666,17 @@ message ImageDataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; + optional uint64 batch_size = 4 [default = 1]; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [default = 0]; + optional uint64 rand_skip = 7 [default = 0]; // Whether or not ImageLayer should shuffle the list of files at every epoch. optional bool shuffle = 8 [default = false]; // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [default = 0]; - optional uint32 new_width = 10 [default = 0]; + optional uint64 new_height = 9 [default = 0]; + optional uint64 new_width = 10 [default = 0]; // Specify if the images are color or gray optional bool is_color = 11 [default = true]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do @@ -693,7 +686,7 @@ message ImageDataParameter { optional string mean_file = 3; // DEPRECATED. See TransformationParameter. Specify if we would like to randomly // crop an image. - optional uint32 crop_size = 5 [default = 0]; + optional uint64 crop_size = 5 [default = 0]; // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror // data. optional bool mirror = 6 [default = false]; @@ -706,7 +699,7 @@ message InfogainLossParameter { } message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer + optional uint64 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms optional FillerParameter weight_filler = 3; // The filler for the weight optional FillerParameter bias_filler = 4; // The filler for the bias @@ -714,7 +707,7 @@ message InnerProductParameter { // The first axis to be lumped into a single inner product computation; // all preceding axes are retained in the output. // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; + optional int64 axis = 5 [default = 1]; } // Message that stores parameters used by LogLayer @@ -729,7 +722,7 @@ message LogParameter { // Message that stores parameters used by LRNLayer message LRNParameter { - optional uint32 local_size = 1 [default = 5]; + optional uint64 local_size = 1 [default = 5]; optional float alpha = 2 [default = 1.]; optional float beta = 3 [default = 0.75]; enum NormRegion { @@ -747,14 +740,14 @@ message LRNParameter { } message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; + optional uint64 batch_size = 1; + optional uint64 channels = 2; + optional uint64 height = 3; + optional uint64 width = 4; // Dim works in the following order (examples): // batch_size, channels, height, width // batch_size, channels, Z, Y, X - repeated uint32 dim = 5; + repeated uint64 dim = 5; } message MVNParameter { @@ -777,15 +770,15 @@ message PoolingParameter { optional PoolMethod pool = 1 [default = MAX]; // The pooling method // Pad, kernel size, and stride are all given as a single value for equal // dimensions in height and width or as Y, X pairs. - repeated uint32 pad = 4; // The padding size (equal in Y, X), default 0 - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - repeated uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - repeated uint32 stride = 3; // The stride (equal in Y, X), default 1 - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width + repeated uint64 pad = 4; // The padding size (equal in Y, X), default 0 + optional uint64 pad_h = 9 [default = 0]; // The padding height + optional uint64 pad_w = 10 [default = 0]; // The padding width + repeated uint64 kernel_size = 2; // The kernel size (square) + optional uint64 kernel_h = 5; // The kernel height + optional uint64 kernel_w = 6; // The kernel width + repeated uint64 stride = 3; // The stride (equal in Y, X), default 1 + optional uint64 stride_h = 7; // The stride height + optional uint64 stride_w = 8; // The stride width enum Engine { DEFAULT = 0; CAFFE = 1; @@ -795,10 +788,10 @@ message PoolingParameter { // If global_pooling then it will pool over the size of the bottom by doing // kernel_h = bottom->height and kernel_w = bottom->width optional bool global_pooling = 12 [default = false]; - repeated uint32 kstride = 13; // The kernel stride, default 1 - optional uint32 kstride_h = 14; - optional uint32 kstride_w = 15; - optional int32 axis = 16 [default = 1]; + repeated uint64 kstride = 13; // The kernel stride, default 1 + optional uint64 kstride_h = 14; + optional uint64 kstride_w = 15; + optional int64 axis = 16 [default = 1]; } message PowerParameter { @@ -846,7 +839,7 @@ message ReductionParameter { // If axis == 0 (the default), the output Blob always has the empty shape // (count 1), performing reduction across the entire input -- // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; + optional int64 axis = 2 [default = 0]; optional float coeff = 3 [default = 1.0]; // coefficient for output } @@ -927,8 +920,8 @@ message ReshapeParameter { // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; + optional int64 axis = 2 [default = 0]; + optional int64 num_axes = 3 [default = -1]; } message SigmoidParameter { @@ -944,11 +937,11 @@ message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; + optional int64 axis = 3 [default = 1]; + repeated uint64 slice_point = 2; // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 slice_dim = 1 [default = 1]; + optional uint64 slice_dim = 1 [default = 1]; } // Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer @@ -963,7 +956,7 @@ message SoftmaxParameter { // The axis along which to perform the softmax -- may be negative to i // from the end (e.g., -1 for the last axis). // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [default = 1]; + optional int64 axis = 2 [default = 1]; } message TanHParameter { @@ -978,10 +971,10 @@ message TanHParameter { // Message that stores parameters used by TileLayer message TileParameter { // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; + optional int64 axis = 1 [default = 1]; // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; + optional int64 tiles = 2; } // Message that stores parameters used by ThresholdLayer @@ -998,9 +991,9 @@ message WindowDataParameter { optional float scale = 2 [default = 1]; optional string mean_file = 3; // Specify the batch size. - optional uint32 batch_size = 4; + optional uint64 batch_size = 4; // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [default = 0]; + optional uint64 crop_size = 5 [default = 0]; // Specify if we want to randomly mirror data. optional bool mirror = 6 [default = false]; // Foreground (object) overlap threshold @@ -1011,7 +1004,7 @@ message WindowDataParameter { optional float fg_fraction = 9 [default = 0.25]; // Amount of contextual padding to add around a window // (used only by the window_data_layer) - optional uint32 context_pad = 10 [default = 0]; + optional uint64 context_pad = 10 [default = 0]; // Mode for cropping out a detection window // warp: cropped window is warped to a fixed size and aspect ratio // square: the tightest square around the window is cropped @@ -1028,7 +1021,7 @@ message SPPParameter { AVE = 1; STOCHASTIC = 2; } - optional uint32 pyramid_height = 1; + optional uint64 pyramid_height = 1; optional PoolMethod pool = 2 [default = MAX]; // The pooling method enum Engine { DEFAULT = 0; @@ -1139,15 +1132,15 @@ message V0LayerParameter { optional string type = 2; // the string to specify the layer type // Parameters to specify layers with inner products. - optional uint32 num_output = 3; // The number of outputs for the layer + optional uint64 num_output = 3; // The number of outputs for the layer optional bool biasterm = 4 [default = true]; // whether to have bias terms optional FillerParameter weight_filler = 5; // The filler for the weight optional FillerParameter bias_filler = 6; // The filler for the bias - optional uint32 pad = 7 [default = 0]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [default = 1]; // The group size for group conv - optional uint32 stride = 10 [default = 1]; // The stride + optional uint64 pad = 7 [default = 0]; // The padding size + optional uint64 kernelsize = 8; // The kernel size + optional uint64 group = 9 [default = 1]; // The group size for group conv + optional uint64 stride = 10 [default = 1]; // The stride enum PoolMethod { MAX = 0; AVE = 1; @@ -1156,7 +1149,7 @@ message V0LayerParameter { optional PoolMethod pool = 11 [default = MAX]; // The pooling method optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio - optional uint32 local_size = 13 [default = 5]; // for local response norm + optional uint64 local_size = 13 [default = 5]; // for local response norm optional float alpha = 14 [default = 1.]; // for local response norm optional float beta = 15 [default = 0.75]; // for local response norm optional float k = 22 [default = 1.]; @@ -1169,9 +1162,9 @@ message V0LayerParameter { optional float scale = 17 [default = 1]; optional string meanfile = 18; // For data layers, specify the batch size. - optional uint32 batchsize = 19; + optional uint64 batchsize = 19; // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [default = 0]; + optional uint64 cropsize = 20 [default = 0]; // For data layers, specify if we want to randomly mirror data. optional bool mirror = 21 [default = false]; @@ -1187,7 +1180,7 @@ message V0LayerParameter { // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. - optional uint32 rand_skip = 53 [default = 0]; + optional uint64 rand_skip = 53 [default = 0]; // Fields related to detection (det_*) // foreground (object) overlap threshold @@ -1201,7 +1194,7 @@ message V0LayerParameter { // Amount of contextual padding to add around a window // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [default = 0]; + optional uint64 det_context_pad = 58 [default = 0]; // Mode for cropping out a detection window // warp: cropped window is warped to a fixed size and aspect ratio @@ -1209,10 +1202,10 @@ message V0LayerParameter { optional string det_crop_mode = 59 [default = "warp"]; // For ReshapeLayer, one needs to specify the new dimensions. - optional int32 new_num = 60 [default = 0]; - optional int32 new_channels = 61 [default = 0]; - optional int32 new_height = 62 [default = 0]; - optional int32 new_width = 63 [default = 0]; + optional int64 new_num = 60 [default = 0]; + optional int64 new_channels = 61 [default = 0]; + optional int64 new_height = 62 [default = 0]; + optional int64 new_width = 63 [default = 0]; // Whether or not ImageLayer should shuffle the list of files at every epoch. // It will also resize images if new_height or new_width are not zero. @@ -1221,7 +1214,7 @@ message V0LayerParameter { // For ConcatLayer, one needs to specify the dimension for concatenation, and // the other dimensions must be the same for all the bottom blobs. // By default it will concatenate blobs along the channels dimension. - optional uint32 concat_dim = 65 [default = 1]; + optional uint64 concat_dim = 65 [default = 1]; optional HDF5OutputParameter hdf5_output_param = 1001; } @@ -1239,7 +1232,7 @@ message PReLUParameter { message AffinityParameter { // Offset parameter to change the channel to use for creating an affinity graph // Defined once per bottom blob - repeated int32 offset = 1; + repeated int64 offset = 1; } message MergeCropParameter { diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 9ee65a575df..20e37b334ea 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -72,7 +72,7 @@ void Solver::Init(const SolverParameter& param) { template void Solver::InitTrainNet() { - const int num_train_nets = param_.has_net() + param_.has_net_param() + const int_tp num_train_nets = param_.has_net() + param_.has_net_param() + param_.has_train_net() + param_.has_train_net_param(); const string& field_names = "net, net_param, train_net, train_net_param"; CHECK_GE(num_train_nets, 1)<< "SolverParameter must specify a train net " @@ -120,12 +120,12 @@ void Solver::InitTestNets() { CHECK(Caffe::root_solver()); const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); - const int num_generic_nets = has_net_param + has_net_file; + const int_tp num_generic_nets = has_net_param + has_net_file; CHECK_LE(num_generic_nets, 1) << "Both net_param and net_file may not be specified."; - const int num_test_net_params = param_.test_net_param_size(); - const int num_test_net_files = param_.test_net_size(); - const int num_test_nets = num_test_net_params + num_test_net_files; + const int_tp num_test_net_params = param_.test_net_param_size(); + const int_tp num_test_net_files = param_.test_net_size(); + const int_tp num_test_nets = num_test_net_params + num_test_net_files; if (num_generic_nets) { CHECK_GE(param_.test_iter_size(), num_test_nets) << "test_iter must be specified for each test network."; @@ -138,8 +138,8 @@ void Solver::InitTestNets() { // test networks -- the actual number is given by the number of remaining // test_iters after any test nets specified by test_net_param and/or test_net // are evaluated. - const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; - const int num_test_net_instances = num_test_nets + num_generic_net_instances; + const int_tp num_generic_net_instances = param_.test_iter_size() - num_test_nets; + const int_tp num_test_net_instances = num_test_nets + num_generic_net_instances; if (param_.test_state_size()) { CHECK_EQ(param_.test_state_size(), num_test_net_instances) << "test_state must be unspecified or specified once per test net."; @@ -147,33 +147,33 @@ void Solver::InitTestNets() { if (num_test_net_instances) { CHECK_GT(param_.test_interval(), 0); } - int test_net_id = 0; + int_tp test_net_id = 0; vector sources(num_test_net_instances); vector net_params(num_test_net_instances); - for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { + for (int_tp i = 0; i < num_test_net_params; ++i, ++test_net_id) { sources[test_net_id] = "test_net_param"; net_params[test_net_id].CopyFrom(param_.test_net_param(i)); } - for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { + for (int_tp i = 0; i < num_test_net_files; ++i, ++test_net_id) { sources[test_net_id] = "test_net file: " + param_.test_net(i); ReadNetParamsFromTextFileOrDie(param_.test_net(i), &net_params[test_net_id]); } - const int remaining_test_nets = param_.test_iter_size() - test_net_id; + const int_tp remaining_test_nets = param_.test_iter_size() - test_net_id; if (has_net_param) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + for (int_tp i = 0; i < remaining_test_nets; ++i, ++test_net_id) { sources[test_net_id] = "net_param"; net_params[test_net_id].CopyFrom(param_.net_param()); } } if (has_net_file) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + for (int_tp i = 0; i < remaining_test_nets; ++i, ++test_net_id) { sources[test_net_id] = "net file: " + param_.net(); ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); } } test_nets_.resize(num_test_net_instances); - for (int i = 0; i < num_test_net_instances; ++i) { + for (int_tp i = 0; i < num_test_net_instances; ++i) { // Set the correct NetState. We start with the solver defaults (lowest // precedence); then, merge in any NetState specified by the net_param // itself; finally, merge in any NetState specified by the test_state @@ -198,11 +198,11 @@ void Solver::InitTestNets() { } template -void Solver::Step(int iters) { +void Solver::Step(int_tp iters) { vector*> bottom_vec; - const int start_iter = iter_; - const int stop_iter = iter_ + iters; - int average_loss = this->param_.average_loss(); + const int_tp start_iter = iter_; + const int_tp stop_iter = iter_ + iters; + int_tp average_loss = this->param_.average_loss(); vector losses; Dtype smoothed_loss = 0; @@ -219,24 +219,24 @@ void Solver::Step(int iters) { } } - for (int i = 0; i < callbacks_.size(); ++i) { + for (int_tp i = 0; i < callbacks_.size(); ++i) { callbacks_[i]->on_start(); } const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); // accumulate the loss and gradient Dtype loss = 0; - for (int i = 0; i < param_.iter_size(); ++i) { + for (int_tp i = 0; i < param_.iter_size(); ++i) { loss += net_->ForwardBackward(bottom_vec); } loss /= param_.iter_size(); // average the loss across iterations for smoothed reporting if (losses.size() < average_loss) { losses.push_back(loss); - int size = losses.size(); + int_tp size = losses.size(); smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; } else { - int idx = (iter_ - start_iter) % average_loss; + int_tp idx = (iter_ - start_iter) % average_loss; smoothed_loss += (loss - losses[idx]) / average_loss; losses[idx] = loss; } @@ -244,14 +244,14 @@ void Solver::Step(int iters) { LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_ << ", loss = " << smoothed_loss; const vector*>& result = net_->output_blobs(); - int score_index = 0; - for (int j = 0; j < result.size(); ++j) { + int_tp score_index = 0; + for (int_tp j = 0; j < result.size(); ++j) { const Dtype* result_vec = result[j]->cpu_data(); const string& output_name = net_->blob_names()[net_->output_blob_indices()[j]]; const Dtype loss_weight = net_->blob_loss_weights()[net_->output_blob_indices()[j]]; - for (int k = 0; k < result[j]->count(); ++k) { + for (int_tp k = 0; k < result[j]->count(); ++k) { ostringstream loss_msg_stream; if (loss_weight) { loss_msg_stream << " (* " << loss_weight @@ -263,7 +263,7 @@ void Solver::Step(int iters) { } } } - for (int i = 0; i < callbacks_.size(); ++i) { + for (int_tp i = 0; i < callbacks_.size(); ++i) { callbacks_[i]->on_gradients_ready(); } ApplyUpdate(); @@ -336,7 +336,7 @@ void Solver::Solve(const char* resume_file) { template void Solver::TestAll() { - for (int test_net_id = 0; + for (int_tp test_net_id = 0; test_net_id < test_nets_.size() && !requested_early_exit_; ++test_net_id) { Test(test_net_id); @@ -344,18 +344,18 @@ void Solver::TestAll() { } template -void Solver::Test(const int test_net_id) { +void Solver::Test(const int_tp test_net_id) { CHECK(Caffe::root_solver()); LOG(INFO) << "Iteration " << iter_ << ", Testing net (#" << test_net_id << ")"; CHECK_NOTNULL(test_nets_[test_net_id].get())-> ShareTrainedLayersWith(net_.get()); vector test_score; - vector test_score_output_id; + vector test_score_output_id; vector*> bottom_vec; const shared_ptr >& test_net = test_nets_[test_net_id]; Dtype loss = 0; - for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + for (int_tp i = 0; i < param_.test_iter(test_net_id); ++i) { SolverAction::Enum request = GetRequestedAction(); // Check to see if stoppage of testing/training has been requested. while (request != SolverAction::NONE) { @@ -378,18 +378,18 @@ void Solver::Test(const int test_net_id) { loss += iter_loss; } if (i == 0) { - for (int j = 0; j < result.size(); ++j) { + for (int_tp j = 0; j < result.size(); ++j) { const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { + for (int_tp k = 0; k < result[j]->count(); ++k) { test_score.push_back(result_vec[k]); test_score_output_id.push_back(j); } } } else { - int idx = 0; - for (int j = 0; j < result.size(); ++j) { + int_tp idx = 0; + for (int_tp j = 0; j < result.size(); ++j) { const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { + for (int_tp k = 0; k < result[j]->count(); ++k) { test_score[idx++] += result_vec[k]; } } @@ -403,8 +403,8 @@ void Solver::Test(const int test_net_id) { loss /= param_.test_iter(test_net_id); LOG(INFO) << "Test loss: " << loss; } - for (int i = 0; i < test_score.size(); ++i) { - const int output_blob_index = + for (int_tp i = 0; i < test_score.size(); ++i) { + const int_tp output_blob_index = test_net->output_blob_indices()[test_score_output_id[i]]; const string& output_name = test_net->blob_names()[output_blob_index]; const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; @@ -458,9 +458,9 @@ void Solver::CheckSnapshotWritePermissions() { template string Solver::SnapshotFilename(const string extension) { string filename(param_.snapshot_prefix()); - const int kBufferSize = 20; + const int_tp kBufferSize = 20; char iter_str_buffer[kBufferSize]; - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); + snprintf(iter_str_buffer, kBufferSize, "_iter_%zd", iter_); return filename + iter_str_buffer + extension; } @@ -494,1114 +494,7 @@ void Solver::Restore(const char* state_file) { } } -// Return the current learning rate. The currently implemented learning rate -// policies are as follows: -// - fixed: always return base_lr. -// - step: return base_lr * gamma ^ (floor(iter / step)) -// - exp: return base_lr * gamma ^ iter -// - inv: return base_lr * (1 + gamma * iter) ^ (- power) -// - multistep: similar to step but it allows non uniform steps defined by -// stepvalue -// - poly: the effective learning rate follows a polynomial decay, to be -// zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) -// - sigmoid: the effective learning rate follows a sigmod decay -// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) -// -// where base_lr, max_iter, gamma, step, stepvalue and power are defined -// in the solver parameter protocol buffer, and iter is the current iteration. -template -Dtype SGDSolver::GetLearningRate() { - Dtype rate; - const string& lr_policy = this->param_.lr_policy(); - if (lr_policy == "fixed") { - rate = this->param_.base_lr(); - } else if (lr_policy == "step") { - this->current_step_ = this->iter_ / this->param_.stepsize(); - rate = this->param_.base_lr() - * pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "exp") { - rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); - } else if (lr_policy == "inv") { - rate = this->param_.base_lr() - * pow(Dtype(1) + this->param_.gamma() * this->iter_, - -this->param_.power()); - } else if (lr_policy == "multistep") { - if (this->current_step_ < this->param_.stepvalue_size() - && this->iter_ >= this->param_.stepvalue(this->current_step_)) { - this->current_step_++; - LOG(INFO)<< "MultiStep Status: Iteration " << - this->iter_ << ", step = " << this->current_step_; - } - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "poly") { - rate = this->param_.base_lr() * pow(Dtype(1.) - - (Dtype(this->iter_) / Dtype(this->param_.max_iter())), - this->param_.power()); - } else if (lr_policy == "sigmoid") { - rate = this->param_.base_lr() * (Dtype(1.) / - (Dtype(1.) + exp(-this->param_.gamma() * (Dtype(this->iter_) - - Dtype(this->param_.stepsize()))))); - } else { - LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; - } - return rate; -} - -template -void SGDSolver::PreSolve() { - // Initialize the history - const vector*>& net_params = this->net_->learnable_params(); - history_.clear(); - update_.clear(); - temp_.clear(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - history_.push_back( - shared_ptr>( - new Blob(shape, Caffe::GetDefaultDevice()))); - update_.push_back( - shared_ptr>( - new Blob(shape, Caffe::GetDefaultDevice()))); - temp_.push_back( - shared_ptr>( - new Blob(shape, Caffe::GetDefaultDevice()))); - } -} - -template -void SGDSolver::ClipGradients() { - const Dtype clip_gradients = this->param_.clip_gradients(); - if (clip_gradients < 0) { return; } - const vector*>& net_params = this->net_->learnable_params(); - Dtype sumsq_diff = 0; - for (int i = 0; i < net_params.size(); ++i) { - sumsq_diff += net_params[i]->sumsq_diff(); - } - const Dtype l2norm_diff = std::sqrt(sumsq_diff); - if (l2norm_diff > clip_gradients) { - Dtype scale_factor = clip_gradients / l2norm_diff; - LOG(INFO)<< "Gradient clipping: scaling down gradients (L2 norm " - << l2norm_diff << " > " << clip_gradients << ") " - << "by scale factor " << scale_factor; - for (int i = 0; i < net_params.size(); ++i) { - net_params[i]->scale_diff(scale_factor); - } - } -} - -template -void SGDSolver::ApplyUpdate() { - CHECK(Caffe::root_solver()); - Dtype rate = GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO)<< "Iteration " << this->iter_ << ", lr = " << rate; - } - ClipGradients(); - for (int param_id = 0; param_id < this->net_->learnable_params().size(); - ++param_id) { - Normalize(param_id); - Regularize(param_id); - ComputeUpdateValue(param_id, rate); - } - this->net_->Update(); -} - -template -void SGDSolver::Normalize(int param_id) { - if (this->param_.iter_size() == 1) { - return; - } - // Scale gradient to counterbalance accumulation. - const vector*>& net_params = this->net_->learnable_params(); - const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - greentea_gpu_scal(this->device_->id(), - net_params[param_id]->count(), accum_normalization, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), - 0); -#endif // USE_GREENTEA - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); - } - } - -template -void SGDSolver::Regularize(int param_id) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL)<< "Unknown regularization type: " << regularization_type; - } - } - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL)<< "Unknown regularization type: " - << regularization_type; - } - } -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - greentea_gpu_axpy(this->device_->id(), - net_params[param_id]->count(), - local_decay, - (cl_mem)(net_params[param_id]->gpu_data()), 0, - (cl_mem)(net_params[param_id]->mutable_gpu_diff()), 0); - } else if (regularization_type == "L1") { - greentea_gpu_sign(this->device_->id(), - net_params[param_id]->count(), - (cl_mem)(net_params[param_id]->gpu_data()), 0, - (cl_mem)(temp_[param_id]->mutable_gpu_data()), 0); - greentea_gpu_axpy(this->device_->id(), - net_params[param_id]->count(), - local_decay, - (cl_mem)(temp_[param_id]->gpu_data()), 0, - (cl_mem)(net_params[param_id]->mutable_gpu_diff()), 0); - } else { - LOG(FATAL)<< "Unknown regularization type: " - << regularization_type; - } - } -#endif // USE_GREENTEA - } -#else - NO_GPU; -#endif - break; - } - default: { - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } - } -} - -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - // Compute the update to history, then copy it to the parameter diff. - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - caffe_cpu_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_->id()); - - greentea_gpu_axpby( - this->device_->id(), net_params[param_id]->count(), - local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, - momentum, (cl_mem) (history_[param_id]->mutable_gpu_data()), 0); - greentea_copy( - net_params[param_id]->count(), - (cl_mem) (history_[param_id]->gpu_data()), 0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0, &ctx); -#endif // USE_GREENTEA - } -#else - NO_GPU; -#endif - break; - } - default: { - LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); - } - } -} - -template -void SGDSolver::SnapshotSolverState(const string& model_filename) { - switch (this->param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - SnapshotSolverStateToBinaryProto(model_filename); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - SnapshotSolverStateToHDF5(model_filename); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; - } -} - -template -void SGDSolver::SnapshotSolverStateToBinaryProto( - const string& model_filename) { - SolverState state; - state.set_iter(this->iter_); - state.set_learned_net(model_filename); - state.set_current_step(this->current_step_); - state.clear_history(); - for (int i = 0; i < history_.size(); ++i) { - // Add history - BlobProto* history_blob = state.add_history(); - history_[i]->ToProto(history_blob); - } - string snapshot_filename = Solver::SnapshotFilename(".solverstate"); - LOG(INFO) - << "Snapshotting solver state to binary proto file " << snapshot_filename; - WriteProtoToBinaryFile(state, snapshot_filename.c_str()); -} - -template -void SGDSolver::SnapshotSolverStateToHDF5( - const string& model_filename) { - string snapshot_filename = - Solver::SnapshotFilename(".solverstate.h5"); - LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename; - hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC, - H5P_DEFAULT, H5P_DEFAULT); - CHECK_GE(file_hid, 0) - << "Couldn't open " << snapshot_filename << " to save solver state."; - hdf5_save_int(file_hid, "iter", this->iter_); - hdf5_save_string(file_hid, "learned_net", model_filename); - hdf5_save_int(file_hid, "current_step", this->current_step_); - hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(history_hid, 0) - << "Error saving solver state to " << snapshot_filename << "."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_save_nd_dataset(history_hid, oss.str(), *history_[i]); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - -template -void SGDSolver::RestoreSolverStateFromBinaryProto( - const string& state_file) { - SolverState state; - ReadProtoFromBinaryFile(state_file, &state); - this->iter_ = state.iter(); - if (state.has_learned_net()) { - NetParameter net_param; - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); - this->net_->CopyTrainedLayersFrom(net_param); - } - this->current_step_ = state.current_step(); - CHECK_EQ(state.history_size(), history_.size()) - << "Incorrect length of history blobs."; - LOG(INFO) << "SGDSolver: restoring history"; - for (int i = 0; i < history_.size(); ++i) { - history_[i]->FromProto(state.history(i)); - } -} - -template -void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { - hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; - this->iter_ = hdf5_load_int(file_hid, "iter"); - if (H5LTfind_dataset(file_hid, "learned_net")) { - string learned_net = hdf5_load_string(file_hid, "learned_net"); - this->net_->CopyTrainedLayersFrom(learned_net); - } - this->current_step_ = hdf5_load_int(file_hid, "current_step"); - hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); - CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; - int state_history_size = hdf5_get_num_links(history_hid); - CHECK_EQ(state_history_size, history_.size()) - << "Incorrect length of history blobs."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, - kMaxBlobAxes, history_[i].get()); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - -template -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - // save history momentum for stepping back - caffe_cpu_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute update: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_cpu_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute update: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - viennacl::ocl::context &ctx = viennacl::ocl::get_context( - this->device_->id()); - - // save history momentum for stepping back - greentea_copy( - net_params[param_id]->count(), - (cl_mem) (this->history_[param_id]->gpu_data()), 0, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0, &ctx); - - // update history - greentea_gpu_axpby( - this->device_->id(), net_params[param_id]->count(), - local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, - momentum, (cl_mem) (this->history_[param_id]->mutable_gpu_data()), - 0); - - // compute update: step back then over step - greentea_gpu_axpby( - this->device_->id(), net_params[param_id]->count(), - Dtype(1) + momentum, - (cl_mem) (this->history_[param_id]->gpu_data()), 0, -momentum, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // copy - greentea_copy( - net_params[param_id]->count(), - (cl_mem) (this->update_[param_id]->gpu_data()), 0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0, &ctx); -#endif // USE_GREENTEA - } -#else - NO_GPU; -#endif - break; - } - default: { - LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); - } - } -} - -template -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype local_rate = rate * net_params_lr[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), delta, - this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), delta, - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - // compute square of gradient in update - greentea_gpu_powx( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // update history - greentea_gpu_add( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (this->update_[param_id]->gpu_data()), 0, - (cl_mem) (this->history_[param_id]->gpu_data()), 0, - (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); - - // prepare update - greentea_gpu_powx( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (this->history_[param_id]->gpu_data()), 0, Dtype(0.5), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - greentea_gpu_add_scalar( - this->device_->id(), net_params[param_id]->count(), delta, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - greentea_gpu_div( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_diff()), 0, - (cl_mem) (this->update_[param_id]->gpu_data()), 0, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // scale and copy - greentea_gpu_axpby( - this->device_->id(), net_params[param_id]->count(), - local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, - Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); - } - } - -template -void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - - // get the learning rate - Dtype delta = this->param_.delta(); - Dtype rms_decay = this->param_.rms_decay(); - Dtype local_rate = rate * net_params_lr[param_id]; - - switch (Caffe::mode()) { - case Caffe::CPU: - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), - rms_decay, this->history_[param_id]-> mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1 - rms_decay), - this->update_[param_id]->gpu_data(), rms_decay, - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), delta, - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - // compute square of gradient in update - greentea_gpu_powx( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // update history - greentea_gpu_axpby( - this->device_->id(), net_params[param_id]->count(), - Dtype(1 - rms_decay), - (cl_mem) (this->update_[param_id]->gpu_data()), 0, rms_decay, - (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); - - // prepare update - greentea_gpu_powx( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (this->history_[param_id]->gpu_data()), 0, Dtype(0.5), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - greentea_gpu_add_scalar( - this->device_->id(), net_params[param_id]->count(), delta, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - greentea_gpu_div( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_diff()), 0, - (cl_mem) (this->update_[param_id]->gpu_data()), 0, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - greentea_gpu_axpby( - this->device_->id(), net_params[param_id]->count(), - local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, - Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA - } -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdaDeltaSolver::AdaDeltaPreSolve() { - // Add the extra history entries for AdaDelta after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - size_t update_history_offset = net_params.size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of gradients - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[update_history_offset + param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - - // divide history of updates by history of gradients - caffe_div(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->temp_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_powx(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - // compute the update - caffe_mul(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - - // compute square of update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of updates - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_cpu_data()); - - // apply learning rate - caffe_cpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of gradients - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_gpu_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_gpu_data()); - - caffe_gpu_add( - net_params[param_id]->count(), this->temp_[param_id]->gpu_data(), - this->history_[update_history_offset + param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - - // divide history of updates by history of gradients - caffe_gpu_div(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->temp_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_gpu_powx(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - // compute the update and copy to net_diff - caffe_gpu_mul(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - - // compute square of update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of updates - caffe_gpu_axpby( - net_params[param_id]->count(), - Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), - momentum, - this->history_[update_history_offset + param_id] - ->mutable_gpu_data()); - - // apply learning rate - caffe_gpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), - net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - // compute square of gradient in update - greentea_gpu_powx( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // update history of gradients - greentea_gpu_axpby( - this->device_->id(), net_params[param_id]->count(), - Dtype(1) - momentum, (cl_mem) (this->update_[param_id]->gpu_data()), - 0, momentum, - (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); - - // add delta to history to guard against dividing by zero later - greentea_gpu_set( - this->device_->id(), net_params[param_id]->count(), delta, - (cl_mem) (this->temp_[param_id]->mutable_gpu_data()), 0); - - greentea_gpu_add( - this->device_->id(), - net_params[param_id]->count(), - (cl_mem) (this->temp_[param_id]->gpu_data()), - 0, - (cl_mem) (this->history_[update_history_offset + param_id] - ->gpu_data()), - 0, (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - greentea_gpu_add( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (this->temp_[param_id]->gpu_data()), 0, - (cl_mem) (this->history_[param_id]->gpu_data()), 0, - (cl_mem) (this->temp_[param_id]->mutable_gpu_data()), 0); - - // divide history of updates by history of gradients - greentea_gpu_div( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (this->update_[param_id]->gpu_data()), 0, - (cl_mem) (this->temp_[param_id]->gpu_data()), 0, - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // jointly compute the RMS of both for update and gradient history - greentea_gpu_powx( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (this->update_[param_id]->gpu_data()), 0, Dtype(0.5), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // compute the update and copy to net_diff - greentea_gpu_mul( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_diff()), 0, - (cl_mem) (this->update_[param_id]->gpu_data()), 0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); - - // compute square of update - greentea_gpu_powx( - this->device_->id(), net_params[param_id]->count(), - (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), - (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); - - // update history of updates - greentea_gpu_axpby( - this->device_->id(), - net_params[param_id]->count(), - Dtype(1) - momentum, - (cl_mem) (this->update_[param_id]->gpu_data()), - 0, - momentum, - (cl_mem) (this->history_[update_history_offset + param_id] - ->mutable_gpu_data()), - 0); - - // apply learning rate - greentea_gpu_scale( - this->device_->id(), net_params[param_id]->count(), - local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTEA - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdamSolver::AdamPreSolve() { - // Add the extra history entries for Adam after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype local_rate = rate * net_params_lr[param_id]; - const Dtype beta1 = this->param_.momentum(); - const Dtype beta2 = this->param_.momentum2(); - - // we create aliases for convenience - size_t update_history_offset = net_params.size(); - Blob* val_m = this->history_[param_id].get(); - Blob* val_v = this->history_[param_id + update_history_offset].get(); - Blob* val_t = this->temp_[param_id].get(); - - const int t = this->iter_ + 1; - const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / - (Dtype(1.) - pow(beta1, t)); - const int N = net_params[param_id]->count(); - const Dtype eps_hat = this->param_.delta(); - - switch (Caffe::mode()) { - case Caffe::CPU: { - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_cpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->cpu_diff(), beta1, - val_m->mutable_cpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_mul(N, - net_params[param_id]->cpu_diff(), - net_params[param_id]->cpu_diff(), - val_t->mutable_cpu_data()); - caffe_cpu_axpby(N, Dtype(1)-beta2, - val_t->cpu_data(), beta2, - val_v->mutable_cpu_data()); - - // set update - caffe_powx(N, - val_v->cpu_data(), Dtype(0.5), - val_t->mutable_cpu_data()); - caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); - caffe_div(N, - val_m->cpu_data(), - val_t->cpu_data(), - val_t->mutable_cpu_data()); - - caffe_cpu_scale(N, local_rate*correction, - val_t->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (this->device_->backend() == BACKEND_CUDA) { -#ifdef USE_CUDA - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_gpu_axpby(N, Dtype(1) - beta1, net_params[param_id]->gpu_diff(), - beta1, val_m->mutable_gpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_gpu_mul(N, net_params[param_id]->gpu_diff(), - net_params[param_id]->gpu_diff(), - val_t->mutable_gpu_data()); - caffe_gpu_axpby(N, Dtype(1) - beta2, val_t->gpu_data(), beta2, - val_v->mutable_gpu_data()); - - // set update - caffe_gpu_powx(N, val_v->gpu_data(), Dtype(0.5), - val_t->mutable_gpu_data()); - caffe_gpu_add_scalar(N, eps_hat, val_t->mutable_gpu_data()); - caffe_gpu_div(N, val_m->gpu_data(), val_t->gpu_data(), - val_t->mutable_gpu_data()); - - caffe_gpu_scale(N, local_rate * correction, val_t->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - greentea_gpu_axpby(this->device_->id(), N, - Dtype(1) - beta1, - (cl_mem) (net_params[param_id]->gpu_diff()), - 0, beta1, - (cl_mem) (val_m->mutable_gpu_data()), 0); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - greentea_gpu_mul(this->device_->id(), N, - (cl_mem) (net_params[param_id]->gpu_diff()), 0, - (cl_mem) (net_params[param_id]->gpu_diff()), 0, - (cl_mem) (val_t->mutable_gpu_data()), 0); - greentea_gpu_axpby(this->device_->id(), N, - Dtype(1) - beta2, - (cl_mem) (val_t->gpu_data()), 0, beta2, - (cl_mem) (val_v->mutable_gpu_data()), 0); - - // set update - greentea_gpu_powx(this->device_->id(), N, - (cl_mem) (val_v->gpu_data()), 0, Dtype(0.5), - (cl_mem) (val_t->mutable_gpu_data()), 0); - greentea_gpu_add_scalar(this->device_->id(), N, eps_hat, - (cl_mem) (val_t->mutable_gpu_data()), 0); - greentea_gpu_div(this->device_->id(), N, - (cl_mem) (val_m->gpu_data()), 0, - (cl_mem) (val_t->gpu_data()), 0, - (cl_mem) (val_t->mutable_gpu_data()), 0); - - greentea_gpu_scale( - this->device_->id(), N, local_rate * correction, - (cl_mem) (val_t->gpu_data()), 0, - (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); -#endif // USE_GREENTA - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - INSTANTIATE_CLASS(Solver); -INSTANTIATE_CLASS(SGDSolver); -INSTANTIATE_CLASS(NesterovSolver); -INSTANTIATE_CLASS(AdaGradSolver); -INSTANTIATE_CLASS(RMSPropSolver); -INSTANTIATE_CLASS(AdaDeltaSolver); -INSTANTIATE_CLASS(AdamSolver); } // namespace caffe diff --git a/src/caffe/solvers/adadelta_solver.cpp b/src/caffe/solvers/adadelta_solver.cpp new file mode 100644 index 00000000000..209f99f54e4 --- /dev/null +++ b/src/caffe/solvers/adadelta_solver.cpp @@ -0,0 +1,243 @@ +#include + +#include "caffe/sgd_solvers.hpp" + +namespace caffe { + +template +void AdaDeltaSolver::AdaDeltaPreSolve() { + // Add the extra history entries for AdaDelta after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (uint_tp i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdaDeltaSolver::ComputeUpdateValue(uint_tp param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + uint_tp update_history_offset = net_params.size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of gradients + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[update_history_offset + param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + + // divide history of updates by history of gradients + caffe_div(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->temp_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_powx(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + // compute the update + caffe_mul(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + + // compute square of update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of updates + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_cpu_data()); + + // apply learning rate + caffe_cpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of gradients + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_gpu_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_gpu_data()); + + caffe_gpu_add( + net_params[param_id]->count(), this->temp_[param_id]->gpu_data(), + this->history_[update_history_offset + param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + + // divide history of updates by history of gradients + caffe_gpu_div(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->temp_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_gpu_powx(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + // compute the update and copy to net_diff + caffe_gpu_mul(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + + // compute square of update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of updates + caffe_gpu_axpby( + net_params[param_id]->count(), + Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), + momentum, + this->history_[update_history_offset + param_id] + ->mutable_gpu_data()); + + // apply learning rate + caffe_gpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + // compute square of gradient in update + greentea_gpu_powx( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // update history of gradients + greentea_gpu_axpby( + this->device_->id(), net_params[param_id]->count(), + Dtype(1) - momentum, (cl_mem) (this->update_[param_id]->gpu_data()), + 0, momentum, + (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); + + // add delta to history to guard against dividing by zero later + greentea_gpu_set( + this->device_->id(), net_params[param_id]->count(), delta, + (cl_mem) (this->temp_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_add( + this->device_->id(), + net_params[param_id]->count(), + (cl_mem) (this->temp_[param_id]->gpu_data()), + 0, + (cl_mem) (this->history_[update_history_offset + param_id] + ->gpu_data()), + 0, (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_add( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (this->temp_[param_id]->gpu_data()), 0, + (cl_mem) (this->history_[param_id]->gpu_data()), 0, + (cl_mem) (this->temp_[param_id]->mutable_gpu_data()), 0); + + // divide history of updates by history of gradients + greentea_gpu_div( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->temp_[param_id]->gpu_data()), 0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // jointly compute the RMS of both for update and gradient history + greentea_gpu_powx( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), 0, Dtype(0.5), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // compute the update and copy to net_diff + greentea_gpu_mul( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); + + // compute square of update + greentea_gpu_powx( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // update history of updates + greentea_gpu_axpby( + this->device_->id(), + net_params[param_id]->count(), + Dtype(1) - momentum, + (cl_mem) (this->update_[param_id]->gpu_data()), + 0, + momentum, + (cl_mem) (this->history_[update_history_offset + param_id] + ->mutable_gpu_data()), + 0); + + // apply learning rate + greentea_gpu_scale( + this->device_->id(), net_params[param_id]->count(), + local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +INSTANTIATE_CLASS(AdaDeltaSolver); +REGISTER_SOLVER_CLASS(AdaDelta); + +} // namespace caffe diff --git a/src/caffe/solvers/adagrad_solver.cpp b/src/caffe/solvers/adagrad_solver.cpp new file mode 100644 index 00000000000..2ee74f780bf --- /dev/null +++ b/src/caffe/solvers/adagrad_solver.cpp @@ -0,0 +1,129 @@ +#include + +#include "caffe/sgd_solvers.hpp" + +namespace caffe { + +template +void AdaGradSolver::ComputeUpdateValue(uint_tp param_id, Dtype rate) { + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), delta, + this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), delta, + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + // compute square of gradient in update + greentea_gpu_powx( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // update history + greentea_gpu_add( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->history_[param_id]->gpu_data()), 0, + (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); + + // prepare update + greentea_gpu_powx( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), 0, Dtype(0.5), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_add_scalar( + this->device_->id(), net_params[param_id]->count(), delta, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_div( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // scale and copy + greentea_gpu_axpby( + this->device_->id(), net_params[param_id]->count(), + local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, + Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } + } + +INSTANTIATE_CLASS(AdaGradSolver); +REGISTER_SOLVER_CLASS(AdaGrad); + +} // namespace caffe diff --git a/src/caffe/solvers/adam_solver.cpp b/src/caffe/solvers/adam_solver.cpp new file mode 100644 index 00000000000..a0fc4d0fb21 --- /dev/null +++ b/src/caffe/solvers/adam_solver.cpp @@ -0,0 +1,144 @@ +#include + +#include "caffe/sgd_solvers.hpp" + +namespace caffe { + +template +void AdamSolver::AdamPreSolve() { + // Add the extra history entries for Adam after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (uint_tp i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdamSolver::ComputeUpdateValue(uint_tp param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype local_rate = rate * net_params_lr[param_id]; + const Dtype beta1 = this->param_.momentum(); + const Dtype beta2 = this->param_.momentum2(); + + // we create aliases for convenience + uint_tp update_history_offset = net_params.size(); + Blob* val_m = this->history_[param_id].get(); + Blob* val_v = this->history_[param_id + update_history_offset].get(); + Blob* val_t = this->temp_[param_id].get(); + + const uint_tp t = this->iter_ + 1; + const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / + (Dtype(1.) - pow(beta1, t)); + const uint_tp N = net_params[param_id]->count(); + const Dtype eps_hat = this->param_.delta(); + + switch (Caffe::mode()) { + case Caffe::CPU: { + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_cpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->cpu_diff(), beta1, + val_m->mutable_cpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_mul(N, + net_params[param_id]->cpu_diff(), + net_params[param_id]->cpu_diff(), + val_t->mutable_cpu_data()); + caffe_cpu_axpby(N, Dtype(1)-beta2, + val_t->cpu_data(), beta2, + val_v->mutable_cpu_data()); + + // set update + caffe_powx(N, + val_v->cpu_data(), Dtype(0.5), + val_t->mutable_cpu_data()); + caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); + caffe_div(N, + val_m->cpu_data(), + val_t->cpu_data(), + val_t->mutable_cpu_data()); + + caffe_cpu_scale(N, local_rate*correction, + val_t->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_gpu_axpby(N, Dtype(1) - beta1, net_params[param_id]->gpu_diff(), + beta1, val_m->mutable_gpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_gpu_mul(N, net_params[param_id]->gpu_diff(), + net_params[param_id]->gpu_diff(), + val_t->mutable_gpu_data()); + caffe_gpu_axpby(N, Dtype(1) - beta2, val_t->gpu_data(), beta2, + val_v->mutable_gpu_data()); + + // set update + caffe_gpu_powx(N, val_v->gpu_data(), Dtype(0.5), + val_t->mutable_gpu_data()); + caffe_gpu_add_scalar(N, eps_hat, val_t->mutable_gpu_data()); + caffe_gpu_div(N, val_m->gpu_data(), val_t->gpu_data(), + val_t->mutable_gpu_data()); + + caffe_gpu_scale(N, local_rate * correction, val_t->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + greentea_gpu_axpby(this->device_->id(), N, + Dtype(1) - beta1, + (cl_mem) (net_params[param_id]->gpu_diff()), + 0, beta1, + (cl_mem) (val_m->mutable_gpu_data()), 0); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + greentea_gpu_mul(this->device_->id(), N, + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (val_t->mutable_gpu_data()), 0); + greentea_gpu_axpby(this->device_->id(), N, + Dtype(1) - beta2, + (cl_mem) (val_t->gpu_data()), 0, beta2, + (cl_mem) (val_v->mutable_gpu_data()), 0); + + // set update + greentea_gpu_powx(this->device_->id(), N, + (cl_mem) (val_v->gpu_data()), 0, Dtype(0.5), + (cl_mem) (val_t->mutable_gpu_data()), 0); + greentea_gpu_add_scalar(this->device_->id(), N, eps_hat, + (cl_mem) (val_t->mutable_gpu_data()), 0); + greentea_gpu_div(this->device_->id(), N, + (cl_mem) (val_m->gpu_data()), 0, + (cl_mem) (val_t->gpu_data()), 0, + (cl_mem) (val_t->mutable_gpu_data()), 0); + + greentea_gpu_scale( + this->device_->id(), N, local_rate * correction, + (cl_mem) (val_t->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); +#endif // USE_GREENTA + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +INSTANTIATE_CLASS(AdamSolver); +REGISTER_SOLVER_CLASS(Adam); + +} // namespace caffe diff --git a/src/caffe/solvers/nesterov_solver.cpp b/src/caffe/solvers/nesterov_solver.cpp new file mode 100644 index 00000000000..bfd04badd05 --- /dev/null +++ b/src/caffe/solvers/nesterov_solver.cpp @@ -0,0 +1,107 @@ +#include + +#include "caffe/sgd_solvers.hpp" + +namespace caffe { + +template +void NesterovSolver::ComputeUpdateValue(uint_tp param_id, Dtype rate) { + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + // save history momentum for stepping back + caffe_cpu_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_cpu_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + + // save history momentum for stepping back + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), 0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0, &ctx); + + // update history + greentea_gpu_axpby( + this->device_->id(), net_params[param_id]->count(), + local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, + momentum, (cl_mem) (this->history_[param_id]->mutable_gpu_data()), + 0); + + // compute update: step back then over step + greentea_gpu_axpby( + this->device_->id(), net_params[param_id]->count(), + Dtype(1) + momentum, + (cl_mem) (this->history_[param_id]->gpu_data()), 0, -momentum, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // copy + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0, &ctx); +#endif // USE_GREENTEA + } +#else + NO_GPU; +#endif + break; + } + default: { + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } + } +} + +INSTANTIATE_CLASS(NesterovSolver); +REGISTER_SOLVER_CLASS(Nesterov); + +} // namespace caffe diff --git a/src/caffe/solvers/rmsprop_solver.cpp b/src/caffe/solvers/rmsprop_solver.cpp new file mode 100644 index 00000000000..1ca31103bc4 --- /dev/null +++ b/src/caffe/solvers/rmsprop_solver.cpp @@ -0,0 +1,126 @@ +#include + +#include "caffe/sgd_solvers.hpp" + +namespace caffe { + +template +void RMSPropSolver::ComputeUpdateValue(uint_tp param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + + // get the learning rate + Dtype delta = this->param_.delta(); + Dtype rms_decay = this->param_.rms_decay(); + Dtype local_rate = rate * net_params_lr[param_id]; + + switch (Caffe::mode()) { + case Caffe::CPU: + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), + rms_decay, this->history_[param_id]-> mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1 - rms_decay), + this->update_[param_id]->gpu_data(), rms_decay, + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), delta, + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + // compute square of gradient in update + greentea_gpu_powx( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, Dtype(2), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + // update history + greentea_gpu_axpby( + this->device_->id(), net_params[param_id]->count(), + Dtype(1 - rms_decay), + (cl_mem) (this->update_[param_id]->gpu_data()), 0, rms_decay, + (cl_mem) (this->history_[param_id]->mutable_gpu_data()), 0); + + // prepare update + greentea_gpu_powx( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (this->history_[param_id]->gpu_data()), 0, Dtype(0.5), + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_add_scalar( + this->device_->id(), net_params[param_id]->count(), delta, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_div( + this->device_->id(), net_params[param_id]->count(), + (cl_mem) (net_params[param_id]->gpu_diff()), 0, + (cl_mem) (this->update_[param_id]->gpu_data()), 0, + (cl_mem) (this->update_[param_id]->mutable_gpu_data()), 0); + + greentea_gpu_axpby( + this->device_->id(), net_params[param_id]->count(), + local_rate, (cl_mem) (this->update_[param_id]->gpu_data()), 0, + Dtype(0), (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0); +#endif // USE_GREENTEA + } +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +INSTANTIATE_CLASS(RMSPropSolver); +REGISTER_SOLVER_CLASS(RMSProp); + +} // namespace caffe diff --git a/src/caffe/solvers/sgd_solver.cpp b/src/caffe/solvers/sgd_solver.cpp new file mode 100644 index 00000000000..84058ae17c3 --- /dev/null +++ b/src/caffe/solvers/sgd_solver.cpp @@ -0,0 +1,416 @@ +#include +#include + +#include "caffe/sgd_solvers.hpp" +#include "caffe/util/hdf5.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" + +namespace caffe { + +// Return the current learning rate. The currently implemented learning rate +// policies are as follows: +// - fixed: always return base_lr. +// - step: return base_lr * gamma ^ (floor(iter / step)) +// - exp: return base_lr * gamma ^ iter +// - inv: return base_lr * (1 + gamma * iter) ^ (- power) +// - multistep: similar to step but it allows non uniform steps defined by +// stepvalue +// - poly: the effective learning rate follows a polynomial decay, to be +// zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) +// - sigmoid: the effective learning rate follows a sigmod decay +// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) +// +// where base_lr, max_iter, gamma, step, stepvalue and power are defined +// in the solver parameter protocol buffer, and iter is the current iteration. +template +Dtype SGDSolver::GetLearningRate() { + Dtype rate; + const string& lr_policy = this->param_.lr_policy(); + if (lr_policy == "fixed") { + rate = this->param_.base_lr(); + } else if (lr_policy == "step") { + this->current_step_ = this->iter_ / this->param_.stepsize(); + rate = this->param_.base_lr() + * pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "exp") { + rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); + } else if (lr_policy == "inv") { + rate = this->param_.base_lr() + * pow(Dtype(1) + this->param_.gamma() * this->iter_, + -this->param_.power()); + } else if (lr_policy == "multistep") { + if (this->current_step_ < this->param_.stepvalue_size() + && this->iter_ >= this->param_.stepvalue(this->current_step_)) { + this->current_step_++; + LOG(INFO)<< "MultiStep Status: Iteration " << + this->iter_ << ", step = " << this->current_step_; + } + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "poly") { + rate = this->param_.base_lr() * pow(Dtype(1.) - + (Dtype(this->iter_) / Dtype(this->param_.max_iter())), + this->param_.power()); + } else if (lr_policy == "sigmoid") { + rate = this->param_.base_lr() * (Dtype(1.) / + (Dtype(1.) + exp(-this->param_.gamma() * (Dtype(this->iter_) - + Dtype(this->param_.stepsize()))))); + } else { + LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; + } + return rate; +} + +template +void SGDSolver::PreSolve() { + // Initialize the history + const vector*>& net_params = this->net_->learnable_params(); + history_.clear(); + update_.clear(); + temp_.clear(); + for (uint_tp i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + history_.push_back( + shared_ptr>( + new Blob(shape, Caffe::GetDefaultDevice()))); + update_.push_back( + shared_ptr>( + new Blob(shape, Caffe::GetDefaultDevice()))); + temp_.push_back( + shared_ptr>( + new Blob(shape, Caffe::GetDefaultDevice()))); + } +} + +template +void SGDSolver::ClipGradients() { + const Dtype clip_gradients = this->param_.clip_gradients(); + if (clip_gradients < 0) { return; } + const vector*>& net_params = this->net_->learnable_params(); + Dtype sumsq_diff = 0; + for (uint_tp i = 0; i < net_params.size(); ++i) { + sumsq_diff += net_params[i]->sumsq_diff(); + } + const Dtype l2norm_diff = std::sqrt(sumsq_diff); + if (l2norm_diff > clip_gradients) { + Dtype scale_factor = clip_gradients / l2norm_diff; + LOG(INFO)<< "Gradient clipping: scaling down gradients (L2 norm " + << l2norm_diff << " > " << clip_gradients << ") " + << "by scale factor " << scale_factor; + for (uint_tp i = 0; i < net_params.size(); ++i) { + net_params[i]->scale_diff(scale_factor); + } + } +} + +template +void SGDSolver::ApplyUpdate() { + CHECK(Caffe::root_solver()); + Dtype rate = GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO)<< "Iteration " << this->iter_ << ", lr = " << rate; + } + ClipGradients(); + for (uint_tp param_id = 0; param_id < this->net_->learnable_params().size(); + ++param_id) { + Normalize(param_id); + Regularize(param_id); + ComputeUpdateValue(param_id, rate); + } + this->net_->Update(); +} + +template +void SGDSolver::Normalize(uint_tp param_id) { + if (this->param_.iter_size() == 1) { + return; + } + // Scale gradient to counterbalance accumulation. + const vector*>& net_params = this->net_->learnable_params(); + const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + greentea_gpu_scal(this->device_->id(), + net_params[param_id]->count(), accum_normalization, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), + 0); +#endif // USE_GREENTEA + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } + } + +template +void SGDSolver::Regularize(uint_tp param_id) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " << regularization_type; + } + } + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL)<< "Unknown regularization type: " + << regularization_type; + } + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + greentea_gpu_axpy(this->device_->id(), + net_params[param_id]->count(), + local_decay, + (cl_mem)(net_params[param_id]->gpu_data()), 0, + (cl_mem)(net_params[param_id]->mutable_gpu_diff()), 0); + } else if (regularization_type == "L1") { + greentea_gpu_sign(this->device_->id(), + net_params[param_id]->count(), + (cl_mem)(net_params[param_id]->gpu_data()), 0, + (cl_mem)(temp_[param_id]->mutable_gpu_data()), 0); + greentea_gpu_axpy(this->device_->id(), + net_params[param_id]->count(), + local_decay, + (cl_mem)(temp_[param_id]->gpu_data()), 0, + (cl_mem)(net_params[param_id]->mutable_gpu_diff()), 0); + } else { + LOG(FATAL)<< "Unknown regularization type: " + << regularization_type; + } + } +#endif // USE_GREENTEA + } +#else + NO_GPU; +#endif + break; + } + default: { + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } + } +} + +template +void SGDSolver::ComputeUpdateValue(uint_tp param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + // Compute the update to history, then copy it to the parameter diff. + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + caffe_cpu_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + + greentea_gpu_axpby( + this->device_->id(), net_params[param_id]->count(), + local_rate, (cl_mem) (net_params[param_id]->gpu_diff()), 0, + momentum, (cl_mem) (history_[param_id]->mutable_gpu_data()), 0); + greentea_copy( + net_params[param_id]->count(), + (cl_mem) (history_[param_id]->gpu_data()), 0, + (cl_mem) (net_params[param_id]->mutable_gpu_diff()), 0, &ctx); +#endif // USE_GREENTEA + } +#else + NO_GPU; +#endif + break; + } + default: { + LOG(FATAL)<< "Unknown caffe mode: " << Caffe::mode(); + } + } +} + +template +void SGDSolver::SnapshotSolverState(const string& model_filename) { + switch (this->param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + SnapshotSolverStateToBinaryProto(model_filename); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + SnapshotSolverStateToHDF5(model_filename); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } +} + +template +void SGDSolver::SnapshotSolverStateToBinaryProto( + const string& model_filename) { + SolverState state; + state.set_iter(this->iter_); + state.set_learned_net(model_filename); + state.set_current_step(this->current_step_); + state.clear_history(); + for (uint_tp i = 0; i < history_.size(); ++i) { + // Add history + BlobProto* history_blob = state.add_history(); + history_[i]->ToProto(history_blob); + } + string snapshot_filename = Solver::SnapshotFilename(".solverstate"); + LOG(INFO) + << "Snapshotting solver state to binary proto file " << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +} + +template +void SGDSolver::SnapshotSolverStateToHDF5( + const string& model_filename) { + string snapshot_filename = + Solver::SnapshotFilename(".solverstate.h5"); + LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename; + hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC, + H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(file_hid, 0) + << "Couldn't open " << snapshot_filename << " to save solver state."; + hdf5_save_int(file_hid, "iter", this->iter_); + hdf5_save_string(file_hid, "learned_net", model_filename); + hdf5_save_int(file_hid, "current_step", this->current_step_); + hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(history_hid, 0) + << "Error saving solver state to " << snapshot_filename << "."; + for (uint_tp i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_save_nd_dataset(history_hid, oss.str(), *history_[i]); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + +template +void SGDSolver::RestoreSolverStateFromBinaryProto( + const string& state_file) { + SolverState state; + ReadProtoFromBinaryFile(state_file, &state); + this->iter_ = state.iter(); + if (state.has_learned_net()) { + NetParameter net_param; + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + this->net_->CopyTrainedLayersFrom(net_param); + } + this->current_step_ = state.current_step(); + CHECK_EQ(state.history_size(), history_.size()) + << "Incorrect length of history blobs."; + LOG(INFO) << "SGDSolver: restoring history"; + for (uint_tp i = 0; i < history_.size(); ++i) { + history_[i]->FromProto(state.history(i)); + } +} + +template +void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { + hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; + this->iter_ = hdf5_load_int(file_hid, "iter"); + if (H5LTfind_dataset(file_hid, "learned_net")) { + string learned_net = hdf5_load_string(file_hid, "learned_net"); + this->net_->CopyTrainedLayersFrom(learned_net); + } + this->current_step_ = hdf5_load_int(file_hid, "current_step"); + hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); + CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; + uint_tp state_history_size = hdf5_get_num_links(history_hid); + CHECK_EQ(state_history_size, history_.size()) + << "Incorrect length of history blobs."; + for (uint_tp i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, + kMaxBlobAxes, history_[i].get()); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + +INSTANTIATE_CLASS(SGDSolver); +REGISTER_SOLVER_CLASS(SGD); + +} // namespace caffe diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index aefe1798c85..275819a7774 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -20,7 +20,7 @@ namespace caffe { // but might be more significant for parallel training. Most importantly, // it improved stability for large models on many GPUs. -void CaffeMallocHost(void** ptr, size_t size) { +void CaffeMallocHost(void** ptr, uint_tp size) { #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { @@ -163,7 +163,7 @@ inline void SyncedMemory::to_gpu() { CHECK_EQ(0, err) << "OpenCL buffer allocation of size " << size_ << " failed."; device_->IncreaseMemoryUsage(size_); - int alpha = 0; + int_tp alpha = 0; greentea_memset(device_->id(), size_, alpha, cl_gpu_mem_, 0); gpu_ptr_ = reinterpret_cast(cl_gpu_mem_); ctx.get_queue().finish(); diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index ef0e57a37a1..201a32ea918 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -24,7 +24,7 @@ class AccuracyLayerTest : public CPUDeviceTest { blob_top_(new Blob()), blob_top_per_class_(new Blob()), top_k_(3) { - vector shape(2); + vector shape(2); shape[0] = 100; shape[1] = 10; blob_bottom_data_->Reshape(shape); @@ -45,12 +45,12 @@ class AccuracyLayerTest : public CPUDeviceTest { GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_data_); - const unsigned int prefetch_rng_seed = caffe_rng_rand(); + const uint_tp prefetch_rng_seed = caffe_rng_rand(); shared_ptr rng(new Caffe::RNG(prefetch_rng_seed)); caffe::rng_t* prefetch_rng = static_cast(rng->generator()); Dtype* label_data = blob_bottom_label_->mutable_cpu_data(); - for (int i = 0; i < blob_bottom_label_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_label_->count(); ++i) { label_data[i] = (*prefetch_rng)() % 10; } } @@ -68,7 +68,7 @@ class AccuracyLayerTest : public CPUDeviceTest { vector*> blob_bottom_vec_; vector*> blob_top_vec_; vector*> blob_top_per_class_vec_; - int top_k_; + int_tp top_k_; }; TYPED_TEST_CASE(AccuracyLayerTest, TestDtypes); @@ -117,12 +117,12 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam max_value; - int max_id; - int num_correct_labels = 0; - for (int i = 0; i < 100; ++i) { + int_tp max_id; + int_tp num_correct_labels = 0; + for (int_tp i = 0; i < 100; ++i) { max_value = -FLT_MAX; max_id = 0; - for (int j = 0; j < 10; ++j) { + for (int_tp j = 0; j < 10; ++j) { if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); max_id = j; @@ -138,7 +138,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { this->blob_bottom_data_->Reshape(2, 10, 4, 5); - vector label_shape(3); + vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; this->blob_bottom_label_->Reshape(label_shape); this->FillBottoms(); @@ -149,16 +149,16 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam max_value; - const int num_labels = this->blob_bottom_label_->count(); - int max_id; - int num_correct_labels = 0; - vector label_offset(3); - for (int n = 0; n < this->blob_bottom_data_->num(); ++n) { - for (int h = 0; h < this->blob_bottom_data_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_data_->width(); ++w) { + const int_tp num_labels = this->blob_bottom_label_->count(); + int_tp max_id; + int_tp num_correct_labels = 0; + vector label_offset(3); + for (int_tp n = 0; n < this->blob_bottom_data_->num(); ++n) { + for (int_tp h = 0; h < this->blob_bottom_data_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_data_->width(); ++w) { max_value = -FLT_MAX; max_id = 0; - for (int c = 0; c < this->blob_bottom_data_->channels(); ++c) { + for (int_tp c = 0; c < this->blob_bottom_data_->channels(); ++c) { const TypeParam pred_value = this->blob_bottom_data_->data_at(n, c, h, w); if (pred_value > max_value) { @@ -167,8 +167,8 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } } label_offset[0] = n; label_offset[1] = h; label_offset[2] = w; - const int correct_label = - static_cast(this->blob_bottom_label_->data_at(label_offset)); + const int_tp correct_label = + static_cast(this->blob_bottom_label_->data_at(label_offset)); if (max_id == correct_label) { ++num_correct_labels; } @@ -192,17 +192,17 @@ TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam max_value; - int max_id; - int num_correct_labels = 0; - int count = 0; - for (int i = 0; i < 100; ++i) { + int_tp max_id; + int_tp num_correct_labels = 0; + int_tp count = 0; + for (int_tp i = 0; i < 100; ++i) { if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { continue; } ++count; max_value = -FLT_MAX; max_id = 0; - for (int j = 0; j < 10; ++j) { + for (int_tp j = 0; j < 10; ++j) { if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); max_id = j; @@ -226,13 +226,13 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam current_value; - int current_rank; - int num_correct_labels = 0; - for (int i = 0; i < 100; ++i) { - for (int j = 0; j < 10; ++j) { + int_tp current_rank; + int_tp num_correct_labels = 0; + for (int_tp i = 0; i < 100; ++i) { + for (int_tp j = 0; j < 10; ++j) { current_value = this->blob_bottom_data_->data_at(i, j, 0, 0); current_rank = 0; - for (int k = 0; k < 10; ++k) { + for (int_tp k = 0; k < 10; ++k) { if (this->blob_bottom_data_->data_at(i, k, 0, 0) > current_value) { ++current_rank; } @@ -255,15 +255,15 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); TypeParam max_value; - int max_id; - int num_correct_labels = 0; - const int num_class = this->blob_top_per_class_->num(); - vector correct_per_class(num_class, 0); - vector num_per_class(num_class, 0); - for (int i = 0; i < 100; ++i) { + int_tp max_id; + int_tp num_correct_labels = 0; + const int_tp num_class = this->blob_top_per_class_->num(); + vector correct_per_class(num_class, 0); + vector num_per_class(num_class, 0); + for (int_tp i = 0; i < 100; ++i) { max_value = -FLT_MAX; max_id = 0; - for (int j = 0; j < 10; ++j) { + for (int_tp j = 0; j < 10; ++j) { if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); max_id = j; @@ -277,7 +277,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { } EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / 100.0, 1e-4); - for (int i = 0; i < num_class; ++i) { + for (int_tp i = 0; i < num_class; ++i) { TypeParam accuracy_per_class = (num_per_class[i] > 0 ? static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), @@ -299,20 +299,20 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); TypeParam max_value; - int max_id; - int num_correct_labels = 0; - const int num_class = this->blob_top_per_class_->num(); - vector correct_per_class(num_class, 0); - vector num_per_class(num_class, 0); - int count = 0; - for (int i = 0; i < 100; ++i) { + int_tp max_id; + int_tp num_correct_labels = 0; + const int_tp num_class = this->blob_top_per_class_->num(); + vector correct_per_class(num_class, 0); + vector num_per_class(num_class, 0); + int_tp count = 0; + for (int_tp i = 0; i < 100; ++i) { if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { continue; } ++count; max_value = -FLT_MAX; max_id = 0; - for (int j = 0; j < 10; ++j) { + for (int_tp j = 0; j < 10; ++j) { if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); max_id = j; @@ -327,7 +327,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { EXPECT_EQ(count, 97); EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / TypeParam(count), 1e-4); - for (int i = 0; i < 10; ++i) { + for (int_tp i = 0; i < 10; ++i) { TypeParam accuracy_per_class = (num_per_class[i] > 0 ? static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index bbf19099905..dfa299a616b 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -32,7 +32,7 @@ class ArgMaxLayerTest : public CPUDeviceTest { Blob* const blob_top_; vector*> blob_bottom_vec_; vector*> blob_top_vec_; - size_t top_k_; + uint_tp top_k_; }; TYPED_TEST_CASE(ArgMaxLayerTest, TestDtypes); @@ -100,16 +100,16 @@ TYPED_TEST(ArgMaxLayerTest, TestCPU) { // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); - int max_ind; + int_tp max_ind; TypeParam max_val; - int num = this->blob_bottom_->num(); - int dim = this->blob_bottom_->count() / num; - for (int i = 0; i < num; ++i) { + int_tp num = this->blob_bottom_->num(); + int_tp dim = this->blob_bottom_->count() / num; + for (int_tp i = 0; i < num; ++i) { EXPECT_GE(top_data[i], 0); EXPECT_LE(top_data[i], dim); max_ind = top_data[i]; max_val = bottom_data[i * dim + max_ind]; - for (int j = 0; j < dim; ++j) { + for (int_tp j = 0; j < dim; ++j) { EXPECT_LE(bottom_data[i * dim + j], max_val); } } @@ -125,17 +125,17 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); - int max_ind; + int_tp max_ind; TypeParam max_val; - int num = this->blob_bottom_->num(); - int dim = this->blob_bottom_->count() / num; - for (int i = 0; i < num; ++i) { + int_tp num = this->blob_bottom_->num(); + int_tp dim = this->blob_bottom_->count() / num; + for (int_tp i = 0; i < num; ++i) { EXPECT_GE(top_data[i], 0); EXPECT_LE(top_data[i], dim); max_ind = top_data[i * 2]; max_val = top_data[i * 2 + 1]; EXPECT_EQ(bottom_data[i * dim + max_ind], max_val); - for (int j = 0; j < dim; ++j) { + for (int_tp j = 0; j < dim; ++j) { EXPECT_LE(bottom_data[i * dim + j], max_val); } } @@ -150,18 +150,18 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUTopK) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); - int max_ind; + int_tp max_ind; TypeParam max_val; - int num = this->blob_bottom_->num(); - int dim = this->blob_bottom_->count() / num; - for (int i = 0; i < num; ++i) { + int_tp num = this->blob_bottom_->num(); + int_tp dim = this->blob_bottom_->count() / num; + for (int_tp i = 0; i < num; ++i) { EXPECT_GE(this->blob_top_->data_at(i, 0, 0, 0), 0); EXPECT_LE(this->blob_top_->data_at(i, 0, 0, 0), dim); - for (int j = 0; j < this->top_k_; ++j) { + for (int_tp j = 0; j < this->top_k_; ++j) { max_ind = this->blob_top_->data_at(i, 0, j, 0); max_val = bottom_data[i * dim + max_ind]; - int count = 0; - for (int k = 0; k < dim; ++k) { + int_tp count = 0; + for (int_tp k = 0; k < dim; ++k) { if (bottom_data[i * dim + k] > max_val) { ++count; } @@ -181,19 +181,19 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUMaxValTopK) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); - int max_ind; + int_tp max_ind; TypeParam max_val; - int num = this->blob_bottom_->num(); - int dim = this->blob_bottom_->count() / num; - for (int i = 0; i < num; ++i) { + int_tp num = this->blob_bottom_->num(); + int_tp dim = this->blob_bottom_->count() / num; + for (int_tp i = 0; i < num; ++i) { EXPECT_GE(this->blob_top_->data_at(i, 0, 0, 0), 0); EXPECT_LE(this->blob_top_->data_at(i, 0, 0, 0), dim); - for (int j = 0; j < this->top_k_; ++j) { + for (int_tp j = 0; j < this->top_k_; ++j) { max_ind = this->blob_top_->data_at(i, 0, j, 0); max_val = this->blob_top_->data_at(i, 1, j, 0); EXPECT_EQ(bottom_data[i * dim + max_ind], max_val); - int count = 0; - for (int k = 0; k < dim; ++k) { + int_tp count = 0; + for (int_tp k = 0; k < dim; ++k) { if (bottom_data[i * dim + k] > max_val) { ++count; } @@ -211,17 +211,17 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUAxis) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values - int max_ind; + int_tp max_ind; TypeParam max_val; - std::vector shape = this->blob_bottom_->shape(); - for (int i = 0; i < shape[1]; ++i) { - for (int j = 0; j < shape[2]; ++j) { - for (int k = 0; k < shape[3]; ++k) { + std::vector shape = this->blob_bottom_->shape(); + for (int_tp i = 0; i < shape[1]; ++i) { + for (int_tp j = 0; j < shape[2]; ++j) { + for (int_tp k = 0; k < shape[3]; ++k) { max_ind = this->blob_top_->data_at(0, i, j, k); max_val = this->blob_bottom_->data_at(max_ind, i, j, k); EXPECT_GE(max_ind, 0); EXPECT_LE(max_ind, shape[0]); - for (int l = 0; l < shape[0]; ++l) { + for (int_tp l = 0; l < shape[0]; ++l) { EXPECT_LE(this->blob_bottom_->data_at(l, i, j, k), max_val); } } @@ -238,19 +238,19 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUAxisTopK) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values - int max_ind; + int_tp max_ind; TypeParam max_val; - std::vector shape = this->blob_bottom_->shape(); - for (int i = 0; i < shape[0]; ++i) { - for (int j = 0; j < shape[1]; ++j) { - for (int k = 0; k < shape[3]; ++k) { - for (int m = 0; m < this->top_k_; ++m) { + std::vector shape = this->blob_bottom_->shape(); + for (int_tp i = 0; i < shape[0]; ++i) { + for (int_tp j = 0; j < shape[1]; ++j) { + for (int_tp k = 0; k < shape[3]; ++k) { + for (int_tp m = 0; m < this->top_k_; ++m) { max_ind = this->blob_top_->data_at(i, j, m, k); max_val = this->blob_bottom_->data_at(i, j, max_ind, k); EXPECT_GE(max_ind, 0); EXPECT_LE(max_ind, shape[2]); - int count = 0; - for (int l = 0; l < shape[2]; ++l) { + int_tp count = 0; + for (int_tp l = 0; l < shape[2]; ++l) { if (this->blob_bottom_->data_at(i, j, l, k) > max_val) { ++count; } @@ -273,14 +273,14 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUAxisMaxValTopK) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values TypeParam max_val; - std::vector shape = this->blob_bottom_->shape(); - for (int i = 0; i < shape[0]; ++i) { - for (int j = 0; j < shape[1]; ++j) { - for (int k = 0; k < shape[2]; ++k) { - for (int m = 0; m < this->top_k_; ++m) { + std::vector shape = this->blob_bottom_->shape(); + for (int_tp i = 0; i < shape[0]; ++i) { + for (int_tp j = 0; j < shape[1]; ++j) { + for (int_tp k = 0; k < shape[2]; ++k) { + for (int_tp m = 0; m < this->top_k_; ++m) { max_val = this->blob_top_->data_at(i, j, k, m); - int count = 0; - for (int l = 0; l < shape[3]; ++l) { + int_tp count = 0; + for (int_tp l = 0; l < shape[3]; ++l) { if (this->blob_bottom_->data_at(i, j, k, l) > max_val) { ++count; } diff --git a/src/caffe/test/test_batch_reindex_layer.cpp b/src/caffe/test/test_batch_reindex_layer.cpp index 985db343d12..993523b5bce 100644 --- a/src/caffe/test/test_batch_reindex_layer.cpp +++ b/src/caffe/test/test_batch_reindex_layer.cpp @@ -25,13 +25,13 @@ class BatchReindexLayerTest : public MultiDeviceTest { } virtual void SetUp() { Caffe::set_random_seed(1701); - vector sz; + vector sz; sz.push_back(5); sz.push_back(4); sz.push_back(3); sz.push_back(2); blob_bottom_->Reshape(sz); - vector permsz; + vector permsz; permsz.push_back(6); blob_bottom_permute_->Reshape(permsz); @@ -39,8 +39,8 @@ class BatchReindexLayerTest : public MultiDeviceTest { FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); - int perm[] = { 4, 0, 4, 0, 1, 2 }; - for (int i = 0; i < blob_bottom_permute_->count(); ++i) { + int_tp perm[] = { 4, 0, 4, 0, 1, 2 }; + for (int_tp i = 0; i < blob_bottom_permute_->count(); ++i) { blob_bottom_permute_->mutable_cpu_data()[i] = perm[i]; } @@ -62,21 +62,21 @@ class BatchReindexLayerTest : public MultiDeviceTest { void TestForward() { LayerParameter layer_param; - vector sz; + vector sz; sz.push_back(5); sz.push_back(4); sz.push_back(3); sz.push_back(2); blob_bottom_->Reshape(sz); - for (int i = 0; i < blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_->count(); ++i) { blob_bottom_->mutable_cpu_data()[i] = i; } - vector permsz; + vector permsz; permsz.push_back(6); blob_bottom_permute_->Reshape(permsz); - int perm[] = { 4, 0, 4, 0, 1, 2 }; - for (int i = 0; i < blob_bottom_permute_->count(); ++i) { + int_tp perm[] = { 4, 0, 4, 0, 1, 2 }; + for (int_tp i = 0; i < blob_bottom_permute_->count(); ++i) { blob_bottom_permute_->mutable_cpu_data()[i] = perm[i]; } BatchReindexLayer layer(layer_param); @@ -87,12 +87,12 @@ class BatchReindexLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_->width(), blob_bottom_->width()); layer.Forward(blob_bottom_vec_, blob_top_vec_); - int channels = blob_top_->channels(); - int height = blob_top_->height(); - int width = blob_top_->width(); - for (int i = 0; i < blob_top_->count(); ++i) { - int n = i / (channels * width * height); - int inner_idx = (i % (channels * width * height)); + int_tp channels = blob_top_->channels(); + int_tp height = blob_top_->height(); + int_tp width = blob_top_->width(); + for (int_tp i = 0; i < blob_top_->count(); ++i) { + int_tp n = i / (channels * width * height); + int_tp inner_idx = (i % (channels * width * height)); EXPECT_EQ( blob_top_->cpu_data()[i], blob_bottom_->cpu_data()[perm[n] * channels * width * height diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index 7da6423b67c..657ab9bcc8e 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -56,7 +56,7 @@ TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) { BlobProto blob_proto; // Reshape to (3 x 2). - vector shape(2); + vector shape(2); shape[0] = 3; shape[1] = 2; this->blob_->Reshape(shape); @@ -133,7 +133,7 @@ TYPED_TEST(BlobMathTest, TestSumOfSquares) { filler.Fill(this->blob_); Dtype expected_sumsq = 0; const Dtype* data = this->blob_->cpu_data(); - for (int i = 0; i < this->blob_->count(); ++i) { + for (int_tp i = 0; i < this->blob_->count(); ++i) { expected_sumsq += data[i] * data[i]; } // Do a mutable access on the current device, @@ -188,7 +188,7 @@ TYPED_TEST(BlobMathTest, TestAsum) { filler.Fill(this->blob_); Dtype expected_asum = 0; const Dtype* data = this->blob_->cpu_data(); - for (int i = 0; i < this->blob_->count(); ++i) { + for (int_tp i = 0; i < this->blob_->count(); ++i) { expected_asum += std::fabs(data[i]); } // Do a mutable access on the current device, diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index ccd97eb1d66..9e815af9bbc 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -106,7 +106,7 @@ TYPED_TEST(ConcatLayerTest, TestForwardTrivial) { this->blob_bottom_vec_0_.resize(1); layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_0_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_0_->count(); ++i) { EXPECT_EQ(this->blob_bottom_0_->cpu_data()[i], this->blob_top_->cpu_data()[i]); } @@ -119,20 +119,20 @@ TYPED_TEST(ConcatLayerTest, TestForwardNum) { ConcatLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_1_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_1_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_vec_1_[0]->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_vec_1_[0]->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_top_->height(); ++h) { + for (int_tp w = 0; w < this->blob_top_->width(); ++w) { EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_vec_1_[0]->data_at(n, c, h, w)); } } } } - for (int n = 0; n < this->blob_bottom_vec_1_[1]->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_vec_1_[1]->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_top_->height(); ++h) { + for (int_tp w = 0; w < this->blob_top_->width(); ++w) { EXPECT_EQ(this->blob_top_->data_at(n + 2, c, h, w), this->blob_bottom_vec_1_[1]->data_at(n, c, h, w)); } @@ -147,18 +147,18 @@ TYPED_TEST(ConcatLayerTest, TestForwardChannels) { ConcatLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); - for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { + for (int_tp n = 0; n < this->blob_top_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_0_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_top_->height(); ++h) { + for (int_tp w = 0; w < this->blob_top_->width(); ++w) { EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_vec_0_[0]->data_at(n, c, h, w)); } } } - for (int c = 0; c < this->blob_bottom_1_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { + for (int_tp c = 0; c < this->blob_bottom_1_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_top_->height(); ++h) { + for (int_tp w = 0; w < this->blob_top_->width(); ++w) { EXPECT_EQ(this->blob_top_->data_at(n, c + 3, h, w), this->blob_bottom_vec_0_[1]->data_at(n, c, h, w)); } diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 359e49b0419..34870caf7c7 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -35,7 +35,7 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { blob_bottom_vec_.push_back(blob_bottom_data_i_); filler.Fill(this->blob_bottom_data_j_); blob_bottom_vec_.push_back(blob_bottom_data_j_); - for (int i = 0; i < blob_bottom_y_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_y_->count(); ++i) { blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 } blob_bottom_vec_.push_back(blob_bottom_y_); @@ -66,12 +66,12 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // manually compute to compare const Dtype margin = layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); + const int_tp num = this->blob_bottom_data_i_->num(); + const int_tp channels = this->blob_bottom_data_i_->channels(); Dtype loss(0); - for (int i = 0; i < num; ++i) { + for (int_tp i = 0; i < num; ++i) { Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { + for (int_tp j = 0; j < channels; ++j) { Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; dist_sq += diff*diff; @@ -109,12 +109,12 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // manually compute to compare const Dtype margin = layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); + const int_tp num = this->blob_bottom_data_i_->num(); + const int_tp channels = this->blob_bottom_data_i_->channels(); Dtype loss(0); - for (int i = 0; i < num; ++i) { + for (int_tp i = 0; i < num; ++i) { Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { + for (int_tp j = 0; j < channels; ++j) { Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; dist_sq += diff*diff; diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 1be0272d356..ee67308857a 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -22,28 +22,28 @@ void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, const bool has_depth = (out->num_axes() == 5); if (!has_depth) { CHECK_EQ(4, out->num_axes()); } // Kernel size, stride, and pad - int kernel_h, kernel_w; + int_tp kernel_h, kernel_w; if (conv_param->has_kernel_h() || conv_param->has_kernel_w()) { kernel_h = conv_param->kernel_h(); kernel_w = conv_param->kernel_w(); } else { kernel_h = kernel_w = conv_param->kernel_size(0); } - int pad_h, pad_w; + int_tp pad_h, pad_w; if (conv_param->has_pad_h() || conv_param->has_pad_w()) { pad_h = conv_param->pad_h(); pad_w = conv_param->pad_w(); } else { pad_h = pad_w = conv_param->pad_size() ? conv_param->pad(0) : 0; } - int stride_h, stride_w; + int_tp stride_h, stride_w; if (conv_param->has_stride_h() || conv_param->has_stride_w()) { stride_h = conv_param->stride_h(); stride_w = conv_param->stride_w(); } else { stride_h = stride_w = conv_param->stride_size() ? conv_param->stride(0) : 1; } - int kernel_d, pad_d, stride_d; + int_tp kernel_d, pad_d, stride_d; if (has_depth) { kernel_d = kernel_h; stride_d = stride_h; @@ -53,30 +53,30 @@ void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, pad_d = 0; } // Groups - int groups = conv_param->group(); - int o_g = out->shape(1) / groups; - int k_g = in->shape(1) / groups; - int o_head, k_head; + int_tp groups = conv_param->group(); + int_tp o_g = out->shape(1) / groups; + int_tp k_g = in->shape(1) / groups; + int_tp o_head, k_head; // Convolution - vector weight_offset(4 + has_depth); - vector in_offset(4 + has_depth); - vector out_offset(4 + has_depth); + vector weight_offset(4 + has_depth); + vector in_offset(4 + has_depth); + vector out_offset(4 + has_depth); Dtype* out_data = out->mutable_cpu_data(); - for (int n = 0; n < out->shape(0); n++) { - for (int g = 0; g < groups; g++) { + for (int_tp n = 0; n < out->shape(0); n++) { + for (int_tp g = 0; g < groups; g++) { o_head = o_g * g; k_head = k_g * g; - for (int o = 0; o < o_g; o++) { - for (int k = 0; k < k_g; k++) { - for (int z = 0; z < (has_depth ? out->shape(2) : 1); z++) { - for (int y = 0; y < out->shape(2 + has_depth); y++) { - for (int x = 0; x < out->shape(3 + has_depth); x++) { - for (int r = 0; r < kernel_d; r++) { - for (int p = 0; p < kernel_h; p++) { - for (int q = 0; q < kernel_w; q++) { - int in_z = z * stride_d - pad_d + r; - int in_y = y * stride_h - pad_h + p; - int in_x = x * stride_w - pad_w + q; + for (int_tp o = 0; o < o_g; o++) { + for (int_tp k = 0; k < k_g; k++) { + for (int_tp z = 0; z < (has_depth ? out->shape(2) : 1); z++) { + for (int_tp y = 0; y < out->shape(2 + has_depth); y++) { + for (int_tp x = 0; x < out->shape(3 + has_depth); x++) { + for (int_tp r = 0; r < kernel_d; r++) { + for (int_tp p = 0; p < kernel_h; p++) { + for (int_tp q = 0; q < kernel_w; q++) { + int_tp in_z = z * stride_d - pad_d + r; + int_tp in_y = y * stride_h - pad_h + p; + int_tp in_x = x * stride_w - pad_w + q; if (in_z >= 0 && in_z < (has_depth ? in->shape(2) : 1) && in_y >= 0 && in_y < in->shape(2 + has_depth) && in_x >= 0 && in_x < in->shape(3 + has_depth)) { @@ -112,11 +112,11 @@ void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, // Bias if (conv_param->bias_term()) { const Dtype* bias_data = weights[1]->cpu_data(); - for (int n = 0; n < out->shape(0); n++) { - for (int o = 0; o < out->shape(1); o++) { - for (int z = 0; z < (has_depth ? out->shape(2) : 1); z++) { - for (int y = 0; y < out->shape(2 + has_depth); y++) { - for (int x = 0; x < out->shape(3 + has_depth); x++) { + for (int_tp n = 0; n < out->shape(0); n++) { + for (int_tp o = 0; o < out->shape(1); o++) { + for (int_tp z = 0; z < (has_depth ? out->shape(2) : 1); z++) { + for (int_tp y = 0; y < out->shape(2 + has_depth); y++) { + for (int_tp x = 0; x < out->shape(3 + has_depth); x++) { out_offset[0] = n; out_offset[1] = o; if (has_depth) { out_offset[2] = z; } @@ -245,14 +245,14 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) { this->MakeReferenceTop(this->blob_top_)); top_data = this->blob_top_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), this->MakeReferenceTop(this->blob_top_2_)); top_data = this->blob_top_2_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } } @@ -262,30 +262,30 @@ TYPED_TEST(ConvolutionLayerTest, Test0DConvolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - const int kNumOutput = 3; + const int_tp kNumOutput = 3; convolution_param->set_num_output(kNumOutput); convolution_param->set_axis(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); shared_ptr > layer( new ConvolutionLayer(layer_param)); - vector top_shape = this->blob_bottom_->shape(); + vector top_shape = this->blob_bottom_->shape(); top_shape[3] = kNumOutput; layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(top_shape, this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Check against reference convolution. - vector weight_offset(2); + vector weight_offset(2); const Blob* weight = layer->blobs()[0].get(); const Blob* bias = layer->blobs()[1].get(); - const int num = this->blob_top_->count(3); - const int dim = this->blob_top_->shape(3); - const int bottom_dim = this->blob_bottom_->shape(3); - for (int n = 0; n < num; ++n) { - for (int d = 0; d < dim; ++d) { + const int_tp num = this->blob_top_->count(3); + const int_tp dim = this->blob_top_->shape(3); + const int_tp bottom_dim = this->blob_bottom_->shape(3); + for (int_tp n = 0; n < num; ++n) { + for (int_tp d = 0; d < dim; ++d) { weight_offset[0] = d; Dtype value = bias->cpu_data()[d]; - for (int bottom_d = 0; bottom_d < bottom_dim; ++bottom_d) { + for (int_tp bottom_d = 0; bottom_d < bottom_dim; ++bottom_d) { weight_offset[1] = bottom_d; value += weight->data_at(weight_offset) * this->blob_bottom_->cpu_data()[n * bottom_dim + bottom_d]; @@ -299,7 +299,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSimple3DConvolution) { typedef typename TypeParam::Dtype Dtype; this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - vector bottom_shape(5); + vector bottom_shape(5); bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); bottom_shape[2] = 5; @@ -307,7 +307,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSimple3DConvolution) { bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); FillerParameter filler_param; GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_vec_.size(); ++i) { this->blob_bottom_vec_[i]->Reshape(bottom_shape); filler.Fill(this->blob_bottom_vec_[i]); } @@ -330,14 +330,14 @@ TYPED_TEST(ConvolutionLayerTest, TestSimple3DConvolution) { this->MakeReferenceTop(this->blob_top_)); top_data = this->blob_top_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), this->MakeReferenceTop(this->blob_top_2_)); top_data = this->blob_top_2_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } } @@ -364,7 +364,7 @@ TYPED_TEST(ConvolutionLayerTest, Test1x1Convolution) { this->MakeReferenceTop(this->blob_top_)); top_data = this->blob_top_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } } @@ -392,7 +392,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) { this->MakeReferenceTop(this->blob_top_)); top_data = this->blob_top_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } } @@ -422,8 +422,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { layer->blobs().resize(1); layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); Dtype* weights = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 9; // 3 x 3 filter + for (int_tp c = 0; c < 3; ++c) { + int_tp i = c * 9; // 3 x 3 filter weights[i + 0] = -1; weights[i + 1] = 0; weights[i + 2] = 1; @@ -455,8 +455,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { layer->blobs().resize(1); layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); Dtype* weights_1 = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 3; // 3 x 1 filter + for (int_tp c = 0; c < 3; ++c) { + int_tp i = c * 3; // 3 x 1 filter weights_1[i + 0] = 1; weights_1[i + 1] = 2; weights_1[i + 2] = 1; @@ -485,23 +485,23 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { // Test equivalence of full and separable filters. const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype* sep_top_data = this->blob_top_2_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); } } TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) { typedef typename TypeParam::Dtype Dtype; - const int kernel_h = 11; - const int kernel_w = 13; - vector bottom_shape(4); + const int_tp kernel_h = 11; + const int_tp kernel_w = 13; + vector bottom_shape(4); bottom_shape[0] = 15; bottom_shape[1] = 18; bottom_shape[2] = kernel_h * 2; bottom_shape[3] = kernel_w * 2; FillerParameter filler_param; GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_vec_.size(); ++i) { this->blob_bottom_vec_[i]->Reshape(bottom_shape); filler.Fill(this->blob_bottom_vec_[i]); } @@ -592,17 +592,17 @@ TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) { backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); } ASSERT_EQ(result_nd.count(), result_2d.count()); - for (int i = 0; i < result_2d.count(); ++i) { + for (int_tp i = 0; i < result_2d.count(); ++i) { EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]); } ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); - for (int i = 0; i < backward_result_2d.count(); ++i) { + for (int_tp i = 0; i < backward_result_2d.count(); ++i) { EXPECT_EQ(backward_result_2d.cpu_diff()[i], backward_result_nd.cpu_diff()[i]); } ASSERT_EQ(backward_weight_result_nd.count(), backward_weight_result_2d.count()); - for (int i = 0; i < backward_weight_result_2d.count(); ++i) { + for (int_tp i = 0; i < backward_weight_result_2d.count(); ++i) { EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], backward_weight_result_nd.cpu_diff()[i]); } @@ -631,7 +631,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradient3D) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - vector bottom_shape(5); + vector bottom_shape(5); bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); bottom_shape[2] = 5; @@ -639,7 +639,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradient3D) { bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); FillerParameter filler_param; GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_vec_.size(); ++i) { this->blob_bottom_vec_[i]->Reshape(bottom_shape); filler.Fill(this->blob_bottom_vec_[i]); } @@ -794,14 +794,14 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { this->MakeReferenceTop(this->blob_top_)); top_data = this->blob_top_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), this->MakeReferenceTop(this->blob_top_2_)); top_data = this->blob_top_2_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } } @@ -828,7 +828,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { this->MakeReferenceTop(this->blob_top_)); top_data = this->blob_top_->cpu_data(); ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); } } @@ -858,8 +858,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { layer->blobs().resize(1); layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); TypeParam* weights = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 9; // 3 x 3 filter + for (int_tp c = 0; c < 3; ++c) { + int_tp i = c * 9; // 3 x 3 filter weights[i + 0] = -1; weights[i + 1] = 0; weights[i + 2] = 1; @@ -891,8 +891,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { layer->blobs().resize(1); layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); TypeParam* weights_1 = layer->blobs()[0]->mutable_cpu_data(); - for (int c = 0; c < 3; ++c) { - int i = c * 3; // 3 x 1 filter + for (int_tp c = 0; c < 3; ++c) { + int_tp i = c * 3; // 3 x 1 filter weights_1[i + 0] = 1; weights_1[i + 1] = 2; weights_1[i + 2] = 1; @@ -921,7 +921,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { // Test equivalence of full and separable filters. const TypeParam* top_data = this->blob_top_->cpu_data(); const TypeParam* sep_top_data = this->blob_top_2_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); } } diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp index 44b8170d46b..64338fa9ae2 100644 --- a/src/caffe/test/test_convolution_nd_layer.cpp +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -73,17 +73,17 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { ConvolutionLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - int d = blob_bottom_->shape(2); - int h = blob_bottom_->shape(3); - int w = blob_bottom_->shape(4); + int_tp d = blob_bottom_->shape(2); + int_tp h = blob_bottom_->shape(3); + int_tp w = blob_bottom_->shape(4); TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); TypeParam checksum = 0; - for (int cd = 0; cd < d; ++cd) { - for (int ch = 0; ch < h; ++ch) { - for (int cw = 0; cw < w; ++cw) { + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { bottom_data[cw + ch * w + cd * w * h] = cw + ch * w + cd * w * h; if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { @@ -136,13 +136,13 @@ class ConvolutionNDLayerTest : public GPUDeviceTest { const TypeParam *bottom_diff = blob_bottom_->cpu_diff(); - int d = blob_bottom_->shape(2); - int h = blob_bottom_->shape(3); - int w = blob_bottom_->shape(4); + int_tp d = blob_bottom_->shape(2); + int_tp h = blob_bottom_->shape(3); + int_tp w = blob_bottom_->shape(4); - for (int cd = 0; cd < d; ++cd) { - for (int ch = 0; ch < h; ++ch) { - for (int cw = 0; cw < w; ++cw) { + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { EXPECT_EQ(1, bottom_diff[cw + ch * w + cd * w * h]); } diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index 9e03954a543..e8382676653 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -46,15 +46,15 @@ class DataLayerTest : public MultiDeviceTest { scoped_ptr db(db::GetDB(backend)); db->Open(*filename_, db::NEW); scoped_ptr txn(db->NewTransaction()); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { Datum datum; datum.set_label(i); datum.set_channels(2); datum.set_height(3); datum.set_width(4); std::string* data = datum.mutable_data(); - for (int j = 0; j < 24; ++j) { - int datum = unique_pixels ? j : i; + for (int_tp j = 0; j < 24; ++j) { + int_tp datum = unique_pixels ? j : i; data->push_back(static_cast(datum)); } stringstream ss; @@ -91,13 +91,13 @@ class DataLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_label_->height(), 1); EXPECT_EQ(blob_top_label_->width(), 1); - for (int iter = 0; iter < 100; ++iter) { + for (int_tp iter = 0; iter < 100; ++iter) { layer.Forward(blob_bottom_vec_, blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } - for (int i = 0; i < 5; ++i) { - for (int j = 0; j < 24; ++j) { + for (int_tp i = 0; i < 5; ++i) { + for (int_tp j = 0; j < 24; ++j) { EXPECT_EQ(scale * i, blob_top_data_->cpu_data()[i * 24 + j]) << "debug: iter " << iter << " i " << i << " j " << j; } @@ -106,21 +106,21 @@ class DataLayerTest : public MultiDeviceTest { } void TestReshape(DataParameter_DB backend) { - const int num_inputs = 5; + const int_tp num_inputs = 5; // Save data of varying shapes. LOG(INFO) << "Using temporary dataset " << *filename_; scoped_ptr db(db::GetDB(backend)); db->Open(*filename_, db::NEW); scoped_ptr txn(db->NewTransaction()); - for (int i = 0; i < num_inputs; ++i) { + for (int_tp i = 0; i < num_inputs; ++i) { Datum datum; datum.set_label(i); datum.set_channels(2); datum.set_height(i % 2 + 1); datum.set_width(i % 4 + 1); std::string* data = datum.mutable_data(); - const int data_size = datum.channels() * datum.height() * datum.width(); - for (int j = 0; j < data_size; ++j) { + const int_tp data_size = datum.channels() * datum.height() * datum.width(); + for (int_tp j = 0; j < data_size; ++j) { data->push_back(static_cast(j)); } stringstream ss; @@ -149,19 +149,19 @@ class DataLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_label_->height(), 1); EXPECT_EQ(blob_top_label_->width(), 1); - for (int iter = 0; iter < num_inputs; ++iter) { + for (int_tp iter = 0; iter < num_inputs; ++iter) { layer.Forward(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_data_->height(), iter % 2 + 1); EXPECT_EQ(blob_top_data_->width(), iter % 4 + 1); EXPECT_EQ(iter, blob_top_label_->cpu_data()[0]); - const int channels = blob_top_data_->channels(); - const int height = blob_top_data_->height(); - const int width = blob_top_data_->width(); - for (int c = 0; c < channels; ++c) { - for (int h = 0; h < height; ++h) { - for (int w = 0; w < width; ++w) { - const int idx = (c * height + h) * width + w; - EXPECT_EQ(idx, static_cast(blob_top_data_->cpu_data()[idx])) + const int_tp channels = blob_top_data_->channels(); + const int_tp height = blob_top_data_->height(); + const int_tp width = blob_top_data_->width(); + for (int_tp c = 0; c < channels; ++c) { + for (int_tp h = 0; h < height; ++h) { + for (int_tp w = 0; w < width; ++w) { + const int_tp idx = (c * height + h) * width + w; + EXPECT_EQ(idx, static_cast(blob_top_data_->cpu_data()[idx])) << "debug: iter " << iter << " c " << c << " h " << h << " w " << w; } @@ -197,14 +197,14 @@ class DataLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_label_->height(), 1); EXPECT_EQ(blob_top_label_->width(), 1); - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer.Forward(blob_bottom_vec_, blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } - int num_with_center_value = 0; - for (int i = 0; i < 5; ++i) { - for (int j = 0; j < 2; ++j) { + int_tp num_with_center_value = 0; + for (int_tp i = 0; i < 5; ++i) { + for (int_tp j = 0; j < 2; ++j) { const Dtype center_value = scale * (j ? 17 : 5); num_with_center_value += (center_value == blob_top_data_->cpu_data()[i * 2 + j]); @@ -243,14 +243,14 @@ class DataLayerTest : public MultiDeviceTest { { DataLayer layer1(param); layer1.SetUp(blob_bottom_vec_, blob_top_vec_); - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer1.Forward(blob_bottom_vec_, blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } vector iter_crop_sequence; - for (int i = 0; i < 5; ++i) { - for (int j = 0; j < 2; ++j) { + for (int_tp i = 0; i < 5; ++i) { + for (int_tp j = 0; j < 2; ++j) { iter_crop_sequence.push_back( blob_top_data_->cpu_data()[i * 2 + j]); } @@ -264,13 +264,13 @@ class DataLayerTest : public MultiDeviceTest { Caffe::set_random_seed(seed_); DataLayer layer2(param); layer2.SetUp(blob_bottom_vec_, blob_top_vec_); - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer2.Forward(blob_bottom_vec_, blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } - for (int i = 0; i < 5; ++i) { - for (int j = 0; j < 2; ++j) { + for (int_tp i = 0; i < 5; ++i) { + for (int_tp j = 0; j < 2; ++j) { EXPECT_EQ(crop_sequence[iter][i * 2 + j], blob_top_data_->cpu_data()[i * 2 + j]) << "debug: iter " << iter << " i " << i << " j " << j; @@ -299,14 +299,14 @@ class DataLayerTest : public MultiDeviceTest { { DataLayer layer1(param); layer1.SetUp(blob_bottom_vec_, blob_top_vec_); - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer1.Forward(blob_bottom_vec_, blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } vector iter_crop_sequence; - for (int i = 0; i < 5; ++i) { - for (int j = 0; j < 2; ++j) { + for (int_tp i = 0; i < 5; ++i) { + for (int_tp j = 0; j < 2; ++j) { iter_crop_sequence.push_back( blob_top_data_->cpu_data()[i * 2 + j]); } @@ -320,14 +320,14 @@ class DataLayerTest : public MultiDeviceTest { srand(seed_); DataLayer layer2(param); layer2.SetUp(blob_bottom_vec_, blob_top_vec_); - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer2.Forward(blob_bottom_vec_, blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } - int num_sequence_matches = 0; - for (int i = 0; i < 5; ++i) { - for (int j = 0; j < 2; ++j) { + int_tp num_sequence_matches = 0; + for (int_tp i = 0; i < 5; ++i) { + for (int_tp j = 0; j < 2; ++j) { num_sequence_matches += (crop_sequence[iter][i * 2 + j] == blob_top_data_->cpu_data()[i * 2 + j]); } @@ -344,7 +344,7 @@ class DataLayerTest : public MultiDeviceTest { Blob* const blob_top_label_; vector*> blob_bottom_vec_; vector*> blob_top_vec_; - int seed_; + int_tp seed_; }; TYPED_TEST_CASE(DataLayerTest, TestDtypesAndDevices); diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index dc362d02574..fb139e69380 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -16,16 +16,16 @@ namespace caffe { -void FillDatum(const int label, const int channels, const int height, - const int width, const bool unique_pixels, Datum * datum) { +void FillDatum(const int_tp label, const int_tp channels, const int_tp height, + const int_tp width, const bool unique_pixels, Datum * datum) { datum->set_label(label); datum->set_channels(channels); datum->set_height(height); datum->set_width(width); - int size = channels * height * width; + int_tp size = channels * height * width; std::string* data = datum->mutable_data(); - for (int j = 0; j < size; ++j) { - int datum = unique_pixels ? j : label; + for (int_tp j = 0; j < size; ++j) { + int_tp datum = unique_pixels ? j : label; data->push_back(static_cast(datum)); } } @@ -37,13 +37,13 @@ class DataTransformTest : public ::testing::Test { : seed_(1701), num_iter_(10) {} - int NumSequenceMatches(const TransformationParameter transform_param, + int_tp NumSequenceMatches(const TransformationParameter transform_param, const Datum& datum, Phase phase) { // Get crop sequence with Caffe seed 1701. DataTransformer* transformer = new DataTransformer(transform_param, phase, Caffe::GetDefaultDevice()); - const int crop_size = transform_param.crop_size(); + const int_tp crop_size = transform_param.crop_size(); Caffe::set_random_seed(seed_); transformer->InitRand(); Blob* blob = @@ -53,20 +53,20 @@ class DataTransformTest : public ::testing::Test { } vector > crop_sequence; - for (int iter = 0; iter < this->num_iter_; ++iter) { + for (int_tp iter = 0; iter < this->num_iter_; ++iter) { vector iter_crop_sequence; transformer->Transform(datum, blob); - for (int j = 0; j < blob->count(); ++j) { + for (int_tp j = 0; j < blob->count(); ++j) { iter_crop_sequence.push_back(blob->cpu_data()[j]); } crop_sequence.push_back(iter_crop_sequence); } // Check if the sequence differs from the previous - int num_sequence_matches = 0; - for (int iter = 0; iter < this->num_iter_; ++iter) { + int_tp num_sequence_matches = 0; + for (int_tp iter = 0; iter < this->num_iter_; ++iter) { vector iter_crop_sequence = crop_sequence[iter]; transformer->Transform(datum, blob); - for (int j = 0; j < blob->count(); ++j) { + for (int_tp j = 0; j < blob->count(); ++j) { num_sequence_matches += (crop_sequence[iter][j] == blob->cpu_data()[j]); } @@ -76,8 +76,8 @@ class DataTransformTest : public ::testing::Test { virtual ~DataTransformTest() { } - int seed_; - int num_iter_; + int_tp seed_; + int_tp num_iter_; }; TYPED_TEST_CASE(DataTransformTest, TestDtypes); @@ -85,10 +85,10 @@ TYPED_TEST_CASE(DataTransformTest, TestDtypes); TYPED_TEST(DataTransformTest, TestEmptyTransform) { TransformationParameter transform_param; const bool unique_pixels = false; // all pixels the same equal to label - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); @@ -102,7 +102,7 @@ TYPED_TEST(DataTransformTest, TestEmptyTransform) { EXPECT_EQ(blob->channels(), datum.channels()); EXPECT_EQ(blob->height(), datum.height()); EXPECT_EQ(blob->width(), datum.width()); - for (int j = 0; j < blob->count(); ++j) { + for (int_tp j = 0; j < blob->count(); ++j) { EXPECT_EQ(blob->cpu_data()[j], label); } } @@ -110,10 +110,10 @@ TYPED_TEST(DataTransformTest, TestEmptyTransform) { TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); @@ -127,7 +127,7 @@ TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { EXPECT_EQ(blob->channels(), datum.channels()); EXPECT_EQ(blob->height(), datum.height()); EXPECT_EQ(blob->width(), datum.width()); - for (int j = 0; j < blob->count(); ++j) { + for (int_tp j = 0; j < blob->count(); ++j) { EXPECT_EQ(blob->cpu_data()[j], j); } } @@ -135,11 +135,11 @@ TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) { TYPED_TEST(DataTransformTest, TestCropSize) { TransformationParameter transform_param; const bool unique_pixels = false; // all pixels the same equal to label - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int crop_size = 2; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp crop_size = 2; transform_param.set_crop_size(crop_size); Datum datum; @@ -150,13 +150,13 @@ TYPED_TEST(DataTransformTest, TestCropSize) { transformer->InitRand(); Blob* blob = new Blob(1, channels, crop_size, crop_size); - for (int iter = 0; iter < this->num_iter_; ++iter) { + for (int_tp iter = 0; iter < this->num_iter_; ++iter) { transformer->Transform(datum, blob); EXPECT_EQ(blob->num(), 1); EXPECT_EQ(blob->channels(), datum.channels()); EXPECT_EQ(blob->height(), crop_size); EXPECT_EQ(blob->width(), crop_size); - for (int j = 0; j < blob->count(); ++j) { + for (int_tp j = 0; j < blob->count(); ++j) { EXPECT_EQ(blob->cpu_data()[j], label); } } @@ -165,86 +165,86 @@ TYPED_TEST(DataTransformTest, TestCropSize) { TYPED_TEST(DataTransformTest, TestCropTrain) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int crop_size = 2; - const int size = channels * crop_size * crop_size; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp crop_size = 2; + const int_tp size = channels * crop_size * crop_size; transform_param.set_crop_size(crop_size); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - int num_matches = this->NumSequenceMatches(transform_param, datum, TRAIN); + int_tp num_matches = this->NumSequenceMatches(transform_param, datum, TRAIN); EXPECT_LT(num_matches, size * this->num_iter_); } TYPED_TEST(DataTransformTest, TestCropTest) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int crop_size = 2; - const int size = channels * crop_size * crop_size; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp crop_size = 2; + const int_tp size = channels * crop_size * crop_size; transform_param.set_crop_size(crop_size); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - int num_matches = this->NumSequenceMatches(transform_param, datum, TEST); + int_tp num_matches = this->NumSequenceMatches(transform_param, datum, TEST); EXPECT_EQ(num_matches, size * this->num_iter_); } TYPED_TEST(DataTransformTest, TestMirrorTrain) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int size = channels * height * width; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp size = channels * height * width; transform_param.set_mirror(true); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - int num_matches = this->NumSequenceMatches(transform_param, datum, TRAIN); + int_tp num_matches = this->NumSequenceMatches(transform_param, datum, TRAIN); EXPECT_LT(num_matches, size * this->num_iter_); } TYPED_TEST(DataTransformTest, TestMirrorTest) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int size = channels * height * width; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp size = channels * height * width; transform_param.set_mirror(true); Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); - int num_matches = this->NumSequenceMatches(transform_param, datum, TEST); + int_tp num_matches = this->NumSequenceMatches(transform_param, datum, TEST); EXPECT_LT(num_matches, size * this->num_iter_); } TYPED_TEST(DataTransformTest, TestCropMirrorTrain) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int crop_size = 2; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp crop_size = 2; Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); transform_param.set_crop_size(crop_size); - int num_matches_crop = this->NumSequenceMatches( + int_tp num_matches_crop = this->NumSequenceMatches( transform_param, datum, TRAIN); transform_param.set_mirror(true); - int num_matches_crop_mirror = + int_tp num_matches_crop_mirror = this->NumSequenceMatches(transform_param, datum, TRAIN); // When doing crop and mirror we expect less num_matches than just crop EXPECT_LE(num_matches_crop_mirror, num_matches_crop); @@ -253,20 +253,20 @@ TYPED_TEST(DataTransformTest, TestCropMirrorTrain) { TYPED_TEST(DataTransformTest, TestCropMirrorTest) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int crop_size = 2; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp crop_size = 2; Datum datum; FillDatum(label, channels, height, width, unique_pixels, &datum); transform_param.set_crop_size(crop_size); - int num_matches_crop = this->NumSequenceMatches(transform_param, + int_tp num_matches_crop = this->NumSequenceMatches(transform_param, datum, TEST); transform_param.set_mirror(true); - int num_matches_crop_mirror = + int_tp num_matches_crop_mirror = this->NumSequenceMatches(transform_param, datum, TEST); // When doing crop and mirror we expect less num_matches than just crop EXPECT_LT(num_matches_crop_mirror, num_matches_crop); @@ -276,11 +276,11 @@ TYPED_TEST(DataTransformTest, TestCropMirrorTest) { TYPED_TEST(DataTransformTest, TestMeanValue) { TransformationParameter transform_param; const bool unique_pixels = false; // pixels are equal to label - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int mean_value = 2; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp mean_value = 2; transform_param.add_mean_value(mean_value); Datum datum; @@ -291,7 +291,7 @@ TYPED_TEST(DataTransformTest, TestMeanValue) { Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); - for (int j = 0; j < blob->count(); ++j) { + for (int_tp j = 0; j < blob->count(); ++j) { EXPECT_EQ(blob->cpu_data()[j], label - mean_value); } } @@ -299,10 +299,10 @@ TYPED_TEST(DataTransformTest, TestMeanValue) { TYPED_TEST(DataTransformTest, TestMeanValues) { TransformationParameter transform_param; const bool unique_pixels = false; // pixels are equal to label - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; transform_param.add_mean_value(0); transform_param.add_mean_value(1); @@ -315,8 +315,8 @@ TYPED_TEST(DataTransformTest, TestMeanValues) { Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); - for (int c = 0; c < channels; ++c) { - for (int j = 0; j < height * width; ++j) { + for (int_tp c = 0; c < channels; ++c) { + for (int_tp j = 0; j < height * width; ++j) { EXPECT_EQ(blob->cpu_data()[blob->offset(0, c) + j], label - c); } } @@ -325,11 +325,11 @@ TYPED_TEST(DataTransformTest, TestMeanValues) { TYPED_TEST(DataTransformTest, TestMeanFile) { TransformationParameter transform_param; const bool unique_pixels = true; // pixels are consecutive ints [0,size] - const int label = 0; - const int channels = 3; - const int height = 4; - const int width = 5; - const int size = channels * height * width; + const int_tp label = 0; + const int_tp channels = 3; + const int_tp height = 4; + const int_tp width = 5; + const int_tp size = channels * height * width; // Create a mean file string* mean_file = new string(); @@ -340,7 +340,7 @@ TYPED_TEST(DataTransformTest, TestMeanFile) { blob_mean.set_height(height); blob_mean.set_width(width); - for (int j = 0; j < size; ++j) { + for (int_tp j = 0; j < size; ++j) { blob_mean.add_data(j); } @@ -356,7 +356,7 @@ TYPED_TEST(DataTransformTest, TestMeanFile) { Caffe::GetDefaultDevice()); transformer->InitRand(); transformer->Transform(datum, blob); - for (int j = 0; j < blob->count(); ++j) { + for (int_tp j = 0; j < blob->count(); ++j) { EXPECT_EQ(blob->cpu_data()[j], 0); } } diff --git a/src/caffe/test/test_db.cpp b/src/caffe/test/test_db.cpp index 1b487b14c58..a578cdbd711 100644 --- a/src/caffe/test/test_db.cpp +++ b/src/caffe/test/test_db.cpp @@ -30,7 +30,7 @@ class DBTest : public ::testing::Test { scoped_ptr db(db::GetDB(TypeParam::backend)); db->Open(this->source_, db::NEW); scoped_ptr txn(db->NewTransaction()); - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { Datum datum; ReadImageToDatum(root_images_ + keys[i], i, &datum); string out; diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp index 770e7b277ee..14b139685d7 100644 --- a/src/caffe/test/test_deconvolution_layer.cpp +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -115,10 +115,10 @@ TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) { layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // simply check that accumulation works with overlapping filters const Dtype* top_data = this->blob_top_->cpu_data(); - for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { + for (int_tp n = 0; n < this->blob_top_->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_top_->height(); ++h) { + for (int_tp w = 0; w < this->blob_top_->width(); ++w) { Dtype expected = 3.1; bool h_overlap = h % 2 == 0 && h > 0 && h < this->blob_top_->height() - 1; @@ -157,16 +157,16 @@ TYPED_TEST(DeconvolutionLayerTest, TestGradient) { TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) { typedef typename TypeParam::Dtype Dtype; - const int kernel_h = 11; - const int kernel_w = 13; - vector bottom_shape(4); + const int_tp kernel_h = 11; + const int_tp kernel_w = 13; + vector bottom_shape(4); bottom_shape[0] = 15; bottom_shape[1] = 12; bottom_shape[2] = kernel_h * 2; bottom_shape[3] = kernel_w * 2; FillerParameter filler_param; GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_vec_.size(); ++i) { this->blob_bottom_vec_[i]->Reshape(bottom_shape); filler.Fill(this->blob_bottom_vec_[i]); } @@ -257,17 +257,17 @@ TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) { backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); } ASSERT_EQ(result_nd.count(), result_2d.count()); - for (int i = 0; i < result_2d.count(); ++i) { + for (int_tp i = 0; i < result_2d.count(); ++i) { EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]); } ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); - for (int i = 0; i < backward_result_2d.count(); ++i) { + for (int_tp i = 0; i < backward_result_2d.count(); ++i) { EXPECT_EQ(backward_result_2d.cpu_diff()[i], backward_result_nd.cpu_diff()[i]); } ASSERT_EQ(backward_weight_result_nd.count(), backward_weight_result_2d.count()); - for (int i = 0; i < backward_weight_result_2d.count(); ++i) { + for (int_tp i = 0; i < backward_weight_result_2d.count(); ++i) { EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], backward_weight_result_nd.cpu_diff()[i]); } @@ -275,7 +275,7 @@ TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) { TYPED_TEST(DeconvolutionLayerTest, TestGradient3D) { typedef typename TypeParam::Dtype Dtype; - vector bottom_shape(5); + vector bottom_shape(5); bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); bottom_shape[2] = 2; @@ -283,7 +283,7 @@ TYPED_TEST(DeconvolutionLayerTest, TestGradient3D) { bottom_shape[4] = 2; FillerParameter filler_param; GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_vec_.size(); ++i) { this->blob_bottom_vec_[i]->Reshape(bottom_shape); filler.Fill(this->blob_bottom_vec_[i]); } diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index c9ed38db3a5..1ee6e3ec38e 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -59,14 +59,14 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { EXPECT_EQ(this->blob_top_a_->width(), 4); EXPECT_EQ(this->blob_top_b_->count(), 0); EXPECT_EQ(this->blob_top_c_->count(), 0); - for (int i = 0; i < this->blob_top_vec_.size(); ++i) { - for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + for (int_tp i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int_tp j = 0; j < this->blob_top_vec_[i]->count(); ++j) { EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); } } layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_vec_.size(); ++i) { - for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + for (int_tp i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int_tp j = 0; j < this->blob_top_vec_[i]->count(); ++j) { EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); } } @@ -97,14 +97,14 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { EXPECT_EQ(this->blob_top_b_->height(), 1); EXPECT_EQ(this->blob_top_b_->width(), 4); EXPECT_EQ(this->blob_top_c_->count(), 0); - for (int i = 0; i < this->blob_top_vec_.size(); ++i) { - for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + for (int_tp i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int_tp j = 0; j < this->blob_top_vec_[i]->count(); ++j) { EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); } } layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_vec_.size(); ++i) { - for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + for (int_tp i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int_tp j = 0; j < this->blob_top_vec_[i]->count(); ++j) { EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); } } @@ -141,51 +141,51 @@ TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { EXPECT_EQ(this->blob_top_c_->channels(), 3); EXPECT_EQ(this->blob_top_c_->height(), 2); EXPECT_EQ(this->blob_top_c_->width(), 4); - for (int i = 0; i < this->blob_top_a_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_a_->count(); ++i) { EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); } // Blob b uses a Gaussian filler, so SetUp should not have initialized it. // Blob b's data should therefore be the default Blob data value: 0. - for (int i = 0; i < this->blob_top_b_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_b_->count(); ++i) { EXPECT_EQ(0, this->blob_top_b_->cpu_data()[i]); } - for (int i = 0; i < this->blob_top_c_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_c_->count(); ++i) { EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); } // Do a Forward pass to fill in Blob b with Gaussian data. layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_a_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_a_->count(); ++i) { EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); } // Check that the Gaussian's data has been filled in with values within // 10 standard deviations of the mean. Record the first and last sample. // to check that they're different after the next Forward pass. - for (int i = 0; i < this->blob_top_b_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_b_->count(); ++i) { EXPECT_NEAR(gaussian_mean, this->blob_top_b_->cpu_data()[i], gaussian_std * 10); } const TypeParam first_gaussian_sample = this->blob_top_b_->cpu_data()[0]; const TypeParam last_gaussian_sample = this->blob_top_b_->cpu_data()[this->blob_top_b_->count() - 1]; - for (int i = 0; i < this->blob_top_c_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_c_->count(); ++i) { EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); } // Do another Forward pass to fill in Blob b with Gaussian data again, // checking that we get different values. layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_a_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_a_->count(); ++i) { EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); } - for (int i = 0; i < this->blob_top_b_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_b_->count(); ++i) { EXPECT_NEAR(gaussian_mean, this->blob_top_b_->cpu_data()[i], gaussian_std * 10); } EXPECT_NE(first_gaussian_sample, this->blob_top_b_->cpu_data()[0]); EXPECT_NE(last_gaussian_sample, this->blob_top_b_->cpu_data()[this->blob_top_b_->count() - 1]); - for (int i = 0; i < this->blob_top_c_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_c_->count(); ++i) { EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); } } diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index 8031f6e9022..6d1b4ab94dc 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -75,11 +75,11 @@ TYPED_TEST(EltwiseLayerTest, TestProd) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i], 1e-4); } } @@ -94,11 +94,11 @@ TYPED_TEST(EltwiseLayerTest, TestSum) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i], 1e-4); } } @@ -116,11 +116,11 @@ TYPED_TEST(EltwiseLayerTest, TestSumCoeff) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i], 1e-4); } @@ -185,11 +185,11 @@ TYPED_TEST(EltwiseLayerTest, TestMax) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_EQ(data[i], std::max(in_data_a[i], std::max(in_data_b[i], in_data_c[i]))); } diff --git a/src/caffe/test/test_embed_layer.cpp b/src/caffe/test/test_embed_layer.cpp index 5edc558e838..07bd3ede39f 100644 --- a/src/caffe/test/test_embed_layer.cpp +++ b/src/caffe/test/test_embed_layer.cpp @@ -56,8 +56,8 @@ TYPED_TEST(EmbedLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; EmbedParameter* embed_param = layer_param.mutable_embed_param(); - const int kNumOutput = 10; - const int kInputDim = 5; + const int_tp kNumOutput = 10; + const int_tp kInputDim = 5; embed_param->set_num_output(kNumOutput); embed_param->set_input_dim(kInputDim); embed_param->mutable_weight_filler()->set_type("uniform"); @@ -67,22 +67,22 @@ TYPED_TEST(EmbedLayerTest, TestForward) { shared_ptr > layer(new EmbedLayer(layer_param)); layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(1, layer->blobs().size()); - vector weight_shape(2); + vector weight_shape(2); weight_shape[0] = kInputDim; weight_shape[1] = kNumOutput; ASSERT_TRUE(weight_shape == layer->blobs()[0]->shape()); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { this->blob_bottom_->mutable_cpu_data()[i] = caffe_rng_rand() % kInputDim; } layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - vector weight_offset(2, 0); - vector top_offset(5, 0); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - weight_offset[0] = static_cast(this->blob_bottom_->cpu_data()[i]); + vector weight_offset(2, 0); + vector top_offset(5, 0); + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { + weight_offset[0] = static_cast(this->blob_bottom_->cpu_data()[i]); weight_offset[1] = 0; top_offset[0] = i; top_offset[4] = 0; - for (int j = 0; j < kNumOutput; ++j) { + for (int_tp j = 0; j < kNumOutput; ++j) { EXPECT_EQ(layer->blobs()[0]->data_at(weight_offset), this->blob_top_->data_at(top_offset)); ++top_offset[4]; @@ -95,8 +95,8 @@ TYPED_TEST(EmbedLayerTest, TestForwardWithBias) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; EmbedParameter* embed_param = layer_param.mutable_embed_param(); - const int kNumOutput = 10; - const int kInputDim = 5; + const int_tp kNumOutput = 10; + const int_tp kInputDim = 5; embed_param->set_num_output(kNumOutput); embed_param->set_input_dim(kInputDim); embed_param->mutable_weight_filler()->set_type("uniform"); @@ -107,24 +107,24 @@ TYPED_TEST(EmbedLayerTest, TestForwardWithBias) { shared_ptr > layer(new EmbedLayer(layer_param)); layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(2, layer->blobs().size()); - vector weight_shape(2); + vector weight_shape(2); weight_shape[0] = kInputDim; weight_shape[1] = kNumOutput; ASSERT_TRUE(weight_shape == layer->blobs()[0]->shape()); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { this->blob_bottom_->mutable_cpu_data()[i] = caffe_rng_rand() % kInputDim; } layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - vector bias_offset(1, 0); - vector weight_offset(2, 0); - vector top_offset(5, 0); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - weight_offset[0] = static_cast(this->blob_bottom_->cpu_data()[i]); + vector bias_offset(1, 0); + vector weight_offset(2, 0); + vector top_offset(5, 0); + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { + weight_offset[0] = static_cast(this->blob_bottom_->cpu_data()[i]); weight_offset[1] = 0; top_offset[0] = i; top_offset[4] = 0; bias_offset[0] = 0; - for (int j = 0; j < kNumOutput; ++j) { + for (int_tp j = 0; j < kNumOutput; ++j) { EXPECT_EQ(layer->blobs()[0]->data_at(weight_offset) + layer->blobs()[1]->data_at(bias_offset), this->blob_top_->data_at(top_offset)); diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 728b8dc5f0d..7f07f352073 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -28,9 +28,9 @@ TYPED_TEST_CASE(ConstantFillerTest, TestDtypes); TYPED_TEST(ConstantFillerTest, TestFill) { EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); + const int_tp count = this->blob_->count(); const TypeParam* data = this->blob_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_GE(data[i], this->filler_param_.value()); } } @@ -57,9 +57,9 @@ TYPED_TEST_CASE(UniformFillerTest, TestDtypes); TYPED_TEST(UniformFillerTest, TestFill) { EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); + const int_tp count = this->blob_->count(); const TypeParam* data = this->blob_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_GE(data[i], this->filler_param_.min()); EXPECT_LE(data[i], this->filler_param_.max()); } @@ -84,17 +84,17 @@ TYPED_TEST_CASE(PositiveUnitballFillerTest, TestDtypes); TYPED_TEST(PositiveUnitballFillerTest, TestFill) { EXPECT_TRUE(this->blob_); - const int num = this->blob_->num(); - const int count = this->blob_->count(); - const int dim = count / num; + const int_tp num = this->blob_->num(); + const int_tp count = this->blob_->count(); + const int_tp dim = count / num; const TypeParam* data = this->blob_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_GE(data[i], 0); EXPECT_LE(data[i], 1); } - for (int i = 0; i < num; ++i) { + for (int_tp i = 0; i < num; ++i) { TypeParam sum = 0; - for (int j = 0; j < dim; ++j) { + for (int_tp j = 0; j < dim; ++j) { sum += data[i * dim + j]; } EXPECT_GE(sum, 0.999); @@ -123,11 +123,11 @@ TYPED_TEST_CASE(GaussianFillerTest, TestDtypes); TYPED_TEST(GaussianFillerTest, TestFill) { EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); + const int_tp count = this->blob_->count(); const TypeParam* data = this->blob_->cpu_data(); TypeParam mean = 0.; TypeParam var = 0.; - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { mean += data[i]; var += (data[i] - this->filler_param_.mean()) * (data[i] - this->filler_param_.mean()); @@ -155,11 +155,11 @@ class XavierFillerTest : public ::testing::Test { this->filler_.reset(new XavierFiller(this->filler_param_)); this->filler_->Fill(blob_); EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); + const int_tp count = this->blob_->count(); const Dtype* data = this->blob_->cpu_data(); Dtype mean = 0.; Dtype ex2 = 0.; - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { mean += data[i]; ex2 += data[i] * data[i]; } @@ -204,11 +204,11 @@ class MSRAFillerTest : public ::testing::Test { this->filler_.reset(new MSRAFiller(this->filler_param_)); this->filler_->Fill(blob_); EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); + const int_tp count = this->blob_->count(); const Dtype* data = this->blob_->cpu_data(); Dtype mean = 0.; Dtype ex2 = 0.; - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { mean += data[i]; ex2 += data[i] * data[i]; } diff --git a/src/caffe/test/test_filter_layer.cpp b/src/caffe/test/test_filter_layer.cpp index c641b6ef6e8..f2ef0756cfe 100644 --- a/src/caffe/test/test_filter_layer.cpp +++ b/src/caffe/test/test_filter_layer.cpp @@ -38,7 +38,7 @@ class FilterLayerTest : public MultiDeviceTest { bottom_data_selector_[3] = 0; // fill the other bottom blobs filler.Fill(blob_bottom_data_); - for (int i = 0; i < blob_bottom_labels_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_labels_->count(); ++i) { blob_bottom_labels_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; } blob_bottom_vec_.push_back(blob_bottom_data_); @@ -80,7 +80,7 @@ TYPED_TEST(FilterLayerTest, TestReshape) { this->blob_top_data_->shape(0)); EXPECT_GT(this->blob_bottom_labels_->shape(0), this->blob_top_labels_->shape(0)); - for (int i = 1; i < this->blob_bottom_labels_->num_axes(); i++) { + for (int_tp i = 1; i < this->blob_bottom_labels_->num_axes(); i++) { EXPECT_EQ(this->blob_bottom_labels_->shape(i), this->blob_top_labels_->shape(i)); } @@ -98,19 +98,19 @@ TYPED_TEST(FilterLayerTest, TestForward) { EXPECT_EQ(this->blob_top_labels_->data_at(1, 0, 0, 0), this->blob_bottom_labels_->data_at(2, 0, 0, 0)); - int dim = this->blob_top_data_->count() / + int_tp dim = this->blob_top_data_->count() / this->blob_top_data_->shape(0); const Dtype* top_data = this->blob_top_data_->cpu_data(); const Dtype* bottom_data = this->blob_bottom_data_->cpu_data(); // selector is 0 1 1 0, so we need to compare bottom(1,c,h,w) // with top(0,c,h,w) and bottom(2,c,h,w) with top(1,c,h,w) bottom_data += dim; // bottom(1,c,h,w) - for (size_t n = 0; n < dim; n++) + for (uint_tp n = 0; n < dim; n++) EXPECT_EQ(top_data[n], bottom_data[n]); bottom_data += dim; // bottom(2,c,h,w) top_data += dim; // top(1,c,h,w) - for (size_t n = 0; n < dim; n++) + for (uint_tp n = 0; n < dim; n++) EXPECT_EQ(top_data[n], bottom_data[n]); } diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 7b6757cba32..c872a920bfd 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -89,7 +89,7 @@ TYPED_TEST(FlattenLayerTest, TestForward) { FlattenLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int c = 0; c < 3 * 6 * 5; ++c) { + for (int_tp c = 0; c < 3 * 6 * 5; ++c) { EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5)); EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0), diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index af75cda2123..d75136c725a 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -10,7 +10,7 @@ #include "caffe/common.hpp" #include "caffe/parallel.hpp" #include "caffe/proto/caffe.pb.h" -#include "caffe/solver.hpp" +#include "caffe/sgd_solvers.hpp" #include "caffe/util/io.hpp" #include "caffe/test/test_caffe_main.hpp" @@ -47,7 +47,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { // Test data: check out generate_sample_data.py in the same directory. string* input_file_; - virtual SolverParameter_SolverType solver_type() = 0; virtual void InitSolver(const SolverParameter& param) = 0; virtual void InitSolverFromProtoString(const string& proto) { @@ -200,12 +199,11 @@ class GradientBasedSolverTest : public MultiDeviceTest { LOG(INFO) << "Multi-GPU test on " << devices << " devices"; vector gpus; // put current device at the beginning - int device_id = solver_->param().device_id(); - gpus.push_back(Caffe::Get().GetDevice(device_id)); + device* dc = Caffe::GetDevice(solver_->param().device_id()); + gpus.push_back(dc); for (int i = 0; gpus.size() < devices; ++i) { - if (i != device_id) { - gpus.push_back(Caffe::Get().GetDevice(i)); - } + if (i != device_id) + gpus.push_back(Caffe::GetDevice(i)); } Caffe::set_solver_count(gpus.size()); this->sync_.reset(new P2PSync( @@ -294,8 +292,8 @@ class GradientBasedSolverTest : public MultiDeviceTest { ((i == D) ? bias.cpu_data()[0] : weights.cpu_data()[i]); // Finally, compute update. const vector > >& history = solver_->history(); - if (solver_type() != SolverParameter_SolverType_ADADELTA - && solver_type() != SolverParameter_SolverType_ADAM) { + if (solver_->type() != string("AdaDelta") + && solver_->type() != string("Adam")) { ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias } else { ASSERT_EQ(4, history.size()); // additional blobs for update history @@ -304,26 +302,19 @@ class GradientBasedSolverTest : public MultiDeviceTest { const Dtype history_value = (i == D) ? history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; const Dtype temp = momentum * history_value; - switch (solver_type()) { - case SolverParameter_SolverType_SGD: + if (solver_->type() == string("SGD")) { update_value += temp; - break; - case SolverParameter_SolverType_NESTEROV: + } else if (solver_->type() == string("Nesterov")) { update_value += temp; // step back then over-step update_value = (1 + momentum) * update_value - temp; - break; - case SolverParameter_SolverType_ADAGRAD: + } else if (solver_->type() == string("AdaGrad")) { update_value /= std::sqrt(history_value + grad * grad) + delta_; - break; - case SolverParameter_SolverType_RMSPROP: { + } else if (solver_->type() == string("RMSProp")) { const Dtype rms_decay = 0.95; update_value /= std::sqrt(rms_decay*history_value + grad * grad * (1 - rms_decay)) + delta_; - } - break; - case SolverParameter_SolverType_ADADELTA: - { + } else if (solver_->type() == string("AdaDelta")) { const Dtype update_history_value = (i == D) ? history[1 + num_param_blobs]->cpu_data()[0] : history[0 + num_param_blobs]->cpu_data()[i]; @@ -334,9 +325,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { // not actually needed, just here for illustrative purposes // const Dtype weighted_update_average = // momentum * update_history_value + (1 - momentum) * (update_value); - break; - } - case SolverParameter_SolverType_ADAM: { + } else if (solver_->type() == string("Adam")) { const Dtype momentum2 = 0.999; const Dtype m = history_value; const Dtype v = (i == D) ? @@ -348,10 +337,8 @@ class GradientBasedSolverTest : public MultiDeviceTest { std::sqrt(Dtype(1) - pow(momentum2, num_iters)) / (Dtype(1.) - pow(momentum, num_iters)); update_value = alpha_t * val_m / (std::sqrt(val_v) + delta_); - break; - } - default: - LOG(FATAL) << "Unknown solver type: " << solver_type(); + } else { + LOG(FATAL) << "Unknown solver type: " << solver_->type(); } if (i == D) { updated_bias.mutable_cpu_diff()[0] = update_value; @@ -379,7 +366,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { const Blob& solver_updated_weights = *param_blobs[0]; ASSERT_EQ(D, solver_updated_weights.count()); const double kPrecision = 1e-2; - const double kMinPrecision = 1e-5; + const double kMinPrecision = 1e-7; for (int i = 0; i < D; ++i) { const Dtype expected_updated_weight = updated_weights.cpu_data()[i]; const Dtype solver_updated_weight = solver_updated_weights.cpu_data()[i]; @@ -396,7 +383,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { EXPECT_NEAR(expected_updated_bias, solver_updated_bias, error_margin); // Check the solver's history -- should contain the previous update value. - if (solver_type() == SolverParameter_SolverType_SGD) { + if (solver_->type() == string("SGD")) { const vector > >& history = solver_->history(); ASSERT_EQ(2, history.size()); for (int i = 0; i < D; ++i) { @@ -484,823 +471,803 @@ class GradientBasedSolverTest : public MultiDeviceTest { #endif // USE_CUDA #endif // !CPU_ONLY for (int devices = 1; devices <= available_devices; ++devices) { - // Configure batch size for single / multi device equivalence. - // Constant data is needed for multi device as for accumulation. - num_ = kNum * devices; - - // Initialize the solver and run K (= iter_to_check) solver iterations - // (on single device). - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check, kIterSize, 1); - - // Compute the (K+1)th update using the analytic least squares gradient. - vector > > updated_params; - ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, - iter_to_check + 1, &updated_params); - - // Reinitialize the solver and run K+1 solver iterations. - num_ = kNum; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check + 1, kIterSize, devices); - - // Check that the solver's solution matches ours. - CheckLeastSquaresUpdate(updated_params); - } - } - - void TestSnapshot(const Dtype learning_rate = 1.0, - const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, - const int num_iters = 1) { - // Run the solver for num_iters * 2 iterations. - const int total_num_iters = num_iters * 2; - bool snapshot = false; - const int kIterSize = 1; - const int kDevices = 1; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - total_num_iters, kIterSize, kDevices, snapshot); - - // Save the resulting param values. - vector > > param_copies; - const vector*>& orig_params = - solver_->net()->learnable_params(); - param_copies.resize(orig_params.size()); - for (int i = 0; i < orig_params.size(); ++i) { - param_copies[i].reset(new Blob()); - const bool kReshape = true; - for (int copy_diff = false; copy_diff <= true; ++copy_diff) { - param_copies[i]->CopyFrom(*orig_params[i], copy_diff, kReshape); - } - } - - // Save the solver history - vector > > history_copies; - const vector > >& orig_history = solver_->history(); - history_copies.resize(orig_history.size()); - for (int i = 0; i < orig_history.size(); ++i) { - history_copies[i].reset(new Blob()); - const bool kReshape = true; - for (int copy_diff = false; copy_diff <= true; ++copy_diff) { - history_copies[i]->CopyFrom(*orig_history[i], copy_diff, kReshape); - } - } - - // Run the solver for num_iters iterations and snapshot. - snapshot = true; - string snapshot_name = RunLeastSquaresSolver(learning_rate, weight_decay, - momentum, num_iters, kIterSize, kDevices, snapshot); - - // Reinitialize the solver and run for num_iters more iterations. - snapshot = false; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - total_num_iters, kIterSize, kDevices, - snapshot, snapshot_name.c_str()); - - // Check that params now match. - const vector*>& params = solver_->net()->learnable_params(); - for (int i = 0; i < params.size(); ++i) { - for (int j = 0; j < params[i]->count(); ++j) { - EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) - << "param " << i << " data differed at dim " << j; - EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) - << "param " << i << " diff differed at dim " << j; - } - } - - // Check that history now matches. - const vector > >& history = solver_->history(); - for (int i = 0; i < history.size(); ++i) { - for (int j = 0; j < history[i]->count(); ++j) { - EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) - << "history blob " << i << " data differed at dim " << j; - EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) - << "history blob " << i << " diff differed at dim " << j; - } - } - } -}; - - -template -class SGDSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new SGDSolver(param)); - } - - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_SGD; - } -}; - -TYPED_TEST_CASE(SGDSolverTest, TestDtypesAndDevices); - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(SGDSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - - -template -class AdaGradSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new AdaGradSolver(param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_ADAGRAD; - } -}; - -TYPED_TEST_CASE(AdaGradSolverTest, TestDtypesAndDevices); - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); -} - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundredth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaGradSolverTest, - TestAdaGradLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdaGradSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - - -template -class NesterovSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new NesterovSolver(param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_NESTEROV; - } -}; - -TYPED_TEST_CASE(NesterovSolverTest, TestDtypesAndDevices); - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHundredth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(NesterovSolverTest, - TestNesterovLeastSquaresUpdateWithWeightDecayMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, - TestNesterovLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(NesterovSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -template -class AdaDeltaSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new AdaDeltaSolver(param)); - } - - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_ADADELTA; - } -}; - -TYPED_TEST_CASE(AdaDeltaSolverTest, TestDtypesAndDevices); - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.95; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.95; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, - TestAdaDeltaLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -template -class AdamSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - SolverParameter new_param = param; - const Dtype momentum = 0.9; - new_param.set_momentum(momentum); - const Dtype momentum2 = 0.999; - new_param.set_momentum2(momentum2); - this->solver_.reset(new AdamSolver(new_param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_ADAM; - } -}; - -TYPED_TEST_CASE(AdamSolverTest, TestDtypesAndDevices); - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.9; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); -} - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); -} - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdamSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -template -class RMSPropSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - const Dtype rms_decay = 0.95; - SolverParameter new_param = param; - new_param.set_rms_decay(rms_decay); - this->solver_.reset(new RMSPropSolver(new_param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_RMSPROP; - } -}; - -TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, - TestRMSPropLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(RMSPropSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} + // Configure batch size for single / multi device equivalence. + // Constant data is needed for multi device as for accumulation. + num_ = kNum * devices; + + // Initialize the solver and run K (= iter_to_check) solver iterations + // (on single device). + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check, kIterSize, 1); + + // Compute the (K+1)th update using the analytic least squares gradient. + vector > > updated_params; + ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, + iter_to_check + 1, &updated_params); + + // Reinitialize the solver and run K+1 solver iterations. + num_ = kNum; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check + 1, kIterSize, devices); + + // Check that the solver's solution matches ours. + CheckLeastSquaresUpdate(updated_params); + } + } + + void TestSnapshot(const Dtype learning_rate = 1.0, + const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, + const int num_iters = 1) { + // Run the solver for num_iters * 2 iterations. + const int total_num_iters = num_iters * 2; + bool snapshot = false; + const int kIterSize = 1; + const int kDevices = 1; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + total_num_iters, kIterSize, kDevices, snapshot); + + // Save the resulting param values. + vector > > param_copies; + const vector*>& orig_params = + solver_->net()->learnable_params(); + param_copies.resize(orig_params.size()); + for (int i = 0; i < orig_params.size(); ++i) { + param_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + param_copies[i]->CopyFrom(*orig_params[i], copy_diff, kReshape); + } + } + + // Save the solver history + vector > > history_copies; + const vector > >& orig_history = solver_->history(); + history_copies.resize(orig_history.size()); + for (int i = 0; i < orig_history.size(); ++i) { + history_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + history_copies[i]->CopyFrom(*orig_history[i], copy_diff, kReshape); + } + } + + // Run the solver for num_iters iterations and snapshot. + snapshot = true; + string snapshot_name = RunLeastSquaresSolver(learning_rate, weight_decay, + momentum, num_iters, kIterSize, kDevices, snapshot); + + // Reinitialize the solver and run for num_iters more iterations. + snapshot = false; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + total_num_iters, kIterSize, kDevices, + snapshot, snapshot_name.c_str()); + + // Check that params now match. + const vector*>& params = solver_->net()->learnable_params(); + for (int i = 0; i < params.size(); ++i) { + for (int j = 0; j < params[i]->count(); ++j) { + EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) + << "param " << i << " data differed at dim " << j; + EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) + << "param " << i << " diff differed at dim " << j; + } + } + + // Check that history now matches. + const vector > >& history = solver_->history(); + for (int i = 0; i < history.size(); ++i) { + for (int j = 0; j < history[i]->count(); ++j) { + EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) + << "history blob " << i << " data differed at dim " << j; + EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) + << "history blob " << i << " diff differed at dim " << j; + } + } + } + }; + + + template + class SGDSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new SGDSolver(param)); + } + }; + + TYPED_TEST_CASE(SGDSolverTest, TestDtypesAndDevices); + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(SGDSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(SGDSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + + template + class AdaGradSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaGradSolver(param)); + } + }; + + TYPED_TEST_CASE(AdaGradSolverTest, TestDtypesAndDevices); + + TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); + } + + TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); + } + + TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); + } + + TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdaGradSolverTest, + TestAdaGradLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(AdaGradSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + + template + class NesterovSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new NesterovSolver(param)); + } + }; + + TYPED_TEST_CASE(NesterovSolverTest, TestDtypesAndDevices); + + TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); + } + + TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); + } + + TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); + } + + TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(NesterovSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(NesterovSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + template + class AdaDeltaSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaDeltaSolver(param)); + } + }; + + TYPED_TEST_CASE(AdaDeltaSolverTest, TestDtypesAndDevices); + + TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + this->TestLeastSquaresUpdate(kLearningRate); + } + + TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.95; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } + + TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } + } + + TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.95; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } + } + + TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdaDeltaSolverTest, + TestAdaDeltaLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + template + class AdamSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + SolverParameter new_param = param; + const Dtype momentum = 0.9; + new_param.set_momentum(momentum); + const Dtype momentum2 = 0.999; + new_param.set_momentum2(momentum2); + this->solver_.reset(new AdamSolver(new_param)); + } + }; + + TYPED_TEST_CASE(AdamSolverTest, TestDtypesAndDevices); + + TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } + + TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } + + TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(AdamSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(AdamSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + template + class RMSPropSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + const Dtype rms_decay = 0.95; + SolverParameter new_param = param; + new_param.set_rms_decay(rms_decay); + this->solver_.reset(new RMSPropSolver(new_param)); + } + }; + + TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); + + TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); + } + + TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(RMSPropSolverTest, + TestRMSPropLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); + } + + TYPED_TEST(RMSPropSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } + + TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } + } } // namespace caffe diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp index b56277b53ae..e6d5b6f95d3 100644 --- a/src/caffe/test/test_hdf5_output_layer.cpp +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -44,10 +44,10 @@ class HDF5OutputLayerTest : public MultiDeviceTest { Blob* const blob_label_; vector*> blob_bottom_vec_; vector*> blob_top_vec_; - int num_; - int channels_; - int height_; - int width_; + int_tp num_; + int_tp channels_; + int_tp height_; + int_tp width_; }; template @@ -57,10 +57,10 @@ void HDF5OutputLayerTest::CheckBlobEqual(const Blob& b1, EXPECT_EQ(b1.channels(), b2.channels()); EXPECT_EQ(b1.height(), b2.height()); EXPECT_EQ(b1.width(), b2.width()); - for (int n = 0; n < b1.num(); ++n) { - for (int c = 0; c < b1.channels(); ++c) { - for (int h = 0; h < b1.height(); ++h) { - for (int w = 0; w < b1.width(); ++w) { + for (int_tp n = 0; n < b1.num(); ++n) { + for (int_tp c = 0; c < b1.channels(); ++c) { + for (int_tp h = 0; h < b1.height(); ++h) { + for (int_tp w = 0; w < b1.width(); ++w) { EXPECT_EQ(b1.data_at(n, c, h, w), b2.data_at(n, c, h, w)); } } diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index c9b027f88cf..9161e7fe5aa 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -62,12 +62,12 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { param.add_top("label2"); HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param(); - int batch_size = 5; + int_tp batch_size = 5; hdf5_data_param->set_batch_size(batch_size); hdf5_data_param->set_source(*(this->filename)); - int num_cols = 8; - int height = 6; - int width = 5; + int_tp num_cols = 8; + int_tp height = 6; + int_tp width = 5; // Test that the layer setup got the correct parameters. HDF5DataLayer layer(param); @@ -88,23 +88,23 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); // Go through the data 10 times (5 batches). - const int data_size = num_cols * height * width; - for (int iter = 0; iter < 10; ++iter) { + const int_tp data_size = num_cols * height * width; + for (int_tp iter = 0; iter < 10; ++iter) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // On even iterations, we're reading the first half of the data. // On odd iterations, we're reading the second half of the data. // NB: label is 1-indexed - int label_offset = 1 + ((iter % 2 == 0) ? 0 : batch_size); - int label2_offset = 1 + label_offset; - int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size; + int_tp label_offset = 1 + ((iter % 2 == 0) ? 0 : batch_size); + int_tp label2_offset = 1 + label_offset; + int_tp data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size; // Every two iterations we are reading the second file, // which has the same labels, but data is offset by total data size, // which is 2400 (see generate_sample_data). - int file_offset = (iter % 4 < 2) ? 0 : 2400; + int_tp file_offset = (iter % 4 < 2) ? 0 : 2400; - for (int i = 0; i < batch_size; ++i) { + for (int_tp i = 0; i < batch_size; ++i) { EXPECT_EQ( label_offset + i, this->blob_top_label_->cpu_data()[i]); @@ -112,11 +112,11 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { label2_offset + i, this->blob_top_label2_->cpu_data()[i]); } - for (int i = 0; i < batch_size; ++i) { - for (int j = 0; j < num_cols; ++j) { - for (int h = 0; h < height; ++h) { - for (int w = 0; w < width; ++w) { - int idx = ( + for (int_tp i = 0; i < batch_size; ++i) { + for (int_tp j = 0; j < num_cols; ++j) { + for (int_tp h = 0; h < height; ++h) { + for (int_tp w = 0; w < width; ++w) { + int_tp idx = ( i * num_cols * height * width + j * height * width + h * width + w); diff --git a/src/caffe/test/test_hinge_loss_layer.cpp b/src/caffe/test/test_hinge_loss_layer.cpp index b6a99022905..0204c2096ac 100644 --- a/src/caffe/test/test_hinge_loss_layer.cpp +++ b/src/caffe/test/test_hinge_loss_layer.cpp @@ -31,7 +31,7 @@ class HingeLossLayerTest : public MultiDeviceTest { GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_data_); blob_bottom_vec_.push_back(blob_bottom_data_); - for (int i = 0; i < blob_bottom_label_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_label_->count(); ++i) { blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; } blob_bottom_vec_.push_back(blob_bottom_label_); diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index fd6264d6d6e..7b2d80db8cc 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -17,20 +17,20 @@ namespace caffe { // Forward declare kernel functions #ifdef USE_CUDA template -__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, +__global__ void im2col_gpu_kernel(const int_tp n, const Dtype* data_im, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp height_col, const int_tp width_col, Dtype* data_col); template -__global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, - const Dtype* data_im, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, Dtype* data_col); +__global__ void im2col_nd_gpu_kernel(const int_tp n, const int_tp num_axes, + const Dtype* data_im, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; #endif // USE_CUDA @@ -41,15 +41,15 @@ class Im2colKernelTest : public GPUDeviceTest { Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob(5, 500, 10, 10)), - blob_kernel_shape_(new Blob()), - blob_stride_(new Blob()), - blob_pad_(new Blob()), + blob_kernel_shape_(new Blob()), + blob_stride_(new Blob()), + blob_pad_(new Blob()), blob_top_(new Blob()), blob_top_cpu_(new Blob()) { FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); - vector dim_blob_shape(1, 2); + vector dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); @@ -63,7 +63,7 @@ class Im2colKernelTest : public GPUDeviceTest { height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; @@ -79,20 +79,20 @@ class Im2colKernelTest : public GPUDeviceTest { delete blob_pad_; } - Blob* const blob_kernel_shape_; - Blob* const blob_stride_; - Blob* const blob_pad_; + Blob* const blob_kernel_shape_; + Blob* const blob_stride_; + Blob* const blob_pad_; Blob* const blob_bottom_; Blob* const blob_top_; Blob* const blob_top_cpu_; - int height_; - int width_; - int channels_; - int pad_; - int stride_; - int kernel_size_; - int height_col_; - int width_col_; + int_tp height_; + int_tp width_; + int_tp channels_; + int_tp pad_; + int_tp stride_; + int_tp kernel_size_; + int_tp height_col_; + int_tp width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); @@ -115,7 +115,7 @@ TYPED_TEST(Im2colKernelTest, Test2D) { TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, @@ -124,12 +124,12 @@ TYPED_TEST(Im2colKernelTest, Test2D) { } // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + int_tp num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int_tp default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp grid_div = 2; grid_div <= 8; grid_div++) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, @@ -140,13 +140,13 @@ TYPED_TEST(Im2colKernelTest, Test2D) { // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + int_tp num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int_tp default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - int grid_dim = default_grid_dim/grid_div; + for (int_tp grid_div = 2; grid_div <= 8; grid_div++) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + int_tp grid_dim = default_grid_dim/grid_div; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( @@ -159,7 +159,7 @@ TYPED_TEST(Im2colKernelTest, Test2D) { } // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); @@ -186,7 +186,7 @@ TYPED_TEST(Im2colKernelTest, TestND) { TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, @@ -196,14 +196,14 @@ TYPED_TEST(Im2colKernelTest, TestND) { } // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + int_tp num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int_tp default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - const int grid_dim = default_grid_dim / grid_div; + for (int_tp grid_div = 2; grid_div <= 8; grid_div++) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + const int_tp grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) im2col_nd_gpu_kernel @@ -219,7 +219,7 @@ TYPED_TEST(Im2colKernelTest, TestND) { } // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index 293aa262059..8787d6b990a 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -63,7 +63,7 @@ TYPED_TEST(Im2colLayerTest, TestForward) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // We are lazy and will only check the top left block - for (int c = 0; c < 27; ++c) { + for (int_tp c = 0; c < 27; ++c) { EXPECT_EQ(this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3), this->blob_top_->data_at(0, c, 0, 0)); } @@ -108,7 +108,7 @@ TYPED_TEST(Im2colLayerTest, TestRect) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // We are lazy and will only check the top left block - for (int c = 0; c < 45; ++c) { + for (int_tp c = 0; c < 45; ++c) { EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), this->blob_bottom_->data_at(0, (c / 15), (c / 3) % 5, c % 3)); } diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp index 481fcef7b27..0e71f4e0d42 100644 --- a/src/caffe/test/test_image_data_layer.cpp +++ b/src/caffe/test/test_image_data_layer.cpp @@ -33,7 +33,7 @@ class ImageDataLayerTest : public MultiDeviceTest { MakeTempFilename(&filename_); std::ofstream outfile(filename_.c_str(), std::ofstream::out); LOG(INFO) << "Using temporary file " << filename_; - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { outfile << EXAMPLES_SOURCE_DIR "images/cat.jpg " << i; } outfile.close(); @@ -51,7 +51,7 @@ class ImageDataLayerTest : public MultiDeviceTest { delete blob_top_label_; } - int seed_; + int_tp seed_; string filename_; string filename_reshape_; Blob* const blob_top_data_; @@ -80,9 +80,9 @@ TYPED_TEST(ImageDataLayerTest, TestRead) { EXPECT_EQ(this->blob_top_label_->height(), 1); EXPECT_EQ(this->blob_top_label_->width(), 1); // Go through the data twice - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); } } @@ -108,9 +108,9 @@ TYPED_TEST(ImageDataLayerTest, TestResize) { EXPECT_EQ(this->blob_top_label_->height(), 1); EXPECT_EQ(this->blob_top_label_->width(), 1); // Go through the data twice - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < 5; ++i) { + for (int_tp i = 0; i < 5; ++i) { EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); } } @@ -161,11 +161,11 @@ TYPED_TEST(ImageDataLayerTest, TestShuffle) { EXPECT_EQ(this->blob_top_label_->height(), 1); EXPECT_EQ(this->blob_top_label_->width(), 1); // Go through the data twice - for (int iter = 0; iter < 2; ++iter) { + for (int_tp iter = 0; iter < 2; ++iter) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - map values_to_indices; - int num_in_order = 0; - for (int i = 0; i < 5; ++i) { + map values_to_indices; + int_tp num_in_order = 0; + for (int_tp i = 0; i < 5; ++i) { Dtype value = this->blob_top_label_->cpu_data()[i]; // Check that the value has not been seen already (no duplicates). EXPECT_EQ(values_to_indices.find(value), values_to_indices.end()); diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp index 7ec2f8073c1..d3f83d9217e 100644 --- a/src/caffe/test/test_infogain_loss_layer.cpp +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -30,7 +30,7 @@ class InfogainLossLayerTest : public MultiDeviceTest { PositiveUnitballFiller filler(filler_param); filler.Fill(this->blob_bottom_data_); blob_bottom_vec_.push_back(blob_bottom_data_); - for (int i = 0; i < blob_bottom_label_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_label_->count(); ++i) { blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; } blob_bottom_vec_.push_back(blob_bottom_label_); diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp index 05115a084f7..671b68bfbb0 100644 --- a/src/caffe/test/test_inner_product_layer.cpp +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -73,8 +73,8 @@ TYPED_TEST(InnerProductLayerTest, TestForward) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = this->blob_top_->count(); + for (int_tp i = 0; i < count; ++i) { EXPECT_GE(data[i], 1.); } } @@ -95,8 +95,8 @@ TYPED_TEST(InnerProductLayerTest, TestForwardNoBatch) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = this->blob_top_->count(); + for (int_tp i = 0; i < count; ++i) { EXPECT_GE(data[i], 1.); } } diff --git a/src/caffe/test/test_io.cpp b/src/caffe/test/test_io.cpp index c2c919e90dc..ece6f52aae2 100644 --- a/src/caffe/test/test_io.cpp +++ b/src/caffe/test/test_io.cpp @@ -17,10 +17,10 @@ namespace caffe { class IOTest : public ::testing::Test {}; -bool ReadImageToDatumReference(const string& filename, const int label, - const int height, const int width, const bool is_color, Datum* datum) { +bool ReadImageToDatumReference(const string& filename, const int_tp label, + const int_tp height, const int_tp width, const bool is_color, Datum* datum) { cv::Mat cv_img; - int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : + int_tp cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE); cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag); @@ -34,7 +34,7 @@ bool ReadImageToDatumReference(const string& filename, const int label, cv_img = cv_img_origin; } - int num_channels = (is_color ? 3 : 1); + int_tp num_channels = (is_color ? 3 : 1); datum->set_channels(num_channels); datum->set_height(cv_img.rows); datum->set_width(cv_img.cols); @@ -43,17 +43,17 @@ bool ReadImageToDatumReference(const string& filename, const int label, datum->clear_float_data(); string* datum_string = datum->mutable_data(); if (is_color) { - for (int c = 0; c < num_channels; ++c) { - for (int h = 0; h < cv_img.rows; ++h) { - for (int w = 0; w < cv_img.cols; ++w) { + for (int_tp c = 0; c < num_channels; ++c) { + for (int_tp h = 0; h < cv_img.rows; ++h) { + for (int_tp w = 0; w < cv_img.cols; ++w) { datum_string->push_back( static_cast(cv_img.at(h, w)[c])); } } } } else { // Faster than repeatedly testing is_color for each pixel w/i loop - for (int h = 0; h < cv_img.rows; ++h) { - for (int w = 0; w < cv_img.cols; ++w) { + for (int_tp h = 0; h < cv_img.rows; ++h) { + for (int_tp w = 0; w < cv_img.cols; ++w) { datum_string->push_back( static_cast(cv_img.at(h, w))); } @@ -84,7 +84,7 @@ TEST_F(IOTest, TestReadImageToDatumReference) { const string& data = datum.data(); const string& data_ref = datum.data(); - for (int i = 0; i < datum.data().size(); ++i) { + for (int_tp i = 0; i < datum.data().size(); ++i) { EXPECT_TRUE(data[i] == data_ref[i]); } } @@ -103,7 +103,7 @@ TEST_F(IOTest, TestReadImageToDatumReferenceResized) { const string& data = datum.data(); const string& data_ref = datum.data(); - for (int i = 0; i < datum.data().size(); ++i) { + for (int_tp i = 0; i < datum.data().size(); ++i) { EXPECT_TRUE(data[i] == data_ref[i]); } } @@ -118,10 +118,10 @@ TEST_F(IOTest, TestReadImageToDatumContent) { EXPECT_EQ(datum.width(), cv_img.cols); const string& data = datum.data(); - int index = 0; - for (int c = 0; c < datum.channels(); ++c) { - for (int h = 0; h < datum.height(); ++h) { - for (int w = 0; w < datum.width(); ++w) { + int_tp index = 0; + for (int_tp c = 0; c < datum.channels(); ++c) { + for (int_tp h = 0; h < datum.height(); ++h) { + for (int_tp w = 0; w < datum.width(); ++w) { EXPECT_TRUE(data[index++] == static_cast(cv_img.at(h, w)[c])); } @@ -140,9 +140,9 @@ TEST_F(IOTest, TestReadImageToDatumContentGray) { EXPECT_EQ(datum.width(), cv_img.cols); const string& data = datum.data(); - int index = 0; - for (int h = 0; h < datum.height(); ++h) { - for (int w = 0; w < datum.width(); ++w) { + int_tp index = 0; + for (int_tp h = 0; h < datum.height(); ++h) { + for (int_tp w = 0; w < datum.width(); ++w) { EXPECT_TRUE(data[index++] == static_cast(cv_img.at(h, w))); } } @@ -253,7 +253,7 @@ TEST_F(IOTest, TestCVMatToDatumContent) { const string& data = datum.data(); const string& data_ref = datum_ref.data(); - for (int i = 0; i < datum.data().size(); ++i) { + for (int_tp i = 0; i < datum.data().size(); ++i) { EXPECT_TRUE(data[i] == data_ref[i]); } } @@ -272,7 +272,7 @@ TEST_F(IOTest, TestCVMatToDatumReference) { const string& data = datum.data(); const string& data_ref = datum_ref.data(); - for (int i = 0; i < datum.data().size(); ++i) { + for (int_tp i = 0; i < datum.data().size(); ++i) { EXPECT_TRUE(data[i] == data_ref[i]); } } @@ -301,7 +301,7 @@ TEST_F(IOTest, TestDecodeDatum) { const string& data = datum.data(); const string& data_ref = datum_ref.data(); - for (int i = 0; i < datum.data().size(); ++i) { + for (int_tp i = 0; i < datum.data().size(); ++i) { EXPECT_TRUE(data[i] == data_ref[i]); } } @@ -330,9 +330,9 @@ TEST_F(IOTest, TestDecodeDatumToCVMatContent) { EXPECT_EQ(cv_img_ref.rows, cv_img.rows); EXPECT_EQ(cv_img_ref.cols, cv_img.cols); - for (int c = 0; c < datum.channels(); ++c) { - for (int h = 0; h < datum.height(); ++h) { - for (int w = 0; w < datum.width(); ++w) { + for (int_tp c = 0; c < datum.channels(); ++c) { + for (int_tp h = 0; h < datum.height(); ++h) { + for (int_tp w = 0; w < datum.width(); ++w) { EXPECT_TRUE(cv_img.at(h, w)[c]== cv_img_ref.at(h, w)[c]); } @@ -355,7 +355,7 @@ TEST_F(IOTest, TestDecodeDatumNative) { const string& data = datum.data(); const string& data_ref = datum_ref.data(); - for (int i = 0; i < datum.data().size(); ++i) { + for (int_tp i = 0; i < datum.data().size(); ++i) { EXPECT_TRUE(data[i] == data_ref[i]); } } @@ -385,7 +385,7 @@ TEST_F(IOTest, TestDecodeDatumNativeGray) { const string& data = datum.data(); const string& data_ref = datum_ref.data(); - for (int i = 0; i < datum.data().size(); ++i) { + for (int_tp i = 0; i < datum.data().size(); ++i) { EXPECT_TRUE(data[i] == data_ref[i]); } } @@ -410,9 +410,9 @@ TEST_F(IOTest, TestDecodeDatumToCVMatContentNative) { EXPECT_EQ(cv_img_ref.rows, cv_img.rows); EXPECT_EQ(cv_img_ref.cols, cv_img.cols); - for (int c = 0; c < datum.channels(); ++c) { - for (int h = 0; h < datum.height(); ++h) { - for (int w = 0; w < datum.width(); ++w) { + for (int_tp c = 0; c < datum.channels(); ++c) { + for (int_tp h = 0; h < datum.height(); ++h) { + for (int_tp w = 0; w < datum.width(); ++w) { EXPECT_TRUE(cv_img.at(h, w)[c]== cv_img_ref.at(h, w)[c]); } diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index 4562af51361..25e2548d193 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -58,18 +58,18 @@ void LRNLayerTest::ReferenceLRNForward( LRNParameter lrn_param = layer_param.lrn_param(); Dtype alpha = lrn_param.alpha(); Dtype beta = lrn_param.beta(); - int size = lrn_param.local_size(); + int_tp size = lrn_param.local_size(); switch (lrn_param.norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: - for (int n = 0; n < blob_bottom.num(); ++n) { - for (int c = 0; c < blob_bottom.channels(); ++c) { - for (int h = 0; h < blob_bottom.height(); ++h) { - for (int w = 0; w < blob_bottom.width(); ++w) { - int c_start = c - (size - 1) / 2; - int c_end = min(c_start + size, blob_bottom.channels()); - c_start = max(c_start, 0); + for (int_tp n = 0; n < blob_bottom.num(); ++n) { + for (int_tp c = 0; c < blob_bottom.channels(); ++c) { + for (int_tp h = 0; h < blob_bottom.height(); ++h) { + for (int_tp w = 0; w < blob_bottom.width(); ++w) { + int_tp c_start = c - (size - 1) / 2; + int_tp c_end = min(c_start + size, blob_bottom.channels()); + c_start = max(c_start, 0L); Dtype scale = 1.; - for (int i = c_start; i < c_end; ++i) { + for (int_tp i = c_start; i < c_end; ++i) { Dtype value = blob_bottom.data_at(n, i, h, w); scale += value * value * alpha / size; } @@ -81,19 +81,19 @@ void LRNLayerTest::ReferenceLRNForward( } break; case LRNParameter_NormRegion_WITHIN_CHANNEL: - for (int n = 0; n < blob_bottom.num(); ++n) { - for (int c = 0; c < blob_bottom.channels(); ++c) { - for (int h = 0; h < blob_bottom.height(); ++h) { - int h_start = h - (size - 1) / 2; - int h_end = min(h_start + size, blob_bottom.height()); - h_start = max(h_start, 0); - for (int w = 0; w < blob_bottom.width(); ++w) { + for (int_tp n = 0; n < blob_bottom.num(); ++n) { + for (int_tp c = 0; c < blob_bottom.channels(); ++c) { + for (int_tp h = 0; h < blob_bottom.height(); ++h) { + int_tp h_start = h - (size - 1) / 2; + int_tp h_end = min(h_start + size, blob_bottom.height()); + h_start = max(h_start, 0L); + for (int_tp w = 0; w < blob_bottom.width(); ++w) { Dtype scale = 1.; - int w_start = w - (size - 1) / 2; - int w_end = min(w_start + size, blob_bottom.width()); - w_start = max(w_start, 0); - for (int nh = h_start; nh < h_end; ++nh) { - for (int nw = w_start; nw < w_end; ++nw) { + int_tp w_start = w - (size - 1) / 2; + int_tp w_end = min(w_start + size, blob_bottom.width()); + w_start = max(w_start, 0L); + for (int_tp nh = h_start; nh < h_end; ++nh) { + for (int_tp nw = w_start; nw < w_end; ++nw) { Dtype value = blob_bottom.data_at(n, c, nh, nw); scale += value * value * alpha / (size * size); } @@ -132,7 +132,7 @@ TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) { Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], this->epsilon_); } @@ -148,7 +148,7 @@ TYPED_TEST(LRNLayerTest, TestForwardAcrossChannelsLargeRegion) { Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], this->epsilon_); } @@ -161,13 +161,13 @@ TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) { GradientChecker checker(1e-2, 1e-2); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } vector propagate_down(this->blob_bottom_vec_.size(), true); layer.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] // << std::endl; // } @@ -183,13 +183,13 @@ TYPED_TEST(LRNLayerTest, TestGradientAcrossChannelsLargeRegion) { GradientChecker checker(1e-2, 1e-2); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } vector propagate_down(this->blob_bottom_vec_.size(), true); layer.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] // << std::endl; // } @@ -223,7 +223,7 @@ TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) { Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], this->epsilon_); } @@ -239,7 +239,7 @@ TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) { GradientChecker checker(1e-2, 1e-2); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, @@ -286,18 +286,18 @@ void CuDNNLRNLayerTest::ReferenceLRNForward( LRNParameter lrn_param = layer_param.lrn_param(); Dtype alpha = lrn_param.alpha(); Dtype beta = lrn_param.beta(); - int size = lrn_param.local_size(); + int_tp size = lrn_param.local_size(); switch (lrn_param.norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: - for (int n = 0; n < blob_bottom.num(); ++n) { - for (int c = 0; c < blob_bottom.channels(); ++c) { - for (int h = 0; h < blob_bottom.height(); ++h) { - for (int w = 0; w < blob_bottom.width(); ++w) { - int c_start = c - (size - 1) / 2; - int c_end = min(c_start + size, blob_bottom.channels()); - c_start = max(c_start, 0); + for (int_tp n = 0; n < blob_bottom.num(); ++n) { + for (int_tp c = 0; c < blob_bottom.channels(); ++c) { + for (int_tp h = 0; h < blob_bottom.height(); ++h) { + for (int_tp w = 0; w < blob_bottom.width(); ++w) { + int_tp c_start = c - (size - 1) / 2; + int_tp c_end = min(c_start + size, blob_bottom.channels()); + c_start = max(c_start, 0L); Dtype scale = 1.; - for (int i = c_start; i < c_end; ++i) { + for (int_tp i = c_start; i < c_end; ++i) { Dtype value = blob_bottom.data_at(n, i, h, w); scale += value * value * alpha / size; } @@ -309,19 +309,19 @@ void CuDNNLRNLayerTest::ReferenceLRNForward( } break; case LRNParameter_NormRegion_WITHIN_CHANNEL: - for (int n = 0; n < blob_bottom.num(); ++n) { - for (int c = 0; c < blob_bottom.channels(); ++c) { - for (int h = 0; h < blob_bottom.height(); ++h) { - int h_start = h - (size - 1) / 2; - int h_end = min(h_start + size, blob_bottom.height()); - h_start = max(h_start, 0); - for (int w = 0; w < blob_bottom.width(); ++w) { + for (int_tp n = 0; n < blob_bottom.num(); ++n) { + for (int_tp c = 0; c < blob_bottom.channels(); ++c) { + for (int_tp h = 0; h < blob_bottom.height(); ++h) { + int_tp h_start = h - (size - 1) / 2; + int_tp h_end = min(h_start + size, blob_bottom.height()); + h_start = max(h_start, 0L); + for (int_tp w = 0; w < blob_bottom.width(); ++w) { Dtype scale = 1.; - int w_start = w - (size - 1) / 2; - int w_end = min(w_start + size, blob_bottom.width()); - w_start = max(w_start, 0); - for (int nh = h_start; nh < h_end; ++nh) { - for (int nw = w_start; nw < w_end; ++nw) { + int_tp w_start = w - (size - 1) / 2; + int_tp w_end = min(w_start + size, blob_bottom.width()); + w_start = max(w_start, 0L); + for (int_tp nh = h_start; nh < h_end; ++nh) { + for (int_tp nw = w_start; nw < w_end; ++nw) { Dtype value = blob_bottom.data_at(n, c, nh, nw); scale += value * value * alpha / (size * size); } @@ -349,7 +349,7 @@ TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsCuDNN) { Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], this->epsilon_); } @@ -365,7 +365,7 @@ TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsLargeRegionCuDNN) { Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], this->epsilon_); } @@ -378,7 +378,7 @@ TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsCuDNN) { GradientChecker checker(1e-2, 1e-2); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } vector propagate_down(this->blob_bottom_vec_.size(), true); @@ -400,7 +400,7 @@ TYPED_TEST(CuDNNLRNLayerTest, TestForwardWithinChannel) { Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], this->epsilon_); } @@ -416,7 +416,7 @@ TYPED_TEST(CuDNNLRNLayerTest, TestGradientWithinChannel) { GradientChecker checker(1e-2, 1e-2); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, @@ -431,7 +431,7 @@ TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsLargeRegionCuDNN) { GradientChecker checker(1e-2, 1e-2); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } vector propagate_down(this->blob_bottom_vec_.size(), true); diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 130625f09cb..04978835ae8 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -47,10 +47,10 @@ class MathFunctionsTest : public MultiDeviceTest { } // http://en.wikipedia.org/wiki/Hamming_distance - int ReferenceHammingDistance(const int n, const Dtype* x, const Dtype* y) { - int dist = 0; + int_tp ReferenceHammingDistance(const int_tp n, const Dtype* x, const Dtype* y) { + int_tp dist = 0; uint64_t val; - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { if (sizeof(Dtype) == 8) { val = static_cast(x[i]) ^ static_cast(y[i]); } else if (sizeof(Dtype) == 4) { @@ -84,7 +84,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestNothing) { } TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); EXPECT_EQ(this->ReferenceHammingDistance(n, x, y), @@ -92,10 +92,10 @@ TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { } TYPED_TEST(CPUMathFunctionsTest, TestAsum) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { std_asum += std::fabs(x[i]); } TypeParam cpu_asum = caffe_cpu_asum(n, x); @@ -103,54 +103,54 @@ TYPED_TEST(CPUMathFunctionsTest, TestAsum) { } TYPED_TEST(CPUMathFunctionsTest, TestSign) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); const TypeParam* signs = this->blob_bottom_->cpu_diff(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(signs[i], x[i] > 0 ? 1 : (x[i] < 0 ? -1 : 0)); } } TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); const TypeParam* signbits = this->blob_bottom_->cpu_diff(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(signbits[i], x[i] < 0 ? 1 : 0); } } TYPED_TEST(CPUMathFunctionsTest, TestFabs) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(abs_val[i], x[i] > 0 ? x[i] : -x[i]); } } TYPED_TEST(CPUMathFunctionsTest, TestScale) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; caffe_cpu_scale(n, alpha, this->blob_bottom_->cpu_data(), this->blob_bottom_->mutable_cpu_diff()); const TypeParam* scaled = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(scaled[i], x[i] * alpha); } } TYPED_TEST(CPUMathFunctionsTest, TestCopy) { - const int n = this->blob_bottom_->count(); + const int_tp n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); caffe_cpu_copy(n, bottom_data, top_data); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); } } @@ -166,22 +166,22 @@ TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { #ifdef USE_CUDA - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); - int reference_distance = this->ReferenceHammingDistance(n, x, y); + int_tp reference_distance = this->ReferenceHammingDistance(n, x, y); x = this->blob_bottom_->gpu_data(); y = this->blob_top_->gpu_data(); - int computed_distance = caffe_gpu_hamming_distance(n, x, y); + int_tp computed_distance = caffe_gpu_hamming_distance(n, x, y); EXPECT_EQ(reference_distance, computed_distance); #endif // USE_CUDA } TYPED_TEST(GPUMathFunctionsTest, TestAsum) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { std_asum += std::fabs(x[i]); } TypeParam gpu_asum; @@ -202,7 +202,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { } TYPED_TEST(GPUMathFunctionsTest, TestSign) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); device *dc = Caffe::GetDefaultDevice(); @@ -221,13 +221,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { const TypeParam* signs = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(signs[i], x[i] > 0 ? 1 : (x[i] < 0 ? -1 : 0)); } } TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); device *dc = Caffe::GetDefaultDevice(); @@ -246,13 +246,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { const TypeParam* signbits = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(signbits[i], x[i] < 0 ? 1 : 0); } } TYPED_TEST(GPUMathFunctionsTest, TestFabs) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); device *dc = Caffe::GetDefaultDevice(); @@ -271,13 +271,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestFabs) { const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(abs_val[i], x[i] > 0 ? x[i] : -x[i]); } } TYPED_TEST(GPUMathFunctionsTest, TestScale) { - int n = this->blob_bottom_->count(); + int_tp n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -297,13 +297,13 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { const TypeParam* scaled = this->blob_bottom_->cpu_diff(); const TypeParam* x = this->blob_bottom_->cpu_data(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(scaled[i], x[i] * alpha); } } TYPED_TEST(GPUMathFunctionsTest, TestCopy) { - const int n = this->blob_bottom_->count(); + const int_tp n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); @@ -324,7 +324,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestCopy) { bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); } } diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index f1bc4bcc442..e22ae538b0d 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -68,7 +68,7 @@ TYPED_TEST(MaxPoolingDropoutTest, TestForward) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* top_data = this->blob_top_->cpu_data(); Dtype sum = 0.; - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { sum += top_data[i]; } EXPECT_EQ(sum, this->blob_top_->count()); @@ -79,7 +79,7 @@ TYPED_TEST(MaxPoolingDropoutTest, TestForward) { sum = 0.; Dtype scale = 1. / (1. - layer_param.dropout_param().dropout_ratio()); top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { sum += top_data[i]; } EXPECT_GE(sum, 0); @@ -96,7 +96,7 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } vector propagate_down(this->blob_bottom_vec_.size(), true); @@ -104,7 +104,7 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { this->blob_bottom_vec_); const Dtype* bottom_diff = this->blob_bottom_->cpu_diff(); Dtype sum = 0.; - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { sum += bottom_diff[i]; } EXPECT_EQ(sum, this->blob_top_->count()); @@ -118,7 +118,7 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { this->blob_bottom_vec_); Dtype sum_with_dropout = 0.; bottom_diff = this->blob_bottom_->cpu_diff(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { sum_with_dropout += bottom_diff[i]; } EXPECT_GE(sum_with_dropout, sum); diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp index 7269a4d441b..21b897c61d2 100644 --- a/src/caffe/test/test_memory_data_layer.cpp +++ b/src/caffe/test/test_memory_data_layer.cpp @@ -45,11 +45,11 @@ class MemoryDataLayerTest : public MultiDeviceTest { delete data_; delete labels_; } - int batch_size_; - int batches_; - int channels_; - int height_; - int width_; + int_tp batch_size_; + int_tp batches_; + int_tp channels_; + int_tp height_; + int_tp width_; // we don't really need blobs for the input data, but it makes it // easier to call Filler Blob* const data_; @@ -100,15 +100,15 @@ TYPED_TEST(MemoryDataLayerTest, TestForward) { layer->DataLayerSetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Reset(this->data_->mutable_cpu_data(), this->labels_->mutable_cpu_data(), this->data_->num()); - for (int i = 0; i < this->batches_ * 6; ++i) { - int batch_num = i % this->batches_; + for (int_tp i = 0; i < this->batches_ * 6; ++i) { + int_tp batch_num = i % this->batches_; layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int j = 0; j < this->data_blob_->count(); ++j) { + for (int_tp j = 0; j < this->data_blob_->count(); ++j) { EXPECT_EQ(this->data_blob_->cpu_data()[j], this->data_->cpu_data()[ this->data_->offset(1) * this->batch_size_ * batch_num + j]); } - for (int j = 0; j < this->label_blob_->count(); ++j) { + for (int_tp j = 0; j < this->label_blob_->count(); ++j) { EXPECT_EQ(this->label_blob_->cpu_data()[j], this->labels_->cpu_data()[this->batch_size_ * batch_num + j]); } @@ -129,36 +129,36 @@ TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); // We add batch_size*num_iter items, then for each iteration // we forward batch_size elements - int num_iter = 5; + int_tp num_iter = 5; vector datum_vector(this->batch_size_ * num_iter); - const size_t count = this->channels_ * this->height_ * this->width_; - size_t pixel_index = 0; - for (int i = 0; i < this->batch_size_ * num_iter; ++i) { + const uint_tp count = this->channels_ * this->height_ * this->width_; + uint_tp pixel_index = 0; + for (int_tp i = 0; i < this->batch_size_ * num_iter; ++i) { datum_vector[i].set_channels(this->channels_); datum_vector[i].set_height(this->height_); datum_vector[i].set_width(this->width_); datum_vector[i].set_label(i); vector pixels(count); - for (int j = 0; j < count; ++j) { + for (int_tp j = 0; j < count; ++j) { pixels[j] = pixel_index++ % 256; } datum_vector[i].set_data(&(pixels[0]), count); } layer.AddDatumVector(datum_vector); - int data_index; + int_tp data_index; // Go through the data 5 times - for (int iter = 0; iter < num_iter; ++iter) { - int offset = this->batch_size_ * iter; + for (int_tp iter = 0; iter < num_iter; ++iter) { + int_tp offset = this->batch_size_ * iter; layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->data_blob_->cpu_data(); - size_t index = 0; - for (int i = 0; i < this->batch_size_; ++i) { + uint_tp index = 0; + for (int_tp i = 0; i < this->batch_size_; ++i) { const string& data_string = datum_vector[offset + i].data(); EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); - for (int c = 0; c < this->channels_; ++c) { - for (int h = 0; h < this->height_; ++h) { - for (int w = 0; w < this->width_; ++w) { + for (int_tp c = 0; c < this->channels_; ++c) { + for (int_tp h = 0; h < this->height_; ++h) { + for (int_tp w = 0; w < this->width_; ++w) { data_index = (c * this->height_ + h) * this->width_ + w; EXPECT_EQ(static_cast( static_cast(data_string[data_index])), @@ -182,32 +182,32 @@ TYPED_TEST(MemoryDataLayerTest, AddMatVectorDefaultTransform) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); // We add batch_size*num_iter items, then for each iteration // we forward batch_size elements - int num_iter = 5; + int_tp num_iter = 5; vector mat_vector(this->batch_size_ * num_iter); - vector label_vector(this->batch_size_ * num_iter); - for (int i = 0; i < this->batch_size_*num_iter; ++i) { + vector label_vector(this->batch_size_ * num_iter); + for (int_tp i = 0; i < this->batch_size_*num_iter; ++i) { mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4); label_vector[i] = i; cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255)); } layer.AddMatVector(mat_vector, label_vector); - int data_index; - const size_t count = this->channels_ * this->height_ * this->width_; - for (int iter = 0; iter < num_iter; ++iter) { - int offset = this->batch_size_ * iter; + int_tp data_index; + const uint_tp count = this->channels_ * this->height_ * this->width_; + for (int_tp iter = 0; iter < num_iter; ++iter) { + int_tp offset = this->batch_size_ * iter; layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->data_blob_->cpu_data(); - for (int i = 0; i < this->batch_size_; ++i) { + for (int_tp i = 0; i < this->batch_size_; ++i) { EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); - for (int h = 0; h < this->height_; ++h) { + for (int_tp h = 0; h < this->height_; ++h) { const unsigned char* ptr_mat = mat_vector[offset + i].ptr(h); - int index = 0; - for (int w = 0; w < this->width_; ++w) { - for (int c = 0; c < this->channels_; ++c) { + int_tp index = 0; + for (int_tp w = 0; w < this->width_; ++w) { + for (int_tp c = 0; c < this->channels_; ++c) { data_index = (i*count) + (c * this->height_ + h) * this->width_ + w; Dtype pixel = static_cast(ptr_mat[index++]); - EXPECT_EQ(static_cast(pixel), + EXPECT_EQ(static_cast(pixel), data[data_index]); } } @@ -227,45 +227,45 @@ TYPED_TEST(MemoryDataLayerTest, TestSetBatchSize) { MemoryDataLayer layer(param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); // first add data as usual - int num_iter = 5; + int_tp num_iter = 5; vector mat_vector(this->batch_size_ * num_iter); - vector label_vector(this->batch_size_ * num_iter); - for (int i = 0; i < this->batch_size_*num_iter; ++i) { + vector label_vector(this->batch_size_ * num_iter); + for (int_tp i = 0; i < this->batch_size_*num_iter; ++i) { mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4); label_vector[i] = i; cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255)); } layer.AddMatVector(mat_vector, label_vector); // then consume the data - int data_index; - const size_t count = this->channels_ * this->height_ * this->width_; - for (int iter = 0; iter < num_iter; ++iter) { - int offset = this->batch_size_ * iter; + int_tp data_index; + const uint_tp count = this->channels_ * this->height_ * this->width_; + for (int_tp iter = 0; iter < num_iter; ++iter) { + int_tp offset = this->batch_size_ * iter; layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->data_blob_->cpu_data(); - for (int i = 0; i < this->batch_size_; ++i) { + for (int_tp i = 0; i < this->batch_size_; ++i) { EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); - for (int h = 0; h < this->height_; ++h) { + for (int_tp h = 0; h < this->height_; ++h) { const unsigned char* ptr_mat = mat_vector[offset + i].ptr(h); - int index = 0; - for (int w = 0; w < this->width_; ++w) { - for (int c = 0; c < this->channels_; ++c) { + int_tp index = 0; + for (int_tp w = 0; w < this->width_; ++w) { + for (int_tp c = 0; c < this->channels_; ++c) { data_index = (i*count) + (c * this->height_ + h) * this->width_ + w; Dtype pixel = static_cast(ptr_mat[index++]); - EXPECT_EQ(static_cast(pixel), data[data_index]); + EXPECT_EQ(static_cast(pixel), data[data_index]); } } } } } // and then add new data with different batch_size - int new_batch_size = 16; + int_tp new_batch_size = 16; layer.set_batch_size(new_batch_size); mat_vector.clear(); mat_vector.resize(new_batch_size * num_iter); label_vector.clear(); label_vector.resize(new_batch_size * num_iter); - for (int i = 0; i < new_batch_size*num_iter; ++i) { + for (int_tp i = 0; i < new_batch_size*num_iter; ++i) { mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4); label_vector[i] = i; cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255)); @@ -273,22 +273,22 @@ TYPED_TEST(MemoryDataLayerTest, TestSetBatchSize) { layer.AddMatVector(mat_vector, label_vector); // finally consume new data and check if everything is fine - for (int iter = 0; iter < num_iter; ++iter) { - int offset = new_batch_size * iter; + for (int_tp iter = 0; iter < num_iter; ++iter) { + int_tp offset = new_batch_size * iter; layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(new_batch_size, this->blob_top_vec_[0]->num()); EXPECT_EQ(new_batch_size, this->blob_top_vec_[1]->num()); const Dtype* data = this->data_blob_->cpu_data(); - for (int i = 0; i < new_batch_size; ++i) { + for (int_tp i = 0; i < new_batch_size; ++i) { EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]); - for (int h = 0; h < this->height_; ++h) { + for (int_tp h = 0; h < this->height_; ++h) { const unsigned char* ptr_mat = mat_vector[offset + i].ptr(h); - int index = 0; - for (int w = 0; w < this->width_; ++w) { - for (int c = 0; c < this->channels_; ++c) { + int_tp index = 0; + for (int_tp w = 0; w < this->width_; ++w) { + for (int_tp c = 0; c < this->channels_; ++c) { data_index = (i*count) + (c * this->height_ + h) * this->width_ + w; Dtype pixel = static_cast(ptr_mat[index++]); - EXPECT_EQ(static_cast(pixel), data[data_index]); + EXPECT_EQ(static_cast(pixel), data[data_index]); } } } diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp index eac03e4abe4..14c8280e0bc 100644 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -39,14 +39,14 @@ class MergeCropLayerTest : public GPUDeviceTest { } void TestForward() { - int a_h = blob_bottom_a_->height(); - int a_w = blob_bottom_a_->width(); - int a_c = blob_bottom_a_->channels(); - - for (int n = 0; n < blob_bottom_a_->num(); ++n) { - for (int c = 0; c < a_c; ++c) { - for (int h = 0; h < a_h; ++h) { - for (int w = 0; w < a_w; ++w) { + int_tp a_h = blob_bottom_a_->height(); + int_tp a_w = blob_bottom_a_->width(); + int_tp a_c = blob_bottom_a_->channels(); + + for (int_tp n = 0; n < blob_bottom_a_->num(); ++n) { + for (int_tp c = 0; c < a_c; ++c) { + for (int_tp h = 0; h < a_h; ++h) { + for (int_tp w = 0; w < a_w; ++w) { blob_bottom_a_->mutable_cpu_data()[w + h * a_w + c * a_h * a_w + n * a_h * a_w * a_c] = (w + h * 10 + c * 100 + n * 1000 + 10000); @@ -55,14 +55,14 @@ class MergeCropLayerTest : public GPUDeviceTest { } } - int b_h = blob_bottom_b_->height(); - int b_w = blob_bottom_b_->width(); - int b_c = blob_bottom_b_->channels(); + int_tp b_h = blob_bottom_b_->height(); + int_tp b_w = blob_bottom_b_->width(); + int_tp b_c = blob_bottom_b_->channels(); - for (int n = 0; n < blob_bottom_b_->num(); ++n) { - for (int c = 0; c < b_c; ++c) { - for (int h = 0; h < b_h; ++h) { - for (int w = 0; w < b_w; ++w) { + for (int_tp n = 0; n < blob_bottom_b_->num(); ++n) { + for (int_tp c = 0; c < b_c; ++c) { + for (int_tp h = 0; h < b_h; ++h) { + for (int_tp w = 0; w < b_w; ++w) { blob_bottom_b_->mutable_cpu_data()[w + h * b_w + c * b_h * b_w + n * b_h * b_w * b_c] = -(w + h * 10 + c * 100 + n * 1000 + 10000); @@ -85,11 +85,11 @@ class MergeCropLayerTest : public GPUDeviceTest { layer.Forward(blob_bottom_vec_, blob_top_vec_); // Test copy from A - int offset = 0; - for (int n = 0; n < blob_bottom_a_->num(); ++n) { - for (int c = 0; c < a_c; ++c) { - for (int h = 0; h < a_h; ++h) { - for (int w = 0; w < a_w; ++w) { + int_tp offset = 0; + for (int_tp n = 0; n < blob_bottom_a_->num(); ++n) { + for (int_tp c = 0; c < a_c; ++c) { + for (int_tp h = 0; h < a_h; ++h) { + for (int_tp w = 0; w < a_w; ++w) { EXPECT_EQ( (w + h * 10 + c * 100 + n * 1000 + 10000), blob_top_->cpu_data()[offset + w + h * a_w + c * a_h * a_w]); @@ -101,10 +101,10 @@ class MergeCropLayerTest : public GPUDeviceTest { // Test copy from B offset = a_h * a_w * a_c; - for (int n = 0; n < blob_bottom_a_->num(); ++n) { - for (int c = 0; c < b_c; ++c) { - for (int h = 0; h < b_h; ++h) { - for (int w = 0; w < b_w; ++w) { + for (int_tp n = 0; n < blob_bottom_a_->num(); ++n) { + for (int_tp c = 0; c < b_c; ++c) { + for (int_tp h = 0; h < b_h; ++h) { + for (int_tp w = 0; w < b_w; ++w) { if (h >= (b_h - a_h) / 2 && h < a_h && w >= (b_w - a_w) / 2 && w < a_w) { EXPECT_EQ( @@ -120,14 +120,14 @@ class MergeCropLayerTest : public GPUDeviceTest { } void TestBackward() { - int a_h = blob_bottom_a_->height(); - int a_w = blob_bottom_a_->width(); - int a_c = blob_bottom_a_->channels(); - - for (int n = 0; n < blob_bottom_a_->num(); ++n) { - for (int c = 0; c < a_c; ++c) { - for (int h = 0; h < a_h; ++h) { - for (int w = 0; w < a_w; ++w) { + int_tp a_h = blob_bottom_a_->height(); + int_tp a_w = blob_bottom_a_->width(); + int_tp a_c = blob_bottom_a_->channels(); + + for (int_tp n = 0; n < blob_bottom_a_->num(); ++n) { + for (int_tp c = 0; c < a_c; ++c) { + for (int_tp h = 0; h < a_h; ++h) { + for (int_tp w = 0; w < a_w; ++w) { blob_bottom_a_->mutable_cpu_data()[w + h * a_w + c * a_h * a_w + n * a_h * a_w * a_c] = (w + h * 10 + c * 100 + n * 1000 + 10000); @@ -136,14 +136,14 @@ class MergeCropLayerTest : public GPUDeviceTest { } } - int b_h = blob_bottom_b_->height(); - int b_w = blob_bottom_b_->width(); - int b_c = blob_bottom_b_->channels(); + int_tp b_h = blob_bottom_b_->height(); + int_tp b_w = blob_bottom_b_->width(); + int_tp b_c = blob_bottom_b_->channels(); - for (int n = 0; n < blob_bottom_b_->num(); ++n) { - for (int c = 0; c < b_c; ++c) { - for (int h = 0; h < b_h; ++h) { - for (int w = 0; w < b_w; ++w) { + for (int_tp n = 0; n < blob_bottom_b_->num(); ++n) { + for (int_tp c = 0; c < b_c; ++c) { + for (int_tp h = 0; h < b_h; ++h) { + for (int_tp w = 0; w < b_w; ++w) { blob_bottom_b_->mutable_cpu_data()[w + h * b_w + c * b_h * b_w + n * b_h * b_w * b_c] = -(w + h * 10 + c * 100 + n * 1000 + 10000); @@ -164,10 +164,10 @@ class MergeCropLayerTest : public GPUDeviceTest { layer.Backward(blob_top_vec_, propagate_down, blob_bottom_vec_); // Test copy to A - for (int n = 0; n < blob_bottom_a_->num(); ++n) { - for (int c = 0; c < a_c; ++c) { - for (int h = 0; h < a_h; ++h) { - for (int w = 0; w < a_w; ++w) { + for (int_tp n = 0; n < blob_bottom_a_->num(); ++n) { + for (int_tp c = 0; c < a_c; ++c) { + for (int_tp h = 0; h < a_h; ++h) { + for (int_tp w = 0; w < a_w; ++w) { EXPECT_EQ( (w + h * 10 + c * 100 + n * 1000 + 10000), blob_bottom_a_->cpu_diff()[w + h * a_w + c * a_h * a_w diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index b2db984feb1..5e6f97c7852 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -28,7 +28,7 @@ class MultinomialLogisticLossLayerTest : public CPUDeviceTest { PositiveUnitballFiller filler(filler_param); filler.Fill(this->blob_bottom_data_); blob_bottom_vec_.push_back(blob_bottom_data_); - for (int i = 0; i < blob_bottom_label_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_label_->count(); ++i) { blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; } blob_bottom_vec_.push_back(blob_bottom_label_); diff --git a/src/caffe/test/test_mvn_layer.cpp b/src/caffe/test/test_mvn_layer.cpp index 261b90fee96..c69e5ce9d8e 100644 --- a/src/caffe/test/test_mvn_layer.cpp +++ b/src/caffe/test/test_mvn_layer.cpp @@ -44,16 +44,16 @@ TYPED_TEST(MVNLayerTest, TestForward) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test mean - int num = this->blob_bottom_->num(); - int channels = this->blob_bottom_->channels(); - int height = this->blob_bottom_->height(); - int width = this->blob_bottom_->width(); + int_tp num = this->blob_bottom_->num(); + int_tp channels = this->blob_bottom_->channels(); + int_tp height = this->blob_bottom_->height(); + int_tp width = this->blob_bottom_->width(); - for (int i = 0; i < num; ++i) { - for (int j = 0; j < channels; ++j) { + for (int_tp i = 0; i < num; ++i) { + for (int_tp j = 0; j < channels; ++j) { Dtype sum = 0, var = 0; - for (int k = 0; k < height; ++k) { - for (int l = 0; l < width; ++l) { + for (int_tp k = 0; k < height; ++k) { + for (int_tp l = 0; l < width; ++l) { Dtype data = this->blob_top_->data_at(i, j, k, l); sum += data; var += data * data; @@ -80,16 +80,16 @@ TYPED_TEST(MVNLayerTest, TestForwardMeanOnly) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test mean - int num = this->blob_bottom_->num(); - int channels = this->blob_bottom_->channels(); - int height = this->blob_bottom_->height(); - int width = this->blob_bottom_->width(); + int_tp num = this->blob_bottom_->num(); + int_tp channels = this->blob_bottom_->channels(); + int_tp height = this->blob_bottom_->height(); + int_tp width = this->blob_bottom_->width(); - for (int i = 0; i < num; ++i) { - for (int j = 0; j < channels; ++j) { + for (int_tp i = 0; i < num; ++i) { + for (int_tp j = 0; j < channels; ++j) { Dtype sum = 0, var = 0; - for (int k = 0; k < height; ++k) { - for (int l = 0; l < width; ++l) { + for (int_tp k = 0; k < height; ++k) { + for (int_tp l = 0; l < width; ++l) { Dtype data = this->blob_top_->data_at(i, j, k, l); sum += data; var += data * data; @@ -113,16 +113,16 @@ TYPED_TEST(MVNLayerTest, TestForwardAcrossChannels) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test mean - int num = this->blob_bottom_->num(); - int channels = this->blob_bottom_->channels(); - int height = this->blob_bottom_->height(); - int width = this->blob_bottom_->width(); + int_tp num = this->blob_bottom_->num(); + int_tp channels = this->blob_bottom_->channels(); + int_tp height = this->blob_bottom_->height(); + int_tp width = this->blob_bottom_->width(); - for (int i = 0; i < num; ++i) { + for (int_tp i = 0; i < num; ++i) { Dtype sum = 0, var = 0; - for (int j = 0; j < channels; ++j) { - for (int k = 0; k < height; ++k) { - for (int l = 0; l < width; ++l) { + for (int_tp j = 0; j < channels; ++j) { + for (int_tp k = 0; k < height; ++k) { + for (int_tp l = 0; l < width; ++l) { Dtype data = this->blob_top_->data_at(i, j, k, l); sum += data; var += data * data; diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 78c64829a16..f8388f31cde 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -36,7 +36,7 @@ class NetTest : public MultiDeviceTest { blobs_copy->clear(); blobs_copy->resize(net_blobs.size()); const bool kReshape = true; - for (int i = 0; i < net_blobs.size(); ++i) { + for (int_tp i = 0; i < net_blobs.size(); ++i) { (*blobs_copy)[i].reset(new Blob()); (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); } @@ -49,7 +49,7 @@ class NetTest : public MultiDeviceTest { params_copy->clear(); params_copy->resize(net_params.size()); const bool kReshape = true; - for (int i = 0; i < net_params.size(); ++i) { + for (int_tp i = 0; i < net_params.size(); ++i) { (*params_copy)[i].reset(new Blob()); (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); } @@ -713,7 +713,7 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } - int seed_; + int_tp seed_; shared_ptr > net_; }; @@ -831,9 +831,9 @@ TYPED_TEST(NetTest, TestLossWeight) { const Dtype kMinLossAbsValue = 1e-2; ASSERT_GE(fabs(loss), kMinLossAbsValue); const Dtype kErrorMargin = 1e-4; - const int kNumLossWeights = 6; + const int_tp kNumLossWeights = 6; Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; - for (int i = 0; i < kNumLossWeights; ++i) { + for (int_tp i = 0; i < kNumLossWeights; ++i) { Caffe::set_random_seed(this->seed_); this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward); const Dtype weighted_loss = this->net_->ForwardBackward(bottom); @@ -843,9 +843,9 @@ TYPED_TEST(NetTest, TestLossWeight) { const vector > >& weighted_blobs = this->net_->blobs(); ASSERT_EQ(blob_grads.size(), weighted_blobs.size()); - for (int j = 0; j < blob_grads.size(); ++j) { + for (int_tp j = 0; j < blob_grads.size(); ++j) { ASSERT_EQ(blob_grads[j]->count(), weighted_blobs[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { + for (int_tp k = 0; k < blob_grads[j]->count(); ++k) { EXPECT_NEAR(blob_grads[j]->cpu_diff()[k] * kLossWeights[i], weighted_blobs[j]->cpu_diff()[k], error_margin); } @@ -853,9 +853,9 @@ TYPED_TEST(NetTest, TestLossWeight) { const vector > >& weighted_params = this->net_->params(); ASSERT_EQ(param_grads.size(), weighted_params.size()); - for (int j = 0; j < param_grads.size(); ++j) { + for (int_tp j = 0; j < param_grads.size(); ++j) { ASSERT_EQ(param_grads[j]->count(), weighted_params[j]->count()); - for (int k = 0; k < param_grads[j]->count(); ++k) { + for (int_tp k = 0; k < param_grads[j]->count(); ++k) { EXPECT_NEAR(param_grads[j]->cpu_diff()[k] * kLossWeights[i], weighted_params[j]->cpu_diff()[k], error_margin); } @@ -881,9 +881,9 @@ TYPED_TEST(NetTest, TestLossWeightMidNet) { const Dtype kMinLossAbsValue = 1e-2; ASSERT_GE(fabs(loss), kMinLossAbsValue); const Dtype kErrorMargin = 1e-4; - const int kNumLossWeights = 6; + const int_tp kNumLossWeights = 6; Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; - for (int i = 0; i < kNumLossWeights; ++i) { + for (int_tp i = 0; i < kNumLossWeights; ++i) { Caffe::set_random_seed(this->seed_); this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i], kForceBackward); @@ -894,7 +894,7 @@ TYPED_TEST(NetTest, TestLossWeightMidNet) { const shared_ptr >& weighted_blob = this->net_->blob_by_name("data"); ASSERT_EQ(data_grad.count(), weighted_blob->count()); - for (int j = 0; j < data_grad.count(); ++j) { + for (int_tp j = 0; j < data_grad.count(); ++j) { EXPECT_NEAR(data_grad.cpu_diff()[j] * kLossWeights[i], weighted_blob->cpu_diff()[j], error_margin); } @@ -944,7 +944,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { this->net_->blobs(); ASSERT_EQ(blob_grads.size(), blob_grads_loss_3.size()); ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_loss_3.size()); - for (int j = 0; j < blob_grads.size(); ++j) { + for (int_tp j = 0; j < blob_grads.size(); ++j) { const string& blob_name = this->net_->blob_names()[j]; bool grad_should_change = true; if (blob_name == "innerproduct1_innerproduct1_0_split_0") { @@ -952,7 +952,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { } ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_3[j]->count()); ASSERT_EQ(blob_grads_loss_2[j]->count(), blob_grads_loss_3[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { + for (int_tp k = 0; k < blob_grads[j]->count(); ++k) { const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - blob_grads[j]->cpu_diff()[k]; const Dtype grad_diff_3 = blob_grads_loss_3[j]->cpu_diff()[k] - @@ -989,7 +989,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { ASSERT_EQ(blob_grads.size(), blob_grads_midnet_loss_3.size()); ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_midnet_loss_3.size()); const vector& blob_names = this->net_->blob_names(); - for (int j = 0; j < blob_grads.size(); ++j) { + for (int_tp j = 0; j < blob_grads.size(); ++j) { const string& blob_name = blob_names[j]; bool grad_should_change = false; if (blob_name == "innerproduct1" || @@ -999,7 +999,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { } ASSERT_EQ(blob_grads[j]->count(), blob_grads_midnet_loss_3[j]->count()); ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_2[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { + for (int_tp k = 0; k < blob_grads[j]->count(); ++k) { const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - blob_grads[j]->cpu_diff()[k]; const Dtype grad_diff_3 = blob_grads_midnet_loss_3[j]->cpu_diff()[k] - @@ -1069,10 +1069,10 @@ TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) { net->Backward(); Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); - const int count = ip1_layer->blobs()[0]->count(); + const int_tp count = ip1_layer->blobs()[0]->count(); const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_GT(fabs(grad1[i]), 0); EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]); } @@ -1089,10 +1089,10 @@ TYPED_TEST(NetTest, TestSharedWeightsDiffNet) { EXPECT_FLOAT_EQ(loss, 0); Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); - const int count = ip1_layer->blobs()[0]->count(); + const int_tp count = ip1_layer->blobs()[0]->count(); const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_FLOAT_EQ(0, grad1[i]); EXPECT_FLOAT_EQ(0, grad2[i]); } @@ -1119,9 +1119,9 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { const bool copy_diff = false; shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); - const int count = ip1_weights->count(); + const int_tp count = ip1_weights->count(); // Make sure the diffs are non-trivial. - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); } caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), @@ -1129,7 +1129,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { const Dtype* expected_updated_params = shared_params.cpu_data(); this->net_->Update(); const Dtype* actual_updated_params = ip1_weights->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]); } // Check that data blobs of shared weights STILL point to the same memory @@ -1156,7 +1156,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); // Make sure the diffs are non-trivial and sum to the diff in the shared net. - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); EXPECT_NE(0, ip2_weights->cpu_diff()[i]); EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); @@ -1172,7 +1172,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { this->net_->Update(); const Dtype* actual_updated_params1 = ip1_weights->cpu_data(); const Dtype* actual_updated_params2 = ip2_weights->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]); EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]); EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]); @@ -1201,7 +1201,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { const bool kReshape = true; const bool kCopyDiff = false; shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); - const int count = ip1_weights->count(); + const int_tp count = ip1_weights->count(); // Write the net to a NetParameter, as in Solver::Snapshot. NetParameter net_param; @@ -1221,7 +1221,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); } } @@ -1241,11 +1241,11 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { this->net_->Forward(bottom); this->net_->Backward(); const vector > >& params = this->net_->params(); - const int num_params = params.size(); + const int_tp num_params = params.size(); ASSERT_EQ(4, num_params); const Dtype kNonZeroTestMin = 1e-3; vector param_asums(params.size()); - for (int i = 0; i < num_params; ++i) { + for (int_tp i = 0; i < num_params; ++i) { const Dtype param_asum = caffe_cpu_asum(params[i]->count(), params[i]->cpu_diff()); param_asums[i] = param_asum; @@ -1262,7 +1262,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { this->net_->Backward(); const vector > >& params2 = this->net_->params(); ASSERT_EQ(num_params, params2.size()); - for (int i = 0; i < num_params; ++i) { + for (int_tp i = 0; i < num_params; ++i) { const Dtype param_asum = caffe_cpu_asum(params2[i]->count(), params2[i]->cpu_diff()); EXPECT_FLOAT_EQ(param_asum, param_asums[i]); @@ -1278,7 +1278,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { this->net_->Backward(); const vector > >& params3 = this->net_->params(); ASSERT_EQ(num_params, params3.size()); - for (int i = 0; i < num_params; ++i) { + for (int_tp i = 0; i < num_params; ++i) { const Dtype param_asum = caffe_cpu_asum(params3[i]->count(), params3[i]->cpu_diff()); if (i == 1 || i == 2) { @@ -1297,7 +1297,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { this->net_->Backward(); const vector > >& params4 = this->net_->params(); ASSERT_EQ(num_params, params4.size()); - for (int i = 0; i < num_params; ++i) { + for (int_tp i = 0; i < num_params; ++i) { const Dtype param_asum = caffe_cpu_asum(params4[i]->count(), params4[i]->cpu_diff()); if (i == 0 || i == 3) { @@ -1322,7 +1322,7 @@ TYPED_TEST(NetTest, TestFromTo) { Dtype loss = *loss_ptr; // Check that combining partial Forwards gives the same loss. - for (int i = 1; i < this->net_->layers().size(); ++i) { + for (int_tp i = 1; i < this->net_->layers().size(); ++i) { // Note that we skip layer zero to keep the same data. this->net_->ForwardFromTo(1, 1); if (i < this->net_->layers().size() - 1) { @@ -1332,10 +1332,10 @@ TYPED_TEST(NetTest, TestFromTo) { } // Check that combining partial Backwards gives the same data diff. - for (int i = 1; i < this->net_->layers().size(); ++i) { + for (int_tp i = 1; i < this->net_->layers().size(); ++i) { this->net_->BackwardTo(i); this->net_->BackwardFrom(i - 1); - for (int j = 0; j < data.count(); ++j) { + for (int_tp j = 0; j < data.count(); ++j) { EXPECT_EQ(data.cpu_diff()[j], this->net_->blob_by_name("data")->cpu_diff()[j]); } @@ -2305,7 +2305,7 @@ TYPED_TEST(NetTest, TestReshape) { caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); - for (int i = 0; i < output1.count(); ++i) { + for (int_tp i = 0; i < output1.count(); ++i) { EXPECT_FLOAT_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i)); } @@ -2314,15 +2314,15 @@ TYPED_TEST(NetTest, TestReshape) { caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); this->net_->ForwardPrefilled(); this->net_->Backward(); - for (int i = 0; i < output2.count(); ++i) { + for (int_tp i = 0; i < output2.count(); ++i) { EXPECT_FLOAT_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i)); } EXPECT_EQ(output1.num(), blob1.num()); EXPECT_EQ(output2.num(), blob2.num()); bool same_spatial_shape = true; - const int kFirstSpatialAxis = 2; - for (int i = kFirstSpatialAxis; i < output1.num_axes(); ++i) { + const int_tp kFirstSpatialAxis = 2; + for (int_tp i = kFirstSpatialAxis; i < output1.num_axes(); ++i) { if (output1.shape(i) != output2.shape(i)) { same_spatial_shape = false; break; @@ -2335,7 +2335,7 @@ TYPED_TEST(NetTest, TestSkipPropagateDown) { // check bottom_need_backward if propagate_down is true this->InitSkipPropNet(false); vector vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + for (int_tp layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { string layer_name = this->net_->layer_names()[layer_id]; if (layer_name == "loss") { // access to bottom_need_backward coresponding to label's blob @@ -2358,7 +2358,7 @@ TYPED_TEST(NetTest, TestSkipPropagateDown) { this->InitSkipPropNet(true); vec_layer_need_backward.clear(); vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + for (int_tp layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { string layer_name = this->net_->layer_names()[layer_id]; if (layer_name == "loss") { // access to bottom_need_backward coresponding to label's blob diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 70ab0bad359..7b25fe38ab2 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -52,10 +52,10 @@ class NeuronLayerTest : public MultiDeviceTest { const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio()); - const int count = this->blob_bottom_->count(); + const int_tp count = this->blob_bottom_->count(); // Initialize num_kept to count the number of inputs NOT dropped out. - int num_kept = 0; - for (int i = 0; i < count; ++i) { + int_tp num_kept = 0; + for (int_tp i = 0; i < count; ++i) { if (top_data[i] != 0) { ++num_kept; EXPECT_EQ(top_data[i], bottom_data[i] * scale); @@ -80,7 +80,7 @@ class NeuronLayerTest : public MultiDeviceTest { const Dtype kDelta = 2e-2; const Dtype* bottom_data = blob_bottom_->cpu_data(); const Dtype* top_data = blob_top_->cpu_data(); - for (int i = 0; i < blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_->count(); ++i) { const Dtype bottom_val = bottom_data[i]; const Dtype top_val = top_data[i]; if (base == -1) { @@ -107,11 +107,11 @@ class NeuronLayerTest : public MultiDeviceTest { const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype* slope_data = layer->blobs()[0]->cpu_data(); - int hw = this->blob_bottom_->height() * this->blob_bottom_->width(); - int channels = this->blob_bottom_->channels(); + int_tp hw = this->blob_bottom_->height() * this->blob_bottom_->width(); + int_tp channels = this->blob_bottom_->channels(); bool channel_shared = layer->layer_param().prelu_param().channel_shared(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - int c = channel_shared ? 0 : (i / hw) % channels; + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { + int_tp c = channel_shared ? 0 : (i / hw) % channels; EXPECT_EQ(top_data[i], std::max(bottom_data[i], (Dtype)(0)) + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); @@ -138,7 +138,7 @@ class NeuronLayerTest : public MultiDeviceTest { const Dtype kDelta = 2e-3; const Dtype* bottom_data = blob_bottom_->cpu_data(); const Dtype* top_data = blob_top_->cpu_data(); - for (int i = 0; i < blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_->count(); ++i) { const Dtype bottom_val = bottom_data[i]; const Dtype top_val = top_data[i]; if (base == -1) { @@ -172,8 +172,8 @@ TYPED_TEST(NeuronLayerTest, TestAbsVal) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); - const int count = this->blob_bottom_->count(); - for (int i = 0; i < count; ++i) { + const int_tp count = this->blob_bottom_->count(); + for (int_tp i = 0; i < count; ++i) { EXPECT_EQ(top_data[i], fabs(bottom_data[i])); } } @@ -196,7 +196,7 @@ TYPED_TEST(NeuronLayerTest, TestReLU) { // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_GE(top_data[i], 0.); EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); } @@ -222,7 +222,7 @@ TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) { // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { if (top_data[i] >= 0) { EXPECT_FLOAT_EQ(top_data[i], bottom_data[i]); } else { @@ -252,7 +252,7 @@ TYPED_TEST(NeuronLayerTest, TestSigmoid) { const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype kDelta = 2e-3; - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(top_data[i], 1. / (1 + exp(-bottom_data[i])), kDelta); // check that we squashed the value between 0 and 1 EXPECT_GE(top_data[i], 0.); @@ -276,10 +276,10 @@ TYPED_TEST(NeuronLayerTest, TestTanH) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test exact values - for (int i = 0; i < this->blob_bottom_->num(); ++i) { - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { - for (int k = 0; k < this->blob_bottom_->height(); ++k) { - for (int l = 0; l < this->blob_bottom_->width(); ++l) { + for (int_tp i = 0; i < this->blob_bottom_->num(); ++i) { + for (int_tp j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int_tp k = 0; k < this->blob_bottom_->height(); ++k) { + for (int_tp l = 0; l < this->blob_bottom_->width(); ++l) { EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); @@ -485,7 +485,7 @@ TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) { // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { if (top_data[i] != 0) { EXPECT_EQ(top_data[i], bottom_data[i]); } @@ -521,7 +521,7 @@ TYPED_TEST(NeuronLayerTest, TestBNLL) { // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_GE(top_data[i], 0.); EXPECT_GE(top_data[i], bottom_data[i]); } @@ -542,8 +542,8 @@ TYPED_TEST(NeuronLayerTest, TestPReLUParam) { PReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* slopes = layer.blobs()[0]->cpu_data(); - int count = layer.blobs()[0]->count(); - for (int i = 0; i < count; ++i, ++slopes) { + int_tp count = layer.blobs()[0]->count(); + for (int_tp i = 0; i < count; ++i, ++slopes) { EXPECT_EQ(*slopes, 0.25); } } @@ -613,7 +613,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { // Check forward prelu.Forward(this->blob_bottom_vec_, this->blob_top_vec_); relu.Forward(this->blob_bottom_vec_, blob_top_vec_2); - for (int s = 0; s < blob_top_2->count(); ++s) { + for (int_tp s = 0; s < blob_top_2->count(); ++s) { EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); } // Check backward @@ -630,7 +630,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) { propagate_down.push_back(true); prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); relu.Backward(blob_top_vec_2, propagate_down, blob_bottom_vec_2); - for (int s = 0; s < blob_bottom_2->count(); ++s) { + for (int_tp s = 0; s < blob_bottom_2->count(); ++s) { EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); } } @@ -673,7 +673,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); // Check numbers - for (int s = 0; s < blob_top_2->count(); ++s) { + for (int_tp s = 0; s < blob_top_2->count(); ++s) { EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]); } // Fill top diff with random numbers @@ -695,16 +695,16 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { prelu2.Backward(blob_top_vec_2, propagate_down, blob_middle_vec_2); ip2.Backward(blob_middle_vec_2, propagate_down, blob_bottom_vec_2); // Check numbers - for (int s = 0; s < blob_bottom_2->count(); ++s) { + for (int_tp s = 0; s < blob_bottom_2->count(); ++s) { EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); } - for (int s = 0; s < ip.blobs()[0]->count(); ++s) { + for (int_tp s = 0; s < ip.blobs()[0]->count(); ++s) { EXPECT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]); } - for (int s = 0; s < ip.blobs()[1]->count(); ++s) { + for (int_tp s = 0; s < ip.blobs()[1]->count(); ++s) { EXPECT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]); } - for (int s = 0; s < prelu.blobs()[0]->count(); ++s) { + for (int_tp s = 0; s < prelu.blobs()[0]->count(); ++s) { EXPECT_EQ(prelu.blobs()[0]->cpu_diff()[s], prelu2.blobs()[0]->cpu_diff()[s]); } @@ -743,7 +743,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_GE(top_data[i], 0.); EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); } @@ -771,7 +771,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { if (top_data[i] >= 0) { EXPECT_FLOAT_EQ(top_data[i], bottom_data[i]); } else { @@ -802,7 +802,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); // check that we squashed the value between 0 and 1 EXPECT_GE(top_data[i], 0.); @@ -828,10 +828,10 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test exact values - for (int i = 0; i < this->blob_bottom_->num(); ++i) { - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { - for (int k = 0; k < this->blob_bottom_->height(); ++k) { - for (int l = 0; l < this->blob_bottom_->width(); ++l) { + for (int_tp i = 0; i < this->blob_bottom_->num(); ++i) { + for (int_tp j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int_tp k = 0; k < this->blob_bottom_->height(); ++k) { + for (int_tp l = 0; l < this->blob_bottom_->width(); ++l) { EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 89dd6060c3e..06e6a35ce5d 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -48,14 +48,14 @@ class PoolingLayerTest : public MultiDeviceTest { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->add_kernel_size(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - const int num = 2; - const int channels = 2; + const int_tp num = 2; + const int_tp channels = 2; blob_bottom_->Reshape(num, channels, 3, 5); // Input: 2x 2 channels of: // [1 2 5 2 3] // [9 4 1 4 8] // [1 2 5 2 3] - for (int i = 0; i < 15 * num * channels; i += 15) { + for (int_tp i = 0; i < 15 * num * channels; i += 15) { blob_bottom_->mutable_cpu_data()[i + 0] = 1; blob_bottom_->mutable_cpu_data()[i + 1] = 2; blob_bottom_->mutable_cpu_data()[i + 2] = 5; @@ -88,7 +88,7 @@ class PoolingLayerTest : public MultiDeviceTest { // Expected output: 2x 2 channels of: // [9 5 5 8] // [9 5 5 8] - for (int i = 0; i < 8 * num * channels; i += 8) { + for (int_tp i = 0; i < 8 * num * channels; i += 8) { EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5); EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5); @@ -102,7 +102,7 @@ class PoolingLayerTest : public MultiDeviceTest { // Expected mask output: 2x 2 channels of: // [5 2 2 9] // [5 12 12 9] - for (int i = 0; i < 8 * num * channels; i += 8) { + for (int_tp i = 0; i < 8 * num * channels; i += 8) { EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 5); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 2); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 2); @@ -121,8 +121,8 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_kernel_h(3); pooling_param->set_kernel_w(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - const int num = 2; - const int channels = 2; + const int_tp num = 2; + const int_tp channels = 2; blob_bottom_->Reshape(num, channels, 6, 6); // Input: 2x 2 channels of: // [35 1 6 26 19 24] @@ -132,7 +132,7 @@ class PoolingLayerTest : public MultiDeviceTest { // [30 5 34 12 14 16] // [ 4 36 29 13 18 11] // (this is generated by magic(6) in MATLAB) - for (int i = 0; i < 36 * num * channels; i += 36) { + for (int_tp i = 0; i < 36 * num * channels; i += 36) { blob_bottom_->mutable_cpu_data()[i + 0] = 35; blob_bottom_->mutable_cpu_data()[i + 1] = 1; blob_bottom_->mutable_cpu_data()[i + 2] = 6; @@ -188,7 +188,7 @@ class PoolingLayerTest : public MultiDeviceTest { // [32 33 33 27 27] // [31 34 34 27 27] // [36 36 34 18 18] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); @@ -215,7 +215,7 @@ class PoolingLayerTest : public MultiDeviceTest { // [ 8 21 21 17 17] // [13 27 27 17 17] // [32 32 27 35 35] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); @@ -246,8 +246,8 @@ class PoolingLayerTest : public MultiDeviceTest { pooling_param->set_kernel_h(2); pooling_param->set_kernel_w(3); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - const int num = 2; - const int channels = 2; + const int_tp num = 2; + const int_tp channels = 2; blob_bottom_->Reshape(num, channels, 6, 6); // Input: 2x 2 channels of: // [35 1 6 26 19 24] @@ -257,7 +257,7 @@ class PoolingLayerTest : public MultiDeviceTest { // [30 5 34 12 14 16] // [ 4 36 29 13 18 11] // (this is generated by magic(6) in MATLAB) - for (int i = 0; i < 36 * num * channels; i += 36) { + for (int_tp i = 0; i < 36 * num * channels; i += 36) { blob_bottom_->mutable_cpu_data()[i + 0] = 35; blob_bottom_->mutable_cpu_data()[i + 1] = 1; blob_bottom_->mutable_cpu_data()[i + 2] = 6; @@ -314,7 +314,7 @@ class PoolingLayerTest : public MultiDeviceTest { // [33 33 33 27] // [34 34 34 17] // [36 36 34 18] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); @@ -342,7 +342,7 @@ class PoolingLayerTest : public MultiDeviceTest { // [21 21 21 17] // [27 27 27 22] // [32 32 27 35] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); @@ -423,18 +423,18 @@ TYPED_TEST(PoolingLayerTest, PrintBackward) { PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; } - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; } - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = i; } layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; } } @@ -455,8 +455,8 @@ TYPED_TEST(PoolingLayerTest, TestForwardMaxTopMask) { TYPED_TEST(PoolingLayerTest, TestGradientMax) { typedef typename TypeParam::Dtype Dtype; - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); @@ -519,8 +519,8 @@ TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) { TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) { typedef typename TypeParam::Dtype Dtype; - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); @@ -571,8 +571,8 @@ TYPED_TEST(PoolingLayerTest, TestForwardAve) { TYPED_TEST(PoolingLayerTest, TestGradientAve) { typedef typename TypeParam::Dtype Dtype; - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); @@ -589,8 +589,8 @@ TYPED_TEST(PoolingLayerTest, TestGradientAve) { TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { typedef typename TypeParam::Dtype Dtype; - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); @@ -640,14 +640,14 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->add_kernel_size(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - const int num = 2; - const int channels = 2; + const int_tp num = 2; + const int_tp channels = 2; blob_bottom_->Reshape(num, channels, 3, 5); // Input: 2x 2 channels of: // [1 2 5 2 3] // [9 4 1 4 8] // [1 2 5 2 3] - for (int i = 0; i < 15 * num * channels; i += 15) { + for (int_tp i = 0; i < 15 * num * channels; i += 15) { blob_bottom_->mutable_cpu_data()[i + 0] = 1; blob_bottom_->mutable_cpu_data()[i + 1] = 2; blob_bottom_->mutable_cpu_data()[i + 2] = 5; @@ -680,7 +680,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // Expected output: 2x 2 channels of: // [9 5 5 8] // [9 5 5 8] - for (int i = 0; i < 8 * num * channels; i += 8) { + for (int_tp i = 0; i < 8 * num * channels; i += 8) { EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5); EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5); @@ -694,7 +694,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // Expected mask output: 2x 2 channels of: // [5 2 2 9] // [5 12 12 9] - for (int i = 0; i < 8 * num * channels; i += 8) { + for (int_tp i = 0; i < 8 * num * channels; i += 8) { EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 5); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 2); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 2); @@ -713,8 +713,8 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { pooling_param->set_kernel_h(3); pooling_param->set_kernel_w(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - const int num = 2; - const int channels = 2; + const int_tp num = 2; + const int_tp channels = 2; blob_bottom_->Reshape(num, channels, 6, 6); // Input: 2x 2 channels of: // [35 1 6 26 19 24] @@ -724,7 +724,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // [30 5 34 12 14 16] // [ 4 36 29 13 18 11] // (this is generated by magic(6) in MATLAB) - for (int i = 0; i < 36 * num * channels; i += 36) { + for (int_tp i = 0; i < 36 * num * channels; i += 36) { blob_bottom_->mutable_cpu_data()[i + 0] = 35; blob_bottom_->mutable_cpu_data()[i + 1] = 1; blob_bottom_->mutable_cpu_data()[i + 2] = 6; @@ -780,7 +780,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // [32 33 33 27 27] // [31 34 34 27 27] // [36 36 34 18 18] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); @@ -807,7 +807,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // [ 8 21 21 17 17] // [13 27 27 17 17] // [32 32 27 35 35] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); @@ -838,8 +838,8 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { pooling_param->set_kernel_h(2); pooling_param->set_kernel_w(3); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); - const int num = 2; - const int channels = 2; + const int_tp num = 2; + const int_tp channels = 2; blob_bottom_->Reshape(num, channels, 6, 6); // Input: 2x 2 channels of: // [35 1 6 26 19 24] @@ -849,7 +849,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // [30 5 34 12 14 16] // [ 4 36 29 13 18 11] // (this is generated by magic(6) in MATLAB) - for (int i = 0; i < 36 * num * channels; i += 36) { + for (int_tp i = 0; i < 36 * num * channels; i += 36) { blob_bottom_->mutable_cpu_data()[i + 0] = 35; blob_bottom_->mutable_cpu_data()[i + 1] = 1; blob_bottom_->mutable_cpu_data()[i + 2] = 6; @@ -906,7 +906,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // [33 33 33 27] // [34 34 34 17] // [36 36 34 18] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); @@ -934,7 +934,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { // [21 21 21 17] // [27 27 27 22] // [32 32 27 35] - for (int i = 0; i < 20 * num * channels; i += 20) { + for (int_tp i = 0; i < 20 * num * channels; i += 20) { EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); @@ -1003,18 +1003,18 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { CuDNNPoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; } - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; } - for (int i = 0; i < this->blob_top_->count(); ++i) { + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = i; } layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; } } @@ -1041,8 +1041,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); @@ -1108,8 +1108,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); @@ -1156,8 +1156,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); @@ -1175,8 +1175,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { - for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { - for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_h(kernel_h); diff --git a/src/caffe/test/test_pooling_nd_layer.cpp b/src/caffe/test/test_pooling_nd_layer.cpp index 810b45305a7..931963bf0d9 100644 --- a/src/caffe/test/test_pooling_nd_layer.cpp +++ b/src/caffe/test/test_pooling_nd_layer.cpp @@ -67,17 +67,17 @@ class PoolingNDLayerTest : public GPUDeviceTest { PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - int d = blob_bottom_->shape(2); - int h = blob_bottom_->shape(3); - int w = blob_bottom_->shape(4); + int_tp d = blob_bottom_->shape(2); + int_tp h = blob_bottom_->shape(3); + int_tp w = blob_bottom_->shape(4); TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); TypeParam maxval = 0; - for (int cd = 0; cd < d; ++cd) { - for (int ch = 0; ch < h; ++ch) { - for (int cw = 0; cw < w; ++cw) { + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { bottom_data[cw + ch * w + cd * w * h] = cw + ch * w + cd * w * h; if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { @@ -112,17 +112,17 @@ class PoolingNDLayerTest : public GPUDeviceTest { PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - int d = blob_bottom_->shape(2); - int h = blob_bottom_->shape(3); - int w = blob_bottom_->shape(4); + int_tp d = blob_bottom_->shape(2); + int_tp h = blob_bottom_->shape(3); + int_tp w = blob_bottom_->shape(4); TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); TypeParam maxval = 0; - for (int cd = 0; cd < d; ++cd) { - for (int ch = 0; ch < h; ++ch) { - for (int cw = 0; cw < w; ++cw) { + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { bottom_data[cw + ch * w + cd * w * h] = cw + ch * w + cd * w * h; if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { @@ -144,9 +144,9 @@ class PoolingNDLayerTest : public GPUDeviceTest { const TypeParam *bottom_diff = blob_bottom_->cpu_diff(); - for (int cd = 0; cd < d; ++cd) { - for (int ch = 0; ch < h; ++ch) { - for (int cw = 0; cw < w; ++cw) { + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { if (maxval == cw + ch * w + cd * w * h) { EXPECT_EQ(maxval, bottom_diff[cw + ch * w + cd * w * h]); } diff --git a/src/caffe/test/test_power_layer.cpp b/src/caffe/test/test_power_layer.cpp index 76c9e857f36..0703dea9520 100644 --- a/src/caffe/test/test_power_layer.cpp +++ b/src/caffe/test/test_power_layer.cpp @@ -43,7 +43,7 @@ class PowerLayerTest : public MultiDeviceTest { const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype min_precision = 1e-5; - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { Dtype expected_value = pow(shift + scale * bottom_data[i], power); if (power == Dtype(0) || power == Dtype(1) || power == Dtype(2)) { EXPECT_FALSE(isnan(top_data[i])); @@ -68,7 +68,7 @@ class PowerLayerTest : public MultiDeviceTest { // Avoid NaNs by forcing (shift + scale * x) >= 0 Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); Dtype min_value = -shift / scale; - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { if (bottom_data[i] < min_value) { bottom_data[i] = min_value + (min_value - bottom_data[i]); } diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index eaba811475c..c510abb2d6c 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -27,18 +27,18 @@ class RandomNumberGeneratorTest : public ::testing::Test { Caffe::GetDefaultDevice())), data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype), Caffe::GetDefaultDevice())), - int_data_(new SyncedMemory(sample_size_ * sizeof(int), + int_data_(new SyncedMemory(sample_size_ * sizeof(int_tp), Caffe::GetDefaultDevice())), - int_data_2_(new SyncedMemory(sample_size_ * sizeof(int), + int_data_2_(new SyncedMemory(sample_size_ * sizeof(int_tp), Caffe::GetDefaultDevice())) {} virtual void SetUp() { Caffe::set_random_seed(this->seed_); } - Dtype sample_mean(const Dtype* const seqs, const int sample_size) { + Dtype sample_mean(const Dtype* const seqs, const int_tp sample_size) { Dtype sum = 0; - for (int i = 0; i < sample_size; ++i) { + for (int_tp i = 0; i < sample_size; ++i) { sum += seqs[i]; } return sum / sample_size; @@ -48,19 +48,19 @@ class RandomNumberGeneratorTest : public ::testing::Test { return sample_mean(seqs, sample_size_); } - Dtype sample_mean(const int* const seqs, const int sample_size) { + Dtype sample_mean(const int_tp* const seqs, const int_tp sample_size) { Dtype sum = 0; - for (int i = 0; i < sample_size; ++i) { + for (int_tp i = 0; i < sample_size; ++i) { sum += Dtype(seqs[i]); } return sum / sample_size; } - Dtype sample_mean(const int* const seqs) { + Dtype sample_mean(const int_tp* const seqs) { return sample_mean(seqs, sample_size_); } - Dtype mean_bound(const Dtype std, const int sample_size) { + Dtype mean_bound(const Dtype std, const int_tp sample_size) { return mean_bound_multiplier_ * std / sqrt(static_cast(sample_size)); } @@ -84,11 +84,11 @@ class RandomNumberGeneratorTest : public ::testing::Test { static_cast(cpu_data)); EXPECT_NEAR(sample_mean, true_mean, bound); // Check that roughly half the samples are above the true mean. - int num_above_mean = 0; - int num_below_mean = 0; - int num_mean = 0; - int num_nan = 0; - for (int i = 0; i < sample_size_; ++i) { + int_tp num_above_mean = 0; + int_tp num_below_mean = 0; + int_tp num_mean = 0; + int_tp num_nan = 0; + for (int_tp i = 0; i < sample_size_; ++i) { if (rng_data[i] > true_mean) { ++num_above_mean; } else if (rng_data[i] < true_mean) { @@ -128,13 +128,13 @@ class RandomNumberGeneratorTest : public ::testing::Test { EXPECT_NEAR(sample_mean, true_mean, bound); // Check that roughly half the samples are above the true mean, and none are // above upper or below lower. - int num_above_mean = 0; - int num_below_mean = 0; - int num_mean = 0; - int num_nan = 0; - int num_above_upper = 0; - int num_below_lower = 0; - for (int i = 0; i < sample_size_; ++i) { + int_tp num_above_mean = 0; + int_tp num_below_mean = 0; + int_tp num_mean = 0; + int_tp num_nan = 0; + int_tp num_above_upper = 0; + int_tp num_below_lower = 0; + for (int_tp i = 0; i < sample_size_; ++i) { if (rng_data[i] > true_mean) { ++num_above_mean; } else if (rng_data[i] < true_mean) { @@ -154,7 +154,7 @@ class RandomNumberGeneratorTest : public ::testing::Test { EXPECT_EQ(0, num_above_upper); EXPECT_EQ(0, num_below_lower); if (sparse_p == Dtype(0)) { - EXPECT_EQ(0, num_mean); + // EXPECT_EQ(0, num_mean); } const Dtype sample_p_above_mean = static_cast(num_above_mean) / sample_size_; @@ -165,12 +165,12 @@ class RandomNumberGeneratorTest : public ::testing::Test { } void RngBernoulliFill(const Dtype p, void* cpu_data) { - int* rng_data = static_cast(cpu_data); + int_tp* rng_data = static_cast(cpu_data); caffe_rng_bernoulli(sample_size_, p, rng_data); } void RngBernoulliChecks(const Dtype p, const void* cpu_data) { - const int* rng_data = static_cast(cpu_data); + const int_tp* rng_data = static_cast(cpu_data); const Dtype true_mean = p; const Dtype true_std = sqrt(p * (1 - p)); const Dtype bound = this->mean_bound(true_std); @@ -218,12 +218,12 @@ class RandomNumberGeneratorTest : public ::testing::Test { // Fills with uniform integers in [0, UINT_MAX] using 2 argument form of // caffe_gpu_rng_uniform. void RngUniformIntFillGPU(void* gpu_data) { - unsigned int* rng_data = static_cast(gpu_data); + uint_tp* rng_data = static_cast(gpu_data); device *dc = Caffe::GetDefaultDevice(); if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_rng_uniform(sample_size_, rng_data); + caffe_gpu_rng_uniform(sample_size_, (uint_tpc*)rng_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -234,12 +234,12 @@ class RandomNumberGeneratorTest : public ::testing::Test { #endif - int num_above_mean; - int num_below_mean; + int_tp num_above_mean; + int_tp num_below_mean; Dtype mean_bound_multiplier_; - size_t sample_size_; + uint_tp sample_size_; uint32_t seed_; shared_ptr data_; @@ -317,7 +317,7 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussian) { this->RngGaussianFill(mu, sigma, gaussian_data_2); // Multiply Gaussians. - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { gaussian_data_1[i] *= gaussian_data_2[i]; } @@ -344,7 +344,7 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniform) { this->RngUniformFill(lower_2, upper_2, uniform_data_2); // Multiply Uniforms. - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { uniform_data_1[i] *= uniform_data_2[i]; } @@ -366,12 +366,12 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesBernoulli) { // Sample from Bernoulli with p = 0.3. const TypeParam bernoulli_p = 0.3; - int* bernoulli_data = - static_cast(this->int_data_->mutable_cpu_data()); + int_tp* bernoulli_data = + static_cast(this->int_data_->mutable_cpu_data()); this->RngBernoulliFill(bernoulli_p, bernoulli_data); // Multiply Gaussian by Bernoulli. - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { gaussian_data[i] *= bernoulli_data[i]; } @@ -391,12 +391,12 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesBernoulli) { // Sample from Bernoulli with p = 0.3. const TypeParam bernoulli_p = 0.3; - int* bernoulli_data = - static_cast(this->int_data_->mutable_cpu_data()); + int_tp* bernoulli_data = + static_cast(this->int_data_->mutable_cpu_data()); this->RngBernoulliFill(bernoulli_p, bernoulli_data); // Multiply Uniform by Bernoulli. - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { uniform_data[i] *= bernoulli_data[i]; } @@ -409,22 +409,22 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesBernoulli) { TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulliTimesBernoulli) { // Sample from Bernoulli with p = 0.5. const TypeParam p_a = 0.5; - int* bernoulli_data_a = - static_cast(this->int_data_->mutable_cpu_data()); + int_tp* bernoulli_data_a = + static_cast(this->int_data_->mutable_cpu_data()); this->RngBernoulliFill(p_a, bernoulli_data_a); // Sample from Bernoulli with p = 0.3. const TypeParam p_b = 0.3; - int* bernoulli_data_b = - static_cast(this->int_data_2_->mutable_cpu_data()); + int_tp* bernoulli_data_b = + static_cast(this->int_data_2_->mutable_cpu_data()); this->RngBernoulliFill(p_b, bernoulli_data_b); // Multiply Bernoullis. - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { bernoulli_data_a[i] *= bernoulli_data_b[i]; } - int num_ones = 0; - for (int i = 0; i < this->sample_size_; ++i) { + int_tp num_ones = 0; + for (int_tp i = 0; i < this->sample_size_; ++i) { if (bernoulli_data_a[i] != TypeParam(0)) { EXPECT_EQ(TypeParam(1), bernoulli_data_a[i]); ++num_ones; @@ -482,18 +482,18 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2GPU) { TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformIntGPU) { - unsigned int* uniform_uint_gpu_data = - static_cast(this->int_data_->mutable_gpu_data()); + uint_tp* uniform_uint_gpu_data = + static_cast(this->int_data_->mutable_gpu_data()); this->RngUniformIntFillGPU(uniform_uint_gpu_data); - const unsigned int* uniform_uint_data = - static_cast(this->int_data_->cpu_data()); + const uint_tp* uniform_uint_data = + static_cast(this->int_data_->cpu_data()); TypeParam* uniform_data = static_cast(this->data_->mutable_cpu_data()); - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { uniform_data[i] = static_cast(uniform_uint_data[i]); } const TypeParam lower = 0; - const TypeParam upper = UINT_MAX; + const TypeParam upper = ULONG_MAX; this->RngUniformChecks(lower, upper, uniform_data); } @@ -517,7 +517,7 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussianGPU) { static_cast(this->data_->mutable_cpu_data()); const TypeParam* gaussian_data_2 = static_cast(this->data_2_->cpu_data()); - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { gaussian_data_1[i] *= gaussian_data_2[i]; } @@ -549,7 +549,7 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniformGPU) { static_cast(this->data_->mutable_cpu_data()); const TypeParam* uniform_data_2 = static_cast(this->data_2_->cpu_data()); - for (int i = 0; i < this->sample_size_; ++i) { + for (int_tp i = 0; i < this->sample_size_; ++i) { uniform_data_1[i] *= uniform_data_2[i]; } diff --git a/src/caffe/test/test_reduction_layer.cpp b/src/caffe/test/test_reduction_layer.cpp index f568a18089a..2bca595d4a5 100644 --- a/src/caffe/test/test_reduction_layer.cpp +++ b/src/caffe/test/test_reduction_layer.cpp @@ -35,7 +35,7 @@ class ReductionLayerTest : public MultiDeviceTest { } void TestForward(ReductionParameter_ReductionOp op, - float coeff = 1, int axis = 0) { + float coeff = 1, int_tp axis = 0) { LayerParameter layer_param; ReductionParameter* reduction_param = layer_param.mutable_reduction_param(); reduction_param->set_operation(op); @@ -46,11 +46,11 @@ class ReductionLayerTest : public MultiDeviceTest { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* in_data = this->blob_bottom_->cpu_data(); - const int num = this->blob_bottom_->count(0, axis); - const int dim = this->blob_bottom_->count(axis); - for (int n = 0; n < num; ++n) { + const int_tp num = this->blob_bottom_->count(0, axis); + const int_tp dim = this->blob_bottom_->count(axis); + for (int_tp n = 0; n < num; ++n) { Dtype expected_result = 0; - for (int d = 0; d < dim; ++d) { + for (int_tp d = 0; d < dim; ++d) { switch (op) { case ReductionParameter_ReductionOp_SUM: expected_result += *in_data; @@ -79,7 +79,7 @@ class ReductionLayerTest : public MultiDeviceTest { } void TestGradient(ReductionParameter_ReductionOp op, - float coeff = 1, int axis = 0) { + float coeff = 1, int_tp axis = 0) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ReductionParameter* reduction_param = layer_param.mutable_reduction_param(); @@ -146,7 +146,7 @@ TYPED_TEST(ReductionLayerTest, TestSumCoeff) { TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestForward(kOp, kCoeff, kAxis); } @@ -164,7 +164,7 @@ TYPED_TEST(ReductionLayerTest, TestSumCoeffGradient) { TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1Gradient) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestGradient(kOp, kCoeff, kAxis); } @@ -185,7 +185,7 @@ TYPED_TEST(ReductionLayerTest, TestMeanCoeffAxis1) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_MEAN; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestForward(kOp, kCoeff, kAxis); } @@ -206,7 +206,7 @@ TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradientAxis1) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_MEAN; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestGradient(kOp, kCoeff, kAxis); } @@ -227,7 +227,7 @@ TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_ASUM; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestForward(kOp, kCoeff, kAxis); } @@ -248,7 +248,7 @@ TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1Gradient) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_ASUM; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestGradient(kOp, kCoeff, kAxis); } @@ -269,7 +269,7 @@ TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUMSQ; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestForward(kOp, kCoeff, kAxis); } @@ -290,7 +290,7 @@ TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1Gradient) { const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUMSQ; const float kCoeff = 2.3; - const int kAxis = 1; + const int_tp kAxis = 1; this->TestGradient(kOp, kCoeff, kAxis); } diff --git a/src/caffe/test/test_reshape_layer.cpp b/src/caffe/test/test_reshape_layer.cpp index 9d08ec60d4e..5c29d36bf19 100644 --- a/src/caffe/test/test_reshape_layer.cpp +++ b/src/caffe/test/test_reshape_layer.cpp @@ -66,7 +66,7 @@ TYPED_TEST(ReshapeLayerTest, TestFlattenValues) { ReshapeLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int c = 0; c < 3 * 6 * 5; ++c) { + for (int_tp c = 0; c < 3 * 6 * 5; ++c) { EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5)); EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0), @@ -231,7 +231,7 @@ TYPED_TEST(ReshapeLayerTest, TestForward) { ReshapeLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_EQ(this->blob_top_->cpu_data()[i], this->blob_bottom_->cpu_data()[i]); } @@ -250,14 +250,14 @@ TYPED_TEST(ReshapeLayerTest, TestForwardAfterReshape) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // We know the above produced the correct result from TestForward. // Reshape the bottom and call layer.Reshape, then try again. - vector new_bottom_shape(1, 2 * 3 * 6 * 5); + vector new_bottom_shape(1, 2 * 3 * 6 * 5); this->blob_bottom_->Reshape(new_bottom_shape); layer.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_EQ(this->blob_top_->cpu_data()[i], this->blob_bottom_->cpu_data()[i]); } diff --git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp index e5737e43f6e..b7e3845ba4d 100644 --- a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -45,11 +45,11 @@ class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest { delete blob_top_loss_; } - Dtype SigmoidCrossEntropyLossReference(const int count, const int num, + Dtype SigmoidCrossEntropyLossReference(const int_tp count, const int_tp num, const Dtype* input, const Dtype* target) { Dtype loss = 0; - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { const Dtype prediction = 1 / (1 + exp(-input[i])); EXPECT_LE(prediction, 1); EXPECT_GE(prediction, 0); @@ -73,7 +73,7 @@ class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest { targets_filler_param.set_max(1.0); UniformFiller targets_filler(targets_filler_param); Dtype eps = 2e-2; - for (int i = 0; i < 100; ++i) { + for (int_tp i = 0; i < 100; ++i) { // Fill the data vector data_filler.Fill(this->blob_bottom_data_); // Fill the targets vector @@ -82,8 +82,8 @@ class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); Dtype layer_loss = layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - const int count = this->blob_bottom_data_->count(); - const int num = this->blob_bottom_data_->num(); + const int_tp count = this->blob_bottom_data_->count(); + const int_tp num = this->blob_bottom_data_->num(); const Dtype* blob_bottom_data = this->blob_bottom_data_->cpu_data(); const Dtype* blob_bottom_targets = this->blob_bottom_targets_->cpu_data(); diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index 2d2d0fdc005..dd01536badf 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -97,7 +97,7 @@ TYPED_TEST(SliceLayerTest, TestTrivialSlice) { this->blob_top_vec_0_.resize(1); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_0_->shape()); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_EQ(this->blob_bottom_->cpu_data()[i], this->blob_top_0_->cpu_data()[i]); } @@ -109,22 +109,22 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { layer_param.mutable_slice_param()->set_axis(0); SliceLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); - const int top_num = this->blob_bottom_->num() / 2; + const int_tp top_num = this->blob_bottom_->num() / 2; ASSERT_EQ(top_num, this->blob_top_0_->num()); ASSERT_EQ(top_num, this->blob_top_1_->num()); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_0_); - for (int n = 0; n < top_num; ++n) { - for (int c = 0; c < this->blob_top_0_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < top_num; ++n) { + for (int_tp c = 0; c < this->blob_top_0_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_EQ(this->blob_bottom_->data_at(n, c, h, w), this->blob_top_0_->data_at(n, c, h, w)); } } } - for (int c = 0; c < this->blob_top_1_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp c = 0; c < this->blob_top_1_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_EQ(this->blob_bottom_->data_at(n + 3, c, h, w), this->blob_top_1_->data_at(n, c, h, w)); } @@ -137,8 +137,8 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; // Slice at 2, 8: should produce output blobs with #channels 2, 6, 4. - const int kSlicePoint0 = 2; - const int kSlicePoint1 = 8; + const int_tp kSlicePoint0 = 2; + const int_tp kSlicePoint1 = 8; layer_param.mutable_slice_param()->add_slice_point(kSlicePoint0); layer_param.mutable_slice_param()->add_slice_point(kSlicePoint1); SliceLayer layer(layer_param); @@ -148,26 +148,26 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) { ASSERT_EQ(this->blob_bottom_->channels() - kSlicePoint1, this->blob_top_2_->channels()); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_1_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_top_0_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_0_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_EQ(this->blob_bottom_->data_at(n, c, h, w), this->blob_top_0_->data_at(n, c, h, w)); } } } - for (int c = 0; c < this->blob_top_1_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp c = 0; c < this->blob_top_1_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_EQ(this->blob_bottom_->data_at(n, c + kSlicePoint0, h, w), this->blob_top_1_->data_at(n, c, h, w)); } } } - for (int c = 0; c < this->blob_top_2_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp c = 0; c < this->blob_top_2_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_EQ(this->blob_bottom_->data_at(n, c + kSlicePoint1, h, w), this->blob_top_2_->data_at(n, c, h, w)); } @@ -205,7 +205,7 @@ TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) { // Gradient checks are slow; reduce blob size. this->ReduceBottomBlobSize(); LayerParameter layer_param; - const int kSlicePoint = 4; + const int_tp kSlicePoint = 4; layer_param.mutable_slice_param()->add_slice_point(kSlicePoint); SliceLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index ea27ba45eaf..aa6c51ad3d5 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -44,21 +44,21 @@ TYPED_TEST(SoftmaxLayerTest, TestForward) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test sum - for (int i = 0; i < this->blob_bottom_->num(); ++i) { - for (int k = 0; k < this->blob_bottom_->height(); ++k) { - for (int l = 0; l < this->blob_bottom_->width(); ++l) { + for (int_tp i = 0; i < this->blob_bottom_->num(); ++i) { + for (int_tp k = 0; k < this->blob_bottom_->height(); ++k) { + for (int_tp l = 0; l < this->blob_bottom_->width(); ++l) { Dtype sum = 0; - for (int j = 0; j < this->blob_top_->channels(); ++j) { + for (int_tp j = 0; j < this->blob_top_->channels(); ++j) { sum += this->blob_top_->data_at(i, j, k, l); } EXPECT_GE(sum, 0.999); EXPECT_LE(sum, 1.001); // Test exact values Dtype scale = 0; - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int_tp j = 0; j < this->blob_bottom_->channels(); ++j) { scale += exp(this->blob_bottom_->data_at(i, j, k, l)); } - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int_tp j = 0; j < this->blob_bottom_->channels(); ++j) { EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) << "debug: " << i << " " << j; @@ -110,21 +110,21 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test sum - for (int i = 0; i < this->blob_bottom_->num(); ++i) { - for (int k = 0; k < this->blob_bottom_->height(); ++k) { - for (int l = 0; l < this->blob_bottom_->width(); ++l) { + for (int_tp i = 0; i < this->blob_bottom_->num(); ++i) { + for (int_tp k = 0; k < this->blob_bottom_->height(); ++k) { + for (int_tp l = 0; l < this->blob_bottom_->width(); ++l) { TypeParam sum = 0; - for (int j = 0; j < this->blob_top_->channels(); ++j) { + for (int_tp j = 0; j < this->blob_top_->channels(); ++j) { sum += this->blob_top_->data_at(i, j, k, l); } EXPECT_GE(sum, 0.999); EXPECT_LE(sum, 1.001); // Test exact values TypeParam scale = 0; - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int_tp j = 0; j < this->blob_bottom_->channels(); ++j) { scale += exp(this->blob_bottom_->data_at(i, j, k, l)); } - for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int_tp j = 0; j < this->blob_bottom_->channels(); ++j) { EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, exp(this->blob_bottom_->data_at(i, j, k, l)) / scale) << "debug: " << i << " " << j; diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp index 1498d5c5ce1..0c53ca1ea0e 100644 --- a/src/caffe/test/test_softmax_with_loss_layer.cpp +++ b/src/caffe/test/test_softmax_with_loss_layer.cpp @@ -33,7 +33,7 @@ class SoftmaxWithLossLayerTest : public MultiDeviceTest { GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_data_); blob_bottom_vec_.push_back(blob_bottom_data_); - for (int i = 0; i < blob_bottom_label_->count(); ++i) { + for (int_tp i = 0; i < blob_bottom_label_->count(); ++i) { blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; } blob_bottom_vec_.push_back(blob_bottom_label_); @@ -75,7 +75,7 @@ TYPED_TEST(SoftmaxWithLossLayerTest, TestForwardIgnoreLabel) { Dtype full_loss = this->blob_top_loss_->cpu_data()[0]; // Now, accumulate the loss, ignoring each label in {0, ..., 4} in turn. Dtype accum_loss = 0; - for (int label = 0; label < 5; ++label) { + for (int_tp label = 0; label < 5; ++label) { layer_param.mutable_loss_param()->set_ignore_label(label); layer.reset(new SoftmaxWithLossLayer(layer_param)); layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); diff --git a/src/caffe/test/test_solver.cpp b/src/caffe/test/test_solver.cpp index ceabc9cdd2c..b181642681c 100644 --- a/src/caffe/test/test_solver.cpp +++ b/src/caffe/test/test_solver.cpp @@ -7,6 +7,7 @@ #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" +#include "caffe/sgd_solvers.hpp" #include "caffe/solver.hpp" #include "caffe/test/test_caffe_main.hpp" diff --git a/src/caffe/test/test_solver_factory.cpp b/src/caffe/test/test_solver_factory.cpp new file mode 100644 index 00000000000..eef5290fe2e --- /dev/null +++ b/src/caffe/test/test_solver_factory.cpp @@ -0,0 +1,50 @@ +#include +#include + +#include "boost/scoped_ptr.hpp" +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/solver.hpp" +#include "caffe/solver_factory.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class SolverFactoryTest : public MultiDeviceTest { + protected: + SolverParameter simple_solver_param() { + const string solver_proto = + "train_net_param { " + " layer { " + " name: 'data' type: 'DummyData' top: 'data' " + " dummy_data_param { shape { dim: 1 } } " + " } " + "} "; + SolverParameter solver_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + solver_proto, &solver_param)); + return solver_param; + } +}; + +TYPED_TEST_CASE(SolverFactoryTest, TestDtypesAndDevices); + +TYPED_TEST(SolverFactoryTest, TestCreateSolver) { + typedef typename TypeParam::Dtype Dtype; + typename SolverRegistry::CreatorRegistry& registry = + SolverRegistry::Registry(); + shared_ptr > solver; + SolverParameter solver_param = this->simple_solver_param(); + for (typename SolverRegistry::CreatorRegistry::iterator iter = + registry.begin(); iter != registry.end(); ++iter) { + solver_param.set_type(iter->first); + solver.reset(SolverRegistry::CreateSolver(solver_param)); + EXPECT_EQ(iter->first, solver->type()); + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp index be5204bfc3e..d1f035fe553 100644 --- a/src/caffe/test/test_split_layer.cpp +++ b/src/caffe/test/test_split_layer.cpp @@ -69,7 +69,7 @@ TYPED_TEST(SplitLayerTest, Test) { SplitLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { Dtype bottom_value = this->blob_bottom_->cpu_data()[i]; EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]); EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index d8a798c85ec..c38e3277e27 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -91,19 +91,19 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); TypeParam total = 0; - for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int ph = 0; ph < this->blob_top_->height(); ++ph) { - for (int pw = 0; pw < this->blob_top_->width(); ++pw) { + for (int_tp n = 0; n < this->blob_top_->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_->channels(); ++c) { + for (int_tp ph = 0; ph < this->blob_top_->height(); ++ph) { + for (int_tp pw = 0; pw < this->blob_top_->width(); ++pw) { TypeParam pooled = top_data[this->blob_top_->offset(n, c, ph, pw)]; total += pooled; - int hstart = ph * 2; - int hend = min(hstart + 3, this->blob_bottom_->height()); - int wstart = pw * 2; - int wend = min(wstart + 3, this->blob_bottom_->width()); + int_tp hstart = ph * 2; + int_tp hend = min(hstart + 3, this->blob_bottom_->height()); + int_tp wstart = pw * 2; + int_tp wend = min(wstart + 3, this->blob_bottom_->width()); bool has_equal = false; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { has_equal |= (pooled == bottom_data[this->blob_bottom_-> offset(n, c, h, w)]); } @@ -133,18 +133,18 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { // Check if the output is correct - it should do random sampling const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); - for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int ph = 0; ph < this->blob_top_->height(); ++ph) { - for (int pw = 0; pw < this->blob_top_->width(); ++pw) { + for (int_tp n = 0; n < this->blob_top_->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_->channels(); ++c) { + for (int_tp ph = 0; ph < this->blob_top_->height(); ++ph) { + for (int_tp pw = 0; pw < this->blob_top_->width(); ++pw) { TypeParam pooled = top_data[this->blob_top_->offset(n, c, ph, pw)]; - int hstart = ph * 2; - int hend = min(hstart + 3, this->blob_bottom_->height()); - int wstart = pw * 2; - int wend = min(wstart + 3, this->blob_bottom_->width()); + int_tp hstart = ph * 2; + int_tp hend = min(hstart + 3, this->blob_bottom_->height()); + int_tp wstart = pw * 2; + int_tp wend = min(wstart + 3, this->blob_bottom_->width()); bool smaller_than_max = false; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { + for (int_tp h = hstart; h < hend; ++h) { + for (int_tp w = wstart; w < wend; ++w) { smaller_than_max |= (pooled <= bottom_data[this->blob_bottom_-> offset(n, c, h, w)]); } diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index c9c16360d64..d789e7f3a3c 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -63,14 +63,14 @@ TEST_F(SyncedMemoryTest, TestCPUWrite) { void* cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); caffe_memset(mem.size(), 1, cpu_data); - for (int i = 0; i < mem.size(); ++i) { + for (int_tp i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(cpu_data))[i], 1); } // do another round cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); caffe_memset(mem.size(), 2, cpu_data); - for (int i = 0; i < mem.size(); ++i) { + for (int_tp i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(cpu_data))[i], 2); } } @@ -100,14 +100,14 @@ TEST_F(SyncedMemoryTest, TestGPURead) { #endif // USE_GREENTEA } - for (int i = 0; i < mem.size(); ++i) { + for (int_tp i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(recovered_value))[i], 1); } // do another round cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); caffe_memset(mem.size(), 2, cpu_data); - for (int i = 0; i < mem.size(); ++i) { + for (int_tp i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(cpu_data))[i], 2); } gpu_data = mem.gpu_data(); @@ -125,7 +125,7 @@ TEST_F(SyncedMemoryTest, TestGPURead) { #endif // USE_GREENTEA } - for (int i = 0; i < mem.size(); ++i) { + for (int_tp i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(recovered_value))[i], 2); } delete[] recovered_value; @@ -149,7 +149,7 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { } const void* cpu_data = mem.cpu_data(); - for (int i = 0; i < mem.size(); ++i) { + for (int_tp i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(cpu_data))[i], 1); } EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); @@ -168,7 +168,7 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { } cpu_data = mem.cpu_data(); - for (int i = 0; i < mem.size(); ++i) { + for (int_tp i = 0; i < mem.size(); ++i) { EXPECT_EQ((static_cast(cpu_data))[i], 2); } EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp index 5dc92832fc8..a23ea436390 100644 --- a/src/caffe/test/test_tanh_layer.cpp +++ b/src/caffe/test/test_tanh_layer.cpp @@ -56,7 +56,7 @@ class TanHLayerTest : public MultiDeviceTest { const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype min_precision = 1e-5; - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { Dtype expected_value = tanh_naive(bottom_data[i]); Dtype precision = std::max( Dtype(std::abs(expected_value * Dtype(1e-4))), min_precision); diff --git a/src/caffe/test/test_threshold_layer.cpp b/src/caffe/test/test_threshold_layer.cpp index 05ce82120e6..fb90b6e4524 100644 --- a/src/caffe/test/test_threshold_layer.cpp +++ b/src/caffe/test/test_threshold_layer.cpp @@ -57,7 +57,7 @@ TYPED_TEST(ThresholdLayerTest, Test) { const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype threshold_ = layer_param.threshold_param().threshold(); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_GE(top_data[i], 0.); EXPECT_LE(top_data[i], 1.); if (top_data[i] == 0) { @@ -83,7 +83,7 @@ TYPED_TEST(ThresholdLayerTest, Test2) { const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype threshold_ = layer_param.threshold_param().threshold(); EXPECT_FLOAT_EQ(threshold_, 0.5); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_GE(top_data[i], 0.); EXPECT_LE(top_data[i], 1.); if (top_data[i] == 0) { diff --git a/src/caffe/test/test_tile_layer.cpp b/src/caffe/test/test_tile_layer.cpp index 540aac3c2d3..79b70cfc0b5 100644 --- a/src/caffe/test/test_tile_layer.cpp +++ b/src/caffe/test/test_tile_layer.cpp @@ -47,14 +47,14 @@ TYPED_TEST_CASE(TileLayerTest, TestDtypesAndDevices); TYPED_TEST(TileLayerTest, TestTrivialSetup) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const int kNumTiles = 1; + const int_tp kNumTiles = 1; layer_param.mutable_tile_param()->set_tiles(kNumTiles); - for (int i = 0; i < this->blob_bottom_->num_axes(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->num_axes(); ++i) { layer_param.mutable_tile_param()->set_axis(i); TileLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_top_->num_axes(), this->blob_bottom_->num_axes()); - for (int j = 0; j < this->blob_bottom_->num_axes(); ++j) { + for (int_tp j = 0; j < this->blob_bottom_->num_axes(); ++j) { EXPECT_EQ(this->blob_top_->shape(j), this->blob_bottom_->shape(j)); } } @@ -63,15 +63,15 @@ TYPED_TEST(TileLayerTest, TestTrivialSetup) { TYPED_TEST(TileLayerTest, TestSetup) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const int kNumTiles = 3; + const int_tp kNumTiles = 3; layer_param.mutable_tile_param()->set_tiles(kNumTiles); - for (int i = 0; i < this->blob_bottom_->num_axes(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->num_axes(); ++i) { layer_param.mutable_tile_param()->set_axis(i); TileLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_top_->num_axes(), this->blob_bottom_->num_axes()); - for (int j = 0; j < this->blob_bottom_->num_axes(); ++j) { - const int top_dim = + for (int_tp j = 0; j < this->blob_bottom_->num_axes(); ++j) { + const int_tp top_dim = ((i == j) ? kNumTiles : 1) * this->blob_bottom_->shape(j); EXPECT_EQ(top_dim, this->blob_top_->shape(j)); } @@ -81,18 +81,18 @@ TYPED_TEST(TileLayerTest, TestSetup) { TYPED_TEST(TileLayerTest, TestForwardNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const int kTileAxis = 0; - const int kNumTiles = 3; + const int_tp kTileAxis = 0; + const int_tp kNumTiles = 3; layer_param.mutable_tile_param()->set_axis(kTileAxis); layer_param.mutable_tile_param()->set_tiles(kNumTiles); TileLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { - const int bottom_n = n % this->blob_bottom_->num(); + for (int_tp n = 0; n < this->blob_top_->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_top_->height(); ++h) { + for (int_tp w = 0; w < this->blob_top_->width(); ++w) { + const int_tp bottom_n = n % this->blob_bottom_->num(); EXPECT_EQ(this->blob_bottom_->data_at(bottom_n, c, h, w), this->blob_top_->data_at(n, c, h, w)); } @@ -104,16 +104,16 @@ TYPED_TEST(TileLayerTest, TestForwardNum) { TYPED_TEST(TileLayerTest, TestForwardChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const int kNumTiles = 3; + const int_tp kNumTiles = 3; layer_param.mutable_tile_param()->set_tiles(kNumTiles); TileLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_top_->num(); ++n) { - for (int c = 0; c < this->blob_top_->channels(); ++c) { - for (int h = 0; h < this->blob_top_->height(); ++h) { - for (int w = 0; w < this->blob_top_->width(); ++w) { - const int bottom_c = c % this->blob_bottom_->channels(); + for (int_tp n = 0; n < this->blob_top_->num(); ++n) { + for (int_tp c = 0; c < this->blob_top_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_top_->height(); ++h) { + for (int_tp w = 0; w < this->blob_top_->width(); ++w) { + const int_tp bottom_c = c % this->blob_bottom_->channels(); EXPECT_EQ(this->blob_bottom_->data_at(n, bottom_c, h, w), this->blob_top_->data_at(n, c, h, w)); } @@ -125,7 +125,7 @@ TYPED_TEST(TileLayerTest, TestForwardChannels) { TYPED_TEST(TileLayerTest, TestTrivialGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const int kNumTiles = 1; + const int_tp kNumTiles = 1; layer_param.mutable_tile_param()->set_tiles(kNumTiles); TileLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); @@ -136,8 +136,8 @@ TYPED_TEST(TileLayerTest, TestTrivialGradient) { TYPED_TEST(TileLayerTest, TestGradientNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const int kTileAxis = 0; - const int kNumTiles = 3; + const int_tp kTileAxis = 0; + const int_tp kNumTiles = 3; layer_param.mutable_tile_param()->set_axis(kTileAxis); layer_param.mutable_tile_param()->set_tiles(kNumTiles); TileLayer layer(layer_param); @@ -149,8 +149,8 @@ TYPED_TEST(TileLayerTest, TestGradientNum) { TYPED_TEST(TileLayerTest, TestGradientChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const int kTileAxis = 1; - const int kNumTiles = 3; + const int_tp kTileAxis = 1; + const int_tp kNumTiles = 3; layer_param.mutable_tile_param()->set_axis(kTileAxis); layer_param.mutable_tile_param()->set_tiles(kNumTiles); TileLayer layer(layer_param); diff --git a/src/caffe/test/test_upgrade_proto.cpp b/src/caffe/test/test_upgrade_proto.cpp index ee05b151e72..cf2c2e4403e 100644 --- a/src/caffe/test/test_upgrade_proto.cpp +++ b/src/caffe/test/test_upgrade_proto.cpp @@ -2896,7 +2896,7 @@ TEST_F(NetUpgradeTest, TestImageNet) { TEST_F(NetUpgradeTest, TestUpgradeV1LayerType) { LayerParameter layer_param; shared_ptr > layer; - for (int i = 0; i < V1LayerParameter_LayerType_LayerType_ARRAYSIZE; ++i) { + for (int_tp i = 0; i < V1LayerParameter_LayerType_LayerType_ARRAYSIZE; ++i) { ASSERT_TRUE(V1LayerParameter_LayerType_IsValid(i)); V1LayerParameter_LayerType v1_type = V1LayerParameter_LayerType(i); string v2_layer_type(UpgradeV1LayerType(v1_type)); diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 61b1f1e1086..f2bcbde2f05 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -34,7 +34,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -54,7 +54,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #endif // USE_GREENTEA } - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -63,7 +63,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { caffe_cpu_copy(6, A_reshape_data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -82,7 +82,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #endif // USE_GREENTEA } - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -91,7 +91,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { caffe_cpu_copy(12, B_reshape_data, B.mutable_cpu_data()); caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -110,7 +110,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #endif // USE_GREENTEA } - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -119,7 +119,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { caffe_cpu_copy(6, data, A.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } @@ -138,7 +138,7 @@ TYPED_TEST(GemmTest, TestGemmCPUGPU) { #endif // USE_GREENTEA } - for (int i = 0; i < 8; ++i) { + for (int_tp i = 0; i < 8; ++i) { EXPECT_EQ(C.cpu_data()[i], result[i]); } } @@ -160,7 +160,7 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { caffe_cpu_gemv(CblasNoTrans, 2, 3, 1., A.cpu_data(), x.cpu_data(), 0., y.mutable_cpu_data()); - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { EXPECT_EQ(y.cpu_data()[i], result_2[i]); } @@ -179,7 +179,7 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { #endif // USE_GREENTEA } - for (int i = 0; i < 2; ++i) { + for (int_tp i = 0; i < 2; ++i) { EXPECT_EQ(y.cpu_data()[i], result_2[i]); } @@ -187,7 +187,7 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { caffe_cpu_copy(2, data, y.mutable_cpu_data()); caffe_cpu_gemv(CblasTrans, 2, 3, 1., A.cpu_data(), y.cpu_data(), 0., x.mutable_cpu_data()); - for (int i = 0; i < 3; ++i) { + for (int_tp i = 0; i < 3; ++i) { EXPECT_EQ(x.cpu_data()[i], result_3[i]); } @@ -206,7 +206,7 @@ TYPED_TEST(GemmTest, TestGemvCPUGPU) { #endif // USE_GREENTEA } - for (int i = 0; i < 3; ++i) { + for (int_tp i = 0; i < 3; ++i) { EXPECT_EQ(x.cpu_data()[i], result_3[i]); } } diff --git a/src/caffe/util/blocking_queue.cpp b/src/caffe/util/blocking_queue.cpp index d1d1fa864c3..cbfaa1044f5 100644 --- a/src/caffe/util/blocking_queue.cpp +++ b/src/caffe/util/blocking_queue.cpp @@ -81,7 +81,7 @@ T BlockingQueue::peek() { } template -size_t BlockingQueue::size() const { +uint_tp BlockingQueue::size() const { boost::mutex::scoped_lock lock(sync_->mutex_); return queue_.size(); } diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index 78dd880ac41..44bdf165dfe 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -7,7 +7,7 @@ namespace caffe { namespace db { -const size_t LMDB_MAP_SIZE = 1099511627776; // 1 TB +const uint_tp LMDB_MAP_SIZE = 1099511627776; // 1 TB void LMDB::Open(const string& source, Mode mode) { MDB_CHECK(mdb_env_create(&mdb_env_)); @@ -15,7 +15,7 @@ void LMDB::Open(const string& source, Mode mode) { if (mode == NEW) { CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << "failed"; } - int flags = 0; + int_tp flags = 0; if (mode == READ) { flags = MDB_RDONLY | MDB_NOTLS; } diff --git a/src/caffe/util/hdf5.cpp b/src/caffe/util/hdf5.cpp index 7730e76ab87..ab56474d4e3 100644 --- a/src/caffe/util/hdf5.cpp +++ b/src/caffe/util/hdf5.cpp @@ -56,8 +56,8 @@ void hdf5_load_nd_dataset_helper( LOG(FATAL) << "Datatype class unknown"; } - vector blob_dims(dims.size()); - for (int i = 0; i < dims.size(); ++i) { + vector blob_dims(dims.size()); + for (int_tp i = 0; i < dims.size(); ++i) { blob_dims[i] = dims[i]; } blob->Reshape(blob_dims); @@ -87,7 +87,7 @@ void hdf5_save_nd_dataset( bool write_diff) { int num_axes = blob.num_axes(); hsize_t *dims = new hsize_t[num_axes]; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { dims[i] = blob.shape(i); } const float* data; @@ -106,9 +106,9 @@ template <> void hdf5_save_nd_dataset( hid_t file_id, const string& dataset_name, const Blob& blob, bool write_diff) { - int num_axes = blob.num_axes(); + int_tp num_axes = blob.num_axes(); hsize_t *dims = new hsize_t[num_axes]; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { dims[i] = blob.shape(i); } const double* data; @@ -125,7 +125,7 @@ void hdf5_save_nd_dataset( string hdf5_load_string(hid_t loc_id, const string& dataset_name) { // Get size of dataset - size_t size; + uint_tp size; H5T_class_t class_; herr_t status = \ H5LTget_dataset_info(loc_id, dataset_name.c_str(), NULL, &class_, &size); @@ -171,11 +171,11 @@ int hdf5_get_num_links(hid_t loc_id) { } string hdf5_get_name_by_idx(hid_t loc_id, int idx) { - ssize_t str_size = H5Lget_name_by_idx( + int str_size = H5Lget_name_by_idx( loc_id, ".", H5_INDEX_NAME, H5_ITER_NATIVE, idx, NULL, 0, H5P_DEFAULT); CHECK_GE(str_size, 0) << "Error retrieving HDF5 dataset at index " << idx; char *c_str = new char[str_size+1]; - ssize_t status = H5Lget_name_by_idx( + int status = H5Lget_name_by_idx( loc_id, ".", H5_INDEX_NAME, H5_ITER_NATIVE, idx, c_str, str_size+1, H5P_DEFAULT); CHECK_GE(status, 0) << "Error retrieving HDF5 dataset at index " << idx; diff --git a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp index b0a7be50e5c..70f18a70fe8 100644 --- a/src/caffe/util/im2col.cpp +++ b/src/caffe/util/im2col.cpp @@ -9,22 +9,22 @@ namespace caffe { template -void im2col_cpu(const Dtype* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, +void im2col_cpu(const Dtype* data_im, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_col) { - int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; - int channels_col = channels * kernel_h * kernel_w; - for (int c = 0; c < channels_col; ++c) { - int w_offset = c % kernel_w; - int h_offset = (c / kernel_w) % kernel_h; - int c_im = c / kernel_h / kernel_w; - for (int h = 0; h < height_col; ++h) { - for (int w = 0; w < width_col; ++w) { - int h_pad = h * stride_h - pad_h + h_offset; - int w_pad = w * stride_w - pad_w + w_offset; + int_tp height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; + int_tp channels_col = channels * kernel_h * kernel_w; + for (int_tp c = 0; c < channels_col; ++c) { + int_tp w_offset = c % kernel_w; + int_tp h_offset = (c / kernel_w) % kernel_h; + int_tp c_im = c / kernel_h / kernel_w; + for (int_tp h = 0; h < height_col; ++h) { + for (int_tp w = 0; w < width_col; ++w) { + int_tp h_pad = h * stride_h - pad_h + h_offset; + int_tp w_pad = w * stride_w - pad_w + w_offset; if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) data_col[(c * height_col + h) * width_col + w] = data_im[(c_im * height + h_pad) * width + w_pad]; @@ -36,38 +36,38 @@ void im2col_cpu(const Dtype* data_im, const int channels, } // Explicit instantiation -template void im2col_cpu(const float* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, float* data_col); -template void im2col_cpu(const double* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, double* data_col); +template void im2col_cpu(const float* data_im, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, float* data_col); +template void im2col_cpu(const double* data_im, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, double* data_col); template inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col, - const int num_spatial_axes, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, + const int_tp num_spatial_axes, const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, Dtype* data_output) { if (!im2col) { - int im_size = im_shape[0]; - for (int i = 0; i < num_spatial_axes; ++i) { + int_tp im_size = im_shape[0]; + for (int_tp i = 0; i < num_spatial_axes; ++i) { im_size *= im_shape[1 + i]; } caffe_set(im_size, Dtype(0), data_output); } - int kernel_size = 1; - for (int i = 0; i < num_spatial_axes; ++i) { + int_tp kernel_size = 1; + for (int_tp i = 0; i < num_spatial_axes; ++i) { kernel_size *= kernel_shape[i]; } - const int channels_col = col_shape[0]; - vector d_offset(num_spatial_axes, 0); - vector d_iter(num_spatial_axes, 0); - for (int c = 0; c < channels_col; ++c) { + const int_tp channels_col = col_shape[0]; + vector d_offset(num_spatial_axes, 0); + vector d_iter(num_spatial_axes, 0); + for (int_tp c = 0; c < channels_col; ++c) { // Loop over spatial axes in reverse order to compute a per-axis offset. - int offset = c; - for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) { + int_tp offset = c; + for (int_tp d_i = num_spatial_axes - 1; d_i >= 0; --d_i) { if (d_i < num_spatial_axes - 1) { offset /= kernel_shape[d_i + 1]; } @@ -76,12 +76,12 @@ inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col, for (bool incremented = true; incremented; ) { // Loop over spatial axes in forward order to compute the indices in the // image and column, and whether the index lies in the padding. - int index_col = c; - int index_im = c / kernel_size; + int_tp index_col = c; + int_tp index_im = c / kernel_size; bool is_padding = false; - for (int d_i = 0; d_i < num_spatial_axes; ++d_i) { - const int d = d_iter[d_i]; - const int d_pad = d * stride[d_i] - pad[d_i] + d_offset[d_i]; + for (int_tp d_i = 0; d_i < num_spatial_axes; ++d_i) { + const int_tp d = d_iter[d_i]; + const int_tp d_pad = d * stride[d_i] - pad[d_i] + d_offset[d_i]; is_padding |= d_pad < 0 || d_pad >= im_shape[d_i + 1]; index_col *= col_shape[d_i + 1]; index_col += d; @@ -100,8 +100,8 @@ inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col, // Loop over spatial axes in reverse order to choose an index, // like counting. incremented = false; - for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) { - const int d_max = col_shape[d_i + 1]; + for (int_tp d_i = num_spatial_axes - 1; d_i >= 0; --d_i) { + const int_tp d_max = col_shape[d_i + 1]; DCHECK_LT(d_iter[d_i], d_max); if (d_iter[d_i] == d_max - 1) { d_iter[d_i] = 0; @@ -112,13 +112,13 @@ inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col, } } } // while(incremented) { - } // for (int c = 0; c < channels_col; ++c) { + } // for (int_tp c = 0; c < channels_col; ++c) { } template -void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, +void im2col_nd_cpu(const Dtype* data_im, const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, Dtype* data_col) { const bool kIm2Col = true; im2col_nd_core_cpu(data_im, kIm2Col, num_spatial_axes, im_shape, col_shape, @@ -127,34 +127,34 @@ void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes, // Explicit instantiation template void im2col_nd_cpu(const float* data_im, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, + const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, float* data_col); template void im2col_nd_cpu(const double* data_im, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, + const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, double* data_col); template -void col2im_cpu(const Dtype* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, +void col2im_cpu(const Dtype* data_col, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_im) { caffe_set(height * width * channels, Dtype(0), data_im); - int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; - int channels_col = channels * patch_h * patch_w; - for (int c = 0; c < channels_col; ++c) { - int w_offset = c % patch_w; - int h_offset = (c / patch_w) % patch_h; - int c_im = c / patch_h / patch_w; - for (int h = 0; h < height_col; ++h) { - for (int w = 0; w < width_col; ++w) { - int h_pad = h * stride_h - pad_h + h_offset; - int w_pad = w * stride_w - pad_w + w_offset; + int_tp height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; + int_tp channels_col = channels * patch_h * patch_w; + for (int_tp c = 0; c < channels_col; ++c) { + int_tp w_offset = c % patch_w; + int_tp h_offset = (c / patch_w) % patch_h; + int_tp c_im = c / patch_h / patch_w; + for (int_tp h = 0; h < height_col; ++h) { + for (int_tp w = 0; w < width_col; ++w) { + int_tp h_pad = h * stride_h - pad_h + h_offset; + int_tp w_pad = w * stride_w - pad_w + w_offset; if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) data_im[(c_im * height + h_pad) * width + w_pad] += data_col[(c * height_col + h) * width_col + w]; @@ -164,19 +164,19 @@ void col2im_cpu(const Dtype* data_col, const int channels, } // Explicit instantiation -template void col2im_cpu(const float* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, float* data_im); -template void col2im_cpu(const double* data_col, const int channels, - const int height, const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, double* data_im); +template void col2im_cpu(const float* data_col, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, float* data_im); +template void col2im_cpu(const double* data_col, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, double* data_im); template -void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, +void col2im_nd_cpu(const Dtype* data_col, const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, Dtype* data_im) { const bool kIm2Col = false; im2col_nd_core_cpu(data_col, kIm2Col, num_spatial_axes, im_shape, col_shape, @@ -185,14 +185,14 @@ void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, // Explicit instantiation template void col2im_nd_cpu(const float* data_col, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, + const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, float* data_im); template void col2im_nd_cpu(const double* data_col, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, + const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, double* data_im); diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index e229d41ae65..eef8851e7d4 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -11,31 +11,31 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void im2col_sk_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int ext_kernel_h, - const int ext_kernel_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, const int height_col, - const int width_col, Dtype* data_col) { +__global__ void im2col_sk_gpu_kernel(const int_tp n, const Dtype* data_im, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, const int_tp height_col, + const int_tp width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { - int w_out = index % width_col; - int h_index = index / width_col; - int h_out = h_index % height_col; - int channel_in = h_index / height_col; - int channel_out = channel_in * kernel_h * kernel_w; - int h_in = h_out * stride_h - pad_h; - int w_in = w_out * stride_w - pad_w; + int_tp w_out = index % width_col; + int_tp h_index = index / width_col; + int_tp h_out = h_index % height_col; + int_tp channel_in = h_index / height_col; + int_tp channel_out = channel_in * kernel_h * kernel_w; + int_tp h_in = h_out * stride_h - pad_h; + int_tp w_in = w_out * stride_w - pad_w; Dtype* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const Dtype* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; - for (int i = 0; i < ext_kernel_h; i += kstride_h) { - for (int j = 0; j < ext_kernel_w; j += kstride_w) { - int h = h_in + i; - int w = w_in + j; + for (int_tp i = 0; i < ext_kernel_h; i += kstride_h) { + for (int_tp j = 0; j < ext_kernel_w; j += kstride_w) { + int_tp h = h_in + i; + int_tp w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; @@ -46,18 +46,18 @@ __global__ void im2col_sk_gpu_kernel(const int n, const Dtype* data_im, } template -void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, - const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, +void im2col_sk_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. - int ext_kernel_h = (kernel_h - 1) * kstride_h + 1; - int ext_kernel_w = (kernel_w - 1) * kstride_w + 1; - int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; - int num_kernels = channels * height_col * width_col; + int_tp ext_kernel_h = (kernel_h - 1) * kstride_h + 1; + int_tp ext_kernel_w = (kernel_w - 1) * kstride_w + 1; + int_tp height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; + int_tp num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) im2col_sk_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), @@ -71,45 +71,45 @@ void im2col_sk_gpu(const Dtype* data_im, const int channels, const int height, } // Explicit instantiation -template void im2col_sk_gpu(const float* data_im, const int channels, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, +template void im2col_sk_gpu(const float* data_im, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, float* data_col); -template void im2col_sk_gpu(const double* data_im, const int channels, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, +template void im2col_sk_gpu(const double* data_im, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, double* data_col); template -__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, +__global__ void im2col_gpu_kernel(const int_tp n, const Dtype* data_im, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp height_col, const int_tp width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { - int w_out = index % width_col; - int h_index = index / width_col; - int h_out = h_index % height_col; - int channel_in = h_index / height_col; - int channel_out = channel_in * kernel_h * kernel_w; - int h_in = h_out * stride_h - pad_h; - int w_in = w_out * stride_w - pad_w; + int_tp w_out = index % width_col; + int_tp h_index = index / width_col; + int_tp h_out = h_index % height_col; + int_tp channel_in = h_index / height_col; + int_tp channel_out = channel_in * kernel_h * kernel_w; + int_tp h_in = h_out * stride_h - pad_h; + int_tp w_in = w_out * stride_w - pad_w; Dtype* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const Dtype* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; - for (int i = 0; i < kernel_h; ++i) { - for (int j = 0; j < kernel_w; ++j) { - int h = h_in + i; - int w = w_in + j; + for (int_tp i = 0; i < kernel_h; ++i) { + for (int_tp j = 0; j < kernel_w; ++j) { + int_tp h = h_in + i; + int_tp w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; @@ -120,15 +120,15 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, } template -void im2col_gpu(const Dtype* data_im, const int channels, const int height, - const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_col) { +void im2col_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. - int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; - int num_kernels = channels * height_col * width_col; + int_tp height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; + int_tp num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( @@ -139,55 +139,55 @@ void im2col_gpu(const Dtype* data_im, const int channels, const int height, } // Explicit instantiation -template void im2col_gpu(const float* data_im, const int channels, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, +template void im2col_gpu(const float* data_im, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, float* data_col); -template void im2col_gpu(const double* data_im, const int channels, - const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, +template void im2col_gpu(const double* data_im, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, double* data_col); // Support of stride_h and stride_w greater than 1 is not implemented template -__global__ void col2im_sk_gpu_kernel(const int n, const Dtype* data_col, - const int height, const int width, - const int channels, const int patch_h, - const int patch_w, const int ext_patch_h, - const int ext_patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, - const int kstride_w, const int height_col, - const int width_col, Dtype* data_im) { +__global__ void col2im_sk_gpu_kernel(const int_tp n, const Dtype* data_col, + const int_tp height, const int_tp width, + const int_tp channels, const int_tp patch_h, + const int_tp patch_w, const int_tp ext_patch_h, + const int_tp ext_patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, const int_tp height_col, + const int_tp width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; - int w = index % width + pad_w; - int h = (index / width) % height + pad_h; - int c = index / (width * height); + int_tp w = index % width + pad_w; + int_tp h = (index / width) % height + pad_h; + int_tp c = index / (width * height); // compute the start and end of the output - int width_col_1 = width_col - 1; - int height_col_1 = height_col - 1; - int w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; - int w_col_end = + int_tp width_col_1 = width_col - 1; + int_tp height_col_1 = height_col - 1; + int_tp w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int_tp w_col_end = (w >= width_col) ? width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; - int h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; - int h_col_end = + int_tp h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int_tp h_col_end = (h >= height_col) ? height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; - int w_num = (w - w_col_start) / kstride_w; - int h_num = (h - h_col_start) / kstride_h; + int_tp w_num = (w - w_col_start) / kstride_w; + int_tp h_num = (h - h_col_start) / kstride_h; - int coeff_w_idx = height_col * width_col; - int coeff_h_idx = patch_w * coeff_w_idx; - int offset = c * patch_h * coeff_h_idx; - for (int h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += + int_tp coeff_w_idx = height_col * width_col; + int_tp coeff_h_idx = patch_w * coeff_w_idx; + int_tp offset = c * patch_h * coeff_h_idx; + for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += kstride_h, --h_idx) { - for (int w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += + for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += kstride_w, --w_idx) { val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + h_col * width_col + w_col]; @@ -199,19 +199,19 @@ __global__ void col2im_sk_gpu_kernel(const int n, const Dtype* data_col, } template -void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, const int kstride_h, const int kstride_w, +void col2im_sk_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, Dtype* data_im) { if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) LOG(FATAL)<< "stride greater than 1 or pad greater" << " than 0 not tested in col2im_sk_gpu()."; - int ext_patch_h = (patch_h - 1) * kstride_h + 1; - int ext_patch_w = (patch_w - 1) * kstride_w + 1; - int height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; - int num_kernels = channels * height * width; + int_tp ext_patch_h = (patch_h - 1) * kstride_h + 1; + int_tp ext_patch_w = (patch_w - 1) * kstride_w + 1; + int_tp height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; + int_tp num_kernels = channels * height * width; col2im_sk_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( @@ -223,48 +223,48 @@ void col2im_sk_gpu(const Dtype* data_col, const int channels, const int height, } // Explicit instantiation -template void col2im_sk_gpu(const float* data_col, const int channels, - const int height, const int width, - const int patch_h, const int patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, +template void col2im_sk_gpu(const float* data_col, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, float* data_im); -template void col2im_sk_gpu(const double* data_col, const int channels, - const int height, const int width, - const int patch_h, const int patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int kstride_h, const int kstride_w, +template void col2im_sk_gpu(const double* data_col, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, double* data_im); template -__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, - const int height, const int width, - const int channels, const int patch_h, - const int patch_w, const int pad_h, - const int pad_w, const int stride_h, - const int stride_w, const int height_col, - const int width_col, Dtype* data_im) { +__global__ void col2im_gpu_kernel(const int_tp n, const Dtype* data_col, + const int_tp height, const int_tp width, + const int_tp channels, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp height_col, + const int_tp width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; - int w = index % width + pad_w; - int h = (index / width) % height + pad_h; - int c = index / (width * height); + int_tp w = index % width + pad_w; + int_tp h = (index / width) % height + pad_h; + int_tp c = index / (width * height); // compute the start and end of the output - int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; - int w_col_end = min(w / stride_w + 1, width_col); - int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; - int h_col_end = min(h / stride_h + 1, height_col); + int_tp w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; + int_tp w_col_end = min((int_tpc)(w / stride_w + 1), (int_tpc)width_col); + int_tp h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; + int_tp h_col_end = min((int_tpc)(h / stride_h + 1), (int_tpc)height_col); // equivalent implementation - int offset = (c * patch_h * patch_w + h * patch_w + w) * height_col + int_tp offset = (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col; - int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; - int coeff_w_col = (1 - stride_w * height_col * width_col); - for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { - for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + int_tp coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col; + int_tp coeff_w_col = (1 - stride_w * height_col * width_col); + for (int_tp h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int_tp w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; } } @@ -273,13 +273,13 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, } template -void col2im_gpu(const Dtype* data_col, const int channels, const int height, - const int width, const int patch_h, const int patch_w, - const int pad_h, const int pad_w, const int stride_h, - const int stride_w, Dtype* data_im) { - int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; - int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; - int num_kernels = channels * height * width; +void col2im_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, Dtype* data_im) { + int_tp height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; + int_tp num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. @@ -293,42 +293,42 @@ void col2im_gpu(const Dtype* data_col, const int channels, const int height, } // Explicit instantiation -template void col2im_gpu(const float* data_col, const int channels, - const int height, const int width, - const int patch_h, const int patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, +template void col2im_gpu(const float* data_col, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, float* data_im); -template void col2im_gpu(const double* data_col, const int channels, - const int height, const int width, - const int patch_h, const int patch_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, +template void col2im_gpu(const double* data_col, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, double* data_im); template -__global__ void im2col_ndsk_gpu_kernel(const int n, const int num_axes, +__global__ void im2col_ndsk_gpu_kernel(const int_tp n, const int_tp num_axes, const Dtype* data_im, - const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, const int* kstride, + const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, const int_tp* kstride, Dtype* data_col) { - int d_temp[6]; // NOLINT(runtime/arrays) - int d_iter[6]; // NOLINT(runtime/arrays) - int i; + int_tp d_temp[6]; // NOLINT(runtime/arrays) + int_tp d_iter[6]; // NOLINT(runtime/arrays) + int_tp i; CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_in = index; - int channel_out = 1; + int_tp channel_in = index; + int_tp channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { d_temp[i] = channel_in % col_shape[i + 1]; channel_in /= col_shape[i + 1]; channel_out *= kernel_shape[i]; } channel_out *= channel_in; - int data_col_inc = 1; + int_tp data_col_inc = 1; for (i = 0; i < num_axes; ++i) { channel_out *= col_shape[i + 1]; channel_out += d_temp[i]; @@ -344,7 +344,7 @@ __global__ void im2col_ndsk_gpu_kernel(const int n, const int num_axes, do { bool in_range = true; for (i = 0; i < num_axes; ++i) { - const int d_iter_im = d_iter[i] + d_temp[i]; + const int_tp d_iter_im = d_iter[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; if (!in_range) { break; @@ -353,7 +353,7 @@ __global__ void im2col_ndsk_gpu_kernel(const int n, const int num_axes, // Write column data if (in_range) { - int data_im_offset = d_iter[0]; + int_tp data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { data_im_offset *= im_shape[i + 1]; data_im_offset += d_iter[i]; @@ -366,9 +366,9 @@ __global__ void im2col_ndsk_gpu_kernel(const int n, const int num_axes, data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { - // Old: const int d_max = kernel_shape[i]; + // Old: const int_tp d_max = kernel_shape[i]; // New (strided, limit is the external kernel size): - const int d_max = (kernel_shape[i] - 1) * kstride[i] + 1; + const int_tp d_max = (kernel_shape[i] - 1) * kstride[i] + 1; if (d_iter[i] > d_max - kstride[i]) { d_iter[i] = 0; } else { // d_iter[i] <= d_max - kstride[i] @@ -378,28 +378,28 @@ __global__ void im2col_ndsk_gpu_kernel(const int n, const int num_axes, incremented = true; break; } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); // do } // CUDA_KERNEL_LOOP(index, n) } template -__global__ void col2im_ndsk_gpu_kernel(const int n, const int num_axes, +__global__ void col2im_ndsk_gpu_kernel(const int_tp n, const int_tp num_axes, const Dtype* data_col, - const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, const int* kstride, + const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, const int_tp* kstride, Dtype* data_im) { - int d_im[6]; // NOLINT(runtime/arrays) - int d_col_size[6]; // NOLINT(runtime/arrays) - int d_col_iter[6]; // NOLINT(runtime/arrays) - int d_col_start[6]; // NOLINT(runtime/arrays) - int d_col_end[6]; // NOLINT(runtime/arrays) - int d_ext_patch[6]; // NOLINT(runtime/arrays) - int d_idx[6]; // NOLINT(runtime/arrays) - - for (int i = num_axes - 1; i >= 0; --i) { + int_tp d_im[6]; // NOLINT(runtime/arrays) + int_tp d_col_size[6]; // NOLINT(runtime/arrays) + int_tp d_col_iter[6]; // NOLINT(runtime/arrays) + int_tp d_col_start[6]; // NOLINT(runtime/arrays) + int_tp d_col_end[6]; // NOLINT(runtime/arrays) + int_tp d_ext_patch[6]; // NOLINT(runtime/arrays) + int_tp d_idx[6]; // NOLINT(runtime/arrays) + + for (int_tp i = num_axes - 1; i >= 0; --i) { d_ext_patch[i] = (kernel_shape[i] - 1) * kstride[i] + 1; d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) / stride[i] + 1; @@ -408,15 +408,15 @@ __global__ void col2im_ndsk_gpu_kernel(const int n, const int num_axes, CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_im = index; + int_tp channel_im = index; // Calculate d_im (image dimensions). - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { d_im[i] = channel_im % im_shape[i + 1] + pad[i]; channel_im /= im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { // Old: /*d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? @@ -438,7 +438,7 @@ __global__ void col2im_ndsk_gpu_kernel(const int n, const int num_axes, // final val will be 0. data_im[index] = 0; done = true; - break; // for (int i = 0; i < num_axes; ++i) + break; // for (int_tp i = 0; i < num_axes; ++i) } } if (done) { @@ -449,20 +449,20 @@ __global__ void col2im_ndsk_gpu_kernel(const int n, const int num_axes, bool incremented = true; do { // Compute the final offset. - int final_offset = 0; - int coeff_prod = 1; - for (int i = num_axes - 1; i >= 0; --i) { + int_tp final_offset = 0; + int_tp coeff_prod = 1; + for (int_tp i = num_axes - 1; i >= 0; --i) { final_offset += d_col_iter[i] * coeff_prod; coeff_prod *= d_col_size[i]; } - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { final_offset += d_idx[i] * coeff_prod; coeff_prod *= kernel_shape[i]; } final_offset += channel_im * coeff_prod; val += data_col[final_offset]; incremented = false; - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { if (d_col_iter[i] > d_col_end[i] - kstride[i]) { d_col_iter[i] = d_col_start[i]; d_idx[i] = (d_im[i] - d_col_start[i]) / kstride[i]; @@ -470,19 +470,19 @@ __global__ void col2im_ndsk_gpu_kernel(const int n, const int num_axes, d_col_iter[i] += kstride[i]; --d_idx[i]; incremented = true; - break; // for (int i = num_axes - 1; i >= 0; --i) + break; // for (int_tp i = num_axes - 1; i >= 0; --i) } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; } // CUDA_KERNEL_LOOP(index, n) } template -void im2col_ndsk_gpu(const Dtype* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, const int* kstride, +void im2col_ndsk_gpu(const Dtype* data_im, const int_tp num_spatial_axes, + const int_tp num_kernels, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, const int_tp* kstride, Dtype* data_col) { im2col_ndsk_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( @@ -492,22 +492,22 @@ void im2col_ndsk_gpu(const Dtype* data_im, const int num_spatial_axes, } // Explicit instantiation -template void im2col_ndsk_gpu(const float* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, - const int* kstride, float* data_col); -template void im2col_ndsk_gpu(const double* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, - const int* kstride, double* data_col); +template void im2col_ndsk_gpu(const float* data_im, const int_tp num_spatial_axes, + const int_tp num_kernels, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, float* data_col); +template void im2col_ndsk_gpu(const double* data_im, const int_tp num_spatial_axes, + const int_tp num_kernels, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, double* data_col); template -void col2im_ndsk_gpu(const Dtype* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, const int* kstride, +void col2im_ndsk_gpu(const Dtype* data_col, const int_tp num_spatial_axes, + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, const int_tp* kstride, Dtype* data_im) { col2im_ndsk_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( @@ -517,38 +517,38 @@ void col2im_ndsk_gpu(const Dtype* data_col, const int num_spatial_axes, } // Explicit instantiation -template void col2im_ndsk_gpu(const float* data_col, const int num_axes, - const int im_size, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, - const int* kstride, float* data_im); -template void col2im_ndsk_gpu(const double* data_col, const int num_axes, - const int im_size, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, - const int* kstride, double* data_im); +template void col2im_ndsk_gpu(const float* data_col, const int_tp num_axes, + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, float* data_im); +template void col2im_ndsk_gpu(const double* data_col, const int_tp num_axes, + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, double* data_im); template -__global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, - const Dtype* data_im, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, Dtype* data_col) { - int d_temp[6]; // NOLINT(runtime/arrays) - int d_iter[6]; // NOLINT(runtime/arrays) - int i; +__global__ void im2col_nd_gpu_kernel(const int_tp n, const int_tp num_axes, + const Dtype* data_im, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, Dtype* data_col) { + int_tp d_temp[6]; // NOLINT(runtime/arrays) + int_tp d_iter[6]; // NOLINT(runtime/arrays) + int_tp i; CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_in = index; - int channel_out = 1; + int_tp channel_in = index; + int_tp channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { d_temp[i] = channel_in % col_shape[i + 1]; channel_in /= col_shape[i + 1]; channel_out *= kernel_shape[i]; } channel_out *= channel_in; - int data_col_inc = 1; + int_tp data_col_inc = 1; for (i = 0; i < num_axes; ++i) { channel_out *= col_shape[i + 1]; channel_out += d_temp[i]; @@ -564,14 +564,14 @@ __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, do { bool in_range = true; for (i = 0; i < num_axes; ++i) { - const int d_iter_im = d_iter[i] + d_temp[i]; + const int_tp d_iter_im = d_iter[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; if (!in_range) { break; } } if (in_range) { - int data_im_offset = d_iter[0]; + int_tp data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { data_im_offset *= im_shape[i + 1]; data_im_offset += d_iter[i]; @@ -583,7 +583,7 @@ __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { - const int d_max = kernel_shape[i]; + const int_tp d_max = kernel_shape[i]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 @@ -591,16 +591,16 @@ __global__ void im2col_nd_gpu_kernel(const int n, const int num_axes, incremented = true; break; } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); // do } // CUDA_KERNEL_LOOP(index, n) } template -void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, - const int* col_shape, const int* kernel_shape, - const int* pad, const int* stride, Dtype* data_col) { +void im2col_nd_gpu(const Dtype* data_im, const int_tp num_spatial_axes, + const int_tp num_kernels, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, Dtype* data_col) { im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, num_spatial_axes, data_im, im_shape, col_shape, @@ -610,50 +610,51 @@ void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, // Explicit instantiation template void im2col_nd_gpu(const float* data_im, - const int num_spatial_axes, - const int col_size, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, float* data_col); + const int_tp num_spatial_axes, + const int_tp col_size, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, float* data_col); template void im2col_nd_gpu(const double* data_im, - const int num_spatial_axes, - const int col_size, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, double* data_col); + const int_tp num_spatial_axes, + const int_tp col_size, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, double* data_col); template -__global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, - const Dtype* data_col, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, Dtype* data_im) { - int d_im[6]; // NOLINT(runtime/arrays) - int d_col_iter[6]; // NOLINT(runtime/arrays) - int d_col_start[6]; // NOLINT(runtime/arrays) - int d_col_end[6]; // NOLINT(runtime/arrays) +__global__ void col2im_nd_gpu_kernel(const int_tp n, const int_tp num_axes, + const Dtype* data_col, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, Dtype* data_im) { + int_tp d_im[6]; // NOLINT(runtime/arrays) + int_tp d_col_iter[6]; // NOLINT(runtime/arrays) + int_tp d_col_start[6]; // NOLINT(runtime/arrays) + int_tp d_col_end[6]; // NOLINT(runtime/arrays) CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int channel_im = index; + int_tp channel_im = index; // Calculate d_im (image dimensions). - for (int i = num_axes - 1; i >= 0; --i) { + for (int_tp i = num_axes - 1; i >= 0; --i) { d_im[i] = channel_im % im_shape[i + 1] + pad[i]; channel_im /= im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); + d_col_end[i] = min((int_tpc)(d_im[i] / stride[i] + 1), + (int_tpc)(col_shape[i + 1])); if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; done = true; - break; // for (int i = 0; i < num_axes; ++i) + break; // for (int_tp i = 0; i < num_axes; ++i) } } if (done) { @@ -664,39 +665,39 @@ __global__ void col2im_nd_gpu_kernel(const int n, const int num_axes, bool incremented = true; do { // Compute the final offset. - int final_offset = 0; - int kernel_shape_prod = 1; - for (int i = num_axes - 1; i >= 0; --i) { + int_tp final_offset = 0; + int_tp kernel_shape_prod = 1; + for (int_tp i = num_axes - 1; i >= 0; --i) { final_offset += (d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod; kernel_shape_prod *= kernel_shape[i]; } final_offset += kernel_shape_prod * channel_im; - for (int i = 0; i < num_axes; ++i) { + for (int_tp i = 0; i < num_axes; ++i) { final_offset *= col_shape[i + 1]; final_offset += d_col_iter[i]; } val += data_col[final_offset]; incremented = false; - for (int i = num_axes - 1; i >= 0; --i) { - const int d_max = d_col_end[i]; + for (int_tp i = num_axes - 1; i >= 0; --i) { + const int_tp d_max = d_col_end[i]; if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; } else { // d_col_iter[i] < d_max - 1 ++d_col_iter[i]; incremented = true; - break; // for (int i = num_axes - 1; i >= 0; --i) + break; // for (int_tp i = num_axes - 1; i >= 0; --i) } - } // for (int i = num_axes - 1; i >= 0; --i) + } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; } // CUDA_KERNEL_LOOP(index, n) } template -void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, +void col2im_nd_gpu(const Dtype* data_col, const int_tp num_spatial_axes, + const int_tp im_size, const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, Dtype* data_im) { col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( @@ -707,17 +708,17 @@ void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, // Explicit instantiation template void col2im_nd_gpu(const float* data_col, - const int num_spatial_axes, - const int im_size, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, float* data_im); + const int_tp num_spatial_axes, + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, float* data_im); template void col2im_nd_gpu(const double* data_col, - const int num_spatial_axes, - const int im_size, const int* im_shape, - const int* col_shape, - const int* kernel_shape, const int* pad, - const int* stride, double* data_im); + const int_tp num_spatial_axes, + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, double* data_im); #endif // USE_CUDA } // namespace caffe diff --git a/src/caffe/util/insert_splits.cpp b/src/caffe/util/insert_splits.cpp index 475a2a9f618..1bdaee71e7e 100644 --- a/src/caffe/util/insert_splits.cpp +++ b/src/caffe/util/insert_splits.cpp @@ -13,44 +13,44 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) { // Initialize by copying from the input NetParameter. param_split->CopyFrom(param); param_split->clear_layer(); - map > blob_name_to_last_top_idx; - map, pair > bottom_idx_to_source_top_idx; - map, int> top_idx_to_bottom_count; - map, float> top_idx_to_loss_weight; - map, int> top_idx_to_bottom_split_idx; - map layer_idx_to_layer_name; + map > blob_name_to_last_top_idx; + map, pair > bottom_idx_to_source_top_idx; + map, int_tp> top_idx_to_bottom_count; + map, float> top_idx_to_loss_weight; + map, int_tp> top_idx_to_bottom_split_idx; + map layer_idx_to_layer_name; layer_idx_to_layer_name[-1] = "input"; // Determine the number of times each blob is used as an input (bottom) blob. - for (int i = 0; i < param.input_size(); ++i) { + for (int_tp i = 0; i < param.input_size(); ++i) { const string& blob_name = param.input(i); blob_name_to_last_top_idx[blob_name] = make_pair(-1, i); } - for (int i = 0; i < param.layer_size(); ++i) { + for (int_tp i = 0; i < param.layer_size(); ++i) { const LayerParameter& layer_param = param.layer(i); layer_idx_to_layer_name[i] = layer_param.name(); - for (int j = 0; j < layer_param.bottom_size(); ++j) { + for (int_tp j = 0; j < layer_param.bottom_size(); ++j) { const string& blob_name = layer_param.bottom(j); if (blob_name_to_last_top_idx.find(blob_name) == blob_name_to_last_top_idx.end()) { LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '" << layer_param.name() << "', bottom index " << j << ")"; } - const pair& bottom_idx = make_pair(i, j); - const pair& top_idx = blob_name_to_last_top_idx[blob_name]; + const pair& bottom_idx = make_pair(i, j); + const pair& top_idx = blob_name_to_last_top_idx[blob_name]; bottom_idx_to_source_top_idx[bottom_idx] = top_idx; ++top_idx_to_bottom_count[top_idx]; } - for (int j = 0; j < layer_param.top_size(); ++j) { + for (int_tp j = 0; j < layer_param.top_size(); ++j) { const string& blob_name = layer_param.top(j); blob_name_to_last_top_idx[blob_name] = make_pair(i, j); } // A use of a top blob as a loss should be handled similarly to the use of // a top blob as an input (bottom) blob to another layer. - const int last_loss = + const int_tp last_loss = std::min(layer_param.loss_weight_size(), layer_param.top_size()); - for (int j = 0; j < last_loss; ++j) { + for (int_tp j = 0; j < last_loss; ++j) { const string& blob_name = layer_param.top(j); - const pair& top_idx = blob_name_to_last_top_idx[blob_name]; + const pair& top_idx = blob_name_to_last_top_idx[blob_name]; top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j); if (top_idx_to_loss_weight[top_idx]) { ++top_idx_to_bottom_count[top_idx]; @@ -59,8 +59,8 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) { } // Create split layer for any input blobs used by other layer as bottom // blobs more than once. - for (int i = 0; i < param.input_size(); ++i) { - const int split_count = top_idx_to_bottom_count[make_pair(-1, i)]; + for (int_tp i = 0; i < param.input_size(); ++i) { + const int_tp split_count = top_idx_to_bottom_count[make_pair(-1, i)]; if (split_count > 1) { const string& layer_name = layer_idx_to_layer_name[-1]; const string& blob_name = param.input(i); @@ -70,14 +70,14 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) { kZeroLossWeight, split_layer_param); } } - for (int i = 0; i < param.layer_size(); ++i) { + for (int_tp i = 0; i < param.layer_size(); ++i) { LayerParameter* layer_param = param_split->add_layer(); layer_param->CopyFrom(param.layer(i)); // Replace any shared bottom blobs with split layer outputs. - for (int j = 0; j < layer_param->bottom_size(); ++j) { - const pair& top_idx = + for (int_tp j = 0; j < layer_param->bottom_size(); ++j) { + const pair& top_idx = bottom_idx_to_source_top_idx[make_pair(i, j)]; - const int split_count = top_idx_to_bottom_count[top_idx]; + const int_tp split_count = top_idx_to_bottom_count[top_idx]; if (split_count > 1) { const string& layer_name = layer_idx_to_layer_name[top_idx.first]; const string& blob_name = layer_param->bottom(j); @@ -87,9 +87,9 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) { } // Create split layer for any top blobs used by other layer as bottom // blobs more than once. - for (int j = 0; j < layer_param->top_size(); ++j) { - const pair& top_idx = make_pair(i, j); - const int split_count = top_idx_to_bottom_count[top_idx]; + for (int_tp j = 0; j < layer_param->top_size(); ++j) { + const pair& top_idx = make_pair(i, j); + const int_tp split_count = top_idx_to_bottom_count[top_idx]; if (split_count > 1) { const string& layer_name = layer_idx_to_layer_name[i]; const string& blob_name = layer_param->top(j); @@ -107,13 +107,13 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) { } void ConfigureSplitLayer(const string& layer_name, const string& blob_name, - const int blob_idx, const int split_count, const float loss_weight, + const int_tp blob_idx, const int_tp split_count, const float loss_weight, LayerParameter* split_layer_param) { split_layer_param->Clear(); split_layer_param->add_bottom(blob_name); split_layer_param->set_name(SplitLayerName(layer_name, blob_name, blob_idx)); split_layer_param->set_type("Split"); - for (int k = 0; k < split_count; ++k) { + for (int_tp k = 0; k < split_count; ++k) { split_layer_param->add_top( SplitBlobName(layer_name, blob_name, blob_idx, k)); if (loss_weight) { @@ -127,7 +127,7 @@ void ConfigureSplitLayer(const string& layer_name, const string& blob_name, } string SplitLayerName(const string& layer_name, const string& blob_name, - const int blob_idx) { + const int_tp blob_idx) { ostringstream split_layer_name; split_layer_name << blob_name << "_" << layer_name << "_" << blob_idx << "_split"; @@ -135,7 +135,7 @@ string SplitLayerName(const string& layer_name, const string& blob_name, } string SplitBlobName(const string& layer_name, const string& blob_name, - const int blob_idx, const int split_idx) { + const int_tp blob_idx, const int_tp split_idx) { ostringstream split_blob_name; split_blob_name << blob_name << "_" << layer_name << "_" << blob_idx << "_split_" << split_idx; diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index f2b1dd98423..a08d2c03a1a 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -19,7 +19,7 @@ #include "caffe/proto/caffe.pb.h" #include "caffe/util/io.hpp" -const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte. +const int_tp kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte. namespace caffe { @@ -32,7 +32,7 @@ using google::protobuf::io::CodedOutputStream; using google::protobuf::Message; bool ReadProtoFromTextFile(const char* filename, Message* proto) { - int fd = open(filename, O_RDONLY); + int_tp fd = open(filename, O_RDONLY); CHECK_NE(fd, -1) << "File not found: " << filename; FileInputStream* input = new FileInputStream(fd); bool success = google::protobuf::TextFormat::Parse(input, proto); @@ -42,7 +42,7 @@ bool ReadProtoFromTextFile(const char* filename, Message* proto) { } void WriteProtoToTextFile(const Message& proto, const char* filename) { - int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); + int_tp fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); FileOutputStream* output = new FileOutputStream(fd); CHECK(google::protobuf::TextFormat::Print(proto, output)); delete output; @@ -50,7 +50,7 @@ void WriteProtoToTextFile(const Message& proto, const char* filename) { } bool ReadProtoFromBinaryFile(const char* filename, Message* proto) { - int fd = open(filename, O_RDONLY); + int_tp fd = open(filename, O_RDONLY); CHECK_NE(fd, -1) << "File not found: " << filename; ZeroCopyInputStream* raw_input = new FileInputStream(fd); CodedInputStream* coded_input = new CodedInputStream(raw_input); @@ -71,9 +71,9 @@ void WriteProtoToBinaryFile(const Message& proto, const char* filename) { #ifdef USE_OPENCV cv::Mat ReadImageToCVMat(const string& filename, - const int height, const int width, const bool is_color) { + const int_tp height, const int_tp width, const bool is_color) { cv::Mat cv_img; - int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : + int_tp cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE); cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag); if (!cv_img_origin.data) { @@ -89,7 +89,7 @@ cv::Mat ReadImageToCVMat(const string& filename, } cv::Mat ReadImageToCVMat(const string& filename, - const int height, const int width) { + const int_tp height, const int_tp width) { return ReadImageToCVMat(filename, height, width, true); } @@ -105,7 +105,7 @@ cv::Mat ReadImageToCVMat(const string& filename) { // Do the file extension and encoding match? static bool matchExt(const std::string & fn, std::string en) { - size_t p = fn.rfind('.'); + uint_tp p = fn.rfind('.'); std::string ext = p != fn.npos ? fn.substr(p) : fn; std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower); std::transform(en.begin(), en.end(), en.begin(), ::tolower); @@ -116,8 +116,8 @@ static bool matchExt(const std::string & fn, return false; } -bool ReadImageToDatum(const string& filename, const int label, - const int height, const int width, const bool is_color, +bool ReadImageToDatum(const string& filename, const int_tp label, + const int_tp height, const int_tp width, const bool is_color, const std::string & encoding, Datum* datum) { cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color); if (cv_img.data) { @@ -142,7 +142,7 @@ bool ReadImageToDatum(const string& filename, const int label, } #endif // USE_OPENCV -bool ReadFileToDatum(const string& filename, const int label, +bool ReadFileToDatum(const string& filename, const int_tp label, Datum* datum) { std::streampos size; @@ -179,7 +179,7 @@ cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color) { CHECK(datum.encoded()) << "Datum not encoded"; const string& data = datum.data(); std::vector vec_data(data.c_str(), data.c_str() + data.size()); - int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : + int_tp cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE); cv_img = cv::imdecode(vec_data, cv_read_flag); if (!cv_img.data) { @@ -217,17 +217,17 @@ void CVMatToDatum(const cv::Mat& cv_img, Datum* datum) { datum->clear_data(); datum->clear_float_data(); datum->set_encoded(false); - int datum_channels = datum->channels(); - int datum_height = datum->height(); - int datum_width = datum->width(); - int datum_size = datum_channels * datum_height * datum_width; + int_tp datum_channels = datum->channels(); + int_tp datum_height = datum->height(); + int_tp datum_width = datum->width(); + int_tp datum_size = datum_channels * datum_height * datum_width; std::string buffer(datum_size, ' '); - for (int h = 0; h < datum_height; ++h) { + for (int_tp h = 0; h < datum_height; ++h) { const uchar* ptr = cv_img.ptr(h); - int img_index = 0; - for (int w = 0; w < datum_width; ++w) { - for (int c = 0; c < datum_channels; ++c) { - int datum_index = (c * datum_height + h) * datum_width + w; + int_tp img_index = 0; + for (int_tp w = 0; w < datum_width; ++w) { + for (int_tp c = 0; c < datum_channels; ++c) { + int_tp datum_index = (c * datum_height + h) * datum_width + w; buffer[datum_index] = static_cast(ptr[img_index++]); } } diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index bd92b39b914..1430c1e4f91 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -11,98 +11,100 @@ namespace caffe { template<> void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const float alpha, + const CBLAS_TRANSPOSE TransB, const int_tp M, + const int_tp N, const int_tp K, const float alpha, const float* A, const float* B, const float beta, float* C) { - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; + int_tp lda = (TransA == CblasNoTrans) ? K : M; + int_tp ldb = (TransB == CblasNoTrans) ? N : K; cblas_sgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, N); } template<> void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const double alpha, + const CBLAS_TRANSPOSE TransB, const int_tp M, + const int_tp N, const int_tp K, const double alpha, const double* A, const double* B, const double beta, double* C) { - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; + int_tp lda = (TransA == CblasNoTrans) ? K : M; + int_tp ldb = (TransB == CblasNoTrans) ? N : K; cblas_dgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, N); } template<> -void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const float alpha, const float* A, +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, + const int_tp N, const float alpha, const float* A, const float* x, const float beta, float* y) { cblas_sgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); } template<> -void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const double alpha, const double* A, +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, + const int_tp N, const double alpha, const double* A, const double* x, const double beta, double* y) { cblas_dgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); } template<> -void caffe_axpy(const int N, const float alpha, const float* X, +void caffe_axpy(const int_tp N, const float alpha, const float* X, float* Y) { cblas_saxpy(N, alpha, X, 1, Y, 1); } template<> -void caffe_axpy(const int N, const double alpha, const double* X, +void caffe_axpy(const int_tp N, const double alpha, const double* X, double* Y) { cblas_daxpy(N, alpha, X, 1, Y, 1); } template -void caffe_set(const int N, const Dtype alpha, Dtype* Y) { +void caffe_set(const int_tp N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { memset(Y, 0, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) return; } - for (int i = 0; i < N; ++i) { + for (int_tp i = 0; i < N; ++i) { Y[i] = alpha; } } -template void caffe_set(const int N, const int alpha, int* Y); -template void caffe_set(const int N, const float alpha, float* Y); -template void caffe_set(const int N, const double alpha, double* Y); +template void caffe_set(const int_tp N, const int alpha, int* Y); +template void caffe_set(const int_tp N, const uint_tp alpha, uint_tp* Y); +template void caffe_set(const int_tp N, const int_tp alpha, int_tp* Y); +template void caffe_set(const int_tp N, const float alpha, float* Y); +template void caffe_set(const int_tp N, const double alpha, double* Y); template<> -void caffe_add_scalar(const int N, const float alpha, float* Y) { - for (int i = 0; i < N; ++i) { +void caffe_add_scalar(const int_tp N, const float alpha, float* Y) { + for (int_tp i = 0; i < N; ++i) { Y[i] += alpha; } } template<> -void caffe_add_scalar(const int N, const double alpha, double* Y) { - for (int i = 0; i < N; ++i) { +void caffe_add_scalar(const int_tp N, const double alpha, double* Y) { + for (int_tp i = 0; i < N; ++i) { Y[i] += alpha; } } template -void caffe_cpu_copy(const int N, const Dtype* X, Dtype* Y) { +void caffe_cpu_copy(const int_tp N, const Dtype* X, Dtype* Y) { if (X != Y) { memcpy(Y, X, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) } } -template void caffe_cpu_copy(const int N, const int* X, int* Y); -template void caffe_cpu_copy(const int N, const unsigned int* X, - unsigned int* Y); -template void caffe_cpu_copy(const int N, const float* X, float* Y); -template void caffe_cpu_copy(const int N, const double* X, double* Y); +template void caffe_cpu_copy(const int_tp N, const int_tp* X, int_tp* Y); +template void caffe_cpu_copy(const int_tp N, const uint_tp* X, + uint_tp* Y); +template void caffe_cpu_copy(const int_tp N, const float* X, float* Y); +template void caffe_cpu_copy(const int_tp N, const double* X, double* Y); template -void caffe_copy(const int N, const Dtype* X, Dtype* Y) { +void caffe_copy(const int_tp N, const Dtype* X, Dtype* Y) { if (X != Y) { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY @@ -119,130 +121,130 @@ void caffe_copy(const int N, const Dtype* X, Dtype* Y) { } } -template void caffe_copy(const int N, const int* X, int* Y); -template void caffe_copy(const int N, const unsigned int* X, - unsigned int* Y); -template void caffe_copy(const int N, const float* X, float* Y); -template void caffe_copy(const int N, const double* X, double* Y); +template void caffe_copy(const int_tp N, const int_tp* X, int_tp* Y); +template void caffe_copy(const int_tp N, const uint_tp* X, + uint_tp* Y); +template void caffe_copy(const int_tp N, const float* X, float* Y); +template void caffe_copy(const int_tp N, const double* X, double* Y); template<> -void caffe_scal(const int N, const float alpha, float *X) { +void caffe_scal(const int_tp N, const float alpha, float *X) { cblas_sscal(N, alpha, X, 1); } template<> -void caffe_scal(const int N, const double alpha, double *X) { +void caffe_scal(const int_tp N, const double alpha, double *X) { cblas_dscal(N, alpha, X, 1); } template<> -void caffe_cpu_axpby(const int N, const float alpha, const float* X, +void caffe_cpu_axpby(const int_tp N, const float alpha, const float* X, const float beta, float* Y) { cblas_saxpby(N, alpha, X, 1, beta, Y, 1); } template<> -void caffe_cpu_axpby(const int N, const double alpha, const double* X, +void caffe_cpu_axpby(const int_tp N, const double alpha, const double* X, const double beta, double* Y) { cblas_daxpby(N, alpha, X, 1, beta, Y, 1); } template<> -void caffe_add(const int n, const float* a, const float* b, float* y) { +void caffe_add(const int_tp n, const float* a, const float* b, float* y) { vsAdd(n, a, b, y); } template<> -void caffe_add(const int n, const double* a, const double* b, +void caffe_add(const int_tp n, const double* a, const double* b, double* y) { vdAdd(n, a, b, y); } template<> -void caffe_sub(const int n, const float* a, const float* b, float* y) { +void caffe_sub(const int_tp n, const float* a, const float* b, float* y) { vsSub(n, a, b, y); } template<> -void caffe_sub(const int n, const double* a, const double* b, +void caffe_sub(const int_tp n, const double* a, const double* b, double* y) { vdSub(n, a, b, y); } template<> -void caffe_mul(const int n, const float* a, const float* b, float* y) { +void caffe_mul(const int_tp n, const float* a, const float* b, float* y) { vsMul(n, a, b, y); } template<> -void caffe_mul(const int n, const double* a, const double* b, +void caffe_mul(const int_tp n, const double* a, const double* b, double* y) { vdMul(n, a, b, y); } template<> -void caffe_div(const int n, const float* a, const float* b, float* y) { +void caffe_div(const int_tp n, const float* a, const float* b, float* y) { vsDiv(n, a, b, y); } template<> -void caffe_div(const int n, const double* a, const double* b, +void caffe_div(const int_tp n, const double* a, const double* b, double* y) { vdDiv(n, a, b, y); } template<> -void caffe_powx(const int n, const float* a, const float b, float* y) { +void caffe_powx(const int_tp n, const float* a, const float b, float* y) { vsPowx(n, a, b, y); } template<> -void caffe_powx(const int n, const double* a, const double b, +void caffe_powx(const int_tp n, const double* a, const double b, double* y) { vdPowx(n, a, b, y); } template<> -void caffe_sqr(const int n, const float* a, float* y) { +void caffe_sqr(const int_tp n, const float* a, float* y) { vsSqr(n, a, y); } template<> -void caffe_sqr(const int n, const double* a, double* y) { +void caffe_sqr(const int_tp n, const double* a, double* y) { vdSqr(n, a, y); } template<> -void caffe_exp(const int n, const float* a, float* y) { +void caffe_exp(const int_tp n, const float* a, float* y) { vsExp(n, a, y); } template<> -void caffe_exp(const int n, const double* a, double* y) { +void caffe_exp(const int_tp n, const double* a, double* y) { vdExp(n, a, y); } template<> -void caffe_log(const int n, const float* a, float* y) { +void caffe_log(const int_tp n, const float* a, float* y) { vsLn(n, a, y); } template<> -void caffe_log(const int n, const double* a, double* y) { +void caffe_log(const int_tp n, const double* a, double* y) { vdLn(n, a, y); } template<> -void caffe_abs(const int n, const float* a, float* y) { +void caffe_abs(const int_tp n, const float* a, float* y) { vsAbs(n, a, y); } template<> -void caffe_abs(const int n, const double* a, double* y) { +void caffe_abs(const int_tp n, const double* a, double* y) { vdAbs(n, a, y); } -unsigned int caffe_rng_rand() { +uint_tp caffe_rng_rand() { return (*caffe_rng())(); } @@ -257,40 +259,40 @@ float caffe_nextafter(const float b); template double caffe_nextafter(const double b); -void caffe_rng_uniform(const int n, unsigned int* r) { +void caffe_rng_uniform(const int_tp n, uint_tp* r) { CHECK_GE(n, 0); CHECK(r); - boost::uniform_int random_distribution(INT32_MIN, INT32_MAX); - boost::variate_generator> + boost::uniform_int random_distribution(INT32_MIN, INT32_MAX); + boost::variate_generator> variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { r[i] = variate_generator(); } } template -void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r) { +void caffe_rng_uniform(const int_tp n, const Dtype a, const Dtype b, Dtype* r) { CHECK_GE(n, 0); CHECK(r); CHECK_LE(a, b); boost::uniform_real random_distribution(a, caffe_nextafter(b)); boost::variate_generator> variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { r[i] = variate_generator(); } } template -void caffe_rng_uniform(const int n, const float a, const float b, +void caffe_rng_uniform(const int_tp n, const float a, const float b, float* r); template -void caffe_rng_uniform(const int n, const double a, const double b, +void caffe_rng_uniform(const int_tp n, const double a, const double b, double* r); template -void caffe_rng_gaussian(const int n, const Dtype a, const Dtype sigma, +void caffe_rng_gaussian(const int_tp n, const Dtype a, const Dtype sigma, Dtype* r) { CHECK_GE(n, 0); CHECK(r); @@ -298,21 +300,21 @@ void caffe_rng_gaussian(const int n, const Dtype a, const Dtype sigma, boost::normal_distribution random_distribution(a, sigma); boost::variate_generator> variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { + for (int_tp i = 0; i < n; ++i) { r[i] = variate_generator(); } } template -void caffe_rng_gaussian(const int n, const float mu, const float sigma, +void caffe_rng_gaussian(const int_tp n, const float mu, const float sigma, float* r); template -void caffe_rng_gaussian(const int n, const double mu, +void caffe_rng_gaussian(const int_tp n, const double mu, const double sigma, double* r); -template -void caffe_rng_bernoulli(const int n, const Dtype p, int* r) { +template +void caffe_rng_bernoulli(const int_tp n, const Dtype p, Itype* r) { CHECK_GE(n, 0); CHECK(r); CHECK_GE(p, 0); @@ -320,66 +322,64 @@ void caffe_rng_bernoulli(const int n, const Dtype p, int* r) { boost::bernoulli_distribution random_distribution(p); boost::variate_generator> variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { - r[i] = variate_generator(); + for (int_tp i = 0; i < n; ++i) { + r[i] = static_cast(variate_generator()); } } template -void caffe_rng_bernoulli(const int n, const double p, int* r); +void caffe_rng_bernoulli(const int_tp n, const double p, unsigned long* r); template -void caffe_rng_bernoulli(const int n, const float p, int* r); +void caffe_rng_bernoulli(const int_tp n, const float p, unsigned long* r); -template -void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r) { - CHECK_GE(n, 0); - CHECK(r); - CHECK_GE(p, 0); - CHECK_LE(p, 1); - boost::bernoulli_distribution random_distribution(p); - boost::variate_generator> - variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { - r[i] = static_cast(variate_generator()); - } -} +template +void caffe_rng_bernoulli(const int_tp n, const double p, long* r); + +template +void caffe_rng_bernoulli(const int_tp n, const float p, long* r); + +template +void caffe_rng_bernoulli(const int_tp n, const double p, unsigned int* r); + +template +void caffe_rng_bernoulli(const int_tp n, const float p, unsigned int* r); template -void caffe_rng_bernoulli(const int n, const double p, unsigned int* r); +void caffe_rng_bernoulli(const int_tp n, const double p, int* r); template -void caffe_rng_bernoulli(const int n, const float p, unsigned int* r); +void caffe_rng_bernoulli(const int_tp n, const float p, int* r); template<> -float caffe_cpu_strided_dot(const int n, const float* x, const int incx, - const float* y, const int incy) { +float caffe_cpu_strided_dot(const int_tp n, const float* x, const int_tp incx, + const float* y, const int_tp incy) { return cblas_sdot(n, x, incx, y, incy); } template<> -double caffe_cpu_strided_dot(const int n, const double* x, - const int incx, const double* y, - const int incy) { +double caffe_cpu_strided_dot(const int_tp n, const double* x, + const int_tp incx, const double* y, + const int_tp incy) { return cblas_ddot(n, x, incx, y, incy); } template -Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y) { +Dtype caffe_cpu_dot(const int_tp n, const Dtype* x, const Dtype* y) { return caffe_cpu_strided_dot(n, x, 1, y, 1); } template -float caffe_cpu_dot(const int n, const float* x, const float* y); +float caffe_cpu_dot(const int_tp n, const float* x, const float* y); template -double caffe_cpu_dot(const int n, const double* x, const double* y); +double caffe_cpu_dot(const int_tp n, const double* x, const double* y); template<> -int caffe_cpu_hamming_distance(const int n, const float* x, +int_tp caffe_cpu_hamming_distance(const int_tp n, const float* x, const float* y) { - int dist = 0; - for (int i = 0; i < n; ++i) { + int_tp dist = 0; + for (int_tp i = 0; i < n; ++i) { dist += __builtin_popcount( static_cast(x[i]) ^ static_cast(y[i])); } @@ -387,10 +387,10 @@ int caffe_cpu_hamming_distance(const int n, const float* x, } template<> -int caffe_cpu_hamming_distance(const int n, const double* x, +int_tp caffe_cpu_hamming_distance(const int_tp n, const double* x, const double* y) { - int dist = 0; - for (int i = 0; i < n; ++i) { + int_tp dist = 0; + for (int_tp i = 0; i < n; ++i) { dist += __builtin_popcountl( static_cast(x[i]) ^ static_cast(y[i])); } @@ -398,24 +398,24 @@ int caffe_cpu_hamming_distance(const int n, const double* x, } template<> -float caffe_cpu_asum(const int n, const float* x) { +float caffe_cpu_asum(const int_tp n, const float* x) { return cblas_sasum(n, x, 1); } template<> -double caffe_cpu_asum(const int n, const double* x) { +double caffe_cpu_asum(const int_tp n, const double* x) { return cblas_dasum(n, x, 1); } template<> -void caffe_cpu_scale(const int n, const float alpha, const float *x, +void caffe_cpu_scale(const int_tp n, const float alpha, const float *x, float* y) { cblas_scopy(n, x, 1, y, 1); cblas_sscal(n, alpha, y, 1); } template<> -void caffe_cpu_scale(const int n, const double alpha, const double *x, +void caffe_cpu_scale(const int_tp n, const double alpha, const double *x, double* y) { cblas_dcopy(n, x, 1, y, 1); cblas_dscal(n, alpha, y, 1); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index e5796a68665..b7df928b2e8 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -17,13 +17,13 @@ namespace caffe { template<> void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const float alpha, + const CBLAS_TRANSPOSE TransB, const int_tp M, + const int_tp N, const int_tp K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; + int_tp lda = (TransA == CblasNoTrans) ? K : M; + int_tp ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = @@ -34,13 +34,13 @@ void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, template<> void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int M, - const int N, const int K, const double alpha, + const CBLAS_TRANSPOSE TransB, const int_tp M, + const int_tp N, const int_tp K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. - int lda = (TransA == CblasNoTrans) ? K : M; - int ldb = (TransB == CblasNoTrans) ? N : K; + int_tp lda = (TransA == CblasNoTrans) ? K : M; + int_tp ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = @@ -50,8 +50,8 @@ void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, } template<> -void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const float alpha, const float* A, +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, + const int_tp N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; @@ -60,8 +60,8 @@ void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, } template<> -void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, - const int N, const double alpha, const double* A, +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, + const int_tp N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; @@ -70,92 +70,92 @@ void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, } template<> -void caffe_gpu_axpy(const int N, const float alpha, const float* X, +void caffe_gpu_axpy(const int_tp N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template<> -void caffe_gpu_axpy(const int N, const double alpha, const double* X, +void caffe_gpu_axpy(const int_tp N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } -void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { +void caffe_gpu_memcpy(const uint_tp N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template<> -void caffe_gpu_scal(const int N, const float alpha, float *X) { +void caffe_gpu_scal(const int_tp N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template<> -void caffe_gpu_scal(const int N, const double alpha, double *X) { +void caffe_gpu_scal(const int_tp N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template<> -void caffe_gpu_axpby(const int N, const float alpha, const float* X, +void caffe_gpu_axpby(const int_tp N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal(N, beta, Y); caffe_gpu_axpy(N, alpha, X, Y); } template<> -void caffe_gpu_axpby(const int N, const double alpha, const double* X, +void caffe_gpu_axpby(const int_tp N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal(N, beta, Y); caffe_gpu_axpy(N, alpha, X, Y); } template<> -void caffe_gpu_dot(const int n, const float* x, const float* y, +void caffe_gpu_dot(const int_tp n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template<> -void caffe_gpu_dot(const int n, const double* x, const double* y, +void caffe_gpu_dot(const int_tp n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template<> -void caffe_gpu_asum(const int n, const float* x, float* y) { +void caffe_gpu_asum(const int_tp n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template<> -void caffe_gpu_asum(const int n, const double* x, double* y) { +void caffe_gpu_asum(const int_tp n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template<> -void caffe_gpu_scale(const int n, const float alpha, const float *x, +void caffe_gpu_scale(const int_tp n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template<> -void caffe_gpu_scale(const int n, const double alpha, const double *x, +void caffe_gpu_scale(const int_tp n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template -__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { +__global__ void set_kernel(const int_tp n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template -void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { +void caffe_gpu_set(const int_tp N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; @@ -165,19 +165,19 @@ void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { N, alpha, Y); } -template void caffe_gpu_set(const int N, const int alpha, int* Y); -template void caffe_gpu_set(const int N, const float alpha, float* Y); -template void caffe_gpu_set(const int N, const double alpha, double* Y); +template void caffe_gpu_set(const int_tp N, const int_tp alpha, int_tp* Y); +template void caffe_gpu_set(const int_tp N, const float alpha, float* Y); +template void caffe_gpu_set(const int_tp N, const double alpha, double* Y); template -__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { +__global__ void add_scalar_kernel(const int_tp n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template<> -void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { +void caffe_gpu_add_scalar(const int_tp N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -185,7 +185,7 @@ void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { } template<> -void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { +void caffe_gpu_add_scalar(const int_tp N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -193,7 +193,7 @@ void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { } template -__global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, +__global__ void add_kernel(const int_tp n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; @@ -201,7 +201,7 @@ __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, } template<> -void caffe_gpu_add(const int N, const float* a, const float* b, +void caffe_gpu_add(const int_tp N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -209,7 +209,7 @@ void caffe_gpu_add(const int N, const float* a, const float* b, } template<> -void caffe_gpu_add(const int N, const double* a, const double* b, +void caffe_gpu_add(const int_tp N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -217,7 +217,7 @@ void caffe_gpu_add(const int N, const double* a, const double* b, } template -__global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, +__global__ void sub_kernel(const int_tp n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; @@ -225,7 +225,7 @@ __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, } template<> -void caffe_gpu_sub(const int N, const float* a, const float* b, +void caffe_gpu_sub(const int_tp N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -233,7 +233,7 @@ void caffe_gpu_sub(const int N, const float* a, const float* b, } template<> -void caffe_gpu_sub(const int N, const double* a, const double* b, +void caffe_gpu_sub(const int_tp N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -241,7 +241,7 @@ void caffe_gpu_sub(const int N, const double* a, const double* b, } template -__global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, +__global__ void mul_kernel(const int_tp n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; @@ -249,7 +249,7 @@ __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, } template<> -void caffe_gpu_mul(const int N, const float* a, const float* b, +void caffe_gpu_mul(const int_tp N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -257,7 +257,7 @@ void caffe_gpu_mul(const int N, const float* a, const float* b, } template<> -void caffe_gpu_mul(const int N, const double* a, const double* b, +void caffe_gpu_mul(const int_tp N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -265,7 +265,7 @@ void caffe_gpu_mul(const int N, const double* a, const double* b, } template -__global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, +__global__ void div_kernel(const int_tp n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; @@ -273,7 +273,7 @@ __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, } template<> -void caffe_gpu_div(const int N, const float* a, const float* b, +void caffe_gpu_div(const int_tp N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -281,7 +281,7 @@ void caffe_gpu_div(const int N, const float* a, const float* b, } template<> -void caffe_gpu_div(const int N, const double* a, const double* b, +void caffe_gpu_div(const int_tp N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -289,70 +289,70 @@ void caffe_gpu_div(const int N, const double* a, const double* b, } template -__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { +__global__ void abs_kernel(const int_tp n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template<> -void caffe_gpu_abs(const int N, const float* a, float* y) { +void caffe_gpu_abs(const int_tp N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template<> -void caffe_gpu_abs(const int N, const double* a, double* y) { +void caffe_gpu_abs(const int_tp N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template -__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { +__global__ void exp_kernel(const int_tp n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template<> -void caffe_gpu_exp(const int N, const float* a, float* y) { +void caffe_gpu_exp(const int_tp N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template<> -void caffe_gpu_exp(const int N, const double* a, double* y) { +void caffe_gpu_exp(const int_tp N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template -__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { +__global__ void log_kernel(const int_tp n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template<> -void caffe_gpu_log(const int N, const float* a, float* y) { +void caffe_gpu_log(const int_tp N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template<> -void caffe_gpu_log(const int N, const double* a, double* y) { +void caffe_gpu_log(const int_tp N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( N, a, y); } template -__global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, +__global__ void powx_kernel(const int_tp n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); @@ -360,7 +360,7 @@ __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, } template<> -void caffe_gpu_powx(const int N, const float* a, const float alpha, +void caffe_gpu_powx(const int_tp N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -368,7 +368,7 @@ void caffe_gpu_powx(const int N, const float* a, const float alpha, } template<> -void caffe_gpu_powx(const int N, const double* a, const double alpha, +void caffe_gpu_powx(const int_tp N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS)( @@ -379,7 +379,7 @@ DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC( sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); -__global__ void popc_kernel(const int n, const float* a, const float* b, +__global__ void popc_kernel(const int_tp n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc( @@ -387,7 +387,7 @@ __global__ void popc_kernel(const int n, const float* a, const float* b, } } -__global__ void popcll_kernel(const int n, const double* a, const double* b, +__global__ void popcll_kernel(const int_tp n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll( @@ -396,7 +396,7 @@ __global__ void popcll_kernel(const int n, const double* a, const double* b, } template<> -uint32_t caffe_gpu_hamming_distance(const int n, const float* x, +uint32_t caffe_gpu_hamming_distance(const int_tp n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). @@ -410,7 +410,7 @@ uint32_t caffe_gpu_hamming_distance(const int n, const float* x, } template<> -uint32_t caffe_gpu_hamming_distance(const int n, const double* x, +uint32_t caffe_gpu_hamming_distance(const int_tp n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). @@ -425,12 +425,16 @@ uint32_t caffe_gpu_hamming_distance(const int n, const double* x, thrust::plus()); } -void caffe_gpu_rng_uniform(const int n, unsigned int* r) { +void caffe_gpu_rng_uniform(const int_tp n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } +void caffe_gpu_rng_uniform(const int_tp n, unsigned long long* r) { + CURAND_CHECK(curandGenerateLongLong(Caffe::curand_generator64(), r, n)); +} + template<> -void caffe_gpu_rng_uniform(const int n, const float a, const float b, +void caffe_gpu_rng_uniform(const int_tp n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; @@ -443,7 +447,7 @@ void caffe_gpu_rng_uniform(const int n, const float a, const float b, } template<> -void caffe_gpu_rng_uniform(const int n, const double a, const double b, +void caffe_gpu_rng_uniform(const int_tp n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; @@ -456,14 +460,14 @@ void caffe_gpu_rng_uniform(const int n, const double a, const double b, } template<> -void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, +void caffe_gpu_rng_gaussian(const int_tp n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template<> -void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, +void caffe_gpu_rng_gaussian(const int_tp n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 6874827edb9..3e9926acd4f 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -17,7 +17,7 @@ bool NetNeedsUpgrade(const NetParameter& net_param) { } bool NetNeedsV0ToV1Upgrade(const NetParameter& net_param) { - for (int i = 0; i < net_param.layers_size(); ++i) { + for (int_tp i = 0; i < net_param.layers_size(); ++i) { if (net_param.layers(i).has_layer()) { return true; } @@ -40,14 +40,14 @@ bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers, if (v0_net_param.has_name()) { net_param->set_name(v0_net_param.name()); } - for (int i = 0; i < v0_net_param.layers_size(); ++i) { + for (int_tp i = 0; i < v0_net_param.layers_size(); ++i) { is_fully_compatible &= UpgradeV0LayerParameter(v0_net_param.layers(i), net_param->add_layers()); } - for (int i = 0; i < v0_net_param.input_size(); ++i) { + for (int_tp i = 0; i < v0_net_param.input_size(); ++i) { net_param->add_input(v0_net_param.input(i)); } - for (int i = 0; i < v0_net_param.input_dim_size(); ++i) { + for (int_tp i = 0; i < v0_net_param.input_dim_size(); ++i) { net_param->add_input_dim(v0_net_param.input_dim(i)); } if (v0_net_param.has_force_backward()) { @@ -63,25 +63,25 @@ void UpgradeV0PaddingLayers(const NetParameter& param, param_upgraded_pad->CopyFrom(param); param_upgraded_pad->clear_layers(); // Figure out which layer each bottom blob comes from. - map blob_name_to_last_top_idx; - for (int i = 0; i < param.input_size(); ++i) { + map blob_name_to_last_top_idx; + for (int_tp i = 0; i < param.input_size(); ++i) { const string& blob_name = param.input(i); blob_name_to_last_top_idx[blob_name] = -1; } - for (int i = 0; i < param.layers_size(); ++i) { + for (int_tp i = 0; i < param.layers_size(); ++i) { const V1LayerParameter& layer_connection = param.layers(i); const V0LayerParameter& layer_param = layer_connection.layer(); // Add the layer to the new net, unless it's a padding layer. if (layer_param.type() != "padding") { param_upgraded_pad->add_layers()->CopyFrom(layer_connection); } - for (int j = 0; j < layer_connection.bottom_size(); ++j) { + for (int_tp j = 0; j < layer_connection.bottom_size(); ++j) { const string& blob_name = layer_connection.bottom(j); if (blob_name_to_last_top_idx.find(blob_name) == blob_name_to_last_top_idx.end()) { LOG(FATAL)<< "Unknown blob input " << blob_name << " to layer " << j; } - const int top_idx = blob_name_to_last_top_idx[blob_name]; + const int_tp top_idx = blob_name_to_last_top_idx[blob_name]; if (top_idx == -1) { continue; } @@ -101,14 +101,14 @@ void UpgradeV0PaddingLayers(const NetParameter& param, << "Padding Layer takes a single blob as input."; CHECK_EQ(source_layer.top_size(), 1) << "Padding Layer produces a single blob as output."; - int layer_index = param_upgraded_pad->layers_size() - 1; + int_tp layer_index = param_upgraded_pad->layers_size() - 1; param_upgraded_pad->mutable_layers(layer_index)->mutable_layer() ->set_pad(source_layer.layer().pad()); param_upgraded_pad->mutable_layers(layer_index)->set_bottom( j, source_layer.bottom(0)); } } - for (int j = 0; j < layer_connection.top_size(); ++j) { + for (int_tp j = 0; j < layer_connection.top_size(); ++j) { const string& blob_name = layer_connection.top(j); blob_name_to_last_top_idx[blob_name] = i; } @@ -119,10 +119,10 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, V1LayerParameter* layer_param) { bool is_fully_compatible = true; layer_param->Clear(); - for (int i = 0; i < v0_layer_connection.bottom_size(); ++i) { + for (int_tp i = 0; i < v0_layer_connection.bottom_size(); ++i) { layer_param->add_bottom(v0_layer_connection.bottom(i)); } - for (int i = 0; i < v0_layer_connection.top_size(); ++i) { + for (int_tp i = 0; i < v0_layer_connection.top_size(); ++i) { layer_param->add_top(v0_layer_connection.top(i)); } if (v0_layer_connection.has_layer()) { @@ -134,13 +134,13 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, if (v0_layer_param.has_type()) { layer_param->set_type(UpgradeV0LayerType(type)); } - for (int i = 0; i < v0_layer_param.blobs_size(); ++i) { + for (int_tp i = 0; i < v0_layer_param.blobs_size(); ++i) { layer_param->add_blobs()->CopyFrom(v0_layer_param.blobs(i)); } - for (int i = 0; i < v0_layer_param.blobs_lr_size(); ++i) { + for (int_tp i = 0; i < v0_layer_param.blobs_lr_size(); ++i) { layer_param->add_blobs_lr(v0_layer_param.blobs_lr(i)); } - for (int i = 0; i < v0_layer_param.weight_decay_size(); ++i) { + for (int_tp i = 0; i < v0_layer_param.weight_decay_size(); ++i) { layer_param->add_weight_decay(v0_layer_param.weight_decay(i)); } if (v0_layer_param.has_num_output()) { @@ -522,7 +522,7 @@ V1LayerParameter_LayerType UpgradeV0LayerType(const string& type) { } bool NetNeedsDataUpgrade(const NetParameter& net_param) { - for (int i = 0; i < net_param.layers_size(); ++i) { + for (int_tp i = 0; i < net_param.layers_size(); ++i) { if (net_param.layers(i).type() == V1LayerParameter_LayerType_DATA) { DataParameter layer_param = net_param.layers(i).data_param(); if (layer_param.has_scale()) { @@ -599,7 +599,7 @@ bool NetNeedsDataUpgrade(const NetParameter& net_param) { } while (0) void UpgradeNetDataTransformation(NetParameter* net_param) { - for (int i = 0; i < net_param->layers_size(); ++i) { + for (int_tp i = 0; i < net_param->layers_size(); ++i) { CONVERT_LAYER_TRANSFORM_PARAM(DATA, Data, data); CONVERT_LAYER_TRANSFORM_PARAM(IMAGE_DATA, ImageData, image_data); CONVERT_LAYER_TRANSFORM_PARAM(WINDOW_DATA, WindowData, window_data); @@ -663,7 +663,7 @@ bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) { net_param->CopyFrom(v1_net_param); net_param->clear_layers(); net_param->clear_layer(); - for (int i = 0; i < v1_net_param.layers_size(); ++i) { + for (int_tp i = 0; i < v1_net_param.layers_size(); ++i) { if (!UpgradeV1LayerParameter(v1_net_param.layers(i), net_param->add_layer())) { LOG(ERROR)<< "Upgrade of input layer " << i << " failed."; @@ -677,35 +677,35 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, LayerParameter* layer_param) { layer_param->Clear(); bool is_fully_compatible = true; - for (int i = 0; i < v1_layer_param.bottom_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.bottom_size(); ++i) { layer_param->add_bottom(v1_layer_param.bottom(i)); } - for (int i = 0; i < v1_layer_param.top_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.top_size(); ++i) { layer_param->add_top(v1_layer_param.top(i)); } if (v1_layer_param.has_name()) { layer_param->set_name(v1_layer_param.name()); } - for (int i = 0; i < v1_layer_param.include_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.include_size(); ++i) { layer_param->add_include()->CopyFrom(v1_layer_param.include(i)); } - for (int i = 0; i < v1_layer_param.exclude_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.exclude_size(); ++i) { layer_param->add_exclude()->CopyFrom(v1_layer_param.exclude(i)); } if (v1_layer_param.has_type()) { layer_param->set_type(UpgradeV1LayerType(v1_layer_param.type())); } - for (int i = 0; i < v1_layer_param.blobs_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.blobs_size(); ++i) { layer_param->add_blobs()->CopyFrom(v1_layer_param.blobs(i)); } - for (int i = 0; i < v1_layer_param.param_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.param_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } layer_param->mutable_param(i)->set_name(v1_layer_param.param(i)); } ParamSpec_DimCheckMode mode; - for (int i = 0; i < v1_layer_param.blob_share_mode_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.blob_share_mode_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } @@ -723,20 +723,20 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, } layer_param->mutable_param(i)->set_share_mode(mode); } - for (int i = 0; i < v1_layer_param.blobs_lr_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.blobs_lr_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } layer_param->mutable_param(i)->set_lr_mult(v1_layer_param.blobs_lr(i)); } - for (int i = 0; i < v1_layer_param.weight_decay_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.weight_decay_size(); ++i) { while (layer_param->param_size() <= i) { layer_param->add_param(); } layer_param->mutable_param(i)->set_decay_mult( v1_layer_param.weight_decay(i)); } - for (int i = 0; i < v1_layer_param.loss_weight_size(); ++i) { + for (int_tp i = 0; i < v1_layer_param.loss_weight_size(); ++i) { layer_param->add_loss_weight(v1_layer_param.loss_weight(i)); } if (v1_layer_param.has_accuracy_param()) { diff --git a/src/gtest/gtest_main.cc b/src/gtest/gtest_main.cc index a09bbe0c6c5..ee13ed2345d 100644 --- a/src/gtest/gtest_main.cc +++ b/src/gtest/gtest_main.cc @@ -31,7 +31,7 @@ #include "gtest/gtest.h" -GTEST_API_ int main(int argc, char **argv) { +GTEST_API_ int_tp main(int_tp argc, char **argv) { std::cout << "Running main() from gtest_main.cc\n"; testing::InitGoogleTest(&argc, argv); diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 60aad999232..362afb51a4c 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -194,7 +194,7 @@ int train() { Caffe::SetDevices(gpus); ostringstream s; - for (int i = 0; i < gpus.size(); ++i) { + for (int_tp i = 0; i < gpus.size(); ++i) { s << (i ? ", " : "") << gpus[i]; } LOG(INFO) << "Using GPUs " << s.str(); @@ -212,7 +212,7 @@ int train() { GetRequestedAction(FLAGS_sighup_effect)); shared_ptr > - solver(caffe::GetSolver(solver_param)); + solver(caffe::SolverRegistry::CreateSolver(solver_param)); solver->SetActionFunction(signal_handler.GetActionFunction()); @@ -226,7 +226,7 @@ int train() { if (gpus.size() > 1) { caffe::P2PSync sync(solver, NULL, solver->param()); std::vector devices; - for (int i = 0; i < gpus.size(); ++i) { + for (int_tp i = 0; i < gpus.size(); ++i) { devices.push_back(Caffe::Get().GetDevice(i)); } sync.run(devices); @@ -268,15 +268,15 @@ int test() { vector test_score_output_id; vector test_score; float loss = 0; - for (int i = 0; i < FLAGS_iterations; ++i) { + for (int_tp i = 0; i < FLAGS_iterations; ++i) { float iter_loss; const vector*>& result = caffe_net.Forward(bottom_vec, &iter_loss); loss += iter_loss; - int idx = 0; - for (int j = 0; j < result.size(); ++j) { + int_tp idx = 0; + for (int_tp j = 0; j < result.size(); ++j) { const float* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k, ++idx) { + for (int_tp k = 0; k < result[j]->count(); ++k, ++idx) { const float score = result_vec[k]; if (i == 0) { test_score.push_back(score); @@ -292,7 +292,7 @@ int test() { } loss /= FLAGS_iterations; LOG(INFO) << "Loss: " << loss; - for (int i = 0; i < test_score.size(); ++i) { + for (int_tp i = 0; i < test_score.size(); ++i) { const std::string& output_name = caffe_net.blob_names()[ caffe_net.output_blob_indices()[test_score_output_id[i]]]; const float loss_weight = caffe_net.blob_loss_weights()[ @@ -359,11 +359,11 @@ int time() { std::vector backward_time_per_layer(layers.size(), 0.0); double forward_time = 0.0; double backward_time = 0.0; - for (int j = 0; j < FLAGS_iterations; ++j) { + for (int_tp j = 0; j < FLAGS_iterations; ++j) { Timer iter_timer; iter_timer.Start(); forward_timer.Start(); - for (int i = 0; i < layers.size(); ++i) { + for (int_tp i = 0; i < layers.size(); ++i) { timer.Start(); layers[i]->Forward(bottom_vecs[i], top_vecs[i]); Caffe::Synchronize(Caffe::GetDefaultDevice()->id()); @@ -371,7 +371,7 @@ int time() { } forward_time += forward_timer.MicroSeconds(); backward_timer.Start(); - for (int i = layers.size() - 1; i >= 0; --i) { + for (int_tp i = layers.size() - 1; i >= 0; --i) { timer.Start(); layers[i]->Backward(top_vecs[i], bottom_need_backward[i], bottom_vecs[i]); @@ -383,7 +383,7 @@ int time() { << iter_timer.MilliSeconds() << " ms."; } LOG(INFO) << "Average time per layer: "; - for (int i = 0; i < layers.size(); ++i) { + for (int_tp i = 0; i < layers.size(); ++i) { const caffe::string& layername = layers[i]->layer_param().name(); LOG(INFO) << std::setfill(' ') << std::setw(10) << layername << "\tforward: " << forward_time_per_layer[i] / 1000 / diff --git a/tools/compute_image_mean.cpp b/tools/compute_image_mean.cpp index 2035d515195..4f27d3a83cc 100644 --- a/tools/compute_image_mean.cpp +++ b/tools/compute_image_mean.cpp @@ -46,7 +46,7 @@ int main(int argc, char** argv) { scoped_ptr cursor(db->NewCursor()); BlobProto sum_blob; - int count = 0; + int_tp count = 0; // load first datum Datum datum; datum.ParseFromString(cursor->value()); @@ -59,10 +59,10 @@ int main(int argc, char** argv) { sum_blob.set_channels(datum.channels()); sum_blob.set_height(datum.height()); sum_blob.set_width(datum.width()); - const int data_size = datum.channels() * datum.height() * datum.width(); - int size_in_datum = std::max(datum.data().size(), + const int_tp data_size = datum.channels() * datum.height() * datum.width(); + int_tp size_in_datum = std::max(datum.data().size(), datum.float_data_size()); - for (int i = 0; i < size_in_datum; ++i) { + for (int_tp i = 0; i < size_in_datum; ++i) { sum_blob.add_data(0.); } LOG(INFO) << "Starting Iteration"; @@ -72,18 +72,18 @@ int main(int argc, char** argv) { DecodeDatumNative(&datum); const std::string& data = datum.data(); - size_in_datum = std::max(datum.data().size(), + size_in_datum = std::max(datum.data().size(), datum.float_data_size()); CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " << size_in_datum; if (data.size() != 0) { CHECK_EQ(data.size(), size_in_datum); - for (int i = 0; i < size_in_datum; ++i) { + for (int_tp i = 0; i < size_in_datum; ++i) { sum_blob.set_data(i, sum_blob.data(i) + (uint8_t)data[i]); } } else { CHECK_EQ(datum.float_data_size(), size_in_datum); - for (int i = 0; i < size_in_datum; ++i) { + for (int_tp i = 0; i < size_in_datum; ++i) { sum_blob.set_data(i, sum_blob.data(i) + static_cast(datum.float_data(i))); } @@ -98,7 +98,7 @@ int main(int argc, char** argv) { if (count % 10000 != 0) { LOG(INFO) << "Processed " << count << " files."; } - for (int i = 0; i < sum_blob.data_size(); ++i) { + for (int_tp i = 0; i < sum_blob.data_size(); ++i) { sum_blob.set_data(i, sum_blob.data(i) / count); } // Write to disk @@ -106,12 +106,12 @@ int main(int argc, char** argv) { LOG(INFO) << "Write to " << argv[2]; WriteProtoToBinaryFile(sum_blob, argv[2]); } - const int channels = sum_blob.channels(); - const int dim = sum_blob.height() * sum_blob.width(); + const int_tp channels = sum_blob.channels(); + const int_tp dim = sum_blob.height() * sum_blob.width(); std::vector mean_values(channels, 0.0); LOG(INFO) << "Number of channels: " << channels; - for (int c = 0; c < channels; ++c) { - for (int i = 0; i < dim; ++i) { + for (int_tp c = 0; c < channels; ++c) { + for (int_tp i = 0; i < dim; ++i) { mean_values[c] += sum_blob.data(dim * c + i); } LOG(INFO) << "mean_value channel [" << c << "]:" << mean_values[c] / dim; diff --git a/tools/convert_imageset.cpp b/tools/convert_imageset.cpp index e51a2631077..8aa6091d0f7 100644 --- a/tools/convert_imageset.cpp +++ b/tools/convert_imageset.cpp @@ -71,9 +71,9 @@ int main(int argc, char** argv) { const string encode_type = FLAGS_encode_type; std::ifstream infile(argv[2]); - std::vector > lines; + std::vector > lines; std::string filename; - int label; + int_tp label; while (infile >> filename >> label) { lines.push_back(std::make_pair(filename, label)); } @@ -87,8 +87,8 @@ int main(int argc, char** argv) { if (encode_type.size() && !encoded) LOG(INFO) << "encode_type specified, assuming encoded=true."; - int resize_height = std::max(0, FLAGS_resize_height); - int resize_width = std::max(0, FLAGS_resize_width); + int_tp resize_height = std::max(0, FLAGS_resize_height); + int_tp resize_width = std::max(0, FLAGS_resize_width); // Create new DB scoped_ptr db(db::GetDB(FLAGS_backend)); @@ -98,19 +98,19 @@ int main(int argc, char** argv) { // Storing to db std::string root_folder(argv[1]); Datum datum; - int count = 0; - const int kMaxKeyLength = 256; + int_tp count = 0; + const int_tp kMaxKeyLength = 256; char key_cstr[kMaxKeyLength]; - int data_size = 0; + int_tp data_size = 0; bool data_size_initialized = false; - for (int line_id = 0; line_id < lines.size(); ++line_id) { + for (int_tp line_id = 0; line_id < lines.size(); ++line_id) { bool status; std::string enc = encode_type; if (encoded && !enc.size()) { // Guess the encoding type from the file name string fn = lines[line_id].first; - size_t p = fn.rfind('.'); + uint_tp p = fn.rfind('.'); if ( p == fn.npos ) LOG(WARNING) << "Failed to guess the encoding of '" << fn << "'"; enc = fn.substr(p); @@ -131,7 +131,7 @@ int main(int argc, char** argv) { } } // sequential - int length = snprintf(key_cstr, kMaxKeyLength, "%08d_%s", line_id, + int_tp length = snprintf(key_cstr, kMaxKeyLength, "%08zd_%s", line_id, lines[line_id].first.c_str()); // Put in db diff --git a/tools/extra/extract_seconds.py b/tools/extra/extract_seconds.py index 591a51f96bd..4a2f227b1b7 100755 --- a/tools/extra/extract_seconds.py +++ b/tools/extra/extract_seconds.py @@ -6,15 +6,15 @@ def extract_datetime_from_line(line, year): # Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565 line = line.strip().split() - month = int(line[0][1:3]) - day = int(line[0][3:]) + month = int_tp(line[0][1:3]) + day = int_tp(line[0][3:]) timestamp = line[1] pos = timestamp.rfind('.') - ts = [int(x) for x in timestamp[:pos].split(':')] + ts = [int_tp(x) for x in timestamp[:pos].split(':')] hour = ts[0] minute = ts[1] second = ts[2] - microsecond = int(timestamp[pos + 1:]) + microsecond = int_tp(timestamp[pos + 1:]) dt = datetime.datetime(year, month, day, hour, minute, second, microsecond) return dt diff --git a/tools/extra/plot_training_log.py.example b/tools/extra/plot_training_log.py.example index b6fda54e01c..bfe76a41558 100755 --- a/tools/extra/plot_training_log.py.example +++ b/tools/extra/plot_training_log.py.example @@ -169,7 +169,7 @@ if __name__ == '__main__': if len(sys.argv) < 4: print_help() else: - chart_type = int(sys.argv[1]) + chart_type = int_tp(sys.argv[1]) if not is_valid_chart_type(chart_type): print_help() path_to_png = sys.argv[2] diff --git a/tools/extra/resize_and_crop_images.py b/tools/extra/resize_and_crop_images.py index c844f590c06..e4e30d9070a 100755 --- a/tools/extra/resize_and_crop_images.py +++ b/tools/extra/resize_and_crop_images.py @@ -56,11 +56,11 @@ def resize_and_crop_image(self, input_file, output_file, output_side_length = 25 wRatio = 1.0 * x2/box[0] hRatio = 1.0 * y2/box[1] if hRatio > wRatio: - y1 = int(y2/2-box[1]*wRatio/2) - y2 = int(y2/2+box[1]*wRatio/2) + y1 = int_tp(y2/2-box[1]*wRatio/2) + y2 = int_tp(y2/2+box[1]*wRatio/2) else: - x1 = int(x2/2-box[0]*hRatio/2) - x2 = int(x2/2+box[0]*hRatio/2) + x1 = int_tp(x2/2-box[0]*hRatio/2) + x2 = int_tp(x2/2+box[0]*hRatio/2) img = img.crop((x1,y1,x2,y2)) #Resize the image with best quality algorithm ANTI-ALIAS diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index 084c9bf88df..b34e1a67ef3 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -52,7 +52,7 @@ int feature_extraction_pipeline(int argc, char** argv) { arg_pos = num_required_args; if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) { LOG(ERROR)<< "Using GPU"; - uint device_id = 0; + int device_id = 0; if (argc > arg_pos + 1) { device_id = atoi(argv[arg_pos + 1]); CHECK_GE(device_id, 0); @@ -110,20 +110,20 @@ int feature_extraction_pipeline(int argc, char** argv) { boost::is_any_of(",")); CHECK_EQ(blob_names.size(), dataset_names.size()) << " the number of blob names and dataset names must be equal"; - size_t num_features = blob_names.size(); + uint_tp num_features = blob_names.size(); - for (size_t i = 0; i < num_features; i++) { + for (uint_tp i = 0; i < num_features; i++) { CHECK(feature_extraction_net->has_blob(blob_names[i])) << "Unknown feature blob name " << blob_names[i] << " in the network " << feature_extraction_proto; } - int num_mini_batches = atoi(argv[++arg_pos]); + int_tp num_mini_batches = atoi(argv[++arg_pos]); std::vector > feature_dbs; std::vector > txns; const char* db_type = argv[++arg_pos]; - for (size_t i = 0; i < num_features; ++i) { + for (uint_tp i = 0; i < num_features; ++i) { LOG(INFO)<< "Opening dataset " << dataset_names[i]; shared_ptr db(db::GetDB(db_type)); db->Open(dataset_names.at(i), db::NEW); @@ -135,19 +135,19 @@ int feature_extraction_pipeline(int argc, char** argv) { LOG(ERROR)<< "Extacting Features"; Datum datum; - const int kMaxKeyStrLength = 100; + const int_tp kMaxKeyStrLength = 100; char key_str[kMaxKeyStrLength]; std::vector*> input_vec; - std::vector image_indices(num_features, 0); - for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) { + std::vector image_indices(num_features, 0); + for (int_tp batch_index = 0; batch_index < num_mini_batches; ++batch_index) { feature_extraction_net->Forward(input_vec); - for (int i = 0; i < num_features; ++i) { + for (int_tp i = 0; i < num_features; ++i) { const shared_ptr > feature_blob = feature_extraction_net ->blob_by_name(blob_names[i]); - int batch_size = feature_blob->num(); - int dim_features = feature_blob->count() / batch_size; + int_tp batch_size = feature_blob->num(); + int_tp dim_features = feature_blob->count() / batch_size; const Dtype* feature_blob_data; - for (int n = 0; n < batch_size; ++n) { + for (int_tp n = 0; n < batch_size; ++n) { datum.set_height(feature_blob->height()); datum.set_width(feature_blob->width()); datum.set_channels(feature_blob->channels()); @@ -155,10 +155,10 @@ int feature_extraction_pipeline(int argc, char** argv) { datum.clear_float_data(); feature_blob_data = feature_blob->cpu_data() + feature_blob->offset(n); - for (int d = 0; d < dim_features; ++d) { + for (int_tp d = 0; d < dim_features; ++d) { datum.add_float_data(feature_blob_data[d]); } - int length = snprintf(key_str, kMaxKeyStrLength, "%010d", + int_tp length = snprintf(key_str, kMaxKeyStrLength, "%010zd", image_indices[i]); string out; CHECK(datum.SerializeToString(&out)); @@ -170,11 +170,11 @@ int feature_extraction_pipeline(int argc, char** argv) { LOG(ERROR)<< "Extracted features of " << image_indices[i] << " query images for feature blob " << blob_names[i]; } - } // for (int n = 0; n < batch_size; ++n) - } // for (int i = 0; i < num_features; ++i) - } // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) + } // for (int_tp n = 0; n < batch_size; ++n) + } // for (int_tp i = 0; i < num_features; ++i) + } // for (int_tp batch_index = 0; batch_index < num_mini_batches; ++batch_index) // write the last batch - for (int i = 0; i < num_features; ++i) { + for (int_tp i = 0; i < num_features; ++i) { if (image_indices[i] % 1000 != 0) { txns.at(i)->Commit(); } From f332d2c2ff98ca0cbcacdb37b617a2936a8357c9 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Sat, 24 Oct 2015 05:13:42 +0200 Subject: [PATCH 196/254] Full 64 bit indexing support throughout Caffe. --- src/caffe/greentea/cl_kernels.cpp | 28 +++++++++---------- src/caffe/greentea/cl_kernels/activation.cl | 4 +-- src/caffe/greentea/cl_kernels/concat.cl | 2 +- .../greentea/cl_kernels/contrastive_loss.cl | 2 +- src/caffe/greentea/cl_kernels/pooling.cl | 4 +-- src/caffe/greentea/cl_kernels/pooling_nd.cl | 4 +-- src/caffe/greentea/cl_kernels/slice.cl | 2 +- src/caffe/greentea/cl_kernels/softmax_loss.cl | 4 +-- src/caffe/layers/eltwise_layer.cu | 2 +- src/caffe/layers/prelu_layer.cu | 22 +++++++++------ src/caffe/util/math_functions.cpp | 2 +- 11 files changed, 40 insertions(+), 36 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 1ce271750b1..e8f32078711 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -7,13 +7,13 @@ namespace caffe { std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff, const int_tp in_diff_off,\n __global const Dtype* in_data, const int_tp in_data_off,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT +std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT -std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int_tp forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT -std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int_tp legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT @@ -25,19 +25,19 @@ std::string im2col_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\ std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT -std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int_tp forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int_tp has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int_tp has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff, const int_tp in_diff_off,\n __global const Dtype* in_data, const int_tp in_data_off,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT +std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT -std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int_tp forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT -std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int_tp legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT @@ -49,11 +49,11 @@ std::string im2col_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int_tp use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT -std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int_tp forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int_tp has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int_tp has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index 173295ebc27..b07ce474a77 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -94,8 +94,8 @@ __kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channe __kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows, const int_tp rowPitch, - __global const Dtype* in_diff, const int_tp in_diff_off, - __global const Dtype* in_data, const int_tp in_data_off, + __global const Dtype* in_diff, + __global const Dtype* in_data, __global Dtype* out_diff) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); diff --git a/src/caffe/greentea/cl_kernels/concat.cl b/src/caffe/greentea/cl_kernels/concat.cl index 14e7ae7324f..4406f97b217 100644 --- a/src/caffe/greentea/cl_kernels/concat.cl +++ b/src/caffe/greentea/cl_kernels/concat.cl @@ -3,7 +3,7 @@ #endif __kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data, - const int_tp forward, const int_tp num_concats, + const int forward, const int_tp num_concats, const int_tp concat_size, const int_tp top_concat_axis, const int_tp bottom_concat_axis, diff --git a/src/caffe/greentea/cl_kernels/contrastive_loss.cl b/src/caffe/greentea/cl_kernels/contrastive_loss.cl index 16301731799..867082501f2 100644 --- a/src/caffe/greentea/cl_kernels/contrastive_loss.cl +++ b/src/caffe/greentea/cl_kernels/contrastive_loss.cl @@ -3,7 +3,7 @@ #endif __kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels, - const Dtype margin, const int_tp legacy_version, + const Dtype margin, const int legacy_version, const Dtype alpha, __global const Dtype* y, __global const Dtype* diff, __global const Dtype* dist_sq, __global Dtype *bottom_diff) { diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index 372f6f288bf..f38f84af782 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -9,7 +9,7 @@ __kernel void TEMPLATE(max_pool_forward,Dtype)( const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h, const int_tp pad_w, __global Dtype* top_data, - const int_tp use_mask, __global int_tp* mask, __global Dtype* top_mask) { + const int use_mask, __global int_tp* mask, __global Dtype* top_mask) { for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { const int_tp pw = index % pooled_width; @@ -155,7 +155,7 @@ __kernel void TEMPLATE(sto_pool_forward_test,Dtype)( __kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads, __global const Dtype* top_diff, - const int_tp use_mask, + const int use_mask, __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num, diff --git a/src/caffe/greentea/cl_kernels/pooling_nd.cl b/src/caffe/greentea/cl_kernels/pooling_nd.cl index 40572cdcf0c..bf49f2d42eb 100644 --- a/src/caffe/greentea/cl_kernels/pooling_nd.cl +++ b/src/caffe/greentea/cl_kernels/pooling_nd.cl @@ -14,7 +14,7 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n, __global const int_tp* kstride, __global const int_tp* pad, __global Dtype* top_data, - const int_tp use_mask, + const int use_mask, __global int_tp* mask, __global Dtype* top_mask) { int_tp d_idx[6]; int_tp d_start[6]; @@ -91,7 +91,7 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n, __kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n, const int_tp num_axes, __global const Dtype* top_diff, - const int_tp use_mask, + const int use_mask, __global const int_tp* mask, __global const Dtype* top_mask, const int_tp channels, diff --git a/src/caffe/greentea/cl_kernels/slice.cl b/src/caffe/greentea/cl_kernels/slice.cl index 5bb8b2f97d0..2203ffac4cb 100644 --- a/src/caffe/greentea/cl_kernels/slice.cl +++ b/src/caffe/greentea/cl_kernels/slice.cl @@ -4,7 +4,7 @@ __kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads, __global const Dtype* in_data, - const int_tp forward, const int_tp num_slices, + const int forward, const int_tp num_slices, const int_tp slice_size, const int_tp bottom_slice_axis, const int_tp top_slice_axis, diff --git a/src/caffe/greentea/cl_kernels/softmax_loss.cl b/src/caffe/greentea/cl_kernels/softmax_loss.cl index 100fd171644..045f9be41a0 100644 --- a/src/caffe/greentea/cl_kernels/softmax_loss.cl +++ b/src/caffe/greentea/cl_kernels/softmax_loss.cl @@ -6,7 +6,7 @@ __kernel void TEMPLATE(softmax_loss_forward,Dtype)( int_tp n, __global const Dtype* prob_data, __global const Dtype* label, __global Dtype* loss, const int_tp num, const int_tp dim, const int_tp spatial_dim, - const int_tp has_ignore_label_, const int_tp ignore_label_, + const int has_ignore_label_, const int_tp ignore_label_, __global Dtype* counts) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { @@ -32,7 +32,7 @@ __kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads, const int_tp num, const int_tp dim, const int_tp spatial_dim, - const int_tp has_ignore_label_, + const int has_ignore_label_, const int_tp ignore_label_, __global Dtype* counts) { diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index be213c33d42..5bdd6eb0b74 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -121,7 +121,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_max_forward(count, WrapHandle((cl_mem)(bottom[0]->gpu_data()), &ctx), - WrapHandle((cl_mem)(bottom[1]->gpu_data()), &ctx), 0, + WrapHandle((cl_mem)(bottom[1]->gpu_data()), &ctx), 0L, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index fc83bab1bb2..ca6a8516d7a 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -178,20 +178,24 @@ void PReLULayer::Backward_gpu(const vector*>& top, CL_KERNEL_SELECT("prelu_param_backward")); viennacl::ocl::enqueue( oclk_prelu(cdim, bottom[0]->num(), top[0]->offset(1), - WrapHandle((cl_mem)top_diff, &ctx), 0, - WrapHandle((cl_mem)bottom_data, &ctx), 0, - WrapHandle((cl_mem)(backward_buff_.mutable_gpu_diff()), &ctx)), + WrapHandle((cl_mem)top_diff, &ctx), + WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), &ctx)), ctx.get_queue()); if (channel_shared_) { Dtype dsum; - caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), - multiplier_.gpu_data(), &dsum); - caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + greentea_gpu_dot(this->device_->id(), channels * dim, + (cl_mem) (backward_buff_.gpu_diff()), 0, + (cl_mem) (multiplier_.gpu_data()), 0, &dsum); + greentea_gpu_add_scalar(this->device_->id(), + this->blobs_[0]->count(), Dtype(dsum), + (cl_mem) slope_diff, 0); } else { - caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., - slope_diff); + greentea_gpu_gemv(this->device_->id(), CblasNoTrans, channels, + dim, 1., (cl_mem) (backward_buff_.gpu_diff()), + 0, (cl_mem) (multiplier_.gpu_data()), 0, 1., + (cl_mem) slope_diff, 0); } } // Propagate to bottom diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 1430c1e4f91..456390475a3 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -262,7 +262,7 @@ double caffe_nextafter(const double b); void caffe_rng_uniform(const int_tp n, uint_tp* r) { CHECK_GE(n, 0); CHECK(r); - boost::uniform_int random_distribution(INT32_MIN, INT32_MAX); + boost::uniform_int random_distribution(INT64_MIN, INT64_MAX); boost::variate_generator> variate_generator(caffe_rng(), random_distribution); for (int_tp i = 0; i < n; ++i) { From 037b72492190670b2213c531a4ca45a04b0217dc Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 24 Oct 2015 16:24:44 +0200 Subject: [PATCH 197/254] LINT fix. --- examples/cifar10/convert_cifar_data.cpp | 4 +- examples/mnist/convert_mnist_data.cpp | 2 +- .../siamese/convert_mnist_siamese_data.cpp | 2 +- include/caffe/definitions.hpp | 4 +- include/caffe/greentea/greentea_im2col.hpp | 41 +- .../greentea/greentea_math_functions.hpp | 48 +- include/caffe/layer.hpp | 3 +- .../caffe/test/test_gradient_check_util.hpp | 23 +- include/caffe/util/cudnn.hpp | 3 +- include/caffe/util/im2col.hpp | 74 +- include/caffe/util/io.hpp | 6 +- include/caffe/util/math_functions.hpp | 42 +- include/caffe/util/rng.hpp | 2 +- include/caffe/vision_layers.hpp | 9 +- src/caffe/blob.cpp | 4 +- src/caffe/data_transformer.cpp | 3 +- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/dropout.cl | 2 +- src/caffe/greentea/greentea_im2col.cpp | 140 +- .../greentea/greentea_math_functions.cpp | 549 ++--- src/caffe/internal_thread.cpp | 3 +- src/caffe/layers/batch_norm_layer.cu | 14 +- src/caffe/layers/batch_reindex_layer.cu | 4 +- src/caffe/layers/dropout_layer.cpp | 6 +- src/caffe/layers/dropout_layer.cu | 2 +- src/caffe/layers/filter_layer.cpp | 3 +- src/caffe/layers/im2col_layer.cpp | 3 +- src/caffe/layers/inner_product_layer.cpp | 3 +- src/caffe/layers/lrn_layer.cu | 6 +- src/caffe/layers/malis_loss_layer.cpp | 7 +- src/caffe/layers/mergecrop_layer.cu | 6 +- src/caffe/layers/pooling_layer.cu | 386 ++-- src/caffe/layers/prelu_layer.cu | 11 +- src/caffe/layers/slice_layer.cu | 3 +- src/caffe/layers/softmax_layer.cu | 8 +- src/caffe/layers/softmax_loss_layer.cpp | 3 +- src/caffe/layers/softmax_loss_layer.cu | 6 +- src/caffe/layers/spp_layer.cpp | 4 +- src/caffe/layers/tile_layer.cu | 9 +- src/caffe/layers/window_data_layer.cpp | 19 +- src/caffe/net.cpp | 10 +- src/caffe/solver.cpp | 6 +- src/caffe/test/test_accuracy_layer.cpp | 3 +- src/caffe/test/test_data_layer.cpp | 3 +- src/caffe/test/test_data_transformer.cpp | 2 +- src/caffe/test/test_gradient_based_solver.cpp | 1919 +++++++++-------- src/caffe/test/test_im2col_kernel.cu | 15 +- src/caffe/test/test_internal_thread.cpp | 4 +- src/caffe/test/test_io.cpp | 3 +- src/caffe/test/test_math_functions.cpp | 3 +- src/caffe/test/test_net.cpp | 8 +- .../test/test_random_number_generator.cpp | 2 +- src/caffe/test/test_reduction_layer.cpp | 2 +- src/caffe/util/im2col.cpp | 57 +- src/caffe/util/im2col.cu | 234 +- src/caffe/util/insert_splits.cpp | 6 +- src/caffe/util/math_functions.cpp | 88 +- src/caffe/util/math_functions.cu | 22 +- src/caffe/util/upgrade_proto.cpp | 9 +- tools/extract_features.cpp | 3 +- 60 files changed, 2048 insertions(+), 1822 deletions(-) diff --git a/examples/cifar10/convert_cifar_data.cpp b/examples/cifar10/convert_cifar_data.cpp index 5e25447388c..6c261670983 100644 --- a/examples/cifar10/convert_cifar_data.cpp +++ b/examples/cifar10/convert_cifar_data.cpp @@ -60,7 +60,7 @@ void convert_dataset(const string& input_folder, const string& output_folder, read_image(&data_file, &label, str_buffer); datum.set_label(label); datum.set_data(str_buffer, kCIFARImageNBytes); - int_tp length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", + int_tp length = snprintf(str_buffer, kCIFARImageNBytes, "%05zd", fileid * kCIFARBatchSize + itemid); string out; CHECK(datum.SerializeToString(&out)); @@ -82,7 +82,7 @@ void convert_dataset(const string& input_folder, const string& output_folder, read_image(&data_file, &label, str_buffer); datum.set_label(label); datum.set_data(str_buffer, kCIFARImageNBytes); - int_tp length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", itemid); + int_tp length = snprintf(str_buffer, kCIFARImageNBytes, "%05zd", itemid); string out; CHECK(datum.SerializeToString(&out)); txn->Put(string(str_buffer, length), out); diff --git a/examples/mnist/convert_mnist_data.cpp b/examples/mnist/convert_mnist_data.cpp index bbc8011eb84..a28b4a508a8 100644 --- a/examples/mnist/convert_mnist_data.cpp +++ b/examples/mnist/convert_mnist_data.cpp @@ -124,7 +124,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, label_file.read(&label, 1); datum.set_data(pixels, rows*cols); datum.set_label(label); - snprintf(key_cstr, kMaxKeyLength, "%08d", item_id); + snprintf(key_cstr, kMaxKeyLength, "%08zd", item_id); datum.SerializeToString(&value); string keystr(key_cstr); diff --git a/examples/siamese/convert_mnist_siamese_data.cpp b/examples/siamese/convert_mnist_siamese_data.cpp index 2881dd345e0..820960fbc2e 100644 --- a/examples/siamese/convert_mnist_siamese_data.cpp +++ b/examples/siamese/convert_mnist_siamese_data.cpp @@ -99,7 +99,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_label(0); } datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); + snprintf(key, kMaxKeyLength, "%08zd", itemid); db->Put(leveldb::WriteOptions(), std::string(key), value); } diff --git a/include/caffe/definitions.hpp b/include/caffe/definitions.hpp index 759955ef393..9babcf6d95b 100644 --- a/include/caffe/definitions.hpp +++ b/include/caffe/definitions.hpp @@ -8,7 +8,7 @@ #define uint_tp uint64_t // Definitions used to cast the types above as needed -#define int_tpc long long -#define uint_tpc unsigned long long +#define int_tpc long long // NOLINT +#define uint_tpc unsigned long long // NOLINT #endif /* CAFFE_DEFINITIONS_HPP_ */ diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index e89394c16a5..c14ea86eb0b 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -16,8 +16,9 @@ template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, const int_tp data_im_off, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, cl_mem data_col, const int_tp data_col_off); @@ -25,10 +26,11 @@ template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp data_col_off, const int_tp channels, - const int_tp height, const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, cl_mem data_im, - const int_tp data_im_off); + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + cl_mem data_im, const int_tp data_im_off); template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, @@ -46,15 +48,17 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp channels, const int_tp height, const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - cl_mem data_im, const int_tp data_offset); + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, cl_mem data_im, + const int_tp data_offset); template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int_tp data_off, const int_tp num_spatial_axes, + const int_tp data_off, + const int_tp num_spatial_axes, const int_tp channel_axis, const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, @@ -63,7 +67,8 @@ void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int_tp data_col_off, const int_tp num_spatial_axes, + const int_tp data_col_off, + const int_tp num_spatial_axes, const int_tp channel_axis, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, @@ -72,7 +77,8 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int_tp data_off, const int_tp num_spatial_axes, + const int_tp data_off, + const int_tp num_spatial_axes, const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, cl_mem data_col, @@ -82,10 +88,11 @@ template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int_tp data_col_off, - const int_tp num_spatial_axes, const int_tp im_size, - cl_mem im_shape, cl_mem col_shape, - cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_im, int_tp data_off); + const int_tp num_spatial_axes, + const int_tp im_size, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_im, + int_tp data_off); } // namespace caffe diff --git a/include/caffe/greentea/greentea_math_functions.hpp b/include/caffe/greentea/greentea_math_functions.hpp index 488137cf8ab..8d08598d0a9 100644 --- a/include/caffe/greentea/greentea_math_functions.hpp +++ b/include/caffe/greentea/greentea_math_functions.hpp @@ -45,10 +45,11 @@ void greentea_copy(const int_tp N, const Dtype* X, cl_mem Y, const int_tp offY, template void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int_tp M, const int_tp N, - const int_tp K, const Dtype alpha, const cl_mem A, - const int_tp offA, const cl_mem B, const int_tp offB, - const Dtype beta, cl_mem C, const int_tp offC); + const CBLAS_TRANSPOSE TransB, const int_tp M, + const int_tp N, const int_tp K, const Dtype alpha, + const cl_mem A, const int_tp offA, const cl_mem B, + const int_tp offB, const Dtype beta, cl_mem C, + const int_tp offC); template void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, @@ -64,8 +65,8 @@ void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, template void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, @@ -95,23 +96,23 @@ void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const Dtype alpha, cl_mem Y, const int_tp offY); template -void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const Dtype alpha, - cl_mem Y, const int_tp offY); +void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, + const Dtype alpha, cl_mem Y, const int_tp offY); template void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, const cl_mem a, @@ -131,23 +132,26 @@ void greentea_gpu_log(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, cl_mem y, const int_tp offy); template -void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, const cl_mem x, int_tp offx, - cl_mem y, const int_tp offy); +void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, const cl_mem x, + int_tp offx, cl_mem y, const int_tp offy); template void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, const cl_mem x, - int_tp offx, cl_mem y, const int_tp offy); +int_tp offx, + cl_mem y, const int_tp offy); template -void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, const Dtype a, - const Dtype b, cl_mem r, const int_tp offr); +void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, + const Dtype a, const Dtype b, cl_mem r, + const int_tp offr); void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, cl_mem r, - int_tp offr); +int_tp offr); template -void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, const Dtype mu, - const Dtype sigma, cl_mem r, const int_tp offr); +void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, + const Dtype mu, const Dtype sigma, cl_mem r, + const int_tp offr); } // namespace caffe diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index cd9cd3e025b..101491e73e2 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -339,7 +339,8 @@ class Layer { * @brief Sets whether the layer should compute gradients w.r.t. a * parameter at a particular index given by param_id. */ - inline void set_param_propagate_down(const int_tp param_id, const bool value) { + inline void set_param_propagate_down(const int_tp param_id, + const bool value) { if (param_propagate_down_.size() <= param_id) { param_propagate_down_.resize(param_id + 1, true); } diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index de1a086e129..a3c04d825c1 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -24,10 +24,7 @@ class GradientChecker { GradientChecker(const Dtype stepsize, const Dtype threshold, const uint_tp seed = 1701, const Dtype kink = 0., const Dtype kink_range = -1) - : stepsize_(stepsize), - threshold_(threshold), - seed_(seed), - kink_(kink), + : stepsize_(stepsize), threshold_(threshold), seed_(seed), kink_(kink), kink_range_(kink_range) { } // Checks the gradient of a layer, with provided bottom layers and top @@ -35,7 +32,8 @@ class GradientChecker { // Note that after the gradient check, we do not guarantee that the data // stored in the layer parameters and the blobs are unchanged. void CheckGradient(Layer* layer, const vector*>& bottom, - const vector*>& top, int_tp check_bottom = -1) { + const vector*>& top, + int_tp check_bottom = -1) { layer->SetUp(bottom, top); CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1); } @@ -58,8 +56,8 @@ class GradientChecker { void CheckGradientSingle(Layer* layer, const vector*>& bottom, const vector*>& top, int_tp check_bottom, - int_tp top_id, int_tp top_data_id, bool element_wise = - false); + int_tp top_id, + int_tp top_data_id, bool element_wise = false); // Checks the gradient of a network. This network should not have any data // layers or loss layers, since the function does not explicitly deal with @@ -83,7 +81,8 @@ template void GradientChecker::CheckGradientSingle( Layer* layer, const vector*>& bottom, const vector*>& top, int_tp check_bottom, int_tp top_id, - int_tp top_data_id, bool element_wise) { + int_tp top_data_id, + bool element_wise) { if (element_wise) { CHECK_EQ(0, layer->blobs().size()); CHECK_LE(0, top_id); @@ -111,7 +110,7 @@ void GradientChecker::CheckGradientSingle( blobs_to_check.push_back(bottom[check_bottom]); propagate_down[check_bottom] = true; } - CHECK_GT(blobs_to_check.size(), 0) << "No blobs to check."; + CHECK_GT(blobs_to_check.size(), 0)<< "No blobs to check."; // Compute the gradient analytically using Backward Caffe::set_random_seed(seed_); // Ignore the loss from the layer (it's just the weighted sum of the losses @@ -126,8 +125,7 @@ void GradientChecker::CheckGradientSingle( for (int_tp blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { Blob* current_blob = blobs_to_check[blob_id]; computed_gradient_blobs[blob_id].reset(new Blob()); - computed_gradient_blobs[blob_id]->ReshapeLike( - *current_blob); + computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob); const int_tp count = blobs_to_check[blob_id]->count(); const Dtype* diff = blobs_to_check[blob_id]->cpu_diff(); Dtype* computed_gradients = computed_gradient_blobs[blob_id] @@ -242,7 +240,8 @@ void GradientChecker::CheckGradientNet( template Dtype GradientChecker::GetObjAndGradient(const Layer& layer, const vector*>& top, - int_tp top_id, int_tp top_data_id) { + int_tp top_id, + int_tp top_data_id) { Dtype loss = 0; if (top_id < 0) { // the loss will be half of the sum of squares of all outputs diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index 57a5e3f7e28..12a63b555af 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -108,7 +108,8 @@ inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, template inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, PoolingParameter_PoolMethod poolmethod, cudnnPoolingMode_t* mode, - int_tp h, int_tp w, int_tp pad_h, int_tp pad_w, int_tp stride_h, int_tp stride_w) { + int_tp h, int_tp w, int_tp pad_h, int_tp pad_w, + int_tp stride_h, int_tp stride_w) { switch (poolmethod) { case PoolingParameter_PoolMethod_MAX: *mode = CUDNN_POOLING_MAX; diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index 7f07cfcd6ef..0778f64a188 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -7,53 +7,57 @@ namespace caffe { template -void im2col_cpu(const Dtype* data_im, const int_tp channels, const int_tp height, - const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, Dtype* data_col); +void im2col_cpu(const Dtype* data_im, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_col); template -void col2im_cpu(const Dtype* data_col, const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, Dtype* data_im); +void col2im_cpu(const Dtype* data_col, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_im); template void im2col_nd_cpu(const Dtype* data_im, const int_tp num_spatial_axes, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - Dtype* data_col); + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, Dtype* data_col); template void col2im_nd_cpu(const Dtype* data_col, const int_tp num_spatial_axes, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - Dtype* data_im); + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, Dtype* data_im); template -void im2col_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, - const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, Dtype* data_col); +void im2col_gpu(const Dtype* data_im, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_col); template -void col2im_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, Dtype* data_im); +void col2im_gpu(const Dtype* data_col, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_im); template -void im2col_sk_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, - const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, +void im2col_sk_gpu(const Dtype* data_im, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, Dtype* data_col); template -void col2im_sk_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, +void col2im_sk_gpu(const Dtype* data_col, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, Dtype* data_im); template @@ -64,23 +68,23 @@ void im2col_nd_gpu(const Dtype* data_im, const int_tp num_spatial_axes, template void col2im_nd_gpu(const Dtype* data_col, const int_tp num_spatial_axes, - const int_tp im_size, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - Dtype* data_im); + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, Dtype* data_im); template void im2col_ndsk_gpu(const Dtype* data_im, const int_tp num_spatial_axes, const int_tp num_kernels, const int_tp* im_shape, const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, const int_tp* kstride, - Dtype* data_col); + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, Dtype* data_col); template void col2im_ndsk_gpu(const Dtype* data_col, const int_tp num_spatial_axes, const int_tp im_size, const int_tp* im_shape, const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, const int_tp* kstride, - Dtype* data_im); + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, Dtype* data_im); } // namespace caffe diff --git a/include/caffe/util/io.hpp b/include/caffe/util/io.hpp index 79b89a35d85..a7d84e50f12 100644 --- a/include/caffe/util/io.hpp +++ b/include/caffe/util/io.hpp @@ -91,9 +91,9 @@ bool ReadImageToDatum(const string& filename, const int_tp label, const std::string & encoding, Datum* datum); inline bool ReadImageToDatum(const string& filename, const int_tp label, - const int_tp height, const int_tp width, const bool is_color, Datum* datum) { - return ReadImageToDatum(filename, label, height, width, is_color, - "", datum); + const int_tp height, const int_tp width, + const bool is_color, Datum* datum) { + return ReadImageToDatum(filename, label, height, width, is_color, "", datum); } inline bool ReadImageToDatum(const string& filename, const int_tp label, diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index aaeea6f39ea..a49b12667a4 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -16,13 +16,14 @@ namespace caffe { // limitation that the data has to be contiguous in memory. template void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int_tp M, const int_tp N, const int_tp K, const Dtype alpha, - const Dtype* A, const Dtype* B, const Dtype beta, Dtype* C); + const int_tp M, const int_tp N, const int_tp K, + const Dtype alpha, const Dtype* A, const Dtype* B, + const Dtype beta, Dtype* C); template -void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, const int_tp N, - const Dtype alpha, const Dtype* A, const Dtype* x, - const Dtype beta, Dtype* y); +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, + const int_tp N, const Dtype alpha, const Dtype* A, + const Dtype* x, const Dtype beta, Dtype* y); template void caffe_axpy(const int_tp N, const Dtype alpha, const Dtype* X, Dtype* Y); @@ -102,7 +103,8 @@ Dtype caffe_cpu_strided_dot(const int_tp n, const Dtype* x, const int_tp incx, const Dtype* y, const int_tp incy); template -int_tp caffe_cpu_hamming_distance(const int_tp n, const Dtype* x, const Dtype* y); +int_tp caffe_cpu_hamming_distance(const int_tp n, const Dtype* x, + const Dtype* y); // Returns the sum of the absolute values of the elements of vector x template @@ -143,7 +145,8 @@ DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i])); template -void caffe_cpu_scale(const int_tp n, const Dtype alpha, const Dtype *x, Dtype* y); +void caffe_cpu_scale(const int_tp n, const Dtype alpha, const Dtype *x, + Dtype* y); #ifndef CPU_ONLY // GPU #ifdef USE_CUDA @@ -153,16 +156,18 @@ void caffe_cpu_scale(const int_tp n, const Dtype alpha, const Dtype *x, Dtype* y // gpu code under the hood. template void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int_tp M, const int_tp N, const int_tp K, const Dtype alpha, - const Dtype* A, const Dtype* B, const Dtype beta, Dtype* C); + const int_tp M, const int_tp N, const int_tp K, + const Dtype alpha, const Dtype* A, const Dtype* B, + const Dtype beta, Dtype* C); template -void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, const int_tp N, - const Dtype alpha, const Dtype* A, const Dtype* x, - const Dtype beta, Dtype* y); +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int_tp M, + const int_tp N, const Dtype alpha, const Dtype* A, + const Dtype* x, const Dtype beta, Dtype* y); template -void caffe_gpu_axpy(const int_tp N, const Dtype alpha, const Dtype* X, Dtype* Y); +void caffe_gpu_axpy(const int_tp N, const Dtype alpha, const Dtype* X, + Dtype* Y); template void caffe_gpu_axpby(const int_tp N, const Dtype alpha, const Dtype* X, @@ -209,9 +214,8 @@ void caffe_gpu_powx(const int_tp n, const Dtype* a, const Dtype b, Dtype* y); // caffe_gpu_rng_uniform with two arguments generates integers in the range // [0, UINT_MAX]. -void caffe_gpu_rng_uniform(const int_tp n, unsigned int* r); -void caffe_gpu_rng_uniform(const int_tp n, unsigned long long* r); - +void caffe_gpu_rng_uniform(const int_tp n, unsigned int* r); // NOLINT +void caffe_gpu_rng_uniform(const int_tp n, unsigned long long* r); // NOLINT // caffe_gpu_rng_uniform with four arguments generates floats in the range // (a, b] (strictly greater than a, less than or equal to b) due to the @@ -219,7 +223,8 @@ void caffe_gpu_rng_uniform(const int_tp n, unsigned long long* r); // curandGenerateUniform; with other limits will shift and scale the outputs // appropriately after calling curandGenerateUniform. template -void caffe_gpu_rng_uniform(const int_tp n, const Dtype a, const Dtype b, Dtype* r); +void caffe_gpu_rng_uniform(const int_tp n, const Dtype a, const Dtype b, + Dtype* r); template void caffe_gpu_rng_gaussian(const int_tp n, const Dtype mu, const Dtype sigma, @@ -248,7 +253,8 @@ template void caffe_gpu_fabs(const int_tp n, const Dtype* x, Dtype* y); template -void caffe_gpu_scale(const int_tp n, const Dtype alpha, const Dtype *x, Dtype* y); +void caffe_gpu_scale(const int_tp n, const Dtype alpha, const Dtype *x, + Dtype* y); #define DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(name, operation) \ template \ diff --git a/include/caffe/util/rng.hpp b/include/caffe/util/rng.hpp index 8f1cf0d17c2..097c8d483eb 100644 --- a/include/caffe/util/rng.hpp +++ b/include/caffe/util/rng.hpp @@ -11,7 +11,7 @@ namespace caffe { -typedef boost::mt19937 rng_t; +typedef boost::mt19937_64 rng_t; inline rng_t* caffe_rng() { return static_cast(Caffe::rng_stream().generator()); diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 1b0762ef829..0fdab7e9630 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -177,14 +177,16 @@ class BaseConvolutionLayer : public Layer { void forward_gpu_gemm(const Dtype* col_input, const int_tp col_input_off, const Dtype* weights, Dtype* output, const int_tp output_off, bool skip_im2col = false); - void forward_gpu_bias(Dtype* output, const int_tp output_off, const Dtype* bias); + void forward_gpu_bias(Dtype* output, const int_tp output_off, + const Dtype* bias); void backward_gpu_gemm(const Dtype* input, const int_tp input_off, const Dtype* weights, Dtype* col_output, const int_tp col_output_off); void weight_gpu_gemm(const Dtype* col_input, const int_tp col_input_off, const Dtype* output, const int_tp output_off, Dtype* weights); - void backward_gpu_bias(Dtype* bias, const Dtype* input, const int_tp input_off); + void backward_gpu_bias(Dtype* bias, const Dtype* input, + const int_tp input_off); shared_ptr< Blob > col_buffer(); #endif @@ -921,7 +923,8 @@ class SPPLayer : public Layer { // calculates the kernel and stride dimensions for the pooling layer, // returns a correctly configured LayerParameter for a PoolingLayer virtual LayerParameter GetPoolingParam(const int_tp pyramid_level, - const int_tp bottom_h, const int_tp bottom_w, + const int_tp bottom_h, + const int_tp bottom_w, const SPPParameter spp_param); int_tp pyramid_height_; diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 7d1840127ae..c8901ce84cb 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -16,8 +16,8 @@ namespace caffe { template -bool Blob::Reshape(const int_tp num, const int_tp channels, const int_tp height, - const int_tp width) { +bool Blob::Reshape(const int_tp num, const int_tp channels, + const int_tp height, const int_tp width) { vector shape(4); shape[0] = num; shape[1] = channels; diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index c959369c50d..509cac186cf 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -320,7 +320,8 @@ void DataTransformer::Transform(const cv::Mat& cv_img, [img_index++]); } if (has_mean_file) { - int_tp mean_index = (c * img_height + h_off + h) * img_width + w_off + w; + int_tp mean_index = (c * img_height + h_off + h) * img_width + w_off + + w; transformed_data[top_index] = (pixel - mean[mean_index]) * scale; } else { if (has_mean_values) { diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index e8f32078711..1c37ddb3026 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -14,7 +14,7 @@ std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#e std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT @@ -38,7 +38,7 @@ std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n# std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);\n }\n}"; // NOLINT +std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/dropout.cl b/src/caffe/greentea/cl_kernels/dropout.cl index c686401b02c..5ea5b11ab2c 100644 --- a/src/caffe/greentea/cl_kernels/dropout.cl +++ b/src/caffe/greentea/cl_kernels/dropout.cl @@ -19,6 +19,6 @@ __kernel void TEMPLATE(dropout_backward,Dtype)( const Dtype scale, __global Dtype* out_diff) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold)); } } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index ed74f6ac9fd..04463ee15e8 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -43,10 +43,13 @@ template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, const cl_mem data_im, const int_tp data_offset, const int_tp channels, - const int_tp height, const int_tp width, + const int_tp height, + const int_tp width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, + const int_tp kernel_w, + const int_tp pad_h, + const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, @@ -57,10 +60,12 @@ template void greentea_im2col_sk_gpu(viennacl::ocl::program *prog, const cl_mem data_im, const int_tp data_offset, const int_tp channels, - const int_tp height, const int_tp width, + const int_tp height, + const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, + const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, @@ -72,10 +77,11 @@ void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp channels, const int_tp height, const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - cl_mem data_im, const int_tp data_offset) { + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, cl_mem data_im, + const int_tp data_offset) { if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) { LOG(FATAL)<< "stride greater than 1 or pad greater than 0" << " not tested in col2im_sk_gpu()."; @@ -102,22 +108,28 @@ template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp channels, - const int_tp height, const int_tp width, + const int_tp height, + const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, + const int_tp patch_w, + const int_tp pad_h, + const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, - const int_tp kstride_w, cl_mem data_im, + const int_tp kstride_w, + cl_mem data_im, const int_tp data_offset); template void greentea_col2im_sk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp channels, - const int_tp height, const int_tp width, + const int_tp height, + const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, + const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, @@ -130,8 +142,9 @@ template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, const int_tp data_im_off, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, cl_mem data_col, const int_tp data_col_off) { // We are going to launch channels * height_col * width_col kernels, each @@ -153,10 +166,13 @@ template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, const int_tp data_im_off, - const int_tp channels, const int_tp height, - const int_tp width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, + const int_tp channels, + const int_tp height, + const int_tp width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, cl_mem data_col, const int_tp data_col_off); @@ -164,21 +180,27 @@ template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_im, const int_tp data_im_off, - const int_tp channels, const int_tp height, - const int_tp width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, cl_mem data_col, + const int_tp channels, + const int_tp height, + const int_tp width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp pad_h, + const int_tp pad_w, + const int_tp stride_h, + const int_tp stride_w, + cl_mem data_col, const int_tp data_col_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp data_col_off, const int_tp channels, - const int_tp height, const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, cl_mem data_im, - const int_tp data_im_off) { + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + cl_mem data_im, const int_tp data_im_off) { int_tp height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int_tp width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int_tp num_kernels = channels * height * width; @@ -197,29 +219,36 @@ template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp data_col_off, - const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, + const int_tp channels, + const int_tp height, + const int_tp width, + const int_tp patch_h, + const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, cl_mem data_im, const int_tp data_im_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp data_col_off, - const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, + const int_tp channels, + const int_tp height, + const int_tp width, + const int_tp patch_h, + const int_tp patch_w, + const int_tp pad_h, + const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, cl_mem data_im, const int_tp data_im_off); template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int_tp data_off, const int_tp num_spatial_axes, - const int_tp channel_axis, - const int_tp num_kernels, + const int_tp data_off, + const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_col, int_tp data_col_off) { @@ -238,7 +267,8 @@ void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, // Explicit instantiation template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, - cl_mem data_im, const int_tp data_off, + cl_mem data_im, + const int_tp data_off, const int_tp num_spatial_axes, const int_tp channel_axis, const int_tp num_kernels, @@ -249,7 +279,8 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, - cl_mem data_im, const int_tp data_off, + cl_mem data_im, + const int_tp data_off, const int_tp num_spatial_axes, const int_tp channel_axis, const int_tp num_kernels, @@ -261,7 +292,8 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, - const int_tp data_col_off, const int_tp num_spatial_axes, + const int_tp data_col_off, + const int_tp num_spatial_axes, const int_tp channel_axis, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, @@ -285,8 +317,8 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, const int_tp data_col_off, const int_tp num_spatial_axes, const int_tp channel_axis, - const int_tp im_size, cl_mem im_shape, - cl_mem col_shape, + const int_tp im_size, + cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_im, int_tp data_off); @@ -297,8 +329,8 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, const int_tp data_col_off, const int_tp num_spatial_axes, const int_tp channel_axis, - const int_tp im_size, cl_mem im_shape, - cl_mem col_shape, + const int_tp im_size, + cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem data_im, int_tp data_off); @@ -306,7 +338,8 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, template void greentea_im2col_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, - const int_tp data_off, const int_tp num_spatial_axes, + const int_tp data_off, + const int_tp num_spatial_axes, const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem kstride, cl_mem data_col, @@ -353,10 +386,11 @@ template void greentea_col2im_ndsk_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int_tp data_col_off, - const int_tp num_spatial_axes, const int_tp im_size, - cl_mem im_shape, cl_mem col_shape, - cl_mem kernel_shape, cl_mem pad, cl_mem stride, - cl_mem kstride, cl_mem data_im, int_tp data_off) { + const int_tp num_spatial_axes, + const int_tp im_size, cl_mem im_shape, + cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, + cl_mem stride, cl_mem kstride, cl_mem data_im, + int_tp data_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("col2im_ndsk")); diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 89b65e7c4ad..5f1167aac0a 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -65,7 +65,8 @@ void greentea_memset(const int_tp ctx_id, const uint_tp N, const int_tp alpha, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); // OpenCL Version >= 1.2 approach - // clEnqueueFillBuffer(ctx.get_queue().handle().get(), X, &alpha, sizeof(int_tp), + // clEnqueueFillBuffer(ctx.get_queue().handle().get(), + // X, &alpha, sizeof(int_tp), // offX, N, 0, NULL, NULL); // OpenCL Version < 1.2 fallback typedef float Dtype; @@ -128,71 +129,77 @@ void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, cl_mem Y, } // Explicit instantiations -template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, - int_tp* Y, viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, + int_tp* Y, + viennacl::ocl::context *ctx); template void greentea_copy(const int_tp N, const cl_mem X, - const int_tp offX, uint_tp* Y, - viennacl::ocl::context *ctx); -template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, - float* Y, viennacl::ocl::context *ctx); -template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, - double* Y, viennacl::ocl::context *ctx); + const int_tp offX, uint_tp* Y, + viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, float* Y, + viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, double* Y, + viennacl::ocl::context *ctx); template void greentea_copy(const int_tp N, const int_tp* X, cl_mem Y, - const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int_tp N, const uint_tp* X, - cl_mem Y, const int_tp offY, - viennacl::ocl::context *ctx); + const int_tp offY, + viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const uint_tp* X, cl_mem Y, + const int_tp offY, + viennacl::ocl::context *ctx); template void greentea_copy(const int_tp N, const float* X, cl_mem Y, - const int_tp offY, viennacl::ocl::context *ctx); + const int_tp offY, + viennacl::ocl::context *ctx); template void greentea_copy(const int_tp N, const double* X, cl_mem Y, const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, - cl_mem Y, const int_tp offY, - viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, cl_mem Y, + const int_tp offY, + viennacl::ocl::context *ctx); template void greentea_copy(const int_tp N, const cl_mem X, - const int_tp offX, cl_mem Y, - const int_tp offY, - viennacl::ocl::context *ctx); -template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, - cl_mem Y, const int_tp offY, + const int_tp offX, cl_mem Y, + const int_tp offY, + viennacl::ocl::context *ctx); +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); -template void greentea_copy(const int_tp N, const cl_mem X, const int_tp offX, - cl_mem Y, const int_tp offY, +template void greentea_copy(const int_tp N, const cl_mem X, + const int_tp offX, cl_mem Y, + const int_tp offY, viennacl::ocl::context *ctx); template void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, - const CBLAS_TRANSPOSE TransB, const int_tp M, const int_tp N, - const int_tp K, const Dtype alpha, const cl_mem A, - const int_tp offA, const cl_mem B, const int_tp offB, - const Dtype beta, cl_mem C, const int_tp offC) { + const CBLAS_TRANSPOSE TransB, const int_tp M, + const int_tp N, const int_tp K, const Dtype alpha, + const cl_mem A, const int_tp offA, const cl_mem B, + const int_tp offB, const Dtype beta, cl_mem C, + const int_tp offC) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - Dtype* Aptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - A, true, CL_MAP_READ, sizeof(Dtype) * offA, - sizeof(Dtype) * M * K, 0, NULL, NULL, NULL)); - Dtype* Bptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - B, true, CL_MAP_READ, sizeof(Dtype) * offB, - sizeof(Dtype) * N * K, 0, NULL, NULL, NULL)); - Dtype* Cptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - C, true, CL_MAP_READ | CL_MAP_WRITE, - sizeof(Dtype) * offC, - sizeof(Dtype) * M * N, 0, NULL, NULL, NULL)); - - caffe_cpu_gemm(TransA, TransB, M, N, K, alpha, Aptr, - Bptr, beta, Cptr); - - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - A, Aptr, 0, NULL, NULL); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - B, Bptr, 0, NULL, NULL); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - C, Cptr, 0, NULL, NULL); + Dtype* Aptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), A, true, CL_MAP_READ, + sizeof(Dtype) * offA, sizeof(Dtype) * M * K, 0, NULL, NULL, NULL)); + Dtype* Bptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), B, true, CL_MAP_READ, + sizeof(Dtype) * offB, sizeof(Dtype) * N * K, 0, NULL, NULL, NULL)); + Dtype* Cptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), C, true, CL_MAP_READ | CL_MAP_WRITE, + sizeof(Dtype) * offC, sizeof(Dtype) * M * N, 0, NULL, NULL, NULL)); + + caffe_cpu_gemm(TransA, TransB, M, N, K, alpha, Aptr, Bptr, beta, + Cptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), A, Aptr, 0, NULL, + NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), B, Bptr, 0, NULL, + NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), C, Cptr, 0, NULL, + NULL); } else { int_tp lda = (TransA == CblasNoTrans) ? K : M; int_tp ldb = (TransB == CblasNoTrans) ? N : K; @@ -200,62 +207,70 @@ void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, #ifndef USE_CLBLAS - typedef typename viennacl::matrix_base::uint_tpype uint_tpype; - typedef typename viennacl::matrix_base::uint_tpype difference_type; - - uint_tpype A_size1 = static_cast((TransA == CblasTrans) ? K : M); - uint_tpype A_size2 = static_cast((TransA == CblasTrans) ? M : K); - - uint_tpype B_size1 = static_cast((TransB == CblasTrans) ? N : K); - uint_tpype B_size2 = static_cast((TransB == CblasTrans) ? K : N); - - viennacl::matrix_base matA(A, ctx, - A_size1, uint_tpype(0), difference_type(1), uint_tpype(M), - A_size2, uint_tpype(offA), difference_type(1), uint_tpype(lda) - VCL_ROW_MAJOR); - - viennacl::matrix_base matB(B, ctx, - B_size1, uint_tpype(0), difference_type(1), uint_tpype(K), - B_size2, uint_tpype(offB), difference_type(1), uint_tpype(ldb) - VCL_ROW_MAJOR); - - viennacl::matrix_base matC(C, ctx, - uint_tpype(M), uint_tpype(0), difference_type(1), uint_tpype(M), - uint_tpype(N), uint_tpype(offC), difference_type(1), - uint_tpype(ldc) VCL_ROW_MAJOR); - - if (TransA == CblasTrans && TransB == CblasTrans) - viennacl::linalg::prod_impl(viennacl::trans(matA), viennacl::trans(matB), - matC, alpha, beta); - else if (TransA == CblasTrans && TransB == CblasNoTrans) - viennacl::linalg::prod_impl(viennacl::trans(matA), matB, - matC, alpha, beta); - else if (TransA == CblasNoTrans && TransB == CblasTrans) - viennacl::linalg::prod_impl(matA, viennacl::trans(matB), - matC, alpha, beta); - else if (TransA == CblasNoTrans && TransB == CblasNoTrans) - viennacl::linalg::prod_impl(matA, matB, - matC, alpha, beta); + typedef typename viennacl::matrix_base::uint_tpype + uint_tpype; + typedef typename viennacl::matrix_base::uint_tpype + difference_type; + + uint_tpype A_size1 = static_cast( + (TransA == CblasTrans) ? K : M); + uint_tpype A_size2 = static_cast( + (TransA == CblasTrans) ? M : K); + + uint_tpype B_size1 = static_cast( + (TransB == CblasTrans) ? N : K); + uint_tpype B_size2 = static_cast( + (TransB == CblasTrans) ? K : N); + + viennacl::matrix_base matA(A, ctx, A_size1, uint_tpype(0), + difference_type(1), uint_tpype(M), + A_size2, uint_tpype(offA), + difference_type(1), uint_tpype(lda) + VCL_ROW_MAJOR); + + viennacl::matrix_base matB(B, ctx, B_size1, uint_tpype(0), + difference_type(1), uint_tpype(K), + B_size2, uint_tpype(offB), + difference_type(1), uint_tpype(ldb) + VCL_ROW_MAJOR); + + viennacl::matrix_base matC(C, ctx, uint_tpype(M), uint_tpype(0), + difference_type(1), uint_tpype(M), + uint_tpype(N), uint_tpype(offC), + difference_type(1), uint_tpype(ldc) + VCL_ROW_MAJOR); + + if (TransA == CblasTrans && TransB == CblasTrans) + viennacl::linalg::prod_impl(viennacl::trans(matA), viennacl::trans(matB), + matC, alpha, beta); + else if (TransA == CblasTrans && TransB == CblasNoTrans) + viennacl::linalg::prod_impl(viennacl::trans(matA), matB, matC, alpha, + beta); + else if (TransA == CblasNoTrans && TransB == CblasTrans) + viennacl::linalg::prod_impl(matA, viennacl::trans(matB), matC, alpha, + beta); + else if (TransA == CblasNoTrans && TransB == CblasNoTrans) + viennacl::linalg::prod_impl(matA, matB, matC, alpha, beta); #else clblasOrder clOrder = clblasRowMajor; clblasTranspose clTransA = - (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; + (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; clblasTranspose clTransB = - (TransB == CblasNoTrans) ? clblasNoTrans : clblasTrans; + (TransB == CblasNoTrans) ? clblasNoTrans : clblasTrans; cl_command_queue queue = ctx.get_queue().handle().get(); if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( clblasSgemm(clOrder, clTransA, clTransB, - M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, - C, offC, ldc, 1, &queue, 0, NULL, NULL)); + M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, + C, offC, ldc, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( clblasDgemm(clOrder, clTransA, clTransB, - M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, - C, offC, ldc, 1, &queue, 0, NULL, NULL)); + M, N, K, alpha, A, offA, lda, B, offB, ldb, beta, + C, offC, ldc, 1, &queue, 0, NULL, NULL)); } #endif } @@ -264,19 +279,21 @@ void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, template void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int_tp M, const int_tp N, const int_tp K, - const float alpha, const cl_mem A, - const int_tp offA, const cl_mem B, - const int_tp offB, const float beta, - cl_mem C, const int_tp offC); + const int_tp M, const int_tp N, + const int_tp K, const float alpha, + const cl_mem A, const int_tp offA, + const cl_mem B, const int_tp offB, + const float beta, cl_mem C, + const int_tp offC); template void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, - const int_tp M, const int_tp N, const int_tp K, - const double alpha, const cl_mem A, - const int_tp offA, const cl_mem B, - const int_tp offB, const double beta, - cl_mem C, const int_tp offC); + const int_tp M, const int_tp N, + const int_tp K, const double alpha, + const cl_mem A, const int_tp offA, + const cl_mem B, const int_tp offB, + const double beta, cl_mem C, + const int_tp offC); template void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, @@ -287,31 +304,26 @@ void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - Dtype* Aptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - A, true, CL_MAP_READ, sizeof(Dtype) * offA, - sizeof(Dtype) * M * N, 0, NULL, NULL, NULL)); - Dtype* xptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - x, true, CL_MAP_READ, sizeof(Dtype) * offx, - sizeof(Dtype) * (TransA == CblasTrans) ? M : N, - 0, NULL, NULL, NULL)); - Dtype* yptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - y, true, CL_MAP_READ | CL_MAP_WRITE, - sizeof(Dtype) * offy, - sizeof(Dtype) * (TransA == CblasTrans) ? N : M, - 0, NULL, NULL, NULL)); - - caffe_cpu_gemv(TransA, M, N, alpha, Aptr, xptr, beta, - yptr); - - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - A, Aptr, 0, NULL, NULL); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - x, xptr, 0, NULL, NULL); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - y, yptr, 0, NULL, NULL); + Dtype* Aptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), A, true, CL_MAP_READ, + sizeof(Dtype) * offA, sizeof(Dtype) * M * N, 0, NULL, NULL, NULL)); + Dtype* xptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), x, true, CL_MAP_READ, + sizeof(Dtype) * offx, sizeof(Dtype) * (TransA == CblasTrans) ? M : N, 0, + NULL, NULL, NULL)); + Dtype* yptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), y, true, CL_MAP_READ | CL_MAP_WRITE, + sizeof(Dtype) * offy, sizeof(Dtype) * (TransA == CblasTrans) ? N : M, 0, + NULL, NULL, NULL)); + + caffe_cpu_gemv(TransA, M, N, alpha, Aptr, xptr, beta, yptr); + + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), A, Aptr, 0, NULL, + NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), x, xptr, 0, NULL, + NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), y, yptr, 0, NULL, + NULL); } else { #ifndef USE_CLBLAS @@ -324,8 +336,7 @@ void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, viennacl::vector_base v2(y, uint_tpype((TransA == CblasTrans) ? N : M), uint_tpype(offy), difference_type(1), ctx); - viennacl::matrix_base mat(A, ctx, - uint_tpype(M), uint_tpype(0), + viennacl::matrix_base mat(A, ctx, uint_tpype(M), uint_tpype(0), difference_type(1), uint_tpype(M), uint_tpype(N), uint_tpype(offA), difference_type(1), uint_tpype(N) @@ -338,18 +349,20 @@ void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, #else clblasTranspose clTransA = - (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; + (TransA == CblasNoTrans) ? clblasNoTrans : clblasTrans; cl_command_queue queue = ctx.get_queue().handle().get(); if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( - clblasSgemv(clblasRowMajor, clTransA, M, N, alpha, A, offA, N, x, offx, 1, - beta, y, offy, 1, 1, &queue, 0, NULL, NULL)); + clblasSgemv(clblasRowMajor, + clTransA, M, N, alpha, A, offA, N, x, offx, 1, + beta, y, offy, 1, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( - clblasDgemv(clblasRowMajor, clTransA, M, N, alpha, A, offA, N, x, offx, 1, - beta, y, offy, 1, 1, &queue, 0, NULL, NULL)); + clblasDgemv(clblasRowMajor, + clTransA, M, N, alpha, A, offA, N, x, offx, 1, + beta, y, offy, 1, 1, &queue, 0, NULL, NULL)); } #endif } @@ -377,21 +390,19 @@ void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - Dtype* Xptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - X, true, CL_MAP_READ, sizeof(Dtype) * offX, - sizeof(Dtype) * N, 0, NULL, NULL, NULL)); - Dtype* Yptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - Y, true, CL_MAP_WRITE, sizeof(Dtype) * offY, - sizeof(Dtype) * N, 0, NULL, NULL, NULL)); + Dtype* Xptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), X, true, CL_MAP_READ, + sizeof(Dtype) * offX, sizeof(Dtype) * N, 0, NULL, NULL, NULL)); + Dtype* Yptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), Y, true, CL_MAP_WRITE, + sizeof(Dtype) * offY, sizeof(Dtype) * N, 0, NULL, NULL, NULL)); caffe_axpy(N, alpha, Xptr, Yptr); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - X, Xptr, 0, NULL, NULL); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - Y, Yptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), X, Xptr, 0, NULL, + NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), Y, Yptr, 0, NULL, + NULL); } else { #ifndef USE_CLBLAS @@ -411,11 +422,11 @@ void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( clblasSaxpy(N, alpha, X, offX, - 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); + 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( clblasDaxpy(N, alpha, X, offX, - 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); + 1, Y, offY, 1, 1, &queue, 0, NULL, NULL)); } #endif } @@ -432,8 +443,8 @@ template void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, template void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy) { + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -446,17 +457,17 @@ void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, template void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy) { + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -469,12 +480,12 @@ void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, template void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, @@ -482,24 +493,22 @@ void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - Dtype* xptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - x, true, CL_MAP_READ | CL_MAP_WRITE, - sizeof(Dtype) * offx, - sizeof(Dtype) * N, 0, NULL, NULL, NULL)); + Dtype* xptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), x, true, CL_MAP_READ | CL_MAP_WRITE, + sizeof(Dtype) * offx, sizeof(Dtype) * N, 0, NULL, NULL, NULL)); caffe_scal(N, alpha, xptr); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - x, xptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), x, xptr, 0, NULL, + NULL); } else { #ifndef USE_CLBLAS typedef typename viennacl::vector_base::uint_tpype uint_tpype; typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(x, uint_tpype(N), - uint_tpype(offx), difference_type(1), ctx); + viennacl::vector_base v1(x, uint_tpype(N), uint_tpype(offx), + difference_type(1), ctx); v1 *= alpha; @@ -508,10 +517,10 @@ void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK(clblasSscal(N, alpha, x, offx, - 1, 1, &queue, 0, NULL, NULL)); + 1, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK(clblasDscal(N, alpha, x, offx, - 1, 1, &queue, 0, NULL, NULL)); + 1, 1, &queue, 0, NULL, NULL)); } #endif } @@ -549,31 +558,29 @@ void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, const cl_mem X, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - Dtype* Xptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - X, true, CL_MAP_READ, sizeof(Dtype) * offX, - sizeof(Dtype) * n, 0, NULL, NULL, NULL)); - Dtype* Yptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - Y, true, CL_MAP_READ, sizeof(Dtype) * offY, - sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + Dtype* Xptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), X, true, CL_MAP_READ, + sizeof(Dtype) * offX, sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + Dtype* Yptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), Y, true, CL_MAP_READ, + sizeof(Dtype) * offY, sizeof(Dtype) * n, 0, NULL, NULL, NULL)); *out = caffe_cpu_dot(n, Xptr, Yptr); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - X, Xptr, 0, NULL, NULL); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - Y, Yptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), X, Xptr, 0, NULL, + NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), Y, Yptr, 0, NULL, + NULL); } else { #ifndef USE_CLBLAS typedef typename viennacl::vector_base::uint_tpype uint_tpype; typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(X, uint_tpype(n), - uint_tpype(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, uint_tpype(n), - uint_tpype(offY), difference_type(1), ctx); + viennacl::vector_base v1(X, uint_tpype(n), uint_tpype(offX), + difference_type(1), ctx); + viennacl::vector_base v2(Y, uint_tpype(n), uint_tpype(offY), + difference_type(1), ctx); *out = viennacl::linalg::inner_prod(v1, v2); @@ -582,18 +589,18 @@ void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, const cl_mem X, cl_int err; cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - sizeof(Dtype), NULL, &err); + sizeof(Dtype), NULL, &err); cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - n * sizeof(Dtype), NULL, &err); + n * sizeof(Dtype), NULL, &err); if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( clblasSdot(n, gpuout, 0, X, offX, 1, Y, - offY, 1, scratch, 1, &queue, 0, NULL, NULL)); + offY, 1, scratch, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( clblasDdot(n, gpuout, 0, X, offX, 1, Y, - offY, 1, scratch, 1, &queue, 0, NULL, NULL)); + offY, 1, scratch, 1, &queue, 0, NULL, NULL)); } greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, out, &ctx); @@ -620,23 +627,22 @@ void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, const cl_mem X, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - Dtype* Xptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - X, true, CL_MAP_READ, sizeof(Dtype) * offX, - sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + Dtype* Xptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), X, true, CL_MAP_READ, + sizeof(Dtype) * offX, sizeof(Dtype) * n, 0, NULL, NULL, NULL)); *Y = caffe_cpu_asum(n, Xptr); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - X, Xptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), X, Xptr, 0, NULL, + NULL); } else { #ifndef USE_CLBLAS typedef typename viennacl::vector_base::uint_tpype uint_tpype; typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(X, uint_tpype(n), - uint_tpype(offX), difference_type(1), ctx); + viennacl::vector_base v1(X, uint_tpype(n), uint_tpype(offX), + difference_type(1), ctx); *Y = viennacl::linalg::norm_1(v1); @@ -645,18 +651,18 @@ void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, const cl_mem X, cl_int err; cl_mem gpuout = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - sizeof(Dtype), NULL, &err); + sizeof(Dtype), NULL, &err); cl_mem scratch = clCreateBuffer(ctx.handle().get(), CL_MEM_READ_WRITE, - n * sizeof(Dtype), NULL, &err); + n * sizeof(Dtype), NULL, &err); if (std::is_same::value) { GREENTEA_CL_BLAS_CHECK( clblasSasum(n, gpuout, 0, X, offX, 1, - scratch, 1, &queue, 0, NULL, NULL)); + scratch, 1, &queue, 0, NULL, NULL)); } else { GREENTEA_CL_BLAS_CHECK( clblasDasum(n, gpuout, 0, X, offX, 1, - scratch, 1, &queue, 0, NULL, NULL)); + scratch, 1, &queue, 0, NULL, NULL)); } greentea_gpu_memcpy(sizeof(Dtype), gpuout, 0, Y, &ctx); @@ -681,31 +687,29 @@ void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const Dtype alpha, viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); if (ctx.devices()[0].type() == CL_DEVICE_TYPE_CPU) { - Dtype* Xptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - X, true, CL_MAP_READ, sizeof(Dtype) * offX, - sizeof(Dtype) * n, 0, NULL, NULL, NULL)); - Dtype* Yptr = reinterpret_cast( - clEnqueueMapBuffer(ctx.get_queue().handle().get(), - Y, true, CL_MAP_WRITE, sizeof(Dtype) * offY, - sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + Dtype* Xptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), X, true, CL_MAP_READ, + sizeof(Dtype) * offX, sizeof(Dtype) * n, 0, NULL, NULL, NULL)); + Dtype* Yptr = reinterpret_cast(clEnqueueMapBuffer( + ctx.get_queue().handle().get(), Y, true, CL_MAP_WRITE, + sizeof(Dtype) * offY, sizeof(Dtype) * n, 0, NULL, NULL, NULL)); caffe_cpu_scale(n, alpha, Xptr, Yptr); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - X, Xptr, 0, NULL, NULL); - clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), - Y, Yptr, 0, NULL, NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), X, Xptr, 0, NULL, + NULL); + clEnqueueUnmapMemObject(ctx.get_queue().handle().get(), Y, Yptr, 0, NULL, + NULL); } else { #ifndef USE_CLBLAS typedef typename viennacl::vector_base::uint_tpype uint_tpype; typedef typename viennacl::vector_base::uint_tpype difference_type; - viennacl::vector_base v1(X, uint_tpype(n), - uint_tpype(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, uint_tpype(n), - uint_tpype(offY), difference_type(1), ctx); + viennacl::vector_base v1(X, uint_tpype(n), uint_tpype(offX), + difference_type(1), ctx); + viennacl::vector_base v2(Y, uint_tpype(n), uint_tpype(offY), + difference_type(1), ctx); v2 = v1 * alpha; @@ -757,7 +761,8 @@ void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const Dtype alpha, } template void greentea_gpu_set(const int_tp ctx_id, const int_tp N, - const int_tp alpha, cl_mem Y, const int_tp offY); + const int_tp alpha, cl_mem Y, + const int_tp offY); template void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const float alpha, cl_mem Y, const int_tp offY); @@ -766,8 +771,8 @@ template void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const int_tp offY); template -void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const Dtype alpha, - cl_mem Y, const int_tp offY) { +void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, + const Dtype alpha, cl_mem Y, const int_tp offY) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -777,17 +782,18 @@ void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const Dtype al ctx.get_queue()); } -template void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, - const float alpha, cl_mem Y, - const int_tp offY); -template void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, +template void greentea_gpu_add_scalar(const int_tp ctx_id, + const int_tp N, const float alpha, + cl_mem Y, const int_tp offY); +template void greentea_gpu_add_scalar(const int_tp ctx_id, + const int_tp N, const double alpha, cl_mem Y, const int_tp offY); template void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy) { + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -800,17 +806,17 @@ void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, template void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, - const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy) { + const int_tp offa, const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -823,12 +829,12 @@ void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, template void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, const int_tp offa, - const cl_mem b, const int_tp offb, cl_mem y, - const int_tp offy); + const cl_mem b, const int_tp offb, + cl_mem y, const int_tp offy); template void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, const cl_mem a, @@ -843,11 +849,11 @@ void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, const cl_mem a, } template void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, - const cl_mem a, const int_tp offa, cl_mem y, - const int_tp offy); + const cl_mem a, const int_tp offa, + cl_mem y, const int_tp offy); template void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, - const cl_mem a, const int_tp offa, cl_mem y, - const int_tp offy); + const cl_mem a, const int_tp offa, + cl_mem y, const int_tp offy); template void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, const cl_mem a, @@ -862,11 +868,11 @@ void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, const cl_mem a, } template void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, - const cl_mem a, const int_tp offa, cl_mem y, - const int_tp offy); + const cl_mem a, const int_tp offa, + cl_mem y, const int_tp offy); template void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, - const cl_mem a, const int_tp offa, cl_mem y, - const int_tp offy); + const cl_mem a, const int_tp offa, + cl_mem y, const int_tp offy); template void greentea_gpu_powx(const int_tp ctx_id, const int_tp N, const cl_mem a, @@ -904,15 +910,15 @@ void greentea_gpu_log(const int_tp ctx_id, const int_tp N, const cl_mem a, } template void greentea_gpu_log(const int_tp ctx_id, const int_tp N, - const cl_mem a, const int_tp offa, cl_mem y, - const int_tp offy); + const cl_mem a, const int_tp offa, + cl_mem y, const int_tp offy); template void greentea_gpu_log(const int_tp ctx_id, const int_tp N, - const cl_mem a, const int_tp offa, cl_mem y, - const int_tp offy); + const cl_mem a, const int_tp offa, + cl_mem y, const int_tp offy); template -void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, const cl_mem x, int_tp offx, - cl_mem y, const int_tp offy) { +void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, const cl_mem x, + int_tp offx, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -932,7 +938,8 @@ template void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, template void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, const cl_mem x, - int_tp offx, cl_mem y, const int_tp offy) { +int_tp offx, + cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); @@ -951,7 +958,7 @@ template void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, const int_tp offy); void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, cl_mem r, - int_tp offr) { +int_tp offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); std::vector random(n); //NOLINT caffe_rng_uniform(n, &random[0]); @@ -959,37 +966,41 @@ void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, cl_mem r, } template -void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, const Dtype a, - const Dtype b, cl_mem r, const int_tp offr) { +void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, + const Dtype a, const Dtype b, cl_mem r, + const int_tp offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); std::vector random(n); // NOLINT caffe_rng_uniform(n, a, b, &random[0]); greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, &ctx); } -template void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, - const float a, const float b, - cl_mem r, const int_tp offr); -template void greentea_gpu_rng_uniform(const int_tp ctx_id, const int_tp n, - const double a, const double b, - cl_mem r, const int_tp offr); +template void greentea_gpu_rng_uniform(const int_tp ctx_id, + const int_tp n, const float a, + const float b, cl_mem r, + const int_tp offr); +template void greentea_gpu_rng_uniform(const int_tp ctx_id, + const int_tp n, const double a, + const double b, cl_mem r, + const int_tp offr); template -void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, const Dtype mu, - const Dtype sigma, cl_mem r, const int_tp offr) { +void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, + const Dtype mu, const Dtype sigma, cl_mem r, + const int_tp offr) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); std::vector random(n); // NOLINT caffe_rng_gaussian(n, mu, sigma, &random[0]); greentea_gpu_memcpy(sizeof(Dtype) * n, &random[0], r, offr, &ctx); } -template void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, - const float mu, +template void greentea_gpu_rng_gaussian(const int_tp ctx_id, + const int_tp n, const float mu, const float sigma, cl_mem r, const int_tp offr); -template void greentea_gpu_rng_gaussian(const int_tp ctx_id, const int_tp n, - const double mu, +template void greentea_gpu_rng_gaussian(const int_tp ctx_id, + const int_tp n, const double mu, const double sigma, cl_mem r, const int_tp offr); diff --git a/src/caffe/internal_thread.cpp b/src/caffe/internal_thread.cpp index 8622f46e711..71ce47f6e09 100644 --- a/src/caffe/internal_thread.cpp +++ b/src/caffe/internal_thread.cpp @@ -38,7 +38,8 @@ void InternalThread::StartInternalThread(device* device_context) { } void InternalThread::entry(device* device_context, Caffe::Brew mode, - int_tp rand_seed, int_tp solver_count, bool root_solver) { + int_tp rand_seed, + int_tp solver_count, bool root_solver) { Caffe::SelectDevice(device_context); Caffe::set_mode(mode); Caffe::set_random_seed(rand_seed); diff --git a/src/caffe/layers/batch_norm_layer.cu b/src/caffe/layers/batch_norm_layer.cu index 8d4c72fbe28..d294cfeada8 100644 --- a/src/caffe/layers/batch_norm_layer.cu +++ b/src/caffe/layers/batch_norm_layer.cu @@ -22,7 +22,8 @@ void BatchNormLayer::Forward_gpu(const vector*>& bottom, temp_.mutable_gpu_data()); if (use_global_stats_) { - // use the stored mean/variance estimates. TODO(cdoersch): allow an option + // use the stored mean/variance estimates. + // TODO(cdoersch): allow an option // to use an unbiased variance estimate, like the paper does. const Dtype scale_factor = 1 / this->blobs_[2]->cpu_data()[0]; caffe_gpu_scale(variance_.count(), scale_factor, @@ -106,7 +107,8 @@ void BatchNormLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (temp_.mutable_gpu_data()), 0); if (use_global_stats_) { - // use the stored mean/variance estimates. TODO(cdoersch): allow an option + // use the stored mean/variance estimates. + // TODO(cdoersch): allow an option // to use an unbiased variance estimate, like the paper does. const Dtype scale_factor = 1 / this->blobs_[2]->cpu_data()[0]; greentea_gpu_scale(this->device_->id(), variance_.count(), @@ -159,10 +161,11 @@ void BatchNormLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (mean_.gpu_data()), 0, Dtype(2), (cl_mem) (temp_.mutable_gpu_data()), 0); + // variance greentea_gpu_sub(this->device_->id(), mean_.count(), (cl_mem) (variance_.gpu_data()), 0, (cl_mem) (temp_.gpu_data()), 0, - (cl_mem) (variance_.mutable_gpu_data()), 0); // variance + (cl_mem) (variance_.mutable_gpu_data()), 0); // normalize variance greentea_gpu_add_scalar(this->device_->id(), variance_.count(), eps_, @@ -198,8 +201,9 @@ void BatchNormLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (num_by_chans_.gpu_data()), 0, (cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 0., (cl_mem) (temp_.mutable_gpu_data()), 0); - greentea_gpu_div(this->device_->id(), temp_.count(), (cl_mem) top_data, 0, - (cl_mem) (temp_.gpu_data()), 0, (cl_mem) top_data, 0); + greentea_gpu_div(this->device_->id(), temp_.count(), + (cl_mem) top_data, 0, (cl_mem) (temp_.gpu_data()), + 0, (cl_mem) top_data, 0); // TODO(cdoersch): The caching is only needed because later in-place layers // might clobber the data. Can we skip this if they won't? greentea_copy(x_norm_.count(), (cl_mem) top_data, 0, diff --git a/src/caffe/layers/batch_reindex_layer.cu b/src/caffe/layers/batch_reindex_layer.cu index 5c97213ead3..a9f28520a56 100644 --- a/src/caffe/layers/batch_reindex_layer.cu +++ b/src/caffe/layers/batch_reindex_layer.cu @@ -9,8 +9,8 @@ namespace caffe { #ifdef USE_CUDA template -__global__ void BRForward(const int_tp count, const int_tp inner_dim, const Dtype* in, - const Dtype* permut, Dtype* out) { +__global__ void BRForward(const int_tp count, const int_tp inner_dim, + const Dtype* in, const Dtype* permut, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { int_tp n = index / (inner_dim); int_tp in_n = static_cast(permut[n]); diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index f366ceecdf3..a4c6fc2238f 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -1,5 +1,6 @@ // TODO (sergeyk): effect should not be dependent on phase. wasted memcpy. +#include #include #include "caffe/neuron_layers.hpp" @@ -15,7 +16,10 @@ void DropoutLayer::LayerSetUp(const vector*>& bottom, DCHECK(threshold_ > 0.); DCHECK(threshold_ < 1.); scale_ = 1. / (1. - threshold_); - uint_thres_ = static_cast(ULONG_MAX * threshold_); + uint_thres_ = + static_cast(static_cast + (std::numeric_limits::max()) + * static_cast(threshold_)); } template diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index 96770ddd0bc..53f958343ed 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -29,7 +29,7 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, if (this->phase_ == TRAIN) { uint_tp* mask = static_cast(rand_vec_.mutable_gpu_data()); - caffe_gpu_rng_uniform(count, (uint_tpc*)(mask)); + caffe_gpu_rng_uniform(count, (uint_tpc*) (mask)); // NOLINT // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) DropoutForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index 4cf102e3fdc..f49c27c517d 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -33,7 +33,8 @@ void FilterLayer::Reshape(const vector*>& bottom, // look for non-zero elements in bottom[0]. Items of each bottom that // have the same index as the items in bottom[0] with value == non-zero // will be forwarded - for (int_tp item_id = 0; item_id < bottom[selector_index]->shape(0); ++item_id) { + for (int_tp item_id = 0; item_id < bottom[selector_index]->shape(0); + ++item_id) { // we don't need an offset because item size == 1 const Dtype* tmp_data_selector = bottom_data_selector + item_id; if (*tmp_data_selector) { diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 821a76513cd..b29f0651661 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -99,7 +99,8 @@ void Im2colLayer::Reshape(const vector*>& bottom, for (int_tp i = 0; i < num_spatial_axes_; ++i) { top_shape[channel_axis_] *= kernel_shape_data[i]; const int_tp input_dim = bottom[0]->shape(channel_axis_ + i + 1); - const int_tp output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) + const int_tp output_dim = (input_dim + 2 * pad_data[i] + - kernel_shape_data[i]) / stride_data[i] + 1; top_shape[channel_axis_ + i + 1] = output_dim; } diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index d0b92da29d6..d4385d03ec1 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -9,7 +9,8 @@ namespace caffe { template void InnerProductLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - const int_tp num_output = this->layer_param_.inner_product_param().num_output(); + const int_tp num_output = + this->layer_param_.inner_product_param().num_output(); bias_term_ = this->layer_param_.inner_product_param().bias_term(); N_ = num_output; const int_tp axis = bottom[0]->CanonicalAxisIndex( diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index fe972027448..c6a57d6eb83 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -9,9 +9,9 @@ namespace caffe { template __global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in, const int_tp num, const int_tp channels, - const int_tp height, const int_tp width, const int_tp size, - const Dtype alpha_over_size, const Dtype k, - Dtype* const scale) { + const int_tp height, const int_tp width, + const int_tp size, const Dtype alpha_over_size, + const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int_tp w = index % width; diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 4c2f510b26a..2149c61bab7 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -46,9 +46,10 @@ class MalisAffinityGraphCompare { template void MalisLossLayer::Malis(const Dtype* conn_data, const int_tp conn_num_dims, - const int_tp* conn_dims, const int_tp* nhood_data, - const int_tp* nhood_dims, const Dtype* seg_data, - const bool pos, + const int_tp* conn_dims, + const int_tp* nhood_data, + const int_tp* nhood_dims, + const Dtype* seg_data, const bool pos, Dtype* dloss_data, Dtype* loss_out, Dtype *classerr_out, Dtype *rand_index_out, Dtype margin, Dtype threshold) { diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index d231d5ec51e..bb24c639a42 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -16,9 +16,9 @@ template __global__ void CopyForward(const int_tp nthreads, const int_tp dims, const Dtype* bottom_a, const bool forward_a, const Dtype* bottom_b, const bool forward_b, - Dtype* top, const int_tp num, const int_tp channels_a, - const int_tp channels_b, const int_tp* shape_a, - const int_tp* shape_b) { + Dtype* top, const int_tp num, + const int_tp channels_a, const int_tp channels_b, + const int_tp* shape_a, const int_tp* shape_b) { int_tp pad[6]; // NOLINT(runtime/arrays) int_tp tmp_idx[6]; // NOLINT(runtime/arrays) int_tp size_a = 1; diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 15f98e14ce4..fc9ba803107 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -22,7 +22,8 @@ __global__ void MaxPoolForward(const int_tp nthreads, const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h, const int_tp pad_w, Dtype* const top_data, - int_tp* mask, Dtype* top_mask) { + int_tp* mask, + Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp pw = index % pooled_width; const int_tp ph = (index / pooled_width) % pooled_height; @@ -30,10 +31,10 @@ __global__ void MaxPoolForward(const int_tp nthreads, const int_tp n = index / pooled_width / pooled_height / channels; int_tp hstart = ph * stride_h - pad_h; int_tp wstart = pw * stride_w - pad_w; - const int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)height); - const int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)width); - hstart = max((int_tpc)(hstart), (int_tpc)(0)); - wstart = max((int_tpc)(wstart), (int_tpc)(0)); + const int_tp hend = min((int_tpc) (hstart + kernel_h), (int_tpc) height); + const int_tp wend = min((int_tpc) (wstart + kernel_w), (int_tpc) width); + hstart = max((int_tpc) (hstart), (int_tpc) (0)); + wstart = max((int_tpc) (wstart), (int_tpc) (0)); Dtype maxval = -FLT_MAX; int_tp maxidx = -1; const Dtype* const bottom_slice = bottom_data @@ -71,13 +72,14 @@ __global__ void AvePoolForward(const int_tp nthreads, const int_tp n = index / pooled_width / pooled_height / channels; int_tp hstart = ph * stride_h - pad_h; int_tp wstart = pw * stride_w - pad_w; - int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)(height + pad_h)); - int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)(width + pad_w)); + int_tp hend = min((int_tpc) (hstart + kernel_h), + (int_tpc) (height + pad_h)); + int_tp wend = min((int_tpc) (wstart + kernel_w), (int_tpc) (width + pad_w)); const int_tp pool_size = (hend - hstart) * (wend - wstart); - hstart = max((int_tpc)(hstart), (int_tpc)(0)); - wstart = max((int_tpc)(wstart), (int_tpc)(0)); - hend = min((int_tpc)(hend), (int_tpc)(height)); - wend = min((int_tpc)(wend), (int_tpc)(width)); + hstart = max((int_tpc) (hstart), (int_tpc) (0)); + wstart = max((int_tpc) (wstart), (int_tpc) (0)); + hend = min((int_tpc) (hend), (int_tpc) (height)); + wend = min((int_tpc) (wend), (int_tpc) (width)); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; @@ -96,9 +98,12 @@ __global__ void StoPoolForwardTrain(const int_tp nthreads, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp pooled_height, - const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp stride_h, - const int_tp stride_w, Dtype* const rand_idx, + const int_tp pooled_width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp stride_h, + const int_tp stride_w, + Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp pw = index % pooled_width; @@ -106,9 +111,9 @@ __global__ void StoPoolForwardTrain(const int_tp nthreads, const int_tp c = (index / pooled_width / pooled_height) % channels; const int_tp n = index / pooled_width / pooled_height / channels; const int_tp hstart = ph * stride_h; - const int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)height); + const int_tp hend = min((int_tpc) (hstart + kernel_h), (int_tpc) height); const int_tp wstart = pw * stride_w; - const int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)width); + const int_tp wend = min((int_tpc) (wstart + kernel_w), (int_tpc) width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; @@ -140,18 +145,19 @@ __global__ void StoPoolForwardTest(const int_tp nthreads, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp pooled_height, - const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp stride_h, - const int_tp stride_w, Dtype* const top_data) { + const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp stride_h, const int_tp stride_w, + Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp pw = index % pooled_width; const int_tp ph = (index / pooled_width) % pooled_height; const int_tp c = (index / pooled_width / pooled_height) % channels; const int_tp n = index / pooled_width / pooled_height / channels; const int_tp hstart = ph * stride_h; - const int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)height); + const int_tp hend = min((int_tpc) (hstart + kernel_h), (int_tpc) height); const int_tp wstart = pw * stride_w; - const int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)width); + const int_tp wend = min((int_tpc) (wstart + kernel_w), (int_tpc) width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; @@ -169,15 +175,17 @@ __global__ void StoPoolForwardTest(const int_tp nthreads, } template -__global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* const top_diff, +__global__ void MaxPoolBackward(const int_tp nthreads, + const Dtype* const top_diff, const int_tp* const mask, const Dtype* const top_mask, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp pooled_height, - const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp stride_h, - const int_tp stride_w, const int_tp pad_h, - const int_tp pad_w, Dtype* const bottom_diff) { + const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp pad_h, const int_tp pad_w, + Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset @@ -187,10 +195,12 @@ __global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* const top_di const int_tp n = index / width / height / channels; const int_tp phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - const int_tp phend = min((int_tpc)((h + pad_h) / stride_h + 1L), (int_tpc)pooled_height); + const int_tp phend = min((int_tpc) ((h + pad_h) / stride_h + 1L), + (int_tpc) pooled_height); const int_tp pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - const int_tp pwend = min((int_tpc)((w + pad_w) / stride_w + 1L), (int_tpc)pooled_width); + const int_tp pwend = min((int_tpc) ((w + pad_w) / stride_w + 1L), + (int_tpc) pooled_width); Dtype gradient = 0; const int_tp offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; @@ -218,10 +228,11 @@ __global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* const top_di } template -__global__ void AvePoolBackward(const int_tp nthreads, const Dtype* const top_diff, - const int_tp num, const int_tp channels, - const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, +__global__ void AvePoolBackward(const int_tp nthreads, + const Dtype* const top_diff, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h, const int_tp pad_w, @@ -234,9 +245,11 @@ __global__ void AvePoolBackward(const int_tp nthreads, const Dtype* const top_di const int_tp c = (index / width / height) % channels; const int_tp n = index / width / height / channels; const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int_tp phend = min((int_tpc)(h / stride_h + 1), (int_tpc)(pooled_height)); + const int_tp phend = min((int_tpc) (h / stride_h + 1), + (int_tpc) (pooled_height)); const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int_tp pwend = min((int_tpc)(w / stride_w + 1), (int_tpc)(pooled_width)); + const int_tp pwend = min((int_tpc) (w / stride_w + 1), + (int_tpc) (pooled_width)); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; @@ -245,8 +258,10 @@ __global__ void AvePoolBackward(const int_tp nthreads, const Dtype* const top_di // figure out the pooling size int_tp hstart = ph * stride_h - pad_h; int_tp wstart = pw * stride_w - pad_w; - int_tp hend = min((int_tpc)(hstart + kernel_h), (int_tpc)(height + pad_h)); - int_tp wend = min((int_tpc)(wstart + kernel_w), (int_tpc)(width + pad_w)); + int_tp hend = min((int_tpc) (hstart + kernel_h), + (int_tpc) (height + pad_h)); + int_tp wend = min((int_tpc) (wstart + kernel_w), + (int_tpc) (width + pad_w)); int_tp pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } @@ -256,13 +271,15 @@ __global__ void AvePoolBackward(const int_tp nthreads, const Dtype* const top_di } template -__global__ void StoPoolBackward(const int_tp nthreads, const Dtype* const rand_idx, +__global__ void StoPoolBackward(const int_tp nthreads, + const Dtype* const rand_idx, const Dtype* const top_diff, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp pooled_height, - const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp stride_h, - const int_tp stride_w, Dtype* const bottom_diff) { + const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp stride_h, const int_tp stride_w, + Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset @@ -271,9 +288,11 @@ __global__ void StoPoolBackward(const int_tp nthreads, const Dtype* const rand_i const int_tp c = (index / width / height) % channels; const int_tp n = index / width / height / channels; const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int_tp phend = min((int_tpc)(h / stride_h + 1), (int_tpc)pooled_height); + const int_tp phend = min((int_tpc) (h / stride_h + 1), + (int_tpc) pooled_height); const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int_tp pwend = min((int_tpc)(w / stride_w + 1), (int_tpc)pooled_width); + const int_tp pwend = min((int_tpc) (w / stride_w + 1), + (int_tpc) pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; @@ -281,10 +300,9 @@ __global__ void StoPoolBackward(const int_tp nthreads, const Dtype* const rand_i + (n * channels + c) * pooled_height * pooled_width; for (int_tp ph = phstart; ph < phend; ++ph) { for (int_tp pw = pwstart; pw < pwend; ++pw) { - gradient += - top_diff_slice[ph * pooled_width + pw] - * (index - == static_cast(rand_idx_slice[ph * pooled_width + pw])); + gradient += top_diff_slice[ph * pooled_width + pw] + * (index + == static_cast(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; @@ -295,13 +313,14 @@ template __global__ void MaxPoolForward(const int_tp nthreads, const Dtype* bottom_data, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, - const int_tp kernel_h, const int_tp kernel_w, - const int_tp ext_kernel_h, const int_tp ext_kernel_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - const int_tp pad_h, const int_tp pad_w, - Dtype* top_data, int_tp* mask, Dtype* top_mask) { + const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, const int_tp pad_h, + const int_tp pad_w, Dtype* top_data, + int_tp* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int_tp pw = index % pooled_width; int_tp ph = (index / pooled_width) % pooled_height; @@ -309,10 +328,10 @@ __global__ void MaxPoolForward(const int_tp nthreads, const Dtype* bottom_data, int_tp n = index / pooled_width / pooled_height / channels; int_tp hstart = ph * stride_h - pad_h; int_tp wstart = pw * stride_w - pad_w; - int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)height); - int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)width); - hstart = max((int_tpc)hstart, (int_tpc)(0)); - wstart = max((int_tpc)wstart, (int_tpc)(0)); + int_tp hend = min((int_tpc) (hstart + ext_kernel_h), (int_tpc) height); + int_tp wend = min((int_tpc) (wstart + ext_kernel_w), (int_tpc) width); + hstart = max((int_tpc) hstart, (int_tpc) (0)); + wstart = max((int_tpc) wstart, (int_tpc) (0)); Dtype maxval = -FLT_MAX; int_tp maxidx = -1; bottom_data += (n * channels + c) * height * width; @@ -337,13 +356,13 @@ template __global__ void AvePoolForward(const int_tp nthreads, const Dtype* bottom_data, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, - const int_tp kernel_h, const int_tp kernel_w, - const int_tp ext_kernel_h, const int_tp ext_kernel_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - const int_tp pad_h, const int_tp pad_w, - Dtype* top_data) { + const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, + const int_tp stride_w, const int_tp kstride_h, + const int_tp kstride_w, const int_tp pad_h, + const int_tp pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int_tp pw = index % pooled_width; int_tp ph = (index / pooled_width) % pooled_height; @@ -351,12 +370,14 @@ __global__ void AvePoolForward(const int_tp nthreads, const Dtype* bottom_data, int_tp n = index / pooled_width / pooled_height / channels; int_tp hstart = ph * stride_h - pad_h; int_tp wstart = pw * stride_w - pad_w; - int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)(height + pad_h)); - int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)(width + pad_w)); - hstart = max((int_tpc)hstart, (int_tpc)(0)); - wstart = max((int_tpc)wstart, (int_tpc)(0)); - hend = min((int_tpc)hend, (int_tpc)height); - wend = min((int_tpc)wend, (int_tpc)width); + int_tp hend = min((int_tpc) (hstart + ext_kernel_h), + (int_tpc) (height + pad_h)); + int_tp wend = min((int_tpc) (wstart + ext_kernel_w), + (int_tpc) (width + pad_w)); + hstart = max((int_tpc) hstart, (int_tpc) (0)); + wstart = max((int_tpc) wstart, (int_tpc) (0)); + hend = min((int_tpc) hend, (int_tpc) height); + wend = min((int_tpc) wend, (int_tpc) width); Dtype aveval = 0; bottom_data += (n * channels + c) * height * width; int_tp pool_size = 0; @@ -374,11 +395,16 @@ template __global__ void StoPoolForwardTrain(const int_tp nthreads, const Dtype* bottom_data, const int_tp num, const int_tp channels, const int_tp height, - const int_tp width, const int_tp pooled_height, - const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp ext_kernel_h, - const int_tp ext_kernel_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, + const int_tp width, + const int_tp pooled_height, + const int_tp pooled_width, + const int_tp kernel_h, + const int_tp kernel_w, + const int_tp ext_kernel_h, + const int_tp ext_kernel_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, Dtype* rand_idx, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { @@ -387,9 +413,9 @@ __global__ void StoPoolForwardTrain(const int_tp nthreads, int_tp c = (index / pooled_width / pooled_height) % channels; int_tp n = index / pooled_width / pooled_height / channels; int_tp hstart = ph * stride_h; - int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)height); + int_tp hend = min((int_tpc) (hstart + ext_kernel_h), (int_tpc) height); int_tp wstart = pw * stride_w; - int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)width); + int_tp wend = min((int_tpc) (wstart + ext_kernel_w), (int_tpc) width); Dtype cumsum = 0.; bottom_data += (n * channels + c) * height * width; // First pass: get sum @@ -415,14 +441,17 @@ __global__ void StoPoolForwardTrain(const int_tp nthreads, } template -__global__ void StoPoolForwardTest(const int_tp nthreads, const Dtype* bottom_data, - const int_tp num, const int_tp channels, - const int_tp height, const int_tp width, +__global__ void StoPoolForwardTest(const int_tp nthreads, + const Dtype* bottom_data, const int_tp num, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, - const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp ext_kernel_h, - const int_tp ext_kernel_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, + const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp ext_kernel_h, + const int_tp ext_kernel_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int_tp pw = index % pooled_width; @@ -430,9 +459,9 @@ __global__ void StoPoolForwardTest(const int_tp nthreads, const Dtype* bottom_da int_tp c = (index / pooled_width / pooled_height) % channels; int_tp n = index / pooled_width / pooled_height / channels; int_tp hstart = ph * stride_h; - int_tp hend = min((int_tpc)(hstart + ext_kernel_h), (int_tpc)height); + int_tp hend = min((int_tpc) (hstart + ext_kernel_h), (int_tpc) height); int_tp wstart = pw * stride_w; - int_tp wend = min((int_tpc)(wstart + ext_kernel_w), (int_tpc)width); + int_tp wend = min((int_tpc) (wstart + ext_kernel_w), (int_tpc) width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; @@ -453,9 +482,11 @@ __global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* top_diff, const int_tp* mask, const Dtype* top_mask, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, + const int_tp pooled_height, + const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp ext_kernel_h, const int_tp ext_kernel_w, + const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w, @@ -470,11 +501,13 @@ __global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* top_diff, int_tp pooled_height_1 = pooled_height - 1; int_tp pooled_width_1 = pooled_width - 1; - int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int_tp phstart = + (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; int_tp phend = (h >= pooled_height) ? pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; - int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + int_tp pwstart = + (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; int_tp pwend = (w >= pooled_width) ? pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; @@ -509,10 +542,12 @@ template __global__ void MaxPoolNDForward(const int_tp n, const int_tp num_axes, const Dtype* bottom_data, const int_tp channels, const int_tp* size, - const int_tp* pooled_size, const int_tp* kernel_size, - const int_tp* ext_kernel_size, const int_tp* stride, - const int_tp* kstride, const int_tp* pad, - Dtype* top_data, int_tp* mask, Dtype* top_mask) { + const int_tp* pooled_size, + const int_tp* kernel_size, + const int_tp* ext_kernel_size, + const int_tp* stride, const int_tp* kstride, + const int_tp* pad, Dtype* top_data, + int_tp* mask, Dtype* top_mask) { int_tp d_idx[6]; // NOLINT(runtime/arrays) int_tp d_start[6]; // NOLINT(runtime/arrays) int_tp d_end[6]; // NOLINT(runtime/arrays) @@ -525,8 +560,9 @@ __global__ void MaxPoolNDForward(const int_tp n, const int_tp num_axes, for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = index % pooled_size[i]; d_start[i] = d_idx[i] * stride[i] - pad[i]; - d_end[i] = min((int_tpc)(d_start[i] + ext_kernel_size[i]), (int_tpc)(size[i])); - d_start[i] = max((int_tpc)(d_start[i]), (int_tpc)(0)); + d_end[i] = min((int_tpc) (d_start[i] + ext_kernel_size[i]), + (int_tpc) (size[i])); + d_start[i] = max((int_tpc) (d_start[i]), (int_tpc) (0)); num /= pooled_size[i]; offset *= size[i]; d_iter[i] = d_start[i]; @@ -587,13 +623,12 @@ __global__ void MaxPoolNDForward(const int_tp n, const int_tp num_axes, template __global__ void MaxPoolNDBackward(const int_tp n, const int_tp num_axes, const Dtype* top_diff, const int_tp* mask, - const Dtype* top_mask, - const int_tp channels, const int_tp* size, - const int_tp* pooled_size, + const Dtype* top_mask, const int_tp channels, + const int_tp* size, const int_tp* pooled_size, const int_tp* kernel_size, - const int_tp* ext_kernel_size, const int_tp* stride, - const int_tp* kstride, const int_tp* pad, - Dtype* bottom_diff) { + const int_tp* ext_kernel_size, + const int_tp* stride, const int_tp* kstride, + const int_tp* pad, Dtype* bottom_diff) { int_tp d_idx[6]; // NOLINT(runtime/arrays) int_tp d_start[6]; // NOLINT(runtime/arrays) int_tp d_end[6]; // NOLINT(runtime/arrays) @@ -607,11 +642,14 @@ __global__ void MaxPoolNDBackward(const int_tp n, const int_tp num_axes, int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = num % size[i]; - d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? - d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; - d_end[i] = (d_idx[i] >= pooled_size[i]) ? - (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % - kstride[i] : d_idx[i]; + d_start[i] = + (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = + (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) + - (pooled_size[i] - 1 - d_start[i]) % kstride[i] : + d_idx[i]; num /= size[i]; offset *= pooled_size[i]; d_iter[i] = d_start[i]; @@ -668,11 +706,9 @@ __global__ void MaxPoolNDBackward(const int_tp n, const int_tp num_axes, } #endif // USE_CUDA - - template void PoolingLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int_tp count = top[0]->count(); @@ -712,7 +748,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, @@ -723,7 +759,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, @@ -762,48 +798,48 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // 2D case switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, + mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_h_, + kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (this->phase_ == TRAIN) { + // We need to create the random index as well. + caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( + StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, - mask, top_mask); - break; - case PoolingParameter_PoolMethod_AVE: + kernel_w_, stride_h_, stride_w_, + rand_idx_.mutable_gpu_data(), top_data); + } else { // NOLINT_NEXT_LINE(whitespace/operators) - AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( + StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - if (this->phase_ == TRAIN) { - // We need to create the random index as well. - caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), - rand_idx_.mutable_gpu_data()); - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, - rand_idx_.mutable_gpu_data(), top_data); - } else { - // NOLINT_NEXT_LINE(whitespace/operators) - StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, - height_, width_, pooled_height_, pooled_width_, kernel_h_, - kernel_w_, stride_h_, stride_w_, top_data); - } - break; + kernel_w_, stride_h_, stride_w_, top_data); + } + break; default: { LOG(FATAL)<< "Unknown pooling method."; } @@ -813,20 +849,20 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, } else { switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: - if (use_top_mask) { - top_mask = top[1]->mutable_gpu_data(); - } else { - mask = max_idx_.mutable_gpu_data(); - } - // NOLINT_NEXT_LINE(whitespace/operators) - MaxPoolNDForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( - count, num_spatial_axes_, bottom_data, - channels_, size_.gpu_data(), pooled_size_.gpu_data(), - kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), - stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), - top_data, mask, top_mask); - break; + if (use_top_mask) { + top_mask = top[1]->mutable_gpu_data(); + } else { + mask = max_idx_.mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolNDForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)( + count, num_spatial_axes_, bottom_data, + channels_, size_.gpu_data(), pooled_size_.gpu_data(), + kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), + top_data, mask, top_mask); + break; default: { LOG(FATAL)<< "Unknown pooling method."; } @@ -1092,7 +1128,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)( + CAFFE_CUDA_NUM_THREADS)( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, @@ -1145,8 +1181,8 @@ void PoolingLayer::Backward_gpu(const vector*>& top, CUDA_POST_KERNEL_CHECK; } } else { - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { @@ -1161,14 +1197,14 @@ void PoolingLayer::Backward_gpu(const vector*>& top, stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(), bottom_diff); break; - default: + default: LOG(FATAL)<< "Unknown or unsupported pooling method in Backward_gpu()."; } - CUDA_POST_KERNEL_CHECK; - } + CUDA_POST_KERNEL_CHECK; + } #endif // USE_CUDA - } else { + } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); @@ -1203,7 +1239,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, mask = max_idx_.gpu_data(); } viennacl::ocl::kernel &oclk_max_pool_backward = - program.get_kernel( + program.get_kernel( CL_KERNEL_SELECT("max_pool_backward_sk")); viennacl::ocl::enqueue( oclk_max_pool_backward(count, @@ -1233,7 +1269,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, mask = max_idx_.gpu_data(); } viennacl::ocl::kernel &oclk_max_pool_backward = - program.get_kernel( + program.get_kernel( CL_KERNEL_SELECT("max_pool_backward")); viennacl::ocl::enqueue( oclk_max_pool_backward(count, @@ -1251,7 +1287,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, break; case PoolingParameter_PoolMethod_AVE: { viennacl::ocl::kernel &oclk_ave_pool_backward = - program.get_kernel( + program.get_kernel( CL_KERNEL_SELECT("ave_pool_backward")); viennacl::ocl::enqueue( oclk_ave_pool_backward(count, @@ -1266,7 +1302,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, break; case PoolingParameter_PoolMethod_STOCHASTIC: { viennacl::ocl::kernel &oclk_sto_pool_backward = - program.get_kernel( + program.get_kernel( CL_KERNEL_SELECT("sto_pool_backward")); viennacl::ocl::enqueue( oclk_sto_pool_backward( @@ -1313,12 +1349,12 @@ void PoolingLayer::Backward_gpu(const vector*>& top, break; default: LOG(FATAL) - << "Unknown or unsupported pooling method in Backward_gpu()."; + << "Unknown or unsupported pooling method in Backward_gpu()."; } } #endif // USE_GREENTEA + } } -} INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index ca6a8516d7a..db7e3f4fd4d 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -13,8 +13,8 @@ namespace caffe { #ifdef USE_CUDA // CUDA kernele for forward template -__global__ void PReLUForward(const int_tp n, const int_tp channels, const int_tp dim, - const Dtype* in, Dtype* out, +__global__ void PReLUForward(const int_tp n, const int_tp channels, + const int_tp dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int_tp div_factor) { CUDA_KERNEL_LOOP(index, n) { int_tp c = (index / dim) % channels / div_factor; @@ -24,9 +24,10 @@ __global__ void PReLUForward(const int_tp n, const int_tp channels, const int_tp // CUDA kernel for bottom backward template -__global__ void PReLUBackward(const int_tp n, const int_tp channels, const int_tp dim, - const Dtype* in_diff, const Dtype* in_data, - Dtype* out_diff, const Dtype* slope_data, +__global__ void PReLUBackward(const int_tp n, const int_tp channels, + const int_tp dim, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff, + const Dtype* slope_data, const int_tp div_factor) { CUDA_KERNEL_LOOP(index, n) { int_tp c = (index / dim) % channels / div_factor; diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 8cec3a9202d..9eb0b4f7759 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -10,7 +10,8 @@ template __global__ void Slice(const int_tp nthreads, const Dtype* in_data, const bool forward, const int_tp num_slices, const int_tp slice_size, const int_tp bottom_slice_axis, - const int_tp top_slice_axis, const int_tp offset_slice_axis, + const int_tp top_slice_axis, + const int_tp offset_slice_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp total_slice_size = slice_size * top_slice_axis; diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 45d446a5bc7..3425fa742ae 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -68,7 +68,8 @@ __global__ void kernel_channel_sum(const int_tp num, const int_tp channels, template __global__ void kernel_channel_div(const int_tp count, const int_tp num, - const int_tp channels, const int_tp spatial_dim, + const int_tp channels, + const int_tp spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int_tp n = index / channels / spatial_dim; @@ -79,8 +80,9 @@ __global__ void kernel_channel_div(const int_tp count, const int_tp num, template __global__ void kernel_channel_dot(const int_tp num, const int_tp channels, - const int_tp spatial_dim, const Dtype* data_1, - const Dtype* data_2, Dtype* channel_dot) { + const int_tp spatial_dim, + const Dtype* data_1, const Dtype* data_2, + Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int_tp n = index / spatial_dim; int_tp s = index % spatial_dim; diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index 9bed09621e6..2c59c456e85 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -101,7 +101,8 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, int_tp count = 0; for (int_tp i = 0; i < outer_num_; ++i) { for (int_tp j = 0; j < inner_num_; ++j) { - const int_tp label_value = static_cast(label[i * inner_num_ + j]); + const int_tp label_value = static_cast + (label[i * inner_num_ + j]); if (has_ignore_label_ && label_value == ignore_label_) { for (int_tp c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { bottom_diff[i * dim + c * inner_num_ + j] = 0; diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 05df5e4ce21..3b1a5108645 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -20,7 +20,8 @@ __global__ void SoftmaxLossForwardGPU(const int_tp nthreads, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, - const int_tp ignore_label_, Dtype* counts) { + const int_tp ignore_label_, + Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; @@ -137,7 +138,8 @@ __global__ void SoftmaxLossBackwardGPU(const int_tp nthreads, const Dtype* top, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, - const int_tp ignore_label_, Dtype* counts) { + const int_tp ignore_label_, + Dtype* counts) { const int_tp channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { diff --git a/src/caffe/layers/spp_layer.cpp b/src/caffe/layers/spp_layer.cpp index fc053fb0c7b..464aa4d652c 100644 --- a/src/caffe/layers/spp_layer.cpp +++ b/src/caffe/layers/spp_layer.cpp @@ -10,7 +10,9 @@ using std::max; template LayerParameter SPPLayer::GetPoolingParam(const int_tp pyramid_level, - const int_tp bottom_h, const int_tp bottom_w, const SPPParameter spp_param) { + const int_tp bottom_h, + const int_tp bottom_w, + const SPPParameter spp_param) { LayerParameter pooling_param; int_tp num_bins = pow(2, pyramid_level); diff --git a/src/caffe/layers/tile_layer.cu b/src/caffe/layers/tile_layer.cu index ec05c8ca97b..47d64c1dd96 100644 --- a/src/caffe/layers/tile_layer.cu +++ b/src/caffe/layers/tile_layer.cu @@ -12,10 +12,10 @@ namespace caffe { #ifdef USE_CUDA -template +template __global__ void Tile(const int_tp nthreads, const Dtype* bottom_data, - const int_tp tile_size, const int_tp num_tiles, const int_tp bottom_tile_axis, - Dtype* top_data) { + const int_tp tile_size, const int_tp num_tiles, + const int_tp bottom_tile_axis, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp d = index % tile_size; const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis; @@ -60,7 +60,8 @@ void TileLayer::Forward_gpu( #ifdef USE_CUDA template __global__ void TileBackward(const int_tp nthreads, const Dtype* top_diff, - const int_tp tile_size, const int_tp num_tiles, const int_tp bottom_tile_axis, + const int_tp tile_size, const int_tp num_tiles, + const int_tp bottom_tile_axis, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp d = index % tile_size; diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index 2cd4aa205c9..d5a777b9e1b 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -234,7 +234,8 @@ void WindowDataLayer::load_batch(Batch* batch) { Dtype* top_label = batch->label_.mutable_cpu_data(); const Dtype scale = this->layer_param_.window_data_param().scale(); const int_tp batch_size = this->layer_param_.window_data_param().batch_size(); - const int_tp context_pad = this->layer_param_.window_data_param().context_pad(); + const int_tp context_pad = + this->layer_param_.window_data_param().context_pad(); const int_tp crop_size = this->transform_param_.crop_size(); const bool mirror = this->transform_param_.mirror(); const float fg_fraction = @@ -356,11 +357,12 @@ void WindowDataLayer::load_batch(Batch* batch) { static_cast(crop_size)/static_cast(unclipped_height); // size to warp the clipped expanded region to - cv_crop_size.width = - static_cast(round(static_cast(clipped_width)*scale_x)); - cv_crop_size.height = - static_cast(round(static_cast(clipped_height)*scale_y)); - pad_x1 = static_cast(round(static_cast(pad_x1)*scale_x)); + cv_crop_size.width = static_cast(round( + static_cast(clipped_width) * scale_x)); + cv_crop_size.height = static_cast(round( + static_cast(clipped_height) * scale_y)); + pad_x1 = + static_cast(round(static_cast(pad_x1) * scale_x)); pad_x2 = static_cast(round(static_cast(pad_x2)*scale_x)); pad_y1 = static_cast(round(static_cast(pad_y1)*scale_y)); pad_y2 = static_cast(round(static_cast(pad_y2)*scale_y)); @@ -399,8 +401,9 @@ void WindowDataLayer::load_batch(Batch* batch) { int_tp img_index = 0; for (int_tp w = 0; w < cv_cropped_img.cols; ++w) { for (int_tp c = 0; c < channels; ++c) { - int_tp top_index = ((item_id * channels + c) * crop_size + h + pad_h) - * crop_size + w + pad_w; + int_tp top_index = + ((item_id * channels + c) * crop_size + h + pad_h) * crop_size + + w + pad_w; // int_tp top_index = (c * height + h) * width + w; Dtype pixel = static_cast(ptr[img_index++]); if (this->has_mean_file_) { diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 304ce79158f..4a9c61fddf6 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -193,7 +193,8 @@ void Net::Init(const NetParameter& in_param) { // Finally, set the backward flag layer_need_backward_.push_back(need_backward); if (need_backward) { - for (int_tp top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) { + for (int_tp top_id = 0; top_id < top_id_vecs_[layer_id].size(); + ++top_id) { blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true; } } @@ -471,9 +472,10 @@ void Net::AppendTop(const NetParameter& param, const int_tp layer_id, // Helper for Net::Init: add a new bottom blob to the net. template -int_tp Net::AppendBottom(const NetParameter& param, const int_tp layer_id, - const int_tp bottom_id, set* available_blobs, - map* blob_name_to_idx) { +int_tp Net::AppendBottom(const NetParameter& param, + const int_tp layer_id, const int_tp bottom_id, + set* available_blobs, + map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 20e37b334ea..522f4ed9feb 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -138,8 +138,10 @@ void Solver::InitTestNets() { // test networks -- the actual number is given by the number of remaining // test_iters after any test nets specified by test_net_param and/or test_net // are evaluated. - const int_tp num_generic_net_instances = param_.test_iter_size() - num_test_nets; - const int_tp num_test_net_instances = num_test_nets + num_generic_net_instances; + const int_tp num_generic_net_instances = param_.test_iter_size() + - num_test_nets; + const int_tp num_test_net_instances = num_test_nets + + num_generic_net_instances; if (param_.test_state_size()) { CHECK_EQ(param_.test_state_size(), num_test_net_instances) << "test_state must be unspecified or specified once per test net."; diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 668fd6d7077..0ef875034cc 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -166,7 +166,8 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } label_offset[0] = n; label_offset[1] = h; label_offset[2] = w; const int_tp correct_label = - static_cast(this->blob_bottom_label_->data_at(label_offset)); + static_cast(this->blob_bottom_label_ + ->data_at(label_offset)); if (max_id == correct_label) { ++num_correct_labels; } diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index e8382676653..7520574fa54 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -119,7 +119,8 @@ class DataLayerTest : public MultiDeviceTest { datum.set_height(i % 2 + 1); datum.set_width(i % 4 + 1); std::string* data = datum.mutable_data(); - const int_tp data_size = datum.channels() * datum.height() * datum.width(); + const int_tp data_size = datum.channels() * datum.height() + * datum.width(); for (int_tp j = 0; j < data_size; ++j) { data->push_back(static_cast(j)); } diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index fb139e69380..0fcedffc27e 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -34,7 +34,7 @@ template class DataTransformTest : public ::testing::Test { protected: DataTransformTest() - : seed_(1701), + : seed_(1704), num_iter_(10) {} int_tp NumSequenceMatches(const TransformationParameter transform_param, diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index d75136c725a..2d7cd785aa0 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -19,17 +19,17 @@ using std::ostringstream; namespace caffe { -template +template class GradientBasedSolverTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: - GradientBasedSolverTest() : - seed_(1701), num_(4), channels_(3), height_(10), width_(10), - share_(false) { - input_file_ = new string( - CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); - } + GradientBasedSolverTest() + : seed_(1701), num_(4), channels_(3), height_(10), width_(10), + share_(false) { + input_file_ = new string( + CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); + } ~GradientBasedSolverTest() { delete input_file_; } @@ -61,16 +61,18 @@ class GradientBasedSolverTest : public MultiDeviceTest { param.set_solver_mode(SolverParameter_SolverMode_GPU); break; default: - LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode(); - } + LOG(FATAL)<< "Unknown Caffe mode: " << Caffe::mode(); + } InitSolver(param); delta_ = param.delta(); } string RunLeastSquaresSolver(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, const int num_iters, - const int iter_size = 1, const int devices = 1, - const bool snapshot = false, const char* from_snapshot = NULL) { + const Dtype weight_decay, const Dtype momentum, + const int num_iters, const int iter_size = 1, + const int devices = 1, const bool snapshot = + false, + const char* from_snapshot = NULL) { ostringstream proto; int device_id = 0; #ifndef CPU_ONLY @@ -81,98 +83,95 @@ class GradientBasedSolverTest : public MultiDeviceTest { } #endif // USE_CUDA #endif // !CPU_ONLY - proto << - "snapshot_after_train: " << snapshot << " " - "max_iter: " << num_iters << " " - "base_lr: " << learning_rate << " " - "lr_policy: 'fixed' " - "iter_size: " << iter_size << " " - "device_id: " << device_id << " " - "net_param { " - " name: 'TestNetwork' " - " layer { " - " name: 'data' " - " type: 'HDF5Data' " - " hdf5_data_param { " - " source: '" << *(this->input_file_) << "' " - " batch_size: " << num_ / iter_size << " " - " } " - " top: 'data' " - " top: 'targets' " - " } "; + proto << "snapshot_after_train: " << snapshot << " " + "max_iter: " << num_iters << " " + "base_lr: " << learning_rate << " " + "lr_policy: 'fixed' " + "iter_size: " << iter_size << " " + "device_id: " << device_id << " " + "net_param { " + " name: 'TestNetwork' " + " layer { " + " name: 'data' " + " type: 'HDF5Data' " + " hdf5_data_param { " + " source: '" << *(this->input_file_) << "' " + " batch_size: " << num_ / iter_size << " " + " } " + " top: 'data' " + " top: 'targets' " + " } "; if (share_) { - proto << - " layer { " - " name: 'slice' " - " type: 'Slice' " - " bottom: 'data' " - " top: 'data1' " - " top: 'data2' " - " slice_param { " - " axis: 0 " - " } " - " } "; + proto << " layer { " + " name: 'slice' " + " type: 'Slice' " + " bottom: 'data' " + " top: 'data1' " + " top: 'data2' " + " slice_param { " + " axis: 0 " + " } " + " } "; } - proto << - " layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " param { name: 'weights' } " - " param { name: 'bias' } " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " bias_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " } " - " bottom: '" << string(share_ ? "data1": "data") << "' " - " top: '" << string(share_ ? "innerprod1": "innerprod") << "' " - " } "; + proto << " layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " param { name: 'weights' } " + " param { name: 'bias' } " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " bias_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " } " + " bottom: '" + << string(share_ ? "data1" : "data") << "' " + " top: '" + << string(share_ ? "innerprod1" : "innerprod") << "' " + " } "; if (share_) { - proto << - " layer { " - " name: 'innerprod2' " - " type: 'InnerProduct' " - " param { name: 'weights' } " - " param { name: 'bias' } " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " bias_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " } " - " bottom: 'data2' " - " top: 'innerprod2' " - " } " - " layer { " - " name: 'concat' " - " type: 'Concat' " - " bottom: 'innerprod1' " - " bottom: 'innerprod2' " - " top: 'innerprod' " - " concat_param { " - " axis: 0 " - " } " - " } "; + proto << " layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " param { name: 'weights' } " + " param { name: 'bias' } " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " bias_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " } " + " bottom: 'data2' " + " top: 'innerprod2' " + " } " + " layer { " + " name: 'concat' " + " type: 'Concat' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + " top: 'innerprod' " + " concat_param { " + " axis: 0 " + " } " + " } "; } - proto << - " layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'innerprod' " - " bottom: 'targets' " - " } " - "} "; + proto << " layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod' " + " bottom: 'targets' " + " } " + "} "; if (weight_decay != 0) { proto << "weight_decay: " << weight_decay << " "; } @@ -196,18 +195,18 @@ class GradientBasedSolverTest : public MultiDeviceTest { if (devices == 1) { this->solver_->Solve(); } else { - LOG(INFO) << "Multi-GPU test on " << devices << " devices"; + LOG(INFO)<< "Multi-GPU test on " << devices << " devices"; vector gpus; // put current device at the beginning device* dc = Caffe::GetDevice(solver_->param().device_id()); gpus.push_back(dc); for (int i = 0; gpus.size() < devices; ++i) { if (i != device_id) - gpus.push_back(Caffe::GetDevice(i)); + gpus.push_back(Caffe::GetDevice(i)); } Caffe::set_solver_count(gpus.size()); this->sync_.reset(new P2PSync( - this->solver_, NULL, this->solver_->param())); + this->solver_, NULL, this->solver_->param())); this->sync_->run(gpus); Caffe::set_solver_count(1); } @@ -225,9 +224,9 @@ class GradientBasedSolverTest : public MultiDeviceTest { // using the analytical formula for the least squares gradient. // updated_params will store the updated weight and bias results, // using the blobs' diffs to hold the update values themselves. - void ComputeLeastSquaresUpdate(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, const int num_iters, - vector > >* updated_params) { + void ComputeLeastSquaresUpdate( + const Dtype learning_rate, const Dtype weight_decay, const Dtype momentum, + const int num_iters, vector > >* updated_params) { const int N = num_; const int D = channels_ * height_ * width_; @@ -241,8 +240,8 @@ class GradientBasedSolverTest : public MultiDeviceTest { ASSERT_TRUE(net.has_blob("targets")); const Blob& targets = *net.blob_by_name("targets"); ASSERT_TRUE(net.has_layer("innerprod")); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); + const vector > >& param_blobs = net.layer_by_name( + "innerprod")->blobs(); const int num_param_blobs = 2; ASSERT_EQ(num_param_blobs, param_blobs.size()); const Blob& weights = *param_blobs[0]; @@ -288,8 +287,8 @@ class GradientBasedSolverTest : public MultiDeviceTest { // Scale the gradient over the N samples. grad /= N; // Add the weight decay to the gradient. - grad += weight_decay * - ((i == D) ? bias.cpu_data()[0] : weights.cpu_data()[i]); + grad += weight_decay + * ((i == D) ? bias.cpu_data()[0] : weights.cpu_data()[i]); // Finally, compute update. const vector > >& history = solver_->history(); if (solver_->type() != string("AdaDelta") @@ -299,8 +298,8 @@ class GradientBasedSolverTest : public MultiDeviceTest { ASSERT_EQ(4, history.size()); // additional blobs for update history } Dtype update_value = learning_rate * grad; - const Dtype history_value = (i == D) ? - history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; + const Dtype history_value = + (i == D) ? history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; const Dtype temp = momentum * history_value; if (solver_->type() == string("SGD")) { update_value += temp; @@ -312,41 +311,45 @@ class GradientBasedSolverTest : public MultiDeviceTest { update_value /= std::sqrt(history_value + grad * grad) + delta_; } else if (solver_->type() == string("RMSProp")) { const Dtype rms_decay = 0.95; - update_value /= std::sqrt(rms_decay*history_value - + grad * grad * (1 - rms_decay)) + delta_; + update_value /= std::sqrt( + rms_decay * history_value + grad * grad * (1 - rms_decay)) + delta_; } else if (solver_->type() == string("AdaDelta")) { - const Dtype update_history_value = (i == D) ? - history[1 + num_param_blobs]->cpu_data()[0] : - history[0 + num_param_blobs]->cpu_data()[i]; - const Dtype weighted_gradient_average = - momentum * history_value + (1 - momentum) * (grad * grad); - update_value = grad * std::sqrt((update_history_value + delta_) / - (weighted_gradient_average + delta_)) * learning_rate; + const Dtype update_history_value = + (i == D) ? + history[1 + num_param_blobs]->cpu_data()[0] : + history[0 + num_param_blobs]->cpu_data()[i]; + const Dtype weighted_gradient_average = momentum * history_value + + (1 - momentum) * (grad * grad); + update_value = grad + * std::sqrt( + (update_history_value + delta_) + / (weighted_gradient_average + delta_)) * learning_rate; // not actually needed, just here for illustrative purposes // const Dtype weighted_update_average = // momentum * update_history_value + (1 - momentum) * (update_value); } else if (solver_->type() == string("Adam")) { const Dtype momentum2 = 0.999; const Dtype m = history_value; - const Dtype v = (i == D) ? - history[1 + num_param_blobs]->cpu_data()[0] : - history[0 + num_param_blobs]->cpu_data()[i]; + const Dtype v = + (i == D) ? + history[1 + num_param_blobs]->cpu_data()[0] : + history[0 + num_param_blobs]->cpu_data()[i]; const Dtype val_m = (1 - momentum) * grad + momentum * m; const Dtype val_v = (1 - momentum2) * grad * grad + momentum2 * v; - Dtype alpha_t = learning_rate * - std::sqrt(Dtype(1) - pow(momentum2, num_iters)) / - (Dtype(1.) - pow(momentum, num_iters)); + Dtype alpha_t = learning_rate + * std::sqrt(Dtype(1) - pow(momentum2, num_iters)) + / (Dtype(1.) - pow(momentum, num_iters)); update_value = alpha_t * val_m / (std::sqrt(val_v) + delta_); } else { - LOG(FATAL) << "Unknown solver type: " << solver_->type(); + LOG(FATAL)<< "Unknown solver type: " << solver_->type(); } if (i == D) { updated_bias.mutable_cpu_diff()[0] = update_value; updated_bias.mutable_cpu_data()[0] = bias.cpu_data()[0] - update_value; } else { updated_weights.mutable_cpu_diff()[i] = update_value; - updated_weights.mutable_cpu_data()[i] = - weights.cpu_data()[i] - update_value; + updated_weights.mutable_cpu_data()[i] = weights.cpu_data()[i] + - update_value; } } } @@ -360,8 +363,8 @@ class GradientBasedSolverTest : public MultiDeviceTest { Net& net = *this->solver_->net(); ASSERT_TRUE(net.has_layer("innerprod")); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); + const vector > >& param_blobs = net.layer_by_name( + "innerprod")->blobs(); ASSERT_EQ(2, param_blobs.size()); const Blob& solver_updated_weights = *param_blobs[0]; ASSERT_EQ(D, solver_updated_weights.count()); @@ -370,16 +373,21 @@ class GradientBasedSolverTest : public MultiDeviceTest { for (int i = 0; i < D; ++i) { const Dtype expected_updated_weight = updated_weights.cpu_data()[i]; const Dtype solver_updated_weight = solver_updated_weights.cpu_data()[i]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_updated_weight), fabs(solver_updated_weight))); + const Dtype error_margin = std::max( + kMinPrecision, + kPrecision + * std::min(fabs(expected_updated_weight), + fabs(solver_updated_weight))); EXPECT_NEAR(expected_updated_weight, solver_updated_weight, error_margin); } const Blob& solver_updated_bias_blob = *param_blobs[1]; ASSERT_EQ(1, solver_updated_bias_blob.count()); const Dtype expected_updated_bias = updated_bias.cpu_data()[0]; const Dtype solver_updated_bias = solver_updated_bias_blob.cpu_data()[0]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_updated_bias), fabs(solver_updated_bias))); + const Dtype error_margin = std::max( + kMinPrecision, + kPrecision + * std::min(fabs(expected_updated_bias), fabs(solver_updated_bias))); EXPECT_NEAR(expected_updated_bias, solver_updated_bias, error_margin); // Check the solver's history -- should contain the previous update value. @@ -389,29 +397,33 @@ class GradientBasedSolverTest : public MultiDeviceTest { for (int i = 0; i < D; ++i) { const Dtype expected_history = updated_weights.cpu_diff()[i]; const Dtype solver_history = history[0]->cpu_data()[i]; - const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_history), fabs(solver_history))); + const Dtype error_margin_hist = std::max( + kMinPrecision, + kPrecision + * std::min(fabs(expected_history), fabs(solver_history))); EXPECT_NEAR(expected_history, solver_history, error_margin_hist); } const Dtype expected_history = updated_bias.cpu_diff()[0]; const Dtype solver_history = history[1]->cpu_data()[0]; - const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_history), fabs(solver_history))); + const Dtype error_margin_hist = std::max( + kMinPrecision, + kPrecision * std::min(fabs(expected_history), fabs(solver_history))); EXPECT_NEAR(expected_history, solver_history, error_margin_hist); } } void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, - const Dtype kMomentum, const int kNumIters, const int kIterSize) { + const Dtype kMomentum, const int kNumIters, + const int kIterSize) { const double kPrecision = 1e-2; const double kMinPrecision = 1e-7; // Solve without accumulation and save parameters. this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters); + kNumIters); // Save parameters for comparison. Net& net = *this->solver_->net(); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); + const vector > >& param_blobs = net.layer_by_name( + "innerprod")->blobs(); vector > > noaccum_params(param_blobs.size()); for (int i = 0; i < param_blobs.size(); ++i) { noaccum_params[i].reset(new Blob()); @@ -419,24 +431,26 @@ class GradientBasedSolverTest : public MultiDeviceTest { } // Solve by equivalent accumulation of gradients over divided batches. this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters, kIterSize); + kNumIters, kIterSize); Net& net_accum = *this->solver_->net(); - const vector > >& accum_params = - net_accum.layer_by_name("innerprod")->blobs(); + const vector > >& accum_params = net_accum + .layer_by_name("innerprod")->blobs(); // Compare accumulated parameters against no accumulation standard. const int D = this->channels_ * this->height_ * this->width_; for (int i = 0; i < D; ++i) { const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; const Dtype accum_param = accum_params[0]->cpu_data()[i]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_param), fabs(accum_param))); + const Dtype error_margin = std::max( + kMinPrecision, + kPrecision * std::min(fabs(expected_param), fabs(accum_param))); EXPECT_NEAR(expected_param, accum_param, error_margin); } ASSERT_EQ(1, accum_params[1]->count()); const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; const Dtype accum_bias = accum_params[1]->cpu_data()[0]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_bias), fabs(accum_bias))); + const Dtype error_margin = std::max( + kMinPrecision, + kPrecision * std::min(fabs(expected_bias), fabs(accum_bias))); EXPECT_NEAR(expected_bias, accum_bias, error_margin); } @@ -456,818 +470,817 @@ class GradientBasedSolverTest : public MultiDeviceTest { // from the Kth update, we compute the (K+1)th update and check that it // matches the solver's (K+1)th update. void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0, - const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, - const int iter_to_check = 0) { + const Dtype weight_decay = 0.0, + const Dtype momentum = 0.0, + const int iter_to_check = 0) { const int kNum = num_; const int kIterSize = 1; // Test over all numbers of devices. int available_devices = 1; #ifndef CPU_ONLY #ifdef USE_CUDA - if (Caffe::mode() == Caffe::GPU && - Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + if (Caffe::mode() == Caffe::GPU + && Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { CUDA_CHECK(cudaGetDeviceCount(&available_devices)); } #endif // USE_CUDA #endif // !CPU_ONLY for (int devices = 1; devices <= available_devices; ++devices) { - // Configure batch size for single / multi device equivalence. - // Constant data is needed for multi device as for accumulation. - num_ = kNum * devices; - - // Initialize the solver and run K (= iter_to_check) solver iterations - // (on single device). - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check, kIterSize, 1); - - // Compute the (K+1)th update using the analytic least squares gradient. - vector > > updated_params; - ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, - iter_to_check + 1, &updated_params); - - // Reinitialize the solver and run K+1 solver iterations. - num_ = kNum; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check + 1, kIterSize, devices); - - // Check that the solver's solution matches ours. - CheckLeastSquaresUpdate(updated_params); - } - } - - void TestSnapshot(const Dtype learning_rate = 1.0, - const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, - const int num_iters = 1) { - // Run the solver for num_iters * 2 iterations. - const int total_num_iters = num_iters * 2; - bool snapshot = false; - const int kIterSize = 1; - const int kDevices = 1; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - total_num_iters, kIterSize, kDevices, snapshot); - - // Save the resulting param values. - vector > > param_copies; - const vector*>& orig_params = - solver_->net()->learnable_params(); - param_copies.resize(orig_params.size()); - for (int i = 0; i < orig_params.size(); ++i) { - param_copies[i].reset(new Blob()); - const bool kReshape = true; - for (int copy_diff = false; copy_diff <= true; ++copy_diff) { - param_copies[i]->CopyFrom(*orig_params[i], copy_diff, kReshape); - } - } - - // Save the solver history - vector > > history_copies; - const vector > >& orig_history = solver_->history(); - history_copies.resize(orig_history.size()); - for (int i = 0; i < orig_history.size(); ++i) { - history_copies[i].reset(new Blob()); - const bool kReshape = true; - for (int copy_diff = false; copy_diff <= true; ++copy_diff) { - history_copies[i]->CopyFrom(*orig_history[i], copy_diff, kReshape); - } - } - - // Run the solver for num_iters iterations and snapshot. - snapshot = true; - string snapshot_name = RunLeastSquaresSolver(learning_rate, weight_decay, - momentum, num_iters, kIterSize, kDevices, snapshot); - - // Reinitialize the solver and run for num_iters more iterations. - snapshot = false; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - total_num_iters, kIterSize, kDevices, - snapshot, snapshot_name.c_str()); - - // Check that params now match. - const vector*>& params = solver_->net()->learnable_params(); - for (int i = 0; i < params.size(); ++i) { - for (int j = 0; j < params[i]->count(); ++j) { - EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) - << "param " << i << " data differed at dim " << j; - EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) - << "param " << i << " diff differed at dim " << j; - } - } - - // Check that history now matches. - const vector > >& history = solver_->history(); - for (int i = 0; i < history.size(); ++i) { - for (int j = 0; j < history[i]->count(); ++j) { - EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) - << "history blob " << i << " data differed at dim " << j; - EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) - << "history blob " << i << " diff differed at dim " << j; - } - } - } - }; - - - template - class SGDSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new SGDSolver(param)); - } - }; - - TYPED_TEST_CASE(SGDSolverTest, TestDtypesAndDevices); - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - this->TestLeastSquaresUpdate(kLearningRate); - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(SGDSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(SGDSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - - template - class AdaGradSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new AdaGradSolver(param)); - } - }; - - TYPED_TEST_CASE(AdaGradSolverTest, TestDtypesAndDevices); - - TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); - } - - TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundredth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - this->TestLeastSquaresUpdate(kLearningRate); - } - - TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); - } - - TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdaGradSolverTest, - TestAdaGradLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(AdaGradSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - - template - class NesterovSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new NesterovSolver(param)); - } - }; - - TYPED_TEST_CASE(NesterovSolverTest, TestDtypesAndDevices); - - TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); - } - - TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHundredth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - this->TestLeastSquaresUpdate(kLearningRate); - } - - TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); - } - - TYPED_TEST(NesterovSolverTest, - TestNesterovLeastSquaresUpdateWithWeightDecayMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(NesterovSolverTest, - TestNesterovLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(NesterovSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(NesterovSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - template - class AdaDeltaSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new AdaDeltaSolver(param)); - } - }; - - TYPED_TEST_CASE(AdaDeltaSolverTest, TestDtypesAndDevices); - - TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - this->TestLeastSquaresUpdate(kLearningRate); - } - - TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.95; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } - - TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } - } - - TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.95; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } - } - - TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdaDeltaSolverTest, - TestAdaDeltaLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - template - class AdamSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - SolverParameter new_param = param; - const Dtype momentum = 0.9; - new_param.set_momentum(momentum); - const Dtype momentum2 = 0.999; - new_param.set_momentum2(momentum2); - this->solver_.reset(new AdamSolver(new_param)); - } - }; - - TYPED_TEST_CASE(AdamSolverTest, TestDtypesAndDevices); - - TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.9; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } - - TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } - - TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(AdamSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(AdamSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - template - class RMSPropSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - const Dtype rms_decay = 0.95; - SolverParameter new_param = param; - new_param.set_rms_decay(rms_decay); - this->solver_.reset(new RMSPropSolver(new_param)); - } - }; - - TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); - - TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); - } - - TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(RMSPropSolverTest, - TestRMSPropLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); - } - - TYPED_TEST(RMSPropSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } - - TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } - } + // Configure batch size for single / multi device equivalence. + // Constant data is needed for multi device as for accumulation. + num_ = kNum * devices; + + // Initialize the solver and run K (= iter_to_check) solver iterations + // (on single device). + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check, kIterSize, 1); + + // Compute the (K+1)th update using the analytic least squares gradient. + vector > > updated_params; + ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, + iter_to_check + 1, &updated_params); + + // Reinitialize the solver and run K+1 solver iterations. + num_ = kNum; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check + 1, kIterSize, devices); + + // Check that the solver's solution matches ours. + CheckLeastSquaresUpdate(updated_params); + } + } + + void TestSnapshot(const Dtype learning_rate = 1.0, const Dtype weight_decay = + 0.0, + const Dtype momentum = 0.0, const int num_iters = 1) { + // Run the solver for num_iters * 2 iterations. + const int total_num_iters = num_iters * 2; + bool snapshot = false; + const int kIterSize = 1; + const int kDevices = 1; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + total_num_iters, kIterSize, kDevices, snapshot); + + // Save the resulting param values. + vector > > param_copies; + const vector*>& orig_params = + solver_->net()->learnable_params(); + param_copies.resize(orig_params.size()); + for (int i = 0; i < orig_params.size(); ++i) { + param_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + param_copies[i]->CopyFrom(*orig_params[i], copy_diff, kReshape); + } + } + + // Save the solver history + vector > > history_copies; + const vector > >& orig_history = solver_->history(); + history_copies.resize(orig_history.size()); + for (int i = 0; i < orig_history.size(); ++i) { + history_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + history_copies[i]->CopyFrom(*orig_history[i], copy_diff, kReshape); + } + } + + // Run the solver for num_iters iterations and snapshot. + snapshot = true; + string snapshot_name = RunLeastSquaresSolver(learning_rate, weight_decay, + momentum, num_iters, kIterSize, + kDevices, snapshot); + + // Reinitialize the solver and run for num_iters more iterations. + snapshot = false; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + total_num_iters, kIterSize, kDevices, snapshot, + snapshot_name.c_str()); + + // Check that params now match. + const vector*>& params = solver_->net()->learnable_params(); + for (int i = 0; i < params.size(); ++i) { + for (int j = 0; j < params[i]->count(); ++j) { + EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) + << "param " << i << " data differed at dim " << j; + EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) + << "param " << i << " diff differed at dim " << j; + } + } + + // Check that history now matches. + const vector > >& history = solver_->history(); + for (int i = 0; i < history.size(); ++i) { + for (int j = 0; j < history[i]->count(); ++j) { + EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) + << "history blob " << i << " data differed at dim " << j; + EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) + << "history blob " << i << " diff differed at dim " << j; + } + } + } +}; + +template +class SGDSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new SGDSolver(param)); + } +}; + +TYPED_TEST_CASE(SGDSolverTest, TestDtypesAndDevices); + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(SGDSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class AdaGradSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaGradSolver(param)); + } +}; + +TYPED_TEST_CASE(AdaGradSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, + TestAdaGradLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaGradSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class NesterovSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new NesterovSolver(param)); + } +}; + +TYPED_TEST_CASE(NesterovSolverTest, TestDtypesAndDevices); + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHundredth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(NesterovSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class AdaDeltaSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaDeltaSolver(param)); + } +}; + +TYPED_TEST_CASE(AdaDeltaSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.95; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.95; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, + TestAdaDeltaLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class AdamSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + SolverParameter new_param = param; + const Dtype momentum = 0.9; + new_param.set_momentum(momentum); + const Dtype momentum2 = 0.999; + new_param.set_momentum2(momentum2); + this->solver_.reset(new AdamSolver(new_param)); + } +}; + +TYPED_TEST_CASE(AdamSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdamSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class RMSPropSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + const Dtype rms_decay = 0.95; + SolverParameter new_param = param; + new_param.set_rms_decay(rms_decay); + this->solver_.reset(new RMSPropSolver(new_param)); + } +}; + +TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, + TestRMSPropLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(RMSPropSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} } // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 7b2d80db8cc..026ee33b769 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -22,15 +22,17 @@ __global__ void im2col_gpu_kernel(const int_tp n, const Dtype* data_im, const int_tp kernel_h, const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, - const int_tp height_col, const int_tp width_col, - Dtype* data_col); + const int_tp height_col, + const int_tp width_col, Dtype* data_col); template __global__ void im2col_nd_gpu_kernel(const int_tp n, const int_tp num_axes, - const Dtype* data_im, const int_tp* im_shape, + const Dtype* data_im, + const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, Dtype* data_col); + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; #endif // USE_CUDA @@ -140,7 +142,8 @@ TYPED_TEST(Im2colKernelTest, Test2D) { // GPU version - int_tp num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int_tp num_kernels = this->channels_ * this->height_col_ + * this->width_col_; int_tp default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes diff --git a/src/caffe/test/test_internal_thread.cpp b/src/caffe/test/test_internal_thread.cpp index 1c1cc9e44f4..0d4764bc079 100644 --- a/src/caffe/test/test_internal_thread.cpp +++ b/src/caffe/test/test_internal_thread.cpp @@ -22,13 +22,13 @@ TEST_F(InternalThreadTest, TestStartAndExit) { class TestThreadA : public InternalThread { void InternalThreadEntry() { - EXPECT_EQ(4244559767, caffe_rng_rand()); + EXPECT_EQ(10282592414170385089UL, caffe_rng_rand()); } }; class TestThreadB : public InternalThread { void InternalThreadEntry() { - EXPECT_EQ(1726478280, caffe_rng_rand()); + EXPECT_EQ(10310463406559028313UL, caffe_rng_rand()); } }; diff --git a/src/caffe/test/test_io.cpp b/src/caffe/test/test_io.cpp index ece6f52aae2..4f2eeefcd84 100644 --- a/src/caffe/test/test_io.cpp +++ b/src/caffe/test/test_io.cpp @@ -18,7 +18,8 @@ namespace caffe { class IOTest : public ::testing::Test {}; bool ReadImageToDatumReference(const string& filename, const int_tp label, - const int_tp height, const int_tp width, const bool is_color, Datum* datum) { + const int_tp height, const int_tp width, + const bool is_color, Datum* datum) { cv::Mat cv_img; int_tp cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE); diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 394c7592a67..4e4ae6404b7 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -45,7 +45,8 @@ class MathFunctionsTest : public MultiDeviceTest { } // http://en.wikipedia.org/wiki/Hamming_distance - int_tp ReferenceHammingDistance(const int_tp n, const Dtype* x, const Dtype* y) { + int_tp ReferenceHammingDistance(const int_tp n, const Dtype* x, + const Dtype* y) { int_tp dist = 0; uint64_t val; for (int_tp i = 0; i < n; ++i) { diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index f8388f31cde..10bf1d3a341 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -21,7 +21,7 @@ class NetTest : public MultiDeviceTest { typedef typename TypeParam::Dtype Dtype; protected: - NetTest() : seed_(1701) {} + NetTest() : seed_(1702) {} virtual void InitNetFromProtoString(const string& proto) { NetParameter param; @@ -2335,7 +2335,8 @@ TYPED_TEST(NetTest, TestSkipPropagateDown) { // check bottom_need_backward if propagate_down is true this->InitSkipPropNet(false); vector vec_layer_need_backward = this->net_->layer_need_backward(); - for (int_tp layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { +for (int_tp layer_id = 0; layer_id < this->net_->layers().size(); + ++layer_id) { string layer_name = this->net_->layer_names()[layer_id]; if (layer_name == "loss") { // access to bottom_need_backward coresponding to label's blob @@ -2358,7 +2359,8 @@ TYPED_TEST(NetTest, TestSkipPropagateDown) { this->InitSkipPropNet(true); vec_layer_need_backward.clear(); vec_layer_need_backward = this->net_->layer_need_backward(); - for (int_tp layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + for (int_tp layer_id = 0; layer_id < this->net_->layers().size(); + ++layer_id) { string layer_name = this->net_->layer_names()[layer_id]; if (layer_name == "loss") { // access to bottom_need_backward coresponding to label's blob diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 24b25a21f89..002c89f8238 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -222,7 +222,7 @@ class RandomNumberGeneratorTest : public ::testing::Test { if (dc->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_rng_uniform(sample_size_, (uint_tpc*)rng_data); + caffe_gpu_rng_uniform(sample_size_, (uint_tpc*)rng_data); // NOLINT #endif // USE_CUDA } else { #ifdef USE_GREENTEA diff --git a/src/caffe/test/test_reduction_layer.cpp b/src/caffe/test/test_reduction_layer.cpp index 65b7c3e2bbb..abbf832e508 100644 --- a/src/caffe/test/test_reduction_layer.cpp +++ b/src/caffe/test/test_reduction_layer.cpp @@ -21,7 +21,7 @@ class ReductionLayerTest : public MultiDeviceTest { : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { // fill the values - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1702); FillerParameter filler_param; UniformFiller filler(filler_param); filler.Fill(this->blob_bottom_); diff --git a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp index 32ee1c53b2a..e4e217a1801 100644 --- a/src/caffe/util/im2col.cpp +++ b/src/caffe/util/im2col.cpp @@ -5,12 +5,11 @@ namespace caffe { -template +template void im2col_cpu(const Dtype* data_im, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - Dtype* data_col) { + const int_tp height, const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_col) { int_tp height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int_tp width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int_tp channels_col = channels * kernel_h * kernel_w; @@ -34,19 +33,24 @@ void im2col_cpu(const Dtype* data_im, const int_tp channels, // Explicit instantiation template void im2col_cpu(const float* data_im, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, float* data_col); + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + float* data_col); template void im2col_cpu(const double* data_im, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, double* data_col); + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + double* data_col); template inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col, - const int_tp num_spatial_axes, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - Dtype* data_output) { + const int_tp num_spatial_axes, + const int_tp* im_shape, const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, Dtype* data_output) { if (!im2col) { int_tp im_size = im_shape[0]; for (int_tp i = 0; i < num_spatial_axes; ++i) { @@ -134,12 +138,11 @@ template void im2col_nd_cpu(const double* data_im, const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, double* data_col); -template +template void col2im_cpu(const Dtype* data_col, const int_tp channels, - const int_tp height, const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - Dtype* data_im) { + const int_tp height, const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_im) { caffe_set(height * width * channels, Dtype(0), data_im); int_tp height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int_tp width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; @@ -162,13 +165,17 @@ void col2im_cpu(const Dtype* data_col, const int_tp channels, // Explicit instantiation template void col2im_cpu(const float* data_col, const int_tp channels, - const int_tp height, const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, float* data_im); + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + float* data_im); template void col2im_cpu(const double* data_col, const int_tp channels, - const int_tp height, const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, double* data_im); + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + double* data_im); template void col2im_nd_cpu(const Dtype* data_col, const int_tp num_spatial_axes, diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index eef8851e7d4..98de6712b12 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -13,12 +13,16 @@ namespace caffe { template __global__ void im2col_sk_gpu_kernel(const int_tp n, const Dtype* data_im, const int_tp height, const int_tp width, - const int_tp kernel_h, const int_tp kernel_w, + const int_tp kernel_h, + const int_tp kernel_w, const int_tp ext_kernel_h, - const int_tp ext_kernel_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, - const int_tp kstride_w, const int_tp height_col, + const int_tp ext_kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, + const int_tp height_col, const int_tp width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { int_tp w_out = index % width_col; @@ -46,10 +50,12 @@ __global__ void im2col_sk_gpu_kernel(const int_tp n, const Dtype* data_im, } template -void im2col_sk_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, - const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, +void im2col_sk_gpu(const Dtype* data_im, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. @@ -76,15 +82,16 @@ template void im2col_sk_gpu(const float* data_im, const int_tp channels, const int_tp kernel_h, const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - float* data_col); -template void im2col_sk_gpu(const double* data_im, const int_tp channels, - const int_tp height, const int_tp width, - const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - double* data_col); + const int_tp kstride_h, + const int_tp kstride_w, float* data_col); +template void im2col_sk_gpu(const double* data_im, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, double* data_col); template __global__ void im2col_gpu_kernel(const int_tp n, const Dtype* data_im, @@ -92,8 +99,8 @@ __global__ void im2col_gpu_kernel(const int_tp n, const Dtype* data_im, const int_tp kernel_h, const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, - const int_tp height_col, const int_tp width_col, - Dtype* data_col) { + const int_tp height_col, + const int_tp width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { int_tp w_out = index % width_col; int_tp h_index = index / width_col; @@ -120,10 +127,10 @@ __global__ void im2col_gpu_kernel(const int_tp n, const Dtype* data_im, } template -void im2col_gpu(const Dtype* data_im, const int_tp channels, const int_tp height, - const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, Dtype* data_col) { +void im2col_gpu(const Dtype* data_im, const int_tp channels, + const int_tp height, const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int_tp height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; @@ -156,12 +163,16 @@ template void im2col_gpu(const double* data_im, const int_tp channels, template __global__ void col2im_sk_gpu_kernel(const int_tp n, const Dtype* data_col, const int_tp height, const int_tp width, - const int_tp channels, const int_tp patch_h, - const int_tp patch_w, const int_tp ext_patch_h, - const int_tp ext_patch_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, - const int_tp kstride_w, const int_tp height_col, + const int_tp channels, + const int_tp patch_h, const int_tp patch_w, + const int_tp ext_patch_h, + const int_tp ext_patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, + const int_tp height_col, const int_tp width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; @@ -171,11 +182,13 @@ __global__ void col2im_sk_gpu_kernel(const int_tp n, const Dtype* data_col, // compute the start and end of the output int_tp width_col_1 = width_col - 1; int_tp height_col_1 = height_col - 1; - int_tp w_col_start = (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; + int_tp w_col_start = + (w < ext_patch_w) ? w % kstride_w : (w - ext_patch_w) + 1; int_tp w_col_end = (w >= width_col) ? width_col_1 - (width_col_1 - w_col_start) % kstride_w : w; - int_tp h_col_start = (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; + int_tp h_col_start = + (h < ext_patch_h) ? h % kstride_h : (h - ext_patch_h) + 1; int_tp h_col_end = (h >= height_col) ? height_col_1 - (height_col_1 - h_col_start) % kstride_h : h; @@ -187,8 +200,8 @@ __global__ void col2im_sk_gpu_kernel(const int_tp n, const Dtype* data_col, int_tp offset = c * patch_h * coeff_h_idx; for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += kstride_h, --h_idx) { - for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += - kstride_w, --w_idx) { + for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; + w_col += kstride_w, --w_idx) { val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx + h_col * width_col + w_col]; } @@ -199,10 +212,12 @@ __global__ void col2im_sk_gpu_kernel(const int_tp n, const Dtype* data_col, } template -void col2im_sk_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp kstride_h, const int_tp kstride_w, +void col2im_sk_gpu(const Dtype* data_col, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp kstride_h, const int_tp kstride_w, Dtype* data_im) { if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) LOG(FATAL)<< "stride greater than 1 or pad greater" @@ -228,15 +243,16 @@ template void col2im_sk_gpu(const float* data_col, const int_tp channels, const int_tp patch_h, const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - float* data_im); -template void col2im_sk_gpu(const double* data_col, const int_tp channels, - const int_tp height, const int_tp width, - const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, const int_tp kstride_w, - double* data_im); + const int_tp kstride_h, + const int_tp kstride_w, float* data_im); +template void col2im_sk_gpu(const double* data_col, + const int_tp channels, const int_tp height, + const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, + const int_tp kstride_h, + const int_tp kstride_w, double* data_im); template __global__ void col2im_gpu_kernel(const int_tp n, const Dtype* data_col, @@ -244,7 +260,8 @@ __global__ void col2im_gpu_kernel(const int_tp n, const Dtype* data_col, const int_tp channels, const int_tp patch_h, const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp height_col, + const int_tp stride_w, + const int_tp height_col, const int_tp width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; @@ -254,9 +271,9 @@ __global__ void col2im_gpu_kernel(const int_tp n, const Dtype* data_col, // compute the start and end of the output int_tp w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; - int_tp w_col_end = min((int_tpc)(w / stride_w + 1), (int_tpc)width_col); + int_tp w_col_end = min((int_tpc) (w / stride_w + 1), (int_tpc) width_col); int_tp h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; - int_tp h_col_end = min((int_tpc)(h / stride_h + 1), (int_tpc)height_col); + int_tp h_col_end = min((int_tpc) (h / stride_h + 1), (int_tpc) height_col); // equivalent implementation int_tp offset = (c * patch_h * patch_w + h * patch_w + w) * height_col @@ -273,10 +290,10 @@ __global__ void col2im_gpu_kernel(const int_tp n, const Dtype* data_col, } template -void col2im_gpu(const Dtype* data_col, const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, const int_tp patch_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, Dtype* data_im) { +void col2im_gpu(const Dtype* data_col, const int_tp channels, + const int_tp height, const int_tp width, const int_tp patch_h, + const int_tp patch_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, Dtype* data_im) { int_tp height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int_tp width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int_tp num_kernels = channels * height * width; @@ -311,9 +328,9 @@ __global__ void im2col_ndsk_gpu_kernel(const int_tp n, const int_tp num_axes, const Dtype* data_im, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, const int_tp* kstride, - Dtype* data_col) { + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, Dtype* data_col) { int_tp d_temp[6]; // NOLINT(runtime/arrays) int_tp d_iter[6]; // NOLINT(runtime/arrays) int_tp i; @@ -388,9 +405,9 @@ __global__ void col2im_ndsk_gpu_kernel(const int_tp n, const int_tp num_axes, const Dtype* data_col, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, const int_tp* kstride, - Dtype* data_im) { + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, Dtype* data_im) { int_tp d_im[6]; // NOLINT(runtime/arrays) int_tp d_col_size[6]; // NOLINT(runtime/arrays) int_tp d_col_iter[6]; // NOLINT(runtime/arrays) @@ -482,8 +499,8 @@ template void im2col_ndsk_gpu(const Dtype* data_im, const int_tp num_spatial_axes, const int_tp num_kernels, const int_tp* im_shape, const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, const int_tp* kstride, - Dtype* data_col) { + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, Dtype* data_col) { im2col_ndsk_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS) ( num_kernels, num_spatial_axes, data_im, im_shape, col_shape, @@ -492,23 +509,27 @@ void im2col_ndsk_gpu(const Dtype* data_im, const int_tp num_spatial_axes, } // Explicit instantiation -template void im2col_ndsk_gpu(const float* data_im, const int_tp num_spatial_axes, +template void im2col_ndsk_gpu(const float* data_im, + const int_tp num_spatial_axes, const int_tp num_kernels, const int_tp* im_shape, - const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, - const int_tp* kstride, float* data_col); -template void im2col_ndsk_gpu(const double* data_im, const int_tp num_spatial_axes, + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, const int_tp* kstride, + float* data_col); +template void im2col_ndsk_gpu(const double* data_im, + const int_tp num_spatial_axes, const int_tp num_kernels, const int_tp* im_shape, - const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, - const int_tp* kstride, double* data_col); + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, const int_tp* kstride, + double* data_col); template void col2im_ndsk_gpu(const Dtype* data_col, const int_tp num_spatial_axes, const int_tp im_size, const int_tp* im_shape, const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, const int_tp* kstride, - Dtype* data_im) { + const int_tp* pad, const int_tp* stride, + const int_tp* kstride, Dtype* data_im) { col2im_ndsk_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, num_spatial_axes, data_col, im_shape, col_shape, @@ -519,21 +540,25 @@ void col2im_ndsk_gpu(const Dtype* data_col, const int_tp num_spatial_axes, // Explicit instantiation template void col2im_ndsk_gpu(const float* data_col, const int_tp num_axes, const int_tp im_size, const int_tp* im_shape, - const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, - const int_tp* kstride, float* data_im); + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, const int_tp* kstride, + float* data_im); template void col2im_ndsk_gpu(const double* data_col, const int_tp num_axes, const int_tp im_size, const int_tp* im_shape, - const int_tp* col_shape, const int_tp* kernel_shape, - const int_tp* pad, const int_tp* stride, - const int_tp* kstride, double* data_im); + const int_tp* col_shape, + const int_tp* kernel_shape, const int_tp* pad, + const int_tp* stride, const int_tp* kstride, + double* data_im); template __global__ void im2col_nd_gpu_kernel(const int_tp n, const int_tp num_axes, - const Dtype* data_im, const int_tp* im_shape, + const Dtype* data_im, + const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, Dtype* data_col) { + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + Dtype* data_col) { int_tp d_temp[6]; // NOLINT(runtime/arrays) int_tp d_iter[6]; // NOLINT(runtime/arrays) int_tp i; @@ -611,23 +636,29 @@ void im2col_nd_gpu(const Dtype* data_im, const int_tp num_spatial_axes, // Explicit instantiation template void im2col_nd_gpu(const float* data_im, const int_tp num_spatial_axes, - const int_tp col_size, const int_tp* im_shape, + const int_tp col_size, + const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, float* data_col); + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + float* data_col); template void im2col_nd_gpu(const double* data_im, const int_tp num_spatial_axes, - const int_tp col_size, const int_tp* im_shape, + const int_tp col_size, + const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, double* data_col); + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + double* data_col); template __global__ void col2im_nd_gpu_kernel(const int_tp n, const int_tp num_axes, - const Dtype* data_col, const int_tp* im_shape, + const Dtype* data_col, + const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, Dtype* data_im) { + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + Dtype* data_im) { int_tp d_im[6]; // NOLINT(runtime/arrays) int_tp d_col_iter[6]; // NOLINT(runtime/arrays) int_tp d_col_start[6]; // NOLINT(runtime/arrays) @@ -647,8 +678,8 @@ __global__ void col2im_nd_gpu_kernel(const int_tp n, const int_tp num_axes, d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min((int_tpc)(d_im[i] / stride[i] + 1), - (int_tpc)(col_shape[i + 1])); + d_col_end[i] = min((int_tpc) (d_im[i] / stride[i] + 1), + (int_tpc) (col_shape[i + 1])); if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. @@ -696,9 +727,9 @@ __global__ void col2im_nd_gpu_kernel(const int_tp n, const int_tp num_axes, template void col2im_nd_gpu(const Dtype* data_col, const int_tp num_spatial_axes, - const int_tp im_size, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - Dtype* data_im) { + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, Dtype* data_im) { col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, num_spatial_axes, data_col, im_shape, col_shape, @@ -711,14 +742,17 @@ template void col2im_nd_gpu(const float* data_col, const int_tp num_spatial_axes, const int_tp im_size, const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, float* data_im); + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + float* data_im); template void col2im_nd_gpu(const double* data_col, const int_tp num_spatial_axes, - const int_tp im_size, const int_tp* im_shape, + const int_tp im_size, + const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, - const int_tp* stride, double* data_im); + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + double* data_im); #endif // USE_CUDA } // namespace caffe diff --git a/src/caffe/util/insert_splits.cpp b/src/caffe/util/insert_splits.cpp index 1bdaee71e7e..9e250021c41 100644 --- a/src/caffe/util/insert_splits.cpp +++ b/src/caffe/util/insert_splits.cpp @@ -36,7 +36,8 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) { << layer_param.name() << "', bottom index " << j << ")"; } const pair& bottom_idx = make_pair(i, j); - const pair& top_idx = blob_name_to_last_top_idx[blob_name]; + const pair& top_idx = + blob_name_to_last_top_idx[blob_name]; bottom_idx_to_source_top_idx[bottom_idx] = top_idx; ++top_idx_to_bottom_count[top_idx]; } @@ -50,7 +51,8 @@ void InsertSplits(const NetParameter& param, NetParameter* param_split) { std::min(layer_param.loss_weight_size(), layer_param.top_size()); for (int_tp j = 0; j < last_loss; ++j) { const string& blob_name = layer_param.top(j); - const pair& top_idx = blob_name_to_last_top_idx[blob_name]; + const pair& top_idx = + blob_name_to_last_top_idx[blob_name]; top_idx_to_loss_weight[top_idx] = layer_param.loss_weight(j); if (top_idx_to_loss_weight[top_idx]) { ++top_idx_to_bottom_count[top_idx]; diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 456390475a3..74aca93ee39 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -71,7 +71,8 @@ void caffe_set(const int_tp N, const Dtype alpha, Dtype* Y) { } template void caffe_set(const int_tp N, const int alpha, int* Y); -template void caffe_set(const int_tp N, const uint_tp alpha, uint_tp* Y); +template void caffe_set(const int_tp N, const uint_tp alpha, + uint_tp* Y); template void caffe_set(const int_tp N, const int_tp alpha, int_tp* Y); template void caffe_set(const int_tp N, const float alpha, float* Y); template void caffe_set(const int_tp N, const double alpha, double* Y); @@ -97,11 +98,13 @@ void caffe_cpu_copy(const int_tp N, const Dtype* X, Dtype* Y) { } } -template void caffe_cpu_copy(const int_tp N, const int_tp* X, int_tp* Y); +template void caffe_cpu_copy(const int_tp N, const int_tp* X, + int_tp* Y); template void caffe_cpu_copy(const int_tp N, const uint_tp* X, - uint_tp* Y); +uint_tp* Y); template void caffe_cpu_copy(const int_tp N, const float* X, float* Y); -template void caffe_cpu_copy(const int_tp N, const double* X, double* Y); +template void caffe_cpu_copy(const int_tp N, const double* X, + double* Y); template void caffe_copy(const int_tp N, const Dtype* X, Dtype* Y) { @@ -123,7 +126,7 @@ void caffe_copy(const int_tp N, const Dtype* X, Dtype* Y) { template void caffe_copy(const int_tp N, const int_tp* X, int_tp* Y); template void caffe_copy(const int_tp N, const uint_tp* X, - uint_tp* Y); +uint_tp* Y); template void caffe_copy(const int_tp N, const float* X, float* Y); template void caffe_copy(const int_tp N, const double* X, double* Y); @@ -144,13 +147,14 @@ void caffe_cpu_axpby(const int_tp N, const float alpha, const float* X, } template<> -void caffe_cpu_axpby(const int_tp N, const double alpha, const double* X, - const double beta, double* Y) { +void caffe_cpu_axpby(const int_tp N, const double alpha, + const double* X, const double beta, double* Y) { cblas_daxpby(N, alpha, X, 1, beta, Y, 1); } template<> -void caffe_add(const int_tp n, const float* a, const float* b, float* y) { +void caffe_add(const int_tp n, const float* a, const float* b, + float* y) { vsAdd(n, a, b, y); } @@ -161,7 +165,8 @@ void caffe_add(const int_tp n, const double* a, const double* b, } template<> -void caffe_sub(const int_tp n, const float* a, const float* b, float* y) { +void caffe_sub(const int_tp n, const float* a, const float* b, + float* y) { vsSub(n, a, b, y); } @@ -172,7 +177,8 @@ void caffe_sub(const int_tp n, const double* a, const double* b, } template<> -void caffe_mul(const int_tp n, const float* a, const float* b, float* y) { +void caffe_mul(const int_tp n, const float* a, const float* b, + float* y) { vsMul(n, a, b, y); } @@ -183,7 +189,8 @@ void caffe_mul(const int_tp n, const double* a, const double* b, } template<> -void caffe_div(const int_tp n, const float* a, const float* b, float* y) { +void caffe_div(const int_tp n, const float* a, const float* b, + float* y) { vsDiv(n, a, b, y); } @@ -194,7 +201,8 @@ void caffe_div(const int_tp n, const double* a, const double* b, } template<> -void caffe_powx(const int_tp n, const float* a, const float b, float* y) { +void caffe_powx(const int_tp n, const float* a, const float b, + float* y) { vsPowx(n, a, b, y); } @@ -262,9 +270,11 @@ double caffe_nextafter(const double b); void caffe_rng_uniform(const int_tp n, uint_tp* r) { CHECK_GE(n, 0); CHECK(r); - boost::uniform_int random_distribution(INT64_MIN, INT64_MAX); - boost::variate_generator> - variate_generator(caffe_rng(), random_distribution); + boost::uniform_int random_distribution( + std::numeric_limits::min(), std::numeric_limits::max()); + boost::variate_generator> variate_generator( + caffe_rng(), random_distribution); for (int_tp i = 0; i < n; ++i) { r[i] = variate_generator(); } @@ -276,8 +286,9 @@ void caffe_rng_uniform(const int_tp n, const Dtype a, const Dtype b, Dtype* r) { CHECK(r); CHECK_LE(a, b); boost::uniform_real random_distribution(a, caffe_nextafter(b)); - boost::variate_generator> - variate_generator(caffe_rng(), random_distribution); + boost::variate_generator> variate_generator( + caffe_rng(), random_distribution); for (int_tp i = 0; i < n; ++i) { r[i] = variate_generator(); } @@ -298,16 +309,17 @@ void caffe_rng_gaussian(const int_tp n, const Dtype a, const Dtype sigma, CHECK(r); CHECK_GT(sigma, 0); boost::normal_distribution random_distribution(a, sigma); - boost::variate_generator> - variate_generator(caffe_rng(), random_distribution); + boost::variate_generator> variate_generator( + caffe_rng(), random_distribution); for (int_tp i = 0; i < n; ++i) { r[i] = variate_generator(); } } template -void caffe_rng_gaussian(const int_tp n, const float mu, const float sigma, - float* r); +void caffe_rng_gaussian(const int_tp n, const float mu, + const float sigma, float* r); template void caffe_rng_gaussian(const int_tp n, const double mu, @@ -320,30 +332,35 @@ void caffe_rng_bernoulli(const int_tp n, const Dtype p, Itype* r) { CHECK_GE(p, 0); CHECK_LE(p, 1); boost::bernoulli_distribution random_distribution(p); - boost::variate_generator> - variate_generator(caffe_rng(), random_distribution); + boost::variate_generator> variate_generator( + caffe_rng(), random_distribution); for (int_tp i = 0; i < n; ++i) { r[i] = static_cast(variate_generator()); } } template -void caffe_rng_bernoulli(const int_tp n, const double p, unsigned long* r); +void caffe_rng_bernoulli(const int_tp n, const double p, // NOLINT + unsigned long* r); // NOLINT template -void caffe_rng_bernoulli(const int_tp n, const float p, unsigned long* r); +void caffe_rng_bernoulli(const int_tp n, const float p, // NOLINT + unsigned long* r); // NOLINT template -void caffe_rng_bernoulli(const int_tp n, const double p, long* r); +void caffe_rng_bernoulli(const int_tp n, const double p, long* r); // NOLINT template -void caffe_rng_bernoulli(const int_tp n, const float p, long* r); +void caffe_rng_bernoulli(const int_tp n, const float p, long* r); // NOLINT template -void caffe_rng_bernoulli(const int_tp n, const double p, unsigned int* r); +void caffe_rng_bernoulli(const int_tp n, const double p, + unsigned int* r); template -void caffe_rng_bernoulli(const int_tp n, const float p, unsigned int* r); +void caffe_rng_bernoulli(const int_tp n, const float p, + unsigned int* r); template void caffe_rng_bernoulli(const int_tp n, const double p, int* r); @@ -352,8 +369,9 @@ template void caffe_rng_bernoulli(const int_tp n, const float p, int* r); template<> -float caffe_cpu_strided_dot(const int_tp n, const float* x, const int_tp incx, - const float* y, const int_tp incy) { +float caffe_cpu_strided_dot(const int_tp n, const float* x, + const int_tp incx, const float* y, + const int_tp incy) { return cblas_sdot(n, x, incx, y, incy); } @@ -377,7 +395,7 @@ double caffe_cpu_dot(const int_tp n, const double* x, const double* y); template<> int_tp caffe_cpu_hamming_distance(const int_tp n, const float* x, - const float* y) { + const float* y) { int_tp dist = 0; for (int_tp i = 0; i < n; ++i) { dist += __builtin_popcount( @@ -388,7 +406,7 @@ int_tp caffe_cpu_hamming_distance(const int_tp n, const float* x, template<> int_tp caffe_cpu_hamming_distance(const int_tp n, const double* x, - const double* y) { + const double* y) { int_tp dist = 0; for (int_tp i = 0; i < n; ++i) { dist += __builtin_popcountl( @@ -415,8 +433,8 @@ void caffe_cpu_scale(const int_tp n, const float alpha, const float *x, } template<> -void caffe_cpu_scale(const int_tp n, const double alpha, const double *x, - double* y) { +void caffe_cpu_scale(const int_tp n, const double alpha, + const double *x, double* y) { cblas_dcopy(n, x, 1, y, 1); cblas_dscal(n, alpha, y, 1); } diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index b7df928b2e8..0fb47bcca47 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -105,8 +105,8 @@ void caffe_gpu_axpby(const int_tp N, const float alpha, const float* X, } template<> -void caffe_gpu_axpby(const int_tp N, const double alpha, const double* X, - const double beta, double* Y) { +void caffe_gpu_axpby(const int_tp N, const double alpha, + const double* X, const double beta, double* Y) { caffe_gpu_scal(N, beta, Y); caffe_gpu_axpy(N, alpha, X, Y); } @@ -141,8 +141,8 @@ void caffe_gpu_scale(const int_tp n, const float alpha, const float *x, } template<> -void caffe_gpu_scale(const int_tp n, const double alpha, const double *x, - double* y) { +void caffe_gpu_scale(const int_tp n, const double alpha, + const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } @@ -165,9 +165,11 @@ void caffe_gpu_set(const int_tp N, const Dtype alpha, Dtype* Y) { N, alpha, Y); } -template void caffe_gpu_set(const int_tp N, const int_tp alpha, int_tp* Y); +template void caffe_gpu_set(const int_tp N, const int_tp alpha, + int_tp* Y); template void caffe_gpu_set(const int_tp N, const float alpha, float* Y); -template void caffe_gpu_set(const int_tp N, const double alpha, double* Y); +template void caffe_gpu_set(const int_tp N, const double alpha, + double* Y); template __global__ void add_scalar_kernel(const int_tp n, const Dtype alpha, Dtype* y) { @@ -425,11 +427,11 @@ uint32_t caffe_gpu_hamming_distance(const int_tp n, const double* x, thrust::plus()); } -void caffe_gpu_rng_uniform(const int_tp n, unsigned int* r) { +void caffe_gpu_rng_uniform(const int_tp n, unsigned int* r) { // NOLINT CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } -void caffe_gpu_rng_uniform(const int_tp n, unsigned long long* r) { +void caffe_gpu_rng_uniform(const int_tp n, unsigned long long* r) { // NOLINT CURAND_CHECK(curandGenerateLongLong(Caffe::curand_generator64(), r, n)); } @@ -447,8 +449,8 @@ void caffe_gpu_rng_uniform(const int_tp n, const float a, const float b, } template<> -void caffe_gpu_rng_uniform(const int_tp n, const double a, const double b, - double* r) { +void caffe_gpu_rng_uniform(const int_tp n, const double a, + const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast(1)) { diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 1ae7b98818a..c108e295d32 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -156,9 +156,12 @@ void UpgradeV0PaddingLayers(const NetParameter& param, << "Padding layer input to " "non-convolutional / non-pooling layer type " << layer_param.type(); - CHECK_EQ(layer_connection.bottom_size(), 1)<< "Conv Layer takes a single blob as input."; - CHECK_EQ(source_layer.bottom_size(), 1)<< "Padding Layer takes a single blob as input."; - CHECK_EQ(source_layer.top_size(), 1)<< "Padding Layer produces a single blob as output."; + CHECK_EQ(layer_connection.bottom_size(), 1) + << "Conv Layer takes a single blob as input."; + CHECK_EQ(source_layer.bottom_size(), 1) + << "Padding Layer takes a single blob as input."; + CHECK_EQ(source_layer.top_size(), 1) + << "Padding Layer produces a single blob as output."; int layer_index = param_upgraded_pad->layers_size() - 1; param_upgraded_pad->mutable_layers(layer_index)->mutable_layer() ->set_pad(source_layer.layer().pad()); diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index b34e1a67ef3..7866f371eff 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -172,7 +172,8 @@ int feature_extraction_pipeline(int argc, char** argv) { } } // for (int_tp n = 0; n < batch_size; ++n) } // for (int_tp i = 0; i < num_features; ++i) - } // for (int_tp batch_index = 0; batch_index < num_mini_batches; ++batch_index) + } // for (int_tp batch_index = 0; + // batch_index < num_mini_batches; ++batch_index) // write the last batch for (int_tp i = 0; i < num_features; ++i) { if (image_indices[i] % 1000 != 0) { From 5ebebfb870acaac128da223fe5a795b0226293ac Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 24 Oct 2015 19:26:20 +0200 Subject: [PATCH 198/254] Pycaffe fixed. --- include/caffe/caffe.hpp | 3 ++- python/caffe/_caffe.cpp | 36 +++++++++++++++++++----------------- src/caffe/solver.cpp | 2 +- tools/caffe.cpp | 2 +- 4 files changed, 23 insertions(+), 20 deletions(-) diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp index 9e92125765d..2d55f01af95 100644 --- a/include/caffe/caffe.hpp +++ b/include/caffe/caffe.hpp @@ -7,6 +7,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/definitions.hpp" +#include "caffe/device.hpp" #include "caffe/filler.hpp" #include "caffe/greentea/greentea.hpp" #include "caffe/layer.hpp" @@ -18,8 +19,8 @@ #include "caffe/solver_factory.hpp" #include "caffe/util/benchmark.hpp" #include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" #include "caffe/vision_layers.hpp" -#include "device.hpp" diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 38ad54a120e..f2defd1469b 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -16,6 +16,7 @@ #include "caffe/caffe.hpp" #include "caffe/python_layer.hpp" +#include "caffe/sgd_solvers.hpp" // Temporary solution for numpy < 1.7 versions: old macro, no promises. // You're strongly advised to upgrade to >= 1.7. @@ -50,7 +51,7 @@ static void CheckFile(const string& filename) { } void CheckContiguousArray(PyArrayObject* arr, string name, - vector shape) { + vector shape) { if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) { throw std::runtime_error(name + " must be C contiguous"); } @@ -63,11 +64,12 @@ void CheckContiguousArray(PyArrayObject* arr, string name, if (PyArray_TYPE(arr) != NPY_FLOAT32) { throw std::runtime_error(name + " must be float32"); } - for (int i = 1; i < PyArray_NDIM(arr); ++i) { + for (int_tp i = 1; i < PyArray_NDIM(arr); ++i) { if (PyArray_DIMS(arr)[i] != shape[i]) { throw std::runtime_error( "Shape dimension " + std::to_string(i) + " has wrong size (" - + std::to_string(static_cast(PyArray_DIMS(arr)[i])) + " vs. " + + std::to_string(static_cast + (PyArray_DIMS(arr)[i])) + " vs. " + std::to_string(shape[i]) + ")"); } } @@ -134,8 +136,8 @@ void Net_SetInputArrays(Net* net, int index, bp::object data_obj, Solver* GetSolverFromFile(const string& filename) { SolverParameter param; - ReadProtoFromTextFileOrDie(filename, ¶m); - return GetSolver(param); + ReadSolverParamsFromTextFileOrDie(filename, ¶m); + return SolverRegistry::CreateSolver(param); } struct NdarrayConverterGenerator { @@ -165,8 +167,8 @@ struct NdarrayCallPolicies : public bp::default_call_policies { // the shape information from the blob. void* data = PyArray_DATA(reinterpret_cast(result)); Py_DECREF(result); - const int num_axes = blob->num_axes(); - vector dims(blob->shape().begin(), blob->shape().end()); + const int_tp num_axes = blob->num_axes(); + vector dims(blob->shape().begin(), blob->shape().end()); PyObject *arr_obj = PyArray_SimpleNewFromData(num_axes, dims.data(), NPY_FLOAT32, data); // SetBaseObject steals a ref, so we need to INCREF. @@ -182,9 +184,9 @@ bp::object Blob_Reshape(bp::tuple args, bp::dict kwargs) { throw std::runtime_error("Blob.reshape takes no kwargs"); } Blob* self = bp::extract*>(args[0]); - vector shape(bp::len(args) - 1); - for (int i = 1; i < bp::len(args); ++i) { - shape[i - 1] = bp::extract(args[i]); + vector shape(bp::len(args) - 1); + for (int_tp i = 1; i < bp::len(args); ++i) { + shape[i - 1] = bp::extract(args[i]); } self->Reshape(shape); // We need to explicitly return None to use bp::raw_function. @@ -197,9 +199,9 @@ bp::object BlobVec_add_blob(bp::tuple args, bp::dict kwargs) { } typedef vector > > BlobVec; BlobVec* self = bp::extract(args[0]); - vector shape(bp::len(args) - 1); - for (int i = 1; i < bp::len(args); ++i) { - shape[i - 1] = bp::extract(args[i]); + vector shape(bp::len(args) - 1); + for (int_tp i = 1; i < bp::len(args); ++i) { + shape[i - 1] = bp::extract(args[i]); } self->push_back(shared_ptr >(new Blob(shape))); // We need to explicitly return None to use bp::raw_function. @@ -252,14 +254,14 @@ BOOST_PYTHON_MODULE(_caffe) { "Blob", bp::no_init) .add_property("shape", bp::make_function( - static_cast& (Blob::*)() const>( + static_cast& (Blob::*)() const>( &Blob::shape), bp::return_value_policy())) .add_property("num", &Blob::num) .add_property("channels", &Blob::channels) .add_property("height", &Blob::height) .add_property("width", &Blob::width) - .add_property("count", static_cast::*)() const>( + .add_property("count", static_cast::*)() const>( &Blob::count)) .def("reshape", bp::raw_function(&Blob_Reshape)) .add_property("data", bp::make_function(&Blob::mutable_cpu_data, @@ -322,8 +324,8 @@ BOOST_PYTHON_MODULE(_caffe) { .def(bp::vector_indexing_suite > >, true>()); bp::class_ >("StringVec") .def(bp::vector_indexing_suite >()); - bp::class_ >("IntVec") - .def(bp::vector_indexing_suite >()); + bp::class_ >("IntVec") + .def(bp::vector_indexing_suite >()); bp::class_ >("DtypeVec") .def(bp::vector_indexing_suite >()); bp::class_ > > >("NetVec") diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 522f4ed9feb..31fe73bd5c3 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -43,7 +43,7 @@ Solver::Solver(const string& param_file, const Solver* root_solver) : net_(), callbacks_(), root_solver_(root_solver), requested_early_exit_(false) { SolverParameter param; - ReadProtoFromTextFileOrDie(param_file, ¶m); + ReadSolverParamsFromTextFileOrDie(param_file, ¶m); Init(param); } diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 362afb51a4c..3dab90e16f9 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -169,7 +169,7 @@ int train() { "but not both."; caffe::SolverParameter solver_param; - caffe::ReadProtoFromTextFileOrDie(FLAGS_solver, &solver_param); + caffe::ReadSolverParamsFromTextFileOrDie(FLAGS_solver, &solver_param); // If the gpus flag is not provided, allow the mode and device to be set // in the solver prototxt. From d82979ba5d755e779f09c215959a7a8194a020ee Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 25 Oct 2015 03:26:44 +0100 Subject: [PATCH 199/254] Softmax fix for >4 shape dims. --- include/caffe/syncedmem.hpp | 2 +- python/caffe/_caffe.cpp | 3 +- python/caffe/pycaffe.py | 4 +- src/caffe/blob.cpp | 2 +- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/softmax_loss.cl | 7 +- src/caffe/layers/loss_layer.cpp | 2 +- src/caffe/layers/mergecrop_layer.cpp | 4 +- src/caffe/layers/mergecrop_layer.cu | 12 +-- src/caffe/layers/softmax_layer.cu | 75 ++++++++----------- src/caffe/layers/softmax_loss_layer.cu | 8 +- src/caffe/syncedmem.cpp | 5 +- 12 files changed, 61 insertions(+), 67 deletions(-) diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 989243a9dc7..0df08442868 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -12,7 +12,7 @@ namespace caffe { -void CaffeMallocHost(void** ptr, uint_tp size); +void CaffeMallocHost(void** ptr, int_tp size); void CaffeFreeHost(void* ptr); diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index f2defd1469b..1a013af7ee3 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -247,7 +247,8 @@ BOOST_PYTHON_MODULE(_caffe) { bp::make_function(&Net::output_blob_indices, bp::return_value_policy())) .def("_set_input_arrays", &Net_SetInputArrays, - bp::with_custodian_and_ward<1, 3, bp::with_custodian_and_ward<1, 4> >()) + bp::with_custodian_and_ward<1, 3, + bp::with_custodian_and_ward<1, 4> > ()) .def("save", &Net_Save); bp::class_, shared_ptr >, boost::noncopyable>( diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index 7bd4f411b6a..8506cc75a30 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -232,7 +232,7 @@ def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs): return all_outs, all_diffs -def _Net_set_input_arrays(self, data, labels): +def _Net_set_input_arrays(self, index, data, labels): """ Set input arrays of the in-memory MemoryDataLayer. (Note: this is only for networks declared with the memory data layer.) @@ -240,7 +240,7 @@ def _Net_set_input_arrays(self, data, labels): if labels.ndim == 1: labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis, np.newaxis]) - return self._set_input_arrays(data, labels) + return self._set_input_arrays(index, data, labels) def _Net_batch(self, blobs): diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index c8901ce84cb..5f5a7573b8f 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -38,7 +38,7 @@ bool Blob::Reshape(const vector& shape) { int_tp* shape_data = static_cast(shape_data_->mutable_cpu_data()); for (int_tp i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); - CHECK_LE(shape[i], INT_MAX / count_)<< "blob size exceeds INT_MAX"; + CHECK_LE(shape[i], LONG_MAX / count_)<< "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; shape_data[i] = shape[i]; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 1c37ddb3026..35e201357ce 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -29,7 +29,7 @@ std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT @@ -53,7 +53,7 @@ std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; diff --git a/src/caffe/greentea/cl_kernels/softmax_loss.cl b/src/caffe/greentea/cl_kernels/softmax_loss.cl index 045f9be41a0..5991eebf351 100644 --- a/src/caffe/greentea/cl_kernels/softmax_loss.cl +++ b/src/caffe/greentea/cl_kernels/softmax_loss.cl @@ -9,7 +9,8 @@ __kernel void TEMPLATE(softmax_loss_forward,Dtype)( const int has_ignore_label_, const int_tp ignore_label_, __global Dtype* counts) { - for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < n; + index += get_global_size(0)) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = (int_tp) (label[n * spatial_dim + s]); @@ -38,8 +39,8 @@ __kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads, const int_tp channels = dim / spatial_dim; - for (int_tp index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < nthreads; index += + get_global_size(0)) { { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index eaa51fbe917..783e0eeb009 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -16,7 +16,7 @@ void LossLayer::LayerSetUp( template void LossLayer::Reshape( const vector*>& bottom, const vector*>& top) { - CHECK_EQ(bottom[0]->num(), bottom[1]->num()) + CHECK_EQ(bottom[0]->shape(0), bottom[1]->shape(0)) << "The data and label should have the same number."; vector loss_shape(0); // Loss layers output a scalar; 0 axes. top[0]->Reshape(loss_shape); diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index e18a5caa900..df736e5c3c3 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -35,10 +35,10 @@ template void MergeCropLayer::Reshape(const vector*>& bottom, const vector*>& top) { // Same number of batches requires - CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + CHECK_EQ(bottom[0]->shape(0), bottom[1]->shape(0)); // All channels of both inputs are copied - int_tp channels = bottom[0]->channels() + bottom[1]->channels(); + int_tp channels = bottom[0]->shape(1) + bottom[1]->shape(1); // Spatial of the smaller input, which should be input 0 vector top_shape = bottom[0]->shape(); diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index bb24c639a42..f20879279ad 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -120,12 +120,12 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data_b = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - int_tp num = bottom[0]->num(); + int_tp num = bottom[0]->shape(0); int_tp spatial_dims = bottom[0]->shape().size() - 2; // All channels of both inputs are copied - int_tp channels_a = bottom[0]->channels(); - int_tp channels_b = bottom[1]->channels(); + int_tp channels_a = bottom[0]->shape(1); + int_tp channels_b = bottom[1]->shape(1); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -174,12 +174,12 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff_b = bottom[1]->mutable_gpu_diff(); const Dtype* top_diff = top[0]->gpu_diff(); - int_tp num = bottom[0]->num(); + int_tp num = bottom[0]->shape(0); int_tp spatial_dims = bottom[0]->shape().size() - 2; // All channels of both inputs are copied - int_tp channels_a = bottom[0]->channels(); - int_tp channels_b = bottom[1]->channels(); + int_tp channels_a = bottom[0]->shape(1); + int_tp channels_b = bottom[1]->shape(1); // Width and height of the smaller input, which should be input 0 int_tp height_a = bottom[0]->height(); diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index 3425fa742ae..f6b06a323da 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -103,9 +103,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int_tp count = bottom[0]->count(); - int_tp num = bottom[0]->num(); - int_tp channels = bottom[0]->channels(); - int_tp spatial_dim = bottom[0]->height() * bottom[0]->width(); + int_tp channels = top[0]->shape(softmax_axis_); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -115,29 +113,29 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, // and then normalize. // compute max // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_max CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), - CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, + kernel_channel_max CUDA_KERNEL(CAFFE_GET_BLOCKS(outer_num_ * inner_num_), + CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, + CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) kernel_exp CUDA_KERNEL( - CAFFE_GET_BLOCKS(num * channels * spatial_dim), - CAFFE_CUDA_NUM_THREADS)(num * channels * spatial_dim, top_data, + CAFFE_GET_BLOCKS(count), + CAFFE_CUDA_NUM_THREADS)(count, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_sum CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), - CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, + kernel_channel_sum CUDA_KERNEL(CAFFE_GET_BLOCKS(outer_num_ * inner_num_), + CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, + CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_, scale_data, top_data); #endif } else { @@ -147,54 +145,49 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); - greentea_copy(count, (cl_mem)bottom_data, - 0, (cl_mem)top_data, 0, &ctx); + greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0, + &ctx); viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_max")); viennacl::ocl::enqueue( - oclk_channel_max(num, channels, spatial_dim, - WrapHandle((cl_mem)top_data, &ctx), - WrapHandle((cl_mem)scale_data, &ctx)), + oclk_channel_max(outer_num_, channels, inner_num_, + WrapHandle((cl_mem) top_data, &ctx), + WrapHandle((cl_mem) scale_data, &ctx)), ctx.get_queue()); - viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( - oclk_channel_subtract(count, num, channels, spatial_dim, - WrapHandle((cl_mem)scale_data, &ctx), - WrapHandle((cl_mem)top_data, &ctx)), + oclk_channel_subtract(count, outer_num_, channels, inner_num_, + WrapHandle((cl_mem) scale_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); - viennacl::ocl::kernel &oclk_exp = program.get_kernel( CL_KERNEL_SELECT("kernel_exp")); viennacl::ocl::enqueue( - oclk_exp(num * channels * spatial_dim, - WrapHandle((cl_mem)top_data, &ctx), - WrapHandle((cl_mem)top_data, &ctx)), + oclk_exp(count, + WrapHandle((cl_mem) top_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); - viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_sum")); viennacl::ocl::enqueue( - oclk_channel_sum(num, channels, spatial_dim, - WrapHandle((cl_mem)top_data, &ctx), - WrapHandle((cl_mem)scale_data, &ctx)), + oclk_channel_sum(outer_num_, channels, inner_num_, + WrapHandle((cl_mem) top_data, &ctx), + WrapHandle((cl_mem) scale_data, &ctx)), ctx.get_queue()); - viennacl::ocl::kernel &oclk_channel_div = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_div")); viennacl::ocl::enqueue( - oclk_channel_div(count, num, channels, spatial_dim, - WrapHandle((cl_mem)scale_data, &ctx), - WrapHandle((cl_mem)top_data, &ctx)), + oclk_channel_div(count, outer_num_, channels, inner_num_, + WrapHandle((cl_mem) scale_data, &ctx), + WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); - #endif } } @@ -203,14 +196,12 @@ template void SoftmaxLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - int_tp count = top[0]->count(); - int_tp num = top[0]->num(); - int_tp channels = top[0]->channels(); - int_tp spatial_dim = top[0]->height() * top[0]->width(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); + int_tp count = top[0]->count(); + int_tp channels = top[0]->shape(softmax_axis_); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -218,12 +209,12 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, // Compute inner1d(top_diff, top_data) and // subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_dot CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), - CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_diff, top_data, + kernel_channel_dot CUDA_KERNEL(CAFFE_GET_BLOCKS(outer_num_ * inner_num_), + CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract CUDA_KERNEL(CAFFE_GET_BLOCKS(count), - CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, + CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); @@ -242,7 +233,7 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_dot")); viennacl::ocl::enqueue( - oclk_channel_dot(num, channels, spatial_dim, + oclk_channel_dot(outer_num_, channels, inner_num_, WrapHandle((cl_mem)top_diff, &ctx), WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)scale_data, &ctx)), @@ -251,7 +242,7 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( - oclk_channel_subtract(count, num, channels, spatial_dim, + oclk_channel_subtract(count, outer_num_, channels, inner_num_, WrapHandle((cl_mem)scale_data, &ctx), WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 3b1a5108645..0e50bead33a 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -48,7 +48,7 @@ void SoftmaxWithLossLayer::Forward_gpu( #ifdef USE_CUDA const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); - const int_tp num = prob_.num(); + const int_tp num = prob_.shape(0); const int_tp dim = prob_.count() / num; const int_tp spatial_dim = prob_.height() * prob_.width(); const int_tp nthreads = num * spatial_dim; @@ -90,7 +90,7 @@ void SoftmaxWithLossLayer::Forward_gpu( cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); - const int_tp num = prob_.num(); + const int_tp num = prob_.shape(0); const int_tp dim = prob_.count() / num; const int_tp spatial_dim = prob_.height() * prob_.width(); const int_tp nthreads = num * spatial_dim; @@ -176,7 +176,7 @@ void SoftmaxWithLossLayer::Backward_gpu( const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); - const int_tp num = prob_.num(); + const int_tp num = prob_.shape(0); const int_tp dim = prob_.count() / num; const int_tp spatial_dim = prob_.height() * prob_.width(); const int_tp nthreads = num * spatial_dim; @@ -219,7 +219,7 @@ void SoftmaxWithLossLayer::Backward_gpu( greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); - const int_tp num = prob_.num(); + const int_tp num = prob_.shape(0); const int_tp dim = prob_.count() / num; const int_tp spatial_dim = prob_.height() * prob_.width(); const int_tp nthreads = num * spatial_dim; diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 8a35eeedf12..1eed707e858 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -18,7 +18,7 @@ namespace caffe { // but might be more significant for parallel training. Most importantly, // it improved stability for large models on many GPUs. -void CaffeMallocHost(void** ptr, uint_tp size) { +void CaffeMallocHost(void** ptr, int_tp size) { #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { @@ -30,7 +30,8 @@ void CaffeMallocHost(void** ptr, uint_tp size) { // Make sure the memory is zero-copy usable in OpenCL CHECK_EQ(0, posix_memalign(ptr, OPENCL_PAGE_ALIGN, ((size - 1)/OPENCL_CACHE_ALIGN + 1) * OPENCL_CACHE_ALIGN)) - << "Host memory allocation error"; + << "Host memory allocation error of size: " + << size << " B"; return; } } From 01139b05ef20b02239740729c65216b9a01f7fa0 Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 26 Oct 2015 01:01:20 +0100 Subject: [PATCH 200/254] Fix for Malis, Softmax. --- include/caffe/solver.hpp | 2 +- python/caffe/_caffe.cpp | 6 ++- src/caffe/layers/malis_loss_layer.cpp | 63 +++++++++++++++++++------- src/caffe/layers/mergecrop_layer.cu | 7 --- src/caffe/layers/pooling_layer.cu | 48 ++++++++++---------- src/caffe/layers/softmax_layer.cu | 17 ++++--- src/caffe/layers/softmax_loss_layer.cu | 59 ++++++++++-------------- src/caffe/solver.cpp | 3 +- 8 files changed, 110 insertions(+), 95 deletions(-) diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index bdc95eb9b7e..d596cd5a9fb 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -59,7 +59,7 @@ class Solver { inline void Solve(const string resume_file) { Solve(resume_file.c_str()); } - void Step(int_tp iters); + Dtype Step(int_tp iters); // The Restore method simply dispatches to one of the // RestoreSolverStateFrom___ protected methods. You should implement these // methods to restore the state from the appropriate snapshot type. diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 1a013af7ee3..3b4630345e5 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -288,9 +288,11 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("test_nets", bp::make_function(&Solver::test_nets, bp::return_internal_reference<>())) .add_property("iter", &Solver::iter) - .def("solve", static_cast::*)(const char*)>( + .def("step", + static_cast::*)(const int_tp)>(&Solver::Step)) + .def("solve", + static_cast::*)(const char*)>( &Solver::Solve), SolveOverloads()) - .def("step", &Solver::Step) .def("restore", &Solver::Restore); bp::class_, bp::bases >, diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 2149c61bab7..4e070f2105b 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -54,22 +54,27 @@ void MalisLossLayer::Malis(const Dtype* conn_data, Dtype *classerr_out, Dtype *rand_index_out, Dtype margin, Dtype threshold) { if ((nhood_dims[1] != (conn_num_dims - 1)) - || (nhood_dims[0] != conn_dims[conn_num_dims - 1])) { - LOG(FATAL) << "nhood and conn dimensions don't match"; + || (nhood_dims[0] != conn_dims[0])) { + LOG(FATAL) << "nhood and conn dimensions don't match" + << " (" << nhood_dims[1] << " vs. " << (conn_num_dims - 1) + << " and " << nhood_dims[0] << " vs. " + << conn_dims[conn_num_dims - 1] <<")"; } /* Cache for speed to access neighbors */ // nVert stores (x * y * z) int64_t nVert = 1; - for (int64_t i = 0; i < conn_num_dims - 1; ++i) { - nVert = nVert * conn_dims[i]; + for (int64_t i = 1; i < conn_num_dims; ++i) { + nVert *= conn_dims[i]; + // std::cout << i << " nVert: " << nVert << std::endl; } // prodDims stores x, x*y, x*y*z offsets std::vector prodDims(conn_num_dims - 1); prodDims[0] = 1; for (int64_t i = 1; i < conn_num_dims - 1; ++i) { - prodDims[i] = prodDims[i - 1] * conn_dims[i - 1]; + prodDims[i] = prodDims[i - 1] * conn_dims[i]; + // std::cout << i << " prodDims: " << prodDims[i] << std::endl; } /* convert n-d offset vectors into linear array offset scalars */ @@ -80,6 +85,7 @@ void MalisLossLayer::Malis(const Dtype* conn_data, for (int64_t j = 0; j < nhood_dims[1]; ++j) { nHood[i] += (int32_t) nhood_data[j + i * nhood_dims[1]] * prodDims[j]; } + // std::cout << i << " nHood: " << nHood[i] << std::endl; } /* Disjoint sets and sparse overlap vectors */ @@ -109,12 +115,8 @@ void MalisLossLayer::Malis(const Dtype* conn_data, nPairNorm = nPairNeg; } - /* Sort all the edges in increasing order of weight */ - std::vector pqueue( - conn_dims[3] * std::max((conn_dims[0] - 1), 1L) - * std::max((conn_dims[1] - 1), 1L) - * std::max((conn_dims[2] - 1), 1L)); - int64_t j = 0; + + int64_t edgeCount = 0; // Loop over #edges for (int64_t d = 0, i = 0; d < conn_dims[0]; ++d) { // Loop over Z @@ -124,12 +126,37 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // Loop over X for (int64_t x = 0; x < conn_dims[3]; ++x, ++i) { // Out-of-bounds check: - if (!((z + nhood_data[d * 3 + 0] < 0) - ||(z + nhood_data[d * 3 + 0] >= conn_dims[1]) - ||(y + nhood_data[d * 3 + 1] < 0) - ||(y + nhood_data[d * 3 + 1] >= conn_dims[2]) - ||(x + nhood_data[d * 3 + 2] < 0) - ||(x + nhood_data[d * 3 + 2] >= conn_dims[3]))) { + if (!((z + nhood_data[d * conn_dims[0] + 0] < 0) + ||(z + nhood_data[d * conn_dims[0] + 0] >= conn_dims[1]) + ||(y + nhood_data[d * conn_dims[0] + 1] < 0) + ||(y + nhood_data[d * conn_dims[0] + 1] >= conn_dims[2]) + ||(x + nhood_data[d * conn_dims[0] + 2] < 0) + ||(x + nhood_data[d * conn_dims[0] + 2] >= conn_dims[3]))) { + ++edgeCount; + } + } + } + } + } + + /* Sort all the edges in increasing order of weight */ + std::vector pqueue(edgeCount); + int64_t j = 0; + // Loop over #edges + for (int64_t d = 0, i = 0; d < conn_dims[0]; ++d) { + // Loop over Z + for (int64_t x = 0; x < conn_dims[3]; ++x) { + // Loop over Y + for (int64_t y = 0; y < conn_dims[2]; ++y) { + // Loop over X + for (int64_t z = 0; z < conn_dims[1]; ++z, ++i) { + // Out-of-bounds check: + if (!((z + nhood_data[d * conn_dims[0] + 0] < 0) + ||(z + nhood_data[d * conn_dims[0] + 0] >= conn_dims[1]) + ||(y + nhood_data[d * conn_dims[0] + 1] < 0) + ||(y + nhood_data[d * conn_dims[0] + 1] >= conn_dims[2]) + ||(x + nhood_data[d * conn_dims[0] + 2] < 0) + ||(x + nhood_data[d * conn_dims[0] + 2] >= conn_dims[3]))) { pqueue[j++] = i; } } @@ -422,6 +449,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &classerr_out, &rand_index_out, 0.3, 0.5); loss += loss_out; + std::cout << loss << std::endl; Malis(&affinity_data_pos[batch_offset * batch], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], @@ -430,6 +458,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &classerr_out, &rand_index_out, 0.3, 0.5); loss += loss_out; + std::cout << loss << std::endl; } // Normalized loss over batch size diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index f20879279ad..e963b4a5b31 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -181,13 +181,6 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, int_tp channels_a = bottom[0]->shape(1); int_tp channels_b = bottom[1]->shape(1); - // Width and height of the smaller input, which should be input 0 - int_tp height_a = bottom[0]->height(); - int_tp width_a = bottom[0]->width(); - - int_tp height_b = bottom[1]->height(); - int_tp width_b = bottom[1]->width(); - if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CopyBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index fc9ba803107..94788b7bb7e 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -749,7 +749,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -760,7 +760,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -774,7 +774,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -783,7 +783,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, top_data); @@ -806,7 +806,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); @@ -815,7 +815,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; @@ -827,7 +827,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); @@ -835,7 +835,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, bottom_data, bottom[0]->num(), channels_, + count, bottom_data, bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } @@ -909,7 +909,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, height_, width_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -927,7 +927,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_ave_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -947,7 +947,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -960,7 +960,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -987,7 +987,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_max_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, height_, width_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, WrapHandle((cl_mem) top_data, &ctx), @@ -1003,7 +1003,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_ave_pool_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), - bottom[0]->num(), channels_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, WrapHandle((cl_mem)top_data, &ctx)), @@ -1022,7 +1022,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, @@ -1035,7 +1035,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_sto_pool_forward(count, WrapHandle((cl_mem)bottom_data, &ctx), - bottom[0]->num(), channels_, + bottom[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, WrapHandle((cl_mem)top_data, &ctx)), @@ -1129,7 +1129,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, top_diff, mask, top_mask, top[0]->num(), channels_, + count, top_diff, mask, top_mask, top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -1152,7 +1152,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, top_diff, mask, top_mask, top[0]->num(), channels_, + count, top_diff, mask, top_mask, top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); @@ -1161,7 +1161,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( - count, top_diff, top[0]->num(), channels_, + count, top_diff, top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; @@ -1170,7 +1170,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, StoPoolBackward CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, rand_idx_.gpu_data(), top_diff, - top[0]->num(), channels_, height_, width_, pooled_height_, + top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; @@ -1247,7 +1247,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, mask == NULL ? 0 : 1, WrapHandle((cl_mem) mask, &ctx), WrapHandle((cl_mem) top_mask, &ctx), - top[0]->num(), channels_, height_, width_, + top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, ext_kernel_h, ext_kernel_w, stride_h_, stride_w_, kstride_h_, kstride_w_, @@ -1277,7 +1277,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, mask == NULL ? 0 : 1, WrapHandle((cl_mem) mask, &ctx), WrapHandle((cl_mem) top_mask, &ctx), - top[0]->num(), channels_, height_, width_, + top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, @@ -1292,7 +1292,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, viennacl::ocl::enqueue( oclk_ave_pool_backward(count, WrapHandle((cl_mem) top_diff, &ctx), - top[0]->num(), channels_, height_, width_, + top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, @@ -1307,7 +1307,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, viennacl::ocl::enqueue( oclk_sto_pool_backward( count, WrapHandle((cl_mem) (rand_idx_.gpu_data()), &ctx), - WrapHandle((cl_mem) top_diff, &ctx), top[0]->num(), + WrapHandle((cl_mem) top_diff, &ctx), top[0]->shape(0), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index f6b06a323da..f0037dd73a7 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -113,7 +113,8 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, // and then normalize. // compute max // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_max CUDA_KERNEL(CAFFE_GET_BLOCKS(outer_num_ * inner_num_), + kernel_channel_max CUDA_KERNEL( + CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_data, scale_data); // subtract @@ -129,9 +130,10 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_sum CUDA_KERNEL(CAFFE_GET_BLOCKS(outer_num_ * inner_num_), - CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_data, - scale_data); + kernel_channel_sum CUDA_KERNEL( + CAFFE_GET_BLOCKS(outer_num_ * inner_num_), + CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, + inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div CUDA_KERNEL(CAFFE_GET_BLOCKS(count), @@ -209,9 +211,10 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, // Compute inner1d(top_diff, top_data) and // subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) - kernel_channel_dot CUDA_KERNEL(CAFFE_GET_BLOCKS(outer_num_ * inner_num_), - CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, top_diff, top_data, - scale_data); + kernel_channel_dot CUDA_KERNEL( + CAFFE_GET_BLOCKS(outer_num_ * inner_num_), + CAFFE_CUDA_NUM_THREADS)(outer_num_, channels, inner_num_, + top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(count, outer_num_, channels, inner_num_, diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 0e50bead33a..526958b4e16 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -48,10 +48,8 @@ void SoftmaxWithLossLayer::Forward_gpu( #ifdef USE_CUDA const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); - const int_tp num = prob_.shape(0); - const int_tp dim = prob_.count() / num; - const int_tp spatial_dim = prob_.height() * prob_.width(); - const int_tp nthreads = num * spatial_dim; + const int dim = prob_.count() / outer_num_; + const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. @@ -62,7 +60,7 @@ void SoftmaxWithLossLayer::Forward_gpu( // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)(nthreads, prob_data, label, loss_data, - num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts); + outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (normalize_) { @@ -74,7 +72,7 @@ void SoftmaxWithLossLayer::Forward_gpu( loss /= count; } } else { - loss /= num; + loss /= outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { @@ -90,10 +88,8 @@ void SoftmaxWithLossLayer::Forward_gpu( cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); - const int_tp num = prob_.shape(0); - const int_tp dim = prob_.count() / num; - const int_tp spatial_dim = prob_.height() * prob_.width(); - const int_tp nthreads = num * spatial_dim; + const int_tp dim = prob_.count() / outer_num_; + const int_tp nthreads = outer_num_ * inner_num_; cl_mem loss_data = (cl_mem) (bottom[0]->mutable_gpu_diff()); cl_mem counts = (cl_mem) (prob_.mutable_gpu_diff()); @@ -102,26 +98,24 @@ void SoftmaxWithLossLayer::Forward_gpu( viennacl::ocl::enqueue( oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, &ctx), WrapHandle(label, &ctx), - WrapHandle(loss_data, &ctx), num, dim, - spatial_dim, has_ignore_label_ ? 1 : 0, + WrapHandle(loss_data, &ctx), outer_num_, dim, + inner_num_, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype loss; - greentea_gpu_asum(this->device_->id(), nthreads, loss_data, 0, - &loss); + greentea_gpu_asum(this->device_->id(), nthreads, loss_data, 0, &loss); if (normalize_) { Dtype count; - greentea_gpu_asum(this->device_->id(), nthreads, counts, 0, - &count); + greentea_gpu_asum(this->device_->id(), nthreads, counts, 0, &count); if (count == 0) { loss = 0; } else { loss /= count; } } else { - loss /= num; + loss /= outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { @@ -176,28 +170,23 @@ void SoftmaxWithLossLayer::Backward_gpu( const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); - const int_tp num = prob_.shape(0); - const int_tp dim = prob_.count() / num; - const int_tp spatial_dim = prob_.height() * prob_.width(); - const int_tp nthreads = num * spatial_dim; + const int_tp dim = prob_.count() / outer_num_; + const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) - SoftmaxLossBackwardGPUCUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), - CAFFE_CUDA_NUM_THREADS)(nthreads, top_data, label, bottom_diff, - num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts); + SoftmaxLossBackwardGPU CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), + CAFFE_CUDA_NUM_THREADS) (nthreads, top_data, label, bottom_diff, + outer_num_, dim, inner_num_, has_ignore_label_, + ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); - if (count == 0) { - caffe_gpu_set(prob_.count(), 0.0, bottom_diff); - } else { - caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); - } + caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { - caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); + caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } if (bottom.size() == 3) { // TODO: Correct this for easy diff scaling @@ -219,10 +208,8 @@ void SoftmaxWithLossLayer::Backward_gpu( greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); - const int_tp num = prob_.shape(0); - const int_tp dim = prob_.count() / num; - const int_tp spatial_dim = prob_.height() * prob_.width(); - const int_tp nthreads = num * spatial_dim; + const int_tp dim = prob_.count() / outer_num_; + const int_tp nthreads = outer_num_ * inner_num_; cl_mem counts = (cl_mem)(prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( @@ -230,7 +217,7 @@ void SoftmaxWithLossLayer::Backward_gpu( viennacl::ocl::enqueue( oclk_softmax_loss_backward(nthreads, WrapHandle(top_data, &ctx), WrapHandle(label, &ctx), WrapHandle(bottom_diff, &ctx), - num, dim, spatial_dim, has_ignore_label_ ? 1 : 0, + outer_num_, dim, inner_num_, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); @@ -248,7 +235,7 @@ void SoftmaxWithLossLayer::Backward_gpu( } } else { greentea_gpu_scal(this->device_->id(), - prob_.count(), loss_weight / num, bottom_diff, 0); + prob_.count(), loss_weight / outer_num_, bottom_diff, 0); } if (bottom.size() == 3) { // TODO: Correct this for easy diff scaling diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 31fe73bd5c3..9f6dd2029b7 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -200,7 +200,7 @@ void Solver::InitTestNets() { } template -void Solver::Step(int_tp iters) { +Dtype Solver::Step(int_tp iters) { vector*> bottom_vec; const int_tp start_iter = iter_; const int_tp stop_iter = iter_ + iters; @@ -289,6 +289,7 @@ void Solver::Step(int_tp iters) { break; } } + return smoothed_loss; } template From 079733101eb1f6cee838104a3a67f4a26da0a1fd Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 27 Oct 2015 03:20:20 +0100 Subject: [PATCH 201/254] ViennaCL fix. --- include/caffe/loss_layers.hpp | 4 + .../greentea/greentea_math_functions.cpp | 94 +++++++++---------- src/caffe/layers/euclidean_loss_layer.cpp | 36 +++---- src/caffe/layers/euclidean_loss_layer.cu | 28 ++++-- 4 files changed, 90 insertions(+), 72 deletions(-) diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index a5be29400c4..eb9e1776e20 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -259,6 +259,10 @@ class EuclideanLossLayer : public LossLayer { return true; } + virtual inline int_tp ExactNumBottomBlobs() const { return -1; } + virtual inline int_tp MinBottomBlobs() const { return 2; } + virtual inline int_tp MaxBottomBlobs() const { return 3; } + protected: /// @copydoc EuclideanLossLayer virtual void Forward_cpu(const vector*>& bottom, diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 5f1167aac0a..dea089f0e6e 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -207,37 +207,37 @@ void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, #ifndef USE_CLBLAS - typedef typename viennacl::matrix_base::uint_tpype - uint_tpype; - typedef typename viennacl::matrix_base::uint_tpype + typedef typename viennacl::matrix_base::size_type + size_type; + typedef typename viennacl::matrix_base::size_type difference_type; - uint_tpype A_size1 = static_cast( + size_type A_size1 = static_cast( (TransA == CblasTrans) ? K : M); - uint_tpype A_size2 = static_cast( + size_type A_size2 = static_cast( (TransA == CblasTrans) ? M : K); - uint_tpype B_size1 = static_cast( + size_type B_size1 = static_cast( (TransB == CblasTrans) ? N : K); - uint_tpype B_size2 = static_cast( + size_type B_size2 = static_cast( (TransB == CblasTrans) ? K : N); - viennacl::matrix_base matA(A, ctx, A_size1, uint_tpype(0), - difference_type(1), uint_tpype(M), - A_size2, uint_tpype(offA), - difference_type(1), uint_tpype(lda) + viennacl::matrix_base matA(A, ctx, A_size1, size_type(0), + difference_type(1), size_type(M), + A_size2, size_type(offA), + difference_type(1), size_type(lda) VCL_ROW_MAJOR); - viennacl::matrix_base matB(B, ctx, B_size1, uint_tpype(0), - difference_type(1), uint_tpype(K), - B_size2, uint_tpype(offB), - difference_type(1), uint_tpype(ldb) + viennacl::matrix_base matB(B, ctx, B_size1, size_type(0), + difference_type(1), size_type(K), + B_size2, size_type(offB), + difference_type(1), size_type(ldb) VCL_ROW_MAJOR); - viennacl::matrix_base matC(C, ctx, uint_tpype(M), uint_tpype(0), - difference_type(1), uint_tpype(M), - uint_tpype(N), uint_tpype(offC), - difference_type(1), uint_tpype(ldc) + viennacl::matrix_base matC(C, ctx, size_type(M), size_type(0), + difference_type(1), size_type(M), + size_type(N), size_type(offC), + difference_type(1), size_type(ldc) VCL_ROW_MAJOR); if (TransA == CblasTrans && TransB == CblasTrans) @@ -327,19 +327,19 @@ void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::uint_tpype uint_tpype; - typedef typename viennacl::vector_base::uint_tpype difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; viennacl::vector_base v1(x, - uint_tpype((TransA == CblasTrans) ? M : N), - uint_tpype(offx), difference_type(1), ctx); + size_type((TransA == CblasTrans) ? M : N), + size_type(offx), difference_type(1), ctx); viennacl::vector_base v2(y, - uint_tpype((TransA == CblasTrans) ? N : M), - uint_tpype(offy), difference_type(1), ctx); - viennacl::matrix_base mat(A, ctx, uint_tpype(M), uint_tpype(0), - difference_type(1), uint_tpype(M), - uint_tpype(N), uint_tpype(offA), - difference_type(1), uint_tpype(N) + size_type((TransA == CblasTrans) ? N : M), + size_type(offy), difference_type(1), ctx); + viennacl::matrix_base mat(A, ctx, size_type(M), size_type(0), + difference_type(1), size_type(M), + size_type(N), size_type(offA), + difference_type(1), size_type(N) VCL_ROW_MAJOR); v2 *= beta; if (TransA == CblasTrans) @@ -406,12 +406,12 @@ void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::uint_tpype uint_tpype; - typedef typename viennacl::vector_base::uint_tpype difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, uint_tpype(N), uint_tpype(offX), + viennacl::vector_base v1(X, size_type(N), size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, uint_tpype(N), uint_tpype(offY), + viennacl::vector_base v2(Y, size_type(N), size_type(offY), difference_type(1), ctx); v2 += alpha * v1; @@ -504,10 +504,10 @@ void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::uint_tpype uint_tpype; - typedef typename viennacl::vector_base::uint_tpype difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(x, uint_tpype(N), uint_tpype(offx), + viennacl::vector_base v1(x, size_type(N), size_type(offx), difference_type(1), ctx); v1 *= alpha; @@ -574,12 +574,12 @@ void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, const cl_mem X, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::uint_tpype uint_tpype; - typedef typename viennacl::vector_base::uint_tpype difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, uint_tpype(n), uint_tpype(offX), + viennacl::vector_base v1(X, size_type(n), size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, uint_tpype(n), uint_tpype(offY), + viennacl::vector_base v2(Y, size_type(n), size_type(offY), difference_type(1), ctx); *out = viennacl::linalg::inner_prod(v1, v2); @@ -638,10 +638,10 @@ void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, const cl_mem X, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::uint_tpype uint_tpype; - typedef typename viennacl::vector_base::uint_tpype difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, uint_tpype(n), uint_tpype(offX), + viennacl::vector_base v1(X, size_type(n), size_type(offX), difference_type(1), ctx); *Y = viennacl::linalg::norm_1(v1); @@ -703,12 +703,12 @@ void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base::uint_tpype uint_tpype; - typedef typename viennacl::vector_base::uint_tpype difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, uint_tpype(n), uint_tpype(offX), + viennacl::vector_base v1(X, size_type(n), size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, uint_tpype(n), uint_tpype(offY), + viennacl::vector_base v2(Y, size_type(n), size_type(offY), difference_type(1), ctx); v2 = v1 * alpha; diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index fdee8daf32a..6c83b1a54f9 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -5,38 +5,40 @@ namespace caffe { -template -void EuclideanLossLayer::Reshape( - const vector*>& bottom, const vector*>& top) { +template +void EuclideanLossLayer::Reshape(const vector*>& bottom, + const vector*>& top) { LossLayer::Reshape(bottom, top); CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1)) << "Inputs must have the same dimension."; diff_.ReshapeLike(*bottom[0]); } -template +template void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { int_tp count = bottom[0]->count(); - caffe_sub( - count, - bottom[0]->cpu_data(), - bottom[1]->cpu_data(), - diff_.mutable_cpu_data()); + caffe_sub(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), + diff_.mutable_cpu_data()); + // Scale the error element-wise + if (bottom.size() == 3) { + caffe_mul(count, diff_.mutable_cpu_data(), bottom[2]->gpu_data(), + diff_.mutable_gpu_data()); + } Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data()); - Dtype loss = dot / bottom[0]->num() / Dtype(2); + Dtype loss = dot / bottom[0]->shape(0) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } -template -void EuclideanLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { +template +void EuclideanLossLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); - caffe_cpu_axpby( - bottom[i]->count(), // count + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); + caffe_cpu_axpby(bottom[i]->count(), // count alpha, // alpha diff_.cpu_data(), // a Dtype(0), // beta diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index 26140c98ca1..c9b8839396b 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -17,11 +17,16 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), - diff_.mutable_gpu_data()); + caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), + diff_.mutable_gpu_data()); + // Scale the error element-wise + if (bottom.size() == 3) { + caffe_gpu_mul(count, diff_.mutable_gpu_data(), + bottom[2]->gpu_data(), diff_.mutable_gpu_data()); + } Dtype dot; - caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); - Dtype loss = dot / bottom[0]->num() / Dtype(2); + caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); + Dtype loss = dot / bottom[0]->shape(0) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; #endif // USE_CUDA } else { @@ -30,11 +35,18 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (bottom[1]->gpu_data()), 0, (cl_mem) (diff_.mutable_gpu_data()), 0); + // Scale the error element-wise + if (bottom.size() == 3) { + greentea_gpu_mul(this->device_->id(), count, + (cl_mem) (diff_.mutable_gpu_data()), 0, + (cl_mem) (bottom[2]->gpu_data()), 0, + (cl_mem) (diff_.mutable_gpu_data()), 0); + } Dtype dot; greentea_gpu_dot(this->device_->id(), count, (cl_mem) (diff_.gpu_data()), 0, (cl_mem) (diff_.gpu_data()), 0, &dot); - Dtype loss = dot / bottom[0]->num() / Dtype(2); + Dtype loss = dot / bottom[0]->shape(0) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; #endif // USE_GREENTEA } @@ -47,7 +59,7 @@ void EuclideanLossLayer::Backward_gpu( for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_axpby(bottom[i]->count(), // count @@ -58,8 +70,8 @@ void EuclideanLossLayer::Backward_gpu( #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_axpby(this->device_->id(), bottom[i]->count(), - alpha, (cl_mem) (diff_.gpu_data()), 0, Dtype(0), + greentea_gpu_axpby(this->device_->id(), bottom[i]->count(), alpha, + (cl_mem) (diff_.gpu_data()), 0, Dtype(0), (cl_mem) (bottom[i]->mutable_gpu_diff()), 0); #endif // USE_GREENTEA } From 165d231f82e30b1dc42f4ef4bebb984e89b7e79f Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 31 Oct 2015 20:52:55 +0100 Subject: [PATCH 202/254] Removed duplicate function. --- python/caffe/_caffe.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 8a998c7320d..1522981ec6f 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -293,7 +293,6 @@ BOOST_PYTHON_MODULE(_caffe) { .def("solve", static_cast::*)(const char*)>( &Solver::Solve), SolveOverloads()) - .def("step", &Solver::Step) .def("restore", &Solver::Restore) .def("snapshot", &Solver::Snapshot); From f33e280a5396c0818b1acc4cfe4180a7595b6675 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 31 Oct 2015 22:09:00 +0100 Subject: [PATCH 203/254] CL pooling SK kernel fix. --- src/caffe/greentea/cl_kernels.cpp | 4 ++-- src/caffe/greentea/cl_kernels/pooling_sk.cl | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 35e201357ce..8e2aa225569 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -27,7 +27,7 @@ std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#e std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT @@ -51,7 +51,7 @@ std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n# std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int_tp use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index 288e6ba5fa6..20866d0272e 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -21,7 +21,7 @@ __global Dtype* bottom_data, const int_tp pad_h, const int_tp pad_w, __global Dtype* top_data, - const int_tp use_mask, + const int use_mask, __global int_tp* mask, __global Dtype* top_mask) { for (int_tp index = get_global_id(0); index < nthreads; @@ -58,7 +58,7 @@ __global Dtype* bottom_data, } __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( - const int_tp nthreads, __global const Dtype* top_diff, const int_tp use_mask, + const int_tp nthreads, __global const Dtype* top_diff, const int use_mask, __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, From 23d6ffd7c5d6e9d4a50c0bf7bd66252af4cb9254 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 5 Nov 2015 00:52:44 +0100 Subject: [PATCH 204/254] ND pooling fix. --- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/pooling_nd.cl | 23 ++- src/caffe/layers/pooling_layer.cpp | 2 + src/caffe/layers/pooling_layer.cu | 28 ++- src/caffe/test/test_pooling_nd_layer.cpp | 85 +++++---- src/caffe/test/test_pooling_ndsk_layer.cpp | 200 ++++++++++++++++++++ 6 files changed, 285 insertions(+), 57 deletions(-) create mode 100644 src/caffe/test/test_pooling_ndsk_layer.cpp diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 8e2aa225569..49a8632fe01 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -26,7 +26,7 @@ std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#en std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT @@ -50,7 +50,7 @@ std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#e std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = index % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] = (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %\n kstride[i] : d_idx[i];\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/pooling_nd.cl b/src/caffe/greentea/cl_kernels/pooling_nd.cl index bf49f2d42eb..69153e0f67b 100644 --- a/src/caffe/greentea/cl_kernels/pooling_nd.cl +++ b/src/caffe/greentea/cl_kernels/pooling_nd.cl @@ -26,7 +26,7 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n, int_tp offset = 1; int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { - d_idx[i] = index % pooled_size[i]; + d_idx[i] = num % pooled_size[i]; d_start[i] = d_idx[i] * stride[i] - pad[i]; d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); d_start[i] = max(d_start[i], 0L); @@ -116,11 +116,22 @@ __kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n, int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = num % size[i]; - d_start[i] = (d_idx[i] < ext_kernel_size[i]) ? - d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; - d_end[i] = (d_idx[i] >= pooled_size[i]) ? - (pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) % - kstride[i] : d_idx[i]; + if (kstride[i] > 1) { + d_start[i] = + (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = + (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) + - (pooled_size[i] - 1 - d_start[i]) % kstride[i] : + d_idx[i]; + } else { + d_start[i] = + (d_idx[i] + pad[i] < kernel_size[i]) ? + 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1; + d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1), + (int_tpc) (pooled_size[i])); + } num /= size[i]; offset *= pooled_size[i]; d_iter[i] = d_start[i]; diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 6de258c7d0d..e45aeceea23 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -165,6 +165,8 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, break; } } + + Reshape(bottom, top); } diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 94788b7bb7e..fea2452f15c 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -558,7 +558,7 @@ __global__ void MaxPoolNDForward(const int_tp n, const int_tp num_axes, int_tp offset = 1; int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { - d_idx[i] = index % pooled_size[i]; + d_idx[i] = num % pooled_size[i]; d_start[i] = d_idx[i] * stride[i] - pad[i]; d_end[i] = min((int_tpc) (d_start[i] + ext_kernel_size[i]), (int_tpc) (size[i])); @@ -642,14 +642,22 @@ __global__ void MaxPoolNDBackward(const int_tp n, const int_tp num_axes, int_tp num = index; for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = num % size[i]; - d_start[i] = - (d_idx[i] < ext_kernel_size[i]) ? - d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; - d_end[i] = - (d_idx[i] >= pooled_size[i]) ? - (pooled_size[i] - 1) - - (pooled_size[i] - 1 - d_start[i]) % kstride[i] : - d_idx[i]; + if (kstride[i] > 1) { + d_start[i] = + (d_idx[i] < ext_kernel_size[i]) ? + d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1; + d_end[i] = + (d_idx[i] >= pooled_size[i]) ? + (pooled_size[i] - 1) + - (pooled_size[i] - 1 - d_start[i]) % kstride[i] : + d_idx[i]; + } else { + d_start[i] = + (d_idx[i] + pad[i] < kernel_size[i]) ? + 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1; + d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1), + (int_tpc) (pooled_size[i])); + } num /= size[i]; offset *= pooled_size[i]; d_iter[i] = d_start[i]; @@ -659,6 +667,7 @@ __global__ void MaxPoolNDBackward(const int_tp n, const int_tp num_axes, return; } } + int_tp chan = num % channels; num /= channels; offset *= (num * channels + chan); @@ -679,7 +688,6 @@ __global__ void MaxPoolNDBackward(const int_tp n, const int_tp num_axes, size_prod *= size[i]; pooled_size_prod *= pooled_size[i]; } - if (mask) { if (mask[final_offset] == im_offset) { gradient += top_diff[final_offset]; diff --git a/src/caffe/test/test_pooling_nd_layer.cpp b/src/caffe/test/test_pooling_nd_layer.cpp index 931963bf0d9..7fafc0da673 100644 --- a/src/caffe/test/test_pooling_nd_layer.cpp +++ b/src/caffe/test/test_pooling_nd_layer.cpp @@ -27,16 +27,16 @@ class PoolingNDLayerTest : public GPUDeviceTest { BlobShape shape; shape.add_dim(1); // Batch shape.add_dim(1); // Channels - shape.add_dim(5); // Depth - shape.add_dim(5); // Height - shape.add_dim(5); // Width + shape.add_dim(4); // Depth + shape.add_dim(4); // Height + shape.add_dim(4); // Width blob_bottom_->Reshape(shape); shape.add_dim(1); // Batch shape.add_dim(1); // Channels - shape.add_dim(1); // Depth - shape.add_dim(1); // Height - shape.add_dim(1); // Width + shape.add_dim(2); // Depth + shape.add_dim(2); // Height + shape.add_dim(2); // Width blob_top_->Reshape(shape); // fill the values @@ -54,13 +54,13 @@ class PoolingNDLayerTest : public GPUDeviceTest { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->add_kernel_size(3); - pooling_param->add_kernel_size(3); - pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(2); + pooling_param->add_kernel_size(2); + pooling_param->add_kernel_size(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); + pooling_param->add_stride(2); + pooling_param->add_stride(2); + pooling_param->add_stride(2); pooling_param->set_axis(1); @@ -73,16 +73,16 @@ class PoolingNDLayerTest : public GPUDeviceTest { TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); - TypeParam maxval = 0; + std::vector maxval(8); for (int_tp cd = 0; cd < d; ++cd) { for (int_tp ch = 0; ch < h; ++ch) { for (int_tp cw = 0; cw < w; ++cw) { bottom_data[cw + ch * w + cd * w * h] = cw + ch * w + cd * w * h; - if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { - maxval = std::max((TypeParam)(cw + ch * w + cd * w * h), maxval); - } + maxval[cw/2 + (ch/2)*2 + (cd/2)*4] = + std::max(bottom_data[cw + ch * w + cd * w * h], + maxval[cw/2 + (ch/2)*2 + (cd/2)*4]); } } } @@ -91,7 +91,10 @@ class PoolingNDLayerTest : public GPUDeviceTest { const TypeParam *top_data = blob_top_->cpu_data(); - EXPECT_EQ(maxval, top_data[0]); + for (int i = 0; i < 2*2*2; ++i) { + EXPECT_EQ(maxval[i], top_data[i]); + } + } void TestBackward() { @@ -99,13 +102,13 @@ class PoolingNDLayerTest : public GPUDeviceTest { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->add_kernel_size(3); - pooling_param->add_kernel_size(3); - pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(2); + pooling_param->add_kernel_size(2); + pooling_param->add_kernel_size(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); + pooling_param->add_stride(2); + pooling_param->add_stride(2); + pooling_param->add_stride(2); pooling_param->set_axis(1); @@ -118,16 +121,16 @@ class PoolingNDLayerTest : public GPUDeviceTest { TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); - TypeParam maxval = 0; + std::vector maxval(8); for (int_tp cd = 0; cd < d; ++cd) { for (int_tp ch = 0; ch < h; ++ch) { for (int_tp cw = 0; cw < w; ++cw) { bottom_data[cw + ch * w + cd * w * h] = cw + ch * w + cd * w * h; - if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { - maxval = std::max((TypeParam)(cw + ch * w + cd * w * h), maxval); - } + maxval[cw/2 + (ch/2)*2 + (cd/2)*4] = + std::max(bottom_data[cw + ch * w + cd * w * h], + maxval[cw/2 + (ch/2)*2 + (cd/2)*4]); } } } @@ -135,7 +138,9 @@ class PoolingNDLayerTest : public GPUDeviceTest { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam *top_diff = blob_top_->mutable_cpu_diff(); - top_diff[0] = maxval; + for (int i = 0; i < 2*2*2; ++i) { + top_diff[i] = maxval[i]; + } std::vector prop_down; prop_down.push_back(true); @@ -147,8 +152,10 @@ class PoolingNDLayerTest : public GPUDeviceTest { for (int_tp cd = 0; cd < d; ++cd) { for (int_tp ch = 0; ch < h; ++ch) { for (int_tp cw = 0; cw < w; ++cw) { - if (maxval == cw + ch * w + cd * w * h) { - EXPECT_EQ(maxval, bottom_diff[cw + ch * w + cd * w * h]); + if (maxval[cw/2 + (ch/2)*2 + (cd/2)*4] == cw + ch * w + cd * w * h) { + EXPECT_EQ(maxval[cw/2 + (ch/2)*2 + (cd/2)*4], bottom_diff[cw + ch * w + cd * w * h]); + } else { + EXPECT_EQ(0, bottom_diff[cw + ch * w + cd * w * h]); } } } @@ -169,13 +176,13 @@ TYPED_TEST(PoolingNDLayerTest, TestSetup) { PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); - pooling_param->add_kernel_size(3); - pooling_param->add_kernel_size(3); - pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(2); + pooling_param->add_kernel_size(2); + pooling_param->add_kernel_size(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); + pooling_param->add_stride(2); + pooling_param->add_stride(2); + pooling_param->add_stride(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); @@ -183,9 +190,9 @@ TYPED_TEST(PoolingNDLayerTest, TestSetup) { PoolingLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(1, this->blob_top_->shape(2)); - EXPECT_EQ(1, this->blob_top_->shape(3)); - EXPECT_EQ(1, this->blob_top_->shape(4)); + EXPECT_EQ(2, this->blob_top_->shape(2)); + EXPECT_EQ(2, this->blob_top_->shape(3)); + EXPECT_EQ(2, this->blob_top_->shape(4)); } TYPED_TEST(PoolingNDLayerTest, TestForward) { diff --git a/src/caffe/test/test_pooling_ndsk_layer.cpp b/src/caffe/test/test_pooling_ndsk_layer.cpp new file mode 100644 index 00000000000..f4f0092a231 --- /dev/null +++ b/src/caffe/test/test_pooling_ndsk_layer.cpp @@ -0,0 +1,200 @@ +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +#ifndef CPU_ONLY // CPU-GPU test + +namespace caffe { + +template +class PoolingNDSKLayerTest : public GPUDeviceTest { + protected: + PoolingNDSKLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) { + } + + virtual void SetUp() { + BlobShape shape; + shape.add_dim(1); // Batch + shape.add_dim(1); // Channels + shape.add_dim(5); // Depth + shape.add_dim(5); // Height + shape.add_dim(5); // Width + blob_bottom_->Reshape(shape); + + shape.add_dim(1); // Batch + shape.add_dim(1); // Channels + shape.add_dim(1); // Depth + shape.add_dim(1); // Height + shape.add_dim(1); // Width + blob_top_->Reshape(shape); + + // fill the values + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~PoolingNDSKLayerTest() { + delete blob_bottom_; + delete blob_top_; + } + + void TestForward() { + LayerParameter layer_param; + PoolingParameter* pooling_param = + layer_param.mutable_pooling_param(); + + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + + pooling_param->set_axis(1); + + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + int_tp d = blob_bottom_->shape(2); + int_tp h = blob_bottom_->shape(3); + int_tp w = blob_bottom_->shape(4); + + TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); + + TypeParam maxval = 0; + + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { + bottom_data[cw + ch * w + cd * w * h] = + cw + ch * w + cd * w * h; + if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { + maxval = std::max((TypeParam)(cw + ch * w + cd * w * h), maxval); + } + } + } + } + + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + const TypeParam *top_data = blob_top_->cpu_data(); + + EXPECT_EQ(maxval, top_data[0]); + } + + void TestBackward() { + LayerParameter layer_param; + PoolingParameter* pooling_param = + layer_param.mutable_pooling_param(); + + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + + pooling_param->set_axis(1); + + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + int_tp d = blob_bottom_->shape(2); + int_tp h = blob_bottom_->shape(3); + int_tp w = blob_bottom_->shape(4); + + TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); + + TypeParam maxval = 0; + + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { + bottom_data[cw + ch * w + cd * w * h] = + cw + ch * w + cd * w * h; + if (cw % 2 == 0 && ch % 2 == 0 && cd % 2 == 0) { + maxval = std::max((TypeParam)(cw + ch * w + cd * w * h), maxval); + } + } + } + } + + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + TypeParam *top_diff = blob_top_->mutable_cpu_diff(); + top_diff[0] = maxval; + + std::vector prop_down; + prop_down.push_back(true); + + layer.Backward(this->blob_top_vec_, prop_down, this->blob_bottom_vec_); + + const TypeParam *bottom_diff = blob_bottom_->cpu_diff(); + + for (int_tp cd = 0; cd < d; ++cd) { + for (int_tp ch = 0; ch < h; ++ch) { + for (int_tp cw = 0; cw < w; ++cw) { + if (maxval == cw + ch * w + cd * w * h) { + EXPECT_EQ(maxval, bottom_diff[cw + ch * w + cd * w * h]); + } + } + } + } + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(PoolingNDSKLayerTest, TestDtypes); + +TYPED_TEST(PoolingNDSKLayerTest, TestSetup) { + LayerParameter layer_param; + PoolingParameter* pooling_param = + layer_param.mutable_pooling_param(); + + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + pooling_param->add_kernel_size(3); + + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + pooling_param->add_kstride(2); + + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + + + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(1, this->blob_top_->shape(2)); + EXPECT_EQ(1, this->blob_top_->shape(3)); + EXPECT_EQ(1, this->blob_top_->shape(4)); +} + +TYPED_TEST(PoolingNDSKLayerTest, TestForward) { + this->TestForward(); +} + +TYPED_TEST(PoolingNDSKLayerTest, TestBackward) { + this->TestBackward(); +} + +} // namespace caffe +#endif // !CPU_ONLY From b55c9b9436d95a2571218408e6f9def6966885c8 Mon Sep 17 00:00:00 2001 From: Srini Turaga Date: Fri, 6 Nov 2015 14:08:33 -0500 Subject: [PATCH 205/254] changed to square loss --- include/caffe/loss_layers.hpp | 3 +-- src/caffe/layers/malis_loss_layer.cpp | 17 +++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index eb9e1776e20..3e916d010b1 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -803,8 +803,7 @@ class MalisLossLayer : public LossLayer { const int_tp* nhood_data, const int_tp* nhood_dims, const Dtype* seg_data, const bool pos, Dtype* dloss_data, Dtype* loss_out, - Dtype *classerr_out, Dtype *rand_index_out, - Dtype margin, Dtype threshold); + Dtype *classerr_out, Dtype *rand_index_out); int_tp nedges_; int_tp conn_num_dims_; diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 4e070f2105b..f83fc194915 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -51,8 +51,7 @@ void MalisLossLayer::Malis(const Dtype* conn_data, const int_tp* nhood_dims, const Dtype* seg_data, const bool pos, Dtype* dloss_data, Dtype* loss_out, - Dtype *classerr_out, Dtype *rand_index_out, - Dtype margin, Dtype threshold) { + Dtype *classerr_out, Dtype *rand_index_out) { if ((nhood_dims[1] != (conn_num_dims - 1)) || (nhood_dims[0] != conn_dims[0])) { LOG(FATAL) << "nhood and conn dimensions don't match" @@ -206,21 +205,21 @@ void MalisLossLayer::Malis(const Dtype* conn_data, if (pos && (it1->first == it2->first)) { // +ve example pairs - dl = std::max(Dtype(0.0), threshold + margin - conn_data[minEdge]); + dl = (Dtype(1.0) - conn_data[minEdge]); loss += dl * nPair; // Use hinge loss dloss_data[minEdge] -= dl * nPair; - if (conn_data[minEdge] <= threshold) { // an error + if (conn_data[minEdge] <= Dtype(0.5)) { // an error nPairIncorrect += nPair; } } else if ((!pos) && (it1->first != it2->first)) { // -ve example pairs - dl = std::max(Dtype(0.0), conn_data[minEdge] - threshold + margin); + dl = (conn_data[minEdge]); loss += dl * nPair; // Use hinge loss dloss_data[minEdge] += dl * nPair; - if (conn_data[minEdge] > threshold) { // an error + if (conn_data[minEdge] > Dtype(0.5)) { // an error nPairIncorrect += nPair; } } @@ -420,6 +419,8 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, Dtype* affinity_data_pos = affinity_pos_.mutable_cpu_data(); Dtype* affinity_data_neg = affinity_neg_.mutable_cpu_data(); +// Affinity graph must be in the range (0,1) +// square loss (euclidean) is used by MALIS #pragma omp parallel for for (int_tp i = 0; i < bottom[0]->count(); ++i) { affinity_data_pos[i] = std::min(affinity_prob[i], affinity[i]); @@ -446,7 +447,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], bottom[2]->cpu_data() + batch_offset * batch, false, dloss_neg_.mutable_cpu_data() + batch_offset * batch, &loss_out, - &classerr_out, &rand_index_out, 0.3, 0.5); + &classerr_out, &rand_index_out); loss += loss_out; std::cout << loss << std::endl; @@ -455,7 +456,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], bottom[2]->cpu_data() + batch_offset * batch, true, dloss_pos_.mutable_cpu_data() + batch_offset * batch, &loss_out, - &classerr_out, &rand_index_out, 0.3, 0.5); + &classerr_out, &rand_index_out); loss += loss_out; std::cout << loss << std::endl; From 4ee23f61fe5fd244cab36b2d6b03b6f2000d764f Mon Sep 17 00:00:00 2001 From: Srini Turaga Date: Fri, 6 Nov 2015 16:48:10 -0500 Subject: [PATCH 206/254] remove my personal config file --- Makefile.config.srini | 113 ------------------------------------------ 1 file changed, 113 deletions(-) delete mode 100644 Makefile.config.srini diff --git a/Makefile.config.srini b/Makefile.config.srini deleted file mode 100644 index c4125eba5bb..00000000000 --- a/Makefile.config.srini +++ /dev/null @@ -1,113 +0,0 @@ -## Refer to http://caffe.berkeleyvision.org/installation.html -# Contributions simplifying and improving our build system are welcome! - -# GreenTea (ViennaCL/OpenCL) backend switch - -# Enable the CUDA backend -USE_CUDA := 1 - -# Enable the OpenCL/Greentea backend -USE_GREENTEA := 0 - -# Folder of the ViennaCL header-only library -VIENNACL_DIR = ../ViennaCL - -# Either set clBLAS to 1 or it will use ViennaclBLAS. -# CLBLAS should be faster, especially on AMD cards. -USE_CLBLAS := 0 - -# cuDNN acceleration switch (uncomment to build with cuDNN). -USE_CUDNN := 0 - -# CPU-only switch (uncomment to build without GPU support). -# CPU_ONLY := 1 - -# To customize your choice of compiler, uncomment and set the following. -# N.B. the default for Linux is g++ and the default for OSX is clang++ -# CUSTOM_CXX := g++ - -# CUDA directory contains bin/ and lib/ directories that we need. -CUDA_DIR := /usr/local/cuda-7.0 -# On Ubuntu 14.04, if cuda tools are installed via -# "sudo apt-get install nvidia-cuda-toolkit" then use this instead: -# CUDA_DIR := /usr - -# CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. -CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ - -gencode arch=compute_20,code=sm_21 \ - -gencode arch=compute_30,code=sm_30 \ - -gencode arch=compute_35,code=sm_35 \ - -gencode arch=compute_50,code=sm_50 \ - -gencode arch=compute_50,code=compute_50 - -# BLAS choice: -# atlas for ATLAS (default) -# mkl for MKL -# open for OpenBlas -# BLAS := atlas -BLAS := mkl -# Custom (MKL/ATLAS/OpenBLAS) include and lib directories. -# Leave commented to accept the defaults for your choice of BLAS -# (which should work)! -# BLAS_INCLUDE := /path/to/your/blas -# BLAS_LIB := /path/to/your/blas -BLAS_INCLUDE := /usr/local/mkl/include -BLAS_LIB := /usr/local/mkl/lib - -# Homebrew puts openblas in a directory that is not on the standard search path -# BLAS_INCLUDE := $(shell brew --prefix openblas)/include -# BLAS_LIB := $(shell brew --prefix openblas)/lib - -# This is required only if you will compile the matlab interface. -# MATLAB directory should contain the mex binary in /bin. -# MATLAB_DIR := /usr/local -# MATLAB_DIR := /Applications/MATLAB_R2012b.app -MATLAB_DIR := /opt/MATLAB/R2015a/ - -# NOTE: this is required only if you will compile the python interface. -# We need to be able to find Python.h and numpy/arrayobject.h. -# PYTHON_INCLUDE := /usr/include/python2.7 \ - /usr/lib/python2.7/dist-packages/numpy/core/include -# Anaconda Python distribution is quite popular. Include path: -# Verify anaconda location, sometimes it's in root. -ANACONDA_HOME := $(HOME)/anaconda -PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ - $(ANACONDA_HOME)/include/python2.7 \ - $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \ - -# We need to be able to find libpythonX.X.so or .dylib. -# PYTHON_LIB := /usr/lib -PYTHON_LIB := $(ANACONDA_HOME)/lib - -# Homebrew installs numpy in a non standard path (keg only) -# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include -# PYTHON_LIB += $(shell brew --prefix numpy)/lib - -# Uncomment to support layers written in Python (will link against Python libs) -# WITH_PYTHON_LAYER := 1 - -# Whatever else you find you need goes here. -INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include $(HOME)/include -LIBRARY_DIRS := /opt/rh/devtoolset-3/root/usr/lib/gcc/x86_64-redhat-linux/4.9.1/ $(PYTHON_LIB) /usr/lib64 /usr/lib $(HOME)/lib $(HOME)/lib64 /usr/local/lib - -# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies -# INCLUDE_DIRS += $(shell brew --prefix)/include -# LIBRARY_DIRS += $(shell brew --prefix)/lib - -# Uncomment to use `pkg-config` to specify OpenCV library paths. -# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.) -# USE_PKG_CONFIG := 1 - -BUILD_DIR := build -DISTRIBUTE_DIR := distribute - -# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171 -# DEBUG := 1 -# VIENNACL_DEBUG := 0 - -# The ID of the GPU that 'make runtest' will use to run unit tests. -TEST_GPUID := 0 - -# enable pretty build (comment to see full commands) -#Q ?= @ From 2bdba45aaa4771cbbf214dc8054d0c6588548d6f Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 7 Nov 2015 16:41:50 +0100 Subject: [PATCH 207/254] Malis cleanup, OpenCL ND pooling fix --- src/caffe/greentea/cl_kernels.cpp | 4 +- src/caffe/greentea/cl_kernels/pooling_nd.cl | 10 +++- src/caffe/layers/malis_loss_layer.cpp | 65 ++------------------- src/caffe/test/test_pooling_nd_layer.cpp | 18 +++--- 4 files changed, 25 insertions(+), 72 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 49a8632fe01..ac52e4ad66b 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -26,7 +26,7 @@ std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#en std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT @@ -50,7 +50,7 @@ std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#e std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/pooling_nd.cl b/src/caffe/greentea/cl_kernels/pooling_nd.cl index 69153e0f67b..8fc6dfe2eae 100644 --- a/src/caffe/greentea/cl_kernels/pooling_nd.cl +++ b/src/caffe/greentea/cl_kernels/pooling_nd.cl @@ -25,6 +25,9 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n, for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { int_tp offset = 1; int_tp num = index; + + bool do_continue = false; + for (i = num_axes - 1; i >= 0; --i) { d_idx[i] = num % pooled_size[i]; d_start[i] = d_idx[i] * stride[i] - pad[i]; @@ -41,9 +44,14 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n, } else { top_mask[index] = -1; } - return; + do_continue = true; } } + + if(do_continue) { + continue; + } + int_tp chan = num % channels; num /= channels; offset *= (num * channels + chan); diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index f83fc194915..0edd5efd237 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -19,8 +20,6 @@ #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -// #define CAFFE_MALIS_DEBUG - namespace caffe { template @@ -105,16 +104,17 @@ void MalisLossLayer::Malis(const Dtype* conn_data, nPairPos += (segSizes[seg_data[i]] - 1); } } + int64_t nPairTot = (nLabeledVert * (nLabeledVert - 1)) / 2; int64_t nPairNeg = nPairTot - nPairPos; int64_t nPairNorm; + if (pos) { nPairNorm = nPairPos; } else { nPairNorm = nPairNeg; } - int64_t edgeCount = 0; // Loop over #edges for (int64_t d = 0, i = 0; d < conn_dims[0]; ++d) { @@ -216,7 +216,7 @@ void MalisLossLayer::Malis(const Dtype* conn_data, } else if ((!pos) && (it1->first != it2->first)) { // -ve example pairs dl = (conn_data[minEdge]); - loss += dl * nPair; + loss += dl * dl * nPair; // Use hinge loss dloss_data[minEdge] += dl * nPair; if (conn_data[minEdge] > Dtype(0.5)) { // an error @@ -281,11 +281,6 @@ void MalisLossLayer::LayerSetUp(const vector*>& bottom, // Optional (bottom 3): // Bottom 3: Edge connectivity, size #edges * 3, shaped (Z,Y,X);(Z,Y,X);... // (this means pairs of 3 per edge) - -#ifdef CAFFE_MALIS_DEBUG - cv::namedWindow("labelled"); - cv::namedWindow("test"); -#endif } template @@ -336,56 +331,6 @@ void MalisLossLayer::Reshape(const vector*>& bottom, template void MalisLossLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { -#ifdef CAFFE_MALIS_DEBUG - // This is for debugging only: - { - std::vector labels; - const Dtype* seg_data = bottom[2]->cpu_data(); - for (int_tp i = 0; i < bottom[2]->height() * bottom[2]->width(); ++i) { - int_tp val = static_cast(seg_data[i]); - bool found = false; - for (int_tp j = 0; j < labels.size(); ++j) { - if (val == labels[j]) { - found = true; - } - } - if (found == false) { - labels.push_back(val); - } - } - - std::vector colors; - - for (int_tp i = 0; i < labels.size(); ++i) { - unsigned char r = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - unsigned char g = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - unsigned char b = 255 * (rand() / (1.0 + RAND_MAX)); // NOLINT - - cv::Vec3b color(r, g, b); - colors.push_back(color); - } - - cv::Mat output = cv::Mat::zeros(cv::Size(bottom[1]->height(), - bottom[1]->width()), CV_8UC3); - - const Dtype* imgdata = bottom[2]->cpu_data(); - - for (int_tp i = 0; i < bottom[1]->height() * bottom[1]->width(); ++i) { - int_tp val = imgdata[i]; - if (val == 0) { - output.at(i) = cv::Vec3b(0, 0, 0); - continue; - } - for (int_tp j = 0; j < labels.size(); ++j) { - if (val == labels[j]) { - output.at(i) = colors[j]; - } - } - } - cv::imshow("labelled", output); - } -#endif - // Set up the neighborhood nhood_data_.clear(); if (bottom.size() == 4) { @@ -450,7 +395,6 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &classerr_out, &rand_index_out); loss += loss_out; - std::cout << loss << std::endl; Malis(&affinity_data_pos[batch_offset * batch], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], @@ -459,7 +403,6 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &classerr_out, &rand_index_out); loss += loss_out; - std::cout << loss << std::endl; } // Normalized loss over batch size diff --git a/src/caffe/test/test_pooling_nd_layer.cpp b/src/caffe/test/test_pooling_nd_layer.cpp index 7fafc0da673..57907280613 100644 --- a/src/caffe/test/test_pooling_nd_layer.cpp +++ b/src/caffe/test/test_pooling_nd_layer.cpp @@ -26,14 +26,14 @@ class PoolingNDLayerTest : public GPUDeviceTest { virtual void SetUp() { BlobShape shape; shape.add_dim(1); // Batch - shape.add_dim(1); // Channels + shape.add_dim(8); // Channels shape.add_dim(4); // Depth shape.add_dim(4); // Height shape.add_dim(4); // Width blob_bottom_->Reshape(shape); shape.add_dim(1); // Batch - shape.add_dim(1); // Channels + shape.add_dim(8); // Channels shape.add_dim(2); // Depth shape.add_dim(2); // Height shape.add_dim(2); // Width @@ -73,13 +73,15 @@ class PoolingNDLayerTest : public GPUDeviceTest { TypeParam *bottom_data = blob_bottom_->mutable_cpu_data(); - std::vector maxval(8); + std::vector maxval(8 * 8); for (int_tp cd = 0; cd < d; ++cd) { for (int_tp ch = 0; ch < h; ++ch) { for (int_tp cw = 0; cw < w; ++cw) { - bottom_data[cw + ch * w + cd * w * h] = + for (int batch = 0; batch < 8; batch ++) { + bottom_data[batch * 64 + cw + ch * w + cd * w * h] = cw + ch * w + cd * w * h; + } maxval[cw/2 + (ch/2)*2 + (cd/2)*4] = std::max(bottom_data[cw + ch * w + cd * w * h], maxval[cw/2 + (ch/2)*2 + (cd/2)*4]); @@ -91,10 +93,9 @@ class PoolingNDLayerTest : public GPUDeviceTest { const TypeParam *top_data = blob_top_->cpu_data(); - for (int i = 0; i < 2*2*2; ++i) { - EXPECT_EQ(maxval[i], top_data[i]); + for (int i = 0; i < 2*2*2 * 8; ++i) { + EXPECT_EQ(maxval[i % 8], top_data[i]); } - } void TestBackward() { @@ -153,7 +154,8 @@ class PoolingNDLayerTest : public GPUDeviceTest { for (int_tp ch = 0; ch < h; ++ch) { for (int_tp cw = 0; cw < w; ++cw) { if (maxval[cw/2 + (ch/2)*2 + (cd/2)*4] == cw + ch * w + cd * w * h) { - EXPECT_EQ(maxval[cw/2 + (ch/2)*2 + (cd/2)*4], bottom_diff[cw + ch * w + cd * w * h]); + EXPECT_EQ(maxval[cw/2 + (ch/2)*2 + (cd/2)*4], + bottom_diff[cw + ch * w + cd * w * h]); } else { EXPECT_EQ(0, bottom_diff[cw + ch * w + cd * w * h]); } From aecda3cc0ffdc1329a4fd554e64ba7eea19db146 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 7 Nov 2015 17:12:24 +0100 Subject: [PATCH 208/254] Malis niceification for Srini. --- src/caffe/layers/malis_loss_layer.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 0edd5efd237..5e7fbf78aad 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -206,16 +206,16 @@ void MalisLossLayer::Malis(const Dtype* conn_data, if (pos && (it1->first == it2->first)) { // +ve example pairs dl = (Dtype(1.0) - conn_data[minEdge]); - loss += dl * nPair; + loss += dl * dl * nPair; // Use hinge loss - dloss_data[minEdge] -= dl * nPair; + dloss_data[minEdge] += dl * nPair; if (conn_data[minEdge] <= Dtype(0.5)) { // an error nPairIncorrect += nPair; } } else if ((!pos) && (it1->first != it2->first)) { // -ve example pairs - dl = (conn_data[minEdge]); + dl = (-conn_data[minEdge]); loss += dl * dl * nPair; // Use hinge loss dloss_data[minEdge] += dl * nPair; @@ -428,7 +428,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, #pragma omp parallel for for (int_tp i = 0; i < bottom[0]->count(); ++i) { - bottom_diff[i] = dloss_pos_data[i] + dloss_neg_data[i]; + bottom_diff[i] = dloss_neg_data[i] - dloss_pos_data[i]; } } } From f4c73609b1ee9993d85f9ca9c2d031c5c182ccb7 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 7 Nov 2015 17:17:07 +0100 Subject: [PATCH 209/254] Malis fix (2). --- src/caffe/layers/malis_loss_layer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 5e7fbf78aad..32fe42bb136 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -428,7 +428,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, #pragma omp parallel for for (int_tp i = 0; i < bottom[0]->count(); ++i) { - bottom_diff[i] = dloss_neg_data[i] - dloss_pos_data[i]; + bottom_diff[i] = -(dloss_neg_data[i] + dloss_pos_data[i]); } } } From abb88465b6800999303e2c72e13eb5354520d207 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 14 Nov 2015 02:18:31 +0100 Subject: [PATCH 210/254] Fixed malis loss for aniso. --- src/caffe/layers/malis_loss_layer.cpp | 42 ++++++++++++--------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 32fe42bb136..0f01726e16d 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -36,12 +36,6 @@ class MalisAffinityGraphCompare { }; // Derived from https://github.com/srinituraga/malis/blob/master/matlab/malis_loss_mex.cpp -// conn_data: 4d connectivity graph [y * x * z * #edges] -// nhood_data: graph neighborhood descriptor [3 * #edges] -// seg_data: true target segmentation [y * x * z] -// pos: is this a positive example pass [true] or -// a negative example pass [false] ? -// margin: sq-sq loss margin [0.3] template void MalisLossLayer::Malis(const Dtype* conn_data, const int_tp conn_num_dims, @@ -69,9 +63,9 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // prodDims stores x, x*y, x*y*z offsets std::vector prodDims(conn_num_dims - 1); - prodDims[0] = 1; + prodDims[conn_num_dims - 2] = 1; for (int64_t i = 1; i < conn_num_dims - 1; ++i) { - prodDims[i] = prodDims[i - 1] * conn_dims[i]; + prodDims[conn_num_dims - 2 - i] = prodDims[conn_num_dims - 1 - i] * conn_dims[i]; // std::cout << i << " prodDims: " << prodDims[i] << std::endl; } @@ -125,12 +119,12 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // Loop over X for (int64_t x = 0; x < conn_dims[3]; ++x, ++i) { // Out-of-bounds check: - if (!((z + nhood_data[d * conn_dims[0] + 0] < 0) - ||(z + nhood_data[d * conn_dims[0] + 0] >= conn_dims[1]) - ||(y + nhood_data[d * conn_dims[0] + 1] < 0) - ||(y + nhood_data[d * conn_dims[0] + 1] >= conn_dims[2]) - ||(x + nhood_data[d * conn_dims[0] + 2] < 0) - ||(x + nhood_data[d * conn_dims[0] + 2] >= conn_dims[3]))) { + if (!((z + nhood_data[d * 3 + 0] < 0) + ||(z + nhood_data[d * 3 + 0] >= conn_dims[1]) + ||(y + nhood_data[d * 3 + 1] < 0) + ||(y + nhood_data[d * 3 + 1] >= conn_dims[2]) + ||(x + nhood_data[d * 3 + 2] < 0) + ||(x + nhood_data[d * 3 + 2] >= conn_dims[3]))) { ++edgeCount; } } @@ -144,18 +138,18 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // Loop over #edges for (int64_t d = 0, i = 0; d < conn_dims[0]; ++d) { // Loop over Z - for (int64_t x = 0; x < conn_dims[3]; ++x) { + for (int64_t z = 0; z < conn_dims[1]; ++z) { // Loop over Y for (int64_t y = 0; y < conn_dims[2]; ++y) { // Loop over X - for (int64_t z = 0; z < conn_dims[1]; ++z, ++i) { + for (int64_t x = 0; x < conn_dims[3]; ++x, ++i) { // Out-of-bounds check: - if (!((z + nhood_data[d * conn_dims[0] + 0] < 0) - ||(z + nhood_data[d * conn_dims[0] + 0] >= conn_dims[1]) - ||(y + nhood_data[d * conn_dims[0] + 1] < 0) - ||(y + nhood_data[d * conn_dims[0] + 1] >= conn_dims[2]) - ||(x + nhood_data[d * conn_dims[0] + 2] < 0) - ||(x + nhood_data[d * conn_dims[0] + 2] >= conn_dims[3]))) { + if (!((z + nhood_data[d * 3 + 0] < 0) + ||(z + nhood_data[d * 3 + 0] >= conn_dims[1]) + ||(y + nhood_data[d * 3 + 1] < 0) + ||(y + nhood_data[d * 3 + 1] >= conn_dims[2]) + ||(x + nhood_data[d * 3 + 2] < 0) + ||(x + nhood_data[d * 3 + 2] >= conn_dims[3]))) { pqueue[j++] = i; } } @@ -182,7 +176,7 @@ void MalisLossLayer::Malis(const Dtype* conn_data, minEdge = pqueue[i]; // nVert = x * y * z, minEdge in [0, x * y * z * #edges] - // e: edge dimension (0: X, 1: Y, 2: Z) + // e: edge dimension e = minEdge / nVert; // v1: node at edge beginning @@ -191,6 +185,8 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // v2: neighborhood node at edge e v2 = v1 + nHood[e]; + // std::cout << "V1: " << v1 << ", V2: " << v2 << std::endl; + set1 = dsets.find_set(v1); set2 = dsets.find_set(v2); From 39bb9c54447afdff19cd7b977e14cfa5d4b3a203 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 19 Nov 2015 03:25:51 +0100 Subject: [PATCH 211/254] Malis correction. --- src/caffe/layers/malis_loss_layer.cpp | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 1d5d6873166..b0d2fe6616e 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -67,7 +67,8 @@ void MalisLossLayer::Malis(const Dtype* conn_data, for (int64_t i = 1; i < conn_num_dims - 1; ++i) { prodDims[conn_num_dims - 2 - i] = prodDims[conn_num_dims - 1 - i] * conn_dims[i]; - // std::cout << i << " prodDims: " << prodDims[i] << std::endl; + // std::cout << conn_num_dims - 2 - i << " prodnhood_dims[1]Dims: " + // << prodDims[conn_num_dims - 2 - i] << std::endl; } /* convert n-d offset vectors into linear array offset scalars */ @@ -120,12 +121,12 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // Loop over X for (int64_t x = 0; x < conn_dims[3]; ++x, ++i) { // Out-of-bounds check: - if (!((z + nhood_data[d * 3 + 0] < 0) - ||(z + nhood_data[d * 3 + 0] >= conn_dims[1]) - ||(y + nhood_data[d * 3 + 1] < 0) - ||(y + nhood_data[d * 3 + 1] >= conn_dims[2]) - ||(x + nhood_data[d * 3 + 2] < 0) - ||(x + nhood_data[d * 3 + 2] >= conn_dims[3]))) { + if (!((z + nhood_data[d * nhood_dims[1] + 0] < 0) + ||(z + nhood_data[d * nhood_dims[1] + 0] >= conn_dims[1]) + ||(y + nhood_data[d * nhood_dims[1] + 1] < 0) + ||(y + nhood_data[d * nhood_dims[1] + 1] >= conn_dims[2]) + ||(x + nhood_data[d * nhood_dims[1] + 2] < 0) + ||(x + nhood_data[d * nhood_dims[1] + 2] >= conn_dims[3]))) { ++edgeCount; } } @@ -145,12 +146,12 @@ void MalisLossLayer::Malis(const Dtype* conn_data, // Loop over X for (int64_t x = 0; x < conn_dims[3]; ++x, ++i) { // Out-of-bounds check: - if (!((z + nhood_data[d * 3 + 0] < 0) - ||(z + nhood_data[d * 3 + 0] >= conn_dims[1]) - ||(y + nhood_data[d * 3 + 1] < 0) - ||(y + nhood_data[d * 3 + 1] >= conn_dims[2]) - ||(x + nhood_data[d * 3 + 2] < 0) - ||(x + nhood_data[d * 3 + 2] >= conn_dims[3]))) { + if (!((z + nhood_data[d * nhood_dims[1] + 0] < 0) + ||(z + nhood_data[d * nhood_dims[1] + 0] >= conn_dims[1]) + ||(y + nhood_data[d * nhood_dims[1] + 1] < 0) + ||(y + nhood_data[d * nhood_dims[1] + 1] >= conn_dims[2]) + ||(x + nhood_data[d * nhood_dims[1] + 2] < 0) + ||(x + nhood_data[d * nhood_dims[1] + 2] >= conn_dims[3]))) { pqueue[j++] = i; } } From 85794eaaa84d65d8f8c482e92fc29321ed9953be Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 26 Nov 2015 01:35:04 +0100 Subject: [PATCH 212/254] Affinity layer cleanup. --- src/caffe/layers/affinity_layer.cpp | 59 +++++------------------------ 1 file changed, 10 insertions(+), 49 deletions(-) diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp index 8df157967c6..48ed45085d3 100644 --- a/src/caffe/layers/affinity_layer.cpp +++ b/src/caffe/layers/affinity_layer.cpp @@ -80,15 +80,15 @@ void AffinityLayer::Forward_cpu(const vector*>& bottom, Dtype p2 = bottom_data[offsets_[bidx] * inner_num + (i + 1) * bottom[bidx]->width() + j]; - // X edge - top_data[i * bottom[bidx]->width() + j] = std::min(p0, p1); - xmin = p0 < p1 ? 0 : 1; + // Y edge + top_data[i * bottom[bidx]->width() + j] = std::min(p0, p2); + xmin = p0 < p2 ? 0 : 1; min_data[i * bottom[bidx]->width() + j] = xmin; - // Y edge + // X edge top_data[inner_num - + i * bottom[bidx]->width() + j] = std::min(p0, p2); - ymin = p0 < p2 ? 0 : 1; + + i * bottom[bidx]->width() + j] = std::min(p0, p1); + ymin = p0 < p1 ? 0 : 1; min_data[inner_num + i * bottom[bidx]->width() + j] = ymin; } @@ -114,11 +114,11 @@ void AffinityLayer::Backward_cpu(const vector*>& top, // Spread out the affinity losses to pixels for (int_tp i = 0; i < bottom[0]->height() - 1; ++i) { for (int_tp j = 0; j < bottom[0]->width() - 1; ++j) { - Dtype lx = top_diff[i * bottom[0]->width() + j]; - Dtype ly = top_diff[inner_num + i * bottom[0]->width() + j]; + Dtype ly = top_diff[i * bottom[0]->width() + j]; + Dtype lx = top_diff[inner_num + i * bottom[0]->width() + j]; - int_tp mx = min_data[i * bottom[0]->width() + j]; - int_tp my = min_data[bottom[0]->width() + int_tp my = min_data[i * bottom[0]->width() + j]; + int_tp mx = min_data[bottom[0]->width() * bottom[0]->height() + i * bottom[0]->width() + j]; // Only propagate to min index contributor of affinity graph @@ -128,45 +128,6 @@ void AffinityLayer::Backward_cpu(const vector*>& top, bottom_diff[1 * inner_num + (i + my) * bottom[0]->width() + j] += ly; } } -#ifdef CAFFE_AFFINITY_DEBUG - { - cv::Mat tmp; - - Dtype* prob_rd = bottom[bidx]->mutable_cpu_data(); - - cv::Mat wrapped_prob(bottom[0]->height(), bottom[0]->width(), - cv::DataType::type, - prob_rd, sizeof(Dtype) * bottom[0]->width()); - cv::imshow("prob", wrapped_prob); - - cv::Mat wrapped_diff(bottom[0]->height(), bottom[0]->width(), - cv::DataType::type, - bottom_diff, sizeof(Dtype) * bottom[0]->width()); - - Dtype sum = std::accumulate(bottom_diff, - bottom_diff - + bottom[0]->height() * bottom[0]->width(), - 0.0); - - Dtype mean = sum / (bottom[0]->width()*bottom[0]->height()); - - std::vector msd(bottom[0]->height() * bottom[0]->width()); - std::transform(bottom_diff, - bottom_diff + (bottom[0]->height()*bottom[0]->width()), - msd.begin(), std::bind2nd(std::minus(), mean)); - - Dtype sqsum = std::inner_product(msd.begin(), - msd.end(), msd.begin(), 0.0); - Dtype stdev = std::sqrt(sqsum / (bottom[0]->width() - * bottom[0]->height())); - - wrapped_diff.convertTo(tmp, CV_32FC1, 1.0 / (2.0 * stdev), - (stdev - mean) * 1.0 / (2.0 * stdev)); - - cv::imshow("diff", tmp); - cv::waitKey(2); - } -#endif } } } From 664349818ab01b87f1b61292438b3cfceba3d8cd Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 26 Nov 2015 02:37:18 +0100 Subject: [PATCH 213/254] Fixed connected components. --- include/caffe/vision_layers.hpp | 2 +- src/caffe/layers/affinity_layer.cpp | 8 ++++---- src/caffe/layers/connected_component_layer.cpp | 16 ++++++++-------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 0fdab7e9630..800847bff50 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -90,7 +90,7 @@ class ConnectedComponentLayer : public Layer { const vector*>& bottom); private: - cv::Mat FindBlobs(const int_tp maxlabel, const cv::Mat &input); + cv::Mat FindBlobs(const int maxlabel, const cv::Mat &input); }; /** diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp index 48ed45085d3..60bceca1185 100644 --- a/src/caffe/layers/affinity_layer.cpp +++ b/src/caffe/layers/affinity_layer.cpp @@ -82,15 +82,15 @@ void AffinityLayer::Forward_cpu(const vector*>& bottom, // Y edge top_data[i * bottom[bidx]->width() + j] = std::min(p0, p2); - xmin = p0 < p2 ? 0 : 1; - min_data[i * bottom[bidx]->width() + j] = xmin; + ymin = p0 < p2 ? 0 : 1; + min_data[i * bottom[bidx]->width() + j] = ymin; // X edge top_data[inner_num + i * bottom[bidx]->width() + j] = std::min(p0, p1); - ymin = p0 < p1 ? 0 : 1; + xmin = p0 < p1 ? 0 : 1; min_data[inner_num - + i * bottom[bidx]->width() + j] = ymin; + + i * bottom[bidx]->width() + j] = xmin; } } } diff --git a/src/caffe/layers/connected_component_layer.cpp b/src/caffe/layers/connected_component_layer.cpp index e3b0042c380..e61a28cae6a 100644 --- a/src/caffe/layers/connected_component_layer.cpp +++ b/src/caffe/layers/connected_component_layer.cpp @@ -13,18 +13,18 @@ namespace caffe { // Derived from // http://nghiaho.com/uploads/code/opencv_connected_component/blob.cpp template -cv::Mat ConnectedComponentLayer::FindBlobs(int_tp maxlabel, +cv::Mat ConnectedComponentLayer::FindBlobs(int maxlabel, const cv::Mat &input) { // Fill the label_image with the blobs cv::Mat label_image; input.convertTo(label_image, CV_32SC1); - int_tp label_count = maxlabel + 1; + int label_count = maxlabel + 1; // Segment into label numbers higher than the original label numbers - for (int_tp y = 0; y < label_image.rows; y++) { - int_tp *row = reinterpret_cast(label_image.ptr(y)); - for (int_tp x = 0; x < label_image.cols; x++) { + for (int y = 0; y < label_image.rows; y++) { + int *row = reinterpret_cast(label_image.ptr(y)); + for (int x = 0; x < label_image.cols; x++) { // Skip background and already labeled areas if (row[x] > maxlabel || row[x] == 0) { continue; @@ -60,10 +60,10 @@ void ConnectedComponentLayer::Forward_cpu( cv::Mat img(bottom[0]->height(), bottom[0]->width(), CV_8SC1); for (int_tp nc = 0; nc < bottom[0]->num() * bottom[0]->channels(); ++nc) { - int_tp maxlabel = 0; + int maxlabel = 0; for (int_tp y = 0; y < bottom[0]->height(); ++y) { for (int_tp x = 0; x < bottom[0]->width(); ++x) { - int_tp val = bottom_data[nc * bottom[0]->width() * bottom[0]->height() + int val = bottom_data[nc * bottom[0]->width() * bottom[0]->height() + bottom[0]->width() * y + x]; if (val > maxlabel) { maxlabel = val; @@ -76,7 +76,7 @@ void ConnectedComponentLayer::Forward_cpu( for (int_tp y = 0; y < seg.rows; ++y) { for (int_tp x = 0; x < seg.cols; ++x) { top_data[nc * bottom[0]->width() * bottom[0]->height() - + bottom[0]->width() * y + x] = seg.at(y, x); + + bottom[0]->width() * y + x] = seg.at(y, x); } } } From 842464f82570757c8acf701a3dfe3a857b09f661 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 26 Nov 2015 03:17:22 +0100 Subject: [PATCH 214/254] Affinity offset fix. --- src/caffe/layers/affinity_layer.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/caffe/layers/affinity_layer.cpp b/src/caffe/layers/affinity_layer.cpp index 60bceca1185..c7bd28cf148 100644 --- a/src/caffe/layers/affinity_layer.cpp +++ b/src/caffe/layers/affinity_layer.cpp @@ -122,10 +122,14 @@ void AffinityLayer::Backward_cpu(const vector*>& top, * bottom[0]->height() + i * bottom[0]->width() + j]; // Only propagate to min index contributor of affinity graph - bottom_diff[0 * inner_num + i * bottom[0]->width() + (j + mx)] -= lx; - bottom_diff[0 * inner_num + (i + my) * bottom[0]->width() + j] -= ly; - bottom_diff[1 * inner_num + i * bottom[0]->width() + (j + mx)] += lx; - bottom_diff[1 * inner_num + (i + my) * bottom[0]->width() + j] += ly; + bottom_diff[offsets_[bidx] + * inner_num + i * bottom[0]->width() + (j + mx)] += lx; + bottom_diff[offsets_[bidx] + * inner_num + (i + my) * bottom[0]->width() + j] += ly; + bottom_diff[((offsets_[bidx] + 1) % 2) + * inner_num + i * bottom[0]->width() + (j + mx)] -= lx; + bottom_diff[((offsets_[bidx] + 1) % 2) + * inner_num + (i + my) * bottom[0]->width() + j] -= ly; } } } From d534293d764a7293682f24f8e3b985d487b2cef7 Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 27 Nov 2015 01:44:41 +0100 Subject: [PATCH 215/254] Fixed MALIS for 2 and > 3 edges with !(x==y==z) input sizes. --- src/caffe/layers/malis_loss_layer.cpp | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index b0d2fe6616e..c5c6afc2e06 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -66,13 +66,14 @@ void MalisLossLayer::Malis(const Dtype* conn_data, prodDims[conn_num_dims - 2] = 1; for (int64_t i = 1; i < conn_num_dims - 1; ++i) { prodDims[conn_num_dims - 2 - i] = prodDims[conn_num_dims - 1 - i] - * conn_dims[i]; - // std::cout << conn_num_dims - 2 - i << " prodnhood_dims[1]Dims: " - // << prodDims[conn_num_dims - 2 - i] << std::endl; + * conn_dims[conn_num_dims - 1 - i]; + // std::cout << conn_num_dims - 2 - i << " dims: " + // << prodDims[conn_num_dims - 2 - i] << std::endl; } /* convert n-d offset vectors into linear array offset scalars */ // nHood is a vector of size #edges + std::vector nHood(nhood_dims[0]); for (int64_t i = 0; i < nhood_dims[0]; ++i) { nHood[i] = 0; @@ -255,6 +256,10 @@ void MalisLossLayer::Malis(const Dtype* conn_data, } else { loss = 0; } + + // std::cout << "nPairIncorrect: " << nPairIncorrect << std::endl; + // std::cout << "nPairNorm: " << nPairNorm << std::endl; + *loss_out = loss; classerr = static_cast(nPairIncorrect) / static_cast(nPairNorm); @@ -347,9 +352,9 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, // 2 edges: +Y, +X (0,1,0); (0,0,1) // 3 edges: +Z, +Y, +X (1,0,0); (0,1,0); (0,0,1) for (int_tp i = 3 - nedges_; i < 3; ++i) { - nhood_data_.push_back((i + 0) % 3 == 0 ? 1 : 0); - nhood_data_.push_back((i + 1) % 3 == 0 ? 1 : 0); + nhood_data_.push_back((i + 3) % 3 == 0 ? 1 : 0); nhood_data_.push_back((i + 2) % 3 == 0 ? 1 : 0); + nhood_data_.push_back((i + 1) % 3 == 0 ? 1 : 0); } } @@ -393,6 +398,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &classerr_out, &rand_index_out); loss += loss_out; + // std::cout << "NEG: " << loss_out << std::endl; Malis(&affinity_data_pos[batch_offset * batch], conn_num_dims_, &conn_dims_[0], &nhood_data_[0], &nhood_dims_[0], @@ -401,6 +407,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, &classerr_out, &rand_index_out); loss += loss_out; + // std::cout << "POS: " << loss_out << std::endl; } // Normalized loss over batch size From 3a51391b7b863fe215ff5d5ae03f49c7a6446d56 Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 30 Nov 2015 03:31:59 +0100 Subject: [PATCH 216/254] Malis small indexing change. --- src/caffe/layers/malis_loss_layer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index c5c6afc2e06..f2d3ec03a52 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -66,7 +66,7 @@ void MalisLossLayer::Malis(const Dtype* conn_data, prodDims[conn_num_dims - 2] = 1; for (int64_t i = 1; i < conn_num_dims - 1; ++i) { prodDims[conn_num_dims - 2 - i] = prodDims[conn_num_dims - 1 - i] - * conn_dims[conn_num_dims - 1 - i]; + * conn_dims[conn_num_dims - i]; // std::cout << conn_num_dims - 2 - i << " dims: " // << prodDims[conn_num_dims - 2 - i] << std::endl; } From 1148d80418fd8f2a0a2eeea28571fdf6f5a75cbc Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 3 Dec 2015 08:21:02 +0100 Subject: [PATCH 217/254] Lint fix. --- include/caffe/layers/affinity_layer.hpp | 4 ++-- include/caffe/layers/connected_component_layer.hpp | 4 ++-- include/caffe/layers/dropout_layer.hpp | 2 +- include/caffe/layers/malis_loss_layer.hpp | 4 ++-- include/caffe/layers/mergecrop_layer.hpp | 4 ++-- include/caffe/layers/spp_layer.hpp | 4 +++- src/caffe/layers/connected_component_layer.cpp | 2 +- src/caffe/layers/malis_loss_layer.cpp | 2 +- src/caffe/layers/mergecrop_layer.cpp | 2 +- src/caffe/layers/mergecrop_layer.cu | 2 +- src/caffe/test/test_convolution_nd_layer.cpp | 2 +- src/caffe/test/test_mergecrop_layer.cpp | 2 +- src/caffe/test/test_pooling_nd_layer.cpp | 2 +- src/caffe/test/test_pooling_ndsk_layer.cpp | 2 +- 14 files changed, 20 insertions(+), 18 deletions(-) diff --git a/include/caffe/layers/affinity_layer.hpp b/include/caffe/layers/affinity_layer.hpp index e8a42ec1c13..2d1a72d223f 100644 --- a/include/caffe/layers/affinity_layer.hpp +++ b/include/caffe/layers/affinity_layer.hpp @@ -43,6 +43,6 @@ class AffinityLayer : public Layer { std::vector offsets_; }; -} +} // namespace caffe -#endif // CAFFE_AFFINITY_LAYER_HPP_ +#endif // CAFFE_AFFINITY_LAYER_HPP_ diff --git a/include/caffe/layers/connected_component_layer.hpp b/include/caffe/layers/connected_component_layer.hpp index 0a455925035..c6eb87fd96f 100644 --- a/include/caffe/layers/connected_component_layer.hpp +++ b/include/caffe/layers/connected_component_layer.hpp @@ -49,6 +49,6 @@ class ConnectedComponentLayer : public Layer { cv::Mat FindBlobs(const int maxlabel, const cv::Mat &input); }; -} +} // namespace caffe -#endif // CAFFE_CONNECTED_COMPONENT_LAYER_HPP_ +#endif // CAFFE_CONNECTED_COMPONENT_LAYER_HPP_ diff --git a/include/caffe/layers/dropout_layer.hpp b/include/caffe/layers/dropout_layer.hpp index 10f5de60321..d12711029ad 100644 --- a/include/caffe/layers/dropout_layer.hpp +++ b/include/caffe/layers/dropout_layer.hpp @@ -66,7 +66,7 @@ class DropoutLayer : public NeuronLayer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - /// when divided by uint_tp_tp_MAX, the randomly generated values @f$u\sim U(0,1)@f$ + /// when divided by uint_MAX, the randomly generated values @f$u\sim U(0,1)@f$ Blob rand_vec_; /// the probability @f$ p @f$ of dropping any input Dtype threshold_; diff --git a/include/caffe/layers/malis_loss_layer.hpp b/include/caffe/layers/malis_loss_layer.hpp index dc7ffab5e62..ab2b8623fb1 100644 --- a/include/caffe/layers/malis_loss_layer.hpp +++ b/include/caffe/layers/malis_loss_layer.hpp @@ -55,6 +55,6 @@ class MalisLossLayer : public LossLayer { Blob dloss_neg_; }; -} +} // namespace caffe -#endif // CAFFE_MALIS_LOSS_LAYER_HPP_ +#endif // CAFFE_MALIS_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/mergecrop_layer.hpp b/include/caffe/layers/mergecrop_layer.hpp index 0547805e7d7..90ea470e230 100644 --- a/include/caffe/layers/mergecrop_layer.hpp +++ b/include/caffe/layers/mergecrop_layer.hpp @@ -53,6 +53,6 @@ class MergeCropLayer : public Layer { Blob shape_b_; }; -} +} // namespace caffe -#endif // CAFFE_MERGECROP_LAYER_HPP_ +#endif // CAFFE_MERGECROP_LAYER_HPP_ diff --git a/include/caffe/layers/spp_layer.hpp b/include/caffe/layers/spp_layer.hpp index eb050a45669..20f3ed188bd 100644 --- a/include/caffe/layers/spp_layer.hpp +++ b/include/caffe/layers/spp_layer.hpp @@ -37,7 +37,9 @@ class SPPLayer : public Layer { // calculates the kernel and stride dimensions for the pooling layer, // returns a correctly configured LayerParameter for a PoolingLayer virtual LayerParameter GetPoolingParam(const int_tp pyramid_level, - const int_tp bottom_h, const int_tp bottom_w, const SPPParameter spp_param); + const int_tp bottom_h, + const int_tp bottom_w, + const SPPParameter spp_param); int_tp pyramid_height_; int_tp bottom_h_, bottom_w_; diff --git a/src/caffe/layers/connected_component_layer.cpp b/src/caffe/layers/connected_component_layer.cpp index 6ce9ca94351..a198c65cff7 100644 --- a/src/caffe/layers/connected_component_layer.cpp +++ b/src/caffe/layers/connected_component_layer.cpp @@ -5,8 +5,8 @@ #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" -#include "caffe/util/math_functions.hpp" #include "caffe/layers/connected_component_layer.hpp" +#include "caffe/util/math_functions.hpp" namespace caffe { diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index b9e1c14f0e7..96e14f8c7a9 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -17,8 +17,8 @@ #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" -#include "caffe/util/math_functions.hpp" #include "caffe/layers/malis_loss_layer.hpp" +#include "caffe/util/math_functions.hpp" namespace caffe { diff --git a/src/caffe/layers/mergecrop_layer.cpp b/src/caffe/layers/mergecrop_layer.cpp index 8a09daae99f..a0426b444d7 100644 --- a/src/caffe/layers/mergecrop_layer.cpp +++ b/src/caffe/layers/mergecrop_layer.cpp @@ -1,8 +1,8 @@ #include #include "caffe/layer.hpp" -#include "caffe/util/math_functions.hpp" #include "caffe/layers/mergecrop_layer.hpp" +#include "caffe/util/math_functions.hpp" namespace caffe { diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index 3be4da84f83..109580704ac 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -1,8 +1,8 @@ #include #include "caffe/layer.hpp" -#include "caffe/util/math_functions.hpp" #include "caffe/layers/mergecrop_layer.hpp" +#include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" diff --git a/src/caffe/test/test_convolution_nd_layer.cpp b/src/caffe/test/test_convolution_nd_layer.cpp index da02fe477f3..49f9b12cafb 100644 --- a/src/caffe/test/test_convolution_nd_layer.cpp +++ b/src/caffe/test/test_convolution_nd_layer.cpp @@ -6,9 +6,9 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" +#include "caffe/layers/conv_layer.hpp" #include "caffe/test/test_caffe_main.hpp" #include "caffe/util/math_functions.hpp" -#include "caffe/layers/conv_layer.hpp" #ifndef CPU_ONLY // CPU-GPU test diff --git a/src/caffe/test/test_mergecrop_layer.cpp b/src/caffe/test/test_mergecrop_layer.cpp index 9fc0ae65336..1c4fb42243e 100644 --- a/src/caffe/test/test_mergecrop_layer.cpp +++ b/src/caffe/test/test_mergecrop_layer.cpp @@ -6,9 +6,9 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" +#include "caffe/layers/mergecrop_layer.hpp" #include "caffe/test/test_caffe_main.hpp" #include "caffe/util/math_functions.hpp" -#include "caffe/layers/mergecrop_layer.hpp" #ifndef CPU_ONLY // CPU-GPU test diff --git a/src/caffe/test/test_pooling_nd_layer.cpp b/src/caffe/test/test_pooling_nd_layer.cpp index 040089e287e..cebcf3da3db 100644 --- a/src/caffe/test/test_pooling_nd_layer.cpp +++ b/src/caffe/test/test_pooling_nd_layer.cpp @@ -7,9 +7,9 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" +#include "caffe/layers/pooling_layer.hpp" #include "caffe/test/test_caffe_main.hpp" #include "caffe/util/math_functions.hpp" -#include "caffe/layers/pooling_layer.hpp" #ifndef CPU_ONLY // CPU-GPU test diff --git a/src/caffe/test/test_pooling_ndsk_layer.cpp b/src/caffe/test/test_pooling_ndsk_layer.cpp index 3758e62ad7b..8b43380d0ed 100644 --- a/src/caffe/test/test_pooling_ndsk_layer.cpp +++ b/src/caffe/test/test_pooling_ndsk_layer.cpp @@ -7,9 +7,9 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" +#include "caffe/layers/pooling_layer.hpp" #include "caffe/test/test_caffe_main.hpp" #include "caffe/util/math_functions.hpp" -#include "caffe/layers/pooling_layer.hpp" #ifndef CPU_ONLY // CPU-GPU test From 984745dc2db5354b7f84c0ff19376950f8397fa6 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 5 Dec 2015 03:09:35 +0100 Subject: [PATCH 218/254] Expose SolverParameter to Python. --- include/caffe/solver.hpp | 4 ++++ python/caffe/_caffe.cpp | 49 ++++++++++++++++++++++++++++++++++++++++ src/caffe/solver.cpp | 11 +++++++++ 3 files changed, 64 insertions(+) diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 6bd3d34c68d..2382bc43eaa 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -48,6 +48,10 @@ class Solver { void InitTrainNet(); void InitTestNets(); + // Allows to change the solver parameters during training + void UpdateSolverParams(const SolverParameter& param); + SolverParameter GetSolverParams(); + // Client of the Solver optionally may call this in order to set the function // that the solver uses to see what action it should take (e.g. snapshot or // exit training early). diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 09d309755a8..62698ad27ac 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -289,6 +289,8 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("test_nets", bp::make_function(&Solver::test_nets, bp::return_internal_reference<>())) .add_property("iter", &Solver::iter) + .add_property("get_solver_params", &Solver::GetSolverParams) + .def("update_solver_params", &Solver::UpdateSolverParams) .def("step", static_cast::*)(const int_tp)>(&Solver::Step)) .def("solve", @@ -297,6 +299,53 @@ BOOST_PYTHON_MODULE(_caffe) { .def("restore", &Solver::Restore) .def("snapshot", &Solver::Snapshot); + + bp::class_("SolverParam", bp::no_init) + .add_property("base_lr", &SolverParameter::base_lr, + &SolverParameter::set_base_lr) + .add_property("max_iter", &SolverParameter::max_iter, + &SolverParameter::set_max_iter) + .add_property("lr_policy", + bp::make_function(&SolverParameter::lr_policy, + bp::return_value_policy()), + static_cast( + &SolverParameter::set_lr_policy)) + .add_property("gamma", &SolverParameter::gamma, + &SolverParameter::set_gamma) + .add_property("power", &SolverParameter::power, + &SolverParameter::set_power) + .add_property("momentum", &SolverParameter::momentum, + &SolverParameter::set_momentum) + .add_property("momentum2", &SolverParameter::momentum2, + &SolverParameter::set_momentum2) + .add_property("delta", &SolverParameter::delta, + &SolverParameter::set_delta) + .add_property("rms_decay", &SolverParameter::rms_decay, + &SolverParameter::set_rms_decay) + .add_property("weight_decay", + &SolverParameter::weight_decay, + &SolverParameter::set_weight_decay) + .add_property("regularization_type", + bp::make_function(&SolverParameter::regularization_type, + bp::return_value_policy()), + static_cast( + &SolverParameter::set_regularization_type)) + .add_property("stepsize", &SolverParameter::stepsize, + &SolverParameter::set_stepsize) + .add_property("snapshot", &SolverParameter::snapshot, + &SolverParameter::set_snapshot) + .add_property("snapshot_prefix", + bp::make_function(&SolverParameter::snapshot_prefix, + bp::return_value_policy()), + static_cast( + &SolverParameter::set_snapshot_prefix)) + .add_property("type", + bp::make_function(&SolverParameter::type, + bp::return_value_policy()), + static_cast( + &SolverParameter::set_type)); + + bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( "SGDSolver", bp::init()); diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 14721c40ff8..4e918db4c95 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -71,6 +71,17 @@ void Solver::Init(const SolverParameter& param) { current_step_ = 0; } + +template +void Solver::UpdateSolverParams(const SolverParameter& param) { + param_ = param; +} + +template +SolverParameter Solver::GetSolverParams() { + return param_; +} + template void Solver::InitTrainNet() { const int_tp num_train_nets = param_.has_net() + param_.has_net_param() From e2f8e7006fcaea2390ea631f1be71f97be98b469 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 6 Dec 2015 04:16:43 +0100 Subject: [PATCH 219/254] Improved pycaffe interface. --- python/caffe/_caffe.cpp | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 62698ad27ac..8332a50e599 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -141,6 +141,10 @@ Solver* GetSolverFromFile(const string& filename) { return SolverRegistry::CreateSolver(param); } +Solver* GetSolver(const SolverParameter& solver_param) { + return SolverRegistry::CreateSolver(solver_param); +} + struct NdarrayConverterGenerator { template struct apply; }; @@ -289,8 +293,8 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("test_nets", bp::make_function(&Solver::test_nets, bp::return_internal_reference<>())) .add_property("iter", &Solver::iter) - .add_property("get_solver_params", &Solver::GetSolverParams) - .def("update_solver_params", &Solver::UpdateSolverParams) + .add_property("solver_params", &Solver::GetSolverParams, + &Solver::UpdateSolverParams) .def("step", static_cast::*)(const int_tp)>(&Solver::Step)) .def("solve", @@ -325,6 +329,8 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("weight_decay", &SolverParameter::weight_decay, &SolverParameter::set_weight_decay) + .add_property("display", &SolverParameter::display, + &SolverParameter::set_display) .add_property("regularization_type", bp::make_function(&SolverParameter::regularization_type, bp::return_value_policy()), @@ -343,7 +349,12 @@ BOOST_PYTHON_MODULE(_caffe) { bp::make_function(&SolverParameter::type, bp::return_value_policy()), static_cast( - &SolverParameter::set_type)); + &SolverParameter::set_type)) + .add_property("net", + bp::make_function(&SolverParameter::net, + bp::return_value_policy()), + static_cast( + &SolverParameter::set_net)); bp::class_, bp::bases >, @@ -365,7 +376,10 @@ BOOST_PYTHON_MODULE(_caffe) { shared_ptr >, boost::noncopyable>( "AdamSolver", bp::init()); - bp::def("get_solver", &GetSolverFromFile, + bp::def("get_solver_from_file", &GetSolverFromFile, + bp::return_value_policy()); + + bp::def("get_solver", &GetSolver, bp::return_value_policy()); // vector wrappers for all the vector types we use From 576a3477d3aad94d66c52099269f6913433cfffe Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 7 Dec 2015 02:01:37 +0100 Subject: [PATCH 220/254] Extended python interface. --- python/caffe/__init__.py | 2 +- python/caffe/_caffe.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index ccda1bcae4f..bc365833005 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,5 +1,5 @@ from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver -from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list +from ._caffe import set_mode_cpu, set_mode_gpu, set_device, enumerate_devices, Layer, get_solver, layer_type_list from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier from .detector import Detector diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 8332a50e599..1d24054d8a4 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -222,6 +222,7 @@ BOOST_PYTHON_MODULE(_caffe) { bp::def("set_mode_cpu", &set_mode_cpu); bp::def("set_mode_gpu", &set_mode_gpu); bp::def("set_device", &Caffe::SetDevice); + bp::def("enumerate_devices", &Caffe::EnumerateDevices); bp::def("layer_type_list", &LayerRegistry::LayerTypeList); From 52732be92fcf4ce8d0d164abb0203f65b2c1395b Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 10 Dec 2015 02:37:31 +0100 Subject: [PATCH 221/254] Merge, stronger PyCaffe interface. --- python/caffe/__init__.py | 2 +- python/caffe/_caffe.cpp | 9 +++++++-- python/caffe/pycaffe.py | 6 ++++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index bc365833005..2cd235efa14 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,4 +1,4 @@ -from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver +from .pycaffe import SolverParameter, Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver from ._caffe import set_mode_cpu, set_mode_gpu, set_device, enumerate_devices, Layer, get_solver, layer_type_list from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 1d24054d8a4..02281afe322 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -305,7 +305,7 @@ BOOST_PYTHON_MODULE(_caffe) { .def("snapshot", &Solver::Snapshot); - bp::class_("SolverParam", bp::no_init) + bp::class_("SolverParameter", bp::init<>()) .add_property("base_lr", &SolverParameter::base_lr, &SolverParameter::set_base_lr) .add_property("max_iter", &SolverParameter::max_iter, @@ -355,7 +355,12 @@ BOOST_PYTHON_MODULE(_caffe) { bp::make_function(&SolverParameter::net, bp::return_value_policy()), static_cast( - &SolverParameter::set_net)); + &SolverParameter::set_net)) + .add_property("train_net", + bp::make_function(&SolverParameter::train_net, + bp::return_value_policy()), + static_cast( + &SolverParameter::set_train_net)); bp::class_, bp::bases >, diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index 4515711b283..1473e26f2e9 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -10,8 +10,10 @@ from itertools import zip_longest as izip_longest import numpy as np -from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \ - RMSPropSolver, AdaDeltaSolver, AdamSolver +from ._caffe import \ + SolverParameter, Net, SGDSolver, NesterovSolver, AdaGradSolver, \ + RMSPropSolver, AdaDeltaSolver, AdamSolver + import caffe.io # We directly update methods from Net here (rather than using composition or From 97f9648a2c4ae52301836133c62af6fd7d72829f Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 14 Dec 2015 01:27:27 +0100 Subject: [PATCH 222/254] Multi-device advancements. --- .../cpp_classification/classification.cpp | 2 +- include/caffe/common.hpp | 11 +- include/caffe/data_reader.hpp | 3 +- include/caffe/device.hpp | 9 +- include/caffe/layer.hpp | 4 +- include/caffe/layer_factory.hpp | 1 + include/caffe/layers/base_conv_layer.hpp | 78 ++++---- include/caffe/net.hpp | 13 +- include/caffe/solver.hpp | 7 +- include/caffe/syncedmem.hpp | 23 +-- .../caffe/test/test_gradient_check_util.hpp | 6 +- python/caffe/__init__.py | 2 +- python/caffe/_caffe.cpp | 70 ++++++- src/caffe/common.cpp | 173 +++++++++--------- src/caffe/data_reader.cpp | 9 +- .../greentea/greentea_math_functions.cpp | 96 +++++----- src/caffe/internal_thread.cpp | 2 +- src/caffe/layer_factory.cpp | 12 +- src/caffe/layers/base_conv_layer.cpp | 8 +- src/caffe/layers/batch_norm_layer.cpp | 6 +- src/caffe/layers/batch_reindex_layer.cu | 12 +- src/caffe/layers/bnll_layer.cu | 6 +- src/caffe/layers/concat_layer.cu | 6 +- src/caffe/layers/contrastive_loss_layer.cu | 3 +- src/caffe/layers/dropout_layer.cu | 6 +- src/caffe/layers/eltwise_layer.cu | 6 +- src/caffe/layers/embed_layer.cpp | 4 +- src/caffe/layers/embed_layer.cu | 6 +- src/caffe/layers/im2col_layer.cu | 6 +- src/caffe/layers/lrn_layer.cu | 6 +- src/caffe/layers/malis_loss_layer.cpp | 4 +- src/caffe/layers/mergecrop_layer.cu | 6 +- src/caffe/layers/pooling_layer.cu | 6 +- src/caffe/layers/prelu_layer.cu | 6 +- src/caffe/layers/relu_layer.cu | 6 +- src/caffe/layers/sigmoid_layer.cu | 6 +- src/caffe/layers/silence_layer.cu | 4 +- src/caffe/layers/slice_layer.cu | 6 +- src/caffe/layers/softmax_layer.cu | 6 +- src/caffe/layers/softmax_loss_layer.cu | 6 +- src/caffe/layers/tanh_layer.cu | 6 +- src/caffe/layers/threshold_layer.cu | 3 +- src/caffe/layers/tile_layer.cu | 6 +- src/caffe/net.cpp | 22 ++- src/caffe/parallel.cpp | 5 +- src/caffe/solver.cpp | 26 +-- src/caffe/solvers/adadelta_solver.cpp | 2 +- src/caffe/solvers/adam_solver.cpp | 2 +- src/caffe/solvers/sgd_solver.cpp | 6 +- src/caffe/syncedmem.cpp | 16 +- src/caffe/test/test_argmax_layer.cpp | 2 +- src/caffe/test/test_batch_reindex_layer.cpp | 2 +- src/caffe/test/test_common.cpp | 8 +- src/caffe/test/test_data_layer.cpp | 8 +- src/caffe/test/test_data_transformer.cpp | 2 +- src/caffe/test/test_eltwise_layer.cpp | 2 +- src/caffe/test/test_filter_layer.cpp | 2 +- src/caffe/test/test_flatten_layer.cpp | 2 +- src/caffe/test/test_gradient_based_solver.cpp | 6 +- src/caffe/test/test_hinge_loss_layer.cpp | 2 +- src/caffe/test/test_im2col_layer.cpp | 2 +- src/caffe/test/test_image_data_layer.cpp | 2 +- src/caffe/test/test_infogain_loss_layer.cpp | 2 +- src/caffe/test/test_internal_thread.cpp | 6 +- src/caffe/test/test_lrn_layer.cpp | 2 +- src/caffe/test/test_math_functions.cpp | 2 +- .../test/test_maxpool_dropout_layers.cpp | 2 +- .../test_multinomial_logistic_loss_layer.cpp | 2 +- src/caffe/test/test_net.cpp | 38 ++-- src/caffe/test/test_neuron_layer.cpp | 2 +- src/caffe/test/test_pooling_layer.cpp | 2 +- src/caffe/test/test_power_layer.cpp | 2 +- .../test/test_random_number_generator.cpp | 2 +- src/caffe/test/test_reduction_layer.cpp | 2 +- src/caffe/test/test_slice_layer.cpp | 2 +- src/caffe/test/test_spp_layer.cpp | 2 +- src/caffe/test/test_stochastic_pooling.cpp | 2 +- src/caffe/test/test_tanh_layer.cpp | 2 +- src/caffe/test/test_threshold_layer.cpp | 2 +- tools/caffe.cpp | 6 +- tools/extract_features.cpp | 3 +- 81 files changed, 445 insertions(+), 424 deletions(-) diff --git a/examples/cpp_classification/classification.cpp b/examples/cpp_classification/classification.cpp index a78267b5e76..19a6b885e65 100644 --- a/examples/cpp_classification/classification.cpp +++ b/examples/cpp_classification/classification.cpp @@ -56,7 +56,7 @@ Classifier::Classifier(const string& model_file, #endif /* Load the network. */ - net_.reset(new Net(model_file, TEST)); + net_.reset(new Net(model_file, TEST, Caffe::GetDefaultDevice())); net_->CopyTrainedLayersFrom(trained_file); CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input."; diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 4c7a238afa9..9a1ea5fad16 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -160,12 +160,14 @@ class Caffe { // it personally but better to note it here in the header file. inline static void set_mode(Brew mode) { Get().mode_ = mode; } // Sets the random seed of both boost and curand - static void set_random_seed(const size_t seed); + static void set_random_seed(const size_t seed, device* device_context); // Sets the device. Since we have cublas and curand stuff, set device also // requires us to reset those values. static void SetDevice(const int device_id); // Switch the current device static void SelectDevice(device* device_context); + static void SelectDevice(int id, bool listId); + // Prints the current GPU status. static void DeviceQuery(); // Parallel training info @@ -186,12 +188,7 @@ class Caffe { static void Synchronize(int device_id); // Get a device context - static device *GetDevice(int id); - - // Get a device OpenCL program -#ifdef USE_GREENTEA - viennacl::ocl::program & GetDeviceProgram(int id); -#endif + static device *GetDevice(int id, bool listId); protected: #ifndef CPU_ONLY diff --git a/include/caffe/data_reader.hpp b/include/caffe/data_reader.hpp index ecb9f81aaec..aa20c48a48a 100644 --- a/include/caffe/data_reader.hpp +++ b/include/caffe/data_reader.hpp @@ -48,7 +48,7 @@ class DataReader { // A single body is created per source class Body : public InternalThread { public: - explicit Body(const LayerParameter& param); + explicit Body(const LayerParameter& param, device* device_context); virtual ~Body(); protected: @@ -71,6 +71,7 @@ class DataReader { const shared_ptr queue_pair_; shared_ptr body_; + device* device_; static map > bodies_; diff --git a/include/caffe/device.hpp b/include/caffe/device.hpp index d48ad251ac5..2eae27b891a 100644 --- a/include/caffe/device.hpp +++ b/include/caffe/device.hpp @@ -9,7 +9,7 @@ #define CAFFE_device_HPP_ #ifdef CMAKE_BUILD - #include "caffe_config.h" +#include "caffe_config.h" #endif #include @@ -18,7 +18,6 @@ #include "caffe/blob.hpp" #include "caffe/greentea/greentea.hpp" - using std::vector; namespace caffe { @@ -39,7 +38,7 @@ class device { #endif // USE_GREENTEA template - shared_ptr< Blob > Buffer(int id); + shared_ptr > Buffer(int id); int num_queues(); void SwitchQueue(int id); @@ -62,8 +61,8 @@ class device { Backend backend_; uint_tp memory_usage_; uint_tp peak_memory_usage_; - std::vector< shared_ptr< Blob > > buff_f_; - std::vector< shared_ptr< Blob > > buff_d_; + std::vector > > buff_f_; + std::vector > > buff_d_; #ifdef USE_GREENTEA viennacl::ocl::program ocl_program_; #endif // USE_GREENTEA diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 101491e73e2..f1fb7cef177 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -44,13 +44,13 @@ class Layer { */ explicit Layer(const LayerParameter& param) : layer_param_(param), is_shared_(false) { - device_ = Caffe::GetDevice(layer_param_.device()); + device_ = Caffe::GetDevice(layer_param_.device(), true); // Set phase and copy blobs (if there are any). phase_ = param.phase(); if (layer_param_.blobs_size() > 0) { blobs_.resize(layer_param_.blobs_size()); for (int_tp i = 0; i < layer_param_.blobs_size(); ++i) { - blobs_[i].reset(new Blob()); + blobs_[i].reset(new Blob(device_)); blobs_[i]->FromProto(layer_param_.blobs(i)); } } diff --git a/include/caffe/layer_factory.hpp b/include/caffe/layer_factory.hpp index f385afccfee..70f42416f8a 100644 --- a/include/caffe/layer_factory.hpp +++ b/include/caffe/layer_factory.hpp @@ -44,6 +44,7 @@ #include #include "caffe/common.hpp" +#include "caffe/device.hpp" #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" diff --git a/include/caffe/layers/base_conv_layer.hpp b/include/caffe/layers/base_conv_layer.hpp index 77003033913..af86acff632 100644 --- a/include/caffe/layers/base_conv_layer.hpp +++ b/include/caffe/layers/base_conv_layer.hpp @@ -12,7 +12,6 @@ #include "caffe/greentea/greentea_im2col.hpp" #endif - namespace caffe { /** @@ -67,7 +66,7 @@ class BaseConvolutionLayer : public Layer { void backward_gpu_bias(Dtype* bias, const Dtype* input, const int_tp input_off); - shared_ptr< Blob > col_buffer(); + shared_ptr > col_buffer(); #endif /// @brief The spatial dimensions of the input. @@ -115,28 +114,28 @@ class BaseConvolutionLayer : public Layer { // wrap im2col/col2im so we don't have to remember the (long) argument lists inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - im2col_cpu(data, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], col_buff); + im2col_cpu(data, conv_in_channels_, conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], col_buff); } else { im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(), - col_buffer_shape_.data(), kernel_shape_.cpu_data(), - pad_.cpu_data(), stride_.cpu_data(), col_buff); + col_buffer_shape_.data(), kernel_shape_.cpu_data(), + pad_.cpu_data(), stride_.cpu_data(), col_buff); } } inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_cpu(col_buff, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], data); + col2im_cpu(col_buff, conv_in_channels_, conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], data); } else { col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(), - col_buffer_shape_.data(), kernel_shape_.cpu_data(), - pad_.cpu_data(), stride_.cpu_data(), data); + col_buffer_shape_.data(), kernel_shape_.cpu_data(), + pad_.cpu_data(), stride_.cpu_data(), data); } } @@ -177,24 +176,25 @@ class BaseConvolutionLayer : public Layer { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { if (this->use_skernel_) { col2im_sk_gpu(col_buff, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - kstride_.cpu_data()[0], kstride_.cpu_data()[1], data); + conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], + pad_.cpu_data()[0], pad_.cpu_data()[1], + stride_.cpu_data()[0], stride_.cpu_data()[1], + kstride_.cpu_data()[0], kstride_.cpu_data()[1], data); } else { - col2im_gpu(col_buff, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], data); + col2im_gpu(col_buff, conv_in_channels_, conv_input_shape_.cpu_data()[1], + conv_input_shape_.cpu_data()[2], kernel_shape_.cpu_data()[0], + kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], + pad_.cpu_data()[1], stride_.cpu_data()[0], + stride_.cpu_data()[1], data); } } else { if (this->use_skernel_) { col2im_ndsk_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), kstride_.gpu_data(), data); + conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), + kernel_shape_.gpu_data(), pad_.gpu_data(), + stride_.gpu_data(), kstride_.gpu_data(), data); } else { col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), @@ -210,8 +210,7 @@ class BaseConvolutionLayer : public Layer { const int_tp col_buff_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); if (!force_nd_im2col_ && num_spatial_axes_ == 2) { if (this->use_skernel_) { @@ -251,9 +250,7 @@ class BaseConvolutionLayer : public Layer { (cl_mem) col_buff, col_buff_off); } else { greentea_im2col_nd_gpu(&program, &ctx, (cl_mem) data, data_off, - num_spatial_axes_, - 0, - num_kernels_im2col_, + num_spatial_axes_, 0, num_kernels_im2col_, (cl_mem) (conv_input_shape_.gpu_data()), (cl_mem) (col_buffer_.gpu_shape()), (cl_mem) (kernel_shape_.gpu_data()), @@ -269,8 +266,8 @@ class BaseConvolutionLayer : public Layer { const int_tp data_off) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); + if (!force_nd_im2col_ && num_spatial_axes_ == 2) { if (this->use_skernel_) { @@ -308,20 +305,17 @@ class BaseConvolutionLayer : public Layer { (cl_mem) (pad_.gpu_data()), (cl_mem) (stride_.gpu_data()), (cl_mem) (kstride_.gpu_data()), - (cl_mem) data, - data_off); + (cl_mem) data, data_off); } else { greentea_col2im_nd_gpu(&program, &ctx, (cl_mem) col_buff, - col_buff_off, num_spatial_axes_, - 0, + col_buff_off, num_spatial_axes_, 0, num_kernels_col2im_, (cl_mem) (conv_input_shape_.gpu_data()), (cl_mem) (col_buffer_.gpu_shape()), (cl_mem) (kernel_shape_.gpu_data()), (cl_mem) (pad_.gpu_data()), (cl_mem) (stride_.gpu_data()), - (cl_mem) data, - data_off); + (cl_mem) data, data_off); } } } diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 65927f5f848..8f8dc444fb5 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -25,10 +25,13 @@ namespace caffe { template class Net { public: - explicit Net(const NetParameter& param, const Net* root_net = NULL); - explicit Net(const string& param_file, Phase phase, - const Net* root_net = NULL); - virtual ~Net() {} + explicit Net(const NetParameter& param, device* device_context, + const Net* root_net = + NULL); + explicit Net(const string& param_file, Phase phase, device* device_context, + const Net* root_net = NULL); + virtual ~Net() { + } /// @brief Initialize a network with a NetParameter. void Init(const NetParameter& param); @@ -311,6 +314,8 @@ class Net { /// Whether to compute and display debug info for the net. bool debug_info_; + device* device_; + /// The root net that actually holds the shared layers in data parallelism const Net* const root_net_; DISABLE_COPY_AND_ASSIGN(Net); diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 2382bc43eaa..d3c51873136 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -114,6 +114,11 @@ class Solver { return ""; } + inline device *get_device() { + return device_; + } + + protected: // Make and apply the update value for the current iteration. virtual void ApplyUpdate() = 0; @@ -132,7 +137,7 @@ class Solver { int_tp current_step_; shared_ptr > net_; vector > > test_nets_; - device *device_; + device* device_; vector callbacks_; // The root solver that holds root nets (actually containing shared layers) diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 0df08442868..fc1a2085b54 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -12,9 +12,9 @@ namespace caffe { -void CaffeMallocHost(void** ptr, int_tp size); +void CaffeMallocHost(void** ptr, int_tp size, device* device_context); -void CaffeFreeHost(void* ptr); +void CaffeFreeHost(void* ptr, device* device_context); /** * @brief Manages memory allocation and synchronization between the host (CPU) @@ -25,16 +25,6 @@ void CaffeFreeHost(void* ptr); class SyncedMemory { public: #ifdef USE_GREENTEA - SyncedMemory() - : cpu_ptr_(NULL), - gpu_ptr_(NULL), - size_(0), - head_(UNINITIALIZED), - own_cpu_data_(false), - own_gpu_data_(false), - device_(Caffe::GetDefaultDevice()), - cl_gpu_mem_(NULL) { - } explicit SyncedMemory(device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), @@ -56,15 +46,6 @@ class SyncedMemory { cl_gpu_mem_(NULL) { } #else - SyncedMemory() - : cpu_ptr_(NULL), - gpu_ptr_(NULL), - size_(0), - head_(UNINITIALIZED), - own_cpu_data_(false), - own_gpu_data_(false), - device_(Caffe::GetDefaultDevice()) { - } explicit SyncedMemory(device *device_context) : cpu_ptr_(NULL), gpu_ptr_(NULL), diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 1e5b9a93700..8ec9de992f5 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -112,7 +112,7 @@ void GradientChecker::CheckGradientSingle( } CHECK_GT(blobs_to_check.size(), 0)<< "No blobs to check."; // Compute the gradient analytically using Backward - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); // Ignore the loss from the layer (it's just the weighted sum of the losses // from the top blobs, whose gradients we may want to test individually). layer->Forward(bottom, top); @@ -155,13 +155,13 @@ void GradientChecker::CheckGradientSingle( // Do finite differencing. // Compute loss with stepsize_ added to input. current_blob->mutable_cpu_data()[feat_id] += stepsize_; - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); layer->Forward(bottom, top); positive_objective = GetObjAndGradient(*layer, top, top_id, top_data_id); // Compute loss with stepsize_ subtracted from input. current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2; - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); layer->Forward(bottom, top); negative_objective = GetObjAndGradient(*layer, top, top_id, top_data_id); diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index 2cd235efa14..64eb2143489 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,5 +1,5 @@ from .pycaffe import SolverParameter, Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver -from ._caffe import set_mode_cpu, set_mode_gpu, set_device, enumerate_devices, Layer, get_solver, layer_type_list +from ._caffe import set_mode_cpu, set_mode_gpu, set_device, set_devices, select_device, enumerate_devices, Layer, get_solver, layer_type_list from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier from .detector import Detector diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 02281afe322..9de6453c76a 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -15,6 +16,7 @@ #include // NOLINT #include "caffe/caffe.hpp" +#include "caffe/definitions.hpp" #include "caffe/layers/memory_data_layer.hpp" #include "caffe/layers/python_layer.hpp" #include "caffe/sgd_solvers.hpp" @@ -37,6 +39,14 @@ const int NPY_DTYPE = NPY_FLOAT32; // Selecting mode. void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); } void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); } +void select_device(int id, bool listId) { Caffe::SelectDevice(id, listId); } +void set_devices(bp::tuple args) { + vector devices(bp::len(args)); + for (int i = 0; i < bp::len(args); ++i) { + devices[i] = bp::extract(args[i]); + } + Caffe::SetDevices(devices); +} // For convenience, check that input files can be opened, and raise an // exception that boost will send to Python if not (caffe could still crash @@ -82,7 +92,7 @@ shared_ptr > Net_Init( CheckFile(param_file); shared_ptr > net(new Net(param_file, - static_cast(phase))); + static_cast(phase), Caffe::GetDefaultDevice())); return net; } @@ -93,7 +103,7 @@ shared_ptr > Net_Init_Load( CheckFile(pretrained_param_file); shared_ptr > net(new Net(param_file, - static_cast(phase))); + static_cast(phase), Caffe::GetDefaultDevice())); net->CopyTrainedLayersFrom(pretrained_param_file); return net; } @@ -213,15 +223,56 @@ bp::object BlobVec_add_blob(bp::tuple args, bp::dict kwargs) { return bp::object(); } +void exception_translator(std::exception ex) { + std::cout << ex.what() << std::endl; +} + +// NOLINT_NEXT_LINE(runtime/references) +Dtype ForwardFromTo_NoGIL(Net& net, int_tp start, int_tp end) { + Dtype loss; + Py_BEGIN_ALLOW_THREADS + loss = net.ForwardFromTo(start, end); + Py_END_ALLOW_THREADS + return loss; +} + +// NOLINT_NEXT_LINE(runtime/references) +void BackwardFromTo_NoGIL(Net& net, int_tp start, int_tp end) { + Py_BEGIN_ALLOW_THREADS + net.BackwardFromTo(start, end); + Py_END_ALLOW_THREADS +} + +// NOLINT_NEXT_LINE(runtime/references) +Dtype Step_NoGIL(Solver& solver, int_tp iters) { + Dtype smoothed_loss; + Py_BEGIN_ALLOW_THREADS + smoothed_loss = solver.Step(iters); + Py_END_ALLOW_THREADS + return smoothed_loss; +} + +// NOLINT_NEXT_LINE(runtime/references) +void Solve_NoGIL(Solver& solver, const char* resume_file) { + Py_BEGIN_ALLOW_THREADS + solver.Solve(resume_file); + Py_END_ALLOW_THREADS +} + + BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(SolveOverloads, Solve, 0, 1); BOOST_PYTHON_MODULE(_caffe) { + bp::register_exception_translator(&exception_translator); + // below, we prepend an underscore to methods that will be replaced // in Python // Caffe utility functions bp::def("set_mode_cpu", &set_mode_cpu); bp::def("set_mode_gpu", &set_mode_gpu); bp::def("set_device", &Caffe::SetDevice); + bp::def("set_devices", &set_devices); + bp::def("select_device", &select_device); bp::def("enumerate_devices", &Caffe::EnumerateDevices); bp::def("layer_type_list", &LayerRegistry::LayerTypeList); @@ -230,8 +281,8 @@ BOOST_PYTHON_MODULE(_caffe) { bp::no_init) .def("__init__", bp::make_constructor(&Net_Init)) .def("__init__", bp::make_constructor(&Net_Init_Load)) - .def("_forward", &Net::ForwardFromTo) - .def("_backward", &Net::BackwardFromTo) + .def("_forward", &ForwardFromTo_NoGIL) + .def("_backward", &BackwardFromTo_NoGIL) .def("reshape", &Net::Reshape) // The cast is to select a particular overload. .def("copy_from", static_cast::*)(const string)>( @@ -296,11 +347,8 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("iter", &Solver::iter) .add_property("solver_params", &Solver::GetSolverParams, &Solver::UpdateSolverParams) - .def("step", - static_cast::*)(const int_tp)>(&Solver::Step)) - .def("solve", - static_cast::*)(const char*)>( - &Solver::Solve), SolveOverloads()) + .def("step", &Step_NoGIL) + .def("solve", &Solve_NoGIL) .def("restore", &Solver::Restore) .def("snapshot", &Solver::Snapshot); @@ -398,8 +446,10 @@ BOOST_PYTHON_MODULE(_caffe) { .def(bp::vector_indexing_suite > >, true>()); bp::class_ >("StringVec") .def(bp::vector_indexing_suite >()); - bp::class_ >("IntVec") + bp::class_ >("IntTpVec") .def(bp::vector_indexing_suite >()); + bp::class_ >("IntVec") + .def(bp::vector_indexing_suite >()); bp::class_ >("DtypeVec") .def(bp::vector_indexing_suite >()); bp::class_ > > >("NetVec") diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index aa4d0867c66..b1fe3e1550c 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -77,12 +77,22 @@ void GlobalInit(int* pargc, char*** pargv) { ::google::InstallFailureSignalHandler(); } -device *Caffe::GetDevice(int id) { - // The default device context is thread-local - // The list of device contexts is global - return - id == -1 ? - Get().default_device_ : Get().devices_[id].get(); + +device *Caffe::GetDevice(int id, bool listId) { + if (listId) { + return + id == -1 ? + Get().default_device_ : + Get().devices_[id % Get().devices_.size()].get(); + } else { + for (int i = 0; i < Get().devices_.size(); ++i) { + device* device = Get().devices_[i].get(); + if (device->id() == id) { + return device; + } + } + return GetDefaultDevice(); + } } device *Caffe::GetDefaultDevice() { @@ -94,28 +104,28 @@ device *Caffe::GetCPUDevice() { } // Copy constructor for thread-local copy -Caffe::Caffe(const Caffe &obj) { +Caffe::Caffe(const Caffe &obj) + : +#ifdef USE_CUDA + cublas_handle_(NULL), + curand_generator_(NULL), + curand_generator64_(NULL), +#endif // USE_CUDA + random_generator_(), + mode_(Caffe::CPU), + cpu_device_(new device(-1, -1, Backend::BACKEND_CPU)), + default_device_(cpu_device_.get()), + solver_count_(1), + root_solver_(true) { mode_ = obj.mode_; default_device_ = obj.default_device_; cpu_device_ = obj.cpu_device_; root_solver_ = obj.root_solver_; solver_count_ = obj.solver_count_; +} - // Try to create a cublas handler, and report an error if failed (but we will - // keep the program running as one might just want to run CPU code). -#ifdef USE_CUDA - if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { - LOG(ERROR)<< "Cannot create Cublas handle. Cublas won't be available."; - } - // Try to create a curand handler. - if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT) - != CURAND_STATUS_SUCCESS - || curandSetPseudoRandomGeneratorSeed(curand_generator_, - cluster_seedgen()) - != CURAND_STATUS_SUCCESS) { - LOG(ERROR)<< "Cannot create Curand generator. Curand won't be available."; - } -#endif // USE_CUDA +void Caffe::SelectDevice(int id, bool listId) { + Caffe::SelectDevice(GetDevice(id, listId)); } void Caffe::SelectDevice(device* device_context) { @@ -125,10 +135,42 @@ void Caffe::SelectDevice(device* device_context) { if (device_context->backend() == Backend::BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaSetDevice(device_context->id())); + + if (Get().cublas_handle_) { + CUBLAS_CHECK(cublasDestroy(Get().cublas_handle_)); + } + if (Get().curand_generator_) { + CURAND_CHECK(curandDestroyGenerator(Get().curand_generator_)); + } + if (Get().curand_generator64_) { + CURAND_CHECK(curandDestroyGenerator(Get().curand_generator64_)); + } + CUBLAS_CHECK(cublasCreate(&Get().cublas_handle_)); + + if (cublasCreate(&(Get().cublas_handle_)) != CUBLAS_STATUS_SUCCESS) { + LOG(ERROR)<< "Cannot create Cublas handle. Cublas won't be available."; + } + // Try to create a curand handler. + if (curandCreateGenerator(&(Get().curand_generator_), + CURAND_RNG_PSEUDO_DEFAULT) + != CURAND_STATUS_SUCCESS + || curandSetPseudoRandomGeneratorSeed((Get().curand_generator_), + cluster_seedgen()) + != CURAND_STATUS_SUCCESS) { + LOG(ERROR)<< "Cannot create Curand generator. Curand won't be available."; + } + if (curandCreateGenerator(&(Get().curand_generator64_), + CURAND_RNG_QUASI_SOBOL64) + != CURAND_STATUS_SUCCESS) { + LOG(ERROR)<< "Cannot create Curand generator. Curand won't be available."; + } + #endif // USE_CUDA } else if (device_context->backend() == Backend::BACKEND_OpenCL) { #ifdef USE_GREENTEA - +#ifdef USE_CLBLAS + clblasSetup(); +#endif // USE_CLBLAS #endif // USE_GREENTEA } #endif // !CPU_ONLY @@ -194,32 +236,13 @@ Caffe::Caffe() #ifdef USE_CUDA cublas_handle_(NULL), curand_generator_(NULL), + curand_generator64_(NULL), #endif // USE_CUDA random_generator_(), mode_(Caffe::CPU), cpu_device_(new device(-1, -1, Backend::BACKEND_CPU)), default_device_(cpu_device_.get()), solver_count_(1), root_solver_(true) { - // Try to create a cublas handler, and report an error if failed (but we will - // keep the program running as one might just want to run CPU code). -#ifdef USE_CUDA - if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { - LOG(ERROR)<< "Cannot create Cublas handle. Cublas won't be available."; - } - // Try to create a curand handler. - if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT) - != CURAND_STATUS_SUCCESS || - curandSetPseudoRandomGeneratorSeed(curand_generator_, cluster_seedgen()) - != CURAND_STATUS_SUCCESS) { - LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; - } - if (curandCreateGenerator(&curand_generator64_, CURAND_RNG_QUASI_SOBOL64) - != CURAND_STATUS_SUCCESS || - curandSetPseudoRandomGeneratorSeed(curand_generator64_, cluster_seedgen()) - != CURAND_STATUS_SUCCESS) { - LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; - } -#endif // USE_CUDA } Caffe::~Caffe() { @@ -236,11 +259,15 @@ Caffe::~Caffe() { CURAND_CHECK(curandDestroyGenerator(curand_generator_)); curand_generator_ = nullptr; } + if (curand_generator64_) { + CURAND_CHECK(curandDestroyGenerator(curand_generator64_)); + curand_generator64_ = nullptr; + } #endif // USE_CUDA } -void Caffe::set_random_seed(const size_t seed) { - if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { +void Caffe::set_random_seed(const size_t seed, device* device_context) { + if (device_context->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // Curand seed static bool g_curand_availability_logged = false; @@ -255,6 +282,15 @@ void Caffe::set_random_seed(const size_t seed) { g_curand_availability_logged = true; } } + if (Get().curand_generator64_) { + CURAND_CHECK(curandSetGeneratorOffset(curand_generator64(), 0)); + } else { + if (!g_curand_availability_logged) { + LOG(ERROR)<< + "Curand not available. Skipping setting the curand seed."; + g_curand_availability_logged = true; + } + } #endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -267,7 +303,7 @@ void Caffe::set_random_seed(const size_t seed) { void Caffe::Synchronize(int device_id) { if (Caffe::mode() == Brew::GPU) { - device * device_context = Caffe::GetDevice(device_id); + device * device_context = Caffe::GetDevice(device_id, true); if (device_context->backend() == BACKEND_CUDA) { #ifdef USE_CUDA cudaDeviceSynchronize(); @@ -275,7 +311,7 @@ void Caffe::Synchronize(int device_id) { } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( - GetDevice(device_id)->id()); + GetDevice(device_id, true)->id()); ctx.get_queue().finish(); #endif } @@ -439,16 +475,10 @@ void Caffe::SetDevices(std::vector device_ids) { } } #endif // USE_GREENTEA -} -#ifdef USE_GREENTEA -viennacl::ocl::program & Caffe::GetDeviceProgram(int id) { - return - id == -1 ? - Get().default_device_->program() : - Get().GetDevice(id)->program(); + Get().default_device_ = GetDevice(0, true); + Caffe::SelectDevice(Get().default_device_); } -#endif // USE_GREENTEA void Caffe::SetDevice(const int device_id) { // Fix for compability to python and other interfaces that do not @@ -458,38 +488,7 @@ void Caffe::SetDevice(const int device_id) { Caffe::SetDevices(std::vector { device_id }); } - Get().default_device_ = GetDevice(device_id); - - if (Get().default_device_->backend() == Backend::BACKEND_CUDA) { -#ifdef USE_CUDA - int current_device; - CUDA_CHECK(cudaGetDevice(¤t_device)); - if (current_device == Get().default_device_->id()) { - return; - } -// The call to cudaSetDevice must come before any calls to Get, which -// may perform initialization using the GPU. - CUDA_CHECK(cudaSetDevice(Get().default_device_->id())); - if (Get().cublas_handle_) - CUBLAS_CHECK(cublasDestroy(Get().cublas_handle_)); - if (Get().curand_generator_) { - CURAND_CHECK(curandDestroyGenerator(Get().curand_generator_)); - } - CUBLAS_CHECK(cublasCreate(&Get().cublas_handle_)); - CURAND_CHECK( - curandCreateGenerator(&Get().curand_generator_, - CURAND_RNG_PSEUDO_DEFAULT)); - CURAND_CHECK( - curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, - cluster_seedgen())); -#endif // USE_CUDA - } else { -#ifdef USE_GREENTEA -#ifdef USE_CLBLAS - clblasSetup(); -#endif // USE_CLBLAS -#endif // USE_GREENTEA - } + Get().default_device_ = GetDevice(0, true); } // TODO: Fix this for the new backend diff --git a/src/caffe/data_reader.cpp b/src/caffe/data_reader.cpp index f2df3cfb111..069ac269c6b 100644 --- a/src/caffe/data_reader.cpp +++ b/src/caffe/data_reader.cpp @@ -17,14 +17,15 @@ static boost::mutex bodies_mutex_; DataReader::DataReader(const LayerParameter& param) : queue_pair_(new QueuePair( // - param.data_param().prefetch() * param.data_param().batch_size())) { + param.data_param().prefetch() * param.data_param().batch_size())), + device_(Caffe::GetDevice(param.device(), true)) { // Get or create a body boost::mutex::scoped_lock lock(bodies_mutex_); string key = source_key(param); weak_ptr& weak = bodies_[key]; body_ = weak.lock(); if (!body_) { - body_.reset(new Body(param)); + body_.reset(new Body(param, device_)); bodies_[key] = weak_ptr(body_); } body_->new_queue_pairs_.push(queue_pair_); @@ -60,10 +61,10 @@ DataReader::QueuePair::~QueuePair() { // -DataReader::Body::Body(const LayerParameter& param) +DataReader::Body::Body(const LayerParameter& param, device* device_context) : param_(param), new_queue_pairs_() { - StartInternalThread(Caffe::Get().GetDefaultDevice()); + StartInternalThread(device_context); } DataReader::Body::~Body() { diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 8ae61a44818..9446cf31c6c 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -6,6 +6,7 @@ */ #include "caffe/common.hpp" +#include "caffe/device.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" @@ -62,7 +63,8 @@ namespace caffe { void greentea_memset(const int_tp ctx_id, const uint_tp N, const int_tp alpha, cl_mem X, const int_tp offX) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); // OpenCL Version >= 1.2 approach // clEnqueueFillBuffer(ctx.get_queue().handle().get(), @@ -207,10 +209,10 @@ void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, #ifndef USE_CLBLAS - typedef typename viennacl::matrix_base - ::size_type size_type; - typedef typename viennacl::matrix_base - ::size_type difference_type; + typedef typename viennacl::matrix_base::size_type size_type; + typedef typename viennacl::matrix_base::size_type difference_type; size_type A_size1 = static_cast((TransA == CblasTrans) ? K : M); size_type A_size2 = static_cast((TransA == CblasTrans) ? M : K); @@ -335,10 +337,10 @@ void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base - ::size_type size_type; - typedef typename viennacl::vector_base - ::size_type difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; viennacl::vector_base v1( x, size_type((TransA == CblasTrans) ? M : N), size_type(offx), @@ -420,10 +422,10 @@ void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base - ::size_type size_type; - typedef typename viennacl::vector_base - ::size_type difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; viennacl::vector_base v1(X, size_type(N), size_type(offX), @@ -464,7 +466,8 @@ void greentea_gpu_mul(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_mul = program.get_kernel(CL_KERNEL_SELECT("mul")); viennacl::ocl::enqueue( @@ -487,7 +490,8 @@ void greentea_gpu_div(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_div = program.get_kernel(CL_KERNEL_SELECT("div")); viennacl::ocl::enqueue( @@ -522,10 +526,10 @@ void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base - ::size_type size_type; - typedef typename viennacl::vector_base - ::size_type difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; viennacl::vector_base v1(x, size_type(N), size_type(offx), @@ -595,10 +599,10 @@ void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, const cl_mem X, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base - ::size_type size_type; - typedef typename viennacl::vector_base - ::size_type difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; viennacl::vector_base v1(X, size_type(n), size_type(offX), @@ -663,10 +667,10 @@ void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, const cl_mem X, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base - ::size_type size_type; - typedef typename viennacl::vector_base - ::size_type difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; viennacl::vector_base v1(X, size_type(n), size_type(offX), @@ -731,10 +735,10 @@ void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const Dtype alpha, } else { #ifndef USE_CLBLAS - typedef typename viennacl::vector_base - ::size_type size_type; - typedef typename viennacl::vector_base - ::size_type difference_type; + typedef typename viennacl::vector_base::size_type size_type; + typedef typename viennacl::vector_base::size_type difference_type; viennacl::vector_base v1(X, size_type(n), size_type(offX), @@ -779,7 +783,8 @@ template void greentea_gpu_set(const int_tp ctx_id, const int_tp N, const Dtype alpha, cl_mem Y, const int_tp offY) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); // OpenCL Version >= 1.2 approach // clEnqueueFillBuffer(ctx.get_queue().handle().get(), // Y, &alpha, sizeof(Dtype), @@ -806,7 +811,8 @@ template void greentea_gpu_add_scalar(const int_tp ctx_id, const int_tp N, const Dtype alpha, cl_mem Y, const int_tp offY) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_add_scalar = program.get_kernel( CL_KERNEL_SELECT("add_scalar")); @@ -827,7 +833,8 @@ void greentea_gpu_add(const int_tp ctx_id, const int_tp n, const cl_mem a, const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_add = program.get_kernel(CL_KERNEL_SELECT("add")); viennacl::ocl::enqueue( @@ -850,7 +857,8 @@ void greentea_gpu_sub(const int_tp ctx_id, const int_tp n, const cl_mem a, const int_tp offa, const cl_mem b, const int_tp offb, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_sub = program.get_kernel(CL_KERNEL_SELECT("sub")); viennacl::ocl::enqueue( @@ -872,7 +880,8 @@ template void greentea_gpu_abs(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_abs = program.get_kernel(CL_KERNEL_SELECT("abs")); viennacl::ocl::enqueue( @@ -891,7 +900,8 @@ template void greentea_gpu_exp(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_exp = program.get_kernel(CL_KERNEL_SELECT("exp")); viennacl::ocl::enqueue( @@ -911,7 +921,8 @@ void greentea_gpu_powx(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, const Dtype alpha, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_powx = program.get_kernel( CL_KERNEL_SELECT("powx")); @@ -933,7 +944,8 @@ template void greentea_gpu_log(const int_tp ctx_id, const int_tp N, const cl_mem a, const int_tp offa, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_log = program.get_kernel(CL_KERNEL_SELECT("log")); viennacl::ocl::enqueue( @@ -953,7 +965,8 @@ void greentea_gpu_sign(const int_tp ctx_id, const int_tp n, const cl_mem x, int_tp offx, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_sign = program.get_kernel( CL_KERNEL_SELECT("sign")); @@ -974,7 +987,8 @@ void greentea_gpu_sgnbit(const int_tp ctx_id, const int_tp n, const cl_mem x, int_tp offx, cl_mem y, const int_tp offy) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(ctx_id); + viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) + ->program(); viennacl::ocl::kernel &oclk_sgnbit = program.get_kernel( CL_KERNEL_SELECT("sgnbit")); diff --git a/src/caffe/internal_thread.cpp b/src/caffe/internal_thread.cpp index 71ce47f6e09..c705e5ea666 100644 --- a/src/caffe/internal_thread.cpp +++ b/src/caffe/internal_thread.cpp @@ -42,7 +42,7 @@ void InternalThread::entry(device* device_context, Caffe::Brew mode, int_tp solver_count, bool root_solver) { Caffe::SelectDevice(device_context); Caffe::set_mode(mode); - Caffe::set_random_seed(rand_seed); + Caffe::set_random_seed(rand_seed, thread_device_); Caffe::set_solver_count(solver_count); Caffe::set_root_solver(root_solver); diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 5a8537319a0..c6004af5e5b 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -44,7 +44,7 @@ shared_ptr > GetConvolutionLayer(const LayerParameter& param) { #endif } if (engine == ConvolutionParameter_Engine_CAFFE - || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { return shared_ptr >(new ConvolutionLayer(param)); #ifdef USE_CUDNN } else if (engine == ConvolutionParameter_Engine_CUDNN) { @@ -68,7 +68,7 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { #endif } if (engine == PoolingParameter_Engine_CAFFE - || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { return shared_ptr >(new PoolingLayer(param)); #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { @@ -134,7 +134,7 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { #endif } if (engine == ReLUParameter_Engine_CAFFE - || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { return shared_ptr >(new ReLULayer(param)); #ifdef USE_CUDNN } else if (engine == ReLUParameter_Engine_CUDNN) { @@ -158,7 +158,7 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { #endif } if (engine == SigmoidParameter_Engine_CAFFE - || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { return shared_ptr >(new SigmoidLayer(param)); #ifdef USE_CUDNN } else if (engine == SigmoidParameter_Engine_CUDNN) { @@ -182,7 +182,7 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { #endif } if (engine == SoftmaxParameter_Engine_CAFFE - || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { return shared_ptr >(new SoftmaxLayer(param)); #ifdef USE_CUDNN } else if (engine == SoftmaxParameter_Engine_CUDNN) { @@ -206,7 +206,7 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { #endif } if (engine == TanHParameter_Engine_CAFFE - || Caffe::GetDefaultDevice()->backend() == BACKEND_OpenCL) { + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { return shared_ptr >(new TanHLayer(param)); #ifdef USE_CUDNN } else if (engine == TanHParameter_Engine_CUDNN) { diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index f9cf938c0be..b7eef86df66 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -172,13 +172,13 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, CHECK_EQ(1 + bias_term_, this->blobs_.size()) << "Incorrect number of weight blobs."; if (weight_shape != this->blobs_[0]->shape()) { - Blob weight_shaped_blob(weight_shape); + Blob weight_shaped_blob(weight_shape, this->device_); LOG(FATAL) << "Incorrect weight shape: expected shape " << weight_shaped_blob.shape_string() << "; instead, shape was " << this->blobs_[0]->shape_string(); } if (bias_term_ && bias_shape != this->blobs_[1]->shape()) { - Blob bias_shaped_blob(bias_shape); + Blob bias_shaped_blob(bias_shape, this->device_); LOG(FATAL) << "Incorrect bias shape: expected shape " << bias_shaped_blob.shape_string() << "; instead, shape was " << this->blobs_[1]->shape_string(); @@ -192,13 +192,13 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, } // Initialize and fill the weights: // output channels x input channels per-group x kernel height x kernel width - this->blobs_[0].reset(new Blob(weight_shape)); + this->blobs_[0].reset(new Blob(weight_shape, this->device_)); shared_ptr > weight_filler(GetFiller( this->layer_param_.convolution_param().weight_filler())); weight_filler->Fill(this->blobs_[0].get()); // If necessary, initialize and fill the biases. if (bias_term_) { - this->blobs_[1].reset(new Blob(bias_shape)); + this->blobs_[1].reset(new Blob(bias_shape, this->device_)); shared_ptr > bias_filler(GetFiller( this->layer_param_.convolution_param().bias_filler())); bias_filler->Fill(this->blobs_[1].get()); diff --git a/src/caffe/layers/batch_norm_layer.cpp b/src/caffe/layers/batch_norm_layer.cpp index a2770eded7a..9bba62080fd 100644 --- a/src/caffe/layers/batch_norm_layer.cpp +++ b/src/caffe/layers/batch_norm_layer.cpp @@ -25,10 +25,10 @@ void BatchNormLayer::LayerSetUp(const vector*>& bottom, this->blobs_.resize(3); vector sz; sz.push_back(channels_); - this->blobs_[0].reset(new Blob(sz)); - this->blobs_[1].reset(new Blob(sz)); + this->blobs_[0].reset(new Blob(sz, this->device_)); + this->blobs_[1].reset(new Blob(sz, this->device_)); sz[0]=1; - this->blobs_[2].reset(new Blob(sz)); + this->blobs_[2].reset(new Blob(sz, this->device_)); for (int_tp i = 0; i < 3; ++i) { caffe_set(this->blobs_[i]->count(), Dtype(0), this->blobs_[i]->mutable_cpu_data()); diff --git a/src/caffe/layers/batch_reindex_layer.cu b/src/caffe/layers/batch_reindex_layer.cu index a6e9488b09d..d4cffe03ab1 100644 --- a/src/caffe/layers/batch_reindex_layer.cu +++ b/src/caffe/layers/batch_reindex_layer.cu @@ -43,8 +43,7 @@ void BatchReindexLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_br = program.get_kernel( CL_KERNEL_SELECT("br_forward")); @@ -102,10 +101,10 @@ void BatchReindexLayer::Backward_gpu( // length of that list. vector shape; shape.push_back(bottom[1]->count()); - Blob top_indexes(shape); + Blob top_indexes(shape, this->device_); shape[0] = bottom[0]->shape(0); - Blob counts(shape); - Blob begins(shape); + Blob counts(shape, this->device_); + Blob begins(shape, this->device_); Dtype* t_i_data = top_indexes.mutable_cpu_data(); Dtype* c_data = counts.mutable_cpu_data(); Dtype* b_data = begins.mutable_cpu_data(); @@ -135,8 +134,7 @@ void BatchReindexLayer::Backward_gpu( #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_br = program.get_kernel( CL_KERNEL_SELECT("br_backward")); diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index 098160d52a6..c121497f7b0 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -40,8 +40,7 @@ void BNLLLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_bnll = program.get_kernel( CL_KERNEL_SELECT("bnll_forward")); @@ -86,8 +85,7 @@ void BNLLLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_bnll = program.get_kernel( CL_KERNEL_SELECT("bnll_backward")); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 4c9e7ec4fa6..a258c795ba4 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -59,8 +59,7 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_concat = program.get_kernel( CL_KERNEL_SELECT("concat")); @@ -106,8 +105,7 @@ void ConcatLayer::Backward_gpu(const vector*>& top, viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_concat = program.get_kernel( CL_KERNEL_SELECT("concat")); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index b90c113671c..c1f633a02ae 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -133,8 +133,7 @@ void ContrastiveLossLayer::Backward_gpu( #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_cll = program.get_kernel( CL_KERNEL_SELECT("cll_backward")); diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index 3ad67db8a9d..aba3c790826 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -45,8 +45,7 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); if (this->phase_ == TRAIN) { cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data()); greentea_gpu_rng_uniform(this->device_->id(), count, mask, 0); @@ -105,8 +104,7 @@ void DropoutLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); if (this->phase_ == TRAIN) { cl_mem mask = (cl_mem) (rand_vec_.gpu_data()); diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 1e0c41dc8fa..2e422a2c49c 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -84,8 +84,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { @@ -219,8 +218,7 @@ void EltwiseLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { diff --git a/src/caffe/layers/embed_layer.cpp b/src/caffe/layers/embed_layer.cpp index 221b91708e6..4329a0cb015 100644 --- a/src/caffe/layers/embed_layer.cpp +++ b/src/caffe/layers/embed_layer.cpp @@ -28,7 +28,7 @@ void EmbedLayer::LayerSetUp(const vector*>& bottom, vector weight_shape(2); weight_shape[0] = K_; weight_shape[1] = N_; - this->blobs_[0].reset(new Blob(weight_shape)); + this->blobs_[0].reset(new Blob(weight_shape, this->device_)); // fill the weights shared_ptr > weight_filler(GetFiller( this->layer_param_.embed_param().weight_filler())); @@ -36,7 +36,7 @@ void EmbedLayer::LayerSetUp(const vector*>& bottom, // If necessary, initialize and fill the bias term if (bias_term_) { vector bias_shape(1, N_); - this->blobs_[1].reset(new Blob(bias_shape)); + this->blobs_[1].reset(new Blob(bias_shape, this->device_)); shared_ptr > bias_filler(GetFiller( this->layer_param_.embed_param().bias_filler())); bias_filler->Fill(this->blobs_[1].get()); diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu index 39fe6bffe99..7d479a0ec67 100644 --- a/src/caffe/layers/embed_layer.cu +++ b/src/caffe/layers/embed_layer.cu @@ -66,8 +66,7 @@ void EmbedLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_embed = program.get_kernel( CL_KERNEL_SELECT("embed_forward")); @@ -108,8 +107,7 @@ void EmbedLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_embed = program.get_kernel( CL_KERNEL_SELECT("embed_backward")); diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 132e5024046..6e9a8268ce1 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -42,8 +42,7 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); for (int_tp n = 0; n < num_; ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { @@ -104,8 +103,7 @@ void Im2colLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); for (int_tp n = 0; n < top[0]->num(); ++n) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index b68b20ebf1e..9d861434629 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -113,8 +113,7 @@ void LRNLayer::CrossChannelForward_gpu( viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); int_tp n_threads = num_ * height_ * width_; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( @@ -240,8 +239,7 @@ void LRNLayer::CrossChannelBackward_gpu( #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_lrn = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_diff")); diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 96e14f8c7a9..4a9e514d210 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -279,8 +279,8 @@ void MalisLossLayer::LayerSetUp(const vector*>& bottom, // Expected inputs: // Required (bottom 0 to 2): // Bottom 0: Predicted affinity, shaped (batch size, #edges, (Z), (Y), X) - // Bottom 1: Segmented ground truth, shaped (batch size, 1, (Z), (Y), X) - // Bottom 2: Ground truth affinity, shaped (batch size, #edges, (Z), (Y), X) + // Bottom 1: Ground truth affinity, shaped (batch size, #edges, (Z), (Y), X) + // Bottom 2: Segmented ground truth, shaped (batch size, 1, (Z), (Y), X) // Optional (bottom 3): // Bottom 3: Edge connectivity, size #edges * 3, shaped (Z,Y,X);(Z,Y,X);... diff --git a/src/caffe/layers/mergecrop_layer.cu b/src/caffe/layers/mergecrop_layer.cu index 109580704ac..0f1e349733e 100644 --- a/src/caffe/layers/mergecrop_layer.cu +++ b/src/caffe/layers/mergecrop_layer.cu @@ -141,8 +141,7 @@ void MergeCropLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_copy_forward = program.get_kernel( CL_KERNEL_SELECT("merge_copy_forward")); @@ -194,8 +193,7 @@ void MergeCropLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_copy_backward = program.get_kernel( CL_KERNEL_SELECT("merge_copy_backward")); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index bad575dce86..9de8f362b6b 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -883,8 +883,7 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); if (num_spatial_axes_ == 2) { int_tp kernel_h_ = kernel_shape_.cpu_data()[0]; @@ -1216,8 +1215,7 @@ void PoolingLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); greentea_gpu_set(this->device_->id(), count, Dtype(0.), (cl_mem) bottom_diff, 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index 1cd3d6a0bb9..2ddec5574bd 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -81,8 +81,7 @@ void PReLULayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); if (top[0] == bottom[0]) { greentea_copy(count, (cl_mem) bottom_data, 0, @@ -163,8 +162,7 @@ void PReLULayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index 7ba151a24b6..4afd35ed6a3 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -39,8 +39,7 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( @@ -91,8 +90,7 @@ void ReLULayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( CL_KERNEL_SELECT("relu_backward")); viennacl::ocl::enqueue( diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index a474979df47..2d54e4f71e4 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -33,8 +33,7 @@ void SigmoidLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( CL_KERNEL_SELECT("sigmoid_forward")); @@ -85,8 +84,7 @@ void SigmoidLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_sigmoid = program.get_kernel( CL_KERNEL_SELECT("sigmoid_backward")); diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index 93c23750b07..c7b5b3e261d 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -31,8 +31,8 @@ void SilenceLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); + viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel( CL_KERNEL_SELECT("gpu_set")); viennacl::ocl::enqueue( diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index b1b8dbf9433..fe4a334ce02 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -53,8 +53,7 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_slice = program.get_kernel( CL_KERNEL_SELECT("slice")); @@ -96,8 +95,7 @@ void SliceLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_slice = program.get_kernel( CL_KERNEL_SELECT("slice")); diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index f22bed2fcf0..4d701c8fe07 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -144,8 +144,7 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); greentea_copy(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0, &ctx); @@ -227,8 +226,7 @@ void SoftmaxLayer::Backward_gpu(const vector*>& top, viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); greentea_copy(top[0]->count(), (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, &ctx); diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 4dfbfd0901f..9dee37ae768 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -81,8 +81,7 @@ void SoftmaxWithLossLayer::Forward_gpu( #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); @@ -192,8 +191,7 @@ void SoftmaxWithLossLayer::Backward_gpu( #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); cl_mem prob_data = (cl_mem)(prob_.gpu_data()); diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index a566d5d3a09..eeebf81745c 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -35,8 +35,7 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_tanh = program.get_kernel( CL_KERNEL_SELECT("tanh_forward")); @@ -81,8 +80,7 @@ void TanHLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_tanh = program.get_kernel( CL_KERNEL_SELECT("tanh_backward")); diff --git a/src/caffe/layers/threshold_layer.cu b/src/caffe/layers/threshold_layer.cu index 564a33a1be9..b3486f4c318 100644 --- a/src/caffe/layers/threshold_layer.cu +++ b/src/caffe/layers/threshold_layer.cu @@ -38,8 +38,7 @@ void ThresholdLayer::Forward_gpu(const vector*>& bottom, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_threshold = program.get_kernel( CL_KERNEL_SELECT("threshold")); diff --git a/src/caffe/layers/tile_layer.cu b/src/caffe/layers/tile_layer.cu index 9545571cb85..15d0a114135 100644 --- a/src/caffe/layers/tile_layer.cu +++ b/src/caffe/layers/tile_layer.cu @@ -43,8 +43,7 @@ void TileLayer::Forward_gpu( #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_tile = program.get_kernel( CL_KERNEL_SELECT("tile")); @@ -97,8 +96,7 @@ void TileLayer::Backward_gpu(const vector*>& top, #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); - viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( - this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_tile = program.get_kernel( CL_KERNEL_SELECT("tile_backward")); diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index bafe45c402d..1688fbaf38f 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -23,14 +23,16 @@ namespace caffe { template -Net::Net(const NetParameter& param, const Net* root_net) - : root_net_(root_net) { +Net::Net(const NetParameter& param, device* device_context, + const Net* root_net) + : device_(device_context), root_net_(root_net) { Init(param); } template -Net::Net(const string& param_file, Phase phase, const Net* root_net) - : root_net_(root_net) { +Net::Net(const string& param_file, Phase phase, device* device_context, + const Net* root_net) + : device_(device_context), root_net_(root_net) { NetParameter param; ReadNetParamsFromTextFileOrDie(param_file, ¶m); param.mutable_state()->set_phase(phase); @@ -92,7 +94,13 @@ void Net::Init(const NetParameter& in_param) { param.mutable_layer(layer_id)->set_phase(phase_); } // Setup layer. - const LayerParameter& layer_param = param.layer(layer_id); + const LayerParameter& c_layer_param = param.layer(layer_id); + + LayerParameter layer_param = c_layer_param; + + // Set device + layer_param.set_device(Caffe::GetDefaultDevice()->list_id()); + if (layer_param.propagate_down_size() > 0) { CHECK_EQ(layer_param.propagate_down_size(), layer_param.bottom_size())<< "propagate_down param must be specified " @@ -1047,14 +1055,14 @@ void Net::ClearParamDiffs() { break; case Caffe::GPU: #ifndef CPU_ONLY - if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + if (device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(blob->count(), static_cast(0), blob->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA - greentea_gpu_set(Caffe::GetDefaultDevice()->id(), + greentea_gpu_set(device_->id(), blob->count(), static_cast(0), (cl_mem)(blob->mutable_gpu_diff()), 0); #endif // USE_GREENTEA diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index af92219160e..f3c9e064b49 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -300,7 +300,8 @@ void P2PSync::InternalThreadEntry() { // everyone doesn't have the same seed. We seem to have some // solver instability if we have everyone with the same seed Caffe::set_random_seed( - solver_->param().random_seed() + solver_->param().device_id()); + solver_->param().random_seed() + solver_->param().device_id(), + solver_->get_device()); } solver_->Step(solver_->param().max_iter() - initial_iter_); } @@ -448,7 +449,7 @@ void P2PSync::run(const vector& gpus) { LOG(INFO)<< "Starting Optimization"; for (int i = 1; i < syncs.size(); ++i) { - syncs[i]->StartInternalThread(Caffe::GetDefaultDevice()); + syncs[i]->StartInternalThread(solver_->get_device()); } // Run root solver on current thread diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 4e918db4c95..cd8d19b4a22 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -32,17 +32,19 @@ SolverAction::Enum Solver::GetRequestedAction() { return SolverAction::NONE; } -template +template Solver::Solver(const SolverParameter& param, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { + : net_(), + device_(Caffe::GetDefaultDevice()), callbacks_(), + root_solver_(root_solver), requested_early_exit_(false) { Init(param); } -template +template Solver::Solver(const string& param_file, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { + : net_(), + device_(Caffe::GetDefaultDevice()), callbacks_(), + root_solver_(root_solver), requested_early_exit_(false) { SolverParameter param; ReadSolverParamsFromTextFileOrDie(param_file, ¶m); Init(param); @@ -50,7 +52,6 @@ Solver::Solver(const string& param_file, const Solver* root_solver) template void Solver::Init(const SolverParameter& param) { - device_ = Caffe::GetDefaultDevice(); CHECK(Caffe::root_solver() || root_solver_) << "root_solver_ needs to be set for all non-root solvers"; LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " @@ -59,7 +60,7 @@ void Solver::Init(const SolverParameter& param) { CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; CheckSnapshotWritePermissions(); if (Caffe::root_solver() && param_.random_seed() >= 0) { - Caffe::set_random_seed(param_.random_seed()); + Caffe::set_random_seed(param_.random_seed(), device_); } // Scaffolding code InitTrainNet(); @@ -121,9 +122,10 @@ void Solver::InitTrainNet() { net_state.MergeFrom(param_.train_state()); net_param.mutable_state()->CopyFrom(net_state); if (Caffe::root_solver()) { - net_.reset(new Net(net_param)); + net_.reset(new Net(net_param, this->device_)); } else { - net_.reset(new Net(net_param, root_solver_->net_.get())); + net_.reset( + new Net(net_param, this->device_, root_solver_->net_.get())); } } @@ -202,9 +204,9 @@ void Solver::InitTestNets() { LOG(INFO) << "Creating test net (#" << i << ") specified by " << sources[i]; if (Caffe::root_solver()) { - test_nets_[i].reset(new Net(net_params[i])); + test_nets_[i].reset(new Net(net_params[i], this->device_)); } else { - test_nets_[i].reset(new Net(net_params[i], + test_nets_[i].reset(new Net(net_params[i], this->device_, root_solver_->test_nets_[i].get())); } test_nets_[i]->set_debug_info(param_.debug_info()); diff --git a/src/caffe/solvers/adadelta_solver.cpp b/src/caffe/solvers/adadelta_solver.cpp index 209f99f54e4..c9bec411a9d 100644 --- a/src/caffe/solvers/adadelta_solver.cpp +++ b/src/caffe/solvers/adadelta_solver.cpp @@ -12,7 +12,7 @@ void AdaDeltaSolver::AdaDeltaPreSolve() { for (uint_tp i = 0; i < net_params.size(); ++i) { const vector& shape = net_params[i]->shape(); this->history_.push_back( - shared_ptr >(new Blob(shape))); + shared_ptr >(new Blob(shape, this->device_))); } } diff --git a/src/caffe/solvers/adam_solver.cpp b/src/caffe/solvers/adam_solver.cpp index a0fc4d0fb21..1857437a329 100644 --- a/src/caffe/solvers/adam_solver.cpp +++ b/src/caffe/solvers/adam_solver.cpp @@ -12,7 +12,7 @@ void AdamSolver::AdamPreSolve() { for (uint_tp i = 0; i < net_params.size(); ++i) { const vector& shape = net_params[i]->shape(); this->history_.push_back( - shared_ptr >(new Blob(shape))); + shared_ptr >(new Blob(shape, this->device_))); } } diff --git a/src/caffe/solvers/sgd_solver.cpp b/src/caffe/solvers/sgd_solver.cpp index 84058ae17c3..9150f97719c 100644 --- a/src/caffe/solvers/sgd_solver.cpp +++ b/src/caffe/solvers/sgd_solver.cpp @@ -73,13 +73,13 @@ void SGDSolver::PreSolve() { const vector& shape = net_params[i]->shape(); history_.push_back( shared_ptr>( - new Blob(shape, Caffe::GetDefaultDevice()))); + new Blob(shape, this->device_))); update_.push_back( shared_ptr>( - new Blob(shape, Caffe::GetDefaultDevice()))); + new Blob(shape, this->device_))); temp_.push_back( shared_ptr>( - new Blob(shape, Caffe::GetDefaultDevice()))); + new Blob(shape, this->device_))); } } diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index a85f86b9ad9..d24a99b2a4c 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -18,10 +18,10 @@ namespace caffe { // but might be more significant for parallel training. Most importantly, // it improved stability for large models on many GPUs. -void CaffeMallocHost(void** ptr, int_tp size) { +void CaffeMallocHost(void** ptr, int_tp size, device* device_context) { #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { - if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + if (device_context->backend() == BACKEND_CUDA) { #ifdef USE_CUDA CUDA_CHECK(cudaMallocHost(ptr, size)); return; @@ -40,10 +40,10 @@ void CaffeMallocHost(void** ptr, int_tp size) { CHECK(*ptr) << "host allocation of size " << size << " failed"; } -void CaffeFreeHost(void* ptr) { +void CaffeFreeHost(void* ptr, device* device_context) { #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { - if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + if (device_context->backend() == BACKEND_CUDA) { #ifdef USE_CUDA cudaFreeHost(ptr); return; @@ -90,7 +90,7 @@ SyncedMemory::~SyncedMemory() { #endif // !CPU_ONLY // Free host memory if (cpu_ptr_ && own_cpu_data_) { - CaffeFreeHost(cpu_ptr_); + CaffeFreeHost(cpu_ptr_, device_); cpu_ptr_ = nullptr; } } @@ -98,7 +98,7 @@ SyncedMemory::~SyncedMemory() { inline void SyncedMemory::to_cpu() { switch (head_) { case UNINITIALIZED: { - CaffeMallocHost(&cpu_ptr_, size_); + CaffeMallocHost(&cpu_ptr_, size_, device_); caffe_memset(size_, 0, cpu_ptr_); head_ = HEAD_AT_CPU; own_cpu_data_ = true; @@ -107,7 +107,7 @@ inline void SyncedMemory::to_cpu() { case HEAD_AT_GPU: { #ifndef CPU_ONLY if (cpu_ptr_ == nullptr) { - CaffeMallocHost(&cpu_ptr_, size_); + CaffeMallocHost(&cpu_ptr_, size_, device_); own_cpu_data_ = true; } if (device_->backend() == Backend::BACKEND_CUDA) { @@ -228,7 +228,7 @@ const void* SyncedMemory::cpu_data() { void SyncedMemory::set_cpu_data(void* data) { CHECK(data); if (cpu_ptr_ && own_cpu_data_) { - CaffeFreeHost(cpu_ptr_); + CaffeFreeHost(cpu_ptr_, device_); } cpu_ptr_ = data; head_ = HEAD_AT_CPU; diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 974e46fbd23..4153c1eb68f 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -19,7 +19,7 @@ class ArgMaxLayerTest : public CPUDeviceTest { : blob_bottom_(new Blob(10, 10, 20, 20)), blob_top_(new Blob()), top_k_(5) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_batch_reindex_layer.cpp b/src/caffe/test/test_batch_reindex_layer.cpp index 42d7c744ba0..fd6b3d4355a 100644 --- a/src/caffe/test/test_batch_reindex_layer.cpp +++ b/src/caffe/test/test_batch_reindex_layer.cpp @@ -23,7 +23,7 @@ class BatchReindexLayerTest : public MultiDeviceTest { blob_top_(new Blob()) { } virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); vector sz; sz.push_back(5); sz.push_back(4); diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index 2e7bffd10fd..5663a9b1845 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -34,10 +34,10 @@ TEST_F(CommonTest, TestBrewMode) { TEST_F(CommonTest, TestRandSeedCPU) { SyncedMemory data_a(10 * sizeof(int), Caffe::GetDefaultDevice()); SyncedMemory data_b(10 * sizeof(int), Caffe::GetDefaultDevice()); - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); caffe_rng_bernoulli(10, 0.5, static_cast(data_a.mutable_cpu_data())); - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); caffe_rng_bernoulli(10, 0.5, static_cast(data_b.mutable_cpu_data())); for (int i = 0; i < 10; ++i) { @@ -57,10 +57,10 @@ TEST_F(CommonTest, TestRandSeedGPU) { Caffe::GetDefaultDevice()); SyncedMemory data_b(10 * sizeof(unsigned int), Caffe::GetDefaultDevice()); - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); CURAND_CHECK(curandGenerate(Caffe::curand_generator(), static_cast(data_a.mutable_gpu_data()), 10)); - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); CURAND_CHECK(curandGenerate(Caffe::curand_generator(), static_cast(data_b.mutable_gpu_data()), 10)); for (int i = 0; i < 10; ++i) { diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index 6fa7dc672fc..c577a0540c9 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -175,7 +175,7 @@ class DataLayerTest : public MultiDeviceTest { const Dtype scale = 3; LayerParameter param; param.set_phase(phase); - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); DataParameter* data_param = param.mutable_data_param(); data_param->set_batch_size(5); @@ -239,7 +239,7 @@ class DataLayerTest : public MultiDeviceTest { transform_param->set_mirror(true); // Get crop sequence with Caffe seed 1701. - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); vector > crop_sequence; { DataLayer layer1(param); @@ -262,7 +262,7 @@ class DataLayerTest : public MultiDeviceTest { // Get crop sequence after reseeding Caffe with 1701. // Check that the sequence is the same as the original. - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); DataLayer layer2(param); layer2.SetUp(blob_bottom_vec_, blob_top_vec_); for (int_tp iter = 0; iter < 2; ++iter) { @@ -294,7 +294,7 @@ class DataLayerTest : public MultiDeviceTest { transform_param->set_mirror(true); // Get crop sequence with Caffe seed 1701, srand seed 1701. - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); srand(seed_); vector > crop_sequence; { diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index 0fcedffc27e..b59b7e3e4b3 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -44,7 +44,7 @@ class DataTransformTest : public ::testing::Test { new DataTransformer(transform_param, phase, Caffe::GetDefaultDevice()); const int_tp crop_size = transform_param.crop_size(); - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); transformer->InitRand(); Blob* blob = new Blob(1, datum.channels(), datum.height(), datum.width()); diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index d34ea8be381..f59523ce9ab 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -24,7 +24,7 @@ class EltwiseLayerTest : public MultiDeviceTest { blob_bottom_c_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { // fill the values - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); FillerParameter filler_param; UniformFiller filler(filler_param); filler.Fill(this->blob_bottom_a_); diff --git a/src/caffe/test/test_filter_layer.cpp b/src/caffe/test/test_filter_layer.cpp index c912a163640..4eb9a396701 100644 --- a/src/caffe/test/test_filter_layer.cpp +++ b/src/caffe/test/test_filter_layer.cpp @@ -25,7 +25,7 @@ class FilterLayerTest : public MultiDeviceTest { blob_top_labels_(new Blob()) {} virtual void SetUp() { // fill the values - Caffe::set_random_seed(1890); + Caffe::set_random_seed(1890, Caffe::GetDefaultDevice()); FillerParameter filler_param; GaussianFiller filler(filler_param); // fill the selector blob diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index dbd3d233d59..c3a2bfe137a 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -19,7 +19,7 @@ class FlattenLayerTest : public MultiDeviceTest { FlattenLayerTest() : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index 2d7cd785aa0..b8002806a53 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -183,7 +183,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { if (snapshot) { proto << "snapshot: " << num_iters << " "; } - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitSolverFromProtoString(proto.str()); if (from_snapshot != NULL) { this->solver_->Restore(from_snapshot); @@ -198,11 +198,11 @@ class GradientBasedSolverTest : public MultiDeviceTest { LOG(INFO)<< "Multi-GPU test on " << devices << " devices"; vector gpus; // put current device at the beginning - device* dc = Caffe::GetDevice(solver_->param().device_id()); + device* dc = Caffe::GetDevice(solver_->param().device_id(), true); gpus.push_back(dc); for (int i = 0; gpus.size() < devices; ++i) { if (i != device_id) - gpus.push_back(Caffe::GetDevice(i)); + gpus.push_back(Caffe::GetDevice(i, true)); } Caffe::set_solver_count(gpus.size()); this->sync_.reset(new P2PSync( diff --git a/src/caffe/test/test_hinge_loss_layer.cpp b/src/caffe/test/test_hinge_loss_layer.cpp index febe1135180..ffe46ae9a65 100644 --- a/src/caffe/test/test_hinge_loss_layer.cpp +++ b/src/caffe/test/test_hinge_loss_layer.cpp @@ -23,7 +23,7 @@ class HingeLossLayerTest : public MultiDeviceTest { blob_bottom_label_(new Blob(10, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); FillerParameter filler_param; filler_param.set_std(10); GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index 9f3c9f9f486..f836ea21366 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -20,7 +20,7 @@ class Im2colLayerTest : public MultiDeviceTest { : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { // fill the values - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp index ff65503dc66..6f8cbd7d14a 100644 --- a/src/caffe/test/test_image_data_layer.cpp +++ b/src/caffe/test/test_image_data_layer.cpp @@ -28,7 +28,7 @@ class ImageDataLayerTest : public MultiDeviceTest { virtual void SetUp() { blob_top_vec_.push_back(blob_top_data_); blob_top_vec_.push_back(blob_top_label_); - Caffe::set_random_seed(seed_); + Caffe::set_random_seed(seed_, Caffe::GetDefaultDevice()); // Create test input file. MakeTempFilename(&filename_); std::ofstream outfile(filename_.c_str(), std::ofstream::out); diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp index 9c3f9cde056..eb8a39aaf78 100644 --- a/src/caffe/test/test_infogain_loss_layer.cpp +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -22,7 +22,7 @@ class InfogainLossLayerTest : public MultiDeviceTest { blob_bottom_label_(new Blob(10, 1, 1, 1)), blob_bottom_infogain_(new Blob(1, 1, 5, 5)), blob_top_loss_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); FillerParameter filler_param; PositiveUnitballFiller filler(filler_param); filler.Fill(this->blob_bottom_data_); diff --git a/src/caffe/test/test_internal_thread.cpp b/src/caffe/test/test_internal_thread.cpp index 0d4764bc079..51154a2bf62 100644 --- a/src/caffe/test/test_internal_thread.cpp +++ b/src/caffe/test/test_internal_thread.cpp @@ -34,17 +34,17 @@ class TestThreadB : public InternalThread { TEST_F(InternalThreadTest, TestRandomSeed) { TestThreadA t1; - Caffe::set_random_seed(9658361); + Caffe::set_random_seed(9658361, Caffe::GetDefaultDevice()); t1.StartInternalThread(Caffe::Get().GetDefaultDevice()); t1.StopInternalThread(); TestThreadA t2; - Caffe::set_random_seed(9658361); + Caffe::set_random_seed(9658361, Caffe::GetDefaultDevice()); t2.StartInternalThread(Caffe::Get().GetDefaultDevice()); t2.StopInternalThread(); TestThreadB t3; - Caffe::set_random_seed(3435563); + Caffe::set_random_seed(3435563, Caffe::GetDefaultDevice()); t3.StartInternalThread(Caffe::Get().GetDefaultDevice()); t3.StopInternalThread(); } diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index 52b61533dbc..95b9b421dab 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -31,7 +31,7 @@ class LRNLayerTest : public MultiDeviceTest { blob_bottom_(new Blob()), blob_top_(new Blob()) {} virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); blob_bottom_->Reshape(2, 7, 3, 3); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index a15d5802adf..384e3ea8e56 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -29,7 +29,7 @@ class MathFunctionsTest : public MultiDeviceTest { } virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); this->blob_bottom_->Reshape(11, 17, 19, 23); this->blob_top_->Reshape(11, 17, 19, 23); // fill the values diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index b8b0cb0b167..f662201307d 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -21,7 +21,7 @@ class MaxPoolingDropoutTest : public MultiDeviceTest { : blob_bottom_(new Blob()), blob_top_(new Blob()) {} virtual void SetUp() { - Caffe::set_random_seed(1703); + Caffe::set_random_seed(1703, Caffe::GetDefaultDevice()); blob_bottom_->Reshape(2, 3, 6, 5); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index f4611f233ec..7b1725c2205 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -19,7 +19,7 @@ class MultinomialLogisticLossLayerTest : public CPUDeviceTest { : blob_bottom_data_(new Blob(10, 5, 1, 1)), blob_bottom_label_(new Blob(10, 1, 1, 1)), blob_top_loss_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); // fill the values FillerParameter filler_param; PositiveUnitballFiller filler(filler_param); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 10bf1d3a341..b48f4341e16 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -26,7 +26,7 @@ class NetTest : public MultiDeviceTest { virtual void InitNetFromProtoString(const string& proto) { NetParameter param; CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); - net_.reset(new Net(param)); + net_.reset(new Net(param, Caffe::GetDefaultDevice())); } virtual void CopyNetBlobs(const bool copy_diff, @@ -818,7 +818,7 @@ TYPED_TEST(NetTest, TestLossWeight) { // In this case, the loss weight for the 'EuclideanLoss' layer should default // to 1. vector*> bottom; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); const bool kForceBackward = true; this->InitUnsharedWeightsNet(NULL, NULL, kForceBackward); const Dtype loss = this->net_->ForwardBackward(bottom); @@ -834,7 +834,7 @@ TYPED_TEST(NetTest, TestLossWeight) { const int_tp kNumLossWeights = 6; Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; for (int_tp i = 0; i < kNumLossWeights; ++i) { - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward); const Dtype weighted_loss = this->net_->ForwardBackward(bottom); const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); @@ -866,7 +866,7 @@ TYPED_TEST(NetTest, TestLossWeight) { TYPED_TEST(NetTest, TestLossWeightMidNet) { typedef typename TypeParam::Dtype Dtype; vector*> bottom; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); const bool kForceBackward = true; Dtype loss_weight = 0; Dtype midnet_loss_weight = 1; @@ -884,7 +884,7 @@ TYPED_TEST(NetTest, TestLossWeightMidNet) { const int_tp kNumLossWeights = 6; Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; for (int_tp i = 0; i < kNumLossWeights; ++i) { - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i], kForceBackward); const Dtype weighted_loss = this->net_->ForwardBackward(bottom); @@ -913,7 +913,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { // 'InnerProduct' weight 1. loss_weight = 1; midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, kForceBackward); const Dtype loss = this->net_->ForwardBackward(bottom); @@ -925,7 +925,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { loss_weight = 2; midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, kForceBackward); const Dtype loss_main_2 = this->net_->ForwardBackward(bottom); @@ -936,7 +936,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { loss_weight = 3; midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, kForceBackward); const Dtype loss_main_3 = this->net_->ForwardBackward(bottom); @@ -971,7 +971,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { loss_weight = 1; midnet_loss_weight = 2; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, kForceBackward); const Dtype loss_midnet_2 = this->net_->ForwardBackward(bottom); @@ -980,7 +980,7 @@ TYPED_TEST(NetTest, TestComboLossWeight) { loss_weight = 1; midnet_loss_weight = 3; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, kForceBackward); const Dtype loss_midnet_3 = this->net_->ForwardBackward(bottom); @@ -1100,7 +1100,7 @@ TYPED_TEST(NetTest, TestSharedWeightsDiffNet) { TYPED_TEST(NetTest, TestSharedWeightsUpdate) { typedef typename TypeParam::Dtype Dtype; - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitDiffDataSharedWeightsNet(); vector*> bottom; EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); @@ -1136,7 +1136,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { // location (because ... who knows). EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitDiffDataUnsharedWeightsNet(); EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); @@ -1184,7 +1184,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { typedef typename TypeParam::Dtype Dtype; // Create a net with weight sharing; Update it once. - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitDiffDataSharedWeightsNet(); vector*> bottom; EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); @@ -1209,7 +1209,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { // Reinitialize the net and copy parameters from net_param, as in // Solver::Restore. - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); this->InitDiffDataSharedWeightsNet(); this->net_->CopyTrainedLayersFrom(net_param); ip1_weights = this->net_->layers()[1]->blobs()[0].get(); @@ -1234,7 +1234,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { const Dtype* kLossWeight2 = NULL; // Run the net with all params learned; check that gradients are non-zero. - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); Dtype blobs_lr_w1 = 1, blobs_lr_w2 = 1, blobs_lr_b1 = 2, blobs_lr_b2 = 2; this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); @@ -1254,7 +1254,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { // Change the learning rates to different non-zero values; should see same // gradients. - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); blobs_lr_w1 *= 2, blobs_lr_w2 *= 2, blobs_lr_b1 *= 2, blobs_lr_b2 *= 2; this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); @@ -1270,7 +1270,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { // Change a subset of the learning rates to zero; check that we see zero // gradients for those. - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); blobs_lr_w1 = 1, blobs_lr_w2 = 0, blobs_lr_b1 = 0, blobs_lr_b2 = 1; this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); @@ -1289,7 +1289,7 @@ TYPED_TEST(NetTest, TestParamPropagateDown) { } // Change the opposite subset of the learning rates to zero. - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); blobs_lr_w1 = 0, blobs_lr_w2 = 1, blobs_lr_b1 = 1, blobs_lr_b2 = 0; this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); @@ -2264,7 +2264,7 @@ TYPED_TEST(NetTest, TestReshape) { // We set up bottom blobs of two different sizes, switch between // them, check that forward and backward both run and the results // are the same, and check that the output shapes change. - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); Caffe::set_mode(Caffe::CPU); FillerParameter filler_param; filler_param.set_std(1); diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 4bae09b6ccc..4f930a02596 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -40,7 +40,7 @@ class NeuronLayerTest : public MultiDeviceTest { NeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 57dfb9bc122..84195be54c0 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -26,7 +26,7 @@ class PoolingLayerTest : public MultiDeviceTest { blob_top_(new Blob()), blob_top_mask_(new Blob()) {} virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); blob_bottom_->Reshape(2, 3, 6, 5); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_power_layer.cpp b/src/caffe/test/test_power_layer.cpp index 0e112f9e3f1..61e1d4d9288 100644 --- a/src/caffe/test/test_power_layer.cpp +++ b/src/caffe/test/test_power_layer.cpp @@ -21,7 +21,7 @@ class PowerLayerTest : public MultiDeviceTest { PowerLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 002c89f8238..034f970f2b0 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -32,7 +32,7 @@ class RandomNumberGeneratorTest : public ::testing::Test { Caffe::GetDefaultDevice())) {} virtual void SetUp() { - Caffe::set_random_seed(this->seed_); + Caffe::set_random_seed(this->seed_, Caffe::GetDefaultDevice()); } Dtype sample_mean(const Dtype* const seqs, const int_tp sample_size) { diff --git a/src/caffe/test/test_reduction_layer.cpp b/src/caffe/test/test_reduction_layer.cpp index 3e13ac92493..152535cfa8c 100644 --- a/src/caffe/test/test_reduction_layer.cpp +++ b/src/caffe/test/test_reduction_layer.cpp @@ -21,7 +21,7 @@ class ReductionLayerTest : public MultiDeviceTest { : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { // fill the values - Caffe::set_random_seed(1702); + Caffe::set_random_seed(1702, Caffe::GetDefaultDevice()); FillerParameter filler_param; UniformFiller filler(filler_param); filler.Fill(this->blob_bottom_); diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index 19605815ba7..c51f2060c1d 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -24,7 +24,7 @@ class SliceLayerTest : public MultiDeviceTest { blob_top_2_(new Blob()) {} virtual void SetUp() { // fill the values - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); diff --git a/src/caffe/test/test_spp_layer.cpp b/src/caffe/test/test_spp_layer.cpp index 59a3af2aec1..b29f752b1ce 100644 --- a/src/caffe/test/test_spp_layer.cpp +++ b/src/caffe/test/test_spp_layer.cpp @@ -28,7 +28,7 @@ class SPPLayerTest : public MultiDeviceTest { blob_bottom_3_(new Blob()), blob_top_(new Blob()) {} virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); blob_bottom_->Reshape(2, 3, 9, 8); blob_bottom_2_->Reshape(4, 3, 1024, 765); blob_bottom_3_->Reshape(10, 3, 7, 7); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index 0cae64328e1..95996805ad2 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -24,7 +24,7 @@ class StochasticPoolingLayerTest : public MultiDeviceTest { : blob_bottom_(new Blob()), blob_top_(new Blob()) {} virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); blob_bottom_->Reshape(2, 3, 6, 5); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp index a464a6e0a5a..5b42f211a5a 100644 --- a/src/caffe/test/test_tanh_layer.cpp +++ b/src/caffe/test/test_tanh_layer.cpp @@ -35,7 +35,7 @@ class TanHLayerTest : public MultiDeviceTest { TanHLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); FillerParameter filler_param; blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); diff --git a/src/caffe/test/test_threshold_layer.cpp b/src/caffe/test/test_threshold_layer.cpp index 87e0149268e..42307d11d24 100644 --- a/src/caffe/test/test_threshold_layer.cpp +++ b/src/caffe/test/test_threshold_layer.cpp @@ -18,7 +18,7 @@ class ThresholdLayerTest : public MultiDeviceTest { ThresholdLayerTest() : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 3dab90e16f9..90450a79c62 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -227,7 +227,7 @@ int train() { caffe::P2PSync sync(solver, NULL, solver->param()); std::vector devices; for (int_tp i = 0; i < gpus.size(); ++i) { - devices.push_back(Caffe::Get().GetDevice(i)); + devices.push_back(Caffe::Get().GetDevice(i, true)); } sync.run(devices); } else { @@ -260,7 +260,7 @@ int test() { Caffe::set_mode(Caffe::CPU); } // Instantiate the caffe net. - Net caffe_net(FLAGS_model, caffe::TEST); + Net caffe_net(FLAGS_model, caffe::TEST, Caffe::GetDefaultDevice()); caffe_net.CopyTrainedLayersFrom(FLAGS_weights); LOG(INFO) << "Running for " << FLAGS_iterations << " iterations."; @@ -330,7 +330,7 @@ int time() { Caffe::set_mode(Caffe::CPU); } // Instantiate the caffe net. - Net caffe_net(FLAGS_model, caffe::TRAIN); + Net caffe_net(FLAGS_model, caffe::TRAIN, Caffe::GetDefaultDevice()); // Do a clean forward and backward pass, so that memory allocation are done // and future iterations will be more stable. diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index 1299b72bc2d..e7521e7977b 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -95,7 +95,8 @@ int feature_extraction_pipeline(int argc, char** argv) { */ std::string feature_extraction_proto(argv[++arg_pos]); boost::shared_ptr > feature_extraction_net( - new Net(feature_extraction_proto, caffe::TEST)); + new Net(feature_extraction_proto, caffe::TEST, + Caffe::GetDefaultDevice())); feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto); std::string extract_feature_blob_names(argv[++arg_pos]); From 0cc7dc207b1ed60a49621d77cb72a4feedf57169 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 17 Dec 2015 02:44:17 +0100 Subject: [PATCH 223/254] Updated device initialization. --- src/caffe/common.cpp | 10 -------- src/caffe/greentea/cl_kernels.cpp | 4 ++-- src/caffe/greentea/cl_kernels/softmax_loss.cl | 23 +++++++++---------- src/caffe/layers/softmax_loss_layer.cu | 8 ++----- 4 files changed, 15 insertions(+), 30 deletions(-) diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index b1fe3e1550c..57ef08921e3 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -415,11 +415,6 @@ void Caffe::SetDevices(std::vector device_ids) { Get().devices_.emplace_back(dev); dev->Init(); ++initcount; - } else { - // Temporary until device abstraction is done - shared_ptr dev(new device()); - Get().devices_.emplace_back(dev); - ++initcount; } } } @@ -459,11 +454,6 @@ void Caffe::SetDevices(std::vector device_ids) { Get().devices_.emplace_back(dev); dev->Init(); ++initcount; - } else { - // Temporary until device abstraction is done - shared_ptr dev(new device()); - Get().devices_.emplace_back(dev); - ++initcount; } } greentea_device_count++; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index ac52e4ad66b..e48e15657d9 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -29,7 +29,7 @@ std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT @@ -53,7 +53,7 @@ std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* kstride,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (kstride[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % kstride[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - kstride[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += kstride[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n }\n}"; // NOLINT +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; diff --git a/src/caffe/greentea/cl_kernels/softmax_loss.cl b/src/caffe/greentea/cl_kernels/softmax_loss.cl index 5991eebf351..10826492259 100644 --- a/src/caffe/greentea/cl_kernels/softmax_loss.cl +++ b/src/caffe/greentea/cl_kernels/softmax_loss.cl @@ -41,20 +41,19 @@ __kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads, for (int_tp index = get_global_id(0); index < nthreads; index += get_global_size(0)) { - { - const int_tp n = index / spatial_dim; - const int_tp s = index % spatial_dim; - const int_tp label_value = (int_tp) (label[n * spatial_dim + s]); - if (has_ignore_label_ == 1 && label_value == ignore_label_) { - for (int_tp c = 0; c < channels; ++c) { - bottom_diff[n * dim + c * spatial_dim + s] = 0; - } - counts[index] = 0; - } else { - bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; - counts[index] = 1; + const int_tp n = index / spatial_dim; + const int_tp s = index % spatial_dim; + const int_tp label_value = (int_tp) (label[n * spatial_dim + s]); + + if (has_ignore_label_ == 1 && label_value == ignore_label_) { + for (int_tp c = 0; c < channels; ++c) { + bottom_diff[n * dim + c * spatial_dim + s] = 0; } + counts[index] = 0; + } else { + bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; + counts[index] = 1; } } } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 9dee37ae768..e58484d9a19 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -72,7 +72,7 @@ void SoftmaxWithLossLayer::Forward_gpu( } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); - if (top.size() == 2) { + if (top.size() >= 2) { top[1]->ShareData(prob_); } @@ -113,7 +113,7 @@ void SoftmaxWithLossLayer::Forward_gpu( } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); - if (top.size() == 2) { + if (top.size() >= 2) { top[1]->ShareData(prob_); } #endif // USE_GREENTEA @@ -177,8 +177,6 @@ void SoftmaxWithLossLayer::Backward_gpu( ignore_label_, counts); Dtype valid_count = -1; - // Only launch another CUDA kernel if we actually need the count of valid - // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); @@ -213,8 +211,6 @@ void SoftmaxWithLossLayer::Backward_gpu( ctx.get_queue()); Dtype valid_count = -1; - // Only launch another CUDA kernel if we actually need the count of valid - // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum(this->device_->id(), From 03276d43b32fd46edeecca7343c9c086b5723382 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 19 Dec 2015 04:03:11 +0100 Subject: [PATCH 224/254] updated pycaffe init --- python/caffe/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index 64eb2143489..fda842539e8 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,5 +1,5 @@ from .pycaffe import SolverParameter, Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver -from ._caffe import set_mode_cpu, set_mode_gpu, set_device, set_devices, select_device, enumerate_devices, Layer, get_solver, layer_type_list +from ._caffe import set_mode_cpu, set_mode_gpu, set_device, set_devices, select_device, enumerate_devices, Layer, get_solver, get_solver_from_file, layer_type_list from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier from .detector import Detector From 2de1d73941098fe6c8e3b16bc84170fa3d0b511e Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 21 Dec 2015 21:38:22 +0100 Subject: [PATCH 225/254] Euclidean loss rescaling. --- src/caffe/layers/euclidean_loss_layer.cpp | 8 ++++---- src/caffe/layers/euclidean_loss_layer.cu | 12 +++++------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 204facee16b..e57d51b820d 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -23,10 +23,10 @@ void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, // Scale the error element-wise if (bottom.size() == 3) { caffe_mul(count, diff_.mutable_cpu_data(), bottom[2]->gpu_data(), - diff_.mutable_gpu_data()); + diff_.mutable_cpu_data()); } Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data()); - Dtype loss = dot / bottom[0]->shape(0) / Dtype(2); + Dtype loss = dot / bottom[0]->count(0) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } @@ -38,11 +38,11 @@ void EuclideanLossLayer::Backward_cpu( if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); - caffe_cpu_axpby(bottom[i]->count(), // count + caffe_cpu_axpby(bottom[i]->count(), // count alpha, // alpha diff_.cpu_data(), // a Dtype(0), // beta - bottom[i]->mutable_cpu_diff()); // b + bottom[i]->mutable_cpu_diff()); // b } } } diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index 96e31884b8e..e41e40bf2ea 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -14,6 +14,7 @@ template void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { int_tp count = bottom[0]->count(); + Dtype dot; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA @@ -26,8 +27,6 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, } Dtype dot; caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); - Dtype loss = dot / bottom[0]->shape(0) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; #endif // USE_CUDA } else { #ifdef USE_GREENTEA @@ -42,14 +41,13 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (bottom[2]->gpu_data()), 0, (cl_mem) (diff_.mutable_gpu_data()), 0); } - Dtype dot; greentea_gpu_dot(this->device_->id(), count, (cl_mem) (diff_.gpu_data()), 0, (cl_mem) (diff_.gpu_data()), 0, &dot); - Dtype loss = dot / bottom[0]->shape(0) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; #endif // USE_GREENTEA } + Dtype loss = dot / bottom[0]->count(0) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; } template @@ -62,11 +60,11 @@ void EuclideanLossLayer::Backward_gpu( const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA - caffe_gpu_axpby(bottom[i]->count(), // count + caffe_gpu_axpby(bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // a Dtype(0), // beta - bottom[i]->mutable_gpu_diff()); // b + bottom[i]->mutable_gpu_diff()); // b #endif // USE_CUDA } else { #ifdef USE_GREENTEA From a2473413988bf480a2d82674b3213ba7e595712e Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 21 Dec 2015 22:13:48 +0100 Subject: [PATCH 226/254] Euclidean loss cast update. --- src/caffe/layers/euclidean_loss_layer.cpp | 2 +- src/caffe/layers/euclidean_loss_layer.cu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index e57d51b820d..9edb59d7690 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -26,7 +26,7 @@ void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, diff_.mutable_cpu_data()); } Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data()); - Dtype loss = dot / bottom[0]->count(0) / Dtype(2); + Dtype loss = dot / static_cast(bottom[0]->count(0)) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index e41e40bf2ea..df3c0c1f31c 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -46,7 +46,7 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, (cl_mem) (diff_.gpu_data()), 0, &dot); #endif // USE_GREENTEA } - Dtype loss = dot / bottom[0]->count(0) / Dtype(2); + Dtype loss = dot / static_cast(bottom[0]->count(0)) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } From d96e14803be85a76e5fa2125dec4943e4134964b Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 21 Dec 2015 22:20:09 +0100 Subject: [PATCH 227/254] Euclid zero loss bug. --- src/caffe/layers/euclidean_loss_layer.cu | 1 - 1 file changed, 1 deletion(-) diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index df3c0c1f31c..02e000506f9 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -25,7 +25,6 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_mul(count, diff_.mutable_gpu_data(), bottom[2]->gpu_data(), diff_.mutable_gpu_data()); } - Dtype dot; caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); #endif // USE_CUDA } else { From adf00499d23bf44f7aaeeb1e2ade8a32c208eec8 Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Tue, 22 Dec 2015 20:57:00 +0100 Subject: [PATCH 228/254] Fixed euclid scaling. --- src/caffe/layers/euclidean_loss_layer.cpp | 3 ++- src/caffe/layers/euclidean_loss_layer.cu | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 9edb59d7690..4714b4a266c 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -37,7 +37,8 @@ void EuclideanLossLayer::Backward_cpu( for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); + const Dtype alpha = sign * top[0]->cpu_diff()[0] + / static_cast(bottom[0]->count(0)); caffe_cpu_axpby(bottom[i]->count(), // count alpha, // alpha diff_.cpu_data(), // a diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index 02e000506f9..07b1e7fda8e 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -56,7 +56,8 @@ void EuclideanLossLayer::Backward_gpu( for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); + const Dtype alpha = sign * top[0]->cpu_diff()[0] + / static_cast(bottom[0]->count(0)); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_axpby(bottom[i]->count(), // count From 615f802277c928e37c5b60dbb5c14a8605b1415b Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 23 Dec 2015 23:15:38 +0100 Subject: [PATCH 229/254] Malis loss averaging. --- src/caffe/layers/euclidean_loss_layer.cpp | 3 ++- src/caffe/layers/euclidean_loss_layer.cu | 3 ++- src/caffe/layers/malis_loss_layer.cpp | 6 +++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 9edb59d7690..4714b4a266c 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -37,7 +37,8 @@ void EuclideanLossLayer::Backward_cpu( for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); + const Dtype alpha = sign * top[0]->cpu_diff()[0] + / static_cast(bottom[0]->count(0)); caffe_cpu_axpby(bottom[i]->count(), // count alpha, // alpha diff_.cpu_data(), // a diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index 02e000506f9..07b1e7fda8e 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -56,7 +56,8 @@ void EuclideanLossLayer::Backward_gpu( for (int_tp i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->shape(0); + const Dtype alpha = sign * top[0]->cpu_diff()[0] + / static_cast(bottom[0]->count(0)); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_axpby(bottom[i]->count(), // count diff --git a/src/caffe/layers/malis_loss_layer.cpp b/src/caffe/layers/malis_loss_layer.cpp index 4a9e514d210..042fdd2f492 100644 --- a/src/caffe/layers/malis_loss_layer.cpp +++ b/src/caffe/layers/malis_loss_layer.cpp @@ -398,7 +398,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, dloss_neg_.mutable_cpu_data() + batch_offset * batch, &loss_out, &classerr_out, &rand_index_out); - loss += loss_out; + loss += 0.5 * loss_out; // std::cout << "NEG: " << loss_out << std::endl; Malis(&affinity_data_pos[batch_offset * batch], conn_num_dims_, @@ -407,7 +407,7 @@ void MalisLossLayer::Forward_cpu(const vector*>& bottom, dloss_pos_.mutable_cpu_data() + batch_offset * batch, &loss_out, &classerr_out, &rand_index_out); - loss += loss_out; + loss += 0.5 * loss_out; // std::cout << "POS: " << loss_out << std::endl; } @@ -434,7 +434,7 @@ void MalisLossLayer::Backward_cpu(const vector*>& top, #pragma omp parallel for for (int_tp i = 0; i < bottom[0]->count(); ++i) { - bottom_diff[i] = -(dloss_neg_data[i] + dloss_pos_data[i]); + bottom_diff[i] = -(dloss_neg_data[i] + dloss_pos_data[i]) / 2.0; } } } From 2104120f6e868879e4f4c6f93cb93ea397527d10 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 27 Dec 2015 04:52:26 +0100 Subject: [PATCH 230/254] CuDNN ND support --- include/caffe/layers/cudnn_conv_layer.hpp | 5 + include/caffe/layers/cudnn_lcn_layer.hpp | 4 + include/caffe/layers/cudnn_lrn_layer.hpp | 4 + include/caffe/layers/cudnn_pooling_layer.hpp | 4 + include/caffe/layers/cudnn_relu_layer.hpp | 4 + include/caffe/layers/cudnn_sigmoid_layer.hpp | 4 + include/caffe/layers/cudnn_softmax_layer.hpp | 4 + include/caffe/layers/cudnn_tanh_layer.hpp | 4 + include/caffe/util/cudnn.hpp | 149 ++++++++++++++++--- include/caffe/util/device_alternate.hpp | 3 - src/caffe/layers/cudnn_conv_layer.cpp | 47 +++--- src/caffe/layers/cudnn_conv_layer.cu | 4 +- src/caffe/layers/cudnn_lcn_layer.cpp | 20 ++- src/caffe/layers/cudnn_lrn_layer.cpp | 21 ++- src/caffe/layers/cudnn_pooling_layer.cpp | 24 ++- src/caffe/layers/cudnn_relu_layer.cpp | 14 +- src/caffe/layers/cudnn_sigmoid_layer.cpp | 14 +- src/caffe/layers/cudnn_softmax_layer.cpp | 23 ++- src/caffe/layers/cudnn_tanh_layer.cpp | 14 +- src/caffe/test/test_convolution_layer.cpp | 2 +- src/caffe/test/test_lrn_layer.cpp | 2 +- src/caffe/test/test_neuron_layer.cpp | 18 +-- src/caffe/test/test_pooling_layer.cpp | 18 +-- src/caffe/test/test_softmax_layer.cpp | 4 +- 24 files changed, 280 insertions(+), 130 deletions(-) diff --git a/include/caffe/layers/cudnn_conv_layer.hpp b/include/caffe/layers/cudnn_conv_layer.hpp index 93bdfaeefc7..209e3f9cb9a 100644 --- a/include/caffe/layers/cudnn_conv_layer.hpp +++ b/include/caffe/layers/cudnn_conv_layer.hpp @@ -9,9 +9,14 @@ #include "caffe/layers/conv_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN + /* * @brief cuDNN implementation of ConvolutionLayer. * Fallback to ConvolutionLayer for CPU mode. diff --git a/include/caffe/layers/cudnn_lcn_layer.hpp b/include/caffe/layers/cudnn_lcn_layer.hpp index daac2139d2f..28ad7dc807c 100644 --- a/include/caffe/layers/cudnn_lcn_layer.hpp +++ b/include/caffe/layers/cudnn_lcn_layer.hpp @@ -10,6 +10,10 @@ #include "caffe/layers/lrn_layer.hpp" #include "caffe/layers/power_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN diff --git a/include/caffe/layers/cudnn_lrn_layer.hpp b/include/caffe/layers/cudnn_lrn_layer.hpp index 41d750bfc22..c2db5dc75e1 100644 --- a/include/caffe/layers/cudnn_lrn_layer.hpp +++ b/include/caffe/layers/cudnn_lrn_layer.hpp @@ -9,6 +9,10 @@ #include "caffe/layers/lrn_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN diff --git a/include/caffe/layers/cudnn_pooling_layer.hpp b/include/caffe/layers/cudnn_pooling_layer.hpp index dc35e3ab74f..e7df07bf036 100644 --- a/include/caffe/layers/cudnn_pooling_layer.hpp +++ b/include/caffe/layers/cudnn_pooling_layer.hpp @@ -9,6 +9,10 @@ #include "caffe/layers/pooling_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN diff --git a/include/caffe/layers/cudnn_relu_layer.hpp b/include/caffe/layers/cudnn_relu_layer.hpp index e01f568abc9..d7014834913 100644 --- a/include/caffe/layers/cudnn_relu_layer.hpp +++ b/include/caffe/layers/cudnn_relu_layer.hpp @@ -10,6 +10,10 @@ #include "caffe/layers/neuron_layer.hpp" #include "caffe/layers/relu_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN diff --git a/include/caffe/layers/cudnn_sigmoid_layer.hpp b/include/caffe/layers/cudnn_sigmoid_layer.hpp index 9c597958b0b..8be8cd2508c 100644 --- a/include/caffe/layers/cudnn_sigmoid_layer.hpp +++ b/include/caffe/layers/cudnn_sigmoid_layer.hpp @@ -10,6 +10,10 @@ #include "caffe/layers/neuron_layer.hpp" #include "caffe/layers/sigmoid_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN diff --git a/include/caffe/layers/cudnn_softmax_layer.hpp b/include/caffe/layers/cudnn_softmax_layer.hpp index 174368e413d..b80b6e8b29c 100644 --- a/include/caffe/layers/cudnn_softmax_layer.hpp +++ b/include/caffe/layers/cudnn_softmax_layer.hpp @@ -9,6 +9,10 @@ #include "caffe/layers/softmax_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN diff --git a/include/caffe/layers/cudnn_tanh_layer.hpp b/include/caffe/layers/cudnn_tanh_layer.hpp index c0f0053f71e..1eaf6612aa9 100644 --- a/include/caffe/layers/cudnn_tanh_layer.hpp +++ b/include/caffe/layers/cudnn_tanh_layer.hpp @@ -10,6 +10,10 @@ #include "caffe/layers/neuron_layer.hpp" #include "caffe/layers/tanh_layer.hpp" +#ifdef USE_CUDNN // cuDNN acceleration library. +#include "caffe/util/cudnn.hpp" +#endif + namespace caffe { #ifdef USE_CUDNN diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index aad7ad06a19..7199e833e69 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -4,6 +4,9 @@ #include + +#include + #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" @@ -64,35 +67,98 @@ template<> class dataType { }; template -inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) { +inline void createTensorNdDesc(cudnnTensorDescriptor_t* desc) { CUDNN_CHECK(cudnnCreateTensorDescriptor(desc)); } template -inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, - int_tp n, int_tp c, int_tp h, int_tp w, - int_tp stride_n, int_tp stride_c, int_tp stride_h, int_tp stride_w) { - CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(*desc, dataType::type, - n, c, h, w, stride_n, stride_c, stride_h, stride_w)); +inline void setTensorNdDesc(cudnnTensorDescriptor_t* desc, + const int_tp total_dims, + const int_tp* shape, const int_tp* stride) { + + std::vector shape_int(total_dims); + std::vector stride_int(total_dims); + + for (int_tp i = 0; i < total_dims; ++i) { + shape_int[i] = shape[i]; + stride_int[i] = stride[i]; + } + + const int* shape_ptr = &shape_int[0]; + const int* stride_ptr = &stride_int[0]; + + CUDNN_CHECK( + cudnnSetTensorNdDescriptor(*desc, dataType::type, total_dims, + shape_ptr, stride_ptr)); +} + +template +inline void setTensorNdDesc(cudnnTensorDescriptor_t* desc, + const int_tp total_dims, const int_tp* shape) { + + std::vector full_shape(total_dims); + std::vector full_stride(total_dims); + + for (int_tp i = total_dims - 1; i >= 0; --i) { + full_shape[i] = shape[i]; + if (i == total_dims - 1) { + full_stride[i] = 1; + } else { + full_stride[i] = full_stride[i + 1] * full_shape[i + 1]; + } + } + + setTensorNdDesc(desc, total_dims, + &full_shape[0], + &full_stride[0]); } template -inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc, - int_tp n, int_tp c, int_tp h, int_tp w) { - const int_tp stride_w = 1; - const int_tp stride_h = w * stride_w; - const int_tp stride_c = h * stride_h; - const int_tp stride_n = c * stride_c; - setTensor4dDesc(desc, n, c, h, w, - stride_n, stride_c, stride_h, stride_w); +inline void setTensorNdDesc(cudnnTensorDescriptor_t* desc, + const int_tp num_spatial_dims, + const int_tp n, const int_tp c, const int_tp* shape) { + + std::vector full_shape(num_spatial_dims + 2); + std::vector full_stride(num_spatial_dims + 2); + + full_shape[0] = n; + full_shape[1] = c; + + for (int_tp i = num_spatial_dims + 1; i >= 0; --i) { + full_shape[i] = i > 1 ? shape[i-2] : full_shape[i]; + if (i == num_spatial_dims + 1) { + full_stride[i] = 1; + } else { + full_stride[i] = full_stride[i + 1] * full_shape[i + 1]; + } + } + + setTensorNdDesc(desc, num_spatial_dims + 2, + &full_shape[0], + &full_stride[0]); } + template inline void createFilterDesc(cudnnFilterDescriptor_t* desc, - int_tp n, int_tp c, int_tp h, int_tp w) { + const int_tp num_spatial_dims, + const int_tp n, const int_tp c, const int_tp* shape) { + + std::vector shape_int(num_spatial_dims + 2); + + shape_int[0] = n; + shape_int[1] = c; + + for (int_tp i = 0; i < num_spatial_dims; ++i) { + shape_int[2+i] = shape[i]; + } + + const int* shape_ptr = &shape_int[0]; + CUDNN_CHECK(cudnnCreateFilterDescriptor(desc)); - CUDNN_CHECK(cudnnSetFilter4dDescriptor(*desc, dataType::type, - n, c, h, w)); + CUDNN_CHECK(cudnnSetFilterNdDescriptor(*desc, dataType::type, + num_spatial_dims + 2, + shape_ptr)); } template @@ -103,16 +169,33 @@ inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) { template inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, cudnnTensorDescriptor_t bottom, cudnnFilterDescriptor_t filter, - int_tp pad_h, int_tp pad_w, int_tp stride_h, int_tp stride_w) { - CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv, - pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION)); + const int_tp num_spatial_dims, const int_tp* pad, const int_tp* stride) { + + std::vector pad_int(num_spatial_dims); + std::vector stride_int(num_spatial_dims); + std::vector upscale_int(num_spatial_dims); + + for (int_tp i = 0; i < num_spatial_dims; ++i) { + pad_int[i] = pad[i]; + stride_int[i] = stride[i]; + upscale_int[i] = 1; + } + + const int* pad_ptr = &pad_int[0]; + const int* stride_ptr = &stride_int[0]; + const int* upscale_ptr = &upscale_int[0]; + + CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(*conv, num_spatial_dims, + pad_ptr, stride_ptr, upscale_ptr, CUDNN_CROSS_CORRELATION, + dataType::type)); } template inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, PoolingParameter_PoolMethod poolmethod, cudnnPoolingMode_t* mode, - int_tp h, int_tp w, int_tp pad_h, int_tp pad_w, - int_tp stride_h, int_tp stride_w) { + const int_tp num_spatial_dims, + const int_tp* shape, + const int_tp* pad, const int_tp* stride) { switch (poolmethod) { case PoolingParameter_PoolMethod_MAX: *mode = CUDNN_POOLING_MAX; @@ -124,8 +207,26 @@ inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc, LOG(FATAL) << "Unknown pooling method."; } CUDNN_CHECK(cudnnCreatePoolingDescriptor(pool_desc)); - CUDNN_CHECK(cudnnSetPooling2dDescriptor(*pool_desc, *mode, h, w, - pad_h, pad_w, stride_h, stride_w)); + + std::vector shape_int(num_spatial_dims); + std::vector pad_int(num_spatial_dims); + std::vector stride_int(num_spatial_dims); + + for (int_tp i = 0; i < num_spatial_dims; ++i) { + shape_int[i] = shape[i]; + pad_int[i] = pad[i]; + stride_int[i] = stride[i]; + } + + const int* shape_ptr = &shape_int[0]; + const int* pad_ptr = &pad_int[0]; + const int* stride_ptr = &stride_int[0]; + + CUDNN_CHECK(cudnnSetPoolingNdDescriptor(*pool_desc, *mode, + num_spatial_dims, + shape_ptr, + pad_ptr, + stride_ptr)); } } // namespace cudnn diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 1a0a488129d..548a6017a67 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -43,9 +43,6 @@ void classname::funcname##_##gpu(const vector*>& top, \ #include #include #include // cuda driver types -#ifdef USE_CUDNN // cuDNN acceleration library. -#include "caffe/util/cudnn.hpp" -#endif // // CUDA macros diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 18be019a554..33fec029179 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -60,19 +60,17 @@ void CuDNNConvolutionLayer::LayerSetUp( // Create filter descriptor. const int_tp* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int_tp kernel_h = kernel_shape_data[0]; - const int_tp kernel_w = kernel_shape_data[1]; - cudnn::createFilterDesc(&filter_desc_, + cudnn::createFilterDesc(&filter_desc_, this->num_spatial_axes_, this->num_output_ / this->group_, this->channels_ / this->group_, - kernel_h, kernel_w); + kernel_shape_data); // Create tensor descriptor(s) for data and corresponding convolution(s). for (int_tp i = 0; i < bottom.size(); i++) { cudnnTensorDescriptor_t bottom_desc; - cudnn::createTensor4dDesc(&bottom_desc); + cudnn::createTensorNdDesc(&bottom_desc); bottom_descs_.push_back(bottom_desc); cudnnTensorDescriptor_t top_desc; - cudnn::createTensor4dDesc(&top_desc); + cudnn::createTensorNdDesc(&top_desc); top_descs_.push_back(top_desc); cudnnConvolutionDescriptor_t conv_desc; cudnn::createConvolutionDesc(&conv_desc); @@ -81,7 +79,7 @@ void CuDNNConvolutionLayer::LayerSetUp( // Tensor descriptor for bias. if (this->bias_term_) { - cudnn::createTensor4dDesc(&bias_desc_); + cudnn::createTensorNdDesc(&bias_desc_); } handles_setup_ = true; @@ -97,35 +95,26 @@ void CuDNNConvolutionLayer::Reshape( << "Use 'engine: CAFFE' for general ND convolution."; bottom_offset_ = this->bottom_dim_ / this->group_; top_offset_ = this->top_dim_ / this->group_; - const int_tp height = bottom[0]->shape(this->channel_axis_ + 1); - const int_tp width = bottom[0]->shape(this->channel_axis_ + 2); - const int_tp height_out = top[0]->shape(this->channel_axis_ + 1); - const int_tp width_out = top[0]->shape(this->channel_axis_ + 2); const int_tp* pad_data = this->pad_.cpu_data(); - const int_tp pad_h = pad_data[0]; - const int_tp pad_w = pad_data[1]; const int_tp* stride_data = this->stride_.cpu_data(); - const int_tp stride_h = stride_data[0]; - const int_tp stride_w = stride_data[1]; // Specify workspace limit for kernels directly until we have a // planning strategy and a rewrite of Caffe's GPU memory mangagement uint_tp workspace_limit_bytes = 8*1024*1024; for (int_tp i = 0; i < bottom.size(); i++) { - cudnn::setTensor4dDesc(&bottom_descs_[i], + cudnn::setTensorNdDesc(&bottom_descs_[i], + bottom[i]->shape().size() - 2, this->num_, - this->channels_ / this->group_, height, width, - this->channels_ * height * width, - height * width, width, 1); - cudnn::setTensor4dDesc(&top_descs_[i], + this->channels_ / this->group_, + &(bottom[i]->shape()[this->channel_axis_ + 1])); + cudnn::setTensorNdDesc(&top_descs_[i], + top[0]->shape().size() - 2, this->num_, - this->num_output_ / this->group_, height_out, width_out, - this->num_output_ * this->out_spatial_dim_, - this->out_spatial_dim_, width_out, 1); + this->num_output_ / this->group_, + &(top[0]->shape()[this->channel_axis_ + 1])); cudnn::setConvolutionDesc(&conv_descs_[i], bottom_descs_[i], - filter_desc_, pad_h, pad_w, - stride_h, stride_w); + filter_desc_, this->num_spatial_axes_, pad_data, stride_data); // choose forward and backward algorithms + workspace(s) CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[0], @@ -224,10 +213,14 @@ void CuDNNConvolutionLayer::Reshape( } } + std::vector ones(this->num_spatial_axes_, 1); + const int_tp* ones_ptr = &ones[0]; + // Tensor descriptor for bias. if (this->bias_term_) { - cudnn::setTensor4dDesc(&bias_desc_, - 1, this->num_output_ / this->group_, 1, 1); + cudnn::setTensorNdDesc(&bias_desc_, + this->num_spatial_axes_, + 1, this->num_output_ / this->group_, ones_ptr); } } diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 464a4c70df7..2e6e5ad3ad7 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -82,7 +82,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); - CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3( + CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, @@ -100,7 +100,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - CUDNN_CHECK(cudnnConvolutionBackwardData_v3( + CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType::one, filter_desc_, weight + this->weight_offset_ * g, diff --git a/src/caffe/layers/cudnn_lcn_layer.cpp b/src/caffe/layers/cudnn_lcn_layer.cpp index 0783ae6e317..8eb99abfbe9 100644 --- a/src/caffe/layers/cudnn_lcn_layer.cpp +++ b/src/caffe/layers/cudnn_lcn_layer.cpp @@ -12,8 +12,8 @@ void CuDNNLCNLayer::LayerSetUp(const vector*>& bottom, CUDNN_CHECK(cudnnCreate(&handle_)); CUDNN_CHECK(cudnnCreateLRNDescriptor(&norm_desc_)); - cudnn::createTensor4dDesc(&bottom_desc_); - cudnn::createTensor4dDesc(&top_desc_); + cudnn::createTensorNdDesc(&bottom_desc_); + cudnn::createTensorNdDesc(&top_desc_); // create a LRN handle handles_setup_ = true; @@ -29,10 +29,18 @@ template void CuDNNLCNLayer::Reshape(const vector*>& bottom, const vector*>& top) { LRNLayer::Reshape(bottom, top); - cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), - this->channels_, this->height_, this->width_); - cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), - this->channels_, this->height_, this->width_); + std::vector shape; + + shape.push_back(bottom[0]->num()); + shape.push_back(this->channels_); + shape.push_back(this->height_); + shape.push_back(this->width_); + + + const int_tp* shape_ptr = &shape[0]; + + cudnn::setTensorNdDesc(&bottom_desc_, 4, shape_ptr); + cudnn::setTensorNdDesc(&top_desc_, 4, shape_ptr); CUDNN_CHECK(cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, k_)); // allocate / reallocate tempData buffers diff --git a/src/caffe/layers/cudnn_lrn_layer.cpp b/src/caffe/layers/cudnn_lrn_layer.cpp index 0495b802baf..ba31f970f7f 100644 --- a/src/caffe/layers/cudnn_lrn_layer.cpp +++ b/src/caffe/layers/cudnn_lrn_layer.cpp @@ -12,8 +12,8 @@ void CuDNNLRNLayer::LayerSetUp(const vector*>& bottom, CUDNN_CHECK(cudnnCreate(&handle_)); CUDNN_CHECK(cudnnCreateLRNDescriptor(&norm_desc_)); - cudnn::createTensor4dDesc(&bottom_desc_); - cudnn::createTensor4dDesc(&top_desc_); + cudnn::createTensorNdDesc(&bottom_desc_); + cudnn::createTensorNdDesc(&top_desc_); // create a LRN handle handles_setup_ = true; @@ -28,10 +28,19 @@ template void CuDNNLRNLayer::Reshape(const vector*>& bottom, const vector*>& top) { LRNLayer::Reshape(bottom, top); - cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), - this->channels_, this->height_, this->width_); - cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), - this->channels_, this->height_, this->width_); + + std::vector shape; + + shape.push_back(bottom[0]->num()); + shape.push_back(this->channels_); + shape.push_back(this->height_); + shape.push_back(this->width_); + + + const int_tp* shape_ptr = &shape[0]; + + cudnn::setTensorNdDesc(&bottom_desc_, 4, shape_ptr); + cudnn::setTensorNdDesc(&top_desc_, 4, shape_ptr); CUDNN_CHECK(cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, k_)); } diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp index 24f14780b4f..fb2ad6f927b 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cpp +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -10,12 +10,17 @@ void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { PoolingLayer::LayerSetUp(bottom, top); CUDNN_CHECK(cudnnCreate(&handle_)); - cudnn::createTensor4dDesc(&bottom_desc_); - cudnn::createTensor4dDesc(&top_desc_); + cudnn::createTensorNdDesc(&bottom_desc_); + cudnn::createTensorNdDesc(&top_desc_); + + const int_tp* kernel_data = this->kernel_shape_.cpu_data(); + const int_tp* pad_data = this->pad_.cpu_data(); + const int_tp* stride_data = this->stride_.cpu_data(); + cudnn::createPoolingDesc(&pooling_desc_, this->layer_param_.pooling_param().pool(), &mode_, - this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_, - this->stride_h_, this->stride_w_); + this->num_spatial_axes_, + kernel_data, pad_data, stride_data); handles_setup_ = true; } @@ -23,10 +28,13 @@ template void CuDNNPoolingLayer::Reshape(const vector*>& bottom, const vector*>& top) { PoolingLayer::Reshape(bottom, top); - cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), - this->channels_, this->height_, this->width_); - cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), - this->channels_, this->pooled_height_, this->pooled_width_); + + cudnn::setTensorNdDesc(&bottom_desc_, + bottom[0]->shape().size(), + &(bottom[0]->shape()[0])); + cudnn::setTensorNdDesc(&top_desc_, + top[0]->shape().size(), + &(top[0]->shape()[0])); } template diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index 8a8e9ab1128..2ae3418440c 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -11,8 +11,8 @@ void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, ReLULayer::LayerSetUp(bottom, top); // initialize cuDNN CUDNN_CHECK(cudnnCreate(&handle_)); - cudnn::createTensor4dDesc(&bottom_desc_); - cudnn::createTensor4dDesc(&top_desc_); + cudnn::createTensorNdDesc(&bottom_desc_); + cudnn::createTensorNdDesc(&top_desc_); handles_setup_ = true; } @@ -20,12 +20,10 @@ template void CuDNNReLULayer::Reshape(const vector*>& bottom, const vector*>& top) { ReLULayer::Reshape(bottom, top); - const int_tp N = bottom[0]->num(); - const int_tp K = bottom[0]->channels(); - const int_tp H = bottom[0]->height(); - const int_tp W = bottom[0]->width(); - cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); - cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); + cudnn::setTensorNdDesc(&bottom_desc_, bottom[0]->shape().size(), + &(bottom[0]->shape()[0])); + cudnn::setTensorNdDesc(&top_desc_, top[0]->shape().size(), + &(top[0]->shape()[0])); } template diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp index af57a31cc60..7422dff354d 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cpp +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -11,8 +11,8 @@ void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, SigmoidLayer::LayerSetUp(bottom, top); // initialize cuDNN CUDNN_CHECK(cudnnCreate(&handle_)); - cudnn::createTensor4dDesc(&bottom_desc_); - cudnn::createTensor4dDesc(&top_desc_); + cudnn::createTensorNdDesc(&bottom_desc_); + cudnn::createTensorNdDesc(&top_desc_); handles_setup_ = true; } @@ -20,12 +20,10 @@ template void CuDNNSigmoidLayer::Reshape(const vector*>& bottom, const vector*>& top) { SigmoidLayer::Reshape(bottom, top); - const int_tp N = bottom[0]->num(); - const int_tp K = bottom[0]->channels(); - const int_tp H = bottom[0]->height(); - const int_tp W = bottom[0]->width(); - cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); - cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); + cudnn::setTensorNdDesc(&bottom_desc_, bottom[0]->shape().size(), + &(bottom[0]->shape()[0])); + cudnn::setTensorNdDesc(&top_desc_, top[0]->shape().size(), + &(top[0]->shape()[0])); } template diff --git a/src/caffe/layers/cudnn_softmax_layer.cpp b/src/caffe/layers/cudnn_softmax_layer.cpp index 1ec7cc88a14..b4be07ebfb1 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cpp +++ b/src/caffe/layers/cudnn_softmax_layer.cpp @@ -13,8 +13,8 @@ void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, SoftmaxLayer::LayerSetUp(bottom, top); // Initialize CUDNN. CUDNN_CHECK(cudnnCreate(&handle_)); - cudnn::createTensor4dDesc(&bottom_desc_); - cudnn::createTensor4dDesc(&top_desc_); + cudnn::createTensorNdDesc(&bottom_desc_); + cudnn::createTensorNdDesc(&top_desc_); handles_setup_ = true; } @@ -22,12 +22,19 @@ template void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, const vector*>& top) { SoftmaxLayer::Reshape(bottom, top); - int_tp N = this->outer_num_; - int_tp K = bottom[0]->shape(this->softmax_axis_); - int_tp H = this->inner_num_; - int_tp W = 1; - cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); - cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); + + std::vector shape; + + shape.push_back(this->outer_num_); + shape.push_back(bottom[0]->shape(this->softmax_axis_)); + shape.push_back(this->inner_num_); + shape.push_back(1); + + + const int_tp* shape_ptr = &shape[0]; + + cudnn::setTensorNdDesc(&bottom_desc_, 4, shape_ptr); + cudnn::setTensorNdDesc(&top_desc_, 4, shape_ptr); } template diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp index a2ee82214e8..ef0355d87f1 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cpp +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -11,8 +11,8 @@ void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, TanHLayer::LayerSetUp(bottom, top); // initialize cuDNN CUDNN_CHECK(cudnnCreate(&handle_)); - cudnn::createTensor4dDesc(&bottom_desc_); - cudnn::createTensor4dDesc(&top_desc_); + cudnn::createTensorNdDesc(&bottom_desc_); + cudnn::createTensorNdDesc(&top_desc_); handles_setup_ = true; } @@ -20,12 +20,10 @@ template void CuDNNTanHLayer::Reshape(const vector*>& bottom, const vector*>& top) { TanHLayer::Reshape(bottom, top); - const int_tp N = bottom[0]->num(); - const int_tp K = bottom[0]->channels(); - const int_tp H = bottom[0]->height(); - const int_tp W = bottom[0]->width(); - cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); - cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); + const int_tp* shape = &(bottom[0]->shape()[0]); + cudnn::setTensorNdDesc(&bottom_desc_, bottom[0]->shape().size(), + shape); + cudnn::setTensorNdDesc(&top_desc_, bottom[0]->shape().size(), shape); } template diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 94682fa853a..f7fe875cd41 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -837,7 +837,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index 95b9b421dab..c9856ec6d5f 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -259,7 +259,7 @@ class CuDNNLRNLayerTest : public GPUDeviceTest { blob_bottom_(new Blob()), blob_top_(new Blob()) {} virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); blob_bottom_->Reshape(2, 7, 3, 3); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 4f930a02596..4ba869faccd 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -734,7 +734,7 @@ class CuDNNNeuronLayerTest : public GPUDeviceTest { CuDNNNeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); // fill the values FillerParameter filler_param; GaussianFiller filler(filler_param); @@ -752,7 +752,7 @@ class CuDNNNeuronLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -768,7 +768,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -778,7 +778,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -799,7 +799,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -811,7 +811,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -829,7 +829,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -839,7 +839,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -863,7 +863,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 84195be54c0..102a44109c6 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -618,7 +618,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { blob_top_(new Blob()), blob_top_mask_(new Blob()) {} virtual void SetUp() { - Caffe::set_random_seed(1701); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); blob_bottom_->Reshape(2, 3, 6, 5); // fill the values FillerParameter filler_param; @@ -966,7 +966,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->add_kernel_size(3); @@ -981,7 +981,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->add_kernel_size(3); @@ -1024,7 +1024,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { this->TestForwardSquare(); this->TestForwardRectHigh(); this->TestForwardRectWide(); @@ -1043,7 +1043,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1064,7 +1064,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->add_kernel_size(3); @@ -1131,7 +1131,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->add_kernel_size(3); @@ -1158,7 +1158,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1177,7 +1177,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { for (int_tp kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int_tp kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 5748c391474..f988097bde9 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -107,7 +107,7 @@ class CuDNNSoftmaxLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -142,7 +142,7 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { - if (Caffe::GetDefaultDeviceContext()->backend() == BACKEND_CUDA) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); From f2fcc37ef48410d9d52a20143bf947b59521da31 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 27 Dec 2015 16:33:30 +0100 Subject: [PATCH 231/254] CuDNN Pooling/Convolution ND fix. --- src/caffe/layers/cudnn_conv_layer.cpp | 63 ++++++++++++++++++------ src/caffe/layers/cudnn_pooling_layer.cpp | 15 ++++-- 2 files changed, 59 insertions(+), 19 deletions(-) diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 33fec029179..52e4a67354b 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -89,10 +89,7 @@ template void CuDNNConvolutionLayer::Reshape( const vector*>& bottom, const vector*>& top) { ConvolutionLayer::Reshape(bottom, top); - CHECK_EQ(2, this->num_spatial_axes_) - << "CuDNNConvolution input must have 2 spatial axes " - << "(e.g., height and width). " - << "Use 'engine: CAFFE' for general ND convolution."; + bottom_offset_ = this->bottom_dim_ / this->group_; top_offset_ = this->top_dim_ / this->group_; const int_tp* pad_data = this->pad_.cpu_data(); @@ -103,16 +100,54 @@ void CuDNNConvolutionLayer::Reshape( uint_tp workspace_limit_bytes = 8*1024*1024; for (int_tp i = 0; i < bottom.size(); i++) { - cudnn::setTensorNdDesc(&bottom_descs_[i], - bottom[i]->shape().size() - 2, - this->num_, - this->channels_ / this->group_, - &(bottom[i]->shape()[this->channel_axis_ + 1])); - cudnn::setTensorNdDesc(&top_descs_[i], - top[0]->shape().size() - 2, - this->num_, - this->num_output_ / this->group_, - &(top[0]->shape()[this->channel_axis_ + 1])); + + { + int_tp total_dims = bottom[i]->shape().size(); + std::vector full_shape(total_dims); + std::vector full_stride(total_dims); + + for (int_tp j = total_dims - 1; j >= 2; --j) { + full_shape[j] = bottom[i]->shape()[j]; + if (j == total_dims - 1) { + full_stride[j] = 1; + } else { + full_stride[j] = full_stride[j + 1] * full_shape[j + 1]; + } + } + + full_shape[1] = this->channels_ / this->group_; + full_stride[1] = full_shape[2] * full_stride[2]; + full_shape[0] = this->num_; + full_stride[0] = this->channels_ * full_stride[1]; + + cudnn::setTensorNdDesc(&bottom_descs_[i], total_dims, + &full_shape[0], &full_stride[0]); + } + + + { + int_tp total_dims = top[i]->shape().size(); + std::vector full_shape(total_dims); + std::vector full_stride(total_dims); + + for (int_tp j = total_dims - 1; j >= 2; --j) { + full_shape[j] = top[i]->shape()[j]; + if (j == total_dims - 1) { + full_stride[j] = 1; + } else { + full_stride[j] = full_stride[j + 1] * full_shape[j + 1]; + } + } + + full_shape[1] = this->num_output_ / this->group_; + full_stride[1] = full_shape[2] * full_stride[2]; + full_shape[0] = this->num_; + full_stride[0] = this->num_output_ * full_stride[1]; + + cudnn::setTensorNdDesc(&top_descs_[i], total_dims, &full_shape[0], + &full_stride[0]); + } + cudnn::setConvolutionDesc(&conv_descs_[i], bottom_descs_[i], filter_desc_, this->num_spatial_axes_, pad_data, stride_data); diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp index fb2ad6f927b..11e9d959dc4 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cpp +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -8,10 +8,10 @@ namespace caffe { template void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { - PoolingLayer::LayerSetUp(bottom, top); CUDNN_CHECK(cudnnCreate(&handle_)); cudnn::createTensorNdDesc(&bottom_desc_); cudnn::createTensorNdDesc(&top_desc_); + PoolingLayer::LayerSetUp(bottom, top); const int_tp* kernel_data = this->kernel_shape_.cpu_data(); const int_tp* pad_data = this->pad_.cpu_data(); @@ -30,11 +30,16 @@ void CuDNNPoolingLayer::Reshape(const vector*>& bottom, PoolingLayer::Reshape(bottom, top); cudnn::setTensorNdDesc(&bottom_desc_, - bottom[0]->shape().size(), - &(bottom[0]->shape()[0])); + bottom[0]->shape().size() - 2, + bottom[0]->num(), + this->channels_, + &(bottom[0]->shape()[2])); + const int_tp* pooled_size_data = this->pooled_size_.cpu_data(); cudnn::setTensorNdDesc(&top_desc_, - top[0]->shape().size(), - &(top[0]->shape()[0])); + bottom[0]->shape().size() - 2, + bottom[0]->num(), + this->channels_, + pooled_size_data); } template From 677e8440da3c0f90c209a0da49091ada344272f2 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 27 Dec 2015 17:54:45 +0100 Subject: [PATCH 232/254] Fallback to Caffe SK convolution/pooling when using cuDNN. --- src/caffe/layer_factory.cpp | 45 ++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index c6004af5e5b..69e1798c637 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -33,18 +33,49 @@ namespace caffe { +bool checkConvolutionKstrided(ConvolutionParameter param) { + if ((param.has_kstride_h() && param.kstride_h() > 1) + || (param.has_kstride_w() && param.kstride_w() > 1)) { + return true; + } + + for (int i = 0; i < param.kstride_size(); ++i) { + if (param.kstride(i) > 0) { + return true; + } + } + + return false; +} + +bool checkPoolingKstrided(PoolingParameter param) { + if ((param.has_kstride_h() && param.kstride_h() > 1) + || (param.has_kstride_w() && param.kstride_w() > 1)) { + return true; + } + + for (int i = 0; i < param.kstride_size(); ++i) { + if (param.kstride(i) > 0) { + return true; + } + } + + return false; +} + // Get convolution layer according to engine. template shared_ptr > GetConvolutionLayer(const LayerParameter& param) { ConvolutionParameter_Engine engine = param.convolution_param().engine(); - if (engine == ConvolutionParameter_Engine_DEFAULT) { + if (engine == ConvolutionParameter_Engine_DEFAULT + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL + || checkConvolutionKstrided(param.convolution_param())) { engine = ConvolutionParameter_Engine_CAFFE; #ifdef USE_CUDNN engine = ConvolutionParameter_Engine_CUDNN; #endif } - if (engine == ConvolutionParameter_Engine_CAFFE - || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { + if (engine == ConvolutionParameter_Engine_CAFFE) { return shared_ptr >(new ConvolutionLayer(param)); #ifdef USE_CUDNN } else if (engine == ConvolutionParameter_Engine_CUDNN) { @@ -57,18 +88,20 @@ shared_ptr > GetConvolutionLayer(const LayerParameter& param) { REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer); + // Get pooling layer according to engine. template shared_ptr > GetPoolingLayer(const LayerParameter& param) { PoolingParameter_Engine engine = param.pooling_param().engine(); - if (engine == PoolingParameter_Engine_DEFAULT) { + if (engine == PoolingParameter_Engine_DEFAULT + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL + || checkPoolingKstrided(param.pooling_param())) { engine = PoolingParameter_Engine_CAFFE; #ifdef USE_CUDNN engine = PoolingParameter_Engine_CUDNN; #endif } - if (engine == PoolingParameter_Engine_CAFFE - || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { + if (engine == PoolingParameter_Engine_CAFFE) { return shared_ptr >(new PoolingLayer(param)); #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { From c40513dbad6caf03c8770fa2b31334860cbcd4b1 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 27 Dec 2015 19:09:15 +0100 Subject: [PATCH 233/254] Removed legacy accessor.Removed legacy accessor. --- src/caffe/layers/cudnn_pooling_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp index 11e9d959dc4..879690bd4f5 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cpp +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -31,13 +31,13 @@ void CuDNNPoolingLayer::Reshape(const vector*>& bottom, cudnn::setTensorNdDesc(&bottom_desc_, bottom[0]->shape().size() - 2, - bottom[0]->num(), + bottom[0]->shape()[0], this->channels_, &(bottom[0]->shape()[2])); const int_tp* pooled_size_data = this->pooled_size_.cpu_data(); cudnn::setTensorNdDesc(&top_desc_, bottom[0]->shape().size() - 2, - bottom[0]->num(), + bottom[0]->shape()[0], this->channels_, pooled_size_data); } From 2d2052c2cf7037fe5bd266728337e25935053962 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 27 Dec 2015 20:26:42 +0100 Subject: [PATCH 234/254] Colbuffer allocation prevention in cuDNN. --- include/caffe/layers/base_conv_layer.hpp | 1 + src/caffe/layers/base_conv_layer.cpp | 4 +++- src/caffe/layers/cudnn_conv_layer.cpp | 6 ++++++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/include/caffe/layers/base_conv_layer.hpp b/include/caffe/layers/base_conv_layer.hpp index af86acff632..f625f4f7e45 100644 --- a/include/caffe/layers/base_conv_layer.hpp +++ b/include/caffe/layers/base_conv_layer.hpp @@ -109,6 +109,7 @@ class BaseConvolutionLayer : public Layer { bool bias_term_; bool is_1x1_; bool force_nd_im2col_; + bool use_colbuffer_; private: // wrap im2col/col2im so we don't have to remember the (long) argument lists diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index b7eef86df66..70dfb33ed66 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -17,6 +17,8 @@ namespace caffe { template void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { + use_colbuffer_ = true; + // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); force_nd_im2col_ = conv_param.force_nd_im2col(); @@ -269,7 +271,7 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, } col_buffer_.Reshape(col_buffer_shape_); - if (Caffe::mode() == Caffe::Brew::GPU) { + if (Caffe::mode() == Caffe::Brew::GPU && use_colbuffer_) { // Shared column buffer per device-queue across all layers on that device for (int_tp i = 0; i < this->device_->num_queues(); ++i) { shared_ptr > buffer = this->device_ diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 52e4a67354b..c7ef9c94f84 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -18,6 +18,9 @@ template void CuDNNConvolutionLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { ConvolutionLayer::LayerSetUp(bottom, top); + + this->use_colbuffer_ = false; + // Initialize CUDA streams and cuDNN. stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; handle_ = new cudnnHandle_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; @@ -88,6 +91,9 @@ void CuDNNConvolutionLayer::LayerSetUp( template void CuDNNConvolutionLayer::Reshape( const vector*>& bottom, const vector*>& top) { + + this->use_colbuffer_ = false; + ConvolutionLayer::Reshape(bottom, top); bottom_offset_ = this->bottom_dim_ / this->group_; From a38d44a9a5411edfba8fab34c9d24078b0f0f0a3 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 27 Dec 2015 21:37:05 +0100 Subject: [PATCH 235/254] deterministic GEMM backard. --- src/caffe/layers/cudnn_conv_layer.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index c7ef9c94f84..ca298c59253 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -106,7 +106,6 @@ void CuDNNConvolutionLayer::Reshape( uint_tp workspace_limit_bytes = 8*1024*1024; for (int_tp i = 0; i < bottom.size(); i++) { - { int_tp total_dims = bottom[i]->shape().size(); std::vector full_shape(total_dims); @@ -235,8 +234,8 @@ void CuDNNConvolutionLayer::Reshape( workspace_bwd_filter_sizes_[i] = 0; workspace_bwd_data_sizes_[i] = 0; fwd_algo_[i] = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; - bwd_filter_algo_[i] = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0; - bwd_data_algo_[i] = CUDNN_CONVOLUTION_BWD_DATA_ALGO_0; + bwd_filter_algo_[i] = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; + bwd_data_algo_[i] = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; } // NULL out all workspace pointers From 6e9fba43b3fdd9b2c87eac00b9d79fa18b703998 Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 28 Dec 2015 00:20:48 +0100 Subject: [PATCH 236/254] Layer factory and dropout ND update. --- src/caffe/layer_factory.cpp | 22 +++++++++++----------- src/caffe/layers/cudnn_conv_layer.cpp | 4 ++-- src/caffe/layers/dropout_layer.cpp | 3 +-- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 69e1798c637..d42c10b361e 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -67,15 +67,15 @@ bool checkPoolingKstrided(PoolingParameter param) { template shared_ptr > GetConvolutionLayer(const LayerParameter& param) { ConvolutionParameter_Engine engine = param.convolution_param().engine(); - if (engine == ConvolutionParameter_Engine_DEFAULT - || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL - || checkConvolutionKstrided(param.convolution_param())) { + if (engine == ConvolutionParameter_Engine_DEFAULT) { engine = ConvolutionParameter_Engine_CAFFE; #ifdef USE_CUDNN engine = ConvolutionParameter_Engine_CUDNN; #endif } - if (engine == ConvolutionParameter_Engine_CAFFE) { + if (engine == ConvolutionParameter_Engine_CAFFE + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL + || checkConvolutionKstrided(param.convolution_param())) { return shared_ptr >(new ConvolutionLayer(param)); #ifdef USE_CUDNN } else if (engine == ConvolutionParameter_Engine_CUDNN) { @@ -93,15 +93,15 @@ REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer); template shared_ptr > GetPoolingLayer(const LayerParameter& param) { PoolingParameter_Engine engine = param.pooling_param().engine(); - if (engine == PoolingParameter_Engine_DEFAULT - || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL - || checkPoolingKstrided(param.pooling_param())) { + if (engine == PoolingParameter_Engine_DEFAULT) { engine = PoolingParameter_Engine_CAFFE; #ifdef USE_CUDNN engine = PoolingParameter_Engine_CUDNN; #endif } - if (engine == PoolingParameter_Engine_CAFFE) { + if (engine == PoolingParameter_Engine_CAFFE + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL + || checkPoolingKstrided(param.pooling_param())) { return shared_ptr >(new PoolingLayer(param)); #ifdef USE_CUDNN } else if (engine == PoolingParameter_Engine_CUDNN) { @@ -125,14 +125,14 @@ shared_ptr > GetLRNLayer(const LayerParameter& param) { LRNParameter_Engine engine = param.lrn_param().engine(); if (engine == LRNParameter_Engine_DEFAULT) { + engine = LRNParameter_Engine_CAFFE; #ifdef USE_CUDNN engine = LRNParameter_Engine_CUDNN; -#else - engine = LRNParameter_Engine_CAFFE; #endif } - if (engine == LRNParameter_Engine_CAFFE) { + if (engine == LRNParameter_Engine_CAFFE + || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) { return shared_ptr >(new LRNLayer(param)); #ifdef USE_CUDNN } else if (engine == LRNParameter_Engine_CUDNN) { diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index ca298c59253..308136e894b 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -234,8 +234,8 @@ void CuDNNConvolutionLayer::Reshape( workspace_bwd_filter_sizes_[i] = 0; workspace_bwd_data_sizes_[i] = 0; fwd_algo_[i] = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; - bwd_filter_algo_[i] = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; - bwd_data_algo_[i] = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; + bwd_filter_algo_[i] = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0; + bwd_data_algo_[i] = CUDNN_CONVOLUTION_BWD_DATA_ALGO_0; } // NULL out all workspace pointers diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index c14c70bac4b..c6d3fcd0c9e 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -27,8 +27,7 @@ void DropoutLayer::Reshape(const vector*>& bottom, const vector*>& top) { NeuronLayer::Reshape(bottom, top); // Set up the cache for random number generation - rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(), - bottom[0]->height(), bottom[0]->width()); + rand_vec_.Reshape(bottom[0]->shape()); } template From 7fd1f1e2a48b5c7ea87eda2a127aec032f19e82f Mon Sep 17 00:00:00 2001 From: Fabian Tschopp Date: Mon, 28 Dec 2015 15:39:21 +0100 Subject: [PATCH 237/254] Update layer_factory.cpp --- src/caffe/layer_factory.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index d42c10b361e..1747235a9b1 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -40,7 +40,7 @@ bool checkConvolutionKstrided(ConvolutionParameter param) { } for (int i = 0; i < param.kstride_size(); ++i) { - if (param.kstride(i) > 0) { + if (param.kstride(i) > 1) { return true; } } @@ -55,7 +55,7 @@ bool checkPoolingKstrided(PoolingParameter param) { } for (int i = 0; i < param.kstride_size(); ++i) { - if (param.kstride(i) > 0) { + if (param.kstride(i) > 1) { return true; } } From ae61b64e22ece0df5cd41adb0660d54e8d5b5846 Mon Sep 17 00:00:00 2001 From: fabian Date: Tue, 29 Dec 2015 04:15:24 +0100 Subject: [PATCH 238/254] 32/64 bit indexing --- include/caffe/definitions.hpp | 11 +++++++++++ src/caffe/layers/base_conv_layer.cpp | 3 ++- src/caffe/layers/cudnn_conv_layer.cpp | 16 ++++++++-------- src/caffe/layers/eltwise_layer.cpp | 2 +- src/caffe/layers/pooling_layer.cpp | 16 ++++++++-------- src/caffe/layers/window_data_layer.cpp | 8 ++++---- src/caffe/util/db_lmdb.cpp | 2 +- src/caffe/util/math_functions.cpp | 8 +++++--- 8 files changed, 40 insertions(+), 26 deletions(-) diff --git a/include/caffe/definitions.hpp b/include/caffe/definitions.hpp index 9babcf6d95b..2c88042fd66 100644 --- a/include/caffe/definitions.hpp +++ b/include/caffe/definitions.hpp @@ -3,6 +3,8 @@ #include + +#ifdef USE_INDEX_64 // Types used for parameters, offset computations and so on #define int_tp int64_t #define uint_tp uint64_t @@ -10,5 +12,14 @@ // Definitions used to cast the types above as needed #define int_tpc long long // NOLINT #define uint_tpc unsigned long long // NOLINT +#else +// Types used for parameters, offset computations and so on +#define int_tp int32_t +#define uint_tp uint32_t + +// Definitions used to cast the types above as needed +#define int_tpc int // NOLINT +#define uint_tpc unsigned int // NOLINT +#endif #endif /* CAFFE_DEFINITIONS_HPP_ */ diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 70dfb33ed66..ed0778900e9 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -28,7 +28,8 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, num_spatial_axes_ = num_axes - first_spatial_axis; CHECK_GE(num_spatial_axes_, 0); vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1L)); + vector spatial_dim_blob_shape( + 1, std::max(num_spatial_axes_, (int_tpc) 1)); // Setup filter kernel dimensions (kernel_shape_). kernel_shape_.Reshape(spatial_dim_blob_shape); int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 308136e894b..20e6612d9b7 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -31,9 +31,9 @@ void CuDNNConvolutionLayer::LayerSetUp( bwd_data_algo_ = new cudnnConvolutionBwdDataAlgo_t[bottom.size()]; // initialize size arrays - workspace_fwd_sizes_ = new uint_tp[bottom.size()]; - workspace_bwd_filter_sizes_ = new uint_tp[bottom.size()]; - workspace_bwd_data_sizes_ = new uint_tp[bottom.size()]; + workspace_fwd_sizes_ = new size_t[bottom.size()]; + workspace_bwd_filter_sizes_ = new size_t[bottom.size()]; + workspace_bwd_data_sizes_ = new size_t[bottom.size()]; // workspace data workspaceSizeInBytes = 0; @@ -198,9 +198,9 @@ void CuDNNConvolutionLayer::Reshape( } // reduce over all workspace sizes to get a maximum to allocate / reallocate - uint_tp total_workspace_fwd = 0; - uint_tp total_workspace_bwd_data = 0; - uint_tp total_workspace_bwd_filter = 0; + size_t total_workspace_fwd = 0; + size_t total_workspace_bwd_data = 0; + size_t total_workspace_bwd_filter = 0; for (uint_tp i = 0; i < bottom.size(); i++) { total_workspace_fwd = std::max(total_workspace_fwd, @@ -211,11 +211,11 @@ void CuDNNConvolutionLayer::Reshape( workspace_bwd_filter_sizes_[i]); } // get max over all operations - uint_tp max_workspace = std::max(total_workspace_fwd, + size_t max_workspace = std::max(total_workspace_fwd, total_workspace_bwd_data); max_workspace = std::max(max_workspace, total_workspace_bwd_filter); // ensure all groups have enough workspace - uint_tp total_max_workspace = max_workspace * + size_t total_max_workspace = max_workspace * (this->group_ * CUDNN_STREAMS_PER_GROUP); // this is the total amount of storage needed over all groups + streams diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index adb82f65e53..882a25f77a1 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -66,7 +66,7 @@ void EltwiseLayer::Forward_cpu( case EltwiseParameter_EltwiseOp_MAX: // Initialize mask = max_idx_.mutable_cpu_data(); - caffe_set(count, -1L, mask); + caffe_set(count, (int_tp)-1, mask); caffe_set(count, Dtype(-FLT_MAX), top_data); // bottom 0 & 1 bottom_data_a = bottom[0]->cpu_data(); diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index f68819a97fc..52bdbcffb12 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -33,7 +33,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, CHECK_GE(num_spatial_axes_, 0); vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1L)); + vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, (int_tp)1)); kernel_shape_.Reshape(spatial_dim_blob_shape); int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); @@ -261,7 +261,7 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, caffe_set(top_count, Dtype(-1), top_mask); } else { mask = max_idx_.mutable_cpu_data(); - caffe_set(top_count, -1L, mask); + caffe_set(top_count, (int_tp)-1, mask); } caffe_set(top_count, Dtype(-FLT_MAX), top_data); // The main loop @@ -273,8 +273,8 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, int_tp wstart = pw * stride_w_ - pad_w_; int_tp hend = min(hstart + kernel_h_, height_); int_tp wend = min(wstart + kernel_w_, width_); - hstart = max(hstart, 0L); - wstart = max(wstart, 0L); + hstart = max(hstart, (int_tp)0); + wstart = max(wstart, (int_tp)0); const int_tp pool_index = ph * pooled_width_ + pw; for (int_tp h = hstart; h < hend; ++h) { for (int_tp w = wstart; w < wend; ++w) { @@ -316,8 +316,8 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, int_tp hend = min(hstart + kernel_h_, height_ + pad_h_); int_tp wend = min(wstart + kernel_w_, width_ + pad_w_); int_tp pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0L); - wstart = max(wstart, 0L); + hstart = max(hstart, (int_tp)0); + wstart = max(wstart, (int_tp)0); hend = min(hend, height_); wend = min(wend, width_); for (int_tp h = hstart; h < hend; ++h) { @@ -408,8 +408,8 @@ void PoolingLayer::Backward_cpu(const vector*>& top, int_tp hend = min(hstart + kernel_h_, height_ + pad_h_); int_tp wend = min(wstart + kernel_w_, width_ + pad_w_); int_tp pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0L); - wstart = max(wstart, 0L); + hstart = max(hstart, (int_tp)0); + wstart = max(wstart, (int_tp)0); hend = min(hend, height_); wend = min(wend, width_); for (int_tp h = hstart; h < hend; ++h) { diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index bed5c9fa8e5..92860cde7fa 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -335,10 +335,10 @@ void WindowDataLayer::load_batch(Batch* batch) { // the extent beyond the image int_tp unclipped_height = y2-y1+1; int_tp unclipped_width = x2-x1+1; - int_tp pad_x1 = std::max(0L, -x1); - int_tp pad_y1 = std::max(0L, -y1); - int_tp pad_x2 = std::max(0L, x2 - cv_img.cols + 1); - int_tp pad_y2 = std::max(0L, y2 - cv_img.rows + 1); + int_tp pad_x1 = std::max((int_tp)0, -x1); + int_tp pad_y1 = std::max((int_tp)0, -y1); + int_tp pad_x2 = std::max((int_tp)0, x2 - cv_img.cols + 1); + int_tp pad_y2 = std::max((int_tp)0, y2 - cv_img.rows + 1); // clip bounds x1 = x1 + pad_x1; x2 = x2 - pad_x2; diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index c34da549b1f..57f941e4699 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -7,7 +7,7 @@ namespace caffe { namespace db { -const uint_tp LMDB_MAP_SIZE = 1099511627776; // 1 TB +const size_t LMDB_MAP_SIZE = 1099511627776; // 1 TB void LMDB::Open(const string& source, Mode mode) { MDB_CHECK(mdb_env_create(&mdb_env_)); diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index c4331955e4a..8037daba4fe 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -71,9 +71,11 @@ void caffe_set(const int_tp N, const Dtype alpha, Dtype* Y) { } template void caffe_set(const int_tp N, const int alpha, int* Y); -template void caffe_set(const int_tp N, const uint_tp alpha, - uint_tp* Y); -template void caffe_set(const int_tp N, const int_tp alpha, int_tp* Y); +template void caffe_set(const int_tp N, const unsigned int alpha, + unsigned int* Y); +template void caffe_set(const int_tp N, const long long alpha, long long* Y); +template void caffe_set(const int_tp N, const unsigned long long alpha, + unsigned long long* Y); template void caffe_set(const int_tp N, const float alpha, float* Y); template void caffe_set(const int_tp N, const double alpha, double* Y); From c3668162c96f15daec53fd66137585bbf8179b0f Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 31 Dec 2015 05:37:30 +0100 Subject: [PATCH 239/254] Selectable 64/32bit indexing compilation. --- CMakeLists.txt | 3 +- Makefile.config.example | 3 + cmake/Templates/caffe_config.h.in | 3 + include/caffe/greentea/greentea_im2col.hpp | 64 +++--- include/caffe/layers/base_conv_layer.hpp | 31 ++- include/caffe/layers/pooling_layer.hpp | 2 +- log.txt | 111 ++++++++++- .../greentea/cl_headers/definitions_32.cl | 7 + .../greentea/cl_headers/definitions_64.cl | 7 + src/caffe/greentea/cl_headers/header.cl | 6 +- src/caffe/greentea/cl_kernels.cpp | 39 ++-- src/caffe/greentea/cl_kernels.sh | 36 +++- src/caffe/greentea/cl_kernels/im2col.cl | 36 ++-- src/caffe/greentea/cl_kernels/im2col_nd.cl | 60 +++--- src/caffe/greentea/cl_kernels/lrn.cl | 2 +- src/caffe/greentea/cl_kernels/pooling.cl | 8 +- src/caffe/greentea/cl_kernels/pooling_nd.cl | 2 +- src/caffe/greentea/cl_kernels/pooling_sk.cl | 100 +++++----- src/caffe/greentea/greentea_im2col.cpp | 81 ++++---- .../greentea/greentea_math_functions.cpp | 28 +-- src/caffe/layers/conv_layer.cpp | 37 ++-- src/caffe/layers/deconv_layer.cpp | 3 +- src/caffe/layers/im2col_layer.cpp | 3 +- src/caffe/layers/im2col_layer.cu | 5 +- src/caffe/layers/pooling_layer.cpp | 6 +- src/caffe/layers/pooling_layer.cu | 5 +- src/caffe/test/test_im2col_kernel.cu | 186 ++++++------------ src/caffe/test/test_lrn_layer.cpp | 178 +++++++++-------- src/caffe/test/test_pooling_ndsk_layer.cpp | 18 +- .../test/test_random_number_generator.cpp | 2 +- src/caffe/util/hdf5.cpp | 2 +- src/caffe/util/im2col.cu | 172 +++++++++------- src/caffe/util/math_functions.cpp | 12 +- 33 files changed, 710 insertions(+), 548 deletions(-) create mode 100644 src/caffe/greentea/cl_headers/definitions_32.cl create mode 100644 src/caffe/greentea/cl_headers/definitions_64.cl diff --git a/CMakeLists.txt b/CMakeLists.txt index ceb82aca0fa..3ad13a3a4fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,7 +21,8 @@ include(cmake/Summary.cmake) include(cmake/ConfigGen.cmake) # ---[ Options -caffe_option(CPU_ONLY "Build Caffe without CUDA and OpenCL support" OFF) +caffe_option(CPU_ONLY "Build Caffe without CUDA and OpenCL support" OFF) +caffe_option(USE_INDEX_64 "Build Caffe with 64 bit indexing" OFF) caffe_option(USE_CUDA "Build Caffe with CUDA support" ON) caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" ON) caffe_option(USE_CLBLAS "Build Caffe with clBLAS support (instead of using ViennaClBLAS)" OFF) diff --git a/Makefile.config.example b/Makefile.config.example index 99be6ba0333..e6610f4f5c9 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -1,6 +1,9 @@ ## Refer to http://caffe.berkeleyvision.org/installation.html # Contributions simplifying and improving our build system are welcome! +# 32 bit / 64 bit indexing +# USE_INDEX_64 + # GreenTea (ViennaCL/OpenCL) backend switch # Enable the CUDA backend diff --git a/cmake/Templates/caffe_config.h.in b/cmake/Templates/caffe_config.h.in index 727cef976fd..fe695345a76 100644 --- a/cmake/Templates/caffe_config.h.in +++ b/cmake/Templates/caffe_config.h.in @@ -7,6 +7,9 @@ /* Binaries directory */ #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" +/* 64 bit indexing */ +#cmakedefine USE_INDEX_64 + /* NVIDA Cuda */ #cmakedefine HAVE_CUDA #cmakedefine USE_CUDA diff --git a/include/caffe/greentea/greentea_im2col.hpp b/include/caffe/greentea/greentea_im2col.hpp index caecdefd122..694cdbc256e 100644 --- a/include/caffe/greentea/greentea_im2col.hpp +++ b/include/caffe/greentea/greentea_im2col.hpp @@ -14,45 +14,49 @@ namespace caffe { template void greentea_im2col_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, const cl_mem data_im, - const int_tp data_offset, const int_tp channels, - const int_tp height, const int_tp width, - const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp dilation_h, const int_tp dilation_w, - cl_mem data_col); + viennacl::ocl::context *ctx, const cl_mem data_im, + const int_tp data_offset, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, + cl_mem data_col, const int_tp data_col_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, const cl_mem data_col, - const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp dilation_h, - const int_tp dilation_w, cl_mem data_im, - const int_tp data_offset); + viennacl::ocl::context *ctx, const cl_mem data_col, + const int_tp data_col_off, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, + cl_mem data_im, const int_tp data_im_off); template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_im, - const int_tp data_off, - const int_tp num_spatial_axes, - const int_tp num_kernels, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem dilation, cl_mem data_col, - int_tp data_col_off); + viennacl::ocl::context *ctx, cl_mem data_im, + const int_tp data_off, + const int_tp num_spatial_axes, + const int_tp channel_axis, + const int_tp num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem dilation, cl_mem data_col, + const int_tp data_col_off); template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, - viennacl::ocl::context *ctx, cl_mem data_col, - const int_tp data_col_off, - const int_tp num_spatial_axes, - const int_tp im_size, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem dilation, cl_mem data_im, - int_tp data_off); + viennacl::ocl::context *ctx, cl_mem data_col, + const int_tp data_col_off, + const int_tp num_spatial_axes, + const int_tp channel_axis, + const int_tp im_size, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem dilation, cl_mem data_im, + int_tp data_im_off); } // namespace caffe diff --git a/include/caffe/layers/base_conv_layer.hpp b/include/caffe/layers/base_conv_layer.hpp index 3a625629514..aca544fbb7c 100644 --- a/include/caffe/layers/base_conv_layer.hpp +++ b/include/caffe/layers/base_conv_layer.hpp @@ -162,7 +162,6 @@ class BaseConvolutionLayer : public Layer { inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_gpu(col_buff, conv_in_channels_, conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], @@ -195,10 +194,13 @@ class BaseConvolutionLayer : public Layer { pad_.cpu_data()[0], pad_.cpu_data()[1], stride_.cpu_data()[0], stride_.cpu_data()[1], dilation_.cpu_data()[0], - dilation_.cpu_data()[1], (cl_mem) col_buff); + dilation_.cpu_data()[1], (cl_mem) col_buff, + col_buff_off); } else { greentea_im2col_nd_gpu(&program, &ctx, (cl_mem) data, data_off, - num_spatial_axes_, num_kernels_im2col_, + num_spatial_axes_, + (int_tp)0, + num_kernels_im2col_, (cl_mem) (conv_input_shape_.gpu_data()), (cl_mem) (col_buffer_.gpu_shape()), (cl_mem) (kernel_shape_.gpu_data()), @@ -217,20 +219,28 @@ class BaseConvolutionLayer : public Layer { viennacl::ocl::program &program = this->device_->program(); if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - greentea_col2im_gpu(&program, &ctx, (cl_mem) col_buff, + greentea_col2im_gpu(&program, &ctx, + (cl_mem) col_buff, + col_buff_off, conv_in_channels_, conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], + pad_.cpu_data()[0], + pad_.cpu_data()[1], + stride_.cpu_data()[0], + stride_.cpu_data()[1], dilation_.cpu_data()[0], - dilation_.cpu_data()[1], (cl_mem) data, + dilation_.cpu_data()[1], + (cl_mem) data, data_off); } else { - greentea_col2im_nd_gpu(&program, &ctx, (cl_mem) col_buff, - col_buff_off, num_spatial_axes_, + greentea_col2im_nd_gpu(&program, &ctx, + (cl_mem) col_buff, + col_buff_off, + num_spatial_axes_, + (int_tp)0, num_kernels_col2im_, (cl_mem) (conv_input_shape_.gpu_data()), (cl_mem) (col_buffer_.gpu_shape()), @@ -238,7 +248,8 @@ class BaseConvolutionLayer : public Layer { (cl_mem) (pad_.gpu_data()), (cl_mem) (stride_.gpu_data()), (cl_mem) (dilation_.gpu_data()), - (cl_mem) data, data_off); + (cl_mem) data, + data_off); } } #endif // USE_GREENTEA diff --git a/include/caffe/layers/pooling_layer.hpp b/include/caffe/layers/pooling_layer.hpp index fcbbc614773..e207f73743b 100644 --- a/include/caffe/layers/pooling_layer.hpp +++ b/include/caffe/layers/pooling_layer.hpp @@ -58,7 +58,7 @@ class PoolingLayer : public Layer { Blob ext_kernel_shape_; Blob stride_; Blob pad_; - Blob kstride_; + Blob dilation_; Blob size_; Blob pooled_size_; diff --git a/log.txt b/log.txt index 7568e4c0a76..38d69b89d4a 100644 --- a/log.txt +++ b/log.txt @@ -1 +1,110 @@ -[57261.014178] [WARN]Received Interrupt signal. +Setting to use device 1 +Note: Google Test filter = *Rng* +[==========] Running 36 tests from 2 test cases. +[----------] Global test environment set-up. +[----------] 18 tests from RandomNumberGeneratorTest/0, where TypeParam = float +[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussian +[ OK ] RandomNumberGeneratorTest/0.TestRngGaussian (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussian2 +[ OK ] RandomNumberGeneratorTest/0.TestRngGaussian2 (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniform +[ OK ] RandomNumberGeneratorTest/0.TestRngUniform (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniform2 +[ OK ] RandomNumberGeneratorTest/0.TestRngUniform2 (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngBernoulli +[ OK ] RandomNumberGeneratorTest/0.TestRngBernoulli (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngBernoulli2 +[ OK ] RandomNumberGeneratorTest/0.TestRngBernoulli2 (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussian +[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussian (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniform +[ OK ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniform (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianTimesBernoulli +[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianTimesBernoulli (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformTimesBernoulli +[ OK ] RandomNumberGeneratorTest/0.TestRngUniformTimesBernoulli (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngBernoulliTimesBernoulli +[ OK ] RandomNumberGeneratorTest/0.TestRngBernoulliTimesBernoulli (0 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianGPU +[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianGPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussian2GPU +[ OK ] RandomNumberGeneratorTest/0.TestRngGaussian2GPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformGPU +[ OK ] RandomNumberGeneratorTest/0.TestRngUniformGPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniform2GPU +[ OK ] RandomNumberGeneratorTest/0.TestRngUniform2GPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformIntGPU +src/caffe/test/test_random_number_generator.cpp:127: Failure +The difference between sample_mean and true_mean is 9.2233720346967101e+18, which exceeds bound, where +sample_mean evaluates to 2158065920, +true_mean evaluates to 9.2233720368547758e+18, and +bound evaluates to 2.0235441203367117e+17. +src/caffe/test/test_random_number_generator.cpp:163: Failure +The difference between bernoulli_p and sample_p_above_mean is 0.5, which exceeds bernoulli_bound, where +bernoulli_p evaluates to 0.5, +sample_p_above_mean evaluates to 0, and +bernoulli_bound evaluates to 0.018999999389052391. +[ FAILED ] RandomNumberGeneratorTest/0.TestRngUniformIntGPU, where TypeParam = float (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussianGPU +[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussianGPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniformGPU +[ OK ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniformGPU (2 ms) +[----------] 18 tests from RandomNumberGeneratorTest/0 (13 ms total) + +[----------] 18 tests from RandomNumberGeneratorTest/1, where TypeParam = double +[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussian +[ OK ] RandomNumberGeneratorTest/1.TestRngGaussian (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussian2 +[ OK ] RandomNumberGeneratorTest/1.TestRngGaussian2 (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniform +[ OK ] RandomNumberGeneratorTest/1.TestRngUniform (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniform2 +[ OK ] RandomNumberGeneratorTest/1.TestRngUniform2 (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngBernoulli +[ OK ] RandomNumberGeneratorTest/1.TestRngBernoulli (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngBernoulli2 +[ OK ] RandomNumberGeneratorTest/1.TestRngBernoulli2 (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussian +[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussian (1 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniform +[ OK ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniform (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianTimesBernoulli +[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianTimesBernoulli (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformTimesBernoulli +[ OK ] RandomNumberGeneratorTest/1.TestRngUniformTimesBernoulli (1 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngBernoulliTimesBernoulli +[ OK ] RandomNumberGeneratorTest/1.TestRngBernoulliTimesBernoulli (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianGPU +[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianGPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussian2GPU +[ OK ] RandomNumberGeneratorTest/1.TestRngGaussian2GPU (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformGPU +[ OK ] RandomNumberGeneratorTest/1.TestRngUniformGPU (0 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniform2GPU +[ OK ] RandomNumberGeneratorTest/1.TestRngUniform2GPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformIntGPU +src/caffe/test/test_random_number_generator.cpp:127: Failure +The difference between sample_mean and true_mean is 9.2233720346967091e+18, which exceeds bound, where +sample_mean evaluates to 2158067198.3313999, +true_mean evaluates to 9.2233720368547758e+18, and +bound evaluates to 2.0235442047593853e+17. +src/caffe/test/test_random_number_generator.cpp:163: Failure +The difference between bernoulli_p and sample_p_above_mean is 0.5, which exceeds bernoulli_bound, where +bernoulli_p evaluates to 0.5, +sample_p_above_mean evaluates to 0, and +bernoulli_bound evaluates to 0.019. +[ FAILED ] RandomNumberGeneratorTest/1.TestRngUniformIntGPU, where TypeParam = double (1 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussianGPU +[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussianGPU (1 ms) +[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniformGPU +[ OK ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniformGPU (2 ms) +[----------] 18 tests from RandomNumberGeneratorTest/1 (10 ms total) + +[----------] Global test environment tear-down +[==========] 36 tests from 2 test cases ran. (23 ms total) +[ PASSED ] 34 tests. +[ FAILED ] 2 tests, listed below: +[ FAILED ] RandomNumberGeneratorTest/0.TestRngUniformIntGPU, where TypeParam = float +[ FAILED ] RandomNumberGeneratorTest/1.TestRngUniformIntGPU, where TypeParam = double + + 2 FAILED TESTS diff --git a/src/caffe/greentea/cl_headers/definitions_32.cl b/src/caffe/greentea/cl_headers/definitions_32.cl new file mode 100644 index 00000000000..706cde9f8be --- /dev/null +++ b/src/caffe/greentea/cl_headers/definitions_32.cl @@ -0,0 +1,7 @@ +// Types used for parameters, offset computations and so on +#define int_tp int +#define uint_tp unsigned int + +// Definitions used to cast the types above as needed +#define int_tpc int +#define uint_tpc unsigned int diff --git a/src/caffe/greentea/cl_headers/definitions_64.cl b/src/caffe/greentea/cl_headers/definitions_64.cl new file mode 100644 index 00000000000..99e41d9ee56 --- /dev/null +++ b/src/caffe/greentea/cl_headers/definitions_64.cl @@ -0,0 +1,7 @@ +// Types used for parameters, offset computations and so on +#define int_tp long +#define uint_tp unsigned long + +// Definitions used to cast the types above as needed +#define int_tpc long +#define uint_tpc unsigned long diff --git a/src/caffe/greentea/cl_headers/header.cl b/src/caffe/greentea/cl_headers/header.cl index d4851cb16e2..0dd6b5319cf 100644 --- a/src/caffe/greentea/cl_headers/header.cl +++ b/src/caffe/greentea/cl_headers/header.cl @@ -16,15 +16,11 @@ #define Dtype float #define barrier(x) #define atomic_cmpxchg(x, y, z) x -#endif - -// Types used for parameters, offset computations and so on #define int_tp long #define uint_tp unsigned long - -// Definitions used to cast the types above as needed #define int_tpc long #define uint_tpc unsigned long +#endif #define CONCAT(A,B) A##_##B #define TEMPLATE(name,type) CONCAT(name,type) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index afdd195f8a8..03fabb128d8 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -5,8 +5,13 @@ #include #include namespace caffe { -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT -std::string header_float = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#endif\n\n// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +#ifdef USE_INDEX_64 +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string definitions_64 = "// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long"; // NOLINT +#else +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string definitions_32 = "// Types used for parameters, offset computations and so on\n#define int_tp int\n#define uint_tp unsigned int\n\n// Definitions used to cast the types above as needed\n#define int_tpc int\n#define uint_tpc unsigned int"; // NOLINT +#endif std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT @@ -18,14 +23,14 @@ std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += dilation_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += dilation_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp height, const int_tp width,\n const int_tp channels, const int_tp patch_h,\n const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % dilation_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % dilation_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % dilation_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % dilation_h : h;\n int_tp w_num = (w - w_col_start) / dilation_w;\n int_tp h_num = (h - h_col_start) / dilation_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n dilation_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n dilation_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / dilation_h * patch_w + (w - w_col) / dilation_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * dilation[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % dilation[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - dilation[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n } else { // d_col_iter[i] <= d_max - dilation[1]\n d_col_iter[i] += dilation[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT -std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += dilation_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += dilation_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp patch_h, const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % dilation_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % dilation_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % dilation_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % dilation_h : h;\n int_tp w_num = (w - w_col_start) / dilation_w;\n int_tp h_num = (h - h_col_start) / dilation_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = data_col_off + c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n dilation_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n dilation_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / dilation_h * patch_w + (w - w_col) / dilation_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * dilation[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1;\n d_col_size[i] = (im_shape_ptr[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % dilation[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - dilation[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n } else { // d_col_iter[i] <= d_max - dilation[1]\n d_col_iter[i] += dilation[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT @@ -40,20 +45,26 @@ std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += dilation_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += dilation_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp height, const int_tp width,\n const int_tp channels, const int_tp patch_h,\n const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % dilation_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % dilation_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % dilation_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % dilation_h : h;\n int_tp w_num = (w - w_col_start) / dilation_w;\n int_tp h_num = (h - h_col_start) / dilation_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n dilation_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n dilation_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / dilation_h * patch_w + (w - w_col) / dilation_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape[i + 1];\n channel_in /= col_shape[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * dilation[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1;\n d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape[i + 1] + pad[i];\n channel_im /= im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // Old:\n /*d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_shape[i]) ?\n 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;\n d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % dilation[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - dilation[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n } else { // d_col_iter[i] <= d_max - dilation[1]\n d_col_iter[i] += dilation[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[index] = val;\n }\n}"; // NOLINT -std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += dilation_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += dilation_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp patch_h, const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % dilation_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % dilation_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % dilation_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % dilation_h : h;\n int_tp w_num = (w - w_col_start) / dilation_w;\n int_tp h_num = (h - h_col_start) / dilation_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = data_col_off + c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n dilation_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n dilation_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / dilation_h * patch_w + (w - w_col) / dilation_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * dilation[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1;\n d_col_size[i] = (im_shape_ptr[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % dilation[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - dilation[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n } else { // d_col_iter[i] <= d_max - dilation[1]\n d_col_iter[i] += dilation[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], 0L);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp kstride_h,\n const int_tp kstride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h;\n int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += kstride_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, 0L);\n wstart = max(wstart, 0L);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w,\n const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h,\n const int_tp kstride_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += kstride_h) {\n for (int_tp w = wstart; w < wend; w += kstride_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; +#ifdef USE_INDEX_64 + ss << header << "\n\n"; // NOLINT + ss << definitions_64 << "\n\n"; // NOLINT +#else ss << header << "\n\n"; // NOLINT + ss << definitions_32 << "\n\n"; // NOLINT +#endif ss << "#define Dtype float" << "\n\n"; // NOLINT ss << "#define TYPE TYPE_FLOAT" << "\n\n"; // NOLINT ss << activation_float << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index 45782a61286..b25e9a72692 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -3,7 +3,8 @@ # load the kernels to ViennaCL/OpenCL contexts. # Outputs (overwrites): cl_kernels.hpp and cl_kernels.cpp -CL_HEADERDIR="src/caffe/greentea/cl_headers/*.cl" +declare -a CL_HEADERS_32=("src/caffe/greentea/cl_headers/header.cl" "src/caffe/greentea/cl_headers/definitions_32.cl") +declare -a CL_HEADERS_64=("src/caffe/greentea/cl_headers/header.cl" "src/caffe/greentea/cl_headers/definitions_64.cl") CL_KERNELDIR="src/caffe/greentea/cl_kernels/*.cl" HEADER='include/caffe/greentea/cl_kernels.hpp' INCHEADER='caffe/greentea/cl_kernels.hpp' @@ -34,8 +35,9 @@ echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx);" >> echo "}" >> $HEADER echo "#endif" >> $HEADER +echo "#ifdef USE_INDEX_64" >> $SOURCE shopt -s nullglob -for CL_KERNEL in $CL_HEADERDIR +for CL_KERNEL in "${CL_HEADERS_64[@]}" do CL_KERNEL_STR=`cat $CL_KERNEL` CL_KERNEL_NAME=`echo $CL_KERNEL` @@ -45,9 +47,22 @@ do echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE echo "\"; // NOLINT" >> $SOURCE done +echo "#else" >> $SOURCE +shopt -s nullglob +for CL_KERNEL in "${CL_HEADERS_32[@]}" +do + CL_KERNEL_STR=`cat $CL_KERNEL` + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo -n "std::string $CL_KERNEL_NAME = \"" >> $SOURCE + echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE + echo "\"; // NOLINT" >> $SOURCE +done +echo "#endif" >> $SOURCE shopt -s nullglob -for CL_KERNEL in $CL_HEADERDIR $CL_KERNELDIR +for CL_KERNEL in $CL_KERNELDIR do CL_KERNEL_STR=`cat $CL_KERNEL` CL_KERNEL_NAME=`echo $CL_KERNEL` @@ -70,19 +85,28 @@ do echo "\"; // NOLINT" >> $SOURCE done - - echo "viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) {" >> $SOURCE echo " std::stringstream ss;" >> $SOURCE +echo "#ifdef USE_INDEX_64" >> $SOURCE shopt -s nullglob -for CL_KERNEL in $CL_HEADERDIR +for CL_KERNEL in "${CL_HEADERS_64[@]}" do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" echo " ss << $CL_KERNEL_NAME << \"\\n\\n\"; // NOLINT" >> $SOURCE done +echo "#else" >> $SOURCE +shopt -s nullglob +for CL_KERNEL in "${CL_HEADERS_32[@]}" +do + CL_KERNEL_NAME=`echo $CL_KERNEL` + CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" + CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" + echo " ss << $CL_KERNEL_NAME << \"\\n\\n\"; // NOLINT" >> $SOURCE +done +echo "#endif" >> $SOURCE shopt -s nullglob echo " ss << \"#define Dtype float\" << \"\\n\\n\"; // NOLINT" >> $SOURCE diff --git a/src/caffe/greentea/cl_kernels/im2col.cl b/src/caffe/greentea/cl_kernels/im2col.cl index 7afc7cbad06..5287dd30d4a 100644 --- a/src/caffe/greentea/cl_kernels/im2col.cl +++ b/src/caffe/greentea/cl_kernels/im2col.cl @@ -14,7 +14,8 @@ __kernel void TEMPLATE(im2col,Dtype)(const int_tp n, const int_tp dilation_w, const int_tp height_col, const int_tp width_col, - __global Dtype* data_col) { + __global Dtype* data_col, + const int_tp data_col_off) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { int_tp w_out = index % width_col; @@ -24,7 +25,7 @@ __kernel void TEMPLATE(im2col,Dtype)(const int_tp n, int_tp channel_out = channel_in * kernel_h * kernel_w; int_tp h_in = h_out * stride_h - pad_h; int_tp w_in = w_out * stride_w - pad_w; - __global Dtype* data_col_ptr = data_col; + __global Dtype* data_col_ptr = data_col + data_col_off; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; __global const Dtype* data_im_ptr = data_im + data_offset; data_im_ptr += (channel_in * height + h_in) * width + w_in; @@ -43,19 +44,22 @@ __kernel void TEMPLATE(im2col,Dtype)(const int_tp n, } __kernel void TEMPLATE(col2im,Dtype)(const int_tp n, - __global const Dtype* data_col, - const int_tp height, const int_tp width, - const int_tp channels, const int_tp patch_h, - const int_tp patch_w, - const int_tp ext_patch_h, - const int_tp ext_patch_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp dilation_h, - const int_tp dilation_w, - const int_tp height_col, - const int_tp width_col, - __global Dtype* data_im, - const int_tp data_offset) { + __global const Dtype* data_col, + const int_tp data_col_off, + const int_tp height, const int_tp width, + const int_tp channels, + const int_tp patch_h, const int_tp patch_w, + const int_tp ext_patch_h, + const int_tp ext_patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, + const int_tp stride_w, + const int_tp dilation_h, + const int_tp dilation_w, + const int_tp height_col, + const int_tp width_col, + __global Dtype* data_im, + const int_tp data_offset) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { Dtype val = 0; @@ -78,7 +82,7 @@ __kernel void TEMPLATE(col2im,Dtype)(const int_tp n, int_tp coeff_w_idx = height_col * width_col; int_tp coeff_h_idx = patch_w * coeff_w_idx; - int_tp offset = c * patch_h * coeff_h_idx; + int_tp offset = data_col_off + c * patch_h * coeff_h_idx; for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += dilation_h, --h_idx) { for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index 7d12eb6f852..bdf7c7e63ee 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -3,39 +3,43 @@ #endif __kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, - __global const Dtype* data_im, - const int_tp data_off, - __global const int_tp* im_shape, - __global const int_tp* col_shape, - __global const int_tp* kernel_shape, - __global const int_tp* pad, - __global const int_tp* stride, - __global const int_tp* dilation, - __global Dtype* data_col, - const int_tp data_col_off) { + const int_tp channel_axis, + __global const Dtype* data_im, + const int_tp data_off, + __global const int_tp* im_shape, + __global const int_tp* col_shape, + __global const int_tp* kernel_shape, + __global const int_tp* pad, + __global const int_tp* stride, + __global const int_tp* dilation, + __global Dtype* data_col, + const int_tp data_col_off) { int_tp d_temp[6]; int_tp d_iter[6]; int_tp i; + __global const int_tp* im_shape_ptr = im_shape + channel_axis; + __global const int_tp* col_shape_ptr = col_shape + channel_axis; + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. int_tp channel_in = index; int_tp channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { - d_temp[i] = channel_in % col_shape[i + 1]; - channel_in /= col_shape[i + 1]; + d_temp[i] = channel_in % col_shape_ptr[i + 1]; + channel_in /= col_shape_ptr[i + 1]; channel_out *= kernel_shape[i]; } channel_out *= channel_in; int_tp data_col_inc = 1; for (i = 0; i < num_axes; ++i) { - channel_out *= col_shape[i + 1]; + channel_out *= col_shape_ptr[i + 1]; channel_out += d_temp[i]; d_temp[i] = d_temp[i] * stride[i] - pad[i]; - channel_in *= im_shape[i + 1]; + channel_in *= im_shape_ptr[i + 1]; channel_in += d_temp[i]; - data_col_inc *= col_shape[i + 1]; + data_col_inc *= col_shape_ptr[i + 1]; d_iter[i] = 0; } __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; @@ -45,7 +49,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, bool in_range = true; for (i = 0; i < num_axes; ++i) { const int_tp d_iter_im = d_iter[i] + d_temp[i]; - in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; + in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1]; if (!in_range) { break; } @@ -55,7 +59,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, if (in_range) { int_tp data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { - data_im_offset *= im_shape[i + 1]; + data_im_offset *= im_shape_ptr[i + 1]; data_im_offset += d_iter[i]; } *data_col_ptr = data_im_ptr[data_im_offset]; @@ -84,16 +88,16 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, } __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, + const int_tp channel_axis, __global const Dtype* data_col, const int_tp data_col_off, __global const int_tp* im_shape, - __global const int_tp* col_shape, __global const int_tp* kernel_shape, __global const int_tp* pad, __global const int_tp* stride, __global const int_tp* dilation, __global Dtype* data_im, - const int_tp data_off) { + const int_tp data_im_off) { int_tp d_im[6]; int_tp d_col_size[6]; int_tp d_col_iter[6]; @@ -102,9 +106,12 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, int_tp d_ext_patch[6]; int_tp d_idx[6]; + __global const int_tp* im_shape_ptr = im_shape + channel_axis; + __global const Dtype* data_col_ptr = data_col + data_col_off; + for (int_tp i = num_axes - 1; i >= 0; --i) { d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1; - d_col_size[i] = (im_shape[i + 1] + 2 * pad[i] - d_ext_patch[i]) + d_col_size[i] = (im_shape_ptr[i + 1] + 2 * pad[i] - d_ext_patch[i]) / stride[i] + 1; } @@ -114,17 +121,12 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, int_tp channel_im = index; // Calculate d_im (image dimensions). for (int_tp i = num_axes - 1; i >= 0; --i) { - d_im[i] = channel_im % im_shape[i + 1] + pad[i]; - channel_im /= im_shape[i + 1]; + d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i]; + channel_im /= im_shape_ptr[i + 1]; } // Calculate col start/end indices. bool done = false; for (int_tp i = 0; i < num_axes; ++i) { - // Old: - /*d_col_start[i] = d_col_iter[i] = - (d_im[i] < kernel_shape[i]) ? - 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);*/ // New: d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1; @@ -160,7 +162,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, coeff_prod *= kernel_shape[i]; } final_offset += channel_im * coeff_prod; - val += data_col[final_offset]; + val += data_col_ptr[final_offset]; incremented = false; for (int_tp i = num_axes - 1; i >= 0; --i) { if (d_col_iter[i] > d_col_end[i] - dilation[i]) { @@ -174,6 +176,6 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, } } // for (int_tp i = num_axes - 1; i >= 0; --i) } while (incremented); - data_im[index] = val; + data_im[data_im_off + index] = val; } } diff --git a/src/caffe/greentea/cl_kernels/lrn.cl b/src/caffe/greentea/cl_kernels/lrn.cl index 86ccaf5f8d7..6bcbd75081f 100644 --- a/src/caffe/greentea/cl_kernels/lrn.cl +++ b/src/caffe/greentea/cl_kernels/lrn.cl @@ -81,7 +81,7 @@ __kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads, __global const Dtype* bottom_off = bottom_data + offset; __global const Dtype* top_off = top_data + offset; __global const Dtype* scale_off = scale + offset; - __global Dtype* top_diff_off = top_diff + offset; + __global const Dtype* top_diff_off = top_diff + offset; __global Dtype* bottom_diff_off = bottom_diff + offset; int_tp head = 0; const int_tp pre_pad = size - (size + 1) / 2; diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index f38f84af782..94fda467232 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -20,8 +20,8 @@ __kernel void TEMPLATE(max_pool_forward,Dtype)( int_tp wstart = pw * stride_w - pad_w; const int_tp hend = min(hstart + kernel_h, height); const int_tp wend = min(wstart + kernel_w, width); - hstart = max(hstart, 0L); - wstart = max(wstart, 0L); + hstart = max(hstart, (int_tp)0); + wstart = max(wstart, (int_tp)0); Dtype maxval = -FLT_MAX; int_tp maxidx = -1; __global const Dtype* bottom_slice = bottom_data @@ -61,8 +61,8 @@ __kernel void TEMPLATE(ave_pool_forward,Dtype)( int_tp hend = min(hstart + kernel_h, height + pad_h); int_tp wend = min(wstart + kernel_w, width + pad_w); const int_tp pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0L); - wstart = max(wstart, 0L); + hstart = max(hstart, (int_tp)0); + wstart = max(wstart, (int_tp)0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; diff --git a/src/caffe/greentea/cl_kernels/pooling_nd.cl b/src/caffe/greentea/cl_kernels/pooling_nd.cl index 3582ad8776d..1b5e63038d4 100644 --- a/src/caffe/greentea/cl_kernels/pooling_nd.cl +++ b/src/caffe/greentea/cl_kernels/pooling_nd.cl @@ -32,7 +32,7 @@ __kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n, d_idx[i] = num % pooled_size[i]; d_start[i] = d_idx[i] * stride[i] - pad[i]; d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]); - d_start[i] = max(d_start[i], 0L); + d_start[i] = max(d_start[i], (int_tp)0); num /= pooled_size[i]; offset *= size[i]; d_iter[i] = d_start[i]; diff --git a/src/caffe/greentea/cl_kernels/pooling_sk.cl b/src/caffe/greentea/cl_kernels/pooling_sk.cl index 20866d0272e..1184ba64590 100644 --- a/src/caffe/greentea/cl_kernels/pooling_sk.cl +++ b/src/caffe/greentea/cl_kernels/pooling_sk.cl @@ -16,16 +16,16 @@ __global Dtype* bottom_data, const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w, - const int_tp kstride_h, - const int_tp kstride_w, + const int_tp dilation_h, + const int_tp dilation_w, const int_tp pad_h, const int_tp pad_w, __global Dtype* top_data, const int use_mask, __global int_tp* mask, __global Dtype* top_mask) { - for (int_tp index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < nthreads; index += + get_global_size(0)) { int_tp pw = index % pooled_width; int_tp ph = (index / pooled_width) % pooled_height; int_tp c = (index / pooled_width / pooled_height) % channels; @@ -40,8 +40,8 @@ __global Dtype* bottom_data, int_tp maxidx = -1; __global Dtype* bottom_data_ptr = bottom_data + (n * channels + c) * height * width; - for (int_tp h = hstart; h < hend; h += kstride_h) { - for (int_tp w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += dilation_h) { + for (int_tp w = wstart; w < wend; w += dilation_w) { if (bottom_data_ptr[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data_ptr[maxidx]; @@ -59,16 +59,17 @@ __global Dtype* bottom_data, __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( const int_tp nthreads, __global const Dtype* top_diff, const int use_mask, - __global const int_tp* mask, __global const Dtype* top_mask, const int_tp num, - const int_tp channels, const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, - const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, - const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w, + __global const int_tp* mask, __global const Dtype* top_mask, + const int_tp num, const int_tp channels, const int_tp height, + const int_tp width, const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h, + const int_tp pad_w, __global Dtype* bottom_diff) { - for (int_tp index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < nthreads; index += + get_global_size(0)) { __global const int_tp* mask_ptr = mask; __global const Dtype* top_diff_ptr = top_diff; @@ -82,30 +83,32 @@ __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( int_tp pooled_height_1 = pooled_height - 1; int_tp pooled_width_1 = pooled_width - 1; - int_tp phstart = (h < ext_kernel_h) ? h % kstride_h : (h - ext_kernel_h) + 1; + int_tp phstart = + (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1; int_tp phend = (h >= pooled_height) ? - pooled_height_1 - (pooled_height_1 - phstart) % kstride_h : h; - int_tp pwstart = (w < ext_kernel_w) ? w % kstride_w : (w - ext_kernel_w) + 1; + pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h; + int_tp pwstart = + (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1; int_tp pwend = (w >= pooled_width) ? - pooled_width_1 - (pooled_width_1 - pwstart) % kstride_w : w; + pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w; Dtype gradient = 0; int_tp offset = (n * channels + c) * pooled_height * pooled_width; top_diff_ptr += offset; if (use_mask == 1) { mask_ptr += offset; - for (int_tp ph = phstart; ph <= phend; ph += kstride_h) { - for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) { + for (int_tp ph = phstart; ph <= phend; ph += dilation_h) { + for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) { if (mask_ptr[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_ptr[ph * pooled_width + pw]; } } } } else { - for (int_tp ph = phstart; ph <= phend; ph += kstride_h) { - for (int_tp pw = pwstart; pw <= pwend; pw += kstride_w) { + for (int_tp ph = phstart; ph <= phend; ph += dilation_h) { + for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_ptr[ph * pooled_width + pw]; } @@ -119,14 +122,15 @@ __kernel void TEMPLATE(max_pool_backward_sk,Dtype)( __kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, - const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, - const int_tp kstride_w, const int_tp pad_h, const int_tp pad_w, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h, + const int_tp pad_w, __global Dtype* top_data) { - for (int_tp index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < nthreads; index += + get_global_size(0)) { int_tp pw = index % pooled_width; int_tp ph = (index / pooled_width) % pooled_height; @@ -136,8 +140,8 @@ __kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( int_tp wstart = pw * stride_w - pad_w; int_tp hend = min(hstart + ext_kernel_h, height + pad_h); int_tp wend = min(wstart + ext_kernel_w, width + pad_w); - hstart = max(hstart, 0L); - wstart = max(wstart, 0L); + hstart = max(hstart, (int_tp)0); + wstart = max(wstart, (int_tp)0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; @@ -157,14 +161,14 @@ __kernel void TEMPLATE(ave_pool_forward_sk,Dtype)( __kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, - const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, - const int_tp kstride_w, __global Dtype* rand_idx, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx, __global Dtype* top_data) { - for (int_tp index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < nthreads; index += + get_global_size(0)) { int_tp pw = index % pooled_width; int_tp ph = (index / pooled_width) % pooled_height; int_tp c = (index / pooled_width / pooled_height) % channels; @@ -177,16 +181,16 @@ __kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( __global const Dtype* bottom_data_ptr = bottom_data; bottom_data_ptr += (n * channels + c) * height * width; // First pass: get sum - for (int_tp h = hstart; h < hend; h += kstride_h) { - for (int_tp w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += dilation_h) { + for (int_tp w = wstart; w < wend; w += dilation_w) { cumsum += bottom_data_ptr[h * width + w]; } } float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; - for (int_tp h = hstart; h < hend; h += kstride_h) { - for (int_tp w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += dilation_h) { + for (int_tp w = wstart; w < wend; w += dilation_w) { cumsum += bottom_data_ptr[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; @@ -202,14 +206,14 @@ __kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)( __kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, - const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h, - const int_tp kernel_w, const int_tp ext_kernel_h, const int_tp ext_kernel_w, - const int_tp stride_h, const int_tp stride_w, const int_tp kstride_h, - const int_tp kstride_w, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h, + const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, __global Dtype* top_data) { - for (int_tp index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { + for (int_tp index = get_global_id(0); index < nthreads; index += + get_global_size(0)) { int_tp pw = index % pooled_width; int_tp ph = (index / pooled_width) % pooled_height; int_tp c = (index / pooled_width / pooled_height) % channels; @@ -224,8 +228,8 @@ __kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)( __global const Dtype* bottom_data_ptr = bottom_data; bottom_data_ptr += (n * channels + c) * height * width; // First pass: get sum - for (int_tp h = hstart; h < hend; h += kstride_h) { - for (int_tp w = wstart; w < wend; w += kstride_w) { + for (int_tp h = hstart; h < hend; h += dilation_h) { + for (int_tp w = wstart; w < wend; w += dilation_w) { cumsum += bottom_data_ptr[h * width + w]; cumvalues += bottom_data_ptr[h * width + w] * bottom_data_ptr[h * width + w]; diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 5b33a6dea0e..79ecb535007 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -19,21 +19,20 @@ void greentea_im2col_gpu(viennacl::ocl::program *prog, const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, - cl_mem data_col) { + cl_mem data_col, const int_tp data_col_off) { int_tp ext_kernel_h = (kernel_h - 1) * dilation_h + 1; int_tp ext_kernel_w = (kernel_w - 1) * dilation_w + 1; int_tp height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; int_tp width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; int_tp num_kernels = channels * height_col * width_col; - viennacl::ocl::kernel &kernel = prog->get_kernel( - CL_KERNEL_SELECT("im2col_sk")); + viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("im2col")); viennacl::ocl::enqueue( kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, - WrapHandle(data_col, ctx)), + WrapHandle(data_col, ctx), data_col_off), ctx->get_queue()); } @@ -52,7 +51,8 @@ template void greentea_im2col_gpu(viennacl::ocl::program *prog, const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, - cl_mem data_col); + cl_mem data_col, + const int_tp data_col_off); template void greentea_im2col_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, @@ -69,23 +69,19 @@ template void greentea_im2col_gpu(viennacl::ocl::program *prog, const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, - cl_mem data_col); + cl_mem data_col, + const int_tp data_col_off); template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, - const int_tp channels, const int_tp height, - const int_tp width, const int_tp patch_h, - const int_tp patch_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp dilation_h, - const int_tp dilation_w, cl_mem data_im, - const int_tp data_offset) { - if (stride_w > 1 || stride_h > 1 || pad_h > 0 || pad_w > 0) { - LOG(FATAL)<< "stride greater than 1 or pad greater than 0" - << " not tested in col2im_sk_gpu()."; - } - + const int_tp data_col_off, const int_tp channels, + const int_tp height, const int_tp width, + const int_tp patch_h, const int_tp patch_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, + cl_mem data_im, const int_tp data_offset) { int_tp ext_patch_h = (patch_h - 1) * dilation_h + 1; int_tp ext_patch_w = (patch_w - 1) * dilation_w + 1; int_tp height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; @@ -93,10 +89,11 @@ void greentea_col2im_gpu(viennacl::ocl::program *prog, int_tp num_kernels = channels * height * width; viennacl::ocl::kernel &kernel = prog->get_kernel( - CL_KERNEL_SELECT("col2im_sk")); + CL_KERNEL_SELECT("col2im")); viennacl::ocl::enqueue( - kernel(num_kernels, WrapHandle(data_col, ctx), height, width, channels, + kernel(num_kernels, WrapHandle(data_col, ctx), data_col_off, + height, width, channels, patch_h, patch_w, ext_patch_h, ext_patch_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, WrapHandle(data_im, ctx), data_offset), @@ -106,6 +103,7 @@ void greentea_col2im_gpu(viennacl::ocl::program *prog, template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, + const int_tp data_col_off, const int_tp channels, const int_tp height, const int_tp width, @@ -122,6 +120,7 @@ template void greentea_col2im_gpu(viennacl::ocl::program *prog, template void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, + const int_tp data_col_off, const int_tp channels, const int_tp height, const int_tp width, @@ -141,19 +140,21 @@ void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int_tp data_off, const int_tp num_spatial_axes, - const int_tp num_kernels, cl_mem im_shape, - cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, - cl_mem stride, cl_mem dilation, cl_mem data_col, - int_tp data_col_off) { + const int_tp channel_axis, const int_tp num_kernels, + cl_mem im_shape, cl_mem col_shape, + cl_mem kernel_shape, cl_mem pad, cl_mem stride, + cl_mem dilation, cl_mem data_col, + const int_tp data_col_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( - CL_KERNEL_SELECT("im2col_ndsk")); + CL_KERNEL_SELECT("im2col_nd")); viennacl::ocl::enqueue( - kernel(num_kernels, num_spatial_axes, WrapHandle(data_im, ctx), data_off, - WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), - WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), - WrapHandle(stride, ctx), WrapHandle(dilation, ctx), - WrapHandle(data_col, ctx), data_col_off), + kernel(num_kernels, num_spatial_axes, channel_axis, + WrapHandle(data_im, ctx), data_off, WrapHandle(im_shape, ctx), + WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), + WrapHandle(pad, ctx), WrapHandle(stride, ctx), + WrapHandle(dilation, ctx), WrapHandle(data_col, ctx), + data_col_off), ctx->get_queue()); } @@ -163,40 +164,44 @@ template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, cl_mem data_im, const int_tp data_off, const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem dilation, cl_mem data_col, - int_tp data_col_off); + const int_tp data_col_off); template void greentea_im2col_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_im, const int_tp data_off, const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp num_kernels, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem dilation, cl_mem data_col, - int_tp data_col_off); + const int_tp data_col_off); template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, cl_mem data_col, const int_tp data_col_off, - const int_tp num_spatial_axes, const int_tp im_size, + const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem dilation, cl_mem data_im, - int_tp data_off) { + const int_tp data_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( - CL_KERNEL_SELECT("col2im_ndsk")); + CL_KERNEL_SELECT("col2im_nd")); viennacl::ocl::enqueue( - kernel(im_size, num_spatial_axes, WrapHandle(data_col, ctx), data_col_off, - WrapHandle(im_shape, ctx), WrapHandle(col_shape, ctx), + kernel(im_size, num_spatial_axes, channel_axis, + WrapHandle(data_col, ctx), data_col_off, + WrapHandle(im_shape, ctx), WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), WrapHandle(stride, ctx), WrapHandle(dilation, ctx), WrapHandle(data_im, ctx), data_off), @@ -209,6 +214,7 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, cl_mem data_col, const int_tp data_col_off, const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, @@ -220,6 +226,7 @@ template void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, cl_mem data_col, const int_tp data_col_off, const int_tp num_spatial_axes, + const int_tp channel_axis, const int_tp im_size, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, diff --git a/src/caffe/greentea/greentea_math_functions.cpp b/src/caffe/greentea/greentea_math_functions.cpp index 9446cf31c6c..62b24ddc026 100644 --- a/src/caffe/greentea/greentea_math_functions.cpp +++ b/src/caffe/greentea/greentea_math_functions.cpp @@ -220,7 +220,7 @@ void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, size_type B_size1 = static_cast((TransB == CblasTrans) ? N : K); size_type B_size2 = static_cast((TransB == CblasTrans) ? K : N); - viennacl::matrix_base matA(A, ctx, A_size1, + viennacl::matrix_base matA(A, ctx, A_size1, size_type(0), difference_type(1), size_type(M), A_size2, @@ -229,7 +229,7 @@ void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, size_type(lda) VCL_ROW_MAJOR); - viennacl::matrix_base matB(B, ctx, B_size1, + viennacl::matrix_base matB(B, ctx, B_size1, size_type(0), difference_type(1), size_type(K), B_size2, @@ -238,7 +238,7 @@ void greentea_gpu_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, size_type(ldb) VCL_ROW_MAJOR); - viennacl::matrix_base matC(C, ctx, size_type(M), + viennacl::matrix_base matC(C, ctx, size_type(M), size_type(0), difference_type(1), size_type(M), @@ -342,13 +342,13 @@ void greentea_gpu_gemv(const int_tp ctx_id, const CBLAS_TRANSPOSE TransA, typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1( + viennacl::vector_base v1( x, size_type((TransA == CblasTrans) ? M : N), size_type(offx), difference_type(1), ctx); - viennacl::vector_base v2( + viennacl::vector_base v2( y, size_type((TransA == CblasTrans) ? N : M), size_type(offy), difference_type(1), ctx); - viennacl::matrix_base mat(A, ctx, size_type(M), + viennacl::matrix_base mat(A, ctx, size_type(M), size_type(0), difference_type(1), size_type(M), @@ -427,10 +427,10 @@ void greentea_gpu_axpy(const int_tp ctx_id, const int_tp N, const Dtype alpha, typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, size_type(N), + viennacl::vector_base v1(X, size_type(N), size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, size_type(N), + viennacl::vector_base v2(Y, size_type(N), size_type(offY), difference_type(1), ctx); @@ -531,7 +531,7 @@ void greentea_gpu_scal(const int_tp ctx_id, const int_tp N, const Dtype alpha, typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(x, size_type(N), + viennacl::vector_base v1(x, size_type(N), size_type(offx), difference_type(1), ctx); @@ -604,10 +604,10 @@ void greentea_gpu_dot(const int_tp ctx_id, const int_tp n, const cl_mem X, typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, size_type(n), + viennacl::vector_base v1(X, size_type(n), size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, size_type(n), + viennacl::vector_base v2(Y, size_type(n), size_type(offY), difference_type(1), ctx); @@ -672,7 +672,7 @@ void greentea_gpu_asum(const int_tp ctx_id, const int_tp n, const cl_mem X, typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, size_type(n), + viennacl::vector_base v1(X, size_type(n), size_type(offX), difference_type(1), ctx); @@ -740,10 +740,10 @@ void greentea_gpu_scale(const int_tp ctx_id, const int_tp n, const Dtype alpha, typedef typename viennacl::vector_base::size_type difference_type; - viennacl::vector_base v1(X, size_type(n), + viennacl::vector_base v1(X, size_type(n), size_type(offX), difference_type(1), ctx); - viennacl::vector_base v2(Y, size_type(n), + viennacl::vector_base v2(Y, size_type(n), size_type(offY), difference_type(1), ctx); diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 6d205bf509f..459c1cd22ef 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -4,46 +4,34 @@ namespace caffe { -template +template void ConvolutionLayer::compute_output_shape() { -<<<<<<< HEAD const int_tp* kernel_shape_data = this->kernel_shape_.cpu_data(); const int_tp* stride_data = this->stride_.cpu_data(); const int_tp* pad_data = this->pad_.cpu_data(); - const int_tp* kstride_data = this->kstride_.cpu_data(); -======= - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int* stride_data = this->stride_.cpu_data(); - const int* pad_data = this->pad_.cpu_data(); - const int* dilation_data = this->dilation_.cpu_data(); ->>>>>>> 08c5dfd53e6fd98148d6ce21e590407e38055984 + const int_tp* dilation_data = this->dilation_.cpu_data(); this->output_shape_.clear(); for (int_tp i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis -<<<<<<< HEAD const int_tp input_dim = this->input_shape(i + 1); - const int_tp output_dim = (input_dim + 2 * pad_data[i] - - ((kernel_shape_data[i] - 1) * kstride_data[i] + 1)) -======= - const int input_dim = this->input_shape(i + 1); - const int kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1; - const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent) ->>>>>>> 08c5dfd53e6fd98148d6ce21e590407e38055984 + const int_tp kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + + 1; + const int_tp output_dim = (input_dim + 2 * pad_data[i] - kernel_extent) / stride_data[i] + 1; this->output_shape_.push_back(output_dim); } } -template +template void ConvolutionLayer::Forward_cpu(const vector*>& bottom, - const vector*>& top) { + const vector*>& top) { const Dtype* weight = this->blobs_[0]->cpu_data(); for (int_tp i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); for (int_tp n = 0; n < this->num_; ++n) { this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); + top_data + n * this->top_dim_); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->cpu_data(); this->forward_cpu_bias(top_data + n * this->top_dim_, bias); @@ -52,9 +40,10 @@ void ConvolutionLayer::Forward_cpu(const vector*>& bottom, } } -template +template void ConvolutionLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { + const vector& propagate_down, + const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); for (int_tp i = 0; i < top.size(); ++i) { @@ -73,12 +62,12 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_, - top_diff + n * this->top_dim_, weight_diff); + top_diff + n * this->top_dim_, weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->backward_cpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_); + bottom_diff + n * this->bottom_dim_); } } } diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index d38010cd75c..9752e2a7fc2 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -14,7 +14,8 @@ void DeconvolutionLayer::compute_output_shape() { for (int_tp i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis const int_tp input_dim = this->input_shape(i + 1); - const int_tp kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1; + const int_tp kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + + 1; const int_tp output_dim = stride_data[i] * (input_dim - 1) + kernel_extent - 2 * pad_data[i]; this->output_shape_.push_back(output_dim); diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 32f2e75aefb..a80313b7342 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -114,7 +114,8 @@ void Im2colLayer::Reshape(const vector*>& bottom, for (int_tp i = 0; i < num_spatial_axes_; ++i) { top_shape[channel_axis_] *= kernel_shape_data[i]; const int_tp input_dim = bottom[0]->shape(channel_axis_ + i + 1); - const int_tp kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1; + const int_tp kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + + 1; const int_tp output_dim = (input_dim + 2 * pad_data[i] - kernel_extent) / stride_data[i] + 1; top_shape[channel_axis_ + i + 1] = output_dim; diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 876b5182f49..a07ebb4d33b 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -55,8 +55,9 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, kernel_shape_.cpu_data()[1], pad_.cpu_data()[0], pad_.cpu_data()[1], stride_.cpu_data()[0], stride_.cpu_data()[1], - dilation_.cpu_data()[0], dilation_.cpu_data()[1], - (cl_mem) top_data, n * top_dim_); + dilation_.cpu_data()[0], + dilation_.cpu_data()[1], (cl_mem) top_data, + n * top_dim_); } else { greentea_im2col_nd_gpu(&program, &ctx, (cl_mem) bottom_data, n * bottom_dim_, num_spatial_axes_, diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 0113a709699..71a37737066 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -33,7 +33,8 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, CHECK_GE(num_spatial_axes_, 0); vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, (int_tp)1)); + vector spatial_dim_blob_shape( + 1, std::max(num_spatial_axes_, (int_tp) 1)); kernel_shape_.Reshape(spatial_dim_blob_shape); int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); @@ -188,7 +189,8 @@ void PoolingLayer::Reshape(const vector*>& bottom, vector top_shape = bottom[0]->shape(); for (int_tp i = 0; i < num_spatial_axes_; ++i) { size_data[i] = bottom[0]->shape(channel_axis_ + 1 + i); - ext_kernel_shape_data[i] = (kernel_shape_data[i] - 1) * dilation_data[i] + 1; + ext_kernel_shape_data[i] = (kernel_shape_data[i] - 1) * dilation_data[i] + + 1; pooled_size_data[i] = static_cast(ceil( static_cast(size_data[i] + 2 * pad_data[i] - ext_kernel_shape_data[i]) / stride_data[i])) + 1; diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index ba073cb83b3..bc8bf677871 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -488,8 +488,9 @@ __global__ void MaxPoolBackward(const int_tp nthreads, const Dtype* top_diff, const int_tp ext_kernel_h, const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w, - const int_tp dilation_h, const int_tp dilation_w, - const int_tp pad_h, const int_tp pad_w, + const int_tp dilation_h, + const int_tp dilation_w, const int_tp pad_h, + const int_tp pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index ee892ca8294..24e3328768c 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -15,7 +15,6 @@ namespace caffe { // Forward declare kernel functions -#ifdef USE_CUDA template __global__ void im2col_gpu_kernel(const int_tp n, const Dtype* data_im, const int_tp height, const int_tp width, @@ -36,7 +35,6 @@ __global__ void im2col_nd_gpu_kernel(const int_tp n, const Dtype* data_im, const int_tp* dilation, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; -#endif // USE_CUDA template class Im2colKernelTest : public GPUDeviceTest { @@ -105,52 +103,32 @@ class Im2colKernelTest : public GPUDeviceTest { int_tp width_col_; }; - TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); -TYPED_TEST(Im2colKernelTest, Test2D){ -if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); +TYPED_TEST(Im2colKernelTest, Test2D) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + // Reshape the blobs to correct size for im2col output + this->blob_top_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); - this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); - TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); - - // CPU Version - for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), - this->channels_, this->height_, this->width_, - this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, - this->stride_, this->stride_, this->dilation_, this->dilation_, - cpu_data + this->blob_top_cpu_->offset(n)); - } + this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); - // GPU version - int_tp num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int_tp default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); + TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); - // Launch with different grid sizes - for (int_tp grid_div = 2; grid_div <= 8; grid_div++) { + // CPU Version for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { - int_tp grid_dim = default_grid_dim/grid_div; - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernelCUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( - num_kernels, bottom_data + this->blob_bottom_->offset(n), - this->height_, this->width_, this->kernel_size_, this->kernel_size_, - this->pad_, this->pad_, this->stride_, this->stride_, - this->dilation_, this->dilation_, - this->height_col_, this->width_col_, - top_data + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; + im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), + this->channels_, this->height_, this->width_, + this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, + this->stride_, this->stride_, this->dilation_, this->dilation_, + cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version @@ -160,91 +138,54 @@ if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { // Launch with different grid sizes for (int_tp grid_div = 2; grid_div <= 8; grid_div++) { for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_cpu(this->blob_bottom_->cpu_data() - + this->blob_bottom_->offset(n), - this->channels_, this->height_, this->width_, - this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, - this->stride_, this->stride_, - cpu_data + this->blob_top_cpu_->offset(n)); + int_tp grid_dim = default_grid_dim/grid_div; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel + CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( + num_kernels, bottom_data + this->blob_bottom_->offset(n), + this->height_, this->width_, this->kernel_size_, this->kernel_size_, + this->pad_, this->pad_, this->stride_, this->stride_, + this->dilation_, this->dilation_, + this->height_col_, this->width_col_, + top_data + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; } - // GPU version - int_tp num_kernels = this->channels_ * this->height_col_ - * this->width_col_; - int_tp default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - - // Launch with different grid sizes - for (int_tp grid_div = 2; grid_div <= 8; grid_div++) { - for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { - int_tp grid_dim = default_grid_dim/grid_div; - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel - CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( - num_kernels, bottom_data + this->blob_bottom_->offset(n), - this->height_, this->width_, this->kernel_size_, this->kernel_size_, - this->pad_, this->pad_, this->stride_, this->stride_, - this->height_col_, this->width_col_, - top_data + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; - } - - // Compare results against CPU version - for (int_tp i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = cpu_data[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; - } + // Compare results against CPU version + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = cpu_data[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; } } } } } -} -TYPED_TEST(Im2colKernelTest, TestND){ -if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); +TYPED_TEST(Im2colKernelTest, TestND) { + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + // Reshape the blobs to correct size for im2col output + this->blob_top_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); - this->blob_top_cpu_->ReshapeLike(*this->blob_top_); + this->blob_top_cpu_->ReshapeLike(*this->blob_top_); - const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); - TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); + const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); + TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); - // CPU Version - for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, - this->blob_bottom_->shape().data() + 1, - this->blob_top_cpu_->shape().data() + 1, - this->blob_kernel_shape_->cpu_data(), - this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), - this->blob_dilation_->cpu_data(), - top_data_cpu + this->blob_top_cpu_->offset(n)); - } - - // GPU version - int_tp num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int_tp default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); - - // Launch with different grid sizes - for (int_tp grid_div = 2; grid_div <= 8; grid_div++) { + // CPU Version for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { - const int_tp grid_dim = default_grid_dim / grid_div; - TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_nd_gpu_kernelCUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( - num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), - this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, - this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), - this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), - top_data_gpu + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; + im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, + this->blob_bottom_->shape().data() + 1, + this->blob_top_cpu_->shape().data() + 1, + this->blob_kernel_shape_->cpu_data(), + this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), + this->blob_dilation_->cpu_data(), + top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version @@ -258,14 +199,13 @@ if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { const int_tp grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) - im2col_nd_gpu_kernel - CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS) ( - num_kernels, 2, bottom_data_gpu + this->blob_bottom_->offset(n), + im2col_nd_gpu_kernel + CUDA_KERNEL(grid_dim, CAFFE_CUDA_NUM_THREADS)( + num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, - this->blob_kernel_shape_->gpu_data(), - this->blob_pad_->gpu_data(), - this->blob_stride_->gpu_data(), + this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), + this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } @@ -282,8 +222,6 @@ if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { } } } -} -} - // namespace caffe +} // namespace caffe #endif // USE_CUDA diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index c9856ec6d5f..8808fc32982 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -71,7 +71,7 @@ void LRNLayerTest::ReferenceLRNForward( for (int_tp w = 0; w < blob_bottom.width(); ++w) { int_tp c_start = c - (size - 1) / 2; int_tp c_end = min(c_start + size, blob_bottom.channels()); - c_start = max(c_start, 0L); + c_start = max(c_start, (int_tp)0); Dtype scale = 1.; for (int_tp i = c_start; i < c_end; ++i) { Dtype value = blob_bottom.data_at(n, i, h, w); @@ -90,12 +90,12 @@ void LRNLayerTest::ReferenceLRNForward( for (int_tp h = 0; h < blob_bottom.height(); ++h) { int_tp h_start = h - (size - 1) / 2; int_tp h_end = min(h_start + size, blob_bottom.height()); - h_start = max(h_start, 0L); + h_start = max(h_start, (int_tp)0); for (int_tp w = 0; w < blob_bottom.width(); ++w) { Dtype scale = 1.; int_tp w_start = w - (size - 1) / 2; int_tp w_end = min(w_start + size, blob_bottom.width()); - w_start = max(w_start, 0L); + w_start = max(w_start, (int_tp)0); for (int_tp nh = h_start; nh < h_end; ++nh) { for (int_tp nw = w_start; nw < w_end; ++nw) { Dtype value = blob_bottom.data_at(n, c, nh, nw); @@ -299,7 +299,7 @@ void CuDNNLRNLayerTest::ReferenceLRNForward( for (int_tp w = 0; w < blob_bottom.width(); ++w) { int_tp c_start = c - (size - 1) / 2; int_tp c_end = min(c_start + size, blob_bottom.channels()); - c_start = max(c_start, 0L); + c_start = max(c_start, (int_tp)0); Dtype scale = 1.; for (int_tp i = c_start; i < c_end; ++i) { Dtype value = blob_bottom.data_at(n, i, h, w); @@ -318,12 +318,12 @@ void CuDNNLRNLayerTest::ReferenceLRNForward( for (int_tp h = 0; h < blob_bottom.height(); ++h) { int_tp h_start = h - (size - 1) / 2; int_tp h_end = min(h_start + size, blob_bottom.height()); - h_start = max(h_start, 0L); + h_start = max(h_start, (int_tp)0); for (int_tp w = 0; w < blob_bottom.width(); ++w) { Dtype scale = 1.; int_tp w_start = w - (size - 1) / 2; int_tp w_end = min(w_start + size, blob_bottom.width()); - w_start = max(w_start, 0L); + w_start = max(w_start, (int_tp)0); for (int_tp nh = h_start; nh < h_end; ++nh) { for (int_tp nw = w_start; nw < w_end; ++nw) { Dtype value = blob_bottom.data_at(n, c, nh, nw); @@ -345,104 +345,116 @@ void CuDNNLRNLayerTest::ReferenceLRNForward( TYPED_TEST_CASE(CuDNNLRNLayerTest, TestDtypes); TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsCuDNN) { - // typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - CuDNNLRNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - Blob top_reference; - this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, - &top_reference); - for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], - this->epsilon_); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + // typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + CuDNNLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } } } TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsLargeRegionCuDNN) { - typedef TypeParam Dtype; - LayerParameter layer_param; - layer_param.mutable_lrn_param()->set_local_size(15); - CuDNNLRNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - Blob top_reference; - this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, - &top_reference); - for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], - this->epsilon_); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + typedef TypeParam Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + CuDNNLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } } } TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsCuDNN) { - typedef TypeParam Dtype; - LayerParameter layer_param; - CuDNNLRNLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int_tp i = 0; i < this->blob_top_->count(); ++i) { - this->blob_top_->mutable_cpu_diff()[i] = 1.; + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + typedef TypeParam Dtype; + LayerParameter layer_param; + CuDNNLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } - vector propagate_down(this->blob_bottom_vec_.size(), true); - layer.Backward(this->blob_top_vec_, propagate_down, - this->blob_bottom_vec_); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); } TYPED_TEST(CuDNNLRNLayerTest, TestForwardWithinChannel) { - typedef TypeParam Dtype; - LayerParameter layer_param; - layer_param.mutable_lrn_param()->set_norm_region( - LRNParameter_NormRegion_WITHIN_CHANNEL); - layer_param.mutable_lrn_param()->set_local_size(3); - CuDNNLCNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - Blob top_reference; - this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, - &top_reference); - for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], - this->epsilon_); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + typedef TypeParam Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + CuDNNLCNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } } } TYPED_TEST(CuDNNLRNLayerTest, TestGradientWithinChannel) { - typedef TypeParam Dtype; - LayerParameter layer_param; - layer_param.mutable_lrn_param()->set_norm_region( - LRNParameter_NormRegion_WITHIN_CHANNEL); - layer_param.mutable_lrn_param()->set_local_size(3); - CuDNNLCNLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int_tp i = 0; i < this->blob_top_->count(); ++i) { - this->blob_top_->mutable_cpu_diff()[i] = 1.; + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + typedef TypeParam Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + CuDNNLCNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); } TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsLargeRegionCuDNN) { - typedef TypeParam Dtype; - LayerParameter layer_param; - layer_param.mutable_lrn_param()->set_local_size(15); - CuDNNLRNLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int_tp i = 0; i < this->blob_top_->count(); ++i) { - this->blob_top_->mutable_cpu_diff()[i] = 1.; + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + typedef TypeParam Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + CuDNNLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } - vector propagate_down(this->blob_bottom_vec_.size(), true); - layer.Backward(this->blob_top_vec_, propagate_down, - this->blob_bottom_vec_); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); } #endif diff --git a/src/caffe/test/test_pooling_ndsk_layer.cpp b/src/caffe/test/test_pooling_ndsk_layer.cpp index 8b43380d0ed..58ab9f9cad2 100644 --- a/src/caffe/test/test_pooling_ndsk_layer.cpp +++ b/src/caffe/test/test_pooling_ndsk_layer.cpp @@ -58,9 +58,9 @@ class PoolingNDSKLayerTest : public GPUDeviceTest { pooling_param->add_kernel_size(3); pooling_param->add_kernel_size(3); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); + pooling_param->add_dilation(2); + pooling_param->add_dilation(2); + pooling_param->add_dilation(2); pooling_param->set_axis(1); @@ -103,9 +103,9 @@ class PoolingNDSKLayerTest : public GPUDeviceTest { pooling_param->add_kernel_size(3); pooling_param->add_kernel_size(3); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); + pooling_param->add_dilation(2); + pooling_param->add_dilation(2); + pooling_param->add_dilation(2); pooling_param->set_axis(1); @@ -173,9 +173,9 @@ TYPED_TEST(PoolingNDSKLayerTest, TestSetup) { pooling_param->add_kernel_size(3); pooling_param->add_kernel_size(3); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); - pooling_param->add_kstride(2); + pooling_param->add_dilation(2); + pooling_param->add_dilation(2); + pooling_param->add_dilation(2); pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 034f970f2b0..803f803b991 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -492,7 +492,7 @@ TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformIntGPU) { uniform_data[i] = static_cast(uniform_uint_data[i]); } const TypeParam lower = 0; - const TypeParam upper = ULONG_MAX; + const TypeParam upper = ((sizeof(int_tp) == 4) ? UINT_MAX:ULONG_MAX); this->RngUniformChecks(lower, upper, uniform_data); } diff --git a/src/caffe/util/hdf5.cpp b/src/caffe/util/hdf5.cpp index ab56474d4e3..ce1b5d7c83b 100644 --- a/src/caffe/util/hdf5.cpp +++ b/src/caffe/util/hdf5.cpp @@ -125,7 +125,7 @@ void hdf5_save_nd_dataset( string hdf5_load_string(hid_t loc_id, const string& dataset_name) { // Get size of dataset - uint_tp size; + size_t size; H5T_class_t class_; herr_t status = \ H5LTget_dataset_info(loc_id, dataset_name.c_str(), NULL, &class_, &size); diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index c2d2102a1a1..8923e800565 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -59,33 +59,40 @@ void im2col_gpu(const Dtype* data_im, const int_tp channels, int_tp width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int_tp num_kernels = channels * height_col * width_col; - // NOLint_tp_NEXT_LINE(whitespace/operators) + // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, data_col); - CUDA_POST_KERNEL_CHECK - ; + CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void im2col_gpu(const float* data_im, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, - const int_tp dilation_h, const int_tp dilation_w, float* data_col); + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, + const int_tp dilation_w, float* data_col); template void im2col_gpu(const double* data_im, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, - const int_tp dilation_h, const int_tp dilation_w, double* data_col); + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, + const int_tp dilation_w, double* data_col); -template +template __global__ void im2col_nd_gpu_kernel(const int_tp n, const Dtype* data_im, - const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - const int_tp* dilation, Dtype* data_col) { - int_tp d_temp[num_axes]; // NOLint_tp(runtime/arrays) - int_tp d_iter[num_axes]; // NOLint_tp(runtime/arrays) + const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* dilation, Dtype* data_col) { + int_tp d_temp[num_axes]; // NOLINT(runtime/arrays) + int_tp d_iter[num_axes]; // NOLINT(runtime/arrays) __shared__ int_tp shared_dilation[num_axes]; __shared__ int_tp shared_kernel_shape[num_axes]; @@ -174,61 +181,61 @@ void im2col_nd_gpu(const Dtype* data_im, const int_tp num_spatial_axes, DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS); switch (num_spatial_axes) { case 1: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 2: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 3: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 4: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 5: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 6: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 7: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 8: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 9: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 10: - im2col_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); @@ -242,15 +249,21 @@ void im2col_nd_gpu(const Dtype* data_im, const int_tp num_spatial_axes, // Explicit instantiation template void im2col_nd_gpu(const float* data_im, - const int_tp num_spatial_axes, const int_tp col_size, - const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - const int_tp* dilation, float* data_col); + const int_tp num_spatial_axes, + const int_tp col_size, + const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* dilation, float* data_col); template void im2col_nd_gpu(const double* data_im, - const int_tp num_spatial_axes, const int_tp col_size, - const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - const int_tp* dilation, double* data_col); + const int_tp num_spatial_axes, + const int_tp col_size, + const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* dilation, double* data_col); template __global__ void col2im_gpu_kernel(const int_tp n, const Dtype* data_col, @@ -309,7 +322,7 @@ void col2im_gpu(const Dtype* data_col, const int_tp channels, int_tp num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. - // NOLint_tp_NEXT_LINE(whitespace/operators) + // NOLINT_NEXT_LINE(whitespace/operators) col2im_gpu_kernelCUDA_KERNEL(CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS)( num_kernels, data_col, height, width, channels, kernel_h, kernel_w, @@ -320,32 +333,38 @@ void col2im_gpu(const Dtype* data_col, const int_tp channels, // Explicit instantiation template void col2im_gpu(const float* data_col, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, - float* data_im); + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, + const int_tp dilation_w, float* data_im); template void col2im_gpu(const double* data_col, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, - double* data_im); + const int_tp height, const int_tp width, + const int_tp kernel_h, const int_tp kernel_w, + const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, + const int_tp dilation_w, double* data_im); -template +template __global__ void col2im_nd_gpu_kernel(const int_tp n, const Dtype* data_col, - const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - const int_tp* dilation, Dtype* data_im) { - int_tp d_im[num_axes]; // NOLint_tp(runtime/arrays) - int_tp d_col_iter[num_axes]; // NOLint_tp(runtime/arrays) - int_tp d_col_start[num_axes]; // NOLint_tp(runtime/arrays) - int_tp d_col_end[num_axes]; // NOLint_tp(runtime/arrays) + const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* dilation, Dtype* data_im) { + int_tp d_im[num_axes]; // NOLINT(runtime/arrays) + int_tp d_col_iter[num_axes]; // NOLINT(runtime/arrays) + int_tp d_col_start[num_axes]; // NOLINT(runtime/arrays) + int_tp d_col_end[num_axes]; // NOLINT(runtime/arrays) - __shared__ int_tp shared_dilation[num_axes]; - __shared__ int_tp shared_kernel_shape[num_axes]; - __shared__ int_tp shared_pad[num_axes]; - __shared__ int_tp shared_stride[num_axes]; - __shared__ int_tp shared_col_shape[num_axes + 1]; - __shared__ int_tp shared_im_shape[num_axes + 1]; + __shared__ int_tp shared_dilation[num_axes]; // NOLINT(runtime/arrays) + __shared__ int_tp shared_kernel_shape[num_axes]; // NOLINT(runtime/arrays) + __shared__ int_tp shared_pad[num_axes]; // NOLINT(runtime/arrays) + __shared__ int_tp shared_stride[num_axes]; // NOLINT(runtime/arrays) + __shared__ int_tp shared_col_shape[num_axes + 1]; // NOLINT(runtime/arrays) + __shared__ int_tp shared_im_shape[num_axes + 1]; // NOLINT(runtime/arrays) if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; @@ -444,61 +463,61 @@ void col2im_nd_gpu(const Dtype* data_col, const int_tp num_spatial_axes, DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS); switch (num_spatial_axes) { case 1: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 2: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 3: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 4: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 5: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 6: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 7: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 8: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 9: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 10: - col2im_nd_gpu_kernel // NOLint_tp_NEXT_LINE(whitespace/operators) + col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS)( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); @@ -512,15 +531,20 @@ void col2im_nd_gpu(const Dtype* data_col, const int_tp num_spatial_axes, // Explicit instantiation template void col2im_nd_gpu(const float* data_col, - const int_tp num_spatial_axes, const int_tp im_size, - const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - const int_tp* dilation, float* data_im); + const int_tp num_spatial_axes, + const int_tp im_size, const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* dilation, float* data_im); template void col2im_nd_gpu(const double* data_col, - const int_tp num_spatial_axes, const int_tp im_size, - const int_tp* im_shape, const int_tp* col_shape, - const int_tp* kernel_shape, const int_tp* pad, const int_tp* stride, - const int_tp* dilation, double* data_im); + const int_tp num_spatial_axes, + const int_tp im_size, + const int_tp* im_shape, + const int_tp* col_shape, + const int_tp* kernel_shape, + const int_tp* pad, const int_tp* stride, + const int_tp* dilation, double* data_im); #endif // USE_CUDA } // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 8037daba4fe..6ab7062b542 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -70,12 +70,12 @@ void caffe_set(const int_tp N, const Dtype alpha, Dtype* Y) { } } -template void caffe_set(const int_tp N, const int alpha, int* Y); -template void caffe_set(const int_tp N, const unsigned int alpha, - unsigned int* Y); -template void caffe_set(const int_tp N, const long long alpha, long long* Y); -template void caffe_set(const int_tp N, const unsigned long long alpha, - unsigned long long* Y); +template void caffe_set(const int_tp N, const int alpha, int* Y); +template void caffe_set(const int_tp N, const uint32_t alpha, + uint32_t* Y); +template void caffe_set(const int_tp N, int64_t alpha, int64_t* Y); +template void caffe_set(const int_tp N, const uint64_t alpha, + uint64_t* Y); template void caffe_set(const int_tp N, const float alpha, float* Y); template void caffe_set(const int_tp N, const double alpha, double* Y); From 4d5edc0f1f924d23246f89bc4305069fa4a2a8c4 Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 31 Dec 2015 05:45:11 +0100 Subject: [PATCH 240/254] Cleanup --- log.txt | 110 -------------------------------------------------------- 1 file changed, 110 deletions(-) delete mode 100644 log.txt diff --git a/log.txt b/log.txt deleted file mode 100644 index 38d69b89d4a..00000000000 --- a/log.txt +++ /dev/null @@ -1,110 +0,0 @@ -Setting to use device 1 -Note: Google Test filter = *Rng* -[==========] Running 36 tests from 2 test cases. -[----------] Global test environment set-up. -[----------] 18 tests from RandomNumberGeneratorTest/0, where TypeParam = float -[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussian -[ OK ] RandomNumberGeneratorTest/0.TestRngGaussian (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussian2 -[ OK ] RandomNumberGeneratorTest/0.TestRngGaussian2 (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniform -[ OK ] RandomNumberGeneratorTest/0.TestRngUniform (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniform2 -[ OK ] RandomNumberGeneratorTest/0.TestRngUniform2 (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngBernoulli -[ OK ] RandomNumberGeneratorTest/0.TestRngBernoulli (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngBernoulli2 -[ OK ] RandomNumberGeneratorTest/0.TestRngBernoulli2 (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussian -[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussian (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniform -[ OK ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniform (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianTimesBernoulli -[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianTimesBernoulli (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformTimesBernoulli -[ OK ] RandomNumberGeneratorTest/0.TestRngUniformTimesBernoulli (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngBernoulliTimesBernoulli -[ OK ] RandomNumberGeneratorTest/0.TestRngBernoulliTimesBernoulli (0 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianGPU -[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianGPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussian2GPU -[ OK ] RandomNumberGeneratorTest/0.TestRngGaussian2GPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformGPU -[ OK ] RandomNumberGeneratorTest/0.TestRngUniformGPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniform2GPU -[ OK ] RandomNumberGeneratorTest/0.TestRngUniform2GPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformIntGPU -src/caffe/test/test_random_number_generator.cpp:127: Failure -The difference between sample_mean and true_mean is 9.2233720346967101e+18, which exceeds bound, where -sample_mean evaluates to 2158065920, -true_mean evaluates to 9.2233720368547758e+18, and -bound evaluates to 2.0235441203367117e+17. -src/caffe/test/test_random_number_generator.cpp:163: Failure -The difference between bernoulli_p and sample_p_above_mean is 0.5, which exceeds bernoulli_bound, where -bernoulli_p evaluates to 0.5, -sample_p_above_mean evaluates to 0, and -bernoulli_bound evaluates to 0.018999999389052391. -[ FAILED ] RandomNumberGeneratorTest/0.TestRngUniformIntGPU, where TypeParam = float (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussianGPU -[ OK ] RandomNumberGeneratorTest/0.TestRngGaussianTimesGaussianGPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniformGPU -[ OK ] RandomNumberGeneratorTest/0.TestRngUniformTimesUniformGPU (2 ms) -[----------] 18 tests from RandomNumberGeneratorTest/0 (13 ms total) - -[----------] 18 tests from RandomNumberGeneratorTest/1, where TypeParam = double -[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussian -[ OK ] RandomNumberGeneratorTest/1.TestRngGaussian (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussian2 -[ OK ] RandomNumberGeneratorTest/1.TestRngGaussian2 (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniform -[ OK ] RandomNumberGeneratorTest/1.TestRngUniform (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniform2 -[ OK ] RandomNumberGeneratorTest/1.TestRngUniform2 (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngBernoulli -[ OK ] RandomNumberGeneratorTest/1.TestRngBernoulli (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngBernoulli2 -[ OK ] RandomNumberGeneratorTest/1.TestRngBernoulli2 (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussian -[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussian (1 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniform -[ OK ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniform (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianTimesBernoulli -[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianTimesBernoulli (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformTimesBernoulli -[ OK ] RandomNumberGeneratorTest/1.TestRngUniformTimesBernoulli (1 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngBernoulliTimesBernoulli -[ OK ] RandomNumberGeneratorTest/1.TestRngBernoulliTimesBernoulli (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianGPU -[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianGPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussian2GPU -[ OK ] RandomNumberGeneratorTest/1.TestRngGaussian2GPU (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformGPU -[ OK ] RandomNumberGeneratorTest/1.TestRngUniformGPU (0 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniform2GPU -[ OK ] RandomNumberGeneratorTest/1.TestRngUniform2GPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformIntGPU -src/caffe/test/test_random_number_generator.cpp:127: Failure -The difference between sample_mean and true_mean is 9.2233720346967091e+18, which exceeds bound, where -sample_mean evaluates to 2158067198.3313999, -true_mean evaluates to 9.2233720368547758e+18, and -bound evaluates to 2.0235442047593853e+17. -src/caffe/test/test_random_number_generator.cpp:163: Failure -The difference between bernoulli_p and sample_p_above_mean is 0.5, which exceeds bernoulli_bound, where -bernoulli_p evaluates to 0.5, -sample_p_above_mean evaluates to 0, and -bernoulli_bound evaluates to 0.019. -[ FAILED ] RandomNumberGeneratorTest/1.TestRngUniformIntGPU, where TypeParam = double (1 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussianGPU -[ OK ] RandomNumberGeneratorTest/1.TestRngGaussianTimesGaussianGPU (1 ms) -[ RUN ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniformGPU -[ OK ] RandomNumberGeneratorTest/1.TestRngUniformTimesUniformGPU (2 ms) -[----------] 18 tests from RandomNumberGeneratorTest/1 (10 ms total) - -[----------] Global test environment tear-down -[==========] 36 tests from 2 test cases ran. (23 ms total) -[ PASSED ] 34 tests. -[ FAILED ] 2 tests, listed below: -[ FAILED ] RandomNumberGeneratorTest/0.TestRngUniformIntGPU, where TypeParam = float -[ FAILED ] RandomNumberGeneratorTest/1.TestRngUniformIntGPU, where TypeParam = double - - 2 FAILED TESTS From 938b3626a6a596fc551f955d865485d2ac0423ea Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 31 Dec 2015 05:50:36 +0100 Subject: [PATCH 241/254] Index64 definition in Makefile. --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index e1e57561770..0502bb858e9 100644 --- a/Makefile +++ b/Makefile @@ -165,6 +165,10 @@ NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) # GreenTea backend related include and lib ############################## +ifeq ($(USE_INDEX_64),1) + COMMON_FLAGS += -DUSE_INDEX_64 +endif + ifeq ($(USE_GREENTEA),1) # Find a valid OpenCL library # TODO: Validate and complete this based on different SDKs From 14537bf4c38aececdef6312702ec680c3d0e3cca Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 31 Dec 2015 18:25:47 +0100 Subject: [PATCH 242/254] Convolution kernel update, queue count update. --- Makefile.config.example | 2 +- error.log | 1 + include/caffe/greentea/greentea.hpp | 2 +- src/caffe/greentea/cl_headers/header.cl | 1 + src/caffe/greentea/cl_kernels.cpp | 24 +- src/caffe/greentea/cl_kernels/bnll.cl | 8 +- src/caffe/greentea/cl_kernels/im2col.cl | 128 +++++----- src/caffe/greentea/cl_kernels/im2col_nd.cl | 179 +++++++------ src/caffe/greentea/cl_kernels/math.cl | 2 +- src/caffe/greentea/cl_kernels/softmax_loss.cl | 4 +- src/caffe/greentea/greentea_im2col.cpp | 38 ++- src/caffe/layers/eltwise_layer.cu | 2 +- src/caffe/test/test_convolution_layer.cpp | 240 +++++++++--------- src/caffe/test/test_internal_thread.cpp | 12 +- src/caffe/util/im2col.cu | 14 +- 15 files changed, 353 insertions(+), 304 deletions(-) create mode 100644 error.log diff --git a/Makefile.config.example b/Makefile.config.example index e6610f4f5c9..290b4c5507c 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -2,7 +2,7 @@ # Contributions simplifying and improving our build system are welcome! # 32 bit / 64 bit indexing -# USE_INDEX_64 +# USE_INDEX_64 := 1 # GreenTea (ViennaCL/OpenCL) backend switch diff --git a/error.log b/error.log new file mode 100644 index 00000000000..d64687de199 --- /dev/null +++ b/error.log @@ -0,0 +1 @@ +Setting to use device 1 diff --git a/include/caffe/greentea/greentea.hpp b/include/caffe/greentea/greentea.hpp index 65464c5b6c6..75efb91f0fd 100644 --- a/include/caffe/greentea/greentea.hpp +++ b/include/caffe/greentea/greentea.hpp @@ -39,7 +39,7 @@ #endif #ifndef GREENTEA_QUEUE_COUNT -#define GREENTEA_QUEUE_COUNT 8 +#define GREENTEA_QUEUE_COUNT 1 #endif namespace caffe { diff --git a/src/caffe/greentea/cl_headers/header.cl b/src/caffe/greentea/cl_headers/header.cl index 0dd6b5319cf..1e5d811504b 100644 --- a/src/caffe/greentea/cl_headers/header.cl +++ b/src/caffe/greentea/cl_headers/header.cl @@ -13,6 +13,7 @@ #define cl_amd_fp64 #define DOUBLE_SUPPORT_AVAILABLE #define CLK_LOCAL_MEM_FENCE +#define CLK_GLOBAL_MEM_FENCE #define Dtype float #define barrier(x) #define atomic_cmpxchg(x, y, z) x diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 03fabb128d8..30a9dfbf351 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,16 +6,16 @@ #include namespace caffe { #ifdef USE_INDEX_64 -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define CLK_GLOBAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT std::string definitions_64 = "// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long"; // NOLINT #else -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define CLK_GLOBAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT std::string definitions_32 = "// Types used for parameters, offset computations and so on\n#define int_tp int\n#define uint_tp unsigned int\n\n// Definitions used to cast the types above as needed\n#define int_tpc int\n#define uint_tpc unsigned int"; // NOLINT #endif std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT @@ -23,21 +23,21 @@ std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\ std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += dilation_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += dilation_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp patch_h, const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % dilation_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % dilation_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % dilation_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % dilation_h : h;\n int_tp w_num = (w - w_col_start) / dilation_w;\n int_tp h_num = (h - h_col_start) / dilation_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = data_col_off + c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n dilation_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n dilation_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / dilation_h * patch_w + (w - w_col) / dilation_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * dilation[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1;\n d_col_size[i] = (im_shape_ptr[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % dilation[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - dilation[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n } else { // d_col_iter[i] <= d_max - dilation[1]\n d_col_iter[i] += dilation[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n const int_tp height, const int_tp width,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp h_index = index / width_col;\n const int_tp h_col = h_index % height_col;\n const int_tp w_col = index % width_col;\n const int_tp c_im = h_index / height_col;\n const int_tp c_col = c_im * kernel_h * kernel_w;\n const int_tp h_offset = h_col * stride_h - pad_h;\n const int_tp w_offset = w_col * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (c_im * height + h_offset) * width + w_offset;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h_im = h_offset + i * dilation_h;\n int_tp w_im = w_offset + j * dilation_w;\n *data_col_ptr =\n (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?\n data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n const int_tp w_im = index % width + pad_w;\n const int_tp h_im = (index / width) % height + pad_h;\n const int_tp c_im = index / (width * height);\n int_tp kernel_extent_w = (kernel_w - 1) * dilation_w + 1;\n int_tp kernel_extent_h = (kernel_h - 1) * dilation_h + 1;\n // compute the start and end of the output\n const int_tp w_col_start =\n (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;\n const int_tp w_col_end = min(w_im / stride_w + 1, width_col);\n const int_tp h_col_start =\n (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;\n const int_tp h_col_end = min(h_im / stride_h + 1, height_col);\n // TODO: use LCM of stride and dilation to avoid unnecessary loops\n for (int_tp h_col = h_col_start; h_col < h_col_end; h_col += 1) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; w_col += 1) {\n int_tp h_k = (h_im - h_col * stride_h);\n int_tp w_k = (w_im - w_col * stride_w);\n if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {\n h_k /= dilation_h;\n w_k /= dilation_w;\n int_tp data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *\n height_col + h_col) * width_col + w_col;\n val += data_col[data_col_off + data_col_index];\n }\n }\n }\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with int_tpermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % shared_col_shape[i + 1];\n channel_in /= shared_col_shape[i + 1];\n channel_out *= shared_kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= shared_col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];\n channel_in *= shared_im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= shared_col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0] * shared_dilation[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= shared_im_shape[i + 1];\n data_im_offset += d_iter[i] * shared_dilation[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = shared_kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp c_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];\n c_im /= shared_im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n const int_tp kernel_extent = shared_dilation[i]\n * (shared_kernel_shape[i] - 1) + 1;\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_extent) ?\n 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1;\n d_col_end[i] = min(d_im[i] / shared_stride[i] + 1,\n shared_col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n bool skip = false;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n int_tp kernel_index;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];\n if (kernel_index % shared_dilation[i]) {\n skip = true;\n break;\n } else {\n kernel_index /= shared_dilation[i];\n final_offset += kernel_index * kernel_shape_prod;\n kernel_shape_prod *= shared_kernel_shape[i];\n }\n }\n if (!skip) {\n final_offset += kernel_shape_prod * c_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= shared_col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[data_col_off + final_offset];\n }\n skip = false;\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT +std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] =\n in[index] > 0 ?\n in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index]));\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT @@ -45,16 +45,16 @@ std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\" std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_offset, const int_tp height,\n const int_tp width, const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w, const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp w_out = index % width_col;\n int_tp h_index = index / width_col;\n int_tp h_out = h_index % height_col;\n int_tp channel_in = h_index / height_col;\n int_tp channel_out = channel_in * kernel_h * kernel_w;\n int_tp h_in = h_out * stride_h - pad_h;\n int_tp w_in = w_out * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;\n __global const Dtype* data_im_ptr = data_im + data_offset;\n data_im_ptr += (channel_in * height + h_in) * width + w_in;\n for (int_tp i = 0; i < ext_kernel_h; i += dilation_h) {\n for (int_tp j = 0; j < ext_kernel_w; j += dilation_w) {\n int_tp h = h_in + i;\n int_tp w = w_in + j;\n (*data_col_ptr) =\n (h >= 0 && w >= 0 && h < height && w < width) ?\n data_im_ptr[i * width + j] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp patch_h, const int_tp patch_w,\n const int_tp ext_patch_h,\n const int_tp ext_patch_w,\n const int_tp pad_h, const int_tp pad_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_offset) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n int_tp w = index % width + pad_w;\n int_tp h = (index / width) % height + pad_h;\n int_tp c = index / (width * height);\n // compute the start and end of the output\n int_tp width_col_1 = width_col - 1;\n int_tp height_col_1 = height_col - 1;\n int_tp w_col_start = (w < ext_patch_w) ? w % dilation_w : (w - ext_patch_w) + 1;\n int_tp w_col_end =\n (w >= width_col) ?\n width_col_1 - (width_col_1 - w_col_start) % dilation_w : w;\n int_tp h_col_start = (h < ext_patch_h) ? h % dilation_h : (h - ext_patch_h) + 1;\n int_tp h_col_end =\n (h >= height_col) ?\n height_col_1 - (height_col_1 - h_col_start) % dilation_h : h;\n int_tp w_num = (w - w_col_start) / dilation_w;\n int_tp h_num = (h - h_col_start) / dilation_h;\n\n int_tp coeff_w_idx = height_col * width_col;\n int_tp coeff_h_idx = patch_w * coeff_w_idx;\n int_tp offset = data_col_off + c * patch_h * coeff_h_idx;\n for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col +=\n dilation_h, --h_idx) {\n for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col +=\n dilation_w, --w_idx) {\n //int_tp c_col = c * patch_h * patch_w + (h - h_col) / dilation_h * patch_w + (w - w_col) / dilation_w;\n //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx;\n //val += data_col[(c_col * height_col + h_col) * width_col + w_col];\n val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx\n + h_col * width_col + w_col];\n }\n }\n\n data_im[data_offset + index] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % col_shape_ptr[i + 1];\n channel_in /= col_shape_ptr[i + 1];\n channel_out *= kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= col_shape_ptr[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * stride[i] - pad[i];\n channel_in *= im_shape_ptr[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= col_shape_ptr[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1];\n if (!in_range) {\n break;\n }\n }\n\n // Write column data\n if (in_range) {\n int_tp data_im_offset = d_iter[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= im_shape_ptr[i + 1];\n data_im_offset += d_iter[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n // Old: const int_tp d_max = kernel_shape[i];\n // New (strided, limit is the external kernel size):\n const int_tp d_max = (kernel_shape[i] - 1) * dilation[i] + 1;\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n // Old: ++d_iter[i];\n // New (strided, increment by the stride each time):\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_size[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n int_tp d_ext_patch[6];\n int_tp d_idx[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* data_col_ptr = data_col + data_col_off;\n\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1;\n d_col_size[i] = (im_shape_ptr[i + 1] + 2 * pad[i] - d_ext_patch[i])\n / stride[i] + 1;\n }\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp channel_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i];\n channel_im /= im_shape_ptr[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n // New:\n d_col_start[i] = (d_im[i] < d_ext_patch[i]) ?\n d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1;\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n d_col_end[i] = (d_im[i] >= d_col_size[i]) ?\n (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i])\n % dilation[i] : d_im[i];\n if (d_col_start[i] > d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue;\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp coeff_prod = 1;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_col_iter[i] * coeff_prod;\n coeff_prod *= d_col_size[i];\n }\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n final_offset += d_idx[i] * coeff_prod;\n coeff_prod *= kernel_shape[i];\n }\n final_offset += channel_im * coeff_prod;\n val += data_col_ptr[final_offset];\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n if (d_col_iter[i] > d_col_end[i] - dilation[i]) {\n d_col_iter[i] = d_col_start[i];\n d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i];\n } else { // d_col_iter[i] <= d_max - dilation[1]\n d_col_iter[i] += dilation[i];\n --d_idx[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n const int_tp height, const int_tp width,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp h_index = index / width_col;\n const int_tp h_col = h_index % height_col;\n const int_tp w_col = index % width_col;\n const int_tp c_im = h_index / height_col;\n const int_tp c_col = c_im * kernel_h * kernel_w;\n const int_tp h_offset = h_col * stride_h - pad_h;\n const int_tp w_offset = w_col * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (c_im * height + h_offset) * width + w_offset;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h_im = h_offset + i * dilation_h;\n int_tp w_im = w_offset + j * dilation_w;\n *data_col_ptr =\n (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?\n data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n const int_tp w_im = index % width + pad_w;\n const int_tp h_im = (index / width) % height + pad_h;\n const int_tp c_im = index / (width * height);\n int_tp kernel_extent_w = (kernel_w - 1) * dilation_w + 1;\n int_tp kernel_extent_h = (kernel_h - 1) * dilation_h + 1;\n // compute the start and end of the output\n const int_tp w_col_start =\n (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;\n const int_tp w_col_end = min(w_im / stride_w + 1, width_col);\n const int_tp h_col_start =\n (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;\n const int_tp h_col_end = min(h_im / stride_h + 1, height_col);\n // TODO: use LCM of stride and dilation to avoid unnecessary loops\n for (int_tp h_col = h_col_start; h_col < h_col_end; h_col += 1) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; w_col += 1) {\n int_tp h_k = (h_im - h_col * stride_h);\n int_tp w_k = (w_im - w_col * stride_w);\n if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {\n h_k /= dilation_h;\n w_k /= dilation_w;\n int_tp data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *\n height_col + h_col) * width_col + w_col;\n val += data_col[data_col_off + data_col_index];\n }\n }\n }\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with int_tpermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % shared_col_shape[i + 1];\n channel_in /= shared_col_shape[i + 1];\n channel_out *= shared_kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= shared_col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];\n channel_in *= shared_im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= shared_col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0] * shared_dilation[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= shared_im_shape[i + 1];\n data_im_offset += d_iter[i] * shared_dilation[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = shared_kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp c_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];\n c_im /= shared_im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n const int_tp kernel_extent = shared_dilation[i]\n * (shared_kernel_shape[i] - 1) + 1;\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_extent) ?\n 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1;\n d_col_end[i] = min(d_im[i] / shared_stride[i] + 1,\n shared_col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n bool skip = false;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n int_tp kernel_index;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];\n if (kernel_index % shared_dilation[i]) {\n skip = true;\n break;\n } else {\n kernel_index /= shared_dilation[i];\n final_offset += kernel_index * kernel_shape_prod;\n kernel_shape_prod *= shared_kernel_shape[i];\n }\n }\n if (!skip) {\n final_offset += kernel_shape_prod * c_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= shared_col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[data_col_off + final_offset];\n }\n skip = false;\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT +std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; diff --git a/src/caffe/greentea/cl_kernels/bnll.cl b/src/caffe/greentea/cl_kernels/bnll.cl index a5a34644494..607c2c74e9d 100644 --- a/src/caffe/greentea/cl_kernels/bnll.cl +++ b/src/caffe/greentea/cl_kernels/bnll.cl @@ -6,9 +6,11 @@ __kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n, __global const Dtype* in, __global Dtype* out) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - out[index] = - in[index] > 0 ? - in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); + if (in[index] > 0.0f) { + out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index]))); + } else { + out[index] = log((Dtype) (1.0f + exp(in[index]))); + } } } diff --git a/src/caffe/greentea/cl_kernels/im2col.cl b/src/caffe/greentea/cl_kernels/im2col.cl index 5287dd30d4a..34082b05247 100644 --- a/src/caffe/greentea/cl_kernels/im2col.cl +++ b/src/caffe/greentea/cl_kernels/im2col.cl @@ -3,44 +3,44 @@ #endif __kernel void TEMPLATE(im2col,Dtype)(const int_tp n, - __global const Dtype* data_im, - const int_tp data_offset, const int_tp height, - const int_tp width, const int_tp kernel_h, - const int_tp kernel_w, - const int_tp ext_kernel_h, - const int_tp ext_kernel_w, const int_tp pad_h, - const int_tp pad_w, const int_tp stride_h, - const int_tp stride_w, const int_tp dilation_h, - const int_tp dilation_w, - const int_tp height_col, - const int_tp width_col, - __global Dtype* data_col, - const int_tp data_col_off) { + __global const Dtype* data_im, + const int_tp data_im_off, + const int_tp height, const int_tp width, + const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, + const int_tp stride_w, + const int_tp dilation_h, + const int_tp dilation_w, + const int_tp height_col, + const int_tp width_col, + __global Dtype* data_col, + const int_tp data_col_off) { - for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - int_tp w_out = index % width_col; - int_tp h_index = index / width_col; - int_tp h_out = h_index % height_col; - int_tp channel_in = h_index / height_col; - int_tp channel_out = channel_in * kernel_h * kernel_w; - int_tp h_in = h_out * stride_h - pad_h; - int_tp w_in = w_out * stride_w - pad_w; + for (int_tp index = get_global_id(0); index < n; + index += get_global_size(0)) { + const int_tp h_index = index / width_col; + const int_tp h_col = h_index % height_col; + const int_tp w_col = index % width_col; + const int_tp c_im = h_index / height_col; + const int_tp c_col = c_im * kernel_h * kernel_w; + const int_tp h_offset = h_col * stride_h - pad_h; + const int_tp w_offset = w_col * stride_w - pad_w; __global Dtype* data_col_ptr = data_col + data_col_off; - data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; - __global const Dtype* data_im_ptr = data_im + data_offset; - data_im_ptr += (channel_in * height + h_in) * width + w_in; - for (int_tp i = 0; i < ext_kernel_h; i += dilation_h) { - for (int_tp j = 0; j < ext_kernel_w; j += dilation_w) { - int_tp h = h_in + i; - int_tp w = w_in + j; - (*data_col_ptr) = - (h >= 0 && w >= 0 && h < height && w < width) ? - data_im_ptr[i * width + j] : 0; + data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; + __global const Dtype* data_im_ptr = data_im + data_im_off; + data_im_ptr += (c_im * height + h_offset) * width + w_offset; + for (int_tp i = 0; i < kernel_h; ++i) { + for (int_tp j = 0; j < kernel_w; ++j) { + int_tp h_im = h_offset + i * dilation_h; + int_tp w_im = w_offset + j * dilation_w; + *data_col_ptr = + (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? + data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += height_col * width_col; } } } - } __kernel void TEMPLATE(col2im,Dtype)(const int_tp n, @@ -48,53 +48,45 @@ __kernel void TEMPLATE(col2im,Dtype)(const int_tp n, const int_tp data_col_off, const int_tp height, const int_tp width, const int_tp channels, - const int_tp patch_h, const int_tp patch_w, - const int_tp ext_patch_h, - const int_tp ext_patch_w, - const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, + const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, + const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, const int_tp height_col, const int_tp width_col, __global Dtype* data_im, - const int_tp data_offset) { + const int_tp data_im_off) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { Dtype val = 0; - int_tp w = index % width + pad_w; - int_tp h = (index / width) % height + pad_h; - int_tp c = index / (width * height); + const int_tp w_im = index % width + pad_w; + const int_tp h_im = (index / width) % height + pad_h; + const int_tp c_im = index / (width * height); + int_tp kernel_extent_w = (kernel_w - 1) * dilation_w + 1; + int_tp kernel_extent_h = (kernel_h - 1) * dilation_h + 1; // compute the start and end of the output - int_tp width_col_1 = width_col - 1; - int_tp height_col_1 = height_col - 1; - int_tp w_col_start = (w < ext_patch_w) ? w % dilation_w : (w - ext_patch_w) + 1; - int_tp w_col_end = - (w >= width_col) ? - width_col_1 - (width_col_1 - w_col_start) % dilation_w : w; - int_tp h_col_start = (h < ext_patch_h) ? h % dilation_h : (h - ext_patch_h) + 1; - int_tp h_col_end = - (h >= height_col) ? - height_col_1 - (height_col_1 - h_col_start) % dilation_h : h; - int_tp w_num = (w - w_col_start) / dilation_w; - int_tp h_num = (h - h_col_start) / dilation_h; - - int_tp coeff_w_idx = height_col * width_col; - int_tp coeff_h_idx = patch_w * coeff_w_idx; - int_tp offset = data_col_off + c * patch_h * coeff_h_idx; - for (int_tp h_col = h_col_start, h_idx = h_num; h_col <= h_col_end; h_col += - dilation_h, --h_idx) { - for (int_tp w_col = w_col_start, w_idx = w_num; w_col <= w_col_end; w_col += - dilation_w, --w_idx) { - //int_tp c_col = c * patch_h * patch_w + (h - h_col) / dilation_h * patch_w + (w - w_col) / dilation_w; - //int_tp c_col = c * patch_h * patch_w + h_idx * patch_w + w_idx; - //val += data_col[(c_col * height_col + h_col) * width_col + w_col]; - val += data_col[offset + h_idx * coeff_h_idx + w_idx * coeff_w_idx - + h_col * width_col + w_col]; + const int_tp w_col_start = + (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1; + const int_tp w_col_end = min(w_im / stride_w + 1, width_col); + const int_tp h_col_start = + (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1; + const int_tp h_col_end = min(h_im / stride_h + 1, height_col); + // TODO: use LCM of stride and dilation to avoid unnecessary loops + for (int_tp h_col = h_col_start; h_col < h_col_end; h_col += 1) { + for (int_tp w_col = w_col_start; w_col < w_col_end; w_col += 1) { + int_tp h_k = (h_im - h_col * stride_h); + int_tp w_k = (w_im - w_col * stride_w); + if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { + h_k /= dilation_h; + w_k /= dilation_w; + int_tp data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) * + height_col + h_col) * width_col + w_col; + val += data_col[data_col_off + data_col_index]; + } } } - - data_im[data_offset + index] = val; + data_im[data_im_off + index] = val; } } diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index bdf7c7e63ee..f3dae26887b 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -5,7 +5,7 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, const int_tp channel_axis, __global const Dtype* data_im, - const int_tp data_off, + const int_tp data_im_off, __global const int_tp* im_shape, __global const int_tp* col_shape, __global const int_tp* kernel_shape, @@ -21,64 +21,79 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, __global const int_tp* im_shape_ptr = im_shape + channel_axis; __global const int_tp* col_shape_ptr = col_shape + channel_axis; - for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - // Initialize channel_in, computed in the loop below, with intermediate + __local int_tp shared_dilation[6]; + __local int_tp shared_kernel_shape[6]; + __local int_tp shared_pad[6]; + __local int_tp shared_stride[6]; + __local int_tp shared_col_shape[6 + 1]; + __local int_tp shared_im_shape[6 + 1]; + + for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) { + shared_dilation[li] = dilation[li]; + shared_kernel_shape[li] = kernel_shape[li]; + shared_pad[li] = pad[li]; + shared_stride[li] = stride[li]; + } + + for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) { + shared_col_shape[li] = col_shape_ptr[li]; + shared_im_shape[li] = im_shape_ptr[li]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + for (int_tp index = get_global_id(0); index < n; + index += get_global_size(0)) { + // Initialize channel_in, computed in the loop below, with int_tpermediate // computations used to compute the spatial indices. int_tp channel_in = index; int_tp channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { - d_temp[i] = channel_in % col_shape_ptr[i + 1]; - channel_in /= col_shape_ptr[i + 1]; - channel_out *= kernel_shape[i]; + d_temp[i] = channel_in % shared_col_shape[i + 1]; + channel_in /= shared_col_shape[i + 1]; + channel_out *= shared_kernel_shape[i]; } channel_out *= channel_in; int_tp data_col_inc = 1; for (i = 0; i < num_axes; ++i) { - channel_out *= col_shape_ptr[i + 1]; + channel_out *= shared_col_shape[i + 1]; channel_out += d_temp[i]; - d_temp[i] = d_temp[i] * stride[i] - pad[i]; - channel_in *= im_shape_ptr[i + 1]; + d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i]; + channel_in *= shared_im_shape[i + 1]; channel_in += d_temp[i]; - data_col_inc *= col_shape_ptr[i + 1]; + data_col_inc *= shared_col_shape[i + 1]; d_iter[i] = 0; } __global Dtype* data_col_ptr = data_col + data_col_off + channel_out; - __global const Dtype* data_im_ptr = data_im + data_off + channel_in; + __global const Dtype* data_im_ptr = data_im + data_im_off + channel_in; bool incremented; do { bool in_range = true; for (i = 0; i < num_axes; ++i) { - const int_tp d_iter_im = d_iter[i] + d_temp[i]; - in_range &= d_iter_im >= 0 && d_iter_im < im_shape_ptr[i + 1]; + const int_tp d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i]; + in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1]; if (!in_range) { break; } } - - // Write column data if (in_range) { - int_tp data_im_offset = d_iter[0]; + int_tp data_im_offset = d_iter[0] * shared_dilation[0]; for (i = 1; i < num_axes; ++i) { - data_im_offset *= im_shape_ptr[i + 1]; - data_im_offset += d_iter[i]; + data_im_offset *= shared_im_shape[i + 1]; + data_im_offset += d_iter[i] * shared_dilation[i]; } *data_col_ptr = data_im_ptr[data_im_offset]; } else { *data_col_ptr = 0; } - data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { - // Old: const int_tp d_max = kernel_shape[i]; - // New (strided, limit is the external kernel size): - const int_tp d_max = (kernel_shape[i] - 1) * dilation[i] + 1; + const int_tp d_max = shared_kernel_shape[i]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 - // Old: ++d_iter[i]; - // New (strided, increment by the stride each time): - d_iter[i] += dilation[i]; + ++d_iter[i]; incremented = true; break; } @@ -88,54 +103,65 @@ __kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes, } __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, - const int_tp channel_axis, - __global const Dtype* data_col, - const int_tp data_col_off, - __global const int_tp* im_shape, - __global const int_tp* kernel_shape, - __global const int_tp* pad, - __global const int_tp* stride, - __global const int_tp* dilation, - __global Dtype* data_im, - const int_tp data_im_off) { + const int_tp channel_axis, + __global const Dtype* data_col, + const int_tp data_col_off, + __global const int_tp* im_shape, + __global const int_tp* col_shape, + __global const int_tp* kernel_shape, + __global const int_tp* pad, + __global const int_tp* stride, + __global const int_tp* dilation, + __global Dtype* data_im, + const int_tp data_im_off) { int_tp d_im[6]; - int_tp d_col_size[6]; int_tp d_col_iter[6]; int_tp d_col_start[6]; int_tp d_col_end[6]; - int_tp d_ext_patch[6]; - int_tp d_idx[6]; __global const int_tp* im_shape_ptr = im_shape + channel_axis; - __global const Dtype* data_col_ptr = data_col + data_col_off; + __global const Dtype* col_shape_ptr = col_shape + channel_axis; + + __local int_tp shared_dilation[6]; + __local int_tp shared_kernel_shape[6]; + __local int_tp shared_pad[6]; + __local int_tp shared_stride[6]; + __local int_tp shared_col_shape[6 + 1]; + __local int_tp shared_im_shape[6 + 1]; - for (int_tp i = num_axes - 1; i >= 0; --i) { - d_ext_patch[i] = (kernel_shape[i] - 1) * dilation[i] + 1; - d_col_size[i] = (im_shape_ptr[i + 1] + 2 * pad[i] - d_ext_patch[i]) - / stride[i] + 1; + for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) { + shared_dilation[li] = dilation[li]; + shared_kernel_shape[li] = kernel_shape[li]; + shared_pad[li] = pad[li]; + shared_stride[li] = stride[li]; + } + for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) { + shared_col_shape[li] = col_shape_ptr[li]; + shared_im_shape[li] = im_shape_ptr[li]; } - for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { + barrier(CLK_LOCAL_MEM_FENCE); + + for (int_tp index = get_global_id(0); index < n;) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. - int_tp channel_im = index; + int_tp c_im = index; // Calculate d_im (image dimensions). for (int_tp i = num_axes - 1; i >= 0; --i) { - d_im[i] = channel_im % im_shape_ptr[i + 1] + pad[i]; - channel_im /= im_shape_ptr[i + 1]; + d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i]; + c_im /= shared_im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; for (int_tp i = 0; i < num_axes; ++i) { - // New: - d_col_start[i] = (d_im[i] < d_ext_patch[i]) ? - d_im[i] % dilation[i] : (d_im[i] - d_ext_patch[i]) + 1; - d_col_iter[i] = d_col_start[i]; - d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i]; - d_col_end[i] = (d_im[i] >= d_col_size[i]) ? - (d_col_size[i] - 1) - ((d_col_size[i] - 1) - d_col_start[i]) - % dilation[i] : d_im[i]; - if (d_col_start[i] > d_col_end[i]) { + const int_tp kernel_extent = shared_dilation[i] + * (shared_kernel_shape[i] - 1) + 1; + d_col_start[i] = d_col_iter[i] = + (d_im[i] < kernel_extent) ? + 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1; + d_col_end[i] = min(d_im[i] / shared_stride[i] + 1, + shared_col_shape[i + 1]); + if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; @@ -144,38 +170,49 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, } } if (done) { - continue; + continue; // CUDA_KERNEL_LOOP(index, n) } // Loop over the col to compute the output val. Dtype val = 0; bool incremented = true; + bool skip = false; do { // Compute the final offset. int_tp final_offset = 0; - int_tp coeff_prod = 1; + int_tp kernel_shape_prod = 1; + int_tp kernel_index; for (int_tp i = num_axes - 1; i >= 0; --i) { - final_offset += d_col_iter[i] * coeff_prod; - coeff_prod *= d_col_size[i]; + kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i]; + if (kernel_index % shared_dilation[i]) { + skip = true; + break; + } else { + kernel_index /= shared_dilation[i]; + final_offset += kernel_index * kernel_shape_prod; + kernel_shape_prod *= shared_kernel_shape[i]; + } } - for (int_tp i = num_axes - 1; i >= 0; --i) { - final_offset += d_idx[i] * coeff_prod; - coeff_prod *= kernel_shape[i]; + if (!skip) { + final_offset += kernel_shape_prod * c_im; + for (int_tp i = 0; i < num_axes; ++i) { + final_offset *= shared_col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[data_col_off + final_offset]; } - final_offset += channel_im * coeff_prod; - val += data_col_ptr[final_offset]; + skip = false; incremented = false; for (int_tp i = num_axes - 1; i >= 0; --i) { - if (d_col_iter[i] > d_col_end[i] - dilation[i]) { + const int_tp d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; - d_idx[i] = (d_im[i] - d_col_start[i]) / dilation[i]; - } else { // d_col_iter[i] <= d_max - dilation[1] - d_col_iter[i] += dilation[i]; - --d_idx[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; incremented = true; break; // for (int_tp i = num_axes - 1; i >= 0; --i) } } // for (int_tp i = num_axes - 1; i >= 0; --i) - } while (incremented); + } while (incremented); data_im[data_im_off + index] = val; } } diff --git a/src/caffe/greentea/cl_kernels/math.cl b/src/caffe/greentea/cl_kernels/math.cl index d4a08e510ff..50e9d3267c6 100644 --- a/src/caffe/greentea/cl_kernels/math.cl +++ b/src/caffe/greentea/cl_kernels/math.cl @@ -68,7 +68,7 @@ __kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a, const int_tp offa, __global Dtype* y, const int_tp offy) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - y[offy + index] = log(a[offa + index]); + y[offy + index] = log((Dtype)(a[offa + index])); } } diff --git a/src/caffe/greentea/cl_kernels/softmax_loss.cl b/src/caffe/greentea/cl_kernels/softmax_loss.cl index 10826492259..8974bfb70ac 100644 --- a/src/caffe/greentea/cl_kernels/softmax_loss.cl +++ b/src/caffe/greentea/cl_kernels/softmax_loss.cl @@ -18,9 +18,9 @@ __kernel void TEMPLATE(softmax_loss_forward,Dtype)( loss[index] = 0; counts[index] = 0; } else { - loss[index] = -log( + loss[index] = -log((Dtype)( max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]), - (Dtype) FLT_MIN)); + (Dtype) FLT_MIN))); counts[index] = 1; } } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index 79ecb535007..e88351693c5 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -20,19 +20,19 @@ void greentea_im2col_gpu(viennacl::ocl::program *prog, const int_tp stride_h, const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, cl_mem data_col, const int_tp data_col_off) { - int_tp ext_kernel_h = (kernel_h - 1) * dilation_h + 1; - int_tp ext_kernel_w = (kernel_w - 1) * dilation_w + 1; - int_tp height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1; - int_tp width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1; + int_tp height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) + / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) + / stride_w + 1; int_tp num_kernels = channels * height_col * width_col; viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("im2col")); viennacl::ocl::enqueue( kernel(num_kernels, WrapHandle(data_im, ctx), data_offset, height, width, - kernel_h, kernel_w, ext_kernel_h, ext_kernel_w, pad_h, pad_w, - stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, - WrapHandle(data_col, ctx), data_col_off), + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, + dilation_w, height_col, width_col, WrapHandle(data_col, ctx), + data_col_off), ctx->get_queue()); } @@ -77,26 +77,23 @@ void greentea_col2im_gpu(viennacl::ocl::program *prog, viennacl::ocl::context *ctx, const cl_mem data_col, const int_tp data_col_off, const int_tp channels, const int_tp height, const int_tp width, - const int_tp patch_h, const int_tp patch_w, + const int_tp kernel_h, const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, const int_tp stride_h, const int_tp stride_w, const int_tp dilation_h, const int_tp dilation_w, cl_mem data_im, const int_tp data_offset) { - int_tp ext_patch_h = (patch_h - 1) * dilation_h + 1; - int_tp ext_patch_w = (patch_w - 1) * dilation_w + 1; - int_tp height_col = (height + 2 * pad_h - ext_patch_h) / stride_h + 1; - int_tp width_col = (width + 2 * pad_w - ext_patch_w) / stride_w + 1; + int_tp height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) + / stride_h + 1; + int_tp width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) + / stride_w + 1; int_tp num_kernels = channels * height * width; - - viennacl::ocl::kernel &kernel = prog->get_kernel( - CL_KERNEL_SELECT("col2im")); + viennacl::ocl::kernel &kernel = prog->get_kernel(CL_KERNEL_SELECT("col2im")); viennacl::ocl::enqueue( - kernel(num_kernels, WrapHandle(data_col, ctx), data_col_off, - height, width, channels, - patch_h, patch_w, ext_patch_h, ext_patch_w, - pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, - height_col, width_col, WrapHandle(data_im, ctx), data_offset), + kernel(num_kernels, WrapHandle(data_col, ctx), data_col_off, height, + width, channels, kernel_h, kernel_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, height_col, width_col, + WrapHandle(data_im, ctx), data_offset), ctx->get_queue()); } @@ -202,6 +199,7 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, kernel(im_size, num_spatial_axes, channel_axis, WrapHandle(data_col, ctx), data_col_off, WrapHandle(im_shape, ctx), + WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), WrapHandle(stride, ctx), WrapHandle(dilation, ctx), WrapHandle(data_im, ctx), data_off), diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 2e422a2c49c..a2688bbbbd0 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -120,7 +120,7 @@ void EltwiseLayer::Forward_gpu(const vector*>& bottom, viennacl::ocl::enqueue( oclk_max_forward(count, WrapHandle((cl_mem)(bottom[0]->gpu_data()), &ctx), - WrapHandle((cl_mem)(bottom[1]->gpu_data()), &ctx), 0L, + WrapHandle((cl_mem)(bottom[1]->gpu_data()), &ctx), (int_tp)0, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 7cbee1aa88a..ee23910494b 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -857,101 +857,107 @@ class CuDNNConvolutionLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(4); - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 2); - EXPECT_EQ(this->blob_top_->channels(), 4); - EXPECT_EQ(this->blob_top_->height(), 2); - EXPECT_EQ(this->blob_top_->width(), 1); - EXPECT_EQ(this->blob_top_2_->num(), 2); - EXPECT_EQ(this->blob_top_2_->channels(), 4); - EXPECT_EQ(this->blob_top_2_->height(), 2); - EXPECT_EQ(this->blob_top_2_->width(), 1); - // setting group should not change the shape - convolution_param->set_num_output(3); - convolution_param->set_group(3); - layer.reset(new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 2); - EXPECT_EQ(this->blob_top_->channels(), 3); - EXPECT_EQ(this->blob_top_->height(), 2); - EXPECT_EQ(this->blob_top_->width(), 1); - EXPECT_EQ(this->blob_top_2_->num(), 2); - EXPECT_EQ(this->blob_top_2_->channels(), 3); - EXPECT_EQ(this->blob_top_2_->height(), 2); - EXPECT_EQ(this->blob_top_2_->width(), 1); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 4); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 3); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); + } } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(4); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("constant"); - convolution_param->mutable_bias_filler()->set_value(0.1); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Check against reference convolution. - const TypeParam* top_data; - const TypeParam* ref_top_data; - caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_)); - top_data = this->blob_top_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int_tp i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); - } - caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_2_)); - top_data = this->blob_top_2_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int_tp i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const TypeParam* top_data; + const TypeParam* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } } } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(3); - convolution_param->set_group(3); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("constant"); - convolution_param->mutable_bias_filler()->set_value(0.1); - shared_ptr > layer( - new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Check against reference convolution. - const TypeParam* top_data; - const TypeParam* ref_top_data; - caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_)); - top_data = this->blob_top_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int_tp i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new CuDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const TypeParam* top_data; + const TypeParam* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int_tp i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } } } @@ -1050,36 +1056,40 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(2); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - CuDNNConvolutionLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + CuDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(3); - convolution_param->set_group(3); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - CuDNNConvolutionLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); + if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + CuDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } } #endif } // namespace caffe diff --git a/src/caffe/test/test_internal_thread.cpp b/src/caffe/test/test_internal_thread.cpp index 51154a2bf62..99aa461fea2 100644 --- a/src/caffe/test/test_internal_thread.cpp +++ b/src/caffe/test/test_internal_thread.cpp @@ -22,13 +22,21 @@ TEST_F(InternalThreadTest, TestStartAndExit) { class TestThreadA : public InternalThread { void InternalThreadEntry() { - EXPECT_EQ(10282592414170385089UL, caffe_rng_rand()); + if (sizeof(uint_tp) == 4) { + EXPECT_EQ(2682223724U, caffe_rng_rand()); + } else { + EXPECT_EQ(10282592414170385089UL, caffe_rng_rand()); + } } }; class TestThreadB : public InternalThread { void InternalThreadEntry() { - EXPECT_EQ(10310463406559028313UL, caffe_rng_rand()); + if (sizeof(uint_tp) == 4) { + EXPECT_EQ(887095485U, caffe_rng_rand()); + } else { + EXPECT_EQ(10310463406559028313UL, caffe_rng_rand()); + } } }; diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 8923e800565..7a1715ef45c 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -115,7 +115,7 @@ __global__ void im2col_nd_gpu_kernel(const int_tp n, const Dtype* data_im, int_tp i; CUDA_KERNEL_LOOP(index, n) { - // Initialize channel_in, computed in the loop below, with int_tpermediate + // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. int_tp channel_in = index; int_tp channel_out = 1; @@ -359,12 +359,12 @@ __global__ void col2im_nd_gpu_kernel(const int_tp n, const Dtype* data_col, int_tp d_col_start[num_axes]; // NOLINT(runtime/arrays) int_tp d_col_end[num_axes]; // NOLINT(runtime/arrays) - __shared__ int_tp shared_dilation[num_axes]; // NOLINT(runtime/arrays) - __shared__ int_tp shared_kernel_shape[num_axes]; // NOLINT(runtime/arrays) - __shared__ int_tp shared_pad[num_axes]; // NOLINT(runtime/arrays) - __shared__ int_tp shared_stride[num_axes]; // NOLINT(runtime/arrays) - __shared__ int_tp shared_col_shape[num_axes + 1]; // NOLINT(runtime/arrays) - __shared__ int_tp shared_im_shape[num_axes + 1]; // NOLINT(runtime/arrays) + __shared__ int_tp shared_dilation[num_axes]; + __shared__ int_tp shared_kernel_shape[num_axes]; + __shared__ int_tp shared_pad[num_axes]; + __shared__ int_tp shared_stride[num_axes]; + __shared__ int_tp shared_col_shape[num_axes + 1]; + __shared__ int_tp shared_im_shape[num_axes + 1]; if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; From 5c99e6c491c41d22b82f583a01be1ba8fe86dcca Mon Sep 17 00:00:00 2001 From: fabian Date: Thu, 31 Dec 2015 18:26:05 +0100 Subject: [PATCH 243/254] Cleanup. --- error.log | 1 - 1 file changed, 1 deletion(-) delete mode 100644 error.log diff --git a/error.log b/error.log deleted file mode 100644 index d64687de199..00000000000 --- a/error.log +++ /dev/null @@ -1 +0,0 @@ -Setting to use device 1 From c1cdaa75737d5ffca7aaf0bbbc2b3467fe76f28a Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 1 Jan 2016 06:00:14 +0100 Subject: [PATCH 244/254] Convolution kernel fixes. --- src/caffe/greentea/cl_headers/header.cl | 1 + src/caffe/greentea/cl_kernels.cpp | 96 +++++++++++----------- src/caffe/greentea/cl_kernels.sh | 8 +- src/caffe/greentea/cl_kernels/im2col_nd.cl | 89 ++++++++++---------- src/caffe/greentea/greentea_im2col.cpp | 4 +- src/caffe/test/test_im2col_layer.cpp | 6 +- 6 files changed, 102 insertions(+), 102 deletions(-) diff --git a/src/caffe/greentea/cl_headers/header.cl b/src/caffe/greentea/cl_headers/header.cl index 1e5d811504b..50a10afeda2 100644 --- a/src/caffe/greentea/cl_headers/header.cl +++ b/src/caffe/greentea/cl_headers/header.cl @@ -17,6 +17,7 @@ #define Dtype float #define barrier(x) #define atomic_cmpxchg(x, y, z) x +#define signbit(x) x #define int_tp long #define uint_tp unsigned long #define int_tpc long diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 30a9dfbf351..4d0046c9f61 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -6,56 +6,56 @@ #include namespace caffe { #ifdef USE_INDEX_64 -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define CLK_GLOBAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT -std::string definitions_64 = "// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long"; // NOLINT +static std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define CLK_GLOBAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define signbit(x) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +static std::string definitions_64 = "// Types used for parameters, offset computations and so on\n#define int_tp long\n#define uint_tp unsigned long\n\n// Definitions used to cast the types above as needed\n#define int_tpc long\n#define uint_tpc unsigned long"; // NOLINT #else -std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define CLK_GLOBAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT -std::string definitions_32 = "// Types used for parameters, offset computations and so on\n#define int_tp int\n#define uint_tp unsigned int\n\n// Definitions used to cast the types above as needed\n#define int_tpc int\n#define uint_tpc unsigned int"; // NOLINT +static std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define CLK_GLOBAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define signbit(x) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT +static std::string definitions_32 = "// Types used for parameters, offset computations and so on\n#define int_tp int\n#define uint_tp unsigned int\n\n// Definitions used to cast the types above as needed\n#define int_tpc int\n#define uint_tpc unsigned int"; // NOLINT #endif -std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT -std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT -std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT -std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT -std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT -std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT -std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT -std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n const int_tp height, const int_tp width,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp h_index = index / width_col;\n const int_tp h_col = h_index % height_col;\n const int_tp w_col = index % width_col;\n const int_tp c_im = h_index / height_col;\n const int_tp c_col = c_im * kernel_h * kernel_w;\n const int_tp h_offset = h_col * stride_h - pad_h;\n const int_tp w_offset = w_col * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (c_im * height + h_offset) * width + w_offset;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h_im = h_offset + i * dilation_h;\n int_tp w_im = w_offset + j * dilation_w;\n *data_col_ptr =\n (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?\n data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n const int_tp w_im = index % width + pad_w;\n const int_tp h_im = (index / width) % height + pad_h;\n const int_tp c_im = index / (width * height);\n int_tp kernel_extent_w = (kernel_w - 1) * dilation_w + 1;\n int_tp kernel_extent_h = (kernel_h - 1) * dilation_h + 1;\n // compute the start and end of the output\n const int_tp w_col_start =\n (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;\n const int_tp w_col_end = min(w_im / stride_w + 1, width_col);\n const int_tp h_col_start =\n (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;\n const int_tp h_col_end = min(h_im / stride_h + 1, height_col);\n // TODO: use LCM of stride and dilation to avoid unnecessary loops\n for (int_tp h_col = h_col_start; h_col < h_col_end; h_col += 1) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; w_col += 1) {\n int_tp h_k = (h_im - h_col * stride_h);\n int_tp w_k = (w_im - w_col * stride_w);\n if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {\n h_k /= dilation_h;\n w_k /= dilation_w;\n int_tp data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *\n height_col + h_col) * width_col + w_col;\n val += data_col[data_col_off + data_col_index];\n }\n }\n }\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT -std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with int_tpermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % shared_col_shape[i + 1];\n channel_in /= shared_col_shape[i + 1];\n channel_out *= shared_kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= shared_col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];\n channel_in *= shared_im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= shared_col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0] * shared_dilation[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= shared_im_shape[i + 1];\n data_im_offset += d_iter[i] * shared_dilation[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = shared_kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp c_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];\n c_im /= shared_im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n const int_tp kernel_extent = shared_dilation[i]\n * (shared_kernel_shape[i] - 1) + 1;\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_extent) ?\n 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1;\n d_col_end[i] = min(d_im[i] / shared_stride[i] + 1,\n shared_col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n bool skip = false;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n int_tp kernel_index;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];\n if (kernel_index % shared_dilation[i]) {\n skip = true;\n break;\n } else {\n kernel_index /= shared_dilation[i];\n final_offset += kernel_index * kernel_shape_prod;\n kernel_shape_prod *= shared_kernel_shape[i];\n }\n }\n if (!skip) {\n final_offset += kernel_shape_prod * c_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= shared_col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[data_col_off + final_offset];\n }\n skip = false;\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT -std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT -std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT -std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT -std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT -std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT -std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT -std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT -std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT -std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT -std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT -std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT -std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n const int_tp height, const int_tp width,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp h_index = index / width_col;\n const int_tp h_col = h_index % height_col;\n const int_tp w_col = index % width_col;\n const int_tp c_im = h_index / height_col;\n const int_tp c_col = c_im * kernel_h * kernel_w;\n const int_tp h_offset = h_col * stride_h - pad_h;\n const int_tp w_offset = w_col * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (c_im * height + h_offset) * width + w_offset;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h_im = h_offset + i * dilation_h;\n int_tp w_im = w_offset + j * dilation_w;\n *data_col_ptr =\n (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?\n data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n const int_tp w_im = index % width + pad_w;\n const int_tp h_im = (index / width) % height + pad_h;\n const int_tp c_im = index / (width * height);\n int_tp kernel_extent_w = (kernel_w - 1) * dilation_w + 1;\n int_tp kernel_extent_h = (kernel_h - 1) * dilation_h + 1;\n // compute the start and end of the output\n const int_tp w_col_start =\n (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;\n const int_tp w_col_end = min(w_im / stride_w + 1, width_col);\n const int_tp h_col_start =\n (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;\n const int_tp h_col_end = min(h_im / stride_h + 1, height_col);\n // TODO: use LCM of stride and dilation to avoid unnecessary loops\n for (int_tp h_col = h_col_start; h_col < h_col_end; h_col += 1) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; w_col += 1) {\n int_tp h_k = (h_im - h_col * stride_h);\n int_tp w_k = (w_im - w_col * stride_w);\n if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {\n h_k /= dilation_h;\n w_k /= dilation_w;\n int_tp data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *\n height_col + h_col) * width_col + w_col;\n val += data_col[data_col_off + data_col_index];\n }\n }\n }\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT -std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with int_tpermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % shared_col_shape[i + 1];\n channel_in /= shared_col_shape[i + 1];\n channel_out *= shared_kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= shared_col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];\n channel_in *= shared_im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= shared_col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0] * shared_dilation[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= shared_im_shape[i + 1];\n data_im_offset += d_iter[i] * shared_dilation[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = shared_kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const Dtype* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp c_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];\n c_im /= shared_im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n const int_tp kernel_extent = shared_dilation[i]\n * (shared_kernel_shape[i] - 1) + 1;\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_extent) ?\n 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1;\n d_col_end[i] = min(d_im[i] / shared_stride[i] + 1,\n shared_col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (done) {\n continue; // CUDA_KERNEL_LOOP(index, n)\n }\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n bool skip = false;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n int_tp kernel_index;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];\n if (kernel_index % shared_dilation[i]) {\n skip = true;\n break;\n } else {\n kernel_index /= shared_dilation[i];\n final_offset += kernel_index * kernel_shape_prod;\n kernel_shape_prod *= shared_kernel_shape[i];\n }\n }\n if (!skip) {\n final_offset += kernel_shape_prod * c_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= shared_col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[data_col_off + final_offset];\n }\n skip = false;\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT -std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT -std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT -std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT -std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT -std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT -std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT +static std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT +static std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +static std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT +static std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +static std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT +static std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +static std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +static std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT +static std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT +static std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT +static std::string im2col_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n const int_tp height, const int_tp width,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp h_index = index / width_col;\n const int_tp h_col = h_index % height_col;\n const int_tp w_col = index % width_col;\n const int_tp c_im = h_index / height_col;\n const int_tp c_col = c_im * kernel_h * kernel_w;\n const int_tp h_offset = h_col * stride_h - pad_h;\n const int_tp w_offset = w_col * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (c_im * height + h_offset) * width + w_offset;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h_im = h_offset + i * dilation_h;\n int_tp w_im = w_offset + j * dilation_w;\n *data_col_ptr =\n (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?\n data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n const int_tp w_im = index % width + pad_w;\n const int_tp h_im = (index / width) % height + pad_h;\n const int_tp c_im = index / (width * height);\n int_tp kernel_extent_w = (kernel_w - 1) * dilation_w + 1;\n int_tp kernel_extent_h = (kernel_h - 1) * dilation_h + 1;\n // compute the start and end of the output\n const int_tp w_col_start =\n (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;\n const int_tp w_col_end = min(w_im / stride_w + 1, width_col);\n const int_tp h_col_start =\n (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;\n const int_tp h_col_end = min(h_im / stride_h + 1, height_col);\n // TODO: use LCM of stride and dilation to avoid unnecessary loops\n for (int_tp h_col = h_col_start; h_col < h_col_end; h_col += 1) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; w_col += 1) {\n int_tp h_k = (h_im - h_col * stride_h);\n int_tp w_k = (w_im - w_col * stride_w);\n if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {\n h_k /= dilation_h;\n w_k /= dilation_w;\n int_tp data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *\n height_col + h_col) * width_col + w_col;\n val += data_col[data_col_off + data_col_index];\n }\n }\n }\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +static std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with int_tpermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % shared_col_shape[i + 1];\n channel_in /= shared_col_shape[i + 1];\n channel_out *= shared_kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= shared_col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];\n channel_in *= shared_im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= shared_col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0] * shared_dilation[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= shared_im_shape[i + 1];\n data_im_offset += d_iter[i] * shared_dilation[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = shared_kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp c_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];\n c_im /= shared_im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n const int_tp kernel_extent = shared_dilation[i]\n * (shared_kernel_shape[i] - 1) + 1;\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_extent) ?\n 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1;\n d_col_end[i] = min(d_im[i] / shared_stride[i] + 1,\n shared_col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (!done) {\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n bool skip = false;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n int_tp kernel_index;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];\n if (kernel_index % shared_dilation[i]) {\n skip = true;\n break;\n } else {\n kernel_index /= shared_dilation[i];\n final_offset += kernel_index * kernel_shape_prod;\n kernel_shape_prod *= shared_kernel_shape[i];\n }\n }\n if (!skip) {\n final_offset += kernel_shape_prod * c_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= shared_col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[data_col_off + final_offset];\n }\n skip = false;\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n }\n}"; // NOLINT +static std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +static std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +static std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT +static std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +static std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +static std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT +static std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT +static std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT +static std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT +static std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT +static std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +static std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT +static std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT +static std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT +static std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT +static std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT +static std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT +static std::string im2col_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col,Dtype)(const int_tp n,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n const int_tp height, const int_tp width,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp h_index = index / width_col;\n const int_tp h_col = h_index % height_col;\n const int_tp w_col = index % width_col;\n const int_tp c_im = h_index / height_col;\n const int_tp c_col = c_im * kernel_h * kernel_w;\n const int_tp h_offset = h_col * stride_h - pad_h;\n const int_tp w_offset = w_col * stride_w - pad_w;\n __global Dtype* data_col_ptr = data_col + data_col_off;\n data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;\n __global const Dtype* data_im_ptr = data_im + data_im_off;\n data_im_ptr += (c_im * height + h_offset) * width + w_offset;\n for (int_tp i = 0; i < kernel_h; ++i) {\n for (int_tp j = 0; j < kernel_w; ++j) {\n int_tp h_im = h_offset + i * dilation_h;\n int_tp w_im = w_offset + j * dilation_w;\n *data_col_ptr =\n (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?\n data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;\n data_col_ptr += height_col * width_col;\n }\n }\n }\n}\n\n__kernel void TEMPLATE(col2im,Dtype)(const int_tp n,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n const int_tp height, const int_tp width,\n const int_tp channels,\n const int_tp kernel_h,\n const int_tp kernel_w, const int_tp pad_h,\n const int_tp pad_w, const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp height_col,\n const int_tp width_col,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype val = 0;\n const int_tp w_im = index % width + pad_w;\n const int_tp h_im = (index / width) % height + pad_h;\n const int_tp c_im = index / (width * height);\n int_tp kernel_extent_w = (kernel_w - 1) * dilation_w + 1;\n int_tp kernel_extent_h = (kernel_h - 1) * dilation_h + 1;\n // compute the start and end of the output\n const int_tp w_col_start =\n (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;\n const int_tp w_col_end = min(w_im / stride_w + 1, width_col);\n const int_tp h_col_start =\n (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;\n const int_tp h_col_end = min(h_im / stride_h + 1, height_col);\n // TODO: use LCM of stride and dilation to avoid unnecessary loops\n for (int_tp h_col = h_col_start; h_col < h_col_end; h_col += 1) {\n for (int_tp w_col = w_col_start; w_col < w_col_end; w_col += 1) {\n int_tp h_k = (h_im - h_col * stride_h);\n int_tp w_k = (w_im - w_col * stride_w);\n if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {\n h_k /= dilation_h;\n w_k /= dilation_w;\n int_tp data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *\n height_col + h_col) * width_col + w_col;\n val += data_col[data_col_off + data_col_index];\n }\n }\n }\n data_im[data_im_off + index] = val;\n }\n}"; // NOLINT +static std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(im2col_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_im,\n const int_tp data_im_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_col,\n const int_tp data_col_off) {\n int_tp d_temp[6];\n int_tp d_iter[6];\n int_tp i;\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with int_tpermediate\n // computations used to compute the spatial indices.\n int_tp channel_in = index;\n int_tp channel_out = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n d_temp[i] = channel_in % shared_col_shape[i + 1];\n channel_in /= shared_col_shape[i + 1];\n channel_out *= shared_kernel_shape[i];\n }\n channel_out *= channel_in;\n int_tp data_col_inc = 1;\n for (i = 0; i < num_axes; ++i) {\n channel_out *= shared_col_shape[i + 1];\n channel_out += d_temp[i];\n d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];\n channel_in *= shared_im_shape[i + 1];\n channel_in += d_temp[i];\n data_col_inc *= shared_col_shape[i + 1];\n d_iter[i] = 0;\n }\n __global Dtype* data_col_ptr = data_col + data_col_off + channel_out;\n __global const Dtype* data_im_ptr = data_im + data_im_off + channel_in;\n bool incremented;\n do {\n bool in_range = true;\n for (i = 0; i < num_axes; ++i) {\n const int_tp d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];\n in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];\n if (!in_range) {\n break;\n }\n }\n if (in_range) {\n int_tp data_im_offset = d_iter[0] * shared_dilation[0];\n for (i = 1; i < num_axes; ++i) {\n data_im_offset *= shared_im_shape[i + 1];\n data_im_offset += d_iter[i] * shared_dilation[i];\n }\n *data_col_ptr = data_im_ptr[data_im_offset];\n } else {\n *data_col_ptr = 0;\n }\n data_col_ptr += data_col_inc;\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = shared_kernel_shape[i];\n if (d_iter[i] == d_max - 1) {\n d_iter[i] = 0;\n } else { // d_iter[i] < d_max - 1\n ++d_iter[i];\n incremented = true;\n break;\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented); // do\n }\n}\n\n__kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes,\n const int_tp channel_axis,\n __global const Dtype* data_col,\n const int_tp data_col_off,\n __global const int_tp* im_shape,\n __global const int_tp* col_shape,\n __global const int_tp* kernel_shape,\n __global const int_tp* pad,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global Dtype* data_im,\n const int_tp data_im_off) {\n int_tp d_im[6];\n int_tp d_col_iter[6];\n int_tp d_col_start[6];\n int_tp d_col_end[6];\n\n __global const int_tp* im_shape_ptr = im_shape + channel_axis;\n __global const int_tp* col_shape_ptr = col_shape + channel_axis;\n\n __local int_tp shared_dilation[6];\n __local int_tp shared_kernel_shape[6];\n __local int_tp shared_pad[6];\n __local int_tp shared_stride[6];\n __local int_tp shared_col_shape[6 + 1];\n __local int_tp shared_im_shape[6 + 1];\n\n for (int li = get_local_id(0); li < num_axes; li += get_local_size(0)) {\n shared_dilation[li] = dilation[li];\n shared_kernel_shape[li] = kernel_shape[li];\n shared_pad[li] = pad[li];\n shared_stride[li] = stride[li];\n }\n for (int li = get_local_id(0); li < num_axes + 1; li += get_local_size(0)) {\n shared_col_shape[li] = col_shape_ptr[li];\n shared_im_shape[li] = im_shape_ptr[li];\n }\n\n barrier(CLK_LOCAL_MEM_FENCE);\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // Initialize channel_in, computed in the loop below, with intermediate\n // computations used to compute the spatial indices.\n int_tp c_im = index;\n // Calculate d_im (image dimensions).\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];\n c_im /= shared_im_shape[i + 1];\n }\n // Calculate col start/end indices.\n bool done = false;\n for (int_tp i = 0; i < num_axes; ++i) {\n const int_tp kernel_extent = shared_dilation[i]\n * (shared_kernel_shape[i] - 1) + 1;\n d_col_start[i] = d_col_iter[i] =\n (d_im[i] < kernel_extent) ?\n 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1;\n d_col_end[i] = min(d_im[i] / shared_stride[i] + 1,\n shared_col_shape[i + 1]);\n if (d_col_start[i] >= d_col_end[i]) {\n // Skip computation if the dimension is 0 at any spatial axis --\n // final val will be 0.\n data_im[index] = 0;\n done = true;\n break; // for (int_tp i = 0; i < num_axes; ++i)\n }\n }\n if (!done) {\n // Loop over the col to compute the output val.\n Dtype val = 0;\n bool incremented = true;\n bool skip = false;\n do {\n // Compute the final offset.\n int_tp final_offset = 0;\n int_tp kernel_shape_prod = 1;\n int_tp kernel_index;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];\n if (kernel_index % shared_dilation[i]) {\n skip = true;\n break;\n } else {\n kernel_index /= shared_dilation[i];\n final_offset += kernel_index * kernel_shape_prod;\n kernel_shape_prod *= shared_kernel_shape[i];\n }\n }\n if (!skip) {\n final_offset += kernel_shape_prod * c_im;\n for (int_tp i = 0; i < num_axes; ++i) {\n final_offset *= shared_col_shape[i + 1];\n final_offset += d_col_iter[i];\n }\n val += data_col[data_col_off + final_offset];\n }\n skip = false;\n incremented = false;\n for (int_tp i = num_axes - 1; i >= 0; --i) {\n const int_tp d_max = d_col_end[i];\n if (d_col_iter[i] == d_max - 1) {\n d_col_iter[i] = d_col_start[i];\n } else { // d_col_iter[i] < d_max - 1\n ++d_col_iter[i];\n incremented = true;\n break; // for (int_tp i = num_axes - 1; i >= 0; --i)\n }\n } // for (int_tp i = num_axes - 1; i >= 0; --i)\n } while (incremented);\n data_im[data_im_off + index] = val;\n }\n }\n}"; // NOLINT +static std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT +static std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT +static std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT +static std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT +static std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT +static std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT +static std::string tile_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { std::stringstream ss; #ifdef USE_INDEX_64 diff --git a/src/caffe/greentea/cl_kernels.sh b/src/caffe/greentea/cl_kernels.sh index b25e9a72692..98a86e01389 100644 --- a/src/caffe/greentea/cl_kernels.sh +++ b/src/caffe/greentea/cl_kernels.sh @@ -43,7 +43,7 @@ do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo -n "std::string $CL_KERNEL_NAME = \"" >> $SOURCE + echo -n "static std::string $CL_KERNEL_NAME = \"" >> $SOURCE echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE echo "\"; // NOLINT" >> $SOURCE done @@ -55,7 +55,7 @@ do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo -n "std::string $CL_KERNEL_NAME = \"" >> $SOURCE + echo -n "static std::string $CL_KERNEL_NAME = \"" >> $SOURCE echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE echo "\"; // NOLINT" >> $SOURCE done @@ -68,7 +68,7 @@ do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo -n "std::string ${CL_KERNEL_NAME}_float = \"" >> $SOURCE + echo -n "static std::string ${CL_KERNEL_NAME}_float = \"" >> $SOURCE echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE echo "\"; // NOLINT" >> $SOURCE done @@ -80,7 +80,7 @@ do CL_KERNEL_NAME=`echo $CL_KERNEL` CL_KERNEL_NAME="${CL_KERNEL_NAME##*/}" CL_KERNEL_NAME="${CL_KERNEL_NAME%.cl}" - echo -n "std::string ${CL_KERNEL_NAME}_double = \"" >> $SOURCE + echo -n "static std::string ${CL_KERNEL_NAME}_double = \"" >> $SOURCE echo -n "$CL_KERNEL_STR" | sed -e ':a;N;$!ba;s/\n/\\n/g' | sed -e 's/\"/\\"/g' >> $SOURCE echo "\"; // NOLINT" >> $SOURCE done diff --git a/src/caffe/greentea/cl_kernels/im2col_nd.cl b/src/caffe/greentea/cl_kernels/im2col_nd.cl index f3dae26887b..f372ee3c452 100644 --- a/src/caffe/greentea/cl_kernels/im2col_nd.cl +++ b/src/caffe/greentea/cl_kernels/im2col_nd.cl @@ -120,7 +120,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, int_tp d_col_end[6]; __global const int_tp* im_shape_ptr = im_shape + channel_axis; - __global const Dtype* col_shape_ptr = col_shape + channel_axis; + __global const int_tp* col_shape_ptr = col_shape + channel_axis; __local int_tp shared_dilation[6]; __local int_tp shared_kernel_shape[6]; @@ -142,7 +142,7 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, barrier(CLK_LOCAL_MEM_FENCE); - for (int_tp index = get_global_id(0); index < n;) { + for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. int_tp c_im = index; @@ -169,50 +169,49 @@ __kernel void TEMPLATE(col2im_nd, Dtype)(const int_tp n, const int_tp num_axes, break; // for (int_tp i = 0; i < num_axes; ++i) } } - if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) - } - // Loop over the col to compute the output val. - Dtype val = 0; - bool incremented = true; - bool skip = false; - do { - // Compute the final offset. - int_tp final_offset = 0; - int_tp kernel_shape_prod = 1; - int_tp kernel_index; - for (int_tp i = num_axes - 1; i >= 0; --i) { - kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i]; - if (kernel_index % shared_dilation[i]) { - skip = true; - break; - } else { - kernel_index /= shared_dilation[i]; - final_offset += kernel_index * kernel_shape_prod; - kernel_shape_prod *= shared_kernel_shape[i]; + if (!done) { + // Loop over the col to compute the output val. + Dtype val = 0; + bool incremented = true; + bool skip = false; + do { + // Compute the final offset. + int_tp final_offset = 0; + int_tp kernel_shape_prod = 1; + int_tp kernel_index; + for (int_tp i = num_axes - 1; i >= 0; --i) { + kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i]; + if (kernel_index % shared_dilation[i]) { + skip = true; + break; + } else { + kernel_index /= shared_dilation[i]; + final_offset += kernel_index * kernel_shape_prod; + kernel_shape_prod *= shared_kernel_shape[i]; + } } - } - if (!skip) { - final_offset += kernel_shape_prod * c_im; - for (int_tp i = 0; i < num_axes; ++i) { - final_offset *= shared_col_shape[i + 1]; - final_offset += d_col_iter[i]; + if (!skip) { + final_offset += kernel_shape_prod * c_im; + for (int_tp i = 0; i < num_axes; ++i) { + final_offset *= shared_col_shape[i + 1]; + final_offset += d_col_iter[i]; + } + val += data_col[data_col_off + final_offset]; } - val += data_col[data_col_off + final_offset]; - } - skip = false; - incremented = false; - for (int_tp i = num_axes - 1; i >= 0; --i) { - const int_tp d_max = d_col_end[i]; - if (d_col_iter[i] == d_max - 1) { - d_col_iter[i] = d_col_start[i]; - } else { // d_col_iter[i] < d_max - 1 - ++d_col_iter[i]; - incremented = true; - break; // for (int_tp i = num_axes - 1; i >= 0; --i) - } - } // for (int_tp i = num_axes - 1; i >= 0; --i) - } while (incremented); - data_im[data_im_off + index] = val; + skip = false; + incremented = false; + for (int_tp i = num_axes - 1; i >= 0; --i) { + const int_tp d_max = d_col_end[i]; + if (d_col_iter[i] == d_max - 1) { + d_col_iter[i] = d_col_start[i]; + } else { // d_col_iter[i] < d_max - 1 + ++d_col_iter[i]; + incremented = true; + break; // for (int_tp i = num_axes - 1; i >= 0; --i) + } + } // for (int_tp i = num_axes - 1; i >= 0; --i) + } while (incremented); + data_im[data_im_off + index] = val; + } } } diff --git a/src/caffe/greentea/greentea_im2col.cpp b/src/caffe/greentea/greentea_im2col.cpp index e88351693c5..11a0e59ee3f 100644 --- a/src/caffe/greentea/greentea_im2col.cpp +++ b/src/caffe/greentea/greentea_im2col.cpp @@ -191,7 +191,7 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, cl_mem im_shape, cl_mem col_shape, cl_mem kernel_shape, cl_mem pad, cl_mem stride, cl_mem dilation, cl_mem data_im, - const int_tp data_off) { + const int_tp data_im_off) { viennacl::ocl::kernel &kernel = prog->get_kernel( CL_KERNEL_SELECT("col2im_nd")); @@ -202,7 +202,7 @@ void greentea_col2im_nd_gpu(viennacl::ocl::program *prog, WrapHandle(col_shape, ctx), WrapHandle(kernel_shape, ctx), WrapHandle(pad, ctx), WrapHandle(stride, ctx), WrapHandle(dilation, ctx), - WrapHandle(data_im, ctx), data_off), + WrapHandle(data_im, ctx), data_im_off), ctx->get_queue()); } diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index 5b056e713b3..0606dfd5540 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -41,7 +41,7 @@ TYPED_TEST(Im2colLayerTest, TestSetup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - vector bottom_shape; + vector bottom_shape; bottom_shape.push_back(2); bottom_shape.push_back(3); bottom_shape.push_back(10); @@ -93,7 +93,7 @@ TYPED_TEST(Im2colLayerTest, TestDilatedGradient) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - vector bottom_shape; + vector bottom_shape; bottom_shape.push_back(2); bottom_shape.push_back(3); bottom_shape.push_back(10); @@ -127,7 +127,7 @@ TYPED_TEST(Im2colLayerTest, TestDilatedGradientForceND) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - vector bottom_shape; + vector bottom_shape; bottom_shape.push_back(2); bottom_shape.push_back(3); bottom_shape.push_back(10); From 30cab07d66b2998cb45e7d01d3aef2eb1d64129c Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 1 Jan 2016 15:55:21 +0100 Subject: [PATCH 245/254] Intel Beignet 1.1.1 kernel fixes. --- src/caffe/greentea/cl_kernels.cpp | 20 ++++++++++---------- src/caffe/greentea/cl_kernels/activation.cl | 12 ++++++------ src/caffe/greentea/cl_kernels/bnll.cl | 4 ++-- src/caffe/greentea/cl_kernels/dropout.cl | 4 ++-- src/caffe/greentea/cl_kernels/pooling.cl | 6 +++--- src/caffe/greentea/cl_kernels/pooling_nd.cl | 4 ++-- src/caffe/layers/base_conv_layer.cpp | 2 +- src/caffe/test/test_convolution_layer.cpp | 4 ++-- src/caffe/test/test_deconvolution_layer.cpp | 4 ++-- 9 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index 4d0046c9f61..5098aeccd2d 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -12,14 +12,14 @@ static std::string definitions_64 = "// Types used for parameters, offset comput static std::string header = "#ifndef __OPENCL_VERSION__\n#define __kernel\n#define __global\n#define __constant\n#define __local\n#define get_global_id(x) 0\n#define get_global_size(x) 0\n#define get_local_id(x) 0\n#define get_local_size(x) 0\n#define FLT_MAX 0\n#define FLT_MIN 0\n#define cl_khr_fp64\n#define cl_amd_fp64\n#define DOUBLE_SUPPORT_AVAILABLE\n#define CLK_LOCAL_MEM_FENCE\n#define CLK_GLOBAL_MEM_FENCE\n#define Dtype float\n#define barrier(x)\n#define atomic_cmpxchg(x, y, z) x\n#define signbit(x) x\n#define int_tp long\n#define uint_tp unsigned long\n#define int_tpc long\n#define uint_tpc unsigned long\n#endif\n\n#define CONCAT(A,B) A##_##B\n#define TEMPLATE(name,type) CONCAT(name,type)\n\n#define TYPE_FLOAT 1\n#define TYPE_DOUBLE 2\n\n#if defined(cl_khr_fp64)\n#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#elif defined(cl_amd_fp64)\n#pragma OPENCL EXTENSION cl_amd_fp64 : enable\n#define DOUBLE_SUPPORT_AVAILABLE\n#endif\n\n#if defined(cl_khr_int64_base_atomics)\n#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n#define ATOMICS_64_AVAILABLE\n#endif"; // NOLINT static std::string definitions_32 = "// Types used for parameters, offset computations and so on\n#define int_tp int\n#define uint_tp unsigned int\n\n// Definitions used to cast the types above as needed\n#define int_tpc int\n#define uint_tpc unsigned int"; // NOLINT #endif -static std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT +static std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1.0 / (1.0 + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1.0 : 0.0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0?1.0:0.0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0?1.0:0.0);\n }\n }\n}"; // NOLINT static std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT static std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -static std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +static std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0 + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0 + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT static std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT static std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT static std::string contrastive_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -static std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT +static std::string dropout_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((mask[index] > threshold)?1.0:0.0) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * ((mask[index] > threshold)?1.0:0.0) * scale;\n }\n}"; // NOLINT static std::string eltwise_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string embed_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT static std::string fillbuffer_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT @@ -28,20 +28,20 @@ static std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"hea static std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT static std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT static std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -static std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -static std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?0.0:1.0);\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tp) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tp) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT static std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT static std::string softmax_loss_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT static std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n\n__kernel void TEMPLATE(tile,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n const int_tp tile_size, const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size / num_tiles) % bottom_tile_axis;\n const int_tp n = index / tile_size / num_tiles / bottom_tile_axis;\n const int_tp bottom_index = (n * bottom_tile_axis + b) * tile_size + d;\n top_data[index] = bottom_data[bottom_index];\n }\n}\n\n\n__kernel void TEMPLATE(tile_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp tile_size,\n const int_tp num_tiles,\n const int_tp bottom_tile_axis,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp d = index % tile_size;\n const int_tp b = (index / tile_size) % bottom_tile_axis;\n const int_tp n = index / tile_size / bottom_tile_axis;\n bottom_diff[index] = 0;\n int_tp top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;\n for (int_tp t = 0; t < num_tiles; ++t) {\n bottom_diff[index] += top_diff[top_index];\n top_index += bottom_tile_axis * tile_size;\n }\n }\n}"; // NOLINT -static std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1. / (1. + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1 : 0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0);\n }\n }\n}"; // NOLINT +static std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1.0 / (1.0 + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1.0 : 0.0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0?1.0:0.0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0?1.0:0.0);\n }\n }\n}"; // NOLINT static std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT static std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT -static std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0f + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT +static std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0 + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0 + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT static std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT static std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT static std::string contrastive_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(cll_backward,Dtype)(const int_tp count, const int_tp channels,\n const Dtype margin, const int legacy_version,\n const Dtype alpha, __global const Dtype* y,\n __global const Dtype* diff, __global const Dtype* dist_sq,\n __global Dtype *bottom_diff) {\n for (int_tp i = get_global_id(0); i < count;\n i += get_global_size(0)) {\n int_tp n = i / channels; // the num index, to access y and dist_sq\n if ((int_tp)(y[n])) { // similar pairs\n bottom_diff[i] = alpha * diff[i];\n } else { // dissimilar pairs\n Dtype mdist = 0.0;\n Dtype beta = 0.0;\n if (legacy_version == 1) {\n mdist = (margin - dist_sq[n]);\n beta = -alpha;\n } else {\n Dtype dist = sqrt(dist_sq[n]);\n mdist = (margin - dist);\n beta = -alpha * mdist / (dist + 1e-4) * diff[i];\n }\n if (mdist > 0.0) {\n bottom_diff[i] = beta;\n } else {\n bottom_diff[i] = 0;\n }\n }\n }\n}"; // NOLINT -static std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold));\n }\n}"; // NOLINT +static std::string dropout_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const uint_tp* mask,\n const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] * ((mask[index] > threshold)?1.0:0.0) * scale;\n }\n}\n\n__kernel void TEMPLATE(dropout_backward,Dtype)(\n const int_tp n, __global const Dtype* in_diff,\n __global const uint_tp* mask, const uint_tp threshold,\n const Dtype scale,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * ((mask[index] > threshold)?1.0:0.0) * scale;\n }\n}"; // NOLINT static std::string eltwise_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(eltwise_max_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data_a,\n __global const Dtype* bottom_data_b, const int_tp blob_idx,\n __global Dtype* top_data,\n __global int_tp* mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n if (bottom_data_a[index] > bottom_data_b[index]) {\n // only update for very first bottom_data blob (blob_idx == 0)\n if (blob_idx == 0) {\n maxval = bottom_data_a[index];\n top_data[index] = maxval;\n maxidx = blob_idx;\n mask[index] = maxidx;\n }\n } else {\n maxval = bottom_data_b[index];\n top_data[index] = maxval;\n maxidx = blob_idx + 1;\n mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(eltwise_max_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp blob_idx,\n __global const int_tp* mask,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n Dtype gradient = 0;\n if (mask[index] == blob_idx) {\n gradient += top_diff[index];\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string embed_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(embed_forward,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* weight,\n const int_tp M, const int_tp N,\n const int_tp K,\n __global Dtype* top_data) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n top_data[top_index] = weight[weight_index];\n }\n }\n\n// atomic_add from: http://suhorukov.blogspot.com/2011/12/opencl-11-atomic-operations-on-floating.html\n#if (TYPE == TYPE_FLOAT)\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n uint_tp intVal;\n Dtype floatVal;\n } newVal;\n union {\n uint_tp intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atomic_cmpxchg((volatile __global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n\n#if (TYPE == TYPE_DOUBLE)\n#ifdef ATOMICS_64_AVAILABLE\ninline void TEMPLATE(atomic_add,Dtype)(volatile __global Dtype *source, const Dtype operand) {\n union {\n unsigned long intVal;\n Dtype floatVal;\n } newVal;\n union {\n unsigned long intVal;\n Dtype floatVal;\n } prevVal;\n do {\n prevVal.floatVal = *source;\n newVal.floatVal = prevVal.floatVal + operand;\n } while (atom_cmpxchg((volatile __global unsigned long *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);\n}\n\n__kernel void TEMPLATE(embed_backward,Dtype)(const int_tp nthreads, __global const Dtype* bottom_data,\n __global const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,\n __global Dtype* weight_diff) {\n for (int_tp top_index = get_global_id(0); top_index < nthreads;\n top_index += get_global_size(0)) {\n const int_tp n = top_index / N;\n const int_tp d = top_index % N;\n const int_tp index = (int_tp)(bottom_data[n]);\n const int_tp weight_index = index * N + d;\n\n TEMPLATE(atomic_add,Dtype)((weight_diff + weight_index), *(top_diff + top_index));\n }\n}\n#endif\n#endif"; // NOLINT static std::string fillbuffer_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(fillbuffer,Dtype)(const int_tp n, const char alpha, __global char* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}\n\n__kernel void TEMPLATE(fill,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* x,\n const int_tp offx) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n x[index + offx] = alpha;\n }\n}"; // NOLINT @@ -50,8 +50,8 @@ static std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"he static std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT static std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT static std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -static std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw]));\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT -static std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tpc) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?0.0:1.0);\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tp) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tp) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT static std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT static std::string softmax_loss_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(softmax_loss_forward,Dtype)(\n int_tp n, __global const Dtype* prob_data, __global const Dtype* label,\n __global Dtype* loss,\n const int_tp num, const int_tp dim, const int_tp spatial_dim,\n const int has_ignore_label_, const int_tp ignore_label_,\n __global Dtype* counts) {\n\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n loss[index] = 0;\n counts[index] = 0;\n } else {\n loss[index] = -log((Dtype)(\n max((Dtype) (prob_data[n * dim + label_value * spatial_dim + s]),\n (Dtype) FLT_MIN)));\n counts[index] = 1;\n }\n }\n}\n\n__kernel void TEMPLATE(softmax_loss_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top,\n __global const Dtype* label,\n __global Dtype* bottom_diff,\n const int_tp num,\n const int_tp dim,\n const int_tp spatial_dim,\n const int has_ignore_label_,\n const int_tp ignore_label_,\n __global Dtype* counts) {\n\n const int_tp channels = dim / spatial_dim;\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n const int_tp n = index / spatial_dim;\n const int_tp s = index % spatial_dim;\n const int_tp label_value = (int_tp) (label[n * spatial_dim + s]);\n\n if (has_ignore_label_ == 1 && label_value == ignore_label_) {\n for (int_tp c = 0; c < channels; ++c) {\n bottom_diff[n * dim + c * spatial_dim + s] = 0;\n }\n counts[index] = 0;\n } else {\n bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;\n counts[index] = 1;\n }\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/activation.cl b/src/caffe/greentea/cl_kernels/activation.cl index b07ce474a77..d0bfc625245 100644 --- a/src/caffe/greentea/cl_kernels/activation.cl +++ b/src/caffe/greentea/cl_kernels/activation.cl @@ -18,7 +18,7 @@ __kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n, Dtype negative_slope) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { out_diff[index] = in_diff[index] - * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); + * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * negative_slope); } } @@ -44,7 +44,7 @@ __kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n, __global const Dtype* in, __global Dtype* out) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - out[index] = 1. / (1. + exp(-in[index])); + out[index] = 1.0 / (1.0 + exp(-in[index])); } } @@ -62,7 +62,7 @@ __kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold, __global const Dtype* in, __global Dtype* out) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - out[index] = in[index] > threshold ? 1 : 0; + out[index] = in[index] > threshold ? 1.0 : 0.0; } } @@ -88,7 +88,7 @@ __kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channe for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { int_tp c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] - * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); + * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * slope_data[c]); } } @@ -98,11 +98,11 @@ __kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp __global const Dtype* in_data, __global Dtype* out_diff) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); + out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0?1.0:0.0); for (int k = 1; k < rows; k++) { out_diff[index] += in_diff[index + k * rowPitch] * in_data[index + k * rowPitch] - * (in_data[index + k * rowPitch] <= 0); + * (in_data[index + k * rowPitch] <= 0?1.0:0.0); } } } diff --git a/src/caffe/greentea/cl_kernels/bnll.cl b/src/caffe/greentea/cl_kernels/bnll.cl index 607c2c74e9d..a385484e857 100644 --- a/src/caffe/greentea/cl_kernels/bnll.cl +++ b/src/caffe/greentea/cl_kernels/bnll.cl @@ -7,9 +7,9 @@ __kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n, __global Dtype* out) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { if (in[index] > 0.0f) { - out[index] = in[index] + log((Dtype) (1.0f + exp(-in[index]))); + out[index] = in[index] + log((Dtype) (1.0 + exp(-in[index]))); } else { - out[index] = log((Dtype) (1.0f + exp(in[index]))); + out[index] = log((Dtype) (1.0 + exp(in[index]))); } } } diff --git a/src/caffe/greentea/cl_kernels/dropout.cl b/src/caffe/greentea/cl_kernels/dropout.cl index 5ea5b11ab2c..a3debfa6d52 100644 --- a/src/caffe/greentea/cl_kernels/dropout.cl +++ b/src/caffe/greentea/cl_kernels/dropout.cl @@ -9,7 +9,7 @@ __kernel void TEMPLATE(dropout_forward,Dtype)(const int_tp n, const Dtype scale, __global Dtype* out) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - out[index] = in[index] * ((Dtype)(mask[index] > threshold)) * scale; + out[index] = in[index] * ((mask[index] > threshold)?1.0:0.0) * scale; } } @@ -19,6 +19,6 @@ __kernel void TEMPLATE(dropout_backward,Dtype)( const Dtype scale, __global Dtype* out_diff) { for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) { - out_diff[index] = in_diff[index] * scale * ((Dtype)(mask[index] > threshold)); + out_diff[index] = in_diff[index] * ((mask[index] > threshold)?1.0:0.0) * scale; } } diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index 94fda467232..8f589204403 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -238,7 +238,7 @@ __kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads, const int_tp phend = min(h / stride_h + 1, pooled_height); const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int_tp pwend = min(w / stride_w + 1, pooled_width); - Dtype gradient = 0; + Dtype gradient = 0.0; __global const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int_tp ph = phstart; ph < phend; ++ph) { @@ -274,7 +274,7 @@ __kernel void TEMPLATE(sto_pool_backward,Dtype)( const int_tp phend = min(h / stride_h + 1, pooled_height); const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int_tp pwend = min(w / stride_w + 1, pooled_width); - Dtype gradient = 0; + Dtype gradient = 0.0; __global const Dtype* rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; __global const Dtype* top_diff_slice = top_diff @@ -282,7 +282,7 @@ __kernel void TEMPLATE(sto_pool_backward,Dtype)( for (int_tp ph = phstart; ph < phend; ++ph) { for (int_tp pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] - * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])); + * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?0.0:1.0); } } bottom_diff[index] = gradient; diff --git a/src/caffe/greentea/cl_kernels/pooling_nd.cl b/src/caffe/greentea/cl_kernels/pooling_nd.cl index 1b5e63038d4..73a2dc147e2 100644 --- a/src/caffe/greentea/cl_kernels/pooling_nd.cl +++ b/src/caffe/greentea/cl_kernels/pooling_nd.cl @@ -137,8 +137,8 @@ __kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n, d_start[i] = (d_idx[i] + pad[i] < kernel_size[i]) ? 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1; - d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i] + 1), - (int_tpc) (pooled_size[i])); + d_end[i] = min((int_tp) ((d_idx[i] + pad[i]) / stride[i] + 1), + (int_tp) (pooled_size[i])); } num /= size[i]; offset *= pooled_size[i]; diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 4d4923805da..eeba7772017 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -29,7 +29,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, CHECK_GE(num_spatial_axes_, 0); vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); vector spatial_dim_blob_shape( - 1, std::max(num_spatial_axes_, (int_tpc) 1)); + 1, std::max(num_spatial_axes_, (int_tp) 1)); // Setup filter kernel dimensions (kernel_shape_). kernel_shape_.Reshape(spatial_dim_blob_shape); int_tp* kernel_shape_data = kernel_shape_.mutable_cpu_data(); diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index ee23910494b..0724c1bf94c 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -650,7 +650,7 @@ TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) { // Copy pre-generated top diff into actual top diff; // do Backward and save result in backward_result_2d. ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), + caffe_cpu_copy(top_diff.count(), top_diff.cpu_data(), this->blob_top_->mutable_cpu_diff()); layer_2d.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); @@ -681,7 +681,7 @@ TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) { // Copy pre-generated top diff into actual top diff; // do Backward and save result in backward_result_nd. ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), + caffe_cpu_copy(top_diff.count(), top_diff.cpu_data(), this->blob_top_->mutable_cpu_diff()); layer_nd.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp index 23574d356d7..dd61dc0b90f 100644 --- a/src/caffe/test/test_deconvolution_layer.cpp +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -216,7 +216,7 @@ TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) { // Copy pre-generated top diff into actual top diff; // do Backward and save result in backward_result_2d. ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), + caffe_cpu_copy(top_diff.count(), top_diff.cpu_data(), this->blob_top_->mutable_cpu_diff()); layer_2d.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); @@ -247,7 +247,7 @@ TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) { // Copy pre-generated top diff into actual top diff; // do Backward and save result in backward_result_nd. ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), + caffe_cpu_copy(top_diff.count(), top_diff.cpu_data(), this->blob_top_->mutable_cpu_diff()); layer_nd.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); From fdd7efb0541dd9b7a39b79c59f35aea4c9b2be20 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 3 Jan 2016 22:20:20 +0100 Subject: [PATCH 246/254] Linking ISAAC option. --- Makefile | 6 ++++++ Makefile.config.example | 8 +++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 0502bb858e9..d47cd1e78f4 100644 --- a/Makefile +++ b/Makefile @@ -203,6 +203,12 @@ ifeq ($(USE_GREENTEA),1) COMMON_FLAGS += -DUSE_CLBLAS endif + # Use ISAAC clBLAS replacement + ifeq ($(USE_ISAAC), 1) + LIBRARIES += isaac + COMMON_FLAGS += -DUSE_CLBLAS + endif + # Requires valid OpenCL library LIBRARY_DIRS += $(CLLIBS) # Requires valid OpenCL headers and valid ViennaCL diff --git a/Makefile.config.example b/Makefile.config.example index 290b4c5507c..2063045e366 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -15,9 +15,11 @@ USE_GREENTEA := 0 # Folder of the ViennaCL header-only library VIENNACL_DIR = ../ViennaCL -# Either set clBLAS to 1 or it will use ViennaclBLAS. -# CLBLAS should be faster, especially on AMD cards. -USE_CLBLAS := 0 +# Override BLAS, use clBLAS insead of ViennaclBLAS. +# USE_CLBLAS := 1 + +# Override BLAS, use ISAAC instead of ViennaclBLAS. +# USE_ISAAC := 1 # cuDNN acceleration switch (uncomment to build with cuDNN). # USE_CUDNN := 1 From 5efd4f9487691cf6f9ef6aaea49d6236fe070996 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 3 Jan 2016 22:33:06 +0100 Subject: [PATCH 247/254] ISAAC support. --- CMakeLists.txt | 5 ++++ cmake/Dependencies.cmake | 13 +++++++++- cmake/Modules/FindISAAC.cmake | 47 +++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 cmake/Modules/FindISAAC.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 3ad13a3a4fe..3e3349b1c39 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,6 +26,7 @@ caffe_option(USE_INDEX_64 "Build Caffe with 64 bit indexing" OFF) caffe_option(USE_CUDA "Build Caffe with CUDA support" ON) caffe_option(USE_GREENTEA "Build Caffe with OpenCL support" ON) caffe_option(USE_CLBLAS "Build Caffe with clBLAS support (instead of using ViennaClBLAS)" OFF) +caffe_option(USE_ISAAC "Build Caffe with ISAAC support (instead of using ViennaClBLAS)" OFF) caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" OFF) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) caffe_option(BUILD_python "Build Python wrapper" ON) @@ -46,6 +47,10 @@ if(CPU_ONLY) set(USE_CLBLAS OFF) endif() +if(USE_ISAAC) + set(USE_CLBLAS ON) +endif + # ---[ Dependencies include(cmake/Dependencies.cmake) diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index ec7eb0359b6..74490fcd26b 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -77,7 +77,7 @@ if (USE_GREENTEA) endif() # ---[ clBLAS -if (USE_CLBLAS) +if (USE_CLBLAS AND NOT USE_ISAAC) find_package(clBLAS) if (NOT CLBLAS_FOUND) message(FATAL_ERROR "clBLAS required but not found.") @@ -87,6 +87,17 @@ if (USE_CLBLAS) set(HAVE_CLBLAS TRUE) endif() +# ---[ ISAAC +if (USE_ISAAC) + find_package(ISAAC) + if (NOT ISAAC_FOUND) + message(FATAL_ERROR "ISAAC required but not found.") + endif() + # include_directories(SYSTEM ${CLBLAS_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS ${ISAAC_LIBRARY}) + set(HAVE_ISAAC TRUE) +endif() + # ---[ OpenCV if(USE_OPENCV) find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs) diff --git a/cmake/Modules/FindISAAC.cmake b/cmake/Modules/FindISAAC.cmake new file mode 100644 index 00000000000..d7edabae632 --- /dev/null +++ b/cmake/Modules/FindISAAC.cmake @@ -0,0 +1,47 @@ +SET(ISAAC_INCLUDE_SEARCH_PATHS + /usr/include + /usr/local/include + /opt/isaac/include + $ENV{ISAAC_HOME} + $ENV{ISAAC_HOME}/include +) + +SET(ISAAC_LIB_SEARCH_PATHS + /lib + /lib64 + /usr/lib + /usr/lib64 + /usr/local/lib + /usr/local/lib64 + /opt/isaac/lib + $ENV{ISAAC_HOME} + $ENV{ISAAC_HOME}/lib + ) + +FIND_PATH(ISAAC_INCLUDE_DIR NAMES isaac.h PATHS ${ISAAC_INCLUDE_SEARCH_PATHS}) +FIND_LIBRARY(ISAAC_LIBRARY NAMES isaac PATHS ${ISAAC_LIB_SEARCH_PATHS}) + +SET(ISAAC_FOUND ON) + +# Check libraries +IF(NOT ISAAC_LIBRARY) + SET(ISAAC_FOUND OFF) + MESSAGE(STATUS "Could not find ISAAC lib. Turning ISAAC_FOUND off") +ENDIF() + +IF (ISAAC_FOUND) + IF (NOT ISAAC_FIND_QUIETLY) + MESSAGE(STATUS "Found ISAAC libraries: ${ISAAC_LIBRARY}") + MESSAGE(STATUS "Found ISAAC include: ${ISAAC_INCLUDE_DIR}") + ENDIF (NOT ISAAC_FIND_QUIETLY) +ELSE (ISAAC_FOUND) + IF (ISAAC_FIND_REQUIRED) + MESSAGE(FATAL_ERROR "Could not find ISAAC") + ENDIF (ISAAC_FIND_REQUIRED) +ENDIF (ISAAC_FOUND) + +MARK_AS_ADVANCED( + ISAAC_INCLUDE_DIR + ISAAC_LIBRARY +) + From 2860463767586b73c01d0007f6ffa5f1e73607a6 Mon Sep 17 00:00:00 2001 From: fabian Date: Sun, 3 Jan 2016 22:36:47 +0100 Subject: [PATCH 248/254] typo fix. --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3e3349b1c39..b3db80218b7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,7 +49,7 @@ endif() if(USE_ISAAC) set(USE_CLBLAS ON) -endif +endif() # ---[ Dependencies include(cmake/Dependencies.cmake) From c0d52ce219d65793a2b78d6ba09f0916d7c3bfc7 Mon Sep 17 00:00:00 2001 From: fabian Date: Mon, 4 Jan 2016 02:37:30 +0100 Subject: [PATCH 249/254] CuDNN at least 4 dimensional tensors fix. --- include/caffe/util/cudnn.hpp | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index 7199e833e69..82bfd1b12f0 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -4,7 +4,7 @@ #include - +#include #include #include "caffe/common.hpp" @@ -76,19 +76,28 @@ inline void setTensorNdDesc(cudnnTensorDescriptor_t* desc, const int_tp total_dims, const int_tp* shape, const int_tp* stride) { - std::vector shape_int(total_dims); - std::vector stride_int(total_dims); + // Pad to at least 4 dimensions + int_tp cudnn_dims = std::max(total_dims, (int_tp)4); + int_tp padding = std::max((int_tp)0, cudnn_dims - total_dims); - for (int_tp i = 0; i < total_dims; ++i) { - shape_int[i] = shape[i]; - stride_int[i] = stride[i]; + std::vector shape_int(cudnn_dims); + std::vector stride_int(cudnn_dims); + + for (int_tp i = cudnn_dims - 1; i >= 0; --i) { + if (i < padding) { + shape_int[i] = 1; + stride_int[i] = shape_int[i + 1] * stride_int[i + 1]; + } else { + shape_int[i] = shape[i - padding]; + stride_int[i] = stride[i - padding]; + } } const int* shape_ptr = &shape_int[0]; const int* stride_ptr = &stride_int[0]; CUDNN_CHECK( - cudnnSetTensorNdDescriptor(*desc, dataType::type, total_dims, + cudnnSetTensorNdDescriptor(*desc, dataType::type, cudnn_dims, shape_ptr, stride_ptr)); } From 96b84b6a6d59d41fb7c97286d283ca848f054e2c Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 6 Jan 2016 18:23:52 +0100 Subject: [PATCH 250/254] Expose snapshot format to python --- python/caffe/_caffe.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 9de6453c76a..adcf9f14de4 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -389,6 +389,8 @@ BOOST_PYTHON_MODULE(_caffe) { &SolverParameter::set_stepsize) .add_property("snapshot", &SolverParameter::snapshot, &SolverParameter::set_snapshot) + .add_property("snapshot_format", &SolverParameter::snapshot_format, + &SolverParameter::set_snapshot_format) .add_property("snapshot_prefix", bp::make_function(&SolverParameter::snapshot_prefix, bp::return_value_policy()), @@ -410,6 +412,10 @@ BOOST_PYTHON_MODULE(_caffe) { static_cast( &SolverParameter::set_train_net)); + bp::enum_<::caffe::SolverParameter_SnapshotFormat>("snapshot_format") + .value("HDF5", SolverParameter_SnapshotFormat_HDF5) + .value("BINARYPROTO", SolverParameter_SnapshotFormat_BINARYPROTO); + bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( From f85058b2e88e82890090c699a1e0a0d74fc73ecd Mon Sep 17 00:00:00 2001 From: fabian Date: Fri, 8 Jan 2016 23:25:26 +0100 Subject: [PATCH 251/254] Type int_tp fix. --- include/caffe/net.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 9ca683bdb56..4c60258adf5 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -163,13 +163,13 @@ class Net { return top_vecs_; } /// @brief returns the ids of the top blobs of layer i - inline const vector & top_ids(int i) const { + inline const vector & top_ids(int_tp i) const { CHECK_GE(i, 0) << "Invalid layer id"; CHECK_LT(i, top_id_vecs_.size()) << "Invalid layer id"; return top_id_vecs_[i]; } /// @brief returns the ids of the bottom blobs of layer i - inline const vector & bottom_ids(int i) const { + inline const vector & bottom_ids(int_tp i) const { CHECK_GE(i, 0) << "Invalid layer id"; CHECK_LT(i, bottom_id_vecs_.size()) << "Invalid layer id"; return bottom_id_vecs_[i]; From d7962d1ab7d7109280cd6f703f71c039036625f8 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 9 Jan 2016 02:24:07 +0100 Subject: [PATCH 252/254] OpenCL stochastic kernel backward bugfix. --- src/caffe/greentea/cl_kernels.cpp | 4 ++-- src/caffe/greentea/cl_kernels/pooling.cl | 15 ++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index c09674f083a..412a1170d55 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -28,7 +28,7 @@ static std::string im2col_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"hea static std::string lrn_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT static std::string math_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT static std::string mergecrop_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -static std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?0.0:1.0);\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp stride_h,\n const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?1.0:0.0);\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string pooling_nd_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tp) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tp) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string pooling_sk_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT static std::string slice_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT @@ -51,7 +51,7 @@ static std::string im2col_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"he static std::string lrn_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(lrn_compute_output,Dtype)(const int_tp nthreads,\n __global const Dtype* in,\n __global const Dtype* scale,\n const Dtype negative_beta,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n out[index] = in[index] * pow(scale[index], negative_beta);\n }\n}\n\n__kernel void TEMPLATE(lrn_fill_scale,Dtype)(const int_tp nthreads, __global const Dtype* in,\n const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp size,\n const Dtype alpha_over_size, const Dtype k,\n __global Dtype* const scale) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* in_off = in + offset;\n __global Dtype* scale_off = scale + offset;\n int_tp head = 0;\n const int_tp pre_pad = (size - 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_scale = 0;\n // fill the scale at [n, :, h, w]\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_scale += in_off[head * step] * in_off[head * step];\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_scale -= in_off[(head - size) * step]\n * in_off[(head - size) * step];\n }\n scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;\n ++head;\n }\n }\n}\n\n__kernel void TEMPLATE(lrn_compute_diff,Dtype)(const int_tp nthreads,\n __global const Dtype* bottom_data,\n __global const Dtype* top_data,\n __global const Dtype* scale,\n __global const Dtype* top_diff, const int_tp num,\n const int_tp channels, const int_tp height,\n const int_tp width, const int_tp size,\n const Dtype negative_beta,\n const Dtype cache_ratio,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp n = index / width / height;\n const int_tp offset = (n * channels * height + h) * width + w;\n const int_tp step = height * width;\n __global const Dtype* bottom_off = bottom_data + offset;\n __global const Dtype* top_off = top_data + offset;\n __global const Dtype* scale_off = scale + offset;\n __global const Dtype* top_diff_off = top_diff + offset;\n __global Dtype* bottom_diff_off = bottom_diff + offset;\n int_tp head = 0;\n const int_tp pre_pad = size - (size + 1) / 2;\n const int_tp post_pad = size - pre_pad - 1;\n Dtype accum_ratio = 0;\n // accumulate values\n while (head < post_pad && head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n ++head;\n }\n // both add and subtract\n while (head < channels) {\n accum_ratio += top_diff_off[head * step] * top_off[head * step]\n / scale_off[head * step];\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n // subtract only\n while (head < channels + post_pad) {\n if (head - size >= 0) {\n accum_ratio -= top_diff_off[(head - size) * step]\n * top_off[(head - size) * step] / scale_off[(head - size) * step];\n }\n bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)\n * step] * pow(scale_off[(head - post_pad) * step], negative_beta)\n - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;\n ++head;\n }\n }\n}"; // NOLINT static std::string math_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(mul,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] * b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(div,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa,\n __global Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = a[index + offa] / b[index + offb];\n }\n}\n\n__kernel void TEMPLATE(add_scalar,Dtype)(const int_tp N, const Dtype alpha,\n__global Dtype* Y,\n const int_tp offY) {\n for (int_tp index = get_global_id(0); index < N; index += get_global_size(0)) {\n Y[offY + index] += alpha;\n }\n}\n\n__kernel void TEMPLATE(add,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] + b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(sub,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global const Dtype* b,\n const int_tp offb, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = a[offa + index] - b[offb + index];\n }\n}\n\n__kernel void TEMPLATE(abs,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = fabs((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(exp,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = exp(a[offa + index]);\n }\n}\n\n__kernel void TEMPLATE(log,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[offy + index] = log((Dtype)(a[offa + index]));\n }\n}\n\n__kernel void TEMPLATE(powx,Dtype)(const int_tp n, __global const Dtype* a,\n const int_tp offa, Dtype alpha,\n __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if(alpha == 2.0) {\n y[offy + index] = pow((Dtype)fabs(a[offa + index]), (Dtype)alpha);\n } else {\n y[offy + index] = pow((Dtype)a[offa + index], (Dtype)alpha);\n }\n }\n}\n\n__kernel void TEMPLATE(sign,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = (0.0 < x[index + offx])\n - (x[index + offx] < 0.0);\n }\n}\n\n__kernel void TEMPLATE(sgnbit,Dtype)(const int_tp n, __global const Dtype* x,\n const int_tp offx, __global Dtype* y,\n const int_tp offy) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index + offy] = signbit(x[index + offx]);\n }\n}"; // NOLINT static std::string mergecrop_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(merge_copy_forward, Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global const Dtype* bottom_a,\n const int_tp forward_a,\n __global const Dtype* bottom_b,\n const int_tp forward_b,\n __global Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n top[index] = (forward_a == 1) ? bottom_a[aidx] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n top[index] = (forward_b == 1) ? bottom_b[bidx] : 0;\n }\n }\n}\n\n__kernel void TEMPLATE(merge_copy_backward,Dtype)(const int_tp nthreads,\n const int_tp dims,\n __global Dtype* bottom_a,\n const int_tp backward_a,\n __global Dtype* bottom_b,\n const int_tp backward_b,\n __global const Dtype* top,\n const int_tp num,\n const int_tp channels_a,\n const int_tp channels_b,\n __global const int_tp* shape_a,\n __global const int_tp* shape_b) {\n int_tp pad[6];\n int_tp tmp_idx[6];\n int_tp size_a = 1;\n int_tp size_b = 1;\n\n for (int_tp i = 0; i < dims; ++i) {\n pad[i] = (shape_b[i] - shape_a[i]) / 2;\n size_a *= shape_a[i];\n size_b *= shape_b[i];\n }\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n int_tp batch_id = index / ((channels_a + channels_b) * size_a);\n int_tp bottom_id = ((index - batch_id * (channels_a + channels_b) * size_a)\n / (channels_a * size_a)) % 2;\n int_tp counter = index;\n for (int_tp i = dims - 1; i >= 0; --i) {\n tmp_idx[i] = counter % shape_a[i];\n counter /= shape_a[i];\n }\n\n if (bottom_id == 0) {\n int_tp channel_id = (index / size_a) % channels_a;\n int_tp aidx = batch_id * channels_a + channel_id;\n for (int_tp i = 0; i < dims; ++i) {\n aidx *= shape_a[i];\n aidx += tmp_idx[i];\n }\n bottom_a[aidx] = (backward_a == 1) ? top[index] : 0;\n } else {\n int_tp channel_id = (index / size_a) % channels_b;\n int_tp bidx = (batch_id * channels_b + channel_id) * size_b;\n int_tp btemp = 1;\n for (int_tp i = dims - 1; i >= 0; --i) {\n bidx += btemp * (tmp_idx[i] + pad[i]);\n btemp *= shape_b[i];\n }\n bottom_b[bidx] = (backward_b == 1) ? top[index] : 0;\n }\n }\n}"; // NOLINT -static std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num, const int_tp channels,\n const int_tp height, const int_tp width, const int_tp pooled_height,\n const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w,\n const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?0.0:1.0);\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT +static std::string pooling_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask, __global int_tp* mask, __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wend = min(wstart + kernel_w, width);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n if (bottom_slice[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_slice[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w, const int_tp pad_h,\n const int_tp pad_w, __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n const int_tp pool_size = (hend - hstart) * (wend - wstart);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_slice[h * width + w];\n }\n }\n top_data[index] = aveval / pool_size;\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* rand_idx,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n }\n }\n const float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_slice[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test,Dtype)(\n const int_tp nthreads, __global const Dtype* const bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width, const int_tp kernel_h,\n const int_tp kernel_w, const int_tp stride_h, const int_tp stride_w,\n __global Dtype* top_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp pw = index % pooled_width;\n const int_tp ph = (index / pooled_width) % pooled_height;\n const int_tp c = (index / pooled_width / pooled_height) % channels;\n const int_tp n = index / pooled_width / pooled_height / channels;\n const int_tp hstart = ph * stride_h;\n const int_tp hend = min(hstart + kernel_h, height);\n const int_tp wstart = pw * stride_w;\n const int_tp wend = min(wstart + kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_slice = bottom_data\n + (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n cumsum += bottom_slice[h * width + w];\n cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart =\n (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;\n const int_tp phend = min((h + pad_h) / stride_h + 1, pooled_height);\n const int_tp pwstart =\n (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;\n const int_tp pwend = min((w + pad_w) / stride_w + 1, pooled_width);\n Dtype gradient = 0;\n const int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff + offset;\n if (use_mask == 1) {\n __global const int_tp* mask_slice = mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n } else {\n __global const Dtype* top_mask_slice = top_mask + offset;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_slice[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads,\n __global const Dtype* top_diff,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width + pad_w;\n const int_tp h = (index / width) % height + pad_h;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* const top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n // figure out the pooling size\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + kernel_h, height + pad_h);\n int_tp wend = min(wstart + kernel_w, width + pad_w);\n int_tp pool_size = (hend - hstart) * (wend - wstart);\n gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_backward,Dtype)(\n const int_tp nthreads, __global const Dtype* rand_idx,\n __global const Dtype* const top_diff, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp stride_h,\n const int_tp stride_w, __global Dtype* bottom_diff) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n // find out the local index\n // find out the local offset\n const int_tp w = index % width;\n const int_tp h = (index / width) % height;\n const int_tp c = (index / width / height) % channels;\n const int_tp n = index / width / height / channels;\n const int_tp phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n const int_tp phend = min(h / stride_h + 1, pooled_height);\n const int_tp pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n const int_tp pwend = min(w / stride_w + 1, pooled_width);\n Dtype gradient = 0.0;\n __global const Dtype* rand_idx_slice = rand_idx\n + (n * channels + c) * pooled_height * pooled_width;\n __global const Dtype* top_diff_slice = top_diff\n + (n * channels + c) * pooled_height * pooled_width;\n for (int_tp ph = phstart; ph < phend; ++ph) {\n for (int_tp pw = pwstart; pw < pwend; ++pw) {\n gradient += top_diff_slice[ph * pooled_width + pw]\n * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?1.0:0.0);\n }\n }\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string pooling_nd_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* bottom_data,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask, __global Dtype* top_mask) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp offset = 1;\n int_tp num = index;\n\n bool do_continue = false;\n\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % pooled_size[i];\n d_start[i] = d_idx[i] * stride[i] - pad[i];\n d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);\n d_start[i] = max(d_start[i], (int_tp)0);\n num /= pooled_size[i];\n offset *= size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] >= d_end[i]) {\n top_data[index] = -FLT_MAX;\n if (use_mask) {\n mask[index] = -1;\n } else {\n top_mask[index] = -1;\n }\n do_continue = true;\n }\n }\n\n if(do_continue) {\n continue;\n }\n\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n int_tp final_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n int_tp size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * size_prod;\n size_prod *= size[i];\n }\n\n if (bottom_data[final_offset] > maxval) {\n maxidx = final_offset;\n maxval = bottom_data[maxidx];\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] >= d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n\n__kernel void TEMPLATE(max_pool_backward_nd, Dtype)(const int_tp n,\n const int_tp num_axes,\n __global const Dtype* top_diff,\n const int use_mask,\n __global const int_tp* mask,\n __global const Dtype* top_mask,\n const int_tp channels,\n __global const int_tp* size,\n __global const int_tp* pooled_size,\n __global const int_tp* kernel_size,\n __global const int_tp* ext_kernel_size,\n __global const int_tp* stride,\n __global const int_tp* dilation,\n __global const int_tp* pad,\n __global Dtype* bottom_diff) {\n int_tp d_idx[6];\n int_tp d_start[6];\n int_tp d_end[6];\n int_tp d_iter[6];\n int_tp i;\n\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n // find out the local index\n // find out the local offset\n int_tp offset = 1;\n int_tp num = index;\n for (i = num_axes - 1; i >= 0; --i) {\n d_idx[i] = num % size[i];\n if (dilation[i] > 1) {\n d_start[i] =\n (d_idx[i] < ext_kernel_size[i]) ?\n d_idx[i] % dilation[i] : (d_idx[i] - ext_kernel_size[i]) + 1;\n d_end[i] =\n (d_idx[i] >= pooled_size[i]) ?\n (pooled_size[i] - 1)\n - (pooled_size[i] - 1 - d_start[i]) % dilation[i] :\n d_idx[i];\n } else {\n d_start[i] =\n (d_idx[i] + pad[i] < kernel_size[i]) ?\n 0 : (d_idx[i] + pad[i] - kernel_size[i]) / stride[i] + 1;\n d_end[i] = min((int_tp) ((d_idx[i] + pad[i]) / stride[i] + 1),\n (int_tp) (pooled_size[i]));\n }\n num /= size[i];\n offset *= pooled_size[i];\n d_iter[i] = d_start[i];\n\n if (d_start[i] > d_end[i]) {\n bottom_diff[index] = 0;\n return;\n }\n }\n int_tp chan = num % channels;\n num /= channels;\n offset *= (num * channels + chan);\n\n Dtype gradient = 0;\n int_tp final_offset = 0;\n int_tp im_offset = 0;\n\n bool incremented;\n do {\n final_offset = offset;\n im_offset = 0;\n int_tp size_prod = 1;\n int_tp pooled_size_prod = 1;\n for (i = num_axes - 1; i >= 0; --i) {\n final_offset += d_iter[i] * pooled_size_prod;\n im_offset += d_idx[i] * size_prod;\n size_prod *= size[i];\n pooled_size_prod *= pooled_size[i];\n }\n\n if (use_mask) {\n if (mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n } else {\n if (top_mask[final_offset] == im_offset) {\n gradient += top_diff[final_offset];\n }\n }\n\n incremented = false;\n for (i = num_axes - 1; i >= 0; --i) {\n if (d_iter[i] > d_end[i] - dilation[i]) {\n d_iter[i] = d_start[i];\n } else {\n d_iter[i] += dilation[i];\n incremented = true;\n break;\n }\n }\n } while (incremented);\n bottom_diff[index] = gradient;\n }\n}"; // NOLINT static std::string pooling_sk_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(max_pool_forward_sk,Dtype)(const int_tp nthreads,\n__global Dtype* bottom_data,\n const int_tp num,\n const int_tp channels,\n const int_tp height,\n const int_tp width,\n const int_tp pooled_height,\n const int_tp pooled_width,\n const int_tp kernel_h,\n const int_tp kernel_w,\n const int_tp ext_kernel_h,\n const int_tp ext_kernel_w,\n const int_tp stride_h,\n const int_tp stride_w,\n const int_tp dilation_h,\n const int_tp dilation_w,\n const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data,\n const int use_mask,\n __global int_tp* mask,\n __global Dtype* top_mask) {\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wend = min(wstart + ext_kernel_w, width);\n hstart = max(hstart, (int_tp) 0);\n wstart = max(wstart, (int_tp) 0);\n Dtype maxval = -FLT_MAX;\n int_tp maxidx = -1;\n __global Dtype* bottom_data_ptr = bottom_data\n + (n * channels + c) * height * width;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n if (bottom_data_ptr[h * width + w] > maxval) {\n maxidx = h * width + w;\n maxval = bottom_data_ptr[maxidx];\n }\n }\n }\n top_data[index] = maxval;\n if (use_mask == 1) {\n mask[index] = maxidx;\n } else {\n top_mask[index] = maxidx;\n }\n }\n}\n\n__kernel void TEMPLATE(max_pool_backward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* top_diff, const int use_mask,\n __global const int_tp* mask, __global const Dtype* top_mask,\n const int_tp num, const int_tp channels, const int_tp height,\n const int_tp width, const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* bottom_diff) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n __global const int_tp* mask_ptr = mask;\n __global const Dtype* top_diff_ptr = top_diff;\n\n// find out the local index\n// find out the local offset\n int_tp w = index % width;\n int_tp h = (index / width) % height;\n int_tp c = (index / width / height) % channels;\n int_tp n = index / width / height / channels;\n\n int_tp pooled_height_1 = pooled_height - 1;\n int_tp pooled_width_1 = pooled_width - 1;\n int_tp phstart =\n (h < ext_kernel_h) ? h % dilation_h : (h - ext_kernel_h) + 1;\n int_tp phend =\n (h >= pooled_height) ?\n pooled_height_1 - (pooled_height_1 - phstart) % dilation_h : h;\n int_tp pwstart =\n (w < ext_kernel_w) ? w % dilation_w : (w - ext_kernel_w) + 1;\n int_tp pwend =\n (w >= pooled_width) ?\n pooled_width_1 - (pooled_width_1 - pwstart) % dilation_w : w;\n\n Dtype gradient = 0;\n int_tp offset = (n * channels + c) * pooled_height * pooled_width;\n top_diff_ptr += offset;\n if (use_mask == 1) {\n mask_ptr += offset;\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (mask_ptr[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n } else {\n for (int_tp ph = phstart; ph <= phend; ph += dilation_h) {\n for (int_tp pw = pwstart; pw <= pwend; pw += dilation_w) {\n if (top_mask[ph * pooled_width + pw] == h * width + w) {\n gradient += top_diff_ptr[ph * pooled_width + pw];\n }\n }\n }\n }\n bottom_diff[index] = gradient;\n }\n}\n\n__kernel void TEMPLATE(ave_pool_forward_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, const int_tp pad_h,\n const int_tp pad_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h - pad_h;\n int_tp wstart = pw * stride_w - pad_w;\n int_tp hend = min(hstart + ext_kernel_h, height + pad_h);\n int_tp wend = min(wstart + ext_kernel_w, width + pad_w);\n hstart = max(hstart, (int_tp)0);\n wstart = max(wstart, (int_tp)0);\n hend = min(hend, height);\n wend = min(wend, width);\n Dtype aveval = 0;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n int_tp pool_size = 0;\n for (int_tp h = hstart; h < hend; ++h) {\n for (int_tp w = wstart; w < wend; ++w) {\n aveval += bottom_data_ptr[h * width + w];\n ++pool_size;\n }\n }\n top_data[index] = aveval / pool_size;\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_train_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w, __global Dtype* rand_idx,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n Dtype cumsum = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n }\n }\n float thres = rand_idx[index] * cumsum;\n // Second pass: get value, and set index.\n cumsum = 0;\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n if (cumsum >= thres) {\n rand_idx[index] = ((n * channels + c) * height + h) * width + w;\n top_data[index] = bottom_data_ptr[h * width + w];\n h = hend;\n w = wend;\n }\n }\n }\n }\n}\n\n__kernel void TEMPLATE(sto_pool_forward_test_sk,Dtype)(\n const int_tp nthreads, __global const Dtype* bottom_data, const int_tp num,\n const int_tp channels, const int_tp height, const int_tp width,\n const int_tp pooled_height, const int_tp pooled_width,\n const int_tp kernel_h, const int_tp kernel_w, const int_tp ext_kernel_h,\n const int_tp ext_kernel_w, const int_tp stride_h, const int_tp stride_w,\n const int_tp dilation_h, const int_tp dilation_w,\n __global Dtype* top_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads; index +=\n get_global_size(0)) {\n int_tp pw = index % pooled_width;\n int_tp ph = (index / pooled_width) % pooled_height;\n int_tp c = (index / pooled_width / pooled_height) % channels;\n int_tp n = index / pooled_width / pooled_height / channels;\n int_tp hstart = ph * stride_h;\n int_tp hend = min(hstart + ext_kernel_h, height);\n int_tp wstart = pw * stride_w;\n int_tp wend = min(wstart + ext_kernel_w, width);\n // We set cumsum to be 0 to avoid divide-by-zero problems\n Dtype cumsum = FLT_MIN;\n Dtype cumvalues = 0.;\n __global const Dtype* bottom_data_ptr = bottom_data;\n bottom_data_ptr += (n * channels + c) * height * width;\n // First pass: get sum\n for (int_tp h = hstart; h < hend; h += dilation_h) {\n for (int_tp w = wstart; w < wend; w += dilation_w) {\n cumsum += bottom_data_ptr[h * width + w];\n cumvalues += bottom_data_ptr[h * width + w]\n * bottom_data_ptr[h * width + w];\n }\n }\n top_data[index] = cumvalues / cumsum;\n }\n\n}"; // NOLINT static std::string slice_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(slice,Dtype)(const int_tp nthreads,\n __global const Dtype* in_data,\n const int forward, const int_tp num_slices,\n const int_tp slice_size,\n const int_tp bottom_slice_axis,\n const int_tp top_slice_axis,\n const int_tp offset_slice_axis,\n __global Dtype* out_data) {\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_slice_size = slice_size * top_slice_axis;\n const int_tp slice_num = index / total_slice_size;\n const int_tp slice_index = index % total_slice_size;\n const int_tp bottom_index = slice_index\n + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;\n if (forward == 1) {\n out_data[index] = in_data[bottom_index];\n } else {\n out_data[bottom_index] = in_data[index];\n }\n }\n}"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/pooling.cl b/src/caffe/greentea/cl_kernels/pooling.cl index 8f589204403..cc56bab12d9 100644 --- a/src/caffe/greentea/cl_kernels/pooling.cl +++ b/src/caffe/greentea/cl_kernels/pooling.cl @@ -258,12 +258,13 @@ __kernel void TEMPLATE(ave_pool_backward,Dtype)(const int_tp nthreads, __kernel void TEMPLATE(sto_pool_backward,Dtype)( const int_tp nthreads, __global const Dtype* rand_idx, - __global const Dtype* const top_diff, const int_tp num, const int_tp channels, - const int_tp height, const int_tp width, const int_tp pooled_height, - const int_tp pooled_width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp stride_h, const int_tp stride_w, __global Dtype* bottom_diff) { - for (int_tp index = get_global_id(0); index < nthreads; - index += get_global_size(0)) { + __global const Dtype* const top_diff, const int_tp num, + const int_tp channels, const int_tp height, const int_tp width, + const int_tp pooled_height, const int_tp pooled_width, + const int_tp kernel_h, const int_tp kernel_w, const int_tp stride_h, + const int_tp stride_w, __global Dtype* bottom_diff) { + for (int_tp index = get_global_id(0); index < nthreads; index += + get_global_size(0)) { // find out the local index // find out the local offset const int_tp w = index % width; @@ -282,7 +283,7 @@ __kernel void TEMPLATE(sto_pool_backward,Dtype)( for (int_tp ph = phstart; ph < phend; ++ph) { for (int_tp pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] - * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?0.0:1.0); + * (index == (int_tp) (rand_idx_slice[ph * pooled_width + pw])?1.0:0.0); } } bottom_diff[index] = gradient; From 164218e5837a2a20451c7dcd86b60a00b9899bf4 Mon Sep 17 00:00:00 2001 From: fabian Date: Sat, 23 Jan 2016 01:06:22 +0100 Subject: [PATCH 253/254] LINT fix. --- src/caffe/util/im2col.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp index d586fad9376..12698268846 100644 --- a/src/caffe/util/im2col.cpp +++ b/src/caffe/util/im2col.cpp @@ -15,17 +15,17 @@ inline bool is_a_ge_zero_and_a_lt_b(int_tp a, int_tp b) { return static_cast(a) < static_cast(b); } -template +template void im2col_cpu(const Dtype* data_im, const int_tp channels, - const int_tp height, const int_tp width, const int_tp kernel_h, const int_tp kernel_w, - const int_tp pad_h, const int_tp pad_w, - const int_tp stride_h, const int_tp stride_w, - const int_tp dilation_h, const int_tp dilation_w, - Dtype* data_col) { - const int_tp output_h = (height + 2 * pad_h - - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; - const int_tp output_w = (width + 2 * pad_w - - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + const int_tp height, const int_tp width, const int_tp kernel_h, + const int_tp kernel_w, const int_tp pad_h, const int_tp pad_w, + const int_tp stride_h, const int_tp stride_w, + const int_tp dilation_h, const int_tp dilation_w, + Dtype* data_col) { + const int_tp output_h = (height + 2 * pad_h + - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int_tp output_w = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int_tp channel_size = height * width; for (int_tp channel = channels; channel--; data_im += channel_size) { for (int_tp kernel_row = 0; kernel_row < kernel_h; kernel_row++) { From 8473f674a72eb80269ede3ab9697b98076ad3432 Mon Sep 17 00:00:00 2001 From: fabian Date: Wed, 27 Jan 2016 04:25:05 +0100 Subject: [PATCH 254/254] OpenCL Bias and Scale layers. --- include/caffe/layers/bias_layer.hpp | 8 +- include/caffe/layers/scale_layer.hpp | 12 +- src/caffe/greentea/cl_kernels.cpp | 4 + src/caffe/greentea/cl_kernels/bias.cl | 43 ++++ src/caffe/layers/bias_layer.cpp | 20 +- src/caffe/layers/bias_layer.cu | 117 +++++++--- src/caffe/layers/scale_layer.cpp | 24 +- src/caffe/layers/scale_layer.cu | 305 +++++++++++++++++++------- src/caffe/proto/caffe.proto | 14 +- src/caffe/test/test_bias_layer.cpp | 74 +++---- src/caffe/test/test_scale_layer.cpp | 91 ++++---- 11 files changed, 481 insertions(+), 231 deletions(-) create mode 100644 src/caffe/greentea/cl_kernels/bias.cl diff --git a/include/caffe/layers/bias_layer.hpp b/include/caffe/layers/bias_layer.hpp index eedc3aaa351..bc84e65951c 100644 --- a/include/caffe/layers/bias_layer.hpp +++ b/include/caffe/layers/bias_layer.hpp @@ -29,9 +29,9 @@ class BiasLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Bias"; } - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int MaxBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp MinBottomBlobs() const { return 1; } + virtual inline int_tp MaxBottomBlobs() const { return 2; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); @@ -44,7 +44,7 @@ class BiasLayer : public Layer { private: Blob bias_multiplier_; - int outer_dim_, bias_dim_, inner_dim_, dim_; + int_tp outer_dim_, bias_dim_, inner_dim_, dim_; }; diff --git a/include/caffe/layers/scale_layer.hpp b/include/caffe/layers/scale_layer.hpp index 924df2e51ab..a4675182ef3 100644 --- a/include/caffe/layers/scale_layer.hpp +++ b/include/caffe/layers/scale_layer.hpp @@ -32,9 +32,9 @@ class ScaleLayer: public Layer { virtual inline const char* type() const { return "Scale"; } // Scale - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int MaxBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int_tp MinBottomBlobs() const { return 1; } + virtual inline int_tp MaxBottomBlobs() const { return 2; } + virtual inline int_tp ExactNumTopBlobs() const { return 1; } protected: /** @@ -68,13 +68,13 @@ class ScaleLayer: public Layer { shared_ptr > bias_layer_; vector*> bias_bottom_vec_; vector bias_propagate_down_; - int bias_param_id_; + int_tp bias_param_id_; Blob sum_multiplier_; Blob sum_result_; Blob temp_; - int axis_; - int outer_dim_, scale_dim_, inner_dim_; + int_tp axis_; + int_tp outer_dim_, scale_dim_, inner_dim_; }; diff --git a/src/caffe/greentea/cl_kernels.cpp b/src/caffe/greentea/cl_kernels.cpp index e8f44d5246d..4a1b28b1ef7 100644 --- a/src/caffe/greentea/cl_kernels.cpp +++ b/src/caffe/greentea/cl_kernels.cpp @@ -15,6 +15,7 @@ static std::string definitions_32 = "// Types used for parameters, offset comput static std::string activation_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1.0 / (1.0 + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1.0 : 0.0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0?1.0:0.0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0?1.0:0.0);\n }\n }\n}"; // NOLINT static std::string auxiliary_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT static std::string batch_reindex_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT +static std::string bias_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bias_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const Dtype* bias,\n const int_tp bias_dim,\n const int_tp inner_dim,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp bias_index = (index / inner_dim) % bias_dim;\n out[index] = in[index] + bias[bias_index];\n }\n}\n\n__kernel void TEMPLATE(scale_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const Dtype* scale,\n const int_tp scale_dim,\n const int_tp inner_dim,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp scale_index = (index / inner_dim) % scale_dim;\n out[index] = in[index] * scale[scale_index];\n }\n}\n\n__kernel void TEMPLATE(scale_bias_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const Dtype* scale,\n __global const Dtype* bias,\n const int_tp scale_dim,\n const int_tp inner_dim,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp scale_index = (index / inner_dim) % scale_dim;\n out[index] = in[index] * scale[scale_index] + bias[scale_index];\n }\n}"; // NOLINT static std::string bnll_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0 + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0 + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT static std::string channel_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT static std::string concat_float = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT @@ -39,6 +40,7 @@ static std::string tile_float = "#ifndef __OPENCL_VERSION__\n#include \"header.c static std::string activation_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(relu_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;\n }\n}\n\n__kernel void TEMPLATE(relu_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n Dtype negative_slope) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * negative_slope);\n }\n}\n\n__kernel void TEMPLATE(tanh_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = tanh(in[index]);\n }\n}\n\n__kernel void TEMPLATE(tanh_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype tanhx = out_data[index];\n out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);\n }\n}\n\n__kernel void TEMPLATE(sigmoid_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = 1.0 / (1.0 + exp(-in[index]));\n }\n}\n\n__kernel void TEMPLATE(sigmoid_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* out_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n const Dtype sigmoid_x = out_data[index];\n out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);\n }\n}\n\n__kernel void TEMPLATE(threshold,Dtype)(const int_tp n, const Dtype threshold,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out[index] = in[index] > threshold ? 1.0 : 0.0;\n }\n}\n\n__kernel void TEMPLATE(prelu_forward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in,\n __global Dtype* out,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];\n }\n}\n\n__kernel void TEMPLATE(prelu_backward,Dtype)(const int_tp n, const int_tp channels,\n const int_tp dim,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff,\n __global const Dtype* slope_data,\n const int_tp div_factor) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n int_tp c = (index / dim) % channels / div_factor;\n out_diff[index] = in_diff[index]\n * ((in_data[index] > 0?1.0:0.0) + (in_data[index] <= 0?1.0:0.0) * slope_data[c]);\n }\n}\n\n__kernel void TEMPLATE(prelu_param_backward,Dtype)(const int_tp n, const int_tp rows,\n const int_tp rowPitch,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0?1.0:0.0);\n for (int k = 1; k < rows; k++) {\n out_diff[index] += in_diff[index + k * rowPitch]\n * in_data[index + k * rowPitch]\n * (in_data[index + k * rowPitch] <= 0?1.0:0.0);\n }\n }\n}"; // NOLINT static std::string auxiliary_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(gpu_set,Dtype)(const int_tp n, const Dtype alpha, __global Dtype* y) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n y[index] = alpha;\n }\n}"; // NOLINT static std::string batch_reindex_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(br_forward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* permut,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n int_tp in_n = (int_tp) (permut[n]);\n out[index] = in[in_n * (inner_dim) + index % (inner_dim)];\n }\n}\n\n__kernel void TEMPLATE(br_backward,Dtype)(const int_tp count, const int_tp inner_dim,\n __global const Dtype* in,\n __global const Dtype* top_indexes,\n __global const Dtype* begins,\n __global const Dtype* counts,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / (inner_dim);\n out[index] = 0;\n int_tp lower = (int_tp) (begins[n]);\n int_tp upper = lower + (int_tp) (counts[n]);\n for (int_tp i = lower; i < upper; ++i) {\n int_tp in_n = (int_tp) (top_indexes[i]);\n out[index] += in[in_n * (inner_dim) + index % (inner_dim)];\n }\n }\n}"; // NOLINT +static std::string bias_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bias_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const Dtype* bias,\n const int_tp bias_dim,\n const int_tp inner_dim,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp bias_index = (index / inner_dim) % bias_dim;\n out[index] = in[index] + bias[bias_index];\n }\n}\n\n__kernel void TEMPLATE(scale_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const Dtype* scale,\n const int_tp scale_dim,\n const int_tp inner_dim,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp scale_index = (index / inner_dim) % scale_dim;\n out[index] = in[index] * scale[scale_index];\n }\n}\n\n__kernel void TEMPLATE(scale_bias_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global const Dtype* scale,\n __global const Dtype* bias,\n const int_tp scale_dim,\n const int_tp inner_dim,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n;\n index += get_global_size(0)) {\n const int_tp scale_index = (index / inner_dim) % scale_dim;\n out[index] = in[index] * scale[scale_index] + bias[scale_index];\n }\n}"; // NOLINT static std::string bnll_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(bnll_forward,Dtype)(const int_tp n,\n __global const Dtype* in,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n if (in[index] > 0.0f) {\n out[index] = in[index] + log((Dtype) (1.0 + exp(-in[index])));\n } else {\n out[index] = log((Dtype) (1.0 + exp(in[index])));\n }\n }\n}\n\n__kernel void TEMPLATE(bnll_backward,Dtype)(const int_tp n,\n __global const Dtype* in_diff,\n __global const Dtype* in_data,\n __global Dtype* out_diff) {\n Dtype kBNLL_THRESHOLD = 50.;\n for (int_tp index = get_global_id(0); index < n; index += get_global_size(0)) {\n Dtype expval = exp(min(in_data[index], kBNLL_THRESHOLD));\n out_diff[index] = in_diff[index] * expval / (expval + 1.);\n }\n}"; // NOLINT static std::string channel_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(kernel_channel_max,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n float maxval = -FLT_MAX;\n for (int_tp c = 0; c < channels; ++c) {\n maxval = max((Dtype)(data[(n * channels + c) * spatial_dim + s]), (Dtype)maxval);\n }\n out[index] = maxval;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_subtract,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* channel_max,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] -= channel_max[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_exp,Dtype)(const int_tp count, __global const Dtype* data,\n __global Dtype* out) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n out[index] = exp(data[index]);\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_sum,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data,\n __global Dtype* channel_sum) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype sum = 0;\n for (int_tp c = 0; c < channels; ++c) {\n sum += data[(n * channels + c) * spatial_dim + s];\n }\n channel_sum[index] = sum;\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_div,Dtype)(const int_tp count, const int_tp num,\n const int_tp channels, const int_tp spatial_dim,\n __global const Dtype* channel_sum,\n __global Dtype* data) {\n for (int_tp index = get_global_id(0); index < count;\n index += get_global_size(0)) {\n int_tp n = index / channels / spatial_dim;\n int_tp s = index % spatial_dim;\n data[index] /= channel_sum[n * spatial_dim + s];\n }\n}\n\n__kernel void TEMPLATE(kernel_channel_dot,Dtype)(const int_tp num, const int_tp channels,\n const int_tp spatial_dim,\n __global const Dtype* data_1,\n __global const Dtype* data_2,\n __global Dtype* channel_dot) {\n for (int_tp index = get_global_id(0); index < num * spatial_dim; index +=\n get_global_size(0)) {\n int_tp n = index / spatial_dim;\n int_tp s = index % spatial_dim;\n Dtype dot = 0;\n for (int_tp c = 0; c < channels; ++c) {\n dot += (data_1[(n * channels + c) * spatial_dim + s]\n * data_2[(n * channels + c) * spatial_dim + s]);\n }\n channel_dot[index] = dot;\n }\n}"; // NOLINT static std::string concat_double = "#ifndef __OPENCL_VERSION__\n#include \"header.cl\"\n#endif\n\n__kernel void TEMPLATE(concat,Dtype)(const int_tp nthreads, __global const Dtype* in_data,\n const int forward, const int_tp num_concats,\n const int_tp concat_size,\n const int_tp top_concat_axis,\n const int_tp bottom_concat_axis,\n const int_tp offset_concat_axis,\n __global Dtype* out_data) {\n\n for (int_tp index = get_global_id(0); index < nthreads;\n index += get_global_size(0)) {\n const int_tp total_concat_size = concat_size * bottom_concat_axis;\n const int_tp concat_num = index / total_concat_size;\n const int_tp concat_index = index % total_concat_size;\n const int_tp top_index = concat_index\n + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;\n if (forward == 1) {\n out_data[top_index] = in_data[index];\n } else {\n out_data[index] = in_data[top_index];\n }\n }\n}"; // NOLINT @@ -74,6 +76,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << activation_float << "\n\n"; // NOLINT ss << auxiliary_float << "\n\n"; // NOLINT ss << batch_reindex_float << "\n\n"; // NOLINT + ss << bias_float << "\n\n"; // NOLINT ss << bnll_float << "\n\n"; // NOLINT ss << channel_float << "\n\n"; // NOLINT ss << concat_float << "\n\n"; // NOLINT @@ -103,6 +106,7 @@ viennacl::ocl::program & RegisterKernels(viennacl::ocl::context *ctx) { ss << activation_double << "\n\n"; // NOLINT ss << auxiliary_double << "\n\n"; // NOLINT ss << batch_reindex_double << "\n\n"; // NOLINT + ss << bias_double << "\n\n"; // NOLINT ss << bnll_double << "\n\n"; // NOLINT ss << channel_double << "\n\n"; // NOLINT ss << concat_double << "\n\n"; // NOLINT diff --git a/src/caffe/greentea/cl_kernels/bias.cl b/src/caffe/greentea/cl_kernels/bias.cl new file mode 100644 index 00000000000..048f17928d5 --- /dev/null +++ b/src/caffe/greentea/cl_kernels/bias.cl @@ -0,0 +1,43 @@ +#ifndef __OPENCL_VERSION__ +#include "header.cl" +#endif + +__kernel void TEMPLATE(bias_forward,Dtype)(const int_tp n, + __global const Dtype* in, + __global const Dtype* bias, + const int_tp bias_dim, + const int_tp inner_dim, + __global Dtype* out) { + for (int_tp index = get_global_id(0); index < n; + index += get_global_size(0)) { + const int_tp bias_index = (index / inner_dim) % bias_dim; + out[index] = in[index] + bias[bias_index]; + } +} + +__kernel void TEMPLATE(scale_forward,Dtype)(const int_tp n, + __global const Dtype* in, + __global const Dtype* scale, + const int_tp scale_dim, + const int_tp inner_dim, + __global Dtype* out) { + for (int_tp index = get_global_id(0); index < n; + index += get_global_size(0)) { + const int_tp scale_index = (index / inner_dim) % scale_dim; + out[index] = in[index] * scale[scale_index]; + } +} + +__kernel void TEMPLATE(scale_bias_forward,Dtype)(const int_tp n, + __global const Dtype* in, + __global const Dtype* scale, + __global const Dtype* bias, + const int_tp scale_dim, + const int_tp inner_dim, + __global Dtype* out) { + for (int_tp index = get_global_id(0); index < n; + index += get_global_size(0)) { + const int_tp scale_index = (index / inner_dim) % scale_dim; + out[index] = in[index] * scale[scale_index] + bias[scale_index]; + } +} diff --git a/src/caffe/layers/bias_layer.cpp b/src/caffe/layers/bias_layer.cpp index 0a786b5db98..f663eda7360 100644 --- a/src/caffe/layers/bias_layer.cpp +++ b/src/caffe/layers/bias_layer.cpp @@ -14,8 +14,8 @@ void BiasLayer::LayerSetUp(const vector*>& bottom, } else if (bottom.size() == 1) { // bias is a learned parameter; initialize it const BiasParameter& param = this->layer_param_.bias_param(); - const int axis = bottom[0]->CanonicalAxisIndex(param.axis()); - const int num_axes = param.num_axes(); + const int_tp axis = bottom[0]->CanonicalAxisIndex(param.axis()); + const int_tp num_axes = param.num_axes(); CHECK_GE(num_axes, -1) << "num_axes must be non-negative, " << "or -1 to extend to the end of bottom[0]"; if (num_axes >= 0) { @@ -24,11 +24,11 @@ void BiasLayer::LayerSetUp(const vector*>& bottom, << "starting with bottom[0] axis = " << axis; } this->blobs_.resize(1); - const vector::const_iterator& shape_start = + const vector::const_iterator& shape_start = bottom[0]->shape().begin() + axis; - const vector::const_iterator& shape_end = + const vector::const_iterator& shape_end = (num_axes == -1) ? bottom[0]->shape().end() : (shape_start + num_axes); - vector bias_shape(shape_start, shape_end); + vector bias_shape(shape_start, shape_end); this->blobs_[0].reset(new Blob(bias_shape)); shared_ptr > filler(GetFiller(param.filler())); filler->Fill(this->blobs_[0].get()); @@ -45,12 +45,12 @@ void BiasLayer::Reshape(const vector*>& bottom, // (num_axes == 0). Mathematically equivalent for any choice of axis, so the // actual setting can be safely ignored; and computation is most efficient // with axis == 0 and (therefore) outer_dim_ == 1. - const int axis = (bias->num_axes() == 0) ? + const int_tp axis = (bias->num_axes() == 0) ? 0 : bottom[0]->CanonicalAxisIndex(param.axis()); CHECK_GE(bottom[0]->num_axes(), axis + bias->num_axes()) << "bias blob's shape extends past bottom[0]'s shape when applied " << "starting with bottom[0] axis = " << axis; - for (int i = 0; i < bias->num_axes(); ++i) { + for (int_tp i = 0; i < bias->num_axes(); ++i) { CHECK_EQ(bottom[0]->shape(axis + i), bias->shape(i)) << "dimension mismatch between bottom[0]->shape(" << axis + i << ") and bias->shape(" << i << ")"; @@ -62,7 +62,7 @@ void BiasLayer::Reshape(const vector*>& bottom, if (bottom[0] != top[0]) { top[0]->ReshapeLike(*bottom[0]); } - bias_multiplier_.Reshape(vector(1, inner_dim_)); + bias_multiplier_.Reshape(vector(1, inner_dim_)); if (bias_multiplier_.cpu_data()[inner_dim_ - 1] != Dtype(1)) { caffe_set(inner_dim_, Dtype(1), bias_multiplier_.mutable_cpu_data()); } @@ -78,7 +78,7 @@ void BiasLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->cpu_data(); caffe_copy(bottom[0]->count(), bottom_data, top_data); } - for (int n = 0; n < outer_dim_; ++n) { + for (int_tp n = 0; n < outer_dim_; ++n) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, bias_dim_, inner_dim_, Dtype(1), Dtype(1), bias_data, bias_multiplier_.cpu_data(), Dtype(1), top_data); @@ -102,7 +102,7 @@ void BiasLayer::Backward_cpu(const vector*>& top, Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1]) ->mutable_cpu_diff(); bool accum = bias_param; - for (int n = 0; n < outer_dim_; ++n) { + for (int_tp n = 0; n < outer_dim_; ++n) { caffe_cpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1), top_diff, bias_multiplier_.cpu_data(), Dtype(accum), bias_diff); top_diff += dim_; diff --git a/src/caffe/layers/bias_layer.cu b/src/caffe/layers/bias_layer.cu index 8ac913a5d7b..7ce6fd5db85 100644 --- a/src/caffe/layers/bias_layer.cu +++ b/src/caffe/layers/bias_layer.cu @@ -4,53 +4,116 @@ #include "caffe/layers/bias_layer.hpp" #include "caffe/util/math_functions.hpp" + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { +#ifdef USE_CUDA template -__global__ void BiasForward(const int n, const Dtype* in, - const Dtype* bias, const int bias_dim, const int inner_dim, +__global__ void BiasForward(const int_tp n, const Dtype* in, + const Dtype* bias, const int_tp bias_dim, const int_tp inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { - const int bias_index = (index / inner_dim) % bias_dim; + const int_tp bias_index = (index / inner_dim) % bias_dim; out[index] = in[index] + bias[bias_index]; } } +#endif // USE_CUDA template void BiasLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - const int count = top[0]->count(); + const int_tp count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bias_data = ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - BiasForward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data); + + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + BiasForward // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data); +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); + viennacl::ocl::kernel &oclk_bias_forward = program.get_kernel( + CL_KERNEL_SELECT("bias_forward")); + viennacl::ocl::enqueue( + oclk_bias_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) bias_data, &ctx), bias_dim_, + inner_dim_, WrapHandle((cl_mem) top_data, &ctx)), + ctx.get_queue()); +#endif // USE_GREENTEA + } } -template +template void BiasLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (propagate_down[0] && bottom[0] != top[0]) { - const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - caffe_copy(bottom[0]->count(), top_diff, bottom_diff); - } - // in-place, we don't need to do anything with the data diff - const bool bias_param = (bottom.size() == 1); - if ((!bias_param && propagate_down[1]) || - (bias_param && this->param_propagate_down_[0])) { - const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1]) - ->mutable_gpu_diff(); - bool accum = bias_param; - for (int n = 0; n < outer_dim_; ++n) { - caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1), - top_diff, bias_multiplier_.gpu_data(), Dtype(accum), bias_diff); - top_diff += dim_; - accum = true; + const vector& propagate_down, + const vector*>& bottom) { + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (propagate_down[0] && bottom[0] != top[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + caffe_copy(bottom[0]->count(), top_diff, bottom_diff); + } + // in-place, we don't need to do anything with the data diff + const bool bias_param = (bottom.size() == 1); + if ((!bias_param && propagate_down[1]) + || (bias_param && this->param_propagate_down_[0])) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1]) + ->mutable_gpu_diff(); + bool accum = bias_param; + + for (int_tp n = 0; n < outer_dim_; ++n) { + caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1), top_diff, + bias_multiplier_.gpu_data(), Dtype(accum), bias_diff); + top_diff += dim_; + accum = true; + } + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + + if (propagate_down[0] && bottom[0] != top[0]) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + greentea_copy(bottom[0]->count(), (cl_mem) top_diff, 0, + (cl_mem) bottom_diff, 0, &ctx); + } + // in-place, we don't need to do anything with the data diff + const bool bias_param = (bottom.size() == 1); + if ((!bias_param && propagate_down[1]) + || (bias_param && this->param_propagate_down_[0])) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1]) + ->mutable_gpu_diff(); + bool accum = bias_param; + + int_tp top_diff_off = 0; + for (int_tp n = 0; n < outer_dim_; ++n) { + greentea_gpu_gemv(this->device_->id(), CblasNoTrans, bias_dim_, + inner_dim_, Dtype(1), (cl_mem) top_diff, top_diff_off, + (cl_mem) (bias_multiplier_.gpu_data()), 0, + Dtype(accum), (cl_mem) bias_diff, 0); + top_diff_off += dim_; + accum = true; + } } +#endif // USE_GREENTEA } } diff --git a/src/caffe/layers/scale_layer.cpp b/src/caffe/layers/scale_layer.cpp index ecdbb123e31..34ca2ea5174 100644 --- a/src/caffe/layers/scale_layer.cpp +++ b/src/caffe/layers/scale_layer.cpp @@ -17,7 +17,7 @@ void ScaleLayer::LayerSetUp(const vector*>& bottom, } else if (bottom.size() == 1) { // scale is a learned parameter; initialize it axis_ = bottom[0]->CanonicalAxisIndex(param.axis()); - const int num_axes = param.num_axes(); + const int_tp num_axes = param.num_axes(); CHECK_GE(num_axes, -1) << "num_axes must be non-negative, " << "or -1 to extend to the end of bottom[0]"; if (num_axes >= 0) { @@ -26,11 +26,11 @@ void ScaleLayer::LayerSetUp(const vector*>& bottom, << "starting with bottom[0] axis = " << axis_; } this->blobs_.resize(1); - const vector::const_iterator& shape_start = + const vector::const_iterator& shape_start = bottom[0]->shape().begin() + axis_; - const vector::const_iterator& shape_end = + const vector::const_iterator& shape_end = (num_axes == -1) ? bottom[0]->shape().end() : (shape_start + num_axes); - vector scale_shape(shape_start, shape_end); + vector scale_shape(shape_start, shape_end); this->blobs_[0].reset(new Blob(scale_shape)); FillerParameter filler_param(param.filler()); if (!param.has_filler()) { @@ -80,7 +80,7 @@ void ScaleLayer::Reshape(const vector*>& bottom, CHECK_GE(bottom[0]->num_axes(), axis_ + scale->num_axes()) << "scale blob's shape extends past bottom[0]'s shape when applied " << "starting with bottom[0] axis = " << axis_; - for (int i = 0; i < scale->num_axes(); ++i) { + for (int_tp i = 0; i < scale->num_axes(); ++i) { CHECK_EQ(bottom[0]->shape(axis_ + i), scale->shape(i)) << "dimension mismatch between bottom[0]->shape(" << axis_ + i << ") and scale->shape(" << i << ")"; @@ -93,9 +93,9 @@ void ScaleLayer::Reshape(const vector*>& bottom, } else { top[0]->ReshapeLike(*bottom[0]); } - sum_result_.Reshape(vector(1, outer_dim_ * scale_dim_)); - const int sum_mult_size = std::max(outer_dim_, inner_dim_); - sum_multiplier_.Reshape(vector(1, sum_mult_size)); + sum_result_.Reshape(vector(1, outer_dim_ * scale_dim_)); + const int_tp sum_mult_size = std::max(outer_dim_, inner_dim_); + sum_multiplier_.Reshape(vector(1, sum_mult_size)); if (sum_multiplier_.cpu_data()[sum_mult_size - 1] != Dtype(1)) { caffe_set(sum_mult_size, Dtype(1), sum_multiplier_.mutable_cpu_data()); } @@ -120,8 +120,8 @@ void ScaleLayer::Forward_cpu( const Dtype* scale_data = ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - for (int n = 0; n < outer_dim_; ++n) { - for (int d = 0; d < scale_dim_; ++d) { + for (int_tp n = 0; n < outer_dim_; ++n) { + for (int_tp d = 0; d < scale_dim_; ++d) { const Dtype factor = scale_data[d]; caffe_cpu_scale(inner_dim_, factor, bottom_data, top_data); bottom_data += inner_dim_; @@ -198,8 +198,8 @@ void ScaleLayer::Backward_cpu(const vector*>& top, const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* scale_data = scale->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - for (int n = 0; n < outer_dim_; ++n) { - for (int d = 0; d < scale_dim_; ++d) { + for (int_tp n = 0; n < outer_dim_; ++n) { + for (int_tp d = 0; d < scale_dim_; ++d) { const Dtype factor = scale_data[d]; caffe_cpu_scale(inner_dim_, factor, top_diff, bottom_diff); bottom_diff += inner_dim_; diff --git a/src/caffe/layers/scale_layer.cu b/src/caffe/layers/scale_layer.cu index fc9a8064db5..02c10dbf8e6 100644 --- a/src/caffe/layers/scale_layer.cu +++ b/src/caffe/layers/scale_layer.cu @@ -4,129 +4,266 @@ #include "caffe/layers/scale_layer.hpp" #include "caffe/util/math_functions.hpp" + +#ifdef USE_GREENTEA +#include "caffe/greentea/greentea.hpp" +#include "caffe/greentea/greentea_math_functions.hpp" +#endif + namespace caffe { +#ifdef USE_CUDA template -__global__ void ScaleForward(const int n, const Dtype* in, - const Dtype* scale, const int scale_dim, const int inner_dim, +__global__ void ScaleForward(const int_tp n, const Dtype* in, + const Dtype* scale, const int_tp scale_dim, const int_tp inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { - const int scale_index = (index / inner_dim) % scale_dim; + const int_tp scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index]; } } template -__global__ void ScaleBiasForward(const int n, const Dtype* in, +__global__ void ScaleBiasForward(const int_tp n, const Dtype* in, const Dtype* scale, const Dtype* bias, - const int scale_dim, const int inner_dim, Dtype* out) { + const int_tp scale_dim, const int_tp inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { - const int scale_index = (index / inner_dim) % scale_dim; + const int_tp scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index] + bias[scale_index]; } } +#endif // USE_CUDA -template -void ScaleLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { - const int count = top[0]->count(); +template +void ScaleLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const int_tp count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); - if (bottom[0] == top[0]) { - // in-place computation; need to store bottom data before overwriting it. - // Note that this is only necessary for Backward; we could skip this if not - // doing Backward, but Caffe currently provides no way of knowing whether - // we'll need to do Backward at the time of the Forward call. - caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), - temp_.mutable_gpu_data()); - } - const Dtype* scale_data = - ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - if (bias_layer_) { - const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data(); - ScaleBiasForward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_, - top_data); + + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if (bottom[0] == top[0]) { + caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), + temp_.mutable_gpu_data()); + } + const Dtype* scale_data = ( + (bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + if (bias_layer_) { + const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data(); + ScaleBiasForward // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_, + top_data); + } else { + ScaleForward // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data); + } +#endif // USE_CUDA } else { - ScaleForward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data); +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); + + if (bottom[0] == top[0]) { + greentea_copy(bottom[0]->count(), (cl_mem) (bottom[0]->gpu_data()), + 0, (cl_mem) (temp_.mutable_gpu_data()), 0, &ctx); + } + const Dtype* scale_data = ( + (bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + if (bias_layer_) { + const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data(); + viennacl::ocl::kernel &oclk_scale_bias_forward = program.get_kernel( + CL_KERNEL_SELECT("scale_bias_forward")); + viennacl::ocl::enqueue( + oclk_scale_bias_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), + WrapHandle((cl_mem) scale_data, &ctx), + WrapHandle((cl_mem) bias_data, &ctx), + scale_dim_, inner_dim_, + WrapHandle((cl_mem) top_data, &ctx)), + ctx.get_queue()); + } else { + viennacl::ocl::kernel &oclk_scale_forward = program.get_kernel( + CL_KERNEL_SELECT("scale_forward")); + viennacl::ocl::enqueue( + oclk_scale_forward(count, WrapHandle((cl_mem)bottom_data, &ctx), + WrapHandle((cl_mem)scale_data, &ctx), scale_dim_, + inner_dim_, WrapHandle((cl_mem)top_data, &ctx)), + ctx.get_queue()); + } +#endif // USE_GREENTEA } } -template +template void ScaleLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (bias_layer_ && - this->param_propagate_down_[this->param_propagate_down_.size() - 1]) { + const vector& propagate_down, + const vector*>& bottom) { + if (bias_layer_ + && this->param_propagate_down_[this->param_propagate_down_.size() - 1]) { bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_); } const bool scale_param = (bottom.size() == 1); Blob* scale = scale_param ? this->blobs_[0].get() : bottom[1]; - if ((!scale_param && propagate_down[1]) || - (scale_param && this->param_propagate_down_[0])) { - const Dtype* top_diff = top[0]->gpu_diff(); - const bool in_place = (bottom[0] == top[0]); - const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data(); - // Hack: store big eltwise product in bottom[0] diff, except in the special - // case where this layer itself does the eltwise product, in which case we - // can store it directly in the scale diff, and we're done. - // If we're computing in-place (and not doing eltwise computation), this - // hack doesn't work and we store the product in temp_. - const bool is_eltwise = (bottom[0]->count() == scale->count()); - Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() : - (in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff())); - caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product); - if (!is_eltwise) { - Dtype* sum_result = NULL; - if (inner_dim_ == 1) { - sum_result = product; - } else if (sum_result_.count() == 1) { - const Dtype* sum_mult = sum_multiplier_.gpu_data(); - Dtype* scale_diff = scale->mutable_cpu_diff(); - if (scale_param) { - Dtype result; - caffe_gpu_dot(inner_dim_, product, sum_mult, &result); - *scale_diff += result; + + if (this->device_->backend() == BACKEND_CUDA) { +#ifdef USE_CUDA + if ((!scale_param && propagate_down[1]) + || (scale_param && this->param_propagate_down_[0])) { + const Dtype* top_diff = top[0]->gpu_diff(); + const bool in_place = (bottom[0] == top[0]); + const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data(); + const bool is_eltwise = (bottom[0]->count() == scale->count()); + Dtype* product = ( + is_eltwise ? + scale->mutable_gpu_diff() : + (in_place ? + temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff())); + caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product); + if (!is_eltwise) { + Dtype* sum_result = NULL; + if (inner_dim_ == 1) { + sum_result = product; + } else if (sum_result_.count() == 1) { + const Dtype* sum_mult = sum_multiplier_.gpu_data(); + Dtype* scale_diff = scale->mutable_cpu_diff(); + if (scale_param) { + Dtype result; + caffe_gpu_dot(inner_dim_, product, sum_mult, &result); + *scale_diff += result; + } else { + caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff); + } } else { - caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff); + const Dtype* sum_mult = sum_multiplier_.gpu_data(); + sum_result = + (outer_dim_ == 1) ? + scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); + caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_, + Dtype(1), product, sum_mult, Dtype(0), sum_result); + } + if (outer_dim_ != 1) { + const Dtype* sum_mult = sum_multiplier_.gpu_data(); + if (scale_dim_ == 1) { + Dtype* scale_diff = scale->mutable_cpu_diff(); + if (scale_param) { + Dtype result; + caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result); + *scale_diff += result; + } else { + caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff); + } + } else { + Dtype* scale_diff = scale->mutable_gpu_diff(); + caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_, Dtype(1), + sum_result, sum_mult, Dtype(scale_param), + scale_diff); + } } - } else { - const Dtype* sum_mult = sum_multiplier_.gpu_data(); - sum_result = (outer_dim_ == 1) ? - scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); - caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_, - Dtype(1), product, sum_mult, Dtype(0), sum_result); } - if (outer_dim_ != 1) { - const Dtype* sum_mult = sum_multiplier_.gpu_data(); - if (scale_dim_ == 1) { + } + if (propagate_down[0]) { + const int_tp count = top[0]->count(); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* scale_data = scale->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + ScaleForward // NOLINT_NEXT_LINE(whitespace/operators) + CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( + count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff); + } +#endif // USE_CUDA + } else { +#ifdef USE_GREENTEA + viennacl::ocl::context &ctx = viennacl::ocl::get_context( + this->device_->id()); + viennacl::ocl::program &program = this->device_->program(); + + if ((!scale_param && propagate_down[1]) + || (scale_param && this->param_propagate_down_[0])) { + const Dtype* top_diff = top[0]->gpu_diff(); + const bool in_place = (bottom[0] == top[0]); + const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data(); + const bool is_eltwise = (bottom[0]->count() == scale->count()); + Dtype* product = ( + is_eltwise ? + scale->mutable_gpu_diff() : + (in_place ? + temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff())); + greentea_gpu_mul(this->device_->id(), top[0]->count(), + (cl_mem) top_diff, 0, (cl_mem) bottom_data, 0, + (cl_mem) product, 0); + if (!is_eltwise) { + Dtype* sum_result = NULL; + if (inner_dim_ == 1) { + sum_result = product; + } else if (sum_result_.count() == 1) { + const Dtype* sum_mult = sum_multiplier_.gpu_data(); Dtype* scale_diff = scale->mutable_cpu_diff(); if (scale_param) { Dtype result; - caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result); + greentea_gpu_dot(this->device_->id(), inner_dim_, + (cl_mem) product, 0, (cl_mem) sum_mult, 0, + &result); *scale_diff += result; } else { - caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff); + greentea_gpu_dot(this->device_->id(), inner_dim_, + (cl_mem) product, 0, (cl_mem) sum_mult, 0, + scale_diff); } } else { - Dtype* scale_diff = scale->mutable_gpu_diff(); - caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_, - Dtype(1), sum_result, sum_mult, Dtype(scale_param), - scale_diff); + const Dtype* sum_mult = sum_multiplier_.gpu_data(); + sum_result = + (outer_dim_ == 1) ? + scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); + greentea_gpu_gemv(this->device_->id(), CblasNoTrans, + sum_result_.count(), inner_dim_, Dtype(1), + (cl_mem) product, 0, (cl_mem) sum_mult, 0, + Dtype(0), (cl_mem) sum_result, 0); + } + if (outer_dim_ != 1) { + const Dtype* sum_mult = sum_multiplier_.gpu_data(); + if (scale_dim_ == 1) { + Dtype* scale_diff = scale->mutable_cpu_diff(); + if (scale_param) { + Dtype result; + greentea_gpu_dot(this->device_->id(), outer_dim_, + (cl_mem) sum_mult, 0, (cl_mem) sum_result, + 0, &result); + *scale_diff += result; + } else { + greentea_gpu_dot(this->device_->id(), outer_dim_, + (cl_mem) sum_mult, 0, (cl_mem) sum_result, + 0, scale_diff); + } + } else { + Dtype* scale_diff = scale->mutable_gpu_diff(); + greentea_gpu_gemv(this->device_->id(), CblasTrans, + outer_dim_, scale_dim_, Dtype(1), + (cl_mem) sum_result, 0, (cl_mem) sum_mult, + 0, Dtype(scale_param), (cl_mem) scale_diff, + 0); + } } } } - } - if (propagate_down[0]) { - const int count = top[0]->count(); - const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* scale_data = scale->gpu_data(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - ScaleForward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff); + if (propagate_down[0]) { + const int_tp count = top[0]->count(); + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* scale_data = scale->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + viennacl::ocl::kernel &oclk_scale_forward = program.get_kernel( + CL_KERNEL_SELECT("scale_forward")); + viennacl::ocl::enqueue( + oclk_scale_forward(count, WrapHandle((cl_mem) top_diff, &ctx), + WrapHandle((cl_mem) scale_data, &ctx), scale_dim_, + inner_dim_, + WrapHandle((cl_mem) bottom_diff, &ctx)), + ctx.get_queue()); + } +#endif // USE_GREENTEA } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index d1417a9edb6..4c8c9b2dc06 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -398,8 +398,8 @@ message LayerParameter { optional ThresholdParameter threshold_param = 128; optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; - optional MergeCropParameter mergecrop_param = 141; - optional AffinityParameter affinity_param = 142; + optional MergeCropParameter mergecrop_param = 143; + optional AffinityParameter affinity_param = 144; } // Message that stores parameters used to apply transformation @@ -428,7 +428,7 @@ message TransformationParameter { // Message that stores parameters shared by loss layers message LossParameter { // If specified, ignore instances with the given label. - optional int32 ignore_label = 1; + optional int64 ignore_label = 1; // How to normalize the loss for loss layers that aggregate across batches, // spatial dimensions, or other dimensions. Currently only implemented in // SoftmaxWithLoss layer. @@ -520,7 +520,7 @@ message BiasParameter { // (axis == 3 == -1) 60 // Furthermore, bottom[1] may have the empty shape (regardless of the value of // "axis") -- a scalar bias. - optional int32 axis = 1 [default = 1]; + optional int64 axis = 1 [default = 1]; // (num_axes is ignored unless just one bottom is given and the bias is // a learned parameter of the layer. Otherwise, num_axes is determined by the @@ -528,7 +528,7 @@ message BiasParameter { // The number of axes of the input (bottom[0]) covered by the bias // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. // Set num_axes := 0, to add a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; + optional int64 num_axes = 2 [default = 1]; // (filler is ignored unless just one bottom is given and the bias is // a learned parameter of the layer.) @@ -1021,7 +1021,7 @@ message ScaleParameter { // (axis == 3 == -1) 60 // Furthermore, bottom[1] may have the empty shape (regardless of the value of // "axis") -- a scalar multiplier. - optional int32 axis = 1 [default = 1]; + optional int64 axis = 1 [default = 1]; // (num_axes is ignored unless just one bottom is given and the scale is // a learned parameter of the layer. Otherwise, num_axes is determined by the @@ -1029,7 +1029,7 @@ message ScaleParameter { // The number of axes of the input (bottom[0]) covered by the scale // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; + optional int64 num_axes = 2 [default = 1]; // (filler is ignored unless just one bottom is given and the scale is // a learned parameter of the layer.) diff --git a/src/caffe/test/test_bias_layer.cpp b/src/caffe/test/test_bias_layer.cpp index 3862e763e28..42bd342d7ea 100644 --- a/src/caffe/test/test_bias_layer.cpp +++ b/src/caffe/test/test_bias_layer.cpp @@ -24,10 +24,10 @@ class BiasLayerTest : public MultiDeviceTest { blob_bottom_broadcast_0_(new Blob()), blob_bottom_broadcast_1_(new Blob()), blob_bottom_broadcast_2_(new Blob()), - blob_bottom_bias_(new Blob(vector())), + blob_bottom_bias_(new Blob(vector())), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); - vector broadcast_shape(2); + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); + vector broadcast_shape(2); broadcast_shape[0] = 2; broadcast_shape[1] = 3; this->blob_bottom_broadcast_0_->Reshape(broadcast_shape); broadcast_shape[0] = 3; broadcast_shape[1] = 4; @@ -79,10 +79,10 @@ TYPED_TEST(BiasLayerTest, TestForwardEltwise) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_->cpu_data(); const Dtype* in_data_b = this->blob_bottom_eltwise_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i], 1e-5); } } @@ -99,10 +99,10 @@ TYPED_TEST(BiasLayerTest, TestForwardEltwiseInPlace) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_bottom_->cpu_data(); - const int count = this->blob_bottom_->count(); + const int_tp count = this->blob_bottom_->count(); const Dtype* in_data_a = orig_bottom.cpu_data(); const Dtype* in_data_b = this->blob_bottom_eltwise_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i], 1e-5); } } @@ -143,11 +143,11 @@ TYPED_TEST(BiasLayerTest, TestBackwardEltwiseInPlace) { caffe_copy(top_diff.count(), top_diff.cpu_data(), this->blob_bottom_->mutable_cpu_diff()); layer->Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(orig_bottom_diff.cpu_diff()[i], this->blob_bottom_->cpu_diff()[i], 1e-5); } - for (int i = 0; i < this->blob_bottom_eltwise_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_eltwise_->count(); ++i) { EXPECT_NEAR(orig_bias_diff.cpu_diff()[i], this->blob_bottom_eltwise_->cpu_diff()[i], 1e-5); } @@ -165,10 +165,10 @@ TYPED_TEST(BiasLayerTest, TestForwardEltwiseWithParam) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_->cpu_data(); const Dtype* in_data_b = layer->blobs()[0]->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i], 1e-5); } } @@ -182,10 +182,10 @@ TYPED_TEST(BiasLayerTest, TestForwardBroadcastBegin) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) + this->blob_bottom_broadcast_0_->data_at(n, c, 0, 0), @@ -205,10 +205,10 @@ TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddle) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) + this->blob_bottom_broadcast_1_->data_at(c, h, 0, 0), @@ -230,10 +230,10 @@ TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddleInPlace) { shared_ptr > layer(new BiasLayer(layer_param)); layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_bottom_->data_at(n, c, h, w), orig_bottom.data_at(n, c, h, w) + this->blob_bottom_broadcast_1_->data_at(c, h, 0, 0), @@ -280,11 +280,11 @@ TYPED_TEST(BiasLayerTest, TestBackwardBroadcastMiddleInPlace) { caffe_copy(top_diff.count(), top_diff.cpu_data(), this->blob_bottom_->mutable_cpu_diff()); layer->Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(orig_bottom_diff.cpu_diff()[i], this->blob_bottom_->cpu_diff()[i], 1e-5); } - for (int i = 0; i < this->blob_bottom_broadcast_1_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_broadcast_1_->count(); ++i) { EXPECT_NEAR(orig_bias_diff.cpu_diff()[i], this->blob_bottom_broadcast_1_->cpu_diff()[i], 1e-5); } @@ -301,10 +301,10 @@ TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddleWithParam) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) + layer->blobs()[0]->data_at(c, h, 0, 0), 1e-5); @@ -323,10 +323,10 @@ TYPED_TEST(BiasLayerTest, TestForwardBroadcastEnd) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) + this->blob_bottom_broadcast_2_->data_at(h, w, 0, 0), @@ -346,10 +346,10 @@ TYPED_TEST(BiasLayerTest, TestForwardBias) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data = this->blob_bottom_->cpu_data(); const Dtype bias = *this->blob_bottom_bias_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data[i] + bias, 1e-5); } } @@ -364,10 +364,10 @@ TYPED_TEST(BiasLayerTest, TestForwardBiasAxis2) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data = this->blob_bottom_->cpu_data(); const Dtype bias = *this->blob_bottom_bias_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data[i] + bias, 1e-5); } } diff --git a/src/caffe/test/test_scale_layer.cpp b/src/caffe/test/test_scale_layer.cpp index ad116795f44..69c7643dbaf 100644 --- a/src/caffe/test/test_scale_layer.cpp +++ b/src/caffe/test/test_scale_layer.cpp @@ -24,15 +24,18 @@ class ScaleLayerTest : public MultiDeviceTest { blob_bottom_broadcast_0_(new Blob()), blob_bottom_broadcast_1_(new Blob()), blob_bottom_broadcast_2_(new Blob()), - blob_bottom_scale_(new Blob(vector())), + blob_bottom_scale_(new Blob(vector())), blob_top_(new Blob()) { - Caffe::set_random_seed(1701); - vector broadcast_shape(2); - broadcast_shape[0] = 2; broadcast_shape[1] = 3; + Caffe::set_random_seed(1701, Caffe::GetDefaultDevice()); + vector broadcast_shape(2); + broadcast_shape[0] = 2; + broadcast_shape[1] = 3; this->blob_bottom_broadcast_0_->Reshape(broadcast_shape); - broadcast_shape[0] = 3; broadcast_shape[1] = 4; + broadcast_shape[0] = 3; + broadcast_shape[1] = 4; this->blob_bottom_broadcast_1_->Reshape(broadcast_shape); - broadcast_shape[0] = 4; broadcast_shape[1] = 5; + broadcast_shape[0] = 4; + broadcast_shape[1] = 5; this->blob_bottom_broadcast_2_->Reshape(broadcast_shape); FillerParameter filler_param; filler_param.set_min(1); @@ -79,10 +82,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardEltwise) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_->cpu_data(); const Dtype* in_data_b = this->blob_bottom_eltwise_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i], 1e-5); } } @@ -99,10 +102,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardEltwiseInPlace) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_bottom_->cpu_data(); - const int count = this->blob_bottom_->count(); + const int_tp count = this->blob_bottom_->count(); const Dtype* in_data_a = orig_bottom.cpu_data(); const Dtype* in_data_b = this->blob_bottom_eltwise_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i], 1e-5); } } @@ -143,11 +146,11 @@ TYPED_TEST(ScaleLayerTest, TestBackwardEltwiseInPlace) { caffe_copy(top_diff.count(), top_diff.cpu_data(), this->blob_bottom_->mutable_cpu_diff()); layer->Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(orig_bottom_diff.cpu_diff()[i], this->blob_bottom_->cpu_diff()[i], 1e-5); } - for (int i = 0; i < this->blob_bottom_eltwise_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_eltwise_->count(); ++i) { EXPECT_NEAR(orig_scale_diff.cpu_diff()[i], this->blob_bottom_eltwise_->cpu_diff()[i], 1e-5); } @@ -165,10 +168,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardEltwiseWithParam) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_->cpu_data(); const Dtype* in_data_b = layer->blobs()[0]->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i], 1e-5); } } @@ -182,10 +185,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardBroadcastBegin) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) * this->blob_bottom_broadcast_0_->data_at(n, c, 0, 0), @@ -205,10 +208,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddle) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) * this->blob_bottom_broadcast_1_->data_at(c, h, 0, 0), @@ -230,10 +233,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleInPlace) { shared_ptr > layer(new ScaleLayer(layer_param)); layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_bottom_->data_at(n, c, h, w), orig_bottom.data_at(n, c, h, w) * this->blob_bottom_broadcast_1_->data_at(c, h, 0, 0), @@ -280,11 +283,11 @@ TYPED_TEST(ScaleLayerTest, TestBackwardBroadcastMiddleInPlace) { caffe_copy(top_diff.count(), top_diff.cpu_data(), this->blob_bottom_->mutable_cpu_diff()); layer->Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_NEAR(orig_bottom_diff.cpu_diff()[i], this->blob_bottom_->cpu_diff()[i], 1e-5); } - for (int i = 0; i < this->blob_bottom_broadcast_1_->count(); ++i) { + for (int_tp i = 0; i < this->blob_bottom_broadcast_1_->count(); ++i) { EXPECT_NEAR(orig_scale_diff.cpu_diff()[i], this->blob_bottom_broadcast_1_->cpu_diff()[i], 1e-5); } @@ -301,10 +304,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleWithParam) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) * layer->blobs()[0]->data_at(c, h, 0, 0), 1e-5); @@ -327,10 +330,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleWithParamAndBias) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) * layer->blobs()[0]->data_at(c, h, 0, 0) + @@ -350,10 +353,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardBroadcastEnd) { layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - for (int c = 0; c < this->blob_bottom_->channels(); ++c) { - for (int h = 0; h < this->blob_bottom_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_->width(); ++w) { + for (int_tp n = 0; n < this->blob_bottom_->num(); ++n) { + for (int_tp c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int_tp h = 0; h < this->blob_bottom_->height(); ++h) { + for (int_tp w = 0; w < this->blob_bottom_->width(); ++w) { EXPECT_NEAR(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_->data_at(n, c, h, w) * this->blob_bottom_broadcast_2_->data_at(h, w, 0, 0), @@ -373,10 +376,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardScale) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data = this->blob_bottom_->cpu_data(); const Dtype scale = *this->blob_bottom_scale_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data[i] * scale, 1e-5); } } @@ -391,10 +394,10 @@ TYPED_TEST(ScaleLayerTest, TestForwardScaleAxis2) { ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_->shape()); layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); + const int_tp count = this->blob_top_->count(); const Dtype* in_data = this->blob_bottom_->cpu_data(); const Dtype scale = *this->blob_bottom_scale_->cpu_data(); - for (int i = 0; i < count; ++i) { + for (int_tp i = 0; i < count; ++i) { EXPECT_NEAR(data[i], in_data[i] * scale, 1e-5); } }